Diffstat (limited to 'arch')
-rw-r--r-- arch/alpha/Kconfig | 2
-rw-r--r-- arch/alpha/include/asm/atomic.h | 4
-rw-r--r-- arch/alpha/include/asm/fpu.h | 2
-rw-r--r-- arch/alpha/include/asm/ptrace.h | 5
-rw-r--r-- arch/alpha/include/asm/socket.h | 2
-rw-r--r-- arch/alpha/include/asm/uaccess.h | 34
-rw-r--r-- arch/alpha/include/asm/unistd.h | 4
-rw-r--r-- arch/alpha/include/asm/word-at-a-time.h | 55
-rw-r--r-- arch/alpha/kernel/alpha_ksyms.c | 3
-rw-r--r-- arch/alpha/kernel/entry.S | 161
-rw-r--r-- arch/alpha/kernel/osf_sys.c | 49
-rw-r--r-- arch/alpha/kernel/process.c | 19
-rw-r--r-- arch/alpha/kernel/systbls.S | 4
-rw-r--r-- arch/alpha/lib/Makefile | 2
-rw-r--r-- arch/alpha/lib/ev6-strncpy_from_user.S | 424
-rw-r--r-- arch/alpha/lib/ev67-strlen_user.S | 107
-rw-r--r-- arch/alpha/lib/strlen_user.S | 91
-rw-r--r-- arch/alpha/lib/strncpy_from_user.S | 339
-rw-r--r-- arch/alpha/mm/fault.c | 36
-rw-r--r-- arch/alpha/oprofile/common.c | 1
-rw-r--r-- arch/arm/Kconfig | 64
-rw-r--r-- arch/arm/Kconfig.debug | 6
-rw-r--r-- arch/arm/Makefile | 4
-rw-r--r-- arch/arm/boot/compressed/head.S | 5
-rw-r--r-- arch/arm/boot/dts/am33xx.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/at91sam9260.dtsi | 3
-rw-r--r-- arch/arm/boot/dts/at91sam9263.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/at91sam9g25ek.dts | 2
-rw-r--r-- arch/arm/boot/dts/at91sam9g45.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/at91sam9n12.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/at91sam9x5.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/imx23.dtsi | 52
-rw-r--r-- arch/arm/boot/dts/imx27-3ds.dts | 2
-rw-r--r-- arch/arm/boot/dts/imx27.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/imx28.dtsi | 74
-rw-r--r-- arch/arm/boot/dts/imx51-babbage.dts | 6
-rw-r--r-- arch/arm/boot/dts/imx51.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/imx53-ard.dts | 22
-rw-r--r-- arch/arm/boot/dts/imx53.dtsi | 7
-rw-r--r-- arch/arm/boot/dts/imx6q-sabrelite.dts | 1
-rw-r--r-- arch/arm/boot/dts/imx6q.dtsi | 7
-rw-r--r-- arch/arm/boot/dts/kirkwood-iconnect.dts | 6
-rw-r--r-- arch/arm/boot/dts/twl6030.dtsi | 3
-rw-r--r-- arch/arm/configs/armadillo800eva_defconfig | 2
-rw-r--r-- arch/arm/configs/imx_v6_v7_defconfig | 1
-rw-r--r-- arch/arm/configs/mxs_defconfig | 1
-rw-r--r-- arch/arm/configs/tct_hammer_defconfig | 2
-rw-r--r-- arch/arm/configs/u8500_defconfig | 1
-rw-r--r-- arch/arm/include/asm/assembler.h | 8
-rw-r--r-- arch/arm/include/asm/dma-mapping.h | 7
-rw-r--r-- arch/arm/include/asm/memory.h | 3
-rw-r--r-- arch/arm/include/asm/perf_event.h | 14
-rw-r--r-- arch/arm/include/asm/pgtable.h | 40
-rw-r--r-- arch/arm/include/asm/pmu.h | 111
-rw-r--r-- arch/arm/include/asm/sched_clock.h | 2
-rw-r--r-- arch/arm/include/asm/tlb.h | 4
-rw-r--r-- arch/arm/include/asm/topology.h | 35
-rw-r--r-- arch/arm/include/asm/uaccess.h | 58
-rw-r--r-- arch/arm/include/asm/unistd.h | 2
-rw-r--r-- arch/arm/kernel/Makefile | 3
-rw-r--r-- arch/arm/kernel/calls.S | 1
-rw-r--r-- arch/arm/kernel/hw_breakpoint.c | 119
-rw-r--r-- arch/arm/kernel/perf_event.c | 444
-rw-r--r-- arch/arm/kernel/perf_event_cpu.c | 380
-rw-r--r-- arch/arm/kernel/perf_event_v6.c | 134
-rw-r--r-- arch/arm/kernel/perf_event_v7.c | 307
-rw-r--r-- arch/arm/kernel/perf_event_xscale.c | 163
-rw-r--r-- arch/arm/kernel/pmu.c | 36
-rw-r--r-- arch/arm/kernel/sched_clock.c | 24
-rw-r--r-- arch/arm/kernel/smp_twd.c | 48
-rw-r--r-- arch/arm/kernel/topology.c | 120
-rw-r--r-- arch/arm/kernel/traps.c | 11
-rw-r--r-- arch/arm/lib/Makefile | 23
-rw-r--r-- arch/arm/lib/delay.c | 1
-rw-r--r-- arch/arm/lib/getuser.S | 23
-rw-r--r-- arch/arm/lib/io-readsw-armv3.S | 106
-rw-r--r-- arch/arm/lib/io-writesw-armv3.S | 126
-rw-r--r-- arch/arm/lib/putuser.S | 6
-rw-r--r-- arch/arm/lib/uaccess.S | 564
-rw-r--r-- arch/arm/mach-at91/at91rm9200_time.c | 2
-rw-r--r-- arch/arm/mach-at91/at91sam9260_devices.c | 6
-rw-r--r-- arch/arm/mach-at91/at91sam9261_devices.c | 6
-rw-r--r-- arch/arm/mach-at91/at91sam9263_devices.c | 10
-rw-r--r-- arch/arm/mach-at91/at91sam9g45_devices.c | 6
-rw-r--r-- arch/arm/mach-at91/at91sam9rl_devices.c | 6
-rw-r--r-- arch/arm/mach-at91/clock.c | 12
-rw-r--r-- arch/arm/mach-bcmring/arch.c | 3
-rw-r--r-- arch/arm/mach-davinci/board-neuros-osd2.c | 39
-rw-r--r-- arch/arm/mach-dove/common.c | 3
-rw-r--r-- arch/arm/mach-exynos/mach-origen.c | 7
-rw-r--r-- arch/arm/mach-exynos/mach-smdkv310.c | 7
-rw-r--r-- arch/arm/mach-exynos/pm_domains.c | 2
-rw-r--r-- arch/arm/mach-gemini/irq.c | 1
-rw-r--r-- arch/arm/mach-imx/Makefile | 10
-rw-r--r-- arch/arm/mach-imx/clk-imx25.c | 8
-rw-r--r-- arch/arm/mach-imx/clk-imx27.c | 8
-rw-r--r-- arch/arm/mach-imx/clk-imx31.c | 2
-rw-r--r-- arch/arm/mach-imx/clk-imx35.c | 6
-rw-r--r-- arch/arm/mach-imx/clk-imx51-imx53.c | 1
-rw-r--r-- arch/arm/mach-imx/clk-imx6q.c | 8
-rw-r--r-- arch/arm/mach-imx/headsmp.S (renamed from arch/arm/mach-imx/head-v7.S) | 0
-rw-r--r-- arch/arm/mach-imx/hotplug.c | 23
-rw-r--r-- arch/arm/mach-imx/mach-armadillo5x0.c | 3
-rw-r--r-- arch/arm/mach-imx/mach-imx6q.c | 4
-rw-r--r-- arch/arm/mach-integrator/core.c | 1
-rw-r--r-- arch/arm/mach-integrator/integrator_ap.c | 2
-rw-r--r-- arch/arm/mach-kirkwood/Makefile.boot | 7
-rw-r--r-- arch/arm/mach-kirkwood/common.c | 11
-rw-r--r-- arch/arm/mach-kirkwood/db88f6281-bp-setup.c | 1
-rw-r--r-- arch/arm/mach-mmp/sram.c | 2
-rw-r--r-- arch/arm/mach-mv78xx0/addr-map.c | 2
-rw-r--r-- arch/arm/mach-mv78xx0/common.c | 6
-rw-r--r-- arch/arm/mach-mxs/Kconfig | 6
-rw-r--r-- arch/arm/mach-mxs/Makefile | 3
-rw-r--r-- arch/arm/mach-mxs/mach-mxs.c | 2
-rw-r--r-- arch/arm/mach-omap2/Kconfig | 6
-rw-r--r-- arch/arm/mach-omap2/Makefile | 2
-rw-r--r-- arch/arm/mach-omap2/board-igep0020.c | 2
-rw-r--r-- arch/arm/mach-omap2/board-omap3evm.c | 1
-rw-r--r-- arch/arm/mach-omap2/clock33xx_data.c | 14
-rw-r--r-- arch/arm/mach-omap2/clockdomain2xxx_3xxx.c | 50
-rw-r--r-- arch/arm/mach-omap2/cm-regbits-34xx.h | 1
-rw-r--r-- arch/arm/mach-omap2/common-board-devices.c | 11
-rw-r--r-- arch/arm/mach-omap2/common-board-devices.h | 1
-rw-r--r-- arch/arm/mach-omap2/cpuidle44xx.c | 3
-rw-r--r-- arch/arm/mach-omap2/devices.c | 3
-rw-r--r-- arch/arm/mach-omap2/mux.h | 1
-rw-r--r-- arch/arm/mach-omap2/omap-wakeupgen.c | 2
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod.c | 1
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 15
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_44xx_data.c | 12
-rw-r--r-- arch/arm/mach-omap2/opp4xxx_data.c | 2
-rw-r--r-- arch/arm/mach-omap2/pm34xx.c | 19
-rw-r--r-- arch/arm/mach-omap2/sleep44xx.S | 8
-rw-r--r-- arch/arm/mach-omap2/timer.c | 7
-rw-r--r-- arch/arm/mach-omap2/twl-common.c | 1
-rw-r--r-- arch/arm/mach-orion5x/common.c | 10
-rw-r--r-- arch/arm/mach-pxa/devices.c | 3
-rw-r--r-- arch/arm/mach-pxa/raumfeld.c | 2
-rw-r--r-- arch/arm/mach-realview/realview_eb.c | 3
-rw-r--r-- arch/arm/mach-realview/realview_pb1176.c | 3
-rw-r--r-- arch/arm/mach-realview/realview_pb11mp.c | 3
-rw-r--r-- arch/arm/mach-realview/realview_pba8.c | 3
-rw-r--r-- arch/arm/mach-realview/realview_pbx.c | 3
-rw-r--r-- arch/arm/mach-s3c24xx/Kconfig | 4
-rw-r--r-- arch/arm/mach-s3c24xx/include/mach/dma.h | 3
-rw-r--r-- arch/arm/mach-sa1100/leds-hackkit.c | 1
-rw-r--r-- arch/arm/mach-shmobile/board-armadillo800eva.c | 13
-rw-r--r-- arch/arm/mach-shmobile/board-kzm9g.c | 4
-rw-r--r-- arch/arm/mach-shmobile/board-mackerel.c | 3
-rw-r--r-- arch/arm/mach-shmobile/board-marzen.c | 2
-rw-r--r-- arch/arm/mach-shmobile/intc-sh73a0.c | 4
-rw-r--r-- arch/arm/mach-tegra/board-harmony-power.c | 32
-rw-r--r-- arch/arm/mach-tegra/devices.c | 3
-rw-r--r-- arch/arm/mach-ux500/Kconfig | 1
-rw-r--r-- arch/arm/mach-ux500/board-mop500-msp.c | 10
-rw-r--r-- arch/arm/mach-ux500/board-mop500.c | 4
-rw-r--r-- arch/arm/mach-ux500/cpu-db8500.c | 4
-rw-r--r-- arch/arm/mach-vexpress/ct-ca9x4.c | 3
-rw-r--r-- arch/arm/mm/context.c | 7
-rw-r--r-- arch/arm/mm/dma-mapping.c | 128
-rw-r--r-- arch/arm/mm/flush.c | 2
-rw-r--r-- arch/arm/mm/mm.h | 3
-rw-r--r-- arch/arm/mm/mmu.c | 8
-rw-r--r-- arch/arm/mm/tlb-v7.S | 6
-rw-r--r-- arch/arm/plat-iop/pmu.c | 3
-rw-r--r-- arch/arm/plat-mxc/include/mach/mx25.h | 1
-rw-r--r-- arch/arm/plat-omap/dmtimer.c | 6
-rw-r--r-- arch/arm/plat-omap/include/plat/cpu.h | 3
-rw-r--r-- arch/arm/plat-omap/include/plat/multi.h | 9
-rw-r--r-- arch/arm/plat-omap/include/plat/uncompress.h | 4
-rw-r--r-- arch/arm/plat-omap/sram.c | 11
-rw-r--r-- arch/arm/plat-orion/common.c | 8
-rw-r--r-- arch/arm/plat-orion/include/plat/common.h | 6
-rw-r--r-- arch/arm/plat-s3c24xx/dma.c | 2
-rw-r--r-- arch/arm/plat-samsung/Kconfig | 3
-rw-r--r-- arch/arm/plat-samsung/clock.c | 10
-rw-r--r-- arch/arm/plat-samsung/devs.c | 32
-rw-r--r-- arch/arm/plat-samsung/include/plat/hdmi.h | 16
-rw-r--r-- arch/arm/plat-samsung/pm.c | 2
-rw-r--r-- arch/arm/vfp/vfpmodule.c | 2
-rw-r--r-- arch/blackfin/Kconfig | 1
-rw-r--r-- arch/blackfin/Makefile | 1
-rw-r--r-- arch/blackfin/include/asm/smp.h | 2
-rw-r--r-- arch/blackfin/kernel/setup.c | 1
-rw-r--r-- arch/blackfin/mach-common/smp.c | 223
-rw-r--r-- arch/c6x/Kconfig | 1
-rw-r--r-- arch/c6x/include/asm/Kbuild | 1
-rw-r--r-- arch/c6x/include/asm/barrier.h | 27
-rw-r--r-- arch/c6x/include/asm/cache.h | 16
-rw-r--r-- arch/ia64/configs/generic_defconfig | 1
-rw-r--r-- arch/ia64/configs/gensparse_defconfig | 1
-rw-r--r-- arch/ia64/kernel/acpi.c | 5
-rw-r--r-- arch/m68k/Kconfig | 13
-rw-r--r-- arch/m68k/Kconfig.cpu | 19
-rw-r--r-- arch/m68k/apollo/config.c | 16
-rw-r--r-- arch/m68k/include/asm/Kbuild | 25
-rw-r--r-- arch/m68k/include/asm/MC68332.h | 152
-rw-r--r-- arch/m68k/include/asm/apollodma.h | 248
-rw-r--r-- arch/m68k/include/asm/apollohw.h | 2
-rw-r--r-- arch/m68k/include/asm/bitsperlong.h | 1
-rw-r--r-- arch/m68k/include/asm/cputime.h | 6
-rw-r--r-- arch/m68k/include/asm/delay.h | 2
-rw-r--r-- arch/m68k/include/asm/device.h | 7
-rw-r--r-- arch/m68k/include/asm/emergency-restart.h | 6
-rw-r--r-- arch/m68k/include/asm/errno.h | 6
-rw-r--r-- arch/m68k/include/asm/futex.h | 6
-rw-r--r-- arch/m68k/include/asm/ioctl.h | 1
-rw-r--r-- arch/m68k/include/asm/ipcbuf.h | 1
-rw-r--r-- arch/m68k/include/asm/irq_regs.h | 1
-rw-r--r-- arch/m68k/include/asm/kdebug.h | 1
-rw-r--r-- arch/m68k/include/asm/kmap_types.h | 6
-rw-r--r-- arch/m68k/include/asm/kvm_para.h | 1
-rw-r--r-- arch/m68k/include/asm/local.h | 6
-rw-r--r-- arch/m68k/include/asm/local64.h | 1
-rw-r--r-- arch/m68k/include/asm/mac_mouse.h | 23
-rw-r--r-- arch/m68k/include/asm/mcfmbus.h | 77
-rw-r--r-- arch/m68k/include/asm/mman.h | 1
-rw-r--r-- arch/m68k/include/asm/mutex.h | 9
-rw-r--r-- arch/m68k/include/asm/percpu.h | 6
-rw-r--r-- arch/m68k/include/asm/resource.h | 6
-rw-r--r-- arch/m68k/include/asm/sbus.h | 45
-rw-r--r-- arch/m68k/include/asm/scatterlist.h | 6
-rw-r--r-- arch/m68k/include/asm/sections.h | 8
-rw-r--r-- arch/m68k/include/asm/shm.h | 31
-rw-r--r-- arch/m68k/include/asm/siginfo.h | 6
-rw-r--r-- arch/m68k/include/asm/statfs.h | 6
-rw-r--r-- arch/m68k/include/asm/topology.h | 6
-rw-r--r-- arch/m68k/include/asm/types.h | 22
-rw-r--r-- arch/m68k/include/asm/unaligned.h | 4
-rw-r--r-- arch/m68k/include/asm/xor.h | 1
-rw-r--r-- arch/m68k/kernel/setup_no.c | 11
-rw-r--r-- arch/m68k/kernel/sys_m68k.c | 8
-rw-r--r-- arch/m68k/kernel/vmlinux-nommu.lds | 2
-rw-r--r-- arch/m68k/kernel/vmlinux-std.lds | 2
-rw-r--r-- arch/m68k/kernel/vmlinux-sun3.lds | 2
-rw-r--r-- arch/m68k/lib/muldi3.c | 2
-rw-r--r-- arch/m68k/mm/init_mm.c | 2
-rw-r--r-- arch/m68k/mm/init_no.c | 2
-rw-r--r-- arch/m68k/platform/68328/head-de2.S | 8
-rw-r--r-- arch/m68k/platform/68328/head-pilot.S | 10
-rw-r--r-- arch/m68k/platform/68328/head-ram.S | 4
-rw-r--r-- arch/m68k/platform/68328/head-rom.S | 6
-rw-r--r-- arch/m68k/platform/68360/head-ram.S | 6
-rw-r--r-- arch/m68k/platform/68360/head-rom.S | 8
-rw-r--r-- arch/m68k/platform/coldfire/clk.c | 6
-rw-r--r-- arch/m68k/platform/coldfire/head.S | 10
-rw-r--r-- arch/m68k/sun3/prom/init.c | 48
-rw-r--r-- arch/microblaze/include/asm/sections.h | 4
-rw-r--r-- arch/microblaze/kernel/microblaze_ksyms.c | 3
-rw-r--r-- arch/microblaze/kernel/setup.c | 4
-rw-r--r-- arch/microblaze/kernel/vmlinux.lds.S | 1
-rw-r--r-- arch/mips/Kconfig | 1
-rw-r--r-- arch/mips/alchemy/board-mtx1.c | 2
-rw-r--r-- arch/mips/ath79/dev-usb.c | 2
-rw-r--r-- arch/mips/ath79/gpio.c | 6
-rw-r--r-- arch/mips/bcm63xx/dev-spi.c | 4
-rw-r--r-- arch/mips/cavium-octeon/octeon-irq.c | 89
-rw-r--r-- arch/mips/include/asm/mach-ath79/ar71xx_regs.h | 3
-rw-r--r-- arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h | 1
-rw-r--r-- arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h | 2
-rw-r--r-- arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h | 13
-rw-r--r-- arch/mips/include/asm/mach-cavium-octeon/irq.h | 10
-rw-r--r-- arch/mips/include/asm/module.h | 1
-rw-r--r-- arch/mips/include/asm/r4k-timer.h | 8
-rw-r--r-- arch/mips/kernel/module.c | 43
-rw-r--r-- arch/mips/kernel/smp-cmp.c | 2
-rw-r--r-- arch/mips/kernel/smp.c | 4
-rw-r--r-- arch/mips/kernel/sync-r4k.c | 26
-rw-r--r-- arch/mips/mm/gup.c | 2
-rw-r--r-- arch/mips/mti-malta/malta-int.c | 9
-rw-r--r-- arch/mips/mti-malta/malta-pci.c | 13
-rw-r--r-- arch/mips/mti-malta/malta-platform.c | 5
-rw-r--r-- arch/mips/pci/pci-ar724x.c | 22
-rw-r--r-- arch/parisc/include/asm/atomic.h | 4
-rw-r--r-- arch/parisc/kernel/process.c | 2
-rw-r--r-- arch/parisc/kernel/sys_parisc.c | 8
-rw-r--r-- arch/powerpc/boot/dts/fsl/p4080si-post.dtsi | 7
-rw-r--r-- arch/powerpc/configs/85xx/p1023rds_defconfig | 31
-rw-r--r-- arch/powerpc/configs/corenet32_smp_defconfig | 29
-rw-r--r-- arch/powerpc/configs/corenet64_smp_defconfig | 1
-rw-r--r-- arch/powerpc/configs/g5_defconfig | 103
-rw-r--r-- arch/powerpc/configs/mpc83xx_defconfig | 18
-rw-r--r-- arch/powerpc/configs/mpc85xx_defconfig | 33
-rw-r--r-- arch/powerpc/configs/mpc85xx_smp_defconfig | 32
-rw-r--r-- arch/powerpc/include/asm/cputable.h | 2
-rw-r--r-- arch/powerpc/include/asm/kvm_host.h | 1
-rw-r--r-- arch/powerpc/include/asm/kvm_ppc.h | 12
-rw-r--r-- arch/powerpc/include/asm/mpic_msgr.h | 1
-rw-r--r-- arch/powerpc/include/asm/processor.h | 1
-rw-r--r-- arch/powerpc/kernel/asm-offsets.c | 1
-rw-r--r-- arch/powerpc/kernel/dbell.c | 2
-rw-r--r-- arch/powerpc/kernel/dma-iommu.c | 9
-rw-r--r-- arch/powerpc/kernel/entry_64.S | 23
-rw-r--r-- arch/powerpc/kernel/exceptions-64s.S | 3
-rw-r--r-- arch/powerpc/kernel/hw_breakpoint.c | 2
-rw-r--r-- arch/powerpc/kernel/idle_power7.S | 2
-rw-r--r-- arch/powerpc/kernel/kgdb.c | 27
-rw-r--r-- arch/powerpc/kernel/process.c | 12
-rw-r--r-- arch/powerpc/kernel/smp.c | 11
-rw-r--r-- arch/powerpc/kernel/syscalls.c | 8
-rw-r--r-- arch/powerpc/kernel/sysfs.c | 10
-rw-r--r-- arch/powerpc/kernel/time.c | 9
-rw-r--r-- arch/powerpc/kernel/traps.c | 3
-rw-r--r-- arch/powerpc/kvm/book3s_32_mmu_host.c | 3
-rw-r--r-- arch/powerpc/kvm/book3s_64_mmu_host.c | 2
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 12
-rw-r--r-- arch/powerpc/kvm/e500_tlb.c | 11
-rw-r--r-- arch/powerpc/lib/code-patching.c | 2
-rw-r--r-- arch/powerpc/lib/copyuser_power7.S | 35
-rw-r--r-- arch/powerpc/lib/memcpy_power7.S | 4
-rw-r--r-- arch/powerpc/mm/mem.c | 1
-rw-r--r-- arch/powerpc/mm/numa.c | 7
-rw-r--r-- arch/powerpc/perf/core-book3s.c | 2
-rw-r--r-- arch/powerpc/platforms/powernv/smp.c | 10
-rw-r--r-- arch/powerpc/sysdev/fsl_pci.c | 13
-rw-r--r-- arch/powerpc/sysdev/mpic_msgr.c | 3
-rw-r--r-- arch/powerpc/sysdev/xics/icp-hv.c | 6
-rw-r--r-- arch/powerpc/xmon/xmon.c | 84
-rw-r--r-- arch/s390/Kconfig | 1
-rw-r--r-- arch/s390/include/asm/elf.h | 3
-rw-r--r-- arch/s390/include/asm/hugetlb.h | 24
-rw-r--r-- arch/s390/include/asm/posix_types.h | 3
-rw-r--r-- arch/s390/include/asm/smp.h | 1
-rw-r--r-- arch/s390/include/asm/sparsemem.h | 2
-rw-r--r-- arch/s390/include/asm/syscall.h | 10
-rw-r--r-- arch/s390/include/asm/tlbflush.h | 2
-rw-r--r-- arch/s390/kernel/compat_linux.c | 2
-rw-r--r-- arch/s390/kernel/compat_wrapper.S | 4
-rw-r--r-- arch/s390/kernel/ptrace.c | 7
-rw-r--r-- arch/s390/kernel/setup.c | 2
-rw-r--r-- arch/s390/kernel/sys_s390.c | 9
-rw-r--r-- arch/s390/lib/uaccess_pt.c | 142
-rw-r--r-- arch/s390/oprofile/init.c | 10
-rw-r--r-- arch/sh/drivers/dma/dma-sh.c | 2
-rw-r--r-- arch/sh/include/asm/sections.h | 1
-rw-r--r-- arch/sh/include/cpu-sh2a/cpu/sh7269.h | 36
-rw-r--r-- arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c | 195
-rw-r--r-- arch/sh/kernel/cpu/sh5/entry.S | 2
-rw-r--r-- arch/sh/kernel/entry-common.S | 2
-rw-r--r-- arch/sh/kernel/setup.c | 2
-rw-r--r-- arch/sh/kernel/sh_ksyms_32.c | 1
-rw-r--r-- arch/sh/kernel/vmlinux.lds.S | 1
-rw-r--r-- arch/sh/lib/mcount.S | 8
-rw-r--r-- arch/sparc/kernel/module.c | 13
-rw-r--r-- arch/sparc/kernel/sys_sparc_64.c | 10
-rw-r--r-- arch/sparc/mm/init_64.c | 28
-rw-r--r-- arch/tile/include/gxio/iorpc_trio.h | 24
-rw-r--r-- arch/um/include/asm/processor-generic.h | 9
-rw-r--r-- arch/um/include/shared/common-offsets.h | 10
-rw-r--r-- arch/um/include/shared/user.h | 11
-rw-r--r-- arch/um/kernel/exec.c | 25
-rw-r--r-- arch/um/kernel/process.c | 8
-rw-r--r-- arch/um/kernel/signal.c | 6
-rw-r--r-- arch/um/kernel/syscall.c | 24
-rw-r--r-- arch/um/os-Linux/time.c | 2
-rw-r--r-- arch/um/scripts/Makefile.rules | 2
-rw-r--r-- arch/x86/Kconfig | 10
-rw-r--r-- arch/x86/Makefile | 6
-rw-r--r-- arch/x86/boot/Makefile | 2
-rw-r--r-- arch/x86/include/asm/mce.h | 8
-rw-r--r-- arch/x86/include/asm/perf_event.h | 11
-rw-r--r-- arch/x86/include/asm/spinlock.h | 3
-rw-r--r-- arch/x86/include/asm/xen/page.h | 3
-rw-r--r-- arch/x86/kernel/acpi/sleep.c | 4
-rw-r--r-- arch/x86/kernel/acpi/sleep.h | 2
-rw-r--r-- arch/x86/kernel/acpi/wakeup_32.S | 4
-rw-r--r-- arch/x86/kernel/acpi/wakeup_64.S | 4
-rw-r--r-- arch/x86/kernel/alternative.c | 4
-rw-r--r-- arch/x86/kernel/apic/io_apic.c | 14
-rw-r--r-- arch/x86/kernel/cpu/common.c | 2
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce-severity.c | 7
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce.c | 43
-rw-r--r-- arch/x86/kernel/cpu/perf_event.c | 89
-rw-r--r-- arch/x86/kernel/cpu/perf_event.h | 22
-rw-r--r-- arch/x86/kernel/cpu/perf_event_amd_ibs.c | 16
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel.c | 35
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel_ds.c | 21
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel_lbr.c | 3
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel_uncore.c | 259
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel_uncore.h | 48
-rw-r--r-- arch/x86/kernel/cpuid.c | 5
-rw-r--r-- arch/x86/kernel/irq.c | 3
-rw-r--r-- arch/x86/kernel/kdebugfs.c | 6
-rw-r--r-- arch/x86/kernel/microcode_amd.c | 7
-rw-r--r-- arch/x86/kernel/microcode_core.c | 3
-rw-r--r-- arch/x86/kernel/msr.c | 5
-rw-r--r-- arch/x86/kvm/emulate.c | 30
-rw-r--r-- arch/x86/kvm/i8259.c | 17
-rw-r--r-- arch/x86/kvm/mmu.c | 13
-rw-r--r-- arch/x86/kvm/vmx.c | 43
-rw-r--r-- arch/x86/kvm/x86.c | 22
-rw-r--r-- arch/x86/mm/hugetlbpage.c | 21
-rw-r--r-- arch/x86/mm/init.c | 2
-rw-r--r-- arch/x86/mm/pageattr.c | 10
-rw-r--r-- arch/x86/mm/srat.c | 15
-rw-r--r-- arch/x86/platform/efi/efi.c | 30
-rw-r--r-- arch/x86/realmode/rm/Makefile | 2
-rw-r--r-- arch/x86/syscalls/syscall_64.tbl | 8
-rw-r--r-- arch/x86/um/Kconfig | 1
-rw-r--r-- arch/x86/um/shared/sysdep/kernel-offsets.h | 3
-rw-r--r-- arch/x86/um/shared/sysdep/syscalls.h | 2
-rw-r--r-- arch/x86/um/signal.c | 6
-rw-r--r-- arch/x86/um/sys_call_table_32.c | 2
-rw-r--r-- arch/x86/um/syscalls_32.c | 27
-rw-r--r-- arch/x86/um/syscalls_64.c | 23
-rw-r--r-- arch/x86/xen/enlighten.c | 122
-rw-r--r-- arch/x86/xen/mmu.c | 2
-rw-r--r-- arch/x86/xen/p2m.c | 121
-rw-r--r-- arch/x86/xen/setup.c | 13
-rw-r--r-- arch/x86/xen/suspend.c | 2
-rw-r--r-- arch/x86/xen/xen-ops.h | 2
412 files changed, 4786 insertions, 4481 deletions
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index d5b9b5e645c..9944dedee5b 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -18,6 +18,8 @@ config ALPHA
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_SMP_IDLE_THREAD
select GENERIC_CMOS_UPDATE
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 3bb7ffeae3b..c2cbe4fc391 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -14,8 +14,8 @@
*/
-#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
-#define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } )
+#define ATOMIC_INIT(i) { (i) }
+#define ATOMIC64_INIT(i) { (i) }
#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic64_read(v) (*(volatile long *)&(v)->counter)
diff --git a/arch/alpha/include/asm/fpu.h b/arch/alpha/include/asm/fpu.h
index db00f7885fa..e477bcd5b94 100644
--- a/arch/alpha/include/asm/fpu.h
+++ b/arch/alpha/include/asm/fpu.h
@@ -1,7 +1,9 @@
#ifndef __ASM_ALPHA_FPU_H
#define __ASM_ALPHA_FPU_H
+#ifdef __KERNEL__
#include <asm/special_insns.h>
+#endif
/*
* Alpha floating-point control register defines:
diff --git a/arch/alpha/include/asm/ptrace.h b/arch/alpha/include/asm/ptrace.h
index fd698a174f2..b87755a1955 100644
--- a/arch/alpha/include/asm/ptrace.h
+++ b/arch/alpha/include/asm/ptrace.h
@@ -76,7 +76,10 @@ struct switch_stack {
#define task_pt_regs(task) \
((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1)
-#define force_successful_syscall_return() (task_pt_regs(current)->r0 = 0)
+#define current_pt_regs() \
+ ((struct pt_regs *) ((char *)current_thread_info() + 2*PAGE_SIZE) - 1)
+
+#define force_successful_syscall_return() (current_pt_regs()->r0 = 0)
#endif
diff --git a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h
index dcb221a4b5b..7d2f75be932 100644
--- a/arch/alpha/include/asm/socket.h
+++ b/arch/alpha/include/asm/socket.h
@@ -76,9 +76,11 @@
/* Instruct lower device to use last 4-bytes of skb data as FCS */
#define SO_NOFCS 43
+#ifdef __KERNEL__
/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
* have to define SOCK_NONBLOCK to a different value here.
*/
#define SOCK_NONBLOCK 0x40000000
+#endif /* __KERNEL__ */
#endif /* _ASM_SOCKET_H */
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
index b49ec2f8d6e..766fdfde2b7 100644
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -433,36 +433,12 @@ clear_user(void __user *to, long len)
#undef __module_address
#undef __module_call
-/* Returns: -EFAULT if exception before terminator, N if the entire
- buffer filled, else strlen. */
+#define user_addr_max() \
+ (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
-extern long __strncpy_from_user(char *__to, const char __user *__from, long __to_len);
-
-extern inline long
-strncpy_from_user(char *to, const char __user *from, long n)
-{
- long ret = -EFAULT;
- if (__access_ok((unsigned long)from, 0, get_fs()))
- ret = __strncpy_from_user(to, from, n);
- return ret;
-}
-
-/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
-extern long __strlen_user(const char __user *);
-
-extern inline long strlen_user(const char __user *str)
-{
- return access_ok(VERIFY_READ,str,0) ? __strlen_user(str) : 0;
-}
-
-/* Returns: 0 if exception before NUL or reaching the supplied limit (N),
- * a value greater than N if the limit would be exceeded, else strlen. */
-extern long __strnlen_user(const char __user *, long);
-
-extern inline long strnlen_user(const char __user *str, long n)
-{
- return access_ok(VERIFY_READ,str,0) ? __strnlen_user(str, n) : 0;
-}
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
/*
* About the exception table:
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index 633b23b0664..a31a78eac9b 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -465,10 +465,12 @@
#define __NR_setns 501
#define __NR_accept4 502
#define __NR_sendmmsg 503
+#define __NR_process_vm_readv 504
+#define __NR_process_vm_writev 505
#ifdef __KERNEL__
-#define NR_SYSCALLS 504
+#define NR_SYSCALLS 506
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_STAT64
diff --git a/arch/alpha/include/asm/word-at-a-time.h b/arch/alpha/include/asm/word-at-a-time.h
new file mode 100644
index 00000000000..6b340d0f152
--- /dev/null
+++ b/arch/alpha/include/asm/word-at-a-time.h
@@ -0,0 +1,55 @@
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+#include <asm/compiler.h>
+
+/*
+ * word-at-a-time interface for Alpha.
+ */
+
+/*
+ * We do not use the word_at_a_time struct on Alpha, but it needs to be
+ * implemented to humour the generic code.
+ */
+struct word_at_a_time {
+ const unsigned long unused;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { 0 }
+
+/* Return nonzero if val has a zero */
+static inline unsigned long has_zero(unsigned long val, unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long zero_locations = __kernel_cmpbge(0, val);
+ *bits = zero_locations;
+ return zero_locations;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long val, unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+#define create_zero_mask(bits) (bits)
+
+static inline unsigned long find_zero(unsigned long bits)
+{
+#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67)
+ /* Simple if have CIX instructions */
+ return __kernel_cttz(bits);
+#else
+ unsigned long t1, t2, t3;
+ /* Retain lowest set bit only */
+ bits &= -bits;
+ /* Binary search for lowest set bit */
+ t1 = bits & 0xf0;
+ t2 = bits & 0xcc;
+ t3 = bits & 0xaa;
+ if (t1) t1 = 4;
+ if (t2) t2 = 2;
+ if (t3) t3 = 1;
+ return t1 + t2 + t3;
+#endif
+}
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
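[Editor's note, not part of the patch: the following is a minimal user-space sketch of the non-CIX find_zero() fallback added above. cmpbge0() is a made-up stand-in for __kernel_cmpbge(0, val); byte index 0 is the low byte, as on little-endian Alpha.]

/* Model of the word-at-a-time zero-byte search used by the patch. */
#include <stdio.h>

/* Emulate cmpbge(0, val): bit i set iff byte i of val is zero. */
static unsigned long cmpbge0(unsigned long val)
{
	unsigned long mask = 0;
	for (int i = 0; i < 8; i++)
		if (((val >> (8 * i)) & 0xff) == 0)
			mask |= 1UL << i;
	return mask;
}

/* Same binary search as the non-CIX find_zero() above. */
static unsigned long find_zero(unsigned long bits)
{
	unsigned long t1, t2, t3;
	bits &= -bits;		/* retain lowest set bit only */
	t1 = bits & 0xf0;	/* is the bit in the high nibble? add 4 */
	t2 = bits & 0xcc;	/* is it in the upper pair of its nibble? add 2 */
	t3 = bits & 0xaa;	/* is it the odd bit of its pair? add 1 */
	if (t1) t1 = 4;
	if (t2) t2 = 2;
	if (t3) t3 = 1;
	return t1 + t2 + t3;
}

int main(void)
{
	/* "abc\0efgh" as a little-endian quadword: the NUL is byte 3. */
	unsigned long w = 0x6867666500636261UL;
	printf("zero byte at index %lu\n", find_zero(cmpbge0(w)));	/* 3 */
	return 0;
}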
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index d96e742d4dc..15fa821d09c 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -52,7 +52,6 @@ EXPORT_SYMBOL(alpha_write_fp_reg_s);
/* entry.S */
EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(kernel_execve);
/* Networking helper routines. */
EXPORT_SYMBOL(csum_tcpudp_magic);
@@ -74,8 +73,6 @@ EXPORT_SYMBOL(alpha_fp_emul);
*/
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(__do_clear_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(__strnlen_user);
/*
* SMP-specific symbols.
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index 6d159cee5f2..ec0da0567ab 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -663,58 +663,6 @@ kernel_thread:
br ret_to_kernel
.end kernel_thread
-/*
- * kernel_execve(path, argv, envp)
- */
- .align 4
- .globl kernel_execve
- .ent kernel_execve
-kernel_execve:
- /* We can be called from a module. */
- ldgp $gp, 0($27)
- lda $sp, -(32+SIZEOF_PT_REGS+8)($sp)
- .frame $sp, 32+SIZEOF_PT_REGS+8, $26, 0
- stq $26, 0($sp)
- stq $16, 8($sp)
- stq $17, 16($sp)
- stq $18, 24($sp)
- .prologue 1
-
- lda $16, 32($sp)
- lda $17, 0
- lda $18, SIZEOF_PT_REGS
- bsr $26, memset !samegp
-
- /* Avoid the HAE being gratuitously wrong, which would cause us
- to do the whole turn off interrupts thing and restore it. */
- ldq $2, alpha_mv+HAE_CACHE
- stq $2, 152+32($sp)
-
- ldq $16, 8($sp)
- ldq $17, 16($sp)
- ldq $18, 24($sp)
- lda $19, 32($sp)
- bsr $26, do_execve !samegp
-
- ldq $26, 0($sp)
- bne $0, 1f /* error! */
-
- /* Move the temporary pt_regs struct from its current location
- to the top of the kernel stack frame. See copy_thread for
- details for a normal process. */
- lda $16, 0x4000 - SIZEOF_PT_REGS($8)
- lda $17, 32($sp)
- lda $18, SIZEOF_PT_REGS
- bsr $26, memmove !samegp
-
- /* Take that over as our new stack frame and visit userland! */
- lda $sp, 0x4000 - SIZEOF_PT_REGS($8)
- br $31, ret_from_sys_call
-
-1: lda $sp, 32+SIZEOF_PT_REGS+8($sp)
- ret
-.end kernel_execve
-
/*
* Special system calls. Most of these are special in that they either
@@ -797,115 +745,6 @@ sys_rt_sigreturn:
.end sys_rt_sigreturn
.align 4
- .globl sys_sethae
- .ent sys_sethae
-sys_sethae:
- .prologue 0
- stq $16, 152($sp)
- ret
-.end sys_sethae
-
- .align 4
- .globl osf_getpriority
- .ent osf_getpriority
-osf_getpriority:
- lda $sp, -16($sp)
- stq $26, 0($sp)
- .prologue 0
-
- jsr $26, sys_getpriority
-
- ldq $26, 0($sp)
- blt $0, 1f
-
- /* Return value is the unbiased priority, i.e. 20 - prio.
- This does result in negative return values, so signal
- no error by writing into the R0 slot. */
- lda $1, 20
- stq $31, 16($sp)
- subl $1, $0, $0
- unop
-
-1: lda $sp, 16($sp)
- ret
-.end osf_getpriority
-
- .align 4
- .globl sys_getxuid
- .ent sys_getxuid
-sys_getxuid:
- .prologue 0
- ldq $2, TI_TASK($8)
- ldq $3, TASK_CRED($2)
- ldl $0, CRED_UID($3)
- ldl $1, CRED_EUID($3)
- stq $1, 80($sp)
- ret
-.end sys_getxuid
-
- .align 4
- .globl sys_getxgid
- .ent sys_getxgid
-sys_getxgid:
- .prologue 0
- ldq $2, TI_TASK($8)
- ldq $3, TASK_CRED($2)
- ldl $0, CRED_GID($3)
- ldl $1, CRED_EGID($3)
- stq $1, 80($sp)
- ret
-.end sys_getxgid
-
- .align 4
- .globl sys_getxpid
- .ent sys_getxpid
-sys_getxpid:
- .prologue 0
- ldq $2, TI_TASK($8)
-
- /* See linux/kernel/timer.c sys_getppid for discussion
- about this loop. */
- ldq $3, TASK_GROUP_LEADER($2)
- ldq $4, TASK_REAL_PARENT($3)
- ldl $0, TASK_TGID($2)
-1: ldl $1, TASK_TGID($4)
-#ifdef CONFIG_SMP
- mov $4, $5
- mb
- ldq $3, TASK_GROUP_LEADER($2)
- ldq $4, TASK_REAL_PARENT($3)
- cmpeq $4, $5, $5
- beq $5, 1b
-#endif
- stq $1, 80($sp)
- ret
-.end sys_getxpid
-
- .align 4
- .globl sys_alpha_pipe
- .ent sys_alpha_pipe
-sys_alpha_pipe:
- lda $sp, -16($sp)
- stq $26, 0($sp)
- .prologue 0
-
- mov $31, $17
- lda $16, 8($sp)
- jsr $26, do_pipe_flags
-
- ldq $26, 0($sp)
- bne $0, 1f
-
- /* The return values are in $0 and $20. */
- ldl $1, 12($sp)
- ldl $0, 8($sp)
-
- stq $1, 80+16($sp)
-1: lda $sp, 16($sp)
- ret
-.end sys_alpha_pipe
-
- .align 4
.globl sys_execve
.ent sys_execve
sys_execve:
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 98a103621af..bc1acdda7a5 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1404,3 +1404,52 @@ SYSCALL_DEFINE3(osf_writev, unsigned long, fd,
}
#endif
+
+SYSCALL_DEFINE2(osf_getpriority, int, which, int, who)
+{
+ int prio = sys_getpriority(which, who);
+ if (prio >= 0) {
+ /* Return value is the unbiased priority, i.e. 20 - prio.
+ This does result in negative return values, so signal
+ no error */
+ force_successful_syscall_return();
+ prio = 20 - prio;
+ }
+ return prio;
+}
+
+SYSCALL_DEFINE0(getxuid)
+{
+ current_pt_regs()->r20 = sys_geteuid();
+ return sys_getuid();
+}
+
+SYSCALL_DEFINE0(getxgid)
+{
+ current_pt_regs()->r20 = sys_getegid();
+ return sys_getgid();
+}
+
+SYSCALL_DEFINE0(getxpid)
+{
+ current_pt_regs()->r20 = sys_getppid();
+ return sys_getpid();
+}
+
+SYSCALL_DEFINE0(alpha_pipe)
+{
+ int fd[2];
+ int res = do_pipe_flags(fd, 0);
+ if (!res) {
+ /* The return values are in $0 and $20. */
+ current_pt_regs()->r20 = fd[1];
+ res = fd[0];
+ }
+ return res;
+}
+
+SYSCALL_DEFINE1(sethae, unsigned long, val)
+{
+ current_pt_regs()->hae = val;
+ return 0;
+}
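[Editor's note, not part of the patch: a rough user-space sketch of the re-biasing done by sys_osf_getpriority() above. The generic sys_getpriority() returns 20 - nice (1..40, never negative so it cannot be mistaken for an error), while the OSF ABI wants the raw nice value (-20..19) back, hence the 20 - prio and the force_successful_syscall_return(). The function names below are stand-ins, not kernel APIs.]

#include <stdio.h>

static int generic_getpriority(int nice)	/* models sys_getpriority() */
{
	return 20 - nice;			/* 1..40 on success */
}

static int osf_getpriority(int nice)		/* models sys_osf_getpriority() */
{
	int prio = generic_getpriority(nice);
	if (prio >= 0)
		prio = 20 - prio;		/* back to -20..19, may be negative */
	return prio;
}

int main(void)
{
	for (int nice = -20; nice <= 19; nice += 13)
		printf("nice %3d -> generic %2d -> OSF %3d\n",
		       nice, generic_getpriority(nice), osf_getpriority(nice));
	return 0;
}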
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 153d3fce3e8..d6fde98b74b 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -455,3 +455,22 @@ get_wchan(struct task_struct *p)
}
return pc;
}
+
+int kernel_execve(const char *path, const char *const argv[], const char *const envp[])
+{
+ /* Avoid the HAE being gratuitously wrong, which would cause us
+ to do the whole turn off interrupts thing and restore it. */
+ struct pt_regs regs = {.hae = alpha_mv.hae_cache};
+ int err = do_execve(path, argv, envp, &regs);
+ if (!err) {
+ struct pt_regs *p = current_pt_regs();
+ /* copy regs to normal position and off to userland we go... */
+ *p = regs;
+ __asm__ __volatile__ (
+ "mov %0, $sp;"
+ "br $31, ret_from_sys_call"
+ : : "r"(p));
+ }
+ return err;
+}
+EXPORT_SYMBOL(kernel_execve);
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 87835235f11..2ac6b45c3e0 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -111,7 +111,7 @@ sys_call_table:
.quad sys_socket
.quad sys_connect
.quad sys_accept
- .quad osf_getpriority /* 100 */
+ .quad sys_osf_getpriority /* 100 */
.quad sys_send
.quad sys_recv
.quad sys_sigreturn
@@ -522,6 +522,8 @@ sys_call_table:
.quad sys_setns
.quad sys_accept4
.quad sys_sendmmsg
+ .quad sys_process_vm_readv
+ .quad sys_process_vm_writev /* 505 */
.size sys_call_table, . - sys_call_table
.type sys_call_table, @object
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
index c0a83ab62b7..59660743237 100644
--- a/arch/alpha/lib/Makefile
+++ b/arch/alpha/lib/Makefile
@@ -31,8 +31,6 @@ lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \
$(ev6-y)memchr.o \
$(ev6-y)copy_user.o \
$(ev6-y)clear_user.o \
- $(ev6-y)strncpy_from_user.o \
- $(ev67-y)strlen_user.o \
$(ev6-y)csum_ipv6_magic.o \
$(ev6-y)clear_page.o \
$(ev6-y)copy_page.o \
diff --git a/arch/alpha/lib/ev6-strncpy_from_user.S b/arch/alpha/lib/ev6-strncpy_from_user.S
deleted file mode 100644
index d2e28178cac..00000000000
--- a/arch/alpha/lib/ev6-strncpy_from_user.S
+++ /dev/null
@@ -1,424 +0,0 @@
-/*
- * arch/alpha/lib/ev6-strncpy_from_user.S
- * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com>
- *
- * Just like strncpy except in the return value:
- *
- * -EFAULT if an exception occurs before the terminator is copied.
- * N if the buffer filled.
- *
- * Otherwise the length of the string is returned.
- *
- * Much of the information about 21264 scheduling/coding comes from:
- * Compiler Writer's Guide for the Alpha 21264
- * abbreviated as 'CWG' in other comments here
- * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
- * Scheduling notation:
- * E - either cluster
- * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
- * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
- * A bunch of instructions got moved and temp registers were changed
- * to aid in scheduling. Control flow was also re-arranged to eliminate
- * branches, and to provide longer code sequences to enable better scheduling.
- * A total rewrite (using byte load/stores for start & tail sequences)
- * is desirable, but very difficult to do without a from-scratch rewrite.
- * Save that for the future.
- */
-
-
-#include <asm/errno.h>
-#include <asm/regdef.h>
-
-
-/* Allow an exception for an insn; exit if we get one. */
-#define EX(x,y...) \
- 99: x,##y; \
- .section __ex_table,"a"; \
- .long 99b - .; \
- lda $31, $exception-99b($0); \
- .previous
-
-
- .set noat
- .set noreorder
- .text
-
- .globl __strncpy_from_user
- .ent __strncpy_from_user
- .frame $30, 0, $26
- .prologue 0
-
- .align 4
-__strncpy_from_user:
- and a0, 7, t3 # E : find dest misalignment
- beq a2, $zerolength # U :
-
- /* Are source and destination co-aligned? */
- mov a0, v0 # E : save the string start
- xor a0, a1, t4 # E :
- EX( ldq_u t1, 0(a1) ) # L : Latency=3 load first quadword
- ldq_u t0, 0(a0) # L : load first (partial) aligned dest quadword
-
- addq a2, t3, a2 # E : bias count by dest misalignment
- subq a2, 1, a3 # E :
- addq zero, 1, t10 # E :
- and t4, 7, t4 # E : misalignment between the two
-
- and a3, 7, t6 # E : number of tail bytes
- sll t10, t6, t10 # E : t10 = bitmask of last count byte
- bne t4, $unaligned # U :
- lda t2, -1 # E : build a mask against false zero
-
- /*
- * We are co-aligned; take care of a partial first word.
- * On entry to this basic block:
- * t0 == the first destination word for masking back in
- * t1 == the first source word.
- */
-
- srl a3, 3, a2 # E : a2 = loop counter = (count - 1)/8
- addq a1, 8, a1 # E :
- mskqh t2, a1, t2 # U : detection in the src word
- nop
-
- /* Create the 1st output word and detect 0's in the 1st input word. */
- mskqh t1, a1, t3 # U :
- mskql t0, a1, t0 # U : assemble the first output word
- ornot t1, t2, t2 # E :
- nop
-
- cmpbge zero, t2, t8 # E : bits set iff null found
- or t0, t3, t0 # E :
- beq a2, $a_eoc # U :
- bne t8, $a_eos # U : 2nd branch in a quad. Bad.
-
- /* On entry to this basic block:
- * t0 == a source quad not containing a null.
- * a0 - current aligned destination address
- * a1 - current aligned source address
- * a2 - count of quadwords to move.
- * NOTE: Loop improvement - unrolling this is going to be
- * a huge win, since we're going to stall otherwise.
- * Fix this later. For _really_ large copies, look
- * at using wh64 on a look-ahead basis. See the code
- * in clear_user.S and copy_user.S.
- * Presumably, since (a0) and (a1) do not overlap (by C definition)
- * Lots of nops here:
- * - Separate loads from stores
- * - Keep it to 1 branch/quadpack so the branch predictor
- * can train.
- */
-$a_loop:
- stq_u t0, 0(a0) # L :
- addq a0, 8, a0 # E :
- nop
- subq a2, 1, a2 # E :
-
- EX( ldq_u t0, 0(a1) ) # L :
- addq a1, 8, a1 # E :
- cmpbge zero, t0, t8 # E : Stall 2 cycles on t0
- beq a2, $a_eoc # U :
-
- beq t8, $a_loop # U :
- nop
- nop
- nop
-
- /* Take care of the final (partial) word store. At this point
- * the end-of-count bit is set in t8 iff it applies.
- *
- * On entry to this basic block we have:
- * t0 == the source word containing the null
- * t8 == the cmpbge mask that found it.
- */
-$a_eos:
- negq t8, t12 # E : find low bit set
- and t8, t12, t12 # E :
-
- /* We're doing a partial word store and so need to combine
- our source and original destination words. */
- ldq_u t1, 0(a0) # L :
- subq t12, 1, t6 # E :
-
- or t12, t6, t8 # E :
- zapnot t0, t8, t0 # U : clear src bytes > null
- zap t1, t8, t1 # U : clear dst bytes <= null
- or t0, t1, t0 # E :
-
- stq_u t0, 0(a0) # L :
- br $finish_up # L0 :
- nop
- nop
-
- /* Add the end-of-count bit to the eos detection bitmask. */
- .align 4
-$a_eoc:
- or t10, t8, t8
- br $a_eos
- nop
- nop
-
-
-/* The source and destination are not co-aligned. Align the destination
- and cope. We have to be very careful about not reading too much and
- causing a SEGV. */
-
- .align 4
-$u_head:
- /* We know just enough now to be able to assemble the first
- full source word. We can still find a zero at the end of it
- that prevents us from outputting the whole thing.
-
- On entry to this basic block:
- t0 == the first dest word, unmasked
- t1 == the shifted low bits of the first source word
- t6 == bytemask that is -1 in dest word bytes */
-
- EX( ldq_u t2, 8(a1) ) # L : load second src word
- addq a1, 8, a1 # E :
- mskql t0, a0, t0 # U : mask trailing garbage in dst
- extqh t2, a1, t4 # U :
-
- or t1, t4, t1 # E : first aligned src word complete
- mskqh t1, a0, t1 # U : mask leading garbage in src
- or t0, t1, t0 # E : first output word complete
- or t0, t6, t6 # E : mask original data for zero test
-
- cmpbge zero, t6, t8 # E :
- beq a2, $u_eocfin # U :
- bne t8, $u_final # U : bad news - 2nd branch in a quad
- lda t6, -1 # E : mask out the bits we have
-
- mskql t6, a1, t6 # U : already seen
- stq_u t0, 0(a0) # L : store first output word
- or t6, t2, t2 # E :
- cmpbge zero, t2, t8 # E : find nulls in second partial
-
- addq a0, 8, a0 # E :
- subq a2, 1, a2 # E :
- bne t8, $u_late_head_exit # U :
- nop
-
- /* Finally, we've got all the stupid leading edge cases taken care
- of and we can set up to enter the main loop. */
-
- extql t2, a1, t1 # U : position hi-bits of lo word
- EX( ldq_u t2, 8(a1) ) # L : read next high-order source word
- addq a1, 8, a1 # E :
- cmpbge zero, t2, t8 # E :
-
- beq a2, $u_eoc # U :
- bne t8, $u_eos # U :
- nop
- nop
-
- /* Unaligned copy main loop. In order to avoid reading too much,
- the loop is structured to detect zeros in aligned source words.
- This has, unfortunately, effectively pulled half of a loop
- iteration out into the head and half into the tail, but it does
- prevent nastiness from accumulating in the very thing we want
- to run as fast as possible.
-
- On entry to this basic block:
- t1 == the shifted high-order bits from the previous source word
- t2 == the unshifted current source word
-
- We further know that t2 does not contain a null terminator. */
-
- /*
- * Extra nops here:
- * separate load quads from store quads
- * only one branch/quad to permit predictor training
- */
-
- .align 4
-$u_loop:
- extqh t2, a1, t0 # U : extract high bits for current word
- addq a1, 8, a1 # E :
- extql t2, a1, t3 # U : extract low bits for next time
- addq a0, 8, a0 # E :
-
- or t0, t1, t0 # E : current dst word now complete
- EX( ldq_u t2, 0(a1) ) # L : load high word for next time
- subq a2, 1, a2 # E :
- nop
-
- stq_u t0, -8(a0) # L : save the current word
- mov t3, t1 # E :
- cmpbge zero, t2, t8 # E : test new word for eos
- beq a2, $u_eoc # U :
-
- beq t8, $u_loop # U :
- nop
- nop
- nop
-
- /* We've found a zero somewhere in the source word we just read.
- If it resides in the lower half, we have one (probably partial)
- word to write out, and if it resides in the upper half, we
- have one full and one partial word left to write out.
-
- On entry to this basic block:
- t1 == the shifted high-order bits from the previous source word
- t2 == the unshifted current source word. */
- .align 4
-$u_eos:
- extqh t2, a1, t0 # U :
- or t0, t1, t0 # E : first (partial) source word complete
- cmpbge zero, t0, t8 # E : is the null in this first bit?
- nop
-
- bne t8, $u_final # U :
- stq_u t0, 0(a0) # L : the null was in the high-order bits
- addq a0, 8, a0 # E :
- subq a2, 1, a2 # E :
-
- .align 4
-$u_late_head_exit:
- extql t2, a1, t0 # U :
- cmpbge zero, t0, t8 # E :
- or t8, t10, t6 # E :
- cmoveq a2, t6, t8 # E :
-
- /* Take care of a final (probably partial) result word.
- On entry to this basic block:
- t0 == assembled source word
- t8 == cmpbge mask that found the null. */
- .align 4
-$u_final:
- negq t8, t6 # E : isolate low bit set
- and t6, t8, t12 # E :
- ldq_u t1, 0(a0) # L :
- subq t12, 1, t6 # E :
-
- or t6, t12, t8 # E :
- zapnot t0, t8, t0 # U : kill source bytes > null
- zap t1, t8, t1 # U : kill dest bytes <= null
- or t0, t1, t0 # E :
-
- stq_u t0, 0(a0) # E :
- br $finish_up # U :
- nop
- nop
-
- .align 4
-$u_eoc: # end-of-count
- extqh t2, a1, t0 # U :
- or t0, t1, t0 # E :
- cmpbge zero, t0, t8 # E :
- nop
-
- .align 4
-$u_eocfin: # end-of-count, final word
- or t10, t8, t8 # E :
- br $u_final # U :
- nop
- nop
-
- /* Unaligned copy entry point. */
- .align 4
-$unaligned:
-
- srl a3, 3, a2 # U : a2 = loop counter = (count - 1)/8
- and a0, 7, t4 # E : find dest misalignment
- and a1, 7, t5 # E : find src misalignment
- mov zero, t0 # E :
-
- /* Conditionally load the first destination word and a bytemask
- with 0xff indicating that the destination byte is sacrosanct. */
-
- mov zero, t6 # E :
- beq t4, 1f # U :
- ldq_u t0, 0(a0) # L :
- lda t6, -1 # E :
-
- mskql t6, a0, t6 # E :
- nop
- nop
- nop
-
- .align 4
-1:
- subq a1, t4, a1 # E : sub dest misalignment from src addr
- /* If source misalignment is larger than dest misalignment, we need
- extra startup checks to avoid SEGV. */
- cmplt t4, t5, t12 # E :
- extql t1, a1, t1 # U : shift src into place
- lda t2, -1 # E : for creating masks later
-
- beq t12, $u_head # U :
- mskqh t2, t5, t2 # U : begin src byte validity mask
- cmpbge zero, t1, t8 # E : is there a zero?
- nop
-
- extql t2, a1, t2 # U :
- or t8, t10, t5 # E : test for end-of-count too
- cmpbge zero, t2, t3 # E :
- cmoveq a2, t5, t8 # E : Latency=2, extra map slot
-
- nop # E : goes with cmov
- andnot t8, t3, t8 # E :
- beq t8, $u_head # U :
- nop
-
- /* At this point we've found a zero in the first partial word of
- the source. We need to isolate the valid source data and mask
- it into the original destination data. (Incidentally, we know
- that we'll need at least one byte of that original dest word.) */
-
- ldq_u t0, 0(a0) # L :
- negq t8, t6 # E : build bitmask of bytes <= zero
- mskqh t1, t4, t1 # U :
- and t6, t8, t12 # E :
-
- subq t12, 1, t6 # E :
- or t6, t12, t8 # E :
- zapnot t2, t8, t2 # U : prepare source word; mirror changes
- zapnot t1, t8, t1 # U : to source validity mask
-
- andnot t0, t2, t0 # E : zero place for source to reside
- or t0, t1, t0 # E : and put it there
- stq_u t0, 0(a0) # L :
- nop
-
- .align 4
-$finish_up:
- zapnot t0, t12, t4 # U : was last byte written null?
- and t12, 0xf0, t3 # E : binary search for the address of the
- cmovne t4, 1, t4 # E : Latency=2, extra map slot
- nop # E : with cmovne
-
- and t12, 0xcc, t2 # E : last byte written
- and t12, 0xaa, t1 # E :
- cmovne t3, 4, t3 # E : Latency=2, extra map slot
- nop # E : with cmovne
-
- bic a0, 7, t0
- cmovne t2, 2, t2 # E : Latency=2, extra map slot
- nop # E : with cmovne
- nop
-
- cmovne t1, 1, t1 # E : Latency=2, extra map slot
- nop # E : with cmovne
- addq t0, t3, t0 # E :
- addq t1, t2, t1 # E :
-
- addq t0, t1, t0 # E :
- addq t0, t4, t0 # add one if we filled the buffer
- subq t0, v0, v0 # find string length
- ret # L0 :
-
- .align 4
-$zerolength:
- nop
- nop
- nop
- clr v0
-
-$exception:
- nop
- nop
- nop
- ret
-
- .end __strncpy_from_user
diff --git a/arch/alpha/lib/ev67-strlen_user.S b/arch/alpha/lib/ev67-strlen_user.S
deleted file mode 100644
index 57e0d77b81a..00000000000
--- a/arch/alpha/lib/ev67-strlen_user.S
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * arch/alpha/lib/ev67-strlen_user.S
- * 21264 version contributed by Rick Gorton <rick.gorton@api-networks.com>
- *
- * Return the length of the string including the NULL terminator
- * (strlen+1) or zero if an error occurred.
- *
- * In places where it is critical to limit the processing time,
- * and the data is not trusted, strnlen_user() should be used.
- * It will return a value greater than its second argument if
- * that limit would be exceeded. This implementation is allowed
- * to access memory beyond the limit, but will not cross a page
- * boundary when doing so.
- *
- * Much of the information about 21264 scheduling/coding comes from:
- * Compiler Writer's Guide for the Alpha 21264
- * abbreviated as 'CWG' in other comments here
- * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
- * Scheduling notation:
- * E - either cluster
- * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
- * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
- * Try not to change the actual algorithm if possible for consistency.
- */
-
-#include <asm/regdef.h>
-
-
-/* Allow an exception for an insn; exit if we get one. */
-#define EX(x,y...) \
- 99: x,##y; \
- .section __ex_table,"a"; \
- .long 99b - .; \
- lda v0, $exception-99b(zero); \
- .previous
-
-
- .set noreorder
- .set noat
- .text
-
- .globl __strlen_user
- .ent __strlen_user
- .frame sp, 0, ra
-
- .align 4
-__strlen_user:
- ldah a1, 32767(zero) # do not use plain strlen_user() for strings
- # that might be almost 2 GB long; you should
- # be using strnlen_user() instead
- nop
- nop
- nop
-
- .globl __strnlen_user
-
- .align 4
-__strnlen_user:
- .prologue 0
- EX( ldq_u t0, 0(a0) ) # L : load first quadword (a0 may be misaligned)
- lda t1, -1(zero) # E :
-
- insqh t1, a0, t1 # U :
- andnot a0, 7, v0 # E :
- or t1, t0, t0 # E :
- subq a0, 1, a0 # E : get our +1 for the return
-
- cmpbge zero, t0, t1 # E : t1 <- bitmask: bit i == 1 <==> i-th byte == 0
- subq a1, 7, t2 # E :
- subq a0, v0, t0 # E :
- bne t1, $found # U :
-
- addq t2, t0, t2 # E :
- addq a1, 1, a1 # E :
- nop # E :
- nop # E :
-
- .align 4
-$loop: ble t2, $limit # U :
- EX( ldq t0, 8(v0) ) # L :
- nop # E :
- nop # E :
-
- cmpbge zero, t0, t1 # E :
- subq t2, 8, t2 # E :
- addq v0, 8, v0 # E : addr += 8
- beq t1, $loop # U :
-
-$found: cttz t1, t2 # U0 :
- addq v0, t2, v0 # E :
- subq v0, a0, v0 # E :
- ret # L0 :
-
-$exception:
- nop
- nop
- nop
- ret
-
- .align 4 # currently redundant
-$limit:
- nop
- nop
- subq a1, t2, v0
- ret
-
- .end __strlen_user
diff --git a/arch/alpha/lib/strlen_user.S b/arch/alpha/lib/strlen_user.S
deleted file mode 100644
index 508a18e9647..00000000000
--- a/arch/alpha/lib/strlen_user.S
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * arch/alpha/lib/strlen_user.S
- *
- * Return the length of the string including the NUL terminator
- * (strlen+1) or zero if an error occurred.
- *
- * In places where it is critical to limit the processing time,
- * and the data is not trusted, strnlen_user() should be used.
- * It will return a value greater than its second argument if
- * that limit would be exceeded. This implementation is allowed
- * to access memory beyond the limit, but will not cross a page
- * boundary when doing so.
- */
-
-#include <asm/regdef.h>
-
-
-/* Allow an exception for an insn; exit if we get one. */
-#define EX(x,y...) \
- 99: x,##y; \
- .section __ex_table,"a"; \
- .long 99b - .; \
- lda v0, $exception-99b(zero); \
- .previous
-
-
- .set noreorder
- .set noat
- .text
-
- .globl __strlen_user
- .ent __strlen_user
- .frame sp, 0, ra
-
- .align 3
-__strlen_user:
- ldah a1, 32767(zero) # do not use plain strlen_user() for strings
- # that might be almost 2 GB long; you should
- # be using strnlen_user() instead
-
- .globl __strnlen_user
-
- .align 3
-__strnlen_user:
- .prologue 0
-
- EX( ldq_u t0, 0(a0) ) # load first quadword (a0 may be misaligned)
- lda t1, -1(zero)
- insqh t1, a0, t1
- andnot a0, 7, v0
- or t1, t0, t0
- subq a0, 1, a0 # get our +1 for the return
- cmpbge zero, t0, t1 # t1 <- bitmask: bit i == 1 <==> i-th byte == 0
- subq a1, 7, t2
- subq a0, v0, t0
- bne t1, $found
-
- addq t2, t0, t2
- addq a1, 1, a1
-
- .align 3
-$loop: ble t2, $limit
- EX( ldq t0, 8(v0) )
- subq t2, 8, t2
- addq v0, 8, v0 # addr += 8
- cmpbge zero, t0, t1
- beq t1, $loop
-
-$found: negq t1, t2 # clear all but least set bit
- and t1, t2, t1
-
- and t1, 0xf0, t2 # binary search for that set bit
- and t1, 0xcc, t3
- and t1, 0xaa, t4
- cmovne t2, 4, t2
- cmovne t3, 2, t3
- cmovne t4, 1, t4
- addq t2, t3, t2
- addq v0, t4, v0
- addq v0, t2, v0
- nop # dual issue next two on ev4 and ev5
- subq v0, a0, v0
-$exception:
- ret
-
- .align 3 # currently redundant
-$limit:
- subq a1, t2, v0
- ret
-
- .end __strlen_user
diff --git a/arch/alpha/lib/strncpy_from_user.S b/arch/alpha/lib/strncpy_from_user.S
deleted file mode 100644
index 73ee21160ff..00000000000
--- a/arch/alpha/lib/strncpy_from_user.S
+++ /dev/null
@@ -1,339 +0,0 @@
-/*
- * arch/alpha/lib/strncpy_from_user.S
- * Contributed by Richard Henderson (rth@tamu.edu)
- *
- * Just like strncpy except in the return value:
- *
- * -EFAULT if an exception occurs before the terminator is copied.
- * N if the buffer filled.
- *
- * Otherwise the length of the string is returned.
- */
-
-
-#include <asm/errno.h>
-#include <asm/regdef.h>
-
-
-/* Allow an exception for an insn; exit if we get one. */
-#define EX(x,y...) \
- 99: x,##y; \
- .section __ex_table,"a"; \
- .long 99b - .; \
- lda $31, $exception-99b($0); \
- .previous
-
-
- .set noat
- .set noreorder
- .text
-
- .globl __strncpy_from_user
- .ent __strncpy_from_user
- .frame $30, 0, $26
- .prologue 0
-
- .align 3
-$aligned:
- /* On entry to this basic block:
- t0 == the first destination word for masking back in
- t1 == the first source word. */
-
- /* Create the 1st output word and detect 0's in the 1st input word. */
- lda t2, -1 # e1 : build a mask against false zero
- mskqh t2, a1, t2 # e0 : detection in the src word
- mskqh t1, a1, t3 # e0 :
- ornot t1, t2, t2 # .. e1 :
- mskql t0, a1, t0 # e0 : assemble the first output word
- cmpbge zero, t2, t8 # .. e1 : bits set iff null found
- or t0, t3, t0 # e0 :
- beq a2, $a_eoc # .. e1 :
- bne t8, $a_eos # .. e1 :
-
- /* On entry to this basic block:
- t0 == a source word not containing a null. */
-
-$a_loop:
- stq_u t0, 0(a0) # e0 :
- addq a0, 8, a0 # .. e1 :
- EX( ldq_u t0, 0(a1) ) # e0 :
- addq a1, 8, a1 # .. e1 :
- subq a2, 1, a2 # e0 :
- cmpbge zero, t0, t8 # .. e1 (stall)
- beq a2, $a_eoc # e1 :
- beq t8, $a_loop # e1 :
-
- /* Take care of the final (partial) word store. At this point
- the end-of-count bit is set in t8 iff it applies.
-
- On entry to this basic block we have:
- t0 == the source word containing the null
- t8 == the cmpbge mask that found it. */
-
-$a_eos:
- negq t8, t12 # e0 : find low bit set
- and t8, t12, t12 # e1 (stall)
-
- /* For the sake of the cache, don't read a destination word
- if we're not going to need it. */
- and t12, 0x80, t6 # e0 :
- bne t6, 1f # .. e1 (zdb)
-
- /* We're doing a partial word store and so need to combine
- our source and original destination words. */
- ldq_u t1, 0(a0) # e0 :
- subq t12, 1, t6 # .. e1 :
- or t12, t6, t8 # e0 :
- unop #
- zapnot t0, t8, t0 # e0 : clear src bytes > null
- zap t1, t8, t1 # .. e1 : clear dst bytes <= null
- or t0, t1, t0 # e1 :
-
-1: stq_u t0, 0(a0)
- br $finish_up
-
- /* Add the end-of-count bit to the eos detection bitmask. */
-$a_eoc:
- or t10, t8, t8
- br $a_eos
-
- /*** The Function Entry Point ***/
- .align 3
-__strncpy_from_user:
- mov a0, v0 # save the string start
- beq a2, $zerolength
-
- /* Are source and destination co-aligned? */
- xor a0, a1, t1 # e0 :
- and a0, 7, t0 # .. e1 : find dest misalignment
- and t1, 7, t1 # e0 :
- addq a2, t0, a2 # .. e1 : bias count by dest misalignment
- subq a2, 1, a2 # e0 :
- and a2, 7, t2 # e1 :
- srl a2, 3, a2 # e0 : a2 = loop counter = (count - 1)/8
- addq zero, 1, t10 # .. e1 :
- sll t10, t2, t10 # e0 : t10 = bitmask of last count byte
- bne t1, $unaligned # .. e1 :
-
- /* We are co-aligned; take care of a partial first word. */
-
- EX( ldq_u t1, 0(a1) ) # e0 : load first src word
- addq a1, 8, a1 # .. e1 :
-
- beq t0, $aligned # avoid loading dest word if not needed
- ldq_u t0, 0(a0) # e0 :
- br $aligned # .. e1 :
-
-
-/* The source and destination are not co-aligned. Align the destination
- and cope. We have to be very careful about not reading too much and
- causing a SEGV. */
-
- .align 3
-$u_head:
- /* We know just enough now to be able to assemble the first
- full source word. We can still find a zero at the end of it
- that prevents us from outputting the whole thing.
-
- On entry to this basic block:
- t0 == the first dest word, unmasked
- t1 == the shifted low bits of the first source word
- t6 == bytemask that is -1 in dest word bytes */
-
- EX( ldq_u t2, 8(a1) ) # e0 : load second src word
- addq a1, 8, a1 # .. e1 :
- mskql t0, a0, t0 # e0 : mask trailing garbage in dst
- extqh t2, a1, t4 # e0 :
- or t1, t4, t1 # e1 : first aligned src word complete
- mskqh t1, a0, t1 # e0 : mask leading garbage in src
- or t0, t1, t0 # e0 : first output word complete
- or t0, t6, t6 # e1 : mask original data for zero test
- cmpbge zero, t6, t8 # e0 :
- beq a2, $u_eocfin # .. e1 :
- bne t8, $u_final # e1 :
-
- lda t6, -1 # e1 : mask out the bits we have
- mskql t6, a1, t6 # e0 : already seen
- stq_u t0, 0(a0) # e0 : store first output word
- or t6, t2, t2 # .. e1 :
- cmpbge zero, t2, t8 # e0 : find nulls in second partial
- addq a0, 8, a0 # .. e1 :
- subq a2, 1, a2 # e0 :
- bne t8, $u_late_head_exit # .. e1 :
-
- /* Finally, we've got all the stupid leading edge cases taken care
- of and we can set up to enter the main loop. */
-
- extql t2, a1, t1 # e0 : position hi-bits of lo word
- EX( ldq_u t2, 8(a1) ) # .. e1 : read next high-order source word
- addq a1, 8, a1 # e0 :
- cmpbge zero, t2, t8 # e1 (stall)
- beq a2, $u_eoc # e1 :
- bne t8, $u_eos # e1 :
-
- /* Unaligned copy main loop. In order to avoid reading too much,
- the loop is structured to detect zeros in aligned source words.
- This has, unfortunately, effectively pulled half of a loop
- iteration out into the head and half into the tail, but it does
- prevent nastiness from accumulating in the very thing we want
- to run as fast as possible.
-
- On entry to this basic block:
- t1 == the shifted high-order bits from the previous source word
- t2 == the unshifted current source word
-
- We further know that t2 does not contain a null terminator. */
-
- .align 3
-$u_loop:
- extqh t2, a1, t0 # e0 : extract high bits for current word
- addq a1, 8, a1 # .. e1 :
- extql t2, a1, t3 # e0 : extract low bits for next time
- addq a0, 8, a0 # .. e1 :
- or t0, t1, t0 # e0 : current dst word now complete
- EX( ldq_u t2, 0(a1) ) # .. e1 : load high word for next time
- stq_u t0, -8(a0) # e0 : save the current word
- mov t3, t1 # .. e1 :
- subq a2, 1, a2 # e0 :
- cmpbge zero, t2, t8 # .. e1 : test new word for eos
- beq a2, $u_eoc # e1 :
- beq t8, $u_loop # e1 :
-
- /* We've found a zero somewhere in the source word we just read.
- If it resides in the lower half, we have one (probably partial)
- word to write out, and if it resides in the upper half, we
- have one full and one partial word left to write out.
-
- On entry to this basic block:
- t1 == the shifted high-order bits from the previous source word
- t2 == the unshifted current source word. */
-$u_eos:
- extqh t2, a1, t0 # e0 :
- or t0, t1, t0 # e1 : first (partial) source word complete
-
- cmpbge zero, t0, t8 # e0 : is the null in this first bit?
- bne t8, $u_final # .. e1 (zdb)
-
- stq_u t0, 0(a0) # e0 : the null was in the high-order bits
- addq a0, 8, a0 # .. e1 :
- subq a2, 1, a2 # e1 :
-
-$u_late_head_exit:
- extql t2, a1, t0 # .. e0 :
- cmpbge zero, t0, t8 # e0 :
- or t8, t10, t6 # e1 :
- cmoveq a2, t6, t8 # e0 :
- nop # .. e1 :
-
- /* Take care of a final (probably partial) result word.
- On entry to this basic block:
- t0 == assembled source word
- t8 == cmpbge mask that found the null. */
-$u_final:
- negq t8, t6 # e0 : isolate low bit set
- and t6, t8, t12 # e1 :
-
- and t12, 0x80, t6 # e0 : avoid dest word load if we can
- bne t6, 1f # .. e1 (zdb)
-
- ldq_u t1, 0(a0) # e0 :
- subq t12, 1, t6 # .. e1 :
- or t6, t12, t8 # e0 :
- zapnot t0, t8, t0 # .. e1 : kill source bytes > null
- zap t1, t8, t1 # e0 : kill dest bytes <= null
- or t0, t1, t0 # e1 :
-
-1: stq_u t0, 0(a0) # e0 :
- br $finish_up
-
-$u_eoc: # end-of-count
- extqh t2, a1, t0
- or t0, t1, t0
- cmpbge zero, t0, t8
-
-$u_eocfin: # end-of-count, final word
- or t10, t8, t8
- br $u_final
-
- /* Unaligned copy entry point. */
- .align 3
-$unaligned:
-
- EX( ldq_u t1, 0(a1) ) # e0 : load first source word
-
- and a0, 7, t4 # .. e1 : find dest misalignment
- and a1, 7, t5 # e0 : find src misalignment
-
- /* Conditionally load the first destination word and a bytemask
- with 0xff indicating that the destination byte is sacrosanct. */
-
- mov zero, t0 # .. e1 :
- mov zero, t6 # e0 :
- beq t4, 1f # .. e1 :
- ldq_u t0, 0(a0) # e0 :
- lda t6, -1 # .. e1 :
- mskql t6, a0, t6 # e0 :
-1:
- subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr
-
- /* If source misalignment is larger than dest misalignment, we need
- extra startup checks to avoid SEGV. */
-
- cmplt t4, t5, t12 # e1 :
- extql t1, a1, t1 # .. e0 : shift src into place
- lda t2, -1 # e0 : for creating masks later
- beq t12, $u_head # e1 :
-
- mskqh t2, t5, t2 # e0 : begin src byte validity mask
- cmpbge zero, t1, t8 # .. e1 : is there a zero?
- extql t2, a1, t2 # e0 :
- or t8, t10, t5 # .. e1 : test for end-of-count too
- cmpbge zero, t2, t3 # e0 :
- cmoveq a2, t5, t8 # .. e1 :
- andnot t8, t3, t8 # e0 :
- beq t8, $u_head # .. e1 (zdb)
-
- /* At this point we've found a zero in the first partial word of
- the source. We need to isolate the valid source data and mask
- it into the original destination data. (Incidentally, we know
- that we'll need at least one byte of that original dest word.) */
-
- ldq_u t0, 0(a0) # e0 :
- negq t8, t6 # .. e1 : build bitmask of bytes <= zero
- mskqh t1, t4, t1 # e0 :
- and t6, t8, t12 # .. e1 :
- subq t12, 1, t6 # e0 :
- or t6, t12, t8 # e1 :
-
- zapnot t2, t8, t2 # e0 : prepare source word; mirror changes
- zapnot t1, t8, t1 # .. e1 : to source validity mask
-
- andnot t0, t2, t0 # e0 : zero place for source to reside
- or t0, t1, t0 # e1 : and put it there
- stq_u t0, 0(a0) # e0 :
-
-$finish_up:
- zapnot t0, t12, t4 # was last byte written null?
- cmovne t4, 1, t4
-
- and t12, 0xf0, t3 # binary search for the address of the
- and t12, 0xcc, t2 # last byte written
- and t12, 0xaa, t1
- bic a0, 7, t0
- cmovne t3, 4, t3
- cmovne t2, 2, t2
- cmovne t1, 1, t1
- addq t0, t3, t0
- addq t1, t2, t1
- addq t0, t1, t0
- addq t0, t4, t0 # add one if we filled the buffer
-
- subq t0, v0, v0 # find string length
- ret
-
-$zerolength:
- clr v0
-$exception:
- ret
-
- .end __strncpy_from_user
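The deleted $finish_up code above locates the last byte written by treating t12 as a one-hot byte mask and binary-searching its bit index with the 0xf0/0xcc/0xaa masks. A C sketch of that trick, for illustration only (the helper name is made up and is not part of the patch):

static int onehot_bit_index(unsigned char onehot)
{
	int idx = 0;

	if (onehot & 0xf0)	/* set bit lies in the upper nibble (bits 4-7) */
		idx += 4;
	if (onehot & 0xcc)	/* set bit lies in bits 2-3 or 6-7 */
		idx += 2;
	if (onehot & 0xaa)	/* set bit is the odd member of its bit pair */
		idx += 1;
	return idx;		/* 0..7: byte offset of the last byte written */
}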
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 5eecab1a84e..0c4132dd350 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -89,6 +89,8 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
const struct exception_table_entry *fixup;
int fault, si_code = SEGV_MAPERR;
siginfo_t info;
+ unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+ (cause > 0 ? FAULT_FLAG_WRITE : 0));
/* As of EV6, a load into $31/$f31 is a prefetch, and never faults
(or is suppressed by the PALcode). Support that for older CPUs
@@ -114,6 +116,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
goto vmalloc_fault;
#endif
+retry:
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
@@ -144,8 +147,11 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
/* If for any reason at all we couldn't handle the fault,
make sure we exit gracefully rather than endlessly redo
the fault. */
- fault = handle_mm_fault(mm, vma, address, cause > 0 ? FAULT_FLAG_WRITE : 0);
- up_read(&mm->mmap_sem);
+ fault = handle_mm_fault(mm, vma, address, flags);
+
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -153,10 +159,26 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
goto do_sigbus;
BUG();
}
- if (fault & VM_FAULT_MAJOR)
- current->maj_flt++;
- else
- current->min_flt++;
+
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR)
+ current->maj_flt++;
+ else
+ current->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+ /* No need to up_read(&mm->mmap_sem) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
+ }
+ }
+
+ up_read(&mm->mmap_sem);
+
return;
/* Something tried to access memory that isn't in our memory map.
@@ -186,12 +208,14 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
/* We ran out of memory, or some other thing happened to us that
made us unable to handle the page fault gracefully. */
out_of_memory:
+ up_read(&mm->mmap_sem);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
+ up_read(&mm->mmap_sem);
/* Send a sigbus, regardless of whether we were in kernel
or user mode. */
info.si_signo = SIGBUS;
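The retry handling added above follows the usual pattern for killable, retryable faults: allow one retry after the mm core drops mmap_sem in __lock_page_or_retry(), account maj_flt/min_flt only on the first pass, and clear FAULT_FLAG_ALLOW_RETRY before retrying so the loop cannot repeat indefinitely. A condensed, illustrative sketch of that control flow (not a drop-in replacement; the real do_page_fault() also re-runs find_vma() and the access checks):

#include <linux/mm.h>
#include <linux/sched.h>

static void example_fault_retry(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long address, long cause)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			     (cause > 0 ? FAULT_FLAG_WRITE : 0);
	int fault;

retry:
	down_read(&mm->mmap_sem);
	fault = handle_mm_fault(mm, vma, address, flags);

	/* If the task was killed, __lock_page_or_retry() already dropped
	 * mmap_sem, so just bail out. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if ((flags & FAULT_FLAG_ALLOW_RETRY) && (fault & VM_FAULT_RETRY)) {
		/* mmap_sem was released in __lock_page_or_retry(); retry
		 * once with ALLOW_RETRY cleared so we cannot loop forever. */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		goto retry;
	}

	up_read(&mm->mmap_sem);
}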
diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c
index a0a5d27aa21..b8ce18f485d 100644
--- a/arch/alpha/oprofile/common.c
+++ b/arch/alpha/oprofile/common.c
@@ -12,6 +12,7 @@
#include <linux/smp.h>
#include <linux/errno.h>
#include <asm/ptrace.h>
+#include <asm/special_insns.h>
#include "op_impl.h"
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e91c7cdc6fe..76635b21629 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -6,7 +6,7 @@ config ARM
select HAVE_DMA_API_DEBUG
select HAVE_IDE if PCI || ISA || PCMCIA
select HAVE_DMA_ATTRS
- select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
+ select HAVE_DMA_CONTIGUOUS if MMU
select HAVE_MEMBLOCK
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
@@ -38,7 +38,6 @@ config ARM
select HARDIRQS_SW_RESEND
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
- select GENERIC_IRQ_PROBE
select ARCH_WANT_IPC_PARSE_VERSION
select HARDIRQS_SW_RESEND
select CPU_PM if (SUSPEND || CPU_IDLE)
@@ -126,11 +125,6 @@ config TRACE_IRQFLAGS_SUPPORT
bool
default y
-config GENERIC_LOCKBREAK
- bool
- default y
- depends on SMP && PREEMPT
-
config RWSEM_GENERIC_SPINLOCK
bool
default y
@@ -1185,12 +1179,6 @@ config XSCALE_PMU
depends on CPU_XSCALE
default y
-config CPU_HAS_PMU
- depends on (CPU_V6 || CPU_V6K || CPU_V7 || XSCALE_PMU) && \
- (!ARCH_OMAP3 || OMAP3_EMU)
- default y
- bool
-
config MULTI_IRQ_HANDLER
bool
help
@@ -1559,6 +1547,53 @@ config SCHED_SMT
MultiThreading at a cost of slightly increased overhead in some
places. If unsure say N here.
+config DISABLE_CPU_SCHED_DOMAIN_BALANCE
+ bool "(EXPERIMENTAL) Disable CPU level scheduler load-balancing"
+ help
+ Disables scheduler load-balancing at CPU sched domain level.
+
+config SCHED_HMP
+	bool "(EXPERIMENTAL) Heterogeneous multiprocessor scheduling"
+ depends on DISABLE_CPU_SCHED_DOMAIN_BALANCE && SCHED_MC && FAIR_GROUP_SCHED && !SCHED_AUTOGROUP
+ help
+ Experimental scheduler optimizations for heterogeneous platforms.
+ Attempts to introspectively select task affinity to optimize power
+ and performance. Basic support for multiple (>2) cpu types is in place,
+ but it has only been tested with two types of cpus.
+ There is currently no support for migration of task groups, hence
+ !SCHED_AUTOGROUP. Furthermore, normal load-balancing must be disabled
+ between cpus of different type (DISABLE_CPU_SCHED_DOMAIN_BALANCE).
+
+config SCHED_HMP_PRIO_FILTER
+ bool "(EXPERIMENTAL) Filter HMP migrations by task priority"
+ depends on SCHED_HMP
+ default y
+ help
+ Enables task priority based HMP migration filter. Any task with
+ a NICE value above the threshold will always be on low-power cpus
+ with less compute capacity.
+
+config SCHED_HMP_PRIO_FILTER_VAL
+ int "NICE priority threshold"
+ default 5
+ depends on SCHED_HMP_PRIO_FILTER
+
+config HMP_FAST_CPU_MASK
+ string "HMP scheduler fast CPU mask"
+ depends on SCHED_HMP
+ help
+ Leave empty to use device tree information.
+ Specify the cpuids of the fast CPUs in the system as a list string,
+	  e.g. cpuids 0 and 1 should be specified as 0-1.
+
+config HMP_SLOW_CPU_MASK
+ string "HMP scheduler slow CPU mask"
+ depends on SCHED_HMP
+ help
+ Leave empty to use device tree information.
+ Specify the cpuids of the slow CPUs in the system as a list string,
+	  e.g. cpuids 0 and 1 should be specified as 0-1.
+
config HAVE_ARM_SCU
bool
help
@@ -1763,7 +1798,7 @@ config HIGHPTE
config HW_PERF_EVENTS
bool "Enable hardware performance counter support for perf events"
- depends on PERF_EVENTS && CPU_HAS_PMU
+ depends on PERF_EVENTS
default y
help
Enable hardware performance counter support for perf events. If
@@ -2150,6 +2185,7 @@ source "drivers/cpufreq/Kconfig"
config CPU_FREQ_IMX
tristate "CPUfreq driver for i.MX CPUs"
depends on ARCH_MXC && CPU_FREQ
+ select CPU_FREQ_TABLE
help
This enables the CPUfreq driver for i.MX CPUs.
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index f15f82bf3a5..e968a52e488 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -356,15 +356,15 @@ choice
is nothing connected to read from the DCC.
config DEBUG_SEMIHOSTING
- bool "Kernel low-level debug output via semihosting I"
+ bool "Kernel low-level debug output via semihosting I/O"
help
Semihosting enables code running on an ARM target to use
the I/O facilities on a host debugger/emulator through a
- simple SVC calls. The host debugger or emulator must have
+ simple SVC call. The host debugger or emulator must have
semihosting enabled for the special svc call to be trapped
otherwise the kernel will crash.
- This is known to work with OpenOCD, as wellas
+ This is known to work with OpenOCD, as well as
ARM's Fast Models, or any other controlling environment
that implements semihosting.
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 30eae87ead6..a051dfbdd7d 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -284,10 +284,10 @@ zImage Image xipImage bootpImage uImage: vmlinux
zinstall uinstall install: vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
-%.dtb:
+%.dtb: scripts
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
-dtbs:
+dtbs: scripts
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
# We use MRPROPER_FILES and CLEAN_FILES now
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index b8c64b80baf..bc67cbff394 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -653,16 +653,21 @@ __armv7_mmu_cache_on:
mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
#endif
mrc p15, 0, r0, c1, c0, 0 @ read control reg
+ bic r0, r0, #1 << 28 @ clear SCTLR.TRE
orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
orr r0, r0, #0x003c @ write buffer
#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_ENDIAN_BE8
orr r0, r0, #1 << 25 @ big-endian page tables
#endif
+ mrcne p15, 0, r6, c2, c0, 2 @ read ttb control reg
orrne r0, r0, #1 @ MMU enabled
movne r1, #0xfffffffd @ domain 0 = client
+ bic r6, r6, #1 << 31 @ 32-bit translation system
+ bic r6, r6, #3 << 0 @ use only ttbr0
mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer
mcrne p15, 0, r1, c3, c0, 0 @ load domain access control
+ mcrne p15, 0, r6, c2, c0, 2 @ load ttb control
#endif
mcr p15, 0, r0, c7, c5, 4 @ ISB
mcr p15, 0, r0, c1, c0, 0 @ load control register
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 59509c48d7e..bd0cff3f808 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -154,5 +154,10 @@
#size-cells = <0>;
ti,hwmods = "i2c3";
};
+
+ wdt2: wdt@44e35000 {
+ compatible = "ti,omap3-wdt";
+ ti,hwmods = "wd_timer2";
+ };
};
};
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index 66389c1c6f6..7c95f76398d 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -104,6 +104,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioB: gpio@fffff600 {
@@ -113,6 +114,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioC: gpio@fffff800 {
@@ -122,6 +124,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
dbgu: serial@fffff200 {
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index b460d6ce9eb..195019b7ca0 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -95,6 +95,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioB: gpio@fffff400 {
@@ -104,6 +105,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioC: gpio@fffff600 {
@@ -113,6 +115,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioD: gpio@fffff800 {
@@ -122,6 +125,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioE: gpio@fffffa00 {
@@ -131,6 +135,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
dbgu: serial@ffffee00 {
diff --git a/arch/arm/boot/dts/at91sam9g25ek.dts b/arch/arm/boot/dts/at91sam9g25ek.dts
index 7829a4d0cb2..96514c134e5 100644
--- a/arch/arm/boot/dts/at91sam9g25ek.dts
+++ b/arch/arm/boot/dts/at91sam9g25ek.dts
@@ -15,7 +15,7 @@
compatible = "atmel,at91sam9g25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9";
chosen {
- bootargs = "128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs";
+ bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs";
};
ahb {
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index bafa8806fc1..63751b1e744 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -113,6 +113,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioB: gpio@fffff400 {
@@ -122,6 +123,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioC: gpio@fffff600 {
@@ -131,6 +133,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioD: gpio@fffff800 {
@@ -140,6 +143,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioE: gpio@fffffa00 {
@@ -149,6 +153,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
dbgu: serial@ffffee00 {
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index bfac0dfc332..ef9336ae961 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -107,6 +107,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioB: gpio@fffff600 {
@@ -116,6 +117,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioC: gpio@fffff800 {
@@ -125,6 +127,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioD: gpio@fffffa00 {
@@ -134,6 +137,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
dbgu: serial@fffff200 {
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 4a18c393b13..8a387a8d61b 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -115,6 +115,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioB: gpio@fffff600 {
@@ -124,6 +125,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioC: gpio@fffff800 {
@@ -133,6 +135,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
pioD: gpio@fffffa00 {
@@ -142,6 +145,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
};
dbgu: serial@fffff200 {
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
index a874dbfb5ae..e6138310e5c 100644
--- a/arch/arm/boot/dts/imx23.dtsi
+++ b/arch/arm/boot/dts/imx23.dtsi
@@ -51,11 +51,11 @@
dma-apbh@80004000 {
compatible = "fsl,imx23-dma-apbh";
- reg = <0x80004000 2000>;
+ reg = <0x80004000 0x2000>;
};
ecc@80008000 {
- reg = <0x80008000 2000>;
+ reg = <0x80008000 0x2000>;
status = "disabled";
};
@@ -63,7 +63,7 @@
compatible = "fsl,imx23-gpmi-nand";
#address-cells = <1>;
#size-cells = <1>;
- reg = <0x8000c000 2000>, <0x8000a000 2000>;
+ reg = <0x8000c000 0x2000>, <0x8000a000 0x2000>;
reg-names = "gpmi-nand", "bch";
interrupts = <13>, <56>;
interrupt-names = "gpmi-dma", "bch";
@@ -72,14 +72,14 @@
};
ssp0: ssp@80010000 {
- reg = <0x80010000 2000>;
+ reg = <0x80010000 0x2000>;
interrupts = <15 14>;
fsl,ssp-dma-channel = <1>;
status = "disabled";
};
etm@80014000 {
- reg = <0x80014000 2000>;
+ reg = <0x80014000 0x2000>;
status = "disabled";
};
@@ -87,7 +87,7 @@
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx23-pinctrl", "simple-bus";
- reg = <0x80018000 2000>;
+ reg = <0x80018000 0x2000>;
gpio0: gpio@0 {
compatible = "fsl,imx23-gpio", "fsl,mxs-gpio";
@@ -273,32 +273,32 @@
};
emi@80020000 {
- reg = <0x80020000 2000>;
+ reg = <0x80020000 0x2000>;
status = "disabled";
};
dma-apbx@80024000 {
compatible = "fsl,imx23-dma-apbx";
- reg = <0x80024000 2000>;
+ reg = <0x80024000 0x2000>;
};
dcp@80028000 {
- reg = <0x80028000 2000>;
+ reg = <0x80028000 0x2000>;
status = "disabled";
};
pxp@8002a000 {
- reg = <0x8002a000 2000>;
+ reg = <0x8002a000 0x2000>;
status = "disabled";
};
ocotp@8002c000 {
- reg = <0x8002c000 2000>;
+ reg = <0x8002c000 0x2000>;
status = "disabled";
};
axi-ahb@8002e000 {
- reg = <0x8002e000 2000>;
+ reg = <0x8002e000 0x2000>;
status = "disabled";
};
@@ -310,14 +310,14 @@
};
ssp1: ssp@80034000 {
- reg = <0x80034000 2000>;
+ reg = <0x80034000 0x2000>;
interrupts = <2 20>;
fsl,ssp-dma-channel = <2>;
status = "disabled";
};
tvenc@80038000 {
- reg = <0x80038000 2000>;
+ reg = <0x80038000 0x2000>;
status = "disabled";
};
};
@@ -330,37 +330,37 @@
ranges;
clkctl@80040000 {
- reg = <0x80040000 2000>;
+ reg = <0x80040000 0x2000>;
status = "disabled";
};
saif0: saif@80042000 {
- reg = <0x80042000 2000>;
+ reg = <0x80042000 0x2000>;
status = "disabled";
};
power@80044000 {
- reg = <0x80044000 2000>;
+ reg = <0x80044000 0x2000>;
status = "disabled";
};
saif1: saif@80046000 {
- reg = <0x80046000 2000>;
+ reg = <0x80046000 0x2000>;
status = "disabled";
};
audio-out@80048000 {
- reg = <0x80048000 2000>;
+ reg = <0x80048000 0x2000>;
status = "disabled";
};
audio-in@8004c000 {
- reg = <0x8004c000 2000>;
+ reg = <0x8004c000 0x2000>;
status = "disabled";
};
lradc@80050000 {
- reg = <0x80050000 2000>;
+ reg = <0x80050000 0x2000>;
status = "disabled";
};
@@ -370,26 +370,26 @@
};
i2c@80058000 {
- reg = <0x80058000 2000>;
+ reg = <0x80058000 0x2000>;
status = "disabled";
};
rtc@8005c000 {
compatible = "fsl,imx23-rtc", "fsl,stmp3xxx-rtc";
- reg = <0x8005c000 2000>;
+ reg = <0x8005c000 0x2000>;
interrupts = <22>;
};
pwm: pwm@80064000 {
compatible = "fsl,imx23-pwm";
- reg = <0x80064000 2000>;
+ reg = <0x80064000 0x2000>;
#pwm-cells = <2>;
fsl,pwm-number = <5>;
status = "disabled";
};
timrot@80068000 {
- reg = <0x80068000 2000>;
+ reg = <0x80068000 0x2000>;
status = "disabled";
};
@@ -429,7 +429,7 @@
ranges;
usbctrl@80080000 {
- reg = <0x80080000 0x10000>;
+ reg = <0x80080000 0x40000>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/imx27-3ds.dts b/arch/arm/boot/dts/imx27-3ds.dts
index d3f8296e19e..0a8978a40ec 100644
--- a/arch/arm/boot/dts/imx27-3ds.dts
+++ b/arch/arm/boot/dts/imx27-3ds.dts
@@ -27,7 +27,7 @@
status = "okay";
};
- uart@1000a000 {
+ uart1: serial@1000a000 {
fsl,uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index 00bae3aad5a..5303ab680a3 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -19,6 +19,12 @@
serial3 = &uart4;
serial4 = &uart5;
serial5 = &uart6;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+ gpio3 = &gpio4;
+ gpio4 = &gpio5;
+ gpio5 = &gpio6;
};
avic: avic-interrupt-controller@e0000000 {
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index 787efac68da..3fa6d190fab 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -57,18 +57,18 @@
};
hsadc@80002000 {
- reg = <0x80002000 2000>;
+ reg = <0x80002000 0x2000>;
interrupts = <13 87>;
status = "disabled";
};
dma-apbh@80004000 {
compatible = "fsl,imx28-dma-apbh";
- reg = <0x80004000 2000>;
+ reg = <0x80004000 0x2000>;
};
perfmon@80006000 {
- reg = <0x80006000 800>;
+ reg = <0x80006000 0x800>;
interrupts = <27>;
status = "disabled";
};
@@ -77,7 +77,7 @@
compatible = "fsl,imx28-gpmi-nand";
#address-cells = <1>;
#size-cells = <1>;
- reg = <0x8000c000 2000>, <0x8000a000 2000>;
+ reg = <0x8000c000 0x2000>, <0x8000a000 0x2000>;
reg-names = "gpmi-nand", "bch";
interrupts = <88>, <41>;
interrupt-names = "gpmi-dma", "bch";
@@ -86,28 +86,28 @@
};
ssp0: ssp@80010000 {
- reg = <0x80010000 2000>;
+ reg = <0x80010000 0x2000>;
interrupts = <96 82>;
fsl,ssp-dma-channel = <0>;
status = "disabled";
};
ssp1: ssp@80012000 {
- reg = <0x80012000 2000>;
+ reg = <0x80012000 0x2000>;
interrupts = <97 83>;
fsl,ssp-dma-channel = <1>;
status = "disabled";
};
ssp2: ssp@80014000 {
- reg = <0x80014000 2000>;
+ reg = <0x80014000 0x2000>;
interrupts = <98 84>;
fsl,ssp-dma-channel = <2>;
status = "disabled";
};
ssp3: ssp@80016000 {
- reg = <0x80016000 2000>;
+ reg = <0x80016000 0x2000>;
interrupts = <99 85>;
fsl,ssp-dma-channel = <3>;
status = "disabled";
@@ -117,7 +117,7 @@
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx28-pinctrl", "simple-bus";
- reg = <0x80018000 2000>;
+ reg = <0x80018000 0x2000>;
gpio0: gpio@0 {
compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
@@ -510,96 +510,96 @@
};
digctl@8001c000 {
- reg = <0x8001c000 2000>;
+ reg = <0x8001c000 0x2000>;
interrupts = <89>;
status = "disabled";
};
etm@80022000 {
- reg = <0x80022000 2000>;
+ reg = <0x80022000 0x2000>;
status = "disabled";
};
dma-apbx@80024000 {
compatible = "fsl,imx28-dma-apbx";
- reg = <0x80024000 2000>;
+ reg = <0x80024000 0x2000>;
};
dcp@80028000 {
- reg = <0x80028000 2000>;
+ reg = <0x80028000 0x2000>;
interrupts = <52 53 54>;
status = "disabled";
};
pxp@8002a000 {
- reg = <0x8002a000 2000>;
+ reg = <0x8002a000 0x2000>;
interrupts = <39>;
status = "disabled";
};
ocotp@8002c000 {
- reg = <0x8002c000 2000>;
+ reg = <0x8002c000 0x2000>;
status = "disabled";
};
axi-ahb@8002e000 {
- reg = <0x8002e000 2000>;
+ reg = <0x8002e000 0x2000>;
status = "disabled";
};
lcdif@80030000 {
compatible = "fsl,imx28-lcdif";
- reg = <0x80030000 2000>;
+ reg = <0x80030000 0x2000>;
interrupts = <38 86>;
status = "disabled";
};
can0: can@80032000 {
compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan";
- reg = <0x80032000 2000>;
+ reg = <0x80032000 0x2000>;
interrupts = <8>;
status = "disabled";
};
can1: can@80034000 {
compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan";
- reg = <0x80034000 2000>;
+ reg = <0x80034000 0x2000>;
interrupts = <9>;
status = "disabled";
};
simdbg@8003c000 {
- reg = <0x8003c000 200>;
+ reg = <0x8003c000 0x200>;
status = "disabled";
};
simgpmisel@8003c200 {
- reg = <0x8003c200 100>;
+ reg = <0x8003c200 0x100>;
status = "disabled";
};
simsspsel@8003c300 {
- reg = <0x8003c300 100>;
+ reg = <0x8003c300 0x100>;
status = "disabled";
};
simmemsel@8003c400 {
- reg = <0x8003c400 100>;
+ reg = <0x8003c400 0x100>;
status = "disabled";
};
gpiomon@8003c500 {
- reg = <0x8003c500 100>;
+ reg = <0x8003c500 0x100>;
status = "disabled";
};
simenet@8003c700 {
- reg = <0x8003c700 100>;
+ reg = <0x8003c700 0x100>;
status = "disabled";
};
armjtag@8003c800 {
- reg = <0x8003c800 100>;
+ reg = <0x8003c800 0x100>;
status = "disabled";
};
};
@@ -612,45 +612,45 @@
ranges;
clkctl@80040000 {
- reg = <0x80040000 2000>;
+ reg = <0x80040000 0x2000>;
status = "disabled";
};
saif0: saif@80042000 {
compatible = "fsl,imx28-saif";
- reg = <0x80042000 2000>;
+ reg = <0x80042000 0x2000>;
interrupts = <59 80>;
fsl,saif-dma-channel = <4>;
status = "disabled";
};
power@80044000 {
- reg = <0x80044000 2000>;
+ reg = <0x80044000 0x2000>;
status = "disabled";
};
saif1: saif@80046000 {
compatible = "fsl,imx28-saif";
- reg = <0x80046000 2000>;
+ reg = <0x80046000 0x2000>;
interrupts = <58 81>;
fsl,saif-dma-channel = <5>;
status = "disabled";
};
lradc@80050000 {
- reg = <0x80050000 2000>;
+ reg = <0x80050000 0x2000>;
status = "disabled";
};
spdif@80054000 {
- reg = <0x80054000 2000>;
+ reg = <0x80054000 0x2000>;
interrupts = <45 66>;
status = "disabled";
};
rtc@80056000 {
compatible = "fsl,imx28-rtc", "fsl,stmp3xxx-rtc";
- reg = <0x80056000 2000>;
+ reg = <0x80056000 0x2000>;
interrupts = <29>;
};
@@ -658,7 +658,7 @@
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx28-i2c";
- reg = <0x80058000 2000>;
+ reg = <0x80058000 0x2000>;
interrupts = <111 68>;
clock-frequency = <100000>;
status = "disabled";
@@ -668,7 +668,7 @@
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,imx28-i2c";
- reg = <0x8005a000 2000>;
+ reg = <0x8005a000 0x2000>;
interrupts = <110 69>;
clock-frequency = <100000>;
status = "disabled";
@@ -676,14 +676,14 @@
pwm: pwm@80064000 {
compatible = "fsl,imx28-pwm", "fsl,imx23-pwm";
- reg = <0x80064000 2000>;
+ reg = <0x80064000 0x2000>;
#pwm-cells = <2>;
fsl,pwm-number = <8>;
status = "disabled";
};
timrot@80068000 {
- reg = <0x80068000 2000>;
+ reg = <0x80068000 0x2000>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index de065b5976e..59d9789e550 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -25,8 +25,8 @@
aips@70000000 { /* aips-1 */
spba@70000000 {
esdhc@70004000 { /* ESDHC1 */
- fsl,cd-internal;
- fsl,wp-internal;
+ fsl,cd-controller;
+ fsl,wp-controller;
status = "okay";
};
@@ -53,7 +53,7 @@
spi-max-frequency = <6000000>;
reg = <0>;
interrupt-parent = <&gpio1>;
- interrupts = <8>;
+ interrupts = <8 0x4>;
regulators {
sw1_reg: sw1 {
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index 53cbaa3d4f9..aba28dc87fc 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -17,6 +17,10 @@
serial0 = &uart1;
serial1 = &uart2;
serial2 = &uart3;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+ gpio3 = &gpio4;
};
tzic: tz-interrupt-controller@e0000000 {
diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts
index 5b8eafcdbee..da895e93a99 100644
--- a/arch/arm/boot/dts/imx53-ard.dts
+++ b/arch/arm/boot/dts/imx53-ard.dts
@@ -64,12 +64,32 @@
reg = <0xf4000000 0x2000000>;
phy-mode = "mii";
interrupt-parent = <&gpio2>;
- interrupts = <31>;
+ interrupts = <31 0x8>;
reg-io-width = <4>;
+ /*
+ * VDD33A and VDDVARIO of LAN9220 are supplied by
+ * SW4_3V3 of LTC3589. Before the regulator driver
+ * for this PMIC is available, we use a fixed dummy
+	 * 3V3 regulator to get the LAN9220 driver probe to work.
+ */
+ vdd33a-supply = <&reg_3p3v>;
+ vddvario-supply = <&reg_3p3v>;
smsc,irq-push-pull;
};
};
+ regulators {
+ compatible = "simple-bus";
+
+ reg_3p3v: 3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ };
+
gpio-keys {
compatible = "gpio-keys";
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index fc79cdc4b4e..cd37165edce 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -19,6 +19,13 @@
serial2 = &uart3;
serial3 = &uart4;
serial4 = &uart5;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+ gpio3 = &gpio4;
+ gpio4 = &gpio5;
+ gpio5 = &gpio6;
+ gpio6 = &gpio7;
};
tzic: tz-interrupt-controller@0fffc000 {
diff --git a/arch/arm/boot/dts/imx6q-sabrelite.dts b/arch/arm/boot/dts/imx6q-sabrelite.dts
index d42e851ceb9..72f30f3e617 100644
--- a/arch/arm/boot/dts/imx6q-sabrelite.dts
+++ b/arch/arm/boot/dts/imx6q-sabrelite.dts
@@ -53,6 +53,7 @@
fsl,pins = <
144 0x80000000 /* MX6Q_PAD_EIM_D22__GPIO_3_22 */
121 0x80000000 /* MX6Q_PAD_EIM_D19__GPIO_3_19 */
+ 953 0x80000000 /* MX6Q_PAD_GPIO_0__CCM_CLKO */
>;
};
};
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index 3d3c64b014e..fd57079f71a 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -19,6 +19,13 @@
serial2 = &uart3;
serial3 = &uart4;
serial4 = &uart5;
+ gpio0 = &gpio1;
+ gpio1 = &gpio2;
+ gpio2 = &gpio3;
+ gpio3 = &gpio4;
+ gpio4 = &gpio5;
+ gpio5 = &gpio6;
+ gpio6 = &gpio7;
};
cpus {
diff --git a/arch/arm/boot/dts/kirkwood-iconnect.dts b/arch/arm/boot/dts/kirkwood-iconnect.dts
index 52d94704510..f8ca6fa8819 100644
--- a/arch/arm/boot/dts/kirkwood-iconnect.dts
+++ b/arch/arm/boot/dts/kirkwood-iconnect.dts
@@ -41,9 +41,13 @@
};
power-blue {
label = "power:blue";
- gpios = <&gpio1 11 0>;
+ gpios = <&gpio1 10 0>;
linux,default-trigger = "timer";
};
+ power-red {
+ label = "power:red";
+ gpios = <&gpio1 11 0>;
+ };
usb1 {
label = "usb1:blue";
gpios = <&gpio1 12 0>;
diff --git a/arch/arm/boot/dts/twl6030.dtsi b/arch/arm/boot/dts/twl6030.dtsi
index 3b2f3510d7e..d351b27d721 100644
--- a/arch/arm/boot/dts/twl6030.dtsi
+++ b/arch/arm/boot/dts/twl6030.dtsi
@@ -66,6 +66,7 @@
vcxio: regulator@8 {
compatible = "ti,twl6030-vcxio";
+ regulator-always-on;
};
vusb: regulator@9 {
@@ -74,10 +75,12 @@
v1v8: regulator@10 {
compatible = "ti,twl6030-v1v8";
+ regulator-always-on;
};
v2v1: regulator@11 {
compatible = "ti,twl6030-v2v1";
+ regulator-always-on;
};
clk32kg: regulator@12 {
diff --git a/arch/arm/configs/armadillo800eva_defconfig b/arch/arm/configs/armadillo800eva_defconfig
index 7d8718468e0..90610c7030f 100644
--- a/arch/arm/configs/armadillo800eva_defconfig
+++ b/arch/arm/configs/armadillo800eva_defconfig
@@ -33,7 +33,7 @@ CONFIG_AEABI=y
CONFIG_FORCE_MAX_ZONEORDER=13
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096"
+CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096 rw"
CONFIG_CMDLINE_FORCE=y
CONFIG_KEXEC=y
CONFIG_VFP=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index f725b9637b3..3c9f32f9b6b 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -192,6 +192,7 @@ CONFIG_RTC_DRV_MC13XXX=y
CONFIG_RTC_DRV_MXC=y
CONFIG_DMADEVICES=y
CONFIG_IMX_SDMA=y
+CONFIG_MXS_DMA=y
CONFIG_COMMON_CLK_DEBUG=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT2_FS=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index ccdb6357fb7..4edcfb4e4de 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -34,7 +34,6 @@ CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_AEABI=y
-CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
CONFIG_AUTO_ZRELADDR=y
CONFIG_FPE_NWFPE=y
CONFIG_NET=y
diff --git a/arch/arm/configs/tct_hammer_defconfig b/arch/arm/configs/tct_hammer_defconfig
index 1d24f8458be..71277a1591b 100644
--- a/arch/arm/configs/tct_hammer_defconfig
+++ b/arch/arm/configs/tct_hammer_defconfig
@@ -7,7 +7,7 @@ CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
-# CONFIG_BUG is not set
+# CONFIG_BUGVERBOSE is not set
# CONFIG_ELF_CORE is not set
# CONFIG_SHMEM is not set
CONFIG_SLOB=y
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index 2d4f661d1cf..da6845493ca 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -86,6 +86,7 @@ CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_LM3530=y
CONFIG_LEDS_LP5521=y
+CONFIG_LEDS_GPIO=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_AB8500=y
CONFIG_RTC_DRV_PL031=y
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 03fb93621d0..5c8b3bf4d82 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -320,4 +320,12 @@
.size \name , . - \name
.endm
+ .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+#ifndef CONFIG_CPU_USE_DOMAINS
+ adds \tmp, \addr, #\size - 1
+ sbcccs \tmp, \tmp, \limit
+ bcs \bad
+#endif
+ .endm
+
#endif /* __ASM_ASSEMBLER_H__ */
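In C terms, check_uaccess rejects an (addr, size) pair whose last byte wraps around or falls beyond the limit register; the get_user/put_user rework further down passes current_thread_info()->addr_limit - 1 in r1 as that limit. A rough equivalent, for illustration only (the helper name is hypothetical):

/* Mirrors the adds/sbcccs/bcs sequence in check_uaccess above. */
static inline int uaccess_range_bad(unsigned long addr, unsigned long size,
				    unsigned long limit /* addr_limit - 1 */)
{
	unsigned long last = addr + size - 1;

	return last < addr		/* the addition wrapped around */
		|| last > limit;	/* last byte is past addr_limit - 1 */
}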
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 2ae842df455..5c44dcb0987 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -203,6 +203,13 @@ static inline void dma_free_writecombine(struct device *dev, size_t size,
}
/*
+ * This can be called during early boot to increase the size of the atomic
+ * coherent DMA pool above the default value of 256KiB. It must be called
+ * before postcore_initcall.
+ */
+extern void __init init_dma_coherent_pool_size(unsigned long size);
+
+/*
* This can be called during boot to increase the size of the consistent
 * DMA region above its default value of 2MB. It must be called before the
* memory allocator is initialised, i.e. before any core_initcall.
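A hedged example of the intended call site for the new init_dma_coherent_pool_size() helper; the machine hook and pool size here are made up for illustration and are not taken from this series:

#include <linux/init.h>
#include <asm/dma-mapping.h>

/* Hypothetical early machine init raising the atomic coherent pool from
 * the default 256KiB to 2MiB; must run before any postcore_initcall. */
static void __init example_board_init_early(void)
{
	init_dma_coherent_pool_size(2 * 1024 * 1024);
}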
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index e965f1b560f..5f6ddcc5645 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -187,6 +187,7 @@ static inline unsigned long __phys_to_virt(unsigned long x)
#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
#endif
#endif
+#endif /* __ASSEMBLY__ */
#ifndef PHYS_OFFSET
#ifdef PLAT_PHYS_OFFSET
@@ -196,6 +197,8 @@ static inline unsigned long __phys_to_virt(unsigned long x)
#endif
#endif
+#ifndef __ASSEMBLY__
+
/*
* PFNs are used to describe any physical page; this means
* PFN 0 == physical address 0.
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index e074948d814..00416edecea 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -12,6 +12,18 @@
#ifndef __ARM_PERF_EVENT_H__
#define __ARM_PERF_EVENT_H__
-/* Nothing to see here... */
+/*
+ * The ARMv7 CPU PMU supports up to 32 event counters.
+ */
+#define ARMPMU_MAX_HWEVENTS 32
+
+#define HW_OP_UNSUPPORTED 0xFFFF
+#define C(_x) PERF_COUNT_HW_CACHE_##_x
+#define CACHE_OP_UNSUPPORTED 0xFFFF
+
+struct pt_regs;
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+#define perf_misc_flags(regs) perf_misc_flags(regs)
#endif /* __ARM_PERF_EVENT_H__ */
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index f66626d71e7..41dc31f834c 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -195,6 +195,18 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)
+#define pte_none(pte) (!pte_val(pte))
+#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
+#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
+#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
+#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
+#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
+#define pte_special(pte) (0)
+
+#define pte_present_user(pte) \
+ ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
+ (L_PTE_PRESENT | L_PTE_USER))
+
#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
@@ -206,25 +218,15 @@ extern void __sync_icache_dcache(pte_t pteval);
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval)
{
- if (addr >= TASK_SIZE)
- set_pte_ext(ptep, pteval, 0);
- else {
+ unsigned long ext = 0;
+
+ if (addr < TASK_SIZE && pte_present_user(pteval)) {
__sync_icache_dcache(pteval);
- set_pte_ext(ptep, pteval, PTE_EXT_NG);
+ ext |= PTE_EXT_NG;
}
-}
-#define pte_none(pte) (!pte_val(pte))
-#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
-#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY))
-#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
-#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
-#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN))
-#define pte_special(pte) (0)
-
-#define pte_present_user(pte) \
- ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
- (L_PTE_PRESENT | L_PTE_USER))
+ set_pte_ext(ptep, pteval, ext);
+}
#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
@@ -251,13 +253,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
*
* 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
- * <--------------- offset --------------------> <- type --> 0 0 0
+ * <--------------- offset ----------------------> < type -> 0 0 0
*
- * This gives us up to 63 swap files and 32GB per swap file. Note that
+ * This gives us up to 31 swap files and 64GB per swap file. Note that
* the offset field is always non-zero.
*/
#define __SWP_TYPE_SHIFT 3
-#define __SWP_TYPE_BITS 6
+#define __SWP_TYPE_BITS 5
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
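The numbers in the updated comment follow from trading one type bit for one offset bit: dropping __SWP_TYPE_BITS from 6 to 5 halves the number of encodable swap files (63 to 31) and doubles the addressable offset per file (32GB to 64GB). A worked check, using illustrative macro names only:

/* 32-bit software PTE: [ offset | type | 0 0 0 ] */
#define EXAMPLE_SWP_TYPE_SHIFT		3
#define EXAMPLE_SWP_TYPE_BITS		5
#define EXAMPLE_SWP_OFFSET_SHIFT	(EXAMPLE_SWP_TYPE_BITS + \
					 EXAMPLE_SWP_TYPE_SHIFT)	/*  8 */
#define EXAMPLE_SWP_OFFSET_BITS		(32 - EXAMPLE_SWP_OFFSET_SHIFT)	/* 24 */
/* 2^24 swap slots * 4KiB page size = 64GB of offset range per swap file;
 * the 5 type bits bound the number of swap files the comment quotes. */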
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 4432305f4a2..0cd7824ca76 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -16,69 +16,30 @@
#include <linux/perf_event.h>
/*
- * Types of PMUs that can be accessed directly and require mutual
- * exclusion between profiling tools.
- */
-enum arm_pmu_type {
- ARM_PMU_DEVICE_CPU = 0,
- ARM_NUM_PMU_DEVICES,
-};
-
-/*
* struct arm_pmu_platdata - ARM PMU platform data
*
* @handle_irq: an optional handler which will be called from the
* interrupt and passed the address of the low level handler,
* and can be used to implement any platform specific handling
* before or after calling it.
- * @enable_irq: an optional handler which will be called after
- * request_irq and be used to handle some platform specific
- * irq enablement
- * @disable_irq: an optional handler which will be called before
- * free_irq and be used to handle some platform specific
- * irq disablement
+ * @runtime_resume: an optional handler which will be called by the
+ * runtime PM framework following a call to pm_runtime_get().
+ * Note that if pm_runtime_get() is called more than once in
+ * succession this handler will only be called once.
+ * @runtime_suspend: an optional handler which will be called by the
+ * runtime PM framework following a call to pm_runtime_put().
+ * Note that if pm_runtime_get() is called more than once in
+ * succession this handler will only be called following the
+ * final call to pm_runtime_put() that actually disables the
+ * hardware.
*/
struct arm_pmu_platdata {
irqreturn_t (*handle_irq)(int irq, void *dev,
irq_handler_t pmu_handler);
- void (*enable_irq)(int irq);
- void (*disable_irq)(int irq);
+ int (*runtime_resume)(struct device *dev);
+ int (*runtime_suspend)(struct device *dev);
};
-#ifdef CONFIG_CPU_HAS_PMU
-
-/**
- * reserve_pmu() - reserve the hardware performance counters
- *
- * Reserve the hardware performance counters in the system for exclusive use.
- * Returns 0 on success or -EBUSY if the lock is already held.
- */
-extern int
-reserve_pmu(enum arm_pmu_type type);
-
-/**
- * release_pmu() - Relinquish control of the performance counters
- *
- * Release the performance counters and allow someone else to use them.
- */
-extern void
-release_pmu(enum arm_pmu_type type);
-
-#else /* CONFIG_CPU_HAS_PMU */
-
-#include <linux/err.h>
-
-static inline int
-reserve_pmu(enum arm_pmu_type type)
-{
- return -ENODEV;
-}
-
-static inline void
-release_pmu(enum arm_pmu_type type) { }
-
-#endif /* CONFIG_CPU_HAS_PMU */
-
#ifdef CONFIG_HW_PERF_EVENTS
/* The events for a given PMU register set. */
@@ -101,24 +62,37 @@ struct pmu_hw_events {
raw_spinlock_t pmu_lock;
};
+struct cpupmu_regs {
+ u32 pmc;
+ u32 pmcntenset;
+ u32 pmuseren;
+ u32 pmintenset;
+ u32 pmxevttype[8];
+ u32 pmxevtcnt[8];
+};
+
struct arm_pmu {
struct pmu pmu;
- enum arm_pmu_type type;
cpumask_t active_irqs;
+ cpumask_t valid_cpus;
char *name;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
- void (*enable)(struct hw_perf_event *evt, int idx);
- void (*disable)(struct hw_perf_event *evt, int idx);
+ void (*enable)(struct perf_event *event);
+ void (*disable)(struct perf_event *event);
int (*get_event_idx)(struct pmu_hw_events *hw_events,
- struct hw_perf_event *hwc);
+ struct perf_event *event);
int (*set_event_filter)(struct hw_perf_event *evt,
struct perf_event_attr *attr);
- u32 (*read_counter)(int idx);
- void (*write_counter)(int idx, u32 val);
- void (*start)(void);
- void (*stop)(void);
+ u32 (*read_counter)(struct perf_event *event);
+ void (*write_counter)(struct perf_event *event, u32 val);
+ void (*start)(struct arm_pmu *);
+ void (*stop)(struct arm_pmu *);
void (*reset)(void *);
+ int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
+ void (*free_irq)(struct arm_pmu *);
int (*map_event)(struct perf_event *event);
+ void (*save_regs)(struct arm_pmu *, struct cpupmu_regs *);
+ void (*restore_regs)(struct arm_pmu *, struct cpupmu_regs *);
int num_events;
atomic_t active_events;
struct mutex reserve_mutex;
@@ -129,15 +103,20 @@ struct arm_pmu {
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
-int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
+extern const struct dev_pm_ops armpmu_dev_pm_ops;
+
+int armpmu_register(struct arm_pmu *armpmu, int type);
+
+u64 armpmu_event_update(struct perf_event *event);
-u64 armpmu_event_update(struct perf_event *event,
- struct hw_perf_event *hwc,
- int idx);
+int armpmu_event_set_period(struct perf_event *event);
-int armpmu_event_set_period(struct perf_event *event,
- struct hw_perf_event *hwc,
- int idx);
+int armpmu_map_event(struct perf_event *event,
+ const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+ const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX],
+ u32 raw_event_mask);
#endif /* CONFIG_HW_PERF_EVENTS */
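With the reserve_pmu()/release_pmu() interface gone, platform code that needs clock or power handling around the PMU supplies the two new runtime PM callbacks in struct arm_pmu_platdata. A hedged sketch of such glue (all names hypothetical; the actual wiring lives in platform code, not in this header):

#include <linux/device.h>
#include <asm/pmu.h>

static int example_pmu_runtime_resume(struct device *dev)
{
	/* e.g. ungate the PMU clock after pm_runtime_get() */
	return 0;
}

static int example_pmu_runtime_suspend(struct device *dev)
{
	/* e.g. gate the clock again after the final pm_runtime_put() */
	return 0;
}

static struct arm_pmu_platdata example_pmu_platdata = {
	.runtime_resume		= example_pmu_runtime_resume,
	.runtime_suspend	= example_pmu_runtime_suspend,
};

Such platdata would then be attached to the PMU platform device as its dev.platform_data in the board file.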
diff --git a/arch/arm/include/asm/sched_clock.h b/arch/arm/include/asm/sched_clock.h
index e3f75726343..05b8e82ec9f 100644
--- a/arch/arm/include/asm/sched_clock.h
+++ b/arch/arm/include/asm/sched_clock.h
@@ -10,5 +10,7 @@
extern void sched_clock_postinit(void);
extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
+extern void setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
+ unsigned long rate);
#endif
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 314d4664eae..99a19512ee2 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -199,6 +199,9 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
{
pgtable_page_dtor(pte);
+#ifdef CONFIG_ARM_LPAE
+ tlb_add_flush(tlb, addr);
+#else
/*
* With the classic ARM MMU, a pte page has two corresponding pmd
* entries, each covering 1MB.
@@ -206,6 +209,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
addr &= PMD_MASK;
tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
tlb_add_flush(tlb, addr + SZ_1M);
+#endif
tlb_remove_page(tlb, pte);
}
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 58b8b84adcd..167597d40e2 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -26,11 +26,46 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
+int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask);
+
+#ifdef CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE
+/* Common values for CPUs */
+#ifndef SD_CPU_INIT
+#define SD_CPU_INIT (struct sched_domain) { \
+ .min_interval = 1, \
+ .max_interval = 4, \
+ .busy_factor = 64, \
+ .imbalance_pct = 125, \
+ .cache_nice_tries = 1, \
+ .busy_idx = 2, \
+ .idle_idx = 1, \
+ .newidle_idx = 0, \
+ .wake_idx = 0, \
+ .forkexec_idx = 0, \
+ \
+ .flags = 0*SD_LOAD_BALANCE \
+ | 1*SD_BALANCE_NEWIDLE \
+ | 1*SD_BALANCE_EXEC \
+ | 1*SD_BALANCE_FORK \
+ | 0*SD_BALANCE_WAKE \
+ | 1*SD_WAKE_AFFINE \
+ | 0*SD_PREFER_LOCAL \
+ | 0*SD_SHARE_CPUPOWER \
+ | 0*SD_SHARE_PKG_RESOURCES \
+ | 0*SD_SERIALIZE \
+ , \
+ .last_balance = jiffies, \
+ .balance_interval = 1, \
+}
+#endif
+#endif /* CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE */
#else
static inline void init_cpu_topology(void) { }
static inline void store_cpu_topology(unsigned int cpuid) { }
+static inline int cluster_to_logical_mask(unsigned int socket_id,
+ cpumask_t *cluster_mask) { return -EINVAL; }
#endif
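cluster_to_logical_mask() gives callers (the HMP scheduler bits above) the logical CPU mask of one cluster; the !SMP stub returns -EINVAL. A minimal usage sketch, assuming the SMP implementation returns 0 on success (function and log text are illustrative only):

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <asm/topology.h>

static void example_dump_cluster0(void)
{
	cpumask_t cluster0;

	/* Collect the logical CPUs sitting in cluster (socket) 0. */
	if (!cluster_to_logical_mask(0, &cluster0))
		pr_info("cluster 0 has %u CPUs\n", cpumask_weight(&cluster0));
}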
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 479a6352e0b..77bd79f2ffd 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -101,28 +101,39 @@ extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
-#define __get_user_x(__r2,__p,__e,__s,__i...) \
+#define __GUP_CLOBBER_1 "lr", "cc"
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define __GUP_CLOBBER_2 "ip", "lr", "cc"
+#else
+#define __GUP_CLOBBER_2 "lr", "cc"
+#endif
+#define __GUP_CLOBBER_4 "lr", "cc"
+
+#define __get_user_x(__r2,__p,__e,__l,__s) \
__asm__ __volatile__ ( \
__asmeq("%0", "r0") __asmeq("%1", "r2") \
+ __asmeq("%3", "r1") \
"bl __get_user_" #__s \
: "=&r" (__e), "=r" (__r2) \
- : "0" (__p) \
- : __i, "cc")
+ : "0" (__p), "r" (__l) \
+ : __GUP_CLOBBER_##__s)
-#define get_user(x,p) \
+#define __get_user_check(x,p) \
({ \
+ unsigned long __limit = current_thread_info()->addr_limit - 1; \
register const typeof(*(p)) __user *__p asm("r0") = (p);\
register unsigned long __r2 asm("r2"); \
+ register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
switch (sizeof(*(__p))) { \
case 1: \
- __get_user_x(__r2, __p, __e, 1, "lr"); \
- break; \
+ __get_user_x(__r2, __p, __e, __l, 1); \
+ break; \
case 2: \
- __get_user_x(__r2, __p, __e, 2, "r3", "lr"); \
+ __get_user_x(__r2, __p, __e, __l, 2); \
break; \
case 4: \
- __get_user_x(__r2, __p, __e, 4, "lr"); \
+ __get_user_x(__r2, __p, __e, __l, 4); \
break; \
default: __e = __get_user_bad(); break; \
} \
@@ -130,42 +141,57 @@ extern int __get_user_4(void *);
__e; \
})
+#define get_user(x,p) \
+ ({ \
+ might_fault(); \
+ __get_user_check(x,p); \
+ })
+
extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);
-#define __put_user_x(__r2,__p,__e,__s) \
+#define __put_user_x(__r2,__p,__e,__l,__s) \
__asm__ __volatile__ ( \
__asmeq("%0", "r0") __asmeq("%2", "r2") \
+ __asmeq("%3", "r1") \
"bl __put_user_" #__s \
: "=&r" (__e) \
- : "0" (__p), "r" (__r2) \
+ : "0" (__p), "r" (__r2), "r" (__l) \
: "ip", "lr", "cc")
-#define put_user(x,p) \
+#define __put_user_check(x,p) \
({ \
+ unsigned long __limit = current_thread_info()->addr_limit - 1; \
register const typeof(*(p)) __r2 asm("r2") = (x); \
register const typeof(*(p)) __user *__p asm("r0") = (p);\
+ register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
switch (sizeof(*(__p))) { \
case 1: \
- __put_user_x(__r2, __p, __e, 1); \
+ __put_user_x(__r2, __p, __e, __l, 1); \
break; \
case 2: \
- __put_user_x(__r2, __p, __e, 2); \
+ __put_user_x(__r2, __p, __e, __l, 2); \
break; \
case 4: \
- __put_user_x(__r2, __p, __e, 4); \
+ __put_user_x(__r2, __p, __e, __l, 4); \
break; \
case 8: \
- __put_user_x(__r2, __p, __e, 8); \
+ __put_user_x(__r2, __p, __e, __l, 8); \
break; \
default: __e = __put_user_bad(); break; \
} \
__e; \
})
+#define put_user(x,p) \
+ ({ \
+ might_fault(); \
+ __put_user_check(x,p); \
+ })
+
#else /* CONFIG_MMU */
/*
@@ -219,6 +245,7 @@ do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
unsigned long __gu_val; \
__chk_user_ptr(ptr); \
+ might_fault(); \
switch (sizeof(*(ptr))) { \
case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \
case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \
@@ -300,6 +327,7 @@ do { \
unsigned long __pu_addr = (unsigned long)(ptr); \
__typeof__(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \
+ might_fault(); \
switch (sizeof(*(ptr))) { \
case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \
case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 0cab47d4a83..2fde5fd1acc 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -404,6 +404,7 @@
#define __NR_setns (__NR_SYSCALL_BASE+375)
#define __NR_process_vm_readv (__NR_SYSCALL_BASE+376)
#define __NR_process_vm_writev (__NR_SYSCALL_BASE+377)
+ /* 378 for kcmp */
/*
* The following SWIs are ARM private.
@@ -483,6 +484,7 @@
*/
#define __IGNORE_fadvise64_64
#define __IGNORE_migrate_pages
+#define __IGNORE_kcmp
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_UNISTD_H */
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 7ad2d5cf700..1c432143073 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -69,8 +69,7 @@ obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o
obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o
obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o
obj-$(CONFIG_IWMMXT) += iwmmxt.o
-obj-$(CONFIG_CPU_HAS_PMU) += pmu.o
-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
+obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o perf_event_cpu.o
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 463ff4a0ec8..e337879595e 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -387,6 +387,7 @@
/* 375 */ CALL(sys_setns)
CALL(sys_process_vm_readv)
CALL(sys_process_vm_writev)
+ CALL(sys_ni_syscall) /* reserved for sys_kcmp */
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index ba386bd9410..eed4d0cdd74 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -28,6 +28,7 @@
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/smp.h>
+#include <linux/cpu_pm.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
@@ -42,6 +43,11 @@ static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);
+#ifdef CONFIG_CPU_PM
+/* Storage for OS Save and Restore. */
+static DEFINE_PER_CPU(u32, cpu_dscr);
+#endif
+
/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps;
static int core_num_wrps;
@@ -159,6 +165,12 @@ static int debug_arch_supported(void)
arch >= ARM_DEBUG_ARCH_V7_1;
}
+/* Can we determine the watchpoint access type from the fsr? */
+static int debug_exception_updates_fsr(void)
+{
+ return 0;
+}
+
/* Determine number of WRP registers available. */
static int get_num_wrp_resources(void)
{
@@ -604,13 +616,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
/* Aligned */
break;
case 1:
- /* Allow single byte watchpoint. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
- break;
case 2:
/* Allow halfword watchpoints and breakpoints. */
if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
+ case 3:
+ /* Allow single byte watchpoint. */
+ if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+ break;
default:
ret = -EINVAL;
goto out;
@@ -619,18 +632,35 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
info->address &= ~alignment_mask;
info->ctrl.len <<= offset;
- /*
- * Currently we rely on an overflow handler to take
- * care of single-stepping the breakpoint when it fires.
- * In the case of userspace breakpoints on a core with V7 debug,
- * we can use the mismatch feature as a poor-man's hardware
- * single-step, but this only works for per-task breakpoints.
- */
- if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
- !core_has_mismatch_brps() || !bp->hw.bp_target)) {
- pr_warning("overflow handler required but none found\n");
- ret = -EINVAL;
+ if (!bp->overflow_handler) {
+ /*
+ * Mismatch breakpoints are required for single-stepping
+ * breakpoints.
+ */
+ if (!core_has_mismatch_brps())
+ return -EINVAL;
+
+ /* We don't allow mismatch breakpoints in kernel space. */
+ if (arch_check_bp_in_kernelspace(bp))
+ return -EPERM;
+
+ /*
+ * Per-cpu breakpoints are not supported by our stepping
+ * mechanism.
+ */
+ if (!bp->hw.bp_target)
+ return -EINVAL;
+
+ /*
+ * We only support specific access types if the fsr
+ * reports them.
+ */
+ if (!debug_exception_updates_fsr() &&
+ (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
+ info->ctrl.type == ARM_BREAKPOINT_STORE))
+ return -EINVAL;
}
+
out:
return ret;
}
@@ -706,10 +736,12 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
goto unlock;
/* Check that the access type matches. */
- access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
- HW_BREAKPOINT_R;
- if (!(access & hw_breakpoint_type(wp)))
- goto unlock;
+ if (debug_exception_updates_fsr()) {
+ access = (fsr & ARM_FSR_ACCESS_MASK) ?
+ HW_BREAKPOINT_W : HW_BREAKPOINT_R;
+ if (!(access & hw_breakpoint_type(wp)))
+ goto unlock;
+ }
/* We have a winner. */
info->trigger = addr;
@@ -964,6 +996,55 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
.notifier_call = dbg_reset_notify,
};
+#ifdef CONFIG_CPU_PM
+static void os_save(int cpu)
+{
+ /* Set OS Lock. */
+ asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0xC5ACCE55));
+ isb();
+
+ /* Save DSCRext. */
+ ARM_DBG_READ(c2, 2, per_cpu(cpu_dscr, cpu));
+}
+
+static void os_restore(int cpu)
+{
+ /* Restore DSCRext. */
+ ARM_DBG_WRITE(c2, 2, per_cpu(cpu_dscr, cpu));
+
+ /* Clear OS Lock. */
+ asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
+ isb();
+}
+
+static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
+ void *v)
+{
+ int cpu = smp_processor_id();
+
+ if (action == CPU_PM_ENTER)
+ os_save(cpu);
+ else if (action == CPU_PM_EXIT)
+ os_restore(cpu);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = {
+ .notifier_call = dbg_cpu_pm_notify,
+};
+
+static void __init pm_init(void)
+{
+ if (get_debug_arch() == ARM_DEBUG_ARCH_V7_1)
+ cpu_pm_register_notifier(&dbg_cpu_pm_nb);
+}
+#else
+static inline void pm_init(void)
+{
+}
+#endif
+
static int __init arch_hw_breakpoint_init(void)
{
u32 dscr;
@@ -1022,6 +1103,8 @@ static int __init arch_hw_breakpoint_init(void)
/* Register hotplug notifier. */
register_cpu_notifier(&dbg_reset_nb);
+
+ pm_init();
return 0;
}
arch_initcall(arch_hw_breakpoint_init);
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index ab243b87118..e90b2a7e8c2 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -12,68 +12,16 @@
*/
#define pr_fmt(fmt) "hw perfevents: " fmt
-#include <linux/bitmap.h>
-#include <linux/interrupt.h>
+#include <linux/cpumask.h>
#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/perf_event.h>
#include <linux/platform_device.h>
-#include <linux/spinlock.h>
+#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
-#include <asm/cputype.h>
-#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>
-/*
- * ARMv6 supports a maximum of 3 events, starting from index 0. If we add
- * another platform that supports more, we need to increase this to be the
- * largest of all platforms.
- *
- * ARMv7 supports up to 32 events:
- * cycle counter CCNT + 31 events counters CNT0..30.
- * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
- */
-#define ARMPMU_MAX_HWEVENTS 32
-
-static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
-static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
-static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
-
-#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
-
-/* Set at runtime when we know what CPU type we are. */
-static struct arm_pmu *cpu_pmu;
-
-const char *perf_pmu_name(void)
-{
- if (!cpu_pmu)
- return NULL;
-
- return cpu_pmu->pmu.name;
-}
-EXPORT_SYMBOL_GPL(perf_pmu_name);
-
-int perf_num_counters(void)
-{
- int max_events = 0;
-
- if (cpu_pmu != NULL)
- max_events = cpu_pmu->num_events;
-
- return max_events;
-}
-EXPORT_SYMBOL_GPL(perf_num_counters);
-
-#define HW_OP_UNSUPPORTED 0xFFFF
-
-#define C(_x) \
- PERF_COUNT_HW_CACHE_##_x
-
-#define CACHE_OP_UNSUPPORTED 0xFFFF
-
static int
armpmu_map_cache_event(const unsigned (*cache_map)
[PERF_COUNT_HW_CACHE_MAX]
@@ -104,7 +52,7 @@ armpmu_map_cache_event(const unsigned (*cache_map)
}
static int
-armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
+armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
int mapping = (*event_map)[config];
return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
@@ -116,34 +64,36 @@ armpmu_map_raw_event(u32 raw_event_mask, u64 config)
return (int)(config & raw_event_mask);
}
-static int map_cpu_event(struct perf_event *event,
- const unsigned (*event_map)[PERF_COUNT_HW_MAX],
- const unsigned (*cache_map)
- [PERF_COUNT_HW_CACHE_MAX]
- [PERF_COUNT_HW_CACHE_OP_MAX]
- [PERF_COUNT_HW_CACHE_RESULT_MAX],
- u32 raw_event_mask)
+int
+armpmu_map_event(struct perf_event *event,
+ const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+ const unsigned (*cache_map)
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX],
+ u32 raw_event_mask)
{
u64 config = event->attr.config;
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
- return armpmu_map_event(event_map, config);
+ return armpmu_map_hw_event(event_map, config);
case PERF_TYPE_HW_CACHE:
return armpmu_map_cache_event(cache_map, config);
case PERF_TYPE_RAW:
return armpmu_map_raw_event(raw_event_mask, config);
+ default:
+ if (event->attr.type >= PERF_TYPE_MAX)
+ return armpmu_map_raw_event(raw_event_mask, config);
}
return -ENOENT;
}
-int
-armpmu_event_set_period(struct perf_event *event,
- struct hw_perf_event *hwc,
- int idx)
+int armpmu_event_set_period(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
s64 left = local64_read(&hwc->period_left);
s64 period = hwc->sample_period;
int ret = 0;
@@ -167,24 +117,22 @@ armpmu_event_set_period(struct perf_event *event,
local64_set(&hwc->prev_count, (u64)-left);
- armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);
+ armpmu->write_counter(event, (u64)(-left) & 0xffffffff);
perf_event_update_userpage(event);
return ret;
}
-u64
-armpmu_event_update(struct perf_event *event,
- struct hw_perf_event *hwc,
- int idx)
+u64 armpmu_event_update(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
u64 delta, prev_raw_count, new_raw_count;
again:
prev_raw_count = local64_read(&hwc->prev_count);
- new_raw_count = armpmu->read_counter(idx);
+ new_raw_count = armpmu->read_counter(event);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
@@ -207,7 +155,7 @@ armpmu_read(struct perf_event *event)
if (hwc->idx < 0)
return;
- armpmu_event_update(event, hwc, hwc->idx);
+ armpmu_event_update(event);
}
static void
@@ -216,24 +164,26 @@ armpmu_stop(struct perf_event *event, int flags)
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
+ return;
/*
* ARM pmu always has to update the counter, so ignore
* PERF_EF_UPDATE, see comments in armpmu_start().
*/
if (!(hwc->state & PERF_HES_STOPPED)) {
- armpmu->disable(hwc, hwc->idx);
- barrier(); /* why? */
- armpmu_event_update(event, hwc, hwc->idx);
+ armpmu->disable(event);
+ armpmu_event_update(event);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
}
-static void
-armpmu_start(struct perf_event *event, int flags)
+static void armpmu_start(struct perf_event *event, int flags)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
+ return;
/*
* ARM pmu always has to reprogram the period, so ignore
* PERF_EF_RELOAD, see the comment below.
@@ -249,8 +199,8 @@ armpmu_start(struct perf_event *event, int flags)
* get an interrupt too soon or *way* too late if the overflow has
* happened since disabling.
*/
- armpmu_event_set_period(event, hwc, hwc->idx);
- armpmu->enable(hwc, hwc->idx);
+ armpmu_event_set_period(event);
+ armpmu->enable(event);
}
static void
@@ -261,6 +211,9 @@ armpmu_del(struct perf_event *event, int flags)
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
+ return;
+
WARN_ON(idx < 0);
armpmu_stop(event, PERF_EF_UPDATE);
@@ -279,10 +232,14 @@ armpmu_add(struct perf_event *event, int flags)
int idx;
int err = 0;
+ /*
+  * An event following a task is ignored on CPUs outside this PMU's
+  * valid set; add/start/stop/del all bail out early there.
+  */
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
+ return 0;
+
perf_pmu_disable(event->pmu);
/* If we don't have a space for the counter then finish early. */
- idx = armpmu->get_event_idx(hw_events, hwc);
+ idx = armpmu->get_event_idx(hw_events, event);
if (idx < 0) {
err = idx;
goto out;
@@ -293,7 +250,7 @@ armpmu_add(struct perf_event *event, int flags)
* sure it is disabled.
*/
event->hw.idx = idx;
- armpmu->disable(hwc, idx);
+ armpmu->disable(event);
hw_events->events[idx] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
@@ -313,13 +270,12 @@ validate_event(struct pmu_hw_events *hw_events,
struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
- struct hw_perf_event fake_event = event->hw;
struct pmu *leader_pmu = event->group_leader->pmu;
if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
return 1;
- return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
+ return armpmu->get_event_idx(hw_events, event) >= 0;
}
static int
@@ -350,99 +306,41 @@ validate_group(struct perf_event *event)
return 0;
}
-static irqreturn_t armpmu_platform_irq(int irq, void *dev)
+static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
struct arm_pmu *armpmu = (struct arm_pmu *) dev;
struct platform_device *plat_device = armpmu->plat_device;
struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);
- return plat->handle_irq(irq, dev, armpmu->handle_irq);
+ if (plat && plat->handle_irq)
+ return plat->handle_irq(irq, dev, armpmu->handle_irq);
+ else
+ return armpmu->handle_irq(irq, dev);
}
static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
- int i, irq, irqs;
- struct platform_device *pmu_device = armpmu->plat_device;
- struct arm_pmu_platdata *plat =
- dev_get_platdata(&pmu_device->dev);
-
- irqs = min(pmu_device->num_resources, num_possible_cpus());
-
- for (i = 0; i < irqs; ++i) {
- if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
- continue;
- irq = platform_get_irq(pmu_device, i);
- if (irq >= 0) {
- if (plat && plat->disable_irq)
- plat->disable_irq(irq);
- free_irq(irq, armpmu);
- }
- }
-
- release_pmu(armpmu->type);
+ armpmu->free_irq(armpmu);
+ pm_runtime_put_sync(&armpmu->plat_device->dev);
}
static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
- struct arm_pmu_platdata *plat;
- irq_handler_t handle_irq;
- int i, err, irq, irqs;
+ int err;
struct platform_device *pmu_device = armpmu->plat_device;
if (!pmu_device)
return -ENODEV;
- err = reserve_pmu(armpmu->type);
+ pm_runtime_get_sync(&pmu_device->dev);
+ err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
if (err) {
- pr_warning("unable to reserve pmu\n");
+ armpmu_release_hardware(armpmu);
return err;
}
- plat = dev_get_platdata(&pmu_device->dev);
- if (plat && plat->handle_irq)
- handle_irq = armpmu_platform_irq;
- else
- handle_irq = armpmu->handle_irq;
-
- irqs = min(pmu_device->num_resources, num_possible_cpus());
- if (irqs < 1) {
- pr_err("no irqs for PMUs defined\n");
- return -ENODEV;
- }
-
- for (i = 0; i < irqs; ++i) {
- err = 0;
- irq = platform_get_irq(pmu_device, i);
- if (irq < 0)
- continue;
-
- /*
- * If we have a single PMU interrupt that we can't shift,
- * assume that we're running on a uniprocessor machine and
- * continue. Otherwise, continue without this interrupt.
- */
- if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
- pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
- irq, i);
- continue;
- }
-
- err = request_irq(irq, handle_irq,
- IRQF_DISABLED | IRQF_NOBALANCING,
- "arm-pmu", armpmu);
- if (err) {
- pr_err("unable to request IRQ%d for ARM PMU counters\n",
- irq);
- armpmu_release_hardware(armpmu);
- return err;
- } else if (plat && plat->enable_irq)
- plat->enable_irq(irq);
-
- cpumask_set_cpu(i, &armpmu->active_irqs);
- }
-
return 0;
}
@@ -536,6 +434,10 @@ static int armpmu_event_init(struct perf_event *event)
int err = 0;
atomic_t *active_events = &armpmu->active_events;
+ if (event->cpu != -1 &&
+ !cpumask_test_cpu(event->cpu, &armpmu->valid_cpus))
+ return -ENOENT;
+
/* does not support taken branch sampling */
if (has_branch_stack(event))
return -EOPNOTSUPP;
@@ -572,15 +474,41 @@ static void armpmu_enable(struct pmu *pmu)
int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
if (enabled)
- armpmu->start();
+ armpmu->start(armpmu);
}
static void armpmu_disable(struct pmu *pmu)
{
struct arm_pmu *armpmu = to_arm_pmu(pmu);
- armpmu->stop();
+ armpmu->stop(armpmu);
}
+#ifdef CONFIG_PM_RUNTIME
+static int armpmu_runtime_resume(struct device *dev)
+{
+ struct arm_pmu_platdata *plat = dev_get_platdata(dev);
+
+ if (plat && plat->runtime_resume)
+ return plat->runtime_resume(dev);
+
+ return 0;
+}
+
+static int armpmu_runtime_suspend(struct device *dev)
+{
+ struct arm_pmu_platdata *plat = dev_get_platdata(dev);
+
+ if (plat && plat->runtime_suspend)
+ return plat->runtime_suspend(dev);
+
+ return 0;
+}
+#endif
+
+const struct dev_pm_ops armpmu_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
+};
+
static void __init armpmu_init(struct arm_pmu *armpmu)
{
atomic_set(&armpmu->active_events, 0);
@@ -598,174 +526,14 @@ static void __init armpmu_init(struct arm_pmu *armpmu)
};
}
-int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type)
+int armpmu_register(struct arm_pmu *armpmu, int type)
{
armpmu_init(armpmu);
- return perf_pmu_register(&armpmu->pmu, name, type);
+ pr_info("enabled with %s PMU driver, %d counters available\n",
+ armpmu->name, armpmu->num_events);
+ return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
}
-/* Include the PMU-specific implementations. */
-#include "perf_event_xscale.c"
-#include "perf_event_v6.c"
-#include "perf_event_v7.c"
-
-/*
- * Ensure the PMU has sane values out of reset.
- * This requires SMP to be available, so exists as a separate initcall.
- */
-static int __init
-cpu_pmu_reset(void)
-{
- if (cpu_pmu && cpu_pmu->reset)
- return on_each_cpu(cpu_pmu->reset, NULL, 1);
- return 0;
-}
-arch_initcall(cpu_pmu_reset);
-
-/*
- * PMU platform driver and devicetree bindings.
- */
-static struct of_device_id armpmu_of_device_ids[] = {
- {.compatible = "arm,cortex-a9-pmu"},
- {.compatible = "arm,cortex-a8-pmu"},
- {.compatible = "arm,arm1136-pmu"},
- {.compatible = "arm,arm1176-pmu"},
- {},
-};
-
-static struct platform_device_id armpmu_plat_device_ids[] = {
- {.name = "arm-pmu"},
- {},
-};
-
-static int __devinit armpmu_device_probe(struct platform_device *pdev)
-{
- if (!cpu_pmu)
- return -ENODEV;
-
- cpu_pmu->plat_device = pdev;
- return 0;
-}
-
-static struct platform_driver armpmu_driver = {
- .driver = {
- .name = "arm-pmu",
- .of_match_table = armpmu_of_device_ids,
- },
- .probe = armpmu_device_probe,
- .id_table = armpmu_plat_device_ids,
-};
-
-static int __init register_pmu_driver(void)
-{
- return platform_driver_register(&armpmu_driver);
-}
-device_initcall(register_pmu_driver);
-
-static struct pmu_hw_events *armpmu_get_cpu_events(void)
-{
- return &__get_cpu_var(cpu_hw_events);
-}
-
-static void __init cpu_pmu_init(struct arm_pmu *armpmu)
-{
- int cpu;
- for_each_possible_cpu(cpu) {
- struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
- events->events = per_cpu(hw_events, cpu);
- events->used_mask = per_cpu(used_mask, cpu);
- raw_spin_lock_init(&events->pmu_lock);
- }
- armpmu->get_hw_events = armpmu_get_cpu_events;
- armpmu->type = ARM_PMU_DEVICE_CPU;
-}
-
-/*
- * PMU hardware loses all context when a CPU goes offline.
- * When a CPU is hotplugged back in, since some hardware registers are
- * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
- * junk values out of them.
- */
-static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
- unsigned long action, void *hcpu)
-{
- if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
- return NOTIFY_DONE;
-
- if (cpu_pmu && cpu_pmu->reset)
- cpu_pmu->reset(NULL);
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
- .notifier_call = pmu_cpu_notify,
-};
-
-/*
- * CPU PMU identification and registration.
- */
-static int __init
-init_hw_perf_events(void)
-{
- unsigned long cpuid = read_cpuid_id();
- unsigned long implementor = (cpuid & 0xFF000000) >> 24;
- unsigned long part_number = (cpuid & 0xFFF0);
-
- /* ARM Ltd CPUs. */
- if (0x41 == implementor) {
- switch (part_number) {
- case 0xB360: /* ARM1136 */
- case 0xB560: /* ARM1156 */
- case 0xB760: /* ARM1176 */
- cpu_pmu = armv6pmu_init();
- break;
- case 0xB020: /* ARM11mpcore */
- cpu_pmu = armv6mpcore_pmu_init();
- break;
- case 0xC080: /* Cortex-A8 */
- cpu_pmu = armv7_a8_pmu_init();
- break;
- case 0xC090: /* Cortex-A9 */
- cpu_pmu = armv7_a9_pmu_init();
- break;
- case 0xC050: /* Cortex-A5 */
- cpu_pmu = armv7_a5_pmu_init();
- break;
- case 0xC0F0: /* Cortex-A15 */
- cpu_pmu = armv7_a15_pmu_init();
- break;
- case 0xC070: /* Cortex-A7 */
- cpu_pmu = armv7_a7_pmu_init();
- break;
- }
- /* Intel CPUs [xscale]. */
- } else if (0x69 == implementor) {
- part_number = (cpuid >> 13) & 0x7;
- switch (part_number) {
- case 1:
- cpu_pmu = xscale1pmu_init();
- break;
- case 2:
- cpu_pmu = xscale2pmu_init();
- break;
- }
- }
-
- if (cpu_pmu) {
- pr_info("enabled with %s PMU driver, %d counters available\n",
- cpu_pmu->name, cpu_pmu->num_events);
- cpu_pmu_init(cpu_pmu);
- register_cpu_notifier(&pmu_cpu_notifier);
- armpmu_register(cpu_pmu, cpu_pmu->name, PERF_TYPE_RAW);
- } else {
- pr_info("no hardware support available\n");
- }
-
- return 0;
-}
-early_initcall(init_hw_perf_events);
-
/*
* Callchain handling code.
*/
@@ -817,6 +585,10 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
struct frame_tail __user *tail;
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ /* We don't support guest os callchain now */
+ return;
+ }
tail = (struct frame_tail __user *)regs->ARM_fp - 1;
@@ -844,9 +616,41 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
struct stackframe fr;
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ /* We don't support guest os callchain now */
+ return;
+ }
+
fr.fp = regs->ARM_fp;
fr.sp = regs->ARM_sp;
fr.lr = regs->ARM_lr;
fr.pc = regs->ARM_pc;
walk_stackframe(&fr, callchain_trace, entry);
}
+
+unsigned long perf_instruction_pointer(struct pt_regs *regs)
+{
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+ return perf_guest_cbs->get_guest_ip();
+
+ return instruction_pointer(regs);
+}
+
+unsigned long perf_misc_flags(struct pt_regs *regs)
+{
+ int misc = 0;
+
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ if (perf_guest_cbs->is_user_mode())
+ misc |= PERF_RECORD_MISC_GUEST_USER;
+ else
+ misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+ } else {
+ if (user_mode(regs))
+ misc |= PERF_RECORD_MISC_USER;
+ else
+ misc |= PERF_RECORD_MISC_KERNEL;
+ }
+
+ return misc;
+}
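The armpmu_runtime_{suspend,resume} hooks above only forward to platform data, so a platform that needs to gate the PMU clock (or similar) would supply the callbacks through struct arm_pmu_platdata on its "arm-pmu" device. A minimal sketch, not part of this patch; the example_* names and the clock-gating comments are assumptions for illustration, and the PMU IRQ resources are omitted:

#include <linux/platform_device.h>
#include <asm/pmu.h>

static int example_pmu_runtime_resume(struct device *dev)
{
	/* e.g. ungate the PMU clock before any counter access */
	return 0;
}

static int example_pmu_runtime_suspend(struct device *dev)
{
	/* e.g. gate the PMU clock again once the counters are idle */
	return 0;
}

static struct arm_pmu_platdata example_pmu_platdata = {
	.runtime_resume		= example_pmu_runtime_resume,
	.runtime_suspend	= example_pmu_runtime_suspend,
};

static struct platform_device example_pmu_device = {
	.name	= "arm-pmu",
	.id	= -1,
	.dev	= {
		.platform_data = &example_pmu_platdata,
	},
	/* registered from board init with platform_device_register() */
};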
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
new file mode 100644
index 00000000000..66248ee706c
--- /dev/null
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -0,0 +1,380 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+#define pr_fmt(fmt) "CPU PMU: " fmt
+
+#include <linux/bitmap.h>
+#include <linux/cpu_pm.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <asm/cputype.h>
+#include <asm/irq_regs.h>
+#include <asm/pmu.h>
+
+/* Set at runtime when we know what CPU type we are. */
+static DEFINE_PER_CPU(struct arm_pmu *, cpu_pmu);
+
+static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
+static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
+static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
+
+static DEFINE_PER_CPU(struct cpupmu_regs, cpu_pmu_regs);
+
+/*
+ * Despite the names, these two functions are CPU-specific and are used
+ * by the OProfile/perf code.
+ */
+const char *perf_pmu_name(void)
+{
+ struct arm_pmu *pmu = per_cpu(cpu_pmu, 0);
+ if (!pmu)
+ return NULL;
+
+ return pmu->name;
+}
+EXPORT_SYMBOL_GPL(perf_pmu_name);
+
+int perf_num_counters(void)
+{
+ struct arm_pmu *pmu = per_cpu(cpu_pmu, 0);
+
+ if (!pmu)
+ return 0;
+
+ return pmu->num_events;
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
+/* Include the PMU-specific implementations. */
+#include "perf_event_xscale.c"
+#include "perf_event_v6.c"
+#include "perf_event_v7.c"
+
+static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
+{
+ return &__get_cpu_var(cpu_hw_events);
+}
+
+static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
+{
+ int i, irq, irqs;
+ struct platform_device *pmu_device = cpu_pmu->plat_device;
+ int cpu = -1;
+
+ irqs = min(pmu_device->num_resources, num_possible_cpus());
+
+ for (i = 0; i < irqs; ++i) {
+ cpu = cpumask_next(cpu, &cpu_pmu->valid_cpus);
+ if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
+ continue;
+ irq = platform_get_irq(pmu_device, i);
+ if (irq >= 0)
+ free_irq(irq, cpu_pmu);
+ }
+}
+
+static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
+{
+ int i, err, irq, irqs;
+ struct platform_device *pmu_device = cpu_pmu->plat_device;
+ int cpu = -1;
+
+ if (!pmu_device)
+ return -ENODEV;
+
+ irqs = min(pmu_device->num_resources, num_possible_cpus());
+ if (irqs < 1) {
+ pr_err("no irqs for PMUs defined\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < irqs; ++i) {
+ err = 0;
+ cpu = cpumask_next(cpu, &cpu_pmu->valid_cpus);
+ irq = platform_get_irq(pmu_device, i);
+ if (irq < 0)
+ continue;
+
+ /*
+ * If we have a single PMU interrupt that we can't shift,
+ * assume that we're running on a uniprocessor machine and
+ * continue. Otherwise, continue without this interrupt.
+ */
+ if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
+ pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+ irq, cpu);
+ continue;
+ }
+
+ err = request_irq(irq, handler, IRQF_NOBALANCING, "arm-pmu",
+ cpu_pmu);
+ if (err) {
+ pr_err("unable to request IRQ%d for ARM PMU counters\n",
+ irq);
+ return err;
+ }
+
+ cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
+ }
+
+ return 0;
+}
+
+static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ int cpu;
+ for_each_cpu_mask(cpu, cpu_pmu->valid_cpus) {
+ struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
+ events->events = per_cpu(hw_events, cpu);
+ events->used_mask = per_cpu(used_mask, cpu);
+ raw_spin_lock_init(&events->pmu_lock);
+ }
+
+ cpu_pmu->get_hw_events = cpu_pmu_get_cpu_events;
+ cpu_pmu->request_irq = cpu_pmu_request_irq;
+ cpu_pmu->free_irq = cpu_pmu_free_irq;
+
+ /* Ensure the PMU has sane values out of reset. */
+ if (cpu_pmu && cpu_pmu->reset)
+ on_each_cpu_mask(&cpu_pmu->valid_cpus, cpu_pmu->reset, cpu_pmu, 1);
+}
+
+/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
+ unsigned long action, void *hcpu)
+{
+ struct arm_pmu *pmu = per_cpu(cpu_pmu, (long)hcpu);
+
+ if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+ return NOTIFY_DONE;
+
+ if (pmu && pmu->reset)
+ pmu->reset(pmu);
+ else
+ return NOTIFY_DONE;
+
+ return NOTIFY_OK;
+}
+
+static int cpu_pmu_pm_notify(struct notifier_block *b,
+ unsigned long action, void *hcpu)
+{
+ int cpu = smp_processor_id();
+ struct arm_pmu *pmu = per_cpu(cpu_pmu, cpu);
+ struct cpupmu_regs *pmuregs = &per_cpu(cpu_pmu_regs, cpu);
+
+ if (!pmu)
+ return NOTIFY_DONE;
+
+ if (action == CPU_PM_ENTER && pmu->save_regs) {
+ pmu->save_regs(pmu, pmuregs);
+ } else if (action == CPU_PM_EXIT && pmu->restore_regs) {
+ pmu->restore_regs(pmu, pmuregs);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
+ .notifier_call = cpu_pmu_notify,
+};
+
+static struct notifier_block __cpuinitdata cpu_pmu_pm_notifier = {
+ .notifier_call = cpu_pmu_pm_notify,
+};
+
+/*
+ * PMU platform driver and devicetree bindings.
+ */
+static struct of_device_id __devinitdata cpu_pmu_of_device_ids[] = {
+ {.compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init},
+ {.compatible = "arm,cortex-a9-pmu", .data = armv7_a9_pmu_init},
+ {.compatible = "arm,cortex-a8-pmu", .data = armv7_a8_pmu_init},
+ {.compatible = "arm,cortex-a7-pmu", .data = armv7_a7_pmu_init},
+ {.compatible = "arm,cortex-a5-pmu", .data = armv7_a5_pmu_init},
+ {.compatible = "arm,arm11mpcore-pmu", .data = armv6mpcore_pmu_init},
+ {.compatible = "arm,arm1176-pmu", .data = armv6pmu_init},
+ {.compatible = "arm,arm1136-pmu", .data = armv6pmu_init},
+ {},
+};
+
+static struct platform_device_id __devinitdata cpu_pmu_plat_device_ids[] = {
+ {.name = "arm-pmu"},
+ {},
+};
+
+/*
+ * CPU PMU identification and probing.
+ */
+static int __devinit probe_current_pmu(struct arm_pmu *pmu)
+{
+ int cpu = get_cpu();
+ unsigned long cpuid = read_cpuid_id();
+ unsigned long implementor = (cpuid & 0xFF000000) >> 24;
+ unsigned long part_number = (cpuid & 0xFFF0);
+ int ret = -ENODEV;
+
+ pr_info("probing PMU on CPU %d\n", cpu);
+
+ /* ARM Ltd CPUs. */
+ if (0x41 == implementor) {
+ switch (part_number) {
+ case 0xB360: /* ARM1136 */
+ case 0xB560: /* ARM1156 */
+ case 0xB760: /* ARM1176 */
+ ret = armv6pmu_init(pmu);
+ break;
+ case 0xB020: /* ARM11mpcore */
+ ret = armv6mpcore_pmu_init(pmu);
+ break;
+ case 0xC080: /* Cortex-A8 */
+ ret = armv7_a8_pmu_init(pmu);
+ break;
+ case 0xC090: /* Cortex-A9 */
+ ret = armv7_a9_pmu_init(pmu);
+ break;
+ case 0xC050: /* Cortex-A5 */
+ ret = armv7_a5_pmu_init(pmu);
+ break;
+ case 0xC0F0: /* Cortex-A15 */
+ ret = armv7_a15_pmu_init(pmu);
+ break;
+ case 0xC070: /* Cortex-A7 */
+ ret = armv7_a7_pmu_init(pmu);
+ break;
+ }
+ /* Intel CPUs [xscale]. */
+ } else if (0x69 == implementor) {
+ part_number = (cpuid >> 13) & 0x7;
+ switch (part_number) {
+ case 1:
+ ret = xscale1pmu_init(pmu);
+ break;
+ case 2:
+ ret = xscale2pmu_init(pmu);
+ break;
+ }
+ }
+
+ /* assume the PMU supports all the CPUs in this case */
+ cpumask_setall(&pmu->valid_cpus);
+
+ put_cpu();
+ return ret;
+}
+
+static int __devinit cpu_pmu_device_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id;
+ struct device_node *node = pdev->dev.of_node;
+ struct arm_pmu *pmu;
+ int ret = 0;
+ int cpu;
+
+ pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
+ if (!pmu) {
+ pr_info("failed to allocate PMU device!\n");
+ return -ENOMEM;
+ }
+
+ if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
+ smp_call_func_t init_fn = (smp_call_func_t)of_id->data;
+ struct device_node *ncluster;
+ int cluster = -1;
+ cpumask_t sibling_mask;
+
+ ncluster = of_parse_phandle(node, "cluster", 0);
+ if (ncluster) {
+ int len;
+ const u32 *hwid;
+ hwid = of_get_property(ncluster, "reg", &len);
+ if (hwid && len == 4)
+ cluster = be32_to_cpup(hwid);
+ }
+ /* set the sibling mask to all CPUs if no cluster is specified */
+ if (cluster == -1 ||
+ cluster_to_logical_mask(cluster, &sibling_mask))
+ cpumask_setall(&sibling_mask);
+
+ smp_call_function_any(&sibling_mask, init_fn, pmu, 1);
+
+ /* now set the valid_cpus after init */
+ cpumask_copy(&pmu->valid_cpus, &sibling_mask);
+ } else {
+ ret = probe_current_pmu(pmu);
+ }
+
+ if (ret) {
+ pr_info("failed to register PMU devices!\n");
+ kfree(pmu);
+ return ret;
+ }
+
+ for_each_cpu_mask(cpu, pmu->valid_cpus)
+ per_cpu(cpu_pmu, cpu) = pmu;
+
+ pmu->plat_device = pdev;
+ cpu_pmu_init(pmu);
+ armpmu_register(pmu, -1);
+
+ return 0;
+}
+
+static struct platform_driver cpu_pmu_driver = {
+ .driver = {
+ .name = "arm-pmu",
+ .pm = &armpmu_dev_pm_ops,
+ .of_match_table = cpu_pmu_of_device_ids,
+ },
+ .probe = cpu_pmu_device_probe,
+ .id_table = cpu_pmu_plat_device_ids,
+};
+
+static int __init register_pmu_driver(void)
+{
+ int err;
+
+ err = register_cpu_notifier(&cpu_pmu_hotplug_notifier);
+ if (err)
+ return err;
+
+ err = cpu_pm_register_notifier(&cpu_pmu_pm_notifier);
+ if (err) {
+ unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);
+ return err;
+ }
+
+ err = platform_driver_register(&cpu_pmu_driver);
+ if (err) {
+ cpu_pm_unregister_notifier(&cpu_pmu_pm_notifier);
+ unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);
+ }
+
+ return err;
+}
+device_initcall(register_pmu_driver);
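For reference, the devicetree dispatch above calls whatever init function is attached via of_device_id .data with the PMU being probed, so a new CPU back-end only needs an int (*)(struct arm_pmu *) initialiser plus a table entry. A minimal sketch, not part of this patch: the Cortex-A12 compatible string, armv7_a12_pmu_init and the borrowed armv7_a7_map_event are purely illustrative assumptions (the ARMv7 helpers are in scope because perf_event_v7.c is #included above):

static int __devinit armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);			/* generic ARMv7 callbacks */
	cpu_pmu->name		= "ARMv7_Cortex_A12";
	cpu_pmu->map_event	= armv7_a7_map_event;	/* placeholder mapping */
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	return 0;
}

/*
 * ...and cpu_pmu_of_device_ids[] would gain a matching row, e.g.:
 *	{ .compatible = "arm,cortex-a12-pmu", .data = armv7_a12_pmu_init },
 */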
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index c90fcb2b696..b4b0c084511 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -401,9 +401,10 @@ armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
return ret;
}
-static inline u32
-armv6pmu_read_counter(int counter)
+static inline u32 armv6pmu_read_counter(struct perf_event *event)
{
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
unsigned long value = 0;
if (ARMV6_CYCLE_COUNTER == counter)
@@ -418,10 +419,11 @@ armv6pmu_read_counter(int counter)
return value;
}
-static inline void
-armv6pmu_write_counter(int counter,
- u32 value)
+static inline void armv6pmu_write_counter(struct perf_event *event, u32 value)
{
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
if (ARMV6_CYCLE_COUNTER == counter)
asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value));
else if (ARMV6_COUNTER0 == counter)
@@ -432,12 +434,13 @@ armv6pmu_write_counter(int counter,
WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}
-static void
-armv6pmu_enable_event(struct hw_perf_event *hwc,
- int idx)
+static void armv6pmu_enable_event(struct perf_event *event)
{
unsigned long val, mask, evt, flags;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ int idx = hwc->idx;
if (ARMV6_CYCLE_COUNTER == idx) {
mask = 0;
@@ -473,7 +476,8 @@ armv6pmu_handle_irq(int irq_num,
{
unsigned long pmcr = armv6_pmcr_read();
struct perf_sample_data data;
- struct pmu_hw_events *cpuc;
+ struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
+ struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
struct pt_regs *regs;
int idx;
@@ -489,7 +493,6 @@ armv6pmu_handle_irq(int irq_num,
*/
armv6_pmcr_write(pmcr);
- cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
@@ -506,13 +509,13 @@ armv6pmu_handle_irq(int irq_num,
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx);
+ armpmu_event_update(event);
perf_sample_data_init(&data, 0, hwc->last_period);
- if (!armpmu_event_set_period(event, hwc, idx))
+ if (!armpmu_event_set_period(event))
continue;
if (perf_event_overflow(event, &data, regs))
- cpu_pmu->disable(hwc, idx);
+ cpu_pmu->disable(event);
}
/*
@@ -527,8 +530,7 @@ armv6pmu_handle_irq(int irq_num,
return IRQ_HANDLED;
}
-static void
-armv6pmu_start(void)
+static void armv6pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -540,8 +542,7 @@ armv6pmu_start(void)
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static void
-armv6pmu_stop(void)
+static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -555,10 +556,11 @@ armv6pmu_stop(void)
static int
armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
- struct hw_perf_event *event)
+ struct perf_event *event)
{
+ struct hw_perf_event *hwc = &event->hw;
/* Always place a cycle counter into the cycle counter. */
- if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
+ if (ARMV6_PERFCTR_CPU_CYCLES == hwc->config_base) {
if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
return -EAGAIN;
@@ -579,12 +581,13 @@ armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
}
}
-static void
-armv6pmu_disable_event(struct hw_perf_event *hwc,
- int idx)
+static void armv6pmu_disable_event(struct perf_event *event)
{
unsigned long val, mask, evt, flags;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ int idx = hwc->idx;
if (ARMV6_CYCLE_COUNTER == idx) {
mask = ARMV6_PMCR_CCOUNT_IEN;
@@ -613,12 +616,13 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static void
-armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
- int idx)
+static void armv6mpcore_pmu_disable_event(struct perf_event *event)
{
unsigned long val, mask, flags, evt = 0;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ int idx = hwc->idx;
if (ARMV6_CYCLE_COUNTER == idx) {
mask = ARMV6_PMCR_CCOUNT_IEN;
@@ -645,28 +649,28 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
static int armv6_map_event(struct perf_event *event)
{
- return map_cpu_event(event, &armv6_perf_map,
+ return armpmu_map_event(event, &armv6_perf_map,
&armv6_perf_cache_map, 0xFF);
}
-static struct arm_pmu armv6pmu = {
- .name = "v6",
- .handle_irq = armv6pmu_handle_irq,
- .enable = armv6pmu_enable_event,
- .disable = armv6pmu_disable_event,
- .read_counter = armv6pmu_read_counter,
- .write_counter = armv6pmu_write_counter,
- .get_event_idx = armv6pmu_get_event_idx,
- .start = armv6pmu_start,
- .stop = armv6pmu_stop,
- .map_event = armv6_map_event,
- .num_events = 3,
- .max_period = (1LLU << 32) - 1,
-};
-
-static struct arm_pmu *__init armv6pmu_init(void)
+static int __devinit armv6pmu_init(struct arm_pmu *cpu_pmu)
{
- return &armv6pmu;
+ *cpu_pmu = (struct arm_pmu) {
+ .name = "v6",
+ .handle_irq = armv6pmu_handle_irq,
+ .enable = armv6pmu_enable_event,
+ .disable = armv6pmu_disable_event,
+ .read_counter = armv6pmu_read_counter,
+ .write_counter = armv6pmu_write_counter,
+ .get_event_idx = armv6pmu_get_event_idx,
+ .start = armv6pmu_start,
+ .stop = armv6pmu_stop,
+ .map_event = armv6_map_event,
+ .num_events = 3,
+ .max_period = (1LLU << 32) - 1,
+ };
+
+ return 0;
}
/*
@@ -679,37 +683,37 @@ static struct arm_pmu *__init armv6pmu_init(void)
static int armv6mpcore_map_event(struct perf_event *event)
{
- return map_cpu_event(event, &armv6mpcore_perf_map,
+ return armpmu_map_event(event, &armv6mpcore_perf_map,
&armv6mpcore_perf_cache_map, 0xFF);
}
-static struct arm_pmu armv6mpcore_pmu = {
- .name = "v6mpcore",
- .handle_irq = armv6pmu_handle_irq,
- .enable = armv6pmu_enable_event,
- .disable = armv6mpcore_pmu_disable_event,
- .read_counter = armv6pmu_read_counter,
- .write_counter = armv6pmu_write_counter,
- .get_event_idx = armv6pmu_get_event_idx,
- .start = armv6pmu_start,
- .stop = armv6pmu_stop,
- .map_event = armv6mpcore_map_event,
- .num_events = 3,
- .max_period = (1LLU << 32) - 1,
-};
-
-static struct arm_pmu *__init armv6mpcore_pmu_init(void)
+static int __devinit armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
{
- return &armv6mpcore_pmu;
+ *cpu_pmu = (struct arm_pmu) {
+ .name = "v6mpcore",
+ .handle_irq = armv6pmu_handle_irq,
+ .enable = armv6pmu_enable_event,
+ .disable = armv6mpcore_pmu_disable_event,
+ .read_counter = armv6pmu_read_counter,
+ .write_counter = armv6pmu_write_counter,
+ .get_event_idx = armv6pmu_get_event_idx,
+ .start = armv6pmu_start,
+ .stop = armv6pmu_stop,
+ .map_event = armv6mpcore_map_event,
+ .num_events = 3,
+ .max_period = (1LLU << 32) - 1,
+ };
+
+ return 0;
}
#else
-static struct arm_pmu *__init armv6pmu_init(void)
+static int armv6pmu_init(struct arm_pmu *cpu_pmu)
{
- return NULL;
+ return -ENODEV;
}
-static struct arm_pmu *__init armv6mpcore_pmu_init(void)
+static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
{
- return NULL;
+ return -ENODEV;
}
#endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index f04070bd218..3565d8084d6 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -18,8 +18,6 @@
#ifdef CONFIG_CPU_V7
-static struct arm_pmu armv7pmu;
-
/*
* Common ARMv7 event types
*
@@ -738,7 +736,8 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
*/
#define ARMV7_IDX_CYCLE_COUNTER 0
#define ARMV7_IDX_COUNTER0 1
-#define ARMV7_IDX_COUNTER_LAST (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
+#define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
+ (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
#define ARMV7_MAX_COUNTERS 32
#define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1)
@@ -804,49 +803,34 @@ static inline int armv7_pmnc_has_overflowed(u32 pmnc)
return pmnc & ARMV7_OVERFLOWED_MASK;
}
-static inline int armv7_pmnc_counter_valid(int idx)
+static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
- return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST;
+ return idx >= ARMV7_IDX_CYCLE_COUNTER &&
+ idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
}
static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
{
- int ret = 0;
- u32 counter;
-
- if (!armv7_pmnc_counter_valid(idx)) {
- pr_err("CPU%u checking wrong counter %d overflow status\n",
- smp_processor_id(), idx);
- } else {
- counter = ARMV7_IDX_TO_COUNTER(idx);
- ret = pmnc & BIT(counter);
- }
-
- return ret;
+ return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
}
static inline int armv7_pmnc_select_counter(int idx)
{
- u32 counter;
-
- if (!armv7_pmnc_counter_valid(idx)) {
- pr_err("CPU%u selecting wrong PMNC counter %d\n",
- smp_processor_id(), idx);
- return -EINVAL;
- }
-
- counter = ARMV7_IDX_TO_COUNTER(idx);
+ u32 counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
isb();
return idx;
}
-static inline u32 armv7pmu_read_counter(int idx)
+static inline u32 armv7pmu_read_counter(struct perf_event *event)
{
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
u32 value = 0;
- if (!armv7_pmnc_counter_valid(idx))
+ if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
pr_err("CPU%u reading wrong counter %d\n",
smp_processor_id(), idx);
else if (idx == ARMV7_IDX_CYCLE_COUNTER)
@@ -857,9 +841,13 @@ static inline u32 armv7pmu_read_counter(int idx)
return value;
}
-static inline void armv7pmu_write_counter(int idx, u32 value)
+static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
{
- if (!armv7_pmnc_counter_valid(idx))
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
pr_err("CPU%u writing wrong counter %d\n",
smp_processor_id(), idx);
else if (idx == ARMV7_IDX_CYCLE_COUNTER)
@@ -878,60 +866,28 @@ static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
static inline int armv7_pmnc_enable_counter(int idx)
{
- u32 counter;
-
- if (!armv7_pmnc_counter_valid(idx)) {
- pr_err("CPU%u enabling wrong PMNC counter %d\n",
- smp_processor_id(), idx);
- return -EINVAL;
- }
-
- counter = ARMV7_IDX_TO_COUNTER(idx);
+ u32 counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
return idx;
}
static inline int armv7_pmnc_disable_counter(int idx)
{
- u32 counter;
-
- if (!armv7_pmnc_counter_valid(idx)) {
- pr_err("CPU%u disabling wrong PMNC counter %d\n",
- smp_processor_id(), idx);
- return -EINVAL;
- }
-
- counter = ARMV7_IDX_TO_COUNTER(idx);
+ u32 counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
return idx;
}
static inline int armv7_pmnc_enable_intens(int idx)
{
- u32 counter;
-
- if (!armv7_pmnc_counter_valid(idx)) {
- pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
- smp_processor_id(), idx);
- return -EINVAL;
- }
-
- counter = ARMV7_IDX_TO_COUNTER(idx);
+ u32 counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
return idx;
}
static inline int armv7_pmnc_disable_intens(int idx)
{
- u32 counter;
-
- if (!armv7_pmnc_counter_valid(idx)) {
- pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
- smp_processor_id(), idx);
- return -EINVAL;
- }
-
- counter = ARMV7_IDX_TO_COUNTER(idx);
+ u32 counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
isb();
/* Clear the overflow flag in case an interrupt is pending. */
@@ -956,7 +912,7 @@ static inline u32 armv7_pmnc_getreset_flags(void)
}
#ifdef DEBUG
-static void armv7_pmnc_dump_regs(void)
+static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
{
u32 val;
unsigned int cnt;
@@ -981,7 +937,8 @@ static void armv7_pmnc_dump_regs(void)
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
printk(KERN_INFO "CCNT =0x%08x\n", val);
- for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) {
+ for (cnt = ARMV7_IDX_COUNTER0;
+ cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
armv7_pmnc_select_counter(cnt);
asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
printk(KERN_INFO "CNT[%d] count =0x%08x\n",
@@ -993,10 +950,64 @@ static void armv7_pmnc_dump_regs(void)
}
#endif
-static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void armv7pmu_save_regs(struct arm_pmu *cpu_pmu,
+ struct cpupmu_regs *regs)
+{
+ unsigned int cnt;
+ asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (regs->pmc));
+ if (!(regs->pmc & ARMV7_PMNC_E))
+ return;
+
+ asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (regs->pmcntenset));
+ asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r" (regs->pmuseren));
+ asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (regs->pmintenset));
+ asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (regs->pmxevtcnt[0]));
+ for (cnt = ARMV7_IDX_COUNTER0;
+ cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
+ armv7_pmnc_select_counter(cnt);
+ asm volatile("mrc p15, 0, %0, c9, c13, 1"
+ : "=r"(regs->pmxevttype[cnt]));
+ asm volatile("mrc p15, 0, %0, c9, c13, 2"
+ : "=r"(regs->pmxevtcnt[cnt]));
+ }
+ return;
+}
+
+static void armv7pmu_restore_regs(struct arm_pmu *cpu_pmu,
+ struct cpupmu_regs *regs)
+{
+ unsigned int cnt;
+ if (!(regs->pmc & ARMV7_PMNC_E))
+ return;
+
+ asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (regs->pmcntenset));
+ asm volatile("mcr p15, 0, %0, c9, c14, 0" : : "r" (regs->pmuseren));
+ asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (regs->pmintenset));
+ asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (regs->pmxevtcnt[0]));
+ for (cnt = ARMV7_IDX_COUNTER0;
+ cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
+ armv7_pmnc_select_counter(cnt);
+ asm volatile("mcr p15, 0, %0, c9, c13, 1"
+ : : "r"(regs->pmxevttype[cnt]));
+ asm volatile("mcr p15, 0, %0, c9, c13, 2"
+ : : "r"(regs->pmxevtcnt[cnt]));
+ }
+ asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (regs->pmc));
+}
+
+static void armv7pmu_enable_event(struct perf_event *event)
{
unsigned long flags;
+ struct hw_perf_event *hwc = &event->hw;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ int idx = hwc->idx;
+
+ if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
+ pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
+ smp_processor_id(), idx);
+ return;
+ }
/*
* Enable counter and interrupt, and set the counter to count
@@ -1014,7 +1025,7 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
* We only need to set the event for the cycle counter if we
* have the ability to perform event filtering.
*/
- if (armv7pmu.set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
+ if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
armv7_pmnc_write_evtsel(idx, hwc->config_base);
/*
@@ -1030,10 +1041,19 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
+static void armv7pmu_disable_event(struct perf_event *event)
{
unsigned long flags;
+ struct hw_perf_event *hwc = &event->hw;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ int idx = hwc->idx;
+
+ if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
+ pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
+ smp_processor_id(), idx);
+ return;
+ }
/*
* Disable counter and interrupt
@@ -1057,7 +1077,8 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
u32 pmnc;
struct perf_sample_data data;
- struct pmu_hw_events *cpuc;
+ struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
+ struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
struct pt_regs *regs;
int idx;
@@ -1077,7 +1098,6 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
*/
regs = get_irq_regs();
- cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
@@ -1094,13 +1114,13 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx);
+ armpmu_event_update(event);
perf_sample_data_init(&data, 0, hwc->last_period);
- if (!armpmu_event_set_period(event, hwc, idx))
+ if (!armpmu_event_set_period(event))
continue;
if (perf_event_overflow(event, &data, regs))
- cpu_pmu->disable(hwc, idx);
+ cpu_pmu->disable(event);
}
/*
@@ -1115,7 +1135,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
return IRQ_HANDLED;
}
-static void armv7pmu_start(void)
+static void armv7pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -1126,7 +1146,7 @@ static void armv7pmu_start(void)
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static void armv7pmu_stop(void)
+static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
{
unsigned long flags;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -1138,10 +1158,12 @@ static void armv7pmu_stop(void)
}
static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
- struct hw_perf_event *event)
+ struct perf_event *event)
{
int idx;
- unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;
/* Always place a cycle counter into the cycle counter. */
if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
@@ -1192,11 +1214,14 @@ static int armv7pmu_set_event_filter(struct hw_perf_event *event,
static void armv7pmu_reset(void *info)
{
+ struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
u32 idx, nb_cnt = cpu_pmu->num_events;
/* The counter and interrupt enable registers are unknown at reset. */
- for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
- armv7pmu_disable_event(NULL, idx);
+ for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
+ armv7_pmnc_disable_counter(idx);
+ armv7_pmnc_disable_intens(idx);
+ }
/* Initialize & Reset PMNC: C and P bits */
armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
@@ -1204,48 +1229,53 @@ static void armv7pmu_reset(void *info)
static int armv7_a8_map_event(struct perf_event *event)
{
- return map_cpu_event(event, &armv7_a8_perf_map,
+ return armpmu_map_event(event, &armv7_a8_perf_map,
&armv7_a8_perf_cache_map, 0xFF);
}
static int armv7_a9_map_event(struct perf_event *event)
{
- return map_cpu_event(event, &armv7_a9_perf_map,
+ return armpmu_map_event(event, &armv7_a9_perf_map,
&armv7_a9_perf_cache_map, 0xFF);
}
static int armv7_a5_map_event(struct perf_event *event)
{
- return map_cpu_event(event, &armv7_a5_perf_map,
+ return armpmu_map_event(event, &armv7_a5_perf_map,
&armv7_a5_perf_cache_map, 0xFF);
}
static int armv7_a15_map_event(struct perf_event *event)
{
- return map_cpu_event(event, &armv7_a15_perf_map,
+ return armpmu_map_event(event, &armv7_a15_perf_map,
&armv7_a15_perf_cache_map, 0xFF);
}
static int armv7_a7_map_event(struct perf_event *event)
{
- return map_cpu_event(event, &armv7_a7_perf_map,
+ return armpmu_map_event(event, &armv7_a7_perf_map,
&armv7_a7_perf_cache_map, 0xFF);
}
-static struct arm_pmu armv7pmu = {
- .handle_irq = armv7pmu_handle_irq,
- .enable = armv7pmu_enable_event,
- .disable = armv7pmu_disable_event,
- .read_counter = armv7pmu_read_counter,
- .write_counter = armv7pmu_write_counter,
- .get_event_idx = armv7pmu_get_event_idx,
- .start = armv7pmu_start,
- .stop = armv7pmu_stop,
- .reset = armv7pmu_reset,
- .max_period = (1LLU << 32) - 1,
+static void armv7pmu_init(struct arm_pmu *cpu_pmu)
+{
+ *cpu_pmu = (struct arm_pmu) {
+ .handle_irq = armv7pmu_handle_irq,
+ .enable = armv7pmu_enable_event,
+ .disable = armv7pmu_disable_event,
+ .read_counter = armv7pmu_read_counter,
+ .write_counter = armv7pmu_write_counter,
+ .get_event_idx = armv7pmu_get_event_idx,
+ .start = armv7pmu_start,
+ .stop = armv7pmu_stop,
+ .reset = armv7pmu_reset,
+ .save_regs = armv7pmu_save_regs,
+ .restore_regs = armv7pmu_restore_regs,
+ .max_period = (1LLU << 32) - 1,
+ };
};
-static u32 __init armv7_read_num_pmnc_events(void)
+static u32 __devinit armv7_read_num_pmnc_events(void)
{
u32 nb_cnt;
@@ -1256,70 +1286,75 @@ static u32 __init armv7_read_num_pmnc_events(void)
return nb_cnt + 1;
}
-static struct arm_pmu *__init armv7_a8_pmu_init(void)
+static int __devinit armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
- armv7pmu.name = "ARMv7 Cortex-A8";
- armv7pmu.map_event = armv7_a8_map_event;
- armv7pmu.num_events = armv7_read_num_pmnc_events();
- return &armv7pmu;
+ armv7pmu_init(cpu_pmu);
+ cpu_pmu->name = "ARMv7_Cortex_A8";
+ cpu_pmu->map_event = armv7_a8_map_event;
+ cpu_pmu->num_events = armv7_read_num_pmnc_events();
+ return 0;
}
-static struct arm_pmu *__init armv7_a9_pmu_init(void)
+static int __devinit armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
- armv7pmu.name = "ARMv7 Cortex-A9";
- armv7pmu.map_event = armv7_a9_map_event;
- armv7pmu.num_events = armv7_read_num_pmnc_events();
- return &armv7pmu;
+ armv7pmu_init(cpu_pmu);
+ cpu_pmu->name = "ARMv7_Cortex_A9";
+ cpu_pmu->map_event = armv7_a9_map_event;
+ cpu_pmu->num_events = armv7_read_num_pmnc_events();
+ return 0;
}
-static struct arm_pmu *__init armv7_a5_pmu_init(void)
+static int __devinit armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
- armv7pmu.name = "ARMv7 Cortex-A5";
- armv7pmu.map_event = armv7_a5_map_event;
- armv7pmu.num_events = armv7_read_num_pmnc_events();
- return &armv7pmu;
+ armv7pmu_init(cpu_pmu);
+ cpu_pmu->name = "ARMv7_Cortex_A5";
+ cpu_pmu->map_event = armv7_a5_map_event;
+ cpu_pmu->num_events = armv7_read_num_pmnc_events();
+ return 0;
}
-static struct arm_pmu *__init armv7_a15_pmu_init(void)
+static int __devinit armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
- armv7pmu.name = "ARMv7 Cortex-A15";
- armv7pmu.map_event = armv7_a15_map_event;
- armv7pmu.num_events = armv7_read_num_pmnc_events();
- armv7pmu.set_event_filter = armv7pmu_set_event_filter;
- return &armv7pmu;
+ armv7pmu_init(cpu_pmu);
+ cpu_pmu->name = "ARMv7_Cortex_A15";
+ cpu_pmu->map_event = armv7_a15_map_event;
+ cpu_pmu->num_events = armv7_read_num_pmnc_events();
+ cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
+ return 0;
}
-static struct arm_pmu *__init armv7_a7_pmu_init(void)
+static int __devinit armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
- armv7pmu.name = "ARMv7 Cortex-A7";
- armv7pmu.map_event = armv7_a7_map_event;
- armv7pmu.num_events = armv7_read_num_pmnc_events();
- armv7pmu.set_event_filter = armv7pmu_set_event_filter;
- return &armv7pmu;
+ armv7pmu_init(cpu_pmu);
+ cpu_pmu->name = "ARMv7_Cortex_A7";
+ cpu_pmu->map_event = armv7_a7_map_event;
+ cpu_pmu->num_events = armv7_read_num_pmnc_events();
+ cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
+ return 0;
}
#else
-static struct arm_pmu *__init armv7_a8_pmu_init(void)
+static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
- return NULL;
+ return -ENODEV;
}
-static struct arm_pmu *__init armv7_a9_pmu_init(void)
+static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
- return NULL;
+ return -ENODEV;
}
-static struct arm_pmu *__init armv7_a5_pmu_init(void)
+static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
- return NULL;
+ return -ENODEV;
}
-static struct arm_pmu *__init armv7_a15_pmu_init(void)
+static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
- return NULL;
+ return -ENODEV;
}
-static struct arm_pmu *__init armv7_a7_pmu_init(void)
+static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
- return NULL;
+ return -ENODEV;
}
#endif /* CONFIG_CPU_V7 */
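One non-obvious detail in the helpers above: assuming the usual definition ARMV7_IDX_TO_COUNTER(x) == (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK), which is not shown in these hunks, index 0 (the cycle counter) wraps around to hardware counter 31, which is exactly the CCNT bit in the enable, interrupt-enable and overflow registers. A small illustrative helper, not part of the patch:

static inline u32 armv7_example_ccnt_bit(void)
{
	/*
	 * (ARMV7_IDX_CYCLE_COUNTER - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK
	 * == (0 - 1) & 0x1f == 31, so BIT(31) is what the
	 * armv7_pmnc_{enable,disable}_counter() helpers end up writing for
	 * the cycle counter.
	 */
	return BIT(ARMV7_IDX_TO_COUNTER(ARMV7_IDX_CYCLE_COUNTER));
}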
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index f759fe0bab6..1d3e1bf4865 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -224,7 +224,8 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
{
unsigned long pmnc;
struct perf_sample_data data;
- struct pmu_hw_events *cpuc;
+ struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
+ struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
struct pt_regs *regs;
int idx;
@@ -248,7 +249,6 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
regs = get_irq_regs();
- cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
@@ -260,13 +260,13 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx);
+ armpmu_event_update(event);
perf_sample_data_init(&data, 0, hwc->last_period);
- if (!armpmu_event_set_period(event, hwc, idx))
+ if (!armpmu_event_set_period(event))
continue;
if (perf_event_overflow(event, &data, regs))
- cpu_pmu->disable(hwc, idx);
+ cpu_pmu->disable(event);
}
irq_work_run();
@@ -280,11 +280,13 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
return IRQ_HANDLED;
}
-static void
-xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void xscale1pmu_enable_event(struct perf_event *event)
{
unsigned long val, mask, evt, flags;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ int idx = hwc->idx;
switch (idx) {
case XSCALE_CYCLE_COUNTER:
@@ -314,11 +316,13 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static void
-xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
+static void xscale1pmu_disable_event(struct perf_event *event)
{
unsigned long val, mask, evt, flags;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ int idx = hwc->idx;
switch (idx) {
case XSCALE_CYCLE_COUNTER:
@@ -348,9 +352,10 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
static int
xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
- struct hw_perf_event *event)
+ struct perf_event *event)
{
- if (XSCALE_PERFCTR_CCNT == event->config_base) {
+ struct hw_perf_event *hwc = &event->hw;
+ if (XSCALE_PERFCTR_CCNT == hwc->config_base) {
if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
return -EAGAIN;
@@ -366,8 +371,7 @@ xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
}
}
-static void
-xscale1pmu_start(void)
+static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -379,8 +383,7 @@ xscale1pmu_start(void)
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static void
-xscale1pmu_stop(void)
+static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -392,9 +395,10 @@ xscale1pmu_stop(void)
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static inline u32
-xscale1pmu_read_counter(int counter)
+static inline u32 xscale1pmu_read_counter(struct perf_event *event)
{
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
u32 val = 0;
switch (counter) {
@@ -412,9 +416,11 @@ xscale1pmu_read_counter(int counter)
return val;
}
-static inline void
-xscale1pmu_write_counter(int counter, u32 val)
+static inline void xscale1pmu_write_counter(struct perf_event *event, u32 val)
{
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
switch (counter) {
case XSCALE_CYCLE_COUNTER:
asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
@@ -430,28 +436,28 @@ xscale1pmu_write_counter(int counter, u32 val)
static int xscale_map_event(struct perf_event *event)
{
- return map_cpu_event(event, &xscale_perf_map,
+ return armpmu_map_event(event, &xscale_perf_map,
&xscale_perf_cache_map, 0xFF);
}
-static struct arm_pmu xscale1pmu = {
- .name = "xscale1",
- .handle_irq = xscale1pmu_handle_irq,
- .enable = xscale1pmu_enable_event,
- .disable = xscale1pmu_disable_event,
- .read_counter = xscale1pmu_read_counter,
- .write_counter = xscale1pmu_write_counter,
- .get_event_idx = xscale1pmu_get_event_idx,
- .start = xscale1pmu_start,
- .stop = xscale1pmu_stop,
- .map_event = xscale_map_event,
- .num_events = 3,
- .max_period = (1LLU << 32) - 1,
-};
-
-static struct arm_pmu *__init xscale1pmu_init(void)
+static int __devinit xscale1pmu_init(struct arm_pmu *cpu_pmu)
{
- return &xscale1pmu;
+ *cpu_pmu = (struct arm_pmu) {
+ .name = "xscale1",
+ .handle_irq = xscale1pmu_handle_irq,
+ .enable = xscale1pmu_enable_event,
+ .disable = xscale1pmu_disable_event,
+ .read_counter = xscale1pmu_read_counter,
+ .write_counter = xscale1pmu_write_counter,
+ .get_event_idx = xscale1pmu_get_event_idx,
+ .start = xscale1pmu_start,
+ .stop = xscale1pmu_stop,
+ .map_event = xscale_map_event,
+ .num_events = 3,
+ .max_period = (1LLU << 32) - 1,
+ };
+
+ return 0;
}
#define XSCALE2_OVERFLOWED_MASK 0x01f
@@ -567,7 +573,8 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
{
unsigned long pmnc, of_flags;
struct perf_sample_data data;
- struct pmu_hw_events *cpuc;
+ struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
+ struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
struct pt_regs *regs;
int idx;
@@ -585,7 +592,6 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
regs = get_irq_regs();
- cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
@@ -597,13 +603,13 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx);
+ armpmu_event_update(event);
perf_sample_data_init(&data, 0, hwc->last_period);
- if (!armpmu_event_set_period(event, hwc, idx))
+ if (!armpmu_event_set_period(event))
continue;
if (perf_event_overflow(event, &data, regs))
- cpu_pmu->disable(hwc, idx);
+ cpu_pmu->disable(event);
}
irq_work_run();
@@ -617,11 +623,13 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
return IRQ_HANDLED;
}
-static void
-xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void xscale2pmu_enable_event(struct perf_event *event)
{
unsigned long flags, ien, evtsel;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ int idx = hwc->idx;
ien = xscale2pmu_read_int_enable();
evtsel = xscale2pmu_read_event_select();
@@ -661,11 +669,13 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static void
-xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
+static void xscale2pmu_disable_event(struct perf_event *event)
{
unsigned long flags, ien, evtsel, of_flags;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ int idx = hwc->idx;
ien = xscale2pmu_read_int_enable();
evtsel = xscale2pmu_read_event_select();
@@ -713,7 +723,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
static int
xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
- struct hw_perf_event *event)
+ struct perf_event *event)
{
int idx = xscale1pmu_get_event_idx(cpuc, event);
if (idx >= 0)
@@ -727,8 +737,7 @@ out:
return idx;
}
-static void
-xscale2pmu_start(void)
+static void xscale2pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -740,8 +749,7 @@ xscale2pmu_start(void)
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static void
-xscale2pmu_stop(void)
+static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -753,9 +761,10 @@ xscale2pmu_stop(void)
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static inline u32
-xscale2pmu_read_counter(int counter)
+static inline u32 xscale2pmu_read_counter(struct perf_event *event)
{
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
u32 val = 0;
switch (counter) {
@@ -779,9 +788,11 @@ xscale2pmu_read_counter(int counter)
return val;
}
-static inline void
-xscale2pmu_write_counter(int counter, u32 val)
+static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val)
{
+ struct hw_perf_event *hwc = &event->hw;
+ int counter = hwc->idx;
+
switch (counter) {
case XSCALE_CYCLE_COUNTER:
asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
@@ -801,33 +812,33 @@ xscale2pmu_write_counter(int counter, u32 val)
}
}
-static struct arm_pmu xscale2pmu = {
- .name = "xscale2",
- .handle_irq = xscale2pmu_handle_irq,
- .enable = xscale2pmu_enable_event,
- .disable = xscale2pmu_disable_event,
- .read_counter = xscale2pmu_read_counter,
- .write_counter = xscale2pmu_write_counter,
- .get_event_idx = xscale2pmu_get_event_idx,
- .start = xscale2pmu_start,
- .stop = xscale2pmu_stop,
- .map_event = xscale_map_event,
- .num_events = 5,
- .max_period = (1LLU << 32) - 1,
-};
-
-static struct arm_pmu *__init xscale2pmu_init(void)
+static int __devinit xscale2pmu_init(struct arm_pmu *cpu_pmu)
{
- return &xscale2pmu;
+ *cpu_pmu = (struct arm_pmu) {
+ .name = "xscale2",
+ .handle_irq = xscale2pmu_handle_irq,
+ .enable = xscale2pmu_enable_event,
+ .disable = xscale2pmu_disable_event,
+ .read_counter = xscale2pmu_read_counter,
+ .write_counter = xscale2pmu_write_counter,
+ .get_event_idx = xscale2pmu_get_event_idx,
+ .start = xscale2pmu_start,
+ .stop = xscale2pmu_stop,
+ .map_event = xscale_map_event,
+ .num_events = 5,
+ .max_period = (1LLU << 32) - 1,
+ };
+
+ return 0;
}
#else
-static struct arm_pmu *__init xscale1pmu_init(void)
+static inline int xscale1pmu_init(struct arm_pmu *cpu_pmu)
{
- return NULL;
+ return -ENODEV;
}
-static struct arm_pmu *__init xscale2pmu_init(void)
+static inline int xscale2pmu_init(struct arm_pmu *cpu_pmu)
{
- return NULL;
+ return -ENODEV;
}
#endif /* CONFIG_CPU_XSCALE */
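
The xscale callbacks above now receive a struct perf_event and recover their struct arm_pmu via to_arm_pmu(event->pmu). A minimal, illustrative C sketch of that embedded-struct lookup, assuming (as the surrounding code suggests) that struct arm_pmu embeds a struct pmu and that to_arm_pmu() is a container_of() wrapper; the names below are stand-ins, not the real definitions:

        #include <stddef.h>

        struct pmu { int type; };               /* stand-in for the core perf struct */

        struct arm_pmu {
                struct pmu pmu;                 /* embedded generic PMU */
                int num_events;
        };

        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        /* event->pmu points at the embedded member; step back to the wrapper. */
        static struct arm_pmu *to_arm_pmu_sketch(struct pmu *p)
        {
                return container_of(p, struct arm_pmu, pmu);
        }
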
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
deleted file mode 100644
index 2334bf8a650..00000000000
--- a/arch/arm/kernel/pmu.c
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * linux/arch/arm/kernel/pmu.c
- *
- * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
- * Copyright (C) 2010 ARM Ltd, Will Deacon
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/err.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <asm/pmu.h>
-
-/*
- * PMU locking to ensure mutual exclusion between different subsystems.
- */
-static unsigned long pmu_lock[BITS_TO_LONGS(ARM_NUM_PMU_DEVICES)];
-
-int
-reserve_pmu(enum arm_pmu_type type)
-{
- return test_and_set_bit_lock(type, pmu_lock) ? -EBUSY : 0;
-}
-EXPORT_SYMBOL_GPL(reserve_pmu);
-
-void
-release_pmu(enum arm_pmu_type type)
-{
- clear_bit_unlock(type, pmu_lock);
-}
-EXPORT_SYMBOL_GPL(release_pmu);
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index 27d186abbc0..f4515393248 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -21,6 +21,8 @@ struct clock_data {
u32 epoch_cyc_copy;
u32 mult;
u32 shift;
+ bool suspended;
+ bool needs_suspend;
};
static void sched_clock_poll(unsigned long wrap_ticks);
@@ -49,6 +51,9 @@ static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
u64 epoch_ns;
u32 epoch_cyc;
+ if (cd.suspended)
+ return cd.epoch_ns;
+
/*
* Load the epoch_cyc and epoch_ns atomically. We do this by
* ensuring that we always write epoch_cyc, epoch_ns and
@@ -98,6 +103,13 @@ static void sched_clock_poll(unsigned long wrap_ticks)
update_sched_clock();
}
+void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
+ unsigned long rate)
+{
+ setup_sched_clock(read, bits, rate);
+ cd.needs_suspend = true;
+}
+
void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
unsigned long r, w;
@@ -169,11 +181,23 @@ void __init sched_clock_postinit(void)
static int sched_clock_suspend(void)
{
sched_clock_poll(sched_clock_timer.data);
+ if (cd.needs_suspend)
+ cd.suspended = true;
return 0;
}
+static void sched_clock_resume(void)
+{
+ if (cd.needs_suspend) {
+ cd.epoch_cyc = read_sched_clock();
+ cd.epoch_cyc_copy = cd.epoch_cyc;
+ cd.suspended = false;
+ }
+}
+
static struct syscore_ops sched_clock_ops = {
.suspend = sched_clock_suspend,
+ .resume = sched_clock_resume,
};
static int __init sched_clock_syscore_init(void)
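
The new setup_sched_clock_needs_suspend() is for platforms whose sched_clock counter stops or resets across suspend, so the core freezes the epoch instead of trusting the counter on resume. A hedged sketch of a platform timer init using it; mytimer_read(), timer_base, MYTIMER_COUNT and MYTIMER_RATE are placeholders, not symbols from this patch, and the helper is assumed to be declared alongside setup_sched_clock() in asm/sched_clock.h:

        #include <linux/io.h>
        #include <asm/sched_clock.h>

        #define MYTIMER_COUNT   0x10            /* hypothetical counter register offset */
        #define MYTIMER_RATE    32768UL         /* hypothetical counter frequency in Hz */

        static void __iomem *timer_base;        /* mapped elsewhere during timer setup */

        static u32 mytimer_read(void)
        {
                return readl_relaxed(timer_base + MYTIMER_COUNT);
        }

        static void __init mytimer_sched_clock_init(void)
        {
                /* 32-bit counter that does not tick while suspended */
                setup_sched_clock_needs_suspend(mytimer_read, 32, MYTIMER_RATE);
        }
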
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index fef42b21cec..e1f906989bb 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/clk.h>
-#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -96,7 +95,52 @@ static void twd_timer_stop(struct clock_event_device *clk)
disable_percpu_irq(clk->irq);
}
-#ifdef CONFIG_CPU_FREQ
+#ifdef CONFIG_COMMON_CLK
+
+/*
+ * Updates clockevent frequency when the cpu frequency changes.
+ * Called on the cpu that is changing frequency with interrupts disabled.
+ */
+static void twd_update_frequency(void *new_rate)
+{
+ twd_timer_rate = *((unsigned long *) new_rate);
+
+ clockevents_update_freq(*__this_cpu_ptr(twd_evt), twd_timer_rate);
+}
+
+static int twd_rate_change(struct notifier_block *nb,
+ unsigned long flags, void *data)
+{
+ struct clk_notifier_data *cnd = data;
+
+ /*
+ * The twd clock events must be reprogrammed to account for the new
+ * frequency. The timer is local to a cpu, so cross-call to the
+ * changing cpu.
+ */
+ if (flags == POST_RATE_CHANGE)
+ smp_call_function(twd_update_frequency,
+ (void *)&cnd->new_rate, 1);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block twd_clk_nb = {
+ .notifier_call = twd_rate_change,
+};
+
+static int twd_clk_init(void)
+{
+ if (twd_evt && *__this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
+ return clk_notifier_register(twd_clk, &twd_clk_nb);
+
+ return 0;
+}
+core_initcall(twd_clk_init);
+
+#elif defined (CONFIG_CPU_FREQ)
+
+#include <linux/cpufreq.h>
/*
* Updates clockevent frequency when the cpu frequency changes.
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 198b08456e9..58dac7a80e5 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -317,11 +317,129 @@ void store_cpu_topology(unsigned int cpuid)
cpu_topology[cpuid].socket_id, mpidr);
}
+
+#ifdef CONFIG_SCHED_HMP
+
+static const char * const little_cores[] = {
+ "arm,cortex-a7",
+ NULL,
+};
+
+static bool is_little_cpu(struct device_node *cn)
+{
+ const char * const *lc;
+ for (lc = little_cores; *lc; lc++)
+ if (of_device_is_compatible(cn, *lc))
+ return true;
+ return false;
+}
+
+void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
+ struct cpumask *slow)
+{
+ struct device_node *cn = NULL;
+ int cpu = 0;
+
+ cpumask_clear(fast);
+ cpumask_clear(slow);
+
+ /*
+ * Use the config options if they are given. This helps testing
+ * HMP scheduling on systems without a big.LITTLE architecture.
+ */
+ if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
+ if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
+ WARN(1, "Failed to parse HMP fast cpu mask!\n");
+ if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
+ WARN(1, "Failed to parse HMP slow cpu mask!\n");
+ return;
+ }
+
+ /*
+ * Else, parse device tree for little cores.
+ */
+ while ((cn = of_find_node_by_type(cn, "cpu"))) {
+
+ if (cpu >= num_possible_cpus())
+ break;
+
+ if (is_little_cpu(cn))
+ cpumask_set_cpu(cpu, slow);
+ else
+ cpumask_set_cpu(cpu, fast);
+
+ cpu++;
+ }
+
+ if (!cpumask_empty(fast) && !cpumask_empty(slow))
+ return;
+
+ /*
+	 * We didn't find both big and little cores, so mark all cores as
+	 * fast; this keeps the system running, with every core treated
+	 * equally.
+ */
+ cpumask_setall(fast);
+ cpumask_clear(slow);
+}
+
+void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
+{
+ struct cpumask hmp_fast_cpu_mask;
+ struct cpumask hmp_slow_cpu_mask;
+ struct hmp_domain *domain;
+
+ arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);
+
+ /*
+ * Initialize hmp_domains
+ * Must be ordered with respect to compute capacity.
+ * Fastest domain at head of list.
+ */
+ domain = (struct hmp_domain *)
+ kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
+ cpumask_copy(&domain->cpus, &hmp_slow_cpu_mask);
+ list_add(&domain->hmp_domains, hmp_domains_list);
+ domain = (struct hmp_domain *)
+ kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
+ cpumask_copy(&domain->cpus, &hmp_fast_cpu_mask);
+ list_add(&domain->hmp_domains, hmp_domains_list);
+}
+#endif /* CONFIG_SCHED_HMP */
+
+
+/*
+ * cluster_to_logical_mask - return cpu logical mask of CPUs in a cluster
+ * @socket_id: cluster HW identifier
+ * @cluster_mask: the cpumask location to be initialized, modified by the
+ * function only if return value == 0
+ *
+ * Return:
+ *
+ * 0 on success
+ * -EINVAL if cluster_mask is NULL or there is no record matching socket_id
+ */
+int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask)
+{
+ int cpu;
+
+ if (!cluster_mask)
+ return -EINVAL;
+
+ for_each_online_cpu(cpu)
+ if (socket_id == topology_physical_package_id(cpu)) {
+ cpumask_copy(cluster_mask, topology_core_cpumask(cpu));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
/*
* init_cpu_topology is called at boot when only one cpu is running
* which prevent simultaneous write access to cpu_topology array
*/
-void init_cpu_topology(void)
+void __init init_cpu_topology(void)
{
unsigned int cpu;
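
For the cluster_to_logical_mask() helper documented above, a short illustrative caller; cpumask_weight() is standard kernel API, while the function name here is made up for the example:

        #include <linux/cpumask.h>
        #include <linux/errno.h>

        /* Count the logical CPUs the kernel currently maps to a given cluster id. */
        static int cpus_in_cluster(unsigned int socket_id)
        {
                cpumask_t mask;

                if (cluster_to_logical_mask(socket_id, &mask))
                        return -EINVAL;         /* no online CPU reports this cluster */

                return cpumask_weight(&mask);
        }
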
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index f7945218b8c..b0179b89a04 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -420,20 +420,23 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
#endif
instr = *(u32 *) pc;
} else if (thumb_mode(regs)) {
- get_user(instr, (u16 __user *)pc);
+ if (get_user(instr, (u16 __user *)pc))
+ goto die_sig;
if (is_wide_instruction(instr)) {
unsigned int instr2;
- get_user(instr2, (u16 __user *)pc+1);
+ if (get_user(instr2, (u16 __user *)pc+1))
+ goto die_sig;
instr <<= 16;
instr |= instr2;
}
- } else {
- get_user(instr, (u32 __user *)pc);
+ } else if (get_user(instr, (u32 __user *)pc)) {
+ goto die_sig;
}
if (call_undef_hook(regs, instr) == 0)
return;
+die_sig:
#ifdef CONFIG_DEBUG_USER
if (user_debug & UDBG_UNDEFINED) {
printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
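
The do_undefinstr() change above follows the general rule that a get_user() result must be checked before its destination is used, since the macro returns 0 on success and -EFAULT when the user access faults. A small illustrative helper (names are hypothetical):

        #include <linux/types.h>
        #include <linux/errno.h>
        #include <linux/uaccess.h>

        static int read_user_halfword(const u16 __user *addr, u16 *out)
        {
                u16 val;

                if (get_user(val, addr))
                        return -EFAULT;         /* fault: val holds nothing useful */

                *out = val;                     /* only valid on the success path */
                return 0;
        }
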
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index 2473fd1fd51..af72969820b 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -16,13 +16,30 @@ lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
call_with_stack.o
mmu-y := clear_user.o copy_page.o getuser.o putuser.o
-mmu-y += copy_from_user.o copy_to_user.o
+
+# the code in uaccess.S is not preemption safe and
+# probably faster on ARMv3 only
+ifeq ($(CONFIG_PREEMPT),y)
+ mmu-y += copy_from_user.o copy_to_user.o
+else
+ifneq ($(CONFIG_CPU_32v3),y)
+ mmu-y += copy_from_user.o copy_to_user.o
+else
+ mmu-y += uaccess.o
+endif
+endif
# using lib_ here won't override already available weak symbols
obj-$(CONFIG_UACCESS_WITH_MEMCPY) += uaccess_with_memcpy.o
-lib-$(CONFIG_MMU) += $(mmu-y)
-lib-y += io-readsw-armv4.o io-writesw-armv4.o
+lib-$(CONFIG_MMU) += $(mmu-y)
+
+ifeq ($(CONFIG_CPU_32v3),y)
+ lib-y += io-readsw-armv3.o io-writesw-armv3.o
+else
+ lib-y += io-readsw-armv4.o io-writesw-armv4.o
+endif
+
lib-$(CONFIG_ARCH_RPC) += ecard.o io-acorn.o floppydma.o
lib-$(CONFIG_ARCH_SHARK) += io-shark.o
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
index d6dacc69254..395d5fbb8fa 100644
--- a/arch/arm/lib/delay.c
+++ b/arch/arm/lib/delay.c
@@ -59,6 +59,7 @@ void __init init_current_timer_delay(unsigned long freq)
{
pr_info("Switching to timer-based delay loop\n");
lpj_fine = freq / HZ;
+ loops_per_jiffy = lpj_fine;
arm_delay_ops.delay = __timer_delay;
arm_delay_ops.const_udelay = __timer_const_udelay;
arm_delay_ops.udelay = __timer_udelay;
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 11093a7c3e3..9b06bb41fca 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -16,8 +16,9 @@
* __get_user_X
*
* Inputs: r0 contains the address
+ * r1 contains the address limit, which must be preserved
* Outputs: r0 is the error code
- * r2, r3 contains the zero-extended value
+ * r2 contains the zero-extended value
* lr corrupted
*
* No other registers must be altered. (see <asm/uaccess.h>
@@ -27,33 +28,39 @@
* Note also that it is intended that __get_user_bad is not global.
*/
#include <linux/linkage.h>
+#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/domain.h>
ENTRY(__get_user_1)
+ check_uaccess r0, 1, r1, r2, __get_user_bad
1: TUSER(ldrb) r2, [r0]
mov r0, #0
mov pc, lr
ENDPROC(__get_user_1)
ENTRY(__get_user_2)
-#ifdef CONFIG_THUMB2_KERNEL
-2: TUSER(ldrb) r2, [r0]
-3: TUSER(ldrb) r3, [r0, #1]
+ check_uaccess r0, 2, r1, r2, __get_user_bad
+#ifdef CONFIG_CPU_USE_DOMAINS
+rb .req ip
+2: ldrbt r2, [r0], #1
+3: ldrbt rb, [r0], #0
#else
-2: TUSER(ldrb) r2, [r0], #1
-3: TUSER(ldrb) r3, [r0]
+rb .req r0
+2: ldrb r2, [r0]
+3: ldrb rb, [r0, #1]
#endif
#ifndef __ARMEB__
- orr r2, r2, r3, lsl #8
+ orr r2, r2, rb, lsl #8
#else
- orr r2, r3, r2, lsl #8
+ orr r2, rb, r2, lsl #8
#endif
mov r0, #0
mov pc, lr
ENDPROC(__get_user_2)
ENTRY(__get_user_4)
+ check_uaccess r0, 4, r1, r2, __get_user_bad
4: TUSER(ldr) r2, [r0]
mov r0, #0
mov pc, lr
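
check_uaccess, added before each __get_user body above, performs the address-limit test against the limit passed in r1. Roughly, in C terms; the real macro lives in asm/assembler.h and its exact comparison and flag usage may differ from this model:

        /* Rough C model of the limit check: reject ranges that overflow or
         * that extend past the thread's address limit. */
        static inline int range_ok(unsigned long addr, unsigned long size,
                                   unsigned long limit)
        {
                return addr <= limit && size <= limit - addr;
        }
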
diff --git a/arch/arm/lib/io-readsw-armv3.S b/arch/arm/lib/io-readsw-armv3.S
new file mode 100644
index 00000000000..88487c8c4f2
--- /dev/null
+++ b/arch/arm/lib/io-readsw-armv3.S
@@ -0,0 +1,106 @@
+/*
+ * linux/arch/arm/lib/io-readsw-armv3.S
+ *
+ * Copyright (C) 1995-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+.Linsw_bad_alignment:
+ adr r0, .Linsw_bad_align_msg
+ mov r2, lr
+ b panic
+.Linsw_bad_align_msg:
+ .asciz "insw: bad buffer alignment (0x%p, lr=0x%08lX)\n"
+ .align
+
+.Linsw_align: tst r1, #1
+ bne .Linsw_bad_alignment
+
+ ldr r3, [r0]
+ strb r3, [r1], #1
+ mov r3, r3, lsr #8
+ strb r3, [r1], #1
+
+ subs r2, r2, #1
+ moveq pc, lr
+
+ENTRY(__raw_readsw)
+ teq r2, #0 @ do we have to check for the zero len?
+ moveq pc, lr
+ tst r1, #3
+ bne .Linsw_align
+
+.Linsw_aligned: mov ip, #0xff
+ orr ip, ip, ip, lsl #8
+ stmfd sp!, {r4, r5, r6, lr}
+
+ subs r2, r2, #8
+ bmi .Lno_insw_8
+
+.Linsw_8_lp: ldr r3, [r0]
+ and r3, r3, ip
+ ldr r4, [r0]
+ orr r3, r3, r4, lsl #16
+
+ ldr r4, [r0]
+ and r4, r4, ip
+ ldr r5, [r0]
+ orr r4, r4, r5, lsl #16
+
+ ldr r5, [r0]
+ and r5, r5, ip
+ ldr r6, [r0]
+ orr r5, r5, r6, lsl #16
+
+ ldr r6, [r0]
+ and r6, r6, ip
+ ldr lr, [r0]
+ orr r6, r6, lr, lsl #16
+
+ stmia r1!, {r3 - r6}
+
+ subs r2, r2, #8
+ bpl .Linsw_8_lp
+
+ tst r2, #7
+ ldmeqfd sp!, {r4, r5, r6, pc}
+
+.Lno_insw_8: tst r2, #4
+ beq .Lno_insw_4
+
+ ldr r3, [r0]
+ and r3, r3, ip
+ ldr r4, [r0]
+ orr r3, r3, r4, lsl #16
+
+ ldr r4, [r0]
+ and r4, r4, ip
+ ldr r5, [r0]
+ orr r4, r4, r5, lsl #16
+
+ stmia r1!, {r3, r4}
+
+.Lno_insw_4: tst r2, #2
+ beq .Lno_insw_2
+
+ ldr r3, [r0]
+ and r3, r3, ip
+ ldr r4, [r0]
+ orr r3, r3, r4, lsl #16
+
+ str r3, [r1], #4
+
+.Lno_insw_2: tst r2, #1
+ ldrne r3, [r0]
+ strneb r3, [r1], #1
+ movne r3, r3, lsr #8
+ strneb r3, [r1]
+
+ ldmfd sp!, {r4, r5, r6, pc}
+
+
diff --git a/arch/arm/lib/io-writesw-armv3.S b/arch/arm/lib/io-writesw-armv3.S
new file mode 100644
index 00000000000..49b800419e3
--- /dev/null
+++ b/arch/arm/lib/io-writesw-armv3.S
@@ -0,0 +1,126 @@
+/*
+ * linux/arch/arm/lib/io-writesw-armv3.S
+ *
+ * Copyright (C) 1995-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+.Loutsw_bad_alignment:
+ adr r0, .Loutsw_bad_align_msg
+ mov r2, lr
+ b panic
+.Loutsw_bad_align_msg:
+ .asciz "outsw: bad buffer alignment (0x%p, lr=0x%08lX)\n"
+ .align
+
+.Loutsw_align: tst r1, #1
+ bne .Loutsw_bad_alignment
+
+ add r1, r1, #2
+
+ ldr r3, [r1, #-4]
+ mov r3, r3, lsr #16
+ orr r3, r3, r3, lsl #16
+ str r3, [r0]
+ subs r2, r2, #1
+ moveq pc, lr
+
+ENTRY(__raw_writesw)
+ teq r2, #0 @ do we have to check for the zero len?
+ moveq pc, lr
+ tst r1, #3
+ bne .Loutsw_align
+
+ stmfd sp!, {r4, r5, r6, lr}
+
+ subs r2, r2, #8
+ bmi .Lno_outsw_8
+
+.Loutsw_8_lp: ldmia r1!, {r3, r4, r5, r6}
+
+ mov ip, r3, lsl #16
+ orr ip, ip, ip, lsr #16
+ str ip, [r0]
+
+ mov ip, r3, lsr #16
+ orr ip, ip, ip, lsl #16
+ str ip, [r0]
+
+ mov ip, r4, lsl #16
+ orr ip, ip, ip, lsr #16
+ str ip, [r0]
+
+ mov ip, r4, lsr #16
+ orr ip, ip, ip, lsl #16
+ str ip, [r0]
+
+ mov ip, r5, lsl #16
+ orr ip, ip, ip, lsr #16
+ str ip, [r0]
+
+ mov ip, r5, lsr #16
+ orr ip, ip, ip, lsl #16
+ str ip, [r0]
+
+ mov ip, r6, lsl #16
+ orr ip, ip, ip, lsr #16
+ str ip, [r0]
+
+ mov ip, r6, lsr #16
+ orr ip, ip, ip, lsl #16
+ str ip, [r0]
+
+ subs r2, r2, #8
+ bpl .Loutsw_8_lp
+
+ tst r2, #7
+ ldmeqfd sp!, {r4, r5, r6, pc}
+
+.Lno_outsw_8: tst r2, #4
+ beq .Lno_outsw_4
+
+ ldmia r1!, {r3, r4}
+
+ mov ip, r3, lsl #16
+ orr ip, ip, ip, lsr #16
+ str ip, [r0]
+
+ mov ip, r3, lsr #16
+ orr ip, ip, ip, lsl #16
+ str ip, [r0]
+
+ mov ip, r4, lsl #16
+ orr ip, ip, ip, lsr #16
+ str ip, [r0]
+
+ mov ip, r4, lsr #16
+ orr ip, ip, ip, lsl #16
+ str ip, [r0]
+
+.Lno_outsw_4: tst r2, #2
+ beq .Lno_outsw_2
+
+ ldr r3, [r1], #4
+
+ mov ip, r3, lsl #16
+ orr ip, ip, ip, lsr #16
+ str ip, [r0]
+
+ mov ip, r3, lsr #16
+ orr ip, ip, ip, lsl #16
+ str ip, [r0]
+
+.Lno_outsw_2: tst r2, #1
+
+ ldrne r3, [r1]
+
+ movne ip, r3, lsl #16
+ orrne ip, ip, ip, lsr #16
+ strne ip, [r0]
+
+ ldmfd sp!, {r4, r5, r6, pc}
diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
index 7db25990c58..3d73dcb959b 100644
--- a/arch/arm/lib/putuser.S
+++ b/arch/arm/lib/putuser.S
@@ -16,6 +16,7 @@
* __put_user_X
*
* Inputs: r0 contains the address
+ * r1 contains the address limit, which must be preserved
* r2, r3 contains the value
* Outputs: r0 is the error code
* lr corrupted
@@ -27,16 +28,19 @@
* Note also that it is intended that __put_user_bad is not global.
*/
#include <linux/linkage.h>
+#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/domain.h>
ENTRY(__put_user_1)
+ check_uaccess r0, 1, r1, ip, __put_user_bad
1: TUSER(strb) r2, [r0]
mov r0, #0
mov pc, lr
ENDPROC(__put_user_1)
ENTRY(__put_user_2)
+ check_uaccess r0, 2, r1, ip, __put_user_bad
mov ip, r2, lsr #8
#ifdef CONFIG_THUMB2_KERNEL
#ifndef __ARMEB__
@@ -60,12 +64,14 @@ ENTRY(__put_user_2)
ENDPROC(__put_user_2)
ENTRY(__put_user_4)
+ check_uaccess r0, 4, r1, ip, __put_user_bad
4: TUSER(str) r2, [r0]
mov r0, #0
mov pc, lr
ENDPROC(__put_user_4)
ENTRY(__put_user_8)
+ check_uaccess r0, 8, r1, ip, __put_user_bad
#ifdef CONFIG_THUMB2_KERNEL
5: TUSER(str) r2, [r0]
6: TUSER(str) r3, [r0, #4]
diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
new file mode 100644
index 00000000000..5c908b1cb8e
--- /dev/null
+++ b/arch/arm/lib/uaccess.S
@@ -0,0 +1,564 @@
+/*
+ * linux/arch/arm/lib/uaccess.S
+ *
+ * Copyright (C) 1995, 1996,1997,1998 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Routines to block copy data to/from user memory
+ * These are highly optimised both for the 4k page size
+ * and for various alignments.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/errno.h>
+#include <asm/domain.h>
+
+ .text
+
+#define PAGE_SHIFT 12
+
+/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
+ * Purpose : copy a block to user memory from kernel memory
+ * Params : to - user memory
+ * : from - kernel memory
+ * : n - number of bytes to copy
+ * Returns : Number of bytes NOT copied.
+ */
+
+.Lc2u_dest_not_aligned:
+ rsb ip, ip, #4
+ cmp ip, #2
+ ldrb r3, [r1], #1
+USER( TUSER( strb) r3, [r0], #1) @ May fault
+ ldrgeb r3, [r1], #1
+USER( TUSER( strgeb) r3, [r0], #1) @ May fault
+ ldrgtb r3, [r1], #1
+USER( TUSER( strgtb) r3, [r0], #1) @ May fault
+ sub r2, r2, ip
+ b .Lc2u_dest_aligned
+
+ENTRY(__copy_to_user)
+ stmfd sp!, {r2, r4 - r7, lr}
+ cmp r2, #4
+ blt .Lc2u_not_enough
+ ands ip, r0, #3
+ bne .Lc2u_dest_not_aligned
+.Lc2u_dest_aligned:
+
+ ands ip, r1, #3
+ bne .Lc2u_src_not_aligned
+/*
+ * Seeing as there has to be at least 8 bytes to copy, we can
+ * copy one word, and force a user-mode page fault...
+ */
+
+.Lc2u_0fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lc2u_0nowords
+ ldr r3, [r1], #4
+USER( TUSER( str) r3, [r0], #4) @ May fault
+ mov ip, r0, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .Lc2u_0fupi
+/*
+ * ip = max no. of bytes to copy before needing another "strt" insn
+ */
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #32
+ blt .Lc2u_0rem8lp
+
+.Lc2u_0cpy8lp: ldmia r1!, {r3 - r6}
+ stmia r0!, {r3 - r6} @ Shouldnt fault
+ ldmia r1!, {r3 - r6}
+ subs ip, ip, #32
+ stmia r0!, {r3 - r6} @ Shouldnt fault
+ bpl .Lc2u_0cpy8lp
+
+.Lc2u_0rem8lp: cmn ip, #16
+ ldmgeia r1!, {r3 - r6}
+ stmgeia r0!, {r3 - r6} @ Shouldnt fault
+ tst ip, #8
+ ldmneia r1!, {r3 - r4}
+ stmneia r0!, {r3 - r4} @ Shouldnt fault
+ tst ip, #4
+ ldrne r3, [r1], #4
+ TUSER( strne) r3, [r0], #4 @ Shouldnt fault
+ ands ip, ip, #3
+ beq .Lc2u_0fupi
+.Lc2u_0nowords: teq ip, #0
+ beq .Lc2u_finished
+.Lc2u_nowords: cmp ip, #2
+ ldrb r3, [r1], #1
+USER( TUSER( strb) r3, [r0], #1) @ May fault
+ ldrgeb r3, [r1], #1
+USER( TUSER( strgeb) r3, [r0], #1) @ May fault
+ ldrgtb r3, [r1], #1
+USER( TUSER( strgtb) r3, [r0], #1) @ May fault
+ b .Lc2u_finished
+
+.Lc2u_not_enough:
+ movs ip, r2
+ bne .Lc2u_nowords
+.Lc2u_finished: mov r0, #0
+ ldmfd sp!, {r2, r4 - r7, pc}
+
+.Lc2u_src_not_aligned:
+ bic r1, r1, #3
+ ldr r7, [r1], #4
+ cmp ip, #2
+ bgt .Lc2u_3fupi
+ beq .Lc2u_2fupi
+.Lc2u_1fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lc2u_1nowords
+ mov r3, r7, pull #8
+ ldr r7, [r1], #4
+ orr r3, r3, r7, push #24
+USER( TUSER( str) r3, [r0], #4) @ May fault
+ mov ip, r0, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .Lc2u_1fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .Lc2u_1rem8lp
+
+.Lc2u_1cpy8lp: mov r3, r7, pull #8
+ ldmia r1!, {r4 - r7}
+ subs ip, ip, #16
+ orr r3, r3, r4, push #24
+ mov r4, r4, pull #8
+ orr r4, r4, r5, push #24
+ mov r5, r5, pull #8
+ orr r5, r5, r6, push #24
+ mov r6, r6, pull #8
+ orr r6, r6, r7, push #24
+ stmia r0!, {r3 - r6} @ Shouldnt fault
+ bpl .Lc2u_1cpy8lp
+
+.Lc2u_1rem8lp: tst ip, #8
+ movne r3, r7, pull #8
+ ldmneia r1!, {r4, r7}
+ orrne r3, r3, r4, push #24
+ movne r4, r4, pull #8
+ orrne r4, r4, r7, push #24
+ stmneia r0!, {r3 - r4} @ Shouldnt fault
+ tst ip, #4
+ movne r3, r7, pull #8
+ ldrne r7, [r1], #4
+ orrne r3, r3, r7, push #24
+ TUSER( strne) r3, [r0], #4 @ Shouldnt fault
+ ands ip, ip, #3
+ beq .Lc2u_1fupi
+.Lc2u_1nowords: mov r3, r7, get_byte_1
+ teq ip, #0
+ beq .Lc2u_finished
+ cmp ip, #2
+USER( TUSER( strb) r3, [r0], #1) @ May fault
+ movge r3, r7, get_byte_2
+USER( TUSER( strgeb) r3, [r0], #1) @ May fault
+ movgt r3, r7, get_byte_3
+USER( TUSER( strgtb) r3, [r0], #1) @ May fault
+ b .Lc2u_finished
+
+.Lc2u_2fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lc2u_2nowords
+ mov r3, r7, pull #16
+ ldr r7, [r1], #4
+ orr r3, r3, r7, push #16
+USER( TUSER( str) r3, [r0], #4) @ May fault
+ mov ip, r0, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .Lc2u_2fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .Lc2u_2rem8lp
+
+.Lc2u_2cpy8lp: mov r3, r7, pull #16
+ ldmia r1!, {r4 - r7}
+ subs ip, ip, #16
+ orr r3, r3, r4, push #16
+ mov r4, r4, pull #16
+ orr r4, r4, r5, push #16
+ mov r5, r5, pull #16
+ orr r5, r5, r6, push #16
+ mov r6, r6, pull #16
+ orr r6, r6, r7, push #16
+ stmia r0!, {r3 - r6} @ Shouldnt fault
+ bpl .Lc2u_2cpy8lp
+
+.Lc2u_2rem8lp: tst ip, #8
+ movne r3, r7, pull #16
+ ldmneia r1!, {r4, r7}
+ orrne r3, r3, r4, push #16
+ movne r4, r4, pull #16
+ orrne r4, r4, r7, push #16
+ stmneia r0!, {r3 - r4} @ Shouldnt fault
+ tst ip, #4
+ movne r3, r7, pull #16
+ ldrne r7, [r1], #4
+ orrne r3, r3, r7, push #16
+ TUSER( strne) r3, [r0], #4 @ Shouldnt fault
+ ands ip, ip, #3
+ beq .Lc2u_2fupi
+.Lc2u_2nowords: mov r3, r7, get_byte_2
+ teq ip, #0
+ beq .Lc2u_finished
+ cmp ip, #2
+USER( TUSER( strb) r3, [r0], #1) @ May fault
+ movge r3, r7, get_byte_3
+USER( TUSER( strgeb) r3, [r0], #1) @ May fault
+ ldrgtb r3, [r1], #0
+USER( TUSER( strgtb) r3, [r0], #1) @ May fault
+ b .Lc2u_finished
+
+.Lc2u_3fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lc2u_3nowords
+ mov r3, r7, pull #24
+ ldr r7, [r1], #4
+ orr r3, r3, r7, push #8
+USER( TUSER( str) r3, [r0], #4) @ May fault
+ mov ip, r0, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .Lc2u_3fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .Lc2u_3rem8lp
+
+.Lc2u_3cpy8lp: mov r3, r7, pull #24
+ ldmia r1!, {r4 - r7}
+ subs ip, ip, #16
+ orr r3, r3, r4, push #8
+ mov r4, r4, pull #24
+ orr r4, r4, r5, push #8
+ mov r5, r5, pull #24
+ orr r5, r5, r6, push #8
+ mov r6, r6, pull #24
+ orr r6, r6, r7, push #8
+ stmia r0!, {r3 - r6} @ Shouldnt fault
+ bpl .Lc2u_3cpy8lp
+
+.Lc2u_3rem8lp: tst ip, #8
+ movne r3, r7, pull #24
+ ldmneia r1!, {r4, r7}
+ orrne r3, r3, r4, push #8
+ movne r4, r4, pull #24
+ orrne r4, r4, r7, push #8
+ stmneia r0!, {r3 - r4} @ Shouldnt fault
+ tst ip, #4
+ movne r3, r7, pull #24
+ ldrne r7, [r1], #4
+ orrne r3, r3, r7, push #8
+ TUSER( strne) r3, [r0], #4 @ Shouldnt fault
+ ands ip, ip, #3
+ beq .Lc2u_3fupi
+.Lc2u_3nowords: mov r3, r7, get_byte_3
+ teq ip, #0
+ beq .Lc2u_finished
+ cmp ip, #2
+USER( TUSER( strb) r3, [r0], #1) @ May fault
+ ldrgeb r3, [r1], #1
+USER( TUSER( strgeb) r3, [r0], #1) @ May fault
+ ldrgtb r3, [r1], #0
+USER( TUSER( strgtb) r3, [r0], #1) @ May fault
+ b .Lc2u_finished
+ENDPROC(__copy_to_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+9001: ldmfd sp!, {r0, r4 - r7, pc}
+ .popsection
+
+/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
+ * Purpose : copy a block from user memory to kernel memory
+ * Params : to - kernel memory
+ * : from - user memory
+ * : n - number of bytes to copy
+ * Returns : Number of bytes NOT copied.
+ */
+.Lcfu_dest_not_aligned:
+ rsb ip, ip, #4
+ cmp ip, #2
+USER( TUSER( ldrb) r3, [r1], #1) @ May fault
+ strb r3, [r0], #1
+USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
+ strgeb r3, [r0], #1
+USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
+ strgtb r3, [r0], #1
+ sub r2, r2, ip
+ b .Lcfu_dest_aligned
+
+ENTRY(__copy_from_user)
+ stmfd sp!, {r0, r2, r4 - r7, lr}
+ cmp r2, #4
+ blt .Lcfu_not_enough
+ ands ip, r0, #3
+ bne .Lcfu_dest_not_aligned
+.Lcfu_dest_aligned:
+ ands ip, r1, #3
+ bne .Lcfu_src_not_aligned
+
+/*
+ * Seeing as there has to be at least 8 bytes to copy, we can
+ * copy one word, and force a user-mode page fault...
+ */
+
+.Lcfu_0fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lcfu_0nowords
+USER( TUSER( ldr) r3, [r1], #4)
+ str r3, [r0], #4
+ mov ip, r1, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .Lcfu_0fupi
+/*
+ * ip = max no. of bytes to copy before needing another "ldrt" insn
+ */
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #32
+ blt .Lcfu_0rem8lp
+
+.Lcfu_0cpy8lp: ldmia r1!, {r3 - r6} @ Shouldnt fault
+ stmia r0!, {r3 - r6}
+ ldmia r1!, {r3 - r6} @ Shouldnt fault
+ subs ip, ip, #32
+ stmia r0!, {r3 - r6}
+ bpl .Lcfu_0cpy8lp
+
+.Lcfu_0rem8lp: cmn ip, #16
+ ldmgeia r1!, {r3 - r6} @ Shouldnt fault
+ stmgeia r0!, {r3 - r6}
+ tst ip, #8
+ ldmneia r1!, {r3 - r4} @ Shouldnt fault
+ stmneia r0!, {r3 - r4}
+ tst ip, #4
+ TUSER( ldrne) r3, [r1], #4 @ Shouldnt fault
+ strne r3, [r0], #4
+ ands ip, ip, #3
+ beq .Lcfu_0fupi
+.Lcfu_0nowords: teq ip, #0
+ beq .Lcfu_finished
+.Lcfu_nowords: cmp ip, #2
+USER( TUSER( ldrb) r3, [r1], #1) @ May fault
+ strb r3, [r0], #1
+USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
+ strgeb r3, [r0], #1
+USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
+ strgtb r3, [r0], #1
+ b .Lcfu_finished
+
+.Lcfu_not_enough:
+ movs ip, r2
+ bne .Lcfu_nowords
+.Lcfu_finished: mov r0, #0
+ add sp, sp, #8
+ ldmfd sp!, {r4 - r7, pc}
+
+.Lcfu_src_not_aligned:
+ bic r1, r1, #3
+USER( TUSER( ldr) r7, [r1], #4) @ May fault
+ cmp ip, #2
+ bgt .Lcfu_3fupi
+ beq .Lcfu_2fupi
+.Lcfu_1fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lcfu_1nowords
+ mov r3, r7, pull #8
+USER( TUSER( ldr) r7, [r1], #4) @ May fault
+ orr r3, r3, r7, push #24
+ str r3, [r0], #4
+ mov ip, r1, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .Lcfu_1fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .Lcfu_1rem8lp
+
+.Lcfu_1cpy8lp: mov r3, r7, pull #8
+ ldmia r1!, {r4 - r7} @ Shouldnt fault
+ subs ip, ip, #16
+ orr r3, r3, r4, push #24
+ mov r4, r4, pull #8
+ orr r4, r4, r5, push #24
+ mov r5, r5, pull #8
+ orr r5, r5, r6, push #24
+ mov r6, r6, pull #8
+ orr r6, r6, r7, push #24
+ stmia r0!, {r3 - r6}
+ bpl .Lcfu_1cpy8lp
+
+.Lcfu_1rem8lp: tst ip, #8
+ movne r3, r7, pull #8
+ ldmneia r1!, {r4, r7} @ Shouldnt fault
+ orrne r3, r3, r4, push #24
+ movne r4, r4, pull #8
+ orrne r4, r4, r7, push #24
+ stmneia r0!, {r3 - r4}
+ tst ip, #4
+ movne r3, r7, pull #8
+USER( TUSER( ldrne) r7, [r1], #4) @ May fault
+ orrne r3, r3, r7, push #24
+ strne r3, [r0], #4
+ ands ip, ip, #3
+ beq .Lcfu_1fupi
+.Lcfu_1nowords: mov r3, r7, get_byte_1
+ teq ip, #0
+ beq .Lcfu_finished
+ cmp ip, #2
+ strb r3, [r0], #1
+ movge r3, r7, get_byte_2
+ strgeb r3, [r0], #1
+ movgt r3, r7, get_byte_3
+ strgtb r3, [r0], #1
+ b .Lcfu_finished
+
+.Lcfu_2fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lcfu_2nowords
+ mov r3, r7, pull #16
+USER( TUSER( ldr) r7, [r1], #4) @ May fault
+ orr r3, r3, r7, push #16
+ str r3, [r0], #4
+ mov ip, r1, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .Lcfu_2fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .Lcfu_2rem8lp
+
+
+.Lcfu_2cpy8lp: mov r3, r7, pull #16
+ ldmia r1!, {r4 - r7} @ Shouldnt fault
+ subs ip, ip, #16
+ orr r3, r3, r4, push #16
+ mov r4, r4, pull #16
+ orr r4, r4, r5, push #16
+ mov r5, r5, pull #16
+ orr r5, r5, r6, push #16
+ mov r6, r6, pull #16
+ orr r6, r6, r7, push #16
+ stmia r0!, {r3 - r6}
+ bpl .Lcfu_2cpy8lp
+
+.Lcfu_2rem8lp: tst ip, #8
+ movne r3, r7, pull #16
+ ldmneia r1!, {r4, r7} @ Shouldnt fault
+ orrne r3, r3, r4, push #16
+ movne r4, r4, pull #16
+ orrne r4, r4, r7, push #16
+ stmneia r0!, {r3 - r4}
+ tst ip, #4
+ movne r3, r7, pull #16
+USER( TUSER( ldrne) r7, [r1], #4) @ May fault
+ orrne r3, r3, r7, push #16
+ strne r3, [r0], #4
+ ands ip, ip, #3
+ beq .Lcfu_2fupi
+.Lcfu_2nowords: mov r3, r7, get_byte_2
+ teq ip, #0
+ beq .Lcfu_finished
+ cmp ip, #2
+ strb r3, [r0], #1
+ movge r3, r7, get_byte_3
+ strgeb r3, [r0], #1
+USER( TUSER( ldrgtb) r3, [r1], #0) @ May fault
+ strgtb r3, [r0], #1
+ b .Lcfu_finished
+
+.Lcfu_3fupi: subs r2, r2, #4
+ addmi ip, r2, #4
+ bmi .Lcfu_3nowords
+ mov r3, r7, pull #24
+USER( TUSER( ldr) r7, [r1], #4) @ May fault
+ orr r3, r3, r7, push #8
+ str r3, [r0], #4
+ mov ip, r1, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .Lcfu_3fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .Lcfu_3rem8lp
+
+.Lcfu_3cpy8lp: mov r3, r7, pull #24
+ ldmia r1!, {r4 - r7} @ Shouldnt fault
+ orr r3, r3, r4, push #8
+ mov r4, r4, pull #24
+ orr r4, r4, r5, push #8
+ mov r5, r5, pull #24
+ orr r5, r5, r6, push #8
+ mov r6, r6, pull #24
+ orr r6, r6, r7, push #8
+ stmia r0!, {r3 - r6}
+ subs ip, ip, #16
+ bpl .Lcfu_3cpy8lp
+
+.Lcfu_3rem8lp: tst ip, #8
+ movne r3, r7, pull #24
+ ldmneia r1!, {r4, r7} @ Shouldnt fault
+ orrne r3, r3, r4, push #8
+ movne r4, r4, pull #24
+ orrne r4, r4, r7, push #8
+ stmneia r0!, {r3 - r4}
+ tst ip, #4
+ movne r3, r7, pull #24
+USER( TUSER( ldrne) r7, [r1], #4) @ May fault
+ orrne r3, r3, r7, push #8
+ strne r3, [r0], #4
+ ands ip, ip, #3
+ beq .Lcfu_3fupi
+.Lcfu_3nowords: mov r3, r7, get_byte_3
+ teq ip, #0
+ beq .Lcfu_finished
+ cmp ip, #2
+ strb r3, [r0], #1
+USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
+ strgeb r3, [r0], #1
+USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
+ strgtb r3, [r0], #1
+ b .Lcfu_finished
+ENDPROC(__copy_from_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+ /*
+ * We took an exception. r0 contains a pointer to
+ * the byte not copied.
+ */
+9001: ldr r2, [sp], #4 @ void *to
+ sub r2, r0, r2 @ bytes copied
+ ldr r1, [sp], #4 @ unsigned long count
+ subs r4, r1, r2 @ bytes left to copy
+ movne r1, r4
+ blne __memzero
+ mov r0, r4
+ ldmfd sp!, {r4 - r7, pc}
+ .popsection
+
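
As the header comments in the new file state, both routines return the number of bytes NOT copied; the usual caller pattern treats any non-zero remainder as a fault. An illustrative wrapper (function and buffer names are hypothetical):

        #include <linux/types.h>
        #include <linux/errno.h>
        #include <linux/uaccess.h>

        static long give_to_user(void __user *ubuf, const void *kbuf, size_t len)
        {
                /* copy_to_user() returns the number of bytes left uncopied */
                if (copy_to_user(ubuf, kbuf, len))
                        return -EFAULT;

                return len;
        }
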
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index 104ca40d8d1..aaa443b48c9 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -197,7 +197,7 @@ void __init at91rm9200_timer_init(void)
at91_st_read(AT91_ST_SR);
/* Make IRQs happen for the system timer */
- setup_irq(AT91_ID_SYS, &at91rm9200_timer_irq);
+ setup_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq);
/* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used
* directly for the clocksource and all clockevents, after adjusting
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
index 7b9c2ba396e..bce572a530e 100644
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -726,6 +726,8 @@ static struct resource rtt_resources[] = {
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
},
};
@@ -744,10 +746,12 @@ static void __init at91_add_device_rtt_rtc(void)
* The second resource is needed:
* GPBR will serve as the storage for RTC time offset
*/
- at91sam9260_rtt_device.num_resources = 2;
+ at91sam9260_rtt_device.num_resources = 3;
rtt_resources[1].start = AT91SAM9260_BASE_GPBR +
4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
rtt_resources[1].end = rtt_resources[1].start + 3;
+ rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+ rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index 8df5c1bdff9..bc2590d712d 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -609,6 +609,8 @@ static struct resource rtt_resources[] = {
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
}
};
@@ -626,10 +628,12 @@ static void __init at91_add_device_rtt_rtc(void)
* The second resource is needed:
* GPBR will serve as the storage for RTC time offset
*/
- at91sam9261_rtt_device.num_resources = 2;
+ at91sam9261_rtt_device.num_resources = 3;
rtt_resources[1].start = AT91SAM9261_BASE_GPBR +
4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
rtt_resources[1].end = rtt_resources[1].start + 3;
+ rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+ rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index eb6bbf86fb9..9b6ca734f1a 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -990,6 +990,8 @@ static struct resource rtt0_resources[] = {
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
}
};
@@ -1006,6 +1008,8 @@ static struct resource rtt1_resources[] = {
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
}
};
@@ -1027,14 +1031,14 @@ static void __init at91_add_device_rtt_rtc(void)
* The second resource is needed only for the chosen RTT:
* GPBR will serve as the storage for RTC time offset
*/
- at91sam9263_rtt0_device.num_resources = 2;
+ at91sam9263_rtt0_device.num_resources = 3;
at91sam9263_rtt1_device.num_resources = 1;
pdev = &at91sam9263_rtt0_device;
r = rtt0_resources;
break;
case 1:
at91sam9263_rtt0_device.num_resources = 1;
- at91sam9263_rtt1_device.num_resources = 2;
+ at91sam9263_rtt1_device.num_resources = 3;
pdev = &at91sam9263_rtt1_device;
r = rtt1_resources;
break;
@@ -1047,6 +1051,8 @@ static void __init at91_add_device_rtt_rtc(void)
pdev->name = "rtc-at91sam9";
r[1].start = AT91SAM9263_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
r[1].end = r[1].start + 3;
+ r[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+ r[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 06073996a38..1b47319ca00 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -1293,6 +1293,8 @@ static struct resource rtt_resources[] = {
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
}
};
@@ -1310,10 +1312,12 @@ static void __init at91_add_device_rtt_rtc(void)
* The second resource is needed:
* GPBR will serve as the storage for RTC time offset
*/
- at91sam9g45_rtt_device.num_resources = 2;
+ at91sam9g45_rtt_device.num_resources = 3;
rtt_resources[1].start = AT91SAM9G45_BASE_GPBR +
4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
rtt_resources[1].end = rtt_resources[1].start + 3;
+ rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+ rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
index f09fff93217..b3d365dadef 100644
--- a/arch/arm/mach-at91/at91sam9rl_devices.c
+++ b/arch/arm/mach-at91/at91sam9rl_devices.c
@@ -688,6 +688,8 @@ static struct resource rtt_resources[] = {
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
}
};
@@ -705,10 +707,12 @@ static void __init at91_add_device_rtt_rtc(void)
* The second resource is needed:
* GPBR will serve as the storage for RTC time offset
*/
- at91sam9rl_rtt_device.num_resources = 2;
+ at91sam9rl_rtt_device.num_resources = 3;
rtt_resources[1].start = AT91SAM9RL_BASE_GPBR +
4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
rtt_resources[1].end = rtt_resources[1].start + 3;
+ rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+ rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)
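
The extra IORESOURCE_IRQ entry added to each of the AT91 board files lets the rtc-at91sam9 driver obtain its interrupt from the platform device rather than assuming AT91_ID_SYS. A sketch of the consumer side; my_rtc_probe() and my_rtc_interrupt() are placeholders, while platform_get_irq() and request_irq() are standard kernel API:

        #include <linux/platform_device.h>
        #include <linux/interrupt.h>

        static irqreturn_t my_rtc_interrupt(int irq, void *dev_id);

        static int my_rtc_probe(struct platform_device *pdev)
        {
                int irq = platform_get_irq(pdev, 0);    /* first IRQ resource */

                if (irq < 0)
                        return irq;                     /* no IRQ resource registered */

                /* the AT91 system interrupt is shared with other system peripherals */
                return request_irq(irq, my_rtc_interrupt, IRQF_SHARED, "rtc", pdev);
        }
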
diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c
index de2ec6b8fea..188c82971eb 100644
--- a/arch/arm/mach-at91/clock.c
+++ b/arch/arm/mach-at91/clock.c
@@ -63,6 +63,12 @@ EXPORT_SYMBOL_GPL(at91_pmc_base);
#define cpu_has_300M_plla() (cpu_is_at91sam9g10())
+#define cpu_has_240M_plla() (cpu_is_at91sam9261() \
+ || cpu_is_at91sam9263() \
+ || cpu_is_at91sam9rl())
+
+#define cpu_has_210M_plla() (cpu_is_at91sam9260())
+
#define cpu_has_pllb() (!(cpu_is_at91sam9rl() \
|| cpu_is_at91sam9g45() \
|| cpu_is_at91sam9x5() \
@@ -706,6 +712,12 @@ static int __init at91_pmc_init(unsigned long main_clock)
} else if (cpu_has_800M_plla()) {
if (plla.rate_hz > 800000000)
pll_overclock = true;
+ } else if (cpu_has_240M_plla()) {
+ if (plla.rate_hz > 240000000)
+ pll_overclock = true;
+ } else if (cpu_has_210M_plla()) {
+ if (plla.rate_hz > 210000000)
+ pll_overclock = true;
} else {
if (plla.rate_hz > 209000000)
pll_overclock = true;
diff --git a/arch/arm/mach-bcmring/arch.c b/arch/arm/mach-bcmring/arch.c
index 45c97b1ee9b..76e79532356 100644
--- a/arch/arm/mach-bcmring/arch.c
+++ b/arch/arm/mach-bcmring/arch.c
@@ -29,7 +29,6 @@
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/mach/time.h>
-#include <asm/pmu.h>
#include <asm/mach/arch.h>
#include <mach/dma.h>
@@ -116,7 +115,7 @@ static struct resource pmu_resource = {
static struct platform_device pmu_device = {
.name = "arm-pmu",
- .id = ARM_PMU_DEVICE_CPU,
+ .id = -1,
.resource = &pmu_resource,
.num_resources = 1,
};
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 5de69f2fcca..f6b9fc70161 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -162,38 +162,6 @@ static void __init davinci_ntosd2_map_io(void)
dm644x_init();
}
-/*
- I2C initialization
-*/
-static struct davinci_i2c_platform_data ntosd2_i2c_pdata = {
- .bus_freq = 20 /* kHz */,
- .bus_delay = 100 /* usec */,
-};
-
-static struct i2c_board_info __initdata ntosd2_i2c_info[] = {
-};
-
-static int ntosd2_init_i2c(void)
-{
- int status;
-
- davinci_init_i2c(&ntosd2_i2c_pdata);
- status = gpio_request(NTOSD2_MSP430_IRQ, ntosd2_i2c_info[0].type);
- if (status == 0) {
- status = gpio_direction_input(NTOSD2_MSP430_IRQ);
- if (status == 0) {
- status = gpio_to_irq(NTOSD2_MSP430_IRQ);
- if (status > 0) {
- ntosd2_i2c_info[0].irq = status;
- i2c_register_board_info(1,
- ntosd2_i2c_info,
- ARRAY_SIZE(ntosd2_i2c_info));
- }
- }
- }
- return status;
-}
-
static struct davinci_mmc_config davinci_ntosd2_mmc_config = {
.wires = 4,
.version = MMC_CTLR_VERSION_1
@@ -218,7 +186,6 @@ static __init void davinci_ntosd2_init(void)
{
struct clk *aemif_clk;
struct davinci_soc_info *soc_info = &davinci_soc_info;
- int status;
aemif_clk = clk_get(NULL, "aemif");
clk_enable(aemif_clk);
@@ -242,12 +209,6 @@ static __init void davinci_ntosd2_init(void)
platform_add_devices(davinci_ntosd2_devices,
ARRAY_SIZE(davinci_ntosd2_devices));
- /* Initialize I2C interface specific for this board */
- status = ntosd2_init_i2c();
- if (status < 0)
- pr_warning("davinci_ntosd2_init: msp430 irq setup failed:"
- " %d\n", status);
-
davinci_serial_init(&uart_config);
dm644x_init_asp(&dm644x_ntosd2_snd_data);
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index 4db5de54b6a..6321567d8ea 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -102,7 +102,8 @@ void __init dove_ehci1_init(void)
void __init dove_ge00_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge00_init(eth_data, DOVE_GE00_PHYS_BASE,
- IRQ_DOVE_GE00_SUM, IRQ_DOVE_GE00_ERR);
+ IRQ_DOVE_GE00_SUM, IRQ_DOVE_GE00_ERR,
+ 1600);
}
/*****************************************************************************
diff --git a/arch/arm/mach-exynos/mach-origen.c b/arch/arm/mach-exynos/mach-origen.c
index 5ca80307d6d..4e574c24581 100644
--- a/arch/arm/mach-exynos/mach-origen.c
+++ b/arch/arm/mach-exynos/mach-origen.c
@@ -42,6 +42,7 @@
#include <plat/backlight.h>
#include <plat/fb.h>
#include <plat/mfc.h>
+#include <plat/hdmi.h>
#include <mach/ohci.h>
#include <mach/map.h>
@@ -734,6 +735,11 @@ static void __init origen_bt_setup(void)
s3c_gpio_setpull(EXYNOS4_GPX2(2), S3C_GPIO_PULL_NONE);
}
+/* I2C module and id for HDMIPHY */
+static struct i2c_board_info hdmiphy_info = {
+ I2C_BOARD_INFO("hdmiphy-exynos4210", 0x38),
+};
+
static void s5p_tv_setup(void)
{
/* Direct HPD to HDMI chip */
@@ -781,6 +787,7 @@ static void __init origen_machine_init(void)
s5p_tv_setup();
s5p_i2c_hdmiphy_set_platdata(NULL);
+ s5p_hdmi_set_platdata(&hdmiphy_info, NULL, 0);
#ifdef CONFIG_DRM_EXYNOS
s5p_device_fimd0.dev.platform_data = &drm_fimd_pdata;
diff --git a/arch/arm/mach-exynos/mach-smdkv310.c b/arch/arm/mach-exynos/mach-smdkv310.c
index 3cfa688d274..73f2bce097e 100644
--- a/arch/arm/mach-exynos/mach-smdkv310.c
+++ b/arch/arm/mach-exynos/mach-smdkv310.c
@@ -40,6 +40,7 @@
#include <plat/mfc.h>
#include <plat/ehci.h>
#include <plat/clock.h>
+#include <plat/hdmi.h>
#include <mach/map.h>
#include <mach/ohci.h>
@@ -354,6 +355,11 @@ static struct platform_pwm_backlight_data smdkv310_bl_data = {
.pwm_period_ns = 1000,
};
+/* I2C module and id for HDMIPHY */
+static struct i2c_board_info hdmiphy_info = {
+ I2C_BOARD_INFO("hdmiphy-exynos4210", 0x38),
+};
+
static void s5p_tv_setup(void)
{
/* direct HPD to HDMI chip */
@@ -388,6 +394,7 @@ static void __init smdkv310_machine_init(void)
s5p_tv_setup();
s5p_i2c_hdmiphy_set_platdata(NULL);
+ s5p_hdmi_set_platdata(&hdmiphy_info, NULL, 0);
samsung_keypad_set_platdata(&smdkv310_keypad_data);
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 373c3c00d24..c0bc83a7663 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -115,7 +115,7 @@ static __init int exynos_pm_dt_parse_domains(void)
}
#endif /* CONFIG_OF */
-static __init void exynos_pm_add_dev_to_genpd(struct platform_device *pdev,
+static __init __maybe_unused void exynos_pm_add_dev_to_genpd(struct platform_device *pdev,
struct exynos_pm_domain *pd)
{
if (pdev->dev.bus) {
diff --git a/arch/arm/mach-gemini/irq.c b/arch/arm/mach-gemini/irq.c
index ca70e5fcc7a..020852d3bdd 100644
--- a/arch/arm/mach-gemini/irq.c
+++ b/arch/arm/mach-gemini/irq.c
@@ -17,6 +17,7 @@
#include <linux/sched.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
+#include <asm/system_misc.h>
#include <mach/hardware.h>
#define IRQ_SOURCE(base_addr) (base_addr + 0x00)
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 07f7c226e4c..d004d37ad9d 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -9,7 +9,8 @@ obj-$(CONFIG_SOC_IMX27) += clk-imx27.o mm-imx27.o ehci-imx27.o
obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clk-imx31.o iomux-imx31.o ehci-imx31.o pm-imx3.o
obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clk-imx35.o ehci-imx35.o pm-imx3.o
-obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clk-imx51-imx53.o ehci-imx5.o pm-imx5.o cpu_op-mx51.o
+imx5-pm-$(CONFIG_PM) += pm-imx5.o
+obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clk-imx51-imx53.o ehci-imx5.o $(imx5-pm-y) cpu_op-mx51.o
obj-$(CONFIG_COMMON_CLK) += clk-pllv1.o clk-pllv2.o clk-pllv3.o clk-gate2.o \
clk-pfd.o clk-busy.o
@@ -70,14 +71,13 @@ obj-$(CONFIG_DEBUG_LL) += lluart.o
obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_HAVE_IMX_MMDC) += mmdc.o
obj-$(CONFIG_HAVE_IMX_SRC) += src.o
-obj-$(CONFIG_CPU_V7) += head-v7.o
-AFLAGS_head-v7.o :=-Wa,-march=armv7-a
-obj-$(CONFIG_SMP) += platsmp.o
+AFLAGS_headsmp.o :=-Wa,-march=armv7-a
+obj-$(CONFIG_SMP) += headsmp.o platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o
ifeq ($(CONFIG_PM),y)
-obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o
+obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o headsmp.o
endif
# i.MX5 based machines
diff --git a/arch/arm/mach-imx/clk-imx25.c b/arch/arm/mach-imx/clk-imx25.c
index fdd8cc87c9f..d20d4795f4e 100644
--- a/arch/arm/mach-imx/clk-imx25.c
+++ b/arch/arm/mach-imx/clk-imx25.c
@@ -222,10 +222,8 @@ int __init mx25_clocks_init(void)
clk_register_clkdev(clk[lcdc_ipg], "ipg", "imx-fb.0");
clk_register_clkdev(clk[lcdc_ahb], "ahb", "imx-fb.0");
clk_register_clkdev(clk[wdt_ipg], NULL, "imx2-wdt.0");
- clk_register_clkdev(clk[ssi1_ipg_per], "per", "imx-ssi.0");
- clk_register_clkdev(clk[ssi1_ipg], "ipg", "imx-ssi.0");
- clk_register_clkdev(clk[ssi2_ipg_per], "per", "imx-ssi.1");
- clk_register_clkdev(clk[ssi2_ipg], "ipg", "imx-ssi.1");
+ clk_register_clkdev(clk[ssi1_ipg], NULL, "imx-ssi.0");
+ clk_register_clkdev(clk[ssi2_ipg], NULL, "imx-ssi.1");
clk_register_clkdev(clk[esdhc1_ipg_per], "per", "sdhci-esdhc-imx25.0");
clk_register_clkdev(clk[esdhc1_ipg], "ipg", "sdhci-esdhc-imx25.0");
clk_register_clkdev(clk[esdhc1_ahb], "ahb", "sdhci-esdhc-imx25.0");
@@ -243,6 +241,6 @@ int __init mx25_clocks_init(void)
clk_register_clkdev(clk[sdma_ahb], "ahb", "imx35-sdma");
clk_register_clkdev(clk[iim_ipg], "iim", NULL);
- mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
+ mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), MX25_INT_GPT1);
return 0;
}
diff --git a/arch/arm/mach-imx/clk-imx27.c b/arch/arm/mach-imx/clk-imx27.c
index 7aa6313fb16..f69ca468004 100644
--- a/arch/arm/mach-imx/clk-imx27.c
+++ b/arch/arm/mach-imx/clk-imx27.c
@@ -223,7 +223,7 @@ int __init mx27_clocks_init(unsigned long fref)
clk_register_clkdev(clk[per3_gate], "per", "imx-fb.0");
clk_register_clkdev(clk[lcdc_ipg_gate], "ipg", "imx-fb.0");
clk_register_clkdev(clk[lcdc_ahb_gate], "ahb", "imx-fb.0");
- clk_register_clkdev(clk[csi_ahb_gate], NULL, "mx2-camera.0");
+ clk_register_clkdev(clk[csi_ahb_gate], "ahb", "mx2-camera.0");
clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc");
clk_register_clkdev(clk[usb_ipg_gate], "ipg", "fsl-usb2-udc");
clk_register_clkdev(clk[usb_ahb_gate], "ahb", "fsl-usb2-udc");
@@ -250,8 +250,10 @@ int __init mx27_clocks_init(unsigned long fref)
clk_register_clkdev(clk[i2c2_ipg_gate], NULL, "imx-i2c.1");
clk_register_clkdev(clk[owire_ipg_gate], NULL, "mxc_w1.0");
clk_register_clkdev(clk[kpp_ipg_gate], NULL, "imx-keypad");
- clk_register_clkdev(clk[emma_ahb_gate], "ahb", "imx-emma");
- clk_register_clkdev(clk[emma_ipg_gate], "ipg", "imx-emma");
+ clk_register_clkdev(clk[emma_ahb_gate], "emma-ahb", "mx2-camera.0");
+ clk_register_clkdev(clk[emma_ipg_gate], "emma-ipg", "mx2-camera.0");
+ clk_register_clkdev(clk[emma_ahb_gate], "ahb", "m2m-emmaprp.0");
+ clk_register_clkdev(clk[emma_ipg_gate], "ipg", "m2m-emmaprp.0");
clk_register_clkdev(clk[iim_ipg_gate], "iim", NULL);
clk_register_clkdev(clk[gpio_ipg_gate], "gpio", NULL);
clk_register_clkdev(clk[brom_ahb_gate], "brom", NULL);
diff --git a/arch/arm/mach-imx/clk-imx31.c b/arch/arm/mach-imx/clk-imx31.c
index 8e19e70f90f..1253af2d997 100644
--- a/arch/arm/mach-imx/clk-imx31.c
+++ b/arch/arm/mach-imx/clk-imx31.c
@@ -130,7 +130,7 @@ int __init mx31_clocks_init(unsigned long fref)
clk_register_clkdev(clk[nfc], NULL, "mxc_nand.0");
clk_register_clkdev(clk[ipu_gate], NULL, "ipu-core");
clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
- clk_register_clkdev(clk[kpp_gate], "kpp", NULL);
+ clk_register_clkdev(clk[kpp_gate], NULL, "imx-keypad");
clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.0");
clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.0");
clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
index c6422fb10ba..65fb8bcd86c 100644
--- a/arch/arm/mach-imx/clk-imx35.c
+++ b/arch/arm/mach-imx/clk-imx35.c
@@ -230,10 +230,8 @@ int __init mx35_clocks_init()
clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1");
clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
- clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.0");
- clk_register_clkdev(clk[ssi1_div_post], "per", "imx-ssi.0");
- clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.1");
- clk_register_clkdev(clk[ssi2_div_post], "per", "imx-ssi.1");
+ clk_register_clkdev(clk[ssi1_gate], NULL, "imx-ssi.0");
+ clk_register_clkdev(clk[ssi2_gate], NULL, "imx-ssi.1");
/* i.mx35 has the i.mx21 type uart */
clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");
diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c
index f6086693ebd..4bdcaa97bd9 100644
--- a/arch/arm/mach-imx/clk-imx51-imx53.c
+++ b/arch/arm/mach-imx/clk-imx51-imx53.c
@@ -303,6 +303,7 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
clk_prepare_enable(clk[aips_tz2]); /* fec */
clk_prepare_enable(clk[spba]);
clk_prepare_enable(clk[emi_fast_gate]); /* fec */
+ clk_prepare_enable(clk[emi_slow_gate]); /* eim */
clk_prepare_enable(clk[tmax1]);
clk_prepare_enable(clk[tmax2]); /* esdhc2, fec */
clk_prepare_enable(clk[tmax3]); /* esdhc1, esdhc4 */
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index ea89520b6e2..4233d9e3531 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -152,7 +152,7 @@ enum mx6q_clks {
ssi2, ssi3, uart_ipg, uart_serial, usboh3, usdhc1, usdhc2, usdhc3,
usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg,
pll4_audio, pll5_video, pll6_mlb, pll7_usb_host, pll8_enet, ssi1_ipg,
- ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2,
+ ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2, ldb_di0_div_3_5, ldb_di1_div_3_5,
clk_max
};
@@ -288,8 +288,10 @@ int __init mx6q_clocks_init(void)
clk[gpu3d_shader] = imx_clk_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3);
clk[ipu1_podf] = imx_clk_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3);
clk[ipu2_podf] = imx_clk_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3);
- clk[ldb_di0_podf] = imx_clk_divider("ldb_di0_podf", "ldb_di0_sel", base + 0x20, 10, 1);
- clk[ldb_di1_podf] = imx_clk_divider("ldb_di1_podf", "ldb_di1_sel", base + 0x20, 11, 1);
+ clk[ldb_di0_div_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
+ clk[ldb_di0_podf] = imx_clk_divider("ldb_di0_podf", "ldb_di0_div_3_5", base + 0x20, 10, 1);
+ clk[ldb_di1_div_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
+ clk[ldb_di1_podf] = imx_clk_divider("ldb_di1_podf", "ldb_di1_div_3_5", base + 0x20, 11, 1);
clk[ipu1_di0_pre] = imx_clk_divider("ipu1_di0_pre", "ipu1_di0_pre_sel", base + 0x34, 3, 3);
clk[ipu1_di1_pre] = imx_clk_divider("ipu1_di1_pre", "ipu1_di1_pre_sel", base + 0x34, 12, 3);
clk[ipu2_di0_pre] = imx_clk_divider("ipu2_di0_pre", "ipu2_di0_pre_sel", base + 0x38, 3, 3);
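
The two imx_clk_fixed_factor() entries model the LDB serializer's fixed divide-by-3.5 stage: assuming the common-clk fixed-factor helper computes rate = parent_rate * mult / div, a mult/div of 2/7 is exactly 1/3.5. The arithmetic, as a trivial sketch:

        /* Same scaling the ldb_di*_div_3_5 clocks apply to their parent rate. */
        static unsigned long ldb_div_3_5(unsigned long parent_rate)
        {
                return parent_rate * 2 / 7;     /* e.g. 594000000 -> 169714285 Hz */
        }
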
diff --git a/arch/arm/mach-imx/head-v7.S b/arch/arm/mach-imx/headsmp.S
index 7e49deb128a..7e49deb128a 100644
--- a/arch/arm/mach-imx/head-v7.S
+++ b/arch/arm/mach-imx/headsmp.S
diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c
index 20ed2d56c1a..f8f7437c83b 100644
--- a/arch/arm/mach-imx/hotplug.c
+++ b/arch/arm/mach-imx/hotplug.c
@@ -42,22 +42,6 @@ static inline void cpu_enter_lowpower(void)
: "cc");
}
-static inline void cpu_leave_lowpower(void)
-{
- unsigned int v;
-
- asm volatile(
- "mrc p15, 0, %0, c1, c0, 0\n"
- " orr %0, %0, %1\n"
- " mcr p15, 0, %0, c1, c0, 0\n"
- " mrc p15, 0, %0, c1, c0, 1\n"
- " orr %0, %0, %2\n"
- " mcr p15, 0, %0, c1, c0, 1\n"
- : "=&r" (v)
- : "Ir" (CR_C), "Ir" (0x40)
- : "cc");
-}
-
/*
* platform-specific code to shutdown a CPU
*
@@ -67,11 +51,10 @@ void platform_cpu_die(unsigned int cpu)
{
cpu_enter_lowpower();
imx_enable_cpu(cpu, false);
- cpu_do_idle();
- cpu_leave_lowpower();
- /* We should never return from idle */
- panic("cpu %d unexpectedly exit from shutdown\n", cpu);
+ /* spin here until hardware takes it down */
+ while (1)
+ ;
}
int platform_cpu_disable(unsigned int cpu)
diff --git a/arch/arm/mach-imx/mach-armadillo5x0.c b/arch/arm/mach-imx/mach-armadillo5x0.c
index 2c6ab3273f9..5985ed1b8c9 100644
--- a/arch/arm/mach-imx/mach-armadillo5x0.c
+++ b/arch/arm/mach-imx/mach-armadillo5x0.c
@@ -526,7 +526,8 @@ static void __init armadillo5x0_init(void)
imx31_add_mxc_nand(&armadillo5x0_nand_board_info);
/* set NAND page size to 2k if not configured via boot mode pins */
- __raw_writel(__raw_readl(MXC_CCM_RCSR) | (1 << 30), MXC_CCM_RCSR);
+ __raw_writel(__raw_readl(mx3_ccm_base + MXC_CCM_RCSR) |
+ (1 << 30), mx3_ccm_base + MXC_CCM_RCSR);
/* RTC */
/* Get RTC IRQ and register the chip */
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 5ec0608f2a7..045b3f6a387 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -71,7 +71,7 @@ soft:
/* For imx6q sabrelite board: set KSZ9021RN RGMII pad skew */
static int ksz9021rn_phy_fixup(struct phy_device *phydev)
{
- if (IS_ENABLED(CONFIG_PHYLIB)) {
+ if (IS_BUILTIN(CONFIG_PHYLIB)) {
/* min rx data delay */
phy_write(phydev, 0x0b, 0x8105);
phy_write(phydev, 0x0c, 0x0000);
@@ -112,7 +112,7 @@ put_clk:
static void __init imx6q_sabrelite_init(void)
{
- if (IS_ENABLED(CONFIG_PHYLIB))
+ if (IS_BUILTIN(CONFIG_PHYLIB))
phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK,
ksz9021rn_phy_fixup);
imx6q_sabrelite_cko1_setup();
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c
index ebf680bebdf..3fa6c51390d 100644
--- a/arch/arm/mach-integrator/core.c
+++ b/arch/arm/mach-integrator/core.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index 7b1055c8e0b..3b2267529f5 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -456,7 +456,7 @@ static void __init ap_init_timer(void)
clk = clk_get_sys("ap_timer", NULL);
BUG_ON(IS_ERR(clk));
- clk_enable(clk);
+ clk_prepare_enable(clk);
rate = clk_get_rate(clk);
writel(0, TIMER0_VA_BASE + TIMER_CTRL);
diff --git a/arch/arm/mach-kirkwood/Makefile.boot b/arch/arm/mach-kirkwood/Makefile.boot
index 2a576abf409..a13299d758e 100644
--- a/arch/arm/mach-kirkwood/Makefile.boot
+++ b/arch/arm/mach-kirkwood/Makefile.boot
@@ -7,7 +7,8 @@ dtb-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += kirkwood-dns320.dtb
dtb-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += kirkwood-dns325.dtb
dtb-$(CONFIG_MACH_ICONNECT_DT) += kirkwood-iconnect.dtb
dtb-$(CONFIG_MACH_IB62X0_DT) += kirkwood-ib62x0.dtb
-dtb-$(CONFIG_MACH_TS219_DT) += kirkwood-qnap-ts219.dtb
+dtb-$(CONFIG_MACH_TS219_DT) += kirkwood-ts219-6281.dtb
+dtb-$(CONFIG_MACH_TS219_DT) += kirkwood-ts219-6282.dtb
dtb-$(CONFIG_MACH_GOFLEXNET_DT) += kirkwood-goflexnet.dtb
-dbt-$(CONFIG_MACH_LSXL_DT) += kirkwood-lschlv2.dtb
-dbt-$(CONFIG_MACH_LSXL_DT) += kirkwood-lsxhl.dtb
+dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lschlv2.dtb
+dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lsxhl.dtb
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index c4b64adcbfc..1201191d7f1 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -301,7 +301,7 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge00_init(eth_data,
GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM,
- IRQ_KIRKWOOD_GE00_ERR);
+ IRQ_KIRKWOOD_GE00_ERR, 1600);
/* The interface forgets the MAC address assigned by u-boot if
the clock is turned off, so claim the clk now. */
clk_prepare_enable(ge0);
@@ -315,7 +315,7 @@ void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge01_init(eth_data,
GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM,
- IRQ_KIRKWOOD_GE01_ERR);
+ IRQ_KIRKWOOD_GE01_ERR, 1600);
clk_prepare_enable(ge1);
}
@@ -517,6 +517,13 @@ void __init kirkwood_wdt_init(void)
void __init kirkwood_init_early(void)
{
orion_time_set_base(TIMER_VIRT_BASE);
+
+ /*
+ * Some Kirkwood devices allocate their coherent buffers from atomic
+ * context. Increase the size of the atomic coherent pool to make sure
+ * such allocations won't fail.
+ */
+ init_dma_coherent_pool_size(SZ_1M);
}
int kirkwood_tclk;
diff --git a/arch/arm/mach-kirkwood/db88f6281-bp-setup.c b/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
index d9335937959..be90b7d0e10 100644
--- a/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
+++ b/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/mtd/partitions.h>
#include <linux/ata_platform.h>
diff --git a/arch/arm/mach-mmp/sram.c b/arch/arm/mach-mmp/sram.c
index 4304f951937..7e8a5a2e1ec 100644
--- a/arch/arm/mach-mmp/sram.c
+++ b/arch/arm/mach-mmp/sram.c
@@ -68,7 +68,7 @@ static int __devinit sram_probe(struct platform_device *pdev)
struct resource *res;
int ret = 0;
- if (!pdata && !pdata->pool_name)
+ if (!pdata || !pdata->pool_name)
return -ENODEV;
info = kzalloc(sizeof(*info), GFP_KERNEL);
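
The fix above is a straight logic error: with &&, the right-hand operand pdata->pool_name is evaluated exactly when pdata is NULL, i.e. the check dereferences the pointer it is supposed to guard. With ||, a NULL pdata short-circuits the test. A standalone illustration (the struct and return values here are made up for the demonstration):

    #include <stdio.h>
    #include <stddef.h>

    struct sram_platdata {
        const char *pool_name;
    };

    static int probe_check(const struct sram_platdata *pdata)
    {
        /* Buggy form: "!pdata && !pdata->pool_name" would evaluate
         * pdata->pool_name precisely when pdata is NULL and crash.
         * The fixed form rejects a NULL pdata before touching it. */
        if (!pdata || !pdata->pool_name)
            return -1;  /* stands in for -ENODEV */
        return 0;
    }

    int main(void)
    {
        struct sram_platdata ok = { .pool_name = "sram-pool" };
        struct sram_platdata noname = { .pool_name = NULL };

        /* prints: -1 -1 0 */
        printf("%d %d %d\n", probe_check(NULL), probe_check(&noname),
               probe_check(&ok));
        return 0;
    }
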
diff --git a/arch/arm/mach-mv78xx0/addr-map.c b/arch/arm/mach-mv78xx0/addr-map.c
index 62b53d710ef..a9bc84180d2 100644
--- a/arch/arm/mach-mv78xx0/addr-map.c
+++ b/arch/arm/mach-mv78xx0/addr-map.c
@@ -37,7 +37,7 @@
#define WIN0_OFF(n) (BRIDGE_VIRT_BASE + 0x0000 + ((n) << 4))
#define WIN8_OFF(n) (BRIDGE_VIRT_BASE + 0x0900 + (((n) - 8) << 4))
-static void __init __iomem *win_cfg_base(int win)
+static void __init __iomem *win_cfg_base(const struct orion_addr_map_cfg *cfg, int win)
{
/*
* Find the control register base address for this window.
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
index b4c53b846c9..3057f7d4329 100644
--- a/arch/arm/mach-mv78xx0/common.c
+++ b/arch/arm/mach-mv78xx0/common.c
@@ -213,7 +213,8 @@ void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge00_init(eth_data,
GE00_PHYS_BASE, IRQ_MV78XX0_GE00_SUM,
- IRQ_MV78XX0_GE_ERR);
+ IRQ_MV78XX0_GE_ERR,
+ MV643XX_TX_CSUM_DEFAULT_LIMIT);
}
@@ -224,7 +225,8 @@ void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge01_init(eth_data,
GE01_PHYS_BASE, IRQ_MV78XX0_GE01_SUM,
- NO_IRQ);
+ NO_IRQ,
+ MV643XX_TX_CSUM_DEFAULT_LIMIT);
}
diff --git a/arch/arm/mach-mxs/Kconfig b/arch/arm/mach-mxs/Kconfig
index ccdf83b17cf..9a8bbda195b 100644
--- a/arch/arm/mach-mxs/Kconfig
+++ b/arch/arm/mach-mxs/Kconfig
@@ -2,9 +2,6 @@ if ARCH_MXS
source "arch/arm/mach-mxs/devices/Kconfig"
-config MXS_OCOTP
- bool
-
config SOC_IMX23
bool
select ARM_AMBA
@@ -66,7 +63,6 @@ config MACH_MX28EVK
select MXS_HAVE_PLATFORM_MXS_SAIF
select MXS_HAVE_PLATFORM_MXS_I2C
select MXS_HAVE_PLATFORM_RTC_STMP3XXX
- select MXS_OCOTP
help
Include support for MX28EVK platform. This includes specific
configurations for the board and its peripherals.
@@ -94,7 +90,6 @@ config MODULE_M28
select MXS_HAVE_PLATFORM_MXS_I2C
select MXS_HAVE_PLATFORM_MXS_MMC
select MXS_HAVE_PLATFORM_MXSFB
- select MXS_OCOTP
config MODULE_APX4
bool
@@ -106,7 +101,6 @@ config MODULE_APX4
select MXS_HAVE_PLATFORM_MXS_I2C
select MXS_HAVE_PLATFORM_MXS_MMC
select MXS_HAVE_PLATFORM_MXS_SAIF
- select MXS_OCOTP
config MACH_TX28
bool "Ka-Ro TX28 module"
diff --git a/arch/arm/mach-mxs/Makefile b/arch/arm/mach-mxs/Makefile
index e41590ccb43..fed3695a133 100644
--- a/arch/arm/mach-mxs/Makefile
+++ b/arch/arm/mach-mxs/Makefile
@@ -1,7 +1,6 @@
# Common support
-obj-y := devices.o icoll.o iomux.o system.o timer.o mm.o
+obj-y := devices.o icoll.o iomux.o ocotp.o system.o timer.o mm.o
-obj-$(CONFIG_MXS_OCOTP) += ocotp.o
obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_MACH_MXS_DT) += mach-mxs.o
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
index 8dabfe81d07..ff886e01a0b 100644
--- a/arch/arm/mach-mxs/mach-mxs.c
+++ b/arch/arm/mach-mxs/mach-mxs.c
@@ -261,7 +261,7 @@ static void __init apx4devkit_init(void)
enable_clk_enet_out();
if (IS_BUILTIN(CONFIG_PHYLIB))
- phy_register_fixup_for_uid(PHY_ID_KS8051, MICREL_PHY_ID_MASK,
+ phy_register_fixup_for_uid(PHY_ID_KSZ8051, MICREL_PHY_ID_MASK,
apx4devkit_phy_fixup);
mxsfb_pdata.mode_list = apx4devkit_video_modes;
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index dd2db025f77..346fd26f3aa 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -62,13 +62,14 @@ config ARCH_OMAP4
select PM_OPP if PM
select USB_ARCH_HAS_EHCI if USB_SUPPORT
select ARM_CPU_SUSPEND if PM
- select ARCH_NEEDS_CPU_IDLE_COUPLED
+ select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
config SOC_OMAP5
bool "TI OMAP5"
select CPU_V7
select ARM_GIC
select HAVE_SMP
+ select ARM_CPU_SUSPEND if PM
comment "OMAP Core Type"
depends on ARCH_OMAP2
@@ -231,10 +232,11 @@ config MACH_OMAP3_PANDORA
select OMAP_PACKAGE_CBB
select REGULATOR_FIXED_VOLTAGE if REGULATOR
-config MACH_OMAP3_TOUCHBOOK
+config MACH_TOUCHBOOK
bool "OMAP3 Touch Book"
depends on ARCH_OMAP3
default y
+ select OMAP_PACKAGE_CBB
config MACH_OMAP_3430SDP
bool "OMAP 3430 SDP board"
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index f6a24b3f9c4..34c2c7f59f0 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -255,7 +255,7 @@ obj-$(CONFIG_MACH_OMAP_3630SDP) += board-zoom-display.o
obj-$(CONFIG_MACH_CM_T35) += board-cm-t35.o
obj-$(CONFIG_MACH_CM_T3517) += board-cm-t3517.o
obj-$(CONFIG_MACH_IGEP0020) += board-igep0020.o
-obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK) += board-omap3touchbook.o
+obj-$(CONFIG_MACH_TOUCHBOOK) += board-omap3touchbook.o
obj-$(CONFIG_MACH_OMAP_4430SDP) += board-4430sdp.o
obj-$(CONFIG_MACH_OMAP4_PANDA) += board-omap4panda.o
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 74915295482..28214483aab 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -554,6 +554,8 @@ static const struct usbhs_omap_board_data igep3_usbhs_bdata __initconst = {
#ifdef CONFIG_OMAP_MUX
static struct omap_board_mux board_mux[] __initdata = {
+ /* SMSC9221 LAN Controller ETH IRQ (GPIO_176) */
+ OMAP3_MUX(MCSPI1_CS2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
{ .reg_offset = OMAP_MUX_TERMINATOR },
};
#endif
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index ef230a0eb5e..0d362e9f9cb 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -58,6 +58,7 @@
#include "hsmmc.h"
#include "common-board-devices.h"
+#define OMAP3_EVM_TS_GPIO 175
#define OMAP3_EVM_EHCI_VBUS 22
#define OMAP3_EVM_EHCI_SELECT 61
diff --git a/arch/arm/mach-omap2/clock33xx_data.c b/arch/arm/mach-omap2/clock33xx_data.c
index 25bbcc7ca4d..ae27de8899a 100644
--- a/arch/arm/mach-omap2/clock33xx_data.c
+++ b/arch/arm/mach-omap2/clock33xx_data.c
@@ -1036,13 +1036,13 @@ static struct omap_clk am33xx_clks[] = {
CLK(NULL, "mmu_fck", &mmu_fck, CK_AM33XX),
CLK(NULL, "smartreflex0_fck", &smartreflex0_fck, CK_AM33XX),
CLK(NULL, "smartreflex1_fck", &smartreflex1_fck, CK_AM33XX),
- CLK(NULL, "gpt1_fck", &timer1_fck, CK_AM33XX),
- CLK(NULL, "gpt2_fck", &timer2_fck, CK_AM33XX),
- CLK(NULL, "gpt3_fck", &timer3_fck, CK_AM33XX),
- CLK(NULL, "gpt4_fck", &timer4_fck, CK_AM33XX),
- CLK(NULL, "gpt5_fck", &timer5_fck, CK_AM33XX),
- CLK(NULL, "gpt6_fck", &timer6_fck, CK_AM33XX),
- CLK(NULL, "gpt7_fck", &timer7_fck, CK_AM33XX),
+ CLK(NULL, "timer1_fck", &timer1_fck, CK_AM33XX),
+ CLK(NULL, "timer2_fck", &timer2_fck, CK_AM33XX),
+ CLK(NULL, "timer3_fck", &timer3_fck, CK_AM33XX),
+ CLK(NULL, "timer4_fck", &timer4_fck, CK_AM33XX),
+ CLK(NULL, "timer5_fck", &timer5_fck, CK_AM33XX),
+ CLK(NULL, "timer6_fck", &timer6_fck, CK_AM33XX),
+ CLK(NULL, "timer7_fck", &timer7_fck, CK_AM33XX),
CLK(NULL, "usbotg_fck", &usbotg_fck, CK_AM33XX),
CLK(NULL, "ieee5000_fck", &ieee5000_fck, CK_AM33XX),
CLK(NULL, "wdt1_fck", &wdt1_fck, CK_AM33XX),
diff --git a/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c b/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
index a0d68dbecfa..f99e65cfb86 100644
--- a/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
+++ b/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
@@ -241,6 +241,52 @@ static void omap3_clkdm_deny_idle(struct clockdomain *clkdm)
_clkdm_del_autodeps(clkdm);
}
+static int omap3xxx_clkdm_clk_enable(struct clockdomain *clkdm)
+{
+ bool hwsup = false;
+
+ if (!clkdm->clktrctrl_mask)
+ return 0;
+
+ hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+ clkdm->clktrctrl_mask);
+
+ if (hwsup) {
+ /* Disable HW transitions when we are changing deps */
+ _disable_hwsup(clkdm);
+ _clkdm_add_autodeps(clkdm);
+ _enable_hwsup(clkdm);
+ } else {
+ if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
+ omap3_clkdm_wakeup(clkdm);
+ }
+
+ return 0;
+}
+
+static int omap3xxx_clkdm_clk_disable(struct clockdomain *clkdm)
+{
+ bool hwsup = false;
+
+ if (!clkdm->clktrctrl_mask)
+ return 0;
+
+ hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+ clkdm->clktrctrl_mask);
+
+ if (hwsup) {
+ /* Disable HW transitions when we are changing deps */
+ _disable_hwsup(clkdm);
+ _clkdm_del_autodeps(clkdm);
+ _enable_hwsup(clkdm);
+ } else {
+ if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP)
+ omap3_clkdm_sleep(clkdm);
+ }
+
+ return 0;
+}
+
struct clkdm_ops omap2_clkdm_operations = {
.clkdm_add_wkdep = omap2_clkdm_add_wkdep,
.clkdm_del_wkdep = omap2_clkdm_del_wkdep,
@@ -267,6 +313,6 @@ struct clkdm_ops omap3_clkdm_operations = {
.clkdm_wakeup = omap3_clkdm_wakeup,
.clkdm_allow_idle = omap3_clkdm_allow_idle,
.clkdm_deny_idle = omap3_clkdm_deny_idle,
- .clkdm_clk_enable = omap2_clkdm_clk_enable,
- .clkdm_clk_disable = omap2_clkdm_clk_disable,
+ .clkdm_clk_enable = omap3xxx_clkdm_clk_enable,
+ .clkdm_clk_disable = omap3xxx_clkdm_clk_disable,
};
diff --git a/arch/arm/mach-omap2/cm-regbits-34xx.h b/arch/arm/mach-omap2/cm-regbits-34xx.h
index 766338fe4d3..975f6bda0e0 100644
--- a/arch/arm/mach-omap2/cm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/cm-regbits-34xx.h
@@ -67,6 +67,7 @@
#define OMAP3430_EN_IVA2_DPLL_MASK (0x7 << 0)
/* CM_IDLEST_IVA2 */
+#define OMAP3430_ST_IVA2_SHIFT 0
#define OMAP3430_ST_IVA2_MASK (1 << 0)
/* CM_IDLEST_PLL_IVA2 */
diff --git a/arch/arm/mach-omap2/common-board-devices.c b/arch/arm/mach-omap2/common-board-devices.c
index 14734746457..c1875862679 100644
--- a/arch/arm/mach-omap2/common-board-devices.c
+++ b/arch/arm/mach-omap2/common-board-devices.c
@@ -35,16 +35,6 @@ static struct omap2_mcspi_device_config ads7846_mcspi_config = {
.turbo_mode = 0,
};
-/*
- * ADS7846 driver maybe request a gpio according to the value
- * of pdata->get_pendown_state, but we have done this. So set
- * get_pendown_state to avoid twice gpio requesting.
- */
-static int omap3_get_pendown_state(void)
-{
- return !gpio_get_value(OMAP3_EVM_TS_GPIO);
-}
-
static struct ads7846_platform_data ads7846_config = {
.x_max = 0x0fff,
.y_max = 0x0fff,
@@ -55,7 +45,6 @@ static struct ads7846_platform_data ads7846_config = {
.debounce_rep = 1,
.gpio_pendown = -EINVAL,
.keep_vref_on = 1,
- .get_pendown_state = &omap3_get_pendown_state,
};
static struct spi_board_info ads7846_spi_board_info __initdata = {
diff --git a/arch/arm/mach-omap2/common-board-devices.h b/arch/arm/mach-omap2/common-board-devices.h
index 4c4ef6a6166..a0b4a42836a 100644
--- a/arch/arm/mach-omap2/common-board-devices.h
+++ b/arch/arm/mach-omap2/common-board-devices.h
@@ -4,7 +4,6 @@
#include "twl-common.h"
#define NAND_BLOCK_SIZE SZ_128K
-#define OMAP3_EVM_TS_GPIO 175
struct mtd_partition;
struct ads7846_platform_data;
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index ee05e193fc6..288bee6cbb7 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -238,8 +238,9 @@ int __init omap4_idle_init(void)
for_each_cpu(cpu_id, cpu_online_mask) {
dev = &per_cpu(omap4_idle_dev, cpu_id);
dev->cpu = cpu_id;
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
dev->coupled_cpus = *cpu_online_mask;
-
+#endif
cpuidle_register_driver(&omap4_idle_driver);
if (cpuidle_register_device(dev)) {
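
The new #ifdef is needed because coupled_cpus is only a member of struct cpuidle_device when CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is set, and the Kconfig hunk above now selects that option only on SMP; without the guard a UP build would fail to compile. The same pattern in miniature (hypothetical struct and config symbol, user-space only):

    #include <stdio.h>

    /* Toggle this to mimic the Kconfig select. */
    #define CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED 1

    struct cpuidle_device_like {
        int cpu;
    #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
        unsigned long coupled_cpus; /* only present in the coupled-idle build */
    #endif
    };

    int main(void)
    {
        struct cpuidle_device_like dev = { .cpu = 0 };

    #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
        dev.coupled_cpus = 0x3;     /* compiled out when the option is off */
    #endif
        printf("sizeof(dev) = %zu\n", sizeof(dev));
        return 0;
    }
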
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index c00c68961bb..02b9478b786 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -23,7 +23,6 @@
#include <mach/irqs.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
-#include <asm/pmu.h>
#include "iomap.h"
#include <plat/board.h>
@@ -448,7 +447,7 @@ static struct resource omap3_pmu_resource = {
static struct platform_device omap_pmu_device = {
.name = "arm-pmu",
- .id = ARM_PMU_DEVICE_CPU,
+ .id = -1,
.num_resources = 1,
};
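
This is the first of a series of identical hunks below (PXA, RealView, Tegra, ux500, Versatile Express, IOP): the perf rework removed the ARM_PMU_DEVICE_CPU id and the <asm/pmu.h> include that provided it, and the CPU PMU platform driver now binds by the "arm-pmu" name alone, so each board registers a single anonymous instance with .id = -1. A kernel-side sketch of the resulting registration (not a standalone program; the IRQ number is a placeholder, not any particular board's value):

    #include <linux/ioport.h>
    #include <linux/platform_device.h>

    static struct resource example_pmu_resource = {
        .start  = 42,           /* placeholder IRQ, not a real board value */
        .end    = 42,
        .flags  = IORESOURCE_IRQ,
    };

    static struct platform_device example_pmu_device = {
        .name           = "arm-pmu",    /* matched against the driver by name */
        .id             = -1,           /* single instance: "arm-pmu", not "arm-pmu.0" */
        .num_resources  = 1,
        .resource       = &example_pmu_resource,
    };

    static int __init example_pmu_register(void)
    {
        return platform_device_register(&example_pmu_device);
    }
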
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
index 471e62a74a1..76f9b3c2f58 100644
--- a/arch/arm/mach-omap2/mux.h
+++ b/arch/arm/mach-omap2/mux.h
@@ -127,7 +127,6 @@ struct omap_mux_partition {
* @gpio: GPIO number
* @muxnames: available signal modes for a ball
* @balls: available balls on the package
- * @partition: mux partition
*/
struct omap_mux {
u16 reg_offset;
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index 05fdebfaa19..330d4c6e746 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -46,7 +46,7 @@
static void __iomem *wakeupgen_base;
static void __iomem *sar_base;
static DEFINE_SPINLOCK(wakeupgen_lock);
-static unsigned int irq_target_cpu[NR_IRQS];
+static unsigned int irq_target_cpu[MAX_IRQS];
static unsigned int irq_banks = MAX_NR_REG_BANKS;
static unsigned int max_irqs = MAX_IRQS;
static unsigned int omap_secure_apis;
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 6ca8e519968..37afbd173c2 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1889,6 +1889,7 @@ static int _enable(struct omap_hwmod *oh)
_enable_sysc(oh);
}
} else {
+ _omap4_disable_module(oh);
_disable_clocks(oh);
pr_debug("omap_hwmod: %s: _wait_target_ready: %d\n",
oh->name, r);
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index c9e38200216..ce7e6068768 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -100,9 +100,9 @@ static struct omap_hwmod omap3xxx_mpu_hwmod = {
/* IVA2 (IVA2) */
static struct omap_hwmod_rst_info omap3xxx_iva_resets[] = {
- { .name = "logic", .rst_shift = 0 },
- { .name = "seq0", .rst_shift = 1 },
- { .name = "seq1", .rst_shift = 2 },
+ { .name = "logic", .rst_shift = 0, .st_shift = 8 },
+ { .name = "seq0", .rst_shift = 1, .st_shift = 9 },
+ { .name = "seq1", .rst_shift = 2, .st_shift = 10 },
};
static struct omap_hwmod omap3xxx_iva_hwmod = {
@@ -112,6 +112,15 @@ static struct omap_hwmod omap3xxx_iva_hwmod = {
.rst_lines = omap3xxx_iva_resets,
.rst_lines_cnt = ARRAY_SIZE(omap3xxx_iva_resets),
.main_clk = "iva2_ck",
+ .prcm = {
+ .omap2 = {
+ .module_offs = OMAP3430_IVA2_MOD,
+ .prcm_reg_id = 1,
+ .module_bit = OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT,
+ .idlest_reg_id = 1,
+ .idlest_idle_bit = OMAP3430_ST_IVA2_SHIFT,
+ }
+ },
};
/* timer class */
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 242aee498ce..afb60917a94 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -4210,7 +4210,7 @@ static struct omap_hwmod_ocp_if omap44xx_dsp__iva = {
};
/* dsp -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_dsp__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_dsp__sl2if = {
.master = &omap44xx_dsp_hwmod,
.slave = &omap44xx_sl2if_hwmod,
.clk = "dpll_iva_m5x2_ck",
@@ -4828,7 +4828,7 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iss = {
};
/* iva -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_iva__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_iva__sl2if = {
.master = &omap44xx_iva_hwmod,
.slave = &omap44xx_sl2if_hwmod,
.clk = "dpll_iva_m5x2_ck",
@@ -5362,7 +5362,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__scrm = {
};
/* l3_main_2 -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l3_main_2__sl2if = {
.master = &omap44xx_l3_main_2_hwmod,
.slave = &omap44xx_sl2if_hwmod,
.clk = "l3_div_ck",
@@ -6032,7 +6032,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_l4_abe__dmic,
&omap44xx_l4_abe__dmic_dma,
&omap44xx_dsp__iva,
- &omap44xx_dsp__sl2if,
+ /* &omap44xx_dsp__sl2if, */
&omap44xx_l4_cfg__dsp,
&omap44xx_l3_main_2__dss,
&omap44xx_l4_per__dss,
@@ -6068,7 +6068,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_l4_per__i2c4,
&omap44xx_l3_main_2__ipu,
&omap44xx_l3_main_2__iss,
- &omap44xx_iva__sl2if,
+ /* &omap44xx_iva__sl2if, */
&omap44xx_l3_main_2__iva,
&omap44xx_l4_wkup__kbd,
&omap44xx_l4_cfg__mailbox,
@@ -6099,7 +6099,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_l4_cfg__cm_core,
&omap44xx_l4_wkup__prm,
&omap44xx_l4_wkup__scrm,
- &omap44xx_l3_main_2__sl2if,
+ /* &omap44xx_l3_main_2__sl2if, */
&omap44xx_l4_abe__slimbus1,
&omap44xx_l4_abe__slimbus1_dma,
&omap44xx_l4_per__slimbus2,
diff --git a/arch/arm/mach-omap2/opp4xxx_data.c b/arch/arm/mach-omap2/opp4xxx_data.c
index 2293ba27101..c95415da23c 100644
--- a/arch/arm/mach-omap2/opp4xxx_data.c
+++ b/arch/arm/mach-omap2/opp4xxx_data.c
@@ -94,7 +94,7 @@ int __init omap4_opp_init(void)
{
int r = -ENODEV;
- if (!cpu_is_omap44xx())
+ if (!cpu_is_omap443x())
return r;
r = omap_init_opp_table(omap44xx_opp_def_list,
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index e4fc88c65db..05bd8f02723 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -272,21 +272,16 @@ void omap_sram_idle(void)
per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
core_next_state = pwrdm_read_next_pwrst(core_pwrdm);
- if (mpu_next_state < PWRDM_POWER_ON) {
- pwrdm_pre_transition(mpu_pwrdm);
- pwrdm_pre_transition(neon_pwrdm);
- }
+ pwrdm_pre_transition(NULL);
/* PER */
if (per_next_state < PWRDM_POWER_ON) {
- pwrdm_pre_transition(per_pwrdm);
per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0;
omap2_gpio_prepare_for_idle(per_going_off);
}
/* CORE */
if (core_next_state < PWRDM_POWER_ON) {
- pwrdm_pre_transition(core_pwrdm);
if (core_next_state == PWRDM_POWER_OFF) {
omap3_core_save_context();
omap3_cm_save_context();
@@ -339,20 +334,14 @@ void omap_sram_idle(void)
omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
OMAP3430_GR_MOD,
OMAP3_PRM_VOLTCTRL_OFFSET);
- pwrdm_post_transition(core_pwrdm);
}
omap3_intc_resume_idle();
+ pwrdm_post_transition(NULL);
+
/* PER */
- if (per_next_state < PWRDM_POWER_ON) {
+ if (per_next_state < PWRDM_POWER_ON)
omap2_gpio_resume_after_idle();
- pwrdm_post_transition(per_pwrdm);
- }
-
- if (mpu_next_state < PWRDM_POWER_ON) {
- pwrdm_post_transition(mpu_pwrdm);
- pwrdm_post_transition(neon_pwrdm);
- }
}
static void omap3_pm_idle(void)
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
index 9f6b83d1b19..91e71d8f46f 100644
--- a/arch/arm/mach-omap2/sleep44xx.S
+++ b/arch/arm/mach-omap2/sleep44xx.S
@@ -56,9 +56,13 @@ ppa_por_params:
* The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
* It returns to the caller for CPU INACTIVE and ON power states or in case
* CPU failed to transition to targeted OFF/DORMANT state.
+ *
+ * omap4_finish_suspend() calls v7_flush_dcache_all(), which does not save the
+ * stack frame and expects the caller to take care of it. Hence the entire
+ * stack frame is saved to avoid possible stack corruption.
*/
ENTRY(omap4_finish_suspend)
- stmfd sp!, {lr}
+ stmfd sp!, {r4-r12, lr}
cmp r0, #0x0
beq do_WFI @ No lowpower state, jump to WFI
@@ -226,7 +230,7 @@ scu_gp_clear:
skip_scu_gp_clear:
isb
dsb
- ldmfd sp!, {pc}
+ ldmfd sp!, {r4-r12, pc}
ENDPROC(omap4_finish_suspend)
/*
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 2ff6d41ec6c..2ba4f57dda8 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -260,6 +260,7 @@ static u32 notrace dmtimer_read_sched_clock(void)
return 0;
}
+#ifdef CONFIG_OMAP_32K_TIMER
/* Setup free-running counter for clocksource */
static int __init omap2_sync32k_clocksource_init(void)
{
@@ -299,6 +300,12 @@ static int __init omap2_sync32k_clocksource_init(void)
return ret;
}
+#else
+static inline int omap2_sync32k_clocksource_init(void)
+{
+ return -ENODEV;
+}
+#endif
static void __init omap2_gptimer_clocksource_init(int gptimer_id,
const char *fck_source)
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
index de47f170ba5..db5ff664237 100644
--- a/arch/arm/mach-omap2/twl-common.c
+++ b/arch/arm/mach-omap2/twl-common.c
@@ -67,6 +67,7 @@ void __init omap_pmic_init(int bus, u32 clkrate,
const char *pmic_type, int pmic_irq,
struct twl4030_platform_data *pmic_data)
{
+ omap_mux_init_signal("sys_nirq", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
strncpy(pmic_i2c_board_info.type, pmic_type,
sizeof(pmic_i2c_board_info.type));
pmic_i2c_board_info.irq = pmic_irq;
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index 9148b229d0d..a6cd14ab1e4 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -109,7 +109,8 @@ void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge00_init(eth_data,
ORION5X_ETH_PHYS_BASE, IRQ_ORION5X_ETH_SUM,
- IRQ_ORION5X_ETH_ERR);
+ IRQ_ORION5X_ETH_ERR,
+ MV643XX_TX_CSUM_DEFAULT_LIMIT);
}
@@ -203,6 +204,13 @@ void __init orion5x_wdt_init(void)
void __init orion5x_init_early(void)
{
orion_time_set_base(TIMER_VIRT_BASE);
+
+ /*
+ * Some Orion5x devices allocate their coherent buffers from atomic
+ * context. Increase the size of the atomic coherent pool to make sure
+ * such allocations won't fail.
+ */
+ init_dma_coherent_pool_size(SZ_1M);
}
int orion5x_tclk;
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
index 166eee5b8a7..c1f3b1279d9 100644
--- a/arch/arm/mach-pxa/devices.c
+++ b/arch/arm/mach-pxa/devices.c
@@ -6,7 +6,6 @@
#include <linux/spi/pxa2xx_spi.h>
#include <linux/i2c/pxa-i2c.h>
-#include <asm/pmu.h>
#include <mach/udc.h>
#include <mach/pxa3xx-u2d.h>
#include <mach/pxafb.h>
@@ -42,7 +41,7 @@ static struct resource pxa_resource_pmu = {
struct platform_device pxa_device_pmu = {
.name = "arm-pmu",
- .id = ARM_PMU_DEVICE_CPU,
+ .id = -1,
.resource = &pxa_resource_pmu,
.num_resources = 1,
};
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index 5905ed130e9..d89d87ae144 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -953,12 +953,12 @@ static struct i2c_board_info raumfeld_connector_i2c_board_info __initdata = {
static struct eeti_ts_platform_data eeti_ts_pdata = {
.irq_active_high = 1,
+ .irq_gpio = GPIO_TOUCH_IRQ,
};
static struct i2c_board_info raumfeld_controller_i2c_board_info __initdata = {
.type = "eeti_ts",
.addr = 0x0a,
- .irq = PXA_GPIO_TO_IRQ(GPIO_TOUCH_IRQ),
.platform_data = &eeti_ts_pdata,
};
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c
index baf382c5e77..d7a6e9cebba 100644
--- a/arch/arm/mach-realview/realview_eb.c
+++ b/arch/arm/mach-realview/realview_eb.c
@@ -32,7 +32,6 @@
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
-#include <asm/pmu.h>
#include <asm/pgtable.h>
#include <asm/hardware/gic.h>
#include <asm/hardware/cache-l2x0.h>
@@ -297,7 +296,7 @@ static struct resource pmu_resources[] = {
static struct platform_device pmu_device = {
.name = "arm-pmu",
- .id = ARM_PMU_DEVICE_CPU,
+ .id = -1,
.num_resources = ARRAY_SIZE(pmu_resources),
.resource = pmu_resources,
};
diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c
index b1d7cafa1a6..361f898884c 100644
--- a/arch/arm/mach-realview/realview_pb1176.c
+++ b/arch/arm/mach-realview/realview_pb1176.c
@@ -34,7 +34,6 @@
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
-#include <asm/pmu.h>
#include <asm/pgtable.h>
#include <asm/hardware/gic.h>
#include <asm/hardware/cache-l2x0.h>
@@ -280,7 +279,7 @@ static struct resource pmu_resource = {
static struct platform_device pmu_device = {
.name = "arm-pmu",
- .id = ARM_PMU_DEVICE_CPU,
+ .id = -1,
.num_resources = 1,
.resource = &pmu_resource,
};
diff --git a/arch/arm/mach-realview/realview_pb11mp.c b/arch/arm/mach-realview/realview_pb11mp.c
index a98c536e332..c56bc8d4d11 100644
--- a/arch/arm/mach-realview/realview_pb11mp.c
+++ b/arch/arm/mach-realview/realview_pb11mp.c
@@ -32,7 +32,6 @@
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
-#include <asm/pmu.h>
#include <asm/pgtable.h>
#include <asm/hardware/gic.h>
#include <asm/hardware/cache-l2x0.h>
@@ -263,7 +262,7 @@ static struct resource pmu_resources[] = {
static struct platform_device pmu_device = {
.name = "arm-pmu",
- .id = ARM_PMU_DEVICE_CPU,
+ .id = -1,
.num_resources = ARRAY_SIZE(pmu_resources),
.resource = pmu_resources,
};
diff --git a/arch/arm/mach-realview/realview_pba8.c b/arch/arm/mach-realview/realview_pba8.c
index 59650174e6e..04093758245 100644
--- a/arch/arm/mach-realview/realview_pba8.c
+++ b/arch/arm/mach-realview/realview_pba8.c
@@ -31,7 +31,6 @@
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
-#include <asm/pmu.h>
#include <asm/pgtable.h>
#include <asm/hardware/gic.h>
@@ -241,7 +240,7 @@ static struct resource pmu_resource = {
static struct platform_device pmu_device = {
.name = "arm-pmu",
- .id = ARM_PMU_DEVICE_CPU,
+ .id = -1,
.num_resources = 1,
.resource = &pmu_resource,
};
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c
index 3f2f605624e..97885dc11e8 100644
--- a/arch/arm/mach-realview/realview_pbx.c
+++ b/arch/arm/mach-realview/realview_pbx.c
@@ -30,7 +30,6 @@
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
-#include <asm/pmu.h>
#include <asm/smp_twd.h>
#include <asm/pgtable.h>
#include <asm/hardware/gic.h>
@@ -280,7 +279,7 @@ static struct resource pmu_resources[] = {
static struct platform_device pmu_device = {
.name = "arm-pmu",
- .id = ARM_PMU_DEVICE_CPU,
+ .id = -1,
.num_resources = ARRAY_SIZE(pmu_resources),
.resource = pmu_resources,
};
diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig
index e24961109b7..d56b0f7f2b2 100644
--- a/arch/arm/mach-s3c24xx/Kconfig
+++ b/arch/arm/mach-s3c24xx/Kconfig
@@ -483,7 +483,7 @@ config MACH_NEO1973_GTA02
select I2C
select POWER_SUPPLY
select MACH_NEO1973
- select S3C2410_PWM
+ select S3C24XX_PWM
select S3C_DEV_USB_HOST
help
Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone
@@ -493,7 +493,7 @@ config MACH_RX1950
select S3C24XX_DCLK
select PM_H1940 if PM
select I2C
- select S3C2410_PWM
+ select S3C24XX_PWM
select S3C_DEV_NAND
select S3C2410_IOTIMING if S3C2440_CPUFREQ
select S3C2440_XTAL_16934400
diff --git a/arch/arm/mach-s3c24xx/include/mach/dma.h b/arch/arm/mach-s3c24xx/include/mach/dma.h
index 454831b6603..ee99fd56c04 100644
--- a/arch/arm/mach-s3c24xx/include/mach/dma.h
+++ b/arch/arm/mach-s3c24xx/include/mach/dma.h
@@ -24,7 +24,8 @@
*/
enum dma_ch {
- DMACH_XD0,
+ DMACH_DT_PROP = -1, /* not yet supported, do not use */
+ DMACH_XD0 = 0,
DMACH_XD1,
DMACH_SDI,
DMACH_SPI0,
diff --git a/arch/arm/mach-sa1100/leds-hackkit.c b/arch/arm/mach-sa1100/leds-hackkit.c
index 6a2352436e6..f8e47235bab 100644
--- a/arch/arm/mach-sa1100/leds-hackkit.c
+++ b/arch/arm/mach-sa1100/leds-hackkit.c
@@ -10,6 +10,7 @@
* as cpu led, the green one is used as timer led.
*/
#include <linux/init.h>
+#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/leds.h>
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index cf10f92856d..453a6e50db8 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -520,13 +520,14 @@ static struct platform_device hdmi_lcdc_device = {
};
/* GPIO KEY */
-#define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 }
+#define GPIO_KEY(c, g, d, ...) \
+ { .code = c, .gpio = g, .desc = d, .active_low = 1, __VA_ARGS__ }
static struct gpio_keys_button gpio_buttons[] = {
- GPIO_KEY(KEY_POWER, GPIO_PORT99, "SW1"),
- GPIO_KEY(KEY_BACK, GPIO_PORT100, "SW2"),
- GPIO_KEY(KEY_MENU, GPIO_PORT97, "SW3"),
- GPIO_KEY(KEY_HOME, GPIO_PORT98, "SW4"),
+ GPIO_KEY(KEY_POWER, GPIO_PORT99, "SW3", .wakeup = 1),
+ GPIO_KEY(KEY_BACK, GPIO_PORT100, "SW4"),
+ GPIO_KEY(KEY_MENU, GPIO_PORT97, "SW5"),
+ GPIO_KEY(KEY_HOME, GPIO_PORT98, "SW6"),
};
static struct gpio_keys_platform_data gpio_key_info = {
@@ -901,8 +902,8 @@ static struct platform_device *eva_devices[] __initdata = {
&camera_device,
&ceu0_device,
&fsi_device,
- &fsi_hdmi_device,
&fsi_wm8978_device,
+ &fsi_hdmi_device,
};
static void __init eva_clock_init(void)
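
Besides correcting the SW labels, the reworked GPIO_KEY() macro forwards any extra arguments into the designated initializer, which is how the power key gains .wakeup = 1 without disturbing the other entries. A cut-down, standalone version of the same variadic-macro pattern (struct reduced to the fields that matter here; 116 and 158 are the usual KEY_POWER/KEY_BACK input codes):

    #include <stdio.h>

    struct gpio_keys_button {
        int code;
        int gpio;
        const char *desc;
        int active_low;
        int wakeup;
    };

    /* Optional trailing initializers ride in through __VA_ARGS__. */
    #define GPIO_KEY(c, g, d, ...) \
        { .code = c, .gpio = g, .desc = d, .active_low = 1, __VA_ARGS__ }

    static struct gpio_keys_button buttons[] = {
        GPIO_KEY(116, 99,  "SW3", .wakeup = 1),  /* KEY_POWER, wake source */
        GPIO_KEY(158, 100, "SW4"),               /* KEY_BACK */
    };

    int main(void)
    {
        for (unsigned i = 0; i < sizeof(buttons) / sizeof(buttons[0]); i++)
            printf("%s: gpio %d wakeup %d\n", buttons[i].desc,
                   buttons[i].gpio, buttons[i].wakeup);
        return 0;
    }
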
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
index 53b7ea92c32..3b8a0171c3c 100644
--- a/arch/arm/mach-shmobile/board-kzm9g.c
+++ b/arch/arm/mach-shmobile/board-kzm9g.c
@@ -346,11 +346,11 @@ static struct resource sh_mmcif_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = gic_spi(141),
+ .start = gic_spi(140),
.flags = IORESOURCE_IRQ,
},
[2] = {
- .start = gic_spi(140),
+ .start = gic_spi(141),
.flags = IORESOURCE_IRQ,
},
};
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 7ea2b31e319..c129542f6ae 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -695,6 +695,7 @@ static struct platform_device usbhs0_device = {
* - J30 "open"
* - modify usbhs1_get_id() USBHS_HOST -> USBHS_GADGET
* - add .get_vbus = usbhs_get_vbus in usbhs1_private
+ * - check usbhs0_device(pio)/usbhs1_device(irq) order in mackerel_devices.
*/
#define IRQ8 evt2irq(0x0300)
#define USB_PHY_MODE (1 << 4)
@@ -1325,8 +1326,8 @@ static struct platform_device *mackerel_devices[] __initdata = {
&nor_flash_device,
&smc911x_device,
&lcdc_device,
- &usbhs1_device,
&usbhs0_device,
+ &usbhs1_device,
&leds_device,
&fsi_device,
&fsi_ak4643_device,
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index 3a528cf4366..fcf5a47f477 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -67,7 +67,7 @@ static struct smsc911x_platform_config smsc911x_platdata = {
static struct platform_device eth_device = {
.name = "smsc911x",
- .id = 0,
+ .id = -1,
.dev = {
.platform_data = &smsc911x_platdata,
},
diff --git a/arch/arm/mach-shmobile/intc-sh73a0.c b/arch/arm/mach-shmobile/intc-sh73a0.c
index ee447404c85..588555a67d9 100644
--- a/arch/arm/mach-shmobile/intc-sh73a0.c
+++ b/arch/arm/mach-shmobile/intc-sh73a0.c
@@ -259,9 +259,9 @@ static int sh73a0_set_wake(struct irq_data *data, unsigned int on)
return 0; /* always allow wakeup */
}
-#define RELOC_BASE 0x1000
+#define RELOC_BASE 0x1200
-/* INTCA IRQ pins at INTCS + 0x1000 to make space for GIC+INTC handling */
+/* INTCA IRQ pins at INTCS + RELOC_BASE to make space for GIC+INTC handling */
#define INTCS_VECT_RELOC(n, vect) INTCS_VECT((n), (vect) + RELOC_BASE)
INTC_IRQ_PINS_32(intca_irq_pins, 0xe6900000,
diff --git a/arch/arm/mach-tegra/board-harmony-power.c b/arch/arm/mach-tegra/board-harmony-power.c
index 8fd387bf31f..b7344beec10 100644
--- a/arch/arm/mach-tegra/board-harmony-power.c
+++ b/arch/arm/mach-tegra/board-harmony-power.c
@@ -51,7 +51,7 @@ static struct regulator_init_data ldo0_data = {
.consumer_supplies = tps658621_ldo0_supply,
};
-#define HARMONY_REGULATOR_INIT(_id, _name, _supply, _minmv, _maxmv) \
+#define HARMONY_REGULATOR_INIT(_id, _name, _supply, _minmv, _maxmv, _on)\
static struct regulator_init_data _id##_data = { \
.supply_regulator = _supply, \
.constraints = { \
@@ -63,21 +63,22 @@ static struct regulator_init_data ldo0_data = {
.valid_ops_mask = (REGULATOR_CHANGE_MODE | \
REGULATOR_CHANGE_STATUS | \
REGULATOR_CHANGE_VOLTAGE), \
+ .always_on = _on, \
}, \
}
-HARMONY_REGULATOR_INIT(sm0, "vdd_sm0", "vdd_sys", 725, 1500);
-HARMONY_REGULATOR_INIT(sm1, "vdd_sm1", "vdd_sys", 725, 1500);
-HARMONY_REGULATOR_INIT(sm2, "vdd_sm2", "vdd_sys", 3000, 4550);
-HARMONY_REGULATOR_INIT(ldo1, "vdd_ldo1", "vdd_sm2", 725, 1500);
-HARMONY_REGULATOR_INIT(ldo2, "vdd_ldo2", "vdd_sm2", 725, 1500);
-HARMONY_REGULATOR_INIT(ldo3, "vdd_ldo3", "vdd_sm2", 1250, 3300);
-HARMONY_REGULATOR_INIT(ldo4, "vdd_ldo4", "vdd_sm2", 1700, 2475);
-HARMONY_REGULATOR_INIT(ldo5, "vdd_ldo5", NULL, 1250, 3300);
-HARMONY_REGULATOR_INIT(ldo6, "vdd_ldo6", "vdd_sm2", 1250, 3300);
-HARMONY_REGULATOR_INIT(ldo7, "vdd_ldo7", "vdd_sm2", 1250, 3300);
-HARMONY_REGULATOR_INIT(ldo8, "vdd_ldo8", "vdd_sm2", 1250, 3300);
-HARMONY_REGULATOR_INIT(ldo9, "vdd_ldo9", "vdd_sm2", 1250, 3300);
+HARMONY_REGULATOR_INIT(sm0, "vdd_sm0", "vdd_sys", 725, 1500, 1);
+HARMONY_REGULATOR_INIT(sm1, "vdd_sm1", "vdd_sys", 725, 1500, 1);
+HARMONY_REGULATOR_INIT(sm2, "vdd_sm2", "vdd_sys", 3000, 4550, 1);
+HARMONY_REGULATOR_INIT(ldo1, "vdd_ldo1", "vdd_sm2", 725, 1500, 1);
+HARMONY_REGULATOR_INIT(ldo2, "vdd_ldo2", "vdd_sm2", 725, 1500, 0);
+HARMONY_REGULATOR_INIT(ldo3, "vdd_ldo3", "vdd_sm2", 1250, 3300, 1);
+HARMONY_REGULATOR_INIT(ldo4, "vdd_ldo4", "vdd_sm2", 1700, 2475, 1);
+HARMONY_REGULATOR_INIT(ldo5, "vdd_ldo5", NULL, 1250, 3300, 1);
+HARMONY_REGULATOR_INIT(ldo6, "vdd_ldo6", "vdd_sm2", 1250, 3300, 0);
+HARMONY_REGULATOR_INIT(ldo7, "vdd_ldo7", "vdd_sm2", 1250, 3300, 0);
+HARMONY_REGULATOR_INIT(ldo8, "vdd_ldo8", "vdd_sm2", 1250, 3300, 0);
+HARMONY_REGULATOR_INIT(ldo9, "vdd_ldo9", "vdd_sm2", 1250, 3300, 1);
#define TPS_REG(_id, _data) \
{ \
@@ -119,9 +120,10 @@ static struct i2c_board_info __initdata harmony_regulators[] = {
int __init harmony_regulator_init(void)
{
+ regulator_register_always_on(0, "vdd_sys",
+ NULL, 0, 5000000);
+
if (machine_is_harmony()) {
- regulator_register_always_on(0, "vdd_sys",
- NULL, 0, 5000000);
i2c_register_board_info(3, harmony_regulators, 1);
} else { /* Harmony, booted using device tree */
struct device_node *np;
diff --git a/arch/arm/mach-tegra/devices.c b/arch/arm/mach-tegra/devices.c
index c70e65ffa36..61e9603744a 100644
--- a/arch/arm/mach-tegra/devices.c
+++ b/arch/arm/mach-tegra/devices.c
@@ -23,7 +23,6 @@
#include <linux/fsl_devices.h>
#include <linux/serial_8250.h>
#include <linux/i2c-tegra.h>
-#include <asm/pmu.h>
#include <mach/irqs.h>
#include <mach/iomap.h>
#include <mach/dma.h>
@@ -516,7 +515,7 @@ static struct resource tegra_pmu_resources[] = {
struct platform_device tegra_pmu_device = {
.name = "arm-pmu",
- .id = ARM_PMU_DEVICE_CPU,
+ .id = -1,
.num_resources = ARRAY_SIZE(tegra_pmu_resources),
.resource = tegra_pmu_resources,
};
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index c013bbf79ca..53d3d46dec1 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -41,7 +41,6 @@ config MACH_HREFV60
config MACH_SNOWBALL
bool "U8500 Snowball platform"
select MACH_MOP500
- select LEDS_GPIO
help
Include support for the snowball development platform.
diff --git a/arch/arm/mach-ux500/board-mop500-msp.c b/arch/arm/mach-ux500/board-mop500-msp.c
index 99604803874..df15646036a 100644
--- a/arch/arm/mach-ux500/board-mop500-msp.c
+++ b/arch/arm/mach-ux500/board-mop500-msp.c
@@ -191,9 +191,9 @@ static struct platform_device *db8500_add_msp_i2s(struct device *parent,
return pdev;
}
-/* Platform device for ASoC U8500 machine */
-static struct platform_device snd_soc_u8500 = {
- .name = "snd-soc-u8500",
+/* Platform device for ASoC MOP500 machine */
+static struct platform_device snd_soc_mop500 = {
+ .name = "snd-soc-mop500",
.id = 0,
.dev = {
.platform_data = NULL,
@@ -227,8 +227,8 @@ int mop500_msp_init(struct device *parent)
{
struct platform_device *msp1;
- pr_info("%s: Register platform-device 'snd-soc-u8500'.\n", __func__);
- platform_device_register(&snd_soc_u8500);
+ pr_info("%s: Register platform-device 'snd-soc-mop500'.\n", __func__);
+ platform_device_register(&snd_soc_mop500);
pr_info("Initialize MSP I2S-devices.\n");
db8500_add_msp_i2s(parent, 0, U8500_MSP0_BASE, IRQ_DB8500_MSP0,
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index 8674a890fd1..a534d8880de 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -797,6 +797,7 @@ static void __init u8500_init_machine(void)
ARRAY_SIZE(mop500_platform_devs));
mop500_sdi_init(parent);
+ mop500_msp_init(parent);
i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
i2c_register_board_info(2, mop500_i2c2_devices,
@@ -804,6 +805,8 @@ static void __init u8500_init_machine(void)
mop500_uib_init();
+ } else if (of_machine_is_compatible("calaosystems,snowball-a9500")) {
+ mop500_msp_init(parent);
} else if (of_machine_is_compatible("st-ericsson,hrefv60+")) {
/*
* The HREFv60 board removed a GPIO expander and routed
@@ -815,6 +818,7 @@ static void __init u8500_init_machine(void)
ARRAY_SIZE(mop500_platform_devs));
hrefv60_sdi_init(parent);
+ mop500_msp_init(parent);
i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
i2c0_devs -= NUM_PRE_V60_I2C0_DEVICES;
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index db3c52d56ca..7e6c384881a 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -18,8 +18,8 @@
#include <linux/io.h>
#include <linux/mfd/abx500/ab8500.h>
-#include <asm/mach/map.h>
#include <asm/pmu.h>
+#include <asm/mach/map.h>
#include <plat/gpio-nomadik.h>
#include <mach/hardware.h>
#include <mach/setup.h>
@@ -122,7 +122,7 @@ struct arm_pmu_platdata db8500_pmu_platdata = {
static struct platform_device db8500_pmu_device = {
.name = "arm-pmu",
- .id = ARM_PMU_DEVICE_CPU,
+ .id = -1,
.num_resources = ARRAY_SIZE(db8500_pmu_resources),
.resource = db8500_pmu_resources,
.dev.platform_data = &db8500_pmu_platdata,
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index 61c492403b0..e4073a60a86 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -13,7 +13,6 @@
#include <asm/hardware/arm_timer.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/gic.h>
-#include <asm/pmu.h>
#include <asm/smp_scu.h>
#include <asm/smp_twd.h>
@@ -144,7 +143,7 @@ static struct resource pmu_resources[] = {
static struct platform_device pmu_device = {
.name = "arm-pmu",
- .id = ARM_PMU_DEVICE_CPU,
+ .id = -1,
.num_resources = ARRAY_SIZE(pmu_resources),
.resource = pmu_resources,
};
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 119bc52ab93..4e07eec1270 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -63,10 +63,11 @@ static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
pid = task_pid_nr(thread->task) << ASID_BITS;
asm volatile(
" mrc p15, 0, %0, c13, c0, 1\n"
- " bfi %1, %0, #0, %2\n"
- " mcr p15, 0, %1, c13, c0, 1\n"
+ " and %0, %0, %2\n"
+ " orr %0, %0, %1\n"
+ " mcr p15, 0, %0, c13, c0, 1\n"
: "=r" (contextidr), "+r" (pid)
- : "I" (ASID_BITS));
+ : "I" (~ASID_MASK));
isb();
return NOTIFY_OK;
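
The rewritten inline asm avoids the BFI instruction but computes the same thing: keep the current ASID in the low bits of CONTEXTIDR and write the task pid into the PROCID field above it, i.e. contextidr = (contextidr & ~ASID_MASK) | pid, with pid pre-shifted by ASID_BITS. A small user-space check of that identity, assuming the usual 8-bit ASID in the low bits and ASID_MASK covering the bits above it (as the ~ASID_MASK constraint suggests; the real definitions live in the arch headers):

    #include <stdio.h>

    #define ASID_BITS   8
    /* Assumed layout: ASID in the low 8 bits of CONTEXTIDR, PROCID above it. */
    #define ASID_MASK   (~0u << ASID_BITS)

    int main(void)
    {
        unsigned int contextidr = 0xdeadbe42u;      /* pretend register value */
        unsigned int pid = 1234u << ASID_BITS;      /* task_pid_nr() pre-shifted */

        /* Equivalent of "and %0, %0, #~ASID_MASK; orr %0, %0, %1":
         * keep the old ASID, replace the PROCID field with the pid. */
        unsigned int newval = (contextidr & ~ASID_MASK) | pid;

        /* prints: old=0xdeadbe42 new=0x0004d242 asid=0x42 procid=1234 */
        printf("old=0x%08x new=0x%08x asid=0x%02x procid=%u\n",
               contextidr, newval, newval & ~ASID_MASK, newval >> ASID_BITS);
        return 0;
    }
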
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c2cdf6500f7..13f555d6249 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -267,17 +267,19 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
vunmap(cpu_addr);
}
+#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
+
struct dma_pool {
size_t size;
spinlock_t lock;
unsigned long *bitmap;
unsigned long nr_pages;
void *vaddr;
- struct page *page;
+ struct page **pages;
};
static struct dma_pool atomic_pool = {
- .size = SZ_256K,
+ .size = DEFAULT_DMA_COHERENT_POOL_SIZE,
};
static int __init early_coherent_pool(char *p)
@@ -287,6 +289,21 @@ static int __init early_coherent_pool(char *p)
}
early_param("coherent_pool", early_coherent_pool);
+void __init init_dma_coherent_pool_size(unsigned long size)
+{
+ /*
+ * Catch any attempt to set the pool size too late.
+ */
+ BUG_ON(atomic_pool.vaddr);
+
+ /*
+ * Set the architecture-specific coherent pool size only if it has not
+ * already been changed by the kernel command line parameter.
+ */
+ if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
+ atomic_pool.size = size;
+}
+
/*
* Initialise the coherent pool for atomic allocations.
*/
@@ -297,6 +314,7 @@ static int __init atomic_pool_init(void)
unsigned long nr_pages = pool->size >> PAGE_SHIFT;
unsigned long *bitmap;
struct page *page;
+ struct page **pages;
void *ptr;
int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
@@ -304,21 +322,33 @@ static int __init atomic_pool_init(void)
if (!bitmap)
goto no_bitmap;
+ pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ goto no_pages;
+
if (IS_ENABLED(CONFIG_CMA))
ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
else
ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
&page, NULL);
if (ptr) {
+ int i;
+
+ for (i = 0; i < nr_pages; i++)
+ pages[i] = page + i;
+
spin_lock_init(&pool->lock);
pool->vaddr = ptr;
- pool->page = page;
+ pool->pages = pages;
pool->bitmap = bitmap;
pool->nr_pages = nr_pages;
pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
(unsigned)pool->size / 1024);
return 0;
}
+
+ kfree(pages);
+no_pages:
kfree(bitmap);
no_bitmap:
pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
@@ -358,7 +388,7 @@ void __init dma_contiguous_remap(void)
if (end > arm_lowmem_limit)
end = arm_lowmem_limit;
if (start >= end)
- return;
+ continue;
map.pfn = __phys_to_pfn(start);
map.virtual = __phys_to_virt(start);
@@ -423,7 +453,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
unsigned int pageno;
unsigned long flags;
void *ptr = NULL;
- size_t align;
+ unsigned long align_mask;
if (!pool->vaddr) {
WARN(1, "coherent pool not initialised!\n");
@@ -435,35 +465,53 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
* small, so align them to their order in pages, minimum is a page
* size. This helps reduce fragmentation of the DMA space.
*/
- align = PAGE_SIZE << get_order(size);
+ align_mask = (1 << get_order(size)) - 1;
spin_lock_irqsave(&pool->lock, flags);
pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
- 0, count, (1 << align) - 1);
+ 0, count, align_mask);
if (pageno < pool->nr_pages) {
bitmap_set(pool->bitmap, pageno, count);
ptr = pool->vaddr + PAGE_SIZE * pageno;
- *ret_page = pool->page + pageno;
+ *ret_page = pool->pages[pageno];
+ } else {
+ pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
+ "Please increase it with coherent_pool= kernel parameter!\n",
+ (unsigned)pool->size / 1024);
}
spin_unlock_irqrestore(&pool->lock, flags);
return ptr;
}
+static bool __in_atomic_pool(void *start, size_t size)
+{
+ struct dma_pool *pool = &atomic_pool;
+ void *end = start + size;
+ void *pool_start = pool->vaddr;
+ void *pool_end = pool->vaddr + pool->size;
+
+ if (start < pool_start || start >= pool_end)
+ return false;
+
+ if (end <= pool_end)
+ return true;
+
+ WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
+ start, end - 1, pool_start, pool_end - 1);
+
+ return false;
+}
+
static int __free_from_pool(void *start, size_t size)
{
struct dma_pool *pool = &atomic_pool;
unsigned long pageno, count;
unsigned long flags;
- if (start < pool->vaddr || start > pool->vaddr + pool->size)
+ if (!__in_atomic_pool(start, size))
return 0;
- if (start + size > pool->vaddr + pool->size) {
- WARN(1, "freeing wrong coherent size from pool\n");
- return 0;
- }
-
pageno = (start - pool->vaddr) >> PAGE_SHIFT;
count = size >> PAGE_SHIFT;
@@ -648,12 +696,12 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
if (arch_is_coherent() || nommu()) {
__dma_free_buffer(page, size);
+ } else if (__free_from_pool(cpu_addr, size)) {
+ return;
} else if (!IS_ENABLED(CONFIG_CMA)) {
__dma_free_remap(cpu_addr, size);
__dma_free_buffer(page, size);
} else {
- if (__free_from_pool(cpu_addr, size))
- return;
/*
* Non-atomic allocations cannot be freed with IRQs disabled
*/
@@ -1090,10 +1138,22 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si
return 0;
}
+static struct page **__atomic_get_pages(void *addr)
+{
+ struct dma_pool *pool = &atomic_pool;
+ struct page **pages = pool->pages;
+ int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
+
+ return pages + offs;
+}
+
static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
{
struct vm_struct *area;
+ if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
+ return __atomic_get_pages(cpu_addr);
+
if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
return cpu_addr;
@@ -1103,6 +1163,34 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
return NULL;
}
+static void *__iommu_alloc_atomic(struct device *dev, size_t size,
+ dma_addr_t *handle)
+{
+ struct page *page;
+ void *addr;
+
+ addr = __alloc_from_pool(size, &page);
+ if (!addr)
+ return NULL;
+
+ *handle = __iommu_create_mapping(dev, &page, size);
+ if (*handle == DMA_ERROR_CODE)
+ goto err_mapping;
+
+ return addr;
+
+err_mapping:
+ __free_from_pool(addr, size);
+ return NULL;
+}
+
+static void __iommu_free_atomic(struct device *dev, struct page **pages,
+ dma_addr_t handle, size_t size)
+{
+ __iommu_remove_mapping(dev, handle, size);
+ __free_from_pool(page_address(pages[0]), size);
+}
+
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
@@ -1113,6 +1201,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
*handle = DMA_ERROR_CODE;
size = PAGE_ALIGN(size);
+ if (gfp & GFP_ATOMIC)
+ return __iommu_alloc_atomic(dev, size, handle);
+
pages = __iommu_alloc_buffer(dev, size, gfp);
if (!pages)
return NULL;
@@ -1179,6 +1270,11 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
return;
}
+ if (__in_atomic_pool(cpu_addr, size)) {
+ __iommu_free_atomic(dev, pages, handle, size);
+ return;
+ }
+
if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
unmap_kernel_range((unsigned long)cpu_addr, size);
vunmap(cpu_addr);
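
Two details of the pool rework are easy to misread. First, bitmap_find_next_zero_area() takes an alignment mask in units of bitmap bits (pages here), so for an allocation of order N the right value is (1 << N) - 1; the old code built the mask from align = PAGE_SIZE << order, an address-sized quantity, which made (1 << align) - 1 meaningless. Second, __in_atomic_pool() now checks both ends of the range before a free or an IOMMU lookup touches the pool. A user-space sketch of both calculations (PAGE_SIZE/get_order and the range check are re-implemented locally just for the demonstration):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdbool.h>

    #define PAGE_SHIFT  12
    #define PAGE_SIZE   (1UL << PAGE_SHIFT)

    /* Smallest order such that (PAGE_SIZE << order) >= size. */
    static unsigned int get_order(size_t size)
    {
        unsigned int order = 0;

        while ((PAGE_SIZE << order) < size)
            order++;
        return order;
    }

    /* Range check in the spirit of __in_atomic_pool(). */
    static bool in_pool(const char *pool, size_t pool_size,
                        const char *start, size_t size)
    {
        if (start < pool || start >= pool + pool_size)
            return false;
        /* compare as offsets to avoid forming an out-of-bounds pointer */
        return size <= pool_size - (size_t)(start - pool);
    }

    int main(void)
    {
        size_t sizes[] = { PAGE_SIZE, 3 * PAGE_SIZE, 16 * PAGE_SIZE };
        static char pool[256 * 1024];

        for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
            unsigned int order = get_order(sizes[i]);
            unsigned long align_mask = (1UL << order) - 1;

            printf("size %6zu -> order %u, align_mask 0x%lx\n",
                   sizes[i], order, align_mask);
        }

        /* prints: in_pool: 1 0 */
        printf("in_pool: %d %d\n",
               in_pool(pool, sizeof(pool), pool + PAGE_SIZE, PAGE_SIZE),
               in_pool(pool, sizeof(pool), pool + sizeof(pool) - 16, 64));
        return 0;
    }
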
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 77458548e03..40ca11ed6e5 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -231,8 +231,6 @@ void __sync_icache_dcache(pte_t pteval)
struct page *page;
struct address_space *mapping;
- if (!pte_present_user(pteval))
- return;
if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
/* only flush non-aliasing VIPT caches for exec mappings */
return;
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 6776160618e..a8ee92da354 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -55,6 +55,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
/* permanent static mappings from iotable_init() */
#define VM_ARM_STATIC_MAPPING 0x40000000
+/* empty mapping */
+#define VM_ARM_EMPTY_MAPPING 0x20000000
+
/* mapping type (attributes) for permanent static mappings */
#define VM_ARM_MTYPE(mt) ((mt) << 20)
#define VM_ARM_MTYPE_MASK (0x1f << 20)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4c2d0451e84..c2fa21d0103 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -807,7 +807,7 @@ static void __init pmd_empty_section_gap(unsigned long addr)
vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
vm->addr = (void *)addr;
vm->size = SECTION_SIZE;
- vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+ vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
vm->caller = pmd_empty_section_gap;
vm_area_add_early(vm);
}
@@ -820,7 +820,7 @@ static void __init fill_pmd_gaps(void)
/* we're still single threaded hence no lock needed here */
for (vm = vmlist; vm; vm = vm->next) {
- if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+ if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
continue;
addr = (unsigned long)vm->addr;
if (addr < next)
@@ -961,8 +961,8 @@ void __init sanity_check_meminfo(void)
* Check whether this memory bank would partially overlap
* the vmalloc area.
*/
- if (__va(bank->start + bank->size) > vmalloc_min ||
- __va(bank->start + bank->size) < __va(bank->start)) {
+ if (__va(bank->start + bank->size - 1) >= vmalloc_min ||
+ __va(bank->start + bank->size - 1) <= __va(bank->start)) {
unsigned long newsize = vmalloc_min - __va(bank->start);
printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
"to -%.8llx (vmalloc region overlap).\n",
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index c2021139cb5..ea94765acf9 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -38,10 +38,10 @@ ENTRY(v7wbi_flush_user_tlb_range)
dsb
mov r0, r0, lsr #PAGE_SHIFT @ align address
mov r1, r1, lsr #PAGE_SHIFT
-#ifdef CONFIG_ARM_ERRATA_720789
- mov r3, #0
-#else
asid r3, r3 @ mask ASID
+#ifdef CONFIG_ARM_ERRATA_720789
+ ALT_SMP(W(mov) r3, #0 )
+ ALT_UP(W(nop) )
#endif
orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
mov r1, r1, lsl #PAGE_SHIFT
diff --git a/arch/arm/plat-iop/pmu.c b/arch/arm/plat-iop/pmu.c
index a2024b8685a..ad9f9744a82 100644
--- a/arch/arm/plat-iop/pmu.c
+++ b/arch/arm/plat-iop/pmu.c
@@ -9,7 +9,6 @@
*/
#include <linux/platform_device.h>
-#include <asm/pmu.h>
#include <mach/irqs.h>
static struct resource pmu_resource = {
@@ -26,7 +25,7 @@ static struct resource pmu_resource = {
static struct platform_device pmu_device = {
.name = "arm-pmu",
- .id = ARM_PMU_DEVICE_CPU,
+ .id = -1,
.resource = &pmu_resource,
.num_resources = 1,
};
diff --git a/arch/arm/plat-mxc/include/mach/mx25.h b/arch/arm/plat-mxc/include/mach/mx25.h
index 627d94f1b01..ec466400a20 100644
--- a/arch/arm/plat-mxc/include/mach/mx25.h
+++ b/arch/arm/plat-mxc/include/mach/mx25.h
@@ -98,6 +98,7 @@
#define MX25_INT_UART1 (NR_IRQS_LEGACY + 45)
#define MX25_INT_GPIO2 (NR_IRQS_LEGACY + 51)
#define MX25_INT_GPIO1 (NR_IRQS_LEGACY + 52)
+#define MX25_INT_GPT1 (NR_IRQS_LEGACY + 54)
#define MX25_INT_FEC (NR_IRQS_LEGACY + 57)
#define MX25_DMA_REQ_SSI2_RX1 22
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index 626ad8cad7a..938b50a3343 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -189,6 +189,7 @@ struct omap_dm_timer *omap_dm_timer_request(void)
timer->reserved = 1;
break;
}
+ spin_unlock_irqrestore(&dm_timer_lock, flags);
if (timer) {
ret = omap_dm_timer_prepare(timer);
@@ -197,7 +198,6 @@ struct omap_dm_timer *omap_dm_timer_request(void)
timer = NULL;
}
}
- spin_unlock_irqrestore(&dm_timer_lock, flags);
if (!timer)
pr_debug("%s: timer request failed!\n", __func__);
@@ -220,6 +220,7 @@ struct omap_dm_timer *omap_dm_timer_request_specific(int id)
break;
}
}
+ spin_unlock_irqrestore(&dm_timer_lock, flags);
if (timer) {
ret = omap_dm_timer_prepare(timer);
@@ -228,7 +229,6 @@ struct omap_dm_timer *omap_dm_timer_request_specific(int id)
timer = NULL;
}
}
- spin_unlock_irqrestore(&dm_timer_lock, flags);
if (!timer)
pr_debug("%s: timer%d request failed!\n", __func__, id);
@@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_enable);
void omap_dm_timer_disable(struct omap_dm_timer *timer)
{
- pm_runtime_put(&timer->pdev->dev);
+ pm_runtime_put_sync(&timer->pdev->dev);
}
EXPORT_SYMBOL_GPL(omap_dm_timer_disable);
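
The reordering in the two request paths above exists because omap_dm_timer_prepare() reaches into clock and runtime-PM code that may sleep, which is not allowed while holding dm_timer_lock with interrupts disabled. The timer is therefore claimed under the lock (timer->reserved = 1), the lock is dropped, and the slow prepare runs afterwards, with the reservation rolled back on failure. A user-space sketch of that reserve-then-prepare pattern, with a pthread mutex standing in for the spinlock (names and sizes are illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdbool.h>
    #include <unistd.h>

    struct timer_slot {
        bool reserved;
    };

    static struct timer_slot timers[4];
    static pthread_mutex_t timer_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for omap_dm_timer_prepare(): slow, may "sleep". */
    static int prepare_timer(struct timer_slot *t)
    {
        (void)t;
        usleep(1000);       /* never do this while holding the lock */
        return 0;           /* 0 on success, like the kernel code */
    }

    static struct timer_slot *request_timer(void)
    {
        struct timer_slot *t = NULL;

        pthread_mutex_lock(&timer_lock);
        for (unsigned i = 0; i < sizeof(timers) / sizeof(timers[0]); i++) {
            if (!timers[i].reserved) {
                timers[i].reserved = true;  /* claim it under the lock */
                t = &timers[i];
                break;
            }
        }
        pthread_mutex_unlock(&timer_lock); /* drop the lock before the slow part */

        if (t && prepare_timer(t)) {
            t->reserved = false;           /* roll back the claim on failure */
            t = NULL;
        }
        return t;
    }

    int main(void)
    {
        printf("got timer: %s\n", request_timer() ? "yes" : "no");
        return 0;
    }
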
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
index 68b180edcff..bb5d08a70db 100644
--- a/arch/arm/plat-omap/include/plat/cpu.h
+++ b/arch/arm/plat-omap/include/plat/cpu.h
@@ -372,7 +372,8 @@ IS_OMAP_TYPE(3430, 0x3430)
#define cpu_class_is_omap1() (cpu_is_omap7xx() || cpu_is_omap15xx() || \
cpu_is_omap16xx())
#define cpu_class_is_omap2() (cpu_is_omap24xx() || cpu_is_omap34xx() || \
- cpu_is_omap44xx() || soc_is_omap54xx())
+ cpu_is_omap44xx() || soc_is_omap54xx() || \
+ soc_is_am33xx())
/* Various silicon revisions for omap2 */
#define OMAP242X_CLASS 0x24200024
diff --git a/arch/arm/plat-omap/include/plat/multi.h b/arch/arm/plat-omap/include/plat/multi.h
index 045e320f106..324d31b1485 100644
--- a/arch/arm/plat-omap/include/plat/multi.h
+++ b/arch/arm/plat-omap/include/plat/multi.h
@@ -108,4 +108,13 @@
# endif
#endif
+#ifdef CONFIG_SOC_AM33XX
+# ifdef OMAP_NAME
+# undef MULTI_OMAP2
+# define MULTI_OMAP2
+# else
+# define OMAP_NAME am33xx
+# endif
+#endif
+
#endif /* __PLAT_OMAP_MULTI_H */
diff --git a/arch/arm/plat-omap/include/plat/uncompress.h b/arch/arm/plat-omap/include/plat/uncompress.h
index b8d19a13678..7f7b112accc 100644
--- a/arch/arm/plat-omap/include/plat/uncompress.h
+++ b/arch/arm/plat-omap/include/plat/uncompress.h
@@ -110,7 +110,7 @@ static inline void flush(void)
_DEBUG_LL_ENTRY(mach, AM33XX_UART##p##_BASE, OMAP_PORT_SHIFT, \
AM33XXUART##p)
-static inline void __arch_decomp_setup(unsigned long arch_id)
+static inline void arch_decomp_setup(void)
{
int port = 0;
@@ -198,8 +198,6 @@ static inline void __arch_decomp_setup(unsigned long arch_id)
} while (0);
}
-#define arch_decomp_setup() __arch_decomp_setup(arch_id)
-
/*
* nothing to do
*/
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 766181cb5c9..024f3b08db2 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -68,6 +68,7 @@
static unsigned long omap_sram_start;
static void __iomem *omap_sram_base;
+static unsigned long omap_sram_skip;
static unsigned long omap_sram_size;
static void __iomem *omap_sram_ceil;
@@ -106,6 +107,7 @@ static int is_sram_locked(void)
*/
static void __init omap_detect_sram(void)
{
+ omap_sram_skip = SRAM_BOOTLOADER_SZ;
if (cpu_class_is_omap2()) {
if (is_sram_locked()) {
if (cpu_is_omap34xx()) {
@@ -113,6 +115,7 @@ static void __init omap_detect_sram(void)
if ((omap_type() == OMAP2_DEVICE_TYPE_EMU) ||
(omap_type() == OMAP2_DEVICE_TYPE_SEC)) {
omap_sram_size = 0x7000; /* 28K */
+ omap_sram_skip += SZ_16K;
} else {
omap_sram_size = 0x8000; /* 32K */
}
@@ -175,8 +178,10 @@ static void __init omap_map_sram(void)
return;
#ifdef CONFIG_OMAP4_ERRATA_I688
+ if (cpu_is_omap44xx()) {
omap_sram_start += PAGE_SIZE;
omap_sram_size -= SZ_16K;
+ }
#endif
if (cpu_is_omap34xx()) {
/*
@@ -203,8 +208,8 @@ static void __init omap_map_sram(void)
* Looks like we need to preserve some bootloader code at the
* beginning of SRAM for jumping to flash for reboot to work...
*/
- memset_io(omap_sram_base + SRAM_BOOTLOADER_SZ, 0,
- omap_sram_size - SRAM_BOOTLOADER_SZ);
+ memset_io(omap_sram_base + omap_sram_skip, 0,
+ omap_sram_size - omap_sram_skip);
}
/*
@@ -218,7 +223,7 @@ void *omap_sram_push_address(unsigned long size)
{
unsigned long available, new_ceil = (unsigned long)omap_sram_ceil;
- available = omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ);
+ available = omap_sram_ceil - (omap_sram_base + omap_sram_skip);
if (size > available) {
pr_err("Not enough space in SRAM\n");
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index d245a87dc01..b8b747a9d36 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -291,10 +291,12 @@ static struct platform_device orion_ge00 = {
void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err)
+ unsigned long irq_err,
+ unsigned int tx_csum_limit)
{
fill_resources(&orion_ge00_shared, orion_ge00_shared_resources,
mapbase + 0x2000, SZ_16K - 1, irq_err);
+ orion_ge00_shared_data.tx_csum_limit = tx_csum_limit;
ge_complete(&orion_ge00_shared_data,
orion_ge00_resources, irq, &orion_ge00_shared,
eth_data, &orion_ge00);
@@ -343,10 +345,12 @@ static struct platform_device orion_ge01 = {
void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err)
+ unsigned long irq_err,
+ unsigned int tx_csum_limit)
{
fill_resources(&orion_ge01_shared, orion_ge01_shared_resources,
mapbase + 0x2000, SZ_16K - 1, irq_err);
+ orion_ge01_shared_data.tx_csum_limit = tx_csum_limit;
ge_complete(&orion_ge01_shared_data,
orion_ge01_resources, irq, &orion_ge01_shared,
eth_data, &orion_ge01);
diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h
index e00fdb21360..ae2377ef63e 100644
--- a/arch/arm/plat-orion/include/plat/common.h
+++ b/arch/arm/plat-orion/include/plat/common.h
@@ -39,12 +39,14 @@ void __init orion_rtc_init(unsigned long mapbase,
void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err);
+ unsigned long irq_err,
+ unsigned int tx_csum_limit);
void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err);
+ unsigned long irq_err,
+ unsigned int tx_csum_limit);
void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
index 28f898f7538..db98e7021f0 100644
--- a/arch/arm/plat-s3c24xx/dma.c
+++ b/arch/arm/plat-s3c24xx/dma.c
@@ -430,7 +430,7 @@ s3c2410_dma_canload(struct s3c2410_dma_chan *chan)
* when necessary.
*/
-int s3c2410_dma_enqueue(unsigned int channel, void *id,
+int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
dma_addr_t data, int size)
{
struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index 7aca31c1df1..9c3b90c3538 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -403,7 +403,8 @@ config S5P_DEV_USB_EHCI
config S3C24XX_PWM
bool "PWM device support"
- select HAVE_PWM
+ select PWM
+ select PWM_SAMSUNG
help
Support for exporting the PWM timer blocks via the pwm device
system
diff --git a/arch/arm/plat-samsung/clock.c b/arch/arm/plat-samsung/clock.c
index 65c5eca475e..d1116e2dfbe 100644
--- a/arch/arm/plat-samsung/clock.c
+++ b/arch/arm/plat-samsung/clock.c
@@ -144,6 +144,7 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
int clk_set_rate(struct clk *clk, unsigned long rate)
{
+ unsigned long flags;
int ret;
if (IS_ERR(clk))
@@ -159,9 +160,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
if (clk->ops == NULL || clk->ops->set_rate == NULL)
return -EINVAL;
- spin_lock(&clocks_lock);
+ spin_lock_irqsave(&clocks_lock, flags);
ret = (clk->ops->set_rate)(clk, rate);
- spin_unlock(&clocks_lock);
+ spin_unlock_irqrestore(&clocks_lock, flags);
return ret;
}
@@ -173,17 +174,18 @@ struct clk *clk_get_parent(struct clk *clk)
int clk_set_parent(struct clk *clk, struct clk *parent)
{
+ unsigned long flags;
int ret = 0;
if (IS_ERR(clk))
return -EINVAL;
- spin_lock(&clocks_lock);
+ spin_lock_irqsave(&clocks_lock, flags);
if (clk->ops && clk->ops->set_parent)
ret = (clk->ops->set_parent)(clk, parent);
- spin_unlock(&clocks_lock);
+ spin_unlock_irqrestore(&clocks_lock, flags);
return ret;
}
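The clock.c hunks switch clk_set_rate()/clk_set_parent() from spin_lock() to spin_lock_irqsave(), so clocks_lock stays safe even when the same lock is taken with interrupts disabled elsewhere. A minimal kernel-style fragment of the pattern (illustrative names, needs the usual module scaffolding to build; not the real Samsung clock code):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);
    static unsigned long example_rate;

    static int example_set_rate(unsigned long rate)
    {
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);    /* local IRQs off + lock held */
        example_rate = rate;                        /* critical section */
        spin_unlock_irqrestore(&example_lock, flags);

        return 0;
    }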
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index 74e31ce3553..6ff45d53362 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -32,8 +32,9 @@
#include <linux/platform_data/s3c-hsudc.h>
#include <linux/platform_data/s3c-hsotg.h>
+#include <media/s5p_hdmi.h>
+
#include <asm/irq.h>
-#include <asm/pmu.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
@@ -748,7 +749,8 @@ void __init s5p_i2c_hdmiphy_set_platdata(struct s3c2410_platform_i2c *pd)
if (!pd) {
pd = &default_i2c_data;
- if (soc_is_exynos4210())
+ if (soc_is_exynos4210() ||
+ soc_is_exynos4212() || soc_is_exynos4412())
pd->bus_num = 8;
else if (soc_is_s5pv210())
pd->bus_num = 3;
@@ -759,6 +761,30 @@ void __init s5p_i2c_hdmiphy_set_platdata(struct s3c2410_platform_i2c *pd)
npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c),
&s5p_device_i2c_hdmiphy);
}
+
+struct s5p_hdmi_platform_data s5p_hdmi_def_platdata;
+
+void __init s5p_hdmi_set_platdata(struct i2c_board_info *hdmiphy_info,
+ struct i2c_board_info *mhl_info, int mhl_bus)
+{
+ struct s5p_hdmi_platform_data *pd = &s5p_hdmi_def_platdata;
+
+ if (soc_is_exynos4210() ||
+ soc_is_exynos4212() || soc_is_exynos4412())
+ pd->hdmiphy_bus = 8;
+ else if (soc_is_s5pv210())
+ pd->hdmiphy_bus = 3;
+ else
+ pd->hdmiphy_bus = 0;
+
+ pd->hdmiphy_info = hdmiphy_info;
+ pd->mhl_info = mhl_info;
+ pd->mhl_bus = mhl_bus;
+
+ s3c_set_platdata(pd, sizeof(struct s5p_hdmi_platform_data),
+ &s5p_device_hdmi);
+}
+
#endif /* CONFIG_S5P_DEV_I2C_HDMIPHY */
/* I2S */
@@ -1105,7 +1131,7 @@ static struct resource s5p_pmu_resource[] = {
static struct platform_device s5p_device_pmu = {
.name = "arm-pmu",
- .id = ARM_PMU_DEVICE_CPU,
+ .id = -1,
.num_resources = ARRAY_SIZE(s5p_pmu_resource),
.resource = s5p_pmu_resource,
};
diff --git a/arch/arm/plat-samsung/include/plat/hdmi.h b/arch/arm/plat-samsung/include/plat/hdmi.h
new file mode 100644
index 00000000000..331d046ac2c
--- /dev/null
+++ b/arch/arm/plat-samsung/include/plat/hdmi.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __PLAT_SAMSUNG_HDMI_H
+#define __PLAT_SAMSUNG_HDMI_H __FILE__
+
+extern void s5p_hdmi_set_platdata(struct i2c_board_info *hdmiphy_info,
+ struct i2c_board_info *mhl_info, int mhl_bus);
+
+#endif /* __PLAT_SAMSUNG_HDMI_H */
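Board files are expected to fill in the new HDMI platform data through this helper. A sketch of a hypothetical caller (the device name, I2C address and the decision to omit an MHL bridge are all made up for illustration):

    #include <linux/i2c.h>
    #include <linux/init.h>
    #include <plat/hdmi.h>

    /* Hypothetical HDMI PHY on the board; name and address are illustrative. */
    static struct i2c_board_info example_hdmiphy_info = {
        I2C_BOARD_INFO("hdmiphy", 0x38),
    };

    static void __init example_board_hdmi_init(void)
    {
        /* No MHL bridge on this board: pass NULL and bus 0. */
        s5p_hdmi_set_platdata(&example_hdmiphy_info, NULL, 0);
    }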
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index 64ab65f0fdb..15070284343 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -74,7 +74,7 @@ unsigned char pm_uart_udivslot;
#ifdef CONFIG_SAMSUNG_PM_DEBUG
-struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS];
+static struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS];
static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save)
{
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index fb849d044bd..c834b32af27 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -719,8 +719,10 @@ static int __init vfp_init(void)
if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
elf_hwcap |= HWCAP_NEON;
#endif
+#ifdef CONFIG_VFPv3
if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
elf_hwcap |= HWCAP_VFPv4;
+#endif
}
}
return 0;
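With this guard, HWCAP_VFPv4 is only advertised when the kernel is built with VFPv3 support. User space sees the result through the ELF auxiliary vector; a small sketch (the bit value is the one the ARM hwcap ABI uses, redefined here only so the example is self-contained):

    #include <stdio.h>
    #include <sys/auxv.h>

    #define EXAMPLE_HWCAP_VFPv4 (1UL << 16)   /* HWCAP_VFPv4 bit from the ARM ABI */

    int main(void)
    {
        unsigned long hwcap = getauxval(AT_HWCAP);

        printf("VFPv4 %s\n", (hwcap & EXAMPLE_HWCAP_VFPv4) ? "present" : "absent");
        return 0;
    }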
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index f3486192063..c7092e6057c 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -38,6 +38,7 @@ config BLACKFIN
select GENERIC_ATOMIC64
select GENERIC_IRQ_PROBE
select IRQ_PER_CPU if SMP
+ select USE_GENERIC_SMP_HELPERS if SMP
select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
select GENERIC_SMP_IDLE_THREAD
select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
index d3d7e64ca96..66cf00095b8 100644
--- a/arch/blackfin/Makefile
+++ b/arch/blackfin/Makefile
@@ -20,7 +20,6 @@ endif
KBUILD_AFLAGS += $(call cc-option,-mno-fdpic)
KBUILD_CFLAGS_MODULE += -mlong-calls
LDFLAGS += -m elf32bfin
-KALLSYMS += --symbol-prefix=_
KBUILD_DEFCONFIG := BF537-STAMP_defconfig
diff --git a/arch/blackfin/include/asm/smp.h b/arch/blackfin/include/asm/smp.h
index dc3d144b4bb..9631598dcc5 100644
--- a/arch/blackfin/include/asm/smp.h
+++ b/arch/blackfin/include/asm/smp.h
@@ -18,6 +18,8 @@
#define raw_smp_processor_id() blackfin_core_id()
extern void bfin_relocate_coreb_l1_mem(void);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
asmlinkage void blackfin_icache_flush_range_l1(unsigned long *ptr);
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index ada8f0fc71e..fb96e607adc 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -52,7 +52,6 @@ EXPORT_SYMBOL(reserved_mem_dcache_on);
#ifdef CONFIG_MTD_UCLINUX
extern struct map_info uclinux_ram_map;
unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
-unsigned long _ebss;
EXPORT_SYMBOL(memory_mtd_end);
EXPORT_SYMBOL(memory_mtd_start);
EXPORT_SYMBOL(mtd_size);
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 00bbe672b3b..a40151306b7 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -48,10 +48,13 @@ unsigned long blackfin_iflush_l1_entry[NR_CPUS];
struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;
-#define BFIN_IPI_TIMER 0
-#define BFIN_IPI_RESCHEDULE 1
-#define BFIN_IPI_CALL_FUNC 2
-#define BFIN_IPI_CPU_STOP 3
+enum ipi_message_type {
+ BFIN_IPI_TIMER,
+ BFIN_IPI_RESCHEDULE,
+ BFIN_IPI_CALL_FUNC,
+ BFIN_IPI_CALL_FUNC_SINGLE,
+ BFIN_IPI_CPU_STOP,
+};
struct blackfin_flush_data {
unsigned long start;
@@ -60,35 +63,20 @@ struct blackfin_flush_data {
void *secondary_stack;
-
-struct smp_call_struct {
- void (*func)(void *info);
- void *info;
- int wait;
- cpumask_t *waitmask;
-};
-
static struct blackfin_flush_data smp_flush_data;
static DEFINE_SPINLOCK(stop_lock);
-struct ipi_message {
- unsigned long type;
- struct smp_call_struct call_struct;
-};
-
/* A magic number - stress test shows this is safe for common cases */
#define BFIN_IPI_MSGQ_LEN 5
/* Simple FIFO buffer, overflow leads to panic */
-struct ipi_message_queue {
- spinlock_t lock;
+struct ipi_data {
unsigned long count;
- unsigned long head; /* head of the queue */
- struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
+ unsigned long bits;
};
-static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
+static DEFINE_PER_CPU(struct ipi_data, bfin_ipi);
static void ipi_cpu_stop(unsigned int cpu)
{
@@ -129,28 +117,6 @@ static void ipi_flush_icache(void *info)
blackfin_icache_flush_range(fdata->start, fdata->end);
}
-static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
-{
- int wait;
- void (*func)(void *info);
- void *info;
- func = msg->call_struct.func;
- info = msg->call_struct.info;
- wait = msg->call_struct.wait;
- func(info);
- if (wait) {
-#ifdef __ARCH_SYNC_CORE_DCACHE
- /*
- * 'wait' usually means synchronization between CPUs.
- * Invalidate D cache in case shared data was changed
- * by func() to ensure cache coherence.
- */
- resync_core_dcache();
-#endif
- cpumask_clear_cpu(cpu, msg->call_struct.waitmask);
- }
-}
-
/* Use IRQ_SUPPLE_0 to request reschedule.
* When returning from interrupt to user space,
* there is chance to reschedule */
@@ -172,152 +138,95 @@ void ipi_timer(void)
static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
{
- struct ipi_message *msg;
- struct ipi_message_queue *msg_queue;
+ struct ipi_data *bfin_ipi_data;
unsigned int cpu = smp_processor_id();
- unsigned long flags;
+ unsigned long pending;
+ unsigned long msg;
platform_clear_ipi(cpu, IRQ_SUPPLE_1);
- msg_queue = &__get_cpu_var(ipi_msg_queue);
-
- spin_lock_irqsave(&msg_queue->lock, flags);
-
- while (msg_queue->count) {
- msg = &msg_queue->ipi_message[msg_queue->head];
- switch (msg->type) {
- case BFIN_IPI_TIMER:
- ipi_timer();
- break;
- case BFIN_IPI_RESCHEDULE:
- scheduler_ipi();
- break;
- case BFIN_IPI_CALL_FUNC:
- ipi_call_function(cpu, msg);
- break;
- case BFIN_IPI_CPU_STOP:
- ipi_cpu_stop(cpu);
- break;
- default:
- printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
- cpu, msg->type);
- break;
- }
- msg_queue->head++;
- msg_queue->head %= BFIN_IPI_MSGQ_LEN;
- msg_queue->count--;
+ bfin_ipi_data = &__get_cpu_var(bfin_ipi);
+
+ while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) {
+ msg = 0;
+ do {
+ msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1);
+ switch (msg) {
+ case BFIN_IPI_TIMER:
+ ipi_timer();
+ break;
+ case BFIN_IPI_RESCHEDULE:
+ scheduler_ipi();
+ break;
+ case BFIN_IPI_CALL_FUNC:
+ generic_smp_call_function_interrupt();
+ break;
+
+ case BFIN_IPI_CALL_FUNC_SINGLE:
+ generic_smp_call_function_single_interrupt();
+ break;
+
+ case BFIN_IPI_CPU_STOP:
+ ipi_cpu_stop(cpu);
+ break;
+ }
+ } while (msg < BITS_PER_LONG);
+
+ smp_mb();
}
- spin_unlock_irqrestore(&msg_queue->lock, flags);
return IRQ_HANDLED;
}
-static void ipi_queue_init(void)
+static void bfin_ipi_init(void)
{
unsigned int cpu;
- struct ipi_message_queue *msg_queue;
+ struct ipi_data *bfin_ipi_data;
for_each_possible_cpu(cpu) {
- msg_queue = &per_cpu(ipi_msg_queue, cpu);
- spin_lock_init(&msg_queue->lock);
- msg_queue->count = 0;
- msg_queue->head = 0;
+ bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
+ bfin_ipi_data->bits = 0;
+ bfin_ipi_data->count = 0;
}
}
-static inline void smp_send_message(cpumask_t callmap, unsigned long type,
- void (*func) (void *info), void *info, int wait)
+void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
{
unsigned int cpu;
- struct ipi_message_queue *msg_queue;
- struct ipi_message *msg;
- unsigned long flags, next_msg;
- cpumask_t waitmask; /* waitmask is shared by all cpus */
-
- cpumask_copy(&waitmask, &callmap);
- for_each_cpu(cpu, &callmap) {
- msg_queue = &per_cpu(ipi_msg_queue, cpu);
- spin_lock_irqsave(&msg_queue->lock, flags);
- if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
- next_msg = (msg_queue->head + msg_queue->count)
- % BFIN_IPI_MSGQ_LEN;
- msg = &msg_queue->ipi_message[next_msg];
- msg->type = type;
- if (type == BFIN_IPI_CALL_FUNC) {
- msg->call_struct.func = func;
- msg->call_struct.info = info;
- msg->call_struct.wait = wait;
- msg->call_struct.waitmask = &waitmask;
- }
- msg_queue->count++;
- } else
- panic("IPI message queue overflow\n");
- spin_unlock_irqrestore(&msg_queue->lock, flags);
+ struct ipi_data *bfin_ipi_data;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ for_each_cpu(cpu, cpumask) {
+ bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
+ smp_mb();
+ set_bit(msg, &bfin_ipi_data->bits);
+ bfin_ipi_data->count++;
platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
}
- if (wait) {
- while (!cpumask_empty(&waitmask))
- blackfin_dcache_invalidate_range(
- (unsigned long)(&waitmask),
- (unsigned long)(&waitmask));
-#ifdef __ARCH_SYNC_CORE_DCACHE
- /*
- * Invalidate D cache in case shared data was changed by
- * other processors to ensure cache coherence.
- */
- resync_core_dcache();
-#endif
- }
+ local_irq_restore(flags);
}
-int smp_call_function(void (*func)(void *info), void *info, int wait)
+void arch_send_call_function_single_ipi(int cpu)
{
- cpumask_t callmap;
-
- preempt_disable();
- cpumask_copy(&callmap, cpu_online_mask);
- cpumask_clear_cpu(smp_processor_id(), &callmap);
- if (!cpumask_empty(&callmap))
- smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
-
- preempt_enable();
-
- return 0;
+ send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC_SINGLE);
}
-EXPORT_SYMBOL_GPL(smp_call_function);
-int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
- int wait)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
- unsigned int cpu = cpuid;
- cpumask_t callmap;
-
- if (cpu_is_offline(cpu))
- return 0;
- cpumask_clear(&callmap);
- cpumask_set_cpu(cpu, &callmap);
-
- smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
-
- return 0;
+ send_ipi(mask, BFIN_IPI_CALL_FUNC);
}
-EXPORT_SYMBOL_GPL(smp_call_function_single);
void smp_send_reschedule(int cpu)
{
- cpumask_t callmap;
- /* simply trigger an ipi */
-
- cpumask_clear(&callmap);
- cpumask_set_cpu(cpu, &callmap);
-
- smp_send_message(callmap, BFIN_IPI_RESCHEDULE, NULL, NULL, 0);
+ send_ipi(cpumask_of(cpu), BFIN_IPI_RESCHEDULE);
return;
}
void smp_send_msg(const struct cpumask *mask, unsigned long type)
{
- smp_send_message(*mask, type, NULL, NULL, 0);
+ send_ipi(mask, type);
}
void smp_timer_broadcast(const struct cpumask *mask)
@@ -333,7 +242,7 @@ void smp_send_stop(void)
cpumask_copy(&callmap, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &callmap);
if (!cpumask_empty(&callmap))
- smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
+ send_ipi(&callmap, BFIN_IPI_CPU_STOP);
preempt_enable();
@@ -436,7 +345,7 @@ void __init smp_prepare_boot_cpu(void)
void __init smp_prepare_cpus(unsigned int max_cpus)
{
platform_prepare_cpus(max_cpus);
- ipi_queue_init();
+ bfin_ipi_init();
platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
}
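The Blackfin rework drops the per-CPU locked message queue in favour of a per-CPU pending-bit word: senders set a bit per message type and kick the target CPU, and the handler atomically swaps the word to zero and dispatches every bit it found. A standalone sketch of that scheme, using C11 atomics in place of the kernel's primitives:

    #include <stdatomic.h>
    #include <stdio.h>

    enum ipi_msg { IPI_TIMER, IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_CPU_STOP };

    static _Atomic unsigned long pending_bits;

    static void send_ipi(enum ipi_msg msg)
    {
        /* Mark the message type pending; real code would then raise the
         * target CPU's supplemental interrupt. */
        atomic_fetch_or(&pending_bits, 1UL << msg);
    }

    static void ipi_handler(void)
    {
        /* Claim every pending message in one atomic swap, then dispatch. */
        unsigned long pending = atomic_exchange(&pending_bits, 0);

        for (int msg = 0; pending; msg++, pending >>= 1)
            if (pending & 1)
                printf("dispatch message %d\n", msg);
    }

    int main(void)
    {
        send_ipi(IPI_TIMER);
        send_ipi(IPI_CALL_FUNC);
        ipi_handler();
        return 0;
    }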
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index 052f81a7623..983c859e40b 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -6,6 +6,7 @@
config C6X
def_bool y
select CLKDEV_LOOKUP
+ select GENERIC_ATOMIC64
select GENERIC_IRQ_SHOW
select HAVE_ARCH_TRACEHOOK
select HAVE_DMA_API_DEBUG
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index 3af601e31e6..f08e89183cd 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -2,6 +2,7 @@ include include/asm-generic/Kbuild.asm
generic-y += atomic.h
generic-y += auxvec.h
+generic-y += barrier.h
generic-y += bitsperlong.h
generic-y += bugs.h
generic-y += cputime.h
diff --git a/arch/c6x/include/asm/barrier.h b/arch/c6x/include/asm/barrier.h
deleted file mode 100644
index 538240e8590..00000000000
--- a/arch/c6x/include/asm/barrier.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Port on Texas Instruments TMS320C6x architecture
- *
- * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
- * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef _ASM_C6X_BARRIER_H
-#define _ASM_C6X_BARRIER_H
-
-#define nop() asm("NOP\n");
-
-#define mb() barrier()
-#define rmb() barrier()
-#define wmb() barrier()
-#define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_read_barrier_depends() do { } while (0)
-
-#endif /* _ASM_C6X_BARRIER_H */
diff --git a/arch/c6x/include/asm/cache.h b/arch/c6x/include/asm/cache.h
index 6d521d96d94..09c5a0f5f4d 100644
--- a/arch/c6x/include/asm/cache.h
+++ b/arch/c6x/include/asm/cache.h
@@ -1,7 +1,7 @@
/*
* Port on Texas Instruments TMS320C6x architecture
*
- * Copyright (C) 2005, 2006, 2009, 2010 Texas Instruments Incorporated
+ * Copyright (C) 2005, 2006, 2009, 2010, 2012 Texas Instruments Incorporated
* Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
*
* This program is free software; you can redistribute it and/or modify
@@ -16,9 +16,14 @@
/*
* Cache line size
*/
-#define L1D_CACHE_BYTES 64
-#define L1P_CACHE_BYTES 32
-#define L2_CACHE_BYTES 128
+#define L1D_CACHE_SHIFT 6
+#define L1D_CACHE_BYTES (1 << L1D_CACHE_SHIFT)
+
+#define L1P_CACHE_SHIFT 5
+#define L1P_CACHE_BYTES (1 << L1P_CACHE_SHIFT)
+
+#define L2_CACHE_SHIFT 7
+#define L2_CACHE_BYTES (1 << L2_CACHE_SHIFT)
/*
* L2 used as cache
@@ -29,7 +34,8 @@
* For practical reasons the L1_CACHE_BYTES defines should not be smaller than
* the L2 line size
*/
-#define L1_CACHE_BYTES L2_CACHE_BYTES
+#define L1_CACHE_SHIFT L2_CACHE_SHIFT
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define L2_CACHE_ALIGN_LOW(x) \
(((x) & ~(L2_CACHE_BYTES - 1)))
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index 954d81e2e83..7913695b2fc 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -234,5 +234,4 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC_T10DIF=y
-CONFIG_MISC_DEVICES=y
CONFIG_INTEL_IOMMU=y
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index 91c41ecfa6d..f8e91336542 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -209,4 +209,3 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_CRYPTO_MD5=y
-CONFIG_MISC_DEVICES=y
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 6f38b6120d9..440578850ae 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -497,7 +497,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
srat_num_cpus++;
}
-void __init
+int __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
unsigned long paddr, size;
@@ -512,7 +512,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
/* Ignore disabled entries */
if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
- return;
+ return -1;
/* record this node in proximity bitmap */
pxm_bit_set(pxm);
@@ -531,6 +531,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
p->size = size;
p->nid = pxm;
num_node_memblks++;
+ return 0;
}
void __init acpi_numa_arch_fixup(void)
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 0b0f8b8c4a2..b22df9410dc 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -5,6 +5,7 @@ config M68K
select HAVE_AOUT if MMU
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_SHOW
+ select GENERIC_ATOMIC64
select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
select GENERIC_CPU_DEVICES
select GENERIC_STRNCPY_FROM_USER if MMU
@@ -54,18 +55,6 @@ config ZONE_DMA
bool
default y
-config CPU_HAS_NO_BITFIELDS
- bool
-
-config CPU_HAS_NO_MULDIV64
- bool
-
-config CPU_HAS_ADDRESS_SPACES
- bool
-
-config FPU
- bool
-
config HZ
int
default 1000 if CLEOPATRA
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index 43a9f8f1b8e..c4eb79edece 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -28,6 +28,7 @@ config COLDFIRE
select CPU_HAS_NO_BITFIELDS
select CPU_HAS_NO_MULDIV64
select GENERIC_CSUM
+ select HAVE_CLK
endchoice
@@ -37,6 +38,7 @@ config M68000
bool
select CPU_HAS_NO_BITFIELDS
select CPU_HAS_NO_MULDIV64
+ select CPU_HAS_NO_UNALIGNED
select GENERIC_CSUM
help
The Freescale (was Motorola) 68000 CPU is the first generation of
@@ -48,6 +50,7 @@ config M68000
config MCPU32
bool
select CPU_HAS_NO_BITFIELDS
+ select CPU_HAS_NO_UNALIGNED
help
The Freescale (was then Motorola) CPU32 is a CPU core that is
based on the 68020 processor. For the most part it is used in
@@ -56,7 +59,6 @@ config MCPU32
config M68020
bool "68020 support"
depends on MMU
- select GENERIC_ATOMIC64
select CPU_HAS_ADDRESS_SPACES
help
If you anticipate running this kernel on a computer with a MC68020
@@ -67,7 +69,6 @@ config M68020
config M68030
bool "68030 support"
depends on MMU && !MMU_SUN3
- select GENERIC_ATOMIC64
select CPU_HAS_ADDRESS_SPACES
help
If you anticipate running this kernel on a computer with a MC68030
@@ -77,7 +78,6 @@ config M68030
config M68040
bool "68040 support"
depends on MMU && !MMU_SUN3
- select GENERIC_ATOMIC64
select CPU_HAS_ADDRESS_SPACES
help
If you anticipate running this kernel on a computer with a MC68LC040
@@ -88,7 +88,6 @@ config M68040
config M68060
bool "68060 support"
depends on MMU && !MMU_SUN3
- select GENERIC_ATOMIC64
select CPU_HAS_ADDRESS_SPACES
help
If you anticipate running this kernel on a computer with a MC68060
@@ -376,6 +375,18 @@ config NODES_SHIFT
default "3"
depends on !SINGLE_MEMORY_CHUNK
+config CPU_HAS_NO_BITFIELDS
+ bool
+
+config CPU_HAS_NO_MULDIV64
+ bool
+
+config CPU_HAS_NO_UNALIGNED
+ bool
+
+config CPU_HAS_ADDRESS_SPACES
+ bool
+
config FPU
bool
diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c
index 0a30406b944..f5565d6eeb8 100644
--- a/arch/m68k/apollo/config.c
+++ b/arch/m68k/apollo/config.c
@@ -177,8 +177,8 @@ irqreturn_t dn_timer_int(int irq, void *dev_id)
timer_handler(irq, dev_id);
- x=*(volatile unsigned char *)(timer+3);
- x=*(volatile unsigned char *)(timer+5);
+ x = *(volatile unsigned char *)(apollo_timer + 3);
+ x = *(volatile unsigned char *)(apollo_timer + 5);
return IRQ_HANDLED;
}
@@ -186,17 +186,17 @@ irqreturn_t dn_timer_int(int irq, void *dev_id)
void dn_sched_init(irq_handler_t timer_routine)
{
/* program timer 1 */
- *(volatile unsigned char *)(timer+3)=0x01;
- *(volatile unsigned char *)(timer+1)=0x40;
- *(volatile unsigned char *)(timer+5)=0x09;
- *(volatile unsigned char *)(timer+7)=0xc4;
+ *(volatile unsigned char *)(apollo_timer + 3) = 0x01;
+ *(volatile unsigned char *)(apollo_timer + 1) = 0x40;
+ *(volatile unsigned char *)(apollo_timer + 5) = 0x09;
+ *(volatile unsigned char *)(apollo_timer + 7) = 0xc4;
/* enable IRQ of PIC B */
*(volatile unsigned char *)(pica+1)&=(~8);
#if 0
- printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3));
- printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3));
+ printk("*(0x10803) %02x\n",*(volatile unsigned char *)(apollo_timer + 0x3));
+ printk("*(0x10803) %02x\n",*(volatile unsigned char *)(apollo_timer + 0x3));
#endif
if (request_irq(IRQ_APOLLO, dn_timer_int, 0, "time", timer_routine))
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index eafa2539a8e..a74e5d95c38 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -1,4 +1,29 @@
include include/asm-generic/Kbuild.asm
header-y += cachectl.h
+generic-y += bitsperlong.h
+generic-y += cputime.h
+generic-y += device.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += futex.h
+generic-y += ioctl.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += kvm_para.h
+generic-y += local64.h
+generic-y += local.h
+generic-y += mman.h
+generic-y += mutex.h
+generic-y += percpu.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += siginfo.h
+generic-y += statfs.h
+generic-y += topology.h
+generic-y += types.h
generic-y += word-at-a-time.h
+generic-y += xor.h
diff --git a/arch/m68k/include/asm/MC68332.h b/arch/m68k/include/asm/MC68332.h
deleted file mode 100644
index 6bb8f02685a..00000000000
--- a/arch/m68k/include/asm/MC68332.h
+++ /dev/null
@@ -1,152 +0,0 @@
-
-/* include/asm-m68knommu/MC68332.h: '332 control registers
- *
- * Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>,
- *
- */
-
-#ifndef _MC68332_H_
-#define _MC68332_H_
-
-#define BYTE_REF(addr) (*((volatile unsigned char*)addr))
-#define WORD_REF(addr) (*((volatile unsigned short*)addr))
-
-#define PORTE_ADDR 0xfffa11
-#define PORTE BYTE_REF(PORTE_ADDR)
-#define DDRE_ADDR 0xfffa15
-#define DDRE BYTE_REF(DDRE_ADDR)
-#define PEPAR_ADDR 0xfffa17
-#define PEPAR BYTE_REF(PEPAR_ADDR)
-
-#define PORTF_ADDR 0xfffa19
-#define PORTF BYTE_REF(PORTF_ADDR)
-#define DDRF_ADDR 0xfffa1d
-#define DDRF BYTE_REF(DDRF_ADDR)
-#define PFPAR_ADDR 0xfffa1f
-#define PFPAR BYTE_REF(PFPAR_ADDR)
-
-#define PORTQS_ADDR 0xfffc15
-#define PORTQS BYTE_REF(PORTQS_ADDR)
-#define DDRQS_ADDR 0xfffc17
-#define DDRQS BYTE_REF(DDRQS_ADDR)
-#define PQSPAR_ADDR 0xfffc16
-#define PQSPAR BYTE_REF(PQSPAR_ADDR)
-
-#define CSPAR0_ADDR 0xFFFA44
-#define CSPAR0 WORD_REF(CSPAR0_ADDR)
-#define CSPAR1_ADDR 0xFFFA46
-#define CSPAR1 WORD_REF(CSPAR1_ADDR)
-#define CSARBT_ADDR 0xFFFA48
-#define CSARBT WORD_REF(CSARBT_ADDR)
-#define CSOPBT_ADDR 0xFFFA4A
-#define CSOPBT WORD_REF(CSOPBT_ADDR)
-#define CSBAR0_ADDR 0xFFFA4C
-#define CSBAR0 WORD_REF(CSBAR0_ADDR)
-#define CSOR0_ADDR 0xFFFA4E
-#define CSOR0 WORD_REF(CSOR0_ADDR)
-#define CSBAR1_ADDR 0xFFFA50
-#define CSBAR1 WORD_REF(CSBAR1_ADDR)
-#define CSOR1_ADDR 0xFFFA52
-#define CSOR1 WORD_REF(CSOR1_ADDR)
-#define CSBAR2_ADDR 0xFFFA54
-#define CSBAR2 WORD_REF(CSBAR2_ADDR)
-#define CSOR2_ADDR 0xFFFA56
-#define CSOR2 WORD_REF(CSOR2_ADDR)
-#define CSBAR3_ADDR 0xFFFA58
-#define CSBAR3 WORD_REF(CSBAR3_ADDR)
-#define CSOR3_ADDR 0xFFFA5A
-#define CSOR3 WORD_REF(CSOR3_ADDR)
-#define CSBAR4_ADDR 0xFFFA5C
-#define CSBAR4 WORD_REF(CSBAR4_ADDR)
-#define CSOR4_ADDR 0xFFFA5E
-#define CSOR4 WORD_REF(CSOR4_ADDR)
-#define CSBAR5_ADDR 0xFFFA60
-#define CSBAR5 WORD_REF(CSBAR5_ADDR)
-#define CSOR5_ADDR 0xFFFA62
-#define CSOR5 WORD_REF(CSOR5_ADDR)
-#define CSBAR6_ADDR 0xFFFA64
-#define CSBAR6 WORD_REF(CSBAR6_ADDR)
-#define CSOR6_ADDR 0xFFFA66
-#define CSOR6 WORD_REF(CSOR6_ADDR)
-#define CSBAR7_ADDR 0xFFFA68
-#define CSBAR7 WORD_REF(CSBAR7_ADDR)
-#define CSOR7_ADDR 0xFFFA6A
-#define CSOR7 WORD_REF(CSOR7_ADDR)
-#define CSBAR8_ADDR 0xFFFA6C
-#define CSBAR8 WORD_REF(CSBAR8_ADDR)
-#define CSOR8_ADDR 0xFFFA6E
-#define CSOR8 WORD_REF(CSOR8_ADDR)
-#define CSBAR9_ADDR 0xFFFA70
-#define CSBAR9 WORD_REF(CSBAR9_ADDR)
-#define CSOR9_ADDR 0xFFFA72
-#define CSOR9 WORD_REF(CSOR9_ADDR)
-#define CSBAR10_ADDR 0xFFFA74
-#define CSBAR10 WORD_REF(CSBAR10_ADDR)
-#define CSOR10_ADDR 0xFFFA76
-#define CSOR10 WORD_REF(CSOR10_ADDR)
-
-#define CSOR_MODE_ASYNC 0x0000
-#define CSOR_MODE_SYNC 0x8000
-#define CSOR_MODE_MASK 0x8000
-#define CSOR_BYTE_DISABLE 0x0000
-#define CSOR_BYTE_UPPER 0x4000
-#define CSOR_BYTE_LOWER 0x2000
-#define CSOR_BYTE_BOTH 0x6000
-#define CSOR_BYTE_MASK 0x6000
-#define CSOR_RW_RSVD 0x0000
-#define CSOR_RW_READ 0x0800
-#define CSOR_RW_WRITE 0x1000
-#define CSOR_RW_BOTH 0x1800
-#define CSOR_RW_MASK 0x1800
-#define CSOR_STROBE_DS 0x0400
-#define CSOR_STROBE_AS 0x0000
-#define CSOR_STROBE_MASK 0x0400
-#define CSOR_DSACK_WAIT(x) (wait << 6)
-#define CSOR_DSACK_FTERM (14 << 6)
-#define CSOR_DSACK_EXTERNAL (15 << 6)
-#define CSOR_DSACK_MASK 0x03c0
-#define CSOR_SPACE_CPU 0x0000
-#define CSOR_SPACE_USER 0x0010
-#define CSOR_SPACE_SU 0x0020
-#define CSOR_SPACE_BOTH 0x0030
-#define CSOR_SPACE_MASK 0x0030
-#define CSOR_IPL_ALL 0x0000
-#define CSOR_IPL_PRIORITY(x) (x << 1)
-#define CSOR_IPL_MASK 0x000e
-#define CSOR_AVEC_ON 0x0001
-#define CSOR_AVEC_OFF 0x0000
-#define CSOR_AVEC_MASK 0x0001
-
-#define CSBAR_ADDR(x) ((addr >> 11) << 3)
-#define CSBAR_ADDR_MASK 0xfff8
-#define CSBAR_BLKSIZE_2K 0x0000
-#define CSBAR_BLKSIZE_8K 0x0001
-#define CSBAR_BLKSIZE_16K 0x0002
-#define CSBAR_BLKSIZE_64K 0x0003
-#define CSBAR_BLKSIZE_128K 0x0004
-#define CSBAR_BLKSIZE_256K 0x0005
-#define CSBAR_BLKSIZE_512K 0x0006
-#define CSBAR_BLKSIZE_1M 0x0007
-#define CSBAR_BLKSIZE_MASK 0x0007
-
-#define CSPAR_DISC 0
-#define CSPAR_ALT 1
-#define CSPAR_CS8 2
-#define CSPAR_CS16 3
-#define CSPAR_MASK 3
-
-#define CSPAR0_CSBOOT(x) (x << 0)
-#define CSPAR0_CS0(x) (x << 2)
-#define CSPAR0_CS1(x) (x << 4)
-#define CSPAR0_CS2(x) (x << 6)
-#define CSPAR0_CS3(x) (x << 8)
-#define CSPAR0_CS4(x) (x << 10)
-#define CSPAR0_CS5(x) (x << 12)
-
-#define CSPAR1_CS6(x) (x << 0)
-#define CSPAR1_CS7(x) (x << 2)
-#define CSPAR1_CS8(x) (x << 4)
-#define CSPAR1_CS9(x) (x << 6)
-#define CSPAR1_CS10(x) (x << 8)
-
-#endif
diff --git a/arch/m68k/include/asm/apollodma.h b/arch/m68k/include/asm/apollodma.h
deleted file mode 100644
index 954adc851ad..00000000000
--- a/arch/m68k/include/asm/apollodma.h
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * linux/include/asm/dma.h: Defines for using and allocating dma channels.
- * Written by Hennus Bergman, 1992.
- * High DMA channel support & info by Hannu Savolainen
- * and John Boyd, Nov. 1992.
- */
-
-#ifndef _ASM_APOLLO_DMA_H
-#define _ASM_APOLLO_DMA_H
-
-#include <asm/apollohw.h> /* need byte IO */
-#include <linux/spinlock.h> /* And spinlocks */
-#include <linux/delay.h>
-
-
-#define dma_outb(val,addr) (*((volatile unsigned char *)(addr+IO_BASE)) = (val))
-#define dma_inb(addr) (*((volatile unsigned char *)(addr+IO_BASE)))
-
-/*
- * NOTES about DMA transfers:
- *
- * controller 1: channels 0-3, byte operations, ports 00-1F
- * controller 2: channels 4-7, word operations, ports C0-DF
- *
- * - ALL registers are 8 bits only, regardless of transfer size
- * - channel 4 is not used - cascades 1 into 2.
- * - channels 0-3 are byte - addresses/counts are for physical bytes
- * - channels 5-7 are word - addresses/counts are for physical words
- * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
- * - transfer count loaded to registers is 1 less than actual count
- * - controller 2 offsets are all even (2x offsets for controller 1)
- * - page registers for 5-7 don't use data bit 0, represent 128K pages
- * - page registers for 0-3 use bit 0, represent 64K pages
- *
- * DMA transfers are limited to the lower 16MB of _physical_ memory.
- * Note that addresses loaded into registers must be _physical_ addresses,
- * not logical addresses (which may differ if paging is active).
- *
- * Address mapping for channels 0-3:
- *
- * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
- * | ... | | ... | | ... |
- * | ... | | ... | | ... |
- * | ... | | ... | | ... |
- * P7 ... P0 A7 ... A0 A7 ... A0
- * | Page | Addr MSB | Addr LSB | (DMA registers)
- *
- * Address mapping for channels 5-7:
- *
- * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
- * | ... | \ \ ... \ \ \ ... \ \
- * | ... | \ \ ... \ \ \ ... \ (not used)
- * | ... | \ \ ... \ \ \ ... \
- * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
- * | Page | Addr MSB | Addr LSB | (DMA registers)
- *
- * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
- * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
- * the hardware level, so odd-byte transfers aren't possible).
- *
- * Transfer count (_not # bytes_) is limited to 64K, represented as actual
- * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
- * and up to 128K bytes may be transferred on channels 5-7 in one operation.
- *
- */
-
-#define MAX_DMA_CHANNELS 8
-
-/* The maximum address that we can perform a DMA transfer to on this platform */#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000)
-
-/* 8237 DMA controllers */
-#define IO_DMA1_BASE 0x10C00 /* 8 bit slave DMA, channels 0..3 */
-#define IO_DMA2_BASE 0x10D00 /* 16 bit master DMA, ch 4(=slave input)..7 */
-
-/* DMA controller registers */
-#define DMA1_CMD_REG (IO_DMA1_BASE+0x08) /* command register (w) */
-#define DMA1_STAT_REG (IO_DMA1_BASE+0x08) /* status register (r) */
-#define DMA1_REQ_REG (IO_DMA1_BASE+0x09) /* request register (w) */
-#define DMA1_MASK_REG (IO_DMA1_BASE+0x0A) /* single-channel mask (w) */
-#define DMA1_MODE_REG (IO_DMA1_BASE+0x0B) /* mode register (w) */
-#define DMA1_CLEAR_FF_REG (IO_DMA1_BASE+0x0C) /* clear pointer flip-flop (w) */
-#define DMA1_TEMP_REG (IO_DMA1_BASE+0x0D) /* Temporary Register (r) */
-#define DMA1_RESET_REG (IO_DMA1_BASE+0x0D) /* Master Clear (w) */
-#define DMA1_CLR_MASK_REG (IO_DMA1_BASE+0x0E) /* Clear Mask */
-#define DMA1_MASK_ALL_REG (IO_DMA1_BASE+0x0F) /* all-channels mask (w) */
-
-#define DMA2_CMD_REG (IO_DMA2_BASE+0x10) /* command register (w) */
-#define DMA2_STAT_REG (IO_DMA2_BASE+0x10) /* status register (r) */
-#define DMA2_REQ_REG (IO_DMA2_BASE+0x12) /* request register (w) */
-#define DMA2_MASK_REG (IO_DMA2_BASE+0x14) /* single-channel mask (w) */
-#define DMA2_MODE_REG (IO_DMA2_BASE+0x16) /* mode register (w) */
-#define DMA2_CLEAR_FF_REG (IO_DMA2_BASE+0x18) /* clear pointer flip-flop (w) */
-#define DMA2_TEMP_REG (IO_DMA2_BASE+0x1A) /* Temporary Register (r) */
-#define DMA2_RESET_REG (IO_DMA2_BASE+0x1A) /* Master Clear (w) */
-#define DMA2_CLR_MASK_REG (IO_DMA2_BASE+0x1C) /* Clear Mask */
-#define DMA2_MASK_ALL_REG (IO_DMA2_BASE+0x1E) /* all-channels mask (w) */
-
-#define DMA_ADDR_0 (IO_DMA1_BASE+0x00) /* DMA address registers */
-#define DMA_ADDR_1 (IO_DMA1_BASE+0x02)
-#define DMA_ADDR_2 (IO_DMA1_BASE+0x04)
-#define DMA_ADDR_3 (IO_DMA1_BASE+0x06)
-#define DMA_ADDR_4 (IO_DMA2_BASE+0x00)
-#define DMA_ADDR_5 (IO_DMA2_BASE+0x04)
-#define DMA_ADDR_6 (IO_DMA2_BASE+0x08)
-#define DMA_ADDR_7 (IO_DMA2_BASE+0x0C)
-
-#define DMA_CNT_0 (IO_DMA1_BASE+0x01) /* DMA count registers */
-#define DMA_CNT_1 (IO_DMA1_BASE+0x03)
-#define DMA_CNT_2 (IO_DMA1_BASE+0x05)
-#define DMA_CNT_3 (IO_DMA1_BASE+0x07)
-#define DMA_CNT_4 (IO_DMA2_BASE+0x02)
-#define DMA_CNT_5 (IO_DMA2_BASE+0x06)
-#define DMA_CNT_6 (IO_DMA2_BASE+0x0A)
-#define DMA_CNT_7 (IO_DMA2_BASE+0x0E)
-
-#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
-#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
-#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
-
-#define DMA_AUTOINIT 0x10
-
-#define DMA_8BIT 0
-#define DMA_16BIT 1
-#define DMA_BUSMASTER 2
-
-extern spinlock_t dma_spin_lock;
-
-static __inline__ unsigned long claim_dma_lock(void)
-{
- unsigned long flags;
- spin_lock_irqsave(&dma_spin_lock, flags);
- return flags;
-}
-
-static __inline__ void release_dma_lock(unsigned long flags)
-{
- spin_unlock_irqrestore(&dma_spin_lock, flags);
-}
-
-/* enable/disable a specific DMA channel */
-static __inline__ void enable_dma(unsigned int dmanr)
-{
- if (dmanr<=3)
- dma_outb(dmanr, DMA1_MASK_REG);
- else
- dma_outb(dmanr & 3, DMA2_MASK_REG);
-}
-
-static __inline__ void disable_dma(unsigned int dmanr)
-{
- if (dmanr<=3)
- dma_outb(dmanr | 4, DMA1_MASK_REG);
- else
- dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
-}
-
-/* Clear the 'DMA Pointer Flip Flop'.
- * Write 0 for LSB/MSB, 1 for MSB/LSB access.
- * Use this once to initialize the FF to a known state.
- * After that, keep track of it. :-)
- * --- In order to do that, the DMA routines below should ---
- * --- only be used while holding the DMA lock ! ---
- */
-static __inline__ void clear_dma_ff(unsigned int dmanr)
-{
- if (dmanr<=3)
- dma_outb(0, DMA1_CLEAR_FF_REG);
- else
- dma_outb(0, DMA2_CLEAR_FF_REG);
-}
-
-/* set mode (above) for a specific DMA channel */
-static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
-{
- if (dmanr<=3)
- dma_outb(mode | dmanr, DMA1_MODE_REG);
- else
- dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
-}
-
-/* Set transfer address & page bits for specific DMA channel.
- * Assumes dma flipflop is clear.
- */
-static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
-{
- if (dmanr <= 3) {
- dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
- dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
- } else {
- dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
- dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
- }
-}
-
-
-/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
- * a specific DMA channel.
- * You must ensure the parameters are valid.
- * NOTE: from a manual: "the number of transfers is one more
- * than the initial word count"! This is taken into account.
- * Assumes dma flip-flop is clear.
- * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
- */
-static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
-{
- count--;
- if (dmanr <= 3) {
- dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
- dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
- } else {
- dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
- dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
- }
-}
-
-
-/* Get DMA residue count. After a DMA transfer, this
- * should return zero. Reading this while a DMA transfer is
- * still in progress will return unpredictable results.
- * If called before the channel has been used, it may return 1.
- * Otherwise, it returns the number of _bytes_ left to transfer.
- *
- * Assumes DMA flip-flop is clear.
- */
-static __inline__ int get_dma_residue(unsigned int dmanr)
-{
- unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
- : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
-
- /* using short to get 16-bit wrap around */
- unsigned short count;
-
- count = 1 + dma_inb(io_port);
- count += dma_inb(io_port) << 8;
-
- return (dmanr<=3)? count : (count<<1);
-}
-
-
-/* These are in kernel/dma.c: */
-extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
-extern void free_dma(unsigned int dmanr); /* release it again */
-
-/* These are in arch/m68k/apollo/dma.c: */
-extern unsigned short dma_map_page(unsigned long phys_addr,int count,int type);
-extern void dma_unmap_page(unsigned short dma_addr);
-
-#endif /* _ASM_APOLLO_DMA_H */
diff --git a/arch/m68k/include/asm/apollohw.h b/arch/m68k/include/asm/apollohw.h
index a1373b9aa28..635ef4f8901 100644
--- a/arch/m68k/include/asm/apollohw.h
+++ b/arch/m68k/include/asm/apollohw.h
@@ -98,7 +98,7 @@ extern u_long timer_physaddr;
#define cpuctrl (*(volatile unsigned int *)(IO_BASE + cpuctrl_physaddr))
#define pica (IO_BASE + pica_physaddr)
#define picb (IO_BASE + picb_physaddr)
-#define timer (IO_BASE + timer_physaddr)
+#define apollo_timer (IO_BASE + timer_physaddr)
#define addr_xlat_map ((unsigned short *)(IO_BASE + 0x17000))
#define isaIO2mem(x) (((((x) & 0x3f8) << 7) | (((x) & 0xfc00) >> 6) | ((x) & 0x7)) + 0x40000 + IO_BASE)
diff --git a/arch/m68k/include/asm/bitsperlong.h b/arch/m68k/include/asm/bitsperlong.h
deleted file mode 100644
index 6dc0bb0c13b..00000000000
--- a/arch/m68k/include/asm/bitsperlong.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bitsperlong.h>
diff --git a/arch/m68k/include/asm/cputime.h b/arch/m68k/include/asm/cputime.h
deleted file mode 100644
index c79c5e89230..00000000000
--- a/arch/m68k/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __M68K_CPUTIME_H
-#define __M68K_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __M68K_CPUTIME_H */
diff --git a/arch/m68k/include/asm/delay.h b/arch/m68k/include/asm/delay.h
index 9c09becfd4c..12d8fe4f1d3 100644
--- a/arch/m68k/include/asm/delay.h
+++ b/arch/m68k/include/asm/delay.h
@@ -43,7 +43,7 @@ static inline void __delay(unsigned long loops)
extern void __bad_udelay(void);
-#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
+#ifdef CONFIG_CPU_HAS_NO_MULDIV64
/*
* The simpler m68k and ColdFire processors do not have a 32*32->64
* multiply instruction. So we need to handle them a little differently.
diff --git a/arch/m68k/include/asm/device.h b/arch/m68k/include/asm/device.h
deleted file mode 100644
index d8f9872b0e2..00000000000
--- a/arch/m68k/include/asm/device.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-#include <asm-generic/device.h>
-
diff --git a/arch/m68k/include/asm/emergency-restart.h b/arch/m68k/include/asm/emergency-restart.h
deleted file mode 100644
index 108d8c48e42..00000000000
--- a/arch/m68k/include/asm/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/m68k/include/asm/errno.h b/arch/m68k/include/asm/errno.h
deleted file mode 100644
index 0d4e188d6ef..00000000000
--- a/arch/m68k/include/asm/errno.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _M68K_ERRNO_H
-#define _M68K_ERRNO_H
-
-#include <asm-generic/errno.h>
-
-#endif /* _M68K_ERRNO_H */
diff --git a/arch/m68k/include/asm/futex.h b/arch/m68k/include/asm/futex.h
deleted file mode 100644
index 6a332a9f099..00000000000
--- a/arch/m68k/include/asm/futex.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
-
-#include <asm-generic/futex.h>
-
-#endif
diff --git a/arch/m68k/include/asm/ioctl.h b/arch/m68k/include/asm/ioctl.h
deleted file mode 100644
index b279fe06dfe..00000000000
--- a/arch/m68k/include/asm/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctl.h>
diff --git a/arch/m68k/include/asm/ipcbuf.h b/arch/m68k/include/asm/ipcbuf.h
deleted file mode 100644
index 84c7e51cb6d..00000000000
--- a/arch/m68k/include/asm/ipcbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipcbuf.h>
diff --git a/arch/m68k/include/asm/irq_regs.h b/arch/m68k/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b7027..00000000000
--- a/arch/m68k/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/m68k/include/asm/kdebug.h b/arch/m68k/include/asm/kdebug.h
deleted file mode 100644
index 6ece1b03766..00000000000
--- a/arch/m68k/include/asm/kdebug.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kdebug.h>
diff --git a/arch/m68k/include/asm/kmap_types.h b/arch/m68k/include/asm/kmap_types.h
deleted file mode 100644
index 3413cc1390e..00000000000
--- a/arch/m68k/include/asm/kmap_types.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_M68K_KMAP_TYPES_H
-#define __ASM_M68K_KMAP_TYPES_H
-
-#include <asm-generic/kmap_types.h>
-
-#endif /* __ASM_M68K_KMAP_TYPES_H */
diff --git a/arch/m68k/include/asm/kvm_para.h b/arch/m68k/include/asm/kvm_para.h
deleted file mode 100644
index 14fab8f0b95..00000000000
--- a/arch/m68k/include/asm/kvm_para.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>
diff --git a/arch/m68k/include/asm/local.h b/arch/m68k/include/asm/local.h
deleted file mode 100644
index 6c259263e1f..00000000000
--- a/arch/m68k/include/asm/local.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_M68K_LOCAL_H
-#define _ASM_M68K_LOCAL_H
-
-#include <asm-generic/local.h>
-
-#endif /* _ASM_M68K_LOCAL_H */
diff --git a/arch/m68k/include/asm/local64.h b/arch/m68k/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc23..00000000000
--- a/arch/m68k/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/m68k/include/asm/mac_mouse.h b/arch/m68k/include/asm/mac_mouse.h
deleted file mode 100644
index 39a5c292eae..00000000000
--- a/arch/m68k/include/asm/mac_mouse.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef _ASM_MAC_MOUSE_H
-#define _ASM_MAC_MOUSE_H
-
-/*
- * linux/include/asm-m68k/mac_mouse.h
- * header file for Macintosh ADB mouse driver
- * 27-10-97 Michael Schmitz
- * copied from:
- * header file for Atari Mouse driver
- * by Robert de Vries (robert@and.nl) on 19Jul93
- */
-
-struct mouse_status {
- char buttons;
- short dx;
- short dy;
- int ready;
- int active;
- wait_queue_head_t wait;
- struct fasync_struct *fasyncptr;
-};
-
-#endif
diff --git a/arch/m68k/include/asm/mcfmbus.h b/arch/m68k/include/asm/mcfmbus.h
deleted file mode 100644
index 319899c47a2..00000000000
--- a/arch/m68k/include/asm/mcfmbus.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/****************************************************************************/
-
-/*
- * mcfmbus.h -- Coldfire MBUS support defines.
- *
- * (C) Copyright 1999, Martin Floeer (mfloeer@axcent.de)
- */
-
-/****************************************************************************/
-
-
-#ifndef mcfmbus_h
-#define mcfmbus_h
-
-
-#define MCFMBUS_BASE 0x280
-#define MCFMBUS_IRQ_VECTOR 0x19
-#define MCFMBUS_IRQ 0x1
-#define MCFMBUS_CLK 0x3f
-#define MCFMBUS_IRQ_LEVEL 0x07 /*IRQ Level 1*/
-#define MCFMBUS_ADDRESS 0x01
-
-
-/*
-* Define the 5307 MBUS register set addresses
-*/
-
-#define MCFMBUS_MADR 0x00
-#define MCFMBUS_MFDR 0x04
-#define MCFMBUS_MBCR 0x08
-#define MCFMBUS_MBSR 0x0C
-#define MCFMBUS_MBDR 0x10
-
-
-#define MCFMBUS_MADR_ADDR(a) (((a)&0x7F)<<0x01) /*Slave Address*/
-
-#define MCFMBUS_MFDR_MBC(a) ((a)&0x3F) /*M-Bus Clock*/
-
-/*
-* Define bit flags in Control Register
-*/
-
-#define MCFMBUS_MBCR_MEN (0x80) /* M-Bus Enable */
-#define MCFMBUS_MBCR_MIEN (0x40) /* M-Bus Interrupt Enable */
-#define MCFMBUS_MBCR_MSTA (0x20) /* Master/Slave Mode Select Bit */
-#define MCFMBUS_MBCR_MTX (0x10) /* Transmit/Rcv Mode Select Bit */
-#define MCFMBUS_MBCR_TXAK (0x08) /* Transmit Acknowledge Enable */
-#define MCFMBUS_MBCR_RSTA (0x04) /* Repeat Start */
-
-/*
-* Define bit flags in Status Register
-*/
-
-#define MCFMBUS_MBSR_MCF (0x80) /* Data Transfer Complete */
-#define MCFMBUS_MBSR_MAAS (0x40) /* Addressed as a Slave */
-#define MCFMBUS_MBSR_MBB (0x20) /* Bus Busy */
-#define MCFMBUS_MBSR_MAL (0x10) /* Arbitration Lost */
-#define MCFMBUS_MBSR_SRW (0x04) /* Slave Transmit */
-#define MCFMBUS_MBSR_MIF (0x02) /* M-Bus Interrupt */
-#define MCFMBUS_MBSR_RXAK (0x01) /* No Acknowledge Received */
-
-/*
-* Define bit flags in DATA I/O Register
-*/
-
-#define MCFMBUS_MBDR_READ (0x01) /* 1=read 0=write MBUS */
-
-#define MBUSIOCSCLOCK 1
-#define MBUSIOCGCLOCK 2
-#define MBUSIOCSADDR 3
-#define MBUSIOCGADDR 4
-#define MBUSIOCSSLADDR 5
-#define MBUSIOCGSLADDR 6
-#define MBUSIOCSSUBADDR 7
-#define MBUSIOCGSUBADDR 8
-
-#endif
diff --git a/arch/m68k/include/asm/mman.h b/arch/m68k/include/asm/mman.h
deleted file mode 100644
index 8eebf89f5ab..00000000000
--- a/arch/m68k/include/asm/mman.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/mman.h>
diff --git a/arch/m68k/include/asm/mutex.h b/arch/m68k/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc1..00000000000
--- a/arch/m68k/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/m68k/include/asm/percpu.h b/arch/m68k/include/asm/percpu.h
deleted file mode 100644
index 0859d048faf..00000000000
--- a/arch/m68k/include/asm/percpu.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_M68K_PERCPU_H
-#define __ASM_M68K_PERCPU_H
-
-#include <asm-generic/percpu.h>
-
-#endif /* __ASM_M68K_PERCPU_H */
diff --git a/arch/m68k/include/asm/resource.h b/arch/m68k/include/asm/resource.h
deleted file mode 100644
index e7d35019f33..00000000000
--- a/arch/m68k/include/asm/resource.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _M68K_RESOURCE_H
-#define _M68K_RESOURCE_H
-
-#include <asm-generic/resource.h>
-
-#endif /* _M68K_RESOURCE_H */
diff --git a/arch/m68k/include/asm/sbus.h b/arch/m68k/include/asm/sbus.h
deleted file mode 100644
index bfe3ba147f2..00000000000
--- a/arch/m68k/include/asm/sbus.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * some sbus structures and macros to make usage of sbus drivers possible
- */
-
-#ifndef __M68K_SBUS_H
-#define __M68K_SBUS_H
-
-struct sbus_dev {
- struct {
- unsigned int which_io;
- unsigned int phys_addr;
- } reg_addrs[1];
-};
-
-/* sbus IO functions stolen from include/asm-sparc/io.h for the serial driver */
-/* No SBUS on the Sun3, kludge -- sam */
-
-static inline void _sbus_writeb(unsigned char val, unsigned long addr)
-{
- *(volatile unsigned char *)addr = val;
-}
-
-static inline unsigned char _sbus_readb(unsigned long addr)
-{
- return *(volatile unsigned char *)addr;
-}
-
-static inline void _sbus_writel(unsigned long val, unsigned long addr)
-{
- *(volatile unsigned long *)addr = val;
-
-}
-
-extern inline unsigned long _sbus_readl(unsigned long addr)
-{
- return *(volatile unsigned long *)addr;
-}
-
-
-#define sbus_readb(a) _sbus_readb((unsigned long)a)
-#define sbus_writeb(v, a) _sbus_writeb(v, (unsigned long)a)
-#define sbus_readl(a) _sbus_readl((unsigned long)a)
-#define sbus_writel(v, a) _sbus_writel(v, (unsigned long)a)
-
-#endif
diff --git a/arch/m68k/include/asm/scatterlist.h b/arch/m68k/include/asm/scatterlist.h
deleted file mode 100644
index 312505452a1..00000000000
--- a/arch/m68k/include/asm/scatterlist.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _M68K_SCATTERLIST_H
-#define _M68K_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* !(_M68K_SCATTERLIST_H) */
diff --git a/arch/m68k/include/asm/sections.h b/arch/m68k/include/asm/sections.h
deleted file mode 100644
index 5277e52715e..00000000000
--- a/arch/m68k/include/asm/sections.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _ASM_M68K_SECTIONS_H
-#define _ASM_M68K_SECTIONS_H
-
-#include <asm-generic/sections.h>
-
-extern char _sbss[], _ebss[];
-
-#endif /* _ASM_M68K_SECTIONS_H */
diff --git a/arch/m68k/include/asm/shm.h b/arch/m68k/include/asm/shm.h
deleted file mode 100644
index fa56ec84a12..00000000000
--- a/arch/m68k/include/asm/shm.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef _M68K_SHM_H
-#define _M68K_SHM_H
-
-
-/* format of page table entries that correspond to shared memory pages
- currently out in swap space (see also mm/swap.c):
- bits 0-1 (PAGE_PRESENT) is = 0
- bits 8..2 (SWP_TYPE) are = SHM_SWP_TYPE
- bits 31..9 are used like this:
- bits 15..9 (SHM_ID) the id of the shared memory segment
- bits 30..16 (SHM_IDX) the index of the page within the shared memory segment
- (actually only bits 25..16 get used since SHMMAX is so low)
- bit 31 (SHM_READ_ONLY) flag whether the page belongs to a read-only attach
-*/
-/* on the m68k both bits 0 and 1 must be zero */
-/* format on the sun3 is similar, but bits 30, 31 are set to zero and all
- others are reduced by 2. --m */
-
-#ifndef CONFIG_SUN3
-#define SHM_ID_SHIFT 9
-#else
-#define SHM_ID_SHIFT 7
-#endif
-#define _SHM_ID_BITS 7
-#define SHM_ID_MASK ((1<<_SHM_ID_BITS)-1)
-
-#define SHM_IDX_SHIFT (SHM_ID_SHIFT+_SHM_ID_BITS)
-#define _SHM_IDX_BITS 15
-#define SHM_IDX_MASK ((1<<_SHM_IDX_BITS)-1)
-
-#endif /* _M68K_SHM_H */
diff --git a/arch/m68k/include/asm/siginfo.h b/arch/m68k/include/asm/siginfo.h
deleted file mode 100644
index 851d3d784b5..00000000000
--- a/arch/m68k/include/asm/siginfo.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _M68K_SIGINFO_H
-#define _M68K_SIGINFO_H
-
-#include <asm-generic/siginfo.h>
-
-#endif
diff --git a/arch/m68k/include/asm/statfs.h b/arch/m68k/include/asm/statfs.h
deleted file mode 100644
index 08d93f14e06..00000000000
--- a/arch/m68k/include/asm/statfs.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _M68K_STATFS_H
-#define _M68K_STATFS_H
-
-#include <asm-generic/statfs.h>
-
-#endif /* _M68K_STATFS_H */
diff --git a/arch/m68k/include/asm/topology.h b/arch/m68k/include/asm/topology.h
deleted file mode 100644
index ca173e9f26f..00000000000
--- a/arch/m68k/include/asm/topology.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_M68K_TOPOLOGY_H
-#define _ASM_M68K_TOPOLOGY_H
-
-#include <asm-generic/topology.h>
-
-#endif /* _ASM_M68K_TOPOLOGY_H */
diff --git a/arch/m68k/include/asm/types.h b/arch/m68k/include/asm/types.h
deleted file mode 100644
index 89705adcbd5..00000000000
--- a/arch/m68k/include/asm/types.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef _M68K_TYPES_H
-#define _M68K_TYPES_H
-
-/*
- * This file is never included by application software unless
- * explicitly requested (e.g., via linux/types.h) in which case the
- * application is Linux specific so (user-) name space pollution is
- * not a major issue. However, for interoperability, libraries still
- * need to be careful to avoid a name clashes.
- */
-#include <asm-generic/int-ll64.h>
-
-/*
- * These aren't exported outside the kernel to avoid name space clashes
- */
-#ifdef __KERNEL__
-
-#define BITS_PER_LONG 32
-
-#endif /* __KERNEL__ */
-
-#endif /* _M68K_TYPES_H */
diff --git a/arch/m68k/include/asm/unaligned.h b/arch/m68k/include/asm/unaligned.h
index f4043ae63db..2b3ca0bf7a0 100644
--- a/arch/m68k/include/asm/unaligned.h
+++ b/arch/m68k/include/asm/unaligned.h
@@ -2,7 +2,7 @@
#define _ASM_M68K_UNALIGNED_H
-#if defined(CONFIG_COLDFIRE) || defined(CONFIG_M68000)
+#ifdef CONFIG_CPU_HAS_NO_UNALIGNED
#include <linux/unaligned/be_struct.h>
#include <linux/unaligned/le_byteshift.h>
#include <linux/unaligned/generic.h>
@@ -12,7 +12,7 @@
#else
/*
- * The m68k can do unaligned accesses itself.
+ * The m68k can do unaligned accesses itself.
*/
#include <linux/unaligned/access_ok.h>
#include <linux/unaligned/generic.h>
diff --git a/arch/m68k/include/asm/xor.h b/arch/m68k/include/asm/xor.h
deleted file mode 100644
index c82eb12a5b1..00000000000
--- a/arch/m68k/include/asm/xor.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/xor.h>
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index 7dc186b7a85..71fb29938db 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -218,13 +218,10 @@ void __init setup_arch(char **cmdline_p)
printk(KERN_INFO "Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. (Jate Sujjavanich)\n");
#endif
- pr_debug("KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x "
- "BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext,
- (int) &_sdata, (int) &_edata,
- (int) &_sbss, (int) &_ebss);
- pr_debug("MEMORY -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x\n ",
- (int) &_ebss, (int) memory_start,
- (int) memory_start, (int) memory_end);
+ pr_debug("KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p BSS=0x%p-0x%p\n",
+ _stext, _etext, _sdata, _edata, __bss_start, __bss_stop);
+ pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
+ __bss_stop, memory_start, memory_start, memory_end);
/* Keep a copy of command line */
*cmdline_p = &command_line[0];
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 8623f8dc16f..9a5932ec368 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -479,9 +479,13 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
goto bad_access;
}
- mem_value = *mem;
+ /*
+ * No need to check for EFAULT; we know that the page is
+ * present and writable.
+ */
+ __get_user(mem_value, mem);
if (mem_value == oldval)
- *mem = newval;
+ __put_user(newval, mem);
pte_unmap_unlock(pte, ptl);
up_read(&mm->mmap_sem);
diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds
index 40e02d9c38b..06a763f49fd 100644
--- a/arch/m68k/kernel/vmlinux-nommu.lds
+++ b/arch/m68k/kernel/vmlinux-nommu.lds
@@ -78,9 +78,7 @@ SECTIONS {
__init_end = .;
}
- _sbss = .;
BSS_SECTION(0, 0, 0)
- _ebss = .;
_end = .;
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
index 63407c83682..d0993594f55 100644
--- a/arch/m68k/kernel/vmlinux-std.lds
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -31,9 +31,7 @@ SECTIONS
RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE)
- _sbss = .;
BSS_SECTION(0, 0, 0)
- _ebss = .;
_edata = .; /* End of data section */
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index ad0f46d64c0..8080469ee6c 100644
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -44,9 +44,7 @@ __init_begin = .;
. = ALIGN(PAGE_SIZE);
__init_end = .;
- _sbss = .;
BSS_SECTION(0, 0, 0)
- _ebss = .;
_end = . ;
diff --git a/arch/m68k/lib/muldi3.c b/arch/m68k/lib/muldi3.c
index 79e928a525d..ee5f0b1b5c5 100644
--- a/arch/m68k/lib/muldi3.c
+++ b/arch/m68k/lib/muldi3.c
@@ -19,7 +19,7 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
+#ifdef CONFIG_CPU_HAS_NO_MULDIV64
#define SI_TYPE_SIZE 32
#define __BITS4 (SI_TYPE_SIZE / 4)
diff --git a/arch/m68k/mm/init_mm.c b/arch/m68k/mm/init_mm.c
index f77f258dce3..282f9de6896 100644
--- a/arch/m68k/mm/init_mm.c
+++ b/arch/m68k/mm/init_mm.c
@@ -104,7 +104,7 @@ void __init print_memmap(void)
MLK_ROUNDUP(__init_begin, __init_end),
MLK_ROUNDUP(_stext, _etext),
MLK_ROUNDUP(_sdata, _edata),
- MLK_ROUNDUP(_sbss, _ebss));
+ MLK_ROUNDUP(__bss_start, __bss_stop));
}
void __init mem_init(void)
diff --git a/arch/m68k/mm/init_no.c b/arch/m68k/mm/init_no.c
index 345ec0d83e3..688e3664aea 100644
--- a/arch/m68k/mm/init_no.c
+++ b/arch/m68k/mm/init_no.c
@@ -91,7 +91,7 @@ void __init mem_init(void)
totalram_pages = free_all_bootmem();
codek = (_etext - _stext) >> 10;
- datak = (_ebss - _sdata) >> 10;
+ datak = (__bss_stop - _sdata) >> 10;
initk = (__init_begin - __init_end) >> 10;
tmp = nr_free_pages() << PAGE_SHIFT;
diff --git a/arch/m68k/platform/68328/head-de2.S b/arch/m68k/platform/68328/head-de2.S
index f632fdcb93e..537d3245b53 100644
--- a/arch/m68k/platform/68328/head-de2.S
+++ b/arch/m68k/platform/68328/head-de2.S
@@ -60,8 +60,8 @@ _start:
* Move ROM filesystem above bss :-)
*/
- moveal #_sbss, %a0 /* romfs at the start of bss */
- moveal #_ebss, %a1 /* Set up destination */
+ moveal #__bss_start, %a0 /* romfs at the start of bss */
+ moveal #__bss_stop, %a1 /* Set up destination */
movel %a0, %a2 /* Copy of bss start */
movel 8(%a0), %d1 /* Get size of ROMFS */
@@ -84,8 +84,8 @@ _start:
* Initialize BSS segment to 0
*/
- lea _sbss, %a0
- lea _ebss, %a1
+ lea __bss_start, %a0
+ lea __bss_stop, %a1
/* Copy 0 to %a0 until %a0 == %a1 */
2: cmpal %a0, %a1
diff --git a/arch/m68k/platform/68328/head-pilot.S b/arch/m68k/platform/68328/head-pilot.S
index 2ebfd642081..45a9dad29e3 100644
--- a/arch/m68k/platform/68328/head-pilot.S
+++ b/arch/m68k/platform/68328/head-pilot.S
@@ -110,7 +110,7 @@ L0:
movel #CONFIG_VECTORBASE, %d7
addl #16, %d7
moveal %d7, %a0
- moveal #_ebss, %a1
+ moveal #__bss_stop, %a1
lea %a1@(512), %a2
DBG_PUTC('C')
@@ -138,8 +138,8 @@ LD1:
DBG_PUTC('E')
- moveal #_sbss, %a0
- moveal #_ebss, %a1
+ moveal #__bss_start, %a0
+ moveal #__bss_stop, %a1
/* Copy 0 to %a0 until %a0 == %a1 */
L1:
@@ -150,7 +150,7 @@ L1:
DBG_PUTC('F')
/* Copy command line from end of bss to command line */
- moveal #_ebss, %a0
+ moveal #__bss_stop, %a0
moveal #command_line, %a1
lea %a1@(512), %a2
@@ -165,7 +165,7 @@ L3:
movel #_sdata, %d0
movel %d0, _rambase
- movel #_ebss, %d0
+ movel #__bss_stop, %d0
movel %d0, _ramstart
movel %a4, %d0
diff --git a/arch/m68k/platform/68328/head-ram.S b/arch/m68k/platform/68328/head-ram.S
index 7f1aeeacb21..5189ef92609 100644
--- a/arch/m68k/platform/68328/head-ram.S
+++ b/arch/m68k/platform/68328/head-ram.S
@@ -76,8 +76,8 @@ pclp3:
beq pclp3
#endif /* DEBUG */
moveal #0x007ffff0, %ssp
- moveal #_sbss, %a0
- moveal #_ebss, %a1
+ moveal #__bss_start, %a0
+ moveal #__bss_stop, %a1
/* Copy 0 to %a0 until %a0 >= %a1 */
L1:
diff --git a/arch/m68k/platform/68328/head-rom.S b/arch/m68k/platform/68328/head-rom.S
index a5ff96d0295..3dff98ba2e9 100644
--- a/arch/m68k/platform/68328/head-rom.S
+++ b/arch/m68k/platform/68328/head-rom.S
@@ -59,8 +59,8 @@ _stext: movew #0x2700,%sr
cmpal %a1, %a2
bhi 1b
- moveal #_sbss, %a0
- moveal #_ebss, %a1
+ moveal #__bss_start, %a0
+ moveal #__bss_stop, %a1
/* Copy 0 to %a0 until %a0 == %a1 */
1:
@@ -70,7 +70,7 @@ _stext: movew #0x2700,%sr
movel #_sdata, %d0
movel %d0, _rambase
- movel #_ebss, %d0
+ movel #__bss_stop, %d0
movel %d0, _ramstart
movel #RAMEND-CONFIG_MEMORY_RESERVE*0x100000, %d0
movel %d0, _ramend
diff --git a/arch/m68k/platform/68360/head-ram.S b/arch/m68k/platform/68360/head-ram.S
index 8eb94fb6b97..acd213170d8 100644
--- a/arch/m68k/platform/68360/head-ram.S
+++ b/arch/m68k/platform/68360/head-ram.S
@@ -219,8 +219,8 @@ LD1:
cmp.l #_edata, %a1
blt LD1
- moveal #_sbss, %a0
- moveal #_ebss, %a1
+ moveal #__bss_start, %a0
+ moveal #__bss_stop, %a1
/* Copy 0 to %a0 until %a0 == %a1 */
L1:
@@ -234,7 +234,7 @@ load_quicc:
store_ram_size:
/* Set ram size information */
move.l #_sdata, _rambase
- move.l #_ebss, _ramstart
+ move.l #__bss_stop, _ramstart
move.l #RAMEND, %d0
sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/
move.l %d0, _ramend /* Different from RAMEND.*/
diff --git a/arch/m68k/platform/68360/head-rom.S b/arch/m68k/platform/68360/head-rom.S
index 97510e55b80..dfc756d9988 100644
--- a/arch/m68k/platform/68360/head-rom.S
+++ b/arch/m68k/platform/68360/head-rom.S
@@ -13,7 +13,7 @@
*/
.global _stext
-.global _sbss
+.global __bss_start
.global _start
.global _rambase
@@ -229,8 +229,8 @@ LD1:
cmp.l #_edata, %a1
blt LD1
- moveal #_sbss, %a0
- moveal #_ebss, %a1
+ moveal #__bss_start, %a0
+ moveal #__bss_stop, %a1
/* Copy 0 to %a0 until %a0 == %a1 */
L1:
@@ -244,7 +244,7 @@ load_quicc:
store_ram_size:
/* Set ram size information */
move.l #_sdata, _rambase
- move.l #_ebss, _ramstart
+ move.l #__bss_stop, _ramstart
move.l #RAMEND, %d0
sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/
move.l %d0, _ramend /* Different from RAMEND.*/
diff --git a/arch/m68k/platform/coldfire/clk.c b/arch/m68k/platform/coldfire/clk.c
index 75f9ee967ea..9cd13b4ce42 100644
--- a/arch/m68k/platform/coldfire/clk.c
+++ b/arch/m68k/platform/coldfire/clk.c
@@ -146,9 +146,3 @@ struct clk_ops clk_ops1 = {
};
#endif /* MCFPM_PPMCR1 */
#endif /* MCFPM_PPMCR0 */
-
-struct clk *devm_clk_get(struct device *dev, const char *id)
-{
- return NULL;
-}
-EXPORT_SYMBOL(devm_clk_get);
diff --git a/arch/m68k/platform/coldfire/head.S b/arch/m68k/platform/coldfire/head.S
index 4e0c9eb3bd1..b88f5716f35 100644
--- a/arch/m68k/platform/coldfire/head.S
+++ b/arch/m68k/platform/coldfire/head.S
@@ -230,8 +230,8 @@ _vstart:
/*
* Move ROM filesystem above bss :-)
*/
- lea _sbss,%a0 /* get start of bss */
- lea _ebss,%a1 /* set up destination */
+ lea __bss_start,%a0 /* get start of bss */
+ lea __bss_stop,%a1 /* set up destination */
movel %a0,%a2 /* copy of bss start */
movel 8(%a0),%d0 /* get size of ROMFS */
@@ -249,7 +249,7 @@ _copy_romfs:
bne _copy_romfs
#else /* CONFIG_ROMFS_FS */
- lea _ebss,%a1
+ lea __bss_stop,%a1
movel %a1,_ramstart
#endif /* CONFIG_ROMFS_FS */
@@ -257,8 +257,8 @@ _copy_romfs:
/*
* Zero out the bss region.
*/
- lea _sbss,%a0 /* get start of bss */
- lea _ebss,%a1 /* get end of bss */
+ lea __bss_start,%a0 /* get start of bss */
+ lea __bss_stop,%a1 /* get end of bss */
clrl %d0 /* set value */
_clear_bss:
movel %d0,(%a0)+ /* clear each word */
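All of the m68k head.S hunks above replace the private _sbss/_ebss markers with the generic __bss_start/__bss_stop symbols that BSS_SECTION() already emits, while keeping the hand-rolled loops that zero the region between them. A hedged C equivalent of that zeroing loop (the extern symbols come from the linker script; this is a sketch, not the boot code itself):

#include <string.h>

/* Provided by the linker script; only their addresses are meaningful. */
extern char __bss_start[];
extern char __bss_stop[];

static void clear_bss(void)
{
	/* Equivalent of the moveal/clrl assembly loop: zero every byte of .bss. */
	memset(__bss_start, 0, __bss_stop - __bss_start);
}

Using the generic symbols lets the nommu linker scripts drop the extra _sbss/_ebss definitions removed earlier in this series.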
diff --git a/arch/m68k/sun3/prom/init.c b/arch/m68k/sun3/prom/init.c
index d8e6349336b..eeba067d565 100644
--- a/arch/m68k/sun3/prom/init.c
+++ b/arch/m68k/sun3/prom/init.c
@@ -22,57 +22,13 @@ int prom_root_node;
struct linux_nodeops *prom_nodeops;
/* You must call prom_init() before you attempt to use any of the
- * routines in the prom library. It returns 0 on success, 1 on
- * failure. It gets passed the pointer to the PROM vector.
+ * routines in the prom library.
+ * It gets passed the pointer to the PROM vector.
*/
-extern void prom_meminit(void);
-extern void prom_ranges_init(void);
-
void __init prom_init(struct linux_romvec *rp)
{
romvec = rp;
-#ifndef CONFIG_SUN3
- switch(romvec->pv_romvers) {
- case 0:
- prom_vers = PROM_V0;
- break;
- case 2:
- prom_vers = PROM_V2;
- break;
- case 3:
- prom_vers = PROM_V3;
- break;
- case 4:
- prom_vers = PROM_P1275;
- prom_printf("PROMLIB: Sun IEEE Prom not supported yet\n");
- prom_halt();
- break;
- default:
- prom_printf("PROMLIB: Bad PROM version %d\n",
- romvec->pv_romvers);
- prom_halt();
- break;
- };
-
- prom_rev = romvec->pv_plugin_revision;
- prom_prev = romvec->pv_printrev;
- prom_nodeops = romvec->pv_nodeops;
-
- prom_root_node = prom_getsibling(0);
- if((prom_root_node == 0) || (prom_root_node == -1))
- prom_halt();
-
- if((((unsigned long) prom_nodeops) == 0) ||
- (((unsigned long) prom_nodeops) == -1))
- prom_halt();
-
- prom_meminit();
-
- prom_ranges_init();
-#endif
-// printk("PROMLIB: Sun Boot Prom Version %d Revision %d\n",
-// romvec->pv_romvers, prom_rev);
/* Initialization successful. */
return;
diff --git a/arch/microblaze/include/asm/sections.h b/arch/microblaze/include/asm/sections.h
index 4487e150b45..c07ed5d2a82 100644
--- a/arch/microblaze/include/asm/sections.h
+++ b/arch/microblaze/include/asm/sections.h
@@ -18,10 +18,6 @@ extern char _ssbss[], _esbss[];
extern unsigned long __ivt_start[], __ivt_end[];
extern char _etext[], _stext[];
-# ifdef CONFIG_MTD_UCLINUX
-extern char *_ebss;
-# endif
-
extern u32 _fdt_start[], _fdt_end[];
# endif /* !__ASSEMBLY__ */
diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c
index bb4907c828d..2b25bcf05c0 100644
--- a/arch/microblaze/kernel/microblaze_ksyms.c
+++ b/arch/microblaze/kernel/microblaze_ksyms.c
@@ -21,9 +21,6 @@
#include <linux/ftrace.h>
#include <linux/uaccess.h>
-extern char *_ebss;
-EXPORT_SYMBOL_GPL(_ebss);
-
#ifdef CONFIG_FUNCTION_TRACER
extern void _mcount(void);
EXPORT_SYMBOL(_mcount);
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 16d8dfd9094..4da971d4392 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -121,7 +121,7 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
/* Move ROMFS out of BSS before clearing it */
if (romfs_size > 0) {
- memmove(&_ebss, (int *)romfs_base, romfs_size);
+ memmove(&__bss_stop, (int *)romfs_base, romfs_size);
klimit += romfs_size;
}
#endif
@@ -165,7 +165,7 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
BUG_ON(romfs_size < 0); /* What else can we do? */
printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
- romfs_size, romfs_base, (unsigned)&_ebss);
+ romfs_size, romfs_base, (unsigned)&__bss_stop);
printk("New klimit: 0x%08x\n", (unsigned)klimit);
#endif
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index 109e9d86ade..936d01a689d 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -131,7 +131,6 @@ SECTIONS {
*(COMMON)
. = ALIGN (4) ;
__bss_stop = . ;
- _ebss = . ;
}
. = ALIGN(PAGE_SIZE);
_end = .;
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 331d574df99..faf65286574 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -89,6 +89,7 @@ config ATH79
select CEVT_R4K
select CSRC_R4K
select DMA_NONCOHERENT
+ select HAVE_CLK
select IRQ_CPU
select MIPS_MACHINE
select SYS_HAS_CPU_MIPS32_R2
diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c
index 99969484c47..a124c251c0c 100644
--- a/arch/mips/alchemy/board-mtx1.c
+++ b/arch/mips/alchemy/board-mtx1.c
@@ -228,6 +228,8 @@ static int mtx1_pci_idsel(unsigned int devsel, int assert)
* adapter on the mtx-1 "singleboard" variant. It triggers a custom
* logic chip connected to EXT_IO3 (GPIO1) to suppress IDSEL signals.
*/
+ udelay(1);
+
if (assert && devsel != 0)
/* Suppress signal to Cardbus */
alchemy_gpio_set_value(1, 0); /* set EXT_IO3 OFF */
diff --git a/arch/mips/ath79/dev-usb.c b/arch/mips/ath79/dev-usb.c
index 36e9570e7bc..b2a2311ec85 100644
--- a/arch/mips/ath79/dev-usb.c
+++ b/arch/mips/ath79/dev-usb.c
@@ -145,6 +145,8 @@ static void __init ar7240_usb_setup(void)
ath79_ohci_resources[0].start = AR7240_OHCI_BASE;
ath79_ohci_resources[0].end = AR7240_OHCI_BASE + AR7240_OHCI_SIZE - 1;
+ ath79_ohci_resources[1].start = ATH79_CPU_IRQ_USB;
+ ath79_ohci_resources[1].end = ATH79_CPU_IRQ_USB;
platform_device_register(&ath79_ohci_device);
}
diff --git a/arch/mips/ath79/gpio.c b/arch/mips/ath79/gpio.c
index 29054f21183..48fe762d252 100644
--- a/arch/mips/ath79/gpio.c
+++ b/arch/mips/ath79/gpio.c
@@ -188,8 +188,10 @@ void __init ath79_gpio_init(void)
if (soc_is_ar71xx())
ath79_gpio_count = AR71XX_GPIO_COUNT;
- else if (soc_is_ar724x())
- ath79_gpio_count = AR724X_GPIO_COUNT;
+ else if (soc_is_ar7240())
+ ath79_gpio_count = AR7240_GPIO_COUNT;
+ else if (soc_is_ar7241() || soc_is_ar7242())
+ ath79_gpio_count = AR7241_GPIO_COUNT;
else if (soc_is_ar913x())
ath79_gpio_count = AR913X_GPIO_COUNT;
else if (soc_is_ar933x())
diff --git a/arch/mips/bcm63xx/dev-spi.c b/arch/mips/bcm63xx/dev-spi.c
index e39f73048d4..f1c9c3e2f67 100644
--- a/arch/mips/bcm63xx/dev-spi.c
+++ b/arch/mips/bcm63xx/dev-spi.c
@@ -106,11 +106,15 @@ int __init bcm63xx_spi_register(void)
if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
spi_resources[0].end += BCM_6338_RSET_SPI_SIZE - 1;
spi_pdata.fifo_size = SPI_6338_MSG_DATA_SIZE;
+ spi_pdata.msg_type_shift = SPI_6338_MSG_TYPE_SHIFT;
+ spi_pdata.msg_ctl_width = SPI_6338_MSG_CTL_WIDTH;
}
if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) {
spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1;
spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE;
+ spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT;
+ spi_pdata.msg_ctl_width = SPI_6358_MSG_CTL_WIDTH;
}
bcm63xx_spi_regs_init();
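The bcm63xx change above stops hard-coding the message-control layout and instead passes a per-SoC type shift and register width through platform data. A small sketch of why both values matter when composing the message-control word (the composition shown is an assumption for illustration, not the driver's exact code):

#include <stdio.h>
#include <stdint.h>

#define SPI_HD_W		0x01	/* half-duplex write opcode */
#define SPI_BYTE_CNT_SHIFT	0

/* Compose a message-control word whose layout depends on the per-SoC
 * type shift handed over through platform data. */
static uint16_t make_msg_ctl(unsigned int msg_type_shift, unsigned int nbytes)
{
	return (SPI_HD_W << msg_type_shift) | (nbytes << SPI_BYTE_CNT_SHIFT);
}

int main(void)
{
	/* 6338/6348 use an 8-bit register with the type at bit 6,
	 * 6358/6368 a 16-bit register with the type at bit 14. */
	printf("6348-style: 0x%02x\n", make_msg_ctl(6, 4));
	printf("6358-style: 0x%04x\n", make_msg_ctl(14, 4));
	return 0;
}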
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 7fb1f222b8a..274cd4fad30 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -61,6 +61,12 @@ static void octeon_irq_set_ciu_mapping(int irq, int line, int bit,
octeon_irq_ciu_to_irq[line][bit] = irq;
}
+static void octeon_irq_force_ciu_mapping(struct irq_domain *domain,
+ int irq, int line, int bit)
+{
+ irq_domain_associate(domain, irq, line << 6 | bit);
+}
+
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
@@ -183,19 +189,9 @@ static void __init octeon_irq_init_core(void)
mutex_init(&cd->core_irq_mutex);
irq = OCTEON_IRQ_SW0 + i;
- switch (irq) {
- case OCTEON_IRQ_TIMER:
- case OCTEON_IRQ_SW0:
- case OCTEON_IRQ_SW1:
- case OCTEON_IRQ_5:
- case OCTEON_IRQ_PERF:
- irq_set_chip_data(irq, cd);
- irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
- handle_percpu_irq);
- break;
- default:
- break;
- }
+ irq_set_chip_data(irq, cd);
+ irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
+ handle_percpu_irq);
}
}
@@ -890,7 +886,6 @@ static int octeon_irq_gpio_xlat(struct irq_domain *d,
unsigned int type;
unsigned int pin;
unsigned int trigger;
- struct octeon_irq_gpio_domain_data *gpiod;
if (d->of_node != node)
return -EINVAL;
@@ -925,8 +920,7 @@ static int octeon_irq_gpio_xlat(struct irq_domain *d,
break;
}
*out_type = type;
- gpiod = d->host_data;
- *out_hwirq = gpiod->base_hwirq + pin;
+ *out_hwirq = pin;
return 0;
}
@@ -996,19 +990,21 @@ static int octeon_irq_ciu_map(struct irq_domain *d,
static int octeon_irq_gpio_map(struct irq_domain *d,
unsigned int virq, irq_hw_number_t hw)
{
- unsigned int line = hw >> 6;
- unsigned int bit = hw & 63;
+ struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
+ unsigned int line, bit;
if (!octeon_irq_virq_in_range(virq))
return -EINVAL;
+ hw += gpiod->base_hwirq;
+ line = hw >> 6;
+ bit = hw & 63;
if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
return -EINVAL;
octeon_irq_set_ciu_mapping(virq, line, bit,
octeon_irq_gpio_chip,
octeon_irq_handle_gpio);
-
return 0;
}
@@ -1149,6 +1145,7 @@ static void __init octeon_irq_init_ciu(void)
struct irq_chip *chip_wd;
struct device_node *gpio_node;
struct device_node *ciu_node;
+ struct irq_domain *ciu_domain = NULL;
octeon_irq_init_ciu_percpu();
octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;
@@ -1177,31 +1174,6 @@ static void __init octeon_irq_init_ciu(void)
/* Mips internal */
octeon_irq_init_core();
- /* CIU_0 */
- for (i = 0; i < 16; i++)
- octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WORKQ0, 0, i + 0, chip, handle_level_irq);
-
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq);
-
- for (i = 0; i < 4; i++)
- octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_INT0, 0, i + 36, chip, handle_level_irq);
- for (i = 0; i < 4; i++)
- octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_MSI0, 0, i + 40, chip, handle_level_irq);
-
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_RML, 0, 46, chip, handle_level_irq);
- for (i = 0; i < 4; i++)
- octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_TIMER0, 0, i + 52, chip, handle_edge_irq);
-
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB0, 0, 56, chip, handle_level_irq);
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_BOOTDMA, 0, 63, chip, handle_level_irq);
-
- /* CIU_1 */
- for (i = 0; i < 16; i++)
- octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq);
-
- octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB1, 1, 17, chip, handle_level_irq);
-
gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
if (gpio_node) {
struct octeon_irq_gpio_domain_data *gpiod;
@@ -1219,10 +1191,35 @@ static void __init octeon_irq_init_ciu(void)
ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu");
if (ciu_node) {
- irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL);
+ ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL);
of_node_put(ciu_node);
} else
- pr_warn("Cannot find device node for cavium,octeon-3860-ciu.\n");
+ panic("Cannot find device node for cavium,octeon-3860-ciu.");
+
+ /* CIU_0 */
+ for (i = 0; i < 16; i++)
+ octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
+
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq);
+ octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq);
+
+ for (i = 0; i < 4; i++)
+ octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
+ for (i = 0; i < 4; i++)
+ octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
+
+ octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
+ for (i = 0; i < 4; i++)
+ octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
+
+ octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
+ octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_BOOTDMA, 0, 63);
+
+ /* CIU_1 */
+ for (i = 0; i < 16; i++)
+ octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq);
+
+ octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
/* Enable the CIU lines */
set_c0_status(STATUSF_IP3 | STATUSF_IP2);
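For the octeon_irq_gpio_map() change above: the GPIO hardware IRQ number is first rebased by the domain's base_hwirq and then split into a CIU line (upper bits) and a bit position (low six bits). A standalone sketch of that shift/mask arithmetic with example values:

#include <stdio.h>

int main(void)
{
	unsigned int base_hwirq = 16;       /* example GPIO base within CIU line 0 */
	unsigned int pin = 3;               /* GPIO pin handed back by the DT xlate */
	unsigned int hw = pin + base_hwirq; /* rebased hardware IRQ number */

	unsigned int line = hw >> 6;        /* which 64-bit CIU register */
	unsigned int bit  = hw & 63;        /* bit position within that register */

	printf("hw=%u -> line=%u bit=%u\n", hw, line, bit);
	return 0;
}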
diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
index 1caa78ad06d..dde504477fa 100644
--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
@@ -393,7 +393,8 @@
#define AR71XX_GPIO_REG_FUNC 0x28
#define AR71XX_GPIO_COUNT 16
-#define AR724X_GPIO_COUNT 18
+#define AR7240_GPIO_COUNT 18
+#define AR7241_GPIO_COUNT 20
#define AR913X_GPIO_COUNT 22
#define AR933X_GPIO_COUNT 30
#define AR934X_GPIO_COUNT 23
diff --git a/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
index 4476fa03bf3..6ddae926bf7 100644
--- a/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
@@ -42,7 +42,6 @@
#define cpu_has_mips64r1 0
#define cpu_has_mips64r2 0
-#define cpu_has_dsp 0
#define cpu_has_mipsmt 0
#define cpu_has_64bits 0
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
index 7d98dbe5d4b..c9bae136260 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
@@ -9,6 +9,8 @@ int __init bcm63xx_spi_register(void);
struct bcm63xx_spi_pdata {
unsigned int fifo_size;
+ unsigned int msg_type_shift;
+ unsigned int msg_ctl_width;
int bus_num;
int num_chipselect;
u32 speed_hz;
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
index 4ccc2a748af..61f2a2a5099 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
@@ -1054,7 +1054,8 @@
#define SPI_6338_FILL_BYTE 0x07
#define SPI_6338_MSG_TAIL 0x09
#define SPI_6338_RX_TAIL 0x0b
-#define SPI_6338_MSG_CTL 0x40
+#define SPI_6338_MSG_CTL 0x40 /* 8-bit register */
+#define SPI_6338_MSG_CTL_WIDTH 8
#define SPI_6338_MSG_DATA 0x41
#define SPI_6338_MSG_DATA_SIZE 0x3f
#define SPI_6338_RX_DATA 0x80
@@ -1070,7 +1071,8 @@
#define SPI_6348_FILL_BYTE 0x07
#define SPI_6348_MSG_TAIL 0x09
#define SPI_6348_RX_TAIL 0x0b
-#define SPI_6348_MSG_CTL 0x40
+#define SPI_6348_MSG_CTL 0x40 /* 8-bit register */
+#define SPI_6348_MSG_CTL_WIDTH 8
#define SPI_6348_MSG_DATA 0x41
#define SPI_6348_MSG_DATA_SIZE 0x3f
#define SPI_6348_RX_DATA 0x80
@@ -1078,6 +1080,7 @@
/* BCM 6358 SPI core */
#define SPI_6358_MSG_CTL 0x00 /* 16-bits register */
+#define SPI_6358_MSG_CTL_WIDTH 16
#define SPI_6358_MSG_DATA 0x02
#define SPI_6358_MSG_DATA_SIZE 0x21e
#define SPI_6358_RX_DATA 0x400
@@ -1094,6 +1097,7 @@
/* BCM 6358 SPI core */
#define SPI_6368_MSG_CTL 0x00 /* 16-bits register */
+#define SPI_6368_MSG_CTL_WIDTH 16
#define SPI_6368_MSG_DATA 0x02
#define SPI_6368_MSG_DATA_SIZE 0x21e
#define SPI_6368_RX_DATA 0x400
@@ -1115,7 +1119,10 @@
#define SPI_HD_W 0x01
#define SPI_HD_R 0x02
#define SPI_BYTE_CNT_SHIFT 0
-#define SPI_MSG_TYPE_SHIFT 14
+#define SPI_6338_MSG_TYPE_SHIFT 6
+#define SPI_6348_MSG_TYPE_SHIFT 6
+#define SPI_6358_MSG_TYPE_SHIFT 14
+#define SPI_6368_MSG_TYPE_SHIFT 14
/* Command */
#define SPI_CMD_NOOP 0x00
diff --git a/arch/mips/include/asm/mach-cavium-octeon/irq.h b/arch/mips/include/asm/mach-cavium-octeon/irq.h
index 418992042f6..c22a3078bf1 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/irq.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/irq.h
@@ -21,14 +21,10 @@ enum octeon_irq {
OCTEON_IRQ_TIMER,
/* sources in CIU_INTX_EN0 */
OCTEON_IRQ_WORKQ0,
- OCTEON_IRQ_GPIO0 = OCTEON_IRQ_WORKQ0 + 16,
- OCTEON_IRQ_WDOG0 = OCTEON_IRQ_GPIO0 + 16,
+ OCTEON_IRQ_WDOG0 = OCTEON_IRQ_WORKQ0 + 16,
OCTEON_IRQ_WDOG15 = OCTEON_IRQ_WDOG0 + 15,
OCTEON_IRQ_MBOX0 = OCTEON_IRQ_WDOG0 + 16,
OCTEON_IRQ_MBOX1,
- OCTEON_IRQ_UART0,
- OCTEON_IRQ_UART1,
- OCTEON_IRQ_UART2,
OCTEON_IRQ_PCI_INT0,
OCTEON_IRQ_PCI_INT1,
OCTEON_IRQ_PCI_INT2,
@@ -38,8 +34,6 @@ enum octeon_irq {
OCTEON_IRQ_PCI_MSI2,
OCTEON_IRQ_PCI_MSI3,
- OCTEON_IRQ_TWSI,
- OCTEON_IRQ_TWSI2,
OCTEON_IRQ_RML,
OCTEON_IRQ_TIMER0,
OCTEON_IRQ_TIMER1,
@@ -47,8 +41,6 @@ enum octeon_irq {
OCTEON_IRQ_TIMER3,
OCTEON_IRQ_USB0,
OCTEON_IRQ_USB1,
- OCTEON_IRQ_MII0,
- OCTEON_IRQ_MII1,
OCTEON_IRQ_BOOTDMA,
#ifndef CONFIG_PCI_MSI
OCTEON_IRQ_LAST = 127
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index 7531ecd654d..dca8bce8c7a 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -10,6 +10,7 @@ struct mod_arch_specific {
struct list_head dbe_list;
const struct exception_table_entry *dbe_start;
const struct exception_table_entry *dbe_end;
+ struct mips_hi16 *r_mips_hi16_list;
};
typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. */
diff --git a/arch/mips/include/asm/r4k-timer.h b/arch/mips/include/asm/r4k-timer.h
index a37d12b3b61..afe9e0e03fe 100644
--- a/arch/mips/include/asm/r4k-timer.h
+++ b/arch/mips/include/asm/r4k-timer.h
@@ -12,16 +12,16 @@
#ifdef CONFIG_SYNC_R4K
-extern void synchronise_count_master(void);
-extern void synchronise_count_slave(void);
+extern void synchronise_count_master(int cpu);
+extern void synchronise_count_slave(int cpu);
#else
-static inline void synchronise_count_master(void)
+static inline void synchronise_count_master(int cpu)
{
}
-static inline void synchronise_count_slave(void)
+static inline void synchronise_count_slave(int cpu)
{
}
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index a5066b1c3de..4f8c3cba8c0 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -39,8 +39,6 @@ struct mips_hi16 {
Elf_Addr value;
};
-static struct mips_hi16 *mips_hi16_list;
-
static LIST_HEAD(dbe_list);
static DEFINE_SPINLOCK(dbe_lock);
@@ -128,8 +126,8 @@ static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v)
n->addr = (Elf_Addr *)location;
n->value = v;
- n->next = mips_hi16_list;
- mips_hi16_list = n;
+ n->next = me->arch.r_mips_hi16_list;
+ me->arch.r_mips_hi16_list = n;
return 0;
}
@@ -142,18 +140,28 @@ static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v)
return 0;
}
+static void free_relocation_chain(struct mips_hi16 *l)
+{
+ struct mips_hi16 *next;
+
+ while (l) {
+ next = l->next;
+ kfree(l);
+ l = next;
+ }
+}
+
static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
{
unsigned long insnlo = *location;
+ struct mips_hi16 *l;
Elf_Addr val, vallo;
/* Sign extend the addend we extract from the lo insn. */
vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
- if (mips_hi16_list != NULL) {
- struct mips_hi16 *l;
-
- l = mips_hi16_list;
+ if (me->arch.r_mips_hi16_list != NULL) {
+ l = me->arch.r_mips_hi16_list;
while (l != NULL) {
struct mips_hi16 *next;
unsigned long insn;
@@ -188,7 +196,7 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
l = next;
}
- mips_hi16_list = NULL;
+ me->arch.r_mips_hi16_list = NULL;
}
/*
@@ -201,6 +209,9 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
return 0;
out_danger:
+ free_relocation_chain(l);
+ me->arch.r_mips_hi16_list = NULL;
+
pr_err("module %s: dangerous R_MIPS_LO16 REL relocation\n", me->name);
return -ENOEXEC;
@@ -273,6 +284,7 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
+ me->arch.r_mips_hi16_list = NULL;
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
@@ -296,6 +308,19 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
return res;
}
+ /*
+ * Normally the hi16 list should be deallocated at this point. A
+ * malformed binary, however, could contain a series of R_MIPS_HI16
+ * relocations not followed by a R_MIPS_LO16 relocation. In that
+ * case, free up the list and return an error.
+ */
+ if (me->arch.r_mips_hi16_list) {
+ free_relocation_chain(me->arch.r_mips_hi16_list);
+ me->arch.r_mips_hi16_list = NULL;
+
+ return -ENOEXEC;
+ }
+
return 0;
}
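The module.c rework above keeps the pending R_MIPS_HI16 relocations on a per-module list (me->arch.r_mips_hi16_list) and frees the whole chain when no matching R_MIPS_LO16 ever arrives. A standalone sketch of that linked-list bookkeeping (hypothetical struct and user-space allocation, mirroring free_relocation_chain()):

#include <stdlib.h>

struct hi16 {
	struct hi16 *next;
	unsigned long addr;
};

/* Push one pending HI16 relocation onto the per-module list. */
static struct hi16 *push_hi16(struct hi16 *head, unsigned long addr)
{
	struct hi16 *n = malloc(sizeof(*n));

	if (!n)
		return head;	/* sketch only: drop the entry on allocation failure */
	n->addr = addr;
	n->next = head;
	return n;
}

/* Mirror of free_relocation_chain(): walk the list and release every node. */
static void free_chain(struct hi16 *l)
{
	while (l) {
		struct hi16 *next = l->next;

		free(l);
		l = next;
	}
}

int main(void)
{
	struct hi16 *list = NULL;

	list = push_hi16(list, 0x1000);
	list = push_hi16(list, 0x1004);
	free_chain(list);	/* what the error paths in apply_relocate() now do */
	return 0;
}

Keeping the list per module also means two modules being loaded concurrently can no longer corrupt each other's relocation state, which the old global list allowed.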
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index e7e03ecf549..afc379ca375 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -102,7 +102,7 @@ static void cmp_init_secondary(void)
c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE;
#endif
#ifdef CONFIG_MIPS_MT_SMTC
- c->tc_id = (read_c0_tcbind() >> TCBIND_CURTC_SHIFT) & TCBIND_CURTC;
+ c->tc_id = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
#endif
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 31637d8c873..9005bf9fb85 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -130,7 +130,7 @@ asmlinkage __cpuinit void start_secondary(void)
cpu_set(cpu, cpu_callin_map);
- synchronise_count_slave();
+ synchronise_count_slave(cpu);
/*
* irq will be enabled in ->smp_finish(), enabling it too early
@@ -173,7 +173,6 @@ void smp_send_stop(void)
void __init smp_cpus_done(unsigned int max_cpus)
{
mp_ops->cpus_done();
- synchronise_count_master();
}
/* called from main before smp_init() */
@@ -206,6 +205,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
while (!cpu_isset(cpu, cpu_callin_map))
udelay(100);
+ synchronise_count_master(cpu);
return 0;
}
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index 842d55e411f..7f1eca3858d 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -28,12 +28,11 @@ static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0);
#define COUNTON 100
#define NR_LOOPS 5
-void __cpuinit synchronise_count_master(void)
+void __cpuinit synchronise_count_master(int cpu)
{
int i;
unsigned long flags;
unsigned int initcount;
- int nslaves;
#ifdef CONFIG_MIPS_MT_SMTC
/*
@@ -43,8 +42,7 @@ void __cpuinit synchronise_count_master(void)
return;
#endif
- printk(KERN_INFO "Synchronize counters across %u CPUs: ",
- num_online_cpus());
+ printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);
local_irq_save(flags);
@@ -52,7 +50,7 @@ void __cpuinit synchronise_count_master(void)
* Notify the slaves that it's time to start
*/
atomic_set(&count_reference, read_c0_count());
- atomic_set(&count_start_flag, 1);
+ atomic_set(&count_start_flag, cpu);
smp_wmb();
/* Count will be initialised to current timer for all CPU's */
@@ -69,10 +67,9 @@ void __cpuinit synchronise_count_master(void)
* two CPUs.
*/
- nslaves = num_online_cpus()-1;
for (i = 0; i < NR_LOOPS; i++) {
- /* slaves loop on '!= ncpus' */
- while (atomic_read(&count_count_start) != nslaves)
+ /* slaves loop on '!= 2' */
+ while (atomic_read(&count_count_start) != 1)
mb();
atomic_set(&count_count_stop, 0);
smp_wmb();
@@ -89,7 +86,7 @@ void __cpuinit synchronise_count_master(void)
/*
* Wait for all slaves to leave the synchronization point:
*/
- while (atomic_read(&count_count_stop) != nslaves)
+ while (atomic_read(&count_count_stop) != 1)
mb();
atomic_set(&count_count_start, 0);
smp_wmb();
@@ -97,6 +94,7 @@ void __cpuinit synchronise_count_master(void)
}
/* Arrange for an interrupt in a short while */
write_c0_compare(read_c0_count() + COUNTON);
+ atomic_set(&count_start_flag, 0);
local_irq_restore(flags);
@@ -108,11 +106,10 @@ void __cpuinit synchronise_count_master(void)
printk("done.\n");
}
-void __cpuinit synchronise_count_slave(void)
+void __cpuinit synchronise_count_slave(int cpu)
{
int i;
unsigned int initcount;
- int ncpus;
#ifdef CONFIG_MIPS_MT_SMTC
/*
@@ -127,16 +124,15 @@ void __cpuinit synchronise_count_slave(void)
* so we first wait for the master to say everyone is ready
*/
- while (!atomic_read(&count_start_flag))
+ while (atomic_read(&count_start_flag) != cpu)
mb();
/* Count will be initialised to next expire for all CPU's */
initcount = atomic_read(&count_reference);
- ncpus = num_online_cpus();
for (i = 0; i < NR_LOOPS; i++) {
atomic_inc(&count_count_start);
- while (atomic_read(&count_count_start) != ncpus)
+ while (atomic_read(&count_count_start) != 2)
mb();
/*
@@ -146,7 +142,7 @@ void __cpuinit synchronise_count_slave(void)
write_c0_count(initcount);
atomic_inc(&count_count_stop);
- while (atomic_read(&count_count_stop) != ncpus)
+ while (atomic_read(&count_count_stop) != 2)
mb();
}
/* Arrange for an interrupt in a short while */
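The sync-r4k.c change above pairs the boot CPU with exactly one secondary at a time, which is why the master now loops on '!= 1', the slave on '!= 2', and the start flag carries the slave's CPU number. A pthread sketch of that two-party handshake under those assumptions (the real code repeats the exchange NR_LOOPS times and writes the CP0 count in between):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* One master, one slave per round: the same '!= 1' / '!= 2' bounds. */
static atomic_int start_flag;	/* master writes the slave's id here  */
static atomic_int checked_in;	/* both sides rendezvous through this */

static void *slave(void *arg)
{
	int cpu = *(int *)arg;

	while (atomic_load(&start_flag) != cpu)	/* wait for "your turn" */
		;
	atomic_fetch_add(&checked_in, 1);	/* slave arrives (-> 1) */
	while (atomic_load(&checked_in) != 2)	/* wait for the master  */
		;
	printf("cpu %d: counter would be written here\n", cpu);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int cpu = 1;

	pthread_create(&t, NULL, slave, &cpu);

	atomic_store(&start_flag, cpu);		/* notify this one slave */
	while (atomic_load(&checked_in) != 1)	/* master waits for it   */
		;
	atomic_fetch_add(&checked_in, 1);	/* release both (-> 2)   */

	pthread_join(t, NULL);
	return 0;
}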
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index 33aadbcf170..dcfd573871c 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -152,6 +152,8 @@ static int gup_huge_pud(pud_t pud, unsigned long addr, unsigned long end,
do {
VM_BUG_ON(compound_head(page) != head);
pages[*nr] = page;
+ if (PageTail(page))
+ get_huge_page_tail(page);
(*nr)++;
page++;
refs++;
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index 7b13a4caeea..fea823f1847 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -273,16 +273,19 @@ asmlinkage void plat_irq_dispatch(void)
unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
int irq;
+ if (unlikely(!pending)) {
+ spurious_interrupt();
+ return;
+ }
+
irq = irq_ffs(pending);
if (irq == MIPSCPU_INT_I8259A)
malta_hw0_irqdispatch();
else if (gic_present && ((1 << irq) & ipi_map[smp_processor_id()]))
malta_ipi_irqdispatch();
- else if (irq >= 0)
- do_IRQ(MIPS_CPU_IRQ_BASE + irq);
else
- spurious_interrupt();
+ do_IRQ(MIPS_CPU_IRQ_BASE + irq);
}
#ifdef CONFIG_MIPS_MT_SMP
diff --git a/arch/mips/mti-malta/malta-pci.c b/arch/mips/mti-malta/malta-pci.c
index 284dea54faf..2147cb34e70 100644
--- a/arch/mips/mti-malta/malta-pci.c
+++ b/arch/mips/mti-malta/malta-pci.c
@@ -252,16 +252,3 @@ void __init mips_pcibios_init(void)
register_pci_controller(controller);
}
-
-/* Enable PCI 2.1 compatibility in PIIX4 */
-static void __devinit quirk_dlcsetup(struct pci_dev *dev)
-{
- u8 odlc, ndlc;
- (void) pci_read_config_byte(dev, 0x82, &odlc);
- /* Enable passive releases and delayed transaction */
- ndlc = odlc | 7;
- (void) pci_write_config_byte(dev, 0x82, ndlc);
-}
-
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
- quirk_dlcsetup);
diff --git a/arch/mips/mti-malta/malta-platform.c b/arch/mips/mti-malta/malta-platform.c
index 4c35301720e..80562b81f0f 100644
--- a/arch/mips/mti-malta/malta-platform.c
+++ b/arch/mips/mti-malta/malta-platform.c
@@ -138,11 +138,6 @@ static int __init malta_add_devices(void)
if (err)
return err;
- /*
- * Set RTC to BCD mode to support current alarm code.
- */
- CMOS_WRITE(CMOS_READ(RTC_CONTROL) & ~RTC_DM_BINARY, RTC_CONTROL);
-
return 0;
}
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index 414a7459858..86d77a66645 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -23,9 +23,12 @@
#define AR724X_PCI_MEM_BASE 0x10000000
#define AR724X_PCI_MEM_SIZE 0x08000000
+#define AR724X_PCI_REG_RESET 0x18
#define AR724X_PCI_REG_INT_STATUS 0x4c
#define AR724X_PCI_REG_INT_MASK 0x50
+#define AR724X_PCI_RESET_LINK_UP BIT(0)
+
#define AR724X_PCI_INT_DEV0 BIT(14)
#define AR724X_PCI_IRQ_COUNT 1
@@ -38,6 +41,15 @@ static void __iomem *ar724x_pci_ctrl_base;
static u32 ar724x_pci_bar0_value;
static bool ar724x_pci_bar0_is_cached;
+static bool ar724x_pci_link_up;
+
+static inline bool ar724x_pci_check_link(void)
+{
+ u32 reset;
+
+ reset = __raw_readl(ar724x_pci_ctrl_base + AR724X_PCI_REG_RESET);
+ return reset & AR724X_PCI_RESET_LINK_UP;
+}
static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, uint32_t *value)
@@ -46,6 +58,9 @@ static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
void __iomem *base;
u32 data;
+ if (!ar724x_pci_link_up)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
if (devfn)
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -96,6 +111,9 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
u32 data;
int s;
+ if (!ar724x_pci_link_up)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
if (devfn)
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -280,6 +298,10 @@ int __init ar724x_pcibios_init(int irq)
if (ar724x_pci_ctrl_base == NULL)
goto err_unmap_devcfg;
+ ar724x_pci_link_up = ar724x_pci_check_link();
+ if (!ar724x_pci_link_up)
+ pr_warn("ar724x: PCIe link is down\n");
+
ar724x_pci_irq_init(irq);
register_pci_controller(&ar724x_pci_controller);
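In the pci-ar724x.c hunks above, the driver samples the reset register once at init, caches the link state, and fails config-space accesses early when the link is down. A small sketch of that check-once, gate-every-access pattern (register layout and names are stand-ins):

#include <stdbool.h>
#include <stdio.h>

#define RESET_LINK_UP	0x1	/* stand-in for AR724X_PCI_RESET_LINK_UP (bit 0) */
#define ENOTFOUND	(-1)	/* stand-in for PCIBIOS_DEVICE_NOT_FOUND          */

static bool link_up;		/* sampled once at init, like ar724x_pci_link_up */

static unsigned int read_reset_reg(void)
{
	return 0x0;		/* pretend the PCIe link never came up */
}

static int config_read(unsigned int devfn, unsigned int *value)
{
	if (!link_up)		/* fail fast instead of touching dead hardware */
		return ENOTFOUND;
	/* a real driver would run the config cycle here */
	*value = 0;
	return 0;
}

int main(void)
{
	unsigned int val;

	link_up = read_reset_reg() & RESET_LINK_UP;
	if (!link_up)
		printf("PCIe link is down, reads return %d\n",
		       config_read(0, &val));
	return 0;
}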
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 6c6defc2461..af9cf30ed47 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -141,7 +141,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)
-#define ATOMIC_INIT(i) ((atomic_t) { (i) })
+#define ATOMIC_INIT(i) { (i) }
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
@@ -150,7 +150,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
#ifdef CONFIG_64BIT
-#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
+#define ATOMIC64_INIT(i) { (i) }
static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index d4b94b395c1..2c05a9292a8 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -309,7 +309,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
cregs->ksp = (unsigned long)stack
+ (pregs->gr[21] & (THREAD_SIZE - 1));
cregs->gr[30] = usp;
- if (p->personality == PER_HPUX) {
+ if (personality(p->personality) == PER_HPUX) {
#ifdef CONFIG_HPUX
cregs->kpc = (unsigned long) &hpux_child_return;
#else
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index c9b932260f4..7426e40699b 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -225,12 +225,12 @@ long parisc_personality(unsigned long personality)
long err;
if (personality(current->personality) == PER_LINUX32
- && personality == PER_LINUX)
- personality = PER_LINUX32;
+ && personality(personality) == PER_LINUX)
+ personality = (personality & ~PER_MASK) | PER_LINUX32;
err = sys_personality(personality);
- if (err == PER_LINUX32)
- err = PER_LINUX;
+ if (personality(err) == PER_LINUX32)
+ err = (err & ~PER_MASK) | PER_LINUX;
return err;
}
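The parisc personality fix above operates only on the low personality-type bits (what personality() masks out) and preserves the flag bits above them. A short sketch of the bit arithmetic, using constants mirroring <linux/personality.h> for illustration:

#include <stdio.h>

#define PER_MASK		0x00ff	/* low byte selects the personality type */
#define PER_LINUX		0x0000
#define PER_LINUX32		0x0008
#define ADDR_NO_RANDOMIZE	0x0040000	/* example flag above PER_MASK */

#define personality(p)	((p) & PER_MASK)

int main(void)
{
	unsigned long p = PER_LINUX | ADDR_NO_RANDOMIZE;

	/* The old code compared and assigned the whole word, losing the flag
	 * bits; the fix swaps only the type and keeps everything else. */
	if (personality(p) == PER_LINUX)
		p = (p & ~PER_MASK) | PER_LINUX32;

	printf("type=%#lx flags preserved=%s\n", personality(p),
	       (p & ADDR_NO_RANDOMIZE) ? "yes" : "no");
	return 0;
}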
diff --git a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
index 8d35d2c1f69..4f9c9f682ec 100644
--- a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
@@ -345,6 +345,13 @@
/include/ "qoriq-duart-1.dtsi"
/include/ "qoriq-gpio-0.dtsi"
/include/ "qoriq-usb2-mph-0.dtsi"
+ usb@210000 {
+ compatible = "fsl-usb2-mph-v1.6", "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
+ port0;
+ };
/include/ "qoriq-usb2-dr-0.dtsi"
+ usb@211000 {
+ compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
+ };
/include/ "qoriq-sec4.0-0.dtsi"
};
diff --git a/arch/powerpc/configs/85xx/p1023rds_defconfig b/arch/powerpc/configs/85xx/p1023rds_defconfig
index f4337bacd0e..26e541c4662 100644
--- a/arch/powerpc/configs/85xx/p1023rds_defconfig
+++ b/arch/powerpc/configs/85xx/p1023rds_defconfig
@@ -6,28 +6,27 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_AUDIT=y
-CONFIG_SPARSE_IRQ=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_KALLSYMS_ALL=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_EMBEDDED=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
CONFIG_P1023_RDS=y
CONFIG_QUICC_ENGINE=y
CONFIG_QE_GPIO=y
CONFIG_CPM2=y
-CONFIG_GPIO_MPC8XXX=y
CONFIG_HIGHMEM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=m
CONFIG_MATH_EMULATION=y
@@ -63,11 +62,11 @@ CONFIG_INET_ESP=y
CONFIG_IPV6=y
CONFIG_IP_SCTP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
@@ -80,15 +79,14 @@ CONFIG_SATA_FSL=y
CONFIG_SATA_SIL24=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
+CONFIG_FS_ENET=y
+CONFIG_FSL_PQ_MDIO=y
+CONFIG_E1000E=y
CONFIG_MARVELL_PHY=y
CONFIG_DAVICOM_PHY=y
CONFIG_CICADA_PHY=y
CONFIG_VITESSE_PHY=y
CONFIG_FIXED_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_FS_ENET=y
-CONFIG_E1000E=y
-CONFIG_FSL_PQ_MDIO=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
@@ -98,16 +96,15 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
CONFIG_SERIAL_QE=m
-CONFIG_HW_RANDOM=y
CONFIG_NVRAM=y
CONFIG_I2C=y
CONFIG_I2C_CPM=m
CONFIG_I2C_MPC=y
+CONFIG_GPIO_MPC8XXX=y
# CONFIG_HWMON is not set
CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_SOUND=y
@@ -123,7 +120,6 @@ CONFIG_DMADEVICES=y
CONFIG_FSL_DMA=y
# CONFIG_NET_DMA is not set
CONFIG_STAGING=y
-# CONFIG_STAGING_EXCLUDE_BUILD is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
@@ -150,22 +146,15 @@ CONFIG_QNX4FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
CONFIG_CRC_T10DIF=y
CONFIG_FRAME_WARN=8092
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index cbb98c1234f..8b3d57c1ebe 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -6,8 +6,8 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_AUDIT=y
-CONFIG_SPARSE_IRQ=y
-CONFIG_RCU_TRACE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -21,23 +21,22 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
CONFIG_P2041_RDB=y
CONFIG_P3041_DS=y
CONFIG_P4080_DS=y
CONFIG_P5020_DS=y
CONFIG_HIGHMEM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=m
CONFIG_KEXEC=y
CONFIG_IRQ_ALL_CPUS=y
CONFIG_FORCE_MAX_ZONEORDER=13
-CONFIG_FSL_LBC=y
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
-CONFIG_PCI_MSI=y
# CONFIG_PCIEASPM is not set
+CONFIG_PCI_MSI=y
CONFIG_RAPIDIO=y
CONFIG_FSL_RIO=y
CONFIG_NET=y
@@ -70,6 +69,7 @@ CONFIG_INET_IPCOMP=y
CONFIG_IPV6=y
CONFIG_IP_SCTP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
@@ -77,17 +77,14 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_M25P80=y
CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_ECC=y
-CONFIG_MTD_NAND_IDS=y
-CONFIG_MTD_NAND_FSL_IFC=y
CONFIG_MTD_NAND_FSL_ELBC=y
-CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND_FSL_IFC=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_MISC_DEVICES=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
@@ -115,11 +112,9 @@ CONFIG_SERIO_LIBPS2=y
CONFIG_PPC_EPAPR_HV_BYTECHAN=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
-CONFIG_HW_RANDOM=y
CONFIG_NVRAM=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
@@ -132,7 +127,6 @@ CONFIG_SPI_FSL_ESPI=y
CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_USB_HID=m
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_FSL=y
@@ -142,8 +136,6 @@ CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
CONFIG_USB_STORAGE=y
CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
-CONFIG_MMC_SDHCI_OF=y
-CONFIG_MMC_SDHCI_OF_ESDHC=y
CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
CONFIG_EDAC_MPC85XX=y
@@ -170,19 +162,16 @@ CONFIG_HUGETLBFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=m
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_RCU_TRACE=y
CONFIG_CRYPTO_NULL=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD4=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index dd89de8b0b7..0516e22ca3d 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -56,6 +56,7 @@ CONFIG_INET_ESP=y
CONFIG_IPV6=y
CONFIG_IP_SCTP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 15130066e5e..07b7f2af2dc 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -1,8 +1,10 @@
+CONFIG_PPC64=y
+CONFIG_ALTIVEC=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_BLK_DEV_INITRD=y
@@ -13,15 +15,16 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=4
-CONFIG_KEXEC=y
-# CONFIG_RELOCATABLE is not set
+# CONFIG_PPC_PSERIES is not set
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_PMAC64=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_KEXEC=y
+CONFIG_IRQ_ALL_CPUS=y
+# CONFIG_MIGRATION is not set
CONFIG_PCI_MSI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -49,6 +52,7 @@ CONFIG_NF_CT_NETLINK=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_QUEUE=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
@@ -56,6 +60,8 @@ CONFIG_BLK_DEV_RAM_SIZE=65536
CONFIG_CDROM_PKTCDVD=m
CONFIG_IDE=y
CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_IDE_PMAC=y
+CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
@@ -79,24 +85,33 @@ CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
-CONFIG_MACINTOSH_DRIVERS=y
+CONFIG_IEEE1394=y
+CONFIG_IEEE1394_OHCI1394=y
+CONFIG_IEEE1394_SBP2=m
+CONFIG_IEEE1394_ETH1394=m
+CONFIG_IEEE1394_RAWIO=y
+CONFIG_IEEE1394_VIDEO1394=m
+CONFIG_IEEE1394_DV1394=m
+CONFIG_ADB_PMU=y
+CONFIG_PMAC_SMU=y
CONFIG_MAC_EMUMOUSEBTN=y
+CONFIG_THERM_PM72=y
+CONFIG_WINDFARM=y
+CONFIG_WINDFARM_PM81=y
+CONFIG_WINDFARM_PM91=y
+CONFIG_WINDFARM_PM112=y
+CONFIG_WINDFARM_PM121=y
CONFIG_NETDEVICES=y
-CONFIG_BONDING=m
CONFIG_DUMMY=m
-CONFIG_MII=y
+CONFIG_BONDING=m
CONFIG_TUN=m
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+CONFIG_SUNGEM=y
CONFIG_ACENIC=m
CONFIG_ACENIC_OMIT_TIGON_I=y
-CONFIG_TIGON3=y
CONFIG_E1000=y
-CONFIG_SUNGEM=y
-CONFIG_PPP=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPPOE=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
+CONFIG_TIGON3=y
CONFIG_USB_CATC=m
CONFIG_USB_KAWETH=m
CONFIG_USB_PEGASUS=m
@@ -106,24 +121,36 @@ CONFIG_USB_USBNET=m
# CONFIG_USB_NET_NET1080 is not set
# CONFIG_USB_NET_CDC_SUBSET is not set
# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_PPP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPPOE=m
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_JOYDEV=m
CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_MOUSE_PS2 is not set
+# CONFIG_SERIO_I8042 is not set
# CONFIG_SERIO_SERPORT is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_HW_RANDOM is not set
CONFIG_GEN_RTC=y
CONFIG_RAW_DRIVER=y
CONFIG_I2C_CHARDEV=y
# CONFIG_HWMON is not set
-CONFIG_AGP=y
-CONFIG_DRM=y
-CONFIG_DRM_NOUVEAU=y
+CONFIG_AGP=m
+CONFIG_AGP_UNINORTH=m
CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_TILEBLITTING=y
+CONFIG_FB_OF=y
+CONFIG_FB_NVIDIA=y
+CONFIG_FB_NVIDIA_I2C=y
CONFIG_FB_RADEON=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
CONFIG_SOUND=m
CONFIG_SND=m
@@ -131,7 +158,15 @@ CONFIG_SND_SEQUENCER=m
CONFIG_SND_MIXER_OSS=m
CONFIG_SND_PCM_OSS=m
CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_POWERMAC=m
+CONFIG_SND_AOA=m
+CONFIG_SND_AOA_FABRIC_LAYOUT=m
+CONFIG_SND_AOA_ONYX=m
+CONFIG_SND_AOA_TAS=m
+CONFIG_SND_AOA_TOONIE=m
CONFIG_SND_USB_AUDIO=m
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
CONFIG_HID_GYRATION=y
CONFIG_LOGITECH_FF=y
CONFIG_HID_PANTHERLORD=y
@@ -139,12 +174,13 @@ CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
-CONFIG_HID_PID=y
-CONFIG_USB_HIDDEV=y
CONFIG_USB=y
+CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_HCD_PPC_OF is not set
CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
CONFIG_USB_ACM=m
CONFIG_USB_PRINTER=y
CONFIG_USB_STORAGE=y
@@ -208,6 +244,8 @@ CONFIG_REISERFS_FS_POSIX_ACL=y
CONFIG_REISERFS_FS_SECURITY=y
CONFIG_XFS_FS=m
CONFIG_XFS_POSIX_ACL=y
+CONFIG_INOTIFY=y
+CONFIG_AUTOFS_FS=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -221,12 +259,14 @@ CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_NFSD=y
CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
CONFIG_CIFS=m
+CONFIG_PARTITION_ADVANCED=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_1250=y
CONFIG_NLS_CODEPAGE_1251=y
@@ -234,23 +274,29 @@ CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_UTF8=y
+CONFIG_CRC_T10DIF=y
+CONFIG_LIBCRC32C=m
CONFIG_MAGIC_SYSRQ=y
-# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_LATENCYTOP=y
-CONFIG_STRICT_DEVMEM=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_BOOTX_TEXT=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
@@ -260,6 +306,3 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
-# CONFIG_VIRTUALIZATION is not set
-CONFIG_CRC_T10DIF=y
-CONFIG_LIBCRC32C=m
diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig
index 5aac9a8bc53..9352e4430c3 100644
--- a/arch/powerpc/configs/mpc83xx_defconfig
+++ b/arch/powerpc/configs/mpc83xx_defconfig
@@ -2,12 +2,12 @@ CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_PPC_CHRP is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_83xx=y
@@ -25,7 +25,6 @@ CONFIG_ASP834x=y
CONFIG_QUICC_ENGINE=y
CONFIG_QE_GPIO=y
CONFIG_MATH_EMULATION=y
-CONFIG_SPARSE_IRQ=y
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -42,10 +41,9 @@ CONFIG_INET_ESP=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_OF_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
@@ -64,15 +62,14 @@ CONFIG_ATA=y
CONFIG_SATA_FSL=y
CONFIG_SATA_SIL=y
CONFIG_NETDEVICES=y
+CONFIG_MII=y
+CONFIG_UCC_GETH=y
+CONFIG_GIANFAR=y
CONFIG_MARVELL_PHY=y
CONFIG_DAVICOM_PHY=y
CONFIG_VITESSE_PHY=y
CONFIG_ICPLUS_PHY=y
CONFIG_FIXED_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_GIANFAR=y
-CONFIG_UCC_GETH=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
@@ -112,17 +109,12 @@ CONFIG_RTC_DRV_DS1374=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_INOTIFY=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
CONFIG_CRC_T10DIF=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA256=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index 03ee911c457..8b5bda27d24 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -5,7 +5,9 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_AUDIT=y
-CONFIG_SPARSE_IRQ=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -17,6 +19,8 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
CONFIG_MPC8540_ADS=y
CONFIG_MPC8560_ADS=y
CONFIG_MPC85xx_CDS=y
@@ -40,8 +44,6 @@ CONFIG_SBC8548=y
CONFIG_QUICC_ENGINE=y
CONFIG_QE_GPIO=y
CONFIG_HIGHMEM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=m
CONFIG_MATH_EMULATION=y
CONFIG_FORCE_MAX_ZONEORDER=12
@@ -74,36 +76,25 @@ CONFIG_INET_ESP=y
CONFIG_IPV6=y
CONFIG_IP_SCTP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
CONFIG_FTL=y
-CONFIG_MTD_GEN_PROBE=y
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
+CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_CFI_UTIL=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_M25P80=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_MTD_NAND_IDS=y
-CONFIG_MTD_NAND_ECC=y
-CONFIG_MTD_M25P80=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
@@ -115,6 +106,7 @@ CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_FSL=y
CONFIG_PATA_ALI=y
+CONFIG_PATA_VIA=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
CONFIG_FS_ENET=y
@@ -134,7 +126,6 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
@@ -183,7 +174,6 @@ CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_FSL=y
@@ -229,18 +219,13 @@ CONFIG_QNX4FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
CONFIG_CRC_T10DIF=y
CONFIG_DEBUG_FS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index fdfa84dc908..b0974e7e98a 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -7,7 +7,9 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_AUDIT=y
-CONFIG_SPARSE_IRQ=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -19,6 +21,8 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
CONFIG_MPC8540_ADS=y
CONFIG_MPC8560_ADS=y
CONFIG_MPC85xx_CDS=y
@@ -42,8 +46,6 @@ CONFIG_SBC8548=y
CONFIG_QUICC_ENGINE=y
CONFIG_QE_GPIO=y
CONFIG_HIGHMEM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=m
CONFIG_MATH_EMULATION=y
CONFIG_IRQ_ALL_CPUS=y
@@ -77,36 +79,25 @@ CONFIG_INET_ESP=y
CONFIG_IPV6=y
CONFIG_IP_SCTP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
CONFIG_FTL=y
-CONFIG_MTD_GEN_PROBE=y
-CONFIG_MTD_MAP_BANK_WIDTH_1=y
-CONFIG_MTD_MAP_BANK_WIDTH_2=y
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
-CONFIG_MTD_CFI_I1=y
-CONFIG_MTD_CFI_I2=y
+CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_CFI_UTIL=y
CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_M25P80=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_MTD_NAND_IDS=y
-CONFIG_MTD_NAND_ECC=y
-CONFIG_MTD_M25P80=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
@@ -137,7 +128,6 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
@@ -186,7 +176,6 @@ CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_FSL=y
@@ -232,18 +221,13 @@ CONFIG_QNX4FS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
CONFIG_CRC_T10DIF=y
CONFIG_DEBUG_FS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 50d82c8a037..b3c083de17a 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -553,9 +553,7 @@ static inline int cpu_has_feature(unsigned long feature)
& feature);
}
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
#define HBP_NUM 1
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 50ea12fd7bf..a8bf5c673a3 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -33,6 +33,7 @@
#include <asm/kvm_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
+#include <asm/cacheflush.h>
#define KVM_MAX_VCPUS NR_CPUS
#define KVM_MAX_VCORES NR_CPUS
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 0124937a23b..e006f0bdea9 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -219,4 +219,16 @@ void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);
+static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
+{
+ /* Clear i-cache for new pages */
+ struct page *page;
+ page = pfn_to_page(pfn);
+ if (!test_bit(PG_arch_1, &page->flags)) {
+ flush_dcache_icache_page(page);
+ set_bit(PG_arch_1, &page->flags);
+ }
+}
+
+
#endif /* __POWERPC_KVM_PPC_H__ */
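The helper added above flushes a page from the data cache and invalidates the instruction cache the first time the page is mapped executable, using PG_arch_1 to remember that the page is already clean. The same requirement exists in user space whenever code is generated at runtime; the sketch below is a hypothetical JIT-style illustration (not from this patch) that uses the GCC/Clang builtin __builtin___clear_cache as the user-space counterpart of flush_dcache_icache_page().

#include <stddef.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	unsigned char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_EXEC,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	/* ... emit machine code into buf here ... */

	/* Make the d-cache coherent with the i-cache before executing the
	 * freshly written instructions, as the hypervisor does above for
	 * newly mapped executable guest pages. */
	__builtin___clear_cache((char *)buf, (char *)buf + len);

	/* only now is it safe to jump into buf on machines with split caches */
	munmap(buf, len);
	return 0;
}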
diff --git a/arch/powerpc/include/asm/mpic_msgr.h b/arch/powerpc/include/asm/mpic_msgr.h
index 326d33ca55c..d4f471fb103 100644
--- a/arch/powerpc/include/asm/mpic_msgr.h
+++ b/arch/powerpc/include/asm/mpic_msgr.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <asm/smp.h>
+#include <asm/io.h>
struct mpic_msgr {
u32 __iomem *base;
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 53b6dfa8334..54b73a28c20 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -386,6 +386,7 @@ extern unsigned long cpuidle_disable;
enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
extern int powersave_nap; /* set if nap mode can be used in idle loop */
+extern void power7_nap(void);
#ifdef CONFIG_PSERIES_IDLE
extern void update_smt_snooze_delay(int snooze);
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 85b05c463fa..e8995727b1c 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -76,6 +76,7 @@ int main(void)
DEFINE(SIGSEGV, SIGSEGV);
DEFINE(NMI_MASK, NMI_MASK);
DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr));
+ DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit));
#else
DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index 5b25c8060fd..a892680668d 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -28,6 +28,8 @@ void doorbell_setup_this_cpu(void)
void doorbell_cause_ipi(int cpu, unsigned long data)
{
+ /* Order previous accesses vs. msgsnd, which is treated as a store */
+ mb();
ppc_msgsnd(PPC_DBELL, 0, data);
}
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 2d7bb8ced13..e4897523de4 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -83,11 +83,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
return 0;
}
- if ((tbl->it_offset + tbl->it_size) > (mask >> IOMMU_PAGE_SHIFT)) {
- dev_info(dev, "Warning: IOMMU window too big for device mask\n");
- dev_info(dev, "mask: 0x%08llx, table end: 0x%08lx\n",
- mask, (tbl->it_offset + tbl->it_size) <<
- IOMMU_PAGE_SHIFT);
+ if (tbl->it_offset > (mask >> IOMMU_PAGE_SHIFT)) {
+ dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
+ dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
+ mask, tbl->it_offset << IOMMU_PAGE_SHIFT);
return 0;
} else
return 1;
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 4b01a25e29e..b40e0b4815b 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -370,6 +370,12 @@ _GLOBAL(ret_from_fork)
li r3,0
b syscall_exit
+ .section ".toc","aw"
+DSCR_DEFAULT:
+ .tc dscr_default[TC],dscr_default
+
+ .section ".text"
+
/*
* This routine switches between two different tasks. The process
* state of one is saved on its kernel stack. Then the state
@@ -509,9 +515,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
mr r1,r8 /* start using new stack pointer */
std r7,PACAKSAVE(r13)
- ld r6,_CCR(r1)
- mtcrf 0xFF,r6
-
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
ld r0,THREAD_VRSAVE(r4)
@@ -520,14 +523,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
+ lwz r6,THREAD_DSCR_INHERIT(r4)
+ ld r7,DSCR_DEFAULT@toc(2)
ld r0,THREAD_DSCR(r4)
- cmpd r0,r25
- beq 1f
+ cmpwi r6,0
+ bne 1f
+ ld r0,0(r7)
+1: cmpd r0,r25
+ beq 2f
mtspr SPRN_DSCR,r0
-1:
+2:
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif
+ ld r6,_CCR(r1)
+ mtcrf 0xFF,r6
+
/* r3-r13 are destroyed -- Cort */
REST_8GPRS(14, r1)
REST_10GPRS(22, r1)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e894515e77b..39aa97d3ff8 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -186,7 +186,7 @@ hardware_interrupt_hv:
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
- MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)
+ STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
@@ -486,6 +486,7 @@ machine_check_common:
STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
+ STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index f3a82dde61d..956a4c496de 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -253,7 +253,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
/* Do not emulate user-space instructions, instead single-step them */
if (user_mode(regs)) {
- bp->ctx->task->thread.last_hit_ubp = bp;
+ current->thread.last_hit_ubp = bp;
regs->msr |= MSR_SE;
goto out;
}
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 7140d838339..e11863f4e59 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -28,7 +28,9 @@ _GLOBAL(power7_idle)
lwz r4,ADDROFF(powersave_nap)(r3)
cmpwi 0,r4,0
beqlr
+ /* fall through */
+_GLOBAL(power7_nap)
/* NAP is a state loss, we create a regs frame on the
* stack, fill it up with the state we care about and
* stick a pointer to it in PACAR1. We really only
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 782bd0a3c2f..c470a40b29f 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -25,6 +25,7 @@
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/debug.h>
+#include <linux/slab.h>
/*
* This table contains the mapping between PowerPC hardware trap types, and
@@ -101,6 +102,21 @@ static int computeSignal(unsigned int tt)
return SIGHUP; /* default for things we don't know about */
}
+/**
+ *
+ * kgdb_skipexception - Bail out of KGDB when we've been triggered.
+ * @exception: Exception vector number
+ * @regs: Current &struct pt_regs.
+ *
+ * On some architectures we need to skip a breakpoint exception when
+ * it occurs after a breakpoint has been removed.
+ *
+ */
+int kgdb_skipexception(int exception, struct pt_regs *regs)
+{
+ return kgdb_isremovedbreak(regs->nip);
+}
+
static int kgdb_call_nmi_hook(struct pt_regs *regs)
{
kgdb_nmicallback(raw_smp_processor_id(), regs);
@@ -138,6 +154,8 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
static int kgdb_singlestep(struct pt_regs *regs)
{
struct thread_info *thread_info, *exception_thread_info;
+ struct thread_info *backup_current_thread_info = \
+ (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL);
if (user_mode(regs))
return 0;
@@ -155,13 +173,17 @@ static int kgdb_singlestep(struct pt_regs *regs)
thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1));
exception_thread_info = current_thread_info();
- if (thread_info != exception_thread_info)
+ if (thread_info != exception_thread_info) {
+ /* Save the original current_thread_info. */
+ memcpy(backup_current_thread_info, exception_thread_info, sizeof *thread_info);
memcpy(exception_thread_info, thread_info, sizeof *thread_info);
+ }
kgdb_handle_exception(0, SIGTRAP, 0, regs);
if (thread_info != exception_thread_info)
- memcpy(thread_info, exception_thread_info, sizeof *thread_info);
+ /* Restore current_thread_info lastly. */
+ memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info);
return 1;
}
@@ -410,7 +432,6 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
#else
linux_regs->msr |= MSR_SE;
#endif
- kgdb_single_step = 1;
atomic_set(&kgdb_cpu_doing_single_step,
raw_smp_processor_id());
}
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 710f400476d..1a1f2ddfb58 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -802,16 +802,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
if (cpu_has_feature(CPU_FTR_DSCR)) {
- if (current->thread.dscr_inherit) {
- p->thread.dscr_inherit = 1;
- p->thread.dscr = current->thread.dscr;
- } else if (0 != dscr_default) {
- p->thread.dscr_inherit = 1;
- p->thread.dscr = dscr_default;
- } else {
- p->thread.dscr_inherit = 0;
- p->thread.dscr = 0;
- }
+ p->thread.dscr_inherit = current->thread.dscr_inherit;
+ p->thread.dscr = current->thread.dscr;
}
#endif
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 0321007086f..8d4214afc21 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -198,8 +198,15 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
struct cpu_messages *info = &per_cpu(ipi_message, cpu);
char *message = (char *)&info->messages;
+ /*
+ * Order previous accesses before accesses in the IPI handler.
+ */
+ smp_mb();
message[msg] = 1;
- mb();
+ /*
+ * cause_ipi functions are required to include a full barrier
+ * before doing whatever causes the IPI.
+ */
smp_ops->cause_ipi(cpu, info->data);
}
@@ -211,7 +218,7 @@ irqreturn_t smp_ipi_demux(void)
mb(); /* order any irq clear */
do {
- all = xchg_local(&info->messages, 0);
+ all = xchg(&info->messages, 0);
#ifdef __BIG_ENDIAN
if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
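The hunks above split the ordering in two: a full barrier before the message word is written, so data stored by the caller is visible to the IPI handler, and a full-barrier xchg() on the receive side instead of xchg_local(). As a rough user-space analogy only (the mailbox names below are made up, and C11 fences are not the kernel's barrier API), the same pairing looks like this:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;                     /* ordinary data written before the "IPI" */
static _Atomic unsigned int messages;   /* mailbox word, like info->messages */
static _Atomic int doorbell;            /* stands in for the IPI itself */

static void send_side(void)
{
	payload = 42;
	atomic_thread_fence(memory_order_seq_cst);              /* analogue of smp_mb() */
	atomic_fetch_or_explicit(&messages, 1u, memory_order_relaxed);
	atomic_store_explicit(&doorbell, 1, memory_order_release);
}

static void *receive_side(void *unused)
{
	(void)unused;
	while (!atomic_load_explicit(&doorbell, memory_order_acquire))
		;                                               /* wait for the kick */
	unsigned int all = atomic_exchange(&messages, 0);       /* full barrier, like xchg() */
	if (all & 1u)
		printf("payload = %d\n", payload);              /* guaranteed to read 42 */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, receive_side, NULL);
	send_side();
	pthread_join(t, NULL);
	return 0;
}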
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index f2496f2faec..4e3cc47f26b 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -107,11 +107,11 @@ long ppc64_personality(unsigned long personality)
long ret;
if (personality(current->personality) == PER_LINUX32
- && personality == PER_LINUX)
- personality = PER_LINUX32;
+ && personality(personality) == PER_LINUX)
+ personality = (personality & ~PER_MASK) | PER_LINUX32;
ret = sys_personality(personality);
- if (ret == PER_LINUX32)
- ret = PER_LINUX;
+ if (personality(ret) == PER_LINUX32)
+ ret = (ret & ~PER_MASK) | PER_LINUX;
return ret;
}
#endif
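The fix compares only the personality base via the personality() mask and preserves flag bits such as ADDR_NO_RANDOMIZE instead of overwriting the whole word. A minimal user-space sketch of the masking, assuming the glibc <sys/personality.h> constants:

#include <stdio.h>
#include <sys/personality.h>

int main(void)
{
	/* personality(0xffffffff) queries the current value without changing it */
	unsigned long cur   = personality(0xffffffff);
	unsigned long base  = cur & PER_MASK;   /* what the kernel's personality() macro keeps */
	unsigned long flags = cur & ~PER_MASK;  /* e.g. ADDR_NO_RANDOMIZE set by setarch -R */

	printf("raw=0x%lx base=0x%lx flags=0x%lx\n", cur, base, flags);

	/* The fixed code rewrites only the base and keeps the flag bits: */
	printf("as PER_LINUX32, flags preserved: 0x%lx\n", flags | PER_LINUX32);
	return 0;
}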
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 3529446c2ab..8302af64921 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -194,6 +194,14 @@ static ssize_t show_dscr_default(struct device *dev,
return sprintf(buf, "%lx\n", dscr_default);
}
+static void update_dscr(void *dummy)
+{
+ if (!current->thread.dscr_inherit) {
+ current->thread.dscr = dscr_default;
+ mtspr(SPRN_DSCR, dscr_default);
+ }
+}
+
static ssize_t __used store_dscr_default(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
@@ -206,6 +214,8 @@ static ssize_t __used store_dscr_default(struct device *dev,
return -EINVAL;
dscr_default = val;
+ on_each_cpu(update_dscr, NULL, 1);
+
return count;
}
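store_dscr_default() now broadcasts the new default with on_each_cpu() so every CPU reloads SPRN_DSCR immediately rather than at its next context switch. A minimal, hypothetical module sketch of that broadcast pattern (the refresh_cpu/dscr_demo names are invented for illustration):

#include <linux/module.h>
#include <linux/smp.h>

static void refresh_cpu(void *unused)
{
	/* Runs on every online CPU; on remote CPUs this is IPI context. */
	pr_info("refreshed state on CPU %d\n", smp_processor_id());
}

static int __init dscr_demo_init(void)
{
	on_each_cpu(refresh_cpu, NULL, 1);	/* wait=1: return only after all CPUs ran */
	return 0;
}

static void __exit dscr_demo_exit(void)
{
}

module_init(dscr_demo_init);
module_exit(dscr_demo_exit);
MODULE_LICENSE("GPL");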
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index be171ee73bf..e49e93191b6 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -535,6 +535,15 @@ void timer_interrupt(struct pt_regs * regs)
trace_timer_interrupt_exit(regs);
}
+/*
+ * Hypervisor decrementer interrupts shouldn't occur but are sometimes
+ * left pending on exit from a KVM guest. We don't need to do anything
+ * to clear them, as they are edge-triggered.
+ */
+void hdec_interrupt(struct pt_regs *regs)
+{
+}
+
#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 158972341a2..ae0843fa7a6 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -972,8 +972,9 @@ static int emulate_instruction(struct pt_regs *regs)
cpu_has_feature(CPU_FTR_DSCR)) {
PPC_WARN_EMULATED(mtdscr, regs);
rd = (instword >> 21) & 0x1f;
- mtspr(SPRN_DSCR, regs->gpr[rd]);
+ current->thread.dscr = regs->gpr[rd];
current->thread.dscr_inherit = 1;
+ mtspr(SPRN_DSCR, current->thread.dscr);
return 0;
}
#endif
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index f922c29bb23..837f13e7b6b 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -211,6 +211,9 @@ next_pteg:
pteg1 |= PP_RWRX;
}
+ if (orig_pte->may_execute)
+ kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
+
local_irq_disable();
if (pteg[rr]) {
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 10fc8ec9d2a..0688b6b3958 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -126,6 +126,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
if (!orig_pte->may_execute)
rflags |= HPTE_R_N;
+ else
+ kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 5a84c8d3d04..44b72feaff7 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1421,13 +1421,13 @@ _GLOBAL(kvmppc_h_cede)
sync /* order setting ceded vs. testing prodded */
lbz r5,VCPU_PRODDED(r3)
cmpwi r5,0
- bne 1f
+ bne kvm_cede_prodded
li r0,0 /* set trap to 0 to say hcall is handled */
stw r0,VCPU_TRAP(r3)
li r0,H_SUCCESS
std r0,VCPU_GPR(R3)(r3)
BEGIN_FTR_SECTION
- b 2f /* just send it up to host on 970 */
+ b kvm_cede_exit /* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
/*
@@ -1446,7 +1446,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
or r4,r4,r0
PPC_POPCNTW(R7,R4)
cmpw r7,r8
- bge 2f
+ bge kvm_cede_exit
stwcx. r4,0,r6
bne 31b
li r0,1
@@ -1555,7 +1555,8 @@ kvm_end_cede:
b hcall_real_fallback
/* cede when already previously prodded case */
-1: li r0,0
+kvm_cede_prodded:
+ li r0,0
stb r0,VCPU_PRODDED(r3)
sync /* order testing prodded vs. clearing ceded */
stb r0,VCPU_CEDED(r3)
@@ -1563,7 +1564,8 @@ kvm_end_cede:
blr
/* we've ceded but we want to give control to the host */
-2: li r3,H_TOO_HARD
+kvm_cede_exit:
+ li r3,H_TOO_HARD
blr
secondary_too_late:
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index c510fc96130..a2b66717813 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -322,11 +322,11 @@ static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
{
if (vcpu_e500->g2h_tlb1_map)
- memset(vcpu_e500->g2h_tlb1_map,
- sizeof(u64) * vcpu_e500->gtlb_params[1].entries, 0);
+ memset(vcpu_e500->g2h_tlb1_map, 0,
+ sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
if (vcpu_e500->h2g_tlb1_rmap)
- memset(vcpu_e500->h2g_tlb1_rmap,
- sizeof(unsigned int) * host_tlb_params[1].entries, 0);
+ memset(vcpu_e500->h2g_tlb1_rmap, 0,
+ sizeof(unsigned int) * host_tlb_params[1].entries);
}
static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
@@ -539,6 +539,9 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
ref, gvaddr, stlbe);
+
+ /* Clear i-cache for new pages */
+ kvmppc_mmu_flush_icache(pfn);
}
/* XXX only map the one-one case, for now use TLB0 */
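The bug fixed here is a classic memset() argument swap: the fill value and length parameters were reversed, so the maps were "cleared" with a length of zero. A tiny stand-alone reminder of the correct order:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned long long map[4] = { 1, 2, 3, 4 };

	/* Correct order: memset(dst, fill_byte, length).  With the last two
	 * arguments swapped, as in the removed lines above, the length becomes
	 * 0 and the table is never actually cleared. */
	memset(map, 0, sizeof(map));

	printf("%llu %llu %llu %llu\n", map[0], map[1], map[2], map[3]);
	return 0;
}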
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index dd223b3eb33..17e5b236431 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -20,7 +20,7 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
{
int err;
- err = __put_user(instr, addr);
+ __put_user_size(instr, addr, 4, err);
if (err)
return err;
asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (addr));
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index f9ede7c6606..0d24ff15f5f 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -288,7 +288,7 @@ err1; stb r0,0(r3)
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
bl .enter_vmx_usercopy
- cmpwi r3,0
+ cmpwi cr1,r3,0
ld r0,STACKFRAMESIZE+16(r1)
ld r3,STACKFRAMESIZE+48(r1)
ld r4,STACKFRAMESIZE+56(r1)
@@ -326,38 +326,7 @@ err1; stb r0,0(r3)
dcbt r0,r8,0b01010 /* GO */
.machine pop
- /*
- * We prefetch both the source and destination using enhanced touch
- * instructions. We use a stream ID of 0 for the load side and
- * 1 for the store side.
- */
- clrrdi r6,r4,7
- clrrdi r9,r3,7
- ori r9,r9,1 /* stream=1 */
-
- srdi r7,r5,7 /* length in cachelines, capped at 0x3FF */
- cmpldi cr1,r7,0x3FF
- ble cr1,1f
- li r7,0x3FF
-1: lis r0,0x0E00 /* depth=7 */
- sldi r7,r7,7
- or r7,r7,r0
- ori r10,r7,1 /* stream=1 */
-
- lis r8,0x8000 /* GO=1 */
- clrldi r8,r8,32
-
-.machine push
-.machine "power4"
- dcbt r0,r6,0b01000
- dcbt r0,r7,0b01010
- dcbtst r0,r9,0b01000
- dcbtst r0,r10,0b01010
- eieio
- dcbt r0,r8,0b01010 /* GO */
-.machine pop
-
- beq .Lunwind_stack_nonvmx_copy
+ beq cr1,.Lunwind_stack_nonvmx_copy
/*
* If source and destination are not relatively aligned we use a
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index 0efdc51bc71..7ba6c96de77 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -222,7 +222,7 @@ _GLOBAL(memcpy_power7)
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
bl .enter_vmx_copy
- cmpwi r3,0
+ cmpwi cr1,r3,0
ld r0,STACKFRAMESIZE+16(r1)
ld r3,STACKFRAMESIZE+48(r1)
ld r4,STACKFRAMESIZE+56(r1)
@@ -260,7 +260,7 @@ _GLOBAL(memcpy_power7)
dcbt r0,r8,0b01010 /* GO */
.machine pop
- beq .Lunwind_stack_nonvmx_copy
+ beq cr1,.Lunwind_stack_nonvmx_copy
/*
* If source and destination are not relatively aligned we use a
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index baaafde7d13..fbdad0e3929 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -469,6 +469,7 @@ void flush_dcache_icache_page(struct page *page)
__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
+EXPORT_SYMBOL(flush_dcache_icache_page);
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 39b159751c3..59213cfaeca 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1436,11 +1436,11 @@ static long vphn_get_associativity(unsigned long cpu,
/*
* Update the node maps and sysfs entries for each cpu whose home node
- * has changed.
+ * has changed. Returns 1 when the topology has changed, and 0 otherwise.
*/
int arch_update_cpu_topology(void)
{
- int cpu, nid, old_nid;
+ int cpu, nid, old_nid, changed = 0;
unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
struct device *dev;
@@ -1466,9 +1466,10 @@ int arch_update_cpu_topology(void)
dev = get_cpu_device(cpu);
if (dev)
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+ changed = 1;
}
- return 1;
+ return changed;
}
static void topology_work_fn(struct work_struct *work)
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 77b49ddda9d..7cd2dbd6e4c 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1431,7 +1431,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
if (!event->hw.idx || is_limited_pmc(event->hw.idx))
continue;
val = read_pmc(event->hw.idx);
- if ((int)val < 0) {
+ if (pmc_overflow(val)) {
/* event has overflowed */
found = 1;
record_and_restart(event, val, regs);
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 3ef46254c35..7698b6e13c5 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -106,14 +106,6 @@ static void pnv_smp_cpu_kill_self(void)
{
unsigned int cpu;
- /* If powersave_nap is enabled, use NAP mode, else just
- * spin aimlessly
- */
- if (!powersave_nap) {
- generic_mach_cpu_die();
- return;
- }
-
/* Standard hot unplug procedure */
local_irq_disable();
idle_task_exit();
@@ -128,7 +120,7 @@ static void pnv_smp_cpu_kill_self(void)
*/
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
while (!generic_check_cpu_restart(cpu)) {
- power7_idle();
+ power7_nap();
if (!generic_check_cpu_restart(cpu)) {
DBG("CPU%d Unexpected exit while offline !\n", cpu);
/* We may be getting an IPI, so we re-enable
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index a7b2a600d0a..c37f4613632 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -465,7 +465,7 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
iounmap(hose->cfg_data);
iounmap(hose->cfg_addr);
pcibios_free_controller(hose);
- return 0;
+ return -ENODEV;
}
setup_pci_cmd(hose);
@@ -827,6 +827,7 @@ struct device_node *fsl_pci_primary;
void __devinit fsl_pci_init(void)
{
+ int ret;
struct device_node *node;
struct pci_controller *hose;
dma_addr_t max = 0xffffffff;
@@ -855,10 +856,12 @@ void __devinit fsl_pci_init(void)
if (!fsl_pci_primary)
fsl_pci_primary = node;
- fsl_add_bridge(node, fsl_pci_primary == node);
- hose = pci_find_hose_for_OF_device(node);
- max = min(max, hose->dma_window_base_cur +
- hose->dma_window_size);
+ ret = fsl_add_bridge(node, fsl_pci_primary == node);
+ if (ret == 0) {
+ hose = pci_find_hose_for_OF_device(node);
+ max = min(max, hose->dma_window_base_cur +
+ hose->dma_window_size);
+ }
}
}
diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
index 483d8fa72e8..e961f8c4a8f 100644
--- a/arch/powerpc/sysdev/mpic_msgr.c
+++ b/arch/powerpc/sysdev/mpic_msgr.c
@@ -14,6 +14,9 @@
#include <linux/list.h>
#include <linux/of_platform.h>
#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
index 14469cf9df6..df0fc582146 100644
--- a/arch/powerpc/sysdev/xics/icp-hv.c
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -65,7 +65,11 @@ static inline void icp_hv_set_xirr(unsigned int value)
static inline void icp_hv_set_qirr(int n_cpu , u8 value)
{
int hw_cpu = get_hard_smp_processor_id(n_cpu);
- long rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
+ long rc;
+
+ /* Make sure all previous accesses are ordered before IPI sending */
+ mb();
+ rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
if (rc != H_SUCCESS) {
pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x "
"returned %ld\n", __func__, n_cpu, hw_cpu, value, rc);
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index eab3492a45c..9b49c65ee7a 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -17,6 +17,7 @@
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/kallsyms.h>
+#include <linux/kmsg_dump.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/sysrq.h>
@@ -894,13 +895,13 @@ cmds(struct pt_regs *excp)
#endif
default:
printf("Unrecognized command: ");
- do {
+ do {
if (' ' < cmd && cmd <= '~')
putchar(cmd);
else
printf("\\x%x", cmd);
cmd = inchar();
- } while (cmd != '\n');
+ } while (cmd != '\n');
printf(" (type ? for help)\n");
break;
}
@@ -1097,7 +1098,7 @@ static long check_bp_loc(unsigned long addr)
return 1;
}
-static char *breakpoint_help_string =
+static char *breakpoint_help_string =
"Breakpoint command usage:\n"
"b show breakpoints\n"
"b <addr> [cnt] set breakpoint at given instr addr\n"
@@ -1193,7 +1194,7 @@ bpt_cmds(void)
default:
termch = cmd;
- cmd = skipbl();
+ cmd = skipbl();
if (cmd == '?') {
printf(breakpoint_help_string);
break;
@@ -1359,7 +1360,7 @@ static void xmon_show_stack(unsigned long sp, unsigned long lr,
sp + REGS_OFFSET);
break;
}
- printf("--- Exception: %lx %s at ", regs.trap,
+ printf("--- Exception: %lx %s at ", regs.trap,
getvecname(TRAP(&regs)));
pc = regs.nip;
lr = regs.link;
@@ -1623,14 +1624,14 @@ static void super_regs(void)
cmd = skipbl();
if (cmd == '\n') {
- unsigned long sp, toc;
+ unsigned long sp, toc;
asm("mr %0,1" : "=r" (sp) :);
asm("mr %0,2" : "=r" (toc) :);
printf("msr = "REG" sprg0= "REG"\n",
mfmsr(), mfspr(SPRN_SPRG0));
printf("pvr = "REG" sprg1= "REG"\n",
- mfspr(SPRN_PVR), mfspr(SPRN_SPRG1));
+ mfspr(SPRN_PVR), mfspr(SPRN_SPRG1));
printf("dec = "REG" sprg2= "REG"\n",
mfspr(SPRN_DEC), mfspr(SPRN_SPRG2));
printf("sp = "REG" sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3));
@@ -1783,7 +1784,7 @@ byterev(unsigned char *val, int size)
static int brev;
static int mnoread;
-static char *memex_help_string =
+static char *memex_help_string =
"Memory examine command usage:\n"
"m [addr] [flags] examine/change memory\n"
" addr is optional. will start where left off.\n"
@@ -1798,7 +1799,7 @@ static char *memex_help_string =
"NOTE: flags are saved as defaults\n"
"";
-static char *memex_subcmd_help_string =
+static char *memex_subcmd_help_string =
"Memory examine subcommands:\n"
" hexval write this val to current location\n"
" 'string' write chars from string to this location\n"
@@ -2064,7 +2065,7 @@ prdump(unsigned long adrs, long ndump)
nr = mread(adrs, temp, r);
adrs += nr;
for (m = 0; m < r; ++m) {
- if ((m & (sizeof(long) - 1)) == 0 && m > 0)
+ if ((m & (sizeof(long) - 1)) == 0 && m > 0)
putchar(' ');
if (m < nr)
printf("%.2x", temp[m]);
@@ -2072,7 +2073,7 @@ prdump(unsigned long adrs, long ndump)
printf("%s", fault_chars[fault_type]);
}
for (; m < 16; ++m) {
- if ((m & (sizeof(long) - 1)) == 0)
+ if ((m & (sizeof(long) - 1)) == 0)
putchar(' ');
printf(" ");
}
@@ -2148,45 +2149,28 @@ print_address(unsigned long addr)
void
dump_log_buf(void)
{
- const unsigned long size = 128;
- unsigned long end, addr;
- unsigned char buf[size + 1];
-
- addr = 0;
- buf[size] = '\0';
-
- if (setjmp(bus_error_jmp) != 0) {
- printf("Unable to lookup symbol __log_buf!\n");
- return;
- }
-
- catch_memory_errors = 1;
- sync();
- addr = kallsyms_lookup_name("__log_buf");
-
- if (! addr)
- printf("Symbol __log_buf not found!\n");
- else {
- end = addr + (1 << CONFIG_LOG_BUF_SHIFT);
- while (addr < end) {
- if (! mread(addr, buf, size)) {
- printf("Can't read memory at address 0x%lx\n", addr);
- break;
- }
-
- printf("%s", buf);
-
- if (strlen(buf) < size)
- break;
-
- addr += size;
- }
- }
-
- sync();
- /* wait a little while to see if we get a machine check */
- __delay(200);
- catch_memory_errors = 0;
+ struct kmsg_dumper dumper = { .active = 1 };
+ unsigned char buf[128];
+ size_t len;
+
+ if (setjmp(bus_error_jmp) != 0) {
+ printf("Error dumping printk buffer!\n");
+ return;
+ }
+
+ catch_memory_errors = 1;
+ sync();
+
+ kmsg_dump_rewind_nolock(&dumper);
+ while (kmsg_dump_get_line_nolock(&dumper, false, buf, sizeof(buf), &len)) {
+ buf[len] = '\0';
+ printf("%s", buf);
+ }
+
+ sync();
+ /* wait a little while to see if we get a machine check */
+ __delay(200);
+ catch_memory_errors = 0;
}
/*
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 76de6b68487..107610e01a2 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -124,6 +124,7 @@ config S390
select GENERIC_TIME_VSYSCALL
select GENERIC_CLOCKEVENTS
select KTIME_SCALAR if 32BIT
+ select HAVE_ARCH_SECCOMP_FILTER
config SCHED_OMIT_FRAME_POINTER
def_bool y
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 32e8449640f..9b94a160fe7 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -180,7 +180,8 @@ extern char elf_platform[];
#define ELF_PLATFORM (elf_platform)
#ifndef CONFIG_64BIT
-#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
+#define SET_PERSONALITY(ex) \
+ set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
#else /* CONFIG_64BIT */
#define SET_PERSONALITY(ex) \
do { \
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 799ed0f1643..2d6e6e38056 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -66,16 +66,6 @@ static inline pte_t huge_ptep_get(pte_t *ptep)
return pte;
}
-static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- pte_t pte = huge_ptep_get(ptep);
-
- mm->context.flush_mm = 1;
- pmd_clear((pmd_t *) ptep);
- return pte;
-}
-
static inline void __pmd_csp(pmd_t *pmdp)
{
register unsigned long reg2 asm("2") = pmd_val(*pmdp);
@@ -117,6 +107,15 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
__pmd_csp(pmdp);
}
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ pte_t pte = huge_ptep_get(ptep);
+
+ huge_ptep_invalidate(mm, addr, ptep);
+ return pte;
+}
+
#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
({ \
int __changed = !pte_same(huge_ptep_get(__ptep), __entry); \
@@ -131,10 +130,7 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
({ \
pte_t __pte = huge_ptep_get(__ptep); \
if (pte_write(__pte)) { \
- (__mm)->context.flush_mm = 1; \
- if (atomic_read(&(__mm)->context.attach_count) > 1 || \
- (__mm) != current->active_mm) \
- huge_ptep_invalidate(__mm, __addr, __ptep); \
+ huge_ptep_invalidate(__mm, __addr, __ptep); \
set_huge_pte_at(__mm, __addr, __ptep, \
huge_pte_wrprotect(__pte)); \
} \
diff --git a/arch/s390/include/asm/posix_types.h b/arch/s390/include/asm/posix_types.h
index 7bcc14e395f..bf2a2ad2f80 100644
--- a/arch/s390/include/asm/posix_types.h
+++ b/arch/s390/include/asm/posix_types.h
@@ -13,6 +13,7 @@
*/
typedef unsigned long __kernel_size_t;
+typedef long __kernel_ssize_t;
#define __kernel_size_t __kernel_size_t
typedef unsigned short __kernel_old_dev_t;
@@ -25,7 +26,6 @@ typedef unsigned short __kernel_mode_t;
typedef unsigned short __kernel_ipc_pid_t;
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
-typedef int __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;
#else /* __s390x__ */
@@ -35,7 +35,6 @@ typedef unsigned int __kernel_mode_t;
typedef int __kernel_ipc_pid_t;
typedef unsigned int __kernel_uid_t;
typedef unsigned int __kernel_gid_t;
-typedef long __kernel_ssize_t;
typedef long __kernel_ptrdiff_t;
typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index a0a8340daaf..ce26ac3cb16 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -44,6 +44,7 @@ static inline void smp_call_online_cpu(void (*func)(void *), void *data)
}
static inline int smp_find_processor_id(int address) { return 0; }
+static inline int smp_store_status(int cpu) { return 0; }
static inline int smp_vcpu_scheduled(int cpu) { return 1; }
static inline void smp_yield_cpu(int cpu) { }
static inline void smp_yield(void) { }
diff --git a/arch/s390/include/asm/sparsemem.h b/arch/s390/include/asm/sparsemem.h
index 0fb34027d3f..a60d085ddb4 100644
--- a/arch/s390/include/asm/sparsemem.h
+++ b/arch/s390/include/asm/sparsemem.h
@@ -4,13 +4,11 @@
#ifdef CONFIG_64BIT
#define SECTION_SIZE_BITS 28
-#define MAX_PHYSADDR_BITS 46
#define MAX_PHYSMEM_BITS 46
#else
#define SECTION_SIZE_BITS 25
-#define MAX_PHYSADDR_BITS 31
#define MAX_PHYSMEM_BITS 31
#endif /* CONFIG_64BIT */
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index fb214dd9b7e..fe7b99759e1 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -12,6 +12,7 @@
#ifndef _ASM_SYSCALL_H
#define _ASM_SYSCALL_H 1
+#include <linux/audit.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <asm/ptrace.h>
@@ -87,4 +88,13 @@ static inline void syscall_set_arguments(struct task_struct *task,
regs->orig_gpr2 = args[0];
}
+static inline int syscall_get_arch(struct task_struct *task,
+ struct pt_regs *regs)
+{
+#ifdef CONFIG_COMPAT
+ if (test_tsk_thread_flag(task, TIF_31BIT))
+ return AUDIT_ARCH_S390;
+#endif
+ return sizeof(long) == 8 ? AUDIT_ARCH_S390X : AUDIT_ARCH_S390;
+}
#endif /* _ASM_SYSCALL_H */
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 9fde315f3a7..1d8fe2b17ef 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -90,12 +90,10 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
{
- spin_lock(&mm->page_table_lock);
if (mm->context.flush_mm) {
__tlb_flush_mm(mm);
mm->context.flush_mm = 0;
}
- spin_unlock(&mm->page_table_lock);
}
/*
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index d1225089a4b..f606d935f49 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -620,7 +620,6 @@ asmlinkage unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
return -EFAULT;
if (a.offset & ~PAGE_MASK)
return -EINVAL;
- a.addr = (unsigned long) compat_ptr(a.addr);
return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
a.offset >> PAGE_SHIFT);
}
@@ -631,7 +630,6 @@ asmlinkage long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg)
if (copy_from_user(&a, arg, sizeof(a)))
return -EFAULT;
- a.addr = (unsigned long) compat_ptr(a.addr);
return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
}
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index e835d6d5b7f..2d82cfcbce5 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1635,7 +1635,7 @@ ENTRY(compat_sys_process_vm_readv_wrapper)
llgfr %r6,%r6 # unsigned long
llgf %r0,164(%r15) # unsigned long
stg %r0,160(%r15)
- jg sys_process_vm_readv
+ jg compat_sys_process_vm_readv
ENTRY(compat_sys_process_vm_writev_wrapper)
lgfr %r2,%r2 # compat_pid_t
@@ -1645,4 +1645,4 @@ ENTRY(compat_sys_process_vm_writev_wrapper)
llgfr %r6,%r6 # unsigned long
llgf %r0,164(%r15) # unsigned long
stg %r0,160(%r15)
- jg sys_process_vm_writev
+ jg compat_sys_process_vm_writev
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index f4eb37680b9..e4be113fbac 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -719,7 +719,11 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
long ret = 0;
/* Do the secure computing check first. */
- secure_computing_strict(regs->gprs[2]);
+ if (secure_computing(regs->gprs[2])) {
+ /* seccomp failures shouldn't expose any additional code. */
+ ret = -1;
+ goto out;
+ }
/*
* The sysc_tracesys code in entry.S stored the system
@@ -745,6 +749,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
regs->gprs[2], regs->orig_gpr2,
regs->gprs[3], regs->gprs[4],
regs->gprs[5]);
+out:
return ret ?: regs->gprs[2];
}
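With HAVE_ARCH_SECCOMP_FILTER selected and secure_computing() wired into the trace hook, a seccomp BPF filter can now veto a syscall on s390 and the kernel will skip it instead of running it. Purely as a user-space illustration of what such a filter looks like (standard uapi macros; the getpid() policy is arbitrary):

#include <stdio.h>
#include <stddef.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

#ifndef PR_SET_NO_NEW_PRIVS
#define PR_SET_NO_NEW_PRIVS 38
#endif

int main(void)
{
	struct sock_filter filter[] = {
		/* A real filter would first check seccomp_data.arch, which is
		 * what the new syscall_get_arch() lets the s390 kernel report. */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS, offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),   /* deny getpid() */
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),  /* allow the rest */
	};
	struct sock_fprog prog = {
		.len = (unsigned short)(sizeof(filter) / sizeof(filter[0])),
		.filter = filter,
	};

	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
	    prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) {
		perror("prctl");
		return 1;
	}
	printf("filter installed; getpid() would now be fatal\n");
	return 0;
}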
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index f86c81e13c3..40b57693de3 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -974,11 +974,13 @@ static void __init setup_hwcaps(void)
if (MACHINE_HAS_HPAGE)
elf_hwcap |= HWCAP_S390_HPAGE;
+#if defined(CONFIG_64BIT)
/*
* 64-bit register support for 31-bit processes
* HWCAP_S390_HIGH_GPRS is bit 9.
*/
elf_hwcap |= HWCAP_S390_HIGH_GPRS;
+#endif
get_cpu_id(&cpu_id);
switch (cpu_id.machine) {
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index b4a29eee41b..d0964d22adb 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -81,11 +81,12 @@ SYSCALL_DEFINE1(s390_personality, unsigned int, personality)
{
unsigned int ret;
- if (current->personality == PER_LINUX32 && personality == PER_LINUX)
- personality = PER_LINUX32;
+ if (personality(current->personality) == PER_LINUX32 &&
+ personality(personality) == PER_LINUX)
+ personality |= PER_LINUX32;
ret = sys_personality(personality);
- if (ret == PER_LINUX32)
- ret = PER_LINUX;
+ if (personality(ret) == PER_LINUX32)
+ ret &= ~PER_LINUX32;
return ret;
}
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 60ee2b88379..2d37bb861fa 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -2,69 +2,82 @@
* User access functions based on page table walks for enhanced
* system layout without hardware support.
*
- * Copyright IBM Corp. 2006
+ * Copyright IBM Corp. 2006, 2012
* Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
*/
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
+#include <linux/hugetlb.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"
-static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
+
+/*
+ * Returns kernel address for user virtual address. If the returned address is
+ * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address
+ * contains the (negative) exception code.
+ */
+static __always_inline unsigned long follow_table(struct mm_struct *mm,
+ unsigned long addr, int write)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
+ pte_t *ptep;
pgd = pgd_offset(mm, addr);
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
- return (pte_t *) 0x3a;
+ return -0x3aUL;
pud = pud_offset(pgd, addr);
if (pud_none(*pud) || unlikely(pud_bad(*pud)))
- return (pte_t *) 0x3b;
+ return -0x3bUL;
pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
- return (pte_t *) 0x10;
+ if (pmd_none(*pmd))
+ return -0x10UL;
+ if (pmd_huge(*pmd)) {
+ if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
+ return -0x04UL;
+ return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
+ }
+ if (unlikely(pmd_bad(*pmd)))
+ return -0x10UL;
+
+ ptep = pte_offset_map(pmd, addr);
+ if (!pte_present(*ptep))
+ return -0x11UL;
+ if (write && !pte_write(*ptep))
+ return -0x04UL;
- return pte_offset_map(pmd, addr);
+ return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
}
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
size_t n, int write_user)
{
struct mm_struct *mm = current->mm;
- unsigned long offset, pfn, done, size;
- pte_t *pte;
+ unsigned long offset, done, size, kaddr;
void *from, *to;
done = 0;
retry:
spin_lock(&mm->page_table_lock);
do {
- pte = follow_table(mm, uaddr);
- if ((unsigned long) pte < 0x1000)
+ kaddr = follow_table(mm, uaddr, write_user);
+ if (IS_ERR_VALUE(kaddr))
goto fault;
- if (!pte_present(*pte)) {
- pte = (pte_t *) 0x11;
- goto fault;
- } else if (write_user && !pte_write(*pte)) {
- pte = (pte_t *) 0x04;
- goto fault;
- }
- pfn = pte_pfn(*pte);
- offset = uaddr & (PAGE_SIZE - 1);
+ offset = uaddr & ~PAGE_MASK;
size = min(n - done, PAGE_SIZE - offset);
if (write_user) {
- to = (void *)((pfn << PAGE_SHIFT) + offset);
+ to = (void *) kaddr;
from = kptr + done;
} else {
- from = (void *)((pfn << PAGE_SHIFT) + offset);
+ from = (void *) kaddr;
to = kptr + done;
}
memcpy(to, from, size);
@@ -75,7 +88,7 @@ retry:
return n - done;
fault:
spin_unlock(&mm->page_table_lock);
- if (__handle_fault(uaddr, (unsigned long) pte, write_user))
+ if (__handle_fault(uaddr, -kaddr, write_user))
return n - done;
goto retry;
}
@@ -84,27 +97,22 @@ fault:
* Do DAT for user address by page table walk, return kernel address.
* This function needs to be called with current->mm->page_table_lock held.
*/
-static __always_inline unsigned long __dat_user_addr(unsigned long uaddr)
+static __always_inline unsigned long __dat_user_addr(unsigned long uaddr,
+ int write)
{
struct mm_struct *mm = current->mm;
- unsigned long pfn;
- pte_t *pte;
+ unsigned long kaddr;
int rc;
retry:
- pte = follow_table(mm, uaddr);
- if ((unsigned long) pte < 0x1000)
- goto fault;
- if (!pte_present(*pte)) {
- pte = (pte_t *) 0x11;
+ kaddr = follow_table(mm, uaddr, write);
+ if (IS_ERR_VALUE(kaddr))
goto fault;
- }
- pfn = pte_pfn(*pte);
- return (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
+ return kaddr;
fault:
spin_unlock(&mm->page_table_lock);
- rc = __handle_fault(uaddr, (unsigned long) pte, 0);
+ rc = __handle_fault(uaddr, -kaddr, write);
spin_lock(&mm->page_table_lock);
if (!rc)
goto retry;
@@ -159,11 +167,9 @@ static size_t clear_user_pt(size_t n, void __user *to)
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
- char *addr;
unsigned long uaddr = (unsigned long) src;
struct mm_struct *mm = current->mm;
- unsigned long offset, pfn, done, len;
- pte_t *pte;
+ unsigned long offset, done, len, kaddr;
size_t len_str;
if (segment_eq(get_fs(), KERNEL_DS))
@@ -172,19 +178,13 @@ static size_t strnlen_user_pt(size_t count, const char __user *src)
retry:
spin_lock(&mm->page_table_lock);
do {
- pte = follow_table(mm, uaddr);
- if ((unsigned long) pte < 0x1000)
- goto fault;
- if (!pte_present(*pte)) {
- pte = (pte_t *) 0x11;
+ kaddr = follow_table(mm, uaddr, 0);
+ if (IS_ERR_VALUE(kaddr))
goto fault;
- }
- pfn = pte_pfn(*pte);
- offset = uaddr & (PAGE_SIZE-1);
- addr = (char *)(pfn << PAGE_SHIFT) + offset;
+ offset = uaddr & ~PAGE_MASK;
len = min(count - done, PAGE_SIZE - offset);
- len_str = strnlen(addr, len);
+ len_str = strnlen((char *) kaddr, len);
done += len_str;
uaddr += len_str;
} while ((len_str == len) && (done < count));
@@ -192,7 +192,7 @@ retry:
return done + 1;
fault:
spin_unlock(&mm->page_table_lock);
- if (__handle_fault(uaddr, (unsigned long) pte, 0))
+ if (__handle_fault(uaddr, -kaddr, 0))
return 0;
goto retry;
}
@@ -225,11 +225,10 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
const void __user *from)
{
struct mm_struct *mm = current->mm;
- unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
- uaddr, done, size, error_code;
+ unsigned long offset_max, uaddr, done, size, error_code;
unsigned long uaddr_from = (unsigned long) from;
unsigned long uaddr_to = (unsigned long) to;
- pte_t *pte_from, *pte_to;
+ unsigned long kaddr_to, kaddr_from;
int write_user;
if (segment_eq(get_fs(), KERNEL_DS)) {
@@ -242,38 +241,23 @@ retry:
do {
write_user = 0;
uaddr = uaddr_from;
- pte_from = follow_table(mm, uaddr_from);
- error_code = (unsigned long) pte_from;
- if (error_code < 0x1000)
- goto fault;
- if (!pte_present(*pte_from)) {
- error_code = 0x11;
+ kaddr_from = follow_table(mm, uaddr_from, 0);
+ error_code = kaddr_from;
+ if (IS_ERR_VALUE(error_code))
goto fault;
- }
write_user = 1;
uaddr = uaddr_to;
- pte_to = follow_table(mm, uaddr_to);
- error_code = (unsigned long) pte_to;
- if (error_code < 0x1000)
- goto fault;
- if (!pte_present(*pte_to)) {
- error_code = 0x11;
+ kaddr_to = follow_table(mm, uaddr_to, 1);
+ error_code = (unsigned long) kaddr_to;
+ if (IS_ERR_VALUE(error_code))
goto fault;
- } else if (!pte_write(*pte_to)) {
- error_code = 0x04;
- goto fault;
- }
- pfn_from = pte_pfn(*pte_from);
- pfn_to = pte_pfn(*pte_to);
- offset_from = uaddr_from & (PAGE_SIZE-1);
- offset_to = uaddr_from & (PAGE_SIZE-1);
- offset_max = max(offset_from, offset_to);
+ offset_max = max(uaddr_from & ~PAGE_MASK,
+ uaddr_to & ~PAGE_MASK);
size = min(n - done, PAGE_SIZE - offset_max);
- memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
- (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
+ memcpy((void *) kaddr_to, (void *) kaddr_from, size);
done += size;
uaddr_from += size;
uaddr_to += size;
@@ -282,7 +266,7 @@ retry:
return n - done;
fault:
spin_unlock(&mm->page_table_lock);
- if (__handle_fault(uaddr, error_code, write_user))
+ if (__handle_fault(uaddr, -error_code, write_user))
return n - done;
goto retry;
}
@@ -341,7 +325,7 @@ int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
return __futex_atomic_op_pt(op, uaddr, oparg, old);
spin_lock(&current->mm->page_table_lock);
uaddr = (u32 __force __user *)
- __dat_user_addr((__force unsigned long) uaddr);
+ __dat_user_addr((__force unsigned long) uaddr, 1);
if (!uaddr) {
spin_unlock(&current->mm->page_table_lock);
return -EFAULT;
@@ -378,7 +362,7 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
spin_lock(&current->mm->page_table_lock);
uaddr = (u32 __force __user *)
- __dat_user_addr((__force unsigned long) uaddr);
+ __dat_user_addr((__force unsigned long) uaddr, 1);
if (!uaddr) {
spin_unlock(&current->mm->page_table_lock);
return -EFAULT;
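follow_table() now returns either a translated kernel address or, in the last 4095 values of the address space, the negated exception code, which callers detect with IS_ERR_VALUE(). A small stand-alone sketch of that address-or-error encoding (fake_follow_table and the local macros below are illustrative, not the kernel implementation):

#include <stdio.h>

/* Mirrors the kernel's IS_ERR_VALUE() idiom: the top 4095 values of the
 * address space are reserved for negative error/exception codes. */
#define MAX_ERRNO	4095UL
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static unsigned long fake_follow_table(unsigned long addr, int fail)
{
	if (fail)
		return -0x11UL;			/* "page not present" style code */
	return addr & ~0xfffUL;			/* pretend the walk succeeded */
}

int main(void)
{
	unsigned long kaddr = fake_follow_table(0x12345678UL, 1);

	if (IS_ERR_VALUE(kaddr))
		printf("fault, exception code 0x%lx\n", -kaddr);
	else
		printf("kernel address 0x%lx\n", kaddr);
	return 0;
}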
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index a1e9d69a9c9..584b93674ea 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -169,7 +169,7 @@ static ssize_t hw_interval_write(struct file *file, char const __user *buf,
if (*offset)
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
if (val < oprofile_min_interval)
oprofile_hw_interval = oprofile_min_interval;
@@ -212,7 +212,7 @@ static ssize_t hwsampler_zero_write(struct file *file, char const __user *buf,
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
if (val != 0)
return -EINVAL;
@@ -243,7 +243,7 @@ static ssize_t hwsampler_kernel_write(struct file *file, char const __user *buf,
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
if (val != 0 && val != 1)
@@ -278,7 +278,7 @@ static ssize_t hwsampler_user_write(struct file *file, char const __user *buf,
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
if (val != 0 && val != 1)
@@ -317,7 +317,7 @@ static ssize_t timer_enabled_write(struct file *file, char const __user *buf,
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
- if (retval)
+ if (retval <= 0)
return retval;
if (val != 0 && val != 1)
diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c
index 4c171f13b0e..b2256562314 100644
--- a/arch/sh/drivers/dma/dma-sh.c
+++ b/arch/sh/drivers/dma/dma-sh.c
@@ -335,7 +335,7 @@ static int dmae_irq_init(void)
for (n = 0; n < NR_DMAE; n++) {
int i = request_irq(get_dma_error_irq(n), dma_err,
- IRQF_SHARED, dmae_name[n], NULL);
+ IRQF_SHARED, dmae_name[n], (void *)dmae_name[n]);
if (unlikely(i < 0)) {
printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]);
return i;
diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h
index 4a5350037c8..1b6199740e9 100644
--- a/arch/sh/include/asm/sections.h
+++ b/arch/sh/include/asm/sections.h
@@ -6,7 +6,6 @@
extern long __nosave_begin, __nosave_end;
extern long __machvec_start, __machvec_end;
extern char __uncached_start, __uncached_end;
-extern char _ebss[];
extern char __start_eh_frame[], __stop_eh_frame[];
#endif /* __ASM_SH_SECTIONS_H */
diff --git a/arch/sh/include/cpu-sh2a/cpu/sh7269.h b/arch/sh/include/cpu-sh2a/cpu/sh7269.h
index 48d14498e77..2a0ca8780f0 100644
--- a/arch/sh/include/cpu-sh2a/cpu/sh7269.h
+++ b/arch/sh/include/cpu-sh2a/cpu/sh7269.h
@@ -183,18 +183,30 @@ enum {
GPIO_FN_DV_DATA1, GPIO_FN_DV_DATA0,
GPIO_FN_LCD_CLK, GPIO_FN_LCD_EXTCLK,
GPIO_FN_LCD_VSYNC, GPIO_FN_LCD_HSYNC, GPIO_FN_LCD_DE,
- GPIO_FN_LCD_DATA23, GPIO_FN_LCD_DATA22,
- GPIO_FN_LCD_DATA21, GPIO_FN_LCD_DATA20,
- GPIO_FN_LCD_DATA19, GPIO_FN_LCD_DATA18,
- GPIO_FN_LCD_DATA17, GPIO_FN_LCD_DATA16,
- GPIO_FN_LCD_DATA15, GPIO_FN_LCD_DATA14,
- GPIO_FN_LCD_DATA13, GPIO_FN_LCD_DATA12,
- GPIO_FN_LCD_DATA11, GPIO_FN_LCD_DATA10,
- GPIO_FN_LCD_DATA9, GPIO_FN_LCD_DATA8,
- GPIO_FN_LCD_DATA7, GPIO_FN_LCD_DATA6,
- GPIO_FN_LCD_DATA5, GPIO_FN_LCD_DATA4,
- GPIO_FN_LCD_DATA3, GPIO_FN_LCD_DATA2,
- GPIO_FN_LCD_DATA1, GPIO_FN_LCD_DATA0,
+ GPIO_FN_LCD_DATA23_PG23, GPIO_FN_LCD_DATA22_PG22,
+ GPIO_FN_LCD_DATA21_PG21, GPIO_FN_LCD_DATA20_PG20,
+ GPIO_FN_LCD_DATA19_PG19, GPIO_FN_LCD_DATA18_PG18,
+ GPIO_FN_LCD_DATA17_PG17, GPIO_FN_LCD_DATA16_PG16,
+ GPIO_FN_LCD_DATA15_PG15, GPIO_FN_LCD_DATA14_PG14,
+ GPIO_FN_LCD_DATA13_PG13, GPIO_FN_LCD_DATA12_PG12,
+ GPIO_FN_LCD_DATA11_PG11, GPIO_FN_LCD_DATA10_PG10,
+ GPIO_FN_LCD_DATA9_PG9, GPIO_FN_LCD_DATA8_PG8,
+ GPIO_FN_LCD_DATA7_PG7, GPIO_FN_LCD_DATA6_PG6,
+ GPIO_FN_LCD_DATA5_PG5, GPIO_FN_LCD_DATA4_PG4,
+ GPIO_FN_LCD_DATA3_PG3, GPIO_FN_LCD_DATA2_PG2,
+ GPIO_FN_LCD_DATA1_PG1, GPIO_FN_LCD_DATA0_PG0,
+ GPIO_FN_LCD_DATA23_PJ23, GPIO_FN_LCD_DATA22_PJ22,
+ GPIO_FN_LCD_DATA21_PJ21, GPIO_FN_LCD_DATA20_PJ20,
+ GPIO_FN_LCD_DATA19_PJ19, GPIO_FN_LCD_DATA18_PJ18,
+ GPIO_FN_LCD_DATA17_PJ17, GPIO_FN_LCD_DATA16_PJ16,
+ GPIO_FN_LCD_DATA15_PJ15, GPIO_FN_LCD_DATA14_PJ14,
+ GPIO_FN_LCD_DATA13_PJ13, GPIO_FN_LCD_DATA12_PJ12,
+ GPIO_FN_LCD_DATA11_PJ11, GPIO_FN_LCD_DATA10_PJ10,
+ GPIO_FN_LCD_DATA9_PJ9, GPIO_FN_LCD_DATA8_PJ8,
+ GPIO_FN_LCD_DATA7_PJ7, GPIO_FN_LCD_DATA6_PJ6,
+ GPIO_FN_LCD_DATA5_PJ5, GPIO_FN_LCD_DATA4_PJ4,
+ GPIO_FN_LCD_DATA3_PJ3, GPIO_FN_LCD_DATA2_PJ2,
+ GPIO_FN_LCD_DATA1_PJ1, GPIO_FN_LCD_DATA0_PJ0,
GPIO_FN_LCD_M_DISP,
};
diff --git a/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c b/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c
index f25127c46ec..039e4587dd9 100644
--- a/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c
+++ b/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c
@@ -758,12 +758,22 @@ enum {
DV_DATA3_MARK, DV_DATA2_MARK, DV_DATA1_MARK, DV_DATA0_MARK,
LCD_CLK_MARK, LCD_EXTCLK_MARK,
LCD_VSYNC_MARK, LCD_HSYNC_MARK, LCD_DE_MARK,
- LCD_DATA23_MARK, LCD_DATA22_MARK, LCD_DATA21_MARK, LCD_DATA20_MARK,
- LCD_DATA19_MARK, LCD_DATA18_MARK, LCD_DATA17_MARK, LCD_DATA16_MARK,
- LCD_DATA15_MARK, LCD_DATA14_MARK, LCD_DATA13_MARK, LCD_DATA12_MARK,
- LCD_DATA11_MARK, LCD_DATA10_MARK, LCD_DATA9_MARK, LCD_DATA8_MARK,
- LCD_DATA7_MARK, LCD_DATA6_MARK, LCD_DATA5_MARK, LCD_DATA4_MARK,
- LCD_DATA3_MARK, LCD_DATA2_MARK, LCD_DATA1_MARK, LCD_DATA0_MARK,
+ LCD_DATA23_PG23_MARK, LCD_DATA22_PG22_MARK, LCD_DATA21_PG21_MARK,
+ LCD_DATA20_PG20_MARK, LCD_DATA19_PG19_MARK, LCD_DATA18_PG18_MARK,
+ LCD_DATA17_PG17_MARK, LCD_DATA16_PG16_MARK, LCD_DATA15_PG15_MARK,
+ LCD_DATA14_PG14_MARK, LCD_DATA13_PG13_MARK, LCD_DATA12_PG12_MARK,
+ LCD_DATA11_PG11_MARK, LCD_DATA10_PG10_MARK, LCD_DATA9_PG9_MARK,
+ LCD_DATA8_PG8_MARK, LCD_DATA7_PG7_MARK, LCD_DATA6_PG6_MARK,
+ LCD_DATA5_PG5_MARK, LCD_DATA4_PG4_MARK, LCD_DATA3_PG3_MARK,
+ LCD_DATA2_PG2_MARK, LCD_DATA1_PG1_MARK, LCD_DATA0_PG0_MARK,
+ LCD_DATA23_PJ23_MARK, LCD_DATA22_PJ22_MARK, LCD_DATA21_PJ21_MARK,
+ LCD_DATA20_PJ20_MARK, LCD_DATA19_PJ19_MARK, LCD_DATA18_PJ18_MARK,
+ LCD_DATA17_PJ17_MARK, LCD_DATA16_PJ16_MARK, LCD_DATA15_PJ15_MARK,
+ LCD_DATA14_PJ14_MARK, LCD_DATA13_PJ13_MARK, LCD_DATA12_PJ12_MARK,
+ LCD_DATA11_PJ11_MARK, LCD_DATA10_PJ10_MARK, LCD_DATA9_PJ9_MARK,
+ LCD_DATA8_PJ8_MARK, LCD_DATA7_PJ7_MARK, LCD_DATA6_PJ6_MARK,
+ LCD_DATA5_PJ5_MARK, LCD_DATA4_PJ4_MARK, LCD_DATA3_PJ3_MARK,
+ LCD_DATA2_PJ2_MARK, LCD_DATA1_PJ1_MARK, LCD_DATA0_PJ0_MARK,
LCD_TCON6_MARK, LCD_TCON5_MARK, LCD_TCON4_MARK,
LCD_TCON3_MARK, LCD_TCON2_MARK, LCD_TCON1_MARK, LCD_TCON0_MARK,
LCD_M_DISP_MARK,
@@ -1036,6 +1046,7 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PF1_DATA, PF1MD_000),
PINMUX_DATA(BACK_MARK, PF1MD_001),
+ PINMUX_DATA(SSL10_MARK, PF1MD_011),
PINMUX_DATA(TIOC4B_MARK, PF1MD_100),
PINMUX_DATA(DACK0_MARK, PF1MD_101),
@@ -1049,47 +1060,50 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PG27_DATA, PG27MD_00),
PINMUX_DATA(LCD_TCON2_MARK, PG27MD_10),
PINMUX_DATA(LCD_EXTCLK_MARK, PG27MD_11),
+ PINMUX_DATA(LCD_DE_MARK, PG27MD_11),
PINMUX_DATA(PG26_DATA, PG26MD_00),
PINMUX_DATA(LCD_TCON1_MARK, PG26MD_10),
+ PINMUX_DATA(LCD_HSYNC_MARK, PG26MD_10),
PINMUX_DATA(PG25_DATA, PG25MD_00),
PINMUX_DATA(LCD_TCON0_MARK, PG25MD_10),
+ PINMUX_DATA(LCD_VSYNC_MARK, PG25MD_10),
PINMUX_DATA(PG24_DATA, PG24MD_00),
PINMUX_DATA(LCD_CLK_MARK, PG24MD_10),
PINMUX_DATA(PG23_DATA, PG23MD_000),
- PINMUX_DATA(LCD_DATA23_MARK, PG23MD_010),
+ PINMUX_DATA(LCD_DATA23_PG23_MARK, PG23MD_010),
PINMUX_DATA(LCD_TCON6_MARK, PG23MD_011),
PINMUX_DATA(TXD5_MARK, PG23MD_100),
PINMUX_DATA(PG22_DATA, PG22MD_000),
- PINMUX_DATA(LCD_DATA22_MARK, PG22MD_010),
+ PINMUX_DATA(LCD_DATA22_PG22_MARK, PG22MD_010),
PINMUX_DATA(LCD_TCON5_MARK, PG22MD_011),
PINMUX_DATA(RXD5_MARK, PG22MD_100),
PINMUX_DATA(PG21_DATA, PG21MD_000),
PINMUX_DATA(DV_DATA7_MARK, PG21MD_001),
- PINMUX_DATA(LCD_DATA21_MARK, PG21MD_010),
+ PINMUX_DATA(LCD_DATA21_PG21_MARK, PG21MD_010),
PINMUX_DATA(LCD_TCON4_MARK, PG21MD_011),
PINMUX_DATA(TXD4_MARK, PG21MD_100),
PINMUX_DATA(PG20_DATA, PG20MD_000),
PINMUX_DATA(DV_DATA6_MARK, PG20MD_001),
- PINMUX_DATA(LCD_DATA20_MARK, PG21MD_010),
+ PINMUX_DATA(LCD_DATA20_PG20_MARK, PG21MD_010),
PINMUX_DATA(LCD_TCON3_MARK, PG20MD_011),
PINMUX_DATA(RXD4_MARK, PG20MD_100),
PINMUX_DATA(PG19_DATA, PG19MD_000),
PINMUX_DATA(DV_DATA5_MARK, PG19MD_001),
- PINMUX_DATA(LCD_DATA19_MARK, PG19MD_010),
+ PINMUX_DATA(LCD_DATA19_PG19_MARK, PG19MD_010),
PINMUX_DATA(SPDIF_OUT_MARK, PG19MD_011),
PINMUX_DATA(SCK5_MARK, PG19MD_100),
PINMUX_DATA(PG18_DATA, PG18MD_000),
PINMUX_DATA(DV_DATA4_MARK, PG18MD_001),
- PINMUX_DATA(LCD_DATA18_MARK, PG18MD_010),
+ PINMUX_DATA(LCD_DATA18_PG18_MARK, PG18MD_010),
PINMUX_DATA(SPDIF_IN_MARK, PG18MD_011),
PINMUX_DATA(SCK4_MARK, PG18MD_100),
@@ -1097,103 +1111,103 @@ static pinmux_enum_t pinmux_data[] = {
// we're going with 2 bits
PINMUX_DATA(PG17_DATA, PG17MD_00),
PINMUX_DATA(WE3ICIOWRAHDQMUU_MARK, PG17MD_01),
- PINMUX_DATA(LCD_DATA17_MARK, PG17MD_10),
+ PINMUX_DATA(LCD_DATA17_PG17_MARK, PG17MD_10),
// TODO hardware manual has PG16 3 bits wide in reg picture and 2 bits in description
// we're going with 2 bits
PINMUX_DATA(PG16_DATA, PG16MD_00),
PINMUX_DATA(WE2ICIORDDQMUL_MARK, PG16MD_01),
- PINMUX_DATA(LCD_DATA16_MARK, PG16MD_10),
+ PINMUX_DATA(LCD_DATA16_PG16_MARK, PG16MD_10),
PINMUX_DATA(PG15_DATA, PG15MD_00),
PINMUX_DATA(D31_MARK, PG15MD_01),
- PINMUX_DATA(LCD_DATA15_MARK, PG15MD_10),
+ PINMUX_DATA(LCD_DATA15_PG15_MARK, PG15MD_10),
PINMUX_DATA(PINT7_PG_MARK, PG15MD_11),
PINMUX_DATA(PG14_DATA, PG14MD_00),
PINMUX_DATA(D30_MARK, PG14MD_01),
- PINMUX_DATA(LCD_DATA14_MARK, PG14MD_10),
+ PINMUX_DATA(LCD_DATA14_PG14_MARK, PG14MD_10),
PINMUX_DATA(PINT6_PG_MARK, PG14MD_11),
PINMUX_DATA(PG13_DATA, PG13MD_00),
PINMUX_DATA(D29_MARK, PG13MD_01),
- PINMUX_DATA(LCD_DATA13_MARK, PG13MD_10),
+ PINMUX_DATA(LCD_DATA13_PG13_MARK, PG13MD_10),
PINMUX_DATA(PINT5_PG_MARK, PG13MD_11),
PINMUX_DATA(PG12_DATA, PG12MD_00),
PINMUX_DATA(D28_MARK, PG12MD_01),
- PINMUX_DATA(LCD_DATA12_MARK, PG12MD_10),
+ PINMUX_DATA(LCD_DATA12_PG12_MARK, PG12MD_10),
PINMUX_DATA(PINT4_PG_MARK, PG12MD_11),
PINMUX_DATA(PG11_DATA, PG11MD_000),
PINMUX_DATA(D27_MARK, PG11MD_001),
- PINMUX_DATA(LCD_DATA11_MARK, PG11MD_010),
+ PINMUX_DATA(LCD_DATA11_PG11_MARK, PG11MD_010),
PINMUX_DATA(PINT3_PG_MARK, PG11MD_011),
PINMUX_DATA(TIOC3D_MARK, PG11MD_100),
PINMUX_DATA(PG10_DATA, PG10MD_000),
PINMUX_DATA(D26_MARK, PG10MD_001),
- PINMUX_DATA(LCD_DATA10_MARK, PG10MD_010),
+ PINMUX_DATA(LCD_DATA10_PG10_MARK, PG10MD_010),
PINMUX_DATA(PINT2_PG_MARK, PG10MD_011),
PINMUX_DATA(TIOC3C_MARK, PG10MD_100),
PINMUX_DATA(PG9_DATA, PG9MD_000),
PINMUX_DATA(D25_MARK, PG9MD_001),
- PINMUX_DATA(LCD_DATA9_MARK, PG9MD_010),
+ PINMUX_DATA(LCD_DATA9_PG9_MARK, PG9MD_010),
PINMUX_DATA(PINT1_PG_MARK, PG9MD_011),
PINMUX_DATA(TIOC3B_MARK, PG9MD_100),
PINMUX_DATA(PG8_DATA, PG8MD_000),
PINMUX_DATA(D24_MARK, PG8MD_001),
- PINMUX_DATA(LCD_DATA8_MARK, PG8MD_010),
+ PINMUX_DATA(LCD_DATA8_PG8_MARK, PG8MD_010),
PINMUX_DATA(PINT0_PG_MARK, PG8MD_011),
PINMUX_DATA(TIOC3A_MARK, PG8MD_100),
PINMUX_DATA(PG7_DATA, PG7MD_000),
PINMUX_DATA(D23_MARK, PG7MD_001),
- PINMUX_DATA(LCD_DATA7_MARK, PG7MD_010),
+ PINMUX_DATA(LCD_DATA7_PG7_MARK, PG7MD_010),
PINMUX_DATA(IRQ7_PG_MARK, PG7MD_011),
PINMUX_DATA(TIOC2B_MARK, PG7MD_100),
PINMUX_DATA(PG6_DATA, PG6MD_000),
PINMUX_DATA(D22_MARK, PG6MD_001),
- PINMUX_DATA(LCD_DATA6_MARK, PG6MD_010),
+ PINMUX_DATA(LCD_DATA6_PG6_MARK, PG6MD_010),
PINMUX_DATA(IRQ6_PG_MARK, PG6MD_011),
PINMUX_DATA(TIOC2A_MARK, PG6MD_100),
PINMUX_DATA(PG5_DATA, PG5MD_000),
PINMUX_DATA(D21_MARK, PG5MD_001),
- PINMUX_DATA(LCD_DATA5_MARK, PG5MD_010),
+ PINMUX_DATA(LCD_DATA5_PG5_MARK, PG5MD_010),
PINMUX_DATA(IRQ5_PG_MARK, PG5MD_011),
PINMUX_DATA(TIOC1B_MARK, PG5MD_100),
PINMUX_DATA(PG4_DATA, PG4MD_000),
PINMUX_DATA(D20_MARK, PG4MD_001),
- PINMUX_DATA(LCD_DATA4_MARK, PG4MD_010),
+ PINMUX_DATA(LCD_DATA4_PG4_MARK, PG4MD_010),
PINMUX_DATA(IRQ4_PG_MARK, PG4MD_011),
PINMUX_DATA(TIOC1A_MARK, PG4MD_100),
PINMUX_DATA(PG3_DATA, PG3MD_000),
PINMUX_DATA(D19_MARK, PG3MD_001),
- PINMUX_DATA(LCD_DATA3_MARK, PG3MD_010),
+ PINMUX_DATA(LCD_DATA3_PG3_MARK, PG3MD_010),
PINMUX_DATA(IRQ3_PG_MARK, PG3MD_011),
PINMUX_DATA(TIOC0D_MARK, PG3MD_100),
PINMUX_DATA(PG2_DATA, PG2MD_000),
PINMUX_DATA(D18_MARK, PG2MD_001),
- PINMUX_DATA(LCD_DATA2_MARK, PG2MD_010),
+ PINMUX_DATA(LCD_DATA2_PG2_MARK, PG2MD_010),
PINMUX_DATA(IRQ2_PG_MARK, PG2MD_011),
PINMUX_DATA(TIOC0C_MARK, PG2MD_100),
PINMUX_DATA(PG1_DATA, PG1MD_000),
PINMUX_DATA(D17_MARK, PG1MD_001),
- PINMUX_DATA(LCD_DATA1_MARK, PG1MD_010),
+ PINMUX_DATA(LCD_DATA1_PG1_MARK, PG1MD_010),
PINMUX_DATA(IRQ1_PG_MARK, PG1MD_011),
PINMUX_DATA(TIOC0B_MARK, PG1MD_100),
PINMUX_DATA(PG0_DATA, PG0MD_000),
PINMUX_DATA(D16_MARK, PG0MD_001),
- PINMUX_DATA(LCD_DATA0_MARK, PG0MD_010),
+ PINMUX_DATA(LCD_DATA0_PG0_MARK, PG0MD_010),
PINMUX_DATA(IRQ0_PG_MARK, PG0MD_011),
PINMUX_DATA(TIOC0A_MARK, PG0MD_100),
@@ -1275,14 +1289,14 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PJ23_DATA, PJ23MD_000),
PINMUX_DATA(DV_DATA23_MARK, PJ23MD_001),
- PINMUX_DATA(LCD_DATA23_MARK, PJ23MD_010),
+ PINMUX_DATA(LCD_DATA23_PJ23_MARK, PJ23MD_010),
PINMUX_DATA(LCD_TCON6_MARK, PJ23MD_011),
PINMUX_DATA(IRQ3_PJ_MARK, PJ23MD_100),
PINMUX_DATA(CTX1_MARK, PJ23MD_101),
PINMUX_DATA(PJ22_DATA, PJ22MD_000),
PINMUX_DATA(DV_DATA22_MARK, PJ22MD_001),
- PINMUX_DATA(LCD_DATA22_MARK, PJ22MD_010),
+ PINMUX_DATA(LCD_DATA22_PJ22_MARK, PJ22MD_010),
PINMUX_DATA(LCD_TCON5_MARK, PJ22MD_011),
PINMUX_DATA(IRQ2_PJ_MARK, PJ22MD_100),
PINMUX_DATA(CRX1_MARK, PJ22MD_101),
@@ -1290,14 +1304,14 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PJ21_DATA, PJ21MD_000),
PINMUX_DATA(DV_DATA21_MARK, PJ21MD_001),
- PINMUX_DATA(LCD_DATA21_MARK, PJ21MD_010),
+ PINMUX_DATA(LCD_DATA21_PJ21_MARK, PJ21MD_010),
PINMUX_DATA(LCD_TCON4_MARK, PJ21MD_011),
PINMUX_DATA(IRQ1_PJ_MARK, PJ21MD_100),
PINMUX_DATA(CTX2_MARK, PJ21MD_101),
PINMUX_DATA(PJ20_DATA, PJ20MD_000),
PINMUX_DATA(DV_DATA20_MARK, PJ20MD_001),
- PINMUX_DATA(LCD_DATA20_MARK, PJ20MD_010),
+ PINMUX_DATA(LCD_DATA20_PJ20_MARK, PJ20MD_010),
PINMUX_DATA(LCD_TCON3_MARK, PJ20MD_011),
PINMUX_DATA(IRQ0_PJ_MARK, PJ20MD_100),
PINMUX_DATA(CRX2_MARK, PJ20MD_101),
@@ -1305,7 +1319,7 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PJ19_DATA, PJ19MD_000),
PINMUX_DATA(DV_DATA19_MARK, PJ19MD_001),
- PINMUX_DATA(LCD_DATA19_MARK, PJ19MD_010),
+ PINMUX_DATA(LCD_DATA19_PJ19_MARK, PJ19MD_010),
PINMUX_DATA(MISO0_PJ19_MARK, PJ19MD_011),
PINMUX_DATA(TIOC0D_MARK, PJ19MD_100),
PINMUX_DATA(SIOFRXD_MARK, PJ19MD_101),
@@ -1313,126 +1327,126 @@ static pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PJ18_DATA, PJ18MD_000),
PINMUX_DATA(DV_DATA18_MARK, PJ18MD_001),
- PINMUX_DATA(LCD_DATA18_MARK, PJ18MD_010),
+ PINMUX_DATA(LCD_DATA18_PJ18_MARK, PJ18MD_010),
PINMUX_DATA(MOSI0_PJ18_MARK, PJ18MD_011),
PINMUX_DATA(TIOC0C_MARK, PJ18MD_100),
PINMUX_DATA(SIOFTXD_MARK, PJ18MD_101),
PINMUX_DATA(PJ17_DATA, PJ17MD_000),
PINMUX_DATA(DV_DATA17_MARK, PJ17MD_001),
- PINMUX_DATA(LCD_DATA17_MARK, PJ17MD_010),
+ PINMUX_DATA(LCD_DATA17_PJ17_MARK, PJ17MD_010),
PINMUX_DATA(SSL00_PJ17_MARK, PJ17MD_011),
PINMUX_DATA(TIOC0B_MARK, PJ17MD_100),
PINMUX_DATA(SIOFSYNC_MARK, PJ17MD_101),
PINMUX_DATA(PJ16_DATA, PJ16MD_000),
PINMUX_DATA(DV_DATA16_MARK, PJ16MD_001),
- PINMUX_DATA(LCD_DATA16_MARK, PJ16MD_010),
+ PINMUX_DATA(LCD_DATA16_PJ16_MARK, PJ16MD_010),
PINMUX_DATA(RSPCK0_PJ16_MARK, PJ16MD_011),
PINMUX_DATA(TIOC0A_MARK, PJ16MD_100),
PINMUX_DATA(SIOFSCK_MARK, PJ16MD_101),
PINMUX_DATA(PJ15_DATA, PJ15MD_000),
PINMUX_DATA(DV_DATA15_MARK, PJ15MD_001),
- PINMUX_DATA(LCD_DATA15_MARK, PJ15MD_010),
+ PINMUX_DATA(LCD_DATA15_PJ15_MARK, PJ15MD_010),
PINMUX_DATA(PINT7_PJ_MARK, PJ15MD_011),
PINMUX_DATA(PWM2H_MARK, PJ15MD_100),
PINMUX_DATA(TXD7_MARK, PJ15MD_101),
PINMUX_DATA(PJ14_DATA, PJ14MD_000),
PINMUX_DATA(DV_DATA14_MARK, PJ14MD_001),
- PINMUX_DATA(LCD_DATA14_MARK, PJ14MD_010),
+ PINMUX_DATA(LCD_DATA14_PJ14_MARK, PJ14MD_010),
PINMUX_DATA(PINT6_PJ_MARK, PJ14MD_011),
PINMUX_DATA(PWM2G_MARK, PJ14MD_100),
PINMUX_DATA(TXD6_MARK, PJ14MD_101),
PINMUX_DATA(PJ13_DATA, PJ13MD_000),
PINMUX_DATA(DV_DATA13_MARK, PJ13MD_001),
- PINMUX_DATA(LCD_DATA13_MARK, PJ13MD_010),
+ PINMUX_DATA(LCD_DATA13_PJ13_MARK, PJ13MD_010),
PINMUX_DATA(PINT5_PJ_MARK, PJ13MD_011),
PINMUX_DATA(PWM2F_MARK, PJ13MD_100),
PINMUX_DATA(TXD5_MARK, PJ13MD_101),
PINMUX_DATA(PJ12_DATA, PJ12MD_000),
PINMUX_DATA(DV_DATA12_MARK, PJ12MD_001),
- PINMUX_DATA(LCD_DATA12_MARK, PJ12MD_010),
+ PINMUX_DATA(LCD_DATA12_PJ12_MARK, PJ12MD_010),
PINMUX_DATA(PINT4_PJ_MARK, PJ12MD_011),
PINMUX_DATA(PWM2E_MARK, PJ12MD_100),
PINMUX_DATA(SCK7_MARK, PJ12MD_101),
PINMUX_DATA(PJ11_DATA, PJ11MD_000),
PINMUX_DATA(DV_DATA11_MARK, PJ11MD_001),
- PINMUX_DATA(LCD_DATA11_MARK, PJ11MD_010),
+ PINMUX_DATA(LCD_DATA11_PJ11_MARK, PJ11MD_010),
PINMUX_DATA(PINT3_PJ_MARK, PJ11MD_011),
PINMUX_DATA(PWM2D_MARK, PJ11MD_100),
PINMUX_DATA(SCK6_MARK, PJ11MD_101),
PINMUX_DATA(PJ10_DATA, PJ10MD_000),
PINMUX_DATA(DV_DATA10_MARK, PJ10MD_001),
- PINMUX_DATA(LCD_DATA10_MARK, PJ10MD_010),
+ PINMUX_DATA(LCD_DATA10_PJ10_MARK, PJ10MD_010),
PINMUX_DATA(PINT2_PJ_MARK, PJ10MD_011),
PINMUX_DATA(PWM2C_MARK, PJ10MD_100),
PINMUX_DATA(SCK5_MARK, PJ10MD_101),
PINMUX_DATA(PJ9_DATA, PJ9MD_000),
PINMUX_DATA(DV_DATA9_MARK, PJ9MD_001),
- PINMUX_DATA(LCD_DATA9_MARK, PJ9MD_010),
+ PINMUX_DATA(LCD_DATA9_PJ9_MARK, PJ9MD_010),
PINMUX_DATA(PINT1_PJ_MARK, PJ9MD_011),
PINMUX_DATA(PWM2B_MARK, PJ9MD_100),
PINMUX_DATA(RTS5_MARK, PJ9MD_101),
PINMUX_DATA(PJ8_DATA, PJ8MD_000),
PINMUX_DATA(DV_DATA8_MARK, PJ8MD_001),
- PINMUX_DATA(LCD_DATA8_MARK, PJ8MD_010),
+ PINMUX_DATA(LCD_DATA8_PJ8_MARK, PJ8MD_010),
PINMUX_DATA(PINT0_PJ_MARK, PJ8MD_011),
PINMUX_DATA(PWM2A_MARK, PJ8MD_100),
PINMUX_DATA(CTS5_MARK, PJ8MD_101),
PINMUX_DATA(PJ7_DATA, PJ7MD_000),
PINMUX_DATA(DV_DATA7_MARK, PJ7MD_001),
- PINMUX_DATA(LCD_DATA7_MARK, PJ7MD_010),
+ PINMUX_DATA(LCD_DATA7_PJ7_MARK, PJ7MD_010),
PINMUX_DATA(SD_D2_MARK, PJ7MD_011),
PINMUX_DATA(PWM1H_MARK, PJ7MD_100),
PINMUX_DATA(PJ6_DATA, PJ6MD_000),
PINMUX_DATA(DV_DATA6_MARK, PJ6MD_001),
- PINMUX_DATA(LCD_DATA6_MARK, PJ6MD_010),
+ PINMUX_DATA(LCD_DATA6_PJ6_MARK, PJ6MD_010),
PINMUX_DATA(SD_D3_MARK, PJ6MD_011),
PINMUX_DATA(PWM1G_MARK, PJ6MD_100),
PINMUX_DATA(PJ5_DATA, PJ5MD_000),
PINMUX_DATA(DV_DATA5_MARK, PJ5MD_001),
- PINMUX_DATA(LCD_DATA5_MARK, PJ5MD_010),
+ PINMUX_DATA(LCD_DATA5_PJ5_MARK, PJ5MD_010),
PINMUX_DATA(SD_CMD_MARK, PJ5MD_011),
PINMUX_DATA(PWM1F_MARK, PJ5MD_100),
PINMUX_DATA(PJ4_DATA, PJ4MD_000),
PINMUX_DATA(DV_DATA4_MARK, PJ4MD_001),
- PINMUX_DATA(LCD_DATA4_MARK, PJ4MD_010),
+ PINMUX_DATA(LCD_DATA4_PJ4_MARK, PJ4MD_010),
PINMUX_DATA(SD_CLK_MARK, PJ4MD_011),
PINMUX_DATA(PWM1E_MARK, PJ4MD_100),
PINMUX_DATA(PJ3_DATA, PJ3MD_000),
PINMUX_DATA(DV_DATA3_MARK, PJ3MD_001),
- PINMUX_DATA(LCD_DATA3_MARK, PJ3MD_010),
+ PINMUX_DATA(LCD_DATA3_PJ3_MARK, PJ3MD_010),
PINMUX_DATA(SD_D0_MARK, PJ3MD_011),
PINMUX_DATA(PWM1D_MARK, PJ3MD_100),
PINMUX_DATA(PJ2_DATA, PJ2MD_000),
PINMUX_DATA(DV_DATA2_MARK, PJ2MD_001),
- PINMUX_DATA(LCD_DATA2_MARK, PJ2MD_010),
+ PINMUX_DATA(LCD_DATA2_PJ2_MARK, PJ2MD_010),
PINMUX_DATA(SD_D1_MARK, PJ2MD_011),
PINMUX_DATA(PWM1C_MARK, PJ2MD_100),
PINMUX_DATA(PJ1_DATA, PJ1MD_000),
PINMUX_DATA(DV_DATA1_MARK, PJ1MD_001),
- PINMUX_DATA(LCD_DATA1_MARK, PJ1MD_010),
+ PINMUX_DATA(LCD_DATA1_PJ1_MARK, PJ1MD_010),
PINMUX_DATA(SD_WP_MARK, PJ1MD_011),
PINMUX_DATA(PWM1B_MARK, PJ1MD_100),
PINMUX_DATA(PJ0_DATA, PJ0MD_000),
PINMUX_DATA(DV_DATA0_MARK, PJ0MD_001),
- PINMUX_DATA(LCD_DATA0_MARK, PJ0MD_010),
+ PINMUX_DATA(LCD_DATA0_PJ0_MARK, PJ0MD_010),
PINMUX_DATA(SD_CD_MARK, PJ0MD_011),
PINMUX_DATA(PWM1A_MARK, PJ0MD_100),
};
@@ -1877,30 +1891,55 @@ static struct pinmux_gpio pinmux_gpios[] = {
PINMUX_GPIO(GPIO_FN_LCD_HSYNC, LCD_HSYNC_MARK),
PINMUX_GPIO(GPIO_FN_LCD_DE, LCD_DE_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA23, LCD_DATA23_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA22, LCD_DATA22_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA21, LCD_DATA21_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA20, LCD_DATA20_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA19, LCD_DATA19_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA18, LCD_DATA18_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA17, LCD_DATA17_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA16, LCD_DATA16_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA15, LCD_DATA15_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA14, LCD_DATA14_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA13, LCD_DATA13_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA12, LCD_DATA12_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA11, LCD_DATA11_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA10, LCD_DATA10_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA9, LCD_DATA9_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA8, LCD_DATA8_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA7, LCD_DATA7_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA6, LCD_DATA6_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA5, LCD_DATA5_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA4, LCD_DATA4_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA3, LCD_DATA3_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA2, LCD_DATA2_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA1, LCD_DATA1_MARK),
- PINMUX_GPIO(GPIO_FN_LCD_DATA0, LCD_DATA0_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA23_PG23, LCD_DATA23_PG23_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA22_PG22, LCD_DATA22_PG22_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA21_PG21, LCD_DATA21_PG21_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA20_PG20, LCD_DATA20_PG20_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA19_PG19, LCD_DATA19_PG19_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA18_PG18, LCD_DATA18_PG18_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA17_PG17, LCD_DATA17_PG17_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA16_PG16, LCD_DATA16_PG16_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA15_PG15, LCD_DATA15_PG15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA14_PG14, LCD_DATA14_PG14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA13_PG13, LCD_DATA13_PG13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA12_PG12, LCD_DATA12_PG12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA11_PG11, LCD_DATA11_PG11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA10_PG10, LCD_DATA10_PG10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA9_PG9, LCD_DATA9_PG9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA8_PG8, LCD_DATA8_PG8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA7_PG7, LCD_DATA7_PG7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA6_PG6, LCD_DATA6_PG6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA5_PG5, LCD_DATA5_PG5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA4_PG4, LCD_DATA4_PG4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA3_PG3, LCD_DATA3_PG3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA2_PG2, LCD_DATA2_PG2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA1_PG1, LCD_DATA1_PG1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA0_PG0, LCD_DATA0_PG0_MARK),
+
+ PINMUX_GPIO(GPIO_FN_LCD_DATA23_PJ23, LCD_DATA23_PJ23_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA22_PJ22, LCD_DATA22_PJ22_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA21_PJ21, LCD_DATA21_PJ21_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA20_PJ20, LCD_DATA20_PJ20_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA19_PJ19, LCD_DATA19_PJ19_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA18_PJ18, LCD_DATA18_PJ18_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA17_PJ17, LCD_DATA17_PJ17_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA16_PJ16, LCD_DATA16_PJ16_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA15_PJ15, LCD_DATA15_PJ15_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA14_PJ14, LCD_DATA14_PJ14_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA13_PJ13, LCD_DATA13_PJ13_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA12_PJ12, LCD_DATA12_PJ12_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA11_PJ11, LCD_DATA11_PJ11_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA10_PJ10, LCD_DATA10_PJ10_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA9_PJ9, LCD_DATA9_PJ9_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA8_PJ8, LCD_DATA8_PJ8_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA7_PJ7, LCD_DATA7_PJ7_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA6_PJ6, LCD_DATA6_PJ6_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA5_PJ5, LCD_DATA5_PJ5_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA4_PJ4, LCD_DATA4_PJ4_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA3_PJ3, LCD_DATA3_PJ3_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA2_PJ2, LCD_DATA2_PJ2_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA1_PJ1, LCD_DATA1_PJ1_MARK),
+ PINMUX_GPIO(GPIO_FN_LCD_DATA0_PJ0, LCD_DATA0_PJ0_MARK),
PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK),
};
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
index b7cf6a547f1..7e605b95592 100644
--- a/arch/sh/kernel/cpu/sh5/entry.S
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -933,7 +933,7 @@ ret_with_reschedule:
pta restore_all, tr1
- movi _TIF_SIGPENDING, r8
+ movi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), r8
and r8, r7, r8
pta work_notifysig, tr0
bne r8, ZERO, tr0
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index f67601cb3f1..b96489d8b27 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -139,7 +139,7 @@ work_pending:
! r8: current_thread_info
! t: result of "tst #_TIF_NEED_RESCHED, r0"
bf/s work_resched
- tst #_TIF_SIGPENDING, r0
+ tst #(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME), r0
work_notifysig:
bt/s __restore_all
mov r15, r4
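
Both assembly paths now branch to the notify path whenever either _TIF_SIGPENDING or _TIF_NOTIFY_RESUME is raised, instead of only on pending signals. Below is a minimal, self-contained C sketch of the dispatch shape this feeds; the flag values and function names are demo stand-ins, not the arch/sh definitions.

/*
 * Illustrative userspace sketch of the work-pending dispatch: the fast path
 * tests both flags at once, and the slow path then handles each one.
 * Flag bit positions are placeholders.
 */
#include <stdio.h>

#define _TIF_SIGPENDING     (1u << 1)   /* placeholder bit positions */
#define _TIF_NOTIFY_RESUME  (1u << 2)

static void do_notify(unsigned int thread_flags)
{
	/* only take the slow path if either flag is raised */
	if (!(thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME)))
		return;

	if (thread_flags & _TIF_SIGPENDING)
		printf("deliver pending signals\n");
	if (thread_flags & _TIF_NOTIFY_RESUME)
		printf("run notify-resume callbacks\n");
}

int main(void)
{
	do_notify(_TIF_NOTIFY_RESUME);                    /* previously missed */
	do_notify(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME);
	return 0;
}
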
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 7b57bf1dc85..ebe7a7d9721 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -273,7 +273,7 @@ void __init setup_arch(char **cmdline_p)
data_resource.start = virt_to_phys(_etext);
data_resource.end = virt_to_phys(_edata)-1;
bss_resource.start = virt_to_phys(__bss_start);
- bss_resource.end = virt_to_phys(_ebss)-1;
+ bss_resource.end = virt_to_phys(__bss_stop)-1;
#ifdef CONFIG_CMDLINE_OVERWRITE
strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index 3896f26efa4..2a0a596ebf6 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -19,7 +19,6 @@ EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_generic);
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(_ebss);
EXPORT_SYMBOL(empty_zero_page);
#define DECLARE_EXPORT(name) \
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index c98905f71e2..db88cbf9eaf 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -78,7 +78,6 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_end = .;
BSS_SECTION(0, PAGE_SIZE, 4)
- _ebss = .; /* uClinux MTD sucks */
_end = . ;
STABS_DEBUG
diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S
index 84a57761f17..60164e65d66 100644
--- a/arch/sh/lib/mcount.S
+++ b/arch/sh/lib/mcount.S
@@ -39,7 +39,7 @@
*
* Make sure the stack pointer contains a valid address. Valid
* addresses for kernel stacks are anywhere after the bss
- * (after _ebss) and anywhere in init_thread_union (init_stack).
+ * (after __bss_stop) and anywhere in init_thread_union (init_stack).
*/
#define STACK_CHECK() \
mov #(THREAD_SIZE >> 10), r0; \
@@ -60,7 +60,7 @@
cmp/hi r2, r1; \
bf stack_panic; \
\
- /* If sp > _ebss then we're OK. */ \
+ /* If sp > __bss_stop then we're OK. */ \
mov.l .L_ebss, r1; \
cmp/hi r1, r15; \
bt 1f; \
@@ -70,7 +70,7 @@
cmp/hs r1, r15; \
bf stack_panic; \
\
- /* If sp > init_stack && sp < _ebss, not OK. */ \
+ /* If sp > init_stack && sp < __bss_stop, not OK. */ \
add r0, r1; \
cmp/hs r1, r15; \
bt stack_panic; \
@@ -292,8 +292,6 @@ stack_panic:
nop
.align 2
-.L_ebss:
- .long _ebss
.L_init_thread_union:
.long init_thread_union
.Lpanic:
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index 15e0a169397..f1ddc0d2367 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -48,9 +48,7 @@ void *module_alloc(unsigned long size)
return NULL;
ret = module_map(size);
- if (!ret)
- ret = ERR_PTR(-ENOMEM);
- else
+ if (ret)
memset(ret, 0, size);
return ret;
@@ -116,6 +114,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
v = sym->st_value + rel[i].r_addend;
switch (ELF_R_TYPE(rel[i].r_info) & 0xff) {
+ case R_SPARC_DISP32:
+ v -= (Elf_Addr) location;
+ *loc32 = v;
+ break;
#ifdef CONFIG_SPARC64
case R_SPARC_64:
location[0] = v >> 56;
@@ -128,11 +130,6 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
location[7] = v >> 0;
break;
- case R_SPARC_DISP32:
- v -= (Elf_Addr) location;
- *loc32 = v;
- break;
-
case R_SPARC_WDISP19:
v -= (Elf_Addr) location;
*loc32 = (*loc32 & ~0x7ffff) |
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 0dc1f578608..11c6c9603e7 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -502,12 +502,12 @@ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
{
int ret;
- if (current->personality == PER_LINUX32 &&
- personality == PER_LINUX)
- personality = PER_LINUX32;
+ if (personality(current->personality) == PER_LINUX32 &&
+ personality(personality) == PER_LINUX)
+ personality |= PER_LINUX32;
ret = sys_personality(personality);
- if (ret == PER_LINUX32)
- ret = PER_LINUX;
+ if (personality(ret) == PER_LINUX32)
+ ret &= ~PER_LINUX32;
return ret;
}
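
The personality() macro used above masks the personality word with PER_MASK, so flag bits such as READ_IMPLIES_EXEC or ADDR_LIMIT_3GB no longer defeat the comparison, and the PER_LINUX32 base is preserved rather than overwritten. A small userspace illustration using the stock <sys/personality.h> constants follows; the printed values depend on the host.

/*
 * Query the current personality via personality(2) and apply the same
 * PER_MASK extraction the kernel-side personality() macro performs.
 */
#include <stdio.h>
#include <sys/personality.h>

int main(void)
{
	int cur = personality(0xffffffff);	/* query without changing */

	printf("raw personality word: 0x%x\n", (unsigned int)cur);
	printf("base personality    : 0x%x\n", (unsigned int)cur & PER_MASK);

	if (((unsigned int)cur & PER_MASK) == PER_LINUX32)
		printf("running with the 32-bit personality\n");
	return 0;
}
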
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 6026fdd1b2e..d58edf5fefd 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2020,6 +2020,9 @@ EXPORT_SYMBOL(_PAGE_CACHE);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
unsigned long vmemmap_table[VMEMMAP_SIZE];
+static long __meminitdata addr_start, addr_end;
+static int __meminitdata node_start;
+
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
unsigned long vstart = (unsigned long) start;
@@ -2050,15 +2053,30 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
*vmem_pp = pte_base | __pa(block);
- printk(KERN_INFO "[%p-%p] page_structs=%lu "
- "node=%d entry=%lu/%lu\n", start, block, nr,
- node,
- addr >> VMEMMAP_CHUNK_SHIFT,
- VMEMMAP_SIZE);
+ /* check to see if we have contiguous blocks */
+ if (addr_end != addr || node_start != node) {
+ if (addr_start)
+ printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
+ addr_start, addr_end-1, node_start);
+ addr_start = addr;
+ node_start = node;
+ }
+ addr_end = addr + VMEMMAP_CHUNK;
}
}
return 0;
}
+
+void __meminit vmemmap_populate_print_last(void)
+{
+ if (addr_start) {
+ printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
+ addr_start, addr_end-1, node_start);
+ addr_start = 0;
+ addr_end = 0;
+ node_start = 0;
+ }
+}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
static void prot_init_common(unsigned long page_none,
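
The vmemmap_populate() change above replaces a per-chunk printk with range coalescing: remember the running [start, end) interval and only log when the next chunk is discontiguous or changes node, with vmemmap_populate_print_last() flushing the final range. A standalone sketch of that pattern; the addresses and sizes below are made up for the demo.

/* Coalesce contiguous chunks and print one line per range. */
#include <stdio.h>

static unsigned long addr_start, addr_end;
static int node_start;

static void flush_range(void)
{
	if (addr_start) {
		printf(" [%lx-%lx] on node %d\n",
		       addr_start, addr_end - 1, node_start);
		addr_start = addr_end = 0;
		node_start = 0;
	}
}

static void note_chunk(unsigned long addr, unsigned long size, int node)
{
	if (addr_end != addr || node_start != node) {	/* discontiguous: flush */
		flush_range();
		addr_start = addr;
		node_start = node;
	}
	addr_end = addr + size;
}

int main(void)
{
	note_chunk(0x1000, 0x1000, 0);	/* these two coalesce */
	note_chunk(0x2000, 0x1000, 0);
	note_chunk(0x8000, 0x1000, 1);	/* gap + node change: first range prints */
	flush_range();			/* final flush, as the _print_last() hook does */
	return 0;
}
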
diff --git a/arch/tile/include/gxio/iorpc_trio.h b/arch/tile/include/gxio/iorpc_trio.h
index 15fb7799208..58105c31228 100644
--- a/arch/tile/include/gxio/iorpc_trio.h
+++ b/arch/tile/include/gxio/iorpc_trio.h
@@ -25,21 +25,23 @@
#include <linux/module.h>
#include <asm/pgtable.h>
-#define GXIO_TRIO_OP_ALLOC_ASIDS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1400)
+#define GXIO_TRIO_OP_DEALLOC_ASID IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1400)
+#define GXIO_TRIO_OP_ALLOC_ASIDS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1401)
-#define GXIO_TRIO_OP_ALLOC_MEMORY_MAPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1402)
+#define GXIO_TRIO_OP_ALLOC_MEMORY_MAPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1404)
-#define GXIO_TRIO_OP_ALLOC_PIO_REGIONS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x140e)
-#define GXIO_TRIO_OP_INIT_PIO_REGION_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x140f)
+#define GXIO_TRIO_OP_ALLOC_PIO_REGIONS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1412)
-#define GXIO_TRIO_OP_INIT_MEMORY_MAP_MMU_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1417)
-#define GXIO_TRIO_OP_GET_PORT_PROPERTY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1418)
-#define GXIO_TRIO_OP_CONFIG_LEGACY_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1419)
-#define GXIO_TRIO_OP_CONFIG_MSI_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x141a)
+#define GXIO_TRIO_OP_INIT_PIO_REGION_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1414)
-#define GXIO_TRIO_OP_SET_MPS_MRS IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141c)
-#define GXIO_TRIO_OP_FORCE_RC_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141d)
-#define GXIO_TRIO_OP_FORCE_EP_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141e)
+#define GXIO_TRIO_OP_INIT_MEMORY_MAP_MMU_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141e)
+#define GXIO_TRIO_OP_GET_PORT_PROPERTY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141f)
+#define GXIO_TRIO_OP_CONFIG_LEGACY_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1420)
+#define GXIO_TRIO_OP_CONFIG_MSI_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1421)
+
+#define GXIO_TRIO_OP_SET_MPS_MRS IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1423)
+#define GXIO_TRIO_OP_FORCE_RC_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1424)
+#define GXIO_TRIO_OP_FORCE_EP_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1425)
#define GXIO_TRIO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
#define GXIO_TRIO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index 69f1c57a8d0..33a6a2423bd 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -20,14 +20,6 @@ struct mm_struct;
struct thread_struct {
struct task_struct *saved_task;
- /*
- * This flag is set to 1 before calling do_fork (and analyzed in
- * copy_thread) to mark that we are begin called from userspace (fork /
- * vfork / clone), and reset to 0 after. It is left to 0 when called
- * from kernelspace (i.e. kernel_thread() or fork_idle(),
- * as of 2.6.11).
- */
- int forking;
struct pt_regs regs;
int singlestep_syscall;
void *fault_addr;
@@ -58,7 +50,6 @@ struct thread_struct {
#define INIT_THREAD \
{ \
- .forking = 0, \
.regs = EMPTY_REGS, \
.fault_addr = NULL, \
.prev_sched = NULL, \
diff --git a/arch/um/include/shared/common-offsets.h b/arch/um/include/shared/common-offsets.h
index 40db8f71dea..2df313b6a58 100644
--- a/arch/um/include/shared/common-offsets.h
+++ b/arch/um/include/shared/common-offsets.h
@@ -7,16 +7,6 @@ DEFINE(UM_KERN_PAGE_MASK, PAGE_MASK);
DEFINE(UM_KERN_PAGE_SHIFT, PAGE_SHIFT);
DEFINE(UM_NSEC_PER_SEC, NSEC_PER_SEC);
-DEFINE_STR(UM_KERN_EMERG, KERN_EMERG);
-DEFINE_STR(UM_KERN_ALERT, KERN_ALERT);
-DEFINE_STR(UM_KERN_CRIT, KERN_CRIT);
-DEFINE_STR(UM_KERN_ERR, KERN_ERR);
-DEFINE_STR(UM_KERN_WARNING, KERN_WARNING);
-DEFINE_STR(UM_KERN_NOTICE, KERN_NOTICE);
-DEFINE_STR(UM_KERN_INFO, KERN_INFO);
-DEFINE_STR(UM_KERN_DEBUG, KERN_DEBUG);
-DEFINE_STR(UM_KERN_CONT, KERN_CONT);
-
DEFINE(UM_ELF_CLASS, ELF_CLASS);
DEFINE(UM_ELFCLASS32, ELFCLASS32);
DEFINE(UM_ELFCLASS64, ELFCLASS64);
diff --git a/arch/um/include/shared/user.h b/arch/um/include/shared/user.h
index 4fa82c055aa..cef06856333 100644
--- a/arch/um/include/shared/user.h
+++ b/arch/um/include/shared/user.h
@@ -26,6 +26,17 @@
extern void panic(const char *fmt, ...)
__attribute__ ((format (printf, 1, 2)));
+/* Requires preincluding include/linux/kern_levels.h */
+#define UM_KERN_EMERG KERN_EMERG
+#define UM_KERN_ALERT KERN_ALERT
+#define UM_KERN_CRIT KERN_CRIT
+#define UM_KERN_ERR KERN_ERR
+#define UM_KERN_WARNING KERN_WARNING
+#define UM_KERN_NOTICE KERN_NOTICE
+#define UM_KERN_INFO KERN_INFO
+#define UM_KERN_DEBUG KERN_DEBUG
+#define UM_KERN_CONT KERN_CONT
+
#ifdef UML_CONFIG_PRINTK
extern int printk(const char *fmt, ...)
__attribute__ ((format (printf, 1, 2)));
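
With the DEFINE_STR() copies gone, the UM_KERN_* names are plain aliases for the KERN_* prefixes that userspace-side objects pick up from the pre-included include/linux/kern_levels.h (see the Makefile.rules hunk below). The self-contained illustration here shows how such prefixes compose by string-literal concatenation; the KERN_SOH/KERN_ERR values are restated as an assumption rather than quoted from the header.

/* Level prefixes compose with the message by adjacent string literals. */
#include <stdio.h>

#define KERN_SOH	"\001"		/* assumed: ASCII SOH marks a level prefix */
#define KERN_ERR	KERN_SOH "3"
#define UM_KERN_ERR	KERN_ERR	/* the user.h aliases reduce to this */

int main(void)
{
	const char *msg = UM_KERN_ERR "something went wrong\n";

	/* skip the two prefix bytes when showing the human-readable part */
	printf("level byte '%c', text: %s", msg[1], msg + 2);
	return 0;
}
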
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index 6cade936636..8c82786da82 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -39,34 +39,21 @@ void flush_thread(void)
void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
{
+ get_safe_registers(regs->regs.gp, regs->regs.fp);
PT_REGS_IP(regs) = eip;
PT_REGS_SP(regs) = esp;
-}
-EXPORT_SYMBOL(start_thread);
-
-static long execve1(const char *file,
- const char __user *const __user *argv,
- const char __user *const __user *env)
-{
- long error;
-
- error = do_execve(file, argv, env, &current->thread.regs);
- if (error == 0) {
- task_lock(current);
- current->ptrace &= ~PT_DTRACE;
+ current->ptrace &= ~PT_DTRACE;
#ifdef SUBARCH_EXECVE1
- SUBARCH_EXECVE1(&current->thread.regs.regs);
+ SUBARCH_EXECVE1(regs->regs);
#endif
- task_unlock(current);
- }
- return error;
}
+EXPORT_SYMBOL(start_thread);
long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env)
{
long err;
- err = execve1(file, argv, env);
+ err = do_execve(file, argv, env, &current->thread.regs);
if (!err)
UML_LONGJMP(current->thread.exec_buf, 1);
return err;
@@ -81,7 +68,7 @@ long sys_execve(const char __user *file, const char __user *const __user *argv,
filename = getname(file);
error = PTR_ERR(filename);
if (IS_ERR(filename)) goto out;
- error = execve1(filename, argv, env);
+ error = do_execve(filename, argv, env, &current->thread.regs);
putname(filename);
out:
return error;
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 57fc7028714..c5f5afa5074 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -181,11 +181,12 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
struct pt_regs *regs)
{
void (*handler)(void);
+ int kthread = current->flags & PF_KTHREAD;
int ret = 0;
p->thread = (struct thread_struct) INIT_THREAD;
- if (current->thread.forking) {
+ if (!kthread) {
memcpy(&p->thread.regs.regs, &regs->regs,
sizeof(p->thread.regs.regs));
PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
@@ -195,8 +196,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
handler = fork_handler;
arch_copy_thread(&current->thread.arch, &p->thread.arch);
- }
- else {
+ } else {
get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
p->thread.request.u.thread = current->thread.request.u.thread;
handler = new_thread_handler;
@@ -204,7 +204,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
new_thread(task_stack_page(p), &p->thread.switch_buf, handler);
- if (current->thread.forking) {
+ if (!kthread) {
clear_flushed_tls(p);
/*
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index 7362d58efc2..cc9c2350e41 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -22,9 +22,13 @@ static void handle_signal(struct pt_regs *regs, unsigned long signr,
struct k_sigaction *ka, siginfo_t *info)
{
sigset_t *oldset = sigmask_to_save();
+ int singlestep = 0;
unsigned long sp;
int err;
+ if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
+ singlestep = 1;
+
/* Did we come from a system call? */
if (PT_REGS_SYSCALL_NR(regs) >= 0) {
/* If so, check system call restarting.. */
@@ -61,7 +65,7 @@ static void handle_signal(struct pt_regs *regs, unsigned long signr,
if (err)
force_sigsegv(signr, current);
else
- signal_delivered(signr, info, ka, regs, 0);
+ signal_delivered(signr, info, ka, regs, singlestep);
}
static int kern_do_signal(struct pt_regs *regs)
diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c
index f958cb876ee..a4c6d8eee74 100644
--- a/arch/um/kernel/syscall.c
+++ b/arch/um/kernel/syscall.c
@@ -17,25 +17,25 @@
long sys_fork(void)
{
- long ret;
-
- current->thread.forking = 1;
- ret = do_fork(SIGCHLD, UPT_SP(&current->thread.regs.regs),
+ return do_fork(SIGCHLD, UPT_SP(&current->thread.regs.regs),
&current->thread.regs, 0, NULL, NULL);
- current->thread.forking = 0;
- return ret;
}
long sys_vfork(void)
{
- long ret;
-
- current->thread.forking = 1;
- ret = do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
UPT_SP(&current->thread.regs.regs),
&current->thread.regs, 0, NULL, NULL);
- current->thread.forking = 0;
- return ret;
+}
+
+long sys_clone(unsigned long clone_flags, unsigned long newsp,
+ void __user *parent_tid, void __user *child_tid)
+{
+ if (!newsp)
+ newsp = UPT_SP(&current->thread.regs.regs);
+
+ return do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
+ child_tid);
}
long old_mmap(unsigned long addr, unsigned long len,
diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c
index f60238559af..0748fe0c8a7 100644
--- a/arch/um/os-Linux/time.c
+++ b/arch/um/os-Linux/time.c
@@ -114,7 +114,7 @@ static void deliver_alarm(void)
skew += this_tick - last_tick;
while (skew >= one_tick) {
- alarm_handler(SIGVTALRM, NULL);
+ alarm_handler(SIGVTALRM, NULL, NULL);
skew -= one_tick;
}
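
Passing a third NULL matches the three-argument SA_SIGINFO handler signature (signum, siginfo_t *, void *ucontext) that alarm_handler now has on the host side. Below is a generic, runnable userspace demo of that signature driven by ITIMER_VIRTUAL; it is an illustration, not UML code.

/* Install a SA_SIGINFO handler for SIGVTALRM and count virtual-timer ticks. */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>

static volatile sig_atomic_t ticks;

static void alarm_handler(int sig, siginfo_t *info, void *context)
{
	(void)sig; (void)info; (void)context;
	ticks++;
}

int main(void)
{
	struct sigaction sa;
	struct itimerval it;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = alarm_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGVTALRM, &sa, NULL);

	memset(&it, 0, sizeof(it));
	it.it_interval.tv_usec = 10000;		/* 10 ms of CPU time */
	it.it_value.tv_usec = 10000;
	setitimer(ITIMER_VIRTUAL, &it, NULL);

	while (ticks < 5)
		;				/* burn CPU so the virtual timer advances */

	printf("received %d SIGVTALRM ticks\n", (int)ticks);
	return 0;
}
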
diff --git a/arch/um/scripts/Makefile.rules b/arch/um/scripts/Makefile.rules
index d50270d26b4..15889df9b46 100644
--- a/arch/um/scripts/Makefile.rules
+++ b/arch/um/scripts/Makefile.rules
@@ -8,7 +8,7 @@ USER_OBJS += $(filter %_user.o,$(obj-y) $(obj-m) $(USER_SINGLE_OBJS))
USER_OBJS := $(foreach file,$(USER_OBJS),$(obj)/$(file))
$(USER_OBJS:.o=.%): \
- c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) -include user.h $(CFLAGS_$(basetarget).o)
+ c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) -include $(srctree)/include/linux/kern_levels.h -include user.h $(CFLAGS_$(basetarget).o)
# These are like USER_OBJS but filter USER_CFLAGS through unprofile instead of
# using it directly.
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ba2657c4921..50a1d1f9b6d 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -746,10 +746,10 @@ config SWIOTLB
def_bool y if X86_64
---help---
Support for software bounce buffers used on x86-64 systems
- which don't have a hardware IOMMU (e.g. the current generation
- of Intel's x86-64 CPUs). Using this PCI devices which can only
- access 32-bits of memory can be used on systems with more than
- 3 GB of memory. If unsure, say Y.
+ which don't have a hardware IOMMU. Using this, PCI devices
+ that can only address 32 bits of memory can be used on systems
+ with more than 3 GB of memory.
+ If unsure, say Y.

config IOMMU_HELPER
def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU)
@@ -1527,7 +1527,7 @@ config SECCOMP
If unsure, say Y. Only embedded should say N here.
config CC_STACKPROTECTOR
- bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
+ bool "Enable -fstack-protector buffer overflow detection"
---help---
This option turns on the -fstack-protector GCC feature. This
feature puts, at the beginning of functions, a canary value on
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index b0c5276861e..474ca35b1bc 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -27,6 +27,10 @@ ifeq ($(CONFIG_X86_32),y)
KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return
+ # Never want PIC in a 32-bit kernel, prevent breakage with GCC built
+ # with nonstandard options
+ KBUILD_CFLAGS += -fno-pic
+
# prevent gcc from keeping the stack 16 byte aligned
KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
@@ -138,7 +142,7 @@ KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
KBUILD_CFLAGS += $(mflags-y)
KBUILD_AFLAGS += $(mflags-y)
-archscripts:
+archscripts: scripts_basic
$(Q)$(MAKE) $(build)=arch/x86/tools relocs
###
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 5a747dd884d..f7535bedc33 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -57,7 +57,7 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
-Wall -Wstrict-prototypes \
-march=i386 -mregparm=3 \
-include $(srctree)/$(src)/code16gcc.h \
- -fno-strict-aliasing -fomit-frame-pointer \
+ -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
$(call cc-option, -ffreestanding) \
$(call cc-option, -fno-toplevel-reorder,\
$(call cc-option, -fno-unit-at-a-time)) \
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 441520e4174..a3ac52b29cb 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -33,6 +33,14 @@
#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
#define MCI_STATUS_AR (1ULL<<55) /* Action required */
+#define MCACOD 0xffff /* MCA Error Code */
+
+/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
+#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
+#define MCACOD_SCRUBMSK 0xfff0
+#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
+#define MCACOD_DATA 0x0134 /* Data Load */
+#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
/* MCi_MISC register defines */
#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f)
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index dab39350e51..cb4e43bce98 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -196,11 +196,16 @@ static inline u32 get_ibs_caps(void) { return 0; }
extern void perf_events_lapic_init(void);
/*
- * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
- * This flag is otherwise unused and ABI specified to be 0, so nobody should
- * care what we do with it.
+ * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
+ * unused and ABI specified to be 0, so nobody should care what we do with
+ * them.
+ *
+ * EXACT - the IP points to the exact instruction that triggered the
+ * event (HW bugs exempt).
+ * VM - original X86_VM_MASK; see set_linear_ip().
*/
#define PERF_EFLAGS_EXACT (1UL << 3)
+#define PERF_EFLAGS_VM (1UL << 5)
struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index b315a33867f..33692eaabab 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -12,8 +12,7 @@
* Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
*
- * These are fair FIFO ticket locks, which are currently limited to 256
- * CPUs.
+ * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
*
* (the type definitions are in asm/spinlock_types.h)
*/
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 93971e841dd..472b9b78301 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -51,7 +51,8 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s,
extern int m2p_add_override(unsigned long mfn, struct page *page,
struct gnttab_map_grant_ref *kmap_op);
-extern int m2p_remove_override(struct page *page, bool clear_pte);
+extern int m2p_remove_override(struct page *page,
+ struct gnttab_map_grant_ref *kmap_op);
extern struct page *m2p_find_override(unsigned long mfn);
extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 95bf99de905..1b8e5a03d94 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -25,10 +25,6 @@ unsigned long acpi_realmode_flags;
static char temp_stack[4096];
#endif
-asmlinkage void acpi_enter_s3(void)
-{
- acpi_enter_sleep_state(3, wake_sleep_flags);
-}
/**
* acpi_suspend_lowlevel - save kernel state
*
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h
index 5653a5791ec..67f59f8c695 100644
--- a/arch/x86/kernel/acpi/sleep.h
+++ b/arch/x86/kernel/acpi/sleep.h
@@ -2,7 +2,6 @@
* Variables and functions used by the code in sleep.c
*/
-#include <linux/linkage.h>
#include <asm/realmode.h>
extern unsigned long saved_video_mode;
@@ -11,7 +10,6 @@ extern long saved_magic;
extern int wakeup_pmode_return;
extern u8 wake_sleep_flags;
-extern asmlinkage void acpi_enter_s3(void);
extern unsigned long acpi_copy_wakeup_routine(unsigned long);
extern void wakeup_long64(void);
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index 72610839f03..13ab720573e 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -74,7 +74,9 @@ restore_registers:
ENTRY(do_suspend_lowlevel)
call save_processor_state
call save_registers
- call acpi_enter_s3
+ pushl $3
+ call acpi_enter_sleep_state
+ addl $4, %esp
# In case of S3 failure, we'll emerge here. Jump
# to ret_point to recover
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 014d1d28c39..8ea5164cbd0 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -71,7 +71,9 @@ ENTRY(do_suspend_lowlevel)
movq %rsi, saved_rsi
addq $8, %rsp
- call acpi_enter_s3
+ movl $3, %edi
+ xorl %eax, %eax
+ call acpi_enter_sleep_state
/* in case something went wrong, restore the machine status and go on */
jmp resume_point
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 931280ff829..ced4534baed 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -165,7 +165,7 @@ static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
#endif
#ifdef P6_NOP1
-static const unsigned char __initconst_or_module p6nops[] =
+static const unsigned char p6nops[] =
{
P6_NOP1,
P6_NOP2,
@@ -224,7 +224,7 @@ void __init arch_init_ideal_nops(void)
ideal_nops = intel_nops;
#endif
}
-
+ break;
default:
#ifdef CONFIG_X86_64
ideal_nops = k8_nops;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 406eee78468..c265593ec2c 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1204,7 +1204,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
BUG_ON(!cfg->vector);
vector = cfg->vector;
- for_each_cpu(cpu, cfg->domain)
+ for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
per_cpu(vector_irq, cpu)[vector] = -1;
cfg->vector = 0;
@@ -1212,7 +1212,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
if (likely(!cfg->move_in_progress))
return;
- for_each_cpu(cpu, cfg->old_domain) {
+ for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
vector++) {
if (per_cpu(vector_irq, cpu)[vector] != irq)
@@ -1356,6 +1356,16 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
if (!IO_APIC_IRQ(irq))
return;
+ /*
+ * For legacy irqs, cfg->domain starts with cpu 0. Now that IO-APIC
+ * can handle this irq and the apic driver is finalized at this point,
+ * update the cfg->domain.
+ */
+ if (irq < legacy_pic->nr_legacy_irqs &&
+ cpumask_equal(cfg->domain, cpumask_of(0)))
+ apic->vector_allocation_domain(0, cfg->domain,
+ apic->target_cpus());
+
if (assign_irq_vector(irq, cfg, apic->target_cpus()))
return;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 46d8786d655..a5fbc3c5fcc 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -144,6 +144,8 @@ static int __init x86_xsave_setup(char *s)
{
setup_clear_cpu_cap(X86_FEATURE_XSAVE);
setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+ setup_clear_cpu_cap(X86_FEATURE_AVX);
+ setup_clear_cpu_cap(X86_FEATURE_AVX2);
return 1;
}
__setup("noxsave", x86_xsave_setup);
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 413c2ced887..13017626f9a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -55,13 +55,6 @@ static struct severity {
#define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S)
#define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR)
#define MCI_ADDR (MCI_STATUS_ADDRV|MCI_STATUS_MISCV)
-#define MCACOD 0xffff
-/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
-#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
-#define MCACOD_SCRUBMSK 0xfff0
-#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
-#define MCACOD_DATA 0x0134 /* Data Load */
-#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
MCESEV(
NO, "Invalid",
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 5e095f873e3..292d0258311 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -103,6 +103,8 @@ DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
static DEFINE_PER_CPU(struct work_struct, mce_work);
+static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
+
/*
* CPU/chipset specific EDAC code can register a notifier call here to print
* MCE errors in a human-readable form.
@@ -650,14 +652,18 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
* Do a quick check if any of the events requires a panic.
* This decides if we keep the events around or clear them.
*/
-static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp)
+static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
+ struct pt_regs *regs)
{
int i, ret = 0;
for (i = 0; i < banks; i++) {
m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
- if (m->status & MCI_STATUS_VAL)
+ if (m->status & MCI_STATUS_VAL) {
__set_bit(i, validp);
+ if (quirk_no_way_out)
+ quirk_no_way_out(i, m, regs);
+ }
if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
ret = 1;
}
@@ -1040,7 +1046,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
*final = m;
memset(valid_banks, 0, sizeof(valid_banks));
- no_way_out = mce_no_way_out(&m, &msg, valid_banks);
+ no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);
barrier();
@@ -1418,6 +1424,34 @@ static void __mcheck_cpu_init_generic(void)
}
}
+/*
+ * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
+ * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
+ * Vol 3B Table 15-20). But this confuses both the code that determines
+ * whether the machine check occurred in kernel or user mode, and also
+ * the severity assessment code. Pretend that EIPV was set, and take the
+ * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
+ */
+static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
+{
+ if (bank != 0)
+ return;
+ if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
+ return;
+ if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
+ MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
+ MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
+ MCACOD)) !=
+ (MCI_STATUS_UC|MCI_STATUS_EN|
+ MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
+ MCI_STATUS_AR|MCACOD_INSTR))
+ return;
+
+ m->mcgstatus |= MCG_STATUS_EIPV;
+ m->ip = regs->ip;
+ m->cs = regs->cs;
+}
+
/* Add per CPU specific workarounds here */
static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
@@ -1515,6 +1549,9 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
*/
if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0)
mce_bootlog = 0;
+
+ if (c->x86 == 6 && c->x86_model == 45)
+ quirk_no_way_out = quirk_sandybridge_ifu;
}
if (monarch_timeout < 0)
monarch_timeout = 0;
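
quirk_no_way_out is the nullable hook wired up above: it stays NULL unless the model check installs quirk_sandybridge_ifu, and mce_no_way_out() only calls it for banks whose status is valid. A minimal standalone sketch of that hook pattern follows, with demo types and names standing in for the MCE structures.

/* File-scope hook that is only invoked when a model-specific quirk installs it. */
#include <stdio.h>

struct demo_mce { unsigned long status; };

static void (*quirk_hook)(int bank, struct demo_mce *m);

static void demo_quirk(int bank, struct demo_mce *m)
{
	if (bank == 0)
		m->status |= 1UL << 63;	/* pretend to patch up the record */
}

static void scan_banks(struct demo_mce *banks, int n)
{
	for (int i = 0; i < n; i++) {
		if (!banks[i].status)
			continue;		/* skip banks with nothing logged */
		if (quirk_hook)			/* only if a quirk was installed */
			quirk_hook(i, &banks[i]);
	}
}

int main(void)
{
	struct demo_mce banks[2] = { { 1 }, { 0 } };

	quirk_hook = demo_quirk;		/* model-specific setup would do this */
	scan_banks(banks, 2);
	printf("bank0 status 0x%lx\n", banks[0].status);
	return 0;
}
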
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 29557aa06dd..915b876edd1 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -32,6 +32,8 @@
#include <asm/smp.h>
#include <asm/alternative.h>
#include <asm/timer.h>
+#include <asm/desc.h>
+#include <asm/ldt.h>
#include "perf_event.h"
@@ -1738,6 +1740,29 @@ valid_user_frame(const void __user *fp, unsigned long size)
return (__range_not_ok(fp, size, TASK_SIZE) == 0);
}
+static unsigned long get_segment_base(unsigned int segment)
+{
+ struct desc_struct *desc;
+ int idx = segment >> 3;
+
+ if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
+ if (idx > LDT_ENTRIES)
+ return 0;
+
+ if (idx > current->active_mm->context.size)
+ return 0;
+
+ desc = current->active_mm->context.ldt;
+ } else {
+ if (idx > GDT_ENTRIES)
+ return 0;
+
+ desc = __this_cpu_ptr(&gdt_page.gdt[0]);
+ }
+
+ return get_desc_base(desc + idx);
+}
+
#ifdef CONFIG_COMPAT
#include <asm/compat.h>
@@ -1746,13 +1771,17 @@ static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
/* 32-bit process in 64-bit kernel. */
+ unsigned long ss_base, cs_base;
struct stack_frame_ia32 frame;
const void __user *fp;
if (!test_thread_flag(TIF_IA32))
return 0;
- fp = compat_ptr(regs->bp);
+ cs_base = get_segment_base(regs->cs);
+ ss_base = get_segment_base(regs->ss);
+
+ fp = compat_ptr(ss_base + regs->bp);
while (entry->nr < PERF_MAX_STACK_DEPTH) {
unsigned long bytes;
frame.next_frame = 0;
@@ -1765,8 +1794,8 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
if (!valid_user_frame(fp, sizeof(frame)))
break;
- perf_callchain_store(entry, frame.return_address);
- fp = compat_ptr(frame.next_frame);
+ perf_callchain_store(entry, cs_base + frame.return_address);
+ fp = compat_ptr(ss_base + frame.next_frame);
}
return 1;
}
@@ -1789,6 +1818,12 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
return;
}
+ /*
+ * We don't know what to do with VM86 stacks.. ignore them for now.
+ */
+ if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
+ return;
+
fp = (void __user *)regs->bp;
perf_callchain_store(entry, regs->ip);
@@ -1816,16 +1851,50 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
}
}
-unsigned long perf_instruction_pointer(struct pt_regs *regs)
+/*
+ * Deal with code segment offsets for the various execution modes:
+ *
+ * VM86 - the good olde 16 bit days, where the linear address is
+ * 20 bits and we use regs->ip + 0x10 * regs->cs.
+ *
+ * IA32 - Where we need to look at GDT/LDT segment descriptor tables
+ * to figure out what the 32bit base address is.
+ *
+ * X32 - has TIF_X32 set, but is running in x86_64
+ *
+ * X86_64 - CS,DS,SS,ES are all zero based.
+ */
+static unsigned long code_segment_base(struct pt_regs *regs)
{
- unsigned long ip;
+ /*
+ * If we are in VM86 mode, add the segment offset to convert to a
+ * linear address.
+ */
+ if (regs->flags & X86_VM_MASK)
+ return 0x10 * regs->cs;
+
+ /*
+ * For IA32 we look at the GDT/LDT segment base to convert the
+ * effective IP to a linear address.
+ */
+#ifdef CONFIG_X86_32
+ if (user_mode(regs) && regs->cs != __USER_CS)
+ return get_segment_base(regs->cs);
+#else
+ if (test_thread_flag(TIF_IA32)) {
+ if (user_mode(regs) && regs->cs != __USER32_CS)
+ return get_segment_base(regs->cs);
+ }
+#endif
+ return 0;
+}
+unsigned long perf_instruction_pointer(struct pt_regs *regs)
+{
if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
- ip = perf_guest_cbs->get_guest_ip();
- else
- ip = instruction_pointer(regs);
+ return perf_guest_cbs->get_guest_ip();
- return ip;
+ return regs->ip + code_segment_base(regs);
}
unsigned long perf_misc_flags(struct pt_regs *regs)
@@ -1838,7 +1907,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
else
misc |= PERF_RECORD_MISC_GUEST_KERNEL;
} else {
- if (!kernel_ip(regs->ip))
+ if (user_mode(regs))
misc |= PERF_RECORD_MISC_USER;
else
misc |= PERF_RECORD_MISC_KERNEL;
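
get_segment_base() above leans on get_desc_base() to reassemble a 32-bit base from the three scattered base fields of an x86 segment descriptor before it is added to the effective IP. A self-contained sketch of that extraction follows; the struct is a local stand-in following the architectural descriptor layout, not the kernel's asm/desc_defs.h definition.

/* Reassemble a descriptor base from its low, middle, and high fields. */
#include <stdio.h>
#include <stdint.h>

struct demo_desc {
	uint16_t limit0;
	uint16_t base0;		/* base bits 0..15  */
	uint8_t  base1;		/* base bits 16..23 */
	uint8_t  access;	/* type/S/DPL/P, ignored here */
	uint8_t  limit1_flags;
	uint8_t  base2;		/* base bits 24..31 */
};

static uint32_t demo_desc_base(const struct demo_desc *d)
{
	return (uint32_t)d->base0 | ((uint32_t)d->base1 << 16) |
	       ((uint32_t)d->base2 << 24);
}

int main(void)
{
	struct demo_desc d = { .limit0 = 0xffff, .base0 = 0x5678,
			       .base1 = 0x34, .base2 = 0x12 };

	/* a non-zero base means regs->ip is only an offset, not a linear address */
	printf("segment base = 0x%08x\n", demo_desc_base(&d));
	return 0;
}
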
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 821d53b696d..8b6defe7eef 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -516,6 +516,26 @@ static inline bool kernel_ip(unsigned long ip)
#endif
}
+/*
+ * Not all PMUs provide the right context information to place the reported IP
+ * into full context. Specifically segment registers are typically not
+ * supplied.
+ *
+ * Assuming the address is a linear address (it is for IBS), we fake the CS and
+ * vm86 mode using the known zero-based code segment and 'fix up' the registers
+ * to reflect this.
+ *
+ * Intel PEBS/LBR appear to typically provide the effective address, nothing
+ * much we can do about that but pray and treat it like a linear address.
+ */
+static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
+{
+ regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
+ if (regs->flags & X86_VM_MASK)
+ regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
+ regs->ip = ip;
+}
+
#ifdef CONFIG_CPU_SUP_AMD
int amd_pmu_init(void);
@@ -566,6 +586,8 @@ extern struct event_constraint intel_westmere_pebs_event_constraints[];
extern struct event_constraint intel_snb_pebs_event_constraints[];
+extern struct event_constraint intel_ivb_pebs_event_constraints[];
+
struct event_constraint *intel_pebs_constraints(struct perf_event *event);
void intel_pmu_pebs_enable(struct perf_event *event);
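
set_linear_ip() uses a single XOR to swap the hardware VM86 flag for the software PERF_EFLAGS_VM bit, so perf_callchain_user() can still recognize vm86 samples after the IP has been linearized. A small demo of that bit juggling: PERF_EFLAGS_VM matches the header above, while the X86_VM_MASK value (eflags bit 17) is restated here as an assumption to keep the demo standalone.

/* XOR both bits at once: clears the set hardware bit, raises the software bit. */
#include <stdio.h>

#define PERF_EFLAGS_VM	(1UL << 5)
#define X86_VM_MASK	(1UL << 17)	/* assumed: X86_EFLAGS_VM */

int main(void)
{
	unsigned long flags = X86_VM_MASK | 0x202;	/* a vm86-mode sample */

	if (flags & X86_VM_MASK)
		flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);

	printf("VM bit now %d, PERF_EFLAGS_VM now %d\n",
	       (int)!!(flags & X86_VM_MASK), (int)!!(flags & PERF_EFLAGS_VM));
	return 0;
}
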
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index da9bcdcd985..eebd5ffe1bb 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -13,6 +13,8 @@
#include <asm/apic.h>
+#include "perf_event.h"
+
static u32 ibs_caps;
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
@@ -207,6 +209,15 @@ static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
return -EOPNOTSUPP;
}
+static const struct perf_event_attr ibs_notsupp = {
+ .exclude_user = 1,
+ .exclude_kernel = 1,
+ .exclude_hv = 1,
+ .exclude_idle = 1,
+ .exclude_host = 1,
+ .exclude_guest = 1,
+};
+
static int perf_ibs_init(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
@@ -227,6 +238,9 @@ static int perf_ibs_init(struct perf_event *event)
if (event->pmu != &perf_ibs->pmu)
return -ENOENT;
+ if (perf_flags(&event->attr) & perf_flags(&ibs_notsupp))
+ return -EINVAL;
+
if (config & ~perf_ibs->config_mask)
return -EINVAL;
@@ -536,7 +550,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
regs.flags &= ~PERF_EFLAGS_EXACT;
} else {
- instruction_pointer_set(&regs, ibs_data.regs[1]);
+ set_linear_ip(&regs, ibs_data.regs[1]);
regs.flags |= PERF_EFLAGS_EXACT;
}
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 382366977d4..6bca492b854 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1522,8 +1522,16 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
+ /*
+ * If a PMU counter has PEBS enabled, it is not enough to disable the
+ * counter on a guest entry, since a PEBS memory write can overshoot the
+ * guest entry and corrupt guest memory. Disabling PEBS solves the problem.
+ */
+ arr[1].msr = MSR_IA32_PEBS_ENABLE;
+ arr[1].host = cpuc->pebs_enabled;
+ arr[1].guest = 0;
- *nr = 1;
+ *nr = 2;
return arr;
}
@@ -2000,6 +2008,7 @@ __init int intel_pmu_init(void)
break;
case 28: /* Atom */
+ case 54: /* Cedarview */
memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -2039,7 +2048,6 @@ __init int intel_pmu_init(void)
case 42: /* SandyBridge */
case 45: /* SandyBridge, "Romely-EP" */
x86_add_quirk(intel_sandybridge_quirk);
- case 58: /* IvyBridge */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
@@ -2064,6 +2072,29 @@ __init int intel_pmu_init(void)
pr_cont("SandyBridge events, ");
break;
+ case 58: /* IvyBridge */
+ memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
+ sizeof(hw_cache_extra_regs));
+
+ intel_pmu_lbr_init_snb();
+
+ x86_pmu.event_constraints = intel_snb_event_constraints;
+ x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
+ x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
+ x86_pmu.extra_regs = intel_snb_extra_regs;
+ /* all extra regs are per-cpu when HT is on */
+ x86_pmu.er_flags |= ERF_HAS_RSP_1;
+ x86_pmu.er_flags |= ERF_NO_HT_SHARING;
+
+ /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
+ intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
+ X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
+
+ pr_cont("IvyBridge events, ");
+ break;
+
default:
switch (x86_pmu.version) {
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 629ae0b7ad9..826054a4f2e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -407,6 +407,20 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
EVENT_CONSTRAINT_END
};
+struct event_constraint intel_ivb_pebs_event_constraints[] = {
+ INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
+ INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
+ INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
+ INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
+ EVENT_CONSTRAINT_END
+};
+
struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
struct event_constraint *c;
@@ -499,7 +513,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
* We sampled a branch insn, rewind using the LBR stack
*/
if (ip == to) {
- regs->ip = from;
+ set_linear_ip(regs, from);
return 1;
}
@@ -529,7 +543,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
} while (to < ip);
if (to == ip) {
- regs->ip = old_to;
+ set_linear_ip(regs, old_to);
return 1;
}
@@ -569,7 +583,8 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
* A possible PERF_SAMPLE_REGS will have to transfer all regs.
*/
regs = *iregs;
- regs.ip = pebs->ip;
+ regs.flags = pebs->flags;
+ set_linear_ip(&regs, pebs->ip);
regs.bp = pebs->bp;
regs.sp = pebs->sp;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 520b4265fcd..da02e9cc375 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -686,7 +686,8 @@ void intel_pmu_lbr_init_atom(void)
* to have an operational LBR which can freeze
* on PMU interrupt
*/
- if (boot_cpu_data.x86_mask < 10) {
+ if (boot_cpu_data.x86_model == 28
+ && boot_cpu_data.x86_mask < 10) {
pr_cont("LBR disabled due to erratum");
return;
}
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 7563fda9f03..38e4894165b 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -661,6 +661,11 @@ static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
}
}
+static struct uncore_event_desc snb_uncore_events[] = {
+ INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
+ { /* end: all zeroes */ },
+};
+
static struct attribute *snb_uncore_formats_attr[] = {
&format_attr_event.attr,
&format_attr_umask.attr,
@@ -704,6 +709,7 @@ static struct intel_uncore_type snb_uncore_cbox = {
.constraints = snb_uncore_cbox_constraints,
.ops = &snb_uncore_msr_ops,
.format_group = &snb_uncore_format_group,
+ .event_descs = snb_uncore_events,
};
static struct intel_uncore_type *snb_msr_uncores[] = {
@@ -796,7 +802,6 @@ static struct intel_uncore_type *nhm_msr_uncores[] = {
DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
-DEFINE_UNCORE_FORMAT_ATTR(mm_cfg, mm_cfg, "config:63");
DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");
@@ -902,16 +907,21 @@ static struct attribute_group nhmex_uncore_cbox_format_group = {
.attrs = nhmex_uncore_cbox_formats_attr,
};
+/* msr offset for each instance of cbox */
+static unsigned nhmex_cbox_msr_offsets[] = {
+ 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
+};
+
static struct intel_uncore_type nhmex_uncore_cbox = {
.name = "cbox",
.num_counters = 6,
- .num_boxes = 8,
+ .num_boxes = 10,
.perf_ctr_bits = 48,
.event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0,
.perf_ctr = NHMEX_C0_MSR_PMON_CTR0,
.event_mask = NHMEX_PMON_RAW_EVENT_MASK,
.box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL,
- .msr_offset = NHMEX_C_MSR_OFFSET,
+ .msr_offsets = nhmex_cbox_msr_offsets,
.pair_ctr_ctl = 1,
.ops = &nhmex_uncore_ops,
.format_group = &nhmex_uncore_cbox_format_group
@@ -1032,24 +1042,22 @@ static struct intel_uncore_type nhmex_uncore_bbox = {
static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
- struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
- struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
- if (event->attr.config & NHMEX_S_PMON_MM_CFG_EN) {
- reg1->config = event->attr.config1;
- reg2->config = event->attr.config2;
- } else {
- reg1->config = ~0ULL;
- reg2->config = ~0ULL;
- }
+ /* only TO_R_PROG_EV event uses the match/mask register */
+ if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
+ NHMEX_S_EVENT_TO_R_PROG_EV)
+ return 0;
if (box->pmu->pmu_idx == 0)
reg1->reg = NHMEX_S0_MSR_MM_CFG;
else
reg1->reg = NHMEX_S1_MSR_MM_CFG;
-
reg1->idx = 0;
-
+ reg1->config = event->attr.config1;
+ reg2->config = event->attr.config2;
return 0;
}
@@ -1059,8 +1067,8 @@ static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct per
struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
- wrmsrl(reg1->reg, 0);
- if (reg1->config != ~0ULL || reg2->config != ~0ULL) {
+ if (reg1->idx != EXTRA_REG_NONE) {
+ wrmsrl(reg1->reg, 0);
wrmsrl(reg1->reg + 1, reg1->config);
wrmsrl(reg1->reg + 2, reg2->config);
wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
@@ -1074,7 +1082,6 @@ static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
&format_attr_edge.attr,
&format_attr_inv.attr,
&format_attr_thresh8.attr,
- &format_attr_mm_cfg.attr,
&format_attr_match.attr,
&format_attr_mask.attr,
NULL,
@@ -1142,6 +1149,9 @@ static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
EVENT_EXTRA_END
};
+/* Nehalem-EX or Westmere-EX? */
+bool uncore_nhmex;
+
static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
{
struct intel_uncore_extra_reg *er;
@@ -1171,18 +1181,29 @@ static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64
return false;
/* mask of the shared fields */
- mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
+ if (uncore_nhmex)
+ mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
+ else
+ mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
raw_spin_lock_irqsave(&er->lock, flags);
/* add mask of the non-shared field if it's in use */
- if (__BITS_VALUE(atomic_read(&er->ref), idx, 8))
- mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
+ if (uncore_nhmex)
+ mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ else
+ mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ }
if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
atomic_add(1 << (idx * 8), &er->ref);
- mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
- NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ if (uncore_nhmex)
+ mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
+ NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ else
+ mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
+ WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
er->config &= ~mask;
er->config |= (config & mask);
ret = true;
@@ -1216,7 +1237,10 @@ u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
/* get the non-shared control bits and shift them */
idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
- config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ if (uncore_nhmex)
+ config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
+ else
+ config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
if (new_idx > orig_idx) {
idx = new_idx - orig_idx;
config <<= 3 * idx;
@@ -1226,6 +1250,10 @@ u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
}
/* add the shared control bits back */
+ if (uncore_nhmex)
+ config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
+ else
+ config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
if (modify) {
/* adjust the main event selector */
@@ -1264,7 +1292,8 @@ again:
}
/* for the match/mask registers */
- if ((uncore_box_is_fake(box) || !reg2->alloc) &&
+ if (reg2->idx != EXTRA_REG_NONE &&
+ (uncore_box_is_fake(box) || !reg2->alloc) &&
!nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
goto fail;
@@ -1278,7 +1307,8 @@ again:
if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
nhmex_mbox_alter_er(event, idx[0], true);
reg1->alloc |= alloc;
- reg2->alloc = 1;
+ if (reg2->idx != EXTRA_REG_NONE)
+ reg2->alloc = 1;
}
return NULL;
fail:
@@ -1342,9 +1372,6 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event
struct extra_reg *er;
unsigned msr;
int reg_idx = 0;
-
- if (WARN_ON_ONCE(reg1->idx != -1))
- return -EINVAL;
/*
* The mbox events may require 2 extra MSRs at the most. But only
* the lower 32 bits in these MSRs are significant, so we can use
@@ -1355,11 +1382,6 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event
continue;
if (event->attr.config1 & ~er->valid_mask)
return -EINVAL;
- if (er->idx == __BITS_VALUE(reg1->idx, 0, 8) ||
- er->idx == __BITS_VALUE(reg1->idx, 1, 8))
- continue;
- if (WARN_ON_ONCE(reg_idx >= 2))
- return -EINVAL;
msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
@@ -1368,6 +1390,8 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event
/* always use the 32~63 bits to pass the PLD config */
if (er->idx == EXTRA_REG_NHMEX_M_PLD)
reg_idx = 1;
+ else if (WARN_ON_ONCE(reg_idx > 0))
+ return -EINVAL;
reg1->idx &= ~(0xff << (reg_idx * 8));
reg1->reg &= ~(0xffff << (reg_idx * 16));
@@ -1376,17 +1400,21 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event
reg1->config = event->attr.config1;
reg_idx++;
}
- /* use config2 to pass the filter config */
- reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
- if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
- reg2->config = event->attr.config2;
- else
- reg2->config = ~0ULL;
- if (box->pmu->pmu_idx == 0)
- reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
- else
- reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
-
+ /*
+ * The mbox only provides the ability to perform address matching
+ * for the PLD events.
+ */
+ if (reg_idx == 2) {
+ reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
+ if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
+ reg2->config = event->attr.config2;
+ else
+ reg2->config = ~0ULL;
+ if (box->pmu->pmu_idx == 0)
+ reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
+ else
+ reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
+ }
return 0;
}
@@ -1422,34 +1450,36 @@ static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct per
wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
nhmex_mbox_shared_reg_config(box, idx));
- wrmsrl(reg2->reg, 0);
- if (reg2->config != ~0ULL) {
- wrmsrl(reg2->reg + 1,
- reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
- wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
- (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
- wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
+ if (reg2->idx != EXTRA_REG_NONE) {
+ wrmsrl(reg2->reg, 0);
+ if (reg2->config != ~0ULL) {
+ wrmsrl(reg2->reg + 1,
+ reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
+ wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
+ (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
+ wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
+ }
}
wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
}
-DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
-DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5");
-DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6");
-DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7");
-DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13");
-DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21");
-DEFINE_UNCORE_FORMAT_ATTR(filter_cfg, filter_cfg, "config2:63");
-DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33");
-DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61");
-DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31");
-DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63");
+DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
+DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5");
+DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6");
+DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7");
+DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13");
+DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21");
+DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63");
+DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33");
+DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61");
+DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63");
static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
&format_attr_count_mode.attr,
@@ -1458,7 +1488,7 @@ static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
&format_attr_flag_mode.attr,
&format_attr_inc_sel.attr,
&format_attr_set_flag_sel.attr,
- &format_attr_filter_cfg.attr,
+ &format_attr_filter_cfg_en.attr,
&format_attr_filter_match.attr,
&format_attr_filter_mask.attr,
&format_attr_dsp.attr,
@@ -1482,6 +1512,12 @@ static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
{ /* end: all zeroes */ },
};
+static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
+ INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
+ INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
+ { /* end: all zeroes */ },
+};
+
static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
NHMEX_UNCORE_OPS_COMMON_INIT(),
.enable_event = nhmex_mbox_msr_enable_event,
@@ -1513,7 +1549,7 @@ void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
int port;
- /* adjust the main event selector */
+ /* adjust the main event selector and extra register index */
if (reg1->idx % 2) {
reg1->idx--;
hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
@@ -1522,29 +1558,17 @@ void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
}
- /* adjust address or config of extra register */
+ /* adjust extra register config */
port = reg1->idx / 6 + box->pmu->pmu_idx * 4;
switch (reg1->idx % 6) {
- case 0:
- reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port);
- break;
- case 1:
- reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port);
- break;
case 2:
- /* the 8~15 bits to the 0~7 bits */
+ /* shift the 8~15 bits to the 0~7 bits */
reg1->config >>= 8;
break;
case 3:
- /* the 0~7 bits to the 8~15 bits */
+ /* shift the 0~7 bits to the 8~15 bits */
reg1->config <<= 8;
break;
- case 4:
- reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port);
- break;
- case 5:
- reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port);
- break;
};
}
@@ -1671,7 +1695,7 @@ static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event
struct hw_perf_event *hwc = &event->hw;
struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
- int port, idx;
+ int idx;
idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
@@ -1681,27 +1705,11 @@ static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event
reg1->idx = idx;
reg1->config = event->attr.config1;
- port = idx / 6 + box->pmu->pmu_idx * 4;
- idx %= 6;
- switch (idx) {
- case 0:
- reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port);
- break;
- case 1:
- reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port);
- break;
- case 2:
- case 3:
- reg1->reg = NHMEX_R_MSR_PORTN_QLX_CFG(port);
- break;
+ switch (idx % 6) {
case 4:
case 5:
- if (idx == 4)
- reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port);
- else
- reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port);
- reg2->config = event->attr.config2;
hwc->config |= event->attr.config & (~0ULL << 32);
+ reg2->config = event->attr.config2;
break;
};
return 0;
@@ -1727,28 +1735,34 @@ static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct per
struct hw_perf_event *hwc = &event->hw;
struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
- int idx, er_idx;
+ int idx, port;
- idx = reg1->idx % 6;
- er_idx = idx;
- if (er_idx > 2)
- er_idx--;
- er_idx += (reg1->idx / 6) * 5;
+ idx = reg1->idx;
+ port = idx / 6 + box->pmu->pmu_idx * 4;
- switch (idx) {
+ switch (idx % 6) {
case 0:
+ wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
+ break;
case 1:
- wrmsrl(reg1->reg, reg1->config);
+ wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
break;
case 2:
case 3:
- wrmsrl(reg1->reg, nhmex_rbox_shared_reg_config(box, er_idx));
+ wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
+ nhmex_rbox_shared_reg_config(box, 2 + (idx / 6) * 5));
break;
case 4:
+ wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
+ hwc->config >> 32);
+ wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
+ wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
+ break;
case 5:
- wrmsrl(reg1->reg, reg1->config);
- wrmsrl(reg1->reg + 1, hwc->config >> 32);
- wrmsrl(reg1->reg + 2, reg2->config);
+ wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
+ hwc->config >> 32);
+ wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
+ wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
break;
};
@@ -1756,8 +1770,8 @@ static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct per
(hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
}
-DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config:32-63");
-DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config1:0-63");
+DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
+DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");
@@ -2303,6 +2317,7 @@ int uncore_pmu_event_init(struct perf_event *event)
event->hw.idx = -1;
event->hw.last_tag = ~0ULL;
event->hw.extra_reg.idx = EXTRA_REG_NONE;
+ event->hw.branch_reg.idx = EXTRA_REG_NONE;
if (event->attr.config == UNCORE_FIXED_EVENT) {
/* no fixed counter */
@@ -2373,7 +2388,7 @@ static void __init uncore_type_exit(struct intel_uncore_type *type)
type->attr_groups[1] = NULL;
}
-static void uncore_types_exit(struct intel_uncore_type **types)
+static void __init uncore_types_exit(struct intel_uncore_type **types)
{
int i;
for (i = 0; types[i]; i++)
@@ -2814,7 +2829,13 @@ static int __init uncore_cpu_init(void)
snbep_uncore_cbox.num_boxes = max_cores;
msr_uncores = snbep_msr_uncores;
break;
- case 46:
+ case 46: /* Nehalem-EX */
+ uncore_nhmex = true;
+ case 47: /* Westmere-EX aka. Xeon E7 */
+ if (!uncore_nhmex)
+ nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
+ if (nhmex_uncore_cbox.num_boxes > max_cores)
+ nhmex_uncore_cbox.num_boxes = max_cores;
msr_uncores = nhmex_msr_uncores;
break;
default:
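Note that case 46 deliberately falls through into case 47, so Nehalem-EX first sets uncore_nhmex and then shares the Westmere-EX setup path. A small standalone sketch of that dispatch (the function and string names here are invented for illustration; only the model numbers and the flag come from the hunk above):

    #include <stdbool.h>
    #include <stdio.h>

    const char *ex_uncore_flavour(int model, bool *nhmex)
    {
            *nhmex = false;
            switch (model) {
            case 46:                        /* Nehalem-EX */
                    *nhmex = true;
                    /* fall through */
            case 47:                        /* Westmere-EX, aka Xeon E7 */
                    return *nhmex ? "nhmex" : "wsmex";
            default:
                    return "other";
            }
    }

    int main(void)
    {
            bool nhmex;
            printf("%s\n", ex_uncore_flavour(46, &nhmex));  /* prints nhmex */
            printf("%s\n", ex_uncore_flavour(47, &nhmex));  /* prints wsmex */
            return 0;
    }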
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index f3851892e07..5b81c1856aa 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -5,7 +5,7 @@
#include "perf_event.h"
#define UNCORE_PMU_NAME_LEN 32
-#define UNCORE_PMU_HRTIMER_INTERVAL (60 * NSEC_PER_SEC)
+#define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC)
#define UNCORE_FIXED_EVENT 0xff
#define UNCORE_PMC_IDX_MAX_GENERIC 8
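The new LL suffix on UNCORE_PMU_HRTIMER_INTERVAL matters when long is only 32 bits wide: 60 * NSEC_PER_SEC then overflows before the hrtimer ever sees it. A small demonstration, assuming NSEC_PER_SEC is 1000000000 as in the kernel's time headers (the wrap is shown with explicit 32-bit unsigned arithmetic to keep the example well defined):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000L

    int main(void)
    {
            uint32_t wrapped = (uint32_t)60 * (uint32_t)NSEC_PER_SEC;
            long long wanted = 60LL * NSEC_PER_SEC;

            printf("32-bit product: %u\n", wrapped);   /* 4165425152          */
            printf("64-bit product: %lld\n", wanted);  /* 60000000000, 60 sec */
            return 0;
    }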
@@ -230,6 +230,7 @@
#define NHMEX_S1_MSR_MASK 0xe5a
#define NHMEX_S_PMON_MM_CFG_EN (0x1ULL << 63)
+#define NHMEX_S_EVENT_TO_R_PROG_EV 0
/* NHM-EX Mbox */
#define NHMEX_M0_MSR_GLOBAL_CTL 0xca0
@@ -275,18 +276,12 @@
NHMEX_M_PMON_CTL_INC_SEL_MASK | \
NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK)
-
-#define NHMEX_M_PMON_ZDP_CTL_FVC_FVID_MASK 0x1f
-#define NHMEX_M_PMON_ZDP_CTL_FVC_BCMD_MASK (0x7 << 5)
-#define NHMEX_M_PMON_ZDP_CTL_FVC_RSP_MASK (0x7 << 8)
-#define NHMEX_M_PMON_ZDP_CTL_FVC_PBOX_INIT_ERR (1 << 23)
-#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK \
- (NHMEX_M_PMON_ZDP_CTL_FVC_FVID_MASK | \
- NHMEX_M_PMON_ZDP_CTL_FVC_BCMD_MASK | \
- NHMEX_M_PMON_ZDP_CTL_FVC_RSP_MASK | \
- NHMEX_M_PMON_ZDP_CTL_FVC_PBOX_INIT_ERR)
+#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 11) - 1) | (1 << 23))
#define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7 << (11 + 3 * (n)))
+#define WSMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 12) - 1) | (1 << 24))
+#define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7 << (12 + 3 * (n)))
+
/*
* use the 9~13 bits to select event if the 7th bit is not set,
* otherwise use the 19~21 bits to select event.
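A worked expansion of the new FVC masks: on Nehalem-EX the shared field is bits 0-10 plus the PBOX init-error bit 23, with each event slot n owning 3 bits from bit 11 + 3n; Westmere-EX shifts the whole layout up by one bit. A standalone check of the macro arithmetic above (values only, no kernel headers needed):

    #include <assert.h>

    #define NHMEX_M_PMON_ZDP_CTL_FVC_MASK          (((1 << 11) - 1) | (1 << 23))
    #define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7 << (11 + 3 * (n)))
    #define WSMEX_M_PMON_ZDP_CTL_FVC_MASK          (((1 << 12) - 1) | (1 << 24))
    #define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7 << (12 + 3 * (n)))

    int main(void)
    {
            assert(NHMEX_M_PMON_ZDP_CTL_FVC_MASK == 0x8007ff);
            assert(WSMEX_M_PMON_ZDP_CTL_FVC_MASK == 0x1000fff);

            /* event slot 0 starts one bit higher on Westmere-EX */
            assert(NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(0) == 0x3800);
            assert(WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(0) == 0x7000);
            return 0;
    }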
@@ -368,6 +363,7 @@ struct intel_uncore_type {
unsigned num_shared_regs:8;
unsigned single_fixed:1;
unsigned pair_ctr_ctl:1;
+ unsigned *msr_offsets;
struct event_constraint unconstrainted;
struct event_constraint *constraints;
struct intel_uncore_pmu *pmus;
@@ -485,29 +481,31 @@ unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
return idx * 8 + box->pmu->type->perf_ctr;
}
-static inline
-unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
+static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
+{
+ struct intel_uncore_pmu *pmu = box->pmu;
+ return pmu->type->msr_offsets ?
+ pmu->type->msr_offsets[pmu->pmu_idx] :
+ pmu->type->msr_offset * pmu->pmu_idx;
+}
+
+static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
if (!box->pmu->type->box_ctl)
return 0;
- return box->pmu->type->box_ctl +
- box->pmu->type->msr_offset * box->pmu->pmu_idx;
+ return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}
-static inline
-unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
+static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
if (!box->pmu->type->fixed_ctl)
return 0;
- return box->pmu->type->fixed_ctl +
- box->pmu->type->msr_offset * box->pmu->pmu_idx;
+ return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}
-static inline
-unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
+static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
- return box->pmu->type->fixed_ctr +
- box->pmu->type->msr_offset * box->pmu->pmu_idx;
+ return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}
static inline
@@ -515,7 +513,7 @@ unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
return box->pmu->type->event_ctl +
(box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
- box->pmu->type->msr_offset * box->pmu->pmu_idx;
+ uncore_msr_box_offset(box);
}
static inline
@@ -523,7 +521,7 @@ unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
return box->pmu->type->perf_ctr +
(box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
- box->pmu->type->msr_offset * box->pmu->pmu_idx;
+ uncore_msr_box_offset(box);
}
static inline
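With ten cbox instances whose MSR blocks are not evenly spaced, the new uncore_msr_box_offset() prefers a per-instance table over the old uniform stride. A minimal standalone sketch of that lookup (struct uncore_type_sketch is a simplified stand-in for struct intel_uncore_type; the offsets are the cbox table from the patch):

    #include <assert.h>
    #include <stddef.h>

    static const unsigned nhmex_cbox_offsets[] = {
            0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
    };

    struct uncore_type_sketch {
            const unsigned *msr_offsets;    /* NULL when the stride is uniform */
            unsigned msr_offset;            /* uniform per-box stride          */
    };

    unsigned box_offset(const struct uncore_type_sketch *t, int pmu_idx)
    {
            return t->msr_offsets ? t->msr_offsets[pmu_idx]
                                  : t->msr_offset * pmu_idx;
    }

    int main(void)
    {
            struct uncore_type_sketch cbox = { nhmex_cbox_offsets, 0 };

            /* the table is not even monotonic (box 1 at 0x80, box 2 at 0x40),
             * so no single multiplier could describe boxes 8 and 9 */
            assert(box_offset(&cbox, 8) == 0x240);
            assert(box_offset(&cbox, 9) == 0x2c0);
            return 0;
    }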
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 39472dd2323..60c78917190 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -199,12 +199,14 @@ static int __init cpuid_init(void)
goto out_chrdev;
}
cpuid_class->devnode = cpuid_devnode;
+ get_online_cpus();
for_each_online_cpu(i) {
err = cpuid_device_create(i);
if (err != 0)
goto out_class;
}
register_hotcpu_notifier(&cpuid_class_cpu_notifier);
+ put_online_cpus();
err = 0;
goto out;
@@ -214,6 +216,7 @@ out_class:
for_each_online_cpu(i) {
cpuid_device_destroy(i);
}
+ put_online_cpus();
class_destroy(cpuid_class);
out_chrdev:
__unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
@@ -225,11 +228,13 @@ static void __exit cpuid_exit(void)
{
int cpu = 0;
+ get_online_cpus();
for_each_online_cpu(cpu)
cpuid_device_destroy(cpu);
class_destroy(cpuid_class);
__unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
+ put_online_cpus();
}
module_init(cpuid_init);
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 1f5f1d5d2a0..d44f7829968 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -270,7 +270,7 @@ void fixup_irqs(void)
if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
break_affinity = 1;
- affinity = cpu_all_mask;
+ affinity = cpu_online_mask;
}
chip = irq_data_get_irq_chip(data);
@@ -328,6 +328,7 @@ void fixup_irqs(void)
chip->irq_retrigger(data);
raw_spin_unlock(&desc->lock);
}
+ __this_cpu_write(vector_irq[vector], -1);
}
}
#endif
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
index 1d5d31ea686..dc1404bf8e4 100644
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
@@ -107,7 +107,7 @@ static int __init create_setup_data_nodes(struct dentry *parent)
{
struct setup_data_node *node;
struct setup_data *data;
- int error = -ENOMEM;
+ int error;
struct dentry *d;
struct page *pg;
u64 pa_data;
@@ -121,8 +121,10 @@ static int __init create_setup_data_nodes(struct dentry *parent)
while (pa_data) {
node = kmalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
+ if (!node) {
+ error = -ENOMEM;
goto err_dir;
+ }
pg = pfn_to_page((pa_data+sizeof(*data)-1) >> PAGE_SHIFT);
if (PageHighMem(pg)) {
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 8a2ce8fd41c..82746f942cd 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -143,11 +143,12 @@ static int get_matching_microcode(int cpu, const u8 *ucode_ptr,
unsigned int *current_size)
{
struct microcode_header_amd *mc_hdr;
- unsigned int actual_size;
+ unsigned int actual_size, patch_size;
u16 equiv_cpu_id;
/* size of the current patch we're staring at */
- *current_size = *(u32 *)(ucode_ptr + 4) + SECTION_HDR_SIZE;
+ patch_size = *(u32 *)(ucode_ptr + 4);
+ *current_size = patch_size + SECTION_HDR_SIZE;
equiv_cpu_id = find_equiv_id();
if (!equiv_cpu_id)
@@ -174,7 +175,7 @@ static int get_matching_microcode(int cpu, const u8 *ucode_ptr,
/*
* now that the header looks sane, verify its size
*/
- actual_size = verify_ucode_size(cpu, *current_size, leftover_size);
+ actual_size = verify_ucode_size(cpu, patch_size, leftover_size);
if (!actual_size)
return 0;
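The split matters because current_size also counts the section header that precedes the patch payload, while only the payload itself should be checked against the per-family size limit. A small sketch of the two quantities, assuming the 8-byte section header (4-byte type, 4-byte size) of the AMD container format and a little-endian host:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    #define SECTION_HDR_SIZE 8              /* assumed: 4-byte type + 4-byte size */

    int main(void)
    {
            /* made-up section header: type 0x00000001, payload 0x968 bytes */
            const uint8_t ucode[8] = { 0x01, 0, 0, 0, 0x68, 0x09, 0, 0 };
            uint32_t patch_size;

            memcpy(&patch_size, ucode + 4, sizeof(patch_size));

            assert(patch_size == 0x968);                      /* what is size-verified */
            assert(patch_size + SECTION_HDR_SIZE == 0x970);   /* what advances the cursor */
            return 0;
    }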
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 4873e62db6a..9e5bcf1e237 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -225,6 +225,9 @@ static ssize_t microcode_write(struct file *file, const char __user *buf,
if (do_microcode_update(buf, len) == 0)
ret = (ssize_t)len;
+ if (ret > 0)
+ perf_check_microcode();
+
mutex_unlock(&microcode_mutex);
put_online_cpus();
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index eb113693f04..a7c5661f849 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -257,12 +257,14 @@ static int __init msr_init(void)
goto out_chrdev;
}
msr_class->devnode = msr_devnode;
+ get_online_cpus();
for_each_online_cpu(i) {
err = msr_device_create(i);
if (err != 0)
goto out_class;
}
register_hotcpu_notifier(&msr_class_cpu_notifier);
+ put_online_cpus();
err = 0;
goto out;
@@ -271,6 +273,7 @@ out_class:
i = 0;
for_each_online_cpu(i)
msr_device_destroy(i);
+ put_online_cpus();
class_destroy(msr_class);
out_chrdev:
__unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
@@ -281,11 +284,13 @@ out:
static void __exit msr_exit(void)
{
int cpu = 0;
+ get_online_cpus();
for_each_online_cpu(cpu)
msr_device_destroy(cpu);
class_destroy(msr_class);
__unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
unregister_hotcpu_notifier(&msr_class_cpu_notifier);
+ put_online_cpus();
}
module_init(msr_init);
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 97d9a9914ba..a3b57a27be8 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -475,13 +475,26 @@ register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
return address_mask(ctxt, reg);
}
+static void masked_increment(ulong *reg, ulong mask, int inc)
+{
+ assign_masked(reg, *reg + inc, mask);
+}
+
static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
+ ulong mask;
+
if (ctxt->ad_bytes == sizeof(unsigned long))
- *reg += inc;
+ mask = ~0UL;
else
- *reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
+ mask = ad_mask(ctxt);
+ masked_increment(reg, mask, inc);
+}
+
+static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
+{
+ masked_increment(&ctxt->regs[VCPU_REGS_RSP], stack_mask(ctxt), inc);
}
static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
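masked_increment() leans on the emulator's existing assign_masked() helper, which only rewrites the bits covered by the mask, so rsp_increment() can wrap a 16- or 32-bit stack pointer without disturbing the rest of RSP. A standalone sketch (the helper bodies mirror the ones in this file; the register value is made up):

    #include <assert.h>

    typedef unsigned long ulong;

    static void assign_masked(ulong *dest, ulong src, ulong mask)
    {
            *dest = (*dest & ~mask) | (src & mask);
    }

    static void masked_increment(ulong *reg, ulong mask, int inc)
    {
            assign_masked(reg, *reg + inc, mask);
    }

    int main(void)
    {
            ulong rsp = 0x12340004;         /* 16-bit SP = 0x0004 */

            /* pushing 8 bytes with a 16-bit stack mask wraps SP to 0xfffc
             * while the bits above the mask stay untouched */
            masked_increment(&rsp, 0xffff, -8);
            assert(rsp == 0x1234fffc);
            return 0;
    }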
@@ -1522,8 +1535,8 @@ static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
struct segmented_address addr;
- register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -bytes);
- addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
+ rsp_increment(ctxt, -bytes);
+ addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
addr.seg = VCPU_SREG_SS;
return segmented_write(ctxt, addr, data, bytes);
@@ -1542,13 +1555,13 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
int rc;
struct segmented_address addr;
- addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
+ addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
addr.seg = VCPU_SREG_SS;
rc = segmented_read(ctxt, addr, dest, len);
if (rc != X86EMUL_CONTINUE)
return rc;
- register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
+ rsp_increment(ctxt, len);
return rc;
}
@@ -1688,8 +1701,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
while (reg >= VCPU_REGS_RAX) {
if (reg == VCPU_REGS_RSP) {
- register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
- ctxt->op_bytes);
+ rsp_increment(ctxt, ctxt->op_bytes);
--reg;
}
@@ -2825,7 +2837,7 @@ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
- register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
+ rsp_increment(ctxt, ctxt->src.val);
return X86EMUL_CONTINUE;
}
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 1df8fb9e1d5..9fc9aa7ac70 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -316,6 +316,11 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
addr &= 1;
if (addr == 0) {
if (val & 0x10) {
+ u8 edge_irr = s->irr & ~s->elcr;
+ int i;
+ bool found = false;
+ struct kvm_vcpu *vcpu;
+
s->init4 = val & 1;
s->last_irr = 0;
s->irr &= s->elcr;
@@ -333,6 +338,18 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
if (val & 0x08)
pr_pic_unimpl(
"level sensitive irq not supported");
+
+ kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm)
+ if (kvm_apic_accept_pic_intr(vcpu)) {
+ found = true;
+ break;
+ }
+
+
+ if (found)
+ for (irq = 0; irq < PIC_NUM_PINS/2; irq++)
+ if (edge_irr & (1 << irq))
+ pic_clear_isr(s, irq);
} else if (val & 0x08) {
if (val & 0x04)
s->poll = 1;
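Since elcr flags level-triggered pins, irr & ~elcr is exactly the set of pending edge-triggered interrupts whose in-service bits an ICW1 re-init should drop. A tiny worked example of that mask (register values invented for illustration):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint8_t irr  = 0x0a;            /* IRQ1 and IRQ3 pending   */
            uint8_t elcr = 0x02;            /* IRQ1 is level-triggered */
            uint8_t edge_irr = irr & ~elcr;

            /* only edge-triggered IRQ3 has its ISR bit cleared on re-init */
            assert(edge_irr == 0x08);
            return 0;
    }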
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 01ca0042393..7fbd0d273ea 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4113,16 +4113,21 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
LIST_HEAD(invalid_list);
/*
+ * Never scan more than sc->nr_to_scan VM instances.
+ * We will not hit this condition in practice since we do not try
+ * to shrink more than one VM and it is very unlikely to see
+ * !n_used_mmu_pages so many times.
+ */
+ if (!nr_to_scan--)
+ break;
+ /*
* n_used_mmu_pages is accessed without holding kvm->mmu_lock
* here. We may skip a VM instance erroneously, but we do not
* want to shrink a VM that only started to populate its MMU
* anyway.
*/
- if (kvm->arch.n_used_mmu_pages > 0) {
- if (!nr_to_scan--)
- break;
+ if (!kvm->arch.n_used_mmu_pages)
continue;
- }
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c39b60707e0..b1eb202ee76 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1488,13 +1488,6 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
loadsegment(ds, vmx->host_state.ds_sel);
loadsegment(es, vmx->host_state.es_sel);
}
-#else
- /*
- * The sysexit path does not restore ds/es, so we must set them to
- * a reasonable value ourselves.
- */
- loadsegment(ds, __USER_DS);
- loadsegment(es, __USER_DS);
#endif
reload_tss();
#ifdef CONFIG_X86_64
@@ -3626,6 +3619,7 @@ static void seg_setup(int seg)
static int alloc_apic_access_page(struct kvm *kvm)
{
+ struct page *page;
struct kvm_userspace_memory_region kvm_userspace_mem;
int r = 0;
@@ -3640,7 +3634,13 @@ static int alloc_apic_access_page(struct kvm *kvm)
if (r)
goto out;
- kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
+ page = gfn_to_page(kvm, 0xfee00);
+ if (is_error_page(page)) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ kvm->arch.apic_access_page = page;
out:
mutex_unlock(&kvm->slots_lock);
return r;
@@ -3648,6 +3648,7 @@ out:
static int alloc_identity_pagetable(struct kvm *kvm)
{
+ struct page *page;
struct kvm_userspace_memory_region kvm_userspace_mem;
int r = 0;
@@ -3663,8 +3664,13 @@ static int alloc_identity_pagetable(struct kvm *kvm)
if (r)
goto out;
- kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
- kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
+ page = gfn_to_page(kvm, kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
+ if (is_error_page(page)) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ kvm->arch.ept_identity_pagetable = page;
out:
mutex_unlock(&kvm->slots_lock);
return r;
@@ -6370,6 +6376,19 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#endif
);
+#ifndef CONFIG_X86_64
+ /*
+ * The sysexit path does not restore ds/es, so we must set them to
+ * a reasonable value ourselves.
+ *
+ * We can't defer this to vmx_load_host_state() since that function
+ * may be executed in interrupt context, which saves and restores segments
+ * around it, nullifying its effect.
+ */
+ loadsegment(ds, __USER_DS);
+ loadsegment(es, __USER_DS);
+#endif
+
vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
| (1 << VCPU_EXREG_RFLAGS)
| (1 << VCPU_EXREG_CPL)
@@ -6569,7 +6588,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
/* Exposing INVPCID only when PCID is exposed */
best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
if (vmx_invpcid_supported() &&
- best && (best->ecx & bit(X86_FEATURE_INVPCID)) &&
+ best && (best->ebx & bit(X86_FEATURE_INVPCID)) &&
guest_cpuid_has_pcid(vcpu)) {
exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;
vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
@@ -6579,7 +6598,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
exec_control);
if (best)
- best->ecx &= ~bit(X86_FEATURE_INVPCID);
+ best->ebx &= ~bit(X86_FEATURE_INVPCID);
}
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 59b59508ff0..2966c847d48 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -806,7 +806,7 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc);
* kvm-specific. Those are put in the beginning of the list.
*/
-#define KVM_SAVE_MSRS_BEGIN 9
+#define KVM_SAVE_MSRS_BEGIN 10
static u32 msrs_to_save[] = {
MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
@@ -925,6 +925,10 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
*/
getboottime(&boot);
+ if (kvm->arch.kvmclock_offset) {
+ struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset);
+ boot = timespec_sub(boot, ts);
+ }
wc.sec = boot.tv_sec;
wc.nsec = boot.tv_nsec;
wc.version = version;
@@ -1996,6 +2000,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_KVM_STEAL_TIME:
data = vcpu->arch.st.msr_val;
break;
+ case MSR_KVM_PV_EOI_EN:
+ data = vcpu->arch.pv_eoi.msr_val;
+ break;
case MSR_IA32_P5_MC_ADDR:
case MSR_IA32_P5_MC_TYPE:
case MSR_IA32_MCG_CAP:
@@ -5106,17 +5113,20 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
!kvm_event_needs_reinjection(vcpu);
}
-static void vapic_enter(struct kvm_vcpu *vcpu)
+static int vapic_enter(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
struct page *page;
if (!apic || !apic->vapic_addr)
- return;
+ return 0;
page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
+ if (is_error_page(page))
+ return -EFAULT;
vcpu->arch.apic->vapic_page = page;
+ return 0;
}
static void vapic_exit(struct kvm_vcpu *vcpu)
@@ -5423,7 +5433,11 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
}
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
- vapic_enter(vcpu);
+ r = vapic_enter(vcpu);
+ if (r) {
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+ return r;
+ }
r = 1;
while (r > 0) {
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index f6679a7fb8c..b91e4851242 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -56,9 +56,16 @@ static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
}
/*
- * search for a shareable pmd page for hugetlb.
+ * Search for a shareable pmd page for hugetlb. In any case it calls pmd_alloc()
+ * and returns the corresponding pte. While this is not necessary for the
+ * !shared pmd case because we can allocate the pmd later as well, it makes the
+ * code much cleaner. pmd allocation is essential for the shared case because
+ * pud has to be populated inside the same i_mmap_mutex section - otherwise
+ * racing tasks could either miss the sharing (see huge_pte_offset) or select a
+ * bad pmd for sharing.
*/
-static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+static pte_t *
+huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
struct vm_area_struct *vma = find_vma(mm, addr);
struct address_space *mapping = vma->vm_file->f_mapping;
@@ -68,9 +75,10 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
struct vm_area_struct *svma;
unsigned long saddr;
pte_t *spte = NULL;
+ pte_t *pte;
if (!vma_shareable(vma, addr))
- return;
+ return (pte_t *)pmd_alloc(mm, pud, addr);
mutex_lock(&mapping->i_mmap_mutex);
vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
@@ -97,7 +105,9 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
put_page(virt_to_page(spte));
spin_unlock(&mm->page_table_lock);
out:
+ pte = (pte_t *)pmd_alloc(mm, pud, addr);
mutex_unlock(&mapping->i_mmap_mutex);
+ return pte;
}
/*
@@ -142,8 +152,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
} else {
BUG_ON(sz != PMD_SIZE);
if (pud_none(*pud))
- huge_pmd_share(mm, addr, pud);
- pte = (pte_t *) pmd_alloc(mm, pud, addr);
+ pte = huge_pmd_share(mm, addr, pud);
+ else
+ pte = (pte_t *)pmd_alloc(mm, pud, addr);
}
}
BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index e0e6990723e..ab1f6a93b52 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -319,7 +319,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
*/
int devmem_is_allowed(unsigned long pagenr)
{
- if (pagenr <= 256)
+ if (pagenr < 256)
return 1;
if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
return 0;
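The comparison change is an off-by-one at the 1 MiB boundary: the legacy BIOS window is pages 0 through 255, and page 256 starts at exactly 1 MiB of ordinary RAM, so it must not be whitelisted for /dev/mem. A quick check of the arithmetic, assuming 4 KiB pages:

    #include <assert.h>

    int main(void)
    {
            assert(255 * 4096 + 4095 == 0xfffff);   /* last byte of the low 1 MiB */
            assert(256 * 4096 == 0x100000);         /* page 256 begins at 1 MiB   */
            return 0;
    }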
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 931930a9616..a718e0d2350 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -919,13 +919,11 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
/*
* On success we use clflush, when the CPU supports it to
- * avoid the wbindv. If the CPU does not support it, in the
- * error case, and during early boot (for EFI) we fall back
- * to cpa_flush_all (which uses wbinvd):
+ * avoid the wbinvd. If the CPU does not support it, and in the
+ * error case, we fall back to cpa_flush_all (which uses
+ * wbinvd):
*/
- if (early_boot_irqs_disabled)
- __cpa_flush_all((void *)(long)cache);
- else if (!ret && cpu_has_clflush) {
+ if (!ret && cpu_has_clflush) {
if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
cpa_flush_array(addr, numpages, cache,
cpa.flags, pages);
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 4599c3e8bcb..4ddf497ca65 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -142,23 +142,23 @@ static inline int save_add_info(void) {return 0;}
#endif
/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
-void __init
+int __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
u64 start, end;
int node, pxm;
if (srat_disabled())
- return;
+ return -1;
if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
bad_srat();
- return;
+ return -1;
}
if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
- return;
+ return -1;
if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
- return;
+ return -1;
start = ma->base_address;
end = start + ma->length;
pxm = ma->proximity_domain;
@@ -168,12 +168,12 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
if (node < 0) {
printk(KERN_ERR "SRAT: Too many proximity domains.\n");
bad_srat();
- return;
+ return -1;
}
if (numa_add_memblk(node, start, end) < 0) {
bad_srat();
- return;
+ return -1;
}
node_set(node, numa_nodes_parsed);
@@ -181,6 +181,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
node, pxm,
(unsigned long long) start, (unsigned long long) end - 1);
+ return 0;
}
void __init acpi_numa_arch_fixup(void) {}
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 2dc29f51e75..92660edaa1e 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -234,7 +234,22 @@ static efi_status_t __init phys_efi_set_virtual_address_map(
return status;
}
-static int efi_set_rtc_mmss(unsigned long nowtime)
+static efi_status_t __init phys_efi_get_time(efi_time_t *tm,
+ efi_time_cap_t *tc)
+{
+ unsigned long flags;
+ efi_status_t status;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ efi_call_phys_prelog();
+ status = efi_call_phys2(efi_phys.get_time, virt_to_phys(tm),
+ virt_to_phys(tc));
+ efi_call_phys_epilog();
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return status;
+}
+
+int efi_set_rtc_mmss(unsigned long nowtime)
{
int real_seconds, real_minutes;
efi_status_t status;
@@ -263,7 +278,7 @@ static int efi_set_rtc_mmss(unsigned long nowtime)
return 0;
}
-static unsigned long efi_get_time(void)
+unsigned long efi_get_time(void)
{
efi_status_t status;
efi_time_t eft;
@@ -606,13 +621,18 @@ static int __init efi_runtime_init(void)
}
/*
* We will only need *early* access to the following
- * EFI runtime service before set_virtual_address_map
+ * two EFI runtime services before set_virtual_address_map
* is invoked.
*/
+ efi_phys.get_time = (efi_get_time_t *)runtime->get_time;
efi_phys.set_virtual_address_map =
(efi_set_virtual_address_map_t *)
runtime->set_virtual_address_map;
-
+ /*
+ * Make sure efi_get_time can be called before entering
+ * virtual mode.
+ */
+ efi.get_time = phys_efi_get_time;
early_iounmap(runtime, sizeof(efi_runtime_services_t));
return 0;
@@ -700,10 +720,12 @@ void __init efi_init(void)
efi_enabled = 0;
return;
}
+#ifdef CONFIG_X86_32
if (efi_native) {
x86_platform.get_wallclock = efi_get_time;
x86_platform.set_wallclock = efi_set_rtc_mmss;
}
+#endif
#if EFI_DEBUG
print_efi_memmap();
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index b2d534cab25..88692871823 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -72,7 +72,7 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
-Wall -Wstrict-prototypes \
-march=i386 -mregparm=3 \
-include $(srctree)/$(src)/../../boot/code16gcc.h \
- -fno-strict-aliasing -fomit-frame-pointer \
+ -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
$(call cc-option, -ffreestanding) \
$(call cc-option, -fno-toplevel-reorder,\
$(call cc-option, -fno-unit-at-a-time)) \
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index 51171aeff0d..a582bfed95b 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -60,8 +60,8 @@
51 common getsockname sys_getsockname
52 common getpeername sys_getpeername
53 common socketpair sys_socketpair
-54 common setsockopt sys_setsockopt
-55 common getsockopt sys_getsockopt
+54 64 setsockopt sys_setsockopt
+55 64 getsockopt sys_getsockopt
56 common clone stub_clone
57 common fork stub_fork
58 common vfork stub_vfork
@@ -318,7 +318,7 @@
309 common getcpu sys_getcpu
310 64 process_vm_readv sys_process_vm_readv
311 64 process_vm_writev sys_process_vm_writev
-312 64 kcmp sys_kcmp
+312 common kcmp sys_kcmp
#
# x32-specific system call numbers start at 512 to avoid cache impact
@@ -353,3 +353,5 @@
538 x32 sendmmsg compat_sys_sendmmsg
539 x32 process_vm_readv compat_sys_process_vm_readv
540 x32 process_vm_writev compat_sys_process_vm_writev
+541 x32 setsockopt compat_sys_setsockopt
+542 x32 getsockopt compat_sys_getsockopt
diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig
index 9926e11a772..aeaff8bef2f 100644
--- a/arch/x86/um/Kconfig
+++ b/arch/x86/um/Kconfig
@@ -21,6 +21,7 @@ config 64BIT
config X86_32
def_bool !64BIT
select HAVE_AOUT
+ select ARCH_WANT_IPC_PARSE_VERSION
config X86_64
def_bool 64BIT
diff --git a/arch/x86/um/shared/sysdep/kernel-offsets.h b/arch/x86/um/shared/sysdep/kernel-offsets.h
index 5868526b5ee..46a9df99f3c 100644
--- a/arch/x86/um/shared/sysdep/kernel-offsets.h
+++ b/arch/x86/um/shared/sysdep/kernel-offsets.h
@@ -7,9 +7,6 @@
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-#define STR(x) #x
-#define DEFINE_STR(sym, val) asm volatile("\n->" #sym " " STR(val) " " #val: : )
-
#define BLANK() asm volatile("\n->" : : )
#define OFFSET(sym, str, mem) \
diff --git a/arch/x86/um/shared/sysdep/syscalls.h b/arch/x86/um/shared/sysdep/syscalls.h
index bd9a89b67e4..ca255a805ed 100644
--- a/arch/x86/um/shared/sysdep/syscalls.h
+++ b/arch/x86/um/shared/sysdep/syscalls.h
@@ -1,3 +1,5 @@
+extern long sys_clone(unsigned long clone_flags, unsigned long newsp,
+ void __user *parent_tid, void __user *child_tid);
#ifdef __i386__
#include "syscalls_32.h"
#else
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
index a508cea1350..ba7363ecf89 100644
--- a/arch/x86/um/signal.c
+++ b/arch/x86/um/signal.c
@@ -416,9 +416,6 @@ int setup_signal_stack_sc(unsigned long stack_top, int sig,
PT_REGS_AX(regs) = (unsigned long) sig;
PT_REGS_DX(regs) = (unsigned long) 0;
PT_REGS_CX(regs) = (unsigned long) 0;
-
- if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
- ptrace_notify(SIGTRAP);
return 0;
}
@@ -466,9 +463,6 @@ int setup_signal_stack_si(unsigned long stack_top, int sig,
PT_REGS_AX(regs) = (unsigned long) sig;
PT_REGS_DX(regs) = (unsigned long) &frame->info;
PT_REGS_CX(regs) = (unsigned long) &frame->uc;
-
- if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
- ptrace_notify(SIGTRAP);
return 0;
}
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
index 68d1dc91b37..b5408cecac6 100644
--- a/arch/x86/um/sys_call_table_32.c
+++ b/arch/x86/um/sys_call_table_32.c
@@ -28,7 +28,7 @@
#define ptregs_execve sys_execve
#define ptregs_iopl sys_iopl
#define ptregs_vm86old sys_vm86old
-#define ptregs_clone sys_clone
+#define ptregs_clone i386_clone
#define ptregs_vm86 sys_vm86
#define ptregs_sigaltstack sys_sigaltstack
#define ptregs_vfork sys_vfork
diff --git a/arch/x86/um/syscalls_32.c b/arch/x86/um/syscalls_32.c
index b853e8600b9..db444c7218f 100644
--- a/arch/x86/um/syscalls_32.c
+++ b/arch/x86/um/syscalls_32.c
@@ -3,37 +3,24 @@
* Licensed under the GPL
*/
-#include "linux/sched.h"
-#include "linux/shm.h"
-#include "linux/ipc.h"
-#include "linux/syscalls.h"
-#include "asm/mman.h"
-#include "asm/uaccess.h"
-#include "asm/unistd.h"
+#include <linux/syscalls.h>
+#include <sysdep/syscalls.h>
/*
* The prototype on i386 is:
*
- * int clone(int flags, void * child_stack, int * parent_tidptr, struct user_desc * newtls, int * child_tidptr)
+ * int clone(int flags, void * child_stack, int * parent_tidptr, struct user_desc * newtls
*
* and the "newtls" arg. on i386 is read by copy_thread directly from the
* register saved on the stack.
*/
-long sys_clone(unsigned long clone_flags, unsigned long newsp,
- int __user *parent_tid, void *newtls, int __user *child_tid)
+long i386_clone(unsigned long clone_flags, unsigned long newsp,
+ int __user *parent_tid, void *newtls, int __user *child_tid)
{
- long ret;
-
- if (!newsp)
- newsp = UPT_SP(&current->thread.regs.regs);
-
- current->thread.forking = 1;
- ret = do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
- child_tid);
- current->thread.forking = 0;
- return ret;
+ return sys_clone(clone_flags, newsp, parent_tid, child_tid);
}
+
long sys_sigaction(int sig, const struct old_sigaction __user *act,
struct old_sigaction __user *oact)
{
diff --git a/arch/x86/um/syscalls_64.c b/arch/x86/um/syscalls_64.c
index f3d82bb6e15..adb08eb5c22 100644
--- a/arch/x86/um/syscalls_64.c
+++ b/arch/x86/um/syscalls_64.c
@@ -5,12 +5,9 @@
* Licensed under the GPL
*/
-#include "linux/linkage.h"
-#include "linux/personality.h"
-#include "linux/utsname.h"
-#include "asm/prctl.h" /* XXX This should get the constants from libc */
-#include "asm/uaccess.h"
-#include "os.h"
+#include <linux/sched.h>
+#include <asm/prctl.h> /* XXX This should get the constants from libc */
+#include <os.h>
long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr)
{
@@ -79,20 +76,6 @@ long sys_arch_prctl(int code, unsigned long addr)
return arch_prctl(current, code, (unsigned long __user *) addr);
}
-long sys_clone(unsigned long clone_flags, unsigned long newsp,
- void __user *parent_tid, void __user *child_tid)
-{
- long ret;
-
- if (!newsp)
- newsp = UPT_SP(&current->thread.regs.regs);
- current->thread.forking = 1;
- ret = do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
- child_tid);
- current->thread.forking = 0;
- return ret;
-}
-
void arch_switch_to(struct task_struct *to)
{
if ((to->thread.arch.fs == 0) || (to->mm == NULL))
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index bf4bda6d3e9..1fbe75a95f1 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -31,7 +31,6 @@
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
-#include <linux/syscore_ops.h>
#include <xen/xen.h>
#include <xen/interface/xen.h>
@@ -1453,6 +1452,10 @@ asmlinkage void __init xen_start_kernel(void)
pci_request_acs();
xen_acpi_sleep_register();
+
+ /* Avoid searching for BIOS MP tables */
+ x86_init.mpparse.find_smp_config = x86_init_noop;
+ x86_init.mpparse.get_smp_config = x86_init_uint_noop;
}
#ifdef CONFIG_PCI
/* PCI BIOS service won't work from a PV guest. */
@@ -1470,130 +1473,38 @@ asmlinkage void __init xen_start_kernel(void)
#endif
}
-#ifdef CONFIG_XEN_PVHVM
-/*
- * The pfn containing the shared_info is located somewhere in RAM. This
- * will cause trouble if the current kernel is doing a kexec boot into a
- * new kernel. The new kernel (and its startup code) can not know where
- * the pfn is, so it can not reserve the page. The hypervisor will
- * continue to update the pfn, and as a result memory corruption occours
- * in the new kernel.
- *
- * One way to work around this issue is to allocate a page in the
- * xen-platform pci device's BAR memory range. But pci init is done very
- * late and the shared_info page is already in use very early to read
- * the pvclock. So moving the pfn from RAM to MMIO is racy because some
- * code paths on other vcpus could access the pfn during the small
- * window when the old pfn is moved to the new pfn. There is even a
- * small window were the old pfn is not backed by a mfn, and during that
- * time all reads return -1.
- *
- * Because it is not known upfront where the MMIO region is located it
- * can not be used right from the start in xen_hvm_init_shared_info.
- *
- * To minimise trouble the move of the pfn is done shortly before kexec.
- * This does not eliminate the race because all vcpus are still online
- * when the syscore_ops will be called. But hopefully there is no work
- * pending at this point in time. Also the syscore_op is run last which
- * reduces the risk further.
- */
-
-static struct shared_info *xen_hvm_shared_info;
-
-static void xen_hvm_connect_shared_info(unsigned long pfn)
+void __ref xen_hvm_init_shared_info(void)
{
+ int cpu;
struct xen_add_to_physmap xatp;
+ static struct shared_info *shared_info_page = 0;
+ if (!shared_info_page)
+ shared_info_page = (struct shared_info *)
+ extend_brk(PAGE_SIZE, PAGE_SIZE);
xatp.domid = DOMID_SELF;
xatp.idx = 0;
xatp.space = XENMAPSPACE_shared_info;
- xatp.gpfn = pfn;
+ xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
BUG();
-}
-static void xen_hvm_set_shared_info(struct shared_info *sip)
-{
- int cpu;
-
- HYPERVISOR_shared_info = sip;
+ HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;
/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
* page, we use it in the event channel upcall and in some pvclock
* related functions. We don't need the vcpu_info placement
* optimizations because we don't use any pv_mmu or pv_irq op on
* HVM.
- * When xen_hvm_set_shared_info is run at boot time only vcpu 0 is
- * online but xen_hvm_set_shared_info is run at resume time too and
+ * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is
+ * online but xen_hvm_init_shared_info is run at resume time too and
* in that case multiple vcpus might be online. */
for_each_online_cpu(cpu) {
per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
}
}
-/* Reconnect the shared_info pfn to a mfn */
-void xen_hvm_resume_shared_info(void)
-{
- xen_hvm_connect_shared_info(__pa(xen_hvm_shared_info) >> PAGE_SHIFT);
-}
-
-#ifdef CONFIG_KEXEC
-static struct shared_info *xen_hvm_shared_info_kexec;
-static unsigned long xen_hvm_shared_info_pfn_kexec;
-
-/* Remember a pfn in MMIO space for kexec reboot */
-void __devinit xen_hvm_prepare_kexec(struct shared_info *sip, unsigned long pfn)
-{
- xen_hvm_shared_info_kexec = sip;
- xen_hvm_shared_info_pfn_kexec = pfn;
-}
-
-static void xen_hvm_syscore_shutdown(void)
-{
- struct xen_memory_reservation reservation = {
- .domid = DOMID_SELF,
- .nr_extents = 1,
- };
- unsigned long prev_pfn;
- int rc;
-
- if (!xen_hvm_shared_info_kexec)
- return;
-
- prev_pfn = __pa(xen_hvm_shared_info) >> PAGE_SHIFT;
- set_xen_guest_handle(reservation.extent_start, &prev_pfn);
-
- /* Move pfn to MMIO, disconnects previous pfn from mfn */
- xen_hvm_connect_shared_info(xen_hvm_shared_info_pfn_kexec);
-
- /* Update pointers, following hypercall is also a memory barrier */
- xen_hvm_set_shared_info(xen_hvm_shared_info_kexec);
-
- /* Allocate new mfn for previous pfn */
- do {
- rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
- if (rc == 0)
- msleep(123);
- } while (rc == 0);
-
- /* Make sure the previous pfn is really connected to a (new) mfn */
- BUG_ON(rc != 1);
-}
-
-static struct syscore_ops xen_hvm_syscore_ops = {
- .shutdown = xen_hvm_syscore_shutdown,
-};
-#endif
-
-/* Use a pfn in RAM, may move to MMIO before kexec. */
-static void __init xen_hvm_init_shared_info(void)
-{
- /* Remember pointer for resume */
- xen_hvm_shared_info = extend_brk(PAGE_SIZE, PAGE_SIZE);
- xen_hvm_connect_shared_info(__pa(xen_hvm_shared_info) >> PAGE_SHIFT);
- xen_hvm_set_shared_info(xen_hvm_shared_info);
-}
-
+#ifdef CONFIG_XEN_PVHVM
static void __init init_hvm_pv_info(void)
{
int major, minor;
@@ -1644,9 +1555,6 @@ static void __init xen_hvm_guest_init(void)
init_hvm_pv_info();
xen_hvm_init_shared_info();
-#ifdef CONFIG_KEXEC
- register_syscore_ops(&xen_hvm_syscore_ops);
-#endif
if (xen_feature(XENFEAT_hvm_callback_vector))
xen_have_vector_callback = 1;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index b65a76133f4..5141d808e75 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1283,7 +1283,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
- if (start != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
+ if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
args->op.cmd = MMUEXT_INVLPG_MULTI;
args->op.arg1.linear_addr = start;
}
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 64effdc6da9..72213da605f 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -194,6 +194,13 @@ RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID
* boundary violation will require three middle nodes. */
RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3);
+/* When we populate back during bootup, the number of pages can vary. The
+ * maximum we have seen is 395979, but that does not mean it can't be more.
+ * Some machines even have 3GB I/O holes. With early_can_reuse_p2m_middle
+ * it can re-use the Xen-provided mfn_list array, so we only need to allocate
+ * at most three P2M top nodes. */
+RESERVE_BRK(p2m_populated, PAGE_SIZE * 3);
+
static inline unsigned p2m_top_index(unsigned long pfn)
{
BUG_ON(pfn >= MAX_P2M_PFN);
@@ -570,12 +577,99 @@ static bool __init early_alloc_p2m(unsigned long pfn)
}
return true;
}
+
+/*
+ * Skim over the P2M tree looking at pages that are either filled with
+ * INVALID_P2M_ENTRY or with 1:1 PFNs. If found, re-use that page and
+ * replace the P2M leaf with a p2m_missing or p2m_identity.
+ * Stick the old page in the new P2M tree location.
+ */
+bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_mfn)
+{
+ unsigned topidx;
+ unsigned mididx;
+ unsigned ident_pfns;
+ unsigned inv_pfns;
+ unsigned long *p2m;
+ unsigned long *mid_mfn_p;
+ unsigned idx;
+ unsigned long pfn;
+
+ /* We only look when this entails a P2M middle layer */
+ if (p2m_index(set_pfn))
+ return false;
+
+ for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
+ topidx = p2m_top_index(pfn);
+
+ if (!p2m_top[topidx])
+ continue;
+
+ if (p2m_top[topidx] == p2m_mid_missing)
+ continue;
+
+ mididx = p2m_mid_index(pfn);
+ p2m = p2m_top[topidx][mididx];
+ if (!p2m)
+ continue;
+
+ if ((p2m == p2m_missing) || (p2m == p2m_identity))
+ continue;
+
+ if ((unsigned long)p2m == INVALID_P2M_ENTRY)
+ continue;
+
+ ident_pfns = 0;
+ inv_pfns = 0;
+ for (idx = 0; idx < P2M_PER_PAGE; idx++) {
+ /* IDENTITY_PFNs are 1:1 */
+ if (p2m[idx] == IDENTITY_FRAME(pfn + idx))
+ ident_pfns++;
+ else if (p2m[idx] == INVALID_P2M_ENTRY)
+ inv_pfns++;
+ else
+ break;
+ }
+ if ((ident_pfns == P2M_PER_PAGE) || (inv_pfns == P2M_PER_PAGE))
+ goto found;
+ }
+ return false;
+found:
+ /* Found one, replace old with p2m_identity or p2m_missing */
+ p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing);
+ /* And the other for save/restore.. */
+ mid_mfn_p = p2m_top_mfn_p[topidx];
+ /* NOTE: Even if it is a p2m_identity it should still point to
+ * a page filled with INVALID_P2M_ENTRY entries. */
+ mid_mfn_p[mididx] = virt_to_mfn(p2m_missing);
+
+ /* Reset where we want to stick the old page in. */
+ topidx = p2m_top_index(set_pfn);
+ mididx = p2m_mid_index(set_pfn);
+
+ /* This shouldn't happen */
+ if (WARN_ON(p2m_top[topidx] == p2m_mid_missing))
+ early_alloc_p2m(set_pfn);
+
+ if (WARN_ON(p2m_top[topidx][mididx] != p2m_missing))
+ return false;
+
+ p2m_init(p2m);
+ p2m_top[topidx][mididx] = p2m;
+ mid_mfn_p = p2m_top_mfn_p[topidx];
+ mid_mfn_p[mididx] = virt_to_mfn(p2m);
+
+ return true;
+}
bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
if (!early_alloc_p2m(pfn))
return false;
+ if (early_can_reuse_p2m_middle(pfn, mfn))
+ return __set_phys_to_machine(pfn, mfn);
+
if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/))
return false;
@@ -734,9 +828,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
xen_mc_issue(PARAVIRT_LAZY_MMU);
}
- /* let's use dev_bus_addr to record the old mfn instead */
- kmap_op->dev_bus_addr = page->index;
- page->index = (unsigned long) kmap_op;
}
spin_lock_irqsave(&m2p_override_lock, flags);
list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
@@ -763,7 +854,8 @@ int m2p_add_override(unsigned long mfn, struct page *page,
return 0;
}
EXPORT_SYMBOL_GPL(m2p_add_override);
-int m2p_remove_override(struct page *page, bool clear_pte)
+int m2p_remove_override(struct page *page,
+ struct gnttab_map_grant_ref *kmap_op)
{
unsigned long flags;
unsigned long mfn;
@@ -793,10 +885,8 @@ int m2p_remove_override(struct page *page, bool clear_pte)
WARN_ON(!PagePrivate(page));
ClearPagePrivate(page);
- if (clear_pte) {
- struct gnttab_map_grant_ref *map_op =
- (struct gnttab_map_grant_ref *) page->index;
- set_phys_to_machine(pfn, map_op->dev_bus_addr);
+ set_phys_to_machine(pfn, page->index);
+ if (kmap_op != NULL) {
if (!PageHighMem(page)) {
struct multicall_space mcs;
struct gnttab_unmap_grant_ref *unmap_op;
@@ -808,13 +898,13 @@ int m2p_remove_override(struct page *page, bool clear_pte)
* issued. In this case handle is going to -1 because
* it hasn't been modified yet.
*/
- if (map_op->handle == -1)
+ if (kmap_op->handle == -1)
xen_mc_flush();
/*
- * Now if map_op->handle is negative it means that the
+ * Now if kmap_op->handle is negative it means that the
* hypercall actually returned an error.
*/
- if (map_op->handle == GNTST_general_error) {
+ if (kmap_op->handle == GNTST_general_error) {
printk(KERN_WARNING "m2p_remove_override: "
"pfn %lx mfn %lx, failed to modify kernel mappings",
pfn, mfn);
@@ -824,8 +914,8 @@ int m2p_remove_override(struct page *page, bool clear_pte)
mcs = xen_mc_entry(
sizeof(struct gnttab_unmap_grant_ref));
unmap_op = mcs.args;
- unmap_op->host_addr = map_op->host_addr;
- unmap_op->handle = map_op->handle;
+ unmap_op->host_addr = kmap_op->host_addr;
+ unmap_op->handle = kmap_op->handle;
unmap_op->dev_bus_addr = 0;
MULTI_grant_table_op(mcs.mc,
@@ -836,10 +926,9 @@ int m2p_remove_override(struct page *page, bool clear_pte)
set_pte_at(&init_mm, address, ptep,
pfn_pte(pfn, PAGE_KERNEL));
__flush_tlb_single(address);
- map_op->host_addr = 0;
+ kmap_op->host_addr = 0;
}
- } else
- set_phys_to_machine(pfn, page->index);
+ }
/* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
* somewhere in this domain, even before being added to the
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index ead85576d54..e2d62d697b5 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -17,6 +17,7 @@
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
+#include <asm/numa.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
@@ -78,9 +79,16 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
memblock_reserve(start, size);
xen_max_p2m_pfn = PFN_DOWN(start + size);
+ for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
+ unsigned long mfn = pfn_to_mfn(pfn);
+
+ if (WARN(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn))
+ continue;
+ WARN(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n",
+ pfn, mfn);
- for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++)
__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+ }
}
static unsigned long __init xen_do_chunk(unsigned long start,
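The new loop bound is the same kind of boundary fix: xen_max_p2m_pfn is PFN_DOWN(start + size), i.e. the first pfn past the extra region, so iterating with <= used to invalidate one pfn beyond it. A small check of that arithmetic, assuming 4 KiB pages and a made-up 1 GiB region at 4 GiB:

    #include <assert.h>

    #define PAGE_SHIFT   12
    #define PFN_DOWN(x)  ((x) >> PAGE_SHIFT)

    int main(void)
    {
            unsigned long long start = 0x100000000ULL;      /* 4 GiB */
            unsigned long long size  = 0x40000000ULL;       /* 1 GiB */

            assert(PFN_DOWN(start) == 0x100000);
            assert(PFN_DOWN(start + size) == 0x140000);
            /* pfns 0x100000..0x13ffff belong to the region; with the old '<='
             * bound, pfn 0x140000 just past it was cleared as well */
            return 0;
    }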
@@ -537,4 +545,7 @@ void __init xen_arch_setup(void)
disable_cpufreq();
WARN_ON(set_pm_idle_to_default());
fiddle_vdso();
+#ifdef CONFIG_NUMA
+ numa_off = 1;
+#endif
}
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index ae8a00c39de..45329c8c226 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -30,7 +30,7 @@ void xen_arch_hvm_post_suspend(int suspend_cancelled)
{
#ifdef CONFIG_XEN_PVHVM
int cpu;
- xen_hvm_resume_shared_info();
+ xen_hvm_init_shared_info();
xen_callback_vector();
xen_unplug_emulated_devices();
if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 1e4329e04e0..202d4c15015 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -41,7 +41,7 @@ void xen_enable_syscall(void);
void xen_vcpu_restore(void);
void xen_callback_vector(void);
-void xen_hvm_resume_shared_info(void);
+void xen_hvm_init_shared_info(void);
void xen_unplug_emulated_devices(void);
void __init xen_build_dynamic_phys_to_machine(void);