author     H. Peter Anvin <hpa@zytor.com>    2012-05-30 12:11:26 -0700
committer  H. Peter Anvin <hpa@zytor.com>    2012-05-30 12:11:32 -0700
commit     bbd771474ec44b516107685d77e1c80bbe09f141
tree       0cb15781539a68f27b4ea6c89f827282630cbce6
parent     403e1c5b7495d7b80fae9fc4d0a7a6f5abdc3307
parent     319b6ffc6df892e4ccffff823cc5521a4a5d2dca

Merge branch 'x86/trampoline' into x86/urgent

x86/trampoline contains an urgent commit which is necessarily on a newer
baseline.

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
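For context, a minimal sketch of how a merge commit like this one is typically produced, assuming local branch names matching the subject line (the maintainer's actual workflow is not shown on this page):

    git checkout x86/urgent      # first parent:  403e1c5b7495d7b80fae9fc4d0a7a6f5abdc3307
    git merge x86/trampoline     # second parent: 319b6ffc6df892e4ccffff823cc5521a4a5d2dca
    # resulting merge commit: bbd771474ec44b516107685d77e1c80bbe09f141

Diffstat: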
-rw-r--r--Documentation/ABI/testing/sysfs-bus-i2c-devices-lm353315
-rw-r--r--Documentation/ABI/testing/sysfs-class-backlight-driver-lm353348
-rw-r--r--Documentation/ABI/testing/sysfs-class-led-driver-lm353365
-rw-r--r--Documentation/SubmittingPatches3
-rw-r--r--Documentation/arm/SPEAr/overview.txt32
-rw-r--r--Documentation/cgroups/memory.txt37
-rw-r--r--Documentation/cgroups/resource_counter.txt8
-rw-r--r--Documentation/cris/README62
-rw-r--r--Documentation/devicetree/bindings/arm/fsl.txt12
-rw-r--r--Documentation/devicetree/bindings/arm/samsung/interrupt-combiner.txt52
-rw-r--r--Documentation/devicetree/bindings/arm/spear-timer.txt18
-rw-r--r--Documentation/devicetree/bindings/arm/spear.txt14
-rw-r--r--Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt11
-rw-r--r--Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt19
-rw-r--r--Documentation/devicetree/bindings/dma/snps-dma.txt17
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-mm-lantiq.txt38
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-mxs.txt87
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-stp-xway.txt42
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mxs.txt16
-rw-r--r--Documentation/devicetree/bindings/i2c/mux.txt60
-rw-r--r--Documentation/devicetree/bindings/i2c/samsung-i2c.txt8
-rw-r--r--Documentation/devicetree/bindings/i2c/xiic.txt22
-rw-r--r--Documentation/devicetree/bindings/mfd/da9052-i2c.txt60
-rw-r--r--Documentation/devicetree/bindings/mfd/tps65910.txt133
-rw-r--r--Documentation/devicetree/bindings/mfd/twl6040.txt62
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-esdhc.txt6
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt3
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc.txt27
-rw-r--r--Documentation/devicetree/bindings/mmc/mmci.txt19
-rw-r--r--Documentation/devicetree/bindings/mmc/mxs-mmc.txt25
-rw-r--r--Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt4
-rw-r--r--Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt4
-rw-r--r--Documentation/devicetree/bindings/net/fsl-fec.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt47
-rw-r--r--Documentation/devicetree/bindings/rtc/lpc32xx-rtc.txt15
-rw-r--r--Documentation/devicetree/bindings/rtc/spear-rtc.txt17
-rw-r--r--Documentation/devicetree/bindings/sound/omap-dmic.txt21
-rw-r--r--Documentation/devicetree/bindings/sound/omap-mcpdm.txt21
-rw-r--r--Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/tegra-usb.txt3
-rw-r--r--Documentation/dma-buf-sharing.txt109
-rw-r--r--Documentation/feature-removal-schedule.txt18
-rw-r--r--Documentation/filesystems/Locking2
-rw-r--r--Documentation/filesystems/ext3.txt6
-rw-r--r--Documentation/filesystems/porting16
-rw-r--r--Documentation/filesystems/proc.txt2
-rw-r--r--Documentation/filesystems/vfs.txt13
-rw-r--r--Documentation/i2c/muxes/i2c-mux-gpio (renamed from Documentation/i2c/muxes/gpio-i2cmux)12
-rw-r--r--Documentation/initrd.txt4
-rw-r--r--Documentation/kbuild/kbuild.txt19
-rw-r--r--Documentation/kbuild/kconfig.txt18
-rw-r--r--Documentation/kernel-parameters.txt13
-rw-r--r--Documentation/leds/ledtrig-transient.txt152
-rw-r--r--Documentation/virtual/kvm/api.txt281
-rw-r--r--Documentation/virtual/kvm/cpuid.txt6
-rw-r--r--Documentation/virtual/kvm/msr.txt4
-rw-r--r--Documentation/vm/transhuge.txt62
-rw-r--r--MAINTAINERS51
-rw-r--r--Makefile231
-rw-r--r--arch/Kconfig3
-rw-r--r--arch/alpha/include/asm/kvm_para.h1
-rw-r--r--arch/arm/Kconfig16
-rw-r--r--arch/arm/Kconfig.debug37
-rw-r--r--arch/arm/Makefile8
-rw-r--r--arch/arm/boot/dts/exynos5250-smdk5250.dts48
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi60
-rw-r--r--arch/arm/boot/dts/imx23-evk.dts43
-rw-r--r--arch/arm/boot/dts/imx23.dtsi295
-rw-r--r--arch/arm/boot/dts/imx27-phytec-phycore.dts8
-rw-r--r--arch/arm/boot/dts/imx27.dtsi14
-rw-r--r--arch/arm/boot/dts/imx28-evk.dts114
-rw-r--r--arch/arm/boot/dts/imx28.dtsi497
-rw-r--r--arch/arm/boot/dts/imx51-babbage.dts40
-rw-r--r--arch/arm/boot/dts/imx51.dtsi41
-rw-r--r--arch/arm/boot/dts/imx53-ard.dts6
-rw-r--r--arch/arm/boot/dts/imx53-evk.dts8
-rw-r--r--arch/arm/boot/dts/imx53-qsb.dts121
-rw-r--r--arch/arm/boot/dts/imx53-smd.dts16
-rw-r--r--arch/arm/boot/dts/imx53.dtsi45
-rw-r--r--arch/arm/boot/dts/imx6q-arm2.dts15
-rw-r--r--arch/arm/boot/dts/imx6q-sabrelite.dts50
-rw-r--r--arch/arm/boot/dts/imx6q-sabresd.dts53
-rw-r--r--arch/arm/boot/dts/imx6q.dtsi171
-rw-r--r--arch/arm/boot/dts/omap3-beagle.dts2
-rw-r--r--arch/arm/boot/dts/omap4-panda.dts4
-rw-r--r--arch/arm/boot/dts/omap4-sdp.dts6
-rw-r--r--arch/arm/boot/dts/spear1310-evb.dts292
-rw-r--r--arch/arm/boot/dts/spear1310.dtsi184
-rw-r--r--arch/arm/boot/dts/spear1340-evb.dts308
-rw-r--r--arch/arm/boot/dts/spear1340.dtsi56
-rw-r--r--arch/arm/boot/dts/spear13xx.dtsi262
-rw-r--r--arch/arm/boot/dts/spear300-evb.dts25
-rw-r--r--arch/arm/boot/dts/spear310-evb.dts20
-rw-r--r--arch/arm/boot/dts/spear320-evb.dts25
-rw-r--r--arch/arm/boot/dts/spear3xx.dtsi6
-rw-r--r--arch/arm/boot/dts/spear600-evb.dts29
-rw-r--r--arch/arm/boot/dts/spear600.dtsi6
-rw-r--r--arch/arm/boot/dts/tegra-cardhu.dts110
-rw-r--r--arch/arm/boot/dts/tegra-harmony.dts118
-rw-r--r--arch/arm/boot/dts/tegra-paz00.dts128
-rw-r--r--arch/arm/boot/dts/tegra-seaboard.dts213
-rw-r--r--arch/arm/boot/dts/tegra-trimslice.dts99
-rw-r--r--arch/arm/boot/dts/tegra-ventana.dts96
-rw-r--r--arch/arm/boot/dts/tegra20.dtsi275
-rw-r--r--arch/arm/boot/dts/tegra30.dtsi305
-rw-r--r--arch/arm/common/dmabounce.c84
-rw-r--r--arch/arm/configs/imx_v4_v5_defconfig3
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig8
-rw-r--r--arch/arm/configs/mxs_defconfig1
-rw-r--r--arch/arm/configs/prima2_defconfig69
-rw-r--r--arch/arm/configs/spear13xx_defconfig95
-rw-r--r--arch/arm/configs/spear3xx_defconfig4
-rw-r--r--arch/arm/configs/spear6xx_defconfig5
-rw-r--r--arch/arm/configs/tegra_defconfig11
-rw-r--r--arch/arm/include/asm/device.h4
-rw-r--r--arch/arm/include/asm/dma-contiguous.h15
-rw-r--r--arch/arm/include/asm/dma-iommu.h34
-rw-r--r--arch/arm/include/asm/dma-mapping.h407
-rw-r--r--arch/arm/include/asm/hardware/pl080.h2
-rw-r--r--arch/arm/include/asm/io.h24
-rw-r--r--arch/arm/include/asm/kvm_para.h1
-rw-r--r--arch/arm/include/asm/mach/arch.h1
-rw-r--r--arch/arm/include/asm/mach/map.h1
-rw-r--r--arch/arm/include/asm/thread_info.h8
-rw-r--r--arch/arm/kernel/entry-common.S8
-rw-r--r--arch/arm/kernel/ptrace.c3
-rw-r--r--arch/arm/kernel/setup.c17
-rw-r--r--arch/arm/kernel/signal.c85
-rw-r--r--arch/arm/kernel/signal.h2
-rw-r--r--arch/arm/kernel/traps.c2
-rw-r--r--arch/arm/mach-at91/at91sam9g45_devices.c1
-rw-r--r--arch/arm/mach-at91/include/mach/at_hdmac.h26
-rw-r--r--arch/arm/mach-davinci/board-da830-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm355-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm355-leopard.c1
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm646x-evm.c2
-rw-r--r--arch/arm/mach-davinci/board-mityomapl138.c1
-rw-r--r--arch/arm/mach-davinci/board-neuros-osd2.c1
-rw-r--r--arch/arm/mach-davinci/board-omapl138-hawk.c1
-rw-r--r--arch/arm/mach-davinci/board-sffsdr.c1
-rw-r--r--arch/arm/mach-davinci/board-tnetv107x-evm.c1
-rw-r--r--arch/arm/mach-davinci/clock.c3
-rw-r--r--arch/arm/mach-davinci/common.c7
-rw-r--r--arch/arm/mach-davinci/cpufreq.c3
-rw-r--r--arch/arm/mach-davinci/dma.c69
-rw-r--r--arch/arm/mach-davinci/include/mach/common.h19
-rw-r--r--arch/arm/mach-davinci/include/mach/debug-macro.S58
-rw-r--r--arch/arm/mach-davinci/include/mach/hardware.h2
-rw-r--r--arch/arm/mach-davinci/include/mach/serial.h10
-rw-r--r--arch/arm/mach-davinci/include/mach/uncompress.h30
-rw-r--r--arch/arm/mach-davinci/pm.c3
-rw-r--r--arch/arm/mach-dove/common.c39
-rw-r--r--arch/arm/mach-dove/dove-db-setup.c1
-rw-r--r--arch/arm/mach-ep93xx/adssphere.c1
-rw-r--r--arch/arm/mach-ep93xx/core.c7
-rw-r--r--arch/arm/mach-ep93xx/crunch.c4
-rw-r--r--arch/arm/mach-ep93xx/edb93xx.c8
-rw-r--r--arch/arm/mach-ep93xx/gesbc9312.c1
-rw-r--r--arch/arm/mach-ep93xx/include/mach/platform.h7
-rw-r--r--arch/arm/mach-ep93xx/micro9.c4
-rw-r--r--arch/arm/mach-ep93xx/simone.c1
-rw-r--r--arch/arm/mach-ep93xx/snappercl15.c1
-rw-r--r--arch/arm/mach-ep93xx/ts72xx.c1
-rw-r--r--arch/arm/mach-ep93xx/vision_ep9307.c1
-rw-r--r--arch/arm/mach-exynos/Kconfig24
-rw-r--r--arch/arm/mach-exynos/Makefile7
-rw-r--r--arch/arm/mach-exynos/Makefile.boot3
-rw-r--r--arch/arm/mach-exynos/clock-exynos4.c79
-rw-r--r--arch/arm/mach-exynos/clock-exynos4.h2
-rw-r--r--arch/arm/mach-exynos/clock-exynos4210.c11
-rw-r--r--arch/arm/mach-exynos/clock-exynos4212.c38
-rw-r--r--arch/arm/mach-exynos/clock-exynos5.c141
-rw-r--r--arch/arm/mach-exynos/common.c187
-rw-r--r--arch/arm/mach-exynos/common.h7
-rw-r--r--arch/arm/mach-exynos/dev-drm.c29
-rw-r--r--arch/arm/mach-exynos/dev-sysmmu.c457
-rw-r--r--arch/arm/mach-exynos/dma.c141
-rw-r--r--arch/arm/mach-exynos/include/mach/gpio.h9
-rw-r--r--arch/arm/mach-exynos/include/mach/irqs.h65
-rw-r--r--arch/arm/mach-exynos/include/mach/map.h45
-rw-r--r--arch/arm/mach-exynos/include/mach/regs-clock.h7
-rw-r--r--arch/arm/mach-exynos/include/mach/regs-pmu.h10
-rw-r--r--arch/arm/mach-exynos/include/mach/regs-sysmmu.h28
-rw-r--r--arch/arm/mach-exynos/include/mach/spi-clocks.h2
-rw-r--r--arch/arm/mach-exynos/include/mach/sysmmu.h88
-rw-r--r--arch/arm/mach-exynos/mach-armlex4210.c2
-rw-r--r--arch/arm/mach-exynos/mach-exynos4-dt.c1
-rw-r--r--arch/arm/mach-exynos/mach-exynos5-dt.c5
-rw-r--r--arch/arm/mach-exynos/mach-nuri.c1
-rw-r--r--arch/arm/mach-exynos/mach-origen.c1
-rw-r--r--arch/arm/mach-exynos/mach-smdk4x12.c1
-rw-r--r--arch/arm/mach-exynos/mach-smdkv310.c2
-rw-r--r--arch/arm/mach-exynos/mach-universal_c210.c1
-rw-r--r--arch/arm/mach-exynos/mct.c17
-rw-r--r--arch/arm/mach-exynos/pm.c4
-rw-r--r--arch/arm/mach-exynos/pm_domains.c3
-rw-r--r--arch/arm/mach-exynos/pmu.c24
-rw-r--r--arch/arm/mach-imx/Kconfig8
-rw-r--r--arch/arm/mach-imx/Makefile19
-rw-r--r--arch/arm/mach-imx/Makefile.boot3
-rw-r--r--arch/arm/mach-imx/clk-busy.c189
-rw-r--r--arch/arm/mach-imx/clk-gate2.c118
-rw-r--r--arch/arm/mach-imx/clk-imx1.c115
-rw-r--r--arch/arm/mach-imx/clk-imx21.c186
-rw-r--r--arch/arm/mach-imx/clk-imx25.c248
-rw-r--r--arch/arm/mach-imx/clk-imx27.c290
-rw-r--r--arch/arm/mach-imx/clk-imx31.c182
-rw-r--r--arch/arm/mach-imx/clk-imx35.c278
-rw-r--r--arch/arm/mach-imx/clk-imx51-imx53.c506
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c444
-rw-r--r--arch/arm/mach-imx/clk-pfd.c147
-rw-r--r--arch/arm/mach-imx/clk-pllv1.c66
-rw-r--r--arch/arm/mach-imx/clk-pllv2.c249
-rw-r--r--arch/arm/mach-imx/clk-pllv3.c419
-rw-r--r--arch/arm/mach-imx/clk.h83
-rw-r--r--arch/arm/mach-imx/clock-imx1.c636
-rw-r--r--arch/arm/mach-imx/clock-imx21.c1239
-rw-r--r--arch/arm/mach-imx/clock-imx25.c346
-rw-r--r--arch/arm/mach-imx/clock-imx27.c785
-rw-r--r--arch/arm/mach-imx/clock-imx31.c630
-rw-r--r--arch/arm/mach-imx/clock-imx35.c536
-rw-r--r--arch/arm/mach-imx/clock-imx6q.c2111
-rw-r--r--arch/arm/mach-imx/clock-mx51-mx53.c1675
-rw-r--r--arch/arm/mach-imx/cpu-imx5.c6
-rw-r--r--arch/arm/mach-imx/crmregs-imx3.h79
-rw-r--r--arch/arm/mach-imx/imx51-dt.c1
-rw-r--r--arch/arm/mach-imx/imx53-dt.c19
-rw-r--r--arch/arm/mach-imx/lluart.c6
-rw-r--r--arch/arm/mach-imx/mach-cpuimx51sd.c1
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c55
-rw-r--r--arch/arm/mach-imx/mach-mx51_3ds.c1
-rw-r--r--arch/arm/mach-imx/mach-mx51_babbage.c7
-rw-r--r--arch/arm/mach-imx/mach-mx51_efikamx.c42
-rw-r--r--arch/arm/mach-imx/mach-mx51_efikasb.c28
-rw-r--r--arch/arm/mach-imx/mach-pcm037.c6
-rw-r--r--arch/arm/mach-imx/mach-pcm037_eet.c5
-rw-r--r--arch/arm/mach-imx/mm-imx3.c6
-rw-r--r--arch/arm/mach-imx/mm-imx5.c6
-rw-r--r--arch/arm/mach-imx/pcm037.h6
-rw-r--r--arch/arm/mach-imx/pm-imx3.c4
-rw-r--r--arch/arm/mach-kirkwood/board-dreamplug.c1
-rw-r--r--arch/arm/mach-kirkwood/board-dt.c3
-rw-r--r--arch/arm/mach-kirkwood/common.c286
-rw-r--r--arch/arm/mach-kirkwood/common.h1
-rw-r--r--arch/arm/mach-kirkwood/include/mach/bridge-regs.h16
-rw-r--r--arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/pcie.c25
-rw-r--r--arch/arm/mach-kirkwood/rd88f6192-nas-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/t5325-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/tsx1x-common.c1
-rw-r--r--arch/arm/mach-msm/board-halibut.c6
-rw-r--r--arch/arm/mach-msm/board-mahimahi.c6
-rw-r--r--arch/arm/mach-msm/board-msm7x27.c9
-rw-r--r--arch/arm/mach-msm/board-msm7x30.c8
-rw-r--r--arch/arm/mach-msm/board-msm8960.c7
-rw-r--r--arch/arm/mach-msm/board-msm8x60.c10
-rw-r--r--arch/arm/mach-msm/board-qsd8x50.c7
-rw-r--r--arch/arm/mach-msm/board-sapphire.c6
-rw-r--r--arch/arm/mach-msm/board-trout.c6
-rw-r--r--arch/arm/mach-msm/include/mach/board.h6
-rw-r--r--arch/arm/mach-msm/smd_debug.c3
-rw-r--r--arch/arm/mach-mv78xx0/common.c45
-rw-r--r--arch/arm/mach-mxs/Kconfig10
-rw-r--r--arch/arm/mach-mxs/Makefile6
-rw-r--r--arch/arm/mach-mxs/clock-mx23.c536
-rw-r--r--arch/arm/mach-mxs/clock-mx28.c803
-rw-r--r--arch/arm/mach-mxs/clock.c211
-rw-r--r--arch/arm/mach-mxs/devices/Kconfig1
-rw-r--r--arch/arm/mach-mxs/devices/platform-dma.c21
-rw-r--r--arch/arm/mach-mxs/devices/platform-gpio-mxs.c24
-rw-r--r--arch/arm/mach-mxs/devices/platform-mxs-mmc.c21
-rw-r--r--arch/arm/mach-mxs/include/mach/clock.h62
-rw-r--r--arch/arm/mach-mxs/include/mach/common.h11
-rw-r--r--arch/arm/mach-mxs/include/mach/devices-common.h3
-rw-r--r--arch/arm/mach-mxs/mach-mx28evk.c2
-rw-r--r--arch/arm/mach-mxs/mach-mxs.c121
-rw-r--r--arch/arm/mach-mxs/mm.c16
-rw-r--r--arch/arm/mach-mxs/regs-clkctrl-mx23.h331
-rw-r--r--arch/arm/mach-mxs/regs-clkctrl-mx28.h486
-rw-r--r--arch/arm/mach-mxs/system.c16
-rw-r--r--arch/arm/mach-mxs/timer.c11
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c8
-rw-r--r--arch/arm/mach-omap1/board-fsample.c1
-rw-r--r--arch/arm/mach-omap1/board-generic.c1
-rw-r--r--arch/arm/mach-omap1/board-h2.c1
-rw-r--r--arch/arm/mach-omap1/board-h3.c1
-rw-r--r--arch/arm/mach-omap1/board-htcherald.c1
-rw-r--r--arch/arm/mach-omap1/board-innovator.c1
-rw-r--r--arch/arm/mach-omap1/board-nokia770.c1
-rw-r--r--arch/arm/mach-omap1/board-osk.c1
-rw-r--r--arch/arm/mach-omap1/board-palmte.c1
-rw-r--r--arch/arm/mach-omap1/board-palmtt.c1
-rw-r--r--arch/arm/mach-omap1/board-palmz71.c1
-rw-r--r--arch/arm/mach-omap1/board-perseus2.c1
-rw-r--r--arch/arm/mach-omap1/board-sx1.c1
-rw-r--r--arch/arm/mach-omap1/board-voiceblue.c1
-rw-r--r--arch/arm/mach-omap1/common.h19
-rw-r--r--arch/arm/mach-omap1/devices.c121
-rw-r--r--arch/arm/mach-omap1/io.c5
-rw-r--r--arch/arm/mach-omap1/serial.c3
-rw-r--r--arch/arm/mach-omap1/time.c16
-rw-r--r--arch/arm/mach-omap1/timer32k.c28
-rw-r--r--arch/arm/mach-omap2/Kconfig8
-rw-r--r--arch/arm/mach-omap2/Makefile167
-rw-r--r--arch/arm/mach-omap2/board-2430sdp.c1
-rw-r--r--arch/arm/mach-omap2/board-3430sdp.c1
-rw-r--r--arch/arm/mach-omap2/board-3630sdp.c1
-rw-r--r--arch/arm/mach-omap2/board-4430sdp.c1
-rw-r--r--arch/arm/mach-omap2/board-am3517crane.c1
-rw-r--r--arch/arm/mach-omap2/board-am3517evm.c1
-rw-r--r--arch/arm/mach-omap2/board-apollon.c1
-rw-r--r--arch/arm/mach-omap2/board-cm-t35.c2
-rw-r--r--arch/arm/mach-omap2/board-cm-t3517.c1
-rw-r--r--arch/arm/mach-omap2/board-devkit8000.c1
-rw-r--r--arch/arm/mach-omap2/board-generic.c1
-rw-r--r--arch/arm/mach-omap2/board-h4.c1
-rw-r--r--arch/arm/mach-omap2/board-igep0020.c2
-rw-r--r--arch/arm/mach-omap2/board-ldp.c1
-rw-r--r--arch/arm/mach-omap2/board-n8x0.c3
-rw-r--r--arch/arm/mach-omap2/board-omap3beagle.c1
-rw-r--r--arch/arm/mach-omap2/board-omap3evm.c1
-rw-r--r--arch/arm/mach-omap2/board-omap3logic.c2
-rw-r--r--arch/arm/mach-omap2/board-omap3pandora.c1
-rw-r--r--arch/arm/mach-omap2/board-omap3stalker.c1
-rw-r--r--arch/arm/mach-omap2/board-omap3touchbook.c1
-rw-r--r--arch/arm/mach-omap2/board-omap4panda.c1
-rw-r--r--arch/arm/mach-omap2/board-overo.c1
-rw-r--r--arch/arm/mach-omap2/board-rm680.c2
-rw-r--r--arch/arm/mach-omap2/board-rx51.c1
-rw-r--r--arch/arm/mach-omap2/board-ti8168evm.c2
-rw-r--r--arch/arm/mach-omap2/board-zoom.c2
-rw-r--r--arch/arm/mach-omap2/common.h51
-rw-r--r--arch/arm/mach-omap2/devices.c19
-rw-r--r--arch/arm/mach-omap2/dma.c11
-rw-r--r--arch/arm/mach-omap2/dsp.c27
-rw-r--r--arch/arm/mach-omap2/gpmc.c30
-rw-r--r--arch/arm/mach-omap2/hsmmc.c8
-rw-r--r--arch/arm/mach-omap2/id.c7
-rw-r--r--arch/arm/mach-omap2/include/mach/omap-wakeupgen.h8
-rw-r--r--arch/arm/mach-omap2/io.c101
-rw-r--r--arch/arm/mach-omap2/iomap.h28
-rw-r--r--arch/arm/mach-omap2/irq.c2
-rw-r--r--arch/arm/mach-omap2/mux.c3
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c2
-rw-r--r--arch/arm/mach-omap2/pm.c3
-rw-r--r--arch/arm/mach-omap2/pm24xx.c17
-rw-r--r--arch/arm/mach-omap2/pm34xx.c7
-rw-r--r--arch/arm/mach-omap2/pm44xx.c6
-rw-r--r--arch/arm/mach-omap2/powerdomains3xxx_data.c2
-rw-r--r--arch/arm/mach-omap2/timer.c118
-rw-r--r--arch/arm/mach-omap2/usb-musb.c2
-rw-r--r--arch/arm/mach-omap2/voltagedomains3xxx_data.c2
-rw-r--r--arch/arm/mach-orion5x/common.c27
-rw-r--r--arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c1
-rw-r--r--arch/arm/mach-pnx4008/core.c12
-rw-r--r--arch/arm/mach-pnx4008/pm.c4
-rw-r--r--arch/arm/mach-prima2/common.h6
-rw-r--r--arch/arm/mach-prima2/pm.c3
-rw-r--r--arch/arm/mach-prima2/prima2.c6
-rw-r--r--arch/arm/mach-s3c24xx/Kconfig5
-rw-r--r--arch/arm/mach-s3c24xx/Makefile7
-rw-r--r--arch/arm/mach-s3c24xx/clock-s3c2416.c1
-rw-r--r--arch/arm/mach-s3c24xx/clock-s3c2443.c6
-rw-r--r--arch/arm/mach-s3c24xx/common-s3c2443.c15
-rw-r--r--arch/arm/mach-s3c24xx/common.c (renamed from arch/arm/plat-s3c24xx/cpu.c)69
-rw-r--r--arch/arm/mach-s3c24xx/dma-s3c2443.c16
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/dma.h4
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/map.h5
-rw-r--r--arch/arm/mach-s3c24xx/irq-pm.c (renamed from arch/arm/plat-s3c24xx/irq-pm.c)0
-rw-r--r--arch/arm/mach-s3c24xx/pm.c (renamed from arch/arm/plat-s3c24xx/pm.c)0
-rw-r--r--arch/arm/mach-s3c24xx/setup-spi.c39
-rw-r--r--arch/arm/mach-s3c24xx/sleep.S (renamed from arch/arm/plat-s3c24xx/sleep.S)0
-rw-r--r--arch/arm/mach-s3c64xx/common.c5
-rw-r--r--arch/arm/mach-s3c64xx/common.h7
-rw-r--r--arch/arm/mach-s3c64xx/mach-anw6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-crag6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-hmt.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-mini6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-ncp.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-real6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-smartq5.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-smartq7.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-smdk6400.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-smdk6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/pm.c3
-rw-r--r--arch/arm/mach-sa1100/assabet.c1
-rw-r--r--arch/arm/mach-sa1100/badge4.c1
-rw-r--r--arch/arm/mach-sa1100/cerf.c1
-rw-r--r--arch/arm/mach-sa1100/collie.c1
-rw-r--r--arch/arm/mach-sa1100/generic.c4
-rw-r--r--arch/arm/mach-sa1100/generic.h7
-rw-r--r--arch/arm/mach-sa1100/h3100.c1
-rw-r--r--arch/arm/mach-sa1100/h3600.c1
-rw-r--r--arch/arm/mach-sa1100/hackkit.c1
-rw-r--r--arch/arm/mach-sa1100/jornada720.c1
-rw-r--r--arch/arm/mach-sa1100/lart.c1
-rw-r--r--arch/arm/mach-sa1100/nanoengine.c1
-rw-r--r--arch/arm/mach-sa1100/neponset.c1
-rw-r--r--arch/arm/mach-sa1100/pleb.c1
-rw-r--r--arch/arm/mach-sa1100/pm.c4
-rw-r--r--arch/arm/mach-sa1100/shannon.c1
-rw-r--r--arch/arm/mach-sa1100/simpad.c1
-rw-r--r--arch/arm/mach-shmobile/Makefile2
-rw-r--r--arch/arm/mach-shmobile/board-ag5evm.c1
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c1
-rw-r--r--arch/arm/mach-shmobile/board-bonito.c1
-rw-r--r--arch/arm/mach-shmobile/board-g3evm.c1
-rw-r--r--arch/arm/mach-shmobile/board-g4evm.c1
-rw-r--r--arch/arm/mach-shmobile/board-kota2.c1
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c1
-rw-r--r--arch/arm/mach-shmobile/board-marzen.c1
-rw-r--r--arch/arm/mach-shmobile/common.c24
-rw-r--r--arch/arm/mach-shmobile/cpuidle.c3
-rw-r--r--arch/arm/mach-shmobile/include/mach/common.h14
-rw-r--r--arch/arm/mach-shmobile/suspend.c3
-rw-r--r--arch/arm/mach-spear13xx/Kconfig20
-rw-r--r--arch/arm/mach-spear13xx/Makefile10
-rw-r--r--arch/arm/mach-spear13xx/Makefile.boot6
-rw-r--r--arch/arm/mach-spear13xx/headsmp.S47
-rw-r--r--arch/arm/mach-spear13xx/hotplug.c119
-rw-r--r--arch/arm/mach-spear13xx/include/mach/debug-macro.S14
-rw-r--r--arch/arm/mach-spear13xx/include/mach/dma.h128
-rw-r--r--arch/arm/mach-spear13xx/include/mach/generic.h49
-rw-r--r--arch/arm/mach-spear13xx/include/mach/gpio.h19
-rw-r--r--arch/arm/mach-spear13xx/include/mach/hardware.h1
-rw-r--r--arch/arm/mach-spear13xx/include/mach/irqs.h20
-rw-r--r--arch/arm/mach-spear13xx/include/mach/spear.h62
-rw-r--r--arch/arm/mach-spear13xx/include/mach/spear1310_misc_regs.h0
-rw-r--r--arch/arm/mach-spear13xx/include/mach/spear1340_misc_regs.h0
-rw-r--r--arch/arm/mach-spear13xx/include/mach/timex.h19
-rw-r--r--arch/arm/mach-spear13xx/include/mach/uncompress.h19
-rw-r--r--arch/arm/mach-spear13xx/platsmp.c127
-rw-r--r--arch/arm/mach-spear13xx/spear1310.c88
-rw-r--r--arch/arm/mach-spear13xx/spear1340.c192
-rw-r--r--arch/arm/mach-spear13xx/spear13xx.c197
-rw-r--r--arch/arm/mach-spear3xx/Makefile2
-rw-r--r--arch/arm/mach-spear3xx/clock.c892
-rw-r--r--arch/arm/mach-spear3xx/include/mach/generic.h21
-rw-r--r--arch/arm/mach-spear3xx/include/mach/irqs.h1
-rw-r--r--arch/arm/mach-spear3xx/include/mach/misc_regs.h2
-rw-r--r--arch/arm/mach-spear3xx/include/mach/spear.h14
-rw-r--r--arch/arm/mach-spear3xx/spear300.c1
-rw-r--r--arch/arm/mach-spear3xx/spear310.c1
-rw-r--r--arch/arm/mach-spear3xx/spear320.c12
-rw-r--r--arch/arm/mach-spear3xx/spear3xx.c4
-rw-r--r--arch/arm/mach-spear6xx/Makefile2
-rw-r--r--arch/arm/mach-spear6xx/clock.c789
-rw-r--r--arch/arm/mach-spear6xx/include/mach/generic.h2
-rw-r--r--arch/arm/mach-spear6xx/include/mach/irqs.h3
-rw-r--r--arch/arm/mach-spear6xx/include/mach/misc_regs.h2
-rw-r--r--arch/arm/mach-spear6xx/include/mach/spear.h1
-rw-r--r--arch/arm/mach-spear6xx/spear6xx.c7
-rw-r--r--arch/arm/mach-tegra/Kconfig37
-rw-r--r--arch/arm/mach-tegra/board-dt-tegra20.c1
-rw-r--r--arch/arm/mach-tegra/board-dt-tegra30.c11
-rw-r--r--arch/arm/mach-tegra/board-harmony.c1
-rw-r--r--arch/arm/mach-tegra/board-paz00.c4
-rw-r--r--arch/arm/mach-tegra/board-seaboard.c3
-rw-r--r--arch/arm/mach-tegra/board-trimslice.c3
-rw-r--r--arch/arm/mach-tegra/board.h14
-rw-r--r--arch/arm/mach-tegra/clock.c3
-rw-r--r--arch/arm/mach-tegra/common.c28
-rw-r--r--arch/arm/mach-tegra/devices.c5
-rw-r--r--arch/arm/mach-tegra/devices.h4
-rw-r--r--arch/arm/mach-tegra/include/mach/tegra-ahb.h19
-rw-r--r--arch/arm/mach-tegra/include/mach/uncompress.h176
-rw-r--r--arch/arm/mach-tegra/include/mach/usb_phy.h4
-rw-r--r--arch/arm/mach-tegra/powergate.c4
-rw-r--r--arch/arm/mach-tegra/tegra2_clocks.c4
-rw-r--r--arch/arm/mach-tegra/tegra30_clocks.c9
-rw-r--r--arch/arm/mach-tegra/usb_phy.c15
-rw-r--r--arch/arm/mach-ux500/board-mop500.c6
-rw-r--r--arch/arm/mach-ux500/clock.c6
-rw-r--r--arch/arm/mach-ux500/clock.h12
-rw-r--r--arch/arm/mach-ux500/cpu.c6
-rw-r--r--arch/arm/mach-ux500/include/mach/setup.h1
-rw-r--r--arch/arm/mm/dma-mapping.c1348
-rw-r--r--arch/arm/mm/init.c23
-rw-r--r--arch/arm/mm/mm.h3
-rw-r--r--arch/arm/mm/mmu.c31
-rw-r--r--arch/arm/mm/vmregion.h2
-rw-r--r--arch/arm/plat-mxc/clock.c11
-rw-r--r--arch/arm/plat-mxc/include/mach/clock.h4
-rw-r--r--arch/arm/plat-mxc/include/mach/common.h7
-rw-r--r--arch/arm/plat-mxc/include/mach/debug-macro.S2
-rw-r--r--arch/arm/plat-mxc/include/mach/mx6q.h2
-rw-r--r--arch/arm/plat-mxc/time.c14
-rw-r--r--arch/arm/plat-omap/counter_32k.c93
-rw-r--r--arch/arm/plat-omap/devices.c122
-rw-r--r--arch/arm/plat-omap/dma.c4
-rw-r--r--arch/arm/plat-omap/dmtimer.c2
-rw-r--r--arch/arm/plat-omap/include/plat/common.h2
-rw-r--r--arch/arm/plat-omap/include/plat/cpu.h8
-rw-r--r--arch/arm/plat-omap/include/plat/dma.h5
-rw-r--r--arch/arm/plat-omap/include/plat/dmtimer.h1
-rw-r--r--arch/arm/plat-omap/include/plat/mmc.h9
-rw-r--r--arch/arm/plat-orion/common.c104
-rw-r--r--arch/arm/plat-orion/include/plat/common.h34
-rw-r--r--arch/arm/plat-orion/include/plat/orion_wdt.h18
-rw-r--r--arch/arm/plat-orion/pcie.c4
-rw-r--r--arch/arm/plat-pxa/include/plat/pxa27x_keypad.h4
-rw-r--r--arch/arm/plat-s3c24xx/Makefile6
-rw-r--r--arch/arm/plat-s3c24xx/clock.c59
-rw-r--r--arch/arm/plat-s3c24xx/dev-uart.c100
-rw-r--r--arch/arm/plat-s5p/Kconfig140
-rw-r--r--arch/arm/plat-s5p/Makefile28
-rw-r--r--arch/arm/plat-s5p/sysmmu.c313
-rw-r--r--arch/arm/plat-samsung/Kconfig142
-rw-r--r--arch/arm/plat-samsung/Makefile13
-rw-r--r--arch/arm/plat-samsung/include/plat/cpu.h2
-rw-r--r--arch/arm/plat-samsung/include/plat/devs.h3
-rw-r--r--arch/arm/plat-samsung/include/plat/dma-pl330.h1
-rw-r--r--arch/arm/plat-samsung/include/plat/s5p-clock.h4
-rw-r--r--arch/arm/plat-samsung/include/plat/sysmmu.h95
-rw-r--r--arch/arm/plat-samsung/s5p-clock.c (renamed from arch/arm/plat-s5p/clock.c)33
-rw-r--r--arch/arm/plat-samsung/s5p-dev-mfc.c (renamed from arch/arm/plat-s5p/dev-mfc.c)4
-rw-r--r--arch/arm/plat-samsung/s5p-dev-uart.c (renamed from arch/arm/plat-s5p/dev-uart.c)78
-rw-r--r--arch/arm/plat-samsung/s5p-irq-eint.c (renamed from arch/arm/plat-s5p/irq-eint.c)3
-rw-r--r--arch/arm/plat-samsung/s5p-irq-gpioint.c (renamed from arch/arm/plat-s5p/irq-gpioint.c)3
-rw-r--r--arch/arm/plat-samsung/s5p-irq-pm.c (renamed from arch/arm/plat-s5p/irq-pm.c)3
-rw-r--r--arch/arm/plat-samsung/s5p-irq.c (renamed from arch/arm/plat-s5p/irq.c)3
-rw-r--r--arch/arm/plat-samsung/s5p-pm.c (renamed from arch/arm/plat-s5p/pm.c)3
-rw-r--r--arch/arm/plat-samsung/s5p-sleep.S (renamed from arch/arm/plat-s5p/sleep.S)3
-rw-r--r--arch/arm/plat-samsung/s5p-time.c (renamed from arch/arm/plat-s5p/s5p-time.c)3
-rw-r--r--arch/arm/plat-samsung/setup-mipiphy.c (renamed from arch/arm/plat-s5p/setup-mipiphy.c)0
-rw-r--r--arch/arm/plat-spear/Kconfig12
-rw-r--r--arch/arm/plat-spear/Makefile5
-rw-r--r--arch/arm/plat-spear/clock.c1005
-rw-r--r--arch/arm/plat-spear/include/plat/clock.h249
-rw-r--r--arch/arm/plat-spear/restart.c5
-rw-r--r--arch/arm/plat-spear/time.c39
-rw-r--r--arch/avr32/include/asm/kvm_para.h1
-rw-r--r--arch/blackfin/include/asm/kvm_para.h1
-rw-r--r--arch/c6x/include/asm/kvm_para.h1
-rw-r--r--arch/cris/Kconfig1
-rw-r--r--arch/cris/arch-v10/drivers/ds1302.c515
-rw-r--r--arch/cris/arch-v10/drivers/pcf8563.c380
-rw-r--r--arch/cris/arch-v10/kernel/fasttimer.c2
-rw-r--r--arch/cris/arch-v10/kernel/kgdb.c2
-rw-r--r--arch/cris/arch-v10/kernel/time.c9
-rw-r--r--arch/cris/arch-v10/lib/Makefile3
-rw-r--r--arch/cris/arch-v32/drivers/cryptocop.c6
-rw-r--r--arch/cris/arch-v32/kernel/ptrace.c2
-rw-r--r--arch/cris/arch-v32/kernel/time.c7
-rw-r--r--arch/cris/include/arch-v32/arch/cache.h2
-rw-r--r--arch/cris/include/asm/Kbuild1
-rw-r--r--arch/cris/include/asm/posix_types.h2
-rw-r--r--arch/cris/include/asm/rtc.h107
-rw-r--r--arch/cris/kernel/time.c76
-rw-r--r--arch/cris/kernel/vmlinux.lds.S1
-rw-r--r--arch/cris/mm/fault.c31
-rw-r--r--arch/frv/include/asm/kvm_para.h1
-rw-r--r--arch/h8300/include/asm/kvm_para.h1
-rw-r--r--arch/hexagon/include/asm/kvm_para.h1
-rw-r--r--arch/ia64/include/asm/kvm_host.h3
-rw-r--r--arch/ia64/include/asm/kvm_para.h5
-rw-r--r--arch/ia64/kvm/kvm-ia64.c30
-rw-r--r--arch/m68k/include/asm/kvm_para.h1
-rw-r--r--arch/microblaze/Kconfig2
-rw-r--r--arch/microblaze/include/asm/kvm_para.h1
-rw-r--r--arch/microblaze/kernel/entry.S7
-rw-r--r--arch/microblaze/kernel/mcount.S2
-rw-r--r--arch/microblaze/kernel/process.c6
-rw-r--r--arch/microblaze/mm/fault.c33
-rw-r--r--arch/mips/Kconfig15
-rw-r--r--arch/mips/Makefile4
-rw-r--r--arch/mips/alchemy/devboards/db1200.c1
-rw-r--r--arch/mips/ath79/Kconfig25
-rw-r--r--arch/mips/ath79/Makefile2
-rw-r--r--arch/mips/ath79/clock.c81
-rw-r--r--arch/mips/ath79/common.c9
-rw-r--r--arch/mips/ath79/dev-common.c3
-rw-r--r--arch/mips/ath79/dev-gpio-buttons.c4
-rw-r--r--arch/mips/ath79/dev-leds-gpio.c4
-rw-r--r--arch/mips/ath79/dev-wmac.c30
-rw-r--r--arch/mips/ath79/early_printk.c3
-rw-r--r--arch/mips/ath79/gpio.c47
-rw-r--r--arch/mips/ath79/irq.c147
-rw-r--r--arch/mips/ath79/mach-db120.c134
-rw-r--r--arch/mips/ath79/mach-pb44.c2
-rw-r--r--arch/mips/ath79/mach-ubnt-xm.c43
-rw-r--r--arch/mips/ath79/machtypes.h1
-rw-r--r--arch/mips/ath79/pci.c130
-rw-r--r--arch/mips/ath79/pci.h34
-rw-r--r--arch/mips/ath79/setup.c45
-rw-r--r--arch/mips/bcm63xx/boards/Makefile2
-rw-r--r--arch/mips/cavium-octeon/setup.c1
-rw-r--r--arch/mips/cavium-octeon/smp.c6
-rw-r--r--arch/mips/fw/arc/Makefile2
-rw-r--r--arch/mips/include/asm/clkdev.h25
-rw-r--r--arch/mips/include/asm/kvm_para.h1
-rw-r--r--arch/mips/include/asm/mach-ath79/ar71xx_regs.h91
-rw-r--r--arch/mips/include/asm/mach-ath79/ath79.h23
-rw-r--r--arch/mips/include/asm/mach-ath79/irq.h10
-rw-r--r--arch/mips/include/asm/mach-ath79/pci-ath724x.h21
-rw-r--r--arch/mips/include/asm/mach-ath79/pci.h28
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h1
-rw-r--r--arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h23
-rw-r--r--arch/mips/include/asm/mach-lantiq/falcon/irq.h18
-rw-r--r--arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h67
-rw-r--r--arch/mips/include/asm/mach-lantiq/gpio.h16
-rw-r--r--arch/mips/include/asm/mach-lantiq/lantiq.h34
-rw-r--r--arch/mips/include/asm/mach-lantiq/lantiq_platform.h33
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h44
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h136
-rw-r--r--arch/mips/include/asm/mips-boards/generic.h4
-rw-r--r--arch/mips/include/asm/module.h1
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pcieep-defs.h1365
-rw-r--r--arch/mips/include/asm/pci.h6
-rw-r--r--arch/mips/include/asm/prom.h26
-rw-r--r--arch/mips/include/asm/setup.h3
-rw-r--r--arch/mips/include/asm/sparsemem.h6
-rw-r--r--arch/mips/include/asm/termios.h2
-rw-r--r--arch/mips/include/asm/traps.h1
-rw-r--r--arch/mips/include/asm/uasm.h2
-rw-r--r--arch/mips/jz4740/Makefile2
-rw-r--r--arch/mips/kernel/cpu-probe.c54
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c3
-rw-r--r--arch/mips/kernel/proc.c26
-rw-r--r--arch/mips/kernel/prom.c13
-rw-r--r--arch/mips/kernel/setup.c2
-rw-r--r--arch/mips/kernel/smp.c2
-rw-r--r--arch/mips/kernel/traps.c17
-rw-r--r--arch/mips/lantiq/Kconfig16
-rw-r--r--arch/mips/lantiq/Makefile5
-rw-r--r--arch/mips/lantiq/Platform1
-rw-r--r--arch/mips/lantiq/clk.c146
-rw-r--r--arch/mips/lantiq/clk.h68
-rw-r--r--arch/mips/lantiq/devices.c120
-rw-r--r--arch/mips/lantiq/devices.h23
-rw-r--r--arch/mips/lantiq/dts/Makefile4
-rw-r--r--arch/mips/lantiq/dts/danube.dtsi105
-rw-r--r--arch/mips/lantiq/dts/easy50712.dts113
-rw-r--r--arch/mips/lantiq/early_printk.c17
-rw-r--r--arch/mips/lantiq/falcon/Makefile1
-rw-r--r--arch/mips/lantiq/falcon/prom.c87
-rw-r--r--arch/mips/lantiq/falcon/reset.c90
-rw-r--r--arch/mips/lantiq/falcon/sysctrl.c260
-rw-r--r--arch/mips/lantiq/irq.c255
-rw-r--r--arch/mips/lantiq/machtypes.h20
-rw-r--r--arch/mips/lantiq/prom.c74
-rw-r--r--arch/mips/lantiq/prom.h8
-rw-r--r--arch/mips/lantiq/setup.c66
-rw-r--r--arch/mips/lantiq/xway/Kconfig23
-rw-r--r--arch/mips/lantiq/xway/Makefile8
-rw-r--r--arch/mips/lantiq/xway/clk-ase.c48
-rw-r--r--arch/mips/lantiq/xway/clk-xway.c223
-rw-r--r--arch/mips/lantiq/xway/clk.c151
-rw-r--r--arch/mips/lantiq/xway/devices.c119
-rw-r--r--arch/mips/lantiq/xway/devices.h20
-rw-r--r--arch/mips/lantiq/xway/dma.c61
-rw-r--r--arch/mips/lantiq/xway/ebu.c52
-rw-r--r--arch/mips/lantiq/xway/gpio.c12
-rw-r--r--arch/mips/lantiq/xway/gpio_ebu.c126
-rw-r--r--arch/mips/lantiq/xway/gpio_stp.c157
-rw-r--r--arch/mips/lantiq/xway/mach-easy50601.c57
-rw-r--r--arch/mips/lantiq/xway/mach-easy50712.c74
-rw-r--r--arch/mips/lantiq/xway/pmu.c69
-rw-r--r--arch/mips/lantiq/xway/prom-ase.c39
-rw-r--r--arch/mips/lantiq/xway/prom-xway.c54
-rw-r--r--arch/mips/lantiq/xway/prom.c115
-rw-r--r--arch/mips/lantiq/xway/reset.c77
-rw-r--r--arch/mips/lantiq/xway/setup-ase.c19
-rw-r--r--arch/mips/lantiq/xway/setup-xway.c20
-rw-r--r--arch/mips/lantiq/xway/sysctrl.c371
-rw-r--r--arch/mips/mm/c-octeon.c14
-rw-r--r--arch/mips/mm/c-r4k.c14
-rw-r--r--arch/mips/oprofile/Makefile2
-rw-r--r--arch/mips/oprofile/op_model_mipsxx.c12
-rw-r--r--arch/mips/pci/Makefile6
-rw-r--r--arch/mips/pci/fixup-lantiq.c40
-rw-r--r--arch/mips/pci/ops-loongson2.c1
-rw-r--r--arch/mips/pci/pci-ar71xx.c375
-rw-r--r--arch/mips/pci/pci-ar724x.c292
-rw-r--r--arch/mips/pci/pci-ath724x.c174
-rw-r--r--arch/mips/pci/pci-lantiq.c219
-rw-r--r--arch/mips/pci/pci.c55
-rw-r--r--arch/mips/pmc-sierra/yosemite/Makefile2
-rw-r--r--arch/mips/pmc-sierra/yosemite/setup.c1
-rw-r--r--arch/mips/powertv/Makefile2
-rw-r--r--arch/mips/powertv/asic/Makefile2
-rw-r--r--arch/mips/powertv/pci/Makefile2
-rw-r--r--arch/mips/rb532/devices.c1
-rw-r--r--arch/mips/sni/setup.c1
-rw-r--r--arch/mn10300/include/asm/kvm_para.h1
-rw-r--r--arch/openrisc/Kconfig2
-rw-r--r--arch/openrisc/include/asm/Kbuild1
-rw-r--r--arch/openrisc/include/asm/kvm_para.h1
-rw-r--r--arch/openrisc/include/asm/uaccess.h40
-rw-r--r--arch/openrisc/lib/string.S99
-rw-r--r--arch/parisc/include/asm/kvm_para.h1
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/boot/dts/mpc8569mds.dts1
-rw-r--r--arch/powerpc/include/asm/cputable.h23
-rw-r--r--arch/powerpc/include/asm/dbell.h3
-rw-r--r--arch/powerpc/include/asm/hvcall.h10
-rw-r--r--arch/powerpc/include/asm/hw_irq.h1
-rw-r--r--arch/powerpc/include/asm/kvm.h1
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h18
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h3
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_asm.h8
-rw-r--r--arch/powerpc/include/asm/kvm_booke.h3
-rw-r--r--arch/powerpc/include/asm/kvm_booke_hv_asm.h49
-rw-r--r--arch/powerpc/include/asm/kvm_e500.h96
-rw-r--r--arch/powerpc/include/asm/kvm_host.h60
-rw-r--r--arch/powerpc/include/asm/kvm_para.h5
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h20
-rw-r--r--arch/powerpc/include/asm/mmu-book3e.h6
-rw-r--r--arch/powerpc/include/asm/processor.h3
-rw-r--r--arch/powerpc/include/asm/reg.h2
-rw-r--r--arch/powerpc/include/asm/reg_booke.h34
-rw-r--r--arch/powerpc/include/asm/switch_to.h1
-rw-r--r--arch/powerpc/include/asm/time.h1
-rw-r--r--arch/powerpc/include/asm/uaccess.h41
-rw-r--r--arch/powerpc/include/asm/word-at-a-time.h41
-rw-r--r--arch/powerpc/kernel/asm-offsets.c19
-rw-r--r--arch/powerpc/kernel/cpu_setup_fsl_booke.S1
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S12
-rw-r--r--arch/powerpc/kernel/head_44x.S23
-rw-r--r--arch/powerpc/kernel/head_booke.h69
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S98
-rw-r--r--arch/powerpc/kernel/idle_power7.S7
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c6
-rw-r--r--arch/powerpc/kernel/time.c3
-rw-r--r--arch/powerpc/kvm/44x.c12
-rw-r--r--arch/powerpc/kvm/44x_emulate.c51
-rw-r--r--arch/powerpc/kvm/Kconfig28
-rw-r--r--arch/powerpc/kvm/Makefile17
-rw-r--r--arch/powerpc/kvm/book3s.c7
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c31
-rw-r--r--arch/powerpc/kvm/book3s_64_slb.S2
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c150
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c3
-rw-r--r--arch/powerpc/kvm/book3s_emulate.c106
-rw-r--r--arch/powerpc/kvm/book3s_hv.c467
-rw-r--r--arch/powerpc/kvm/book3s_hv_interrupts.S9
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S185
-rw-r--r--arch/powerpc/kvm/book3s_pr.c59
-rw-r--r--arch/powerpc/kvm/book3s_pr_papr.c101
-rw-r--r--arch/powerpc/kvm/book3s_segment.S13
-rw-r--r--arch/powerpc/kvm/booke.c471
-rw-r--r--arch/powerpc/kvm/booke.h62
-rw-r--r--arch/powerpc/kvm/booke_emulate.c118
-rw-r--r--arch/powerpc/kvm/booke_interrupts.S8
-rw-r--r--arch/powerpc/kvm/bookehv_interrupts.S597
-rw-r--r--arch/powerpc/kvm/e500.c372
-rw-r--r--arch/powerpc/kvm/e500.h306
-rw-r--r--arch/powerpc/kvm/e500_emulate.c210
-rw-r--r--arch/powerpc/kvm/e500_tlb.c666
-rw-r--r--arch/powerpc/kvm/e500_tlb.h174
-rw-r--r--arch/powerpc/kvm/e500mc.c342
-rw-r--r--arch/powerpc/kvm/emulate.c197
-rw-r--r--arch/powerpc/kvm/powerpc.c94
-rw-r--r--arch/powerpc/kvm/timing.h6
-rw-r--r--arch/powerpc/lib/string.S45
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c2
-rw-r--r--arch/s390/hypfs/inode.c2
-rw-r--r--arch/s390/include/asm/kvm.h5
-rw-r--r--arch/s390/include/asm/kvm_host.h1
-rw-r--r--arch/s390/include/asm/kvm_para.h5
-rw-r--r--arch/s390/include/asm/sclp.h1
-rw-r--r--arch/s390/kvm/diag.c29
-rw-r--r--arch/s390/kvm/intercept.c1
-rw-r--r--arch/s390/kvm/kvm-s390.c87
-rw-r--r--arch/s390/kvm/kvm-s390.h1
-rw-r--r--arch/s390/kvm/priv.c31
-rw-r--r--arch/score/include/asm/kvm_para.h1
-rw-r--r--arch/sh/include/asm/kvm_para.h1
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/include/asm/Kbuild1
-rw-r--r--arch/sparc/include/asm/kvm_para.h1
-rw-r--r--arch/sparc/include/asm/uaccess_32.h22
-rw-r--r--arch/sparc/include/asm/uaccess_64.h8
-rw-r--r--arch/sparc/lib/Makefile1
-rw-r--r--arch/sparc/lib/ksyms.c2
-rw-r--r--arch/sparc/lib/strlen_user_32.S109
-rw-r--r--arch/sparc/lib/strlen_user_64.S97
-rw-r--r--arch/tile/Kconfig43
-rw-r--r--arch/tile/Makefile7
-rw-r--r--arch/tile/include/arch/spr_def_32.h56
-rw-r--r--arch/tile/include/arch/spr_def_64.h43
-rw-r--r--arch/tile/include/asm/Kbuild2
-rw-r--r--arch/tile/include/asm/atomic_32.h10
-rw-r--r--arch/tile/include/asm/bitops.h12
-rw-r--r--arch/tile/include/asm/byteorder.h20
-rw-r--r--arch/tile/include/asm/cachectl.h42
-rw-r--r--arch/tile/include/asm/compat.h3
-rw-r--r--arch/tile/include/asm/elf.h5
-rw-r--r--arch/tile/include/asm/futex.h143
-rw-r--r--arch/tile/include/asm/hardwall.h18
-rw-r--r--arch/tile/include/asm/hugetlb.h21
-rw-r--r--arch/tile/include/asm/irqflags.h34
-rw-r--r--arch/tile/include/asm/kexec.h12
-rw-r--r--arch/tile/include/asm/kvm_para.h1
-rw-r--r--arch/tile/include/asm/mmu.h2
-rw-r--r--arch/tile/include/asm/mmu_context.h8
-rw-r--r--arch/tile/include/asm/module.h40
-rw-r--r--arch/tile/include/asm/page.h18
-rw-r--r--arch/tile/include/asm/pgalloc.h92
-rw-r--r--arch/tile/include/asm/pgtable.h111
-rw-r--r--arch/tile/include/asm/pgtable_32.h40
-rw-r--r--arch/tile/include/asm/pgtable_64.h57
-rw-r--r--arch/tile/include/asm/processor.h17
-rw-r--r--arch/tile/include/asm/setup.h10
-rw-r--r--arch/tile/include/asm/syscalls.h3
-rw-r--r--arch/tile/include/asm/tlbflush.h17
-rw-r--r--arch/tile/include/asm/uaccess.h222
-rw-r--r--arch/tile/include/asm/unistd.h4
-rw-r--r--arch/tile/include/hv/drv_xgbe_intf.h2
-rw-r--r--arch/tile/include/hv/hypervisor.h325
-rw-r--r--arch/tile/kernel/Makefile3
-rw-r--r--arch/tile/kernel/entry.S3
-rw-r--r--arch/tile/kernel/hardwall.c754
-rw-r--r--arch/tile/kernel/head_32.S8
-rw-r--r--arch/tile/kernel/head_64.S22
-rw-r--r--arch/tile/kernel/hvglue.lds3
-rw-r--r--arch/tile/kernel/intvec_64.S80
-rw-r--r--arch/tile/kernel/machine_kexec.c42
-rw-r--r--arch/tile/kernel/module.c12
-rw-r--r--arch/tile/kernel/proc.c1
-rw-r--r--arch/tile/kernel/process.c16
-rw-r--r--arch/tile/kernel/relocate_kernel_32.S (renamed from arch/tile/kernel/relocate_kernel.S)0
-rw-r--r--arch/tile/kernel/relocate_kernel_64.S260
-rw-r--r--arch/tile/kernel/setup.c169
-rw-r--r--arch/tile/kernel/single_step.c16
-rw-r--r--arch/tile/kernel/smp.c2
-rw-r--r--arch/tile/kernel/sys.c10
-rw-r--r--arch/tile/kernel/sysfs.c8
-rw-r--r--arch/tile/kernel/tlb.c11
-rw-r--r--arch/tile/kernel/traps.c30
-rw-r--r--arch/tile/lib/atomic_32.c47
-rw-r--r--arch/tile/lib/exports.c8
-rw-r--r--arch/tile/lib/memchr_64.c8
-rw-r--r--arch/tile/lib/memcpy_64.c23
-rw-r--r--arch/tile/lib/memcpy_tile64.c8
-rw-r--r--arch/tile/lib/strchr_64.c15
-rw-r--r--arch/tile/lib/string-endian.h33
-rw-r--r--arch/tile/lib/strlen_64.c11
-rw-r--r--arch/tile/lib/usercopy_32.S76
-rw-r--r--arch/tile/lib/usercopy_64.S49
-rw-r--r--arch/tile/mm/fault.c34
-rw-r--r--arch/tile/mm/homecache.c1
-rw-r--r--arch/tile/mm/hugetlbpage.c285
-rw-r--r--arch/tile/mm/init.c19
-rw-r--r--arch/tile/mm/migrate.h6
-rw-r--r--arch/tile/mm/migrate_32.S36
-rw-r--r--arch/tile/mm/migrate_64.S34
-rw-r--r--arch/tile/mm/pgtable.c40
-rw-r--r--arch/um/Makefile11
-rw-r--r--arch/um/include/asm/kvm_para.h1
-rw-r--r--arch/unicore32/include/asm/kvm_para.h1
-rw-r--r--arch/x86/Kbuild2
-rw-r--r--arch/x86/Kconfig3
-rw-r--r--arch/x86/include/asm/acpi.h9
-rw-r--r--arch/x86/include/asm/dma-contiguous.h13
-rw-r--r--arch/x86/include/asm/dma-mapping.h5
-rw-r--r--arch/x86/include/asm/kvm_emulate.h4
-rw-r--r--arch/x86/include/asm/kvm_host.h13
-rw-r--r--arch/x86/include/asm/kvm_para.h24
-rw-r--r--arch/x86/include/asm/pgtable-3level.h50
-rw-r--r--arch/x86/include/asm/processor.h7
-rw-r--r--arch/x86/include/asm/pvclock-abi.h1
-rw-r--r--arch/x86/include/asm/realmode.h62
-rw-r--r--arch/x86/include/asm/sta2x11.h12
-rw-r--r--arch/x86/include/asm/trampoline.h39
-rw-r--r--arch/x86/include/asm/uaccess.h4
-rw-r--r--arch/x86/include/asm/uaccess_32.h17
-rw-r--r--arch/x86/include/asm/uaccess_64.h3
-rw-r--r--arch/x86/include/asm/word-at-a-time.h32
-rw-r--r--arch/x86/include/asm/xen/events.h1
-rw-r--r--arch/x86/include/asm/xen/page.h1
-rw-r--r--arch/x86/kernel/Makefile2
-rw-r--r--arch/x86/kernel/acpi/Makefile9
-rw-r--r--arch/x86/kernel/acpi/realmode/.gitignore3
-rw-r--r--arch/x86/kernel/acpi/realmode/Makefile59
-rw-r--r--arch/x86/kernel/acpi/realmode/bioscall.S1
-rw-r--r--arch/x86/kernel/acpi/realmode/copy.S1
-rw-r--r--arch/x86/kernel/acpi/realmode/regs.c1
-rw-r--r--arch/x86/kernel/acpi/realmode/video-bios.c1
-rw-r--r--arch/x86/kernel/acpi/realmode/video-mode.c1
-rw-r--r--arch/x86/kernel/acpi/realmode/video-vesa.c1
-rw-r--r--arch/x86/kernel/acpi/realmode/video-vga.c1
-rw-r--r--arch/x86/kernel/acpi/realmode/wakeup.lds.S62
-rw-r--r--arch/x86/kernel/acpi/sleep.c33
-rw-r--r--arch/x86/kernel/acpi/sleep.h2
-rw-r--r--arch/x86/kernel/acpi/wakeup_rm.S12
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-apei.c3
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-severity.c26
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c24
-rw-r--r--arch/x86/kernel/e820.c53
-rw-r--r--arch/x86/kernel/head32.c1
-rw-r--r--arch/x86/kernel/head64.c1
-rw-r--r--arch/x86/kernel/head_32.S5
-rw-r--r--arch/x86/kernel/head_64.S4
-rw-r--r--arch/x86/kernel/kvmclock.c20
-rw-r--r--arch/x86/kernel/mpparse.c11
-rw-r--r--arch/x86/kernel/pci-dma.c18
-rw-r--r--arch/x86/kernel/pci-nommu.c8
-rw-r--r--arch/x86/kernel/reboot.c25
-rw-r--r--arch/x86/kernel/setup.c24
-rw-r--r--arch/x86/kernel/smpboot.c18
-rw-r--r--arch/x86/kernel/tboot.c7
-rw-r--r--arch/x86/kernel/trampoline.c42
-rw-r--r--arch/x86/kernel/trampoline_32.S83
-rw-r--r--arch/x86/kernel/vmlinux.lds.S12
-rw-r--r--arch/x86/kvm/Kconfig1
-rw-r--r--arch/x86/kvm/cpuid.c5
-rw-r--r--arch/x86/kvm/emulate.c293
-rw-r--r--arch/x86/kvm/i8254.c31
-rw-r--r--arch/x86/kvm/i8254.h7
-rw-r--r--arch/x86/kvm/lapic.c31
-rw-r--r--arch/x86/kvm/mmu.c345
-rw-r--r--arch/x86/kvm/mmu_audit.c10
-rw-r--r--arch/x86/kvm/paging_tmpl.h2
-rw-r--r--arch/x86/kvm/svm.c9
-rw-r--r--arch/x86/kvm/vmx.c41
-rw-r--r--arch/x86/kvm/x86.c280
-rw-r--r--arch/x86/kvm/x86.h2
-rw-r--r--arch/x86/lib/usercopy.c97
-rw-r--r--arch/x86/lib/usercopy_32.c41
-rw-r--r--arch/x86/lib/usercopy_64.c48
-rw-r--r--arch/x86/mm/init.c16
-rw-r--r--arch/x86/mm/numa.c32
-rw-r--r--arch/x86/mm/numa_emulation.c4
-rw-r--r--arch/x86/mm/pat.c42
-rw-r--r--arch/x86/mm/srat.c5
-rw-r--r--arch/x86/pci/xen.c4
-rw-r--r--arch/x86/realmode/Makefile18
-rw-r--r--arch/x86/realmode/init.c115
-rw-r--r--arch/x86/realmode/rm/.gitignore3
-rw-r--r--arch/x86/realmode/rm/Makefile82
-rw-r--r--arch/x86/realmode/rm/bioscall.S1
-rw-r--r--arch/x86/realmode/rm/copy.S1
-rw-r--r--arch/x86/realmode/rm/header.S41
-rw-r--r--arch/x86/realmode/rm/realmode.h21
-rw-r--r--arch/x86/realmode/rm/realmode.lds.S76
-rw-r--r--arch/x86/realmode/rm/reboot_32.S (renamed from arch/x86/kernel/reboot_32.S)89
-rw-r--r--arch/x86/realmode/rm/regs.c1
-rw-r--r--arch/x86/realmode/rm/stack.S19
-rw-r--r--arch/x86/realmode/rm/trampoline_32.S74
-rw-r--r--arch/x86/realmode/rm/trampoline_64.S (renamed from arch/x86/kernel/trampoline_64.S)148
-rw-r--r--arch/x86/realmode/rm/trampoline_common.S7
-rw-r--r--arch/x86/realmode/rm/video-bios.c1
-rw-r--r--arch/x86/realmode/rm/video-mode.c1
-rw-r--r--arch/x86/realmode/rm/video-vesa.c1
-rw-r--r--arch/x86/realmode/rm/video-vga.c1
-rw-r--r--arch/x86/realmode/rm/wakemain.c (renamed from arch/x86/kernel/acpi/realmode/wakemain.c)3
-rw-r--r--arch/x86/realmode/rm/wakeup.h (renamed from arch/x86/kernel/acpi/realmode/wakeup.h)10
-rw-r--r--arch/x86/realmode/rm/wakeup_asm.S (renamed from arch/x86/kernel/acpi/realmode/wakeup.S)131
-rw-r--r--arch/x86/realmode/rmpiggy.S20
-rw-r--r--arch/x86/tools/relocs.c7
-rw-r--r--arch/x86/xen/debugfs.c104
-rw-r--r--arch/x86/xen/debugfs.h4
-rw-r--r--arch/x86/xen/enlighten.c13
-rw-r--r--arch/x86/xen/mmu.c23
-rw-r--r--arch/x86/xen/p2m.c104
-rw-r--r--arch/x86/xen/setup.c171
-rw-r--r--arch/x86/xen/smp.c112
-rw-r--r--arch/x86/xen/smp.h12
-rw-r--r--arch/x86/xen/spinlock.c12
-rw-r--r--arch/x86/xen/xen-ops.h1
-rw-r--r--arch/xtensa/include/asm/kvm_para.h1
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/bgrt.c1
-rw-r--r--drivers/acpi/sleep.c6
-rw-r--r--drivers/amba/Makefile4
-rw-r--r--drivers/amba/tegra-ahb.c293
-rw-r--r--drivers/ata/sata_mv.c40
-rw-r--r--drivers/base/Kconfig89
-rw-r--r--drivers/base/Makefile1
-rw-r--r--drivers/base/dma-buf.c99
-rw-r--r--drivers/base/dma-coherent.c42
-rw-r--r--drivers/base/dma-contiguous.c401
-rw-r--r--drivers/base/node.c8
-rw-r--r--drivers/clk/Kconfig12
-rw-r--r--drivers/clk/Makefile5
-rw-r--r--drivers/clk/clk-divider.c68
-rw-r--r--drivers/clk/clk-fixed-factor.c95
-rw-r--r--drivers/clk/clk-fixed-rate.c49
-rw-r--r--drivers/clk/clk-gate.c104
-rw-r--r--drivers/clk/clk-mux.c27
-rw-r--r--drivers/clk/clk.c279
-rw-r--r--drivers/clk/mxs/Makefile8
-rw-r--r--drivers/clk/mxs/clk-div.c110
-rw-r--r--drivers/clk/mxs/clk-frac.c139
-rw-r--r--drivers/clk/mxs/clk-imx23.c205
-rw-r--r--drivers/clk/mxs/clk-imx28.c338
-rw-r--r--drivers/clk/mxs/clk-pll.c116
-rw-r--r--drivers/clk/mxs/clk-ref.c154
-rw-r--r--drivers/clk/mxs/clk.c28
-rw-r--r--drivers/clk/mxs/clk.h66
-rw-r--r--drivers/clk/spear/Makefile10
-rw-r--r--drivers/clk/spear/clk-aux-synth.c198
-rw-r--r--drivers/clk/spear/clk-frac-synth.c165
-rw-r--r--drivers/clk/spear/clk-gpt-synth.c154
-rw-r--r--drivers/clk/spear/clk-vco-pll.c363
-rw-r--r--drivers/clk/spear/clk.c36
-rw-r--r--drivers/clk/spear/clk.h134
-rw-r--r--drivers/clk/spear/spear1310_clock.c1106
-rw-r--r--drivers/clk/spear/spear1340_clock.c964
-rw-r--r--drivers/clk/spear/spear3xx_clock.c612
-rw-r--r--drivers/clk/spear/spear6xx_clock.c342
-rw-r--r--drivers/crypto/mv_cesa.c14
-rw-r--r--drivers/dma/Kconfig1
-rw-r--r--drivers/dma/amba-pl08x.c52
-rw-r--r--drivers/dma/at_hdmac.c15
-rw-r--r--drivers/dma/at_hdmac_regs.h21
-rw-r--r--drivers/dma/coh901318.c2
-rw-r--r--drivers/dma/coh901318_lli.c4
-rw-r--r--drivers/dma/dw_dmac.c26
-rw-r--r--drivers/dma/ep93xx_dma.c117
-rw-r--r--drivers/dma/imx-dma.c12
-rw-r--r--drivers/dma/imx-sdma.c108
-rw-r--r--drivers/dma/intel_mid_dma.c8
-rw-r--r--drivers/dma/ipu/ipu_idmac.c6
-rw-r--r--drivers/dma/mv_xor.c15
-rw-r--r--drivers/dma/mv_xor.h1
-rw-r--r--drivers/dma/mxs-dma.c194
-rw-r--r--drivers/dma/pch_dma.c2
-rw-r--r--drivers/dma/pl330.c1
-rw-r--r--drivers/dma/ste_dma40.c2
-rw-r--r--drivers/edac/amd64_edac.c200
-rw-r--r--drivers/edac/amd76x_edac.c42
-rw-r--r--drivers/edac/cell_edac.c42
-rw-r--r--drivers/edac/cpc925_edac.c91
-rw-r--r--drivers/edac/e752x_edac.c116
-rw-r--r--drivers/edac/e7xxx_edac.c86
-rw-r--r--drivers/edac/edac_core.h47
-rw-r--r--drivers/edac/edac_device.c27
-rw-r--r--drivers/edac/edac_mc.c716
-rw-r--r--drivers/edac/edac_mc_sysfs.c70
-rw-r--r--drivers/edac/edac_module.h2
-rw-r--r--drivers/edac/edac_pci.c6
-rw-r--r--drivers/edac/i3000_edac.c49
-rw-r--r--drivers/edac/i3200_edac.c56
-rw-r--r--drivers/edac/i5000_edac.c236
-rw-r--r--drivers/edac/i5100_edac.c106
-rw-r--r--drivers/edac/i5400_edac.c265
-rw-r--r--drivers/edac/i7300_edac.c115
-rw-r--r--drivers/edac/i7core_edac.c270
-rw-r--r--drivers/edac/i82443bxgx_edac.c41
-rw-r--r--drivers/edac/i82860_edac.c55
-rw-r--r--drivers/edac/i82875p_edac.c51
-rw-r--r--drivers/edac/i82975x_edac.c58
-rw-r--r--drivers/edac/mpc85xx_edac.c37
-rw-r--r--drivers/edac/mv64x60_edac.c47
-rw-r--r--drivers/edac/pasemi_edac.c49
-rw-r--r--drivers/edac/ppc4xx_edac.c50
-rw-r--r--drivers/edac/r82600_edac.c40
-rw-r--r--drivers/edac/sb_edac.c212
-rw-r--r--drivers/edac/tile_edac.c33
-rw-r--r--drivers/edac/x38_edac.c52
-rw-r--r--drivers/gpio/Kconfig48
-rw-r--r--drivers/gpio/Makefile4
-rw-r--r--drivers/gpio/gpio-ich.c419
-rw-r--r--drivers/gpio/gpio-mm-lantiq.c158
-rw-r--r--drivers/gpio/gpio-mxs.c156
-rw-r--r--drivers/gpio/gpio-samsung.c11
-rw-r--r--drivers/gpio/gpio-sch.c8
-rw-r--r--drivers/gpio/gpio-sta2x11.c435
-rw-r--r--drivers/gpio/gpio-stp-xway.c301
-rw-r--r--drivers/gpio/gpio-tps65910.c188
-rw-r--r--drivers/gpio/gpio-wm831x.c6
-rw-r--r--drivers/i2c/Kconfig1
-rw-r--r--drivers/i2c/busses/Kconfig15
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-davinci.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c31
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h5
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c33
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c246
-rw-r--r--drivers/i2c/busses/i2c-gpio.c7
-rw-r--r--drivers/i2c/busses/i2c-imx.c2
-rw-r--r--drivers/i2c/busses/i2c-ixp2000.c157
-rw-r--r--drivers/i2c/busses/i2c-mpc.c30
-rw-r--r--drivers/i2c/busses/i2c-mxs.c22
-rw-r--r--drivers/i2c/busses/i2c-ocores.c3
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c5
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c109
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c11
-rw-r--r--drivers/i2c/busses/i2c-tegra.c24
-rw-r--r--drivers/i2c/busses/i2c-versatile.c9
-rw-r--r--drivers/i2c/busses/i2c-xiic.c23
-rw-r--r--drivers/i2c/i2c-core.c17
-rw-r--r--drivers/i2c/i2c-mux.c42
-rw-r--r--drivers/i2c/muxes/Kconfig6
-rw-r--r--drivers/i2c/muxes/Makefile6
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c (renamed from drivers/i2c/muxes/gpio-i2cmux.c)42
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c (renamed from drivers/i2c/muxes/pca9541.c)3
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c (renamed from drivers/i2c/muxes/pca954x.c)2
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c52
-rw-r--r--drivers/input/misc/wm831x-on.c2
-rw-r--r--drivers/input/touchscreen/wm831x-ts.c9
-rw-r--r--drivers/iommu/Kconfig21
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/exynos-iommu.c1076
-rw-r--r--drivers/iommu/intel-iommu.c40
-rw-r--r--drivers/leds/Kconfig29
-rw-r--r--drivers/leds/Makefile3
-rw-r--r--drivers/leds/led-class.c21
-rw-r--r--drivers/leds/leds-da9052.c214
-rw-r--r--drivers/leds/leds-lm3530.c100
-rw-r--r--drivers/leds/leds-lm3533.c785
-rw-r--r--drivers/leds/leds-lp5521.c12
-rw-r--r--drivers/leds/leds-mc13783.c2
-rw-r--r--drivers/leds/leds-pca955x.c95
-rw-r--r--drivers/leds/ledtrig-backlight.c4
-rw-r--r--drivers/leds/ledtrig-gpio.c4
-rw-r--r--drivers/leds/ledtrig-heartbeat.c32
-rw-r--r--drivers/leds/ledtrig-timer.c54
-rw-r--r--drivers/leds/ledtrig-transient.c237
-rw-r--r--drivers/media/video/mx3_camera.c4
-rw-r--r--drivers/mfd/Kconfig76
-rw-r--r--drivers/mfd/Makefile10
-rw-r--r--drivers/mfd/ab8500-core.c423
-rw-r--r--drivers/mfd/ab8500-debugfs.c6
-rw-r--r--drivers/mfd/ab8500-gpadc.c8
-rw-r--r--drivers/mfd/ab8500-i2c.c128
-rw-r--r--drivers/mfd/ab8500-sysctrl.c6
-rw-r--r--drivers/mfd/anatop-mfd.c35
-rw-r--r--drivers/mfd/asic3.c33
-rw-r--r--drivers/mfd/cs5535-mfd.c13
-rw-r--r--drivers/mfd/da9052-core.c140
-rw-r--r--drivers/mfd/da9052-i2c.c72
-rw-r--r--drivers/mfd/da9052-spi.c19
-rw-r--r--drivers/mfd/db8500-prcmu.c35
-rw-r--r--drivers/mfd/intel_msic.c31
-rw-r--r--drivers/mfd/janz-cmodio.c17
-rw-r--r--drivers/mfd/lm3533-core.c667
-rw-r--r--drivers/mfd/lm3533-ctrlbank.c148
-rw-r--r--drivers/mfd/lpc_ich.c888
-rw-r--r--drivers/mfd/lpc_sch.c26
-rw-r--r--drivers/mfd/max77693-irq.c309
-rw-r--r--drivers/mfd/max77693.c249
-rw-r--r--drivers/mfd/mc13xxx-core.c239
-rw-r--r--drivers/mfd/mc13xxx-i2c.c128
-rw-r--r--drivers/mfd/mc13xxx-spi.c140
-rw-r--r--drivers/mfd/mc13xxx.h45
-rw-r--r--drivers/mfd/pcf50633-core.c36
-rw-r--r--drivers/mfd/rc5t583.c8
-rw-r--r--drivers/mfd/rdc321x-southbridge.c13
-rw-r--r--drivers/mfd/s5m-core.c6
-rw-r--r--drivers/mfd/sta2x11-mfd.c467
-rw-r--r--drivers/mfd/stmpe-spi.c1
-rw-r--r--drivers/mfd/tps65090.c33
-rw-r--r--drivers/mfd/tps65217.c17
-rw-r--r--drivers/mfd/tps65910-irq.c130
-rw-r--r--drivers/mfd/tps65910.c205
-rw-r--r--drivers/mfd/twl4030-irq.c1
-rw-r--r--drivers/mfd/twl6040-core.c120
-rw-r--r--drivers/mfd/twl6040-irq.c32
-rw-r--r--drivers/mfd/vx855.c12
-rw-r--r--drivers/mfd/wm831x-auxadc.c6
-rw-r--r--drivers/mfd/wm831x-core.c45
-rw-r--r--drivers/mfd/wm831x-irq.c148
-rw-r--r--drivers/mfd/wm8350-core.c31
-rw-r--r--drivers/mfd/wm8350-i2c.c61
-rw-r--r--drivers/mfd/wm8400-core.c250
-rw-r--r--drivers/mfd/wm8994-core.c25
-rw-r--r--drivers/mfd/wm8994-regmap.c1
-rw-r--r--drivers/misc/ab8500-pwm.c6
-rw-r--r--drivers/mmc/card/block.c22
-rw-r--r--drivers/mmc/card/queue.c6
-rw-r--r--drivers/mmc/core/bus.c2
-rw-r--r--drivers/mmc/core/cd-gpio.c3
-rw-r--r--drivers/mmc/core/core.c18
-rw-r--r--drivers/mmc/core/mmc.c119
-rw-r--r--drivers/mmc/core/sdio.c2
-rw-r--r--drivers/mmc/core/sdio_irq.c11
-rw-r--r--drivers/mmc/host/Kconfig17
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/atmel-mci.c469
-rw-r--r--drivers/mmc/host/davinci_mmc.c1
-rw-r--r--drivers/mmc/host/dw_mmc.c18
-rw-r--r--drivers/mmc/host/imxmmc.c1169
-rw-r--r--drivers/mmc/host/imxmmc.h64
-rw-r--r--drivers/mmc/host/mmci.c65
-rw-r--r--drivers/mmc/host/mvsdio.c14
-rw-r--r--drivers/mmc/host/mxcmmc.c39
-rw-r--r--drivers/mmc/host/mxs-mmc.c197
-rw-r--r--drivers/mmc/host/omap.c48
-rw-r--r--drivers/mmc/host/omap_hsmmc.c86
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c44
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c8
-rw-r--r--drivers/mmc/host/sdhci-spear.c82
-rw-r--r--drivers/mmc/host/sdhci-tegra.c26
-rw-r--r--drivers/mmc/host/sdhci.c4
-rw-r--r--drivers/mtd/maps/lantiq-flash.c76
-rw-r--r--drivers/mtd/nand/mxc_nand.c6
-rw-r--r--drivers/mtd/nand/orion_nand.c18
-rw-r--r--drivers/net/cris/eth_v10.c1
-rw-r--r--drivers/net/ethernet/freescale/fec.c35
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c42
-rw-r--r--drivers/net/ethernet/ti/Kconfig2
-rw-r--r--drivers/of/of_i2c.c16
-rw-r--r--drivers/of/of_pci_irq.c2
-rw-r--r--drivers/pci/pci.c2
-rw-r--r--drivers/pinctrl/spear/Kconfig10
-rw-r--r--drivers/pinctrl/spear/Makefile2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.h251
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1310.c2198
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1340.c1989
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear3xx.c103
-rw-r--r--drivers/platform/x86/toshiba_acpi.c1
-rw-r--r--drivers/power/wm831x_power.c21
-rw-r--r--drivers/regulator/anatop-regulator.c18
-rw-r--r--drivers/regulator/tps65910-regulator.c82
-rw-r--r--drivers/regulator/wm831x-dcdc.c24
-rw-r--r--drivers/regulator/wm831x-isink.c4
-rw-r--r--drivers/regulator/wm831x-ldo.c10
-rw-r--r--drivers/rtc/Kconfig42
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/rtc-ds1307.c20
-rw-r--r--drivers/rtc/rtc-ep93xx.c24
-rw-r--r--drivers/rtc/rtc-imxdi.c6
-rw-r--r--drivers/rtc/rtc-lpc32xx.c12
-rw-r--r--drivers/rtc/rtc-m41t93.c46
-rw-r--r--drivers/rtc/rtc-pcf8563.c44
-rw-r--r--drivers/rtc/rtc-pl031.c14
-rw-r--r--drivers/rtc/rtc-s3c.c2
-rw-r--r--drivers/rtc/rtc-spear.c10
-rw-r--r--drivers/rtc/rtc-tegra.c50
-rw-r--r--drivers/rtc/rtc-wm831x.c2
-rw-r--r--drivers/s390/char/sclp_cmd.c12
-rw-r--r--drivers/spi/Kconfig2
-rw-r--r--drivers/spi/spi-imx.c30
-rw-r--r--drivers/spi/spi-orion.c30
-rw-r--r--drivers/staging/android/ashmem.c8
-rw-r--r--drivers/tty/pty.c2
-rw-r--r--drivers/tty/serial/imx.c38
-rw-r--r--drivers/tty/serial/lantiq.c83
-rw-r--r--drivers/tty/serial/sb1250-duart.c1
-rw-r--r--drivers/tty/serial/zs.c1
-rw-r--r--drivers/tty/tty_ldisc.c41
-rw-r--r--drivers/usb/host/ehci-mxc.c62
-rw-r--r--drivers/usb/host/ehci-orion.c16
-rw-r--r--drivers/usb/host/ehci-tegra.c5
-rw-r--r--drivers/video/backlight/Kconfig12
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/adp5520_bl.c4
-rw-r--r--drivers/video/backlight/adp8860_bl.c28
-rw-r--r--drivers/video/backlight/adp8870_bl.c28
-rw-r--r--drivers/video/backlight/ams369fg06.c16
-rw-r--r--drivers/video/backlight/apple_bl.c21
-rw-r--r--drivers/video/backlight/backlight.c11
-rw-r--r--drivers/video/backlight/corgi_lcd.c12
-rw-r--r--drivers/video/backlight/cr_bllcd.c9
-rw-r--r--drivers/video/backlight/da903x_bl.c1
-rw-r--r--drivers/video/backlight/generic_bl.c6
-rw-r--r--drivers/video/backlight/ili9320.c9
-rw-r--r--drivers/video/backlight/jornada720_bl.c14
-rw-r--r--drivers/video/backlight/jornada720_lcd.c8
-rw-r--r--drivers/video/backlight/l4f00242t03.c27
-rw-r--r--drivers/video/backlight/lcd.c20
-rw-r--r--drivers/video/backlight/ld9040.c15
-rw-r--r--drivers/video/backlight/lm3533_bl.c423
-rw-r--r--drivers/video/backlight/lms283gf05.c9
-rw-r--r--drivers/video/backlight/ltv350qv.c24
-rw-r--r--drivers/video/backlight/omap1_bl.c4
-rw-r--r--drivers/video/backlight/pcf50633-backlight.c1
-rw-r--r--drivers/video/backlight/progear_bl.c6
-rw-r--r--drivers/video/backlight/s6e63m0.c16
-rw-r--r--drivers/video/backlight/tdo24m.c21
-rw-r--r--drivers/video/backlight/tosa_bl.c11
-rw-r--r--drivers/video/backlight/tosa_lcd.c8
-rw-r--r--drivers/video/backlight/wm831x_bl.c1
-rw-r--r--drivers/video/fbmem.c21
-rw-r--r--drivers/video/imxfb.c50
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c1
-rw-r--r--drivers/w1/masters/mxc_w1.c4
-rw-r--r--drivers/watchdog/Kconfig1
-rw-r--r--drivers/watchdog/iTCO_vendor.h6
-rw-r--r--drivers/watchdog/iTCO_vendor_support.c43
-rw-r--r--drivers/watchdog/iTCO_wdt.c529
-rw-r--r--drivers/watchdog/imx2_wdt.c2
-rw-r--r--drivers/watchdog/lantiq_wdt.c56
-rw-r--r--drivers/watchdog/orion_wdt.c16
-rw-r--r--drivers/xen/Makefile2
-rw-r--r--drivers/xen/acpi.c62
-rw-r--r--drivers/xen/events.c5
-rw-r--r--drivers/xen/grant-table.c125
-rw-r--r--drivers/xen/xen-acpi-processor.c1
-rw-r--r--drivers/xen/xen-selfballoon.c34
-rw-r--r--drivers/xen/xenbus/xenbus_comms.c6
-rw-r--r--drivers/xen/xenbus/xenbus_comms.h1
-rw-r--r--drivers/xen/xenbus/xenbus_dev_backend.c51
-rw-r--r--fs/9p/vfs_inode.c2
-rw-r--r--fs/affs/inode.c2
-rw-r--r--fs/afs/inode.c2
-rw-r--r--fs/autofs4/inode.c2
-rw-r--r--fs/bad_inode.c1
-rw-r--r--fs/bfs/inode.c2
-rw-r--r--fs/binfmt_misc.c2
-rw-r--r--fs/block_dev.c2
-rw-r--r--fs/btrfs/inode.c2
-rw-r--r--fs/cifs/Kconfig20
-rw-r--r--fs/cifs/Makefile4
-rw-r--r--fs/cifs/README5
-rw-r--r--fs/cifs/cifs_debug.c56
-rw-r--r--fs/cifs/cifs_debug.h4
-rw-r--r--fs/cifs/cifsfs.c25
-rw-r--r--fs/cifs/cifsglob.h107
-rw-r--r--fs/cifs/cifsproto.h19
-rw-r--r--fs/cifs/cifssmb.c181
-rw-r--r--fs/cifs/connect.c178
-rw-r--r--fs/cifs/file.c683
-rw-r--r--fs/cifs/ioctl.c8
-rw-r--r--fs/cifs/misc.c66
-rw-r--r--fs/cifs/readdir.c15
-rw-r--r--fs/cifs/smb1ops.c154
-rw-r--r--fs/cifs/smb2ops.c27
-rw-r--r--fs/cifs/transport.c76
-rw-r--r--fs/coda/inode.c2
-rw-r--r--fs/debugfs/file.c128
-rw-r--r--fs/ecryptfs/super.c2
-rw-r--r--fs/exofs/Kbuild2
-rw-r--r--fs/exofs/exofs.h14
-rw-r--r--fs/exofs/inode.c4
-rw-r--r--fs/exofs/super.c16
-rw-r--r--fs/exofs/sys.c200
-rw-r--r--fs/ext2/balloc.c4
-rw-r--r--fs/ext2/ialloc.c2
-rw-r--r--fs/ext2/inode.c2
-rw-r--r--fs/ext2/super.c18
-rw-r--r--fs/ext2/xattr.c1
-rw-r--r--fs/ext3/dir.c167
-rw-r--r--fs/ext3/ext3.h6
-rw-r--r--fs/ext3/hash.c4
-rw-r--r--fs/ext3/ialloc.c20
-rw-r--r--fs/ext3/inode.c6
-rw-r--r--fs/ext3/super.c6
-rw-r--r--fs/ext4/super.c8
-rw-r--r--fs/fat/inode.c2
-rw-r--r--fs/freevxfs/vxfs_inode.c2
-rw-r--r--fs/fs-writeback.c336
-rw-r--r--fs/fuse/inode.c2
-rw-r--r--fs/gfs2/super.c2
-rw-r--r--fs/hfs/inode.c2
-rw-r--r--fs/hfsplus/super.c2
-rw-r--r--fs/hostfs/hostfs_kern.c2
-rw-r--r--fs/hpfs/inode.c2
-rw-r--r--fs/hppfs/hppfs.c2
-rw-r--r--fs/hugetlbfs/inode.c2
-rw-r--r--fs/inode.c15
-rw-r--r--fs/jbd/checkpoint.c23
-rw-r--r--fs/jbd/commit.c21
-rw-r--r--fs/jbd/journal.c206
-rw-r--r--fs/jbd/transaction.c2
-rw-r--r--fs/jffs2/fs.c2
-rw-r--r--fs/jfs/inode.c2
-rw-r--r--fs/logfs/readwrite.c2
-rw-r--r--fs/minix/inode.c2
-rw-r--r--fs/namei.c22
-rw-r--r--fs/ncpfs/inode.c2
-rw-r--r--fs/nfs/Kconfig11
-rw-r--r--fs/nfs/Makefile5
-rw-r--r--fs/nfs/blocklayout/blocklayout.c90
-rw-r--r--fs/nfs/blocklayout/blocklayoutdev.c2
-rw-r--r--fs/nfs/client.c268
-rw-r--r--fs/nfs/delegation.c16
-rw-r--r--fs/nfs/delegation.h1
-rw-r--r--fs/nfs/dir.c39
-rw-r--r--fs/nfs/direct.c746
-rw-r--r--fs/nfs/file.c8
-rw-r--r--fs/nfs/fscache.c15
-rw-r--r--fs/nfs/fscache.h10
-rw-r--r--fs/nfs/getroot.c85
-rw-r--r--fs/nfs/idmap.c30
-rw-r--r--fs/nfs/inode.c122
-rw-r--r--fs/nfs/internal.h135
-rw-r--r--fs/nfs/namespace.c103
-rw-r--r--fs/nfs/netns.h5
-rw-r--r--fs/nfs/nfs2xdr.c5
-rw-r--r--fs/nfs/nfs3proc.c27
-rw-r--r--fs/nfs/nfs3xdr.c112
-rw-r--r--fs/nfs/nfs4_fs.h23
-rw-r--r--fs/nfs/nfs4filelayout.c688
-rw-r--r--fs/nfs/nfs4filelayout.h63
-rw-r--r--fs/nfs/nfs4filelayoutdev.c102
-rw-r--r--fs/nfs/nfs4namespace.c55
-rw-r--r--fs/nfs/nfs4proc.c537
-rw-r--r--fs/nfs/nfs4renewd.c2
-rw-r--r--fs/nfs/nfs4state.c225
-rw-r--r--fs/nfs/nfs4xdr.c399
-rw-r--r--fs/nfs/objlayout/objio_osd.c18
-rw-r--r--fs/nfs/objlayout/objlayout.c19
-rw-r--r--fs/nfs/pagelist.c61
-rw-r--r--fs/nfs/pnfs.c352
-rw-r--r--fs/nfs/pnfs.h127
-rw-r--r--fs/nfs/proc.c21
-rw-r--r--fs/nfs/read.c437
-rw-r--r--fs/nfs/super.c760
-rw-r--r--fs/nfs/write.c809
-rw-r--r--fs/nilfs2/inode.c4
-rw-r--r--fs/ntfs/inode.c2
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c2
-rw-r--r--fs/ocfs2/inode.c2
-rw-r--r--fs/omfs/inode.c2
-rw-r--r--fs/proc/base.c5
-rw-r--r--fs/proc/inode.c2
-rw-r--r--fs/proc/task_mmu.c2
-rw-r--r--fs/pstore/inode.c2
-rw-r--r--fs/quota/dquot.c32
-rw-r--r--fs/reiserfs/inode.c4
-rw-r--r--fs/reiserfs/super.c6
-rw-r--r--fs/sysfs/inode.c2
-rw-r--r--fs/sysv/inode.c2
-rw-r--r--fs/ubifs/super.c2
-rw-r--r--fs/udf/inode.c2
-rw-r--r--fs/ufs/inode.c2
-rw-r--r--fs/xfs/xfs_super.c2
-rw-r--r--include/asm-generic/dma-coherent.h4
-rw-r--r--include/asm-generic/dma-contiguous.h28
-rw-r--r--include/asm-generic/kvm_para.h22
-rw-r--r--include/asm-generic/pgtable.h27
-rw-r--r--include/asm-generic/word-at-a-time.h52
-rw-r--r--include/linux/Kbuild1
-rw-r--r--include/linux/amba/pl08x.h3
-rw-r--r--include/linux/apple_bl.h2
-rw-r--r--include/linux/bootmem.h3
-rw-r--r--include/linux/bug.h7
-rw-r--r--include/linux/clk-private.h99
-rw-r--r--include/linux/clk-provider.h120
-rw-r--r--include/linux/clk.h6
-rw-r--r--include/linux/compaction.h19
-rw-r--r--include/linux/debugfs.h11
-rw-r--r--include/linux/device.h4
-rw-r--r--include/linux/dma-buf.h33
-rw-r--r--include/linux/dma-contiguous.h110
-rw-r--r--include/linux/dmaengine.h6
-rw-r--r--include/linux/edac.h182
-rw-r--r--include/linux/fb.h4
-rw-r--r--include/linux/fs.h14
-rw-r--r--include/linux/fsl/mxs-dma.h12
-rw-r--r--include/linux/gfp.h12
-rw-r--r--include/linux/huge_mm.h2
-rw-r--r--include/linux/hugetlb.h8
-rw-r--r--include/linux/i2c-mux-gpio.h (renamed from include/linux/gpio-i2cmux.h)14
-rw-r--r--include/linux/i2c-mux.h3
-rw-r--r--include/linux/i2c.h1
-rw-r--r--include/linux/jbd.h18
-rw-r--r--include/linux/kallsyms.h7
-rw-r--r--include/linux/kernel-page-flags.h4
-rw-r--r--include/linux/kvm.h42
-rw-r--r--include/linux/kvm_host.h55
-rw-r--r--include/linux/lcd.h10
-rw-r--r--include/linux/led-lm3530.h2
-rw-r--r--include/linux/leds.h2
-rw-r--r--include/linux/memcontrol.h69
-rw-r--r--include/linux/mempolicy.h9
-rw-r--r--include/linux/mfd/abx500/ab8500.h18
-rw-r--r--include/linux/mfd/anatop.h4
-rw-r--r--include/linux/mfd/asic3.h2
-rw-r--r--include/linux/mfd/da9052/da9052.h19
-rw-r--r--include/linux/mfd/lm3533.h104
-rw-r--r--include/linux/mfd/lpc_ich.h48
-rw-r--r--include/linux/mfd/max77693-private.h227
-rw-r--r--include/linux/mfd/max77693.h36
-rw-r--r--include/linux/mfd/sta2x11-mfd.h324
-rw-r--r--include/linux/mfd/stmpe.h2
-rw-r--r--include/linux/mfd/tps65910.h49
-rw-r--r--include/linux/mfd/twl6040.h2
-rw-r--r--include/linux/mfd/wm831x/core.h12
-rw-r--r--include/linux/mfd/wm8350/core.h9
-rw-r--r--include/linux/mfd/wm8400-private.h14
-rw-r--r--include/linux/mfd/wm8994/core.h1
-rw-r--r--include/linux/mfd/wm8994/registers.h3
-rw-r--r--include/linux/mm.h6
-rw-r--r--include/linux/mm_inline.h24
-rw-r--r--include/linux/mm_types.h11
-rw-r--r--include/linux/mmc/card.h4
-rw-r--r--include/linux/mmc/dw_mmc.h1
-rw-r--r--include/linux/mmc/host.h2
-rw-r--r--include/linux/mmc/mmc.h60
-rw-r--r--include/linux/mmc/mxs-mmc.h (renamed from arch/arm/mach-mxs/include/mach/mmc.h)7
-rw-r--r--include/linux/mmdebug.h2
-rw-r--r--include/linux/mmzone.h100
-rw-r--r--include/linux/mv643xx_eth.h1
-rw-r--r--include/linux/nfs4.h13
-rw-r--r--include/linux/nfs_fs.h31
-rw-r--r--include/linux/nfs_fs_sb.h17
-rw-r--r--include/linux/nfs_page.h20
-rw-r--r--include/linux/nfs_xdr.h210
-rw-r--r--include/linux/of_i2c.h4
-rw-r--r--include/linux/of_pci.h2
-rw-r--r--include/linux/oom.h5
-rw-r--r--include/linux/page-isolation.h18
-rw-r--r--include/linux/pagemap.h8
-rw-r--r--include/linux/pci.h5
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/res_counter.h5
-rw-r--r--include/linux/rmap.h2
-rw-r--r--include/linux/rtc.h3
-rw-r--r--include/linux/rtc/ds1307.h22
-rw-r--r--include/linux/spi/orion_spi.h17
-rw-r--r--include/linux/stmp_device.h20
-rw-r--r--include/linux/swap.h56
-rw-r--r--include/linux/writeback.h10
-rw-r--r--include/net/sock.h22
-rw-r--r--include/trace/events/jbd.h39
-rw-r--r--include/trace/events/vmscan.h122
-rw-r--r--include/trace/events/writeback.h36
-rw-r--r--include/xen/acpi.h58
-rw-r--r--include/xen/events.h3
-rw-r--r--include/xen/grant_table.h2
-rw-r--r--include/xen/xenbus_dev.h3
-rw-r--r--ipc/mqueue.c2
-rw-r--r--kernel/cgroup.c20
-rw-r--r--kernel/fork.c12
-rw-r--r--kernel/kallsyms.c32
-rw-r--r--kernel/res_counter.c10
-rw-r--r--kernel/watchdog.c12
-rw-r--r--lib/Kconfig6
-rw-r--r--lib/Makefile3
-rw-r--r--lib/bitmap.c12
-rw-r--r--lib/list_debug.c3
-rw-r--r--lib/radix-tree.c15
-rw-r--r--lib/spinlock_debug.c2
-rw-r--r--lib/stmp_device.c80
-rw-r--r--lib/string_helpers.c8
-rw-r--r--lib/strncpy_from_user.c47
-rw-r--r--lib/strnlen_user.c138
-rw-r--r--lib/swiotlb.c8
-rw-r--r--lib/test-kstrtox.c4
-rw-r--r--lib/vsprintf.c14
-rw-r--r--mm/Kconfig12
-rw-r--r--mm/Makefile12
-rw-r--r--mm/bootmem.c134
-rw-r--r--mm/compaction.c547
-rw-r--r--mm/filemap.c39
-rw-r--r--mm/huge_memory.c29
-rw-r--r--mm/hugetlb.c33
-rw-r--r--mm/internal.h45
-rw-r--r--mm/madvise.c15
-rw-r--r--mm/memblock.c42
-rw-r--r--mm/memcontrol.c642
-rw-r--r--mm/memory-failure.c10
-rw-r--r--mm/memory.c20
-rw-r--r--mm/memory_hotplug.c20
-rw-r--r--mm/mempolicy.c36
-rw-r--r--mm/mmap.c53
-rw-r--r--mm/mmzone.c14
-rw-r--r--mm/nobootmem.c112
-rw-r--r--mm/oom_kill.c44
-rw-r--r--mm/page-writeback.c3
-rw-r--r--mm/page_alloc.c485
-rw-r--r--mm/page_isolation.c15
-rw-r--r--mm/pgtable-generic.c4
-rw-r--r--mm/readahead.c40
-rw-r--r--mm/rmap.c6
-rw-r--r--mm/shmem.c515
-rw-r--r--mm/sparse.c25
-rw-r--r--mm/swap.c129
-rw-r--r--mm/swapfile.c33
-rw-r--r--mm/thrash.c155
-rw-r--r--mm/truncate.c25
-rw-r--r--mm/vmalloc.c7
-rw-r--r--mm/vmscan.c738
-rw-r--r--mm/vmstat.c13
-rw-r--r--net/ipv4/tcp_memcontrol.c34
-rw-r--r--net/sunrpc/clnt.c2
-rw-r--r--net/sunrpc/rpc_pipe.c10
-rw-r--r--net/sunrpc/rpcb_clnt.c2
-rw-r--r--net/sunrpc/xprt.c7
-rw-r--r--scripts/coccinelle/misc/ifaddr.cocci35
-rw-r--r--scripts/coccinelle/misc/noderef.cocci65
-rwxr-xr-xscripts/config11
-rw-r--r--scripts/kconfig/conf.c22
-rw-r--r--scripts/link-vmlinux.sh221
-rw-r--r--scripts/package/builddeb2
-rw-r--r--sound/core/pcm_lib.c23
-rw-r--r--sound/pci/hda/hda_codec.c66
-rw-r--r--sound/pci/hda/hda_codec.h3
-rw-r--r--sound/pci/hda/hda_intel.c314
-rw-r--r--sound/pci/hda/patch_realtek.c38
-rw-r--r--sound/soc/kirkwood/kirkwood-i2s.c13
-rw-r--r--sound/soc/kirkwood/kirkwood.h1
-rw-r--r--sound/soc/omap/Kconfig7
-rw-r--r--sound/soc/omap/Makefile4
-rw-r--r--sound/soc/omap/mcbsp.c115
-rw-r--r--sound/soc/omap/mcbsp.h8
-rw-r--r--sound/soc/omap/omap-abe-twl6040.c68
-rw-r--r--sound/soc/omap/omap-dmic.c8
-rw-r--r--sound/soc/omap/omap-hdmi-card.c87
-rw-r--r--sound/soc/omap/omap-hdmi.c238
-rw-r--r--sound/soc/omap/omap-hdmi.h4
-rw-r--r--sound/soc/omap/omap-mcbsp.c45
-rw-r--r--sound/soc/omap/omap-mcpdm.c8
-rw-r--r--sound/soc/omap/omap4-hdmi-card.c121
-rw-r--r--tools/vm/page-types.c50
-rw-r--r--virt/kvm/Kconfig3
-rw-r--r--virt/kvm/ioapic.c10
-rw-r--r--virt/kvm/ioapic.h1
-rw-r--r--virt/kvm/irq_comm.c14
-rw-r--r--virt/kvm/kvm_main.c132
1600 files changed, 64655 insertions, 40393 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-i2c-devices-lm3533 b/Documentation/ABI/testing/sysfs-bus-i2c-devices-lm3533
new file mode 100644
index 00000000000..1b62230b33b
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-i2c-devices-lm3533
@@ -0,0 +1,15 @@
+What: /sys/bus/i2c/devices/.../output_hvled[n]
+Date: April 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Set the controlling backlight device (values 0, 1) for high-voltage
+ current sink HVLED[n] (n = 1, 2).
+
+What: /sys/bus/i2c/devices/.../output_lvled[n]
+Date: April 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Set the controlling led device (values 0..3) for low-voltage current
+ sink LVLED[n] (n = 1..5).
diff --git a/Documentation/ABI/testing/sysfs-class-backlight-driver-lm3533 b/Documentation/ABI/testing/sysfs-class-backlight-driver-lm3533
new file mode 100644
index 00000000000..77cf7ac949a
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-backlight-driver-lm3533
@@ -0,0 +1,48 @@
+What: /sys/class/backlight/<backlight>/als_channel
+Date: May 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Get the ALS output channel used as input in
+ ALS-current-control mode (0, 1), where
+
+ 0 - out_current0 (backlight 0)
+ 1 - out_current1 (backlight 1)
+
+What: /sys/class/backlight/<backlight>/als_en
+Date: May 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Enable ALS-current-control mode (0, 1).
+
+What: /sys/class/backlight/<backlight>/id
+Date: April 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Get the id of this backlight (0, 1).
+
+What: /sys/class/backlight/<backlight>/linear
+Date: April 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Set the brightness-mapping mode (0, 1), where
+
+ 0 - exponential mode
+ 1 - linear mode
+
+What: /sys/class/backlight/<backlight>/pwm
+Date: April 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Set the PWM-input control mask (5 bits), where
+
+ bit 5 - PWM-input enabled in Zone 4
+ bit 4 - PWM-input enabled in Zone 3
+ bit 3 - PWM-input enabled in Zone 2
+ bit 2 - PWM-input enabled in Zone 1
+ bit 1 - PWM-input enabled in Zone 0
+ bit 0 - PWM-input enabled
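The attributes above are ordinary sysfs files, so they can be exercised from
user space without special tooling. A minimal C sketch follows; the backlight
name "lm3533-backlight-0" is an assumption (use whatever actually appears under
/sys/class/backlight/). It selects linear brightness mapping and reads back the
backlight id:

    #include <stdio.h>

    int main(void)
    {
            /* device name below is an assumption, not part of the ABI */
            const char *base = "/sys/class/backlight/lm3533-backlight-0";
            char path[256];
            FILE *f;
            int id;

            /* select brightness-mapping mode: 1 = linear, 0 = exponential */
            snprintf(path, sizeof(path), "%s/linear", base);
            f = fopen(path, "w");
            if (!f)
                    return 1;
            fprintf(f, "1\n");
            fclose(f);

            /* read back the id of this backlight (0 or 1) */
            snprintf(path, sizeof(path), "%s/id", base);
            f = fopen(path, "r");
            if (!f)
                    return 1;
            if (fscanf(f, "%d", &id) == 1)
                    printf("backlight id: %d\n", id);
            fclose(f);
            return 0;
    }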
diff --git a/Documentation/ABI/testing/sysfs-class-led-driver-lm3533 b/Documentation/ABI/testing/sysfs-class-led-driver-lm3533
new file mode 100644
index 00000000000..620ebb3b9ba
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-led-driver-lm3533
@@ -0,0 +1,65 @@
+What: /sys/class/leds/<led>/als_channel
+Date: May 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Set the ALS output channel to use as input in
+ ALS-current-control mode (1, 2), where
+
+ 1 - out_current1
+ 2 - out_current2
+
+What: /sys/class/leds/<led>/als_en
+Date: May 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Enable ALS-current-control mode (0, 1).
+
+What: /sys/class/leds/<led>/falltime
+What: /sys/class/leds/<led>/risetime
+Date: April 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Set the pattern generator fall and rise times (0..7), where
+
+ 0 - 2048 us
+ 1 - 262 ms
+ 2 - 524 ms
+ 3 - 1.049 s
+ 4 - 2.097 s
+ 5 - 4.194 s
+ 6 - 8.389 s
+ 7 - 16.78 s
+
+What: /sys/class/leds/<led>/id
+Date: April 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Get the id of this led (0..3).
+
+What: /sys/class/leds/<led>/linear
+Date: April 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Set the brightness-mapping mode (0, 1), where
+
+ 0 - exponential mode
+ 1 - linear mode
+
+What: /sys/class/leds/<led>/pwm
+Date: April 2012
+KernelVersion: 3.5
+Contact: Johan Hovold <jhovold@gmail.com>
+Description:
+ Set the PWM-input control mask (5 bits), where
+
+ bit 5 - PWM-input enabled in Zone 4
+ bit 4 - PWM-input enabled in Zone 3
+ bit 3 - PWM-input enabled in Zone 2
+ bit 2 - PWM-input enabled in Zone 1
+ bit 1 - PWM-input enabled in Zone 0
+ bit 0 - PWM-input enabled
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index 4468ce24427..c379a2a6949 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -150,7 +150,8 @@ be able to justify all violations that remain in your patch.
Look through the MAINTAINERS file and the source code, and determine
if your change applies to a specific subsystem of the kernel, with
-an assigned maintainer. If so, e-mail that person.
+an assigned maintainer. If so, e-mail that person. The script
+scripts/get_maintainer.pl can be very useful at this step.
If no maintainer is listed, or the maintainer does not respond, send
your patch to the primary Linux kernel developer's mailing list,
diff --git a/Documentation/arm/SPEAr/overview.txt b/Documentation/arm/SPEAr/overview.txt
index 28a9af953b9..57aae7765c7 100644
--- a/Documentation/arm/SPEAr/overview.txt
+++ b/Documentation/arm/SPEAr/overview.txt
@@ -8,9 +8,8 @@ Introduction
weblink : http://www.st.com/spear
The ST Microelectronics SPEAr range of ARM9/CortexA9 System-on-Chip CPUs are
- supported by the 'spear' platform of ARM Linux. Currently SPEAr300,
- SPEAr310, SPEAr320 and SPEAr600 SOCs are supported. Support for the SPEAr13XX
- series is in progress.
+ supported by the 'spear' platform of ARM Linux. Currently SPEAr1310,
+ SPEAr1340, SPEAr300, SPEAr310, SPEAr320 and SPEAr600 SOCs are supported.
Hierarchy in SPEAr is as follows:
@@ -26,33 +25,36 @@ Introduction
- SPEAr600 (SOC)
- SPEAr600 Evaluation Board
- SPEAr13XX (13XX SOC series, based on ARM CORTEXA9)
- - SPEAr1300 (SOC)
+ - SPEAr1310 (SOC)
+ - SPEAr1310 Evaluation Board
+ - SPEAr1340 (SOC)
+ - SPEAr1340 Evaluation Board
Configuration
-------------
A generic configuration is provided for each machine, and can be used as the
default by
- make spear600_defconfig
- make spear300_defconfig
- make spear310_defconfig
- make spear320_defconfig
+ make spear13xx_defconfig
+ make spear3xx_defconfig
+ make spear6xx_defconfig
Layout
------
- The common files for multiple machine families (SPEAr3XX, SPEAr6XX and
- SPEAr13XX) are located in the platform code contained in arch/arm/plat-spear
+ The common files for multiple machine families (SPEAr3xx, SPEAr6xx and
+ SPEAr13xx) are located in the platform code contained in arch/arm/plat-spear
with headers in plat/.
Each machine series have a directory with name arch/arm/mach-spear followed by
series name. Like mach-spear3xx, mach-spear6xx and mach-spear13xx.
- Common file for machines of spear3xx family is mach-spear3xx/spear3xx.c and for
- spear6xx is mach-spear6xx/spear6xx.c. mach-spear* also contain soc/machine
- specific files, like spear300.c, spear310.c, spear320.c and spear600.c.
- mach-spear* doesn't contains board specific files as they fully support
- Flattened Device Tree.
+ The common file for machines of the spear3xx family is mach-spear3xx/spear3xx.c,
+ for spear6xx it is mach-spear6xx/spear6xx.c and for the spear13xx family it is
+ mach-spear13xx/spear13xx.c. mach-spear* also contains soc/machine specific
+ files, like spear1310.c, spear1340.c, spear300.c, spear310.c, spear320.c and
+ spear600.c. mach-spear* doesn't contain board specific files as they fully
+ support Flattened Device Tree.
Document Author
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 9b1067afb22..dd88540bb99 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -184,12 +184,14 @@ behind this approach is that a cgroup that aggressively uses a shared
page will eventually get charged for it (once it is uncharged from
the cgroup that brought it in -- this will happen on memory pressure).
+But see section 8.2: when moving a task to another cgroup, its pages may
+be recharged to the new cgroup, if move_charge_at_immigrate has been chosen.
+
Exception: If CONFIG_CGROUP_MEM_RES_CTLR_SWAP is not used.
When you do swapoff and make swapped-out pages of shmem(tmpfs) to
be backed into memory in force, charges for pages are accounted against the
caller of swapoff rather than the users of shmem.
-
2.4 Swap Extension (CONFIG_CGROUP_MEM_RES_CTLR_SWAP)
Swap Extension allows you to record charge for swap. A swapped-in page is
@@ -374,14 +376,15 @@ cgroup might have some charge associated with it, even though all
tasks have migrated away from it. (because we charge against pages, not
against tasks.)
-Such charges are freed or moved to their parent. At moving, both of RSS
-and CACHES are moved to parent.
-rmdir() may return -EBUSY if freeing/moving fails. See 5.1 also.
+We move the stats to root (if use_hierarchy==0) or to the parent (if
+use_hierarchy==1); the charge itself is not changed except that it is
+uncharged from the child.
Charges recorded in swap information are not updated at removal of a cgroup.
Recorded information is discarded and a cgroup which uses swap (swapcache)
will be charged as a new owner of it.
+About use_hierarchy, see Section 6.
5. Misc. interfaces.
@@ -394,13 +397,15 @@ will be charged as a new owner of it.
Almost all pages tracked by this memory cgroup will be unmapped and freed.
Some pages cannot be freed because they are locked or in-use. Such pages are
- moved to parent and this cgroup will be empty. This may return -EBUSY if
- VM is too busy to free/move all pages immediately.
+ moved to the parent (if use_hierarchy==1) or to root (if use_hierarchy==0) and
+ this cgroup will be empty.
The typical use case of this interface is calling it before rmdir().
Because rmdir() moves all pages to parent, some out-of-use page caches can be
moved to the parent. If you want to avoid that, force_empty will be useful.
+ About use_hierarchy, see Section 6.
+
5.2 stat file
memory.stat file includes following statistics
@@ -430,17 +435,10 @@ hierarchical_memory_limit - # of bytes of memory limit with regard to hierarchy
hierarchical_memsw_limit - # of bytes of memory+swap limit with regard to
hierarchy under which memory cgroup is.
-total_cache - sum of all children's "cache"
-total_rss - sum of all children's "rss"
-total_mapped_file - sum of all children's "cache"
-total_pgpgin - sum of all children's "pgpgin"
-total_pgpgout - sum of all children's "pgpgout"
-total_swap - sum of all children's "swap"
-total_inactive_anon - sum of all children's "inactive_anon"
-total_active_anon - sum of all children's "active_anon"
-total_inactive_file - sum of all children's "inactive_file"
-total_active_file - sum of all children's "active_file"
-total_unevictable - sum of all children's "unevictable"
+total_<counter> - # hierarchical version of <counter>, which in
+ addition to the cgroup's own value includes the
+ sum of all hierarchical children's values of
+ <counter>, e.g. total_cache
# The following additional stats are dependent on CONFIG_DEBUG_VM.
@@ -622,8 +620,7 @@ memory cgroup.
bit | what type of charges would be moved ?
-----+------------------------------------------------------------------------
0 | A charge of an anonymous page(or swap of it) used by the target task.
- | Those pages and swaps must be used only by the target task. You must
- | enable Swap Extension(see 2.4) to enable move of swap charges.
+ | You must enable Swap Extension (see 2.4) to enable moving of swap charges.
-----+------------------------------------------------------------------------
1 | A charge of file pages(normal file, tmpfs file(e.g. ipc shared memory)
| and swaps of tmpfs file) mmapped by the target task. Unlike the case of
@@ -636,8 +633,6 @@ memory cgroup.
8.3 TODO
-- Implement madvise(2) to let users decide the vma to be moved or not to be
- moved.
- All of moving charge operations are done under cgroup_mutex. It's not good
behavior to hold the mutex too long, so we may need some trick.
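As a concrete illustration of the move_charge_at_immigrate bits documented
above, the sketch below enables moving of both anonymous (bit 0) and file
(bit 1) charges for one cgroup. The cgroup mount point and the group name
"mygroup" are assumptions, not part of the documented interface:

    #include <stdio.h>

    int main(void)
    {
            /* 3 == (1 << 0) | (1 << 1): move anon and file page charges;
             * the mount path and cgroup name below are assumptions */
            FILE *f = fopen("/sys/fs/cgroup/memory/mygroup/"
                            "memory.move_charge_at_immigrate", "w");

            if (!f)
                    return 1;
            fprintf(f, "3\n");
            fclose(f);
            return 0;
    }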
diff --git a/Documentation/cgroups/resource_counter.txt b/Documentation/cgroups/resource_counter.txt
index f3c4ec3626a..0c4a344e78f 100644
--- a/Documentation/cgroups/resource_counter.txt
+++ b/Documentation/cgroups/resource_counter.txt
@@ -92,6 +92,14 @@ to work with it.
The _locked routines imply that the res_counter->lock is taken.
+ f. void res_counter_uncharge_until
+ (struct res_counter *rc, struct res_counter *top,
+ unsigned long val)
+
+ Almost the same as res_counter_uncharge(), but propagation of the
+ uncharge stops when rc == top. This is useful when killing a
+ res_counter in a child cgroup.
+
2.1 Other accounting routines
There are more routines that may help you with common needs, like
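The uncharge-until semantics described in the hunk above can be pictured with a
small self-contained model. This is illustrative C only, not the kernel
implementation; the structure is reduced to the two fields needed for the walk:

    #include <stdio.h>

    struct res_counter {
            unsigned long usage;
            struct res_counter *parent;
    };

    /* Walk from rc toward the root, uncharging each level, and stop
     * before touching top -- "propagation stops when rc == top". */
    static void res_counter_uncharge_until(struct res_counter *rc,
                                           struct res_counter *top,
                                           unsigned long val)
    {
            struct res_counter *c;

            for (c = rc; c != top; c = c->parent)
                    c->usage -= val;
    }

    int main(void)
    {
            struct res_counter root   = { 300, NULL };
            struct res_counter parent = { 200, &root };
            struct res_counter child  = { 100, &parent };

            res_counter_uncharge_until(&child, &root, 100);
            /* child and parent are uncharged, root is left untouched */
            printf("child=%lu parent=%lu root=%lu\n",
                   child.usage, parent.usage, root.usage);
            return 0;
    }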
diff --git a/Documentation/cris/README b/Documentation/cris/README
index d9b086869a6..8dbdb1a4442 100644
--- a/Documentation/cris/README
+++ b/Documentation/cris/README
@@ -1,38 +1,34 @@
-Linux 2.4 on the CRIS architecture
-==================================
-$Id: README,v 1.7 2001/04/19 12:38:32 bjornw Exp $
+Linux on the CRIS architecture
+==============================
-This is a port of Linux 2.4 to Axis Communications ETRAX 100LX embedded
-network CPU. For more information about CRIS and ETRAX please see further
-below.
+This is a port of Linux to Axis Communications ETRAX 100LX,
+ETRAX FS and ARTPEC-3 embedded network CPUs.
+
+For more information about CRIS and ETRAX please see further below.
In order to compile this you need a version of gcc with support for the
-ETRAX chip family. Please see this link for more information on how to
+ETRAX chip family. Please see this link for more information on how to
download the compiler and other tools useful when building and booting
software for the ETRAX platform:
-http://developer.axis.com/doc/software/devboard_lx/install-howto.html
-
-<more specific information should come in this document later>
+http://developer.axis.com/wiki/doku.php?id=axis:install-howto-2_20
What is CRIS ?
--------------
CRIS is an acronym for 'Code Reduced Instruction Set'. It is the CPU
architecture in Axis Communication AB's range of embedded network CPU's,
-called ETRAX. The latest CPU is called ETRAX 100LX, where LX stands for
-'Linux' because the chip was designed to be a good host for the Linux
-operating system.
+called ETRAX.
The ETRAX 100LX chip
--------------------
-For reference, please see the press-release:
+For reference, please see the following link:
-http://www.axis.com/news/us/001101_etrax.htm
+http://www.axis.com/products/dev_etrax_100lx/index.htm
-The ETRAX 100LX is a 100 MIPS processor with 8kB cache, MMU, and a very broad
-range of built-in interfaces, all with modern scatter/gather DMA.
+The ETRAX 100LX is a 100 MIPS processor with 8kB cache, MMU, and a very broad
+range of built-in interfaces, all with modern scatter/gather DMA.
Memory interfaces:
@@ -51,20 +47,28 @@ I/O interfaces:
* SCSI
* two parallel-ports
* two generic 8-bit ports
-
- (not all interfaces are available at the same time due to chip pin
+
+ (not all interfaces are available at the same time due to chip pin
multiplexing)
-The previous version of the ETRAX, the ETRAX 100, sits in almost all of
-Axis shipping thin-servers like the Axis 2100 web camera or the ETRAX 100
-developer-board. It lacks an MMU so the Linux we run on that is a version
-of uClinux (Linux 2.0 without MM-support) ported to the CRIS architecture.
-The new Linux 2.4 port has full MM and needs a CPU with an MMU, so it will
-not run on the ETRAX 100.
+ETRAX 100LX is CRISv10 architecture.
+
+
+The ETRAX FS and ARTPEC-3 chips
+-------------------------------
-A version of the Axis developer-board with ETRAX 100LX (running Linux
-2.4) is now available. For more information please see developer.axis.com.
+The ETRAX FS is a 200MHz 32-bit RISC processor with on-chip 16kB
+I-cache and 16kB D-cache and with a wide range of device interfaces
+including multiple high speed serial ports and an integrated USB 1.1 PHY.
+The ARTPEC-3 is a variant of the ETRAX FS with additional IO-units
+used by the Axis Communications network cameras.
+
+See the link below for more information:
+
+http://www.axis.com/products/dev_etrax_fs/index.htm
+
+ETRAX FS and ARTPEC-3 are both CRISv32 architectures.
Bootlog
-------
@@ -182,10 +186,6 @@ SwapFree: 0 kB
-rwxr-xr-x 1 342 100 16252 Jan 01 00:00 telnetd
-(All programs are statically linked to the libc at this point - we have not ported the
- shared libraries yet)
-
-
diff --git a/Documentation/devicetree/bindings/arm/fsl.txt b/Documentation/devicetree/bindings/arm/fsl.txt
index bfbc771a65f..ac9e7516756 100644
--- a/Documentation/devicetree/bindings/arm/fsl.txt
+++ b/Documentation/devicetree/bindings/arm/fsl.txt
@@ -1,6 +1,14 @@
Freescale i.MX Platforms Device Tree Bindings
-----------------------------------------------
+i.MX23 Evaluation Kit
+Required root node properties:
+ - compatible = "fsl,imx23-evk", "fsl,imx23";
+
+i.MX28 Evaluation Kit
+Required root node properties:
+ - compatible = "fsl,imx28-evk", "fsl,imx28";
+
i.MX51 Babbage Board
Required root node properties:
- compatible = "fsl,imx51-babbage", "fsl,imx51";
@@ -29,6 +37,10 @@ i.MX6 Quad SABRE Lite Board
Required root node properties:
- compatible = "fsl,imx6q-sabrelite", "fsl,imx6q";
+i.MX6 Quad SABRE Smart Device Board
+Required root node properties:
+ - compatible = "fsl,imx6q-sabresd", "fsl,imx6q";
+
Generic i.MX boards
-------------------
diff --git a/Documentation/devicetree/bindings/arm/samsung/interrupt-combiner.txt b/Documentation/devicetree/bindings/arm/samsung/interrupt-combiner.txt
new file mode 100644
index 00000000000..f2f2171e530
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/samsung/interrupt-combiner.txt
@@ -0,0 +1,52 @@
+* Samsung Exynos Interrupt Combiner Controller
+
+Samsung's Exynos4 architecture includes an interrupt combiner controller which
+can combine interrupt sources as a group and provide a single interrupt request
+for the group. The interrupt request from each group is connected to a parent
+interrupt controller, such as the GIC in the case of Exynos4210.
+
+The interrupt combiner controller consists of multiple combiners. Up to eight
+interrupt sources can be connected to a combiner. The combiner outputs one
+combined interrupt for its eight interrupt sources. The combined interrupt
+is usually connected to a parent interrupt controller.
+
+A single node in the device tree is used to describe the interrupt combiner
+controller module (which includes multiple combiners). A combiner in the
+interrupt controller module shares config/control registers with other
+combiners. For example, a 32-bit interrupt enable/disable config register
+can accommodate up to 4 interrupt combiners (with each combiner supporting
+up to 8 interrupt sources).
+
+Required properties:
+- compatible: should be "samsung,exynos4210-combiner".
+- interrupt-controller: Identifies the node as an interrupt controller.
+- #interrupt-cells: should be <2>. The meaning of the cells are
+ * First Cell: Combiner Group Number.
+ * Second Cell: Interrupt number within the group.
+- reg: Base address and size of interrupt combiner registers.
+- interrupts: The list of interrupts generated by the combiners which are then
+ connected to a parent interrupt controller. The format of the interrupt
+ specifier depends on the parent interrupt controller.
+
+Optional properties:
+- samsung,combiner-nr: The number of interrupt combiners supported. If this
+ property is not specified, the default number of combiners is assumed
+ to be 16.
+- interrupt-parent: phandle of the parent interrupt controller, if not
+ inherited from the parent node.
+
+
+Example:
+
+ The following is an example from the Exynos4210 SoC dtsi file.
+
+ combiner:interrupt-controller@10440000 {
+ compatible = "samsung,exynos4210-combiner";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ reg = <0x10440000 0x1000>;
+ interrupts = <0 0 0>, <0 1 0>, <0 2 0>, <0 3 0>,
+ <0 4 0>, <0 5 0>, <0 6 0>, <0 7 0>,
+ <0 8 0>, <0 9 0>, <0 10 0>, <0 11 0>,
+ <0 12 0>, <0 13 0>, <0 14 0>, <0 15 0>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/spear-timer.txt b/Documentation/devicetree/bindings/arm/spear-timer.txt
new file mode 100644
index 00000000000..c0017221cf5
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/spear-timer.txt
@@ -0,0 +1,18 @@
+* SPEAr ARM Timer
+
+** Timer node required properties:
+
+- compatible : Should be:
+ "st,spear-timer"
+- reg: Address range of the timer registers
+- interrupt-parent: Should be the phandle for the interrupt controller
+ that services interrupts for this device
+- interrupt: Should contain the timer interrupt number
+
+Example:
+
+ timer@f0000000 {
+ compatible = "st,spear-timer";
+ reg = <0xf0000000 0x400>;
+ interrupts = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/spear.txt b/Documentation/devicetree/bindings/arm/spear.txt
index aa5f355cc94..0d42949df6c 100644
--- a/Documentation/devicetree/bindings/arm/spear.txt
+++ b/Documentation/devicetree/bindings/arm/spear.txt
@@ -2,25 +2,25 @@ ST SPEAr Platforms Device Tree Bindings
---------------------------------------
Boards with the ST SPEAr600 SoC shall have the following properties:
-
Required root node property:
-
compatible = "st,spear600";
Boards with the ST SPEAr300 SoC shall have the following properties:
-
Required root node property:
-
compatible = "st,spear300";
Boards with the ST SPEAr310 SoC shall have the following properties:
-
Required root node property:
-
compatible = "st,spear310";
Boards with the ST SPEAr320 SoC shall have the following properties:
+Required root node property:
+compatible = "st,spear320";
+Boards with the ST SPEAr1310 SoC shall have the following properties:
Required root node property:
+compatible = "st,spear1310";
-compatible = "st,spear320";
+Boards with the ST SPEAr1340 SoC shall have the following properties:
+Required root node property:
+compatible = "st,spear1340";
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt
new file mode 100644
index 00000000000..234406d41c1
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt
@@ -0,0 +1,11 @@
+NVIDIA Tegra AHB
+
+Required properties:
+- compatible : "nvidia,tegra20-ahb" or "nvidia,tegra30-ahb"
+- reg : Should contain 1 register range (address and length)
+
+Example:
+ ahb: ahb@6000c004 {
+ compatible = "nvidia,tegra20-ahb";
+ reg = <0x6000c004 0x10c>; /* AHB Arbitration + Gizmo Controller */
+ };
diff --git a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
new file mode 100644
index 00000000000..ded0398d3bd
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
@@ -0,0 +1,19 @@
+* Freescale MXS DMA
+
+Required properties:
+- compatible : Should be "fsl,<chip>-dma-apbh" or "fsl,<chip>-dma-apbx"
+- reg : Should contain registers location and length
+
+Supported chips:
+imx23, imx28.
+
+Examples:
+dma-apbh@80004000 {
+ compatible = "fsl,imx28-dma-apbh";
+ reg = <0x80004000 2000>;
+};
+
+dma-apbx@80024000 {
+ compatible = "fsl,imx28-dma-apbx";
+ reg = <0x80024000 2000>;
+};
diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
new file mode 100644
index 00000000000..c0d85dbcada
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
@@ -0,0 +1,17 @@
+* Synopsys Designware DMA Controller
+
+Required properties:
+- compatible: "snps,dma-spear1340"
+- reg: Address range of the DMAC registers
+- interrupt-parent: Should be the phandle for the interrupt controller
+ that services interrupts for this device
+- interrupt: Should contain the DMAC interrupt number
+
+Example:
+
+ dma@fc000000 {
+ compatible = "snps,dma-spear1340";
+ reg = <0xfc000000 0x1000>;
+ interrupt-parent = <&vic1>;
+ interrupts = <12>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mm-lantiq.txt b/Documentation/devicetree/bindings/gpio/gpio-mm-lantiq.txt
new file mode 100644
index 00000000000..f93d51478d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-mm-lantiq.txt
@@ -0,0 +1,38 @@
+Lantiq SoC External Bus memory mapped GPIO controller
+
+By attaching hardware latches to the EBU it is possible to create output-only
+gpios. This driver configures a special memory address which, when written to,
+outputs 16 bits to the latches.
+
+The node describing the memory mapped GPIOs needs to be a child of the node
+describing the "lantiq,localbus".
+
+Required properties:
+- compatible : Should be "lantiq,gpio-mm-lantiq"
+- reg : Address and length of the register set for the device
+- #gpio-cells : Should be two. The first cell is the pin number and
+ the second cell is used to specify optional parameters (currently
+ unused).
+- gpio-controller : Marks the device node as a gpio controller.
+
+Optional properties:
+- lantiq,shadow : The default value that we shall assume as already set on the
+ shift register cascade.
+
+Example:
+
+localbus@0 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0x0 0x3ffffff /* addrsel0 */
+ 1 0 0x4000000 0x4000010>; /* addsel1 */
+ compatible = "lantiq,localbus", "simple-bus";
+
+ gpio_mm0: gpio@4000000 {
+ compatible = "lantiq,gpio-mm";
+ reg = <1 0x0 0x10>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ lantiq,shadow = <0x77f>;
+ };
+}
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mxs.txt b/Documentation/devicetree/bindings/gpio/gpio-mxs.txt
new file mode 100644
index 00000000000..0c35673f7a3
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-mxs.txt
@@ -0,0 +1,87 @@
+* Freescale MXS GPIO controller
+
+The Freescale MXS GPIO controller is part of the MXS PIN controller. The
+GPIOs are organized in ports/banks. Each port consists of 32 GPIOs.
+
+As the GPIO controller is embedded in the PIN controller and all the
+GPIO ports share the same IO space with the PIN controller, the GPIO nodes
+are represented as sub-nodes of the MXS pinctrl node.
+
+Required properties for GPIO node:
+- compatible : Should be "fsl,<soc>-gpio". The supported SoCs include
+ imx23 and imx28.
+- interrupts : Should be the port interrupt shared by all 32 pins.
+- gpio-controller : Marks the device node as a gpio controller.
+- #gpio-cells : Should be two. The first cell is the pin number and
+ the second cell is used to specify optional parameters (currently
+ unused).
+- interrupt-controller: Marks the device node as an interrupt controller.
+- #interrupt-cells : Should be 2. The first cell is the GPIO number.
+ The second cell bits[3:0] is used to specify trigger type and level flags:
+ 1 = low-to-high edge triggered.
+ 2 = high-to-low edge triggered.
+ 4 = active high level-sensitive.
+ 8 = active low level-sensitive.
+
+Note: Each GPIO port should have an alias correctly numbered in "aliases"
+node.
+
+Examples:
+
+aliases {
+ gpio0 = &gpio0;
+ gpio1 = &gpio1;
+ gpio2 = &gpio2;
+ gpio3 = &gpio3;
+ gpio4 = &gpio4;
+};
+
+pinctrl@80018000 {
+ compatible = "fsl,imx28-pinctrl", "simple-bus";
+ reg = <0x80018000 2000>;
+
+ gpio0: gpio@0 {
+ compatible = "fsl,imx28-gpio";
+ interrupts = <127>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio1: gpio@1 {
+ compatible = "fsl,imx28-gpio";
+ interrupts = <126>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio2: gpio@2 {
+ compatible = "fsl,imx28-gpio";
+ interrupts = <125>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio3: gpio@3 {
+ compatible = "fsl,imx28-gpio";
+ interrupts = <124>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio4: gpio@4 {
+ compatible = "fsl,imx28-gpio";
+ interrupts = <123>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-stp-xway.txt b/Documentation/devicetree/bindings/gpio/gpio-stp-xway.txt
new file mode 100644
index 00000000000..854de130a97
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-stp-xway.txt
@@ -0,0 +1,42 @@
+Lantiq SoC Serial To Parallel (STP) GPIO controller
+
+The Serial To Parallel (STP) is found on MIPS-based Lantiq SoCs. It is a
+peripheral controller used to drive external shift register cascades. At most
+3 groups of 8 bits can be driven. The hardware is able to allow the DSL modem
+to drive the 2 LSBs of the cascade automatically.
+
+
+Required properties:
+- compatible : Should be "lantiq,gpio-stp-xway"
+- reg : Address and length of the register set for the device
+- #gpio-cells : Should be two. The first cell is the pin number and
+ the second cell is used to specify optional parameters (currently
+ unused).
+- gpio-controller : Marks the device node as a gpio controller.
+
+Optional properties:
+- lantiq,shadow : The default value that we shall assume as already set on the
+ shift register cascade.
+- lantiq,groups : Set the 3 bit mask to select which of the 3 groups are enabled
+ in the shift register cascade.
+- lantiq,dsl : The dsl core can control the 2 LSBs of the gpio cascade. This 2 bit
+ property can enable this feature.
+- lantiq,phy1 : The gphy1 core can control 3 bits of the gpio cascade.
+- lantiq,phy2 : The gphy2 core can control 3 bits of the gpio cascade.
+- lantiq,rising : use rising instead of falling edge for the shift register
+
+Example:
+
+gpio1: stp@E100BB0 {
+ compatible = "lantiq,gpio-stp-xway";
+ reg = <0xE100BB0 0x40>;
+ #gpio-cells = <2>;
+ gpio-controller;
+
+ lantiq,shadow = <0xffff>;
+ lantiq,groups = <0x7>;
+ lantiq,dsl = <0x3>;
+ lantiq,phy1 = <0x7>;
+ lantiq,phy2 = <0x7>;
+ /* lantiq,rising; */
+};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mxs.txt b/Documentation/devicetree/bindings/i2c/i2c-mxs.txt
new file mode 100644
index 00000000000..1bfc02de1b0
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-mxs.txt
@@ -0,0 +1,16 @@
+* Freescale MXS Inter IC (I2C) Controller
+
+Required properties:
+- compatible: Should be "fsl,<chip>-i2c"
+- reg: Should contain registers location and length
+- interrupts: Should contain ERROR and DMA interrupts
+
+Examples:
+
+i2c0: i2c@80058000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx28-i2c";
+ reg = <0x80058000 2000>;
+ interrupts = <111 68>;
+};
diff --git a/Documentation/devicetree/bindings/i2c/mux.txt b/Documentation/devicetree/bindings/i2c/mux.txt
new file mode 100644
index 00000000000..af84cce5cd7
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/mux.txt
@@ -0,0 +1,60 @@
+Common i2c bus multiplexer/switch properties.
+
+An i2c bus multiplexer/switch will have several child busses that are
+numbered uniquely in a device dependent manner. The nodes for an i2c bus
+multiplexer/switch will have one child node for each child
+bus.
+
+Required properties:
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+Required properties for child nodes:
+- #address-cells = <1>;
+- #size-cells = <0>;
+- reg : The sub-bus number.
+
+Optional properties for child nodes:
+- Other properties specific to the multiplexer/switch hardware.
+- Child nodes conforming to i2c bus binding
+
+
+Example :
+
+ /*
+ An NXP pca9548 8 channel I2C multiplexer at address 0x70
+ with two NXP pca8574 GPIO expanders attached, one each to
+ ports 3 and 4.
+ */
+
+ mux@70 {
+ compatible = "nxp,pca9548";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+
+ gpio1: gpio@38 {
+ compatible = "nxp,pca8574";
+ reg = <0x38>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ };
+ };
+ i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+
+ gpio2: gpio@38 {
+ compatible = "nxp,pca8574";
+ reg = <0x38>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i2c/samsung-i2c.txt b/Documentation/devicetree/bindings/i2c/samsung-i2c.txt
index 38832c71291..b6cb5a12c67 100644
--- a/Documentation/devicetree/bindings/i2c/samsung-i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/samsung-i2c.txt
@@ -6,14 +6,18 @@ Required properties:
- compatible: value should be either of the following.
(a) "samsung, s3c2410-i2c", for i2c compatible with s3c2410 i2c.
(b) "samsung, s3c2440-i2c", for i2c compatible with s3c2440 i2c.
+ (c) "samsung, s3c2440-hdmiphy-i2c", for s3c2440-like i2c used
+ inside the HDMIPHY block found on several Samsung SoCs
- reg: physical base address of the controller and length of memory mapped
region.
- interrupts: interrupt number to the cpu.
- samsung,i2c-sda-delay: Delay (in ns) applied to data line (SDA) edges.
- - gpios: The order of the gpios should be the following: <SDA, SCL>.
- The gpio specifier depends on the gpio controller.
Optional properties:
+ - gpios: The order of the gpios should be the following: <SDA, SCL>.
+ The gpio specifier depends on the gpio controller. Required in all
+ cases except for "samsung,s3c2440-hdmiphy-i2c" whose input/output
+ lines are permanently wired to the respective client.
- samsung,i2c-slave-addr: Slave address in multi-master environment. If not
specified, default value is 0.
- samsung,i2c-max-bus-freq: Desired frequency in Hz of the bus. If not
diff --git a/Documentation/devicetree/bindings/i2c/xiic.txt b/Documentation/devicetree/bindings/i2c/xiic.txt
new file mode 100644
index 00000000000..ceabbe91ae4
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/xiic.txt
@@ -0,0 +1,22 @@
+Xilinx IIC controller:
+
+Required properties:
+- compatible : Must be "xlnx,xps-iic-2.00.a"
+- reg : IIC register location and length
+- interrupts : IIC controller interrupt
+- #address-cells = <1>
+- #size-cells = <0>
+
+Optional properties:
+- Child nodes conforming to i2c bus binding
+
+Example:
+
+ axi_iic_0: i2c@40800000 {
+ compatible = "xlnx,xps-iic-2.00.a";
+ interrupts = < 1 2 >;
+ reg = < 0x40800000 0x10000 >;
+
+ #size-cells = <0>;
+ #address-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/mfd/da9052-i2c.txt b/Documentation/devicetree/bindings/mfd/da9052-i2c.txt
new file mode 100644
index 00000000000..1857f4a6b9a
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/da9052-i2c.txt
@@ -0,0 +1,60 @@
+* Dialog DA9052/53 Power Management Integrated Circuit (PMIC)
+
+Required properties:
+- compatible : Should be "dlg,da9052", "dlg,da9053-aa",
+ "dlg,da9053-ab", or "dlg,da9053-bb"
+
+Sub-nodes:
+- regulators : Contain the regulator nodes. The DA9052/53 regulators are
+ bound using their names as listed below:
+
+ buck0 : regulator BUCK0
+ buck1 : regulator BUCK1
+ buck2 : regulator BUCK2
+ buck3 : regulator BUCK3
+ ldo4 : regulator LDO4
+ ldo5 : regulator LDO5
+ ldo6 : regulator LDO6
+ ldo7 : regulator LDO7
+ ldo8 : regulator LDO8
+ ldo9 : regulator LDO9
+ ldo10 : regulator LDO10
+ ldo11 : regulator LDO11
+ ldo12 : regulator LDO12
+ ldo13 : regulator LDO13
+
+ The bindings details of individual regulator device can be found in:
+ Documentation/devicetree/bindings/regulator/regulator.txt
+
+Examples:
+
+i2c@63fc8000 { /* I2C1 */
+ status = "okay";
+
+ pmic: dialog@48 {
+ compatible = "dlg,da9053-aa";
+ reg = <0x48>;
+
+ regulators {
+ buck0 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <2075000>;
+ };
+
+ buck1 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <2075000>;
+ };
+
+ buck2 {
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <2500000>;
+ };
+
+ buck3 {
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <2500000>;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/mfd/tps65910.txt b/Documentation/devicetree/bindings/mfd/tps65910.txt
new file mode 100644
index 00000000000..645f5eaadb3
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/tps65910.txt
@@ -0,0 +1,133 @@
+TPS65910 Power Management Integrated Circuit
+
+Required properties:
+- compatible: "ti,tps65910" or "ti,tps65911"
+- reg: I2C slave address
+- interrupts: the interrupt outputs of the controller
+- #gpio-cells: number of cells to describe a GPIO, this should be 2.
+ The first cell is the GPIO number.
+ The second cell is used to specify additional options <unused>.
+- gpio-controller: mark the device as a GPIO controller
+- #interrupt-cells: the number of cells to describe an IRQ, this should be 2.
+ The first cell is the IRQ number.
+ The second cell is the flags, encoded as the trigger masks from
+ Documentation/devicetree/bindings/interrupts.txt
+- regulators: This is the list of child nodes that specify the regulator
+ initialization data for defined regulators. Not all regulators for the given
+ device need to be present. The definition for each of these nodes is defined
+ using the standard binding for regulators found at
+ Documentation/devicetree/bindings/regulator/regulator.txt.
+
+ The valid names for regulators are:
+ tps65910: vrtc, vio, vdd1, vdd2, vdd3, vdig1, vdig2, vpll, vdac, vaux1,
+ vaux2, vaux33, vmmc
+ tps65911: vrtc, vio, vdd1, vdd3, vddctrl, ldo1, ldo2, ldo3, ldo4, ldo5,
+ ldo6, ldo7, ldo8
+
+Optional properties:
+- ti,vmbch-threshold: (tps65911) main battery charged threshold
+ comparator. (see VMBCH_VSEL in TPS65910 datasheet)
+- ti,vmbch2-threshold: (tps65911) main battery discharged threshold
+ comparator. (see VMBCH_VSEL in TPS65910 datasheet)
+- ti,en-gpio-sleep: enable sleep control for gpios
+ There should be 9 entries here, one for each gpio.
+
+Regulator Optional properties:
+- ti,regulator-ext-sleep-control: enable external sleep
+ control through external inputs [0 (not enabled), 1 (EN1), 2 (EN2) or 4(EN3)]
+ If this property is not defined, it defaults to 0 (not enabled).
+
+Example:
+
+ pmu: tps65910@d2 {
+ compatible = "ti,tps65910";
+ reg = <0xd2>;
+ interrupt-parent = <&intc>;
+ interrupts = < 0 118 0x04 >;
+
+ #gpio-cells = <2>;
+ gpio-controller;
+
+ #interrupt-cells = <2>;
+ interrupt-controller;
+
+ ti,vmbch-threshold = <0>;
+ ti,vmbch2-threshold = <0>;
+
+ ti,en-gpio-sleep = <0 0 1 0 0 0 0 0 0>;
+
+ regulators {
+ vdd1_reg: vdd1 {
+ regulator-min-microvolt = < 600000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ regulator-boot-on;
+ ti,regulator-ext-sleep-control = <0>;
+ };
+ vdd2_reg: vdd2 {
+ regulator-min-microvolt = < 600000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-always-on;
+ regulator-boot-on;
+ ti,regulator-ext-sleep-control = <4>;
+ };
+ vddctrl_reg: vddctrl {
+ regulator-min-microvolt = < 600000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-always-on;
+ regulator-boot-on;
+ ti,regulator-ext-sleep-control = <0>;
+ };
+ vio_reg: vio {
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ ti,regulator-ext-sleep-control = <1>;
+ };
+ ldo1_reg: ldo1 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3300000>;
+ ti,regulator-ext-sleep-control = <0>;
+ };
+ ldo2_reg: ldo2 {
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ ti,regulator-ext-sleep-control = <0>;
+ };
+ ldo3_reg: ldo3 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3300000>;
+ ti,regulator-ext-sleep-control = <0>;
+ };
+ ldo4_reg: ldo4 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ ti,regulator-ext-sleep-control = <0>;
+ };
+ ldo5_reg: ldo5 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3300000>;
+ ti,regulator-ext-sleep-control = <0>;
+ };
+ ldo6_reg: ldo6 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ ti,regulator-ext-sleep-control = <0>;
+ };
+ ldo7_reg: ldo7 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ regulator-boot-on;
+ ti,regulator-ext-sleep-control = <1>;
+ };
+ ldo8_reg: ldo8 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ ti,regulator-ext-sleep-control = <1>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/mfd/twl6040.txt b/Documentation/devicetree/bindings/mfd/twl6040.txt
new file mode 100644
index 00000000000..bc67c6f424a
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/twl6040.txt
@@ -0,0 +1,62 @@
+Texas Instruments TWL6040 family
+
+The TWL6040s are 8-channel high quality low-power audio codecs providing audio
+and vibra functionality on OMAP4+ platforms.
+They are connected to the host processor via i2c for commands, McPDM for audio
+data and commands.
+
+Required properties:
+- compatible : Must be "ti,twl6040";
+- reg: must be 0x4b for i2c address
+- interrupts: twl6040 has one interrupt line connected to the main SoC
+- interrupt-parent: The parent interrupt controller
+- twl6040,audpwron-gpio: Power on GPIO line for the twl6040
+
+- vio-supply: Regulator for the twl6040 VIO supply
+- v2v1-supply: Regulator for the twl6040 V2V1 supply
+
+Optional properties, nodes:
+- enable-active-high: To power on the twl6040 during boot.
+
+Vibra functionality
+Required properties:
+- vddvibl-supply: Regulator for the left vibra motor
+- vddvibr-supply: Regulator for the right vibra motor
+- vibra { }: Configuration section for vibra parameters containing the following
+ properties:
+- ti,vibldrv-res: Resistance parameter for left driver
+- ti,vibrdrv-res: Resistance parameter for right driver
+- ti,viblmotor-res: Resistance parameter for left motor
+- ti,vibrmotor-res: Resistance parameter for right motor
+
+Optional properties within vibra { } section:
+- vddvibl_uV: If the vddvibl default voltage needs to be changed
+- vddvibr_uV: If the vddvibr default voltage needs to be changed
+
+Example:
+&i2c1 {
+ twl6040: twl@4b {
+ compatible = "ti,twl6040";
+ reg = <0x4b>;
+
+ interrupts = <0 119 4>;
+ interrupt-parent = <&gic>;
+ twl6040,audpwron-gpio = <&gpio4 31 0>;
+
+ vio-supply = <&v1v8>;
+ v2v1-supply = <&v2v1>;
+ enable-active-high;
+
+ /* regulators for vibra motor */
+ vddvibl-supply = <&vbat>;
+ vddvibr-supply = <&vbat>;
+
+ vibra {
+ /* Vibra driver, motor resistance parameters */
+ ti,vibldrv-res = <8>;
+ ti,vibrdrv-res = <3>;
+ ti,viblmotor-res = <10>;
+ ti,vibrmotor-res = <10>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
index 64bcb8be973..0d93b4b0e0e 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
@@ -11,9 +11,11 @@ Required properties:
- interrupt-parent : interrupt source phandle.
- clock-frequency : specifies eSDHC base clock frequency.
- sdhci,wp-inverted : (optional) specifies that eSDHC controller
- reports inverted write-protect state;
+ reports inverted write-protect state; New devices should use
+ the generic "wp-inverted" property.
- sdhci,1-bit-only : (optional) specifies that a controller can
- only handle 1-bit data transfers.
+ only handle 1-bit data transfers. New devices should use the
+ generic "bus-width = <1>" property.
- sdhci,auto-cmd12: (optional) specifies that a controller can
only handle auto CMD12.
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index ab22fe6e73a..c7e404b3ef0 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -9,7 +9,7 @@ Required properties:
- interrupts : Should contain eSDHC interrupt
Optional properties:
-- fsl,card-wired : Indicate the card is wired to host permanently
+- non-removable : Indicate the card is wired to host permanently
- fsl,cd-internal : Indicate to use controller internal card detection
- fsl,wp-internal : Indicate to use controller internal write protection
- cd-gpios : Specify GPIOs for card detection
diff --git a/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
index 89a0084df2f..d64aea5a420 100644
--- a/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
@@ -10,7 +10,8 @@ Required properties:
Optional properties:
- gpios : may specify GPIOs in this order: Card-Detect GPIO,
- Write-Protect GPIO.
+ Write-Protect GPIO. Note that this does not follow the
+ binding from mmc.txt, for historic reasons.
- interrupts : the interrupt of a card detect interrupt.
- interrupt-parent : the phandle for the interrupt controller that
services interrupts for this device.
diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt
new file mode 100644
index 00000000000..6e70dcde0a7
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/mmc.txt
@@ -0,0 +1,27 @@
+These properties are common to multiple MMC host controllers. Any host
+that requires the respective functionality should implement them using
+these definitions.
+
+Required properties:
+- bus-width: Number of data lines, can be <1>, <4>, or <8>
+
+Optional properties:
+- cd-gpios : Specify GPIOs for card detection, see gpio binding
+- wp-gpios : Specify GPIOs for write protection, see gpio binding
+- cd-inverted: when present, polarity on the cd gpio line is inverted
+- wp-inverted: when present, polarity on the wp gpio line is inverted
+- non-removable: non-removable slot (like eMMC)
+- max-frequency: maximum operating clock frequency
+
+Example:
+
+sdhci@ab000000 {
+ compatible = "sdhci";
+ reg = <0xab000000 0x200>;
+ interrupts = <23>;
+ bus-width = <4>;
+ cd-gpios = <&gpio 69 0>;
+ cd-inverted;
+ wp-gpios = <&gpio 70 0>;
+ max-frequency = <50000000>;
+};
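+
+A minimal parsing sketch for a host driver (hypothetical: the function name,
+the printout and the local variables below are illustrative only and not part
+of any binding or existing driver):
+
+    #include <linux/kernel.h>
+    #include <linux/of.h>
+    #include <linux/of_gpio.h>
+
+    static int example_parse_mmc_slot(struct device_node *np)
+    {
+        u32 bus_width;
+        int cd_gpio;
+        bool cd_inverted, removable;
+
+        /* "bus-width" is required; fall back to one data line if absent. */
+        if (of_property_read_u32(np, "bus-width", &bus_width))
+            bus_width = 1;
+
+        /* Optional card-detect GPIO; a negative value means "not wired". */
+        cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
+
+        /* "cd-inverted" flips the polarity of the card-detect line. */
+        cd_inverted = of_property_read_bool(np, "cd-inverted");
+
+        /* "non-removable" marks eMMC-style, permanently wired devices. */
+        removable = !of_property_read_bool(np, "non-removable");
+
+        pr_info("%s: %u-bit bus, cd gpio %d%s%s\n", np->name, bus_width,
+                cd_gpio, cd_inverted ? " (active low)" : "",
+                removable ? "" : ", non-removable");
+
+        return 0;
+    }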
diff --git a/Documentation/devicetree/bindings/mmc/mmci.txt b/Documentation/devicetree/bindings/mmc/mmci.txt
new file mode 100644
index 00000000000..14a81d52611
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/mmci.txt
@@ -0,0 +1,19 @@
+* ARM PrimeCell MultiMedia Card Interface (MMCI) PL180/1
+
+The ARM PrimeCell MMCI PL180 and PL181 provide an interface for
+reading and writing to MultiMedia and SD cards alike.
+
+Required properties:
+- compatible : contains "arm,pl18x", "arm,primecell".
+- reg : contains pl18x registers and length.
+- interrupts : contains the device IRQ(s).
+- arm,primecell-periphid : contains the PrimeCell Peripheral ID.
+
+Optional properties:
+- wp-gpios : contains any write protect (ro) gpios
+- cd-gpios : contains any card detection gpios
+- cd-inverted : indicates whether the cd gpio is inverted
+- max-frequency : contains the maximum operating frequency
+- bus-width : number of data lines, can be <1>, <4>, or <8>
+- mmc-cap-mmc-highspeed : indicates whether MMC is high speed capable
+- mmc-cap-sd-highspeed : indicates whether SD is high speed capable
diff --git a/Documentation/devicetree/bindings/mmc/mxs-mmc.txt b/Documentation/devicetree/bindings/mmc/mxs-mmc.txt
new file mode 100644
index 00000000000..14d870a9e3d
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/mxs-mmc.txt
@@ -0,0 +1,25 @@
+* Freescale MXS MMC controller
+
+The Freescale MXS Synchronous Serial Ports (SSP) can act as an MMC controller
+to support MMC, SD, and SDIO types of memory cards.
+
+Required properties:
+- compatible: Should be "fsl,<chip>-mmc". The supported chips include
+ imx23 and imx28.
+- reg: Should contain registers location and length
+- interrupts: Should contain ERROR and DMA interrupts
+- fsl,ssp-dma-channel: APBH DMA channel for the SSP
+- bus-width: Number of data lines, can be <1>, <4>, or <8>
+
+Optional properties:
+- wp-gpios: Specify GPIOs for write protection
+
+Examples:
+
+ssp0: ssp@80010000 {
+ compatible = "fsl,imx28-mmc";
+ reg = <0x80010000 0x2000>;
+ interrupts = <96 82>;
+ fsl,ssp-dma-channel = <0>;
+ bus-width = <8>;
+};
diff --git a/Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt b/Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt
index 7e51154679a..f77c3031607 100644
--- a/Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt
@@ -7,12 +7,12 @@ Required properties:
- compatible : Should be "nvidia,<chip>-sdhci"
- reg : Should contain SD/MMC registers location and length
- interrupts : Should contain SD/MMC interrupt
+- bus-width : Number of data lines, can be <1>, <4>, or <8>
Optional properties:
- cd-gpios : Specify GPIOs for card detection
- wp-gpios : Specify GPIOs for write protection
- power-gpios : Specify GPIOs for power control
-- support-8bit : Boolean, indicates if 8-bit mode should be used.
Example:
@@ -23,5 +23,5 @@ sdhci@c8000200 {
cd-gpios = <&gpio 69 0>; /* gpio PI5 */
wp-gpios = <&gpio 57 0>; /* gpio PH1 */
power-gpios = <&gpio 155 0>; /* gpio PT3 */
- support-8bit;
+ bus-width = <8>;
};
diff --git a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
index dbd4368ab8c..8a53958c9a9 100644
--- a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
+++ b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
@@ -15,7 +15,7 @@ Optional properties:
ti,dual-volt: boolean, supports dual voltage cards
<supply-name>-supply: phandle to the regulator device tree node
"supply-name" examples are "vmmc", "vmmc_aux" etc
-ti,bus-width: Number of data lines, default assumed is 1 if the property is missing.
+bus-width: Number of data lines; assumed to be 1 if the property is missing.
cd-gpios: GPIOs for card detection
wp-gpios: GPIOs for write protection
ti,non-removable: non-removable slot (like eMMC)
@@ -27,7 +27,7 @@ Example:
reg = <0x4809c000 0x400>;
ti,hwmods = "mmc1";
ti,dual-volt;
- ti,bus-width = <4>;
+ bus-width = <4>;
vmmc-supply = <&vmmc>; /* phandle to regulator node */
ti,non-removable;
};
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt
index de439517dff..7ab9e1a2d8b 100644
--- a/Documentation/devicetree/bindings/net/fsl-fec.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -14,7 +14,7 @@ Optional properties:
Example:
-fec@83fec000 {
+ethernet@83fec000 {
compatible = "fsl,imx51-fec", "fsl,imx27-fec";
reg = <0x83fec000 0x4000>;
interrupts = <87>;
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt
index 3664d37e679..b4480d5c3ac 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt
@@ -4,6 +4,8 @@ Required properties:
- compatible : "st,spear300-pinmux"
: "st,spear310-pinmux"
: "st,spear320-pinmux"
+ : "st,spear1310-pinmux"
+ : "st,spear1340-pinmux"
- reg : Address range of the pinctrl registers
- st,pinmux-mode: Mandatory for SPEAr300 and SPEAr320 and invalid for others.
- Its values for SPEAr300:
@@ -89,6 +91,37 @@ For SPEAr320 machines:
"rmii0_1_grp", "i2c1_8_9_grp", "i2c1_98_99_grp", "i2c2_0_1_grp",
"i2c2_2_3_grp", "i2c2_19_20_grp", "i2c2_75_76_grp", "i2c2_96_97_grp"
+For SPEAr1310 machines:
+ "i2c0_grp", "ssp0_grp", "ssp0_cs0_grp", "ssp0_cs1_2_grp", "i2s0_grp",
+ "i2s1_grp", "clcd_grp", "clcd_high_res_grp", "arm_gpio_grp",
+ "smi_2_chips_grp", "smi_4_chips_grp", "gmii_grp", "rgmii_grp",
+ "smii_0_1_2_grp", "ras_mii_txclk_grp", "nand_8bit_grp",
+ "nand_16bit_grp", "nand_4_chips_grp", "keyboard_6x6_grp",
+ "keyboard_rowcol6_8_grp", "uart0_grp", "uart0_modem_grp",
+ "gpt0_tmr0_grp", "gpt0_tmr1_grp", "gpt1_tmr0_grp", "gpt1_tmr1_grp",
+ "sdhci_grp", "cf_grp", "xd_grp", "touch_xy_grp",
+ "uart1_disable_i2c_grp", "uart1_disable_sd_grp", "uart2_3_grp",
+ "uart4_grp", "uart5_grp", "rs485_0_1_tdm_0_1_grp", "i2c_1_2_grp",
+ "i2c3_dis_smi_clcd_grp", "i2c3_dis_sd_i2s0_grp", "i2c_4_5_dis_smi_grp",
+ "i2c4_dis_sd_grp", "i2c5_dis_sd_grp", "i2c_6_7_dis_kbd_grp",
+ "i2c6_dis_sd_grp", "i2c7_dis_sd_grp", "can0_dis_nor_grp",
+ "can0_dis_sd_grp", "can1_dis_sd_grp", "can1_dis_kbd_grp", "pcie0_grp",
+ "pcie1_grp", "pcie2_grp", "sata0_grp", "sata1_grp", "sata2_grp",
+ "ssp1_dis_kbd_grp", "ssp1_dis_sd_grp", "gpt64_grp"
+
+For SPEAr1340 machines:
+ "pads_as_gpio_grp", "fsmc_8bit_grp", "fsmc_16bit_grp", "fsmc_pnor_grp",
+ "keyboard_row_col_grp", "keyboard_col5_grp", "spdif_in_grp",
+ "spdif_out_grp", "gpt_0_1_grp", "pwm0_grp", "pwm1_grp", "pwm2_grp",
+ "pwm3_grp", "vip_mux_grp", "vip_mux_cam0_grp", "vip_mux_cam1_grp",
+ "vip_mux_cam2_grp", "vip_mux_cam3_grp", "cam0_grp", "cam1_grp",
+ "cam2_grp", "cam3_grp", "smi_grp", "ssp0_grp", "ssp0_cs1_grp",
+ "ssp0_cs2_grp", "ssp0_cs3_grp", "uart0_grp", "uart0_enh_grp",
+ "uart1_grp", "i2s_in_grp", "i2s_out_grp", "gmii_grp", "rgmii_grp",
+ "rmii_grp", "sgmii_grp", "i2c0_grp", "i2c1_grp", "cec0_grp", "cec1_grp",
+ "sdhci_grp", "cf_grp", "xd_grp", "clcd_grp", "arm_trace_grp",
+ "miphy_dbg_grp", "pcie_grp", "sata_grp"
+
Valid values for function names are:
For All SPEAr3xx machines:
"firda", "i2c0", "ssp_cs", "ssp0", "mii0", "gpio0", "uart0_ext",
@@ -106,3 +139,17 @@ For SPEAr320 machines:
"uart2", "uart3", "uart4", "uart5", "uart6", "rs485", "touchscreen",
"can0", "can1", "pwm0_1", "pwm2", "pwm3", "ssp1", "ssp2", "mii2",
"mii0_1", "i2c1", "i2c2"
+
+
+For SPEAr1310 machines:
+ "i2c0", "ssp0", "i2s0", "i2s1", "clcd", "arm_gpio", "smi", "gmii",
+ "rgmii", "smii_0_1_2", "ras_mii_txclk", "nand", "keyboard", "uart0",
+ "gpt0", "gpt1", "sdhci", "cf", "xd", "touchscreen", "uart1", "uart2_3",
+ "uart4", "uart5", "rs485_0_1_tdm_0_1", "i2c_1_2", "i2c3_i2s1",
+ "i2c_4_5", "i2c_6_7", "can0", "can1", "pci", "sata", "ssp1", "gpt64"
+
+For SPEAr1340 machines:
+ "pads_as_gpio", "fsmc", "keyboard", "spdif_in", "spdif_out", "gpt_0_1",
+ "pwm", "vip", "cam0", "cam1", "cam2", "cam3", "smi", "ssp0", "uart0",
+ "uart1", "i2s", "gmac", "i2c0", "i2c1", "cec0", "cec1", "sdhci", "cf",
+ "xd", "clcd", "arm_trace", "miphy_dbg", "pcie", "sata"
diff --git a/Documentation/devicetree/bindings/rtc/lpc32xx-rtc.txt b/Documentation/devicetree/bindings/rtc/lpc32xx-rtc.txt
new file mode 100644
index 00000000000..a87a1e9bc06
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/lpc32xx-rtc.txt
@@ -0,0 +1,15 @@
+* NXP LPC32xx SoC Real Time Clock controller
+
+Required properties:
+- compatible: must be "nxp,lpc3220-rtc"
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- interrupts: The RTC interrupt
+
+Example:
+
+ rtc@40024000 {
+ compatible = "nxp,lpc3220-rtc";
+ reg = <0x40024000 0x1000>;
+ interrupts = <52 0>;
+ };
diff --git a/Documentation/devicetree/bindings/rtc/spear-rtc.txt b/Documentation/devicetree/bindings/rtc/spear-rtc.txt
new file mode 100644
index 00000000000..ca67ac62108
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/spear-rtc.txt
@@ -0,0 +1,17 @@
+* SPEAr RTC
+
+Required properties:
+- compatible : "st,spear600-rtc"
+- reg : Address range of the rtc registers
+- interrupt-parent: Should be the phandle for the interrupt controller
+ that services interrupts for this device
+- interrupts: Should contain the rtc interrupt number
+
+Example:
+
+ rtc@fc000000 {
+ compatible = "st,spear600-rtc";
+ reg = <0xfc000000 0x1000>;
+ interrupt-parent = <&vic1>;
+ interrupts = <12>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/omap-dmic.txt b/Documentation/devicetree/bindings/sound/omap-dmic.txt
new file mode 100644
index 00000000000..fd8105f1897
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/omap-dmic.txt
@@ -0,0 +1,21 @@
+* Texas Instruments OMAP4+ Digital Microphone Module
+
+Required properties:
+- compatible: "ti,omap4-dmic"
+- reg: Register location and size as an array:
+ <MPU access base address, size>,
+ <L3 interconnect address, size>;
+- interrupts: Interrupt number for DMIC
+- interrupt-parent: The parent interrupt controller
+- ti,hwmods: Name of the hwmod associated with OMAP dmic IP
+
+Example:
+
+dmic: dmic@4012e000 {
+ compatible = "ti,omap4-dmic";
+ reg = <0x4012e000 0x7f>, /* MPU private access */
+ <0x4902e000 0x7f>; /* L3 Interconnect */
+ interrupts = <0 114 0x4>;
+ interrupt-parent = <&gic>;
+ ti,hwmods = "dmic";
+};
diff --git a/Documentation/devicetree/bindings/sound/omap-mcpdm.txt b/Documentation/devicetree/bindings/sound/omap-mcpdm.txt
new file mode 100644
index 00000000000..0741dff048d
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/omap-mcpdm.txt
@@ -0,0 +1,21 @@
+* Texas Instruments OMAP4+ McPDM
+
+Required properties:
+- compatible: "ti,omap4-mcpdm"
+- reg: Register location and size as an array:
+ <MPU access base address, size>,
+ <L3 interconnect address, size>;
+- interrupts: Interrupt number for McPDM
+- interrupt-parent: The parent interrupt controller
+- ti,hwmods: Name of the hwmod associated with the McPDM
+
+Example:
+
+mcpdm: mcpdm@40132000 {
+ compatible = "ti,omap4-mcpdm";
+ reg = <0x40132000 0x7f>, /* MPU private access */
+ <0x49032000 0x7f>; /* L3 Interconnect */
+ interrupts = <0 112 0x4>;
+ interrupt-parent = <&gic>;
+ ti,hwmods = "mcpdm";
+};
diff --git a/Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt b/Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt
index a9c0406280e..b462d0c5482 100644
--- a/Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt
+++ b/Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt
@@ -11,7 +11,7 @@ Optional properties:
Example:
-uart@73fbc000 {
+serial@73fbc000 {
compatible = "fsl,imx51-uart", "fsl,imx21-uart";
reg = <0x73fbc000 0x4000>;
interrupts = <31>;
diff --git a/Documentation/devicetree/bindings/usb/tegra-usb.txt b/Documentation/devicetree/bindings/usb/tegra-usb.txt
index 007005ddbe1..e9b005dc762 100644
--- a/Documentation/devicetree/bindings/usb/tegra-usb.txt
+++ b/Documentation/devicetree/bindings/usb/tegra-usb.txt
@@ -12,6 +12,9 @@ Required properties :
- nvidia,vbus-gpio : If present, specifies a gpio that needs to be
activated for the bus to be powered.
+Required properties for phy_type == ulpi:
+ - nvidia,phy-reset-gpio : The GPIO used to reset the PHY.
+
Optional properties:
- dr_mode : dual role mode. Indicates the working mode for
nvidia,tegra20-ehci compatible controllers. Can be "host", "peripheral",
diff --git a/Documentation/dma-buf-sharing.txt b/Documentation/dma-buf-sharing.txt
index 3bbd5c51605..ad86fb86c9a 100644
--- a/Documentation/dma-buf-sharing.txt
+++ b/Documentation/dma-buf-sharing.txt
@@ -29,13 +29,6 @@ The buffer-user
in memory, mapped into its own address space, so it can access the same area
of memory.
-*IMPORTANT*: [see https://lkml.org/lkml/2011/12/20/211 for more details]
-For this first version, A buffer shared using the dma_buf sharing API:
-- *may* be exported to user space using "mmap" *ONLY* by exporter, outside of
- this framework.
-- with this new iteration of the dma-buf api cpu access from the kernel has been
- enable, see below for the details.
-
dma-buf operations for device dma only
--------------------------------------
@@ -300,6 +293,17 @@ Access to a dma_buf from the kernel context involves three steps:
Note that these calls need to always succeed. The exporter needs to complete
any preparations that might fail in begin_cpu_access.
+ For some cases the overhead of kmap can be too high, so a vmap interface
+ is provided as well. This interface should be used very carefully, as
+ vmalloc space is a limited resource on many architectures.
+
+ Interfaces:
+ void *dma_buf_vmap(struct dma_buf *dmabuf)
+ void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+
+ The vmap call can fail if there is no vmap support in the exporter, or if it
+ runs out of vmalloc space. A fallback to kmap should be implemented, as in
+ the sketch below.
+
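+ A minimal usage sketch (assuming dma_buf_begin_cpu_access() has already
+ succeeded; the importer_parse_header() helper is purely illustrative):
+
+     void *vaddr = dma_buf_vmap(dmabuf);
+
+     if (vaddr) {
+             /* one contiguous kernel mapping of the whole buffer */
+             importer_parse_header(vaddr);
+             dma_buf_vunmap(dmabuf, vaddr);
+     } else {
+             /* no vmap support (or no vmalloc space left): fall back to
+              * the per-page kmap interface, here for page 0 only */
+             vaddr = dma_buf_kmap(dmabuf, 0);
+             importer_parse_header(vaddr);
+             dma_buf_kunmap(dmabuf, 0, vaddr);
+     }
+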
3. Finish access
When the importer is done accessing the range specified in begin_cpu_access,
@@ -313,6 +317,83 @@ Access to a dma_buf from the kernel context involves three steps:
enum dma_data_direction dir);
+Direct Userspace Access/mmap Support
+------------------------------------
+
+Being able to mmap an exported dma-buf buffer object has two main use-cases:
+- CPU fallback processing in a pipeline and
+- supporting existing mmap interfaces in importers.
+
+1. CPU fallback processing in a pipeline
+
+ In many processing pipelines it is sometimes required that the cpu can access
+ the data in a dma-buf (e.g. for thumbnail creation, snapshots, ...). To avoid
+ the need to handle this specially in userspace frameworks for buffer sharing
+ it's ideal if the dma_buf fd itself can be used to access the backing storage
+ from userspace using mmap.
+
+ Furthermore, Android's ION framework already supports this (and is otherwise
+ rather similar to dma-buf from the userspace consumer side, using fds as
+ handles, too). So it's beneficial to support this in a similar fashion on
+ dma-buf to have a good transition path for existing Android userspace.
+
+ No special interfaces are needed; userspace simply calls mmap on the dma-buf fd.
+
+2. Supporting existing mmap interfaces in importers
+
+ Similar to the motivation for kernel cpu access it is again important that
+ the userspace code of a given importing subsystem can use the same interfaces
+ with an imported dma-buf buffer object as with a native buffer object. This is
+ especially important for drm where the userspace part of contemporary OpenGL,
+ X, and other drivers is huge, and reworking them to use a different way to
+ mmap a buffer would be rather invasive.
+
+ The assumption in the current dma-buf interfaces is that redirecting the
+ initial mmap is all that's needed. A survey of some of the existing
+ subsystems shows that no driver seems to do any nefarious thing like syncing
+ up with outstanding asynchronous processing on the device or allocating
+ special resources at fault time. So hopefully this is good enough, since
+ adding interfaces to intercept pagefaults and allow pte shootdowns would
+ increase the complexity quite a bit.
+
+ Interface:
+ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
+ unsigned long);
+
+ If the importing subsystem simply provides a special-purpose mmap call to set
+ up a mapping in userspace, calling do_mmap with dma_buf->file will equally
+ achieve that for a dma-buf object.
+
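+ A hypothetical importer mmap handler (the example_obj structure and the way
+ it is looked up are illustrative only, not taken from any subsystem):
+
+     static int example_importer_mmap(struct file *file,
+                                      struct vm_area_struct *vma)
+     {
+             struct example_obj *obj = file->private_data;
+
+             /* redirect the mapping to the exporter's backing storage,
+              * starting at offset 0 of the dma-buf */
+             return dma_buf_mmap(obj->dmabuf, vma, 0);
+     }
+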
+3. Implementation notes for exporters
+
+ Because dma-buf buffers have invariant size over their lifetime, the dma-buf
+ core checks whether a vma is too large and rejects such mappings. The
+ exporter hence does not need to duplicate this check.
+
+ Because existing importing subsystems might presume coherent mappings for
+ userspace, the exporter needs to set up a coherent mapping. If that's not
+ possible, it needs to fake coherency by manually shooting down ptes when
+ leaving the cpu domain and flushing caches at fault time. Note that all the
+ dma_buf files share the same anon inode, hence the exporter needs to replace
+ the dma_buf file stored in vma->vm_file with it's own if pte shootdown is
+ requred. This is because the kernel uses the underlying inode's address_space
+ for vma tracking (and hence pte tracking at shootdown time with
+ unmap_mapping_range).
+
+ If the above shootdown dance turns out to be too expensive in certain
+ scenarios, we can extend dma-buf with a more explicit cache tracking scheme
+ for userspace mappings. But the current assumption is that using mmap is
+ always a slower path, so some inefficiencies should be acceptable.
+
+ Exporters that shoot down mappings (for any reason) shall not do any
+ synchronization at fault time with outstanding device operations.
+ Synchronization is an orthogonal issue to sharing the backing storage of a
+ buffer and hence should not be handled by dma-buf itself. This is explicitly
+ mentioned here because many people seem to want something like this, but if
+ different exporters handle this differently, buffer sharing can fail in
+ interesting ways depending upon the exporter (if userspace starts depending
+ upon this implicit synchronization).
+
Miscellaneous notes
-------------------
@@ -336,6 +417,20 @@ Miscellaneous notes
the exporting driver to create a dmabuf fd must provide a way to let
userspace control setting of O_CLOEXEC flag passed in to dma_buf_fd().
+- If an exporter needs to manually flush caches and hence needs to fake
+ coherency for mmap support, it needs to be able to zap all the ptes pointing
+ at the backing storage. Now linux mm needs a struct address_space associated
+ with the struct file stored in vma->vm_file to do that with the function
+ unmap_mapping_range. But the dma_buf framework only backs every dma_buf fd
+ with the anon_file struct file, i.e. all dma_bufs share the same file.
+
+ Hence exporters need to set up their own file (and address_space) association
+ by setting vma->vm_file and adjusting vma->vm_pgoff in the dma_buf mmap
+ callback. In the specific case of a gem driver the exporter could use the
+ shmem file already provided by gem (and set vm_pgoff = 0). Exporters can then
+ zap ptes by unmapping the corresponding range of the struct address_space
+ associated with their own file.
+
References:
[1] struct dma_buf_ops in include/linux/dma-buf.h
[2] All interfaces mentioned above defined in include/linux/dma-buf.h
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 50d82ae09e2..ebaffe208cc 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -588,3 +588,21 @@ Why: Remount currently allows changing bound subsystems and
replaced with conventional fsnotify.
----------------------------
+
+What: KVM debugfs statistics
+When: 2013
+Why: KVM tracepoints provide mostly equivalent information in a much more
+ flexible fashion.
+
+----------------------------
+
+What: at91-mci driver ("CONFIG_MMC_AT91")
+When: 3.7
+Why: There are two mci drivers: at91-mci and atmel-mci. The PDC support
+ was added to atmel-mci as a first step to support more chips.
+ Then at91-mci was kept only for old IP versions (on at91rm9200 and
+ at91sam9261). The support of these IP versions has just been added
+ to atmel-mci, so atmel-mci can be used for all chips.
+Who: Ludovic Desroches <ludovic.desroches@atmel.com>
+
+----------------------------
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 4fca82e5276..d449e632e6a 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -60,7 +60,6 @@ ata *);
ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*removexattr) (struct dentry *, const char *);
- void (*truncate_range)(struct inode *, loff_t, loff_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
locking rules:
@@ -87,7 +86,6 @@ setxattr: yes
getxattr: no
listxattr: no
removexattr: yes
-truncate_range: yes
fiemap: no
Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
victim.
diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt
index b100adc38ad..293855e9500 100644
--- a/Documentation/filesystems/ext3.txt
+++ b/Documentation/filesystems/ext3.txt
@@ -59,9 +59,9 @@ commit=nrsec (*) Ext3 can be told to sync all its data and metadata
Setting it to very large values will improve
performance.
-barrier=<0(*)|1> This enables/disables the use of write barriers in
-barrier the jbd code. barrier=0 disables, barrier=1 enables.
-nobarrier (*) This also requires an IO stack which can support
+barrier=<0|1(*)> This enables/disables the use of write barriers in
+barrier (*) the jbd code. barrier=0 disables, barrier=1 enables.
+nobarrier This also requires an IO stack which can support
barriers, and if jbd gets an error on a barrier
write, it will disable again with a warning.
Write barriers enforce proper on-disk ordering
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 74acd961881..8c91d1057d9 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -297,7 +297,8 @@ in the beginning of ->setattr unconditionally.
be used instead. It gets called whenever the inode is evicted, whether it has
remaining links or not. Caller does *not* evict the pagecache or inode-associated
metadata buffers; getting rid of those is responsibility of method, as it had
-been for ->delete_inode().
+been for ->delete_inode(). Caller makes sure async writeback cannot be running
+for the inode while (or after) ->evict_inode() is called.
->drop_inode() returns int now; it's called on final iput() with
inode->i_lock held and it returns true if filesystems wants the inode to be
@@ -306,14 +307,11 @@ updated appropriately. generic_delete_inode() is also alive and it consists
simply of return 1. Note that all actual eviction work is done by caller after
->drop_inode() returns.
- clear_inode() is gone; use end_writeback() instead. As before, it must
-be called exactly once on each call of ->evict_inode() (as it used to be for
-each call of ->delete_inode()). Unlike before, if you are using inode-associated
-metadata buffers (i.e. mark_buffer_dirty_inode()), it's your responsibility to
-call invalidate_inode_buffers() before end_writeback().
- No async writeback (and thus no calls of ->write_inode()) will happen
-after end_writeback() returns, so actions that should not overlap with ->write_inode()
-(e.g. freeing on-disk inode if i_nlink is 0) ought to be done after that call.
+ As before, clear_inode() must be called exactly once on each call of
+->evict_inode() (as it used to be for each call of ->delete_inode()). Unlike
+before, if you are using inode-associated metadata buffers (i.e.
+mark_buffer_dirty_inode()), it's your responsibility to call
+invalidate_inode_buffers() before clear_inode().
NOTE: checking i_nlink in the beginning of ->write_inode() and bailing out
if it's zero is not *and* *never* *had* *been* enough. Final unlink() and iput()
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index ef088e55ab2..912af6ce562 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -743,6 +743,7 @@ Committed_AS: 100056 kB
VmallocTotal: 112216 kB
VmallocUsed: 428 kB
VmallocChunk: 111088 kB
+AnonHugePages: 49152 kB
MemTotal: Total usable ram (i.e. physical ram minus a few reserved
bits and the kernel binary code)
@@ -776,6 +777,7 @@ VmallocChunk: 111088 kB
Dirty: Memory which is waiting to get written back to the disk
Writeback: Memory which is actively being written back to the disk
AnonPages: Non-file backed pages mapped into userspace page tables
+AnonHugePages: Non-file backed huge pages mapped into userspace page tables
Mapped: files which have been mmaped, such as libraries
Slab: in-kernel data structures cache
SReclaimable: Part of Slab, that might be reclaimed, such as caches
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 0d049202808..ef19f91a0f1 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -363,7 +363,6 @@ struct inode_operations {
ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*removexattr) (struct dentry *, const char *);
- void (*truncate_range)(struct inode *, loff_t, loff_t);
};
Again, all methods are called without any locks being held, unless
@@ -472,9 +471,6 @@ otherwise noted.
removexattr: called by the VFS to remove an extended attribute from
a file. This method is called by removexattr(2) system call.
- truncate_range: a method provided by the underlying filesystem to truncate a
- range of blocks , i.e. punch a hole somewhere in a file.
-
The Address Space Object
========================
@@ -760,7 +756,7 @@ struct file_operations
----------------------
This describes how the VFS can manipulate an open file. As of kernel
-2.6.22, the following members are defined:
+3.5, the following members are defined:
struct file_operations {
struct module *owner;
@@ -790,6 +786,8 @@ struct file_operations {
int (*flock) (struct file *, int, struct file_lock *);
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, size_t, unsigned int);
ssize_t (*splice_read)(struct file *, struct pipe_inode_info *, size_t, unsigned int);
+ int (*setlease)(struct file *, long arg, struct file_lock **);
+ long (*fallocate)(struct file *, int mode, loff_t offset, loff_t len);
};
Again, all methods are called without any locks being held, unless
@@ -858,6 +856,11 @@ otherwise noted.
splice_read: called by the VFS to splice data from file to a pipe. This
method is used by the splice(2) system call
+ setlease: called by the VFS to set or release a file lock lease.
+ setlease has the file_lock_lock held and must not sleep.
+
+ fallocate: called by the VFS to preallocate blocks or punch a hole.
+
Note that the file operations are implemented by the specific
filesystem in which the inode resides. When opening a device node
(character or block special) most filesystems will call special
diff --git a/Documentation/i2c/muxes/gpio-i2cmux b/Documentation/i2c/muxes/i2c-mux-gpio
index 811cd78d4cd..bd9b2299b73 100644
--- a/Documentation/i2c/muxes/gpio-i2cmux
+++ b/Documentation/i2c/muxes/i2c-mux-gpio
@@ -1,11 +1,11 @@
-Kernel driver gpio-i2cmux
+Kernel driver i2c-mux-gpio
Author: Peter Korsgaard <peter.korsgaard@barco.com>
Description
-----------
-gpio-i2cmux is an i2c mux driver providing access to I2C bus segments
+i2c-mux-gpio is an i2c mux driver providing access to I2C bus segments
from a master I2C bus and a hardware MUX controlled through GPIO pins.
E.G.:
@@ -26,16 +26,16 @@ according to the settings of the GPIO pins 1..N.
Usage
-----
-gpio-i2cmux uses the platform bus, so you need to provide a struct
+i2c-mux-gpio uses the platform bus, so you need to provide a struct
platform_device with the platform_data pointing to a struct
gpio_i2cmux_platform_data with the I2C adapter number of the master
bus, the number of bus segments to create and the GPIO pins used
-to control it. See include/linux/gpio-i2cmux.h for details.
+to control it. See include/linux/i2c-mux-gpio.h for details.
E.G. something like this for a MUX providing 4 bus segments
controlled through 3 GPIO pins:
-#include <linux/gpio-i2cmux.h>
+#include <linux/i2c-mux-gpio.h>
#include <linux/platform_device.h>
static const unsigned myboard_gpiomux_gpios[] = {
@@ -57,7 +57,7 @@ static struct gpio_i2cmux_platform_data myboard_i2cmux_data = {
};
static struct platform_device myboard_i2cmux = {
- .name = "gpio-i2cmux",
+ .name = "i2c-mux-gpio",
.id = 0,
.dev = {
.platform_data = &myboard_i2cmux_data,
diff --git a/Documentation/initrd.txt b/Documentation/initrd.txt
index 1ba84f3584e..4e1839ccb55 100644
--- a/Documentation/initrd.txt
+++ b/Documentation/initrd.txt
@@ -362,5 +362,5 @@ Resources
http://www.almesberger.net/cv/papers/ols2k-9.ps.gz
[2] newlib package (experimental), with initrd example
http://sources.redhat.com/newlib/
-[3] Brouwer, Andries; "util-linux: Miscellaneous utilities for Linux"
- ftp://ftp.win.tue.nl/pub/linux-local/utils/util-linux/
+[3] util-linux: Miscellaneous utilities for Linux
+ http://www.kernel.org/pub/linux/utils/util-linux/
diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt
index 68e32bb6bd8..6466704d47b 100644
--- a/Documentation/kbuild/kbuild.txt
+++ b/Documentation/kbuild/kbuild.txt
@@ -50,6 +50,10 @@ LDFLAGS_MODULE
--------------------------------------------------
Additional options used for $(LD) when linking modules.
+LDFLAGS_vmlinux
+--------------------------------------------------
+Additional options passed to final link of vmlinux.
+
KBUILD_VERBOSE
--------------------------------------------------
Set the kbuild verbosity. Can be assigned same values as "V=...".
@@ -214,3 +218,18 @@ KBUILD_BUILD_USER, KBUILD_BUILD_HOST
These two variables allow to override the user@host string displayed during
boot and in /proc/version. The default value is the output of the commands
whoami and host, respectively.
+
+KBUILD_LDS
+--------------------------------------------------
+The linker script with full path. Assigned by the top-level Makefile.
+
+KBUILD_VMLINUX_INIT
+--------------------------------------------------
+All object files for the init (first) part of vmlinux.
+Files specified with KBUILD_VMLINUX_INIT are linked first.
+
+KBUILD_VMLINUX_MAIN
+--------------------------------------------------
+All object files for the main part of vmlinux.
+KBUILD_VMLINUX_INIT and KBUILD_VMLINUX_MAIN together specify
+all the object files used to link vmlinux.
diff --git a/Documentation/kbuild/kconfig.txt b/Documentation/kbuild/kconfig.txt
index 9d5f2a90dca..a09f1a6a830 100644
--- a/Documentation/kbuild/kconfig.txt
+++ b/Documentation/kbuild/kconfig.txt
@@ -53,15 +53,15 @@ KCONFIG_ALLCONFIG
--------------------------------------------------
(partially based on lkml email from/by Rob Landley, re: miniconfig)
--------------------------------------------------
-The allyesconfig/allmodconfig/allnoconfig/randconfig variants can
-also use the environment variable KCONFIG_ALLCONFIG as a flag or a
-filename that contains config symbols that the user requires to be
-set to a specific value. If KCONFIG_ALLCONFIG is used without a
-filename, "make *config" checks for a file named
-"all{yes/mod/no/def/random}.config" (corresponding to the *config command
-that was used) for symbol values that are to be forced. If this file
-is not found, it checks for a file named "all.config" to contain forced
-values.
+The allyesconfig/allmodconfig/allnoconfig/randconfig variants can also
+use the environment variable KCONFIG_ALLCONFIG as a flag or a filename
+that contains config symbols that the user requires to be set to a
+specific value. If KCONFIG_ALLCONFIG is used without a filename (i.e.
+KCONFIG_ALLCONFIG == "" or KCONFIG_ALLCONFIG == "1"), "make *config"
+checks for a file named "all{yes/mod/no/def/random}.config"
+(corresponding to the *config command that was used) for symbol values
+that are to be forced. If this file is not found, it checks for a
+file named "all.config" to contain forced values.
This enables you to create "miniature" config (miniconfig) or custom
config files containing just the config symbols that you are interested
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index b69cfdc1211..b40b413db88 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -397,8 +397,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
atkbd.softrepeat= [HW]
Use software keyboard repeat
- autotest [IA-64]
-
baycom_epp= [HW,AX25]
Format: <io>,<mode>
@@ -508,6 +506,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Also note the kernel might malfunction if you disable
some critical bits.
+ cma=nn[MG] [ARM,KNL]
+ Sets the size of kernel global memory area for contiguous
+ memory allocations. For more information, see
+ include/linux/dma-contiguous.h
+
cmo_free_hint= [PPC] Format: { yes | no }
Specify whether pages are marked as being inactive
when they are freed. This is used in CMO environments
@@ -515,6 +518,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
a hypervisor.
Default: yes
+ coherent_pool=nn[KMG] [ARM,KNL]
+ Sets the size of memory pool for coherent, atomic dma
+ allocations if Contiguous Memory Allocator (CMA) is used.
+
code_bytes [X86] How many bytes of object code to print
in an oops report.
Range: 0 - 8192
@@ -1444,8 +1451,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
devices can be requested on-demand with the
/dev/loop-control interface.
- mcatest= [IA-64]
-
mce [X86-32] Machine Check Exception
mce=option [X86-64] See Documentation/x86/x86_64/boot-options.txt
diff --git a/Documentation/leds/ledtrig-transient.txt b/Documentation/leds/ledtrig-transient.txt
new file mode 100644
index 00000000000..3bd38b487df
--- /dev/null
+++ b/Documentation/leds/ledtrig-transient.txt
@@ -0,0 +1,152 @@
+LED Transient Trigger
+=====================
+
+The leds timer trigger does not currently have an interface to activate
+a one shot timer. The current support allows for setting two timers, one for
+specifying how long a state is to be held on, and the second for how long it
+is to be held off. The delay_on value specifies the time period an LED should stay
+in on state, followed by a delay_off value that specifies how long the LED
+should stay in off state. The on and off cycle repeats until the trigger
+gets deactivated. There is no provision for one time activation to implement
+features that require an on or off state to be held just once and then stay in
+the original state forever.
+
+Without one shot timer interface, user space can still use timer trigger to
+set a timer to hold a state, however when user space application crashes or
+goes away without deactivating the timer, the hardware will be left in that
+state permanently.
+
+As a specific example of this use-case, let's look at the vibrate feature on
+phones. The vibrate function on phones is implemented using PWM pins on the
+SoC or PMIC. There is a need to activate a one shot timer to control the vibrate
+feature, to prevent user space crashes leaving the phone in vibrate mode
+permanently causing the battery to drain.
+
+Transient trigger addresses the need for one shot timer activation. The
+transient trigger can be enabled and disabled just like the other leds
+triggers.
+
+When an led class device driver registers itself, it can specify all leds
+triggers it supports and a default trigger. During registration, activation
+routine for the default trigger gets called. During registration of an led
+class device, the LED state does not change.
+
+When the driver unregisters, deactivation routine for the currently active
+trigger will be called, and LED state is changed to LED_OFF.
+
+Driver suspend changes the LED state to LED_OFF and resume doesn't change
+the state. Please note that there is no explicit interaction between the
+suspend and resume actions and the currently enabled trigger. LED state
+changes are suspended while the driver is in suspend state. Any timers
+that are active at the time the driver gets suspended continue to run, without
+being able to actually change the LED state. Once driver is resumed, triggers
+start functioning again.
+
+LED state changes are controlled using brightness which is a common led
+class device property. When brightness is set to 0 from user space via
+echo 0 > brightness, it will result in deactivating the current trigger.
+
+Transient trigger uses standard register and unregister interfaces. During
+trigger registration, for each led class device that specifies this trigger
+as its default trigger, trigger activation routine will get called. During
+registration, the LED state does not change, unless there is another trigger
+active, in which case LED state changes to LED_OFF.
+
+During trigger unregistration, LED state gets changed to LED_OFF.
+
+Transient trigger activation routine doesn't change the LED state. It
+creates its properties and does its initialization. Transient trigger
+deactivation routine will cancel any timer that is active before it cleans
+up and removes the properties it created. It will restore the LED state to
+non-transient state. When driver gets suspended, irrespective of the transient
+state, the LED state changes to LED_OFF.
+
+Transient trigger can be enabled and disabled from user space on led class
+devices that support this trigger, as shown below:
+
+echo transient > trigger
+echo none > trigger
+
+NOTE: Add a new property trigger state to control the state.
+
+This trigger exports three properties: activate, state, and duration. When
+transient trigger is activated these properties are set to default values.
+
+- duration allows setting timer value in msecs. The initial value is 0.
+- activate allows activating and deactivating the timer specified by
+ duration as needed. The initial and default value is 0. This will allow
+ duration to be set after trigger activation.
+- state allows user to specify a transient state to be held for the specified
+ duration.
+
+ activate - one shot timer activate mechanism.
+ 1 when activated, 0 when deactivated.
+ default value is zero when transient trigger is enabled,
+ to allow duration to be set.
+
+ activate state indicates a timer with a value of specified
+ duration running.
+ deactivated state indicates that there is no active timer
+ running.
+
+ duration - one shot timer value. When activate is set, duration value
+ is used to start a timer that runs once. This value doesn't
+ get changed by the trigger unless user does a set via
+ echo new_value > duration
+
+ state - transient state to be held. It has two values 0 or 1. 0 maps
+ to LED_OFF and 1 maps to LED_FULL. The specified state is
+ held for the duration of the one shot timer and then the
+ state gets changed to the non-transient state which is the
+ inverse of transient state.
+ If state = LED_FULL, when the timer runs out the state will
+ go back to LED_OFF.
+ If state = LED_OFF, when the timer runs out the state will
+ go back to LED_FULL.
+ Please note that current LED state is not checked prior to
+ changing the state to the specified state.
+ Driver could map these values to inverted depending on the
+ default states it defines for the LED in its brightness_set()
+ interface which is called from the led brightness_set()
+ interfaces to control the LED state.
+
+When the timer expires, activate goes back to the deactivated state; duration is
+left at the set value to be used when activate is set at a future time. This will
+allow user app to set the time once and activate it to run it once for the
+specified value as needed. When timer expires, state is restored to the
+non-transient state which is the inverse of the transient state.
+
+ echo 1 > activate - starts timer = duration when duration is not 0.
+ echo 0 > activate - cancels currently running timer.
+ echo n > duration - stores timer value to be used upon next
+ activate. Currently active timer if
+ any, continues to run for the specified time.
+ echo 0 > duration - stores timer value to be used upon next
+ activate. Currently active timer if any,
+ continues to run for the specified time.
+ echo 1 > state - stores desired transient state LED_FULL to be
+ held for the specified duration.
+ echo 0 > state - stores desired transient state LED_OFF to be
+ held for the specified duration.
+
+What is not supported:
+======================
+- Timer activation is one shot and extending and/or shortening the timer
+ is not supported.
+
+Example use-case 1:
+ echo transient > trigger
+ echo n > duration
+ echo 1 > state
+repeat the following step as needed:
+ echo 1 > activate - start timer = duration to run once
+ echo 1 > activate - start timer = duration to run once
+ echo none > trigger
+
+This trigger is intended to be used for the following example use cases:
+ - Control of vibrate (phones, tablets etc.) hardware by user space app.
+ - Use of LED by user space app as activity indicator.
+ - Use of LED by user space app as a kind of watchdog indicator -- as
+ long as the app is alive, it can keep the LED illuminated; if it dies
+ the LED will be extinguished automatically.
+ - Use by any user space app that needs a transient GPIO output.
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 6386f8c0482..930126698a0 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2,6 +2,7 @@ The Definitive KVM (Kernel-based Virtual Machine) API Documentation
===================================================================
1. General description
+----------------------
The kvm API is a set of ioctls that are issued to control various aspects
of a virtual machine. The ioctls belong to three classes
@@ -23,7 +24,9 @@ of a virtual machine. The ioctls belong to three classes
Only run vcpu ioctls from the same thread that was used to create the
vcpu.
+
2. File descriptors
+-------------------
The kvm API is centered around file descriptors. An initial
open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
@@ -41,7 +44,9 @@ not cause harm to the host, their actual behavior is not guaranteed by
the API. The only supported use is one virtual machine per process,
and one vcpu per thread.
+
3. Extensions
+-------------
As of Linux 2.6.22, the KVM ABI has been stabilized: no backward
incompatible change are allowed. However, there is an extension
@@ -53,7 +58,9 @@ Instead, kvm defines extension identifiers and a facility to query
whether a particular extension identifier is available. If it is, a
set of ioctls is available for application use.
+
4. API description
+------------------
This section describes ioctls that can be used to control kvm guests.
For each ioctl, the following information is provided along with a
@@ -75,6 +82,7 @@ description:
Returns: the return value. General error numbers (EBADF, ENOMEM, EINVAL)
are not detailed, but errors with specific meanings are.
+
4.1 KVM_GET_API_VERSION
Capability: basic
@@ -90,6 +98,7 @@ supported. Applications should refuse to run if KVM_GET_API_VERSION
returns a value other than 12. If this check passes, all ioctls
described as 'basic' will be available.
+
4.2 KVM_CREATE_VM
Capability: basic
@@ -109,6 +118,7 @@ In order to create user controlled virtual machines on S390, check
KVM_CAP_S390_UCONTROL and use the flag KVM_VM_S390_UCONTROL as
privileged user (CAP_SYS_ADMIN).
+
4.3 KVM_GET_MSR_INDEX_LIST
Capability: basic
@@ -135,6 +145,7 @@ Note: if kvm indicates supports MCE (KVM_CAP_MCE), then the MCE bank MSRs are
not returned in the MSR list, as different vcpus can have a different number
of banks, as set via the KVM_X86_SETUP_MCE ioctl.
+
4.4 KVM_CHECK_EXTENSION
Capability: basic
@@ -149,6 +160,7 @@ receives an integer that describes the extension availability.
Generally 0 means no and 1 means yes, but some extensions may report
additional information in the integer return value.
+
4.5 KVM_GET_VCPU_MMAP_SIZE
Capability: basic
@@ -161,6 +173,7 @@ The KVM_RUN ioctl (cf.) communicates with userspace via a shared
memory region. This ioctl returns the size of that region. See the
KVM_RUN documentation for details.
+
4.6 KVM_SET_MEMORY_REGION
Capability: basic
@@ -171,6 +184,7 @@ Returns: 0 on success, -1 on error
This ioctl is obsolete and has been removed.
+
4.7 KVM_CREATE_VCPU
Capability: basic
@@ -223,6 +237,7 @@ machines, the resulting vcpu fd can be memory mapped at page offset
KVM_S390_SIE_PAGE_OFFSET in order to obtain a memory map of the virtual
cpu's hardware control block.
+
4.8 KVM_GET_DIRTY_LOG (vm ioctl)
Capability: basic
@@ -246,6 +261,7 @@ since the last call to this ioctl. Bit 0 is the first page in the
memory slot. Ensure the entire structure is cleared to avoid padding
issues.
+
4.9 KVM_SET_MEMORY_ALIAS
Capability: basic
@@ -256,6 +272,7 @@ Returns: 0 (success), -1 (error)
This ioctl is obsolete and has been removed.
+
4.10 KVM_RUN
Capability: basic
@@ -272,6 +289,7 @@ obtained by mmap()ing the vcpu fd at offset 0, with the size given by
KVM_GET_VCPU_MMAP_SIZE. The parameter block is formatted as a 'struct
kvm_run' (see below).
+
4.11 KVM_GET_REGS
Capability: basic
@@ -292,6 +310,7 @@ struct kvm_regs {
__u64 rip, rflags;
};
+
4.12 KVM_SET_REGS
Capability: basic
@@ -304,6 +323,7 @@ Writes the general purpose registers into the vcpu.
See KVM_GET_REGS for the data structure.
+
4.13 KVM_GET_SREGS
Capability: basic
@@ -331,6 +351,7 @@ interrupt_bitmap is a bitmap of pending external interrupts. At most
one bit may be set. This interrupt has been acknowledged by the APIC
but not yet injected into the cpu core.
+
4.14 KVM_SET_SREGS
Capability: basic
@@ -342,6 +363,7 @@ Returns: 0 on success, -1 on error
Writes special registers into the vcpu. See KVM_GET_SREGS for the
data structures.
+
4.15 KVM_TRANSLATE
Capability: basic
@@ -365,6 +387,7 @@ struct kvm_translation {
__u8 pad[5];
};
+
4.16 KVM_INTERRUPT
Capability: basic
@@ -413,6 +436,7 @@ c) KVM_INTERRUPT_SET_LEVEL
Note that any value for 'irq' other than the ones stated above is invalid
and incurs unexpected behavior.
+
4.17 KVM_DEBUG_GUEST
Capability: basic
@@ -423,6 +447,7 @@ Returns: -1 on error
Support for this has been removed. Use KVM_SET_GUEST_DEBUG instead.
+
4.18 KVM_GET_MSRS
Capability: basic
@@ -451,6 +476,7 @@ Application code should set the 'nmsrs' member (which indicates the
size of the entries array) and the 'index' member of each array entry.
kvm will fill in the 'data' member.
+
4.19 KVM_SET_MSRS
Capability: basic
@@ -466,6 +492,7 @@ Application code should set the 'nmsrs' member (which indicates the
size of the entries array), and the 'index' and 'data' members of each
array entry.
+
4.20 KVM_SET_CPUID
Capability: basic
@@ -494,6 +521,7 @@ struct kvm_cpuid {
struct kvm_cpuid_entry entries[0];
};
+
4.21 KVM_SET_SIGNAL_MASK
Capability: basic
@@ -516,6 +544,7 @@ struct kvm_signal_mask {
__u8 sigset[0];
};
+
4.22 KVM_GET_FPU
Capability: basic
@@ -541,6 +570,7 @@ struct kvm_fpu {
__u32 pad2;
};
+
4.23 KVM_SET_FPU
Capability: basic
@@ -566,6 +596,7 @@ struct kvm_fpu {
__u32 pad2;
};
+
4.24 KVM_CREATE_IRQCHIP
Capability: KVM_CAP_IRQCHIP
@@ -579,6 +610,7 @@ ioapic, a virtual PIC (two PICs, nested), and sets up future vcpus to have a
local APIC. IRQ routing for GSIs 0-15 is set to both PIC and IOAPIC; GSI 16-23
only go to the IOAPIC. On ia64, a IOSAPIC is created.
+
4.25 KVM_IRQ_LINE
Capability: KVM_CAP_IRQCHIP
@@ -600,6 +632,7 @@ struct kvm_irq_level {
__u32 level; /* 0 or 1 */
};
+
4.26 KVM_GET_IRQCHIP
Capability: KVM_CAP_IRQCHIP
@@ -621,6 +654,7 @@ struct kvm_irqchip {
} chip;
};
+
4.27 KVM_SET_IRQCHIP
Capability: KVM_CAP_IRQCHIP
@@ -642,6 +676,7 @@ struct kvm_irqchip {
} chip;
};
+
4.28 KVM_XEN_HVM_CONFIG
Capability: KVM_CAP_XEN_HVM
@@ -666,6 +701,7 @@ struct kvm_xen_hvm_config {
__u8 pad2[30];
};
+
4.29 KVM_GET_CLOCK
Capability: KVM_CAP_ADJUST_CLOCK
@@ -684,6 +720,7 @@ struct kvm_clock_data {
__u32 pad[9];
};
+
4.30 KVM_SET_CLOCK
Capability: KVM_CAP_ADJUST_CLOCK
@@ -702,6 +739,7 @@ struct kvm_clock_data {
__u32 pad[9];
};
+
4.31 KVM_GET_VCPU_EVENTS
Capability: KVM_CAP_VCPU_EVENTS
@@ -741,6 +779,7 @@ struct kvm_vcpu_events {
KVM_VCPUEVENT_VALID_SHADOW may be set in the flags field to signal that
interrupt.shadow contains a valid state. Otherwise, this field is undefined.
+
4.32 KVM_SET_VCPU_EVENTS
Capability: KVM_CAP_VCPU_EVENTS
@@ -767,6 +806,7 @@ If KVM_CAP_INTR_SHADOW is available, KVM_VCPUEVENT_VALID_SHADOW can be set in
the flags field to signal that interrupt.shadow contains a valid state and
shall be written into the VCPU.
+
4.33 KVM_GET_DEBUGREGS
Capability: KVM_CAP_DEBUGREGS
@@ -785,6 +825,7 @@ struct kvm_debugregs {
__u64 reserved[9];
};
+
4.34 KVM_SET_DEBUGREGS
Capability: KVM_CAP_DEBUGREGS
@@ -798,6 +839,7 @@ Writes debug registers into the vcpu.
See KVM_GET_DEBUGREGS for the data structure. The flags field is unused
yet and must be cleared on entry.
+
4.35 KVM_SET_USER_MEMORY_REGION
Capability: KVM_CAP_USER_MEM
@@ -844,6 +886,7 @@ It is recommended to use this API instead of the KVM_SET_MEMORY_REGION ioctl.
The KVM_SET_MEMORY_REGION does not allow fine grained control over memory
allocation and is deprecated.
+
4.36 KVM_SET_TSS_ADDR
Capability: KVM_CAP_SET_TSS_ADDR
@@ -862,6 +905,7 @@ This ioctl is required on Intel-based hosts. This is needed on Intel hardware
because of a quirk in the virtualization implementation (see the internals
documentation when it pops into existence).
+
4.37 KVM_ENABLE_CAP
Capability: KVM_CAP_ENABLE_CAP
@@ -897,6 +941,7 @@ function properly, this is the place to put them.
__u8 pad[64];
};
+
4.38 KVM_GET_MP_STATE
Capability: KVM_CAP_MP_STATE
@@ -927,6 +972,7 @@ Possible values are:
This ioctl is only useful after KVM_CREATE_IRQCHIP. Without an in-kernel
irqchip, the multiprocessing state must be maintained by userspace.
+
4.39 KVM_SET_MP_STATE
Capability: KVM_CAP_MP_STATE
@@ -941,6 +987,7 @@ arguments.
This ioctl is only useful after KVM_CREATE_IRQCHIP. Without an in-kernel
irqchip, the multiprocessing state must be maintained by userspace.
+
4.40 KVM_SET_IDENTITY_MAP_ADDR
Capability: KVM_CAP_SET_IDENTITY_MAP_ADDR
@@ -959,6 +1006,7 @@ This ioctl is required on Intel-based hosts. This is needed on Intel hardware
because of a quirk in the virtualization implementation (see the internals
documentation when it pops into existence).
+
4.41 KVM_SET_BOOT_CPU_ID
Capability: KVM_CAP_SET_BOOT_CPU_ID
@@ -971,6 +1019,7 @@ Define which vcpu is the Bootstrap Processor (BSP). Values are the same
as the vcpu id in KVM_CREATE_VCPU. If this ioctl is not called, the default
is vcpu 0.
+
4.42 KVM_GET_XSAVE
Capability: KVM_CAP_XSAVE
@@ -985,6 +1034,7 @@ struct kvm_xsave {
This ioctl would copy current vcpu's xsave struct to the userspace.
+
4.43 KVM_SET_XSAVE
Capability: KVM_CAP_XSAVE
@@ -999,6 +1049,7 @@ struct kvm_xsave {
This ioctl would copy userspace's xsave struct to the kernel.
+
4.44 KVM_GET_XCRS
Capability: KVM_CAP_XCRS
@@ -1022,6 +1073,7 @@ struct kvm_xcrs {
This ioctl would copy current vcpu's xcrs to the userspace.
+
4.45 KVM_SET_XCRS
Capability: KVM_CAP_XCRS
@@ -1045,6 +1097,7 @@ struct kvm_xcrs {
This ioctl would set vcpu's xcr to the value userspace specified.
+
4.46 KVM_GET_SUPPORTED_CPUID
Capability: KVM_CAP_EXT_CPUID
@@ -1119,6 +1172,7 @@ support. Instead it is reported via
if that returns true and you use KVM_CREATE_IRQCHIP, or if you emulate the
feature in userspace, then you can enable the feature for KVM_SET_CPUID2.
+
4.47 KVM_PPC_GET_PVINFO
Capability: KVM_CAP_PPC_GET_PVINFO
@@ -1142,6 +1196,7 @@ of 4 instructions that make up a hypercall.
If any additional field gets added to this structure later on, a bit for that
additional piece of information will be set in the flags bitmap.
+
4.48 KVM_ASSIGN_PCI_DEVICE
Capability: KVM_CAP_DEVICE_ASSIGNMENT
@@ -1185,6 +1240,7 @@ Only PCI header type 0 devices with PCI BAR resources are supported by
device assignment. The user requesting this ioctl must have read/write
access to the PCI sysfs resource files associated with the device.
+
4.49 KVM_DEASSIGN_PCI_DEVICE
Capability: KVM_CAP_DEVICE_DEASSIGNMENT
@@ -1198,6 +1254,7 @@ Ends PCI device assignment, releasing all associated resources.
See KVM_CAP_DEVICE_ASSIGNMENT for the data structure. Only assigned_dev_id is
used in kvm_assigned_pci_dev to identify the device.
+
4.50 KVM_ASSIGN_DEV_IRQ
Capability: KVM_CAP_ASSIGN_DEV_IRQ
@@ -1231,6 +1288,7 @@ The following flags are defined:
It is not valid to specify multiple types per host or guest IRQ. However, the
IRQ type of host and guest can differ or can even be null.
+
4.51 KVM_DEASSIGN_DEV_IRQ
Capability: KVM_CAP_ASSIGN_DEV_IRQ
@@ -1245,6 +1303,7 @@ See KVM_ASSIGN_DEV_IRQ for the data structure. The target device is specified
by assigned_dev_id, flags must correspond to the IRQ type specified on
KVM_ASSIGN_DEV_IRQ. Partial deassignment of host or guest IRQ is allowed.
+
4.52 KVM_SET_GSI_ROUTING
Capability: KVM_CAP_IRQ_ROUTING
@@ -1293,6 +1352,7 @@ struct kvm_irq_routing_msi {
__u32 pad;
};
+
4.53 KVM_ASSIGN_SET_MSIX_NR
Capability: KVM_CAP_DEVICE_MSIX
@@ -1314,6 +1374,7 @@ struct kvm_assigned_msix_nr {
#define KVM_MAX_MSIX_PER_DEV 256
+
4.54 KVM_ASSIGN_SET_MSIX_ENTRY
Capability: KVM_CAP_DEVICE_MSIX
@@ -1332,7 +1393,8 @@ struct kvm_assigned_msix_entry {
__u16 padding[3];
};
-4.54 KVM_SET_TSC_KHZ
+
+4.55 KVM_SET_TSC_KHZ
Capability: KVM_CAP_TSC_CONTROL
Architectures: x86
@@ -1343,7 +1405,8 @@ Returns: 0 on success, -1 on error
Specifies the tsc frequency for the virtual machine. The unit of the
frequency is KHz.
-4.55 KVM_GET_TSC_KHZ
+
+4.56 KVM_GET_TSC_KHZ
Capability: KVM_CAP_GET_TSC_KHZ
Architectures: x86
@@ -1355,7 +1418,8 @@ Returns the tsc frequency of the guest. The unit of the return value is
KHz. If the host has unstable tsc this ioctl returns -EIO instead as an
error.
-4.56 KVM_GET_LAPIC
+
+4.57 KVM_GET_LAPIC
Capability: KVM_CAP_IRQCHIP
Architectures: x86
@@ -1371,7 +1435,8 @@ struct kvm_lapic_state {
Reads the Local APIC registers and copies them into the input argument. The
data format and layout are the same as documented in the architecture manual.
-4.57 KVM_SET_LAPIC
+
+4.58 KVM_SET_LAPIC
Capability: KVM_CAP_IRQCHIP
Architectures: x86
@@ -1387,7 +1452,8 @@ struct kvm_lapic_state {
Copies the input argument into the Local APIC registers. The data format
and layout are the same as documented in the architecture manual.
-4.58 KVM_IOEVENTFD
+
+4.59 KVM_IOEVENTFD
Capability: KVM_CAP_IOEVENTFD
Architectures: all
@@ -1417,7 +1483,8 @@ The following flags are defined:
If datamatch flag is set, the event will be signaled only if the written value
to the registered address is equal to datamatch in struct kvm_ioeventfd.
-4.59 KVM_DIRTY_TLB
+
+4.60 KVM_DIRTY_TLB
Capability: KVM_CAP_SW_TLB
Architectures: ppc
@@ -1449,7 +1516,8 @@ The "num_dirty" field is a performance hint for KVM to determine whether it
should skip processing the bitmap and just invalidate everything. It must
be set to the number of set bits in the bitmap.
-4.60 KVM_ASSIGN_SET_INTX_MASK
+
+4.61 KVM_ASSIGN_SET_INTX_MASK
Capability: KVM_CAP_PCI_2_3
Architectures: x86
@@ -1482,6 +1550,7 @@ See KVM_ASSIGN_DEV_IRQ for the data structure. The target device is specified
by assigned_dev_id. In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is
evaluated.
+
4.62 KVM_CREATE_SPAPR_TCE
Capability: KVM_CAP_SPAPR_TCE
@@ -1517,6 +1586,7 @@ the entries written by kernel-handled H_PUT_TCE calls, and also lets
userspace update the TCE table directly which is useful in some
circumstances.
+
4.63 KVM_ALLOCATE_RMA
Capability: KVM_CAP_PPC_RMA
@@ -1549,6 +1619,7 @@ is supported; 2 if the processor requires all virtual machines to have
an RMA, or 1 if the processor can use an RMA but doesn't require it,
because it supports the Virtual RMA (VRMA) facility.
+
4.64 KVM_NMI
Capability: KVM_CAP_USER_NMI
@@ -1574,6 +1645,7 @@ following algorithm:
Some guests configure the LINT1 NMI input to cause a panic, aiding in
debugging.
+
4.65 KVM_S390_UCAS_MAP
Capability: KVM_CAP_S390_UCONTROL
@@ -1593,6 +1665,7 @@ This ioctl maps the memory at "user_addr" with the length "length" to
the vcpu's address space starting at "vcpu_addr". All parameters need to
be aligned by 1 megabyte.
+
4.66 KVM_S390_UCAS_UNMAP
Capability: KVM_CAP_S390_UCONTROL
@@ -1612,6 +1685,7 @@ This ioctl unmaps the memory in the vcpu's address space starting at
"vcpu_addr" with the length "length". The field "user_addr" is ignored.
All parameters need to be aligned by 1 megabyte.
+
4.67 KVM_S390_VCPU_FAULT
Capability: KVM_CAP_S390_UCONTROL
@@ -1628,6 +1702,7 @@ table upfront. This is useful to handle validity intercepts for user
controlled virtual machines to fault in the virtual cpu's lowcore pages
prior to calling the KVM_RUN ioctl.
+
4.68 KVM_SET_ONE_REG
Capability: KVM_CAP_ONE_REG
@@ -1653,6 +1728,7 @@ registers, find a list below:
| |
PPC | KVM_REG_PPC_HIOR | 64
+
4.69 KVM_GET_ONE_REG
Capability: KVM_CAP_ONE_REG
@@ -1669,7 +1745,193 @@ at the memory location pointed to by "addr".
The list of registers accessible using this interface is identical to the
list in 4.68.
+
+4.70 KVM_KVMCLOCK_CTRL
+
+Capability: KVM_CAP_KVMCLOCK_CTRL
+Architectures: Any that implement pvclocks (currently x86 only)
+Type: vcpu ioctl
+Parameters: None
+Returns: 0 on success, -1 on error
+
+This signals to the host kernel that the specified guest is being paused by
+userspace. The host will set a flag in the pvclock structure that is checked
+by the soft lockup watchdog. The flag is part of the pvclock structure that
+is shared between guest and host, specifically the second bit of the flags
+field of the pvclock_vcpu_time_info structure. It will be set exclusively by
+the host and read/cleared exclusively by the guest. The guest operation of
+checking and clearing the flag must be an atomic operation, so
+load-link/store-conditional, or an equivalent, must be used. There are two cases
+where the guest will clear the flag: when the soft lockup watchdog timer resets
+itself or when a soft lockup is detected. This ioctl can be called any time
+after pausing the vcpu, but before it is resumed.
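+
+A minimal userspace sketch (illustrative only, not part of the official
+documentation), assuming "vcpu_fd" is an existing vcpu file descriptor and a
+<linux/kvm.h> that defines KVM_KVMCLOCK_CTRL; error handling is omitted:
+
+#include <sys/ioctl.h>
+#include <linux/kvm.h>
+
+/* Tell the host that this vcpu is about to be paused by userspace. */
+void notify_vcpu_paused(int vcpu_fd)
+{
+	ioctl(vcpu_fd, KVM_KVMCLOCK_CTRL, 0);
+}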
+
+
+4.71 KVM_SIGNAL_MSI
+
+Capability: KVM_CAP_SIGNAL_MSI
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_msi (in)
+Returns: >0 on delivery, 0 if the guest blocked the MSI, and -1 on error
+
+Directly inject an MSI message. Only valid with an in-kernel irqchip that
+handles MSI messages.
+
+struct kvm_msi {
+ __u32 address_lo;
+ __u32 address_hi;
+ __u32 data;
+ __u32 flags;
+ __u8 pad[16];
+};
+
+No flags are defined so far. The corresponding field must be 0.
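+
+A minimal sketch of injecting an MSI (illustrative only, not part of the
+official documentation), assuming "vm_fd" is an existing VM file descriptor
+and the address/data values come from the guest's MSI configuration; error
+handling is omitted:
+
+#include <string.h>
+#include <sys/ioctl.h>
+#include <linux/kvm.h>
+
+int inject_msi(int vm_fd, __u64 address, __u32 data)
+{
+	struct kvm_msi msi;
+
+	memset(&msi, 0, sizeof(msi));		/* flags and pad must be 0 */
+	msi.address_lo = (__u32)address;
+	msi.address_hi = (__u32)(address >> 32);
+	msi.data = data;
+	return ioctl(vm_fd, KVM_SIGNAL_MSI, &msi);	/* >0, 0 or -1 */
+}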
+
+
+4.72 KVM_CREATE_PIT2
+
+Capability: KVM_CAP_PIT2
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_pit_config (in)
+Returns: 0 on success, -1 on error
+
+Creates an in-kernel device model for the i8254 PIT. This call is only valid
+after enabling in-kernel irqchip support via KVM_CREATE_IRQCHIP. The following
+parameters have to be passed:
+
+struct kvm_pit_config {
+ __u32 flags;
+ __u32 pad[15];
+};
+
+Valid flags are:
+
+#define KVM_PIT_SPEAKER_DUMMY 1 /* emulate speaker port stub */
+
+PIT timer interrupts may use a per-VM kernel thread for injection. If it
+exists, this thread will have a name of the following pattern:
+
+kvm-pit/<owner-process-pid>
+
+When running a guest with elevated priorities, the scheduling parameters of
+this thread may have to be adjusted accordingly.
+
+This IOCTL replaces the obsolete KVM_CREATE_PIT.
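+
+A minimal sketch (illustrative only, not part of the official documentation),
+assuming "vm_fd" already has an in-kernel irqchip created via
+KVM_CREATE_IRQCHIP; error handling is omitted:
+
+#include <string.h>
+#include <sys/ioctl.h>
+#include <linux/kvm.h>
+
+int create_pit(int vm_fd)
+{
+	struct kvm_pit_config pit_config;
+
+	memset(&pit_config, 0, sizeof(pit_config));
+	pit_config.flags = KVM_PIT_SPEAKER_DUMMY;	/* optional */
+	return ioctl(vm_fd, KVM_CREATE_PIT2, &pit_config);
+}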
+
+
+4.73 KVM_GET_PIT2
+
+Capability: KVM_CAP_PIT_STATE2
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_pit_state2 (out)
+Returns: 0 on success, -1 on error
+
+Retrieves the state of the in-kernel PIT model. Only valid after
+KVM_CREATE_PIT2. The state is returned in the following structure:
+
+struct kvm_pit_state2 {
+ struct kvm_pit_channel_state channels[3];
+ __u32 flags;
+ __u32 reserved[9];
+};
+
+Valid flags are:
+
+/* disable PIT in HPET legacy mode */
+#define KVM_PIT_FLAGS_HPET_LEGACY 0x00000001
+
+This IOCTL replaces the obsolete KVM_GET_PIT.
+
+
+4.74 KVM_SET_PIT2
+
+Capability: KVM_CAP_PIT_STATE2
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_pit_state2 (in)
+Returns: 0 on success, -1 on error
+
+Sets the state of the in-kernel PIT model. Only valid after KVM_CREATE_PIT2.
+See KVM_GET_PIT2 for details on struct kvm_pit_state2.
+
+This IOCTL replaces the obsolete KVM_SET_PIT.
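+
+A minimal save/restore sketch combining KVM_GET_PIT2 and KVM_SET_PIT2
+(illustrative only, not part of the official documentation), assuming "vm_fd"
+has an in-kernel PIT created with KVM_CREATE_PIT2; error handling is omitted:
+
+#include <sys/ioctl.h>
+#include <linux/kvm.h>
+
+void save_restore_pit(int vm_fd)
+{
+	struct kvm_pit_state2 pit_state;
+
+	ioctl(vm_fd, KVM_GET_PIT2, &pit_state);	/* snapshot PIT state */
+	/* ... e.g. serialize pit_state for migration ... */
+	ioctl(vm_fd, KVM_SET_PIT2, &pit_state);	/* restore PIT state */
+}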
+
+
+4.75 KVM_PPC_GET_SMMU_INFO
+
+Capability: KVM_CAP_PPC_GET_SMMU_INFO
+Architectures: powerpc
+Type: vm ioctl
+Parameters: None
+Returns: 0 on success, -1 on error
+
+This populates and returns a structure describing the features of
+the "Server" class MMU emulation supported by KVM.
+This can in turn be used by userspace to generate the appropriate
+device-tree properties for the guest operating system.
+
+The structure contains some global information, followed by an
+array of supported segment page sizes:
+
+ struct kvm_ppc_smmu_info {
+ __u64 flags;
+ __u32 slb_size;
+ __u32 pad;
+ struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
+ };
+
+The supported flags are:
+
+ - KVM_PPC_PAGE_SIZES_REAL:
+ When that flag is set, guest page sizes must "fit" the backing
+ store page sizes. When not set, any page size in the list can
+ be used regardless of how they are backed by userspace.
+
+ - KVM_PPC_1T_SEGMENTS
+ The emulated MMU supports 1T segments in addition to the
+ standard 256M ones.
+
+The "slb_size" field indicates how many SLB entries are supported
+
+The "sps" array contains 8 entries indicating the supported base
+page sizes for a segment in increasing order. Each entry is defined
+as follows:
+
+ struct kvm_ppc_one_seg_page_size {
+ __u32 page_shift; /* Base page shift of segment (or 0) */
+ __u32 slb_enc; /* SLB encoding for BookS */
+ struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ];
+ };
+
+An entry with a "page_shift" of 0 is unused. Because the array is
+organized in increasing order, a lookup can stop when encountering
+such an entry.
+
+The "slb_enc" field provides the encoding to use in the SLB for the
+page size. The bits are in positions such that the value can directly
+be OR'ed into the "vsid" argument of the slbmte instruction.
+
+The "enc" array is a list which for each of those segment base page
+size provides the list of supported actual page sizes (which can be
+only larger or equal to the base page size), along with the
+corresponding encoding in the hash PTE. Similarily, the array is
+8 entries sorted by increasing sizes and an entry with a "0" shift
+is an empty entry and a terminator:
+
+ struct kvm_ppc_one_page_size {
+ __u32 page_shift; /* Page shift (or 0) */
+ __u32 pte_enc; /* Encoding in the HPTE (>>12) */
+ };
+
+The "pte_enc" field provides a value that can OR'ed into the hash
+PTE's RPN field (ie, it needs to be shifted left by 12 to OR it
+into the hash PTE second double word).
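+
+A minimal sketch of walking the returned information (illustrative only, not
+part of the official documentation), assuming "vm_fd" is a PPC VM file
+descriptor; error handling is omitted:
+
+#include <sys/ioctl.h>
+#include <linux/kvm.h>
+
+void list_page_sizes(int vm_fd)
+{
+	struct kvm_ppc_smmu_info info;
+	int i, j;
+
+	ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &info);
+	for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
+		if (!info.sps[i].page_shift)
+			break;		/* a 0 shift terminates the sorted array */
+		for (j = 0; j < KVM_PPC_PAGE_SIZES_MAX_SZ; j++) {
+			if (!info.sps[i].enc[j].page_shift)
+				break;
+			/* this base page size supports this actual page size */
+		}
+	}
+}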
+
5. The kvm_run structure
+------------------------
Application code obtains a pointer to the kvm_run structure by
mmap()ing a vcpu fd. From that point, application code can control
@@ -1910,7 +2172,9 @@ and usually define the validity of a groups of registers. (e.g. one bit
};
+
6. Capabilities that can be enabled
+-----------------------------------
There are certain capabilities that change the behavior of the virtual CPU when
enabled. To enable them, please see section 4.37. Below you can find a list of
@@ -1926,6 +2190,7 @@ The following information is provided along with the description:
Returns: the return value. General error numbers (EBADF, ENOMEM, EINVAL)
are not detailed, but errors with specific meanings are.
+
6.1 KVM_CAP_PPC_OSI
Architectures: ppc
@@ -1939,6 +2204,7 @@ between the guest and the host.
When this capability is enabled, KVM_EXIT_OSI can occur.
+
6.2 KVM_CAP_PPC_PAPR
Architectures: ppc
@@ -1957,6 +2223,7 @@ HTAB invisible to the guest.
When this capability is enabled, KVM_EXIT_PAPR_HCALL can occur.
+
6.3 KVM_CAP_SW_TLB
Architectures: ppc
diff --git a/Documentation/virtual/kvm/cpuid.txt b/Documentation/virtual/kvm/cpuid.txt
index 882068538c9..83afe65d496 100644
--- a/Documentation/virtual/kvm/cpuid.txt
+++ b/Documentation/virtual/kvm/cpuid.txt
@@ -10,11 +10,15 @@ a guest.
KVM cpuid functions are:
function: KVM_CPUID_SIGNATURE (0x40000000)
-returns : eax = 0,
+returns : eax = 0x40000001,
ebx = 0x4b4d564b,
ecx = 0x564b4d56,
edx = 0x4d.
Note that this value in ebx, ecx and edx corresponds to the string "KVMKVMKVM".
+The value in eax corresponds to the maximum cpuid function present in this leaf,
+and will be updated if more functions are added in the future.
+Note also that old hosts set the eax value to 0x0. This should
+be interpreted as if the value were 0x40000001.
This function queries the presence of KVM cpuid leafs.
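+
+A minimal detection sketch (illustrative only, not part of this file), using
+GCC/clang's <cpuid.h>; a full implementation would first check the hypervisor
+bit (bit 31 of ecx from cpuid leaf 1):
+
+#include <cpuid.h>
+#include <stdbool.h>
+
+static bool running_on_kvm(void)
+{
+	unsigned int eax, ebx, ecx, edx;
+
+	__cpuid(0x40000000, eax, ebx, ecx, edx);
+	/* ebx, ecx and edx spell "KVMKVMKVM\0\0\0" */
+	return ebx == 0x4b4d564b && ecx == 0x564b4d56 && edx == 0x4d;
+}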
diff --git a/Documentation/virtual/kvm/msr.txt b/Documentation/virtual/kvm/msr.txt
index 50317809113..96b41bd9752 100644
--- a/Documentation/virtual/kvm/msr.txt
+++ b/Documentation/virtual/kvm/msr.txt
@@ -109,6 +109,10 @@ MSR_KVM_SYSTEM_TIME_NEW: 0x4b564d01
0 | 24 | multiple cpus are guaranteed to
| | be monotonic
-------------------------------------------------------------
+ | | guest vcpu has been paused by
+ 1 | N/A | the host
+ | | See 4.70 in api.txt
+ -------------------------------------------------------------
Availability of this MSR must be checked via bit 3 in 0x4000001 cpuid
leaf prior to usage.
diff --git a/Documentation/vm/transhuge.txt b/Documentation/vm/transhuge.txt
index 29bdf62aac0..f734bb2a78d 100644
--- a/Documentation/vm/transhuge.txt
+++ b/Documentation/vm/transhuge.txt
@@ -166,6 +166,68 @@ behavior. So to make them effective you need to restart any
application that could have been using hugepages. This also applies to
the regions registered in khugepaged.
+== Monitoring usage ==
+
+The number of transparent huge pages currently used by the system is
+available by reading the AnonHugePages field in /proc/meminfo. To
+identify what applications are using transparent huge pages, it is
+necessary to read /proc/PID/smaps and count the AnonHugePages fields
+for each mapping. Note that reading the smaps file is expensive and
+reading it frequently will incur overhead.
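+
+A minimal sketch of reading the system-wide counter (illustrative only, not
+part of this document; "anon_hugepages_kb" is a hypothetical helper):
+
+#include <stdio.h>
+
+/* Return AnonHugePages from /proc/meminfo in kB, or -1 on failure. */
+static long anon_hugepages_kb(void)
+{
+	char line[256];
+	long kb = -1;
+	FILE *f = fopen("/proc/meminfo", "r");
+
+	if (!f)
+		return -1;
+	while (fgets(line, sizeof(line), f))
+		if (sscanf(line, "AnonHugePages: %ld kB", &kb) == 1)
+			break;
+	fclose(f);
+	return kb;
+}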
+
+There are a number of counters in /proc/vmstat that may be used to
+monitor how successfully the system is providing huge pages for use.
+
+thp_fault_alloc is incremented every time a huge page is successfully
+ allocated to handle a page fault. This applies to both the
+ first time a page is faulted and for COW faults.
+
+thp_collapse_alloc is incremented by khugepaged when it has found
+ a range of pages to collapse into one huge page and has
+ successfully allocated a new huge page to store the data.
+
+thp_fault_fallback is incremented if a page fault fails to allocate
+ a huge page and instead falls back to using small pages.
+
+thp_collapse_alloc_failed is incremented if khugepaged found a range
+ of pages that should be collapsed into one huge page but failed
+ the allocation.
+
+thp_split is incremented every time a huge page is split into base
+ pages. This can happen for a variety of reasons but a common
+ reason is that a huge page is old and is being reclaimed.
+
+As the system ages, allocating huge pages may be expensive as the
+system uses memory compaction to copy data around memory to free a
+huge page for use. There are some counters in /proc/vmstat to help
+monitor this overhead.
+
+compact_stall is incremented every time a process stalls to run
+ memory compaction so that a huge page is free for use.
+
+compact_success is incremented if the system compacted memory and
+ freed a huge page for use.
+
+compact_fail is incremented if the system tries to compact memory
+	but fails.
+
+compact_pages_moved is incremented each time a page is moved. If
+ this value is increasing rapidly, it implies that the system
+ is copying a lot of data to satisfy the huge page allocation.
+ It is possible that the cost of copying exceeds any savings
+ from reduced TLB misses.
+
+compact_pagemigrate_failed is incremented when the underlying mechanism
+ for moving a page failed.
+
+compact_blocks_moved is incremented each time memory compaction examines
+ a huge page aligned range of pages.
+
+It is possible to establish how long the stalls were by using the function
+tracer to record how long was spent in __alloc_pages_nodemask and
+using the mm_page_alloc tracepoint to identify which allocations were
+for huge pages.
+
== get_user_pages and follow_page ==
get_user_pages and follow_page if run on a hugepage, will return the
diff --git a/MAINTAINERS b/MAINTAINERS
index 150a29f3cd3..a246490c95e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1905,6 +1905,16 @@ F: Documentation/filesystems/coda.txt
F: fs/coda/
F: include/linux/coda*.h
+COMMON CLK FRAMEWORK
+M: Mike Turquette <mturquette@ti.com>
+M: Mike Turquette <mturquette@linaro.org>
+L: linux-arm-kernel@lists.infradead.org (same as CLK API & CLKDEV)
+T: git git://git.linaro.org/people/mturquette/linux.git
+S: Maintained
+F: drivers/clk/clk.c
+F: drivers/clk/clk-*
+F: include/linux/clk-pr*
+
COMMON INTERNET FILE SYSTEM (CIFS)
M: Steve French <sfrench@samba.org>
L: linux-cifs@vger.kernel.org
@@ -2978,9 +2988,9 @@ GENERIC GPIO I2C MULTIPLEXER DRIVER
M: Peter Korsgaard <peter.korsgaard@barco.com>
L: linux-i2c@vger.kernel.org
S: Supported
-F: drivers/i2c/muxes/gpio-i2cmux.c
-F: include/linux/gpio-i2cmux.h
-F: Documentation/i2c/muxes/gpio-i2cmux
+F: drivers/i2c/muxes/i2c-mux-gpio.c
+F: include/linux/i2c-mux-gpio.h
+F: Documentation/i2c/muxes/i2c-mux-gpio
GENERIC HDLC (WAN) DRIVERS
M: Krzysztof Halasa <khc@pm.waw.pl>
@@ -3222,10 +3232,8 @@ F: include/linux/clockchips.h
F: include/linux/hrtimer.h
HIGH-SPEED SCC DRIVER FOR AX.25
-M: Klaus Kudielka <klaus.kudielka@ieee.org>
L: linux-hams@vger.kernel.org
-W: http://www.nt.tuwien.ac.at/~kkudielk/Linux/
-S: Maintained
+S: Orphan
F: drivers/net/hamradio/dmascc.c
F: drivers/net/hamradio/scc.c
@@ -3372,6 +3380,12 @@ W: http://www.developer.ibm.com/welcome/netfinity/serveraid.html
S: Supported
F: drivers/scsi/ips.*
+ICH LPC AND GPIO DRIVER
+M: Peter Tyser <ptyser@xes-inc.com>
+S: Maintained
+F: drivers/mfd/lpc_ich.c
+F: drivers/gpio/gpio-ich.c
+
IDE SUBSYSTEM
M: "David S. Miller" <davem@davemloft.net>
L: linux-ide@vger.kernel.org
@@ -4495,12 +4509,6 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/mmc/host/imxmmc.*
-MOUSE AND MISC DEVICES [GENERAL]
-M: Alessandro Rubini <rubini@ipvvis.unipv.it>
-S: Maintained
-F: drivers/input/mouse/
-F: include/linux/gpio_mouse.h
-
MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
M: Jiri Slaby <jirislaby@gmail.com>
S: Maintained
@@ -5138,7 +5146,7 @@ PCA9541 I2C BUS MASTER SELECTOR DRIVER
M: Guenter Roeck <guenter.roeck@ericsson.com>
L: linux-i2c@vger.kernel.org
S: Maintained
-F: drivers/i2c/muxes/pca9541.c
+F: drivers/i2c/muxes/i2c-mux-pca9541.c
PCA9564/PCA9665 I2C BUS DRIVER
M: Wolfram Sang <w.sang@pengutronix.de>
@@ -6340,14 +6348,25 @@ F: include/linux/compiler.h
SPEAR PLATFORM SUPPORT
M: Viresh Kumar <viresh.kumar@st.com>
+M: Shiraz Hashim <shiraz.hashim@st.com>
L: spear-devel@list.st.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.st.com/spear
S: Maintained
F: arch/arm/plat-spear/
+SPEAR13XX MACHINE SUPPORT
+M: Viresh Kumar <viresh.kumar@st.com>
+M: Shiraz Hashim <shiraz.hashim@st.com>
+L: spear-devel@list.st.com
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+W: http://www.st.com/spear
+S: Maintained
+F: arch/arm/mach-spear13xx/
+
SPEAR3XX MACHINE SUPPORT
M: Viresh Kumar <viresh.kumar@st.com>
+M: Shiraz Hashim <shiraz.hashim@st.com>
L: spear-devel@list.st.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.st.com/spear
@@ -6356,6 +6375,8 @@ F: arch/arm/mach-spear3xx/
SPEAR6XX MACHINE SUPPORT
M: Rajeev Kumar <rajeev-dlh.kumar@st.com>
+M: Shiraz Hashim <shiraz.hashim@st.com>
+M: Viresh Kumar <viresh.kumar@st.com>
L: spear-devel@list.st.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.st.com/spear
@@ -6368,9 +6389,7 @@ L: spear-devel@list.st.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.st.com/spear
S: Maintained
-F: arch/arm/mach-spear*/clock.c
-F: arch/arm/plat-spear/clock.c
-F: arch/arm/plat-spear/include/plat/clock.h
+F: drivers/clk/spear/
SPI SUBSYSTEM
M: Grant Likely <grant.likely@secretlab.ca>
diff --git a/Makefile b/Makefile
index b62c1e09444..dda21c3efc7 100644
--- a/Makefile
+++ b/Makefile
@@ -231,10 +231,6 @@ endif
# Where to locate arch specific headers
hdr-arch := $(SRCARCH)
-ifeq ($(ARCH),m68knommu)
- hdr-arch := m68k
-endif
-
KCONFIG_CONFIG ?= .config
export KCONFIG_CONFIG
@@ -341,7 +337,6 @@ AWK = awk
GENKSYMS = scripts/genksyms/genksyms
INSTALLKERNEL := installkernel
DEPMOD = /sbin/depmod
-KALLSYMS = scripts/kallsyms
PERL = perl
CHECK = sparse
@@ -739,197 +734,21 @@ libs-y1 := $(patsubst %/, %/lib.a, $(libs-y))
libs-y2 := $(patsubst %/, %/built-in.o, $(libs-y))
libs-y := $(libs-y1) $(libs-y2)
-# Build vmlinux
-# ---------------------------------------------------------------------------
-# vmlinux is built from the objects selected by $(vmlinux-init) and
-# $(vmlinux-main). Most are built-in.o files from top-level directories
-# in the kernel tree, others are specified in arch/$(ARCH)/Makefile.
-# Ordering when linking is important, and $(vmlinux-init) must be first.
-#
-# vmlinux
-# ^
-# |
-# +-< $(vmlinux-init)
-# | +--< init/version.o + more
-# |
-# +--< $(vmlinux-main)
-# | +--< driver/built-in.o mm/built-in.o + more
-# |
-# +-< kallsyms.o (see description in CONFIG_KALLSYMS section)
-#
-# vmlinux version (uname -v) cannot be updated during normal
-# descending-into-subdirs phase since we do not yet know if we need to
-# update vmlinux.
-# Therefore this step is delayed until just before final link of vmlinux -
-# except in the kallsyms case where it is done just before adding the
-# symbols to the kernel.
-#
-# System.map is generated to document addresses of all kernel symbols
-
-vmlinux-init := $(head-y) $(init-y)
-vmlinux-main := $(core-y) $(libs-y) $(drivers-y) $(net-y)
-vmlinux-all := $(vmlinux-init) $(vmlinux-main)
-vmlinux-lds := arch/$(SRCARCH)/kernel/vmlinux.lds
-export KBUILD_VMLINUX_OBJS := $(vmlinux-all)
-
-# Rule to link vmlinux - also used during CONFIG_KALLSYMS
-# May be overridden by arch/$(ARCH)/Makefile
-quiet_cmd_vmlinux__ ?= LD $@
- cmd_vmlinux__ ?= $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) -o $@ \
- -T $(vmlinux-lds) $(vmlinux-init) \
- --start-group $(vmlinux-main) --end-group \
- $(filter-out $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o FORCE ,$^)
-
-# Generate new vmlinux version
-quiet_cmd_vmlinux_version = GEN .version
- cmd_vmlinux_version = set -e; \
- if [ ! -r .version ]; then \
- rm -f .version; \
- echo 1 >.version; \
- else \
- mv .version .old_version; \
- expr 0$$(cat .old_version) + 1 >.version; \
- fi; \
- $(MAKE) $(build)=init
-
-# Generate System.map
-quiet_cmd_sysmap = SYSMAP
- cmd_sysmap = $(CONFIG_SHELL) $(srctree)/scripts/mksysmap
-
-# Sort exception table at build time
-quiet_cmd_sortextable = SORTEX
- cmd_sortextable = $(objtree)/scripts/sortextable
-
-# Link of vmlinux
-# If CONFIG_KALLSYMS is set .version is already updated
-# Generate System.map and verify that the content is consistent
-# Use + in front of the vmlinux_version rule to silent warning with make -j2
-# First command is ':' to allow us to use + in front of the rule
-define rule_vmlinux__
- :
- $(if $(CONFIG_KALLSYMS),,+$(call cmd,vmlinux_version))
-
- $(call cmd,vmlinux__)
- $(Q)echo 'cmd_$@ := $(cmd_vmlinux__)' > $(@D)/.$(@F).cmd
-
- $(if $(CONFIG_BUILDTIME_EXTABLE_SORT), \
- $(Q)$(if $($(quiet)cmd_sortextable), \
- echo ' $($(quiet)cmd_sortextable) vmlinux' &&) \
- $(cmd_sortextable) vmlinux)
-
-
- $(Q)$(if $($(quiet)cmd_sysmap), \
- echo ' $($(quiet)cmd_sysmap) System.map' &&) \
- $(cmd_sysmap) $@ System.map; \
- if [ $$? -ne 0 ]; then \
- rm -f $@; \
- /bin/false; \
- fi;
- $(verify_kallsyms)
-endef
-
-
-ifdef CONFIG_KALLSYMS
-# Generate section listing all symbols and add it into vmlinux $(kallsyms.o)
-# It's a three stage process:
-# o .tmp_vmlinux1 has all symbols and sections, but __kallsyms is
-# empty
-# Running kallsyms on that gives us .tmp_kallsyms1.o with
-# the right size - vmlinux version (uname -v) is updated during this step
-# o .tmp_vmlinux2 now has a __kallsyms section of the right size,
-# but due to the added section, some addresses have shifted.
-# From here, we generate a correct .tmp_kallsyms2.o
-# o The correct .tmp_kallsyms2.o is linked into the final vmlinux.
-# o Verify that the System.map from vmlinux matches the map from
-# .tmp_vmlinux2, just in case we did not generate kallsyms correctly.
-# o If 'make KALLSYMS_EXTRA_PASS=1" was used, do an extra pass using
-# .tmp_vmlinux3 and .tmp_kallsyms3.o. This is only meant as a
-# temporary bypass to allow the kernel to be built while the
-# maintainers work out what went wrong with kallsyms.
-
-last_kallsyms := 2
-
-ifdef KALLSYMS_EXTRA_PASS
-ifneq ($(KALLSYMS_EXTRA_PASS),0)
-last_kallsyms := 3
-endif
-endif
-
-kallsyms.o := .tmp_kallsyms$(last_kallsyms).o
-
-define verify_kallsyms
- $(Q)$(if $($(quiet)cmd_sysmap), \
- echo ' $($(quiet)cmd_sysmap) .tmp_System.map' &&) \
- $(cmd_sysmap) .tmp_vmlinux$(last_kallsyms) .tmp_System.map
- $(Q)cmp -s System.map .tmp_System.map || \
- (echo Inconsistent kallsyms data; \
- echo This is a bug - please report about it; \
- echo Try "make KALLSYMS_EXTRA_PASS=1" as a workaround; \
- rm .tmp_kallsyms* ; /bin/false )
-endef
-
-# Update vmlinux version before link
-# Use + in front of this rule to silent warning about make -j1
-# First command is ':' to allow us to use + in front of this rule
-cmd_ksym_ld = $(cmd_vmlinux__)
-define rule_ksym_ld
- :
- +$(call cmd,vmlinux_version)
- $(call cmd,vmlinux__)
- $(Q)echo 'cmd_$@ := $(cmd_vmlinux__)' > $(@D)/.$(@F).cmd
-endef
-
-# Generate .S file with all kernel symbols
-quiet_cmd_kallsyms = KSYM $@
- cmd_kallsyms = $(NM) -n $< | $(KALLSYMS) \
- $(if $(CONFIG_KALLSYMS_ALL),--all-symbols) > $@
+# Externally visible symbols (used by link-vmlinux.sh)
+export KBUILD_VMLINUX_INIT := $(head-y) $(init-y)
+export KBUILD_VMLINUX_MAIN := $(core-y) $(libs-y) $(drivers-y) $(net-y)
+export KBUILD_LDS := arch/$(SRCARCH)/kernel/vmlinux.lds
+export LDFLAGS_vmlinux
-.tmp_kallsyms1.o .tmp_kallsyms2.o .tmp_kallsyms3.o: %.o: %.S scripts FORCE
- $(call if_changed_dep,as_o_S)
+vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN)
-.tmp_kallsyms%.S: .tmp_vmlinux% $(KALLSYMS)
- $(call cmd,kallsyms)
+# Final link of vmlinux
+ cmd_link-vmlinux = $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux)
+quiet_cmd_link-vmlinux = LINK $@
-# .tmp_vmlinux1 must be complete except kallsyms, so update vmlinux version
-.tmp_vmlinux1: $(vmlinux-lds) $(vmlinux-all) FORCE
- $(call if_changed_rule,ksym_ld)
-
-.tmp_vmlinux2: $(vmlinux-lds) $(vmlinux-all) .tmp_kallsyms1.o FORCE
- $(call if_changed,vmlinux__)
-
-.tmp_vmlinux3: $(vmlinux-lds) $(vmlinux-all) .tmp_kallsyms2.o FORCE
- $(call if_changed,vmlinux__)
-
-# Needs to visit scripts/ before $(KALLSYMS) can be used.
-$(KALLSYMS): scripts ;
-
-# Generate some data for debugging strange kallsyms problems
-debug_kallsyms: .tmp_map$(last_kallsyms)
-
-.tmp_map%: .tmp_vmlinux% FORCE
- ($(OBJDUMP) -h $< | $(AWK) '/^ +[0-9]/{print $$4 " 0 " $$2}'; $(NM) $<) | sort > $@
-
-.tmp_map3: .tmp_map2
-
-.tmp_map2: .tmp_map1
-
-endif # ifdef CONFIG_KALLSYMS
-
-# Do modpost on a prelinked vmlinux. The finally linked vmlinux has
-# relevant sections renamed as per the linker script.
-quiet_cmd_vmlinux-modpost = LD $@
- cmd_vmlinux-modpost = $(LD) $(LDFLAGS) -r -o $@ \
- $(vmlinux-init) --start-group $(vmlinux-main) --end-group \
- $(filter-out $(vmlinux-init) $(vmlinux-main) FORCE ,$^)
-define rule_vmlinux-modpost
- :
- +$(call cmd,vmlinux-modpost)
- $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost $@
- $(Q)echo 'cmd_$@ := $(cmd_vmlinux-modpost)' > $(dot-target).cmd
-endef
-
-# vmlinux image - including updated kernel symbols
-vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o $(kallsyms.o) FORCE
+# Include targets which we want to
+# execute if the rest of the kernel build went well.
+vmlinux: scripts/link-vmlinux.sh $(vmlinux-deps) FORCE
ifdef CONFIG_HEADERS_CHECK
$(Q)$(MAKE) -f $(srctree)/Makefile headers_check
endif
@@ -939,22 +758,11 @@ endif
ifdef CONFIG_BUILD_DOCSRC
$(Q)$(MAKE) $(build)=Documentation
endif
- $(call vmlinux-modpost)
- $(call if_changed_rule,vmlinux__)
- $(Q)rm -f .old_version
-
-# build vmlinux.o first to catch section mismatch errors early
-ifdef CONFIG_KALLSYMS
-.tmp_vmlinux1: vmlinux.o
-endif
-
-modpost-init := $(filter-out init/built-in.o, $(vmlinux-init))
-vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
- $(call if_changed_rule,vmlinux-modpost)
+ +$(call if_changed,link-vmlinux)
# The actual objects are generated when descending,
# make sure no implicit rule kicks in
-$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
+$(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
# Handle descending into subdirectories listed in $(vmlinux-dirs)
# Preset locale variables to speed up the build process. Limit locale
@@ -1181,8 +989,6 @@ endif # CONFIG_MODULES
# Directories & files removed with 'make clean'
CLEAN_DIRS += $(MODVERDIR)
-CLEAN_FILES += vmlinux System.map \
- .tmp_kallsyms* .tmp_version .tmp_vmlinux* .tmp_System.map
# Directories & files removed with 'make mrproper'
MRPROPER_DIRS += include/config usr/include include/generated \
@@ -1428,6 +1234,7 @@ scripts: ;
endif # KBUILD_EXTMOD
clean: $(clean-dirs)
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/link-vmlinux.sh clean
$(call cmd,rmdirs)
$(call cmd,rmfiles)
@find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
@@ -1568,14 +1375,6 @@ quiet_cmd_depmod = DEPMOD $(KERNELRELEASE)
cmd_crmodverdir = $(Q)mkdir -p $(MODVERDIR) \
$(if $(KBUILD_MODULES),; rm -f $(MODVERDIR)/*)
-a_flags = -Wp,-MD,$(depfile) $(KBUILD_AFLAGS) $(AFLAGS_KERNEL) \
- $(KBUILD_AFLAGS_KERNEL) \
- $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(KBUILD_CPPFLAGS) \
- $(modkern_aflags) $(EXTRA_AFLAGS) $(AFLAGS_$(basetarget).o)
-
-quiet_cmd_as_o_S = AS $@
-cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $<
-
# read all saved command lines
targets := $(wildcard $(sort $(targets)))
diff --git a/arch/Kconfig b/arch/Kconfig
index e9a910876cd..8c3d957fa8e 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -159,6 +159,9 @@ config HAVE_ARCH_TRACEHOOK
config HAVE_DMA_ATTRS
bool
+config HAVE_DMA_CONTIGUOUS
+ bool
+
config USE_GENERIC_SMP_HELPERS
bool
diff --git a/arch/alpha/include/asm/kvm_para.h b/arch/alpha/include/asm/kvm_para.h
new file mode 100644
index 00000000000..14fab8f0b95
--- /dev/null
+++ b/arch/alpha/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5458aa9db06..5e7601301b4 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -5,6 +5,9 @@ config ARM
select HAVE_AOUT
select HAVE_DMA_API_DEBUG
select HAVE_IDE if PCI || ISA || PCMCIA
+ select HAVE_DMA_ATTRS
+ select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
+ select CMA if (CPU_V6 || CPU_V6K || CPU_V7)
select HAVE_MEMBLOCK
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
@@ -54,6 +57,14 @@ config ARM
config ARM_HAS_SG_CHAIN
bool
+config NEED_SG_DMA_LENGTH
+ bool
+
+config ARM_DMA_USE_IOMMU
+ select NEED_SG_DMA_LENGTH
+ select ARM_HAS_SG_CHAIN
+ bool
+
config HAVE_PWM
bool
@@ -445,8 +456,10 @@ config ARCH_MXS
select ARCH_REQUIRE_GPIOLIB
select CLKDEV_LOOKUP
select CLKSRC_MMIO
+ select COMMON_CLK
select HAVE_CLK_PREPARE
select PINCTRL
+ select USE_OF
help
Support for Freescale MXS-based family of processors
@@ -936,6 +949,7 @@ config PLAT_SPEAR
select ARM_AMBA
select ARCH_REQUIRE_GPIOLIB
select CLKDEV_LOOKUP
+ select COMMON_CLK
select CLKSRC_MMIO
select GENERIC_CLOCKEVENTS
select HAVE_CLK
@@ -1040,7 +1054,6 @@ source "arch/arm/mach-sa1100/Kconfig"
source "arch/arm/plat-samsung/Kconfig"
source "arch/arm/plat-s3c24xx/Kconfig"
-source "arch/arm/plat-s5p/Kconfig"
source "arch/arm/plat-spear/Kconfig"
@@ -1091,6 +1104,7 @@ config PLAT_ORION
bool
select CLKSRC_MMIO
select GENERIC_IRQ_CHIP
+ select COMMON_CLK
config PLAT_PXA
bool
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 85348a09d65..01a13414121 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -103,6 +103,35 @@ choice
Say Y here if you want the debug print routines to direct
their output to the second serial port on these devices.
+ config DEBUG_DAVINCI_DA8XX_UART1
+ bool "Kernel low-level debugging on DaVinci DA8XX using UART1"
+ depends on ARCH_DAVINCI_DA8XX
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to UART1 serial port on DaVinci DA8XX devices.
+
+ config DEBUG_DAVINCI_DA8XX_UART2
+ bool "Kernel low-level debugging on DaVinci DA8XX using UART2"
+ depends on ARCH_DAVINCI_DA8XX
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to UART2 serial port on DaVinci DA8XX devices.
+
+ config DEBUG_DAVINCI_DMx_UART0
+ bool "Kernel low-level debugging on DaVinci DMx using UART0"
+ depends on ARCH_DAVINCI_DMx
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to UART0 serial port on DaVinci DMx devices.
+
+ config DEBUG_DAVINCI_TNETV107X_UART1
+ bool "Kernel low-level debugging on DaVinci TNETV107x using UART1"
+ depends on ARCH_DAVINCI_TNETV107X
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to UART1 serial port on DaVinci TNETV107X
+ devices.
+
config DEBUG_DC21285_PORT
bool "Kernel low-level debugging messages via footbridge serial port"
depends on FOOTBRIDGE
@@ -180,6 +209,14 @@ choice
Say Y here if you want kernel low-level debugging support
on i.MX50 or i.MX53.
+ config DEBUG_IMX6Q_UART2
+ bool "i.MX6Q Debug UART2"
+ depends on SOC_IMX6Q
+ help
+ Say Y here if you want kernel low-level debugging support
+ on i.MX6Q UART2. This is correct for e.g. the SabreLite
+ board.
+
config DEBUG_IMX6Q_UART4
bool "i.MX6Q Debug UART4"
depends on SOC_IMX6Q
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 157900da878..0298b00fe24 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -160,9 +160,7 @@ machine-$(CONFIG_ARCH_MXS) := mxs
machine-$(CONFIG_ARCH_NETX) := netx
machine-$(CONFIG_ARCH_NOMADIK) := nomadik
machine-$(CONFIG_ARCH_OMAP1) := omap1
-machine-$(CONFIG_ARCH_OMAP2) := omap2
-machine-$(CONFIG_ARCH_OMAP3) := omap2
-machine-$(CONFIG_ARCH_OMAP4) := omap2
+machine-$(CONFIG_ARCH_OMAP2PLUS) := omap2
machine-$(CONFIG_ARCH_ORION5X) := orion5x
machine-$(CONFIG_ARCH_PICOXCELL) := picoxcell
machine-$(CONFIG_ARCH_PNX4008) := pnx4008
@@ -188,6 +186,8 @@ machine-$(CONFIG_ARCH_VEXPRESS) := vexpress
machine-$(CONFIG_ARCH_VT8500) := vt8500
machine-$(CONFIG_ARCH_W90X900) := w90x900
machine-$(CONFIG_FOOTBRIDGE) := footbridge
+machine-$(CONFIG_MACH_SPEAR1310) := spear13xx
+machine-$(CONFIG_MACH_SPEAR1340) := spear13xx
machine-$(CONFIG_MACH_SPEAR300) := spear3xx
machine-$(CONFIG_MACH_SPEAR310) := spear3xx
machine-$(CONFIG_MACH_SPEAR320) := spear3xx
@@ -205,7 +205,7 @@ plat-$(CONFIG_PLAT_NOMADIK) := nomadik
plat-$(CONFIG_PLAT_ORION) := orion
plat-$(CONFIG_PLAT_PXA) := pxa
plat-$(CONFIG_PLAT_S3C24XX) := s3c24xx samsung
-plat-$(CONFIG_PLAT_S5P) := s5p samsung
+plat-$(CONFIG_PLAT_S5P) := samsung
plat-$(CONFIG_PLAT_SPEAR) := spear
plat-$(CONFIG_PLAT_VERSATILE) := versatile
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index 399d17b231d..49945cc1bc7 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -23,4 +23,52 @@
chosen {
bootargs = "root=/dev/ram0 rw ramdisk=8192 console=ttySAC1,115200";
};
+
+ i2c@12C60000 {
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <20000>;
+ gpios = <&gpb3 0 2 3 0>,
+ <&gpb3 1 2 3 0>;
+
+ eeprom@50 {
+ compatible = "samsung,s524ad0xd1";
+ reg = <0x50>;
+ };
+ };
+
+ i2c@12C70000 {
+ samsung,i2c-sda-delay = <100>;
+ samsung,i2c-max-bus-freq = <20000>;
+ gpios = <&gpb3 2 2 3 0>,
+ <&gpb3 3 2 3 0>;
+
+ eeprom@51 {
+ compatible = "samsung,s524ad0xd1";
+ reg = <0x51>;
+ };
+ };
+
+ i2c@12C80000 {
+ status = "disabled";
+ };
+
+ i2c@12C90000 {
+ status = "disabled";
+ };
+
+ i2c@12CA0000 {
+ status = "disabled";
+ };
+
+ i2c@12CB0000 {
+ status = "disabled";
+ };
+
+ i2c@12CC0000 {
+ status = "disabled";
+ };
+
+ i2c@12CD0000 {
+ status = "disabled";
+ };
};
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index dfc43359943..5ca0cdb7641 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -23,11 +23,11 @@
compatible = "samsung,exynos5250";
interrupt-parent = <&gic>;
- gic:interrupt-controller@10490000 {
+ gic:interrupt-controller@10481000 {
compatible = "arm,cortex-a9-gic";
#interrupt-cells = <3>;
interrupt-controller;
- reg = <0x10490000 0x1000>, <0x10480000 0x100>;
+ reg = <0x10481000 0x1000>, <0x10482000 0x2000>;
};
watchdog {
@@ -42,30 +42,6 @@
interrupts = <0 43 0>, <0 44 0>;
};
- sdhci@12200000 {
- compatible = "samsung,exynos4210-sdhci";
- reg = <0x12200000 0x100>;
- interrupts = <0 75 0>;
- };
-
- sdhci@12210000 {
- compatible = "samsung,exynos4210-sdhci";
- reg = <0x12210000 0x100>;
- interrupts = <0 76 0>;
- };
-
- sdhci@12220000 {
- compatible = "samsung,exynos4210-sdhci";
- reg = <0x12220000 0x100>;
- interrupts = <0 77 0>;
- };
-
- sdhci@12230000 {
- compatible = "samsung,exynos4210-sdhci";
- reg = <0x12230000 0x100>;
- interrupts = <0 78 0>;
- };
-
serial@12C00000 {
compatible = "samsung,exynos4210-uart";
reg = <0x12C00000 0x100>;
@@ -94,48 +70,64 @@
compatible = "samsung,s3c2440-i2c";
reg = <0x12C60000 0x100>;
interrupts = <0 56 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
};
i2c@12C70000 {
compatible = "samsung,s3c2440-i2c";
reg = <0x12C70000 0x100>;
interrupts = <0 57 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
};
i2c@12C80000 {
compatible = "samsung,s3c2440-i2c";
reg = <0x12C80000 0x100>;
interrupts = <0 58 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
};
i2c@12C90000 {
compatible = "samsung,s3c2440-i2c";
reg = <0x12C90000 0x100>;
interrupts = <0 59 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
};
i2c@12CA0000 {
compatible = "samsung,s3c2440-i2c";
reg = <0x12CA0000 0x100>;
interrupts = <0 60 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
};
i2c@12CB0000 {
compatible = "samsung,s3c2440-i2c";
reg = <0x12CB0000 0x100>;
interrupts = <0 61 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
};
i2c@12CC0000 {
compatible = "samsung,s3c2440-i2c";
reg = <0x12CC0000 0x100>;
interrupts = <0 62 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
};
i2c@12CD0000 {
compatible = "samsung,s3c2440-i2c";
reg = <0x12CD0000 0x100>;
interrupts = <0 63 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
};
amba {
@@ -157,13 +149,13 @@
interrupts = <0 35 0>;
};
- mdma0: pdma@10800000 {
+ mdma0: mdma@10800000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x10800000 0x1000>;
interrupts = <0 33 0>;
};
- mdma1: pdma@11C10000 {
+ mdma1: mdma@11C10000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x11C10000 0x1000>;
interrupts = <0 124 0>;
@@ -242,6 +234,12 @@
#gpio-cells = <4>;
};
+ gpc4: gpio-controller@114002E0 {
+ compatible = "samsung,exynos4-gpio";
+ reg = <0x114002E0 0x20>;
+ #gpio-cells = <4>;
+ };
+
gpd0: gpio-controller@11400160 {
compatible = "samsung,exynos4-gpio";
reg = <0x11400160 0x20>;
@@ -388,19 +386,19 @@
gpv2: gpio-controller@10D10040 {
compatible = "samsung,exynos4-gpio";
- reg = <0x10D10040 0x20>;
+ reg = <0x10D10060 0x20>;
#gpio-cells = <4>;
};
gpv3: gpio-controller@10D10060 {
compatible = "samsung,exynos4-gpio";
- reg = <0x10D10060 0x20>;
+ reg = <0x10D10080 0x20>;
#gpio-cells = <4>;
};
gpv4: gpio-controller@10D10080 {
compatible = "samsung,exynos4-gpio";
- reg = <0x10D10080 0x20>;
+ reg = <0x10D100C0 0x20>;
#gpio-cells = <4>;
};
diff --git a/arch/arm/boot/dts/imx23-evk.dts b/arch/arm/boot/dts/imx23-evk.dts
new file mode 100644
index 00000000000..70bffa929b6
--- /dev/null
+++ b/arch/arm/boot/dts/imx23-evk.dts
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx23.dtsi"
+
+/ {
+ model = "Freescale i.MX23 Evaluation Kit";
+ compatible = "fsl,imx23-evk", "fsl,imx23";
+
+ memory {
+ reg = <0x40000000 0x08000000>;
+ };
+
+ apb@80000000 {
+ apbh@80000000 {
+ ssp0: ssp@80010000 {
+ compatible = "fsl,imx23-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_8bit_pins_a &mmc0_pins_fixup>;
+ bus-width = <8>;
+ wp-gpios = <&gpio1 30 0>;
+ status = "okay";
+ };
+ };
+
+ apbx@80040000 {
+ duart: serial@80070000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&duart_pins_a>;
+ status = "okay";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
new file mode 100644
index 00000000000..8c5f9994f3f
--- /dev/null
+++ b/arch/arm/boot/dts/imx23.dtsi
@@ -0,0 +1,295 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+ interrupt-parent = <&icoll>;
+
+ aliases {
+ gpio0 = &gpio0;
+ gpio1 = &gpio1;
+ gpio2 = &gpio2;
+ };
+
+ cpus {
+ cpu@0 {
+ compatible = "arm,arm926ejs";
+ };
+ };
+
+ apb@80000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x80000000 0x80000>;
+ ranges;
+
+ apbh@80000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x80000000 0x40000>;
+ ranges;
+
+ icoll: interrupt-controller@80000000 {
+ compatible = "fsl,imx23-icoll", "fsl,mxs-icoll";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x80000000 0x2000>;
+ };
+
+ dma-apbh@80004000 {
+ compatible = "fsl,imx23-dma-apbh";
+ reg = <0x80004000 2000>;
+ };
+
+ ecc@80008000 {
+ reg = <0x80008000 2000>;
+ status = "disabled";
+ };
+
+ bch@8000a000 {
+ reg = <0x8000a000 2000>;
+ status = "disabled";
+ };
+
+ gpmi@8000c000 {
+ reg = <0x8000c000 2000>;
+ status = "disabled";
+ };
+
+ ssp0: ssp@80010000 {
+ reg = <0x80010000 2000>;
+ interrupts = <15 14>;
+ fsl,ssp-dma-channel = <1>;
+ status = "disabled";
+ };
+
+ etm@80014000 {
+ reg = <0x80014000 2000>;
+ status = "disabled";
+ };
+
+ pinctrl@80018000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx23-pinctrl", "simple-bus";
+ reg = <0x80018000 2000>;
+
+ gpio0: gpio@0 {
+ compatible = "fsl,imx23-gpio", "fsl,mxs-gpio";
+ interrupts = <16>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio1: gpio@1 {
+ compatible = "fsl,imx23-gpio", "fsl,mxs-gpio";
+ interrupts = <17>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio2: gpio@2 {
+ compatible = "fsl,imx23-gpio", "fsl,mxs-gpio";
+ interrupts = <18>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ duart_pins_a: duart@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <0x11a2 0x11b2>;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ mmc0_8bit_pins_a: mmc0-8bit@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <0x2020 0x2030 0x2040
+ 0x2050 0x0082 0x0092 0x00a2
+ 0x00b2 0x2000 0x2010 0x2060>;
+ fsl,drive-strength = <1>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <1>;
+ };
+
+ mmc0_pins_fixup: mmc0-pins-fixup {
+ fsl,pinmux-ids = <0x2010 0x2060>;
+ fsl,pull-up = <0>;
+ };
+ };
+
+ digctl@8001c000 {
+ reg = <0x8001c000 2000>;
+ status = "disabled";
+ };
+
+ emi@80020000 {
+ reg = <0x80020000 2000>;
+ status = "disabled";
+ };
+
+ dma-apbx@80024000 {
+ compatible = "fsl,imx23-dma-apbx";
+ reg = <0x80024000 2000>;
+ };
+
+ dcp@80028000 {
+ reg = <0x80028000 2000>;
+ status = "disabled";
+ };
+
+ pxp@8002a000 {
+ reg = <0x8002a000 2000>;
+ status = "disabled";
+ };
+
+ ocotp@8002c000 {
+ reg = <0x8002c000 2000>;
+ status = "disabled";
+ };
+
+ axi-ahb@8002e000 {
+ reg = <0x8002e000 2000>;
+ status = "disabled";
+ };
+
+ lcdif@80030000 {
+ reg = <0x80030000 2000>;
+ status = "disabled";
+ };
+
+ ssp1: ssp@80034000 {
+ reg = <0x80034000 2000>;
+ interrupts = <2 20>;
+ fsl,ssp-dma-channel = <2>;
+ status = "disabled";
+ };
+
+ tvenc@80038000 {
+ reg = <0x80038000 2000>;
+ status = "disabled";
+ };
+ };
+
+ apbx@80040000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x80040000 0x40000>;
+ ranges;
+
+ clkctl@80040000 {
+ reg = <0x80040000 2000>;
+ status = "disabled";
+ };
+
+ saif0: saif@80042000 {
+ reg = <0x80042000 2000>;
+ status = "disabled";
+ };
+
+ power@80044000 {
+ reg = <0x80044000 2000>;
+ status = "disabled";
+ };
+
+ saif1: saif@80046000 {
+ reg = <0x80046000 2000>;
+ status = "disabled";
+ };
+
+ audio-out@80048000 {
+ reg = <0x80048000 2000>;
+ status = "disabled";
+ };
+
+ audio-in@8004c000 {
+ reg = <0x8004c000 2000>;
+ status = "disabled";
+ };
+
+ lradc@80050000 {
+ reg = <0x80050000 2000>;
+ status = "disabled";
+ };
+
+ spdif@80054000 {
+ reg = <0x80054000 2000>;
+ status = "disabled";
+ };
+
+ i2c@80058000 {
+ reg = <0x80058000 2000>;
+ status = "disabled";
+ };
+
+ rtc@8005c000 {
+ reg = <0x8005c000 2000>;
+ status = "disabled";
+ };
+
+ pwm@80064000 {
+ reg = <0x80064000 2000>;
+ status = "disabled";
+ };
+
+ timrot@80068000 {
+ reg = <0x80068000 2000>;
+ status = "disabled";
+ };
+
+ auart0: serial@8006c000 {
+ reg = <0x8006c000 0x2000>;
+ status = "disabled";
+ };
+
+ auart1: serial@8006e000 {
+ reg = <0x8006e000 0x2000>;
+ status = "disabled";
+ };
+
+ duart: serial@80070000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x80070000 0x2000>;
+ interrupts = <0>;
+ status = "disabled";
+ };
+
+ usbphy@8007c000 {
+ reg = <0x8007c000 0x2000>;
+ status = "disabled";
+ };
+ };
+ };
+
+ ahb@80080000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x80080000 0x80000>;
+ ranges;
+
+ usbctrl@80080000 {
+ reg = <0x80080000 0x10000>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx27-phytec-phycore.dts b/arch/arm/boot/dts/imx27-phytec-phycore.dts
index a51a08fc2af..2b0ff60247a 100644
--- a/arch/arm/boot/dts/imx27-phytec-phycore.dts
+++ b/arch/arm/boot/dts/imx27-phytec-phycore.dts
@@ -27,22 +27,22 @@
status = "okay";
};
- uart@1000a000 {
+ serial@1000a000 {
fsl,uart-has-rtscts;
status = "okay";
};
- uart@1000b000 {
+ serial@1000b000 {
fsl,uart-has-rtscts;
status = "okay";
};
- uart@1000c000 {
+ serial@1000c000 {
fsl,uart-has-rtscts;
status = "okay";
};
- fec@1002b000 {
+ ethernet@1002b000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index bc5e7d5ddd5..2b1a166d41f 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -59,28 +59,28 @@
status = "disabled";
};
- uart1: uart@1000a000 {
+ uart1: serial@1000a000 {
compatible = "fsl,imx27-uart", "fsl,imx21-uart";
reg = <0x1000a000 0x1000>;
interrupts = <20>;
status = "disabled";
};
- uart2: uart@1000b000 {
+ uart2: serial@1000b000 {
compatible = "fsl,imx27-uart", "fsl,imx21-uart";
reg = <0x1000b000 0x1000>;
interrupts = <19>;
status = "disabled";
};
- uart3: uart@1000c000 {
+ uart3: serial@1000c000 {
compatible = "fsl,imx27-uart", "fsl,imx21-uart";
reg = <0x1000c000 0x1000>;
interrupts = <18>;
status = "disabled";
};
- uart4: uart@1000d000 {
+ uart4: serial@1000d000 {
compatible = "fsl,imx27-uart", "fsl,imx21-uart";
reg = <0x1000d000 0x1000>;
interrupts = <17>;
@@ -183,14 +183,14 @@
status = "disabled";
};
- uart5: uart@1001b000 {
+ uart5: serial@1001b000 {
compatible = "fsl,imx27-uart", "fsl,imx21-uart";
reg = <0x1001b000 0x1000>;
interrupts = <49>;
status = "disabled";
};
- uart6: uart@1001c000 {
+ uart6: serial@1001c000 {
compatible = "fsl,imx27-uart", "fsl,imx21-uart";
reg = <0x1001c000 0x1000>;
interrupts = <48>;
@@ -206,7 +206,7 @@
status = "disabled";
};
- fec: fec@1002b000 {
+ fec: ethernet@1002b000 {
compatible = "fsl,imx27-fec";
reg = <0x1002b000 0x4000>;
interrupts = <50>;
diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts
new file mode 100644
index 00000000000..ee520a529cb
--- /dev/null
+++ b/arch/arm/boot/dts/imx28-evk.dts
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx28.dtsi"
+
+/ {
+ model = "Freescale i.MX28 Evaluation Kit";
+ compatible = "fsl,imx28-evk", "fsl,imx28";
+
+ memory {
+ reg = <0x40000000 0x08000000>;
+ };
+
+ apb@80000000 {
+ apbh@80000000 {
+ ssp0: ssp@80010000 {
+ compatible = "fsl,imx28-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_8bit_pins_a
+ &mmc0_cd_cfg &mmc0_sck_cfg>;
+ bus-width = <8>;
+ wp-gpios = <&gpio2 12 0>;
+ status = "okay";
+ };
+
+ ssp1: ssp@80012000 {
+ compatible = "fsl,imx28-mmc";
+ bus-width = <8>;
+ wp-gpios = <&gpio0 28 0>;
+ status = "okay";
+ };
+ };
+
+ apbx@80040000 {
+ saif0: saif@80042000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&saif0_pins_a>;
+ status = "okay";
+ };
+
+ saif1: saif@80046000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&saif1_pins_a>;
+ fsl,saif-master = <&saif0>;
+ status = "okay";
+ };
+
+ i2c0: i2c@80058000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins_a>;
+ status = "okay";
+
+ sgtl5000: codec@0a {
+ compatible = "fsl,sgtl5000";
+ reg = <0x0a>;
+ VDDA-supply = <&reg_3p3v>;
+ VDDIO-supply = <&reg_3p3v>;
+
+ };
+ };
+
+ duart: serial@80074000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&duart_pins_a>;
+ status = "okay";
+ };
+ };
+ };
+
+ ahb@80080000 {
+ mac0: ethernet@800f0000 {
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac0_pins_a>;
+ status = "okay";
+ };
+
+ mac1: ethernet@800f4000 {
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac1_pins_a>;
+ status = "okay";
+ };
+ };
+
+ regulators {
+ compatible = "simple-bus";
+
+ reg_3p3v: 3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ };
+
+ sound {
+ compatible = "fsl,imx28-evk-sgtl5000",
+ "fsl,mxs-audio-sgtl5000";
+ model = "imx28-evk-sgtl5000";
+ saif-controllers = <&saif0 &saif1>;
+ audio-codec = <&sgtl5000>;
+ };
+};
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
new file mode 100644
index 00000000000..4634cb861a5
--- /dev/null
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -0,0 +1,497 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+ interrupt-parent = <&icoll>;
+
+ aliases {
+ gpio0 = &gpio0;
+ gpio1 = &gpio1;
+ gpio2 = &gpio2;
+ gpio3 = &gpio3;
+ gpio4 = &gpio4;
+ saif0 = &saif0;
+ saif1 = &saif1;
+ };
+
+ cpus {
+ cpu@0 {
+ compatible = "arm,arm926ejs";
+ };
+ };
+
+ apb@80000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x80000000 0x80000>;
+ ranges;
+
+ apbh@80000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x80000000 0x3c900>;
+ ranges;
+
+ icoll: interrupt-controller@80000000 {
+ compatible = "fsl,imx28-icoll", "fsl,mxs-icoll";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x80000000 0x2000>;
+ };
+
+ hsadc@80002000 {
+ reg = <0x80002000 2000>;
+ interrupts = <13 87>;
+ status = "disabled";
+ };
+
+ dma-apbh@80004000 {
+ compatible = "fsl,imx28-dma-apbh";
+ reg = <0x80004000 2000>;
+ };
+
+ perfmon@80006000 {
+ reg = <0x80006000 800>;
+ interrupts = <27>;
+ status = "disabled";
+ };
+
+ bch@8000a000 {
+ reg = <0x8000a000 2000>;
+ interrupts = <41>;
+ status = "disabled";
+ };
+
+ gpmi@8000c000 {
+ reg = <0x8000c000 2000>;
+ interrupts = <42 88>;
+ status = "disabled";
+ };
+
+ ssp0: ssp@80010000 {
+ reg = <0x80010000 2000>;
+ interrupts = <96 82>;
+ fsl,ssp-dma-channel = <0>;
+ status = "disabled";
+ };
+
+ ssp1: ssp@80012000 {
+ reg = <0x80012000 2000>;
+ interrupts = <97 83>;
+ fsl,ssp-dma-channel = <1>;
+ status = "disabled";
+ };
+
+ ssp2: ssp@80014000 {
+ reg = <0x80014000 2000>;
+ interrupts = <98 84>;
+ fsl,ssp-dma-channel = <2>;
+ status = "disabled";
+ };
+
+ ssp3: ssp@80016000 {
+ reg = <0x80016000 2000>;
+ interrupts = <99 85>;
+ fsl,ssp-dma-channel = <3>;
+ status = "disabled";
+ };
+
+ pinctrl@80018000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx28-pinctrl", "simple-bus";
+ reg = <0x80018000 2000>;
+
+ gpio0: gpio@0 {
+ compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+ interrupts = <127>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio1: gpio@1 {
+ compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+ interrupts = <126>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio2: gpio@2 {
+ compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+ interrupts = <125>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio3: gpio@3 {
+ compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+ interrupts = <124>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ gpio4: gpio@4 {
+ compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
+ interrupts = <123>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ duart_pins_a: duart@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <0x3102 0x3112>;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ mac0_pins_a: mac0@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <0x4000 0x4010 0x4020
+ 0x4030 0x4040 0x4060 0x4070
+ 0x4080 0x4100>;
+ fsl,drive-strength = <1>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <1>;
+ };
+
+ mac1_pins_a: mac1@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <0x40f1 0x4091 0x40a1
+ 0x40e1 0x40b1 0x40c1>;
+ fsl,drive-strength = <1>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <1>;
+ };
+
+ mmc0_8bit_pins_a: mmc0-8bit@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <0x2000 0x2010 0x2020
+ 0x2030 0x2040 0x2050 0x2060
+ 0x2070 0x2080 0x2090 0x20a0>;
+ fsl,drive-strength = <1>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <1>;
+ };
+
+ mmc0_cd_cfg: mmc0-cd-cfg {
+ fsl,pinmux-ids = <0x2090>;
+ fsl,pull-up = <0>;
+ };
+
+ mmc0_sck_cfg: mmc0-sck-cfg {
+ fsl,pinmux-ids = <0x20a0>;
+ fsl,drive-strength = <2>;
+ fsl,pull-up = <0>;
+ };
+
+ i2c0_pins_a: i2c0@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <0x3180 0x3190>;
+ fsl,drive-strength = <1>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <1>;
+ };
+
+ saif0_pins_a: saif0@0 {
+ reg = <0>;
+ fsl,pinmux-ids =
+ <0x3140 0x3150 0x3160 0x3170>;
+ fsl,drive-strength = <2>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <1>;
+ };
+
+ saif1_pins_a: saif1@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <0x31a0>;
+ fsl,drive-strength = <2>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <1>;
+ };
+ };
+
+ digctl@8001c000 {
+ reg = <0x8001c000 2000>;
+ interrupts = <89>;
+ status = "disabled";
+ };
+
+ etm@80022000 {
+ reg = <0x80022000 2000>;
+ status = "disabled";
+ };
+
+ dma-apbx@80024000 {
+ compatible = "fsl,imx28-dma-apbx";
+ reg = <0x80024000 2000>;
+ };
+
+ dcp@80028000 {
+ reg = <0x80028000 2000>;
+ interrupts = <52 53 54>;
+ status = "disabled";
+ };
+
+ pxp@8002a000 {
+ reg = <0x8002a000 2000>;
+ interrupts = <39>;
+ status = "disabled";
+ };
+
+ ocotp@8002c000 {
+ reg = <0x8002c000 2000>;
+ status = "disabled";
+ };
+
+ axi-ahb@8002e000 {
+ reg = <0x8002e000 2000>;
+ status = "disabled";
+ };
+
+ lcdif@80030000 {
+ reg = <0x80030000 2000>;
+ interrupts = <38 86>;
+ status = "disabled";
+ };
+
+ can0: can@80032000 {
+ reg = <0x80032000 2000>;
+ interrupts = <8>;
+ status = "disabled";
+ };
+
+ can1: can@80034000 {
+ reg = <0x80034000 2000>;
+ interrupts = <9>;
+ status = "disabled";
+ };
+
+ simdbg@8003c000 {
+ reg = <0x8003c000 200>;
+ status = "disabled";
+ };
+
+ simgpmisel@8003c200 {
+ reg = <0x8003c200 100>;
+ status = "disabled";
+ };
+
+ simsspsel@8003c300 {
+ reg = <0x8003c300 100>;
+ status = "disabled";
+ };
+
+ simmemsel@8003c400 {
+ reg = <0x8003c400 100>;
+ status = "disabled";
+ };
+
+ gpiomon@8003c500 {
+ reg = <0x8003c500 100>;
+ status = "disabled";
+ };
+
+ simenet@8003c700 {
+ reg = <0x8003c700 100>;
+ status = "disabled";
+ };
+
+ armjtag@8003c800 {
+ reg = <0x8003c800 100>;
+ status = "disabled";
+ };
+ };
+
+ apbx@80040000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x80040000 0x40000>;
+ ranges;
+
+ clkctl@80040000 {
+ reg = <0x80040000 2000>;
+ status = "disabled";
+ };
+
+ saif0: saif@80042000 {
+ compatible = "fsl,imx28-saif";
+ reg = <0x80042000 2000>;
+ interrupts = <59 80>;
+ fsl,saif-dma-channel = <4>;
+ status = "disabled";
+ };
+
+ power@80044000 {
+ reg = <0x80044000 2000>;
+ status = "disabled";
+ };
+
+ saif1: saif@80046000 {
+ compatible = "fsl,imx28-saif";
+ reg = <0x80046000 2000>;
+ interrupts = <58 81>;
+ fsl,saif-dma-channel = <5>;
+ status = "disabled";
+ };
+
+ lradc@80050000 {
+ reg = <0x80050000 2000>;
+ status = "disabled";
+ };
+
+ spdif@80054000 {
+ reg = <0x80054000 2000>;
+ interrupts = <45 66>;
+ status = "disabled";
+ };
+
+ rtc@80056000 {
+ reg = <0x80056000 2000>;
+ interrupts = <28 29>;
+ status = "disabled";
+ };
+
+ i2c0: i2c@80058000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx28-i2c";
+ reg = <0x80058000 2000>;
+ interrupts = <111 68>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@8005a000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx28-i2c";
+ reg = <0x8005a000 2000>;
+ interrupts = <110 69>;
+ status = "disabled";
+ };
+
+ pwm@80064000 {
+ reg = <0x80064000 2000>;
+ status = "disabled";
+ };
+
+ timrot@80068000 {
+ reg = <0x80068000 2000>;
+ status = "disabled";
+ };
+
+ auart0: serial@8006a000 {
+ reg = <0x8006a000 0x2000>;
+ interrupts = <112 70 71>;
+ status = "disabled";
+ };
+
+ auart1: serial@8006c000 {
+ reg = <0x8006c000 0x2000>;
+ interrupts = <113 72 73>;
+ status = "disabled";
+ };
+
+ auart2: serial@8006e000 {
+ reg = <0x8006e000 0x2000>;
+ interrupts = <114 74 75>;
+ status = "disabled";
+ };
+
+ auart3: serial@80070000 {
+ reg = <0x80070000 0x2000>;
+ interrupts = <115 76 77>;
+ status = "disabled";
+ };
+
+ auart4: serial@80072000 {
+ reg = <0x80072000 0x2000>;
+ interrupts = <116 78 79>;
+ status = "disabled";
+ };
+
+ duart: serial@80074000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x80074000 0x1000>;
+ interrupts = <47>;
+ status = "disabled";
+ };
+
+ usbphy0: usbphy@8007c000 {
+ reg = <0x8007c000 0x2000>;
+ status = "disabled";
+ };
+
+ usbphy1: usbphy@8007e000 {
+ reg = <0x8007e000 0x2000>;
+ status = "disabled";
+ };
+ };
+ };
+
+ ahb@80080000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x80080000 0x80000>;
+ ranges;
+
+ usbctrl0: usbctrl@80080000 {
+ reg = <0x80080000 0x10000>;
+ status = "disabled";
+ };
+
+ usbctrl1: usbctrl@80090000 {
+ reg = <0x80090000 0x10000>;
+ status = "disabled";
+ };
+
+ dflpt@800c0000 {
+ reg = <0x800c0000 0x10000>;
+ status = "disabled";
+ };
+
+ mac0: ethernet@800f0000 {
+ compatible = "fsl,imx28-fec";
+ reg = <0x800f0000 0x4000>;
+ interrupts = <101>;
+ status = "disabled";
+ };
+
+ mac1: ethernet@800f4000 {
+ compatible = "fsl,imx28-fec";
+ reg = <0x800f4000 0x4000>;
+ interrupts = <102>;
+ status = "disabled";
+ };
+
+ switch@800f8000 {
+ reg = <0x800f8000 0x8000>;
+ status = "disabled";
+ };
+
+ };
+};
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index 9949e6060de..de065b5976e 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -17,10 +17,6 @@
model = "Freescale i.MX51 Babbage Board";
compatible = "fsl,imx51-babbage", "fsl,imx51";
- chosen {
- bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
- };
-
memory {
reg = <0x90000000 0x20000000>;
};
@@ -40,7 +36,7 @@
status = "okay";
};
- uart3: uart@7000c000 {
+ uart3: serial@7000c000 {
fsl,uart-has-rtscts;
status = "okay";
};
@@ -166,6 +162,11 @@
};
};
};
+
+ ssi2: ssi@70014000 {
+ fsl,mode = "i2s-slave";
+ status = "okay";
+ };
};
wdog@73f98000 { /* WDOG1 */
@@ -177,12 +178,12 @@
reg = <0x73fa8000 0x4000>;
};
- uart1: uart@73fbc000 {
+ uart1: serial@73fbc000 {
fsl,uart-has-rtscts;
status = "okay";
};
- uart2: uart@73fc0000 {
+ uart2: serial@73fc0000 {
status = "okay";
};
};
@@ -195,13 +196,20 @@
i2c@83fc4000 { /* I2C2 */
status = "okay";
- codec: sgtl5000@0a {
+ sgtl5000: codec@0a {
compatible = "fsl,sgtl5000";
reg = <0x0a>;
+ clock-frequency = <26000000>;
+ VDDA-supply = <&vdig_reg>;
+ VDDIO-supply = <&vvideo_reg>;
};
};
- fec@83fec000 {
+ audmux@83fd0000 {
+ status = "okay";
+ };
+
+ ethernet@83fec000 {
phy-mode = "mii";
status = "okay";
};
@@ -218,4 +226,18 @@
gpio-key,wakeup;
};
};
+
+ sound {
+ compatible = "fsl,imx51-babbage-sgtl5000",
+ "fsl,imx-audio-sgtl5000";
+ model = "imx51-babbage-sgtl5000";
+ ssi-controller = <&ssi2>;
+ audio-codec = <&sgtl5000>;
+ audio-routing =
+ "MIC_IN", "Mic Jack",
+ "Mic Jack", "Mic Bias",
+ "Headphone Jack", "HP_OUT";
+ mux-int-port = <2>;
+ mux-ext-port = <3>;
+ };
};
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index 6663986fe1c..bfa65abe8ef 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -86,7 +86,7 @@
status = "disabled";
};
- uart3: uart@7000c000 {
+ uart3: serial@7000c000 {
compatible = "fsl,imx51-uart", "fsl,imx21-uart";
reg = <0x7000c000 0x4000>;
interrupts = <33>;
@@ -102,6 +102,15 @@
status = "disabled";
};
+ ssi2: ssi@70014000 {
+ compatible = "fsl,imx51-ssi", "fsl,imx21-ssi";
+ reg = <0x70014000 0x4000>;
+ interrupts = <30>;
+ fsl,fifo-depth = <15>;
+ fsl,ssi-dma-events = <25 24 23 22>; /* TX0 RX0 TX1 RX1 */
+ status = "disabled";
+ };
+
esdhc@70020000 { /* ESDHC3 */
compatible = "fsl,imx51-esdhc";
reg = <0x70020000 0x4000>;
@@ -171,14 +180,14 @@
status = "disabled";
};
- uart1: uart@73fbc000 {
+ uart1: serial@73fbc000 {
compatible = "fsl,imx51-uart", "fsl,imx21-uart";
reg = <0x73fbc000 0x4000>;
interrupts = <31>;
status = "disabled";
};
- uart2: uart@73fc0000 {
+ uart2: serial@73fc0000 {
compatible = "fsl,imx51-uart", "fsl,imx21-uart";
reg = <0x73fc0000 0x4000>;
interrupts = <32>;
@@ -235,7 +244,31 @@
status = "disabled";
};
- fec@83fec000 {
+ ssi1: ssi@83fcc000 {
+ compatible = "fsl,imx51-ssi", "fsl,imx21-ssi";
+ reg = <0x83fcc000 0x4000>;
+ interrupts = <29>;
+ fsl,fifo-depth = <15>;
+ fsl,ssi-dma-events = <29 28 27 26>; /* TX0 RX0 TX1 RX1 */
+ status = "disabled";
+ };
+
+ audmux@83fd0000 {
+ compatible = "fsl,imx51-audmux", "fsl,imx31-audmux";
+ reg = <0x83fd0000 0x4000>;
+ status = "disabled";
+ };
+
+ ssi3: ssi@83fe8000 {
+ compatible = "fsl,imx51-ssi", "fsl,imx21-ssi";
+ reg = <0x83fe8000 0x4000>;
+ interrupts = <96>;
+ fsl,fifo-depth = <15>;
+ fsl,ssi-dma-events = <47 46 37 35>; /* TX0 RX0 TX1 RX1 */
+ status = "disabled";
+ };
+
+ ethernet@83fec000 {
compatible = "fsl,imx51-fec", "fsl,imx27-fec";
reg = <0x83fec000 0x4000>;
interrupts = <87>;
diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts
index 2dccce46ed8..5b8eafcdbee 100644
--- a/arch/arm/boot/dts/imx53-ard.dts
+++ b/arch/arm/boot/dts/imx53-ard.dts
@@ -17,10 +17,6 @@
model = "Freescale i.MX53 Automotive Reference Design Board";
compatible = "fsl,imx53-ard", "fsl,imx53";
- chosen {
- bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
- };
-
memory {
reg = <0x70000000 0x40000000>;
};
@@ -44,7 +40,7 @@
reg = <0x53fa8000 0x4000>;
};
- uart1: uart@53fbc000 {
+ uart1: serial@53fbc000 {
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/imx53-evk.dts b/arch/arm/boot/dts/imx53-evk.dts
index 5bac4aa4800..9c798034675 100644
--- a/arch/arm/boot/dts/imx53-evk.dts
+++ b/arch/arm/boot/dts/imx53-evk.dts
@@ -17,10 +17,6 @@
model = "Freescale i.MX53 Evaluation Kit";
compatible = "fsl,imx53-evk", "fsl,imx53";
- chosen {
- bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
- };
-
memory {
reg = <0x70000000 0x80000000>;
};
@@ -75,7 +71,7 @@
reg = <0x53fa8000 0x4000>;
};
- uart1: uart@53fbc000 {
+ uart1: serial@53fbc000 {
status = "okay";
};
};
@@ -99,7 +95,7 @@
};
};
- fec@63fec000 {
+ ethernet@63fec000 {
phy-mode = "rmii";
phy-reset-gpios = <&gpio7 6 0>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx53-qsb.dts b/arch/arm/boot/dts/imx53-qsb.dts
index 5c57c8672c3..2d803a9a694 100644
--- a/arch/arm/boot/dts/imx53-qsb.dts
+++ b/arch/arm/boot/dts/imx53-qsb.dts
@@ -17,10 +17,6 @@
model = "Freescale i.MX53 Quick Start Board";
compatible = "fsl,imx53-qsb", "fsl,imx53";
- chosen {
- bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
- };
-
memory {
reg = <0x70000000 0x40000000>;
};
@@ -33,6 +29,11 @@
status = "okay";
};
+ ssi2: ssi@50014000 {
+ fsl,mode = "i2s-slave";
+ status = "okay";
+ };
+
esdhc@50020000 { /* ESDHC3 */
cd-gpios = <&gpio3 11 0>;
wp-gpios = <&gpio3 12 0>;
@@ -49,7 +50,7 @@
reg = <0x53fa8000 0x4000>;
};
- uart1: uart@53fbc000 {
+ uart1: serial@53fbc000 {
status = "okay";
};
};
@@ -62,9 +63,11 @@
i2c@63fc4000 { /* I2C2 */
status = "okay";
- codec: sgtl5000@0a {
+ sgtl5000: codec@0a {
compatible = "fsl,sgtl5000";
reg = <0x0a>;
+ VDDA-supply = <&reg_3p2v>;
+ VDDIO-supply = <&reg_3p2v>;
};
};
@@ -77,12 +80,88 @@
};
pmic: dialog@48 {
- compatible = "dialog,da9053", "dialog,da9052";
+ compatible = "dlg,da9053-aa", "dlg,da9052";
reg = <0x48>;
+
+ regulators {
+ buck0 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <2075000>;
+ };
+
+ buck1 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <2075000>;
+ };
+
+ buck2 {
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <2500000>;
+ };
+
+ buck3 {
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <2500000>;
+ };
+
+ ldo4 {
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ ldo5 {
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ ldo6 {
+ regulator-min-microvolt = <1725000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ ldo7 {
+ regulator-min-microvolt = <1725000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ ldo8 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3600000>;
+ };
+
+ ldo9 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3600000>;
+ };
+
+ ldo10 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3600000>;
+ };
+
+ ldo11 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3600000>;
+ };
+
+ ldo12 {
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <3650000>;
+ };
+
+ ldo13 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3600000>;
+ };
+ };
};
};
- fec@63fec000 {
+ audmux@63fd0000 {
+ status = "okay";
+ };
+
+ ethernet@63fec000 {
phy-mode = "rmii";
phy-reset-gpios = <&gpio7 6 0>;
status = "okay";
@@ -122,4 +201,30 @@
linux,default-trigger = "heartbeat";
};
};
+
+ regulators {
+ compatible = "simple-bus";
+
+ reg_3p2v: 3p2v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P2V";
+ regulator-min-microvolt = <3200000>;
+ regulator-max-microvolt = <3200000>;
+ regulator-always-on;
+ };
+ };
+
+ sound {
+ compatible = "fsl,imx53-qsb-sgtl5000",
+ "fsl,imx-audio-sgtl5000";
+ model = "imx53-qsb-sgtl5000";
+ ssi-controller = <&ssi2>;
+ audio-codec = <&sgtl5000>;
+ audio-routing =
+ "MIC_IN", "Mic Jack",
+ "Mic Jack", "Mic Bias",
+ "Headphone Jack", "HP_OUT";
+ mux-int-port = <2>;
+ mux-ext-port = <5>;
+ };
};
diff --git a/arch/arm/boot/dts/imx53-smd.dts b/arch/arm/boot/dts/imx53-smd.dts
index c7ee86c2dfb..08091029168 100644
--- a/arch/arm/boot/dts/imx53-smd.dts
+++ b/arch/arm/boot/dts/imx53-smd.dts
@@ -17,10 +17,6 @@
model = "Freescale i.MX53 Smart Mobile Reference Design Board";
compatible = "fsl,imx53-smd", "fsl,imx53";
- chosen {
- bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
- };
-
memory {
reg = <0x70000000 0x40000000>;
};
@@ -35,11 +31,11 @@
};
esdhc@50008000 { /* ESDHC2 */
- fsl,card-wired;
+ non-removable;
status = "okay";
};
- uart3: uart@5000c000 {
+ uart3: serial@5000c000 {
fsl,uart-has-rtscts;
status = "okay";
};
@@ -76,7 +72,7 @@
};
esdhc@50020000 { /* ESDHC3 */
- fsl,card-wired;
+ non-removable;
status = "okay";
};
};
@@ -90,11 +86,11 @@
reg = <0x53fa8000 0x4000>;
};
- uart1: uart@53fbc000 {
+ uart1: serial@53fbc000 {
status = "okay";
};
- uart2: uart@53fc0000 {
+ uart2: serial@53fc0000 {
status = "okay";
};
};
@@ -142,7 +138,7 @@
};
};
- fec@63fec000 {
+ ethernet@63fec000 {
phy-mode = "rmii";
phy-reset-gpios = <&gpio7 6 0>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 5dd91b942c9..e3e869470cd 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -88,7 +88,7 @@
status = "disabled";
};
- uart3: uart@5000c000 {
+ uart3: serial@5000c000 {
compatible = "fsl,imx53-uart", "fsl,imx21-uart";
reg = <0x5000c000 0x4000>;
interrupts = <33>;
@@ -104,6 +104,15 @@
status = "disabled";
};
+ ssi2: ssi@50014000 {
+ compatible = "fsl,imx53-ssi", "fsl,imx21-ssi";
+ reg = <0x50014000 0x4000>;
+ interrupts = <30>;
+ fsl,fifo-depth = <15>;
+ fsl,ssi-dma-events = <25 24 23 22>; /* TX0 RX0 TX1 RX1 */
+ status = "disabled";
+ };
+
esdhc@50020000 { /* ESDHC3 */
compatible = "fsl,imx53-esdhc";
reg = <0x50020000 0x4000>;
@@ -173,14 +182,14 @@
status = "disabled";
};
- uart1: uart@53fbc000 {
+ uart1: serial@53fbc000 {
compatible = "fsl,imx53-uart", "fsl,imx21-uart";
reg = <0x53fbc000 0x4000>;
interrupts = <31>;
status = "disabled";
};
- uart2: uart@53fc0000 {
+ uart2: serial@53fc0000 {
compatible = "fsl,imx53-uart", "fsl,imx21-uart";
reg = <0x53fc0000 0x4000>;
interrupts = <32>;
@@ -226,7 +235,7 @@
status = "disabled";
};
- uart4: uart@53ff0000 {
+ uart4: serial@53ff0000 {
compatible = "fsl,imx53-uart", "fsl,imx21-uart";
reg = <0x53ff0000 0x4000>;
interrupts = <13>;
@@ -241,7 +250,7 @@
reg = <0x60000000 0x10000000>;
ranges;
- uart5: uart@63f90000 {
+ uart5: serial@63f90000 {
compatible = "fsl,imx53-uart", "fsl,imx21-uart";
reg = <0x63f90000 0x4000>;
interrupts = <86>;
@@ -290,7 +299,31 @@
status = "disabled";
};
- fec@63fec000 {
+ ssi1: ssi@63fcc000 {
+ compatible = "fsl,imx53-ssi", "fsl,imx21-ssi";
+ reg = <0x63fcc000 0x4000>;
+ interrupts = <29>;
+ fsl,fifo-depth = <15>;
+ fsl,ssi-dma-events = <29 28 27 26>; /* TX0 RX0 TX1 RX1 */
+ status = "disabled";
+ };
+
+ audmux@63fd0000 {
+ compatible = "fsl,imx53-audmux", "fsl,imx31-audmux";
+ reg = <0x63fd0000 0x4000>;
+ status = "disabled";
+ };
+
+ ssi3: ssi@63fe8000 {
+ compatible = "fsl,imx53-ssi", "fsl,imx21-ssi";
+ reg = <0x63fe8000 0x4000>;
+ interrupts = <96>;
+ fsl,fifo-depth = <15>;
+ fsl,ssi-dma-events = <47 46 45 44>; /* TX0 RX0 TX1 RX1 */
+ status = "disabled";
+ };
+
+ ethernet@63fec000 {
compatible = "fsl,imx53-fec", "fsl,imx25-fec";
reg = <0x63fec000 0x4000>;
interrupts = <87>;
diff --git a/arch/arm/boot/dts/imx6q-arm2.dts b/arch/arm/boot/dts/imx6q-arm2.dts
index ce1c8238c89..db4c6096c56 100644
--- a/arch/arm/boot/dts/imx6q-arm2.dts
+++ b/arch/arm/boot/dts/imx6q-arm2.dts
@@ -17,19 +17,14 @@
model = "Freescale i.MX6 Quad Armadillo2 Board";
compatible = "fsl,imx6q-arm2", "fsl,imx6q";
- chosen {
- bootargs = "console=ttymxc0,115200 root=/dev/mmcblk3p3 rootwait";
- };
-
memory {
reg = <0x10000000 0x80000000>;
};
soc {
aips-bus@02100000 { /* AIPS2 */
- enet@02188000 {
+ ethernet@02188000 {
phy-mode = "rgmii";
- local-mac-address = [00 04 9F 01 1B 61];
status = "okay";
};
@@ -37,16 +32,20 @@
cd-gpios = <&gpio6 11 0>;
wp-gpios = <&gpio6 14 0>;
vmmc-supply = <&reg_3p3v>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc3_1>;
status = "okay";
};
usdhc@0219c000 { /* uSDHC4 */
- fsl,card-wired;
+ non-removable;
vmmc-supply = <&reg_3p3v>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc4_1>;
status = "okay";
};
- uart4: uart@021f0000 {
+ uart4: serial@021f0000 {
status = "okay";
};
};
diff --git a/arch/arm/boot/dts/imx6q-sabrelite.dts b/arch/arm/boot/dts/imx6q-sabrelite.dts
index 4663a4e5a28..e0ec92973e7 100644
--- a/arch/arm/boot/dts/imx6q-sabrelite.dts
+++ b/arch/arm/boot/dts/imx6q-sabrelite.dts
@@ -22,8 +22,30 @@
};
soc {
+ aips-bus@02000000 { /* AIPS1 */
+ spba-bus@02000000 {
+ ecspi@02008000 { /* eCSPI1 */
+ fsl,spi-num-chipselects = <1>;
+ cs-gpios = <&gpio3 19 0>;
+ status = "okay";
+
+ flash: m25p80@0 {
+ compatible = "sst,sst25vf016b";
+ spi-max-frequency = <20000000>;
+ reg = <0>;
+ };
+ };
+
+ ssi1: ssi@02028000 {
+ fsl,mode = "i2s-slave";
+ status = "okay";
+ };
+ };
+
+ };
+
aips-bus@02100000 { /* AIPS2 */
- enet@02188000 {
+ ethernet@02188000 {
phy-mode = "rgmii";
phy-reset-gpios = <&gpio3 23 0>;
status = "okay";
@@ -43,13 +65,23 @@
status = "okay";
};
- uart2: uart@021e8000 {
+ audmux@021d8000 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_audmux_1>;
+ };
+
+ uart2: serial@021e8000 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_serial2_1>;
};
i2c@021a0000 { /* I2C1 */
status = "okay";
clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1_1>;
codec: sgtl5000@0a {
compatible = "fsl,sgtl5000";
@@ -80,4 +112,18 @@
regulator-always-on;
};
};
+
+ sound {
+ compatible = "fsl,imx6q-sabrelite-sgtl5000",
+ "fsl,imx-audio-sgtl5000";
+ model = "imx6q-sabrelite-sgtl5000";
+ ssi-controller = <&ssi1>;
+ audio-codec = <&codec>;
+ audio-routing =
+ "MIC_IN", "Mic Jack",
+ "Mic Jack", "Mic Bias",
+ "Headphone Jack", "HP_OUT";
+ mux-int-port = <1>;
+ mux-ext-port = <4>;
+ };
};
diff --git a/arch/arm/boot/dts/imx6q-sabresd.dts b/arch/arm/boot/dts/imx6q-sabresd.dts
new file mode 100644
index 00000000000..07509a18117
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-sabresd.dts
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2011 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx6q.dtsi"
+
+/ {
+ model = "Freescale i.MX6Q SABRE Smart Device Board";
+ compatible = "fsl,imx6q-sabresd", "fsl,imx6q";
+
+ memory {
+ reg = <0x10000000 0x40000000>;
+ };
+
+ soc {
+
+ aips-bus@02000000 { /* AIPS1 */
+ spba-bus@02000000 {
+ uart1: serial@02020000 {
+ status = "okay";
+ };
+ };
+ };
+
+ aips-bus@02100000 { /* AIPS2 */
+ ethernet@02188000 {
+ phy-mode = "rgmii";
+ status = "okay";
+ };
+
+ usdhc@02194000 { /* uSDHC2 */
+ cd-gpios = <&gpio2 2 0>;
+ wp-gpios = <&gpio2 3 0>;
+ status = "okay";
+ };
+
+ usdhc@02198000 { /* uSDHC3 */
+ cd-gpios = <&gpio2 0 0>;
+ wp-gpios = <&gpio2 1 0>;
+ status = "okay";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index 4905f51a106..8c90cbac945 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -165,7 +165,7 @@
status = "disabled";
};
- uart1: uart@02020000 {
+ uart1: serial@02020000 {
compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
reg = <0x02020000 0x4000>;
interrupts = <0 26 0x04>;
@@ -177,19 +177,31 @@
interrupts = <0 51 0x04>;
};
- ssi@02028000 { /* SSI1 */
+ ssi1: ssi@02028000 {
+ compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
reg = <0x02028000 0x4000>;
interrupts = <0 46 0x04>;
+ fsl,fifo-depth = <15>;
+ fsl,ssi-dma-events = <38 37>;
+ status = "disabled";
};
- ssi@0202c000 { /* SSI2 */
+ ssi2: ssi@0202c000 {
+ compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
reg = <0x0202c000 0x4000>;
interrupts = <0 47 0x04>;
+ fsl,fifo-depth = <15>;
+ fsl,ssi-dma-events = <42 41>;
+ status = "disabled";
};
- ssi@02030000 { /* SSI3 */
+ ssi3: ssi@02030000 {
+ compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
reg = <0x02030000 0x4000>;
interrupts = <0 48 0x04>;
+ fsl,fifo-depth = <15>;
+ fsl,ssi-dma-events = <46 45>;
+ status = "disabled";
};
asrc@02034000 {
@@ -346,6 +358,90 @@
compatible = "fsl,imx6q-anatop";
reg = <0x020c8000 0x1000>;
interrupts = <0 49 0x04 0 54 0x04 0 127 0x04>;
+
+ regulator-1p1@110 {
+ compatible = "fsl,anatop-regulator";
+ regulator-name = "vdd1p1";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1375000>;
+ regulator-always-on;
+ anatop-reg-offset = <0x110>;
+ anatop-vol-bit-shift = <8>;
+ anatop-vol-bit-width = <5>;
+ anatop-min-bit-val = <4>;
+ anatop-min-voltage = <800000>;
+ anatop-max-voltage = <1375000>;
+ };
+
+ regulator-3p0@120 {
+ compatible = "fsl,anatop-regulator";
+ regulator-name = "vdd3p0";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3150000>;
+ regulator-always-on;
+ anatop-reg-offset = <0x120>;
+ anatop-vol-bit-shift = <8>;
+ anatop-vol-bit-width = <5>;
+ anatop-min-bit-val = <0>;
+ anatop-min-voltage = <2625000>;
+ anatop-max-voltage = <3400000>;
+ };
+
+ regulator-2p5@130 {
+ compatible = "fsl,anatop-regulator";
+ regulator-name = "vdd2p5";
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2750000>;
+ regulator-always-on;
+ anatop-reg-offset = <0x130>;
+ anatop-vol-bit-shift = <8>;
+ anatop-vol-bit-width = <5>;
+ anatop-min-bit-val = <0>;
+ anatop-min-voltage = <2000000>;
+ anatop-max-voltage = <2750000>;
+ };
+
+ regulator-vddcore@140 {
+ compatible = "fsl,anatop-regulator";
+ regulator-name = "cpu";
+ regulator-min-microvolt = <725000>;
+ regulator-max-microvolt = <1450000>;
+ regulator-always-on;
+ anatop-reg-offset = <0x140>;
+ anatop-vol-bit-shift = <0>;
+ anatop-vol-bit-width = <5>;
+ anatop-min-bit-val = <1>;
+ anatop-min-voltage = <725000>;
+ anatop-max-voltage = <1450000>;
+ };
+
+ regulator-vddpu@140 {
+ compatible = "fsl,anatop-regulator";
+ regulator-name = "vddpu";
+ regulator-min-microvolt = <725000>;
+ regulator-max-microvolt = <1450000>;
+ regulator-always-on;
+ anatop-reg-offset = <0x140>;
+ anatop-vol-bit-shift = <9>;
+ anatop-vol-bit-width = <5>;
+ anatop-min-bit-val = <1>;
+ anatop-min-voltage = <725000>;
+ anatop-max-voltage = <1450000>;
+ };
+
+ regulator-vddsoc@140 {
+ compatible = "fsl,anatop-regulator";
+ regulator-name = "vddsoc";
+ regulator-min-microvolt = <725000>;
+ regulator-max-microvolt = <1450000>;
+ regulator-always-on;
+ anatop-reg-offset = <0x140>;
+ anatop-vol-bit-shift = <18>;
+ anatop-vol-bit-width = <5>;
+ anatop-min-bit-val = <1>;
+ anatop-min-voltage = <725000>;
+ anatop-max-voltage = <1450000>;
+ };
};
usbphy@020c9000 { /* USBPHY1 */
@@ -386,7 +482,62 @@
};
iomuxc@020e0000 {
+ compatible = "fsl,imx6q-iomuxc";
reg = <0x020e0000 0x4000>;
+
+ /* shared pinctrl settings */
+ audmux {
+ pinctrl_audmux_1: audmux-1 {
+ fsl,pins = <18 0x80000000 /* MX6Q_PAD_SD2_DAT0__AUDMUX_AUD4_RXD */
+ 1586 0x80000000 /* MX6Q_PAD_SD2_DAT3__AUDMUX_AUD4_TXC */
+ 11 0x80000000 /* MX6Q_PAD_SD2_DAT2__AUDMUX_AUD4_TXD */
+ 3 0x80000000>; /* MX6Q_PAD_SD2_DAT1__AUDMUX_AUD4_TXFS */
+ };
+ };
+
+ i2c1 {
+ pinctrl_i2c1_1: i2c1grp-1 {
+ fsl,pins = <137 0x4001b8b1 /* MX6Q_PAD_EIM_D21__I2C1_SCL */
+ 196 0x4001b8b1>; /* MX6Q_PAD_EIM_D28__I2C1_SDA */
+ };
+ };
+
+ serial2 {
+ pinctrl_serial2_1: serial2grp-1 {
+ fsl,pins = <183 0x1b0b1 /* MX6Q_PAD_EIM_D26__UART2_TXD */
+ 191 0x1b0b1>; /* MX6Q_PAD_EIM_D27__UART2_RXD */
+ };
+ };
+
+ usdhc3 {
+ pinctrl_usdhc3_1: usdhc3grp-1 {
+ fsl,pins = <1273 0x17059 /* MX6Q_PAD_SD3_CMD__USDHC3_CMD */
+ 1281 0x10059 /* MX6Q_PAD_SD3_CLK__USDHC3_CLK */
+ 1289 0x17059 /* MX6Q_PAD_SD3_DAT0__USDHC3_DAT0 */
+ 1297 0x17059 /* MX6Q_PAD_SD3_DAT1__USDHC3_DAT1 */
+ 1305 0x17059 /* MX6Q_PAD_SD3_DAT2__USDHC3_DAT2 */
+ 1312 0x17059 /* MX6Q_PAD_SD3_DAT3__USDHC3_DAT3 */
+ 1265 0x17059 /* MX6Q_PAD_SD3_DAT4__USDHC3_DAT4 */
+ 1257 0x17059 /* MX6Q_PAD_SD3_DAT5__USDHC3_DAT5 */
+ 1249 0x17059 /* MX6Q_PAD_SD3_DAT6__USDHC3_DAT6 */
+ 1241 0x17059>; /* MX6Q_PAD_SD3_DAT7__USDHC3_DAT7 */
+ };
+ };
+
+ usdhc4 {
+ pinctrl_usdhc4_1: usdhc4grp-1 {
+ fsl,pins = <1386 0x17059 /* MX6Q_PAD_SD4_CMD__USDHC4_CMD */
+ 1392 0x10059 /* MX6Q_PAD_SD4_CLK__USDHC4_CLK */
+ 1462 0x17059 /* MX6Q_PAD_SD4_DAT0__USDHC4_DAT0 */
+ 1470 0x17059 /* MX6Q_PAD_SD4_DAT1__USDHC4_DAT1 */
+ 1478 0x17059 /* MX6Q_PAD_SD4_DAT2__USDHC4_DAT2 */
+ 1486 0x17059 /* MX6Q_PAD_SD4_DAT3__USDHC4_DAT3 */
+ 1493 0x17059 /* MX6Q_PAD_SD4_DAT4__USDHC4_DAT4 */
+ 1501 0x17059 /* MX6Q_PAD_SD4_DAT5__USDHC4_DAT5 */
+ 1509 0x17059 /* MX6Q_PAD_SD4_DAT6__USDHC4_DAT6 */
+ 1517 0x17059>; /* MX6Q_PAD_SD4_DAT7__USDHC4_DAT7 */
+ };
+ };
};
dcic@020e4000 { /* DCIC1 */
@@ -422,7 +573,7 @@
reg = <0x0217c000 0x4000>;
};
- enet@02188000 {
+ ethernet@02188000 {
compatible = "fsl,imx6q-fec";
reg = <0x02188000 0x4000>;
interrupts = <0 118 0x04 0 119 0x04>;
@@ -527,7 +678,9 @@
};
audmux@021d8000 {
+ compatible = "fsl,imx6q-audmux", "fsl,imx31-audmux";
reg = <0x021d8000 0x4000>;
+ status = "disabled";
};
mipi@021dc000 { /* MIPI-CSI */
@@ -543,28 +696,28 @@
interrupts = <0 18 0x04>;
};
- uart2: uart@021e8000 {
+ uart2: serial@021e8000 {
compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
reg = <0x021e8000 0x4000>;
interrupts = <0 27 0x04>;
status = "disabled";
};
- uart3: uart@021ec000 {
+ uart3: serial@021ec000 {
compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
reg = <0x021ec000 0x4000>;
interrupts = <0 28 0x04>;
status = "disabled";
};
- uart4: uart@021f0000 {
+ uart4: serial@021f0000 {
compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
reg = <0x021f0000 0x4000>;
interrupts = <0 29 0x04>;
status = "disabled";
};
- uart5: uart@021f4000 {
+ uart5: serial@021f4000 {
compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
reg = <0x021f4000 0x4000>;
interrupts = <0 30 0x04>;
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index 8c756be4d7a..5b4506c0a8c 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -57,7 +57,7 @@
&mmc1 {
vmmc-supply = <&vmmc1>;
vmmc_aux-supply = <&vsim>;
- ti,bus-width = <8>;
+ bus-width = <8>;
};
&mmc2 {
diff --git a/arch/arm/boot/dts/omap4-panda.dts b/arch/arm/boot/dts/omap4-panda.dts
index e671361bc79..1efe0c58798 100644
--- a/arch/arm/boot/dts/omap4-panda.dts
+++ b/arch/arm/boot/dts/omap4-panda.dts
@@ -70,7 +70,7 @@
&mmc1 {
vmmc-supply = <&vmmc>;
- ti,bus-width = <8>;
+ bus-width = <8>;
};
&mmc2 {
@@ -87,5 +87,5 @@
&mmc5 {
ti,non-removable;
- ti,bus-width = <4>;
+ bus-width = <4>;
};
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index e5eeb6f9c6e..d08c4d13728 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -137,12 +137,12 @@
&mmc1 {
vmmc-supply = <&vmmc>;
- ti,bus-width = <8>;
+ bus-width = <8>;
};
&mmc2 {
vmmc-supply = <&vaux1>;
- ti,bus-width = <8>;
+ bus-width = <8>;
ti,non-removable;
};
@@ -155,6 +155,6 @@
};
&mmc5 {
- ti,bus-width = <4>;
+ bus-width = <4>;
ti,non-removable;
};
diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts
new file mode 100644
index 00000000000..8314e417188
--- /dev/null
+++ b/arch/arm/boot/dts/spear1310-evb.dts
@@ -0,0 +1,292 @@
+/*
+ * DTS file for SPEAr1310 Evaluation Board
+ *
+ * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "spear1310.dtsi"
+
+/ {
+ model = "ST SPEAr1310 Evaluation Board";
+ compatible = "st,spear1310-evb", "st,spear1310";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memory {
+ reg = <0 0x40000000>;
+ };
+
+ ahb {
+ pinmux@e0700000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&state_default>;
+
+ state_default: pinmux {
+ i2c0-pmx {
+ st,pins = "i2c0_grp";
+ st,function = "i2c0";
+ };
+ i2s1 {
+ st,pins = "i2s1_grp";
+ st,function = "i2s1";
+ };
+ gpio {
+ st,pins = "arm_gpio_grp";
+ st,function = "arm_gpio";
+ };
+ eth {
+ st,pins = "gmii_grp";
+ st,function = "gmii";
+ };
+ ssp0 {
+ st,pins = "ssp0_grp";
+ st,function = "ssp0";
+ };
+ kbd {
+ st,pins = "keyboard_6x6_grp";
+ st,function = "keyboard";
+ };
+ sdhci {
+ st,pins = "sdhci_grp";
+ st,function = "sdhci";
+ };
+ smi-pmx {
+ st,pins = "smi_2_chips_grp";
+ st,function = "smi";
+ };
+ uart0 {
+ st,pins = "uart0_grp";
+ st,function = "uart0";
+ };
+ rs485 {
+ st,pins = "rs485_0_1_tdm_0_1_grp";
+ st,function = "rs485_0_1_tdm_0_1";
+ };
+ i2c1_2 {
+ st,pins = "i2c_1_2_grp";
+ st,function = "i2c_1_2";
+ };
+ pci {
+ st,pins = "pcie0_grp","pcie1_grp",
+ "pcie2_grp";
+ st,function = "pci";
+ };
+ smii {
+ st,pins = "smii_0_1_2_grp";
+ st,function = "smii_0_1_2";
+ };
+ nand {
+ st,pins = "nand_8bit_grp",
+ "nand_16bit_grp";
+ st,function = "nand";
+ };
+ };
+ };
+
+ ahci@b1000000 {
+ status = "okay";
+ };
+
+ cf@b2800000 {
+ status = "okay";
+ };
+
+ dma@ea800000 {
+ status = "okay";
+ };
+
+ dma@eb000000 {
+ status = "okay";
+ };
+
+ fsmc: flash@b0000000 {
+ status = "okay";
+ };
+
+ gmac0: eth@e2000000 {
+ status = "okay";
+ };
+
+ sdhci@b3000000 {
+ status = "okay";
+ };
+
+ smi: flash@ea000000 {
+ status = "okay";
+ clock-rate=<50000000>;
+
+ flash@e6000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xe6000000 0x800000>;
+ st,smi-fast-mode;
+
+ partition@0 {
+ label = "xloader";
+ reg = <0x0 0x10000>;
+ };
+ partition@10000 {
+ label = "u-boot";
+ reg = <0x10000 0x40000>;
+ };
+ partition@50000 {
+ label = "linux";
+ reg = <0x50000 0x2c0000>;
+ };
+ partition@310000 {
+ label = "rootfs";
+ reg = <0x310000 0x4f0000>;
+ };
+ };
+ };
+
+ spi0: spi@e0100000 {
+ status = "okay";
+ };
+
+ ehci@e4800000 {
+ status = "okay";
+ };
+
+ ehci@e5800000 {
+ status = "okay";
+ };
+
+ ohci@e4000000 {
+ status = "okay";
+ };
+
+ ohci@e5000000 {
+ status = "okay";
+ };
+
+ apb {
+ adc@e0080000 {
+ status = "okay";
+ };
+
+ gpio0: gpio@e0600000 {
+ status = "okay";
+ };
+
+ gpio1: gpio@e0680000 {
+ status = "okay";
+ };
+
+ i2c0: i2c@e0280000 {
+ status = "okay";
+ };
+
+ i2c1: i2c@5cd00000 {
+ status = "okay";
+ };
+
+ kbd@e0300000 {
+ linux,keymap = < 0x00000001
+ 0x00010002
+ 0x00020003
+ 0x00030004
+ 0x00040005
+ 0x00050006
+ 0x00060007
+ 0x00070008
+ 0x00080009
+ 0x0100000a
+ 0x0101000c
+ 0x0102000d
+ 0x0103000e
+ 0x0104000f
+ 0x01050010
+ 0x01060011
+ 0x01070012
+ 0x01080013
+ 0x02000014
+ 0x02010015
+ 0x02020016
+ 0x02030017
+ 0x02040018
+ 0x02050019
+ 0x0206001a
+ 0x0207001b
+ 0x0208001c
+ 0x0300001d
+ 0x0301001e
+ 0x0302001f
+ 0x03030020
+ 0x03040021
+ 0x03050022
+ 0x03060023
+ 0x03070024
+ 0x03080025
+ 0x04000026
+ 0x04010027
+ 0x04020028
+ 0x04030029
+ 0x0404002a
+ 0x0405002b
+ 0x0406002c
+ 0x0407002d
+ 0x0408002e
+ 0x0500002f
+ 0x05010030
+ 0x05020031
+ 0x05030032
+ 0x05040033
+ 0x05050034
+ 0x05060035
+ 0x05070036
+ 0x05080037
+ 0x06000038
+ 0x06010039
+ 0x0602003a
+ 0x0603003b
+ 0x0604003c
+ 0x0605003d
+ 0x0606003e
+ 0x0607003f
+ 0x06080040
+ 0x07000041
+ 0x07010042
+ 0x07020043
+ 0x07030044
+ 0x07040045
+ 0x07050046
+ 0x07060047
+ 0x07070048
+ 0x07080049
+ 0x0800004a
+ 0x0801004b
+ 0x0802004c
+ 0x0803004d
+ 0x0804004e
+ 0x0805004f
+ 0x08060050
+ 0x08070051
+ 0x08080052 >;
+ autorepeat;
+ st,mode = <0>;
+ status = "okay";
+ };
+
+ rtc@e0580000 {
+ status = "okay";
+ };
+
+ serial@e0000000 {
+ status = "okay";
+ };
+
+ wdt@ec800620 {
+ status = "okay";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/spear1310.dtsi b/arch/arm/boot/dts/spear1310.dtsi
new file mode 100644
index 00000000000..9e61da404d5
--- /dev/null
+++ b/arch/arm/boot/dts/spear1310.dtsi
@@ -0,0 +1,184 @@
+/*
+ * DTS file for all SPEAr1310 SoCs
+ *
+ * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "spear13xx.dtsi"
+
+/ {
+ compatible = "st,spear1310";
+
+ ahb {
+ ahci@b1000000 {
+ compatible = "snps,spear-ahci";
+ reg = <0xb1000000 0x10000>;
+ interrupts = <0 68 0x4>;
+ status = "disabled";
+ };
+
+ ahci@b1800000 {
+ compatible = "snps,spear-ahci";
+ reg = <0xb1800000 0x10000>;
+ interrupts = <0 69 0x4>;
+ status = "disabled";
+ };
+
+ ahci@b4000000 {
+ compatible = "snps,spear-ahci";
+ reg = <0xb4000000 0x10000>;
+ interrupts = <0 70 0x4>;
+ status = "disabled";
+ };
+
+ gmac1: eth@5c400000 {
+ compatible = "st,spear600-gmac";
+ reg = <0x5c400000 0x8000>;
+ interrupts = <0 95 0x4>;
+ interrupt-names = "macirq";
+ status = "disabled";
+ };
+
+ gmac2: eth@5c500000 {
+ compatible = "st,spear600-gmac";
+ reg = <0x5c500000 0x8000>;
+ interrupts = <0 96 0x4>;
+ interrupt-names = "macirq";
+ status = "disabled";
+ };
+
+ gmac3: eth@5c600000 {
+ compatible = "st,spear600-gmac";
+ reg = <0x5c600000 0x8000>;
+ interrupts = <0 97 0x4>;
+ interrupt-names = "macirq";
+ status = "disabled";
+ };
+
+ gmac4: eth@5c700000 {
+ compatible = "st,spear600-gmac";
+ reg = <0x5c700000 0x8000>;
+ interrupts = <0 98 0x4>;
+ interrupt-names = "macirq";
+ status = "disabled";
+ };
+
+ spi1: spi@5d400000 {
+ compatible = "arm,pl022", "arm,primecell";
+ reg = <0x5d400000 0x1000>;
+ interrupts = <0 99 0x4>;
+ status = "disabled";
+ };
+
+ apb {
+ i2c1: i2c@5cd00000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0x5cd00000 0x1000>;
+ interrupts = <0 87 0x4>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@5ce00000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0x5ce00000 0x1000>;
+ interrupts = <0 88 0x4>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@5cf00000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0x5cf00000 0x1000>;
+ interrupts = <0 89 0x4>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@5d000000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0x5d000000 0x1000>;
+ interrupts = <0 90 0x4>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@5d100000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0x5d100000 0x1000>;
+ interrupts = <0 91 0x4>;
+ status = "disabled";
+ };
+
+ i2c6: i2c@5d200000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0x5d200000 0x1000>;
+ interrupts = <0 92 0x4>;
+ status = "disabled";
+ };
+
+ i2c7: i2c@5d300000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0x5d300000 0x1000>;
+ interrupts = <0 93 0x4>;
+ status = "disabled";
+ };
+
+ serial@5c800000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x5c800000 0x1000>;
+ interrupts = <0 82 0x4>;
+ status = "disabled";
+ };
+
+ serial@5c900000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x5c900000 0x1000>;
+ interrupts = <0 83 0x4>;
+ status = "disabled";
+ };
+
+ serial@5ca00000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x5ca00000 0x1000>;
+ interrupts = <0 84 0x4>;
+ status = "disabled";
+ };
+
+ serial@5cb00000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x5cb00000 0x1000>;
+ interrupts = <0 85 0x4>;
+ status = "disabled";
+ };
+
+ serial@5cc00000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x5cc00000 0x1000>;
+ interrupts = <0 86 0x4>;
+ status = "disabled";
+ };
+
+ thermal@e07008c4 {
+ st,thermal-flags = <0x7000>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/spear1340-evb.dts b/arch/arm/boot/dts/spear1340-evb.dts
new file mode 100644
index 00000000000..0d8472e5ab9
--- /dev/null
+++ b/arch/arm/boot/dts/spear1340-evb.dts
@@ -0,0 +1,308 @@
+/*
+ * DTS file for SPEAr1340 Evaluation Board
+ *
+ * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "spear1340.dtsi"
+
+/ {
+ model = "ST SPEAr1340 Evaluation Board";
+ compatible = "st,spear1340-evb", "st,spear1340";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memory {
+ reg = <0 0x40000000>;
+ };
+
+ ahb {
+ pinmux@e0700000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&state_default>;
+
+ state_default: pinmux {
+ pads_as_gpio {
+ st,pins = "pads_as_gpio_grp";
+ st,function = "pads_as_gpio";
+ };
+ fsmc {
+ st,pins = "fsmc_8bit_grp";
+ st,function = "fsmc";
+ };
+ kbd {
+ st,pins = "keyboard_row_col_grp",
+ "keyboard_col5_grp";
+ st,function = "keyboard";
+ };
+ uart0 {
+ st,pins = "uart0_grp", "uart0_enh_grp";
+ st,function = "uart0";
+ };
+ i2c0-pmx {
+ st,pins = "i2c0_grp";
+ st,function = "i2c0";
+ };
+ i2c1-pmx {
+ st,pins = "i2c1_grp";
+ st,function = "i2c1";
+ };
+ spdif-in {
+ st,pins = "spdif_in_grp";
+ st,function = "spdif_in";
+ };
+ spdif-out {
+ st,pins = "spdif_out_grp";
+ st,function = "spdif_out";
+ };
+ ssp0 {
+ st,pins = "ssp0_grp", "ssp0_cs1_grp",
+ "ssp0_cs3_grp";
+ st,function = "ssp0";
+ };
+ pwm {
+ st,pins = "pwm2_grp", "pwm3_grp";
+ st,function = "pwm";
+ };
+ smi-pmx {
+ st,pins = "smi_grp";
+ st,function = "smi";
+ };
+ i2s {
+ st,pins = "i2s_in_grp", "i2s_out_grp";
+ st,function = "i2s";
+ };
+ gmac {
+ st,pins = "gmii_grp", "rgmii_grp";
+ st,function = "gmac";
+ };
+ cam3 {
+ st,pins = "cam3_grp";
+ st,function = "cam3";
+ };
+ cec0 {
+ st,pins = "cec0_grp";
+ st,function = "cec0";
+ };
+ cec1 {
+ st,pins = "cec1_grp";
+ st,function = "cec1";
+ };
+ sdhci {
+ st,pins = "sdhci_grp";
+ st,function = "sdhci";
+ };
+ clcd {
+ st,pins = "clcd_grp";
+ st,function = "clcd";
+ };
+ sata {
+ st,pins = "sata_grp";
+ st,function = "sata";
+ };
+ };
+ };
+
+ dma@ea800000 {
+ status = "okay";
+ };
+
+ dma@eb000000 {
+ status = "okay";
+ };
+
+ fsmc: flash@b0000000 {
+ status = "okay";
+ };
+
+ gmac0: eth@e2000000 {
+ status = "okay";
+ };
+
+ sdhci@b3000000 {
+ status = "okay";
+ };
+
+ smi: flash@ea000000 {
+ status = "okay";
+ clock-rate=<50000000>;
+
+ flash@e6000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xe6000000 0x800000>;
+ st,smi-fast-mode;
+
+ partition@0 {
+ label = "xloader";
+ reg = <0x0 0x10000>;
+ };
+ partition@10000 {
+ label = "u-boot";
+ reg = <0x10000 0x40000>;
+ };
+ partition@50000 {
+ label = "linux";
+ reg = <0x50000 0x2c0000>;
+ };
+ partition@310000 {
+ label = "rootfs";
+ reg = <0x310000 0x4f0000>;
+ };
+ };
+ };
+
+ spi0: spi@e0100000 {
+ status = "okay";
+ };
+
+ ehci@e4800000 {
+ status = "okay";
+ };
+
+ ehci@e5800000 {
+ status = "okay";
+ };
+
+ ohci@e4000000 {
+ status = "okay";
+ };
+
+ ohci@e5000000 {
+ status = "okay";
+ };
+
+ apb {
+ adc@e0080000 {
+ status = "okay";
+ };
+
+ gpio0: gpio@e0600000 {
+ status = "okay";
+ };
+
+ gpio1: gpio@e0680000 {
+ status = "okay";
+ };
+
+ i2c0: i2c@e0280000 {
+ status = "okay";
+ };
+
+ i2c1: i2c@b4000000 {
+ status = "okay";
+ };
+
+ kbd@e0300000 {
+ linux,keymap = < 0x00000001
+ 0x00010002
+ 0x00020003
+ 0x00030004
+ 0x00040005
+ 0x00050006
+ 0x00060007
+ 0x00070008
+ 0x00080009
+ 0x0100000a
+ 0x0101000c
+ 0x0102000d
+ 0x0103000e
+ 0x0104000f
+ 0x01050010
+ 0x01060011
+ 0x01070012
+ 0x01080013
+ 0x02000014
+ 0x02010015
+ 0x02020016
+ 0x02030017
+ 0x02040018
+ 0x02050019
+ 0x0206001a
+ 0x0207001b
+ 0x0208001c
+ 0x0300001d
+ 0x0301001e
+ 0x0302001f
+ 0x03030020
+ 0x03040021
+ 0x03050022
+ 0x03060023
+ 0x03070024
+ 0x03080025
+ 0x04000026
+ 0x04010027
+ 0x04020028
+ 0x04030029
+ 0x0404002a
+ 0x0405002b
+ 0x0406002c
+ 0x0407002d
+ 0x0408002e
+ 0x0500002f
+ 0x05010030
+ 0x05020031
+ 0x05030032
+ 0x05040033
+ 0x05050034
+ 0x05060035
+ 0x05070036
+ 0x05080037
+ 0x06000038
+ 0x06010039
+ 0x0602003a
+ 0x0603003b
+ 0x0604003c
+ 0x0605003d
+ 0x0606003e
+ 0x0607003f
+ 0x06080040
+ 0x07000041
+ 0x07010042
+ 0x07020043
+ 0x07030044
+ 0x07040045
+ 0x07050046
+ 0x07060047
+ 0x07070048
+ 0x07080049
+ 0x0800004a
+ 0x0801004b
+ 0x0802004c
+ 0x0803004d
+ 0x0804004e
+ 0x0805004f
+ 0x08060050
+ 0x08070051
+ 0x08080052 >;
+ autorepeat;
+ st,mode = <0>;
+ status = "okay";
+ };
+
+ rtc@e0580000 {
+ status = "okay";
+ };
+
+ serial@e0000000 {
+ status = "okay";
+ };
+
+ serial@b4100000 {
+ status = "okay";
+ };
+
+ wdt@ec800620 {
+ status = "okay";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
new file mode 100644
index 00000000000..a26fc47a55e
--- /dev/null
+++ b/arch/arm/boot/dts/spear1340.dtsi
@@ -0,0 +1,56 @@
+/*
+ * DTS file for all SPEAr1340 SoCs
+ *
+ * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "spear13xx.dtsi"
+
+/ {
+ compatible = "st,spear1340";
+
+ ahb {
+ ahci@b1000000 {
+ compatible = "snps,spear-ahci";
+ reg = <0xb1000000 0x10000>;
+ interrupts = <0 72 0x4>;
+ status = "disabled";
+ };
+
+ spi1: spi@5d400000 {
+ compatible = "arm,pl022", "arm,primecell";
+ reg = <0x5d400000 0x1000>;
+ interrupts = <0 99 0x4>;
+ status = "disabled";
+ };
+
+ apb {
+ i2c1: i2c@b4000000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0xb4000000 0x1000>;
+ interrupts = <0 104 0x4>;
+ status = "disabled";
+ };
+
+ serial@b4100000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0xb4100000 0x1000>;
+ interrupts = <0 105 0x4>;
+ status = "disabled";
+ };
+
+ thermal@e07008c4 {
+ st,thermal-flags = <0x2a00>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
new file mode 100644
index 00000000000..1f8e1e1481d
--- /dev/null
+++ b/arch/arm/boot/dts/spear13xx.dtsi
@@ -0,0 +1,262 @@
+/*
+ * DTS file for all SPEAr13xx SoCs
+ *
+ * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+ interrupt-parent = <&gic>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ compatible = "arm,cortex-a9";
+ reg = <0>;
+ next-level-cache = <&L2>;
+ };
+
+ cpu@1 {
+ compatible = "arm,cortex-a9";
+ reg = <1>;
+ next-level-cache = <&L2>;
+ };
+ };
+
+ gic: interrupt-controller@ec801000 {
+ compatible = "arm,cortex-a9-gic";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ reg = < 0xec801000 0x1000 >,
+ < 0xec800100 0x0100 >;
+ };
+
+ pmu {
+ compatible = "arm,cortex-a9-pmu";
+ interrupts = <0 8 0x04
+ 0 9 0x04>;
+ };
+
+ L2: l2-cache {
+ compatible = "arm,pl310-cache";
+ reg = <0xed000000 0x1000>;
+ cache-unified;
+ cache-level = <2>;
+ };
+
+ memory {
+ name = "memory";
+ device_type = "memory";
+ reg = <0 0x40000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyAMA0,115200";
+ };
+
+ ahb {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ ranges = <0x50000000 0x50000000 0x10000000
+ 0xb0000000 0xb0000000 0x10000000
+ 0xe0000000 0xe0000000 0x10000000>;
+
+ sdhci@b3000000 {
+ compatible = "st,sdhci-spear";
+ reg = <0xb3000000 0x100>;
+ interrupts = <0 28 0x4>;
+ status = "disabled";
+ };
+
+ cf@b2800000 {
+ compatible = "arasan,cf-spear1340";
+ reg = <0xb2800000 0x100>;
+ interrupts = <0 29 0x4>;
+ status = "disabled";
+ };
+
+ dma@ea800000 {
+ compatible = "snps,dma-spear1340";
+ reg = <0xea800000 0x1000>;
+ interrupts = <0 19 0x4>;
+ status = "disabled";
+ };
+
+ dma@eb000000 {
+ compatible = "snps,dma-spear1340";
+ reg = <0xeb000000 0x1000>;
+ interrupts = <0 59 0x4>;
+ status = "disabled";
+ };
+
+ fsmc: flash@b0000000 {
+ compatible = "st,spear600-fsmc-nand";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb0000000 0x1000 /* FSMC Register */
+ 0xb0800000 0x0010>; /* NAND Base */
+ reg-names = "fsmc_regs", "nand_data";
+ interrupts = <0 20 0x4
+ 0 21 0x4
+ 0 22 0x4
+ 0 23 0x4>;
+ st,ale-off = <0x20000>;
+ st,cle-off = <0x10000>;
+ status = "disabled";
+ };
+
+ gmac0: eth@e2000000 {
+ compatible = "st,spear600-gmac";
+ reg = <0xe2000000 0x8000>;
+ interrupts = <0 23 0x4
+ 0 24 0x4>;
+ interrupt-names = "macirq", "eth_wake_irq";
+ status = "disabled";
+ };
+
+ smi: flash@ea000000 {
+ compatible = "st,spear600-smi";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xea000000 0x1000>;
+ interrupts = <0 30 0x4>;
+ status = "disabled";
+ };
+
+ spi0: spi@e0100000 {
+ compatible = "arm,pl022", "arm,primecell";
+ reg = <0xe0100000 0x1000>;
+ interrupts = <0 31 0x4>;
+ status = "disabled";
+ };
+
+ ehci@e4800000 {
+ compatible = "st,spear600-ehci", "usb-ehci";
+ reg = <0xe4800000 0x1000>;
+ interrupts = <0 64 0x4>;
+ status = "disabled";
+ };
+
+ ehci@e5800000 {
+ compatible = "st,spear600-ehci", "usb-ehci";
+ reg = <0xe5800000 0x1000>;
+ interrupts = <0 66 0x4>;
+ status = "disabled";
+ };
+
+ ohci@e4000000 {
+ compatible = "st,spear600-ohci", "usb-ohci";
+ reg = <0xe4000000 0x1000>;
+ interrupts = <0 65 0x4>;
+ status = "disabled";
+ };
+
+ ohci@e5000000 {
+ compatible = "st,spear600-ohci", "usb-ohci";
+ reg = <0xe5000000 0x1000>;
+ interrupts = <0 67 0x4>;
+ status = "disabled";
+ };
+
+ apb {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ ranges = <0x50000000 0x50000000 0x10000000
+ 0xb0000000 0xb0000000 0x10000000
+ 0xe0000000 0xe0000000 0x10000000>;
+
+ gpio0: gpio@e0600000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0xe0600000 0x1000>;
+ interrupts = <0 24 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ status = "disabled";
+ };
+
+ gpio1: gpio@e0680000 {
+ compatible = "arm,pl061", "arm,primecell";
+ reg = <0xe0680000 0x1000>;
+ interrupts = <0 25 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ status = "disabled";
+ };
+
+ kbd@e0300000 {
+ compatible = "st,spear300-kbd";
+ reg = <0xe0300000 0x1000>;
+ status = "disabled";
+ };
+
+ i2c0: i2c@e0280000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,designware-i2c";
+ reg = <0xe0280000 0x1000>;
+ interrupts = <0 41 0x4>;
+ status = "disabled";
+ };
+
+ rtc@e0580000 {
+ compatible = "st,spear-rtc";
+ reg = <0xe0580000 0x1000>;
+ interrupts = <0 36 0x4>;
+ status = "disabled";
+ };
+
+ serial@e0000000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0xe0000000 0x1000>;
+ interrupts = <0 36 0x4>;
+ status = "disabled";
+ };
+
+ adc@e0080000 {
+ compatible = "st,spear600-adc";
+ reg = <0xe0080000 0x1000>;
+ interrupts = <0 44 0x4>;
+ status = "disabled";
+ };
+
+ timer@e0380000 {
+ compatible = "st,spear-timer";
+ reg = <0xe0380000 0x400>;
+ interrupts = <0 37 0x4>;
+ };
+
+ timer@ec800600 {
+ compatible = "arm,cortex-a9-twd-timer";
+ reg = <0xec800600 0x20>;
+ interrupts = <1 13 0x301>;
+ };
+
+ wdt@ec800620 {
+ compatible = "arm,cortex-a9-twd-wdt";
+ reg = <0xec800620 0x20>;
+ status = "disabled";
+ };
+
+ thermal@e07008c4 {
+ compatible = "st,thermal-spear1340";
+ reg = <0xe07008c4 0x4>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/spear300-evb.dts b/arch/arm/boot/dts/spear300-evb.dts
index 910e264b87c..fc82b1a2645 100644
--- a/arch/arm/boot/dts/spear300-evb.dts
+++ b/arch/arm/boot/dts/spear300-evb.dts
@@ -87,6 +87,31 @@
smi: flash@fc000000 {
status = "okay";
+ clock-rate=<50000000>;
+
+ flash@f8000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xf8000000 0x800000>;
+ st,smi-fast-mode;
+
+ partition@0 {
+ label = "xloader";
+ reg = <0x0 0x10000>;
+ };
+ partition@10000 {
+ label = "u-boot";
+ reg = <0x10000 0x40000>;
+ };
+ partition@50000 {
+ label = "linux";
+ reg = <0x50000 0x2c0000>;
+ };
+ partition@310000 {
+ label = "rootfs";
+ reg = <0x310000 0x4f0000>;
+ };
+ };
};
spi0: spi@d0100000 {
diff --git a/arch/arm/boot/dts/spear310-evb.dts b/arch/arm/boot/dts/spear310-evb.dts
index 6d95317100a..dc5e2d445a9 100644
--- a/arch/arm/boot/dts/spear310-evb.dts
+++ b/arch/arm/boot/dts/spear310-evb.dts
@@ -103,11 +103,27 @@
clock-rate=<50000000>;
flash@f8000000 {
- label = "m25p64";
- reg = <0xf8000000 0x800000>;
#address-cells = <1>;
#size-cells = <1>;
+ reg = <0xf8000000 0x800000>;
st,smi-fast-mode;
+
+ partition@0 {
+ label = "xloader";
+ reg = <0x0 0x10000>;
+ };
+ partition@10000 {
+ label = "u-boot";
+ reg = <0x10000 0x40000>;
+ };
+ partition@50000 {
+ label = "linux";
+ reg = <0x50000 0x2c0000>;
+ };
+ partition@310000 {
+ label = "rootfs";
+ reg = <0x310000 0x4f0000>;
+ };
};
};
diff --git a/arch/arm/boot/dts/spear320-evb.dts b/arch/arm/boot/dts/spear320-evb.dts
index 0c6463b71a3..6308fa3bec1 100644
--- a/arch/arm/boot/dts/spear320-evb.dts
+++ b/arch/arm/boot/dts/spear320-evb.dts
@@ -110,6 +110,31 @@
smi: flash@fc000000 {
status = "okay";
+ clock-rate=<50000000>;
+
+ flash@f8000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xf8000000 0x800000>;
+ st,smi-fast-mode;
+
+ partition@0 {
+ label = "xloader";
+ reg = <0x0 0x10000>;
+ };
+ partition@10000 {
+ label = "u-boot";
+ reg = <0x10000 0x40000>;
+ };
+ partition@50000 {
+ label = "linux";
+ reg = <0x50000 0x2c0000>;
+ };
+ partition@310000 {
+ label = "rootfs";
+ reg = <0x310000 0x4f0000>;
+ };
+ };
};
spi0: spi@d0100000 {
diff --git a/arch/arm/boot/dts/spear3xx.dtsi b/arch/arm/boot/dts/spear3xx.dtsi
index 0ae7c8e8631..91072553963 100644
--- a/arch/arm/boot/dts/spear3xx.dtsi
+++ b/arch/arm/boot/dts/spear3xx.dtsi
@@ -139,6 +139,12 @@
interrupts = <12>;
status = "disabled";
};
+
+ timer@f0000000 {
+ compatible = "st,spear-timer";
+ reg = <0xf0000000 0x400>;
+ interrupts = <2>;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/spear600-evb.dts b/arch/arm/boot/dts/spear600-evb.dts
index 790a7a8a5cc..1119c22c9a8 100644
--- a/arch/arm/boot/dts/spear600-evb.dts
+++ b/arch/arm/boot/dts/spear600-evb.dts
@@ -33,6 +33,35 @@
status = "okay";
};
+ smi: flash@fc000000 {
+ status = "okay";
+ clock-rate=<50000000>;
+
+ flash@f8000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xf8000000 0x800000>;
+ st,smi-fast-mode;
+
+ partition@0 {
+ label = "xloader";
+ reg = <0x0 0x10000>;
+ };
+ partition@10000 {
+ label = "u-boot";
+ reg = <0x10000 0x40000>;
+ };
+ partition@50000 {
+ label = "linux";
+ reg = <0x50000 0x2c0000>;
+ };
+ partition@310000 {
+ label = "rootfs";
+ reg = <0x310000 0x4f0000>;
+ };
+ };
+ };
+
apb {
serial@d0000000 {
status = "okay";
diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi
index d777e3a6f17..089f0a42c50 100644
--- a/arch/arm/boot/dts/spear600.dtsi
+++ b/arch/arm/boot/dts/spear600.dtsi
@@ -177,6 +177,12 @@
interrupts = <28>;
status = "disabled";
};
+
+ timer@f0000000 {
+ compatible = "st,spear-timer";
+ reg = <0xf0000000 0x400>;
+ interrupts = <16>;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/tegra-cardhu.dts b/arch/arm/boot/dts/tegra-cardhu.dts
index 0a9f34a2c3a..36321bceec4 100644
--- a/arch/arm/boot/dts/tegra-cardhu.dts
+++ b/arch/arm/boot/dts/tegra-cardhu.dts
@@ -7,10 +7,10 @@
compatible = "nvidia,cardhu", "nvidia,tegra30";
memory {
- reg = < 0x80000000 0x40000000 >;
+ reg = <0x80000000 0x40000000>;
};
- pinmux@70000000 {
+ pinmux {
pinctrl-names = "default";
pinctrl-0 = <&state_default>;
@@ -51,64 +51,122 @@
nvidia,pull = <2>;
nvidia,tristate = <0>;
};
+ dap2_fs_pa2 {
+ nvidia,pins = "dap2_fs_pa2",
+ "dap2_sclk_pa3",
+ "dap2_din_pa4",
+ "dap2_dout_pa5";
+ nvidia,function = "i2s1";
+ nvidia,pull = <0>;
+ nvidia,tristate = <0>;
+ };
};
};
serial@70006000 {
- clock-frequency = < 408000000 >;
- };
-
- serial@70006040 {
- status = "disable";
- };
-
- serial@70006200 {
- status = "disable";
- };
-
- serial@70006300 {
- status = "disable";
- };
-
- serial@70006400 {
- status = "disable";
+ status = "okay";
+ clock-frequency = <408000000>;
};
i2c@7000c000 {
+ status = "okay";
clock-frequency = <100000>;
};
i2c@7000c400 {
+ status = "okay";
clock-frequency = <100000>;
};
i2c@7000c500 {
+ status = "okay";
clock-frequency = <100000>;
+
+ /* ALS and Proximity sensor */
+ isl29028@44 {
+ compatible = "isil,isl29028";
+ reg = <0x44>;
+ interrupt-parent = <&gpio>;
+ interrupts = <88 0x04>; /* gpio PL0 */
+ };
};
i2c@7000c700 {
+ status = "okay";
clock-frequency = <100000>;
};
i2c@7000d000 {
+ status = "okay";
clock-frequency = <100000>;
+
+ wm8903: wm8903@1a {
+ compatible = "wlf,wm8903";
+ reg = <0x1a>;
+ interrupt-parent = <&gpio>;
+ interrupts = <179 0x04>; /* gpio PW3 */
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ micdet-cfg = <0>;
+ micdet-delay = <100>;
+ gpio-cfg = <0xffffffff 0xffffffff 0 0xffffffff 0xffffffff>;
+ };
+
+ tps62361 {
+ compatible = "ti,tps62361";
+ reg = <0x60>;
+
+ regulator-name = "tps62361-vout";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-boot-on;
+ regulator-always-on;
+ ti,vsel0-state-high;
+ ti,vsel1-state-high;
+ };
+ };
+
+ ahub {
+ i2s@70080400 {
+ status = "okay";
+ };
};
sdhci@78000000 {
+ status = "okay";
cd-gpios = <&gpio 69 0>; /* gpio PI5 */
wp-gpios = <&gpio 155 0>; /* gpio PT3 */
power-gpios = <&gpio 31 0>; /* gpio PD7 */
+ bus-width = <4>;
};
- sdhci@78000200 {
- status = "disable";
+ sdhci@78000600 {
+ status = "okay";
+ support-8bit;
+ bus-width = <8>;
};
- sdhci@78000400 {
- status = "disable";
- };
+ sound {
+ compatible = "nvidia,tegra-audio-wm8903-cardhu",
+ "nvidia,tegra-audio-wm8903";
+ nvidia,model = "NVIDIA Tegra Cardhu";
- sdhci@78000400 {
- support-8bit;
+ nvidia,audio-routing =
+ "Headphone Jack", "HPOUTR",
+ "Headphone Jack", "HPOUTL",
+ "Int Spk", "ROP",
+ "Int Spk", "RON",
+ "Int Spk", "LOP",
+ "Int Spk", "LON",
+ "Mic Jack", "MICBIAS",
+ "IN1L", "Mic Jack";
+
+ nvidia,i2s-controller = <&tegra_i2s1>;
+ nvidia,audio-codec = <&wm8903>;
+
+ nvidia,spkr-en-gpios = <&wm8903 2 0>;
+ nvidia,hp-det-gpios = <&gpio 178 0>; /* gpio PW2 */
};
};
diff --git a/arch/arm/boot/dts/tegra-harmony.dts b/arch/arm/boot/dts/tegra-harmony.dts
index 1a0b1f18294..7de701365fc 100644
--- a/arch/arm/boot/dts/tegra-harmony.dts
+++ b/arch/arm/boot/dts/tegra-harmony.dts
@@ -6,11 +6,11 @@
model = "NVIDIA Tegra2 Harmony evaluation board";
compatible = "nvidia,harmony", "nvidia,tegra20";
- memory@0 {
- reg = < 0x00000000 0x40000000 >;
+ memory {
+ reg = <0x00000000 0x40000000>;
};
- pinmux@70000000 {
+ pinmux {
pinctrl-names = "default";
pinctrl-0 = <&state_default>;
@@ -167,28 +167,28 @@
};
conf_ata {
nvidia,pins = "ata", "atb", "atc", "atd", "ate",
- "cdev1", "dap1", "dtb", "gma", "gmb",
- "gmc", "gmd", "gme", "gpu7", "gpv",
- "i2cp", "pta", "rm", "slxa", "slxk",
- "spia", "spib";
+ "cdev1", "cdev2", "dap1", "dtb", "gma",
+ "gmb", "gmc", "gmd", "gme", "gpu7",
+ "gpv", "i2cp", "pta", "rm", "slxa",
+ "slxk", "spia", "spib", "uac";
nvidia,pull = <0>;
nvidia,tristate = <0>;
};
- conf_cdev2 {
- nvidia,pins = "cdev2", "csus", "spid", "spif";
- nvidia,pull = <1>;
- nvidia,tristate = <1>;
- };
conf_ck32 {
nvidia,pins = "ck32", "ddrc", "pmca", "pmcb",
"pmcc", "pmcd", "pmce", "xm2c", "xm2d";
nvidia,pull = <0>;
};
+ conf_csus {
+ nvidia,pins = "csus", "spid", "spif";
+ nvidia,pull = <1>;
+ nvidia,tristate = <1>;
+ };
conf_crtp {
nvidia,pins = "crtp", "dap2", "dap3", "dap4",
"dtc", "dte", "dtf", "gpu", "sdio1",
"slxc", "slxd", "spdi", "spdo", "spig",
- "uac", "uda";
+ "uda";
nvidia,pull = <0>;
nvidia,tristate = <1>;
};
@@ -234,42 +234,81 @@
};
};
- pmc@7000f400 {
- nvidia,invert-interrupt;
+ i2s@70002800 {
+ status = "okay";
+ };
+
+ serial@70006300 {
+ status = "okay";
+ clock-frequency = <216000000>;
};
i2c@7000c000 {
+ status = "okay";
clock-frequency = <400000>;
wm8903: wm8903@1a {
compatible = "wlf,wm8903";
reg = <0x1a>;
interrupt-parent = <&gpio>;
- interrupts = < 187 0x04 >;
+ interrupts = <187 0x04>;
gpio-controller;
#gpio-cells = <2>;
micdet-cfg = <0>;
micdet-delay = <100>;
- gpio-cfg = < 0xffffffff 0xffffffff 0 0xffffffff 0xffffffff >;
+ gpio-cfg = <0xffffffff 0xffffffff 0 0xffffffff 0xffffffff>;
};
};
i2c@7000c400 {
+ status = "okay";
clock-frequency = <400000>;
};
i2c@7000c500 {
+ status = "okay";
clock-frequency = <400000>;
};
i2c@7000d000 {
+ status = "okay";
clock-frequency = <400000>;
};
- i2s@70002a00 {
- status = "disable";
+ pmc {
+ nvidia,invert-interrupt;
+ };
+
+ usb@c5000000 {
+ status = "okay";
+ };
+
+ usb@c5004000 {
+ status = "okay";
+ nvidia,phy-reset-gpio = <&gpio 169 0>; /* gpio PV1 */
+ };
+
+ usb@c5008000 {
+ status = "okay";
+ };
+
+ sdhci@c8000200 {
+ status = "okay";
+ cd-gpios = <&gpio 69 0>; /* gpio PI5 */
+ wp-gpios = <&gpio 57 0>; /* gpio PH1 */
+ power-gpios = <&gpio 155 0>; /* gpio PT3 */
+ bus-width = <4>;
+ };
+
+ sdhci@c8000600 {
+ status = "okay";
+ cd-gpios = <&gpio 58 0>; /* gpio PH2 */
+ wp-gpios = <&gpio 59 0>; /* gpio PH3 */
+ power-gpios = <&gpio 70 0>; /* gpio PI6 */
+ support-8bit;
+ bus-width = <8>;
};
sound {
@@ -295,45 +334,4 @@
nvidia,int-mic-en-gpios = <&gpio 184 0>; /*gpio PX0 */
nvidia,ext-mic-en-gpios = <&gpio 185 0>; /* gpio PX1 */
};
-
- serial@70006000 {
- status = "disable";
- };
-
- serial@70006040 {
- status = "disable";
- };
-
- serial@70006200 {
- status = "disable";
- };
-
- serial@70006300 {
- clock-frequency = < 216000000 >;
- };
-
- serial@70006400 {
- status = "disable";
- };
-
- sdhci@c8000000 {
- status = "disable";
- };
-
- sdhci@c8000200 {
- cd-gpios = <&gpio 69 0>; /* gpio PI5 */
- wp-gpios = <&gpio 57 0>; /* gpio PH1 */
- power-gpios = <&gpio 155 0>; /* gpio PT3 */
- };
-
- sdhci@c8000400 {
- status = "disable";
- };
-
- sdhci@c8000600 {
- cd-gpios = <&gpio 58 0>; /* gpio PH2 */
- wp-gpios = <&gpio 59 0>; /* gpio PH3 */
- power-gpios = <&gpio 70 0>; /* gpio PI6 */
- support-8bit;
- };
};
diff --git a/arch/arm/boot/dts/tegra-paz00.dts b/arch/arm/boot/dts/tegra-paz00.dts
index 10943fb2561..bfeb117d5ae 100644
--- a/arch/arm/boot/dts/tegra-paz00.dts
+++ b/arch/arm/boot/dts/tegra-paz00.dts
@@ -6,11 +6,11 @@
model = "Toshiba AC100 / Dynabook AZ";
compatible = "compal,paz00", "nvidia,tegra20";
- memory@0 {
+ memory {
reg = <0x00000000 0x20000000>;
};
- pinmux@70000000 {
+ pinmux {
pinctrl-names = "default";
pinctrl-0 = <&state_default>;
@@ -159,18 +159,14 @@
};
conf_ata {
nvidia,pins = "ata", "atb", "atc", "atd", "ate",
- "cdev1", "dap1", "dap2", "dtf", "gma",
- "gmb", "gmc", "gmd", "gme", "gpu",
- "gpu7", "gpv", "i2cp", "pta", "rm",
- "sdio1", "slxk", "spdo", "uac", "uda";
+ "cdev1", "cdev2", "dap1", "dap2", "dtf",
+ "gma", "gmb", "gmc", "gmd", "gme",
+ "gpu", "gpu7", "gpv", "i2cp", "pta",
+ "rm", "sdio1", "slxk", "spdo", "uac",
+ "uda";
nvidia,pull = <0>;
nvidia,tristate = <0>;
};
- conf_cdev2 {
- nvidia,pins = "cdev2";
- nvidia,pull = <1>;
- nvidia,tristate = <0>;
- };
conf_ck32 {
nvidia,pins = "ck32", "ddrc", "pmca", "pmcb",
"pmcc", "pmcd", "pmce", "xm2c", "xm2d";
@@ -230,7 +226,22 @@
};
};
+ i2s@70002800 {
+ status = "okay";
+ };
+
+ serial@70006000 {
+ status = "okay";
+ clock-frequency = <216000000>;
+ };
+
+ serial@70006200 {
+ status = "okay";
+ clock-frequency = <216000000>;
+ };
+
i2c@7000c000 {
+ status = "okay";
clock-frequency = <400000>;
alc5632: alc5632@1e {
@@ -242,25 +253,23 @@
};
i2c@7000c400 {
+ status = "okay";
clock-frequency = <400000>;
};
- i2c@7000c500 {
- status = "disable";
- };
-
- nvec@7000c500 {
- #address-cells = <1>;
- #size-cells = <0>;
+ nvec {
compatible = "nvidia,nvec";
- reg = <0x7000C500 0x100>;
+ reg = <0x7000c500 0x100>;
interrupts = <0 92 0x04>;
+ #address-cells = <1>;
+ #size-cells = <0>;
clock-frequency = <80000>;
- request-gpios = <&gpio 170 0>;
+ request-gpios = <&gpio 170 0>; /* gpio PV2 */
slave-addr = <138>;
};
i2c@7000d000 {
+ status = "okay";
clock-frequency = <400000>;
adt7461@4c {
@@ -269,66 +278,31 @@
};
};
- i2s@70002a00 {
- status = "disable";
- };
-
- sound {
- compatible = "nvidia,tegra-audio-alc5632-paz00",
- "nvidia,tegra-audio-alc5632";
-
- nvidia,model = "Compal PAZ00";
-
- nvidia,audio-routing =
- "Int Spk", "SPKOUT",
- "Int Spk", "SPKOUTN",
- "Headset Mic", "MICBIAS1",
- "MIC1", "Headset Mic",
- "Headset Stereophone", "HPR",
- "Headset Stereophone", "HPL",
- "DMICDAT", "Digital Mic";
-
- nvidia,audio-codec = <&alc5632>;
- nvidia,i2s-controller = <&tegra_i2s1>;
- nvidia,hp-det-gpios = <&gpio 178 0>; /* gpio PW2 */
- };
-
- serial@70006000 {
- clock-frequency = <216000000>;
+ usb@c5000000 {
+ status = "okay";
};
- serial@70006040 {
- status = "disable";
+ usb@c5004000 {
+ status = "okay";
+ nvidia,phy-reset-gpio = <&gpio 168 0>; /* gpio PV0 */
};
- serial@70006200 {
- clock-frequency = <216000000>;
- };
-
- serial@70006300 {
- status = "disable";
- };
-
- serial@70006400 {
- status = "disable";
+ usb@c5008000 {
+ status = "okay";
};
sdhci@c8000000 {
+ status = "okay";
cd-gpios = <&gpio 173 0>; /* gpio PV5 */
wp-gpios = <&gpio 57 0>; /* gpio PH1 */
power-gpios = <&gpio 169 0>; /* gpio PV1 */
- };
-
- sdhci@c8000200 {
- status = "disable";
- };
-
- sdhci@c8000400 {
- status = "disable";
+ bus-width = <4>;
};
sdhci@c8000600 {
+ status = "okay";
support-8bit;
+ bus-width = <8>;
};
gpio-keys {
@@ -347,8 +321,28 @@
wifi {
label = "wifi-led";
- gpios = <&gpio 24 0>;
+ gpios = <&gpio 24 0>; /* gpio PD0 */
linux,default-trigger = "rfkill0";
};
};
+
+ sound {
+ compatible = "nvidia,tegra-audio-alc5632-paz00",
+ "nvidia,tegra-audio-alc5632";
+
+ nvidia,model = "Compal PAZ00";
+
+ nvidia,audio-routing =
+ "Int Spk", "SPKOUT",
+ "Int Spk", "SPKOUTN",
+ "Headset Mic", "MICBIAS1",
+ "MIC1", "Headset Mic",
+ "Headset Stereophone", "HPR",
+ "Headset Stereophone", "HPL",
+ "DMICDAT", "Digital Mic";
+
+ nvidia,audio-codec = <&alc5632>;
+ nvidia,i2s-controller = <&tegra_i2s1>;
+ nvidia,hp-det-gpios = <&gpio 178 0>; /* gpio PW2 */
+ };
};
diff --git a/arch/arm/boot/dts/tegra-seaboard.dts b/arch/arm/boot/dts/tegra-seaboard.dts
index ec33116f5df..89cb7f2acd9 100644
--- a/arch/arm/boot/dts/tegra-seaboard.dts
+++ b/arch/arm/boot/dts/tegra-seaboard.dts
@@ -7,11 +7,10 @@
compatible = "nvidia,seaboard", "nvidia,tegra20";
memory {
- device_type = "memory";
- reg = < 0x00000000 0x40000000 >;
+ reg = <0x00000000 0x40000000>;
};
- pinmux@70000000 {
+ pinmux {
pinctrl-names = "default";
pinctrl-0 = <&state_default>;
@@ -100,7 +99,7 @@
};
hdint {
nvidia,pins = "hdint", "lpw0", "lpw2", "lsc1",
- "lsck", "lsda", "pta";
+ "lsck", "lsda";
nvidia,function = "hdmi";
};
i2cp {
@@ -134,6 +133,10 @@
nvidia,pins = "pmc";
nvidia,function = "pwr_on";
};
+ pta {
+ nvidia,pins = "pta";
+ nvidia,function = "i2c2";
+ };
rm {
nvidia,pins = "rm";
nvidia,function = "i2c1";
@@ -254,108 +257,148 @@
};
};
+ i2s@70002800 {
+ status = "okay";
+ };
+
+ serial@70006300 {
+ status = "okay";
+ clock-frequency = <216000000>;
+ };
+
i2c@7000c000 {
+ status = "okay";
clock-frequency = <400000>;
wm8903: wm8903@1a {
compatible = "wlf,wm8903";
reg = <0x1a>;
interrupt-parent = <&gpio>;
- interrupts = < 187 0x04 >;
+ interrupts = <187 0x04>;
gpio-controller;
#gpio-cells = <2>;
micdet-cfg = <0>;
micdet-delay = <100>;
- gpio-cfg = < 0xffffffff 0xffffffff 0 0xffffffff 0xffffffff >;
+ gpio-cfg = <0xffffffff 0xffffffff 0 0xffffffff 0xffffffff>;
+ };
+
+ /* ALS and proximity sensor */
+ isl29018@44 {
+ compatible = "isil,isl29018";
+ reg = <0x44>;
+ interrupt-parent = <&gpio>;
+ interrupts = <202 0x04>; /* GPIO PZ2 */
+ };
+
+ gyrometer@68 {
+ compatible = "invn,mpu3050";
+ reg = <0x68>;
+ interrupt-parent = <&gpio>;
+ interrupts = <204 0x04>; /* gpio PZ4 */
};
};
i2c@7000c400 {
- clock-frequency = <400000>;
+ status = "okay";
+ clock-frequency = <100000>;
+
+ smart-battery@b {
+ compatible = "ti,bq20z75", "smart-battery-1.1";
+ reg = <0xb>;
+ ti,i2c-retry-count = <2>;
+ ti,poll-retry-count = <10>;
+ };
};
i2c@7000c500 {
+ status = "okay";
clock-frequency = <400000>;
};
i2c@7000d000 {
+ status = "okay";
clock-frequency = <400000>;
- adt7461@4c {
- compatible = "adt7461";
+ temperature-sensor@4c {
+ compatible = "nct1008";
reg = <0x4c>;
};
- };
-
- i2s@70002a00 {
- status = "disable";
- };
-
- sound {
- compatible = "nvidia,tegra-audio-wm8903-seaboard",
- "nvidia,tegra-audio-wm8903";
- nvidia,model = "NVIDIA Tegra Seaboard";
-
- nvidia,audio-routing =
- "Headphone Jack", "HPOUTR",
- "Headphone Jack", "HPOUTL",
- "Int Spk", "ROP",
- "Int Spk", "RON",
- "Int Spk", "LOP",
- "Int Spk", "LON",
- "Mic Jack", "MICBIAS",
- "IN1R", "Mic Jack";
-
- nvidia,i2s-controller = <&tegra_i2s1>;
- nvidia,audio-codec = <&wm8903>;
-
- nvidia,spkr-en-gpios = <&wm8903 2 0>;
- nvidia,hp-det-gpios = <&gpio 185 0>; /* gpio PX1 */
- };
- serial@70006000 {
- status = "disable";
- };
-
- serial@70006040 {
- status = "disable";
+ magnetometer@c {
+ compatible = "ak8975";
+ reg = <0xc>;
+ interrupt-parent = <&gpio>;
+ interrupts = <109 0x04>; /* gpio PN5 */
+ };
};
- serial@70006200 {
- status = "disable";
- };
+ emc {
+ emc-table@190000 {
+ reg = <190000>;
+ compatible = "nvidia,tegra20-emc-table";
+ clock-frequency = <190000>;
+ nvidia,emc-registers = <0x0000000c 0x00000026
+ 0x00000009 0x00000003 0x00000004 0x00000004
+ 0x00000002 0x0000000c 0x00000003 0x00000003
+ 0x00000002 0x00000001 0x00000004 0x00000005
+ 0x00000004 0x00000009 0x0000000d 0x0000059f
+ 0x00000000 0x00000003 0x00000003 0x00000003
+ 0x00000003 0x00000001 0x0000000b 0x000000c8
+ 0x00000003 0x00000007 0x00000004 0x0000000f
+ 0x00000002 0x00000000 0x00000000 0x00000002
+ 0x00000000 0x00000000 0x00000083 0xa06204ae
+ 0x007dc010 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000>;
+ };
- serial@70006300 {
- clock-frequency = < 216000000 >;
+ emc-table@380000 {
+ reg = <380000>;
+ compatible = "nvidia,tegra20-emc-table";
+ clock-frequency = <380000>;
+ nvidia,emc-registers = <0x00000017 0x0000004b
+ 0x00000012 0x00000006 0x00000004 0x00000005
+ 0x00000003 0x0000000c 0x00000006 0x00000006
+ 0x00000003 0x00000001 0x00000004 0x00000005
+ 0x00000004 0x00000009 0x0000000d 0x00000b5f
+ 0x00000000 0x00000003 0x00000003 0x00000006
+ 0x00000006 0x00000001 0x00000011 0x000000c8
+ 0x00000003 0x0000000e 0x00000007 0x0000000f
+ 0x00000002 0x00000000 0x00000000 0x00000002
+ 0x00000000 0x00000000 0x00000083 0xe044048b
+ 0x007d8010 0x00000000 0x00000000 0x00000000
+ 0x00000000 0x00000000 0x00000000 0x00000000>;
+ };
};
- serial@70006400 {
- status = "disable";
+ usb@c5000000 {
+ status = "okay";
+ nvidia,vbus-gpio = <&gpio 24 0>; /* PD0 */
+ dr_mode = "otg";
};
- sdhci@c8000000 {
- status = "disable";
+ usb@c5004000 {
+ status = "okay";
+ nvidia,phy-reset-gpio = <&gpio 169 0>; /* gpio PV1 */
};
- sdhci@c8000200 {
- status = "disable";
+ usb@c5008000 {
+ status = "okay";
};
sdhci@c8000400 {
+ status = "okay";
cd-gpios = <&gpio 69 0>; /* gpio PI5 */
wp-gpios = <&gpio 57 0>; /* gpio PH1 */
power-gpios = <&gpio 70 0>; /* gpio PI6 */
+ bus-width = <4>;
};
sdhci@c8000600 {
+ status = "okay";
support-8bit;
- };
-
- usb@c5000000 {
- nvidia,vbus-gpio = <&gpio 24 0>; /* PD0 */
- dr_mode = "otg";
+ bus-width = <8>;
};
gpio-keys {
@@ -378,41 +421,25 @@
};
};
- emc@7000f400 {
- emc-table@190000 {
- reg = < 190000 >;
- compatible = "nvidia,tegra20-emc-table";
- clock-frequency = < 190000 >;
- nvidia,emc-registers = < 0x0000000c 0x00000026
- 0x00000009 0x00000003 0x00000004 0x00000004
- 0x00000002 0x0000000c 0x00000003 0x00000003
- 0x00000002 0x00000001 0x00000004 0x00000005
- 0x00000004 0x00000009 0x0000000d 0x0000059f
- 0x00000000 0x00000003 0x00000003 0x00000003
- 0x00000003 0x00000001 0x0000000b 0x000000c8
- 0x00000003 0x00000007 0x00000004 0x0000000f
- 0x00000002 0x00000000 0x00000000 0x00000002
- 0x00000000 0x00000000 0x00000083 0xa06204ae
- 0x007dc010 0x00000000 0x00000000 0x00000000
- 0x00000000 0x00000000 0x00000000 0x00000000 >;
- };
+ sound {
+ compatible = "nvidia,tegra-audio-wm8903-seaboard",
+ "nvidia,tegra-audio-wm8903";
+ nvidia,model = "NVIDIA Tegra Seaboard";
- emc-table@380000 {
- reg = < 380000 >;
- compatible = "nvidia,tegra20-emc-table";
- clock-frequency = < 380000 >;
- nvidia,emc-registers = < 0x00000017 0x0000004b
- 0x00000012 0x00000006 0x00000004 0x00000005
- 0x00000003 0x0000000c 0x00000006 0x00000006
- 0x00000003 0x00000001 0x00000004 0x00000005
- 0x00000004 0x00000009 0x0000000d 0x00000b5f
- 0x00000000 0x00000003 0x00000003 0x00000006
- 0x00000006 0x00000001 0x00000011 0x000000c8
- 0x00000003 0x0000000e 0x00000007 0x0000000f
- 0x00000002 0x00000000 0x00000000 0x00000002
- 0x00000000 0x00000000 0x00000083 0xe044048b
- 0x007d8010 0x00000000 0x00000000 0x00000000
- 0x00000000 0x00000000 0x00000000 0x00000000 >;
- };
+ nvidia,audio-routing =
+ "Headphone Jack", "HPOUTR",
+ "Headphone Jack", "HPOUTL",
+ "Int Spk", "ROP",
+ "Int Spk", "RON",
+ "Int Spk", "LOP",
+ "Int Spk", "LON",
+ "Mic Jack", "MICBIAS",
+ "IN1R", "Mic Jack";
+
+ nvidia,i2s-controller = <&tegra_i2s1>;
+ nvidia,audio-codec = <&wm8903>;
+
+ nvidia,spkr-en-gpios = <&wm8903 2 0>;
+ nvidia,hp-det-gpios = <&gpio 185 0>; /* gpio PX1 */
};
};
diff --git a/arch/arm/boot/dts/tegra-trimslice.dts b/arch/arm/boot/dts/tegra-trimslice.dts
index 98efd5b0d7f..9de5636023f 100644
--- a/arch/arm/boot/dts/tegra-trimslice.dts
+++ b/arch/arm/boot/dts/tegra-trimslice.dts
@@ -6,11 +6,11 @@
model = "Compulab TrimSlice board";
compatible = "compulab,trimslice", "nvidia,tegra20";
- memory@0 {
- reg = < 0x00000000 0x40000000 >;
+ memory {
+ reg = <0x00000000 0x40000000>;
};
- pinmux@70000000 {
+ pinmux {
pinctrl-names = "default";
pinctrl-0 = <&state_default>;
@@ -182,23 +182,23 @@
nvidia,tristate = <1>;
};
conf_atb {
- nvidia,pins = "atb", "cdev1", "dap1", "gma",
- "gmc", "gmd", "gpu", "gpu7", "gpv",
- "sdio1", "slxa", "slxk", "uac";
+ nvidia,pins = "atb", "cdev1", "cdev2", "dap1",
+ "gma", "gmc", "gmd", "gpu", "gpu7",
+ "gpv", "sdio1", "slxa", "slxk", "uac";
nvidia,pull = <0>;
nvidia,tristate = <0>;
};
- conf_cdev2 {
- nvidia,pins = "cdev2", "csus", "spia", "spib",
- "spid", "spif";
- nvidia,pull = <1>;
- nvidia,tristate = <1>;
- };
conf_ck32 {
nvidia,pins = "ck32", "ddrc", "pmca", "pmcb",
"pmcc", "pmcd", "pmce", "xm2c", "xm2d";
nvidia,pull = <0>;
};
+ conf_csus {
+ nvidia,pins = "csus", "spia", "spib",
+ "spid", "spif";
+ nvidia,pull = <1>;
+ nvidia,tristate = <1>;
+ };
conf_ddc {
nvidia,pins = "ddc", "dtf", "rm", "sdc", "sdd";
nvidia,pull = <2>;
@@ -240,68 +240,67 @@
};
};
+ i2s@70002800 {
+ status = "okay";
+ };
+
+ serial@70006000 {
+ status = "okay";
+ clock-frequency = <216000000>;
+ };
+
i2c@7000c000 {
+ status = "okay";
clock-frequency = <400000>;
};
i2c@7000c400 {
+ status = "okay";
clock-frequency = <400000>;
};
i2c@7000c500 {
+ status = "okay";
clock-frequency = <400000>;
- };
-
- i2c@7000d000 {
- status = "disable";
- };
-
- i2s@70002800 {
- status = "disable";
- };
-
- i2s@70002a00 {
- status = "disable";
- };
-
- das@70000c00 {
- status = "disable";
- };
- serial@70006000 {
- clock-frequency = < 216000000 >;
- };
+ codec: codec@1a {
+ compatible = "ti,tlv320aic23";
+ reg = <0x1a>;
+ };
- serial@70006040 {
- status = "disable";
+ rtc@56 {
+ compatible = "emmicro,em3027";
+ reg = <0x56>;
+ };
};
- serial@70006200 {
- status = "disable";
+ usb@c5000000 {
+ status = "okay";
};
- serial@70006300 {
- status = "disable";
+ usb@c5004000 {
+ nvidia,phy-reset-gpio = <&gpio 168 0>; /* gpio PV0 */
};
- serial@70006400 {
- status = "disable";
+ usb@c5008000 {
+ status = "okay";
};
sdhci@c8000000 {
- status = "disable";
+ status = "okay";
+ bus-width = <4>;
};
- sdhci@c8000200 {
- status = "disable";
- };
-
- sdhci@c8000400 {
- status = "disable";
+ sdhci@c8000600 {
+ status = "okay";
+ cd-gpios = <&gpio 121 0>; /* gpio PP1 */
+ wp-gpios = <&gpio 122 0>; /* gpio PP2 */
+ bus-width = <4>;
};
- sdhci@c8000600 {
- cd-gpios = <&gpio 121 0>;
- wp-gpios = <&gpio 122 0>;
+ sound {
+ compatible = "nvidia,tegra-audio-trimslice";
+ nvidia,i2s-controller = <&tegra_i2s1>;
+ nvidia,audio-codec = <&codec>;
};
};
diff --git a/arch/arm/boot/dts/tegra-ventana.dts b/arch/arm/boot/dts/tegra-ventana.dts
index 71eb2e50a66..445343b0fbd 100644
--- a/arch/arm/boot/dts/tegra-ventana.dts
+++ b/arch/arm/boot/dts/tegra-ventana.dts
@@ -7,10 +7,10 @@
compatible = "nvidia,ventana", "nvidia,tegra20";
memory {
- reg = < 0x00000000 0x40000000 >;
+ reg = <0x00000000 0x40000000>;
};
- pinmux@70000000 {
+ pinmux {
pinctrl-names = "default";
pinctrl-0 = <&state_default>;
@@ -240,38 +240,82 @@
};
};
+ i2s@70002800 {
+ status = "okay";
+ };
+
+ serial@70006300 {
+ status = "okay";
+ clock-frequency = <216000000>;
+ };
+
i2c@7000c000 {
+ status = "okay";
clock-frequency = <400000>;
wm8903: wm8903@1a {
compatible = "wlf,wm8903";
reg = <0x1a>;
interrupt-parent = <&gpio>;
- interrupts = < 187 0x04 >;
+ interrupts = <187 0x04>;
gpio-controller;
#gpio-cells = <2>;
micdet-cfg = <0>;
micdet-delay = <100>;
- gpio-cfg = < 0xffffffff 0xffffffff 0 0xffffffff 0xffffffff >;
+ gpio-cfg = <0xffffffff 0xffffffff 0 0xffffffff 0xffffffff>;
+ };
+
+ /* ALS and proximity sensor */
+ isl29018@44 {
+ compatible = "isil,isl29018";
+ reg = <0x44>;
+ interrupt-parent = <&gpio>;
+ interrupts = <202 0x04>; /* gpio PZ2 */
};
};
i2c@7000c400 {
+ status = "okay";
clock-frequency = <400000>;
};
i2c@7000c500 {
+ status = "okay";
clock-frequency = <400000>;
};
i2c@7000d000 {
+ status = "okay";
clock-frequency = <400000>;
};
- i2s@70002a00 {
- status = "disable";
+ usb@c5000000 {
+ status = "okay";
+ };
+
+ usb@c5004000 {
+ status = "okay";
+ nvidia,phy-reset-gpio = <&gpio 169 0>; /* gpio PV1 */
+ };
+
+ usb@c5008000 {
+ status = "okay";
+ };
+
+ sdhci@c8000400 {
+ status = "okay";
+ cd-gpios = <&gpio 69 0>; /* gpio PI5 */
+ wp-gpios = <&gpio 57 0>; /* gpio PH1 */
+ power-gpios = <&gpio 70 0>; /* gpio PI6 */
+ bus-width = <4>;
+ };
+
+ sdhci@c8000600 {
+ status = "okay";
+ support-8bit;
+ bus-width = <8>;
};
sound {
@@ -294,45 +338,7 @@
nvidia,spkr-en-gpios = <&wm8903 2 0>;
nvidia,hp-det-gpios = <&gpio 178 0>; /* gpio PW2 */
- nvidia,int-mic-en-gpios = <&gpio 184 0>; /*gpio PX0 */
+ nvidia,int-mic-en-gpios = <&gpio 184 0>; /* gpio PX0 */
nvidia,ext-mic-en-gpios = <&gpio 185 0>; /* gpio PX1 */
};
-
- serial@70006000 {
- status = "disable";
- };
-
- serial@70006040 {
- status = "disable";
- };
-
- serial@70006200 {
- status = "disable";
- };
-
- serial@70006300 {
- clock-frequency = < 216000000 >;
- };
-
- serial@70006400 {
- status = "disable";
- };
-
- sdhci@c8000000 {
- status = "disable";
- };
-
- sdhci@c8000200 {
- status = "disable";
- };
-
- sdhci@c8000400 {
- cd-gpios = <&gpio 69 0>; /* gpio PI5 */
- wp-gpios = <&gpio 57 0>; /* gpio PH1 */
- power-gpios = <&gpio 70 0>; /* gpio PI6 */
- };
-
- sdhci@c8000600 {
- support-8bit;
- };
};
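
The board files above (paz00, seaboard, trimslice, ventana) enable peripherals by overriding the SoC-wide default of status = "disable" that tegra20.dtsi (below) now sets on every UART, I2C, USB and SDHCI node; the OF core only treats a node as available when its status is "okay"/"ok" or the property is absent. A minimal sketch of that check, using of_device_is_available(); the helper name and message are hypothetical, only the compatible string comes from this diff:

#include <linux/of.h>
#include <linux/printk.h>

/* Hypothetical example, not part of this patch. */
static void __init report_enabled_uarts(void)
{
	struct device_node *np;

	/* Walk every Tegra20 UART node in the device tree. */
	for_each_compatible_node(np, NULL, "nvidia,tegra20-uart") {
		/*
		 * Nodes left at the .dtsi default of status = "disable"
		 * are reported as unavailable and never probed.
		 */
		if (of_device_is_available(np))
			pr_info("%s: enabled by the board file\n",
				np->full_name);
	}
}
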
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 108e894a892..c417d67e902 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -4,207 +4,242 @@
compatible = "nvidia,tegra20";
interrupt-parent = <&intc>;
- pmc@7000f400 {
- compatible = "nvidia,tegra20-pmc";
- reg = <0x7000e400 0x400>;
- };
-
- intc: interrupt-controller@50041000 {
+ intc: interrupt-controller {
compatible = "arm,cortex-a9-gic";
+ reg = <0x50041000 0x1000
+ 0x50040100 0x0100>;
interrupt-controller;
#interrupt-cells = <3>;
- reg = < 0x50041000 0x1000 >,
- < 0x50040100 0x0100 >;
};
- pmu {
- compatible = "arm,cortex-a9-pmu";
- interrupts = <0 56 0x04
- 0 57 0x04>;
- };
-
- apbdma: dma@6000a000 {
+ apbdma: dma {
compatible = "nvidia,tegra20-apbdma";
reg = <0x6000a000 0x1200>;
- interrupts = < 0 104 0x04
- 0 105 0x04
- 0 106 0x04
- 0 107 0x04
- 0 108 0x04
- 0 109 0x04
- 0 110 0x04
- 0 111 0x04
- 0 112 0x04
- 0 113 0x04
- 0 114 0x04
- 0 115 0x04
- 0 116 0x04
- 0 117 0x04
- 0 118 0x04
- 0 119 0x04 >;
- };
-
- i2c@7000c000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "nvidia,tegra20-i2c";
- reg = <0x7000C000 0x100>;
- interrupts = < 0 38 0x04 >;
- };
-
- i2c@7000c400 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "nvidia,tegra20-i2c";
- reg = <0x7000C400 0x100>;
- interrupts = < 0 84 0x04 >;
+ interrupts = <0 104 0x04
+ 0 105 0x04
+ 0 106 0x04
+ 0 107 0x04
+ 0 108 0x04
+ 0 109 0x04
+ 0 110 0x04
+ 0 111 0x04
+ 0 112 0x04
+ 0 113 0x04
+ 0 114 0x04
+ 0 115 0x04
+ 0 116 0x04
+ 0 117 0x04
+ 0 118 0x04
+ 0 119 0x04>;
+ };
+
+ ahb {
+ compatible = "nvidia,tegra20-ahb";
+ reg = <0x6000c004 0x10c>; /* AHB Arbitration + Gizmo Controller */
+ };
+
+ gpio: gpio {
+ compatible = "nvidia,tegra20-gpio";
+ reg = <0x6000d000 0x1000>;
+ interrupts = <0 32 0x04
+ 0 33 0x04
+ 0 34 0x04
+ 0 35 0x04
+ 0 55 0x04
+ 0 87 0x04
+ 0 89 0x04>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ #interrupt-cells = <2>;
+ interrupt-controller;
};
- i2c@7000c500 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "nvidia,tegra20-i2c";
- reg = <0x7000C500 0x100>;
- interrupts = < 0 92 0x04 >;
+ pinmux: pinmux {
+ compatible = "nvidia,tegra20-pinmux";
+ reg = <0x70000014 0x10 /* Tri-state registers */
+ 0x70000080 0x20 /* Mux registers */
+ 0x700000a0 0x14 /* Pull-up/down registers */
+ 0x70000868 0xa8>; /* Pad control registers */
};
- i2c@7000d000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "nvidia,tegra20-i2c-dvc";
- reg = <0x7000D000 0x200>;
- interrupts = < 0 53 0x04 >;
+ das {
+ compatible = "nvidia,tegra20-das";
+ reg = <0x70000c00 0x80>;
};
tegra_i2s1: i2s@70002800 {
compatible = "nvidia,tegra20-i2s";
reg = <0x70002800 0x200>;
- interrupts = < 0 13 0x04 >;
- nvidia,dma-request-selector = < &apbdma 2 >;
+ interrupts = <0 13 0x04>;
+ nvidia,dma-request-selector = <&apbdma 2>;
+ status = "disable";
};
tegra_i2s2: i2s@70002a00 {
compatible = "nvidia,tegra20-i2s";
reg = <0x70002a00 0x200>;
- interrupts = < 0 3 0x04 >;
- nvidia,dma-request-selector = < &apbdma 1 >;
- };
-
- das@70000c00 {
- compatible = "nvidia,tegra20-das";
- reg = <0x70000c00 0x80>;
- };
-
- gpio: gpio@6000d000 {
- compatible = "nvidia,tegra20-gpio";
- reg = < 0x6000d000 0x1000 >;
- interrupts = < 0 32 0x04
- 0 33 0x04
- 0 34 0x04
- 0 35 0x04
- 0 55 0x04
- 0 87 0x04
- 0 89 0x04 >;
- #gpio-cells = <2>;
- gpio-controller;
- #interrupt-cells = <2>;
- interrupt-controller;
- };
-
- pinmux: pinmux@70000000 {
- compatible = "nvidia,tegra20-pinmux";
- reg = < 0x70000014 0x10 /* Tri-state registers */
- 0x70000080 0x20 /* Mux registers */
- 0x700000a0 0x14 /* Pull-up/down registers */
- 0x70000868 0xa8 >; /* Pad control registers */
+ interrupts = <0 3 0x04>;
+ nvidia,dma-request-selector = <&apbdma 1>;
+ status = "disable";
};
serial@70006000 {
compatible = "nvidia,tegra20-uart";
reg = <0x70006000 0x40>;
reg-shift = <2>;
- interrupts = < 0 36 0x04 >;
+ interrupts = <0 36 0x04>;
+ status = "disable";
};
serial@70006040 {
compatible = "nvidia,tegra20-uart";
reg = <0x70006040 0x40>;
reg-shift = <2>;
- interrupts = < 0 37 0x04 >;
+ interrupts = <0 37 0x04>;
+ status = "disable";
};
serial@70006200 {
compatible = "nvidia,tegra20-uart";
reg = <0x70006200 0x100>;
reg-shift = <2>;
- interrupts = < 0 46 0x04 >;
+ interrupts = <0 46 0x04>;
+ status = "disable";
};
serial@70006300 {
compatible = "nvidia,tegra20-uart";
reg = <0x70006300 0x100>;
reg-shift = <2>;
- interrupts = < 0 90 0x04 >;
+ interrupts = <0 90 0x04>;
+ status = "disable";
};
serial@70006400 {
compatible = "nvidia,tegra20-uart";
reg = <0x70006400 0x100>;
reg-shift = <2>;
- interrupts = < 0 91 0x04 >;
+ interrupts = <0 91 0x04>;
+ status = "disable";
};
- emc@7000f400 {
+ i2c@7000c000 {
+ compatible = "nvidia,tegra20-i2c";
+ reg = <0x7000c000 0x100>;
+ interrupts = <0 38 0x04>;
#address-cells = <1>;
#size-cells = <0>;
- compatible = "nvidia,tegra20-emc";
- reg = <0x7000f400 0x200>;
+ status = "disable";
};
- sdhci@c8000000 {
- compatible = "nvidia,tegra20-sdhci";
- reg = <0xc8000000 0x200>;
- interrupts = < 0 14 0x04 >;
+ i2c@7000c400 {
+ compatible = "nvidia,tegra20-i2c";
+ reg = <0x7000c400 0x100>;
+ interrupts = <0 84 0x04>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disable";
};
- sdhci@c8000200 {
- compatible = "nvidia,tegra20-sdhci";
- reg = <0xc8000200 0x200>;
- interrupts = < 0 15 0x04 >;
+ i2c@7000c500 {
+ compatible = "nvidia,tegra20-i2c";
+ reg = <0x7000c500 0x100>;
+ interrupts = <0 92 0x04>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disable";
};
- sdhci@c8000400 {
- compatible = "nvidia,tegra20-sdhci";
- reg = <0xc8000400 0x200>;
- interrupts = < 0 19 0x04 >;
+ i2c@7000d000 {
+ compatible = "nvidia,tegra20-i2c-dvc";
+ reg = <0x7000d000 0x200>;
+ interrupts = <0 53 0x04>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disable";
};
- sdhci@c8000600 {
- compatible = "nvidia,tegra20-sdhci";
- reg = <0xc8000600 0x200>;
- interrupts = < 0 31 0x04 >;
+ pmc {
+ compatible = "nvidia,tegra20-pmc";
+ reg = <0x7000e400 0x400>;
+ };
+
+ mc {
+ compatible = "nvidia,tegra20-mc";
+ reg = <0x7000f000 0x024
+ 0x7000f03c 0x3c4>;
+ interrupts = <0 77 0x04>;
+ };
+
+ gart {
+ compatible = "nvidia,tegra20-gart";
+ reg = <0x7000f024 0x00000018 /* controller registers */
+ 0x58000000 0x02000000>; /* GART aperture */
+ };
+
+ emc {
+ compatible = "nvidia,tegra20-emc";
+ reg = <0x7000f400 0x200>;
+ #address-cells = <1>;
+ #size-cells = <0>;
};
usb@c5000000 {
compatible = "nvidia,tegra20-ehci", "usb-ehci";
reg = <0xc5000000 0x4000>;
- interrupts = < 0 20 0x04 >;
+ interrupts = <0 20 0x04>;
phy_type = "utmi";
nvidia,has-legacy-mode;
+ status = "disable";
};
usb@c5004000 {
compatible = "nvidia,tegra20-ehci", "usb-ehci";
reg = <0xc5004000 0x4000>;
- interrupts = < 0 21 0x04 >;
+ interrupts = <0 21 0x04>;
phy_type = "ulpi";
+ status = "disable";
};
usb@c5008000 {
compatible = "nvidia,tegra20-ehci", "usb-ehci";
reg = <0xc5008000 0x4000>;
- interrupts = < 0 97 0x04 >;
+ interrupts = <0 97 0x04>;
phy_type = "utmi";
+ status = "disable";
+ };
+
+ sdhci@c8000000 {
+ compatible = "nvidia,tegra20-sdhci";
+ reg = <0xc8000000 0x200>;
+ interrupts = <0 14 0x04>;
+ status = "disable";
};
-};
+ sdhci@c8000200 {
+ compatible = "nvidia,tegra20-sdhci";
+ reg = <0xc8000200 0x200>;
+ interrupts = <0 15 0x04>;
+ status = "disable";
+ };
+
+ sdhci@c8000400 {
+ compatible = "nvidia,tegra20-sdhci";
+ reg = <0xc8000400 0x200>;
+ interrupts = <0 19 0x04>;
+ status = "disable";
+ };
+
+ sdhci@c8000600 {
+ compatible = "nvidia,tegra20-sdhci";
+ reg = <0xc8000600 0x200>;
+ interrupts = <0 31 0x04>;
+ status = "disable";
+ };
+
+ pmu {
+ compatible = "arm,cortex-a9-pmu";
+ interrupts = <0 56 0x04
+ 0 57 0x04>;
+ };
+};
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index 62a7b39f1c9..2dcc09e784b 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -4,183 +4,268 @@
compatible = "nvidia,tegra30";
interrupt-parent = <&intc>;
- pmc@7000f400 {
- compatible = "nvidia,tegra20-pmc", "nvidia,tegra30-pmc";
- reg = <0x7000e400 0x400>;
- };
-
- intc: interrupt-controller@50041000 {
+ intc: interrupt-controller {
compatible = "arm,cortex-a9-gic";
+ reg = <0x50041000 0x1000
+ 0x50040100 0x0100>;
interrupt-controller;
#interrupt-cells = <3>;
- reg = < 0x50041000 0x1000 >,
- < 0x50040100 0x0100 >;
};
- pmu {
- compatible = "arm,cortex-a9-pmu";
- interrupts = <0 144 0x04
- 0 145 0x04
- 0 146 0x04
- 0 147 0x04>;
- };
-
- apbdma: dma@6000a000 {
+ apbdma: dma {
compatible = "nvidia,tegra30-apbdma", "nvidia,tegra20-apbdma";
reg = <0x6000a000 0x1400>;
- interrupts = < 0 104 0x04
- 0 105 0x04
- 0 106 0x04
- 0 107 0x04
- 0 108 0x04
- 0 109 0x04
- 0 110 0x04
- 0 111 0x04
- 0 112 0x04
- 0 113 0x04
- 0 114 0x04
- 0 115 0x04
- 0 116 0x04
- 0 117 0x04
- 0 118 0x04
- 0 119 0x04
- 0 128 0x04
- 0 129 0x04
- 0 130 0x04
- 0 131 0x04
- 0 132 0x04
- 0 133 0x04
- 0 134 0x04
- 0 135 0x04
- 0 136 0x04
- 0 137 0x04
- 0 138 0x04
- 0 139 0x04
- 0 140 0x04
- 0 141 0x04
- 0 142 0x04
- 0 143 0x04 >;
- };
-
- i2c@7000c000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
- reg = <0x7000C000 0x100>;
- interrupts = < 0 38 0x04 >;
- };
-
- i2c@7000c400 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
- reg = <0x7000C400 0x100>;
- interrupts = < 0 84 0x04 >;
- };
-
- i2c@7000c500 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
- reg = <0x7000C500 0x100>;
- interrupts = < 0 92 0x04 >;
+ interrupts = <0 104 0x04
+ 0 105 0x04
+ 0 106 0x04
+ 0 107 0x04
+ 0 108 0x04
+ 0 109 0x04
+ 0 110 0x04
+ 0 111 0x04
+ 0 112 0x04
+ 0 113 0x04
+ 0 114 0x04
+ 0 115 0x04
+ 0 116 0x04
+ 0 117 0x04
+ 0 118 0x04
+ 0 119 0x04
+ 0 128 0x04
+ 0 129 0x04
+ 0 130 0x04
+ 0 131 0x04
+ 0 132 0x04
+ 0 133 0x04
+ 0 134 0x04
+ 0 135 0x04
+ 0 136 0x04
+ 0 137 0x04
+ 0 138 0x04
+ 0 139 0x04
+ 0 140 0x04
+ 0 141 0x04
+ 0 142 0x04
+ 0 143 0x04>;
};
- i2c@7000c700 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
- reg = <0x7000c700 0x100>;
- interrupts = < 0 120 0x04 >;
+ ahb: ahb {
+ compatible = "nvidia,tegra30-ahb";
+ reg = <0x6000c004 0x14c>; /* AHB Arbitration + Gizmo Controller */
};
- i2c@7000d000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
- reg = <0x7000D000 0x100>;
- interrupts = < 0 53 0x04 >;
- };
-
- gpio: gpio@6000d000 {
+ gpio: gpio {
compatible = "nvidia,tegra30-gpio", "nvidia,tegra20-gpio";
- reg = < 0x6000d000 0x1000 >;
- interrupts = < 0 32 0x04
- 0 33 0x04
- 0 34 0x04
- 0 35 0x04
- 0 55 0x04
- 0 87 0x04
- 0 89 0x04
- 0 125 0x04 >;
+ reg = <0x6000d000 0x1000>;
+ interrupts = <0 32 0x04
+ 0 33 0x04
+ 0 34 0x04
+ 0 35 0x04
+ 0 55 0x04
+ 0 87 0x04
+ 0 89 0x04
+ 0 125 0x04>;
#gpio-cells = <2>;
gpio-controller;
#interrupt-cells = <2>;
interrupt-controller;
};
+ pinmux: pinmux {
+ compatible = "nvidia,tegra30-pinmux";
+ reg = <0x70000868 0xd0 /* Pad control registers */
+ 0x70003000 0x3e0>; /* Mux registers */
+ };
+
serial@70006000 {
compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
reg = <0x70006000 0x40>;
reg-shift = <2>;
- interrupts = < 0 36 0x04 >;
+ interrupts = <0 36 0x04>;
+ status = "disable";
};
serial@70006040 {
compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
reg = <0x70006040 0x40>;
reg-shift = <2>;
- interrupts = < 0 37 0x04 >;
+ interrupts = <0 37 0x04>;
+ status = "disable";
};
serial@70006200 {
compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
reg = <0x70006200 0x100>;
reg-shift = <2>;
- interrupts = < 0 46 0x04 >;
+ interrupts = <0 46 0x04>;
+ status = "disable";
};
serial@70006300 {
compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
reg = <0x70006300 0x100>;
reg-shift = <2>;
- interrupts = < 0 90 0x04 >;
+ interrupts = <0 90 0x04>;
+ status = "disable";
};
serial@70006400 {
compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
reg = <0x70006400 0x100>;
reg-shift = <2>;
- interrupts = < 0 91 0x04 >;
+ interrupts = <0 91 0x04>;
+ status = "disable";
+ };
+
+ i2c@7000c000 {
+ compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+ reg = <0x7000c000 0x100>;
+ interrupts = <0 38 0x04>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disable";
+ };
+
+ i2c@7000c400 {
+ compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+ reg = <0x7000c400 0x100>;
+ interrupts = <0 84 0x04>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disable";
+ };
+
+ i2c@7000c500 {
+ compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+ reg = <0x7000c500 0x100>;
+ interrupts = <0 92 0x04>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disable";
+ };
+
+ i2c@7000c700 {
+ compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+ reg = <0x7000c700 0x100>;
+ interrupts = <0 120 0x04>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disable";
+ };
+
+ i2c@7000d000 {
+ compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
+ reg = <0x7000d000 0x100>;
+ interrupts = <0 53 0x04>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disable";
+ };
+
+ pmc {
+ compatible = "nvidia,tegra20-pmc", "nvidia,tegra30-pmc";
+ reg = <0x7000e400 0x400>;
+ };
+
+ mc {
+ compatible = "nvidia,tegra30-mc";
+ reg = <0x7000f000 0x010
+ 0x7000f03c 0x1b4
+ 0x7000f200 0x028
+ 0x7000f284 0x17c>;
+ interrupts = <0 77 0x04>;
+ };
+
+ smmu {
+ compatible = "nvidia,tegra30-smmu";
+ reg = <0x7000f010 0x02c
+ 0x7000f1f0 0x010
+ 0x7000f228 0x05c>;
+ nvidia,#asids = <4>; /* # of ASIDs */
+ dma-window = <0 0x40000000>; /* IOVA start & length */
+ nvidia,ahb = <&ahb>;
+ };
+
+ ahub {
+ compatible = "nvidia,tegra30-ahub";
+ reg = <0x70080000 0x200
+ 0x70080200 0x100>;
+ interrupts = <0 103 0x04>;
+ nvidia,dma-request-selector = <&apbdma 1>;
+
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ tegra_i2s0: i2s@70080300 {
+ compatible = "nvidia,tegra30-i2s";
+ reg = <0x70080300 0x100>;
+ nvidia,ahub-cif-ids = <4 4>;
+ status = "disable";
+ };
+
+ tegra_i2s1: i2s@70080400 {
+ compatible = "nvidia,tegra30-i2s";
+ reg = <0x70080400 0x100>;
+ nvidia,ahub-cif-ids = <5 5>;
+ status = "disable";
+ };
+
+ tegra_i2s2: i2s@70080500 {
+ compatible = "nvidia,tegra30-i2s";
+ reg = <0x70080500 0x100>;
+ nvidia,ahub-cif-ids = <6 6>;
+ status = "disable";
+ };
+
+ tegra_i2s3: i2s@70080600 {
+ compatible = "nvidia,tegra30-i2s";
+ reg = <0x70080600 0x100>;
+ nvidia,ahub-cif-ids = <7 7>;
+ status = "disable";
+ };
+
+ tegra_i2s4: i2s@70080700 {
+ compatible = "nvidia,tegra30-i2s";
+ reg = <0x70080700 0x100>;
+ nvidia,ahub-cif-ids = <8 8>;
+ status = "disable";
+ };
};
sdhci@78000000 {
compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
reg = <0x78000000 0x200>;
- interrupts = < 0 14 0x04 >;
+ interrupts = <0 14 0x04>;
+ status = "disable";
};
sdhci@78000200 {
compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
reg = <0x78000200 0x200>;
- interrupts = < 0 15 0x04 >;
+ interrupts = <0 15 0x04>;
+ status = "disable";
};
sdhci@78000400 {
compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
reg = <0x78000400 0x200>;
- interrupts = < 0 19 0x04 >;
+ interrupts = <0 19 0x04>;
+ status = "disable";
};
sdhci@78000600 {
compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
reg = <0x78000600 0x200>;
- interrupts = < 0 31 0x04 >;
+ interrupts = <0 31 0x04>;
+ status = "disable";
};
- pinmux: pinmux@70000000 {
- compatible = "nvidia,tegra30-pinmux";
- reg = < 0x70000868 0xd0 /* Pad control registers */
- 0x70003000 0x3e0 >; /* Mux registers */
+ pmu {
+ compatible = "arm,cortex-a9-pmu";
+ interrupts = <0 144 0x04
+ 0 145 0x04
+ 0 146 0x04
+ 0 147 0x04>;
};
};
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 595ecd290eb..9d7eb530f95 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -173,7 +173,8 @@ find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_
read_lock_irqsave(&device_info->lock, flags);
list_for_each_entry(b, &device_info->safe_buffers, node)
- if (b->safe_dma_addr == safe_dma_addr) {
+ if (b->safe_dma_addr <= safe_dma_addr &&
+ b->safe_dma_addr + b->size > safe_dma_addr) {
rb = b;
break;
}
@@ -254,7 +255,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
if (buf == NULL) {
dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
__func__, ptr);
- return ~0;
+ return DMA_ERROR_CODE;
}
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -307,8 +308,9 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
* substitute the safe buffer for the unsafe one.
* (basically move the buffer from an unsafe area to a safe one)
*/
-dma_addr_t __dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir)
+static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
dma_addr_t dma_addr;
int ret;
@@ -320,21 +322,20 @@ dma_addr_t __dma_map_page(struct device *dev, struct page *page,
ret = needs_bounce(dev, dma_addr, size);
if (ret < 0)
- return ~0;
+ return DMA_ERROR_CODE;
if (ret == 0) {
- __dma_page_cpu_to_dev(page, offset, size, dir);
+ arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
return dma_addr;
}
if (PageHighMem(page)) {
dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
- return ~0;
+ return DMA_ERROR_CODE;
}
return map_single(dev, page_address(page) + offset, size, dir);
}
-EXPORT_SYMBOL(__dma_map_page);
/*
* see if a mapped address was really a "safe" buffer and if so, copy
@@ -342,8 +343,8 @@ EXPORT_SYMBOL(__dma_map_page);
* the safe buffer. (basically return things back to the way they
* should be)
*/
-void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir)
+static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
struct safe_buffer *buf;
@@ -352,19 +353,18 @@ void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
buf = find_safe_buffer_dev(dev, dma_addr, __func__);
if (!buf) {
- __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
- dma_addr & ~PAGE_MASK, size, dir);
+ arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
return;
}
unmap_single(dev, buf, size, dir);
}
-EXPORT_SYMBOL(__dma_unmap_page);
-int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
- unsigned long off, size_t sz, enum dma_data_direction dir)
+static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+ size_t sz, enum dma_data_direction dir)
{
struct safe_buffer *buf;
+ unsigned long off;
dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
__func__, addr, off, sz, dir);
@@ -373,6 +373,8 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
if (!buf)
return 1;
+ off = addr - buf->safe_dma_addr;
+
BUG_ON(buf->direction != dir);
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -388,12 +390,21 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
}
return 0;
}
-EXPORT_SYMBOL(dmabounce_sync_for_cpu);
-int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
- unsigned long off, size_t sz, enum dma_data_direction dir)
+static void dmabounce_sync_for_cpu(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
+ return;
+
+ arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
+}
+
+static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+ size_t sz, enum dma_data_direction dir)
{
struct safe_buffer *buf;
+ unsigned long off;
dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
__func__, addr, off, sz, dir);
@@ -402,6 +413,8 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
if (!buf)
return 1;
+ off = addr - buf->safe_dma_addr;
+
BUG_ON(buf->direction != dir);
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -417,7 +430,38 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
}
return 0;
}
-EXPORT_SYMBOL(dmabounce_sync_for_device);
+
+static void dmabounce_sync_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ if (!__dmabounce_sync_for_device(dev, handle, size, dir))
+ return;
+
+ arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
+}
+
+static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (dev->archdata.dmabounce)
+ return 0;
+
+ return arm_dma_ops.set_dma_mask(dev, dma_mask);
+}
+
+static struct dma_map_ops dmabounce_ops = {
+ .alloc = arm_dma_alloc,
+ .free = arm_dma_free,
+ .mmap = arm_dma_mmap,
+ .map_page = dmabounce_map_page,
+ .unmap_page = dmabounce_unmap_page,
+ .sync_single_for_cpu = dmabounce_sync_for_cpu,
+ .sync_single_for_device = dmabounce_sync_for_device,
+ .map_sg = arm_dma_map_sg,
+ .unmap_sg = arm_dma_unmap_sg,
+ .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = arm_dma_sync_sg_for_device,
+ .set_dma_mask = dmabounce_set_mask,
+};
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
const char *name, unsigned long size)
@@ -479,6 +523,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
#endif
dev->archdata.dmabounce = device_info;
+ set_dma_ops(dev, &dmabounce_ops);
dev_info(dev, "dmabounce: registered device\n");
@@ -497,6 +542,7 @@ void dmabounce_unregister_dev(struct device *dev)
struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
dev->archdata.dmabounce = NULL;
+ set_dma_ops(dev, NULL);
if (!device_info) {
dev_warn(dev,
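
The dmabounce changes above stop exporting the __dma_map_page()/__dma_unmap_page() overrides and instead package the bounce-buffer paths into a per-device struct dma_map_ops, installed with set_dma_ops() when a device registers and removed again on unregister. Streaming-DMA calls then dispatch through get_dma_ops(), falling back to arm_dma_ops when no per-device table is set. A minimal sketch of that dispatch, mirroring (not copying) the asm-generic/dma-mapping-common.h wrappers; the function name is hypothetical:

#include <linux/dma-mapping.h>

/* Hypothetical wrapper, shown only to illustrate the ops dispatch. */
static inline dma_addr_t example_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	/* Per-device table (e.g. dmabounce_ops) or the default arm_dma_ops. */
	struct dma_map_ops *ops = get_dma_ops(dev);

	/* With dmabounce registered this ends up in dmabounce_map_page(). */
	return ops->map_page(dev, page, offset, size, dir, NULL);
}
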
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index 09a02963cf5..e05a2f1665a 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -33,6 +33,7 @@ CONFIG_MACH_IMX27LITE=y
CONFIG_MACH_PCA100=y
CONFIG_MACH_MXT_TD60=y
CONFIG_MACH_IMX27IPCAM=y
+CONFIG_MACH_IMX27_DT=y
CONFIG_MXC_IRQ_PRIOR=y
CONFIG_MXC_PWM=y
CONFIG_NO_HZ=y
@@ -172,7 +173,7 @@ CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_PCF8563=y
CONFIG_RTC_DRV_IMXDI=y
-CONFIG_RTC_MXC=y
+CONFIG_RTC_DRV_MXC=y
CONFIG_DMADEVICES=y
CONFIG_IMX_SDMA=y
CONFIG_IMX_DMA=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index dc6f6411bbf..b1d3675df72 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -64,6 +64,12 @@ CONFIG_IPV6=y
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_MTD=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_SST25L=y
# CONFIG_STANDALONE is not set
CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=y
@@ -172,7 +178,7 @@ CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_INTF_DEV_UIE_EMUL=y
-CONFIG_RTC_MXC=y
+CONFIG_RTC_DRV_MXC=y
CONFIG_DMADEVICES=y
CONFIG_IMX_SDMA=y
CONFIG_EXT2_FS=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index 1ebbf451c48..5406c23a02e 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -22,6 +22,7 @@ CONFIG_BLK_DEV_INTEGRITY=y
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_MXS=y
+CONFIG_MACH_MXS_DT=y
CONFIG_MACH_MX23EVK=y
CONFIG_MACH_MX28EVK=y
CONFIG_MACH_STMP378X_DEVB=y
diff --git a/arch/arm/configs/prima2_defconfig b/arch/arm/configs/prima2_defconfig
new file mode 100644
index 00000000000..c328ac65479
--- /dev/null
+++ b/arch/arm/configs/prima2_defconfig
@@ -0,0 +1,69 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_ARCH_PRIMA2=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_KEXEC=y
+CONFIG_BINFMT_MISC=y
+CONFIG_PM_RUNTIME=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_SERIAL_SIRFSOC=y
+CONFIG_SERIAL_SIRFSOC_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_SIRF=y
+CONFIG_SPI=y
+CONFIG_SPI_SIRF=y
+CONFIG_SPI_SPIDEV=y
+# CONFIG_HWMON is not set
+# CONFIG_HID_SUPPORT is not set
+CONFIG_USB_GADGET=y
+CONFIG_USB_FILE_STORAGE=m
+CONFIG_USB_MASS_STORAGE=m
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_DMADEVICES=y
+CONFIG_DMADEVICES_DEBUG=y
+CONFIG_DMADEVICES_VDEBUG=y
+CONFIG_SIRF_DMA=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CRAMFS=y
+CONFIG_ROMFS_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_INFO=y
+CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/spear13xx_defconfig b/arch/arm/configs/spear13xx_defconfig
new file mode 100644
index 00000000000..1fdb82694ca
--- /dev/null
+++ b/arch/arm/configs/spear13xx_defconfig
@@ -0,0 +1,95 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_PLAT_SPEAR=y
+CONFIG_ARCH_SPEAR13XX=y
+CONFIG_MACH_SPEAR1310=y
+CONFIG_MACH_SPEAR1340=y
+# CONFIG_SWP_EMULATE is not set
+CONFIG_SMP=y
+# CONFIG_SMP_ON_UP is not set
+# CONFIG_ARM_CPU_TOPOLOGY is not set
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
+CONFIG_BINFMT_MISC=y
+CONFIG_NET=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_FSMC=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_ATA=y
+# CONFIG_SATA_PMP is not set
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_PATA_ARASAN_CF=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_WLAN is not set
+CONFIG_INPUT_FF_MEMLESS=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_SPEAR=y
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_RAW_DRIVER=y
+CONFIG_MAX_RAW_DEVS=8192
+CONFIG_I2C=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_SPI=y
+CONFIG_SPI_PL022=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_PL061=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_MPCORE_WATCHDOG=y
+# CONFIG_HID_SUPPORT is not set
+CONFIG_USB=y
+# CONFIG_USB_DEVICE_CLASS is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_SPEAR=y
+CONFIG_RTC_CLASS=y
+CONFIG_DMADEVICES=y
+CONFIG_DW_DMAC=y
+CONFIG_DMATEST=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_AUTOFS4_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=m
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_INFO=y
diff --git a/arch/arm/configs/spear3xx_defconfig b/arch/arm/configs/spear3xx_defconfig
index 7ed42912d69..865980c5f21 100644
--- a/arch/arm/configs/spear3xx_defconfig
+++ b/arch/arm/configs/spear3xx_defconfig
@@ -14,6 +14,9 @@ CONFIG_BINFMT_MISC=y
CONFIG_NET=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_FSMC=y
CONFIG_BLK_DEV_RAM=y
@@ -73,6 +76,7 @@ CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=m
diff --git a/arch/arm/configs/spear6xx_defconfig b/arch/arm/configs/spear6xx_defconfig
index cf94bc73a0e..a2a1265f86b 100644
--- a/arch/arm/configs/spear6xx_defconfig
+++ b/arch/arm/configs/spear6xx_defconfig
@@ -8,11 +8,13 @@ CONFIG_MODVERSIONS=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_PLAT_SPEAR=y
CONFIG_ARCH_SPEAR6XX=y
-CONFIG_BOARD_SPEAR600_DT=y
CONFIG_BINFMT_MISC=y
CONFIG_NET=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_FSMC=y
CONFIG_BLK_DEV_RAM=y
@@ -64,6 +66,7 @@ CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=m
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 351d6708c3a..1198dd61c7c 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -45,6 +45,7 @@ CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_IDLE=y
CONFIG_VFP=y
+CONFIG_PM_RUNTIME=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -91,6 +92,8 @@ CONFIG_USB_NET_SMSC75XX=y
CONFIG_USB_NET_SMSC95XX=y
# CONFIG_WLAN is not set
CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_MPU3050=y
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
@@ -103,12 +106,15 @@ CONFIG_I2C=y
CONFIG_I2C_TEGRA=y
CONFIG_SPI=y
CONFIG_SPI_TEGRA=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_BATTERY_SBS=y
CONFIG_SENSORS_LM90=y
CONFIG_MFD_TPS6586X=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_TPS62360=y
CONFIG_REGULATOR_TPS6586X=y
CONFIG_SOUND=y
CONFIG_SND=y
@@ -133,16 +139,19 @@ CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_TEGRA=y
CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_EM3027=y
CONFIG_RTC_DRV_TEGRA=y
CONFIG_STAGING=y
-CONFIG_IIO=y
CONFIG_SENSORS_ISL29018=y
+CONFIG_SENSORS_ISL29028=y
CONFIG_SENSORS_AK8975=y
CONFIG_MFD_NVEC=y
CONFIG_KEYBOARD_NVEC=y
CONFIG_SERIO_NVEC_PS2=y
CONFIG_TEGRA_IOMMU_GART=y
CONFIG_TEGRA_IOMMU_SMMU=y
+CONFIG_MEMORY=y
+CONFIG_IIO=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 7aa368003b0..b69c0d3285f 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -7,12 +7,16 @@
#define ASMARM_DEVICE_H
struct dev_archdata {
+ struct dma_map_ops *dma_ops;
#ifdef CONFIG_DMABOUNCE
struct dmabounce_device_info *dmabounce;
#endif
#ifdef CONFIG_IOMMU_API
void *iommu; /* private IOMMU data */
#endif
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+ struct dma_iommu_mapping *mapping;
+#endif
};
struct omap_device;
diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h
new file mode 100644
index 00000000000..3ed37b4d93d
--- /dev/null
+++ b/arch/arm/include/asm/dma-contiguous.h
@@ -0,0 +1,15 @@
+#ifndef ASMARM_DMA_CONTIGUOUS_H
+#define ASMARM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_CMA
+
+#include <linux/types.h>
+#include <asm-generic/dma-contiguous.h>
+
+void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
+
+#endif
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
new file mode 100644
index 00000000000..799b09409fa
--- /dev/null
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -0,0 +1,34 @@
+#ifndef ASMARM_DMA_IOMMU_H
+#define ASMARM_DMA_IOMMU_H
+
+#ifdef __KERNEL__
+
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/kmemcheck.h>
+
+struct dma_iommu_mapping {
+ /* iommu specific data */
+ struct iommu_domain *domain;
+
+ void *bitmap;
+ size_t bits;
+ unsigned int order;
+ dma_addr_t base;
+
+ spinlock_t lock;
+ struct kref kref;
+};
+
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+ int order);
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
+
+int arm_iommu_attach_device(struct device *dev,
+ struct dma_iommu_mapping *mapping);
+
+#endif /* __KERNEL__ */
+#endif
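
The new header above only declares the per-device IOMMU mapping API; a rough usage sketch follows, assuming a platform device and made-up window values (base 0x80000000, 1 GiB, order 0). The helper name and error handling are illustrative, not taken from the patch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <asm/dma-iommu.h>

/* Hypothetical helper: attach one device to a freshly created IOMMU mapping. */
static int example_attach_to_iommu(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int ret;

	/* IO virtual address window; the values here are examples only. */
	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x80000000, 0x40000000, 0);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	/* From here on, dma_map_*() on this device goes through the IOMMU. */
	ret = arm_iommu_attach_device(dev, mapping);
	if (ret) {
		arm_iommu_release_mapping(mapping);
		return ret;
	}

	return 0;
}
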
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index cb3b7c981c4..bbef15d0489 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -5,11 +5,35 @@
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
+#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>
+#define DMA_ERROR_CODE (~0)
+extern struct dma_map_ops arm_dma_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ if (dev && dev->archdata.dma_ops)
+ return dev->archdata.dma_ops;
+ return &arm_dma_ops;
+}
+
+static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+{
+ BUG_ON(!dev);
+ dev->archdata.dma_ops = ops;
+}
+
+#include <asm-generic/dma-mapping-common.h>
+
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+ return get_dma_ops(dev)->set_dma_mask(dev, mask);
+}
+
#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif
@@ -62,68 +86,11 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
#endif
/*
- * The DMA API is built upon the notion of "buffer ownership". A buffer
- * is either exclusively owned by the CPU (and therefore may be accessed
- * by it) or exclusively owned by the DMA device. These helper functions
- * represent the transitions between these two ownership states.
- *
- * Note, however, that on later ARMs, this notion does not work due to
- * speculative prefetches. We model our approach on the assumption that
- * the CPU does do speculative prefetches, which means we clean caches
- * before transfers and delay cache invalidation until transfer completion.
- *
- * Private support functions: these are not part of the API and are
- * liable to change. Drivers must not use these.
- */
-static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
- enum dma_data_direction dir)
-{
- extern void ___dma_single_cpu_to_dev(const void *, size_t,
- enum dma_data_direction);
-
- if (!arch_is_coherent())
- ___dma_single_cpu_to_dev(kaddr, size, dir);
-}
-
-static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
- enum dma_data_direction dir)
-{
- extern void ___dma_single_dev_to_cpu(const void *, size_t,
- enum dma_data_direction);
-
- if (!arch_is_coherent())
- ___dma_single_dev_to_cpu(kaddr, size, dir);
-}
-
-static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
- size_t size, enum dma_data_direction dir)
-{
- extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
- size_t, enum dma_data_direction);
-
- if (!arch_is_coherent())
- ___dma_page_cpu_to_dev(page, off, size, dir);
-}
-
-static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
- size_t size, enum dma_data_direction dir)
-{
- extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
- size_t, enum dma_data_direction);
-
- if (!arch_is_coherent())
- ___dma_page_dev_to_cpu(page, off, size, dir);
-}
-
-extern int dma_supported(struct device *, u64);
-extern int dma_set_mask(struct device *, u64);
-
-/*
* DMA errors are defined by all-bits-set in the DMA address.
*/
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
- return dma_addr == ~0;
+ return dma_addr == DMA_ERROR_CODE;
}
/*
@@ -141,69 +108,118 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
{
}
+extern int dma_supported(struct device *dev, u64 mask);
+
/**
- * dma_alloc_coherent - allocate consistent memory for DMA
+ * arm_dma_alloc - allocate consistent memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @size: required memory size
* @handle: bus-specific DMA address
+ * @attrs: optional attributes that specify mapping properties
*
- * Allocate some uncached, unbuffered memory for a device for
- * performing DMA. This function allocates pages, and will
- * return the CPU-viewed address, and sets @handle to be the
- * device-viewed address.
+ * Allocate some memory for a device for performing DMA. This function
+ * allocates pages, and will return the CPU-viewed address, and sets @handle
+ * to be the device-viewed address.
*/
-extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
+extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, struct dma_attrs *attrs);
+
+#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ void *cpu_addr;
+ BUG_ON(!ops);
+
+ cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+ debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+ return cpu_addr;
+}
/**
- * dma_free_coherent - free memory allocated by dma_alloc_coherent
+ * arm_dma_free - free memory allocated by arm_dma_alloc
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @size: size of memory originally requested in dma_alloc_coherent
* @cpu_addr: CPU-view address returned from dma_alloc_coherent
* @handle: device-view address returned from dma_alloc_coherent
+ * @attrs: optional attributes that specify mapping properties
*
* Free (and unmap) a DMA buffer previously allocated by
- * dma_alloc_coherent().
+ * arm_dma_alloc().
*
* References to memory and mappings associated with cpu_addr/handle
* during and after this call executing are illegal.
*/
-extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
+extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, struct dma_attrs *attrs);
+
+#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+
+ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+ ops->free(dev, size, cpu_addr, dma_handle, attrs);
+}
/**
- * dma_mmap_coherent - map a coherent DMA allocation into user space
+ * arm_dma_mmap - map a coherent DMA allocation into user space
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @vma: vm_area_struct describing requested user mapping
* @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
* @handle: device-view address returned from dma_alloc_coherent
* @size: size of memory originally requested in dma_alloc_coherent
+ * @attrs: optional attributes that specify mapping properties
*
* Map a coherent DMA buffer previously allocated by dma_alloc_coherent
* into user space. The coherent DMA buffer must not be freed by the
* driver until the user space mapping has been released.
*/
-int dma_mmap_coherent(struct device *, struct vm_area_struct *,
- void *, dma_addr_t, size_t);
+extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs);
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
-/**
- * dma_alloc_writecombine - allocate writecombining memory for DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: required memory size
- * @handle: bus-specific DMA address
- *
- * Allocate some uncached, buffered memory for a device for
- * performing DMA. This function allocates pages, and will
- * return the CPU-viewed address, and sets @handle to be the
- * device-viewed address.
- */
-extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
- gfp_t);
+static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size, struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+ return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
+
+static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
+}
-#define dma_free_writecombine(dev,size,cpu_addr,handle) \
- dma_free_coherent(dev,size,cpu_addr,handle)
+static inline void dma_free_writecombine(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
-int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
- void *, dma_addr_t, size_t);
+static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
/*
* This can be called during boot to increase the size of the consistent
@@ -212,8 +228,6 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
*/
extern void __init init_consistent_dma_size(unsigned long size);
-
-#ifdef CONFIG_DMABOUNCE
/*
* For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
* and utilize bounce buffers as needed to work around limited DMA windows.
@@ -253,222 +267,19 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
*/
extern void dmabounce_unregister_dev(struct device *);
-/*
- * The DMA API, implemented by dmabounce.c. See below for descriptions.
- */
-extern dma_addr_t __dma_map_page(struct device *, struct page *,
- unsigned long, size_t, enum dma_data_direction);
-extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
- enum dma_data_direction);
-
-/*
- * Private functions
- */
-int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
- size_t, enum dma_data_direction);
-int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
- size_t, enum dma_data_direction);
-#else
-static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
- unsigned long offset, size_t size, enum dma_data_direction dir)
-{
- return 1;
-}
-static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
- unsigned long offset, size_t size, enum dma_data_direction dir)
-{
- return 1;
-}
-
-
-static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir)
-{
- __dma_page_cpu_to_dev(page, offset, size, dir);
- return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
-static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
-{
- __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
- handle & ~PAGE_MASK, size, dir);
-}
-#endif /* CONFIG_DMABOUNCE */
-
-/**
- * dma_map_single - map a single buffer for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @cpu_addr: CPU direct mapped address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed. The CPU
- * can regain ownership by calling dma_unmap_single() or
- * dma_sync_single_for_cpu().
- */
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
- size_t size, enum dma_data_direction dir)
-{
- unsigned long offset;
- struct page *page;
- dma_addr_t addr;
-
- BUG_ON(!virt_addr_valid(cpu_addr));
- BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
- BUG_ON(!valid_dma_direction(dir));
-
- page = virt_to_page(cpu_addr);
- offset = (unsigned long)cpu_addr & ~PAGE_MASK;
- addr = __dma_map_page(dev, page, offset, size, dir);
- debug_dma_map_page(dev, page, offset, size, dir, addr, true);
-
- return addr;
-}
-
-/**
- * dma_map_page - map a portion of a page for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed. The CPU
- * can regain ownership by calling dma_unmap_page().
- */
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir)
-{
- dma_addr_t addr;
-
- BUG_ON(!valid_dma_direction(dir));
-
- addr = __dma_map_page(dev, page, offset, size, dir);
- debug_dma_map_page(dev, page, offset, size, dir, addr, false);
-
- return addr;
-}
-
-/**
- * dma_unmap_single - unmap a single buffer previously mapped
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_single)
- * @dir: DMA transfer direction (same as passed to dma_map_single)
- *
- * Unmap a single streaming mode DMA translation. The handle and size
- * must match what was provided in the previous dma_map_single() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
-{
- debug_dma_unmap_page(dev, handle, size, dir, true);
- __dma_unmap_page(dev, handle, size, dir);
-}
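
The kerneldoc above spells out the streaming-DMA ownership rules; although these ARM-private inlines are removed in favour of the dma_map_ops-based generic code, the calling pattern they describe (map, let the device run, unmap before the CPU touches the data again) is unchanged. A sketch, with placeholder names and error handling trimmed:

/* Illustrative streaming-DMA round trip. */
#include <linux/dma-mapping.h>

static int example_tx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handle))
                return -ENOMEM;

        /* ... hand 'handle' to the controller and wait for completion ... */

        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE); /* CPU owns buf again */
        return 0;
}
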
-
-/**
- * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_page)
- * @dir: DMA transfer direction (same as passed to dma_map_page)
- *
- * Unmap a page streaming mode DMA translation. The handle and size
- * must match what was provided in the previous dma_map_page() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
-{
- debug_dma_unmap_page(dev, handle, size, dir, false);
- __dma_unmap_page(dev, handle, size, dir);
-}
-
-/**
- * dma_sync_single_range_for_cpu
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @offset: offset of region to start sync
- * @size: size of region to sync
- * @dir: DMA transfer direction (same as passed to dma_map_single)
- *
- * Make physical memory consistent for a single streaming mode DMA
- * translation after a transfer.
- *
- * If you perform a dma_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so. At the
- * next point you give the PCI dma address back to the card, you
- * must first perform a dma_sync_for_device, and then the
- * device again owns the buffer.
- */
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t handle, unsigned long offset, size_t size,
- enum dma_data_direction dir)
-{
- BUG_ON(!valid_dma_direction(dir));
-
- debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
-
- if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
- return;
-
- __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t handle, unsigned long offset, size_t size,
- enum dma_data_direction dir)
-{
- BUG_ON(!valid_dma_direction(dir));
-
- debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
-
- if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
- return;
-
- __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- dma_sync_single_range_for_device(dev, handle, 0, size, dir);
-}
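
The partial-sync helpers removed here follow the same ownership hand-off the comment describes: sync_for_cpu before the CPU peeks at a still-mapped buffer, sync_for_device before handing it back. Roughly (a hypothetical RX-descriptor check, names assumed):

/* Illustrative partial sync: let the CPU peek at a buffer that stays mapped. */
#include <linux/dma-mapping.h>

static bool example_rx_ready(struct device *dev, dma_addr_t handle,
                             void *cpu_addr, size_t len)
{
        bool ready;

        dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
        ready = ((u8 *)cpu_addr)[0] != 0;       /* CPU may inspect the data now */
        dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

        return ready;                           /* device owns the buffer again */
}
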
/*
* The scatter list versions of the above methods.
*/
-extern int dma_map_sg(struct device *, struct scatterlist *, int,
- enum dma_data_direction);
-extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
+extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
+ enum dma_data_direction, struct dma_attrs *attrs);
+extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
+ enum dma_data_direction, struct dma_attrs *attrs);
+extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
enum dma_data_direction);
-extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
+extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
enum dma_data_direction);
-extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
- enum dma_data_direction);
-
#endif /* __KERNEL__ */
#endif
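
The arm_dma_*_sg() prototypes above are the scatter/gather back-ends now reached through struct dma_map_ops; drivers keep calling the generic dma_map_sg()/dma_unmap_sg() wrappers. A generic usage sketch (hypothetical helper, two single-page entries):

/* Illustrative scatter/gather mapping of a two-page buffer. */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map_pages(struct device *dev, struct page *p0, struct page *p1)
{
        struct scatterlist sg[2];
        int nents;

        sg_init_table(sg, 2);
        sg_set_page(&sg[0], p0, PAGE_SIZE, 0);
        sg_set_page(&sg[1], p1, PAGE_SIZE, 0);

        nents = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);
        if (!nents)
                return -ENOMEM;

        /* ... hand sg_dma_address()/sg_dma_len() of each entry to the device ... */

        dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
        return 0;
}
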
diff --git a/arch/arm/include/asm/hardware/pl080.h b/arch/arm/include/asm/hardware/pl080.h
index 33c78d7af2e..4eea2107214 100644
--- a/arch/arm/include/asm/hardware/pl080.h
+++ b/arch/arm/include/asm/hardware/pl080.h
@@ -102,6 +102,8 @@
#define PL080_WIDTH_16BIT (0x1)
#define PL080_WIDTH_32BIT (0x2)
+#define PL080N_CONFIG_ITPROT (1 << 20)
+#define PL080N_CONFIG_SECPROT (1 << 19)
#define PL080_CONFIG_HALT (1 << 18)
#define PL080_CONFIG_ACTIVE (1 << 17) /* RO */
#define PL080_CONFIG_LOCK (1 << 16)
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 9af5563dd3e..815c669fec0 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -47,9 +47,9 @@ extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen);
extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
-#define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile unsigned char __force *)(a) = (v))
-#define __raw_writew(v,a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v))
-#define __raw_writel(v,a) (__chk_io_ptr(a), *(volatile unsigned int __force *)(a) = (v))
+#define __raw_writeb(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned char __force *)(a) = (v)))
+#define __raw_writew(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
+#define __raw_writel(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned int __force *)(a) = (v)))
#define __raw_readb(a) (__chk_io_ptr(a), *(volatile unsigned char __force *)(a))
#define __raw_readw(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
@@ -229,11 +229,9 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
__raw_readl(c)); __r; })
-#define writeb_relaxed(v,c) ((void)__raw_writeb(v,c))
-#define writew_relaxed(v,c) ((void)__raw_writew((__force u16) \
- cpu_to_le16(v),c))
-#define writel_relaxed(v,c) ((void)__raw_writel((__force u32) \
- cpu_to_le32(v),c))
+#define writeb_relaxed(v,c) __raw_writeb(v,c)
+#define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c)
+#define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c)
#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
@@ -281,12 +279,12 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
#define ioread16be(p) ({ unsigned int __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
#define ioread32be(p) ({ unsigned int __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
-#define iowrite8(v,p) ({ __iowmb(); (void)__raw_writeb(v, p); })
-#define iowrite16(v,p) ({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_le16(v), p); })
-#define iowrite32(v,p) ({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_le32(v), p); })
+#define iowrite8(v,p) ({ __iowmb(); __raw_writeb(v, p); })
+#define iowrite16(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_le16(v), p); })
+#define iowrite32(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_le32(v), p); })
-#define iowrite16be(v,p) ({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_be16(v), p); })
-#define iowrite32be(v,p) ({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_be32(v), p); })
+#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
#define ioread8_rep(p,d,c) __raw_readsb(p,d,c)
#define ioread16_rep(p,d,c) __raw_readsw(p,d,c)
diff --git a/arch/arm/include/asm/kvm_para.h b/arch/arm/include/asm/kvm_para.h
new file mode 100644
index 00000000000..14fab8f0b95
--- /dev/null
+++ b/arch/arm/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index d7692cafde7..0b1c94b8c65 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -43,6 +43,7 @@ struct machine_desc {
void (*init_irq)(void);
struct sys_timer *timer; /* system tick timer */
void (*init_machine)(void);
+ void (*init_late)(void);
#ifdef CONFIG_MULTI_IRQ_HANDLER
void (*handle_irq)(struct pt_regs *);
#endif
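
The new init_late hook is invoked by the init_machine_late() late_initcall added to arch/arm/kernel/setup.c later in this patch; a board simply points the field at its late-init routine, as the DaVinci and ep93xx boards below do. A hypothetical board descriptor (all example_* names are placeholders):

/* Hypothetical board file using the new hook. */
MACHINE_START(EXAMPLE_BOARD, "Example board")
        .map_io         = example_map_io,
        .init_irq       = example_init_irq,
        .timer          = &example_timer,
        .init_machine   = example_init_machine,
        .init_late      = example_init_late,    /* runs from late_initcall */
MACHINE_END
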
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index b36f3654bf5..a6efcdd6fd2 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -30,6 +30,7 @@ struct map_desc {
#define MT_MEMORY_DTCM 12
#define MT_MEMORY_ITCM 13
#define MT_MEMORY_SO 14
+#define MT_MEMORY_DMA_READY 15
#ifdef CONFIG_MMU
extern void iotable_init(struct map_desc *, int);
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 68388eb4946..b79f8e97f77 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -148,6 +148,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
+#define TIF_SYSCALL_RESTARTSYS 10
#define TIF_POLLING_NRFLAG 16
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
@@ -162,16 +163,17 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
-#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_SYSCALL_RESTARTSYS (1 << TIF_SYSCALL_RESTARTSYS)
/* Checks for any syscall work in entry-common.S */
-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ _TIF_SYSCALL_RESTARTSYS)
/*
* Change these and you break ASM code in entry-common.S
*/
-#define _TIF_WORK_MASK 0x000000ff
+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_RESUME)
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_THREAD_INFO_H */
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 7bd2d3cb895..4afed88d250 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -53,9 +53,13 @@ fast_work_pending:
work_pending:
tst r1, #_TIF_NEED_RESCHED
bne work_resched
- tst r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
- beq no_work_pending
+ /*
+ * TIF_SIGPENDING or TIF_NOTIFY_RESUME must've been set if we got here
+ */
+ ldr r2, [sp, #S_PSR]
mov r0, sp @ 'regs'
+ tst r2, #15 @ are we returning to user mode?
+ bne no_work_pending @ no? just leave, then...
mov r2, why @ 'syscall'
tst r1, #_TIF_SIGPENDING @ delivering a signal?
movne why, #0 @ prevent further restarts
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 14e38261cd3..5700a7ae7f0 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -25,6 +25,7 @@
#include <linux/regset.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
+#include <linux/unistd.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
@@ -917,6 +918,8 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
+ if (why == 0 && test_and_clear_thread_flag(TIF_SYSCALL_RESTARTSYS))
+ scno = __NR_restart_syscall - __NR_SYSCALL_BASE;
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return scno;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index ebfac782593..e15d83bb4ea 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -81,6 +81,7 @@ __setup("fpe=", fpe_setup);
extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);
+extern void setup_dma_zone(struct machine_desc *desc);
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
@@ -800,6 +801,14 @@ static int __init customize_machine(void)
}
arch_initcall(customize_machine);
+static int __init init_machine_late(void)
+{
+ if (machine_desc->init_late)
+ machine_desc->init_late();
+ return 0;
+}
+late_initcall(init_machine_late);
+
#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
@@ -939,12 +948,8 @@ void __init setup_arch(char **cmdline_p)
machine_desc = mdesc;
machine_name = mdesc->name;
-#ifdef CONFIG_ZONE_DMA
- if (mdesc->dma_zone_size) {
- extern unsigned long arm_dma_zone_size;
- arm_dma_zone_size = mdesc->dma_zone_size;
- }
-#endif
+ setup_dma_zone(mdesc);
+
if (mdesc->restart_mode)
reboot_setup(&mdesc->restart_mode);
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 4e5fdd9bd9e..17fc36c41cf 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -29,7 +29,6 @@
*/
#define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
-#define SWI_SYS_RESTART (0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE)
/*
* With EABI, the syscall number has to be loaded into r7.
@@ -50,18 +49,6 @@ const unsigned long sigreturn_codes[7] = {
};
/*
- * Either we support OABI only, or we have EABI with the OABI
- * compat layer enabled. In the latter case we don't know if
- * user space is EABI or not, and if not we must not clobber r7.
- * Always using the OABI syscall solves that issue and works for
- * all those cases.
- */
-const unsigned long syscall_restart_code[2] = {
- SWI_SYS_RESTART, /* swi __NR_restart_syscall */
- 0xe49df004, /* ldr pc, [sp], #4 */
-};
-
-/*
* atomically swap in the new signal mask, and wait for a signal.
*/
asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
@@ -82,10 +69,10 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
old_sigset_t mask;
if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
- __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+ __get_user(mask, &act->sa_mask))
return -EFAULT;
- __get_user(new_ka.sa.sa_flags, &act->sa_flags);
- __get_user(mask, &act->sa_mask);
siginitset(&new_ka.sa.sa_mask, mask);
}
@@ -94,10 +81,10 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
if (!ret && oact) {
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
- __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
return -EFAULT;
- __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
- __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
}
return ret;
@@ -602,15 +589,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
int signr;
/*
- * We want the common case to go fast, which
- * is why we may in certain cases get here from
- * kernel mode. Just return without doing anything
- * if so.
- */
- if (!user_mode(regs))
- return;
-
- /*
* If we were from a system call, check for system call restarting...
*/
if (syscall) {
@@ -626,18 +604,13 @@ static void do_signal(struct pt_regs *regs, int syscall)
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
+ case -ERESTART_RESTARTBLOCK:
regs->ARM_r0 = regs->ARM_ORIG_r0;
regs->ARM_pc = restart_addr;
break;
- case -ERESTART_RESTARTBLOCK:
- regs->ARM_r0 = -EINTR;
- break;
}
}
- if (try_to_freeze())
- goto no_signal;
-
/*
* Get the signal to deliver. When running under ptrace, at this
* point the debugger may change all our registers ...
@@ -652,12 +625,14 @@ static void do_signal(struct pt_regs *regs, int syscall)
* debugger has chosen to restart at a different PC.
*/
if (regs->ARM_pc == restart_addr) {
- if (retval == -ERESTARTNOHAND
+ if (retval == -ERESTARTNOHAND ||
+ retval == -ERESTART_RESTARTBLOCK
|| (retval == -ERESTARTSYS
&& !(ka.sa.sa_flags & SA_RESTART))) {
regs->ARM_r0 = -EINTR;
regs->ARM_pc = continue_addr;
}
+ clear_thread_flag(TIF_SYSCALL_RESTARTSYS);
}
if (test_thread_flag(TIF_RESTORE_SIGMASK))
@@ -677,7 +652,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
return;
}
- no_signal:
if (syscall) {
/*
* Handle restarting a different system call. As above,
@@ -685,38 +659,15 @@ static void do_signal(struct pt_regs *regs, int syscall)
* ignore the restart.
*/
if (retval == -ERESTART_RESTARTBLOCK
- && regs->ARM_pc == continue_addr) {
- if (thumb_mode(regs)) {
- regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
- regs->ARM_pc -= 2;
- } else {
-#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
- regs->ARM_r7 = __NR_restart_syscall;
- regs->ARM_pc -= 4;
-#else
- u32 __user *usp;
-
- regs->ARM_sp -= 4;
- usp = (u32 __user *)regs->ARM_sp;
-
- if (put_user(regs->ARM_pc, usp) == 0) {
- regs->ARM_pc = KERN_RESTART_CODE;
- } else {
- regs->ARM_sp += 4;
- force_sigsegv(0, current);
- }
-#endif
- }
- }
-
- /* If there's no signal to deliver, we just put the saved sigmask
- * back.
- */
- if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
- clear_thread_flag(TIF_RESTORE_SIGMASK);
- sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
- }
+ && regs->ARM_pc == restart_addr)
+ set_thread_flag(TIF_SYSCALL_RESTARTSYS);
}
+
+ /* If there's no signal to deliver, we just put the saved sigmask
+ * back.
+ */
+ if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK))
+ set_current_blocked(&current->saved_sigmask);
}
asmlinkage void
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
index 6fcfe8398aa..5ff067b7c75 100644
--- a/arch/arm/kernel/signal.h
+++ b/arch/arm/kernel/signal.h
@@ -8,7 +8,5 @@
* published by the Free Software Foundation.
*/
#define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500)
-#define KERN_RESTART_CODE (KERN_SIGRETURN_CODE + sizeof(sigreturn_codes))
extern const unsigned long sigreturn_codes[7];
-extern const unsigned long syscall_restart_code[2];
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 3647170e9a1..4928d89758f 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -820,8 +820,6 @@ void __init early_trap_init(void *vectors_base)
*/
memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
sigreturn_codes, sizeof(sigreturn_codes));
- memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
- syscall_restart_code, sizeof(syscall_restart_code));
flush_icache_range(vectors, vectors + PAGE_SIZE);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index f6747246d64..933fc9afe7d 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -436,7 +436,6 @@ void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data)
atslave->dma_dev = &at_hdmac_device.dev;
atslave->cfg = ATC_FIFOCFG_HALFFIFO
| ATC_SRC_H2SEL_HW | ATC_DST_H2SEL_HW;
- atslave->ctrla = ATC_SCSIZE_16 | ATC_DCSIZE_16;
if (mmc_id == 0) /* MCI0 */
atslave->cfg |= ATC_SRC_PER(AT_DMA_ID_MCI0)
| ATC_DST_PER(AT_DMA_ID_MCI0);
diff --git a/arch/arm/mach-at91/include/mach/at_hdmac.h b/arch/arm/mach-at91/include/mach/at_hdmac.h
index fff48d1a0f4..cab0997be3d 100644
--- a/arch/arm/mach-at91/include/mach/at_hdmac.h
+++ b/arch/arm/mach-at91/include/mach/at_hdmac.h
@@ -26,18 +26,11 @@ struct at_dma_platform_data {
/**
* struct at_dma_slave - Controller-specific information about a slave
* @dma_dev: required DMA master device
- * @tx_reg: physical address of data register used for
- * memory-to-peripheral transfers
- * @rx_reg: physical address of data register used for
- * peripheral-to-memory transfers
- * @reg_width: peripheral register width
* @cfg: Platform-specific initializer for the CFG register
- * @ctrla: Platform-specific initializer for the CTRLA register
*/
struct at_dma_slave {
struct device *dma_dev;
u32 cfg;
- u32 ctrla;
};
@@ -64,24 +57,5 @@ struct at_dma_slave {
#define ATC_FIFOCFG_HALFFIFO (0x1 << 28)
#define ATC_FIFOCFG_ENOUGHSPACE (0x2 << 28)
-/* Platform-configurable bits in CTRLA */
-#define ATC_SCSIZE_MASK (0x7 << 16) /* Source Chunk Transfer Size */
-#define ATC_SCSIZE_1 (0x0 << 16)
-#define ATC_SCSIZE_4 (0x1 << 16)
-#define ATC_SCSIZE_8 (0x2 << 16)
-#define ATC_SCSIZE_16 (0x3 << 16)
-#define ATC_SCSIZE_32 (0x4 << 16)
-#define ATC_SCSIZE_64 (0x5 << 16)
-#define ATC_SCSIZE_128 (0x6 << 16)
-#define ATC_SCSIZE_256 (0x7 << 16)
-#define ATC_DCSIZE_MASK (0x7 << 20) /* Destination Chunk Transfer Size */
-#define ATC_DCSIZE_1 (0x0 << 20)
-#define ATC_DCSIZE_4 (0x1 << 20)
-#define ATC_DCSIZE_8 (0x2 << 20)
-#define ATC_DCSIZE_16 (0x3 << 20)
-#define ATC_DCSIZE_32 (0x4 << 20)
-#define ATC_DCSIZE_64 (0x5 << 20)
-#define ATC_DCSIZE_128 (0x6 << 20)
-#define ATC_DCSIZE_256 (0x7 << 20)
#endif /* AT_HDMAC_H */
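
With @tx_reg/@rx_reg/@reg_width/@ctrla gone, board code now supplies only the DMA master device and the CFG word, as the at91sam9g45 MCI hunk above shows. A hedged sketch of the slimmed-down slave setup (the example_* names are placeholders; the CFG bits mirror the MCI0 case in this patch):

/* Illustrative at_dma_slave setup after this change. */
#include <linux/platform_device.h>
#include <mach/at_hdmac.h>

static struct at_dma_slave example_dma_slave;

static void __init example_setup_dma(struct platform_device *hdmac)
{
        example_dma_slave.dma_dev = &hdmac->dev;
        example_dma_slave.cfg = ATC_FIFOCFG_HALFFIFO
                              | ATC_SRC_H2SEL_HW | ATC_DST_H2SEL_HW
                              | ATC_SRC_PER(AT_DMA_ID_MCI0)
                              | ATC_DST_PER(AT_DMA_ID_MCI0);
        /* No per-board CTRLA word any more: chunk sizes are left to the driver. */
}
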
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index dc1afe5be20..0031864e7f1 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -681,6 +681,7 @@ MACHINE_START(DAVINCI_DA830_EVM, "DaVinci DA830/OMAP-L137/AM17x EVM")
.init_irq = cp_intc_init,
.timer = &davinci_timer,
.init_machine = da830_evm_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = da8xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 09f61073c8d..0149fb453be 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -1411,6 +1411,7 @@ MACHINE_START(DAVINCI_DA850_EVM, "DaVinci DA850/OMAP-L138/AM18x EVM")
.init_irq = cp_intc_init,
.timer = &davinci_timer,
.init_machine = da850_evm_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = da8xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index 82ed753fb36..1c7b1f46a8f 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -357,6 +357,7 @@ MACHINE_START(DAVINCI_DM355_EVM, "DaVinci DM355 EVM")
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = dm355_evm_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = davinci_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index d74a8b3445f..8e7703213b0 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -276,6 +276,7 @@ MACHINE_START(DM355_LEOPARD, "DaVinci DM355 leopard")
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = dm355_leopard_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = davinci_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 5bce2b83bb4..688a9c556dc 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -618,6 +618,7 @@ MACHINE_START(DAVINCI_DM365_EVM, "DaVinci DM365 EVM")
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = dm365_evm_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = davinci_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index 3683306e024..d34ed55912b 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -825,6 +825,7 @@ MACHINE_START(DAVINCI_EVM, "DaVinci DM644x EVM")
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = davinci_evm_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = davinci_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index d72ab948d63..958679a20e1 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -788,6 +788,7 @@ MACHINE_START(DAVINCI_DM6467_EVM, "DaVinci DM646x EVM")
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = evm_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = davinci_restart,
MACHINE_END
@@ -798,6 +799,7 @@ MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM")
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = evm_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = davinci_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c
index 672d820e2aa..beecde3a1d2 100644
--- a/arch/arm/mach-davinci/board-mityomapl138.c
+++ b/arch/arm/mach-davinci/board-mityomapl138.c
@@ -572,6 +572,7 @@ MACHINE_START(MITYOMAPL138, "MityDSP-L138/MityARM-1808")
.init_irq = cp_intc_init,
.timer = &davinci_timer,
.init_machine = mityomapl138_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = da8xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index a772bb45570..5de69f2fcca 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -278,6 +278,7 @@ MACHINE_START(NEUROS_OSD2, "Neuros OSD2")
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = davinci_ntosd2_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = davinci_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
index 45e815760a2..dc1208e9e66 100644
--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
@@ -343,6 +343,7 @@ MACHINE_START(OMAPL138_HAWKBOARD, "AM18x/OMAP-L138 Hawkboard")
.init_irq = cp_intc_init,
.timer = &davinci_timer,
.init_machine = omapl138_hawk_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = da8xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c
index 76e67509610..9078acf94ba 100644
--- a/arch/arm/mach-davinci/board-sffsdr.c
+++ b/arch/arm/mach-davinci/board-sffsdr.c
@@ -157,6 +157,7 @@ MACHINE_START(SFFSDR, "Lyrtech SFFSDR")
.init_irq = davinci_irq_init,
.timer = &davinci_timer,
.init_machine = davinci_sffsdr_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = davinci_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/board-tnetv107x-evm.c b/arch/arm/mach-davinci/board-tnetv107x-evm.c
index 5f14e30b00d..ac4e003ad86 100644
--- a/arch/arm/mach-davinci/board-tnetv107x-evm.c
+++ b/arch/arm/mach-davinci/board-tnetv107x-evm.c
@@ -282,6 +282,7 @@ MACHINE_START(TNETV107X, "TNETV107X EVM")
.init_irq = cp_intc_init,
.timer = &davinci_timer,
.init_machine = tnetv107x_evm_board_init,
+ .init_late = davinci_init_late,
.dma_zone_size = SZ_128M,
.restart = tnetv107x_restart,
MACHINE_END
diff --git a/arch/arm/mach-davinci/clock.c b/arch/arm/mach-davinci/clock.c
index 008772e3b84..34668ead53c 100644
--- a/arch/arm/mach-davinci/clock.c
+++ b/arch/arm/mach-davinci/clock.c
@@ -213,7 +213,7 @@ EXPORT_SYMBOL(clk_unregister);
/*
* Disable any unused clocks left on by the bootloader
*/
-static int __init clk_disable_unused(void)
+int __init davinci_clk_disable_unused(void)
{
struct clk *ck;
@@ -237,7 +237,6 @@ static int __init clk_disable_unused(void)
return 0;
}
-late_initcall(clk_disable_unused);
#endif
static unsigned long clk_sysclk_recalc(struct clk *clk)
diff --git a/arch/arm/mach-davinci/common.c b/arch/arm/mach-davinci/common.c
index cb9b2e47510..64b0f65a863 100644
--- a/arch/arm/mach-davinci/common.c
+++ b/arch/arm/mach-davinci/common.c
@@ -117,3 +117,10 @@ void __init davinci_common_init(struct davinci_soc_info *soc_info)
err:
panic("davinci_common_init: SoC Initialization failed\n");
}
+
+void __init davinci_init_late(void)
+{
+ davinci_cpufreq_init();
+ davinci_pm_init();
+ davinci_clk_disable_unused();
+}
diff --git a/arch/arm/mach-davinci/cpufreq.c b/arch/arm/mach-davinci/cpufreq.c
index 031048fec9f..4729eaab0f4 100644
--- a/arch/arm/mach-davinci/cpufreq.c
+++ b/arch/arm/mach-davinci/cpufreq.c
@@ -240,10 +240,9 @@ static struct platform_driver davinci_cpufreq_driver = {
.remove = __exit_p(davinci_cpufreq_remove),
};
-static int __init davinci_cpufreq_init(void)
+int __init davinci_cpufreq_init(void)
{
return platform_driver_probe(&davinci_cpufreq_driver,
davinci_cpufreq_probe);
}
-late_initcall(davinci_cpufreq_init);
diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c
index 95ce019c9b9..a685e9706b7 100644
--- a/arch/arm/mach-davinci/dma.c
+++ b/arch/arm/mach-davinci/dma.c
@@ -353,9 +353,10 @@ static int irq2ctlr(int irq)
*****************************************************************************/
static irqreturn_t dma_irq_handler(int irq, void *data)
{
- int i;
int ctlr;
- unsigned int cnt = 0;
+ u32 sh_ier;
+ u32 sh_ipr;
+ u32 bank;
ctlr = irq2ctlr(irq);
if (ctlr < 0)
@@ -363,41 +364,39 @@ static irqreturn_t dma_irq_handler(int irq, void *data)
dev_dbg(data, "dma_irq_handler\n");
- if ((edma_shadow0_read_array(ctlr, SH_IPR, 0) == 0) &&
- (edma_shadow0_read_array(ctlr, SH_IPR, 1) == 0))
- return IRQ_NONE;
+ sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 0);
+ if (!sh_ipr) {
+ sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 1);
+ if (!sh_ipr)
+ return IRQ_NONE;
+ sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 1);
+ bank = 1;
+ } else {
+ sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 0);
+ bank = 0;
+ }
- while (1) {
- int j;
- if (edma_shadow0_read_array(ctlr, SH_IPR, 0) &
- edma_shadow0_read_array(ctlr, SH_IER, 0))
- j = 0;
- else if (edma_shadow0_read_array(ctlr, SH_IPR, 1) &
- edma_shadow0_read_array(ctlr, SH_IER, 1))
- j = 1;
- else
- break;
- dev_dbg(data, "IPR%d %08x\n", j,
- edma_shadow0_read_array(ctlr, SH_IPR, j));
- for (i = 0; i < 32; i++) {
- int k = (j << 5) + i;
- if ((edma_shadow0_read_array(ctlr, SH_IPR, j) & BIT(i))
- && (edma_shadow0_read_array(ctlr,
- SH_IER, j) & BIT(i))) {
- /* Clear the corresponding IPR bits */
- edma_shadow0_write_array(ctlr, SH_ICR, j,
- BIT(i));
- if (edma_cc[ctlr]->intr_data[k].callback)
- edma_cc[ctlr]->intr_data[k].callback(
- k, DMA_COMPLETE,
- edma_cc[ctlr]->intr_data[k].
- data);
- }
+ do {
+ u32 slot;
+ u32 channel;
+
+ dev_dbg(data, "IPR%d %08x\n", bank, sh_ipr);
+
+ slot = __ffs(sh_ipr);
+ sh_ipr &= ~(BIT(slot));
+
+ if (sh_ier & BIT(slot)) {
+ channel = (bank << 5) | slot;
+ /* Clear the corresponding IPR bits */
+ edma_shadow0_write_array(ctlr, SH_ICR, bank,
+ BIT(slot));
+ if (edma_cc[ctlr]->intr_data[channel].callback)
+ edma_cc[ctlr]->intr_data[channel].callback(
+ channel, DMA_COMPLETE,
+ edma_cc[ctlr]->intr_data[channel].data);
}
- cnt++;
- if (cnt > 10)
- break;
- }
+ } while (sh_ipr);
+
edma_shadow0_write(ctlr, SH_IEVAL, 1);
return IRQ_HANDLED;
}
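
The rewritten handler above replaces the bounded polling loop with a bit-scan over the pending register: __ffs() picks the lowest set bit, that bit is cleared from the local copy, and the per-channel callback runs only if the matching IER bit is set. The same pattern in isolation (a hypothetical helper, not part of the patch):

/* Illustrative __ffs()-based dispatch over a pending-bits word. */
#include <linux/types.h>
#include <linux/bitops.h>

static void example_dispatch(u32 pending, u32 enabled,
                             void (*handler)(unsigned int bit))
{
        while (pending) {
                unsigned int bit = __ffs(pending); /* lowest set bit; pending != 0 */

                pending &= ~BIT(bit);
                if (enabled & BIT(bit))
                        handler(bit);
        }
}
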
diff --git a/arch/arm/mach-davinci/include/mach/common.h b/arch/arm/mach-davinci/include/mach/common.h
index 5cd39a4e0c9..bdc4aa8e672 100644
--- a/arch/arm/mach-davinci/include/mach/common.h
+++ b/arch/arm/mach-davinci/include/mach/common.h
@@ -84,6 +84,25 @@ extern struct davinci_soc_info davinci_soc_info;
extern void davinci_common_init(struct davinci_soc_info *soc_info);
extern void davinci_init_ide(void);
void davinci_restart(char mode, const char *cmd);
+void davinci_init_late(void);
+
+#ifdef CONFIG_DAVINCI_RESET_CLOCKS
+int davinci_clk_disable_unused(void);
+#else
+static inline int davinci_clk_disable_unused(void) { return 0; }
+#endif
+
+#ifdef CONFIG_CPU_FREQ
+int davinci_cpufreq_init(void);
+#else
+static inline int davinci_cpufreq_init(void) { return 0; }
+#endif
+
+#ifdef CONFIG_SUSPEND
+int davinci_pm_init(void);
+#else
+static inline int davinci_pm_init(void) { return 0; }
+#endif
/* standard place to map on-chip SRAMs; they *may* support DMA */
#define SRAM_VIRT 0xfffe0000
diff --git a/arch/arm/mach-davinci/include/mach/debug-macro.S b/arch/arm/mach-davinci/include/mach/debug-macro.S
index cf94552d527..34290d14754 100644
--- a/arch/arm/mach-davinci/include/mach/debug-macro.S
+++ b/arch/arm/mach-davinci/include/mach/debug-macro.S
@@ -22,46 +22,28 @@
#define UART_SHIFT 2
- .pushsection .data
-davinci_uart_phys: .word 0
-davinci_uart_virt: .word 0
- .popsection
-
- .macro addruart, rp, rv, tmp
-
- /* Use davinci_uart_phys/virt if already configured */
-10: adr \rp, 99f @ get effective addr of 99f
- ldr \rv, [\rp] @ get absolute addr of 99f
- sub \rv, \rv, \rp @ offset between the two
- ldr \rp, [\rp, #4] @ abs addr of omap_uart_phys
- sub \tmp, \rp, \rv @ make it effective
- ldr \rp, [\tmp, #0] @ davinci_uart_phys
- ldr \rv, [\tmp, #4] @ davinci_uart_virt
- cmp \rp, #0 @ is port configured?
- cmpne \rv, #0
- bne 100f @ already configured
-
- /* Check the debug UART address set in uncompress.h */
- and \rp, pc, #0xff000000
- ldr \rv, =DAVINCI_UART_INFO_OFS
- add \rp, \rp, \rv
-
- /* Copy uart phys address from decompressor uart info */
- ldr \rv, [\rp, #0]
- str \rv, [\tmp, #0]
-
- /* Copy uart virt address from decompressor uart info */
- ldr \rv, [\rp, #4]
- str \rv, [\tmp, #4]
-
- b 10b
+#if defined(CONFIG_DEBUG_DAVINCI_DMx_UART0)
+#define UART_BASE DAVINCI_UART0_BASE
+#elif defined(CONFIG_DEBUG_DAVINCI_DA8XX_UART0)
+#define UART_BASE DA8XX_UART0_BASE
+#elif defined(CONFIG_DEBUG_DAVINCI_DA8XX_UART1)
+#define UART_BASE DA8XX_UART1_BASE
+#elif defined(CONFIG_DEBUG_DAVINCI_DA8XX_UART2)
+#define UART_BASE DA8XX_UART2_BASE
+#elif defined(CONFIG_DEBUG_DAVINCI_TNETV107X_UART1)
+#define UART_BASE TNETV107X_UART2_BASE
+#define UART_VIRTBASE TNETV107X_UART2_VIRT
+#else
+#error "Select a specifc port for DEBUG_LL"
+#endif
- .align
-99: .word .
- .word davinci_uart_phys
- .ltorg
+#ifndef UART_VIRTBASE
+#define UART_VIRTBASE IO_ADDRESS(UART_BASE)
+#endif
-100:
+ .macro addruart, rp, rv, tmp
+ ldr \rp, =UART_BASE
+ ldr \rv, =UART_VIRTBASE
.endm
.macro senduart,rd,rx
diff --git a/arch/arm/mach-davinci/include/mach/hardware.h b/arch/arm/mach-davinci/include/mach/hardware.h
index 2184691ebc2..16bb42291d3 100644
--- a/arch/arm/mach-davinci/include/mach/hardware.h
+++ b/arch/arm/mach-davinci/include/mach/hardware.h
@@ -22,7 +22,7 @@
/*
* I/O mapping
*/
-#define IO_PHYS 0x01c00000UL
+#define IO_PHYS UL(0x01c00000)
#define IO_OFFSET 0xfd000000 /* Virtual IO = 0xfec00000 */
#define IO_SIZE 0x00400000
#define IO_VIRT (IO_PHYS + IO_OFFSET)
diff --git a/arch/arm/mach-davinci/include/mach/serial.h b/arch/arm/mach-davinci/include/mach/serial.h
index e347d88fef9..46b3cd11c3c 100644
--- a/arch/arm/mach-davinci/include/mach/serial.h
+++ b/arch/arm/mach-davinci/include/mach/serial.h
@@ -15,16 +15,6 @@
#include <mach/hardware.h>
-/*
- * Stolen area that contains debug uart physical and virtual addresses. These
- * addresses are filled in by the uncompress.h code, and are used by the debug
- * macros in debug-macro.S.
- *
- * This area sits just below the page tables (see arch/arm/kernel/head.S).
- * We define it as a relative offset from start of usable RAM.
- */
-#define DAVINCI_UART_INFO_OFS 0x3ff8
-
#define DAVINCI_UART0_BASE (IO_PHYS + 0x20000)
#define DAVINCI_UART1_BASE (IO_PHYS + 0x20400)
#define DAVINCI_UART2_BASE (IO_PHYS + 0x20800)
diff --git a/arch/arm/mach-davinci/include/mach/uncompress.h b/arch/arm/mach-davinci/include/mach/uncompress.h
index da2fb2c2155..18cfd497715 100644
--- a/arch/arm/mach-davinci/include/mach/uncompress.h
+++ b/arch/arm/mach-davinci/include/mach/uncompress.h
@@ -43,37 +43,27 @@ static inline void flush(void)
barrier();
}
-static inline void set_uart_info(u32 phys, void * __iomem virt)
+static inline void set_uart_info(u32 phys)
{
- /*
- * Get address of some.bss variable and round it down
- * a la CONFIG_AUTO_ZRELADDR.
- */
- u32 ram_start = (u32)&uart & 0xf8000000;
- u32 *uart_info = (u32 *)(ram_start + DAVINCI_UART_INFO_OFS);
-
uart = (u32 *)phys;
- uart_info[0] = phys;
- uart_info[1] = (u32)virt;
}
-#define _DEBUG_LL_ENTRY(machine, phys, virt) \
- if (machine_is_##machine()) { \
- set_uart_info(phys, virt); \
- break; \
+#define _DEBUG_LL_ENTRY(machine, phys) \
+ { \
+ if (machine_is_##machine()) { \
+ set_uart_info(phys); \
+ break; \
+ } \
}
#define DEBUG_LL_DAVINCI(machine, port) \
- _DEBUG_LL_ENTRY(machine, DAVINCI_UART##port##_BASE, \
- IO_ADDRESS(DAVINCI_UART##port##_BASE))
+ _DEBUG_LL_ENTRY(machine, DAVINCI_UART##port##_BASE)
#define DEBUG_LL_DA8XX(machine, port) \
- _DEBUG_LL_ENTRY(machine, DA8XX_UART##port##_BASE, \
- IO_ADDRESS(DA8XX_UART##port##_BASE))
+ _DEBUG_LL_ENTRY(machine, DA8XX_UART##port##_BASE)
#define DEBUG_LL_TNETV107X(machine, port) \
- _DEBUG_LL_ENTRY(machine, TNETV107X_UART##port##_BASE, \
- TNETV107X_UART##port##_VIRT)
+ _DEBUG_LL_ENTRY(machine, TNETV107X_UART##port##_BASE)
static inline void __arch_decomp_setup(unsigned long arch_id)
{
diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
index 04c49f7543e..eb8360b33aa 100644
--- a/arch/arm/mach-davinci/pm.c
+++ b/arch/arm/mach-davinci/pm.c
@@ -152,8 +152,7 @@ static struct platform_driver davinci_pm_driver = {
.remove = __exit_p(davinci_pm_remove),
};
-static int __init davinci_pm_init(void)
+int __init davinci_pm_init(void)
{
return platform_driver_probe(&davinci_pm_driver, davinci_pm_probe);
}
-late_initcall(davinci_pm_init);
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index 42ab1e7c4ec..9493076fc59 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -13,7 +13,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/ata_platform.h>
#include <linux/gpio.h>
#include <asm/page.h>
@@ -68,6 +68,19 @@ void __init dove_map_io(void)
}
/*****************************************************************************
+ * CLK tree
+ ****************************************************************************/
+static struct clk *tclk;
+
+static void __init clk_init(void)
+{
+ tclk = clk_register_fixed_rate(NULL, "tclk", NULL, CLK_IS_ROOT,
+ get_tclk());
+
+ orion_clkdev_init(tclk);
+}
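
clk_register_fixed_rate() creates a root clock from the probed TCLK rate, and orion_clkdev_init() (in plat-orion, not shown in this hunk) is expected to register the clkdev lookups so peripheral drivers obtain the clock through the common clk API instead of receiving the rate as platform data. A consumer might then do something like the following (the NULL con_id and helper name are assumptions, not taken from this patch):

/* Hypothetical consumer: fetch tclk through the common clock framework. */
#include <linux/clk.h>
#include <linux/err.h>

static unsigned long example_get_tclk_rate(struct device *dev)
{
        struct clk *clk = clk_get(dev, NULL);

        if (IS_ERR(clk))
                return 0;
        clk_prepare_enable(clk);        /* common clk: prepare + enable */
        return clk_get_rate(clk);
}
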
+
+/*****************************************************************************
* EHCI0
****************************************************************************/
void __init dove_ehci0_init(void)
@@ -89,8 +102,7 @@ void __init dove_ehci1_init(void)
void __init dove_ge00_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge00_init(eth_data,
- DOVE_GE00_PHYS_BASE, IRQ_DOVE_GE00_SUM,
- 0, get_tclk());
+ DOVE_GE00_PHYS_BASE, IRQ_DOVE_GE00_SUM, 0);
}
/*****************************************************************************
@@ -116,7 +128,7 @@ void __init dove_sata_init(struct mv_sata_platform_data *sata_data)
void __init dove_uart0_init(void)
{
orion_uart0_init(DOVE_UART0_VIRT_BASE, DOVE_UART0_PHYS_BASE,
- IRQ_DOVE_UART_0, get_tclk());
+ IRQ_DOVE_UART_0, tclk);
}
/*****************************************************************************
@@ -125,7 +137,7 @@ void __init dove_uart0_init(void)
void __init dove_uart1_init(void)
{
orion_uart1_init(DOVE_UART1_VIRT_BASE, DOVE_UART1_PHYS_BASE,
- IRQ_DOVE_UART_1, get_tclk());
+ IRQ_DOVE_UART_1, tclk);
}
/*****************************************************************************
@@ -134,7 +146,7 @@ void __init dove_uart1_init(void)
void __init dove_uart2_init(void)
{
orion_uart2_init(DOVE_UART2_VIRT_BASE, DOVE_UART2_PHYS_BASE,
- IRQ_DOVE_UART_2, get_tclk());
+ IRQ_DOVE_UART_2, tclk);
}
/*****************************************************************************
@@ -143,7 +155,7 @@ void __init dove_uart2_init(void)
void __init dove_uart3_init(void)
{
orion_uart3_init(DOVE_UART3_VIRT_BASE, DOVE_UART3_PHYS_BASE,
- IRQ_DOVE_UART_3, get_tclk());
+ IRQ_DOVE_UART_3, tclk);
}
/*****************************************************************************
@@ -151,12 +163,12 @@ void __init dove_uart3_init(void)
****************************************************************************/
void __init dove_spi0_init(void)
{
- orion_spi_init(DOVE_SPI0_PHYS_BASE, get_tclk());
+ orion_spi_init(DOVE_SPI0_PHYS_BASE);
}
void __init dove_spi1_init(void)
{
- orion_spi_1_init(DOVE_SPI1_PHYS_BASE, get_tclk());
+ orion_spi_1_init(DOVE_SPI1_PHYS_BASE);
}
/*****************************************************************************
@@ -272,18 +284,17 @@ void __init dove_sdio1_init(void)
void __init dove_init(void)
{
- int tclk;
-
- tclk = get_tclk();
-
printk(KERN_INFO "Dove 88AP510 SoC, ");
- printk(KERN_INFO "TCLK = %dMHz\n", (tclk + 499999) / 1000000);
+ printk(KERN_INFO "TCLK = %dMHz\n", (get_tclk() + 499999) / 1000000);
#ifdef CONFIG_CACHE_TAUROS2
tauros2_init();
#endif
dove_setup_cpu_mbus();
+ /* Setup root of clk tree */
+ clk_init();
+
/* internal devices that every board has */
dove_rtc_init();
dove_xor0_init();
diff --git a/arch/arm/mach-dove/dove-db-setup.c b/arch/arm/mach-dove/dove-db-setup.c
index ea77ae430b2..bc2867f1134 100644
--- a/arch/arm/mach-dove/dove-db-setup.c
+++ b/arch/arm/mach-dove/dove-db-setup.c
@@ -20,7 +20,6 @@
#include <linux/i2c.h>
#include <linux/pci.h>
#include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
#include <linux/spi/flash.h>
#include <linux/gpio.h>
#include <asm/mach-types.h>
diff --git a/arch/arm/mach-ep93xx/adssphere.c b/arch/arm/mach-ep93xx/adssphere.c
index 2d45947a303..a472777e9eb 100644
--- a/arch/arm/mach-ep93xx/adssphere.c
+++ b/arch/arm/mach-ep93xx/adssphere.c
@@ -41,5 +41,6 @@ MACHINE_START(ADSSPHERE, "ADS Sphere board")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = adssphere_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 66b1494f23a..4dd07a0e360 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -675,7 +675,7 @@ int ep93xx_keypad_acquire_gpio(struct platform_device *pdev)
fail_gpio_d:
gpio_free(EP93XX_GPIO_LINE_C(i));
fail_gpio_c:
- for ( ; i >= 0; --i) {
+ for (--i; i >= 0; --i) {
gpio_free(EP93XX_GPIO_LINE_C(i));
gpio_free(EP93XX_GPIO_LINE_D(i));
}
@@ -834,3 +834,8 @@ void ep93xx_restart(char mode, const char *cmd)
while (1)
;
}
+
+void __init ep93xx_init_late(void)
+{
+ crunch_init();
+}
diff --git a/arch/arm/mach-ep93xx/crunch.c b/arch/arm/mach-ep93xx/crunch.c
index 74753e2df60..a4a2ab9648c 100644
--- a/arch/arm/mach-ep93xx/crunch.c
+++ b/arch/arm/mach-ep93xx/crunch.c
@@ -79,12 +79,10 @@ static struct notifier_block crunch_notifier_block = {
.notifier_call = crunch_do,
};
-static int __init crunch_init(void)
+int __init crunch_init(void)
{
thread_register_notifier(&crunch_notifier_block);
elf_hwcap |= HWCAP_CRUNCH;
return 0;
}
-
-late_initcall(crunch_init);
diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c
index da9047d726f..d74c5cddb98 100644
--- a/arch/arm/mach-ep93xx/edb93xx.c
+++ b/arch/arm/mach-ep93xx/edb93xx.c
@@ -255,6 +255,7 @@ MACHINE_START(EDB9301, "Cirrus Logic EDB9301 Evaluation Board")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
#endif
@@ -268,6 +269,7 @@ MACHINE_START(EDB9302, "Cirrus Logic EDB9302 Evaluation Board")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
#endif
@@ -281,6 +283,7 @@ MACHINE_START(EDB9302A, "Cirrus Logic EDB9302A Evaluation Board")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
#endif
@@ -294,6 +297,7 @@ MACHINE_START(EDB9307, "Cirrus Logic EDB9307 Evaluation Board")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
#endif
@@ -307,6 +311,7 @@ MACHINE_START(EDB9307A, "Cirrus Logic EDB9307A Evaluation Board")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
#endif
@@ -320,6 +325,7 @@ MACHINE_START(EDB9312, "Cirrus Logic EDB9312 Evaluation Board")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
#endif
@@ -333,6 +339,7 @@ MACHINE_START(EDB9315, "Cirrus Logic EDB9315 Evaluation Board")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
#endif
@@ -346,6 +353,7 @@ MACHINE_START(EDB9315A, "Cirrus Logic EDB9315A Evaluation Board")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-ep93xx/gesbc9312.c b/arch/arm/mach-ep93xx/gesbc9312.c
index fcdffbe49dc..437c3411115 100644
--- a/arch/arm/mach-ep93xx/gesbc9312.c
+++ b/arch/arm/mach-ep93xx/gesbc9312.c
@@ -41,5 +41,6 @@ MACHINE_START(GESBC9312, "Glomation GESBC-9312-sx")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = gesbc9312_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ep93xx/include/mach/platform.h b/arch/arm/mach-ep93xx/include/mach/platform.h
index 602bd87fd0a..1ecb040d98b 100644
--- a/arch/arm/mach-ep93xx/include/mach/platform.h
+++ b/arch/arm/mach-ep93xx/include/mach/platform.h
@@ -53,5 +53,12 @@ void ep93xx_init_devices(void);
extern struct sys_timer ep93xx_timer;
void ep93xx_restart(char, const char *);
+void ep93xx_init_late(void);
+
+#ifdef CONFIG_CRUNCH
+int crunch_init(void);
+#else
+static inline int crunch_init(void) { return 0; }
+#endif
#endif
diff --git a/arch/arm/mach-ep93xx/micro9.c b/arch/arm/mach-ep93xx/micro9.c
index dc431c5f04c..3d7cdab725b 100644
--- a/arch/arm/mach-ep93xx/micro9.c
+++ b/arch/arm/mach-ep93xx/micro9.c
@@ -85,6 +85,7 @@ MACHINE_START(MICRO9, "Contec Micro9-High")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = micro9_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
#endif
@@ -98,6 +99,7 @@ MACHINE_START(MICRO9M, "Contec Micro9-Mid")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = micro9_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
#endif
@@ -111,6 +113,7 @@ MACHINE_START(MICRO9L, "Contec Micro9-Lite")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = micro9_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
#endif
@@ -124,6 +127,7 @@ MACHINE_START(MICRO9S, "Contec Micro9-Slim")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = micro9_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
#endif
diff --git a/arch/arm/mach-ep93xx/simone.c b/arch/arm/mach-ep93xx/simone.c
index f40c2987e54..33dc0791741 100644
--- a/arch/arm/mach-ep93xx/simone.c
+++ b/arch/arm/mach-ep93xx/simone.c
@@ -86,5 +86,6 @@ MACHINE_START(SIM_ONE, "Simplemachines Sim.One Board")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = simone_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ep93xx/snappercl15.c b/arch/arm/mach-ep93xx/snappercl15.c
index 0c00852ef16..eb282378fa7 100644
--- a/arch/arm/mach-ep93xx/snappercl15.c
+++ b/arch/arm/mach-ep93xx/snappercl15.c
@@ -183,5 +183,6 @@ MACHINE_START(SNAPPER_CL15, "Bluewater Systems Snapper CL15")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = snappercl15_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index 5ea790942e9..d4ef339d961 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -252,5 +252,6 @@ MACHINE_START(TS72XX, "Technologic Systems TS-72xx SBC")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = ts72xx_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-ep93xx/vision_ep9307.c b/arch/arm/mach-ep93xx/vision_ep9307.c
index ba156eb225e..2905a4929bd 100644
--- a/arch/arm/mach-ep93xx/vision_ep9307.c
+++ b/arch/arm/mach-ep93xx/vision_ep9307.c
@@ -367,5 +367,6 @@ MACHINE_START(VISION_EP9307, "Vision Engraving Systems EP9307")
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = vision_init_machine,
+ .init_late = ep93xx_init_late,
.restart = ep93xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 15b05b89cc3..43ebe909441 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -61,6 +61,7 @@ config SOC_EXYNOS5250
bool "SAMSUNG EXYNOS5250"
default y
depends on ARCH_EXYNOS5
+ select SAMSUNG_DMADEV
help
Enable EXYNOS5250 SoC support
@@ -70,7 +71,7 @@ config EXYNOS4_MCT
help
Use MCT (Multi Core Timer) as kernel timers
-config EXYNOS4_DEV_DMA
+config EXYNOS_DEV_DMA
bool
help
Compile in amba device definitions for DMA controller
@@ -80,15 +81,20 @@ config EXYNOS4_DEV_AHCI
help
Compile in platform device definitions for AHCI
+config EXYNOS_DEV_DRM
+ bool
+ help
+ Compile in platform device definitions for core DRM device
+
config EXYNOS4_SETUP_FIMD0
bool
help
Common setup code for FIMD0.
-config EXYNOS4_DEV_SYSMMU
+config EXYNOS_DEV_SYSMMU
bool
help
- Common setup code for SYSTEM MMU in EXYNOS4
+ Common setup code for SYSTEM MMU in EXYNOS platforms
config EXYNOS4_DEV_DWMCI
bool
@@ -161,7 +167,7 @@ config EXYNOS4_SETUP_USB_PHY
help
Common setup code for USB PHY controller
-config EXYNOS4_SETUP_SPI
+config EXYNOS_SETUP_SPI
bool
help
Common setup code for SPI GPIO configurations.
@@ -201,12 +207,12 @@ config MACH_SMDKV310
select S3C_DEV_HSMMC3
select SAMSUNG_DEV_BACKLIGHT
select EXYNOS_DEV_DRM
+ select EXYNOS_DEV_SYSMMU
select EXYNOS4_DEV_AHCI
select SAMSUNG_DEV_KEYPAD
select EXYNOS4_DEV_DMA
select SAMSUNG_DEV_PWM
select EXYNOS4_DEV_USB_OHCI
- select EXYNOS4_DEV_SYSMMU
select EXYNOS4_SETUP_FIMD0
select EXYNOS4_SETUP_I2C1
select EXYNOS4_SETUP_KEYPAD
@@ -224,8 +230,7 @@ config MACH_ARMLEX4210
select S3C_DEV_HSMMC2
select S3C_DEV_HSMMC3
select EXYNOS4_DEV_AHCI
- select EXYNOS4_DEV_DMA
- select EXYNOS4_DEV_SYSMMU
+ select EXYNOS_DEV_DMA
select EXYNOS4_SETUP_SDHCI
help
Machine support for Samsung ARMLEX4210 based on EXYNOS4210
@@ -256,6 +261,7 @@ config MACH_UNIVERSAL_C210
select S5P_DEV_MFC
select S5P_DEV_ONENAND
select S5P_DEV_TV
+ select EXYNOS_DEV_SYSMMU
select EXYNOS4_DEV_DMA
select EXYNOS_DEV_DRM
select EXYNOS4_SETUP_FIMD0
@@ -332,6 +338,7 @@ config MACH_ORIGEN
select SAMSUNG_DEV_BACKLIGHT
select SAMSUNG_DEV_PWM
select EXYNOS_DEV_DRM
+ select EXYNOS_DEV_SYSMMU
select EXYNOS4_DEV_DMA
select EXYNOS4_DEV_USB_OHCI
select EXYNOS4_SETUP_FIMD0
@@ -360,7 +367,8 @@ config MACH_SMDK4212
select SAMSUNG_DEV_BACKLIGHT
select SAMSUNG_DEV_KEYPAD
select SAMSUNG_DEV_PWM
- select EXYNOS4_DEV_DMA
+ select EXYNOS_DEV_SYSMMU
+ select EXYNOS_DEV_DMA
select EXYNOS4_SETUP_I2C1
select EXYNOS4_SETUP_I2C3
select EXYNOS4_SETUP_I2C7
diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
index 8631840d1b5..440a637c76f 100644
--- a/arch/arm/mach-exynos/Makefile
+++ b/arch/arm/mach-exynos/Makefile
@@ -50,10 +50,11 @@ obj-$(CONFIG_MACH_EXYNOS5_DT) += mach-exynos5-dt.o
obj-y += dev-uart.o
obj-$(CONFIG_ARCH_EXYNOS4) += dev-audio.o
obj-$(CONFIG_EXYNOS4_DEV_AHCI) += dev-ahci.o
-obj-$(CONFIG_EXYNOS4_DEV_SYSMMU) += dev-sysmmu.o
obj-$(CONFIG_EXYNOS4_DEV_DWMCI) += dev-dwmci.o
-obj-$(CONFIG_EXYNOS4_DEV_DMA) += dma.o
+obj-$(CONFIG_EXYNOS_DEV_DMA) += dma.o
obj-$(CONFIG_EXYNOS4_DEV_USB_OHCI) += dev-ohci.o
+obj-$(CONFIG_EXYNOS_DEV_DRM) += dev-drm.o
+obj-$(CONFIG_EXYNOS_DEV_SYSMMU) += dev-sysmmu.o
obj-$(CONFIG_ARCH_EXYNOS) += setup-i2c0.o
obj-$(CONFIG_EXYNOS4_SETUP_FIMC) += setup-fimc.o
@@ -68,4 +69,4 @@ obj-$(CONFIG_EXYNOS4_SETUP_I2C7) += setup-i2c7.o
obj-$(CONFIG_EXYNOS4_SETUP_KEYPAD) += setup-keypad.o
obj-$(CONFIG_EXYNOS4_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
obj-$(CONFIG_EXYNOS4_SETUP_USB_PHY) += setup-usb-phy.o
-obj-$(CONFIG_EXYNOS4_SETUP_SPI) += setup-spi.o
+obj-$(CONFIG_EXYNOS_SETUP_SPI) += setup-spi.o
diff --git a/arch/arm/mach-exynos/Makefile.boot b/arch/arm/mach-exynos/Makefile.boot
index b9862e22bf1..31bd181b051 100644
--- a/arch/arm/mach-exynos/Makefile.boot
+++ b/arch/arm/mach-exynos/Makefile.boot
@@ -1,2 +1,5 @@
zreladdr-y += 0x40008000
params_phys-y := 0x40000100
+
+dtb-$(CONFIG_MACH_EXYNOS4_DT) += exynos4210-origen.dtb exynos4210-smdkv310.dtb
+dtb-$(CONFIG_MACH_EXYNOS5_DT) += exynos5250-smdk5250.dtb
diff --git a/arch/arm/mach-exynos/clock-exynos4.c b/arch/arm/mach-exynos/clock-exynos4.c
index 6efd1e5919f..bcb7db45314 100644
--- a/arch/arm/mach-exynos/clock-exynos4.c
+++ b/arch/arm/mach-exynos/clock-exynos4.c
@@ -168,7 +168,7 @@ static int exynos4_clk_ip_tv_ctrl(struct clk *clk, int enable)
return s5p_gatectrl(EXYNOS4_CLKGATE_IP_TV, clk, enable);
}
-static int exynos4_clk_ip_image_ctrl(struct clk *clk, int enable)
+int exynos4_clk_ip_image_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS4_CLKGATE_IP_IMAGE, clk, enable);
}
@@ -198,6 +198,11 @@ static int exynos4_clk_ip_perir_ctrl(struct clk *clk, int enable)
return s5p_gatectrl(EXYNOS4_CLKGATE_IP_PERIR, clk, enable);
}
+int exynos4_clk_ip_dmc_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(EXYNOS4_CLKGATE_IP_DMC, clk, enable);
+}
+
static int exynos4_clk_hdmiphy_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(S5P_HDMI_PHY_CONTROL, clk, enable);
@@ -678,61 +683,55 @@ static struct clk exynos4_init_clocks_off[] = {
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 14),
}, {
- .name = "SYSMMU_MDMA",
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(mfc_l, 0),
+ .enable = exynos4_clk_ip_mfc_ctrl,
+ .ctrlbit = (1 << 1),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(mfc_r, 1),
+ .enable = exynos4_clk_ip_mfc_ctrl,
+ .ctrlbit = (1 << 2),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(tv, 2),
+ .enable = exynos4_clk_ip_tv_ctrl,
+ .ctrlbit = (1 << 4),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(jpeg, 3),
+ .enable = exynos4_clk_ip_cam_ctrl,
+ .ctrlbit = (1 << 11),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(rot, 4),
.enable = exynos4_clk_ip_image_ctrl,
- .ctrlbit = (1 << 5),
+ .ctrlbit = (1 << 4),
}, {
- .name = "SYSMMU_FIMC0",
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(fimc0, 5),
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 7),
}, {
- .name = "SYSMMU_FIMC1",
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(fimc1, 6),
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 8),
}, {
- .name = "SYSMMU_FIMC2",
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(fimc2, 7),
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 9),
}, {
- .name = "SYSMMU_FIMC3",
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(fimc3, 8),
.enable = exynos4_clk_ip_cam_ctrl,
.ctrlbit = (1 << 10),
}, {
- .name = "SYSMMU_JPEG",
- .enable = exynos4_clk_ip_cam_ctrl,
- .ctrlbit = (1 << 11),
- }, {
- .name = "SYSMMU_FIMD0",
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(fimd0, 10),
.enable = exynos4_clk_ip_lcd0_ctrl,
.ctrlbit = (1 << 4),
- }, {
- .name = "SYSMMU_FIMD1",
- .enable = exynos4_clk_ip_lcd1_ctrl,
- .ctrlbit = (1 << 4),
- }, {
- .name = "SYSMMU_PCIe",
- .enable = exynos4_clk_ip_fsys_ctrl,
- .ctrlbit = (1 << 18),
- }, {
- .name = "SYSMMU_G2D",
- .enable = exynos4_clk_ip_image_ctrl,
- .ctrlbit = (1 << 3),
- }, {
- .name = "SYSMMU_ROTATOR",
- .enable = exynos4_clk_ip_image_ctrl,
- .ctrlbit = (1 << 4),
- }, {
- .name = "SYSMMU_TV",
- .enable = exynos4_clk_ip_tv_ctrl,
- .ctrlbit = (1 << 4),
- }, {
- .name = "SYSMMU_MFC_L",
- .enable = exynos4_clk_ip_mfc_ctrl,
- .ctrlbit = (1 << 1),
- }, {
- .name = "SYSMMU_MFC_R",
- .enable = exynos4_clk_ip_mfc_ctrl,
- .ctrlbit = (1 << 2),
}
};
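
Editor's sketch: the per-block exynos4_clk_ip_*_ctrl() helpers above (including the newly exported image and dmc ones) all funnel into s5p_gatectrl() with a fixed CLKGATE_IP_* register, and each clock entry contributes only its ctrlbit. The standalone model below illustrates that pattern; the read-modify-write behaviour of s5p_gatectrl() is an assumption here, since its body is outside this patch, and the register is a plain variable rather than MMIO.

#include <stdio.h>

static unsigned int clkgate_ip_dmc;	/* stand-in for the EXYNOS4_CLKGATE_IP_DMC register */

/* models s5p_gatectrl(reg, clk, enable): set or clear the clock's ctrlbit */
static int model_gatectrl(unsigned int *reg, unsigned int ctrlbit, int enable)
{
	if (enable)
		*reg |= ctrlbit;
	else
		*reg &= ~ctrlbit;
	return 0;
}

int main(void)
{
	/* the exynos4212 2d System MMU clock entry later in this patch uses ctrlbit (1 << 24) */
	model_gatectrl(&clkgate_ip_dmc, 1 << 24, 1);
	printf("CLKGATE_IP_DMC after enable:  0x%08x\n", clkgate_ip_dmc);
	model_gatectrl(&clkgate_ip_dmc, 1 << 24, 0);
	printf("CLKGATE_IP_DMC after disable: 0x%08x\n", clkgate_ip_dmc);
	return 0;
}
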
diff --git a/arch/arm/mach-exynos/clock-exynos4.h b/arch/arm/mach-exynos/clock-exynos4.h
index cb71c29c14d..28a11970118 100644
--- a/arch/arm/mach-exynos/clock-exynos4.h
+++ b/arch/arm/mach-exynos/clock-exynos4.h
@@ -26,5 +26,7 @@ extern struct clk *exynos4_clkset_group_list[];
extern int exynos4_clksrc_mask_fsys_ctrl(struct clk *clk, int enable);
extern int exynos4_clk_ip_fsys_ctrl(struct clk *clk, int enable);
extern int exynos4_clk_ip_lcd1_ctrl(struct clk *clk, int enable);
+extern int exynos4_clk_ip_image_ctrl(struct clk *clk, int enable);
+extern int exynos4_clk_ip_dmc_ctrl(struct clk *clk, int enable);
#endif /* __ASM_ARCH_CLOCK_H */
diff --git a/arch/arm/mach-exynos/clock-exynos4210.c b/arch/arm/mach-exynos/clock-exynos4210.c
index 3b131e4b6ef..b8689ff60ba 100644
--- a/arch/arm/mach-exynos/clock-exynos4210.c
+++ b/arch/arm/mach-exynos/clock-exynos4210.c
@@ -26,6 +26,7 @@
#include <mach/hardware.h>
#include <mach/map.h>
#include <mach/regs-clock.h>
+#include <mach/sysmmu.h>
#include "common.h"
#include "clock-exynos4.h"
@@ -94,6 +95,16 @@ static struct clk init_clocks_off[] = {
.devname = "exynos4-fb.1",
.enable = exynos4_clk_ip_lcd1_ctrl,
.ctrlbit = (1 << 0),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(2d, 14),
+ .enable = exynos4_clk_ip_image_ctrl,
+ .ctrlbit = (1 << 3),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(fimd1, 11),
+ .enable = exynos4_clk_ip_lcd1_ctrl,
+ .ctrlbit = (1 << 4),
},
};
diff --git a/arch/arm/mach-exynos/clock-exynos4212.c b/arch/arm/mach-exynos/clock-exynos4212.c
index 3ecc01e06f7..da397d21bbc 100644
--- a/arch/arm/mach-exynos/clock-exynos4212.c
+++ b/arch/arm/mach-exynos/clock-exynos4212.c
@@ -26,6 +26,7 @@
#include <mach/hardware.h>
#include <mach/map.h>
#include <mach/regs-clock.h>
+#include <mach/sysmmu.h>
#include "common.h"
#include "clock-exynos4.h"
@@ -39,6 +40,16 @@ static struct sleep_save exynos4212_clock_save[] = {
};
#endif
+static int exynos4212_clk_ip_isp0_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(EXYNOS4_CLKGATE_IP_ISP0, clk, enable);
+}
+
+static int exynos4212_clk_ip_isp1_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(EXYNOS4_CLKGATE_IP_ISP1, clk, enable);
+}
+
static struct clk *clk_src_mpll_user_list[] = {
[0] = &clk_fin_mpll,
[1] = &exynos4_clk_mout_mpll.clk,
@@ -66,7 +77,32 @@ static struct clksrc_clk clksrcs[] = {
};
static struct clk init_clocks_off[] = {
- /* nothing here yet */
+ {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(2d, 14),
+ .enable = exynos4_clk_ip_dmc_ctrl,
+ .ctrlbit = (1 << 24),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(isp, 9),
+ .enable = exynos4212_clk_ip_isp0_ctrl,
+ .ctrlbit = (7 << 8),
+ }, {
+ .name = SYSMMU_CLOCK_NAME2,
+ .devname = SYSMMU_CLOCK_DEVNAME(isp, 9),
+ .enable = exynos4212_clk_ip_isp1_ctrl,
+ .ctrlbit = (1 << 4),
+ }, {
+ .name = "flite",
+ .devname = "exynos-fimc-lite.0",
+ .enable = exynos4212_clk_ip_isp0_ctrl,
+ .ctrlbit = (1 << 4),
+ }, {
+ .name = "flite",
+ .devname = "exynos-fimc-lite.1",
+ .enable = exynos4212_clk_ip_isp0_ctrl,
+ .ctrlbit = (1 << 3),
+ }
};
#ifdef CONFIG_PM_SLEEP
diff --git a/arch/arm/mach-exynos/clock-exynos5.c b/arch/arm/mach-exynos/clock-exynos5.c
index 7ac6ff4c46b..5aa460b01fd 100644
--- a/arch/arm/mach-exynos/clock-exynos5.c
+++ b/arch/arm/mach-exynos/clock-exynos5.c
@@ -82,6 +82,11 @@ static int exynos5_clksrc_mask_peric0_ctrl(struct clk *clk, int enable)
return s5p_gatectrl(EXYNOS5_CLKSRC_MASK_PERIC0, clk, enable);
}
+static int exynos5_clk_ip_acp_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(EXYNOS5_CLKGATE_IP_ACP, clk, enable);
+}
+
static int exynos5_clk_ip_core_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS5_CLKGATE_IP_CORE, clk, enable);
@@ -127,6 +132,21 @@ static int exynos5_clk_ip_peris_ctrl(struct clk *clk, int enable)
return s5p_gatectrl(EXYNOS5_CLKGATE_IP_PERIS, clk, enable);
}
+static int exynos5_clk_ip_gscl_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(EXYNOS5_CLKGATE_IP_GSCL, clk, enable);
+}
+
+static int exynos5_clk_ip_isp0_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(EXYNOS5_CLKGATE_IP_ISP0, clk, enable);
+}
+
+static int exynos5_clk_ip_isp1_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(EXYNOS5_CLKGATE_IP_ISP1, clk, enable);
+}
+
/* Core list of CMU_CPU side */
static struct clksrc_clk exynos5_clk_mout_apll = {
@@ -145,11 +165,29 @@ static struct clksrc_clk exynos5_clk_sclk_apll = {
.reg_div = { .reg = EXYNOS5_CLKDIV_CPU0, .shift = 24, .size = 3 },
};
+static struct clksrc_clk exynos5_clk_mout_bpll_fout = {
+ .clk = {
+ .name = "mout_bpll_fout",
+ },
+ .sources = &clk_src_bpll_fout,
+ .reg_src = { .reg = EXYNOS5_PLL_DIV2_SEL, .shift = 0, .size = 1 },
+};
+
+static struct clk *exynos5_clk_src_bpll_list[] = {
+ [0] = &clk_fin_bpll,
+ [1] = &exynos5_clk_mout_bpll_fout.clk,
+};
+
+static struct clksrc_sources exynos5_clk_src_bpll = {
+ .sources = exynos5_clk_src_bpll_list,
+ .nr_sources = ARRAY_SIZE(exynos5_clk_src_bpll_list),
+};
+
static struct clksrc_clk exynos5_clk_mout_bpll = {
.clk = {
.name = "mout_bpll",
},
- .sources = &clk_src_bpll,
+ .sources = &exynos5_clk_src_bpll,
.reg_src = { .reg = EXYNOS5_CLKSRC_CDREX, .shift = 0, .size = 1 },
};
@@ -187,11 +225,29 @@ static struct clksrc_clk exynos5_clk_mout_epll = {
.reg_src = { .reg = EXYNOS5_CLKSRC_TOP2, .shift = 12, .size = 1 },
};
+static struct clksrc_clk exynos5_clk_mout_mpll_fout = {
+ .clk = {
+ .name = "mout_mpll_fout",
+ },
+ .sources = &clk_src_mpll_fout,
+ .reg_src = { .reg = EXYNOS5_PLL_DIV2_SEL, .shift = 4, .size = 1 },
+};
+
+static struct clk *exynos5_clk_src_mpll_list[] = {
+ [0] = &clk_fin_mpll,
+ [1] = &exynos5_clk_mout_mpll_fout.clk,
+};
+
+static struct clksrc_sources exynos5_clk_src_mpll = {
+ .sources = exynos5_clk_src_mpll_list,
+ .nr_sources = ARRAY_SIZE(exynos5_clk_src_mpll_list),
+};
+
struct clksrc_clk exynos5_clk_mout_mpll = {
.clk = {
.name = "mout_mpll",
},
- .sources = &clk_src_mpll,
+ .sources = &exynos5_clk_src_mpll,
.reg_src = { .reg = EXYNOS5_CLKSRC_CORE1, .shift = 8, .size = 1 },
};
@@ -454,6 +510,11 @@ static struct clk exynos5_init_clocks_off[] = {
.enable = exynos5_clk_ip_peris_ctrl,
.ctrlbit = (1 << 20),
}, {
+ .name = "watchdog",
+ .parent = &exynos5_clk_aclk_66.clk,
+ .enable = exynos5_clk_ip_peris_ctrl,
+ .ctrlbit = (1 << 19),
+ }, {
.name = "hsmmc",
.devname = "exynos4-sdhci.0",
.parent = &exynos5_clk_aclk_200.clk,
@@ -630,6 +691,76 @@ static struct clk exynos5_init_clocks_off[] = {
.parent = &exynos5_clk_aclk_66.clk,
.enable = exynos5_clk_ip_peric_ctrl,
.ctrlbit = (1 << 14),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(mfc_l, 0),
+ .enable = &exynos5_clk_ip_mfc_ctrl,
+ .ctrlbit = (1 << 1),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(mfc_r, 1),
+ .enable = &exynos5_clk_ip_mfc_ctrl,
+ .ctrlbit = (1 << 2),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(tv, 2),
+ .enable = &exynos5_clk_ip_disp1_ctrl,
+ .ctrlbit = (1 << 9)
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(jpeg, 3),
+ .enable = &exynos5_clk_ip_gen_ctrl,
+ .ctrlbit = (1 << 7),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(rot, 4),
+ .enable = &exynos5_clk_ip_gen_ctrl,
+ .ctrlbit = (1 << 6)
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(gsc0, 5),
+ .enable = &exynos5_clk_ip_gscl_ctrl,
+ .ctrlbit = (1 << 7),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(gsc1, 6),
+ .enable = &exynos5_clk_ip_gscl_ctrl,
+ .ctrlbit = (1 << 8),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(gsc2, 7),
+ .enable = &exynos5_clk_ip_gscl_ctrl,
+ .ctrlbit = (1 << 9),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(gsc3, 8),
+ .enable = &exynos5_clk_ip_gscl_ctrl,
+ .ctrlbit = (1 << 10),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(isp, 9),
+ .enable = &exynos5_clk_ip_isp0_ctrl,
+ .ctrlbit = (0x3F << 8),
+ }, {
+ .name = SYSMMU_CLOCK_NAME2,
+ .devname = SYSMMU_CLOCK_DEVNAME(isp, 9),
+ .enable = &exynos5_clk_ip_isp1_ctrl,
+ .ctrlbit = (0xF << 4),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(camif0, 12),
+ .enable = &exynos5_clk_ip_gscl_ctrl,
+ .ctrlbit = (1 << 11),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(camif1, 13),
+ .enable = &exynos5_clk_ip_gscl_ctrl,
+ .ctrlbit = (1 << 12),
+ }, {
+ .name = SYSMMU_CLOCK_NAME,
+ .devname = SYSMMU_CLOCK_DEVNAME(2d, 14),
+ .enable = &exynos5_clk_ip_acp_ctrl,
+ .ctrlbit = (1 << 7)
}
};
@@ -941,10 +1072,12 @@ static struct clksrc_clk *exynos5_sysclks[] = {
&exynos5_clk_mout_apll,
&exynos5_clk_sclk_apll,
&exynos5_clk_mout_bpll,
+ &exynos5_clk_mout_bpll_fout,
&exynos5_clk_mout_bpll_user,
&exynos5_clk_mout_cpll,
&exynos5_clk_mout_epll,
&exynos5_clk_mout_mpll,
+ &exynos5_clk_mout_mpll_fout,
&exynos5_clk_mout_mpll_user,
&exynos5_clk_vpllsrc,
&exynos5_clk_sclk_vpll,
@@ -1008,7 +1141,9 @@ static struct clk *exynos5_clks[] __initdata = {
&exynos5_clk_sclk_hdmi27m,
&exynos5_clk_sclk_hdmiphy,
&clk_fout_bpll,
+ &clk_fout_bpll_div2,
&clk_fout_cpll,
+ &clk_fout_mpll_div2,
&exynos5_clk_armclk,
};
@@ -1173,8 +1308,10 @@ void __init_or_cpufreq exynos5_setup_clocks(void)
clk_fout_apll.ops = &exynos5_fout_apll_ops;
clk_fout_bpll.rate = bpll;
+ clk_fout_bpll_div2.rate = bpll >> 1;
clk_fout_cpll.rate = cpll;
clk_fout_mpll.rate = mpll;
+ clk_fout_mpll_div2.rate = mpll >> 1;
clk_fout_epll.rate = epll;
clk_fout_vpll.rate = vpll;
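
Editor's sketch: the BPLL and MPLL paths gain a PLL_DIV2_SEL-switched mout_*_fout level, and exynos5_setup_clocks() now registers the halved outputs (clk_fout_bpll_div2, clk_fout_mpll_div2) at FOUT >> 1. A minimal standalone illustration of that rate bookkeeping follows; the PLL frequencies are hypothetical placeholders, not measured values.

#include <stdio.h>

struct clk {
	const char *name;
	unsigned long rate;
};

int main(void)
{
	/* hypothetical FOUT frequencies, only to illustrate the ">> 1" bookkeeping */
	unsigned long bpll = 800000000UL;
	unsigned long mpll = 1600000000UL;

	struct clk clks[] = {
		{ "fout_bpll",      bpll },
		{ "fout_bpll_div2", bpll >> 1 },	/* as set in exynos5_setup_clocks() */
		{ "fout_mpll",      mpll },
		{ "fout_mpll_div2", mpll >> 1 },
	};

	for (unsigned int i = 0; i < sizeof(clks) / sizeof(clks[0]); i++)
		printf("%-16s %10lu Hz\n", clks[i].name, clks[i].rate);
	return 0;
}
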
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c
index 5ccd6e80a60..742edd3bbec 100644
--- a/arch/arm/mach-exynos/common.c
+++ b/arch/arm/mach-exynos/common.c
@@ -19,6 +19,9 @@
#include <linux/serial_core.h>
#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/export.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
#include <asm/proc-fns.h>
#include <asm/exception.h>
@@ -265,12 +268,12 @@ static struct map_desc exynos5_iodesc[] __initdata = {
}, {
.virtual = (unsigned long)S5P_VA_GIC_CPU,
.pfn = __phys_to_pfn(EXYNOS5_PA_GIC_CPU),
- .length = SZ_64K,
+ .length = SZ_8K,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)S5P_VA_GIC_DIST,
.pfn = __phys_to_pfn(EXYNOS5_PA_GIC_DIST),
- .length = SZ_64K,
+ .length = SZ_4K,
.type = MT_DEVICE,
},
};
@@ -285,6 +288,11 @@ void exynos5_restart(char mode, const char *cmd)
__raw_writel(0x1, EXYNOS_SWRESET);
}
+void __init exynos_init_late(void)
+{
+ exynos_pm_late_initcall();
+}
+
/*
* exynos_map_io
*
@@ -399,6 +407,7 @@ struct combiner_chip_data {
void __iomem *base;
};
+static struct irq_domain *combiner_irq_domain;
static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];
static inline void __iomem *combiner_base(struct irq_data *data)
@@ -411,14 +420,14 @@ static inline void __iomem *combiner_base(struct irq_data *data)
static void combiner_mask_irq(struct irq_data *data)
{
- u32 mask = 1 << (data->irq % 32);
+ u32 mask = 1 << (data->hwirq % 32);
__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}
static void combiner_unmask_irq(struct irq_data *data)
{
- u32 mask = 1 << (data->irq % 32);
+ u32 mask = 1 << (data->hwirq % 32);
__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}
@@ -474,49 +483,131 @@ static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int i
irq_set_chained_handler(irq, combiner_handle_cascade_irq);
}
-static void __init combiner_init(unsigned int combiner_nr, void __iomem *base,
- unsigned int irq_start)
+static void __init combiner_init_one(unsigned int combiner_nr,
+ void __iomem *base)
{
- unsigned int i;
- unsigned int max_nr;
-
- if (soc_is_exynos5250())
- max_nr = EXYNOS5_MAX_COMBINER_NR;
- else
- max_nr = EXYNOS4_MAX_COMBINER_NR;
-
- if (combiner_nr >= max_nr)
- BUG();
-
combiner_data[combiner_nr].base = base;
- combiner_data[combiner_nr].irq_offset = irq_start;
+ combiner_data[combiner_nr].irq_offset = irq_find_mapping(
+ combiner_irq_domain, combiner_nr * MAX_IRQ_IN_COMBINER);
combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);
/* Disable all interrupts */
-
__raw_writel(combiner_data[combiner_nr].irq_mask,
base + COMBINER_ENABLE_CLEAR);
+}
+
+#ifdef CONFIG_OF
+static int combiner_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (d->of_node != controller)
+ return -EINVAL;
+
+ if (intsize < 2)
+ return -EINVAL;
+
+ *out_hwirq = intspec[0] * MAX_IRQ_IN_COMBINER + intspec[1];
+ *out_type = 0;
+
+ return 0;
+}
+#else
+static int combiner_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ return -EINVAL;
+}
+#endif
+
+static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
+ irq_set_chip_data(irq, &combiner_data[hw >> 3]);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+
+ return 0;
+}
+
+static struct irq_domain_ops combiner_irq_domain_ops = {
+ .xlate = combiner_irq_domain_xlate,
+ .map = combiner_irq_domain_map,
+};
+
+void __init combiner_init(void __iomem *combiner_base, struct device_node *np)
+{
+ int i, irq, irq_base;
+ unsigned int max_nr, nr_irq;
+
+ if (np) {
+ if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
+ pr_warning("%s: number of combiners not specified, "
+ "setting default as %d.\n",
+ __func__, EXYNOS4_MAX_COMBINER_NR);
+ max_nr = EXYNOS4_MAX_COMBINER_NR;
+ }
+ } else {
+ max_nr = soc_is_exynos5250() ? EXYNOS5_MAX_COMBINER_NR :
+ EXYNOS4_MAX_COMBINER_NR;
+ }
+ nr_irq = max_nr * MAX_IRQ_IN_COMBINER;
- /* Setup the Linux IRQ subsystem */
+ irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
+ if (IS_ERR_VALUE(irq_base)) {
+ irq_base = COMBINER_IRQ(0, 0);
+ pr_warning("%s: irq desc alloc failed. Continuing with %d as linux irq base\n", __func__, irq_base);
+ }
- for (i = irq_start; i < combiner_data[combiner_nr].irq_offset
- + MAX_IRQ_IN_COMBINER; i++) {
- irq_set_chip_and_handler(i, &combiner_chip, handle_level_irq);
- irq_set_chip_data(i, &combiner_data[combiner_nr]);
- set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
+ combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
+ &combiner_irq_domain_ops, &combiner_data);
+ if (WARN_ON(!combiner_irq_domain)) {
+ pr_warning("%s: irq domain init failed\n", __func__);
+ return;
+ }
+
+ for (i = 0; i < max_nr; i++) {
+ combiner_init_one(i, combiner_base + (i >> 2) * 0x10);
+ irq = IRQ_SPI(i);
+#ifdef CONFIG_OF
+ if (np)
+ irq = irq_of_parse_and_map(np, i);
+#endif
+ combiner_cascade_irq(i, irq);
}
}
#ifdef CONFIG_OF
+int __init combiner_of_init(struct device_node *np, struct device_node *parent)
+{
+ void __iomem *combiner_base;
+
+ combiner_base = of_iomap(np, 0);
+ if (!combiner_base) {
+ pr_err("%s: failed to map combiner registers\n", __func__);
+ return -ENXIO;
+ }
+
+ combiner_init(combiner_base, np);
+
+ return 0;
+}
+
static const struct of_device_id exynos4_dt_irq_match[] = {
{ .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
+ { .compatible = "samsung,exynos4210-combiner",
+ .data = combiner_of_init, },
{},
};
#endif
void __init exynos4_init_irq(void)
{
- int irq;
unsigned int gic_bank_offset;
gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;
@@ -528,12 +619,8 @@ void __init exynos4_init_irq(void)
of_irq_init(exynos4_dt_irq_match);
#endif
- for (irq = 0; irq < EXYNOS4_MAX_COMBINER_NR; irq++) {
-
- combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
- COMBINER_IRQ(irq, 0));
- combiner_cascade_irq(irq, IRQ_SPI(irq));
- }
+ if (!of_have_populated_dt())
+ combiner_init(S5P_VA_COMBINER_BASE, NULL);
/*
* The parameters of s5p_init_irq() are for VIC init.
@@ -545,18 +632,9 @@ void __init exynos4_init_irq(void)
void __init exynos5_init_irq(void)
{
- int irq;
-
#ifdef CONFIG_OF
of_irq_init(exynos4_dt_irq_match);
#endif
-
- for (irq = 0; irq < EXYNOS5_MAX_COMBINER_NR; irq++) {
- combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
- COMBINER_IRQ(irq, 0));
- combiner_cascade_irq(irq, IRQ_SPI(irq));
- }
-
/*
* The parameters of s5p_init_irq() are for VIC init.
* Theses parameters should be NULL and 0 because EXYNOS4
@@ -565,30 +643,18 @@ void __init exynos5_init_irq(void)
s5p_init_irq(NULL, 0);
}
-struct bus_type exynos4_subsys = {
- .name = "exynos4-core",
- .dev_name = "exynos4-core",
-};
-
-struct bus_type exynos5_subsys = {
- .name = "exynos5-core",
- .dev_name = "exynos5-core",
+struct bus_type exynos_subsys = {
+ .name = "exynos-core",
+ .dev_name = "exynos-core",
};
static struct device exynos4_dev = {
- .bus = &exynos4_subsys,
-};
-
-static struct device exynos5_dev = {
- .bus = &exynos5_subsys,
+ .bus = &exynos_subsys,
};
static int __init exynos_core_init(void)
{
- if (soc_is_exynos5250())
- return subsys_system_register(&exynos5_subsys, NULL);
- else
- return subsys_system_register(&exynos4_subsys, NULL);
+ return subsys_system_register(&exynos_subsys, NULL);
}
core_initcall(exynos_core_init);
@@ -675,10 +741,7 @@ static int __init exynos_init(void)
{
printk(KERN_INFO "EXYNOS: Initializing architecture\n");
- if (soc_is_exynos5250())
- return device_register(&exynos5_dev);
- else
- return device_register(&exynos4_dev);
+ return device_register(&exynos4_dev);
}
/* uart registration process */
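
Editor's sketch: the combiner rework drops the fixed per-combiner IRQ bases in favour of an irq_domain. The DT xlate hook packs a (combiner, pin) specifier into hwirq = combiner * MAX_IRQ_IN_COMBINER + pin, the map hook picks the owning combiner_chip_data with hw >> 3, and mask/unmask now key on hwirq % 32 instead of the Linux IRQ number. The standalone sketch below checks that arithmetic, assuming MAX_IRQ_IN_COMBINER is 8 (implied by the 0xff per-combiner mask in combiner_init_one()).

#include <stdio.h>

#define MAX_IRQ_IN_COMBINER	8	/* assumed: 8 inputs per combiner */

/* same packing as combiner_irq_domain_xlate(): intspec = <combiner pin> */
static unsigned long combiner_xlate(unsigned int combiner, unsigned int pin)
{
	return combiner * MAX_IRQ_IN_COMBINER + pin;
}

int main(void)
{
	unsigned int combiner = 23, pin = 3;	/* the EXYNOS5 MCT_G0 input, COMBINER_IRQ(23, 3) */
	unsigned long hw = combiner_xlate(combiner, pin);

	printf("intspec <%u %u> -> hwirq %lu\n", combiner, pin, hw);
	printf("combiner_data index (hw >> 3): %lu\n", hw >> 3);
	printf("bit in shared enable register (hw %% 32): %lu\n", hw % 32);
	return 0;
}
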
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
index 677b5467df1..aed2eeb0651 100644
--- a/arch/arm/mach-exynos/common.h
+++ b/arch/arm/mach-exynos/common.h
@@ -19,6 +19,13 @@ void exynos4_init_irq(void);
void exynos5_init_irq(void);
void exynos4_restart(char mode, const char *cmd);
void exynos5_restart(char mode, const char *cmd);
+void exynos_init_late(void);
+
+#ifdef CONFIG_PM_GENERIC_DOMAINS
+int exynos_pm_late_initcall(void);
+#else
+static inline int exynos_pm_late_initcall(void) { return 0; }
+#endif
#ifdef CONFIG_ARCH_EXYNOS4
void exynos4_register_clocks(void);
diff --git a/arch/arm/mach-exynos/dev-drm.c b/arch/arm/mach-exynos/dev-drm.c
new file mode 100644
index 00000000000..17c9c6ecc2e
--- /dev/null
+++ b/arch/arm/mach-exynos/dev-drm.c
@@ -0,0 +1,29 @@
+/*
+ * linux/arch/arm/mach-exynos/dev-drm.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * EXYNOS - core DRM device
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+
+#include <plat/devs.h>
+
+static u64 exynos_drm_dma_mask = DMA_BIT_MASK(32);
+
+struct platform_device exynos_device_drm = {
+ .name = "exynos-drm",
+ .dev = {
+ .dma_mask = &exynos_drm_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ }
+};
diff --git a/arch/arm/mach-exynos/dev-sysmmu.c b/arch/arm/mach-exynos/dev-sysmmu.c
index 781563fcb15..c5b1ea301df 100644
--- a/arch/arm/mach-exynos/dev-sysmmu.c
+++ b/arch/arm/mach-exynos/dev-sysmmu.c
@@ -1,9 +1,9 @@
-/* linux/arch/arm/mach-exynos4/dev-sysmmu.c
+/* linux/arch/arm/mach-exynos/dev-sysmmu.c
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * EXYNOS4 - System MMU support
+ * EXYNOS - System MMU support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -12,222 +12,263 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
-#include <linux/export.h>
+
+#include <plat/cpu.h>
#include <mach/map.h>
#include <mach/irqs.h>
#include <mach/sysmmu.h>
-#include <plat/s5p-clock.h>
-
-/* These names must be equal to the clock names in mach-exynos4/clock.c */
-const char *sysmmu_ips_name[EXYNOS4_SYSMMU_TOTAL_IPNUM] = {
- "SYSMMU_MDMA" ,
- "SYSMMU_SSS" ,
- "SYSMMU_FIMC0" ,
- "SYSMMU_FIMC1" ,
- "SYSMMU_FIMC2" ,
- "SYSMMU_FIMC3" ,
- "SYSMMU_JPEG" ,
- "SYSMMU_FIMD0" ,
- "SYSMMU_FIMD1" ,
- "SYSMMU_PCIe" ,
- "SYSMMU_G2D" ,
- "SYSMMU_ROTATOR",
- "SYSMMU_MDMA2" ,
- "SYSMMU_TV" ,
- "SYSMMU_MFC_L" ,
- "SYSMMU_MFC_R" ,
-};
-static struct resource exynos4_sysmmu_resource[] = {
- [0] = {
- .start = EXYNOS4_PA_SYSMMU_MDMA,
- .end = EXYNOS4_PA_SYSMMU_MDMA + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_SYSMMU_MDMA0_0,
- .end = IRQ_SYSMMU_MDMA0_0,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = EXYNOS4_PA_SYSMMU_SSS,
- .end = EXYNOS4_PA_SYSMMU_SSS + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [3] = {
- .start = IRQ_SYSMMU_SSS_0,
- .end = IRQ_SYSMMU_SSS_0,
- .flags = IORESOURCE_IRQ,
- },
- [4] = {
- .start = EXYNOS4_PA_SYSMMU_FIMC0,
- .end = EXYNOS4_PA_SYSMMU_FIMC0 + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [5] = {
- .start = IRQ_SYSMMU_FIMC0_0,
- .end = IRQ_SYSMMU_FIMC0_0,
- .flags = IORESOURCE_IRQ,
- },
- [6] = {
- .start = EXYNOS4_PA_SYSMMU_FIMC1,
- .end = EXYNOS4_PA_SYSMMU_FIMC1 + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [7] = {
- .start = IRQ_SYSMMU_FIMC1_0,
- .end = IRQ_SYSMMU_FIMC1_0,
- .flags = IORESOURCE_IRQ,
- },
- [8] = {
- .start = EXYNOS4_PA_SYSMMU_FIMC2,
- .end = EXYNOS4_PA_SYSMMU_FIMC2 + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [9] = {
- .start = IRQ_SYSMMU_FIMC2_0,
- .end = IRQ_SYSMMU_FIMC2_0,
- .flags = IORESOURCE_IRQ,
- },
- [10] = {
- .start = EXYNOS4_PA_SYSMMU_FIMC3,
- .end = EXYNOS4_PA_SYSMMU_FIMC3 + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [11] = {
- .start = IRQ_SYSMMU_FIMC3_0,
- .end = IRQ_SYSMMU_FIMC3_0,
- .flags = IORESOURCE_IRQ,
- },
- [12] = {
- .start = EXYNOS4_PA_SYSMMU_JPEG,
- .end = EXYNOS4_PA_SYSMMU_JPEG + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [13] = {
- .start = IRQ_SYSMMU_JPEG_0,
- .end = IRQ_SYSMMU_JPEG_0,
- .flags = IORESOURCE_IRQ,
- },
- [14] = {
- .start = EXYNOS4_PA_SYSMMU_FIMD0,
- .end = EXYNOS4_PA_SYSMMU_FIMD0 + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [15] = {
- .start = IRQ_SYSMMU_LCD0_M0_0,
- .end = IRQ_SYSMMU_LCD0_M0_0,
- .flags = IORESOURCE_IRQ,
- },
- [16] = {
- .start = EXYNOS4_PA_SYSMMU_FIMD1,
- .end = EXYNOS4_PA_SYSMMU_FIMD1 + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [17] = {
- .start = IRQ_SYSMMU_LCD1_M1_0,
- .end = IRQ_SYSMMU_LCD1_M1_0,
- .flags = IORESOURCE_IRQ,
- },
- [18] = {
- .start = EXYNOS4_PA_SYSMMU_PCIe,
- .end = EXYNOS4_PA_SYSMMU_PCIe + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [19] = {
- .start = IRQ_SYSMMU_PCIE_0,
- .end = IRQ_SYSMMU_PCIE_0,
- .flags = IORESOURCE_IRQ,
- },
- [20] = {
- .start = EXYNOS4_PA_SYSMMU_G2D,
- .end = EXYNOS4_PA_SYSMMU_G2D + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [21] = {
- .start = IRQ_SYSMMU_2D_0,
- .end = IRQ_SYSMMU_2D_0,
- .flags = IORESOURCE_IRQ,
- },
- [22] = {
- .start = EXYNOS4_PA_SYSMMU_ROTATOR,
- .end = EXYNOS4_PA_SYSMMU_ROTATOR + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [23] = {
- .start = IRQ_SYSMMU_ROTATOR_0,
- .end = IRQ_SYSMMU_ROTATOR_0,
- .flags = IORESOURCE_IRQ,
- },
- [24] = {
- .start = EXYNOS4_PA_SYSMMU_MDMA2,
- .end = EXYNOS4_PA_SYSMMU_MDMA2 + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [25] = {
- .start = IRQ_SYSMMU_MDMA1_0,
- .end = IRQ_SYSMMU_MDMA1_0,
- .flags = IORESOURCE_IRQ,
- },
- [26] = {
- .start = EXYNOS4_PA_SYSMMU_TV,
- .end = EXYNOS4_PA_SYSMMU_TV + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [27] = {
- .start = IRQ_SYSMMU_TV_M0_0,
- .end = IRQ_SYSMMU_TV_M0_0,
- .flags = IORESOURCE_IRQ,
- },
- [28] = {
- .start = EXYNOS4_PA_SYSMMU_MFC_L,
- .end = EXYNOS4_PA_SYSMMU_MFC_L + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [29] = {
- .start = IRQ_SYSMMU_MFC_M0_0,
- .end = IRQ_SYSMMU_MFC_M0_0,
- .flags = IORESOURCE_IRQ,
- },
- [30] = {
- .start = EXYNOS4_PA_SYSMMU_MFC_R,
- .end = EXYNOS4_PA_SYSMMU_MFC_R + SZ_64K - 1,
- .flags = IORESOURCE_MEM,
- },
- [31] = {
- .start = IRQ_SYSMMU_MFC_M1_0,
- .end = IRQ_SYSMMU_MFC_M1_0,
- .flags = IORESOURCE_IRQ,
- },
-};
+static u64 exynos_sysmmu_dma_mask = DMA_BIT_MASK(32);
+
+#define SYSMMU_PLATFORM_DEVICE(ipname, devid) \
+static struct sysmmu_platform_data platdata_##ipname = { \
+ .dbgname = #ipname, \
+}; \
+struct platform_device SYSMMU_PLATDEV(ipname) = \
+{ \
+ .name = SYSMMU_DEVNAME_BASE, \
+ .id = devid, \
+ .dev = { \
+ .dma_mask = &exynos_sysmmu_dma_mask, \
+ .coherent_dma_mask = DMA_BIT_MASK(32), \
+ .platform_data = &platdata_##ipname, \
+ }, \
+}
+
+SYSMMU_PLATFORM_DEVICE(mfc_l, 0);
+SYSMMU_PLATFORM_DEVICE(mfc_r, 1);
+SYSMMU_PLATFORM_DEVICE(tv, 2);
+SYSMMU_PLATFORM_DEVICE(jpeg, 3);
+SYSMMU_PLATFORM_DEVICE(rot, 4);
+SYSMMU_PLATFORM_DEVICE(fimc0, 5); /* fimc* and gsc* exist exclusively */
+SYSMMU_PLATFORM_DEVICE(fimc1, 6);
+SYSMMU_PLATFORM_DEVICE(fimc2, 7);
+SYSMMU_PLATFORM_DEVICE(fimc3, 8);
+SYSMMU_PLATFORM_DEVICE(gsc0, 5);
+SYSMMU_PLATFORM_DEVICE(gsc1, 6);
+SYSMMU_PLATFORM_DEVICE(gsc2, 7);
+SYSMMU_PLATFORM_DEVICE(gsc3, 8);
+SYSMMU_PLATFORM_DEVICE(isp, 9);
+SYSMMU_PLATFORM_DEVICE(fimd0, 10);
+SYSMMU_PLATFORM_DEVICE(fimd1, 11);
+SYSMMU_PLATFORM_DEVICE(camif0, 12);
+SYSMMU_PLATFORM_DEVICE(camif1, 13);
+SYSMMU_PLATFORM_DEVICE(2d, 14);
+
+#define SYSMMU_RESOURCE_NAME(core, ipname) sysmmures_##core##_##ipname
+
+#define SYSMMU_RESOURCE(core, ipname) \
+ static struct resource SYSMMU_RESOURCE_NAME(core, ipname)[] __initdata =
+
+#define DEFINE_SYSMMU_RESOURCE(core, mem, irq) \
+ DEFINE_RES_MEM_NAMED(core##_PA_SYSMMU_##mem, SZ_4K, #mem), \
+ DEFINE_RES_IRQ_NAMED(core##_IRQ_SYSMMU_##irq##_0, #mem)
+
+#define SYSMMU_RESOURCE_DEFINE(core, ipname, mem, irq) \
+ SYSMMU_RESOURCE(core, ipname) { \
+ DEFINE_SYSMMU_RESOURCE(core, mem, irq) \
+ }
-struct platform_device exynos4_device_sysmmu = {
- .name = "s5p-sysmmu",
- .id = 32,
- .num_resources = ARRAY_SIZE(exynos4_sysmmu_resource),
- .resource = exynos4_sysmmu_resource,
+struct sysmmu_resource_map {
+ struct platform_device *pdev;
+ struct resource *res;
+ u32 rnum;
+ struct device *pdd;
+ char *clocknames;
};
-EXPORT_SYMBOL(exynos4_device_sysmmu);
-static struct clk *sysmmu_clk[S5P_SYSMMU_TOTAL_IPNUM];
-void sysmmu_clk_init(struct device *dev, sysmmu_ips ips)
-{
- sysmmu_clk[ips] = clk_get(dev, sysmmu_ips_name[ips]);
- if (IS_ERR(sysmmu_clk[ips]))
- sysmmu_clk[ips] = NULL;
- else
- clk_put(sysmmu_clk[ips]);
+#define SYSMMU_RESOURCE_MAPPING(core, ipname, resname) { \
+ .pdev = &SYSMMU_PLATDEV(ipname), \
+ .res = SYSMMU_RESOURCE_NAME(EXYNOS##core, resname), \
+ .rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
+ .clocknames = SYSMMU_CLOCK_NAME, \
}
-void sysmmu_clk_enable(sysmmu_ips ips)
-{
- if (sysmmu_clk[ips])
- clk_enable(sysmmu_clk[ips]);
+#define SYSMMU_RESOURCE_MAPPING_MC(core, ipname, resname, pdata) { \
+ .pdev = &SYSMMU_PLATDEV(ipname), \
+ .res = SYSMMU_RESOURCE_NAME(EXYNOS##core, resname), \
+ .rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
+ .clocknames = SYSMMU_CLOCK_NAME "," SYSMMU_CLOCK_NAME2, \
+}
+
+#ifdef CONFIG_EXYNOS_DEV_PD
+#define SYSMMU_RESOURCE_MAPPING_PD(core, ipname, resname, pd) { \
+ .pdev = &SYSMMU_PLATDEV(ipname), \
+	.res = SYSMMU_RESOURCE_NAME(EXYNOS##core, resname),		\
+ .rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
+ .clocknames = SYSMMU_CLOCK_NAME, \
+ .pdd = &exynos##core##_device_pd[pd].dev, \
+}
+
+#define SYSMMU_RESOURCE_MAPPING_MCPD(core, ipname, resname, pd, pdata) {\
+ .pdev = &SYSMMU_PLATDEV(ipname), \
+	.res = SYSMMU_RESOURCE_NAME(EXYNOS##core, resname),		\
+ .rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
+ .clocknames = SYSMMU_CLOCK_NAME "," SYSMMU_CLOCK_NAME2, \
+ .pdd = &exynos##core##_device_pd[pd].dev, \
}
+#else
+#define SYSMMU_RESOURCE_MAPPING_PD(core, ipname, resname, pd) \
+ SYSMMU_RESOURCE_MAPPING(core, ipname, resname)
+#define SYSMMU_RESOURCE_MAPPING_MCPD(core, ipname, resname, pd, pdata) \
+ SYSMMU_RESOURCE_MAPPING_MC(core, ipname, resname, pdata)
+
+#endif /* CONFIG_EXYNOS_DEV_PD */
+
+#ifdef CONFIG_ARCH_EXYNOS4
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc0, FIMC0, FIMC0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc1, FIMC1, FIMC1);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc2, FIMC2, FIMC2);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc3, FIMC3, FIMC3);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, jpeg, JPEG, JPEG);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, 2d, G2D, 2D);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, tv, TV, TV_M0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, 2d_acp, 2D_ACP, 2D);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, rot, ROTATOR, ROTATOR);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimd0, FIMD0, LCD0_M0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimd1, FIMD1, LCD1_M1);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, flite0, FIMC_LITE0, FIMC_LITE0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, flite1, FIMC_LITE1, FIMC_LITE1);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, mfc_r, MFC_R, MFC_M0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS4, mfc_l, MFC_L, MFC_M1);
+SYSMMU_RESOURCE(EXYNOS4, isp) {
+ DEFINE_SYSMMU_RESOURCE(EXYNOS4, FIMC_ISP, FIMC_ISP),
+ DEFINE_SYSMMU_RESOURCE(EXYNOS4, FIMC_DRC, FIMC_DRC),
+ DEFINE_SYSMMU_RESOURCE(EXYNOS4, FIMC_FD, FIMC_FD),
+ DEFINE_SYSMMU_RESOURCE(EXYNOS4, ISPCPU, FIMC_CX),
+};
+
+static struct sysmmu_resource_map sysmmu_resmap4[] __initdata = {
+ SYSMMU_RESOURCE_MAPPING_PD(4, fimc0, fimc0, PD_CAM),
+ SYSMMU_RESOURCE_MAPPING_PD(4, fimc1, fimc1, PD_CAM),
+ SYSMMU_RESOURCE_MAPPING_PD(4, fimc2, fimc2, PD_CAM),
+ SYSMMU_RESOURCE_MAPPING_PD(4, fimc3, fimc3, PD_CAM),
+ SYSMMU_RESOURCE_MAPPING_PD(4, tv, tv, PD_TV),
+ SYSMMU_RESOURCE_MAPPING_PD(4, mfc_r, mfc_r, PD_MFC),
+ SYSMMU_RESOURCE_MAPPING_PD(4, mfc_l, mfc_l, PD_MFC),
+ SYSMMU_RESOURCE_MAPPING_PD(4, rot, rot, PD_LCD0),
+ SYSMMU_RESOURCE_MAPPING_PD(4, jpeg, jpeg, PD_CAM),
+ SYSMMU_RESOURCE_MAPPING_PD(4, fimd0, fimd0, PD_LCD0),
+};
+
+static struct sysmmu_resource_map sysmmu_resmap4210[] __initdata = {
+ SYSMMU_RESOURCE_MAPPING_PD(4, 2d, 2d, PD_LCD0),
+ SYSMMU_RESOURCE_MAPPING_PD(4, fimd1, fimd1, PD_LCD1),
+};
+
+static struct sysmmu_resource_map sysmmu_resmap4212[] __initdata = {
+ SYSMMU_RESOURCE_MAPPING(4, 2d, 2d_acp),
+ SYSMMU_RESOURCE_MAPPING_PD(4, camif0, flite0, PD_ISP),
+ SYSMMU_RESOURCE_MAPPING_PD(4, camif1, flite1, PD_ISP),
+ SYSMMU_RESOURCE_MAPPING_PD(4, isp, isp, PD_ISP),
+};
+#endif /* CONFIG_ARCH_EXYNOS4 */
-void sysmmu_clk_disable(sysmmu_ips ips)
+#ifdef CONFIG_ARCH_EXYNOS5
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, jpeg, JPEG, JPEG);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, fimd1, FIMD1, FIMD1);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, 2d, 2D, 2D);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, rot, ROTATOR, ROTATOR);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, tv, TV, TV);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, flite0, LITE0, LITE0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, flite1, LITE1, LITE1);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc0, GSC0, GSC0);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc1, GSC1, GSC1);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc2, GSC2, GSC2);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc3, GSC3, GSC3);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, mfc_r, MFC_R, MFC_R);
+SYSMMU_RESOURCE_DEFINE(EXYNOS5, mfc_l, MFC_L, MFC_L);
+SYSMMU_RESOURCE(EXYNOS5, isp) {
+ DEFINE_SYSMMU_RESOURCE(EXYNOS5, ISP, ISP),
+ DEFINE_SYSMMU_RESOURCE(EXYNOS5, DRC, DRC),
+ DEFINE_SYSMMU_RESOURCE(EXYNOS5, FD, FD),
+ DEFINE_SYSMMU_RESOURCE(EXYNOS5, ISPCPU, MCUISP),
+ DEFINE_SYSMMU_RESOURCE(EXYNOS5, SCALERC, SCALERCISP),
+ DEFINE_SYSMMU_RESOURCE(EXYNOS5, SCALERP, SCALERPISP),
+ DEFINE_SYSMMU_RESOURCE(EXYNOS5, ODC, ODC),
+ DEFINE_SYSMMU_RESOURCE(EXYNOS5, DIS0, DIS0),
+ DEFINE_SYSMMU_RESOURCE(EXYNOS5, DIS1, DIS1),
+ DEFINE_SYSMMU_RESOURCE(EXYNOS5, 3DNR, 3DNR),
+};
+
+static struct sysmmu_resource_map sysmmu_resmap5[] __initdata = {
+ SYSMMU_RESOURCE_MAPPING(5, jpeg, jpeg),
+ SYSMMU_RESOURCE_MAPPING(5, fimd1, fimd1),
+ SYSMMU_RESOURCE_MAPPING(5, 2d, 2d),
+ SYSMMU_RESOURCE_MAPPING(5, rot, rot),
+ SYSMMU_RESOURCE_MAPPING_PD(5, tv, tv, PD_DISP1),
+ SYSMMU_RESOURCE_MAPPING_PD(5, camif0, flite0, PD_GSCL),
+ SYSMMU_RESOURCE_MAPPING_PD(5, camif1, flite1, PD_GSCL),
+ SYSMMU_RESOURCE_MAPPING_PD(5, gsc0, gsc0, PD_GSCL),
+ SYSMMU_RESOURCE_MAPPING_PD(5, gsc1, gsc1, PD_GSCL),
+ SYSMMU_RESOURCE_MAPPING_PD(5, gsc2, gsc2, PD_GSCL),
+ SYSMMU_RESOURCE_MAPPING_PD(5, gsc3, gsc3, PD_GSCL),
+ SYSMMU_RESOURCE_MAPPING_PD(5, mfc_r, mfc_r, PD_MFC),
+ SYSMMU_RESOURCE_MAPPING_PD(5, mfc_l, mfc_l, PD_MFC),
+ SYSMMU_RESOURCE_MAPPING_MCPD(5, isp, isp, PD_ISP, mc_platdata),
+};
+#endif /* CONFIG_ARCH_EXYNOS5 */
+
+static int __init init_sysmmu_platform_device(void)
{
- if (sysmmu_clk[ips])
- clk_disable(sysmmu_clk[ips]);
+ int i, j;
+ struct sysmmu_resource_map *resmap[2] = {NULL, NULL};
+ int nmap[2] = {0, 0};
+
+#ifdef CONFIG_ARCH_EXYNOS5
+ if (soc_is_exynos5250()) {
+ resmap[0] = sysmmu_resmap5;
+ nmap[0] = ARRAY_SIZE(sysmmu_resmap5);
+ nmap[1] = 0;
+ }
+#endif
+
+#ifdef CONFIG_ARCH_EXYNOS4
+ if (resmap[0] == NULL) {
+ resmap[0] = sysmmu_resmap4;
+ nmap[0] = ARRAY_SIZE(sysmmu_resmap4);
+ }
+
+ if (soc_is_exynos4210()) {
+ resmap[1] = sysmmu_resmap4210;
+ nmap[1] = ARRAY_SIZE(sysmmu_resmap4210);
+ }
+
+ if (soc_is_exynos4412() || soc_is_exynos4212()) {
+ resmap[1] = sysmmu_resmap4212;
+ nmap[1] = ARRAY_SIZE(sysmmu_resmap4212);
+ }
+#endif
+
+ for (j = 0; j < 2; j++) {
+ for (i = 0; i < nmap[j]; i++) {
+ struct sysmmu_resource_map *map;
+ struct sysmmu_platform_data *platdata;
+
+ map = &resmap[j][i];
+
+ map->pdev->dev.parent = map->pdd;
+
+ platdata = map->pdev->dev.platform_data;
+ platdata->clockname = map->clocknames;
+
+ if (platform_device_add_resources(map->pdev, map->res,
+ map->rnum)) {
+ pr_err("%s: Failed to add device resources for "
+ "%s.%d\n", __func__,
+ map->pdev->name, map->pdev->id);
+ continue;
+ }
+
+ if (platform_device_register(map->pdev)) {
+ pr_err("%s: Failed to register %s.%d\n",
+ __func__, map->pdev->name,
+ map->pdev->id);
+ }
+ }
+ }
+
+ return 0;
}
+arch_initcall(init_sysmmu_platform_device);
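
Editor's sketch: each System MMU instance is now its own platform device. SYSMMU_PLATFORM_DEVICE() pastes the IP name into a per-instance platform_data/platform_device pair, and SYSMMU_PLATDEV(ipname) is the resulting symbol, all sharing the "exynos-sysmmu" name with .id selecting the instance. The standalone program below reproduces the token pasting with trimmed stand-in structs (not the kernel definitions) so the naming scheme can be checked in isolation.

#include <stdio.h>

/* trimmed stand-ins, not the kernel structures */
struct sysmmu_platform_data { const char *dbgname; };
struct platform_device {
	const char *name;
	int id;
	struct sysmmu_platform_data *platform_data;
};

#define SYSMMU_DEVNAME_BASE	"exynos-sysmmu"
#define SYSMMU_PLATDEV(ipname)	exynos_device_sysmmu_##ipname

#define SYSMMU_PLATFORM_DEVICE(ipname, devid)			\
static struct sysmmu_platform_data platdata_##ipname = {	\
	.dbgname = #ipname,					\
};								\
struct platform_device SYSMMU_PLATDEV(ipname) = {		\
	.name = SYSMMU_DEVNAME_BASE,				\
	.id = devid,						\
	.platform_data = &platdata_##ipname,			\
}

SYSMMU_PLATFORM_DEVICE(mfc_l, 0);	/* => struct platform_device exynos_device_sysmmu_mfc_l */
SYSMMU_PLATFORM_DEVICE(fimd0, 10);

int main(void)
{
	printf("%s.%d (dbgname %s)\n", SYSMMU_PLATDEV(mfc_l).name,
	       SYSMMU_PLATDEV(mfc_l).id, SYSMMU_PLATDEV(mfc_l).platform_data->dbgname);
	printf("%s.%d (dbgname %s)\n", SYSMMU_PLATDEV(fimd0).name,
	       SYSMMU_PLATDEV(fimd0).id, SYSMMU_PLATDEV(fimd0).platform_data->dbgname);
	return 0;
}
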
diff --git a/arch/arm/mach-exynos/dma.c b/arch/arm/mach-exynos/dma.c
index 69aaa450320..f60b66dbcf8 100644
--- a/arch/arm/mach-exynos/dma.c
+++ b/arch/arm/mach-exynos/dma.c
@@ -103,10 +103,45 @@ static u8 exynos4212_pdma0_peri[] = {
DMACH_MIPI_HSI5,
};
-struct dma_pl330_platdata exynos4_pdma0_pdata;
+static u8 exynos5250_pdma0_peri[] = {
+ DMACH_PCM0_RX,
+ DMACH_PCM0_TX,
+ DMACH_PCM2_RX,
+ DMACH_PCM2_TX,
+ DMACH_SPI0_RX,
+ DMACH_SPI0_TX,
+ DMACH_SPI2_RX,
+ DMACH_SPI2_TX,
+ DMACH_I2S0S_TX,
+ DMACH_I2S0_RX,
+ DMACH_I2S0_TX,
+ DMACH_I2S2_RX,
+ DMACH_I2S2_TX,
+ DMACH_UART0_RX,
+ DMACH_UART0_TX,
+ DMACH_UART2_RX,
+ DMACH_UART2_TX,
+ DMACH_UART4_RX,
+ DMACH_UART4_TX,
+ DMACH_SLIMBUS0_RX,
+ DMACH_SLIMBUS0_TX,
+ DMACH_SLIMBUS2_RX,
+ DMACH_SLIMBUS2_TX,
+ DMACH_SLIMBUS4_RX,
+ DMACH_SLIMBUS4_TX,
+ DMACH_AC97_MICIN,
+ DMACH_AC97_PCMIN,
+ DMACH_AC97_PCMOUT,
+ DMACH_MIPI_HSI0,
+ DMACH_MIPI_HSI2,
+ DMACH_MIPI_HSI4,
+ DMACH_MIPI_HSI6,
+};
+
+static struct dma_pl330_platdata exynos_pdma0_pdata;
-static AMBA_AHB_DEVICE(exynos4_pdma0, "dma-pl330.0", 0x00041330,
- EXYNOS4_PA_PDMA0, {EXYNOS4_IRQ_PDMA0}, &exynos4_pdma0_pdata);
+static AMBA_AHB_DEVICE(exynos_pdma0, "dma-pl330.0", 0x00041330,
+ EXYNOS4_PA_PDMA0, {EXYNOS4_IRQ_PDMA0}, &exynos_pdma0_pdata);
static u8 exynos4210_pdma1_peri[] = {
DMACH_PCM0_RX,
@@ -169,10 +204,45 @@ static u8 exynos4212_pdma1_peri[] = {
DMACH_MIPI_HSI7,
};
-static struct dma_pl330_platdata exynos4_pdma1_pdata;
+static u8 exynos5250_pdma1_peri[] = {
+ DMACH_PCM0_RX,
+ DMACH_PCM0_TX,
+ DMACH_PCM1_RX,
+ DMACH_PCM1_TX,
+ DMACH_SPI1_RX,
+ DMACH_SPI1_TX,
+ DMACH_PWM,
+ DMACH_SPDIF,
+ DMACH_I2S0S_TX,
+ DMACH_I2S0_RX,
+ DMACH_I2S0_TX,
+ DMACH_I2S1_RX,
+ DMACH_I2S1_TX,
+ DMACH_UART0_RX,
+ DMACH_UART0_TX,
+ DMACH_UART1_RX,
+ DMACH_UART1_TX,
+ DMACH_UART3_RX,
+ DMACH_UART3_TX,
+ DMACH_SLIMBUS1_RX,
+ DMACH_SLIMBUS1_TX,
+ DMACH_SLIMBUS3_RX,
+ DMACH_SLIMBUS3_TX,
+ DMACH_SLIMBUS5_RX,
+ DMACH_SLIMBUS5_TX,
+ DMACH_SLIMBUS0AUX_RX,
+ DMACH_SLIMBUS0AUX_TX,
+ DMACH_DISP1,
+ DMACH_MIPI_HSI1,
+ DMACH_MIPI_HSI3,
+ DMACH_MIPI_HSI5,
+ DMACH_MIPI_HSI7,
+};
-static AMBA_AHB_DEVICE(exynos4_pdma1, "dma-pl330.1", 0x00041330,
- EXYNOS4_PA_PDMA1, {EXYNOS4_IRQ_PDMA1}, &exynos4_pdma1_pdata);
+static struct dma_pl330_platdata exynos_pdma1_pdata;
+
+static AMBA_AHB_DEVICE(exynos_pdma1, "dma-pl330.1", 0x00041330,
+ EXYNOS4_PA_PDMA1, {EXYNOS4_IRQ_PDMA1}, &exynos_pdma1_pdata);
static u8 mdma_peri[] = {
DMACH_MTOM_0,
@@ -185,46 +255,63 @@ static u8 mdma_peri[] = {
DMACH_MTOM_7,
};
-static struct dma_pl330_platdata exynos4_mdma1_pdata = {
+static struct dma_pl330_platdata exynos_mdma1_pdata = {
.nr_valid_peri = ARRAY_SIZE(mdma_peri),
.peri_id = mdma_peri,
};
-static AMBA_AHB_DEVICE(exynos4_mdma1, "dma-pl330.2", 0x00041330,
- EXYNOS4_PA_MDMA1, {EXYNOS4_IRQ_MDMA1}, &exynos4_mdma1_pdata);
+static AMBA_AHB_DEVICE(exynos_mdma1, "dma-pl330.2", 0x00041330,
+ EXYNOS4_PA_MDMA1, {EXYNOS4_IRQ_MDMA1}, &exynos_mdma1_pdata);
-static int __init exynos4_dma_init(void)
+static int __init exynos_dma_init(void)
{
if (of_have_populated_dt())
return 0;
if (soc_is_exynos4210()) {
- exynos4_pdma0_pdata.nr_valid_peri =
+ exynos_pdma0_pdata.nr_valid_peri =
ARRAY_SIZE(exynos4210_pdma0_peri);
- exynos4_pdma0_pdata.peri_id = exynos4210_pdma0_peri;
- exynos4_pdma1_pdata.nr_valid_peri =
+ exynos_pdma0_pdata.peri_id = exynos4210_pdma0_peri;
+ exynos_pdma1_pdata.nr_valid_peri =
ARRAY_SIZE(exynos4210_pdma1_peri);
- exynos4_pdma1_pdata.peri_id = exynos4210_pdma1_peri;
+ exynos_pdma1_pdata.peri_id = exynos4210_pdma1_peri;
} else if (soc_is_exynos4212() || soc_is_exynos4412()) {
- exynos4_pdma0_pdata.nr_valid_peri =
+ exynos_pdma0_pdata.nr_valid_peri =
ARRAY_SIZE(exynos4212_pdma0_peri);
- exynos4_pdma0_pdata.peri_id = exynos4212_pdma0_peri;
- exynos4_pdma1_pdata.nr_valid_peri =
+ exynos_pdma0_pdata.peri_id = exynos4212_pdma0_peri;
+ exynos_pdma1_pdata.nr_valid_peri =
ARRAY_SIZE(exynos4212_pdma1_peri);
- exynos4_pdma1_pdata.peri_id = exynos4212_pdma1_peri;
+ exynos_pdma1_pdata.peri_id = exynos4212_pdma1_peri;
+ } else if (soc_is_exynos5250()) {
+ exynos_pdma0_pdata.nr_valid_peri =
+ ARRAY_SIZE(exynos5250_pdma0_peri);
+ exynos_pdma0_pdata.peri_id = exynos5250_pdma0_peri;
+ exynos_pdma1_pdata.nr_valid_peri =
+ ARRAY_SIZE(exynos5250_pdma1_peri);
+ exynos_pdma1_pdata.peri_id = exynos5250_pdma1_peri;
+
+ exynos_pdma0_device.res.start = EXYNOS5_PA_PDMA0;
+ exynos_pdma0_device.res.end = EXYNOS5_PA_PDMA0 + SZ_4K;
+ exynos_pdma0_device.irq[0] = EXYNOS5_IRQ_PDMA0;
+ exynos_pdma1_device.res.start = EXYNOS5_PA_PDMA1;
+ exynos_pdma1_device.res.end = EXYNOS5_PA_PDMA1 + SZ_4K;
+		exynos_pdma1_device.irq[0] = EXYNOS5_IRQ_PDMA1;
+ exynos_mdma1_device.res.start = EXYNOS5_PA_MDMA1;
+ exynos_mdma1_device.res.end = EXYNOS5_PA_MDMA1 + SZ_4K;
+		exynos_mdma1_device.irq[0] = EXYNOS5_IRQ_MDMA1;
}
- dma_cap_set(DMA_SLAVE, exynos4_pdma0_pdata.cap_mask);
- dma_cap_set(DMA_CYCLIC, exynos4_pdma0_pdata.cap_mask);
- amba_device_register(&exynos4_pdma0_device, &iomem_resource);
+ dma_cap_set(DMA_SLAVE, exynos_pdma0_pdata.cap_mask);
+ dma_cap_set(DMA_CYCLIC, exynos_pdma0_pdata.cap_mask);
+ amba_device_register(&exynos_pdma0_device, &iomem_resource);
- dma_cap_set(DMA_SLAVE, exynos4_pdma1_pdata.cap_mask);
- dma_cap_set(DMA_CYCLIC, exynos4_pdma1_pdata.cap_mask);
- amba_device_register(&exynos4_pdma1_device, &iomem_resource);
+ dma_cap_set(DMA_SLAVE, exynos_pdma1_pdata.cap_mask);
+ dma_cap_set(DMA_CYCLIC, exynos_pdma1_pdata.cap_mask);
+ amba_device_register(&exynos_pdma1_device, &iomem_resource);
- dma_cap_set(DMA_MEMCPY, exynos4_mdma1_pdata.cap_mask);
- amba_device_register(&exynos4_mdma1_device, &iomem_resource);
+ dma_cap_set(DMA_MEMCPY, exynos_mdma1_pdata.cap_mask);
+ amba_device_register(&exynos_mdma1_device, &iomem_resource);
return 0;
}
-arch_initcall(exynos4_dma_init);
+arch_initcall(exynos_dma_init);
diff --git a/arch/arm/mach-exynos/include/mach/gpio.h b/arch/arm/mach-exynos/include/mach/gpio.h
index d7498afe036..eb24f1eb8e3 100644
--- a/arch/arm/mach-exynos/include/mach/gpio.h
+++ b/arch/arm/mach-exynos/include/mach/gpio.h
@@ -153,10 +153,11 @@ enum exynos4_gpio_number {
#define EXYNOS5_GPIO_B2_NR (4)
#define EXYNOS5_GPIO_B3_NR (4)
#define EXYNOS5_GPIO_C0_NR (7)
-#define EXYNOS5_GPIO_C1_NR (7)
+#define EXYNOS5_GPIO_C1_NR (4)
#define EXYNOS5_GPIO_C2_NR (7)
#define EXYNOS5_GPIO_C3_NR (7)
-#define EXYNOS5_GPIO_D0_NR (8)
+#define EXYNOS5_GPIO_C4_NR (7)
+#define EXYNOS5_GPIO_D0_NR (4)
#define EXYNOS5_GPIO_D1_NR (8)
#define EXYNOS5_GPIO_Y0_NR (6)
#define EXYNOS5_GPIO_Y1_NR (4)
@@ -199,7 +200,8 @@ enum exynos5_gpio_number {
EXYNOS5_GPIO_C1_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C0),
EXYNOS5_GPIO_C2_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C1),
EXYNOS5_GPIO_C3_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C2),
- EXYNOS5_GPIO_D0_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C3),
+ EXYNOS5_GPIO_C4_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C3),
+ EXYNOS5_GPIO_D0_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C4),
EXYNOS5_GPIO_D1_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_D0),
EXYNOS5_GPIO_Y0_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_D1),
EXYNOS5_GPIO_Y1_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_Y0),
@@ -242,6 +244,7 @@ enum exynos5_gpio_number {
#define EXYNOS5_GPC1(_nr) (EXYNOS5_GPIO_C1_START + (_nr))
#define EXYNOS5_GPC2(_nr) (EXYNOS5_GPIO_C2_START + (_nr))
#define EXYNOS5_GPC3(_nr) (EXYNOS5_GPIO_C3_START + (_nr))
+#define EXYNOS5_GPC4(_nr) (EXYNOS5_GPIO_C4_START + (_nr))
#define EXYNOS5_GPD0(_nr) (EXYNOS5_GPIO_D0_START + (_nr))
#define EXYNOS5_GPD1(_nr) (EXYNOS5_GPIO_D1_START + (_nr))
#define EXYNOS5_GPY0(_nr) (EXYNOS5_GPIO_Y0_START + (_nr))
diff --git a/arch/arm/mach-exynos/include/mach/irqs.h b/arch/arm/mach-exynos/include/mach/irqs.h
index c02dae7bf4a..7a4b4789eb7 100644
--- a/arch/arm/mach-exynos/include/mach/irqs.h
+++ b/arch/arm/mach-exynos/include/mach/irqs.h
@@ -154,6 +154,13 @@
#define EXYNOS4_IRQ_SYSMMU_MFC_M1_0 COMBINER_IRQ(5, 6)
#define EXYNOS4_IRQ_SYSMMU_PCIE_0 COMBINER_IRQ(5, 7)
+#define EXYNOS4_IRQ_SYSMMU_FIMC_LITE0_0 COMBINER_IRQ(16, 0)
+#define EXYNOS4_IRQ_SYSMMU_FIMC_LITE1_0 COMBINER_IRQ(16, 1)
+#define EXYNOS4_IRQ_SYSMMU_FIMC_ISP_0 COMBINER_IRQ(16, 2)
+#define EXYNOS4_IRQ_SYSMMU_FIMC_DRC_0 COMBINER_IRQ(16, 3)
+#define EXYNOS4_IRQ_SYSMMU_FIMC_FD_0 COMBINER_IRQ(16, 4)
+#define EXYNOS4_IRQ_SYSMMU_FIMC_CX_0 COMBINER_IRQ(16, 5)
+
#define EXYNOS4_IRQ_FIMD0_FIFO COMBINER_IRQ(11, 0)
#define EXYNOS4_IRQ_FIMD0_VSYNC COMBINER_IRQ(11, 1)
#define EXYNOS4_IRQ_FIMD0_SYSTEM COMBINER_IRQ(11, 2)
@@ -221,24 +228,6 @@
#define IRQ_KEYPAD EXYNOS4_IRQ_KEYPAD
#define IRQ_PMU EXYNOS4_IRQ_PMU
-#define IRQ_SYSMMU_MDMA0_0 EXYNOS4_IRQ_SYSMMU_MDMA0_0
-#define IRQ_SYSMMU_SSS_0 EXYNOS4_IRQ_SYSMMU_SSS_0
-#define IRQ_SYSMMU_FIMC0_0 EXYNOS4_IRQ_SYSMMU_FIMC0_0
-#define IRQ_SYSMMU_FIMC1_0 EXYNOS4_IRQ_SYSMMU_FIMC1_0
-#define IRQ_SYSMMU_FIMC2_0 EXYNOS4_IRQ_SYSMMU_FIMC2_0
-#define IRQ_SYSMMU_FIMC3_0 EXYNOS4_IRQ_SYSMMU_FIMC3_0
-#define IRQ_SYSMMU_JPEG_0 EXYNOS4_IRQ_SYSMMU_JPEG_0
-#define IRQ_SYSMMU_2D_0 EXYNOS4_IRQ_SYSMMU_2D_0
-
-#define IRQ_SYSMMU_ROTATOR_0 EXYNOS4_IRQ_SYSMMU_ROTATOR_0
-#define IRQ_SYSMMU_MDMA1_0 EXYNOS4_IRQ_SYSMMU_MDMA1_0
-#define IRQ_SYSMMU_LCD0_M0_0 EXYNOS4_IRQ_SYSMMU_LCD0_M0_0
-#define IRQ_SYSMMU_LCD1_M1_0 EXYNOS4_IRQ_SYSMMU_LCD1_M1_0
-#define IRQ_SYSMMU_TV_M0_0 EXYNOS4_IRQ_SYSMMU_TV_M0_0
-#define IRQ_SYSMMU_MFC_M0_0 EXYNOS4_IRQ_SYSMMU_MFC_M0_0
-#define IRQ_SYSMMU_MFC_M1_0 EXYNOS4_IRQ_SYSMMU_MFC_M1_0
-#define IRQ_SYSMMU_PCIE_0 EXYNOS4_IRQ_SYSMMU_PCIE_0
-
#define IRQ_FIMD0_FIFO EXYNOS4_IRQ_FIMD0_FIFO
#define IRQ_FIMD0_VSYNC EXYNOS4_IRQ_FIMD0_VSYNC
#define IRQ_FIMD0_SYSTEM EXYNOS4_IRQ_FIMD0_SYSTEM
@@ -298,6 +287,7 @@
#define EXYNOS5_IRQ_MIPICSI1 IRQ_SPI(80)
#define EXYNOS5_IRQ_EFNFCON_DMA_ABORT IRQ_SPI(81)
#define EXYNOS5_IRQ_MIPIDSI0 IRQ_SPI(82)
+#define EXYNOS5_IRQ_WDT_IOP IRQ_SPI(83)
#define EXYNOS5_IRQ_ROTATOR IRQ_SPI(84)
#define EXYNOS5_IRQ_GSC0 IRQ_SPI(85)
#define EXYNOS5_IRQ_GSC1 IRQ_SPI(86)
@@ -306,8 +296,8 @@
#define EXYNOS5_IRQ_JPEG IRQ_SPI(89)
#define EXYNOS5_IRQ_EFNFCON_DMA IRQ_SPI(90)
#define EXYNOS5_IRQ_2D IRQ_SPI(91)
-#define EXYNOS5_IRQ_SFMC0 IRQ_SPI(92)
-#define EXYNOS5_IRQ_SFMC1 IRQ_SPI(93)
+#define EXYNOS5_IRQ_EFNFCON_0 IRQ_SPI(92)
+#define EXYNOS5_IRQ_EFNFCON_1 IRQ_SPI(93)
#define EXYNOS5_IRQ_MIXER IRQ_SPI(94)
#define EXYNOS5_IRQ_HDMI IRQ_SPI(95)
#define EXYNOS5_IRQ_MFC IRQ_SPI(96)
@@ -321,7 +311,7 @@
#define EXYNOS5_IRQ_PCM2 IRQ_SPI(104)
#define EXYNOS5_IRQ_SPDIF IRQ_SPI(105)
#define EXYNOS5_IRQ_ADC0 IRQ_SPI(106)
-
+#define EXYNOS5_IRQ_ADC1 IRQ_SPI(107)
#define EXYNOS5_IRQ_SATA_PHY IRQ_SPI(108)
#define EXYNOS5_IRQ_SATA_PMEMREQ IRQ_SPI(109)
#define EXYNOS5_IRQ_CAM_C IRQ_SPI(110)
@@ -330,8 +320,9 @@
#define EXYNOS5_IRQ_DP1_INTP1 IRQ_SPI(113)
#define EXYNOS5_IRQ_CEC IRQ_SPI(114)
#define EXYNOS5_IRQ_SATA IRQ_SPI(115)
-#define EXYNOS5_IRQ_NFCON IRQ_SPI(116)
+#define EXYNOS5_IRQ_MCT_L0 IRQ_SPI(120)
+#define EXYNOS5_IRQ_MCT_L1 IRQ_SPI(121)
#define EXYNOS5_IRQ_MMC44 IRQ_SPI(123)
#define EXYNOS5_IRQ_MDMA1 IRQ_SPI(124)
#define EXYNOS5_IRQ_FIMC_LITE0 IRQ_SPI(125)
@@ -339,7 +330,6 @@
#define EXYNOS5_IRQ_RP_TIMER IRQ_SPI(127)
#define EXYNOS5_IRQ_PMU COMBINER_IRQ(1, 2)
-#define EXYNOS5_IRQ_PMU_CPU1 COMBINER_IRQ(1, 6)
#define EXYNOS5_IRQ_SYSMMU_GSC0_0 COMBINER_IRQ(2, 0)
#define EXYNOS5_IRQ_SYSMMU_GSC0_1 COMBINER_IRQ(2, 1)
@@ -350,6 +340,8 @@
#define EXYNOS5_IRQ_SYSMMU_GSC3_0 COMBINER_IRQ(2, 6)
#define EXYNOS5_IRQ_SYSMMU_GSC3_1 COMBINER_IRQ(2, 7)
+#define EXYNOS5_IRQ_SYSMMU_LITE2_0 COMBINER_IRQ(3, 0)
+#define EXYNOS5_IRQ_SYSMMU_LITE2_1 COMBINER_IRQ(3, 1)
#define EXYNOS5_IRQ_SYSMMU_FIMD1_0 COMBINER_IRQ(3, 2)
#define EXYNOS5_IRQ_SYSMMU_FIMD1_1 COMBINER_IRQ(3, 3)
#define EXYNOS5_IRQ_SYSMMU_LITE0_0 COMBINER_IRQ(3, 4)
@@ -373,8 +365,8 @@
#define EXYNOS5_IRQ_SYSMMU_ARM_0 COMBINER_IRQ(6, 0)
#define EXYNOS5_IRQ_SYSMMU_ARM_1 COMBINER_IRQ(6, 1)
-#define EXYNOS5_IRQ_SYSMMU_MFC_L_0 COMBINER_IRQ(6, 2)
-#define EXYNOS5_IRQ_SYSMMU_MFC_L_1 COMBINER_IRQ(6, 3)
+#define EXYNOS5_IRQ_SYSMMU_MFC_R_0 COMBINER_IRQ(6, 2)
+#define EXYNOS5_IRQ_SYSMMU_MFC_R_1 COMBINER_IRQ(6, 3)
#define EXYNOS5_IRQ_SYSMMU_RTIC_0 COMBINER_IRQ(6, 4)
#define EXYNOS5_IRQ_SYSMMU_RTIC_1 COMBINER_IRQ(6, 5)
#define EXYNOS5_IRQ_SYSMMU_SSS_0 COMBINER_IRQ(6, 6)
@@ -386,11 +378,9 @@
#define EXYNOS5_IRQ_SYSMMU_MDMA1_1 COMBINER_IRQ(7, 3)
#define EXYNOS5_IRQ_SYSMMU_TV_0 COMBINER_IRQ(7, 4)
#define EXYNOS5_IRQ_SYSMMU_TV_1 COMBINER_IRQ(7, 5)
-#define EXYNOS5_IRQ_SYSMMU_GPSX_0 COMBINER_IRQ(7, 6)
-#define EXYNOS5_IRQ_SYSMMU_GPSX_1 COMBINER_IRQ(7, 7)
-#define EXYNOS5_IRQ_SYSMMU_MFC_R_0 COMBINER_IRQ(8, 5)
-#define EXYNOS5_IRQ_SYSMMU_MFC_R_1 COMBINER_IRQ(8, 6)
+#define EXYNOS5_IRQ_SYSMMU_MFC_L_0 COMBINER_IRQ(8, 5)
+#define EXYNOS5_IRQ_SYSMMU_MFC_L_1 COMBINER_IRQ(8, 6)
#define EXYNOS5_IRQ_SYSMMU_DIS1_0 COMBINER_IRQ(9, 4)
#define EXYNOS5_IRQ_SYSMMU_DIS1_1 COMBINER_IRQ(9, 5)
@@ -406,17 +396,24 @@
#define EXYNOS5_IRQ_SYSMMU_DRC_0 COMBINER_IRQ(11, 6)
#define EXYNOS5_IRQ_SYSMMU_DRC_1 COMBINER_IRQ(11, 7)
+#define EXYNOS5_IRQ_MDMA1_ABORT COMBINER_IRQ(13, 1)
+
+#define EXYNOS5_IRQ_MDMA0_ABORT COMBINER_IRQ(15, 3)
+
#define EXYNOS5_IRQ_FIMD1_FIFO COMBINER_IRQ(18, 4)
#define EXYNOS5_IRQ_FIMD1_VSYNC COMBINER_IRQ(18, 5)
#define EXYNOS5_IRQ_FIMD1_SYSTEM COMBINER_IRQ(18, 6)
+#define EXYNOS5_IRQ_ARMIOP_GIC COMBINER_IRQ(19, 0)
+#define EXYNOS5_IRQ_ARMISP_GIC COMBINER_IRQ(19, 1)
+#define EXYNOS5_IRQ_IOP_GIC COMBINER_IRQ(19, 3)
+#define EXYNOS5_IRQ_ISP_GIC COMBINER_IRQ(19, 4)
+
+#define EXYNOS5_IRQ_PMU_CPU1 COMBINER_IRQ(22, 4)
+
#define EXYNOS5_IRQ_EINT0 COMBINER_IRQ(23, 0)
-#define EXYNOS5_IRQ_MCT_L0 COMBINER_IRQ(23, 1)
-#define EXYNOS5_IRQ_MCT_L1 COMBINER_IRQ(23, 2)
#define EXYNOS5_IRQ_MCT_G0 COMBINER_IRQ(23, 3)
#define EXYNOS5_IRQ_MCT_G1 COMBINER_IRQ(23, 4)
-#define EXYNOS5_IRQ_MCT_G2 COMBINER_IRQ(23, 5)
-#define EXYNOS5_IRQ_MCT_G3 COMBINER_IRQ(23, 6)
#define EXYNOS5_IRQ_EINT1 COMBINER_IRQ(24, 0)
#define EXYNOS5_IRQ_SYSMMU_LITE1_0 COMBINER_IRQ(24, 1)
@@ -447,7 +444,7 @@
#define EXYNOS5_MAX_COMBINER_NR 32
-#define EXYNOS5_IRQ_GPIO1_NR_GROUPS 13
+#define EXYNOS5_IRQ_GPIO1_NR_GROUPS 14
#define EXYNOS5_IRQ_GPIO2_NR_GROUPS 9
#define EXYNOS5_IRQ_GPIO3_NR_GROUPS 5
#define EXYNOS5_IRQ_GPIO4_NR_GROUPS 1
diff --git a/arch/arm/mach-exynos/include/mach/map.h b/arch/arm/mach-exynos/include/mach/map.h
index e009a66477f..ca4aa89aa46 100644
--- a/arch/arm/mach-exynos/include/mach/map.h
+++ b/arch/arm/mach-exynos/include/mach/map.h
@@ -34,6 +34,9 @@
#define EXYNOS4_PA_JPEG 0x11840000
+/* x = 0...1 */
+#define EXYNOS4_PA_FIMC_LITE(x) (0x12390000 + ((x) * 0x10000))
+
#define EXYNOS4_PA_G2D 0x12800000
#define EXYNOS4_PA_I2S0 0x03830000
@@ -78,8 +81,8 @@
#define EXYNOS4_PA_GIC_CPU 0x10480000
#define EXYNOS4_PA_GIC_DIST 0x10490000
-#define EXYNOS5_PA_GIC_CPU 0x10480000
-#define EXYNOS5_PA_GIC_DIST 0x10490000
+#define EXYNOS5_PA_GIC_CPU 0x10482000
+#define EXYNOS5_PA_GIC_DIST 0x10481000
#define EXYNOS4_PA_COREPERI 0x10500000
#define EXYNOS4_PA_TWD 0x10500600
@@ -95,6 +98,7 @@
#define EXYNOS5_PA_PDMA1 0x121B0000
#define EXYNOS4_PA_SYSMMU_MDMA 0x10A40000
+#define EXYNOS4_PA_SYSMMU_2D_ACP 0x10A40000
#define EXYNOS4_PA_SYSMMU_SSS 0x10A50000
#define EXYNOS4_PA_SYSMMU_FIMC0 0x11A20000
#define EXYNOS4_PA_SYSMMU_FIMC1 0x11A30000
@@ -103,6 +107,12 @@
#define EXYNOS4_PA_SYSMMU_JPEG 0x11A60000
#define EXYNOS4_PA_SYSMMU_FIMD0 0x11E20000
#define EXYNOS4_PA_SYSMMU_FIMD1 0x12220000
+#define EXYNOS4_PA_SYSMMU_FIMC_ISP 0x12260000
+#define EXYNOS4_PA_SYSMMU_FIMC_DRC 0x12270000
+#define EXYNOS4_PA_SYSMMU_FIMC_FD 0x122A0000
+#define EXYNOS4_PA_SYSMMU_ISPCPU 0x122B0000
+#define EXYNOS4_PA_SYSMMU_FIMC_LITE0 0x123B0000
+#define EXYNOS4_PA_SYSMMU_FIMC_LITE1 0x123C0000
#define EXYNOS4_PA_SYSMMU_PCIe 0x12620000
#define EXYNOS4_PA_SYSMMU_G2D 0x12A20000
#define EXYNOS4_PA_SYSMMU_ROTATOR 0x12A30000
@@ -110,6 +120,37 @@
#define EXYNOS4_PA_SYSMMU_TV 0x12E20000
#define EXYNOS4_PA_SYSMMU_MFC_L 0x13620000
#define EXYNOS4_PA_SYSMMU_MFC_R 0x13630000
+
+#define EXYNOS5_PA_SYSMMU_MDMA1 0x10A40000
+#define EXYNOS5_PA_SYSMMU_SSS 0x10A50000
+#define EXYNOS5_PA_SYSMMU_2D 0x10A60000
+#define EXYNOS5_PA_SYSMMU_MFC_L 0x11200000
+#define EXYNOS5_PA_SYSMMU_MFC_R 0x11210000
+#define EXYNOS5_PA_SYSMMU_ROTATOR 0x11D40000
+#define EXYNOS5_PA_SYSMMU_MDMA2 0x11D50000
+#define EXYNOS5_PA_SYSMMU_JPEG 0x11F20000
+#define EXYNOS5_PA_SYSMMU_IOP 0x12360000
+#define EXYNOS5_PA_SYSMMU_RTIC 0x12370000
+#define EXYNOS5_PA_SYSMMU_GPS 0x12630000
+#define EXYNOS5_PA_SYSMMU_ISP 0x13260000
+#define EXYNOS5_PA_SYSMMU_DRC		0x13270000
+#define EXYNOS5_PA_SYSMMU_SCALERC 0x13280000
+#define EXYNOS5_PA_SYSMMU_SCALERP 0x13290000
+#define EXYNOS5_PA_SYSMMU_FD 0x132A0000
+#define EXYNOS5_PA_SYSMMU_ISPCPU 0x132B0000
+#define EXYNOS5_PA_SYSMMU_ODC 0x132C0000
+#define EXYNOS5_PA_SYSMMU_DIS0 0x132D0000
+#define EXYNOS5_PA_SYSMMU_DIS1 0x132E0000
+#define EXYNOS5_PA_SYSMMU_3DNR 0x132F0000
+#define EXYNOS5_PA_SYSMMU_LITE0 0x13C40000
+#define EXYNOS5_PA_SYSMMU_LITE1 0x13C50000
+#define EXYNOS5_PA_SYSMMU_GSC0 0x13E80000
+#define EXYNOS5_PA_SYSMMU_GSC1 0x13E90000
+#define EXYNOS5_PA_SYSMMU_GSC2 0x13EA0000
+#define EXYNOS5_PA_SYSMMU_GSC3 0x13EB0000
+#define EXYNOS5_PA_SYSMMU_FIMD1 0x14640000
+#define EXYNOS5_PA_SYSMMU_TV 0x14650000
+
#define EXYNOS4_PA_SPI0 0x13920000
#define EXYNOS4_PA_SPI1 0x13930000
#define EXYNOS4_PA_SPI2 0x13940000
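
Editor's sketch: the new EXYNOS4_PA_FIMC_LITE(x) macro derives both FIMC-LITE base addresses from one define (0x12390000 plus a 0x10000 stride, x = 0..1). A quick standalone check of what it yields:

#include <stdio.h>

#define EXYNOS4_PA_FIMC_LITE(x)	(0x12390000 + ((x) * 0x10000))	/* as added in map.h */

int main(void)
{
	for (int x = 0; x <= 1; x++)
		printf("FIMC_LITE%d base: 0x%08x\n", x, EXYNOS4_PA_FIMC_LITE(x));
	return 0;
}
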
diff --git a/arch/arm/mach-exynos/include/mach/regs-clock.h b/arch/arm/mach-exynos/include/mach/regs-clock.h
index d9578a58ae7..b78b5f3ad9c 100644
--- a/arch/arm/mach-exynos/include/mach/regs-clock.h
+++ b/arch/arm/mach-exynos/include/mach/regs-clock.h
@@ -135,6 +135,9 @@
#define EXYNOS4_CLKGATE_SCLKCPU EXYNOS_CLKREG(0x14800)
#define EXYNOS4_CLKGATE_IP_CPU EXYNOS_CLKREG(0x14900)
+#define EXYNOS4_CLKGATE_IP_ISP0 EXYNOS_CLKREG(0x18800)
+#define EXYNOS4_CLKGATE_IP_ISP1 EXYNOS_CLKREG(0x18804)
+
#define EXYNOS4_APLL_LOCKTIME (0x1C20) /* 300us */
#define EXYNOS4_APLLCON0_ENABLE_SHIFT (31)
@@ -303,6 +306,8 @@
#define EXYNOS5_CLKDIV_PERIC0 EXYNOS_CLKREG(0x10558)
#define EXYNOS5_CLKGATE_IP_ACP EXYNOS_CLKREG(0x08800)
+#define EXYNOS5_CLKGATE_IP_ISP0 EXYNOS_CLKREG(0x0C800)
+#define EXYNOS5_CLKGATE_IP_ISP1 EXYNOS_CLKREG(0x0C804)
#define EXYNOS5_CLKGATE_IP_GSCL EXYNOS_CLKREG(0x10920)
#define EXYNOS5_CLKGATE_IP_DISP1 EXYNOS_CLKREG(0x10928)
#define EXYNOS5_CLKGATE_IP_MFC EXYNOS_CLKREG(0x1092C)
@@ -317,6 +322,8 @@
#define EXYNOS5_CLKSRC_CDREX EXYNOS_CLKREG(0x20200)
#define EXYNOS5_CLKDIV_CDREX EXYNOS_CLKREG(0x20500)
+#define EXYNOS5_PLL_DIV2_SEL EXYNOS_CLKREG(0x20A24)
+
#define EXYNOS5_EPLL_LOCK EXYNOS_CLKREG(0x10030)
#define EXYNOS5_EPLLCON0_LOCKED_SHIFT (29)
diff --git a/arch/arm/mach-exynos/include/mach/regs-pmu.h b/arch/arm/mach-exynos/include/mach/regs-pmu.h
index d457d052a42..4dbb8629b20 100644
--- a/arch/arm/mach-exynos/include/mach/regs-pmu.h
+++ b/arch/arm/mach-exynos/include/mach/regs-pmu.h
@@ -180,7 +180,7 @@
#define S5P_PMU_LCD1_CONF S5P_PMUREG(0x3CA0)
-/* Only for EXYNOS4212 */
+/* Only for EXYNOS4x12 */
#define S5P_ISP_ARM_LOWPWR S5P_PMUREG(0x1050)
#define S5P_DIS_IRQ_ISP_ARM_LOCAL_LOWPWR S5P_PMUREG(0x1054)
#define S5P_DIS_IRQ_ISP_ARM_CENTRAL_LOWPWR S5P_PMUREG(0x1058)
@@ -221,4 +221,12 @@
#define S5P_SECSS_MEM_OPTION S5P_PMUREG(0x2EC8)
#define S5P_ROTATOR_MEM_OPTION S5P_PMUREG(0x2F48)
+/* Only for EXYNOS4412 */
+#define S5P_ARM_CORE2_LOWPWR S5P_PMUREG(0x1020)
+#define S5P_DIS_IRQ_CORE2 S5P_PMUREG(0x1024)
+#define S5P_DIS_IRQ_CENTRAL2 S5P_PMUREG(0x1028)
+#define S5P_ARM_CORE3_LOWPWR S5P_PMUREG(0x1030)
+#define S5P_DIS_IRQ_CORE3 S5P_PMUREG(0x1034)
+#define S5P_DIS_IRQ_CENTRAL3 S5P_PMUREG(0x1038)
+
#endif /* __ASM_ARCH_REGS_PMU_H */
diff --git a/arch/arm/mach-exynos/include/mach/regs-sysmmu.h b/arch/arm/mach-exynos/include/mach/regs-sysmmu.h
deleted file mode 100644
index 68ff6ad08a2..00000000000
--- a/arch/arm/mach-exynos/include/mach/regs-sysmmu.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* linux/arch/arm/mach-exynos4/include/mach/regs-sysmmu.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * EXYNOS4 - System MMU register
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_REGS_SYSMMU_H
-#define __ASM_ARCH_REGS_SYSMMU_H __FILE__
-
-#define S5P_MMU_CTRL 0x000
-#define S5P_MMU_CFG 0x004
-#define S5P_MMU_STATUS 0x008
-#define S5P_MMU_FLUSH 0x00C
-#define S5P_PT_BASE_ADDR 0x014
-#define S5P_INT_STATUS 0x018
-#define S5P_INT_CLEAR 0x01C
-#define S5P_PAGE_FAULT_ADDR 0x024
-#define S5P_AW_FAULT_ADDR 0x028
-#define S5P_AR_FAULT_ADDR 0x02C
-#define S5P_DEFAULT_SLAVE_ADDR 0x030
-
-#endif /* __ASM_ARCH_REGS_SYSMMU_H */
diff --git a/arch/arm/mach-exynos/include/mach/spi-clocks.h b/arch/arm/mach-exynos/include/mach/spi-clocks.h
index 576efdf6d09..c71a5fba6a8 100644
--- a/arch/arm/mach-exynos/include/mach/spi-clocks.h
+++ b/arch/arm/mach-exynos/include/mach/spi-clocks.h
@@ -11,6 +11,6 @@
#define __ASM_ARCH_SPI_CLKS_H __FILE__
/* Must source from SCLK_SPI */
-#define EXYNOS4_SPI_SRCCLK_SCLK 0
+#define EXYNOS_SPI_SRCCLK_SCLK 0
#endif /* __ASM_ARCH_SPI_CLKS_H */
diff --git a/arch/arm/mach-exynos/include/mach/sysmmu.h b/arch/arm/mach-exynos/include/mach/sysmmu.h
index 6a5fbb534e8..998daf2add9 100644
--- a/arch/arm/mach-exynos/include/mach/sysmmu.h
+++ b/arch/arm/mach-exynos/include/mach/sysmmu.h
@@ -1,46 +1,66 @@
-/* linux/arch/arm/mach-exynos4/include/mach/sysmmu.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Samsung sysmmu driver for EXYNOS4
+ * EXYNOS - System MMU support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARM_ARCH_SYSMMU_H
-#define __ASM_ARM_ARCH_SYSMMU_H __FILE__
-
-enum exynos4_sysmmu_ips {
- SYSMMU_MDMA,
- SYSMMU_SSS,
- SYSMMU_FIMC0,
- SYSMMU_FIMC1,
- SYSMMU_FIMC2,
- SYSMMU_FIMC3,
- SYSMMU_JPEG,
- SYSMMU_FIMD0,
- SYSMMU_FIMD1,
- SYSMMU_PCIe,
- SYSMMU_G2D,
- SYSMMU_ROTATOR,
- SYSMMU_MDMA2,
- SYSMMU_TV,
- SYSMMU_MFC_L,
- SYSMMU_MFC_R,
- EXYNOS4_SYSMMU_TOTAL_IPNUM,
+ */
+
+#ifndef _ARM_MACH_EXYNOS_SYSMMU_H_
+#define _ARM_MACH_EXYNOS_SYSMMU_H_
+
+struct sysmmu_platform_data {
+ char *dbgname;
+ /* comma(,) separated list of clock names for clock gating */
+ char *clockname;
};
-#define S5P_SYSMMU_TOTAL_IPNUM EXYNOS4_SYSMMU_TOTAL_IPNUM
+#define SYSMMU_DEVNAME_BASE "exynos-sysmmu"
+
+#define SYSMMU_CLOCK_NAME "sysmmu"
+#define SYSMMU_CLOCK_NAME2 "sysmmu_mc"
+
+#ifdef CONFIG_EXYNOS_DEV_SYSMMU
+#include <linux/device.h>
+struct platform_device;
+
+#define SYSMMU_PLATDEV(ipname) exynos_device_sysmmu_##ipname
+
+extern struct platform_device SYSMMU_PLATDEV(mfc_l);
+extern struct platform_device SYSMMU_PLATDEV(mfc_r);
+extern struct platform_device SYSMMU_PLATDEV(tv);
+extern struct platform_device SYSMMU_PLATDEV(jpeg);
+extern struct platform_device SYSMMU_PLATDEV(rot);
+extern struct platform_device SYSMMU_PLATDEV(fimc0);
+extern struct platform_device SYSMMU_PLATDEV(fimc1);
+extern struct platform_device SYSMMU_PLATDEV(fimc2);
+extern struct platform_device SYSMMU_PLATDEV(fimc3);
+extern struct platform_device SYSMMU_PLATDEV(gsc0);
+extern struct platform_device SYSMMU_PLATDEV(gsc1);
+extern struct platform_device SYSMMU_PLATDEV(gsc2);
+extern struct platform_device SYSMMU_PLATDEV(gsc3);
+extern struct platform_device SYSMMU_PLATDEV(isp);
+extern struct platform_device SYSMMU_PLATDEV(fimd0);
+extern struct platform_device SYSMMU_PLATDEV(fimd1);
+extern struct platform_device SYSMMU_PLATDEV(camif0);
+extern struct platform_device SYSMMU_PLATDEV(camif1);
+extern struct platform_device SYSMMU_PLATDEV(2d);
-extern const char *sysmmu_ips_name[EXYNOS4_SYSMMU_TOTAL_IPNUM];
+#ifdef CONFIG_IOMMU_API
+static inline void platform_set_sysmmu(
+ struct device *sysmmu, struct device *dev)
+{
+ dev->archdata.iommu = sysmmu;
+}
+#endif
-typedef enum exynos4_sysmmu_ips sysmmu_ips;
+#else /* !CONFIG_EXYNOS_DEV_SYSMMU */
+#define platform_set_sysmmu(dev, sysmmu) do { } while (0)
+#endif
-void sysmmu_clk_init(struct device *dev, sysmmu_ips ips);
-void sysmmu_clk_enable(sysmmu_ips ips);
-void sysmmu_clk_disable(sysmmu_ips ips);
+#define SYSMMU_CLOCK_DEVNAME(ipname, id) (SYSMMU_DEVNAME_BASE "." #id)
-#endif /* __ASM_ARM_ARCH_SYSMMU_H */
+#endif /* _ARM_MACH_EXYNOS_SYSMMU_H_ */
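The reworked header expects platform setup code to attach each System MMU instance to its client device via platform_set_sysmmu(), which simply stores the sysmmu's struct device in dev->archdata.iommu for the IOMMU core. A minimal sketch of that binding, assuming CONFIG_EXYNOS_DEV_SYSMMU and CONFIG_IOMMU_API are enabled; only the names come from this header and the board files below, the call site itself is illustrative:

#include <linux/init.h>
#include <linux/platform_device.h>
#include <plat/devs.h>		/* declares s5p_device_mfc_l / s5p_device_mfc_r */
#include <mach/sysmmu.h>

/* Illustrative: bind the MFC bank devices to their System MMUs so the
 * IOMMU core can find them through dev->archdata.iommu. */
static void __init example_bind_mfc_sysmmu(void)
{
	platform_set_sysmmu(&SYSMMU_PLATDEV(mfc_l).dev,
			    &s5p_device_mfc_l.dev);
	platform_set_sysmmu(&SYSMMU_PLATDEV(mfc_r).dev,
			    &s5p_device_mfc_r.dev);
}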
diff --git a/arch/arm/mach-exynos/mach-armlex4210.c b/arch/arm/mach-exynos/mach-armlex4210.c
index fed7116418e..5a3daa0168d 100644
--- a/arch/arm/mach-exynos/mach-armlex4210.c
+++ b/arch/arm/mach-exynos/mach-armlex4210.c
@@ -147,7 +147,6 @@ static struct platform_device *armlex4210_devices[] __initdata = {
&s3c_device_hsmmc3,
&s3c_device_rtc,
&s3c_device_wdt,
- &exynos4_device_sysmmu,
&samsung_asoc_dma,
&armlex4210_smsc911x,
&exynos4_device_ahci,
@@ -204,6 +203,7 @@ MACHINE_START(ARMLEX4210, "ARMLEX4210")
.map_io = armlex4210_map_io,
.handle_irq = gic_handle_irq,
.init_machine = armlex4210_machine_init,
+ .init_late = exynos_init_late,
.timer = &exynos4_timer,
.restart = exynos4_restart,
MACHINE_END
diff --git a/arch/arm/mach-exynos/mach-exynos4-dt.c b/arch/arm/mach-exynos/mach-exynos4-dt.c
index 8245f1c761d..e7e9743543a 100644
--- a/arch/arm/mach-exynos/mach-exynos4-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos4-dt.c
@@ -83,6 +83,7 @@ DT_MACHINE_START(EXYNOS4210_DT, "Samsung Exynos4 (Flattened Device Tree)")
.map_io = exynos4210_dt_map_io,
.handle_irq = gic_handle_irq,
.init_machine = exynos4210_dt_machine_init,
+ .init_late = exynos_init_late,
.timer = &exynos4_timer,
.dt_compat = exynos4210_dt_compat,
.restart = exynos4_restart,
diff --git a/arch/arm/mach-exynos/mach-exynos5-dt.c b/arch/arm/mach-exynos/mach-exynos5-dt.c
index 4711c8920e3..7b1e11a228c 100644
--- a/arch/arm/mach-exynos/mach-exynos5-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos5-dt.c
@@ -43,6 +43,10 @@ static const struct of_dev_auxdata exynos5250_auxdata_lookup[] __initconst = {
"exynos4210-uart.2", NULL),
OF_DEV_AUXDATA("samsung,exynos4210-uart", EXYNOS5_PA_UART3,
"exynos4210-uart.3", NULL),
+ OF_DEV_AUXDATA("samsung,s3c2440-i2c", EXYNOS5_PA_IIC(0),
+ "s3c2440-i2c.0", NULL),
+ OF_DEV_AUXDATA("samsung,s3c2440-i2c", EXYNOS5_PA_IIC(1),
+ "s3c2440-i2c.1", NULL),
OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA0, "dma-pl330.0", NULL),
OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA1, "dma-pl330.1", NULL),
OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_MDMA1, "dma-pl330.2", NULL),
@@ -72,6 +76,7 @@ DT_MACHINE_START(EXYNOS5_DT, "SAMSUNG EXYNOS5 (Flattened Device Tree)")
.map_io = exynos5250_dt_map_io,
.handle_irq = gic_handle_irq,
.init_machine = exynos5250_dt_machine_init,
+ .init_late = exynos_init_late,
.timer = &exynos4_timer,
.dt_compat = exynos5250_dt_compat,
.restart = exynos5_restart,
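The new OF_DEV_AUXDATA entries keep the legacy platform device names ("s3c2440-i2c.0" and ".1") for the I2C controllers probed from the device tree, so clock and other lookups keyed on those names still resolve. The machine-init callback is not part of this hunk, but it presumably hands the table to of_platform_populate() in the usual way; a sketch of that shape, not the verified body:

#include <linux/init.h>
#include <linux/of_platform.h>

/* Sketch only: how an auxdata table is normally consumed.  The real
 * exynos5250_dt_machine_init() body is outside this hunk. */
static void __init example_dt_machine_init(void)
{
	of_platform_populate(NULL, of_default_bus_match_table,
			     exynos5250_auxdata_lookup, NULL);
}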
diff --git a/arch/arm/mach-exynos/mach-nuri.c b/arch/arm/mach-exynos/mach-nuri.c
index 6c31f2ad765..972983e392b 100644
--- a/arch/arm/mach-exynos/mach-nuri.c
+++ b/arch/arm/mach-exynos/mach-nuri.c
@@ -1389,6 +1389,7 @@ MACHINE_START(NURI, "NURI")
.map_io = nuri_map_io,
.handle_irq = gic_handle_irq,
.init_machine = nuri_machine_init,
+ .init_late = exynos_init_late,
.timer = &exynos4_timer,
.reserve = &nuri_reserve,
.restart = exynos4_restart,
diff --git a/arch/arm/mach-exynos/mach-origen.c b/arch/arm/mach-exynos/mach-origen.c
index 26124a38bcb..a7f7fd567dd 100644
--- a/arch/arm/mach-exynos/mach-origen.c
+++ b/arch/arm/mach-exynos/mach-origen.c
@@ -766,6 +766,7 @@ MACHINE_START(ORIGEN, "ORIGEN")
.map_io = origen_map_io,
.handle_irq = gic_handle_irq,
.init_machine = origen_machine_init,
+ .init_late = exynos_init_late,
.timer = &exynos4_timer,
.reserve = &origen_reserve,
.restart = exynos4_restart,
diff --git a/arch/arm/mach-exynos/mach-smdk4x12.c b/arch/arm/mach-exynos/mach-smdk4x12.c
index fe772d893cc..fb09c70e195 100644
--- a/arch/arm/mach-exynos/mach-smdk4x12.c
+++ b/arch/arm/mach-exynos/mach-smdk4x12.c
@@ -316,6 +316,7 @@ MACHINE_START(SMDK4412, "SMDK4412")
.map_io = smdk4x12_map_io,
.handle_irq = gic_handle_irq,
.init_machine = smdk4x12_machine_init,
+ .init_late = exynos_init_late,
.timer = &exynos4_timer,
.restart = exynos4_restart,
.reserve = &smdk4x12_reserve,
diff --git a/arch/arm/mach-exynos/mach-smdkv310.c b/arch/arm/mach-exynos/mach-smdkv310.c
index 5af96064ca5..70df1a0c211 100644
--- a/arch/arm/mach-exynos/mach-smdkv310.c
+++ b/arch/arm/mach-exynos/mach-smdkv310.c
@@ -295,7 +295,6 @@ static struct platform_device *smdkv310_devices[] __initdata = {
&s5p_device_mfc_l,
&s5p_device_mfc_r,
&exynos4_device_spdif,
- &exynos4_device_sysmmu,
&samsung_asoc_dma,
&samsung_asoc_idma,
&s5p_device_fimd0,
@@ -412,6 +411,7 @@ MACHINE_START(SMDKC210, "SMDKC210")
.map_io = smdkv310_map_io,
.handle_irq = gic_handle_irq,
.init_machine = smdkv310_machine_init,
+ .init_late = exynos_init_late,
.timer = &exynos4_timer,
.restart = exynos4_restart,
MACHINE_END
diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c
index 6b731b86327..083b44de9c1 100644
--- a/arch/arm/mach-exynos/mach-universal_c210.c
+++ b/arch/arm/mach-exynos/mach-universal_c210.c
@@ -1157,6 +1157,7 @@ MACHINE_START(UNIVERSAL_C210, "UNIVERSAL_C210")
.map_io = universal_map_io,
.handle_irq = gic_handle_irq,
.init_machine = universal_machine_init,
+ .init_late = exynos_init_late,
.timer = &s5p_timer,
.reserve = &universal_reserve,
.restart = exynos4_restart,
diff --git a/arch/arm/mach-exynos/mct.c b/arch/arm/mach-exynos/mct.c
index 897d9a9cf22..b601fb8a408 100644
--- a/arch/arm/mach-exynos/mct.c
+++ b/arch/arm/mach-exynos/mct.c
@@ -388,6 +388,7 @@ static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt)
{
struct mct_clock_event_device *mevt;
unsigned int cpu = smp_processor_id();
+ int mct_lx_irq;
mevt = this_cpu_ptr(&percpu_mct_tick);
mevt->evt = evt;
@@ -414,14 +415,18 @@ static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt)
if (mct_int_type == MCT_INT_SPI) {
if (cpu == 0) {
+ mct_lx_irq = soc_is_exynos4210() ? EXYNOS4_IRQ_MCT_L0 :
+ EXYNOS5_IRQ_MCT_L0;
mct_tick0_event_irq.dev_id = mevt;
- evt->irq = EXYNOS4_IRQ_MCT_L0;
- setup_irq(EXYNOS4_IRQ_MCT_L0, &mct_tick0_event_irq);
+ evt->irq = mct_lx_irq;
+ setup_irq(mct_lx_irq, &mct_tick0_event_irq);
} else {
+ mct_lx_irq = soc_is_exynos4210() ? EXYNOS4_IRQ_MCT_L1 :
+ EXYNOS5_IRQ_MCT_L1;
mct_tick1_event_irq.dev_id = mevt;
- evt->irq = EXYNOS4_IRQ_MCT_L1;
- setup_irq(EXYNOS4_IRQ_MCT_L1, &mct_tick1_event_irq);
- irq_set_affinity(EXYNOS4_IRQ_MCT_L1, cpumask_of(1));
+ evt->irq = mct_lx_irq;
+ setup_irq(mct_lx_irq, &mct_tick1_event_irq);
+ irq_set_affinity(mct_lx_irq, cpumask_of(1));
}
} else {
enable_percpu_irq(EXYNOS_IRQ_MCT_LOCALTIMER, 0);
@@ -473,7 +478,7 @@ static void __init exynos4_timer_resources(void)
static void __init exynos4_timer_init(void)
{
- if (soc_is_exynos4210())
+ if ((soc_is_exynos4210()) || (soc_is_exynos5250()))
mct_int_type = MCT_INT_SPI;
else
mct_int_type = MCT_INT_PPI;
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index 428cfeb5772..563dea9a6db 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -275,7 +275,7 @@ static void exynos4_restore_pll(void)
static struct subsys_interface exynos4_pm_interface = {
.name = "exynos4_pm",
- .subsys = &exynos4_subsys,
+ .subsys = &exynos_subsys,
.add_dev = exynos4_pm_add,
};
@@ -313,7 +313,7 @@ static int exynos4_pm_suspend(void)
tmp &= ~S5P_CENTRAL_LOWPWR_CFG;
__raw_writel(tmp, S5P_CENTRAL_SEQ_CONFIGURATION);
- if (soc_is_exynos4212()) {
+ if (soc_is_exynos4212() || soc_is_exynos4412()) {
tmp = __raw_readl(S5P_CENTRAL_SEQ_OPTION);
tmp &= ~(S5P_USE_STANDBYWFI_ISP_ARM |
S5P_USE_STANDBYWFE_ISP_ARM);
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 13b306808b4..e9fafcf163d 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -193,9 +193,8 @@ static __init int exynos4_pm_init_power_domain(void)
}
arch_initcall(exynos4_pm_init_power_domain);
-static __init int exynos_pm_late_initcall(void)
+int __init exynos_pm_late_initcall(void)
{
pm_genpd_poweroff_unused();
return 0;
}
-late_initcall(exynos_pm_late_initcall);
diff --git a/arch/arm/mach-exynos/pmu.c b/arch/arm/mach-exynos/pmu.c
index bba48f5c3e8..77c6815eebe 100644
--- a/arch/arm/mach-exynos/pmu.c
+++ b/arch/arm/mach-exynos/pmu.c
@@ -94,7 +94,7 @@ static struct exynos4_pmu_conf exynos4210_pmu_config[] = {
{ PMU_TABLE_END,},
};
-static struct exynos4_pmu_conf exynos4212_pmu_config[] = {
+static struct exynos4_pmu_conf exynos4x12_pmu_config[] = {
{ S5P_ARM_CORE0_LOWPWR, { 0x0, 0x0, 0x2 } },
{ S5P_DIS_IRQ_CORE0, { 0x0, 0x0, 0x0 } },
{ S5P_DIS_IRQ_CENTRAL0, { 0x0, 0x0, 0x0 } },
@@ -202,6 +202,16 @@ static struct exynos4_pmu_conf exynos4212_pmu_config[] = {
{ PMU_TABLE_END,},
};
+static struct exynos4_pmu_conf exynos4412_pmu_config[] = {
+ { S5P_ARM_CORE2_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_DIS_IRQ_CORE2, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_CENTRAL2, { 0x0, 0x0, 0x0 } },
+ { S5P_ARM_CORE3_LOWPWR, { 0x0, 0x0, 0x2 } },
+ { S5P_DIS_IRQ_CORE3, { 0x0, 0x0, 0x0 } },
+ { S5P_DIS_IRQ_CENTRAL3, { 0x0, 0x0, 0x0 } },
+ { PMU_TABLE_END,},
+};
+
void exynos4_sys_powerdown_conf(enum sys_powerdown mode)
{
unsigned int i;
@@ -209,6 +219,12 @@ void exynos4_sys_powerdown_conf(enum sys_powerdown mode)
for (i = 0; (exynos4_pmu_config[i].reg != PMU_TABLE_END) ; i++)
__raw_writel(exynos4_pmu_config[i].val[mode],
exynos4_pmu_config[i].reg);
+
+ if (soc_is_exynos4412()) {
+ for (i = 0; exynos4412_pmu_config[i].reg != PMU_TABLE_END ; i++)
+ __raw_writel(exynos4412_pmu_config[i].val[mode],
+ exynos4412_pmu_config[i].reg);
+ }
}
static int __init exynos4_pmu_init(void)
@@ -218,9 +234,9 @@ static int __init exynos4_pmu_init(void)
if (soc_is_exynos4210()) {
exynos4_pmu_config = exynos4210_pmu_config;
pr_info("EXYNOS4210 PMU Initialize\n");
- } else if (soc_is_exynos4212()) {
- exynos4_pmu_config = exynos4212_pmu_config;
- pr_info("EXYNOS4212 PMU Initialize\n");
+ } else if (soc_is_exynos4212() || soc_is_exynos4412()) {
+ exynos4_pmu_config = exynos4x12_pmu_config;
+ pr_info("EXYNOS4x12 PMU Initialize\n");
} else {
pr_info("EXYNOS4: PMU not supported\n");
}
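For the two extra cores of the EXYNOS4412, the new table is written on top of whichever base table was selected, so CORE2/CORE3 receive the same low-power programming as CORE0/CORE1. Callers invoke exynos4_sys_powerdown_conf() with one of the sys_powerdown modes right before entering the low-power state; a hedged sketch, assuming the SYS_AFTR/SYS_LPA/SYS_SLEEP enum from <mach/pmu.h> that the three-entry val[] arrays correspond to:

#include <mach/pmu.h>
#include <asm/proc-fns.h>

/* Illustrative only: program the PMU for AFTR and drop into WFI.
 * The real idle/suspend paths in mach-exynos do the equivalent. */
static void example_enter_aftr(void)
{
	exynos4_sys_powerdown_conf(SYS_AFTR);
	cpu_do_idle();
}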
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index cca8c0c7479..0021f726b15 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -34,6 +34,7 @@ config ARCH_MX53
config SOC_IMX1
bool
select ARCH_MX1
+ select COMMON_CLK
select CPU_ARM920T
select IMX_HAVE_IOMUX_V1
select MXC_AVIC
@@ -42,12 +43,14 @@ config SOC_IMX21
bool
select MACH_MX21
select CPU_ARM926T
+ select COMMON_CLK
select IMX_HAVE_IOMUX_V1
select MXC_AVIC
config SOC_IMX25
bool
select ARCH_MX25
+ select COMMON_CLK
select CPU_ARM926T
select ARCH_MXC_IOMUX_V3
select MXC_AVIC
@@ -56,6 +59,7 @@ config SOC_IMX27
bool
select MACH_MX27
select CPU_ARM926T
+ select COMMON_CLK
select IMX_HAVE_IOMUX_V1
select MXC_AVIC
@@ -64,12 +68,14 @@ config SOC_IMX31
select CPU_V6
select IMX_HAVE_PLATFORM_MXC_RNGA
select MXC_AVIC
+ select COMMON_CLK
select SMP_ON_UP if SMP
config SOC_IMX35
bool
select CPU_V6
select ARCH_MXC_IOMUX_V3
+ select COMMON_CLK
select HAVE_EPIT
select MXC_AVIC
select SMP_ON_UP if SMP
@@ -77,6 +83,7 @@ config SOC_IMX35
config SOC_IMX5
select CPU_V7
select MXC_TZIC
+ select COMMON_CLK
select ARCH_MXC_IOMUX_V3
select ARCH_HAS_CPUFREQ
select ARCH_MX5
@@ -815,6 +822,7 @@ config SOC_IMX6Q
bool "i.MX6 Quad support"
select ARM_CPU_SUSPEND if PM
select ARM_GIC
+ select COMMON_CLK
select CPU_V7
select HAVE_ARM_SCU
select HAVE_IMX_GPC
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 4937c070a57..ff29421414f 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -1,15 +1,18 @@
-obj-$(CONFIG_SOC_IMX1) += clock-imx1.o mm-imx1.o
-obj-$(CONFIG_SOC_IMX21) += clock-imx21.o mm-imx21.o
+obj-$(CONFIG_SOC_IMX1) += clk-imx1.o mm-imx1.o
+obj-$(CONFIG_SOC_IMX21) += clk-imx21.o mm-imx21.o
-obj-$(CONFIG_SOC_IMX25) += clock-imx25.o mm-imx25.o ehci-imx25.o cpu-imx25.o
+obj-$(CONFIG_SOC_IMX25) += clk-imx25.o mm-imx25.o ehci-imx25.o cpu-imx25.o
obj-$(CONFIG_SOC_IMX27) += cpu-imx27.o pm-imx27.o
-obj-$(CONFIG_SOC_IMX27) += clock-imx27.o mm-imx27.o ehci-imx27.o
+obj-$(CONFIG_SOC_IMX27) += clk-imx27.o mm-imx27.o ehci-imx27.o
-obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clock-imx31.o iomux-imx31.o ehci-imx31.o pm-imx3.o
-obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clock-imx35.o ehci-imx35.o pm-imx3.o
+obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clk-imx31.o iomux-imx31.o ehci-imx31.o pm-imx3.o
+obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clk-imx35.o ehci-imx35.o pm-imx3.o
-obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clock-mx51-mx53.o ehci-imx5.o pm-imx5.o cpu_op-mx51.o
+obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clk-imx51-imx53.o ehci-imx5.o pm-imx5.o cpu_op-mx51.o
+
+obj-$(CONFIG_COMMON_CLK) += clk-pllv1.o clk-pllv2.o clk-pllv3.o clk-gate2.o \
+ clk-pfd.o clk-busy.o
# Support for CMOS sensor interface
obj-$(CONFIG_MX1_VIDEO) += mx1-camera-fiq.o mx1-camera-fiq-ksym.o
@@ -70,7 +73,7 @@ obj-$(CONFIG_CPU_V7) += head-v7.o
AFLAGS_head-v7.o :=-Wa,-march=armv7-a
obj-$(CONFIG_SMP) += platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
-obj-$(CONFIG_SOC_IMX6Q) += clock-imx6q.o mach-imx6q.o
+obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o
ifeq ($(CONFIG_PM),y)
obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o
diff --git a/arch/arm/mach-imx/Makefile.boot b/arch/arm/mach-imx/Makefile.boot
index 3851d8a2787..05541cf4a87 100644
--- a/arch/arm/mach-imx/Makefile.boot
+++ b/arch/arm/mach-imx/Makefile.boot
@@ -42,4 +42,5 @@ dtb-$(CONFIG_MACH_IMX51_DT) += imx51-babbage.dtb
dtb-$(CONFIG_MACH_IMX53_DT) += imx53-ard.dtb imx53-evk.dtb \
imx53-qsb.dtb imx53-smd.dtb
dtb-$(CONFIG_SOC_IMX6Q) += imx6q-arm2.dtb \
- imx6q-sabrelite.dtb
+ imx6q-sabrelite.dtb \
+ imx6q-sabresd.dtb \
diff --git a/arch/arm/mach-imx/clk-busy.c b/arch/arm/mach-imx/clk-busy.c
new file mode 100644
index 00000000000..1a7a8dd045a
--- /dev/null
+++ b/arch/arm/mach-imx/clk-busy.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/err.h>
+#include "clk.h"
+
+static int clk_busy_wait(void __iomem *reg, u8 shift)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(10);
+
+ while (readl_relaxed(reg) & (1 << shift))
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+struct clk_busy_divider {
+ struct clk_divider div;
+ const struct clk_ops *div_ops;
+ void __iomem *reg;
+ u8 shift;
+};
+
+static inline struct clk_busy_divider *to_clk_busy_divider(struct clk_hw *hw)
+{
+ struct clk_divider *div = container_of(hw, struct clk_divider, hw);
+
+ return container_of(div, struct clk_busy_divider, div);
+}
+
+static unsigned long clk_busy_divider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_busy_divider *busy = to_clk_busy_divider(hw);
+
+ return busy->div_ops->recalc_rate(&busy->div.hw, parent_rate);
+}
+
+static long clk_busy_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_busy_divider *busy = to_clk_busy_divider(hw);
+
+ return busy->div_ops->round_rate(&busy->div.hw, rate, prate);
+}
+
+static int clk_busy_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_busy_divider *busy = to_clk_busy_divider(hw);
+ int ret;
+
+ ret = busy->div_ops->set_rate(&busy->div.hw, rate, parent_rate);
+ if (!ret)
+ ret = clk_busy_wait(busy->reg, busy->shift);
+
+ return ret;
+}
+
+static struct clk_ops clk_busy_divider_ops = {
+ .recalc_rate = clk_busy_divider_recalc_rate,
+ .round_rate = clk_busy_divider_round_rate,
+ .set_rate = clk_busy_divider_set_rate,
+};
+
+struct clk *imx_clk_busy_divider(const char *name, const char *parent_name,
+ void __iomem *reg, u8 shift, u8 width,
+ void __iomem *busy_reg, u8 busy_shift)
+{
+ struct clk_busy_divider *busy;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ busy = kzalloc(sizeof(*busy), GFP_KERNEL);
+ if (!busy)
+ return ERR_PTR(-ENOMEM);
+
+ busy->reg = busy_reg;
+ busy->shift = busy_shift;
+
+ busy->div.reg = reg;
+ busy->div.shift = shift;
+ busy->div.width = width;
+ busy->div.lock = &imx_ccm_lock;
+ busy->div_ops = &clk_divider_ops;
+
+ init.name = name;
+ init.ops = &clk_busy_divider_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ busy->div.hw.init = &init;
+
+ clk = clk_register(NULL, &busy->div.hw);
+ if (IS_ERR(clk))
+ kfree(busy);
+
+ return clk;
+}
+
+struct clk_busy_mux {
+ struct clk_mux mux;
+ const struct clk_ops *mux_ops;
+ void __iomem *reg;
+ u8 shift;
+};
+
+static inline struct clk_busy_mux *to_clk_busy_mux(struct clk_hw *hw)
+{
+ struct clk_mux *mux = container_of(hw, struct clk_mux, hw);
+
+ return container_of(mux, struct clk_busy_mux, mux);
+}
+
+static u8 clk_busy_mux_get_parent(struct clk_hw *hw)
+{
+ struct clk_busy_mux *busy = to_clk_busy_mux(hw);
+
+ return busy->mux_ops->get_parent(&busy->mux.hw);
+}
+
+static int clk_busy_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_busy_mux *busy = to_clk_busy_mux(hw);
+ int ret;
+
+ ret = busy->mux_ops->set_parent(&busy->mux.hw, index);
+ if (!ret)
+ ret = clk_busy_wait(busy->reg, busy->shift);
+
+ return ret;
+}
+
+struct clk_ops clk_busy_mux_ops = {
+ .get_parent = clk_busy_mux_get_parent,
+ .set_parent = clk_busy_mux_set_parent,
+};
+
+struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
+ u8 width, void __iomem *busy_reg, u8 busy_shift,
+ const char **parent_names, int num_parents)
+{
+ struct clk_busy_mux *busy;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ busy = kzalloc(sizeof(*busy), GFP_KERNEL);
+ if (!busy)
+ return ERR_PTR(-ENOMEM);
+
+ busy->reg = busy_reg;
+ busy->shift = busy_shift;
+
+ busy->mux.reg = reg;
+ busy->mux.shift = shift;
+ busy->mux.width = width;
+ busy->mux.lock = &imx_ccm_lock;
+ busy->mux_ops = &clk_mux_ops;
+
+ init.name = name;
+ init.ops = &clk_busy_mux_ops;
+ init.flags = 0;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ busy->mux.hw.init = &init;
+
+ clk = clk_register(NULL, &busy->mux.hw);
+ if (IS_ERR(clk))
+ kfree(busy);
+
+ return clk;
+}
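clk-busy.c wraps the generic divider and mux ops and, after every set_rate or set_parent, polls a CCM busy bit (with a 10 ms timeout) until the hardware handshake completes. A registration sketch, loosely modeled on the i.MX6 CBCDR/CDHIPR layout; treat the base pointer and bit positions as placeholders rather than verified values, and assume the prototypes come from the local clk.h as in clk-busy.c itself:

#include <linux/kernel.h>
#include "clk.h"

/* Sketch: an AHB divider and a periph mux whose updates must wait on
 * CDHIPR-style busy flags.  ccm is the mapped CCM base (assumed). */
static void example_register_busy_clks(void __iomem *ccm)
{
	static const char *periph_sels[] = { "pll2_bus", "periph_clk2", };

	imx_clk_busy_divider("ahb", "periph", ccm + 0x14, 10, 3,
			     ccm + 0x48, 1);
	imx_clk_busy_mux("periph", ccm + 0x14, 25, 1, ccm + 0x48, 5,
			 periph_sels, ARRAY_SIZE(periph_sels));
}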
diff --git a/arch/arm/mach-imx/clk-gate2.c b/arch/arm/mach-imx/clk-gate2.c
new file mode 100644
index 00000000000..3c1b8ff9a0a
--- /dev/null
+++ b/arch/arm/mach-imx/clk-gate2.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
+ * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Gated clock implementation
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/string.h>
+
+/**
+ * DOC: basic gateable clock which can gate and ungate its output
+ *
+ * Traits of this clock:
+ * prepare - clk_(un)prepare only ensures parent is (un)prepared
+ * enable - clk_enable and clk_disable are functional & control gating
+ * rate - inherits rate from parent. No clk_set_rate support
+ * parent - fixed parent. No clk_set_parent support
+ */
+
+#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
+
+static int clk_gate2_enable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ u32 reg;
+ unsigned long flags = 0;
+
+ if (gate->lock)
+ spin_lock_irqsave(gate->lock, flags);
+
+ reg = readl(gate->reg);
+ reg |= 3 << gate->bit_idx;
+ writel(reg, gate->reg);
+
+ if (gate->lock)
+ spin_unlock_irqrestore(gate->lock, flags);
+
+ return 0;
+}
+
+static void clk_gate2_disable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ u32 reg;
+ unsigned long flags = 0;
+
+ if (gate->lock)
+ spin_lock_irqsave(gate->lock, flags);
+
+ reg = readl(gate->reg);
+ reg &= ~(3 << gate->bit_idx);
+ writel(reg, gate->reg);
+
+ if (gate->lock)
+ spin_unlock_irqrestore(gate->lock, flags);
+}
+
+static int clk_gate2_is_enabled(struct clk_hw *hw)
+{
+ u32 reg;
+ struct clk_gate *gate = to_clk_gate(hw);
+
+ reg = readl(gate->reg);
+
+ if (((reg >> gate->bit_idx) & 3) == 3)
+ return 1;
+
+ return 0;
+}
+
+static struct clk_ops clk_gate2_ops = {
+ .enable = clk_gate2_enable,
+ .disable = clk_gate2_disable,
+ .is_enabled = clk_gate2_is_enabled,
+};
+
+struct clk *clk_register_gate2(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 bit_idx,
+ u8 clk_gate2_flags, spinlock_t *lock)
+{
+ struct clk_gate *gate;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ /* struct clk_gate assignments */
+ gate->reg = reg;
+ gate->bit_idx = bit_idx;
+ gate->flags = clk_gate2_flags;
+ gate->lock = lock;
+
+ init.name = name;
+ init.ops = &clk_gate2_ops;
+ init.flags = flags;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+
+ gate->hw.init = &init;
+
+ clk = clk_register(dev, &gate->hw);
+ if (IS_ERR(clk))
+ kfree(gate);
+
+ return clk;
+}
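Unlike the basic gate, this variant drives a two-bit field and reports the clock enabled only when both bits read back as 0x3, matching the i.MX CCGR encoding of "clock on in run and wait modes". A hedged direct-call sketch; in the i.MX clock setup files registration presumably goes through an imx_clk_gate2()-style helper in clk.h rather than this raw call, and the offset and bit index below are placeholders:

#include <linux/clk-provider.h>
#include <linux/spinlock.h>
#include "clk.h"		/* assumed to declare clk_register_gate2() */

/* Sketch: a 2-bit gate whose CCGR field starts at bit 20. */
static struct clk *example_register_gate2(void __iomem *ccm,
					  spinlock_t *lock)
{
	return clk_register_gate2(NULL, "gpt_serial", "perclk", 0,
				  ccm + 0x6c, 20, 0, lock);
}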
diff --git a/arch/arm/mach-imx/clk-imx1.c b/arch/arm/mach-imx/clk-imx1.c
new file mode 100644
index 00000000000..0f0beb580b7
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx1.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+
+#include <mach/hardware.h>
+#include <mach/common.h>
+#include "clk.h"
+
+/* CCM register addresses */
+#define IO_ADDR_CCM(off) (MX1_IO_ADDRESS(MX1_CCM_BASE_ADDR + (off)))
+
+#define CCM_CSCR IO_ADDR_CCM(0x0)
+#define CCM_MPCTL0 IO_ADDR_CCM(0x4)
+#define CCM_SPCTL0 IO_ADDR_CCM(0xc)
+#define CCM_PCDR IO_ADDR_CCM(0x20)
+
+/* SCM register addresses */
+#define IO_ADDR_SCM(off) (MX1_IO_ADDRESS(MX1_SCM_BASE_ADDR + (off)))
+
+#define SCM_GCCR IO_ADDR_SCM(0xc)
+
+static const char *prem_sel_clks[] = { "clk32_premult", "clk16m", };
+static const char *clko_sel_clks[] = { "per1", "hclk", "clk48m", "clk16m", "prem",
+ "fclk", };
+enum imx1_clks {
+ dummy, clk32, clk16m_ext, clk16m, clk32_premult, prem, mpll, spll, mcu,
+ fclk, hclk, clk48m, per1, per2, per3, clko, dma_gate, csi_gate,
+ mma_gate, usbd_gate, clk_max
+};
+
+static struct clk *clk[clk_max];
+
+int __init mx1_clocks_init(unsigned long fref)
+{
+ int i;
+
+ clk[dummy] = imx_clk_fixed("dummy", 0);
+ clk[clk32] = imx_clk_fixed("clk32", fref);
+ clk[clk16m_ext] = imx_clk_fixed("clk16m_ext", 16000000);
+ clk[clk16m] = imx_clk_gate("clk16m", "clk16m_ext", CCM_CSCR, 17);
+ clk[clk32_premult] = imx_clk_fixed_factor("clk32_premult", "clk32", 512, 1);
+ clk[prem] = imx_clk_mux("prem", CCM_CSCR, 16, 1, prem_sel_clks,
+ ARRAY_SIZE(prem_sel_clks));
+ clk[mpll] = imx_clk_pllv1("mpll", "clk32_premult", CCM_MPCTL0);
+ clk[spll] = imx_clk_pllv1("spll", "prem", CCM_SPCTL0);
+ clk[mcu] = imx_clk_divider("mcu", "clk32_premult", CCM_CSCR, 15, 1);
+ clk[fclk] = imx_clk_divider("fclk", "mpll", CCM_CSCR, 15, 1);
+ clk[hclk] = imx_clk_divider("hclk", "spll", CCM_CSCR, 10, 4);
+ clk[clk48m] = imx_clk_divider("clk48m", "spll", CCM_CSCR, 26, 3);
+ clk[per1] = imx_clk_divider("per1", "spll", CCM_PCDR, 0, 4);
+ clk[per2] = imx_clk_divider("per2", "spll", CCM_PCDR, 4, 4);
+ clk[per3] = imx_clk_divider("per3", "spll", CCM_PCDR, 16, 7);
+ clk[clko] = imx_clk_mux("clko", CCM_CSCR, 29, 3, clko_sel_clks,
+ ARRAY_SIZE(clko_sel_clks));
+ clk[dma_gate] = imx_clk_gate("dma_gate", "hclk", SCM_GCCR, 4);
+ clk[csi_gate] = imx_clk_gate("csi_gate", "hclk", SCM_GCCR, 2);
+ clk[mma_gate] = imx_clk_gate("mma_gate", "hclk", SCM_GCCR, 1);
+ clk[usbd_gate] = imx_clk_gate("usbd_gate", "clk48m", SCM_GCCR, 0);
+
+ for (i = 0; i < ARRAY_SIZE(clk); i++)
+ if (IS_ERR(clk[i]))
+ pr_err("imx1 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clk[i]));
+
+ clk_register_clkdev(clk[dma_gate], "ahb", "imx-dma");
+ clk_register_clkdev(clk[csi_gate], NULL, "mx1-camera.0");
+ clk_register_clkdev(clk[mma_gate], "mma", NULL);
+ clk_register_clkdev(clk[usbd_gate], NULL, "imx_udc.0");
+ clk_register_clkdev(clk[per1], "per", "imx-gpt.0");
+ clk_register_clkdev(clk[hclk], "ipg", "imx-gpt.0");
+ clk_register_clkdev(clk[per1], "per", "imx1-uart.0");
+ clk_register_clkdev(clk[hclk], "ipg", "imx1-uart.0");
+ clk_register_clkdev(clk[per1], "per", "imx1-uart.1");
+ clk_register_clkdev(clk[hclk], "ipg", "imx1-uart.1");
+ clk_register_clkdev(clk[per1], "per", "imx1-uart.2");
+ clk_register_clkdev(clk[hclk], "ipg", "imx1-uart.2");
+ clk_register_clkdev(clk[hclk], NULL, "imx-i2c.0");
+ clk_register_clkdev(clk[per2], "per", "imx1-cspi.0");
+ clk_register_clkdev(clk[dummy], "ipg", "imx1-cspi.0");
+ clk_register_clkdev(clk[per2], "per", "imx1-cspi.1");
+ clk_register_clkdev(clk[dummy], "ipg", "imx1-cspi.1");
+ clk_register_clkdev(clk[per2], NULL, "imx-mmc.0");
+ clk_register_clkdev(clk[per2], "per", "imx-fb.0");
+ clk_register_clkdev(clk[dummy], "ipg", "imx-fb.0");
+ clk_register_clkdev(clk[dummy], "ahb", "imx-fb.0");
+ clk_register_clkdev(clk[hclk], "mshc", NULL);
+ clk_register_clkdev(clk[per3], "ssi", NULL);
+ clk_register_clkdev(clk[clk32], NULL, "mxc_rtc.0");
+ clk_register_clkdev(clk[clko], "clko", NULL);
+
+ mxc_timer_init(NULL, MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR),
+ MX1_TIM1_INT);
+
+ return 0;
+}
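The clk_register_clkdev() calls above recreate the lookups the old clock-imx1.c provided, so existing drivers keep requesting clocks by device name and connection id. On the consumer side this is just the standard clk API; a sketch, not code from this patch, of how a driver bound to "imx1-uart.0" picks up the "per" and "ipg" entries:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_uart_clk_setup(struct device *dev)
{
	struct clk *per = clk_get(dev, "per");
	struct clk *ipg = clk_get(dev, "ipg");

	if (IS_ERR(per) || IS_ERR(ipg)) {
		if (!IS_ERR(per))
			clk_put(per);
		if (!IS_ERR(ipg))
			clk_put(ipg);
		return -ENODEV;
	}

	/* prepare + enable, as required by the common clock framework */
	clk_prepare_enable(ipg);
	clk_prepare_enable(per);
	return 0;
}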
diff --git a/arch/arm/mach-imx/clk-imx21.c b/arch/arm/mach-imx/clk-imx21.c
new file mode 100644
index 00000000000..4e4f384ee8d
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx21.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
+ * Copyright 2008 Martin Fuzzey, mfuzzey@gmail.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+
+#include <mach/hardware.h>
+#include <mach/common.h>
+#include "clk.h"
+
+#define IO_ADDR_CCM(off) (MX21_IO_ADDRESS(MX21_CCM_BASE_ADDR + (off)))
+
+/* Register offsets */
+#define CCM_CSCR IO_ADDR_CCM(0x0)
+#define CCM_MPCTL0 IO_ADDR_CCM(0x4)
+#define CCM_MPCTL1 IO_ADDR_CCM(0x8)
+#define CCM_SPCTL0 IO_ADDR_CCM(0xc)
+#define CCM_SPCTL1 IO_ADDR_CCM(0x10)
+#define CCM_OSC26MCTL IO_ADDR_CCM(0x14)
+#define CCM_PCDR0 IO_ADDR_CCM(0x18)
+#define CCM_PCDR1 IO_ADDR_CCM(0x1c)
+#define CCM_PCCR0 IO_ADDR_CCM(0x20)
+#define CCM_PCCR1 IO_ADDR_CCM(0x24)
+#define CCM_CCSR IO_ADDR_CCM(0x28)
+#define CCM_PMCTL IO_ADDR_CCM(0x2c)
+#define CCM_PMCOUNT IO_ADDR_CCM(0x30)
+#define CCM_WKGDCTL IO_ADDR_CCM(0x34)
+
+static const char *mpll_sel_clks[] = { "fpm", "ckih", };
+static const char *spll_sel_clks[] = { "fpm", "ckih", };
+
+enum imx21_clks {
+ ckil, ckih, fpm, mpll_sel, spll_sel, mpll, spll, fclk, hclk, ipg, per1,
+ per2, per3, per4, uart1_ipg_gate, uart2_ipg_gate, uart3_ipg_gate,
+ uart4_ipg_gate, gpt1_ipg_gate, gpt2_ipg_gate, gpt3_ipg_gate,
+ pwm_ipg_gate, sdhc1_ipg_gate, sdhc2_ipg_gate, lcdc_ipg_gate,
+ lcdc_hclk_gate, cspi3_ipg_gate, cspi2_ipg_gate, cspi1_ipg_gate,
+ per4_gate, csi_hclk_gate, usb_div, usb_gate, usb_hclk_gate, ssi1_gate,
+ ssi2_gate, nfc_div, nfc_gate, dma_gate, dma_hclk_gate, brom_gate,
+ emma_gate, emma_hclk_gate, slcdc_gate, slcdc_hclk_gate, wdog_gate,
+ gpio_gate, i2c_gate, kpp_gate, owire_gate, rtc_gate, clk_max
+};
+
+static struct clk *clk[clk_max];
+
+/*
+ * must be called very early to get information about the
+ * available clock rate when the timer framework starts
+ */
+int __init mx21_clocks_init(unsigned long lref, unsigned long href)
+{
+ int i;
+
+ clk[ckil] = imx_clk_fixed("ckil", lref);
+ clk[ckih] = imx_clk_fixed("ckih", href);
+ clk[fpm] = imx_clk_fixed_factor("fpm", "ckil", 512, 1);
+ clk[mpll_sel] = imx_clk_mux("mpll_sel", CCM_CSCR, 16, 1, mpll_sel_clks,
+ ARRAY_SIZE(mpll_sel_clks));
+ clk[spll_sel] = imx_clk_mux("spll_sel", CCM_CSCR, 17, 1, spll_sel_clks,
+ ARRAY_SIZE(spll_sel_clks));
+ clk[mpll] = imx_clk_pllv1("mpll", "mpll_sel", CCM_MPCTL0);
+ clk[spll] = imx_clk_pllv1("spll", "spll_sel", CCM_SPCTL0);
+ clk[fclk] = imx_clk_divider("fclk", "mpll", CCM_CSCR, 29, 3);
+ clk[hclk] = imx_clk_divider("hclk", "fclk", CCM_CSCR, 10, 4);
+ clk[ipg] = imx_clk_divider("ipg", "hclk", CCM_CSCR, 9, 1);
+ clk[per1] = imx_clk_divider("per1", "mpll", CCM_PCDR1, 0, 6);
+ clk[per2] = imx_clk_divider("per2", "mpll", CCM_PCDR1, 8, 6);
+ clk[per3] = imx_clk_divider("per3", "mpll", CCM_PCDR1, 16, 6);
+ clk[per4] = imx_clk_divider("per4", "mpll", CCM_PCDR1, 24, 6);
+ clk[uart1_ipg_gate] = imx_clk_gate("uart1_ipg_gate", "ipg", CCM_PCCR0, 0);
+ clk[uart2_ipg_gate] = imx_clk_gate("uart2_ipg_gate", "ipg", CCM_PCCR0, 1);
+ clk[uart3_ipg_gate] = imx_clk_gate("uart3_ipg_gate", "ipg", CCM_PCCR0, 2);
+ clk[uart4_ipg_gate] = imx_clk_gate("uart4_ipg_gate", "ipg", CCM_PCCR0, 3);
+ clk[gpt1_ipg_gate] = imx_clk_gate("gpt1_ipg_gate", "ipg", CCM_PCCR1, 25);
+ clk[gpt2_ipg_gate] = imx_clk_gate("gpt2_ipg_gate", "ipg", CCM_PCCR1, 26);
+ clk[gpt3_ipg_gate] = imx_clk_gate("gpt3_ipg_gate", "ipg", CCM_PCCR1, 27);
+ clk[pwm_ipg_gate] = imx_clk_gate("pwm_ipg_gate", "ipg", CCM_PCCR1, 28);
+ clk[sdhc1_ipg_gate] = imx_clk_gate("sdhc1_ipg_gate", "ipg", CCM_PCCR0, 9);
+ clk[sdhc2_ipg_gate] = imx_clk_gate("sdhc2_ipg_gate", "ipg", CCM_PCCR0, 10);
+ clk[lcdc_ipg_gate] = imx_clk_gate("lcdc_ipg_gate", "ipg", CCM_PCCR0, 18);
+ clk[lcdc_hclk_gate] = imx_clk_gate("lcdc_hclk_gate", "hclk", CCM_PCCR0, 26);
+ clk[cspi3_ipg_gate] = imx_clk_gate("cspi3_ipg_gate", "ipg", CCM_PCCR1, 23);
+ clk[cspi2_ipg_gate] = imx_clk_gate("cspi2_ipg_gate", "ipg", CCM_PCCR0, 5);
+ clk[cspi1_ipg_gate] = imx_clk_gate("cspi1_ipg_gate", "ipg", CCM_PCCR0, 4);
+ clk[per4_gate] = imx_clk_gate("per4_gate", "per4", CCM_PCCR0, 22);
+ clk[csi_hclk_gate] = imx_clk_gate("csi_hclk_gate", "hclk", CCM_PCCR0, 31);
+ clk[usb_div] = imx_clk_divider("usb_div", "spll", CCM_CSCR, 26, 3);
+ clk[usb_gate] = imx_clk_gate("usb_gate", "usb_div", CCM_PCCR0, 14);
+ clk[usb_hclk_gate] = imx_clk_gate("usb_hclk_gate", "hclk", CCM_PCCR0, 24);
+ clk[ssi1_gate] = imx_clk_gate("ssi1_gate", "ipg", CCM_PCCR0, 6);
+ clk[ssi2_gate] = imx_clk_gate("ssi2_gate", "ipg", CCM_PCCR0, 7);
+ clk[nfc_div] = imx_clk_divider("nfc_div", "ipg", CCM_PCDR0, 12, 4);
+ clk[nfc_gate] = imx_clk_gate("nfc_gate", "nfc_div", CCM_PCCR0, 19);
+ clk[dma_gate] = imx_clk_gate("dma_gate", "ipg", CCM_PCCR0, 13);
+ clk[dma_hclk_gate] = imx_clk_gate("dma_hclk_gate", "hclk", CCM_PCCR0, 30);
+ clk[brom_gate] = imx_clk_gate("brom_gate", "hclk", CCM_PCCR0, 28);
+ clk[emma_gate] = imx_clk_gate("emma_gate", "ipg", CCM_PCCR0, 15);
+ clk[emma_hclk_gate] = imx_clk_gate("emma_hclk_gate", "hclk", CCM_PCCR0, 27);
+ clk[slcdc_gate] = imx_clk_gate("slcdc_gate", "ipg", CCM_PCCR0, 25);
+ clk[slcdc_hclk_gate] = imx_clk_gate("slcdc_hclk_gate", "hclk", CCM_PCCR0, 21);
+ clk[wdog_gate] = imx_clk_gate("wdog_gate", "ipg", CCM_PCCR1, 24);
+ clk[gpio_gate] = imx_clk_gate("gpio_gate", "ipg", CCM_PCCR0, 11);
+ clk[i2c_gate] = imx_clk_gate("i2c_gate", "ipg", CCM_PCCR0, 12);
+ clk[kpp_gate] = imx_clk_gate("kpp_gate", "ipg", CCM_PCCR1, 30);
+ clk[owire_gate] = imx_clk_gate("owire_gate", "ipg", CCM_PCCR1, 31);
+ clk[rtc_gate] = imx_clk_gate("rtc_gate", "ipg", CCM_PCCR1, 29);
+
+ for (i = 0; i < ARRAY_SIZE(clk); i++)
+ if (IS_ERR(clk[i]))
+ pr_err("i.MX21 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clk[i]));
+
+ clk_register_clkdev(clk[per1], "per1", NULL);
+ clk_register_clkdev(clk[per2], "per2", NULL);
+ clk_register_clkdev(clk[per3], "per3", NULL);
+ clk_register_clkdev(clk[per4], "per4", NULL);
+ clk_register_clkdev(clk[per1], "per", "imx21-uart.0");
+ clk_register_clkdev(clk[uart1_ipg_gate], "ipg", "imx21-uart.0");
+ clk_register_clkdev(clk[per1], "per", "imx21-uart.1");
+ clk_register_clkdev(clk[uart2_ipg_gate], "ipg", "imx21-uart.1");
+ clk_register_clkdev(clk[per1], "per", "imx21-uart.2");
+ clk_register_clkdev(clk[uart3_ipg_gate], "ipg", "imx21-uart.2");
+ clk_register_clkdev(clk[per1], "per", "imx21-uart.3");
+ clk_register_clkdev(clk[uart4_ipg_gate], "ipg", "imx21-uart.3");
+ clk_register_clkdev(clk[gpt1_ipg_gate], "ipg", "imx-gpt.0");
+ clk_register_clkdev(clk[per1], "per", "imx-gpt.0");
+ clk_register_clkdev(clk[gpt2_ipg_gate], "ipg", "imx-gpt.1");
+ clk_register_clkdev(clk[per1], "per", "imx-gpt.1");
+ clk_register_clkdev(clk[gpt3_ipg_gate], "ipg", "imx-gpt.2");
+ clk_register_clkdev(clk[per1], "per", "imx-gpt.2");
+ clk_register_clkdev(clk[pwm_ipg_gate], "pwm", "mxc_pwm.0");
+ clk_register_clkdev(clk[per2], "per", "imx21-cspi.0");
+ clk_register_clkdev(clk[cspi1_ipg_gate], "ipg", "imx21-cspi.0");
+ clk_register_clkdev(clk[per2], "per", "imx21-cspi.1");
+ clk_register_clkdev(clk[cspi2_ipg_gate], "ipg", "imx21-cspi.1");
+ clk_register_clkdev(clk[per2], "per", "imx21-cspi.2");
+ clk_register_clkdev(clk[cspi3_ipg_gate], "ipg", "imx21-cspi.2");
+ clk_register_clkdev(clk[per3], "per", "imx-fb.0");
+ clk_register_clkdev(clk[lcdc_ipg_gate], "ipg", "imx-fb.0");
+ clk_register_clkdev(clk[lcdc_hclk_gate], "ahb", "imx-fb.0");
+ clk_register_clkdev(clk[usb_gate], "per", "imx21-hcd.0");
+ clk_register_clkdev(clk[usb_hclk_gate], "ahb", "imx21-hcd.0");
+ clk_register_clkdev(clk[nfc_gate], NULL, "mxc_nand.0");
+ clk_register_clkdev(clk[dma_hclk_gate], "ahb", "imx-dma");
+ clk_register_clkdev(clk[dma_gate], "ipg", "imx-dma");
+ clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
+ clk_register_clkdev(clk[i2c_gate], NULL, "imx-i2c.0");
+ clk_register_clkdev(clk[kpp_gate], NULL, "mxc-keypad");
+ clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1.0");
+ clk_register_clkdev(clk[brom_gate], "brom", NULL);
+ clk_register_clkdev(clk[emma_gate], "emma", NULL);
+ clk_register_clkdev(clk[slcdc_gate], "slcdc", NULL);
+ clk_register_clkdev(clk[gpio_gate], "gpio", NULL);
+ clk_register_clkdev(clk[rtc_gate], "rtc", NULL);
+ clk_register_clkdev(clk[csi_hclk_gate], "csi", NULL);
+ clk_register_clkdev(clk[ssi1_gate], "ssi1", NULL);
+ clk_register_clkdev(clk[ssi2_gate], "ssi2", NULL);
+ clk_register_clkdev(clk[sdhc1_ipg_gate], "sdhc1", NULL);
+ clk_register_clkdev(clk[sdhc2_ipg_gate], "sdhc2", NULL);
+
+ mxc_timer_init(NULL, MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR),
+ MX21_INT_GPT1);
+ return 0;
+}
diff --git a/arch/arm/mach-imx/clk-imx25.c b/arch/arm/mach-imx/clk-imx25.c
new file mode 100644
index 00000000000..d9833bb5fd6
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx25.c
@@ -0,0 +1,248 @@
+/*
+ * Copyright (C) 2009 by Sascha Hauer, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+
+#include <mach/hardware.h>
+#include <mach/common.h>
+#include <mach/mx25.h>
+#include "clk.h"
+
+#define CRM_BASE MX25_IO_ADDRESS(MX25_CRM_BASE_ADDR)
+
+#define CCM_MPCTL 0x00
+#define CCM_UPCTL 0x04
+#define CCM_CCTL 0x08
+#define CCM_CGCR0 0x0C
+#define CCM_CGCR1 0x10
+#define CCM_CGCR2 0x14
+#define CCM_PCDR0 0x18
+#define CCM_PCDR1 0x1C
+#define CCM_PCDR2 0x20
+#define CCM_PCDR3 0x24
+#define CCM_RCSR 0x28
+#define CCM_CRDR 0x2C
+#define CCM_DCVR0 0x30
+#define CCM_DCVR1 0x34
+#define CCM_DCVR2 0x38
+#define CCM_DCVR3 0x3c
+#define CCM_LTR0 0x40
+#define CCM_LTR1 0x44
+#define CCM_LTR2 0x48
+#define CCM_LTR3 0x4c
+#define CCM_MCR 0x64
+
+#define ccm(x) (CRM_BASE + (x))
+
+static const char *cpu_sel_clks[] = { "mpll", "mpll_cpu_3_4", };
+static const char *per_sel_clks[] = { "ahb", "upll", };
+
+enum mx25_clks {
+ dummy, osc, mpll, upll, mpll_cpu_3_4, cpu_sel, cpu, ahb, usb_div, ipg,
+ per0_sel, per1_sel, per2_sel, per3_sel, per4_sel, per5_sel, per6_sel,
+ per7_sel, per8_sel, per9_sel, per10_sel, per11_sel, per12_sel,
+ per13_sel, per14_sel, per15_sel, per0, per1, per2, per3, per4, per5,
+ per6, per7, per8, per9, per10, per11, per12, per13, per14, per15,
+ csi_ipg_per, esdhc1_ipg_per, esdhc2_ipg_per, gpt_ipg_per, i2c_ipg_per,
+ lcdc_ipg_per, nfc_ipg_per, ssi1_ipg_per, ssi2_ipg_per, uart_ipg_per,
+ csi_ahb, esdhc1_ahb, esdhc2_ahb, fec_ahb, lcdc_ahb, sdma_ahb,
+ usbotg_ahb, can1_ipg, can2_ipg, csi_ipg, cspi1_ipg, cspi2_ipg,
+ cspi3_ipg, dryice_ipg, esdhc1_ipg, esdhc2_ipg, fec_ipg, iim_ipg,
+ kpp_ipg, lcdc_ipg, pwm1_ipg, pwm2_ipg, pwm3_ipg, pwm4_ipg, sdma_ipg,
+ ssi1_ipg, ssi2_ipg, tsc_ipg, uart1_ipg, uart2_ipg, uart3_ipg,
+ uart4_ipg, uart5_ipg, wdt_ipg, clk_max
+};
+
+static struct clk *clk[clk_max];
+
+int __init mx25_clocks_init(void)
+{
+ int i;
+
+ clk[dummy] = imx_clk_fixed("dummy", 0);
+ clk[osc] = imx_clk_fixed("osc", 24000000);
+ clk[mpll] = imx_clk_pllv1("mpll", "osc", ccm(CCM_MPCTL));
+ clk[upll] = imx_clk_pllv1("upll", "osc", ccm(CCM_UPCTL));
+ clk[mpll_cpu_3_4] = imx_clk_fixed_factor("mpll_cpu_3_4", "mpll", 3, 4);
+ clk[cpu_sel] = imx_clk_mux("cpu_sel", ccm(CCM_CCTL), 14, 1, cpu_sel_clks, ARRAY_SIZE(cpu_sel_clks));
+ clk[cpu] = imx_clk_divider("cpu", "cpu_sel", ccm(CCM_CCTL), 30, 2);
+ clk[ahb] = imx_clk_divider("ahb", "cpu", ccm(CCM_CCTL), 28, 2);
+ clk[usb_div] = imx_clk_divider("usb_div", "upll", ccm(CCM_CCTL), 16, 6);
+ clk[ipg] = imx_clk_fixed_factor("ipg", "ahb", 1, 2);
+ clk[per0_sel] = imx_clk_mux("per0_sel", ccm(CCM_MCR), 0, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per1_sel] = imx_clk_mux("per1_sel", ccm(CCM_MCR), 1, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per2_sel] = imx_clk_mux("per2_sel", ccm(CCM_MCR), 2, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per3_sel] = imx_clk_mux("per3_sel", ccm(CCM_MCR), 3, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per4_sel] = imx_clk_mux("per4_sel", ccm(CCM_MCR), 4, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per5_sel] = imx_clk_mux("per5_sel", ccm(CCM_MCR), 5, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per6_sel] = imx_clk_mux("per6_sel", ccm(CCM_MCR), 6, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per7_sel] = imx_clk_mux("per7_sel", ccm(CCM_MCR), 7, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per8_sel] = imx_clk_mux("per8_sel", ccm(CCM_MCR), 8, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per9_sel] = imx_clk_mux("per9_sel", ccm(CCM_MCR), 9, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per10_sel] = imx_clk_mux("per10_sel", ccm(CCM_MCR), 10, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per11_sel] = imx_clk_mux("per11_sel", ccm(CCM_MCR), 11, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per12_sel] = imx_clk_mux("per12_sel", ccm(CCM_MCR), 12, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per13_sel] = imx_clk_mux("per13_sel", ccm(CCM_MCR), 13, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per14_sel] = imx_clk_mux("per14_sel", ccm(CCM_MCR), 14, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per15_sel] = imx_clk_mux("per15_sel", ccm(CCM_MCR), 15, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
+ clk[per0] = imx_clk_divider("per0", "per0_sel", ccm(CCM_PCDR0), 0, 6);
+ clk[per1] = imx_clk_divider("per1", "per1_sel", ccm(CCM_PCDR0), 8, 6);
+ clk[per2] = imx_clk_divider("per2", "per2_sel", ccm(CCM_PCDR0), 16, 6);
+ clk[per3] = imx_clk_divider("per3", "per3_sel", ccm(CCM_PCDR0), 24, 6);
+ clk[per4] = imx_clk_divider("per4", "per4_sel", ccm(CCM_PCDR1), 0, 6);
+ clk[per5] = imx_clk_divider("per5", "per5_sel", ccm(CCM_PCDR1), 8, 6);
+ clk[per6] = imx_clk_divider("per6", "per6_sel", ccm(CCM_PCDR1), 16, 6);
+ clk[per7] = imx_clk_divider("per7", "per7_sel", ccm(CCM_PCDR1), 24, 6);
+ clk[per8] = imx_clk_divider("per8", "per8_sel", ccm(CCM_PCDR2), 0, 6);
+ clk[per9] = imx_clk_divider("per9", "per9_sel", ccm(CCM_PCDR2), 8, 6);
+ clk[per10] = imx_clk_divider("per10", "per10_sel", ccm(CCM_PCDR2), 16, 6);
+ clk[per11] = imx_clk_divider("per11", "per11_sel", ccm(CCM_PCDR2), 24, 6);
+ clk[per12] = imx_clk_divider("per12", "per12_sel", ccm(CCM_PCDR3), 0, 6);
+ clk[per13] = imx_clk_divider("per13", "per13_sel", ccm(CCM_PCDR3), 8, 6);
+ clk[per14] = imx_clk_divider("per14", "per14_sel", ccm(CCM_PCDR3), 16, 6);
+ clk[per15] = imx_clk_divider("per15", "per15_sel", ccm(CCM_PCDR3), 24, 6);
+ clk[csi_ipg_per] = imx_clk_gate("csi_ipg_per", "per0", ccm(CCM_CGCR0), 0);
+ clk[esdhc1_ipg_per] = imx_clk_gate("esdhc1_ipg_per", "per3", ccm(CCM_CGCR0), 3);
+ clk[esdhc2_ipg_per] = imx_clk_gate("esdhc2_ipg_per", "per4", ccm(CCM_CGCR0), 4);
+ clk[gpt_ipg_per] = imx_clk_gate("gpt_ipg_per", "per5", ccm(CCM_CGCR0), 5);
+ clk[i2c_ipg_per] = imx_clk_gate("i2c_ipg_per", "per6", ccm(CCM_CGCR0), 6);
+ clk[lcdc_ipg_per] = imx_clk_gate("lcdc_ipg_per", "per8", ccm(CCM_CGCR0), 7);
+ clk[nfc_ipg_per] = imx_clk_gate("nfc_ipg_per", "ipg_per", ccm(CCM_CGCR0), 8);
+ clk[ssi1_ipg_per] = imx_clk_gate("ssi1_ipg_per", "per13", ccm(CCM_CGCR0), 13);
+ clk[ssi2_ipg_per] = imx_clk_gate("ssi2_ipg_per", "per14", ccm(CCM_CGCR0), 14);
+ clk[uart_ipg_per] = imx_clk_gate("uart_ipg_per", "per15", ccm(CCM_CGCR0), 15);
+ clk[csi_ahb] = imx_clk_gate("csi_ahb", "ahb", ccm(CCM_CGCR0), 18);
+ clk[esdhc1_ahb] = imx_clk_gate("esdhc1_ahb", "ahb", ccm(CCM_CGCR0), 21);
+ clk[esdhc2_ahb] = imx_clk_gate("esdhc2_ahb", "ahb", ccm(CCM_CGCR0), 22);
+ clk[fec_ahb] = imx_clk_gate("fec_ahb", "ahb", ccm(CCM_CGCR0), 23);
+ clk[lcdc_ahb] = imx_clk_gate("lcdc_ahb", "ahb", ccm(CCM_CGCR0), 24);
+ clk[sdma_ahb] = imx_clk_gate("sdma_ahb", "ahb", ccm(CCM_CGCR0), 26);
+ clk[usbotg_ahb] = imx_clk_gate("usbotg_ahb", "ahb", ccm(CCM_CGCR0), 28);
+ clk[can1_ipg] = imx_clk_gate("can1_ipg", "ipg", ccm(CCM_CGCR1), 2);
+ clk[can2_ipg] = imx_clk_gate("can2_ipg", "ipg", ccm(CCM_CGCR1), 3);
+ clk[csi_ipg] = imx_clk_gate("csi_ipg", "ipg", ccm(CCM_CGCR1), 4);
+ clk[cspi1_ipg] = imx_clk_gate("cspi1_ipg", "ipg", ccm(CCM_CGCR1), 5);
+ clk[cspi2_ipg] = imx_clk_gate("cspi2_ipg", "ipg", ccm(CCM_CGCR1), 6);
+ clk[cspi3_ipg] = imx_clk_gate("cspi3_ipg", "ipg", ccm(CCM_CGCR1), 7);
+ clk[dryice_ipg] = imx_clk_gate("dryice_ipg", "ipg", ccm(CCM_CGCR1), 8);
+ clk[esdhc1_ipg] = imx_clk_gate("esdhc1_ipg", "ipg", ccm(CCM_CGCR1), 13);
+ clk[esdhc2_ipg] = imx_clk_gate("esdhc2_ipg", "ipg", ccm(CCM_CGCR1), 14);
+ clk[fec_ipg] = imx_clk_gate("fec_ipg", "ipg", ccm(CCM_CGCR1), 15);
+ clk[iim_ipg] = imx_clk_gate("iim_ipg", "ipg", ccm(CCM_CGCR1), 26);
+ clk[kpp_ipg] = imx_clk_gate("kpp_ipg", "ipg", ccm(CCM_CGCR1), 28);
+ clk[lcdc_ipg] = imx_clk_gate("lcdc_ipg", "ipg", ccm(CCM_CGCR1), 29);
+ clk[pwm1_ipg] = imx_clk_gate("pwm1_ipg", "ipg", ccm(CCM_CGCR1), 31);
+ clk[pwm2_ipg] = imx_clk_gate("pwm2_ipg", "ipg", ccm(CCM_CGCR2), 0);
+ clk[pwm3_ipg] = imx_clk_gate("pwm3_ipg", "ipg", ccm(CCM_CGCR2), 1);
+ clk[pwm4_ipg] = imx_clk_gate("pwm4_ipg", "ipg", ccm(CCM_CGCR2), 2);
+ clk[sdma_ipg] = imx_clk_gate("sdma_ipg", "ipg", ccm(CCM_CGCR2), 6);
+ clk[ssi1_ipg] = imx_clk_gate("ssi1_ipg", "ipg", ccm(CCM_CGCR2), 11);
+ clk[ssi2_ipg] = imx_clk_gate("ssi2_ipg", "ipg", ccm(CCM_CGCR2), 12);
+ clk[tsc_ipg] = imx_clk_gate("tsc_ipg", "ipg", ccm(CCM_CGCR2), 13);
+ clk[uart1_ipg] = imx_clk_gate("uart1_ipg", "ipg", ccm(CCM_CGCR2), 14);
+ clk[uart2_ipg] = imx_clk_gate("uart2_ipg", "ipg", ccm(CCM_CGCR2), 15);
+ clk[uart3_ipg] = imx_clk_gate("uart3_ipg", "ipg", ccm(CCM_CGCR2), 16);
+ clk[uart4_ipg] = imx_clk_gate("uart4_ipg", "ipg", ccm(CCM_CGCR2), 17);
+ clk[uart5_ipg] = imx_clk_gate("uart5_ipg", "ipg", ccm(CCM_CGCR2), 18);
+ clk[wdt_ipg] = imx_clk_gate("wdt_ipg", "ipg", ccm(CCM_CGCR2), 19);
+
+ for (i = 0; i < ARRAY_SIZE(clk); i++)
+ if (IS_ERR(clk[i]))
+ pr_err("i.MX25 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clk[i]));
+
+ /* i.mx25 has the i.mx21 type uart */
+ clk_register_clkdev(clk[uart1_ipg], "ipg", "imx21-uart.0");
+ clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.0");
+ clk_register_clkdev(clk[uart2_ipg], "ipg", "imx21-uart.1");
+ clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.1");
+ clk_register_clkdev(clk[uart3_ipg], "ipg", "imx21-uart.2");
+ clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.2");
+ clk_register_clkdev(clk[uart4_ipg], "ipg", "imx21-uart.3");
+ clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.3");
+ clk_register_clkdev(clk[uart5_ipg], "ipg", "imx21-uart.4");
+ clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.4");
+ clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0");
+ clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
+ clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
+ clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.0");
+ clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0");
+ clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.1");
+ clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.1");
+ clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.1");
+ clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
+ clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.2");
+ clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2");
+ clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc");
+ clk_register_clkdev(clk[usbotg_ahb], "ahb", "fsl-usb2-udc");
+ clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc");
+ clk_register_clkdev(clk[nfc_ipg_per], NULL, "mxc_nand.0");
+ /* i.mx25 has the i.mx35 type cspi */
+ clk_register_clkdev(clk[cspi1_ipg], NULL, "imx35-cspi.0");
+ clk_register_clkdev(clk[cspi2_ipg], NULL, "imx35-cspi.1");
+ clk_register_clkdev(clk[cspi3_ipg], NULL, "imx35-cspi.2");
+ clk_register_clkdev(clk[pwm1_ipg], "ipg", "mxc_pwm.0");
+ clk_register_clkdev(clk[per10], "per", "mxc_pwm.0");
+ clk_register_clkdev(clk[pwm1_ipg], "ipg", "mxc_pwm.1");
+ clk_register_clkdev(clk[per10], "per", "mxc_pwm.1");
+ clk_register_clkdev(clk[pwm1_ipg], "ipg", "mxc_pwm.2");
+ clk_register_clkdev(clk[per10], "per", "mxc_pwm.2");
+ clk_register_clkdev(clk[pwm1_ipg], "ipg", "mxc_pwm.3");
+ clk_register_clkdev(clk[per10], "per", "mxc_pwm.3");
+ clk_register_clkdev(clk[kpp_ipg], NULL, "imx-keypad");
+ clk_register_clkdev(clk[tsc_ipg], NULL, "mx25-adc");
+ clk_register_clkdev(clk[i2c_ipg_per], NULL, "imx-i2c.0");
+ clk_register_clkdev(clk[i2c_ipg_per], NULL, "imx-i2c.1");
+ clk_register_clkdev(clk[i2c_ipg_per], NULL, "imx-i2c.2");
+ clk_register_clkdev(clk[fec_ipg], "ipg", "imx25-fec.0");
+ clk_register_clkdev(clk[fec_ahb], "ahb", "imx25-fec.0");
+ clk_register_clkdev(clk[dryice_ipg], NULL, "imxdi_rtc.0");
+ clk_register_clkdev(clk[lcdc_ipg_per], "per", "imx-fb.0");
+ clk_register_clkdev(clk[lcdc_ipg], "ipg", "imx-fb.0");
+ clk_register_clkdev(clk[lcdc_ahb], "ahb", "imx-fb.0");
+ clk_register_clkdev(clk[wdt_ipg], NULL, "imx2-wdt.0");
+ clk_register_clkdev(clk[ssi1_ipg_per], "per", "imx-ssi.0");
+ clk_register_clkdev(clk[ssi1_ipg], "ipg", "imx-ssi.0");
+ clk_register_clkdev(clk[ssi2_ipg_per], "per", "imx-ssi.1");
+ clk_register_clkdev(clk[ssi2_ipg], "ipg", "imx-ssi.1");
+ clk_register_clkdev(clk[esdhc1_ipg_per], "per", "sdhci-esdhc-imx25.0");
+ clk_register_clkdev(clk[esdhc1_ipg], "ipg", "sdhci-esdhc-imx25.0");
+ clk_register_clkdev(clk[esdhc1_ahb], "ahb", "sdhci-esdhc-imx25.0");
+ clk_register_clkdev(clk[esdhc2_ipg_per], "per", "sdhci-esdhc-imx25.1");
+ clk_register_clkdev(clk[esdhc2_ipg], "ipg", "sdhci-esdhc-imx25.1");
+ clk_register_clkdev(clk[esdhc2_ahb], "ahb", "sdhci-esdhc-imx25.1");
+ clk_register_clkdev(clk[csi_ipg_per], "per", "mx2-camera.0");
+ clk_register_clkdev(clk[csi_ipg], "ipg", "mx2-camera.0");
+ clk_register_clkdev(clk[csi_ahb], "ahb", "mx2-camera.0");
+ clk_register_clkdev(clk[dummy], "audmux", NULL);
+ clk_register_clkdev(clk[can1_ipg], NULL, "flexcan.0");
+ clk_register_clkdev(clk[can2_ipg], NULL, "flexcan.1");
+ /* i.mx25 has the i.mx35 type sdma */
+ clk_register_clkdev(clk[sdma_ipg], "ipg", "imx35-sdma");
+ clk_register_clkdev(clk[sdma_ahb], "ahb", "imx35-sdma");
+ clk_register_clkdev(clk[iim_ipg], "iim", NULL);
+
+ mxc_timer_init(NULL, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
+ return 0;
+}
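Throughout these files the imx_clk_mux()/imx_clk_divider()/imx_clk_gate() calls are thin wrappers, declared in the local clk.h, around the generic clk-provider registration helpers, serialised by the shared imx_ccm_lock that clk-busy.c also uses. The header is not part of this excerpt, so the following is only a plausible shape for one such wrapper, not its verified definition:

#include <linux/clk-provider.h>
#include <linux/spinlock.h>

extern spinlock_t imx_ccm_lock;	/* shared CCM register lock, see clk-busy.c */

/* Plausible sketch of an imx_clk_divider()-style helper: dev-less
 * registration of a standard divider field, guarded by the CCM lock. */
static inline struct clk *example_imx_clk_divider(const char *name,
		const char *parent, void __iomem *reg, u8 shift, u8 width)
{
	return clk_register_divider(NULL, name, parent,
			CLK_SET_RATE_PARENT, reg, shift, width,
			0, &imx_ccm_lock);
}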
diff --git a/arch/arm/mach-imx/clk-imx27.c b/arch/arm/mach-imx/clk-imx27.c
new file mode 100644
index 00000000000..50a7ebd8d1b
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx27.c
@@ -0,0 +1,290 @@
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+
+#include <mach/common.h>
+#include <mach/hardware.h>
+#include "clk.h"
+
+#define IO_ADDR_CCM(off) (MX27_IO_ADDRESS(MX27_CCM_BASE_ADDR + (off)))
+
+/* Register offsets */
+#define CCM_CSCR IO_ADDR_CCM(0x0)
+#define CCM_MPCTL0 IO_ADDR_CCM(0x4)
+#define CCM_MPCTL1 IO_ADDR_CCM(0x8)
+#define CCM_SPCTL0 IO_ADDR_CCM(0xc)
+#define CCM_SPCTL1 IO_ADDR_CCM(0x10)
+#define CCM_OSC26MCTL IO_ADDR_CCM(0x14)
+#define CCM_PCDR0 IO_ADDR_CCM(0x18)
+#define CCM_PCDR1 IO_ADDR_CCM(0x1c)
+#define CCM_PCCR0 IO_ADDR_CCM(0x20)
+#define CCM_PCCR1 IO_ADDR_CCM(0x24)
+#define CCM_CCSR IO_ADDR_CCM(0x28)
+#define CCM_PMCTL IO_ADDR_CCM(0x2c)
+#define CCM_PMCOUNT IO_ADDR_CCM(0x30)
+#define CCM_WKGDCTL IO_ADDR_CCM(0x34)
+
+#define CCM_CSCR_UPDATE_DIS (1 << 31)
+#define CCM_CSCR_SSI2 (1 << 23)
+#define CCM_CSCR_SSI1 (1 << 22)
+#define CCM_CSCR_VPU (1 << 21)
+#define CCM_CSCR_MSHC (1 << 20)
+#define CCM_CSCR_SPLLRES (1 << 19)
+#define CCM_CSCR_MPLLRES (1 << 18)
+#define CCM_CSCR_SP (1 << 17)
+#define CCM_CSCR_MCU (1 << 16)
+#define CCM_CSCR_OSC26MDIV (1 << 4)
+#define CCM_CSCR_OSC26M (1 << 3)
+#define CCM_CSCR_FPM (1 << 2)
+#define CCM_CSCR_SPEN (1 << 1)
+#define CCM_CSCR_MPEN (1 << 0)
+
+/* i.MX27 TO 2+ */
+#define CCM_CSCR_ARM_SRC (1 << 15)
+
+#define CCM_SPCTL1_LF (1 << 15)
+#define CCM_SPCTL1_BRMO (1 << 6)
+
+static const char *vpu_sel_clks[] = { "spll", "mpll_main2", };
+static const char *cpu_sel_clks[] = { "mpll_main2", "mpll", };
+static const char *clko_sel_clks[] = {
+ "ckil", "prem", "ckih", "ckih",
+ "ckih", "mpll", "spll", "cpu_div",
+ "ahb", "ipg", "per1_div", "per2_div",
+ "per3_div", "per4_div", "ssi1_div", "ssi2_div",
+ "nfc_div", "mshc_div", "vpu_div", "60m",
+ "32k", "usb_div", "dptc",
+};
+
+static const char *ssi_sel_clks[] = { "spll", "mpll", };
+
+enum mx27_clks {
+ dummy, ckih, ckil, mpll, spll, mpll_main2, ahb, ipg, nfc_div, per1_div,
+ per2_div, per3_div, per4_div, vpu_sel, vpu_div, usb_div, cpu_sel,
+ clko_sel, cpu_div, clko_div, ssi1_sel, ssi2_sel, ssi1_div, ssi2_div,
+ clko_en, ssi2_ipg_gate, ssi1_ipg_gate, slcdc_ipg_gate, sdhc3_ipg_gate,
+ sdhc2_ipg_gate, sdhc1_ipg_gate, scc_ipg_gate, sahara_ipg_gate,
+ rtc_ipg_gate, pwm_ipg_gate, owire_ipg_gate, lcdc_ipg_gate,
+ kpp_ipg_gate, iim_ipg_gate, i2c2_ipg_gate, i2c1_ipg_gate,
+ gpt6_ipg_gate, gpt5_ipg_gate, gpt4_ipg_gate, gpt3_ipg_gate,
+ gpt2_ipg_gate, gpt1_ipg_gate, gpio_ipg_gate, fec_ipg_gate,
+ emma_ipg_gate, dma_ipg_gate, cspi3_ipg_gate, cspi2_ipg_gate,
+ cspi1_ipg_gate, nfc_baud_gate, ssi2_baud_gate, ssi1_baud_gate,
+ vpu_baud_gate, per4_gate, per3_gate, per2_gate, per1_gate,
+ usb_ahb_gate, slcdc_ahb_gate, sahara_ahb_gate, lcdc_ahb_gate,
+ vpu_ahb_gate, fec_ahb_gate, emma_ahb_gate, emi_ahb_gate, dma_ahb_gate,
+ csi_ahb_gate, brom_ahb_gate, ata_ahb_gate, wdog_ipg_gate, usb_ipg_gate,
+ uart6_ipg_gate, uart5_ipg_gate, uart4_ipg_gate, uart3_ipg_gate,
+ uart2_ipg_gate, uart1_ipg_gate, clk_max
+};
+
+static struct clk *clk[clk_max];
+
+int __init mx27_clocks_init(unsigned long fref)
+{
+ int i;
+
+ clk[dummy] = imx_clk_fixed("dummy", 0);
+ clk[ckih] = imx_clk_fixed("ckih", fref);
+ clk[ckil] = imx_clk_fixed("ckil", 32768);
+ clk[mpll] = imx_clk_pllv1("mpll", "ckih", CCM_MPCTL0);
+ clk[spll] = imx_clk_pllv1("spll", "ckih", CCM_SPCTL0);
+ clk[mpll_main2] = imx_clk_fixed_factor("mpll_main2", "mpll", 2, 3);
+
+ if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
+ clk[ahb] = imx_clk_divider("ahb", "mpll_main2", CCM_CSCR, 8, 2);
+ clk[ipg] = imx_clk_fixed_factor("ipg", "ahb", 1, 2);
+ } else {
+ clk[ahb] = imx_clk_divider("ahb", "mpll_main2", CCM_CSCR, 9, 4);
+ clk[ipg] = imx_clk_divider("ipg", "ahb", CCM_CSCR, 8, 1);
+ }
+
+ clk[nfc_div] = imx_clk_divider("nfc_div", "ahb", CCM_PCDR0, 6, 4);
+ clk[per1_div] = imx_clk_divider("per1_div", "mpll_main2", CCM_PCDR1, 0, 6);
+ clk[per2_div] = imx_clk_divider("per2_div", "mpll_main2", CCM_PCDR1, 8, 6);
+ clk[per3_div] = imx_clk_divider("per3_div", "mpll_main2", CCM_PCDR1, 16, 6);
+ clk[per4_div] = imx_clk_divider("per4_div", "mpll_main2", CCM_PCDR1, 24, 6);
+ clk[vpu_sel] = imx_clk_mux("vpu_sel", CCM_CSCR, 21, 1, vpu_sel_clks, ARRAY_SIZE(vpu_sel_clks));
+ clk[vpu_div] = imx_clk_divider("vpu_div", "vpu_sel", CCM_PCDR0, 10, 3);
+ clk[usb_div] = imx_clk_divider("usb_div", "spll", CCM_CSCR, 28, 3);
+ clk[cpu_sel] = imx_clk_mux("cpu_sel", CCM_CSCR, 15, 1, cpu_sel_clks, ARRAY_SIZE(cpu_sel_clks));
+ clk[clko_sel] = imx_clk_mux("clko_sel", CCM_CCSR, 0, 5, clko_sel_clks, ARRAY_SIZE(clko_sel_clks));
+ if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
+ clk[cpu_div] = imx_clk_divider("cpu_div", "cpu_sel", CCM_CSCR, 12, 2);
+ else
+ clk[cpu_div] = imx_clk_divider("cpu_div", "cpu_sel", CCM_CSCR, 13, 3);
+ clk[clko_div] = imx_clk_divider("clko_div", "clko_sel", CCM_PCDR0, 22, 3);
+ clk[ssi1_sel] = imx_clk_mux("ssi1_sel", CCM_CSCR, 22, 1, ssi_sel_clks, ARRAY_SIZE(ssi_sel_clks));
+ clk[ssi2_sel] = imx_clk_mux("ssi2_sel", CCM_CSCR, 23, 1, ssi_sel_clks, ARRAY_SIZE(ssi_sel_clks));
+ clk[ssi1_div] = imx_clk_divider("ssi1_div", "ssi1_sel", CCM_PCDR0, 16, 6);
+ clk[ssi2_div] = imx_clk_divider("ssi2_div", "ssi2_sel", CCM_PCDR0, 26, 3);
+ clk[clko_en] = imx_clk_gate("clko_en", "clko_div", CCM_PCCR0, 0);
+ clk[ssi2_ipg_gate] = imx_clk_gate("ssi2_ipg_gate", "ipg", CCM_PCCR0, 0);
+ clk[ssi1_ipg_gate] = imx_clk_gate("ssi1_ipg_gate", "ipg", CCM_PCCR0, 1);
+ clk[slcdc_ipg_gate] = imx_clk_gate("slcdc_ipg_gate", "ipg", CCM_PCCR0, 2);
+ clk[sdhc3_ipg_gate] = imx_clk_gate("sdhc3_ipg_gate", "ipg", CCM_PCCR0, 3);
+ clk[sdhc2_ipg_gate] = imx_clk_gate("sdhc2_ipg_gate", "ipg", CCM_PCCR0, 4);
+ clk[sdhc1_ipg_gate] = imx_clk_gate("sdhc1_ipg_gate", "ipg", CCM_PCCR0, 5);
+ clk[scc_ipg_gate] = imx_clk_gate("scc_ipg_gate", "ipg", CCM_PCCR0, 6);
+ clk[sahara_ipg_gate] = imx_clk_gate("sahara_ipg_gate", "ipg", CCM_PCCR0, 7);
+ clk[rtc_ipg_gate] = imx_clk_gate("rtc_ipg_gate", "ipg", CCM_PCCR0, 9);
+ clk[pwm_ipg_gate] = imx_clk_gate("pwm_ipg_gate", "ipg", CCM_PCCR0, 11);
+ clk[owire_ipg_gate] = imx_clk_gate("owire_ipg_gate", "ipg", CCM_PCCR0, 12);
+ clk[lcdc_ipg_gate] = imx_clk_gate("lcdc_ipg_gate", "ipg", CCM_PCCR0, 14);
+ clk[kpp_ipg_gate] = imx_clk_gate("kpp_ipg_gate", "ipg", CCM_PCCR0, 15);
+ clk[iim_ipg_gate] = imx_clk_gate("iim_ipg_gate", "ipg", CCM_PCCR0, 16);
+ clk[i2c2_ipg_gate] = imx_clk_gate("i2c2_ipg_gate", "ipg", CCM_PCCR0, 17);
+ clk[i2c1_ipg_gate] = imx_clk_gate("i2c1_ipg_gate", "ipg", CCM_PCCR0, 18);
+ clk[gpt6_ipg_gate] = imx_clk_gate("gpt6_ipg_gate", "ipg", CCM_PCCR0, 19);
+ clk[gpt5_ipg_gate] = imx_clk_gate("gpt5_ipg_gate", "ipg", CCM_PCCR0, 20);
+ clk[gpt4_ipg_gate] = imx_clk_gate("gpt4_ipg_gate", "ipg", CCM_PCCR0, 21);
+ clk[gpt3_ipg_gate] = imx_clk_gate("gpt3_ipg_gate", "ipg", CCM_PCCR0, 22);
+ clk[gpt2_ipg_gate] = imx_clk_gate("gpt2_ipg_gate", "ipg", CCM_PCCR0, 23);
+ clk[gpt1_ipg_gate] = imx_clk_gate("gpt1_ipg_gate", "ipg", CCM_PCCR0, 24);
+ clk[gpio_ipg_gate] = imx_clk_gate("gpio_ipg_gate", "ipg", CCM_PCCR0, 25);
+ clk[fec_ipg_gate] = imx_clk_gate("fec_ipg_gate", "ipg", CCM_PCCR0, 26);
+ clk[emma_ipg_gate] = imx_clk_gate("emma_ipg_gate", "ipg", CCM_PCCR0, 27);
+ clk[dma_ipg_gate] = imx_clk_gate("dma_ipg_gate", "ipg", CCM_PCCR0, 28);
+ clk[cspi3_ipg_gate] = imx_clk_gate("cspi3_ipg_gate", "ipg", CCM_PCCR0, 29);
+ clk[cspi2_ipg_gate] = imx_clk_gate("cspi2_ipg_gate", "ipg", CCM_PCCR0, 30);
+ clk[cspi1_ipg_gate] = imx_clk_gate("cspi1_ipg_gate", "ipg", CCM_PCCR0, 31);
+ clk[nfc_baud_gate] = imx_clk_gate("nfc_baud_gate", "nfc_div", CCM_PCCR1, 3);
+ clk[ssi2_baud_gate] = imx_clk_gate("ssi2_baud_gate", "ssi2_div", CCM_PCCR1, 4);
+ clk[ssi1_baud_gate] = imx_clk_gate("ssi1_baud_gate", "ssi1_div", CCM_PCCR1, 5);
+ clk[vpu_baud_gate] = imx_clk_gate("vpu_baud_gate", "vpu_div", CCM_PCCR1, 6);
+ clk[per4_gate] = imx_clk_gate("per4_gate", "per4_div", CCM_PCCR1, 7);
+ clk[per3_gate] = imx_clk_gate("per3_gate", "per3_div", CCM_PCCR1, 8);
+ clk[per2_gate] = imx_clk_gate("per2_gate", "per2_div", CCM_PCCR1, 9);
+ clk[per1_gate] = imx_clk_gate("per1_gate", "per1_div", CCM_PCCR1, 10);
+ clk[usb_ahb_gate] = imx_clk_gate("usb_ahb_gate", "ahb", CCM_PCCR1, 11);
+ clk[slcdc_ahb_gate] = imx_clk_gate("slcdc_ahb_gate", "ahb", CCM_PCCR1, 12);
+ clk[sahara_ahb_gate] = imx_clk_gate("sahara_ahb_gate", "ahb", CCM_PCCR1, 13);
+ clk[lcdc_ahb_gate] = imx_clk_gate("lcdc_ahb_gate", "ahb", CCM_PCCR1, 15);
+ clk[vpu_ahb_gate] = imx_clk_gate("vpu_ahb_gate", "ahb", CCM_PCCR1, 16);
+ clk[fec_ahb_gate] = imx_clk_gate("fec_ahb_gate", "ahb", CCM_PCCR1, 17);
+ clk[emma_ahb_gate] = imx_clk_gate("emma_ahb_gate", "ahb", CCM_PCCR1, 18);
+ clk[emi_ahb_gate] = imx_clk_gate("emi_ahb_gate", "ahb", CCM_PCCR1, 19);
+ clk[dma_ahb_gate] = imx_clk_gate("dma_ahb_gate", "ahb", CCM_PCCR1, 20);
+ clk[csi_ahb_gate] = imx_clk_gate("csi_ahb_gate", "ahb", CCM_PCCR1, 21);
+ clk[brom_ahb_gate] = imx_clk_gate("brom_ahb_gate", "ahb", CCM_PCCR1, 22);
+ clk[ata_ahb_gate] = imx_clk_gate("ata_ahb_gate", "ahb", CCM_PCCR1, 23);
+ clk[wdog_ipg_gate] = imx_clk_gate("wdog_ipg_gate", "ipg", CCM_PCCR1, 24);
+ clk[usb_ipg_gate] = imx_clk_gate("usb_ipg_gate", "ipg", CCM_PCCR1, 25);
+ clk[uart6_ipg_gate] = imx_clk_gate("uart6_ipg_gate", "ipg", CCM_PCCR1, 26);
+ clk[uart5_ipg_gate] = imx_clk_gate("uart5_ipg_gate", "ipg", CCM_PCCR1, 27);
+ clk[uart4_ipg_gate] = imx_clk_gate("uart4_ipg_gate", "ipg", CCM_PCCR1, 28);
+ clk[uart3_ipg_gate] = imx_clk_gate("uart3_ipg_gate", "ipg", CCM_PCCR1, 29);
+ clk[uart2_ipg_gate] = imx_clk_gate("uart2_ipg_gate", "ipg", CCM_PCCR1, 30);
+ clk[uart1_ipg_gate] = imx_clk_gate("uart1_ipg_gate", "ipg", CCM_PCCR1, 31);
+
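+	/* Report any clock that failed to register */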
+ for (i = 0; i < ARRAY_SIZE(clk); i++)
+ if (IS_ERR(clk[i]))
+ pr_err("i.MX27 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clk[i]));
+
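+	/* clkdev lookups resolved by drivers via clk_get(dev, con_id) */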
+ clk_register_clkdev(clk[uart1_ipg_gate], "ipg", "imx21-uart.0");
+ clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.0");
+ clk_register_clkdev(clk[uart2_ipg_gate], "ipg", "imx21-uart.1");
+ clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.1");
+ clk_register_clkdev(clk[uart3_ipg_gate], "ipg", "imx21-uart.2");
+ clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.2");
+ clk_register_clkdev(clk[uart4_ipg_gate], "ipg", "imx21-uart.3");
+ clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.3");
+ clk_register_clkdev(clk[uart5_ipg_gate], "ipg", "imx21-uart.4");
+ clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.4");
+ clk_register_clkdev(clk[uart6_ipg_gate], "ipg", "imx21-uart.5");
+ clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.5");
+ clk_register_clkdev(clk[gpt1_ipg_gate], "ipg", "imx-gpt.0");
+ clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.0");
+ clk_register_clkdev(clk[gpt2_ipg_gate], "ipg", "imx-gpt.1");
+ clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.1");
+ clk_register_clkdev(clk[gpt3_ipg_gate], "ipg", "imx-gpt.2");
+ clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.2");
+ clk_register_clkdev(clk[gpt4_ipg_gate], "ipg", "imx-gpt.3");
+ clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.3");
+ clk_register_clkdev(clk[gpt5_ipg_gate], "ipg", "imx-gpt.4");
+ clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.4");
+ clk_register_clkdev(clk[gpt6_ipg_gate], "ipg", "imx-gpt.5");
+ clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.5");
+ clk_register_clkdev(clk[pwm_ipg_gate], NULL, "mxc_pwm.0");
+ clk_register_clkdev(clk[per2_gate], "per", "mxc-mmc.0");
+ clk_register_clkdev(clk[sdhc1_ipg_gate], "ipg", "mxc-mmc.0");
+ clk_register_clkdev(clk[per2_gate], "per", "mxc-mmc.1");
+ clk_register_clkdev(clk[sdhc2_ipg_gate], "ipg", "mxc-mmc.1");
+ clk_register_clkdev(clk[per2_gate], "per", "mxc-mmc.2");
+	clk_register_clkdev(clk[sdhc3_ipg_gate], "ipg", "mxc-mmc.2");
+ clk_register_clkdev(clk[cspi1_ipg_gate], NULL, "imx27-cspi.0");
+ clk_register_clkdev(clk[cspi2_ipg_gate], NULL, "imx27-cspi.1");
+ clk_register_clkdev(clk[cspi3_ipg_gate], NULL, "imx27-cspi.2");
+ clk_register_clkdev(clk[per3_gate], "per", "imx-fb.0");
+ clk_register_clkdev(clk[lcdc_ipg_gate], "ipg", "imx-fb.0");
+ clk_register_clkdev(clk[lcdc_ahb_gate], "ahb", "imx-fb.0");
+ clk_register_clkdev(clk[csi_ahb_gate], NULL, "mx2-camera.0");
+ clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc");
+ clk_register_clkdev(clk[usb_ipg_gate], "ipg", "fsl-usb2-udc");
+ clk_register_clkdev(clk[usb_ahb_gate], "ahb", "fsl-usb2-udc");
+ clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0");
+ clk_register_clkdev(clk[usb_ipg_gate], "ipg", "mxc-ehci.0");
+ clk_register_clkdev(clk[usb_ahb_gate], "ahb", "mxc-ehci.0");
+ clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.1");
+ clk_register_clkdev(clk[usb_ipg_gate], "ipg", "mxc-ehci.1");
+ clk_register_clkdev(clk[usb_ahb_gate], "ahb", "mxc-ehci.1");
+ clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2");
+ clk_register_clkdev(clk[usb_ipg_gate], "ipg", "mxc-ehci.2");
+ clk_register_clkdev(clk[usb_ahb_gate], "ahb", "mxc-ehci.2");
+ clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "imx-ssi.0");
+ clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1");
+ clk_register_clkdev(clk[nfc_baud_gate], NULL, "mxc_nand.0");
+ clk_register_clkdev(clk[vpu_baud_gate], "per", "imx-vpu");
+ clk_register_clkdev(clk[vpu_ahb_gate], "ahb", "imx-vpu");
+ clk_register_clkdev(clk[dma_ahb_gate], "ahb", "imx-dma");
+ clk_register_clkdev(clk[dma_ipg_gate], "ipg", "imx-dma");
+ clk_register_clkdev(clk[fec_ipg_gate], "ipg", "imx27-fec.0");
+ clk_register_clkdev(clk[fec_ahb_gate], "ahb", "imx27-fec.0");
+ clk_register_clkdev(clk[wdog_ipg_gate], NULL, "imx2-wdt.0");
+ clk_register_clkdev(clk[i2c1_ipg_gate], NULL, "imx-i2c.0");
+ clk_register_clkdev(clk[i2c2_ipg_gate], NULL, "imx-i2c.1");
+ clk_register_clkdev(clk[owire_ipg_gate], NULL, "mxc_w1.0");
+ clk_register_clkdev(clk[kpp_ipg_gate], NULL, "imx-keypad");
+ clk_register_clkdev(clk[emma_ahb_gate], "ahb", "imx-emma");
+ clk_register_clkdev(clk[emma_ipg_gate], "ipg", "imx-emma");
+ clk_register_clkdev(clk[iim_ipg_gate], "iim", NULL);
+ clk_register_clkdev(clk[gpio_ipg_gate], "gpio", NULL);
+ clk_register_clkdev(clk[brom_ahb_gate], "brom", NULL);
+ clk_register_clkdev(clk[ata_ahb_gate], "ata", NULL);
+ clk_register_clkdev(clk[rtc_ipg_gate], "rtc", NULL);
+ clk_register_clkdev(clk[scc_ipg_gate], "scc", NULL);
+ clk_register_clkdev(clk[cpu_div], "cpu", NULL);
+	clk_register_clkdev(clk[emi_ahb_gate], "emi_ahb", NULL);
+	clk_register_clkdev(clk[ssi1_baud_gate], "bitrate", "imx-ssi.0");
+	clk_register_clkdev(clk[ssi2_baud_gate], "bitrate", "imx-ssi.1");
+
+ mxc_timer_init(NULL, MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR),
+ MX27_INT_GPT1);
+
+ clk_prepare_enable(clk[emi_ahb_gate]);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+int __init mx27_clocks_init_dt(void)
+{
+ struct device_node *np;
+ u32 fref = 26000000; /* default */
+
+ for_each_compatible_node(np, NULL, "fixed-clock") {
+ if (!of_device_is_compatible(np, "fsl,imx-osc26m"))
+ continue;
+
+ if (!of_property_read_u32(np, "clock-frequency", &fref))
+ break;
+ }
+
+ return mx27_clocks_init(fref);
+}
+#endif
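
The clkdev lookups above are what ordinary platform drivers resolve at probe time. The following is a hedged consumer-side sketch, not part of this patch: example_probe() is hypothetical, while the "imx21-uart.0" dev_id and the "ipg"/"per" con_ids come from the registrations above.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *ipg, *per;

	/* Matches clk_register_clkdev(clk[uart1_ipg_gate], "ipg", "imx21-uart.0") */
	ipg = clk_get(&pdev->dev, "ipg");
	if (IS_ERR(ipg))
		return PTR_ERR(ipg);

	/* Matches clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.0") */
	per = clk_get(&pdev->dev, "per");
	if (IS_ERR(per)) {
		clk_put(ipg);
		return PTR_ERR(per);
	}

	clk_prepare_enable(ipg);
	clk_prepare_enable(per);
	dev_info(&pdev->dev, "per clock runs at %lu Hz\n", clk_get_rate(per));

	return 0;
}
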
diff --git a/arch/arm/mach-imx/clk-imx31.c b/arch/arm/mach-imx/clk-imx31.c
new file mode 100644
index 00000000000..a854b9cae5e
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx31.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2012 Sascha Hauer <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/err.h>
+
+#include <mach/hardware.h>
+#include <mach/mx31.h>
+#include <mach/common.h>
+
+#include "clk.h"
+#include "crmregs-imx3.h"
+
+static const char *mcu_main_sel[] = { "spll", "mpll", };
+static const char *per_sel[] = { "per_div", "ipg", };
+static const char *csi_sel[] = { "upll", "spll", };
+static const char *fir_sel[] = { "mcu_main", "upll", "spll" };
+
+enum mx31_clks {
+ ckih, ckil, mpll, spll, upll, mcu_main, hsp, ahb, nfc, ipg, per_div,
+ per, csi, fir, csi_div, usb_div_pre, usb_div_post, fir_div_pre,
+ fir_div_post, sdhc1_gate, sdhc2_gate, gpt_gate, epit1_gate, epit2_gate,
+ iim_gate, ata_gate, sdma_gate, cspi3_gate, rng_gate, uart1_gate,
+ uart2_gate, ssi1_gate, i2c1_gate, i2c2_gate, i2c3_gate, hantro_gate,
+ mstick1_gate, mstick2_gate, csi_gate, rtc_gate, wdog_gate, pwm_gate,
+ sim_gate, ect_gate, usb_gate, kpp_gate, ipu_gate, uart3_gate,
+ uart4_gate, uart5_gate, owire_gate, ssi2_gate, cspi1_gate, cspi2_gate,
+ gacc_gate, emi_gate, rtic_gate, firi_gate, clk_max
+};
+
+static struct clk *clk[clk_max];
+
+int __init mx31_clocks_init(unsigned long fref)
+{
+ void __iomem *base = MX31_IO_ADDRESS(MX31_CCM_BASE_ADDR);
+ int i;
+
+ clk[ckih] = imx_clk_fixed("ckih", fref);
+ clk[ckil] = imx_clk_fixed("ckil", 32768);
+ clk[mpll] = imx_clk_pllv1("mpll", "ckih", base + MXC_CCM_MPCTL);
+ clk[spll] = imx_clk_pllv1("spll", "ckih", base + MXC_CCM_SRPCTL);
+ clk[upll] = imx_clk_pllv1("upll", "ckih", base + MXC_CCM_UPCTL);
+ clk[mcu_main] = imx_clk_mux("mcu_main", base + MXC_CCM_PMCR0, 31, 1, mcu_main_sel, ARRAY_SIZE(mcu_main_sel));
+ clk[hsp] = imx_clk_divider("hsp", "mcu_main", base + MXC_CCM_PDR0, 11, 3);
+ clk[ahb] = imx_clk_divider("ahb", "mcu_main", base + MXC_CCM_PDR0, 3, 3);
+ clk[nfc] = imx_clk_divider("nfc", "ahb", base + MXC_CCM_PDR0, 8, 3);
+ clk[ipg] = imx_clk_divider("ipg", "ahb", base + MXC_CCM_PDR0, 6, 2);
+ clk[per_div] = imx_clk_divider("per_div", "upll", base + MXC_CCM_PDR0, 16, 5);
+ clk[per] = imx_clk_mux("per", base + MXC_CCM_CCMR, 24, 1, per_sel, ARRAY_SIZE(per_sel));
+ clk[csi] = imx_clk_mux("csi_sel", base + MXC_CCM_CCMR, 25, 1, csi_sel, ARRAY_SIZE(csi_sel));
+ clk[fir] = imx_clk_mux("fir_sel", base + MXC_CCM_CCMR, 11, 2, fir_sel, ARRAY_SIZE(fir_sel));
+ clk[csi_div] = imx_clk_divider("csi_div", "csi_sel", base + MXC_CCM_PDR0, 23, 9);
+ clk[usb_div_pre] = imx_clk_divider("usb_div_pre", "upll", base + MXC_CCM_PDR1, 30, 2);
+ clk[usb_div_post] = imx_clk_divider("usb_div_post", "usb_div_pre", base + MXC_CCM_PDR1, 27, 3);
+ clk[fir_div_pre] = imx_clk_divider("fir_div_pre", "fir_sel", base + MXC_CCM_PDR1, 24, 3);
+ clk[fir_div_post] = imx_clk_divider("fir_div_post", "fir_div_pre", base + MXC_CCM_PDR1, 23, 6);
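+	/* CGR gates are two-bit fields, so enable bit positions advance by two */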
+ clk[sdhc1_gate] = imx_clk_gate2("sdhc1_gate", "per", base + MXC_CCM_CGR0, 0);
+ clk[sdhc2_gate] = imx_clk_gate2("sdhc2_gate", "per", base + MXC_CCM_CGR0, 2);
+ clk[gpt_gate] = imx_clk_gate2("gpt_gate", "per", base + MXC_CCM_CGR0, 4);
+ clk[epit1_gate] = imx_clk_gate2("epit1_gate", "per", base + MXC_CCM_CGR0, 6);
+ clk[epit2_gate] = imx_clk_gate2("epit2_gate", "per", base + MXC_CCM_CGR0, 8);
+ clk[iim_gate] = imx_clk_gate2("iim_gate", "ipg", base + MXC_CCM_CGR0, 10);
+ clk[ata_gate] = imx_clk_gate2("ata_gate", "ipg", base + MXC_CCM_CGR0, 12);
+ clk[sdma_gate] = imx_clk_gate2("sdma_gate", "ahb", base + MXC_CCM_CGR0, 14);
+ clk[cspi3_gate] = imx_clk_gate2("cspi3_gate", "ipg", base + MXC_CCM_CGR0, 16);
+ clk[rng_gate] = imx_clk_gate2("rng_gate", "ipg", base + MXC_CCM_CGR0, 18);
+ clk[uart1_gate] = imx_clk_gate2("uart1_gate", "per", base + MXC_CCM_CGR0, 20);
+ clk[uart2_gate] = imx_clk_gate2("uart2_gate", "per", base + MXC_CCM_CGR0, 22);
+ clk[ssi1_gate] = imx_clk_gate2("ssi1_gate", "spll", base + MXC_CCM_CGR0, 24);
+ clk[i2c1_gate] = imx_clk_gate2("i2c1_gate", "per", base + MXC_CCM_CGR0, 26);
+ clk[i2c2_gate] = imx_clk_gate2("i2c2_gate", "per", base + MXC_CCM_CGR0, 28);
+ clk[i2c3_gate] = imx_clk_gate2("i2c3_gate", "per", base + MXC_CCM_CGR0, 30);
+ clk[hantro_gate] = imx_clk_gate2("hantro_gate", "per", base + MXC_CCM_CGR1, 0);
+ clk[mstick1_gate] = imx_clk_gate2("mstick1_gate", "per", base + MXC_CCM_CGR1, 2);
+ clk[mstick2_gate] = imx_clk_gate2("mstick2_gate", "per", base + MXC_CCM_CGR1, 4);
+ clk[csi_gate] = imx_clk_gate2("csi_gate", "csi_div", base + MXC_CCM_CGR1, 6);
+ clk[rtc_gate] = imx_clk_gate2("rtc_gate", "ipg", base + MXC_CCM_CGR1, 8);
+ clk[wdog_gate] = imx_clk_gate2("wdog_gate", "ipg", base + MXC_CCM_CGR1, 10);
+ clk[pwm_gate] = imx_clk_gate2("pwm_gate", "per", base + MXC_CCM_CGR1, 12);
+ clk[sim_gate] = imx_clk_gate2("sim_gate", "per", base + MXC_CCM_CGR1, 14);
+ clk[ect_gate] = imx_clk_gate2("ect_gate", "per", base + MXC_CCM_CGR1, 16);
+ clk[usb_gate] = imx_clk_gate2("usb_gate", "ahb", base + MXC_CCM_CGR1, 18);
+ clk[kpp_gate] = imx_clk_gate2("kpp_gate", "ipg", base + MXC_CCM_CGR1, 20);
+ clk[ipu_gate] = imx_clk_gate2("ipu_gate", "hsp", base + MXC_CCM_CGR1, 22);
+ clk[uart3_gate] = imx_clk_gate2("uart3_gate", "per", base + MXC_CCM_CGR1, 24);
+ clk[uart4_gate] = imx_clk_gate2("uart4_gate", "per", base + MXC_CCM_CGR1, 26);
+ clk[uart5_gate] = imx_clk_gate2("uart5_gate", "per", base + MXC_CCM_CGR1, 28);
+ clk[owire_gate] = imx_clk_gate2("owire_gate", "per", base + MXC_CCM_CGR1, 30);
+ clk[ssi2_gate] = imx_clk_gate2("ssi2_gate", "spll", base + MXC_CCM_CGR2, 0);
+ clk[cspi1_gate] = imx_clk_gate2("cspi1_gate", "ipg", base + MXC_CCM_CGR2, 2);
+ clk[cspi2_gate] = imx_clk_gate2("cspi2_gate", "ipg", base + MXC_CCM_CGR2, 4);
+ clk[gacc_gate] = imx_clk_gate2("gacc_gate", "per", base + MXC_CCM_CGR2, 6);
+ clk[emi_gate] = imx_clk_gate2("emi_gate", "ahb", base + MXC_CCM_CGR2, 8);
+ clk[rtic_gate] = imx_clk_gate2("rtic_gate", "ahb", base + MXC_CCM_CGR2, 10);
+	clk[firi_gate] = imx_clk_gate2("firi_gate", "upll", base + MXC_CCM_CGR2, 12);
+
+ for (i = 0; i < ARRAY_SIZE(clk); i++)
+ if (IS_ERR(clk[i]))
+ pr_err("imx31 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clk[i]));
+
+ clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");
+ clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0");
+ clk_register_clkdev(clk[cspi1_gate], NULL, "imx31-cspi.0");
+ clk_register_clkdev(clk[cspi2_gate], NULL, "imx31-cspi.1");
+ clk_register_clkdev(clk[cspi3_gate], NULL, "imx31-cspi.2");
+ clk_register_clkdev(clk[pwm_gate], "pwm", NULL);
+ clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
+ clk_register_clkdev(clk[rtc_gate], "rtc", NULL);
+ clk_register_clkdev(clk[epit1_gate], "epit", NULL);
+ clk_register_clkdev(clk[epit2_gate], "epit", NULL);
+ clk_register_clkdev(clk[nfc], NULL, "mxc_nand.0");
+ clk_register_clkdev(clk[ipu_gate], NULL, "ipu-core");
+ clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
+ clk_register_clkdev(clk[kpp_gate], "kpp", NULL);
+ clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.0");
+ clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.0");
+ clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
+ clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.1");
+ clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.1");
+ clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.1");
+ clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.2");
+ clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.2");
+ clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
+ clk_register_clkdev(clk[usb_div_post], "per", "fsl-usb2-udc");
+ clk_register_clkdev(clk[usb_gate], "ahb", "fsl-usb2-udc");
+ clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc");
+ clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
+	/* i.MX31 has the i.MX21-type UART */
+ clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
+ clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");
+ clk_register_clkdev(clk[uart2_gate], "per", "imx21-uart.1");
+ clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.1");
+ clk_register_clkdev(clk[uart3_gate], "per", "imx21-uart.2");
+ clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.2");
+ clk_register_clkdev(clk[uart4_gate], "per", "imx21-uart.3");
+ clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.3");
+ clk_register_clkdev(clk[uart5_gate], "per", "imx21-uart.4");
+ clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.4");
+ clk_register_clkdev(clk[i2c1_gate], NULL, "imx-i2c.0");
+ clk_register_clkdev(clk[i2c2_gate], NULL, "imx-i2c.1");
+ clk_register_clkdev(clk[i2c3_gate], NULL, "imx-i2c.2");
+ clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1.0");
+ clk_register_clkdev(clk[sdhc1_gate], NULL, "mxc-mmc.0");
+ clk_register_clkdev(clk[sdhc2_gate], NULL, "mxc-mmc.1");
+ clk_register_clkdev(clk[ssi1_gate], NULL, "imx-ssi.0");
+ clk_register_clkdev(clk[ssi2_gate], NULL, "imx-ssi.1");
+ clk_register_clkdev(clk[firi_gate], "firi", NULL);
+ clk_register_clkdev(clk[ata_gate], NULL, "pata_imx");
+ clk_register_clkdev(clk[rtic_gate], "rtic", NULL);
+ clk_register_clkdev(clk[rng_gate], "rng", NULL);
+ clk_register_clkdev(clk[sdma_gate], NULL, "imx31-sdma");
+ clk_register_clkdev(clk[iim_gate], "iim", NULL);
+
+ clk_set_parent(clk[csi], clk[upll]);
+ clk_prepare_enable(clk[emi_gate]);
+ clk_prepare_enable(clk[iim_gate]);
+ mx31_revision();
+ clk_disable_unprepare(clk[iim_gate]);
+
+ mxc_timer_init(NULL, MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR),
+ MX31_INT_GPT);
+
+ return 0;
+}
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
new file mode 100644
index 00000000000..a9e60bf7dd7
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx35.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/err.h>
+
+#include <mach/hardware.h>
+#include <mach/common.h>
+
+#include "crmregs-imx3.h"
+#include "clk.h"
+
+struct arm_ahb_div {
+ unsigned char arm, ahb, sel;
+};
+
+static struct arm_ahb_div clk_consumer[] = {
+ { .arm = 1, .ahb = 4, .sel = 0},
+ { .arm = 1, .ahb = 3, .sel = 1},
+ { .arm = 2, .ahb = 2, .sel = 0},
+ { .arm = 0, .ahb = 0, .sel = 0},
+ { .arm = 0, .ahb = 0, .sel = 0},
+ { .arm = 0, .ahb = 0, .sel = 0},
+ { .arm = 4, .ahb = 1, .sel = 0},
+ { .arm = 1, .ahb = 5, .sel = 0},
+ { .arm = 1, .ahb = 8, .sel = 0},
+ { .arm = 1, .ahb = 6, .sel = 1},
+ { .arm = 2, .ahb = 4, .sel = 0},
+ { .arm = 0, .ahb = 0, .sel = 0},
+ { .arm = 0, .ahb = 0, .sel = 0},
+ { .arm = 0, .ahb = 0, .sel = 0},
+ { .arm = 4, .ahb = 2, .sel = 0},
+ { .arm = 0, .ahb = 0, .sel = 0},
+};
+
+static char hsp_div_532[] = { 4, 8, 3, 0 };
+static char hsp_div_400[] = { 3, 6, 3, 0 };
+
+static const char *std_sel[] = {"ppll", "arm"};
+static const char *ipg_per_sel[] = {"ahb_per_div", "arm_per_div"};
+
+enum mx35_clks {
+ ckih, mpll, ppll, mpll_075, arm, hsp, hsp_div, hsp_sel, ahb, ipg,
+ arm_per_div, ahb_per_div, ipg_per, uart_sel, uart_div, esdhc_sel,
+ esdhc1_div, esdhc2_div, esdhc3_div, spdif_sel, spdif_div_pre,
+ spdif_div_post, ssi_sel, ssi1_div_pre, ssi1_div_post, ssi2_div_pre,
+ ssi2_div_post, usb_sel, usb_div, nfc_div, asrc_gate, pata_gate,
+ audmux_gate, can1_gate, can2_gate, cspi1_gate, cspi2_gate, ect_gate,
+ edio_gate, emi_gate, epit1_gate, epit2_gate, esai_gate, esdhc1_gate,
+ esdhc2_gate, esdhc3_gate, fec_gate, gpio1_gate, gpio2_gate, gpio3_gate,
+ gpt_gate, i2c1_gate, i2c2_gate, i2c3_gate, iomuxc_gate, ipu_gate,
+ kpp_gate, mlb_gate, mshc_gate, owire_gate, pwm_gate, rngc_gate,
+ rtc_gate, rtic_gate, scc_gate, sdma_gate, spba_gate, spdif_gate,
+ ssi1_gate, ssi2_gate, uart1_gate, uart2_gate, uart3_gate, usbotg_gate,
+ wdog_gate, max_gate, admux_gate, csi_gate, iim_gate, gpu2d_gate,
+ clk_max
+};
+
+static struct clk *clk[clk_max];
+
+int __init mx35_clocks_init(void)
+{
+ void __iomem *base = MX35_IO_ADDRESS(MX35_CCM_BASE_ADDR);
+ u32 pdr0, consumer_sel, hsp_sel;
+ struct arm_ahb_div *aad;
+	char *hsp_div;
+ int i;
+
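+	/* PDR0[19:16] indexes the clk_consumer ARM/AHB divider table above */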
+ pdr0 = __raw_readl(base + MXC_CCM_PDR0);
+ consumer_sel = (pdr0 >> 16) & 0xf;
+ aad = &clk_consumer[consumer_sel];
+ if (!aad->arm) {
+ pr_err("i.MX35 clk: illegal consumer mux selection 0x%x\n", consumer_sel);
+ /*
+ * We are basically stuck. Continue with a default entry and hope we
+ * get far enough to actually show the above message
+ */
+ aad = &clk_consumer[0];
+ }
+
+ clk[ckih] = imx_clk_fixed("ckih", 24000000);
+ clk[mpll] = imx_clk_pllv1("mpll", "ckih", base + MX35_CCM_MPCTL);
+ clk[ppll] = imx_clk_pllv1("ppll", "ckih", base + MX35_CCM_PPCTL);
+
+	clk[mpll_075] = imx_clk_fixed_factor("mpll_075", "mpll", 3, 4);
+
+ if (aad->sel)
+ clk[arm] = imx_clk_fixed_factor("arm", "mpll_075", 1, aad->arm);
+ else
+ clk[arm] = imx_clk_fixed_factor("arm", "mpll", 1, aad->arm);
+
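+	/* HSP divider table depends on whether the ARM core runs above 400 MHz */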
+ if (clk_get_rate(clk[arm]) > 400000000)
+ hsp_div = hsp_div_532;
+ else
+ hsp_div = hsp_div_400;
+
+ hsp_sel = (pdr0 >> 20) & 0x3;
+ if (!hsp_div[hsp_sel]) {
+ pr_err("i.MX35 clk: illegal hsp clk selection 0x%x\n", hsp_sel);
+ hsp_sel = 0;
+ }
+
+ clk[hsp] = imx_clk_fixed_factor("hsp", "arm", 1, hsp_div[hsp_sel]);
+
+ clk[ahb] = imx_clk_fixed_factor("ahb", "arm", 1, aad->ahb);
+ clk[ipg] = imx_clk_fixed_factor("ipg", "ahb", 1, 2);
+
+ clk[arm_per_div] = imx_clk_divider("arm_per_div", "arm", base + MX35_CCM_PDR4, 16, 6);
+ clk[ahb_per_div] = imx_clk_divider("ahb_per_div", "ahb", base + MXC_CCM_PDR0, 12, 3);
+ clk[ipg_per] = imx_clk_mux("ipg_per", base + MXC_CCM_PDR0, 26, 1, ipg_per_sel, ARRAY_SIZE(ipg_per_sel));
+
+ clk[uart_sel] = imx_clk_mux("uart_sel", base + MX35_CCM_PDR3, 14, 1, std_sel, ARRAY_SIZE(std_sel));
+ clk[uart_div] = imx_clk_divider("uart_div", "uart_sel", base + MX35_CCM_PDR4, 10, 6);
+
+ clk[esdhc_sel] = imx_clk_mux("esdhc_sel", base + MX35_CCM_PDR4, 9, 1, std_sel, ARRAY_SIZE(std_sel));
+ clk[esdhc1_div] = imx_clk_divider("esdhc1_div", "esdhc_sel", base + MX35_CCM_PDR3, 0, 6);
+ clk[esdhc2_div] = imx_clk_divider("esdhc2_div", "esdhc_sel", base + MX35_CCM_PDR3, 8, 6);
+ clk[esdhc3_div] = imx_clk_divider("esdhc3_div", "esdhc_sel", base + MX35_CCM_PDR3, 16, 6);
+
+ clk[spdif_sel] = imx_clk_mux("spdif_sel", base + MX35_CCM_PDR3, 22, 1, std_sel, ARRAY_SIZE(std_sel));
+ clk[spdif_div_pre] = imx_clk_divider("spdif_div_pre", "spdif_sel", base + MX35_CCM_PDR3, 29, 3); /* divide by 1 not allowed */
+ clk[spdif_div_post] = imx_clk_divider("spdif_div_post", "spdif_div_pre", base + MX35_CCM_PDR3, 23, 6);
+
+ clk[ssi_sel] = imx_clk_mux("ssi_sel", base + MX35_CCM_PDR2, 6, 1, std_sel, ARRAY_SIZE(std_sel));
+ clk[ssi1_div_pre] = imx_clk_divider("ssi1_div_pre", "ssi_sel", base + MX35_CCM_PDR2, 24, 3);
+ clk[ssi1_div_post] = imx_clk_divider("ssi1_div_post", "ssi1_div_pre", base + MX35_CCM_PDR2, 0, 6);
+ clk[ssi2_div_pre] = imx_clk_divider("ssi2_div_pre", "ssi_sel", base + MX35_CCM_PDR2, 27, 3);
+ clk[ssi2_div_post] = imx_clk_divider("ssi2_div_post", "ssi2_div_pre", base + MX35_CCM_PDR2, 8, 6);
+
+ clk[usb_sel] = imx_clk_mux("usb_sel", base + MX35_CCM_PDR4, 9, 1, std_sel, ARRAY_SIZE(std_sel));
+ clk[usb_div] = imx_clk_divider("usb_div", "usb_sel", base + MX35_CCM_PDR4, 22, 6);
+
+ clk[nfc_div] = imx_clk_divider("nfc_div", "ahb", base + MX35_CCM_PDR4, 28, 4);
+
+ clk[asrc_gate] = imx_clk_gate2("asrc_gate", "ipg", base + MX35_CCM_CGR0, 0);
+ clk[pata_gate] = imx_clk_gate2("pata_gate", "ipg", base + MX35_CCM_CGR0, 2);
+ clk[audmux_gate] = imx_clk_gate2("audmux_gate", "ipg", base + MX35_CCM_CGR0, 4);
+ clk[can1_gate] = imx_clk_gate2("can1_gate", "ipg", base + MX35_CCM_CGR0, 6);
+ clk[can2_gate] = imx_clk_gate2("can2_gate", "ipg", base + MX35_CCM_CGR0, 8);
+ clk[cspi1_gate] = imx_clk_gate2("cspi1_gate", "ipg", base + MX35_CCM_CGR0, 10);
+ clk[cspi2_gate] = imx_clk_gate2("cspi2_gate", "ipg", base + MX35_CCM_CGR0, 12);
+ clk[ect_gate] = imx_clk_gate2("ect_gate", "ipg", base + MX35_CCM_CGR0, 14);
+ clk[edio_gate] = imx_clk_gate2("edio_gate", "ipg", base + MX35_CCM_CGR0, 16);
+ clk[emi_gate] = imx_clk_gate2("emi_gate", "ipg", base + MX35_CCM_CGR0, 18);
+ clk[epit1_gate] = imx_clk_gate2("epit1_gate", "ipg", base + MX35_CCM_CGR0, 20);
+ clk[epit2_gate] = imx_clk_gate2("epit2_gate", "ipg", base + MX35_CCM_CGR0, 22);
+ clk[esai_gate] = imx_clk_gate2("esai_gate", "ipg", base + MX35_CCM_CGR0, 24);
+ clk[esdhc1_gate] = imx_clk_gate2("esdhc1_gate", "esdhc1_div", base + MX35_CCM_CGR0, 26);
+ clk[esdhc2_gate] = imx_clk_gate2("esdhc2_gate", "esdhc2_div", base + MX35_CCM_CGR0, 28);
+ clk[esdhc3_gate] = imx_clk_gate2("esdhc3_gate", "esdhc3_div", base + MX35_CCM_CGR0, 30);
+
+ clk[fec_gate] = imx_clk_gate2("fec_gate", "ipg", base + MX35_CCM_CGR1, 0);
+ clk[gpio1_gate] = imx_clk_gate2("gpio1_gate", "ipg", base + MX35_CCM_CGR1, 2);
+ clk[gpio2_gate] = imx_clk_gate2("gpio2_gate", "ipg", base + MX35_CCM_CGR1, 4);
+ clk[gpio3_gate] = imx_clk_gate2("gpio3_gate", "ipg", base + MX35_CCM_CGR1, 6);
+ clk[gpt_gate] = imx_clk_gate2("gpt_gate", "ipg", base + MX35_CCM_CGR1, 8);
+ clk[i2c1_gate] = imx_clk_gate2("i2c1_gate", "ipg_per", base + MX35_CCM_CGR1, 10);
+ clk[i2c2_gate] = imx_clk_gate2("i2c2_gate", "ipg_per", base + MX35_CCM_CGR1, 12);
+ clk[i2c3_gate] = imx_clk_gate2("i2c3_gate", "ipg_per", base + MX35_CCM_CGR1, 14);
+ clk[iomuxc_gate] = imx_clk_gate2("iomuxc_gate", "ipg", base + MX35_CCM_CGR1, 16);
+ clk[ipu_gate] = imx_clk_gate2("ipu_gate", "hsp", base + MX35_CCM_CGR1, 18);
+ clk[kpp_gate] = imx_clk_gate2("kpp_gate", "ipg", base + MX35_CCM_CGR1, 20);
+ clk[mlb_gate] = imx_clk_gate2("mlb_gate", "ahb", base + MX35_CCM_CGR1, 22);
+ clk[mshc_gate] = imx_clk_gate2("mshc_gate", "dummy", base + MX35_CCM_CGR1, 24);
+ clk[owire_gate] = imx_clk_gate2("owire_gate", "ipg_per", base + MX35_CCM_CGR1, 26);
+ clk[pwm_gate] = imx_clk_gate2("pwm_gate", "ipg_per", base + MX35_CCM_CGR1, 28);
+ clk[rngc_gate] = imx_clk_gate2("rngc_gate", "ipg", base + MX35_CCM_CGR1, 30);
+
+ clk[rtc_gate] = imx_clk_gate2("rtc_gate", "ipg", base + MX35_CCM_CGR2, 0);
+ clk[rtic_gate] = imx_clk_gate2("rtic_gate", "ahb", base + MX35_CCM_CGR2, 2);
+ clk[scc_gate] = imx_clk_gate2("scc_gate", "ipg", base + MX35_CCM_CGR2, 4);
+ clk[sdma_gate] = imx_clk_gate2("sdma_gate", "ahb", base + MX35_CCM_CGR2, 6);
+ clk[spba_gate] = imx_clk_gate2("spba_gate", "ipg", base + MX35_CCM_CGR2, 8);
+ clk[spdif_gate] = imx_clk_gate2("spdif_gate", "spdif_div_post", base + MX35_CCM_CGR2, 10);
+ clk[ssi1_gate] = imx_clk_gate2("ssi1_gate", "ssi1_div_post", base + MX35_CCM_CGR2, 12);
+ clk[ssi2_gate] = imx_clk_gate2("ssi2_gate", "ssi2_div_post", base + MX35_CCM_CGR2, 14);
+ clk[uart1_gate] = imx_clk_gate2("uart1_gate", "uart_div", base + MX35_CCM_CGR2, 16);
+ clk[uart2_gate] = imx_clk_gate2("uart2_gate", "uart_div", base + MX35_CCM_CGR2, 18);
+ clk[uart3_gate] = imx_clk_gate2("uart3_gate", "uart_div", base + MX35_CCM_CGR2, 20);
+ clk[usbotg_gate] = imx_clk_gate2("usbotg_gate", "ahb", base + MX35_CCM_CGR2, 22);
+ clk[wdog_gate] = imx_clk_gate2("wdog_gate", "ipg", base + MX35_CCM_CGR2, 24);
+ clk[max_gate] = imx_clk_gate2("max_gate", "dummy", base + MX35_CCM_CGR2, 26);
+ clk[admux_gate] = imx_clk_gate2("admux_gate", "ipg", base + MX35_CCM_CGR2, 30);
+
+ clk[csi_gate] = imx_clk_gate2("csi_gate", "ipg", base + MX35_CCM_CGR3, 0);
+ clk[iim_gate] = imx_clk_gate2("iim_gate", "ipg", base + MX35_CCM_CGR3, 2);
+ clk[gpu2d_gate] = imx_clk_gate2("gpu2d_gate", "ahb", base + MX35_CCM_CGR3, 4);
+
+ for (i = 0; i < ARRAY_SIZE(clk); i++)
+ if (IS_ERR(clk[i]))
+ pr_err("i.MX35 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clk[i]));
+
+ clk_register_clkdev(clk[pata_gate], NULL, "pata_imx");
+ clk_register_clkdev(clk[can1_gate], NULL, "flexcan.0");
+ clk_register_clkdev(clk[can2_gate], NULL, "flexcan.1");
+ clk_register_clkdev(clk[cspi1_gate], "per", "imx35-cspi.0");
+ clk_register_clkdev(clk[cspi1_gate], "ipg", "imx35-cspi.0");
+ clk_register_clkdev(clk[cspi2_gate], "per", "imx35-cspi.1");
+ clk_register_clkdev(clk[cspi2_gate], "ipg", "imx35-cspi.1");
+ clk_register_clkdev(clk[epit1_gate], NULL, "imx-epit.0");
+ clk_register_clkdev(clk[epit2_gate], NULL, "imx-epit.1");
+ clk_register_clkdev(clk[esdhc1_gate], "per", "sdhci-esdhc-imx35.0");
+ clk_register_clkdev(clk[ipg], "ipg", "sdhci-esdhc-imx35.0");
+ clk_register_clkdev(clk[ahb], "ahb", "sdhci-esdhc-imx35.0");
+ clk_register_clkdev(clk[esdhc2_gate], "per", "sdhci-esdhc-imx35.1");
+ clk_register_clkdev(clk[ipg], "ipg", "sdhci-esdhc-imx35.1");
+ clk_register_clkdev(clk[ahb], "ahb", "sdhci-esdhc-imx35.1");
+ clk_register_clkdev(clk[esdhc3_gate], "per", "sdhci-esdhc-imx35.2");
+ clk_register_clkdev(clk[ipg], "ipg", "sdhci-esdhc-imx35.2");
+ clk_register_clkdev(clk[ahb], "ahb", "sdhci-esdhc-imx35.2");
+	/* i.MX35 has the i.MX27-type FEC */
+ clk_register_clkdev(clk[fec_gate], NULL, "imx27-fec.0");
+ clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");
+ clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0");
+ clk_register_clkdev(clk[i2c1_gate], NULL, "imx-i2c.0");
+ clk_register_clkdev(clk[i2c2_gate], NULL, "imx-i2c.1");
+ clk_register_clkdev(clk[i2c3_gate], NULL, "imx-i2c.2");
+ clk_register_clkdev(clk[ipu_gate], NULL, "ipu-core");
+ clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
+ clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1");
+ clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
+ clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.0");
+ clk_register_clkdev(clk[ssi1_div_post], "per", "imx-ssi.0");
+ clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.1");
+ clk_register_clkdev(clk[ssi2_div_post], "per", "imx-ssi.1");
+	/* i.MX35 has the i.MX21-type UART */
+ clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
+ clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");
+ clk_register_clkdev(clk[uart2_gate], "per", "imx21-uart.1");
+ clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.1");
+ clk_register_clkdev(clk[uart3_gate], "per", "imx21-uart.2");
+ clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.2");
+ clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0");
+ clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
+ clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.0");
+ clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.1");
+ clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.1");
+ clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.1");
+ clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2");
+ clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
+ clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.2");
+ clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc");
+ clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc");
+ clk_register_clkdev(clk[usbotg_gate], "ahb", "fsl-usb2-udc");
+ clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
+ clk_register_clkdev(clk[nfc_div], NULL, "mxc_nand.0");
+
+ clk_prepare_enable(clk[spba_gate]);
+ clk_prepare_enable(clk[gpio1_gate]);
+ clk_prepare_enable(clk[gpio2_gate]);
+ clk_prepare_enable(clk[gpio3_gate]);
+ clk_prepare_enable(clk[iim_gate]);
+ clk_prepare_enable(clk[emi_gate]);
+
+ imx_print_silicon_rev("i.MX35", mx35_revision());
+
+#ifdef CONFIG_MXC_USE_EPIT
+	epit_timer_init(NULL,
+ MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
+#else
+ mxc_timer_init(NULL, MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR),
+ MX35_INT_GPT);
+#endif
+
+ return 0;
+}
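
Since the mx35 bus clocks above are plain fixed-factor chains, their rates can be checked by hand. A stand-alone sketch for the first clk_consumer entry { .arm = 1, .ahb = 4 }, assuming a hypothetical 532 MHz MPLL (the real rate comes from the PLL registers and is not stated in this patch):

#include <stdio.h>

int main(void)
{
	unsigned long mpll = 532000000UL;	/* assumed PLL output, illustration only */
	unsigned long arm  = mpll / 1;		/* imx_clk_fixed_factor("arm", ..., 1, aad->arm) */
	unsigned long ahb  = arm / 4;		/* imx_clk_fixed_factor("ahb", "arm", 1, aad->ahb) */
	unsigned long ipg  = ahb / 2;		/* imx_clk_fixed_factor("ipg", "ahb", 1, 2) */

	printf("arm %lu Hz, ahb %lu Hz, ipg %lu Hz\n", arm, ahb, ipg);
	return 0;	/* arm 532000000, ahb 133000000, ipg 66500000 */
}
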
diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c
new file mode 100644
index 00000000000..fcd94f3b0f0
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx51-imx53.c
@@ -0,0 +1,506 @@
+/*
+ * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/err.h>
+
+#include <mach/hardware.h>
+#include <mach/common.h>
+
+#include "crm-regs-imx5.h"
+#include "clk.h"
+
+/* Low-power Audio Playback Mode clock */
+static const char *lp_apm_sel[] = { "osc", };
+
+/* Parent set shared by the uart, esdhc, ecspi and usboh3 muxes below */
+static const char *standard_pll_sel[] = { "pll1_sw", "pll2_sw", "pll3_sw", "lp_apm", };
+static const char *periph_apm_sel[] = { "pll1_sw", "pll3_sw", "lp_apm", };
+static const char *main_bus_sel[] = { "pll2_sw", "periph_apm", };
+static const char *per_lp_apm_sel[] = { "main_bus", "lp_apm", };
+static const char *per_root_sel[] = { "per_podf", "ipg", };
+static const char *esdhc_c_sel[] = { "esdhc_a_podf", "esdhc_b_podf", };
+static const char *esdhc_d_sel[] = { "esdhc_a_podf", "esdhc_b_podf", };
+static const char *ssi_apm_sels[] = { "ckih1", "lp_apm", "ckih2", };
+static const char *ssi_clk_sels[] = { "pll1_sw", "pll2_sw", "pll3_sw", "ssi_apm", };
+static const char *ssi3_clk_sels[] = { "ssi1_root_gate", "ssi2_root_gate", };
+static const char *ssi_ext1_com_sels[] = { "ssi_ext1_podf", "ssi1_root_gate", };
+static const char *ssi_ext2_com_sels[] = { "ssi_ext2_podf", "ssi2_root_gate", };
+static const char *emi_slow_sel[] = { "main_bus", "ahb", };
+static const char *usb_phy_sel_str[] = { "osc", "usb_phy_podf", };
+static const char *mx51_ipu_di0_sel[] = { "di_pred", "osc", "ckih1", "tve_di", };
+static const char *mx53_ipu_di0_sel[] = { "di_pred", "osc", "ckih1", "di_pll4_podf", "dummy", "ldb_di0", };
+static const char *mx53_ldb_di0_sel[] = { "pll3_sw", "pll4_sw", };
+static const char *mx51_ipu_di1_sel[] = { "di_pred", "osc", "ckih1", "tve_di", "ipp_di1", };
+static const char *mx53_ipu_di1_sel[] = { "di_pred", "osc", "ckih1", "tve_di", "ipp_di1", "ldb_di1", };
+static const char *mx53_ldb_di1_sel[] = { "pll3_sw", "pll4_sw", };
+static const char *mx51_tve_ext_sel[] = { "osc", "ckih1", };
+static const char *mx53_tve_ext_sel[] = { "pll4_sw", "ckih1", };
+static const char *tve_sel[] = { "tve_pred", "tve_ext_sel", };
+static const char *ipu_sel[] = { "axi_a", "axi_b", "emi_slow_gate", "ahb", };
+static const char *vpu_sel[] = { "axi_a", "axi_b", "emi_slow_gate", "ahb", };
+
+enum imx5_clks {
+ dummy, ckil, osc, ckih1, ckih2, ahb, ipg, axi_a, axi_b, uart_pred,
+ uart_root, esdhc_a_pred, esdhc_b_pred, esdhc_c_s, esdhc_d_s,
+ emi_sel, emi_slow_podf, nfc_podf, ecspi_pred, ecspi_podf, usboh3_pred,
+ usboh3_podf, usb_phy_pred, usb_phy_podf, cpu_podf, di_pred, tve_di,
+ tve_s, uart1_ipg_gate, uart1_per_gate, uart2_ipg_gate,
+ uart2_per_gate, uart3_ipg_gate, uart3_per_gate, i2c1_gate, i2c2_gate,
+ gpt_ipg_gate, pwm1_ipg_gate, pwm1_hf_gate, pwm2_ipg_gate, pwm2_hf_gate,
+ gpt_gate, fec_gate, usboh3_per_gate, esdhc1_ipg_gate, esdhc2_ipg_gate,
+ esdhc3_ipg_gate, esdhc4_ipg_gate, ssi1_ipg_gate, ssi2_ipg_gate,
+ ssi3_ipg_gate, ecspi1_ipg_gate, ecspi1_per_gate, ecspi2_ipg_gate,
+ ecspi2_per_gate, cspi_ipg_gate, sdma_gate, emi_slow_gate, ipu_s,
+ ipu_gate, nfc_gate, ipu_di1_gate, vpu_s, vpu_gate,
+ vpu_reference_gate, uart4_ipg_gate, uart4_per_gate, uart5_ipg_gate,
+ uart5_per_gate, tve_gate, tve_pred, esdhc1_per_gate, esdhc2_per_gate,
+ esdhc3_per_gate, esdhc4_per_gate, usb_phy_gate, hsi2c_gate,
+ mipi_hsc1_gate, mipi_hsc2_gate, mipi_esc_gate, mipi_hsp_gate,
+ ldb_di1_div_3_5, ldb_di1_div, ldb_di0_div_3_5, ldb_di0_div,
+ ldb_di1_gate, can2_serial_gate, can2_ipg_gate, i2c3_gate, lp_apm,
+ periph_apm, main_bus, ahb_max, aips_tz1, aips_tz2, tmax1, tmax2,
+ tmax3, spba, uart_sel, esdhc_a_sel, esdhc_b_sel, esdhc_a_podf,
+ esdhc_b_podf, ecspi_sel, usboh3_sel, usb_phy_sel, iim_gate,
+	usboh3_gate, emi_fast_gate, ipu_di0_gate, gpc_dvfs, pll1_sw, pll2_sw,
+ pll3_sw, ipu_di0_sel, ipu_di1_sel, tve_ext_sel, mx51_mipi, pll4_sw,
+ ldb_di1_sel, di_pll4_podf, ldb_di0_sel, ldb_di0_gate, usb_phy1_gate,
+ usb_phy2_gate, per_lp_apm, per_pred1, per_pred2, per_podf, per_root,
+ ssi_apm, ssi1_root_sel, ssi2_root_sel, ssi3_root_sel, ssi_ext1_sel,
+ ssi_ext2_sel, ssi_ext1_com_sel, ssi_ext2_com_sel, ssi1_root_pred,
+ ssi1_root_podf, ssi2_root_pred, ssi2_root_podf, ssi_ext1_pred,
+ ssi_ext1_podf, ssi_ext2_pred, ssi_ext2_podf, ssi1_root_gate,
+ ssi2_root_gate, ssi3_root_gate, ssi_ext1_gate, ssi_ext2_gate,
+ clk_max
+};
+
+static struct clk *clk[clk_max];
+
+static void __init mx5_clocks_common_init(unsigned long rate_ckil,
+ unsigned long rate_osc, unsigned long rate_ckih1,
+ unsigned long rate_ckih2)
+{
+ int i;
+
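+	/* Fixed-rate root clocks; rates are passed in by the SoC-specific init */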
+ clk[dummy] = imx_clk_fixed("dummy", 0);
+ clk[ckil] = imx_clk_fixed("ckil", rate_ckil);
+ clk[osc] = imx_clk_fixed("osc", rate_osc);
+ clk[ckih1] = imx_clk_fixed("ckih1", rate_ckih1);
+ clk[ckih2] = imx_clk_fixed("ckih2", rate_ckih2);
+
+ clk[lp_apm] = imx_clk_mux("lp_apm", MXC_CCM_CCSR, 9, 1,
+ lp_apm_sel, ARRAY_SIZE(lp_apm_sel));
+ clk[periph_apm] = imx_clk_mux("periph_apm", MXC_CCM_CBCMR, 12, 2,
+ periph_apm_sel, ARRAY_SIZE(periph_apm_sel));
+ clk[main_bus] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 1,
+ main_bus_sel, ARRAY_SIZE(main_bus_sel));
+ clk[per_lp_apm] = imx_clk_mux("per_lp_apm", MXC_CCM_CBCDR, 1, 1,
+ per_lp_apm_sel, ARRAY_SIZE(per_lp_apm_sel));
+ clk[per_pred1] = imx_clk_divider("per_pred1", "per_lp_apm", MXC_CCM_CBCDR, 6, 2);
+ clk[per_pred2] = imx_clk_divider("per_pred2", "per_pred1", MXC_CCM_CBCDR, 3, 3);
+ clk[per_podf] = imx_clk_divider("per_podf", "per_pred2", MXC_CCM_CBCDR, 0, 3);
+ clk[per_root] = imx_clk_mux("per_root", MXC_CCM_CBCDR, 1, 0,
+ per_root_sel, ARRAY_SIZE(per_root_sel));
+ clk[ahb] = imx_clk_divider("ahb", "main_bus", MXC_CCM_CBCDR, 10, 3);
+ clk[ahb_max] = imx_clk_gate2("ahb_max", "ahb", MXC_CCM_CCGR0, 28);
+ clk[aips_tz1] = imx_clk_gate2("aips_tz1", "ahb", MXC_CCM_CCGR0, 24);
+ clk[aips_tz2] = imx_clk_gate2("aips_tz2", "ahb", MXC_CCM_CCGR0, 26);
+ clk[tmax1] = imx_clk_gate2("tmax1", "ahb", MXC_CCM_CCGR1, 0);
+ clk[tmax2] = imx_clk_gate2("tmax2", "ahb", MXC_CCM_CCGR1, 2);
+ clk[tmax3] = imx_clk_gate2("tmax3", "ahb", MXC_CCM_CCGR1, 4);
+ clk[spba] = imx_clk_gate2("spba", "ipg", MXC_CCM_CCGR5, 0);
+ clk[ipg] = imx_clk_divider("ipg", "ahb", MXC_CCM_CBCDR, 8, 2);
+ clk[axi_a] = imx_clk_divider("axi_a", "main_bus", MXC_CCM_CBCDR, 16, 3);
+ clk[axi_b] = imx_clk_divider("axi_b", "main_bus", MXC_CCM_CBCDR, 19, 3);
+ clk[uart_sel] = imx_clk_mux("uart_sel", MXC_CCM_CSCMR1, 24, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+ clk[uart_pred] = imx_clk_divider("uart_pred", "uart_sel", MXC_CCM_CSCDR1, 3, 3);
+ clk[uart_root] = imx_clk_divider("uart_root", "uart_pred", MXC_CCM_CSCDR1, 0, 3);
+
+ clk[esdhc_a_sel] = imx_clk_mux("esdhc_a_sel", MXC_CCM_CSCMR1, 20, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+ clk[esdhc_b_sel] = imx_clk_mux("esdhc_b_sel", MXC_CCM_CSCMR1, 16, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+ clk[esdhc_a_pred] = imx_clk_divider("esdhc_a_pred", "esdhc_a_sel", MXC_CCM_CSCDR1, 16, 3);
+ clk[esdhc_a_podf] = imx_clk_divider("esdhc_a_podf", "esdhc_a_pred", MXC_CCM_CSCDR1, 11, 3);
+ clk[esdhc_b_pred] = imx_clk_divider("esdhc_b_pred", "esdhc_b_sel", MXC_CCM_CSCDR1, 22, 3);
+ clk[esdhc_b_podf] = imx_clk_divider("esdhc_b_podf", "esdhc_b_pred", MXC_CCM_CSCDR1, 19, 3);
+ clk[esdhc_c_s] = imx_clk_mux("esdhc_c_sel", MXC_CCM_CSCMR1, 19, 1, esdhc_c_sel, ARRAY_SIZE(esdhc_c_sel));
+ clk[esdhc_d_s] = imx_clk_mux("esdhc_d_sel", MXC_CCM_CSCMR1, 18, 1, esdhc_d_sel, ARRAY_SIZE(esdhc_d_sel));
+
+ clk[emi_sel] = imx_clk_mux("emi_sel", MXC_CCM_CBCDR, 26, 1,
+ emi_slow_sel, ARRAY_SIZE(emi_slow_sel));
+ clk[emi_slow_podf] = imx_clk_divider("emi_slow_podf", "emi_sel", MXC_CCM_CBCDR, 22, 3);
+ clk[nfc_podf] = imx_clk_divider("nfc_podf", "emi_slow_podf", MXC_CCM_CBCDR, 13, 3);
+ clk[ecspi_sel] = imx_clk_mux("ecspi_sel", MXC_CCM_CSCMR1, 4, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+ clk[ecspi_pred] = imx_clk_divider("ecspi_pred", "ecspi_sel", MXC_CCM_CSCDR2, 25, 3);
+ clk[ecspi_podf] = imx_clk_divider("ecspi_podf", "ecspi_pred", MXC_CCM_CSCDR2, 19, 6);
+ clk[usboh3_sel] = imx_clk_mux("usboh3_sel", MXC_CCM_CSCMR1, 22, 2,
+ standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
+ clk[usboh3_pred] = imx_clk_divider("usboh3_pred", "usboh3_sel", MXC_CCM_CSCDR1, 8, 3);
+ clk[usboh3_podf] = imx_clk_divider("usboh3_podf", "usboh3_pred", MXC_CCM_CSCDR1, 6, 2);
+ clk[usb_phy_pred] = imx_clk_divider("usb_phy_pred", "pll3_sw", MXC_CCM_CDCDR, 3, 3);
+ clk[usb_phy_podf] = imx_clk_divider("usb_phy_podf", "usb_phy_pred", MXC_CCM_CDCDR, 0, 3);
+ clk[usb_phy_sel] = imx_clk_mux("usb_phy_sel", MXC_CCM_CSCMR1, 26, 1,
+ usb_phy_sel_str, ARRAY_SIZE(usb_phy_sel_str));
+ clk[cpu_podf] = imx_clk_divider("cpu_podf", "pll1_sw", MXC_CCM_CACRR, 0, 3);
+ clk[di_pred] = imx_clk_divider("di_pred", "pll3_sw", MXC_CCM_CDCDR, 6, 3);
+ clk[tve_di] = imx_clk_fixed("tve_di", 65000000); /* FIXME */
+ clk[tve_s] = imx_clk_mux("tve_sel", MXC_CCM_CSCMR1, 7, 1, tve_sel, ARRAY_SIZE(tve_sel));
+ clk[iim_gate] = imx_clk_gate2("iim_gate", "ipg", MXC_CCM_CCGR0, 30);
+ clk[uart1_ipg_gate] = imx_clk_gate2("uart1_ipg_gate", "ipg", MXC_CCM_CCGR1, 6);
+ clk[uart1_per_gate] = imx_clk_gate2("uart1_per_gate", "uart_root", MXC_CCM_CCGR1, 8);
+ clk[uart2_ipg_gate] = imx_clk_gate2("uart2_ipg_gate", "ipg", MXC_CCM_CCGR1, 10);
+ clk[uart2_per_gate] = imx_clk_gate2("uart2_per_gate", "uart_root", MXC_CCM_CCGR1, 12);
+ clk[uart3_ipg_gate] = imx_clk_gate2("uart3_ipg_gate", "ipg", MXC_CCM_CCGR1, 14);
+ clk[uart3_per_gate] = imx_clk_gate2("uart3_per_gate", "uart_root", MXC_CCM_CCGR1, 16);
+ clk[i2c1_gate] = imx_clk_gate2("i2c1_gate", "per_root", MXC_CCM_CCGR1, 18);
+ clk[i2c2_gate] = imx_clk_gate2("i2c2_gate", "per_root", MXC_CCM_CCGR1, 20);
+ clk[gpt_ipg_gate] = imx_clk_gate2("gpt_ipg_gate", "ipg", MXC_CCM_CCGR2, 20);
+ clk[pwm1_ipg_gate] = imx_clk_gate2("pwm1_ipg_gate", "ipg", MXC_CCM_CCGR2, 10);
+ clk[pwm1_hf_gate] = imx_clk_gate2("pwm1_hf_gate", "ipg", MXC_CCM_CCGR2, 12);
+ clk[pwm2_ipg_gate] = imx_clk_gate2("pwm2_ipg_gate", "ipg", MXC_CCM_CCGR2, 14);
+ clk[pwm2_hf_gate] = imx_clk_gate2("pwm2_hf_gate", "ipg", MXC_CCM_CCGR2, 16);
+ clk[gpt_gate] = imx_clk_gate2("gpt_gate", "ipg", MXC_CCM_CCGR2, 18);
+ clk[fec_gate] = imx_clk_gate2("fec_gate", "ipg", MXC_CCM_CCGR2, 24);
+ clk[usboh3_gate] = imx_clk_gate2("usboh3_gate", "ipg", MXC_CCM_CCGR2, 26);
+ clk[usboh3_per_gate] = imx_clk_gate2("usboh3_per_gate", "usboh3_podf", MXC_CCM_CCGR2, 28);
+ clk[esdhc1_ipg_gate] = imx_clk_gate2("esdhc1_ipg_gate", "ipg", MXC_CCM_CCGR3, 0);
+ clk[esdhc2_ipg_gate] = imx_clk_gate2("esdhc2_ipg_gate", "ipg", MXC_CCM_CCGR3, 4);
+ clk[esdhc3_ipg_gate] = imx_clk_gate2("esdhc3_ipg_gate", "ipg", MXC_CCM_CCGR3, 8);
+ clk[esdhc4_ipg_gate] = imx_clk_gate2("esdhc4_ipg_gate", "ipg", MXC_CCM_CCGR3, 12);
+ clk[ssi1_ipg_gate] = imx_clk_gate2("ssi1_ipg_gate", "ipg", MXC_CCM_CCGR3, 16);
+ clk[ssi2_ipg_gate] = imx_clk_gate2("ssi2_ipg_gate", "ipg", MXC_CCM_CCGR3, 20);
+ clk[ssi3_ipg_gate] = imx_clk_gate2("ssi3_ipg_gate", "ipg", MXC_CCM_CCGR3, 24);
+ clk[ecspi1_ipg_gate] = imx_clk_gate2("ecspi1_ipg_gate", "ipg", MXC_CCM_CCGR4, 18);
+ clk[ecspi1_per_gate] = imx_clk_gate2("ecspi1_per_gate", "ecspi_podf", MXC_CCM_CCGR4, 20);
+ clk[ecspi2_ipg_gate] = imx_clk_gate2("ecspi2_ipg_gate", "ipg", MXC_CCM_CCGR4, 22);
+ clk[ecspi2_per_gate] = imx_clk_gate2("ecspi2_per_gate", "ecspi_podf", MXC_CCM_CCGR4, 24);
+ clk[cspi_ipg_gate] = imx_clk_gate2("cspi_ipg_gate", "ipg", MXC_CCM_CCGR4, 26);
+ clk[sdma_gate] = imx_clk_gate2("sdma_gate", "ipg", MXC_CCM_CCGR4, 30);
+ clk[emi_fast_gate] = imx_clk_gate2("emi_fast_gate", "dummy", MXC_CCM_CCGR5, 14);
+ clk[emi_slow_gate] = imx_clk_gate2("emi_slow_gate", "emi_slow_podf", MXC_CCM_CCGR5, 16);
+ clk[ipu_s] = imx_clk_mux("ipu_sel", MXC_CCM_CBCMR, 6, 2, ipu_sel, ARRAY_SIZE(ipu_sel));
+ clk[ipu_gate] = imx_clk_gate2("ipu_gate", "ipu_sel", MXC_CCM_CCGR5, 10);
+ clk[nfc_gate] = imx_clk_gate2("nfc_gate", "nfc_podf", MXC_CCM_CCGR5, 20);
+ clk[ipu_di0_gate] = imx_clk_gate2("ipu_di0_gate", "ipu_di0_sel", MXC_CCM_CCGR6, 10);
+ clk[ipu_di1_gate] = imx_clk_gate2("ipu_di1_gate", "ipu_di1_sel", MXC_CCM_CCGR6, 12);
+ clk[vpu_s] = imx_clk_mux("vpu_sel", MXC_CCM_CBCMR, 14, 2, vpu_sel, ARRAY_SIZE(vpu_sel));
+ clk[vpu_gate] = imx_clk_gate2("vpu_gate", "vpu_sel", MXC_CCM_CCGR5, 6);
+ clk[vpu_reference_gate] = imx_clk_gate2("vpu_reference_gate", "osc", MXC_CCM_CCGR5, 8);
+ clk[uart4_ipg_gate] = imx_clk_gate2("uart4_ipg_gate", "ipg", MXC_CCM_CCGR7, 8);
+ clk[uart4_per_gate] = imx_clk_gate2("uart4_per_gate", "uart_root", MXC_CCM_CCGR7, 10);
+ clk[uart5_ipg_gate] = imx_clk_gate2("uart5_ipg_gate", "ipg", MXC_CCM_CCGR7, 12);
+ clk[uart5_per_gate] = imx_clk_gate2("uart5_per_gate", "uart_root", MXC_CCM_CCGR7, 14);
+ clk[gpc_dvfs] = imx_clk_gate2("gpc_dvfs", "dummy", MXC_CCM_CCGR5, 24);
+
+ clk[ssi_apm] = imx_clk_mux("ssi_apm", MXC_CCM_CSCMR1, 8, 2, ssi_apm_sels, ARRAY_SIZE(ssi_apm_sels));
+ clk[ssi1_root_sel] = imx_clk_mux("ssi1_root_sel", MXC_CCM_CSCMR1, 14, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
+ clk[ssi2_root_sel] = imx_clk_mux("ssi2_root_sel", MXC_CCM_CSCMR1, 12, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
+ clk[ssi3_root_sel] = imx_clk_mux("ssi3_root_sel", MXC_CCM_CSCMR1, 11, 1, ssi3_clk_sels, ARRAY_SIZE(ssi3_clk_sels));
+ clk[ssi_ext1_sel] = imx_clk_mux("ssi_ext1_sel", MXC_CCM_CSCMR1, 28, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
+ clk[ssi_ext2_sel] = imx_clk_mux("ssi_ext2_sel", MXC_CCM_CSCMR1, 30, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
+ clk[ssi_ext1_com_sel] = imx_clk_mux("ssi_ext1_com_sel", MXC_CCM_CSCMR1, 0, 1, ssi_ext1_com_sels, ARRAY_SIZE(ssi_ext1_com_sels));
+ clk[ssi_ext2_com_sel] = imx_clk_mux("ssi_ext2_com_sel", MXC_CCM_CSCMR1, 1, 1, ssi_ext2_com_sels, ARRAY_SIZE(ssi_ext2_com_sels));
+ clk[ssi1_root_pred] = imx_clk_divider("ssi1_root_pred", "ssi1_root_sel", MXC_CCM_CS1CDR, 6, 3);
+ clk[ssi1_root_podf] = imx_clk_divider("ssi1_root_podf", "ssi1_root_pred", MXC_CCM_CS1CDR, 0, 6);
+ clk[ssi2_root_pred] = imx_clk_divider("ssi2_root_pred", "ssi2_root_sel", MXC_CCM_CS2CDR, 6, 3);
+ clk[ssi2_root_podf] = imx_clk_divider("ssi2_root_podf", "ssi2_root_pred", MXC_CCM_CS2CDR, 0, 6);
+ clk[ssi_ext1_pred] = imx_clk_divider("ssi_ext1_pred", "ssi_ext1_sel", MXC_CCM_CS1CDR, 22, 3);
+ clk[ssi_ext1_podf] = imx_clk_divider("ssi_ext1_podf", "ssi_ext1_pred", MXC_CCM_CS1CDR, 16, 6);
+ clk[ssi_ext2_pred] = imx_clk_divider("ssi_ext2_pred", "ssi_ext2_sel", MXC_CCM_CS2CDR, 22, 3);
+ clk[ssi_ext2_podf] = imx_clk_divider("ssi_ext2_podf", "ssi_ext2_pred", MXC_CCM_CS2CDR, 16, 6);
+ clk[ssi1_root_gate] = imx_clk_gate2("ssi1_root_gate", "ssi1_root_podf", MXC_CCM_CCGR3, 18);
+ clk[ssi2_root_gate] = imx_clk_gate2("ssi2_root_gate", "ssi2_root_podf", MXC_CCM_CCGR3, 22);
+ clk[ssi3_root_gate] = imx_clk_gate2("ssi3_root_gate", "ssi3_root_sel", MXC_CCM_CCGR3, 26);
+ clk[ssi_ext1_gate] = imx_clk_gate2("ssi_ext1_gate", "ssi_ext1_com_sel", MXC_CCM_CCGR3, 28);
+ clk[ssi_ext2_gate] = imx_clk_gate2("ssi_ext2_gate", "ssi_ext2_com_sel", MXC_CCM_CCGR3, 30);
+
+ for (i = 0; i < ARRAY_SIZE(clk); i++)
+ if (IS_ERR(clk[i]))
+ pr_err("i.MX5 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clk[i]));
+
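+	/* clkdev lookups common to i.MX51 and i.MX53 */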
+ clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");
+ clk_register_clkdev(clk[gpt_ipg_gate], "ipg", "imx-gpt.0");
+ clk_register_clkdev(clk[uart1_per_gate], "per", "imx21-uart.0");
+ clk_register_clkdev(clk[uart1_ipg_gate], "ipg", "imx21-uart.0");
+ clk_register_clkdev(clk[uart2_per_gate], "per", "imx21-uart.1");
+ clk_register_clkdev(clk[uart2_ipg_gate], "ipg", "imx21-uart.1");
+ clk_register_clkdev(clk[uart3_per_gate], "per", "imx21-uart.2");
+ clk_register_clkdev(clk[uart3_ipg_gate], "ipg", "imx21-uart.2");
+ clk_register_clkdev(clk[uart4_per_gate], "per", "imx21-uart.3");
+ clk_register_clkdev(clk[uart4_ipg_gate], "ipg", "imx21-uart.3");
+ clk_register_clkdev(clk[uart5_per_gate], "per", "imx21-uart.4");
+ clk_register_clkdev(clk[uart5_ipg_gate], "ipg", "imx21-uart.4");
+ clk_register_clkdev(clk[ecspi1_per_gate], "per", "imx51-ecspi.0");
+ clk_register_clkdev(clk[ecspi1_ipg_gate], "ipg", "imx51-ecspi.0");
+ clk_register_clkdev(clk[ecspi2_per_gate], "per", "imx51-ecspi.1");
+ clk_register_clkdev(clk[ecspi2_ipg_gate], "ipg", "imx51-ecspi.1");
+ clk_register_clkdev(clk[cspi_ipg_gate], NULL, "imx51-cspi.0");
+ clk_register_clkdev(clk[pwm1_ipg_gate], "pwm", "mxc_pwm.0");
+ clk_register_clkdev(clk[pwm2_ipg_gate], "pwm", "mxc_pwm.1");
+ clk_register_clkdev(clk[i2c1_gate], NULL, "imx-i2c.0");
+ clk_register_clkdev(clk[i2c2_gate], NULL, "imx-i2c.1");
+ clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.0");
+ clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.0");
+ clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.0");
+ clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.1");
+ clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.1");
+ clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.1");
+ clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.2");
+ clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.2");
+ clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.2");
+ clk_register_clkdev(clk[usboh3_per_gate], "per", "fsl-usb2-udc");
+ clk_register_clkdev(clk[usboh3_gate], "ipg", "fsl-usb2-udc");
+ clk_register_clkdev(clk[usboh3_gate], "ahb", "fsl-usb2-udc");
+ clk_register_clkdev(clk[nfc_gate], NULL, "mxc_nand");
+ clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "imx-ssi.0");
+ clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1");
+ clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "imx-ssi.2");
+ clk_register_clkdev(clk[ssi_ext1_gate], "ssi_ext1", NULL);
+ clk_register_clkdev(clk[ssi_ext2_gate], "ssi_ext2", NULL);
+ clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
+ clk_register_clkdev(clk[cpu_podf], "cpu", NULL);
+ clk_register_clkdev(clk[iim_gate], "iim", NULL);
+ clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.0");
+ clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.1");
+ clk_register_clkdev(clk[dummy], NULL, "imx-keypad");
+ clk_register_clkdev(clk[tve_gate], NULL, "imx-tve.0");
+ clk_register_clkdev(clk[ipu_di1_gate], "di1", "imx-tve.0");
+
+ /* Set SDHC parents to be PLL2 */
+ clk_set_parent(clk[esdhc_a_sel], clk[pll2_sw]);
+ clk_set_parent(clk[esdhc_b_sel], clk[pll2_sw]);
+
+	/* Switch the USB PHY clock over to the 24 MHz oscillator */
+ clk_set_parent(clk[usb_phy_sel], clk[osc]);
+
+ clk_prepare_enable(clk[gpc_dvfs]);
+ clk_prepare_enable(clk[ahb_max]); /* esdhc3 */
+ clk_prepare_enable(clk[aips_tz1]);
+ clk_prepare_enable(clk[aips_tz2]); /* fec */
+ clk_prepare_enable(clk[spba]);
+ clk_prepare_enable(clk[emi_fast_gate]); /* fec */
+ clk_prepare_enable(clk[tmax1]);
+ clk_prepare_enable(clk[tmax2]); /* esdhc2, fec */
+ clk_prepare_enable(clk[tmax3]); /* esdhc1, esdhc4 */
+}
+
+int __init mx51_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
+ unsigned long rate_ckih1, unsigned long rate_ckih2)
+{
+ int i;
+
+ clk[pll1_sw] = imx_clk_pllv2("pll1_sw", "osc", MX51_DPLL1_BASE);
+ clk[pll2_sw] = imx_clk_pllv2("pll2_sw", "osc", MX51_DPLL2_BASE);
+ clk[pll3_sw] = imx_clk_pllv2("pll3_sw", "osc", MX51_DPLL3_BASE);
+ clk[ipu_di0_sel] = imx_clk_mux("ipu_di0_sel", MXC_CCM_CSCMR2, 26, 3,
+ mx51_ipu_di0_sel, ARRAY_SIZE(mx51_ipu_di0_sel));
+ clk[ipu_di1_sel] = imx_clk_mux("ipu_di1_sel", MXC_CCM_CSCMR2, 29, 3,
+ mx51_ipu_di1_sel, ARRAY_SIZE(mx51_ipu_di1_sel));
+ clk[tve_ext_sel] = imx_clk_mux("tve_ext_sel", MXC_CCM_CSCMR1, 6, 1,
+ mx51_tve_ext_sel, ARRAY_SIZE(mx51_tve_ext_sel));
+ clk[tve_gate] = imx_clk_gate2("tve_gate", "tve_sel", MXC_CCM_CCGR2, 30);
+ clk[tve_pred] = imx_clk_divider("tve_pred", "pll3_sw", MXC_CCM_CDCDR, 28, 3);
+ clk[esdhc1_per_gate] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2);
+ clk[esdhc2_per_gate] = imx_clk_gate2("esdhc2_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 6);
+ clk[esdhc3_per_gate] = imx_clk_gate2("esdhc3_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 10);
+ clk[esdhc4_per_gate] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
+ clk[usb_phy_gate] = imx_clk_gate2("usb_phy_gate", "usb_phy_sel", MXC_CCM_CCGR2, 0);
+ clk[hsi2c_gate] = imx_clk_gate2("hsi2c_gate", "ipg", MXC_CCM_CCGR1, 22);
+ clk[mipi_hsc1_gate] = imx_clk_gate2("mipi_hsc1_gate", "ipg", MXC_CCM_CCGR4, 6);
+ clk[mipi_hsc2_gate] = imx_clk_gate2("mipi_hsc2_gate", "ipg", MXC_CCM_CCGR4, 8);
+ clk[mipi_esc_gate] = imx_clk_gate2("mipi_esc_gate", "ipg", MXC_CCM_CCGR4, 10);
+ clk[mipi_hsp_gate] = imx_clk_gate2("mipi_hsp_gate", "ipg", MXC_CCM_CCGR4, 12);
+
+ for (i = 0; i < ARRAY_SIZE(clk); i++)
+ if (IS_ERR(clk[i]))
+ pr_err("i.MX51 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clk[i]));
+
+ mx5_clocks_common_init(rate_ckil, rate_osc, rate_ckih1, rate_ckih2);
+
+ clk_register_clkdev(clk[hsi2c_gate], NULL, "imx-i2c.2");
+ clk_register_clkdev(clk[mx51_mipi], "mipi_hsp", NULL);
+ clk_register_clkdev(clk[vpu_gate], NULL, "imx51-vpu.0");
+ clk_register_clkdev(clk[fec_gate], NULL, "imx27-fec.0");
+ clk_register_clkdev(clk[gpc_dvfs], "gpc_dvfs", NULL);
+ clk_register_clkdev(clk[ipu_gate], "bus", "imx51-ipu");
+ clk_register_clkdev(clk[ipu_di0_gate], "di0", "imx51-ipu");
+ clk_register_clkdev(clk[ipu_di1_gate], "di1", "imx51-ipu");
+ clk_register_clkdev(clk[ipu_gate], "hsp", "imx51-ipu");
+ clk_register_clkdev(clk[usb_phy_gate], "phy", "mxc-ehci.0");
+ clk_register_clkdev(clk[esdhc1_ipg_gate], "ipg", "sdhci-esdhc-imx51.0");
+ clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.0");
+ clk_register_clkdev(clk[esdhc1_per_gate], "per", "sdhci-esdhc-imx51.0");
+ clk_register_clkdev(clk[esdhc2_ipg_gate], "ipg", "sdhci-esdhc-imx51.1");
+ clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.1");
+ clk_register_clkdev(clk[esdhc2_per_gate], "per", "sdhci-esdhc-imx51.1");
+ clk_register_clkdev(clk[esdhc3_ipg_gate], "ipg", "sdhci-esdhc-imx51.2");
+ clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.2");
+ clk_register_clkdev(clk[esdhc3_per_gate], "per", "sdhci-esdhc-imx51.2");
+ clk_register_clkdev(clk[esdhc4_ipg_gate], "ipg", "sdhci-esdhc-imx51.3");
+ clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.3");
+ clk_register_clkdev(clk[esdhc4_per_gate], "per", "sdhci-esdhc-imx51.3");
+ clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "83fcc000.ssi");
+ clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "70014000.ssi");
+ clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "83fe8000.ssi");
+
+ /* set the usboh3 parent to pll2_sw */
+ clk_set_parent(clk[usboh3_sel], clk[pll2_sw]);
+
+	/* set SDHC root clock to 166.25 MHz */
+ clk_set_rate(clk[esdhc_a_podf], 166250000);
+ clk_set_rate(clk[esdhc_b_podf], 166250000);
+
+ /* System timer */
+ mxc_timer_init(NULL, MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR),
+ MX51_INT_GPT);
+
+ clk_prepare_enable(clk[iim_gate]);
+ imx_print_silicon_rev("i.MX51", mx51_revision());
+ clk_disable_unprepare(clk[iim_gate]);
+
+ return 0;
+}
+
+int __init mx53_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
+ unsigned long rate_ckih1, unsigned long rate_ckih2)
+{
+ int i;
+ unsigned long r;
+
+ clk[pll1_sw] = imx_clk_pllv2("pll1_sw", "osc", MX53_DPLL1_BASE);
+ clk[pll2_sw] = imx_clk_pllv2("pll2_sw", "osc", MX53_DPLL2_BASE);
+ clk[pll3_sw] = imx_clk_pllv2("pll3_sw", "osc", MX53_DPLL3_BASE);
+ clk[pll4_sw] = imx_clk_pllv2("pll4_sw", "osc", MX53_DPLL4_BASE);
+
+ clk[ldb_di1_sel] = imx_clk_mux("ldb_di1_sel", MXC_CCM_CSCMR2, 9, 1,
+ mx53_ldb_di1_sel, ARRAY_SIZE(mx53_ldb_di1_sel));
+ clk[ldb_di1_div_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
+ clk[ldb_di1_div] = imx_clk_divider("ldb_di1_div", "ldb_di1_div_3_5", MXC_CCM_CSCMR2, 11, 1);
+ clk[di_pll4_podf] = imx_clk_divider("di_pll4_podf", "pll4_sw", MXC_CCM_CDCDR, 16, 3);
+ clk[ldb_di0_sel] = imx_clk_mux("ldb_di0_sel", MXC_CCM_CSCMR2, 8, 1,
+ mx53_ldb_di0_sel, ARRAY_SIZE(mx53_ldb_di0_sel));
+ clk[ldb_di0_div_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
+ clk[ldb_di0_div] = imx_clk_divider("ldb_di0_div", "ldb_di0_div_3_5", MXC_CCM_CSCMR2, 10, 1);
+ clk[ldb_di0_gate] = imx_clk_gate2("ldb_di0_gate", "ldb_di0_div", MXC_CCM_CCGR6, 28);
+ clk[ldb_di1_gate] = imx_clk_gate2("ldb_di1_gate", "ldb_di1_div", MXC_CCM_CCGR6, 30);
+ clk[ipu_di0_sel] = imx_clk_mux("ipu_di0_sel", MXC_CCM_CSCMR2, 26, 3,
+ mx53_ipu_di0_sel, ARRAY_SIZE(mx53_ipu_di0_sel));
+ clk[ipu_di1_sel] = imx_clk_mux("ipu_di1_sel", MXC_CCM_CSCMR2, 29, 3,
+ mx53_ipu_di1_sel, ARRAY_SIZE(mx53_ipu_di1_sel));
+ clk[tve_ext_sel] = imx_clk_mux("tve_ext_sel", MXC_CCM_CSCMR1, 6, 1,
+ mx53_tve_ext_sel, ARRAY_SIZE(mx53_tve_ext_sel));
+ clk[tve_gate] = imx_clk_gate2("tve_gate", "tve_pred", MXC_CCM_CCGR2, 30);
+ clk[tve_pred] = imx_clk_divider("tve_pred", "tve_ext_sel", MXC_CCM_CDCDR, 28, 3);
+ clk[esdhc1_per_gate] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2);
+ clk[esdhc2_per_gate] = imx_clk_gate2("esdhc2_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 6);
+ clk[esdhc3_per_gate] = imx_clk_gate2("esdhc3_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 10);
+ clk[esdhc4_per_gate] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
+ clk[usb_phy1_gate] = imx_clk_gate2("usb_phy1_gate", "usb_phy_sel", MXC_CCM_CCGR4, 10);
+ clk[usb_phy2_gate] = imx_clk_gate2("usb_phy2_gate", "usb_phy_sel", MXC_CCM_CCGR4, 12);
+ clk[can2_serial_gate] = imx_clk_gate2("can2_serial_gate", "ipg", MXC_CCM_CCGR4, 6);
+ clk[can2_ipg_gate] = imx_clk_gate2("can2_ipg_gate", "ipg", MXC_CCM_CCGR4, 8);
+ clk[i2c3_gate] = imx_clk_gate2("i2c3_gate", "per_root", MXC_CCM_CCGR1, 22);
+
+ for (i = 0; i < ARRAY_SIZE(clk); i++)
+ if (IS_ERR(clk[i]))
+ pr_err("i.MX53 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clk[i]));
+
+ mx5_clocks_common_init(rate_ckil, rate_osc, rate_ckih1, rate_ckih2);
+
+ clk_register_clkdev(clk[vpu_gate], NULL, "imx53-vpu.0");
+ clk_register_clkdev(clk[i2c3_gate], NULL, "imx-i2c.2");
+ clk_register_clkdev(clk[fec_gate], NULL, "imx25-fec.0");
+ clk_register_clkdev(clk[ipu_gate], "bus", "imx53-ipu");
+ clk_register_clkdev(clk[ipu_di0_gate], "di0", "imx53-ipu");
+ clk_register_clkdev(clk[ipu_di1_gate], "di1", "imx53-ipu");
+ clk_register_clkdev(clk[ipu_gate], "hsp", "imx53-ipu");
+ clk_register_clkdev(clk[usb_phy1_gate], "usb_phy1", "mxc-ehci.0");
+ clk_register_clkdev(clk[esdhc1_ipg_gate], "ipg", "sdhci-esdhc-imx53.0");
+ clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.0");
+ clk_register_clkdev(clk[esdhc1_per_gate], "per", "sdhci-esdhc-imx53.0");
+ clk_register_clkdev(clk[esdhc2_ipg_gate], "ipg", "sdhci-esdhc-imx53.1");
+ clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.1");
+ clk_register_clkdev(clk[esdhc2_per_gate], "per", "sdhci-esdhc-imx53.1");
+ clk_register_clkdev(clk[esdhc3_ipg_gate], "ipg", "sdhci-esdhc-imx53.2");
+ clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.2");
+ clk_register_clkdev(clk[esdhc3_per_gate], "per", "sdhci-esdhc-imx53.2");
+ clk_register_clkdev(clk[esdhc4_ipg_gate], "ipg", "sdhci-esdhc-imx53.3");
+ clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.3");
+ clk_register_clkdev(clk[esdhc4_per_gate], "per", "sdhci-esdhc-imx53.3");
+ clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "63fcc000.ssi");
+ clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "50014000.ssi");
+ clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "63fd0000.ssi");
+
+	/* set SDHC root clock to 200 MHz */
+ clk_set_rate(clk[esdhc_a_podf], 200000000);
+ clk_set_rate(clk[esdhc_b_podf], 200000000);
+
+ /* System timer */
+ mxc_timer_init(NULL, MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR),
+ MX53_INT_GPT);
+
+ clk_prepare_enable(clk[iim_gate]);
+ imx_print_silicon_rev("i.MX53", mx53_revision());
+ clk_disable_unprepare(clk[iim_gate]);
+
+ r = clk_round_rate(clk[usboh3_per_gate], 54000000);
+ clk_set_rate(clk[usboh3_per_gate], r);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static void __init clk_get_freq_dt(unsigned long *ckil, unsigned long *osc,
+ unsigned long *ckih1, unsigned long *ckih2)
+{
+ struct device_node *np;
+
+	/* retrieve the frequency of fixed clocks from the device tree */
+ for_each_compatible_node(np, NULL, "fixed-clock") {
+ u32 rate;
+ if (of_property_read_u32(np, "clock-frequency", &rate))
+ continue;
+
+ if (of_device_is_compatible(np, "fsl,imx-ckil"))
+ *ckil = rate;
+ else if (of_device_is_compatible(np, "fsl,imx-osc"))
+ *osc = rate;
+ else if (of_device_is_compatible(np, "fsl,imx-ckih1"))
+ *ckih1 = rate;
+ else if (of_device_is_compatible(np, "fsl,imx-ckih2"))
+ *ckih2 = rate;
+ }
+}
+
+int __init mx51_clocks_init_dt(void)
+{
+ unsigned long ckil, osc, ckih1, ckih2;
+
+ clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
+ return mx51_clocks_init(ckil, osc, ckih1, ckih2);
+}
+
+int __init mx53_clocks_init_dt(void)
+{
+ unsigned long ckil, osc, ckih1, ckih2;
+
+ clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
+ return mx53_clocks_init(ckil, osc, ckih1, ckih2);
+}
+#endif
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
new file mode 100644
index 00000000000..cab02d0a15d
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -0,0 +1,444 @@
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2011 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <mach/common.h>
+#include "clk.h"
+
+#define CCGR0 0x68
+#define CCGR1 0x6c
+#define CCGR2 0x70
+#define CCGR3 0x74
+#define CCGR4 0x78
+#define CCGR5 0x7c
+#define CCGR6 0x80
+#define CCGR7 0x84
+
+#define CLPCR 0x54
+#define BP_CLPCR_LPM 0
+#define BM_CLPCR_LPM (0x3 << 0)
+#define BM_CLPCR_BYPASS_PMIC_READY (0x1 << 2)
+#define BM_CLPCR_ARM_CLK_DIS_ON_LPM (0x1 << 5)
+#define BM_CLPCR_SBYOS (0x1 << 6)
+#define BM_CLPCR_DIS_REF_OSC (0x1 << 7)
+#define BM_CLPCR_VSTBY (0x1 << 8)
+#define BP_CLPCR_STBY_COUNT 9
+#define BM_CLPCR_STBY_COUNT (0x3 << 9)
+#define BM_CLPCR_COSC_PWRDOWN (0x1 << 11)
+#define BM_CLPCR_WB_PER_AT_LPM (0x1 << 16)
+#define BM_CLPCR_WB_CORE_AT_LPM (0x1 << 17)
+#define BM_CLPCR_BYP_MMDC_CH0_LPM_HS (0x1 << 19)
+#define BM_CLPCR_BYP_MMDC_CH1_LPM_HS (0x1 << 21)
+#define BM_CLPCR_MASK_CORE0_WFI (0x1 << 22)
+#define BM_CLPCR_MASK_CORE1_WFI (0x1 << 23)
+#define BM_CLPCR_MASK_CORE2_WFI (0x1 << 24)
+#define BM_CLPCR_MASK_CORE3_WFI (0x1 << 25)
+#define BM_CLPCR_MASK_SCU_IDLE (0x1 << 26)
+#define BM_CLPCR_MASK_L2CC_IDLE (0x1 << 27)
+
+static void __iomem *ccm_base;
+
+void __init imx6q_clock_map_io(void) { }
+
+int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
+{
+ u32 val = readl_relaxed(ccm_base + CLPCR);
+
+ val &= ~BM_CLPCR_LPM;
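+	/* LPM field: 0 = run, 1 = wait, 2 = stop */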
+ switch (mode) {
+ case WAIT_CLOCKED:
+ break;
+ case WAIT_UNCLOCKED:
+ val |= 0x1 << BP_CLPCR_LPM;
+ break;
+ case STOP_POWER_ON:
+ val |= 0x2 << BP_CLPCR_LPM;
+ break;
+ case WAIT_UNCLOCKED_POWER_OFF:
+ val |= 0x1 << BP_CLPCR_LPM;
+ val &= ~BM_CLPCR_VSTBY;
+ val &= ~BM_CLPCR_SBYOS;
+ break;
+ case STOP_POWER_OFF:
+ val |= 0x2 << BP_CLPCR_LPM;
+ val |= 0x3 << BP_CLPCR_STBY_COUNT;
+ val |= BM_CLPCR_VSTBY;
+ val |= BM_CLPCR_SBYOS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ writel_relaxed(val, ccm_base + CLPCR);
+
+ return 0;
+}
+
+static const char *step_sels[] = { "osc", "pll2_pfd2_396m", };
+static const char *pll1_sw_sels[] = { "pll1_sys", "step", };
+static const char *periph_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", };
+static const char *periph_clk2_sels[] = { "pll3_usb_otg", "osc", };
+static const char *periph_sels[] = { "periph_pre", "periph_clk2", };
+static const char *periph2_sels[] = { "periph2_pre", "periph2_clk2", };
+static const char *axi_sels[] = { "periph", "pll2_pfd2_396m", "pll3_pfd1_540m", };
+static const char *audio_sels[] = { "pll4_audio", "pll3_pfd2_508m", "pll3_pfd3_454m", "pll3_usb_otg", };
+static const char *gpu_axi_sels[] = { "axi", "ahb", };
+static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", };
+static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
+static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", };
+static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
+static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
+static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
+static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+static const char *ipu2_di0_sels[] = { "ipu2_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+static const char *ipu2_di1_sels[] = { "ipu2_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+static const char *hsi_tx_sels[] = { "pll3_120m", "pll2_pfd2_396m", };
+static const char *pcie_axi_sels[] = { "axi", "ahb", };
+static const char *ssi_sels[] = { "pll3_pfd2_508m", "pll3_pfd3_454m", "pll4_audio", };
+static const char *usdhc_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *enfc_sels[] = { "pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", };
+static const char *emi_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *vdo_axi_sels[] = { "axi", "ahb", };
+static const char *vpu_axi_sels[] = { "axi", "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *cko1_sels[] = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5_video",
+ "dummy", "axi", "enfc", "ipu1_di0", "ipu1_di1", "ipu2_di0",
+ "ipu2_di1", "ahb", "ipg", "ipg_per", "ckil", "pll4_audio", };
+
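+/*
+ * Clocks that are enabled unconditionally during init and left running
+ * (the MMDC AXI ports and USB); the loop at the end of init keeps a
+ * permanent enable count on them.
+ */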
+static const char * const clks_init_on[] __initconst = {
+ "mmdc_ch0_axi", "mmdc_ch1_axi", "usboh3",
+};
+
+enum mx6q_clks {
+ dummy, ckil, ckih, osc, pll2_pfd0_352m, pll2_pfd1_594m, pll2_pfd2_396m,
+ pll3_pfd0_720m, pll3_pfd1_540m, pll3_pfd2_508m, pll3_pfd3_454m,
+ pll2_198m, pll3_120m, pll3_80m, pll3_60m, twd, step, pll1_sw,
+ periph_pre, periph2_pre, periph_clk2_sel, periph2_clk2_sel, axi_sel,
+ esai_sel, asrc_sel, spdif_sel, gpu2d_axi, gpu3d_axi, gpu2d_core_sel,
+ gpu3d_core_sel, gpu3d_shader_sel, ipu1_sel, ipu2_sel, ldb_di0_sel,
+ ldb_di1_sel, ipu1_di0_pre_sel, ipu1_di1_pre_sel, ipu2_di0_pre_sel,
+ ipu2_di1_pre_sel, ipu1_di0_sel, ipu1_di1_sel, ipu2_di0_sel,
+ ipu2_di1_sel, hsi_tx_sel, pcie_axi_sel, ssi1_sel, ssi2_sel, ssi3_sel,
+ usdhc1_sel, usdhc2_sel, usdhc3_sel, usdhc4_sel, enfc_sel, emi_sel,
+ emi_slow_sel, vdo_axi_sel, vpu_axi_sel, cko1_sel, periph, periph2,
+ periph_clk2, periph2_clk2, ipg, ipg_per, esai_pred, esai_podf,
+ asrc_pred, asrc_podf, spdif_pred, spdif_podf, can_root, ecspi_root,
+ gpu2d_core_podf, gpu3d_core_podf, gpu3d_shader, ipu1_podf, ipu2_podf,
+ ldb_di0_podf, ldb_di1_podf, ipu1_di0_pre, ipu1_di1_pre, ipu2_di0_pre,
+ ipu2_di1_pre, hsi_tx_podf, ssi1_pred, ssi1_podf, ssi2_pred, ssi2_podf,
+ ssi3_pred, ssi3_podf, uart_serial_podf, usdhc1_podf, usdhc2_podf,
+ usdhc3_podf, usdhc4_podf, enfc_pred, enfc_podf, emi_podf,
+ emi_slow_podf, vpu_axi_podf, cko1_podf, axi, mmdc_ch0_axi_podf,
+ mmdc_ch1_axi_podf, arm, ahb, apbh_dma, asrc, can1_ipg, can1_serial,
+ can2_ipg, can2_serial, ecspi1, ecspi2, ecspi3, ecspi4, ecspi5, enet,
+ esai, gpt_ipg, gpt_ipg_per, gpu2d_core, gpu3d_core, hdmi_iahb,
+ hdmi_isfr, i2c1, i2c2, i2c3, iim, enfc, ipu1, ipu1_di0, ipu1_di1, ipu2,
+ ipu2_di0, ldb_di0, ldb_di1, ipu2_di1, hsi_tx, mlb, mmdc_ch0_axi,
+ mmdc_ch1_axi, ocram, openvg_axi, pcie_axi, pwm1, pwm2, pwm3, pwm4,
+ gpmi_bch_apb, gpmi_bch, gpmi_io, gpmi_apb, sata, sdma, spba, ssi1,
+ ssi2, ssi3, uart_ipg, uart_serial, usboh3, usdhc1, usdhc2, usdhc3,
+ usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg,
+ pll4_audio, pll5_video, pll6_mlb, pll7_usb_host, pll8_enet, ssi1_ipg,
+ ssi2_ipg, ssi3_ipg, clk_max
+};
+
+static struct clk *clk[clk_max];
+
+int __init mx6q_clocks_init(void)
+{
+ struct device_node *np;
+ void __iomem *base;
+ struct clk *c;
+ int i, irq;
+
+ clk[dummy] = imx_clk_fixed("dummy", 0);
+
+	/* retrieve the frequency of fixed clocks from the device tree */
+ for_each_compatible_node(np, NULL, "fixed-clock") {
+ u32 rate;
+ if (of_property_read_u32(np, "clock-frequency", &rate))
+ continue;
+
+ if (of_device_is_compatible(np, "fsl,imx-ckil"))
+ clk[ckil] = imx_clk_fixed("ckil", rate);
+ else if (of_device_is_compatible(np, "fsl,imx-ckih1"))
+ clk[ckih] = imx_clk_fixed("ckih", rate);
+ else if (of_device_is_compatible(np, "fsl,imx-osc"))
+ clk[osc] = imx_clk_fixed("osc", rate);
+ }
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop");
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
+
+ /* type name parent_name base gate_mask div_mask */
+ clk[pll1_sys] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1_sys", "osc", base, 0x2000, 0x7f);
+ clk[pll2_bus] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2_bus", "osc", base + 0x30, 0x2000, 0x1);
+ clk[pll3_usb_otg] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3_usb_otg", "osc", base + 0x10, 0x2000, 0x3);
+ clk[pll4_audio] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4_audio", "osc", base + 0x70, 0x2000, 0x7f);
+ clk[pll5_video] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5_video", "osc", base + 0xa0, 0x2000, 0x7f);
+ clk[pll6_mlb] = imx_clk_pllv3(IMX_PLLV3_MLB, "pll6_mlb", "osc", base + 0xd0, 0x2000, 0x0);
+	clk[pll7_usb_host] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7_usb_host", "osc", base + 0x20, 0x2000, 0x3);
+ clk[pll8_enet] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll8_enet", "osc", base + 0xe0, 0x182000, 0x3);
+
+ /* name parent_name reg idx */
+ clk[pll2_pfd0_352m] = imx_clk_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0);
+ clk[pll2_pfd1_594m] = imx_clk_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1);
+ clk[pll2_pfd2_396m] = imx_clk_pfd("pll2_pfd2_396m", "pll2_bus", base + 0x100, 2);
+ clk[pll3_pfd0_720m] = imx_clk_pfd("pll3_pfd0_720m", "pll3_usb_otg", base + 0xf0, 0);
+ clk[pll3_pfd1_540m] = imx_clk_pfd("pll3_pfd1_540m", "pll3_usb_otg", base + 0xf0, 1);
+ clk[pll3_pfd2_508m] = imx_clk_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0, 2);
+ clk[pll3_pfd3_454m] = imx_clk_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0, 3);
+
+ /* name parent_name mult div */
+ clk[pll2_198m] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2);
+ clk[pll3_120m] = imx_clk_fixed_factor("pll3_120m", "pll3_usb_otg", 1, 4);
+ clk[pll3_80m] = imx_clk_fixed_factor("pll3_80m", "pll3_usb_otg", 1, 6);
+ clk[pll3_60m] = imx_clk_fixed_factor("pll3_60m", "pll3_usb_otg", 1, 8);
+ clk[twd] = imx_clk_fixed_factor("twd", "arm", 1, 2);
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ccm");
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
+ ccm_base = base;
+
+ /* name reg shift width parent_names num_parents */
+ clk[step] = imx_clk_mux("step", base + 0xc, 8, 1, step_sels, ARRAY_SIZE(step_sels));
+ clk[pll1_sw] = imx_clk_mux("pll1_sw", base + 0xc, 2, 1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels));
+ clk[periph_pre] = imx_clk_mux("periph_pre", base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels));
+ clk[periph2_pre] = imx_clk_mux("periph2_pre", base + 0x18, 21, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels));
+ clk[periph_clk2_sel] = imx_clk_mux("periph_clk2_sel", base + 0x18, 12, 1, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels));
+ clk[periph2_clk2_sel] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels));
+ clk[axi_sel] = imx_clk_mux("axi_sel", base + 0x14, 6, 2, axi_sels, ARRAY_SIZE(axi_sels));
+ clk[esai_sel] = imx_clk_mux("esai_sel", base + 0x20, 19, 2, audio_sels, ARRAY_SIZE(audio_sels));
+ clk[asrc_sel] = imx_clk_mux("asrc_sel", base + 0x30, 7, 2, audio_sels, ARRAY_SIZE(audio_sels));
+ clk[spdif_sel] = imx_clk_mux("spdif_sel", base + 0x30, 20, 2, audio_sels, ARRAY_SIZE(audio_sels));
+ clk[gpu2d_axi] = imx_clk_mux("gpu2d_axi", base + 0x18, 0, 1, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels));
+ clk[gpu3d_axi] = imx_clk_mux("gpu3d_axi", base + 0x18, 1, 1, gpu_axi_sels, ARRAY_SIZE(gpu_axi_sels));
+ clk[gpu2d_core_sel] = imx_clk_mux("gpu2d_core_sel", base + 0x18, 16, 2, gpu2d_core_sels, ARRAY_SIZE(gpu2d_core_sels));
+ clk[gpu3d_core_sel] = imx_clk_mux("gpu3d_core_sel", base + 0x18, 4, 2, gpu3d_core_sels, ARRAY_SIZE(gpu3d_core_sels));
+ clk[gpu3d_shader_sel] = imx_clk_mux("gpu3d_shader_sel", base + 0x18, 8, 2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels));
+ clk[ipu1_sel] = imx_clk_mux("ipu1_sel", base + 0x3c, 9, 2, ipu_sels, ARRAY_SIZE(ipu_sels));
+ clk[ipu2_sel] = imx_clk_mux("ipu2_sel", base + 0x3c, 14, 2, ipu_sels, ARRAY_SIZE(ipu_sels));
+ clk[ldb_di0_sel] = imx_clk_mux("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels));
+ clk[ldb_di1_sel] = imx_clk_mux("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels));
+ clk[ipu1_di0_pre_sel] = imx_clk_mux("ipu1_di0_pre_sel", base + 0x34, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels));
+ clk[ipu1_di1_pre_sel] = imx_clk_mux("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels));
+ clk[ipu2_di0_pre_sel] = imx_clk_mux("ipu2_di0_pre_sel", base + 0x38, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels));
+ clk[ipu2_di1_pre_sel] = imx_clk_mux("ipu2_di1_pre_sel", base + 0x38, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels));
+ clk[ipu1_di0_sel] = imx_clk_mux("ipu1_di0_sel", base + 0x34, 0, 3, ipu1_di0_sels, ARRAY_SIZE(ipu1_di0_sels));
+ clk[ipu1_di1_sel] = imx_clk_mux("ipu1_di1_sel", base + 0x34, 9, 3, ipu1_di1_sels, ARRAY_SIZE(ipu1_di1_sels));
+ clk[ipu2_di0_sel] = imx_clk_mux("ipu2_di0_sel", base + 0x38, 0, 3, ipu2_di0_sels, ARRAY_SIZE(ipu2_di0_sels));
+ clk[ipu2_di1_sel] = imx_clk_mux("ipu2_di1_sel", base + 0x38, 9, 3, ipu2_di1_sels, ARRAY_SIZE(ipu2_di1_sels));
+ clk[hsi_tx_sel] = imx_clk_mux("hsi_tx_sel", base + 0x30, 28, 1, hsi_tx_sels, ARRAY_SIZE(hsi_tx_sels));
+ clk[pcie_axi_sel] = imx_clk_mux("pcie_axi_sel", base + 0x18, 10, 1, pcie_axi_sels, ARRAY_SIZE(pcie_axi_sels));
+ clk[ssi1_sel] = imx_clk_mux("ssi1_sel", base + 0x1c, 10, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
+ clk[ssi2_sel] = imx_clk_mux("ssi2_sel", base + 0x1c, 12, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
+ clk[ssi3_sel] = imx_clk_mux("ssi3_sel", base + 0x1c, 14, 2, ssi_sels, ARRAY_SIZE(ssi_sels));
+ clk[usdhc1_sel] = imx_clk_mux("usdhc1_sel", base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+ clk[usdhc2_sel] = imx_clk_mux("usdhc2_sel", base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+ clk[usdhc3_sel] = imx_clk_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+ clk[usdhc4_sel] = imx_clk_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+ clk[enfc_sel] = imx_clk_mux("enfc_sel", base + 0x2c, 16, 2, enfc_sels, ARRAY_SIZE(enfc_sels));
+ clk[emi_sel] = imx_clk_mux("emi_sel", base + 0x1c, 27, 2, emi_sels, ARRAY_SIZE(emi_sels));
+ clk[emi_slow_sel] = imx_clk_mux("emi_slow_sel", base + 0x1c, 29, 2, emi_sels, ARRAY_SIZE(emi_sels));
+ clk[vdo_axi_sel] = imx_clk_mux("vdo_axi_sel", base + 0x18, 11, 1, vdo_axi_sels, ARRAY_SIZE(vdo_axi_sels));
+ clk[vpu_axi_sel] = imx_clk_mux("vpu_axi_sel", base + 0x18, 14, 2, vpu_axi_sels, ARRAY_SIZE(vpu_axi_sels));
+ clk[cko1_sel] = imx_clk_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels));
+
+ /* name reg shift width busy: reg, shift parent_names num_parents */
+ clk[periph] = imx_clk_busy_mux("periph", base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels));
+ clk[periph2] = imx_clk_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels));
+
+ /* name parent_name reg shift width */
+ clk[periph_clk2] = imx_clk_divider("periph_clk2", "periph_clk2_sel", base + 0x14, 27, 3);
+ clk[periph2_clk2] = imx_clk_divider("periph2_clk2", "periph2_clk2_sel", base + 0x14, 0, 3);
+ clk[ipg] = imx_clk_divider("ipg", "ahb", base + 0x14, 8, 2);
+ clk[ipg_per] = imx_clk_divider("ipg_per", "ipg", base + 0x1c, 0, 6);
+ clk[esai_pred] = imx_clk_divider("esai_pred", "esai_sel", base + 0x28, 9, 3);
+ clk[esai_podf] = imx_clk_divider("esai_podf", "esai_pred", base + 0x28, 25, 3);
+ clk[asrc_pred] = imx_clk_divider("asrc_pred", "asrc_sel", base + 0x30, 12, 3);
+ clk[asrc_podf] = imx_clk_divider("asrc_podf", "asrc_pred", base + 0x30, 9, 3);
+ clk[spdif_pred] = imx_clk_divider("spdif_pred", "spdif_sel", base + 0x30, 25, 3);
+ clk[spdif_podf] = imx_clk_divider("spdif_podf", "spdif_pred", base + 0x30, 22, 3);
+ clk[can_root] = imx_clk_divider("can_root", "pll3_usb_otg", base + 0x20, 2, 6);
+ clk[ecspi_root] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6);
+ clk[gpu2d_core_podf] = imx_clk_divider("gpu2d_core_podf", "gpu2d_core_sel", base + 0x18, 23, 3);
+ clk[gpu3d_core_podf] = imx_clk_divider("gpu3d_core_podf", "gpu3d_core_sel", base + 0x18, 26, 3);
+ clk[gpu3d_shader] = imx_clk_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3);
+ clk[ipu1_podf] = imx_clk_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3);
+ clk[ipu2_podf] = imx_clk_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3);
+ clk[ldb_di0_podf] = imx_clk_divider("ldb_di0_podf", "ldb_di0_sel", base + 0x20, 10, 1);
+ clk[ldb_di1_podf] = imx_clk_divider("ldb_di1_podf", "ldb_di1_sel", base + 0x20, 11, 1);
+ clk[ipu1_di0_pre] = imx_clk_divider("ipu1_di0_pre", "ipu1_di0_pre_sel", base + 0x34, 3, 3);
+ clk[ipu1_di1_pre] = imx_clk_divider("ipu1_di1_pre", "ipu1_di1_pre_sel", base + 0x34, 12, 3);
+ clk[ipu2_di0_pre] = imx_clk_divider("ipu2_di0_pre", "ipu2_di0_pre_sel", base + 0x38, 3, 3);
+ clk[ipu2_di1_pre] = imx_clk_divider("ipu2_di1_pre", "ipu2_di1_pre_sel", base + 0x38, 12, 3);
+ clk[hsi_tx_podf] = imx_clk_divider("hsi_tx_podf", "hsi_tx_sel", base + 0x30, 29, 3);
+ clk[ssi1_pred] = imx_clk_divider("ssi1_pred", "ssi1_sel", base + 0x28, 6, 3);
+ clk[ssi1_podf] = imx_clk_divider("ssi1_podf", "ssi1_pred", base + 0x28, 0, 6);
+ clk[ssi2_pred] = imx_clk_divider("ssi2_pred", "ssi2_sel", base + 0x2c, 6, 3);
+ clk[ssi2_podf] = imx_clk_divider("ssi2_podf", "ssi2_pred", base + 0x2c, 0, 6);
+ clk[ssi3_pred] = imx_clk_divider("ssi3_pred", "ssi3_sel", base + 0x28, 22, 3);
+ clk[ssi3_podf] = imx_clk_divider("ssi3_podf", "ssi3_pred", base + 0x28, 16, 6);
+ clk[uart_serial_podf] = imx_clk_divider("uart_serial_podf", "pll3_80m", base + 0x24, 0, 6);
+ clk[usdhc1_podf] = imx_clk_divider("usdhc1_podf", "usdhc1_sel", base + 0x24, 11, 3);
+ clk[usdhc2_podf] = imx_clk_divider("usdhc2_podf", "usdhc2_sel", base + 0x24, 16, 3);
+ clk[usdhc3_podf] = imx_clk_divider("usdhc3_podf", "usdhc3_sel", base + 0x24, 19, 3);
+ clk[usdhc4_podf] = imx_clk_divider("usdhc4_podf", "usdhc4_sel", base + 0x24, 22, 3);
+ clk[enfc_pred] = imx_clk_divider("enfc_pred", "enfc_sel", base + 0x2c, 18, 3);
+ clk[enfc_podf] = imx_clk_divider("enfc_podf", "enfc_pred", base + 0x2c, 21, 6);
+ clk[emi_podf] = imx_clk_divider("emi_podf", "emi_sel", base + 0x1c, 20, 3);
+ clk[emi_slow_podf] = imx_clk_divider("emi_slow_podf", "emi_slow_sel", base + 0x1c, 23, 3);
+ clk[vpu_axi_podf] = imx_clk_divider("vpu_axi_podf", "vpu_axi_sel", base + 0x24, 25, 3);
+ clk[cko1_podf] = imx_clk_divider("cko1_podf", "cko1_sel", base + 0x60, 4, 3);
+
+ /* name parent_name reg shift width busy: reg, shift */
+ clk[axi] = imx_clk_busy_divider("axi", "axi_sel", base + 0x14, 16, 3, base + 0x48, 0);
+ clk[mmdc_ch0_axi_podf] = imx_clk_busy_divider("mmdc_ch0_axi_podf", "periph", base + 0x14, 19, 3, base + 0x48, 4);
+ clk[mmdc_ch1_axi_podf] = imx_clk_busy_divider("mmdc_ch1_axi_podf", "periph2", base + 0x14, 3, 3, base + 0x48, 2);
+ clk[arm] = imx_clk_busy_divider("arm", "pll1_sw", base + 0x10, 0, 3, base + 0x48, 16);
+ clk[ahb] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1);
+
+ /* name parent_name reg shift */
+ clk[apbh_dma] = imx_clk_gate2("apbh_dma", "ahb", base + 0x68, 4);
+ clk[asrc] = imx_clk_gate2("asrc", "asrc_podf", base + 0x68, 6);
+ clk[can1_ipg] = imx_clk_gate2("can1_ipg", "ipg", base + 0x68, 14);
+ clk[can1_serial] = imx_clk_gate2("can1_serial", "can_root", base + 0x68, 16);
+ clk[can2_ipg] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18);
+ clk[can2_serial] = imx_clk_gate2("can2_serial", "can_root", base + 0x68, 20);
+ clk[ecspi1] = imx_clk_gate2("ecspi1", "ecspi_root", base + 0x6c, 0);
+ clk[ecspi2] = imx_clk_gate2("ecspi2", "ecspi_root", base + 0x6c, 2);
+ clk[ecspi3] = imx_clk_gate2("ecspi3", "ecspi_root", base + 0x6c, 4);
+ clk[ecspi4] = imx_clk_gate2("ecspi4", "ecspi_root", base + 0x6c, 6);
+ clk[ecspi5] = imx_clk_gate2("ecspi5", "ecspi_root", base + 0x6c, 8);
+ clk[enet] = imx_clk_gate2("enet", "ipg", base + 0x6c, 10);
+ clk[esai] = imx_clk_gate2("esai", "esai_podf", base + 0x6c, 16);
+ clk[gpt_ipg] = imx_clk_gate2("gpt_ipg", "ipg", base + 0x6c, 20);
+ clk[gpt_ipg_per] = imx_clk_gate2("gpt_ipg_per", "ipg_per", base + 0x6c, 22);
+ clk[gpu2d_core] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24);
+ clk[gpu3d_core] = imx_clk_gate2("gpu3d_core", "gpu3d_core_podf", base + 0x6c, 26);
+ clk[hdmi_iahb] = imx_clk_gate2("hdmi_iahb", "ahb", base + 0x70, 0);
+ clk[hdmi_isfr] = imx_clk_gate2("hdmi_isfr", "pll3_pfd1_540m", base + 0x70, 4);
+ clk[i2c1] = imx_clk_gate2("i2c1", "ipg_per", base + 0x70, 6);
+ clk[i2c2] = imx_clk_gate2("i2c2", "ipg_per", base + 0x70, 8);
+ clk[i2c3] = imx_clk_gate2("i2c3", "ipg_per", base + 0x70, 10);
+ clk[iim] = imx_clk_gate2("iim", "ipg", base + 0x70, 12);
+ clk[enfc] = imx_clk_gate2("enfc", "enfc_podf", base + 0x70, 14);
+ clk[ipu1] = imx_clk_gate2("ipu1", "ipu1_podf", base + 0x74, 0);
+ clk[ipu1_di0] = imx_clk_gate2("ipu1_di0", "ipu1_di0_sel", base + 0x74, 2);
+ clk[ipu1_di1] = imx_clk_gate2("ipu1_di1", "ipu1_di1_sel", base + 0x74, 4);
+ clk[ipu2] = imx_clk_gate2("ipu2", "ipu2_podf", base + 0x74, 6);
+ clk[ipu2_di0] = imx_clk_gate2("ipu2_di0", "ipu2_di0_sel", base + 0x74, 8);
+ clk[ldb_di0] = imx_clk_gate2("ldb_di0", "ldb_di0_podf", base + 0x74, 12);
+ clk[ldb_di1] = imx_clk_gate2("ldb_di1", "ldb_di1_podf", base + 0x74, 14);
+ clk[ipu2_di1] = imx_clk_gate2("ipu2_di1", "ipu2_di1_sel", base + 0x74, 10);
+ clk[hsi_tx] = imx_clk_gate2("hsi_tx", "hsi_tx_podf", base + 0x74, 16);
+ clk[mlb] = imx_clk_gate2("mlb", "pll6_mlb", base + 0x74, 18);
+ clk[mmdc_ch0_axi] = imx_clk_gate2("mmdc_ch0_axi", "mmdc_ch0_axi_podf", base + 0x74, 20);
+ clk[mmdc_ch1_axi] = imx_clk_gate2("mmdc_ch1_axi", "mmdc_ch1_axi_podf", base + 0x74, 22);
+ clk[ocram] = imx_clk_gate2("ocram", "ahb", base + 0x74, 28);
+ clk[openvg_axi] = imx_clk_gate2("openvg_axi", "axi", base + 0x74, 30);
+ clk[pcie_axi] = imx_clk_gate2("pcie_axi", "pcie_axi_sel", base + 0x78, 0);
+ clk[pwm1] = imx_clk_gate2("pwm1", "ipg_per", base + 0x78, 16);
+ clk[pwm2] = imx_clk_gate2("pwm2", "ipg_per", base + 0x78, 18);
+ clk[pwm3] = imx_clk_gate2("pwm3", "ipg_per", base + 0x78, 20);
+ clk[pwm4] = imx_clk_gate2("pwm4", "ipg_per", base + 0x78, 22);
+ clk[gpmi_bch_apb] = imx_clk_gate2("gpmi_bch_apb", "usdhc3", base + 0x78, 24);
+ clk[gpmi_bch] = imx_clk_gate2("gpmi_bch", "usdhc4", base + 0x78, 26);
+ clk[gpmi_io] = imx_clk_gate2("gpmi_io", "enfc", base + 0x78, 28);
+ clk[gpmi_apb] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30);
+ clk[sata] = imx_clk_gate2("sata", "ipg", base + 0x7c, 4);
+ clk[sdma] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
+ clk[spba] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
+ clk[ssi1_ipg] = imx_clk_gate2("ssi1_ipg", "ipg", base + 0x7c, 18);
+ clk[ssi2_ipg] = imx_clk_gate2("ssi2_ipg", "ipg", base + 0x7c, 20);
+ clk[ssi3_ipg] = imx_clk_gate2("ssi3_ipg", "ipg", base + 0x7c, 22);
+ clk[uart_ipg] = imx_clk_gate2("uart_ipg", "ipg", base + 0x7c, 24);
+ clk[uart_serial] = imx_clk_gate2("uart_serial", "uart_serial_podf", base + 0x7c, 26);
+ clk[usboh3] = imx_clk_gate2("usboh3", "ipg", base + 0x80, 0);
+ clk[usdhc1] = imx_clk_gate2("usdhc1", "usdhc1_podf", base + 0x80, 2);
+ clk[usdhc2] = imx_clk_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4);
+ clk[usdhc3] = imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6);
+ clk[usdhc4] = imx_clk_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8);
+ clk[vdo_axi] = imx_clk_gate2("vdo_axi", "vdo_axi_sel", base + 0x80, 12);
+ clk[vpu_axi] = imx_clk_gate2("vpu_axi", "vpu_axi_podf", base + 0x80, 14);
+ clk[cko1] = imx_clk_gate("cko1", "cko1_podf", base + 0x60, 7);
+
+ for (i = 0; i < ARRAY_SIZE(clk); i++)
+ if (IS_ERR(clk[i]))
+ pr_err("i.MX6q clk %d: register failed with %ld\n",
+ i, PTR_ERR(clk[i]));
+
+ clk_register_clkdev(clk[mmdc_ch0_axi], NULL, "mmdc_ch0_axi");
+ clk_register_clkdev(clk[mmdc_ch1_axi], NULL, "mmdc_ch1_axi");
+ clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0");
+ clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
+ clk_register_clkdev(clk[twd], NULL, "smp_twd");
+ clk_register_clkdev(clk[usboh3], NULL, "usboh3");
+ clk_register_clkdev(clk[uart_serial], "per", "2020000.serial");
+ clk_register_clkdev(clk[uart_ipg], "ipg", "2020000.serial");
+ clk_register_clkdev(clk[uart_serial], "per", "21e8000.serial");
+ clk_register_clkdev(clk[uart_ipg], "ipg", "21e8000.serial");
+ clk_register_clkdev(clk[uart_serial], "per", "21ec000.serial");
+ clk_register_clkdev(clk[uart_ipg], "ipg", "21ec000.serial");
+ clk_register_clkdev(clk[uart_serial], "per", "21f0000.serial");
+ clk_register_clkdev(clk[uart_ipg], "ipg", "21f0000.serial");
+ clk_register_clkdev(clk[uart_serial], "per", "21f4000.serial");
+ clk_register_clkdev(clk[uart_ipg], "ipg", "21f4000.serial");
+ clk_register_clkdev(clk[enet], NULL, "2188000.ethernet");
+ clk_register_clkdev(clk[usdhc1], NULL, "2190000.usdhc");
+ clk_register_clkdev(clk[usdhc2], NULL, "2194000.usdhc");
+ clk_register_clkdev(clk[usdhc3], NULL, "2198000.usdhc");
+ clk_register_clkdev(clk[usdhc4], NULL, "219c000.usdhc");
+ clk_register_clkdev(clk[i2c1], NULL, "21a0000.i2c");
+ clk_register_clkdev(clk[i2c2], NULL, "21a4000.i2c");
+ clk_register_clkdev(clk[i2c3], NULL, "21a8000.i2c");
+ clk_register_clkdev(clk[ecspi1], NULL, "2008000.ecspi");
+ clk_register_clkdev(clk[ecspi2], NULL, "200c000.ecspi");
+ clk_register_clkdev(clk[ecspi3], NULL, "2010000.ecspi");
+ clk_register_clkdev(clk[ecspi4], NULL, "2014000.ecspi");
+ clk_register_clkdev(clk[ecspi5], NULL, "2018000.ecspi");
+ clk_register_clkdev(clk[sdma], NULL, "20ec000.sdma");
+ clk_register_clkdev(clk[dummy], NULL, "20bc000.wdog");
+ clk_register_clkdev(clk[dummy], NULL, "20c0000.wdog");
+ clk_register_clkdev(clk[ssi1_ipg], NULL, "2028000.ssi");
+ clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL);
+ clk_register_clkdev(clk[ahb], "ahb", NULL);
+ clk_register_clkdev(clk[cko1], "cko1", NULL);
+
+ for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) {
+ c = clk_get_sys(clks_init_on[i], NULL);
+ if (IS_ERR(c)) {
+			pr_err("%s: failed to get clk %s\n", __func__,
+				clks_init_on[i]);
+ return PTR_ERR(c);
+ }
+ clk_prepare_enable(c);
+ }
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
+ irq = irq_of_parse_and_map(np, 0);
+ mxc_timer_init(NULL, base, irq);
+
+ return 0;
+}
diff --git a/arch/arm/mach-imx/clk-pfd.c b/arch/arm/mach-imx/clk-pfd.c
new file mode 100644
index 00000000000..e2ed4160f32
--- /dev/null
+++ b/arch/arm/mach-imx/clk-pfd.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include "clk.h"
+
+/**
+ * struct clk_pfd - IMX PFD clock
+ * @hw: clock source
+ * @reg: PFD register address
+ * @idx: index of the PFD encoded in the register
+ *
+ * PFD clock found on the i.MX6 series. Each PFD register encodes four
+ * PFD clocks, and @idx selects which of them this instance controls.
+ * Each register also has SET, CLR and TOG variants at offsets 0x4, 0x8
+ * and 0xc.
+ */
+struct clk_pfd {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 idx;
+};
+
+#define to_clk_pfd(_hw) container_of(_hw, struct clk_pfd, hw)
+
+#define SET 0x4
+#define CLR 0x8
+#define TOG	0xc
+
+static int clk_pfd_enable(struct clk_hw *hw)
+{
+ struct clk_pfd *pfd = to_clk_pfd(hw);
+
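+	/*
+	 * The gate bit is the MSB of this PFD's 8-bit field; writing it
+	 * to the CLR register clears the gate, i.e. enables the output.
+	 */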
+ writel_relaxed(1 << ((pfd->idx + 1) * 8 - 1), pfd->reg + CLR);
+
+ return 0;
+}
+
+static void clk_pfd_disable(struct clk_hw *hw)
+{
+ struct clk_pfd *pfd = to_clk_pfd(hw);
+
+ writel_relaxed(1 << ((pfd->idx + 1) * 8 - 1), pfd->reg + SET);
+}
+
+static unsigned long clk_pfd_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pfd *pfd = to_clk_pfd(hw);
+ u64 tmp = parent_rate;
+ u8 frac = (readl_relaxed(pfd->reg) >> (pfd->idx * 8)) & 0x3f;
+
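+	/* PFD output rate = parent_rate * 18 / frac, frac being 6 bits wide */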
+ tmp *= 18;
+ do_div(tmp, frac);
+
+ return tmp;
+}
+
+static long clk_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ u64 tmp = *prate;
+ u8 frac;
+
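+	/* frac = round(parent_rate * 18 / rate), clamped to the 12..35 range */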
+ tmp = tmp * 18 + rate / 2;
+ do_div(tmp, rate);
+ frac = tmp;
+ if (frac < 12)
+ frac = 12;
+ else if (frac > 35)
+ frac = 35;
+ tmp = *prate;
+ tmp *= 18;
+ do_div(tmp, frac);
+
+ return tmp;
+}
+
+static int clk_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_pfd *pfd = to_clk_pfd(hw);
+ u64 tmp = parent_rate;
+ u8 frac;
+
+ tmp = tmp * 18 + rate / 2;
+ do_div(tmp, rate);
+ frac = tmp;
+ if (frac < 12)
+ frac = 12;
+ else if (frac > 35)
+ frac = 35;
+
+ writel_relaxed(0x3f << (pfd->idx * 8), pfd->reg + CLR);
+ writel_relaxed(frac << (pfd->idx * 8), pfd->reg + SET);
+
+ return 0;
+}
+
+static const struct clk_ops clk_pfd_ops = {
+ .enable = clk_pfd_enable,
+ .disable = clk_pfd_disable,
+ .recalc_rate = clk_pfd_recalc_rate,
+ .round_rate = clk_pfd_round_rate,
+ .set_rate = clk_pfd_set_rate,
+};
+
+struct clk *imx_clk_pfd(const char *name, const char *parent_name,
+ void __iomem *reg, u8 idx)
+{
+ struct clk_pfd *pfd;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pfd = kzalloc(sizeof(*pfd), GFP_KERNEL);
+ if (!pfd)
+ return ERR_PTR(-ENOMEM);
+
+ pfd->reg = reg;
+ pfd->idx = idx;
+
+ init.name = name;
+ init.ops = &clk_pfd_ops;
+ init.flags = 0;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ pfd->hw.init = &init;
+
+ clk = clk_register(NULL, &pfd->hw);
+ if (IS_ERR(clk))
+ kfree(pfd);
+
+ return clk;
+}
diff --git a/arch/arm/mach-imx/clk-pllv1.c b/arch/arm/mach-imx/clk-pllv1.c
new file mode 100644
index 00000000000..2d856f9ccf5
--- /dev/null
+++ b/arch/arm/mach-imx/clk-pllv1.c
@@ -0,0 +1,66 @@
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <mach/common.h>
+#include <mach/hardware.h>
+#include <mach/clock.h>
+#include "clk.h"
+
+/**
+ * struct clk_pllv1 - IMX PLL clock version 1
+ * @hw: clock source
+ * @base: base address of the PLL registers
+ *
+ * PLL clock version 1, found on i.MX1/21/25/27/31/35.
+ */
+struct clk_pllv1 {
+ struct clk_hw hw;
+ void __iomem *base;
+};
+
+#define to_clk_pllv1(_hw) container_of(_hw, struct clk_pllv1, hw)
+
+static unsigned long clk_pllv1_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pllv1 *pll = to_clk_pllv1(hw);
+
+ return mxc_decode_pll(readl(pll->base), parent_rate);
+}
+
+static const struct clk_ops clk_pllv1_ops = {
+ .recalc_rate = clk_pllv1_recalc_rate,
+};
+
+struct clk *imx_clk_pllv1(const char *name, const char *parent,
+ void __iomem *base)
+{
+ struct clk_pllv1 *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+
+	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ pll->base = base;
+
+ init.name = name;
+ init.ops = &clk_pllv1_ops;
+ init.flags = 0;
+ init.parent_names = &parent;
+ init.num_parents = 1;
+
+ pll->hw.init = &init;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
diff --git a/arch/arm/mach-imx/clk-pllv2.c b/arch/arm/mach-imx/clk-pllv2.c
new file mode 100644
index 00000000000..4685919deb6
--- /dev/null
+++ b/arch/arm/mach-imx/clk-pllv2.c
@@ -0,0 +1,249 @@
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include <asm/div64.h>
+
+#include "clk.h"
+
+#define to_clk_pllv2(_hw) container_of(_hw, struct clk_pllv2, hw)
+
+/* PLL Register Offsets */
+#define MXC_PLL_DP_CTL 0x00
+#define MXC_PLL_DP_CONFIG 0x04
+#define MXC_PLL_DP_OP 0x08
+#define MXC_PLL_DP_MFD 0x0C
+#define MXC_PLL_DP_MFN 0x10
+#define MXC_PLL_DP_MFNMINUS 0x14
+#define MXC_PLL_DP_MFNPLUS 0x18
+#define MXC_PLL_DP_HFS_OP 0x1C
+#define MXC_PLL_DP_HFS_MFD 0x20
+#define MXC_PLL_DP_HFS_MFN 0x24
+#define MXC_PLL_DP_MFN_TOGC 0x28
+#define MXC_PLL_DP_DESTAT 0x2c
+
+/* PLL Register Bit definitions */
+#define MXC_PLL_DP_CTL_MUL_CTRL 0x2000
+#define MXC_PLL_DP_CTL_DPDCK0_2_EN 0x1000
+#define MXC_PLL_DP_CTL_DPDCK0_2_OFFSET 12
+#define MXC_PLL_DP_CTL_ADE 0x800
+#define MXC_PLL_DP_CTL_REF_CLK_DIV 0x400
+#define MXC_PLL_DP_CTL_REF_CLK_SEL_MASK (3 << 8)
+#define MXC_PLL_DP_CTL_REF_CLK_SEL_OFFSET 8
+#define MXC_PLL_DP_CTL_HFSM 0x80
+#define MXC_PLL_DP_CTL_PRE 0x40
+#define MXC_PLL_DP_CTL_UPEN 0x20
+#define MXC_PLL_DP_CTL_RST 0x10
+#define MXC_PLL_DP_CTL_RCP 0x8
+#define MXC_PLL_DP_CTL_PLM 0x4
+#define MXC_PLL_DP_CTL_BRM0 0x2
+#define MXC_PLL_DP_CTL_LRF 0x1
+
+#define MXC_PLL_DP_CONFIG_BIST 0x8
+#define MXC_PLL_DP_CONFIG_SJC_CE 0x4
+#define MXC_PLL_DP_CONFIG_AREN 0x2
+#define MXC_PLL_DP_CONFIG_LDREQ 0x1
+
+#define MXC_PLL_DP_OP_MFI_OFFSET 4
+#define MXC_PLL_DP_OP_MFI_MASK (0xF << 4)
+#define MXC_PLL_DP_OP_PDF_OFFSET 0
+#define MXC_PLL_DP_OP_PDF_MASK 0xF
+
+#define MXC_PLL_DP_MFD_OFFSET 0
+#define MXC_PLL_DP_MFD_MASK 0x07FFFFFF
+
+#define MXC_PLL_DP_MFN_OFFSET 0x0
+#define MXC_PLL_DP_MFN_MASK 0x07FFFFFF
+
+#define MXC_PLL_DP_MFN_TOGC_TOG_DIS (1 << 17)
+#define MXC_PLL_DP_MFN_TOGC_TOG_EN (1 << 16)
+#define MXC_PLL_DP_MFN_TOGC_CNT_OFFSET 0x0
+#define MXC_PLL_DP_MFN_TOGC_CNT_MASK 0xFFFF
+
+#define MXC_PLL_DP_DESTAT_TOG_SEL (1 << 31)
+#define MXC_PLL_DP_DESTAT_MFN 0x07FFFFFF
+
+#define MAX_DPLL_WAIT_TRIES 1000 /* 1000 * udelay(1) = 1ms */
+
+struct clk_pllv2 {
+ struct clk_hw hw;
+ void __iomem *base;
+};
+
+static unsigned long clk_pllv2_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ long mfi, mfn, mfd, pdf, ref_clk, mfn_abs;
+ unsigned long dp_op, dp_mfd, dp_mfn, dp_ctl, pll_hfsm, dbl;
+ void __iomem *pllbase;
+ s64 temp;
+ struct clk_pllv2 *pll = to_clk_pllv2(hw);
+
+ pllbase = pll->base;
+
+ dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
+ pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
+ dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN;
+
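+	/*
+	 * Fout = ref * (mfi + mfn / (mfd + 1)), where ref is 2 * parent
+	 * (doubled again if the dpdck0_2 doubler is enabled) divided by
+	 * (pdf + 1).  mfn is a signed 27-bit fraction numerator.
+	 */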
+ if (pll_hfsm == 0) {
+ dp_op = __raw_readl(pllbase + MXC_PLL_DP_OP);
+ dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_MFD);
+ dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_MFN);
+ } else {
+ dp_op = __raw_readl(pllbase + MXC_PLL_DP_HFS_OP);
+ dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFD);
+ dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFN);
+ }
+ pdf = dp_op & MXC_PLL_DP_OP_PDF_MASK;
+ mfi = (dp_op & MXC_PLL_DP_OP_MFI_MASK) >> MXC_PLL_DP_OP_MFI_OFFSET;
+ mfi = (mfi <= 5) ? 5 : mfi;
+ mfd = dp_mfd & MXC_PLL_DP_MFD_MASK;
+ mfn = mfn_abs = dp_mfn & MXC_PLL_DP_MFN_MASK;
+ /* Sign extend to 32-bits */
+ if (mfn >= 0x04000000) {
+ mfn |= 0xFC000000;
+ mfn_abs = -mfn;
+ }
+
+ ref_clk = 2 * parent_rate;
+ if (dbl != 0)
+ ref_clk *= 2;
+
+ ref_clk /= (pdf + 1);
+ temp = (u64) ref_clk * mfn_abs;
+ do_div(temp, mfd + 1);
+ if (mfn < 0)
+ temp = -temp;
+ temp = (ref_clk * mfi) + temp;
+
+ return temp;
+}
+
+static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_pllv2 *pll = to_clk_pllv2(hw);
+ u32 reg;
+ void __iomem *pllbase;
+ long mfi, pdf, mfn, mfd = 999999;
+ s64 temp64;
+ unsigned long quad_parent_rate;
+ unsigned long pll_hfsm, dp_ctl;
+
+ pllbase = pll->base;
+
+ quad_parent_rate = 4 * parent_rate;
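+	/*
+	 * Find the smallest pre-divider (pdf) for which the integer
+	 * multiplier mfi = rate * (pdf + 1) / (4 * parent_rate) is at
+	 * least 5; mfi must not exceed 15.
+	 */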
+ pdf = mfi = -1;
+ while (++pdf < 16 && mfi < 5)
+ mfi = rate * (pdf+1) / quad_parent_rate;
+ if (mfi > 15)
+ return -EINVAL;
+ pdf--;
+
+ temp64 = rate * (pdf+1) - quad_parent_rate * mfi;
+ do_div(temp64, quad_parent_rate/1000000);
+ mfn = (long)temp64;
+
+ dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
+ /* use dpdck0_2 */
+ __raw_writel(dp_ctl | 0x1000L, pllbase + MXC_PLL_DP_CTL);
+ pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
+ if (pll_hfsm == 0) {
+ reg = mfi << 4 | pdf;
+ __raw_writel(reg, pllbase + MXC_PLL_DP_OP);
+ __raw_writel(mfd, pllbase + MXC_PLL_DP_MFD);
+ __raw_writel(mfn, pllbase + MXC_PLL_DP_MFN);
+ } else {
+ reg = mfi << 4 | pdf;
+ __raw_writel(reg, pllbase + MXC_PLL_DP_HFS_OP);
+ __raw_writel(mfd, pllbase + MXC_PLL_DP_HFS_MFD);
+ __raw_writel(mfn, pllbase + MXC_PLL_DP_HFS_MFN);
+ }
+
+ return 0;
+}
+
+static long clk_pllv2_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return rate;
+}
+
+static int clk_pllv2_prepare(struct clk_hw *hw)
+{
+ struct clk_pllv2 *pll = to_clk_pllv2(hw);
+ u32 reg;
+ void __iomem *pllbase;
+ int i = 0;
+
+ pllbase = pll->base;
+ reg = __raw_readl(pllbase + MXC_PLL_DP_CTL) | MXC_PLL_DP_CTL_UPEN;
+ __raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
+
+ /* Wait for lock */
+ do {
+ reg = __raw_readl(pllbase + MXC_PLL_DP_CTL);
+ if (reg & MXC_PLL_DP_CTL_LRF)
+ break;
+
+ udelay(1);
+ } while (++i < MAX_DPLL_WAIT_TRIES);
+
+ if (i == MAX_DPLL_WAIT_TRIES) {
+ pr_err("MX5: pll locking failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void clk_pllv2_unprepare(struct clk_hw *hw)
+{
+ struct clk_pllv2 *pll = to_clk_pllv2(hw);
+ u32 reg;
+ void __iomem *pllbase;
+
+ pllbase = pll->base;
+ reg = __raw_readl(pllbase + MXC_PLL_DP_CTL) & ~MXC_PLL_DP_CTL_UPEN;
+ __raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
+}
+
+static const struct clk_ops clk_pllv2_ops = {
+ .prepare = clk_pllv2_prepare,
+ .unprepare = clk_pllv2_unprepare,
+ .recalc_rate = clk_pllv2_recalc_rate,
+ .round_rate = clk_pllv2_round_rate,
+ .set_rate = clk_pllv2_set_rate,
+};
+
+struct clk *imx_clk_pllv2(const char *name, const char *parent,
+ void __iomem *base)
+{
+ struct clk_pllv2 *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ pll->base = base;
+
+ init.name = name;
+ init.ops = &clk_pllv2_ops;
+ init.flags = 0;
+ init.parent_names = &parent;
+ init.num_parents = 1;
+
+ pll->hw.init = &init;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
diff --git a/arch/arm/mach-imx/clk-pllv3.c b/arch/arm/mach-imx/clk-pllv3.c
new file mode 100644
index 00000000000..36aac947bce
--- /dev/null
+++ b/arch/arm/mach-imx/clk-pllv3.c
@@ -0,0 +1,419 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/err.h>
+#include "clk.h"
+
+#define PLL_NUM_OFFSET 0x10
+#define PLL_DENOM_OFFSET 0x20
+
+#define BM_PLL_POWER (0x1 << 12)
+#define BM_PLL_ENABLE (0x1 << 13)
+#define BM_PLL_BYPASS (0x1 << 16)
+#define BM_PLL_LOCK (0x1 << 31)
+
+/**
+ * struct clk_pllv3 - IMX PLL clock version 3
+ * @hw: clock source
+ * @base: base address of the PLL registers
+ * @powerup_set: set the POWER bit to power up the PLL
+ * @gate_mask: mask of the gate bits
+ * @div_mask: mask of the divider bits
+ *
+ * IMX PLL clock version 3, found on the i.MX6 series. The divider for
+ * pllv3 is actually a multiplier, and it always starts at bit 0.
+ */
+struct clk_pllv3 {
+ struct clk_hw hw;
+ void __iomem *base;
+ bool powerup_set;
+ u32 gate_mask;
+ u32 div_mask;
+};
+
+#define to_clk_pllv3(_hw) container_of(_hw, struct clk_pllv3, hw)
+
+static int clk_pllv3_prepare(struct clk_hw *hw)
+{
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ unsigned long timeout = jiffies + msecs_to_jiffies(10);
+ u32 val;
+
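+	/*
+	 * Take the PLL out of bypass and power it up; the polarity of
+	 * the POWER bit depends on powerup_set.
+	 */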
+ val = readl_relaxed(pll->base);
+ val &= ~BM_PLL_BYPASS;
+ if (pll->powerup_set)
+ val |= BM_PLL_POWER;
+ else
+ val &= ~BM_PLL_POWER;
+ writel_relaxed(val, pll->base);
+
+ /* Wait for PLL to lock */
+ while (!(readl_relaxed(pll->base) & BM_PLL_LOCK))
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void clk_pllv3_unprepare(struct clk_hw *hw)
+{
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 val;
+
+ val = readl_relaxed(pll->base);
+ val |= BM_PLL_BYPASS;
+ if (pll->powerup_set)
+ val &= ~BM_PLL_POWER;
+ else
+ val |= BM_PLL_POWER;
+ writel_relaxed(val, pll->base);
+}
+
+static int clk_pllv3_enable(struct clk_hw *hw)
+{
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 val;
+
+ val = readl_relaxed(pll->base);
+ val |= pll->gate_mask;
+ writel_relaxed(val, pll->base);
+
+ return 0;
+}
+
+static void clk_pllv3_disable(struct clk_hw *hw)
+{
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 val;
+
+ val = readl_relaxed(pll->base);
+ val &= ~pll->gate_mask;
+ writel_relaxed(val, pll->base);
+}
+
+static unsigned long clk_pllv3_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 div = readl_relaxed(pll->base) & pll->div_mask;
+
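+	/* DIV_SELECT: 0 gives Fout = Fref * 20, 1 gives Fout = Fref * 22 */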
+ return (div == 1) ? parent_rate * 22 : parent_rate * 20;
+}
+
+static long clk_pllv3_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ unsigned long parent_rate = *prate;
+
+ return (rate >= parent_rate * 22) ? parent_rate * 22 :
+ parent_rate * 20;
+}
+
+static int clk_pllv3_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 val, div;
+
+ if (rate == parent_rate * 22)
+ div = 1;
+ else if (rate == parent_rate * 20)
+ div = 0;
+ else
+ return -EINVAL;
+
+ val = readl_relaxed(pll->base);
+ val &= ~pll->div_mask;
+ val |= div;
+ writel_relaxed(val, pll->base);
+
+ return 0;
+}
+
+static const struct clk_ops clk_pllv3_ops = {
+ .prepare = clk_pllv3_prepare,
+ .unprepare = clk_pllv3_unprepare,
+ .enable = clk_pllv3_enable,
+ .disable = clk_pllv3_disable,
+ .recalc_rate = clk_pllv3_recalc_rate,
+ .round_rate = clk_pllv3_round_rate,
+ .set_rate = clk_pllv3_set_rate,
+};
+
+static unsigned long clk_pllv3_sys_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 div = readl_relaxed(pll->base) & pll->div_mask;
+
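+	/* Fout = Fref * div_select / 2, with div_select between 54 and 108 */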
+ return parent_rate * div / 2;
+}
+
+static long clk_pllv3_sys_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ unsigned long parent_rate = *prate;
+ unsigned long min_rate = parent_rate * 54 / 2;
+ unsigned long max_rate = parent_rate * 108 / 2;
+ u32 div;
+
+ if (rate > max_rate)
+ rate = max_rate;
+ else if (rate < min_rate)
+ rate = min_rate;
+ div = rate * 2 / parent_rate;
+
+ return parent_rate * div / 2;
+}
+
+static int clk_pllv3_sys_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ unsigned long min_rate = parent_rate * 54 / 2;
+ unsigned long max_rate = parent_rate * 108 / 2;
+ u32 val, div;
+
+ if (rate < min_rate || rate > max_rate)
+ return -EINVAL;
+
+ div = rate * 2 / parent_rate;
+ val = readl_relaxed(pll->base);
+ val &= ~pll->div_mask;
+ val |= div;
+ writel_relaxed(val, pll->base);
+
+ return 0;
+}
+
+static const struct clk_ops clk_pllv3_sys_ops = {
+ .prepare = clk_pllv3_prepare,
+ .unprepare = clk_pllv3_unprepare,
+ .enable = clk_pllv3_enable,
+ .disable = clk_pllv3_disable,
+ .recalc_rate = clk_pllv3_sys_recalc_rate,
+ .round_rate = clk_pllv3_sys_round_rate,
+ .set_rate = clk_pllv3_sys_set_rate,
+};
+
+static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 mfn = readl_relaxed(pll->base + PLL_NUM_OFFSET);
+ u32 mfd = readl_relaxed(pll->base + PLL_DENOM_OFFSET);
+ u32 div = readl_relaxed(pll->base) & pll->div_mask;
+
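+	/* Fout = Fref * (div_select + num / denom) */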
+ return (parent_rate * div) + ((parent_rate / mfd) * mfn);
+}
+
+static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ unsigned long parent_rate = *prate;
+ unsigned long min_rate = parent_rate * 27;
+ unsigned long max_rate = parent_rate * 54;
+ u32 div;
+ u32 mfn, mfd = 1000000;
+ s64 temp64;
+
+ if (rate > max_rate)
+ rate = max_rate;
+ else if (rate < min_rate)
+ rate = min_rate;
+
+ div = rate / parent_rate;
+ temp64 = (u64) (rate - div * parent_rate);
+ temp64 *= mfd;
+ do_div(temp64, parent_rate);
+ mfn = temp64;
+
+ return parent_rate * div + parent_rate / mfd * mfn;
+}
+
+static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ unsigned long min_rate = parent_rate * 27;
+ unsigned long max_rate = parent_rate * 54;
+ u32 val, div;
+ u32 mfn, mfd = 1000000;
+ s64 temp64;
+
+ if (rate < min_rate || rate > max_rate)
+ return -EINVAL;
+
+ div = rate / parent_rate;
+ temp64 = (u64) (rate - div * parent_rate);
+ temp64 *= mfd;
+ do_div(temp64, parent_rate);
+ mfn = temp64;
+
+ val = readl_relaxed(pll->base);
+ val &= ~pll->div_mask;
+ val |= div;
+ writel_relaxed(val, pll->base);
+ writel_relaxed(mfn, pll->base + PLL_NUM_OFFSET);
+ writel_relaxed(mfd, pll->base + PLL_DENOM_OFFSET);
+
+ return 0;
+}
+
+static const struct clk_ops clk_pllv3_av_ops = {
+ .prepare = clk_pllv3_prepare,
+ .unprepare = clk_pllv3_unprepare,
+ .enable = clk_pllv3_enable,
+ .disable = clk_pllv3_disable,
+ .recalc_rate = clk_pllv3_av_recalc_rate,
+ .round_rate = clk_pllv3_av_round_rate,
+ .set_rate = clk_pllv3_av_set_rate,
+};
+
+static unsigned long clk_pllv3_enet_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 div = readl_relaxed(pll->base) & pll->div_mask;
+
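+	/*
+	 * DIV_SELECT picks one of four fixed reference-clock rates
+	 * rather than acting as a multiplier.
+	 */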
+ switch (div) {
+ case 0:
+ return 25000000;
+ case 1:
+ return 50000000;
+ case 2:
+ return 100000000;
+ case 3:
+ return 125000000;
+ }
+
+ return 0;
+}
+
+static long clk_pllv3_enet_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ if (rate >= 125000000)
+ rate = 125000000;
+ else if (rate >= 100000000)
+ rate = 100000000;
+ else if (rate >= 50000000)
+ rate = 50000000;
+ else
+ rate = 25000000;
+ return rate;
+}
+
+static int clk_pllv3_enet_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ u32 val, div;
+
+ switch (rate) {
+ case 25000000:
+ div = 0;
+ break;
+ case 50000000:
+ div = 1;
+ break;
+ case 100000000:
+ div = 2;
+ break;
+ case 125000000:
+ div = 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val = readl_relaxed(pll->base);
+ val &= ~pll->div_mask;
+ val |= div;
+ writel_relaxed(val, pll->base);
+
+ return 0;
+}
+
+static const struct clk_ops clk_pllv3_enet_ops = {
+ .prepare = clk_pllv3_prepare,
+ .unprepare = clk_pllv3_unprepare,
+ .enable = clk_pllv3_enable,
+ .disable = clk_pllv3_disable,
+ .recalc_rate = clk_pllv3_enet_recalc_rate,
+ .round_rate = clk_pllv3_enet_round_rate,
+ .set_rate = clk_pllv3_enet_set_rate,
+};
+
+static const struct clk_ops clk_pllv3_mlb_ops = {
+ .prepare = clk_pllv3_prepare,
+ .unprepare = clk_pllv3_unprepare,
+ .enable = clk_pllv3_enable,
+ .disable = clk_pllv3_disable,
+};
+
+struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
+ const char *parent_name, void __iomem *base,
+ u32 gate_mask, u32 div_mask)
+{
+ struct clk_pllv3 *pll;
+ const struct clk_ops *ops;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ switch (type) {
+ case IMX_PLLV3_SYS:
+ ops = &clk_pllv3_sys_ops;
+ break;
+ case IMX_PLLV3_USB:
+ ops = &clk_pllv3_ops;
+ pll->powerup_set = true;
+ break;
+ case IMX_PLLV3_AV:
+ ops = &clk_pllv3_av_ops;
+ break;
+ case IMX_PLLV3_ENET:
+ ops = &clk_pllv3_enet_ops;
+ break;
+ case IMX_PLLV3_MLB:
+ ops = &clk_pllv3_mlb_ops;
+ break;
+ default:
+ ops = &clk_pllv3_ops;
+ }
+ pll->base = base;
+ pll->gate_mask = gate_mask;
+ pll->div_mask = div_mask;
+
+ init.name = name;
+ init.ops = ops;
+ init.flags = 0;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ pll->hw.init = &init;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
diff --git a/arch/arm/mach-imx/clk.h b/arch/arm/mach-imx/clk.h
new file mode 100644
index 00000000000..1bf64fe2523
--- /dev/null
+++ b/arch/arm/mach-imx/clk.h
@@ -0,0 +1,83 @@
+#ifndef __MACH_IMX_CLK_H
+#define __MACH_IMX_CLK_H
+
+#include <linux/spinlock.h>
+#include <linux/clk-provider.h>
+#include <mach/clock.h>
+
+struct clk *imx_clk_pllv1(const char *name, const char *parent,
+ void __iomem *base);
+
+struct clk *imx_clk_pllv2(const char *name, const char *parent,
+ void __iomem *base);
+
+enum imx_pllv3_type {
+ IMX_PLLV3_GENERIC,
+ IMX_PLLV3_SYS,
+ IMX_PLLV3_USB,
+ IMX_PLLV3_AV,
+ IMX_PLLV3_ENET,
+ IMX_PLLV3_MLB,
+};
+
+struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
+ const char *parent_name, void __iomem *base, u32 gate_mask,
+ u32 div_mask);
+
+struct clk *clk_register_gate2(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 bit_idx,
+ u8 clk_gate_flags, spinlock_t *lock);
+
+static inline struct clk *imx_clk_gate2(const char *name, const char *parent,
+ void __iomem *reg, u8 shift)
+{
+ return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
+ shift, 0, &imx_ccm_lock);
+}
+
+struct clk *imx_clk_pfd(const char *name, const char *parent_name,
+ void __iomem *reg, u8 idx);
+
+struct clk *imx_clk_busy_divider(const char *name, const char *parent_name,
+ void __iomem *reg, u8 shift, u8 width,
+ void __iomem *busy_reg, u8 busy_shift);
+
+struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
+ u8 width, void __iomem *busy_reg, u8 busy_shift,
+ const char **parent_names, int num_parents);
+
+static inline struct clk *imx_clk_fixed(const char *name, int rate)
+{
+ return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
+}
+
+static inline struct clk *imx_clk_divider(const char *name, const char *parent,
+ void __iomem *reg, u8 shift, u8 width)
+{
+ return clk_register_divider(NULL, name, parent, CLK_SET_RATE_PARENT,
+ reg, shift, width, 0, &imx_ccm_lock);
+}
+
+static inline struct clk *imx_clk_gate(const char *name, const char *parent,
+ void __iomem *reg, u8 shift)
+{
+ return clk_register_gate(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
+ shift, 0, &imx_ccm_lock);
+}
+
+static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg,
+ u8 shift, u8 width, const char **parents, int num_parents)
+{
+ return clk_register_mux(NULL, name, parents, num_parents, 0, reg, shift,
+ width, 0, &imx_ccm_lock);
+}
+
+static inline struct clk *imx_clk_fixed_factor(const char *name,
+ const char *parent, unsigned int mult, unsigned int div)
+{
+ return clk_register_fixed_factor(NULL, name, parent,
+ CLK_SET_RATE_PARENT, mult, div);
+}
+
+#endif
diff --git a/arch/arm/mach-imx/clock-imx1.c b/arch/arm/mach-imx/clock-imx1.c
deleted file mode 100644
index 4aabeb24156..00000000000
--- a/arch/arm/mach-imx/clock-imx1.c
+++ /dev/null
@@ -1,636 +0,0 @@
-/*
- * Copyright (C) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/math64.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
-
-#include <mach/clock.h>
-#include <mach/hardware.h>
-#include <mach/common.h>
-
-#define IO_ADDR_CCM(off) (MX1_IO_ADDRESS(MX1_CCM_BASE_ADDR + (off)))
-
-/* CCM register addresses */
-#define CCM_CSCR IO_ADDR_CCM(0x0)
-#define CCM_MPCTL0 IO_ADDR_CCM(0x4)
-#define CCM_SPCTL0 IO_ADDR_CCM(0xc)
-#define CCM_PCDR IO_ADDR_CCM(0x20)
-
-#define CCM_CSCR_CLKO_OFFSET 29
-#define CCM_CSCR_CLKO_MASK (0x7 << 29)
-#define CCM_CSCR_USB_OFFSET 26
-#define CCM_CSCR_USB_MASK (0x7 << 26)
-#define CCM_CSCR_OSC_EN_SHIFT 17
-#define CCM_CSCR_SYSTEM_SEL (1 << 16)
-#define CCM_CSCR_BCLK_OFFSET 10
-#define CCM_CSCR_BCLK_MASK (0xf << 10)
-#define CCM_CSCR_PRESC (1 << 15)
-
-#define CCM_PCDR_PCLK3_OFFSET 16
-#define CCM_PCDR_PCLK3_MASK (0x7f << 16)
-#define CCM_PCDR_PCLK2_OFFSET 4
-#define CCM_PCDR_PCLK2_MASK (0xf << 4)
-#define CCM_PCDR_PCLK1_OFFSET 0
-#define CCM_PCDR_PCLK1_MASK 0xf
-
-#define IO_ADDR_SCM(off) (MX1_IO_ADDRESS(MX1_SCM_BASE_ADDR + (off)))
-
-/* SCM register addresses */
-#define SCM_GCCR IO_ADDR_SCM(0xc)
-
-#define SCM_GCCR_DMA_CLK_EN_OFFSET 3
-#define SCM_GCCR_CSI_CLK_EN_OFFSET 2
-#define SCM_GCCR_MMA_CLK_EN_OFFSET 1
-#define SCM_GCCR_USBD_CLK_EN_OFFSET 0
-
-static int _clk_enable(struct clk *clk)
-{
- unsigned int reg;
-
- reg = __raw_readl(clk->enable_reg);
- reg |= 1 << clk->enable_shift;
- __raw_writel(reg, clk->enable_reg);
-
- return 0;
-}
-
-static void _clk_disable(struct clk *clk)
-{
- unsigned int reg;
-
- reg = __raw_readl(clk->enable_reg);
- reg &= ~(1 << clk->enable_shift);
- __raw_writel(reg, clk->enable_reg);
-}
-
-static int _clk_can_use_parent(const struct clk *clk_arr[], unsigned int size,
- struct clk *parent)
-{
- int i;
-
- for (i = 0; i < size; i++)
- if (parent == clk_arr[i])
- return i;
-
- return -EINVAL;
-}
-
-static unsigned long
-_clk_simple_round_rate(struct clk *clk, unsigned long rate, unsigned int limit)
-{
- int div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
- if (parent_rate % rate)
- div++;
-
- if (div > limit)
- div = limit;
-
- return parent_rate / div;
-}
-
-static unsigned long _clk_parent_round_rate(struct clk *clk, unsigned long rate)
-{
- return clk->parent->round_rate(clk->parent, rate);
-}
-
-static int _clk_parent_set_rate(struct clk *clk, unsigned long rate)
-{
- return clk->parent->set_rate(clk->parent, rate);
-}
-
-static unsigned long clk16m_get_rate(struct clk *clk)
-{
- return 16000000;
-}
-
-static struct clk clk16m = {
- .get_rate = clk16m_get_rate,
- .enable = _clk_enable,
- .enable_reg = CCM_CSCR,
- .enable_shift = CCM_CSCR_OSC_EN_SHIFT,
- .disable = _clk_disable,
-};
-
-/* in Hz */
-static unsigned long clk32_rate;
-
-static unsigned long clk32_get_rate(struct clk *clk)
-{
- return clk32_rate;
-}
-
-static struct clk clk32 = {
- .get_rate = clk32_get_rate,
-};
-
-static unsigned long clk32_premult_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) * 512;
-}
-
-static struct clk clk32_premult = {
- .parent = &clk32,
- .get_rate = clk32_premult_get_rate,
-};
-
-static const struct clk *prem_clk_clocks[] = {
- &clk32_premult,
- &clk16m,
-};
-
-static int prem_clk_set_parent(struct clk *clk, struct clk *parent)
-{
- int i;
- unsigned int reg = __raw_readl(CCM_CSCR);
-
- i = _clk_can_use_parent(prem_clk_clocks, ARRAY_SIZE(prem_clk_clocks),
- parent);
-
- switch (i) {
- case 0:
- reg &= ~CCM_CSCR_SYSTEM_SEL;
- break;
- case 1:
- reg |= CCM_CSCR_SYSTEM_SEL;
- break;
- default:
- return i;
- }
-
- __raw_writel(reg, CCM_CSCR);
-
- return 0;
-}
-
-static struct clk prem_clk = {
- .set_parent = prem_clk_set_parent,
-};
-
-static unsigned long system_clk_get_rate(struct clk *clk)
-{
- return mxc_decode_pll(__raw_readl(CCM_SPCTL0),
- clk_get_rate(clk->parent));
-}
-
-static struct clk system_clk = {
- .parent = &prem_clk,
- .get_rate = system_clk_get_rate,
-};
-
-static unsigned long mcu_clk_get_rate(struct clk *clk)
-{
- return mxc_decode_pll(__raw_readl(CCM_MPCTL0),
- clk_get_rate(clk->parent));
-}
-
-static struct clk mcu_clk = {
- .parent = &clk32_premult,
- .get_rate = mcu_clk_get_rate,
-};
-
-static unsigned long fclk_get_rate(struct clk *clk)
-{
- unsigned long fclk = clk_get_rate(clk->parent);
-
- if (__raw_readl(CCM_CSCR) & CCM_CSCR_PRESC)
- fclk /= 2;
-
- return fclk;
-}
-
-static struct clk fclk = {
- .parent = &mcu_clk,
- .get_rate = fclk_get_rate,
-};
-
-/*
- * get hclk ( SDRAM, CSI, Memory Stick, I2C, DMA )
- */
-static unsigned long hclk_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / (((__raw_readl(CCM_CSCR) &
- CCM_CSCR_BCLK_MASK) >> CCM_CSCR_BCLK_OFFSET) + 1);
-}
-
-static unsigned long hclk_round_rate(struct clk *clk, unsigned long rate)
-{
- return _clk_simple_round_rate(clk, rate, 16);
-}
-
-static int hclk_set_rate(struct clk *clk, unsigned long rate)
-{
- unsigned int div;
- unsigned int reg;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
-
- if (div > 16 || div < 1 || ((parent_rate / div) != rate))
- return -EINVAL;
-
- div--;
-
- reg = __raw_readl(CCM_CSCR);
- reg &= ~CCM_CSCR_BCLK_MASK;
- reg |= div << CCM_CSCR_BCLK_OFFSET;
- __raw_writel(reg, CCM_CSCR);
-
- return 0;
-}
-
-static struct clk hclk = {
- .parent = &system_clk,
- .get_rate = hclk_get_rate,
- .round_rate = hclk_round_rate,
- .set_rate = hclk_set_rate,
-};
-
-static unsigned long clk48m_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / (((__raw_readl(CCM_CSCR) &
- CCM_CSCR_USB_MASK) >> CCM_CSCR_USB_OFFSET) + 1);
-}
-
-static unsigned long clk48m_round_rate(struct clk *clk, unsigned long rate)
-{
- return _clk_simple_round_rate(clk, rate, 8);
-}
-
-static int clk48m_set_rate(struct clk *clk, unsigned long rate)
-{
- unsigned int div;
- unsigned int reg;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
-
- if (div > 8 || div < 1 || ((parent_rate / div) != rate))
- return -EINVAL;
-
- div--;
-
- reg = __raw_readl(CCM_CSCR);
- reg &= ~CCM_CSCR_USB_MASK;
- reg |= div << CCM_CSCR_USB_OFFSET;
- __raw_writel(reg, CCM_CSCR);
-
- return 0;
-}
-
-static struct clk clk48m = {
- .parent = &system_clk,
- .get_rate = clk48m_get_rate,
- .round_rate = clk48m_round_rate,
- .set_rate = clk48m_set_rate,
-};
-
-/*
- * get peripheral clock 1 ( UART[12], Timer[12], PWM )
- */
-static unsigned long perclk1_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / (((__raw_readl(CCM_PCDR) &
- CCM_PCDR_PCLK1_MASK) >> CCM_PCDR_PCLK1_OFFSET) + 1);
-}
-
-static unsigned long perclk1_round_rate(struct clk *clk, unsigned long rate)
-{
- return _clk_simple_round_rate(clk, rate, 16);
-}
-
-static int perclk1_set_rate(struct clk *clk, unsigned long rate)
-{
- unsigned int div;
- unsigned int reg;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
-
- if (div > 16 || div < 1 || ((parent_rate / div) != rate))
- return -EINVAL;
-
- div--;
-
- reg = __raw_readl(CCM_PCDR);
- reg &= ~CCM_PCDR_PCLK1_MASK;
- reg |= div << CCM_PCDR_PCLK1_OFFSET;
- __raw_writel(reg, CCM_PCDR);
-
- return 0;
-}
-
-/*
- * get peripheral clock 2 ( LCD, SD, SPI[12] )
- */
-static unsigned long perclk2_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / (((__raw_readl(CCM_PCDR) &
- CCM_PCDR_PCLK2_MASK) >> CCM_PCDR_PCLK2_OFFSET) + 1);
-}
-
-static unsigned long perclk2_round_rate(struct clk *clk, unsigned long rate)
-{
- return _clk_simple_round_rate(clk, rate, 16);
-}
-
-static int perclk2_set_rate(struct clk *clk, unsigned long rate)
-{
- unsigned int div;
- unsigned int reg;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
-
- if (div > 16 || div < 1 || ((parent_rate / div) != rate))
- return -EINVAL;
-
- div--;
-
- reg = __raw_readl(CCM_PCDR);
- reg &= ~CCM_PCDR_PCLK2_MASK;
- reg |= div << CCM_PCDR_PCLK2_OFFSET;
- __raw_writel(reg, CCM_PCDR);
-
- return 0;
-}
-
-/*
- * get peripheral clock 3 ( SSI )
- */
-static unsigned long perclk3_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / (((__raw_readl(CCM_PCDR) &
- CCM_PCDR_PCLK3_MASK) >> CCM_PCDR_PCLK3_OFFSET) + 1);
-}
-
-static unsigned long perclk3_round_rate(struct clk *clk, unsigned long rate)
-{
- return _clk_simple_round_rate(clk, rate, 128);
-}
-
-static int perclk3_set_rate(struct clk *clk, unsigned long rate)
-{
- unsigned int div;
- unsigned int reg;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
-
- if (div > 128 || div < 1 || ((parent_rate / div) != rate))
- return -EINVAL;
-
- div--;
-
- reg = __raw_readl(CCM_PCDR);
- reg &= ~CCM_PCDR_PCLK3_MASK;
- reg |= div << CCM_PCDR_PCLK3_OFFSET;
- __raw_writel(reg, CCM_PCDR);
-
- return 0;
-}
-
-static struct clk perclk[] = {
- {
- .id = 0,
- .parent = &system_clk,
- .get_rate = perclk1_get_rate,
- .round_rate = perclk1_round_rate,
- .set_rate = perclk1_set_rate,
- }, {
- .id = 1,
- .parent = &system_clk,
- .get_rate = perclk2_get_rate,
- .round_rate = perclk2_round_rate,
- .set_rate = perclk2_set_rate,
- }, {
- .id = 2,
- .parent = &system_clk,
- .get_rate = perclk3_get_rate,
- .round_rate = perclk3_round_rate,
- .set_rate = perclk3_set_rate,
- }
-};
-
-static const struct clk *clko_clocks[] = {
- &perclk[0],
- &hclk,
- &clk48m,
- &clk16m,
- &prem_clk,
- &fclk,
-};
-
-static int clko_set_parent(struct clk *clk, struct clk *parent)
-{
- int i;
- unsigned int reg;
-
- i = _clk_can_use_parent(clko_clocks, ARRAY_SIZE(clko_clocks), parent);
- if (i < 0)
- return i;
-
- reg = __raw_readl(CCM_CSCR) & ~CCM_CSCR_CLKO_MASK;
- reg |= i << CCM_CSCR_CLKO_OFFSET;
- __raw_writel(reg, CCM_CSCR);
-
- if (clko_clocks[i]->set_rate && clko_clocks[i]->round_rate) {
- clk->set_rate = _clk_parent_set_rate;
- clk->round_rate = _clk_parent_round_rate;
- } else {
- clk->set_rate = NULL;
- clk->round_rate = NULL;
- }
-
- return 0;
-}
-
-static struct clk clko_clk = {
- .set_parent = clko_set_parent,
-};
-
-static struct clk dma_clk = {
- .parent = &hclk,
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
- .enable = _clk_enable,
- .enable_reg = SCM_GCCR,
- .enable_shift = SCM_GCCR_DMA_CLK_EN_OFFSET,
- .disable = _clk_disable,
-};
-
-static struct clk csi_clk = {
- .parent = &hclk,
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
- .enable = _clk_enable,
- .enable_reg = SCM_GCCR,
- .enable_shift = SCM_GCCR_CSI_CLK_EN_OFFSET,
- .disable = _clk_disable,
-};
-
-static struct clk mma_clk = {
- .parent = &hclk,
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
- .enable = _clk_enable,
- .enable_reg = SCM_GCCR,
- .enable_shift = SCM_GCCR_MMA_CLK_EN_OFFSET,
- .disable = _clk_disable,
-};
-
-static struct clk usbd_clk = {
- .parent = &clk48m,
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
- .enable = _clk_enable,
- .enable_reg = SCM_GCCR,
- .enable_shift = SCM_GCCR_USBD_CLK_EN_OFFSET,
- .disable = _clk_disable,
-};
-
-static struct clk gpt_clk = {
- .parent = &perclk[0],
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
-};
-
-static struct clk uart_clk = {
- .parent = &perclk[0],
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
-};
-
-static struct clk i2c_clk = {
- .parent = &hclk,
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
-};
-
-static struct clk spi_clk = {
- .parent = &perclk[1],
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
-};
-
-static struct clk sdhc_clk = {
- .parent = &perclk[1],
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
-};
-
-static struct clk lcdc_clk = {
- .parent = &perclk[1],
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
-};
-
-static struct clk mshc_clk = {
- .parent = &hclk,
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
-};
-
-static struct clk ssi_clk = {
- .parent = &perclk[2],
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
-};
-
-static struct clk rtc_clk = {
- .parent = &clk32,
-};
-
-#define _REGISTER_CLOCK(d, n, c) \
- { \
- .dev_id = d, \
- .con_id = n, \
- .clk = &c, \
- },
-static struct clk_lookup lookups[] __initdata = {
- _REGISTER_CLOCK(NULL, "dma", dma_clk)
- _REGISTER_CLOCK("mx1-camera.0", NULL, csi_clk)
- _REGISTER_CLOCK(NULL, "mma", mma_clk)
- _REGISTER_CLOCK("imx_udc.0", NULL, usbd_clk)
- _REGISTER_CLOCK(NULL, "gpt", gpt_clk)
- _REGISTER_CLOCK("imx1-uart.0", NULL, uart_clk)
- _REGISTER_CLOCK("imx1-uart.1", NULL, uart_clk)
- _REGISTER_CLOCK("imx1-uart.2", NULL, uart_clk)
- _REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk)
- _REGISTER_CLOCK("imx1-cspi.0", NULL, spi_clk)
- _REGISTER_CLOCK("imx1-cspi.1", NULL, spi_clk)
- _REGISTER_CLOCK("imx-mmc.0", NULL, sdhc_clk)
- _REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk)
- _REGISTER_CLOCK(NULL, "mshc", mshc_clk)
- _REGISTER_CLOCK(NULL, "ssi", ssi_clk)
- _REGISTER_CLOCK("mxc_rtc.0", NULL, rtc_clk)
-};
-
-int __init mx1_clocks_init(unsigned long fref)
-{
- unsigned int reg;
-
- /* disable the clocks we are able to turn off */
- __raw_writel(0, SCM_GCCR);
-
- clk32_rate = fref;
- reg = __raw_readl(CCM_CSCR);
-
- /* detect clock reference for system PLL */
- if (reg & CCM_CSCR_SYSTEM_SEL) {
- prem_clk.parent = &clk16m;
- } else {
- /* ensure that oscillator is disabled */
- reg &= ~(1 << CCM_CSCR_OSC_EN_SHIFT);
- __raw_writel(reg, CCM_CSCR);
- prem_clk.parent = &clk32_premult;
- }
-
- /* detect reference for CLKO */
- reg = (reg & CCM_CSCR_CLKO_MASK) >> CCM_CSCR_CLKO_OFFSET;
- clko_clk.parent = (struct clk *)clko_clocks[reg];
-
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
- clk_enable(&hclk);
- clk_enable(&fclk);
-
- mxc_timer_init(&gpt_clk, MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR),
- MX1_TIM1_INT);
-
- return 0;
-}
diff --git a/arch/arm/mach-imx/clock-imx21.c b/arch/arm/mach-imx/clock-imx21.c
deleted file mode 100644
index ee15d8c9db0..00000000000
--- a/arch/arm/mach-imx/clock-imx21.c
+++ /dev/null
@@ -1,1239 +0,0 @@
-/*
- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- * Copyright 2008 Martin Fuzzey, mfuzzey@gmail.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/clkdev.h>
-
-#include <mach/clock.h>
-#include <mach/hardware.h>
-#include <mach/common.h>
-#include <asm/div64.h>
-
-#define IO_ADDR_CCM(off) (MX21_IO_ADDRESS(MX21_CCM_BASE_ADDR + (off)))
-
-/* Register offsets */
-#define CCM_CSCR IO_ADDR_CCM(0x0)
-#define CCM_MPCTL0 IO_ADDR_CCM(0x4)
-#define CCM_MPCTL1 IO_ADDR_CCM(0x8)
-#define CCM_SPCTL0 IO_ADDR_CCM(0xc)
-#define CCM_SPCTL1 IO_ADDR_CCM(0x10)
-#define CCM_OSC26MCTL IO_ADDR_CCM(0x14)
-#define CCM_PCDR0 IO_ADDR_CCM(0x18)
-#define CCM_PCDR1 IO_ADDR_CCM(0x1c)
-#define CCM_PCCR0 IO_ADDR_CCM(0x20)
-#define CCM_PCCR1 IO_ADDR_CCM(0x24)
-#define CCM_CCSR IO_ADDR_CCM(0x28)
-#define CCM_PMCTL IO_ADDR_CCM(0x2c)
-#define CCM_PMCOUNT IO_ADDR_CCM(0x30)
-#define CCM_WKGDCTL IO_ADDR_CCM(0x34)
-
-#define CCM_CSCR_PRESC_OFFSET 29
-#define CCM_CSCR_PRESC_MASK (0x7 << CCM_CSCR_PRESC_OFFSET)
-
-#define CCM_CSCR_USB_OFFSET 26
-#define CCM_CSCR_USB_MASK (0x7 << CCM_CSCR_USB_OFFSET)
-#define CCM_CSCR_SD_OFFSET 24
-#define CCM_CSCR_SD_MASK (0x3 << CCM_CSCR_SD_OFFSET)
-#define CCM_CSCR_SPLLRES (1 << 22)
-#define CCM_CSCR_MPLLRES (1 << 21)
-#define CCM_CSCR_SSI2_OFFSET 20
-#define CCM_CSCR_SSI2 (1 << CCM_CSCR_SSI2_OFFSET)
-#define CCM_CSCR_SSI1_OFFSET 19
-#define CCM_CSCR_SSI1 (1 << CCM_CSCR_SSI1_OFFSET)
-#define CCM_CSCR_FIR_OFFSET 18
-#define CCM_CSCR_FIR (1 << CCM_CSCR_FIR_OFFSET)
-#define CCM_CSCR_SP (1 << 17)
-#define CCM_CSCR_MCU (1 << 16)
-#define CCM_CSCR_BCLK_OFFSET 10
-#define CCM_CSCR_BCLK_MASK (0xf << CCM_CSCR_BCLK_OFFSET)
-#define CCM_CSCR_IPDIV_OFFSET 9
-#define CCM_CSCR_IPDIV (1 << CCM_CSCR_IPDIV_OFFSET)
-
-#define CCM_CSCR_OSC26MDIV (1 << 4)
-#define CCM_CSCR_OSC26M (1 << 3)
-#define CCM_CSCR_FPM (1 << 2)
-#define CCM_CSCR_SPEN (1 << 1)
-#define CCM_CSCR_MPEN 1
-
-#define CCM_MPCTL0_CPLM (1 << 31)
-#define CCM_MPCTL0_PD_OFFSET 26
-#define CCM_MPCTL0_PD_MASK (0xf << 26)
-#define CCM_MPCTL0_MFD_OFFSET 16
-#define CCM_MPCTL0_MFD_MASK (0x3ff << 16)
-#define CCM_MPCTL0_MFI_OFFSET 10
-#define CCM_MPCTL0_MFI_MASK (0xf << 10)
-#define CCM_MPCTL0_MFN_OFFSET 0
-#define CCM_MPCTL0_MFN_MASK 0x3ff
-
-#define CCM_MPCTL1_LF (1 << 15)
-#define CCM_MPCTL1_BRMO (1 << 6)
-
-#define CCM_SPCTL0_CPLM (1 << 31)
-#define CCM_SPCTL0_PD_OFFSET 26
-#define CCM_SPCTL0_PD_MASK (0xf << 26)
-#define CCM_SPCTL0_MFD_OFFSET 16
-#define CCM_SPCTL0_MFD_MASK (0x3ff << 16)
-#define CCM_SPCTL0_MFI_OFFSET 10
-#define CCM_SPCTL0_MFI_MASK (0xf << 10)
-#define CCM_SPCTL0_MFN_OFFSET 0
-#define CCM_SPCTL0_MFN_MASK 0x3ff
-
-#define CCM_SPCTL1_LF (1 << 15)
-#define CCM_SPCTL1_BRMO (1 << 6)
-
-#define CCM_OSC26MCTL_PEAK_OFFSET 16
-#define CCM_OSC26MCTL_PEAK_MASK (0x3 << 16)
-#define CCM_OSC26MCTL_AGC_OFFSET 8
-#define CCM_OSC26MCTL_AGC_MASK (0x3f << 8)
-#define CCM_OSC26MCTL_ANATEST_OFFSET 0
-#define CCM_OSC26MCTL_ANATEST_MASK 0x3f
-
-#define CCM_PCDR0_SSI2BAUDDIV_OFFSET 26
-#define CCM_PCDR0_SSI2BAUDDIV_MASK (0x3f << 26)
-#define CCM_PCDR0_SSI1BAUDDIV_OFFSET 16
-#define CCM_PCDR0_SSI1BAUDDIV_MASK (0x3f << 16)
-#define CCM_PCDR0_NFCDIV_OFFSET 12
-#define CCM_PCDR0_NFCDIV_MASK (0xf << 12)
-#define CCM_PCDR0_48MDIV_OFFSET 5
-#define CCM_PCDR0_48MDIV_MASK (0x7 << CCM_PCDR0_48MDIV_OFFSET)
-#define CCM_PCDR0_FIRIDIV_OFFSET 0
-#define CCM_PCDR0_FIRIDIV_MASK 0x1f
-#define CCM_PCDR1_PERDIV4_OFFSET 24
-#define CCM_PCDR1_PERDIV4_MASK (0x3f << 24)
-#define CCM_PCDR1_PERDIV3_OFFSET 16
-#define CCM_PCDR1_PERDIV3_MASK (0x3f << 16)
-#define CCM_PCDR1_PERDIV2_OFFSET 8
-#define CCM_PCDR1_PERDIV2_MASK (0x3f << 8)
-#define CCM_PCDR1_PERDIV1_OFFSET 0
-#define CCM_PCDR1_PERDIV1_MASK 0x3f
-
-#define CCM_PCCR_HCLK_CSI_OFFSET 31
-#define CCM_PCCR_HCLK_CSI_REG CCM_PCCR0
-#define CCM_PCCR_HCLK_DMA_OFFSET 30
-#define CCM_PCCR_HCLK_DMA_REG CCM_PCCR0
-#define CCM_PCCR_HCLK_BROM_OFFSET 28
-#define CCM_PCCR_HCLK_BROM_REG CCM_PCCR0
-#define CCM_PCCR_HCLK_EMMA_OFFSET 27
-#define CCM_PCCR_HCLK_EMMA_REG CCM_PCCR0
-#define CCM_PCCR_HCLK_LCDC_OFFSET 26
-#define CCM_PCCR_HCLK_LCDC_REG CCM_PCCR0
-#define CCM_PCCR_HCLK_SLCDC_OFFSET 25
-#define CCM_PCCR_HCLK_SLCDC_REG CCM_PCCR0
-#define CCM_PCCR_HCLK_USBOTG_OFFSET 24
-#define CCM_PCCR_HCLK_USBOTG_REG CCM_PCCR0
-#define CCM_PCCR_HCLK_BMI_OFFSET 23
-#define CCM_PCCR_BMI_MASK (1 << CCM_PCCR_HCLK_BMI_OFFSET)
-#define CCM_PCCR_HCLK_BMI_REG CCM_PCCR0
-#define CCM_PCCR_PERCLK4_OFFSET 22
-#define CCM_PCCR_PERCLK4_REG CCM_PCCR0
-#define CCM_PCCR_SLCDC_OFFSET 21
-#define CCM_PCCR_SLCDC_REG CCM_PCCR0
-#define CCM_PCCR_FIRI_BAUD_OFFSET 20
-#define CCM_PCCR_FIRI_BAUD_MASK (1 << CCM_PCCR_FIRI_BAUD_OFFSET)
-#define CCM_PCCR_FIRI_BAUD_REG CCM_PCCR0
-#define CCM_PCCR_NFC_OFFSET 19
-#define CCM_PCCR_NFC_REG CCM_PCCR0
-#define CCM_PCCR_LCDC_OFFSET 18
-#define CCM_PCCR_LCDC_REG CCM_PCCR0
-#define CCM_PCCR_SSI1_BAUD_OFFSET 17
-#define CCM_PCCR_SSI1_BAUD_REG CCM_PCCR0
-#define CCM_PCCR_SSI2_BAUD_OFFSET 16
-#define CCM_PCCR_SSI2_BAUD_REG CCM_PCCR0
-#define CCM_PCCR_EMMA_OFFSET 15
-#define CCM_PCCR_EMMA_REG CCM_PCCR0
-#define CCM_PCCR_USBOTG_OFFSET 14
-#define CCM_PCCR_USBOTG_REG CCM_PCCR0
-#define CCM_PCCR_DMA_OFFSET 13
-#define CCM_PCCR_DMA_REG CCM_PCCR0
-#define CCM_PCCR_I2C1_OFFSET 12
-#define CCM_PCCR_I2C1_REG CCM_PCCR0
-#define CCM_PCCR_GPIO_OFFSET 11
-#define CCM_PCCR_GPIO_REG CCM_PCCR0
-#define CCM_PCCR_SDHC2_OFFSET 10
-#define CCM_PCCR_SDHC2_REG CCM_PCCR0
-#define CCM_PCCR_SDHC1_OFFSET 9
-#define CCM_PCCR_SDHC1_REG CCM_PCCR0
-#define CCM_PCCR_FIRI_OFFSET 8
-#define CCM_PCCR_FIRI_MASK (1 << CCM_PCCR_FIRI_OFFSET)
-#define CCM_PCCR_FIRI_REG CCM_PCCR0
-#define CCM_PCCR_SSI2_IPG_OFFSET 7
-#define CCM_PCCR_SSI2_REG CCM_PCCR0
-#define CCM_PCCR_SSI1_IPG_OFFSET 6
-#define CCM_PCCR_SSI1_REG CCM_PCCR0
-#define CCM_PCCR_CSPI2_OFFSET 5
-#define CCM_PCCR_CSPI2_REG CCM_PCCR0
-#define CCM_PCCR_CSPI1_OFFSET 4
-#define CCM_PCCR_CSPI1_REG CCM_PCCR0
-#define CCM_PCCR_UART4_OFFSET 3
-#define CCM_PCCR_UART4_REG CCM_PCCR0
-#define CCM_PCCR_UART3_OFFSET 2
-#define CCM_PCCR_UART3_REG CCM_PCCR0
-#define CCM_PCCR_UART2_OFFSET 1
-#define CCM_PCCR_UART2_REG CCM_PCCR0
-#define CCM_PCCR_UART1_OFFSET 0
-#define CCM_PCCR_UART1_REG CCM_PCCR0
-
-#define CCM_PCCR_OWIRE_OFFSET 31
-#define CCM_PCCR_OWIRE_REG CCM_PCCR1
-#define CCM_PCCR_KPP_OFFSET 30
-#define CCM_PCCR_KPP_REG CCM_PCCR1
-#define CCM_PCCR_RTC_OFFSET 29
-#define CCM_PCCR_RTC_REG CCM_PCCR1
-#define CCM_PCCR_PWM_OFFSET 28
-#define CCM_PCCR_PWM_REG CCM_PCCR1
-#define CCM_PCCR_GPT3_OFFSET 27
-#define CCM_PCCR_GPT3_REG CCM_PCCR1
-#define CCM_PCCR_GPT2_OFFSET 26
-#define CCM_PCCR_GPT2_REG CCM_PCCR1
-#define CCM_PCCR_GPT1_OFFSET 25
-#define CCM_PCCR_GPT1_REG CCM_PCCR1
-#define CCM_PCCR_WDT_OFFSET 24
-#define CCM_PCCR_WDT_REG CCM_PCCR1
-#define CCM_PCCR_CSPI3_OFFSET 23
-#define CCM_PCCR_CSPI3_REG CCM_PCCR1
-
-#define CCM_PCCR_CSPI1_MASK (1 << CCM_PCCR_CSPI1_OFFSET)
-#define CCM_PCCR_CSPI2_MASK (1 << CCM_PCCR_CSPI2_OFFSET)
-#define CCM_PCCR_CSPI3_MASK (1 << CCM_PCCR_CSPI3_OFFSET)
-#define CCM_PCCR_DMA_MASK (1 << CCM_PCCR_DMA_OFFSET)
-#define CCM_PCCR_EMMA_MASK (1 << CCM_PCCR_EMMA_OFFSET)
-#define CCM_PCCR_GPIO_MASK (1 << CCM_PCCR_GPIO_OFFSET)
-#define CCM_PCCR_GPT1_MASK (1 << CCM_PCCR_GPT1_OFFSET)
-#define CCM_PCCR_GPT2_MASK (1 << CCM_PCCR_GPT2_OFFSET)
-#define CCM_PCCR_GPT3_MASK (1 << CCM_PCCR_GPT3_OFFSET)
-#define CCM_PCCR_HCLK_BROM_MASK (1 << CCM_PCCR_HCLK_BROM_OFFSET)
-#define CCM_PCCR_HCLK_CSI_MASK (1 << CCM_PCCR_HCLK_CSI_OFFSET)
-#define CCM_PCCR_HCLK_DMA_MASK (1 << CCM_PCCR_HCLK_DMA_OFFSET)
-#define CCM_PCCR_HCLK_EMMA_MASK (1 << CCM_PCCR_HCLK_EMMA_OFFSET)
-#define CCM_PCCR_HCLK_LCDC_MASK (1 << CCM_PCCR_HCLK_LCDC_OFFSET)
-#define CCM_PCCR_HCLK_SLCDC_MASK (1 << CCM_PCCR_HCLK_SLCDC_OFFSET)
-#define CCM_PCCR_HCLK_USBOTG_MASK (1 << CCM_PCCR_HCLK_USBOTG_OFFSET)
-#define CCM_PCCR_I2C1_MASK (1 << CCM_PCCR_I2C1_OFFSET)
-#define CCM_PCCR_KPP_MASK (1 << CCM_PCCR_KPP_OFFSET)
-#define CCM_PCCR_LCDC_MASK (1 << CCM_PCCR_LCDC_OFFSET)
-#define CCM_PCCR_NFC_MASK (1 << CCM_PCCR_NFC_OFFSET)
-#define CCM_PCCR_OWIRE_MASK (1 << CCM_PCCR_OWIRE_OFFSET)
-#define CCM_PCCR_PERCLK4_MASK (1 << CCM_PCCR_PERCLK4_OFFSET)
-#define CCM_PCCR_PWM_MASK (1 << CCM_PCCR_PWM_OFFSET)
-#define CCM_PCCR_RTC_MASK (1 << CCM_PCCR_RTC_OFFSET)
-#define CCM_PCCR_SDHC1_MASK (1 << CCM_PCCR_SDHC1_OFFSET)
-#define CCM_PCCR_SDHC2_MASK (1 << CCM_PCCR_SDHC2_OFFSET)
-#define CCM_PCCR_SLCDC_MASK (1 << CCM_PCCR_SLCDC_OFFSET)
-#define CCM_PCCR_SSI1_BAUD_MASK (1 << CCM_PCCR_SSI1_BAUD_OFFSET)
-#define CCM_PCCR_SSI1_IPG_MASK (1 << CCM_PCCR_SSI1_IPG_OFFSET)
-#define CCM_PCCR_SSI2_BAUD_MASK (1 << CCM_PCCR_SSI2_BAUD_OFFSET)
-#define CCM_PCCR_SSI2_IPG_MASK (1 << CCM_PCCR_SSI2_IPG_OFFSET)
-#define CCM_PCCR_UART1_MASK (1 << CCM_PCCR_UART1_OFFSET)
-#define CCM_PCCR_UART2_MASK (1 << CCM_PCCR_UART2_OFFSET)
-#define CCM_PCCR_UART3_MASK (1 << CCM_PCCR_UART3_OFFSET)
-#define CCM_PCCR_UART4_MASK (1 << CCM_PCCR_UART4_OFFSET)
-#define CCM_PCCR_USBOTG_MASK (1 << CCM_PCCR_USBOTG_OFFSET)
-#define CCM_PCCR_WDT_MASK (1 << CCM_PCCR_WDT_OFFSET)
-
-#define CCM_CCSR_32KSR (1 << 15)
-
-#define CCM_CCSR_CLKMODE1 (1 << 9)
-#define CCM_CCSR_CLKMODE0 (1 << 8)
-
-#define CCM_CCSR_CLKOSEL_OFFSET 0
-#define CCM_CCSR_CLKOSEL_MASK 0x1f
-
-#define SYS_FMCR 0x14 /* Functional Muxing Control Reg */
-#define SYS_CHIP_ID 0x00 /* The offset of CHIP ID register */
-
-static int _clk_enable(struct clk *clk)
-{
- u32 reg;
-
- reg = __raw_readl(clk->enable_reg);
- reg |= 1 << clk->enable_shift;
- __raw_writel(reg, clk->enable_reg);
- return 0;
-}
-
-static void _clk_disable(struct clk *clk)
-{
- u32 reg;
-
- reg = __raw_readl(clk->enable_reg);
- reg &= ~(1 << clk->enable_shift);
- __raw_writel(reg, clk->enable_reg);
-}
-
-static unsigned long _clk_generic_round_rate(struct clk *clk,
- unsigned long rate,
- u32 max_divisor)
-{
- u32 div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
- if (parent_rate % rate)
- div++;
-
- if (div > max_divisor)
- div = max_divisor;
-
- return parent_rate / div;
-}
-
-static int _clk_spll_enable(struct clk *clk)
-{
- u32 reg;
-
- reg = __raw_readl(CCM_CSCR);
- reg |= CCM_CSCR_SPEN;
- __raw_writel(reg, CCM_CSCR);
-
- while ((__raw_readl(CCM_SPCTL1) & CCM_SPCTL1_LF) == 0)
- ;
- return 0;
-}
-
-static void _clk_spll_disable(struct clk *clk)
-{
- u32 reg;
-
- reg = __raw_readl(CCM_CSCR);
- reg &= ~CCM_CSCR_SPEN;
- __raw_writel(reg, CCM_CSCR);
-}
-
-
-#define CSCR() (__raw_readl(CCM_CSCR))
-#define PCDR0() (__raw_readl(CCM_PCDR0))
-#define PCDR1() (__raw_readl(CCM_PCDR1))
-
-static unsigned long _clk_perclkx_round_rate(struct clk *clk,
- unsigned long rate)
-{
- return _clk_generic_round_rate(clk, rate, 64);
-}
-
-static int _clk_perclkx_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 reg;
- u32 div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- if (clk->id < 0 || clk->id > 3)
- return -EINVAL;
-
- div = parent_rate / rate;
- if (div > 64 || div < 1 || ((parent_rate / div) != rate))
- return -EINVAL;
- div--;
-
- reg =
- __raw_readl(CCM_PCDR1) & ~(CCM_PCDR1_PERDIV1_MASK <<
- (clk->id << 3));
- reg |= div << (clk->id << 3);
- __raw_writel(reg, CCM_PCDR1);
-
- return 0;
-}
-
-static unsigned long _clk_usb_recalc(struct clk *clk)
-{
- unsigned long usb_pdf;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- usb_pdf = (CSCR() & CCM_CSCR_USB_MASK) >> CCM_CSCR_USB_OFFSET;
-
- return parent_rate / (usb_pdf + 1U);
-}
-
-static unsigned long _clk_usb_round_rate(struct clk *clk,
- unsigned long rate)
-{
- return _clk_generic_round_rate(clk, rate, 8);
-}
-
-static int _clk_usb_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 reg;
- u32 div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
- if (div > 8 || div < 1 || ((parent_rate / div) != rate))
- return -EINVAL;
- div--;
-
- reg = CSCR() & ~CCM_CSCR_USB_MASK;
- reg |= div << CCM_CSCR_USB_OFFSET;
- __raw_writel(reg, CCM_CSCR);
-
- return 0;
-}
-
-static unsigned long _clk_ssix_recalc(struct clk *clk, unsigned long pdf)
-{
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- pdf = (pdf < 2) ? 124UL : pdf; /* MX21 & MX27 TO1 */
-
- return 2UL * parent_rate / pdf;
-}
-
-static unsigned long _clk_ssi1_recalc(struct clk *clk)
-{
- return _clk_ssix_recalc(clk,
- (PCDR0() & CCM_PCDR0_SSI1BAUDDIV_MASK)
- >> CCM_PCDR0_SSI1BAUDDIV_OFFSET);
-}
-
-static unsigned long _clk_ssi2_recalc(struct clk *clk)
-{
- return _clk_ssix_recalc(clk,
- (PCDR0() & CCM_PCDR0_SSI2BAUDDIV_MASK) >>
- CCM_PCDR0_SSI2BAUDDIV_OFFSET);
-}
-
-static unsigned long _clk_nfc_recalc(struct clk *clk)
-{
- unsigned long nfc_pdf;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- nfc_pdf = (PCDR0() & CCM_PCDR0_NFCDIV_MASK)
- >> CCM_PCDR0_NFCDIV_OFFSET;
-
- return parent_rate / (nfc_pdf + 1);
-}
-
-static unsigned long _clk_parent_round_rate(struct clk *clk, unsigned long rate)
-{
- return clk->parent->round_rate(clk->parent, rate);
-}
-
-static int _clk_parent_set_rate(struct clk *clk, unsigned long rate)
-{
- return clk->parent->set_rate(clk->parent, rate);
-}
-
-static unsigned long external_high_reference; /* in Hz */
-
-static unsigned long get_high_reference_clock_rate(struct clk *clk)
-{
- return external_high_reference;
-}
-
-/*
- * the high frequency external clock reference
- * Default case is 26MHz.
- */
-static struct clk ckih_clk = {
- .get_rate = get_high_reference_clock_rate,
-};
-
-static unsigned long external_low_reference; /* in Hz */
-
-static unsigned long get_low_reference_clock_rate(struct clk *clk)
-{
- return external_low_reference;
-}
-
-/*
- * the low frequency external clock reference
- * Default case is 32.768kHz.
- */
-static struct clk ckil_clk = {
- .get_rate = get_low_reference_clock_rate,
-};
-
-
-static unsigned long _clk_fpm_recalc(struct clk *clk)
-{
- return clk_get_rate(clk->parent) * 512;
-}
-
-/* Output of frequency pre multiplier */
-static struct clk fpm_clk = {
- .parent = &ckil_clk,
- .get_rate = _clk_fpm_recalc,
-};
-
-static unsigned long get_mpll_clk(struct clk *clk)
-{
- uint32_t reg;
- unsigned long ref_clk;
- unsigned long mfi = 0, mfn = 0, mfd = 0, pdf = 0;
- unsigned long long temp;
-
- ref_clk = clk_get_rate(clk->parent);
-
- reg = __raw_readl(CCM_MPCTL0);
- pdf = (reg & CCM_MPCTL0_PD_MASK) >> CCM_MPCTL0_PD_OFFSET;
- mfd = (reg & CCM_MPCTL0_MFD_MASK) >> CCM_MPCTL0_MFD_OFFSET;
- mfi = (reg & CCM_MPCTL0_MFI_MASK) >> CCM_MPCTL0_MFI_OFFSET;
- mfn = (reg & CCM_MPCTL0_MFN_MASK) >> CCM_MPCTL0_MFN_OFFSET;
-
- mfi = (mfi <= 5) ? 5 : mfi;
- temp = 2LL * ref_clk * mfn;
- do_div(temp, mfd + 1);
- temp = 2LL * ref_clk * mfi + temp;
- do_div(temp, pdf + 1);
-
- return (unsigned long)temp;
-}
-
-static struct clk mpll_clk = {
- .parent = &ckih_clk,
- .get_rate = get_mpll_clk,
-};
-
-static unsigned long _clk_fclk_get_rate(struct clk *clk)
-{
- unsigned long parent_rate;
- u32 div;
-
- div = (CSCR() & CCM_CSCR_PRESC_MASK) >> CCM_CSCR_PRESC_OFFSET;
- parent_rate = clk_get_rate(clk->parent);
-
- return parent_rate / (div+1);
-}
-
-static struct clk fclk_clk = {
- .parent = &mpll_clk,
- .get_rate = _clk_fclk_get_rate
-};
-
-static unsigned long get_spll_clk(struct clk *clk)
-{
- uint32_t reg;
- unsigned long ref_clk;
- unsigned long mfi = 0, mfn = 0, mfd = 0, pdf = 0;
- unsigned long long temp;
-
- ref_clk = clk_get_rate(clk->parent);
-
- reg = __raw_readl(CCM_SPCTL0);
- pdf = (reg & CCM_SPCTL0_PD_MASK) >> CCM_SPCTL0_PD_OFFSET;
- mfd = (reg & CCM_SPCTL0_MFD_MASK) >> CCM_SPCTL0_MFD_OFFSET;
- mfi = (reg & CCM_SPCTL0_MFI_MASK) >> CCM_SPCTL0_MFI_OFFSET;
- mfn = (reg & CCM_SPCTL0_MFN_MASK) >> CCM_SPCTL0_MFN_OFFSET;
-
- mfi = (mfi <= 5) ? 5 : mfi;
- temp = 2LL * ref_clk * mfn;
- do_div(temp, mfd + 1);
- temp = 2LL * ref_clk * mfi + temp;
- do_div(temp, pdf + 1);
-
- return (unsigned long)temp;
-}
-
-static struct clk spll_clk = {
- .parent = &ckih_clk,
- .get_rate = get_spll_clk,
- .enable = _clk_spll_enable,
- .disable = _clk_spll_disable,
-};
-
-static unsigned long get_hclk_clk(struct clk *clk)
-{
- unsigned long rate;
- unsigned long bclk_pdf;
-
- bclk_pdf = (CSCR() & CCM_CSCR_BCLK_MASK)
- >> CCM_CSCR_BCLK_OFFSET;
-
- rate = clk_get_rate(clk->parent);
- return rate / (bclk_pdf + 1);
-}
-
-static struct clk hclk_clk = {
- .parent = &fclk_clk,
- .get_rate = get_hclk_clk,
-};
-
-static unsigned long get_ipg_clk(struct clk *clk)
-{
- unsigned long rate;
- unsigned long ipg_pdf;
-
- ipg_pdf = (CSCR() & CCM_CSCR_IPDIV) >> CCM_CSCR_IPDIV_OFFSET;
-
- rate = clk_get_rate(clk->parent);
- return rate / (ipg_pdf + 1);
-}
-
-static struct clk ipg_clk = {
- .parent = &hclk_clk,
- .get_rate = get_ipg_clk,
-};
-
-static unsigned long _clk_perclkx_recalc(struct clk *clk)
-{
- unsigned long perclk_pdf;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- if (clk->id < 0 || clk->id > 3)
- return 0;
-
- perclk_pdf = (PCDR1() >> (clk->id << 3)) & CCM_PCDR1_PERDIV1_MASK;
-
- return parent_rate / (perclk_pdf + 1);
-}
-
-static struct clk per_clk[] = {
- {
- .id = 0,
- .parent = &mpll_clk,
- .get_rate = _clk_perclkx_recalc,
- }, {
- .id = 1,
- .parent = &mpll_clk,
- .get_rate = _clk_perclkx_recalc,
- }, {
- .id = 2,
- .parent = &mpll_clk,
- .round_rate = _clk_perclkx_round_rate,
- .set_rate = _clk_perclkx_set_rate,
- .get_rate = _clk_perclkx_recalc,
- /* Enable/Disable done via lcdc_clk[1] */
- }, {
- .id = 3,
- .parent = &mpll_clk,
- .round_rate = _clk_perclkx_round_rate,
- .set_rate = _clk_perclkx_set_rate,
- .get_rate = _clk_perclkx_recalc,
- /* Enable/Disable done via csi_clk[1] */
- },
-};
-
-static struct clk uart_ipg_clk[];
-
-static struct clk uart_clk[] = {
- {
- .id = 0,
- .parent = &per_clk[0],
- .secondary = &uart_ipg_clk[0],
- }, {
- .id = 1,
- .parent = &per_clk[0],
- .secondary = &uart_ipg_clk[1],
- }, {
- .id = 2,
- .parent = &per_clk[0],
- .secondary = &uart_ipg_clk[2],
- }, {
- .id = 3,
- .parent = &per_clk[0],
- .secondary = &uart_ipg_clk[3],
- },
-};
-
-static struct clk uart_ipg_clk[] = {
- {
- .id = 0,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_UART1_REG,
- .enable_shift = CCM_PCCR_UART1_OFFSET,
- .disable = _clk_disable,
- }, {
- .id = 1,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_UART2_REG,
- .enable_shift = CCM_PCCR_UART2_OFFSET,
- .disable = _clk_disable,
- }, {
- .id = 2,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_UART3_REG,
- .enable_shift = CCM_PCCR_UART3_OFFSET,
- .disable = _clk_disable,
- }, {
- .id = 3,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_UART4_REG,
- .enable_shift = CCM_PCCR_UART4_OFFSET,
- .disable = _clk_disable,
- },
-};
-
-static struct clk gpt_ipg_clk[];
-
-static struct clk gpt_clk[] = {
- {
- .id = 0,
- .parent = &per_clk[0],
- .secondary = &gpt_ipg_clk[0],
- }, {
- .id = 1,
- .parent = &per_clk[0],
- .secondary = &gpt_ipg_clk[1],
- }, {
- .id = 2,
- .parent = &per_clk[0],
- .secondary = &gpt_ipg_clk[2],
- },
-};
-
-static struct clk gpt_ipg_clk[] = {
- {
- .id = 0,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_GPT1_REG,
- .enable_shift = CCM_PCCR_GPT1_OFFSET,
- .disable = _clk_disable,
- }, {
- .id = 1,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_GPT2_REG,
- .enable_shift = CCM_PCCR_GPT2_OFFSET,
- .disable = _clk_disable,
- }, {
- .id = 2,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_GPT3_REG,
- .enable_shift = CCM_PCCR_GPT3_OFFSET,
- .disable = _clk_disable,
- },
-};
-
-static struct clk pwm_clk[] = {
- {
- .parent = &per_clk[0],
- .secondary = &pwm_clk[1],
- }, {
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_PWM_REG,
- .enable_shift = CCM_PCCR_PWM_OFFSET,
- .disable = _clk_disable,
- },
-};
-
-static struct clk sdhc_ipg_clk[];
-
-static struct clk sdhc_clk[] = {
- {
- .id = 0,
- .parent = &per_clk[1],
- .secondary = &sdhc_ipg_clk[0],
- }, {
- .id = 1,
- .parent = &per_clk[1],
- .secondary = &sdhc_ipg_clk[1],
- },
-};
-
-static struct clk sdhc_ipg_clk[] = {
- {
- .id = 0,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_SDHC1_REG,
- .enable_shift = CCM_PCCR_SDHC1_OFFSET,
- .disable = _clk_disable,
- }, {
- .id = 1,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_SDHC2_REG,
- .enable_shift = CCM_PCCR_SDHC2_OFFSET,
- .disable = _clk_disable,
- },
-};
-
-static struct clk cspi_ipg_clk[];
-
-static struct clk cspi_clk[] = {
- {
- .id = 0,
- .parent = &per_clk[1],
- .secondary = &cspi_ipg_clk[0],
- }, {
- .id = 1,
- .parent = &per_clk[1],
- .secondary = &cspi_ipg_clk[1],
- }, {
- .id = 2,
- .parent = &per_clk[1],
- .secondary = &cspi_ipg_clk[2],
- },
-};
-
-static struct clk cspi_ipg_clk[] = {
- {
- .id = 0,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_CSPI1_REG,
- .enable_shift = CCM_PCCR_CSPI1_OFFSET,
- .disable = _clk_disable,
- }, {
- .id = 1,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_CSPI2_REG,
- .enable_shift = CCM_PCCR_CSPI2_OFFSET,
- .disable = _clk_disable,
- }, {
- .id = 3,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_CSPI3_REG,
- .enable_shift = CCM_PCCR_CSPI3_OFFSET,
- .disable = _clk_disable,
- },
-};
-
-static struct clk lcdc_clk[] = {
- {
- .parent = &per_clk[2],
- .secondary = &lcdc_clk[1],
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
- }, {
- .parent = &ipg_clk,
- .secondary = &lcdc_clk[2],
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_LCDC_REG,
- .enable_shift = CCM_PCCR_LCDC_OFFSET,
- .disable = _clk_disable,
- }, {
- .parent = &hclk_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_HCLK_LCDC_REG,
- .enable_shift = CCM_PCCR_HCLK_LCDC_OFFSET,
- .disable = _clk_disable,
- },
-};
-
-static struct clk csi_clk[] = {
- {
- .parent = &per_clk[3],
- .secondary = &csi_clk[1],
- .round_rate = _clk_parent_round_rate,
- .set_rate = _clk_parent_set_rate,
- }, {
- .parent = &hclk_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_HCLK_CSI_REG,
- .enable_shift = CCM_PCCR_HCLK_CSI_OFFSET,
- .disable = _clk_disable,
- },
-};
-
-static struct clk usb_clk[] = {
- {
- .parent = &spll_clk,
- .secondary = &usb_clk[1],
- .get_rate = _clk_usb_recalc,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_USBOTG_REG,
- .enable_shift = CCM_PCCR_USBOTG_OFFSET,
- .disable = _clk_disable,
- .round_rate = _clk_usb_round_rate,
- .set_rate = _clk_usb_set_rate,
- }, {
- .parent = &hclk_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_HCLK_USBOTG_REG,
- .enable_shift = CCM_PCCR_HCLK_USBOTG_OFFSET,
- .disable = _clk_disable,
- }
-};
-
-static struct clk ssi_ipg_clk[];
-
-static struct clk ssi_clk[] = {
- {
- .id = 0,
- .parent = &mpll_clk,
- .secondary = &ssi_ipg_clk[0],
- .get_rate = _clk_ssi1_recalc,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_SSI1_BAUD_REG,
- .enable_shift = CCM_PCCR_SSI1_BAUD_OFFSET,
- .disable = _clk_disable,
- }, {
- .id = 1,
- .parent = &mpll_clk,
- .secondary = &ssi_ipg_clk[1],
- .get_rate = _clk_ssi2_recalc,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_SSI2_BAUD_REG,
- .enable_shift = CCM_PCCR_SSI2_BAUD_OFFSET,
- .disable = _clk_disable,
- },
-};
-
-static struct clk ssi_ipg_clk[] = {
- {
- .id = 0,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_SSI1_REG,
- .enable_shift = CCM_PCCR_SSI1_IPG_OFFSET,
- .disable = _clk_disable,
- }, {
- .id = 1,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_SSI2_REG,
- .enable_shift = CCM_PCCR_SSI2_IPG_OFFSET,
- .disable = _clk_disable,
- },
-};
-
-
-static struct clk nfc_clk = {
- .parent = &fclk_clk,
- .get_rate = _clk_nfc_recalc,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_NFC_REG,
- .enable_shift = CCM_PCCR_NFC_OFFSET,
- .disable = _clk_disable,
-};
-
-static struct clk dma_clk[] = {
- {
- .parent = &hclk_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_DMA_REG,
- .enable_shift = CCM_PCCR_DMA_OFFSET,
- .disable = _clk_disable,
- .secondary = &dma_clk[1],
- }, {
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_HCLK_DMA_REG,
- .enable_shift = CCM_PCCR_HCLK_DMA_OFFSET,
- .disable = _clk_disable,
- },
-};
-
-static struct clk brom_clk = {
- .parent = &hclk_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_HCLK_BROM_REG,
- .enable_shift = CCM_PCCR_HCLK_BROM_OFFSET,
- .disable = _clk_disable,
-};
-
-static struct clk emma_clk[] = {
- {
- .parent = &hclk_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_EMMA_REG,
- .enable_shift = CCM_PCCR_EMMA_OFFSET,
- .disable = _clk_disable,
- .secondary = &emma_clk[1],
- }, {
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_HCLK_EMMA_REG,
- .enable_shift = CCM_PCCR_HCLK_EMMA_OFFSET,
- .disable = _clk_disable,
- }
-};
-
-static struct clk slcdc_clk[] = {
- {
- .parent = &hclk_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_SLCDC_REG,
- .enable_shift = CCM_PCCR_SLCDC_OFFSET,
- .disable = _clk_disable,
- .secondary = &slcdc_clk[1],
- }, {
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_HCLK_SLCDC_REG,
- .enable_shift = CCM_PCCR_HCLK_SLCDC_OFFSET,
- .disable = _clk_disable,
- }
-};
-
-static struct clk wdog_clk = {
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_WDT_REG,
- .enable_shift = CCM_PCCR_WDT_OFFSET,
- .disable = _clk_disable,
-};
-
-static struct clk gpio_clk = {
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_GPIO_REG,
- .enable_shift = CCM_PCCR_GPIO_OFFSET,
- .disable = _clk_disable,
-};
-
-static struct clk i2c_clk = {
- .id = 0,
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_I2C1_REG,
- .enable_shift = CCM_PCCR_I2C1_OFFSET,
- .disable = _clk_disable,
-};
-
-static struct clk kpp_clk = {
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_KPP_REG,
- .enable_shift = CCM_PCCR_KPP_OFFSET,
- .disable = _clk_disable,
-};
-
-static struct clk owire_clk = {
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_OWIRE_REG,
- .enable_shift = CCM_PCCR_OWIRE_OFFSET,
- .disable = _clk_disable,
-};
-
-static struct clk rtc_clk = {
- .parent = &ipg_clk,
- .enable = _clk_enable,
- .enable_reg = CCM_PCCR_RTC_REG,
- .enable_shift = CCM_PCCR_RTC_OFFSET,
- .disable = _clk_disable,
-};
-
-static unsigned long _clk_clko_round_rate(struct clk *clk, unsigned long rate)
-{
- return _clk_generic_round_rate(clk, rate, 8);
-}
-
-static int _clk_clko_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 reg;
- u32 div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
-
- if (div > 8 || div < 1 || ((parent_rate / div) != rate))
- return -EINVAL;
- div--;
-
- reg = __raw_readl(CCM_PCDR0);
-
- if (clk->parent == &usb_clk[0]) {
- reg &= ~CCM_PCDR0_48MDIV_MASK;
- reg |= div << CCM_PCDR0_48MDIV_OFFSET;
- }
- __raw_writel(reg, CCM_PCDR0);
-
- return 0;
-}
-
-static unsigned long _clk_clko_recalc(struct clk *clk)
-{
- u32 div = 0;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- if (clk->parent == &usb_clk[0]) /* 48M */
- div = (__raw_readl(CCM_PCDR0) & CCM_PCDR0_48MDIV_MASK)
- >> CCM_PCDR0_48MDIV_OFFSET;
- div++;
-
- return parent_rate / div;
-}
-
-static struct clk clko_clk;
-
-static int _clk_clko_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 reg;
-
- reg = __raw_readl(CCM_CCSR) & ~CCM_CCSR_CLKOSEL_MASK;
-
- if (parent == &ckil_clk)
- reg |= 0 << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &fpm_clk)
- reg |= 1 << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &ckih_clk)
- reg |= 2 << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == mpll_clk.parent)
- reg |= 3 << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == spll_clk.parent)
- reg |= 4 << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &mpll_clk)
- reg |= 5 << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &spll_clk)
- reg |= 6 << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &fclk_clk)
- reg |= 7 << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &hclk_clk)
- reg |= 8 << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &ipg_clk)
- reg |= 9 << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &per_clk[0])
- reg |= 0xA << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &per_clk[1])
- reg |= 0xB << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &per_clk[2])
- reg |= 0xC << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &per_clk[3])
- reg |= 0xD << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &ssi_clk[0])
- reg |= 0xE << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &ssi_clk[1])
- reg |= 0xF << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &nfc_clk)
- reg |= 0x10 << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &usb_clk[0])
- reg |= 0x14 << CCM_CCSR_CLKOSEL_OFFSET;
- else if (parent == &clko_clk)
- reg |= 0x15 << CCM_CCSR_CLKOSEL_OFFSET;
- else
- return -EINVAL;
-
- __raw_writel(reg, CCM_CCSR);
-
- return 0;
-}
-
-static struct clk clko_clk = {
- .get_rate = _clk_clko_recalc,
- .set_rate = _clk_clko_set_rate,
- .round_rate = _clk_clko_round_rate,
- .set_parent = _clk_clko_set_parent,
-};
-
-
-#define _REGISTER_CLOCK(d, n, c) \
- { \
- .dev_id = d, \
- .con_id = n, \
- .clk = &c, \
- },
-static struct clk_lookup lookups[] = {
-/* It's unlikely that any driver wants one of them directly:
- _REGISTER_CLOCK(NULL, "ckih", ckih_clk)
- _REGISTER_CLOCK(NULL, "ckil", ckil_clk)
- _REGISTER_CLOCK(NULL, "fpm", fpm_clk)
- _REGISTER_CLOCK(NULL, "mpll", mpll_clk)
- _REGISTER_CLOCK(NULL, "spll", spll_clk)
- _REGISTER_CLOCK(NULL, "fclk", fclk_clk)
- _REGISTER_CLOCK(NULL, "hclk", hclk_clk)
- _REGISTER_CLOCK(NULL, "ipg", ipg_clk)
-*/
- _REGISTER_CLOCK(NULL, "perclk1", per_clk[0])
- _REGISTER_CLOCK(NULL, "perclk2", per_clk[1])
- _REGISTER_CLOCK(NULL, "perclk3", per_clk[2])
- _REGISTER_CLOCK(NULL, "perclk4", per_clk[3])
- _REGISTER_CLOCK(NULL, "clko", clko_clk)
- _REGISTER_CLOCK("imx21-uart.0", NULL, uart_clk[0])
- _REGISTER_CLOCK("imx21-uart.1", NULL, uart_clk[1])
- _REGISTER_CLOCK("imx21-uart.2", NULL, uart_clk[2])
- _REGISTER_CLOCK("imx21-uart.3", NULL, uart_clk[3])
- _REGISTER_CLOCK(NULL, "gpt1", gpt_clk[0])
- _REGISTER_CLOCK(NULL, "gpt1", gpt_clk[1])
- _REGISTER_CLOCK(NULL, "gpt1", gpt_clk[2])
- _REGISTER_CLOCK(NULL, "pwm", pwm_clk[0])
- _REGISTER_CLOCK(NULL, "sdhc1", sdhc_clk[0])
- _REGISTER_CLOCK(NULL, "sdhc2", sdhc_clk[1])
- _REGISTER_CLOCK("imx21-cspi.0", NULL, cspi_clk[0])
- _REGISTER_CLOCK("imx21-cspi.1", NULL, cspi_clk[1])
- _REGISTER_CLOCK("imx21-cspi.2", NULL, cspi_clk[2])
- _REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk[0])
- _REGISTER_CLOCK(NULL, "csi", csi_clk[0])
- _REGISTER_CLOCK("imx21-hcd.0", NULL, usb_clk[0])
- _REGISTER_CLOCK(NULL, "ssi1", ssi_clk[0])
- _REGISTER_CLOCK(NULL, "ssi2", ssi_clk[1])
- _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
- _REGISTER_CLOCK(NULL, "dma", dma_clk[0])
- _REGISTER_CLOCK(NULL, "brom", brom_clk)
- _REGISTER_CLOCK(NULL, "emma", emma_clk[0])
- _REGISTER_CLOCK(NULL, "slcdc", slcdc_clk[0])
- _REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk)
- _REGISTER_CLOCK(NULL, "gpio", gpio_clk)
- _REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk)
- _REGISTER_CLOCK("mxc-keypad", NULL, kpp_clk)
- _REGISTER_CLOCK(NULL, "owire", owire_clk)
- _REGISTER_CLOCK(NULL, "rtc", rtc_clk)
-};
-
-/*
- * must be called very early to get information about the
- * available clock rate when the timer framework starts
- */
-int __init mx21_clocks_init(unsigned long lref, unsigned long href)
-{
- u32 cscr;
-
- external_low_reference = lref;
- external_high_reference = href;
-
- /* detect the clock reference for both system PLLs */
- cscr = CSCR();
- if (cscr & CCM_CSCR_MCU)
- mpll_clk.parent = &ckih_clk;
- else
- mpll_clk.parent = &fpm_clk;
-
- if (cscr & CCM_CSCR_SP)
- spll_clk.parent = &ckih_clk;
- else
- spll_clk.parent = &fpm_clk;
-
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
- /* Turn off all clock gates */
- __raw_writel(0, CCM_PCCR0);
- __raw_writel(CCM_PCCR_GPT1_MASK, CCM_PCCR1);
-
- /* This turns off the serial PLL as well */
- spll_clk.disable(&spll_clk);
-
- /* This will propagate to all children and init all the clock rates. */
- clk_enable(&per_clk[0]);
- clk_enable(&gpio_clk);
-
-#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
- clk_enable(&uart_clk[0]);
-#endif
-
- mxc_timer_init(&gpt_clk[0], MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR),
- MX21_INT_GPT1);
- return 0;
-}
diff --git a/arch/arm/mach-imx/clock-imx25.c b/arch/arm/mach-imx/clock-imx25.c
deleted file mode 100644
index b0fec74c8c9..00000000000
--- a/arch/arm/mach-imx/clock-imx25.c
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
- * Copyright (C) 2009 by Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
-
-#include <mach/clock.h>
-#include <mach/hardware.h>
-#include <mach/common.h>
-#include <mach/mx25.h>
-
-#define CRM_BASE MX25_IO_ADDRESS(MX25_CRM_BASE_ADDR)
-
-#define CCM_MPCTL 0x00
-#define CCM_UPCTL 0x04
-#define CCM_CCTL 0x08
-#define CCM_CGCR0 0x0C
-#define CCM_CGCR1 0x10
-#define CCM_CGCR2 0x14
-#define CCM_PCDR0 0x18
-#define CCM_PCDR1 0x1C
-#define CCM_PCDR2 0x20
-#define CCM_PCDR3 0x24
-#define CCM_RCSR 0x28
-#define CCM_CRDR 0x2C
-#define CCM_DCVR0 0x30
-#define CCM_DCVR1 0x34
-#define CCM_DCVR2 0x38
-#define CCM_DCVR3 0x3c
-#define CCM_LTR0 0x40
-#define CCM_LTR1 0x44
-#define CCM_LTR2 0x48
-#define CCM_LTR3 0x4c
-
-static unsigned long get_rate_mpll(void)
-{
- ulong mpctl = __raw_readl(CRM_BASE + CCM_MPCTL);
-
- return mxc_decode_pll(mpctl, 24000000);
-}
-
-static unsigned long get_rate_upll(void)
-{
- ulong mpctl = __raw_readl(CRM_BASE + CCM_UPCTL);
-
- return mxc_decode_pll(mpctl, 24000000);
-}
-
-unsigned long get_rate_arm(struct clk *clk)
-{
- unsigned long cctl = readl(CRM_BASE + CCM_CCTL);
- unsigned long rate = get_rate_mpll();
-
- if (cctl & (1 << 14))
- rate = (rate * 3) >> 2;
-
- return rate / ((cctl >> 30) + 1);
-}
-
-static unsigned long get_rate_ahb(struct clk *clk)
-{
- unsigned long cctl = readl(CRM_BASE + CCM_CCTL);
-
- return get_rate_arm(NULL) / (((cctl >> 28) & 0x3) + 1);
-}
-
-static unsigned long get_rate_ipg(struct clk *clk)
-{
- return get_rate_ahb(NULL) >> 1;
-}
-
-static unsigned long get_rate_per(int per)
-{
- unsigned long ofs = (per & 0x3) * 8;
- unsigned long reg = per & ~0x3;
- unsigned long val = (readl(CRM_BASE + CCM_PCDR0 + reg) >> ofs) & 0x3f;
- unsigned long fref;
-
- if (readl(CRM_BASE + 0x64) & (1 << per))
- fref = get_rate_upll();
- else
- fref = get_rate_ahb(NULL);
-
- return fref / (val + 1);
-}
-
-static unsigned long get_rate_uart(struct clk *clk)
-{
- return get_rate_per(15);
-}
-
-static unsigned long get_rate_ssi2(struct clk *clk)
-{
- return get_rate_per(14);
-}
-
-static unsigned long get_rate_ssi1(struct clk *clk)
-{
- return get_rate_per(13);
-}
-
-static unsigned long get_rate_i2c(struct clk *clk)
-{
- return get_rate_per(6);
-}
-
-static unsigned long get_rate_nfc(struct clk *clk)
-{
- return get_rate_per(8);
-}
-
-static unsigned long get_rate_gpt(struct clk *clk)
-{
- return get_rate_per(5);
-}
-
-static unsigned long get_rate_lcdc(struct clk *clk)
-{
- return get_rate_per(7);
-}
-
-static unsigned long get_rate_esdhc1(struct clk *clk)
-{
- return get_rate_per(3);
-}
-
-static unsigned long get_rate_esdhc2(struct clk *clk)
-{
- return get_rate_per(4);
-}
-
-static unsigned long get_rate_csi(struct clk *clk)
-{
- return get_rate_per(0);
-}
-
-static unsigned long get_rate_otg(struct clk *clk)
-{
- unsigned long cctl = readl(CRM_BASE + CCM_CCTL);
- unsigned long rate = get_rate_upll();
-
- return (cctl & (1 << 23)) ? 0 : rate / ((0x3F & (cctl >> 16)) + 1);
-}
-
-static int clk_cgcr_enable(struct clk *clk)
-{
- u32 reg;
-
- reg = __raw_readl(clk->enable_reg);
- reg |= 1 << clk->enable_shift;
- __raw_writel(reg, clk->enable_reg);
-
- return 0;
-}
-
-static void clk_cgcr_disable(struct clk *clk)
-{
- u32 reg;
-
- reg = __raw_readl(clk->enable_reg);
- reg &= ~(1 << clk->enable_shift);
- __raw_writel(reg, clk->enable_reg);
-}
-
-#define DEFINE_CLOCK(name, i, er, es, gr, sr, s) \
- static struct clk name = { \
- .id = i, \
- .enable_reg = CRM_BASE + er, \
- .enable_shift = es, \
- .get_rate = gr, \
- .set_rate = sr, \
- .enable = clk_cgcr_enable, \
- .disable = clk_cgcr_disable, \
- .secondary = s, \
- }
-
-/*
- * Note: the following IPG clock gating bits are wrongly marked "Reserved" in
- * the i.MX25 Reference Manual Rev 1, table 15-13. The information below is
- * taken from the Freescale released BSP.
- *
- * bit reg offset clock
- *
- * 0 CGCR1 0 AUDMUX
- * 12 CGCR1 12 ESAI
- * 16 CGCR1 16 GPIO1
- * 17 CGCR1 17 GPIO2
- * 18 CGCR1 18 GPIO3
- * 23 CGCR1 23 I2C1
- * 24 CGCR1 24 I2C2
- * 25 CGCR1 25 I2C3
- * 27 CGCR1 27 IOMUXC
- * 28 CGCR1 28 KPP
- * 30 CGCR1 30 OWIRE
- * 36 CGCR2 4 RTIC
- * 51 CGCR2 19 WDOG
- */
-
-DEFINE_CLOCK(gpt_clk, 0, CCM_CGCR0, 5, get_rate_gpt, NULL, NULL);
-DEFINE_CLOCK(uart_per_clk, 0, CCM_CGCR0, 15, get_rate_uart, NULL, NULL);
-DEFINE_CLOCK(ssi1_per_clk, 0, CCM_CGCR0, 13, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(ssi2_per_clk, 0, CCM_CGCR0, 14, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(cspi1_clk, 0, CCM_CGCR1, 5, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(cspi2_clk, 0, CCM_CGCR1, 6, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(cspi3_clk, 0, CCM_CGCR1, 7, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(esdhc1_ahb_clk, 0, CCM_CGCR0, 21, get_rate_esdhc1, NULL, NULL);
-DEFINE_CLOCK(esdhc1_per_clk, 0, CCM_CGCR0, 3, get_rate_esdhc1, NULL,
- &esdhc1_ahb_clk);
-DEFINE_CLOCK(esdhc2_ahb_clk, 0, CCM_CGCR0, 22, get_rate_esdhc2, NULL, NULL);
-DEFINE_CLOCK(esdhc2_per_clk, 0, CCM_CGCR0, 4, get_rate_esdhc2, NULL,
- &esdhc2_ahb_clk);
-DEFINE_CLOCK(sdma_ahb_clk, 0, CCM_CGCR0, 26, NULL, NULL, NULL);
-DEFINE_CLOCK(fec_ahb_clk, 0, CCM_CGCR0, 23, NULL, NULL, NULL);
-DEFINE_CLOCK(lcdc_ahb_clk, 0, CCM_CGCR0, 24, NULL, NULL, NULL);
-DEFINE_CLOCK(lcdc_per_clk, 0, CCM_CGCR0, 7, NULL, NULL, &lcdc_ahb_clk);
-DEFINE_CLOCK(csi_ahb_clk, 0, CCM_CGCR0, 18, get_rate_csi, NULL, NULL);
-DEFINE_CLOCK(csi_per_clk, 0, CCM_CGCR0, 0, get_rate_csi, NULL, &csi_ahb_clk);
-DEFINE_CLOCK(uart1_clk, 0, CCM_CGCR2, 14, get_rate_uart, NULL, &uart_per_clk);
-DEFINE_CLOCK(uart2_clk, 0, CCM_CGCR2, 15, get_rate_uart, NULL, &uart_per_clk);
-DEFINE_CLOCK(uart3_clk, 0, CCM_CGCR2, 16, get_rate_uart, NULL, &uart_per_clk);
-DEFINE_CLOCK(uart4_clk, 0, CCM_CGCR2, 17, get_rate_uart, NULL, &uart_per_clk);
-DEFINE_CLOCK(uart5_clk, 0, CCM_CGCR2, 18, get_rate_uart, NULL, &uart_per_clk);
-DEFINE_CLOCK(nfc_clk, 0, CCM_CGCR0, 8, get_rate_nfc, NULL, NULL);
-DEFINE_CLOCK(usbotg_clk, 0, CCM_CGCR0, 28, get_rate_otg, NULL, NULL);
-DEFINE_CLOCK(pwm1_clk, 0, CCM_CGCR1, 31, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(pwm2_clk, 0, CCM_CGCR2, 0, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(pwm3_clk, 0, CCM_CGCR2, 1, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(pwm4_clk, 0, CCM_CGCR2, 2, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(kpp_clk, 0, CCM_CGCR1, 28, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(tsc_clk, 0, CCM_CGCR2, 13, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(i2c_clk, 0, CCM_CGCR0, 6, get_rate_i2c, NULL, NULL);
-DEFINE_CLOCK(fec_clk, 0, CCM_CGCR1, 15, get_rate_ipg, NULL, &fec_ahb_clk);
-DEFINE_CLOCK(dryice_clk, 0, CCM_CGCR1, 8, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(lcdc_clk, 0, CCM_CGCR1, 29, get_rate_lcdc, NULL, &lcdc_per_clk);
-DEFINE_CLOCK(wdt_clk, 0, CCM_CGCR2, 19, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(ssi1_clk, 0, CCM_CGCR2, 11, get_rate_ssi1, NULL, &ssi1_per_clk);
-DEFINE_CLOCK(ssi2_clk, 1, CCM_CGCR2, 12, get_rate_ssi2, NULL, &ssi2_per_clk);
-DEFINE_CLOCK(sdma_clk, 0, CCM_CGCR2, 6, get_rate_ipg, NULL, &sdma_ahb_clk);
-DEFINE_CLOCK(esdhc1_clk, 0, CCM_CGCR1, 13, get_rate_esdhc1, NULL,
- &esdhc1_per_clk);
-DEFINE_CLOCK(esdhc2_clk, 1, CCM_CGCR1, 14, get_rate_esdhc2, NULL,
- &esdhc2_per_clk);
-DEFINE_CLOCK(audmux_clk, 0, CCM_CGCR1, 0, NULL, NULL, NULL);
-DEFINE_CLOCK(csi_clk, 0, CCM_CGCR1, 4, get_rate_csi, NULL, &csi_per_clk);
-DEFINE_CLOCK(can1_clk, 0, CCM_CGCR1, 2, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(can2_clk, 1, CCM_CGCR1, 3, get_rate_ipg, NULL, NULL);
-DEFINE_CLOCK(iim_clk, 0, CCM_CGCR1, 26, NULL, NULL, NULL);
-
-#define _REGISTER_CLOCK(d, n, c) \
- { \
- .dev_id = d, \
- .con_id = n, \
- .clk = &c, \
- },
-
-static struct clk_lookup lookups[] = {
- /* i.mx25 has the i.mx21 type uart */
- _REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
- _REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
- _REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
- _REGISTER_CLOCK("imx21-uart.3", NULL, uart4_clk)
- _REGISTER_CLOCK("imx21-uart.4", NULL, uart5_clk)
- _REGISTER_CLOCK("mxc-ehci.0", "usb", usbotg_clk)
- _REGISTER_CLOCK("mxc-ehci.1", "usb", usbotg_clk)
- _REGISTER_CLOCK("mxc-ehci.2", "usb", usbotg_clk)
- _REGISTER_CLOCK("fsl-usb2-udc", "usb", usbotg_clk)
- _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
- /* i.mx25 has the i.mx35 type cspi */
- _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi1_clk)
- _REGISTER_CLOCK("imx35-cspi.1", NULL, cspi2_clk)
- _REGISTER_CLOCK("imx35-cspi.2", NULL, cspi3_clk)
- _REGISTER_CLOCK("mxc_pwm.0", NULL, pwm1_clk)
- _REGISTER_CLOCK("mxc_pwm.1", NULL, pwm2_clk)
- _REGISTER_CLOCK("mxc_pwm.2", NULL, pwm3_clk)
- _REGISTER_CLOCK("mxc_pwm.3", NULL, pwm4_clk)
- _REGISTER_CLOCK("imx-keypad", NULL, kpp_clk)
- _REGISTER_CLOCK("mx25-adc", NULL, tsc_clk)
- _REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk)
- _REGISTER_CLOCK("imx-i2c.1", NULL, i2c_clk)
- _REGISTER_CLOCK("imx-i2c.2", NULL, i2c_clk)
- _REGISTER_CLOCK("imx25-fec.0", NULL, fec_clk)
- _REGISTER_CLOCK("imxdi_rtc.0", NULL, dryice_clk)
- _REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk)
- _REGISTER_CLOCK("imx2-wdt.0", NULL, wdt_clk)
- _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
- _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx25.0", NULL, esdhc1_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx25.1", NULL, esdhc2_clk)
- _REGISTER_CLOCK("mx2-camera.0", NULL, csi_clk)
- _REGISTER_CLOCK(NULL, "audmux", audmux_clk)
- _REGISTER_CLOCK("flexcan.0", NULL, can1_clk)
- _REGISTER_CLOCK("flexcan.1", NULL, can2_clk)
- /* i.mx25 has the i.mx35 type sdma */
- _REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
- _REGISTER_CLOCK(NULL, "iim", iim_clk)
-};
-
-int __init mx25_clocks_init(void)
-{
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
- /* Turn off all clocks except the ones we need to survive, namely:
- * EMI, GPIO1-3 (CCM_CGCR1[18:16]), GPT1, IOMUXC (CCM_CGCR1[27]), IIM,
- * SCC
- */
- __raw_writel((1 << 19), CRM_BASE + CCM_CGCR0);
- __raw_writel((0xf << 16) | (3 << 26), CRM_BASE + CCM_CGCR1);
- __raw_writel((1 << 5), CRM_BASE + CCM_CGCR2);
-#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
- clk_enable(&uart1_clk);
-#endif
-
- /* Clock source for lcdc and csi is upll */
- __raw_writel(__raw_readl(CRM_BASE+0x64) | (1 << 7) | (1 << 0),
- CRM_BASE + 0x64);
-
- /* Clock source for gpt is ahb_div */
- __raw_writel(__raw_readl(CRM_BASE+0x64) & ~(1 << 5), CRM_BASE + 0x64);
-
- clk_enable(&iim_clk);
- imx_print_silicon_rev("i.MX25", mx25_revision());
- clk_disable(&iim_clk);
-
- mxc_timer_init(&gpt_clk, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
-
- return 0;
-}
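
The i.MX25 gating code above (clk_cgcr_enable/clk_cgcr_disable) is a plain read-modify-write of one bit at enable_shift in the selected CGCR register. A minimal standalone sketch of that pattern follows; it is not kernel code — fake_cgcr and gate_set() are made-up names, with an ordinary variable standing in for the memory-mapped register and for the __raw_readl()/__raw_writel() accessors.

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_cgcr;	/* stands in for the memory-mapped CCM_CGCRx register */

static void gate_set(uint32_t *reg, unsigned int shift, int on)
{
	uint32_t val = *reg;		/* __raw_readl() step */

	if (on)
		val |= 1u << shift;	/* set the gate bit */
	else
		val &= ~(1u << shift);	/* clear the gate bit */
	*reg = val;			/* __raw_writel() step */
}

int main(void)
{
	gate_set(&fake_cgcr, 5, 1);	/* e.g. the gpt gate bit in CGCR0 */
	printf("after enable:  0x%08x\n", (unsigned int)fake_cgcr);
	gate_set(&fake_cgcr, 5, 0);
	printf("after disable: 0x%08x\n", (unsigned int)fake_cgcr);
	return 0;
}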
diff --git a/arch/arm/mach-imx/clock-imx27.c b/arch/arm/mach-imx/clock-imx27.c
deleted file mode 100644
index 98e04f5a87d..00000000000
--- a/arch/arm/mach-imx/clock-imx27.c
+++ /dev/null
@@ -1,785 +0,0 @@
-/*
- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- * Copyright 2008 Martin Fuzzey, mfuzzey@gmail.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/clkdev.h>
-#include <linux/of.h>
-
-#include <asm/div64.h>
-
-#include <mach/clock.h>
-#include <mach/common.h>
-#include <mach/hardware.h>
-
-#define IO_ADDR_CCM(off) (MX27_IO_ADDRESS(MX27_CCM_BASE_ADDR + (off)))
-
-/* Register offsets */
-#define CCM_CSCR IO_ADDR_CCM(0x0)
-#define CCM_MPCTL0 IO_ADDR_CCM(0x4)
-#define CCM_MPCTL1 IO_ADDR_CCM(0x8)
-#define CCM_SPCTL0 IO_ADDR_CCM(0xc)
-#define CCM_SPCTL1 IO_ADDR_CCM(0x10)
-#define CCM_OSC26MCTL IO_ADDR_CCM(0x14)
-#define CCM_PCDR0 IO_ADDR_CCM(0x18)
-#define CCM_PCDR1 IO_ADDR_CCM(0x1c)
-#define CCM_PCCR0 IO_ADDR_CCM(0x20)
-#define CCM_PCCR1 IO_ADDR_CCM(0x24)
-#define CCM_CCSR IO_ADDR_CCM(0x28)
-#define CCM_PMCTL IO_ADDR_CCM(0x2c)
-#define CCM_PMCOUNT IO_ADDR_CCM(0x30)
-#define CCM_WKGDCTL IO_ADDR_CCM(0x34)
-
-#define CCM_CSCR_UPDATE_DIS (1 << 31)
-#define CCM_CSCR_SSI2 (1 << 23)
-#define CCM_CSCR_SSI1 (1 << 22)
-#define CCM_CSCR_VPU (1 << 21)
-#define CCM_CSCR_MSHC (1 << 20)
-#define CCM_CSCR_SPLLRES (1 << 19)
-#define CCM_CSCR_MPLLRES (1 << 18)
-#define CCM_CSCR_SP (1 << 17)
-#define CCM_CSCR_MCU (1 << 16)
-#define CCM_CSCR_OSC26MDIV (1 << 4)
-#define CCM_CSCR_OSC26M (1 << 3)
-#define CCM_CSCR_FPM (1 << 2)
-#define CCM_CSCR_SPEN (1 << 1)
-#define CCM_CSCR_MPEN (1 << 0)
-
-/* i.MX27 TO 2+ */
-#define CCM_CSCR_ARM_SRC (1 << 15)
-
-#define CCM_SPCTL1_LF (1 << 15)
-#define CCM_SPCTL1_BRMO (1 << 6)
-
-static struct clk mpll_main1_clk, mpll_main2_clk;
-
-static int clk_pccr_enable(struct clk *clk)
-{
- unsigned long reg;
-
- if (!clk->enable_reg)
- return 0;
-
- reg = __raw_readl(clk->enable_reg);
- reg |= 1 << clk->enable_shift;
- __raw_writel(reg, clk->enable_reg);
-
- return 0;
-}
-
-static void clk_pccr_disable(struct clk *clk)
-{
- unsigned long reg;
-
- if (!clk->enable_reg)
- return;
-
- reg = __raw_readl(clk->enable_reg);
- reg &= ~(1 << clk->enable_shift);
- __raw_writel(reg, clk->enable_reg);
-}
-
-static int clk_spll_enable(struct clk *clk)
-{
- unsigned long reg;
-
- reg = __raw_readl(CCM_CSCR);
- reg |= CCM_CSCR_SPEN;
- __raw_writel(reg, CCM_CSCR);
-
- while (!(__raw_readl(CCM_SPCTL1) & CCM_SPCTL1_LF));
-
- return 0;
-}
-
-static void clk_spll_disable(struct clk *clk)
-{
- unsigned long reg;
-
- reg = __raw_readl(CCM_CSCR);
- reg &= ~CCM_CSCR_SPEN;
- __raw_writel(reg, CCM_CSCR);
-}
-
-static int clk_cpu_set_parent(struct clk *clk, struct clk *parent)
-{
- int cscr = __raw_readl(CCM_CSCR);
-
- if (clk->parent == parent)
- return 0;
-
- if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
- if (parent == &mpll_main1_clk) {
- cscr |= CCM_CSCR_ARM_SRC;
- } else {
- if (parent == &mpll_main2_clk)
- cscr &= ~CCM_CSCR_ARM_SRC;
- else
- return -EINVAL;
- }
- __raw_writel(cscr, CCM_CSCR);
- clk->parent = parent;
- return 0;
- }
- return -ENODEV;
-}
-
-static unsigned long round_rate_cpu(struct clk *clk, unsigned long rate)
-{
- int div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
- if (parent_rate % rate)
- div++;
-
- if (div > 4)
- div = 4;
-
- return parent_rate / div;
-}
-
-static int set_rate_cpu(struct clk *clk, unsigned long rate)
-{
- unsigned int div;
- uint32_t reg;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
-
- if (div > 4 || div < 1 || ((parent_rate / div) != rate))
- return -EINVAL;
-
- div--;
-
- reg = __raw_readl(CCM_CSCR);
- if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
- reg &= ~(3 << 12);
- reg |= div << 12;
- reg &= ~(CCM_CSCR_FPM | CCM_CSCR_SPEN);
- __raw_writel(reg | CCM_CSCR_UPDATE_DIS, CCM_CSCR);
- } else {
- printk(KERN_ERR "Can't set CPU frequency!\n");
- }
-
- return 0;
-}
-
-static unsigned long round_rate_per(struct clk *clk, unsigned long rate)
-{
- u32 div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
- if (parent_rate % rate)
- div++;
-
- if (div > 64)
- div = 64;
-
- return parent_rate / div;
-}
-
-static int set_rate_per(struct clk *clk, unsigned long rate)
-{
- u32 reg;
- u32 div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- if (clk->id < 0 || clk->id > 3)
- return -EINVAL;
-
- div = parent_rate / rate;
- if (div > 64 || div < 1 || ((parent_rate / div) != rate))
- return -EINVAL;
- div--;
-
- reg = __raw_readl(CCM_PCDR1) & ~(0x3f << (clk->id << 3));
- reg |= div << (clk->id << 3);
- __raw_writel(reg, CCM_PCDR1);
-
- return 0;
-}
-
-static unsigned long get_rate_usb(struct clk *clk)
-{
- unsigned long usb_pdf;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- usb_pdf = (__raw_readl(CCM_CSCR) >> 28) & 0x7;
-
- return parent_rate / (usb_pdf + 1U);
-}
-
-static unsigned long get_rate_ssix(struct clk *clk, unsigned long pdf)
-{
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
- pdf += 4; /* MX27 TO2+ */
- else
- pdf = (pdf < 2) ? 124UL : pdf; /* MX21 & MX27 TO1 */
-
- return 2UL * parent_rate / pdf;
-}
-
-static unsigned long get_rate_ssi1(struct clk *clk)
-{
- return get_rate_ssix(clk, (__raw_readl(CCM_PCDR0) >> 16) & 0x3f);
-}
-
-static unsigned long get_rate_ssi2(struct clk *clk)
-{
- return get_rate_ssix(clk, (__raw_readl(CCM_PCDR0) >> 26) & 0x3f);
-}
-
-static unsigned long get_rate_nfc(struct clk *clk)
-{
- unsigned long nfc_pdf;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
- nfc_pdf = (__raw_readl(CCM_PCDR0) >> 6) & 0xf;
- else
- nfc_pdf = (__raw_readl(CCM_PCDR0) >> 12) & 0xf;
-
- return parent_rate / (nfc_pdf + 1);
-}
-
-static unsigned long get_rate_vpu(struct clk *clk)
-{
- unsigned long vpu_pdf;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
- vpu_pdf = (__raw_readl(CCM_PCDR0) >> 10) & 0x3f;
- vpu_pdf += 4;
- } else {
- vpu_pdf = (__raw_readl(CCM_PCDR0) >> 8) & 0xf;
- vpu_pdf = (vpu_pdf < 2) ? 124 : vpu_pdf;
- }
-
- return 2UL * parent_rate / vpu_pdf;
-}
-
-static unsigned long round_rate_parent(struct clk *clk, unsigned long rate)
-{
- return clk->parent->round_rate(clk->parent, rate);
-}
-
-static unsigned long get_rate_parent(struct clk *clk)
-{
- return clk_get_rate(clk->parent);
-}
-
-static int set_rate_parent(struct clk *clk, unsigned long rate)
-{
- return clk->parent->set_rate(clk->parent, rate);
-}
-
-/* in Hz */
-static unsigned long external_high_reference = 26000000;
-
-static unsigned long get_rate_high_reference(struct clk *clk)
-{
- return external_high_reference;
-}
-
-/* in Hz */
-static unsigned long external_low_reference = 32768;
-
-static unsigned long get_rate_low_reference(struct clk *clk)
-{
- return external_low_reference;
-}
-
-static unsigned long get_rate_fpm(struct clk *clk)
-{
- return clk_get_rate(clk->parent) * 1024;
-}
-
-static unsigned long get_rate_mpll(struct clk *clk)
-{
- return mxc_decode_pll(__raw_readl(CCM_MPCTL0),
- clk_get_rate(clk->parent));
-}
-
-static unsigned long get_rate_mpll_main(struct clk *clk)
-{
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- /* i.MX27 TO2:
- * clk->id == 0: arm clock source path 1 which is from 2 * MPLL / 2
- * clk->id == 1: arm clock source path 2 which is from 2 * MPLL / 3
- */
- if (mx27_revision() >= IMX_CHIP_REVISION_2_0 && clk->id == 1)
- return 2UL * parent_rate / 3UL;
-
- return parent_rate;
-}
-
-static unsigned long get_rate_spll(struct clk *clk)
-{
- uint32_t reg;
- unsigned long rate;
-
- rate = clk_get_rate(clk->parent);
-
- reg = __raw_readl(CCM_SPCTL0);
-
- /* On TO2 we have to write the value back. Otherwise we
- * read 0 from this register the next time.
- */
- if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
- __raw_writel(reg, CCM_SPCTL0);
-
- return mxc_decode_pll(reg, rate);
-}
-
-static unsigned long get_rate_cpu(struct clk *clk)
-{
- u32 div;
- unsigned long rate;
-
- if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
- div = (__raw_readl(CCM_CSCR) >> 12) & 0x3;
- else
- div = (__raw_readl(CCM_CSCR) >> 13) & 0x7;
-
- rate = clk_get_rate(clk->parent);
- return rate / (div + 1);
-}
-
-static unsigned long get_rate_ahb(struct clk *clk)
-{
- unsigned long rate, bclk_pdf;
-
- if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
- bclk_pdf = (__raw_readl(CCM_CSCR) >> 8) & 0x3;
- else
- bclk_pdf = (__raw_readl(CCM_CSCR) >> 9) & 0xf;
-
- rate = clk_get_rate(clk->parent);
- return rate / (bclk_pdf + 1);
-}
-
-static unsigned long get_rate_ipg(struct clk *clk)
-{
- unsigned long rate, ipg_pdf;
-
- if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
- return clk_get_rate(clk->parent);
- else
- ipg_pdf = (__raw_readl(CCM_CSCR) >> 8) & 1;
-
- rate = clk_get_rate(clk->parent);
- return rate / (ipg_pdf + 1);
-}
-
-static unsigned long get_rate_per(struct clk *clk)
-{
- unsigned long perclk_pdf, parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- if (clk->id < 0 || clk->id > 3)
- return 0;
-
- perclk_pdf = (__raw_readl(CCM_PCDR1) >> (clk->id << 3)) & 0x3f;
-
- return parent_rate / (perclk_pdf + 1);
-}
-
-/*
- * the high frequency external clock reference
- * Default case is 26MHz. Could be changed at runtime
- * with a call to change_external_high_reference()
- */
-static struct clk ckih_clk = {
- .get_rate = get_rate_high_reference,
-};
-
-static struct clk mpll_clk = {
- .parent = &ckih_clk,
- .get_rate = get_rate_mpll,
-};
-
-/* For i.MX27 TO2, it is the MPLL path 1 of ARM core
- * It provides the clock source whose rate is same as MPLL
- */
-static struct clk mpll_main1_clk = {
- .id = 0,
- .parent = &mpll_clk,
- .get_rate = get_rate_mpll_main,
-};
-
-/* For i.MX27 TO2, it is the MPLL path 2 of ARM core
- * It provides the clock source whose rate is same MPLL * 2 / 3
- */
-static struct clk mpll_main2_clk = {
- .id = 1,
- .parent = &mpll_clk,
- .get_rate = get_rate_mpll_main,
-};
-
-static struct clk ahb_clk = {
- .parent = &mpll_main2_clk,
- .get_rate = get_rate_ahb,
-};
-
-static struct clk ipg_clk = {
- .parent = &ahb_clk,
- .get_rate = get_rate_ipg,
-};
-
-static struct clk cpu_clk = {
- .parent = &mpll_main2_clk,
- .set_parent = clk_cpu_set_parent,
- .round_rate = round_rate_cpu,
- .get_rate = get_rate_cpu,
- .set_rate = set_rate_cpu,
-};
-
-static struct clk spll_clk = {
- .parent = &ckih_clk,
- .get_rate = get_rate_spll,
- .enable = clk_spll_enable,
- .disable = clk_spll_disable,
-};
-
-/*
- * the low frequency external clock reference
- * Default case is 32.768kHz.
- */
-static struct clk ckil_clk = {
- .get_rate = get_rate_low_reference,
-};
-
-/* Output of frequency pre multiplier */
-static struct clk fpm_clk = {
- .parent = &ckil_clk,
- .get_rate = get_rate_fpm,
-};
-
-#define PCCR0 CCM_PCCR0
-#define PCCR1 CCM_PCCR1
-
-#define DEFINE_CLOCK(name, i, er, es, gr, s, p) \
- static struct clk name = { \
- .id = i, \
- .enable_reg = er, \
- .enable_shift = es, \
- .get_rate = gr, \
- .enable = clk_pccr_enable, \
- .disable = clk_pccr_disable, \
- .secondary = s, \
- .parent = p, \
- }
-
-#define DEFINE_CLOCK1(name, i, er, es, getsetround, s, p) \
- static struct clk name = { \
- .id = i, \
- .enable_reg = er, \
- .enable_shift = es, \
- .get_rate = get_rate_##getsetround, \
- .set_rate = set_rate_##getsetround, \
- .round_rate = round_rate_##getsetround, \
- .enable = clk_pccr_enable, \
- .disable = clk_pccr_disable, \
- .secondary = s, \
- .parent = p, \
- }
-
-/* Forward declaration to keep the following list in order */
-static struct clk slcdc_clk1, sahara2_clk1, rtic_clk1, fec_clk1, emma_clk1,
- dma_clk1, lcdc_clk2, vpu_clk1;
-
-/* All clocks we can gate through PCCRx in the order of PCCRx bits */
-DEFINE_CLOCK(ssi2_clk1, 1, PCCR0, 0, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(ssi1_clk1, 0, PCCR0, 1, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(slcdc_clk, 0, PCCR0, 2, NULL, &slcdc_clk1, &ahb_clk);
-DEFINE_CLOCK(sdhc3_clk1, 0, PCCR0, 3, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(sdhc2_clk1, 0, PCCR0, 4, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(sdhc1_clk1, 0, PCCR0, 5, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(scc_clk, 0, PCCR0, 6, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(sahara2_clk, 0, PCCR0, 7, NULL, &sahara2_clk1, &ahb_clk);
-DEFINE_CLOCK(rtic_clk, 0, PCCR0, 8, NULL, &rtic_clk1, &ahb_clk);
-DEFINE_CLOCK(rtc_clk, 0, PCCR0, 9, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(pwm_clk1, 0, PCCR0, 11, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(owire_clk, 0, PCCR0, 12, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(mstick_clk1, 0, PCCR0, 13, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(lcdc_clk1, 0, PCCR0, 14, NULL, &lcdc_clk2, &ipg_clk);
-DEFINE_CLOCK(kpp_clk, 0, PCCR0, 15, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(iim_clk, 0, PCCR0, 16, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(i2c2_clk, 1, PCCR0, 17, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(i2c1_clk, 0, PCCR0, 18, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpt6_clk1, 0, PCCR0, 19, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpt5_clk1, 0, PCCR0, 20, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpt4_clk1, 0, PCCR0, 21, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpt3_clk1, 0, PCCR0, 22, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpt2_clk1, 0, PCCR0, 23, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpt1_clk1, 0, PCCR0, 24, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(gpio_clk, 0, PCCR0, 25, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(fec_clk, 0, PCCR0, 26, NULL, &fec_clk1, &ahb_clk);
-DEFINE_CLOCK(emma_clk, 0, PCCR0, 27, NULL, &emma_clk1, &ahb_clk);
-DEFINE_CLOCK(dma_clk, 0, PCCR0, 28, NULL, &dma_clk1, &ahb_clk);
-DEFINE_CLOCK(cspi13_clk1, 0, PCCR0, 29, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(cspi2_clk1, 0, PCCR0, 30, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(cspi1_clk1, 0, PCCR0, 31, NULL, NULL, &ipg_clk);
-
-DEFINE_CLOCK(mstick_clk, 0, PCCR1, 2, NULL, &mstick_clk1, &ipg_clk);
-DEFINE_CLOCK(nfc_clk, 0, PCCR1, 3, get_rate_nfc, NULL, &cpu_clk);
-DEFINE_CLOCK(ssi2_clk, 1, PCCR1, 4, get_rate_ssi2, &ssi2_clk1, &mpll_main2_clk);
-DEFINE_CLOCK(ssi1_clk, 0, PCCR1, 5, get_rate_ssi1, &ssi1_clk1, &mpll_main2_clk);
-DEFINE_CLOCK(vpu_clk, 0, PCCR1, 6, get_rate_vpu, &vpu_clk1, &mpll_main2_clk);
-DEFINE_CLOCK1(per4_clk, 3, PCCR1, 7, per, NULL, &mpll_main2_clk);
-DEFINE_CLOCK1(per3_clk, 2, PCCR1, 8, per, NULL, &mpll_main2_clk);
-DEFINE_CLOCK1(per2_clk, 1, PCCR1, 9, per, NULL, &mpll_main2_clk);
-DEFINE_CLOCK1(per1_clk, 0, PCCR1, 10, per, NULL, &mpll_main2_clk);
-DEFINE_CLOCK(usb_clk1, 0, PCCR1, 11, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(slcdc_clk1, 0, PCCR1, 12, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(sahara2_clk1, 0, PCCR1, 13, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(rtic_clk1, 0, PCCR1, 14, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(lcdc_clk2, 0, PCCR1, 15, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(vpu_clk1, 0, PCCR1, 16, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(fec_clk1, 0, PCCR1, 17, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(emma_clk1, 0, PCCR1, 18, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(emi_clk, 0, PCCR1, 19, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(dma_clk1, 0, PCCR1, 20, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(csi_clk1, 0, PCCR1, 21, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(brom_clk, 0, PCCR1, 22, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(pata_clk, 0, PCCR1, 23, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(wdog_clk, 0, PCCR1, 24, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(usb_clk, 0, PCCR1, 25, get_rate_usb, &usb_clk1, &spll_clk);
-DEFINE_CLOCK(uart6_clk1, 0, PCCR1, 26, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(uart5_clk1, 0, PCCR1, 27, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(uart4_clk1, 0, PCCR1, 28, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(uart3_clk1, 0, PCCR1, 29, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(uart2_clk1, 0, PCCR1, 30, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(uart1_clk1, 0, PCCR1, 31, NULL, NULL, &ipg_clk);
-
-/* Clocks we cannot directly gate, but drivers need their rates */
-DEFINE_CLOCK(cspi1_clk, 0, NULL, 0, NULL, &cspi1_clk1, &per2_clk);
-DEFINE_CLOCK(cspi2_clk, 1, NULL, 0, NULL, &cspi2_clk1, &per2_clk);
-DEFINE_CLOCK(cspi3_clk, 2, NULL, 0, NULL, &cspi13_clk1, &per2_clk);
-DEFINE_CLOCK(sdhc1_clk, 0, NULL, 0, NULL, &sdhc1_clk1, &per2_clk);
-DEFINE_CLOCK(sdhc2_clk, 1, NULL, 0, NULL, &sdhc2_clk1, &per2_clk);
-DEFINE_CLOCK(sdhc3_clk, 2, NULL, 0, NULL, &sdhc3_clk1, &per2_clk);
-DEFINE_CLOCK(pwm_clk, 0, NULL, 0, NULL, &pwm_clk1, &per1_clk);
-DEFINE_CLOCK(gpt1_clk, 0, NULL, 0, NULL, &gpt1_clk1, &per1_clk);
-DEFINE_CLOCK(gpt2_clk, 1, NULL, 0, NULL, &gpt2_clk1, &per1_clk);
-DEFINE_CLOCK(gpt3_clk, 2, NULL, 0, NULL, &gpt3_clk1, &per1_clk);
-DEFINE_CLOCK(gpt4_clk, 3, NULL, 0, NULL, &gpt4_clk1, &per1_clk);
-DEFINE_CLOCK(gpt5_clk, 4, NULL, 0, NULL, &gpt5_clk1, &per1_clk);
-DEFINE_CLOCK(gpt6_clk, 5, NULL, 0, NULL, &gpt6_clk1, &per1_clk);
-DEFINE_CLOCK(uart1_clk, 0, NULL, 0, NULL, &uart1_clk1, &per1_clk);
-DEFINE_CLOCK(uart2_clk, 1, NULL, 0, NULL, &uart2_clk1, &per1_clk);
-DEFINE_CLOCK(uart3_clk, 2, NULL, 0, NULL, &uart3_clk1, &per1_clk);
-DEFINE_CLOCK(uart4_clk, 3, NULL, 0, NULL, &uart4_clk1, &per1_clk);
-DEFINE_CLOCK(uart5_clk, 4, NULL, 0, NULL, &uart5_clk1, &per1_clk);
-DEFINE_CLOCK(uart6_clk, 5, NULL, 0, NULL, &uart6_clk1, &per1_clk);
-DEFINE_CLOCK1(lcdc_clk, 0, NULL, 0, parent, &lcdc_clk1, &per3_clk);
-DEFINE_CLOCK1(csi_clk, 0, NULL, 0, parent, &csi_clk1, &per4_clk);
-
-#define _REGISTER_CLOCK(d, n, c) \
- { \
- .dev_id = d, \
- .con_id = n, \
- .clk = &c, \
- },
-
-static struct clk_lookup lookups[] = {
- /* i.mx27 has the i.mx21 type uart */
- _REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
- _REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
- _REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
- _REGISTER_CLOCK("imx21-uart.3", NULL, uart4_clk)
- _REGISTER_CLOCK("imx21-uart.4", NULL, uart5_clk)
- _REGISTER_CLOCK("imx21-uart.5", NULL, uart6_clk)
- _REGISTER_CLOCK(NULL, "gpt1", gpt1_clk)
- _REGISTER_CLOCK(NULL, "gpt2", gpt2_clk)
- _REGISTER_CLOCK(NULL, "gpt3", gpt3_clk)
- _REGISTER_CLOCK(NULL, "gpt4", gpt4_clk)
- _REGISTER_CLOCK(NULL, "gpt5", gpt5_clk)
- _REGISTER_CLOCK(NULL, "gpt6", gpt6_clk)
- _REGISTER_CLOCK("mxc_pwm.0", NULL, pwm_clk)
- _REGISTER_CLOCK("mxc-mmc.0", NULL, sdhc1_clk)
- _REGISTER_CLOCK("mxc-mmc.1", NULL, sdhc2_clk)
- _REGISTER_CLOCK("mxc-mmc.2", NULL, sdhc3_clk)
- _REGISTER_CLOCK("imx27-cspi.0", NULL, cspi1_clk)
- _REGISTER_CLOCK("imx27-cspi.1", NULL, cspi2_clk)
- _REGISTER_CLOCK("imx27-cspi.2", NULL, cspi3_clk)
- _REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk)
- _REGISTER_CLOCK("mx2-camera.0", NULL, csi_clk)
- _REGISTER_CLOCK("fsl-usb2-udc", "usb", usb_clk)
- _REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", usb_clk1)
- _REGISTER_CLOCK("mxc-ehci.0", "usb", usb_clk)
- _REGISTER_CLOCK("mxc-ehci.0", "usb_ahb", usb_clk1)
- _REGISTER_CLOCK("mxc-ehci.1", "usb", usb_clk)
- _REGISTER_CLOCK("mxc-ehci.1", "usb_ahb", usb_clk1)
- _REGISTER_CLOCK("mxc-ehci.2", "usb", usb_clk)
- _REGISTER_CLOCK("mxc-ehci.2", "usb_ahb", usb_clk1)
- _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
- _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
- _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
- _REGISTER_CLOCK(NULL, "vpu", vpu_clk)
- _REGISTER_CLOCK(NULL, "dma", dma_clk)
- _REGISTER_CLOCK(NULL, "rtic", rtic_clk)
- _REGISTER_CLOCK(NULL, "brom", brom_clk)
- _REGISTER_CLOCK(NULL, "emma", emma_clk)
- _REGISTER_CLOCK("m2m-emmaprp.0", NULL, emma_clk)
- _REGISTER_CLOCK(NULL, "slcdc", slcdc_clk)
- _REGISTER_CLOCK("imx27-fec.0", NULL, fec_clk)
- _REGISTER_CLOCK(NULL, "emi", emi_clk)
- _REGISTER_CLOCK(NULL, "sahara2", sahara2_clk)
- _REGISTER_CLOCK("pata_imx", NULL, pata_clk)
- _REGISTER_CLOCK(NULL, "mstick", mstick_clk)
- _REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk)
- _REGISTER_CLOCK(NULL, "gpio", gpio_clk)
- _REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
- _REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
- _REGISTER_CLOCK(NULL, "iim", iim_clk)
- _REGISTER_CLOCK(NULL, "kpp", kpp_clk)
- _REGISTER_CLOCK("mxc_w1.0", NULL, owire_clk)
- _REGISTER_CLOCK(NULL, "rtc", rtc_clk)
- _REGISTER_CLOCK(NULL, "scc", scc_clk)
-};
-
-/* Adjust the clock path for TO2 and later */
-static void __init to2_adjust_clocks(void)
-{
- unsigned long cscr = __raw_readl(CCM_CSCR);
-
- if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
- if (cscr & CCM_CSCR_ARM_SRC)
- cpu_clk.parent = &mpll_main1_clk;
-
- if (!(cscr & CCM_CSCR_SSI2))
- ssi1_clk.parent = &spll_clk;
-
- if (!(cscr & CCM_CSCR_SSI1))
- ssi1_clk.parent = &spll_clk;
-
- if (!(cscr & CCM_CSCR_VPU))
- vpu_clk.parent = &spll_clk;
- } else {
- cpu_clk.parent = &mpll_clk;
- cpu_clk.set_parent = NULL;
- cpu_clk.round_rate = NULL;
- cpu_clk.set_rate = NULL;
- ahb_clk.parent = &mpll_clk;
-
- per1_clk.parent = &mpll_clk;
- per2_clk.parent = &mpll_clk;
- per3_clk.parent = &mpll_clk;
- per4_clk.parent = &mpll_clk;
-
- ssi1_clk.parent = &mpll_clk;
- ssi2_clk.parent = &mpll_clk;
-
- vpu_clk.parent = &mpll_clk;
- }
-}
-
-/*
- * must be called very early to get information about the
- * available clock rate when the timer framework starts
- */
-int __init mx27_clocks_init(unsigned long fref)
-{
- u32 cscr = __raw_readl(CCM_CSCR);
-
- external_high_reference = fref;
-
- /* detect clock reference for both system PLLs */
- if (cscr & CCM_CSCR_MCU)
- mpll_clk.parent = &ckih_clk;
- else
- mpll_clk.parent = &fpm_clk;
-
- if (cscr & CCM_CSCR_SP)
- spll_clk.parent = &ckih_clk;
- else
- spll_clk.parent = &fpm_clk;
-
- to2_adjust_clocks();
-
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
- /* Turn off all clocks we do not need */
- __raw_writel(0, CCM_PCCR0);
- __raw_writel((1 << 10) | (1 << 19), CCM_PCCR1);
-
- spll_clk.disable(&spll_clk);
-
- /* enable basic clocks */
- clk_enable(&per1_clk);
- clk_enable(&gpio_clk);
- clk_enable(&emi_clk);
- clk_enable(&iim_clk);
- imx_print_silicon_rev("i.MX27", mx27_revision());
- clk_disable(&iim_clk);
-
-#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
- clk_enable(&uart1_clk);
-#endif
-
- mxc_timer_init(&gpt1_clk, MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR),
- MX27_INT_GPT1);
-
- return 0;
-}
-
-#ifdef CONFIG_OF
-int __init mx27_clocks_init_dt(void)
-{
- struct device_node *np;
- u32 fref = 26000000; /* default */
-
- for_each_compatible_node(np, NULL, "fixed-clock") {
- if (!of_device_is_compatible(np, "fsl,imx-osc26m"))
- continue;
-
- if (!of_property_read_u32(np, "clock-frequency", &fref))
- break;
- }
-
- return mx27_clocks_init(fref);
-}
-#endif
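
In the i.MX27 code above, get_rate_per() and set_rate_per() treat PCDR1 as four packed 8-bit slots, one per PERCLK, each holding a 6-bit divider field; the hardware divides by the field value plus one. The standalone sketch below shows just that arithmetic — not kernel code; per_rate() and the example register value and parent rate are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* divider field for PERCLK(id+1): 6 bits at bit offset id * 8 in PCDR1 */
static unsigned long per_rate(uint32_t pcdr1, unsigned int id,
			      unsigned long parent_rate)
{
	unsigned long pdf = (pcdr1 >> (id << 3)) & 0x3f;

	return parent_rate / (pdf + 1);	/* hardware divides by field + 1 */
}

int main(void)
{
	uint32_t pcdr1 = 0x00000003;	/* made-up value: PERDIV1 field = 3 */

	/* 266 MHz parent divided by 4 -> 66.5 MHz */
	printf("per1 = %lu Hz\n", per_rate(pcdr1, 0, 266000000UL));
	return 0;
}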
diff --git a/arch/arm/mach-imx/clock-imx31.c b/arch/arm/mach-imx/clock-imx31.c
deleted file mode 100644
index 3a943cd4159..00000000000
--- a/arch/arm/mach-imx/clock-imx31.c
+++ /dev/null
@@ -1,630 +0,0 @@
-/*
- * Copyright 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright (C) 2008 by Sascha Hauer <kernel@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
-
-#include <asm/div64.h>
-
-#include <mach/clock.h>
-#include <mach/hardware.h>
-#include <mach/mx31.h>
-#include <mach/common.h>
-
-#include "crmregs-imx3.h"
-
-#define PRE_DIV_MIN_FREQ 10000000 /* Minimum Frequency after Predivider */
-
-static void __calc_pre_post_dividers(u32 div, u32 *pre, u32 *post)
-{
- u32 min_pre, temp_pre, old_err, err;
-
- if (div >= 512) {
- *pre = 8;
- *post = 64;
- } else if (div >= 64) {
- min_pre = (div - 1) / 64 + 1;
- old_err = 8;
- for (temp_pre = 8; temp_pre >= min_pre; temp_pre--) {
- err = div % temp_pre;
- if (err == 0) {
- *pre = temp_pre;
- break;
- }
- err = temp_pre - err;
- if (err < old_err) {
- old_err = err;
- *pre = temp_pre;
- }
- }
- *post = (div + *pre - 1) / *pre;
- } else if (div <= 8) {
- *pre = div;
- *post = 1;
- } else {
- *pre = 1;
- *post = div;
- }
-}
-
-static struct clk mcu_pll_clk;
-static struct clk serial_pll_clk;
-static struct clk ipg_clk;
-static struct clk ckih_clk;
-
-static int cgr_enable(struct clk *clk)
-{
- u32 reg;
-
- if (!clk->enable_reg)
- return 0;
-
- reg = __raw_readl(clk->enable_reg);
- reg |= 3 << clk->enable_shift;
- __raw_writel(reg, clk->enable_reg);
-
- return 0;
-}
-
-static void cgr_disable(struct clk *clk)
-{
- u32 reg;
-
- if (!clk->enable_reg)
- return;
-
- reg = __raw_readl(clk->enable_reg);
- reg &= ~(3 << clk->enable_shift);
-
- /* special case for EMI clock */
- if (clk->enable_reg == MXC_CCM_CGR2 && clk->enable_shift == 8)
- reg |= (1 << clk->enable_shift);
-
- __raw_writel(reg, clk->enable_reg);
-}
-
-static unsigned long pll_ref_get_rate(void)
-{
- unsigned long ccmr;
- unsigned int prcs;
-
- ccmr = __raw_readl(MXC_CCM_CCMR);
- prcs = (ccmr & MXC_CCM_CCMR_PRCS_MASK) >> MXC_CCM_CCMR_PRCS_OFFSET;
- if (prcs == 0x1)
- return CKIL_CLK_FREQ * 1024;
- else
- return clk_get_rate(&ckih_clk);
-}
-
-static unsigned long usb_pll_get_rate(struct clk *clk)
-{
- unsigned long reg;
-
- reg = __raw_readl(MXC_CCM_UPCTL);
-
- return mxc_decode_pll(reg, pll_ref_get_rate());
-}
-
-static unsigned long serial_pll_get_rate(struct clk *clk)
-{
- unsigned long reg;
-
- reg = __raw_readl(MXC_CCM_SRPCTL);
-
- return mxc_decode_pll(reg, pll_ref_get_rate());
-}
-
-static unsigned long mcu_pll_get_rate(struct clk *clk)
-{
- unsigned long reg, ccmr;
-
- ccmr = __raw_readl(MXC_CCM_CCMR);
-
- if (!(ccmr & MXC_CCM_CCMR_MPE) || (ccmr & MXC_CCM_CCMR_MDS))
- return clk_get_rate(&ckih_clk);
-
- reg = __raw_readl(MXC_CCM_MPCTL);
-
- return mxc_decode_pll(reg, pll_ref_get_rate());
-}
-
-static int usb_pll_enable(struct clk *clk)
-{
- u32 reg;
-
- reg = __raw_readl(MXC_CCM_CCMR);
- reg |= MXC_CCM_CCMR_UPE;
- __raw_writel(reg, MXC_CCM_CCMR);
-
- /* No lock bit on MX31, so using max time from spec */
- udelay(80);
-
- return 0;
-}
-
-static void usb_pll_disable(struct clk *clk)
-{
- u32 reg;
-
- reg = __raw_readl(MXC_CCM_CCMR);
- reg &= ~MXC_CCM_CCMR_UPE;
- __raw_writel(reg, MXC_CCM_CCMR);
-}
-
-static int serial_pll_enable(struct clk *clk)
-{
- u32 reg;
-
- reg = __raw_readl(MXC_CCM_CCMR);
- reg |= MXC_CCM_CCMR_SPE;
- __raw_writel(reg, MXC_CCM_CCMR);
-
- /* No lock bit on MX31, so using max time from spec */
- udelay(80);
-
- return 0;
-}
-
-static void serial_pll_disable(struct clk *clk)
-{
- u32 reg;
-
- reg = __raw_readl(MXC_CCM_CCMR);
- reg &= ~MXC_CCM_CCMR_SPE;
- __raw_writel(reg, MXC_CCM_CCMR);
-}
-
-#define PDR0(mask, off) ((__raw_readl(MXC_CCM_PDR0) & mask) >> off)
-#define PDR1(mask, off) ((__raw_readl(MXC_CCM_PDR1) & mask) >> off)
-#define PDR2(mask, off) ((__raw_readl(MXC_CCM_PDR2) & mask) >> off)
-
-static unsigned long mcu_main_get_rate(struct clk *clk)
-{
- u32 pmcr0 = __raw_readl(MXC_CCM_PMCR0);
-
- if ((pmcr0 & MXC_CCM_PMCR0_DFSUP1) == MXC_CCM_PMCR0_DFSUP1_SPLL)
- return clk_get_rate(&serial_pll_clk);
- else
- return clk_get_rate(&mcu_pll_clk);
-}
-
-static unsigned long ahb_get_rate(struct clk *clk)
-{
- unsigned long max_pdf;
-
- max_pdf = PDR0(MXC_CCM_PDR0_MAX_PODF_MASK,
- MXC_CCM_PDR0_MAX_PODF_OFFSET);
- return clk_get_rate(clk->parent) / (max_pdf + 1);
-}
-
-static unsigned long ipg_get_rate(struct clk *clk)
-{
- unsigned long ipg_pdf;
-
- ipg_pdf = PDR0(MXC_CCM_PDR0_IPG_PODF_MASK,
- MXC_CCM_PDR0_IPG_PODF_OFFSET);
- return clk_get_rate(clk->parent) / (ipg_pdf + 1);
-}
-
-static unsigned long nfc_get_rate(struct clk *clk)
-{
- unsigned long nfc_pdf;
-
- nfc_pdf = PDR0(MXC_CCM_PDR0_NFC_PODF_MASK,
- MXC_CCM_PDR0_NFC_PODF_OFFSET);
- return clk_get_rate(clk->parent) / (nfc_pdf + 1);
-}
-
-static unsigned long hsp_get_rate(struct clk *clk)
-{
- unsigned long hsp_pdf;
-
- hsp_pdf = PDR0(MXC_CCM_PDR0_HSP_PODF_MASK,
- MXC_CCM_PDR0_HSP_PODF_OFFSET);
- return clk_get_rate(clk->parent) / (hsp_pdf + 1);
-}
-
-static unsigned long usb_get_rate(struct clk *clk)
-{
- unsigned long usb_pdf, usb_prepdf;
-
- usb_pdf = PDR1(MXC_CCM_PDR1_USB_PODF_MASK,
- MXC_CCM_PDR1_USB_PODF_OFFSET);
- usb_prepdf = PDR1(MXC_CCM_PDR1_USB_PRDF_MASK,
- MXC_CCM_PDR1_USB_PRDF_OFFSET);
- return clk_get_rate(clk->parent) / (usb_prepdf + 1) / (usb_pdf + 1);
-}
-
-static unsigned long csi_get_rate(struct clk *clk)
-{
- u32 reg, pre, post;
-
- reg = __raw_readl(MXC_CCM_PDR0);
- pre = (reg & MXC_CCM_PDR0_CSI_PRDF_MASK) >>
- MXC_CCM_PDR0_CSI_PRDF_OFFSET;
- pre++;
- post = (reg & MXC_CCM_PDR0_CSI_PODF_MASK) >>
- MXC_CCM_PDR0_CSI_PODF_OFFSET;
- post++;
- return clk_get_rate(clk->parent) / (pre * post);
-}
-
-static unsigned long csi_round_rate(struct clk *clk, unsigned long rate)
-{
- u32 pre, post, parent = clk_get_rate(clk->parent);
- u32 div = parent / rate;
-
- if (parent % rate)
- div++;
-
- __calc_pre_post_dividers(div, &pre, &post);
-
- return parent / (pre * post);
-}
-
-static int csi_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 reg, div, pre, post, parent = clk_get_rate(clk->parent);
-
- div = parent / rate;
-
- if ((parent / div) != rate)
- return -EINVAL;
-
- __calc_pre_post_dividers(div, &pre, &post);
-
- /* Set CSI clock divider */
- reg = __raw_readl(MXC_CCM_PDR0) &
- ~(MXC_CCM_PDR0_CSI_PODF_MASK | MXC_CCM_PDR0_CSI_PRDF_MASK);
- reg |= (post - 1) << MXC_CCM_PDR0_CSI_PODF_OFFSET;
- reg |= (pre - 1) << MXC_CCM_PDR0_CSI_PRDF_OFFSET;
- __raw_writel(reg, MXC_CCM_PDR0);
-
- return 0;
-}
-
-static unsigned long ssi1_get_rate(struct clk *clk)
-{
- unsigned long ssi1_pdf, ssi1_prepdf;
-
- ssi1_pdf = PDR1(MXC_CCM_PDR1_SSI1_PODF_MASK,
- MXC_CCM_PDR1_SSI1_PODF_OFFSET);
- ssi1_prepdf = PDR1(MXC_CCM_PDR1_SSI1_PRE_PODF_MASK,
- MXC_CCM_PDR1_SSI1_PRE_PODF_OFFSET);
- return clk_get_rate(clk->parent) / (ssi1_prepdf + 1) / (ssi1_pdf + 1);
-}
-
-static unsigned long ssi2_get_rate(struct clk *clk)
-{
- unsigned long ssi2_pdf, ssi2_prepdf;
-
- ssi2_pdf = PDR1(MXC_CCM_PDR1_SSI2_PODF_MASK,
- MXC_CCM_PDR1_SSI2_PODF_OFFSET);
- ssi2_prepdf = PDR1(MXC_CCM_PDR1_SSI2_PRE_PODF_MASK,
- MXC_CCM_PDR1_SSI2_PRE_PODF_OFFSET);
- return clk_get_rate(clk->parent) / (ssi2_prepdf + 1) / (ssi2_pdf + 1);
-}
-
-static unsigned long firi_get_rate(struct clk *clk)
-{
- unsigned long firi_pdf, firi_prepdf;
-
- firi_pdf = PDR1(MXC_CCM_PDR1_FIRI_PODF_MASK,
- MXC_CCM_PDR1_FIRI_PODF_OFFSET);
- firi_prepdf = PDR1(MXC_CCM_PDR1_FIRI_PRE_PODF_MASK,
- MXC_CCM_PDR1_FIRI_PRE_PODF_OFFSET);
- return clk_get_rate(clk->parent) / (firi_prepdf + 1) / (firi_pdf + 1);
-}
-
-static unsigned long firi_round_rate(struct clk *clk, unsigned long rate)
-{
- u32 pre, post;
- u32 parent = clk_get_rate(clk->parent);
- u32 div = parent / rate;
-
- if (parent % rate)
- div++;
-
- __calc_pre_post_dividers(div, &pre, &post);
-
- return parent / (pre * post);
-
-}
-
-static int firi_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 reg, div, pre, post, parent = clk_get_rate(clk->parent);
-
- div = parent / rate;
-
- if ((parent / div) != rate)
- return -EINVAL;
-
- __calc_pre_post_dividers(div, &pre, &post);
-
- /* Set FIRI clock divider */
- reg = __raw_readl(MXC_CCM_PDR1) &
- ~(MXC_CCM_PDR1_FIRI_PODF_MASK | MXC_CCM_PDR1_FIRI_PRE_PODF_MASK);
- reg |= (pre - 1) << MXC_CCM_PDR1_FIRI_PRE_PODF_OFFSET;
- reg |= (post - 1) << MXC_CCM_PDR1_FIRI_PODF_OFFSET;
- __raw_writel(reg, MXC_CCM_PDR1);
-
- return 0;
-}
-
-static unsigned long mbx_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / 2;
-}
-
-static unsigned long mstick1_get_rate(struct clk *clk)
-{
- unsigned long msti_pdf;
-
- msti_pdf = PDR2(MXC_CCM_PDR2_MST1_PDF_MASK,
- MXC_CCM_PDR2_MST1_PDF_OFFSET);
- return clk_get_rate(clk->parent) / (msti_pdf + 1);
-}
-
-static unsigned long mstick2_get_rate(struct clk *clk)
-{
- unsigned long msti_pdf;
-
- msti_pdf = PDR2(MXC_CCM_PDR2_MST2_PDF_MASK,
- MXC_CCM_PDR2_MST2_PDF_OFFSET);
- return clk_get_rate(clk->parent) / (msti_pdf + 1);
-}
-
-static unsigned long ckih_rate;
-
-static unsigned long clk_ckih_get_rate(struct clk *clk)
-{
- return ckih_rate;
-}
-
-static unsigned long clk_ckil_get_rate(struct clk *clk)
-{
- return CKIL_CLK_FREQ;
-}
-
-static struct clk ckih_clk = {
- .get_rate = clk_ckih_get_rate,
-};
-
-static struct clk mcu_pll_clk = {
- .parent = &ckih_clk,
- .get_rate = mcu_pll_get_rate,
-};
-
-static struct clk mcu_main_clk = {
- .parent = &mcu_pll_clk,
- .get_rate = mcu_main_get_rate,
-};
-
-static struct clk serial_pll_clk = {
- .parent = &ckih_clk,
- .get_rate = serial_pll_get_rate,
- .enable = serial_pll_enable,
- .disable = serial_pll_disable,
-};
-
-static struct clk usb_pll_clk = {
- .parent = &ckih_clk,
- .get_rate = usb_pll_get_rate,
- .enable = usb_pll_enable,
- .disable = usb_pll_disable,
-};
-
-static struct clk ahb_clk = {
- .parent = &mcu_main_clk,
- .get_rate = ahb_get_rate,
-};
-
-#define DEFINE_CLOCK(name, i, er, es, gr, s, p) \
- static struct clk name = { \
- .id = i, \
- .enable_reg = er, \
- .enable_shift = es, \
- .get_rate = gr, \
- .enable = cgr_enable, \
- .disable = cgr_disable, \
- .secondary = s, \
- .parent = p, \
- }
-
-#define DEFINE_CLOCK1(name, i, er, es, getsetround, s, p) \
- static struct clk name = { \
- .id = i, \
- .enable_reg = er, \
- .enable_shift = es, \
- .get_rate = getsetround##_get_rate, \
- .set_rate = getsetround##_set_rate, \
- .round_rate = getsetround##_round_rate, \
- .enable = cgr_enable, \
- .disable = cgr_disable, \
- .secondary = s, \
- .parent = p, \
- }
-
-DEFINE_CLOCK(perclk_clk, 0, NULL, 0, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(ckil_clk, 0, NULL, 0, clk_ckil_get_rate, NULL, NULL);
-
-DEFINE_CLOCK(sdhc1_clk, 0, MXC_CCM_CGR0, 0, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(sdhc2_clk, 1, MXC_CCM_CGR0, 2, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(gpt_clk, 0, MXC_CCM_CGR0, 4, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(epit1_clk, 0, MXC_CCM_CGR0, 6, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(epit2_clk, 1, MXC_CCM_CGR0, 8, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(iim_clk, 0, MXC_CCM_CGR0, 10, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(pata_clk, 0, MXC_CCM_CGR0, 12, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(sdma_clk1, 0, MXC_CCM_CGR0, 14, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(cspi3_clk, 2, MXC_CCM_CGR0, 16, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(rng_clk, 0, MXC_CCM_CGR0, 18, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(uart1_clk, 0, MXC_CCM_CGR0, 20, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(uart2_clk, 1, MXC_CCM_CGR0, 22, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(ssi1_clk, 0, MXC_CCM_CGR0, 24, ssi1_get_rate, NULL, &serial_pll_clk);
-DEFINE_CLOCK(i2c1_clk, 0, MXC_CCM_CGR0, 26, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(i2c2_clk, 1, MXC_CCM_CGR0, 28, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(i2c3_clk, 2, MXC_CCM_CGR0, 30, NULL, NULL, &perclk_clk);
-
-DEFINE_CLOCK(mpeg4_clk, 0, MXC_CCM_CGR1, 0, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(mstick1_clk, 0, MXC_CCM_CGR1, 2, mstick1_get_rate, NULL, &usb_pll_clk);
-DEFINE_CLOCK(mstick2_clk, 1, MXC_CCM_CGR1, 4, mstick2_get_rate, NULL, &usb_pll_clk);
-DEFINE_CLOCK1(csi_clk, 0, MXC_CCM_CGR1, 6, csi, NULL, &serial_pll_clk);
-DEFINE_CLOCK(rtc_clk, 0, MXC_CCM_CGR1, 8, NULL, NULL, &ckil_clk);
-DEFINE_CLOCK(wdog_clk, 0, MXC_CCM_CGR1, 10, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(pwm_clk, 0, MXC_CCM_CGR1, 12, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(usb_clk2, 0, MXC_CCM_CGR1, 18, usb_get_rate, NULL, &ahb_clk);
-DEFINE_CLOCK(kpp_clk, 0, MXC_CCM_CGR1, 20, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(ipu_clk, 0, MXC_CCM_CGR1, 22, hsp_get_rate, NULL, &mcu_main_clk);
-DEFINE_CLOCK(uart3_clk, 2, MXC_CCM_CGR1, 24, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(uart4_clk, 3, MXC_CCM_CGR1, 26, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(uart5_clk, 4, MXC_CCM_CGR1, 28, NULL, NULL, &perclk_clk);
-DEFINE_CLOCK(owire_clk, 0, MXC_CCM_CGR1, 30, NULL, NULL, &perclk_clk);
-
-DEFINE_CLOCK(ssi2_clk, 1, MXC_CCM_CGR2, 0, ssi2_get_rate, NULL, &serial_pll_clk);
-DEFINE_CLOCK(cspi1_clk, 0, MXC_CCM_CGR2, 2, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(cspi2_clk, 1, MXC_CCM_CGR2, 4, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(mbx_clk, 0, MXC_CCM_CGR2, 6, mbx_get_rate, NULL, &ahb_clk);
-DEFINE_CLOCK(emi_clk, 0, MXC_CCM_CGR2, 8, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK(rtic_clk, 0, MXC_CCM_CGR2, 10, NULL, NULL, &ahb_clk);
-DEFINE_CLOCK1(firi_clk, 0, MXC_CCM_CGR2, 12, firi, NULL, &usb_pll_clk);
-
-DEFINE_CLOCK(sdma_clk2, 0, NULL, 0, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(usb_clk1, 0, NULL, 0, usb_get_rate, NULL, &usb_pll_clk);
-DEFINE_CLOCK(nfc_clk, 0, NULL, 0, nfc_get_rate, NULL, &ahb_clk);
-DEFINE_CLOCK(scc_clk, 0, NULL, 0, NULL, NULL, &ipg_clk);
-DEFINE_CLOCK(ipg_clk, 0, NULL, 0, ipg_get_rate, NULL, &ahb_clk);
-
-#define _REGISTER_CLOCK(d, n, c) \
- { \
- .dev_id = d, \
- .con_id = n, \
- .clk = &c, \
- },
-
-static struct clk_lookup lookups[] = {
- _REGISTER_CLOCK(NULL, "emi", emi_clk)
- _REGISTER_CLOCK("imx31-cspi.0", NULL, cspi1_clk)
- _REGISTER_CLOCK("imx31-cspi.1", NULL, cspi2_clk)
- _REGISTER_CLOCK("imx31-cspi.2", NULL, cspi3_clk)
- _REGISTER_CLOCK(NULL, "gpt", gpt_clk)
- _REGISTER_CLOCK(NULL, "pwm", pwm_clk)
- _REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk)
- _REGISTER_CLOCK(NULL, "rtc", rtc_clk)
- _REGISTER_CLOCK(NULL, "epit", epit1_clk)
- _REGISTER_CLOCK(NULL, "epit", epit2_clk)
- _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
- _REGISTER_CLOCK("ipu-core", NULL, ipu_clk)
- _REGISTER_CLOCK("mx3_sdc_fb", NULL, ipu_clk)
- _REGISTER_CLOCK(NULL, "kpp", kpp_clk)
- _REGISTER_CLOCK("mxc-ehci.0", "usb", usb_clk1)
- _REGISTER_CLOCK("mxc-ehci.0", "usb_ahb", usb_clk2)
- _REGISTER_CLOCK("mxc-ehci.1", "usb", usb_clk1)
- _REGISTER_CLOCK("mxc-ehci.1", "usb_ahb", usb_clk2)
- _REGISTER_CLOCK("mxc-ehci.2", "usb", usb_clk1)
- _REGISTER_CLOCK("mxc-ehci.2", "usb_ahb", usb_clk2)
- _REGISTER_CLOCK("fsl-usb2-udc", "usb", usb_clk1)
- _REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", usb_clk2)
- _REGISTER_CLOCK("mx3-camera.0", NULL, csi_clk)
- /* i.mx31 has the i.mx21 type uart */
- _REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
- _REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
- _REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
- _REGISTER_CLOCK("imx21-uart.3", NULL, uart4_clk)
- _REGISTER_CLOCK("imx21-uart.4", NULL, uart5_clk)
- _REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
- _REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
- _REGISTER_CLOCK("imx-i2c.2", NULL, i2c3_clk)
- _REGISTER_CLOCK("mxc_w1.0", NULL, owire_clk)
- _REGISTER_CLOCK("mxc-mmc.0", NULL, sdhc1_clk)
- _REGISTER_CLOCK("mxc-mmc.1", NULL, sdhc2_clk)
- _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
- _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
- _REGISTER_CLOCK(NULL, "firi", firi_clk)
- _REGISTER_CLOCK("pata_imx", NULL, pata_clk)
- _REGISTER_CLOCK(NULL, "rtic", rtic_clk)
- _REGISTER_CLOCK(NULL, "rng", rng_clk)
- _REGISTER_CLOCK("imx31-sdma", NULL, sdma_clk1)
- _REGISTER_CLOCK(NULL, "sdma_ipg", sdma_clk2)
- _REGISTER_CLOCK(NULL, "mstick", mstick1_clk)
- _REGISTER_CLOCK(NULL, "mstick", mstick2_clk)
- _REGISTER_CLOCK(NULL, "scc", scc_clk)
- _REGISTER_CLOCK(NULL, "iim", iim_clk)
- _REGISTER_CLOCK(NULL, "mpeg4", mpeg4_clk)
- _REGISTER_CLOCK(NULL, "mbx", mbx_clk)
-};
-
-int __init mx31_clocks_init(unsigned long fref)
-{
- u32 reg;
-
- ckih_rate = fref;
-
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
- /* change the csi_clk parent if necessary */
- reg = __raw_readl(MXC_CCM_CCMR);
- if (!(reg & MXC_CCM_CCMR_CSCS))
- if (clk_set_parent(&csi_clk, &usb_pll_clk))
- pr_err("%s: error changing csi_clk parent\n", __func__);
-
-
- /* Turn off all possible clocks */
- __raw_writel((3 << 4), MXC_CCM_CGR0);
- __raw_writel(0, MXC_CCM_CGR1);
- __raw_writel((3 << 8) | (3 << 14) | (3 << 16)|
- 1 << 27 | 1 << 28, /* Bit 27 and 28 are not defined for
- MX32, but still required to be set */
- MXC_CCM_CGR2);
-
- /*
- * Before turning off usb_pll make sure ipg_per_clk is generated
- * by ipg_clk and not usb_pll.
- */
- __raw_writel(__raw_readl(MXC_CCM_CCMR) | (1 << 24), MXC_CCM_CCMR);
-
- usb_pll_disable(&usb_pll_clk);
-
- pr_info("Clock input source is %ld\n", clk_get_rate(&ckih_clk));
-
- clk_enable(&gpt_clk);
- clk_enable(&emi_clk);
- clk_enable(&iim_clk);
- mx31_revision();
- clk_disable(&iim_clk);
-
- clk_enable(&serial_pll_clk);
-
- if (mx31_revision() >= IMX_CHIP_REVISION_2_0) {
- reg = __raw_readl(MXC_CCM_PMCR1);
- /* No PLL restart on DVFS switch; enable auto EMI handshake */
- reg |= MXC_CCM_PMCR1_PLLRDIS | MXC_CCM_PMCR1_EMIRQ_EN;
- __raw_writel(reg, MXC_CCM_PMCR1);
- }
-
- mxc_timer_init(&ipg_clk, MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR),
- MX31_INT_GPT);
-
- return 0;
-}
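
csi_get_rate() in the i.MX31 code above combines two divider fields from PDR0, a pre-divider and a post-divider, each stored as the divider value minus one. A standalone sketch of the resulting arithmetic follows — not kernel code; csi_rate() and the example field values and parent rate are made up for illustration.

#include <stdio.h>

/* both fields are stored as "divider - 1" in PDR0 */
static unsigned long csi_rate(unsigned long parent_rate,
			      unsigned int pre_field, unsigned int post_field)
{
	return parent_rate / ((pre_field + 1) * (post_field + 1));
}

int main(void)
{
	/* made-up fields: pre = 1 (/2), post = 3 (/4), so /8 overall */
	printf("csi = %lu Hz\n", csi_rate(288000000UL, 1, 3));
	return 0;
}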
diff --git a/arch/arm/mach-imx/clock-imx35.c b/arch/arm/mach-imx/clock-imx35.c
deleted file mode 100644
index e56c1a83eee..00000000000
--- a/arch/arm/mach-imx/clock-imx35.c
+++ /dev/null
@@ -1,536 +0,0 @@
-/*
- * Copyright (C) 2009 by Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
-
-#include <mach/clock.h>
-#include <mach/hardware.h>
-#include <mach/common.h>
-
-#include "crmregs-imx3.h"
-
-#ifdef HAVE_SET_RATE_SUPPORT
-static void calc_dividers(u32 div, u32 *pre, u32 *post, u32 maxpost)
-{
- u32 min_pre, temp_pre, old_err, err;
-
- min_pre = (div - 1) / maxpost + 1;
- old_err = 8;
-
- for (temp_pre = 8; temp_pre >= min_pre; temp_pre--) {
- if (div > (temp_pre * maxpost))
- break;
-
- if (div < (temp_pre * temp_pre))
- continue;
-
- err = div % temp_pre;
-
- if (err == 0) {
- *pre = temp_pre;
- break;
- }
-
- err = temp_pre - err;
-
- if (err < old_err) {
- old_err = err;
- *pre = temp_pre;
- }
- }
-
- *post = (div + *pre - 1) / *pre;
-}
-
-/* get the best values for a 3-bit divider combined with a 6-bit divider */
-static void calc_dividers_3_6(u32 div, u32 *pre, u32 *post)
-{
- if (div >= 512) {
- *pre = 8;
- *post = 64;
- } else if (div >= 64) {
- calc_dividers(div, pre, post, 64);
- } else if (div <= 8) {
- *pre = div;
- *post = 1;
- } else {
- *pre = 1;
- *post = div;
- }
-}
-
-/* get the best values for two cascaded 3-bit dividers */
-static void calc_dividers_3_3(u32 div, u32 *pre, u32 *post)
-{
- if (div >= 64) {
- *pre = *post = 8;
- } else if (div > 8) {
- calc_dividers(div, pre, post, 8);
- } else {
- *pre = 1;
- *post = div;
- }
-}
-#endif
-
-static unsigned long get_rate_mpll(void)
-{
- ulong mpctl = __raw_readl(MX35_CCM_MPCTL);
-
- return mxc_decode_pll(mpctl, 24000000);
-}
-
-static unsigned long get_rate_ppll(void)
-{
- ulong ppctl = __raw_readl(MX35_CCM_PPCTL);
-
- return mxc_decode_pll(ppctl, 24000000);
-}
-
-struct arm_ahb_div {
- unsigned char arm, ahb, sel;
-};
-
-static struct arm_ahb_div clk_consumer[] = {
- { .arm = 1, .ahb = 4, .sel = 0},
- { .arm = 1, .ahb = 3, .sel = 1},
- { .arm = 2, .ahb = 2, .sel = 0},
- { .arm = 0, .ahb = 0, .sel = 0},
- { .arm = 0, .ahb = 0, .sel = 0},
- { .arm = 0, .ahb = 0, .sel = 0},
- { .arm = 4, .ahb = 1, .sel = 0},
- { .arm = 1, .ahb = 5, .sel = 0},
- { .arm = 1, .ahb = 8, .sel = 0},
- { .arm = 1, .ahb = 6, .sel = 1},
- { .arm = 2, .ahb = 4, .sel = 0},
- { .arm = 0, .ahb = 0, .sel = 0},
- { .arm = 0, .ahb = 0, .sel = 0},
- { .arm = 0, .ahb = 0, .sel = 0},
- { .arm = 4, .ahb = 2, .sel = 0},
- { .arm = 0, .ahb = 0, .sel = 0},
-};
-
-static unsigned long get_rate_arm(void)
-{
- unsigned long pdr0 = __raw_readl(MXC_CCM_PDR0);
- struct arm_ahb_div *aad;
- unsigned long fref = get_rate_mpll();
-
- aad = &clk_consumer[(pdr0 >> 16) & 0xf];
- if (aad->sel)
- fref = fref * 3 / 4;
-
- return fref / aad->arm;
-}
-
-static unsigned long get_rate_ahb(struct clk *clk)
-{
- unsigned long pdr0 = __raw_readl(MXC_CCM_PDR0);
- struct arm_ahb_div *aad;
- unsigned long fref = get_rate_arm();
-
- aad = &clk_consumer[(pdr0 >> 16) & 0xf];
-
- return fref / aad->ahb;
-}
-
-static unsigned long get_rate_ipg(struct clk *clk)
-{
- return get_rate_ahb(NULL) >> 1;
-}
-
-static unsigned long get_rate_uart(struct clk *clk)
-{
- unsigned long pdr3 = __raw_readl(MX35_CCM_PDR3);
- unsigned long pdr4 = __raw_readl(MX35_CCM_PDR4);
- unsigned long div = ((pdr4 >> 10) & 0x3f) + 1;
-
- if (pdr3 & (1 << 14))
- return get_rate_arm() / div;
- else
- return get_rate_ppll() / div;
-}
-
-static unsigned long get_rate_sdhc(struct clk *clk)
-{
- unsigned long pdr3 = __raw_readl(MX35_CCM_PDR3);
- unsigned long div, rate;
-
- if (pdr3 & (1 << 6))
- rate = get_rate_arm();
- else
- rate = get_rate_ppll();
-
- switch (clk->id) {
- default:
- case 0:
- div = pdr3 & 0x3f;
- break;
- case 1:
- div = (pdr3 >> 8) & 0x3f;
- break;
- case 2:
- div = (pdr3 >> 16) & 0x3f;
- break;
- }
-
- return rate / (div + 1);
-}
-
-static unsigned long get_rate_mshc(struct clk *clk)
-{
- unsigned long pdr1 = __raw_readl(MXC_CCM_PDR1);
- unsigned long div1, div2, rate;
-
- if (pdr1 & (1 << 7))
- rate = get_rate_arm();
- else
- rate = get_rate_ppll();
-
- div1 = (pdr1 >> 29) & 0x7;
- div2 = (pdr1 >> 22) & 0x3f;
-
- return rate / ((div1 + 1) * (div2 + 1));
-}
-
-static unsigned long get_rate_ssi(struct clk *clk)
-{
- unsigned long pdr2 = __raw_readl(MX35_CCM_PDR2);
- unsigned long div1, div2, rate;
-
- if (pdr2 & (1 << 6))
- rate = get_rate_arm();
- else
- rate = get_rate_ppll();
-
- switch (clk->id) {
- default:
- case 0:
- div1 = pdr2 & 0x3f;
- div2 = (pdr2 >> 24) & 0x7;
- break;
- case 1:
- div1 = (pdr2 >> 8) & 0x3f;
- div2 = (pdr2 >> 27) & 0x7;
- break;
- }
-
- return rate / ((div1 + 1) * (div2 + 1));
-}
-
-static unsigned long get_rate_csi(struct clk *clk)
-{
- unsigned long pdr2 = __raw_readl(MX35_CCM_PDR2);
- unsigned long rate;
-
- if (pdr2 & (1 << 7))
- rate = get_rate_arm();
- else
- rate = get_rate_ppll();
-
- return rate / (((pdr2 >> 16) & 0x3f) + 1);
-}
-
-static unsigned long get_rate_otg(struct clk *clk)
-{
- unsigned long pdr4 = __raw_readl(MX35_CCM_PDR4);
- unsigned long rate;
-
- if (pdr4 & (1 << 9))
- rate = get_rate_arm();
- else
- rate = get_rate_ppll();
-
- return rate / (((pdr4 >> 22) & 0x3f) + 1);
-}
-
-static unsigned long get_rate_ipg_per(struct clk *clk)
-{
- unsigned long pdr0 = __raw_readl(MXC_CCM_PDR0);
- unsigned long pdr4 = __raw_readl(MX35_CCM_PDR4);
- unsigned long div;
-
- if (pdr0 & (1 << 26)) {
- div = (pdr4 >> 16) & 0x3f;
- return get_rate_arm() / (div + 1);
- } else {
- div = (pdr0 >> 12) & 0x7;
- return get_rate_ahb(NULL) / (div + 1);
- }
-}
-
-static unsigned long get_rate_hsp(struct clk *clk)
-{
- unsigned long hsp_podf = (__raw_readl(MXC_CCM_PDR0) >> 20) & 0x03;
- unsigned long fref = get_rate_mpll();
-
- if (fref > 400 * 1000 * 1000) {
- switch (hsp_podf) {
- case 0:
- return fref >> 2;
- case 1:
- return fref >> 3;
- case 2:
- return fref / 3;
- }
- } else {
- switch (hsp_podf) {
- case 0:
- case 2:
- return fref / 3;
- case 1:
- return fref / 6;
- }
- }
-
- return 0;
-}
-
-static int clk_cgr_enable(struct clk *clk)
-{
- u32 reg;
-
- reg = __raw_readl(clk->enable_reg);
- reg |= 3 << clk->enable_shift;
- __raw_writel(reg, clk->enable_reg);
-
- return 0;
-}
-
-static void clk_cgr_disable(struct clk *clk)
-{
- u32 reg;
-
- reg = __raw_readl(clk->enable_reg);
- reg &= ~(3 << clk->enable_shift);
- __raw_writel(reg, clk->enable_reg);
-}
-
-#define DEFINE_CLOCK(name, i, er, es, gr, sr) \
- static struct clk name = { \
- .id = i, \
- .enable_reg = er, \
- .enable_shift = es, \
- .get_rate = gr, \
- .set_rate = sr, \
- .enable = clk_cgr_enable, \
- .disable = clk_cgr_disable, \
- }
-
-DEFINE_CLOCK(asrc_clk, 0, MX35_CCM_CGR0, 0, NULL, NULL);
-DEFINE_CLOCK(pata_clk, 0, MX35_CCM_CGR0, 2, get_rate_ipg, NULL);
-/* DEFINE_CLOCK(audmux_clk, 0, MX35_CCM_CGR0, 4, NULL, NULL); */
-DEFINE_CLOCK(can1_clk, 0, MX35_CCM_CGR0, 6, get_rate_ipg, NULL);
-DEFINE_CLOCK(can2_clk, 1, MX35_CCM_CGR0, 8, get_rate_ipg, NULL);
-DEFINE_CLOCK(cspi1_clk, 0, MX35_CCM_CGR0, 10, get_rate_ipg, NULL);
-DEFINE_CLOCK(cspi2_clk, 1, MX35_CCM_CGR0, 12, get_rate_ipg, NULL);
-DEFINE_CLOCK(ect_clk, 0, MX35_CCM_CGR0, 14, get_rate_ipg, NULL);
-DEFINE_CLOCK(edio_clk, 0, MX35_CCM_CGR0, 16, NULL, NULL);
-DEFINE_CLOCK(emi_clk, 0, MX35_CCM_CGR0, 18, get_rate_ipg, NULL);
-DEFINE_CLOCK(epit1_clk, 0, MX35_CCM_CGR0, 20, get_rate_ipg, NULL);
-DEFINE_CLOCK(epit2_clk, 1, MX35_CCM_CGR0, 22, get_rate_ipg, NULL);
-DEFINE_CLOCK(esai_clk, 0, MX35_CCM_CGR0, 24, NULL, NULL);
-DEFINE_CLOCK(esdhc1_clk, 0, MX35_CCM_CGR0, 26, get_rate_sdhc, NULL);
-DEFINE_CLOCK(esdhc2_clk, 1, MX35_CCM_CGR0, 28, get_rate_sdhc, NULL);
-DEFINE_CLOCK(esdhc3_clk, 2, MX35_CCM_CGR0, 30, get_rate_sdhc, NULL);
-
-DEFINE_CLOCK(fec_clk, 0, MX35_CCM_CGR1, 0, get_rate_ipg, NULL);
-DEFINE_CLOCK(gpio1_clk, 0, MX35_CCM_CGR1, 2, NULL, NULL);
-DEFINE_CLOCK(gpio2_clk, 1, MX35_CCM_CGR1, 4, NULL, NULL);
-DEFINE_CLOCK(gpio3_clk, 2, MX35_CCM_CGR1, 6, NULL, NULL);
-DEFINE_CLOCK(gpt_clk, 0, MX35_CCM_CGR1, 8, get_rate_ipg, NULL);
-DEFINE_CLOCK(i2c1_clk, 0, MX35_CCM_CGR1, 10, get_rate_ipg_per, NULL);
-DEFINE_CLOCK(i2c2_clk, 1, MX35_CCM_CGR1, 12, get_rate_ipg_per, NULL);
-DEFINE_CLOCK(i2c3_clk, 2, MX35_CCM_CGR1, 14, get_rate_ipg_per, NULL);
-DEFINE_CLOCK(iomuxc_clk, 0, MX35_CCM_CGR1, 16, NULL, NULL);
-DEFINE_CLOCK(ipu_clk, 0, MX35_CCM_CGR1, 18, get_rate_hsp, NULL);
-DEFINE_CLOCK(kpp_clk, 0, MX35_CCM_CGR1, 20, get_rate_ipg, NULL);
-DEFINE_CLOCK(mlb_clk, 0, MX35_CCM_CGR1, 22, get_rate_ahb, NULL);
-DEFINE_CLOCK(mshc_clk, 0, MX35_CCM_CGR1, 24, get_rate_mshc, NULL);
-DEFINE_CLOCK(owire_clk, 0, MX35_CCM_CGR1, 26, get_rate_ipg_per, NULL);
-DEFINE_CLOCK(pwm_clk, 0, MX35_CCM_CGR1, 28, get_rate_ipg_per, NULL);
-DEFINE_CLOCK(rngc_clk, 0, MX35_CCM_CGR1, 30, get_rate_ipg, NULL);
-
-DEFINE_CLOCK(rtc_clk, 0, MX35_CCM_CGR2, 0, get_rate_ipg, NULL);
-DEFINE_CLOCK(rtic_clk, 0, MX35_CCM_CGR2, 2, get_rate_ahb, NULL);
-DEFINE_CLOCK(scc_clk, 0, MX35_CCM_CGR2, 4, get_rate_ipg, NULL);
-DEFINE_CLOCK(sdma_clk, 0, MX35_CCM_CGR2, 6, NULL, NULL);
-DEFINE_CLOCK(spba_clk, 0, MX35_CCM_CGR2, 8, get_rate_ipg, NULL);
-DEFINE_CLOCK(spdif_clk, 0, MX35_CCM_CGR2, 10, NULL, NULL);
-DEFINE_CLOCK(ssi1_clk, 0, MX35_CCM_CGR2, 12, get_rate_ssi, NULL);
-DEFINE_CLOCK(ssi2_clk, 1, MX35_CCM_CGR2, 14, get_rate_ssi, NULL);
-DEFINE_CLOCK(uart1_clk, 0, MX35_CCM_CGR2, 16, get_rate_uart, NULL);
-DEFINE_CLOCK(uart2_clk, 1, MX35_CCM_CGR2, 18, get_rate_uart, NULL);
-DEFINE_CLOCK(uart3_clk, 2, MX35_CCM_CGR2, 20, get_rate_uart, NULL);
-DEFINE_CLOCK(usbotg_clk, 0, MX35_CCM_CGR2, 22, get_rate_otg, NULL);
-DEFINE_CLOCK(wdog_clk, 0, MX35_CCM_CGR2, 24, NULL, NULL);
-DEFINE_CLOCK(max_clk, 0, MX35_CCM_CGR2, 26, NULL, NULL);
-DEFINE_CLOCK(audmux_clk, 0, MX35_CCM_CGR2, 30, NULL, NULL);
-
-DEFINE_CLOCK(csi_clk, 0, MX35_CCM_CGR3, 0, get_rate_csi, NULL);
-DEFINE_CLOCK(iim_clk, 0, MX35_CCM_CGR3, 2, NULL, NULL);
-DEFINE_CLOCK(gpu2d_clk, 0, MX35_CCM_CGR3, 4, NULL, NULL);
-
-DEFINE_CLOCK(usbahb_clk, 0, 0, 0, get_rate_ahb, NULL);
-
-static int clk_dummy_enable(struct clk *clk)
-{
- return 0;
-}
-
-static void clk_dummy_disable(struct clk *clk)
-{
-}
-
-static unsigned long get_rate_nfc(struct clk *clk)
-{
- unsigned long div1;
-
- div1 = (__raw_readl(MX35_CCM_PDR4) >> 28) + 1;
-
- return get_rate_ahb(NULL) / div1;
-}
-
-/* NAND Controller: It seems it can't be disabled */
-static struct clk nfc_clk = {
- .id = 0,
- .enable_reg = 0,
- .enable_shift = 0,
- .get_rate = get_rate_nfc,
- .set_rate = NULL, /* set_rate_nfc, */
- .enable = clk_dummy_enable,
- .disable = clk_dummy_disable
-};
-
-#define _REGISTER_CLOCK(d, n, c) \
- { \
- .dev_id = d, \
- .con_id = n, \
- .clk = &c, \
- },
-
-static struct clk_lookup lookups[] = {
- _REGISTER_CLOCK(NULL, "asrc", asrc_clk)
- _REGISTER_CLOCK("pata_imx", NULL, pata_clk)
- _REGISTER_CLOCK("flexcan.0", NULL, can1_clk)
- _REGISTER_CLOCK("flexcan.1", NULL, can2_clk)
- _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi1_clk)
- _REGISTER_CLOCK("imx35-cspi.1", NULL, cspi2_clk)
- _REGISTER_CLOCK(NULL, "ect", ect_clk)
- _REGISTER_CLOCK(NULL, "edio", edio_clk)
- _REGISTER_CLOCK(NULL, "emi", emi_clk)
- _REGISTER_CLOCK("imx-epit.0", NULL, epit1_clk)
- _REGISTER_CLOCK("imx-epit.1", NULL, epit2_clk)
- _REGISTER_CLOCK(NULL, "esai", esai_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx35.0", NULL, esdhc1_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx35.1", NULL, esdhc2_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx35.2", NULL, esdhc3_clk)
-	/* i.MX35 has the i.MX27-type FEC */
- _REGISTER_CLOCK("imx27-fec.0", NULL, fec_clk)
- _REGISTER_CLOCK(NULL, "gpio", gpio1_clk)
- _REGISTER_CLOCK(NULL, "gpio", gpio2_clk)
- _REGISTER_CLOCK(NULL, "gpio", gpio3_clk)
- _REGISTER_CLOCK("gpt.0", NULL, gpt_clk)
- _REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
- _REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
- _REGISTER_CLOCK("imx-i2c.2", NULL, i2c3_clk)
- _REGISTER_CLOCK(NULL, "iomuxc", iomuxc_clk)
- _REGISTER_CLOCK("ipu-core", NULL, ipu_clk)
- _REGISTER_CLOCK("mx3_sdc_fb", NULL, ipu_clk)
- _REGISTER_CLOCK(NULL, "kpp", kpp_clk)
- _REGISTER_CLOCK(NULL, "mlb", mlb_clk)
- _REGISTER_CLOCK(NULL, "mshc", mshc_clk)
- _REGISTER_CLOCK("mxc_w1", NULL, owire_clk)
- _REGISTER_CLOCK(NULL, "pwm", pwm_clk)
- _REGISTER_CLOCK(NULL, "rngc", rngc_clk)
- _REGISTER_CLOCK(NULL, "rtc", rtc_clk)
- _REGISTER_CLOCK(NULL, "rtic", rtic_clk)
- _REGISTER_CLOCK(NULL, "scc", scc_clk)
- _REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
- _REGISTER_CLOCK(NULL, "spba", spba_clk)
- _REGISTER_CLOCK(NULL, "spdif", spdif_clk)
- _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
- _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
-	/* i.MX35 has the i.MX21-type UART */
- _REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
- _REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
- _REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
- _REGISTER_CLOCK("mxc-ehci.0", "usb", usbotg_clk)
- _REGISTER_CLOCK("mxc-ehci.1", "usb", usbotg_clk)
- _REGISTER_CLOCK("mxc-ehci.2", "usb", usbotg_clk)
- _REGISTER_CLOCK("fsl-usb2-udc", "usb", usbotg_clk)
- _REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", usbahb_clk)
- _REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk)
- _REGISTER_CLOCK(NULL, "max", max_clk)
- _REGISTER_CLOCK(NULL, "audmux", audmux_clk)
- _REGISTER_CLOCK("mx3-camera.0", NULL, csi_clk)
- _REGISTER_CLOCK(NULL, "iim", iim_clk)
- _REGISTER_CLOCK(NULL, "gpu2d", gpu2d_clk)
- _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
-};
-
-int __init mx35_clocks_init(void)
-{
- unsigned int cgr2 = 3 << 26;
-
-#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
- cgr2 |= 3 << 16;
-#endif
-
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
-	/*
-	 * Turn off all clocks except the ones we need to survive, namely:
-	 * EMI, GPIO1/2/3, GPT, IOMUX, MAX and, when DEBUG_LL is enabled,
-	 * the UART. Each gate is a 2-bit CGR field; see the sketch after
-	 * this function.
-	 */
- __raw_writel((3 << 18), MX35_CCM_CGR0);
- __raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16),
- MX35_CCM_CGR1);
- __raw_writel(cgr2, MX35_CCM_CGR2);
- __raw_writel(0, MX35_CCM_CGR3);
-
- clk_enable(&iim_clk);
- imx_print_silicon_rev("i.MX35", mx35_revision());
- clk_disable(&iim_clk);
-
-	/*
-	 * Check if we came up in internal boot mode. If so, we need some
-	 * extra clocks turned on; otherwise the i.MX35 boot ROM code will
-	 * hang after a watchdog reset.
-	 */
- if (!(__raw_readl(MX35_CCM_RCSR) & (3 << 10))) {
- /* Additionally turn on UART1, SCC, and IIM clocks */
- clk_enable(&iim_clk);
- clk_enable(&uart1_clk);
- clk_enable(&scc_clk);
- }
-
-#ifdef CONFIG_MXC_USE_EPIT
- epit_timer_init(&epit1_clk,
- MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
-#else
- mxc_timer_init(&gpt_clk,
- MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT);
-#endif
-
- return 0;
-}
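
Each peripheral gate in the i.MX35 CGR0..CGR3 registers is a 2-bit field, and writing 0x3 into a field keeps that clock permanently on, which is why the init code above ORs together 3 << shift for every clock it wants to keep running. The host-side sketch below only illustrates that bit layout; the shift values are taken from the DEFINE_CLOCK table above, and the file name and compile command are assumptions.

/* Sketch: building an i.MX35 CGR mask word from 2-bit "always on" gate fields.
 * Build with e.g.: gcc -o cgr cgr.c
 */
#include <stdio.h>

#define CGR_ON(shift)	(0x3u << (shift))	/* 2 bits per gate, 0x3 = clock on */

int main(void)
{
	/* CGR1 survivors from mx35_clocks_init(): GPIO1/2/3, GPT, IOMUX */
	unsigned int cgr1 = CGR_ON(2) | CGR_ON(4) | CGR_ON(6) |
			    CGR_ON(8) | CGR_ON(16);

	printf("CGR1 = 0x%08x\n", cgr1);	/* same value the init code writes */
	return 0;
}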
diff --git a/arch/arm/mach-imx/clock-imx6q.c b/arch/arm/mach-imx/clock-imx6q.c
deleted file mode 100644
index 111c328f542..00000000000
--- a/arch/arm/mach-imx/clock-imx6q.c
+++ /dev/null
@@ -1,2111 +0,0 @@
-/*
- * Copyright 2011 Freescale Semiconductor, Inc.
- * Copyright 2011 Linaro Ltd.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <asm/div64.h>
-#include <asm/mach/map.h>
-#include <mach/clock.h>
-#include <mach/common.h>
-#include <mach/hardware.h>
-
-#define PLL_BASE IMX_IO_ADDRESS(MX6Q_ANATOP_BASE_ADDR)
-#define PLL1_SYS (PLL_BASE + 0x000)
-#define PLL2_BUS (PLL_BASE + 0x030)
-#define PLL3_USB_OTG (PLL_BASE + 0x010)
-#define PLL4_AUDIO (PLL_BASE + 0x070)
-#define PLL5_VIDEO (PLL_BASE + 0x0a0)
-#define PLL6_MLB (PLL_BASE + 0x0d0)
-#define PLL7_USB_HOST (PLL_BASE + 0x020)
-#define PLL8_ENET (PLL_BASE + 0x0e0)
-#define PFD_480 (PLL_BASE + 0x0f0)
-#define PFD_528 (PLL_BASE + 0x100)
-#define PLL_NUM_OFFSET 0x010
-#define PLL_DENOM_OFFSET 0x020
-
-#define PFD0 7
-#define PFD1 15
-#define PFD2 23
-#define PFD3 31
-#define PFD_FRAC_MASK 0x3f
-
-#define BM_PLL_BYPASS (0x1 << 16)
-#define BM_PLL_ENABLE (0x1 << 13)
-#define BM_PLL_POWER_DOWN (0x1 << 12)
-#define BM_PLL_LOCK (0x1 << 31)
-#define BP_PLL_SYS_DIV_SELECT 0
-#define BM_PLL_SYS_DIV_SELECT (0x7f << 0)
-#define BP_PLL_BUS_DIV_SELECT 0
-#define BM_PLL_BUS_DIV_SELECT (0x1 << 0)
-#define BP_PLL_USB_DIV_SELECT 0
-#define BM_PLL_USB_DIV_SELECT (0x3 << 0)
-#define BP_PLL_AV_DIV_SELECT 0
-#define BM_PLL_AV_DIV_SELECT (0x7f << 0)
-#define BP_PLL_ENET_DIV_SELECT 0
-#define BM_PLL_ENET_DIV_SELECT (0x3 << 0)
-#define BM_PLL_ENET_EN_PCIE (0x1 << 19)
-#define BM_PLL_ENET_EN_SATA (0x1 << 20)
-
-#define CCM_BASE IMX_IO_ADDRESS(MX6Q_CCM_BASE_ADDR)
-#define CCR (CCM_BASE + 0x00)
-#define CCDR (CCM_BASE + 0x04)
-#define CSR (CCM_BASE + 0x08)
-#define CCSR (CCM_BASE + 0x0c)
-#define CACRR (CCM_BASE + 0x10)
-#define CBCDR (CCM_BASE + 0x14)
-#define CBCMR (CCM_BASE + 0x18)
-#define CSCMR1 (CCM_BASE + 0x1c)
-#define CSCMR2 (CCM_BASE + 0x20)
-#define CSCDR1 (CCM_BASE + 0x24)
-#define CS1CDR (CCM_BASE + 0x28)
-#define CS2CDR (CCM_BASE + 0x2c)
-#define CDCDR (CCM_BASE + 0x30)
-#define CHSCCDR (CCM_BASE + 0x34)
-#define CSCDR2 (CCM_BASE + 0x38)
-#define CSCDR3 (CCM_BASE + 0x3c)
-#define CSCDR4 (CCM_BASE + 0x40)
-#define CWDR (CCM_BASE + 0x44)
-#define CDHIPR (CCM_BASE + 0x48)
-#define CDCR (CCM_BASE + 0x4c)
-#define CTOR (CCM_BASE + 0x50)
-#define CLPCR (CCM_BASE + 0x54)
-#define CISR (CCM_BASE + 0x58)
-#define CIMR (CCM_BASE + 0x5c)
-#define CCOSR (CCM_BASE + 0x60)
-#define CGPR (CCM_BASE + 0x64)
-#define CCGR0 (CCM_BASE + 0x68)
-#define CCGR1 (CCM_BASE + 0x6c)
-#define CCGR2 (CCM_BASE + 0x70)
-#define CCGR3 (CCM_BASE + 0x74)
-#define CCGR4 (CCM_BASE + 0x78)
-#define CCGR5 (CCM_BASE + 0x7c)
-#define CCGR6 (CCM_BASE + 0x80)
-#define CCGR7 (CCM_BASE + 0x84)
-#define CMEOR (CCM_BASE + 0x88)
-
-#define CG0 0
-#define CG1 2
-#define CG2 4
-#define CG3 6
-#define CG4 8
-#define CG5 10
-#define CG6 12
-#define CG7 14
-#define CG8 16
-#define CG9 18
-#define CG10 20
-#define CG11 22
-#define CG12 24
-#define CG13 26
-#define CG14 28
-#define CG15 30
-
-#define BM_CCSR_PLL1_SW_SEL (0x1 << 2)
-#define BM_CCSR_STEP_SEL (0x1 << 8)
-
-#define BP_CACRR_ARM_PODF 0
-#define BM_CACRR_ARM_PODF (0x7 << 0)
-
-#define BP_CBCDR_PERIPH2_CLK2_PODF 0
-#define BM_CBCDR_PERIPH2_CLK2_PODF (0x7 << 0)
-#define BP_CBCDR_MMDC_CH1_AXI_PODF 3
-#define BM_CBCDR_MMDC_CH1_AXI_PODF (0x7 << 3)
-#define BP_CBCDR_AXI_SEL 6
-#define BM_CBCDR_AXI_SEL (0x3 << 6)
-#define BP_CBCDR_IPG_PODF 8
-#define BM_CBCDR_IPG_PODF (0x3 << 8)
-#define BP_CBCDR_AHB_PODF 10
-#define BM_CBCDR_AHB_PODF (0x7 << 10)
-#define BP_CBCDR_AXI_PODF 16
-#define BM_CBCDR_AXI_PODF (0x7 << 16)
-#define BP_CBCDR_MMDC_CH0_AXI_PODF 19
-#define BM_CBCDR_MMDC_CH0_AXI_PODF (0x7 << 19)
-#define BP_CBCDR_PERIPH_CLK_SEL 25
-#define BM_CBCDR_PERIPH_CLK_SEL (0x1 << 25)
-#define BP_CBCDR_PERIPH2_CLK_SEL 26
-#define BM_CBCDR_PERIPH2_CLK_SEL (0x1 << 26)
-#define BP_CBCDR_PERIPH_CLK2_PODF 27
-#define BM_CBCDR_PERIPH_CLK2_PODF (0x7 << 27)
-
-#define BP_CBCMR_GPU2D_AXI_SEL 0
-#define BM_CBCMR_GPU2D_AXI_SEL (0x1 << 0)
-#define BP_CBCMR_GPU3D_AXI_SEL 1
-#define BM_CBCMR_GPU3D_AXI_SEL (0x1 << 1)
-#define BP_CBCMR_GPU3D_CORE_SEL 4
-#define BM_CBCMR_GPU3D_CORE_SEL (0x3 << 4)
-#define BP_CBCMR_GPU3D_SHADER_SEL 8
-#define BM_CBCMR_GPU3D_SHADER_SEL (0x3 << 8)
-#define BP_CBCMR_PCIE_AXI_SEL 10
-#define BM_CBCMR_PCIE_AXI_SEL (0x1 << 10)
-#define BP_CBCMR_VDO_AXI_SEL 11
-#define BM_CBCMR_VDO_AXI_SEL (0x1 << 11)
-#define BP_CBCMR_PERIPH_CLK2_SEL 12
-#define BM_CBCMR_PERIPH_CLK2_SEL (0x3 << 12)
-#define BP_CBCMR_VPU_AXI_SEL 14
-#define BM_CBCMR_VPU_AXI_SEL (0x3 << 14)
-#define BP_CBCMR_GPU2D_CORE_SEL 16
-#define BM_CBCMR_GPU2D_CORE_SEL (0x3 << 16)
-#define BP_CBCMR_PRE_PERIPH_CLK_SEL 18
-#define BM_CBCMR_PRE_PERIPH_CLK_SEL (0x3 << 18)
-#define BP_CBCMR_PERIPH2_CLK2_SEL 20
-#define BM_CBCMR_PERIPH2_CLK2_SEL (0x1 << 20)
-#define BP_CBCMR_PRE_PERIPH2_CLK_SEL 21
-#define BM_CBCMR_PRE_PERIPH2_CLK_SEL (0x3 << 21)
-#define BP_CBCMR_GPU2D_CORE_PODF 23
-#define BM_CBCMR_GPU2D_CORE_PODF (0x7 << 23)
-#define BP_CBCMR_GPU3D_CORE_PODF 26
-#define BM_CBCMR_GPU3D_CORE_PODF (0x7 << 26)
-#define BP_CBCMR_GPU3D_SHADER_PODF 29
-#define BM_CBCMR_GPU3D_SHADER_PODF (0x7 << 29)
-
-#define BP_CSCMR1_PERCLK_PODF 0
-#define BM_CSCMR1_PERCLK_PODF (0x3f << 0)
-#define BP_CSCMR1_SSI1_SEL 10
-#define BM_CSCMR1_SSI1_SEL (0x3 << 10)
-#define BP_CSCMR1_SSI2_SEL 12
-#define BM_CSCMR1_SSI2_SEL (0x3 << 12)
-#define BP_CSCMR1_SSI3_SEL 14
-#define BM_CSCMR1_SSI3_SEL (0x3 << 14)
-#define BP_CSCMR1_USDHC1_SEL 16
-#define BM_CSCMR1_USDHC1_SEL (0x1 << 16)
-#define BP_CSCMR1_USDHC2_SEL 17
-#define BM_CSCMR1_USDHC2_SEL (0x1 << 17)
-#define BP_CSCMR1_USDHC3_SEL 18
-#define BM_CSCMR1_USDHC3_SEL (0x1 << 18)
-#define BP_CSCMR1_USDHC4_SEL 19
-#define BM_CSCMR1_USDHC4_SEL (0x1 << 19)
-#define BP_CSCMR1_EMI_PODF 20
-#define BM_CSCMR1_EMI_PODF (0x7 << 20)
-#define BP_CSCMR1_EMI_SLOW_PODF 23
-#define BM_CSCMR1_EMI_SLOW_PODF (0x7 << 23)
-#define BP_CSCMR1_EMI_SEL 27
-#define BM_CSCMR1_EMI_SEL (0x3 << 27)
-#define BP_CSCMR1_EMI_SLOW_SEL 29
-#define BM_CSCMR1_EMI_SLOW_SEL (0x3 << 29)
-
-#define BP_CSCMR2_CAN_PODF 2
-#define BM_CSCMR2_CAN_PODF (0x3f << 2)
-#define BM_CSCMR2_LDB_DI0_IPU_DIV (0x1 << 10)
-#define BM_CSCMR2_LDB_DI1_IPU_DIV (0x1 << 11)
-#define BP_CSCMR2_ESAI_SEL 19
-#define BM_CSCMR2_ESAI_SEL (0x3 << 19)
-
-#define BP_CSCDR1_UART_PODF 0
-#define BM_CSCDR1_UART_PODF (0x3f << 0)
-#define BP_CSCDR1_USDHC1_PODF 11
-#define BM_CSCDR1_USDHC1_PODF (0x7 << 11)
-#define BP_CSCDR1_USDHC2_PODF 16
-#define BM_CSCDR1_USDHC2_PODF (0x7 << 16)
-#define BP_CSCDR1_USDHC3_PODF 19
-#define BM_CSCDR1_USDHC3_PODF (0x7 << 19)
-#define BP_CSCDR1_USDHC4_PODF 22
-#define BM_CSCDR1_USDHC4_PODF (0x7 << 22)
-#define BP_CSCDR1_VPU_AXI_PODF 25
-#define BM_CSCDR1_VPU_AXI_PODF (0x7 << 25)
-
-#define BP_CS1CDR_SSI1_PODF 0
-#define BM_CS1CDR_SSI1_PODF (0x3f << 0)
-#define BP_CS1CDR_SSI1_PRED 6
-#define BM_CS1CDR_SSI1_PRED (0x7 << 6)
-#define BP_CS1CDR_ESAI_PRED 9
-#define BM_CS1CDR_ESAI_PRED (0x7 << 9)
-#define BP_CS1CDR_SSI3_PODF 16
-#define BM_CS1CDR_SSI3_PODF (0x3f << 16)
-#define BP_CS1CDR_SSI3_PRED 22
-#define BM_CS1CDR_SSI3_PRED (0x7 << 22)
-#define BP_CS1CDR_ESAI_PODF 25
-#define BM_CS1CDR_ESAI_PODF (0x7 << 25)
-
-#define BP_CS2CDR_SSI2_PODF 0
-#define BM_CS2CDR_SSI2_PODF (0x3f << 0)
-#define BP_CS2CDR_SSI2_PRED 6
-#define BM_CS2CDR_SSI2_PRED (0x7 << 6)
-#define BP_CS2CDR_LDB_DI0_SEL 9
-#define BM_CS2CDR_LDB_DI0_SEL (0x7 << 9)
-#define BP_CS2CDR_LDB_DI1_SEL 12
-#define BM_CS2CDR_LDB_DI1_SEL (0x7 << 12)
-#define BP_CS2CDR_ENFC_SEL 16
-#define BM_CS2CDR_ENFC_SEL (0x3 << 16)
-#define BP_CS2CDR_ENFC_PRED 18
-#define BM_CS2CDR_ENFC_PRED (0x7 << 18)
-#define BP_CS2CDR_ENFC_PODF 21
-#define BM_CS2CDR_ENFC_PODF (0x3f << 21)
-
-#define BP_CDCDR_ASRC_SERIAL_SEL 7
-#define BM_CDCDR_ASRC_SERIAL_SEL (0x3 << 7)
-#define BP_CDCDR_ASRC_SERIAL_PODF 9
-#define BM_CDCDR_ASRC_SERIAL_PODF (0x7 << 9)
-#define BP_CDCDR_ASRC_SERIAL_PRED 12
-#define BM_CDCDR_ASRC_SERIAL_PRED (0x7 << 12)
-#define BP_CDCDR_SPDIF_SEL 20
-#define BM_CDCDR_SPDIF_SEL (0x3 << 20)
-#define BP_CDCDR_SPDIF_PODF 22
-#define BM_CDCDR_SPDIF_PODF (0x7 << 22)
-#define BP_CDCDR_SPDIF_PRED 25
-#define BM_CDCDR_SPDIF_PRED (0x7 << 25)
-#define BP_CDCDR_HSI_TX_PODF 29
-#define BM_CDCDR_HSI_TX_PODF (0x7 << 29)
-#define BP_CDCDR_HSI_TX_SEL 28
-#define BM_CDCDR_HSI_TX_SEL (0x1 << 28)
-
-#define BP_CHSCCDR_IPU1_DI0_SEL 0
-#define BM_CHSCCDR_IPU1_DI0_SEL (0x7 << 0)
-#define BP_CHSCCDR_IPU1_DI0_PRE_PODF 3
-#define BM_CHSCCDR_IPU1_DI0_PRE_PODF (0x7 << 3)
-#define BP_CHSCCDR_IPU1_DI0_PRE_SEL 6
-#define BM_CHSCCDR_IPU1_DI0_PRE_SEL (0x7 << 6)
-#define BP_CHSCCDR_IPU1_DI1_SEL 9
-#define BM_CHSCCDR_IPU1_DI1_SEL (0x7 << 9)
-#define BP_CHSCCDR_IPU1_DI1_PRE_PODF 12
-#define BM_CHSCCDR_IPU1_DI1_PRE_PODF (0x7 << 12)
-#define BP_CHSCCDR_IPU1_DI1_PRE_SEL 15
-#define BM_CHSCCDR_IPU1_DI1_PRE_SEL (0x7 << 15)
-
-#define BP_CSCDR2_IPU2_DI0_SEL 0
-#define BM_CSCDR2_IPU2_DI0_SEL (0x7)
-#define BP_CSCDR2_IPU2_DI0_PRE_PODF 3
-#define BM_CSCDR2_IPU2_DI0_PRE_PODF (0x7 << 3)
-#define BP_CSCDR2_IPU2_DI0_PRE_SEL 6
-#define BM_CSCDR2_IPU2_DI0_PRE_SEL (0x7 << 6)
-#define BP_CSCDR2_IPU2_DI1_SEL 9
-#define BM_CSCDR2_IPU2_DI1_SEL (0x7 << 9)
-#define BP_CSCDR2_IPU2_DI1_PRE_PODF 12
-#define BM_CSCDR2_IPU2_DI1_PRE_PODF (0x7 << 12)
-#define BP_CSCDR2_IPU2_DI1_PRE_SEL 15
-#define BM_CSCDR2_IPU2_DI1_PRE_SEL (0x7 << 15)
-#define BP_CSCDR2_ECSPI_CLK_PODF 19
-#define BM_CSCDR2_ECSPI_CLK_PODF (0x3f << 19)
-
-#define BP_CSCDR3_IPU1_HSP_SEL 9
-#define BM_CSCDR3_IPU1_HSP_SEL (0x3 << 9)
-#define BP_CSCDR3_IPU1_HSP_PODF 11
-#define BM_CSCDR3_IPU1_HSP_PODF (0x7 << 11)
-#define BP_CSCDR3_IPU2_HSP_SEL 14
-#define BM_CSCDR3_IPU2_HSP_SEL (0x3 << 14)
-#define BP_CSCDR3_IPU2_HSP_PODF 16
-#define BM_CSCDR3_IPU2_HSP_PODF (0x7 << 16)
-
-#define BM_CDHIPR_AXI_PODF_BUSY (0x1 << 0)
-#define BM_CDHIPR_AHB_PODF_BUSY (0x1 << 1)
-#define BM_CDHIPR_MMDC_CH1_PODF_BUSY (0x1 << 2)
-#define BM_CDHIPR_PERIPH2_SEL_BUSY (0x1 << 3)
-#define BM_CDHIPR_MMDC_CH0_PODF_BUSY (0x1 << 4)
-#define BM_CDHIPR_PERIPH_SEL_BUSY (0x1 << 5)
-#define BM_CDHIPR_ARM_PODF_BUSY (0x1 << 16)
-
-#define BP_CLPCR_LPM 0
-#define BM_CLPCR_LPM (0x3 << 0)
-#define BM_CLPCR_BYPASS_PMIC_READY (0x1 << 2)
-#define BM_CLPCR_ARM_CLK_DIS_ON_LPM (0x1 << 5)
-#define BM_CLPCR_SBYOS (0x1 << 6)
-#define BM_CLPCR_DIS_REF_OSC (0x1 << 7)
-#define BM_CLPCR_VSTBY (0x1 << 8)
-#define BP_CLPCR_STBY_COUNT 9
-#define BM_CLPCR_STBY_COUNT (0x3 << 9)
-#define BM_CLPCR_COSC_PWRDOWN (0x1 << 11)
-#define BM_CLPCR_WB_PER_AT_LPM (0x1 << 16)
-#define BM_CLPCR_WB_CORE_AT_LPM (0x1 << 17)
-#define BM_CLPCR_BYP_MMDC_CH0_LPM_HS (0x1 << 19)
-#define BM_CLPCR_BYP_MMDC_CH1_LPM_HS (0x1 << 21)
-#define BM_CLPCR_MASK_CORE0_WFI (0x1 << 22)
-#define BM_CLPCR_MASK_CORE1_WFI (0x1 << 23)
-#define BM_CLPCR_MASK_CORE2_WFI (0x1 << 24)
-#define BM_CLPCR_MASK_CORE3_WFI (0x1 << 25)
-#define BM_CLPCR_MASK_SCU_IDLE (0x1 << 26)
-#define BM_CLPCR_MASK_L2CC_IDLE (0x1 << 27)
-
-#define BP_CCOSR_CKO1_EN 7
-#define BP_CCOSR_CKO1_PODF 4
-#define BM_CCOSR_CKO1_PODF (0x7 << 4)
-#define BP_CCOSR_CKO1_SEL 0
-#define BM_CCOSR_CKO1_SEL (0xf << 0)
-
-#define FREQ_480M 480000000
-#define FREQ_528M 528000000
-#define FREQ_594M 594000000
-#define FREQ_650M 650000000
-#define FREQ_1300M 1300000000
-
-static struct clk pll1_sys;
-static struct clk pll2_bus;
-static struct clk pll3_usb_otg;
-static struct clk pll4_audio;
-static struct clk pll5_video;
-static struct clk pll6_mlb;
-static struct clk pll7_usb_host;
-static struct clk pll8_enet;
-static struct clk apbh_dma_clk;
-static struct clk arm_clk;
-static struct clk ipg_clk;
-static struct clk ahb_clk;
-static struct clk axi_clk;
-static struct clk mmdc_ch0_axi_clk;
-static struct clk mmdc_ch1_axi_clk;
-static struct clk periph_clk;
-static struct clk periph_pre_clk;
-static struct clk periph_clk2_clk;
-static struct clk periph2_clk;
-static struct clk periph2_pre_clk;
-static struct clk periph2_clk2_clk;
-static struct clk gpu2d_core_clk;
-static struct clk gpu3d_core_clk;
-static struct clk gpu3d_shader_clk;
-static struct clk ipg_perclk;
-static struct clk emi_clk;
-static struct clk emi_slow_clk;
-static struct clk can1_clk;
-static struct clk uart_clk;
-static struct clk usdhc1_clk;
-static struct clk usdhc2_clk;
-static struct clk usdhc3_clk;
-static struct clk usdhc4_clk;
-static struct clk vpu_clk;
-static struct clk hsi_tx_clk;
-static struct clk ipu1_di0_pre_clk;
-static struct clk ipu1_di1_pre_clk;
-static struct clk ipu2_di0_pre_clk;
-static struct clk ipu2_di1_pre_clk;
-static struct clk ipu1_clk;
-static struct clk ipu2_clk;
-static struct clk ssi1_clk;
-static struct clk ssi3_clk;
-static struct clk esai_clk;
-static struct clk ssi2_clk;
-static struct clk spdif_clk;
-static struct clk asrc_serial_clk;
-static struct clk gpu2d_axi_clk;
-static struct clk gpu3d_axi_clk;
-static struct clk pcie_clk;
-static struct clk vdo_axi_clk;
-static struct clk ldb_di0_clk;
-static struct clk ldb_di1_clk;
-static struct clk ipu1_di0_clk;
-static struct clk ipu1_di1_clk;
-static struct clk ipu2_di0_clk;
-static struct clk ipu2_di1_clk;
-static struct clk enfc_clk;
-static struct clk cko1_clk;
-static struct clk dummy_clk = {};
-
-static unsigned long external_high_reference;
-static unsigned long external_low_reference;
-static unsigned long oscillator_reference;
-
-static unsigned long get_oscillator_reference_clock_rate(struct clk *clk)
-{
- return oscillator_reference;
-}
-
-static unsigned long get_high_reference_clock_rate(struct clk *clk)
-{
- return external_high_reference;
-}
-
-static unsigned long get_low_reference_clock_rate(struct clk *clk)
-{
- return external_low_reference;
-}
-
-static struct clk ckil_clk = {
- .get_rate = get_low_reference_clock_rate,
-};
-
-static struct clk ckih_clk = {
- .get_rate = get_high_reference_clock_rate,
-};
-
-static struct clk osc_clk = {
- .get_rate = get_oscillator_reference_clock_rate,
-};
-
-static inline void __iomem *pll_get_reg_addr(struct clk *pll)
-{
- if (pll == &pll1_sys)
- return PLL1_SYS;
- else if (pll == &pll2_bus)
- return PLL2_BUS;
- else if (pll == &pll3_usb_otg)
- return PLL3_USB_OTG;
- else if (pll == &pll4_audio)
- return PLL4_AUDIO;
- else if (pll == &pll5_video)
- return PLL5_VIDEO;
- else if (pll == &pll6_mlb)
- return PLL6_MLB;
- else if (pll == &pll7_usb_host)
- return PLL7_USB_HOST;
- else if (pll == &pll8_enet)
- return PLL8_ENET;
- else
- BUG();
-
- return NULL;
-}
-
-static int pll_enable(struct clk *clk)
-{
- int timeout = 0x100000;
- void __iomem *reg;
- u32 val;
-
- reg = pll_get_reg_addr(clk);
- val = readl_relaxed(reg);
- val &= ~BM_PLL_BYPASS;
- val &= ~BM_PLL_POWER_DOWN;
- /* 480MHz PLLs have the opposite definition for power bit */
- if (clk == &pll3_usb_otg || clk == &pll7_usb_host)
- val |= BM_PLL_POWER_DOWN;
- writel_relaxed(val, reg);
-
- /* Wait for PLL to lock */
- while (!(readl_relaxed(reg) & BM_PLL_LOCK) && --timeout)
- cpu_relax();
-
- if (unlikely(!timeout))
- return -EBUSY;
-
- /* Enable the PLL output now */
- val = readl_relaxed(reg);
- val |= BM_PLL_ENABLE;
- writel_relaxed(val, reg);
-
- return 0;
-}
-
-static void pll_disable(struct clk *clk)
-{
- void __iomem *reg;
- u32 val;
-
- reg = pll_get_reg_addr(clk);
- val = readl_relaxed(reg);
- val &= ~BM_PLL_ENABLE;
- val |= BM_PLL_BYPASS;
- val |= BM_PLL_POWER_DOWN;
- if (clk == &pll3_usb_otg || clk == &pll7_usb_host)
- val &= ~BM_PLL_POWER_DOWN;
- writel_relaxed(val, reg);
-}
-
-static unsigned long pll1_sys_get_rate(struct clk *clk)
-{
- u32 div = (readl_relaxed(PLL1_SYS) & BM_PLL_SYS_DIV_SELECT) >>
- BP_PLL_SYS_DIV_SELECT;
-
- return clk_get_rate(clk->parent) * div / 2;
-}
-
-static int pll1_sys_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 val, div;
-
- if (rate < FREQ_650M || rate > FREQ_1300M)
- return -EINVAL;
-
- div = rate * 2 / clk_get_rate(clk->parent);
- val = readl_relaxed(PLL1_SYS);
- val &= ~BM_PLL_SYS_DIV_SELECT;
- val |= div << BP_PLL_SYS_DIV_SELECT;
- writel_relaxed(val, PLL1_SYS);
-
- return 0;
-}
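
pll1_sys_get_rate() and pll1_sys_set_rate() above implement rate = parent_rate * DIV_SELECT / 2, so with the usual 24 MHz oscillator the accepted 650 MHz..1.3 GHz window corresponds to DIV_SELECT values of roughly 54..108. A minimal sketch of that arithmetic, assuming a 24 MHz parent and an arbitrary 996 MHz example target:

/* Sketch of the PLL1 (system PLL) divider math: rate = parent * div / 2. */
#include <stdio.h>

int main(void)
{
	unsigned long parent = 24000000;	/* osc_clk, assumed 24 MHz */
	unsigned long target = 996000000;	/* example rate inside 650M..1300M */

	unsigned long div = target * 2 / parent;	/* what pll1_sys_set_rate writes */
	unsigned long rate = parent * div / 2;		/* what pll1_sys_get_rate reads back */

	printf("div=%lu -> %lu Hz\n", div, rate);
	return 0;
}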
-
-static unsigned long pll8_enet_get_rate(struct clk *clk)
-{
- u32 div = (readl_relaxed(PLL8_ENET) & BM_PLL_ENET_DIV_SELECT) >>
- BP_PLL_ENET_DIV_SELECT;
-
- switch (div) {
- case 0:
- return 25000000;
- case 1:
- return 50000000;
- case 2:
- return 100000000;
- case 3:
- return 125000000;
- }
-
- return 0;
-}
-
-static int pll8_enet_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 val, div;
-
- switch (rate) {
- case 25000000:
- div = 0;
- break;
- case 50000000:
- div = 1;
- break;
- case 100000000:
- div = 2;
- break;
- case 125000000:
- div = 3;
- break;
- default:
- return -EINVAL;
- }
-
- val = readl_relaxed(PLL8_ENET);
- val &= ~BM_PLL_ENET_DIV_SELECT;
- val |= div << BP_PLL_ENET_DIV_SELECT;
- writel_relaxed(val, PLL8_ENET);
-
- return 0;
-}
-
-static unsigned long pll_av_get_rate(struct clk *clk)
-{
- void __iomem *reg = (clk == &pll4_audio) ? PLL4_AUDIO : PLL5_VIDEO;
- unsigned long parent_rate = clk_get_rate(clk->parent);
- u32 mfn = readl_relaxed(reg + PLL_NUM_OFFSET);
- u32 mfd = readl_relaxed(reg + PLL_DENOM_OFFSET);
- u32 div = (readl_relaxed(reg) & BM_PLL_AV_DIV_SELECT) >>
- BP_PLL_AV_DIV_SELECT;
-
- return (parent_rate * div) + ((parent_rate / mfd) * mfn);
-}
-
-static int pll_av_set_rate(struct clk *clk, unsigned long rate)
-{
- void __iomem *reg = (clk == &pll4_audio) ? PLL4_AUDIO : PLL5_VIDEO;
- unsigned int parent_rate = clk_get_rate(clk->parent);
- u32 val, div;
- u32 mfn, mfd = 1000000;
- s64 temp64;
-
- if (rate < FREQ_650M || rate > FREQ_1300M)
- return -EINVAL;
-
- div = rate / parent_rate;
- temp64 = (u64) (rate - div * parent_rate);
- temp64 *= mfd;
- do_div(temp64, parent_rate);
- mfn = temp64;
-
- val = readl_relaxed(reg);
- val &= ~BM_PLL_AV_DIV_SELECT;
- val |= div << BP_PLL_AV_DIV_SELECT;
- writel_relaxed(val, reg);
- writel_relaxed(mfn, reg + PLL_NUM_OFFSET);
- writel_relaxed(mfd, reg + PLL_DENOM_OFFSET);
-
- return 0;
-}
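
The audio/video PLLs above are fractional: the rate read back is parent_rate * div + (parent_rate / mfd) * mfn, and pll_av_set_rate() decomposes the requested rate into the integer divider plus an mfn/mfd remainder with mfd fixed at 1,000,000. A small host-side sketch of that decomposition; the 24 MHz parent and 650 MHz target are assumed example values, not taken from the driver:

/* Standalone sketch of the audio/video PLL fractional rate math. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t parent = 24000000;	/* osc_clk, assumed */
	const uint64_t target = 650000000;	/* example rate within 650M..1300M */
	const uint64_t mfd = 1000000;		/* same denominator pll_av_set_rate uses */

	uint64_t div = target / parent;				/* integer part -> DIV_SELECT */
	uint64_t mfn = (target - div * parent) * mfd / parent;	/* fractional numerator */
	uint64_t rate = parent * div + parent / mfd * mfn;	/* what pll_av_get_rate computes */

	printf("div=%llu mfn=%llu mfd=%llu -> %llu Hz\n",
	       (unsigned long long)div, (unsigned long long)mfn,
	       (unsigned long long)mfd, (unsigned long long)rate);
	return 0;
}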
-
-static void __iomem *pll_get_div_reg_bit(struct clk *clk, u32 *bp, u32 *bm)
-{
- void __iomem *reg;
-
- if (clk == &pll2_bus) {
- reg = PLL2_BUS;
- *bp = BP_PLL_BUS_DIV_SELECT;
- *bm = BM_PLL_BUS_DIV_SELECT;
- } else if (clk == &pll3_usb_otg) {
- reg = PLL3_USB_OTG;
- *bp = BP_PLL_USB_DIV_SELECT;
- *bm = BM_PLL_USB_DIV_SELECT;
- } else if (clk == &pll7_usb_host) {
- reg = PLL7_USB_HOST;
- *bp = BP_PLL_USB_DIV_SELECT;
- *bm = BM_PLL_USB_DIV_SELECT;
- } else {
- BUG();
- }
-
- return reg;
-}
-
-static unsigned long pll_get_rate(struct clk *clk)
-{
- void __iomem *reg;
- u32 div, bp, bm;
-
- reg = pll_get_div_reg_bit(clk, &bp, &bm);
- div = (readl_relaxed(reg) & bm) >> bp;
-
- return (div == 1) ? clk_get_rate(clk->parent) * 22 :
- clk_get_rate(clk->parent) * 20;
-}
-
-static int pll_set_rate(struct clk *clk, unsigned long rate)
-{
- void __iomem *reg;
- u32 val, div, bp, bm;
-
- if (rate == FREQ_528M)
- div = 1;
- else if (rate == FREQ_480M)
- div = 0;
- else
- return -EINVAL;
-
- reg = pll_get_div_reg_bit(clk, &bp, &bm);
- val = readl_relaxed(reg);
- val &= ~bm;
- val |= div << bp;
- writel_relaxed(val, reg);
-
- return 0;
-}
-
-#define pll2_bus_get_rate pll_get_rate
-#define pll2_bus_set_rate pll_set_rate
-#define pll3_usb_otg_get_rate pll_get_rate
-#define pll3_usb_otg_set_rate pll_set_rate
-#define pll7_usb_host_get_rate pll_get_rate
-#define pll7_usb_host_set_rate pll_set_rate
-#define pll4_audio_get_rate pll_av_get_rate
-#define pll4_audio_set_rate pll_av_set_rate
-#define pll5_video_get_rate pll_av_get_rate
-#define pll5_video_set_rate pll_av_set_rate
-#define pll6_mlb_get_rate NULL
-#define pll6_mlb_set_rate NULL
-
-#define DEF_PLL(name) \
- static struct clk name = { \
- .enable = pll_enable, \
- .disable = pll_disable, \
- .get_rate = name##_get_rate, \
- .set_rate = name##_set_rate, \
- .parent = &osc_clk, \
- }
-
-DEF_PLL(pll1_sys);
-DEF_PLL(pll2_bus);
-DEF_PLL(pll3_usb_otg);
-DEF_PLL(pll4_audio);
-DEF_PLL(pll5_video);
-DEF_PLL(pll6_mlb);
-DEF_PLL(pll7_usb_host);
-DEF_PLL(pll8_enet);
-
-static unsigned long pfd_get_rate(struct clk *clk)
-{
- u64 tmp = (u64) clk_get_rate(clk->parent) * 18;
- u32 frac, bp_frac;
-
- if (apbh_dma_clk.usecount == 0)
- apbh_dma_clk.enable(&apbh_dma_clk);
-
- bp_frac = clk->enable_shift - 7;
- frac = readl_relaxed(clk->enable_reg) >> bp_frac & PFD_FRAC_MASK;
- do_div(tmp, frac);
-
- return tmp;
-}
-
-static int pfd_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 val, frac, bp_frac;
- u64 tmp = (u64) clk_get_rate(clk->parent) * 18;
-
- if (apbh_dma_clk.usecount == 0)
- apbh_dma_clk.enable(&apbh_dma_clk);
-
- /*
- * Round up the divider so that we don't set a rate
- * higher than what is requested
- */
- tmp += rate / 2;
- do_div(tmp, rate);
- frac = tmp;
- frac = (frac < 12) ? 12 : frac;
- frac = (frac > 35) ? 35 : frac;
-
-	/*
-	 * The frac field always sits 7 bits below the
-	 * enable bit of the PFD
-	 */
- bp_frac = clk->enable_shift - 7;
- val = readl_relaxed(clk->enable_reg);
- val &= ~(PFD_FRAC_MASK << bp_frac);
- val |= frac << bp_frac;
- writel_relaxed(val, clk->enable_reg);
-
- tmp = (u64) clk_get_rate(clk->parent) * 18;
- do_div(tmp, frac);
-
- if (apbh_dma_clk.usecount == 0)
- apbh_dma_clk.disable(&apbh_dma_clk);
-
- return 0;
-}
-
-static unsigned long pfd_round_rate(struct clk *clk, unsigned long rate)
-{
- u32 frac;
- u64 tmp;
-
- tmp = (u64) clk_get_rate(clk->parent) * 18;
- tmp += rate / 2;
- do_div(tmp, rate);
- frac = tmp;
- frac = (frac < 12) ? 12 : frac;
- frac = (frac > 35) ? 35 : frac;
- tmp = (u64) clk_get_rate(clk->parent) * 18;
- do_div(tmp, frac);
-
- return tmp;
-}
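
All three PFD helpers above rely on the same relationship, pfd_rate = parent_rate * 18 / frac, with frac clamped to the 12..35 range the fractional dividers support; the tmp += rate / 2 step rounds the divider before the division. A user-space sketch of pfd_round_rate()'s arithmetic; the 528 MHz parent and 400 MHz request are assumed example numbers:

/* Sketch of the PFD divider arithmetic used by pfd_set_rate()/pfd_round_rate(). */
#include <stdio.h>
#include <stdint.h>

static uint64_t pfd_round(uint64_t parent, uint64_t rate)
{
	uint64_t tmp = parent * 18;
	uint32_t frac;

	tmp += rate / 2;		/* round the divider */
	tmp /= rate;
	frac = tmp;
	if (frac < 12)			/* hardware limits: 12 <= frac <= 35 */
		frac = 12;
	if (frac > 35)
		frac = 35;

	return parent * 18 / frac;
}

int main(void)
{
	/* PLL2 (528 MHz) feeding a PFD: frac 24 gives 396 MHz. */
	printf("%llu\n", (unsigned long long)pfd_round(528000000, 400000000));
	return 0;
}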
-
-static int pfd_enable(struct clk *clk)
-{
- u32 val;
-
- if (apbh_dma_clk.usecount == 0)
- apbh_dma_clk.enable(&apbh_dma_clk);
-
- val = readl_relaxed(clk->enable_reg);
- val &= ~(1 << clk->enable_shift);
- writel_relaxed(val, clk->enable_reg);
-
- if (apbh_dma_clk.usecount == 0)
- apbh_dma_clk.disable(&apbh_dma_clk);
-
- return 0;
-}
-
-static void pfd_disable(struct clk *clk)
-{
- u32 val;
-
- if (apbh_dma_clk.usecount == 0)
- apbh_dma_clk.enable(&apbh_dma_clk);
-
- val = readl_relaxed(clk->enable_reg);
- val |= 1 << clk->enable_shift;
- writel_relaxed(val, clk->enable_reg);
-
- if (apbh_dma_clk.usecount == 0)
- apbh_dma_clk.disable(&apbh_dma_clk);
-}
-
-#define DEF_PFD(name, er, es, p) \
- static struct clk name = { \
- .enable_reg = er, \
- .enable_shift = es, \
- .enable = pfd_enable, \
- .disable = pfd_disable, \
- .get_rate = pfd_get_rate, \
- .set_rate = pfd_set_rate, \
- .round_rate = pfd_round_rate, \
- .parent = p, \
- }
-
-DEF_PFD(pll2_pfd_352m, PFD_528, PFD0, &pll2_bus);
-DEF_PFD(pll2_pfd_594m, PFD_528, PFD1, &pll2_bus);
-DEF_PFD(pll2_pfd_400m, PFD_528, PFD2, &pll2_bus);
-DEF_PFD(pll3_pfd_720m, PFD_480, PFD0, &pll3_usb_otg);
-DEF_PFD(pll3_pfd_540m, PFD_480, PFD1, &pll3_usb_otg);
-DEF_PFD(pll3_pfd_508m, PFD_480, PFD2, &pll3_usb_otg);
-DEF_PFD(pll3_pfd_454m, PFD_480, PFD3, &pll3_usb_otg);
-
-static unsigned long twd_clk_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / 2;
-}
-
-static struct clk twd_clk = {
- .parent = &arm_clk,
- .get_rate = twd_clk_get_rate,
-};
-
-static unsigned long pll2_200m_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / 2;
-}
-
-static struct clk pll2_200m = {
- .parent = &pll2_pfd_400m,
- .get_rate = pll2_200m_get_rate,
-};
-
-static unsigned long pll3_120m_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / 4;
-}
-
-static struct clk pll3_120m = {
- .parent = &pll3_usb_otg,
- .get_rate = pll3_120m_get_rate,
-};
-
-static unsigned long pll3_80m_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / 6;
-}
-
-static struct clk pll3_80m = {
- .parent = &pll3_usb_otg,
- .get_rate = pll3_80m_get_rate,
-};
-
-static unsigned long pll3_60m_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / 8;
-}
-
-static struct clk pll3_60m = {
- .parent = &pll3_usb_otg,
- .get_rate = pll3_60m_get_rate,
-};
-
-static int pll1_sw_clk_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 val = readl_relaxed(CCSR);
-
- if (parent == &pll1_sys) {
- val &= ~BM_CCSR_PLL1_SW_SEL;
- val &= ~BM_CCSR_STEP_SEL;
- } else if (parent == &osc_clk) {
- val |= BM_CCSR_PLL1_SW_SEL;
- val &= ~BM_CCSR_STEP_SEL;
- } else if (parent == &pll2_pfd_400m) {
- val |= BM_CCSR_PLL1_SW_SEL;
- val |= BM_CCSR_STEP_SEL;
- } else {
- return -EINVAL;
- }
-
- writel_relaxed(val, CCSR);
-
- return 0;
-}
-
-static struct clk pll1_sw_clk = {
- .parent = &pll1_sys,
- .set_parent = pll1_sw_clk_set_parent,
-};
-
-static void calc_pred_podf_dividers(u32 div, u32 *pred, u32 *podf)
-{
- u32 min_pred, temp_pred, old_err, err;
-
- if (div >= 512) {
- *pred = 8;
- *podf = 64;
- } else if (div >= 8) {
- min_pred = (div - 1) / 64 + 1;
- old_err = 8;
- for (temp_pred = 8; temp_pred >= min_pred; temp_pred--) {
- err = div % temp_pred;
- if (err == 0) {
- *pred = temp_pred;
- break;
- }
- err = temp_pred - err;
- if (err < old_err) {
- old_err = err;
- *pred = temp_pred;
- }
- }
- *podf = (div + *pred - 1) / *pred;
- } else if (div < 8) {
- *pred = div;
- *podf = 1;
- }
-}
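
calc_pred_podf_dividers() above splits one overall divider into a 3-bit pre-divider (1..8) and a 6-bit post-divider (1..64), preferring an exact factorisation and otherwise taking the closest fit and rounding the post-divider up. Below is a standalone copy of the same logic that can be compiled on a host to inspect the split; the sample divider values in main() are arbitrary:

/* Host-side copy of the pred/podf split, for experimenting with divider values. */
#include <stdio.h>
#include <stdint.h>

static void calc_pred_podf(uint32_t div, uint32_t *pred, uint32_t *podf)
{
	uint32_t min_pred, temp_pred, old_err, err;

	if (div >= 512) {			/* cap at 8 * 64 */
		*pred = 8;
		*podf = 64;
	} else if (div >= 8) {
		min_pred = (div - 1) / 64 + 1;	/* smallest pred that keeps podf <= 64 */
		old_err = 8;
		for (temp_pred = 8; temp_pred >= min_pred; temp_pred--) {
			err = div % temp_pred;
			if (err == 0) {		/* exact factorisation wins */
				*pred = temp_pred;
				break;
			}
			err = temp_pred - err;
			if (err < old_err) {	/* otherwise remember the closest fit */
				old_err = err;
				*pred = temp_pred;
			}
		}
		*podf = (div + *pred - 1) / *pred;	/* round up, never undershoot */
	} else {
		*pred = div;
		*podf = 1;
	}
}

int main(void)
{
	uint32_t samples[] = { 3, 12, 50, 100, 600 };
	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint32_t pred = 1, podf = 1;
		calc_pred_podf(samples[i], &pred, &podf);
		printf("div=%u -> pred=%u podf=%u (%u)\n",
		       samples[i], pred, podf, pred * podf);
	}
	return 0;
}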
-
-static int _clk_enable(struct clk *clk)
-{
- u32 reg;
- reg = readl_relaxed(clk->enable_reg);
- reg |= 0x3 << clk->enable_shift;
- writel_relaxed(reg, clk->enable_reg);
-
- return 0;
-}
-
-static void _clk_disable(struct clk *clk)
-{
- u32 reg;
- reg = readl_relaxed(clk->enable_reg);
- reg &= ~(0x3 << clk->enable_shift);
- writel_relaxed(reg, clk->enable_reg);
-}
-
-static int _clk_enable_1b(struct clk *clk)
-{
- u32 reg;
- reg = readl_relaxed(clk->enable_reg);
- reg |= 0x1 << clk->enable_shift;
- writel_relaxed(reg, clk->enable_reg);
-
- return 0;
-}
-
-static void _clk_disable_1b(struct clk *clk)
-{
- u32 reg;
- reg = readl_relaxed(clk->enable_reg);
- reg &= ~(0x1 << clk->enable_shift);
- writel_relaxed(reg, clk->enable_reg);
-}
-
-struct divider {
- struct clk *clk;
- void __iomem *reg;
- u32 bp_pred;
- u32 bm_pred;
- u32 bp_podf;
- u32 bm_podf;
-};
-
-#define DEF_CLK_DIV1(d, c, r, b) \
- static struct divider d = { \
- .clk = c, \
- .reg = r, \
- .bp_podf = BP_##r##_##b##_PODF, \
- .bm_podf = BM_##r##_##b##_PODF, \
- }
-
-DEF_CLK_DIV1(arm_div, &arm_clk, CACRR, ARM);
-DEF_CLK_DIV1(ipg_div, &ipg_clk, CBCDR, IPG);
-DEF_CLK_DIV1(ahb_div, &ahb_clk, CBCDR, AHB);
-DEF_CLK_DIV1(axi_div, &axi_clk, CBCDR, AXI);
-DEF_CLK_DIV1(mmdc_ch0_axi_div, &mmdc_ch0_axi_clk, CBCDR, MMDC_CH0_AXI);
-DEF_CLK_DIV1(mmdc_ch1_axi_div, &mmdc_ch1_axi_clk, CBCDR, MMDC_CH1_AXI);
-DEF_CLK_DIV1(periph_clk2_div, &periph_clk2_clk, CBCDR, PERIPH_CLK2);
-DEF_CLK_DIV1(periph2_clk2_div, &periph2_clk2_clk, CBCDR, PERIPH2_CLK2);
-DEF_CLK_DIV1(gpu2d_core_div, &gpu2d_core_clk, CBCMR, GPU2D_CORE);
-DEF_CLK_DIV1(gpu3d_core_div, &gpu3d_core_clk, CBCMR, GPU3D_CORE);
-DEF_CLK_DIV1(gpu3d_shader_div, &gpu3d_shader_clk, CBCMR, GPU3D_SHADER);
-DEF_CLK_DIV1(ipg_perclk_div, &ipg_perclk, CSCMR1, PERCLK);
-DEF_CLK_DIV1(emi_div, &emi_clk, CSCMR1, EMI);
-DEF_CLK_DIV1(emi_slow_div, &emi_slow_clk, CSCMR1, EMI_SLOW);
-DEF_CLK_DIV1(can_div, &can1_clk, CSCMR2, CAN);
-DEF_CLK_DIV1(uart_div, &uart_clk, CSCDR1, UART);
-DEF_CLK_DIV1(usdhc1_div, &usdhc1_clk, CSCDR1, USDHC1);
-DEF_CLK_DIV1(usdhc2_div, &usdhc2_clk, CSCDR1, USDHC2);
-DEF_CLK_DIV1(usdhc3_div, &usdhc3_clk, CSCDR1, USDHC3);
-DEF_CLK_DIV1(usdhc4_div, &usdhc4_clk, CSCDR1, USDHC4);
-DEF_CLK_DIV1(vpu_div, &vpu_clk, CSCDR1, VPU_AXI);
-DEF_CLK_DIV1(hsi_tx_div, &hsi_tx_clk, CDCDR, HSI_TX);
-DEF_CLK_DIV1(ipu1_di0_pre_div, &ipu1_di0_pre_clk, CHSCCDR, IPU1_DI0_PRE);
-DEF_CLK_DIV1(ipu1_di1_pre_div, &ipu1_di1_pre_clk, CHSCCDR, IPU1_DI1_PRE);
-DEF_CLK_DIV1(ipu2_di0_pre_div, &ipu2_di0_pre_clk, CSCDR2, IPU2_DI0_PRE);
-DEF_CLK_DIV1(ipu2_di1_pre_div, &ipu2_di1_pre_clk, CSCDR2, IPU2_DI1_PRE);
-DEF_CLK_DIV1(ipu1_div, &ipu1_clk, CSCDR3, IPU1_HSP);
-DEF_CLK_DIV1(ipu2_div, &ipu2_clk, CSCDR3, IPU2_HSP);
-DEF_CLK_DIV1(cko1_div, &cko1_clk, CCOSR, CKO1);
-
-#define DEF_CLK_DIV2(d, c, r, b) \
- static struct divider d = { \
- .clk = c, \
- .reg = r, \
- .bp_pred = BP_##r##_##b##_PRED, \
- .bm_pred = BM_##r##_##b##_PRED, \
- .bp_podf = BP_##r##_##b##_PODF, \
- .bm_podf = BM_##r##_##b##_PODF, \
- }
-
-DEF_CLK_DIV2(ssi1_div, &ssi1_clk, CS1CDR, SSI1);
-DEF_CLK_DIV2(ssi3_div, &ssi3_clk, CS1CDR, SSI3);
-DEF_CLK_DIV2(esai_div, &esai_clk, CS1CDR, ESAI);
-DEF_CLK_DIV2(ssi2_div, &ssi2_clk, CS2CDR, SSI2);
-DEF_CLK_DIV2(enfc_div, &enfc_clk, CS2CDR, ENFC);
-DEF_CLK_DIV2(spdif_div, &spdif_clk, CDCDR, SPDIF);
-DEF_CLK_DIV2(asrc_serial_div, &asrc_serial_clk, CDCDR, ASRC_SERIAL);
-
-static struct divider *dividers[] = {
- &arm_div,
- &ipg_div,
- &ahb_div,
- &axi_div,
- &mmdc_ch0_axi_div,
- &mmdc_ch1_axi_div,
- &periph_clk2_div,
- &periph2_clk2_div,
- &gpu2d_core_div,
- &gpu3d_core_div,
- &gpu3d_shader_div,
- &ipg_perclk_div,
- &emi_div,
- &emi_slow_div,
- &can_div,
- &uart_div,
- &usdhc1_div,
- &usdhc2_div,
- &usdhc3_div,
- &usdhc4_div,
- &vpu_div,
- &hsi_tx_div,
- &ipu1_di0_pre_div,
- &ipu1_di1_pre_div,
- &ipu2_di0_pre_div,
- &ipu2_di1_pre_div,
- &ipu1_div,
- &ipu2_div,
- &ssi1_div,
- &ssi3_div,
- &esai_div,
- &ssi2_div,
- &enfc_div,
- &spdif_div,
- &asrc_serial_div,
- &cko1_div,
-};
-
-static unsigned long ldb_di_clk_get_rate(struct clk *clk)
-{
- u32 val = readl_relaxed(CSCMR2);
-
- val &= (clk == &ldb_di0_clk) ? BM_CSCMR2_LDB_DI0_IPU_DIV :
- BM_CSCMR2_LDB_DI1_IPU_DIV;
- if (val)
- return clk_get_rate(clk->parent) / 7;
- else
- return clk_get_rate(clk->parent) * 2 / 7;
-}
-
-static int ldb_di_clk_set_rate(struct clk *clk, unsigned long rate)
-{
-	unsigned long parent_rate = clk_get_rate(clk->parent);
-	/* Use the divider bit that belongs to the DI channel being set */
-	u32 bm = (clk == &ldb_di0_clk) ? BM_CSCMR2_LDB_DI0_IPU_DIV :
-					 BM_CSCMR2_LDB_DI1_IPU_DIV;
-	u32 val = readl_relaxed(CSCMR2);
-
-	if (rate * 7 <= parent_rate + parent_rate / 20)
-		val |= bm;
-	else
-		val &= ~bm;
-
-	writel_relaxed(val, CSCMR2);
-
-	return 0;
-}
-
-static unsigned long ldb_di_clk_round_rate(struct clk *clk, unsigned long rate)
-{
- unsigned long parent_rate = clk_get_rate(clk->parent);
-
- if (rate * 7 <= parent_rate + parent_rate / 20)
- return parent_rate / 7;
- else
- return 2 * parent_rate / 7;
-}
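
The three ldb_di_clk_* helpers above express the fact that the LDB DI clock can only divide its parent by 7 or by 3.5 (i.e. multiply by 2/7), with a 5% slack (parent_rate / 20) to tolerate rounding in the requested pixel clock. A quick sketch of that selection rule; the 454 MHz parent and the two requested rates are assumed example values:

/* Sketch of the LDB DI divider choice: /7 when it fits, otherwise *2/7 (/3.5). */
#include <stdio.h>

int main(void)
{
	unsigned long parent = 454736842;	/* e.g. pll3_pfd_454m, assumed parent */
	unsigned long rates[] = { 65000000, 130000000 };

	for (int i = 0; i < 2; i++) {
		unsigned long rate = rates[i];
		/* allow 5% overshoot before falling back to the 3.5 divider */
		if (rate * 7 <= parent + parent / 20)
			printf("%lu Hz -> /7   = %lu Hz\n", rate, parent / 7);
		else
			printf("%lu Hz -> /3.5 = %lu Hz\n", rate, 2 * parent / 7);
	}
	return 0;
}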
-
-static unsigned long _clk_get_rate(struct clk *clk)
-{
- struct divider *d;
- u32 val, pred, podf;
- int i, num;
-
- if (clk == &ldb_di0_clk || clk == &ldb_di1_clk)
- return ldb_di_clk_get_rate(clk);
-
- num = ARRAY_SIZE(dividers);
- for (i = 0; i < num; i++)
- if (dividers[i]->clk == clk) {
- d = dividers[i];
- break;
- }
- if (i == num)
- return clk_get_rate(clk->parent);
-
- val = readl_relaxed(d->reg);
- pred = ((val & d->bm_pred) >> d->bp_pred) + 1;
- podf = ((val & d->bm_podf) >> d->bp_podf) + 1;
-
- return clk_get_rate(clk->parent) / (pred * podf);
-}
-
-static int clk_busy_wait(struct clk *clk)
-{
- int timeout = 0x100000;
- u32 bm;
-
- if (clk == &axi_clk)
- bm = BM_CDHIPR_AXI_PODF_BUSY;
- else if (clk == &ahb_clk)
- bm = BM_CDHIPR_AHB_PODF_BUSY;
- else if (clk == &mmdc_ch0_axi_clk)
- bm = BM_CDHIPR_MMDC_CH0_PODF_BUSY;
- else if (clk == &periph_clk)
- bm = BM_CDHIPR_PERIPH_SEL_BUSY;
- else if (clk == &arm_clk)
- bm = BM_CDHIPR_ARM_PODF_BUSY;
- else
- return -EINVAL;
-
- while ((readl_relaxed(CDHIPR) & bm) && --timeout)
- cpu_relax();
-
- if (unlikely(!timeout))
- return -EBUSY;
-
- return 0;
-}
-
-static int _clk_set_rate(struct clk *clk, unsigned long rate)
-{
- unsigned long parent_rate = clk_get_rate(clk->parent);
- struct divider *d;
- u32 val, div, max_div, pred = 0, podf;
- int i, num;
-
- if (clk == &ldb_di0_clk || clk == &ldb_di1_clk)
- return ldb_di_clk_set_rate(clk, rate);
-
- num = ARRAY_SIZE(dividers);
- for (i = 0; i < num; i++)
- if (dividers[i]->clk == clk) {
- d = dividers[i];
- break;
- }
- if (i == num)
- return -EINVAL;
-
- max_div = ((d->bm_pred >> d->bp_pred) + 1) *
- ((d->bm_podf >> d->bp_podf) + 1);
-
- div = parent_rate / rate;
- if (div == 0)
- div++;
-
- if ((parent_rate / div != rate) || div > max_div)
- return -EINVAL;
-
- if (d->bm_pred) {
- calc_pred_podf_dividers(div, &pred, &podf);
- } else {
- pred = 1;
- podf = div;
- }
-
- val = readl_relaxed(d->reg);
- val &= ~(d->bm_pred | d->bm_podf);
- val |= (pred - 1) << d->bp_pred | (podf - 1) << d->bp_podf;
- writel_relaxed(val, d->reg);
-
- if (clk == &axi_clk || clk == &ahb_clk ||
- clk == &mmdc_ch0_axi_clk || clk == &arm_clk)
- return clk_busy_wait(clk);
-
- return 0;
-}
-
-static unsigned long _clk_round_rate(struct clk *clk, unsigned long rate)
-{
- unsigned long parent_rate = clk_get_rate(clk->parent);
- u32 div = parent_rate / rate;
- u32 div_max, pred = 0, podf;
- struct divider *d;
- int i, num;
-
- if (clk == &ldb_di0_clk || clk == &ldb_di1_clk)
- return ldb_di_clk_round_rate(clk, rate);
-
- num = ARRAY_SIZE(dividers);
- for (i = 0; i < num; i++)
- if (dividers[i]->clk == clk) {
- d = dividers[i];
- break;
- }
- if (i == num)
- return -EINVAL;
-
- if (div == 0 || parent_rate % rate)
- div++;
-
- if (d->bm_pred) {
- calc_pred_podf_dividers(div, &pred, &podf);
- div = pred * podf;
- } else {
- div_max = (d->bm_podf >> d->bp_podf) + 1;
- if (div > div_max)
- div = div_max;
- }
-
- return parent_rate / div;
-}
-
-struct multiplexer {
- struct clk *clk;
- void __iomem *reg;
- u32 bp;
- u32 bm;
- int pnum;
- struct clk *parents[];
-};
-
-static struct multiplexer axi_mux = {
- .clk = &axi_clk,
- .reg = CBCDR,
- .bp = BP_CBCDR_AXI_SEL,
- .bm = BM_CBCDR_AXI_SEL,
- .parents = {
- &periph_clk,
- &pll2_pfd_400m,
- &pll3_pfd_540m,
- NULL
- },
-};
-
-static struct multiplexer periph_mux = {
- .clk = &periph_clk,
- .reg = CBCDR,
- .bp = BP_CBCDR_PERIPH_CLK_SEL,
- .bm = BM_CBCDR_PERIPH_CLK_SEL,
- .parents = {
- &periph_pre_clk,
- &periph_clk2_clk,
- NULL
- },
-};
-
-static struct multiplexer periph_pre_mux = {
- .clk = &periph_pre_clk,
- .reg = CBCMR,
- .bp = BP_CBCMR_PRE_PERIPH_CLK_SEL,
- .bm = BM_CBCMR_PRE_PERIPH_CLK_SEL,
- .parents = {
- &pll2_bus,
- &pll2_pfd_400m,
- &pll2_pfd_352m,
- &pll2_200m,
- NULL
- },
-};
-
-static struct multiplexer periph_clk2_mux = {
- .clk = &periph_clk2_clk,
- .reg = CBCMR,
- .bp = BP_CBCMR_PERIPH_CLK2_SEL,
- .bm = BM_CBCMR_PERIPH_CLK2_SEL,
- .parents = {
- &pll3_usb_otg,
- &osc_clk,
- NULL
- },
-};
-
-static struct multiplexer periph2_mux = {
- .clk = &periph2_clk,
- .reg = CBCDR,
- .bp = BP_CBCDR_PERIPH2_CLK_SEL,
- .bm = BM_CBCDR_PERIPH2_CLK_SEL,
- .parents = {
- &periph2_pre_clk,
- &periph2_clk2_clk,
- NULL
- },
-};
-
-static struct multiplexer periph2_pre_mux = {
- .clk = &periph2_pre_clk,
- .reg = CBCMR,
- .bp = BP_CBCMR_PRE_PERIPH2_CLK_SEL,
- .bm = BM_CBCMR_PRE_PERIPH2_CLK_SEL,
- .parents = {
- &pll2_bus,
- &pll2_pfd_400m,
- &pll2_pfd_352m,
- &pll2_200m,
- NULL
- },
-};
-
-static struct multiplexer periph2_clk2_mux = {
- .clk = &periph2_clk2_clk,
- .reg = CBCMR,
- .bp = BP_CBCMR_PERIPH2_CLK2_SEL,
- .bm = BM_CBCMR_PERIPH2_CLK2_SEL,
- .parents = {
- &pll3_usb_otg,
- &osc_clk,
- NULL
- },
-};
-
-static struct multiplexer gpu2d_axi_mux = {
- .clk = &gpu2d_axi_clk,
- .reg = CBCMR,
- .bp = BP_CBCMR_GPU2D_AXI_SEL,
- .bm = BM_CBCMR_GPU2D_AXI_SEL,
- .parents = {
- &axi_clk,
- &ahb_clk,
- NULL
- },
-};
-
-static struct multiplexer gpu3d_axi_mux = {
- .clk = &gpu3d_axi_clk,
- .reg = CBCMR,
- .bp = BP_CBCMR_GPU3D_AXI_SEL,
- .bm = BM_CBCMR_GPU3D_AXI_SEL,
- .parents = {
- &axi_clk,
- &ahb_clk,
- NULL
- },
-};
-
-static struct multiplexer gpu3d_core_mux = {
- .clk = &gpu3d_core_clk,
- .reg = CBCMR,
- .bp = BP_CBCMR_GPU3D_CORE_SEL,
- .bm = BM_CBCMR_GPU3D_CORE_SEL,
- .parents = {
- &mmdc_ch0_axi_clk,
- &pll3_usb_otg,
- &pll2_pfd_594m,
- &pll2_pfd_400m,
- NULL
- },
-};
-
-static struct multiplexer gpu3d_shader_mux = {
- .clk = &gpu3d_shader_clk,
- .reg = CBCMR,
- .bp = BP_CBCMR_GPU3D_SHADER_SEL,
- .bm = BM_CBCMR_GPU3D_SHADER_SEL,
- .parents = {
- &mmdc_ch0_axi_clk,
- &pll3_usb_otg,
- &pll2_pfd_594m,
- &pll3_pfd_720m,
- NULL
- },
-};
-
-static struct multiplexer pcie_axi_mux = {
- .clk = &pcie_clk,
- .reg = CBCMR,
- .bp = BP_CBCMR_PCIE_AXI_SEL,
- .bm = BM_CBCMR_PCIE_AXI_SEL,
- .parents = {
- &axi_clk,
- &ahb_clk,
- NULL
- },
-};
-
-static struct multiplexer vdo_axi_mux = {
- .clk = &vdo_axi_clk,
- .reg = CBCMR,
- .bp = BP_CBCMR_VDO_AXI_SEL,
- .bm = BM_CBCMR_VDO_AXI_SEL,
- .parents = {
- &axi_clk,
- &ahb_clk,
- NULL
- },
-};
-
-static struct multiplexer vpu_axi_mux = {
- .clk = &vpu_clk,
- .reg = CBCMR,
- .bp = BP_CBCMR_VPU_AXI_SEL,
- .bm = BM_CBCMR_VPU_AXI_SEL,
- .parents = {
- &axi_clk,
- &pll2_pfd_400m,
- &pll2_pfd_352m,
- NULL
- },
-};
-
-static struct multiplexer gpu2d_core_mux = {
- .clk = &gpu2d_core_clk,
- .reg = CBCMR,
- .bp = BP_CBCMR_GPU2D_CORE_SEL,
- .bm = BM_CBCMR_GPU2D_CORE_SEL,
- .parents = {
- &axi_clk,
- &pll3_usb_otg,
- &pll2_pfd_352m,
- &pll2_pfd_400m,
- NULL
- },
-};
-
-#define DEF_SSI_MUX(id) \
- static struct multiplexer ssi##id##_mux = { \
- .clk = &ssi##id##_clk, \
- .reg = CSCMR1, \
- .bp = BP_CSCMR1_SSI##id##_SEL, \
- .bm = BM_CSCMR1_SSI##id##_SEL, \
- .parents = { \
- &pll3_pfd_508m, \
- &pll3_pfd_454m, \
- &pll4_audio, \
- NULL \
- }, \
- }
-
-DEF_SSI_MUX(1);
-DEF_SSI_MUX(2);
-DEF_SSI_MUX(3);
-
-#define DEF_USDHC_MUX(id) \
- static struct multiplexer usdhc##id##_mux = { \
- .clk = &usdhc##id##_clk, \
- .reg = CSCMR1, \
- .bp = BP_CSCMR1_USDHC##id##_SEL, \
- .bm = BM_CSCMR1_USDHC##id##_SEL, \
- .parents = { \
- &pll2_pfd_400m, \
- &pll2_pfd_352m, \
- NULL \
- }, \
- }
-
-DEF_USDHC_MUX(1);
-DEF_USDHC_MUX(2);
-DEF_USDHC_MUX(3);
-DEF_USDHC_MUX(4);
-
-static struct multiplexer emi_mux = {
- .clk = &emi_clk,
- .reg = CSCMR1,
- .bp = BP_CSCMR1_EMI_SEL,
- .bm = BM_CSCMR1_EMI_SEL,
- .parents = {
- &axi_clk,
- &pll3_usb_otg,
- &pll2_pfd_400m,
- &pll2_pfd_352m,
- NULL
- },
-};
-
-static struct multiplexer emi_slow_mux = {
- .clk = &emi_slow_clk,
- .reg = CSCMR1,
- .bp = BP_CSCMR1_EMI_SLOW_SEL,
- .bm = BM_CSCMR1_EMI_SLOW_SEL,
- .parents = {
- &axi_clk,
- &pll3_usb_otg,
- &pll2_pfd_400m,
- &pll2_pfd_352m,
- NULL
- },
-};
-
-static struct multiplexer esai_mux = {
- .clk = &esai_clk,
- .reg = CSCMR2,
- .bp = BP_CSCMR2_ESAI_SEL,
- .bm = BM_CSCMR2_ESAI_SEL,
- .parents = {
- &pll4_audio,
- &pll3_pfd_508m,
- &pll3_pfd_454m,
- &pll3_usb_otg,
- NULL
- },
-};
-
-#define DEF_LDB_DI_MUX(id) \
- static struct multiplexer ldb_di##id##_mux = { \
- .clk = &ldb_di##id##_clk, \
- .reg = CS2CDR, \
- .bp = BP_CS2CDR_LDB_DI##id##_SEL, \
- .bm = BM_CS2CDR_LDB_DI##id##_SEL, \
- .parents = { \
- &pll5_video, \
- &pll2_pfd_352m, \
- &pll2_pfd_400m, \
- &pll3_pfd_540m, \
- &pll3_usb_otg, \
- NULL \
- }, \
- }
-
-DEF_LDB_DI_MUX(0);
-DEF_LDB_DI_MUX(1);
-
-static struct multiplexer enfc_mux = {
- .clk = &enfc_clk,
- .reg = CS2CDR,
- .bp = BP_CS2CDR_ENFC_SEL,
- .bm = BM_CS2CDR_ENFC_SEL,
- .parents = {
- &pll2_pfd_352m,
- &pll2_bus,
- &pll3_usb_otg,
- &pll2_pfd_400m,
- NULL
- },
-};
-
-static struct multiplexer spdif_mux = {
- .clk = &spdif_clk,
- .reg = CDCDR,
- .bp = BP_CDCDR_SPDIF_SEL,
- .bm = BM_CDCDR_SPDIF_SEL,
- .parents = {
- &pll4_audio,
- &pll3_pfd_508m,
- &pll3_pfd_454m,
- &pll3_usb_otg,
- NULL
- },
-};
-
-static struct multiplexer asrc_serial_mux = {
- .clk = &asrc_serial_clk,
- .reg = CDCDR,
- .bp = BP_CDCDR_ASRC_SERIAL_SEL,
- .bm = BM_CDCDR_ASRC_SERIAL_SEL,
- .parents = {
- &pll4_audio,
- &pll3_pfd_508m,
- &pll3_pfd_454m,
- &pll3_usb_otg,
- NULL
- },
-};
-
-static struct multiplexer hsi_tx_mux = {
- .clk = &hsi_tx_clk,
- .reg = CDCDR,
- .bp = BP_CDCDR_HSI_TX_SEL,
- .bm = BM_CDCDR_HSI_TX_SEL,
- .parents = {
- &pll3_120m,
- &pll2_pfd_400m,
- NULL
- },
-};
-
-#define DEF_IPU_DI_PRE_MUX(r, i, d) \
- static struct multiplexer ipu##i##_di##d##_pre_mux = { \
- .clk = &ipu##i##_di##d##_pre_clk, \
- .reg = r, \
- .bp = BP_##r##_IPU##i##_DI##d##_PRE_SEL, \
- .bm = BM_##r##_IPU##i##_DI##d##_PRE_SEL, \
- .parents = { \
- &mmdc_ch0_axi_clk, \
- &pll3_usb_otg, \
- &pll5_video, \
- &pll2_pfd_352m, \
- &pll2_pfd_400m, \
- &pll3_pfd_540m, \
- NULL \
- }, \
- }
-
-DEF_IPU_DI_PRE_MUX(CHSCCDR, 1, 0);
-DEF_IPU_DI_PRE_MUX(CHSCCDR, 1, 1);
-DEF_IPU_DI_PRE_MUX(CSCDR2, 2, 0);
-DEF_IPU_DI_PRE_MUX(CSCDR2, 2, 1);
-
-#define DEF_IPU_DI_MUX(r, i, d) \
- static struct multiplexer ipu##i##_di##d##_mux = { \
- .clk = &ipu##i##_di##d##_clk, \
- .reg = r, \
- .bp = BP_##r##_IPU##i##_DI##d##_SEL, \
- .bm = BM_##r##_IPU##i##_DI##d##_SEL, \
- .parents = { \
- &ipu##i##_di##d##_pre_clk, \
- &dummy_clk, \
- &dummy_clk, \
- &ldb_di0_clk, \
- &ldb_di1_clk, \
- NULL \
- }, \
- }
-
-DEF_IPU_DI_MUX(CHSCCDR, 1, 0);
-DEF_IPU_DI_MUX(CHSCCDR, 1, 1);
-DEF_IPU_DI_MUX(CSCDR2, 2, 0);
-DEF_IPU_DI_MUX(CSCDR2, 2, 1);
-
-#define DEF_IPU_MUX(id) \
- static struct multiplexer ipu##id##_mux = { \
- .clk = &ipu##id##_clk, \
- .reg = CSCDR3, \
- .bp = BP_CSCDR3_IPU##id##_HSP_SEL, \
- .bm = BM_CSCDR3_IPU##id##_HSP_SEL, \
- .parents = { \
- &mmdc_ch0_axi_clk, \
- &pll2_pfd_400m, \
- &pll3_120m, \
- &pll3_pfd_540m, \
- NULL \
- }, \
- }
-
-DEF_IPU_MUX(1);
-DEF_IPU_MUX(2);
-
-static struct multiplexer cko1_mux = {
- .clk = &cko1_clk,
- .reg = CCOSR,
- .bp = BP_CCOSR_CKO1_SEL,
- .bm = BM_CCOSR_CKO1_SEL,
- .parents = {
- &pll3_usb_otg,
- &pll2_bus,
- &pll1_sys,
- &pll5_video,
- &dummy_clk,
- &axi_clk,
- &enfc_clk,
- &ipu1_di0_clk,
- &ipu1_di1_clk,
- &ipu2_di0_clk,
- &ipu2_di1_clk,
- &ahb_clk,
- &ipg_clk,
- &ipg_perclk,
- &ckil_clk,
- &pll4_audio,
- NULL
- },
-};
-
-static struct multiplexer *multiplexers[] = {
- &axi_mux,
- &periph_mux,
- &periph_pre_mux,
- &periph_clk2_mux,
- &periph2_mux,
- &periph2_pre_mux,
- &periph2_clk2_mux,
- &gpu2d_axi_mux,
- &gpu3d_axi_mux,
- &gpu3d_core_mux,
- &gpu3d_shader_mux,
- &pcie_axi_mux,
- &vdo_axi_mux,
- &vpu_axi_mux,
- &gpu2d_core_mux,
- &ssi1_mux,
- &ssi2_mux,
- &ssi3_mux,
- &usdhc1_mux,
- &usdhc2_mux,
- &usdhc3_mux,
- &usdhc4_mux,
- &emi_mux,
- &emi_slow_mux,
- &esai_mux,
- &ldb_di0_mux,
- &ldb_di1_mux,
- &enfc_mux,
- &spdif_mux,
- &asrc_serial_mux,
- &hsi_tx_mux,
- &ipu1_di0_pre_mux,
- &ipu1_di0_mux,
- &ipu1_di1_pre_mux,
- &ipu1_di1_mux,
- &ipu2_di0_pre_mux,
- &ipu2_di0_mux,
- &ipu2_di1_pre_mux,
- &ipu2_di1_mux,
- &ipu1_mux,
- &ipu2_mux,
- &cko1_mux,
-};
-
-static int _clk_set_parent(struct clk *clk, struct clk *parent)
-{
- struct multiplexer *m;
- int i, num;
- u32 val;
-
- num = ARRAY_SIZE(multiplexers);
- for (i = 0; i < num; i++)
- if (multiplexers[i]->clk == clk) {
- m = multiplexers[i];
- break;
- }
- if (i == num)
- return -EINVAL;
-
- i = 0;
- while (m->parents[i]) {
- if (parent == m->parents[i])
- break;
- i++;
- }
- if (!m->parents[i] || m->parents[i] == &dummy_clk)
- return -EINVAL;
-
- val = readl_relaxed(m->reg);
- val &= ~m->bm;
- val |= i << m->bp;
- writel_relaxed(val, m->reg);
-
- if (clk == &periph_clk)
- return clk_busy_wait(clk);
-
- return 0;
-}
-
-#define DEF_NG_CLK(name, p) \
- static struct clk name = { \
- .get_rate = _clk_get_rate, \
- .set_rate = _clk_set_rate, \
- .round_rate = _clk_round_rate, \
- .set_parent = _clk_set_parent, \
- .parent = p, \
- }
-
-DEF_NG_CLK(periph_clk2_clk, &osc_clk);
-DEF_NG_CLK(periph_pre_clk, &pll2_bus);
-DEF_NG_CLK(periph_clk, &periph_pre_clk);
-DEF_NG_CLK(periph2_clk2_clk, &osc_clk);
-DEF_NG_CLK(periph2_pre_clk, &pll2_bus);
-DEF_NG_CLK(periph2_clk, &periph2_pre_clk);
-DEF_NG_CLK(axi_clk, &periph_clk);
-DEF_NG_CLK(emi_clk, &axi_clk);
-DEF_NG_CLK(arm_clk, &pll1_sw_clk);
-DEF_NG_CLK(ahb_clk, &periph_clk);
-DEF_NG_CLK(ipg_clk, &ahb_clk);
-DEF_NG_CLK(ipg_perclk, &ipg_clk);
-DEF_NG_CLK(ipu1_di0_pre_clk, &pll3_pfd_540m);
-DEF_NG_CLK(ipu1_di1_pre_clk, &pll3_pfd_540m);
-DEF_NG_CLK(ipu2_di0_pre_clk, &pll3_pfd_540m);
-DEF_NG_CLK(ipu2_di1_pre_clk, &pll3_pfd_540m);
-DEF_NG_CLK(asrc_serial_clk, &pll3_usb_otg);
-
-#define DEF_CLK(name, er, es, p, s) \
- static struct clk name = { \
- .enable_reg = er, \
- .enable_shift = es, \
- .enable = _clk_enable, \
- .disable = _clk_disable, \
- .get_rate = _clk_get_rate, \
- .set_rate = _clk_set_rate, \
- .round_rate = _clk_round_rate, \
- .set_parent = _clk_set_parent, \
- .parent = p, \
- .secondary = s, \
- }
-
-#define DEF_CLK_1B(name, er, es, p, s) \
- static struct clk name = { \
- .enable_reg = er, \
- .enable_shift = es, \
- .enable = _clk_enable_1b, \
- .disable = _clk_disable_1b, \
- .get_rate = _clk_get_rate, \
- .set_rate = _clk_set_rate, \
- .round_rate = _clk_round_rate, \
- .set_parent = _clk_set_parent, \
- .parent = p, \
- .secondary = s, \
- }
-
-DEF_CLK(aips_tz1_clk, CCGR0, CG0, &ahb_clk, NULL);
-DEF_CLK(aips_tz2_clk, CCGR0, CG1, &ahb_clk, NULL);
-DEF_CLK(apbh_dma_clk, CCGR0, CG2, &ahb_clk, NULL);
-DEF_CLK(asrc_clk, CCGR0, CG3, &pll4_audio, NULL);
-DEF_CLK(can1_serial_clk, CCGR0, CG8, &pll3_usb_otg, NULL);
-DEF_CLK(can1_clk, CCGR0, CG7, &pll3_usb_otg, &can1_serial_clk);
-DEF_CLK(can2_serial_clk, CCGR0, CG10, &pll3_usb_otg, NULL);
-DEF_CLK(can2_clk, CCGR0, CG9, &pll3_usb_otg, &can2_serial_clk);
-DEF_CLK(ecspi1_clk, CCGR1, CG0, &pll3_60m, NULL);
-DEF_CLK(ecspi2_clk, CCGR1, CG1, &pll3_60m, NULL);
-DEF_CLK(ecspi3_clk, CCGR1, CG2, &pll3_60m, NULL);
-DEF_CLK(ecspi4_clk, CCGR1, CG3, &pll3_60m, NULL);
-DEF_CLK(ecspi5_clk, CCGR1, CG4, &pll3_60m, NULL);
-DEF_CLK(enet_clk, CCGR1, CG5, &ipg_clk, NULL);
-DEF_CLK(esai_clk, CCGR1, CG8, &pll3_usb_otg, NULL);
-DEF_CLK(gpt_serial_clk, CCGR1, CG11, &ipg_perclk, NULL);
-DEF_CLK(gpt_clk, CCGR1, CG10, &ipg_perclk, &gpt_serial_clk);
-DEF_CLK(gpu2d_core_clk, CCGR1, CG12, &pll2_pfd_352m, &gpu2d_axi_clk);
-DEF_CLK(gpu3d_core_clk, CCGR1, CG13, &pll2_pfd_594m, &gpu3d_axi_clk);
-DEF_CLK(gpu3d_shader_clk, CCGR1, CG13, &pll3_pfd_720m, &gpu3d_axi_clk);
-DEF_CLK(hdmi_iahb_clk, CCGR2, CG0, &ahb_clk, NULL);
-DEF_CLK(hdmi_isfr_clk, CCGR2, CG2, &pll3_pfd_540m, &hdmi_iahb_clk);
-DEF_CLK(i2c1_clk, CCGR2, CG3, &ipg_perclk, NULL);
-DEF_CLK(i2c2_clk, CCGR2, CG4, &ipg_perclk, NULL);
-DEF_CLK(i2c3_clk, CCGR2, CG5, &ipg_perclk, NULL);
-DEF_CLK(iim_clk, CCGR2, CG6, &ipg_clk, NULL);
-DEF_CLK(enfc_clk, CCGR2, CG7, &pll2_pfd_352m, NULL);
-DEF_CLK(ipu1_clk, CCGR3, CG0, &mmdc_ch0_axi_clk, NULL);
-DEF_CLK(ipu1_di0_clk, CCGR3, CG1, &ipu1_di0_pre_clk, NULL);
-DEF_CLK(ipu1_di1_clk, CCGR3, CG2, &ipu1_di1_pre_clk, NULL);
-DEF_CLK(ipu2_clk, CCGR3, CG3, &mmdc_ch0_axi_clk, NULL);
-DEF_CLK(ipu2_di0_clk, CCGR3, CG4, &ipu2_di0_pre_clk, NULL);
-DEF_CLK(ipu2_di1_clk, CCGR3, CG5, &ipu2_di1_pre_clk, NULL);
-DEF_CLK(ldb_di0_clk, CCGR3, CG6, &pll3_pfd_540m, NULL);
-DEF_CLK(ldb_di1_clk, CCGR3, CG7, &pll3_pfd_540m, NULL);
-DEF_CLK(hsi_tx_clk, CCGR3, CG8, &pll2_pfd_400m, NULL);
-DEF_CLK(mlb_clk, CCGR3, CG9, &pll6_mlb, NULL);
-DEF_CLK(mmdc_ch0_ipg_clk, CCGR3, CG12, &ipg_clk, NULL);
-DEF_CLK(mmdc_ch0_axi_clk, CCGR3, CG10, &periph_clk, &mmdc_ch0_ipg_clk);
-DEF_CLK(mmdc_ch1_ipg_clk, CCGR3, CG13, &ipg_clk, NULL);
-DEF_CLK(mmdc_ch1_axi_clk, CCGR3, CG11, &periph2_clk, &mmdc_ch1_ipg_clk);
-DEF_CLK(openvg_axi_clk, CCGR3, CG13, &axi_clk, NULL);
-DEF_CLK(pwm1_clk, CCGR4, CG8, &ipg_perclk, NULL);
-DEF_CLK(pwm2_clk, CCGR4, CG9, &ipg_perclk, NULL);
-DEF_CLK(pwm3_clk, CCGR4, CG10, &ipg_perclk, NULL);
-DEF_CLK(pwm4_clk, CCGR4, CG11, &ipg_perclk, NULL);
-DEF_CLK(gpmi_bch_apb_clk, CCGR4, CG12, &usdhc3_clk, NULL);
-DEF_CLK(gpmi_bch_clk, CCGR4, CG13, &usdhc4_clk, &gpmi_bch_apb_clk);
-DEF_CLK(gpmi_apb_clk, CCGR4, CG15, &usdhc3_clk, &gpmi_bch_clk);
-DEF_CLK(gpmi_io_clk, CCGR4, CG14, &enfc_clk, &gpmi_apb_clk);
-DEF_CLK(sdma_clk, CCGR5, CG3, &ahb_clk, NULL);
-DEF_CLK(spba_clk, CCGR5, CG6, &ipg_clk, NULL);
-DEF_CLK(spdif_clk, CCGR5, CG7, &pll3_usb_otg, &spba_clk);
-DEF_CLK(ssi1_clk, CCGR5, CG9, &pll3_pfd_508m, NULL);
-DEF_CLK(ssi2_clk, CCGR5, CG10, &pll3_pfd_508m, NULL);
-DEF_CLK(ssi3_clk, CCGR5, CG11, &pll3_pfd_508m, NULL);
-DEF_CLK(uart_serial_clk, CCGR5, CG13, &pll3_usb_otg, NULL);
-DEF_CLK(uart_clk, CCGR5, CG12, &pll3_80m, &uart_serial_clk);
-DEF_CLK(usboh3_clk, CCGR6, CG0, &ipg_clk, NULL);
-DEF_CLK(usdhc1_clk, CCGR6, CG1, &pll2_pfd_400m, NULL);
-DEF_CLK(usdhc2_clk, CCGR6, CG2, &pll2_pfd_400m, NULL);
-DEF_CLK(usdhc3_clk, CCGR6, CG3, &pll2_pfd_400m, NULL);
-DEF_CLK(usdhc4_clk, CCGR6, CG4, &pll2_pfd_400m, NULL);
-DEF_CLK(emi_slow_clk, CCGR6, CG5, &axi_clk, NULL);
-DEF_CLK(vdo_axi_clk, CCGR6, CG6, &axi_clk, NULL);
-DEF_CLK(vpu_clk, CCGR6, CG7, &axi_clk, NULL);
-DEF_CLK_1B(cko1_clk, CCOSR, BP_CCOSR_CKO1_EN, &pll2_bus, NULL);
-
-static int pcie_clk_enable(struct clk *clk)
-{
- u32 val;
-
- val = readl_relaxed(PLL8_ENET);
- val |= BM_PLL_ENET_EN_PCIE;
- writel_relaxed(val, PLL8_ENET);
-
- return _clk_enable(clk);
-}
-
-static void pcie_clk_disable(struct clk *clk)
-{
- u32 val;
-
- _clk_disable(clk);
-
- val = readl_relaxed(PLL8_ENET);
-	val &= ~BM_PLL_ENET_EN_PCIE;
- writel_relaxed(val, PLL8_ENET);
-}
-
-static struct clk pcie_clk = {
- .enable_reg = CCGR4,
- .enable_shift = CG0,
- .enable = pcie_clk_enable,
- .disable = pcie_clk_disable,
- .set_parent = _clk_set_parent,
- .parent = &axi_clk,
- .secondary = &pll8_enet,
-};
-
-static int sata_clk_enable(struct clk *clk)
-{
- u32 val;
-
- val = readl_relaxed(PLL8_ENET);
- val |= BM_PLL_ENET_EN_SATA;
- writel_relaxed(val, PLL8_ENET);
-
- return _clk_enable(clk);
-}
-
-static void sata_clk_disable(struct clk *clk)
-{
- u32 val;
-
- _clk_disable(clk);
-
- val = readl_relaxed(PLL8_ENET);
-	val &= ~BM_PLL_ENET_EN_SATA;
- writel_relaxed(val, PLL8_ENET);
-}
-
-static struct clk sata_clk = {
- .enable_reg = CCGR5,
- .enable_shift = CG2,
- .enable = sata_clk_enable,
- .disable = sata_clk_disable,
- .parent = &ipg_clk,
- .secondary = &pll8_enet,
-};
-
-#define _REGISTER_CLOCK(d, n, c) \
- { \
- .dev_id = d, \
- .con_id = n, \
- .clk = &c, \
- }
-
-static struct clk_lookup lookups[] = {
- _REGISTER_CLOCK("2020000.uart", NULL, uart_clk),
- _REGISTER_CLOCK("21e8000.uart", NULL, uart_clk),
- _REGISTER_CLOCK("21ec000.uart", NULL, uart_clk),
- _REGISTER_CLOCK("21f0000.uart", NULL, uart_clk),
- _REGISTER_CLOCK("21f4000.uart", NULL, uart_clk),
- _REGISTER_CLOCK("2188000.enet", NULL, enet_clk),
- _REGISTER_CLOCK("2190000.usdhc", NULL, usdhc1_clk),
- _REGISTER_CLOCK("2194000.usdhc", NULL, usdhc2_clk),
- _REGISTER_CLOCK("2198000.usdhc", NULL, usdhc3_clk),
- _REGISTER_CLOCK("219c000.usdhc", NULL, usdhc4_clk),
- _REGISTER_CLOCK("21a0000.i2c", NULL, i2c1_clk),
- _REGISTER_CLOCK("21a4000.i2c", NULL, i2c2_clk),
- _REGISTER_CLOCK("21a8000.i2c", NULL, i2c3_clk),
- _REGISTER_CLOCK("2008000.ecspi", NULL, ecspi1_clk),
- _REGISTER_CLOCK("200c000.ecspi", NULL, ecspi2_clk),
- _REGISTER_CLOCK("2010000.ecspi", NULL, ecspi3_clk),
- _REGISTER_CLOCK("2014000.ecspi", NULL, ecspi4_clk),
- _REGISTER_CLOCK("2018000.ecspi", NULL, ecspi5_clk),
- _REGISTER_CLOCK("20ec000.sdma", NULL, sdma_clk),
- _REGISTER_CLOCK("20bc000.wdog", NULL, dummy_clk),
- _REGISTER_CLOCK("20c0000.wdog", NULL, dummy_clk),
- _REGISTER_CLOCK("smp_twd", NULL, twd_clk),
- _REGISTER_CLOCK(NULL, "ckih", ckih_clk),
- _REGISTER_CLOCK(NULL, "ckil_clk", ckil_clk),
- _REGISTER_CLOCK(NULL, "aips_tz1_clk", aips_tz1_clk),
- _REGISTER_CLOCK(NULL, "aips_tz2_clk", aips_tz2_clk),
- _REGISTER_CLOCK(NULL, "asrc_clk", asrc_clk),
- _REGISTER_CLOCK(NULL, "can2_clk", can2_clk),
- _REGISTER_CLOCK(NULL, "hdmi_isfr_clk", hdmi_isfr_clk),
- _REGISTER_CLOCK(NULL, "iim_clk", iim_clk),
- _REGISTER_CLOCK(NULL, "mlb_clk", mlb_clk),
- _REGISTER_CLOCK(NULL, "openvg_axi_clk", openvg_axi_clk),
- _REGISTER_CLOCK(NULL, "pwm1_clk", pwm1_clk),
- _REGISTER_CLOCK(NULL, "pwm2_clk", pwm2_clk),
- _REGISTER_CLOCK(NULL, "pwm3_clk", pwm3_clk),
- _REGISTER_CLOCK(NULL, "pwm4_clk", pwm4_clk),
- _REGISTER_CLOCK(NULL, "gpmi_io_clk", gpmi_io_clk),
- _REGISTER_CLOCK(NULL, "usboh3_clk", usboh3_clk),
- _REGISTER_CLOCK(NULL, "sata_clk", sata_clk),
- _REGISTER_CLOCK(NULL, "cko1_clk", cko1_clk),
-};
-
-int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
-{
- u32 val = readl_relaxed(CLPCR);
-
- val &= ~BM_CLPCR_LPM;
- switch (mode) {
- case WAIT_CLOCKED:
- break;
- case WAIT_UNCLOCKED:
- val |= 0x1 << BP_CLPCR_LPM;
- break;
- case STOP_POWER_ON:
- val |= 0x2 << BP_CLPCR_LPM;
- break;
- case WAIT_UNCLOCKED_POWER_OFF:
- val |= 0x1 << BP_CLPCR_LPM;
- val &= ~BM_CLPCR_VSTBY;
- val &= ~BM_CLPCR_SBYOS;
- break;
- case STOP_POWER_OFF:
- val |= 0x2 << BP_CLPCR_LPM;
- val |= 0x3 << BP_CLPCR_STBY_COUNT;
- val |= BM_CLPCR_VSTBY;
- val |= BM_CLPCR_SBYOS;
- break;
- default:
- return -EINVAL;
- }
- writel_relaxed(val, CLPCR);
-
- return 0;
-}
-
-static struct map_desc imx6q_clock_desc[] = {
- imx_map_entry(MX6Q, CCM, MT_DEVICE),
- imx_map_entry(MX6Q, ANATOP, MT_DEVICE),
-};
-
-void __init imx6q_clock_map_io(void)
-{
- iotable_init(imx6q_clock_desc, ARRAY_SIZE(imx6q_clock_desc));
-}
-
-int __init mx6q_clocks_init(void)
-{
- struct device_node *np;
- void __iomem *base;
- int i, irq;
-
-	/* retrieve the frequency of fixed clocks from device tree */
- for_each_compatible_node(np, NULL, "fixed-clock") {
- u32 rate;
- if (of_property_read_u32(np, "clock-frequency", &rate))
- continue;
-
- if (of_device_is_compatible(np, "fsl,imx-ckil"))
- external_low_reference = rate;
- else if (of_device_is_compatible(np, "fsl,imx-ckih1"))
- external_high_reference = rate;
- else if (of_device_is_compatible(np, "fsl,imx-osc"))
- oscillator_reference = rate;
- }
-
- for (i = 0; i < ARRAY_SIZE(lookups); i++)
- clkdev_add(&lookups[i]);
-
- /* only keep necessary clocks on */
- writel_relaxed(0x3 << CG0 | 0x3 << CG1 | 0x3 << CG2, CCGR0);
- writel_relaxed(0x3 << CG8 | 0x3 << CG9 | 0x3 << CG10, CCGR2);
- writel_relaxed(0x3 << CG10 | 0x3 << CG12, CCGR3);
- writel_relaxed(0x3 << CG4 | 0x3 << CG6 | 0x3 << CG7, CCGR4);
- writel_relaxed(0x3 << CG0, CCGR5);
- writel_relaxed(0, CCGR6);
- writel_relaxed(0, CCGR7);
-
- clk_enable(&uart_clk);
- clk_enable(&mmdc_ch0_axi_clk);
-
- clk_set_rate(&pll4_audio, FREQ_650M);
- clk_set_rate(&pll5_video, FREQ_650M);
- clk_set_parent(&ipu1_di0_clk, &ipu1_di0_pre_clk);
- clk_set_parent(&ipu1_di0_pre_clk, &pll5_video);
- clk_set_parent(&gpu3d_shader_clk, &pll2_pfd_594m);
- clk_set_rate(&gpu3d_shader_clk, FREQ_594M);
- clk_set_parent(&gpu3d_core_clk, &mmdc_ch0_axi_clk);
- clk_set_rate(&gpu3d_core_clk, FREQ_528M);
- clk_set_parent(&asrc_serial_clk, &pll3_usb_otg);
- clk_set_rate(&asrc_serial_clk, 1500000);
- clk_set_rate(&enfc_clk, 11000000);
-
- /*
- * Until the pinctrl API is available, we have to rely on the pad
- * configuration set up by the bootloader. For the usdhc case here,
- * U-Boot sets up the pads for 49.5 MHz, so we have to lower the
- * usdhc clock from 198 MHz to 49.5 MHz to match that pad configuration.
- *
- * FIXME: This should be removed once the pinctrl API is available.
- * At that point, the usdhc driver can call the pinctrl API to change
- * the pad configuration dynamically to match the usdhc clock setting.
- */
- clk_set_rate(&usdhc1_clk, 49500000);
- clk_set_rate(&usdhc2_clk, 49500000);
- clk_set_rate(&usdhc3_clk, 49500000);
- clk_set_rate(&usdhc4_clk, 49500000);
-
- clk_set_parent(&cko1_clk, &ahb_clk);
-
- np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
- base = of_iomap(np, 0);
- WARN_ON(!base);
- irq = irq_of_parse_and_map(np, 0);
- mxc_timer_init(&gpt_clk, base, irq);
-
- return 0;
-}
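
For context, a hedged sketch (assuming the legacy, pre-common-clk API in <linux/clk.h>) of how a driver ends up with one of the clocks registered in the lookups[] table above: clkdev matches the dev_id string ("2190000.usdhc" and friends, derived from the DT node name) when the driver calls clk_get() with a NULL con_id, and the 49.5 MHz rate set in the FIXME block above is what clk_get_rate() then reports. The probe function itself is hypothetical.

/* illustrative consumer only */
static int example_usdhc_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = clk_get(&pdev->dev, NULL);	/* matched via dev_id "2190000.usdhc" */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_enable(clk);
	dev_info(&pdev->dev, "clock rate: %lu Hz\n", clk_get_rate(clk)); /* 49500000 */
	return 0;
}
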
diff --git a/arch/arm/mach-imx/clock-mx51-mx53.c b/arch/arm/mach-imx/clock-mx51-mx53.c
deleted file mode 100644
index 08470504a08..00000000000
--- a/arch/arm/mach-imx/clock-mx51-mx53.c
+++ /dev/null
@@ -1,1675 +0,0 @@
-/*
- * Copyright 2008-2010 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright (C) 2009-2010 Amit Kucheria <amit.kucheria@canonical.com>
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
-#include <linux/of.h>
-
-#include <asm/div64.h>
-
-#include <mach/hardware.h>
-#include <mach/common.h>
-#include <mach/clock.h>
-
-#include "crm-regs-imx5.h"
-
-/* External clock values passed-in by the board code */
-static unsigned long external_high_reference, external_low_reference;
-static unsigned long oscillator_reference, ckih2_reference;
-
-static struct clk osc_clk;
-static struct clk pll1_main_clk;
-static struct clk pll1_sw_clk;
-static struct clk pll2_sw_clk;
-static struct clk pll3_sw_clk;
-static struct clk mx53_pll4_sw_clk;
-static struct clk lp_apm_clk;
-static struct clk periph_apm_clk;
-static struct clk ahb_clk;
-static struct clk ipg_clk;
-static struct clk usboh3_clk;
-static struct clk emi_fast_clk;
-static struct clk ipu_clk;
-static struct clk mipi_hsc1_clk;
-static struct clk esdhc1_clk;
-static struct clk esdhc2_clk;
-static struct clk esdhc3_mx53_clk;
-
-#define MAX_DPLL_WAIT_TRIES 1000 /* 1000 * udelay(1) = 1ms */
-
-/* calculate best pre and post dividers to get the required divider */
-static void __calc_pre_post_dividers(u32 div, u32 *pre, u32 *post,
- u32 max_pre, u32 max_post)
-{
- if (div >= max_pre * max_post) {
- *pre = max_pre;
- *post = max_post;
- } else if (div >= max_pre) {
- u32 min_pre, temp_pre, old_err, err;
- min_pre = DIV_ROUND_UP(div, max_post);
- old_err = max_pre;
- for (temp_pre = max_pre; temp_pre >= min_pre; temp_pre--) {
- err = div % temp_pre;
- if (err == 0) {
- *pre = temp_pre;
- break;
- }
- err = temp_pre - err;
- if (err < old_err) {
- old_err = err;
- *pre = temp_pre;
- }
- }
- *post = DIV_ROUND_UP(div, *pre);
- } else {
- *pre = div;
- *post = 1;
- }
-}
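
A worked example of the divider split performed by the function above, assuming the common 3-bit pred/podf fields (max_pre = max_post = 8):

/* illustrative call with an overall divide-by-12 */
u32 pre, post;

__calc_pre_post_dividers(12, &pre, &post, 8, 8);
/* 12 < 8 * 8 and 12 >= 8, so the middle branch scans pre from 8 down:  */
/* 12 % 8 = 4, 12 % 7 = 5, 12 % 6 = 0 -> exact hit at pre = 6           */
/* result: pre = 6, post = DIV_ROUND_UP(12, 6) = 2, i.e. 6 * 2 = 12     */
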
-
-static void _clk_ccgr_setclk(struct clk *clk, unsigned mode)
-{
- u32 reg = __raw_readl(clk->enable_reg);
-
- reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
- reg |= mode << clk->enable_shift;
-
- __raw_writel(reg, clk->enable_reg);
-}
-
-static int _clk_ccgr_enable(struct clk *clk)
-{
- _clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_ON);
- return 0;
-}
-
-static void _clk_ccgr_disable(struct clk *clk)
-{
- _clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_OFF);
-}
-
-static int _clk_ccgr_enable_inrun(struct clk *clk)
-{
- _clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_IDLE);
- return 0;
-}
-
-static void _clk_ccgr_disable_inwait(struct clk *clk)
-{
- _clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_IDLE);
-}
-
-/*
- * For the 4-to-1 muxed input clock
- */
-static inline u32 _get_mux(struct clk *parent, struct clk *m0,
- struct clk *m1, struct clk *m2, struct clk *m3)
-{
- if (parent == m0)
- return 0;
- else if (parent == m1)
- return 1;
- else if (parent == m2)
- return 2;
- else if (parent == m3)
- return 3;
- else
- BUG();
-
- return -EINVAL;
-}
-
-static inline void __iomem *_mx51_get_pll_base(struct clk *pll)
-{
- if (pll == &pll1_main_clk)
- return MX51_DPLL1_BASE;
- else if (pll == &pll2_sw_clk)
- return MX51_DPLL2_BASE;
- else if (pll == &pll3_sw_clk)
- return MX51_DPLL3_BASE;
- else
- BUG();
-
- return NULL;
-}
-
-static inline void __iomem *_mx53_get_pll_base(struct clk *pll)
-{
- if (pll == &pll1_main_clk)
- return MX53_DPLL1_BASE;
- else if (pll == &pll2_sw_clk)
- return MX53_DPLL2_BASE;
- else if (pll == &pll3_sw_clk)
- return MX53_DPLL3_BASE;
- else if (pll == &mx53_pll4_sw_clk)
- return MX53_DPLL4_BASE;
- else
- BUG();
-
- return NULL;
-}
-
-static inline void __iomem *_get_pll_base(struct clk *pll)
-{
- if (cpu_is_mx51())
- return _mx51_get_pll_base(pll);
- else
- return _mx53_get_pll_base(pll);
-}
-
-static unsigned long clk_pll_get_rate(struct clk *clk)
-{
- long mfi, mfn, mfd, pdf, ref_clk, mfn_abs;
- unsigned long dp_op, dp_mfd, dp_mfn, dp_ctl, pll_hfsm, dbl;
- void __iomem *pllbase;
- s64 temp;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- pllbase = _get_pll_base(clk);
-
- dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
- pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
- dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN;
-
- if (pll_hfsm == 0) {
- dp_op = __raw_readl(pllbase + MXC_PLL_DP_OP);
- dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_MFD);
- dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_MFN);
- } else {
- dp_op = __raw_readl(pllbase + MXC_PLL_DP_HFS_OP);
- dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFD);
- dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFN);
- }
- pdf = dp_op & MXC_PLL_DP_OP_PDF_MASK;
- mfi = (dp_op & MXC_PLL_DP_OP_MFI_MASK) >> MXC_PLL_DP_OP_MFI_OFFSET;
- mfi = (mfi <= 5) ? 5 : mfi;
- mfd = dp_mfd & MXC_PLL_DP_MFD_MASK;
- mfn = mfn_abs = dp_mfn & MXC_PLL_DP_MFN_MASK;
- /* Sign extend to 32-bits */
- if (mfn >= 0x04000000) {
- mfn |= 0xFC000000;
- mfn_abs = -mfn;
- }
-
- ref_clk = 2 * parent_rate;
- if (dbl != 0)
- ref_clk *= 2;
-
- ref_clk /= (pdf + 1);
- temp = (u64) ref_clk * mfn_abs;
- do_div(temp, mfd + 1);
- if (mfn < 0)
- temp = -temp;
- temp = (ref_clk * mfi) + temp;
-
- return temp;
-}
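
In other words, the DPLL output is rate = ref * MFI + ref * MFN / (MFD + 1), with ref = 2 * parent (doubled again when DPDCK0_2 is set) divided by (PDF + 1). A worked example with assumed register values:

/*
 * Assumed: parent = 24 MHz, DPDCK0_2 set, PDF = 0
 *   ref  = 24 MHz * 2 * 2 / (0 + 1) = 96 MHz
 *   MFI = 8, MFD = 999999, MFN = 333333
 *   rate = 96 MHz * 8 + 96 MHz * 333333 / 1000000
 *        = 768 MHz + ~32 MHz ~ 800 MHz
 */
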
-
-static int _clk_pll_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 reg;
- void __iomem *pllbase;
-
- long mfi, pdf, mfn, mfd = 999999;
- s64 temp64;
- unsigned long quad_parent_rate;
- unsigned long pll_hfsm, dp_ctl;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- pllbase = _get_pll_base(clk);
-
- quad_parent_rate = 4 * parent_rate;
- pdf = mfi = -1;
- while (++pdf < 16 && mfi < 5)
- mfi = rate * (pdf+1) / quad_parent_rate;
- if (mfi > 15)
- return -EINVAL;
- pdf--;
-
- temp64 = rate * (pdf+1) - quad_parent_rate * mfi;
- do_div(temp64, quad_parent_rate/1000000);
- mfn = (long)temp64;
-
- dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
- /* use dpdck0_2 */
- __raw_writel(dp_ctl | 0x1000L, pllbase + MXC_PLL_DP_CTL);
- pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
- if (pll_hfsm == 0) {
- reg = mfi << 4 | pdf;
- __raw_writel(reg, pllbase + MXC_PLL_DP_OP);
- __raw_writel(mfd, pllbase + MXC_PLL_DP_MFD);
- __raw_writel(mfn, pllbase + MXC_PLL_DP_MFN);
- } else {
- reg = mfi << 4 | pdf;
- __raw_writel(reg, pllbase + MXC_PLL_DP_HFS_OP);
- __raw_writel(mfd, pllbase + MXC_PLL_DP_HFS_MFD);
- __raw_writel(mfn, pllbase + MXC_PLL_DP_HFS_MFN);
- }
-
- return 0;
-}
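
Running the same assumed numbers through the setter above gives the inverse of the get_rate example:

/*
 * Assumed: rate = 800 MHz, parent = 24 MHz
 *   quad_parent_rate = 96 MHz
 *   the loop exits with pdf = 0 and mfi = 800 / 96 = 8 (integer division)
 *   mfn = (800000000 * 1 - 96000000 * 8) / (96000000 / 1000000)
 *       = 32000000 / 96 = 333333
 * which reproduces the MFI/MFD/MFN values used in the clk_pll_get_rate()
 * example above.
 */
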
-
-static int _clk_pll_enable(struct clk *clk)
-{
- u32 reg;
- void __iomem *pllbase;
- int i = 0;
-
- pllbase = _get_pll_base(clk);
- reg = __raw_readl(pllbase + MXC_PLL_DP_CTL);
- if (reg & MXC_PLL_DP_CTL_UPEN)
- return 0;
-
- reg |= MXC_PLL_DP_CTL_UPEN;
- __raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
-
- /* Wait for lock */
- do {
- reg = __raw_readl(pllbase + MXC_PLL_DP_CTL);
- if (reg & MXC_PLL_DP_CTL_LRF)
- break;
-
- udelay(1);
- } while (++i < MAX_DPLL_WAIT_TRIES);
-
- if (i == MAX_DPLL_WAIT_TRIES) {
- pr_err("MX5: pll locking failed\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void _clk_pll_disable(struct clk *clk)
-{
- u32 reg;
- void __iomem *pllbase;
-
- pllbase = _get_pll_base(clk);
- reg = __raw_readl(pllbase + MXC_PLL_DP_CTL) & ~MXC_PLL_DP_CTL_UPEN;
- __raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
-}
-
-static int _clk_pll1_sw_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 reg, step;
-
- reg = __raw_readl(MXC_CCM_CCSR);
-
- /* When switching from pll1_main_clk to a bypass clock, first select a
- * multiplexed clock in 'step_sel', then shift the glitchless mux
- * 'pll1_sw_clk_sel'.
- *
- * When switching back, do it in reverse order.
- */
- if (parent == &pll1_main_clk) {
- /* Switch to pll1_main_clk */
- reg &= ~MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
- __raw_writel(reg, MXC_CCM_CCSR);
- /* step_clk mux switched to lp_apm, to save power. */
- reg = __raw_readl(MXC_CCM_CCSR);
- reg &= ~MXC_CCM_CCSR_STEP_SEL_MASK;
- reg |= (MXC_CCM_CCSR_STEP_SEL_LP_APM <<
- MXC_CCM_CCSR_STEP_SEL_OFFSET);
- } else {
- if (parent == &lp_apm_clk) {
- step = MXC_CCM_CCSR_STEP_SEL_LP_APM;
- } else if (parent == &pll2_sw_clk) {
- step = MXC_CCM_CCSR_STEP_SEL_PLL2_DIVIDED;
- } else if (parent == &pll3_sw_clk) {
- step = MXC_CCM_CCSR_STEP_SEL_PLL3_DIVIDED;
- } else
- return -EINVAL;
-
- reg &= ~MXC_CCM_CCSR_STEP_SEL_MASK;
- reg |= (step << MXC_CCM_CCSR_STEP_SEL_OFFSET);
-
- __raw_writel(reg, MXC_CCM_CCSR);
- /* Switch to step_clk */
- reg = __raw_readl(MXC_CCM_CCSR);
- reg |= MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
- }
- __raw_writel(reg, MXC_CCM_CCSR);
- return 0;
-}
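
As a usage sketch (hypothetical caller, not part of this patch): CPU frequency scaling code would reparent pll1_sw_clk onto a bypass source while PLL1 is being reprogrammed, relying on the step_sel-before-mux ordering handled inside the function above:

/* illustrative DVFS-style sequence using the clocks declared in this file */
clk_set_parent(&pll1_sw_clk, &pll2_sw_clk);	/* run the core from the bypass path */
/* ... reprogram and relock PLL1 here ... */
clk_set_parent(&pll1_sw_clk, &pll1_main_clk);	/* switch back; reverse order is internal */
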
-
-static unsigned long clk_pll1_sw_get_rate(struct clk *clk)
-{
- u32 reg, div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- reg = __raw_readl(MXC_CCM_CCSR);
-
- if (clk->parent == &pll2_sw_clk) {
- div = ((reg & MXC_CCM_CCSR_PLL2_PODF_MASK) >>
- MXC_CCM_CCSR_PLL2_PODF_OFFSET) + 1;
- } else if (clk->parent == &pll3_sw_clk) {
- div = ((reg & MXC_CCM_CCSR_PLL3_PODF_MASK) >>
- MXC_CCM_CCSR_PLL3_PODF_OFFSET) + 1;
- } else
- div = 1;
- return parent_rate / div;
-}
-
-static int _clk_pll2_sw_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 reg;
-
- reg = __raw_readl(MXC_CCM_CCSR);
-
- if (parent == &pll2_sw_clk)
- reg &= ~MXC_CCM_CCSR_PLL2_SW_CLK_SEL;
- else
- reg |= MXC_CCM_CCSR_PLL2_SW_CLK_SEL;
-
- __raw_writel(reg, MXC_CCM_CCSR);
- return 0;
-}
-
-static int _clk_lp_apm_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 reg;
-
- if (parent == &osc_clk)
- reg = __raw_readl(MXC_CCM_CCSR) & ~MXC_CCM_CCSR_LP_APM_SEL;
- else
- return -EINVAL;
-
- __raw_writel(reg, MXC_CCM_CCSR);
-
- return 0;
-}
-
-static unsigned long clk_cpu_get_rate(struct clk *clk)
-{
- u32 cacrr, div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
- cacrr = __raw_readl(MXC_CCM_CACRR);
- div = (cacrr & MXC_CCM_CACRR_ARM_PODF_MASK) + 1;
-
- return parent_rate / div;
-}
-
-static int clk_cpu_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 reg, cpu_podf;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
- cpu_podf = parent_rate / rate - 1;
- /* use post divider to change freq */
- reg = __raw_readl(MXC_CCM_CACRR);
- reg &= ~MXC_CCM_CACRR_ARM_PODF_MASK;
- reg |= cpu_podf << MXC_CCM_CACRR_ARM_PODF_OFFSET;
- __raw_writel(reg, MXC_CCM_CACRR);
-
- return 0;
-}
-
-static int _clk_periph_apm_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 reg, mux;
- int i = 0;
-
- mux = _get_mux(parent, &pll1_sw_clk, &pll3_sw_clk, &lp_apm_clk, NULL);
-
- reg = __raw_readl(MXC_CCM_CBCMR) & ~MXC_CCM_CBCMR_PERIPH_CLK_SEL_MASK;
- reg |= mux << MXC_CCM_CBCMR_PERIPH_CLK_SEL_OFFSET;
- __raw_writel(reg, MXC_CCM_CBCMR);
-
- /* Wait for lock */
- do {
- reg = __raw_readl(MXC_CCM_CDHIPR);
- if (!(reg & MXC_CCM_CDHIPR_PERIPH_CLK_SEL_BUSY))
- break;
-
- udelay(1);
- } while (++i < MAX_DPLL_WAIT_TRIES);
-
- if (i == MAX_DPLL_WAIT_TRIES) {
- pr_err("MX5: Set parent for periph_apm clock failed\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int _clk_main_bus_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 reg;
-
- reg = __raw_readl(MXC_CCM_CBCDR);
-
- if (parent == &pll2_sw_clk)
- reg &= ~MXC_CCM_CBCDR_PERIPH_CLK_SEL;
- else if (parent == &periph_apm_clk)
- reg |= MXC_CCM_CBCDR_PERIPH_CLK_SEL;
- else
- return -EINVAL;
-
- __raw_writel(reg, MXC_CCM_CBCDR);
-
- return 0;
-}
-
-static struct clk main_bus_clk = {
- .parent = &pll2_sw_clk,
- .set_parent = _clk_main_bus_set_parent,
-};
-
-static unsigned long clk_ahb_get_rate(struct clk *clk)
-{
- u32 reg, div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- reg = __raw_readl(MXC_CCM_CBCDR);
- div = ((reg & MXC_CCM_CBCDR_AHB_PODF_MASK) >>
- MXC_CCM_CBCDR_AHB_PODF_OFFSET) + 1;
- return parent_rate / div;
-}
-
-
-static int _clk_ahb_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 reg, div;
- unsigned long parent_rate;
- int i = 0;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
- if (div > 8 || div < 1 || ((parent_rate / div) != rate))
- return -EINVAL;
-
- reg = __raw_readl(MXC_CCM_CBCDR);
- reg &= ~MXC_CCM_CBCDR_AHB_PODF_MASK;
- reg |= (div - 1) << MXC_CCM_CBCDR_AHB_PODF_OFFSET;
- __raw_writel(reg, MXC_CCM_CBCDR);
-
- /* Wait for lock */
- do {
- reg = __raw_readl(MXC_CCM_CDHIPR);
- if (!(reg & MXC_CCM_CDHIPR_AHB_PODF_BUSY))
- break;
-
- udelay(1);
- } while (++i < MAX_DPLL_WAIT_TRIES);
-
- if (i == MAX_DPLL_WAIT_TRIES) {
- pr_err("MX5: clk_ahb_set_rate failed\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static unsigned long _clk_ahb_round_rate(struct clk *clk,
- unsigned long rate)
-{
- u32 div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- div = parent_rate / rate;
- if (div > 8)
- div = 8;
- else if (div == 0)
- div++;
- return parent_rate / div;
-}
-
-
-static int _clk_max_enable(struct clk *clk)
-{
- u32 reg;
-
- _clk_ccgr_enable(clk);
-
- /* Handshake with MAX when LPM is entered. */
- reg = __raw_readl(MXC_CCM_CLPCR);
- if (cpu_is_mx51())
- reg &= ~MX51_CCM_CLPCR_BYPASS_MAX_LPM_HS;
- else if (cpu_is_mx53())
- reg &= ~MX53_CCM_CLPCR_BYPASS_MAX_LPM_HS;
- __raw_writel(reg, MXC_CCM_CLPCR);
-
- return 0;
-}
-
-static void _clk_max_disable(struct clk *clk)
-{
- u32 reg;
-
- _clk_ccgr_disable_inwait(clk);
-
- /* No handshake with MAX when LPM is entered, as it is disabled. */
- reg = __raw_readl(MXC_CCM_CLPCR);
- if (cpu_is_mx51())
- reg |= MX51_CCM_CLPCR_BYPASS_MAX_LPM_HS;
- else if (cpu_is_mx53())
- reg &= ~MX53_CCM_CLPCR_BYPASS_MAX_LPM_HS;
- __raw_writel(reg, MXC_CCM_CLPCR);
-}
-
-static unsigned long clk_ipg_get_rate(struct clk *clk)
-{
- u32 reg, div;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- reg = __raw_readl(MXC_CCM_CBCDR);
- div = ((reg & MXC_CCM_CBCDR_IPG_PODF_MASK) >>
- MXC_CCM_CBCDR_IPG_PODF_OFFSET) + 1;
-
- return parent_rate / div;
-}
-
-static unsigned long clk_ipg_per_get_rate(struct clk *clk)
-{
- u32 reg, prediv1, prediv2, podf;
- unsigned long parent_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- if (clk->parent == &main_bus_clk || clk->parent == &lp_apm_clk) {
- /* the main_bus_clk is the one before the DVFS engine */
- reg = __raw_readl(MXC_CCM_CBCDR);
- prediv1 = ((reg & MXC_CCM_CBCDR_PERCLK_PRED1_MASK) >>
- MXC_CCM_CBCDR_PERCLK_PRED1_OFFSET) + 1;
- prediv2 = ((reg & MXC_CCM_CBCDR_PERCLK_PRED2_MASK) >>
- MXC_CCM_CBCDR_PERCLK_PRED2_OFFSET) + 1;
- podf = ((reg & MXC_CCM_CBCDR_PERCLK_PODF_MASK) >>
- MXC_CCM_CBCDR_PERCLK_PODF_OFFSET) + 1;
- return parent_rate / (prediv1 * prediv2 * podf);
- } else if (clk->parent == &ipg_clk)
- return parent_rate;
- else
- BUG();
-}
-
-static int _clk_ipg_per_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 reg;
-
- reg = __raw_readl(MXC_CCM_CBCMR);
-
- reg &= ~MXC_CCM_CBCMR_PERCLK_LP_APM_CLK_SEL;
- reg &= ~MXC_CCM_CBCMR_PERCLK_IPG_CLK_SEL;
-
- if (parent == &ipg_clk)
- reg |= MXC_CCM_CBCMR_PERCLK_IPG_CLK_SEL;
- else if (parent == &lp_apm_clk)
- reg |= MXC_CCM_CBCMR_PERCLK_LP_APM_CLK_SEL;
- else if (parent != &main_bus_clk)
- return -EINVAL;
-
- __raw_writel(reg, MXC_CCM_CBCMR);
-
- return 0;
-}
-
-#define clk_nfc_set_parent NULL
-
-static unsigned long clk_nfc_get_rate(struct clk *clk)
-{
- unsigned long rate;
- u32 reg, div;
-
- reg = __raw_readl(MXC_CCM_CBCDR);
- div = ((reg & MXC_CCM_CBCDR_NFC_PODF_MASK) >>
- MXC_CCM_CBCDR_NFC_PODF_OFFSET) + 1;
- rate = clk_get_rate(clk->parent) / div;
- WARN_ON(rate == 0);
- return rate;
-}
-
-static unsigned long clk_nfc_round_rate(struct clk *clk,
- unsigned long rate)
-{
- u32 div;
- unsigned long parent_rate = clk_get_rate(clk->parent);
-
- if (!rate)
- return -EINVAL;
-
- div = parent_rate / rate;
-
- if (parent_rate % rate)
- div++;
-
- if (div > 8)
- return -EINVAL;
-
- return parent_rate / div;
-
-}
-
-static int clk_nfc_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 reg, div;
-
- div = clk_get_rate(clk->parent) / rate;
- if (div == 0)
- div++;
- if (((clk_get_rate(clk->parent) / div) != rate) || (div > 8))
- return -EINVAL;
-
- reg = __raw_readl(MXC_CCM_CBCDR);
- reg &= ~MXC_CCM_CBCDR_NFC_PODF_MASK;
- reg |= (div - 1) << MXC_CCM_CBCDR_NFC_PODF_OFFSET;
- __raw_writel(reg, MXC_CCM_CBCDR);
-
- while (__raw_readl(MXC_CCM_CDHIPR) &
- MXC_CCM_CDHIPR_NFC_IPG_INT_MEM_PODF_BUSY){
- }
-
- return 0;
-}
-
-static unsigned long get_high_reference_clock_rate(struct clk *clk)
-{
- return external_high_reference;
-}
-
-static unsigned long get_low_reference_clock_rate(struct clk *clk)
-{
- return external_low_reference;
-}
-
-static unsigned long get_oscillator_reference_clock_rate(struct clk *clk)
-{
- return oscillator_reference;
-}
-
-static unsigned long get_ckih2_reference_clock_rate(struct clk *clk)
-{
- return ckih2_reference;
-}
-
-static unsigned long clk_emi_slow_get_rate(struct clk *clk)
-{
- u32 reg, div;
-
- reg = __raw_readl(MXC_CCM_CBCDR);
- div = ((reg & MXC_CCM_CBCDR_EMI_PODF_MASK) >>
- MXC_CCM_CBCDR_EMI_PODF_OFFSET) + 1;
-
- return clk_get_rate(clk->parent) / div;
-}
-
-static unsigned long _clk_ddr_hf_get_rate(struct clk *clk)
-{
- unsigned long rate;
- u32 reg, div;
-
- reg = __raw_readl(MXC_CCM_CBCDR);
- div = ((reg & MXC_CCM_CBCDR_DDR_PODF_MASK) >>
- MXC_CCM_CBCDR_DDR_PODF_OFFSET) + 1;
- rate = clk_get_rate(clk->parent) / div;
-
- return rate;
-}
-
-/* External high frequency clock */
-static struct clk ckih_clk = {
- .get_rate = get_high_reference_clock_rate,
-};
-
-static struct clk ckih2_clk = {
- .get_rate = get_ckih2_reference_clock_rate,
-};
-
-static struct clk osc_clk = {
- .get_rate = get_oscillator_reference_clock_rate,
-};
-
-/* External low frequency (32kHz) clock */
-static struct clk ckil_clk = {
- .get_rate = get_low_reference_clock_rate,
-};
-
-static struct clk pll1_main_clk = {
- .parent = &osc_clk,
- .get_rate = clk_pll_get_rate,
- .enable = _clk_pll_enable,
- .disable = _clk_pll_disable,
-};
-
-/* Clock tree block diagram (WIP):
- * CCM: Clock Controller Module
- *
- * PLL output -> |
- * | CCM Switcher -> CCM_CLK_ROOT_GEN ->
- * PLL bypass -> |
- *
- */
-
-/* PLL1 SW supplies to ARM core */
-static struct clk pll1_sw_clk = {
- .parent = &pll1_main_clk,
- .set_parent = _clk_pll1_sw_set_parent,
- .get_rate = clk_pll1_sw_get_rate,
-};
-
-/* PLL2 SW supplies to AXI/AHB/IP buses */
-static struct clk pll2_sw_clk = {
- .parent = &osc_clk,
- .get_rate = clk_pll_get_rate,
- .set_rate = _clk_pll_set_rate,
- .set_parent = _clk_pll2_sw_set_parent,
- .enable = _clk_pll_enable,
- .disable = _clk_pll_disable,
-};
-
-/* PLL3 SW supplies to serial clocks like USB, SSI, etc. */
-static struct clk pll3_sw_clk = {
- .parent = &osc_clk,
- .set_rate = _clk_pll_set_rate,
- .get_rate = clk_pll_get_rate,
- .enable = _clk_pll_enable,
- .disable = _clk_pll_disable,
-};
-
-/* PLL4 SW supplies to LVDS Display Bridge (LDB) */
-static struct clk mx53_pll4_sw_clk = {
- .parent = &osc_clk,
- .set_rate = _clk_pll_set_rate,
- .enable = _clk_pll_enable,
- .disable = _clk_pll_disable,
-};
-
-/* Low-power Audio Playback Mode clock */
-static struct clk lp_apm_clk = {
- .parent = &osc_clk,
- .set_parent = _clk_lp_apm_set_parent,
-};
-
-static struct clk periph_apm_clk = {
- .parent = &pll1_sw_clk,
- .set_parent = _clk_periph_apm_set_parent,
-};
-
-static struct clk cpu_clk = {
- .parent = &pll1_sw_clk,
- .get_rate = clk_cpu_get_rate,
- .set_rate = clk_cpu_set_rate,
-};
-
-static struct clk ahb_clk = {
- .parent = &main_bus_clk,
- .get_rate = clk_ahb_get_rate,
- .set_rate = _clk_ahb_set_rate,
- .round_rate = _clk_ahb_round_rate,
-};
-
-static struct clk iim_clk = {
- .parent = &ipg_clk,
- .enable_reg = MXC_CCM_CCGR0,
- .enable_shift = MXC_CCM_CCGRx_CG15_OFFSET,
-};
-
-/* Main IP interface clock for access to registers */
-static struct clk ipg_clk = {
- .parent = &ahb_clk,
- .get_rate = clk_ipg_get_rate,
-};
-
-static struct clk ipg_perclk = {
- .parent = &lp_apm_clk,
- .get_rate = clk_ipg_per_get_rate,
- .set_parent = _clk_ipg_per_set_parent,
-};
-
-static struct clk ahb_max_clk = {
- .parent = &ahb_clk,
- .enable_reg = MXC_CCM_CCGR0,
- .enable_shift = MXC_CCM_CCGRx_CG14_OFFSET,
- .enable = _clk_max_enable,
- .disable = _clk_max_disable,
-};
-
-static struct clk aips_tz1_clk = {
- .parent = &ahb_clk,
- .secondary = &ahb_max_clk,
- .enable_reg = MXC_CCM_CCGR0,
- .enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
- .enable = _clk_ccgr_enable,
- .disable = _clk_ccgr_disable_inwait,
-};
-
-static struct clk aips_tz2_clk = {
- .parent = &ahb_clk,
- .secondary = &ahb_max_clk,
- .enable_reg = MXC_CCM_CCGR0,
- .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
- .enable = _clk_ccgr_enable,
- .disable = _clk_ccgr_disable_inwait,
-};
-
-static struct clk gpc_dvfs_clk = {
- .enable_reg = MXC_CCM_CCGR5,
- .enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
- .enable = _clk_ccgr_enable,
- .disable = _clk_ccgr_disable,
-};
-
-static struct clk gpt_32k_clk = {
- .id = 0,
- .parent = &ckil_clk,
-};
-
-static struct clk dummy_clk = {
- .id = 0,
-};
-
-static struct clk emi_slow_clk = {
- .parent = &pll2_sw_clk,
- .enable_reg = MXC_CCM_CCGR5,
- .enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
- .enable = _clk_ccgr_enable,
- .disable = _clk_ccgr_disable_inwait,
- .get_rate = clk_emi_slow_get_rate,
-};
-
-static int clk_ipu_enable(struct clk *clk)
-{
- u32 reg;
-
- _clk_ccgr_enable(clk);
-
- /* Enable handshake with IPU when certain clock rates are changed */
- reg = __raw_readl(MXC_CCM_CCDR);
- reg &= ~MXC_CCM_CCDR_IPU_HS_MASK;
- __raw_writel(reg, MXC_CCM_CCDR);
-
- /* Enable handshake with IPU when LPM is entered */
- reg = __raw_readl(MXC_CCM_CLPCR);
- reg &= ~MXC_CCM_CLPCR_BYPASS_IPU_LPM_HS;
- __raw_writel(reg, MXC_CCM_CLPCR);
-
- return 0;
-}
-
-static void clk_ipu_disable(struct clk *clk)
-{
- u32 reg;
-
- _clk_ccgr_disable(clk);
-
- /* Disable handshake with IPU when dividers are changed */
- reg = __raw_readl(MXC_CCM_CCDR);
- reg |= MXC_CCM_CCDR_IPU_HS_MASK;
- __raw_writel(reg, MXC_CCM_CCDR);
-
- /* Disable handshake with IPU when LPM is entered */
- reg = __raw_readl(MXC_CCM_CLPCR);
- reg |= MXC_CCM_CLPCR_BYPASS_IPU_LPM_HS;
- __raw_writel(reg, MXC_CCM_CLPCR);
-}
-
-static struct clk ahbmux1_clk = {
- .parent = &ahb_clk,
- .secondary = &ahb_max_clk,
- .enable_reg = MXC_CCM_CCGR0,
- .enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
- .enable = _clk_ccgr_enable,
- .disable = _clk_ccgr_disable_inwait,
-};
-
-static struct clk ipu_sec_clk = {
- .parent = &emi_fast_clk,
- .secondary = &ahbmux1_clk,
-};
-
-static struct clk ddr_hf_clk = {
- .parent = &pll1_sw_clk,
- .get_rate = _clk_ddr_hf_get_rate,
-};
-
-static struct clk ddr_clk = {
- .parent = &ddr_hf_clk,
-};
-
-/* clock definitions for MIPI HSC unit which has been removed
- * from documentation, but not from hardware
- */
-static int _clk_hsc_enable(struct clk *clk)
-{
- u32 reg;
-
- _clk_ccgr_enable(clk);
- /* Handshake with IPU when certain clock rates are changed. */
- reg = __raw_readl(MXC_CCM_CCDR);
- reg &= ~MXC_CCM_CCDR_HSC_HS_MASK;
- __raw_writel(reg, MXC_CCM_CCDR);
-
- reg = __raw_readl(MXC_CCM_CLPCR);
- reg &= ~MXC_CCM_CLPCR_BYPASS_HSC_LPM_HS;
- __raw_writel(reg, MXC_CCM_CLPCR);
-
- return 0;
-}
-
-static void _clk_hsc_disable(struct clk *clk)
-{
- u32 reg;
-
- _clk_ccgr_disable(clk);
- /* No handshake with HSC as it is not enabled. */
- reg = __raw_readl(MXC_CCM_CCDR);
- reg |= MXC_CCM_CCDR_HSC_HS_MASK;
- __raw_writel(reg, MXC_CCM_CCDR);
-
- reg = __raw_readl(MXC_CCM_CLPCR);
- reg |= MXC_CCM_CLPCR_BYPASS_HSC_LPM_HS;
- __raw_writel(reg, MXC_CCM_CLPCR);
-}
-
-static struct clk mipi_hsp_clk = {
- .parent = &ipu_clk,
- .enable_reg = MXC_CCM_CCGR4,
- .enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
- .enable = _clk_hsc_enable,
- .disable = _clk_hsc_disable,
- .secondary = &mipi_hsc1_clk,
-};
-
-#define DEFINE_CLOCK_CCGR(name, i, er, es, pfx, p, s) \
- static struct clk name = { \
- .id = i, \
- .enable_reg = er, \
- .enable_shift = es, \
- .get_rate = pfx##_get_rate, \
- .set_rate = pfx##_set_rate, \
- .round_rate = pfx##_round_rate, \
- .set_parent = pfx##_set_parent, \
- .enable = _clk_ccgr_enable, \
- .disable = _clk_ccgr_disable, \
- .parent = p, \
- .secondary = s, \
- }
-
-#define DEFINE_CLOCK_MAX(name, i, er, es, pfx, p, s) \
- static struct clk name = { \
- .id = i, \
- .enable_reg = er, \
- .enable_shift = es, \
- .get_rate = pfx##_get_rate, \
- .set_rate = pfx##_set_rate, \
- .set_parent = pfx##_set_parent, \
- .enable = _clk_max_enable, \
- .disable = _clk_max_disable, \
- .parent = p, \
- .secondary = s, \
- }
-
-#define CLK_GET_RATE(name, nr, bitsname) \
-static unsigned long clk_##name##_get_rate(struct clk *clk) \
-{ \
- u32 reg, pred, podf; \
- \
- reg = __raw_readl(MXC_CCM_CSCDR##nr); \
- pred = (reg & MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_MASK) \
- >> MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_OFFSET; \
- podf = (reg & MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_MASK) \
- >> MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_OFFSET; \
- \
- return DIV_ROUND_CLOSEST(clk_get_rate(clk->parent), \
- (pred + 1) * (podf + 1)); \
-}
-
-#define CLK_SET_PARENT(name, nr, bitsname) \
-static int clk_##name##_set_parent(struct clk *clk, struct clk *parent) \
-{ \
- u32 reg, mux; \
- \
- mux = _get_mux(parent, &pll1_sw_clk, &pll2_sw_clk, \
- &pll3_sw_clk, &lp_apm_clk); \
- reg = __raw_readl(MXC_CCM_CSCMR##nr) & \
- ~MXC_CCM_CSCMR##nr##_##bitsname##_CLK_SEL_MASK; \
- reg |= mux << MXC_CCM_CSCMR##nr##_##bitsname##_CLK_SEL_OFFSET; \
- __raw_writel(reg, MXC_CCM_CSCMR##nr); \
- \
- return 0; \
-}
-
-#define CLK_SET_RATE(name, nr, bitsname) \
-static int clk_##name##_set_rate(struct clk *clk, unsigned long rate) \
-{ \
- u32 reg, div, parent_rate; \
- u32 pre = 0, post = 0; \
- \
- parent_rate = clk_get_rate(clk->parent); \
- div = parent_rate / rate; \
- \
- if ((parent_rate / div) != rate) \
- return -EINVAL; \
- \
- __calc_pre_post_dividers(div, &pre, &post, \
- (MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_MASK >> \
- MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_OFFSET) + 1, \
- (MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_MASK >> \
- MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_OFFSET) + 1);\
- \
- /* Set the clock pre and post dividers */ \
- reg = __raw_readl(MXC_CCM_CSCDR##nr) & \
- ~(MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_MASK \
- | MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_MASK); \
- reg |= (post - 1) << \
- MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_OFFSET; \
- reg |= (pre - 1) << \
- MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_OFFSET; \
- __raw_writel(reg, MXC_CCM_CSCDR##nr); \
- \
- return 0; \
-}
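
For readability, here is a manual expansion of the first macro for the UART case used just below, i.e. what CLK_GET_RATE(uart, 1, UART) generates (shown only as a reading aid):

static unsigned long clk_uart_get_rate(struct clk *clk)
{
	u32 reg, pred, podf;

	reg = __raw_readl(MXC_CCM_CSCDR1);
	pred = (reg & MXC_CCM_CSCDR1_UART_CLK_PRED_MASK)
		>> MXC_CCM_CSCDR1_UART_CLK_PRED_OFFSET;
	podf = (reg & MXC_CCM_CSCDR1_UART_CLK_PODF_MASK)
		>> MXC_CCM_CSCDR1_UART_CLK_PODF_OFFSET;

	return DIV_ROUND_CLOSEST(clk_get_rate(clk->parent),
			(pred + 1) * (podf + 1));
}
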
-
-/* UART */
-CLK_GET_RATE(uart, 1, UART)
-CLK_SET_PARENT(uart, 1, UART)
-
-static struct clk uart_root_clk = {
- .parent = &pll2_sw_clk,
- .get_rate = clk_uart_get_rate,
- .set_parent = clk_uart_set_parent,
-};
-
-/* USBOH3 */
-CLK_GET_RATE(usboh3, 1, USBOH3)
-CLK_SET_PARENT(usboh3, 1, USBOH3)
-
-static struct clk usboh3_clk = {
- .parent = &pll2_sw_clk,
- .get_rate = clk_usboh3_get_rate,
- .set_parent = clk_usboh3_set_parent,
- .enable = _clk_ccgr_enable,
- .disable = _clk_ccgr_disable,
- .enable_reg = MXC_CCM_CCGR2,
- .enable_shift = MXC_CCM_CCGRx_CG14_OFFSET,
-};
-
-static struct clk usb_ahb_clk = {
- .parent = &ipg_clk,
- .enable = _clk_ccgr_enable,
- .disable = _clk_ccgr_disable,
- .enable_reg = MXC_CCM_CCGR2,
- .enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
-};
-
-static int clk_usb_phy1_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 reg;
-
- reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_USB_PHY_CLK_SEL;
-
- if (parent == &pll3_sw_clk)
- reg |= 1 << MXC_CCM_CSCMR1_USB_PHY_CLK_SEL_OFFSET;
-
- __raw_writel(reg, MXC_CCM_CSCMR1);
-
- return 0;
-}
-
-static struct clk usb_phy1_clk = {
- .parent = &pll3_sw_clk,
- .set_parent = clk_usb_phy1_set_parent,
- .enable = _clk_ccgr_enable,
- .enable_reg = MXC_CCM_CCGR2,
- .enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
- .disable = _clk_ccgr_disable,
-};
-
-/* eCSPI */
-CLK_GET_RATE(ecspi, 2, CSPI)
-CLK_SET_PARENT(ecspi, 1, CSPI)
-
-static struct clk ecspi_main_clk = {
- .parent = &pll3_sw_clk,
- .get_rate = clk_ecspi_get_rate,
- .set_parent = clk_ecspi_set_parent,
-};
-
-/* eSDHC */
-CLK_GET_RATE(esdhc1, 1, ESDHC1_MSHC1)
-CLK_SET_PARENT(esdhc1, 1, ESDHC1_MSHC1)
-CLK_SET_RATE(esdhc1, 1, ESDHC1_MSHC1)
-
-/* mx51 specific */
-CLK_GET_RATE(esdhc2, 1, ESDHC2_MSHC2)
-CLK_SET_PARENT(esdhc2, 1, ESDHC2_MSHC2)
-CLK_SET_RATE(esdhc2, 1, ESDHC2_MSHC2)
-
-static int clk_esdhc3_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 reg;
-
- reg = __raw_readl(MXC_CCM_CSCMR1);
- if (parent == &esdhc1_clk)
- reg &= ~MXC_CCM_CSCMR1_ESDHC3_CLK_SEL;
- else if (parent == &esdhc2_clk)
- reg |= MXC_CCM_CSCMR1_ESDHC3_CLK_SEL;
- else
- return -EINVAL;
- __raw_writel(reg, MXC_CCM_CSCMR1);
-
- return 0;
-}
-
-static int clk_esdhc4_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 reg;
-
- reg = __raw_readl(MXC_CCM_CSCMR1);
- if (parent == &esdhc1_clk)
- reg &= ~MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
- else if (parent == &esdhc2_clk)
- reg |= MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
- else
- return -EINVAL;
- __raw_writel(reg, MXC_CCM_CSCMR1);
-
- return 0;
-}
-
-/* mx53 specific */
-static int clk_esdhc2_mx53_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 reg;
-
- reg = __raw_readl(MXC_CCM_CSCMR1);
- if (parent == &esdhc1_clk)
- reg &= ~MXC_CCM_CSCMR1_ESDHC2_MSHC2_MX53_CLK_SEL;
- else if (parent == &esdhc3_mx53_clk)
- reg |= MXC_CCM_CSCMR1_ESDHC2_MSHC2_MX53_CLK_SEL;
- else
- return -EINVAL;
- __raw_writel(reg, MXC_CCM_CSCMR1);
-
- return 0;
-}
-
-CLK_GET_RATE(esdhc3_mx53, 1, ESDHC3_MX53)
-CLK_SET_PARENT(esdhc3_mx53, 1, ESDHC3_MX53)
-CLK_SET_RATE(esdhc3_mx53, 1, ESDHC3_MX53)
-
-static int clk_esdhc4_mx53_set_parent(struct clk *clk, struct clk *parent)
-{
- u32 reg;
-
- reg = __raw_readl(MXC_CCM_CSCMR1);
- if (parent == &esdhc1_clk)
- reg &= ~MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
- else if (parent == &esdhc3_mx53_clk)
- reg |= MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
- else
- return -EINVAL;
- __raw_writel(reg, MXC_CCM_CSCMR1);
-
- return 0;
-}
-
-#define DEFINE_CLOCK_FULL(name, i, er, es, gr, sr, e, d, p, s) \
- static struct clk name = { \
- .id = i, \
- .enable_reg = er, \
- .enable_shift = es, \
- .get_rate = gr, \
- .set_rate = sr, \
- .enable = e, \
- .disable = d, \
- .parent = p, \
- .secondary = s, \
- }
-
-#define DEFINE_CLOCK(name, i, er, es, gr, sr, p, s) \
- DEFINE_CLOCK_FULL(name, i, er, es, gr, sr, _clk_ccgr_enable, _clk_ccgr_disable, p, s)
-
-/* Shared peripheral bus arbiter */
-DEFINE_CLOCK(spba_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG0_OFFSET,
- NULL, NULL, &ipg_clk, NULL);
-
-/* UART */
-DEFINE_CLOCK(uart1_ipg_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG3_OFFSET,
- NULL, NULL, &ipg_clk, &aips_tz1_clk);
-DEFINE_CLOCK(uart2_ipg_clk, 1, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG5_OFFSET,
- NULL, NULL, &ipg_clk, &aips_tz1_clk);
-DEFINE_CLOCK(uart3_ipg_clk, 2, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG7_OFFSET,
- NULL, NULL, &ipg_clk, &spba_clk);
-DEFINE_CLOCK(uart4_ipg_clk, 3, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG4_OFFSET,
- NULL, NULL, &ipg_clk, &spba_clk);
-DEFINE_CLOCK(uart5_ipg_clk, 4, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG6_OFFSET,
- NULL, NULL, &ipg_clk, &spba_clk);
-DEFINE_CLOCK(uart1_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG4_OFFSET,
- NULL, NULL, &uart_root_clk, &uart1_ipg_clk);
-DEFINE_CLOCK(uart2_clk, 1, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG6_OFFSET,
- NULL, NULL, &uart_root_clk, &uart2_ipg_clk);
-DEFINE_CLOCK(uart3_clk, 2, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG8_OFFSET,
- NULL, NULL, &uart_root_clk, &uart3_ipg_clk);
-DEFINE_CLOCK(uart4_clk, 3, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG5_OFFSET,
- NULL, NULL, &uart_root_clk, &uart4_ipg_clk);
-DEFINE_CLOCK(uart5_clk, 4, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG7_OFFSET,
- NULL, NULL, &uart_root_clk, &uart5_ipg_clk);
-
-/* GPT */
-DEFINE_CLOCK(gpt_ipg_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG10_OFFSET,
- NULL, NULL, &ipg_clk, NULL);
-DEFINE_CLOCK(gpt_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG9_OFFSET,
- NULL, NULL, &ipg_clk, &gpt_ipg_clk);
-
-DEFINE_CLOCK(pwm1_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG6_OFFSET,
- NULL, NULL, &ipg_perclk, NULL);
-DEFINE_CLOCK(pwm2_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG8_OFFSET,
- NULL, NULL, &ipg_perclk, NULL);
-
-/* I2C */
-DEFINE_CLOCK(i2c1_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG9_OFFSET,
- NULL, NULL, &ipg_perclk, NULL);
-DEFINE_CLOCK(i2c2_clk, 1, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG10_OFFSET,
- NULL, NULL, &ipg_perclk, NULL);
-DEFINE_CLOCK(hsi2c_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG11_OFFSET,
- NULL, NULL, &ipg_clk, NULL);
-DEFINE_CLOCK(i2c3_mx53_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG11_OFFSET,
- NULL, NULL, &ipg_perclk, NULL);
-
-/* FEC */
-DEFINE_CLOCK(fec_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG12_OFFSET,
- NULL, NULL, &ipg_clk, NULL);
-
-/* NFC */
-DEFINE_CLOCK_CCGR(nfc_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG10_OFFSET,
- clk_nfc, &emi_slow_clk, NULL);
-
-/* SSI */
-DEFINE_CLOCK(ssi1_ipg_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG8_OFFSET,
- NULL, NULL, &ipg_clk, NULL);
-DEFINE_CLOCK(ssi1_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG9_OFFSET,
- NULL, NULL, &pll3_sw_clk, &ssi1_ipg_clk);
-DEFINE_CLOCK(ssi2_ipg_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG10_OFFSET,
- NULL, NULL, &ipg_clk, NULL);
-DEFINE_CLOCK(ssi2_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG11_OFFSET,
- NULL, NULL, &pll3_sw_clk, &ssi2_ipg_clk);
-DEFINE_CLOCK(ssi3_ipg_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG12_OFFSET,
- NULL, NULL, &ipg_clk, NULL);
-DEFINE_CLOCK(ssi3_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG13_OFFSET,
- NULL, NULL, &pll3_sw_clk, &ssi3_ipg_clk);
-
-/* eCSPI */
-DEFINE_CLOCK_FULL(ecspi1_ipg_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG9_OFFSET,
- NULL, NULL, _clk_ccgr_enable_inrun, _clk_ccgr_disable,
- &ipg_clk, &spba_clk);
-DEFINE_CLOCK(ecspi1_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG10_OFFSET,
- NULL, NULL, &ecspi_main_clk, &ecspi1_ipg_clk);
-DEFINE_CLOCK_FULL(ecspi2_ipg_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG11_OFFSET,
- NULL, NULL, _clk_ccgr_enable_inrun, _clk_ccgr_disable,
- &ipg_clk, &aips_tz2_clk);
-DEFINE_CLOCK(ecspi2_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG12_OFFSET,
- NULL, NULL, &ecspi_main_clk, &ecspi2_ipg_clk);
-
-/* CSPI */
-DEFINE_CLOCK(cspi_ipg_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG9_OFFSET,
- NULL, NULL, &ipg_clk, &aips_tz2_clk);
-DEFINE_CLOCK(cspi_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG13_OFFSET,
- NULL, NULL, &ipg_clk, &cspi_ipg_clk);
-
-/* SDMA */
-DEFINE_CLOCK(sdma_clk, 1, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG15_OFFSET,
- NULL, NULL, &ahb_clk, NULL);
-
-/* eSDHC */
-DEFINE_CLOCK_FULL(esdhc1_ipg_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG0_OFFSET,
- NULL, NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
-DEFINE_CLOCK_MAX(esdhc1_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG1_OFFSET,
- clk_esdhc1, &pll2_sw_clk, &esdhc1_ipg_clk);
-DEFINE_CLOCK_FULL(esdhc2_ipg_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG2_OFFSET,
- NULL, NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
-DEFINE_CLOCK_FULL(esdhc3_ipg_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG4_OFFSET,
- NULL, NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
-DEFINE_CLOCK_FULL(esdhc4_ipg_clk, 3, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG6_OFFSET,
- NULL, NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
-
-/* mx51 specific */
-DEFINE_CLOCK_MAX(esdhc2_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG3_OFFSET,
- clk_esdhc2, &pll2_sw_clk, &esdhc2_ipg_clk);
-
-static struct clk esdhc3_clk = {
- .id = 2,
- .parent = &esdhc1_clk,
- .set_parent = clk_esdhc3_set_parent,
- .enable_reg = MXC_CCM_CCGR3,
- .enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
- .enable = _clk_max_enable,
- .disable = _clk_max_disable,
- .secondary = &esdhc3_ipg_clk,
-};
-static struct clk esdhc4_clk = {
- .id = 3,
- .parent = &esdhc1_clk,
- .set_parent = clk_esdhc4_set_parent,
- .enable_reg = MXC_CCM_CCGR3,
- .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
- .enable = _clk_max_enable,
- .disable = _clk_max_disable,
- .secondary = &esdhc4_ipg_clk,
-};
-
-/* mx53 specific */
-static struct clk esdhc2_mx53_clk = {
- .id = 2,
- .parent = &esdhc1_clk,
- .set_parent = clk_esdhc2_mx53_set_parent,
- .enable_reg = MXC_CCM_CCGR3,
- .enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
- .enable = _clk_max_enable,
- .disable = _clk_max_disable,
- .secondary = &esdhc3_ipg_clk,
-};
-
-DEFINE_CLOCK_MAX(esdhc3_mx53_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG5_OFFSET,
- clk_esdhc3_mx53, &pll2_sw_clk, &esdhc2_ipg_clk);
-
-static struct clk esdhc4_mx53_clk = {
- .id = 3,
- .parent = &esdhc1_clk,
- .set_parent = clk_esdhc4_mx53_set_parent,
- .enable_reg = MXC_CCM_CCGR3,
- .enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
- .enable = _clk_max_enable,
- .disable = _clk_max_disable,
- .secondary = &esdhc4_ipg_clk,
-};
-
-static struct clk sata_clk = {
- .parent = &ipg_clk,
- .enable = _clk_max_enable,
- .enable_reg = MXC_CCM_CCGR4,
- .enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
- .disable = _clk_max_disable,
-};
-
-static struct clk ahci_phy_clk = {
- .parent = &usb_phy1_clk,
-};
-
-static struct clk ahci_dma_clk = {
- .parent = &ahb_clk,
-};
-
-DEFINE_CLOCK(mipi_esc_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG5_OFFSET, NULL, NULL, NULL, &pll2_sw_clk);
-DEFINE_CLOCK(mipi_hsc2_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG4_OFFSET, NULL, NULL, &mipi_esc_clk, &pll2_sw_clk);
-DEFINE_CLOCK(mipi_hsc1_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG3_OFFSET, NULL, NULL, &mipi_hsc2_clk, &pll2_sw_clk);
-
-/* IPU */
-DEFINE_CLOCK_FULL(ipu_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG5_OFFSET,
- NULL, NULL, clk_ipu_enable, clk_ipu_disable, &ahb_clk, &ipu_sec_clk);
-
-DEFINE_CLOCK_FULL(emi_fast_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG7_OFFSET,
- NULL, NULL, _clk_ccgr_enable, _clk_ccgr_disable_inwait,
- &ddr_clk, NULL);
-
-DEFINE_CLOCK(ipu_di0_clk, 0, MXC_CCM_CCGR6, MXC_CCM_CCGRx_CG5_OFFSET,
- NULL, NULL, &pll3_sw_clk, NULL);
-DEFINE_CLOCK(ipu_di1_clk, 0, MXC_CCM_CCGR6, MXC_CCM_CCGRx_CG6_OFFSET,
- NULL, NULL, &pll3_sw_clk, NULL);
-
-/* PATA */
-DEFINE_CLOCK(pata_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG0_OFFSET,
- NULL, NULL, &ipg_clk, &spba_clk);
-
-#define _REGISTER_CLOCK(d, n, c) \
- { \
- .dev_id = d, \
- .con_id = n, \
- .clk = &c, \
- },
-
-static struct clk_lookup mx51_lookups[] = {
- /* i.mx51 has the i.mx21 type uart */
- _REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
- _REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
- _REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
- _REGISTER_CLOCK(NULL, "gpt", gpt_clk)
- /* i.mx51 has the i.mx27 type fec */
- _REGISTER_CLOCK("imx27-fec.0", NULL, fec_clk)
- _REGISTER_CLOCK("mxc_pwm.0", "pwm", pwm1_clk)
- _REGISTER_CLOCK("mxc_pwm.1", "pwm", pwm2_clk)
- _REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
- _REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
- _REGISTER_CLOCK("imx-i2c.2", NULL, hsi2c_clk)
- _REGISTER_CLOCK("mxc-ehci.0", "usb", usboh3_clk)
- _REGISTER_CLOCK("mxc-ehci.0", "usb_ahb", usb_ahb_clk)
- _REGISTER_CLOCK("mxc-ehci.0", "usb_phy1", usb_phy1_clk)
- _REGISTER_CLOCK("mxc-ehci.1", "usb", usboh3_clk)
- _REGISTER_CLOCK("mxc-ehci.1", "usb_ahb", usb_ahb_clk)
- _REGISTER_CLOCK("mxc-ehci.2", "usb", usboh3_clk)
- _REGISTER_CLOCK("mxc-ehci.2", "usb_ahb", usb_ahb_clk)
- _REGISTER_CLOCK("fsl-usb2-udc", "usb", usboh3_clk)
- _REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", ahb_clk)
- _REGISTER_CLOCK("imx-keypad", NULL, dummy_clk)
- _REGISTER_CLOCK("mxc_nand", NULL, nfc_clk)
- _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
- _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
- _REGISTER_CLOCK("imx-ssi.2", NULL, ssi3_clk)
- /* i.mx51 has the i.mx35 type sdma */
- _REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
- _REGISTER_CLOCK(NULL, "ckih", ckih_clk)
- _REGISTER_CLOCK(NULL, "ckih2", ckih2_clk)
- _REGISTER_CLOCK(NULL, "gpt_32k", gpt_32k_clk)
- _REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk)
- _REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk)
- /* i.mx51 has the i.mx35 type cspi */
- _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx51.0", NULL, esdhc1_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx51.1", NULL, esdhc2_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx51.2", NULL, esdhc3_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx51.3", NULL, esdhc4_clk)
- _REGISTER_CLOCK(NULL, "cpu_clk", cpu_clk)
- _REGISTER_CLOCK(NULL, "iim_clk", iim_clk)
- _REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk)
- _REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk)
- _REGISTER_CLOCK(NULL, "mipi_hsp", mipi_hsp_clk)
- _REGISTER_CLOCK("imx-ipuv3", NULL, ipu_clk)
- _REGISTER_CLOCK("imx-ipuv3", "di0", ipu_di0_clk)
- _REGISTER_CLOCK("imx-ipuv3", "di1", ipu_di1_clk)
- _REGISTER_CLOCK(NULL, "gpc_dvfs", gpc_dvfs_clk)
- _REGISTER_CLOCK("pata_imx", NULL, pata_clk)
-};
-
-static struct clk_lookup mx53_lookups[] = {
- /* i.mx53 has the i.mx21 type uart */
- _REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
- _REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
- _REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
- _REGISTER_CLOCK("imx21-uart.3", NULL, uart4_clk)
- _REGISTER_CLOCK("imx21-uart.4", NULL, uart5_clk)
- _REGISTER_CLOCK(NULL, "gpt", gpt_clk)
- /* i.mx53 has the i.mx25 type fec */
- _REGISTER_CLOCK("imx25-fec.0", NULL, fec_clk)
- _REGISTER_CLOCK(NULL, "iim_clk", iim_clk)
- _REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
- _REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
- _REGISTER_CLOCK("imx-i2c.2", NULL, i2c3_mx53_clk)
- /* i.mx53 has the i.mx51 type ecspi */
- _REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk)
- _REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk)
- /* i.mx53 has the i.mx25 type cspi */
- _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx53.0", NULL, esdhc1_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx53.1", NULL, esdhc2_mx53_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx53.2", NULL, esdhc3_mx53_clk)
- _REGISTER_CLOCK("sdhci-esdhc-imx53.3", NULL, esdhc4_mx53_clk)
- _REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk)
- _REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk)
- /* i.mx53 has the i.mx35 type sdma */
- _REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
- _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
- _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
- _REGISTER_CLOCK("imx-ssi.2", NULL, ssi3_clk)
- _REGISTER_CLOCK("imx-keypad", NULL, dummy_clk)
- _REGISTER_CLOCK("pata_imx", NULL, pata_clk)
- _REGISTER_CLOCK("imx53-ahci.0", "ahci", sata_clk)
- _REGISTER_CLOCK("imx53-ahci.0", "ahci_phy", ahci_phy_clk)
- _REGISTER_CLOCK("imx53-ahci.0", "ahci_dma", ahci_dma_clk)
-};
-
-static void clk_tree_init(void)
-{
- u32 reg;
-
- ipg_perclk.set_parent(&ipg_perclk, &lp_apm_clk);
-
- /*
- * Initialise the IPG PER CLK dividers to 3. IPG_PER_CLK should be at
- * 8 MHz; it is derived from lp_apm.
- *
- * FIXME: Verify if true for all boards
- */
- reg = __raw_readl(MXC_CCM_CBCDR);
- reg &= ~MXC_CCM_CBCDR_PERCLK_PRED1_MASK;
- reg &= ~MXC_CCM_CBCDR_PERCLK_PRED2_MASK;
- reg &= ~MXC_CCM_CBCDR_PERCLK_PODF_MASK;
- reg |= (2 << MXC_CCM_CBCDR_PERCLK_PRED1_OFFSET);
- __raw_writel(reg, MXC_CCM_CBCDR);
-}
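
To make the arithmetic in the comment explicit (assuming the usual 24 MHz oscillator feeding lp_apm):

/*
 * PRED1 field = 2 -> divide by 3; PRED2 and PODF are left at divide by 1,
 * so ipg_perclk = 24 MHz / (3 * 1 * 1) = 8 MHz.
 */
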
-
-int __init mx51_clocks_init(unsigned long ckil, unsigned long osc,
- unsigned long ckih1, unsigned long ckih2)
-{
- int i;
-
- external_low_reference = ckil;
- external_high_reference = ckih1;
- ckih2_reference = ckih2;
- oscillator_reference = osc;
-
- for (i = 0; i < ARRAY_SIZE(mx51_lookups); i++)
- clkdev_add(&mx51_lookups[i]);
-
- clk_tree_init();
-
- clk_enable(&cpu_clk);
- clk_enable(&main_bus_clk);
-
- clk_enable(&iim_clk);
- imx_print_silicon_rev("i.MX51", mx51_revision());
- clk_disable(&iim_clk);
-
- /* move usb_phy_clk to 24MHz */
- clk_set_parent(&usb_phy1_clk, &osc_clk);
-
- /* set the usboh3_clk parent to pll2_sw_clk */
- clk_set_parent(&usboh3_clk, &pll2_sw_clk);
-
- /* Set SDHC parents to be PLL2 */
- clk_set_parent(&esdhc1_clk, &pll2_sw_clk);
- clk_set_parent(&esdhc2_clk, &pll2_sw_clk);
-
- /* set the SDHC root clock to 166.25 MHz */
- clk_set_rate(&esdhc1_clk, 166250000);
- clk_set_rate(&esdhc2_clk, 166250000);
-
- /* System timer */
- mxc_timer_init(&gpt_clk, MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR),
- MX51_INT_GPT);
- return 0;
-}
-
-int __init mx53_clocks_init(unsigned long ckil, unsigned long osc,
- unsigned long ckih1, unsigned long ckih2)
-{
- int i;
-
- external_low_reference = ckil;
- external_high_reference = ckih1;
- ckih2_reference = ckih2;
- oscillator_reference = osc;
-
- for (i = 0; i < ARRAY_SIZE(mx53_lookups); i++)
- clkdev_add(&mx53_lookups[i]);
-
- clk_tree_init();
-
- clk_set_parent(&uart_root_clk, &pll3_sw_clk);
- clk_enable(&cpu_clk);
- clk_enable(&main_bus_clk);
-
- clk_enable(&iim_clk);
- imx_print_silicon_rev("i.MX53", mx53_revision());
- clk_disable(&iim_clk);
-
- /* Set SDHC parents to be PLL2 */
- clk_set_parent(&esdhc1_clk, &pll2_sw_clk);
- clk_set_parent(&esdhc3_mx53_clk, &pll2_sw_clk);
-
- /* set the SDHC root clock to 200 MHz */
- clk_set_rate(&esdhc1_clk, 200000000);
- clk_set_rate(&esdhc3_mx53_clk, 200000000);
-
- /* System timer */
- mxc_timer_init(&gpt_clk, MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR),
- MX53_INT_GPT);
- return 0;
-}
-
-#ifdef CONFIG_OF
-static void __init clk_get_freq_dt(unsigned long *ckil, unsigned long *osc,
- unsigned long *ckih1, unsigned long *ckih2)
-{
- struct device_node *np;
-
- /* retrieve the frequency of fixed clocks from device tree */
- for_each_compatible_node(np, NULL, "fixed-clock") {
- u32 rate;
- if (of_property_read_u32(np, "clock-frequency", &rate))
- continue;
-
- if (of_device_is_compatible(np, "fsl,imx-ckil"))
- *ckil = rate;
- else if (of_device_is_compatible(np, "fsl,imx-osc"))
- *osc = rate;
- else if (of_device_is_compatible(np, "fsl,imx-ckih1"))
- *ckih1 = rate;
- else if (of_device_is_compatible(np, "fsl,imx-ckih2"))
- *ckih2 = rate;
- }
-}
-
-int __init mx51_clocks_init_dt(void)
-{
- unsigned long ckil, osc, ckih1, ckih2;
-
- clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
- return mx51_clocks_init(ckil, osc, ckih1, ckih2);
-}
-
-int __init mx53_clocks_init_dt(void)
-{
- unsigned long ckil, osc, ckih1, ckih2;
-
- clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
- return mx53_clocks_init(ckil, osc, ckih1, ckih2);
-}
-#endif
diff --git a/arch/arm/mach-imx/cpu-imx5.c b/arch/arm/mach-imx/cpu-imx5.c
index aa15c517d06..8eb15a2fcaf 100644
--- a/arch/arm/mach-imx/cpu-imx5.c
+++ b/arch/arm/mach-imx/cpu-imx5.c
@@ -62,11 +62,8 @@ EXPORT_SYMBOL(mx51_revision);
* Dependent on link order - so the assumption is that vfp_init is called
* before us.
*/
-static int __init mx51_neon_fixup(void)
+int __init mx51_neon_fixup(void)
{
- if (!cpu_is_mx51())
- return 0;
-
if (mx51_revision() < IMX_CHIP_REVISION_3_0 &&
(elf_hwcap & HWCAP_NEON)) {
elf_hwcap &= ~HWCAP_NEON;
@@ -75,7 +72,6 @@ static int __init mx51_neon_fixup(void)
return 0;
}
-late_initcall(mx51_neon_fixup);
#endif
static int get_mx53_srev(void)
diff --git a/arch/arm/mach-imx/crmregs-imx3.h b/arch/arm/mach-imx/crmregs-imx3.h
index 53141273df4..a1dfde53e33 100644
--- a/arch/arm/mach-imx/crmregs-imx3.h
+++ b/arch/arm/mach-imx/crmregs-imx3.h
@@ -24,48 +24,47 @@
#define CKIH_CLK_FREQ_27MHZ 27000000
#define CKIL_CLK_FREQ 32768
-#define MXC_CCM_BASE (cpu_is_mx31() ? \
-MX31_IO_ADDRESS(MX31_CCM_BASE_ADDR) : MX35_IO_ADDRESS(MX35_CCM_BASE_ADDR))
+extern void __iomem *mx3_ccm_base;
/* Register addresses */
-#define MXC_CCM_CCMR (MXC_CCM_BASE + 0x00)
-#define MXC_CCM_PDR0 (MXC_CCM_BASE + 0x04)
-#define MXC_CCM_PDR1 (MXC_CCM_BASE + 0x08)
-#define MX35_CCM_PDR2 (MXC_CCM_BASE + 0x0C)
-#define MXC_CCM_RCSR (MXC_CCM_BASE + 0x0C)
-#define MX35_CCM_PDR3 (MXC_CCM_BASE + 0x10)
-#define MXC_CCM_MPCTL (MXC_CCM_BASE + 0x10)
-#define MX35_CCM_PDR4 (MXC_CCM_BASE + 0x14)
-#define MXC_CCM_UPCTL (MXC_CCM_BASE + 0x14)
-#define MX35_CCM_RCSR (MXC_CCM_BASE + 0x18)
-#define MXC_CCM_SRPCTL (MXC_CCM_BASE + 0x18)
-#define MX35_CCM_MPCTL (MXC_CCM_BASE + 0x1C)
-#define MXC_CCM_COSR (MXC_CCM_BASE + 0x1C)
-#define MX35_CCM_PPCTL (MXC_CCM_BASE + 0x20)
-#define MXC_CCM_CGR0 (MXC_CCM_BASE + 0x20)
-#define MX35_CCM_ACMR (MXC_CCM_BASE + 0x24)
-#define MXC_CCM_CGR1 (MXC_CCM_BASE + 0x24)
-#define MX35_CCM_COSR (MXC_CCM_BASE + 0x28)
-#define MXC_CCM_CGR2 (MXC_CCM_BASE + 0x28)
-#define MX35_CCM_CGR0 (MXC_CCM_BASE + 0x2C)
-#define MXC_CCM_WIMR (MXC_CCM_BASE + 0x2C)
-#define MX35_CCM_CGR1 (MXC_CCM_BASE + 0x30)
-#define MXC_CCM_LDC (MXC_CCM_BASE + 0x30)
-#define MX35_CCM_CGR2 (MXC_CCM_BASE + 0x34)
-#define MXC_CCM_DCVR0 (MXC_CCM_BASE + 0x34)
-#define MX35_CCM_CGR3 (MXC_CCM_BASE + 0x38)
-#define MXC_CCM_DCVR1 (MXC_CCM_BASE + 0x38)
-#define MXC_CCM_DCVR2 (MXC_CCM_BASE + 0x3C)
-#define MXC_CCM_DCVR3 (MXC_CCM_BASE + 0x40)
-#define MXC_CCM_LTR0 (MXC_CCM_BASE + 0x44)
-#define MXC_CCM_LTR1 (MXC_CCM_BASE + 0x48)
-#define MXC_CCM_LTR2 (MXC_CCM_BASE + 0x4C)
-#define MXC_CCM_LTR3 (MXC_CCM_BASE + 0x50)
-#define MXC_CCM_LTBR0 (MXC_CCM_BASE + 0x54)
-#define MXC_CCM_LTBR1 (MXC_CCM_BASE + 0x58)
-#define MXC_CCM_PMCR0 (MXC_CCM_BASE + 0x5C)
-#define MXC_CCM_PMCR1 (MXC_CCM_BASE + 0x60)
-#define MXC_CCM_PDR2 (MXC_CCM_BASE + 0x64)
+#define MXC_CCM_CCMR 0x00
+#define MXC_CCM_PDR0 0x04
+#define MXC_CCM_PDR1 0x08
+#define MX35_CCM_PDR2 0x0C
+#define MXC_CCM_RCSR 0x0C
+#define MX35_CCM_PDR3 0x10
+#define MXC_CCM_MPCTL 0x10
+#define MX35_CCM_PDR4 0x14
+#define MXC_CCM_UPCTL 0x14
+#define MX35_CCM_RCSR 0x18
+#define MXC_CCM_SRPCTL 0x18
+#define MX35_CCM_MPCTL 0x1C
+#define MXC_CCM_COSR 0x1C
+#define MX35_CCM_PPCTL 0x20
+#define MXC_CCM_CGR0 0x20
+#define MX35_CCM_ACMR 0x24
+#define MXC_CCM_CGR1 0x24
+#define MX35_CCM_COSR 0x28
+#define MXC_CCM_CGR2 0x28
+#define MX35_CCM_CGR0 0x2C
+#define MXC_CCM_WIMR 0x2C
+#define MX35_CCM_CGR1 0x30
+#define MXC_CCM_LDC 0x30
+#define MX35_CCM_CGR2 0x34
+#define MXC_CCM_DCVR0 0x34
+#define MX35_CCM_CGR3 0x38
+#define MXC_CCM_DCVR1 0x38
+#define MXC_CCM_DCVR2 0x3C
+#define MXC_CCM_DCVR3 0x40
+#define MXC_CCM_LTR0 0x44
+#define MXC_CCM_LTR1 0x48
+#define MXC_CCM_LTR2 0x4C
+#define MXC_CCM_LTR3 0x50
+#define MXC_CCM_LTBR0 0x54
+#define MXC_CCM_LTBR1 0x58
+#define MXC_CCM_PMCR0 0x5C
+#define MXC_CCM_PMCR1 0x60
+#define MXC_CCM_PDR2 0x64
/* Register bit definitions */
#define MXC_CCM_CCMR_WBEN (1 << 27)
diff --git a/arch/arm/mach-imx/imx51-dt.c b/arch/arm/mach-imx/imx51-dt.c
index 5f577fbda2c..18e78dba429 100644
--- a/arch/arm/mach-imx/imx51-dt.c
+++ b/arch/arm/mach-imx/imx51-dt.c
@@ -118,6 +118,7 @@ DT_MACHINE_START(IMX51_DT, "Freescale i.MX51 (Device Tree Support)")
.handle_irq = imx51_handle_irq,
.timer = &imx51_timer,
.init_machine = imx51_dt_init,
+ .init_late = imx51_init_late,
.dt_compat = imx51_dt_board_compat,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/imx53-dt.c b/arch/arm/mach-imx/imx53-dt.c
index 574eca4b89a..eb04b6248e4 100644
--- a/arch/arm/mach-imx/imx53-dt.c
+++ b/arch/arm/mach-imx/imx53-dt.c
@@ -10,6 +10,9 @@
* http://www.gnu.org/copyleft/gpl.html
*/
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
@@ -81,6 +84,19 @@ static const struct of_device_id imx53_iomuxc_of_match[] __initconst = {
{ /* sentinel */ }
};
+static void __init imx53_qsb_init(void)
+{
+ struct clk *clk;
+
+ clk = clk_get_sys(NULL, "ssi_ext1");
+ if (IS_ERR(clk)) {
+ pr_err("failed to get clk ssi_ext1\n");
+ return;
+ }
+
+ clk_register_clkdev(clk, NULL, "0-000a");
+}
+
static void __init imx53_dt_init(void)
{
struct device_node *node;
@@ -99,6 +115,9 @@ static void __init imx53_dt_init(void)
of_node_put(node);
}
+ if (of_machine_is_compatible("fsl,imx53-qsb"))
+ imx53_qsb_init();
+
of_platform_populate(NULL, of_default_bus_match_table,
imx53_auxdata_lookup, NULL);
}
diff --git a/arch/arm/mach-imx/lluart.c b/arch/arm/mach-imx/lluart.c
index 0213f8dcee8..c40a34c0048 100644
--- a/arch/arm/mach-imx/lluart.c
+++ b/arch/arm/mach-imx/lluart.c
@@ -17,6 +17,12 @@
#include <mach/hardware.h>
static struct map_desc imx_lluart_desc = {
+#ifdef CONFIG_DEBUG_IMX6Q_UART2
+ .virtual = MX6Q_IO_P2V(MX6Q_UART2_BASE_ADDR),
+ .pfn = __phys_to_pfn(MX6Q_UART2_BASE_ADDR),
+ .length = MX6Q_UART2_SIZE,
+ .type = MT_DEVICE,
+#endif
#ifdef CONFIG_DEBUG_IMX6Q_UART4
.virtual = MX6Q_IO_P2V(MX6Q_UART4_BASE_ADDR),
.pfn = __phys_to_pfn(MX6Q_UART4_BASE_ADDR),
diff --git a/arch/arm/mach-imx/mach-cpuimx51sd.c b/arch/arm/mach-imx/mach-cpuimx51sd.c
index ce341a6874f..ac50f1671e3 100644
--- a/arch/arm/mach-imx/mach-cpuimx51sd.c
+++ b/arch/arm/mach-imx/mach-cpuimx51sd.c
@@ -369,5 +369,6 @@ MACHINE_START(EUKREA_CPUIMX51SD, "Eukrea CPUIMX51SD")
.handle_irq = imx51_handle_irq,
.timer = &mxc_timer,
.init_machine = eukrea_cpuimx51sd_init,
+ .init_late = imx51_init_late,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 3df360a52c1..b47e98b7d53 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -10,6 +10,8 @@
* http://www.gnu.org/copyleft/gpl.html
*/
+#include <linux/clk.h>
+#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -64,18 +66,53 @@ soft:
/* For imx6q sabrelite board: set KSZ9021RN RGMII pad skew */
static int ksz9021rn_phy_fixup(struct phy_device *phydev)
{
- /* min rx data delay */
- phy_write(phydev, 0x0b, 0x8105);
- phy_write(phydev, 0x0c, 0x0000);
+ if (IS_ENABLED(CONFIG_PHYLIB)) {
+ /* min rx data delay */
+ phy_write(phydev, 0x0b, 0x8105);
+ phy_write(phydev, 0x0c, 0x0000);
- /* max rx/tx clock delay, min rx/tx control delay */
- phy_write(phydev, 0x0b, 0x8104);
- phy_write(phydev, 0x0c, 0xf0f0);
- phy_write(phydev, 0x0b, 0x104);
+ /* max rx/tx clock delay, min rx/tx control delay */
+ phy_write(phydev, 0x0b, 0x8104);
+ phy_write(phydev, 0x0c, 0xf0f0);
+ phy_write(phydev, 0x0b, 0x104);
+ }
return 0;
}
+static void __init imx6q_sabrelite_cko1_setup(void)
+{
+ struct clk *cko1_sel, *ahb, *cko1;
+ unsigned long rate;
+
+ cko1_sel = clk_get_sys(NULL, "cko1_sel");
+ ahb = clk_get_sys(NULL, "ahb");
+ cko1 = clk_get_sys(NULL, "cko1");
+ if (IS_ERR(cko1_sel) || IS_ERR(ahb) || IS_ERR(cko1)) {
+ pr_err("cko1 setup failed!\n");
+ goto put_clk;
+ }
+ clk_set_parent(cko1_sel, ahb);
+ rate = clk_round_rate(cko1, 16000000);
+ clk_set_rate(cko1, rate);
+ clk_register_clkdev(cko1, NULL, "0-000a");
+put_clk:
+ if (!IS_ERR(cko1_sel))
+ clk_put(cko1_sel);
+ if (!IS_ERR(ahb))
+ clk_put(ahb);
+ if (!IS_ERR(cko1))
+ clk_put(cko1);
+}
+
+static void __init imx6q_sabrelite_init(void)
+{
+ if (IS_ENABLED(CONFIG_PHYLIB))
+ phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK,
+ ksz9021rn_phy_fixup);
+ imx6q_sabrelite_cko1_setup();
+}
+
static void __init imx6q_init_machine(void)
{
/*
@@ -85,8 +122,7 @@ static void __init imx6q_init_machine(void)
pinctrl_provide_dummies();
if (of_machine_is_compatible("fsl,imx6q-sabrelite"))
- phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK,
- ksz9021rn_phy_fixup);
+ imx6q_sabrelite_init();
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
@@ -139,6 +175,7 @@ static struct sys_timer imx6q_timer = {
static const char *imx6q_dt_compat[] __initdata = {
"fsl,imx6q-arm2",
"fsl,imx6q-sabrelite",
+ "fsl,imx6q-sabresd",
"fsl,imx6q",
NULL,
};
diff --git a/arch/arm/mach-imx/mach-mx51_3ds.c b/arch/arm/mach-imx/mach-mx51_3ds.c
index 83eab4176ca..3c5b163923f 100644
--- a/arch/arm/mach-imx/mach-mx51_3ds.c
+++ b/arch/arm/mach-imx/mach-mx51_3ds.c
@@ -175,5 +175,6 @@ MACHINE_START(MX51_3DS, "Freescale MX51 3-Stack Board")
.handle_irq = imx51_handle_irq,
.timer = &mx51_3ds_timer,
.init_machine = mx51_3ds_init,
+ .init_late = imx51_init_late,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx51_babbage.c b/arch/arm/mach-imx/mach-mx51_babbage.c
index e4b822e9f71..dde397014d4 100644
--- a/arch/arm/mach-imx/mach-mx51_babbage.c
+++ b/arch/arm/mach-imx/mach-mx51_babbage.c
@@ -163,6 +163,12 @@ static iomux_v3_cfg_t mx51babbage_pads[] = {
MX51_PAD_CSPI1_SCLK__ECSPI1_SCLK,
MX51_PAD_CSPI1_SS0__GPIO4_24,
MX51_PAD_CSPI1_SS1__GPIO4_25,
+
+ /* Audio */
+ MX51_PAD_AUD3_BB_TXD__AUD3_TXD,
+ MX51_PAD_AUD3_BB_RXD__AUD3_RXD,
+ MX51_PAD_AUD3_BB_CK__AUD3_TXC,
+ MX51_PAD_AUD3_BB_FS__AUD3_TXFS,
};
/* Serial ports */
@@ -426,5 +432,6 @@ MACHINE_START(MX51_BABBAGE, "Freescale MX51 Babbage Board")
.handle_irq = imx51_handle_irq,
.timer = &mx51_babbage_timer,
.init_machine = mx51_babbage_init,
+ .init_late = imx51_init_late,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx51_efikamx.c b/arch/arm/mach-imx/mach-mx51_efikamx.c
index 86e96ef11f9..8d09c0126ca 100644
--- a/arch/arm/mach-imx/mach-mx51_efikamx.c
+++ b/arch/arm/mach-imx/mach-mx51_efikamx.c
@@ -207,29 +207,32 @@ static void mx51_efikamx_power_off(void)
static int __init mx51_efikamx_power_init(void)
{
- if (machine_is_mx51_efikamx()) {
- pwgt1 = regulator_get(NULL, "pwgt1");
- pwgt2 = regulator_get(NULL, "pwgt2");
- if (!IS_ERR(pwgt1) && !IS_ERR(pwgt2)) {
- regulator_enable(pwgt1);
- regulator_enable(pwgt2);
- }
- gpio_request(EFIKAMX_POWEROFF, "poweroff");
- pm_power_off = mx51_efikamx_power_off;
-
- /* enable coincell charger. maybe need a small power driver ? */
- coincell = regulator_get(NULL, "coincell");
- if (!IS_ERR(coincell)) {
- regulator_set_voltage(coincell, 3000000, 3000000);
- regulator_enable(coincell);
- }
-
- regulator_has_full_constraints();
+ pwgt1 = regulator_get(NULL, "pwgt1");
+ pwgt2 = regulator_get(NULL, "pwgt2");
+ if (!IS_ERR(pwgt1) && !IS_ERR(pwgt2)) {
+ regulator_enable(pwgt1);
+ regulator_enable(pwgt2);
+ }
+ gpio_request(EFIKAMX_POWEROFF, "poweroff");
+ pm_power_off = mx51_efikamx_power_off;
+
+ /* enable coincell charger. maybe need a small power driver ? */
+ coincell = regulator_get(NULL, "coincell");
+ if (!IS_ERR(coincell)) {
+ regulator_set_voltage(coincell, 3000000, 3000000);
+ regulator_enable(coincell);
}
+ regulator_has_full_constraints();
+
return 0;
}
-late_initcall(mx51_efikamx_power_init);
+
+static void __init mx51_efikamx_init_late(void)
+{
+ imx51_init_late();
+ mx51_efikamx_power_init();
+}
static void __init mx51_efikamx_init(void)
{
@@ -292,5 +295,6 @@ MACHINE_START(MX51_EFIKAMX, "Genesi Efika MX (Smarttop)")
.handle_irq = imx51_handle_irq,
.timer = &mx51_efikamx_timer,
.init_machine = mx51_efikamx_init,
+ .init_late = mx51_efikamx_init_late,
.restart = mx51_efikamx_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx51_efikasb.c b/arch/arm/mach-imx/mach-mx51_efikasb.c
index 88f837a6cc7..fdbd181b97e 100644
--- a/arch/arm/mach-imx/mach-mx51_efikasb.c
+++ b/arch/arm/mach-imx/mach-mx51_efikasb.c
@@ -211,22 +211,25 @@ static void mx51_efikasb_power_off(void)
static int __init mx51_efikasb_power_init(void)
{
- if (machine_is_mx51_efikasb()) {
- pwgt1 = regulator_get(NULL, "pwgt1");
- pwgt2 = regulator_get(NULL, "pwgt2");
- if (!IS_ERR(pwgt1) && !IS_ERR(pwgt2)) {
- regulator_enable(pwgt1);
- regulator_enable(pwgt2);
- }
- gpio_request(EFIKASB_POWEROFF, "poweroff");
- pm_power_off = mx51_efikasb_power_off;
-
- regulator_has_full_constraints();
+ pwgt1 = regulator_get(NULL, "pwgt1");
+ pwgt2 = regulator_get(NULL, "pwgt2");
+ if (!IS_ERR(pwgt1) && !IS_ERR(pwgt2)) {
+ regulator_enable(pwgt1);
+ regulator_enable(pwgt2);
}
+ gpio_request(EFIKASB_POWEROFF, "poweroff");
+ pm_power_off = mx51_efikasb_power_off;
+
+ regulator_has_full_constraints();
return 0;
}
-late_initcall(mx51_efikasb_power_init);
+
+static void __init mx51_efikasb_init_late(void)
+{
+ imx51_init_late();
+ mx51_efikasb_power_init();
+}
/* 01 R1.3 board
10 R2.0 board */
@@ -287,6 +290,7 @@ MACHINE_START(MX51_EFIKASB, "Genesi Efika MX (Smartbook)")
.init_irq = mx51_init_irq,
.handle_irq = imx51_handle_irq,
.init_machine = efikasb_board_init,
+ .init_late = mx51_efikasb_init_late,
.timer = &mx51_efikasb_timer,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-pcm037.c b/arch/arm/mach-imx/mach-pcm037.c
index 10c9795934a..0a40004154f 100644
--- a/arch/arm/mach-imx/mach-pcm037.c
+++ b/arch/arm/mach-imx/mach-pcm037.c
@@ -694,6 +694,11 @@ static void __init pcm037_reserve(void)
MX3_CAMERA_BUF_SIZE);
}
+static void __init pcm037_init_late(void)
+{
+ pcm037_eet_init_devices();
+}
+
MACHINE_START(PCM037, "Phytec Phycore pcm037")
/* Maintainer: Pengutronix */
.atag_offset = 0x100,
@@ -704,5 +709,6 @@ MACHINE_START(PCM037, "Phytec Phycore pcm037")
.handle_irq = imx31_handle_irq,
.timer = &pcm037_timer,
.init_machine = pcm037_init,
+ .init_late = pcm037_init_late,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-pcm037_eet.c b/arch/arm/mach-imx/mach-pcm037_eet.c
index 1b7606bef8f..11ffa81ad17 100644
--- a/arch/arm/mach-imx/mach-pcm037_eet.c
+++ b/arch/arm/mach-imx/mach-pcm037_eet.c
@@ -160,9 +160,9 @@ static const struct gpio_keys_platform_data
.rep = 0, /* No auto-repeat */
};
-static int __init eet_init_devices(void)
+int __init pcm037_eet_init_devices(void)
{
- if (!machine_is_pcm037() || pcm037_variant() != PCM037_EET)
+ if (pcm037_variant() != PCM037_EET)
return 0;
mxc_iomux_setup_multiple_pins(pcm037_eet_pins,
@@ -176,4 +176,3 @@ static int __init eet_init_devices(void)
return 0;
}
-late_initcall(eet_init_devices);
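The eet change is the recipe repeated throughout this series: a function that used to self-register with late_initcall() — and therefore ran on every board compiled into a multi-machine kernel — is exported and called from the matching machine descriptor's new .init_late hook, so it only runs on the board that actually booted. A sketch of the shape, with hypothetical names; the real descriptors are the MACHINE_START blocks edited in the surrounding files:

#include <linux/init.h>

/* Previously:
 *	static int __init my_board_late_setup(void) { ... }
 *	late_initcall(my_board_late_setup);
 */

int __init my_board_late_setup(void)		/* now exported via a header */
{
	/* board-specific late work (debugfs files, fixups, ...) */
	return 0;
}

/* Hooked up through the machine descriptor instead of an initcall:
 *	.init_machine	= my_board_init,
 *	.init_late	= my_board_init_late,
 */
static void __init my_board_init_late(void)
{
	my_board_late_setup();
}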
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c
index 9128d15b1eb..967ed5b35a4 100644
--- a/arch/arm/mach-imx/mm-imx3.c
+++ b/arch/arm/mach-imx/mm-imx3.c
@@ -32,6 +32,10 @@
#include <mach/iomux-v3.h>
#include <mach/irqs.h>
+#include "crmregs-imx3.h"
+
+void __iomem *mx3_ccm_base;
+
static void imx3_idle(void)
{
unsigned long reg = 0;
@@ -138,6 +142,7 @@ void __init imx31_init_early(void)
mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR));
arch_ioremap_caller = imx3_ioremap_caller;
arm_pm_idle = imx3_idle;
+ mx3_ccm_base = MX31_IO_ADDRESS(MX31_CCM_BASE_ADDR);
}
void __init mx31_init_irq(void)
@@ -211,6 +216,7 @@ void __init imx35_init_early(void)
mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
arm_pm_idle = imx3_idle;
arch_ioremap_caller = imx3_ioremap_caller;
+ mx3_ccm_base = MX35_IO_ADDRESS(MX35_CCM_BASE_ADDR);
}
void __init mx35_init_irq(void)
diff --git a/arch/arm/mach-imx/mm-imx5.c b/arch/arm/mach-imx/mm-imx5.c
index ba91e6b31cf..feeee17da96 100644
--- a/arch/arm/mach-imx/mm-imx5.c
+++ b/arch/arm/mach-imx/mm-imx5.c
@@ -33,6 +33,7 @@ static void imx5_idle(void)
gpc_dvfs_clk = clk_get(NULL, "gpc_dvfs");
if (IS_ERR(gpc_dvfs_clk))
return;
+ clk_prepare(gpc_dvfs_clk);
}
clk_enable(gpc_dvfs_clk);
mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
@@ -236,3 +237,8 @@ void __init imx53_soc_init(void)
platform_device_register_simple("imx31-audmux", 0, imx53_audmux_res,
ARRAY_SIZE(imx53_audmux_res));
}
+
+void __init imx51_init_late(void)
+{
+ mx51_neon_fixup();
+}
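The single added clk_prepare() line matters because the common clock framework splits clk_prepare_enable() into a sleepable half (clk_prepare) and an atomic-safe half (clk_enable); a clock must have been prepared before clk_enable() is legal, which the new line guarantees for gpc_dvfs_clk. A generic sketch of that split, with an assumed clock name:

#include <linux/clk.h>
#include <linux/err.h>

static struct clk *my_clk;	/* "my_clk" is an assumed con_id */

static int my_setup(void)			/* process context: may sleep */
{
	my_clk = clk_get_sys(NULL, "my_clk");
	if (IS_ERR(my_clk))
		return PTR_ERR(my_clk);
	return clk_prepare(my_clk);		/* sleepable half, done once */
}

static void my_fast_path(void)			/* atomic context */
{
	clk_enable(my_clk);			/* non-sleeping half */
	/* ... */
	clk_disable(my_clk);
}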
diff --git a/arch/arm/mach-imx/pcm037.h b/arch/arm/mach-imx/pcm037.h
index d6929721a5f..7d167690e17 100644
--- a/arch/arm/mach-imx/pcm037.h
+++ b/arch/arm/mach-imx/pcm037.h
@@ -8,4 +8,10 @@ enum pcm037_board_variant {
extern enum pcm037_board_variant pcm037_variant(void);
+#ifdef CONFIG_MACH_PCM037_EET
+int pcm037_eet_init_devices(void);
+#else
+static inline int pcm037_eet_init_devices(void) { return 0; }
+#endif
+
#endif
diff --git a/arch/arm/mach-imx/pm-imx3.c b/arch/arm/mach-imx/pm-imx3.c
index b3752439632..822103bdb70 100644
--- a/arch/arm/mach-imx/pm-imx3.c
+++ b/arch/arm/mach-imx/pm-imx3.c
@@ -21,14 +21,14 @@
*/
void mx3_cpu_lp_set(enum mx3_cpu_pwr_mode mode)
{
- int reg = __raw_readl(MXC_CCM_CCMR);
+ int reg = __raw_readl(mx3_ccm_base + MXC_CCM_CCMR);
reg &= ~MXC_CCM_CCMR_LPM_MASK;
switch (mode) {
case MX3_WAIT:
if (cpu_is_mx35())
reg |= MXC_CCM_CCMR_LPM_WAIT_MX35;
- __raw_writel(reg, MXC_CCM_CCMR);
+ __raw_writel(reg, mx3_ccm_base + MXC_CCM_CCMR);
break;
default:
pr_err("Unknown cpu power mode: %d\n", mode);
diff --git a/arch/arm/mach-kirkwood/board-dreamplug.c b/arch/arm/mach-kirkwood/board-dreamplug.c
index 985453994dd..55e357ab292 100644
--- a/arch/arm/mach-kirkwood/board-dreamplug.c
+++ b/arch/arm/mach-kirkwood/board-dreamplug.c
@@ -27,7 +27,6 @@
#include <linux/mtd/physmap.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c
index 10d1969b9e3..edc3f8a9d45 100644
--- a/arch/arm/mach-kirkwood/board-dt.c
+++ b/arch/arm/mach-kirkwood/board-dt.c
@@ -43,6 +43,9 @@ static void __init kirkwood_dt_init(void)
kirkwood_l2_init();
#endif
+ /* Setup root of clk tree */
+ kirkwood_clk_init();
+
/* internal devices that every board has */
kirkwood_wdt_init();
kirkwood_xor0_init();
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 3ad037385a5..25fb3fd418e 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -15,7 +15,8 @@
#include <linux/ata_platform.h>
#include <linux/mtd/nand.h>
#include <linux/dma-mapping.h>
-#include <linux/of.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
#include <net/dsa.h>
#include <asm/page.h>
#include <asm/timex.h>
@@ -32,6 +33,7 @@
#include <plat/common.h>
#include <plat/time.h>
#include <plat/addr-map.h>
+#include <plat/mv_xor.h>
#include "common.h"
/*****************************************************************************
@@ -61,20 +63,188 @@ void __init kirkwood_map_io(void)
iotable_init(kirkwood_io_desc, ARRAY_SIZE(kirkwood_io_desc));
}
-/*
- * Default clock control bits. Any bit _not_ set in this variable
- * will be cleared from the hardware after platform devices have been
- * registered. Some reserved bits must be set to 1.
- */
-unsigned int kirkwood_clk_ctrl = CGC_DUNIT | CGC_RESERVED;
+/*****************************************************************************
+ * CLK tree
+ ****************************************************************************/
+
+static void disable_sata0(void)
+{
+ /* Disable PLL and IVREF */
+ writel(readl(SATA0_PHY_MODE_2) & ~0xf, SATA0_PHY_MODE_2);
+ /* Disable PHY */
+ writel(readl(SATA0_IF_CTRL) | 0x200, SATA0_IF_CTRL);
+}
+
+static void disable_sata1(void)
+{
+ /* Disable PLL and IVREF */
+ writel(readl(SATA1_PHY_MODE_2) & ~0xf, SATA1_PHY_MODE_2);
+ /* Disable PHY */
+ writel(readl(SATA1_IF_CTRL) | 0x200, SATA1_IF_CTRL);
+}
+
+static void disable_pcie0(void)
+{
+ writel(readl(PCIE_LINK_CTRL) | 0x10, PCIE_LINK_CTRL);
+ while (1)
+ if (readl(PCIE_STATUS) & 0x1)
+ break;
+ writel(readl(PCIE_LINK_CTRL) & ~0x10, PCIE_LINK_CTRL);
+}
+
+static void disable_pcie1(void)
+{
+ u32 dev, rev;
+
+ kirkwood_pcie_id(&dev, &rev);
+
+ if (dev == MV88F6282_DEV_ID) {
+ writel(readl(PCIE1_LINK_CTRL) | 0x10, PCIE1_LINK_CTRL);
+ while (1)
+ if (readl(PCIE1_STATUS) & 0x1)
+ break;
+ writel(readl(PCIE1_LINK_CTRL) & ~0x10, PCIE1_LINK_CTRL);
+ }
+}
+
+/* An extended version of the gated clk. This calls fn() before
+ * disabling the clock. We use this to turn off PHYs etc. */
+struct clk_gate_fn {
+ struct clk_gate gate;
+ void (*fn)(void);
+};
+
+#define to_clk_gate_fn(_gate) container_of(_gate, struct clk_gate_fn, gate)
+#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
+
+static void clk_gate_fn_disable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ struct clk_gate_fn *gate_fn = to_clk_gate_fn(gate);
+
+ if (gate_fn->fn)
+ gate_fn->fn();
+
+ clk_gate_ops.disable(hw);
+}
+
+static struct clk_ops clk_gate_fn_ops;
+
+static struct clk __init *clk_register_gate_fn(struct device *dev,
+ const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 bit_idx,
+ u8 clk_gate_flags, spinlock_t *lock,
+ void (*fn)(void))
+{
+ struct clk_gate_fn *gate_fn;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ gate_fn = kzalloc(sizeof(struct clk_gate_fn), GFP_KERNEL);
+ if (!gate_fn) {
+ pr_err("%s: could not allocate gated clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init.name = name;
+ init.ops = &clk_gate_fn_ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ /* struct clk_gate assignments */
+ gate_fn->gate.reg = reg;
+ gate_fn->gate.bit_idx = bit_idx;
+ gate_fn->gate.flags = clk_gate_flags;
+ gate_fn->gate.lock = lock;
+ gate_fn->gate.hw.init = &init;
+
+ /* ops is the gate ops, but with our disable function */
+ if (clk_gate_fn_ops.disable != clk_gate_fn_disable) {
+ clk_gate_fn_ops = clk_gate_ops;
+ clk_gate_fn_ops.disable = clk_gate_fn_disable;
+ }
+ clk = clk_register(dev, &gate_fn->gate.hw);
+
+ if (IS_ERR(clk))
+ kfree(gate_fn);
+
+ return clk;
+}
+
+static DEFINE_SPINLOCK(gating_lock);
+static struct clk *tclk;
+
+static struct clk __init *kirkwood_register_gate(const char *name, u8 bit_idx)
+{
+ return clk_register_gate(NULL, name, "tclk", 0,
+ (void __iomem *)CLOCK_GATING_CTRL,
+ bit_idx, 0, &gating_lock);
+}
+
+static struct clk __init *kirkwood_register_gate_fn(const char *name,
+ u8 bit_idx,
+ void (*fn)(void))
+{
+ return clk_register_gate_fn(NULL, name, "tclk", 0,
+ (void __iomem *)CLOCK_GATING_CTRL,
+ bit_idx, 0, &gating_lock, fn);
+}
+
+void __init kirkwood_clk_init(void)
+{
+ struct clk *runit, *ge0, *ge1, *sata0, *sata1, *usb0, *sdio;
+ struct clk *crypto, *xor0, *xor1, *pex0, *pex1, *audio;
+
+ tclk = clk_register_fixed_rate(NULL, "tclk", NULL,
+ CLK_IS_ROOT, kirkwood_tclk);
+
+ runit = kirkwood_register_gate("runit", CGC_BIT_RUNIT);
+ ge0 = kirkwood_register_gate("ge0", CGC_BIT_GE0);
+ ge1 = kirkwood_register_gate("ge1", CGC_BIT_GE1);
+ sata0 = kirkwood_register_gate_fn("sata0", CGC_BIT_SATA0,
+ disable_sata0);
+ sata1 = kirkwood_register_gate_fn("sata1", CGC_BIT_SATA1,
+ disable_sata1);
+ usb0 = kirkwood_register_gate("usb0", CGC_BIT_USB0);
+ sdio = kirkwood_register_gate("sdio", CGC_BIT_SDIO);
+ crypto = kirkwood_register_gate("crypto", CGC_BIT_CRYPTO);
+ xor0 = kirkwood_register_gate("xor0", CGC_BIT_XOR0);
+ xor1 = kirkwood_register_gate("xor1", CGC_BIT_XOR1);
+ pex0 = kirkwood_register_gate_fn("pex0", CGC_BIT_PEX0,
+ disable_pcie0);
+ pex1 = kirkwood_register_gate_fn("pex1", CGC_BIT_PEX1,
+ disable_pcie1);
+ audio = kirkwood_register_gate("audio", CGC_BIT_AUDIO);
+ kirkwood_register_gate("tdm", CGC_BIT_TDM);
+ kirkwood_register_gate("tsu", CGC_BIT_TSU);
+
+ /* clkdev entries, mapping clks to devices */
+ orion_clkdev_add(NULL, "orion_spi.0", runit);
+ orion_clkdev_add(NULL, "orion_spi.1", runit);
+ orion_clkdev_add(NULL, MV643XX_ETH_NAME ".0", ge0);
+ orion_clkdev_add(NULL, MV643XX_ETH_NAME ".1", ge1);
+ orion_clkdev_add(NULL, "orion_wdt", tclk);
+ orion_clkdev_add("0", "sata_mv.0", sata0);
+ orion_clkdev_add("1", "sata_mv.0", sata1);
+ orion_clkdev_add(NULL, "orion-ehci.0", usb0);
+ orion_clkdev_add(NULL, "orion_nand", runit);
+ orion_clkdev_add(NULL, "mvsdio", sdio);
+ orion_clkdev_add(NULL, "mv_crypto", crypto);
+ orion_clkdev_add(NULL, MV_XOR_SHARED_NAME ".0", xor0);
+ orion_clkdev_add(NULL, MV_XOR_SHARED_NAME ".1", xor1);
+ orion_clkdev_add("0", "pcie", pex0);
+ orion_clkdev_add("1", "pcie", pex1);
+ orion_clkdev_add(NULL, "kirkwood-i2s", audio);
+}
/*****************************************************************************
* EHCI0
****************************************************************************/
void __init kirkwood_ehci_init(void)
{
- kirkwood_clk_ctrl |= CGC_USB0;
orion_ehci_init(USB_PHYS_BASE, IRQ_KIRKWOOD_USB, EHCI_PHY_NA);
}
@@ -84,11 +254,9 @@ void __init kirkwood_ehci_init(void)
****************************************************************************/
void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
{
- kirkwood_clk_ctrl |= CGC_GE0;
-
orion_ge00_init(eth_data,
GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM,
- IRQ_KIRKWOOD_GE00_ERR, kirkwood_tclk);
+ IRQ_KIRKWOOD_GE00_ERR);
}
@@ -97,12 +265,9 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
****************************************************************************/
void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data)
{
-
- kirkwood_clk_ctrl |= CGC_GE1;
-
orion_ge01_init(eth_data,
GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM,
- IRQ_KIRKWOOD_GE01_ERR, kirkwood_tclk);
+ IRQ_KIRKWOOD_GE01_ERR);
}
@@ -144,7 +309,6 @@ static struct platform_device kirkwood_nand_flash = {
void __init kirkwood_nand_init(struct mtd_partition *parts, int nr_parts,
int chip_delay)
{
- kirkwood_clk_ctrl |= CGC_RUNIT;
kirkwood_nand_data.parts = parts;
kirkwood_nand_data.nr_parts = nr_parts;
kirkwood_nand_data.chip_delay = chip_delay;
@@ -154,7 +318,6 @@ void __init kirkwood_nand_init(struct mtd_partition *parts, int nr_parts,
void __init kirkwood_nand_init_rnb(struct mtd_partition *parts, int nr_parts,
int (*dev_ready)(struct mtd_info *))
{
- kirkwood_clk_ctrl |= CGC_RUNIT;
kirkwood_nand_data.parts = parts;
kirkwood_nand_data.nr_parts = nr_parts;
kirkwood_nand_data.dev_ready = dev_ready;
@@ -175,10 +338,6 @@ static void __init kirkwood_rtc_init(void)
****************************************************************************/
void __init kirkwood_sata_init(struct mv_sata_platform_data *sata_data)
{
- kirkwood_clk_ctrl |= CGC_SATA0;
- if (sata_data->n_ports > 1)
- kirkwood_clk_ctrl |= CGC_SATA1;
-
orion_sata_init(sata_data, SATA_PHYS_BASE, IRQ_KIRKWOOD_SATA);
}
@@ -221,7 +380,6 @@ void __init kirkwood_sdio_init(struct mvsdio_platform_data *mvsdio_data)
mvsdio_data->clock = 100000000;
else
mvsdio_data->clock = 200000000;
- kirkwood_clk_ctrl |= CGC_SDIO;
kirkwood_sdio.dev.platform_data = mvsdio_data;
platform_device_register(&kirkwood_sdio);
}
@@ -232,8 +390,7 @@ void __init kirkwood_sdio_init(struct mvsdio_platform_data *mvsdio_data)
****************************************************************************/
void __init kirkwood_spi_init()
{
- kirkwood_clk_ctrl |= CGC_RUNIT;
- orion_spi_init(SPI_PHYS_BASE, kirkwood_tclk);
+ orion_spi_init(SPI_PHYS_BASE);
}
@@ -253,7 +410,7 @@ void __init kirkwood_i2c_init(void)
void __init kirkwood_uart0_init(void)
{
orion_uart0_init(UART0_VIRT_BASE, UART0_PHYS_BASE,
- IRQ_KIRKWOOD_UART_0, kirkwood_tclk);
+ IRQ_KIRKWOOD_UART_0, tclk);
}
@@ -263,7 +420,7 @@ void __init kirkwood_uart0_init(void)
void __init kirkwood_uart1_init(void)
{
orion_uart1_init(UART1_VIRT_BASE, UART1_PHYS_BASE,
- IRQ_KIRKWOOD_UART_1, kirkwood_tclk);
+ IRQ_KIRKWOOD_UART_1, tclk);
}
/*****************************************************************************
@@ -271,7 +428,6 @@ void __init kirkwood_uart1_init(void)
****************************************************************************/
void __init kirkwood_crypto_init(void)
{
- kirkwood_clk_ctrl |= CGC_CRYPTO;
orion_crypto_init(CRYPTO_PHYS_BASE, KIRKWOOD_SRAM_PHYS_BASE,
KIRKWOOD_SRAM_SIZE, IRQ_KIRKWOOD_CRYPTO);
}
@@ -282,8 +438,6 @@ void __init kirkwood_crypto_init(void)
****************************************************************************/
void __init kirkwood_xor0_init(void)
{
- kirkwood_clk_ctrl |= CGC_XOR0;
-
orion_xor0_init(XOR0_PHYS_BASE, XOR0_HIGH_PHYS_BASE,
IRQ_KIRKWOOD_XOR_00, IRQ_KIRKWOOD_XOR_01);
}
@@ -294,8 +448,6 @@ void __init kirkwood_xor0_init(void)
****************************************************************************/
void __init kirkwood_xor1_init(void)
{
- kirkwood_clk_ctrl |= CGC_XOR1;
-
orion_xor1_init(XOR1_PHYS_BASE, XOR1_HIGH_PHYS_BASE,
IRQ_KIRKWOOD_XOR_10, IRQ_KIRKWOOD_XOR_11);
}
@@ -306,7 +458,7 @@ void __init kirkwood_xor1_init(void)
****************************************************************************/
void __init kirkwood_wdt_init(void)
{
- orion_wdt_init(kirkwood_tclk);
+ orion_wdt_init();
}
@@ -382,7 +534,6 @@ static struct platform_device kirkwood_pcm_device = {
void __init kirkwood_audio_init(void)
{
- kirkwood_clk_ctrl |= CGC_AUDIO;
platform_device_register(&kirkwood_i2s_device);
platform_device_register(&kirkwood_pcm_device);
}
@@ -466,6 +617,9 @@ void __init kirkwood_init(void)
kirkwood_l2_init();
#endif
+ /* Setup root of clk tree */
+ kirkwood_clk_init();
+
/* internal devices that every board has */
kirkwood_rtc_init();
kirkwood_wdt_init();
@@ -478,72 +632,6 @@ void __init kirkwood_init(void)
#endif
}
-static int __init kirkwood_clock_gate(void)
-{
- unsigned int curr = readl(CLOCK_GATING_CTRL);
- u32 dev, rev;
-
-#ifdef CONFIG_OF
- struct device_node *np;
-#endif
- kirkwood_pcie_id(&dev, &rev);
- printk(KERN_DEBUG "Gating clock of unused units\n");
- printk(KERN_DEBUG "before: 0x%08x\n", curr);
-
- /* Make sure those units are accessible */
- writel(curr | CGC_SATA0 | CGC_SATA1 | CGC_PEX0 | CGC_PEX1, CLOCK_GATING_CTRL);
-
-#ifdef CONFIG_OF
- np = of_find_compatible_node(NULL, NULL, "mrvl,orion-nand");
- if (np && of_device_is_available(np)) {
- kirkwood_clk_ctrl |= CGC_RUNIT;
- of_node_put(np);
- }
-#endif
-
- /* For SATA: first shutdown the phy */
- if (!(kirkwood_clk_ctrl & CGC_SATA0)) {
- /* Disable PLL and IVREF */
- writel(readl(SATA0_PHY_MODE_2) & ~0xf, SATA0_PHY_MODE_2);
- /* Disable PHY */
- writel(readl(SATA0_IF_CTRL) | 0x200, SATA0_IF_CTRL);
- }
- if (!(kirkwood_clk_ctrl & CGC_SATA1)) {
- /* Disable PLL and IVREF */
- writel(readl(SATA1_PHY_MODE_2) & ~0xf, SATA1_PHY_MODE_2);
- /* Disable PHY */
- writel(readl(SATA1_IF_CTRL) | 0x200, SATA1_IF_CTRL);
- }
-
- /* For PCIe: first shutdown the phy */
- if (!(kirkwood_clk_ctrl & CGC_PEX0)) {
- writel(readl(PCIE_LINK_CTRL) | 0x10, PCIE_LINK_CTRL);
- while (1)
- if (readl(PCIE_STATUS) & 0x1)
- break;
- writel(readl(PCIE_LINK_CTRL) & ~0x10, PCIE_LINK_CTRL);
- }
-
- /* For PCIe 1: first shutdown the phy */
- if (dev == MV88F6282_DEV_ID) {
- if (!(kirkwood_clk_ctrl & CGC_PEX1)) {
- writel(readl(PCIE1_LINK_CTRL) | 0x10, PCIE1_LINK_CTRL);
- while (1)
- if (readl(PCIE1_STATUS) & 0x1)
- break;
- writel(readl(PCIE1_LINK_CTRL) & ~0x10, PCIE1_LINK_CTRL);
- }
- } else /* keep this bit set for devices that don't have PCIe1 */
- kirkwood_clk_ctrl |= CGC_PEX1;
-
- /* Now gate clock the required units */
- writel(kirkwood_clk_ctrl, CLOCK_GATING_CTRL);
- printk(KERN_DEBUG " after: 0x%08x\n", readl(CLOCK_GATING_CTRL));
-
- return 0;
-}
-late_initcall(kirkwood_clock_gate);
-
void kirkwood_restart(char mode, const char *cmd)
{
/*
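On the consumer side none of the drivers had to learn about CGC bits: once kirkwood_clk_init() has registered the gates and orion_clkdev_add() has tied them to device names, an ordinary clk_get()/clk_prepare_enable() in a driver's probe ungates the unit on demand. A sketch of that consumer side for a hypothetical platform device:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int my_dev_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = clk_get(&pdev->dev, NULL);	 /* resolved through the clkdev table */
	if (!IS_ERR(clk)) {
		clk_prepare_enable(clk);	 /* ungates the unit's CGC bit */
		platform_set_drvdata(pdev, clk); /* keep the handle for remove() */
	}
	/* ... device setup ... */
	return 0;
}

Gates that end up with no consumer are shut off by the framework's late clk_disable_unused pass, at which point the clk_gate_fn disable hooks power down the SATA/PCIe PHYs — this replaces the removed kirkwood_clock_gate() late_initcall.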
diff --git a/arch/arm/mach-kirkwood/common.h b/arch/arm/mach-kirkwood/common.h
index a34c41a5172..9248fa2c165 100644
--- a/arch/arm/mach-kirkwood/common.h
+++ b/arch/arm/mach-kirkwood/common.h
@@ -50,6 +50,7 @@ void kirkwood_nand_init(struct mtd_partition *parts, int nr_parts, int delay);
void kirkwood_nand_init_rnb(struct mtd_partition *parts, int nr_parts, int (*dev_ready)(struct mtd_info *));
void kirkwood_audio_init(void);
void kirkwood_restart(char, const char *);
+void kirkwood_clk_init(void);
/* board init functions for boards not fully converted to fdt */
#ifdef CONFIG_MACH_DREAMPLUG_DT
diff --git a/arch/arm/mach-kirkwood/include/mach/bridge-regs.h b/arch/arm/mach-kirkwood/include/mach/bridge-regs.h
index 957bd7997d7..3eee37a3b50 100644
--- a/arch/arm/mach-kirkwood/include/mach/bridge-regs.h
+++ b/arch/arm/mach-kirkwood/include/mach/bridge-regs.h
@@ -43,6 +43,22 @@
#define L2_WRITETHROUGH 0x00000010
#define CLOCK_GATING_CTRL (BRIDGE_VIRT_BASE | 0x11c)
+#define CGC_BIT_GE0 (0)
+#define CGC_BIT_PEX0 (2)
+#define CGC_BIT_USB0 (3)
+#define CGC_BIT_SDIO (4)
+#define CGC_BIT_TSU (5)
+#define CGC_BIT_DUNIT (6)
+#define CGC_BIT_RUNIT (7)
+#define CGC_BIT_XOR0 (8)
+#define CGC_BIT_AUDIO (9)
+#define CGC_BIT_SATA0 (14)
+#define CGC_BIT_SATA1 (15)
+#define CGC_BIT_XOR1 (16)
+#define CGC_BIT_CRYPTO (17)
+#define CGC_BIT_PEX1 (18)
+#define CGC_BIT_GE1 (19)
+#define CGC_BIT_TDM (20)
#define CGC_GE0 (1 << 0)
#define CGC_PEX0 (1 << 2)
#define CGC_USB0 (1 << 3)
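The new CGC_BIT_* values are bit indices for clk_register_gate(), while the existing CGC_* masks stay behind for the early kirkwood_enable_pcie() path; both encodings describe the same gating register. An illustrative (assumed, not in-tree) compile-time check of that correspondence:

#include <linux/bug.h>
#include <mach/bridge-regs.h>

static inline void cgc_defines_agree(void)
{
	BUILD_BUG_ON((1 << CGC_BIT_GE0)   != CGC_GE0);
	BUILD_BUG_ON((1 << CGC_BIT_PEX0)  != CGC_PEX0);
	BUILD_BUG_ON((1 << CGC_BIT_SATA0) != CGC_SATA0);
}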
diff --git a/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c b/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
index 85f6169c248..6d8364a9781 100644
--- a/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
+++ b/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
@@ -23,7 +23,6 @@
#include <linux/gpio_keys.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
#include <net/dsa.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c
index de373176ee6..6e8b2efa3c3 100644
--- a/arch/arm/mach-kirkwood/pcie.c
+++ b/arch/arm/mach-kirkwood/pcie.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
+#include <linux/clk.h>
#include <video/vga.h>
#include <asm/irq.h>
#include <asm/mach/pci.h>
@@ -19,6 +20,23 @@
#include <plat/addr-map.h>
#include "common.h"
+static void kirkwood_enable_pcie_clk(const char *port)
+{
+ struct clk *clk;
+
+ clk = clk_get_sys("pcie", port);
+ if (IS_ERR(clk)) {
+ printk(KERN_ERR "PCIE clock %s missing\n", port);
+ return;
+ }
+ clk_prepare_enable(clk);
+ clk_put(clk);
+}
+
+/* This function is called very early in the boot when probing the
+ hardware to determine what we actually are, and what rate tclk is
+ ticking at. Hence calling kirkwood_enable_pcie_clk() is not
+ possible since the clk tree has not been created yet. */
void kirkwood_enable_pcie(void)
{
u32 curr = readl(CLOCK_GATING_CTRL);
@@ -26,7 +44,7 @@ void kirkwood_enable_pcie(void)
writel(curr | CGC_PEX0, CLOCK_GATING_CTRL);
}
-void __init kirkwood_pcie_id(u32 *dev, u32 *rev)
+void kirkwood_pcie_id(u32 *dev, u32 *rev)
{
kirkwood_enable_pcie();
*dev = orion_pcie_dev_id((void __iomem *)PCIE_VIRT_BASE);
@@ -159,7 +177,6 @@ static void __init pcie1_ioresources_init(struct pcie_port *pp)
static int __init kirkwood_pcie_setup(int nr, struct pci_sys_data *sys)
{
- extern unsigned int kirkwood_clk_ctrl;
struct pcie_port *pp;
int index;
@@ -178,11 +195,11 @@ static int __init kirkwood_pcie_setup(int nr, struct pci_sys_data *sys)
switch (index) {
case 0:
- kirkwood_clk_ctrl |= CGC_PEX0;
+ kirkwood_enable_pcie_clk("0");
pcie0_ioresources_init(pp);
break;
case 1:
- kirkwood_clk_ctrl |= CGC_PEX1;
+ kirkwood_enable_pcie_clk("1");
pcie1_ioresources_init(pp);
break;
default:
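kirkwood_enable_pcie_clk() is the lookup half of the clkdev entries added in common.c: orion_clkdev_add("0", "pcie", pex0) registers con_id "0" under dev_id "pcie", and clk_get_sys("pcie", port) fetches it back. Calling clk_put() right after clk_prepare_enable() is fine because the prepare/enable counts live on the clock itself, not on the lookup handle. The same pattern in isolation, as a sketch:

#include <linux/clk.h>
#include <linux/err.h>

static void enable_named_clock(const char *dev_id, const char *con_id)
{
	struct clk *clk = clk_get_sys(dev_id, con_id);

	if (IS_ERR(clk))
		return;
	clk_prepare_enable(clk);	/* count is kept on the clk */
	clk_put(clk);			/* only the lookup handle is released */
}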
diff --git a/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c b/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
index fd2c9c8b683..f742a66a704 100644
--- a/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
+++ b/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
@@ -16,7 +16,6 @@
#include <linux/gpio.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/kirkwood.h>
diff --git a/arch/arm/mach-kirkwood/t5325-setup.c b/arch/arm/mach-kirkwood/t5325-setup.c
index f9d2a11b7f9..bad738e4404 100644
--- a/arch/arm/mach-kirkwood/t5325-setup.c
+++ b/arch/arm/mach-kirkwood/t5325-setup.c
@@ -16,7 +16,6 @@
#include <linux/mtd/physmap.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
#include <linux/i2c.h>
#include <linux/mv643xx_eth.h>
#include <linux/ata_platform.h>
diff --git a/arch/arm/mach-kirkwood/tsx1x-common.c b/arch/arm/mach-kirkwood/tsx1x-common.c
index 24294b2bc46..8943ede29b4 100644
--- a/arch/arm/mach-kirkwood/tsx1x-common.c
+++ b/arch/arm/mach-kirkwood/tsx1x-common.c
@@ -4,7 +4,6 @@
#include <linux/mtd/physmap.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
#include <linux/serial_reg.h>
#include <mach/kirkwood.h>
#include "common.h"
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c
index 26aac363a06..4fa3e99d9a6 100644
--- a/arch/arm/mach-msm/board-halibut.c
+++ b/arch/arm/mach-msm/board-halibut.c
@@ -94,6 +94,11 @@ static void __init halibut_map_io(void)
msm_clock_init(msm_clocks_7x01a, msm_num_clocks_7x01a);
}
+static void __init halibut_init_late(void)
+{
+ smd_debugfs_init();
+}
+
MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)")
.atag_offset = 0x100,
.fixup = halibut_fixup,
@@ -101,5 +106,6 @@ MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)")
.init_early = halibut_init_early,
.init_irq = halibut_init_irq,
.init_machine = halibut_init,
+ .init_late = halibut_init_late,
.timer = &msm_timer,
MACHINE_END
diff --git a/arch/arm/mach-msm/board-mahimahi.c b/arch/arm/mach-msm/board-mahimahi.c
index 5a4882fc6f7..cf1f89a5dc6 100644
--- a/arch/arm/mach-msm/board-mahimahi.c
+++ b/arch/arm/mach-msm/board-mahimahi.c
@@ -71,6 +71,11 @@ static void __init mahimahi_map_io(void)
msm_clock_init();
}
+static void __init mahimahi_init_late(void)
+{
+ smd_debugfs_init();
+}
+
extern struct sys_timer msm_timer;
MACHINE_START(MAHIMAHI, "mahimahi")
@@ -79,5 +84,6 @@ MACHINE_START(MAHIMAHI, "mahimahi")
.map_io = mahimahi_map_io,
.init_irq = msm_init_irq,
.init_machine = mahimahi_init,
+ .init_late = mahimahi_init_late,
.timer = &msm_timer,
MACHINE_END
diff --git a/arch/arm/mach-msm/board-msm7x27.c b/arch/arm/mach-msm/board-msm7x27.c
index 6d84ee740df..451ab1d43c9 100644
--- a/arch/arm/mach-msm/board-msm7x27.c
+++ b/arch/arm/mach-msm/board-msm7x27.c
@@ -128,11 +128,17 @@ static void __init msm7x2x_map_io(void)
#endif
}
+static void __init msm7x2x_init_late(void)
+{
+ smd_debugfs_init();
+}
+
MACHINE_START(MSM7X27_SURF, "QCT MSM7x27 SURF")
.atag_offset = 0x100,
.map_io = msm7x2x_map_io,
.init_irq = msm7x2x_init_irq,
.init_machine = msm7x2x_init,
+ .init_late = msm7x2x_init_late,
.timer = &msm_timer,
MACHINE_END
@@ -141,6 +147,7 @@ MACHINE_START(MSM7X27_FFA, "QCT MSM7x27 FFA")
.map_io = msm7x2x_map_io,
.init_irq = msm7x2x_init_irq,
.init_machine = msm7x2x_init,
+ .init_late = msm7x2x_init_late,
.timer = &msm_timer,
MACHINE_END
@@ -149,6 +156,7 @@ MACHINE_START(MSM7X25_SURF, "QCT MSM7x25 SURF")
.map_io = msm7x2x_map_io,
.init_irq = msm7x2x_init_irq,
.init_machine = msm7x2x_init,
+ .init_late = msm7x2x_init_late,
.timer = &msm_timer,
MACHINE_END
@@ -157,5 +165,6 @@ MACHINE_START(MSM7X25_FFA, "QCT MSM7x25 FFA")
.map_io = msm7x2x_map_io,
.init_irq = msm7x2x_init_irq,
.init_machine = msm7x2x_init,
+ .init_late = msm7x2x_init_late,
.timer = &msm_timer,
MACHINE_END
diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c
index 75b3cfcada6..a5001378135 100644
--- a/arch/arm/mach-msm/board-msm7x30.c
+++ b/arch/arm/mach-msm/board-msm7x30.c
@@ -119,6 +119,11 @@ static void __init msm7x30_map_io(void)
msm_clock_init(msm_clocks_7x30, msm_num_clocks_7x30);
}
+static void __init msm7x30_init_late(void)
+{
+ smd_debugfs_init();
+}
+
MACHINE_START(MSM7X30_SURF, "QCT MSM7X30 SURF")
.atag_offset = 0x100,
.fixup = msm7x30_fixup,
@@ -126,6 +131,7 @@ MACHINE_START(MSM7X30_SURF, "QCT MSM7X30 SURF")
.map_io = msm7x30_map_io,
.init_irq = msm7x30_init_irq,
.init_machine = msm7x30_init,
+ .init_late = msm7x30_init_late,
.timer = &msm_timer,
MACHINE_END
@@ -136,6 +142,7 @@ MACHINE_START(MSM7X30_FFA, "QCT MSM7X30 FFA")
.map_io = msm7x30_map_io,
.init_irq = msm7x30_init_irq,
.init_machine = msm7x30_init,
+ .init_late = msm7x30_init_late,
.timer = &msm_timer,
MACHINE_END
@@ -146,5 +153,6 @@ MACHINE_START(MSM7X30_FLUID, "QCT MSM7X30 FLUID")
.map_io = msm7x30_map_io,
.init_irq = msm7x30_init_irq,
.init_machine = msm7x30_init,
+ .init_late = msm7x30_init_late,
.timer = &msm_timer,
MACHINE_END
diff --git a/arch/arm/mach-msm/board-msm8960.c b/arch/arm/mach-msm/board-msm8960.c
index ed359812853..65f4a1daa2e 100644
--- a/arch/arm/mach-msm/board-msm8960.c
+++ b/arch/arm/mach-msm/board-msm8960.c
@@ -93,6 +93,11 @@ static void __init msm8960_rumi3_init(void)
platform_add_devices(rumi3_devices, ARRAY_SIZE(rumi3_devices));
}
+static void __init msm8960_init_late(void)
+{
+ smd_debugfs_init();
+}
+
MACHINE_START(MSM8960_SIM, "QCT MSM8960 SIMULATOR")
.fixup = msm8960_fixup,
.reserve = msm8960_reserve,
@@ -101,6 +106,7 @@ MACHINE_START(MSM8960_SIM, "QCT MSM8960 SIMULATOR")
.timer = &msm_timer,
.handle_irq = gic_handle_irq,
.init_machine = msm8960_sim_init,
+ .init_late = msm8960_init_late,
MACHINE_END
MACHINE_START(MSM8960_RUMI3, "QCT MSM8960 RUMI3")
@@ -111,5 +117,6 @@ MACHINE_START(MSM8960_RUMI3, "QCT MSM8960 RUMI3")
.timer = &msm_timer,
.handle_irq = gic_handle_irq,
.init_machine = msm8960_rumi3_init,
+ .init_late = msm8960_init_late,
MACHINE_END
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index fb3496a52ef..e37a724cd1e 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -81,6 +81,11 @@ static void __init msm8x60_init(void)
{
}
+static void __init msm8x60_init_late(void)
+{
+ smd_debugfs_init();
+}
+
#ifdef CONFIG_OF
static struct of_dev_auxdata msm_auxdata_lookup[] __initdata = {
{}
@@ -111,6 +116,7 @@ MACHINE_START(MSM8X60_RUMI3, "QCT MSM8X60 RUMI3")
.init_irq = msm8x60_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = msm8x60_init,
+ .init_late = msm8x60_init_late,
.timer = &msm_timer,
MACHINE_END
@@ -121,6 +127,7 @@ MACHINE_START(MSM8X60_SURF, "QCT MSM8X60 SURF")
.init_irq = msm8x60_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = msm8x60_init,
+ .init_late = msm8x60_init_late,
.timer = &msm_timer,
MACHINE_END
@@ -131,6 +138,7 @@ MACHINE_START(MSM8X60_SIM, "QCT MSM8X60 SIMULATOR")
.init_irq = msm8x60_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = msm8x60_init,
+ .init_late = msm8x60_init_late,
.timer = &msm_timer,
MACHINE_END
@@ -141,6 +149,7 @@ MACHINE_START(MSM8X60_FFA, "QCT MSM8X60 FFA")
.init_irq = msm8x60_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = msm8x60_init,
+ .init_late = msm8x60_init_late,
.timer = &msm_timer,
MACHINE_END
@@ -150,6 +159,7 @@ DT_MACHINE_START(MSM_DT, "Qualcomm MSM (Flattened Device Tree)")
.map_io = msm8x60_map_io,
.init_irq = msm8x60_init_irq,
.init_machine = msm8x60_dt_init,
+ .init_late = msm8x60_init_late,
.timer = &msm_timer,
.dt_compat = msm8x60_fluid_match,
MACHINE_END
diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c
index fbaa4ed95a3..c8fe0edb976 100644
--- a/arch/arm/mach-msm/board-qsd8x50.c
+++ b/arch/arm/mach-msm/board-qsd8x50.c
@@ -190,11 +190,17 @@ static void __init qsd8x50_init(void)
qsd8x50_init_mmc();
}
+static void __init qsd8x50_init_late(void)
+{
+ smd_debugfs_init();
+}
+
MACHINE_START(QSD8X50_SURF, "QCT QSD8X50 SURF")
.atag_offset = 0x100,
.map_io = qsd8x50_map_io,
.init_irq = qsd8x50_init_irq,
.init_machine = qsd8x50_init,
+ .init_late = qsd8x50_init_late,
.timer = &msm_timer,
MACHINE_END
@@ -203,5 +209,6 @@ MACHINE_START(QSD8X50A_ST1_5, "QCT QSD8X50A ST1.5")
.map_io = qsd8x50_map_io,
.init_irq = qsd8x50_init_irq,
.init_machine = qsd8x50_init,
+ .init_late = qsd8x50_init_late,
.timer = &msm_timer,
MACHINE_END
diff --git a/arch/arm/mach-msm/board-sapphire.c b/arch/arm/mach-msm/board-sapphire.c
index 4a8ea0d40b6..2e569ab10ee 100644
--- a/arch/arm/mach-msm/board-sapphire.c
+++ b/arch/arm/mach-msm/board-sapphire.c
@@ -101,6 +101,11 @@ static void __init sapphire_map_io(void)
msm_clock_init();
}
+static void __init sapphire_init_late(void)
+{
+ smd_debugfs_init();
+}
+
MACHINE_START(SAPPHIRE, "sapphire")
/* Maintainer: Brian Swetland <swetland@google.com> */
.atag_offset = 0x100,
@@ -108,5 +113,6 @@ MACHINE_START(SAPPHIRE, "sapphire")
.map_io = sapphire_map_io,
.init_irq = sapphire_init_irq,
.init_machine = sapphire_init,
+ .init_late = sapphire_init_late,
.timer = &msm_timer,
MACHINE_END
diff --git a/arch/arm/mach-msm/board-trout.c b/arch/arm/mach-msm/board-trout.c
index d4060a37e23..bbe13f12fa0 100644
--- a/arch/arm/mach-msm/board-trout.c
+++ b/arch/arm/mach-msm/board-trout.c
@@ -98,6 +98,11 @@ static void __init trout_map_io(void)
msm_clock_init(msm_clocks_7x01a, msm_num_clocks_7x01a);
}
+static void __init trout_init_late(void)
+{
+ smd_debugfs_init();
+}
+
MACHINE_START(TROUT, "HTC Dream")
.atag_offset = 0x100,
.fixup = trout_fixup,
@@ -105,5 +110,6 @@ MACHINE_START(TROUT, "HTC Dream")
.init_early = trout_init_early,
.init_irq = trout_init_irq,
.init_machine = trout_init,
+ .init_late = trout_init_late,
.timer = &msm_timer,
MACHINE_END
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 2ce8f1f2fc4..435f8edfafd 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -47,4 +47,10 @@ int __init msm_add_sdcc(unsigned int controller,
struct msm_mmc_platform_data *plat,
unsigned int stat_irq, unsigned long stat_irq_flags);
+#if defined(CONFIG_MSM_SMD) && defined(CONFIG_DEBUG_FS)
+int smd_debugfs_init(void);
+#else
+static inline int smd_debugfs_init(void) { return 0; }
+#endif
+
#endif
diff --git a/arch/arm/mach-msm/smd_debug.c b/arch/arm/mach-msm/smd_debug.c
index c56df9e932a..8056b3e5590 100644
--- a/arch/arm/mach-msm/smd_debug.c
+++ b/arch/arm/mach-msm/smd_debug.c
@@ -216,7 +216,7 @@ static void debug_create(const char *name, umode_t mode,
debugfs_create_file(name, mode, dent, fill, &debug_ops);
}
-static int smd_debugfs_init(void)
+int __init smd_debugfs_init(void)
{
struct dentry *dent;
@@ -234,7 +234,6 @@ static int smd_debugfs_init(void)
return 0;
}
-late_initcall(smd_debugfs_init);
#endif
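board.h and smd_debug.c together keep the board files free of #ifdefs: the header supplies either a real prototype or an inline no-op, so every *_init_late() can call smd_debugfs_init() unconditionally and the call simply collapses to nothing when SMD or debugfs is not configured. The same stub pattern in general form, under hypothetical names (the matching .c file, built only when both options are set, then defines the real __init function):

/* my_feature.h */
#if defined(CONFIG_MY_FEATURE) && defined(CONFIG_DEBUG_FS)
int my_feature_debugfs_init(void);
#else
static inline int my_feature_debugfs_init(void) { return 0; }
#endif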
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
index a5dcf766a3f..b4c53b846c9 100644
--- a/arch/arm/mach-mv78xx0/common.c
+++ b/arch/arm/mach-mv78xx0/common.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/ata_platform.h>
+#include <linux/clk-provider.h>
#include <linux/ethtool.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
@@ -103,24 +104,24 @@ static void get_pclk_l2clk(int hclk, int core_index, int *pclk, int *l2clk)
static int get_tclk(void)
{
- int tclk;
+ int tclk_freq;
/*
* TCLK tick rate is configured by DEV_A[2:0] strap pins.
*/
switch ((readl(SAMPLE_AT_RESET_HIGH) >> 6) & 7) {
case 1:
- tclk = 166666667;
+ tclk_freq = 166666667;
break;
case 3:
- tclk = 200000000;
+ tclk_freq = 200000000;
break;
default:
panic("unknown TCLK PLL setting: %.8x\n",
readl(SAMPLE_AT_RESET_HIGH));
}
- return tclk;
+ return tclk_freq;
}
@@ -166,6 +167,19 @@ void __init mv78xx0_map_io(void)
/*****************************************************************************
+ * CLK tree
+ ****************************************************************************/
+static struct clk *tclk;
+
+static void __init clk_init(void)
+{
+ tclk = clk_register_fixed_rate(NULL, "tclk", NULL, CLK_IS_ROOT,
+ get_tclk());
+
+ orion_clkdev_init(tclk);
+}
+
+/*****************************************************************************
* EHCI
****************************************************************************/
void __init mv78xx0_ehci0_init(void)
@@ -199,7 +213,7 @@ void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge00_init(eth_data,
GE00_PHYS_BASE, IRQ_MV78XX0_GE00_SUM,
- IRQ_MV78XX0_GE_ERR, get_tclk());
+ IRQ_MV78XX0_GE_ERR);
}
@@ -210,7 +224,7 @@ void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge01_init(eth_data,
GE01_PHYS_BASE, IRQ_MV78XX0_GE01_SUM,
- NO_IRQ, get_tclk());
+ NO_IRQ);
}
@@ -234,7 +248,7 @@ void __init mv78xx0_ge10_init(struct mv643xx_eth_platform_data *eth_data)
orion_ge10_init(eth_data,
GE10_PHYS_BASE, IRQ_MV78XX0_GE10_SUM,
- NO_IRQ, get_tclk());
+ NO_IRQ);
}
@@ -258,7 +272,7 @@ void __init mv78xx0_ge11_init(struct mv643xx_eth_platform_data *eth_data)
orion_ge11_init(eth_data,
GE11_PHYS_BASE, IRQ_MV78XX0_GE11_SUM,
- NO_IRQ, get_tclk());
+ NO_IRQ);
}
/*****************************************************************************
@@ -285,7 +299,7 @@ void __init mv78xx0_sata_init(struct mv_sata_platform_data *sata_data)
void __init mv78xx0_uart0_init(void)
{
orion_uart0_init(UART0_VIRT_BASE, UART0_PHYS_BASE,
- IRQ_MV78XX0_UART_0, get_tclk());
+ IRQ_MV78XX0_UART_0, tclk);
}
@@ -295,7 +309,7 @@ void __init mv78xx0_uart0_init(void)
void __init mv78xx0_uart1_init(void)
{
orion_uart1_init(UART1_VIRT_BASE, UART1_PHYS_BASE,
- IRQ_MV78XX0_UART_1, get_tclk());
+ IRQ_MV78XX0_UART_1, tclk);
}
@@ -305,7 +319,7 @@ void __init mv78xx0_uart1_init(void)
void __init mv78xx0_uart2_init(void)
{
orion_uart2_init(UART2_VIRT_BASE, UART2_PHYS_BASE,
- IRQ_MV78XX0_UART_2, get_tclk());
+ IRQ_MV78XX0_UART_2, tclk);
}
/*****************************************************************************
@@ -314,7 +328,7 @@ void __init mv78xx0_uart2_init(void)
void __init mv78xx0_uart3_init(void)
{
orion_uart3_init(UART3_VIRT_BASE, UART3_PHYS_BASE,
- IRQ_MV78XX0_UART_3, get_tclk());
+ IRQ_MV78XX0_UART_3, tclk);
}
/*****************************************************************************
@@ -378,25 +392,26 @@ void __init mv78xx0_init(void)
int hclk;
int pclk;
int l2clk;
- int tclk;
core_index = mv78xx0_core_index();
hclk = get_hclk();
get_pclk_l2clk(hclk, core_index, &pclk, &l2clk);
- tclk = get_tclk();
printk(KERN_INFO "%s ", mv78xx0_id());
printk("core #%d, ", core_index);
printk("PCLK = %dMHz, ", (pclk + 499999) / 1000000);
printk("L2 = %dMHz, ", (l2clk + 499999) / 1000000);
printk("HCLK = %dMHz, ", (hclk + 499999) / 1000000);
- printk("TCLK = %dMHz\n", (tclk + 499999) / 1000000);
+ printk("TCLK = %dMHz\n", (get_tclk() + 499999) / 1000000);
mv78xx0_setup_cpu_mbus();
#ifdef CONFIG_CACHE_FEROCEON_L2
feroceon_l2_init(is_l2_writethrough());
#endif
+
+ /* Setup root of clk tree */
+ clk_init();
}
void mv78xx0_restart(char mode, const char *cmd)
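mv78xx0 gets the smallest possible clk tree: one fixed-rate root registered at the rate probed from the strap pins, which orion_clkdev_init() then maps to every peripheral that previously took a raw tclk integer. A sketch of just the registration step, with the probed rate passed in as a parameter:

#include <linux/clk-provider.h>
#include <linux/init.h>

static struct clk *tclk;

static void __init my_clk_init(unsigned long tclk_rate)
{
	/* No parent and CLK_IS_ROOT: this clock anchors the tree. */
	tclk = clk_register_fixed_rate(NULL, "tclk", NULL, CLK_IS_ROOT,
				       tclk_rate);
}

/* e.g. my_clk_init(200000000); called once from the SoC init path */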
diff --git a/arch/arm/mach-mxs/Kconfig b/arch/arm/mach-mxs/Kconfig
index 07d5383d68e..91cf0625819 100644
--- a/arch/arm/mach-mxs/Kconfig
+++ b/arch/arm/mach-mxs/Kconfig
@@ -7,18 +7,28 @@ config MXS_OCOTP
config SOC_IMX23
bool
+ select ARM_AMBA
select CPU_ARM926T
select HAVE_PWM
select PINCTRL_IMX23
config SOC_IMX28
bool
+ select ARM_AMBA
select CPU_ARM926T
select HAVE_PWM
select PINCTRL_IMX28
comment "MXS platforms:"
+config MACH_MXS_DT
+ bool "Support MXS platforms from device tree"
+ select SOC_IMX23
+ select SOC_IMX28
+ help
+	  Include support for Freescale MXS platforms (i.MX23 and i.MX28)
+	  using the device tree for discovery
+
config MACH_STMP378X_DEVB
bool "Support STMP378x_devb Platform"
select SOC_IMX23
diff --git a/arch/arm/mach-mxs/Makefile b/arch/arm/mach-mxs/Makefile
index 908bf9a567f..e41590ccb43 100644
--- a/arch/arm/mach-mxs/Makefile
+++ b/arch/arm/mach-mxs/Makefile
@@ -1,12 +1,10 @@
# Common support
-obj-y := clock.o devices.o icoll.o iomux.o system.o timer.o mm.o
+obj-y := devices.o icoll.o iomux.o system.o timer.o mm.o
obj-$(CONFIG_MXS_OCOTP) += ocotp.o
obj-$(CONFIG_PM) += pm.o
-obj-$(CONFIG_SOC_IMX23) += clock-mx23.o
-obj-$(CONFIG_SOC_IMX28) += clock-mx28.o
-
+obj-$(CONFIG_MACH_MXS_DT) += mach-mxs.o
obj-$(CONFIG_MACH_STMP378X_DEVB) += mach-stmp378x_devb.o
obj-$(CONFIG_MACH_MX23EVK) += mach-mx23evk.o
obj-$(CONFIG_MACH_MX28EVK) += mach-mx28evk.o
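MACH_MXS_DT selects both SoCs and builds a single mach-mxs.o that boots from the device tree, identifying the board by its compatible string rather than a machine number. That file is not part of this hunk; the general shape of such a DT board file looks roughly like the sketch below (hypothetical names and compatible string, not the actual mach-mxs.c; map_io/timer/irq hooks omitted):

#include <linux/init.h>
#include <linux/of_platform.h>
#include <asm/mach/arch.h>

static void __init my_dt_init_machine(void)
{
	/* Instantiate platform devices straight from the device tree. */
	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}

static const char *my_dt_compat[] __initdata = {
	"vendor,my-soc",	/* hypothetical compatible string */
	NULL,
};

DT_MACHINE_START(MY_DT, "Hypothetical DT platform")
	.init_machine	= my_dt_init_machine,
	.dt_compat	= my_dt_compat,
MACHINE_END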
diff --git a/arch/arm/mach-mxs/clock-mx23.c b/arch/arm/mach-mxs/clock-mx23.c
deleted file mode 100644
index e3ac52c3401..00000000000
--- a/arch/arm/mach-mxs/clock-mx23.c
+++ /dev/null
@@ -1,536 +0,0 @@
-/*
- * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/clkdev.h>
-
-#include <asm/clkdev.h>
-#include <asm/div64.h>
-
-#include <mach/mx23.h>
-#include <mach/common.h>
-#include <mach/clock.h>
-
-#include "regs-clkctrl-mx23.h"
-
-#define CLKCTRL_BASE_ADDR MX23_IO_ADDRESS(MX23_CLKCTRL_BASE_ADDR)
-#define DIGCTRL_BASE_ADDR MX23_IO_ADDRESS(MX23_DIGCTL_BASE_ADDR)
-
-#define PARENT_RATE_SHIFT 8
-
-static int _raw_clk_enable(struct clk *clk)
-{
- u32 reg;
-
- if (clk->enable_reg) {
- reg = __raw_readl(clk->enable_reg);
- reg &= ~(1 << clk->enable_shift);
- __raw_writel(reg, clk->enable_reg);
- }
-
- return 0;
-}
-
-static void _raw_clk_disable(struct clk *clk)
-{
- u32 reg;
-
- if (clk->enable_reg) {
- reg = __raw_readl(clk->enable_reg);
- reg |= 1 << clk->enable_shift;
- __raw_writel(reg, clk->enable_reg);
- }
-}
-
-/*
- * ref_xtal_clk
- */
-static unsigned long ref_xtal_clk_get_rate(struct clk *clk)
-{
- return 24000000;
-}
-
-static struct clk ref_xtal_clk = {
- .get_rate = ref_xtal_clk_get_rate,
-};
-
-/*
- * pll_clk
- */
-static unsigned long pll_clk_get_rate(struct clk *clk)
-{
- return 480000000;
-}
-
-static int pll_clk_enable(struct clk *clk)
-{
- __raw_writel(BM_CLKCTRL_PLLCTRL0_POWER |
- BM_CLKCTRL_PLLCTRL0_EN_USB_CLKS,
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_PLLCTRL0_SET);
-
- /* Only a 10us delay is need. PLLCTRL1 LOCK bitfied is only a timer
- * and is incorrect (excessive). Per definition of the PLLCTRL0
- * POWER field, waiting at least 10us.
- */
- udelay(10);
-
- return 0;
-}
-
-static void pll_clk_disable(struct clk *clk)
-{
- __raw_writel(BM_CLKCTRL_PLLCTRL0_POWER |
- BM_CLKCTRL_PLLCTRL0_EN_USB_CLKS,
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_PLLCTRL0_CLR);
-}
-
-static struct clk pll_clk = {
- .get_rate = pll_clk_get_rate,
- .enable = pll_clk_enable,
- .disable = pll_clk_disable,
- .parent = &ref_xtal_clk,
-};
-
-/*
- * ref_clk
- */
-#define _CLK_GET_RATE_REF(name, sr, ss) \
-static unsigned long name##_get_rate(struct clk *clk) \
-{ \
- unsigned long parent_rate; \
- u32 reg, div; \
- \
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##sr); \
- div = (reg >> BP_CLKCTRL_##sr##_##ss##FRAC) & 0x3f; \
- parent_rate = clk_get_rate(clk->parent); \
- \
- return SH_DIV((parent_rate >> PARENT_RATE_SHIFT) * 18, \
- div, PARENT_RATE_SHIFT); \
-}
-
-_CLK_GET_RATE_REF(ref_cpu_clk, FRAC, CPU)
-_CLK_GET_RATE_REF(ref_emi_clk, FRAC, EMI)
-_CLK_GET_RATE_REF(ref_pix_clk, FRAC, PIX)
-_CLK_GET_RATE_REF(ref_io_clk, FRAC, IO)
-
-#define _DEFINE_CLOCK_REF(name, er, es) \
- static struct clk name = { \
- .enable_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_##er, \
- .enable_shift = BP_CLKCTRL_##er##_CLKGATE##es, \
- .get_rate = name##_get_rate, \
- .enable = _raw_clk_enable, \
- .disable = _raw_clk_disable, \
- .parent = &pll_clk, \
- }
-
-_DEFINE_CLOCK_REF(ref_cpu_clk, FRAC, CPU);
-_DEFINE_CLOCK_REF(ref_emi_clk, FRAC, EMI);
-_DEFINE_CLOCK_REF(ref_pix_clk, FRAC, PIX);
-_DEFINE_CLOCK_REF(ref_io_clk, FRAC, IO);
-
-/*
- * General clocks
- *
- * clk_get_rate
- */
-static unsigned long rtc_clk_get_rate(struct clk *clk)
-{
- /* ref_xtal_clk is implemented as the only parent */
- return clk_get_rate(clk->parent) / 768;
-}
-
-static unsigned long clk32k_clk_get_rate(struct clk *clk)
-{
- return clk->parent->get_rate(clk->parent) / 750;
-}
-
-#define _CLK_GET_RATE(name, rs) \
-static unsigned long name##_get_rate(struct clk *clk) \
-{ \
- u32 reg, div; \
- \
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs); \
- \
- if (clk->parent == &ref_xtal_clk) \
- div = (reg & BM_CLKCTRL_##rs##_DIV_XTAL) >> \
- BP_CLKCTRL_##rs##_DIV_XTAL; \
- else \
- div = (reg & BM_CLKCTRL_##rs##_DIV_##rs) >> \
- BP_CLKCTRL_##rs##_DIV_##rs; \
- \
- if (!div) \
- return -EINVAL; \
- \
- return clk_get_rate(clk->parent) / div; \
-}
-
-_CLK_GET_RATE(cpu_clk, CPU)
-_CLK_GET_RATE(emi_clk, EMI)
-
-#define _CLK_GET_RATE1(name, rs) \
-static unsigned long name##_get_rate(struct clk *clk) \
-{ \
- u32 reg, div; \
- \
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs); \
- div = (reg & BM_CLKCTRL_##rs##_DIV) >> BP_CLKCTRL_##rs##_DIV; \
- \
- if (!div) \
- return -EINVAL; \
- \
- return clk_get_rate(clk->parent) / div; \
-}
-
-_CLK_GET_RATE1(hbus_clk, HBUS)
-_CLK_GET_RATE1(xbus_clk, XBUS)
-_CLK_GET_RATE1(ssp_clk, SSP)
-_CLK_GET_RATE1(gpmi_clk, GPMI)
-_CLK_GET_RATE1(lcdif_clk, PIX)
-
-#define _CLK_GET_RATE_STUB(name) \
-static unsigned long name##_get_rate(struct clk *clk) \
-{ \
- return clk_get_rate(clk->parent); \
-}
-
-_CLK_GET_RATE_STUB(uart_clk)
-_CLK_GET_RATE_STUB(audio_clk)
-_CLK_GET_RATE_STUB(pwm_clk)
-
-/*
- * clk_set_rate
- */
-static int cpu_clk_set_rate(struct clk *clk, unsigned long rate)
-{
- u32 reg, bm_busy, div_max, d, f, div, frac;
- unsigned long diff, parent_rate, calc_rate;
-
- parent_rate = clk_get_rate(clk->parent);
-
- if (clk->parent == &ref_xtal_clk) {
- div_max = BM_CLKCTRL_CPU_DIV_XTAL >> BP_CLKCTRL_CPU_DIV_XTAL;
- bm_busy = BM_CLKCTRL_CPU_BUSY_REF_XTAL;
- div = DIV_ROUND_UP(parent_rate, rate);
- if (div == 0 || div > div_max)
- return -EINVAL;
- } else {
- div_max = BM_CLKCTRL_CPU_DIV_CPU >> BP_CLKCTRL_CPU_DIV_CPU;
- bm_busy = BM_CLKCTRL_CPU_BUSY_REF_CPU;
- rate >>= PARENT_RATE_SHIFT;
- parent_rate >>= PARENT_RATE_SHIFT;
- diff = parent_rate;
- div = frac = 1;
- for (d = 1; d <= div_max; d++) {
- f = parent_rate * 18 / d / rate;
- if ((parent_rate * 18 / d) % rate)
- f++;
- if (f < 18 || f > 35)
- continue;
-
- calc_rate = parent_rate * 18 / f / d;
- if (calc_rate > rate)
- continue;
-
- if (rate - calc_rate < diff) {
- frac = f;
- div = d;
- diff = rate - calc_rate;
- }
-
- if (diff == 0)
- break;
- }
-
- if (diff == parent_rate)
- return -EINVAL;
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
- reg &= ~BM_CLKCTRL_FRAC_CPUFRAC;
- reg |= frac;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
- }
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU);
- reg &= ~BM_CLKCTRL_CPU_DIV_CPU;
- reg |= div << BP_CLKCTRL_CPU_DIV_CPU;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU);
-
- mxs_clkctrl_timeout(HW_CLKCTRL_CPU, bm_busy);
-
- return 0;
-}
-
-#define _CLK_SET_RATE(name, dr) \
-static int name##_set_rate(struct clk *clk, unsigned long rate) \
-{ \
- u32 reg, div_max, div; \
- unsigned long parent_rate; \
- \
- parent_rate = clk_get_rate(clk->parent); \
- div_max = BM_CLKCTRL_##dr##_DIV >> BP_CLKCTRL_##dr##_DIV; \
- \
- div = DIV_ROUND_UP(parent_rate, rate); \
- if (div == 0 || div > div_max) \
- return -EINVAL; \
- \
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
- reg &= ~BM_CLKCTRL_##dr##_DIV; \
- reg |= div << BP_CLKCTRL_##dr##_DIV; \
- if (reg & (1 << clk->enable_shift)) { \
- pr_err("%s: clock is gated\n", __func__); \
- return -EINVAL; \
- } \
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
- \
- mxs_clkctrl_timeout(HW_CLKCTRL_##dr, BM_CLKCTRL_##dr##_BUSY); \
- return 0; \
-}
-
-_CLK_SET_RATE(xbus_clk, XBUS)
-_CLK_SET_RATE(ssp_clk, SSP)
-_CLK_SET_RATE(gpmi_clk, GPMI)
-_CLK_SET_RATE(lcdif_clk, PIX)
-
-#define _CLK_SET_RATE_STUB(name) \
-static int name##_set_rate(struct clk *clk, unsigned long rate) \
-{ \
- return -EINVAL; \
-}
-
-_CLK_SET_RATE_STUB(emi_clk)
-_CLK_SET_RATE_STUB(uart_clk)
-_CLK_SET_RATE_STUB(audio_clk)
-_CLK_SET_RATE_STUB(pwm_clk)
-_CLK_SET_RATE_STUB(clk32k_clk)
-
-/*
- * clk_set_parent
- */
-#define _CLK_SET_PARENT(name, bit) \
-static int name##_set_parent(struct clk *clk, struct clk *parent) \
-{ \
- if (parent != clk->parent) { \
- __raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit, \
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG); \
- clk->parent = parent; \
- } \
- \
- return 0; \
-}
-
-_CLK_SET_PARENT(cpu_clk, CPU)
-_CLK_SET_PARENT(emi_clk, EMI)
-_CLK_SET_PARENT(ssp_clk, SSP)
-_CLK_SET_PARENT(gpmi_clk, GPMI)
-_CLK_SET_PARENT(lcdif_clk, PIX)
-
-#define _CLK_SET_PARENT_STUB(name) \
-static int name##_set_parent(struct clk *clk, struct clk *parent) \
-{ \
- if (parent != clk->parent) \
- return -EINVAL; \
- else \
- return 0; \
-}
-
-_CLK_SET_PARENT_STUB(uart_clk)
-_CLK_SET_PARENT_STUB(audio_clk)
-_CLK_SET_PARENT_STUB(pwm_clk)
-_CLK_SET_PARENT_STUB(clk32k_clk)
-
-/*
- * clk definition
- */
-static struct clk cpu_clk = {
- .get_rate = cpu_clk_get_rate,
- .set_rate = cpu_clk_set_rate,
- .set_parent = cpu_clk_set_parent,
- .parent = &ref_cpu_clk,
-};
-
-static struct clk hbus_clk = {
- .get_rate = hbus_clk_get_rate,
- .parent = &cpu_clk,
-};
-
-static struct clk xbus_clk = {
- .get_rate = xbus_clk_get_rate,
- .set_rate = xbus_clk_set_rate,
- .parent = &ref_xtal_clk,
-};
-
-static struct clk rtc_clk = {
- .get_rate = rtc_clk_get_rate,
- .parent = &ref_xtal_clk,
-};
-
-/* usb_clk gate is controlled in DIGCTRL other than CLKCTRL */
-static struct clk usb_clk = {
- .enable_reg = DIGCTRL_BASE_ADDR,
- .enable_shift = 2,
- .enable = _raw_clk_enable,
- .disable = _raw_clk_disable,
- .parent = &pll_clk,
-};
-
-#define _DEFINE_CLOCK(name, er, es, p) \
- static struct clk name = { \
- .enable_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_##er, \
- .enable_shift = BP_CLKCTRL_##er##_##es, \
- .get_rate = name##_get_rate, \
- .set_rate = name##_set_rate, \
- .set_parent = name##_set_parent, \
- .enable = _raw_clk_enable, \
- .disable = _raw_clk_disable, \
- .parent = p, \
- }
-
-_DEFINE_CLOCK(emi_clk, EMI, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(ssp_clk, SSP, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(gpmi_clk, GPMI, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(lcdif_clk, PIX, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(uart_clk, XTAL, UART_CLK_GATE, &ref_xtal_clk);
-_DEFINE_CLOCK(audio_clk, XTAL, FILT_CLK24M_GATE, &ref_xtal_clk);
-_DEFINE_CLOCK(pwm_clk, XTAL, PWM_CLK24M_GATE, &ref_xtal_clk);
-_DEFINE_CLOCK(clk32k_clk, XTAL, TIMROT_CLK32K_GATE, &ref_xtal_clk);
-
-#define _REGISTER_CLOCK(d, n, c) \
- { \
- .dev_id = d, \
- .con_id = n, \
- .clk = &c, \
- },
-
-static struct clk_lookup lookups[] = {
- /* for amba bus driver */
- _REGISTER_CLOCK("duart", "apb_pclk", xbus_clk)
- /* for amba-pl011 driver */
- _REGISTER_CLOCK("duart", NULL, uart_clk)
- _REGISTER_CLOCK("mxs-auart.0", NULL, uart_clk)
- _REGISTER_CLOCK("rtc", NULL, rtc_clk)
- _REGISTER_CLOCK("mxs-dma-apbh", NULL, hbus_clk)
- _REGISTER_CLOCK("mxs-dma-apbx", NULL, xbus_clk)
- _REGISTER_CLOCK("mxs-mmc.0", NULL, ssp_clk)
- _REGISTER_CLOCK("mxs-mmc.1", NULL, ssp_clk)
- _REGISTER_CLOCK(NULL, "usb", usb_clk)
- _REGISTER_CLOCK(NULL, "audio", audio_clk)
- _REGISTER_CLOCK("mxs-pwm.0", NULL, pwm_clk)
- _REGISTER_CLOCK("mxs-pwm.1", NULL, pwm_clk)
- _REGISTER_CLOCK("mxs-pwm.2", NULL, pwm_clk)
- _REGISTER_CLOCK("mxs-pwm.3", NULL, pwm_clk)
- _REGISTER_CLOCK("mxs-pwm.4", NULL, pwm_clk)
- _REGISTER_CLOCK("imx23-fb", NULL, lcdif_clk)
- _REGISTER_CLOCK("imx23-gpmi-nand", NULL, gpmi_clk)
-};
-
-static int clk_misc_init(void)
-{
- u32 reg;
- int ret;
-
- /* Fix up parent per register setting */
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ);
- cpu_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_CPU) ?
- &ref_xtal_clk : &ref_cpu_clk;
- emi_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_EMI) ?
- &ref_xtal_clk : &ref_emi_clk;
- ssp_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP) ?
- &ref_xtal_clk : &ref_io_clk;
- gpmi_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_GPMI) ?
- &ref_xtal_clk : &ref_io_clk;
- lcdif_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_PIX) ?
- &ref_xtal_clk : &ref_pix_clk;
-
- /* Use int div over frac when both are available */
- __raw_writel(BM_CLKCTRL_CPU_DIV_XTAL_FRAC_EN,
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_CLR);
- __raw_writel(BM_CLKCTRL_CPU_DIV_CPU_FRAC_EN,
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_CLR);
- __raw_writel(BM_CLKCTRL_HBUS_DIV_FRAC_EN,
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS_CLR);
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_XBUS);
- reg &= ~BM_CLKCTRL_XBUS_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_XBUS);
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP);
- reg &= ~BM_CLKCTRL_SSP_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP);
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI);
- reg &= ~BM_CLKCTRL_GPMI_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI);
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_PIX);
- reg &= ~BM_CLKCTRL_PIX_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_PIX);
-
- /*
- * Set safe hbus clock divider. A divider of 3 ensure that
- * the Vddd voltage required for the cpu clock is sufficiently
- * high for the hbus clock.
- */
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
- reg &= BM_CLKCTRL_HBUS_DIV;
- reg |= 3 << BP_CLKCTRL_HBUS_DIV;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
-
- ret = mxs_clkctrl_timeout(HW_CLKCTRL_HBUS, BM_CLKCTRL_HBUS_BUSY);
-
- /* Gate off cpu clock in WFI for power saving */
- __raw_writel(BM_CLKCTRL_CPU_INTERRUPT_WAIT,
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_SET);
-
- /*
- * 480 MHz seems too high to be ssp clock source directly,
- * so set frac to get a 288 MHz ref_io.
- */
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
- reg &= ~BM_CLKCTRL_FRAC_IOFRAC;
- reg |= 30 << BP_CLKCTRL_FRAC_IOFRAC;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
-
- return ret;
-}
-
-int __init mx23_clocks_init(void)
-{
- clk_misc_init();
-
- /*
-	 * source ssp clock from ref_io rather than ref_xtal,
- * as ref_xtal only provides 24 MHz as maximum.
- */
- clk_set_parent(&ssp_clk, &ref_io_clk);
-
- clk_prepare_enable(&cpu_clk);
- clk_prepare_enable(&hbus_clk);
- clk_prepare_enable(&xbus_clk);
- clk_prepare_enable(&emi_clk);
- clk_prepare_enable(&uart_clk);
-
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
- mxs_timer_init(&clk32k_clk, MX23_INT_TIMER0);
-
- return 0;
-}
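
A minimal standalone sketch of the fractional-divider arithmetic referred to in clk_misc_init() above: the ref clocks are derived as pll * 18 / frac with frac limited to 18..35, so writing IOFRAC = 30 yields the 288 MHz ref_io that mx23_clocks_init() then selects as the ssp parent. The helper name and the printf harness are illustrative, not part of the kernel code.

#include <stdio.h>

/* ref = pll * 18 / frac, frac in [18, 35]; use a 64-bit intermediate. */
static unsigned long ref_rate(unsigned long pll_rate, unsigned int frac)
{
	return (unsigned long)((unsigned long long)pll_rate * 18 / frac);
}

int main(void)
{
	/* IOFRAC = 30, as programmed by clk_misc_init() above */
	printf("ref_io = %lu Hz\n", ref_rate(480000000UL, 30)); /* 288000000 */
	return 0;
}
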
diff --git a/arch/arm/mach-mxs/clock-mx28.c b/arch/arm/mach-mxs/clock-mx28.c
deleted file mode 100644
index cea29c99e21..00000000000
--- a/arch/arm/mach-mxs/clock-mx28.c
+++ /dev/null
@@ -1,803 +0,0 @@
-/*
- * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/clkdev.h>
-#include <linux/spinlock.h>
-
-#include <asm/clkdev.h>
-#include <asm/div64.h>
-
-#include <mach/mx28.h>
-#include <mach/common.h>
-#include <mach/clock.h>
-#include <mach/digctl.h>
-
-#include "regs-clkctrl-mx28.h"
-
-#define CLKCTRL_BASE_ADDR MX28_IO_ADDRESS(MX28_CLKCTRL_BASE_ADDR)
-#define DIGCTRL_BASE_ADDR MX28_IO_ADDRESS(MX28_DIGCTL_BASE_ADDR)
-
-#define PARENT_RATE_SHIFT 8
-
-static struct clk pll2_clk;
-static struct clk cpu_clk;
-static struct clk emi_clk;
-static struct clk saif0_clk;
-static struct clk saif1_clk;
-static struct clk clk32k_clk;
-static DEFINE_SPINLOCK(clkmux_lock);
-
-/*
- * HW_SAIF_CLKMUX_SEL:
- * DIRECT(0x0): SAIF0 clock pins selected for SAIF0 input clocks, and SAIF1
- * clock pins selected for SAIF1 input clocks.
- * CROSSINPUT(0x1): SAIF1 clock inputs selected for SAIF0 input clocks, and
- * SAIF0 clock inputs selected for SAIF1 input clocks.
- * EXTMSTR0(0x2): SAIF0 clock pin selected for both SAIF0 and SAIF1 input
- * clocks.
- * EXTMSTR1(0x3): SAIF1 clock pin selected for both SAIF0 and SAIF1 input
- * clocks.
- */
-int mxs_saif_clkmux_select(unsigned int clkmux)
-{
- if (clkmux > 0x3)
- return -EINVAL;
-
- spin_lock(&clkmux_lock);
- __raw_writel(BM_DIGCTL_CTRL_SAIF_CLKMUX,
- DIGCTRL_BASE_ADDR + HW_DIGCTL_CTRL + MXS_CLR_ADDR);
- __raw_writel(clkmux << BP_DIGCTL_CTRL_SAIF_CLKMUX,
- DIGCTRL_BASE_ADDR + HW_DIGCTL_CTRL + MXS_SET_ADDR);
- spin_unlock(&clkmux_lock);
-
- return 0;
-}
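
The comment above lists the four HW_SAIF_CLKMUX_SEL routings. A hedged usage sketch, assuming a board that wants to clock both SAIF instances from the SAIF0 pin; only mxs_saif_clkmux_select() comes from the code above, and the mux constants here are illustrative names for the values given in the comment.

#include <mach/common.h>	/* declares mxs_saif_clkmux_select() */

/* Illustrative names for the mux values documented above. */
#define SAIF_CLKMUX_DIRECT	0x0
#define SAIF_CLKMUX_CROSSINPUT	0x1
#define SAIF_CLKMUX_EXTMSTR0	0x2
#define SAIF_CLKMUX_EXTMSTR1	0x3

static int example_board_saif_clkmux(void)
{
	/* Route the SAIF0 clock pin to both SAIF0 and SAIF1. */
	return mxs_saif_clkmux_select(SAIF_CLKMUX_EXTMSTR0);
}
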
-
-static int _raw_clk_enable(struct clk *clk)
-{
- u32 reg;
-
- if (clk->enable_reg) {
- reg = __raw_readl(clk->enable_reg);
- reg &= ~(1 << clk->enable_shift);
- __raw_writel(reg, clk->enable_reg);
- }
-
- return 0;
-}
-
-static void _raw_clk_disable(struct clk *clk)
-{
- u32 reg;
-
- if (clk->enable_reg) {
- reg = __raw_readl(clk->enable_reg);
- reg |= 1 << clk->enable_shift;
- __raw_writel(reg, clk->enable_reg);
- }
-}
-
-/*
- * ref_xtal_clk
- */
-static unsigned long ref_xtal_clk_get_rate(struct clk *clk)
-{
- return 24000000;
-}
-
-static struct clk ref_xtal_clk = {
- .get_rate = ref_xtal_clk_get_rate,
-};
-
-/*
- * pll_clk
- */
-static unsigned long pll0_clk_get_rate(struct clk *clk)
-{
- return 480000000;
-}
-
-static unsigned long pll1_clk_get_rate(struct clk *clk)
-{
- return 480000000;
-}
-
-static unsigned long pll2_clk_get_rate(struct clk *clk)
-{
- return 50000000;
-}
-
-#define _CLK_ENABLE_PLL(name, r, g) \
-static int name##_enable(struct clk *clk) \
-{ \
- __raw_writel(BM_CLKCTRL_##r##CTRL0_POWER, \
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_SET); \
- udelay(10); \
- \
- if (clk == &pll2_clk) \
- __raw_writel(BM_CLKCTRL_##r##CTRL0_##g, \
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_CLR); \
- else \
- __raw_writel(BM_CLKCTRL_##r##CTRL0_##g, \
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_SET); \
- \
- return 0; \
-}
-
-_CLK_ENABLE_PLL(pll0_clk, PLL0, EN_USB_CLKS)
-_CLK_ENABLE_PLL(pll1_clk, PLL1, EN_USB_CLKS)
-_CLK_ENABLE_PLL(pll2_clk, PLL2, CLKGATE)
-
-#define _CLK_DISABLE_PLL(name, r, g) \
-static void name##_disable(struct clk *clk) \
-{ \
- __raw_writel(BM_CLKCTRL_##r##CTRL0_POWER, \
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_CLR); \
- \
- if (clk == &pll2_clk) \
- __raw_writel(BM_CLKCTRL_##r##CTRL0_##g, \
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_SET); \
- else \
- __raw_writel(BM_CLKCTRL_##r##CTRL0_##g, \
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_CLR); \
- \
-}
-
-_CLK_DISABLE_PLL(pll0_clk, PLL0, EN_USB_CLKS)
-_CLK_DISABLE_PLL(pll1_clk, PLL1, EN_USB_CLKS)
-_CLK_DISABLE_PLL(pll2_clk, PLL2, CLKGATE)
-
-#define _DEFINE_CLOCK_PLL(name) \
- static struct clk name = { \
- .get_rate = name##_get_rate, \
- .enable = name##_enable, \
- .disable = name##_disable, \
- .parent = &ref_xtal_clk, \
- }
-
-_DEFINE_CLOCK_PLL(pll0_clk);
-_DEFINE_CLOCK_PLL(pll1_clk);
-_DEFINE_CLOCK_PLL(pll2_clk);
-
-/*
- * ref_clk
- */
-#define _CLK_GET_RATE_REF(name, sr, ss) \
-static unsigned long name##_get_rate(struct clk *clk) \
-{ \
- unsigned long parent_rate; \
- u32 reg, div; \
- \
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##sr); \
- div = (reg >> BP_CLKCTRL_##sr##_##ss##FRAC) & 0x3f; \
- parent_rate = clk_get_rate(clk->parent); \
- \
- return SH_DIV((parent_rate >> PARENT_RATE_SHIFT) * 18, \
- div, PARENT_RATE_SHIFT); \
-}
-
-_CLK_GET_RATE_REF(ref_cpu_clk, FRAC0, CPU)
-_CLK_GET_RATE_REF(ref_emi_clk, FRAC0, EMI)
-_CLK_GET_RATE_REF(ref_io0_clk, FRAC0, IO0)
-_CLK_GET_RATE_REF(ref_io1_clk, FRAC0, IO1)
-_CLK_GET_RATE_REF(ref_pix_clk, FRAC1, PIX)
-_CLK_GET_RATE_REF(ref_gpmi_clk, FRAC1, GPMI)
-
-#define _DEFINE_CLOCK_REF(name, er, es) \
- static struct clk name = { \
- .enable_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_##er, \
- .enable_shift = BP_CLKCTRL_##er##_CLKGATE##es, \
- .get_rate = name##_get_rate, \
- .enable = _raw_clk_enable, \
- .disable = _raw_clk_disable, \
- .parent = &pll0_clk, \
- }
-
-_DEFINE_CLOCK_REF(ref_cpu_clk, FRAC0, CPU);
-_DEFINE_CLOCK_REF(ref_emi_clk, FRAC0, EMI);
-_DEFINE_CLOCK_REF(ref_io0_clk, FRAC0, IO0);
-_DEFINE_CLOCK_REF(ref_io1_clk, FRAC0, IO1);
-_DEFINE_CLOCK_REF(ref_pix_clk, FRAC1, PIX);
-_DEFINE_CLOCK_REF(ref_gpmi_clk, FRAC1, GPMI);
-
-/*
- * General clocks
- *
- * clk_get_rate
- */
-static unsigned long lradc_clk_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / 16;
-}
-
-static unsigned long rtc_clk_get_rate(struct clk *clk)
-{
- /* ref_xtal_clk is implemented as the only parent */
- return clk_get_rate(clk->parent) / 768;
-}
-
-static unsigned long clk32k_clk_get_rate(struct clk *clk)
-{
- return clk->parent->get_rate(clk->parent) / 750;
-}
-
-static unsigned long spdif_clk_get_rate(struct clk *clk)
-{
- return clk_get_rate(clk->parent) / 4;
-}
-
-#define _CLK_GET_RATE(name, rs) \
-static unsigned long name##_get_rate(struct clk *clk) \
-{ \
- u32 reg, div; \
- \
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs); \
- \
- if (clk->parent == &ref_xtal_clk) \
- div = (reg & BM_CLKCTRL_##rs##_DIV_XTAL) >> \
- BP_CLKCTRL_##rs##_DIV_XTAL; \
- else \
- div = (reg & BM_CLKCTRL_##rs##_DIV_##rs) >> \
- BP_CLKCTRL_##rs##_DIV_##rs; \
- \
- if (!div) \
- return -EINVAL; \
- \
- return clk_get_rate(clk->parent) / div; \
-}
-
-_CLK_GET_RATE(cpu_clk, CPU)
-_CLK_GET_RATE(emi_clk, EMI)
-
-#define _CLK_GET_RATE1(name, rs) \
-static unsigned long name##_get_rate(struct clk *clk) \
-{ \
- u32 reg, div; \
- \
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs); \
- div = (reg & BM_CLKCTRL_##rs##_DIV) >> BP_CLKCTRL_##rs##_DIV; \
- \
- if (!div) \
- return -EINVAL; \
- \
- if (clk == &saif0_clk || clk == &saif1_clk) \
- return clk_get_rate(clk->parent) >> 16 * div; \
- else \
- return clk_get_rate(clk->parent) / div; \
-}
-
-_CLK_GET_RATE1(hbus_clk, HBUS)
-_CLK_GET_RATE1(xbus_clk, XBUS)
-_CLK_GET_RATE1(ssp0_clk, SSP0)
-_CLK_GET_RATE1(ssp1_clk, SSP1)
-_CLK_GET_RATE1(ssp2_clk, SSP2)
-_CLK_GET_RATE1(ssp3_clk, SSP3)
-_CLK_GET_RATE1(gpmi_clk, GPMI)
-_CLK_GET_RATE1(lcdif_clk, DIS_LCDIF)
-_CLK_GET_RATE1(saif0_clk, SAIF0)
-_CLK_GET_RATE1(saif1_clk, SAIF1)
-
-#define _CLK_GET_RATE_STUB(name) \
-static unsigned long name##_get_rate(struct clk *clk) \
-{ \
- return clk_get_rate(clk->parent); \
-}
-
-_CLK_GET_RATE_STUB(uart_clk)
-_CLK_GET_RATE_STUB(pwm_clk)
-_CLK_GET_RATE_STUB(can0_clk)
-_CLK_GET_RATE_STUB(can1_clk)
-_CLK_GET_RATE_STUB(fec_clk)
-
-/*
- * clk_set_rate
- */
-/* fool compiler */
-#define BM_CLKCTRL_CPU_DIV 0
-#define BP_CLKCTRL_CPU_DIV 0
-#define BM_CLKCTRL_CPU_BUSY 0
-
-#define _CLK_SET_RATE(name, dr, fr, fs) \
-static int name##_set_rate(struct clk *clk, unsigned long rate) \
-{ \
- u32 reg, bm_busy, div_max, d, f, div, frac; \
- unsigned long diff, parent_rate, calc_rate; \
- \
- div_max = BM_CLKCTRL_##dr##_DIV >> BP_CLKCTRL_##dr##_DIV; \
- bm_busy = BM_CLKCTRL_##dr##_BUSY; \
- \
- if (clk->parent == &ref_xtal_clk) { \
- parent_rate = clk_get_rate(clk->parent); \
- div = DIV_ROUND_UP(parent_rate, rate); \
- if (clk == &cpu_clk) { \
- div_max = BM_CLKCTRL_CPU_DIV_XTAL >> \
- BP_CLKCTRL_CPU_DIV_XTAL; \
- bm_busy = BM_CLKCTRL_CPU_BUSY_REF_XTAL; \
- } \
- if (div == 0 || div > div_max) \
- return -EINVAL; \
- } else { \
- /* \
- * hack alert: this block modifies clk->parent, too, \
-		 * so the base to use is the grandparent.		\
- */ \
- parent_rate = clk_get_rate(clk->parent->parent); \
- rate >>= PARENT_RATE_SHIFT; \
- parent_rate >>= PARENT_RATE_SHIFT; \
- diff = parent_rate; \
- div = frac = 1; \
- if (clk == &cpu_clk) { \
- div_max = BM_CLKCTRL_CPU_DIV_CPU >> \
- BP_CLKCTRL_CPU_DIV_CPU; \
- bm_busy = BM_CLKCTRL_CPU_BUSY_REF_CPU; \
- } \
- for (d = 1; d <= div_max; d++) { \
- f = parent_rate * 18 / d / rate; \
- if ((parent_rate * 18 / d) % rate) \
- f++; \
- if (f < 18 || f > 35) \
- continue; \
- \
- calc_rate = parent_rate * 18 / f / d; \
- if (calc_rate > rate) \
- continue; \
- \
- if (rate - calc_rate < diff) { \
- frac = f; \
- div = d; \
- diff = rate - calc_rate; \
- } \
- \
- if (diff == 0) \
- break; \
- } \
- \
- if (diff == parent_rate) \
- return -EINVAL; \
- \
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##fr); \
- reg &= ~BM_CLKCTRL_##fr##_##fs##FRAC; \
- reg |= frac << BP_CLKCTRL_##fr##_##fs##FRAC; \
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##fr); \
- } \
- \
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
- if (clk == &cpu_clk) { \
- reg &= ~BM_CLKCTRL_CPU_DIV_CPU; \
- reg |= div << BP_CLKCTRL_CPU_DIV_CPU; \
- } else { \
- reg &= ~BM_CLKCTRL_##dr##_DIV; \
- reg |= div << BP_CLKCTRL_##dr##_DIV; \
- if (reg & (1 << clk->enable_shift)) { \
- pr_err("%s: clock is gated\n", __func__); \
- return -EINVAL; \
- } \
- } \
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
- \
- return mxs_clkctrl_timeout(HW_CLKCTRL_##dr, bm_busy); \
-}
-
-_CLK_SET_RATE(cpu_clk, CPU, FRAC0, CPU)
-_CLK_SET_RATE(ssp0_clk, SSP0, FRAC0, IO0)
-_CLK_SET_RATE(ssp1_clk, SSP1, FRAC0, IO0)
-_CLK_SET_RATE(ssp2_clk, SSP2, FRAC0, IO1)
-_CLK_SET_RATE(ssp3_clk, SSP3, FRAC0, IO1)
-_CLK_SET_RATE(lcdif_clk, DIS_LCDIF, FRAC1, PIX)
-_CLK_SET_RATE(gpmi_clk, GPMI, FRAC1, GPMI)
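
The _CLK_SET_RATE() macro above picks an integer divider together with a fractional value in 18..35 so that parent * 18 / frac / div gets as close to the requested rate as possible without exceeding it. Below is a brute-force restatement of that selection that can be compiled on its own; the in-kernel macro derives the candidate frac arithmetically instead of looping over it, and the 297 MHz request is only an example.

#include <stdio.h>

static void pick_div_frac(unsigned long long parent, unsigned long long rate,
			  unsigned int div_max,
			  unsigned int *best_div, unsigned int *best_frac)
{
	unsigned long long diff = parent, calc;
	unsigned int d, f;

	*best_div = 1;
	*best_frac = 1;
	for (d = 1; d <= div_max; d++) {
		for (f = 18; f <= 35; f++) {
			calc = parent * 18 / f / d;
			if (calc > rate)
				continue;	/* never overshoot */
			if (rate - calc < diff) {
				diff = rate - calc;
				*best_div = d;
				*best_frac = f;
			}
		}
	}
}

int main(void)
{
	unsigned int div, frac;

	pick_div_frac(480000000ULL, 297000000ULL, 0x3f, &div, &frac);
	/* Prints div=1 frac=30 -> 288000000 Hz */
	printf("div=%u frac=%u -> %llu Hz\n", div, frac,
	       480000000ULL * 18 / frac / div);
	return 0;
}
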
-
-#define _CLK_SET_RATE1(name, dr) \
-static int name##_set_rate(struct clk *clk, unsigned long rate) \
-{ \
- u32 reg, div_max, div; \
- unsigned long parent_rate; \
- \
- parent_rate = clk_get_rate(clk->parent); \
- div_max = BM_CLKCTRL_##dr##_DIV >> BP_CLKCTRL_##dr##_DIV; \
- \
- div = DIV_ROUND_UP(parent_rate, rate); \
- if (div == 0 || div > div_max) \
- return -EINVAL; \
- \
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
- reg &= ~BM_CLKCTRL_##dr##_DIV; \
- reg |= div << BP_CLKCTRL_##dr##_DIV; \
- if (reg & (1 << clk->enable_shift)) { \
- pr_err("%s: clock is gated\n", __func__); \
- return -EINVAL; \
- } \
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
- \
- return mxs_clkctrl_timeout(HW_CLKCTRL_##dr, BM_CLKCTRL_##dr##_BUSY);\
-}
-
-_CLK_SET_RATE1(xbus_clk, XBUS)
-
-/* saif clock uses 16 bits frac div */
-#define _CLK_SET_RATE_SAIF(name, rs) \
-static int name##_set_rate(struct clk *clk, unsigned long rate) \
-{ \
- u16 div; \
- u32 reg; \
- u64 lrate; \
- unsigned long parent_rate; \
- \
- parent_rate = clk_get_rate(clk->parent); \
- if (rate > parent_rate) \
- return -EINVAL; \
- \
- lrate = (u64)rate << 16; \
- do_div(lrate, parent_rate); \
- div = (u16)lrate; \
- \
- if (!div) \
- return -EINVAL; \
- \
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs); \
- reg &= ~BM_CLKCTRL_##rs##_DIV; \
- reg |= div << BP_CLKCTRL_##rs##_DIV; \
- if (reg & (1 << clk->enable_shift)) { \
- pr_err("%s: clock is gated\n", __func__); \
- return -EINVAL; \
- } \
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs); \
- \
- return mxs_clkctrl_timeout(HW_CLKCTRL_##rs, BM_CLKCTRL_##rs##_BUSY);\
-}
-
-_CLK_SET_RATE_SAIF(saif0_clk, SAIF0)
-_CLK_SET_RATE_SAIF(saif1_clk, SAIF1)
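
The SAIF divider above is a 16-bit fractional one: the register field is rate * 2^16 / parent, so the achieved rate is parent * div / 65536 and always lands at or just below the request. A short worked sketch using the 24 MHz initial rate that mx28_clocks_init() programs further down; the harness is illustrative only.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t parent = 480000000;	/* pll0 */
	uint64_t rate = 24000000;	/* initial SAIF rate, see below */
	uint16_t div = (uint16_t)((rate << 16) / parent);	/* 3276 */

	printf("div=%u -> %llu Hz\n", div,
	       (unsigned long long)(parent * div >> 16));	/* ~23.994 MHz */
	return 0;
}
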
-
-#define _CLK_SET_RATE_STUB(name) \
-static int name##_set_rate(struct clk *clk, unsigned long rate) \
-{ \
- return -EINVAL; \
-}
-
-_CLK_SET_RATE_STUB(emi_clk)
-_CLK_SET_RATE_STUB(uart_clk)
-_CLK_SET_RATE_STUB(pwm_clk)
-_CLK_SET_RATE_STUB(spdif_clk)
-_CLK_SET_RATE_STUB(clk32k_clk)
-_CLK_SET_RATE_STUB(can0_clk)
-_CLK_SET_RATE_STUB(can1_clk)
-_CLK_SET_RATE_STUB(fec_clk)
-
-/*
- * clk_set_parent
- */
-#define _CLK_SET_PARENT(name, bit) \
-static int name##_set_parent(struct clk *clk, struct clk *parent) \
-{ \
- if (parent != clk->parent) { \
- __raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit, \
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG); \
- clk->parent = parent; \
- } \
- \
- return 0; \
-}
-
-_CLK_SET_PARENT(cpu_clk, CPU)
-_CLK_SET_PARENT(emi_clk, EMI)
-_CLK_SET_PARENT(ssp0_clk, SSP0)
-_CLK_SET_PARENT(ssp1_clk, SSP1)
-_CLK_SET_PARENT(ssp2_clk, SSP2)
-_CLK_SET_PARENT(ssp3_clk, SSP3)
-_CLK_SET_PARENT(lcdif_clk, DIS_LCDIF)
-_CLK_SET_PARENT(gpmi_clk, GPMI)
-_CLK_SET_PARENT(saif0_clk, SAIF0)
-_CLK_SET_PARENT(saif1_clk, SAIF1)
-
-#define _CLK_SET_PARENT_STUB(name) \
-static int name##_set_parent(struct clk *clk, struct clk *parent) \
-{ \
- if (parent != clk->parent) \
- return -EINVAL; \
- else \
- return 0; \
-}
-
-_CLK_SET_PARENT_STUB(pwm_clk)
-_CLK_SET_PARENT_STUB(uart_clk)
-_CLK_SET_PARENT_STUB(clk32k_clk)
-_CLK_SET_PARENT_STUB(spdif_clk)
-_CLK_SET_PARENT_STUB(fec_clk)
-_CLK_SET_PARENT_STUB(can0_clk)
-_CLK_SET_PARENT_STUB(can1_clk)
-
-/*
- * clk definition
- */
-static struct clk cpu_clk = {
- .get_rate = cpu_clk_get_rate,
- .set_rate = cpu_clk_set_rate,
- .set_parent = cpu_clk_set_parent,
- .parent = &ref_cpu_clk,
-};
-
-static struct clk hbus_clk = {
- .get_rate = hbus_clk_get_rate,
- .parent = &cpu_clk,
-};
-
-static struct clk xbus_clk = {
- .get_rate = xbus_clk_get_rate,
- .set_rate = xbus_clk_set_rate,
- .parent = &ref_xtal_clk,
-};
-
-static struct clk lradc_clk = {
- .get_rate = lradc_clk_get_rate,
- .parent = &clk32k_clk,
-};
-
-static struct clk rtc_clk = {
- .get_rate = rtc_clk_get_rate,
- .parent = &ref_xtal_clk,
-};
-
-/* usb_clk gate is controlled in DIGCTRL rather than in CLKCTRL */
-static struct clk usb0_clk = {
- .enable_reg = DIGCTRL_BASE_ADDR,
- .enable_shift = 2,
- .enable = _raw_clk_enable,
- .disable = _raw_clk_disable,
- .parent = &pll0_clk,
-};
-
-static struct clk usb1_clk = {
- .enable_reg = DIGCTRL_BASE_ADDR,
- .enable_shift = 16,
- .enable = _raw_clk_enable,
- .disable = _raw_clk_disable,
- .parent = &pll1_clk,
-};
-
-#define _DEFINE_CLOCK(name, er, es, p) \
- static struct clk name = { \
- .enable_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_##er, \
- .enable_shift = BP_CLKCTRL_##er##_##es, \
- .get_rate = name##_get_rate, \
- .set_rate = name##_set_rate, \
- .set_parent = name##_set_parent, \
- .enable = _raw_clk_enable, \
- .disable = _raw_clk_disable, \
- .parent = p, \
- }
-
-_DEFINE_CLOCK(emi_clk, EMI, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(ssp0_clk, SSP0, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(ssp1_clk, SSP1, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(ssp2_clk, SSP2, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(ssp3_clk, SSP3, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(lcdif_clk, DIS_LCDIF, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(gpmi_clk, GPMI, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(saif0_clk, SAIF0, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(saif1_clk, SAIF1, CLKGATE, &ref_xtal_clk);
-_DEFINE_CLOCK(can0_clk, FLEXCAN, STOP_CAN0, &ref_xtal_clk);
-_DEFINE_CLOCK(can1_clk, FLEXCAN, STOP_CAN1, &ref_xtal_clk);
-_DEFINE_CLOCK(pwm_clk, XTAL, PWM_CLK24M_GATE, &ref_xtal_clk);
-_DEFINE_CLOCK(uart_clk, XTAL, UART_CLK_GATE, &ref_xtal_clk);
-_DEFINE_CLOCK(clk32k_clk, XTAL, TIMROT_CLK32K_GATE, &ref_xtal_clk);
-_DEFINE_CLOCK(spdif_clk, SPDIF, CLKGATE, &pll0_clk);
-_DEFINE_CLOCK(fec_clk, ENET, DISABLE, &hbus_clk);
-
-#define _REGISTER_CLOCK(d, n, c) \
- { \
- .dev_id = d, \
- .con_id = n, \
- .clk = &c, \
- },
-
-static struct clk_lookup lookups[] = {
- /* for amba bus driver */
- _REGISTER_CLOCK("duart", "apb_pclk", xbus_clk)
- /* for amba-pl011 driver */
- _REGISTER_CLOCK("duart", NULL, uart_clk)
- _REGISTER_CLOCK("imx28-fec.0", NULL, fec_clk)
- _REGISTER_CLOCK("imx28-fec.1", NULL, fec_clk)
- _REGISTER_CLOCK("imx28-gpmi-nand", NULL, gpmi_clk)
- _REGISTER_CLOCK("mxs-auart.0", NULL, uart_clk)
- _REGISTER_CLOCK("mxs-auart.1", NULL, uart_clk)
- _REGISTER_CLOCK("mxs-auart.2", NULL, uart_clk)
- _REGISTER_CLOCK("mxs-auart.3", NULL, uart_clk)
- _REGISTER_CLOCK("mxs-auart.4", NULL, uart_clk)
- _REGISTER_CLOCK("rtc", NULL, rtc_clk)
- _REGISTER_CLOCK("pll2", NULL, pll2_clk)
- _REGISTER_CLOCK("mxs-dma-apbh", NULL, hbus_clk)
- _REGISTER_CLOCK("mxs-dma-apbx", NULL, xbus_clk)
- _REGISTER_CLOCK("mxs-mmc.0", NULL, ssp0_clk)
- _REGISTER_CLOCK("mxs-mmc.1", NULL, ssp1_clk)
- _REGISTER_CLOCK("mxs-mmc.2", NULL, ssp2_clk)
- _REGISTER_CLOCK("mxs-mmc.3", NULL, ssp3_clk)
- _REGISTER_CLOCK("flexcan.0", NULL, can0_clk)
- _REGISTER_CLOCK("flexcan.1", NULL, can1_clk)
- _REGISTER_CLOCK(NULL, "usb0", usb0_clk)
- _REGISTER_CLOCK(NULL, "usb1", usb1_clk)
- _REGISTER_CLOCK("mxs-pwm.0", NULL, pwm_clk)
- _REGISTER_CLOCK("mxs-pwm.1", NULL, pwm_clk)
- _REGISTER_CLOCK("mxs-pwm.2", NULL, pwm_clk)
- _REGISTER_CLOCK("mxs-pwm.3", NULL, pwm_clk)
- _REGISTER_CLOCK("mxs-pwm.4", NULL, pwm_clk)
- _REGISTER_CLOCK("mxs-pwm.5", NULL, pwm_clk)
- _REGISTER_CLOCK("mxs-pwm.6", NULL, pwm_clk)
- _REGISTER_CLOCK("mxs-pwm.7", NULL, pwm_clk)
- _REGISTER_CLOCK(NULL, "lradc", lradc_clk)
- _REGISTER_CLOCK(NULL, "spdif", spdif_clk)
- _REGISTER_CLOCK("imx28-fb", NULL, lcdif_clk)
- _REGISTER_CLOCK("mxs-saif.0", NULL, saif0_clk)
- _REGISTER_CLOCK("mxs-saif.1", NULL, saif1_clk)
-};
-
-static int clk_misc_init(void)
-{
- u32 reg;
- int ret;
-
- /* Fix up parent per register setting */
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ);
- cpu_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_CPU) ?
- &ref_xtal_clk : &ref_cpu_clk;
- emi_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_EMI) ?
- &ref_xtal_clk : &ref_emi_clk;
- ssp0_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP0) ?
- &ref_xtal_clk : &ref_io0_clk;
- ssp1_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP1) ?
- &ref_xtal_clk : &ref_io0_clk;
- ssp2_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP2) ?
- &ref_xtal_clk : &ref_io1_clk;
- ssp3_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP3) ?
- &ref_xtal_clk : &ref_io1_clk;
- lcdif_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_DIS_LCDIF) ?
- &ref_xtal_clk : &ref_pix_clk;
- gpmi_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_GPMI) ?
- &ref_xtal_clk : &ref_gpmi_clk;
- saif0_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SAIF0) ?
- &ref_xtal_clk : &pll0_clk;
- saif1_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SAIF1) ?
- &ref_xtal_clk : &pll0_clk;
-
- /* Use int div over frac when both are available */
- __raw_writel(BM_CLKCTRL_CPU_DIV_XTAL_FRAC_EN,
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_CLR);
- __raw_writel(BM_CLKCTRL_CPU_DIV_CPU_FRAC_EN,
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_CLR);
- __raw_writel(BM_CLKCTRL_HBUS_DIV_FRAC_EN,
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS_CLR);
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_XBUS);
- reg &= ~BM_CLKCTRL_XBUS_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_XBUS);
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP0);
- reg &= ~BM_CLKCTRL_SSP0_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP0);
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP1);
- reg &= ~BM_CLKCTRL_SSP1_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP1);
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP2);
- reg &= ~BM_CLKCTRL_SSP2_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP2);
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP3);
- reg &= ~BM_CLKCTRL_SSP3_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP3);
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI);
- reg &= ~BM_CLKCTRL_GPMI_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI);
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_DIS_LCDIF);
- reg &= ~BM_CLKCTRL_DIS_LCDIF_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_DIS_LCDIF);
-
- /* SAIF has to use frac div for functional operation */
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SAIF0);
- reg |= BM_CLKCTRL_SAIF0_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SAIF0);
-
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SAIF1);
- reg |= BM_CLKCTRL_SAIF1_DIV_FRAC_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SAIF1);
-
- /*
-	 * Set safe hbus clock divider. A divider of 3 ensures that
- * the Vddd voltage required for the cpu clock is sufficiently
- * high for the hbus clock.
- */
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
- reg &= BM_CLKCTRL_HBUS_DIV;
- reg |= 3 << BP_CLKCTRL_HBUS_DIV;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
-
- ret = mxs_clkctrl_timeout(HW_CLKCTRL_HBUS, BM_CLKCTRL_HBUS_ASM_BUSY);
-
- /* Gate off cpu clock in WFI for power saving */
- __raw_writel(BM_CLKCTRL_CPU_INTERRUPT_WAIT,
- CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_SET);
-
- /*
- * Extra fec clock setting
- * The DENX M28 uses an external clock source
- * and the clock output must not be enabled
- */
- if (!machine_is_m28evk()) {
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_ENET);
- reg &= ~BM_CLKCTRL_ENET_SLEEP;
- reg |= BM_CLKCTRL_ENET_CLK_OUT_EN;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_ENET);
- }
-
- /*
-	 * 480 MHz seems too high to be the ssp clock source directly,
- * so set frac0 to get a 288 MHz ref_io0.
- */
- reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC0);
- reg &= ~BM_CLKCTRL_FRAC0_IO0FRAC;
- reg |= 30 << BP_CLKCTRL_FRAC0_IO0FRAC;
- __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC0);
-
- return ret;
-}
-
-int __init mx28_clocks_init(void)
-{
- clk_misc_init();
-
- /*
-	 * source ssp clock from ref_io0 rather than ref_xtal,
- * as ref_xtal only provides 24 MHz as maximum.
- */
- clk_set_parent(&ssp0_clk, &ref_io0_clk);
- clk_set_parent(&ssp1_clk, &ref_io0_clk);
- clk_set_parent(&ssp2_clk, &ref_io1_clk);
- clk_set_parent(&ssp3_clk, &ref_io1_clk);
-
- clk_prepare_enable(&cpu_clk);
- clk_prepare_enable(&hbus_clk);
- clk_prepare_enable(&xbus_clk);
- clk_prepare_enable(&emi_clk);
- clk_prepare_enable(&uart_clk);
-
- clk_set_parent(&lcdif_clk, &ref_pix_clk);
- clk_set_parent(&saif0_clk, &pll0_clk);
- clk_set_parent(&saif1_clk, &pll0_clk);
-
- /*
- * Set an initial clock rate for the saif internal logic to work
-	 * properly. This is important in EXTMASTER mode, where one saif
-	 * uses the other saif's BITCLK & LRCLK but still needs a basic
-	 * clock that is fast enough for its internal logic.
- */
- clk_set_rate(&saif0_clk, 24000000);
- clk_set_rate(&saif1_clk, 24000000);
-
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
- mxs_timer_init(&clk32k_clk, MX28_INT_TIMER0);
-
- return 0;
-}
diff --git a/arch/arm/mach-mxs/clock.c b/arch/arm/mach-mxs/clock.c
deleted file mode 100644
index 97a6f4acc6c..00000000000
--- a/arch/arm/mach-mxs/clock.c
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Based on arch/arm/plat-omap/clock.c
- *
- * Copyright (C) 2004 - 2005 Nokia corporation
- * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
- * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
- * Copyright 2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-/* #define DEBUG */
-
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
-#include <linux/proc_fs.h>
-#include <linux/semaphore.h>
-#include <linux/string.h>
-
-#include <mach/clock.h>
-
-static LIST_HEAD(clocks);
-static DEFINE_MUTEX(clocks_mutex);
-
-/*-------------------------------------------------------------------------
- * Standard clock functions defined in include/linux/clk.h
- *-------------------------------------------------------------------------*/
-
-static void __clk_disable(struct clk *clk)
-{
- if (clk == NULL || IS_ERR(clk))
- return;
- WARN_ON(!clk->usecount);
-
- if (!(--clk->usecount)) {
- if (clk->disable)
- clk->disable(clk);
- __clk_disable(clk->parent);
- }
-}
-
-static int __clk_enable(struct clk *clk)
-{
- if (clk == NULL || IS_ERR(clk))
- return -EINVAL;
-
- if (clk->usecount++ == 0) {
- __clk_enable(clk->parent);
-
- if (clk->enable)
- clk->enable(clk);
- }
- return 0;
-}
-
-/*
- * The clk_enable/clk_disable could be called by drivers in atomic context,
- * so they should not really hold a mutex. Instead, clk_prepare/clk_unprepare
- * can hold a mutex, as the pair will only be called in non-atomic context.
- * Before migrating to common clk framework, we can have __clk_enable and
- * __clk_disable called in clk_prepare/clk_unprepare with mutex held and
- * leave clk_enable/clk_disable as the dummy functions.
- */
-int clk_prepare(struct clk *clk)
-{
- int ret = 0;
-
- if (clk == NULL || IS_ERR(clk))
- return -EINVAL;
-
- mutex_lock(&clocks_mutex);
- ret = __clk_enable(clk);
- mutex_unlock(&clocks_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(clk_prepare);
-
-void clk_unprepare(struct clk *clk)
-{
- if (clk == NULL || IS_ERR(clk))
- return;
-
- mutex_lock(&clocks_mutex);
- __clk_disable(clk);
- mutex_unlock(&clocks_mutex);
-}
-EXPORT_SYMBOL(clk_unprepare);
-
-int clk_enable(struct clk *clk)
-{
- return 0;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
- /* nothing to do */
-}
-EXPORT_SYMBOL(clk_disable);
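
The comment above describes the interim split: the mutex-protected work happens in clk_prepare()/clk_unprepare(), while clk_enable()/clk_disable() stay as no-ops so that callers in atomic context remain legal. A sketch of the consumer-side pattern this supports, using only the standard <linux/clk.h> API; the driver, device and clock names are made up.

#include <linux/clk.h>
#include <linux/err.h>

static struct clk *example_clk;

static int example_probe(struct device *dev)
{
	example_clk = clk_get(dev, NULL);
	if (IS_ERR(example_clk))
		return PTR_ERR(example_clk);

	/*
	 * clk_prepare_enable() = clk_prepare() + clk_enable(); with the
	 * scheme above only the prepare half actually ungates the clock.
	 */
	return clk_prepare_enable(example_clk);
}

static void example_remove(void)
{
	clk_disable_unprepare(example_clk);
	clk_put(example_clk);
}
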
-
-/* Retrieve the *current* clock rate. If the clock itself
- * does not provide a special calculation routine, ask
- * its parent and so on, until one is able to return
- * a valid clock rate
- */
-unsigned long clk_get_rate(struct clk *clk)
-{
- if (clk == NULL || IS_ERR(clk))
- return 0UL;
-
- if (clk->get_rate)
- return clk->get_rate(clk);
-
- return clk_get_rate(clk->parent);
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-/* Round the requested clock rate to the nearest supported
- * rate that is less than or equal to the requested rate.
- * This is dependent on the clock's current parent.
- */
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
- if (clk == NULL || IS_ERR(clk) || !clk->round_rate)
- return 0;
-
- return clk->round_rate(clk, rate);
-}
-EXPORT_SYMBOL(clk_round_rate);
-
-/* Set the clock to the requested clock rate. The rate must
- * match a supported rate exactly based on what clk_round_rate returns
- */
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- int ret = -EINVAL;
-
- if (clk == NULL || IS_ERR(clk) || clk->set_rate == NULL || rate == 0)
- return ret;
-
- mutex_lock(&clocks_mutex);
- ret = clk->set_rate(clk, rate);
- mutex_unlock(&clocks_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(clk_set_rate);
-
-/* Set the clock's parent to another clock source */
-int clk_set_parent(struct clk *clk, struct clk *parent)
-{
- int ret = -EINVAL;
- struct clk *old;
-
- if (clk == NULL || IS_ERR(clk) || parent == NULL ||
- IS_ERR(parent) || clk->set_parent == NULL)
- return ret;
-
- if (clk->usecount)
- clk_prepare_enable(parent);
-
- mutex_lock(&clocks_mutex);
- ret = clk->set_parent(clk, parent);
- if (ret == 0) {
- old = clk->parent;
- clk->parent = parent;
- } else {
- old = parent;
- }
- mutex_unlock(&clocks_mutex);
-
- if (clk->usecount)
- clk_disable(old);
-
- return ret;
-}
-EXPORT_SYMBOL(clk_set_parent);
-
-/* Retrieve the clock's parent clock source */
-struct clk *clk_get_parent(struct clk *clk)
-{
- struct clk *ret = NULL;
-
- if (clk == NULL || IS_ERR(clk))
- return ret;
-
- return clk->parent;
-}
-EXPORT_SYMBOL(clk_get_parent);
diff --git a/arch/arm/mach-mxs/devices/Kconfig b/arch/arm/mach-mxs/devices/Kconfig
index b8913df4cfa..19659de1c4e 100644
--- a/arch/arm/mach-mxs/devices/Kconfig
+++ b/arch/arm/mach-mxs/devices/Kconfig
@@ -1,6 +1,5 @@
config MXS_HAVE_AMBA_DUART
bool
- select ARM_AMBA
config MXS_HAVE_PLATFORM_AUART
bool
diff --git a/arch/arm/mach-mxs/devices/platform-dma.c b/arch/arm/mach-mxs/devices/platform-dma.c
index 6a0202b1016..46824501de0 100644
--- a/arch/arm/mach-mxs/devices/platform-dma.c
+++ b/arch/arm/mach-mxs/devices/platform-dma.c
@@ -14,7 +14,7 @@
#include <mach/mx28.h>
#include <mach/devices-common.h>
-static struct platform_device *__init mxs_add_dma(const char *devid,
+struct platform_device *__init mxs_add_dma(const char *devid,
resource_size_t base)
{
struct resource res[] = {
@@ -29,22 +29,3 @@ static struct platform_device *__init mxs_add_dma(const char *devid,
res, ARRAY_SIZE(res), NULL, 0,
DMA_BIT_MASK(32));
}
-
-static int __init mxs_add_mxs_dma(void)
-{
- char *apbh = "mxs-dma-apbh";
- char *apbx = "mxs-dma-apbx";
-
- if (cpu_is_mx23()) {
- mxs_add_dma(apbh, MX23_APBH_DMA_BASE_ADDR);
- mxs_add_dma(apbx, MX23_APBX_DMA_BASE_ADDR);
- }
-
- if (cpu_is_mx28()) {
- mxs_add_dma(apbh, MX28_APBH_DMA_BASE_ADDR);
- mxs_add_dma(apbx, MX28_APBX_DMA_BASE_ADDR);
- }
-
- return 0;
-}
-arch_initcall(mxs_add_mxs_dma);
diff --git a/arch/arm/mach-mxs/devices/platform-gpio-mxs.c b/arch/arm/mach-mxs/devices/platform-gpio-mxs.c
index ed0885e414e..cd99f19ec63 100644
--- a/arch/arm/mach-mxs/devices/platform-gpio-mxs.c
+++ b/arch/arm/mach-mxs/devices/platform-gpio-mxs.c
@@ -14,7 +14,7 @@
#include <mach/devices-common.h>
struct platform_device *__init mxs_add_gpio(
- int id, resource_size_t iobase, int irq)
+ char *name, int id, resource_size_t iobase, int irq)
{
struct resource res[] = {
{
@@ -29,25 +29,5 @@ struct platform_device *__init mxs_add_gpio(
};
return platform_device_register_resndata(&mxs_apbh_bus,
- "gpio-mxs", id, res, ARRAY_SIZE(res), NULL, 0);
+ name, id, res, ARRAY_SIZE(res), NULL, 0);
}
-
-static int __init mxs_add_mxs_gpio(void)
-{
- if (cpu_is_mx23()) {
- mxs_add_gpio(0, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO0);
- mxs_add_gpio(1, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO1);
- mxs_add_gpio(2, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO2);
- }
-
- if (cpu_is_mx28()) {
- mxs_add_gpio(0, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO0);
- mxs_add_gpio(1, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO1);
- mxs_add_gpio(2, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO2);
- mxs_add_gpio(3, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO3);
- mxs_add_gpio(4, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO4);
- }
-
- return 0;
-}
-postcore_initcall(mxs_add_mxs_gpio);
diff --git a/arch/arm/mach-mxs/devices/platform-mxs-mmc.c b/arch/arm/mach-mxs/devices/platform-mxs-mmc.c
index bef9d923f54..b33c9d05c55 100644
--- a/arch/arm/mach-mxs/devices/platform-mxs-mmc.c
+++ b/arch/arm/mach-mxs/devices/platform-mxs-mmc.c
@@ -17,8 +17,9 @@
#include <mach/mx28.h>
#include <mach/devices-common.h>
-#define mxs_mxs_mmc_data_entry_single(soc, _id, hwid) \
+#define mxs_mxs_mmc_data_entry_single(soc, _devid, _id, hwid) \
{ \
+ .devid = _devid, \
.id = _id, \
.iobase = soc ## _SSP ## hwid ## _BASE_ADDR, \
.dma = soc ## _DMA_SSP ## hwid, \
@@ -26,23 +27,23 @@
.irq_dma = soc ## _INT_SSP ## hwid ## _DMA, \
}
-#define mxs_mxs_mmc_data_entry(soc, _id, hwid) \
- [_id] = mxs_mxs_mmc_data_entry_single(soc, _id, hwid)
+#define mxs_mxs_mmc_data_entry(soc, _devid, _id, hwid) \
+ [_id] = mxs_mxs_mmc_data_entry_single(soc, _devid, _id, hwid)
#ifdef CONFIG_SOC_IMX23
const struct mxs_mxs_mmc_data mx23_mxs_mmc_data[] __initconst = {
- mxs_mxs_mmc_data_entry(MX23, 0, 1),
- mxs_mxs_mmc_data_entry(MX23, 1, 2),
+ mxs_mxs_mmc_data_entry(MX23, "imx23-mmc", 0, 1),
+ mxs_mxs_mmc_data_entry(MX23, "imx23-mmc", 1, 2),
};
#endif
#ifdef CONFIG_SOC_IMX28
const struct mxs_mxs_mmc_data mx28_mxs_mmc_data[] __initconst = {
- mxs_mxs_mmc_data_entry(MX28, 0, 0),
- mxs_mxs_mmc_data_entry(MX28, 1, 1),
- mxs_mxs_mmc_data_entry(MX28, 2, 2),
- mxs_mxs_mmc_data_entry(MX28, 3, 3),
+ mxs_mxs_mmc_data_entry(MX28, "imx28-mmc", 0, 0),
+ mxs_mxs_mmc_data_entry(MX28, "imx28-mmc", 1, 1),
+ mxs_mxs_mmc_data_entry(MX28, "imx28-mmc", 2, 2),
+ mxs_mxs_mmc_data_entry(MX28, "imx28-mmc", 3, 3),
};
#endif
@@ -70,6 +71,6 @@ struct platform_device *__init mxs_add_mxs_mmc(
},
};
- return mxs_add_platform_device("mxs-mmc", data->id,
+ return mxs_add_platform_device(data->devid, data->id,
res, ARRAY_SIZE(res), pdata, sizeof(*pdata));
}
diff --git a/arch/arm/mach-mxs/include/mach/clock.h b/arch/arm/mach-mxs/include/mach/clock.h
deleted file mode 100644
index 592c9ab5d76..00000000000
--- a/arch/arm/mach-mxs/include/mach/clock.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#ifndef __MACH_MXS_CLOCK_H__
-#define __MACH_MXS_CLOCK_H__
-
-#ifndef __ASSEMBLY__
-#include <linux/list.h>
-
-struct module;
-
-struct clk {
- int id;
- /* Source clock this clk depends on */
- struct clk *parent;
- /* Reference count of clock enable/disable */
- __s8 usecount;
- /* Register bit position for clock's enable/disable control. */
- u8 enable_shift;
- /* Register address for clock's enable/disable control. */
- void __iomem *enable_reg;
- u32 flags;
- /* get the current clock rate (always a fresh value) */
- unsigned long (*get_rate) (struct clk *);
- /* Function ptr to set the clock to a new rate. The rate must match a
- supported rate returned from round_rate. Leave blank if clock is not
- programmable */
- int (*set_rate) (struct clk *, unsigned long);
- /* Function ptr to round the requested clock rate to the nearest
- supported rate that is less than or equal to the requested rate. */
- unsigned long (*round_rate) (struct clk *, unsigned long);
- /* Function ptr to enable the clock. Leave blank if clock can not
- be gated. */
- int (*enable) (struct clk *);
- /* Function ptr to disable the clock. Leave blank if clock can not
- be gated. */
- void (*disable) (struct clk *);
- /* Function ptr to set the parent clock of the clock. */
- int (*set_parent) (struct clk *, struct clk *);
-};
-
-int clk_register(struct clk *clk);
-void clk_unregister(struct clk *clk);
-
-#endif /* __ASSEMBLY__ */
-#endif /* __MACH_MXS_CLOCK_H__ */
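
The struct clk above is the old, pre-common-clock design: each field comment describes an optional hook, and a clock that cannot be gated or reprogrammed simply leaves those pointers NULL. A minimal sketch of a fixed-rate clock expressed with this struct, in the same style as the ref_xtal definitions in the files above; the 32.768 kHz clock itself is illustrative.

#include <mach/clock.h>

static unsigned long fixed_32k_get_rate(struct clk *clk)
{
	return 32768;
}

static struct clk fixed_32k_clk = {
	.get_rate = fixed_32k_get_rate,
	/* no .enable/.disable: this clock cannot be gated */
};
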
diff --git a/arch/arm/mach-mxs/include/mach/common.h b/arch/arm/mach-mxs/include/mach/common.h
index 8d88399b73e..de6c7ba4254 100644
--- a/arch/arm/mach-mxs/include/mach/common.h
+++ b/arch/arm/mach-mxs/include/mach/common.h
@@ -11,28 +11,27 @@
#ifndef __MACH_MXS_COMMON_H__
#define __MACH_MXS_COMMON_H__
-struct clk;
-
extern const u32 *mxs_get_ocotp(void);
extern int mxs_reset_block(void __iomem *);
-extern void mxs_timer_init(struct clk *, int);
+extern void mxs_timer_init(int);
extern void mxs_restart(char, const char *);
extern int mxs_saif_clkmux_select(unsigned int clkmux);
extern void mx23_soc_init(void);
-extern int mx23_register_gpios(void);
extern int mx23_clocks_init(void);
extern void mx23_map_io(void);
extern void mx23_init_irq(void);
extern void mx28_soc_init(void);
-extern int mx28_register_gpios(void);
extern int mx28_clocks_init(void);
extern void mx28_map_io(void);
extern void mx28_init_irq(void);
extern void icoll_init_irq(void);
-extern int mxs_clkctrl_timeout(unsigned int reg_offset, unsigned int mask);
+extern struct platform_device *mxs_add_dma(const char *devid,
+ resource_size_t base);
+extern struct platform_device *mxs_add_gpio(char *name, int id,
+ resource_size_t iobase, int irq);
#endif /* __MACH_MXS_COMMON_H__ */
diff --git a/arch/arm/mach-mxs/include/mach/devices-common.h b/arch/arm/mach-mxs/include/mach/devices-common.h
index 21e45a70d34..e8b1d958240 100644
--- a/arch/arm/mach-mxs/include/mach/devices-common.h
+++ b/arch/arm/mach-mxs/include/mach/devices-common.h
@@ -82,8 +82,9 @@ struct platform_device * __init mxs_add_mxs_i2c(
const struct mxs_mxs_i2c_data *data);
/* mmc */
-#include <mach/mmc.h>
+#include <linux/mmc/mxs-mmc.h>
struct mxs_mxs_mmc_data {
+ const char *devid;
int id;
resource_size_t iobase;
resource_size_t dma;
diff --git a/arch/arm/mach-mxs/mach-mx28evk.c b/arch/arm/mach-mxs/mach-mx28evk.c
index da4610ebe9e..dafd48e86c8 100644
--- a/arch/arm/mach-mxs/mach-mx28evk.c
+++ b/arch/arm/mach-mxs/mach-mx28evk.c
@@ -226,7 +226,7 @@ static void __init mx28evk_fec_reset(void)
struct clk *clk;
/* Enable fec phy clock */
- clk = clk_get_sys("pll2", NULL);
+ clk = clk_get_sys("enet_out", NULL);
if (!IS_ERR(clk))
clk_prepare_enable(clk);
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
new file mode 100644
index 00000000000..8cac94b3302
--- /dev/null
+++ b/arch/arm/mach-mxs/mach-mxs.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/init.h>
+#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/time.h>
+#include <mach/common.h>
+
+static int __init mxs_icoll_add_irq_domain(struct device_node *np,
+ struct device_node *interrupt_parent)
+{
+ irq_domain_add_legacy(np, 128, 0, 0, &irq_domain_simple_ops, NULL);
+
+ return 0;
+}
+
+static int __init mxs_gpio_add_irq_domain(struct device_node *np,
+ struct device_node *interrupt_parent)
+{
+ static int gpio_irq_base = MXS_GPIO_IRQ_START;
+
+ irq_domain_add_legacy(np, 32, gpio_irq_base, 0, &irq_domain_simple_ops, NULL);
+ gpio_irq_base += 32;
+
+ return 0;
+}
+
+static const struct of_device_id mxs_irq_match[] __initconst = {
+ { .compatible = "fsl,mxs-icoll", .data = mxs_icoll_add_irq_domain, },
+ { .compatible = "fsl,mxs-gpio", .data = mxs_gpio_add_irq_domain, },
+ { /* sentinel */ }
+};
+
+static void __init mxs_dt_init_irq(void)
+{
+ icoll_init_irq();
+ of_irq_init(mxs_irq_match);
+}
+
+static void __init imx23_timer_init(void)
+{
+ mx23_clocks_init();
+}
+
+static struct sys_timer imx23_timer = {
+ .init = imx23_timer_init,
+};
+
+static void __init imx28_timer_init(void)
+{
+ mx28_clocks_init();
+}
+
+static struct sys_timer imx28_timer = {
+ .init = imx28_timer_init,
+};
+
+static void __init imx28_evk_init(void)
+{
+ struct clk *clk;
+
+ /* Enable fec phy clock */
+ clk = clk_get_sys("enet_out", NULL);
+ if (!IS_ERR(clk))
+ clk_prepare_enable(clk);
+}
+
+static void __init mxs_machine_init(void)
+{
+ if (of_machine_is_compatible("fsl,imx28-evk"))
+ imx28_evk_init();
+
+ of_platform_populate(NULL, of_default_bus_match_table,
+ NULL, NULL);
+}
+
+static const char *imx23_dt_compat[] __initdata = {
+ "fsl,imx23-evk",
+ "fsl,imx23",
+ NULL,
+};
+
+static const char *imx28_dt_compat[] __initdata = {
+ "fsl,imx28-evk",
+ "fsl,imx28",
+ NULL,
+};
+
+DT_MACHINE_START(IMX23, "Freescale i.MX23 (Device Tree)")
+ .map_io = mx23_map_io,
+ .init_irq = mxs_dt_init_irq,
+ .timer = &imx23_timer,
+ .init_machine = mxs_machine_init,
+ .dt_compat = imx23_dt_compat,
+ .restart = mxs_restart,
+MACHINE_END
+
+DT_MACHINE_START(IMX28, "Freescale i.MX28 (Device Tree)")
+ .map_io = mx28_map_io,
+ .init_irq = mxs_dt_init_irq,
+ .timer = &imx28_timer,
+ .init_machine = mxs_machine_init,
+ .dt_compat = imx28_dt_compat,
+ .restart = mxs_restart,
+MACHINE_END
diff --git a/arch/arm/mach-mxs/mm.c b/arch/arm/mach-mxs/mm.c
index 67a384edcf5..dccb67a9e7c 100644
--- a/arch/arm/mach-mxs/mm.c
+++ b/arch/arm/mach-mxs/mm.c
@@ -66,9 +66,25 @@ void __init mx28_init_irq(void)
void __init mx23_soc_init(void)
{
pinctrl_provide_dummies();
+
+ mxs_add_dma("imx23-dma-apbh", MX23_APBH_DMA_BASE_ADDR);
+ mxs_add_dma("imx23-dma-apbx", MX23_APBX_DMA_BASE_ADDR);
+
+ mxs_add_gpio("imx23-gpio", 0, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO0);
+ mxs_add_gpio("imx23-gpio", 1, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO1);
+ mxs_add_gpio("imx23-gpio", 2, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO2);
}
void __init mx28_soc_init(void)
{
pinctrl_provide_dummies();
+
+ mxs_add_dma("imx28-dma-apbh", MX23_APBH_DMA_BASE_ADDR);
+ mxs_add_dma("imx28-dma-apbx", MX23_APBX_DMA_BASE_ADDR);
+
+ mxs_add_gpio("imx28-gpio", 0, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO0);
+ mxs_add_gpio("imx28-gpio", 1, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO1);
+ mxs_add_gpio("imx28-gpio", 2, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO2);
+ mxs_add_gpio("imx28-gpio", 3, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO3);
+ mxs_add_gpio("imx28-gpio", 4, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO4);
}
diff --git a/arch/arm/mach-mxs/regs-clkctrl-mx23.h b/arch/arm/mach-mxs/regs-clkctrl-mx23.h
deleted file mode 100644
index 0ea5c9d0e2b..00000000000
--- a/arch/arm/mach-mxs/regs-clkctrl-mx23.h
+++ /dev/null
@@ -1,331 +0,0 @@
-/*
- * Freescale CLKCTRL Register Definitions
- *
- * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
- * Copyright 2008-2010 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This file is generated from an XML file. Don't edit it.
- *
- * Xml Revision: 1.48
- * Template revision: 26195
- */
-
-#ifndef __REGS_CLKCTRL_MX23_H__
-#define __REGS_CLKCTRL_MX23_H__
-
-
-#define HW_CLKCTRL_PLLCTRL0 (0x00000000)
-#define HW_CLKCTRL_PLLCTRL0_SET (0x00000004)
-#define HW_CLKCTRL_PLLCTRL0_CLR (0x00000008)
-#define HW_CLKCTRL_PLLCTRL0_TOG (0x0000000c)
-
-#define BP_CLKCTRL_PLLCTRL0_LFR_SEL 28
-#define BM_CLKCTRL_PLLCTRL0_LFR_SEL 0x30000000
-#define BF_CLKCTRL_PLLCTRL0_LFR_SEL(v) \
- (((v) << 28) & BM_CLKCTRL_PLLCTRL0_LFR_SEL)
-#define BV_CLKCTRL_PLLCTRL0_LFR_SEL__DEFAULT 0x0
-#define BV_CLKCTRL_PLLCTRL0_LFR_SEL__TIMES_2 0x1
-#define BV_CLKCTRL_PLLCTRL0_LFR_SEL__TIMES_05 0x2
-#define BV_CLKCTRL_PLLCTRL0_LFR_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLLCTRL0_CP_SEL 24
-#define BM_CLKCTRL_PLLCTRL0_CP_SEL 0x03000000
-#define BF_CLKCTRL_PLLCTRL0_CP_SEL(v) \
- (((v) << 24) & BM_CLKCTRL_PLLCTRL0_CP_SEL)
-#define BV_CLKCTRL_PLLCTRL0_CP_SEL__DEFAULT 0x0
-#define BV_CLKCTRL_PLLCTRL0_CP_SEL__TIMES_2 0x1
-#define BV_CLKCTRL_PLLCTRL0_CP_SEL__TIMES_05 0x2
-#define BV_CLKCTRL_PLLCTRL0_CP_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLLCTRL0_DIV_SEL 20
-#define BM_CLKCTRL_PLLCTRL0_DIV_SEL 0x00300000
-#define BF_CLKCTRL_PLLCTRL0_DIV_SEL(v) \
- (((v) << 20) & BM_CLKCTRL_PLLCTRL0_DIV_SEL)
-#define BV_CLKCTRL_PLLCTRL0_DIV_SEL__DEFAULT 0x0
-#define BV_CLKCTRL_PLLCTRL0_DIV_SEL__LOWER 0x1
-#define BV_CLKCTRL_PLLCTRL0_DIV_SEL__LOWEST 0x2
-#define BV_CLKCTRL_PLLCTRL0_DIV_SEL__UNDEFINED 0x3
-#define BM_CLKCTRL_PLLCTRL0_EN_USB_CLKS 0x00040000
-#define BM_CLKCTRL_PLLCTRL0_POWER 0x00010000
-
-#define HW_CLKCTRL_PLLCTRL1 (0x00000010)
-
-#define BM_CLKCTRL_PLLCTRL1_LOCK 0x80000000
-#define BM_CLKCTRL_PLLCTRL1_FORCE_LOCK 0x40000000
-#define BP_CLKCTRL_PLLCTRL1_LOCK_COUNT 0
-#define BM_CLKCTRL_PLLCTRL1_LOCK_COUNT 0x0000FFFF
-#define BF_CLKCTRL_PLLCTRL1_LOCK_COUNT(v) \
- (((v) << 0) & BM_CLKCTRL_PLLCTRL1_LOCK_COUNT)
-
-#define HW_CLKCTRL_CPU (0x00000020)
-#define HW_CLKCTRL_CPU_SET (0x00000024)
-#define HW_CLKCTRL_CPU_CLR (0x00000028)
-#define HW_CLKCTRL_CPU_TOG (0x0000002c)
-
-#define BM_CLKCTRL_CPU_BUSY_REF_XTAL 0x20000000
-#define BM_CLKCTRL_CPU_BUSY_REF_CPU 0x10000000
-#define BM_CLKCTRL_CPU_DIV_XTAL_FRAC_EN 0x04000000
-#define BP_CLKCTRL_CPU_DIV_XTAL 16
-#define BM_CLKCTRL_CPU_DIV_XTAL 0x03FF0000
-#define BF_CLKCTRL_CPU_DIV_XTAL(v) \
- (((v) << 16) & BM_CLKCTRL_CPU_DIV_XTAL)
-#define BM_CLKCTRL_CPU_INTERRUPT_WAIT 0x00001000
-#define BM_CLKCTRL_CPU_DIV_CPU_FRAC_EN 0x00000400
-#define BP_CLKCTRL_CPU_DIV_CPU 0
-#define BM_CLKCTRL_CPU_DIV_CPU 0x0000003F
-#define BF_CLKCTRL_CPU_DIV_CPU(v) \
- (((v) << 0) & BM_CLKCTRL_CPU_DIV_CPU)
-
-#define HW_CLKCTRL_HBUS (0x00000030)
-#define HW_CLKCTRL_HBUS_SET (0x00000034)
-#define HW_CLKCTRL_HBUS_CLR (0x00000038)
-#define HW_CLKCTRL_HBUS_TOG (0x0000003c)
-
-#define BM_CLKCTRL_HBUS_BUSY 0x20000000
-#define BM_CLKCTRL_HBUS_DCP_AS_ENABLE 0x10000000
-#define BM_CLKCTRL_HBUS_PXP_AS_ENABLE 0x08000000
-#define BM_CLKCTRL_HBUS_APBHDMA_AS_ENABLE 0x04000000
-#define BM_CLKCTRL_HBUS_APBXDMA_AS_ENABLE 0x02000000
-#define BM_CLKCTRL_HBUS_TRAFFIC_JAM_AS_ENABLE 0x01000000
-#define BM_CLKCTRL_HBUS_TRAFFIC_AS_ENABLE 0x00800000
-#define BM_CLKCTRL_HBUS_CPU_DATA_AS_ENABLE 0x00400000
-#define BM_CLKCTRL_HBUS_CPU_INSTR_AS_ENABLE 0x00200000
-#define BM_CLKCTRL_HBUS_AUTO_SLOW_MODE 0x00100000
-#define BP_CLKCTRL_HBUS_SLOW_DIV 16
-#define BM_CLKCTRL_HBUS_SLOW_DIV 0x00070000
-#define BF_CLKCTRL_HBUS_SLOW_DIV(v) \
- (((v) << 16) & BM_CLKCTRL_HBUS_SLOW_DIV)
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY1 0x0
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY2 0x1
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY4 0x2
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY8 0x3
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY16 0x4
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY32 0x5
-#define BM_CLKCTRL_HBUS_DIV_FRAC_EN 0x00000020
-#define BP_CLKCTRL_HBUS_DIV 0
-#define BM_CLKCTRL_HBUS_DIV 0x0000001F
-#define BF_CLKCTRL_HBUS_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_HBUS_DIV)
-
-#define HW_CLKCTRL_XBUS (0x00000040)
-
-#define BM_CLKCTRL_XBUS_BUSY 0x80000000
-#define BM_CLKCTRL_XBUS_DIV_FRAC_EN 0x00000400
-#define BP_CLKCTRL_XBUS_DIV 0
-#define BM_CLKCTRL_XBUS_DIV 0x000003FF
-#define BF_CLKCTRL_XBUS_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_XBUS_DIV)
-
-#define HW_CLKCTRL_XTAL (0x00000050)
-#define HW_CLKCTRL_XTAL_SET (0x00000054)
-#define HW_CLKCTRL_XTAL_CLR (0x00000058)
-#define HW_CLKCTRL_XTAL_TOG (0x0000005c)
-
-#define BP_CLKCTRL_XTAL_UART_CLK_GATE 31
-#define BM_CLKCTRL_XTAL_UART_CLK_GATE 0x80000000
-#define BP_CLKCTRL_XTAL_FILT_CLK24M_GATE 30
-#define BM_CLKCTRL_XTAL_FILT_CLK24M_GATE 0x40000000
-#define BP_CLKCTRL_XTAL_PWM_CLK24M_GATE 29
-#define BM_CLKCTRL_XTAL_PWM_CLK24M_GATE 0x20000000
-#define BM_CLKCTRL_XTAL_DRI_CLK24M_GATE 0x10000000
-#define BM_CLKCTRL_XTAL_DIGCTRL_CLK1M_GATE 0x08000000
-#define BP_CLKCTRL_XTAL_TIMROT_CLK32K_GATE 26
-#define BM_CLKCTRL_XTAL_TIMROT_CLK32K_GATE 0x04000000
-#define BP_CLKCTRL_XTAL_DIV_UART 0
-#define BM_CLKCTRL_XTAL_DIV_UART 0x00000003
-#define BF_CLKCTRL_XTAL_DIV_UART(v) \
- (((v) << 0) & BM_CLKCTRL_XTAL_DIV_UART)
-
-#define HW_CLKCTRL_PIX (0x00000060)
-
-#define BP_CLKCTRL_PIX_CLKGATE 31
-#define BM_CLKCTRL_PIX_CLKGATE 0x80000000
-#define BM_CLKCTRL_PIX_BUSY 0x20000000
-#define BM_CLKCTRL_PIX_DIV_FRAC_EN 0x00001000
-#define BP_CLKCTRL_PIX_DIV 0
-#define BM_CLKCTRL_PIX_DIV 0x00000FFF
-#define BF_CLKCTRL_PIX_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_PIX_DIV)
-
-#define HW_CLKCTRL_SSP (0x00000070)
-
-#define BP_CLKCTRL_SSP_CLKGATE 31
-#define BM_CLKCTRL_SSP_CLKGATE 0x80000000
-#define BM_CLKCTRL_SSP_BUSY 0x20000000
-#define BM_CLKCTRL_SSP_DIV_FRAC_EN 0x00000200
-#define BP_CLKCTRL_SSP_DIV 0
-#define BM_CLKCTRL_SSP_DIV 0x000001FF
-#define BF_CLKCTRL_SSP_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_SSP_DIV)
-
-#define HW_CLKCTRL_GPMI (0x00000080)
-
-#define BP_CLKCTRL_GPMI_CLKGATE 31
-#define BM_CLKCTRL_GPMI_CLKGATE 0x80000000
-#define BM_CLKCTRL_GPMI_BUSY 0x20000000
-#define BM_CLKCTRL_GPMI_DIV_FRAC_EN 0x00000400
-#define BP_CLKCTRL_GPMI_DIV 0
-#define BM_CLKCTRL_GPMI_DIV 0x000003FF
-#define BF_CLKCTRL_GPMI_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_GPMI_DIV)
-
-#define HW_CLKCTRL_SPDIF (0x00000090)
-
-#define BM_CLKCTRL_SPDIF_CLKGATE 0x80000000
-
-#define HW_CLKCTRL_EMI (0x000000a0)
-
-#define BP_CLKCTRL_EMI_CLKGATE 31
-#define BM_CLKCTRL_EMI_CLKGATE 0x80000000
-#define BM_CLKCTRL_EMI_SYNC_MODE_EN 0x40000000
-#define BM_CLKCTRL_EMI_BUSY_REF_XTAL 0x20000000
-#define BM_CLKCTRL_EMI_BUSY_REF_EMI 0x10000000
-#define BM_CLKCTRL_EMI_BUSY_REF_CPU 0x08000000
-#define BM_CLKCTRL_EMI_BUSY_SYNC_MODE 0x04000000
-#define BM_CLKCTRL_EMI_BUSY_DCC_RESYNC 0x00020000
-#define BM_CLKCTRL_EMI_DCC_RESYNC_ENABLE 0x00010000
-#define BP_CLKCTRL_EMI_DIV_XTAL 8
-#define BM_CLKCTRL_EMI_DIV_XTAL 0x00000F00
-#define BF_CLKCTRL_EMI_DIV_XTAL(v) \
- (((v) << 8) & BM_CLKCTRL_EMI_DIV_XTAL)
-#define BP_CLKCTRL_EMI_DIV_EMI 0
-#define BM_CLKCTRL_EMI_DIV_EMI 0x0000003F
-#define BF_CLKCTRL_EMI_DIV_EMI(v) \
- (((v) << 0) & BM_CLKCTRL_EMI_DIV_EMI)
-
-#define HW_CLKCTRL_IR (0x000000b0)
-
-#define BM_CLKCTRL_IR_CLKGATE 0x80000000
-#define BM_CLKCTRL_IR_AUTO_DIV 0x20000000
-#define BM_CLKCTRL_IR_IR_BUSY 0x10000000
-#define BM_CLKCTRL_IR_IROV_BUSY 0x08000000
-#define BP_CLKCTRL_IR_IROV_DIV 16
-#define BM_CLKCTRL_IR_IROV_DIV 0x01FF0000
-#define BF_CLKCTRL_IR_IROV_DIV(v) \
- (((v) << 16) & BM_CLKCTRL_IR_IROV_DIV)
-#define BP_CLKCTRL_IR_IR_DIV 0
-#define BM_CLKCTRL_IR_IR_DIV 0x000003FF
-#define BF_CLKCTRL_IR_IR_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_IR_IR_DIV)
-
-#define HW_CLKCTRL_SAIF (0x000000c0)
-
-#define BM_CLKCTRL_SAIF_CLKGATE 0x80000000
-#define BM_CLKCTRL_SAIF_BUSY 0x20000000
-#define BM_CLKCTRL_SAIF_DIV_FRAC_EN 0x00010000
-#define BP_CLKCTRL_SAIF_DIV 0
-#define BM_CLKCTRL_SAIF_DIV 0x0000FFFF
-#define BF_CLKCTRL_SAIF_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_SAIF_DIV)
-
-#define HW_CLKCTRL_TV (0x000000d0)
-
-#define BM_CLKCTRL_TV_CLK_TV108M_GATE 0x80000000
-#define BM_CLKCTRL_TV_CLK_TV_GATE 0x40000000
-
-#define HW_CLKCTRL_ETM (0x000000e0)
-
-#define BM_CLKCTRL_ETM_CLKGATE 0x80000000
-#define BM_CLKCTRL_ETM_BUSY 0x20000000
-#define BM_CLKCTRL_ETM_DIV_FRAC_EN 0x00000040
-#define BP_CLKCTRL_ETM_DIV 0
-#define BM_CLKCTRL_ETM_DIV 0x0000003F
-#define BF_CLKCTRL_ETM_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_ETM_DIV)
-
-#define HW_CLKCTRL_FRAC (0x000000f0)
-#define HW_CLKCTRL_FRAC_SET (0x000000f4)
-#define HW_CLKCTRL_FRAC_CLR (0x000000f8)
-#define HW_CLKCTRL_FRAC_TOG (0x000000fc)
-
-#define BP_CLKCTRL_FRAC_CLKGATEIO 31
-#define BM_CLKCTRL_FRAC_CLKGATEIO 0x80000000
-#define BM_CLKCTRL_FRAC_IO_STABLE 0x40000000
-#define BP_CLKCTRL_FRAC_IOFRAC 24
-#define BM_CLKCTRL_FRAC_IOFRAC 0x3F000000
-#define BF_CLKCTRL_FRAC_IOFRAC(v) \
- (((v) << 24) & BM_CLKCTRL_FRAC_IOFRAC)
-#define BP_CLKCTRL_FRAC_CLKGATEPIX 23
-#define BM_CLKCTRL_FRAC_CLKGATEPIX 0x00800000
-#define BM_CLKCTRL_FRAC_PIX_STABLE 0x00400000
-#define BP_CLKCTRL_FRAC_PIXFRAC 16
-#define BM_CLKCTRL_FRAC_PIXFRAC 0x003F0000
-#define BF_CLKCTRL_FRAC_PIXFRAC(v) \
- (((v) << 16) & BM_CLKCTRL_FRAC_PIXFRAC)
-#define BP_CLKCTRL_FRAC_CLKGATEEMI 15
-#define BM_CLKCTRL_FRAC_CLKGATEEMI 0x00008000
-#define BM_CLKCTRL_FRAC_EMI_STABLE 0x00004000
-#define BP_CLKCTRL_FRAC_EMIFRAC 8
-#define BM_CLKCTRL_FRAC_EMIFRAC 0x00003F00
-#define BF_CLKCTRL_FRAC_EMIFRAC(v) \
- (((v) << 8) & BM_CLKCTRL_FRAC_EMIFRAC)
-#define BP_CLKCTRL_FRAC_CLKGATECPU 7
-#define BM_CLKCTRL_FRAC_CLKGATECPU 0x00000080
-#define BM_CLKCTRL_FRAC_CPU_STABLE 0x00000040
-#define BP_CLKCTRL_FRAC_CPUFRAC 0
-#define BM_CLKCTRL_FRAC_CPUFRAC 0x0000003F
-#define BF_CLKCTRL_FRAC_CPUFRAC(v) \
- (((v) << 0) & BM_CLKCTRL_FRAC_CPUFRAC)
-
-#define HW_CLKCTRL_FRAC1 (0x00000100)
-#define HW_CLKCTRL_FRAC1_SET (0x00000104)
-#define HW_CLKCTRL_FRAC1_CLR (0x00000108)
-#define HW_CLKCTRL_FRAC1_TOG (0x0000010c)
-
-#define BM_CLKCTRL_FRAC1_CLKGATEVID 0x80000000
-#define BM_CLKCTRL_FRAC1_VID_STABLE 0x40000000
-
-#define HW_CLKCTRL_CLKSEQ (0x00000110)
-#define HW_CLKCTRL_CLKSEQ_SET (0x00000114)
-#define HW_CLKCTRL_CLKSEQ_CLR (0x00000118)
-#define HW_CLKCTRL_CLKSEQ_TOG (0x0000011c)
-
-#define BM_CLKCTRL_CLKSEQ_BYPASS_ETM 0x00000100
-#define BM_CLKCTRL_CLKSEQ_BYPASS_CPU 0x00000080
-#define BM_CLKCTRL_CLKSEQ_BYPASS_EMI 0x00000040
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP 0x00000020
-#define BM_CLKCTRL_CLKSEQ_BYPASS_GPMI 0x00000010
-#define BM_CLKCTRL_CLKSEQ_BYPASS_IR 0x00000008
-#define BM_CLKCTRL_CLKSEQ_BYPASS_PIX 0x00000002
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SAIF 0x00000001
-
-#define HW_CLKCTRL_RESET (0x00000120)
-
-#define BM_CLKCTRL_RESET_CHIP 0x00000002
-#define BM_CLKCTRL_RESET_DIG 0x00000001
-
-#define HW_CLKCTRL_STATUS (0x00000130)
-
-#define BP_CLKCTRL_STATUS_CPU_LIMIT 30
-#define BM_CLKCTRL_STATUS_CPU_LIMIT 0xC0000000
-#define BF_CLKCTRL_STATUS_CPU_LIMIT(v) \
- (((v) << 30) & BM_CLKCTRL_STATUS_CPU_LIMIT)
-
-#define HW_CLKCTRL_VERSION (0x00000140)
-
-#define BP_CLKCTRL_VERSION_MAJOR 24
-#define BM_CLKCTRL_VERSION_MAJOR 0xFF000000
-#define BF_CLKCTRL_VERSION_MAJOR(v) \
- (((v) << 24) & BM_CLKCTRL_VERSION_MAJOR)
-#define BP_CLKCTRL_VERSION_MINOR 16
-#define BM_CLKCTRL_VERSION_MINOR 0x00FF0000
-#define BF_CLKCTRL_VERSION_MINOR(v) \
- (((v) << 16) & BM_CLKCTRL_VERSION_MINOR)
-#define BP_CLKCTRL_VERSION_STEP 0
-#define BM_CLKCTRL_VERSION_STEP 0x0000FFFF
-#define BF_CLKCTRL_VERSION_STEP(v) \
- (((v) << 0) & BM_CLKCTRL_VERSION_STEP)
-
-#endif /* __REGS_CLKCTRL_MX23_H__ */
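
The register headers above and below follow the Freescale BP_/BM_/BF_ convention: BP_* is a field's bit position, BM_* its mask, and BF_*(v) shifts a value into place and masks it. A hedged sketch of the usual read-modify-write with those helpers, here for the 5-bit HBUS divider; the function name and the iomem base argument are illustrative, not taken from the kernel sources.

#include <linux/io.h>
#include <linux/types.h>
#include "regs-clkctrl-mx23.h"

static void set_hbus_div(void __iomem *clkctrl, unsigned int div)
{
	u32 reg = __raw_readl(clkctrl + HW_CLKCTRL_HBUS);

	reg &= ~BM_CLKCTRL_HBUS_DIV;		/* clear the DIV field */
	reg |= BF_CLKCTRL_HBUS_DIV(div);	/* ((div) << BP_...) & BM_... */
	__raw_writel(reg, clkctrl + HW_CLKCTRL_HBUS);
}
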
diff --git a/arch/arm/mach-mxs/regs-clkctrl-mx28.h b/arch/arm/mach-mxs/regs-clkctrl-mx28.h
deleted file mode 100644
index 7d1b061d794..00000000000
--- a/arch/arm/mach-mxs/regs-clkctrl-mx28.h
+++ /dev/null
@@ -1,486 +0,0 @@
-/*
- * Freescale CLKCTRL Register Definitions
- *
- * Copyright 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This file is generated from an XML file. Don't edit it.
- *
- * Xml Revision: 1.48
- * Template revision: 26195
- */
-
-#ifndef __REGS_CLKCTRL_MX28_H__
-#define __REGS_CLKCTRL_MX28_H__
-
-#define HW_CLKCTRL_PLL0CTRL0 (0x00000000)
-#define HW_CLKCTRL_PLL0CTRL0_SET (0x00000004)
-#define HW_CLKCTRL_PLL0CTRL0_CLR (0x00000008)
-#define HW_CLKCTRL_PLL0CTRL0_TOG (0x0000000c)
-
-#define BP_CLKCTRL_PLL0CTRL0_LFR_SEL 28
-#define BM_CLKCTRL_PLL0CTRL0_LFR_SEL 0x30000000
-#define BF_CLKCTRL_PLL0CTRL0_LFR_SEL(v) \
- (((v) << 28) & BM_CLKCTRL_PLL0CTRL0_LFR_SEL)
-#define BV_CLKCTRL_PLL0CTRL0_LFR_SEL__DEFAULT 0x0
-#define BV_CLKCTRL_PLL0CTRL0_LFR_SEL__TIMES_2 0x1
-#define BV_CLKCTRL_PLL0CTRL0_LFR_SEL__TIMES_05 0x2
-#define BV_CLKCTRL_PLL0CTRL0_LFR_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLL0CTRL0_CP_SEL 24
-#define BM_CLKCTRL_PLL0CTRL0_CP_SEL 0x03000000
-#define BF_CLKCTRL_PLL0CTRL0_CP_SEL(v) \
- (((v) << 24) & BM_CLKCTRL_PLL0CTRL0_CP_SEL)
-#define BV_CLKCTRL_PLL0CTRL0_CP_SEL__DEFAULT 0x0
-#define BV_CLKCTRL_PLL0CTRL0_CP_SEL__TIMES_2 0x1
-#define BV_CLKCTRL_PLL0CTRL0_CP_SEL__TIMES_05 0x2
-#define BV_CLKCTRL_PLL0CTRL0_CP_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLL0CTRL0_DIV_SEL 20
-#define BM_CLKCTRL_PLL0CTRL0_DIV_SEL 0x00300000
-#define BF_CLKCTRL_PLL0CTRL0_DIV_SEL(v) \
- (((v) << 20) & BM_CLKCTRL_PLL0CTRL0_DIV_SEL)
-#define BV_CLKCTRL_PLL0CTRL0_DIV_SEL__DEFAULT 0x0
-#define BV_CLKCTRL_PLL0CTRL0_DIV_SEL__LOWER 0x1
-#define BV_CLKCTRL_PLL0CTRL0_DIV_SEL__LOWEST 0x2
-#define BV_CLKCTRL_PLL0CTRL0_DIV_SEL__UNDEFINED 0x3
-#define BM_CLKCTRL_PLL0CTRL0_EN_USB_CLKS 0x00040000
-#define BM_CLKCTRL_PLL0CTRL0_POWER 0x00020000
-
-#define HW_CLKCTRL_PLL0CTRL1 (0x00000010)
-
-#define BM_CLKCTRL_PLL0CTRL1_LOCK 0x80000000
-#define BM_CLKCTRL_PLL0CTRL1_FORCE_LOCK 0x40000000
-#define BP_CLKCTRL_PLL0CTRL1_LOCK_COUNT 0
-#define BM_CLKCTRL_PLL0CTRL1_LOCK_COUNT 0x0000FFFF
-#define BF_CLKCTRL_PLL0CTRL1_LOCK_COUNT(v) \
- (((v) << 0) & BM_CLKCTRL_PLL0CTRL1_LOCK_COUNT)
-
-#define HW_CLKCTRL_PLL1CTRL0 (0x00000020)
-#define HW_CLKCTRL_PLL1CTRL0_SET (0x00000024)
-#define HW_CLKCTRL_PLL1CTRL0_CLR (0x00000028)
-#define HW_CLKCTRL_PLL1CTRL0_TOG (0x0000002c)
-
-#define BM_CLKCTRL_PLL1CTRL0_CLKGATEEMI 0x80000000
-#define BP_CLKCTRL_PLL1CTRL0_LFR_SEL 28
-#define BM_CLKCTRL_PLL1CTRL0_LFR_SEL 0x30000000
-#define BF_CLKCTRL_PLL1CTRL0_LFR_SEL(v) \
- (((v) << 28) & BM_CLKCTRL_PLL1CTRL0_LFR_SEL)
-#define BV_CLKCTRL_PLL1CTRL0_LFR_SEL__DEFAULT 0x0
-#define BV_CLKCTRL_PLL1CTRL0_LFR_SEL__TIMES_2 0x1
-#define BV_CLKCTRL_PLL1CTRL0_LFR_SEL__TIMES_05 0x2
-#define BV_CLKCTRL_PLL1CTRL0_LFR_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLL1CTRL0_CP_SEL 24
-#define BM_CLKCTRL_PLL1CTRL0_CP_SEL 0x03000000
-#define BF_CLKCTRL_PLL1CTRL0_CP_SEL(v) \
- (((v) << 24) & BM_CLKCTRL_PLL1CTRL0_CP_SEL)
-#define BV_CLKCTRL_PLL1CTRL0_CP_SEL__DEFAULT 0x0
-#define BV_CLKCTRL_PLL1CTRL0_CP_SEL__TIMES_2 0x1
-#define BV_CLKCTRL_PLL1CTRL0_CP_SEL__TIMES_05 0x2
-#define BV_CLKCTRL_PLL1CTRL0_CP_SEL__UNDEFINED 0x3
-#define BP_CLKCTRL_PLL1CTRL0_DIV_SEL 20
-#define BM_CLKCTRL_PLL1CTRL0_DIV_SEL 0x00300000
-#define BF_CLKCTRL_PLL1CTRL0_DIV_SEL(v) \
- (((v) << 20) & BM_CLKCTRL_PLL1CTRL0_DIV_SEL)
-#define BV_CLKCTRL_PLL1CTRL0_DIV_SEL__DEFAULT 0x0
-#define BV_CLKCTRL_PLL1CTRL0_DIV_SEL__LOWER 0x1
-#define BV_CLKCTRL_PLL1CTRL0_DIV_SEL__LOWEST 0x2
-#define BV_CLKCTRL_PLL1CTRL0_DIV_SEL__UNDEFINED 0x3
-#define BM_CLKCTRL_PLL1CTRL0_EN_USB_CLKS 0x00040000
-#define BM_CLKCTRL_PLL1CTRL0_POWER 0x00020000
-
-#define HW_CLKCTRL_PLL1CTRL1 (0x00000030)
-
-#define BM_CLKCTRL_PLL1CTRL1_LOCK 0x80000000
-#define BM_CLKCTRL_PLL1CTRL1_FORCE_LOCK 0x40000000
-#define BP_CLKCTRL_PLL1CTRL1_LOCK_COUNT 0
-#define BM_CLKCTRL_PLL1CTRL1_LOCK_COUNT 0x0000FFFF
-#define BF_CLKCTRL_PLL1CTRL1_LOCK_COUNT(v) \
- (((v) << 0) & BM_CLKCTRL_PLL1CTRL1_LOCK_COUNT)
-
-#define HW_CLKCTRL_PLL2CTRL0 (0x00000040)
-#define HW_CLKCTRL_PLL2CTRL0_SET (0x00000044)
-#define HW_CLKCTRL_PLL2CTRL0_CLR (0x00000048)
-#define HW_CLKCTRL_PLL2CTRL0_TOG (0x0000004c)
-
-#define BM_CLKCTRL_PLL2CTRL0_CLKGATE 0x80000000
-#define BP_CLKCTRL_PLL2CTRL0_LFR_SEL 28
-#define BM_CLKCTRL_PLL2CTRL0_LFR_SEL 0x30000000
-#define BF_CLKCTRL_PLL2CTRL0_LFR_SEL(v) \
- (((v) << 28) & BM_CLKCTRL_PLL2CTRL0_LFR_SEL)
-#define BM_CLKCTRL_PLL2CTRL0_HOLD_RING_OFF_B 0x04000000
-#define BP_CLKCTRL_PLL2CTRL0_CP_SEL 24
-#define BM_CLKCTRL_PLL2CTRL0_CP_SEL 0x03000000
-#define BF_CLKCTRL_PLL2CTRL0_CP_SEL(v) \
- (((v) << 24) & BM_CLKCTRL_PLL2CTRL0_CP_SEL)
-#define BM_CLKCTRL_PLL2CTRL0_POWER 0x00800000
-
-#define HW_CLKCTRL_CPU (0x00000050)
-#define HW_CLKCTRL_CPU_SET (0x00000054)
-#define HW_CLKCTRL_CPU_CLR (0x00000058)
-#define HW_CLKCTRL_CPU_TOG (0x0000005c)
-
-#define BM_CLKCTRL_CPU_BUSY_REF_XTAL 0x20000000
-#define BM_CLKCTRL_CPU_BUSY_REF_CPU 0x10000000
-#define BM_CLKCTRL_CPU_DIV_XTAL_FRAC_EN 0x04000000
-#define BP_CLKCTRL_CPU_DIV_XTAL 16
-#define BM_CLKCTRL_CPU_DIV_XTAL 0x03FF0000
-#define BF_CLKCTRL_CPU_DIV_XTAL(v) \
- (((v) << 16) & BM_CLKCTRL_CPU_DIV_XTAL)
-#define BM_CLKCTRL_CPU_INTERRUPT_WAIT 0x00001000
-#define BM_CLKCTRL_CPU_DIV_CPU_FRAC_EN 0x00000400
-#define BP_CLKCTRL_CPU_DIV_CPU 0
-#define BM_CLKCTRL_CPU_DIV_CPU 0x0000003F
-#define BF_CLKCTRL_CPU_DIV_CPU(v) \
- (((v) << 0) & BM_CLKCTRL_CPU_DIV_CPU)
-
-#define HW_CLKCTRL_HBUS (0x00000060)
-#define HW_CLKCTRL_HBUS_SET (0x00000064)
-#define HW_CLKCTRL_HBUS_CLR (0x00000068)
-#define HW_CLKCTRL_HBUS_TOG (0x0000006c)
-
-#define BM_CLKCTRL_HBUS_ASM_BUSY 0x80000000
-#define BM_CLKCTRL_HBUS_DCP_AS_ENABLE 0x40000000
-#define BM_CLKCTRL_HBUS_PXP_AS_ENABLE 0x20000000
-#define BM_CLKCTRL_HBUS_ASM_EMIPORT_AS_ENABLE 0x08000000
-#define BM_CLKCTRL_HBUS_APBHDMA_AS_ENABLE 0x04000000
-#define BM_CLKCTRL_HBUS_APBXDMA_AS_ENABLE 0x02000000
-#define BM_CLKCTRL_HBUS_TRAFFIC_JAM_AS_ENABLE 0x01000000
-#define BM_CLKCTRL_HBUS_TRAFFIC_AS_ENABLE 0x00800000
-#define BM_CLKCTRL_HBUS_CPU_DATA_AS_ENABLE 0x00400000
-#define BM_CLKCTRL_HBUS_CPU_INSTR_AS_ENABLE 0x00200000
-#define BM_CLKCTRL_HBUS_ASM_ENABLE 0x00100000
-#define BM_CLKCTRL_HBUS_AUTO_CLEAR_DIV_ENABLE 0x00080000
-#define BP_CLKCTRL_HBUS_SLOW_DIV 16
-#define BM_CLKCTRL_HBUS_SLOW_DIV 0x00070000
-#define BF_CLKCTRL_HBUS_SLOW_DIV(v) \
- (((v) << 16) & BM_CLKCTRL_HBUS_SLOW_DIV)
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY1 0x0
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY2 0x1
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY4 0x2
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY8 0x3
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY16 0x4
-#define BV_CLKCTRL_HBUS_SLOW_DIV__BY32 0x5
-#define BM_CLKCTRL_HBUS_DIV_FRAC_EN 0x00000020
-#define BP_CLKCTRL_HBUS_DIV 0
-#define BM_CLKCTRL_HBUS_DIV 0x0000001F
-#define BF_CLKCTRL_HBUS_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_HBUS_DIV)
-
-#define HW_CLKCTRL_XBUS (0x00000070)
-
-#define BM_CLKCTRL_XBUS_BUSY 0x80000000
-#define BM_CLKCTRL_XBUS_AUTO_CLEAR_DIV_ENABLE 0x00000800
-#define BM_CLKCTRL_XBUS_DIV_FRAC_EN 0x00000400
-#define BP_CLKCTRL_XBUS_DIV 0
-#define BM_CLKCTRL_XBUS_DIV 0x000003FF
-#define BF_CLKCTRL_XBUS_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_XBUS_DIV)
-
-#define HW_CLKCTRL_XTAL (0x00000080)
-#define HW_CLKCTRL_XTAL_SET (0x00000084)
-#define HW_CLKCTRL_XTAL_CLR (0x00000088)
-#define HW_CLKCTRL_XTAL_TOG (0x0000008c)
-
-#define BP_CLKCTRL_XTAL_UART_CLK_GATE 31
-#define BM_CLKCTRL_XTAL_UART_CLK_GATE 0x80000000
-#define BP_CLKCTRL_XTAL_PWM_CLK24M_GATE 29
-#define BM_CLKCTRL_XTAL_PWM_CLK24M_GATE 0x20000000
-#define BP_CLKCTRL_XTAL_TIMROT_CLK32K_GATE 26
-#define BM_CLKCTRL_XTAL_TIMROT_CLK32K_GATE 0x04000000
-#define BP_CLKCTRL_XTAL_DIV_UART 0
-#define BM_CLKCTRL_XTAL_DIV_UART 0x00000003
-#define BF_CLKCTRL_XTAL_DIV_UART(v) \
- (((v) << 0) & BM_CLKCTRL_XTAL_DIV_UART)
-
-#define HW_CLKCTRL_SSP0 (0x00000090)
-
-#define BP_CLKCTRL_SSP0_CLKGATE 31
-#define BM_CLKCTRL_SSP0_CLKGATE 0x80000000
-#define BM_CLKCTRL_SSP0_BUSY 0x20000000
-#define BM_CLKCTRL_SSP0_DIV_FRAC_EN 0x00000200
-#define BP_CLKCTRL_SSP0_DIV 0
-#define BM_CLKCTRL_SSP0_DIV 0x000001FF
-#define BF_CLKCTRL_SSP0_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_SSP0_DIV)
-
-#define HW_CLKCTRL_SSP1 (0x000000a0)
-
-#define BP_CLKCTRL_SSP1_CLKGATE 31
-#define BM_CLKCTRL_SSP1_CLKGATE 0x80000000
-#define BM_CLKCTRL_SSP1_BUSY 0x20000000
-#define BM_CLKCTRL_SSP1_DIV_FRAC_EN 0x00000200
-#define BP_CLKCTRL_SSP1_DIV 0
-#define BM_CLKCTRL_SSP1_DIV 0x000001FF
-#define BF_CLKCTRL_SSP1_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_SSP1_DIV)
-
-#define HW_CLKCTRL_SSP2 (0x000000b0)
-
-#define BP_CLKCTRL_SSP2_CLKGATE 31
-#define BM_CLKCTRL_SSP2_CLKGATE 0x80000000
-#define BM_CLKCTRL_SSP2_BUSY 0x20000000
-#define BM_CLKCTRL_SSP2_DIV_FRAC_EN 0x00000200
-#define BP_CLKCTRL_SSP2_DIV 0
-#define BM_CLKCTRL_SSP2_DIV 0x000001FF
-#define BF_CLKCTRL_SSP2_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_SSP2_DIV)
-
-#define HW_CLKCTRL_SSP3 (0x000000c0)
-
-#define BP_CLKCTRL_SSP3_CLKGATE 31
-#define BM_CLKCTRL_SSP3_CLKGATE 0x80000000
-#define BM_CLKCTRL_SSP3_BUSY 0x20000000
-#define BM_CLKCTRL_SSP3_DIV_FRAC_EN 0x00000200
-#define BP_CLKCTRL_SSP3_DIV 0
-#define BM_CLKCTRL_SSP3_DIV 0x000001FF
-#define BF_CLKCTRL_SSP3_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_SSP3_DIV)
-
-#define HW_CLKCTRL_GPMI (0x000000d0)
-
-#define BP_CLKCTRL_GPMI_CLKGATE 31
-#define BM_CLKCTRL_GPMI_CLKGATE 0x80000000
-#define BM_CLKCTRL_GPMI_BUSY 0x20000000
-#define BM_CLKCTRL_GPMI_DIV_FRAC_EN 0x00000400
-#define BP_CLKCTRL_GPMI_DIV 0
-#define BM_CLKCTRL_GPMI_DIV 0x000003FF
-#define BF_CLKCTRL_GPMI_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_GPMI_DIV)
-
-#define HW_CLKCTRL_SPDIF (0x000000e0)
-
-#define BP_CLKCTRL_SPDIF_CLKGATE 31
-#define BM_CLKCTRL_SPDIF_CLKGATE 0x80000000
-
-#define HW_CLKCTRL_EMI (0x000000f0)
-
-#define BP_CLKCTRL_EMI_CLKGATE 31
-#define BM_CLKCTRL_EMI_CLKGATE 0x80000000
-#define BM_CLKCTRL_EMI_SYNC_MODE_EN 0x40000000
-#define BM_CLKCTRL_EMI_BUSY_REF_XTAL 0x20000000
-#define BM_CLKCTRL_EMI_BUSY_REF_EMI 0x10000000
-#define BM_CLKCTRL_EMI_BUSY_REF_CPU 0x08000000
-#define BM_CLKCTRL_EMI_BUSY_SYNC_MODE 0x04000000
-#define BM_CLKCTRL_EMI_BUSY_DCC_RESYNC 0x00020000
-#define BM_CLKCTRL_EMI_DCC_RESYNC_ENABLE 0x00010000
-#define BP_CLKCTRL_EMI_DIV_XTAL 8
-#define BM_CLKCTRL_EMI_DIV_XTAL 0x00000F00
-#define BF_CLKCTRL_EMI_DIV_XTAL(v) \
- (((v) << 8) & BM_CLKCTRL_EMI_DIV_XTAL)
-#define BP_CLKCTRL_EMI_DIV_EMI 0
-#define BM_CLKCTRL_EMI_DIV_EMI 0x0000003F
-#define BF_CLKCTRL_EMI_DIV_EMI(v) \
- (((v) << 0) & BM_CLKCTRL_EMI_DIV_EMI)
-
-#define HW_CLKCTRL_SAIF0 (0x00000100)
-
-#define BP_CLKCTRL_SAIF0_CLKGATE 31
-#define BM_CLKCTRL_SAIF0_CLKGATE 0x80000000
-#define BM_CLKCTRL_SAIF0_BUSY 0x20000000
-#define BM_CLKCTRL_SAIF0_DIV_FRAC_EN 0x00010000
-#define BP_CLKCTRL_SAIF0_DIV 0
-#define BM_CLKCTRL_SAIF0_DIV 0x0000FFFF
-#define BF_CLKCTRL_SAIF0_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_SAIF0_DIV)
-
-#define HW_CLKCTRL_SAIF1 (0x00000110)
-
-#define BP_CLKCTRL_SAIF1_CLKGATE 31
-#define BM_CLKCTRL_SAIF1_CLKGATE 0x80000000
-#define BM_CLKCTRL_SAIF1_BUSY 0x20000000
-#define BM_CLKCTRL_SAIF1_DIV_FRAC_EN 0x00010000
-#define BP_CLKCTRL_SAIF1_DIV 0
-#define BM_CLKCTRL_SAIF1_DIV 0x0000FFFF
-#define BF_CLKCTRL_SAIF1_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_SAIF1_DIV)
-
-#define HW_CLKCTRL_DIS_LCDIF (0x00000120)
-
-#define BP_CLKCTRL_DIS_LCDIF_CLKGATE 31
-#define BM_CLKCTRL_DIS_LCDIF_CLKGATE 0x80000000
-#define BM_CLKCTRL_DIS_LCDIF_BUSY 0x20000000
-#define BM_CLKCTRL_DIS_LCDIF_DIV_FRAC_EN 0x00002000
-#define BP_CLKCTRL_DIS_LCDIF_DIV 0
-#define BM_CLKCTRL_DIS_LCDIF_DIV 0x00001FFF
-#define BF_CLKCTRL_DIS_LCDIF_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_DIS_LCDIF_DIV)
-
-#define HW_CLKCTRL_ETM (0x00000130)
-
-#define BM_CLKCTRL_ETM_CLKGATE 0x80000000
-#define BM_CLKCTRL_ETM_BUSY 0x20000000
-#define BM_CLKCTRL_ETM_DIV_FRAC_EN 0x00000080
-#define BP_CLKCTRL_ETM_DIV 0
-#define BM_CLKCTRL_ETM_DIV 0x0000007F
-#define BF_CLKCTRL_ETM_DIV(v) \
- (((v) << 0) & BM_CLKCTRL_ETM_DIV)
-
-#define HW_CLKCTRL_ENET (0x00000140)
-
-#define BM_CLKCTRL_ENET_SLEEP 0x80000000
-#define BP_CLKCTRL_ENET_DISABLE 30
-#define BM_CLKCTRL_ENET_DISABLE 0x40000000
-#define BM_CLKCTRL_ENET_STATUS 0x20000000
-#define BM_CLKCTRL_ENET_BUSY_TIME 0x08000000
-#define BP_CLKCTRL_ENET_DIV_TIME 21
-#define BM_CLKCTRL_ENET_DIV_TIME 0x07E00000
-#define BF_CLKCTRL_ENET_DIV_TIME(v) \
- (((v) << 21) & BM_CLKCTRL_ENET_DIV_TIME)
-#define BM_CLKCTRL_ENET_BUSY 0x08000000
-#define BP_CLKCTRL_ENET_DIV 21
-#define BM_CLKCTRL_ENET_DIV 0x07E00000
-#define BF_CLKCTRL_ENET_DIV(v) \
- (((v) << 21) & BM_CLKCTRL_ENET_DIV)
-#define BP_CLKCTRL_ENET_TIME_SEL 19
-#define BM_CLKCTRL_ENET_TIME_SEL 0x00180000
-#define BF_CLKCTRL_ENET_TIME_SEL(v) \
- (((v) << 19) & BM_CLKCTRL_ENET_TIME_SEL)
-#define BV_CLKCTRL_ENET_TIME_SEL__XTAL 0x0
-#define BV_CLKCTRL_ENET_TIME_SEL__PLL 0x1
-#define BV_CLKCTRL_ENET_TIME_SEL__RMII_CLK 0x2
-#define BV_CLKCTRL_ENET_TIME_SEL__UNDEFINED 0x3
-#define BM_CLKCTRL_ENET_CLK_OUT_EN 0x00040000
-#define BM_CLKCTRL_ENET_RESET_BY_SW_CHIP 0x00020000
-#define BM_CLKCTRL_ENET_RESET_BY_SW 0x00010000
-
-#define HW_CLKCTRL_HSADC (0x00000150)
-
-#define BM_CLKCTRL_HSADC_RESETB 0x40000000
-#define BP_CLKCTRL_HSADC_FREQDIV 28
-#define BM_CLKCTRL_HSADC_FREQDIV 0x30000000
-#define BF_CLKCTRL_HSADC_FREQDIV(v) \
- (((v) << 28) & BM_CLKCTRL_HSADC_FREQDIV)
-
-#define HW_CLKCTRL_FLEXCAN (0x00000160)
-
-#define BP_CLKCTRL_FLEXCAN_STOP_CAN0 30
-#define BM_CLKCTRL_FLEXCAN_STOP_CAN0 0x40000000
-#define BM_CLKCTRL_FLEXCAN_CAN0_STATUS 0x20000000
-#define BP_CLKCTRL_FLEXCAN_STOP_CAN1 28
-#define BM_CLKCTRL_FLEXCAN_STOP_CAN1 0x10000000
-#define BM_CLKCTRL_FLEXCAN_CAN1_STATUS 0x08000000
-
-#define HW_CLKCTRL_FRAC0 (0x000001b0)
-#define HW_CLKCTRL_FRAC0_SET (0x000001b4)
-#define HW_CLKCTRL_FRAC0_CLR (0x000001b8)
-#define HW_CLKCTRL_FRAC0_TOG (0x000001bc)
-
-#define BP_CLKCTRL_FRAC0_CLKGATEIO0 31
-#define BM_CLKCTRL_FRAC0_CLKGATEIO0 0x80000000
-#define BM_CLKCTRL_FRAC0_IO0_STABLE 0x40000000
-#define BP_CLKCTRL_FRAC0_IO0FRAC 24
-#define BM_CLKCTRL_FRAC0_IO0FRAC 0x3F000000
-#define BF_CLKCTRL_FRAC0_IO0FRAC(v) \
- (((v) << 24) & BM_CLKCTRL_FRAC0_IO0FRAC)
-#define BP_CLKCTRL_FRAC0_CLKGATEIO1 23
-#define BM_CLKCTRL_FRAC0_CLKGATEIO1 0x00800000
-#define BM_CLKCTRL_FRAC0_IO1_STABLE 0x00400000
-#define BP_CLKCTRL_FRAC0_IO1FRAC 16
-#define BM_CLKCTRL_FRAC0_IO1FRAC 0x003F0000
-#define BF_CLKCTRL_FRAC0_IO1FRAC(v) \
- (((v) << 16) & BM_CLKCTRL_FRAC0_IO1FRAC)
-#define BP_CLKCTRL_FRAC0_CLKGATEEMI 15
-#define BM_CLKCTRL_FRAC0_CLKGATEEMI 0x00008000
-#define BM_CLKCTRL_FRAC0_EMI_STABLE 0x00004000
-#define BP_CLKCTRL_FRAC0_EMIFRAC 8
-#define BM_CLKCTRL_FRAC0_EMIFRAC 0x00003F00
-#define BF_CLKCTRL_FRAC0_EMIFRAC(v) \
- (((v) << 8) & BM_CLKCTRL_FRAC0_EMIFRAC)
-#define BP_CLKCTRL_FRAC0_CLKGATECPU 7
-#define BM_CLKCTRL_FRAC0_CLKGATECPU 0x00000080
-#define BM_CLKCTRL_FRAC0_CPU_STABLE 0x00000040
-#define BP_CLKCTRL_FRAC0_CPUFRAC 0
-#define BM_CLKCTRL_FRAC0_CPUFRAC 0x0000003F
-#define BF_CLKCTRL_FRAC0_CPUFRAC(v) \
- (((v) << 0) & BM_CLKCTRL_FRAC0_CPUFRAC)
-
-#define HW_CLKCTRL_FRAC1 (0x000001c0)
-#define HW_CLKCTRL_FRAC1_SET (0x000001c4)
-#define HW_CLKCTRL_FRAC1_CLR (0x000001c8)
-#define HW_CLKCTRL_FRAC1_TOG (0x000001cc)
-
-#define BP_CLKCTRL_FRAC1_CLKGATEGPMI 23
-#define BM_CLKCTRL_FRAC1_CLKGATEGPMI 0x00800000
-#define BM_CLKCTRL_FRAC1_GPMI_STABLE 0x00400000
-#define BP_CLKCTRL_FRAC1_GPMIFRAC 16
-#define BM_CLKCTRL_FRAC1_GPMIFRAC 0x003F0000
-#define BF_CLKCTRL_FRAC1_GPMIFRAC(v) \
- (((v) << 16) & BM_CLKCTRL_FRAC1_GPMIFRAC)
-#define BP_CLKCTRL_FRAC1_CLKGATEHSADC 15
-#define BM_CLKCTRL_FRAC1_CLKGATEHSADC 0x00008000
-#define BM_CLKCTRL_FRAC1_HSADC_STABLE 0x00004000
-#define BP_CLKCTRL_FRAC1_HSADCFRAC 8
-#define BM_CLKCTRL_FRAC1_HSADCFRAC 0x00003F00
-#define BF_CLKCTRL_FRAC1_HSADCFRAC(v) \
- (((v) << 8) & BM_CLKCTRL_FRAC1_HSADCFRAC)
-#define BP_CLKCTRL_FRAC1_CLKGATEPIX 7
-#define BM_CLKCTRL_FRAC1_CLKGATEPIX 0x00000080
-#define BM_CLKCTRL_FRAC1_PIX_STABLE 0x00000040
-#define BP_CLKCTRL_FRAC1_PIXFRAC 0
-#define BM_CLKCTRL_FRAC1_PIXFRAC 0x0000003F
-#define BF_CLKCTRL_FRAC1_PIXFRAC(v) \
- (((v) << 0) & BM_CLKCTRL_FRAC1_PIXFRAC)
-
-#define HW_CLKCTRL_CLKSEQ (0x000001d0)
-#define HW_CLKCTRL_CLKSEQ_SET (0x000001d4)
-#define HW_CLKCTRL_CLKSEQ_CLR (0x000001d8)
-#define HW_CLKCTRL_CLKSEQ_TOG (0x000001dc)
-
-#define BM_CLKCTRL_CLKSEQ_BYPASS_CPU 0x00040000
-#define BM_CLKCTRL_CLKSEQ_BYPASS_DIS_LCDIF 0x00004000
-#define BV_CLKCTRL_CLKSEQ_BYPASS_DIS_LCDIF__BYPASS 0x1
-#define BV_CLKCTRL_CLKSEQ_BYPASS_DIS_LCDIF__PFD 0x0
-#define BM_CLKCTRL_CLKSEQ_BYPASS_ETM 0x00000100
-#define BM_CLKCTRL_CLKSEQ_BYPASS_EMI 0x00000080
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP3 0x00000040
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP2 0x00000020
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP1 0x00000010
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP0 0x00000008
-#define BM_CLKCTRL_CLKSEQ_BYPASS_GPMI 0x00000004
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SAIF1 0x00000002
-#define BM_CLKCTRL_CLKSEQ_BYPASS_SAIF0 0x00000001
-
-#define HW_CLKCTRL_RESET (0x000001e0)
-
-#define BM_CLKCTRL_RESET_WDOG_POR_DISABLE 0x00000020
-#define BM_CLKCTRL_RESET_EXTERNAL_RESET_ENABLE 0x00000010
-#define BM_CLKCTRL_RESET_THERMAL_RESET_ENABLE 0x00000008
-#define BM_CLKCTRL_RESET_THERMAL_RESET_DEFAULT 0x00000004
-#define BM_CLKCTRL_RESET_CHIP 0x00000002
-#define BM_CLKCTRL_RESET_DIG 0x00000001
-
-#define HW_CLKCTRL_STATUS (0x000001f0)
-
-#define BP_CLKCTRL_STATUS_CPU_LIMIT 30
-#define BM_CLKCTRL_STATUS_CPU_LIMIT 0xC0000000
-#define BF_CLKCTRL_STATUS_CPU_LIMIT(v) \
- (((v) << 30) & BM_CLKCTRL_STATUS_CPU_LIMIT)
-
-#define HW_CLKCTRL_VERSION (0x00000200)
-
-#define BP_CLKCTRL_VERSION_MAJOR 24
-#define BM_CLKCTRL_VERSION_MAJOR 0xFF000000
-#define BF_CLKCTRL_VERSION_MAJOR(v) \
- (((v) << 24) & BM_CLKCTRL_VERSION_MAJOR)
-#define BP_CLKCTRL_VERSION_MINOR 16
-#define BM_CLKCTRL_VERSION_MINOR 0x00FF0000
-#define BF_CLKCTRL_VERSION_MINOR(v) \
- (((v) << 16) & BM_CLKCTRL_VERSION_MINOR)
-#define BP_CLKCTRL_VERSION_STEP 0
-#define BM_CLKCTRL_VERSION_STEP 0x0000FFFF
-#define BF_CLKCTRL_VERSION_STEP(v) \
- (((v) << 0) & BM_CLKCTRL_VERSION_STEP)
-
-#endif /* __REGS_CLKCTRL_MX28_H__ */
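
Both deleted headers follow the same auto-generated naming scheme: for each field FOO of register HW_CLKCTRL_x, BP_*_FOO is the bit position, BM_*_FOO the mask, BF_*_FOO(v) shifts a value into place, and BV_* names the legal encodings; registers listed with _SET/_CLR/_TOG companions at +0x4/+0x8/+0xc support write-one-to-set/clear/toggle. A minimal sketch of how driver code would use that convention against the MX28 definitions above; everything except the macros themselves (the clkctrl pointer, the function, the flow) is hypothetical:

#include <linux/io.h>
#include <asm/processor.h>

static void __iomem *clkctrl;	/* assumed ioremap()ed CLKCTRL base */

static void ssp0_select_pll_and_divide(unsigned int div)
{
	u32 val;

	/* CLKSEQ has _SET/_CLR shadows: writing the mask to the _CLR
	 * offset clears the bypass bit, i.e. selects the PLL path. */
	writel(BM_CLKCTRL_CLKSEQ_BYPASS_SSP0,
	       clkctrl + HW_CLKCTRL_CLKSEQ_CLR);

	/* Plain registers are read-modify-written with BM_/BF_ helpers. */
	val = readl(clkctrl + HW_CLKCTRL_SSP0);
	val &= ~BM_CLKCTRL_SSP0_DIV;
	val |= BF_CLKCTRL_SSP0_DIV(div);
	writel(val, clkctrl + HW_CLKCTRL_SSP0);

	/* The new divider takes effect once the BUSY bit drops. */
	while (readl(clkctrl + HW_CLKCTRL_SSP0) & BM_CLKCTRL_SSP0_BUSY)
		cpu_relax();
}
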
diff --git a/arch/arm/mach-mxs/system.c b/arch/arm/mach-mxs/system.c
index 80ac1fca8a0..30042e23bfa 100644
--- a/arch/arm/mach-mxs/system.c
+++ b/arch/arm/mach-mxs/system.c
@@ -37,8 +37,6 @@
#define MXS_MODULE_CLKGATE (1 << 30)
#define MXS_MODULE_SFTRST (1 << 31)
-#define CLKCTRL_TIMEOUT 10 /* 10 ms */
-
static void __iomem *mxs_clkctrl_reset_addr;
/*
@@ -139,17 +137,3 @@ error:
return -ETIMEDOUT;
}
EXPORT_SYMBOL(mxs_reset_block);
-
-int mxs_clkctrl_timeout(unsigned int reg_offset, unsigned int mask)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(CLKCTRL_TIMEOUT);
- while (readl_relaxed(MXS_IO_ADDRESS(MXS_CLKCTRL_BASE_ADDR)
- + reg_offset) & mask) {
- if (time_after(jiffies, timeout)) {
- pr_err("Timeout at CLKCTRL + 0x%x\n", reg_offset);
- return -ETIMEDOUT;
- }
- }
-
- return 0;
-}
diff --git a/arch/arm/mach-mxs/timer.c b/arch/arm/mach-mxs/timer.c
index 564a63279f1..02d36de9c4e 100644
--- a/arch/arm/mach-mxs/timer.c
+++ b/arch/arm/mach-mxs/timer.c
@@ -20,6 +20,7 @@
* MA 02110-1301, USA.
*/
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clockchips.h>
@@ -243,8 +244,16 @@ static int __init mxs_clocksource_init(struct clk *timer_clk)
return 0;
}
-void __init mxs_timer_init(struct clk *timer_clk, int irq)
+void __init mxs_timer_init(int irq)
{
+ struct clk *timer_clk;
+
+ timer_clk = clk_get_sys("timrot", NULL);
+ if (IS_ERR(timer_clk)) {
+ pr_err("%s: failed to get clk\n", __func__);
+ return;
+ }
+
clk_prepare_enable(timer_clk);
/*
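
The mxs timer now looks up its clock by name instead of taking a struct clk argument: clk_get_sys("timrot", NULL) resolves a clkdev lookup that the platform clock code is expected to have registered beforehand. A sketch of that counterpart, where timrot_clk is a placeholder for whatever struct clk the mxs clock driver actually creates:

#include <linux/init.h>
#include <linux/clkdev.h>

/* Sketch: register the lookup that clk_get_sys("timrot", NULL) resolves.
 * timrot_clk is hypothetical; the real clk comes from the mxs clk code. */
static int __init register_timrot_lookup(struct clk *timrot_clk)
{
	/* con_id NULL, dev_id "timrot", matching the clk_get_sys() call */
	return clk_register_clkdev(timrot_clk, NULL, "timrot");
}
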
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index c1b681ef4cb..f2f8a584701 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -595,7 +595,12 @@ gpio_free:
gpio_free(AMS_DELTA_GPIO_PIN_MODEM_IRQ);
return err;
}
-late_initcall(late_init);
+
+static void __init ams_delta_init_late(void)
+{
+ omap1_init_late();
+ late_init();
+}
static void __init ams_delta_map_io(void)
{
@@ -611,6 +616,7 @@ MACHINE_START(AMS_DELTA, "Amstrad E3 (Delta)")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = ams_delta_init,
+ .init_late = ams_delta_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
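
The board's private late_initcall() is folded into a machine-specific .init_late hook; the ARM core then invokes that hook from a single late_initcall of its own, roughly like the sketch below (paraphrased, not the verbatim arch/arm/kernel/setup.c code):

#include <linux/init.h>
#include <asm/mach/arch.h>

/* Paraphrased sketch of how the core dispatches the per-machine hook. */
static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);

This is why the remaining MACHINE_START blocks in this patch only need to fill in .init_late with the appropriate omap1/omap2-generation helper.
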
diff --git a/arch/arm/mach-omap1/board-fsample.c b/arch/arm/mach-omap1/board-fsample.c
index 4a4afb37102..c7364fdbda0 100644
--- a/arch/arm/mach-omap1/board-fsample.c
+++ b/arch/arm/mach-omap1/board-fsample.c
@@ -369,6 +369,7 @@ MACHINE_START(OMAP_FSAMPLE, "OMAP730 F-Sample")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = omap_fsample_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-generic.c b/arch/arm/mach-omap1/board-generic.c
index 9a5fe581bc1..e75e2d55a2d 100644
--- a/arch/arm/mach-omap1/board-generic.c
+++ b/arch/arm/mach-omap1/board-generic.c
@@ -88,6 +88,7 @@ MACHINE_START(OMAP_GENERIC, "Generic OMAP1510/1610/1710")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = omap_generic_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index 057ec13f064..7e503686f7a 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -431,6 +431,7 @@ MACHINE_START(OMAP_H2, "TI-H2")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = h2_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index f6ddf875965..9fb03f189d9 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -425,6 +425,7 @@ MACHINE_START(OMAP_H3, "TI OMAP1710 H3 board")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = h3_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-htcherald.c b/arch/arm/mach-omap1/board-htcherald.c
index 60c06ee2385..118a9d4a4c5 100644
--- a/arch/arm/mach-omap1/board-htcherald.c
+++ b/arch/arm/mach-omap1/board-htcherald.c
@@ -605,6 +605,7 @@ MACHINE_START(HERALD, "HTC Herald")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = htcherald_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
index 67d7fd57a69..7970223a559 100644
--- a/arch/arm/mach-omap1/board-innovator.c
+++ b/arch/arm/mach-omap1/board-innovator.c
@@ -457,6 +457,7 @@ MACHINE_START(OMAP_INNOVATOR, "TI-Innovator")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = innovator_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index d21dcc2fbc5..7212ae97f44 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -255,6 +255,7 @@ MACHINE_START(NOKIA770, "Nokia 770")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = omap_nokia770_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index a5f85dda3f6..da8d872d3d1 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -574,6 +574,7 @@ MACHINE_START(OMAP_OSK, "TI-OSK")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = osk_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-palmte.c b/arch/arm/mach-omap1/board-palmte.c
index a60e6c22f81..949b62a7369 100644
--- a/arch/arm/mach-omap1/board-palmte.c
+++ b/arch/arm/mach-omap1/board-palmte.c
@@ -267,6 +267,7 @@ MACHINE_START(OMAP_PALMTE, "OMAP310 based Palm Tungsten E")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = omap_palmte_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-palmtt.c b/arch/arm/mach-omap1/board-palmtt.c
index 8d854878547..7f1e1cf2bf4 100644
--- a/arch/arm/mach-omap1/board-palmtt.c
+++ b/arch/arm/mach-omap1/board-palmtt.c
@@ -313,6 +313,7 @@ MACHINE_START(OMAP_PALMTT, "OMAP1510 based Palm Tungsten|T")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = omap_palmtt_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-palmz71.c b/arch/arm/mach-omap1/board-palmz71.c
index 61ed4f0247c..3c71c6bace2 100644
--- a/arch/arm/mach-omap1/board-palmz71.c
+++ b/arch/arm/mach-omap1/board-palmz71.c
@@ -330,6 +330,7 @@ MACHINE_START(OMAP_PALMZ71, "OMAP310 based Palm Zire71")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = omap_palmz71_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-perseus2.c b/arch/arm/mach-omap1/board-perseus2.c
index a2c88890e76..f2cb24387c2 100644
--- a/arch/arm/mach-omap1/board-perseus2.c
+++ b/arch/arm/mach-omap1/board-perseus2.c
@@ -331,6 +331,7 @@ MACHINE_START(OMAP_PERSEUS2, "OMAP730 Perseus2")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = omap_perseus2_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-sx1.c b/arch/arm/mach-omap1/board-sx1.c
index f34cb74a9f4..3b7b82b1368 100644
--- a/arch/arm/mach-omap1/board-sx1.c
+++ b/arch/arm/mach-omap1/board-sx1.c
@@ -407,6 +407,7 @@ MACHINE_START(SX1, "OMAP310 based Siemens SX1")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = omap_sx1_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = omap1_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c
index 37232d04233..afd67f0ec49 100644
--- a/arch/arm/mach-omap1/board-voiceblue.c
+++ b/arch/arm/mach-omap1/board-voiceblue.c
@@ -294,6 +294,7 @@ MACHINE_START(VOICEBLUE, "VoiceBlue OMAP5910")
.reserve = omap_reserve,
.init_irq = omap1_init_irq,
.init_machine = voiceblue_init,
+ .init_late = omap1_init_late,
.timer = &omap1_timer,
.restart = voiceblue_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap1/common.h b/arch/arm/mach-omap1/common.h
index bb7779b5779..c2552b24f9f 100644
--- a/arch/arm/mach-omap1/common.h
+++ b/arch/arm/mach-omap1/common.h
@@ -53,8 +53,18 @@ static inline void omap16xx_map_io(void)
}
#endif
+#ifdef CONFIG_OMAP_SERIAL_WAKE
+int omap_serial_wakeup_init(void);
+#else
+static inline int omap_serial_wakeup_init(void)
+{
+ return 0;
+}
+#endif
+
void omap1_init_early(void);
void omap1_init_irq(void);
+void omap1_init_late(void);
void omap1_restart(char, const char *);
extern void __init omap_check_revision(void);
@@ -63,7 +73,14 @@ extern void omap1_nand_cmd_ctl(struct mtd_info *mtd, int cmd,
unsigned int ctrl);
extern struct sys_timer omap1_timer;
-extern bool omap_32k_timer_init(void);
+#ifdef CONFIG_OMAP_32K_TIMER
+extern int omap_32k_timer_init(void);
+#else
+static inline int __init omap_32k_timer_init(void)
+{
+ return -ENODEV;
+}
+#endif
extern u32 omap_irq_flags;
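
The reworked omap_32k_timer_init() prototype pairs the real implementation with an inline stub returning -ENODEV when CONFIG_OMAP_32K_TIMER is off, so callers can fall back at runtime instead of wrapping every call site in #ifdefs. A minimal sketch of the idiom with entirely hypothetical names:

#include <linux/errno.h>
#include <linux/init.h>

#ifdef CONFIG_MY_FEATURE
extern int my_feature_timer_init(void);
#else
static inline int my_feature_timer_init(void)
{
	return -ENODEV;		/* feature compiled out */
}
#endif

static void __init my_fallback_timer_init(void)
{
	/* ... */
}

static void __init my_board_timer_init(void)
{
	/* No #ifdef at the call site: the -ENODEV stub selects the fallback. */
	if (my_feature_timer_init() != 0)
		my_fallback_timer_init();
}
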
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
index dcd8ddbec2b..fa1fa4deb6a 100644
--- a/arch/arm/mach-omap1/devices.c
+++ b/arch/arm/mach-omap1/devices.c
@@ -22,6 +22,7 @@
#include <plat/tc.h>
#include <plat/board.h>
#include <plat/mux.h>
+#include <plat/dma.h>
#include <plat/mmc.h>
#include <plat/omap7xx.h>
@@ -31,6 +32,22 @@
#include "common.h"
#include "clock.h"
+#if defined(CONFIG_SND_SOC) || defined(CONFIG_SND_SOC_MODULE)
+
+static struct platform_device omap_pcm = {
+ .name = "omap-pcm-audio",
+ .id = -1,
+};
+
+static void omap_init_audio(void)
+{
+ platform_device_register(&omap_pcm);
+}
+
+#else
+static inline void omap_init_audio(void) {}
+#endif
+
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE)
@@ -128,6 +145,56 @@ static inline void omap1_mmc_mux(struct omap_mmc_platform_data *mmc_controller,
}
}
+#define OMAP_MMC_NR_RES 4
+
+/*
+ * Register MMC devices.
+ */
+static int __init omap_mmc_add(const char *name, int id, unsigned long base,
+ unsigned long size, unsigned int irq,
+ unsigned rx_req, unsigned tx_req,
+ struct omap_mmc_platform_data *data)
+{
+ struct platform_device *pdev;
+ struct resource res[OMAP_MMC_NR_RES];
+ int ret;
+
+ pdev = platform_device_alloc(name, id);
+ if (!pdev)
+ return -ENOMEM;
+
+ memset(res, 0, OMAP_MMC_NR_RES * sizeof(struct resource));
+ res[0].start = base;
+ res[0].end = base + size - 1;
+ res[0].flags = IORESOURCE_MEM;
+ res[1].start = res[1].end = irq;
+ res[1].flags = IORESOURCE_IRQ;
+ res[2].start = rx_req;
+ res[2].name = "rx";
+ res[2].flags = IORESOURCE_DMA;
+ res[3].start = tx_req;
+ res[3].name = "tx";
+ res[3].flags = IORESOURCE_DMA;
+
+ ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
+ if (ret == 0)
+ ret = platform_device_add_data(pdev, data, sizeof(*data));
+ if (ret)
+ goto fail;
+
+ ret = platform_device_add(pdev);
+ if (ret)
+ goto fail;
+
+ /* return device handle to board setup code */
+ data->dev = &pdev->dev;
+ return 0;
+
+fail:
+ platform_device_put(pdev);
+ return ret;
+}
+
void __init omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
int nr_controllers)
{
@@ -135,6 +202,7 @@ void __init omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
for (i = 0; i < nr_controllers; i++) {
unsigned long base, size;
+ unsigned rx_req, tx_req;
unsigned int irq = 0;
if (!mmc_data[i])
@@ -146,19 +214,24 @@ void __init omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
case 0:
base = OMAP1_MMC1_BASE;
irq = INT_MMC;
+ rx_req = OMAP_DMA_MMC_RX;
+ tx_req = OMAP_DMA_MMC_TX;
break;
case 1:
if (!cpu_is_omap16xx())
return;
base = OMAP1_MMC2_BASE;
irq = INT_1610_MMC2;
+ rx_req = OMAP_DMA_MMC2_RX;
+ tx_req = OMAP_DMA_MMC2_TX;
break;
default:
continue;
}
size = OMAP1_MMC_SIZE;
- omap_mmc_add("mmci-omap", i, base, size, irq, mmc_data[i]);
+ omap_mmc_add("mmci-omap", i, base, size, irq,
+ rx_req, tx_req, mmc_data[i]);
};
}
@@ -242,23 +315,48 @@ void __init omap1_camera_init(void *info)
static inline void omap_init_sti(void) {}
-#if defined(CONFIG_SND_SOC) || defined(CONFIG_SND_SOC_MODULE)
+/* Numbering for the SPI-capable controllers when used for SPI:
+ * spi = 1
+ * uwire = 2
+ * mmc1..2 = 3..4
+ * mcbsp1..3 = 5..7
+ */
-static struct platform_device omap_pcm = {
- .name = "omap-pcm-audio",
- .id = -1,
+#if defined(CONFIG_SPI_OMAP_UWIRE) || defined(CONFIG_SPI_OMAP_UWIRE_MODULE)
+
+#define OMAP_UWIRE_BASE 0xfffb3000
+
+static struct resource uwire_resources[] = {
+ {
+ .start = OMAP_UWIRE_BASE,
+ .end = OMAP_UWIRE_BASE + 0x20,
+ .flags = IORESOURCE_MEM,
+ },
};
-static void omap_init_audio(void)
+static struct platform_device omap_uwire_device = {
+ .name = "omap_uwire",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(uwire_resources),
+ .resource = uwire_resources,
+};
+
+static void omap_init_uwire(void)
{
- platform_device_register(&omap_pcm);
-}
+ /* FIXME define and use a boot tag; not all boards will be hooking
+ * up devices to the microwire controller, and multi-board configs
+ * mean that CONFIG_SPI_OMAP_UWIRE may be configured anyway...
+ */
+ /* board-specific code must configure chipselects (only a few
+ * are normally used) and SCLK/SDI/SDO (each has two choices).
+ */
+ (void) platform_device_register(&omap_uwire_device);
+}
#else
-static inline void omap_init_audio(void) {}
+static inline void omap_init_uwire(void) {}
#endif
-/*-------------------------------------------------------------------------*/
/*
* This gets called after board-specific INIT_MACHINE, and initializes most
@@ -292,11 +390,12 @@ static int __init omap1_init_devices(void)
* in alphabetical order so they're easier to sort through.
*/
+ omap_init_audio();
omap_init_mbox();
omap_init_rtc();
omap_init_spi100k();
omap_init_sti();
- omap_init_audio();
+ omap_init_uwire();
return 0;
}
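
The new omap_mmc_add() helper above hands the DMA request lines to the MMC driver as named IORESOURCE_DMA entries ("rx"/"tx") rather than through platform data. A sketch of how a driver could retrieve them; the probe excerpt and its error handling are purely illustrative:

#include <linux/errno.h>
#include <linux/platform_device.h>

/* Illustrative only: fetch the "rx"/"tx" DMA request lines that
 * omap_mmc_add() registered as named IORESOURCE_DMA resources. */
static int mmc_get_dma_reqs(struct platform_device *pdev,
			    unsigned int *rx_req, unsigned int *tx_req)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
	if (!res)
		return -EINVAL;
	*rx_req = res->start;

	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
	if (!res)
		return -EINVAL;
	*tx_req = res->start;

	return 0;
}
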
diff --git a/arch/arm/mach-omap1/io.c b/arch/arm/mach-omap1/io.c
index 71ce017bf5d..6c95a59f0f1 100644
--- a/arch/arm/mach-omap1/io.c
+++ b/arch/arm/mach-omap1/io.c
@@ -137,6 +137,11 @@ void __init omap1_init_early(void)
omap_init_consistent_dma_size();
}
+void __init omap1_init_late(void)
+{
+ omap_serial_wakeup_init();
+}
+
/*
* NOTE: Please use ioremap + __raw_read/write where possible instead of these
*/
diff --git a/arch/arm/mach-omap1/serial.c b/arch/arm/mach-omap1/serial.c
index 93ae8f29727..6809c9e56c9 100644
--- a/arch/arm/mach-omap1/serial.c
+++ b/arch/arm/mach-omap1/serial.c
@@ -237,7 +237,7 @@ static void __init omap_serial_set_port_wakeup(int gpio_nr)
enable_irq_wake(gpio_to_irq(gpio_nr));
}
-static int __init omap_serial_wakeup_init(void)
+int __init omap_serial_wakeup_init(void)
{
if (!cpu_is_omap16xx())
return 0;
@@ -251,7 +251,6 @@ static int __init omap_serial_wakeup_init(void)
return 0;
}
-late_initcall(omap_serial_wakeup_init);
#endif /* CONFIG_OMAP_SERIAL_WAKE */
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index 4d8dd9a1b04..4062480bfec 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -232,20 +232,6 @@ static inline void omap_mpu_timer_init(void)
}
#endif /* CONFIG_OMAP_MPU_TIMER */
-static inline int omap_32k_timer_usable(void)
-{
- int res = false;
-
- if (cpu_is_omap730() || cpu_is_omap15xx())
- return res;
-
-#ifdef CONFIG_OMAP_32K_TIMER
- res = omap_32k_timer_init();
-#endif
-
- return res;
-}
-
/*
* ---------------------------------------------------------------------------
* Timer initialization
@@ -253,7 +239,7 @@ static inline int omap_32k_timer_usable(void)
*/
static void __init omap1_timer_init(void)
{
- if (!omap_32k_timer_usable())
+ if (omap_32k_timer_init() != 0)
omap_mpu_timer_init();
}
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 325b9a0aa4a..eae49c3980c 100644
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -71,6 +71,7 @@
/* 16xx specific defines */
#define OMAP1_32K_TIMER_BASE 0xfffb9000
+#define OMAP1_32KSYNC_TIMER_BASE 0xfffbc400
#define OMAP1_32K_TIMER_CR 0x08
#define OMAP1_32K_TIMER_TVR 0x00
#define OMAP1_32K_TIMER_TCR 0x04
@@ -182,10 +183,29 @@ static __init void omap_init_32k_timer(void)
* Timer initialization
* ---------------------------------------------------------------------------
*/
-bool __init omap_32k_timer_init(void)
+int __init omap_32k_timer_init(void)
{
- omap_init_clocksource_32k();
- omap_init_32k_timer();
+ int ret = -ENODEV;
- return true;
+ if (cpu_is_omap16xx()) {
+ void __iomem *base;
+ struct clk *sync32k_ick;
+
+ base = ioremap(OMAP1_32KSYNC_TIMER_BASE, SZ_1K);
+ if (!base) {
+ pr_err("32k_counter: failed to map base addr\n");
+ return -ENODEV;
+ }
+
+ sync32k_ick = clk_get(NULL, "omap_32ksync_ick");
+ if (!IS_ERR(sync32k_ick))
+ clk_enable(sync32k_ick);
+
+ ret = omap_init_clocksource_32k(base);
+ }
+
+ if (!ret)
+ omap_init_32k_timer();
+
+ return ret;
}
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 964ee67a3b7..4cf5142f22c 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -78,12 +78,12 @@ config SOC_OMAP3430
default y
select ARCH_OMAP_OTG
-config SOC_OMAPTI81XX
+config SOC_TI81XX
bool "TI81XX support"
depends on ARCH_OMAP3
default y
-config SOC_OMAPAM33XX
+config SOC_AM33XX
bool "AM33XX support"
depends on ARCH_OMAP3
default y
@@ -320,12 +320,12 @@ config MACH_OMAP_3630SDP
config MACH_TI8168EVM
bool "TI8168 Evaluation Module"
- depends on SOC_OMAPTI81XX
+ depends on SOC_TI81XX
default y
config MACH_TI8148EVM
bool "TI8148 Evaluation Module"
- depends on SOC_OMAPTI81XX
+ depends on SOC_TI81XX
default y
config MACH_OMAP_4430SDP
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 385c083d24b..fa742f3c262 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -24,10 +24,11 @@ endif
obj-$(CONFIG_TWL4030_CORE) += omap_twl.o
# SMP support ONLY available for OMAP4
+
obj-$(CONFIG_SMP) += omap-smp.o omap-headsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += omap-hotplug.o
-obj-$(CONFIG_ARCH_OMAP4) += omap4-common.o omap-wakeupgen.o \
- sleep44xx.o
+obj-$(CONFIG_ARCH_OMAP4) += omap4-common.o omap-wakeupgen.o
+obj-$(CONFIG_ARCH_OMAP4) += sleep44xx.o
plus_sec := $(call as-instr,.arch_extension sec,+sec)
AFLAGS_omap-headsmp.o :=-Wa,-march=armv7-a$(plus_sec)
@@ -64,10 +65,10 @@ endif
ifeq ($(CONFIG_PM),y)
obj-$(CONFIG_ARCH_OMAP2) += pm24xx.o
obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o
-obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o \
- cpuidle34xx.o
-obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o omap-mpuss-lowpower.o \
- cpuidle44xx.o
+obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o
+obj-$(CONFIG_ARCH_OMAP3) += cpuidle34xx.o
+obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o omap-mpuss-lowpower.o
+obj-$(CONFIG_ARCH_OMAP4) += cpuidle44xx.o
obj-$(CONFIG_PM_DEBUG) += pm-debug.o
obj-$(CONFIG_OMAP_SMARTREFLEX) += sr_device.o smartreflex.o
obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3) += smartreflex-class3.o
@@ -84,90 +85,86 @@ endif
# PRCM
obj-y += prm_common.o
obj-$(CONFIG_ARCH_OMAP2) += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o
-obj-$(CONFIG_ARCH_OMAP3) += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o \
- vc3xxx_data.o vp3xxx_data.o
-# XXX The presence of cm2xxx_3xxx.o on the line below is temporary and
-# will be removed once the OMAP4 part of the codebase is converted to
-# use OMAP4-specific PRCM functions.
-obj-$(CONFIG_ARCH_OMAP4) += prcm.o cm2xxx_3xxx.o cminst44xx.o \
- cm44xx.o prcm_mpu44xx.o \
- prminst44xx.o vc44xx_data.o \
- vp44xx_data.o prm44xx.o
+obj-$(CONFIG_ARCH_OMAP3) += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o
+obj-$(CONFIG_ARCH_OMAP3) += vc3xxx_data.o vp3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP4) += prcm.o cminst44xx.o cm44xx.o
+obj-$(CONFIG_ARCH_OMAP4) += prcm_mpu44xx.o prminst44xx.o
+obj-$(CONFIG_ARCH_OMAP4) += vc44xx_data.o vp44xx_data.o prm44xx.o
# OMAP voltage domains
voltagedomain-common := voltage.o vc.o vp.o
-obj-$(CONFIG_ARCH_OMAP2) += $(voltagedomain-common) \
- voltagedomains2xxx_data.o
-obj-$(CONFIG_ARCH_OMAP3) += $(voltagedomain-common) \
- voltagedomains3xxx_data.o
-obj-$(CONFIG_ARCH_OMAP4) += $(voltagedomain-common) \
- voltagedomains44xx_data.o
+obj-$(CONFIG_ARCH_OMAP2) += $(voltagedomain-common)
+obj-$(CONFIG_ARCH_OMAP2) += voltagedomains2xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3) += $(voltagedomain-common)
+obj-$(CONFIG_ARCH_OMAP3) += voltagedomains3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP4) += $(voltagedomain-common)
+obj-$(CONFIG_ARCH_OMAP4) += voltagedomains44xx_data.o
# OMAP powerdomain framework
powerdomain-common += powerdomain.o powerdomain-common.o
-obj-$(CONFIG_ARCH_OMAP2) += $(powerdomain-common) \
- powerdomain2xxx_3xxx.o \
- powerdomains2xxx_data.o \
- powerdomains2xxx_3xxx_data.o
-obj-$(CONFIG_ARCH_OMAP3) += $(powerdomain-common) \
- powerdomain2xxx_3xxx.o \
- powerdomains3xxx_data.o \
- powerdomains2xxx_3xxx_data.o
-obj-$(CONFIG_ARCH_OMAP4) += $(powerdomain-common) \
- powerdomain44xx.o \
- powerdomains44xx_data.o
+obj-$(CONFIG_ARCH_OMAP2) += $(powerdomain-common)
+obj-$(CONFIG_ARCH_OMAP2) += powerdomains2xxx_data.o
+obj-$(CONFIG_ARCH_OMAP2) += powerdomain2xxx_3xxx.o
+obj-$(CONFIG_ARCH_OMAP2) += powerdomains2xxx_3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3) += $(powerdomain-common)
+obj-$(CONFIG_ARCH_OMAP3) += powerdomain2xxx_3xxx.o
+obj-$(CONFIG_ARCH_OMAP3) += powerdomains3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3) += powerdomains2xxx_3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP4) += $(powerdomain-common)
+obj-$(CONFIG_ARCH_OMAP4) += powerdomain44xx.o
+obj-$(CONFIG_ARCH_OMAP4) += powerdomains44xx_data.o
# PRCM clockdomain control
-clockdomain-common += clockdomain.o \
- clockdomains_common_data.o
-obj-$(CONFIG_ARCH_OMAP2) += $(clockdomain-common) \
- clockdomain2xxx_3xxx.o \
- clockdomains2xxx_3xxx_data.o
+clockdomain-common += clockdomain.o
+clockdomain-common += clockdomains_common_data.o
+obj-$(CONFIG_ARCH_OMAP2) += $(clockdomain-common)
+obj-$(CONFIG_ARCH_OMAP2) += clockdomain2xxx_3xxx.o
+obj-$(CONFIG_ARCH_OMAP2) += clockdomains2xxx_3xxx_data.o
obj-$(CONFIG_SOC_OMAP2420) += clockdomains2420_data.o
obj-$(CONFIG_SOC_OMAP2430) += clockdomains2430_data.o
-obj-$(CONFIG_ARCH_OMAP3) += $(clockdomain-common) \
- clockdomain2xxx_3xxx.o \
- clockdomains2xxx_3xxx_data.o \
- clockdomains3xxx_data.o
-obj-$(CONFIG_ARCH_OMAP4) += $(clockdomain-common) \
- clockdomain44xx.o \
- clockdomains44xx_data.o
+obj-$(CONFIG_ARCH_OMAP3) += $(clockdomain-common)
+obj-$(CONFIG_ARCH_OMAP3) += clockdomain2xxx_3xxx.o
+obj-$(CONFIG_ARCH_OMAP3) += clockdomains2xxx_3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3) += clockdomains3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP4) += $(clockdomain-common)
+obj-$(CONFIG_ARCH_OMAP4) += clockdomain44xx.o
+obj-$(CONFIG_ARCH_OMAP4) += clockdomains44xx_data.o
# Clock framework
-obj-$(CONFIG_ARCH_OMAP2) += $(clock-common) clock2xxx.o \
- clkt2xxx_sys.o \
- clkt2xxx_dpllcore.o \
- clkt2xxx_virt_prcm_set.o \
- clkt2xxx_apll.o clkt2xxx_osc.o \
- clkt2xxx_dpll.o clkt_iclk.o
+obj-$(CONFIG_ARCH_OMAP2) += $(clock-common) clock2xxx.o
+obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_sys.o
+obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_dpllcore.o
+obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_virt_prcm_set.o
+obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_apll.o clkt2xxx_osc.o
+obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_dpll.o clkt_iclk.o
obj-$(CONFIG_SOC_OMAP2420) += clock2420_data.o
obj-$(CONFIG_SOC_OMAP2430) += clock2430.o clock2430_data.o
-obj-$(CONFIG_ARCH_OMAP3) += $(clock-common) clock3xxx.o \
- clock34xx.o clkt34xx_dpll3m2.o \
- clock3517.o clock36xx.o \
- dpll3xxx.o clock3xxx_data.o \
- clkt_iclk.o
-obj-$(CONFIG_ARCH_OMAP4) += $(clock-common) clock44xx_data.o \
- dpll3xxx.o dpll44xx.o
+obj-$(CONFIG_ARCH_OMAP3) += $(clock-common) clock3xxx.o
+obj-$(CONFIG_ARCH_OMAP3) += clock34xx.o clkt34xx_dpll3m2.o
+obj-$(CONFIG_ARCH_OMAP3) += clock3517.o clock36xx.o
+obj-$(CONFIG_ARCH_OMAP3) += dpll3xxx.o clock3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3) += clkt_iclk.o
+obj-$(CONFIG_ARCH_OMAP4) += $(clock-common) clock44xx_data.o
+obj-$(CONFIG_ARCH_OMAP4) += dpll3xxx.o dpll44xx.o
# OMAP2 clock rate set data (old "OPP" data)
obj-$(CONFIG_SOC_OMAP2420) += opp2420_data.o
obj-$(CONFIG_SOC_OMAP2430) += opp2430_data.o
# hwmod data
-obj-$(CONFIG_SOC_OMAP2420) += omap_hwmod_2xxx_ipblock_data.o \
- omap_hwmod_2xxx_3xxx_ipblock_data.o \
- omap_hwmod_2xxx_interconnect_data.o \
- omap_hwmod_2xxx_3xxx_interconnect_data.o \
- omap_hwmod_2420_data.o
-obj-$(CONFIG_SOC_OMAP2430) += omap_hwmod_2xxx_ipblock_data.o \
- omap_hwmod_2xxx_3xxx_ipblock_data.o \
- omap_hwmod_2xxx_interconnect_data.o \
- omap_hwmod_2xxx_3xxx_interconnect_data.o \
- omap_hwmod_2430_data.o
-obj-$(CONFIG_ARCH_OMAP3) += omap_hwmod_2xxx_3xxx_ipblock_data.o \
- omap_hwmod_2xxx_3xxx_interconnect_data.o \
- omap_hwmod_3xxx_data.o
+obj-$(CONFIG_SOC_OMAP2420) += omap_hwmod_2xxx_ipblock_data.o
+obj-$(CONFIG_SOC_OMAP2420) += omap_hwmod_2xxx_3xxx_ipblock_data.o
+obj-$(CONFIG_SOC_OMAP2420) += omap_hwmod_2xxx_interconnect_data.o
+obj-$(CONFIG_SOC_OMAP2420) += omap_hwmod_2xxx_3xxx_interconnect_data.o
+obj-$(CONFIG_SOC_OMAP2420) += omap_hwmod_2420_data.o
+obj-$(CONFIG_SOC_OMAP2430) += omap_hwmod_2xxx_ipblock_data.o
+obj-$(CONFIG_SOC_OMAP2430) += omap_hwmod_2xxx_3xxx_ipblock_data.o
+obj-$(CONFIG_SOC_OMAP2430) += omap_hwmod_2xxx_interconnect_data.o
+obj-$(CONFIG_SOC_OMAP2430) += omap_hwmod_2xxx_3xxx_interconnect_data.o
+obj-$(CONFIG_SOC_OMAP2430) += omap_hwmod_2430_data.o
+obj-$(CONFIG_ARCH_OMAP3) += omap_hwmod_2xxx_3xxx_ipblock_data.o
+obj-$(CONFIG_ARCH_OMAP3) += omap_hwmod_2xxx_3xxx_interconnect_data.o
+obj-$(CONFIG_ARCH_OMAP3) += omap_hwmod_3xxx_data.o
obj-$(CONFIG_ARCH_OMAP4) += omap_hwmod_44xx_data.o
# EMU peripherals
@@ -208,23 +205,19 @@ obj-$(CONFIG_MACH_OMAP3EVM) += board-omap3evm.o
obj-$(CONFIG_MACH_OMAP3_PANDORA) += board-omap3pandora.o
obj-$(CONFIG_MACH_OMAP_3430SDP) += board-3430sdp.o
obj-$(CONFIG_MACH_NOKIA_N8X0) += board-n8x0.o
-obj-$(CONFIG_MACH_NOKIA_RM680) += board-rm680.o \
- sdram-nokia.o
-obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51.o \
- sdram-nokia.o \
- board-rx51-peripherals.o \
- board-rx51-video.o
-obj-$(CONFIG_MACH_OMAP_ZOOM2) += board-zoom.o \
- board-zoom-peripherals.o \
- board-zoom-display.o \
- board-zoom-debugboard.o
-obj-$(CONFIG_MACH_OMAP_ZOOM3) += board-zoom.o \
- board-zoom-peripherals.o \
- board-zoom-display.o \
- board-zoom-debugboard.o
-obj-$(CONFIG_MACH_OMAP_3630SDP) += board-3630sdp.o \
- board-zoom-peripherals.o \
- board-zoom-display.o
+obj-$(CONFIG_MACH_NOKIA_RM680) += board-rm680.o sdram-nokia.o
+obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51.o sdram-nokia.o
+obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51-peripherals.o
+obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51-video.o
+obj-$(CONFIG_MACH_OMAP_ZOOM2) += board-zoom.o board-zoom-peripherals.o
+obj-$(CONFIG_MACH_OMAP_ZOOM2) += board-zoom-display.o
+obj-$(CONFIG_MACH_OMAP_ZOOM2) += board-zoom-debugboard.o
+obj-$(CONFIG_MACH_OMAP_ZOOM3) += board-zoom.o board-zoom-peripherals.o
+obj-$(CONFIG_MACH_OMAP_ZOOM3) += board-zoom-display.o
+obj-$(CONFIG_MACH_OMAP_ZOOM3) += board-zoom-debugboard.o
+obj-$(CONFIG_MACH_OMAP_3630SDP) += board-3630sdp.o
+obj-$(CONFIG_MACH_OMAP_3630SDP) += board-zoom-peripherals.o
+obj-$(CONFIG_MACH_OMAP_3630SDP) += board-zoom-display.o
obj-$(CONFIG_MACH_CM_T35) += board-cm-t35.o
obj-$(CONFIG_MACH_CM_T3517) += board-cm-t3517.o
obj-$(CONFIG_MACH_IGEP0020) += board-igep0020.o
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index e658f835d0d..99ca6bad5c3 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -303,6 +303,7 @@ MACHINE_START(OMAP_2430SDP, "OMAP2430 sdp2430 board")
.init_irq = omap2_init_irq,
.handle_irq = omap2_intc_handle_irq,
.init_machine = omap_2430sdp_init,
+ .init_late = omap2430_init_late,
.timer = &omap2_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index 37abb0d49b5..a98c688058a 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -605,6 +605,7 @@ MACHINE_START(OMAP_3430SDP, "OMAP3430 3430SDP board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap_3430sdp_init,
+ .init_late = omap3430_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c
index 6ef350d1ae4..2dc9ba523c7 100644
--- a/arch/arm/mach-omap2/board-3630sdp.c
+++ b/arch/arm/mach-omap2/board-3630sdp.c
@@ -217,6 +217,7 @@ MACHINE_START(OMAP_3630SDP, "OMAP 3630SDP board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap_sdp_init,
+ .init_late = omap3630_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 94af6cde2e3..8e17284a803 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -912,6 +912,7 @@ MACHINE_START(OMAP_4430SDP, "OMAP4430 4430SDP board")
.init_irq = gic_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = omap_4430sdp_init,
+ .init_late = omap4430_init_late,
.timer = &omap4_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-am3517crane.c b/arch/arm/mach-omap2/board-am3517crane.c
index 3b8a53c1f2a..92432c28673 100644
--- a/arch/arm/mach-omap2/board-am3517crane.c
+++ b/arch/arm/mach-omap2/board-am3517crane.c
@@ -102,6 +102,7 @@ MACHINE_START(CRANEBOARD, "AM3517/05 CRANEBOARD")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = am3517_crane_init,
+ .init_late = am35xx_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index 99790eb646e..18f601096ce 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -385,6 +385,7 @@ MACHINE_START(OMAP3517EVM, "OMAP3517/AM3517 EVM")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = am3517_evm_init,
+ .init_late = am35xx_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-apollon.c b/arch/arm/mach-omap2/board-apollon.c
index 768ece2e9c3..502c31e123b 100644
--- a/arch/arm/mach-omap2/board-apollon.c
+++ b/arch/arm/mach-omap2/board-apollon.c
@@ -356,6 +356,7 @@ MACHINE_START(OMAP_APOLLON, "OMAP24xx Apollon")
.init_irq = omap2_init_irq,
.handle_irq = omap2_intc_handle_irq,
.init_machine = omap_apollon_init,
+ .init_late = omap2420_init_late,
.timer = &omap2_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index c03df142ea6..ded100c80a9 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -669,6 +669,7 @@ MACHINE_START(CM_T35, "Compulab CM-T35")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = cm_t35_init,
+ .init_late = omap35xx_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
@@ -681,6 +682,7 @@ MACHINE_START(CM_T3730, "Compulab CM-T3730")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = cm_t3730_init,
+ .init_late = omap3630_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-cm-t3517.c b/arch/arm/mach-omap2/board-cm-t3517.c
index 9e66e167e4f..a33ad4641d9 100644
--- a/arch/arm/mach-omap2/board-cm-t3517.c
+++ b/arch/arm/mach-omap2/board-cm-t3517.c
@@ -303,6 +303,7 @@ MACHINE_START(CM_T3517, "Compulab CM-T3517")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = cm_t3517_init,
+ .init_late = am35xx_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index b063f0d2faa..6567c1cd557 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -644,6 +644,7 @@ MACHINE_START(DEVKIT8000, "OMAP3 Devkit8000")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = devkit8000_init,
+ .init_late = omap35xx_init_late,
.timer = &omap3_secure_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 7302ba7ff1b..20293465786 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -125,6 +125,7 @@ DT_MACHINE_START(OMAP4_DT, "Generic OMAP4 (Flattened Device Tree)")
.init_irq = omap_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = omap_generic_init,
+ .init_late = omap4430_init_late,
.timer = &omap4_timer,
.dt_compat = omap4_boards_compat,
.restart = omap_prcm_restart,
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index 0bbbabe28fc..876becf8205 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -398,6 +398,7 @@ MACHINE_START(OMAP_H4, "OMAP2420 H4 board")
.init_irq = omap2_init_irq,
.handle_irq = omap2_intc_handle_irq,
.init_machine = omap_h4_init,
+ .init_late = omap2420_init_late,
.timer = &omap2_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 7a274098f67..74915295482 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -650,6 +650,7 @@ MACHINE_START(IGEP0020, "IGEP v2 board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = igep_init,
+ .init_late = omap35xx_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
@@ -662,6 +663,7 @@ MACHINE_START(IGEP0030, "IGEP OMAP3 module")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = igep_init,
+ .init_late = omap35xx_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index 1b6049567ab..ef9e8297749 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -442,6 +442,7 @@ MACHINE_START(OMAP_LDP, "OMAP LDP board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap_ldp_init,
+ .init_late = omap3430_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 518091c5f77..8ca14e88a31 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -694,6 +694,7 @@ MACHINE_START(NOKIA_N800, "Nokia N800")
.init_irq = omap2_init_irq,
.handle_irq = omap2_intc_handle_irq,
.init_machine = n8x0_init_machine,
+ .init_late = omap2420_init_late,
.timer = &omap2_timer,
.restart = omap_prcm_restart,
MACHINE_END
@@ -706,6 +707,7 @@ MACHINE_START(NOKIA_N810, "Nokia N810")
.init_irq = omap2_init_irq,
.handle_irq = omap2_intc_handle_irq,
.init_machine = n8x0_init_machine,
+ .init_late = omap2420_init_late,
.timer = &omap2_timer,
.restart = omap_prcm_restart,
MACHINE_END
@@ -718,6 +720,7 @@ MACHINE_START(NOKIA_N810_WIMAX, "Nokia N810 WiMAX")
.init_irq = omap2_init_irq,
.handle_irq = omap2_intc_handle_irq,
.init_machine = n8x0_init_machine,
+ .init_late = omap2420_init_late,
.timer = &omap2_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 2a7b9a9da1d..79c6909eeb7 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -543,6 +543,7 @@ MACHINE_START(OMAP3_BEAGLE, "OMAP3 Beagle Board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap3_beagle_init,
+ .init_late = omap3_init_late,
.timer = &omap3_secure_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index ace3c675e9c..639bd07ea38 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -671,6 +671,7 @@ MACHINE_START(OMAP3EVM, "OMAP3 EVM")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap3_evm_init,
+ .init_late = omap35xx_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3logic.c b/arch/arm/mach-omap2/board-omap3logic.c
index c008bf8e1c3..932e1778aff 100644
--- a/arch/arm/mach-omap2/board-omap3logic.c
+++ b/arch/arm/mach-omap2/board-omap3logic.c
@@ -242,6 +242,7 @@ MACHINE_START(OMAP3_TORPEDO, "Logic OMAP3 Torpedo board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap3logic_init,
+ .init_late = omap35xx_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
@@ -254,6 +255,7 @@ MACHINE_START(OMAP3530_LV_SOM, "OMAP Logic 3530 LV SOM board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap3logic_init,
+ .init_late = omap35xx_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 33d995d0f07..57aebee44fd 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -622,6 +622,7 @@ MACHINE_START(OMAP3_PANDORA, "Pandora Handheld Console")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap3pandora_init,
+ .init_late = omap35xx_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
index 4396bae9167..b318f5602e3 100644
--- a/arch/arm/mach-omap2/board-omap3stalker.c
+++ b/arch/arm/mach-omap2/board-omap3stalker.c
@@ -436,6 +436,7 @@ MACHINE_START(SBC3530, "OMAP3 STALKER")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap3_stalker_init,
+ .init_late = omap35xx_init_late,
.timer = &omap3_secure_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
index ae2251fa4a6..485d14d6a8c 100644
--- a/arch/arm/mach-omap2/board-omap3touchbook.c
+++ b/arch/arm/mach-omap2/board-omap3touchbook.c
@@ -387,6 +387,7 @@ MACHINE_START(TOUCHBOOK, "OMAP3 touchbook Board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap3_touchbook_init,
+ .init_late = omap3430_init_late,
.timer = &omap3_secure_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index 68b8fc9ff01..982fb2622ab 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -521,6 +521,7 @@ MACHINE_START(OMAP4_PANDA, "OMAP4 Panda board")
.init_irq = gic_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = omap4_panda_init,
+ .init_late = omap4430_init_late,
.timer = &omap4_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 5527c1979a1..8fa2fc3a4c3 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -554,6 +554,7 @@ MACHINE_START(OVERO, "Gumstix Overo")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = overo_init,
+ .init_late = omap35xx_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-rm680.c b/arch/arm/mach-omap2/board-rm680.c
index ae53d71f0ce..0ad1bb3bdb9 100644
--- a/arch/arm/mach-omap2/board-rm680.c
+++ b/arch/arm/mach-omap2/board-rm680.c
@@ -151,6 +151,7 @@ MACHINE_START(NOKIA_RM680, "Nokia RM-680 board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = rm680_init,
+ .init_late = omap3630_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
@@ -163,6 +164,7 @@ MACHINE_START(NOKIA_RM696, "Nokia RM-696 board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = rm680_init,
+ .init_late = omap3630_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index 2da92a6ba40..345dd931f76 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -127,6 +127,7 @@ MACHINE_START(NOKIA_RX51, "Nokia RX-51 board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = rx51_init,
+ .init_late = omap3430_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-ti8168evm.c b/arch/arm/mach-omap2/board-ti8168evm.c
index ab9a7a9e9d6..d4c8392cadb 100644
--- a/arch/arm/mach-omap2/board-ti8168evm.c
+++ b/arch/arm/mach-omap2/board-ti8168evm.c
@@ -52,6 +52,7 @@ MACHINE_START(TI8168EVM, "ti8168evm")
.init_irq = ti81xx_init_irq,
.timer = &omap3_timer,
.init_machine = ti81xx_evm_init,
+ .init_late = ti81xx_init_late,
.restart = omap_prcm_restart,
MACHINE_END
@@ -63,5 +64,6 @@ MACHINE_START(TI8148EVM, "ti8148evm")
.init_irq = ti81xx_init_irq,
.timer = &omap3_timer,
.init_machine = ti81xx_evm_init,
+ .init_late = ti81xx_init_late,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/board-zoom.c b/arch/arm/mach-omap2/board-zoom.c
index 5c20bcc57f2..4e7e56142e6 100644
--- a/arch/arm/mach-omap2/board-zoom.c
+++ b/arch/arm/mach-omap2/board-zoom.c
@@ -137,6 +137,7 @@ MACHINE_START(OMAP_ZOOM2, "OMAP Zoom2 board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap_zoom_init,
+ .init_late = omap3430_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
@@ -149,6 +150,7 @@ MACHINE_START(OMAP_ZOOM3, "OMAP Zoom3 board")
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap_zoom_init,
+ .init_late = omap3630_init_late,
.timer = &omap3_timer,
.restart = omap_prcm_restart,
MACHINE_END
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index d6c9e618031..be9dfd1abe6 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -55,7 +55,7 @@ static inline void omap34xx_map_common_io(void)
}
#endif
-#ifdef CONFIG_SOC_OMAPTI81XX
+#ifdef CONFIG_SOC_TI81XX
extern void omapti81xx_map_common_io(void);
#else
static inline void omapti81xx_map_common_io(void)
@@ -63,7 +63,7 @@ static inline void omapti81xx_map_common_io(void)
}
#endif
-#ifdef CONFIG_SOC_OMAPAM33XX
+#ifdef CONFIG_SOC_AM33XX
extern void omapam33xx_map_common_io(void);
#else
static inline void omapam33xx_map_common_io(void)
@@ -79,6 +79,42 @@ static inline void omap44xx_map_common_io(void)
}
#endif
+#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP2)
+int omap2_pm_init(void);
+#else
+static inline int omap2_pm_init(void)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP3)
+int omap3_pm_init(void);
+#else
+static inline int omap3_pm_init(void)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP4)
+int omap4_pm_init(void);
+#else
+static inline int omap4_pm_init(void)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_OMAP_MUX
+int omap_mux_late_init(void);
+#else
+static inline int omap_mux_late_init(void)
+{
+ return 0;
+}
+#endif
+
extern void omap2_init_common_infrastructure(void);
extern struct sys_timer omap2_timer;
@@ -95,6 +131,17 @@ void omap3_init_early(void); /* Do not use this one */
void am35xx_init_early(void);
void ti81xx_init_early(void);
void omap4430_init_early(void);
+void omap3_init_late(void); /* Do not use this one */
+void omap4430_init_late(void);
+void omap2420_init_late(void);
+void omap2430_init_late(void);
+void omap3430_init_late(void);
+void omap35xx_init_late(void);
+void omap3630_init_late(void);
+void am35xx_init_late(void);
+void ti81xx_init_late(void);
+void omap4430_init_late(void);
+int omap2_common_pm_late_init(void);
void omap_prcm_restart(char, const char *);
/*
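
The declarations added to common.h follow the standard Kconfig stub pattern: when a feature is configured out, a static inline no-op stands in for the real function, so the init_late hooks can call it unconditionally. A minimal sketch of the idea, using a hypothetical CONFIG_FOO option and foo_late_init() helper (illustrative only, not part of the patch):

#if defined(CONFIG_FOO)
int foo_late_init(void);			/* real implementation elsewhere */
#else
static inline int foo_late_init(void)
{
	return 0;				/* compiled away when CONFIG_FOO=n */
}
#endif

/* callers need no #ifdef of their own */
void __init foo_init_late(void)
{
	foo_late_init();
}
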
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index ae62ece04ef..7b4b9327e54 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -645,7 +645,11 @@ static inline void omap242x_mmc_mux(struct omap_mmc_platform_data
void __init omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
{
- char *name = "mmci-omap";
+ struct platform_device *pdev;
+ struct omap_hwmod *oh;
+ int id = 0;
+ char *oh_name = "msdi1";
+ char *dev_name = "mmci-omap";
if (!mmc_data[0]) {
pr_err("%s fails: Incomplete platform data\n", __func__);
@@ -653,8 +657,17 @@ void __init omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
}
omap242x_mmc_mux(mmc_data[0]);
- omap_mmc_add(name, 0, OMAP2_MMC1_BASE, OMAP2420_MMC_SIZE,
- INT_24XX_MMC_IRQ, mmc_data[0]);
+
+ oh = omap_hwmod_lookup(oh_name);
+ if (!oh) {
+ pr_err("Could not look up %s\n", oh_name);
+ return;
+ }
+ pdev = omap_device_build(dev_name, id, oh, mmc_data[0],
+ sizeof(struct omap_mmc_platform_data), NULL, 0, 0);
+ if (IS_ERR(pdev))
+ WARN(1, "Can't build omap_device for %s:%s.\n",
+ dev_name, oh->name);
}
#endif
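
The MMC hunk above replaces the fixed omap_mmc_add() registration with the hwmod/omap_device flow. A condensed sketch of that pattern, with placeholder hwmod and driver names (the call signatures mirror what the diff shows, but this block is illustrative, not part of the patch):

static void __init example_init_device(void *pdata, int pdata_len)
{
	struct omap_hwmod *oh;
	struct platform_device *pdev;

	oh = omap_hwmod_lookup("example_hwmod");	/* placeholder name */
	if (!oh) {
		pr_err("Could not look up example_hwmod\n");
		return;
	}

	/* builds and registers a platform_device bound to the hwmod */
	pdev = omap_device_build("example-driver", 0, oh, pdata, pdata_len,
				 NULL, 0, 0);
	if (IS_ERR(pdev))
		WARN(1, "Can't build omap_device for example-driver:%s\n",
		     oh->name);
}
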
diff --git a/arch/arm/mach-omap2/dma.c b/arch/arm/mach-omap2/dma.c
index b19d8496c16..ff75abe60af 100644
--- a/arch/arm/mach-omap2/dma.c
+++ b/arch/arm/mach-omap2/dma.c
@@ -227,10 +227,6 @@ static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused)
dma_stride = OMAP2_DMA_STRIDE;
dma_common_ch_start = CSDP;
- if (cpu_is_omap3630() || cpu_is_omap44xx())
- dma_common_ch_end = CCDN;
- else
- dma_common_ch_end = CCFN;
p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL);
if (!p) {
@@ -277,6 +273,13 @@ static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused)
dev_err(&pdev->dev, "%s: kzalloc fail\n", __func__);
return -ENOMEM;
}
+
+ /* Check the capabilities register for descriptor loading feature */
+ if (dma_read(CAPS_0, 0) & DMA_HAS_DESCRIPTOR_CAPS)
+ dma_common_ch_end = CCDN;
+ else
+ dma_common_ch_end = CCFN;
+
return 0;
}
diff --git a/arch/arm/mach-omap2/dsp.c b/arch/arm/mach-omap2/dsp.c
index 3376388b317..845309f146f 100644
--- a/arch/arm/mach-omap2/dsp.c
+++ b/arch/arm/mach-omap2/dsp.c
@@ -28,8 +28,6 @@
#include <plat/dsp.h>
-extern phys_addr_t omap_dsp_get_mempool_base(void);
-
static struct platform_device *omap_dsp_pdev;
static struct omap_dsp_platform_data omap_dsp_pdata __initdata = {
@@ -47,6 +45,31 @@ static struct omap_dsp_platform_data omap_dsp_pdata __initdata = {
.dsp_cm_rmw_bits = omap2_cm_rmw_mod_reg_bits,
};
+static phys_addr_t omap_dsp_phys_mempool_base;
+
+void __init omap_dsp_reserve_sdram_memblock(void)
+{
+ phys_addr_t size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE;
+ phys_addr_t paddr;
+
+ if (!size)
+ return;
+
+ paddr = arm_memblock_steal(size, SZ_1M);
+ if (!paddr) {
+ pr_err("%s: failed to reserve %llx bytes\n",
+ __func__, (unsigned long long)size);
+ return;
+ }
+
+ omap_dsp_phys_mempool_base = paddr;
+}
+
+static phys_addr_t omap_dsp_get_mempool_base(void)
+{
+ return omap_dsp_phys_mempool_base;
+}
+
static int __init omap_dsp_init(void)
{
struct platform_device *pdev;
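
arm_memblock_steal() is only legal while early memblock reservations are still permitted, so omap_dsp_reserve_sdram_memblock() is expected to run from a machine's early reserve path. A hedged sketch of such wiring, with a hypothetical board name (not taken from this patch):

static void __init example_board_reserve(void)
{
	/* grab the DSP carveout before memory is handed to the page
	 * allocator; arm_memblock_steal() may only be called this early
	 */
	omap_dsp_reserve_sdram_memblock();
}

MACHINE_START(EXAMPLE_BOARD, "Example board")
	.reserve	= example_board_reserve,
	/* remaining fields as in the board files above */
MACHINE_END
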
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 580e684e882..46b09dae770 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -50,6 +50,19 @@
#define GPMC_ECC_SIZE_CONFIG 0x1fc
#define GPMC_ECC1_RESULT 0x200
+/* GPMC ECC control settings */
+#define GPMC_ECC_CTRL_ECCCLEAR 0x100
+#define GPMC_ECC_CTRL_ECCDISABLE 0x000
+#define GPMC_ECC_CTRL_ECCREG1 0x001
+#define GPMC_ECC_CTRL_ECCREG2 0x002
+#define GPMC_ECC_CTRL_ECCREG3 0x003
+#define GPMC_ECC_CTRL_ECCREG4 0x004
+#define GPMC_ECC_CTRL_ECCREG5 0x005
+#define GPMC_ECC_CTRL_ECCREG6 0x006
+#define GPMC_ECC_CTRL_ECCREG7 0x007
+#define GPMC_ECC_CTRL_ECCREG8 0x008
+#define GPMC_ECC_CTRL_ECCREG9 0x009
+
#define GPMC_CS0_OFFSET 0x60
#define GPMC_CS_SIZE 0x30
@@ -860,8 +873,9 @@ int gpmc_enable_hwecc(int cs, int mode, int dev_width, int ecc_size)
gpmc_ecc_used = cs;
/* clear ecc and enable bits */
- val = ((0x00000001<<8) | 0x00000001);
- gpmc_write_reg(GPMC_ECC_CONTROL, val);
+ gpmc_write_reg(GPMC_ECC_CONTROL,
+ GPMC_ECC_CTRL_ECCCLEAR |
+ GPMC_ECC_CTRL_ECCREG1);
/* program ecc and result sizes */
val = ((((ecc_size >> 1) - 1) << 22) | (0x0000000F));
@@ -869,13 +883,15 @@ int gpmc_enable_hwecc(int cs, int mode, int dev_width, int ecc_size)
switch (mode) {
case GPMC_ECC_READ:
- gpmc_write_reg(GPMC_ECC_CONTROL, 0x101);
+ case GPMC_ECC_WRITE:
+ gpmc_write_reg(GPMC_ECC_CONTROL,
+ GPMC_ECC_CTRL_ECCCLEAR |
+ GPMC_ECC_CTRL_ECCREG1);
break;
case GPMC_ECC_READSYN:
- gpmc_write_reg(GPMC_ECC_CONTROL, 0x100);
- break;
- case GPMC_ECC_WRITE:
- gpmc_write_reg(GPMC_ECC_CONTROL, 0x101);
+ gpmc_write_reg(GPMC_ECC_CONTROL,
+ GPMC_ECC_CTRL_ECCCLEAR |
+ GPMC_ECC_CTRL_ECCDISABLE);
break;
default:
printk(KERN_INFO "Error: Unrecognized Mode[%d]!\n", mode);
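
The new GPMC_ECC_CTRL_* names are plain substitutions for the magic numbers they replace; a small self-check sketch (assuming BUILD_BUG_ON from <linux/bug.h>; not part of the patch) makes the equivalence explicit:

#include <linux/bug.h>

static inline void gpmc_ecc_ctrl_selfcheck(void)
{
	/* old GPMC_ECC_READ/GPMC_ECC_WRITE value: 0x100 | 0x001 == 0x101 */
	BUILD_BUG_ON((GPMC_ECC_CTRL_ECCCLEAR | GPMC_ECC_CTRL_ECCREG1) != 0x101);
	/* old GPMC_ECC_READSYN value: 0x100 | 0x000 == 0x100 */
	BUILD_BUG_ON((GPMC_ECC_CTRL_ECCCLEAR | GPMC_ECC_CTRL_ECCDISABLE) != 0x100);
}
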
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index b0268eaffe1..be697d4e084 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -355,7 +355,7 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
*
* temporary HACK: ocr_mask instead of fixed supply
*/
- if (cpu_is_omap3505() || cpu_is_omap3517())
+ if (soc_is_am35xx())
mmc->slots[0].ocr_mask = MMC_VDD_165_195 |
MMC_VDD_26_27 |
MMC_VDD_27_28 |
@@ -365,7 +365,7 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
else
mmc->slots[0].ocr_mask = c->ocr_mask;
- if (!cpu_is_omap3517() && !cpu_is_omap3505())
+ if (!soc_is_am35xx())
mmc->slots[0].features |= HSMMC_HAS_PBIAS;
if (cpu_is_omap44xx() && (omap_rev() > OMAP4430_REV_ES1_0))
@@ -388,7 +388,7 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
}
}
- if (cpu_is_omap3517() || cpu_is_omap3505())
+ if (soc_is_am35xx())
mmc->slots[0].set_power = nop_mmc_set_power;
/* OMAP3630 HSMMC1 supports only 4-bit */
@@ -400,7 +400,7 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
}
break;
case 2:
- if (cpu_is_omap3517() || cpu_is_omap3505())
+ if (soc_is_am35xx())
mmc->slots[0].set_power = am35x_hsmmc2_set_power;
if (c->ext_clock)
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index f1398171d8a..0389b3264ab 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -185,8 +185,7 @@ static void __init omap3_cpuinfo(void)
*/
if (cpu_is_omap3630()) {
cpu_name = "OMAP3630";
- } else if (cpu_is_omap3517()) {
- /* AM35xx devices */
+ } else if (soc_is_am35xx()) {
cpu_name = (omap3_has_sgx()) ? "AM3517" : "AM3505";
} else if (cpu_is_ti816x()) {
cpu_name = "TI816X";
@@ -352,13 +351,13 @@ void __init omap3xxx_check_revision(void)
*/
switch (rev) {
case 0:
- omap_revision = OMAP3517_REV_ES1_0;
+ omap_revision = AM35XX_REV_ES1_0;
cpu_rev = "1.0";
break;
case 1:
/* FALLTHROUGH */
default:
- omap_revision = OMAP3517_REV_ES1_1;
+ omap_revision = AM35XX_REV_ES1_1;
cpu_rev = "1.1";
}
break;
diff --git a/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h b/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h
index d79321b0f2a..548de90b58c 100644
--- a/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h
+++ b/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h
@@ -16,18 +16,10 @@
#define OMAP_WKG_ENB_B_0 0x14
#define OMAP_WKG_ENB_C_0 0x18
#define OMAP_WKG_ENB_D_0 0x1c
-#define OMAP_WKG_ENB_SECURE_A_0 0x20
-#define OMAP_WKG_ENB_SECURE_B_0 0x24
-#define OMAP_WKG_ENB_SECURE_C_0 0x28
-#define OMAP_WKG_ENB_SECURE_D_0 0x2c
#define OMAP_WKG_ENB_A_1 0x410
#define OMAP_WKG_ENB_B_1 0x414
#define OMAP_WKG_ENB_C_1 0x418
#define OMAP_WKG_ENB_D_1 0x41c
-#define OMAP_WKG_ENB_SECURE_A_1 0x420
-#define OMAP_WKG_ENB_SECURE_B_1 0x424
-#define OMAP_WKG_ENB_SECURE_C_1 0x428
-#define OMAP_WKG_ENB_SECURE_D_1 0x42c
#define OMAP_AUX_CORE_BOOT_0 0x800
#define OMAP_AUX_CORE_BOOT_1 0x804
#define OMAP_PTMSYNCREQ_MASK 0xc00
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 4b9491aa36f..8d014ba04ab 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -173,7 +173,7 @@ static struct map_desc omap34xx_io_desc[] __initdata = {
};
#endif
-#ifdef CONFIG_SOC_OMAPTI81XX
+#ifdef CONFIG_SOC_TI81XX
static struct map_desc omapti81xx_io_desc[] __initdata = {
{
.virtual = L4_34XX_VIRT,
@@ -184,7 +184,7 @@ static struct map_desc omapti81xx_io_desc[] __initdata = {
};
#endif
-#ifdef CONFIG_SOC_OMAPAM33XX
+#ifdef CONFIG_SOC_AM33XX
static struct map_desc omapam33xx_io_desc[] __initdata = {
{
.virtual = L4_34XX_VIRT,
@@ -216,41 +216,11 @@ static struct map_desc omap44xx_io_desc[] __initdata = {
.type = MT_DEVICE,
},
{
- .virtual = OMAP44XX_GPMC_VIRT,
- .pfn = __phys_to_pfn(OMAP44XX_GPMC_PHYS),
- .length = OMAP44XX_GPMC_SIZE,
- .type = MT_DEVICE,
- },
- {
- .virtual = OMAP44XX_EMIF1_VIRT,
- .pfn = __phys_to_pfn(OMAP44XX_EMIF1_PHYS),
- .length = OMAP44XX_EMIF1_SIZE,
- .type = MT_DEVICE,
- },
- {
- .virtual = OMAP44XX_EMIF2_VIRT,
- .pfn = __phys_to_pfn(OMAP44XX_EMIF2_PHYS),
- .length = OMAP44XX_EMIF2_SIZE,
- .type = MT_DEVICE,
- },
- {
- .virtual = OMAP44XX_DMM_VIRT,
- .pfn = __phys_to_pfn(OMAP44XX_DMM_PHYS),
- .length = OMAP44XX_DMM_SIZE,
- .type = MT_DEVICE,
- },
- {
.virtual = L4_PER_44XX_VIRT,
.pfn = __phys_to_pfn(L4_PER_44XX_PHYS),
.length = L4_PER_44XX_SIZE,
.type = MT_DEVICE,
},
- {
- .virtual = L4_EMU_44XX_VIRT,
- .pfn = __phys_to_pfn(L4_EMU_44XX_PHYS),
- .length = L4_EMU_44XX_SIZE,
- .type = MT_DEVICE,
- },
#ifdef CONFIG_OMAP4_ERRATA_I688
{
.virtual = OMAP4_SRAM_VA,
@@ -286,14 +256,14 @@ void __init omap34xx_map_common_io(void)
}
#endif
-#ifdef CONFIG_SOC_OMAPTI81XX
+#ifdef CONFIG_SOC_TI81XX
void __init omapti81xx_map_common_io(void)
{
iotable_init(omapti81xx_io_desc, ARRAY_SIZE(omapti81xx_io_desc));
}
#endif
-#ifdef CONFIG_SOC_OMAPAM33XX
+#ifdef CONFIG_SOC_AM33XX
void __init omapam33xx_map_common_io(void)
{
iotable_init(omapam33xx_io_desc, ARRAY_SIZE(omapam33xx_io_desc));
@@ -380,6 +350,13 @@ void __init omap2420_init_early(void)
omap_hwmod_init_postsetup();
omap2420_clk_init();
}
+
+void __init omap2420_init_late(void)
+{
+ omap_mux_late_init();
+ omap2_common_pm_late_init();
+ omap2_pm_init();
+}
#endif
#ifdef CONFIG_SOC_OMAP2430
@@ -395,6 +372,13 @@ void __init omap2430_init_early(void)
omap_hwmod_init_postsetup();
omap2430_clk_init();
}
+
+void __init omap2430_init_late(void)
+{
+ omap_mux_late_init();
+ omap2_common_pm_late_init();
+ omap2_pm_init();
+}
#endif
/*
@@ -449,6 +433,48 @@ void __init ti81xx_init_early(void)
omap_hwmod_init_postsetup();
omap3xxx_clk_init();
}
+
+void __init omap3_init_late(void)
+{
+ omap_mux_late_init();
+ omap2_common_pm_late_init();
+ omap3_pm_init();
+}
+
+void __init omap3430_init_late(void)
+{
+ omap_mux_late_init();
+ omap2_common_pm_late_init();
+ omap3_pm_init();
+}
+
+void __init omap35xx_init_late(void)
+{
+ omap_mux_late_init();
+ omap2_common_pm_late_init();
+ omap3_pm_init();
+}
+
+void __init omap3630_init_late(void)
+{
+ omap_mux_late_init();
+ omap2_common_pm_late_init();
+ omap3_pm_init();
+}
+
+void __init am35xx_init_late(void)
+{
+ omap_mux_late_init();
+ omap2_common_pm_late_init();
+ omap3_pm_init();
+}
+
+void __init ti81xx_init_late(void)
+{
+ omap_mux_late_init();
+ omap2_common_pm_late_init();
+ omap3_pm_init();
+}
#endif
#ifdef CONFIG_ARCH_OMAP4
@@ -465,6 +491,13 @@ void __init omap4430_init_early(void)
omap_hwmod_init_postsetup();
omap4xxx_clk_init();
}
+
+void __init omap4430_init_late(void)
+{
+ omap_mux_late_init();
+ omap2_common_pm_late_init();
+ omap4_pm_init();
+}
#endif
void __init omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
diff --git a/arch/arm/mach-omap2/iomap.h b/arch/arm/mach-omap2/iomap.h
index 0812b154f5b..80b88921fab 100644
--- a/arch/arm/mach-omap2/iomap.h
+++ b/arch/arm/mach-omap2/iomap.h
@@ -37,9 +37,6 @@
#define OMAP4_L3_PER_IO_OFFSET 0xb1100000
#define OMAP4_L3_PER_IO_ADDRESS(pa) IOMEM((pa) + OMAP4_L3_PER_IO_OFFSET)
-#define OMAP4_GPMC_IO_OFFSET 0xa9000000
-#define OMAP4_GPMC_IO_ADDRESS(pa) IOMEM((pa) + OMAP4_GPMC_IO_OFFSET)
-
#define OMAP2_EMU_IO_OFFSET 0xaa800000 /* Emulation */
#define OMAP2_EMU_IO_ADDRESS(pa) IOMEM((pa) + OMAP2_EMU_IO_OFFSET)
@@ -170,28 +167,3 @@
#define L4_ABE_44XX_VIRT (L4_ABE_44XX_PHYS + OMAP2_L4_IO_OFFSET)
#define L4_ABE_44XX_SIZE SZ_1M
-#define L4_EMU_44XX_PHYS L4_EMU_44XX_BASE
- /* 0x54000000 --> 0xfe800000 */
-#define L4_EMU_44XX_VIRT (L4_EMU_44XX_PHYS + OMAP2_EMU_IO_OFFSET)
-#define L4_EMU_44XX_SIZE SZ_8M
-
-#define OMAP44XX_GPMC_PHYS OMAP44XX_GPMC_BASE
- /* 0x50000000 --> 0xf9000000 */
-#define OMAP44XX_GPMC_VIRT (OMAP44XX_GPMC_PHYS + OMAP4_GPMC_IO_OFFSET)
-#define OMAP44XX_GPMC_SIZE SZ_1M
-
-
-#define OMAP44XX_EMIF1_PHYS OMAP44XX_EMIF1_BASE
- /* 0x4c000000 --> 0xfd100000 */
-#define OMAP44XX_EMIF1_VIRT (OMAP44XX_EMIF1_PHYS + OMAP4_L3_PER_IO_OFFSET)
-#define OMAP44XX_EMIF1_SIZE SZ_1M
-
-#define OMAP44XX_EMIF2_PHYS OMAP44XX_EMIF2_BASE
- /* 0x4d000000 --> 0xfd200000 */
-#define OMAP44XX_EMIF2_SIZE SZ_1M
-#define OMAP44XX_EMIF2_VIRT (OMAP44XX_EMIF1_VIRT + OMAP44XX_EMIF1_SIZE)
-
-#define OMAP44XX_DMM_PHYS OMAP44XX_DMM_BASE
- /* 0x4e000000 --> 0xfd300000 */
-#define OMAP44XX_DMM_SIZE SZ_1M
-#define OMAP44XX_DMM_VIRT (OMAP44XX_EMIF2_VIRT + OMAP44XX_EMIF2_SIZE)
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index 1ecf54565fe..fdc4303be56 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -231,7 +231,7 @@ static inline void omap_intc_handle_irq(void __iomem *base_addr, struct pt_regs
goto out;
irqnr = readl_relaxed(base_addr + 0xd8);
-#ifdef CONFIG_SOC_OMAPTI81XX
+#ifdef CONFIG_SOC_TI81XX
if (irqnr)
goto out;
irqnr = readl_relaxed(base_addr + 0xf8);
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 3268ee24ead..80e55c5c999 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -788,7 +788,7 @@ static void __init omap_mux_free_names(struct omap_mux *m)
}
/* Free all data except for GPIO pins unless CONFIG_DEBUG_FS is set */
-static int __init omap_mux_late_init(void)
+int __init omap_mux_late_init(void)
{
struct omap_mux_partition *partition;
int ret;
@@ -823,7 +823,6 @@ static int __init omap_mux_late_init(void)
return 0;
}
-late_initcall(omap_mux_late_init);
static void __init omap_mux_package_fixup(struct omap_mux *p,
struct omap_mux *superset)
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index fd48797fa95..b26d3c9bca1 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -3306,7 +3306,7 @@ int __init omap3xxx_hwmod_init(void)
rev == OMAP3430_REV_ES2_1 || rev == OMAP3430_REV_ES3_0 ||
rev == OMAP3430_REV_ES3_1 || rev == OMAP3430_REV_ES3_1_2) {
h = omap34xx_hwmod_ocp_ifs;
- } else if (rev == OMAP3517_REV_ES1_0 || rev == OMAP3517_REV_ES1_1) {
+ } else if (rev == AM35XX_REV_ES1_0 || rev == AM35XX_REV_ES1_1) {
h = am35xx_hwmod_ocp_ifs;
} else if (rev == OMAP3630_REV_ES1_0 || rev == OMAP3630_REV_ES1_1 ||
rev == OMAP3630_REV_ES1_2) {
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index d0c1c969599..9cb5cede0f5 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -295,7 +295,7 @@ static int __init omap2_common_pm_init(void)
}
postcore_initcall(omap2_common_pm_init);
-static int __init omap2_common_pm_late_init(void)
+int __init omap2_common_pm_late_init(void)
{
/*
* In the case of DT, the PMIC and SR initialization will be done using
@@ -322,4 +322,3 @@ static int __init omap2_common_pm_late_init(void)
return 0;
}
-late_initcall(omap2_common_pm_late_init);
diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
index facfffca9ea..2edeffc923a 100644
--- a/arch/arm/mach-omap2/pm24xx.c
+++ b/arch/arm/mach-omap2/pm24xx.c
@@ -298,13 +298,10 @@ static void __init prcm_setup_regs(void)
WKUP_MOD, PM_WKEN);
}
-static int __init omap2_pm_init(void)
+int __init omap2_pm_init(void)
{
u32 l;
- if (!cpu_is_omap24xx())
- return -ENODEV;
-
printk(KERN_INFO "Power Management for OMAP2 initializing\n");
l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_REVISION_OFFSET);
printk(KERN_INFO "PRCM revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);
@@ -370,17 +367,13 @@ static int __init omap2_pm_init(void)
* These routines need to be in SRAM as that's the only
* memory the MPU can see when it wakes up.
*/
- if (cpu_is_omap24xx()) {
- omap2_sram_idle = omap_sram_push(omap24xx_idle_loop_suspend,
- omap24xx_idle_loop_suspend_sz);
+ omap2_sram_idle = omap_sram_push(omap24xx_idle_loop_suspend,
+ omap24xx_idle_loop_suspend_sz);
- omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend,
- omap24xx_cpu_suspend_sz);
- }
+ omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend,
+ omap24xx_cpu_suspend_sz);
arm_pm_idle = omap2_pm_idle;
return 0;
}
-
-late_initcall(omap2_pm_init);
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 8b43aefba0e..a34023d0ca7 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -697,15 +697,12 @@ static void __init pm_errata_configure(void)
}
}
-static int __init omap3_pm_init(void)
+int __init omap3_pm_init(void)
{
struct power_state *pwrst, *tmp;
struct clockdomain *neon_clkdm, *mpu_clkdm;
int ret;
- if (!cpu_is_omap34xx())
- return -ENODEV;
-
if (!omap3_has_io_chain_ctrl())
pr_warning("PM: no software I/O chain control; some wakeups may be lost\n");
@@ -804,5 +801,3 @@ err2:
err1:
return ret;
}
-
-late_initcall(omap3_pm_init);
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index 88562535242..ea24174f570 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -141,15 +141,12 @@ static void omap_default_idle(void)
* Initializes all powerdomain and clockdomain target states
* and all PRCM settings.
*/
-static int __init omap4_pm_init(void)
+int __init omap4_pm_init(void)
{
int ret;
struct clockdomain *emif_clkdm, *mpuss_clkdm, *l3_1_clkdm, *l4wkup;
struct clockdomain *ducati_clkdm, *l3_2_clkdm, *l4_per_clkdm;
- if (!cpu_is_omap44xx())
- return -ENODEV;
-
if (omap_rev() == OMAP4430_REV_ES1_0) {
WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
return -ENODEV;
@@ -217,4 +214,3 @@ static int __init omap4_pm_init(void)
err2:
return ret;
}
-late_initcall(omap4_pm_init);
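
pm.c, pm24xx.c, pm34xx.c and pm44xx.c above all apply the same mechanical refactor: drop the static keyword and the late_initcall() registration, remove the runtime cpu_is_*() guard, and let the matching SoC init_late hook call the function directly. A condensed before/after sketch with a hypothetical foo_pm_init():

/* Before: self-registering initcall, gated by a runtime SoC check. */
static int __init foo_pm_init(void)
{
	if (!cpu_is_foo())		/* hypothetical SoC check */
		return -ENODEV;
	/* ... PM setup ... */
	return 0;
}
late_initcall(foo_pm_init);

/* After: exported and invoked only from foo_init_late(), so both the
 * initcall registration and the SoC check disappear.
 */
int __init foo_pm_init(void)
{
	/* ... PM setup ... */
	return 0;
}
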
diff --git a/arch/arm/mach-omap2/powerdomains3xxx_data.c b/arch/arm/mach-omap2/powerdomains3xxx_data.c
index b7ea468eea3..fb0a0a6869d 100644
--- a/arch/arm/mach-omap2/powerdomains3xxx_data.c
+++ b/arch/arm/mach-omap2/powerdomains3xxx_data.c
@@ -311,7 +311,7 @@ void __init omap3xxx_powerdomains_init(void)
rev == OMAP3430_REV_ES3_0 || rev == OMAP3630_REV_ES1_0)
pwrdm_register_pwrdms(powerdomains_omap3430es2_es3_0);
else if (rev == OMAP3430_REV_ES3_1 || rev == OMAP3430_REV_ES3_1_2 ||
- rev == OMAP3517_REV_ES1_0 || rev == OMAP3517_REV_ES1_1 ||
+ rev == AM35XX_REV_ES1_0 || rev == AM35XX_REV_ES1_1 ||
rev == OMAP3630_REV_ES1_1 || rev == OMAP3630_REV_ES1_2)
pwrdm_register_pwrdms(powerdomains_omap3430es3_1plus);
else
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 1b7835865c8..840929bd9da 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -90,7 +90,7 @@ static irqreturn_t omap2_gp_timer_interrupt(int irq, void *dev_id)
}
static struct irqaction omap2_gp_timer_irq = {
- .name = "gp timer",
+ .name = "gp_timer",
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
.handler = omap2_gp_timer_interrupt,
};
@@ -132,7 +132,7 @@ static void omap2_gp_timer_set_mode(enum clock_event_mode mode,
}
static struct clock_event_device clockevent_gpt = {
- .name = "gp timer",
+ .name = "gp_timer",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.shift = 32,
.set_next_event = omap2_gp_timer_set_next_event,
@@ -236,22 +236,8 @@ static void __init omap2_gp_clockevent_init(int gptimer_id,
}
/* Clocksource code */
-
-#ifdef CONFIG_OMAP_32K_TIMER
-/*
- * When 32k-timer is enabled, don't use GPTimer for clocksource
- * instead, just leave default clocksource which uses the 32k
- * sync counter. See clocksource setup in plat-omap/counter_32k.c
- */
-
-static void __init omap2_gp_clocksource_init(int unused, const char *dummy)
-{
- omap_init_clocksource_32k();
-}
-
-#else
-
static struct omap_dm_timer clksrc;
+static bool use_gptimer_clksrc;
/*
* clocksource
@@ -262,7 +248,7 @@ static cycle_t clocksource_read_cycles(struct clocksource *cs)
}
static struct clocksource clocksource_gpt = {
- .name = "gp timer",
+ .name = "gp_timer",
.rating = 300,
.read = clocksource_read_cycles,
.mask = CLOCKSOURCE_MASK(32),
@@ -278,7 +264,46 @@ static u32 notrace dmtimer_read_sched_clock(void)
}
/* Setup free-running counter for clocksource */
-static void __init omap2_gp_clocksource_init(int gptimer_id,
+static int __init omap2_sync32k_clocksource_init(void)
+{
+ int ret;
+ struct omap_hwmod *oh;
+ void __iomem *vbase;
+ const char *oh_name = "counter_32k";
+
+ /*
+ * First check that hwmod data is available for the sync32k counter
+ */
+ oh = omap_hwmod_lookup(oh_name);
+ if (!oh || oh->slaves_cnt == 0)
+ return -ENODEV;
+
+ omap_hwmod_setup_one(oh_name);
+
+ vbase = omap_hwmod_get_mpu_rt_va(oh);
+ if (!vbase) {
+ pr_warn("%s: failed to get counter_32k resource\n", __func__);
+ return -ENXIO;
+ }
+
+ ret = omap_hwmod_enable(oh);
+ if (ret) {
+ pr_warn("%s: failed to enable counter_32k module (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = omap_init_clocksource_32k(vbase);
+ if (ret) {
+ pr_warn("%s: failed to initialize counter_32k as a clocksource (%d)\n",
+ __func__, ret);
+ omap_hwmod_idle(oh);
+ }
+
+ return ret;
+}
+
+static void __init omap2_gptimer_clocksource_init(int gptimer_id,
const char *fck_source)
{
int res;
@@ -286,9 +311,6 @@ static void __init omap2_gp_clocksource_init(int gptimer_id,
res = omap_dm_timer_init_one(&clksrc, gptimer_id, fck_source);
BUG_ON(res);
- pr_info("OMAP clocksource: GPTIMER%d at %lu Hz\n",
- gptimer_id, clksrc.rate);
-
__omap_dm_timer_load_start(&clksrc,
OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR, 0, 1);
setup_sched_clock(dmtimer_read_sched_clock, 32, clksrc.rate);
@@ -296,15 +318,36 @@ static void __init omap2_gp_clocksource_init(int gptimer_id,
if (clocksource_register_hz(&clocksource_gpt, clksrc.rate))
pr_err("Could not register clocksource %s\n",
clocksource_gpt.name);
+ else
+ pr_info("OMAP clocksource: GPTIMER%d at %lu Hz\n",
+ gptimer_id, clksrc.rate);
+}
+
+static void __init omap2_clocksource_init(int gptimer_id,
+ const char *fck_source)
+{
+ /*
+ * First give preference to the kernel parameter supplied by the
+ * user (clocksource="gp_timer").
+ *
+ * If no clocksource kernel parameter is given, check for the
+ * availability of the 32k-sync timer; if the 32k_counter module
+ * cannot be found or registered as a clocksource, fall back to
+ * the gp-timer.
+ */
+ if (use_gptimer_clksrc == true)
+ omap2_gptimer_clocksource_init(gptimer_id, fck_source);
+ else if (omap2_sync32k_clocksource_init())
+ /* Fall back to gp-timer code */
+ omap2_gptimer_clocksource_init(gptimer_id, fck_source);
}
-#endif
#define OMAP_SYS_TIMER_INIT(name, clkev_nr, clkev_src, \
clksrc_nr, clksrc_src) \
static void __init omap##name##_timer_init(void) \
{ \
omap2_gp_clockevent_init((clkev_nr), clkev_src); \
- omap2_gp_clocksource_init((clksrc_nr), clksrc_src); \
+ omap2_clocksource_init((clksrc_nr), clksrc_src); \
}
#define OMAP_SYS_TIMER(name) \
@@ -335,7 +378,7 @@ static DEFINE_TWD_LOCAL_TIMER(twd_local_timer,
static void __init omap4_timer_init(void)
{
omap2_gp_clockevent_init(1, OMAP4_CLKEV_SOURCE);
- omap2_gp_clocksource_init(2, OMAP4_MPU_SOURCE);
+ omap2_clocksource_init(2, OMAP4_MPU_SOURCE);
#ifdef CONFIG_LOCAL_TIMERS
/* Local timers are not supported on OMAP4430 ES1.0 */
if (omap_rev() != OMAP4430_REV_ES1_0) {
@@ -503,3 +546,28 @@ static int __init omap2_dm_timer_init(void)
return 0;
}
arch_initcall(omap2_dm_timer_init);
+
+/**
+ * omap2_override_clocksource - clocksource override with user configuration
+ *
+ * Allows the user to override the default clocksource with the kernel
+ * parameter clocksource="gp_timer" (for all OMAP2PLUS architectures).
+ *
+ * Note that this reuses the standard kernel parameter "clocksource="
+ * rather than introducing any OMAP-specific interface.
+ */
+static int __init omap2_override_clocksource(char *str)
+{
+ if (!str)
+ return 0;
+ /*
+ * For the OMAP architecture, there are only two options:
+ * - sync_32k (default)
+ * - gp_timer (sys_clk based)
+ */
+ if (!strcmp(str, "gp_timer"))
+ use_gptimer_clksrc = true;
+
+ return 0;
+}
+early_param("clocksource", omap2_override_clocksource);
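
For reference, the override added above is driven purely by the standard boot command line; an illustrative fragment to append to the kernel bootargs would be:

	clocksource=gp_timer

Leaving the parameter out keeps the default 32k sync counter, with the gp-timer used only as a fallback when the counter_32k hwmod cannot be found or registered.
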
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index 8d5ed775dd5..b19d1b43c12 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -90,7 +90,7 @@ void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)
musb_plat.mode = board_data->mode;
musb_plat.extvbus = board_data->extvbus;
- if (cpu_is_omap3517() || cpu_is_omap3505()) {
+ if (soc_is_am35xx()) {
oh_name = "am35x_otg_hs";
name = "musb-am35x";
} else if (cpu_is_ti81xx()) {
diff --git a/arch/arm/mach-omap2/voltagedomains3xxx_data.c b/arch/arm/mach-omap2/voltagedomains3xxx_data.c
index 57db2038b23..d0103c80d04 100644
--- a/arch/arm/mach-omap2/voltagedomains3xxx_data.c
+++ b/arch/arm/mach-omap2/voltagedomains3xxx_data.c
@@ -118,7 +118,7 @@ void __init omap3xxx_voltagedomains_init(void)
}
#endif
- if (cpu_is_omap3517() || cpu_is_omap3505())
+ if (soc_is_am35xx())
voltdms = voltagedomains_am35xx;
else
voltdms = voltagedomains_omap3;
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index e2e9db492d0..9148b229d0d 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -18,6 +18,7 @@
#include <linux/mv643xx_i2c.h>
#include <linux/ata_platform.h>
#include <linux/delay.h>
+#include <linux/clk-provider.h>
#include <net/dsa.h>
#include <asm/page.h>
#include <asm/setup.h>
@@ -70,6 +71,19 @@ void __init orion5x_map_io(void)
/*****************************************************************************
+ * CLK tree
+ ****************************************************************************/
+static struct clk *tclk;
+
+static void __init clk_init(void)
+{
+ tclk = clk_register_fixed_rate(NULL, "tclk", NULL, CLK_IS_ROOT,
+ orion5x_tclk);
+
+ orion_clkdev_init(tclk);
+}
+
+/*****************************************************************************
* EHCI0
****************************************************************************/
void __init orion5x_ehci0_init(void)
@@ -95,7 +109,7 @@ void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data)
{
orion_ge00_init(eth_data,
ORION5X_ETH_PHYS_BASE, IRQ_ORION5X_ETH_SUM,
- IRQ_ORION5X_ETH_ERR, orion5x_tclk);
+ IRQ_ORION5X_ETH_ERR);
}
@@ -132,7 +146,7 @@ void __init orion5x_sata_init(struct mv_sata_platform_data *sata_data)
****************************************************************************/
void __init orion5x_spi_init()
{
- orion_spi_init(SPI_PHYS_BASE, orion5x_tclk);
+ orion_spi_init(SPI_PHYS_BASE);
}
@@ -142,7 +156,7 @@ void __init orion5x_spi_init()
void __init orion5x_uart0_init(void)
{
orion_uart0_init(UART0_VIRT_BASE, UART0_PHYS_BASE,
- IRQ_ORION5X_UART0, orion5x_tclk);
+ IRQ_ORION5X_UART0, tclk);
}
/*****************************************************************************
@@ -151,7 +165,7 @@ void __init orion5x_uart0_init(void)
void __init orion5x_uart1_init(void)
{
orion_uart1_init(UART1_VIRT_BASE, UART1_PHYS_BASE,
- IRQ_ORION5X_UART1, orion5x_tclk);
+ IRQ_ORION5X_UART1, tclk);
}
/*****************************************************************************
@@ -179,7 +193,7 @@ static void __init orion5x_crypto_init(void)
****************************************************************************/
void __init orion5x_wdt_init(void)
{
- orion_wdt_init(orion5x_tclk);
+ orion_wdt_init();
}
@@ -276,6 +290,9 @@ void __init orion5x_init(void)
*/
orion5x_setup_cpu_mbus_bridge();
+ /* Setup root of clk tree */
+ clk_init();
+
/*
* Don't issue "Wait for Interrupt" instruction if we are
* running on D0 5281 silicon.
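
With tclk registered as a fixed-rate root clock and hooked up through orion_clkdev_init(), consumers can obtain the rate through the common clock framework instead of taking it as an init argument. A minimal driver-side sketch (assuming a clkdev lookup exists for the device; not part of this patch):

#include <linux/clk.h>
#include <linux/err.h>

static unsigned long example_get_tclk_rate(struct device *dev)
{
	struct clk *clk;

	clk = clk_get(dev, NULL);	/* resolved via the clkdev table */
	if (IS_ERR(clk))
		return 0;

	/* keep the reference and leave the clock enabled for the device */
	clk_prepare_enable(clk);
	return clk_get_rate(clk);
}
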
diff --git a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
index e91bf0ba4e8..92df49c1b62 100644
--- a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
+++ b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
@@ -16,7 +16,6 @@
#include <linux/mtd/physmap.h>
#include <linux/mv643xx_eth.h>
#include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
#include <linux/spi/flash.h>
#include <linux/ethtool.h>
#include <net/dsa.h>
diff --git a/arch/arm/mach-pnx4008/core.c b/arch/arm/mach-pnx4008/core.c
index be4c9285850..a00d2f1254e 100644
--- a/arch/arm/mach-pnx4008/core.c
+++ b/arch/arm/mach-pnx4008/core.c
@@ -265,6 +265,17 @@ static void pnx4008_restart(char mode, const char *cmd)
soft_restart(0);
}
+#ifdef CONFIG_PM
+extern int pnx4008_pm_init(void);
+#else
+static inline int pnx4008_pm_init(void) { return 0; }
+#endif
+
+void __init pnx4008_init_late(void)
+{
+ pnx4008_pm_init();
+}
+
extern struct sys_timer pnx4008_timer;
MACHINE_START(PNX4008, "Philips PNX4008")
@@ -273,6 +284,7 @@ MACHINE_START(PNX4008, "Philips PNX4008")
.map_io = pnx4008_map_io,
.init_irq = pnx4008_init_irq,
.init_machine = pnx4008_init,
+ .init_late = pnx4008_init_late,
.timer = &pnx4008_timer,
.restart = pnx4008_restart,
MACHINE_END
diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
index f3e60a049f9..26f8d06b142 100644
--- a/arch/arm/mach-pnx4008/pm.c
+++ b/arch/arm/mach-pnx4008/pm.c
@@ -124,7 +124,7 @@ static const struct platform_suspend_ops pnx4008_pm_ops = {
.valid = pnx4008_pm_valid,
};
-static int __init pnx4008_pm_init(void)
+int __init pnx4008_pm_init(void)
{
u32 sram_size_to_allocate;
@@ -151,5 +151,3 @@ static int __init pnx4008_pm_init(void)
suspend_set_ops(&pnx4008_pm_ops);
return 0;
}
-
-late_initcall(pnx4008_pm_init);
diff --git a/arch/arm/mach-prima2/common.h b/arch/arm/mach-prima2/common.h
index b28a930d4f8..60d826fc218 100644
--- a/arch/arm/mach-prima2/common.h
+++ b/arch/arm/mach-prima2/common.h
@@ -24,4 +24,10 @@ static inline void sirfsoc_map_lluart(void) {}
extern void __init sirfsoc_map_lluart(void);
#endif
+#ifdef CONFIG_SUSPEND
+extern int sirfsoc_pm_init(void);
+#else
+static inline int sirfsoc_pm_init(void) { return 0; }
+#endif
+
#endif
diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c
index 26ebb57719d..fb5a7910af3 100644
--- a/arch/arm/mach-prima2/pm.c
+++ b/arch/arm/mach-prima2/pm.c
@@ -85,12 +85,11 @@ static const struct platform_suspend_ops sirfsoc_pm_ops = {
.valid = suspend_valid_only_mem,
};
-static int __init sirfsoc_pm_init(void)
+int __init sirfsoc_pm_init(void)
{
suspend_set_ops(&sirfsoc_pm_ops);
return 0;
}
-late_initcall(sirfsoc_pm_init);
static const struct of_device_id pwrc_ids[] = {
{ .compatible = "sirf,prima2-pwrc" },
diff --git a/arch/arm/mach-prima2/prima2.c b/arch/arm/mach-prima2/prima2.c
index 02b9c05ff99..8f0429d4b79 100644
--- a/arch/arm/mach-prima2/prima2.c
+++ b/arch/arm/mach-prima2/prima2.c
@@ -25,6 +25,11 @@ void __init sirfsoc_mach_init(void)
of_platform_bus_probe(NULL, sirfsoc_of_bus_ids, NULL);
}
+void __init sirfsoc_init_late(void)
+{
+ sirfsoc_pm_init();
+}
+
static const char *prima2cb_dt_match[] __initdata = {
"sirf,prima2-cb",
NULL
@@ -39,6 +44,7 @@ MACHINE_START(PRIMA2_EVB, "prima2cb")
.timer = &sirfsoc_timer,
.dma_zone_size = SZ_256M,
.init_machine = sirfsoc_mach_init,
+ .init_late = sirfsoc_init_late,
.dt_compat = prima2cb_dt_match,
.restart = sirfsoc_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig
index b34287ab5af..e24961109b7 100644
--- a/arch/arm/mach-s3c24xx/Kconfig
+++ b/arch/arm/mach-s3c24xx/Kconfig
@@ -518,6 +518,11 @@ config S3C2443_DMA
help
Internal config node for S3C2443 DMA support
+config S3C2443_SETUP_SPI
+ bool
+ help
+ Common setup code for SPI GPIO configurations
+
endif # CPU_S3C2443 || CPU_S3C2416
if CPU_S3C2443
diff --git a/arch/arm/mach-s3c24xx/Makefile b/arch/arm/mach-s3c24xx/Makefile
index 3518fe812d5..0ab6ab15da4 100644
--- a/arch/arm/mach-s3c24xx/Makefile
+++ b/arch/arm/mach-s3c24xx/Makefile
@@ -14,6 +14,8 @@ obj- :=
# core
+obj-y += common.o
+
obj-$(CONFIG_CPU_S3C2410) += s3c2410.o
obj-$(CONFIG_S3C2410_DMA) += dma-s3c2410.o
obj-$(CONFIG_S3C2410_PM) += pm-s3c2410.o sleep-s3c2410.o
@@ -33,6 +35,10 @@ obj-$(CONFIG_S3C2440_DMA) += dma-s3c2440.o
obj-$(CONFIG_CPU_S3C2443) += s3c2443.o irq-s3c2443.o clock-s3c2443.o
+# PM
+
+obj-$(CONFIG_PM) += pm.o irq-pm.o sleep.o
+
# common code
obj-$(CONFIG_S3C2443_COMMON) += common-s3c2443.o
@@ -91,5 +97,6 @@ obj-$(CONFIG_MACH_OSIRIS_DVS) += mach-osiris-dvs.o
# device setup
obj-$(CONFIG_S3C2416_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
+obj-$(CONFIG_S3C2443_SETUP_SPI) += setup-spi.o
obj-$(CONFIG_ARCH_S3C24XX) += setup-i2c.o
obj-$(CONFIG_S3C24XX_SETUP_TS) += setup-ts.o
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2416.c b/arch/arm/mach-s3c24xx/clock-s3c2416.c
index dbc9ab4aaca..8702ecfaab3 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2416.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2416.c
@@ -144,6 +144,7 @@ static struct clk_lookup s3c2416_clk_lookup[] = {
CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.0", &hsmmc0_clk),
CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &hsmmc_mux0.clk),
CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &hsmmc_mux1.clk),
+ CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk2", &hsspi_mux.clk),
};
void __init s3c2416_init_clocks(int xtal)
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2443.c b/arch/arm/mach-s3c24xx/clock-s3c2443.c
index efb3ac35956..a4c5a520d99 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2443.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2443.c
@@ -179,6 +179,11 @@ static struct clk *clks[] __initdata = {
&clk_hsmmc,
};
+static struct clk_lookup s3c2443_clk_lookup[] = {
+ CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_hsmmc),
+ CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk2", &clk_hsspi.clk),
+};
+
void __init s3c2443_init_clocks(int xtal)
{
unsigned long epllcon = __raw_readl(S3C2443_EPLLCON);
@@ -210,6 +215,7 @@ void __init s3c2443_init_clocks(int xtal)
s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
+ clkdev_add_table(s3c2443_clk_lookup, ARRAY_SIZE(s3c2443_clk_lookup));
s3c_pwmclk_init();
}
diff --git a/arch/arm/mach-s3c24xx/common-s3c2443.c b/arch/arm/mach-s3c24xx/common-s3c2443.c
index 460431589f3..aeeb2be283f 100644
--- a/arch/arm/mach-s3c24xx/common-s3c2443.c
+++ b/arch/arm/mach-s3c24xx/common-s3c2443.c
@@ -424,11 +424,6 @@ static struct clk init_clocks_off[] = {
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_IIS,
}, {
- .name = "hsspi",
- .parent = &clk_p,
- .enable = s3c2443_clkcon_enable_p,
- .ctrlbit = S3C2443_PCLKCON_HSSPI,
- }, {
.name = "adc",
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
@@ -562,6 +557,14 @@ static struct clk hsmmc1_clk = {
.ctrlbit = S3C2443_HCLKCON_HSMMC,
};
+static struct clk hsspi_clk = {
+ .name = "spi",
+ .devname = "s3c64xx-spi.0",
+ .parent = &clk_p,
+ .enable = s3c2443_clkcon_enable_p,
+ .ctrlbit = S3C2443_PCLKCON_HSSPI,
+};
+
/* EPLLCON compatible enough to get on/off information */
void __init_or_cpufreq s3c2443_common_setup_clocks(pll_fn get_mpll)
@@ -612,6 +615,7 @@ static struct clk *clks[] __initdata = {
&clk_usb_bus,
&clk_armdiv,
&hsmmc1_clk,
+ &hsspi_clk,
};
static struct clksrc_clk *clksrcs[] __initdata = {
@@ -629,6 +633,7 @@ static struct clk_lookup s3c2443_clk_lookup[] = {
CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p),
CLKDEV_INIT(NULL, "clk_uart_baud3", &clk_esys_uart.clk),
CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.0", &hsmmc1_clk),
+ CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk0", &hsspi_clk),
};
void __init s3c2443_common_init_clocks(int xtal, pll_fn get_mpll,
diff --git a/arch/arm/plat-s3c24xx/cpu.c b/arch/arm/mach-s3c24xx/common.c
index 290942d9add..56cdd34cce4 100644
--- a/arch/arm/plat-s3c24xx/cpu.c
+++ b/arch/arm/mach-s3c24xx/common.c
@@ -4,7 +4,7 @@
* http://www.simtec.co.uk/products/SWLINUX/
* Ben Dooks <ben@simtec.co.uk>
*
- * S3C24XX CPU Support
+ * Common code for S3C24XX machines
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -41,6 +41,7 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
+#include <mach/regs-clock.h>
#include <mach/regs-gpio.h>
#include <plat/regs-serial.h>
@@ -52,6 +53,8 @@
#include <plat/s3c2416.h>
#include <plat/s3c244x.h>
#include <plat/s3c2443.h>
+#include <plat/cpu-freq.h>
+#include <plat/pll.h>
/* table of supported CPUs */
@@ -234,3 +237,67 @@ void __init s3c24xx_init_io(struct map_desc *mach_desc, int size)
s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
}
+
+/* Serial port registrations */
+
+static struct resource s3c2410_uart0_resource[] = {
+ [0] = DEFINE_RES_MEM(S3C2410_PA_UART0, SZ_16K),
+ [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX0, \
+ IRQ_S3CUART_ERR0 - IRQ_S3CUART_RX0 + 1, \
+ NULL, IORESOURCE_IRQ)
+};
+
+static struct resource s3c2410_uart1_resource[] = {
+ [0] = DEFINE_RES_MEM(S3C2410_PA_UART1, SZ_16K),
+ [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX1, \
+ IRQ_S3CUART_ERR1 - IRQ_S3CUART_RX1 + 1, \
+ NULL, IORESOURCE_IRQ)
+};
+
+static struct resource s3c2410_uart2_resource[] = {
+ [0] = DEFINE_RES_MEM(S3C2410_PA_UART2, SZ_16K),
+ [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX2, \
+ IRQ_S3CUART_ERR2 - IRQ_S3CUART_RX2 + 1, \
+ NULL, IORESOURCE_IRQ)
+};
+
+static struct resource s3c2410_uart3_resource[] = {
+ [0] = DEFINE_RES_MEM(S3C2443_PA_UART3, SZ_16K),
+ [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX3, \
+ IRQ_S3CUART_ERR3 - IRQ_S3CUART_RX3 + 1, \
+ NULL, IORESOURCE_IRQ)
+};
+
+struct s3c24xx_uart_resources s3c2410_uart_resources[] __initdata = {
+ [0] = {
+ .resources = s3c2410_uart0_resource,
+ .nr_resources = ARRAY_SIZE(s3c2410_uart0_resource),
+ },
+ [1] = {
+ .resources = s3c2410_uart1_resource,
+ .nr_resources = ARRAY_SIZE(s3c2410_uart1_resource),
+ },
+ [2] = {
+ .resources = s3c2410_uart2_resource,
+ .nr_resources = ARRAY_SIZE(s3c2410_uart2_resource),
+ },
+ [3] = {
+ .resources = s3c2410_uart3_resource,
+ .nr_resources = ARRAY_SIZE(s3c2410_uart3_resource),
+ },
+};
+
+/* initialise all the clocks */
+
+void __init_or_cpufreq s3c24xx_setup_clocks(unsigned long fclk,
+ unsigned long hclk,
+ unsigned long pclk)
+{
+ clk_upll.rate = s3c24xx_get_pll(__raw_readl(S3C2410_UPLLCON),
+ clk_xtal.rate);
+
+ clk_mpll.rate = fclk;
+ clk_h.rate = hclk;
+ clk_p.rate = pclk;
+ clk_f.rate = fclk;
+}
diff --git a/arch/arm/mach-s3c24xx/dma-s3c2443.c b/arch/arm/mach-s3c24xx/dma-s3c2443.c
index e227c472a40..2d94228d286 100644
--- a/arch/arm/mach-s3c24xx/dma-s3c2443.c
+++ b/arch/arm/mach-s3c24xx/dma-s3c2443.c
@@ -55,12 +55,20 @@ static struct s3c24xx_dma_map __initdata s3c2443_dma_mappings[] = {
.name = "sdi",
.channels = MAP(S3C2443_DMAREQSEL_SDI),
},
- [DMACH_SPI0] = {
- .name = "spi0",
+ [DMACH_SPI0_RX] = {
+ .name = "spi0-rx",
+ .channels = MAP(S3C2443_DMAREQSEL_SPI0RX),
+ },
+ [DMACH_SPI0_TX] = {
+ .name = "spi0-tx",
.channels = MAP(S3C2443_DMAREQSEL_SPI0TX),
},
- [DMACH_SPI1] = { /* only on S3C2443/S3C2450 */
- .name = "spi1",
+ [DMACH_SPI1_RX] = { /* only on S3C2443/S3C2450 */
+ .name = "spi1-rx",
+ .channels = MAP(S3C2443_DMAREQSEL_SPI1RX),
+ },
+ [DMACH_SPI1_TX] = { /* only on S3C2443/S3C2450 */
+ .name = "spi1-tx",
.channels = MAP(S3C2443_DMAREQSEL_SPI1TX),
},
[DMACH_UART0] = {
diff --git a/arch/arm/mach-s3c24xx/include/mach/dma.h b/arch/arm/mach-s3c24xx/include/mach/dma.h
index acbdfecd418..454831b6603 100644
--- a/arch/arm/mach-s3c24xx/include/mach/dma.h
+++ b/arch/arm/mach-s3c24xx/include/mach/dma.h
@@ -47,6 +47,10 @@ enum dma_ch {
DMACH_UART2_SRC2,
DMACH_UART3, /* s3c2443 has extra uart */
DMACH_UART3_SRC2,
+ DMACH_SPI0_TX, /* s3c2443/2416/2450 hsspi0 */
+ DMACH_SPI0_RX, /* s3c2443/2416/2450 hsspi0 */
+ DMACH_SPI1_TX, /* s3c2443/2450 hsspi1 */
+ DMACH_SPI1_RX, /* s3c2443/2450 hsspi1 */
DMACH_MAX, /* the end entry */
};
diff --git a/arch/arm/mach-s3c24xx/include/mach/map.h b/arch/arm/mach-s3c24xx/include/mach/map.h
index 78ae807f128..8ba381f2dbe 100644
--- a/arch/arm/mach-s3c24xx/include/mach/map.h
+++ b/arch/arm/mach-s3c24xx/include/mach/map.h
@@ -98,6 +98,8 @@
/* SPI */
#define S3C2410_PA_SPI (0x59000000)
+#define S3C2443_PA_SPI0 (0x52000000)
+#define S3C2443_PA_SPI1 S3C2410_PA_SPI
/* SDI */
#define S3C2410_PA_SDI (0x5A000000)
@@ -162,4 +164,7 @@
#define S3C_PA_WDT S3C2410_PA_WATCHDOG
#define S3C_PA_NAND S3C24XX_PA_NAND
+#define S3C_PA_SPI0 S3C2443_PA_SPI0
+#define S3C_PA_SPI1 S3C2443_PA_SPI1
+
#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/plat-s3c24xx/irq-pm.c b/arch/arm/mach-s3c24xx/irq-pm.c
index 0efb2e2848c..0efb2e2848c 100644
--- a/arch/arm/plat-s3c24xx/irq-pm.c
+++ b/arch/arm/mach-s3c24xx/irq-pm.c
diff --git a/arch/arm/plat-s3c24xx/pm.c b/arch/arm/mach-s3c24xx/pm.c
index 60627e63a25..60627e63a25 100644
--- a/arch/arm/plat-s3c24xx/pm.c
+++ b/arch/arm/mach-s3c24xx/pm.c
diff --git a/arch/arm/mach-s3c24xx/setup-spi.c b/arch/arm/mach-s3c24xx/setup-spi.c
new file mode 100644
index 00000000000..5712c85f39b
--- /dev/null
+++ b/arch/arm/mach-s3c24xx/setup-spi.c
@@ -0,0 +1,39 @@
+/*
+ * HS-SPI device setup for S3C2443/S3C2416
+ *
+ * Copyright (C) 2011 Samsung Electronics Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+
+#include <plat/gpio-cfg.h>
+#include <plat/s3c64xx-spi.h>
+
+#include <mach/hardware.h>
+#include <mach/regs-gpio.h>
+
+#ifdef CONFIG_S3C64XX_DEV_SPI0
+struct s3c64xx_spi_info s3c64xx_spi0_pdata __initdata = {
+ .fifo_lvl_mask = 0x7f,
+ .rx_lvl_offset = 13,
+ .tx_st_done = 21,
+ .high_speed = 1,
+};
+
+int s3c64xx_spi0_cfg_gpio(struct platform_device *pdev)
+{
+ /* enable hsspi bit in misccr */
+ s3c2410_modify_misccr(S3C2416_MISCCR_HSSPI_EN2, 1);
+
+ s3c_gpio_cfgall_range(S3C2410_GPE(11), 3,
+ S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
+
+ return 0;
+}
+#endif
diff --git a/arch/arm/plat-s3c24xx/sleep.S b/arch/arm/mach-s3c24xx/sleep.S
index c56612569b4..c56612569b4 100644
--- a/arch/arm/plat-s3c24xx/sleep.S
+++ b/arch/arm/mach-s3c24xx/sleep.S
diff --git a/arch/arm/mach-s3c64xx/common.c b/arch/arm/mach-s3c64xx/common.c
index b313380342a..be746e33e86 100644
--- a/arch/arm/mach-s3c64xx/common.c
+++ b/arch/arm/mach-s3c64xx/common.c
@@ -384,3 +384,8 @@ void s3c64xx_restart(char mode, const char *cmd)
/* if all else fails, or mode was for soft, jump to 0 */
soft_restart(0);
}
+
+void __init s3c64xx_init_late(void)
+{
+ s3c64xx_pm_late_initcall();
+}
diff --git a/arch/arm/mach-s3c64xx/common.h b/arch/arm/mach-s3c64xx/common.h
index 7a10be629ab..6cfc99bdfb3 100644
--- a/arch/arm/mach-s3c64xx/common.h
+++ b/arch/arm/mach-s3c64xx/common.h
@@ -24,6 +24,7 @@ void s3c64xx_register_clocks(unsigned long xtal, unsigned armclk_limit);
void s3c64xx_setup_clocks(void);
void s3c64xx_restart(char mode, const char *cmd);
+void s3c64xx_init_late(void);
#ifdef CONFIG_CPU_S3C6400
@@ -51,4 +52,10 @@ extern void s3c6410_init_clocks(int xtal);
#define s3c6410_init NULL
#endif
+#ifdef CONFIG_PM
+int __init s3c64xx_pm_late_initcall(void);
+#else
+static inline int s3c64xx_pm_late_initcall(void) { return 0; }
+#endif
+
#endif /* __ARCH_ARM_MACH_S3C64XX_COMMON_H */
diff --git a/arch/arm/mach-s3c64xx/mach-anw6410.c b/arch/arm/mach-s3c64xx/mach-anw6410.c
index f252691fb20..314df0518af 100644
--- a/arch/arm/mach-s3c64xx/mach-anw6410.c
+++ b/arch/arm/mach-s3c64xx/mach-anw6410.c
@@ -230,6 +230,7 @@ MACHINE_START(ANW6410, "A&W6410")
.handle_irq = vic_handle_irq,
.map_io = anw6410_map_io,
.init_machine = anw6410_machine_init,
+ .init_late = s3c64xx_init_late,
.timer = &s3c24xx_timer,
.restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
index aa1137fb47e..eda5e027b10 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
@@ -813,6 +813,7 @@ MACHINE_START(WLF_CRAGG_6410, "Wolfson Cragganmore 6410")
.handle_irq = vic_handle_irq,
.map_io = crag6410_map_io,
.init_machine = crag6410_machine_init,
+ .init_late = s3c64xx_init_late,
.timer = &s3c24xx_timer,
.restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-hmt.c b/arch/arm/mach-s3c64xx/mach-hmt.c
index 521e07b8501..1bf6b9da20f 100644
--- a/arch/arm/mach-s3c64xx/mach-hmt.c
+++ b/arch/arm/mach-s3c64xx/mach-hmt.c
@@ -272,6 +272,7 @@ MACHINE_START(HMT, "Airgoo-HMT")
.handle_irq = vic_handle_irq,
.map_io = hmt_map_io,
.init_machine = hmt_machine_init,
+ .init_late = s3c64xx_init_late,
.timer = &s3c24xx_timer,
.restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-mini6410.c b/arch/arm/mach-s3c64xx/mach-mini6410.c
index b2166d4a553..f8ea61ea3b3 100644
--- a/arch/arm/mach-s3c64xx/mach-mini6410.c
+++ b/arch/arm/mach-s3c64xx/mach-mini6410.c
@@ -339,6 +339,7 @@ MACHINE_START(MINI6410, "MINI6410")
.handle_irq = vic_handle_irq,
.map_io = mini6410_map_io,
.init_machine = mini6410_machine_init,
+ .init_late = s3c64xx_init_late,
.timer = &s3c24xx_timer,
.restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-ncp.c b/arch/arm/mach-s3c64xx/mach-ncp.c
index 0efa2ba783b..cad2e05eddf 100644
--- a/arch/arm/mach-s3c64xx/mach-ncp.c
+++ b/arch/arm/mach-s3c64xx/mach-ncp.c
@@ -104,6 +104,7 @@ MACHINE_START(NCP, "NCP")
.handle_irq = vic_handle_irq,
.map_io = ncp_map_io,
.init_machine = ncp_machine_init,
+ .init_late = s3c64xx_init_late,
.timer = &s3c24xx_timer,
.restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-real6410.c b/arch/arm/mach-s3c64xx/mach-real6410.c
index 5c08266cea2..b92d8e17d50 100644
--- a/arch/arm/mach-s3c64xx/mach-real6410.c
+++ b/arch/arm/mach-s3c64xx/mach-real6410.c
@@ -320,6 +320,7 @@ MACHINE_START(REAL6410, "REAL6410")
.handle_irq = vic_handle_irq,
.map_io = real6410_map_io,
.init_machine = real6410_machine_init,
+ .init_late = s3c64xx_init_late,
.timer = &s3c24xx_timer,
.restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smartq5.c b/arch/arm/mach-s3c64xx/mach-smartq5.c
index 3f42431d4dd..c5021d0335c 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq5.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq5.c
@@ -152,6 +152,7 @@ MACHINE_START(SMARTQ5, "SmartQ 5")
.handle_irq = vic_handle_irq,
.map_io = smartq_map_io,
.init_machine = smartq5_machine_init,
+ .init_late = s3c64xx_init_late,
.timer = &s3c24xx_timer,
.restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smartq7.c b/arch/arm/mach-s3c64xx/mach-smartq7.c
index e5c09b6db96..aa9072a4cbe 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq7.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq7.c
@@ -168,6 +168,7 @@ MACHINE_START(SMARTQ7, "SmartQ 7")
.handle_irq = vic_handle_irq,
.map_io = smartq_map_io,
.init_machine = smartq7_machine_init,
+ .init_late = s3c64xx_init_late,
.timer = &s3c24xx_timer,
.restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6400.c b/arch/arm/mach-s3c64xx/mach-smdk6400.c
index 5f096534f4c..b0f4525c66b 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6400.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6400.c
@@ -93,6 +93,7 @@ MACHINE_START(SMDK6400, "SMDK6400")
.handle_irq = vic_handle_irq,
.map_io = smdk6400_map_io,
.init_machine = smdk6400_machine_init,
+ .init_late = s3c64xx_init_late,
.timer = &s3c24xx_timer,
.restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index 7da044f738a..d44319b0941 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -702,6 +702,7 @@ MACHINE_START(SMDK6410, "SMDK6410")
.handle_irq = vic_handle_irq,
.map_io = smdk6410_map_io,
.init_machine = smdk6410_machine_init,
+ .init_late = s3c64xx_init_late,
.timer = &s3c24xx_timer,
.restart = s3c64xx_restart,
MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c
index 7d3e81b9dd0..7feb426fc20 100644
--- a/arch/arm/mach-s3c64xx/pm.c
+++ b/arch/arm/mach-s3c64xx/pm.c
@@ -365,10 +365,9 @@ static __init int s3c64xx_pm_initcall(void)
}
arch_initcall(s3c64xx_pm_initcall);
-static __init int s3c64xx_pm_late_initcall(void)
+int __init s3c64xx_pm_late_initcall(void)
{
pm_genpd_poweroff_unused();
return 0;
}
-late_initcall(s3c64xx_pm_late_initcall);
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index 375d3f779a8..d1dc7f1a239 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -538,6 +538,7 @@ MACHINE_START(ASSABET, "Intel-Assabet")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = assabet_init,
+ .init_late = sa11x0_init_late,
#ifdef CONFIG_SA1111
.dma_zone_size = SZ_1M,
#endif
diff --git a/arch/arm/mach-sa1100/badge4.c b/arch/arm/mach-sa1100/badge4.c
index e0f0c030258..b30fb99b587 100644
--- a/arch/arm/mach-sa1100/badge4.c
+++ b/arch/arm/mach-sa1100/badge4.c
@@ -305,6 +305,7 @@ MACHINE_START(BADGE4, "Hewlett-Packard Laboratories BadgePAD 4")
.map_io = badge4_map_io,
.nr_irqs = SA1100_NR_IRQS,
.init_irq = sa1100_init_irq,
+ .init_late = sa11x0_init_late,
.timer = &sa1100_timer,
#ifdef CONFIG_SA1111
.dma_zone_size = SZ_1M,
diff --git a/arch/arm/mach-sa1100/cerf.c b/arch/arm/mach-sa1100/cerf.c
index 4a61f60e050..09d7f4b4b35 100644
--- a/arch/arm/mach-sa1100/cerf.c
+++ b/arch/arm/mach-sa1100/cerf.c
@@ -134,5 +134,6 @@ MACHINE_START(CERF, "Intrinsyc CerfBoard/CerfCube")
.init_irq = cerf_init_irq,
.timer = &sa1100_timer,
.init_machine = cerf_init,
+ .init_late = sa11x0_init_late,
.restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c
index c7f418b0cde..ea5cff38745 100644
--- a/arch/arm/mach-sa1100/collie.c
+++ b/arch/arm/mach-sa1100/collie.c
@@ -401,5 +401,6 @@ MACHINE_START(COLLIE, "Sharp-Collie")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = collie_init,
+ .init_late = sa11x0_init_late,
.restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index 16be4c56abe..9db3e98e8b8 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -359,6 +359,10 @@ static int __init sa1100_init(void)
arch_initcall(sa1100_init);
+void __init sa11x0_init_late(void)
+{
+ sa11x0_pm_init();
+}
/*
* Common I/O mapping:
diff --git a/arch/arm/mach-sa1100/generic.h b/arch/arm/mach-sa1100/generic.h
index 9eb3b3cd5a6..a5b7c13da3e 100644
--- a/arch/arm/mach-sa1100/generic.h
+++ b/arch/arm/mach-sa1100/generic.h
@@ -11,6 +11,7 @@ extern void __init sa1100_map_io(void);
extern void __init sa1100_init_irq(void);
extern void __init sa1100_init_gpio(void);
extern void sa11x0_restart(char, const char *);
+extern void sa11x0_init_late(void);
#define SET_BANK(__nr,__start,__size) \
mi->bank[__nr].start = (__start), \
@@ -41,3 +42,9 @@ void sa11x0_register_mcp(struct mcp_plat_data *data);
struct sa1100fb_mach_info;
void sa11x0_register_lcd(struct sa1100fb_mach_info *inf);
+
+#ifdef CONFIG_PM
+int sa11x0_pm_init(void);
+#else
+static inline int sa11x0_pm_init(void) { return 0; }
+#endif
diff --git a/arch/arm/mach-sa1100/h3100.c b/arch/arm/mach-sa1100/h3100.c
index b2e8d0f418e..e1571eab08a 100644
--- a/arch/arm/mach-sa1100/h3100.c
+++ b/arch/arm/mach-sa1100/h3100.c
@@ -110,6 +110,7 @@ MACHINE_START(H3100, "Compaq iPAQ H3100")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = h3100_mach_init,
+ .init_late = sa11x0_init_late,
.restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/h3600.c b/arch/arm/mach-sa1100/h3600.c
index cb6659f294f..ba7a2901ab8 100644
--- a/arch/arm/mach-sa1100/h3600.c
+++ b/arch/arm/mach-sa1100/h3600.c
@@ -160,6 +160,7 @@ MACHINE_START(H3600, "Compaq iPAQ H3600")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = h3600_mach_init,
+ .init_late = sa11x0_init_late,
.restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/hackkit.c b/arch/arm/mach-sa1100/hackkit.c
index 5535475bf58..7f86bd91182 100644
--- a/arch/arm/mach-sa1100/hackkit.c
+++ b/arch/arm/mach-sa1100/hackkit.c
@@ -199,5 +199,6 @@ MACHINE_START(HACKKIT, "HackKit Cpu Board")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = hackkit_init,
+ .init_late = sa11x0_init_late,
.restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/jornada720.c b/arch/arm/mach-sa1100/jornada720.c
index ca7a7e83472..e3084f47027 100644
--- a/arch/arm/mach-sa1100/jornada720.c
+++ b/arch/arm/mach-sa1100/jornada720.c
@@ -348,6 +348,7 @@ MACHINE_START(JORNADA720, "HP Jornada 720")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = jornada720_mach_init,
+ .init_late = sa11x0_init_late,
#ifdef CONFIG_SA1111
.dma_zone_size = SZ_1M,
#endif
diff --git a/arch/arm/mach-sa1100/lart.c b/arch/arm/mach-sa1100/lart.c
index eb6534e0b0d..b775a0abec0 100644
--- a/arch/arm/mach-sa1100/lart.c
+++ b/arch/arm/mach-sa1100/lart.c
@@ -147,6 +147,7 @@ MACHINE_START(LART, "LART")
.nr_irqs = SA1100_NR_IRQS,
.init_irq = sa1100_init_irq,
.init_machine = lart_init,
+ .init_late = sa11x0_init_late,
.timer = &sa1100_timer,
.restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/nanoengine.c b/arch/arm/mach-sa1100/nanoengine.c
index 8f6446b9f02..41f69d97066 100644
--- a/arch/arm/mach-sa1100/nanoengine.c
+++ b/arch/arm/mach-sa1100/nanoengine.c
@@ -112,5 +112,6 @@ MACHINE_START(NANOENGINE, "BSE nanoEngine")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = nanoengine_init,
+ .init_late = sa11x0_init_late,
.restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c
index 6c58f01b358..266db873a4e 100644
--- a/arch/arm/mach-sa1100/neponset.c
+++ b/arch/arm/mach-sa1100/neponset.c
@@ -89,6 +89,7 @@ void neponset_ncr_frob(unsigned int mask, unsigned int val)
WARN(1, "nep_base unset\n");
}
}
+EXPORT_SYMBOL(neponset_ncr_frob);
static void neponset_set_mctrl(struct uart_port *port, u_int mctrl)
{
diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c
index 1602575a0d5..37fe0a0a536 100644
--- a/arch/arm/mach-sa1100/pleb.c
+++ b/arch/arm/mach-sa1100/pleb.c
@@ -135,5 +135,6 @@ MACHINE_START(PLEB, "PLEB")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = pleb_init,
+ .init_late = sa11x0_init_late,
.restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
index 2fa499ec6af..690cf0ce5c0 100644
--- a/arch/arm/mach-sa1100/pm.c
+++ b/arch/arm/mach-sa1100/pm.c
@@ -117,10 +117,8 @@ static const struct platform_suspend_ops sa11x0_pm_ops = {
.valid = suspend_valid_only_mem,
};
-static int __init sa11x0_pm_init(void)
+int __init sa11x0_pm_init(void)
{
suspend_set_ops(&sa11x0_pm_ops);
return 0;
}
-
-late_initcall(sa11x0_pm_init);
diff --git a/arch/arm/mach-sa1100/shannon.c b/arch/arm/mach-sa1100/shannon.c
index ca8bf59b904..5d33fc3108e 100644
--- a/arch/arm/mach-sa1100/shannon.c
+++ b/arch/arm/mach-sa1100/shannon.c
@@ -104,5 +104,6 @@ MACHINE_START(SHANNON, "Shannon (AKA: Tuxscreen)")
.init_irq = sa1100_init_irq,
.timer = &sa1100_timer,
.init_machine = shannon_init,
+ .init_late = sa11x0_init_late,
.restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c
index 3efae03cb3d..fbd53593be5 100644
--- a/arch/arm/mach-sa1100/simpad.c
+++ b/arch/arm/mach-sa1100/simpad.c
@@ -395,6 +395,7 @@ MACHINE_START(SIMPAD, "Simpad")
.map_io = simpad_map_io,
.nr_irqs = SA1100_NR_IRQS,
.init_irq = sa1100_init_irq,
+ .init_late = sa11x0_init_late,
.timer = &sa1100_timer,
.restart = sa11x0_restart,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index e6b177bc941..8aa1962c22a 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -3,7 +3,7 @@
#
# Common objects
-obj-y := timer.o console.o clock.o
+obj-y := timer.o console.o clock.o common.o
# CPU objects
obj-$(CONFIG_ARCH_SH7367) += setup-sh7367.o clock-sh7367.o intc-sh7367.o
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index 0891ec6e27f..5a6f22f05e9 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -580,5 +580,6 @@ MACHINE_START(AG5EVM, "ag5evm")
.init_irq = sh73a0_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = ag5evm_init,
+ .init_late = shmobile_init_late,
.timer = &shmobile_timer,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index b540b8eb20c..ace60246a5d 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -1469,5 +1469,6 @@ MACHINE_START(AP4EVB, "ap4evb")
.init_irq = sh7372_init_irq,
.handle_irq = shmobile_handle_irq_intc,
.init_machine = ap4evb_init,
+ .init_late = shmobile_init_late,
.timer = &shmobile_timer,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-bonito.c b/arch/arm/mach-shmobile/board-bonito.c
index 63ab7062bee..e9b32cfbf74 100644
--- a/arch/arm/mach-shmobile/board-bonito.c
+++ b/arch/arm/mach-shmobile/board-bonito.c
@@ -500,5 +500,6 @@ MACHINE_START(BONITO, "bonito")
.init_irq = r8a7740_init_irq,
.handle_irq = shmobile_handle_irq_intc,
.init_machine = bonito_init,
+ .init_late = shmobile_init_late,
.timer = &shmobile_timer,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-g3evm.c b/arch/arm/mach-shmobile/board-g3evm.c
index 39b6cf85ced..796fa00ad3c 100644
--- a/arch/arm/mach-shmobile/board-g3evm.c
+++ b/arch/arm/mach-shmobile/board-g3evm.c
@@ -338,5 +338,6 @@ MACHINE_START(G3EVM, "g3evm")
.init_irq = sh7367_init_irq,
.handle_irq = shmobile_handle_irq_intc,
.init_machine = g3evm_init,
+ .init_late = shmobile_init_late,
.timer = &shmobile_timer,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-g4evm.c b/arch/arm/mach-shmobile/board-g4evm.c
index 0e5a39c670b..f1257321999 100644
--- a/arch/arm/mach-shmobile/board-g4evm.c
+++ b/arch/arm/mach-shmobile/board-g4evm.c
@@ -381,5 +381,6 @@ MACHINE_START(G4EVM, "g4evm")
.init_irq = sh7377_init_irq,
.handle_irq = shmobile_handle_irq_intc,
.init_machine = g4evm_init,
+ .init_late = shmobile_init_late,
.timer = &shmobile_timer,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-kota2.c b/arch/arm/mach-shmobile/board-kota2.c
index 200dcd42a3a..f60f1b281cc 100644
--- a/arch/arm/mach-shmobile/board-kota2.c
+++ b/arch/arm/mach-shmobile/board-kota2.c
@@ -521,5 +521,6 @@ MACHINE_START(KOTA2, "kota2")
.init_irq = sh73a0_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = kota2_init,
+ .init_late = shmobile_init_late,
.timer = &shmobile_timer,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 50c67b22d08..b577f7c4467 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -1638,5 +1638,6 @@ MACHINE_START(MACKEREL, "mackerel")
.init_irq = sh7372_init_irq,
.handle_irq = shmobile_handle_irq_intc,
.init_machine = mackerel_init,
+ .init_late = shmobile_init_late,
.timer = &shmobile_timer,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index ef0e13bf0b3..14de3787caf 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -98,5 +98,6 @@ MACHINE_START(MARZEN, "marzen")
.init_irq = r8a7779_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = marzen_init,
+ .init_late = shmobile_init_late,
.timer = &shmobile_timer,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/common.c b/arch/arm/mach-shmobile/common.c
new file mode 100644
index 00000000000..608aba9d60d
--- /dev/null
+++ b/arch/arm/mach-shmobile/common.c
@@ -0,0 +1,24 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <mach/common.h>
+
+void __init shmobile_init_late(void)
+{
+ shmobile_suspend_init();
+ shmobile_cpuidle_init();
+}
diff --git a/arch/arm/mach-shmobile/cpuidle.c b/arch/arm/mach-shmobile/cpuidle.c
index 7e6559105d4..7b541e911ab 100644
--- a/arch/arm/mach-shmobile/cpuidle.c
+++ b/arch/arm/mach-shmobile/cpuidle.c
@@ -46,7 +46,7 @@ static struct cpuidle_driver shmobile_cpuidle_driver = {
void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv);
-static int shmobile_cpuidle_init(void)
+int shmobile_cpuidle_init(void)
{
struct cpuidle_device *dev = &shmobile_cpuidle_dev;
struct cpuidle_driver *drv = &shmobile_cpuidle_driver;
@@ -65,4 +65,3 @@ static int shmobile_cpuidle_init(void)
return 0;
}
-late_initcall(shmobile_cpuidle_init);
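shmobile_cpuidle_setup is a function pointer that SoC code can override before .init_late runs, extending the default WFI-only driver with deeper states. A hypothetical sketch of such an override, assuming the 3.4-era cpuidle API (state table embedded in struct cpuidle_driver, enter() taking dev/drv/index) and assuming shmobile_cpuidle_setup is declared in <mach/common.h>; names and latency numbers are illustrative only:

#include <linux/cpuidle.h>
#include <linux/string.h>
#include <asm/proc-fns.h>
#include <mach/common.h>	/* assumed to declare shmobile_cpuidle_setup */

static int soc_enter_core_standby(struct cpuidle_device *dev,
				  struct cpuidle_driver *drv, int index)
{
	/* hypothetical: program the power controller here, then idle */
	cpu_do_idle();
	return index;
}

static void soc_cpuidle_setup(struct cpuidle_driver *drv)
{
	struct cpuidle_state *state = &drv->states[drv->state_count];

	strcpy(state->name, "C2");
	strcpy(state->desc, "Core standby (hypothetical)");
	state->exit_latency = 10;		/* illustrative numbers only */
	state->target_residency = 20;
	state->flags = CPUIDLE_FLAG_TIME_VALID;
	state->enter = soc_enter_core_standby;
	drv->state_count++;
}

void __init soc_pm_init(void)
{
	/* must run before shmobile_init_late() calls shmobile_cpuidle_init() */
	shmobile_cpuidle_setup = soc_cpuidle_setup;
}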
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index ff5f12fd742..01e2bc014f1 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -85,4 +85,18 @@ extern int r8a7779_boot_secondary(unsigned int cpu);
extern void r8a7779_smp_prepare_cpus(void);
extern void r8a7779_register_twd(void);
+extern void shmobile_init_late(void);
+
+#ifdef CONFIG_SUSPEND
+int shmobile_suspend_init(void);
+#else
+static inline int shmobile_suspend_init(void) { return 0; }
+#endif
+
+#ifdef CONFIG_CPU_IDLE
+int shmobile_cpuidle_init(void);
+#else
+static inline int shmobile_cpuidle_init(void) { return 0; }
+#endif
+
#endif /* __ARCH_MACH_COMMON_H */
diff --git a/arch/arm/mach-shmobile/suspend.c b/arch/arm/mach-shmobile/suspend.c
index 4d1b86a4992..47d83f7a70b 100644
--- a/arch/arm/mach-shmobile/suspend.c
+++ b/arch/arm/mach-shmobile/suspend.c
@@ -39,9 +39,8 @@ struct platform_suspend_ops shmobile_suspend_ops = {
.valid = suspend_valid_only_mem,
};
-static int __init shmobile_suspend_init(void)
+int __init shmobile_suspend_init(void)
{
suspend_set_ops(&shmobile_suspend_ops);
return 0;
}
-late_initcall(shmobile_suspend_init);
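As with the SA1100 change above, the suspend code now registers its platform_suspend_ops from the machine's .init_late path instead of from its own late_initcall. For reference, a minimal set of ops of this shape looks like the sketch below; the .enter body is hypothetical, while suspend_valid_only_mem is the stock helper both files in this patch already use:

#include <linux/init.h>
#include <linux/suspend.h>
#include <asm/proc-fns.h>

static int board_pm_enter(suspend_state_t state)
{
	/* hypothetical: put the SoC into its retention state here */
	cpu_do_idle();
	return 0;
}

static const struct platform_suspend_ops board_pm_ops = {
	.enter	= board_pm_enter,
	.valid	= suspend_valid_only_mem,
};

/* called from the machine's .init_late hook */
int __init board_pm_init(void)
{
	suspend_set_ops(&board_pm_ops);
	return 0;
}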
diff --git a/arch/arm/mach-spear13xx/Kconfig b/arch/arm/mach-spear13xx/Kconfig
new file mode 100644
index 00000000000..eaadc66d96b
--- /dev/null
+++ b/arch/arm/mach-spear13xx/Kconfig
@@ -0,0 +1,20 @@
+#
+# SPEAr13XX Machine configuration file
+#
+
+if ARCH_SPEAR13XX
+
+menu "SPEAr13xx Implementations"
+config MACH_SPEAR1310
+ bool "SPEAr1310 Machine support with Device Tree"
+ select PINCTRL_SPEAR1310
+ help
+ Supports ST SPEAr1310 machine configured via the device-tree
+
+config MACH_SPEAR1340
+ bool "SPEAr1340 Machine support with Device Tree"
+ select PINCTRL_SPEAR1340
+ help
+ Supports ST SPEAr1340 machine configured via the device-tree
+endmenu
+endif #ARCH_SPEAR13XX
diff --git a/arch/arm/mach-spear13xx/Makefile b/arch/arm/mach-spear13xx/Makefile
new file mode 100644
index 00000000000..3435ea78c15
--- /dev/null
+++ b/arch/arm/mach-spear13xx/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for SPEAr13XX machine series
+#
+
+obj-$(CONFIG_SMP) += headsmp.o platsmp.o
+obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
+
+obj-$(CONFIG_ARCH_SPEAR13XX) += spear13xx.o
+obj-$(CONFIG_MACH_SPEAR1310) += spear1310.o
+obj-$(CONFIG_MACH_SPEAR1340) += spear1340.o
diff --git a/arch/arm/mach-spear13xx/Makefile.boot b/arch/arm/mach-spear13xx/Makefile.boot
new file mode 100644
index 00000000000..403efd7e6d2
--- /dev/null
+++ b/arch/arm/mach-spear13xx/Makefile.boot
@@ -0,0 +1,6 @@
+zreladdr-y += 0x00008000
+params_phys-y := 0x00000100
+initrd_phys-y := 0x00800000
+
+dtb-$(CONFIG_MACH_SPEAR1310) += spear1310-evb.dtb
+dtb-$(CONFIG_MACH_SPEAR1340) += spear1340-evb.dtb
diff --git a/arch/arm/mach-spear13xx/headsmp.S b/arch/arm/mach-spear13xx/headsmp.S
new file mode 100644
index 00000000000..ed85473a047
--- /dev/null
+++ b/arch/arm/mach-spear13xx/headsmp.S
@@ -0,0 +1,47 @@
+/*
+ * arch/arm/mach-spear13XX/headsmp.S
+ *
+ * Picked from realview
+ * Copyright (c) 2012 ST Microelectronics Limited
+ * Shiraz Hashim <shiraz.hashim@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+ __INIT
+
+/*
+ * spear13xx specific entry point for secondary CPUs. This provides
+ * a "holding pen" into which all secondary cores are held until we're
+ * ready for them to initialise.
+ */
+ENTRY(spear13xx_secondary_startup)
+ mrc p15, 0, r0, c0, c0, 5
+ and r0, r0, #15
+ adr r4, 1f
+ ldmia r4, {r5, r6}
+ sub r4, r4, r5
+ add r6, r6, r4
+pen: ldr r7, [r6]
+ cmp r7, r0
+ bne pen
+
+ /* re-enable coherency */
+ mrc p15, 0, r0, c1, c0, 1
+ orr r0, r0, #(1 << 6) | (1 << 0)
+ mcr p15, 0, r0, c1, c0, 1
+ /*
+ * we've been released from the holding pen: secondary_stack
+ * should now contain the SVC stack for this core
+ */
+ b secondary_startup
+
+ .align
+1: .long .
+ .long pen_release
+ENDPROC(spear13xx_secondary_startup)
diff --git a/arch/arm/mach-spear13xx/hotplug.c b/arch/arm/mach-spear13xx/hotplug.c
new file mode 100644
index 00000000000..5c6867b46d0
--- /dev/null
+++ b/arch/arm/mach-spear13xx/hotplug.c
@@ -0,0 +1,119 @@
+/*
+ * linux/arch/arm/mach-spear13xx/hotplug.c
+ *
+ * Copyright (C) 2012 ST Microelectronics Ltd.
+ * Deepak Sikri <deepak.sikri@st.com>
+ *
+ * based upon linux/arch/arm/mach-realview/hotplug.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+#include <asm/cacheflush.h>
+#include <asm/cp15.h>
+#include <asm/smp_plat.h>
+
+extern volatile int pen_release;
+
+static inline void cpu_enter_lowpower(void)
+{
+ unsigned int v;
+
+ flush_cache_all();
+ asm volatile(
+ " mcr p15, 0, %1, c7, c5, 0\n"
+ " dsb\n"
+ /*
+ * Turn off coherency
+ */
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " bic %0, %0, #0x20\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ " mrc p15, 0, %0, c1, c0, 0\n"
+ " bic %0, %0, %2\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ : "=&r" (v)
+ : "r" (0), "Ir" (CR_C)
+ : "cc", "memory");
+}
+
+static inline void cpu_leave_lowpower(void)
+{
+ unsigned int v;
+
+ asm volatile("mrc p15, 0, %0, c1, c0, 0\n"
+ " orr %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " orr %0, %0, #0x20\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ : "=&r" (v)
+ : "Ir" (CR_C)
+ : "cc");
+}
+
+static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
+{
+ for (;;) {
+ wfi();
+
+ if (pen_release == cpu) {
+ /*
+ * OK, proper wakeup, we're done
+ */
+ break;
+ }
+
+ /*
+ * Getting here, means that we have come out of WFI without
+ * having been woken up - this shouldn't happen
+ *
+ * Just note it happening - when we're woken, we can report
+ * its occurrence.
+ */
+ (*spurious)++;
+ }
+}
+
+int platform_cpu_kill(unsigned int cpu)
+{
+ return 1;
+}
+
+/*
+ * platform-specific code to shutdown a CPU
+ *
+ * Called with IRQs disabled
+ */
+void __cpuinit platform_cpu_die(unsigned int cpu)
+{
+ int spurious = 0;
+
+ /*
+ * we're ready for shutdown now, so do it
+ */
+ cpu_enter_lowpower();
+ platform_do_lowpower(cpu, &spurious);
+
+ /*
+ * bring this CPU back into the world of cache
+ * coherency, and then restore interrupts
+ */
+ cpu_leave_lowpower();
+
+ if (spurious)
+ pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
+}
+
+int platform_cpu_disable(unsigned int cpu)
+{
+ /*
+	 * we don't allow CPU 0 to be shut down (it is still too special,
+	 * e.g. clock tick interrupts)
+ */
+ return cpu == 0 ? -EPERM : 0;
+}
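These platform_cpu_* hooks are driven by the generic ARM hotplug path: __cpu_disable() asks platform_cpu_disable() for permission, the dying CPU ends up spinning in platform_cpu_die() until pen_release names it again, and the surviving CPU confirms the kill via platform_cpu_kill(). From kernel code the whole sequence can be exercised with the generic CPU hotplug API; a hypothetical smoke test (CONFIG_HOTPLUG_CPU assumed, not part of this patch):

#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/printk.h>

/* hypothetical: offline and re-online CPU1 to exercise the hooks above */
static int __init hotplug_smoke_test(void)
{
	int ret;

	ret = cpu_down(1);	/* ends with CPU1 parked in platform_cpu_die() */
	if (ret) {
		pr_err("cpu_down(1) failed: %d\n", ret);
		return ret;
	}
	return cpu_up(1);	/* releases CPU1 again via boot_secondary() */
}
late_initcall(hotplug_smoke_test);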
diff --git a/arch/arm/mach-spear13xx/include/mach/debug-macro.S b/arch/arm/mach-spear13xx/include/mach/debug-macro.S
new file mode 100644
index 00000000000..ea1564609bd
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/debug-macro.S
@@ -0,0 +1,14 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/debug-macro.S
+ *
+ * Debugging macro include header spear13xx machine family
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <plat/debug-macro.S>
diff --git a/arch/arm/mach-spear13xx/include/mach/dma.h b/arch/arm/mach-spear13xx/include/mach/dma.h
new file mode 100644
index 00000000000..383ab04dc6c
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/dma.h
@@ -0,0 +1,128 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/dma.h
+ *
+ * DMA information for SPEAr13xx machine family
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_DMA_H
+#define __MACH_DMA_H
+
+/* request id of all the peripherals */
+enum dma_master_info {
+ /* Accessible from only one master */
+ DMA_MASTER_MCIF = 0,
+ DMA_MASTER_FSMC = 1,
+ /* Accessible from both 0 & 1 */
+ DMA_MASTER_MEMORY = 0,
+ DMA_MASTER_ADC = 0,
+ DMA_MASTER_UART0 = 0,
+ DMA_MASTER_SSP0 = 0,
+ DMA_MASTER_I2C0 = 0,
+
+#ifdef CONFIG_MACH_SPEAR1310
+ /* Accessible from only one master */
+ SPEAR1310_DMA_MASTER_JPEG = 1,
+
+ /* Accessible from both 0 & 1 */
+ SPEAR1310_DMA_MASTER_I2S = 0,
+ SPEAR1310_DMA_MASTER_UART1 = 0,
+ SPEAR1310_DMA_MASTER_UART2 = 0,
+ SPEAR1310_DMA_MASTER_UART3 = 0,
+ SPEAR1310_DMA_MASTER_UART4 = 0,
+ SPEAR1310_DMA_MASTER_UART5 = 0,
+ SPEAR1310_DMA_MASTER_I2C1 = 0,
+ SPEAR1310_DMA_MASTER_I2C2 = 0,
+ SPEAR1310_DMA_MASTER_I2C3 = 0,
+ SPEAR1310_DMA_MASTER_I2C4 = 0,
+ SPEAR1310_DMA_MASTER_I2C5 = 0,
+ SPEAR1310_DMA_MASTER_I2C6 = 0,
+ SPEAR1310_DMA_MASTER_I2C7 = 0,
+ SPEAR1310_DMA_MASTER_SSP1 = 0,
+#endif
+
+#ifdef CONFIG_MACH_SPEAR1340
+ /* Accessible from only one master */
+ SPEAR1340_DMA_MASTER_I2S_PLAY = 1,
+ SPEAR1340_DMA_MASTER_I2S_REC = 1,
+ SPEAR1340_DMA_MASTER_I2C1 = 1,
+ SPEAR1340_DMA_MASTER_UART1 = 1,
+
+ /* following are accessible from both master 0 & 1 */
+ SPEAR1340_DMA_MASTER_SPDIF = 0,
+ SPEAR1340_DMA_MASTER_CAM = 1,
+ SPEAR1340_DMA_MASTER_VIDEO_IN = 0,
+ SPEAR1340_DMA_MASTER_MALI = 0,
+#endif
+};
+
+enum request_id {
+ DMA_REQ_ADC = 0,
+ DMA_REQ_SSP0_TX = 4,
+ DMA_REQ_SSP0_RX = 5,
+ DMA_REQ_UART0_TX = 6,
+ DMA_REQ_UART0_RX = 7,
+ DMA_REQ_I2C0_TX = 8,
+ DMA_REQ_I2C0_RX = 9,
+
+#ifdef CONFIG_MACH_SPEAR1310
+ SPEAR1310_DMA_REQ_FROM_JPEG = 2,
+ SPEAR1310_DMA_REQ_TO_JPEG = 3,
+ SPEAR1310_DMA_REQ_I2S_TX = 10,
+ SPEAR1310_DMA_REQ_I2S_RX = 11,
+
+ SPEAR1310_DMA_REQ_I2C1_RX = 0,
+ SPEAR1310_DMA_REQ_I2C1_TX = 1,
+ SPEAR1310_DMA_REQ_I2C2_RX = 2,
+ SPEAR1310_DMA_REQ_I2C2_TX = 3,
+ SPEAR1310_DMA_REQ_I2C3_RX = 4,
+ SPEAR1310_DMA_REQ_I2C3_TX = 5,
+ SPEAR1310_DMA_REQ_I2C4_RX = 6,
+ SPEAR1310_DMA_REQ_I2C4_TX = 7,
+ SPEAR1310_DMA_REQ_I2C5_RX = 8,
+ SPEAR1310_DMA_REQ_I2C5_TX = 9,
+ SPEAR1310_DMA_REQ_I2C6_RX = 10,
+ SPEAR1310_DMA_REQ_I2C6_TX = 11,
+ SPEAR1310_DMA_REQ_UART1_RX = 12,
+ SPEAR1310_DMA_REQ_UART1_TX = 13,
+ SPEAR1310_DMA_REQ_UART2_RX = 14,
+ SPEAR1310_DMA_REQ_UART2_TX = 15,
+ SPEAR1310_DMA_REQ_UART5_RX = 16,
+ SPEAR1310_DMA_REQ_UART5_TX = 17,
+ SPEAR1310_DMA_REQ_SSP1_RX = 18,
+ SPEAR1310_DMA_REQ_SSP1_TX = 19,
+ SPEAR1310_DMA_REQ_I2C7_RX = 20,
+ SPEAR1310_DMA_REQ_I2C7_TX = 21,
+ SPEAR1310_DMA_REQ_UART3_RX = 28,
+ SPEAR1310_DMA_REQ_UART3_TX = 29,
+ SPEAR1310_DMA_REQ_UART4_RX = 30,
+ SPEAR1310_DMA_REQ_UART4_TX = 31,
+#endif
+
+#ifdef CONFIG_MACH_SPEAR1340
+ SPEAR1340_DMA_REQ_SPDIF_TX = 2,
+ SPEAR1340_DMA_REQ_SPDIF_RX = 3,
+ SPEAR1340_DMA_REQ_I2S_TX = 10,
+ SPEAR1340_DMA_REQ_I2S_RX = 11,
+ SPEAR1340_DMA_REQ_UART1_TX = 12,
+ SPEAR1340_DMA_REQ_UART1_RX = 13,
+ SPEAR1340_DMA_REQ_I2C1_TX = 14,
+ SPEAR1340_DMA_REQ_I2C1_RX = 15,
+ SPEAR1340_DMA_REQ_CAM0_EVEN = 0,
+ SPEAR1340_DMA_REQ_CAM0_ODD = 1,
+ SPEAR1340_DMA_REQ_CAM1_EVEN = 2,
+ SPEAR1340_DMA_REQ_CAM1_ODD = 3,
+ SPEAR1340_DMA_REQ_CAM2_EVEN = 4,
+ SPEAR1340_DMA_REQ_CAM2_ODD = 5,
+ SPEAR1340_DMA_REQ_CAM3_EVEN = 6,
+ SPEAR1340_DMA_REQ_CAM3_ODD = 7,
+#endif
+};
+
+#endif /* __MACH_DMA_H */
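The two enums encode, per peripheral, which AHB master port of the DesignWare DMAC can reach it and which hardware handshake (request) line it uses. Drivers combine them into a dw_dma_slave that the common filter routine (added later in this series in spear13xx.c) matches on. A sketch for UART0 TX, using only identifiers declared above plus the DWC_CFGH_* helpers from <linux/dw_dmac.h>, mirroring the SSP0/UART1 tables elsewhere in this patch:

#include <linux/dw_dmac.h>
#include <mach/dma.h>

/* sketch: slave config for memory-to-UART0 transfers */
static struct dw_dma_slave uart0_tx_dma = {
	.cfg_hi		= DWC_CFGH_DST_PER(DMA_REQ_UART0_TX),
	.cfg_lo		= 0,
	.src_master	= DMA_MASTER_MEMORY,	/* fetch data over master 0 */
	.dst_master	= DMA_MASTER_UART0,	/* UART0 sits behind master 0 */
};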
diff --git a/arch/arm/mach-spear13xx/include/mach/generic.h b/arch/arm/mach-spear13xx/include/mach/generic.h
new file mode 100644
index 00000000000..6d8c45b9f29
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/generic.h
@@ -0,0 +1,49 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/generic.h
+ *
+ * spear13xx machine family generic header file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_GENERIC_H
+#define __MACH_GENERIC_H
+
+#include <linux/dmaengine.h>
+#include <asm/mach/time.h>
+
+/* Add spear13xx structure declarations here */
+extern struct sys_timer spear13xx_timer;
+extern struct pl022_ssp_controller pl022_plat_data;
+extern struct dw_dma_platform_data dmac_plat_data;
+extern struct dw_dma_slave cf_dma_priv;
+extern struct dw_dma_slave nand_read_dma_priv;
+extern struct dw_dma_slave nand_write_dma_priv;
+
+/* Add spear13xx family function declarations here */
+void __init spear_setup_of_timer(void);
+void __init spear13xx_map_io(void);
+void __init spear13xx_dt_init_irq(void);
+void __init spear13xx_l2x0_init(void);
+bool dw_dma_filter(struct dma_chan *chan, void *slave);
+void spear_restart(char, const char *);
+void spear13xx_secondary_startup(void);
+
+#ifdef CONFIG_MACH_SPEAR1310
+void __init spear1310_clk_init(void);
+#else
+static inline void spear1310_clk_init(void) {}
+#endif
+
+#ifdef CONFIG_MACH_SPEAR1340
+void __init spear1340_clk_init(void);
+#else
+static inline void spear1340_clk_init(void) {}
+#endif
+
+#endif /* __MACH_GENERIC_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/gpio.h b/arch/arm/mach-spear13xx/include/mach/gpio.h
new file mode 100644
index 00000000000..cd6f4f86a56
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/gpio.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/gpio.h
+ *
+ * GPIO macros for SPEAr13xx machine family
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_GPIO_H
+#define __MACH_GPIO_H
+
+#include <plat/gpio.h>
+
+#endif /* __MACH_GPIO_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/hardware.h b/arch/arm/mach-spear13xx/include/mach/hardware.h
new file mode 100644
index 00000000000..40a8c178f10
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/hardware.h
@@ -0,0 +1 @@
+/* empty */
diff --git a/arch/arm/mach-spear13xx/include/mach/irqs.h b/arch/arm/mach-spear13xx/include/mach/irqs.h
new file mode 100644
index 00000000000..f542a24aa5f
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/irqs.h
@@ -0,0 +1,20 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/irqs.h
+ *
+ * IRQ helper macros for spear13xx machine family
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_IRQS_H
+#define __MACH_IRQS_H
+
+#define IRQ_GIC_END 160
+#define NR_IRQS IRQ_GIC_END
+
+#endif /* __MACH_IRQS_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/spear.h b/arch/arm/mach-spear13xx/include/mach/spear.h
new file mode 100644
index 00000000000..30c57ef7268
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/spear.h
@@ -0,0 +1,62 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/spear.h
+ *
+ * spear13xx Machine family specific definition
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_SPEAR13XX_H
+#define __MACH_SPEAR13XX_H
+
+#include <asm/memory.h>
+
+#define PERIP_GRP2_BASE UL(0xB3000000)
+#define VA_PERIP_GRP2_BASE UL(0xFE000000)
+#define MCIF_SDHCI_BASE UL(0xB3000000)
+#define SYSRAM0_BASE UL(0xB3800000)
+#define VA_SYSRAM0_BASE UL(0xFE800000)
+#define SYS_LOCATION (VA_SYSRAM0_BASE + 0x600)
+
+#define PERIP_GRP1_BASE UL(0xE0000000)
+#define VA_PERIP_GRP1_BASE UL(0xFD000000)
+#define UART_BASE UL(0xE0000000)
+#define VA_UART_BASE UL(0xFD000000)
+#define SSP_BASE UL(0xE0100000)
+#define MISC_BASE UL(0xE0700000)
+#define VA_MISC_BASE IOMEM(UL(0xFD700000))
+
+#define A9SM_AND_MPMC_BASE UL(0xEC000000)
+#define VA_A9SM_AND_MPMC_BASE UL(0xFC000000)
+
+/* A9SM peripheral offsets */
+#define A9SM_PERIP_BASE UL(0xEC800000)
+#define VA_A9SM_PERIP_BASE UL(0xFC800000)
+#define VA_SCU_BASE (VA_A9SM_PERIP_BASE + 0x00)
+
+#define L2CC_BASE UL(0xED000000)
+#define VA_L2CC_BASE IOMEM(UL(0xFB000000))
+
+/* others */
+#define DMAC0_BASE UL(0xEA800000)
+#define DMAC1_BASE UL(0xEB000000)
+#define MCIF_CF_BASE UL(0xB2800000)
+
+/* Devices present in SPEAr1310 */
+#ifdef CONFIG_MACH_SPEAR1310
+#define SPEAR1310_RAS_GRP1_BASE UL(0xD8000000)
+#define VA_SPEAR1310_RAS_GRP1_BASE UL(0xFA000000)
+#define SPEAR1310_RAS_BASE UL(0xD8400000)
+#define VA_SPEAR1310_RAS_BASE IOMEM(UL(0xFA400000))
+#endif /* CONFIG_MACH_SPEAR1310 */
+
+/* Debug uart for linux, will be used for debug and uncompress messages */
+#define SPEAR_DBG_UART_BASE UART_BASE
+#define VA_SPEAR_DBG_UART_BASE VA_UART_BASE
+
+#endif /* __MACH_SPEAR13XX_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/spear1310_misc_regs.h b/arch/arm/mach-spear13xx/include/mach/spear1310_misc_regs.h
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/spear1310_misc_regs.h
diff --git a/arch/arm/mach-spear13xx/include/mach/spear1340_misc_regs.h b/arch/arm/mach-spear13xx/include/mach/spear1340_misc_regs.h
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/spear1340_misc_regs.h
diff --git a/arch/arm/mach-spear13xx/include/mach/timex.h b/arch/arm/mach-spear13xx/include/mach/timex.h
new file mode 100644
index 00000000000..31af3e8d976
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/timex.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/timex.h
+ *
+ * SPEAr13xx machine family specific timex definitions
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_TIMEX_H
+#define __MACH_TIMEX_H
+
+#include <plat/timex.h>
+
+#endif /* __MACH_TIMEX_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/uncompress.h b/arch/arm/mach-spear13xx/include/mach/uncompress.h
new file mode 100644
index 00000000000..c7840896ae6
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/uncompress.h
@@ -0,0 +1,19 @@
+/*
+ * arch/arm/mach-spear13xx/include/mach/uncompress.h
+ *
+ * Serial port stubs for kernel decompress status messages
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_UNCOMPRESS_H
+#define __MACH_UNCOMPRESS_H
+
+#include <plat/uncompress.h>
+
+#endif /* __MACH_UNCOMPRESS_H */
diff --git a/arch/arm/mach-spear13xx/platsmp.c b/arch/arm/mach-spear13xx/platsmp.c
new file mode 100644
index 00000000000..f5d07f2663d
--- /dev/null
+++ b/arch/arm/mach-spear13xx/platsmp.c
@@ -0,0 +1,127 @@
+/*
+ * arch/arm/mach-spear13xx/platsmp.c
+ *
+ * based upon linux/arch/arm/mach-realview/platsmp.c
+ *
+ * Copyright (C) 2012 ST Microelectronics Ltd.
+ * Shiraz Hashim <shiraz.hashim@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/io.h>
+#include <linux/smp.h>
+#include <asm/cacheflush.h>
+#include <asm/hardware/gic.h>
+#include <asm/smp_scu.h>
+#include <mach/spear.h>
+
+/*
+ * control for which core is the next to come out of the secondary
+ * boot "holding pen"
+ */
+volatile int __cpuinitdata pen_release = -1;
+static DEFINE_SPINLOCK(boot_lock);
+
+static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
+extern void spear13xx_secondary_startup(void);
+
+void __cpuinit platform_secondary_init(unsigned int cpu)
+{
+ /*
+ * if any interrupts are already enabled for the primary
+ * core (e.g. timer irq), then they will not have been enabled
+ * for us: do so
+ */
+ gic_secondary_init(0);
+
+ /*
+ * let the primary processor know we're out of the
+ * pen, then head off into the C entry point
+ */
+ pen_release = -1;
+ smp_wmb();
+
+ /*
+ * Synchronise with the boot thread.
+ */
+ spin_lock(&boot_lock);
+ spin_unlock(&boot_lock);
+}
+
+int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ unsigned long timeout;
+
+ /*
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+ spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+ * the holding pen - release it, then wait for it to flag
+ * that it has been released by resetting pen_release.
+ *
+ * Note that "pen_release" is the hardware CPU ID, whereas
+ * "cpu" is Linux's internal ID.
+ */
+ pen_release = cpu;
+ flush_cache_all();
+ outer_flush_all();
+
+ timeout = jiffies + (1 * HZ);
+ while (time_before(jiffies, timeout)) {
+ smp_rmb();
+ if (pen_release == -1)
+ break;
+
+ udelay(10);
+ }
+
+ /*
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+ spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+}
+
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+void __init smp_init_cpus(void)
+{
+ unsigned int i, ncores = scu_get_core_count(scu_base);
+
+ if (ncores > nr_cpu_ids) {
+ pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+ ncores, nr_cpu_ids);
+ ncores = nr_cpu_ids;
+ }
+
+ for (i = 0; i < ncores; i++)
+ set_cpu_possible(i, true);
+
+ set_smp_cross_call(gic_raise_softirq);
+}
+
+void __init platform_smp_prepare_cpus(unsigned int max_cpus)
+{
+
+ scu_enable(scu_base);
+
+ /*
+ * Write the address of secondary startup into the system-wide location
+ * (presently it is in SRAM). The BootMonitor waits until it receives a
+ * soft interrupt, and then the secondary CPU branches to this address.
+ */
+ __raw_writel(virt_to_phys(spear13xx_secondary_startup), SYS_LOCATION);
+}
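boot_secondary() and the assembly holding pen communicate through pen_release while the secondary core still has its caches and coherency disabled; the flush_cache_all()/outer_flush_all() after the store is what pushes the new value out to memory where the spinning core can see it. Other mach-* ports of this era factor that into a small helper that cleans only the affected line; a sketch of that pattern, not part of this patch:

#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/memory.h>

/*
 * Sketch: publish a new pen_release value to a core that is not yet
 * cache-coherent, cleaning just the cache line that holds it.
 */
static void write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
}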
diff --git a/arch/arm/mach-spear13xx/spear1310.c b/arch/arm/mach-spear13xx/spear1310.c
new file mode 100644
index 00000000000..fefd15b2f38
--- /dev/null
+++ b/arch/arm/mach-spear13xx/spear1310.c
@@ -0,0 +1,88 @@
+/*
+ * arch/arm/mach-spear13xx/spear1310.c
+ *
+ * SPEAr1310 machine source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) "SPEAr1310: " fmt
+
+#include <linux/amba/pl022.h>
+#include <linux/of_platform.h>
+#include <asm/hardware/gic.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+/* Base addresses */
+#define SPEAR1310_SSP1_BASE UL(0x5D400000)
+#define SPEAR1310_SATA0_BASE UL(0xB1000000)
+#define SPEAR1310_SATA1_BASE UL(0xB1800000)
+#define SPEAR1310_SATA2_BASE UL(0xB4000000)
+
+/* ssp device registration */
+static struct pl022_ssp_controller ssp1_plat_data = {
+ .bus_id = 0,
+ .enable_dma = 0,
+ .num_chipselect = 3,
+};
+
+/* Add SPEAr1310 auxdata to pass platform data */
+static struct of_dev_auxdata spear1310_auxdata_lookup[] __initdata = {
+ OF_DEV_AUXDATA("arasan,cf-spear1340", MCIF_CF_BASE, NULL, &cf_dma_priv),
+ OF_DEV_AUXDATA("snps,dma-spear1340", DMAC0_BASE, NULL, &dmac_plat_data),
+ OF_DEV_AUXDATA("snps,dma-spear1340", DMAC1_BASE, NULL, &dmac_plat_data),
+ OF_DEV_AUXDATA("arm,pl022", SSP_BASE, NULL, &pl022_plat_data),
+
+ OF_DEV_AUXDATA("arm,pl022", SPEAR1310_SSP1_BASE, NULL, &ssp1_plat_data),
+ {}
+};
+
+static void __init spear1310_dt_init(void)
+{
+ of_platform_populate(NULL, of_default_bus_match_table,
+ spear1310_auxdata_lookup, NULL);
+}
+
+static const char * const spear1310_dt_board_compat[] = {
+ "st,spear1310",
+ "st,spear1310-evb",
+ NULL,
+};
+
+/*
+ * Following will create 16MB static virtual/physical mappings
+ * PHYSICAL VIRTUAL
+ * 0xD8000000 0xFA000000
+ */
+struct map_desc spear1310_io_desc[] __initdata = {
+ {
+ .virtual = VA_SPEAR1310_RAS_GRP1_BASE,
+ .pfn = __phys_to_pfn(SPEAR1310_RAS_GRP1_BASE),
+ .length = SZ_16M,
+ .type = MT_DEVICE
+ },
+};
+
+static void __init spear1310_map_io(void)
+{
+ iotable_init(spear1310_io_desc, ARRAY_SIZE(spear1310_io_desc));
+ spear13xx_map_io();
+}
+
+DT_MACHINE_START(SPEAR1310_DT, "ST SPEAr1310 SoC with Flattened Device Tree")
+ .map_io = spear1310_map_io,
+ .init_irq = spear13xx_dt_init_irq,
+ .handle_irq = gic_handle_irq,
+ .timer = &spear13xx_timer,
+ .init_machine = spear1310_dt_init,
+ .restart = spear_restart,
+ .dt_compat = spear1310_dt_board_compat,
+MACHINE_END
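OF_DEV_AUXDATA() is how legacy platform data reaches devices that are otherwise instantiated purely from the device tree: of_platform_populate() matches each node against the (compatible string, unit address) pair and, on a hit, attaches the given pointer as the platform device's platform_data. Assuming the 3.4-era layout of struct of_dev_auxdata, each entry in the table above is roughly equivalent to an open-coded initializer like this sketch:

#include <linux/of_platform.h>

/* sketch: open-coded equivalent of the SSP1 OF_DEV_AUXDATA() entry above */
static struct of_dev_auxdata ssp1_auxdata __initdata = {
	.compatible	= "arm,pl022",
	.phys_addr	= SPEAR1310_SSP1_BASE,
	.name		= NULL,			/* keep the default device name */
	.platform_data	= &ssp1_plat_data,
};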
diff --git a/arch/arm/mach-spear13xx/spear1340.c b/arch/arm/mach-spear13xx/spear1340.c
new file mode 100644
index 00000000000..ee38cbc5686
--- /dev/null
+++ b/arch/arm/mach-spear13xx/spear1340.c
@@ -0,0 +1,192 @@
+/*
+ * arch/arm/mach-spear13xx/spear1340.c
+ *
+ * SPEAr1340 machine source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) "SPEAr1340: " fmt
+
+#include <linux/ahci_platform.h>
+#include <linux/amba/serial.h>
+#include <linux/delay.h>
+#include <linux/dw_dmac.h>
+#include <linux/of_platform.h>
+#include <asm/hardware/gic.h>
+#include <asm/mach/arch.h>
+#include <mach/dma.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+/* Base addresses */
+#define SPEAR1340_SATA_BASE UL(0xB1000000)
+#define SPEAR1340_UART1_BASE UL(0xB4100000)
+
+/* Power Management Registers */
+#define SPEAR1340_PCM_CFG (VA_MISC_BASE + 0x100)
+#define SPEAR1340_PCM_WKUP_CFG (VA_MISC_BASE + 0x104)
+#define SPEAR1340_SWITCH_CTR (VA_MISC_BASE + 0x108)
+
+#define SPEAR1340_PERIP1_SW_RST (VA_MISC_BASE + 0x318)
+#define SPEAR1340_PERIP2_SW_RST (VA_MISC_BASE + 0x31C)
+#define SPEAR1340_PERIP3_SW_RST (VA_MISC_BASE + 0x320)
+
+/* PCIE - SATA configuration registers */
+#define SPEAR1340_PCIE_SATA_CFG (VA_MISC_BASE + 0x424)
+	/* PCIE CFG masks */
+ #define SPEAR1340_PCIE_CFG_DEVICE_PRESENT (1 << 11)
+ #define SPEAR1340_PCIE_CFG_POWERUP_RESET (1 << 10)
+ #define SPEAR1340_PCIE_CFG_CORE_CLK_EN (1 << 9)
+ #define SPEAR1340_PCIE_CFG_AUX_CLK_EN (1 << 8)
+ #define SPEAR1340_SATA_CFG_TX_CLK_EN (1 << 4)
+ #define SPEAR1340_SATA_CFG_RX_CLK_EN (1 << 3)
+ #define SPEAR1340_SATA_CFG_POWERUP_RESET (1 << 2)
+ #define SPEAR1340_SATA_CFG_PM_CLK_EN (1 << 1)
+ #define SPEAR1340_PCIE_SATA_SEL_PCIE (0)
+ #define SPEAR1340_PCIE_SATA_SEL_SATA (1)
+ #define SPEAR1340_SATA_PCIE_CFG_MASK 0xF1F
+ #define SPEAR1340_PCIE_CFG_VAL (SPEAR1340_PCIE_SATA_SEL_PCIE | \
+ SPEAR1340_PCIE_CFG_AUX_CLK_EN | \
+ SPEAR1340_PCIE_CFG_CORE_CLK_EN | \
+ SPEAR1340_PCIE_CFG_POWERUP_RESET | \
+ SPEAR1340_PCIE_CFG_DEVICE_PRESENT)
+ #define SPEAR1340_SATA_CFG_VAL (SPEAR1340_PCIE_SATA_SEL_SATA | \
+ SPEAR1340_SATA_CFG_PM_CLK_EN | \
+ SPEAR1340_SATA_CFG_POWERUP_RESET | \
+ SPEAR1340_SATA_CFG_RX_CLK_EN | \
+ SPEAR1340_SATA_CFG_TX_CLK_EN)
+
+#define SPEAR1340_PCIE_MIPHY_CFG (VA_MISC_BASE + 0x428)
+ #define SPEAR1340_MIPHY_OSC_BYPASS_EXT (1 << 31)
+ #define SPEAR1340_MIPHY_CLK_REF_DIV2 (1 << 27)
+ #define SPEAR1340_MIPHY_CLK_REF_DIV4 (2 << 27)
+ #define SPEAR1340_MIPHY_CLK_REF_DIV8 (3 << 27)
+ #define SPEAR1340_MIPHY_PLL_RATIO_TOP(x) (x << 0)
+ #define SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA \
+ (SPEAR1340_MIPHY_OSC_BYPASS_EXT | \
+ SPEAR1340_MIPHY_CLK_REF_DIV2 | \
+ SPEAR1340_MIPHY_PLL_RATIO_TOP(60))
+ #define SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA_25M_CRYSTAL_CLK \
+ (SPEAR1340_MIPHY_PLL_RATIO_TOP(120))
+ #define SPEAR1340_PCIE_SATA_MIPHY_CFG_PCIE \
+ (SPEAR1340_MIPHY_OSC_BYPASS_EXT | \
+ SPEAR1340_MIPHY_PLL_RATIO_TOP(25))
+
+static struct dw_dma_slave uart1_dma_param[] = {
+ {
+ /* Tx */
+ .cfg_hi = DWC_CFGH_DST_PER(SPEAR1340_DMA_REQ_UART1_TX),
+ .cfg_lo = 0,
+ .src_master = DMA_MASTER_MEMORY,
+ .dst_master = SPEAR1340_DMA_MASTER_UART1,
+ }, {
+ /* Rx */
+ .cfg_hi = DWC_CFGH_SRC_PER(SPEAR1340_DMA_REQ_UART1_RX),
+ .cfg_lo = 0,
+ .src_master = SPEAR1340_DMA_MASTER_UART1,
+ .dst_master = DMA_MASTER_MEMORY,
+ }
+};
+
+static struct amba_pl011_data uart1_data = {
+ .dma_filter = dw_dma_filter,
+ .dma_tx_param = &uart1_dma_param[0],
+ .dma_rx_param = &uart1_dma_param[1],
+};
+
+/* SATA device registration */
+static int sata_miphy_init(struct device *dev, void __iomem *addr)
+{
+ writel(SPEAR1340_SATA_CFG_VAL, SPEAR1340_PCIE_SATA_CFG);
+ writel(SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA_25M_CRYSTAL_CLK,
+ SPEAR1340_PCIE_MIPHY_CFG);
+ /* Switch on sata power domain */
+ writel((readl(SPEAR1340_PCM_CFG) | (0x800)), SPEAR1340_PCM_CFG);
+ msleep(20);
+ /* Disable PCIE SATA Controller reset */
+ writel((readl(SPEAR1340_PERIP1_SW_RST) & (~0x1000)),
+ SPEAR1340_PERIP1_SW_RST);
+ msleep(20);
+
+ return 0;
+}
+
+void sata_miphy_exit(struct device *dev)
+{
+ writel(0, SPEAR1340_PCIE_SATA_CFG);
+ writel(0, SPEAR1340_PCIE_MIPHY_CFG);
+
+ /* Enable PCIE SATA Controller reset */
+ writel((readl(SPEAR1340_PERIP1_SW_RST) | (0x1000)),
+ SPEAR1340_PERIP1_SW_RST);
+ msleep(20);
+ /* Switch off sata power domain */
+ writel((readl(SPEAR1340_PCM_CFG) & (~0x800)), SPEAR1340_PCM_CFG);
+ msleep(20);
+}
+
+int sata_suspend(struct device *dev)
+{
+ if (dev->power.power_state.event == PM_EVENT_FREEZE)
+ return 0;
+
+ sata_miphy_exit(dev);
+
+ return 0;
+}
+
+int sata_resume(struct device *dev)
+{
+ if (dev->power.power_state.event == PM_EVENT_THAW)
+ return 0;
+
+ return sata_miphy_init(dev, NULL);
+}
+
+static struct ahci_platform_data sata_pdata = {
+ .init = sata_miphy_init,
+ .exit = sata_miphy_exit,
+ .suspend = sata_suspend,
+ .resume = sata_resume,
+};
+
+/* Add SPEAr1340 auxdata to pass platform data */
+static struct of_dev_auxdata spear1340_auxdata_lookup[] __initdata = {
+ OF_DEV_AUXDATA("arasan,cf-spear1340", MCIF_CF_BASE, NULL, &cf_dma_priv),
+ OF_DEV_AUXDATA("snps,dma-spear1340", DMAC0_BASE, NULL, &dmac_plat_data),
+ OF_DEV_AUXDATA("snps,dma-spear1340", DMAC1_BASE, NULL, &dmac_plat_data),
+ OF_DEV_AUXDATA("arm,pl022", SSP_BASE, NULL, &pl022_plat_data),
+
+ OF_DEV_AUXDATA("snps,spear-ahci", SPEAR1340_SATA_BASE, NULL,
+ &sata_pdata),
+ OF_DEV_AUXDATA("arm,pl011", SPEAR1340_UART1_BASE, NULL, &uart1_data),
+ {}
+};
+
+static void __init spear1340_dt_init(void)
+{
+ of_platform_populate(NULL, of_default_bus_match_table,
+ spear1340_auxdata_lookup, NULL);
+}
+
+static const char * const spear1340_dt_board_compat[] = {
+ "st,spear1340",
+ "st,spear1340-evb",
+ NULL,
+};
+
+DT_MACHINE_START(SPEAR1340_DT, "ST SPEAr1340 SoC with Flattened Device Tree")
+ .map_io = spear13xx_map_io,
+ .init_irq = spear13xx_dt_init_irq,
+ .handle_irq = gic_handle_irq,
+ .timer = &spear13xx_timer,
+ .init_machine = spear1340_dt_init,
+ .restart = spear_restart,
+ .dt_compat = spear1340_dt_board_compat,
+MACHINE_END
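sata_miphy_init()/sata_miphy_exit() follow the usual read-modify-write pattern on the shared misc registers: read the register through its fixed virtual mapping, set or clear the power-domain or reset bit, write it back, and give the hardware time to settle. A small hypothetical helper capturing that idiom (the 20 ms delay matches the code above; the helper itself is not part of this patch):

#include <linux/io.h>
#include <linux/delay.h>

/* hypothetical helper: set bits in a misc register and let the HW settle */
static void spear1340_misc_set(void __iomem *reg, u32 bits)
{
	writel(readl(reg) | bits, reg);		/* read-modify-write, as above */
	msleep(20);
}

With such a helper, the power-domain step in sata_miphy_init() would reduce to spear1340_misc_set(SPEAR1340_PCM_CFG, 0x800).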
diff --git a/arch/arm/mach-spear13xx/spear13xx.c b/arch/arm/mach-spear13xx/spear13xx.c
new file mode 100644
index 00000000000..50b349ae863
--- /dev/null
+++ b/arch/arm/mach-spear13xx/spear13xx.c
@@ -0,0 +1,197 @@
+/*
+ * arch/arm/mach-spear13xx/spear13xx.c
+ *
+ * SPEAr13XX machines common source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) "SPEAr13xx: " fmt
+
+#include <linux/amba/pl022.h>
+#include <linux/clk.h>
+#include <linux/dw_dmac.h>
+#include <linux/err.h>
+#include <linux/of_irq.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/hardware/gic.h>
+#include <asm/mach/map.h>
+#include <asm/smp_twd.h>
+#include <mach/dma.h>
+#include <mach/generic.h>
+#include <mach/spear.h>
+
+/* common dw_dma filter routine to be used by peripherals */
+bool dw_dma_filter(struct dma_chan *chan, void *slave)
+{
+ struct dw_dma_slave *dws = (struct dw_dma_slave *)slave;
+
+ if (chan->device->dev == dws->dma_dev) {
+ chan->private = slave;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+/* ssp device registration */
+static struct dw_dma_slave ssp_dma_param[] = {
+ {
+ /* Tx */
+ .cfg_hi = DWC_CFGH_DST_PER(DMA_REQ_SSP0_TX),
+ .cfg_lo = 0,
+ .src_master = DMA_MASTER_MEMORY,
+ .dst_master = DMA_MASTER_SSP0,
+ }, {
+ /* Rx */
+ .cfg_hi = DWC_CFGH_SRC_PER(DMA_REQ_SSP0_RX),
+ .cfg_lo = 0,
+ .src_master = DMA_MASTER_SSP0,
+ .dst_master = DMA_MASTER_MEMORY,
+ }
+};
+
+struct pl022_ssp_controller pl022_plat_data = {
+ .bus_id = 0,
+ .enable_dma = 1,
+ .dma_filter = dw_dma_filter,
+ .dma_rx_param = &ssp_dma_param[1],
+ .dma_tx_param = &ssp_dma_param[0],
+ .num_chipselect = 3,
+};
+
+/* CF device registration */
+struct dw_dma_slave cf_dma_priv = {
+ .cfg_hi = 0,
+ .cfg_lo = 0,
+ .src_master = 0,
+ .dst_master = 0,
+};
+
+/* dmac device registration */
+struct dw_dma_platform_data dmac_plat_data = {
+ .nr_channels = 8,
+ .chan_allocation_order = CHAN_ALLOCATION_DESCENDING,
+ .chan_priority = CHAN_PRIORITY_DESCENDING,
+};
+
+void __init spear13xx_l2x0_init(void)
+{
+ /*
+ * 512KB (64KB/way), 8-way associativity, parity supported
+ *
+	 * FIXME: the 9th bit of the Auxiliary Control register must be set
+	 * for some spear13xx devices for stable L2 operation.
+ *
+ * Enable Early BRESP, L2 prefetch for Instruction and Data,
+ * write alloc and 'Full line of zero' options
+ *
+ */
+
+ writel_relaxed(0x06, VA_L2CC_BASE + L2X0_PREFETCH_CTRL);
+
+ /*
+ * Program following latencies in order to make
+ * SPEAr1340 work at 600 MHz
+ */
+ writel_relaxed(0x221, VA_L2CC_BASE + L2X0_TAG_LATENCY_CTRL);
+ writel_relaxed(0x441, VA_L2CC_BASE + L2X0_DATA_LATENCY_CTRL);
+ l2x0_init(VA_L2CC_BASE, 0x70A60001, 0xfe00ffff);
+}
+
+/*
+ * Following will create 16MB static virtual/physical mappings
+ * PHYSICAL VIRTUAL
+ * 0xB3000000 0xFE000000
+ * 0xE0000000 0xFD000000
+ * 0xEC000000 0xFC000000
+ * 0xED000000 0xFB000000
+ */
+struct map_desc spear13xx_io_desc[] __initdata = {
+ {
+ .virtual = VA_PERIP_GRP2_BASE,
+ .pfn = __phys_to_pfn(PERIP_GRP2_BASE),
+ .length = SZ_16M,
+ .type = MT_DEVICE
+ }, {
+ .virtual = VA_PERIP_GRP1_BASE,
+ .pfn = __phys_to_pfn(PERIP_GRP1_BASE),
+ .length = SZ_16M,
+ .type = MT_DEVICE
+ }, {
+ .virtual = VA_A9SM_AND_MPMC_BASE,
+ .pfn = __phys_to_pfn(A9SM_AND_MPMC_BASE),
+ .length = SZ_16M,
+ .type = MT_DEVICE
+ }, {
+ .virtual = (unsigned long)VA_L2CC_BASE,
+ .pfn = __phys_to_pfn(L2CC_BASE),
+ .length = SZ_4K,
+ .type = MT_DEVICE
+ },
+};
+
+/* This will create static memory mapping for selected devices */
+void __init spear13xx_map_io(void)
+{
+ iotable_init(spear13xx_io_desc, ARRAY_SIZE(spear13xx_io_desc));
+}
+
+static void __init spear13xx_clk_init(void)
+{
+ if (of_machine_is_compatible("st,spear1310"))
+ spear1310_clk_init();
+ else if (of_machine_is_compatible("st,spear1340"))
+ spear1340_clk_init();
+ else
+ pr_err("%s: Unknown machine\n", __func__);
+}
+
+static void __init spear13xx_timer_init(void)
+{
+ char pclk_name[] = "osc_24m_clk";
+ struct clk *gpt_clk, *pclk;
+
+ spear13xx_clk_init();
+
+ /* get the system timer clock */
+ gpt_clk = clk_get_sys("gpt0", NULL);
+ if (IS_ERR(gpt_clk)) {
+ pr_err("%s:couldn't get clk for gpt\n", __func__);
+ BUG();
+ }
+
+ /* get the suitable parent clock for timer*/
+ pclk = clk_get(NULL, pclk_name);
+ if (IS_ERR(pclk)) {
+ pr_err("%s:couldn't get %s as parent for gpt\n", __func__,
+ pclk_name);
+ BUG();
+ }
+
+ clk_set_parent(gpt_clk, pclk);
+ clk_put(gpt_clk);
+ clk_put(pclk);
+
+ spear_setup_of_timer();
+ twd_local_timer_of_register();
+}
+
+struct sys_timer spear13xx_timer = {
+ .init = spear13xx_timer_init,
+};
+
+static const struct of_device_id gic_of_match[] __initconst = {
+ { .compatible = "arm,cortex-a9-gic", .data = gic_of_init },
+ { /* Sentinel */ }
+};
+
+void __init spear13xx_dt_init_irq(void)
+{
+ of_irq_init(gic_of_match);
+}
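dw_dma_filter() above is intended to be passed to dma_request_channel() by the client drivers that receive these dw_dma_slave pointers as platform data (pl022 and pl011 in this patch); the filter only accepts a channel whose DMA controller matches slave->dma_dev, so the caller has to fill that field in first. A hedged sketch of the client side, with illustrative names:

#include <linux/dmaengine.h>
#include <linux/dw_dmac.h>
#include <mach/generic.h>	/* declares dw_dma_filter() */

/* sketch: how a peripheral driver could claim a channel via dw_dma_filter() */
static struct dma_chan *claim_tx_channel(struct dw_dma_slave *dws,
					 struct device *dmac_dev)
{
	dma_cap_mask_t mask;

	dws->dma_dev = dmac_dev;	/* must point at the DMAC's device */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, dw_dma_filter, dws);
}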
diff --git a/arch/arm/mach-spear3xx/Makefile b/arch/arm/mach-spear3xx/Makefile
index 17b5d83cf2d..8d12faa178f 100644
--- a/arch/arm/mach-spear3xx/Makefile
+++ b/arch/arm/mach-spear3xx/Makefile
@@ -3,7 +3,7 @@
#
# common files
-obj-$(CONFIG_ARCH_SPEAR3XX) += spear3xx.o clock.o
+obj-$(CONFIG_ARCH_SPEAR3XX) += spear3xx.o
# spear300 specific files
obj-$(CONFIG_MACH_SPEAR300) += spear300.o
diff --git a/arch/arm/mach-spear3xx/clock.c b/arch/arm/mach-spear3xx/clock.c
deleted file mode 100644
index cd6c1109908..00000000000
--- a/arch/arm/mach-spear3xx/clock.c
+++ /dev/null
@@ -1,892 +0,0 @@
-/*
- * arch/arm/mach-spear3xx/clock.c
- *
- * SPEAr3xx machines clock framework source file
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/clkdev.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/of_platform.h>
-#include <asm/mach-types.h>
-#include <plat/clock.h>
-#include <mach/misc_regs.h>
-#include <mach/spear.h>
-
-#define PLL1_CTR (MISC_BASE + 0x008)
-#define PLL1_FRQ (MISC_BASE + 0x00C)
-#define PLL1_MOD (MISC_BASE + 0x010)
-#define PLL2_CTR (MISC_BASE + 0x014)
-/* PLL_CTR register masks */
-#define PLL_ENABLE 2
-#define PLL_MODE_SHIFT 4
-#define PLL_MODE_MASK 0x3
-#define PLL_MODE_NORMAL 0
-#define PLL_MODE_FRACTION 1
-#define PLL_MODE_DITH_DSB 2
-#define PLL_MODE_DITH_SSB 3
-
-#define PLL2_FRQ (MISC_BASE + 0x018)
-/* PLL FRQ register masks */
-#define PLL_DIV_N_SHIFT 0
-#define PLL_DIV_N_MASK 0xFF
-#define PLL_DIV_P_SHIFT 8
-#define PLL_DIV_P_MASK 0x7
-#define PLL_NORM_FDBK_M_SHIFT 24
-#define PLL_NORM_FDBK_M_MASK 0xFF
-#define PLL_DITH_FDBK_M_SHIFT 16
-#define PLL_DITH_FDBK_M_MASK 0xFFFF
-
-#define PLL2_MOD (MISC_BASE + 0x01C)
-#define PLL_CLK_CFG (MISC_BASE + 0x020)
-#define CORE_CLK_CFG (MISC_BASE + 0x024)
-/* CORE CLK CFG register masks */
-#define PLL_HCLK_RATIO_SHIFT 10
-#define PLL_HCLK_RATIO_MASK 0x3
-#define HCLK_PCLK_RATIO_SHIFT 8
-#define HCLK_PCLK_RATIO_MASK 0x3
-
-#define PERIP_CLK_CFG (MISC_BASE + 0x028)
-/* PERIP_CLK_CFG register masks */
-#define UART_CLK_SHIFT 4
-#define UART_CLK_MASK 0x1
-#define FIRDA_CLK_SHIFT 5
-#define FIRDA_CLK_MASK 0x3
-#define GPT0_CLK_SHIFT 8
-#define GPT1_CLK_SHIFT 11
-#define GPT2_CLK_SHIFT 12
-#define GPT_CLK_MASK 0x1
-#define AUX_CLK_PLL3_VAL 0
-#define AUX_CLK_PLL1_VAL 1
-
-#define PERIP1_CLK_ENB (MISC_BASE + 0x02C)
-/* PERIP1_CLK_ENB register masks */
-#define UART_CLK_ENB 3
-#define SSP_CLK_ENB 5
-#define I2C_CLK_ENB 7
-#define JPEG_CLK_ENB 8
-#define FIRDA_CLK_ENB 10
-#define GPT1_CLK_ENB 11
-#define GPT2_CLK_ENB 12
-#define ADC_CLK_ENB 15
-#define RTC_CLK_ENB 17
-#define GPIO_CLK_ENB 18
-#define DMA_CLK_ENB 19
-#define SMI_CLK_ENB 21
-#define GMAC_CLK_ENB 23
-#define USBD_CLK_ENB 24
-#define USBH_CLK_ENB 25
-#define C3_CLK_ENB 31
-
-#define RAS_CLK_ENB (MISC_BASE + 0x034)
-
-#define PRSC1_CLK_CFG (MISC_BASE + 0x044)
-#define PRSC2_CLK_CFG (MISC_BASE + 0x048)
-#define PRSC3_CLK_CFG (MISC_BASE + 0x04C)
-/* gpt synthesizer register masks */
-#define GPT_MSCALE_SHIFT 0
-#define GPT_MSCALE_MASK 0xFFF
-#define GPT_NSCALE_SHIFT 12
-#define GPT_NSCALE_MASK 0xF
-
-#define AMEM_CLK_CFG (MISC_BASE + 0x050)
-#define EXPI_CLK_CFG (MISC_BASE + 0x054)
-#define CLCD_CLK_SYNT (MISC_BASE + 0x05C)
-#define FIRDA_CLK_SYNT (MISC_BASE + 0x060)
-#define UART_CLK_SYNT (MISC_BASE + 0x064)
-#define GMAC_CLK_SYNT (MISC_BASE + 0x068)
-#define RAS1_CLK_SYNT (MISC_BASE + 0x06C)
-#define RAS2_CLK_SYNT (MISC_BASE + 0x070)
-#define RAS3_CLK_SYNT (MISC_BASE + 0x074)
-#define RAS4_CLK_SYNT (MISC_BASE + 0x078)
-/* aux clk synthesiser register masks for irda to ras4 */
-#define AUX_SYNT_ENB 31
-#define AUX_EQ_SEL_SHIFT 30
-#define AUX_EQ_SEL_MASK 1
-#define AUX_EQ1_SEL 0
-#define AUX_EQ2_SEL 1
-#define AUX_XSCALE_SHIFT 16
-#define AUX_XSCALE_MASK 0xFFF
-#define AUX_YSCALE_SHIFT 0
-#define AUX_YSCALE_MASK 0xFFF
-
-/* root clks */
-/* 32 KHz oscillator clock */
-static struct clk osc_32k_clk = {
- .flags = ALWAYS_ENABLED,
- .rate = 32000,
-};
-
-/* 24 MHz oscillator clock */
-static struct clk osc_24m_clk = {
- .flags = ALWAYS_ENABLED,
- .rate = 24000000,
-};
-
-/* clock derived from 32 KHz osc clk */
-/* rtc clock */
-static struct clk rtc_clk = {
- .pclk = &osc_32k_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = RTC_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* clock derived from 24 MHz osc clk */
-/* pll masks structure */
-static struct pll_clk_masks pll1_masks = {
- .mode_mask = PLL_MODE_MASK,
- .mode_shift = PLL_MODE_SHIFT,
- .norm_fdbk_m_mask = PLL_NORM_FDBK_M_MASK,
- .norm_fdbk_m_shift = PLL_NORM_FDBK_M_SHIFT,
- .dith_fdbk_m_mask = PLL_DITH_FDBK_M_MASK,
- .dith_fdbk_m_shift = PLL_DITH_FDBK_M_SHIFT,
- .div_p_mask = PLL_DIV_P_MASK,
- .div_p_shift = PLL_DIV_P_SHIFT,
- .div_n_mask = PLL_DIV_N_MASK,
- .div_n_shift = PLL_DIV_N_SHIFT,
-};
-
-/* pll1 configuration structure */
-static struct pll_clk_config pll1_config = {
- .mode_reg = PLL1_CTR,
- .cfg_reg = PLL1_FRQ,
- .masks = &pll1_masks,
-};
-
-/* pll rate configuration table, in ascending order of rates */
-struct pll_rate_tbl pll_rtbl[] = {
- {.mode = 0, .m = 0x85, .n = 0x0C, .p = 0x1}, /* 266 MHz */
- {.mode = 0, .m = 0xA6, .n = 0x0C, .p = 0x1}, /* 332 MHz */
-};
-
-/* PLL1 clock */
-static struct clk pll1_clk = {
- .flags = ENABLED_ON_INIT,
- .pclk = &osc_24m_clk,
- .en_reg = PLL1_CTR,
- .en_reg_bit = PLL_ENABLE,
- .calc_rate = &pll_calc_rate,
- .recalc = &pll_clk_recalc,
- .set_rate = &pll_clk_set_rate,
- .rate_config = {pll_rtbl, ARRAY_SIZE(pll_rtbl), 1},
- .private_data = &pll1_config,
-};
-
-/* PLL3 48 MHz clock */
-static struct clk pll3_48m_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &osc_24m_clk,
- .rate = 48000000,
-};
-
-/* watch dog timer clock */
-static struct clk wdt_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &osc_24m_clk,
- .recalc = &follow_parent,
-};
-
-/* clock derived from pll1 clk */
-/* cpu clock */
-static struct clk cpu_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &pll1_clk,
- .recalc = &follow_parent,
-};
-
-/* ahb masks structure */
-static struct bus_clk_masks ahb_masks = {
- .mask = PLL_HCLK_RATIO_MASK,
- .shift = PLL_HCLK_RATIO_SHIFT,
-};
-
-/* ahb configuration structure */
-static struct bus_clk_config ahb_config = {
- .reg = CORE_CLK_CFG,
- .masks = &ahb_masks,
-};
-
-/* ahb rate configuration table, in ascending order of rates */
-struct bus_rate_tbl bus_rtbl[] = {
- {.div = 3}, /* == parent divided by 4 */
- {.div = 2}, /* == parent divided by 3 */
- {.div = 1}, /* == parent divided by 2 */
- {.div = 0}, /* == parent divided by 1 */
-};
-
-/* ahb clock */
-static struct clk ahb_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &pll1_clk,
- .calc_rate = &bus_calc_rate,
- .recalc = &bus_clk_recalc,
- .set_rate = &bus_clk_set_rate,
- .rate_config = {bus_rtbl, ARRAY_SIZE(bus_rtbl), 2},
- .private_data = &ahb_config,
-};
-
-/* auxiliary synthesizers masks */
-static struct aux_clk_masks aux_masks = {
- .eq_sel_mask = AUX_EQ_SEL_MASK,
- .eq_sel_shift = AUX_EQ_SEL_SHIFT,
- .eq1_mask = AUX_EQ1_SEL,
- .eq2_mask = AUX_EQ2_SEL,
- .xscale_sel_mask = AUX_XSCALE_MASK,
- .xscale_sel_shift = AUX_XSCALE_SHIFT,
- .yscale_sel_mask = AUX_YSCALE_MASK,
- .yscale_sel_shift = AUX_YSCALE_SHIFT,
-};
-
-/* uart synth configurations */
-static struct aux_clk_config uart_synth_config = {
- .synth_reg = UART_CLK_SYNT,
- .masks = &aux_masks,
-};
-
-/* aux rate configuration table, in ascending order of rates */
-struct aux_rate_tbl aux_rtbl[] = {
- /* For PLL1 = 332 MHz */
- {.xscale = 1, .yscale = 8, .eq = 1}, /* 41.5 MHz */
- {.xscale = 1, .yscale = 4, .eq = 1}, /* 83 MHz */
- {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
-};
-
-/* uart synth clock */
-static struct clk uart_synth_clk = {
- .en_reg = UART_CLK_SYNT,
- .en_reg_bit = AUX_SYNT_ENB,
- .pclk = &pll1_clk,
- .calc_rate = &aux_calc_rate,
- .recalc = &aux_clk_recalc,
- .set_rate = &aux_clk_set_rate,
- .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 1},
- .private_data = &uart_synth_config,
-};
-
-/* uart parents */
-static struct pclk_info uart_pclk_info[] = {
- {
- .pclk = &uart_synth_clk,
- .pclk_val = AUX_CLK_PLL1_VAL,
- }, {
- .pclk = &pll3_48m_clk,
- .pclk_val = AUX_CLK_PLL3_VAL,
- },
-};
-
-/* uart parent select structure */
-static struct pclk_sel uart_pclk_sel = {
- .pclk_info = uart_pclk_info,
- .pclk_count = ARRAY_SIZE(uart_pclk_info),
- .pclk_sel_reg = PERIP_CLK_CFG,
- .pclk_sel_mask = UART_CLK_MASK,
-};
-
-/* uart clock */
-static struct clk uart_clk = {
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = UART_CLK_ENB,
- .pclk_sel = &uart_pclk_sel,
- .pclk_sel_shift = UART_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-/* firda configurations */
-static struct aux_clk_config firda_synth_config = {
- .synth_reg = FIRDA_CLK_SYNT,
- .masks = &aux_masks,
-};
-
-/* firda synth clock */
-static struct clk firda_synth_clk = {
- .en_reg = FIRDA_CLK_SYNT,
- .en_reg_bit = AUX_SYNT_ENB,
- .pclk = &pll1_clk,
- .calc_rate = &aux_calc_rate,
- .recalc = &aux_clk_recalc,
- .set_rate = &aux_clk_set_rate,
- .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 1},
- .private_data = &firda_synth_config,
-};
-
-/* firda parents */
-static struct pclk_info firda_pclk_info[] = {
- {
- .pclk = &firda_synth_clk,
- .pclk_val = AUX_CLK_PLL1_VAL,
- }, {
- .pclk = &pll3_48m_clk,
- .pclk_val = AUX_CLK_PLL3_VAL,
- },
-};
-
-/* firda parent select structure */
-static struct pclk_sel firda_pclk_sel = {
- .pclk_info = firda_pclk_info,
- .pclk_count = ARRAY_SIZE(firda_pclk_info),
- .pclk_sel_reg = PERIP_CLK_CFG,
- .pclk_sel_mask = FIRDA_CLK_MASK,
-};
-
-/* firda clock */
-static struct clk firda_clk = {
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = FIRDA_CLK_ENB,
- .pclk_sel = &firda_pclk_sel,
- .pclk_sel_shift = FIRDA_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-/* gpt synthesizer masks */
-static struct gpt_clk_masks gpt_masks = {
- .mscale_sel_mask = GPT_MSCALE_MASK,
- .mscale_sel_shift = GPT_MSCALE_SHIFT,
- .nscale_sel_mask = GPT_NSCALE_MASK,
- .nscale_sel_shift = GPT_NSCALE_SHIFT,
-};
-
-/* gpt rate configuration table, in ascending order of rates */
-struct gpt_rate_tbl gpt_rtbl[] = {
- /* For pll1 = 332 MHz */
- {.mscale = 4, .nscale = 0}, /* 41.5 MHz */
- {.mscale = 2, .nscale = 0}, /* 55.3 MHz */
- {.mscale = 1, .nscale = 0}, /* 83 MHz */
-};
-
-/* gpt0 synth clk config*/
-static struct gpt_clk_config gpt0_synth_config = {
- .synth_reg = PRSC1_CLK_CFG,
- .masks = &gpt_masks,
-};
-
-/* gpt synth clock */
-static struct clk gpt0_synth_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &pll1_clk,
- .calc_rate = &gpt_calc_rate,
- .recalc = &gpt_clk_recalc,
- .set_rate = &gpt_clk_set_rate,
- .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
- .private_data = &gpt0_synth_config,
-};
-
-/* gpt parents */
-static struct pclk_info gpt0_pclk_info[] = {
- {
- .pclk = &gpt0_synth_clk,
- .pclk_val = AUX_CLK_PLL1_VAL,
- }, {
- .pclk = &pll3_48m_clk,
- .pclk_val = AUX_CLK_PLL3_VAL,
- },
-};
-
-/* gpt parent select structure */
-static struct pclk_sel gpt0_pclk_sel = {
- .pclk_info = gpt0_pclk_info,
- .pclk_count = ARRAY_SIZE(gpt0_pclk_info),
- .pclk_sel_reg = PERIP_CLK_CFG,
- .pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt0 timer clock */
-static struct clk gpt0_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk_sel = &gpt0_pclk_sel,
- .pclk_sel_shift = GPT0_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-/* gpt1 synth clk configurations */
-static struct gpt_clk_config gpt1_synth_config = {
- .synth_reg = PRSC2_CLK_CFG,
- .masks = &gpt_masks,
-};
-
-/* gpt1 synth clock */
-static struct clk gpt1_synth_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &pll1_clk,
- .calc_rate = &gpt_calc_rate,
- .recalc = &gpt_clk_recalc,
- .set_rate = &gpt_clk_set_rate,
- .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
- .private_data = &gpt1_synth_config,
-};
-
-static struct pclk_info gpt1_pclk_info[] = {
- {
- .pclk = &gpt1_synth_clk,
- .pclk_val = AUX_CLK_PLL1_VAL,
- }, {
- .pclk = &pll3_48m_clk,
- .pclk_val = AUX_CLK_PLL3_VAL,
- },
-};
-
-/* gpt parent select structure */
-static struct pclk_sel gpt1_pclk_sel = {
- .pclk_info = gpt1_pclk_info,
- .pclk_count = ARRAY_SIZE(gpt1_pclk_info),
- .pclk_sel_reg = PERIP_CLK_CFG,
- .pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt1 timer clock */
-static struct clk gpt1_clk = {
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = GPT1_CLK_ENB,
- .pclk_sel = &gpt1_pclk_sel,
- .pclk_sel_shift = GPT1_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-/* gpt2 synth clk configurations */
-static struct gpt_clk_config gpt2_synth_config = {
- .synth_reg = PRSC3_CLK_CFG,
- .masks = &gpt_masks,
-};
-
-/* gpt1 synth clock */
-static struct clk gpt2_synth_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &pll1_clk,
- .calc_rate = &gpt_calc_rate,
- .recalc = &gpt_clk_recalc,
- .set_rate = &gpt_clk_set_rate,
- .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
- .private_data = &gpt2_synth_config,
-};
-
-static struct pclk_info gpt2_pclk_info[] = {
- {
- .pclk = &gpt2_synth_clk,
- .pclk_val = AUX_CLK_PLL1_VAL,
- }, {
- .pclk = &pll3_48m_clk,
- .pclk_val = AUX_CLK_PLL3_VAL,
- },
-};
-
-/* gpt parent select structure */
-static struct pclk_sel gpt2_pclk_sel = {
- .pclk_info = gpt2_pclk_info,
- .pclk_count = ARRAY_SIZE(gpt2_pclk_info),
- .pclk_sel_reg = PERIP_CLK_CFG,
- .pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt2 timer clock */
-static struct clk gpt2_clk = {
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = GPT2_CLK_ENB,
- .pclk_sel = &gpt2_pclk_sel,
- .pclk_sel_shift = GPT2_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-/* clock derived from pll3 clk */
-/* usbh clock */
-static struct clk usbh_clk = {
- .pclk = &pll3_48m_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = USBH_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* usbd clock */
-static struct clk usbd_clk = {
- .pclk = &pll3_48m_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = USBD_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* clock derived from usbh clk */
-/* usbh0 clock */
-static struct clk usbh0_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &usbh_clk,
- .recalc = &follow_parent,
-};
-
-/* usbh1 clock */
-static struct clk usbh1_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &usbh_clk,
- .recalc = &follow_parent,
-};
-
-/* clock derived from ahb clk */
-/* apb masks structure */
-static struct bus_clk_masks apb_masks = {
- .mask = HCLK_PCLK_RATIO_MASK,
- .shift = HCLK_PCLK_RATIO_SHIFT,
-};
-
-/* apb configuration structure */
-static struct bus_clk_config apb_config = {
- .reg = CORE_CLK_CFG,
- .masks = &apb_masks,
-};
-
-/* apb clock */
-static struct clk apb_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &ahb_clk,
- .calc_rate = &bus_calc_rate,
- .recalc = &bus_clk_recalc,
- .set_rate = &bus_clk_set_rate,
- .rate_config = {bus_rtbl, ARRAY_SIZE(bus_rtbl), 2},
- .private_data = &apb_config,
-};
-
-/* i2c clock */
-static struct clk i2c_clk = {
- .pclk = &ahb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = I2C_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* dma clock */
-static struct clk dma_clk = {
- .pclk = &ahb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = DMA_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* jpeg clock */
-static struct clk jpeg_clk = {
- .pclk = &ahb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = JPEG_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* gmac clock */
-static struct clk gmac_clk = {
- .pclk = &ahb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = GMAC_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* smi clock */
-static struct clk smi_clk = {
- .pclk = &ahb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = SMI_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* c3 clock */
-static struct clk c3_clk = {
- .pclk = &ahb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = C3_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* clock derived from apb clk */
-/* adc clock */
-static struct clk adc_clk = {
- .pclk = &apb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = ADC_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-#if defined(CONFIG_MACH_SPEAR310) || defined(CONFIG_MACH_SPEAR320)
-/* emi clock */
-static struct clk emi_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &ahb_clk,
- .recalc = &follow_parent,
-};
-#endif
-
-/* ssp clock */
-static struct clk ssp0_clk = {
- .pclk = &apb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = SSP_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* gpio clock */
-static struct clk gpio_clk = {
- .pclk = &apb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = GPIO_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-static struct clk dummy_apb_pclk;
-
-#if defined(CONFIG_MACH_SPEAR300) || defined(CONFIG_MACH_SPEAR310) || \
- defined(CONFIG_MACH_SPEAR320)
-/* fsmc clock */
-static struct clk fsmc_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &ahb_clk,
- .recalc = &follow_parent,
-};
-#endif
-
-/* common clocks to spear310 and spear320 */
-#if defined(CONFIG_MACH_SPEAR310) || defined(CONFIG_MACH_SPEAR320)
-/* uart1 clock */
-static struct clk uart1_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-
-/* uart2 clock */
-static struct clk uart2_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-#endif /* CONFIG_MACH_SPEAR310 || CONFIG_MACH_SPEAR320 */
-
-/* common clocks to spear300 and spear320 */
-#if defined(CONFIG_MACH_SPEAR300) || defined(CONFIG_MACH_SPEAR320)
-/* clcd clock */
-static struct clk clcd_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &pll3_48m_clk,
- .recalc = &follow_parent,
-};
-
-/* sdhci clock */
-static struct clk sdhci_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &ahb_clk,
- .recalc = &follow_parent,
-};
-#endif /* CONFIG_MACH_SPEAR300 || CONFIG_MACH_SPEAR320 */
-
-/* spear300 machine specific clock structures */
-#ifdef CONFIG_MACH_SPEAR300
-/* gpio1 clock */
-static struct clk gpio1_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-
-/* keyboard clock */
-static struct clk kbd_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-
-#endif
-
-/* spear310 machine specific clock structures */
-#ifdef CONFIG_MACH_SPEAR310
-/* uart3 clock */
-static struct clk uart3_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-
-/* uart4 clock */
-static struct clk uart4_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-
-/* uart5 clock */
-static struct clk uart5_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-#endif
-
-/* spear320 machine specific clock structures */
-#ifdef CONFIG_MACH_SPEAR320
-/* can0 clock */
-static struct clk can0_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-
-/* can1 clock */
-static struct clk can1_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-
-/* i2c1 clock */
-static struct clk i2c1_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &ahb_clk,
- .recalc = &follow_parent,
-};
-
-/* ssp1 clock */
-static struct clk ssp1_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-
-/* ssp2 clock */
-static struct clk ssp2_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-
-/* pwm clock */
-static struct clk pwm_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-#endif
-
-/* array of all spear 3xx clock lookups */
-static struct clk_lookup spear_clk_lookups[] = {
- CLKDEV_INIT(NULL, "apb_pclk", &dummy_apb_pclk),
- /* root clks */
- CLKDEV_INIT(NULL, "osc_32k_clk", &osc_32k_clk),
- CLKDEV_INIT(NULL, "osc_24m_clk", &osc_24m_clk),
- /* clock derived from 32 KHz osc clk */
- CLKDEV_INIT("fc900000.rtc", NULL, &rtc_clk),
- /* clock derived from 24 MHz osc clk */
- CLKDEV_INIT(NULL, "pll1_clk", &pll1_clk),
- CLKDEV_INIT(NULL, "pll3_48m_clk", &pll3_48m_clk),
- CLKDEV_INIT("fc880000.wdt", NULL, &wdt_clk),
- /* clock derived from pll1 clk */
- CLKDEV_INIT(NULL, "cpu_clk", &cpu_clk),
- CLKDEV_INIT(NULL, "ahb_clk", &ahb_clk),
- CLKDEV_INIT(NULL, "uart_synth_clk", &uart_synth_clk),
- CLKDEV_INIT(NULL, "firda_synth_clk", &firda_synth_clk),
- CLKDEV_INIT(NULL, "gpt0_synth_clk", &gpt0_synth_clk),
- CLKDEV_INIT(NULL, "gpt1_synth_clk", &gpt1_synth_clk),
- CLKDEV_INIT(NULL, "gpt2_synth_clk", &gpt2_synth_clk),
- CLKDEV_INIT("d0000000.serial", NULL, &uart_clk),
- CLKDEV_INIT("firda", NULL, &firda_clk),
- CLKDEV_INIT("gpt0", NULL, &gpt0_clk),
- CLKDEV_INIT("gpt1", NULL, &gpt1_clk),
- CLKDEV_INIT("gpt2", NULL, &gpt2_clk),
- /* clock derived from pll3 clk */
- CLKDEV_INIT("designware_udc", NULL, &usbd_clk),
- CLKDEV_INIT(NULL, "usbh_clk", &usbh_clk),
- /* clock derived from usbh clk */
- CLKDEV_INIT(NULL, "usbh.0_clk", &usbh0_clk),
- CLKDEV_INIT(NULL, "usbh.1_clk", &usbh1_clk),
- /* clock derived from ahb clk */
- CLKDEV_INIT(NULL, "apb_clk", &apb_clk),
- CLKDEV_INIT("d0180000.i2c", NULL, &i2c_clk),
- CLKDEV_INIT("fc400000.dma", NULL, &dma_clk),
- CLKDEV_INIT("jpeg", NULL, &jpeg_clk),
- CLKDEV_INIT("e0800000.eth", NULL, &gmac_clk),
- CLKDEV_INIT("fc000000.flash", NULL, &smi_clk),
- CLKDEV_INIT("c3", NULL, &c3_clk),
- /* clock derived from apb clk */
- CLKDEV_INIT("adc", NULL, &adc_clk),
- CLKDEV_INIT("d0100000.spi", NULL, &ssp0_clk),
- CLKDEV_INIT("fc980000.gpio", NULL, &gpio_clk),
-};
-
-/* array of all spear 300 clock lookups */
-#ifdef CONFIG_MACH_SPEAR300
-static struct clk_lookup spear300_clk_lookups[] = {
- CLKDEV_INIT("60000000.clcd", NULL, &clcd_clk),
- CLKDEV_INIT("94000000.flash", NULL, &fsmc_clk),
- CLKDEV_INIT("a9000000.gpio", NULL, &gpio1_clk),
- CLKDEV_INIT("a0000000.kbd", NULL, &kbd_clk),
- CLKDEV_INIT("70000000.sdhci", NULL, &sdhci_clk),
-};
-
-void __init spear300_clk_init(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(spear_clk_lookups); i++)
- clk_register(&spear_clk_lookups[i]);
-
- for (i = 0; i < ARRAY_SIZE(spear300_clk_lookups); i++)
- clk_register(&spear300_clk_lookups[i]);
-
- clk_init();
-}
-#endif
-
-/* array of all spear 310 clock lookups */
-#ifdef CONFIG_MACH_SPEAR310
-static struct clk_lookup spear310_clk_lookups[] = {
- CLKDEV_INIT("44000000.flash", NULL, &fsmc_clk),
- CLKDEV_INIT(NULL, "emi", &emi_clk),
- CLKDEV_INIT("b2000000.serial", NULL, &uart1_clk),
- CLKDEV_INIT("b2080000.serial", NULL, &uart2_clk),
- CLKDEV_INIT("b2100000.serial", NULL, &uart3_clk),
- CLKDEV_INIT("b2180000.serial", NULL, &uart4_clk),
- CLKDEV_INIT("b2200000.serial", NULL, &uart5_clk),
-};
-
-void __init spear310_clk_init(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(spear_clk_lookups); i++)
- clk_register(&spear_clk_lookups[i]);
-
- for (i = 0; i < ARRAY_SIZE(spear310_clk_lookups); i++)
- clk_register(&spear310_clk_lookups[i]);
-
- clk_init();
-}
-#endif
-
-/* array of all spear 320 clock lookups */
-#ifdef CONFIG_MACH_SPEAR320
-static struct clk_lookup spear320_clk_lookups[] = {
- CLKDEV_INIT("90000000.clcd", NULL, &clcd_clk),
- CLKDEV_INIT("4c000000.flash", NULL, &fsmc_clk),
- CLKDEV_INIT("a7000000.i2c", NULL, &i2c1_clk),
- CLKDEV_INIT(NULL, "emi", &emi_clk),
- CLKDEV_INIT("pwm", NULL, &pwm_clk),
- CLKDEV_INIT("70000000.sdhci", NULL, &sdhci_clk),
- CLKDEV_INIT("c_can_platform.0", NULL, &can0_clk),
- CLKDEV_INIT("c_can_platform.1", NULL, &can1_clk),
- CLKDEV_INIT("a5000000.spi", NULL, &ssp1_clk),
- CLKDEV_INIT("a6000000.spi", NULL, &ssp2_clk),
- CLKDEV_INIT("a3000000.serial", NULL, &uart1_clk),
- CLKDEV_INIT("a4000000.serial", NULL, &uart2_clk),
-};
-
-void __init spear320_clk_init(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(spear_clk_lookups); i++)
- clk_register(&spear_clk_lookups[i]);
-
- for (i = 0; i < ARRAY_SIZE(spear320_clk_lookups); i++)
- clk_register(&spear320_clk_lookups[i]);
-
- clk_init();
-}
-#endif
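Note on the rate tables removed above: the aux_rtbl[] comments (41.5, 83 and 166 MHz against a 332 MHz PLL1 parent) are consistent with a plain xscale/yscale ratio for the eq = 1 rows. The sketch below only restates that arithmetic; the helper name is invented here for illustration, and the authoritative computation remains aux_calc_rate() in the plat-spear clock code.

        /* Illustrative sketch only, not part of this patch. */
        static unsigned long aux_rate_sketch(unsigned long parent_rate,
                                             unsigned int xscale,
                                             unsigned int yscale)
        {
                /* eq = 1 rows above: 332 MHz * 1/8, 1/4, 1/2 -> 41.5, 83, 166 MHz */
                return parent_rate / yscale * xscale;
        }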
diff --git a/arch/arm/mach-spear3xx/include/mach/generic.h b/arch/arm/mach-spear3xx/include/mach/generic.h
index bdb304551ca..4a95b9453c2 100644
--- a/arch/arm/mach-spear3xx/include/mach/generic.h
+++ b/arch/arm/mach-spear3xx/include/mach/generic.h
@@ -27,28 +27,11 @@ extern struct pl022_ssp_controller pl022_plat_data;
extern struct pl08x_platform_data pl080_plat_data;
/* Add spear3xx family function declarations here */
-void __init spear_setup_timer(resource_size_t base, int irq);
+void __init spear_setup_of_timer(void);
+void __init spear3xx_clk_init(void);
void __init spear3xx_map_io(void);
void __init spear3xx_dt_init_irq(void);
void spear_restart(char, const char *);
-/* spear300 declarations */
-#ifdef CONFIG_MACH_SPEAR300
-void __init spear300_clk_init(void);
-
-#endif /* CONFIG_MACH_SPEAR300 */
-
-/* spear310 declarations */
-#ifdef CONFIG_MACH_SPEAR310
-void __init spear310_clk_init(void);
-
-#endif /* CONFIG_MACH_SPEAR310 */
-
-/* spear320 declarations */
-#ifdef CONFIG_MACH_SPEAR320
-void __init spear320_clk_init(void);
-
-#endif /* CONFIG_MACH_SPEAR320 */
-
#endif /* __MACH_GENERIC_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/irqs.h b/arch/arm/mach-spear3xx/include/mach/irqs.h
index 319620a1afb..51bd62a0254 100644
--- a/arch/arm/mach-spear3xx/include/mach/irqs.h
+++ b/arch/arm/mach-spear3xx/include/mach/irqs.h
@@ -16,7 +16,6 @@
/* FIXME: probe all these from DT */
#define SPEAR3XX_IRQ_INTRCOMM_RAS_ARM 1
-#define SPEAR3XX_IRQ_CPU_GPT1_1 2
#define SPEAR3XX_IRQ_GEN_RAS_1 28
#define SPEAR3XX_IRQ_GEN_RAS_2 29
#define SPEAR3XX_IRQ_GEN_RAS_3 30
diff --git a/arch/arm/mach-spear3xx/include/mach/misc_regs.h b/arch/arm/mach-spear3xx/include/mach/misc_regs.h
index e0ab72e6150..18e2ac576f2 100644
--- a/arch/arm/mach-spear3xx/include/mach/misc_regs.h
+++ b/arch/arm/mach-spear3xx/include/mach/misc_regs.h
@@ -14,6 +14,8 @@
#ifndef __MACH_MISC_REGS_H
#define __MACH_MISC_REGS_H
+#include <mach/spear.h>
+
#define MISC_BASE IOMEM(VA_SPEAR3XX_ICM3_MISC_REG_BASE)
#define DMA_CHN_CFG (MISC_BASE + 0x0A0)
diff --git a/arch/arm/mach-spear3xx/include/mach/spear.h b/arch/arm/mach-spear3xx/include/mach/spear.h
index 6d4dadc6763..51eb953148a 100644
--- a/arch/arm/mach-spear3xx/include/mach/spear.h
+++ b/arch/arm/mach-spear3xx/include/mach/spear.h
@@ -26,7 +26,6 @@
/* ML1 - Multi Layer CPU Subsystem */
#define SPEAR3XX_ICM3_ML1_2_BASE UL(0xF0000000)
#define VA_SPEAR6XX_ML_CPU_BASE UL(0xF0000000)
-#define SPEAR3XX_CPU_TMR_BASE UL(0xF0000000)
/* ICM3 - Basic Subsystem */
#define SPEAR3XX_ICM3_SMI_CTRL_BASE UL(0xFC000000)
@@ -45,4 +44,17 @@
#define SPEAR_SYS_CTRL_BASE SPEAR3XX_ICM3_SYS_CTRL_BASE
#define VA_SPEAR_SYS_CTRL_BASE VA_SPEAR3XX_ICM3_SYS_CTRL_BASE
+/* SPEAr320 Macros */
+#define SPEAR320_SOC_CONFIG_BASE UL(0xB3000000)
+#define VA_SPEAR320_SOC_CONFIG_BASE UL(0xFE000000)
+#define SPEAR320_CONTROL_REG IOMEM(VA_SPEAR320_SOC_CONFIG_BASE)
+#define SPEAR320_EXT_CTRL_REG IOMEM(VA_SPEAR320_SOC_CONFIG_BASE + 0x0018)
+ #define SPEAR320_UARTX_PCLK_MASK 0x1
+ #define SPEAR320_UART2_PCLK_SHIFT 8
+ #define SPEAR320_UART3_PCLK_SHIFT 9
+ #define SPEAR320_UART4_PCLK_SHIFT 10
+ #define SPEAR320_UART5_PCLK_SHIFT 11
+ #define SPEAR320_UART6_PCLK_SHIFT 12
+ #define SPEAR320_RS485_PCLK_SHIFT 13
+
#endif /* __MACH_SPEAR3XX_H */
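Note on the SPEAr320 macros added above: SPEAR320_UARTX_PCLK_MASK together with the per-UART *_PCLK_SHIFT values describes one-bit parent-select fields inside SPEAR320_EXT_CTRL_REG. The helper below is only a sketch of the read-modify-write such mask/shift pairs usually imply; the function name is invented here, and the actual selection is performed by the SPEAr320 platform code, not by this header.

        /* Illustrative sketch only, not part of this patch; needs <linux/io.h>. */
        static void spear320_uart_pclk_sel_sketch(unsigned int shift, u32 val)
        {
                u32 tmp = readl(SPEAR320_EXT_CTRL_REG);

                tmp &= ~(SPEAR320_UARTX_PCLK_MASK << shift);
                tmp |= (val & SPEAR320_UARTX_PCLK_MASK) << shift;
                writel(tmp, SPEAR320_EXT_CTRL_REG);
        }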
diff --git a/arch/arm/mach-spear3xx/spear300.c b/arch/arm/mach-spear3xx/spear300.c
index f75fe25a620..f74a05bdb82 100644
--- a/arch/arm/mach-spear3xx/spear300.c
+++ b/arch/arm/mach-spear3xx/spear300.c
@@ -337,7 +337,6 @@ static const char * const spear300_dt_board_compat[] = {
static void __init spear300_map_io(void)
{
spear3xx_map_io();
- spear300_clk_init();
}
DT_MACHINE_START(SPEAR300_DT, "ST SPEAr300 SoC with Flattened Device Tree")
diff --git a/arch/arm/mach-spear3xx/spear310.c b/arch/arm/mach-spear3xx/spear310.c
index f0842a58dc0..84dfb090074 100644
--- a/arch/arm/mach-spear3xx/spear310.c
+++ b/arch/arm/mach-spear3xx/spear310.c
@@ -478,7 +478,6 @@ static const char * const spear310_dt_board_compat[] = {
static void __init spear310_map_io(void)
{
spear3xx_map_io();
- spear310_clk_init();
}
DT_MACHINE_START(SPEAR310_DT, "ST SPEAr310 SoC with Flattened Device Tree")
diff --git a/arch/arm/mach-spear3xx/spear320.c b/arch/arm/mach-spear3xx/spear320.c
index e8caeef50a5..a88fa841d29 100644
--- a/arch/arm/mach-spear3xx/spear320.c
+++ b/arch/arm/mach-spear3xx/spear320.c
@@ -27,7 +27,6 @@
#define SPEAR320_UART2_BASE UL(0xA4000000)
#define SPEAR320_SSP0_BASE UL(0xA5000000)
#define SPEAR320_SSP1_BASE UL(0xA6000000)
-#define SPEAR320_SOC_CONFIG_BASE UL(0xB3000000)
/* Interrupt registers offsets and masks */
#define SPEAR320_INT_STS_MASK_REG 0x04
@@ -481,10 +480,19 @@ static const char * const spear320_dt_board_compat[] = {
NULL,
};
+struct map_desc spear320_io_desc[] __initdata = {
+ {
+ .virtual = VA_SPEAR320_SOC_CONFIG_BASE,
+ .pfn = __phys_to_pfn(SPEAR320_SOC_CONFIG_BASE),
+ .length = SZ_16M,
+ .type = MT_DEVICE
+ },
+};
+
static void __init spear320_map_io(void)
{
+ iotable_init(spear320_io_desc, ARRAY_SIZE(spear320_io_desc));
spear3xx_map_io();
- spear320_clk_init();
}
DT_MACHINE_START(SPEAR320_DT, "ST SPEAr320 SoC with Flattened Device Tree")
diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c
index 826ac20ef1e..f22419ed74a 100644
--- a/arch/arm/mach-spear3xx/spear3xx.c
+++ b/arch/arm/mach-spear3xx/spear3xx.c
@@ -90,6 +90,8 @@ static void __init spear3xx_timer_init(void)
char pclk_name[] = "pll3_48m_clk";
struct clk *gpt_clk, *pclk;
+ spear3xx_clk_init();
+
/* get the system timer clock */
gpt_clk = clk_get_sys("gpt0", NULL);
if (IS_ERR(gpt_clk)) {
@@ -109,7 +111,7 @@ static void __init spear3xx_timer_init(void)
clk_put(gpt_clk);
clk_put(pclk);
- spear_setup_timer(SPEAR3XX_CPU_TMR_BASE, SPEAR3XX_IRQ_CPU_GPT1_1);
+ spear_setup_of_timer();
}
struct sys_timer spear3xx_timer = {
diff --git a/arch/arm/mach-spear6xx/Makefile b/arch/arm/mach-spear6xx/Makefile
index 76e5750552f..898831d93f3 100644
--- a/arch/arm/mach-spear6xx/Makefile
+++ b/arch/arm/mach-spear6xx/Makefile
@@ -3,4 +3,4 @@
#
# common files
-obj-y += clock.o spear6xx.o
+obj-y += spear6xx.o
diff --git a/arch/arm/mach-spear6xx/clock.c b/arch/arm/mach-spear6xx/clock.c
deleted file mode 100644
index bef77d43db8..00000000000
--- a/arch/arm/mach-spear6xx/clock.c
+++ /dev/null
@@ -1,789 +0,0 @@
-/*
- * arch/arm/mach-spear6xx/clock.c
- *
- * SPEAr6xx machines clock framework source file
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <plat/clock.h>
-#include <mach/misc_regs.h>
-#include <mach/spear.h>
-
-#define PLL1_CTR (MISC_BASE + 0x008)
-#define PLL1_FRQ (MISC_BASE + 0x00C)
-#define PLL1_MOD (MISC_BASE + 0x010)
-#define PLL2_CTR (MISC_BASE + 0x014)
-/* PLL_CTR register masks */
-#define PLL_ENABLE 2
-#define PLL_MODE_SHIFT 4
-#define PLL_MODE_MASK 0x3
-#define PLL_MODE_NORMAL 0
-#define PLL_MODE_FRACTION 1
-#define PLL_MODE_DITH_DSB 2
-#define PLL_MODE_DITH_SSB 3
-
-#define PLL2_FRQ (MISC_BASE + 0x018)
-/* PLL FRQ register masks */
-#define PLL_DIV_N_SHIFT 0
-#define PLL_DIV_N_MASK 0xFF
-#define PLL_DIV_P_SHIFT 8
-#define PLL_DIV_P_MASK 0x7
-#define PLL_NORM_FDBK_M_SHIFT 24
-#define PLL_NORM_FDBK_M_MASK 0xFF
-#define PLL_DITH_FDBK_M_SHIFT 16
-#define PLL_DITH_FDBK_M_MASK 0xFFFF
-
-#define PLL2_MOD (MISC_BASE + 0x01C)
-#define PLL_CLK_CFG (MISC_BASE + 0x020)
-#define CORE_CLK_CFG (MISC_BASE + 0x024)
-/* CORE CLK CFG register masks */
-#define PLL_HCLK_RATIO_SHIFT 10
-#define PLL_HCLK_RATIO_MASK 0x3
-#define HCLK_PCLK_RATIO_SHIFT 8
-#define HCLK_PCLK_RATIO_MASK 0x3
-
-#define PERIP_CLK_CFG (MISC_BASE + 0x028)
-/* PERIP_CLK_CFG register masks */
-#define CLCD_CLK_SHIFT 2
-#define CLCD_CLK_MASK 0x3
-#define UART_CLK_SHIFT 4
-#define UART_CLK_MASK 0x1
-#define FIRDA_CLK_SHIFT 5
-#define FIRDA_CLK_MASK 0x3
-#define GPT0_CLK_SHIFT 8
-#define GPT1_CLK_SHIFT 10
-#define GPT2_CLK_SHIFT 11
-#define GPT3_CLK_SHIFT 12
-#define GPT_CLK_MASK 0x1
-#define AUX_CLK_PLL3_VAL 0
-#define AUX_CLK_PLL1_VAL 1
-
-#define PERIP1_CLK_ENB (MISC_BASE + 0x02C)
-/* PERIP1_CLK_ENB register masks */
-#define UART0_CLK_ENB 3
-#define UART1_CLK_ENB 4
-#define SSP0_CLK_ENB 5
-#define SSP1_CLK_ENB 6
-#define I2C_CLK_ENB 7
-#define JPEG_CLK_ENB 8
-#define FSMC_CLK_ENB 9
-#define FIRDA_CLK_ENB 10
-#define GPT2_CLK_ENB 11
-#define GPT3_CLK_ENB 12
-#define GPIO2_CLK_ENB 13
-#define SSP2_CLK_ENB 14
-#define ADC_CLK_ENB 15
-#define GPT1_CLK_ENB 11
-#define RTC_CLK_ENB 17
-#define GPIO1_CLK_ENB 18
-#define DMA_CLK_ENB 19
-#define SMI_CLK_ENB 21
-#define CLCD_CLK_ENB 22
-#define GMAC_CLK_ENB 23
-#define USBD_CLK_ENB 24
-#define USBH0_CLK_ENB 25
-#define USBH1_CLK_ENB 26
-
-#define PRSC1_CLK_CFG (MISC_BASE + 0x044)
-#define PRSC2_CLK_CFG (MISC_BASE + 0x048)
-#define PRSC3_CLK_CFG (MISC_BASE + 0x04C)
-/* gpt synthesizer register masks */
-#define GPT_MSCALE_SHIFT 0
-#define GPT_MSCALE_MASK 0xFFF
-#define GPT_NSCALE_SHIFT 12
-#define GPT_NSCALE_MASK 0xF
-
-#define AMEM_CLK_CFG (MISC_BASE + 0x050)
-#define EXPI_CLK_CFG (MISC_BASE + 0x054)
-#define CLCD_CLK_SYNT (MISC_BASE + 0x05C)
-#define FIRDA_CLK_SYNT (MISC_BASE + 0x060)
-#define UART_CLK_SYNT (MISC_BASE + 0x064)
-#define GMAC_CLK_SYNT (MISC_BASE + 0x068)
-#define RAS1_CLK_SYNT (MISC_BASE + 0x06C)
-#define RAS2_CLK_SYNT (MISC_BASE + 0x070)
-#define RAS3_CLK_SYNT (MISC_BASE + 0x074)
-#define RAS4_CLK_SYNT (MISC_BASE + 0x078)
-/* aux clk synthesiser register masks for irda to ras4 */
-#define AUX_SYNT_ENB 31
-#define AUX_EQ_SEL_SHIFT 30
-#define AUX_EQ_SEL_MASK 1
-#define AUX_EQ1_SEL 0
-#define AUX_EQ2_SEL 1
-#define AUX_XSCALE_SHIFT 16
-#define AUX_XSCALE_MASK 0xFFF
-#define AUX_YSCALE_SHIFT 0
-#define AUX_YSCALE_MASK 0xFFF
-
-/* root clks */
-/* 32 KHz oscillator clock */
-static struct clk osc_32k_clk = {
- .flags = ALWAYS_ENABLED,
- .rate = 32000,
-};
-
-/* 30 MHz oscillator clock */
-static struct clk osc_30m_clk = {
- .flags = ALWAYS_ENABLED,
- .rate = 30000000,
-};
-
-/* clock derived from 32 KHz osc clk */
-/* rtc clock */
-static struct clk rtc_clk = {
- .pclk = &osc_32k_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = RTC_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* clock derived from 30 MHz osc clk */
-/* pll masks structure */
-static struct pll_clk_masks pll1_masks = {
- .mode_mask = PLL_MODE_MASK,
- .mode_shift = PLL_MODE_SHIFT,
- .norm_fdbk_m_mask = PLL_NORM_FDBK_M_MASK,
- .norm_fdbk_m_shift = PLL_NORM_FDBK_M_SHIFT,
- .dith_fdbk_m_mask = PLL_DITH_FDBK_M_MASK,
- .dith_fdbk_m_shift = PLL_DITH_FDBK_M_SHIFT,
- .div_p_mask = PLL_DIV_P_MASK,
- .div_p_shift = PLL_DIV_P_SHIFT,
- .div_n_mask = PLL_DIV_N_MASK,
- .div_n_shift = PLL_DIV_N_SHIFT,
-};
-
-/* pll1 configuration structure */
-static struct pll_clk_config pll1_config = {
- .mode_reg = PLL1_CTR,
- .cfg_reg = PLL1_FRQ,
- .masks = &pll1_masks,
-};
-
-/* pll rate configuration table, in ascending order of rates */
-struct pll_rate_tbl pll_rtbl[] = {
- {.mode = 0, .m = 0x85, .n = 0x0C, .p = 0x1}, /* 266 MHz */
- {.mode = 0, .m = 0xA6, .n = 0x0C, .p = 0x1}, /* 332 MHz */
-};
-
-/* PLL1 clock */
-static struct clk pll1_clk = {
- .flags = ENABLED_ON_INIT,
- .pclk = &osc_30m_clk,
- .en_reg = PLL1_CTR,
- .en_reg_bit = PLL_ENABLE,
- .calc_rate = &pll_calc_rate,
- .recalc = &pll_clk_recalc,
- .set_rate = &pll_clk_set_rate,
- .rate_config = {pll_rtbl, ARRAY_SIZE(pll_rtbl), 1},
- .private_data = &pll1_config,
-};
-
-/* PLL3 48 MHz clock */
-static struct clk pll3_48m_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &osc_30m_clk,
- .rate = 48000000,
-};
-
-/* watch dog timer clock */
-static struct clk wdt_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &osc_30m_clk,
- .recalc = &follow_parent,
-};
-
-/* clock derived from pll1 clk */
-/* cpu clock */
-static struct clk cpu_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &pll1_clk,
- .recalc = &follow_parent,
-};
-
-/* ahb masks structure */
-static struct bus_clk_masks ahb_masks = {
- .mask = PLL_HCLK_RATIO_MASK,
- .shift = PLL_HCLK_RATIO_SHIFT,
-};
-
-/* ahb configuration structure */
-static struct bus_clk_config ahb_config = {
- .reg = CORE_CLK_CFG,
- .masks = &ahb_masks,
-};
-
-/* ahb rate configuration table, in ascending order of rates */
-struct bus_rate_tbl bus_rtbl[] = {
- {.div = 3}, /* == parent divided by 4 */
- {.div = 2}, /* == parent divided by 3 */
- {.div = 1}, /* == parent divided by 2 */
- {.div = 0}, /* == parent divided by 1 */
-};
-
-/* ahb clock */
-static struct clk ahb_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &pll1_clk,
- .calc_rate = &bus_calc_rate,
- .recalc = &bus_clk_recalc,
- .set_rate = &bus_clk_set_rate,
- .rate_config = {bus_rtbl, ARRAY_SIZE(bus_rtbl), 2},
- .private_data = &ahb_config,
-};
-
-/* auxiliary synthesizers masks */
-static struct aux_clk_masks aux_masks = {
- .eq_sel_mask = AUX_EQ_SEL_MASK,
- .eq_sel_shift = AUX_EQ_SEL_SHIFT,
- .eq1_mask = AUX_EQ1_SEL,
- .eq2_mask = AUX_EQ2_SEL,
- .xscale_sel_mask = AUX_XSCALE_MASK,
- .xscale_sel_shift = AUX_XSCALE_SHIFT,
- .yscale_sel_mask = AUX_YSCALE_MASK,
- .yscale_sel_shift = AUX_YSCALE_SHIFT,
-};
-
-/* uart configurations */
-static struct aux_clk_config uart_synth_config = {
- .synth_reg = UART_CLK_SYNT,
- .masks = &aux_masks,
-};
-
-/* aux rate configuration table, in ascending order of rates */
-struct aux_rate_tbl aux_rtbl[] = {
- /* For PLL1 = 332 MHz */
- {.xscale = 1, .yscale = 8, .eq = 1}, /* 41.5 MHz */
- {.xscale = 1, .yscale = 4, .eq = 1}, /* 83 MHz */
- {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
-};
-
-/* uart synth clock */
-static struct clk uart_synth_clk = {
- .en_reg = UART_CLK_SYNT,
- .en_reg_bit = AUX_SYNT_ENB,
- .pclk = &pll1_clk,
- .calc_rate = &aux_calc_rate,
- .recalc = &aux_clk_recalc,
- .set_rate = &aux_clk_set_rate,
- .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 2},
- .private_data = &uart_synth_config,
-};
-
-/* uart parents */
-static struct pclk_info uart_pclk_info[] = {
- {
- .pclk = &uart_synth_clk,
- .pclk_val = AUX_CLK_PLL1_VAL,
- }, {
- .pclk = &pll3_48m_clk,
- .pclk_val = AUX_CLK_PLL3_VAL,
- },
-};
-
-/* uart parent select structure */
-static struct pclk_sel uart_pclk_sel = {
- .pclk_info = uart_pclk_info,
- .pclk_count = ARRAY_SIZE(uart_pclk_info),
- .pclk_sel_reg = PERIP_CLK_CFG,
- .pclk_sel_mask = UART_CLK_MASK,
-};
-
-/* uart0 clock */
-static struct clk uart0_clk = {
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = UART0_CLK_ENB,
- .pclk_sel = &uart_pclk_sel,
- .pclk_sel_shift = UART_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-/* uart1 clock */
-static struct clk uart1_clk = {
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = UART1_CLK_ENB,
- .pclk_sel = &uart_pclk_sel,
- .pclk_sel_shift = UART_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-/* firda configurations */
-static struct aux_clk_config firda_synth_config = {
- .synth_reg = FIRDA_CLK_SYNT,
- .masks = &aux_masks,
-};
-
-/* firda synth clock */
-static struct clk firda_synth_clk = {
- .en_reg = FIRDA_CLK_SYNT,
- .en_reg_bit = AUX_SYNT_ENB,
- .pclk = &pll1_clk,
- .calc_rate = &aux_calc_rate,
- .recalc = &aux_clk_recalc,
- .set_rate = &aux_clk_set_rate,
- .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 2},
- .private_data = &firda_synth_config,
-};
-
-/* firda parents */
-static struct pclk_info firda_pclk_info[] = {
- {
- .pclk = &firda_synth_clk,
- .pclk_val = AUX_CLK_PLL1_VAL,
- }, {
- .pclk = &pll3_48m_clk,
- .pclk_val = AUX_CLK_PLL3_VAL,
- },
-};
-
-/* firda parent select structure */
-static struct pclk_sel firda_pclk_sel = {
- .pclk_info = firda_pclk_info,
- .pclk_count = ARRAY_SIZE(firda_pclk_info),
- .pclk_sel_reg = PERIP_CLK_CFG,
- .pclk_sel_mask = FIRDA_CLK_MASK,
-};
-
-/* firda clock */
-static struct clk firda_clk = {
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = FIRDA_CLK_ENB,
- .pclk_sel = &firda_pclk_sel,
- .pclk_sel_shift = FIRDA_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-/* clcd configurations */
-static struct aux_clk_config clcd_synth_config = {
- .synth_reg = CLCD_CLK_SYNT,
- .masks = &aux_masks,
-};
-
-/* firda synth clock */
-static struct clk clcd_synth_clk = {
- .en_reg = CLCD_CLK_SYNT,
- .en_reg_bit = AUX_SYNT_ENB,
- .pclk = &pll1_clk,
- .calc_rate = &aux_calc_rate,
- .recalc = &aux_clk_recalc,
- .set_rate = &aux_clk_set_rate,
- .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 2},
- .private_data = &clcd_synth_config,
-};
-
-/* clcd parents */
-static struct pclk_info clcd_pclk_info[] = {
- {
- .pclk = &clcd_synth_clk,
- .pclk_val = AUX_CLK_PLL1_VAL,
- }, {
- .pclk = &pll3_48m_clk,
- .pclk_val = AUX_CLK_PLL3_VAL,
- },
-};
-
-/* clcd parent select structure */
-static struct pclk_sel clcd_pclk_sel = {
- .pclk_info = clcd_pclk_info,
- .pclk_count = ARRAY_SIZE(clcd_pclk_info),
- .pclk_sel_reg = PERIP_CLK_CFG,
- .pclk_sel_mask = CLCD_CLK_MASK,
-};
-
-/* clcd clock */
-static struct clk clcd_clk = {
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = CLCD_CLK_ENB,
- .pclk_sel = &clcd_pclk_sel,
- .pclk_sel_shift = CLCD_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-/* gpt synthesizer masks */
-static struct gpt_clk_masks gpt_masks = {
- .mscale_sel_mask = GPT_MSCALE_MASK,
- .mscale_sel_shift = GPT_MSCALE_SHIFT,
- .nscale_sel_mask = GPT_NSCALE_MASK,
- .nscale_sel_shift = GPT_NSCALE_SHIFT,
-};
-
-/* gpt rate configuration table, in ascending order of rates */
-struct gpt_rate_tbl gpt_rtbl[] = {
- /* For pll1 = 332 MHz */
- {.mscale = 4, .nscale = 0}, /* 41.5 MHz */
- {.mscale = 2, .nscale = 0}, /* 55.3 MHz */
- {.mscale = 1, .nscale = 0}, /* 83 MHz */
-};
-
-/* gpt0 synth clk config*/
-static struct gpt_clk_config gpt0_synth_config = {
- .synth_reg = PRSC1_CLK_CFG,
- .masks = &gpt_masks,
-};
-
-/* gpt synth clock */
-static struct clk gpt0_synth_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &pll1_clk,
- .calc_rate = &gpt_calc_rate,
- .recalc = &gpt_clk_recalc,
- .set_rate = &gpt_clk_set_rate,
- .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
- .private_data = &gpt0_synth_config,
-};
-
-/* gpt parents */
-static struct pclk_info gpt0_pclk_info[] = {
- {
- .pclk = &gpt0_synth_clk,
- .pclk_val = AUX_CLK_PLL1_VAL,
- }, {
- .pclk = &pll3_48m_clk,
- .pclk_val = AUX_CLK_PLL3_VAL,
- },
-};
-
-/* gpt parent select structure */
-static struct pclk_sel gpt0_pclk_sel = {
- .pclk_info = gpt0_pclk_info,
- .pclk_count = ARRAY_SIZE(gpt0_pclk_info),
- .pclk_sel_reg = PERIP_CLK_CFG,
- .pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt0 ARM1 subsystem timer clock */
-static struct clk gpt0_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk_sel = &gpt0_pclk_sel,
- .pclk_sel_shift = GPT0_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-
-/* Note: gpt0 and gpt1 share same parent clocks */
-/* gpt parent select structure */
-static struct pclk_sel gpt1_pclk_sel = {
- .pclk_info = gpt0_pclk_info,
- .pclk_count = ARRAY_SIZE(gpt0_pclk_info),
- .pclk_sel_reg = PERIP_CLK_CFG,
- .pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt1 timer clock */
-static struct clk gpt1_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk_sel = &gpt1_pclk_sel,
- .pclk_sel_shift = GPT1_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-/* gpt2 synth clk config*/
-static struct gpt_clk_config gpt2_synth_config = {
- .synth_reg = PRSC2_CLK_CFG,
- .masks = &gpt_masks,
-};
-
-/* gpt synth clock */
-static struct clk gpt2_synth_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &pll1_clk,
- .calc_rate = &gpt_calc_rate,
- .recalc = &gpt_clk_recalc,
- .set_rate = &gpt_clk_set_rate,
- .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
- .private_data = &gpt2_synth_config,
-};
-
-/* gpt parents */
-static struct pclk_info gpt2_pclk_info[] = {
- {
- .pclk = &gpt2_synth_clk,
- .pclk_val = AUX_CLK_PLL1_VAL,
- }, {
- .pclk = &pll3_48m_clk,
- .pclk_val = AUX_CLK_PLL3_VAL,
- },
-};
-
-/* gpt parent select structure */
-static struct pclk_sel gpt2_pclk_sel = {
- .pclk_info = gpt2_pclk_info,
- .pclk_count = ARRAY_SIZE(gpt2_pclk_info),
- .pclk_sel_reg = PERIP_CLK_CFG,
- .pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt2 timer clock */
-static struct clk gpt2_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk_sel = &gpt2_pclk_sel,
- .pclk_sel_shift = GPT2_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-/* gpt3 synth clk config*/
-static struct gpt_clk_config gpt3_synth_config = {
- .synth_reg = PRSC3_CLK_CFG,
- .masks = &gpt_masks,
-};
-
-/* gpt synth clock */
-static struct clk gpt3_synth_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &pll1_clk,
- .calc_rate = &gpt_calc_rate,
- .recalc = &gpt_clk_recalc,
- .set_rate = &gpt_clk_set_rate,
- .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
- .private_data = &gpt3_synth_config,
-};
-
-/* gpt parents */
-static struct pclk_info gpt3_pclk_info[] = {
- {
- .pclk = &gpt3_synth_clk,
- .pclk_val = AUX_CLK_PLL1_VAL,
- }, {
- .pclk = &pll3_48m_clk,
- .pclk_val = AUX_CLK_PLL3_VAL,
- },
-};
-
-/* gpt parent select structure */
-static struct pclk_sel gpt3_pclk_sel = {
- .pclk_info = gpt3_pclk_info,
- .pclk_count = ARRAY_SIZE(gpt3_pclk_info),
- .pclk_sel_reg = PERIP_CLK_CFG,
- .pclk_sel_mask = GPT_CLK_MASK,
-};
-
-/* gpt3 timer clock */
-static struct clk gpt3_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk_sel = &gpt3_pclk_sel,
- .pclk_sel_shift = GPT3_CLK_SHIFT,
- .recalc = &follow_parent,
-};
-
-/* clock derived from pll3 clk */
-/* usbh0 clock */
-static struct clk usbh0_clk = {
- .pclk = &pll3_48m_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = USBH0_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* usbh1 clock */
-static struct clk usbh1_clk = {
- .pclk = &pll3_48m_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = USBH1_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* usbd clock */
-static struct clk usbd_clk = {
- .pclk = &pll3_48m_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = USBD_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* clock derived from ahb clk */
-/* apb masks structure */
-static struct bus_clk_masks apb_masks = {
- .mask = HCLK_PCLK_RATIO_MASK,
- .shift = HCLK_PCLK_RATIO_SHIFT,
-};
-
-/* apb configuration structure */
-static struct bus_clk_config apb_config = {
- .reg = CORE_CLK_CFG,
- .masks = &apb_masks,
-};
-
-/* apb clock */
-static struct clk apb_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &ahb_clk,
- .calc_rate = &bus_calc_rate,
- .recalc = &bus_clk_recalc,
- .set_rate = &bus_clk_set_rate,
- .rate_config = {bus_rtbl, ARRAY_SIZE(bus_rtbl), 2},
- .private_data = &apb_config,
-};
-
-/* i2c clock */
-static struct clk i2c_clk = {
- .pclk = &ahb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = I2C_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* dma clock */
-static struct clk dma_clk = {
- .pclk = &ahb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = DMA_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* jpeg clock */
-static struct clk jpeg_clk = {
- .pclk = &ahb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = JPEG_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* gmac clock */
-static struct clk gmac_clk = {
- .pclk = &ahb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = GMAC_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* smi clock */
-static struct clk smi_clk = {
- .pclk = &ahb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = SMI_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* fsmc clock */
-static struct clk fsmc_clk = {
- .pclk = &ahb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = FSMC_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* clock derived from apb clk */
-/* adc clock */
-static struct clk adc_clk = {
- .pclk = &apb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = ADC_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* ssp0 clock */
-static struct clk ssp0_clk = {
- .pclk = &apb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = SSP0_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* ssp1 clock */
-static struct clk ssp1_clk = {
- .pclk = &apb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = SSP1_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* ssp2 clock */
-static struct clk ssp2_clk = {
- .pclk = &apb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = SSP2_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* gpio0 ARM subsystem clock */
-static struct clk gpio0_clk = {
- .flags = ALWAYS_ENABLED,
- .pclk = &apb_clk,
- .recalc = &follow_parent,
-};
-
-/* gpio1 clock */
-static struct clk gpio1_clk = {
- .pclk = &apb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = GPIO1_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-/* gpio2 clock */
-static struct clk gpio2_clk = {
- .pclk = &apb_clk,
- .en_reg = PERIP1_CLK_ENB,
- .en_reg_bit = GPIO2_CLK_ENB,
- .recalc = &follow_parent,
-};
-
-static struct clk dummy_apb_pclk;
-
-/* array of all spear 6xx clock lookups */
-static struct clk_lookup spear_clk_lookups[] = {
- CLKDEV_INIT(NULL, "apb_pclk", &dummy_apb_pclk),
- /* root clks */
- CLKDEV_INIT(NULL, "osc_32k_clk", &osc_32k_clk),
- CLKDEV_INIT(NULL, "osc_30m_clk", &osc_30m_clk),
- /* clock derived from 32 KHz os clk */
- CLKDEV_INIT("rtc-spear", NULL, &rtc_clk),
- /* clock derived from 30 MHz os clk */
- CLKDEV_INIT(NULL, "pll1_clk", &pll1_clk),
- CLKDEV_INIT(NULL, "pll3_48m_clk", &pll3_48m_clk),
- CLKDEV_INIT("wdt", NULL, &wdt_clk),
- /* clock derived from pll1 clk */
- CLKDEV_INIT(NULL, "cpu_clk", &cpu_clk),
- CLKDEV_INIT(NULL, "ahb_clk", &ahb_clk),
- CLKDEV_INIT(NULL, "uart_synth_clk", &uart_synth_clk),
- CLKDEV_INIT(NULL, "firda_synth_clk", &firda_synth_clk),
- CLKDEV_INIT(NULL, "clcd_synth_clk", &clcd_synth_clk),
- CLKDEV_INIT(NULL, "gpt0_synth_clk", &gpt0_synth_clk),
- CLKDEV_INIT(NULL, "gpt2_synth_clk", &gpt2_synth_clk),
- CLKDEV_INIT(NULL, "gpt3_synth_clk", &gpt3_synth_clk),
- CLKDEV_INIT("d0000000.serial", NULL, &uart0_clk),
- CLKDEV_INIT("d0080000.serial", NULL, &uart1_clk),
- CLKDEV_INIT("firda", NULL, &firda_clk),
- CLKDEV_INIT("clcd", NULL, &clcd_clk),
- CLKDEV_INIT("gpt0", NULL, &gpt0_clk),
- CLKDEV_INIT("gpt1", NULL, &gpt1_clk),
- CLKDEV_INIT("gpt2", NULL, &gpt2_clk),
- CLKDEV_INIT("gpt3", NULL, &gpt3_clk),
- /* clock derived from pll3 clk */
- CLKDEV_INIT("designware_udc", NULL, &usbd_clk),
- CLKDEV_INIT(NULL, "usbh.0_clk", &usbh0_clk),
- CLKDEV_INIT(NULL, "usbh.1_clk", &usbh1_clk),
- /* clock derived from ahb clk */
- CLKDEV_INIT(NULL, "apb_clk", &apb_clk),
- CLKDEV_INIT("d0200000.i2c", NULL, &i2c_clk),
- CLKDEV_INIT("fc400000.dma", NULL, &dma_clk),
- CLKDEV_INIT("jpeg", NULL, &jpeg_clk),
- CLKDEV_INIT("gmac", NULL, &gmac_clk),
- CLKDEV_INIT("fc000000.flash", NULL, &smi_clk),
- CLKDEV_INIT("d1800000.flash", NULL, &fsmc_clk),
- /* clock derived from apb clk */
- CLKDEV_INIT("adc", NULL, &adc_clk),
- CLKDEV_INIT("ssp-pl022.0", NULL, &ssp0_clk),
- CLKDEV_INIT("ssp-pl022.1", NULL, &ssp1_clk),
- CLKDEV_INIT("ssp-pl022.2", NULL, &ssp2_clk),
- CLKDEV_INIT("f0100000.gpio", NULL, &gpio0_clk),
- CLKDEV_INIT("fc980000.gpio", NULL, &gpio1_clk),
- CLKDEV_INIT("d8100000.gpio", NULL, &gpio2_clk),
-};
-
-void __init spear6xx_clk_init(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(spear_clk_lookups); i++)
- clk_register(&spear_clk_lookups[i]);
-
- clk_init();
-}
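Note on the bus divider table removed above: bus_rtbl[] maps .div = 3 down to .div = 0 onto "parent divided by 4" down to "divided by 1", i.e. a divide-by-(div + 1) relation for the AHB/APB bus clocks. The sketch below only restates that arithmetic under that assumption; the helper name is invented here, and the real implementation is bus_calc_rate()/bus_clk_recalc() in the plat-spear clock code.

        /* Illustrative sketch only, not part of this patch. */
        static unsigned long bus_rate_sketch(unsigned long parent_rate,
                                             unsigned int div)
        {
                /* .div = 3 -> parent/4, ..., .div = 0 -> parent/1 */
                return parent_rate / (div + 1);
        }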
diff --git a/arch/arm/mach-spear6xx/include/mach/generic.h b/arch/arm/mach-spear6xx/include/mach/generic.h
index 7167fd331d8..65514b15937 100644
--- a/arch/arm/mach-spear6xx/include/mach/generic.h
+++ b/arch/arm/mach-spear6xx/include/mach/generic.h
@@ -16,7 +16,7 @@
#include <linux/init.h>
-void __init spear_setup_timer(resource_size_t base, int irq);
+void __init spear_setup_of_timer(void);
void spear_restart(char, const char *);
void __init spear6xx_clk_init(void);
diff --git a/arch/arm/mach-spear6xx/include/mach/irqs.h b/arch/arm/mach-spear6xx/include/mach/irqs.h
index 2b735389e74..37a5c411a86 100644
--- a/arch/arm/mach-spear6xx/include/mach/irqs.h
+++ b/arch/arm/mach-spear6xx/include/mach/irqs.h
@@ -16,9 +16,6 @@
/* IRQ definitions */
/* VIC 1 */
-/* FIXME: probe this from DT */
-#define IRQ_CPU_GPT1_1 16
-
#define IRQ_VIC_END 64
/* GPIO pins virtual irqs */
diff --git a/arch/arm/mach-spear6xx/include/mach/misc_regs.h b/arch/arm/mach-spear6xx/include/mach/misc_regs.h
index 2b9aaa6cdd1..179e45774b3 100644
--- a/arch/arm/mach-spear6xx/include/mach/misc_regs.h
+++ b/arch/arm/mach-spear6xx/include/mach/misc_regs.h
@@ -14,6 +14,8 @@
#ifndef __MACH_MISC_REGS_H
#define __MACH_MISC_REGS_H
+#include <mach/spear.h>
+
#define MISC_BASE IOMEM(VA_SPEAR6XX_ICM3_MISC_REG_BASE)
#define DMA_CHN_CFG (MISC_BASE + 0x0A0)
diff --git a/arch/arm/mach-spear6xx/include/mach/spear.h b/arch/arm/mach-spear6xx/include/mach/spear.h
index d278ed047a5..cb8ed2f4dc8 100644
--- a/arch/arm/mach-spear6xx/include/mach/spear.h
+++ b/arch/arm/mach-spear6xx/include/mach/spear.h
@@ -25,7 +25,6 @@
/* ML-1, 2 - Multi Layer CPU Subsystem */
#define SPEAR6XX_ML_CPU_BASE UL(0xF0000000)
#define VA_SPEAR6XX_ML_CPU_BASE UL(0xF0000000)
-#define SPEAR6XX_CPU_TMR_BASE UL(0xF0000000)
/* ICM3 - Basic Subsystem */
#define SPEAR6XX_ICM3_SMI_CTRL_BASE UL(0xFC000000)
diff --git a/arch/arm/mach-spear6xx/spear6xx.c b/arch/arm/mach-spear6xx/spear6xx.c
index de194dbb837..2e2e3596583 100644
--- a/arch/arm/mach-spear6xx/spear6xx.c
+++ b/arch/arm/mach-spear6xx/spear6xx.c
@@ -419,9 +419,6 @@ struct map_desc spear6xx_io_desc[] __initdata = {
void __init spear6xx_map_io(void)
{
iotable_init(spear6xx_io_desc, ARRAY_SIZE(spear6xx_io_desc));
-
- /* This will initialize clock framework */
- spear6xx_clk_init();
}
static void __init spear6xx_timer_init(void)
@@ -429,6 +426,8 @@ static void __init spear6xx_timer_init(void)
char pclk_name[] = "pll3_48m_clk";
struct clk *gpt_clk, *pclk;
+ spear6xx_clk_init();
+
/* get the system timer clock */
gpt_clk = clk_get_sys("gpt0", NULL);
if (IS_ERR(gpt_clk)) {
@@ -448,7 +447,7 @@ static void __init spear6xx_timer_init(void)
clk_put(gpt_clk);
clk_put(pclk);
- spear_setup_timer(SPEAR6XX_CPU_TMR_BASE, IRQ_CPU_GPT1_1);
+ spear_setup_of_timer();
}
struct sys_timer spear6xx_timer = {
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index d0f2546706c..6a113a9bb87 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -50,6 +50,14 @@ config TEGRA_PCI
depends on ARCH_TEGRA_2x_SOC
select PCI
+config TEGRA_AHB
+ bool "Enable AHB driver for NVIDIA Tegra SoCs"
+ default y
+ help
+ Adds AHB configuration functionality for NVIDIA Tegra SoCs,
+ which controls AHB bus master arbitration and some
+ performance parameters (priority, prefetch size).
+
comment "Tegra board type"
config MACH_HARMONY
@@ -111,7 +119,7 @@ config MACH_VENTANA
Support for the nVidia Ventana development platform
choice
- prompt "Low-level debug console UART"
+ prompt "Default low-level debug console UART"
default TEGRA_DEBUG_UART_NONE
config TEGRA_DEBUG_UART_NONE
@@ -134,6 +142,33 @@ config TEGRA_DEBUG_UARTE
endchoice
+choice
+ prompt "Automatic low-level debug console UART"
+ default TEGRA_DEBUG_UART_AUTO_NONE
+
+config TEGRA_DEBUG_UART_AUTO_NONE
+ bool "None"
+
+config TEGRA_DEBUG_UART_AUTO_ODMDATA
+ bool "Via ODMDATA"
+ help
+ Automatically determines which UART to use for low-level debug based
+ on the ODMDATA value. This value is part of the BCT, and is written
+ to the boot memory device using nvflash or another flashing tool.
+ When bits 19:18 are 3, then bits 17:15 indicate which UART to use;
+ 0/1/2/3/4 are UART A/B/C/D/E.
+
+config TEGRA_DEBUG_UART_AUTO_SCRATCH
+ bool "Via UART scratch register"
+ help
+ Automatically determines which UART to use for low-level debug based
+ on the UART scratch register value. Some bootloaders put ASCII 'D'
+ in this register when they initialize their own console UART output.
+ Using this option allows the kernel to automatically pick the same
+ UART.
+
+endchoice
+
config TEGRA_SYSTEM_DMA
bool "Enable system DMA driver for NVIDIA Tegra SoCs"
default y
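Worked example for the TEGRA_DEBUG_UART_AUTO_ODMDATA help text above; the in-tree decode is the auto_odmdata() helper added further down in this diff, and both the helper name and the example value below are made up purely for illustration.

        /* Illustrative sketch only, not part of this patch. */
        static int odmdata_to_uart_sketch(u32 odmdata)
        {
                /* bits 19:18: console type; 2 and 3 are both accepted as UART */
                if (((odmdata >> 18) & 3) < 2)
                        return -1;
                /* bits 17:15: 0..4 select UART A..E */
                return (odmdata >> 15) & 7;
        }

        /* e.g. a hypothetical odmdata of 0x000d8000 has bits 19:18 = 3 and
         * bits 17:15 = 3, so the low-level debug console would be UART D. */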
diff --git a/arch/arm/mach-tegra/board-dt-tegra20.c b/arch/arm/mach-tegra/board-dt-tegra20.c
index fac3eb1af17..eb7249db50a 100644
--- a/arch/arm/mach-tegra/board-dt-tegra20.c
+++ b/arch/arm/mach-tegra/board-dt-tegra20.c
@@ -110,6 +110,7 @@ DT_MACHINE_START(TEGRA_DT, "nVidia Tegra20 (Flattened Device Tree)")
.handle_irq = gic_handle_irq,
.timer = &tegra_timer,
.init_machine = tegra_dt_init,
+ .init_late = tegra_init_late,
.restart = tegra_assert_system_reset,
.dt_compat = tegra20_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-tegra/board-dt-tegra30.c b/arch/arm/mach-tegra/board-dt-tegra30.c
index 5f7c03e972f..4f76fa7a5da 100644
--- a/arch/arm/mach-tegra/board-dt-tegra30.c
+++ b/arch/arm/mach-tegra/board-dt-tegra30.c
@@ -51,12 +51,22 @@ struct of_dev_auxdata tegra30_auxdata_lookup[] __initdata = {
OF_DEV_AUXDATA("nvidia,tegra20-i2c", 0x7000C500, "tegra-i2c.2", NULL),
OF_DEV_AUXDATA("nvidia,tegra20-i2c", 0x7000C700, "tegra-i2c.3", NULL),
OF_DEV_AUXDATA("nvidia,tegra20-i2c", 0x7000D000, "tegra-i2c.4", NULL),
+ OF_DEV_AUXDATA("nvidia,tegra30-ahub", 0x70080000, "tegra30-ahub", NULL),
{}
};
static __initdata struct tegra_clk_init_table tegra_dt_clk_init_table[] = {
/* name parent rate enabled */
{ "uarta", "pll_p", 408000000, true },
+ { "pll_a", "pll_p_out1", 564480000, true },
+ { "pll_a_out0", "pll_a", 11289600, true },
+ { "extern1", "pll_a_out0", 0, true },
+ { "clk_out_1", "extern1", 0, true },
+ { "i2s0", "pll_a_out0", 11289600, false},
+ { "i2s1", "pll_a_out0", 11289600, false},
+ { "i2s2", "pll_a_out0", 11289600, false},
+ { "i2s3", "pll_a_out0", 11289600, false},
+ { "i2s4", "pll_a_out0", 11289600, false},
{ NULL, NULL, 0, 0},
};
@@ -80,6 +90,7 @@ DT_MACHINE_START(TEGRA30_DT, "NVIDIA Tegra30 (Flattened Device Tree)")
.handle_irq = gic_handle_irq,
.timer = &tegra_timer,
.init_machine = tegra30_dt_init,
+ .init_late = tegra_init_late,
.restart = tegra_assert_system_reset,
.dt_compat = tegra30_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-tegra/board-harmony.c b/arch/arm/mach-tegra/board-harmony.c
index b906b3b6077..e65e837f401 100644
--- a/arch/arm/mach-tegra/board-harmony.c
+++ b/arch/arm/mach-tegra/board-harmony.c
@@ -192,5 +192,6 @@ MACHINE_START(HARMONY, "harmony")
.handle_irq = gic_handle_irq,
.timer = &tegra_timer,
.init_machine = tegra_harmony_init,
+ .init_late = tegra_init_late,
.restart = tegra_assert_system_reset,
MACHINE_END
diff --git a/arch/arm/mach-tegra/board-paz00.c b/arch/arm/mach-tegra/board-paz00.c
index d0735c70d68..bbc1907e98a 100644
--- a/arch/arm/mach-tegra/board-paz00.c
+++ b/arch/arm/mach-tegra/board-paz00.c
@@ -162,6 +162,8 @@ static void paz00_i2c_init(void)
static void paz00_usb_init(void)
{
+ tegra_ehci2_ulpi_phy_config.reset_gpio = TEGRA_ULPI_RST;
+
platform_device_register(&tegra_ehci2_device);
platform_device_register(&tegra_ehci3_device);
}
@@ -179,7 +181,6 @@ static __initdata struct tegra_clk_init_table paz00_clk_init_table[] = {
{ "uarta", "pll_p", 216000000, true },
{ "uartc", "pll_p", 216000000, true },
- { "pll_p_out4", "pll_p", 24000000, true },
{ "usbd", "clk_m", 12000000, false },
{ "usb2", "clk_m", 12000000, false },
{ "usb3", "clk_m", 12000000, false },
@@ -224,5 +225,6 @@ MACHINE_START(PAZ00, "Toshiba AC100 / Dynabook AZ")
.handle_irq = gic_handle_irq,
.timer = &tegra_timer,
.init_machine = tegra_paz00_init,
+ .init_late = tegra_init_late,
.restart = tegra_assert_system_reset,
MACHINE_END
diff --git a/arch/arm/mach-tegra/board-seaboard.c b/arch/arm/mach-tegra/board-seaboard.c
index 79064c7a790..71e9f3fc7fb 100644
--- a/arch/arm/mach-tegra/board-seaboard.c
+++ b/arch/arm/mach-tegra/board-seaboard.c
@@ -277,6 +277,7 @@ MACHINE_START(SEABOARD, "seaboard")
.handle_irq = gic_handle_irq,
.timer = &tegra_timer,
.init_machine = tegra_seaboard_init,
+ .init_late = tegra_init_late,
.restart = tegra_assert_system_reset,
MACHINE_END
@@ -288,6 +289,7 @@ MACHINE_START(KAEN, "kaen")
.handle_irq = gic_handle_irq,
.timer = &tegra_timer,
.init_machine = tegra_kaen_init,
+ .init_late = tegra_init_late,
.restart = tegra_assert_system_reset,
MACHINE_END
@@ -299,5 +301,6 @@ MACHINE_START(WARIO, "wario")
.handle_irq = gic_handle_irq,
.timer = &tegra_timer,
.init_machine = tegra_wario_init,
+ .init_late = tegra_init_late,
.restart = tegra_assert_system_reset,
MACHINE_END
diff --git a/arch/arm/mach-tegra/board-trimslice.c b/arch/arm/mach-tegra/board-trimslice.c
index bc59b379c6f..776aa9564d5 100644
--- a/arch/arm/mach-tegra/board-trimslice.c
+++ b/arch/arm/mach-tegra/board-trimslice.c
@@ -118,6 +118,8 @@ static void trimslice_usb_init(void)
pdata = tegra_ehci1_device.dev.platform_data;
pdata->vbus_gpio = TRIMSLICE_GPIO_USB1_MODE;
+ tegra_ehci2_ulpi_phy_config.reset_gpio = TEGRA_GPIO_PV0;
+
platform_device_register(&tegra_ehci3_device);
platform_device_register(&tegra_ehci2_device);
platform_device_register(&tegra_ehci1_device);
@@ -176,5 +178,6 @@ MACHINE_START(TRIMSLICE, "trimslice")
.handle_irq = gic_handle_irq,
.timer = &tegra_timer,
.init_machine = tegra_trimslice_init,
+ .init_late = tegra_init_late,
.restart = tegra_assert_system_reset,
MACHINE_END
diff --git a/arch/arm/mach-tegra/board.h b/arch/arm/mach-tegra/board.h
index 75d1543d77c..65014968fc6 100644
--- a/arch/arm/mach-tegra/board.h
+++ b/arch/arm/mach-tegra/board.h
@@ -32,5 +32,19 @@ void __init tegra_init_irq(void);
void __init tegra_dt_init_irq(void);
int __init tegra_pcie_init(bool init_port0, bool init_port1);
+void tegra_init_late(void);
+
+#ifdef CONFIG_DEBUG_FS
+int tegra_clk_debugfs_init(void);
+#else
+static inline int tegra_clk_debugfs_init(void) { return 0; }
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_2x_SOC) && defined(CONFIG_DEBUG_FS)
+int __init tegra_powergate_debugfs_init(void);
+#else
+static inline int tegra_powergate_debugfs_init(void) { return 0; }
+#endif
+
extern struct sys_timer tegra_timer;
#endif
diff --git a/arch/arm/mach-tegra/clock.c b/arch/arm/mach-tegra/clock.c
index 8dad8d18cb4..58f981c0819 100644
--- a/arch/arm/mach-tegra/clock.c
+++ b/arch/arm/mach-tegra/clock.c
@@ -642,7 +642,7 @@ static int clk_debugfs_register(struct clk *c)
return 0;
}
-static int __init clk_debugfs_init(void)
+int __init tegra_clk_debugfs_init(void)
{
struct clk *c;
struct dentry *d;
@@ -669,5 +669,4 @@ err_out:
return err;
}
-late_initcall(clk_debugfs_init);
#endif
diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c
index 22df10fb997..204a5c8b0b5 100644
--- a/arch/arm/mach-tegra/common.c
+++ b/arch/arm/mach-tegra/common.c
@@ -82,10 +82,12 @@ static __initdata struct tegra_clk_init_table tegra20_clk_init_table[] = {
{ "pll_p_out1", "pll_p", 28800000, true },
{ "pll_p_out2", "pll_p", 48000000, true },
{ "pll_p_out3", "pll_p", 72000000, true },
- { "pll_p_out4", "pll_p", 108000000, true },
- { "sclk", "pll_p_out4", 108000000, true },
- { "hclk", "sclk", 108000000, true },
- { "pclk", "hclk", 54000000, true },
+ { "pll_p_out4", "pll_p", 24000000, true },
+ { "pll_c", "clk_m", 600000000, true },
+ { "pll_c_out1", "pll_c", 120000000, true },
+ { "sclk", "pll_c_out1", 120000000, true },
+ { "hclk", "sclk", 120000000, true },
+ { "pclk", "hclk", 60000000, true },
{ "csite", NULL, 0, true },
{ "emc", NULL, 0, true },
{ "cpu", NULL, 0, true },
@@ -93,6 +95,17 @@ static __initdata struct tegra_clk_init_table tegra20_clk_init_table[] = {
};
#endif
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+static __initdata struct tegra_clk_init_table tegra30_clk_init_table[] = {
+ /* name parent rate enabled */
+ { "clk_m", NULL, 0, true },
+ { "pll_p", "clk_m", 408000000, true },
+ { "pll_p_out1", "pll_p", 9600000, true },
+ { NULL, NULL, 0, 0},
+};
+#endif
+
+
static void __init tegra_init_cache(u32 tag_latency, u32 data_latency)
{
#ifdef CONFIG_CACHE_L2X0
@@ -127,8 +140,15 @@ void __init tegra30_init_early(void)
{
tegra_init_fuse();
tegra30_init_clocks();
+ tegra_clk_init_from_table(tegra30_clk_init_table);
tegra_init_cache(0x441, 0x551);
tegra_pmc_init();
tegra_powergate_init();
}
#endif
+
+void __init tegra_init_late(void)
+{
+ tegra_clk_debugfs_init();
+ tegra_powergate_debugfs_init();
+}
diff --git a/arch/arm/mach-tegra/devices.c b/arch/arm/mach-tegra/devices.c
index 2d8dfa2faf8..c70e65ffa36 100644
--- a/arch/arm/mach-tegra/devices.c
+++ b/arch/arm/mach-tegra/devices.c
@@ -439,9 +439,8 @@ static struct resource tegra_usb3_resources[] = {
},
};
-static struct tegra_ulpi_config tegra_ehci2_ulpi_phy_config = {
- /* All existing boards use GPIO PV0 for phy reset */
- .reset_gpio = TEGRA_GPIO_PV0,
+struct tegra_ulpi_config tegra_ehci2_ulpi_phy_config = {
+ .reset_gpio = -1,
.clk = "cdev2",
};
diff --git a/arch/arm/mach-tegra/devices.h b/arch/arm/mach-tegra/devices.h
index 138c642e59f..4f505272649 100644
--- a/arch/arm/mach-tegra/devices.h
+++ b/arch/arm/mach-tegra/devices.h
@@ -22,6 +22,10 @@
#include <linux/platform_device.h>
#include <linux/platform_data/tegra_usb.h>
+#include <mach/usb_phy.h>
+
+extern struct tegra_ulpi_config tegra_ehci2_ulpi_phy_config;
+
extern struct tegra_ehci_platform_data tegra_ehci1_pdata;
extern struct tegra_ehci_platform_data tegra_ehci2_pdata;
extern struct tegra_ehci_platform_data tegra_ehci3_pdata;
diff --git a/arch/arm/mach-tegra/include/mach/tegra-ahb.h b/arch/arm/mach-tegra/include/mach/tegra-ahb.h
new file mode 100644
index 00000000000..e0f8c84b1d8
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/tegra-ahb.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __MACH_TEGRA_AHB_H__
+#define __MACH_TEGRA_AHB_H__
+
+extern int tegra_ahb_enable_smmu(struct device_node *ahb);
+
+#endif /* __MACH_TEGRA_AHB_H__ */
diff --git a/arch/arm/mach-tegra/include/mach/uncompress.h b/arch/arm/mach-tegra/include/mach/uncompress.h
index 5a440f315e5..937c4c50219 100644
--- a/arch/arm/mach-tegra/include/mach/uncompress.h
+++ b/arch/arm/mach-tegra/include/mach/uncompress.h
@@ -63,52 +63,86 @@ static inline void save_uart_address(void)
buf[0] = 0;
}
-/*
- * Setup before decompression. This is where we do UART selection for
- * earlyprintk and init the uart_base register.
- */
-static inline void arch_decomp_setup(void)
+static const struct {
+ u32 base;
+ u32 reset_reg;
+ u32 clock_reg;
+ u32 bit;
+} uarts[] = {
+ {
+ TEGRA_UARTA_BASE,
+ TEGRA_CLK_RESET_BASE + 0x04,
+ TEGRA_CLK_RESET_BASE + 0x10,
+ 6,
+ },
+ {
+ TEGRA_UARTB_BASE,
+ TEGRA_CLK_RESET_BASE + 0x04,
+ TEGRA_CLK_RESET_BASE + 0x10,
+ 7,
+ },
+ {
+ TEGRA_UARTC_BASE,
+ TEGRA_CLK_RESET_BASE + 0x08,
+ TEGRA_CLK_RESET_BASE + 0x14,
+ 23,
+ },
+ {
+ TEGRA_UARTD_BASE,
+ TEGRA_CLK_RESET_BASE + 0x0c,
+ TEGRA_CLK_RESET_BASE + 0x18,
+ 1,
+ },
+ {
+ TEGRA_UARTE_BASE,
+ TEGRA_CLK_RESET_BASE + 0x0c,
+ TEGRA_CLK_RESET_BASE + 0x18,
+ 2,
+ },
+};
+
+static inline bool uart_clocked(int i)
+{
+ if (*(u8 *)uarts[i].reset_reg & BIT(uarts[i].bit))
+ return false;
+
+ if (!(*(u8 *)uarts[i].clock_reg & BIT(uarts[i].bit)))
+ return false;
+
+ return true;
+}
+
+#ifdef CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA
+int auto_odmdata(void)
+{
+ volatile u32 *pmc = (volatile u32 *)TEGRA_PMC_BASE;
+ u32 odmdata = pmc[0xa0 / 4];
+
+ /*
+ * Bits 19:18 are the console type: 0=default, 1=none, 2=DCC, 3=UART.
+ * Some boards apparently swap the last two values, but we don't have
+ * any way of catering for that here, so we just accept either. If this
+ * doesn't make sense for your board, just don't enable this feature.
+ *
+ * Bits 17:15 indicate the UART to use, 0/1/2/3/4 are UART A/B/C/D/E.
+ */
+
+ switch ((odmdata >> 18) & 3) {
+ case 2:
+ case 3:
+ break;
+ default:
+ return -1;
+ }
+
+ return (odmdata >> 15) & 7;
+}
+#endif
+
+#ifdef CONFIG_TEGRA_DEBUG_UART_AUTO_SCRATCH
+int auto_scratch(void)
{
- static const struct {
- u32 base;
- u32 reset_reg;
- u32 clock_reg;
- u32 bit;
- } uarts[] = {
- {
- TEGRA_UARTA_BASE,
- TEGRA_CLK_RESET_BASE + 0x04,
- TEGRA_CLK_RESET_BASE + 0x10,
- 6,
- },
- {
- TEGRA_UARTB_BASE,
- TEGRA_CLK_RESET_BASE + 0x04,
- TEGRA_CLK_RESET_BASE + 0x10,
- 7,
- },
- {
- TEGRA_UARTC_BASE,
- TEGRA_CLK_RESET_BASE + 0x08,
- TEGRA_CLK_RESET_BASE + 0x14,
- 23,
- },
- {
- TEGRA_UARTD_BASE,
- TEGRA_CLK_RESET_BASE + 0x0c,
- TEGRA_CLK_RESET_BASE + 0x18,
- 1,
- },
- {
- TEGRA_UARTE_BASE,
- TEGRA_CLK_RESET_BASE + 0x0c,
- TEGRA_CLK_RESET_BASE + 0x18,
- 2,
- },
- };
int i;
- volatile u32 *apb_misc = (volatile u32 *)TEGRA_APB_MISC_BASE;
- u32 chip, div;
/*
* Look for the first UART that:
@@ -125,20 +159,60 @@ static inline void arch_decomp_setup(void)
* back to what's specified in TEGRA_DEBUG_UART_BASE.
*/
for (i = 0; i < ARRAY_SIZE(uarts); i++) {
- if (*(u8 *)uarts[i].reset_reg & BIT(uarts[i].bit))
- continue;
-
- if (!(*(u8 *)uarts[i].clock_reg & BIT(uarts[i].bit)))
+ if (!uart_clocked(i))
continue;
uart = (volatile u8 *)uarts[i].base;
if (uart[UART_SCR << DEBUG_UART_SHIFT] != 'D')
continue;
- break;
+ return i;
}
- if (i == ARRAY_SIZE(uarts))
- uart = (volatile u8 *)TEGRA_DEBUG_UART_BASE;
+
+ return -1;
+}
+#endif
+
+/*
+ * Setup before decompression. This is where we do UART selection for
+ * earlyprintk and init the uart_base register.
+ */
+static inline void arch_decomp_setup(void)
+{
+ int uart_id, auto_uart_id;
+ volatile u32 *apb_misc = (volatile u32 *)TEGRA_APB_MISC_BASE;
+ u32 chip, div;
+
+#if defined(CONFIG_TEGRA_DEBUG_UARTA)
+ uart_id = 0;
+#elif defined(CONFIG_TEGRA_DEBUG_UARTB)
+ uart_id = 1;
+#elif defined(CONFIG_TEGRA_DEBUG_UARTC)
+ uart_id = 2;
+#elif defined(CONFIG_TEGRA_DEBUG_UARTD)
+ uart_id = 3;
+#elif defined(CONFIG_TEGRA_DEBUG_UARTE)
+ uart_id = 4;
+#else
+ uart_id = -1;
+#endif
+
+#if defined(CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA)
+ auto_uart_id = auto_odmdata();
+#elif defined(CONFIG_TEGRA_DEBUG_UART_AUTO_SCRATCH)
+ auto_uart_id = auto_scratch();
+#else
+ auto_uart_id = -1;
+#endif
+ if (auto_uart_id != -1)
+ uart_id = auto_uart_id;
+
+ if (uart_id < 0 || uart_id >= ARRAY_SIZE(uarts) ||
+ !uart_clocked(uart_id))
+ uart = NULL;
+ else
+ uart = (volatile u8 *)uarts[uart_id].base;
+
save_uart_address();
if (uart == NULL)
return;
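
The auto-detection above relies on the ODMDATA word that auto_odmdata() reads from the PMC block at offset 0xa0: bits 19:18 select the console type (only values 2 and 3 are taken to mean "UART", since some boards swap DCC and UART), and bits 17:15 select which UART, 0-4 for UART A-E. The following standalone sketch mirrors that decoding so the bit layout can be checked on a host; the sample value is made up and the helper is purely illustrative.

	#include <stdio.h>
	#include <stdint.h>

	/* Mirrors auto_odmdata(): returns the UART index 0-4, or -1 when the
	 * console-type field does not select a UART. */
	static int decode_odmdata_uart(uint32_t odmdata)
	{
		unsigned int console = (odmdata >> 18) & 3;	/* 2 or 3 => UART */

		if (console != 2 && console != 3)
			return -1;
		return (odmdata >> 15) & 7;			/* 0-4 => UART A-E */
	}

	int main(void)
	{
		uint32_t sample = (3u << 18) | (3u << 15);	/* hypothetical: console=UART, index 3 (UART D) */

		printf("selected UART index: %d\n", decode_odmdata_uart(sample));
		return 0;
	}

Indices 5-7 are representable in the three-bit field but are rejected later by arch_decomp_setup(), which bounds-checks uart_id against the uarts[] table and verifies the chosen UART is clocked.
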
diff --git a/arch/arm/mach-tegra/include/mach/usb_phy.h b/arch/arm/mach-tegra/include/mach/usb_phy.h
index de1a0f602b2..935ce9f6559 100644
--- a/arch/arm/mach-tegra/include/mach/usb_phy.h
+++ b/arch/arm/mach-tegra/include/mach/usb_phy.h
@@ -61,8 +61,8 @@ struct tegra_usb_phy {
struct usb_phy *ulpi;
};
-struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs,
- void *config, enum tegra_usb_phy_mode phy_mode);
+struct tegra_usb_phy *tegra_usb_phy_open(struct device *dev, int instance,
+ void __iomem *regs, void *config, enum tegra_usb_phy_mode phy_mode);
int tegra_usb_phy_power_on(struct tegra_usb_phy *phy);
diff --git a/arch/arm/mach-tegra/powergate.c b/arch/arm/mach-tegra/powergate.c
index c238699ae86..f5b12fb4ff1 100644
--- a/arch/arm/mach-tegra/powergate.c
+++ b/arch/arm/mach-tegra/powergate.c
@@ -234,7 +234,7 @@ static const struct file_operations powergate_fops = {
.release = single_release,
};
-static int __init powergate_debugfs_init(void)
+int __init tegra_powergate_debugfs_init(void)
{
struct dentry *d;
int err = -ENOMEM;
@@ -247,6 +247,4 @@ static int __init powergate_debugfs_init(void)
return err;
}
-late_initcall(powergate_debugfs_init);
-
#endif
diff --git a/arch/arm/mach-tegra/tegra2_clocks.c b/arch/arm/mach-tegra/tegra2_clocks.c
index bae09b85989..b59315ce369 100644
--- a/arch/arm/mach-tegra/tegra2_clocks.c
+++ b/arch/arm/mach-tegra/tegra2_clocks.c
@@ -1486,6 +1486,10 @@ static struct clk tegra_clk_m = {
};
static struct clk_pll_freq_table tegra_pll_c_freq_table[] = {
+ { 12000000, 600000000, 600, 12, 1, 8 },
+ { 13000000, 600000000, 600, 13, 1, 8 },
+ { 19200000, 600000000, 500, 16, 1, 6 },
+ { 26000000, 600000000, 600, 26, 1, 8 },
{ 0, 0, 0, 0, 0, 0 },
};
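
Each row added above follows the struct clk_pll_freq_table layout used by this file: { input rate, output rate, N, M, P, CPCON }. For all four supported oscillators the dividers give input * N / M = 600 MHz with a post divider P of 1 (CPCON only tunes the charge pump). A quick host-side check of that arithmetic, nothing hardware-specific:

	#include <stdio.h>

	struct pll_row { unsigned long in, out, n, m, p; };

	int main(void)
	{
		/* The four table entries above, CPCON omitted. */
		static const struct pll_row rows[] = {
			{ 12000000, 600000000, 600, 12, 1 },
			{ 13000000, 600000000, 600, 13, 1 },
			{ 19200000, 600000000, 500, 16, 1 },
			{ 26000000, 600000000, 600, 26, 1 },
		};

		for (unsigned int i = 0; i < sizeof(rows) / sizeof(rows[0]); i++) {
			unsigned long long vco = (unsigned long long)rows[i].in * rows[i].n / rows[i].m;

			printf("%9lu Hz in -> %llu Hz (table: %lu Hz, P=%lu)\n",
			       rows[i].in, vco, rows[i].out, rows[i].p);
		}
		return 0;
	}
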
diff --git a/arch/arm/mach-tegra/tegra30_clocks.c b/arch/arm/mach-tegra/tegra30_clocks.c
index 6d08b53f92d..e33fe4b14a2 100644
--- a/arch/arm/mach-tegra/tegra30_clocks.c
+++ b/arch/arm/mach-tegra/tegra30_clocks.c
@@ -3015,6 +3015,15 @@ struct clk_duplicate tegra_clk_duplicates[] = {
CLK_DUPLICATE("sbc6", "spi_slave_tegra.5", NULL),
CLK_DUPLICATE("twd", "smp_twd", NULL),
CLK_DUPLICATE("vcp", "nvavp", "vcp"),
+ CLK_DUPLICATE("i2s0", NULL, "i2s0"),
+ CLK_DUPLICATE("i2s1", NULL, "i2s1"),
+ CLK_DUPLICATE("i2s2", NULL, "i2s2"),
+ CLK_DUPLICATE("i2s3", NULL, "i2s3"),
+ CLK_DUPLICATE("i2s4", NULL, "i2s4"),
+ CLK_DUPLICATE("dam0", NULL, "dam0"),
+ CLK_DUPLICATE("dam1", NULL, "dam1"),
+ CLK_DUPLICATE("dam2", NULL, "dam2"),
+ CLK_DUPLICATE("spdif_in", NULL, "spdif_in"),
};
struct clk *tegra_ptr_clks[] = {
diff --git a/arch/arm/mach-tegra/usb_phy.c b/arch/arm/mach-tegra/usb_phy.c
index d71d2fed672..54e353c8e30 100644
--- a/arch/arm/mach-tegra/usb_phy.c
+++ b/arch/arm/mach-tegra/usb_phy.c
@@ -26,6 +26,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
#include <asm/mach-types.h>
@@ -654,8 +655,8 @@ static void ulpi_phy_power_off(struct tegra_usb_phy *phy)
clk_disable(phy->clk);
}
-struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs,
- void *config, enum tegra_usb_phy_mode phy_mode)
+struct tegra_usb_phy *tegra_usb_phy_open(struct device *dev, int instance,
+ void __iomem *regs, void *config, enum tegra_usb_phy_mode phy_mode)
{
struct tegra_usb_phy *phy;
struct tegra_ulpi_config *ulpi_config;
@@ -711,6 +712,16 @@ struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs,
err = -ENXIO;
goto err1;
}
+ if (!gpio_is_valid(ulpi_config->reset_gpio))
+ ulpi_config->reset_gpio =
+ of_get_named_gpio(dev->of_node,
+ "nvidia,phy-reset-gpio", 0);
+ if (!gpio_is_valid(ulpi_config->reset_gpio)) {
+ pr_err("%s: invalid reset gpio: %d\n", __func__,
+ ulpi_config->reset_gpio);
+ err = -EINVAL;
+ goto err1;
+ }
gpio_request(ulpi_config->reset_gpio, "ulpi_phy_reset_b");
gpio_direction_output(ulpi_config->reset_gpio, 0);
phy->ulpi = otg_ulpi_create(&ulpi_viewport_access_ops, 0);
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index f943687acaf..fba8adea421 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -206,7 +206,7 @@ static struct resource ab8500_resources[] = {
};
struct platform_device ab8500_device = {
- .name = "ab8500-i2c",
+ .name = "ab8500-core",
.id = 0,
.dev = {
.platform_data = &ab8500_platdata,
@@ -785,6 +785,7 @@ MACHINE_START(U8500, "ST-Ericsson MOP500 platform")
.timer = &ux500_timer,
.handle_irq = gic_handle_irq,
.init_machine = mop500_init_machine,
+ .init_late = ux500_init_late,
MACHINE_END
MACHINE_START(HREFV60, "ST-Ericsson U8500 Platform HREFv60+")
@@ -794,6 +795,7 @@ MACHINE_START(HREFV60, "ST-Ericsson U8500 Platform HREFv60+")
.timer = &ux500_timer,
.handle_irq = gic_handle_irq,
.init_machine = hrefv60_init_machine,
+ .init_late = ux500_init_late,
MACHINE_END
MACHINE_START(SNOWBALL, "Calao Systems Snowball platform")
@@ -804,6 +806,7 @@ MACHINE_START(SNOWBALL, "Calao Systems Snowball platform")
.timer = &ux500_timer,
.handle_irq = gic_handle_irq,
.init_machine = snowball_init_machine,
+ .init_late = ux500_init_late,
MACHINE_END
#ifdef CONFIG_MACH_UX500_DT
@@ -918,6 +921,7 @@ DT_MACHINE_START(U8500_DT, "ST-Ericsson U8500 platform (Device Tree Support)")
.timer = &ux500_timer,
.handle_irq = gic_handle_irq,
.init_machine = u8500_init_machine,
+ .init_late = ux500_init_late,
.dt_compat = u8500_dt_board_compat,
MACHINE_END
#endif
diff --git a/arch/arm/mach-ux500/clock.c b/arch/arm/mach-ux500/clock.c
index 1762c4728f1..8d73b066a18 100644
--- a/arch/arm/mach-ux500/clock.c
+++ b/arch/arm/mach-ux500/clock.c
@@ -635,7 +635,7 @@ static int clk_debugfs_register(struct clk *c)
return 0;
}
-static int __init clk_debugfs_init(void)
+int __init clk_debugfs_init(void)
{
struct clk *c;
struct dentry *d;
@@ -657,7 +657,6 @@ err_out:
return err;
}
-late_initcall(clk_debugfs_init);
#endif /* defined(CONFIG_DEBUG_FS) */
unsigned long clk_smp_twd_rate = 500000000;
@@ -696,12 +695,11 @@ static struct notifier_block clk_twd_cpufreq_nb = {
.notifier_call = clk_twd_cpufreq_transition,
};
-static int clk_init_smp_twd_cpufreq(void)
+int clk_init_smp_twd_cpufreq(void)
{
return cpufreq_register_notifier(&clk_twd_cpufreq_nb,
CPUFREQ_TRANSITION_NOTIFIER);
}
-late_initcall(clk_init_smp_twd_cpufreq);
#endif
diff --git a/arch/arm/mach-ux500/clock.h b/arch/arm/mach-ux500/clock.h
index d776ada08db..65d27a13f46 100644
--- a/arch/arm/mach-ux500/clock.h
+++ b/arch/arm/mach-ux500/clock.h
@@ -150,3 +150,15 @@ struct clk clk_##_name = { \
int __init clk_db8500_ed_fixup(void);
int __init clk_init(void);
+
+#ifdef CONFIG_DEBUG_FS
+int clk_debugfs_init(void);
+#else
+static inline int clk_debugfs_init(void) { return 0; }
+#endif
+
+#ifdef CONFIG_CPU_FREQ
+int clk_init_smp_twd_cpufreq(void);
+#else
+static inline int clk_init_smp_twd_cpufreq(void) { return 0; }
+#endif
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
index a29a0e3adcf..e2360e7c770 100644
--- a/arch/arm/mach-ux500/cpu.c
+++ b/arch/arm/mach-ux500/cpu.c
@@ -73,6 +73,12 @@ void __init ux500_init_irq(void)
clk_init();
}
+void __init ux500_init_late(void)
+{
+ clk_debugfs_init();
+ clk_init_smp_twd_cpufreq();
+}
+
static const char * __init ux500_get_machine(void)
{
return kasprintf(GFP_KERNEL, "DB%4x", dbx500_partnumber());
diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h
index 4e369f1645e..8b7ed82a286 100644
--- a/arch/arm/mach-ux500/include/mach/setup.h
+++ b/arch/arm/mach-ux500/include/mach/setup.h
@@ -20,6 +20,7 @@ extern void __init u8500_map_io(void);
extern struct device * __init u8500_init_devices(void);
extern void __init ux500_init_irq(void);
+extern void __init ux500_init_late(void);
extern struct device *ux500_soc_device_init(const char *soc_id);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index db23ae4aaaa..ea6b4315409 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -17,8 +17,12 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
+#include <linux/memblock.h>
#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/vmalloc.h>
#include <asm/memory.h>
#include <asm/highmem.h>
@@ -26,9 +30,112 @@
#include <asm/tlbflush.h>
#include <asm/sizes.h>
#include <asm/mach/arch.h>
+#include <asm/dma-iommu.h>
+#include <asm/mach/map.h>
+#include <asm/system_info.h>
+#include <asm/dma-contiguous.h>
#include "mm.h"
+/*
+ * The DMA API is built upon the notion of "buffer ownership". A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device. These helper functions
+ * represent the transitions between these two ownership states.
+ *
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches. We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ *
+ */
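
The ownership rule described in the comment above is exactly what a driver sees through the streaming DMA API: the CPU hands a buffer to the device before DMA starts and takes it back once DMA is done. A minimal sketch of that discipline for a device-to-memory transfer, with a hypothetical start_dma_and_wait() helper standing in for the device-specific part:

	/* Illustrative only: dev, buf and size come from the driver. */
	static int rx_one_buffer(struct device *dev, void *buf, size_t size)
	{
		dma_addr_t dma = dma_map_single(dev, buf, size, DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, dma))
			return -ENOMEM;
		/* the device owns the buffer while the transfer runs */
		start_dma_and_wait(dev, dma, size);		/* hypothetical helper */
		/* hand ownership back; this is where the cache gets invalidated */
		dma_unmap_single(dev, dma, size, DMA_FROM_DEVICE);
		/* only now may the CPU read what the device wrote into buf */
		return 0;
	}
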
+static void __dma_page_cpu_to_dev(struct page *, unsigned long,
+ size_t, enum dma_data_direction);
+static void __dma_page_dev_to_cpu(struct page *, unsigned long,
+ size_t, enum dma_data_direction);
+
+/**
+ * arm_dma_map_page - map a portion of a page for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed. The CPU
+ * can regain ownership by calling dma_unmap_page().
+ */
+static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ if (!arch_is_coherent())
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+ return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+/**
+ * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * Unmap a page streaming mode DMA translation. The handle and size
+ * must match what was provided in the previous dma_map_page() call.
+ * All other usages are undefined.
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+ handle & ~PAGE_MASK, size, dir);
+}
+
+static void arm_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ unsigned int offset = handle & (PAGE_SIZE - 1);
+ struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void arm_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ unsigned int offset = handle & (PAGE_SIZE - 1);
+ struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+ if (!arch_is_coherent())
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
+
+struct dma_map_ops arm_dma_ops = {
+ .alloc = arm_dma_alloc,
+ .free = arm_dma_free,
+ .mmap = arm_dma_mmap,
+ .map_page = arm_dma_map_page,
+ .unmap_page = arm_dma_unmap_page,
+ .map_sg = arm_dma_map_sg,
+ .unmap_sg = arm_dma_unmap_sg,
+ .sync_single_for_cpu = arm_dma_sync_single_for_cpu,
+ .sync_single_for_device = arm_dma_sync_single_for_device,
+ .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = arm_dma_sync_sg_for_device,
+ .set_dma_mask = arm_dma_set_mask,
+};
+EXPORT_SYMBOL(arm_dma_ops);
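
With arm_dma_ops registered, the streaming API reaches this file through the generic dma_map_ops dispatch instead of the dma_map_*/dma_sync_* symbols that used to be exported here. Roughly (simplified - the real inline wrappers in the common dma-mapping headers also feed the DMA debug infrastructure), a driver call such as dma_map_page() ends up doing:

	struct dma_map_ops *ops = get_dma_ops(dev);	/* arm_dma_ops unless overridden per device */
	dma_addr_t handle = ops->map_page(dev, page, offset, size, dir, NULL);

This is what later allows set_dma_ops() to swap in the IOMMU-aware iommu_ops added at the end of this file without touching drivers.
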
+
static u64 get_coherent_dma_mask(struct device *dev)
{
u64 mask = (u64)arm_dma_limit;
@@ -56,6 +163,21 @@ static u64 get_coherent_dma_mask(struct device *dev)
return mask;
}
+static void __dma_clear_buffer(struct page *page, size_t size)
+{
+ void *ptr;
+ /*
+ * Ensure that the allocated pages are zeroed, and that any data
+ * lurking in the kernel direct-mapped region is invalidated.
+ */
+ ptr = page_address(page);
+ if (ptr) {
+ memset(ptr, 0, size);
+ dmac_flush_range(ptr, ptr + size);
+ outer_flush_range(__pa(ptr), __pa(ptr) + size);
+ }
+}
+
/*
* Allocate a DMA buffer for 'dev' of size 'size' using the
* specified gfp mask. Note that 'size' must be page aligned.
@@ -64,23 +186,6 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
{
unsigned long order = get_order(size);
struct page *page, *p, *e;
- void *ptr;
- u64 mask = get_coherent_dma_mask(dev);
-
-#ifdef CONFIG_DMA_API_DEBUG
- u64 limit = (mask + 1) & ~mask;
- if (limit && size >= limit) {
- dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
- size, mask);
- return NULL;
- }
-#endif
-
- if (!mask)
- return NULL;
-
- if (mask < 0xffffffffULL)
- gfp |= GFP_DMA;
page = alloc_pages(gfp, order);
if (!page)
@@ -93,14 +198,7 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
__free_page(p);
- /*
- * Ensure that the allocated pages are zeroed, and that any data
- * lurking in the kernel direct-mapped region is invalidated.
- */
- ptr = page_address(page);
- memset(ptr, 0, size);
- dmac_flush_range(ptr, ptr + size);
- outer_flush_range(__pa(ptr), __pa(ptr) + size);
+ __dma_clear_buffer(page, size);
return page;
}
@@ -170,6 +268,11 @@ static int __init consistent_init(void)
unsigned long base = consistent_base;
unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
+#ifndef CONFIG_ARM_DMA_USE_IOMMU
+ if (cpu_architecture() >= CPU_ARCH_ARMv6)
+ return 0;
+#endif
+
consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
if (!consistent_pte) {
pr_err("%s: no memory\n", __func__);
@@ -184,14 +287,14 @@ static int __init consistent_init(void)
pud = pud_alloc(&init_mm, pgd, base);
if (!pud) {
- printk(KERN_ERR "%s: no pud tables\n", __func__);
+ pr_err("%s: no pud tables\n", __func__);
ret = -ENOMEM;
break;
}
pmd = pmd_alloc(&init_mm, pud, base);
if (!pmd) {
- printk(KERN_ERR "%s: no pmd tables\n", __func__);
+ pr_err("%s: no pmd tables\n", __func__);
ret = -ENOMEM;
break;
}
@@ -199,7 +302,7 @@ static int __init consistent_init(void)
pte = pte_alloc_kernel(pmd, base);
if (!pte) {
- printk(KERN_ERR "%s: no pte tables\n", __func__);
+ pr_err("%s: no pte tables\n", __func__);
ret = -ENOMEM;
break;
}
@@ -210,9 +313,101 @@ static int __init consistent_init(void)
return ret;
}
-
core_initcall(consistent_init);
+static void *__alloc_from_contiguous(struct device *dev, size_t size,
+ pgprot_t prot, struct page **ret_page);
+
+static struct arm_vmregion_head coherent_head = {
+ .vm_lock = __SPIN_LOCK_UNLOCKED(&coherent_head.vm_lock),
+ .vm_list = LIST_HEAD_INIT(coherent_head.vm_list),
+};
+
+size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
+
+static int __init early_coherent_pool(char *p)
+{
+ coherent_pool_size = memparse(p, &p);
+ return 0;
+}
+early_param("coherent_pool", early_coherent_pool);
+
+/*
+ * Initialise the coherent pool for atomic allocations.
+ */
+static int __init coherent_init(void)
+{
+ pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
+ size_t size = coherent_pool_size;
+ struct page *page;
+ void *ptr;
+
+ if (cpu_architecture() < CPU_ARCH_ARMv6)
+ return 0;
+
+ ptr = __alloc_from_contiguous(NULL, size, prot, &page);
+ if (ptr) {
+ coherent_head.vm_start = (unsigned long) ptr;
+ coherent_head.vm_end = (unsigned long) ptr + size;
+ printk(KERN_INFO "DMA: preallocated %u KiB pool for atomic coherent allocations\n",
+ (unsigned)size / 1024);
+ return 0;
+ }
+ printk(KERN_ERR "DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
+ (unsigned)size / 1024);
+ return -ENOMEM;
+}
+/*
+ * CMA is activated by core_initcall, so we must be called after it.
+ */
+postcore_initcall(coherent_init);
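
The pool backs GFP_ATOMIC coherent allocations (which cannot sleep in the CMA allocator) and defaults to DEFAULT_CONSISTENT_DMA_SIZE / 8. Because early_coherent_pool() uses memparse(), the size can be overridden on the kernel command line with the usual suffixes, for example:

	coherent_pool=2M
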
+
+struct dma_contig_early_reserve {
+ phys_addr_t base;
+ unsigned long size;
+};
+
+static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
+
+static int dma_mmu_remap_num __initdata;
+
+void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
+{
+ dma_mmu_remap[dma_mmu_remap_num].base = base;
+ dma_mmu_remap[dma_mmu_remap_num].size = size;
+ dma_mmu_remap_num++;
+}
+
+void __init dma_contiguous_remap(void)
+{
+ int i;
+ for (i = 0; i < dma_mmu_remap_num; i++) {
+ phys_addr_t start = dma_mmu_remap[i].base;
+ phys_addr_t end = start + dma_mmu_remap[i].size;
+ struct map_desc map;
+ unsigned long addr;
+
+ if (end > arm_lowmem_limit)
+ end = arm_lowmem_limit;
+ if (start >= end)
+ return;
+
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = end - start;
+ map.type = MT_MEMORY_DMA_READY;
+
+ /*
+ * Clear previous low-memory mapping
+ */
+ for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
+ addr += PMD_SIZE)
+ pmd_clear(pmd_off_k(addr));
+
+ iotable_init(&map, 1);
+ }
+}
+
static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
const void *caller)
@@ -222,7 +417,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
int bit;
if (!consistent_pte) {
- printk(KERN_ERR "%s: not initialised\n", __func__);
+ pr_err("%s: not initialised\n", __func__);
dump_stack();
return NULL;
}
@@ -249,7 +444,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
pte = consistent_pte[idx] + off;
- c->vm_pages = page;
+ c->priv = page;
do {
BUG_ON(!pte_none(*pte));
@@ -281,14 +476,14 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
if (!c) {
- printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
+ pr_err("%s: trying to free invalid coherent area: %p\n",
__func__, cpu_addr);
dump_stack();
return;
}
if ((c->vm_end - c->vm_start) != size) {
- printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
+ pr_err("%s: freeing wrong coherent size (%ld != %d)\n",
__func__, c->vm_end - c->vm_start, size);
dump_stack();
size = c->vm_end - c->vm_start;
@@ -310,8 +505,8 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
}
if (pte_none(pte) || !pte_present(pte))
- printk(KERN_CRIT "%s: bad page in kernel page table\n",
- __func__);
+ pr_crit("%s: bad page in kernel page table\n",
+ __func__);
} while (size -= PAGE_SIZE);
flush_tlb_kernel_range(c->vm_start, c->vm_end);
@@ -319,20 +514,182 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
arm_vmregion_free(&consistent_head, c);
}
+static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+ void *data)
+{
+ struct page *page = virt_to_page(addr);
+ pgprot_t prot = *(pgprot_t *)data;
+
+ set_pte_ext(pte, mk_pte(page, prot), 0);
+ return 0;
+}
+
+static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
+{
+ unsigned long start = (unsigned long) page_address(page);
+ unsigned end = start + size;
+
+ apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
+ dsb();
+ flush_tlb_kernel_range(start, end);
+}
+
+static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
+ pgprot_t prot, struct page **ret_page,
+ const void *caller)
+{
+ struct page *page;
+ void *ptr;
+ page = __dma_alloc_buffer(dev, size, gfp);
+ if (!page)
+ return NULL;
+
+ ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
+ if (!ptr) {
+ __dma_free_buffer(page, size);
+ return NULL;
+ }
+
+ *ret_page = page;
+ return ptr;
+}
+
+static void *__alloc_from_pool(struct device *dev, size_t size,
+ struct page **ret_page, const void *caller)
+{
+ struct arm_vmregion *c;
+ size_t align;
+
+ if (!coherent_head.vm_start) {
+ printk(KERN_ERR "%s: coherent pool not initialised!\n",
+ __func__);
+ dump_stack();
+ return NULL;
+ }
+
+ /*
+	 * Align the region allocation - allocations from the pool are
+	 * rather small, so align them to their order in pages; the
+	 * minimum is a page size. This helps reduce fragmentation of
+	 * the DMA space.
+ */
+ align = PAGE_SIZE << get_order(size);
+ c = arm_vmregion_alloc(&coherent_head, align, size, 0, caller);
+ if (c) {
+ void *ptr = (void *)c->vm_start;
+ struct page *page = virt_to_page(ptr);
+ *ret_page = page;
+ return ptr;
+ }
+ return NULL;
+}
+
+static int __free_from_pool(void *cpu_addr, size_t size)
+{
+ unsigned long start = (unsigned long)cpu_addr;
+ unsigned long end = start + size;
+ struct arm_vmregion *c;
+
+ if (start < coherent_head.vm_start || end > coherent_head.vm_end)
+ return 0;
+
+ c = arm_vmregion_find_remove(&coherent_head, (unsigned long)start);
+
+ if ((c->vm_end - c->vm_start) != size) {
+ printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
+ __func__, c->vm_end - c->vm_start, size);
+ dump_stack();
+ size = c->vm_end - c->vm_start;
+ }
+
+ arm_vmregion_free(&coherent_head, c);
+ return 1;
+}
+
+static void *__alloc_from_contiguous(struct device *dev, size_t size,
+ pgprot_t prot, struct page **ret_page)
+{
+ unsigned long order = get_order(size);
+ size_t count = size >> PAGE_SHIFT;
+ struct page *page;
+
+ page = dma_alloc_from_contiguous(dev, count, order);
+ if (!page)
+ return NULL;
+
+ __dma_clear_buffer(page, size);
+ __dma_remap(page, size, prot);
+
+ *ret_page = page;
+ return page_address(page);
+}
+
+static void __free_from_contiguous(struct device *dev, struct page *page,
+ size_t size)
+{
+ __dma_remap(page, size, pgprot_kernel);
+ dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+}
+
+static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
+{
+ prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
+ pgprot_writecombine(prot) :
+ pgprot_dmacoherent(prot);
+ return prot;
+}
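
__get_dma_pgprot() is how the old dma_alloc_writecombine()/dma_mmap_writecombine() behaviour is expressed now that everything funnels through the attrs-based entry points: a caller asks for a write-combined buffer along these lines (this assumes the struct dma_attrs helpers of this kernel generation, before attributes became plain flags):

	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, &attrs);

Without the attribute, the prot falls back to pgprot_dmacoherent() and the allocation behaves like a plain dma_alloc_coherent().
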
+
+#define nommu() 0
+
#else /* !CONFIG_MMU */
-#define __dma_alloc_remap(page, size, gfp, prot, c) page_address(page)
-#define __dma_free_remap(addr, size) do { } while (0)
+#define nommu() 1
+
+#define __get_dma_pgprot(attrs, prot) __pgprot(0)
+#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
+#define __alloc_from_pool(dev, size, ret_page, c) NULL
+#define __alloc_from_contiguous(dev, size, prot, ret) NULL
+#define __free_from_pool(cpu_addr, size) 0
+#define __free_from_contiguous(dev, page, size) do { } while (0)
+#define __dma_free_remap(cpu_addr, size) do { } while (0)
#endif /* CONFIG_MMU */
-static void *
-__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
- pgprot_t prot, const void *caller)
+static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
+ struct page **ret_page)
+{
+ struct page *page;
+ page = __dma_alloc_buffer(dev, size, gfp);
+ if (!page)
+ return NULL;
+
+ *ret_page = page;
+ return page_address(page);
+}
+
+
+
+static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, pgprot_t prot, const void *caller)
{
+ u64 mask = get_coherent_dma_mask(dev);
struct page *page;
void *addr;
+#ifdef CONFIG_DMA_API_DEBUG
+ u64 limit = (mask + 1) & ~mask;
+ if (limit && size >= limit) {
+ dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
+ size, mask);
+ return NULL;
+ }
+#endif
+
+ if (!mask)
+ return NULL;
+
+ if (mask < 0xffffffffULL)
+ gfp |= GFP_DMA;
+
/*
* Following is a work-around (a.k.a. hack) to prevent pages
* with __GFP_COMP being passed to split_page() which cannot
@@ -342,22 +699,20 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
*/
gfp &= ~(__GFP_COMP);
- *handle = ~0;
+ *handle = DMA_ERROR_CODE;
size = PAGE_ALIGN(size);
- page = __dma_alloc_buffer(dev, size, gfp);
- if (!page)
- return NULL;
-
- if (!arch_is_coherent())
- addr = __dma_alloc_remap(page, size, gfp, prot, caller);
+ if (arch_is_coherent() || nommu())
+ addr = __alloc_simple_buffer(dev, size, gfp, &page);
+ else if (cpu_architecture() < CPU_ARCH_ARMv6)
+ addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
+ else if (gfp & GFP_ATOMIC)
+ addr = __alloc_from_pool(dev, size, &page, caller);
else
- addr = page_address(page);
+ addr = __alloc_from_contiguous(dev, size, prot, &page);
if (addr)
*handle = pfn_to_dma(dev, page_to_pfn(page));
- else
- __dma_free_buffer(page, size);
return addr;
}
@@ -366,138 +721,71 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
* Allocate DMA-coherent memory space and return both the kernel remapped
* virtual and bus address for that space.
*/
-void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
+void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, struct dma_attrs *attrs)
{
+ pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
void *memory;
if (dma_alloc_from_coherent(dev, size, handle, &memory))
return memory;
- return __dma_alloc(dev, size, handle, gfp,
- pgprot_dmacoherent(pgprot_kernel),
+ return __dma_alloc(dev, size, handle, gfp, prot,
__builtin_return_address(0));
}
-EXPORT_SYMBOL(dma_alloc_coherent);
/*
- * Allocate a writecombining region, in much the same way as
- * dma_alloc_coherent above.
+ * Create userspace mapping for the DMA-coherent memory.
*/
-void *
-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
-{
- return __dma_alloc(dev, size, handle, gfp,
- pgprot_writecombine(pgprot_kernel),
- __builtin_return_address(0));
-}
-EXPORT_SYMBOL(dma_alloc_writecombine);
-
-static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
+int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
{
int ret = -ENXIO;
#ifdef CONFIG_MMU
- unsigned long user_size, kern_size;
- struct arm_vmregion *c;
+ unsigned long pfn = dma_to_pfn(dev, dma_addr);
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
- user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
- c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
- if (c) {
- unsigned long off = vma->vm_pgoff;
-
- kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;
-
- if (off < kern_size &&
- user_size <= (kern_size - off)) {
- ret = remap_pfn_range(vma, vma->vm_start,
- page_to_pfn(c->vm_pages) + off,
- user_size << PAGE_SHIFT,
- vma->vm_page_prot);
- }
- }
+ ret = remap_pfn_range(vma, vma->vm_start,
+ pfn + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
#endif /* CONFIG_MMU */
return ret;
}
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
- vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
- return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_coherent);
-
-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_writecombine);
-
/*
- * free a page as defined by the above mapping.
- * Must not be called with IRQs disabled.
+ * Free a buffer as defined by the above mapping.
*/
-void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
+void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, struct dma_attrs *attrs)
{
- WARN_ON(irqs_disabled());
+ struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
return;
size = PAGE_ALIGN(size);
- if (!arch_is_coherent())
+ if (arch_is_coherent() || nommu()) {
+ __dma_free_buffer(page, size);
+ } else if (cpu_architecture() < CPU_ARCH_ARMv6) {
__dma_free_remap(cpu_addr, size);
-
- __dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
-}
-EXPORT_SYMBOL(dma_free_coherent);
-
-/*
- * Make an area consistent for devices.
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
- */
-void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
- enum dma_data_direction dir)
-{
- unsigned long paddr;
-
- BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
-
- dmac_map_area(kaddr, size, dir);
-
- paddr = __pa(kaddr);
- if (dir == DMA_FROM_DEVICE) {
- outer_inv_range(paddr, paddr + size);
+ __dma_free_buffer(page, size);
} else {
- outer_clean_range(paddr, paddr + size);
- }
- /* FIXME: non-speculating: flush on bidirectional mappings? */
-}
-EXPORT_SYMBOL(___dma_single_cpu_to_dev);
-
-void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
- enum dma_data_direction dir)
-{
- BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
-
- /* FIXME: non-speculating: not required */
- /* don't bother invalidating if DMA to device */
- if (dir != DMA_TO_DEVICE) {
- unsigned long paddr = __pa(kaddr);
- outer_inv_range(paddr, paddr + size);
+ if (__free_from_pool(cpu_addr, size))
+ return;
+ /*
+ * Non-atomic allocations cannot be freed with IRQs disabled
+ */
+ WARN_ON(irqs_disabled());
+ __free_from_contiguous(dev, page, size);
}
-
- dmac_unmap_area(kaddr, size, dir);
}
-EXPORT_SYMBOL(___dma_single_dev_to_cpu);
static void dma_cache_maint_page(struct page *page, unsigned long offset,
size_t size, enum dma_data_direction dir,
@@ -543,7 +831,13 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
} while (left);
}
-void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
+/*
+ * Make an area consistent for devices.
+ * Note: Drivers should NOT use this function directly, as it will break
+ * platforms with CONFIG_DMABOUNCE.
+ * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ */
+static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
size_t size, enum dma_data_direction dir)
{
unsigned long paddr;
@@ -558,9 +852,8 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
}
/* FIXME: non-speculating: flush on bidirectional mappings? */
}
-EXPORT_SYMBOL(___dma_page_cpu_to_dev);
-void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
+static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
size_t size, enum dma_data_direction dir)
{
unsigned long paddr = page_to_phys(page) + off;
@@ -578,10 +871,9 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
set_bit(PG_dcache_clean, &page->flags);
}
-EXPORT_SYMBOL(___dma_page_dev_to_cpu);
/**
- * dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to map
@@ -596,32 +888,32 @@ EXPORT_SYMBOL(___dma_page_dev_to_cpu);
* Device ownership issues as mentioned for dma_map_single are the same
* here.
*/
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
+int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
+ struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i, j;
- BUG_ON(!valid_dma_direction(dir));
-
for_each_sg(sg, s, nents, i) {
- s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
- s->length, dir);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ s->dma_length = s->length;
+#endif
+ s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
+ s->length, dir, attrs);
if (dma_mapping_error(dev, s->dma_address))
goto bad_mapping;
}
- debug_dma_map_sg(dev, sg, nents, nents, dir);
return nents;
bad_mapping:
for_each_sg(sg, s, i, j)
- __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+ ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
return 0;
}
-EXPORT_SYMBOL(dma_map_sg);
/**
- * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to unmap (same as was passed to dma_map_sg)
@@ -630,70 +922,55 @@ EXPORT_SYMBOL(dma_map_sg);
* Unmap a set of streaming mode DMA translations. Again, CPU access
* rules concerning calls here are the same as for dma_unmap_single().
*/
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
+void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
{
+ struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
- int i;
- debug_dma_unmap_sg(dev, sg, nents, dir);
+ int i;
for_each_sg(sg, s, nents, i)
- __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+ ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}
-EXPORT_SYMBOL(dma_unmap_sg);
/**
- * dma_sync_sg_for_cpu
+ * arm_dma_sync_sg_for_cpu
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to map (returned from dma_map_sg)
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
*/
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
+ struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i;
- for_each_sg(sg, s, nents, i) {
- if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
- sg_dma_len(s), dir))
- continue;
-
- __dma_page_dev_to_cpu(sg_page(s), s->offset,
- s->length, dir);
- }
-
- debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
+ for_each_sg(sg, s, nents, i)
+ ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
+ dir);
}
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
/**
- * dma_sync_sg_for_device
+ * arm_dma_sync_sg_for_device
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @sg: list of buffers
* @nents: number of buffers to map (returned from dma_map_sg)
* @dir: DMA transfer direction (same as was passed to dma_map_sg)
*/
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
+ struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
int i;
- for_each_sg(sg, s, nents, i) {
- if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
- sg_dma_len(s), dir))
- continue;
-
- __dma_page_cpu_to_dev(sg_page(s), s->offset,
- s->length, dir);
- }
-
- debug_dma_sync_sg_for_device(dev, sg, nents, dir);
+ for_each_sg(sg, s, nents, i)
+ ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
+ dir);
}
-EXPORT_SYMBOL(dma_sync_sg_for_device);
/*
* Return whether the given device DMA address mask can be supported
@@ -709,18 +986,15 @@ int dma_supported(struct device *dev, u64 mask)
}
EXPORT_SYMBOL(dma_supported);
-int dma_set_mask(struct device *dev, u64 dma_mask)
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
return -EIO;
-#ifndef CONFIG_DMABOUNCE
*dev->dma_mask = dma_mask;
-#endif
return 0;
}
-EXPORT_SYMBOL(dma_set_mask);
#define PREALLOC_DMA_DEBUG_ENTRIES 4096
@@ -733,3 +1007,679 @@ static int __init dma_debug_do_init(void)
return 0;
}
fs_initcall(dma_debug_do_init);
+
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+
+/* IOMMU */
+
+static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
+ size_t size)
+{
+ unsigned int order = get_order(size);
+ unsigned int align = 0;
+ unsigned int count, start;
+ unsigned long flags;
+
+ count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
+ (1 << mapping->order) - 1) >> mapping->order;
+
+ if (order > mapping->order)
+ align = (1 << (order - mapping->order)) - 1;
+
+ spin_lock_irqsave(&mapping->lock, flags);
+ start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
+ count, align);
+ if (start > mapping->bits) {
+ spin_unlock_irqrestore(&mapping->lock, flags);
+ return DMA_ERROR_CODE;
+ }
+
+ bitmap_set(mapping->bitmap, start, count);
+ spin_unlock_irqrestore(&mapping->lock, flags);
+
+ return mapping->base + (start << (mapping->order + PAGE_SHIFT));
+}
+
+static inline void __free_iova(struct dma_iommu_mapping *mapping,
+ dma_addr_t addr, size_t size)
+{
+ unsigned int start = (addr - mapping->base) >>
+ (mapping->order + PAGE_SHIFT);
+ unsigned int count = ((size >> PAGE_SHIFT) +
+ (1 << mapping->order) - 1) >> mapping->order;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mapping->lock, flags);
+ bitmap_clear(mapping->bitmap, start, count);
+ spin_unlock_irqrestore(&mapping->lock, flags);
+}
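
__alloc_iova()/__free_iova() manage the device's IO virtual address window with a bitmap in which every bit covers (1 << order) pages; allocation is a first-fit search under mapping->lock via bitmap_find_next_zero_area(). The standalone toy below reproduces the idea with a byte-per-slot map so the behaviour is easy to poke at on a host; it is an illustration of the scheme, not the kernel code:

	#include <stdio.h>
	#include <string.h>

	#define SLOTS 32			/* toy IOVA window: 32 allocation slots */
	static unsigned char used[SLOTS];	/* 0 = free, 1 = allocated */

	/* First-fit allocation of 'count' consecutive slots; returns start or -1. */
	static int iova_alloc(int count)
	{
		for (int start = 0; start + count <= SLOTS; start++) {
			int i;

			for (i = 0; i < count && !used[start + i]; i++)
				;
			if (i == count) {
				memset(&used[start], 1, count);
				return start;
			}
		}
		return -1;
	}

	static void iova_free(int start, int count)
	{
		memset(&used[start], 0, count);
	}

	int main(void)
	{
		int a = iova_alloc(4), b = iova_alloc(8);

		printf("a=%d b=%d\n", a, b);		/* a=0 b=4 */
		iova_free(a, 4);
		printf("c=%d\n", iova_alloc(2));	/* reuses the freed hole: c=0 */
		return 0;
	}
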
+
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+{
+ struct page **pages;
+ int count = size >> PAGE_SHIFT;
+ int array_size = count * sizeof(struct page *);
+ int i = 0;
+
+ if (array_size <= PAGE_SIZE)
+ pages = kzalloc(array_size, gfp);
+ else
+ pages = vzalloc(array_size);
+ if (!pages)
+ return NULL;
+
+ while (count) {
+ int j, order = __ffs(count);
+
+ pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
+ while (!pages[i] && order)
+ pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
+ if (!pages[i])
+ goto error;
+
+ if (order)
+ split_page(pages[i], order);
+ j = 1 << order;
+ while (--j)
+ pages[i + j] = pages[i] + j;
+
+ __dma_clear_buffer(pages[i], PAGE_SIZE << order);
+ i += 1 << order;
+ count -= 1 << order;
+ }
+
+ return pages;
+error:
+ while (--i)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+ if (array_size < PAGE_SIZE)
+ kfree(pages);
+ else
+ vfree(pages);
+ return NULL;
+}
+
+static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
+{
+ int count = size >> PAGE_SHIFT;
+ int array_size = count * sizeof(struct page *);
+ int i;
+ for (i = 0; i < count; i++)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+ if (array_size < PAGE_SIZE)
+ kfree(pages);
+ else
+ vfree(pages);
+ return 0;
+}
+
+/*
+ * Create a CPU mapping for the specified pages
+ */
+static void *
+__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot)
+{
+ struct arm_vmregion *c;
+ size_t align;
+ size_t count = size >> PAGE_SHIFT;
+ int bit;
+
+ if (!consistent_pte[0]) {
+ pr_err("%s: not initialised\n", __func__);
+ dump_stack();
+ return NULL;
+ }
+
+ /*
+ * Align the virtual region allocation - maximum alignment is
+ * a section size, minimum is a page size. This helps reduce
+ * fragmentation of the DMA space, and also prevents allocations
+ * smaller than a section from crossing a section boundary.
+ */
+ bit = fls(size - 1);
+ if (bit > SECTION_SHIFT)
+ bit = SECTION_SHIFT;
+ align = 1 << bit;
+
+ /*
+ * Allocate a virtual address in the consistent mapping region.
+ */
+ c = arm_vmregion_alloc(&consistent_head, align, size,
+ gfp & ~(__GFP_DMA | __GFP_HIGHMEM), NULL);
+ if (c) {
+ pte_t *pte;
+ int idx = CONSISTENT_PTE_INDEX(c->vm_start);
+ int i = 0;
+ u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
+
+ pte = consistent_pte[idx] + off;
+ c->priv = pages;
+
+ do {
+ BUG_ON(!pte_none(*pte));
+
+ set_pte_ext(pte, mk_pte(pages[i], prot), 0);
+ pte++;
+ off++;
+ i++;
+ if (off >= PTRS_PER_PTE) {
+ off = 0;
+ pte = consistent_pte[++idx];
+ }
+ } while (i < count);
+
+ dsb();
+
+ return (void *)c->vm_start;
+ }
+ return NULL;
+}
+
+/*
+ * Create a mapping in the device IO address space for the specified pages
+ */
+static dma_addr_t
+__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ dma_addr_t dma_addr, iova;
+ int i, ret = DMA_ERROR_CODE;
+
+ dma_addr = __alloc_iova(mapping, size);
+ if (dma_addr == DMA_ERROR_CODE)
+ return dma_addr;
+
+ iova = dma_addr;
+ for (i = 0; i < count; ) {
+ unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
+ phys_addr_t phys = page_to_phys(pages[i]);
+ unsigned int len, j;
+
+ for (j = i + 1; j < count; j++, next_pfn++)
+ if (page_to_pfn(pages[j]) != next_pfn)
+ break;
+
+ len = (j - i) << PAGE_SHIFT;
+ ret = iommu_map(mapping->domain, iova, phys, len, 0);
+ if (ret < 0)
+ goto fail;
+ iova += len;
+ i = j;
+ }
+ return dma_addr;
+fail:
+ iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
+ __free_iova(mapping, dma_addr, size);
+ return DMA_ERROR_CODE;
+}
+
+static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+ /*
+ * add optional in-page offset from iova to size and align
+ * result to page size
+ */
+ size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
+ iova &= PAGE_MASK;
+
+ iommu_unmap(mapping->domain, iova, size);
+ __free_iova(mapping, iova, size);
+ return 0;
+}
+
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+ pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+ struct page **pages;
+ void *addr = NULL;
+
+ *handle = DMA_ERROR_CODE;
+ size = PAGE_ALIGN(size);
+
+ pages = __iommu_alloc_buffer(dev, size, gfp);
+ if (!pages)
+ return NULL;
+
+ *handle = __iommu_create_mapping(dev, pages, size);
+ if (*handle == DMA_ERROR_CODE)
+ goto err_buffer;
+
+ addr = __iommu_alloc_remap(pages, size, gfp, prot);
+ if (!addr)
+ goto err_mapping;
+
+ return addr;
+
+err_mapping:
+ __iommu_remove_mapping(dev, *handle, size);
+err_buffer:
+ __iommu_free_buffer(dev, pages, size);
+ return NULL;
+}
+
+static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+{
+ struct arm_vmregion *c;
+
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+ c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
+
+ if (c) {
+ struct page **pages = c->priv;
+
+ unsigned long uaddr = vma->vm_start;
+ unsigned long usize = vma->vm_end - vma->vm_start;
+ int i = 0;
+
+ do {
+ int ret;
+
+ ret = vm_insert_page(vma, uaddr, pages[i++]);
+ if (ret) {
+ pr_err("Remapping memory, error: %d\n", ret);
+ return ret;
+ }
+
+ uaddr += PAGE_SIZE;
+ usize -= PAGE_SIZE;
+ } while (usize > 0);
+ }
+ return 0;
+}
+
+/*
+ * Free a buffer as defined by the above mapping.
+ * Must not be called with IRQs disabled.
+ */
+void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, struct dma_attrs *attrs)
+{
+ struct arm_vmregion *c;
+ size = PAGE_ALIGN(size);
+
+ c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
+ if (c) {
+ struct page **pages = c->priv;
+ __dma_free_remap(cpu_addr, size);
+ __iommu_remove_mapping(dev, handle, size);
+ __iommu_free_buffer(dev, pages, size);
+ }
+}
+
+/*
+ * Map a part of the scatter-gather list into contiguous io address space
+ */
+static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
+ size_t size, dma_addr_t *handle,
+ enum dma_data_direction dir)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova, iova_base;
+ int ret = 0;
+ unsigned int count;
+ struct scatterlist *s;
+
+ size = PAGE_ALIGN(size);
+ *handle = DMA_ERROR_CODE;
+
+ iova_base = iova = __alloc_iova(mapping, size);
+ if (iova == DMA_ERROR_CODE)
+ return -ENOMEM;
+
+ for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
+ phys_addr_t phys = page_to_phys(sg_page(s));
+ unsigned int len = PAGE_ALIGN(s->offset + s->length);
+
+ if (!arch_is_coherent())
+ __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+
+ ret = iommu_map(mapping->domain, iova, phys, len, 0);
+ if (ret < 0)
+ goto fail;
+ count += len >> PAGE_SHIFT;
+ iova += len;
+ }
+ *handle = iova_base;
+
+ return 0;
+fail:
+ iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
+ __free_iova(mapping, iova_base, size);
+ return ret;
+}
+
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ struct scatterlist *s = sg, *dma = sg, *start = sg;
+ int i, count = 0;
+ unsigned int offset = s->offset;
+ unsigned int size = s->offset + s->length;
+ unsigned int max = dma_get_max_seg_size(dev);
+
+ for (i = 1; i < nents; i++) {
+ s = sg_next(s);
+
+ s->dma_address = DMA_ERROR_CODE;
+ s->dma_length = 0;
+
+ if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
+ if (__map_sg_chunk(dev, start, size, &dma->dma_address,
+ dir) < 0)
+ goto bad_mapping;
+
+ dma->dma_address += offset;
+ dma->dma_length = size - offset;
+
+ size = offset = s->offset;
+ start = s;
+ dma = sg_next(dma);
+ count += 1;
+ }
+ size += s->length;
+ }
+ if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir) < 0)
+ goto bad_mapping;
+
+ dma->dma_address += offset;
+ dma->dma_length = size - offset;
+
+ return count+1;
+
+bad_mapping:
+ for_each_sg(sg, s, count, i)
+ __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
+ return 0;
+}
+
+/**
+ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations. Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i) {
+ if (sg_dma_len(s))
+ __iommu_remove_mapping(dev, sg_dma_address(s),
+ sg_dma_len(s));
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(sg_page(s), s->offset,
+ s->length, dir);
+ }
+}
+
+/**
+ * arm_iommu_sync_sg_for_cpu
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+
+}
+
+/**
+ * arm_iommu_sync_sg_for_device
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ if (!arch_is_coherent())
+ __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+}
+
+
+/**
+ * arm_iommu_map_page
+ * @dev: valid struct device pointer
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * IOMMU aware version of arm_dma_map_page()
+ */
+static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t dma_addr;
+ int ret, len = PAGE_ALIGN(size + offset);
+
+ if (!arch_is_coherent())
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+
+ dma_addr = __alloc_iova(mapping, len);
+ if (dma_addr == DMA_ERROR_CODE)
+ return dma_addr;
+
+ ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
+ if (ret < 0)
+ goto fail;
+
+ return dma_addr + offset;
+fail:
+ __free_iova(mapping, dma_addr, len);
+ return DMA_ERROR_CODE;
+}
+
+/**
+ * arm_iommu_unmap_page
+ * @dev: valid struct device pointer
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * IOMMU aware version of arm_dma_unmap_page()
+ */
+static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = handle & PAGE_MASK;
+ struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+ int offset = handle & ~PAGE_MASK;
+ int len = PAGE_ALIGN(size + offset);
+
+ if (!iova)
+ return;
+
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(page, offset, size, dir);
+
+ iommu_unmap(mapping->domain, iova, len);
+ __free_iova(mapping, iova, len);
+}
+
+static void arm_iommu_sync_single_for_cpu(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = handle & PAGE_MASK;
+ struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+ unsigned int offset = handle & ~PAGE_MASK;
+
+ if (!iova)
+ return;
+
+ if (!arch_is_coherent())
+ __dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void arm_iommu_sync_single_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = handle & PAGE_MASK;
+ struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+ unsigned int offset = handle & ~PAGE_MASK;
+
+ if (!iova)
+ return;
+
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+struct dma_map_ops iommu_ops = {
+ .alloc = arm_iommu_alloc_attrs,
+ .free = arm_iommu_free_attrs,
+ .mmap = arm_iommu_mmap_attrs,
+
+ .map_page = arm_iommu_map_page,
+ .unmap_page = arm_iommu_unmap_page,
+ .sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
+ .sync_single_for_device = arm_iommu_sync_single_for_device,
+
+ .map_sg = arm_iommu_map_sg,
+ .unmap_sg = arm_iommu_unmap_sg,
+ .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
+ .sync_sg_for_device = arm_iommu_sync_sg_for_device,
+};
+
+/**
+ * arm_iommu_create_mapping
+ * @bus: pointer to the bus holding the client device (for IOMMU calls)
+ * @base: start address of the valid IO address space
+ * @size: size of the valid IO address space
+ * @order: accuracy of the IO address allocations (in units of 2^order pages)
+ *
+ * Creates a mapping structure which holds information about used/unused
+ * IO address ranges, which is required to perform memory allocation and
+ * mapping with IOMMU aware functions.
+ *
+ * The client device needs to be attached to the mapping with the
+ * arm_iommu_attach_device() function.
+ */
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+ int order)
+{
+ unsigned int count = size >> (PAGE_SHIFT + order);
+ unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+ struct dma_iommu_mapping *mapping;
+ int err = -ENOMEM;
+
+ if (!count)
+ return ERR_PTR(-EINVAL);
+
+ mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
+ if (!mapping)
+ goto err;
+
+ mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!mapping->bitmap)
+ goto err2;
+
+ mapping->base = base;
+ mapping->bits = BITS_PER_BYTE * bitmap_size;
+ mapping->order = order;
+ spin_lock_init(&mapping->lock);
+
+ mapping->domain = iommu_domain_alloc(bus);
+ if (!mapping->domain)
+ goto err3;
+
+ kref_init(&mapping->kref);
+ return mapping;
+err3:
+ kfree(mapping->bitmap);
+err2:
+ kfree(mapping);
+err:
+ return ERR_PTR(err);
+}
+
+static void release_iommu_mapping(struct kref *kref)
+{
+ struct dma_iommu_mapping *mapping =
+ container_of(kref, struct dma_iommu_mapping, kref);
+
+ iommu_domain_free(mapping->domain);
+ kfree(mapping->bitmap);
+ kfree(mapping);
+}
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
+{
+ if (mapping)
+ kref_put(&mapping->kref, release_iommu_mapping);
+}
+
+/**
+ * arm_iommu_attach_device
+ * @dev: valid struct device pointer
+ * @mapping: io address space mapping structure (returned from
+ * arm_iommu_create_mapping)
+ *
+ * Attaches the specified IO address space mapping to the provided device;
+ * this replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU-aware version. More than one client might be attached to
+ * the same IO address space mapping.
+ */
+int arm_iommu_attach_device(struct device *dev,
+ struct dma_iommu_mapping *mapping)
+{
+ int err;
+
+ err = iommu_attach_device(mapping->domain, dev);
+ if (err)
+ return err;
+
+ kref_get(&mapping->kref);
+ dev->archdata.mapping = mapping;
+ set_dma_ops(dev, &iommu_ops);
+
+ pr_info("Attached IOMMU controller to %s device.\n", dev_name(dev));
+ return 0;
+}
+
+#endif
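
Putting the new IOMMU interface together: platform code creates one mapping per IO address window and attaches each client device to it, after which that device's dma_map_ops point at iommu_ops and ordinary dma_alloc_coherent()/dma_map_sg() calls go through the IOMMU. The base/size/order values below are made-up placeholders; only the function signatures come from the code above.

	struct dma_iommu_mapping *mapping;
	int err;

	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x80000000, SZ_128M, 4);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	err = arm_iommu_attach_device(dev, mapping);	/* dev: the client device */
	if (err) {
		arm_iommu_release_mapping(mapping);
		return err;
	}
	/* dev now allocates and maps its DMA buffers through iommu_ops */
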
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 8f5813bbffb..c21d06c7dd7 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -20,6 +20,7 @@
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
+#include <linux/dma-contiguous.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
@@ -226,6 +227,17 @@ static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
}
#endif
+void __init setup_dma_zone(struct machine_desc *mdesc)
+{
+#ifdef CONFIG_ZONE_DMA
+ if (mdesc->dma_zone_size) {
+ arm_dma_zone_size = mdesc->dma_zone_size;
+ arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+ } else
+ arm_dma_limit = 0xffffffff;
+#endif
+}
+
static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
unsigned long max_high)
{
@@ -273,12 +285,9 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
* Adjust the sizes according to any special requirements for
* this machine type.
*/
- if (arm_dma_zone_size) {
+ if (arm_dma_zone_size)
arm_adjust_dma_zone(zone_size, zhole_size,
arm_dma_zone_size >> PAGE_SHIFT);
- arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
- } else
- arm_dma_limit = 0xffffffff;
#endif
free_area_init_node(0, zone_size, min, zhole_size);
@@ -364,6 +373,12 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
if (mdesc->reserve)
mdesc->reserve();
+ /*
+ * reserve memory for DMA contiguous allocations;
+ * it must come from the DMA area inside low memory
+ */
+ dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
+
arm_memblock_steal_permitted = false;
memblock_allow_resize();
memblock_dump_all();
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 27f4a619b35..93dc0c17cdc 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -67,5 +67,8 @@ extern u32 arm_dma_limit;
#define arm_dma_limit ((u32)~0)
#endif
+extern phys_addr_t arm_lowmem_limit;
+
void __init bootmem_init(void);
void arm_mm_memblock_reserve(void);
+void dma_contiguous_remap(void);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index aa78de8bfdd..e5dad60b558 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -288,6 +288,11 @@ static struct mem_type mem_types[] = {
PMD_SECT_UNCACHED | PMD_SECT_XN,
.domain = DOMAIN_KERNEL,
},
+ [MT_MEMORY_DMA_READY] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .domain = DOMAIN_KERNEL,
+ },
};
const struct mem_type *get_mem_type(unsigned int type)
@@ -429,6 +434,7 @@ static void __init build_mem_type_table(void)
if (arch_is_coherent() && cpu_is_xsc3()) {
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
}
@@ -460,6 +466,7 @@ static void __init build_mem_type_table(void)
mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
}
@@ -512,6 +519,7 @@ static void __init build_mem_type_table(void)
mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+ mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
mem_types[MT_ROM].prot_sect |= cp->pmd;
@@ -596,7 +604,7 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
* L1 entries, whereas PGDs refer to a group of L1 entries making
* up one logical pointer to an L2 table.
*/
- if (((addr | end | phys) & ~SECTION_MASK) == 0) {
+ if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
pmd_t *p = pmd;
#ifndef CONFIG_ARM_LPAE
@@ -814,7 +822,7 @@ static int __init early_vmalloc(char *arg)
}
early_param("vmalloc", early_vmalloc);
-static phys_addr_t lowmem_limit __initdata = 0;
+phys_addr_t arm_lowmem_limit __initdata = 0;
void __init sanity_check_meminfo(void)
{
@@ -897,8 +905,8 @@ void __init sanity_check_meminfo(void)
bank->size = newsize;
}
#endif
- if (!bank->highmem && bank->start + bank->size > lowmem_limit)
- lowmem_limit = bank->start + bank->size;
+ if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
+ arm_lowmem_limit = bank->start + bank->size;
j++;
}
@@ -923,8 +931,8 @@ void __init sanity_check_meminfo(void)
}
#endif
meminfo.nr_banks = j;
- high_memory = __va(lowmem_limit - 1) + 1;
- memblock_set_current_limit(lowmem_limit);
+ high_memory = __va(arm_lowmem_limit - 1) + 1;
+ memblock_set_current_limit(arm_lowmem_limit);
}
static inline void prepare_page_table(void)
@@ -949,8 +957,8 @@ static inline void prepare_page_table(void)
* Find the end of the first block of lowmem.
*/
end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
- if (end >= lowmem_limit)
- end = lowmem_limit;
+ if (end >= arm_lowmem_limit)
+ end = arm_lowmem_limit;
/*
* Clear out all the kernel space mappings, except for the first
@@ -1093,8 +1101,8 @@ static void __init map_lowmem(void)
phys_addr_t end = start + reg->size;
struct map_desc map;
- if (end > lowmem_limit)
- end = lowmem_limit;
+ if (end > arm_lowmem_limit)
+ end = arm_lowmem_limit;
if (start >= end)
break;
@@ -1115,11 +1123,12 @@ void __init paging_init(struct machine_desc *mdesc)
{
void *zero_page;
- memblock_set_current_limit(lowmem_limit);
+ memblock_set_current_limit(arm_lowmem_limit);
build_mem_type_table();
prepare_page_table();
map_lowmem();
+ dma_contiguous_remap();
devicemaps_init(mdesc);
kmap_init();
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
index 162be662c08..bf312c354a2 100644
--- a/arch/arm/mm/vmregion.h
+++ b/arch/arm/mm/vmregion.h
@@ -17,7 +17,7 @@ struct arm_vmregion {
struct list_head vm_list;
unsigned long vm_start;
unsigned long vm_end;
- struct page *vm_pages;
+ void *priv;
int vm_active;
const void *caller;
};
diff --git a/arch/arm/plat-mxc/clock.c b/arch/arm/plat-mxc/clock.c
index 2ed3ab173ad..5079787273d 100644
--- a/arch/arm/plat-mxc/clock.c
+++ b/arch/arm/plat-mxc/clock.c
@@ -41,6 +41,7 @@
#include <mach/clock.h>
#include <mach/hardware.h>
+#ifndef CONFIG_COMMON_CLK
static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
@@ -200,6 +201,16 @@ struct clk *clk_get_parent(struct clk *clk)
}
EXPORT_SYMBOL(clk_get_parent);
+#else
+
+/*
+ * Lock to protect the clock module (ccm) registers. Used
+ * on all i.MXs
+ */
+DEFINE_SPINLOCK(imx_ccm_lock);
+
+#endif /* CONFIG_COMMON_CLK */
+
/*
* Get the resulting clock rate from a PLL register value and the input
* frequency. PLLs with this register layout can at least be found on
diff --git a/arch/arm/plat-mxc/include/mach/clock.h b/arch/arm/plat-mxc/include/mach/clock.h
index 753a5988d85..bd940c795cb 100644
--- a/arch/arm/plat-mxc/include/mach/clock.h
+++ b/arch/arm/plat-mxc/include/mach/clock.h
@@ -23,6 +23,7 @@
#ifndef __ASSEMBLY__
#include <linux/list.h>
+#ifndef CONFIG_COMMON_CLK
struct module;
struct clk {
@@ -59,6 +60,9 @@ struct clk {
int clk_register(struct clk *clk);
void clk_unregister(struct clk *clk);
+#endif /* CONFIG_COMMON_CLK */
+
+extern spinlock_t imx_ccm_lock;
unsigned long mxc_decode_pll(unsigned int pll, u32 f_ref);
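As a rough illustration only (not part of this patch), i.MX clock code built under CONFIG_COMMON_CLK would be expected to take the shared imx_ccm_lock around read-modify-write accesses to CCM registers; the register pointer and bit number below are placeholders:

	static void imx_ccm_gate_set(void __iomem *reg, unsigned int bit, bool on)
	{
		unsigned long flags;
		u32 val;

		spin_lock_irqsave(&imx_ccm_lock, flags);
		val = readl(reg);
		if (on)
			val |= 1 << bit;
		else
			val &= ~(1 << bit);
		writel(val, reg);
		spin_unlock_irqrestore(&imx_ccm_lock, flags);
	}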
diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h
index 0319c4a0caf..cf663d84e7c 100644
--- a/arch/arm/plat-mxc/include/mach/common.h
+++ b/arch/arm/plat-mxc/include/mach/common.h
@@ -53,6 +53,7 @@ extern void imx35_soc_init(void);
extern void imx50_soc_init(void);
extern void imx51_soc_init(void);
extern void imx53_soc_init(void);
+extern void imx51_init_late(void);
extern void epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq);
extern void mxc_timer_init(struct clk *timer_clk, void __iomem *, int);
extern int mx1_clocks_init(unsigned long fref);
@@ -149,4 +150,10 @@ extern void imx6q_pm_init(void);
static inline void imx6q_pm_init(void) {}
#endif
+#ifdef CONFIG_NEON
+extern int mx51_neon_fixup(void);
+#else
+static inline int mx51_neon_fixup(void) { return 0; }
+#endif
+
#endif
diff --git a/arch/arm/plat-mxc/include/mach/debug-macro.S b/arch/arm/plat-mxc/include/mach/debug-macro.S
index 8ddda365f1a..761e45f9456 100644
--- a/arch/arm/plat-mxc/include/mach/debug-macro.S
+++ b/arch/arm/plat-mxc/include/mach/debug-macro.S
@@ -24,6 +24,8 @@
#define UART_PADDR MX51_UART1_BASE_ADDR
#elif defined (CONFIG_DEBUG_IMX50_IMX53_UART)
#define UART_PADDR MX53_UART1_BASE_ADDR
+#elif defined (CONFIG_DEBUG_IMX6Q_UART2)
+#define UART_PADDR MX6Q_UART2_BASE_ADDR
#elif defined (CONFIG_DEBUG_IMX6Q_UART4)
#define UART_PADDR MX6Q_UART4_BASE_ADDR
#endif
diff --git a/arch/arm/plat-mxc/include/mach/mx6q.h b/arch/arm/plat-mxc/include/mach/mx6q.h
index 254a561a279..f7e7dbac8f4 100644
--- a/arch/arm/plat-mxc/include/mach/mx6q.h
+++ b/arch/arm/plat-mxc/include/mach/mx6q.h
@@ -27,6 +27,8 @@
#define MX6Q_CCM_SIZE 0x4000
#define MX6Q_ANATOP_BASE_ADDR 0x020c8000
#define MX6Q_ANATOP_SIZE 0x1000
+#define MX6Q_UART2_BASE_ADDR 0x021e8000
+#define MX6Q_UART2_SIZE 0x4000
#define MX6Q_UART4_BASE_ADDR 0x021f0000
#define MX6Q_UART4_SIZE 0x4000
diff --git a/arch/arm/plat-mxc/time.c b/arch/arm/plat-mxc/time.c
index 7daf7c9a413..99f958ca6cb 100644
--- a/arch/arm/plat-mxc/time.c
+++ b/arch/arm/plat-mxc/time.c
@@ -25,6 +25,7 @@
#include <linux/irq.h>
#include <linux/clockchips.h>
#include <linux/clk.h>
+#include <linux/err.h>
#include <mach/hardware.h>
#include <asm/sched_clock.h>
@@ -282,6 +283,19 @@ static int __init mxc_clockevent_init(struct clk *timer_clk)
void __init mxc_timer_init(struct clk *timer_clk, void __iomem *base, int irq)
{
uint32_t tctl_val;
+ struct clk *timer_ipg_clk;
+
+ if (!timer_clk) {
+ timer_clk = clk_get_sys("imx-gpt.0", "per");
+ if (IS_ERR(timer_clk)) {
+ pr_err("i.MX timer: unable to get clk\n");
+ return;
+ }
+
+ timer_ipg_clk = clk_get_sys("imx-gpt.0", "ipg");
+ if (!IS_ERR(timer_ipg_clk))
+ clk_prepare_enable(timer_ipg_clk);
+ }
clk_prepare_enable(timer_clk);
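The clk_get_sys() fallback above relies on the SoC clock driver registering matching clkdev lookups; a hypothetical sketch of what such a driver would add (the gpt_per_clk and gpt_ipg_clk names are invented for illustration):

	/* in the i.MX clock driver, after the GPT clocks are registered */
	clk_register_clkdev(gpt_per_clk, "per", "imx-gpt.0");
	clk_register_clkdev(gpt_ipg_clk, "ipg", "imx-gpt.0");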
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
index 44ae077dbc2..2132c4f389e 100644
--- a/arch/arm/plat-omap/counter_32k.c
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -28,19 +28,20 @@
#include <plat/clock.h>
+/* OMAP2_32KSYNCNT_CR_OFF: offset of 32ksync counter register */
+#define OMAP2_32KSYNCNT_CR_OFF 0x10
+
/*
* 32KHz clocksource ... always available, on pretty most chips except
* OMAP 730 and 1510. Other timers could be used as clocksources, with
* higher resolution in free-running counter modes (e.g. 12 MHz xtal),
* but systems won't necessarily want to spend resources that way.
*/
-static void __iomem *timer_32k_base;
-
-#define OMAP16XX_TIMER_32K_SYNCHRONIZED 0xfffbc410
+static void __iomem *sync32k_cnt_reg;
static u32 notrace omap_32k_read_sched_clock(void)
{
- return timer_32k_base ? __raw_readl(timer_32k_base) : 0;
+ return sync32k_cnt_reg ? __raw_readl(sync32k_cnt_reg) : 0;
}
/**
@@ -60,7 +61,7 @@ static void omap_read_persistent_clock(struct timespec *ts)
struct timespec *tsp = &persistent_ts;
last_cycles = cycles;
- cycles = timer_32k_base ? __raw_readl(timer_32k_base) : 0;
+ cycles = sync32k_cnt_reg ? __raw_readl(sync32k_cnt_reg) : 0;
delta = cycles - last_cycles;
nsecs = clocksource_cyc2ns(delta, persistent_mult, persistent_shift);
@@ -69,55 +70,41 @@ static void omap_read_persistent_clock(struct timespec *ts)
*ts = *tsp;
}
-int __init omap_init_clocksource_32k(void)
+/**
+ * omap_init_clocksource_32k - set up and register the counter_32k module
+ * as a kernel clocksource
+ * @vbase: ioremapped base address of the counter_32k module
+ *
+ * Returns 0 upon success or a negative error code upon failure.
+ *
+ */
+int __init omap_init_clocksource_32k(void __iomem *vbase)
{
- static char err[] __initdata = KERN_ERR
- "%s: can't register clocksource!\n";
-
- if (cpu_is_omap16xx() || cpu_class_is_omap2()) {
- u32 pbase;
- unsigned long size = SZ_4K;
- void __iomem *base;
- struct clk *sync_32k_ick;
-
- if (cpu_is_omap16xx()) {
- pbase = OMAP16XX_TIMER_32K_SYNCHRONIZED;
- size = SZ_1K;
- } else if (cpu_is_omap2420())
- pbase = OMAP2420_32KSYNCT_BASE + 0x10;
- else if (cpu_is_omap2430())
- pbase = OMAP2430_32KSYNCT_BASE + 0x10;
- else if (cpu_is_omap34xx())
- pbase = OMAP3430_32KSYNCT_BASE + 0x10;
- else if (cpu_is_omap44xx())
- pbase = OMAP4430_32KSYNCT_BASE + 0x10;
- else
- return -ENODEV;
-
- /* For this to work we must have a static mapping in io.c for this area */
- base = ioremap(pbase, size);
- if (!base)
- return -ENODEV;
-
- sync_32k_ick = clk_get(NULL, "omap_32ksync_ick");
- if (!IS_ERR(sync_32k_ick))
- clk_enable(sync_32k_ick);
-
- timer_32k_base = base;
-
- /*
- * 120000 rough estimate from the calculations in
- * __clocksource_updatefreq_scale.
- */
- clocks_calc_mult_shift(&persistent_mult, &persistent_shift,
- 32768, NSEC_PER_SEC, 120000);
-
- if (clocksource_mmio_init(base, "32k_counter", 32768, 250, 32,
- clocksource_mmio_readl_up))
- printk(err, "32k_counter");
-
- setup_sched_clock(omap_32k_read_sched_clock, 32, 32768);
- register_persistent_clock(NULL, omap_read_persistent_clock);
+ int ret;
+
+ /*
+ * The 32k sync counter register sits at offset 0x10 (OMAP2_32KSYNCNT_CR_OFF)
+ */
+ sync32k_cnt_reg = vbase + OMAP2_32KSYNCNT_CR_OFF;
+
+ /*
+ * 120000 rough estimate from the calculations in
+ * __clocksource_updatefreq_scale.
+ */
+ clocks_calc_mult_shift(&persistent_mult, &persistent_shift,
+ 32768, NSEC_PER_SEC, 120000);
+
+ ret = clocksource_mmio_init(sync32k_cnt_reg, "32k_counter", 32768,
+ 250, 32, clocksource_mmio_readl_up);
+ if (ret) {
+ pr_err("32k_counter: can't register clocksource\n");
+ return ret;
}
+
+ setup_sched_clock(omap_32k_read_sched_clock, 32, 32768);
+ register_persistent_clock(NULL, omap_read_persistent_clock);
+ pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
+
return 0;
}
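With the new prototype the caller maps the module and passes the virtual base in; a minimal hypothetical call site (the physical base and mapping size are illustrative):

	void __iomem *vbase;

	vbase = ioremap(OMAP2430_32KSYNCT_BASE, SZ_1K);
	if (!vbase)
		return -ENODEV;

	return omap_init_clocksource_32k(vbase);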
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
index 09b07d25289..1cba9273d2c 100644
--- a/arch/arm/plat-omap/devices.c
+++ b/arch/arm/plat-omap/devices.c
@@ -28,54 +28,6 @@
#include <plat/menelaus.h>
#include <plat/omap44xx.h>
-#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) || \
- defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
-
-#define OMAP_MMC_NR_RES 2
-
-/*
- * Register MMC devices. Called from mach-omap1 and mach-omap2 device init.
- */
-int __init omap_mmc_add(const char *name, int id, unsigned long base,
- unsigned long size, unsigned int irq,
- struct omap_mmc_platform_data *data)
-{
- struct platform_device *pdev;
- struct resource res[OMAP_MMC_NR_RES];
- int ret;
-
- pdev = platform_device_alloc(name, id);
- if (!pdev)
- return -ENOMEM;
-
- memset(res, 0, OMAP_MMC_NR_RES * sizeof(struct resource));
- res[0].start = base;
- res[0].end = base + size - 1;
- res[0].flags = IORESOURCE_MEM;
- res[1].start = res[1].end = irq;
- res[1].flags = IORESOURCE_IRQ;
-
- ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
- if (ret == 0)
- ret = platform_device_add_data(pdev, data, sizeof(*data));
- if (ret)
- goto fail;
-
- ret = platform_device_add(pdev);
- if (ret)
- goto fail;
-
- /* return device handle to board setup code */
- data->dev = &pdev->dev;
- return 0;
-
-fail:
- platform_device_put(pdev);
- return ret;
-}
-
-#endif
-
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_HW_RANDOM_OMAP) || defined(CONFIG_HW_RANDOM_OMAP_MODULE)
@@ -109,79 +61,6 @@ static void omap_init_rng(void)
static inline void omap_init_rng(void) {}
#endif
-/*-------------------------------------------------------------------------*/
-
-/* Numbering for the SPI-capable controllers when used for SPI:
- * spi = 1
- * uwire = 2
- * mmc1..2 = 3..4
- * mcbsp1..3 = 5..7
- */
-
-#if defined(CONFIG_SPI_OMAP_UWIRE) || defined(CONFIG_SPI_OMAP_UWIRE_MODULE)
-
-#define OMAP_UWIRE_BASE 0xfffb3000
-
-static struct resource uwire_resources[] = {
- {
- .start = OMAP_UWIRE_BASE,
- .end = OMAP_UWIRE_BASE + 0x20,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device omap_uwire_device = {
- .name = "omap_uwire",
- .id = -1,
- .num_resources = ARRAY_SIZE(uwire_resources),
- .resource = uwire_resources,
-};
-
-static void omap_init_uwire(void)
-{
- /* FIXME define and use a boot tag; not all boards will be hooking
- * up devices to the microwire controller, and multi-board configs
- * mean that CONFIG_SPI_OMAP_UWIRE may be configured anyway...
- */
-
- /* board-specific code must configure chipselects (only a few
- * are normally used) and SCLK/SDI/SDO (each has two choices).
- */
- (void) platform_device_register(&omap_uwire_device);
-}
-#else
-static inline void omap_init_uwire(void) {}
-#endif
-
-#if defined(CONFIG_TIDSPBRIDGE) || defined(CONFIG_TIDSPBRIDGE_MODULE)
-
-static phys_addr_t omap_dsp_phys_mempool_base;
-
-void __init omap_dsp_reserve_sdram_memblock(void)
-{
- phys_addr_t size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE;
- phys_addr_t paddr;
-
- if (!size)
- return;
-
- paddr = arm_memblock_steal(size, SZ_1M);
- if (!paddr) {
- pr_err("%s: failed to reserve %llx bytes\n",
- __func__, (unsigned long long)size);
- return;
- }
-
- omap_dsp_phys_mempool_base = paddr;
-}
-
-phys_addr_t omap_dsp_get_mempool_base(void)
-{
- return omap_dsp_phys_mempool_base;
-}
-EXPORT_SYMBOL(omap_dsp_get_mempool_base);
-#endif
-
/*
* This gets called after board-specific INIT_MACHINE, and initializes most
* on-chip peripherals accessible on this board (except for few like USB):
@@ -208,7 +87,6 @@ static int __init omap_init_devices(void)
* in alphabetical order so they're easier to sort through.
*/
omap_init_rng();
- omap_init_uwire();
return 0;
}
arch_initcall(omap_init_devices);
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 987e6101267..cb16ade437c 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -852,7 +852,7 @@ omap_dma_set_prio_lch(int lch, unsigned char read_prio,
}
l = p->dma_read(CCR, lch);
l &= ~((1 << 6) | (1 << 26));
- if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx())
+ if (cpu_class_is_omap2() && !cpu_is_omap242x())
l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
else
l |= ((read_prio & 0x1) << 6);
@@ -2080,7 +2080,7 @@ static int __devinit omap_system_dma_probe(struct platform_device *pdev)
}
}
- if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx())
+ if (cpu_class_is_omap2() && !cpu_is_omap242x())
omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE,
DMA_DEFAULT_FIFO_DEPTH, 0);
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index c4ed35e89fb..3b0cfeb33d0 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -82,8 +82,6 @@ static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
static void omap_timer_restore_context(struct omap_dm_timer *timer)
{
- __raw_writel(timer->context.tiocp_cfg,
- timer->io_base + OMAP_TIMER_OCP_CFG_OFFSET);
if (timer->revision == 1)
__raw_writel(timer->context.tistat, timer->sys_stat);
diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h
index a557b8484e6..d1cb6f527b7 100644
--- a/arch/arm/plat-omap/include/plat/common.h
+++ b/arch/arm/plat-omap/include/plat/common.h
@@ -30,7 +30,7 @@
#include <plat/i2c.h>
#include <plat/omap_hwmod.h>
-extern int __init omap_init_clocksource_32k(void);
+extern int __init omap_init_clocksource_32k(void __iomem *vbase);
extern void __init omap_check_revision(void);
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
index 4bdf14ec674..297245dba66 100644
--- a/arch/arm/plat-omap/include/plat/cpu.h
+++ b/arch/arm/plat-omap/include/plat/cpu.h
@@ -121,6 +121,7 @@ IS_OMAP_CLASS(16xx, 0x16)
IS_OMAP_CLASS(24xx, 0x24)
IS_OMAP_CLASS(34xx, 0x34)
IS_OMAP_CLASS(44xx, 0x44)
+IS_AM_CLASS(35xx, 0x35)
IS_AM_CLASS(33xx, 0x33)
IS_TI_CLASS(81xx, 0x81)
@@ -148,6 +149,7 @@ IS_AM_SUBCLASS(335x, 0x335)
#define cpu_is_ti81xx() 0
#define cpu_is_ti816x() 0
#define cpu_is_ti814x() 0
+#define soc_is_am35xx() 0
#define cpu_is_am33xx() 0
#define cpu_is_am335x() 0
#define cpu_is_omap44xx() 0
@@ -357,6 +359,7 @@ IS_OMAP_TYPE(3517, 0x3517)
# undef cpu_is_ti81xx
# undef cpu_is_ti816x
# undef cpu_is_ti814x
+# undef soc_is_am35xx
# undef cpu_is_am33xx
# undef cpu_is_am335x
# define cpu_is_omap3430() is_omap3430()
@@ -378,6 +381,7 @@ IS_OMAP_TYPE(3517, 0x3517)
# define cpu_is_ti81xx() is_ti81xx()
# define cpu_is_ti816x() is_ti816x()
# define cpu_is_ti814x() is_ti814x()
+# define soc_is_am35xx() is_am35xx()
# define cpu_is_am33xx() is_am33xx()
# define cpu_is_am335x() is_am335x()
#endif
@@ -433,6 +437,10 @@ IS_OMAP_TYPE(3517, 0x3517)
#define TI8148_REV_ES2_0 (TI814X_CLASS | (0x1 << 8))
#define TI8148_REV_ES2_1 (TI814X_CLASS | (0x2 << 8))
+#define AM35XX_CLASS 0x35170034
+#define AM35XX_REV_ES1_0 AM35XX_CLASS
+#define AM35XX_REV_ES1_1 (AM35XX_CLASS | (0x1 << 8))
+
#define AM335X_CLASS 0x33500034
#define AM335X_REV_ES1_0 AM335X_CLASS
diff --git a/arch/arm/plat-omap/include/plat/dma.h b/arch/arm/plat-omap/include/plat/dma.h
index 42afb4c4551..c5811d4409b 100644
--- a/arch/arm/plat-omap/include/plat/dma.h
+++ b/arch/arm/plat-omap/include/plat/dma.h
@@ -312,6 +312,11 @@
#define CLEAR_CSR_ON_READ BIT(0xC)
#define IS_WORD_16 BIT(0xD)
+/* Defines for DMA Capabilities */
+#define DMA_HAS_TRANSPARENT_CAPS (0x1 << 18)
+#define DMA_HAS_CONSTANT_FILL_CAPS (0x1 << 19)
+#define DMA_HAS_DESCRIPTOR_CAPS (0x3 << 20)
+
enum omap_reg_offsets {
GCR, GSCR, GRST1, HW_ID,
diff --git a/arch/arm/plat-omap/include/plat/dmtimer.h b/arch/arm/plat-omap/include/plat/dmtimer.h
index bdf871a84d6..5da73562e48 100644
--- a/arch/arm/plat-omap/include/plat/dmtimer.h
+++ b/arch/arm/plat-omap/include/plat/dmtimer.h
@@ -75,7 +75,6 @@ struct clk;
struct timer_regs {
u32 tidr;
- u32 tiocp_cfg;
u32 tistat;
u32 tisr;
u32 tier;
diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h
index 3e7ae0f0215..a7754a886d4 100644
--- a/arch/arm/plat-omap/include/plat/mmc.h
+++ b/arch/arm/plat-omap/include/plat/mmc.h
@@ -177,9 +177,6 @@ extern void omap_mmc_notify_cover_event(struct device *dev, int slot,
void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
int nr_controllers);
void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data);
-int omap_mmc_add(const char *name, int id, unsigned long base,
- unsigned long size, unsigned int irq,
- struct omap_mmc_platform_data *data);
#else
static inline void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
int nr_controllers)
@@ -188,12 +185,6 @@ static inline void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
static inline void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
{
}
-static inline int omap_mmc_add(const char *name, int id, unsigned long base,
- unsigned long size, unsigned int irq,
- struct omap_mmc_platform_data *data)
-{
- return 0;
-}
#endif
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index 74daf5ed143..61fd837624a 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -14,15 +14,41 @@
#include <linux/dma-mapping.h>
#include <linux/serial_8250.h>
#include <linux/ata_platform.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
#include <linux/mv643xx_eth.h>
#include <linux/mv643xx_i2c.h>
#include <net/dsa.h>
-#include <linux/spi/orion_spi.h>
-#include <plat/orion_wdt.h>
#include <plat/mv_xor.h>
#include <plat/ehci-orion.h>
#include <mach/bridge-regs.h>
+/* Create a clkdev entry for a given device/clk */
+void __init orion_clkdev_add(const char *con_id, const char *dev_id,
+ struct clk *clk)
+{
+ struct clk_lookup *cl;
+
+ cl = clkdev_alloc(clk, con_id, dev_id);
+ if (cl)
+ clkdev_add(cl);
+}
+
+/* Create clkdev entries for all orion platforms except kirkwood.
+   Kirkwood has gated clocks for some of its peripherals, so it creates
+   its own clkdev entries. For all the other orion platforms, create
+   clkdev entries for the tclk. */
+void __init orion_clkdev_init(struct clk *tclk)
+{
+ orion_clkdev_add(NULL, "orion_spi.0", tclk);
+ orion_clkdev_add(NULL, "orion_spi.1", tclk);
+ orion_clkdev_add(NULL, MV643XX_ETH_NAME ".0", tclk);
+ orion_clkdev_add(NULL, MV643XX_ETH_NAME ".1", tclk);
+ orion_clkdev_add(NULL, MV643XX_ETH_NAME ".2", tclk);
+ orion_clkdev_add(NULL, MV643XX_ETH_NAME ".3", tclk);
+ orion_clkdev_add(NULL, "orion_wdt", tclk);
+}
+
/* Fill in the resources structure and link it into the platform
device structure. There is always a memory region, and nearly
always an interrupt.*/
@@ -49,6 +75,12 @@ static void fill_resources(struct platform_device *device,
/*****************************************************************************
* UART
****************************************************************************/
+static unsigned long __init uart_get_clk_rate(struct clk *clk)
+{
+ clk_prepare_enable(clk);
+ return clk_get_rate(clk);
+}
+
static void __init uart_complete(
struct platform_device *orion_uart,
struct plat_serial8250_port *data,
@@ -56,12 +88,12 @@ static void __init uart_complete(
unsigned int membase,
resource_size_t mapbase,
unsigned int irq,
- unsigned int uartclk)
+ struct clk *clk)
{
data->mapbase = mapbase;
data->membase = (void __iomem *)membase;
data->irq = irq;
- data->uartclk = uartclk;
+ data->uartclk = uart_get_clk_rate(clk);
orion_uart->dev.platform_data = data;
fill_resources(orion_uart, resources, mapbase, 0xff, irq);
@@ -90,10 +122,10 @@ static struct platform_device orion_uart0 = {
void __init orion_uart0_init(unsigned int membase,
resource_size_t mapbase,
unsigned int irq,
- unsigned int uartclk)
+ struct clk *clk)
{
uart_complete(&orion_uart0, orion_uart0_data, orion_uart0_resources,
- membase, mapbase, irq, uartclk);
+ membase, mapbase, irq, clk);
}
/*****************************************************************************
@@ -118,10 +150,10 @@ static struct platform_device orion_uart1 = {
void __init orion_uart1_init(unsigned int membase,
resource_size_t mapbase,
unsigned int irq,
- unsigned int uartclk)
+ struct clk *clk)
{
uart_complete(&orion_uart1, orion_uart1_data, orion_uart1_resources,
- membase, mapbase, irq, uartclk);
+ membase, mapbase, irq, clk);
}
/*****************************************************************************
@@ -146,10 +178,10 @@ static struct platform_device orion_uart2 = {
void __init orion_uart2_init(unsigned int membase,
resource_size_t mapbase,
unsigned int irq,
- unsigned int uartclk)
+ struct clk *clk)
{
uart_complete(&orion_uart2, orion_uart2_data, orion_uart2_resources,
- membase, mapbase, irq, uartclk);
+ membase, mapbase, irq, clk);
}
/*****************************************************************************
@@ -174,10 +206,10 @@ static struct platform_device orion_uart3 = {
void __init orion_uart3_init(unsigned int membase,
resource_size_t mapbase,
unsigned int irq,
- unsigned int uartclk)
+ struct clk *clk)
{
uart_complete(&orion_uart3, orion_uart3_data, orion_uart3_resources,
- membase, mapbase, irq, uartclk);
+ membase, mapbase, irq, clk);
}
/*****************************************************************************
@@ -203,13 +235,11 @@ void __init orion_rtc_init(unsigned long mapbase,
****************************************************************************/
static __init void ge_complete(
struct mv643xx_eth_shared_platform_data *orion_ge_shared_data,
- int tclk,
struct resource *orion_ge_resource, unsigned long irq,
struct platform_device *orion_ge_shared,
struct mv643xx_eth_platform_data *eth_data,
struct platform_device *orion_ge)
{
- orion_ge_shared_data->t_clk = tclk;
orion_ge_resource->start = irq;
orion_ge_resource->end = irq;
eth_data->shared = orion_ge_shared;
@@ -260,12 +290,11 @@ static struct platform_device orion_ge00 = {
void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err,
- int tclk)
+ unsigned long irq_err)
{
fill_resources(&orion_ge00_shared, orion_ge00_shared_resources,
mapbase + 0x2000, SZ_16K - 1, irq_err);
- ge_complete(&orion_ge00_shared_data, tclk,
+ ge_complete(&orion_ge00_shared_data,
orion_ge00_resources, irq, &orion_ge00_shared,
eth_data, &orion_ge00);
}
@@ -313,12 +342,11 @@ static struct platform_device orion_ge01 = {
void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err,
- int tclk)
+ unsigned long irq_err)
{
fill_resources(&orion_ge01_shared, orion_ge01_shared_resources,
mapbase + 0x2000, SZ_16K - 1, irq_err);
- ge_complete(&orion_ge01_shared_data, tclk,
+ ge_complete(&orion_ge01_shared_data,
orion_ge01_resources, irq, &orion_ge01_shared,
eth_data, &orion_ge01);
}
@@ -366,12 +394,11 @@ static struct platform_device orion_ge10 = {
void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err,
- int tclk)
+ unsigned long irq_err)
{
fill_resources(&orion_ge10_shared, orion_ge10_shared_resources,
mapbase + 0x2000, SZ_16K - 1, irq_err);
- ge_complete(&orion_ge10_shared_data, tclk,
+ ge_complete(&orion_ge10_shared_data,
orion_ge10_resources, irq, &orion_ge10_shared,
eth_data, &orion_ge10);
}
@@ -419,12 +446,11 @@ static struct platform_device orion_ge11 = {
void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err,
- int tclk)
+ unsigned long irq_err)
{
fill_resources(&orion_ge11_shared, orion_ge11_shared_resources,
mapbase + 0x2000, SZ_16K - 1, irq_err);
- ge_complete(&orion_ge11_shared_data, tclk,
+ ge_complete(&orion_ge11_shared_data,
orion_ge11_resources, irq, &orion_ge11_shared,
eth_data, &orion_ge11);
}
@@ -521,44 +547,32 @@ void __init orion_i2c_1_init(unsigned long mapbase,
/*****************************************************************************
* SPI
****************************************************************************/
-static struct orion_spi_info orion_spi_plat_data;
static struct resource orion_spi_resources;
static struct platform_device orion_spi = {
.name = "orion_spi",
.id = 0,
- .dev = {
- .platform_data = &orion_spi_plat_data,
- },
};
-static struct orion_spi_info orion_spi_1_plat_data;
static struct resource orion_spi_1_resources;
static struct platform_device orion_spi_1 = {
.name = "orion_spi",
.id = 1,
- .dev = {
- .platform_data = &orion_spi_1_plat_data,
- },
};
/* Note: The SPI silicon core does have interrupts. However the
* current Linux software driver does not use interrupts. */
-void __init orion_spi_init(unsigned long mapbase,
- unsigned long tclk)
+void __init orion_spi_init(unsigned long mapbase)
{
- orion_spi_plat_data.tclk = tclk;
fill_resources(&orion_spi, &orion_spi_resources,
mapbase, SZ_512 - 1, NO_IRQ);
platform_device_register(&orion_spi);
}
-void __init orion_spi_1_init(unsigned long mapbase,
- unsigned long tclk)
+void __init orion_spi_1_init(unsigned long mapbase)
{
- orion_spi_1_plat_data.tclk = tclk;
fill_resources(&orion_spi_1, &orion_spi_1_resources,
mapbase, SZ_512 - 1, NO_IRQ);
platform_device_register(&orion_spi_1);
@@ -567,24 +581,18 @@ void __init orion_spi_1_init(unsigned long mapbase,
/*****************************************************************************
* Watchdog
****************************************************************************/
-static struct orion_wdt_platform_data orion_wdt_data;
-
static struct resource orion_wdt_resource =
DEFINE_RES_MEM(TIMER_VIRT_BASE, 0x28);
static struct platform_device orion_wdt_device = {
.name = "orion_wdt",
.id = -1,
- .dev = {
- .platform_data = &orion_wdt_data,
- },
- .resource = &orion_wdt_resource,
.num_resources = 1,
+ .resource = &orion_wdt_resource,
};
-void __init orion_wdt_init(unsigned long tclk)
+void __init orion_wdt_init(void)
{
- orion_wdt_data.tclk = tclk;
platform_device_register(&orion_wdt_device);
}
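Since these helpers no longer take a tclk rate, a non-kirkwood orion SoC setup file would be expected to register a single tclk and feed it to the clkdev and UART helpers; a hedged sketch, where the 166666667 Hz rate and the UART0_*/IRQ_UART0/SPI_PHYS_BASE symbols are placeholders:

	struct clk *tclk;

	tclk = clk_register_fixed_rate(NULL, "tclk", NULL, CLK_IS_ROOT,
				       166666667);
	orion_clkdev_init(tclk);

	orion_uart0_init(UART0_VIRT_BASE, UART0_PHYS_BASE, IRQ_UART0, tclk);
	orion_spi_init(SPI_PHYS_BASE);
	orion_wdt_init();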
diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h
index a7fa005a5a0..e00fdb21360 100644
--- a/arch/arm/plat-orion/include/plat/common.h
+++ b/arch/arm/plat-orion/include/plat/common.h
@@ -16,22 +16,22 @@ struct dsa_platform_data;
void __init orion_uart0_init(unsigned int membase,
resource_size_t mapbase,
unsigned int irq,
- unsigned int uartclk);
+ struct clk *clk);
void __init orion_uart1_init(unsigned int membase,
resource_size_t mapbase,
unsigned int irq,
- unsigned int uartclk);
+ struct clk *clk);
void __init orion_uart2_init(unsigned int membase,
resource_size_t mapbase,
unsigned int irq,
- unsigned int uartclk);
+ struct clk *clk);
void __init orion_uart3_init(unsigned int membase,
resource_size_t mapbase,
unsigned int irq,
- unsigned int uartclk);
+ struct clk *clk);
void __init orion_rtc_init(unsigned long mapbase,
unsigned long irq);
@@ -39,29 +39,26 @@ void __init orion_rtc_init(unsigned long mapbase,
void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err,
- int tclk);
+ unsigned long irq_err);
void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err,
- int tclk);
+ unsigned long irq_err);
void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err,
- int tclk);
+ unsigned long irq_err);
void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
unsigned long mapbase,
unsigned long irq,
- unsigned long irq_err,
- int tclk);
+ unsigned long irq_err);
void __init orion_ge00_switch_init(struct dsa_platform_data *d,
int irq);
+
void __init orion_i2c_init(unsigned long mapbase,
unsigned long irq,
unsigned long freq_m);
@@ -70,13 +67,11 @@ void __init orion_i2c_1_init(unsigned long mapbase,
unsigned long irq,
unsigned long freq_m);
-void __init orion_spi_init(unsigned long mapbase,
- unsigned long tclk);
+void __init orion_spi_init(unsigned long mapbase);
-void __init orion_spi_1_init(unsigned long mapbase,
- unsigned long tclk);
+void __init orion_spi_1_init(unsigned long mapbase);
-void __init orion_wdt_init(unsigned long tclk);
+void __init orion_wdt_init(void);
void __init orion_xor0_init(unsigned long mapbase_low,
unsigned long mapbase_high,
@@ -106,4 +101,9 @@ void __init orion_crypto_init(unsigned long mapbase,
unsigned long srambase,
unsigned long sram_size,
unsigned long irq);
+
+void __init orion_clkdev_add(const char *con_id, const char *dev_id,
+ struct clk *clk);
+
+void __init orion_clkdev_init(struct clk *tclk);
#endif
diff --git a/arch/arm/plat-orion/include/plat/orion_wdt.h b/arch/arm/plat-orion/include/plat/orion_wdt.h
deleted file mode 100644
index 665c362a2fb..00000000000
--- a/arch/arm/plat-orion/include/plat/orion_wdt.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * arch/arm/plat-orion/include/plat/orion_wdt.h
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __PLAT_ORION_WDT_H
-#define __PLAT_ORION_WDT_H
-
-struct orion_wdt_platform_data {
- u32 tclk; /* no <linux/clk.h> support yet */
-};
-
-
-#endif
-
diff --git a/arch/arm/plat-orion/pcie.c b/arch/arm/plat-orion/pcie.c
index 86dbb5bdb17..f20a321088a 100644
--- a/arch/arm/plat-orion/pcie.c
+++ b/arch/arm/plat-orion/pcie.c
@@ -52,12 +52,12 @@
#define PCIE_DEBUG_SOFT_RESET (1<<20)
-u32 __init orion_pcie_dev_id(void __iomem *base)
+u32 orion_pcie_dev_id(void __iomem *base)
{
return readl(base + PCIE_DEV_ID_OFF) >> 16;
}
-u32 __init orion_pcie_rev(void __iomem *base)
+u32 orion_pcie_rev(void __iomem *base)
{
return readl(base + PCIE_DEV_REV_OFF) & 0xff;
}
diff --git a/arch/arm/plat-pxa/include/plat/pxa27x_keypad.h b/arch/arm/plat-pxa/include/plat/pxa27x_keypad.h
index abcc36eb124..5ce8d5e6ea5 100644
--- a/arch/arm/plat-pxa/include/plat/pxa27x_keypad.h
+++ b/arch/arm/plat-pxa/include/plat/pxa27x_keypad.h
@@ -44,6 +44,10 @@ struct pxa27x_keypad_platform_data {
/* direct keys */
int direct_key_num;
unsigned int direct_key_map[MAX_DIRECT_KEY_NUM];
+ /* the key output may be low active */
+ int direct_key_low_active;
+ /* let the board choose the starting direct key */
+ unsigned int direct_key_mask;
/* rotary encoders 0 */
int enable_rotary0;
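A hypothetical board file using the two new fields could look like the following; the key count and mask value are purely illustrative and not taken from this patch:

	static struct pxa27x_keypad_platform_data example_keypad_pdata = {
		.direct_key_num		= 4,
		.direct_key_low_active	= 1,	/* direct keys are active-low */
		.direct_key_mask	= 0x0f,	/* example mask for the direct keys */
	};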
diff --git a/arch/arm/plat-s3c24xx/Makefile b/arch/arm/plat-s3c24xx/Makefile
index 2467b800cc7..9f60549c8da 100644
--- a/arch/arm/plat-s3c24xx/Makefile
+++ b/arch/arm/plat-s3c24xx/Makefile
@@ -12,10 +12,7 @@ obj- :=
# Core files
-obj-y += cpu.o
obj-y += irq.o
-obj-y += dev-uart.o
-obj-y += clock.o
obj-$(CONFIG_S3C24XX_DCLK) += clock-dclk.o
obj-$(CONFIG_CPU_FREQ_S3C24XX) += cpu-freq.o
@@ -23,9 +20,6 @@ obj-$(CONFIG_CPU_FREQ_S3C24XX_DEBUGFS) += cpu-freq-debugfs.o
# Architecture dependent builds
-obj-$(CONFIG_PM) += pm.o
-obj-$(CONFIG_PM) += irq-pm.o
-obj-$(CONFIG_PM) += sleep.o
obj-$(CONFIG_S3C2410_CLOCK) += s3c2410-clock.o
obj-$(CONFIG_S3C24XX_DMA) += dma.o
obj-$(CONFIG_S3C2410_IOTIMING) += s3c2410-iotiming.o
diff --git a/arch/arm/plat-s3c24xx/clock.c b/arch/arm/plat-s3c24xx/clock.c
deleted file mode 100644
index 931d26d1a54..00000000000
--- a/arch/arm/plat-s3c24xx/clock.c
+++ /dev/null
@@ -1,59 +0,0 @@
-/* linux/arch/arm/plat-s3c24xx/clock.c
- *
- * Copyright (c) 2004-2005 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C24XX Core clock control support
- *
- * Based on, and code from linux/arch/arm/mach-versatile/clock.c
- **
- ** Copyright (C) 2004 ARM Limited.
- ** Written by Deep Blue Solutions Limited.
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <asm/irq.h>
-
-#include <mach/regs-clock.h>
-#include <mach/regs-gpio.h>
-
-#include <plat/cpu-freq.h>
-
-#include <plat/clock.h>
-#include <plat/cpu.h>
-#include <plat/pll.h>
-
-/* initialise all the clocks */
-
-void __init_or_cpufreq s3c24xx_setup_clocks(unsigned long fclk,
- unsigned long hclk,
- unsigned long pclk)
-{
- clk_upll.rate = s3c24xx_get_pll(__raw_readl(S3C2410_UPLLCON),
- clk_xtal.rate);
-
- clk_mpll.rate = fclk;
- clk_h.rate = hclk;
- clk_p.rate = pclk;
- clk_f.rate = fclk;
-}
diff --git a/arch/arm/plat-s3c24xx/dev-uart.c b/arch/arm/plat-s3c24xx/dev-uart.c
deleted file mode 100644
index 9ab22e662ff..00000000000
--- a/arch/arm/plat-s3c24xx/dev-uart.c
+++ /dev/null
@@ -1,100 +0,0 @@
-/* linux/arch/arm/plat-s3c24xx/dev-uart.c
- *
- * Copyright (c) 2004 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * Base S3C24XX UART resource and platform device definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/serial_core.h>
-#include <linux/platform_device.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/irq.h>
-#include <mach/hardware.h>
-#include <mach/map.h>
-
-#include <plat/devs.h>
-#include <plat/regs-serial.h>
-
-/* Serial port registrations */
-
-static struct resource s3c2410_uart0_resource[] = {
- [0] = {
- .start = S3C2410_PA_UART0,
- .end = S3C2410_PA_UART0 + 0x3fff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_S3CUART_RX0,
- .end = IRQ_S3CUART_ERR0,
- .flags = IORESOURCE_IRQ,
- }
-};
-
-static struct resource s3c2410_uart1_resource[] = {
- [0] = {
- .start = S3C2410_PA_UART1,
- .end = S3C2410_PA_UART1 + 0x3fff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_S3CUART_RX1,
- .end = IRQ_S3CUART_ERR1,
- .flags = IORESOURCE_IRQ,
- }
-};
-
-static struct resource s3c2410_uart2_resource[] = {
- [0] = {
- .start = S3C2410_PA_UART2,
- .end = S3C2410_PA_UART2 + 0x3fff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_S3CUART_RX2,
- .end = IRQ_S3CUART_ERR2,
- .flags = IORESOURCE_IRQ,
- }
-};
-
-static struct resource s3c2410_uart3_resource[] = {
- [0] = {
- .start = S3C2443_PA_UART3,
- .end = S3C2443_PA_UART3 + 0x3fff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_S3CUART_RX3,
- .end = IRQ_S3CUART_ERR3,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-struct s3c24xx_uart_resources s3c2410_uart_resources[] __initdata = {
- [0] = {
- .resources = s3c2410_uart0_resource,
- .nr_resources = ARRAY_SIZE(s3c2410_uart0_resource),
- },
- [1] = {
- .resources = s3c2410_uart1_resource,
- .nr_resources = ARRAY_SIZE(s3c2410_uart1_resource),
- },
- [2] = {
- .resources = s3c2410_uart2_resource,
- .nr_resources = ARRAY_SIZE(s3c2410_uart2_resource),
- },
- [3] = {
- .resources = s3c2410_uart3_resource,
- .nr_resources = ARRAY_SIZE(s3c2410_uart3_resource),
- },
-};
diff --git a/arch/arm/plat-s5p/Kconfig b/arch/arm/plat-s5p/Kconfig
deleted file mode 100644
index 96bea320230..00000000000
--- a/arch/arm/plat-s5p/Kconfig
+++ /dev/null
@@ -1,140 +0,0 @@
-# arch/arm/plat-s5p/Kconfig
-#
-# Copyright (c) 2009 Samsung Electronics Co., Ltd.
-# http://www.samsung.com/
-#
-# Licensed under GPLv2
-
-config PLAT_S5P
- bool
- depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
- default y
- select ARM_VIC if !ARCH_EXYNOS
- select ARM_GIC if ARCH_EXYNOS
- select GIC_NON_BANKED if ARCH_EXYNOS4
- select NO_IOPORT
- select ARCH_REQUIRE_GPIOLIB
- select S3C_GPIO_TRACK
- select S5P_GPIO_DRVSTR
- select SAMSUNG_GPIOLIB_4BIT
- select PLAT_SAMSUNG
- select SAMSUNG_CLKSRC
- select SAMSUNG_IRQ_VIC_TIMER
- help
- Base platform code for Samsung's S5P series SoC.
-
-config S5P_EXT_INT
- bool
- help
- Use the external interrupts (other than GPIO interrupts.)
- Note: Do not choose this for S5P6440 and S5P6450.
-
-config S5P_GPIO_INT
- bool
- help
- Common code for the GPIO interrupts (other than external interrupts.)
-
-config S5P_HRT
- bool
- select SAMSUNG_DEV_PWM
- help
- Use the High Resolution timer support
-
-config S5P_DEV_UART
- def_bool y
- depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210)
-
-config S5P_PM
- bool
- help
- Common code for power management support on S5P and newer SoCs
- Note: Do not select this for S5P6440 and S5P6450.
-
-comment "System MMU"
-
-config S5P_SYSTEM_MMU
- bool "S5P SYSTEM MMU"
- depends on ARCH_EXYNOS4
- help
- Say Y here if you want to enable System MMU
-
-config S5P_SLEEP
- bool
- help
- Internal config node to apply common S5P sleep management code.
- Can be selected by S5P and newer SoCs with similar sleep procedure.
-
-config S5P_DEV_FIMC0
- bool
- help
- Compile in platform device definitions for FIMC controller 0
-
-config S5P_DEV_FIMC1
- bool
- help
- Compile in platform device definitions for FIMC controller 1
-
-config S5P_DEV_FIMC2
- bool
- help
- Compile in platform device definitions for FIMC controller 2
-
-config S5P_DEV_FIMC3
- bool
- help
- Compile in platform device definitions for FIMC controller 3
-
-config S5P_DEV_JPEG
- bool
- help
- Compile in platform device definitions for JPEG codec
-
-config S5P_DEV_G2D
- bool
- help
- Compile in platform device definitions for G2D device
-
-config S5P_DEV_FIMD0
- bool
- help
- Compile in platform device definitions for FIMD controller 0
-
-config S5P_DEV_I2C_HDMIPHY
- bool
- help
- Compile in platform device definitions for I2C HDMIPHY controller
-
-config S5P_DEV_MFC
- bool
- help
- Compile in platform device definitions for MFC
-
-config S5P_DEV_ONENAND
- bool
- help
- Compile in platform device definition for OneNAND controller
-
-config S5P_DEV_CSIS0
- bool
- help
- Compile in platform device definitions for MIPI-CSIS channel 0
-
-config S5P_DEV_CSIS1
- bool
- help
- Compile in platform device definitions for MIPI-CSIS channel 1
-
-config S5P_DEV_TV
- bool
- help
- Compile in platform device definition for TV interface
-
-config S5P_DEV_USB_EHCI
- bool
- help
- Compile in platform device definition for USB EHCI
-
-config S5P_SETUP_MIPIPHY
- bool
- help
- Compile in common setup code for MIPI-CSIS and MIPI-DSIM devices
diff --git a/arch/arm/plat-s5p/Makefile b/arch/arm/plat-s5p/Makefile
deleted file mode 100644
index 4bd82413665..00000000000
--- a/arch/arm/plat-s5p/Makefile
+++ /dev/null
@@ -1,28 +0,0 @@
-# arch/arm/plat-s5p/Makefile
-#
-# Copyright (c) 2009 Samsung Electronics Co., Ltd.
-# http://www.samsung.com/
-#
-# Licensed under GPLv2
-
-obj-y :=
-obj-m :=
-obj-n := dummy.o
-obj- :=
-
-# Core files
-
-obj-y += clock.o
-obj-y += irq.o
-obj-$(CONFIG_S5P_EXT_INT) += irq-eint.o
-obj-$(CONFIG_S5P_GPIO_INT) += irq-gpioint.o
-obj-$(CONFIG_S5P_SYSTEM_MMU) += sysmmu.o
-obj-$(CONFIG_S5P_PM) += pm.o irq-pm.o
-obj-$(CONFIG_S5P_SLEEP) += sleep.o
-obj-$(CONFIG_S5P_HRT) += s5p-time.o
-
-# devices
-
-obj-$(CONFIG_S5P_DEV_UART) += dev-uart.o
-obj-$(CONFIG_S5P_DEV_MFC) += dev-mfc.o
-obj-$(CONFIG_S5P_SETUP_MIPIPHY) += setup-mipiphy.o
diff --git a/arch/arm/plat-s5p/sysmmu.c b/arch/arm/plat-s5p/sysmmu.c
deleted file mode 100644
index c8bec9c7655..00000000000
--- a/arch/arm/plat-s5p/sysmmu.c
+++ /dev/null
@@ -1,313 +0,0 @@
-/* linux/arch/arm/plat-s5p/sysmmu.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/export.h>
-
-#include <asm/pgtable.h>
-
-#include <mach/map.h>
-#include <mach/regs-sysmmu.h>
-#include <plat/sysmmu.h>
-
-#define CTRL_ENABLE 0x5
-#define CTRL_BLOCK 0x7
-#define CTRL_DISABLE 0x0
-
-static struct device *dev;
-
-static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
- S5P_PAGE_FAULT_ADDR,
- S5P_AR_FAULT_ADDR,
- S5P_AW_FAULT_ADDR,
- S5P_DEFAULT_SLAVE_ADDR,
- S5P_AR_FAULT_ADDR,
- S5P_AR_FAULT_ADDR,
- S5P_AW_FAULT_ADDR,
- S5P_AW_FAULT_ADDR
-};
-
-static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
- "PAGE FAULT",
- "AR MULTI-HIT FAULT",
- "AW MULTI-HIT FAULT",
- "BUS ERROR",
- "AR SECURITY PROTECTION FAULT",
- "AR ACCESS PROTECTION FAULT",
- "AW SECURITY PROTECTION FAULT",
- "AW ACCESS PROTECTION FAULT"
-};
-
-static int (*fault_handlers[S5P_SYSMMU_TOTAL_IPNUM])(
- enum S5P_SYSMMU_INTERRUPT_TYPE itype,
- unsigned long pgtable_base,
- unsigned long fault_addr);
-
-/*
- * If adjacent 2 bits are true, the system MMU is enabled.
- * The system MMU is disabled, otherwise.
- */
-static unsigned long sysmmu_states;
-
-static inline void set_sysmmu_active(sysmmu_ips ips)
-{
- sysmmu_states |= 3 << (ips * 2);
-}
-
-static inline void set_sysmmu_inactive(sysmmu_ips ips)
-{
- sysmmu_states &= ~(3 << (ips * 2));
-}
-
-static inline int is_sysmmu_active(sysmmu_ips ips)
-{
- return sysmmu_states & (3 << (ips * 2));
-}
-
-static void __iomem *sysmmusfrs[S5P_SYSMMU_TOTAL_IPNUM];
-
-static inline void sysmmu_block(sysmmu_ips ips)
-{
- __raw_writel(CTRL_BLOCK, sysmmusfrs[ips] + S5P_MMU_CTRL);
- dev_dbg(dev, "%s is blocked.\n", sysmmu_ips_name[ips]);
-}
-
-static inline void sysmmu_unblock(sysmmu_ips ips)
-{
- __raw_writel(CTRL_ENABLE, sysmmusfrs[ips] + S5P_MMU_CTRL);
- dev_dbg(dev, "%s is unblocked.\n", sysmmu_ips_name[ips]);
-}
-
-static inline void __sysmmu_tlb_invalidate(sysmmu_ips ips)
-{
- __raw_writel(0x1, sysmmusfrs[ips] + S5P_MMU_FLUSH);
- dev_dbg(dev, "TLB of %s is invalidated.\n", sysmmu_ips_name[ips]);
-}
-
-static inline void __sysmmu_set_ptbase(sysmmu_ips ips, unsigned long pgd)
-{
- if (unlikely(pgd == 0)) {
- pgd = (unsigned long)ZERO_PAGE(0);
- __raw_writel(0x20, sysmmusfrs[ips] + S5P_MMU_CFG); /* 4KB LV1 */
- } else {
- __raw_writel(0x0, sysmmusfrs[ips] + S5P_MMU_CFG); /* 16KB LV1 */
- }
-
- __raw_writel(pgd, sysmmusfrs[ips] + S5P_PT_BASE_ADDR);
-
- dev_dbg(dev, "Page table base of %s is initialized with 0x%08lX.\n",
- sysmmu_ips_name[ips], pgd);
- __sysmmu_tlb_invalidate(ips);
-}
-
-void sysmmu_set_fault_handler(sysmmu_ips ips,
- int (*handler)(enum S5P_SYSMMU_INTERRUPT_TYPE itype,
- unsigned long pgtable_base,
- unsigned long fault_addr))
-{
- BUG_ON(!((ips >= SYSMMU_MDMA) && (ips < S5P_SYSMMU_TOTAL_IPNUM)));
- fault_handlers[ips] = handler;
-}
-
-static irqreturn_t s5p_sysmmu_irq(int irq, void *dev_id)
-{
- /* SYSMMU is in blocked when interrupt occurred. */
- unsigned long base = 0;
- sysmmu_ips ips = (sysmmu_ips)dev_id;
- enum S5P_SYSMMU_INTERRUPT_TYPE itype;
-
- itype = (enum S5P_SYSMMU_INTERRUPT_TYPE)
- __ffs(__raw_readl(sysmmusfrs[ips] + S5P_INT_STATUS));
-
- BUG_ON(!((itype >= 0) && (itype < 8)));
-
- dev_alert(dev, "%s occurred by %s.\n", sysmmu_fault_name[itype],
- sysmmu_ips_name[ips]);
-
- if (fault_handlers[ips]) {
- unsigned long addr;
-
- base = __raw_readl(sysmmusfrs[ips] + S5P_PT_BASE_ADDR);
- addr = __raw_readl(sysmmusfrs[ips] + fault_reg_offset[itype]);
-
- if (fault_handlers[ips](itype, base, addr)) {
- __raw_writel(1 << itype,
- sysmmusfrs[ips] + S5P_INT_CLEAR);
- dev_notice(dev, "%s from %s is resolved."
- " Retrying translation.\n",
- sysmmu_fault_name[itype], sysmmu_ips_name[ips]);
- } else {
- base = 0;
- }
- }
-
- sysmmu_unblock(ips);
-
- if (!base)
- dev_notice(dev, "%s from %s is not handled.\n",
- sysmmu_fault_name[itype], sysmmu_ips_name[ips]);
-
- return IRQ_HANDLED;
-}
-
-void s5p_sysmmu_set_tablebase_pgd(sysmmu_ips ips, unsigned long pgd)
-{
- if (is_sysmmu_active(ips)) {
- sysmmu_block(ips);
- __sysmmu_set_ptbase(ips, pgd);
- sysmmu_unblock(ips);
- } else {
- dev_dbg(dev, "%s is disabled. "
- "Skipping initializing page table base.\n",
- sysmmu_ips_name[ips]);
- }
-}
-
-void s5p_sysmmu_enable(sysmmu_ips ips, unsigned long pgd)
-{
- if (!is_sysmmu_active(ips)) {
- sysmmu_clk_enable(ips);
-
- __sysmmu_set_ptbase(ips, pgd);
-
- __raw_writel(CTRL_ENABLE, sysmmusfrs[ips] + S5P_MMU_CTRL);
-
- set_sysmmu_active(ips);
- dev_dbg(dev, "%s is enabled.\n", sysmmu_ips_name[ips]);
- } else {
- dev_dbg(dev, "%s is already enabled.\n", sysmmu_ips_name[ips]);
- }
-}
-
-void s5p_sysmmu_disable(sysmmu_ips ips)
-{
- if (is_sysmmu_active(ips)) {
- __raw_writel(CTRL_DISABLE, sysmmusfrs[ips] + S5P_MMU_CTRL);
- set_sysmmu_inactive(ips);
- sysmmu_clk_disable(ips);
- dev_dbg(dev, "%s is disabled.\n", sysmmu_ips_name[ips]);
- } else {
- dev_dbg(dev, "%s is already disabled.\n", sysmmu_ips_name[ips]);
- }
-}
-
-void s5p_sysmmu_tlb_invalidate(sysmmu_ips ips)
-{
- if (is_sysmmu_active(ips)) {
- sysmmu_block(ips);
- __sysmmu_tlb_invalidate(ips);
- sysmmu_unblock(ips);
- } else {
- dev_dbg(dev, "%s is disabled. "
- "Skipping invalidating TLB.\n", sysmmu_ips_name[ips]);
- }
-}
-
-static int s5p_sysmmu_probe(struct platform_device *pdev)
-{
- int i, ret;
- struct resource *res, *mem;
-
- dev = &pdev->dev;
-
- for (i = 0; i < S5P_SYSMMU_TOTAL_IPNUM; i++) {
- int irq;
-
- sysmmu_clk_init(dev, i);
- sysmmu_clk_disable(i);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (!res) {
- dev_err(dev, "Failed to get the resource of %s.\n",
- sysmmu_ips_name[i]);
- ret = -ENODEV;
- goto err_res;
- }
-
- mem = request_mem_region(res->start, resource_size(res),
- pdev->name);
- if (!mem) {
- dev_err(dev, "Failed to request the memory region of %s.\n",
- sysmmu_ips_name[i]);
- ret = -EBUSY;
- goto err_res;
- }
-
- sysmmusfrs[i] = ioremap(res->start, resource_size(res));
- if (!sysmmusfrs[i]) {
- dev_err(dev, "Failed to ioremap() for %s.\n",
- sysmmu_ips_name[i]);
- ret = -ENXIO;
- goto err_reg;
- }
-
- irq = platform_get_irq(pdev, i);
- if (irq <= 0) {
- dev_err(dev, "Failed to get the IRQ resource of %s.\n",
- sysmmu_ips_name[i]);
- ret = -ENOENT;
- goto err_map;
- }
-
- if (request_irq(irq, s5p_sysmmu_irq, IRQF_DISABLED,
- pdev->name, (void *)i)) {
- dev_err(dev, "Failed to request IRQ for %s.\n",
- sysmmu_ips_name[i]);
- ret = -ENOENT;
- goto err_map;
- }
- }
-
- return 0;
-
-err_map:
- iounmap(sysmmusfrs[i]);
-err_reg:
- release_mem_region(mem->start, resource_size(mem));
-err_res:
- return ret;
-}
-
-static int s5p_sysmmu_remove(struct platform_device *pdev)
-{
- return 0;
-}
-int s5p_sysmmu_runtime_suspend(struct device *dev)
-{
- return 0;
-}
-
-int s5p_sysmmu_runtime_resume(struct device *dev)
-{
- return 0;
-}
-
-const struct dev_pm_ops s5p_sysmmu_pm_ops = {
- .runtime_suspend = s5p_sysmmu_runtime_suspend,
- .runtime_resume = s5p_sysmmu_runtime_resume,
-};
-
-static struct platform_driver s5p_sysmmu_driver = {
- .probe = s5p_sysmmu_probe,
- .remove = s5p_sysmmu_remove,
- .driver = {
- .owner = THIS_MODULE,
- .name = "s5p-sysmmu",
- .pm = &s5p_sysmmu_pm_ops,
- }
-};
-
-static int __init s5p_sysmmu_init(void)
-{
- return platform_driver_register(&s5p_sysmmu_driver);
-}
-arch_initcall(s5p_sysmmu_init);
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index a0ffc77da80..a2fae4ea093 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -13,6 +13,24 @@ config PLAT_SAMSUNG
help
Base platform code for all Samsung SoC based systems
+config PLAT_S5P
+ bool
+ depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
+ default y
+ select ARM_VIC if !ARCH_EXYNOS
+ select ARM_GIC if ARCH_EXYNOS
+ select GIC_NON_BANKED if ARCH_EXYNOS4
+ select NO_IOPORT
+ select ARCH_REQUIRE_GPIOLIB
+ select S3C_GPIO_TRACK
+ select S5P_GPIO_DRVSTR
+ select SAMSUNG_GPIOLIB_4BIT
+ select PLAT_SAMSUNG
+ select SAMSUNG_CLKSRC
+ select SAMSUNG_IRQ_VIC_TIMER
+ help
+ Base platform code for Samsung's S5P series SoC.
+
if PLAT_SAMSUNG
# boot configurations
@@ -50,6 +68,14 @@ config S3C_LOWLEVEL_UART_PORT
this configuration should be between zero and two. The port
must have been initialised by the boot-loader before use.
+# timer options
+
+config S5P_HRT
+ bool
+ select SAMSUNG_DEV_PWM
+ help
+ Use the High Resolution timer support
+
# clock options
config SAMSUNG_CLKSRC
@@ -58,6 +84,11 @@ config SAMSUNG_CLKSRC
Select the clock code for the clksrc implementation
used by newer systems such as the S3C64XX.
+config S5P_CLOCK
+ def_bool (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
+ help
+ Support common clock code for ARCH_S5P and ARCH_EXYNOS SoCs
+
# options for IRQ support
config SAMSUNG_IRQ_VIC_TIMER
@@ -65,6 +96,22 @@ config SAMSUNG_IRQ_VIC_TIMER
help
Internal configuration to build the VIC timer interrupt code.
+config S5P_IRQ
+ def_bool (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
+ help
+ Support common interrupt code for ARCH_S5P and ARCH_EXYNOS SoCs
+
+config S5P_EXT_INT
+ bool
+ help
+ Use the external interrupts (other than GPIO interrupts.)
+ Note: Do not choose this for S5P6440 and S5P6450.
+
+config S5P_GPIO_INT
+ bool
+ help
+ Common code for the GPIO interrupts (other than external interrupts.)
+
# options for gpio configuration support
config SAMSUNG_GPIOLIB_4BIT
@@ -117,6 +164,12 @@ config S3C_GPIO_TRACK
Internal configuration option to enable the s3c specific gpio
chip tracking if the platform requires it.
+# uart options
+
+config S5P_DEV_UART
+ def_bool y
+ depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210)
+
# ADC driver
config S3C_ADC
@@ -274,6 +327,76 @@ config SAMSUNG_DEV_BACKLIGHT
help
Compile in platform device definition LCD backlight with PWM Timer
+config S5P_DEV_CSIS0
+ bool
+ help
+ Compile in platform device definitions for MIPI-CSIS channel 0
+
+config S5P_DEV_CSIS1
+ bool
+ help
+ Compile in platform device definitions for MIPI-CSIS channel 1
+
+config S5P_DEV_FIMC0
+ bool
+ help
+ Compile in platform device definitions for FIMC controller 0
+
+config S5P_DEV_FIMC1
+ bool
+ help
+ Compile in platform device definitions for FIMC controller 1
+
+config S5P_DEV_FIMC2
+ bool
+ help
+ Compile in platform device definitions for FIMC controller 2
+
+config S5P_DEV_FIMC3
+ bool
+ help
+ Compile in platform device definitions for FIMC controller 3
+
+config S5P_DEV_FIMD0
+ bool
+ help
+ Compile in platform device definitions for FIMD controller 0
+
+config S5P_DEV_G2D
+ bool
+ help
+ Compile in platform device definitions for G2D device
+
+config S5P_DEV_I2C_HDMIPHY
+ bool
+ help
+ Compile in platform device definitions for I2C HDMIPHY controller
+
+config S5P_DEV_JPEG
+ bool
+ help
+ Compile in platform device definitions for JPEG codec
+
+config S5P_DEV_MFC
+ bool
+ help
+ Compile in memory setup (init) code for MFC
+
+config S5P_DEV_ONENAND
+ bool
+ help
+ Compile in platform device definition for OneNAND controller
+
+config S5P_DEV_TV
+ bool
+ help
+ Compile in platform device definition for TV interface
+
+config S5P_DEV_USB_EHCI
+ bool
+ help
+ Compile in platform device definition for USB EHCI
+
config S3C24XX_PWM
bool "PWM device support"
select HAVE_PWM
@@ -281,6 +404,11 @@ config S3C24XX_PWM
Support for exporting the PWM timer blocks via the pwm device
system
+config S5P_SETUP_MIPIPHY
+ bool
+ help
+ Compile in common setup code for MIPI-CSIS and MIPI-DSIM devices
+
# DMA
config S3C_DMA
@@ -291,7 +419,7 @@ config S3C_DMA
config SAMSUNG_DMADEV
bool
select DMADEVICES
- select PL330_DMA if (CPU_EXYNOS4210 || CPU_S5PV210 || CPU_S5PC100 || \
+ select PL330_DMA if (ARCH_EXYNOS5 || ARCH_EXYNOS4 || CPU_S5PV210 || CPU_S5PC100 || \
CPU_S5P6450 || CPU_S5P6440)
select ARM_AMBA
help
@@ -351,6 +479,18 @@ config SAMSUNG_WAKEMASK
and above. This code allows a set of interrupt to wakeup-mask
mappings. See <plat/wakeup-mask.h>
+config S5P_PM
+ bool
+ help
+ Common code for power management support on S5P and newer SoCs
+ Note: Do not select this for S5P6440 and S5P6450.
+
+config S5P_SLEEP
+ bool
+ help
+ Internal config node to apply common S5P sleep management code.
+ Can be selected by S5P and newer SoCs with a similar sleep procedure.
+
comment "Power Domain"
config SAMSUNG_PD
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index 6012366f33c..860b2db4db1 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -13,12 +13,18 @@ obj- :=
obj-y += init.o cpu.o
obj-$(CONFIG_ARCH_USES_GETTIMEOFFSET) += time.o
+obj-$(CONFIG_S5P_HRT) += s5p-time.o
+
obj-y += clock.o
obj-y += pwm-clock.o
obj-$(CONFIG_SAMSUNG_CLKSRC) += clock-clksrc.o
+obj-$(CONFIG_S5P_CLOCK) += s5p-clock.o
obj-$(CONFIG_SAMSUNG_IRQ_VIC_TIMER) += irq-vic-timer.o
+obj-$(CONFIG_S5P_IRQ) += s5p-irq.o
+obj-$(CONFIG_S5P_EXT_INT) += s5p-irq-eint.o
+obj-$(CONFIG_S5P_GPIO_INT) += s5p-irq-gpioint.o
# ADC
@@ -30,9 +36,13 @@ obj-y += platformdata.o
obj-y += devs.o
obj-y += dev-uart.o
+obj-$(CONFIG_S5P_DEV_MFC) += s5p-dev-mfc.o
+obj-$(CONFIG_S5P_DEV_UART) += s5p-dev-uart.o
obj-$(CONFIG_SAMSUNG_DEV_BACKLIGHT) += dev-backlight.o
+obj-$(CONFIG_S5P_SETUP_MIPIPHY) += setup-mipiphy.o
+
# DMA support
obj-$(CONFIG_S3C_DMA) += dma.o s3c-dma-ops.o
@@ -47,6 +57,9 @@ obj-$(CONFIG_SAMSUNG_PM_CHECK) += pm-check.o
obj-$(CONFIG_SAMSUNG_WAKEMASK) += wakeup-mask.o
+obj-$(CONFIG_S5P_PM) += s5p-pm.o s5p-irq-pm.o
+obj-$(CONFIG_S5P_SLEEP) += s5p-sleep.o
+
# PD support
obj-$(CONFIG_SAMSUNG_PD) += pd.o
diff --git a/arch/arm/plat-samsung/include/plat/cpu.h b/arch/arm/plat-samsung/include/plat/cpu.h
index 787ceaca0be..0721293fad6 100644
--- a/arch/arm/plat-samsung/include/plat/cpu.h
+++ b/arch/arm/plat-samsung/include/plat/cpu.h
@@ -202,7 +202,7 @@ extern struct bus_type s3c2443_subsys;
extern struct bus_type s3c6410_subsys;
extern struct bus_type s5p64x0_subsys;
extern struct bus_type s5pv210_subsys;
-extern struct bus_type exynos4_subsys;
+extern struct bus_type exynos_subsys;
extern void (*s5pc1xx_idle)(void);
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h
index 2155d4af62a..61ca2f356c5 100644
--- a/arch/arm/plat-samsung/include/plat/devs.h
+++ b/arch/arm/plat-samsung/include/plat/devs.h
@@ -133,7 +133,8 @@ extern struct platform_device exynos4_device_pcm1;
extern struct platform_device exynos4_device_pcm2;
extern struct platform_device exynos4_device_pd[];
extern struct platform_device exynos4_device_spdif;
-extern struct platform_device exynos4_device_sysmmu;
+
+extern struct platform_device exynos_device_drm;
extern struct platform_device samsung_asoc_dma;
extern struct platform_device samsung_asoc_idma;
diff --git a/arch/arm/plat-samsung/include/plat/dma-pl330.h b/arch/arm/plat-samsung/include/plat/dma-pl330.h
index 0670f37aaae..d384a8016b4 100644
--- a/arch/arm/plat-samsung/include/plat/dma-pl330.h
+++ b/arch/arm/plat-samsung/include/plat/dma-pl330.h
@@ -90,6 +90,7 @@ enum dma_ch {
DMACH_MIPI_HSI5,
DMACH_MIPI_HSI6,
DMACH_MIPI_HSI7,
+ DMACH_DISP1,
DMACH_MTOM_0,
DMACH_MTOM_1,
DMACH_MTOM_2,
diff --git a/arch/arm/plat-samsung/include/plat/s5p-clock.h b/arch/arm/plat-samsung/include/plat/s5p-clock.h
index 1de4b32f98e..8364b4bea8b 100644
--- a/arch/arm/plat-samsung/include/plat/s5p-clock.h
+++ b/arch/arm/plat-samsung/include/plat/s5p-clock.h
@@ -32,8 +32,10 @@ extern struct clk clk_48m;
extern struct clk s5p_clk_27m;
extern struct clk clk_fout_apll;
extern struct clk clk_fout_bpll;
+extern struct clk clk_fout_bpll_div2;
extern struct clk clk_fout_cpll;
extern struct clk clk_fout_mpll;
+extern struct clk clk_fout_mpll_div2;
extern struct clk clk_fout_epll;
extern struct clk clk_fout_dpll;
extern struct clk clk_fout_vpll;
@@ -42,8 +44,10 @@ extern struct clk clk_vpll;
extern struct clksrc_sources clk_src_apll;
extern struct clksrc_sources clk_src_bpll;
+extern struct clksrc_sources clk_src_bpll_fout;
extern struct clksrc_sources clk_src_cpll;
extern struct clksrc_sources clk_src_mpll;
+extern struct clksrc_sources clk_src_mpll_fout;
extern struct clksrc_sources clk_src_epll;
extern struct clksrc_sources clk_src_dpll;
diff --git a/arch/arm/plat-samsung/include/plat/sysmmu.h b/arch/arm/plat-samsung/include/plat/sysmmu.h
deleted file mode 100644
index 5fe8ee01a5b..00000000000
--- a/arch/arm/plat-samsung/include/plat/sysmmu.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/sysmmu.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Samsung System MMU driver for S5P platform
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __PLAT_SAMSUNG_SYSMMU_H
-#define __PLAT_SAMSUNG_SYSMMU_H __FILE__
-
-enum S5P_SYSMMU_INTERRUPT_TYPE {
- SYSMMU_PAGEFAULT,
- SYSMMU_AR_MULTIHIT,
- SYSMMU_AW_MULTIHIT,
- SYSMMU_BUSERROR,
- SYSMMU_AR_SECURITY,
- SYSMMU_AR_ACCESS,
- SYSMMU_AW_SECURITY,
- SYSMMU_AW_PROTECTION, /* 7 */
- SYSMMU_FAULTS_NUM
-};
-
-#ifdef CONFIG_S5P_SYSTEM_MMU
-
-#include <mach/sysmmu.h>
-
-/**
- * s5p_sysmmu_enable() - enable system mmu of ip
- * @ips: The ip connected system mmu.
- * #pgd: Base physical address of the 1st level page table
- *
- * This function enable system mmu to transfer address
- * from virtual address to physical address
- */
-void s5p_sysmmu_enable(sysmmu_ips ips, unsigned long pgd);
-
-/**
- * s5p_sysmmu_disable() - disable sysmmu mmu of ip
- * @ips: The ip connected system mmu.
- *
- * This function disable system mmu to transfer address
- * from virtual address to physical address
- */
-void s5p_sysmmu_disable(sysmmu_ips ips);
-
-/**
- * s5p_sysmmu_set_tablebase_pgd() - set page table base address to refer page table
- * @ips: The ip connected system mmu.
- * @pgd: The page table base address.
- *
- * This function set page table base address
- * When system mmu transfer address from virtaul address to physical address,
- * system mmu refer address information from page table
- */
-void s5p_sysmmu_set_tablebase_pgd(sysmmu_ips ips, unsigned long pgd);
-
-/**
- * s5p_sysmmu_tlb_invalidate() - flush all TLB entry in system mmu
- * @ips: The ip connected system mmu.
- *
- * This function flush all TLB entry in system mmu
- */
-void s5p_sysmmu_tlb_invalidate(sysmmu_ips ips);
-
-/** s5p_sysmmu_set_fault_handler() - Fault handler for System MMUs
- * @itype: type of fault.
- * @pgtable_base: the physical address of page table base. This is 0 if @ips is
- * SYSMMU_BUSERROR.
- * @fault_addr: the device (virtual) address that the System MMU tried to
- * translated. This is 0 if @ips is SYSMMU_BUSERROR.
- * Called when interrupt occurred by the System MMUs
- * The device drivers of peripheral devices that has a System MMU can implement
- * a fault handler to resolve address translation fault by System MMU.
- * The meanings of return value and parameters are described below.
-
- * return value: non-zero if the fault is correctly resolved.
- * zero if the fault is not handled.
- */
-void s5p_sysmmu_set_fault_handler(sysmmu_ips ips,
- int (*handler)(enum S5P_SYSMMU_INTERRUPT_TYPE itype,
- unsigned long pgtable_base,
- unsigned long fault_addr));
-#else
-#define s5p_sysmmu_enable(ips, pgd) do { } while (0)
-#define s5p_sysmmu_disable(ips) do { } while (0)
-#define s5p_sysmmu_set_tablebase_pgd(ips, pgd) do { } while (0)
-#define s5p_sysmmu_tlb_invalidate(ips) do { } while (0)
-#define s5p_sysmmu_set_fault_handler(ips, handler) do { } while (0)
-#endif
-#endif /* __ASM_PLAT_SYSMMU_H */
diff --git a/arch/arm/plat-s5p/clock.c b/arch/arm/plat-samsung/s5p-clock.c
index f68a9bb1194..031a61899be 100644
--- a/arch/arm/plat-s5p/clock.c
+++ b/arch/arm/plat-samsung/s5p-clock.c
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s5p/clock.c
- *
+/*
* Copyright 2009 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
@@ -68,6 +67,11 @@ struct clk clk_fout_bpll = {
.id = -1,
};
+struct clk clk_fout_bpll_div2 = {
+ .name = "fout_bpll_div2",
+ .id = -1,
+};
+
/* CPLL clock output */
struct clk clk_fout_cpll = {
@@ -83,6 +87,11 @@ struct clk clk_fout_mpll = {
.id = -1,
};
+struct clk clk_fout_mpll_div2 = {
+ .name = "fout_mpll_div2",
+ .id = -1,
+};
+
/* EPLL clock output */
struct clk clk_fout_epll = {
.name = "fout_epll",
@@ -126,6 +135,16 @@ struct clksrc_sources clk_src_bpll = {
.nr_sources = ARRAY_SIZE(clk_src_bpll_list),
};
+static struct clk *clk_src_bpll_fout_list[] = {
+ [0] = &clk_fout_bpll_div2,
+ [1] = &clk_fout_bpll,
+};
+
+struct clksrc_sources clk_src_bpll_fout = {
+ .sources = clk_src_bpll_fout_list,
+ .nr_sources = ARRAY_SIZE(clk_src_bpll_fout_list),
+};
+
/* Possible clock sources for CPLL Mux */
static struct clk *clk_src_cpll_list[] = {
[0] = &clk_fin_cpll,
@@ -148,6 +167,16 @@ struct clksrc_sources clk_src_mpll = {
.nr_sources = ARRAY_SIZE(clk_src_mpll_list),
};
+static struct clk *clk_src_mpll_fout_list[] = {
+ [0] = &clk_fout_mpll_div2,
+ [1] = &clk_fout_mpll,
+};
+
+struct clksrc_sources clk_src_mpll_fout = {
+ .sources = clk_src_mpll_fout_list,
+ .nr_sources = ARRAY_SIZE(clk_src_mpll_fout_list),
+};
+
/* Possible clock sources for EPLL Mux */
static struct clk *clk_src_epll_list[] = {
[0] = &clk_fin_epll,
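
The new *_fout source tables added above follow the existing clksrc convention: a mux picks one parent out of a small array of struct clk pointers, indexed by the value programmed into the clock-source register. The helper below is not part of the patch; it is a rough, hypothetical illustration of how such a table is consumed, assuming only the .sources/.nr_sources fields visible in this file and struct clk from the plat-samsung clock headers.

/*
 * Hypothetical helper -- a sketch, not platform code.  It assumes the
 * struct clksrc_sources layout shown above (a .sources array plus an
 * .nr_sources count).
 */
static struct clk *example_pick_fout_parent(struct clksrc_sources *srcs,
					    unsigned int sel)
{
	/* 'sel' is the raw mux field read from the clock-source register. */
	if (sel >= srcs->nr_sources)
		return NULL;

	/* e.g. for clk_src_mpll_fout: 0 -> fout_mpll_div2, 1 -> fout_mpll */
	return srcs->sources[sel];
}
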
diff --git a/arch/arm/plat-s5p/dev-mfc.c b/arch/arm/plat-samsung/s5p-dev-mfc.c
index a30d36b7f61..ad6089465e2 100644
--- a/arch/arm/plat-s5p/dev-mfc.c
+++ b/arch/arm/plat-samsung/s5p-dev-mfc.c
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s5p/dev-mfc.c
- *
+/*
* Copyright (C) 2010-2011 Samsung Electronics Co.Ltd
*
* Base S5P MFC resource and device definitions
@@ -9,7 +8,6 @@
* published by the Free Software Foundation.
*/
-
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
diff --git a/arch/arm/plat-s5p/dev-uart.c b/arch/arm/plat-samsung/s5p-dev-uart.c
index c9308db3618..cafa3deddcc 100644
--- a/arch/arm/plat-s5p/dev-uart.c
+++ b/arch/arm/plat-samsung/s5p-dev-uart.c
@@ -1,6 +1,5 @@
-/* linux/arch/arm/plat-s5p/dev-uart.c
- *
- * Copyright (c) 2009 Samsung Electronics Co., Ltd.
+/*
+ * Copyright (c) 2009,2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* Base S5P UART resource and device definitions
@@ -14,6 +13,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
+#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <asm/mach/arch.h>
@@ -26,86 +26,38 @@
/* Serial port registrations */
static struct resource s5p_uart0_resource[] = {
- [0] = {
- .start = S5P_PA_UART0,
- .end = S5P_PA_UART0 + S5P_SZ_UART - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_UART0,
- .end = IRQ_UART0,
- .flags = IORESOURCE_IRQ,
- },
+ [0] = DEFINE_RES_MEM(S5P_PA_UART0, S5P_SZ_UART),
+ [1] = DEFINE_RES_IRQ(IRQ_UART0),
};
static struct resource s5p_uart1_resource[] = {
- [0] = {
- .start = S5P_PA_UART1,
- .end = S5P_PA_UART1 + S5P_SZ_UART - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_UART1,
- .end = IRQ_UART1,
- .flags = IORESOURCE_IRQ,
- },
+ [0] = DEFINE_RES_MEM(S5P_PA_UART1, S5P_SZ_UART),
+ [1] = DEFINE_RES_IRQ(IRQ_UART1),
};
static struct resource s5p_uart2_resource[] = {
- [0] = {
- .start = S5P_PA_UART2,
- .end = S5P_PA_UART2 + S5P_SZ_UART - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_UART2,
- .end = IRQ_UART2,
- .flags = IORESOURCE_IRQ,
- },
+ [0] = DEFINE_RES_MEM(S5P_PA_UART2, S5P_SZ_UART),
+ [1] = DEFINE_RES_IRQ(IRQ_UART2),
};
static struct resource s5p_uart3_resource[] = {
#if CONFIG_SERIAL_SAMSUNG_UARTS > 3
- [0] = {
- .start = S5P_PA_UART3,
- .end = S5P_PA_UART3 + S5P_SZ_UART - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_UART3,
- .end = IRQ_UART3,
- .flags = IORESOURCE_IRQ,
- },
+ [0] = DEFINE_RES_MEM(S5P_PA_UART3, S5P_SZ_UART),
+ [1] = DEFINE_RES_IRQ(IRQ_UART3),
#endif
};
static struct resource s5p_uart4_resource[] = {
#if CONFIG_SERIAL_SAMSUNG_UARTS > 4
- [0] = {
- .start = S5P_PA_UART4,
- .end = S5P_PA_UART4 + S5P_SZ_UART - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_UART4,
- .end = IRQ_UART4,
- .flags = IORESOURCE_IRQ,
- },
+ [0] = DEFINE_RES_MEM(S5P_PA_UART4, S5P_SZ_UART),
+ [1] = DEFINE_RES_IRQ(IRQ_UART4),
#endif
};
static struct resource s5p_uart5_resource[] = {
#if CONFIG_SERIAL_SAMSUNG_UARTS > 5
- [0] = {
- .start = S5P_PA_UART5,
- .end = S5P_PA_UART5 + S5P_SZ_UART - 1,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = IRQ_UART5,
- .end = IRQ_UART5,
- .flags = IORESOURCE_IRQ,
- },
+ [0] = DEFINE_RES_MEM(S5P_PA_UART5, S5P_SZ_UART),
+ [1] = DEFINE_RES_IRQ(IRQ_UART5),
#endif
};
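
The resource conversions above are purely mechanical: DEFINE_RES_MEM() and DEFINE_RES_IRQ() from <linux/ioport.h> expand to the same kind of struct resource initializers that were previously spelled out by hand. A minimal sketch of the equivalence, using hypothetical EXAMPLE_* values rather than the real S5P_PA_UARTx / IRQ_UARTx constants:

#include <linux/ioport.h>

/* Hypothetical example values -- not the real S5P UART constants. */
#define EXAMPLE_PA_UART		0x12345000
#define EXAMPLE_SZ_UART		0x100
#define EXAMPLE_IRQ_UART	42

/* Hand-written form, as removed by this patch. */
static struct resource example_uart_res_old[] = {
	[0] = {
		.start	= EXAMPLE_PA_UART,
		.end	= EXAMPLE_PA_UART + EXAMPLE_SZ_UART - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= EXAMPLE_IRQ_UART,
		.end	= EXAMPLE_IRQ_UART,
		.flags	= IORESOURCE_IRQ,
	},
};

/* Equivalent macro form, as added by this patch. */
static struct resource example_uart_res_new[] = {
	[0] = DEFINE_RES_MEM(EXAMPLE_PA_UART, EXAMPLE_SZ_UART),
	[1] = DEFINE_RES_IRQ(EXAMPLE_IRQ_UART),
};
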
diff --git a/arch/arm/plat-s5p/irq-eint.c b/arch/arm/plat-samsung/s5p-irq-eint.c
index 139c050918c..33bd3f3d20f 100644
--- a/arch/arm/plat-s5p/irq-eint.c
+++ b/arch/arm/plat-samsung/s5p-irq-eint.c
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s5p/irq-eint.c
- *
+/*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-samsung/s5p-irq-gpioint.c
index 82c7311017a..f9431fe5b06 100644
--- a/arch/arm/plat-s5p/irq-gpioint.c
+++ b/arch/arm/plat-samsung/s5p-irq-gpioint.c
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s5p/irq-gpioint.c
- *
+/*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* Author: Kyungmin Park <kyungmin.park@samsung.com>
* Author: Joonyoung Shim <jy0922.shim@samsung.com>
diff --git a/arch/arm/plat-s5p/irq-pm.c b/arch/arm/plat-samsung/s5p-irq-pm.c
index d1bfecae6c9..7c1e3b7072f 100644
--- a/arch/arm/plat-s5p/irq-pm.c
+++ b/arch/arm/plat-samsung/s5p-irq-pm.c
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s5p/irq-pm.c
- *
+/*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
diff --git a/arch/arm/plat-s5p/irq.c b/arch/arm/plat-samsung/s5p-irq.c
index afdaa1082b9..dfb47d638f0 100644
--- a/arch/arm/plat-s5p/irq.c
+++ b/arch/arm/plat-samsung/s5p-irq.c
@@ -1,5 +1,4 @@
-/* arch/arm/plat-s5p/irq.c
- *
+/*
* Copyright (c) 2009 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
diff --git a/arch/arm/plat-s5p/pm.c b/arch/arm/plat-samsung/s5p-pm.c
index d15dc47b0e3..0747468f093 100644
--- a/arch/arm/plat-s5p/pm.c
+++ b/arch/arm/plat-samsung/s5p-pm.c
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s5p/pm.c
- *
+/*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
diff --git a/arch/arm/plat-s5p/sleep.S b/arch/arm/plat-samsung/s5p-sleep.S
index 006bd01eda0..bdf6dadf879 100644
--- a/arch/arm/plat-s5p/sleep.S
+++ b/arch/arm/plat-samsung/s5p-sleep.S
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s5p/sleep.S
- *
+/*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
diff --git a/arch/arm/plat-s5p/s5p-time.c b/arch/arm/plat-samsung/s5p-time.c
index 17c0a2c58df..028b6e877eb 100644
--- a/arch/arm/plat-s5p/s5p-time.c
+++ b/arch/arm/plat-samsung/s5p-time.c
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-s5p/s5p-time.c
- *
+/*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
diff --git a/arch/arm/plat-s5p/setup-mipiphy.c b/arch/arm/plat-samsung/setup-mipiphy.c
index 683c466c0e6..683c466c0e6 100644
--- a/arch/arm/plat-s5p/setup-mipiphy.c
+++ b/arch/arm/plat-samsung/setup-mipiphy.c
diff --git a/arch/arm/plat-spear/Kconfig b/arch/arm/plat-spear/Kconfig
index 387655b5ce0..4404f82d597 100644
--- a/arch/arm/plat-spear/Kconfig
+++ b/arch/arm/plat-spear/Kconfig
@@ -8,6 +8,17 @@ choice
prompt "ST SPEAr Family"
default ARCH_SPEAR3XX
+config ARCH_SPEAR13XX
+ bool "ST SPEAr13xx with Device Tree"
+ select ARM_GIC
+ select CPU_V7
+ select USE_OF
+ select HAVE_SMP
+ select MIGHT_HAVE_CACHE_L2X0
+ select PINCTRL
+ help
+ Support for ARM's SPEAr13xx family
+
config ARCH_SPEAR3XX
bool "ST SPEAr3xx with Device Tree"
select ARM_VIC
@@ -27,6 +38,7 @@ config ARCH_SPEAR6XX
endchoice
# Adding SPEAr machine specific configuration files
+source "arch/arm/mach-spear13xx/Kconfig"
source "arch/arm/mach-spear3xx/Kconfig"
source "arch/arm/mach-spear6xx/Kconfig"
diff --git a/arch/arm/plat-spear/Makefile b/arch/arm/plat-spear/Makefile
index 7744802c83e..2607bd05c52 100644
--- a/arch/arm/plat-spear/Makefile
+++ b/arch/arm/plat-spear/Makefile
@@ -3,6 +3,7 @@
#
# Common support
-obj-y := clock.o restart.o time.o pl080.o
+obj-y := restart.o time.o
-obj-$(CONFIG_ARCH_SPEAR3XX) += shirq.o
+obj-$(CONFIG_ARCH_SPEAR3XX) += pl080.o shirq.o
+obj-$(CONFIG_ARCH_SPEAR6XX) += pl080.o
diff --git a/arch/arm/plat-spear/clock.c b/arch/arm/plat-spear/clock.c
deleted file mode 100644
index 67dd00381ea..00000000000
--- a/arch/arm/plat-spear/clock.c
+++ /dev/null
@@ -1,1005 +0,0 @@
-/*
- * arch/arm/plat-spear/clock.c
- *
- * Clock framework for SPEAr platform
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/bug.h>
-#include <linux/clk.h>
-#include <linux/debugfs.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <plat/clock.h>
-
-static DEFINE_SPINLOCK(clocks_lock);
-static LIST_HEAD(root_clks);
-#ifdef CONFIG_DEBUG_FS
-static LIST_HEAD(clocks);
-#endif
-
-static void propagate_rate(struct clk *, int on_init);
-#ifdef CONFIG_DEBUG_FS
-static int clk_debugfs_reparent(struct clk *);
-#endif
-
-static int generic_clk_enable(struct clk *clk)
-{
- unsigned int val;
-
- if (!clk->en_reg)
- return -EFAULT;
-
- val = readl(clk->en_reg);
- if (unlikely(clk->flags & RESET_TO_ENABLE))
- val &= ~(1 << clk->en_reg_bit);
- else
- val |= 1 << clk->en_reg_bit;
-
- writel(val, clk->en_reg);
-
- return 0;
-}
-
-static void generic_clk_disable(struct clk *clk)
-{
- unsigned int val;
-
- if (!clk->en_reg)
- return;
-
- val = readl(clk->en_reg);
- if (unlikely(clk->flags & RESET_TO_ENABLE))
- val |= 1 << clk->en_reg_bit;
- else
- val &= ~(1 << clk->en_reg_bit);
-
- writel(val, clk->en_reg);
-}
-
-/* generic clk ops */
-static struct clkops generic_clkops = {
- .enable = generic_clk_enable,
- .disable = generic_clk_disable,
-};
-
-/* returns current programmed clocks clock info structure */
-static struct pclk_info *pclk_info_get(struct clk *clk)
-{
- unsigned int val, i;
- struct pclk_info *info = NULL;
-
- val = (readl(clk->pclk_sel->pclk_sel_reg) >> clk->pclk_sel_shift)
- & clk->pclk_sel->pclk_sel_mask;
-
- for (i = 0; i < clk->pclk_sel->pclk_count; i++) {
- if (clk->pclk_sel->pclk_info[i].pclk_val == val)
- info = &clk->pclk_sel->pclk_info[i];
- }
-
- return info;
-}
-
-/*
- * Set Update pclk, and pclk_info of clk and add clock sibling node to current
- * parents children list
- */
-static void clk_reparent(struct clk *clk, struct pclk_info *pclk_info)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&clocks_lock, flags);
- list_del(&clk->sibling);
- list_add(&clk->sibling, &pclk_info->pclk->children);
-
- clk->pclk = pclk_info->pclk;
- spin_unlock_irqrestore(&clocks_lock, flags);
-
-#ifdef CONFIG_DEBUG_FS
- clk_debugfs_reparent(clk);
-#endif
-}
-
-static void do_clk_disable(struct clk *clk)
-{
- if (!clk)
- return;
-
- if (!clk->usage_count) {
- WARN_ON(1);
- return;
- }
-
- clk->usage_count--;
-
- if (clk->usage_count == 0) {
- /*
- * Surely, there are no active childrens or direct users
- * of this clock
- */
- if (clk->pclk)
- do_clk_disable(clk->pclk);
-
- if (clk->ops && clk->ops->disable)
- clk->ops->disable(clk);
- }
-}
-
-static int do_clk_enable(struct clk *clk)
-{
- int ret = 0;
-
- if (!clk)
- return -EFAULT;
-
- if (clk->usage_count == 0) {
- if (clk->pclk) {
- ret = do_clk_enable(clk->pclk);
- if (ret)
- goto err;
- }
- if (clk->ops && clk->ops->enable) {
- ret = clk->ops->enable(clk);
- if (ret) {
- if (clk->pclk)
- do_clk_disable(clk->pclk);
- goto err;
- }
- }
- /*
- * Since the clock is going to be used for the first
- * time please reclac
- */
- if (clk->recalc) {
- ret = clk->recalc(clk);
- if (ret)
- goto err;
- }
- }
- clk->usage_count++;
-err:
- return ret;
-}
-
-/*
- * clk_enable - inform the system when the clock source should be running.
- * @clk: clock source
- *
- * If the clock can not be enabled/disabled, this should return success.
- *
- * Returns success (0) or negative errno.
- */
-int clk_enable(struct clk *clk)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&clocks_lock, flags);
- ret = do_clk_enable(clk);
- spin_unlock_irqrestore(&clocks_lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(clk_enable);
-
-/*
- * clk_disable - inform the system when the clock source is no longer required.
- * @clk: clock source
- *
- * Inform the system that a clock source is no longer required by
- * a driver and may be shut down.
- *
- * Implementation detail: if the clock source is shared between
- * multiple drivers, clk_enable() calls must be balanced by the
- * same number of clk_disable() calls for the clock source to be
- * disabled.
- */
-void clk_disable(struct clk *clk)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&clocks_lock, flags);
- do_clk_disable(clk);
- spin_unlock_irqrestore(&clocks_lock, flags);
-}
-EXPORT_SYMBOL(clk_disable);
-
-/**
- * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
- * This is only valid once the clock source has been enabled.
- * @clk: clock source
- */
-unsigned long clk_get_rate(struct clk *clk)
-{
- unsigned long flags, rate;
-
- spin_lock_irqsave(&clocks_lock, flags);
- rate = clk->rate;
- spin_unlock_irqrestore(&clocks_lock, flags);
-
- return rate;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-/**
- * clk_set_parent - set the parent clock source for this clock
- * @clk: clock source
- * @parent: parent clock source
- *
- * Returns success (0) or negative errno.
- */
-int clk_set_parent(struct clk *clk, struct clk *parent)
-{
- int i, found = 0, val = 0;
- unsigned long flags;
-
- if (!clk || !parent)
- return -EFAULT;
- if (clk->pclk == parent)
- return 0;
- if (!clk->pclk_sel)
- return -EPERM;
-
- /* check if requested parent is in clk parent list */
- for (i = 0; i < clk->pclk_sel->pclk_count; i++) {
- if (clk->pclk_sel->pclk_info[i].pclk == parent) {
- found = 1;
- break;
- }
- }
-
- if (!found)
- return -EINVAL;
-
- spin_lock_irqsave(&clocks_lock, flags);
- /* reflect parent change in hardware */
- val = readl(clk->pclk_sel->pclk_sel_reg);
- val &= ~(clk->pclk_sel->pclk_sel_mask << clk->pclk_sel_shift);
- val |= clk->pclk_sel->pclk_info[i].pclk_val << clk->pclk_sel_shift;
- writel(val, clk->pclk_sel->pclk_sel_reg);
- spin_unlock_irqrestore(&clocks_lock, flags);
-
- /* reflect parent change in software */
- clk_reparent(clk, &clk->pclk_sel->pclk_info[i]);
-
- propagate_rate(clk, 0);
- return 0;
-}
-EXPORT_SYMBOL(clk_set_parent);
-
-/**
- * clk_set_rate - set the clock rate for a clock source
- * @clk: clock source
- * @rate: desired clock rate in Hz
- *
- * Returns success (0) or negative errno.
- */
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- unsigned long flags;
- int ret = -EINVAL;
-
- if (!clk || !rate)
- return -EFAULT;
-
- if (clk->set_rate) {
- spin_lock_irqsave(&clocks_lock, flags);
- ret = clk->set_rate(clk, rate);
- if (!ret)
- /* if successful -> propagate */
- propagate_rate(clk, 0);
- spin_unlock_irqrestore(&clocks_lock, flags);
- } else if (clk->pclk) {
- u32 mult = clk->div_factor ? clk->div_factor : 1;
- ret = clk_set_rate(clk->pclk, mult * rate);
- }
-
- return ret;
-}
-EXPORT_SYMBOL(clk_set_rate);
-
-/* registers clock in platform clock framework */
-void clk_register(struct clk_lookup *cl)
-{
- struct clk *clk;
- unsigned long flags;
-
- if (!cl || !cl->clk)
- return;
- clk = cl->clk;
-
- spin_lock_irqsave(&clocks_lock, flags);
-
- INIT_LIST_HEAD(&clk->children);
- if (clk->flags & ALWAYS_ENABLED)
- clk->ops = NULL;
- else if (!clk->ops)
- clk->ops = &generic_clkops;
-
- /* root clock don't have any parents */
- if (!clk->pclk && !clk->pclk_sel) {
- list_add(&clk->sibling, &root_clks);
- } else if (clk->pclk && !clk->pclk_sel) {
- /* add clocks with only one parent to parent's children list */
- list_add(&clk->sibling, &clk->pclk->children);
- } else {
- /* clocks with more than one parent */
- struct pclk_info *pclk_info;
-
- pclk_info = pclk_info_get(clk);
- if (!pclk_info) {
- pr_err("CLKDEV: invalid pclk info of clk with"
- " %s dev_id and %s con_id\n",
- cl->dev_id, cl->con_id);
- } else {
- clk->pclk = pclk_info->pclk;
- list_add(&clk->sibling, &pclk_info->pclk->children);
- }
- }
-
- spin_unlock_irqrestore(&clocks_lock, flags);
-
- /* debugfs specific */
-#ifdef CONFIG_DEBUG_FS
- list_add(&clk->node, &clocks);
- clk->cl = cl;
-#endif
-
- /* add clock to arm clockdev framework */
- clkdev_add(cl);
-}
-
-/**
- * propagate_rate - recalculate and propagate all clocks to children
- * @pclk: parent clock required to be propogated
- * @on_init: flag for enabling clocks which are ENABLED_ON_INIT.
- *
- * Recalculates all children clocks
- */
-void propagate_rate(struct clk *pclk, int on_init)
-{
- struct clk *clk, *_temp;
- int ret = 0;
-
- list_for_each_entry_safe(clk, _temp, &pclk->children, sibling) {
- if (clk->recalc) {
- ret = clk->recalc(clk);
- /*
- * recalc will return error if clk out is not programmed
- * In this case configure default rate.
- */
- if (ret && clk->set_rate)
- clk->set_rate(clk, 0);
- }
- propagate_rate(clk, on_init);
-
- if (!on_init)
- continue;
-
- /* Enable clks enabled on init, in software view */
- if (clk->flags & ENABLED_ON_INIT)
- do_clk_enable(clk);
- }
-}
-
-/**
- * round_rate_index - return closest programmable rate index in rate_config tbl
- * @clk: ptr to clock structure
- * @drate: desired rate
- * @rate: final rate will be returned in this variable only.
- *
- * Finds index in rate_config for highest clk rate which is less than
- * requested rate. If there is no clk rate lesser than requested rate then
- * -EINVAL is returned. This routine assumes that rate_config is written
- * in incrementing order of clk rates.
- * If drate passed is zero then default rate is programmed.
- */
-static int
-round_rate_index(struct clk *clk, unsigned long drate, unsigned long *rate)
-{
- unsigned long tmp = 0, prev_rate = 0;
- int index;
-
- if (!clk->calc_rate)
- return -EFAULT;
-
- if (!drate)
- return -EINVAL;
-
- /*
- * This loops ends on two conditions:
- * - as soon as clk is found with rate greater than requested rate.
- * - if all clks in rate_config are smaller than requested rate.
- */
- for (index = 0; index < clk->rate_config.count; index++) {
- prev_rate = tmp;
- tmp = clk->calc_rate(clk, index);
- if (drate < tmp) {
- index--;
- break;
- }
- }
- /* return if can't find suitable clock */
- if (index < 0) {
- index = -EINVAL;
- *rate = 0;
- } else if (index == clk->rate_config.count) {
- /* program with highest clk rate possible */
- index = clk->rate_config.count - 1;
- *rate = tmp;
- } else
- *rate = prev_rate;
-
- return index;
-}
-
-/**
- * clk_round_rate - adjust a rate to the exact rate a clock can provide
- * @clk: clock source
- * @rate: desired clock rate in Hz
- *
- * Returns rounded clock rate in Hz, or negative errno.
- */
-long clk_round_rate(struct clk *clk, unsigned long drate)
-{
- long rate = 0;
- int index;
-
- /*
- * propagate call to parent who supports calc_rate. Similar approach is
- * used in clk_set_rate.
- */
- if (!clk->calc_rate) {
- u32 mult;
- if (!clk->pclk)
- return clk->rate;
-
- mult = clk->div_factor ? clk->div_factor : 1;
- return clk_round_rate(clk->pclk, mult * drate) / mult;
- }
-
- index = round_rate_index(clk, drate, &rate);
- if (index >= 0)
- return rate;
- else
- return index;
-}
-EXPORT_SYMBOL(clk_round_rate);
-
-/*All below functions are called with lock held */
-
-/*
- * Calculates pll clk rate for specific value of mode, m, n and p
- *
- * In normal mode
- * rate = (2 * M[15:8] * Fin)/(N * 2^P)
- *
- * In Dithered mode
- * rate = (2 * M[15:0] * Fin)/(256 * N * 2^P)
- */
-unsigned long pll_calc_rate(struct clk *clk, int index)
-{
- unsigned long rate = clk->pclk->rate;
- struct pll_rate_tbl *tbls = clk->rate_config.tbls;
- unsigned int mode;
-
- mode = tbls[index].mode ? 256 : 1;
- return (((2 * rate / 10000) * tbls[index].m) /
- (mode * tbls[index].n * (1 << tbls[index].p))) * 10000;
-}
-
-/*
- * calculates current programmed rate of pll1
- *
- * In normal mode
- * rate = (2 * M[15:8] * Fin)/(N * 2^P)
- *
- * In Dithered mode
- * rate = (2 * M[15:0] * Fin)/(256 * N * 2^P)
- */
-int pll_clk_recalc(struct clk *clk)
-{
- struct pll_clk_config *config = clk->private_data;
- unsigned int num = 2, den = 0, val, mode = 0;
-
- mode = (readl(config->mode_reg) >> config->masks->mode_shift) &
- config->masks->mode_mask;
-
- val = readl(config->cfg_reg);
- /* calculate denominator */
- den = (val >> config->masks->div_p_shift) & config->masks->div_p_mask;
- den = 1 << den;
- den *= (val >> config->masks->div_n_shift) & config->masks->div_n_mask;
-
- /* calculate numerator & denominator */
- if (!mode) {
- /* Normal mode */
- num *= (val >> config->masks->norm_fdbk_m_shift) &
- config->masks->norm_fdbk_m_mask;
- } else {
- /* Dithered mode */
- num *= (val >> config->masks->dith_fdbk_m_shift) &
- config->masks->dith_fdbk_m_mask;
- den *= 256;
- }
-
- if (!den)
- return -EINVAL;
-
- clk->rate = (((clk->pclk->rate/10000) * num) / den) * 10000;
- return 0;
-}
-
-/*
- * Configures new clock rate of pll
- */
-int pll_clk_set_rate(struct clk *clk, unsigned long desired_rate)
-{
- struct pll_rate_tbl *tbls = clk->rate_config.tbls;
- struct pll_clk_config *config = clk->private_data;
- unsigned long val, rate;
- int i;
-
- i = round_rate_index(clk, desired_rate, &rate);
- if (i < 0)
- return i;
-
- val = readl(config->mode_reg) &
- ~(config->masks->mode_mask << config->masks->mode_shift);
- val |= (tbls[i].mode & config->masks->mode_mask) <<
- config->masks->mode_shift;
- writel(val, config->mode_reg);
-
- val = readl(config->cfg_reg) &
- ~(config->masks->div_p_mask << config->masks->div_p_shift);
- val |= (tbls[i].p & config->masks->div_p_mask) <<
- config->masks->div_p_shift;
- val &= ~(config->masks->div_n_mask << config->masks->div_n_shift);
- val |= (tbls[i].n & config->masks->div_n_mask) <<
- config->masks->div_n_shift;
- val &= ~(config->masks->dith_fdbk_m_mask <<
- config->masks->dith_fdbk_m_shift);
- if (tbls[i].mode)
- val |= (tbls[i].m & config->masks->dith_fdbk_m_mask) <<
- config->masks->dith_fdbk_m_shift;
- else
- val |= (tbls[i].m & config->masks->norm_fdbk_m_mask) <<
- config->masks->norm_fdbk_m_shift;
-
- writel(val, config->cfg_reg);
-
- clk->rate = rate;
-
- return 0;
-}
-
-/*
- * Calculates ahb, apb clk rate for specific value of div
- */
-unsigned long bus_calc_rate(struct clk *clk, int index)
-{
- unsigned long rate = clk->pclk->rate;
- struct bus_rate_tbl *tbls = clk->rate_config.tbls;
-
- return rate / (tbls[index].div + 1);
-}
-
-/* calculates current programmed rate of ahb or apb bus */
-int bus_clk_recalc(struct clk *clk)
-{
- struct bus_clk_config *config = clk->private_data;
- unsigned int div;
-
- div = ((readl(config->reg) >> config->masks->shift) &
- config->masks->mask) + 1;
-
- if (!div)
- return -EINVAL;
-
- clk->rate = (unsigned long)clk->pclk->rate / div;
- return 0;
-}
-
-/* Configures new clock rate of AHB OR APB bus */
-int bus_clk_set_rate(struct clk *clk, unsigned long desired_rate)
-{
- struct bus_rate_tbl *tbls = clk->rate_config.tbls;
- struct bus_clk_config *config = clk->private_data;
- unsigned long val, rate;
- int i;
-
- i = round_rate_index(clk, desired_rate, &rate);
- if (i < 0)
- return i;
-
- val = readl(config->reg) &
- ~(config->masks->mask << config->masks->shift);
- val |= (tbls[i].div & config->masks->mask) << config->masks->shift;
- writel(val, config->reg);
-
- clk->rate = rate;
-
- return 0;
-}
-
-/*
- * gives rate for different values of eq, x and y
- *
- * Fout from synthesizer can be given from two equations:
- * Fout1 = (Fin * X/Y)/2 EQ1
- * Fout2 = Fin * X/Y EQ2
- */
-unsigned long aux_calc_rate(struct clk *clk, int index)
-{
- unsigned long rate = clk->pclk->rate;
- struct aux_rate_tbl *tbls = clk->rate_config.tbls;
- u8 eq = tbls[index].eq ? 1 : 2;
-
- return (((rate/10000) * tbls[index].xscale) /
- (tbls[index].yscale * eq)) * 10000;
-}
-
-/*
- * calculates current programmed rate of auxiliary synthesizers
- * used by: UART, FIRDA
- *
- * Fout from synthesizer can be given from two equations:
- * Fout1 = (Fin * X/Y)/2
- * Fout2 = Fin * X/Y
- *
- * Selection of eqn 1 or 2 is programmed in register
- */
-int aux_clk_recalc(struct clk *clk)
-{
- struct aux_clk_config *config = clk->private_data;
- unsigned int num = 1, den = 1, val, eqn;
-
- val = readl(config->synth_reg);
-
- eqn = (val >> config->masks->eq_sel_shift) &
- config->masks->eq_sel_mask;
- if (eqn == config->masks->eq1_mask)
- den *= 2;
-
- /* calculate numerator */
- num = (val >> config->masks->xscale_sel_shift) &
- config->masks->xscale_sel_mask;
-
- /* calculate denominator */
- den *= (val >> config->masks->yscale_sel_shift) &
- config->masks->yscale_sel_mask;
-
- if (!den)
- return -EINVAL;
-
- clk->rate = (((clk->pclk->rate/10000) * num) / den) * 10000;
- return 0;
-}
-
-/* Configures new clock rate of auxiliary synthesizers used by: UART, FIRDA*/
-int aux_clk_set_rate(struct clk *clk, unsigned long desired_rate)
-{
- struct aux_rate_tbl *tbls = clk->rate_config.tbls;
- struct aux_clk_config *config = clk->private_data;
- unsigned long val, rate;
- int i;
-
- i = round_rate_index(clk, desired_rate, &rate);
- if (i < 0)
- return i;
-
- val = readl(config->synth_reg) &
- ~(config->masks->eq_sel_mask << config->masks->eq_sel_shift);
- val |= (tbls[i].eq & config->masks->eq_sel_mask) <<
- config->masks->eq_sel_shift;
- val &= ~(config->masks->xscale_sel_mask <<
- config->masks->xscale_sel_shift);
- val |= (tbls[i].xscale & config->masks->xscale_sel_mask) <<
- config->masks->xscale_sel_shift;
- val &= ~(config->masks->yscale_sel_mask <<
- config->masks->yscale_sel_shift);
- val |= (tbls[i].yscale & config->masks->yscale_sel_mask) <<
- config->masks->yscale_sel_shift;
- writel(val, config->synth_reg);
-
- clk->rate = rate;
-
- return 0;
-}
-
-/*
- * Calculates gpt clk rate for different values of mscale and nscale
- *
- * Fout= Fin/((2 ^ (N+1)) * (M+1))
- */
-unsigned long gpt_calc_rate(struct clk *clk, int index)
-{
- unsigned long rate = clk->pclk->rate;
- struct gpt_rate_tbl *tbls = clk->rate_config.tbls;
-
- return rate / ((1 << (tbls[index].nscale + 1)) *
- (tbls[index].mscale + 1));
-}
-
-/*
- * calculates current programmed rate of gpt synthesizers
- * Fout from synthesizer can be given from below equations:
- * Fout= Fin/((2 ^ (N+1)) * (M+1))
- */
-int gpt_clk_recalc(struct clk *clk)
-{
- struct gpt_clk_config *config = clk->private_data;
- unsigned int div = 1, val;
-
- val = readl(config->synth_reg);
- div += (val >> config->masks->mscale_sel_shift) &
- config->masks->mscale_sel_mask;
- div *= 1 << (((val >> config->masks->nscale_sel_shift) &
- config->masks->nscale_sel_mask) + 1);
-
- if (!div)
- return -EINVAL;
-
- clk->rate = (unsigned long)clk->pclk->rate / div;
- return 0;
-}
-
-/* Configures new clock rate of gptiliary synthesizers used by: UART, FIRDA*/
-int gpt_clk_set_rate(struct clk *clk, unsigned long desired_rate)
-{
- struct gpt_rate_tbl *tbls = clk->rate_config.tbls;
- struct gpt_clk_config *config = clk->private_data;
- unsigned long val, rate;
- int i;
-
- i = round_rate_index(clk, desired_rate, &rate);
- if (i < 0)
- return i;
-
- val = readl(config->synth_reg) & ~(config->masks->mscale_sel_mask <<
- config->masks->mscale_sel_shift);
- val |= (tbls[i].mscale & config->masks->mscale_sel_mask) <<
- config->masks->mscale_sel_shift;
- val &= ~(config->masks->nscale_sel_mask <<
- config->masks->nscale_sel_shift);
- val |= (tbls[i].nscale & config->masks->nscale_sel_mask) <<
- config->masks->nscale_sel_shift;
- writel(val, config->synth_reg);
-
- clk->rate = rate;
-
- return 0;
-}
-
-/*
- * Calculates clcd clk rate for different values of div
- *
- * Fout from synthesizer can be given from below equation:
- * Fout= Fin/2*div (division factor)
- * div is 17 bits:-
- * 0-13 (fractional part)
- * 14-16 (integer part)
- * To calculate Fout we left shift val by 14 bits and divide Fin by
- * complete div (including fractional part) and then right shift the
- * result by 14 places.
- */
-unsigned long clcd_calc_rate(struct clk *clk, int index)
-{
- unsigned long rate = clk->pclk->rate;
- struct clcd_rate_tbl *tbls = clk->rate_config.tbls;
-
- rate /= 1000;
- rate <<= 12;
- rate /= (2 * tbls[index].div);
- rate >>= 12;
- rate *= 1000;
-
- return rate;
-}
-
-/*
- * calculates current programmed rate of clcd synthesizer
- * Fout from synthesizer can be given from below equation:
- * Fout= Fin/2*div (division factor)
- * div is 17 bits:-
- * 0-13 (fractional part)
- * 14-16 (integer part)
- * To calculate Fout we left shift val by 14 bits and divide Fin by
- * complete div (including fractional part) and then right shift the
- * result by 14 places.
- */
-int clcd_clk_recalc(struct clk *clk)
-{
- struct clcd_clk_config *config = clk->private_data;
- unsigned int div = 1;
- unsigned long prate;
- unsigned int val;
-
- val = readl(config->synth_reg);
- div = (val >> config->masks->div_factor_shift) &
- config->masks->div_factor_mask;
-
- if (!div)
- return -EINVAL;
-
- prate = clk->pclk->rate / 1000; /* first level division, make it KHz */
-
- clk->rate = (((unsigned long)prate << 12) / (2 * div)) >> 12;
- clk->rate *= 1000;
- return 0;
-}
-
-/* Configures new clock rate of auxiliary synthesizers used by: UART, FIRDA*/
-int clcd_clk_set_rate(struct clk *clk, unsigned long desired_rate)
-{
- struct clcd_rate_tbl *tbls = clk->rate_config.tbls;
- struct clcd_clk_config *config = clk->private_data;
- unsigned long val, rate;
- int i;
-
- i = round_rate_index(clk, desired_rate, &rate);
- if (i < 0)
- return i;
-
- val = readl(config->synth_reg) & ~(config->masks->div_factor_mask <<
- config->masks->div_factor_shift);
- val |= (tbls[i].div & config->masks->div_factor_mask) <<
- config->masks->div_factor_shift;
- writel(val, config->synth_reg);
-
- clk->rate = rate;
-
- return 0;
-}
-
-/*
- * Used for clocks that always have value as the parent clock divided by a
- * fixed divisor
- */
-int follow_parent(struct clk *clk)
-{
- unsigned int div_factor = (clk->div_factor < 1) ? 1 : clk->div_factor;
-
- clk->rate = clk->pclk->rate/div_factor;
- return 0;
-}
-
-/**
- * recalc_root_clocks - recalculate and propagate all root clocks
- *
- * Recalculates all root clocks (clocks with no parent), which if the
- * clock's .recalc is set correctly, should also propagate their rates.
- */
-void recalc_root_clocks(void)
-{
- struct clk *pclk;
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&clocks_lock, flags);
- list_for_each_entry(pclk, &root_clks, sibling) {
- if (pclk->recalc) {
- ret = pclk->recalc(pclk);
- /*
- * recalc will return error if clk out is not programmed
- * In this case configure default clock.
- */
- if (ret && pclk->set_rate)
- pclk->set_rate(pclk, 0);
- }
- propagate_rate(pclk, 1);
- /* Enable clks enabled on init, in software view */
- if (pclk->flags & ENABLED_ON_INIT)
- do_clk_enable(pclk);
- }
- spin_unlock_irqrestore(&clocks_lock, flags);
-}
-
-void __init clk_init(void)
-{
- recalc_root_clocks();
-}
-
-#ifdef CONFIG_DEBUG_FS
-/*
- * debugfs support to trace clock tree hierarchy and attributes
- */
-static struct dentry *clk_debugfs_root;
-static int clk_debugfs_register_one(struct clk *c)
-{
- int err;
- struct dentry *d;
- struct clk *pa = c->pclk;
- char s[255];
- char *p = s;
-
- if (c) {
- if (c->cl->con_id)
- p += sprintf(p, "%s", c->cl->con_id);
- if (c->cl->dev_id)
- p += sprintf(p, "%s", c->cl->dev_id);
- }
- d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root);
- if (!d)
- return -ENOMEM;
- c->dent = d;
-
- d = debugfs_create_u32("usage_count", S_IRUGO, c->dent,
- (u32 *)&c->usage_count);
- if (!d) {
- err = -ENOMEM;
- goto err_out;
- }
- d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
- if (!d) {
- err = -ENOMEM;
- goto err_out;
- }
- d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
- if (!d) {
- err = -ENOMEM;
- goto err_out;
- }
- return 0;
-
-err_out:
- debugfs_remove_recursive(c->dent);
- return err;
-}
-
-static int clk_debugfs_register(struct clk *c)
-{
- int err;
- struct clk *pa = c->pclk;
-
- if (pa && !pa->dent) {
- err = clk_debugfs_register(pa);
- if (err)
- return err;
- }
-
- if (!c->dent) {
- err = clk_debugfs_register_one(c);
- if (err)
- return err;
- }
- return 0;
-}
-
-static int __init clk_debugfs_init(void)
-{
- struct clk *c;
- struct dentry *d;
- int err;
-
- d = debugfs_create_dir("clock", NULL);
- if (!d)
- return -ENOMEM;
- clk_debugfs_root = d;
-
- list_for_each_entry(c, &clocks, node) {
- err = clk_debugfs_register(c);
- if (err)
- goto err_out;
- }
- return 0;
-err_out:
- debugfs_remove_recursive(clk_debugfs_root);
- return err;
-}
-late_initcall(clk_debugfs_init);
-
-static int clk_debugfs_reparent(struct clk *c)
-{
- debugfs_remove(c->dent);
- return clk_debugfs_register_one(c);
-}
-#endif /* CONFIG_DEBUG_FS */
diff --git a/arch/arm/plat-spear/include/plat/clock.h b/arch/arm/plat-spear/include/plat/clock.h
deleted file mode 100644
index 0062bafef12..00000000000
--- a/arch/arm/plat-spear/include/plat/clock.h
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
- * arch/arm/plat-spear/include/plat/clock.h
- *
- * Clock framework definitions for SPEAr platform
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __PLAT_CLOCK_H
-#define __PLAT_CLOCK_H
-
-#include <linux/list.h>
-#include <linux/clkdev.h>
-#include <linux/types.h>
-
-/* clk structure flags */
-#define ALWAYS_ENABLED (1 << 0) /* clock always enabled */
-#define RESET_TO_ENABLE (1 << 1) /* reset register bit to enable clk */
-#define ENABLED_ON_INIT (1 << 2) /* clocks enabled at init */
-
-/**
- * struct clkops - clock operations
- * @enable: pointer to clock enable function
- * @disable: pointer to clock disable function
- */
-struct clkops {
- int (*enable) (struct clk *);
- void (*disable) (struct clk *);
-};
-
-/**
- * struct pclk_info - parents info
- * @pclk: pointer to parent clk
- * @pclk_val: value to be written for selecting this parent
- */
-struct pclk_info {
- struct clk *pclk;
- u8 pclk_val;
-};
-
-/**
- * struct pclk_sel - parents selection configuration
- * @pclk_info: pointer to array of parent clock info
- * @pclk_count: number of parents
- * @pclk_sel_reg: register for selecting a parent
- * @pclk_sel_mask: mask for selecting parent (can be used to clear bits also)
- */
-struct pclk_sel {
- struct pclk_info *pclk_info;
- u8 pclk_count;
- void __iomem *pclk_sel_reg;
- unsigned int pclk_sel_mask;
-};
-
-/**
- * struct rate_config - clk rate configurations
- * @tbls: array of device specific clk rate tables, in ascending order of rates
- * @count: size of tbls array
- * @default_index: default setting when originally disabled
- */
-struct rate_config {
- void *tbls;
- u8 count;
- u8 default_index;
-};
-
-/**
- * struct clk - clock structure
- * @usage_count: num of users who enabled this clock
- * @flags: flags for clock properties
- * @rate: programmed clock rate in Hz
- * @en_reg: clk enable/disable reg
- * @en_reg_bit: clk enable/disable bit
- * @ops: clk enable/disable ops - generic_clkops selected if NULL
- * @recalc: pointer to clock rate recalculate function
- * @set_rate: pointer to clock set rate function
- * @calc_rate: pointer to clock get rate function for index
- * @rate_config: rate configuration information, used by set_rate
- * @div_factor: division factor to parent clock.
- * @pclk: current parent clk
- * @pclk_sel: pointer to parent selection structure
- * @pclk_sel_shift: register shift for selecting parent of this clock
- * @children: list for childrens or this clock
- * @sibling: node for list of clocks having same parents
- * @private_data: clock specific private data
- * @node: list to maintain clocks linearly
- * @cl: clocklook up associated with this clock
- * @dent: object for debugfs
- */
-struct clk {
- unsigned int usage_count;
- unsigned int flags;
- unsigned long rate;
- void __iomem *en_reg;
- u8 en_reg_bit;
- const struct clkops *ops;
- int (*recalc) (struct clk *);
- int (*set_rate) (struct clk *, unsigned long rate);
- unsigned long (*calc_rate)(struct clk *, int index);
- struct rate_config rate_config;
- unsigned int div_factor;
-
- struct clk *pclk;
- struct pclk_sel *pclk_sel;
- unsigned int pclk_sel_shift;
-
- struct list_head children;
- struct list_head sibling;
- void *private_data;
-#ifdef CONFIG_DEBUG_FS
- struct list_head node;
- struct clk_lookup *cl;
- struct dentry *dent;
-#endif
-};
-
-/* pll configuration structure */
-struct pll_clk_masks {
- u32 mode_mask;
- u32 mode_shift;
-
- u32 norm_fdbk_m_mask;
- u32 norm_fdbk_m_shift;
- u32 dith_fdbk_m_mask;
- u32 dith_fdbk_m_shift;
- u32 div_p_mask;
- u32 div_p_shift;
- u32 div_n_mask;
- u32 div_n_shift;
-};
-
-struct pll_clk_config {
- void __iomem *mode_reg;
- void __iomem *cfg_reg;
- struct pll_clk_masks *masks;
-};
-
-/* pll clk rate config structure */
-struct pll_rate_tbl {
- u8 mode;
- u16 m;
- u8 n;
- u8 p;
-};
-
-/* ahb and apb bus configuration structure */
-struct bus_clk_masks {
- u32 mask;
- u32 shift;
-};
-
-struct bus_clk_config {
- void __iomem *reg;
- struct bus_clk_masks *masks;
-};
-
-/* ahb and apb clk bus rate config structure */
-struct bus_rate_tbl {
- u8 div;
-};
-
-/* Aux clk configuration structure: applicable to UART and FIRDA */
-struct aux_clk_masks {
- u32 eq_sel_mask;
- u32 eq_sel_shift;
- u32 eq1_mask;
- u32 eq2_mask;
- u32 xscale_sel_mask;
- u32 xscale_sel_shift;
- u32 yscale_sel_mask;
- u32 yscale_sel_shift;
-};
-
-struct aux_clk_config {
- void __iomem *synth_reg;
- struct aux_clk_masks *masks;
-};
-
-/* aux clk rate config structure */
-struct aux_rate_tbl {
- u16 xscale;
- u16 yscale;
- u8 eq;
-};
-
-/* GPT clk configuration structure */
-struct gpt_clk_masks {
- u32 mscale_sel_mask;
- u32 mscale_sel_shift;
- u32 nscale_sel_mask;
- u32 nscale_sel_shift;
-};
-
-struct gpt_clk_config {
- void __iomem *synth_reg;
- struct gpt_clk_masks *masks;
-};
-
-/* gpt clk rate config structure */
-struct gpt_rate_tbl {
- u16 mscale;
- u16 nscale;
-};
-
-/* clcd clk configuration structure */
-struct clcd_synth_masks {
- u32 div_factor_mask;
- u32 div_factor_shift;
-};
-
-struct clcd_clk_config {
- void __iomem *synth_reg;
- struct clcd_synth_masks *masks;
-};
-
-/* clcd clk rate config structure */
-struct clcd_rate_tbl {
- u16 div;
-};
-
-/* platform specific clock functions */
-void __init clk_init(void);
-void clk_register(struct clk_lookup *cl);
-void recalc_root_clocks(void);
-
-/* clock recalc & set rate functions */
-int follow_parent(struct clk *clk);
-unsigned long pll_calc_rate(struct clk *clk, int index);
-int pll_clk_recalc(struct clk *clk);
-int pll_clk_set_rate(struct clk *clk, unsigned long desired_rate);
-unsigned long bus_calc_rate(struct clk *clk, int index);
-int bus_clk_recalc(struct clk *clk);
-int bus_clk_set_rate(struct clk *clk, unsigned long desired_rate);
-unsigned long gpt_calc_rate(struct clk *clk, int index);
-int gpt_clk_recalc(struct clk *clk);
-int gpt_clk_set_rate(struct clk *clk, unsigned long desired_rate);
-unsigned long aux_calc_rate(struct clk *clk, int index);
-int aux_clk_recalc(struct clk *clk);
-int aux_clk_set_rate(struct clk *clk, unsigned long desired_rate);
-unsigned long clcd_calc_rate(struct clk *clk, int index);
-int clcd_clk_recalc(struct clk *clk);
-int clcd_clk_set_rate(struct clk *clk, unsigned long desired_rate);
-
-#endif /* __PLAT_CLOCK_H */
diff --git a/arch/arm/plat-spear/restart.c b/arch/arm/plat-spear/restart.c
index 4471a232713..ea0a61302b7 100644
--- a/arch/arm/plat-spear/restart.c
+++ b/arch/arm/plat-spear/restart.c
@@ -16,6 +16,7 @@
#include <mach/spear.h>
#include <mach/generic.h>
+#define SPEAR13XX_SYS_SW_RES (VA_MISC_BASE + 0x204)
void spear_restart(char mode, const char *cmd)
{
if (mode == 's') {
@@ -23,6 +24,10 @@ void spear_restart(char mode, const char *cmd)
soft_restart(0);
} else {
/* hardware reset, Use on-chip reset capability */
+#ifdef CONFIG_ARCH_SPEAR13XX
+ writel_relaxed(0x01, SPEAR13XX_SYS_SW_RES);
+#else
sysctl_soft_reset((void __iomem *)VA_SPEAR_SYS_CTRL_BASE);
+#endif
}
}
diff --git a/arch/arm/plat-spear/time.c b/arch/arm/plat-spear/time.c
index a3164d1647f..03321af5de9 100644
--- a/arch/arm/plat-spear/time.c
+++ b/arch/arm/plat-spear/time.c
@@ -18,6 +18,8 @@
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
#include <linux/time.h>
#include <linux/irq.h>
#include <asm/mach/time.h>
@@ -197,19 +199,32 @@ static void __init spear_clockevent_init(int irq)
setup_irq(irq, &spear_timer_irq);
}
-void __init spear_setup_timer(resource_size_t base, int irq)
+const static struct of_device_id timer_of_match[] __initconst = {
+ { .compatible = "st,spear-timer", },
+ { },
+};
+
+void __init spear_setup_of_timer(void)
{
- int ret;
+ struct device_node *np;
+ int irq, ret;
+
+ np = of_find_matching_node(NULL, timer_of_match);
+ if (!np) {
+ pr_err("%s: No timer passed via DT\n", __func__);
+ return;
+ }
- if (!request_mem_region(base, SZ_1K, "gpt0")) {
- pr_err("%s:cannot get IO addr\n", __func__);
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ pr_err("%s: No irq passed for timer via DT\n", __func__);
return;
}
- gpt_base = ioremap(base, SZ_1K);
+ gpt_base = of_iomap(np, 0);
if (!gpt_base) {
- pr_err("%s:ioremap failed for gpt\n", __func__);
- goto err_mem;
+ pr_err("%s: of iomap failed\n", __func__);
+ return;
}
gpt_clk = clk_get_sys("gpt0", NULL);
@@ -218,10 +233,10 @@ void __init spear_setup_timer(resource_size_t base, int irq)
goto err_iomap;
}
- ret = clk_enable(gpt_clk);
+ ret = clk_prepare_enable(gpt_clk);
if (ret < 0) {
- pr_err("%s:couldn't enable gpt clock\n", __func__);
- goto err_clk;
+ pr_err("%s:couldn't prepare-enable gpt clock\n", __func__);
+ goto err_prepare_enable_clk;
}
spear_clockevent_init(irq);
@@ -229,10 +244,8 @@ void __init spear_setup_timer(resource_size_t base, int irq)
return;
-err_clk:
+err_prepare_enable_clk:
clk_put(gpt_clk);
err_iomap:
iounmap(gpt_base);
-err_mem:
- release_mem_region(base, SZ_1K);
}
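
The switch from clk_enable() to clk_prepare_enable() in the hunk above reflects the clock API split between the prepare step (which may sleep) and the enable step (which must be atomic). clk_prepare_enable() is a convenience wrapper; the sketch below shows its usual shape, assuming the standard <linux/clk.h> interface, and is not claimed to be the exact in-tree implementation.

#include <linux/clk.h>

/* Sketch: prepare first (may sleep), then enable (atomic); undo the
 * prepare if the enable fails. */
static int example_clk_prepare_enable(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);
	if (ret)
		return ret;

	ret = clk_enable(clk);
	if (ret)
		clk_unprepare(clk);

	return ret;
}
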
diff --git a/arch/avr32/include/asm/kvm_para.h b/arch/avr32/include/asm/kvm_para.h
new file mode 100644
index 00000000000..14fab8f0b95
--- /dev/null
+++ b/arch/avr32/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/blackfin/include/asm/kvm_para.h b/arch/blackfin/include/asm/kvm_para.h
new file mode 100644
index 00000000000..14fab8f0b95
--- /dev/null
+++ b/arch/blackfin/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/c6x/include/asm/kvm_para.h b/arch/c6x/include/asm/kvm_para.h
new file mode 100644
index 00000000000..14fab8f0b95
--- /dev/null
+++ b/arch/c6x/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 22d34d64cc8..bb344650a14 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -40,6 +40,7 @@ config CRIS
bool
default y
select HAVE_IDE
+ select GENERIC_ATOMIC64
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_SHOW
select GENERIC_IOMAP
diff --git a/arch/cris/arch-v10/drivers/ds1302.c b/arch/cris/arch-v10/drivers/ds1302.c
deleted file mode 100644
index 74f99c688c8..00000000000
--- a/arch/cris/arch-v10/drivers/ds1302.c
+++ /dev/null
@@ -1,515 +0,0 @@
-/*!***************************************************************************
-*!
-*! FILE NAME : ds1302.c
-*!
-*! DESCRIPTION: Implements an interface for the DS1302 RTC through Etrax I/O
-*!
-*! Functions exported: ds1302_readreg, ds1302_writereg, ds1302_init
-*!
-*! ---------------------------------------------------------------------------
-*!
-*! (C) Copyright 1999-2007 Axis Communications AB, LUND, SWEDEN
-*!
-*!***************************************************************************/
-
-
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/miscdevice.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/bcd.h>
-#include <linux/capability.h>
-
-#include <asm/uaccess.h>
-#include <arch/svinto.h>
-#include <asm/io.h>
-#include <asm/rtc.h>
-#include <arch/io_interface_mux.h>
-
-#include "i2c.h"
-
-#define RTC_MAJOR_NR 121 /* local major, change later */
-
-static DEFINE_MUTEX(ds1302_mutex);
-static const char ds1302_name[] = "ds1302";
-
-/* The DS1302 might be connected to different bits on different products.
- * It has three signals - SDA, SCL and RST. RST and SCL are always outputs,
- * but SDA can have a selected direction.
- * For now, only PORT_PB is hardcoded.
- */
-
-/* The RST bit may be on either the Generic Port or Port PB. */
-#ifdef CONFIG_ETRAX_DS1302_RST_ON_GENERIC_PORT
-#define TK_RST_OUT(x) REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow, CONFIG_ETRAX_DS1302_RSTBIT, x)
-#define TK_RST_DIR(x)
-#else
-#define TK_RST_OUT(x) REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_DS1302_RSTBIT, x)
-#define TK_RST_DIR(x) REG_SHADOW_SET(R_PORT_PB_DIR, port_pb_dir_shadow, CONFIG_ETRAX_DS1302_RSTBIT, x)
-#endif
-
-
-#define TK_SDA_OUT(x) REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_DS1302_SDABIT, x)
-#define TK_SCL_OUT(x) REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_DS1302_SCLBIT, x)
-
-#define TK_SDA_IN() ((*R_PORT_PB_READ >> CONFIG_ETRAX_DS1302_SDABIT) & 1)
-/* 1 is out, 0 is in */
-#define TK_SDA_DIR(x) REG_SHADOW_SET(R_PORT_PB_DIR, port_pb_dir_shadow, CONFIG_ETRAX_DS1302_SDABIT, x)
-#define TK_SCL_DIR(x) REG_SHADOW_SET(R_PORT_PB_DIR, port_pb_dir_shadow, CONFIG_ETRAX_DS1302_SCLBIT, x)
-
-
-/*
- * The reason for tempudelay and not udelay is that loops_per_usec
- * (used in udelay) is not set when functions here are called from time.c
- */
-
-static void tempudelay(int usecs)
-{
- volatile int loops;
-
- for(loops = usecs * 12; loops > 0; loops--)
- /* nothing */;
-}
-
-
-/* Send 8 bits. */
-static void
-out_byte(unsigned char x)
-{
- int i;
- TK_SDA_DIR(1);
- for (i = 8; i--;) {
- /* The chip latches incoming bits on the rising edge of SCL. */
- TK_SCL_OUT(0);
- TK_SDA_OUT(x & 1);
- tempudelay(1);
- TK_SCL_OUT(1);
- tempudelay(1);
- x >>= 1;
- }
- TK_SDA_DIR(0);
-}
-
-static unsigned char
-in_byte(void)
-{
- unsigned char x = 0;
- int i;
-
- /* Read byte. Bits come LSB first, on the falling edge of SCL.
- * Assume SDA is in input direction already.
- */
- TK_SDA_DIR(0);
-
- for (i = 8; i--;) {
- TK_SCL_OUT(0);
- tempudelay(1);
- x >>= 1;
- x |= (TK_SDA_IN() << 7);
- TK_SCL_OUT(1);
- tempudelay(1);
- }
-
- return x;
-}
-
-/* Prepares for a transaction by de-activating RST (active-low). */
-
-static void
-start(void)
-{
- TK_SCL_OUT(0);
- tempudelay(1);
- TK_RST_OUT(0);
- tempudelay(5);
- TK_RST_OUT(1);
-}
-
-/* Ends a transaction by taking RST active again. */
-
-static void
-stop(void)
-{
- tempudelay(2);
- TK_RST_OUT(0);
-}
-
-/* Enable writing. */
-
-static void
-ds1302_wenable(void)
-{
- start();
- out_byte(0x8e); /* Write control register */
- out_byte(0x00); /* Disable write protect bit 7 = 0 */
- stop();
-}
-
-/* Disable writing. */
-
-static void
-ds1302_wdisable(void)
-{
- start();
- out_byte(0x8e); /* Write control register */
- out_byte(0x80); /* Disable write protect bit 7 = 0 */
- stop();
-}
-
-
-
-/* Read a byte from the selected register in the DS1302. */
-
-unsigned char
-ds1302_readreg(int reg)
-{
- unsigned char x;
-
- start();
- out_byte(0x81 | (reg << 1)); /* read register */
- x = in_byte();
- stop();
-
- return x;
-}
-
-/* Write a byte to the selected register. */
-
-void
-ds1302_writereg(int reg, unsigned char val)
-{
-#ifndef CONFIG_ETRAX_RTC_READONLY
- int do_writereg = 1;
-#else
- int do_writereg = 0;
-
- if (reg == RTC_TRICKLECHARGER)
- do_writereg = 1;
-#endif
-
- if (do_writereg) {
- ds1302_wenable();
- start();
- out_byte(0x80 | (reg << 1)); /* write register */
- out_byte(val);
- stop();
- ds1302_wdisable();
- }
-}
-
-void
-get_rtc_time(struct rtc_time *rtc_tm)
-{
- unsigned long flags;
-
- local_irq_save(flags);
-
- rtc_tm->tm_sec = CMOS_READ(RTC_SECONDS);
- rtc_tm->tm_min = CMOS_READ(RTC_MINUTES);
- rtc_tm->tm_hour = CMOS_READ(RTC_HOURS);
- rtc_tm->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
- rtc_tm->tm_mon = CMOS_READ(RTC_MONTH);
- rtc_tm->tm_year = CMOS_READ(RTC_YEAR);
-
- local_irq_restore(flags);
-
- rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
- rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
- rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
- rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
- rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
- rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
-
- /*
- * Account for differences between how the RTC uses the values
- * and how they are defined in a struct rtc_time;
- */
-
- if (rtc_tm->tm_year <= 69)
- rtc_tm->tm_year += 100;
-
- rtc_tm->tm_mon--;
-}
-
-static unsigned char days_in_mo[] =
- {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
-
-/* ioctl that supports RTC_RD_TIME and RTC_SET_TIME (read and set time/date). */
-
-static int rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- unsigned long flags;
-
- switch(cmd) {
- case RTC_RD_TIME: /* read the time/date from RTC */
- {
- struct rtc_time rtc_tm;
-
- memset(&rtc_tm, 0, sizeof (struct rtc_time));
- get_rtc_time(&rtc_tm);
- if (copy_to_user((struct rtc_time*)arg, &rtc_tm, sizeof(struct rtc_time)))
- return -EFAULT;
- return 0;
- }
-
- case RTC_SET_TIME: /* set the RTC */
- {
- struct rtc_time rtc_tm;
- unsigned char mon, day, hrs, min, sec, leap_yr;
- unsigned int yrs;
-
- if (!capable(CAP_SYS_TIME))
- return -EPERM;
-
- if (copy_from_user(&rtc_tm, (struct rtc_time*)arg, sizeof(struct rtc_time)))
- return -EFAULT;
-
- yrs = rtc_tm.tm_year + 1900;
- mon = rtc_tm.tm_mon + 1; /* tm_mon starts at zero */
- day = rtc_tm.tm_mday;
- hrs = rtc_tm.tm_hour;
- min = rtc_tm.tm_min;
- sec = rtc_tm.tm_sec;
-
-
- if ((yrs < 1970) || (yrs > 2069))
- return -EINVAL;
-
- leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));
-
- if ((mon > 12) || (day == 0))
- return -EINVAL;
-
- if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
- return -EINVAL;
-
- if ((hrs >= 24) || (min >= 60) || (sec >= 60))
- return -EINVAL;
-
- if (yrs >= 2000)
- yrs -= 2000; /* RTC (0, 1, ... 69) */
- else
- yrs -= 1900; /* RTC (70, 71, ... 99) */
-
- sec = bin2bcd(sec);
- min = bin2bcd(min);
- hrs = bin2bcd(hrs);
- day = bin2bcd(day);
- mon = bin2bcd(mon);
- yrs = bin2bcd(yrs);
-
- local_irq_save(flags);
- CMOS_WRITE(yrs, RTC_YEAR);
- CMOS_WRITE(mon, RTC_MONTH);
- CMOS_WRITE(day, RTC_DAY_OF_MONTH);
- CMOS_WRITE(hrs, RTC_HOURS);
- CMOS_WRITE(min, RTC_MINUTES);
- CMOS_WRITE(sec, RTC_SECONDS);
- local_irq_restore(flags);
-
- /* Notice that at this point, the RTC is updated but
- * the kernel is still running with the old time.
- * You need to set that separately with settimeofday
- * or adjtimex.
- */
- return 0;
- }
-
- case RTC_SET_CHARGE: /* set the RTC TRICKLE CHARGE register */
- {
- int tcs_val;
-
- if (!capable(CAP_SYS_TIME))
- return -EPERM;
-
- if(copy_from_user(&tcs_val, (int*)arg, sizeof(int)))
- return -EFAULT;
-
- tcs_val = RTC_TCR_PATTERN | (tcs_val & 0x0F);
- ds1302_writereg(RTC_TRICKLECHARGER, tcs_val);
- return 0;
- }
- case RTC_VL_READ:
- {
- /* TODO:
- * Implement voltage low detection support
- */
- printk(KERN_WARNING "DS1302: RTC Voltage Low detection"
- " is not supported\n");
- return 0;
- }
- case RTC_VL_CLR:
- {
- /* TODO:
- * Nothing to do since Voltage Low detection is not supported
- */
- return 0;
- }
- default:
- return -ENOIOCTLCMD;
- }
-}
-
-static long rtc_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int ret;
-
- mutex_lock(&ds1302_mutex);
- ret = rtc_ioctl(file, cmd, arg);
- mutex_unlock(&ds1302_mutex);
-
- return ret;
-}
-
-static void
-print_rtc_status(void)
-{
- struct rtc_time tm;
-
- get_rtc_time(&tm);
-
- /*
- * There is no way to tell if the luser has the RTC set for local
- * time or for Universal Standard Time (GMT). Probably local though.
- */
-
- printk(KERN_INFO "rtc_time\t: %02d:%02d:%02d\n",
- tm.tm_hour, tm.tm_min, tm.tm_sec);
- printk(KERN_INFO "rtc_date\t: %04d-%02d-%02d\n",
- tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);
-}
-
-/* The various file operations we support. */
-
-static const struct file_operations rtc_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = rtc_unlocked_ioctl,
- .llseek = noop_llseek,
-};
-
-/* Probe for the chip by writing something to its RAM and try reading it back. */
-
-#define MAGIC_PATTERN 0x42
-
-static int __init
-ds1302_probe(void)
-{
- int retval, res;
-
- TK_RST_DIR(1);
- TK_SCL_DIR(1);
- TK_SDA_DIR(0);
-
- /* Try to talk to timekeeper. */
-
- ds1302_wenable();
- start();
- out_byte(0xc0); /* write RAM byte 0 */
- out_byte(MAGIC_PATTERN); /* write something magic */
- start();
- out_byte(0xc1); /* read RAM byte 0 */
-
- if((res = in_byte()) == MAGIC_PATTERN) {
- stop();
- ds1302_wdisable();
- printk(KERN_INFO "%s: RTC found.\n", ds1302_name);
- printk(KERN_INFO "%s: SDA, SCL, RST on PB%i, PB%i, %s%i\n",
- ds1302_name,
- CONFIG_ETRAX_DS1302_SDABIT,
- CONFIG_ETRAX_DS1302_SCLBIT,
-#ifdef CONFIG_ETRAX_DS1302_RST_ON_GENERIC_PORT
- "GENIO",
-#else
- "PB",
-#endif
- CONFIG_ETRAX_DS1302_RSTBIT);
- print_rtc_status();
- retval = 1;
- } else {
- stop();
- retval = 0;
- }
-
- return retval;
-}
-
-
-/* Just probe for the RTC and register the device to handle the ioctl needed. */
-
-int __init
-ds1302_init(void)
-{
-#ifdef CONFIG_ETRAX_I2C
- i2c_init();
-#endif
-
- if (!ds1302_probe()) {
-#ifdef CONFIG_ETRAX_DS1302_RST_ON_GENERIC_PORT
-#if CONFIG_ETRAX_DS1302_RSTBIT == 27
- /*
- * The only way to set g27 to output is to enable ATA.
- *
- * Make sure that R_GEN_CONFIG is setup correct.
- */
- /* Allocating the ATA interface will grab almost all
- * pins in I/O groups a, b, c and d. A consequence of
- * allocating the ATA interface is that the fixed
- * interfaces shared RAM, parallel port 0, parallel
- * port 1, parallel port W, SCSI-8 port 0, SCSI-8 port
- * 1, SCSI-W, serial port 2, serial port 3,
- * synchronous serial port 3 and USB port 2 and almost
- * all GPIO pins on port g cannot be used.
- */
- if (cris_request_io_interface(if_ata, "ds1302/ATA")) {
- printk(KERN_WARNING "ds1302: Failed to get IO interface\n");
- return -1;
- }
-
-#elif CONFIG_ETRAX_DS1302_RSTBIT == 0
- if (cris_io_interface_allocate_pins(if_gpio_grp_a,
- 'g',
- CONFIG_ETRAX_DS1302_RSTBIT,
- CONFIG_ETRAX_DS1302_RSTBIT)) {
- printk(KERN_WARNING "ds1302: Failed to get IO interface\n");
- return -1;
- }
-
- /* Set the direction of this bit to out. */
- genconfig_shadow = ((genconfig_shadow &
- ~IO_MASK(R_GEN_CONFIG, g0dir)) |
- (IO_STATE(R_GEN_CONFIG, g0dir, out)));
- *R_GEN_CONFIG = genconfig_shadow;
-#endif
- if (!ds1302_probe()) {
- printk(KERN_WARNING "%s: RTC not found.\n", ds1302_name);
- return -1;
- }
-#else
- printk(KERN_WARNING "%s: RTC not found.\n", ds1302_name);
- return -1;
-#endif
- }
- /* Initialise trickle charger */
- ds1302_writereg(RTC_TRICKLECHARGER,
- RTC_TCR_PATTERN |(CONFIG_ETRAX_DS1302_TRICKLE_CHARGE & 0x0F));
- /* Start clock by resetting CLOCK_HALT */
- ds1302_writereg(RTC_SECONDS, (ds1302_readreg(RTC_SECONDS) & 0x7F));
- return 0;
-}
-
-static int __init ds1302_register(void)
-{
- ds1302_init();
- if (register_chrdev(RTC_MAJOR_NR, ds1302_name, &rtc_fops)) {
- printk(KERN_INFO "%s: unable to get major %d for rtc\n",
- ds1302_name, RTC_MAJOR_NR);
- return -1;
- }
- return 0;
-
-}
-
-module_init(ds1302_register);
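
For readers without the datasheet: the deleted driver bit-bangs the DS1302's 3-wire interface, with out_byte() shifting each byte out LSB first and latching it on the rising edge of SCL, and in_byte() shifting the reply back in LSB first after each falling edge. The stand-alone C sketch below models only that bit ordering against an in-memory "wire" array; the shift logic mirrors the functions above, and everything else (GPIO access, timing) is deliberately left out.

/* Illustrative user-space model of the DS1302 bit ordering used above. */
#include <stdio.h>

static unsigned char wire[8];		/* simulated SDA levels, index 0 sent first */

static void shift_out_lsb_first(unsigned char x)
{
	int i;

	for (i = 0; i < 8; i++) {
		wire[i] = x & 1;	/* SDA level sampled on the rising SCL edge */
		x >>= 1;
	}
}

static unsigned char shift_in_lsb_first(void)
{
	unsigned char x = 0;
	int i;

	for (i = 0; i < 8; i++) {
		x >>= 1;
		x |= (unsigned char)(wire[i] << 7);	/* same trick as in_byte() */
	}
	return x;
}

int main(void)
{
	shift_out_lsb_first(0x8e);	/* e.g. the control-register address */
	printf("round trip: 0x%02x\n", shift_in_lsb_first());	/* prints 0x8e */
	return 0;
}
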
diff --git a/arch/cris/arch-v10/drivers/pcf8563.c b/arch/cris/arch-v10/drivers/pcf8563.c
deleted file mode 100644
index 9da056860c9..00000000000
--- a/arch/cris/arch-v10/drivers/pcf8563.c
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * PCF8563 RTC
- *
- * From Philips' datasheet:
- *
- * The PCF8563 is a CMOS real-time clock/calendar optimized for low power
- * consumption. A programmable clock output, interrupt output and voltage
- * low detector are also provided. All address and data are transferred
- * serially via two-line bidirectional I2C-bus. Maximum bus speed is
- * 400 kbits/s. The built-in word address register is incremented
- * automatically after each written or read byte.
- *
- * Copyright (c) 2002-2007, Axis Communications AB
- * All rights reserved.
- *
- * Author: Tobias Anderberg <tobiasa@axis.com>.
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/ioctl.h>
-#include <linux/delay.h>
-#include <linux/bcd.h>
-#include <linux/mutex.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/rtc.h>
-
-#include "i2c.h"
-
-#define PCF8563_MAJOR 121 /* Local major number. */
-#define DEVICE_NAME "rtc" /* Name which is registered in /proc/devices. */
-#define PCF8563_NAME "PCF8563"
-#define DRIVER_VERSION "$Revision: 1.24 $"
-
-/* I2C bus slave registers. */
-#define RTC_I2C_READ 0xa3
-#define RTC_I2C_WRITE 0xa2
-
-/* Two simple wrapper macros, saves a few keystrokes. */
-#define rtc_read(x) i2c_readreg(RTC_I2C_READ, x)
-#define rtc_write(x,y) i2c_writereg(RTC_I2C_WRITE, x, y)
-
-static DEFINE_MUTEX(pcf8563_mutex);
-static DEFINE_MUTEX(rtc_lock); /* Protect state etc */
-
-static const unsigned char days_in_month[] =
- { 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
-
-static long pcf8563_unlocked_ioctl(struct file *, unsigned int, unsigned long);
-
-/* Cache VL bit value read at driver init since writing the RTC_SECOND
- * register clears the VL status.
- */
-static int voltage_low;
-
-static const struct file_operations pcf8563_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = pcf8563_unlocked_ioctl,
- .llseek = noop_llseek,
-};
-
-unsigned char
-pcf8563_readreg(int reg)
-{
- unsigned char res = rtc_read(reg);
-
- /* The PCF8563 does not return 0 for unimplemented bits. */
- switch (reg) {
- case RTC_SECONDS:
- case RTC_MINUTES:
- res &= 0x7F;
- break;
- case RTC_HOURS:
- case RTC_DAY_OF_MONTH:
- res &= 0x3F;
- break;
- case RTC_WEEKDAY:
- res &= 0x07;
- break;
- case RTC_MONTH:
- res &= 0x1F;
- break;
- case RTC_CONTROL1:
- res &= 0xA8;
- break;
- case RTC_CONTROL2:
- res &= 0x1F;
- break;
- case RTC_CLOCKOUT_FREQ:
- case RTC_TIMER_CONTROL:
- res &= 0x83;
- break;
- }
- return res;
-}
-
-void
-pcf8563_writereg(int reg, unsigned char val)
-{
- rtc_write(reg, val);
-}
-
-void
-get_rtc_time(struct rtc_time *tm)
-{
- tm->tm_sec = rtc_read(RTC_SECONDS);
- tm->tm_min = rtc_read(RTC_MINUTES);
- tm->tm_hour = rtc_read(RTC_HOURS);
- tm->tm_mday = rtc_read(RTC_DAY_OF_MONTH);
- tm->tm_wday = rtc_read(RTC_WEEKDAY);
- tm->tm_mon = rtc_read(RTC_MONTH);
- tm->tm_year = rtc_read(RTC_YEAR);
-
- if (tm->tm_sec & 0x80) {
- printk(KERN_ERR "%s: RTC Voltage Low - reliable date/time "
- "information is no longer guaranteed!\n", PCF8563_NAME);
- }
-
- tm->tm_year = bcd2bin(tm->tm_year) +
- ((tm->tm_mon & 0x80) ? 100 : 0);
- tm->tm_sec &= 0x7F;
- tm->tm_min &= 0x7F;
- tm->tm_hour &= 0x3F;
- tm->tm_mday &= 0x3F;
- tm->tm_wday &= 0x07; /* Not coded in BCD. */
- tm->tm_mon &= 0x1F;
-
- tm->tm_sec = bcd2bin(tm->tm_sec);
- tm->tm_min = bcd2bin(tm->tm_min);
- tm->tm_hour = bcd2bin(tm->tm_hour);
- tm->tm_mday = bcd2bin(tm->tm_mday);
- tm->tm_mon = bcd2bin(tm->tm_mon);
- tm->tm_mon--; /* Month is 1..12 in RTC but 0..11 in linux */
-}
-
-int __init
-pcf8563_init(void)
-{
- static int res;
- static int first = 1;
-
- if (!first)
- return res;
- first = 0;
-
- /* Initiate the i2c protocol. */
- res = i2c_init();
- if (res < 0) {
- printk(KERN_CRIT "pcf8563_init: Failed to init i2c.\n");
- return res;
- }
-
- /*
- * First of all we need to reset the chip. This is done by
- * clearing control1, control2 and clk freq and resetting
- * all alarms.
- */
- if (rtc_write(RTC_CONTROL1, 0x00) < 0)
- goto err;
-
- if (rtc_write(RTC_CONTROL2, 0x00) < 0)
- goto err;
-
- if (rtc_write(RTC_CLOCKOUT_FREQ, 0x00) < 0)
- goto err;
-
- if (rtc_write(RTC_TIMER_CONTROL, 0x03) < 0)
- goto err;
-
- /* Reset the alarms. */
- if (rtc_write(RTC_MINUTE_ALARM, 0x80) < 0)
- goto err;
-
- if (rtc_write(RTC_HOUR_ALARM, 0x80) < 0)
- goto err;
-
- if (rtc_write(RTC_DAY_ALARM, 0x80) < 0)
- goto err;
-
- if (rtc_write(RTC_WEEKDAY_ALARM, 0x80) < 0)
- goto err;
-
- /* Check for low voltage, and warn about it. */
- if (rtc_read(RTC_SECONDS) & 0x80) {
- voltage_low = 1;
- printk(KERN_WARNING "%s: RTC Voltage Low - reliable "
- "date/time information is no longer guaranteed!\n",
- PCF8563_NAME);
- }
-
- return res;
-
-err:
- printk(KERN_INFO "%s: Error initializing chip.\n", PCF8563_NAME);
- res = -1;
- return res;
-}
-
-void __exit
-pcf8563_exit(void)
-{
- unregister_chrdev(PCF8563_MAJOR, DEVICE_NAME);
-}
-
-/*
- * ioctl calls for this driver. Why return -ENOTTY upon error? Because
- * POSIX says so!
- */
-static int pcf8563_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- /* Some sanity checks. */
- if (_IOC_TYPE(cmd) != RTC_MAGIC)
- return -ENOTTY;
-
- if (_IOC_NR(cmd) > RTC_MAX_IOCTL)
- return -ENOTTY;
-
- switch (cmd) {
- case RTC_RD_TIME:
- {
- struct rtc_time tm;
-
- mutex_lock(&rtc_lock);
- memset(&tm, 0, sizeof tm);
- get_rtc_time(&tm);
-
- if (copy_to_user((struct rtc_time *) arg, &tm,
- sizeof tm)) {
- mutex_unlock(&rtc_lock);
- return -EFAULT;
- }
-
- mutex_unlock(&rtc_lock);
-
- return 0;
- }
- case RTC_SET_TIME:
- {
- int leap;
- int year;
- int century;
- struct rtc_time tm;
-
- memset(&tm, 0, sizeof tm);
- if (!capable(CAP_SYS_TIME))
- return -EPERM;
-
- if (copy_from_user(&tm, (struct rtc_time *) arg, sizeof tm))
- return -EFAULT;
-
- /* Convert from struct tm to struct rtc_time. */
- tm.tm_year += 1900;
- tm.tm_mon += 1;
-
- /*
- * Check if tm.tm_year is a leap year. A year is a leap
- * year if it is divisible by 4 but not 100, except
- * that years divisible by 400 _are_ leap years.
- */
- year = tm.tm_year;
- leap = (tm.tm_mon == 2) &&
- ((year % 4 == 0 && year % 100 != 0) || year % 400 == 0);
-
- /* Perform some sanity checks. */
- if ((tm.tm_year < 1970) ||
- (tm.tm_mon > 12) ||
- (tm.tm_mday == 0) ||
- (tm.tm_mday > days_in_month[tm.tm_mon] + leap) ||
- (tm.tm_wday >= 7) ||
- (tm.tm_hour >= 24) ||
- (tm.tm_min >= 60) ||
- (tm.tm_sec >= 60))
- return -EINVAL;
-
- century = (tm.tm_year >= 2000) ? 0x80 : 0;
- tm.tm_year = tm.tm_year % 100;
-
- tm.tm_year = bin2bcd(tm.tm_year);
- tm.tm_mon = bin2bcd(tm.tm_mon);
- tm.tm_mday = bin2bcd(tm.tm_mday);
- tm.tm_hour = bin2bcd(tm.tm_hour);
- tm.tm_min = bin2bcd(tm.tm_min);
- tm.tm_sec = bin2bcd(tm.tm_sec);
- tm.tm_mon |= century;
-
- mutex_lock(&rtc_lock);
-
- rtc_write(RTC_YEAR, tm.tm_year);
- rtc_write(RTC_MONTH, tm.tm_mon);
- rtc_write(RTC_WEEKDAY, tm.tm_wday); /* Not coded in BCD. */
- rtc_write(RTC_DAY_OF_MONTH, tm.tm_mday);
- rtc_write(RTC_HOURS, tm.tm_hour);
- rtc_write(RTC_MINUTES, tm.tm_min);
- rtc_write(RTC_SECONDS, tm.tm_sec);
-
- mutex_unlock(&rtc_lock);
-
- return 0;
- }
- case RTC_VL_READ:
- if (voltage_low) {
- printk(KERN_ERR "%s: RTC Voltage Low - "
- "reliable date/time information is no "
- "longer guaranteed!\n", PCF8563_NAME);
- }
-
- if (copy_to_user((int *) arg, &voltage_low, sizeof(int)))
- return -EFAULT;
- return 0;
-
- case RTC_VL_CLR:
- {
- /* Clear the VL bit in the seconds register in case
- * the time has not been set already (which would
- * have cleared it). This does not really matter
- * because of the cached voltage_low value but do it
- * anyway for consistency. */
-
- int ret = rtc_read(RTC_SECONDS);
-
- rtc_write(RTC_SECONDS, (ret & 0x7F));
-
- /* Clear the cached value. */
- voltage_low = 0;
-
- return 0;
- }
- default:
- return -ENOTTY;
- }
-
- return 0;
-}
-
-static long pcf8563_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- int ret;
-
- mutex_lock(&pcf8563_mutex);
- ret = pcf8563_ioctl(filp, cmd, arg);
- mutex_unlock(&pcf8563_mutex);
-
- return ret;
-}
-
-static int __init pcf8563_register(void)
-{
- if (pcf8563_init() < 0) {
- printk(KERN_INFO "%s: Unable to initialize Real-Time Clock "
- "Driver, %s\n", PCF8563_NAME, DRIVER_VERSION);
- return -1;
- }
-
- if (register_chrdev(PCF8563_MAJOR, DEVICE_NAME, &pcf8563_fops) < 0) {
- printk(KERN_INFO "%s: Unable to get major number %d for RTC device.\n",
- PCF8563_NAME, PCF8563_MAJOR);
- return -1;
- }
-
- printk(KERN_INFO "%s Real-Time Clock Driver, %s\n", PCF8563_NAME,
- DRIVER_VERSION);
-
- /* Check for low voltage, and warn about it. */
- if (voltage_low) {
- printk(KERN_WARNING "%s: RTC Voltage Low - reliable date/time "
- "information is no longer guaranteed!\n", PCF8563_NAME);
- }
-
- return 0;
-}
-
-module_init(pcf8563_register);
-module_exit(pcf8563_exit);
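
All time fields in the driver above travel as BCD, and pcf8563_readreg()/get_rtc_time() mask off the bits each register does not implement (seconds and minutes & 0x7F, hours and day-of-month & 0x3F, month & 0x1F, weekday & 0x07). Below is a stand-alone sketch of that decode step with made-up raw register values; the bcd2bin() helper is a local re-implementation of the one the driver pulls in from <linux/bcd.h>.

#include <stdio.h>

static unsigned int bcd2bin(unsigned char val)
{
	return (val & 0x0f) + (val >> 4) * 10;
}

int main(void)
{
	/* Hypothetical raw reads; bit 7 of the seconds register is the VL flag. */
	unsigned char sec = 0x59, min = 0x07, hour = 0x23;
	unsigned char mday = 0x31, mon = 0x12, year = 0x12;

	printf("20%02u-%02u-%02u %02u:%02u:%02u%s\n",
	       bcd2bin(year), bcd2bin(mon & 0x1f), bcd2bin(mday & 0x3f),
	       bcd2bin(hour & 0x3f), bcd2bin(min & 0x7f), bcd2bin(sec & 0x7f),
	       (sec & 0x80) ? " (RTC voltage low!)" : "");
	return 0;
}
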
diff --git a/arch/cris/arch-v10/kernel/fasttimer.c b/arch/cris/arch-v10/kernel/fasttimer.c
index 8a8196ee8ce..082f1890bac 100644
--- a/arch/cris/arch-v10/kernel/fasttimer.c
+++ b/arch/cris/arch-v10/kernel/fasttimer.c
@@ -21,8 +21,6 @@
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/delay.h>
-#include <asm/rtc.h>
-
#include <arch/svinto.h>
#include <asm/fasttimer.h>
diff --git a/arch/cris/arch-v10/kernel/kgdb.c b/arch/cris/arch-v10/kernel/kgdb.c
index b579dd02e09..37e6d2c50b7 100644
--- a/arch/cris/arch-v10/kernel/kgdb.c
+++ b/arch/cris/arch-v10/kernel/kgdb.c
@@ -264,7 +264,7 @@ static int write_register (int regno, char *val);
/* Write a value to a specified register in the stack of a thread other
than the current thread. */
-static write_stack_register (int thread_id, int regno, char *valptr);
+static int write_stack_register(int thread_id, int regno, char *valptr);
/* Read a value from a specified register in the register image. Returns the
status of the read operation. The register value is returned in valptr. */
diff --git a/arch/cris/arch-v10/kernel/time.c b/arch/cris/arch-v10/kernel/time.c
index 20c85b5dc7d..bcffcb6a941 100644
--- a/arch/cris/arch-v10/kernel/time.c
+++ b/arch/cris/arch-v10/kernel/time.c
@@ -19,16 +19,12 @@
#include <asm/signal.h>
#include <asm/io.h>
#include <asm/delay.h>
-#include <asm/rtc.h>
#include <asm/irq_regs.h>
/* define this if you need to use print_timestamp */
/* it will make jiffies at 96 hz instead of 100 hz though */
#undef USE_CASCADE_TIMERS
-extern int set_rtc_mmss(unsigned long nowtime);
-extern int have_rtc;
-
unsigned long get_ns_in_jiffie(void)
{
unsigned char timer_count, t1;
@@ -203,11 +199,6 @@ time_init(void)
*/
loops_per_usec = 50;
- if(RTC_INIT() < 0)
- have_rtc = 0;
- else
- have_rtc = 1;
-
/* Setup the etrax timers
* Base frequency is 25000 hz, divider 250 -> 100 HZ
* In normal mode, we use timer0, so timer1 is free. In cascade
diff --git a/arch/cris/arch-v10/lib/Makefile b/arch/cris/arch-v10/lib/Makefile
index 36e9a9c5239..725153edb76 100644
--- a/arch/cris/arch-v10/lib/Makefile
+++ b/arch/cris/arch-v10/lib/Makefile
@@ -2,8 +2,5 @@
# Makefile for Etrax-specific library files..
#
-
-EXTRA_AFLAGS := -traditional
-
lib-y = checksum.o checksumcopy.o string.o usercopy.o memset.o csumcpfruser.o
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index 642c6fed43d..f8476d9e856 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -1394,11 +1394,10 @@ static int create_md5_pad(int alloc_flag, unsigned long long hashed_length, char
if (padlen < MD5_MIN_PAD_LENGTH) padlen += MD5_BLOCK_LENGTH;
- p = kmalloc(padlen, alloc_flag);
+ p = kzalloc(padlen, alloc_flag);
if (!p) return -ENOMEM;
*p = 0x80;
- memset(p+1, 0, padlen - 1);
DEBUG(printk("create_md5_pad: hashed_length=%lld bits == %lld bytes\n", bit_length, hashed_length));
@@ -1426,11 +1425,10 @@ static int create_sha1_pad(int alloc_flag, unsigned long long hashed_length, cha
if (padlen < SHA1_MIN_PAD_LENGTH) padlen += SHA1_BLOCK_LENGTH;
- p = kmalloc(padlen, alloc_flag);
+ p = kzalloc(padlen, alloc_flag);
if (!p) return -ENOMEM;
*p = 0x80;
- memset(p+1, 0, padlen - 1);
DEBUG(printk("create_sha1_pad: hashed_length=%lld bits == %lld bytes\n", bit_length, hashed_length));
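
The two cryptocop hunks above replace kmalloc()+memset() with kzalloc(), which does the same thing in one call: allocate and zero-fill. Since the code then writes the 0x80 pad marker into byte 0, only bytes 1..padlen-1 need to stay zero, which kzalloc() already guarantees. A quick user-space illustration, with malloc()/calloc() standing in for the kernel allocators:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *alloc_and_clear(size_t n)	/* stand-in for kmalloc + memset */
{
	void *p = malloc(n);

	if (p)
		memset(p, 0, n);
	return p;
}

int main(void)
{
	size_t padlen = 64;
	unsigned char *a = alloc_and_clear(padlen);
	unsigned char *b = calloc(1, padlen);	/* stand-in for kzalloc */

	if (!a || !b)
		return 1;
	a[0] = 0x80;				/* pad marker, as in create_md5_pad() */
	b[0] = 0x80;
	printf("identical: %s\n", memcmp(a, b, padlen) == 0 ? "yes" : "no");
	free(a);
	free(b);
	return 0;
}
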
diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c
index f7ad9e8637d..f085229cf87 100644
--- a/arch/cris/arch-v32/kernel/ptrace.c
+++ b/arch/cris/arch-v32/kernel/ptrace.c
@@ -114,8 +114,6 @@ void user_disable_single_step(struct task_struct *child)
void
ptrace_disable(struct task_struct *child)
{
- unsigned long tmp;
-
/* Deconfigure SPC and S-bit. */
user_disable_single_step(child);
put_reg(child, PT_SPC, 0);
diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c
index 6773fc83a67..8c4b45efd7b 100644
--- a/arch/cris/arch-v32/kernel/time.c
+++ b/arch/cris/arch-v32/kernel/time.c
@@ -18,7 +18,6 @@
#include <asm/signal.h>
#include <asm/io.h>
#include <asm/delay.h>
-#include <asm/rtc.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
@@ -67,7 +66,6 @@ unsigned long timer_regs[NR_CPUS] =
};
extern int set_rtc_mmss(unsigned long nowtime);
-extern int have_rtc;
#ifdef CONFIG_CPU_FREQ
static int
@@ -265,11 +263,6 @@ void __init time_init(void)
*/
loops_per_usec = 50;
- if(RTC_INIT() < 0)
- have_rtc = 0;
- else
- have_rtc = 1;
-
/* Start CPU local timer. */
cris_timer_init();
diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
index 1de779f4f24..7caf25d58e6 100644
--- a/arch/cris/include/arch-v32/arch/cache.h
+++ b/arch/cris/include/arch-v32/arch/cache.h
@@ -7,7 +7,7 @@
#define L1_CACHE_BYTES 32
#define L1_CACHE_SHIFT 5
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
void flush_dma_list(dma_descr_data *descr);
void flush_dma_descr(dma_descr_data *descr, int flush_buf);
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index 956eea246b9..04d02a51c5e 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -6,5 +6,4 @@ header-y += arch-v32/
header-y += ethernet.h
header-y += etraxgpio.h
header-y += rs485.h
-header-y += rtc.h
header-y += sync_serial.h
diff --git a/arch/cris/include/asm/posix_types.h b/arch/cris/include/asm/posix_types.h
index 72b3cd6eda0..234891c74e2 100644
--- a/arch/cris/include/asm/posix_types.h
+++ b/arch/cris/include/asm/posix_types.h
@@ -33,4 +33,6 @@ typedef int __kernel_ptrdiff_t;
typedef unsigned short __kernel_old_dev_t;
#define __kernel_old_dev_t __kernel_old_dev_t
+#include <asm-generic/posix_types.h>
+
#endif /* __ARCH_CRIS_POSIX_TYPES_H */
diff --git a/arch/cris/include/asm/rtc.h b/arch/cris/include/asm/rtc.h
deleted file mode 100644
index 17d3019529e..00000000000
--- a/arch/cris/include/asm/rtc.h
+++ /dev/null
@@ -1,107 +0,0 @@
-
-#ifndef __RTC_H__
-#define __RTC_H__
-
-#ifdef CONFIG_ETRAX_DS1302
- /* Dallas DS1302 clock/calendar register numbers. */
-# define RTC_SECONDS 0
-# define RTC_MINUTES 1
-# define RTC_HOURS 2
-# define RTC_DAY_OF_MONTH 3
-# define RTC_MONTH 4
-# define RTC_WEEKDAY 5
-# define RTC_YEAR 6
-# define RTC_CONTROL 7
-
- /* Bits in CONTROL register. */
-# define RTC_CONTROL_WRITEPROTECT 0x80
-# define RTC_TRICKLECHARGER 8
-
- /* Bits in TRICKLECHARGER register TCS TCS TCS TCS DS DS RS RS. */
-# define RTC_TCR_PATTERN 0xA0 /* 1010xxxx */
-# define RTC_TCR_1DIOD 0x04 /* xxxx01xx */
-# define RTC_TCR_2DIOD 0x08 /* xxxx10xx */
-# define RTC_TCR_DISABLED 0x00 /* xxxxxx00 Disabled */
-# define RTC_TCR_2KOHM 0x01 /* xxxxxx01 2KOhm */
-# define RTC_TCR_4KOHM 0x02 /* xxxxxx10 4kOhm */
-# define RTC_TCR_8KOHM 0x03 /* xxxxxx11 8kOhm */
-
-#elif defined(CONFIG_ETRAX_PCF8563)
- /* I2C bus slave registers. */
-# define RTC_I2C_READ 0xa3
-# define RTC_I2C_WRITE 0xa2
-
- /* Philips PCF8563 registers. */
-# define RTC_CONTROL1 0x00 /* Control/Status register 1. */
-# define RTC_CONTROL2 0x01 /* Control/Status register 2. */
-# define RTC_CLOCKOUT_FREQ 0x0d /* CLKOUT frequency. */
-# define RTC_TIMER_CONTROL 0x0e /* Timer control. */
-# define RTC_TIMER_CNTDOWN 0x0f /* Timer countdown. */
-
- /* BCD encoded clock registers. */
-# define RTC_SECONDS 0x02
-# define RTC_MINUTES 0x03
-# define RTC_HOURS 0x04
-# define RTC_DAY_OF_MONTH 0x05
-# define RTC_WEEKDAY 0x06 /* Not coded in BCD! */
-# define RTC_MONTH 0x07
-# define RTC_YEAR 0x08
-# define RTC_MINUTE_ALARM 0x09
-# define RTC_HOUR_ALARM 0x0a
-# define RTC_DAY_ALARM 0x0b
-# define RTC_WEEKDAY_ALARM 0x0c
-
-#endif
-
-#ifdef CONFIG_ETRAX_DS1302
-extern unsigned char ds1302_readreg(int reg);
-extern void ds1302_writereg(int reg, unsigned char val);
-extern int ds1302_init(void);
-# define CMOS_READ(x) ds1302_readreg(x)
-# define CMOS_WRITE(val,reg) ds1302_writereg(reg,val)
-# define RTC_INIT() ds1302_init()
-#elif defined(CONFIG_ETRAX_PCF8563)
-extern unsigned char pcf8563_readreg(int reg);
-extern void pcf8563_writereg(int reg, unsigned char val);
-extern int pcf8563_init(void);
-# define CMOS_READ(x) pcf8563_readreg(x)
-# define CMOS_WRITE(val,reg) pcf8563_writereg(reg,val)
-# define RTC_INIT() pcf8563_init()
-#else
- /* No RTC configured so we shouldn't try to access any. */
-# define CMOS_READ(x) 42
-# define CMOS_WRITE(x,y)
-# define RTC_INIT() (-1)
-#endif
-
-/*
- * The struct used to pass data via the following ioctl. Similar to the
- * struct tm in <time.h>, but it needs to be here so that the kernel
- * source is self contained, allowing cross-compiles, etc. etc.
- */
-struct rtc_time {
- int tm_sec;
- int tm_min;
- int tm_hour;
- int tm_mday;
- int tm_mon;
- int tm_year;
- int tm_wday;
- int tm_yday;
- int tm_isdst;
-};
-
-/* ioctl() calls that are permitted to the /dev/rtc interface. */
-#define RTC_MAGIC 'p'
-/* Read RTC time. */
-#define RTC_RD_TIME _IOR(RTC_MAGIC, 0x09, struct rtc_time)
-/* Set RTC time. */
-#define RTC_SET_TIME _IOW(RTC_MAGIC, 0x0a, struct rtc_time)
-#define RTC_SET_CHARGE _IOW(RTC_MAGIC, 0x0b, int)
-/* Voltage low detector */
-#define RTC_VL_READ _IOR(RTC_MAGIC, 0x13, int)
-/* Clear voltage low information */
-#define RTC_VL_CLR _IO(RTC_MAGIC, 0x14)
-#define RTC_MAX_IOCTL 0x14
-
-#endif /* __RTC_H__ */
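
The ioctl numbers defined in this deleted header line up with the generic RTC character-device interface (RTC_MAGIC 'p', RTC_RD_TIME = _IOR('p', 0x09, struct rtc_time)), so user space talked to it the usual way. A hedged sketch using the standard <linux/rtc.h> definitions follows; the device node name is an assumption and depends on how the node for RTC_MAJOR_NR was created.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time tm;
	int fd = open("/dev/rtc", O_RDONLY);	/* assumed device node */

	if (fd < 0) {
		perror("open /dev/rtc");
		return 1;
	}
	if (ioctl(fd, RTC_RD_TIME, &tm) < 0) {	/* same ioctl number as above */
		perror("RTC_RD_TIME");
		close(fd);
		return 1;
	}
	printf("%04d-%02d-%02d %02d:%02d:%02d\n",
	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
	       tm.tm_hour, tm.tm_min, tm.tm_sec);
	close(fd);
	return 0;
}
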
diff --git a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c
index 4e73092e85c..277ffc459e4 100644
--- a/arch/cris/kernel/time.c
+++ b/arch/cris/kernel/time.c
@@ -21,7 +21,6 @@
*
*/
-#include <asm/rtc.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/param.h>
@@ -32,7 +31,8 @@
#include <linux/profile.h>
#include <linux/sched.h> /* just for sched_clock() - funny that */
-int have_rtc; /* used to remember if we have an RTC or not */;
+
+#define D(x)
#define TICK_SIZE tick
@@ -50,78 +50,16 @@ u32 arch_gettimeoffset(void)
}
#endif
-/*
- * BUG: This routine does not handle hour overflow properly; it just
- * sets the minutes. Usually you'll only notice that after reboot!
- */
-
int set_rtc_mmss(unsigned long nowtime)
{
- int retval = 0;
- int real_seconds, real_minutes, cmos_minutes;
-
- printk(KERN_DEBUG "set_rtc_mmss(%lu)\n", nowtime);
-
- if(!have_rtc)
- return 0;
-
- cmos_minutes = CMOS_READ(RTC_MINUTES);
- cmos_minutes = bcd2bin(cmos_minutes);
-
- /*
- * since we're only adjusting minutes and seconds,
- * don't interfere with hour overflow. This avoids
- * messing with unknown time zones but requires your
- * RTC not to be off by more than 15 minutes
- */
- real_seconds = nowtime % 60;
- real_minutes = nowtime / 60;
- if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
- real_minutes += 30; /* correct for half hour time zone */
- real_minutes %= 60;
-
- if (abs(real_minutes - cmos_minutes) < 30) {
- real_seconds = bin2bcd(real_seconds);
- real_minutes = bin2bcd(real_minutes);
- CMOS_WRITE(real_seconds,RTC_SECONDS);
- CMOS_WRITE(real_minutes,RTC_MINUTES);
- } else {
- printk_once(KERN_NOTICE
- "set_rtc_mmss: can't update from %d to %d\n",
- cmos_minutes, real_minutes);
- retval = -1;
- }
-
- return retval;
+ D(printk(KERN_DEBUG "set_rtc_mmss(%lu)\n", nowtime));
+ return 0;
}
/* grab the time from the RTC chip */
-
-unsigned long
-get_cmos_time(void)
+unsigned long get_cmos_time(void)
{
- unsigned int year, mon, day, hour, min, sec;
- if(!have_rtc)
- return 0;
-
- sec = CMOS_READ(RTC_SECONDS);
- min = CMOS_READ(RTC_MINUTES);
- hour = CMOS_READ(RTC_HOURS);
- day = CMOS_READ(RTC_DAY_OF_MONTH);
- mon = CMOS_READ(RTC_MONTH);
- year = CMOS_READ(RTC_YEAR);
-
- sec = bcd2bin(sec);
- min = bcd2bin(min);
- hour = bcd2bin(hour);
- day = bcd2bin(day);
- mon = bcd2bin(mon);
- year = bcd2bin(year);
-
- if ((year += 1900) < 1970)
- year += 100;
-
- return mktime(year, mon, day, hour, min, sec);
+ return 0;
}
@@ -132,7 +70,7 @@ int update_persistent_clock(struct timespec now)
void read_persistent_clock(struct timespec *ts)
{
- ts->tv_sec = get_cmos_time();
+ ts->tv_sec = 0;
ts->tv_nsec = 0;
}
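
For the record, the set_rtc_mmss() body deleted above implemented the traditional "minutes and seconds only" update: nudge the target by 30 minutes to cope with half-hour time zones, then refuse the write when the RTC is roughly more than 15 minutes off. The stand-alone sketch below keeps just that arithmetic, with plain integers in place of the CMOS_READ/CMOS_WRITE accessors.

#include <stdio.h>
#include <stdlib.h>

/* Returns 0 and fills *min/*sec on success, -1 if the RTC is too far off. */
static int adjust_mmss(unsigned long nowtime, int cmos_minutes,
		       int *min, int *sec)
{
	int real_seconds = nowtime % 60;
	int real_minutes = nowtime / 60;

	if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
		real_minutes += 30;	/* correct for half-hour time zones */
	real_minutes %= 60;

	if (abs(real_minutes - cmos_minutes) >= 30)
		return -1;		/* more than ~15 minutes off: give up */

	*min = real_minutes;
	*sec = real_seconds;
	return 0;
}

int main(void)
{
	int min, sec;

	if (adjust_mmss(1338380000UL, 52, &min, &sec) == 0)
		printf("write %02d:%02d to the RTC\n", min, sec);
	else
		printf("RTC too far off, not updating\n");
	return 0;
}
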
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
index a6990cb0f09..a68b983dcea 100644
--- a/arch/cris/kernel/vmlinux.lds.S
+++ b/arch/cris/kernel/vmlinux.lds.S
@@ -52,6 +52,7 @@ SECTIONS
EXCEPTION_TABLE(4)
+ _sdata = .;
RODATA
. = ALIGN (4);
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index b4760d86e1b..45fd542cf17 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -58,6 +58,8 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
struct vm_area_struct * vma;
siginfo_t info;
int fault;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+ ((writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
D(printk(KERN_DEBUG
"Page fault for %lX on %X at %lX, prot %d write %d\n",
@@ -115,6 +117,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
if (in_atomic() || !mm)
goto no_context;
+retry:
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
@@ -163,7 +166,11 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, (writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
+ fault = handle_mm_fault(mm, vma, address, flags);
+
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -171,10 +178,24 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
goto do_sigbus;
BUG();
}
- if (fault & VM_FAULT_MAJOR)
- tsk->maj_flt++;
- else
- tsk->min_flt++;
+
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR)
+ tsk->maj_flt++;
+ else
+ tsk->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+ /*
+ * No need to up_read(&mm->mmap_sem) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
+ }
+ }
up_read(&mm->mmap_sem);
return;
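
This hunk, like the microblaze one further down, converts the fault handler to the retryable pattern: the first call to handle_mm_fault() passes FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE, and if it comes back with VM_FAULT_RETRY the flag is cleared and the VMA lookup is redone, so the second attempt may block but can no longer drop mmap_sem. Below is a simplified stand-alone model of that control flow; the flag values and the fake fault routine are illustrative stand-ins, not the kernel's.

#include <stdio.h>

#define FLAG_ALLOW_RETRY	0x1
#define FAULT_RETRY		0x2
#define FAULT_DONE		0x0

/* Stand-in for handle_mm_fault(): asks for a retry whenever it is allowed. */
static int fake_fault(unsigned int flags, int *attempts)
{
	(*attempts)++;
	return (flags & FLAG_ALLOW_RETRY) ? FAULT_RETRY : FAULT_DONE;
}

int main(void)
{
	unsigned int flags = FLAG_ALLOW_RETRY;
	int attempts = 0;
	int fault;

retry:
	fault = fake_fault(flags, &attempts);
	if (fault & FAULT_RETRY) {
		flags &= ~FLAG_ALLOW_RETRY;	/* second try may only block */
		goto retry;
	}
	printf("completed after %d attempt(s)\n", attempts);
	return 0;
}
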
diff --git a/arch/frv/include/asm/kvm_para.h b/arch/frv/include/asm/kvm_para.h
new file mode 100644
index 00000000000..14fab8f0b95
--- /dev/null
+++ b/arch/frv/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/h8300/include/asm/kvm_para.h b/arch/h8300/include/asm/kvm_para.h
new file mode 100644
index 00000000000..14fab8f0b95
--- /dev/null
+++ b/arch/h8300/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/hexagon/include/asm/kvm_para.h b/arch/hexagon/include/asm/kvm_para.h
new file mode 100644
index 00000000000..14fab8f0b95
--- /dev/null
+++ b/arch/hexagon/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index e35b3a84a40..6d6a5ac48d8 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -365,6 +365,7 @@ struct thash_cb {
};
struct kvm_vcpu_stat {
+ u32 halt_wakeup;
};
struct kvm_vcpu_arch {
@@ -448,6 +449,8 @@ struct kvm_vcpu_arch {
char log_buf[VMM_LOG_LEN];
union context host;
union context guest;
+
+ char mmio_data[8];
};
struct kvm_vm_stat {
diff --git a/arch/ia64/include/asm/kvm_para.h b/arch/ia64/include/asm/kvm_para.h
index 1588aee781a..2019cb99335 100644
--- a/arch/ia64/include/asm/kvm_para.h
+++ b/arch/ia64/include/asm/kvm_para.h
@@ -26,6 +26,11 @@ static inline unsigned int kvm_arch_para_features(void)
return 0;
}
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+
#endif
#endif
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 463fb3bbe11..bd77cb507c1 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -232,12 +232,12 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
goto mmio;
vcpu->mmio_needed = 1;
- vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
- vcpu->mmio_size = kvm_run->mmio.len = p->size;
+ vcpu->mmio_fragments[0].gpa = kvm_run->mmio.phys_addr = p->addr;
+ vcpu->mmio_fragments[0].len = kvm_run->mmio.len = p->size;
vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;
if (vcpu->mmio_is_write)
- memcpy(vcpu->mmio_data, &p->data, p->size);
+ memcpy(vcpu->arch.mmio_data, &p->data, p->size);
memcpy(kvm_run->mmio.data, &p->data, p->size);
kvm_run->exit_reason = KVM_EXIT_MMIO;
return 0;
@@ -719,7 +719,7 @@ static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);
if (!vcpu->mmio_is_write)
- memcpy(&p->data, vcpu->mmio_data, 8);
+ memcpy(&p->data, vcpu->arch.mmio_data, 8);
p->state = STATE_IORESP_READY;
}
@@ -739,7 +739,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
}
if (vcpu->mmio_needed) {
- memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
+ memcpy(vcpu->arch.mmio_data, kvm_run->mmio.data, 8);
kvm_set_mmio_data(vcpu);
vcpu->mmio_read_completed = 1;
vcpu->mmio_needed = 0;
@@ -1872,21 +1872,6 @@ void kvm_arch_hardware_unsetup(void)
{
}
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
-{
- int me;
- int cpu = vcpu->cpu;
-
- if (waitqueue_active(&vcpu->wq))
- wake_up_interruptible(&vcpu->wq);
-
- me = get_cpu();
- if (cpu != me && (unsigned) cpu < nr_cpu_ids && cpu_online(cpu))
- if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
- smp_send_reschedule(cpu);
- put_cpu();
-}
-
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
{
return __apic_accept_irq(vcpu, irq->vector);
@@ -1956,6 +1941,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
(kvm_highest_pending_irq(vcpu) != -1);
}
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+ return (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests));
+}
+
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
diff --git a/arch/m68k/include/asm/kvm_para.h b/arch/m68k/include/asm/kvm_para.h
new file mode 100644
index 00000000000..14fab8f0b95
--- /dev/null
+++ b/arch/m68k/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 83460468998..0bf44231aaf 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -52,7 +52,7 @@ config GENERIC_CALIBRATE_DELAY
def_bool y
config GENERIC_GPIO
- def_bool y
+ bool
config GENERIC_CSUM
def_bool y
diff --git a/arch/microblaze/include/asm/kvm_para.h b/arch/microblaze/include/asm/kvm_para.h
new file mode 100644
index 00000000000..14fab8f0b95
--- /dev/null
+++ b/arch/microblaze/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index daff9e5e4a1..03f7b8ce6b6 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -492,10 +492,11 @@ C_ENTRY(sys_clone):
bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */
lwi r6, r1, PT_R1; /* If so, use parent's stack ptr */
1: addik r7, r1, 0; /* Arg 2: parent context */
- add r8, r0, r0; /* Arg 3: (unused) */
- add r9, r0, r0; /* Arg 4: (unused) */
+ lwi r9, r1, PT_R8; /* parent tid. */
+ lwi r10, r1, PT_R9; /* child tid. */
+ /* do_fork will pick up TLS from regs->r10. */
brid do_fork /* Do real work (tail-call) */
- add r10, r0, r0; /* Arg 5: (unused) */
+ add r8, r0, r0; /* Arg 3: (unused) */
C_ENTRY(sys_execve):
brid microblaze_execve; /* Do real work (tail-call).*/
diff --git a/arch/microblaze/kernel/mcount.S b/arch/microblaze/kernel/mcount.S
index e7eaa7a8cbd..fc1e1322ce4 100644
--- a/arch/microblaze/kernel/mcount.S
+++ b/arch/microblaze/kernel/mcount.S
@@ -138,7 +138,7 @@ NOALIGN_ENTRY(ftrace_call)
#endif /* CONFIG_DYNAMIC_FTRACE */
/* static normal trace */
lwi r6, r1, 120; /* MS: load parent addr */
- addik r5, r15, 0; /* MS: load current function addr */
+ addik r5, r15, -4; /* MS: load current function addr */
/* MS: here is dependency on previous code */
brald r15, r20; /* MS: jump to ftrace handler */
nop;
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 883b92789cd..1944e00f07e 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -182,8 +182,12 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
#endif
ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8;
+ /*
+ * r21 is the thread reg, r10 is 6th arg to clone
+ * which contains TLS area
+ */
if (clone_flags & CLONE_SETTLS)
- ;
+ childregs->r21 = childregs->r10;
return 0;
}
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index c38a265846d..eb365d6795f 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -92,6 +92,8 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
int code = SEGV_MAPERR;
int is_write = error_code & ESR_S;
int fault;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+ (is_write ? FAULT_FLAG_WRITE : 0);
regs->ear = address;
regs->esr = error_code;
@@ -138,6 +140,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
if (kernel_mode(regs) && !search_exception_tables(regs->pc))
goto bad_area_nosemaphore;
+retry:
down_read(&mm->mmap_sem);
}
@@ -210,7 +213,11 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
+ fault = handle_mm_fault(mm, vma, address, flags);
+
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -218,11 +225,27 @@ good_area:
goto do_sigbus;
BUG();
}
- if (unlikely(fault & VM_FAULT_MAJOR))
- current->maj_flt++;
- else
- current->min_flt++;
+
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (unlikely(fault & VM_FAULT_MAJOR))
+ current->maj_flt++;
+ else
+ current->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+ /*
+ * No need to up_read(&mm->mmap_sem) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
+ }
+ }
+
up_read(&mm->mmap_sem);
+
/*
* keep track of tlb+htab misses that are good addrs but
* just need pte's created via handle_mm_fault()
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 77050671eee..09ab87ee6fe 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -233,8 +233,9 @@ config LANTIQ
select ARCH_REQUIRE_GPIOLIB
select SWAP_IO_SPACE
select BOOT_RAW
- select HAVE_CLK
- select MIPS_MACHINE
+ select HAVE_MACH_CLKDEV
+ select CLKDEV_LOOKUP
+ select USE_OF
config LASAT
bool "LASAT Networks platforms"
@@ -1783,10 +1784,12 @@ endchoice
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
- range 13 64 if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_32KB
- default "13" if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_32KB
- range 12 64 if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_16KB
- default "12" if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_16KB
+ range 14 64 if HUGETLB_PAGE && PAGE_SIZE_64KB
+ default "14" if HUGETLB_PAGE && PAGE_SIZE_64KB
+ range 13 64 if HUGETLB_PAGE && PAGE_SIZE_32KB
+ default "13" if HUGETLB_PAGE && PAGE_SIZE_32KB
+ range 12 64 if HUGETLB_PAGE && PAGE_SIZE_16KB
+ default "12" if HUGETLB_PAGE && PAGE_SIZE_16KB
range 11 64
default "11"
help
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 76017c25a9e..764e37a9dbb 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -219,8 +219,8 @@ endif
KBUILD_AFLAGS += $(cflags-y)
KBUILD_CFLAGS += $(cflags-y)
-KBUILD_CPPFLAGS += -D"VMLINUX_LOAD_ADDRESS=$(load-y)"
-KBUILD_CPPFLAGS += -D"DATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)"
+KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y)
+KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)
LDFLAGS += -m $(ld-emul)
diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c
index a83302b96c0..7dde01642d6 100644
--- a/arch/mips/alchemy/devboards/db1200.c
+++ b/arch/mips/alchemy/devboards/db1200.c
@@ -22,6 +22,7 @@
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/leds.h>
diff --git a/arch/mips/ath79/Kconfig b/arch/mips/ath79/Kconfig
index e0fae8f4442..f44feee2d67 100644
--- a/arch/mips/ath79/Kconfig
+++ b/arch/mips/ath79/Kconfig
@@ -26,6 +26,18 @@ config ATH79_MACH_AP81
Say 'Y' here if you want your kernel to support the
Atheros AP81 reference board.
+config ATH79_MACH_DB120
+ bool "Atheros DB120 reference board"
+ select SOC_AR934X
+ select ATH79_DEV_GPIO_BUTTONS
+ select ATH79_DEV_LEDS_GPIO
+ select ATH79_DEV_SPI
+ select ATH79_DEV_USB
+ select ATH79_DEV_WMAC
+ help
+ Say 'Y' here if you want your kernel to support the
+ Atheros DB120 reference board.
+
config ATH79_MACH_PB44
bool "Atheros PB44 reference board"
select SOC_AR71XX
@@ -52,12 +64,14 @@ endmenu
config SOC_AR71XX
select USB_ARCH_HAS_EHCI
select USB_ARCH_HAS_OHCI
+ select HW_HAS_PCI
def_bool n
config SOC_AR724X
select USB_ARCH_HAS_EHCI
select USB_ARCH_HAS_OHCI
select HW_HAS_PCI
+ select PCI_AR724X if PCI
def_bool n
config SOC_AR913X
@@ -68,6 +82,15 @@ config SOC_AR933X
select USB_ARCH_HAS_EHCI
def_bool n
+config SOC_AR934X
+ select USB_ARCH_HAS_EHCI
+ select HW_HAS_PCI
+ select PCI_AR724X if PCI
+ def_bool n
+
+config PCI_AR724X
+ def_bool n
+
config ATH79_DEV_GPIO_BUTTONS
def_bool n
@@ -81,7 +104,7 @@ config ATH79_DEV_USB
def_bool n
config ATH79_DEV_WMAC
- depends on (SOC_AR913X || SOC_AR933X)
+ depends on (SOC_AR913X || SOC_AR933X || SOC_AR934X)
def_bool n
endif
diff --git a/arch/mips/ath79/Makefile b/arch/mips/ath79/Makefile
index 3b911e09dbe..2b54d98263f 100644
--- a/arch/mips/ath79/Makefile
+++ b/arch/mips/ath79/Makefile
@@ -11,6 +11,7 @@
obj-y := prom.o setup.o irq.o common.o clock.o gpio.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+obj-$(CONFIG_PCI) += pci.o
#
# Devices
@@ -27,5 +28,6 @@ obj-$(CONFIG_ATH79_DEV_WMAC) += dev-wmac.o
#
obj-$(CONFIG_ATH79_MACH_AP121) += mach-ap121.o
obj-$(CONFIG_ATH79_MACH_AP81) += mach-ap81.o
+obj-$(CONFIG_ATH79_MACH_DB120) += mach-db120.o
obj-$(CONFIG_ATH79_MACH_PB44) += mach-pb44.o
obj-$(CONFIG_ATH79_MACH_UBNT_XM) += mach-ubnt-xm.o
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
index 54d0eb4db98..b91ad3efe29 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -1,8 +1,11 @@
/*
* Atheros AR71XX/AR724X/AR913X common routines
*
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
* Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
*
+ * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
@@ -163,6 +166,82 @@ static void __init ar933x_clocks_init(void)
ath79_uart_clk.rate = ath79_ref_clk.rate;
}
+static void __init ar934x_clocks_init(void)
+{
+ u32 pll, out_div, ref_div, nint, frac, clk_ctrl, postdiv;
+ u32 cpu_pll, ddr_pll;
+ u32 bootstrap;
+
+ bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
+ if (bootstrap & AR934X_BOOTSTRAP_REF_CLK_40)
+ ath79_ref_clk.rate = 40 * 1000 * 1000;
+ else
+ ath79_ref_clk.rate = 25 * 1000 * 1000;
+
+ pll = ath79_pll_rr(AR934X_PLL_CPU_CONFIG_REG);
+ out_div = (pll >> AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
+ AR934X_PLL_CPU_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
+ AR934X_PLL_CPU_CONFIG_REFDIV_MASK;
+ nint = (pll >> AR934X_PLL_CPU_CONFIG_NINT_SHIFT) &
+ AR934X_PLL_CPU_CONFIG_NINT_MASK;
+ frac = (pll >> AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
+ AR934X_PLL_CPU_CONFIG_NFRAC_MASK;
+
+ cpu_pll = nint * ath79_ref_clk.rate / ref_div;
+ cpu_pll += frac * ath79_ref_clk.rate / (ref_div * (2 << 6));
+ cpu_pll /= (1 << out_div);
+
+ pll = ath79_pll_rr(AR934X_PLL_DDR_CONFIG_REG);
+ out_div = (pll >> AR934X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
+ AR934X_PLL_DDR_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> AR934X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
+ AR934X_PLL_DDR_CONFIG_REFDIV_MASK;
+ nint = (pll >> AR934X_PLL_DDR_CONFIG_NINT_SHIFT) &
+ AR934X_PLL_DDR_CONFIG_NINT_MASK;
+ frac = (pll >> AR934X_PLL_DDR_CONFIG_NFRAC_SHIFT) &
+ AR934X_PLL_DDR_CONFIG_NFRAC_MASK;
+
+ ddr_pll = nint * ath79_ref_clk.rate / ref_div;
+ ddr_pll += frac * ath79_ref_clk.rate / (ref_div * (2 << 10));
+ ddr_pll /= (1 << out_div);
+
+ clk_ctrl = ath79_pll_rr(AR934X_PLL_CPU_DDR_CLK_CTRL_REG);
+
+ postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_SHIFT) &
+ AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_MASK;
+
+ if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_PLL_BYPASS)
+ ath79_cpu_clk.rate = ath79_ref_clk.rate;
+ else if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_CPUCLK_FROM_CPUPLL)
+ ath79_cpu_clk.rate = cpu_pll / (postdiv + 1);
+ else
+ ath79_cpu_clk.rate = ddr_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_SHIFT) &
+ AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_MASK;
+
+ if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_PLL_BYPASS)
+ ath79_ddr_clk.rate = ath79_ref_clk.rate;
+ else if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_DDRCLK_FROM_DDRPLL)
+ ath79_ddr_clk.rate = ddr_pll / (postdiv + 1);
+ else
+ ath79_ddr_clk.rate = cpu_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_SHIFT) &
+ AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_MASK;
+
+ if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_PLL_BYPASS)
+ ath79_ahb_clk.rate = ath79_ref_clk.rate;
+ else if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_AHBCLK_FROM_DDRPLL)
+ ath79_ahb_clk.rate = ddr_pll / (postdiv + 1);
+ else
+ ath79_ahb_clk.rate = cpu_pll / (postdiv + 1);
+
+ ath79_wdt_clk.rate = ath79_ref_clk.rate;
+ ath79_uart_clk.rate = ath79_ref_clk.rate;
+}
+
void __init ath79_clocks_init(void)
{
if (soc_is_ar71xx())
@@ -173,6 +252,8 @@ void __init ath79_clocks_init(void)
ar913x_clocks_init();
else if (soc_is_ar933x())
ar933x_clocks_init();
+ else if (soc_is_ar934x())
+ ar934x_clocks_init();
else
BUG();
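
In ar934x_clocks_init() above the CPU PLL works out to nint * ref / refdiv plus a fractional term frac * ref / (refdiv * 2^7), all divided by 2^outdiv (the DDR PLL uses 2^11 for its fraction). A short worked example with hypothetical register field values, reusing exactly that arithmetic:

#include <stdio.h>

int main(void)
{
	/* Hypothetical fields decoded from AR934X_PLL_CPU_CONFIG_REG. */
	unsigned long ref = 25 * 1000 * 1000;	/* 25 MHz reference */
	unsigned long nint = 22, frac = 0, ref_div = 1, out_div = 0;
	unsigned long cpu_pll;

	cpu_pll = nint * ref / ref_div;			/* integer part */
	cpu_pll += frac * ref / (ref_div * (2 << 6));	/* fractional part, /2^7 */
	cpu_pll /= (1 << out_div);			/* output divider */

	printf("CPU PLL: %lu Hz\n", cpu_pll);		/* 550000000 with these values */
	return 0;
}
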
diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
index f0fda982b96..5a4adfc9d79 100644
--- a/arch/mips/ath79/common.c
+++ b/arch/mips/ath79/common.c
@@ -1,9 +1,12 @@
/*
* Atheros AR71XX/AR724X/AR913X common routines
*
- * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
+ * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
@@ -67,6 +70,8 @@ void ath79_device_reset_set(u32 mask)
reg = AR913X_RESET_REG_RESET_MODULE;
else if (soc_is_ar933x())
reg = AR933X_RESET_REG_RESET_MODULE;
+ else if (soc_is_ar934x())
+ reg = AR934X_RESET_REG_RESET_MODULE;
else
BUG();
@@ -91,6 +96,8 @@ void ath79_device_reset_clear(u32 mask)
reg = AR913X_RESET_REG_RESET_MODULE;
else if (soc_is_ar933x())
reg = AR933X_RESET_REG_RESET_MODULE;
+ else if (soc_is_ar934x())
+ reg = AR934X_RESET_REG_RESET_MODULE;
else
BUG();
diff --git a/arch/mips/ath79/dev-common.c b/arch/mips/ath79/dev-common.c
index f4956f80907..45efc63b08b 100644
--- a/arch/mips/ath79/dev-common.c
+++ b/arch/mips/ath79/dev-common.c
@@ -89,7 +89,8 @@ void __init ath79_register_uart(void)
if (soc_is_ar71xx() ||
soc_is_ar724x() ||
- soc_is_ar913x()) {
+ soc_is_ar913x() ||
+ soc_is_ar934x()) {
ath79_uart_data[0].uartclk = clk_get_rate(clk);
platform_device_register(&ath79_uart_device);
} else if (soc_is_ar933x()) {
diff --git a/arch/mips/ath79/dev-gpio-buttons.c b/arch/mips/ath79/dev-gpio-buttons.c
index 4b0168a11c0..366b35fb164 100644
--- a/arch/mips/ath79/dev-gpio-buttons.c
+++ b/arch/mips/ath79/dev-gpio-buttons.c
@@ -25,12 +25,10 @@ void __init ath79_register_gpio_keys_polled(int id,
struct gpio_keys_button *p;
int err;
- p = kmalloc(nbuttons * sizeof(*p), GFP_KERNEL);
+ p = kmemdup(buttons, nbuttons * sizeof(*p), GFP_KERNEL);
if (!p)
return;
- memcpy(p, buttons, nbuttons * sizeof(*p));
-
pdev = platform_device_alloc("gpio-keys-polled", id);
if (!pdev)
goto err_free_buttons;
diff --git a/arch/mips/ath79/dev-leds-gpio.c b/arch/mips/ath79/dev-leds-gpio.c
index cdade68dcd1..dcb1debcefb 100644
--- a/arch/mips/ath79/dev-leds-gpio.c
+++ b/arch/mips/ath79/dev-leds-gpio.c
@@ -24,12 +24,10 @@ void __init ath79_register_leds_gpio(int id,
struct gpio_led *p;
int err;
- p = kmalloc(num_leds * sizeof(*p), GFP_KERNEL);
+ p = kmemdup(leds, num_leds * sizeof(*p), GFP_KERNEL);
if (!p)
return;
- memcpy(p, leds, num_leds * sizeof(*p));
-
pdev = platform_device_alloc("leds-gpio", id);
if (!pdev)
goto err_free_leds;
diff --git a/arch/mips/ath79/dev-wmac.c b/arch/mips/ath79/dev-wmac.c
index 9c717bf98ff..d6d893c16ad 100644
--- a/arch/mips/ath79/dev-wmac.c
+++ b/arch/mips/ath79/dev-wmac.c
@@ -1,9 +1,12 @@
/*
* Atheros AR913X/AR933X SoC built-in WMAC device support
*
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
* Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
+ * Parts of this file are based on Atheros 2.6.15/2.6.31 BSP
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
@@ -26,8 +29,7 @@ static struct resource ath79_wmac_resources[] = {
/* .start and .end fields are filled dynamically */
.flags = IORESOURCE_MEM,
}, {
- .start = ATH79_CPU_IRQ_IP2,
- .end = ATH79_CPU_IRQ_IP2,
+ /* .start and .end fields are filled dynamically */
.flags = IORESOURCE_IRQ,
},
};
@@ -53,6 +55,8 @@ static void __init ar913x_wmac_setup(void)
ath79_wmac_resources[0].start = AR913X_WMAC_BASE;
ath79_wmac_resources[0].end = AR913X_WMAC_BASE + AR913X_WMAC_SIZE - 1;
+ ath79_wmac_resources[1].start = ATH79_CPU_IRQ_IP2;
+ ath79_wmac_resources[1].end = ATH79_CPU_IRQ_IP2;
}
@@ -79,6 +83,8 @@ static void __init ar933x_wmac_setup(void)
ath79_wmac_resources[0].start = AR933X_WMAC_BASE;
ath79_wmac_resources[0].end = AR933X_WMAC_BASE + AR933X_WMAC_SIZE - 1;
+ ath79_wmac_resources[1].start = ATH79_CPU_IRQ_IP2;
+ ath79_wmac_resources[1].end = ATH79_CPU_IRQ_IP2;
t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP);
if (t & AR933X_BOOTSTRAP_REF_CLK_40)
@@ -92,12 +98,32 @@ static void __init ar933x_wmac_setup(void)
ath79_wmac_data.external_reset = ar933x_wmac_reset;
}
+static void ar934x_wmac_setup(void)
+{
+ u32 t;
+
+ ath79_wmac_device.name = "ar934x_wmac";
+
+ ath79_wmac_resources[0].start = AR934X_WMAC_BASE;
+ ath79_wmac_resources[0].end = AR934X_WMAC_BASE + AR934X_WMAC_SIZE - 1;
+ ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1);
+ ath79_wmac_resources[1].end = ATH79_IP2_IRQ(1);
+
+ t = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
+ if (t & AR934X_BOOTSTRAP_REF_CLK_40)
+ ath79_wmac_data.is_clk_25mhz = false;
+ else
+ ath79_wmac_data.is_clk_25mhz = true;
+}
+
void __init ath79_register_wmac(u8 *cal_data)
{
if (soc_is_ar913x())
ar913x_wmac_setup();
else if (soc_is_ar933x())
ar933x_wmac_setup();
+ else if (soc_is_ar934x())
+ ar934x_wmac_setup();
else
BUG();
diff --git a/arch/mips/ath79/early_printk.c b/arch/mips/ath79/early_printk.c
index 6a51ced7a29..dc938cb2ba5 100644
--- a/arch/mips/ath79/early_printk.c
+++ b/arch/mips/ath79/early_printk.c
@@ -71,6 +71,9 @@ static void prom_putchar_init(void)
case REV_ID_MAJOR_AR7241:
case REV_ID_MAJOR_AR7242:
case REV_ID_MAJOR_AR913X:
+ case REV_ID_MAJOR_AR9341:
+ case REV_ID_MAJOR_AR9342:
+ case REV_ID_MAJOR_AR9344:
_prom_putchar = prom_putchar_ar71xx;
break;
diff --git a/arch/mips/ath79/gpio.c b/arch/mips/ath79/gpio.c
index a2f8ca630ed..29054f21183 100644
--- a/arch/mips/ath79/gpio.c
+++ b/arch/mips/ath79/gpio.c
@@ -1,9 +1,12 @@
/*
* Atheros AR71XX/AR724X/AR913X GPIO API support
*
- * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
+ * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
@@ -89,6 +92,42 @@ static int ath79_gpio_direction_output(struct gpio_chip *chip,
return 0;
}
+static int ar934x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ void __iomem *base = ath79_gpio_base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+ __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset),
+ base + AR71XX_GPIO_REG_OE);
+
+ spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+ return 0;
+}
+
+static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ void __iomem *base = ath79_gpio_base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+ if (value)
+ __raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET);
+ else
+ __raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR);
+
+ __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset),
+ base + AR71XX_GPIO_REG_OE);
+
+ spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+ return 0;
+}
+
static struct gpio_chip ath79_gpio_chip = {
.label = "ath79",
.get = ath79_gpio_get_value,
@@ -155,11 +194,17 @@ void __init ath79_gpio_init(void)
ath79_gpio_count = AR913X_GPIO_COUNT;
else if (soc_is_ar933x())
ath79_gpio_count = AR933X_GPIO_COUNT;
+ else if (soc_is_ar934x())
+ ath79_gpio_count = AR934X_GPIO_COUNT;
else
BUG();
ath79_gpio_base = ioremap_nocache(AR71XX_GPIO_BASE, AR71XX_GPIO_SIZE);
ath79_gpio_chip.ngpio = ath79_gpio_count;
+ if (soc_is_ar934x()) {
+ ath79_gpio_chip.direction_input = ar934x_gpio_direction_input;
+ ath79_gpio_chip.direction_output = ar934x_gpio_direction_output;
+ }
err = gpiochip_add(&ath79_gpio_chip);
if (err)
diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c
index 1b073de4468..90d09fc1539 100644
--- a/arch/mips/ath79/irq.c
+++ b/arch/mips/ath79/irq.c
@@ -1,10 +1,11 @@
/*
* Atheros AR71xx/AR724x/AR913x specific interrupt handling
*
- * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
- * Parts of this file are based on Atheros' 2.6.15 BSP
+ * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -23,8 +24,8 @@
#include <asm/mach-ath79/ar71xx_regs.h>
#include "common.h"
-static unsigned int ath79_ip2_flush_reg;
-static unsigned int ath79_ip3_flush_reg;
+static void (*ath79_ip2_handler)(void);
+static void (*ath79_ip3_handler)(void);
static void ath79_misc_irq_handler(unsigned int irq, struct irq_desc *desc)
{
@@ -129,7 +130,7 @@ static void __init ath79_misc_irq_init(void)
if (soc_is_ar71xx() || soc_is_ar913x())
ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
- else if (soc_is_ar724x() || soc_is_ar933x())
+ else if (soc_is_ar724x() || soc_is_ar933x() || soc_is_ar934x())
ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
else
BUG();
@@ -143,6 +144,39 @@ static void __init ath79_misc_irq_init(void)
irq_set_chained_handler(ATH79_CPU_IRQ_MISC, ath79_misc_irq_handler);
}
+static void ar934x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
+{
+ u32 status;
+
+ disable_irq_nosync(irq);
+
+ status = ath79_reset_rr(AR934X_RESET_REG_PCIE_WMAC_INT_STATUS);
+
+ if (status & AR934X_PCIE_WMAC_INT_PCIE_ALL) {
+ ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_PCIE);
+ generic_handle_irq(ATH79_IP2_IRQ(0));
+ } else if (status & AR934X_PCIE_WMAC_INT_WMAC_ALL) {
+ ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_WMAC);
+ generic_handle_irq(ATH79_IP2_IRQ(1));
+ } else {
+ spurious_interrupt();
+ }
+
+ enable_irq(irq);
+}
+
+static void ar934x_ip2_irq_init(void)
+{
+ int i;
+
+ for (i = ATH79_IP2_IRQ_BASE;
+ i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++)
+ irq_set_chip_and_handler(i, &dummy_irq_chip,
+ handle_level_irq);
+
+ irq_set_chained_handler(ATH79_CPU_IRQ_IP2, ar934x_ip2_irq_dispatch);
+}
+
asmlinkage void plat_irq_dispatch(void)
{
unsigned long pending;
@@ -152,10 +186,8 @@ asmlinkage void plat_irq_dispatch(void)
if (pending & STATUSF_IP7)
do_IRQ(ATH79_CPU_IRQ_TIMER);
- else if (pending & STATUSF_IP2) {
- ath79_ddr_wb_flush(ath79_ip2_flush_reg);
- do_IRQ(ATH79_CPU_IRQ_IP2);
- }
+ else if (pending & STATUSF_IP2)
+ ath79_ip2_handler();
else if (pending & STATUSF_IP4)
do_IRQ(ATH79_CPU_IRQ_GE0);
@@ -163,10 +195,8 @@ asmlinkage void plat_irq_dispatch(void)
else if (pending & STATUSF_IP5)
do_IRQ(ATH79_CPU_IRQ_GE1);
- else if (pending & STATUSF_IP3) {
- ath79_ddr_wb_flush(ath79_ip3_flush_reg);
- do_IRQ(ATH79_CPU_IRQ_USB);
- }
+ else if (pending & STATUSF_IP3)
+ ath79_ip3_handler();
else if (pending & STATUSF_IP6)
do_IRQ(ATH79_CPU_IRQ_MISC);
@@ -175,24 +205,97 @@ asmlinkage void plat_irq_dispatch(void)
spurious_interrupt();
}
+/*
+ * The IP2/IP3 lines are tied to a PCI/WMAC/USB device. Drivers for
+ * these devices typically allocate coherent DMA memory; however, the
+ * DMA controller may still have some unsynchronized data in the FIFO.
+ * Issue a flush in the handlers to ensure that the driver sees
+ * the update.
+ */
+static void ar71xx_ip2_handler(void)
+{
+ ath79_ddr_wb_flush(AR71XX_DDR_REG_FLUSH_PCI);
+ do_IRQ(ATH79_CPU_IRQ_IP2);
+}
+
+static void ar724x_ip2_handler(void)
+{
+ ath79_ddr_wb_flush(AR724X_DDR_REG_FLUSH_PCIE);
+ do_IRQ(ATH79_CPU_IRQ_IP2);
+}
+
+static void ar913x_ip2_handler(void)
+{
+ ath79_ddr_wb_flush(AR913X_DDR_REG_FLUSH_WMAC);
+ do_IRQ(ATH79_CPU_IRQ_IP2);
+}
+
+static void ar933x_ip2_handler(void)
+{
+ ath79_ddr_wb_flush(AR933X_DDR_REG_FLUSH_WMAC);
+ do_IRQ(ATH79_CPU_IRQ_IP2);
+}
+
+static void ar934x_ip2_handler(void)
+{
+ do_IRQ(ATH79_CPU_IRQ_IP2);
+}
+
+static void ar71xx_ip3_handler(void)
+{
+ ath79_ddr_wb_flush(AR71XX_DDR_REG_FLUSH_USB);
+ do_IRQ(ATH79_CPU_IRQ_USB);
+}
+
+static void ar724x_ip3_handler(void)
+{
+ ath79_ddr_wb_flush(AR724X_DDR_REG_FLUSH_USB);
+ do_IRQ(ATH79_CPU_IRQ_USB);
+}
+
+static void ar913x_ip3_handler(void)
+{
+ ath79_ddr_wb_flush(AR913X_DDR_REG_FLUSH_USB);
+ do_IRQ(ATH79_CPU_IRQ_USB);
+}
+
+static void ar933x_ip3_handler(void)
+{
+ ath79_ddr_wb_flush(AR933X_DDR_REG_FLUSH_USB);
+ do_IRQ(ATH79_CPU_IRQ_USB);
+}
+
+static void ar934x_ip3_handler(void)
+{
+ ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_USB);
+ do_IRQ(ATH79_CPU_IRQ_USB);
+}
+
void __init arch_init_irq(void)
{
if (soc_is_ar71xx()) {
- ath79_ip2_flush_reg = AR71XX_DDR_REG_FLUSH_PCI;
- ath79_ip3_flush_reg = AR71XX_DDR_REG_FLUSH_USB;
+ ath79_ip2_handler = ar71xx_ip2_handler;
+ ath79_ip3_handler = ar71xx_ip3_handler;
} else if (soc_is_ar724x()) {
- ath79_ip2_flush_reg = AR724X_DDR_REG_FLUSH_PCIE;
- ath79_ip3_flush_reg = AR724X_DDR_REG_FLUSH_USB;
+ ath79_ip2_handler = ar724x_ip2_handler;
+ ath79_ip3_handler = ar724x_ip3_handler;
} else if (soc_is_ar913x()) {
- ath79_ip2_flush_reg = AR913X_DDR_REG_FLUSH_WMAC;
- ath79_ip3_flush_reg = AR913X_DDR_REG_FLUSH_USB;
+ ath79_ip2_handler = ar913x_ip2_handler;
+ ath79_ip3_handler = ar913x_ip3_handler;
} else if (soc_is_ar933x()) {
- ath79_ip2_flush_reg = AR933X_DDR_REG_FLUSH_WMAC;
- ath79_ip3_flush_reg = AR933X_DDR_REG_FLUSH_USB;
- } else
+ ath79_ip2_handler = ar933x_ip2_handler;
+ ath79_ip3_handler = ar933x_ip3_handler;
+ } else if (soc_is_ar934x()) {
+ ath79_ip2_handler = ar934x_ip2_handler;
+ ath79_ip3_handler = ar934x_ip3_handler;
+ } else {
BUG();
+ }
cp0_perfcount_irq = ATH79_MISC_IRQ_PERFC;
mips_cpu_irq_init();
ath79_misc_irq_init();
+
+ if (soc_is_ar934x())
+ ar934x_ip2_irq_init();
}
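The change above replaces the two per-SoC DDR flush-register variables with per-SoC IP2/IP3 handler pointers that are chosen once in arch_init_irq(), so plat_irq_dispatch() no longer branches on the SoC type on the interrupt hot path. A minimal, self-contained sketch of the same dispatch-table idea (hypothetical names, compiled outside the kernel; the real handlers flush a DDR write buffer and then call do_IRQ()):

#include <stdio.h>

/* hypothetical stand-ins for the per-SoC handlers */
static void soc_a_ip2_handler(void)
{
	puts("soc A: flush PCI write buffer, dispatch IP2");
}

static void soc_b_ip2_handler(void)
{
	puts("soc B: flush WMAC write buffer, dispatch IP2");
}

/* selected once at init time, called on every IP2 interrupt afterwards */
static void (*ip2_handler)(void);

static void init_irq_dispatch(int is_soc_a)
{
	ip2_handler = is_soc_a ? soc_a_ip2_handler : soc_b_ip2_handler;
}

static void irq_dispatch(void)
{
	ip2_handler();		/* no per-SoC checks on the hot path */
}

int main(void)
{
	init_irq_dispatch(1);
	irq_dispatch();
	return 0;
}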
diff --git a/arch/mips/ath79/mach-db120.c b/arch/mips/ath79/mach-db120.c
new file mode 100644
index 00000000000..1983e4d2af4
--- /dev/null
+++ b/arch/mips/ath79/mach-db120.c
@@ -0,0 +1,134 @@
+/*
+ * Atheros DB120 reference board support
+ *
+ * Copyright (c) 2011 Qualcomm Atheros
+ * Copyright (c) 2011 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/ath9k_platform.h>
+
+#include "machtypes.h"
+#include "dev-gpio-buttons.h"
+#include "dev-leds-gpio.h"
+#include "dev-spi.h"
+#include "dev-wmac.h"
+#include "pci.h"
+
+#define DB120_GPIO_LED_WLAN_5G 12
+#define DB120_GPIO_LED_WLAN_2G 13
+#define DB120_GPIO_LED_STATUS 14
+#define DB120_GPIO_LED_WPS 15
+
+#define DB120_GPIO_BTN_WPS 16
+
+#define DB120_KEYS_POLL_INTERVAL 20 /* msecs */
+#define DB120_KEYS_DEBOUNCE_INTERVAL (3 * DB120_KEYS_POLL_INTERVAL)
+
+#define DB120_WMAC_CALDATA_OFFSET 0x1000
+#define DB120_PCIE_CALDATA_OFFSET 0x5000
+
+static struct gpio_led db120_leds_gpio[] __initdata = {
+ {
+ .name = "db120:green:status",
+ .gpio = DB120_GPIO_LED_STATUS,
+ .active_low = 1,
+ },
+ {
+ .name = "db120:green:wps",
+ .gpio = DB120_GPIO_LED_WPS,
+ .active_low = 1,
+ },
+ {
+ .name = "db120:green:wlan-5g",
+ .gpio = DB120_GPIO_LED_WLAN_5G,
+ .active_low = 1,
+ },
+ {
+ .name = "db120:green:wlan-2g",
+ .gpio = DB120_GPIO_LED_WLAN_2G,
+ .active_low = 1,
+ },
+};
+
+static struct gpio_keys_button db120_gpio_keys[] __initdata = {
+ {
+ .desc = "WPS button",
+ .type = EV_KEY,
+ .code = KEY_WPS_BUTTON,
+ .debounce_interval = DB120_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = DB120_GPIO_BTN_WPS,
+ .active_low = 1,
+ },
+};
+
+static struct spi_board_info db120_spi_info[] = {
+ {
+ .bus_num = 0,
+ .chip_select = 0,
+ .max_speed_hz = 25000000,
+ .modalias = "s25sl064a",
+ }
+};
+
+static struct ath79_spi_platform_data db120_spi_data = {
+ .bus_num = 0,
+ .num_chipselect = 1,
+};
+
+#ifdef CONFIG_PCI
+static struct ath9k_platform_data db120_ath9k_data;
+
+static int db120_pci_plat_dev_init(struct pci_dev *dev)
+{
+ switch (PCI_SLOT(dev->devfn)) {
+ case 0:
+ dev->dev.platform_data = &db120_ath9k_data;
+ break;
+ }
+
+ return 0;
+}
+
+static void __init db120_pci_init(u8 *eeprom)
+{
+ memcpy(db120_ath9k_data.eeprom_data, eeprom,
+ sizeof(db120_ath9k_data.eeprom_data));
+
+ ath79_pci_set_plat_dev_init(db120_pci_plat_dev_init);
+ ath79_register_pci();
+}
+#else
+static inline void db120_pci_init(u8 *eeprom) {}
+#endif /* CONFIG_PCI */
+
+static void __init db120_setup(void)
+{
+ u8 *art = (u8 *) KSEG1ADDR(0x1fff0000);
+
+ ath79_register_leds_gpio(-1, ARRAY_SIZE(db120_leds_gpio),
+ db120_leds_gpio);
+ ath79_register_gpio_keys_polled(-1, DB120_KEYS_POLL_INTERVAL,
+ ARRAY_SIZE(db120_gpio_keys),
+ db120_gpio_keys);
+ ath79_register_spi(&db120_spi_data, db120_spi_info,
+ ARRAY_SIZE(db120_spi_info));
+ ath79_register_wmac(art + DB120_WMAC_CALDATA_OFFSET);
+ db120_pci_init(art + DB120_PCIE_CALDATA_OFFSET);
+}
+
+MIPS_MACHINE(ATH79_MACH_DB120, "DB120", "Atheros DB120 reference board",
+ db120_setup);
diff --git a/arch/mips/ath79/mach-pb44.c b/arch/mips/ath79/mach-pb44.c
index fe9701a3229..c5f0ea5e00c 100644
--- a/arch/mips/ath79/mach-pb44.c
+++ b/arch/mips/ath79/mach-pb44.c
@@ -19,6 +19,7 @@
#include "dev-leds-gpio.h"
#include "dev-spi.h"
#include "dev-usb.h"
+#include "pci.h"
#define PB44_GPIO_I2C_SCL 0
#define PB44_GPIO_I2C_SDA 1
@@ -114,6 +115,7 @@ static void __init pb44_init(void)
ath79_register_spi(&pb44_spi_data, pb44_spi_info,
ARRAY_SIZE(pb44_spi_info));
ath79_register_usb();
+ ath79_register_pci();
}
MIPS_MACHINE(ATH79_MACH_PB44, "PB44", "Atheros PB44 reference board",
diff --git a/arch/mips/ath79/mach-ubnt-xm.c b/arch/mips/ath79/mach-ubnt-xm.c
index 3c311a53934..4a3c60694c7 100644
--- a/arch/mips/ath79/mach-ubnt-xm.c
+++ b/arch/mips/ath79/mach-ubnt-xm.c
@@ -12,16 +12,15 @@
#include <linux/init.h>
#include <linux/pci.h>
-
-#ifdef CONFIG_PCI
#include <linux/ath9k_platform.h>
-#include <asm/mach-ath79/pci-ath724x.h>
-#endif /* CONFIG_PCI */
+
+#include <asm/mach-ath79/irq.h>
#include "machtypes.h"
#include "dev-gpio-buttons.h"
#include "dev-leds-gpio.h"
#include "dev-spi.h"
+#include "pci.h"
#define UBNT_XM_GPIO_LED_L1 0
#define UBNT_XM_GPIO_LED_L2 1
@@ -33,7 +32,6 @@
#define UBNT_XM_KEYS_POLL_INTERVAL 20
#define UBNT_XM_KEYS_DEBOUNCE_INTERVAL (3 * UBNT_XM_KEYS_POLL_INTERVAL)
-#define UBNT_XM_PCI_IRQ 48
#define UBNT_XM_EEPROM_ADDR (u8 *) KSEG1ADDR(0x1fff1000)
static struct gpio_led ubnt_xm_leds_gpio[] __initdata = {
@@ -84,12 +82,27 @@ static struct ath79_spi_platform_data ubnt_xm_spi_data = {
#ifdef CONFIG_PCI
static struct ath9k_platform_data ubnt_xm_eeprom_data;
-static struct ath724x_pci_data ubnt_xm_pci_data[] = {
- {
- .irq = UBNT_XM_PCI_IRQ,
- .pdata = &ubnt_xm_eeprom_data,
- },
-};
+static int ubnt_xm_pci_plat_dev_init(struct pci_dev *dev)
+{
+ switch (PCI_SLOT(dev->devfn)) {
+ case 0:
+ dev->dev.platform_data = &ubnt_xm_eeprom_data;
+ break;
+ }
+
+ return 0;
+}
+
+static void __init ubnt_xm_pci_init(void)
+{
+ memcpy(ubnt_xm_eeprom_data.eeprom_data, UBNT_XM_EEPROM_ADDR,
+ sizeof(ubnt_xm_eeprom_data.eeprom_data));
+
+ ath79_pci_set_plat_dev_init(ubnt_xm_pci_plat_dev_init);
+ ath79_register_pci();
+}
+#else
+static inline void ubnt_xm_pci_init(void) {}
#endif /* CONFIG_PCI */
static void __init ubnt_xm_init(void)
@@ -104,13 +117,7 @@ static void __init ubnt_xm_init(void)
ath79_register_spi(&ubnt_xm_spi_data, ubnt_xm_spi_info,
ARRAY_SIZE(ubnt_xm_spi_info));
-#ifdef CONFIG_PCI
- memcpy(ubnt_xm_eeprom_data.eeprom_data, UBNT_XM_EEPROM_ADDR,
- sizeof(ubnt_xm_eeprom_data.eeprom_data));
-
- ath724x_pci_add_data(ubnt_xm_pci_data, ARRAY_SIZE(ubnt_xm_pci_data));
-#endif /* CONFIG_PCI */
-
+ ubnt_xm_pci_init();
}
MIPS_MACHINE(ATH79_MACH_UBNT_XM,
diff --git a/arch/mips/ath79/machtypes.h b/arch/mips/ath79/machtypes.h
index 9a1f3826626..af92e5c30d6 100644
--- a/arch/mips/ath79/machtypes.h
+++ b/arch/mips/ath79/machtypes.h
@@ -18,6 +18,7 @@ enum ath79_mach_type {
ATH79_MACH_GENERIC = 0,
ATH79_MACH_AP121, /* Atheros AP121 reference board */
ATH79_MACH_AP81, /* Atheros AP81 reference board */
+ ATH79_MACH_DB120, /* Atheros DB120 reference board */
ATH79_MACH_PB44, /* Atheros PB44 reference board */
ATH79_MACH_UBNT_XM, /* Ubiquiti Networks XM board rev 1.0 */
};
diff --git a/arch/mips/ath79/pci.c b/arch/mips/ath79/pci.c
new file mode 100644
index 00000000000..ca83abd9d31
--- /dev/null
+++ b/arch/mips/ath79/pci.c
@@ -0,0 +1,130 @@
+/*
+ * Atheros AR71XX/AR724X specific PCI setup code
+ *
+ * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Parts of this file are based on Atheros' 2.6.15 BSP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/irq.h>
+#include <asm/mach-ath79/pci.h>
+#include "pci.h"
+
+static int (*ath79_pci_plat_dev_init)(struct pci_dev *dev);
+static const struct ath79_pci_irq *ath79_pci_irq_map __initdata;
+static unsigned ath79_pci_nr_irqs __initdata;
+
+static const struct ath79_pci_irq ar71xx_pci_irq_map[] __initconst = {
+ {
+ .slot = 17,
+ .pin = 1,
+ .irq = ATH79_PCI_IRQ(0),
+ }, {
+ .slot = 18,
+ .pin = 1,
+ .irq = ATH79_PCI_IRQ(1),
+ }, {
+ .slot = 19,
+ .pin = 1,
+ .irq = ATH79_PCI_IRQ(2),
+ }
+};
+
+static const struct ath79_pci_irq ar724x_pci_irq_map[] __initconst = {
+ {
+ .slot = 0,
+ .pin = 1,
+ .irq = ATH79_PCI_IRQ(0),
+ }
+};
+
+int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
+{
+ int irq = -1;
+ int i;
+
+ if (ath79_pci_nr_irqs == 0 ||
+ ath79_pci_irq_map == NULL) {
+ if (soc_is_ar71xx()) {
+ ath79_pci_irq_map = ar71xx_pci_irq_map;
+ ath79_pci_nr_irqs = ARRAY_SIZE(ar71xx_pci_irq_map);
+ } else if (soc_is_ar724x() ||
+ soc_is_ar9342() ||
+ soc_is_ar9344()) {
+ ath79_pci_irq_map = ar724x_pci_irq_map;
+ ath79_pci_nr_irqs = ARRAY_SIZE(ar724x_pci_irq_map);
+ } else {
+ pr_crit("pci %s: invalid irq map\n",
+ pci_name((struct pci_dev *) dev));
+ return irq;
+ }
+ }
+
+ for (i = 0; i < ath79_pci_nr_irqs; i++) {
+ const struct ath79_pci_irq *entry;
+
+ entry = &ath79_pci_irq_map[i];
+ if (entry->slot == slot && entry->pin == pin) {
+ irq = entry->irq;
+ break;
+ }
+ }
+
+ if (irq < 0)
+ pr_crit("pci %s: no irq found for pin %u\n",
+ pci_name((struct pci_dev *) dev), pin);
+ else
+ pr_info("pci %s: using irq %d for pin %u\n",
+ pci_name((struct pci_dev *) dev), irq, pin);
+
+ return irq;
+}
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ if (ath79_pci_plat_dev_init)
+ return ath79_pci_plat_dev_init(dev);
+
+ return 0;
+}
+
+void __init ath79_pci_set_irq_map(unsigned nr_irqs,
+ const struct ath79_pci_irq *map)
+{
+ ath79_pci_nr_irqs = nr_irqs;
+ ath79_pci_irq_map = map;
+}
+
+void __init ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *dev))
+{
+ ath79_pci_plat_dev_init = func;
+}
+
+int __init ath79_register_pci(void)
+{
+ if (soc_is_ar71xx())
+ return ar71xx_pcibios_init();
+
+ if (soc_is_ar724x())
+ return ar724x_pcibios_init(ATH79_CPU_IRQ_IP2);
+
+ if (soc_is_ar9342() || soc_is_ar9344()) {
+ u32 bootstrap;
+
+ bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
+ if (bootstrap & AR934X_BOOTSTRAP_PCIE_RC)
+ return ar724x_pcibios_init(ATH79_IP2_IRQ(0));
+ }
+
+ return -ENODEV;
+}
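Board code is expected to hook into this file by installing an IRQ map and/or a platform-device init callback before calling ath79_register_pci(), as mach-db120.c and mach-ubnt-xm.c do above. A hedged sketch of a hypothetical board file using both hooks (the board name, the slot/IRQ values and the ath9k platform data are illustrative, not part of this patch):

/* hypothetical board support fragment; assumes <linux/pci.h>,
 * <linux/ath9k_platform.h>, "pci.h" and <asm/mach-ath79/irq.h> */
static struct ath9k_platform_data myboard_wmac_data;

static const struct ath79_pci_irq myboard_pci_irq_map[] __initconst = {
	{
		.slot	= 0,
		.pin	= 1,
		.irq	= ATH79_PCI_IRQ(0),
	},
};

static int myboard_pci_plat_dev_init(struct pci_dev *dev)
{
	/* attach the wireless platform data to the device in slot 0 */
	if (PCI_SLOT(dev->devfn) == 0)
		dev->dev.platform_data = &myboard_wmac_data;

	return 0;
}

static void __init myboard_pci_init(void)
{
	ath79_pci_set_irq_map(ARRAY_SIZE(myboard_pci_irq_map),
			      myboard_pci_irq_map);
	ath79_pci_set_plat_dev_init(myboard_pci_plat_dev_init);
	ath79_register_pci();
}

myboard_pci_init() would then be called from the board's setup routine, mirroring the db120_setup()/db120_pci_init() pairing shown earlier.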
diff --git a/arch/mips/ath79/pci.h b/arch/mips/ath79/pci.h
new file mode 100644
index 00000000000..51c6625dcc6
--- /dev/null
+++ b/arch/mips/ath79/pci.h
@@ -0,0 +1,34 @@
+/*
+ * Atheros AR71XX/AR724X PCI support
+ *
+ * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef _ATH79_PCI_H
+#define _ATH79_PCI_H
+
+struct ath79_pci_irq {
+ u8 slot;
+ u8 pin;
+ int irq;
+};
+
+#ifdef CONFIG_PCI
+void ath79_pci_set_irq_map(unsigned nr_irqs, const struct ath79_pci_irq *map);
+void ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *dev));
+int ath79_register_pci(void);
+#else
+static inline void
+ath79_pci_set_irq_map(unsigned nr_irqs, const struct ath79_pci_irq *map) {}
+static inline void
+ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *)) {}
+static inline int ath79_register_pci(void) { return 0; }
+#endif
+
+#endif /* _ATH79_PCI_H */
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 80a7d4023d7..60d212ef862 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -1,10 +1,11 @@
/*
* Atheros AR71XX/AR724X/AR913X specific setup
*
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
* Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
- * Parts of this file are based on Atheros' 2.6.15 BSP
+ * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -116,18 +117,6 @@ static void __init ath79_detect_sys_type(void)
rev = id & AR724X_REV_ID_REVISION_MASK;
break;
- case REV_ID_MAJOR_AR9330:
- ath79_soc = ATH79_SOC_AR9330;
- chip = "9330";
- rev = id & AR933X_REV_ID_REVISION_MASK;
- break;
-
- case REV_ID_MAJOR_AR9331:
- ath79_soc = ATH79_SOC_AR9331;
- chip = "9331";
- rev = id & AR933X_REV_ID_REVISION_MASK;
- break;
-
case REV_ID_MAJOR_AR913X:
minor = id & AR913X_REV_ID_MINOR_MASK;
rev = id >> AR913X_REV_ID_REVISION_SHIFT;
@@ -145,6 +134,36 @@ static void __init ath79_detect_sys_type(void)
}
break;
+ case REV_ID_MAJOR_AR9330:
+ ath79_soc = ATH79_SOC_AR9330;
+ chip = "9330";
+ rev = id & AR933X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_AR9331:
+ ath79_soc = ATH79_SOC_AR9331;
+ chip = "9331";
+ rev = id & AR933X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_AR9341:
+ ath79_soc = ATH79_SOC_AR9341;
+ chip = "9341";
+ rev = id & AR934X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_AR9342:
+ ath79_soc = ATH79_SOC_AR9342;
+ chip = "9342";
+ rev = id & AR934X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_AR9344:
+ ath79_soc = ATH79_SOC_AR9344;
+ chip = "9344";
+ rev = id & AR934X_REV_ID_REVISION_MASK;
+ break;
+
default:
panic("ath79: unknown SoC, id:0x%08x", id);
}
diff --git a/arch/mips/bcm63xx/boards/Makefile b/arch/mips/bcm63xx/boards/Makefile
index 9f64fb41407..af07c1aa202 100644
--- a/arch/mips/bcm63xx/boards/Makefile
+++ b/arch/mips/bcm63xx/boards/Makefile
@@ -1,3 +1 @@
obj-$(CONFIG_BOARD_BCM963XX) += board_bcm963xx.o
-
-ccflags-y := -Werror
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index d3a9f012aa0..260dc247c05 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/console.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/serial.h>
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 97e7ce9b50e..4b93048044e 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -257,8 +257,6 @@ DEFINE_PER_CPU(int, cpu_state);
extern void fixup_irqs(void);
-static DEFINE_SPINLOCK(smp_reserve_lock);
-
static int octeon_cpu_disable(void)
{
unsigned int cpu = smp_processor_id();
@@ -266,8 +264,6 @@ static int octeon_cpu_disable(void)
if (cpu == 0)
return -EBUSY;
- spin_lock(&smp_reserve_lock);
-
set_cpu_online(cpu, false);
cpu_clear(cpu, cpu_callin_map);
local_irq_disable();
@@ -277,8 +273,6 @@ static int octeon_cpu_disable(void)
flush_cache_all();
local_flush_tlb_all();
- spin_unlock(&smp_reserve_lock);
-
return 0;
}
diff --git a/arch/mips/fw/arc/Makefile b/arch/mips/fw/arc/Makefile
index 5314b37aff2..4f349ec1ea2 100644
--- a/arch/mips/fw/arc/Makefile
+++ b/arch/mips/fw/arc/Makefile
@@ -8,5 +8,3 @@ lib-y += cmdline.o env.o file.o identify.o init.o \
lib-$(CONFIG_ARC_MEMORY) += memory.o
lib-$(CONFIG_ARC_CONSOLE) += arc_con.o
lib-$(CONFIG_ARC_PROMLIB) += promlib.o
-
-ccflags-y := -Werror
diff --git a/arch/mips/include/asm/clkdev.h b/arch/mips/include/asm/clkdev.h
new file mode 100644
index 00000000000..262475414e5
--- /dev/null
+++ b/arch/mips/include/asm/clkdev.h
@@ -0,0 +1,25 @@
+/*
+ * based on arch/arm/include/asm/clkdev.h
+ *
+ * Copyright (C) 2008 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Helper for the clk API to assist looking up a struct clk.
+ */
+#ifndef __ASM_CLKDEV_H
+#define __ASM_CLKDEV_H
+
+#include <linux/slab.h>
+
+#define __clk_get(clk) ({ 1; })
+#define __clk_put(clk) do { } while (0)
+
+static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
+{
+ return kzalloc(size, GFP_KERNEL);
+}
+
+#endif
diff --git a/arch/mips/include/asm/kvm_para.h b/arch/mips/include/asm/kvm_para.h
new file mode 100644
index 00000000000..14fab8f0b95
--- /dev/null
+++ b/arch/mips/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
index 2f0becb4ec8..1caa78ad06d 100644
--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
@@ -1,10 +1,11 @@
/*
* Atheros AR71XX/AR724X/AR913X SoC register definitions
*
+ * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
* Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
- * Parts of this file are based on Atheros' 2.6.15 BSP
+ * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -60,6 +61,9 @@
#define AR933X_EHCI_BASE 0x1b000000
#define AR933X_EHCI_SIZE 0x1000
+#define AR934X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
+#define AR934X_WMAC_SIZE 0x20000
+
/*
* DDR_CTRL block
*/
@@ -91,6 +95,12 @@
#define AR933X_DDR_REG_FLUSH_USB 0x84
#define AR933X_DDR_REG_FLUSH_WMAC 0x88
+#define AR934X_DDR_REG_FLUSH_GE0 0x9c
+#define AR934X_DDR_REG_FLUSH_GE1 0xa0
+#define AR934X_DDR_REG_FLUSH_USB 0xa4
+#define AR934X_DDR_REG_FLUSH_PCIE 0xa8
+#define AR934X_DDR_REG_FLUSH_WMAC 0xac
+
/*
* PLL block
*/
@@ -150,6 +160,41 @@
#define AR933X_PLL_CLOCK_CTRL_AHB_DIV_SHIFT 15
#define AR933X_PLL_CLOCK_CTRL_AHB_DIV_MASK 0x7
+#define AR934X_PLL_CPU_CONFIG_REG 0x00
+#define AR934X_PLL_DDR_CONFIG_REG 0x04
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_REG 0x08
+
+#define AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT 0
+#define AR934X_PLL_CPU_CONFIG_NFRAC_MASK 0x3f
+#define AR934X_PLL_CPU_CONFIG_NINT_SHIFT 6
+#define AR934X_PLL_CPU_CONFIG_NINT_MASK 0x3f
+#define AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT 12
+#define AR934X_PLL_CPU_CONFIG_REFDIV_MASK 0x1f
+#define AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT 19
+#define AR934X_PLL_CPU_CONFIG_OUTDIV_MASK 0x3
+
+#define AR934X_PLL_DDR_CONFIG_NFRAC_SHIFT 0
+#define AR934X_PLL_DDR_CONFIG_NFRAC_MASK 0x3ff
+#define AR934X_PLL_DDR_CONFIG_NINT_SHIFT 10
+#define AR934X_PLL_DDR_CONFIG_NINT_MASK 0x3f
+#define AR934X_PLL_DDR_CONFIG_REFDIV_SHIFT 16
+#define AR934X_PLL_DDR_CONFIG_REFDIV_MASK 0x1f
+#define AR934X_PLL_DDR_CONFIG_OUTDIV_SHIFT 23
+#define AR934X_PLL_DDR_CONFIG_OUTDIV_MASK 0x7
+
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_PLL_BYPASS BIT(2)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_PLL_BYPASS BIT(3)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_PLL_BYPASS BIT(4)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_SHIFT 5
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_MASK 0x1f
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_SHIFT 10
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_MASK 0x1f
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_SHIFT 15
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_MASK 0x1f
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_CPUCLK_FROM_CPUPLL BIT(20)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDRCLK_FROM_DDRPLL BIT(21)
+#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
+
/*
* USB_CONFIG block
*/
@@ -185,6 +230,10 @@
#define AR933X_RESET_REG_RESET_MODULE 0x1c
#define AR933X_RESET_REG_BOOTSTRAP 0xac
+#define AR934X_RESET_REG_RESET_MODULE 0x1c
+#define AR934X_RESET_REG_BOOTSTRAP 0xb0
+#define AR934X_RESET_REG_PCIE_WMAC_INT_STATUS 0xac
+
#define MISC_INT_ETHSW BIT(12)
#define MISC_INT_TIMER4 BIT(10)
#define MISC_INT_TIMER3 BIT(9)
@@ -241,6 +290,40 @@
#define AR933X_BOOTSTRAP_REF_CLK_40 BIT(0)
+#define AR934X_BOOTSTRAP_SW_OPTION8 BIT(23)
+#define AR934X_BOOTSTRAP_SW_OPTION7 BIT(22)
+#define AR934X_BOOTSTRAP_SW_OPTION6 BIT(21)
+#define AR934X_BOOTSTRAP_SW_OPTION5 BIT(20)
+#define AR934X_BOOTSTRAP_SW_OPTION4 BIT(19)
+#define AR934X_BOOTSTRAP_SW_OPTION3 BIT(18)
+#define AR934X_BOOTSTRAP_SW_OPTION2 BIT(17)
+#define AR934X_BOOTSTRAP_SW_OPTION1 BIT(16)
+#define AR934X_BOOTSTRAP_USB_MODE_DEVICE BIT(7)
+#define AR934X_BOOTSTRAP_PCIE_RC BIT(6)
+#define AR934X_BOOTSTRAP_EJTAG_MODE BIT(5)
+#define AR934X_BOOTSTRAP_REF_CLK_40 BIT(4)
+#define AR934X_BOOTSTRAP_BOOT_FROM_SPI BIT(2)
+#define AR934X_BOOTSTRAP_SDRAM_DISABLED BIT(1)
+#define AR934X_BOOTSTRAP_DDR1 BIT(0)
+
+#define AR934X_PCIE_WMAC_INT_WMAC_MISC BIT(0)
+#define AR934X_PCIE_WMAC_INT_WMAC_TX BIT(1)
+#define AR934X_PCIE_WMAC_INT_WMAC_RXLP BIT(2)
+#define AR934X_PCIE_WMAC_INT_WMAC_RXHP BIT(3)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC BIT(4)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC0 BIT(5)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC1 BIT(6)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC2 BIT(7)
+#define AR934X_PCIE_WMAC_INT_PCIE_RC3 BIT(8)
+#define AR934X_PCIE_WMAC_INT_WMAC_ALL \
+ (AR934X_PCIE_WMAC_INT_WMAC_MISC | AR934X_PCIE_WMAC_INT_WMAC_TX | \
+ AR934X_PCIE_WMAC_INT_WMAC_RXLP | AR934X_PCIE_WMAC_INT_WMAC_RXHP)
+
+#define AR934X_PCIE_WMAC_INT_PCIE_ALL \
+ (AR934X_PCIE_WMAC_INT_PCIE_RC | AR934X_PCIE_WMAC_INT_PCIE_RC0 | \
+ AR934X_PCIE_WMAC_INT_PCIE_RC1 | AR934X_PCIE_WMAC_INT_PCIE_RC2 | \
+ AR934X_PCIE_WMAC_INT_PCIE_RC3)
+
#define REV_ID_MAJOR_MASK 0xfff0
#define REV_ID_MAJOR_AR71XX 0x00a0
#define REV_ID_MAJOR_AR913X 0x00b0
@@ -249,6 +332,9 @@
#define REV_ID_MAJOR_AR7242 0x1100
#define REV_ID_MAJOR_AR9330 0x0110
#define REV_ID_MAJOR_AR9331 0x1110
+#define REV_ID_MAJOR_AR9341 0x0120
+#define REV_ID_MAJOR_AR9342 0x1120
+#define REV_ID_MAJOR_AR9344 0x2120
#define AR71XX_REV_ID_MINOR_MASK 0x3
#define AR71XX_REV_ID_MINOR_AR7130 0x0
@@ -267,6 +353,8 @@
#define AR724X_REV_ID_REVISION_MASK 0x3
+#define AR934X_REV_ID_REVISION_MASK 0xf
+
/*
* SPI block
*/
@@ -308,5 +396,6 @@
#define AR724X_GPIO_COUNT 18
#define AR913X_GPIO_COUNT 22
#define AR933X_GPIO_COUNT 30
+#define AR934X_GPIO_COUNT 23
#endif /* __ASM_MACH_AR71XX_REGS_H */
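The AR934X_PLL_*_CONFIG fields added above describe a fractional-N PLL (integer multiplier NINT, fractional part NFRAC, reference divider REFDIV, output divider OUTDIV). This diff does not include the clock code that consumes them, so the following decode is only a sketch under stated assumptions: a 6-bit fraction (matching the 0x3f mask), a power-of-two output divider, and a non-zero REFDIV.

/* hypothetical decode of the AR934x CPU PLL register into a rate in Hz */
static unsigned long ar934x_cpu_pll_rate(u32 pll, unsigned long ref_rate)
{
	u32 nint, nfrac, refdiv, outdiv;
	unsigned long rate;

	nint   = (pll >> AR934X_PLL_CPU_CONFIG_NINT_SHIFT) &
		 AR934X_PLL_CPU_CONFIG_NINT_MASK;
	nfrac  = (pll >> AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
		 AR934X_PLL_CPU_CONFIG_NFRAC_MASK;
	refdiv = (pll >> AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
		 AR934X_PLL_CPU_CONFIG_REFDIV_MASK;
	outdiv = (pll >> AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
		 AR934X_PLL_CPU_CONFIG_OUTDIV_MASK;

	/* assumed formula: ref / refdiv * (nint + nfrac / 2^6), then / 2^outdiv */
	rate  = nint * (ref_rate / refdiv);
	rate += (nfrac * (ref_rate / refdiv)) >> 6;

	return rate >> outdiv;
}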
diff --git a/arch/mips/include/asm/mach-ath79/ath79.h b/arch/mips/include/asm/mach-ath79/ath79.h
index 6d0c6c9d562..4f248c3d7b2 100644
--- a/arch/mips/include/asm/mach-ath79/ath79.h
+++ b/arch/mips/include/asm/mach-ath79/ath79.h
@@ -29,6 +29,9 @@ enum ath79_soc_type {
ATH79_SOC_AR9132,
ATH79_SOC_AR9330,
ATH79_SOC_AR9331,
+ ATH79_SOC_AR9341,
+ ATH79_SOC_AR9342,
+ ATH79_SOC_AR9344,
};
extern enum ath79_soc_type ath79_soc;
@@ -75,6 +78,26 @@ static inline int soc_is_ar933x(void)
ath79_soc == ATH79_SOC_AR9331);
}
+static inline int soc_is_ar9341(void)
+{
+ return (ath79_soc == ATH79_SOC_AR9341);
+}
+
+static inline int soc_is_ar9342(void)
+{
+ return (ath79_soc == ATH79_SOC_AR9342);
+}
+
+static inline int soc_is_ar9344(void)
+{
+ return (ath79_soc == ATH79_SOC_AR9344);
+}
+
+static inline int soc_is_ar934x(void)
+{
+ return soc_is_ar9341() || soc_is_ar9342() || soc_is_ar9344();
+}
+
extern void __iomem *ath79_ddr_base;
extern void __iomem *ath79_pll_base;
extern void __iomem *ath79_reset_base;
diff --git a/arch/mips/include/asm/mach-ath79/irq.h b/arch/mips/include/asm/mach-ath79/irq.h
index 519958fe4e3..0968f69e201 100644
--- a/arch/mips/include/asm/mach-ath79/irq.h
+++ b/arch/mips/include/asm/mach-ath79/irq.h
@@ -10,11 +10,19 @@
#define __ASM_MACH_ATH79_IRQ_H
#define MIPS_CPU_IRQ_BASE 0
-#define NR_IRQS 40
+#define NR_IRQS 48
#define ATH79_MISC_IRQ_BASE 8
#define ATH79_MISC_IRQ_COUNT 32
+#define ATH79_PCI_IRQ_BASE (ATH79_MISC_IRQ_BASE + ATH79_MISC_IRQ_COUNT)
+#define ATH79_PCI_IRQ_COUNT 6
+#define ATH79_PCI_IRQ(_x) (ATH79_PCI_IRQ_BASE + (_x))
+
+#define ATH79_IP2_IRQ_BASE (ATH79_PCI_IRQ_BASE + ATH79_PCI_IRQ_COUNT)
+#define ATH79_IP2_IRQ_COUNT 2
+#define ATH79_IP2_IRQ(_x) (ATH79_IP2_IRQ_BASE + (_x))
+
#define ATH79_CPU_IRQ_IP2 (MIPS_CPU_IRQ_BASE + 2)
#define ATH79_CPU_IRQ_USB (MIPS_CPU_IRQ_BASE + 3)
#define ATH79_CPU_IRQ_GE0 (MIPS_CPU_IRQ_BASE + 4)
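The new NR_IRQS value follows directly from the ranges laid out above: the MIPS CPU IRQs occupy 0-7 (MIPS_CPU_IRQ_BASE 0 up to ATH79_MISC_IRQ_BASE 8), the MISC block takes 8-39 (32 IRQs), the new PCI range takes 40-45 (6 IRQs) and the new IP2 range takes 46-47 (2 IRQs), giving 8 + 32 + 6 + 2 = 48.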
diff --git a/arch/mips/include/asm/mach-ath79/pci-ath724x.h b/arch/mips/include/asm/mach-ath79/pci-ath724x.h
deleted file mode 100644
index 454885fa30c..00000000000
--- a/arch/mips/include/asm/mach-ath79/pci-ath724x.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Atheros 724x PCI support
- *
- * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#ifndef __ASM_MACH_ATH79_PCI_ATH724X_H
-#define __ASM_MACH_ATH79_PCI_ATH724X_H
-
-struct ath724x_pci_data {
- int irq;
- void *pdata;
-};
-
-void ath724x_pci_add_data(struct ath724x_pci_data *data, int size);
-
-#endif /* __ASM_MACH_ATH79_PCI_ATH724X_H */
diff --git a/arch/mips/include/asm/mach-ath79/pci.h b/arch/mips/include/asm/mach-ath79/pci.h
new file mode 100644
index 00000000000..7868f7fa028
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath79/pci.h
@@ -0,0 +1,28 @@
+/*
+ * Atheros AR71XX/AR724X PCI support
+ *
+ * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef __ASM_MACH_ATH79_PCI_H
+#define __ASM_MACH_ATH79_PCI_H
+
+#if defined(CONFIG_PCI) && defined(CONFIG_SOC_AR71XX)
+int ar71xx_pcibios_init(void);
+#else
+static inline int ar71xx_pcibios_init(void) { return 0; }
+#endif
+
+#if defined(CONFIG_PCI_AR724X)
+int ar724x_pcibios_init(int irq);
+#else
+static inline int ar724x_pcibios_init(int irq) { return 0; }
+#endif
+
+#endif /* __ASM_MACH_ATH79_PCI_H */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
index 3d5de96d403..1d7dd96aa46 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
@@ -2,6 +2,7 @@
#define BCM63XX_GPIO_H
#include <linux/init.h>
+#include <bcm63xx_cpu.h>
int __init bcm63xx_gpio_init(void);
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
new file mode 100644
index 00000000000..318f982f04f
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
@@ -0,0 +1,23 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
+ */
+
+#ifndef _FALCON_IRQ__
+#define _FALCON_IRQ__
+
+#define INT_NUM_IRQ0 8
+#define INT_NUM_IM0_IRL0 (INT_NUM_IRQ0 + 0)
+#define INT_NUM_IM1_IRL0 (INT_NUM_IM0_IRL0 + 32)
+#define INT_NUM_IM2_IRL0 (INT_NUM_IM1_IRL0 + 32)
+#define INT_NUM_IM3_IRL0 (INT_NUM_IM2_IRL0 + 32)
+#define INT_NUM_IM4_IRL0 (INT_NUM_IM3_IRL0 + 32)
+#define INT_NUM_EXTRA_START (INT_NUM_IM4_IRL0 + 32)
+#define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0)
+
+#define MIPS_CPU_TIMER_IRQ 7
+
+#endif /* _FALCON_IRQ__ */
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/irq.h b/arch/mips/include/asm/mach-lantiq/falcon/irq.h
new file mode 100644
index 00000000000..2caccd9f9db
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/falcon/irq.h
@@ -0,0 +1,18 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2011 Thomas Langer <thomas.langer@lantiq.com>
+ */
+
+#ifndef __FALCON_IRQ_H
+#define __FALCON_IRQ_H
+
+#include <falcon_irq.h>
+
+#define NR_IRQS 328
+
+#include_next <irq.h>
+
+#endif
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
new file mode 100644
index 00000000000..b385252584e
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
@@ -0,0 +1,67 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _LTQ_FALCON_H__
+#define _LTQ_FALCON_H__
+
+#ifdef CONFIG_SOC_FALCON
+
+#include <linux/pinctrl/pinctrl.h>
+#include <lantiq.h>
+
+/* Chip IDs */
+#define SOC_ID_FALCON 0x01B8
+
+/* SoC Types */
+#define SOC_TYPE_FALCON 0x01
+
+/*
+ * during early_printk no ioremap possible at this early stage
+ * lets use KSEG1 instead
+ */
+#define LTQ_ASC0_BASE_ADDR 0x1E100C00
+#define LTQ_EARLY_ASC KSEG1ADDR(LTQ_ASC0_BASE_ADDR)
+
+/* WDT */
+#define LTQ_RST_CAUSE_WDTRST 0x0002
+
+/* CHIP ID */
+#define LTQ_STATUS_BASE_ADDR 0x1E802000
+
+#define FALCON_CHIPID ((u32 *)(KSEG1 + LTQ_STATUS_BASE_ADDR + 0x0c))
+#define FALCON_CHIPTYPE ((u32 *)(KSEG1 + LTQ_STATUS_BASE_ADDR + 0x38))
+#define FALCON_CHIPCONF ((u32 *)(KSEG1 + LTQ_STATUS_BASE_ADDR + 0x40))
+
+/* SYSCTL - start/stop/restart/configure/... different parts of the SoC */
+#define SYSCTL_SYS1 0
+#define SYSCTL_SYSETH 1
+#define SYSCTL_SYSGPE 2
+
+/* BOOT_SEL - find what boot media we have */
+#define BS_FLASH 0x1
+#define BS_SPI 0x4
+
+/* global register ranges */
+extern __iomem void *ltq_ebu_membase;
+extern __iomem void *ltq_sys1_membase;
+#define ltq_ebu_w32(x, y) ltq_w32((x), ltq_ebu_membase + (y))
+#define ltq_ebu_r32(x) ltq_r32(ltq_ebu_membase + (x))
+
+#define ltq_sys1_w32(x, y) ltq_w32((x), ltq_sys1_membase + (y))
+#define ltq_sys1_r32(x) ltq_r32(ltq_sys1_membase + (x))
+#define ltq_sys1_w32_mask(clear, set, reg) \
+ ltq_sys1_w32((ltq_sys1_r32(reg) & ~(clear)) | (set), reg)
+
+/*
+ * to keep the irq code generic we need to define this to 0 as falcon
+ * has no EIU/EBU
+ */
+#define LTQ_EBU_PCC_ISTAT 0
+
+#endif /* CONFIG_SOC_FALCON */
+#endif /* _LTQ_FALCON_H__ */
diff --git a/arch/mips/include/asm/mach-lantiq/gpio.h b/arch/mips/include/asm/mach-lantiq/gpio.h
new file mode 100644
index 00000000000..f79505b4360
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/gpio.h
@@ -0,0 +1,16 @@
+#ifndef __ASM_MIPS_MACH_LANTIQ_GPIO_H
+#define __ASM_MIPS_MACH_LANTIQ_GPIO_H
+
+static inline int gpio_to_irq(unsigned int gpio)
+{
+ return -1;
+}
+
+#define gpio_get_value __gpio_get_value
+#define gpio_set_value __gpio_set_value
+
+#define gpio_cansleep __gpio_cansleep
+
+#include <asm-generic/gpio.h>
+
+#endif
diff --git a/arch/mips/include/asm/mach-lantiq/lantiq.h b/arch/mips/include/asm/mach-lantiq/lantiq.h
index ce2f02929d2..5e8a6e96575 100644
--- a/arch/mips/include/asm/mach-lantiq/lantiq.h
+++ b/arch/mips/include/asm/mach-lantiq/lantiq.h
@@ -9,6 +9,8 @@
#define _LANTIQ_H__
#include <linux/irq.h>
+#include <linux/device.h>
+#include <linux/clk.h>
/* generic reg access functions */
#define ltq_r32(reg) __raw_readl(reg)
@@ -21,25 +23,9 @@
/* register access macros for EBU and CGU */
#define ltq_ebu_w32(x, y) ltq_w32((x), ltq_ebu_membase + (y))
#define ltq_ebu_r32(x) ltq_r32(ltq_ebu_membase + (x))
-#define ltq_cgu_w32(x, y) ltq_w32((x), ltq_cgu_membase + (y))
-#define ltq_cgu_r32(x) ltq_r32(ltq_cgu_membase + (x))
-
+#define ltq_ebu_w32_mask(x, y, z) \
+ ltq_w32_mask(x, y, ltq_ebu_membase + (z))
extern __iomem void *ltq_ebu_membase;
-extern __iomem void *ltq_cgu_membase;
-
-extern unsigned int ltq_get_cpu_ver(void);
-extern unsigned int ltq_get_soc_type(void);
-
-/* clock speeds */
-#define CLOCK_60M 60000000
-#define CLOCK_83M 83333333
-#define CLOCK_111M 111111111
-#define CLOCK_133M 133333333
-#define CLOCK_167M 166666667
-#define CLOCK_200M 200000000
-#define CLOCK_266M 266666666
-#define CLOCK_333M 333333333
-#define CLOCK_400M 400000000
/* spinlock all ebu i/o */
extern spinlock_t ebu_lock;
@@ -49,15 +35,21 @@ extern void ltq_disable_irq(struct irq_data *data);
extern void ltq_mask_and_ack_irq(struct irq_data *data);
extern void ltq_enable_irq(struct irq_data *data);
+/* clock handling */
+extern int clk_activate(struct clk *clk);
+extern void clk_deactivate(struct clk *clk);
+extern struct clk *clk_get_cpu(void);
+extern struct clk *clk_get_fpi(void);
+extern struct clk *clk_get_io(void);
+
+/* find out what bootsource we have */
+extern unsigned char ltq_boot_select(void);
/* find out what caused the last cpu reset */
extern int ltq_reset_cause(void);
-#define LTQ_RST_CAUSE_WDTRST 0x20
#define IOPORT_RESOURCE_START 0x10000000
#define IOPORT_RESOURCE_END 0xffffffff
#define IOMEM_RESOURCE_START 0x10000000
#define IOMEM_RESOURCE_END 0xffffffff
-#define LTQ_FLASH_START 0x10000000
-#define LTQ_FLASH_MAX 0x04000000
#endif
diff --git a/arch/mips/include/asm/mach-lantiq/lantiq_platform.h b/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
index a305f1d0259..e23bf7c9a2d 100644
--- a/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
+++ b/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
@@ -9,41 +9,8 @@
#ifndef _LANTIQ_PLATFORM_H__
#define _LANTIQ_PLATFORM_H__
-#include <linux/mtd/partitions.h>
#include <linux/socket.h>
-/* struct used to pass info to the pci core */
-enum {
- PCI_CLOCK_INT = 0,
- PCI_CLOCK_EXT
-};
-
-#define PCI_EXIN0 0x0001
-#define PCI_EXIN1 0x0002
-#define PCI_EXIN2 0x0004
-#define PCI_EXIN3 0x0008
-#define PCI_EXIN4 0x0010
-#define PCI_EXIN5 0x0020
-#define PCI_EXIN_MAX 6
-
-#define PCI_GNT1 0x0040
-#define PCI_GNT2 0x0080
-#define PCI_GNT3 0x0100
-#define PCI_GNT4 0x0200
-
-#define PCI_REQ1 0x0400
-#define PCI_REQ2 0x0800
-#define PCI_REQ3 0x1000
-#define PCI_REQ4 0x2000
-#define PCI_REQ_SHIFT 10
-#define PCI_REQ_MASK 0xf
-
-struct ltq_pci_data {
- int clock;
- int gpio;
- int irq[16];
-};
-
/* struct used to pass info to network drivers */
struct ltq_eth_data {
struct sockaddr mac;
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
index b4465a888e2..aa0b3b866f8 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
@@ -17,50 +17,8 @@
#define INT_NUM_IM4_IRL0 (INT_NUM_IRQ0 + 128)
#define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0)
-#define LTQ_ASC_TIR(x) (INT_NUM_IM3_IRL0 + (x * 8))
-#define LTQ_ASC_RIR(x) (INT_NUM_IM3_IRL0 + (x * 8) + 1)
-#define LTQ_ASC_EIR(x) (INT_NUM_IM3_IRL0 + (x * 8) + 2)
-
-#define LTQ_ASC_ASE_TIR INT_NUM_IM2_IRL0
-#define LTQ_ASC_ASE_RIR (INT_NUM_IM2_IRL0 + 2)
-#define LTQ_ASC_ASE_EIR (INT_NUM_IM2_IRL0 + 3)
-
-#define LTQ_SSC_TIR (INT_NUM_IM0_IRL0 + 15)
-#define LTQ_SSC_RIR (INT_NUM_IM0_IRL0 + 14)
-#define LTQ_SSC_EIR (INT_NUM_IM0_IRL0 + 16)
-
-#define LTQ_MEI_DYING_GASP_INT (INT_NUM_IM1_IRL0 + 21)
-#define LTQ_MEI_INT (INT_NUM_IM1_IRL0 + 23)
-
-#define LTQ_TIMER6_INT (INT_NUM_IM1_IRL0 + 23)
-#define LTQ_USB_INT (INT_NUM_IM1_IRL0 + 22)
-#define LTQ_USB_OC_INT (INT_NUM_IM4_IRL0 + 23)
-
-#define MIPS_CPU_TIMER_IRQ 7
-
#define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0)
-#define LTQ_DMA_CH1_INT (INT_NUM_IM2_IRL0 + 1)
-#define LTQ_DMA_CH2_INT (INT_NUM_IM2_IRL0 + 2)
-#define LTQ_DMA_CH3_INT (INT_NUM_IM2_IRL0 + 3)
-#define LTQ_DMA_CH4_INT (INT_NUM_IM2_IRL0 + 4)
-#define LTQ_DMA_CH5_INT (INT_NUM_IM2_IRL0 + 5)
-#define LTQ_DMA_CH6_INT (INT_NUM_IM2_IRL0 + 6)
-#define LTQ_DMA_CH7_INT (INT_NUM_IM2_IRL0 + 7)
-#define LTQ_DMA_CH8_INT (INT_NUM_IM2_IRL0 + 8)
-#define LTQ_DMA_CH9_INT (INT_NUM_IM2_IRL0 + 9)
-#define LTQ_DMA_CH10_INT (INT_NUM_IM2_IRL0 + 10)
-#define LTQ_DMA_CH11_INT (INT_NUM_IM2_IRL0 + 11)
-#define LTQ_DMA_CH12_INT (INT_NUM_IM2_IRL0 + 25)
-#define LTQ_DMA_CH13_INT (INT_NUM_IM2_IRL0 + 26)
-#define LTQ_DMA_CH14_INT (INT_NUM_IM2_IRL0 + 27)
-#define LTQ_DMA_CH15_INT (INT_NUM_IM2_IRL0 + 28)
-#define LTQ_DMA_CH16_INT (INT_NUM_IM2_IRL0 + 29)
-#define LTQ_DMA_CH17_INT (INT_NUM_IM2_IRL0 + 30)
-#define LTQ_DMA_CH18_INT (INT_NUM_IM2_IRL0 + 16)
-#define LTQ_DMA_CH19_INT (INT_NUM_IM2_IRL0 + 21)
-
-#define LTQ_PPE_MBOX_INT (INT_NUM_IM2_IRL0 + 24)
-#define INT_NUM_IM4_IRL14 (INT_NUM_IM4_IRL0 + 14)
+#define MIPS_CPU_TIMER_IRQ 7
#endif
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
index 8a3c6be669d..6a2df709c57 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
@@ -17,38 +17,56 @@
#define SOC_ID_DANUBE1 0x129
#define SOC_ID_DANUBE2 0x12B
#define SOC_ID_TWINPASS 0x12D
-#define SOC_ID_AMAZON_SE 0x152
+#define SOC_ID_AMAZON_SE_1 0x152 /* 50601 */
+#define SOC_ID_AMAZON_SE_2 0x153 /* 50600 */
#define SOC_ID_ARX188 0x16C
-#define SOC_ID_ARX168 0x16D
+#define SOC_ID_ARX168_1 0x16D
+#define SOC_ID_ARX168_2 0x16E
#define SOC_ID_ARX182 0x16F
-
-/* SoC Types */
+#define SOC_ID_GRX188 0x170
+#define SOC_ID_GRX168 0x171
+
+#define SOC_ID_VRX288 0x1C0 /* v1.1 */
+#define SOC_ID_VRX282 0x1C1 /* v1.1 */
+#define SOC_ID_VRX268 0x1C2 /* v1.1 */
+#define SOC_ID_GRX268 0x1C8 /* v1.1 */
+#define SOC_ID_GRX288 0x1C9 /* v1.1 */
+#define SOC_ID_VRX288_2 0x00B /* v1.2 */
+#define SOC_ID_VRX268_2 0x00C /* v1.2 */
+#define SOC_ID_GRX288_2 0x00D /* v1.2 */
+#define SOC_ID_GRX282_2 0x00E /* v1.2 */
+
+ /* SoC Types */
#define SOC_TYPE_DANUBE 0x01
#define SOC_TYPE_TWINPASS 0x02
#define SOC_TYPE_AR9 0x03
-#define SOC_TYPE_VR9 0x04
-#define SOC_TYPE_AMAZON_SE 0x05
+#define SOC_TYPE_VR9 0x04 /* v1.1 */
+#define SOC_TYPE_VR9_2 0x05 /* v1.2 */
+#define SOC_TYPE_AMAZON_SE 0x06
+
+/* BOOT_SEL - find what boot media we have */
+#define BS_EXT_ROM 0x0
+#define BS_FLASH 0x1
+#define BS_MII0 0x2
+#define BS_PCI 0x3
+#define BS_UART1 0x4
+#define BS_SPI 0x5
+#define BS_NAND 0x6
+#define BS_RMII0 0x7
+
+/* helpers used to access the cgu */
+#define ltq_cgu_w32(x, y) ltq_w32((x), ltq_cgu_membase + (y))
+#define ltq_cgu_r32(x) ltq_r32(ltq_cgu_membase + (x))
+extern __iomem void *ltq_cgu_membase;
-/* ASC0/1 - serial port */
-#define LTQ_ASC0_BASE_ADDR 0x1E100400
+/*
+ * during early_printk no ioremap is possible,
+ * so let's use KSEG1 instead
+ */
#define LTQ_ASC1_BASE_ADDR 0x1E100C00
-#define LTQ_ASC_SIZE 0x400
-
-/* RCU - reset control unit */
-#define LTQ_RCU_BASE_ADDR 0x1F203000
-#define LTQ_RCU_SIZE 0x1000
-
-/* GPTU - general purpose timer unit */
-#define LTQ_GPTU_BASE_ADDR 0x18000300
-#define LTQ_GPTU_SIZE 0x100
+#define LTQ_EARLY_ASC KSEG1ADDR(LTQ_ASC1_BASE_ADDR)
/* EBU - external bus unit */
-#define LTQ_EBU_GPIO_START 0x14000000
-#define LTQ_EBU_GPIO_SIZE 0x1000
-
-#define LTQ_EBU_BASE_ADDR 0x1E105300
-#define LTQ_EBU_SIZE 0x100
-
#define LTQ_EBU_BUSCON0 0x0060
#define LTQ_EBU_PCC_CON 0x0090
#define LTQ_EBU_PCC_IEN 0x00A4
@@ -57,85 +75,17 @@
#define LTQ_EBU_ADDRSEL1 0x0024
#define EBU_WRDIS 0x80000000
-/* CGU - clock generation unit */
-#define LTQ_CGU_BASE_ADDR 0x1F103000
-#define LTQ_CGU_SIZE 0x1000
-
-/* ICU - interrupt control unit */
-#define LTQ_ICU_BASE_ADDR 0x1F880200
-#define LTQ_ICU_SIZE 0x100
-
-/* EIU - external interrupt unit */
-#define LTQ_EIU_BASE_ADDR 0x1F101000
-#define LTQ_EIU_SIZE 0x1000
-
-/* PMU - power management unit */
-#define LTQ_PMU_BASE_ADDR 0x1F102000
-#define LTQ_PMU_SIZE 0x1000
-
-#define PMU_DMA 0x0020
-#define PMU_USB 0x8041
-#define PMU_LED 0x0800
-#define PMU_GPT 0x1000
-#define PMU_PPE 0x2000
-#define PMU_FPI 0x4000
-#define PMU_SWITCH 0x10000000
-
-/* ETOP - ethernet */
-#define LTQ_ETOP_BASE_ADDR 0x1E180000
-#define LTQ_ETOP_SIZE 0x40000
-
-/* DMA */
-#define LTQ_DMA_BASE_ADDR 0x1E104100
-#define LTQ_DMA_SIZE 0x800
-
-/* PCI */
-#define PCI_CR_BASE_ADDR 0x1E105400
-#define PCI_CR_SIZE 0x400
-
/* WDT */
-#define LTQ_WDT_BASE_ADDR 0x1F8803F0
-#define LTQ_WDT_SIZE 0x10
-
-/* STP - serial to parallel conversion unit */
-#define LTQ_STP_BASE_ADDR 0x1E100BB0
-#define LTQ_STP_SIZE 0x40
-
-/* GPIO */
-#define LTQ_GPIO0_BASE_ADDR 0x1E100B10
-#define LTQ_GPIO1_BASE_ADDR 0x1E100B40
-#define LTQ_GPIO2_BASE_ADDR 0x1E100B70
-#define LTQ_GPIO_SIZE 0x30
-
-/* SSC */
-#define LTQ_SSC_BASE_ADDR 0x1e100800
-#define LTQ_SSC_SIZE 0x100
-
-/* MEI - dsl core */
-#define LTQ_MEI_BASE_ADDR 0x1E116000
-
-/* DEU - data encryption unit */
-#define LTQ_DEU_BASE_ADDR 0x1E103100
+#define LTQ_RST_CAUSE_WDTRST 0x20
/* MPS - multi processor unit (voice) */
#define LTQ_MPS_BASE_ADDR (KSEG1 + 0x1F107000)
#define LTQ_MPS_CHIPID ((u32 *)(LTQ_MPS_BASE_ADDR + 0x0344))
/* request a non-gpio and set the PIO config */
-extern int ltq_gpio_request(unsigned int pin, unsigned int alt0,
- unsigned int alt1, unsigned int dir, const char *name);
+#define PMU_PPE BIT(13)
extern void ltq_pmu_enable(unsigned int module);
extern void ltq_pmu_disable(unsigned int module);
-static inline int ltq_is_ar9(void)
-{
- return (ltq_get_soc_type() == SOC_TYPE_AR9);
-}
-
-static inline int ltq_is_vr9(void)
-{
- return (ltq_get_soc_type() == SOC_TYPE_VR9);
-}
-
#endif /* CONFIG_SOC_TYPE_XWAY */
#endif /* _LTQ_XWAY_H__ */
diff --git a/arch/mips/include/asm/mips-boards/generic.h b/arch/mips/include/asm/mips-boards/generic.h
index 46c08563e53..6e23ceb0ba8 100644
--- a/arch/mips/include/asm/mips-boards/generic.h
+++ b/arch/mips/include/asm/mips-boards/generic.h
@@ -93,8 +93,4 @@ extern void mips_pcibios_init(void);
#define mips_pcibios_init() do { } while (0)
#endif
-#ifdef CONFIG_KGDB
-extern void kgdb_config(void);
-#endif
-
#endif /* __ASM_MIPS_BOARDS_GENERIC_H */
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index 7467d1d933d..530008048c6 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -2,6 +2,7 @@
#define _ASM_MODULE_H
#include <linux/list.h>
+#include <linux/elf.h>
#include <asm/uaccess.h>
struct mod_arch_specific {
diff --git a/arch/mips/include/asm/octeon/cvmx-pcieep-defs.h b/arch/mips/include/asm/octeon/cvmx-pcieep-defs.h
deleted file mode 100644
index d553f8e88df..00000000000
--- a/arch/mips/include/asm/octeon/cvmx-pcieep-defs.h
+++ /dev/null
@@ -1,1365 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_PCIEEP_DEFS_H__
-#define __CVMX_PCIEEP_DEFS_H__
-
-#define CVMX_PCIEEP_CFG000 \
- (0x0000000000000000ull)
-#define CVMX_PCIEEP_CFG001 \
- (0x0000000000000004ull)
-#define CVMX_PCIEEP_CFG002 \
- (0x0000000000000008ull)
-#define CVMX_PCIEEP_CFG003 \
- (0x000000000000000Cull)
-#define CVMX_PCIEEP_CFG004 \
- (0x0000000000000010ull)
-#define CVMX_PCIEEP_CFG004_MASK \
- (0x0000000080000010ull)
-#define CVMX_PCIEEP_CFG005 \
- (0x0000000000000014ull)
-#define CVMX_PCIEEP_CFG005_MASK \
- (0x0000000080000014ull)
-#define CVMX_PCIEEP_CFG006 \
- (0x0000000000000018ull)
-#define CVMX_PCIEEP_CFG006_MASK \
- (0x0000000080000018ull)
-#define CVMX_PCIEEP_CFG007 \
- (0x000000000000001Cull)
-#define CVMX_PCIEEP_CFG007_MASK \
- (0x000000008000001Cull)
-#define CVMX_PCIEEP_CFG008 \
- (0x0000000000000020ull)
-#define CVMX_PCIEEP_CFG008_MASK \
- (0x0000000080000020ull)
-#define CVMX_PCIEEP_CFG009 \
- (0x0000000000000024ull)
-#define CVMX_PCIEEP_CFG009_MASK \
- (0x0000000080000024ull)
-#define CVMX_PCIEEP_CFG010 \
- (0x0000000000000028ull)
-#define CVMX_PCIEEP_CFG011 \
- (0x000000000000002Cull)
-#define CVMX_PCIEEP_CFG012 \
- (0x0000000000000030ull)
-#define CVMX_PCIEEP_CFG012_MASK \
- (0x0000000080000030ull)
-#define CVMX_PCIEEP_CFG013 \
- (0x0000000000000034ull)
-#define CVMX_PCIEEP_CFG015 \
- (0x000000000000003Cull)
-#define CVMX_PCIEEP_CFG016 \
- (0x0000000000000040ull)
-#define CVMX_PCIEEP_CFG017 \
- (0x0000000000000044ull)
-#define CVMX_PCIEEP_CFG020 \
- (0x0000000000000050ull)
-#define CVMX_PCIEEP_CFG021 \
- (0x0000000000000054ull)
-#define CVMX_PCIEEP_CFG022 \
- (0x0000000000000058ull)
-#define CVMX_PCIEEP_CFG023 \
- (0x000000000000005Cull)
-#define CVMX_PCIEEP_CFG028 \
- (0x0000000000000070ull)
-#define CVMX_PCIEEP_CFG029 \
- (0x0000000000000074ull)
-#define CVMX_PCIEEP_CFG030 \
- (0x0000000000000078ull)
-#define CVMX_PCIEEP_CFG031 \
- (0x000000000000007Cull)
-#define CVMX_PCIEEP_CFG032 \
- (0x0000000000000080ull)
-#define CVMX_PCIEEP_CFG033 \
- (0x0000000000000084ull)
-#define CVMX_PCIEEP_CFG034 \
- (0x0000000000000088ull)
-#define CVMX_PCIEEP_CFG037 \
- (0x0000000000000094ull)
-#define CVMX_PCIEEP_CFG038 \
- (0x0000000000000098ull)
-#define CVMX_PCIEEP_CFG039 \
- (0x000000000000009Cull)
-#define CVMX_PCIEEP_CFG040 \
- (0x00000000000000A0ull)
-#define CVMX_PCIEEP_CFG041 \
- (0x00000000000000A4ull)
-#define CVMX_PCIEEP_CFG042 \
- (0x00000000000000A8ull)
-#define CVMX_PCIEEP_CFG064 \
- (0x0000000000000100ull)
-#define CVMX_PCIEEP_CFG065 \
- (0x0000000000000104ull)
-#define CVMX_PCIEEP_CFG066 \
- (0x0000000000000108ull)
-#define CVMX_PCIEEP_CFG067 \
- (0x000000000000010Cull)
-#define CVMX_PCIEEP_CFG068 \
- (0x0000000000000110ull)
-#define CVMX_PCIEEP_CFG069 \
- (0x0000000000000114ull)
-#define CVMX_PCIEEP_CFG070 \
- (0x0000000000000118ull)
-#define CVMX_PCIEEP_CFG071 \
- (0x000000000000011Cull)
-#define CVMX_PCIEEP_CFG072 \
- (0x0000000000000120ull)
-#define CVMX_PCIEEP_CFG073 \
- (0x0000000000000124ull)
-#define CVMX_PCIEEP_CFG074 \
- (0x0000000000000128ull)
-#define CVMX_PCIEEP_CFG448 \
- (0x0000000000000700ull)
-#define CVMX_PCIEEP_CFG449 \
- (0x0000000000000704ull)
-#define CVMX_PCIEEP_CFG450 \
- (0x0000000000000708ull)
-#define CVMX_PCIEEP_CFG451 \
- (0x000000000000070Cull)
-#define CVMX_PCIEEP_CFG452 \
- (0x0000000000000710ull)
-#define CVMX_PCIEEP_CFG453 \
- (0x0000000000000714ull)
-#define CVMX_PCIEEP_CFG454 \
- (0x0000000000000718ull)
-#define CVMX_PCIEEP_CFG455 \
- (0x000000000000071Cull)
-#define CVMX_PCIEEP_CFG456 \
- (0x0000000000000720ull)
-#define CVMX_PCIEEP_CFG458 \
- (0x0000000000000728ull)
-#define CVMX_PCIEEP_CFG459 \
- (0x000000000000072Cull)
-#define CVMX_PCIEEP_CFG460 \
- (0x0000000000000730ull)
-#define CVMX_PCIEEP_CFG461 \
- (0x0000000000000734ull)
-#define CVMX_PCIEEP_CFG462 \
- (0x0000000000000738ull)
-#define CVMX_PCIEEP_CFG463 \
- (0x000000000000073Cull)
-#define CVMX_PCIEEP_CFG464 \
- (0x0000000000000740ull)
-#define CVMX_PCIEEP_CFG465 \
- (0x0000000000000744ull)
-#define CVMX_PCIEEP_CFG466 \
- (0x0000000000000748ull)
-#define CVMX_PCIEEP_CFG467 \
- (0x000000000000074Cull)
-#define CVMX_PCIEEP_CFG468 \
- (0x0000000000000750ull)
-#define CVMX_PCIEEP_CFG490 \
- (0x00000000000007A8ull)
-#define CVMX_PCIEEP_CFG491 \
- (0x00000000000007ACull)
-#define CVMX_PCIEEP_CFG492 \
- (0x00000000000007B0ull)
-#define CVMX_PCIEEP_CFG516 \
- (0x0000000000000810ull)
-#define CVMX_PCIEEP_CFG517 \
- (0x0000000000000814ull)
-
-union cvmx_pcieep_cfg000 {
- uint32_t u32;
- struct cvmx_pcieep_cfg000_s {
- uint32_t devid:16;
- uint32_t vendid:16;
- } s;
- struct cvmx_pcieep_cfg000_s cn52xx;
- struct cvmx_pcieep_cfg000_s cn52xxp1;
- struct cvmx_pcieep_cfg000_s cn56xx;
- struct cvmx_pcieep_cfg000_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg001 {
- uint32_t u32;
- struct cvmx_pcieep_cfg001_s {
- uint32_t dpe:1;
- uint32_t sse:1;
- uint32_t rma:1;
- uint32_t rta:1;
- uint32_t sta:1;
- uint32_t devt:2;
- uint32_t mdpe:1;
- uint32_t fbb:1;
- uint32_t reserved_22_22:1;
- uint32_t m66:1;
- uint32_t cl:1;
- uint32_t i_stat:1;
- uint32_t reserved_11_18:8;
- uint32_t i_dis:1;
- uint32_t fbbe:1;
- uint32_t see:1;
- uint32_t ids_wcc:1;
- uint32_t per:1;
- uint32_t vps:1;
- uint32_t mwice:1;
- uint32_t scse:1;
- uint32_t me:1;
- uint32_t msae:1;
- uint32_t isae:1;
- } s;
- struct cvmx_pcieep_cfg001_s cn52xx;
- struct cvmx_pcieep_cfg001_s cn52xxp1;
- struct cvmx_pcieep_cfg001_s cn56xx;
- struct cvmx_pcieep_cfg001_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg002 {
- uint32_t u32;
- struct cvmx_pcieep_cfg002_s {
- uint32_t bcc:8;
- uint32_t sc:8;
- uint32_t pi:8;
- uint32_t rid:8;
- } s;
- struct cvmx_pcieep_cfg002_s cn52xx;
- struct cvmx_pcieep_cfg002_s cn52xxp1;
- struct cvmx_pcieep_cfg002_s cn56xx;
- struct cvmx_pcieep_cfg002_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg003 {
- uint32_t u32;
- struct cvmx_pcieep_cfg003_s {
- uint32_t bist:8;
- uint32_t mfd:1;
- uint32_t chf:7;
- uint32_t lt:8;
- uint32_t cls:8;
- } s;
- struct cvmx_pcieep_cfg003_s cn52xx;
- struct cvmx_pcieep_cfg003_s cn52xxp1;
- struct cvmx_pcieep_cfg003_s cn56xx;
- struct cvmx_pcieep_cfg003_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg004 {
- uint32_t u32;
- struct cvmx_pcieep_cfg004_s {
- uint32_t lbab:18;
- uint32_t reserved_4_13:10;
- uint32_t pf:1;
- uint32_t typ:2;
- uint32_t mspc:1;
- } s;
- struct cvmx_pcieep_cfg004_s cn52xx;
- struct cvmx_pcieep_cfg004_s cn52xxp1;
- struct cvmx_pcieep_cfg004_s cn56xx;
- struct cvmx_pcieep_cfg004_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg004_mask {
- uint32_t u32;
- struct cvmx_pcieep_cfg004_mask_s {
- uint32_t lmask:31;
- uint32_t enb:1;
- } s;
- struct cvmx_pcieep_cfg004_mask_s cn52xx;
- struct cvmx_pcieep_cfg004_mask_s cn52xxp1;
- struct cvmx_pcieep_cfg004_mask_s cn56xx;
- struct cvmx_pcieep_cfg004_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg005 {
- uint32_t u32;
- struct cvmx_pcieep_cfg005_s {
- uint32_t ubab:32;
- } s;
- struct cvmx_pcieep_cfg005_s cn52xx;
- struct cvmx_pcieep_cfg005_s cn52xxp1;
- struct cvmx_pcieep_cfg005_s cn56xx;
- struct cvmx_pcieep_cfg005_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg005_mask {
- uint32_t u32;
- struct cvmx_pcieep_cfg005_mask_s {
- uint32_t umask:32;
- } s;
- struct cvmx_pcieep_cfg005_mask_s cn52xx;
- struct cvmx_pcieep_cfg005_mask_s cn52xxp1;
- struct cvmx_pcieep_cfg005_mask_s cn56xx;
- struct cvmx_pcieep_cfg005_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg006 {
- uint32_t u32;
- struct cvmx_pcieep_cfg006_s {
- uint32_t lbab:6;
- uint32_t reserved_4_25:22;
- uint32_t pf:1;
- uint32_t typ:2;
- uint32_t mspc:1;
- } s;
- struct cvmx_pcieep_cfg006_s cn52xx;
- struct cvmx_pcieep_cfg006_s cn52xxp1;
- struct cvmx_pcieep_cfg006_s cn56xx;
- struct cvmx_pcieep_cfg006_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg006_mask {
- uint32_t u32;
- struct cvmx_pcieep_cfg006_mask_s {
- uint32_t lmask:31;
- uint32_t enb:1;
- } s;
- struct cvmx_pcieep_cfg006_mask_s cn52xx;
- struct cvmx_pcieep_cfg006_mask_s cn52xxp1;
- struct cvmx_pcieep_cfg006_mask_s cn56xx;
- struct cvmx_pcieep_cfg006_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg007 {
- uint32_t u32;
- struct cvmx_pcieep_cfg007_s {
- uint32_t ubab:32;
- } s;
- struct cvmx_pcieep_cfg007_s cn52xx;
- struct cvmx_pcieep_cfg007_s cn52xxp1;
- struct cvmx_pcieep_cfg007_s cn56xx;
- struct cvmx_pcieep_cfg007_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg007_mask {
- uint32_t u32;
- struct cvmx_pcieep_cfg007_mask_s {
- uint32_t umask:32;
- } s;
- struct cvmx_pcieep_cfg007_mask_s cn52xx;
- struct cvmx_pcieep_cfg007_mask_s cn52xxp1;
- struct cvmx_pcieep_cfg007_mask_s cn56xx;
- struct cvmx_pcieep_cfg007_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg008 {
- uint32_t u32;
- struct cvmx_pcieep_cfg008_s {
- uint32_t reserved_4_31:28;
- uint32_t pf:1;
- uint32_t typ:2;
- uint32_t mspc:1;
- } s;
- struct cvmx_pcieep_cfg008_s cn52xx;
- struct cvmx_pcieep_cfg008_s cn52xxp1;
- struct cvmx_pcieep_cfg008_s cn56xx;
- struct cvmx_pcieep_cfg008_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg008_mask {
- uint32_t u32;
- struct cvmx_pcieep_cfg008_mask_s {
- uint32_t lmask:31;
- uint32_t enb:1;
- } s;
- struct cvmx_pcieep_cfg008_mask_s cn52xx;
- struct cvmx_pcieep_cfg008_mask_s cn52xxp1;
- struct cvmx_pcieep_cfg008_mask_s cn56xx;
- struct cvmx_pcieep_cfg008_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg009 {
- uint32_t u32;
- struct cvmx_pcieep_cfg009_s {
- uint32_t ubab:25;
- uint32_t reserved_0_6:7;
- } s;
- struct cvmx_pcieep_cfg009_s cn52xx;
- struct cvmx_pcieep_cfg009_s cn52xxp1;
- struct cvmx_pcieep_cfg009_s cn56xx;
- struct cvmx_pcieep_cfg009_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg009_mask {
- uint32_t u32;
- struct cvmx_pcieep_cfg009_mask_s {
- uint32_t umask:32;
- } s;
- struct cvmx_pcieep_cfg009_mask_s cn52xx;
- struct cvmx_pcieep_cfg009_mask_s cn52xxp1;
- struct cvmx_pcieep_cfg009_mask_s cn56xx;
- struct cvmx_pcieep_cfg009_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg010 {
- uint32_t u32;
- struct cvmx_pcieep_cfg010_s {
- uint32_t cisp:32;
- } s;
- struct cvmx_pcieep_cfg010_s cn52xx;
- struct cvmx_pcieep_cfg010_s cn52xxp1;
- struct cvmx_pcieep_cfg010_s cn56xx;
- struct cvmx_pcieep_cfg010_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg011 {
- uint32_t u32;
- struct cvmx_pcieep_cfg011_s {
- uint32_t ssid:16;
- uint32_t ssvid:16;
- } s;
- struct cvmx_pcieep_cfg011_s cn52xx;
- struct cvmx_pcieep_cfg011_s cn52xxp1;
- struct cvmx_pcieep_cfg011_s cn56xx;
- struct cvmx_pcieep_cfg011_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg012 {
- uint32_t u32;
- struct cvmx_pcieep_cfg012_s {
- uint32_t eraddr:16;
- uint32_t reserved_1_15:15;
- uint32_t er_en:1;
- } s;
- struct cvmx_pcieep_cfg012_s cn52xx;
- struct cvmx_pcieep_cfg012_s cn52xxp1;
- struct cvmx_pcieep_cfg012_s cn56xx;
- struct cvmx_pcieep_cfg012_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg012_mask {
- uint32_t u32;
- struct cvmx_pcieep_cfg012_mask_s {
- uint32_t mask:31;
- uint32_t enb:1;
- } s;
- struct cvmx_pcieep_cfg012_mask_s cn52xx;
- struct cvmx_pcieep_cfg012_mask_s cn52xxp1;
- struct cvmx_pcieep_cfg012_mask_s cn56xx;
- struct cvmx_pcieep_cfg012_mask_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg013 {
- uint32_t u32;
- struct cvmx_pcieep_cfg013_s {
- uint32_t reserved_8_31:24;
- uint32_t cp:8;
- } s;
- struct cvmx_pcieep_cfg013_s cn52xx;
- struct cvmx_pcieep_cfg013_s cn52xxp1;
- struct cvmx_pcieep_cfg013_s cn56xx;
- struct cvmx_pcieep_cfg013_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg015 {
- uint32_t u32;
- struct cvmx_pcieep_cfg015_s {
- uint32_t ml:8;
- uint32_t mg:8;
- uint32_t inta:8;
- uint32_t il:8;
- } s;
- struct cvmx_pcieep_cfg015_s cn52xx;
- struct cvmx_pcieep_cfg015_s cn52xxp1;
- struct cvmx_pcieep_cfg015_s cn56xx;
- struct cvmx_pcieep_cfg015_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg016 {
- uint32_t u32;
- struct cvmx_pcieep_cfg016_s {
- uint32_t pmes:5;
- uint32_t d2s:1;
- uint32_t d1s:1;
- uint32_t auxc:3;
- uint32_t dsi:1;
- uint32_t reserved_20_20:1;
- uint32_t pme_clock:1;
- uint32_t pmsv:3;
- uint32_t ncp:8;
- uint32_t pmcid:8;
- } s;
- struct cvmx_pcieep_cfg016_s cn52xx;
- struct cvmx_pcieep_cfg016_s cn52xxp1;
- struct cvmx_pcieep_cfg016_s cn56xx;
- struct cvmx_pcieep_cfg016_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg017 {
- uint32_t u32;
- struct cvmx_pcieep_cfg017_s {
- uint32_t pmdia:8;
- uint32_t bpccee:1;
- uint32_t bd3h:1;
- uint32_t reserved_16_21:6;
- uint32_t pmess:1;
- uint32_t pmedsia:2;
- uint32_t pmds:4;
- uint32_t pmeens:1;
- uint32_t reserved_4_7:4;
- uint32_t nsr:1;
- uint32_t reserved_2_2:1;
- uint32_t ps:2;
- } s;
- struct cvmx_pcieep_cfg017_s cn52xx;
- struct cvmx_pcieep_cfg017_s cn52xxp1;
- struct cvmx_pcieep_cfg017_s cn56xx;
- struct cvmx_pcieep_cfg017_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg020 {
- uint32_t u32;
- struct cvmx_pcieep_cfg020_s {
- uint32_t reserved_24_31:8;
- uint32_t m64:1;
- uint32_t mme:3;
- uint32_t mmc:3;
- uint32_t msien:1;
- uint32_t ncp:8;
- uint32_t msicid:8;
- } s;
- struct cvmx_pcieep_cfg020_s cn52xx;
- struct cvmx_pcieep_cfg020_s cn52xxp1;
- struct cvmx_pcieep_cfg020_s cn56xx;
- struct cvmx_pcieep_cfg020_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg021 {
- uint32_t u32;
- struct cvmx_pcieep_cfg021_s {
- uint32_t lmsi:30;
- uint32_t reserved_0_1:2;
- } s;
- struct cvmx_pcieep_cfg021_s cn52xx;
- struct cvmx_pcieep_cfg021_s cn52xxp1;
- struct cvmx_pcieep_cfg021_s cn56xx;
- struct cvmx_pcieep_cfg021_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg022 {
- uint32_t u32;
- struct cvmx_pcieep_cfg022_s {
- uint32_t umsi:32;
- } s;
- struct cvmx_pcieep_cfg022_s cn52xx;
- struct cvmx_pcieep_cfg022_s cn52xxp1;
- struct cvmx_pcieep_cfg022_s cn56xx;
- struct cvmx_pcieep_cfg022_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg023 {
- uint32_t u32;
- struct cvmx_pcieep_cfg023_s {
- uint32_t reserved_16_31:16;
- uint32_t msimd:16;
- } s;
- struct cvmx_pcieep_cfg023_s cn52xx;
- struct cvmx_pcieep_cfg023_s cn52xxp1;
- struct cvmx_pcieep_cfg023_s cn56xx;
- struct cvmx_pcieep_cfg023_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg028 {
- uint32_t u32;
- struct cvmx_pcieep_cfg028_s {
- uint32_t reserved_30_31:2;
- uint32_t imn:5;
- uint32_t si:1;
- uint32_t dpt:4;
- uint32_t pciecv:4;
- uint32_t ncp:8;
- uint32_t pcieid:8;
- } s;
- struct cvmx_pcieep_cfg028_s cn52xx;
- struct cvmx_pcieep_cfg028_s cn52xxp1;
- struct cvmx_pcieep_cfg028_s cn56xx;
- struct cvmx_pcieep_cfg028_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg029 {
- uint32_t u32;
- struct cvmx_pcieep_cfg029_s {
- uint32_t reserved_28_31:4;
- uint32_t cspls:2;
- uint32_t csplv:8;
- uint32_t reserved_16_17:2;
- uint32_t rber:1;
- uint32_t reserved_12_14:3;
- uint32_t el1al:3;
- uint32_t el0al:3;
- uint32_t etfs:1;
- uint32_t pfs:2;
- uint32_t mpss:3;
- } s;
- struct cvmx_pcieep_cfg029_s cn52xx;
- struct cvmx_pcieep_cfg029_s cn52xxp1;
- struct cvmx_pcieep_cfg029_s cn56xx;
- struct cvmx_pcieep_cfg029_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg030 {
- uint32_t u32;
- struct cvmx_pcieep_cfg030_s {
- uint32_t reserved_22_31:10;
- uint32_t tp:1;
- uint32_t ap_d:1;
- uint32_t ur_d:1;
- uint32_t fe_d:1;
- uint32_t nfe_d:1;
- uint32_t ce_d:1;
- uint32_t reserved_15_15:1;
- uint32_t mrrs:3;
- uint32_t ns_en:1;
- uint32_t ap_en:1;
- uint32_t pf_en:1;
- uint32_t etf_en:1;
- uint32_t mps:3;
- uint32_t ro_en:1;
- uint32_t ur_en:1;
- uint32_t fe_en:1;
- uint32_t nfe_en:1;
- uint32_t ce_en:1;
- } s;
- struct cvmx_pcieep_cfg030_s cn52xx;
- struct cvmx_pcieep_cfg030_s cn52xxp1;
- struct cvmx_pcieep_cfg030_s cn56xx;
- struct cvmx_pcieep_cfg030_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg031 {
- uint32_t u32;
- struct cvmx_pcieep_cfg031_s {
- uint32_t pnum:8;
- uint32_t reserved_22_23:2;
- uint32_t lbnc:1;
- uint32_t dllarc:1;
- uint32_t sderc:1;
- uint32_t cpm:1;
- uint32_t l1el:3;
- uint32_t l0el:3;
- uint32_t aslpms:2;
- uint32_t mlw:6;
- uint32_t mls:4;
- } s;
- struct cvmx_pcieep_cfg031_s cn52xx;
- struct cvmx_pcieep_cfg031_s cn52xxp1;
- struct cvmx_pcieep_cfg031_s cn56xx;
- struct cvmx_pcieep_cfg031_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg032 {
- uint32_t u32;
- struct cvmx_pcieep_cfg032_s {
- uint32_t reserved_30_31:2;
- uint32_t dlla:1;
- uint32_t scc:1;
- uint32_t lt:1;
- uint32_t reserved_26_26:1;
- uint32_t nlw:6;
- uint32_t ls:4;
- uint32_t reserved_10_15:6;
- uint32_t hawd:1;
- uint32_t ecpm:1;
- uint32_t es:1;
- uint32_t ccc:1;
- uint32_t rl:1;
- uint32_t ld:1;
- uint32_t rcb:1;
- uint32_t reserved_2_2:1;
- uint32_t aslpc:2;
- } s;
- struct cvmx_pcieep_cfg032_s cn52xx;
- struct cvmx_pcieep_cfg032_s cn52xxp1;
- struct cvmx_pcieep_cfg032_s cn56xx;
- struct cvmx_pcieep_cfg032_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg033 {
- uint32_t u32;
- struct cvmx_pcieep_cfg033_s {
- uint32_t ps_num:13;
- uint32_t nccs:1;
- uint32_t emip:1;
- uint32_t sp_ls:2;
- uint32_t sp_lv:8;
- uint32_t hp_c:1;
- uint32_t hp_s:1;
- uint32_t pip:1;
- uint32_t aip:1;
- uint32_t mrlsp:1;
- uint32_t pcp:1;
- uint32_t abp:1;
- } s;
- struct cvmx_pcieep_cfg033_s cn52xx;
- struct cvmx_pcieep_cfg033_s cn52xxp1;
- struct cvmx_pcieep_cfg033_s cn56xx;
- struct cvmx_pcieep_cfg033_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg034 {
- uint32_t u32;
- struct cvmx_pcieep_cfg034_s {
- uint32_t reserved_25_31:7;
- uint32_t dlls_c:1;
- uint32_t emis:1;
- uint32_t pds:1;
- uint32_t mrlss:1;
- uint32_t ccint_d:1;
- uint32_t pd_c:1;
- uint32_t mrls_c:1;
- uint32_t pf_d:1;
- uint32_t abp_d:1;
- uint32_t reserved_13_15:3;
- uint32_t dlls_en:1;
- uint32_t emic:1;
- uint32_t pcc:1;
- uint32_t pic:2;
- uint32_t aic:2;
- uint32_t hpint_en:1;
- uint32_t ccint_en:1;
- uint32_t pd_en:1;
- uint32_t mrls_en:1;
- uint32_t pf_en:1;
- uint32_t abp_en:1;
- } s;
- struct cvmx_pcieep_cfg034_s cn52xx;
- struct cvmx_pcieep_cfg034_s cn52xxp1;
- struct cvmx_pcieep_cfg034_s cn56xx;
- struct cvmx_pcieep_cfg034_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg037 {
- uint32_t u32;
- struct cvmx_pcieep_cfg037_s {
- uint32_t reserved_5_31:27;
- uint32_t ctds:1;
- uint32_t ctrs:4;
- } s;
- struct cvmx_pcieep_cfg037_s cn52xx;
- struct cvmx_pcieep_cfg037_s cn52xxp1;
- struct cvmx_pcieep_cfg037_s cn56xx;
- struct cvmx_pcieep_cfg037_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg038 {
- uint32_t u32;
- struct cvmx_pcieep_cfg038_s {
- uint32_t reserved_5_31:27;
- uint32_t ctd:1;
- uint32_t ctv:4;
- } s;
- struct cvmx_pcieep_cfg038_s cn52xx;
- struct cvmx_pcieep_cfg038_s cn52xxp1;
- struct cvmx_pcieep_cfg038_s cn56xx;
- struct cvmx_pcieep_cfg038_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg039 {
- uint32_t u32;
- struct cvmx_pcieep_cfg039_s {
- uint32_t reserved_0_31:32;
- } s;
- struct cvmx_pcieep_cfg039_s cn52xx;
- struct cvmx_pcieep_cfg039_s cn52xxp1;
- struct cvmx_pcieep_cfg039_s cn56xx;
- struct cvmx_pcieep_cfg039_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg040 {
- uint32_t u32;
- struct cvmx_pcieep_cfg040_s {
- uint32_t reserved_0_31:32;
- } s;
- struct cvmx_pcieep_cfg040_s cn52xx;
- struct cvmx_pcieep_cfg040_s cn52xxp1;
- struct cvmx_pcieep_cfg040_s cn56xx;
- struct cvmx_pcieep_cfg040_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg041 {
- uint32_t u32;
- struct cvmx_pcieep_cfg041_s {
- uint32_t reserved_0_31:32;
- } s;
- struct cvmx_pcieep_cfg041_s cn52xx;
- struct cvmx_pcieep_cfg041_s cn52xxp1;
- struct cvmx_pcieep_cfg041_s cn56xx;
- struct cvmx_pcieep_cfg041_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg042 {
- uint32_t u32;
- struct cvmx_pcieep_cfg042_s {
- uint32_t reserved_0_31:32;
- } s;
- struct cvmx_pcieep_cfg042_s cn52xx;
- struct cvmx_pcieep_cfg042_s cn52xxp1;
- struct cvmx_pcieep_cfg042_s cn56xx;
- struct cvmx_pcieep_cfg042_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg064 {
- uint32_t u32;
- struct cvmx_pcieep_cfg064_s {
- uint32_t nco:12;
- uint32_t cv:4;
- uint32_t pcieec:16;
- } s;
- struct cvmx_pcieep_cfg064_s cn52xx;
- struct cvmx_pcieep_cfg064_s cn52xxp1;
- struct cvmx_pcieep_cfg064_s cn56xx;
- struct cvmx_pcieep_cfg064_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg065 {
- uint32_t u32;
- struct cvmx_pcieep_cfg065_s {
- uint32_t reserved_21_31:11;
- uint32_t ures:1;
- uint32_t ecrces:1;
- uint32_t mtlps:1;
- uint32_t ros:1;
- uint32_t ucs:1;
- uint32_t cas:1;
- uint32_t cts:1;
- uint32_t fcpes:1;
- uint32_t ptlps:1;
- uint32_t reserved_6_11:6;
- uint32_t sdes:1;
- uint32_t dlpes:1;
- uint32_t reserved_0_3:4;
- } s;
- struct cvmx_pcieep_cfg065_s cn52xx;
- struct cvmx_pcieep_cfg065_s cn52xxp1;
- struct cvmx_pcieep_cfg065_s cn56xx;
- struct cvmx_pcieep_cfg065_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg066 {
- uint32_t u32;
- struct cvmx_pcieep_cfg066_s {
- uint32_t reserved_21_31:11;
- uint32_t urem:1;
- uint32_t ecrcem:1;
- uint32_t mtlpm:1;
- uint32_t rom:1;
- uint32_t ucm:1;
- uint32_t cam:1;
- uint32_t ctm:1;
- uint32_t fcpem:1;
- uint32_t ptlpm:1;
- uint32_t reserved_6_11:6;
- uint32_t sdem:1;
- uint32_t dlpem:1;
- uint32_t reserved_0_3:4;
- } s;
- struct cvmx_pcieep_cfg066_s cn52xx;
- struct cvmx_pcieep_cfg066_s cn52xxp1;
- struct cvmx_pcieep_cfg066_s cn56xx;
- struct cvmx_pcieep_cfg066_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg067 {
- uint32_t u32;
- struct cvmx_pcieep_cfg067_s {
- uint32_t reserved_21_31:11;
- uint32_t ures:1;
- uint32_t ecrces:1;
- uint32_t mtlps:1;
- uint32_t ros:1;
- uint32_t ucs:1;
- uint32_t cas:1;
- uint32_t cts:1;
- uint32_t fcpes:1;
- uint32_t ptlps:1;
- uint32_t reserved_6_11:6;
- uint32_t sdes:1;
- uint32_t dlpes:1;
- uint32_t reserved_0_3:4;
- } s;
- struct cvmx_pcieep_cfg067_s cn52xx;
- struct cvmx_pcieep_cfg067_s cn52xxp1;
- struct cvmx_pcieep_cfg067_s cn56xx;
- struct cvmx_pcieep_cfg067_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg068 {
- uint32_t u32;
- struct cvmx_pcieep_cfg068_s {
- uint32_t reserved_14_31:18;
- uint32_t anfes:1;
- uint32_t rtts:1;
- uint32_t reserved_9_11:3;
- uint32_t rnrs:1;
- uint32_t bdllps:1;
- uint32_t btlps:1;
- uint32_t reserved_1_5:5;
- uint32_t res:1;
- } s;
- struct cvmx_pcieep_cfg068_s cn52xx;
- struct cvmx_pcieep_cfg068_s cn52xxp1;
- struct cvmx_pcieep_cfg068_s cn56xx;
- struct cvmx_pcieep_cfg068_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg069 {
- uint32_t u32;
- struct cvmx_pcieep_cfg069_s {
- uint32_t reserved_14_31:18;
- uint32_t anfem:1;
- uint32_t rttm:1;
- uint32_t reserved_9_11:3;
- uint32_t rnrm:1;
- uint32_t bdllpm:1;
- uint32_t btlpm:1;
- uint32_t reserved_1_5:5;
- uint32_t rem:1;
- } s;
- struct cvmx_pcieep_cfg069_s cn52xx;
- struct cvmx_pcieep_cfg069_s cn52xxp1;
- struct cvmx_pcieep_cfg069_s cn56xx;
- struct cvmx_pcieep_cfg069_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg070 {
- uint32_t u32;
- struct cvmx_pcieep_cfg070_s {
- uint32_t reserved_9_31:23;
- uint32_t ce:1;
- uint32_t cc:1;
- uint32_t ge:1;
- uint32_t gc:1;
- uint32_t fep:5;
- } s;
- struct cvmx_pcieep_cfg070_s cn52xx;
- struct cvmx_pcieep_cfg070_s cn52xxp1;
- struct cvmx_pcieep_cfg070_s cn56xx;
- struct cvmx_pcieep_cfg070_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg071 {
- uint32_t u32;
- struct cvmx_pcieep_cfg071_s {
- uint32_t dword1:32;
- } s;
- struct cvmx_pcieep_cfg071_s cn52xx;
- struct cvmx_pcieep_cfg071_s cn52xxp1;
- struct cvmx_pcieep_cfg071_s cn56xx;
- struct cvmx_pcieep_cfg071_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg072 {
- uint32_t u32;
- struct cvmx_pcieep_cfg072_s {
- uint32_t dword2:32;
- } s;
- struct cvmx_pcieep_cfg072_s cn52xx;
- struct cvmx_pcieep_cfg072_s cn52xxp1;
- struct cvmx_pcieep_cfg072_s cn56xx;
- struct cvmx_pcieep_cfg072_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg073 {
- uint32_t u32;
- struct cvmx_pcieep_cfg073_s {
- uint32_t dword3:32;
- } s;
- struct cvmx_pcieep_cfg073_s cn52xx;
- struct cvmx_pcieep_cfg073_s cn52xxp1;
- struct cvmx_pcieep_cfg073_s cn56xx;
- struct cvmx_pcieep_cfg073_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg074 {
- uint32_t u32;
- struct cvmx_pcieep_cfg074_s {
- uint32_t dword4:32;
- } s;
- struct cvmx_pcieep_cfg074_s cn52xx;
- struct cvmx_pcieep_cfg074_s cn52xxp1;
- struct cvmx_pcieep_cfg074_s cn56xx;
- struct cvmx_pcieep_cfg074_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg448 {
- uint32_t u32;
- struct cvmx_pcieep_cfg448_s {
- uint32_t rtl:16;
- uint32_t rtltl:16;
- } s;
- struct cvmx_pcieep_cfg448_s cn52xx;
- struct cvmx_pcieep_cfg448_s cn52xxp1;
- struct cvmx_pcieep_cfg448_s cn56xx;
- struct cvmx_pcieep_cfg448_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg449 {
- uint32_t u32;
- struct cvmx_pcieep_cfg449_s {
- uint32_t omr:32;
- } s;
- struct cvmx_pcieep_cfg449_s cn52xx;
- struct cvmx_pcieep_cfg449_s cn52xxp1;
- struct cvmx_pcieep_cfg449_s cn56xx;
- struct cvmx_pcieep_cfg449_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg450 {
- uint32_t u32;
- struct cvmx_pcieep_cfg450_s {
- uint32_t lpec:8;
- uint32_t reserved_22_23:2;
- uint32_t link_state:6;
- uint32_t force_link:1;
- uint32_t reserved_8_14:7;
- uint32_t link_num:8;
- } s;
- struct cvmx_pcieep_cfg450_s cn52xx;
- struct cvmx_pcieep_cfg450_s cn52xxp1;
- struct cvmx_pcieep_cfg450_s cn56xx;
- struct cvmx_pcieep_cfg450_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg451 {
- uint32_t u32;
- struct cvmx_pcieep_cfg451_s {
- uint32_t reserved_30_31:2;
- uint32_t l1el:3;
- uint32_t l0el:3;
- uint32_t n_fts_cc:8;
- uint32_t n_fts:8;
- uint32_t ack_freq:8;
- } s;
- struct cvmx_pcieep_cfg451_s cn52xx;
- struct cvmx_pcieep_cfg451_s cn52xxp1;
- struct cvmx_pcieep_cfg451_s cn56xx;
- struct cvmx_pcieep_cfg451_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg452 {
- uint32_t u32;
- struct cvmx_pcieep_cfg452_s {
- uint32_t reserved_26_31:6;
- uint32_t eccrc:1;
- uint32_t reserved_22_24:3;
- uint32_t lme:6;
- uint32_t reserved_8_15:8;
- uint32_t flm:1;
- uint32_t reserved_6_6:1;
- uint32_t dllle:1;
- uint32_t reserved_4_4:1;
- uint32_t ra:1;
- uint32_t le:1;
- uint32_t sd:1;
- uint32_t omr:1;
- } s;
- struct cvmx_pcieep_cfg452_s cn52xx;
- struct cvmx_pcieep_cfg452_s cn52xxp1;
- struct cvmx_pcieep_cfg452_s cn56xx;
- struct cvmx_pcieep_cfg452_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg453 {
- uint32_t u32;
- struct cvmx_pcieep_cfg453_s {
- uint32_t dlld:1;
- uint32_t reserved_26_30:5;
- uint32_t ack_nak:1;
- uint32_t fcd:1;
- uint32_t ilst:24;
- } s;
- struct cvmx_pcieep_cfg453_s cn52xx;
- struct cvmx_pcieep_cfg453_s cn52xxp1;
- struct cvmx_pcieep_cfg453_s cn56xx;
- struct cvmx_pcieep_cfg453_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg454 {
- uint32_t u32;
- struct cvmx_pcieep_cfg454_s {
- uint32_t reserved_29_31:3;
- uint32_t tmfcwt:5;
- uint32_t tmanlt:5;
- uint32_t tmrt:5;
- uint32_t reserved_11_13:3;
- uint32_t nskps:3;
- uint32_t reserved_4_7:4;
- uint32_t ntss:4;
- } s;
- struct cvmx_pcieep_cfg454_s cn52xx;
- struct cvmx_pcieep_cfg454_s cn52xxp1;
- struct cvmx_pcieep_cfg454_s cn56xx;
- struct cvmx_pcieep_cfg454_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg455 {
- uint32_t u32;
- struct cvmx_pcieep_cfg455_s {
- uint32_t m_cfg0_filt:1;
- uint32_t m_io_filt:1;
- uint32_t msg_ctrl:1;
- uint32_t m_cpl_ecrc_filt:1;
- uint32_t m_ecrc_filt:1;
- uint32_t m_cpl_len_err:1;
- uint32_t m_cpl_attr_err:1;
- uint32_t m_cpl_tc_err:1;
- uint32_t m_cpl_fun_err:1;
- uint32_t m_cpl_rid_err:1;
- uint32_t m_cpl_tag_err:1;
- uint32_t m_lk_filt:1;
- uint32_t m_cfg1_filt:1;
- uint32_t m_bar_match:1;
- uint32_t m_pois_filt:1;
- uint32_t m_fun:1;
- uint32_t dfcwt:1;
- uint32_t reserved_11_14:4;
- uint32_t skpiv:11;
- } s;
- struct cvmx_pcieep_cfg455_s cn52xx;
- struct cvmx_pcieep_cfg455_s cn52xxp1;
- struct cvmx_pcieep_cfg455_s cn56xx;
- struct cvmx_pcieep_cfg455_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg456 {
- uint32_t u32;
- struct cvmx_pcieep_cfg456_s {
- uint32_t reserved_2_31:30;
- uint32_t m_vend1_drp:1;
- uint32_t m_vend0_drp:1;
- } s;
- struct cvmx_pcieep_cfg456_s cn52xx;
- struct cvmx_pcieep_cfg456_s cn52xxp1;
- struct cvmx_pcieep_cfg456_s cn56xx;
- struct cvmx_pcieep_cfg456_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg458 {
- uint32_t u32;
- struct cvmx_pcieep_cfg458_s {
- uint32_t dbg_info_l32:32;
- } s;
- struct cvmx_pcieep_cfg458_s cn52xx;
- struct cvmx_pcieep_cfg458_s cn52xxp1;
- struct cvmx_pcieep_cfg458_s cn56xx;
- struct cvmx_pcieep_cfg458_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg459 {
- uint32_t u32;
- struct cvmx_pcieep_cfg459_s {
- uint32_t dbg_info_u32:32;
- } s;
- struct cvmx_pcieep_cfg459_s cn52xx;
- struct cvmx_pcieep_cfg459_s cn52xxp1;
- struct cvmx_pcieep_cfg459_s cn56xx;
- struct cvmx_pcieep_cfg459_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg460 {
- uint32_t u32;
- struct cvmx_pcieep_cfg460_s {
- uint32_t reserved_20_31:12;
- uint32_t tphfcc:8;
- uint32_t tpdfcc:12;
- } s;
- struct cvmx_pcieep_cfg460_s cn52xx;
- struct cvmx_pcieep_cfg460_s cn52xxp1;
- struct cvmx_pcieep_cfg460_s cn56xx;
- struct cvmx_pcieep_cfg460_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg461 {
- uint32_t u32;
- struct cvmx_pcieep_cfg461_s {
- uint32_t reserved_20_31:12;
- uint32_t tchfcc:8;
- uint32_t tcdfcc:12;
- } s;
- struct cvmx_pcieep_cfg461_s cn52xx;
- struct cvmx_pcieep_cfg461_s cn52xxp1;
- struct cvmx_pcieep_cfg461_s cn56xx;
- struct cvmx_pcieep_cfg461_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg462 {
- uint32_t u32;
- struct cvmx_pcieep_cfg462_s {
- uint32_t reserved_20_31:12;
- uint32_t tchfcc:8;
- uint32_t tcdfcc:12;
- } s;
- struct cvmx_pcieep_cfg462_s cn52xx;
- struct cvmx_pcieep_cfg462_s cn52xxp1;
- struct cvmx_pcieep_cfg462_s cn56xx;
- struct cvmx_pcieep_cfg462_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg463 {
- uint32_t u32;
- struct cvmx_pcieep_cfg463_s {
- uint32_t reserved_3_31:29;
- uint32_t rqne:1;
- uint32_t trbne:1;
- uint32_t rtlpfccnr:1;
- } s;
- struct cvmx_pcieep_cfg463_s cn52xx;
- struct cvmx_pcieep_cfg463_s cn52xxp1;
- struct cvmx_pcieep_cfg463_s cn56xx;
- struct cvmx_pcieep_cfg463_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg464 {
- uint32_t u32;
- struct cvmx_pcieep_cfg464_s {
- uint32_t wrr_vc3:8;
- uint32_t wrr_vc2:8;
- uint32_t wrr_vc1:8;
- uint32_t wrr_vc0:8;
- } s;
- struct cvmx_pcieep_cfg464_s cn52xx;
- struct cvmx_pcieep_cfg464_s cn52xxp1;
- struct cvmx_pcieep_cfg464_s cn56xx;
- struct cvmx_pcieep_cfg464_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg465 {
- uint32_t u32;
- struct cvmx_pcieep_cfg465_s {
- uint32_t wrr_vc7:8;
- uint32_t wrr_vc6:8;
- uint32_t wrr_vc5:8;
- uint32_t wrr_vc4:8;
- } s;
- struct cvmx_pcieep_cfg465_s cn52xx;
- struct cvmx_pcieep_cfg465_s cn52xxp1;
- struct cvmx_pcieep_cfg465_s cn56xx;
- struct cvmx_pcieep_cfg465_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg466 {
- uint32_t u32;
- struct cvmx_pcieep_cfg466_s {
- uint32_t rx_queue_order:1;
- uint32_t type_ordering:1;
- uint32_t reserved_24_29:6;
- uint32_t queue_mode:3;
- uint32_t reserved_20_20:1;
- uint32_t header_credits:8;
- uint32_t data_credits:12;
- } s;
- struct cvmx_pcieep_cfg466_s cn52xx;
- struct cvmx_pcieep_cfg466_s cn52xxp1;
- struct cvmx_pcieep_cfg466_s cn56xx;
- struct cvmx_pcieep_cfg466_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg467 {
- uint32_t u32;
- struct cvmx_pcieep_cfg467_s {
- uint32_t reserved_24_31:8;
- uint32_t queue_mode:3;
- uint32_t reserved_20_20:1;
- uint32_t header_credits:8;
- uint32_t data_credits:12;
- } s;
- struct cvmx_pcieep_cfg467_s cn52xx;
- struct cvmx_pcieep_cfg467_s cn52xxp1;
- struct cvmx_pcieep_cfg467_s cn56xx;
- struct cvmx_pcieep_cfg467_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg468 {
- uint32_t u32;
- struct cvmx_pcieep_cfg468_s {
- uint32_t reserved_24_31:8;
- uint32_t queue_mode:3;
- uint32_t reserved_20_20:1;
- uint32_t header_credits:8;
- uint32_t data_credits:12;
- } s;
- struct cvmx_pcieep_cfg468_s cn52xx;
- struct cvmx_pcieep_cfg468_s cn52xxp1;
- struct cvmx_pcieep_cfg468_s cn56xx;
- struct cvmx_pcieep_cfg468_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg490 {
- uint32_t u32;
- struct cvmx_pcieep_cfg490_s {
- uint32_t reserved_26_31:6;
- uint32_t header_depth:10;
- uint32_t reserved_14_15:2;
- uint32_t data_depth:14;
- } s;
- struct cvmx_pcieep_cfg490_s cn52xx;
- struct cvmx_pcieep_cfg490_s cn52xxp1;
- struct cvmx_pcieep_cfg490_s cn56xx;
- struct cvmx_pcieep_cfg490_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg491 {
- uint32_t u32;
- struct cvmx_pcieep_cfg491_s {
- uint32_t reserved_26_31:6;
- uint32_t header_depth:10;
- uint32_t reserved_14_15:2;
- uint32_t data_depth:14;
- } s;
- struct cvmx_pcieep_cfg491_s cn52xx;
- struct cvmx_pcieep_cfg491_s cn52xxp1;
- struct cvmx_pcieep_cfg491_s cn56xx;
- struct cvmx_pcieep_cfg491_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg492 {
- uint32_t u32;
- struct cvmx_pcieep_cfg492_s {
- uint32_t reserved_26_31:6;
- uint32_t header_depth:10;
- uint32_t reserved_14_15:2;
- uint32_t data_depth:14;
- } s;
- struct cvmx_pcieep_cfg492_s cn52xx;
- struct cvmx_pcieep_cfg492_s cn52xxp1;
- struct cvmx_pcieep_cfg492_s cn56xx;
- struct cvmx_pcieep_cfg492_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg516 {
- uint32_t u32;
- struct cvmx_pcieep_cfg516_s {
- uint32_t phy_stat:32;
- } s;
- struct cvmx_pcieep_cfg516_s cn52xx;
- struct cvmx_pcieep_cfg516_s cn52xxp1;
- struct cvmx_pcieep_cfg516_s cn56xx;
- struct cvmx_pcieep_cfg516_s cn56xxp1;
-};
-
-union cvmx_pcieep_cfg517 {
- uint32_t u32;
- struct cvmx_pcieep_cfg517_s {
- uint32_t phy_ctrl:32;
- } s;
- struct cvmx_pcieep_cfg517_s cn52xx;
- struct cvmx_pcieep_cfg517_s cn52xxp1;
- struct cvmx_pcieep_cfg517_s cn56xx;
- struct cvmx_pcieep_cfg517_s cn56xxp1;
-};
-
-#endif
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index fcd4060f642..90bf3b3fce1 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -17,6 +17,7 @@
*/
#include <linux/ioport.h>
+#include <linux/of.h>
/*
* Each pci channel is a top-level PCI bus seem by CPU. A machine with
@@ -26,6 +27,7 @@
struct pci_controller {
struct pci_controller *next;
struct pci_bus *bus;
+ struct device_node *of_node;
struct pci_ops *pci_ops;
struct resource *mem_resource;
@@ -142,4 +144,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
extern char * (*pcibios_plat_setup)(char *str);
+/* this function parses memory ranges from a device node */
+extern void __devinit pci_load_of_ranges(struct pci_controller *hose,
+ struct device_node *node);
+
#endif /* _ASM_PCI_H */
diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h
index 7a6e82ef449..7206d445bab 100644
--- a/arch/mips/include/asm/prom.h
+++ b/arch/mips/include/asm/prom.h
@@ -12,6 +12,9 @@
#define __ASM_PROM_H
#ifdef CONFIG_OF
+#include <linux/bug.h>
+#include <linux/io.h>
+#include <linux/types.h>
#include <asm/bootinfo.h>
extern int early_init_dt_scan_memory_arch(unsigned long node,
@@ -21,6 +24,29 @@ extern int reserve_mem_mach(unsigned long addr, unsigned long size);
extern void free_mem_mach(unsigned long addr, unsigned long size);
extern void device_tree_init(void);
+
+static inline unsigned long pci_address_to_pio(phys_addr_t address)
+{
+ /*
+ * The ioport address can be directly used by inX() / outX()
+ */
+ BUG_ON(address > IO_SPACE_LIMIT);
+
+ return (unsigned long) address;
+}
+#define pci_address_to_pio pci_address_to_pio
+
+struct boot_param_header;
+
+extern void __dt_setup_arch(struct boot_param_header *bph);
+
+#define dt_setup_arch(sym) \
+({ \
+ extern struct boot_param_header __dtb_##sym##_begin; \
+ \
+ __dt_setup_arch(&__dtb_##sym##_begin); \
+})
+
#else /* CONFIG_OF */
static inline void device_tree_init(void) { }
#endif /* CONFIG_OF */
diff --git a/arch/mips/include/asm/setup.h b/arch/mips/include/asm/setup.h
index 6dce6d8d09a..2560b6b6a7d 100644
--- a/arch/mips/include/asm/setup.h
+++ b/arch/mips/include/asm/setup.h
@@ -14,7 +14,8 @@ extern void *set_vi_handler(int n, vi_handler_t addr);
extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
-extern void per_cpu_trap_init(void);
+extern void per_cpu_trap_init(bool);
+extern void cpu_cache_init(void);
#endif /* __KERNEL__ */
diff --git a/arch/mips/include/asm/sparsemem.h b/arch/mips/include/asm/sparsemem.h
index 7165333ad04..4461198361c 100644
--- a/arch/mips/include/asm/sparsemem.h
+++ b/arch/mips/include/asm/sparsemem.h
@@ -6,7 +6,11 @@
* SECTION_SIZE_BITS 2^N: how big each section will be
* MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
*/
-#define SECTION_SIZE_BITS 28
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PAGE_SIZE_64KB)
+# define SECTION_SIZE_BITS 29
+#else
+# define SECTION_SIZE_BITS 28
+#endif
#define MAX_PHYSMEM_BITS 35
#endif /* CONFIG_SPARSEMEM */
diff --git a/arch/mips/include/asm/termios.h b/arch/mips/include/asm/termios.h
index 8f77f774a2a..abdd87aaf60 100644
--- a/arch/mips/include/asm/termios.h
+++ b/arch/mips/include/asm/termios.h
@@ -60,7 +60,7 @@ struct termio {
};
#ifdef __KERNEL__
-#include <linux/module.h>
+#include <asm/uaccess.h>
/*
* intr=^C quit=^\ erase=del kill=^U
diff --git a/arch/mips/include/asm/traps.h b/arch/mips/include/asm/traps.h
index ff74aec3561..420ca06b2f4 100644
--- a/arch/mips/include/asm/traps.h
+++ b/arch/mips/include/asm/traps.h
@@ -25,6 +25,7 @@ extern void (*board_nmi_handler_setup)(void);
extern void (*board_ejtag_handler_setup)(void);
extern void (*board_bind_eic_interrupt)(int irq, int regset);
extern void (*board_ebase_setup)(void);
+extern void (*board_cache_error_setup)(void);
extern int register_nmi_notifier(struct notifier_block *nb);
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index 504d40aedfa..440a21dab57 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -11,7 +11,7 @@
#include <linux/types.h>
#ifdef CONFIG_EXPORT_UASM
-#include <linux/module.h>
+#include <linux/export.h>
#define __uasminit
#define __uasminitdata
#define UASM_EXPORT_SYMBOL(sym) EXPORT_SYMBOL(sym)
diff --git a/arch/mips/jz4740/Makefile b/arch/mips/jz4740/Makefile
index a9dff332125..e44abea9c20 100644
--- a/arch/mips/jz4740/Makefile
+++ b/arch/mips/jz4740/Makefile
@@ -16,5 +16,3 @@ obj-$(CONFIG_JZ4740_QI_LB60) += board-qi_lb60.o
# PM support
obj-$(CONFIG_PM) += pm.o
-
-ccflags-y := -Werror -Wall
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 5099201fb7b..6ae7ce4ac63 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -340,7 +340,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "R2000";
c->isa_level = MIPS_CPU_ISA_I;
c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
- MIPS_CPU_NOFPUEX;
+ MIPS_CPU_NOFPUEX;
if (__cpu_has_fpu())
c->options |= MIPS_CPU_FPU;
c->tlbsize = 64;
@@ -361,7 +361,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
}
c->isa_level = MIPS_CPU_ISA_I;
c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
- MIPS_CPU_NOFPUEX;
+ MIPS_CPU_NOFPUEX;
if (__cpu_has_fpu())
c->options |= MIPS_CPU_FPU;
c->tlbsize = 64;
@@ -387,8 +387,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->isa_level = MIPS_CPU_ISA_III;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_WATCH | MIPS_CPU_VCE |
- MIPS_CPU_LLSC;
+ MIPS_CPU_WATCH | MIPS_CPU_VCE |
+ MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_VR41XX:
@@ -434,7 +434,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "R4300";
c->isa_level = MIPS_CPU_ISA_III;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 32;
break;
case PRID_IMP_R4600:
@@ -446,7 +446,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->tlbsize = 48;
break;
#if 0
- case PRID_IMP_R4650:
+ case PRID_IMP_R4650:
/*
* This processor doesn't have an MMU, so it's not
* "real easy" to run Linux on it. It is left purely
@@ -455,9 +455,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
*/
c->cputype = CPU_R4650;
__cpu_name[cpu] = "R4650";
- c->isa_level = MIPS_CPU_ISA_III;
+ c->isa_level = MIPS_CPU_ISA_III;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC;
- c->tlbsize = 48;
+ c->tlbsize = 48;
break;
#endif
case PRID_IMP_TX39:
@@ -488,7 +488,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "R4700";
c->isa_level = MIPS_CPU_ISA_III;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_TX49:
@@ -505,7 +505,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "R5000";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_R5432:
@@ -513,7 +513,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "R5432";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_WATCH | MIPS_CPU_LLSC;
+ MIPS_CPU_WATCH | MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_R5500:
@@ -521,7 +521,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "R5500";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_WATCH | MIPS_CPU_LLSC;
+ MIPS_CPU_WATCH | MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_NEVADA:
@@ -529,7 +529,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "Nevada";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_DIVEC | MIPS_CPU_LLSC;
+ MIPS_CPU_DIVEC | MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
case PRID_IMP_R6000:
@@ -537,7 +537,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "R6000";
c->isa_level = MIPS_CPU_ISA_II;
c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 32;
break;
case PRID_IMP_R6000A:
@@ -545,7 +545,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "R6000A";
c->isa_level = MIPS_CPU_ISA_II;
c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 32;
break;
case PRID_IMP_RM7000:
@@ -553,7 +553,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "RM7000";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
/*
* Undocumented RM7000: Bit 29 in the info register of
* the RM7000 v2.0 indicates if the TLB has 48 or 64
@@ -569,7 +569,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "RM9000";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
/*
* Bit 29 in the info register of the RM9000
* indicates if the TLB has 48 or 64 entries.
@@ -584,8 +584,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "RM8000";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX |
- MIPS_CPU_FPU | MIPS_CPU_32FPR |
- MIPS_CPU_LLSC;
+ MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_LLSC;
c->tlbsize = 384; /* has weird TLB: 3-way x 128 */
break;
case PRID_IMP_R10000:
@@ -593,9 +593,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "R10000";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
- MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 64;
break;
case PRID_IMP_R12000:
@@ -603,9 +603,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "R12000";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
- MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 64;
break;
case PRID_IMP_R14000:
@@ -613,9 +613,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "R14000";
c->isa_level = MIPS_CPU_ISA_IV;
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
- MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
- MIPS_CPU_LLSC;
+ MIPS_CPU_LLSC;
c->tlbsize = 64;
break;
case PRID_IMP_LOONGSON2:
@@ -739,7 +739,7 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
if (config3 & MIPS_CONF3_VEIC)
c->options |= MIPS_CPU_VEIC;
if (config3 & MIPS_CONF3_MT)
- c->ases |= MIPS_ASE_MIPSMT;
+ c->ases |= MIPS_ASE_MIPSMT;
if (config3 & MIPS_CONF3_ULRI)
c->options |= MIPS_CPU_ULRI;
@@ -767,7 +767,7 @@ static void __cpuinit decode_configs(struct cpuinfo_mips *c)
/* MIPS32 or MIPS64 compliant CPU. */
c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER |
- MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK;
+ MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK;
c->scache.flags = MIPS_CACHE_NOT_PRESENT;
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index ab73fa2fb9b..f29099b104c 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1532,7 +1532,8 @@ init_hw_perf_events(void)
irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
} else {
#endif
- if (cp0_perfcount_irq >= 0)
+ if ((cp0_perfcount_irq >= 0) &&
+ (cp0_compare_irq != cp0_perfcount_irq))
irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
else
irq = -1;
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index f8b2c592514..5542817c1b4 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -41,27 +41,27 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "processor\t\t: %ld\n", n);
sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n",
- cpu_data[n].options & MIPS_CPU_FPU ? " FPU V%d.%d" : "");
+ cpu_data[n].options & MIPS_CPU_FPU ? " FPU V%d.%d" : "");
seq_printf(m, fmt, __cpu_name[n],
- (version >> 4) & 0x0f, version & 0x0f,
- (fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
+ (version >> 4) & 0x0f, version & 0x0f,
+ (fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
seq_printf(m, "BogoMIPS\t\t: %u.%02u\n",
- cpu_data[n].udelay_val / (500000/HZ),
- (cpu_data[n].udelay_val / (5000/HZ)) % 100);
+ cpu_data[n].udelay_val / (500000/HZ),
+ (cpu_data[n].udelay_val / (5000/HZ)) % 100);
seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no");
seq_printf(m, "microsecond timers\t: %s\n",
- cpu_has_counter ? "yes" : "no");
+ cpu_has_counter ? "yes" : "no");
seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize);
seq_printf(m, "extra interrupt vector\t: %s\n",
- cpu_has_divec ? "yes" : "no");
+ cpu_has_divec ? "yes" : "no");
seq_printf(m, "hardware watchpoint\t: %s",
- cpu_has_watch ? "yes, " : "no\n");
+ cpu_has_watch ? "yes, " : "no\n");
if (cpu_has_watch) {
seq_printf(m, "count: %d, address/irw mask: [",
- cpu_data[n].watch_reg_count);
+ cpu_data[n].watch_reg_count);
for (i = 0; i < cpu_data[n].watch_reg_count; i++)
seq_printf(m, "%s0x%04x", i ? ", " : "" ,
- cpu_data[n].watch_reg_masks[i]);
+ cpu_data[n].watch_reg_masks[i]);
seq_printf(m, "]\n");
}
seq_printf(m, "ASEs implemented\t:%s%s%s%s%s%s\n",
@@ -73,13 +73,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
cpu_has_mipsmt ? " mt" : ""
);
seq_printf(m, "shadow register sets\t: %d\n",
- cpu_data[n].srsets);
+ cpu_data[n].srsets);
seq_printf(m, "kscratch registers\t: %d\n",
- hweight8(cpu_data[n].kscratch_mask));
+ hweight8(cpu_data[n].kscratch_mask));
seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
- cpu_has_vce ? "%u" : "not available");
+ cpu_has_vce ? "%u" : "not available");
seq_printf(m, fmt, 'D', vced_count);
seq_printf(m, fmt, 'I', vcei_count);
seq_printf(m, "\n");
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 558b5395795..f11b2bbb826 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -95,3 +95,16 @@ void __init device_tree_init(void)
/* free the space reserved for the dt blob */
free_mem_mach(base, size);
}
+
+void __init __dt_setup_arch(struct boot_param_header *bph)
+{
+ if (be32_to_cpu(bph->magic) != OF_DT_HEADER) {
+ pr_err("DTB has bad magic, ignoring builtin OF DTB\n");
+
+ return;
+ }
+
+ initial_boot_params = bph;
+
+ early_init_devtree(initial_boot_params);
+}
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index c504b212f8f..a53f8ec37aa 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -605,6 +605,8 @@ void __init setup_arch(char **cmdline_p)
resource_init();
plat_smp_setup();
+
+ cpu_cache_init();
}
unsigned long kernelsp[NR_CPUS];
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 71a95f55a64..48650c81804 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -106,7 +106,7 @@ asmlinkage __cpuinit void start_secondary(void)
#endif /* CONFIG_MIPS_MT_SMTC */
cpu_probe();
cpu_report();
- per_cpu_trap_init();
+ per_cpu_trap_init(false);
mips_clockevent_init();
mp_ops->init_secondary();
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index cfdaaa4cffc..2d0c2a277f5 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -15,6 +15,7 @@
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/smp.h>
@@ -91,7 +92,7 @@ void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
-
+void __cpuinitdata(*board_cache_error_setup)(void);
static void show_raw_backtrace(unsigned long reg29)
{
@@ -1490,7 +1491,6 @@ void *set_vi_handler(int n, vi_handler_t addr)
return set_vi_srs_handler(n, addr, 0);
}
-extern void cpu_cache_init(void);
extern void tlb_init(void);
extern void flush_tlb_handlers(void);
@@ -1517,7 +1517,7 @@ static int __init ulri_disable(char *s)
}
__setup("noulri", ulri_disable);
-void __cpuinit per_cpu_trap_init(void)
+void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
{
unsigned int cpu = smp_processor_id();
unsigned int status_set = ST0_CU0;
@@ -1616,7 +1616,9 @@ void __cpuinit per_cpu_trap_init(void)
#ifdef CONFIG_MIPS_MT_SMTC
if (bootTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
- cpu_cache_init();
+ /* Boot CPU's cache setup in setup_arch(). */
+ if (!is_boot_cpu)
+ cpu_cache_init();
tlb_init();
#ifdef CONFIG_MIPS_MT_SMTC
} else if (!secondaryTC) {
@@ -1632,7 +1634,7 @@ void __cpuinit per_cpu_trap_init(void)
}
/* Install CPU exception handler */
-void __init set_handler(unsigned long offset, void *addr, unsigned long size)
+void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
{
memcpy((void *)(ebase + offset), addr, size);
local_flush_icache_range(ebase + offset, ebase + offset + size);
@@ -1693,7 +1695,7 @@ void __init trap_init(void)
if (board_ebase_setup)
board_ebase_setup();
- per_cpu_trap_init();
+ per_cpu_trap_init(true);
/*
* Copy the generic exception handlers to their final destination.
@@ -1797,6 +1799,9 @@ void __init trap_init(void)
set_except_vector(26, handle_dsp);
+ if (board_cache_error_setup)
+ board_cache_error_setup();
+
if (cpu_has_vce)
/* Special exception: R4[04]00 uses also the divec space. */
memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100);
diff --git a/arch/mips/lantiq/Kconfig b/arch/mips/lantiq/Kconfig
index 3fccf210451..20bdf40b3ef 100644
--- a/arch/mips/lantiq/Kconfig
+++ b/arch/mips/lantiq/Kconfig
@@ -16,8 +16,22 @@ config SOC_XWAY
bool "XWAY"
select SOC_TYPE_XWAY
select HW_HAS_PCI
+
+config SOC_FALCON
+ bool "FALCON"
+
+endchoice
+
+choice
+ prompt "Devicetree"
+
+config DT_EASY50712
+ bool "Easy50712"
+ depends on SOC_XWAY
endchoice
-source "arch/mips/lantiq/xway/Kconfig"
+config PCI_LANTIQ
+ bool "PCI Support"
+ depends on SOC_XWAY && PCI
endif
diff --git a/arch/mips/lantiq/Makefile b/arch/mips/lantiq/Makefile
index e5dae0e24b0..d6bdc579419 100644
--- a/arch/mips/lantiq/Makefile
+++ b/arch/mips/lantiq/Makefile
@@ -4,8 +4,11 @@
# under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation.
-obj-y := irq.o setup.o clk.o prom.o devices.o
+obj-y := irq.o clk.o prom.o
+
+obj-y += dts/
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_SOC_TYPE_XWAY) += xway/
+obj-$(CONFIG_SOC_FALCON) += falcon/
diff --git a/arch/mips/lantiq/Platform b/arch/mips/lantiq/Platform
index f3dff05722d..b3ec49838fd 100644
--- a/arch/mips/lantiq/Platform
+++ b/arch/mips/lantiq/Platform
@@ -6,3 +6,4 @@ platform-$(CONFIG_LANTIQ) += lantiq/
cflags-$(CONFIG_LANTIQ) += -I$(srctree)/arch/mips/include/asm/mach-lantiq
load-$(CONFIG_LANTIQ) = 0xffffffff80002000
cflags-$(CONFIG_SOC_TYPE_XWAY) += -I$(srctree)/arch/mips/include/asm/mach-lantiq/xway
+cflags-$(CONFIG_SOC_FALCON) += -I$(srctree)/arch/mips/include/asm/mach-lantiq/falcon
diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
index 412814fdd3e..d3bcc33f469 100644
--- a/arch/mips/lantiq/clk.c
+++ b/arch/mips/lantiq/clk.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/clk.h>
+#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/list.h>
@@ -22,44 +23,32 @@
#include <lantiq_soc.h>
#include "clk.h"
+#include "prom.h"
-struct clk {
- const char *name;
- unsigned long rate;
- unsigned long (*get_rate) (void);
-};
+/* lantiq socs have 3 static clocks */
+static struct clk cpu_clk_generic[3];
-static struct clk *cpu_clk;
-static int cpu_clk_cnt;
+void clkdev_add_static(unsigned long cpu, unsigned long fpi, unsigned long io)
+{
+ cpu_clk_generic[0].rate = cpu;
+ cpu_clk_generic[1].rate = fpi;
+ cpu_clk_generic[2].rate = io;
+}
-/* lantiq socs have 3 static clocks */
-static struct clk cpu_clk_generic[] = {
- {
- .name = "cpu",
- .get_rate = ltq_get_cpu_hz,
- }, {
- .name = "fpi",
- .get_rate = ltq_get_fpi_hz,
- }, {
- .name = "io",
- .get_rate = ltq_get_io_region_clock,
- },
-};
-
-static struct resource ltq_cgu_resource = {
- .name = "cgu",
- .start = LTQ_CGU_BASE_ADDR,
- .end = LTQ_CGU_BASE_ADDR + LTQ_CGU_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
-/* remapped clock register range */
-void __iomem *ltq_cgu_membase;
-
-void clk_init(void)
+struct clk *clk_get_cpu(void)
+{
+ return &cpu_clk_generic[0];
+}
+
+struct clk *clk_get_fpi(void)
+{
+ return &cpu_clk_generic[1];
+}
+EXPORT_SYMBOL_GPL(clk_get_fpi);
+
+struct clk *clk_get_io(void)
{
- cpu_clk = cpu_clk_generic;
- cpu_clk_cnt = ARRAY_SIZE(cpu_clk_generic);
+ return &cpu_clk_generic[2];
}
static inline int clk_good(struct clk *clk)
@@ -82,38 +71,71 @@ unsigned long clk_get_rate(struct clk *clk)
}
EXPORT_SYMBOL(clk_get_rate);
-struct clk *clk_get(struct device *dev, const char *id)
+int clk_set_rate(struct clk *clk, unsigned long rate)
{
- int i;
-
- for (i = 0; i < cpu_clk_cnt; i++)
- if (!strcmp(id, cpu_clk[i].name))
- return &cpu_clk[i];
- BUG();
- return ERR_PTR(-ENOENT);
-}
-EXPORT_SYMBOL(clk_get);
-
-void clk_put(struct clk *clk)
-{
- /* not used */
+ if (unlikely(!clk_good(clk)))
+ return 0;
+ if (clk->rates && *clk->rates) {
+ unsigned long *r = clk->rates;
+
+ while (*r && (*r != rate))
+ r++;
+ if (!*r) {
+ pr_err("clk %s.%s: trying to set invalid rate %ld\n",
+ clk->cl.dev_id, clk->cl.con_id, rate);
+ return -1;
+ }
+ }
+ clk->rate = rate;
+ return 0;
}
-EXPORT_SYMBOL(clk_put);
+EXPORT_SYMBOL(clk_set_rate);
int clk_enable(struct clk *clk)
{
- /* not used */
- return 0;
+ if (unlikely(!clk_good(clk)))
+ return -1;
+
+ if (clk->enable)
+ return clk->enable(clk);
+
+ return -1;
}
EXPORT_SYMBOL(clk_enable);
void clk_disable(struct clk *clk)
{
- /* not used */
+ if (unlikely(!clk_good(clk)))
+ return;
+
+ if (clk->disable)
+ clk->disable(clk);
}
EXPORT_SYMBOL(clk_disable);
-static inline u32 ltq_get_counter_resolution(void)
+int clk_activate(struct clk *clk)
+{
+ if (unlikely(!clk_good(clk)))
+ return -1;
+
+ if (clk->activate)
+ return clk->activate(clk);
+
+ return -1;
+}
+EXPORT_SYMBOL(clk_activate);
+
+void clk_deactivate(struct clk *clk)
+{
+ if (unlikely(!clk_good(clk)))
+ return;
+
+ if (clk->deactivate)
+ clk->deactivate(clk);
+}
+EXPORT_SYMBOL(clk_deactivate);
+
+static inline u32 get_counter_resolution(void)
{
u32 res;
@@ -133,21 +155,11 @@ void __init plat_time_init(void)
{
struct clk *clk;
- if (insert_resource(&iomem_resource, &ltq_cgu_resource) < 0)
- panic("Failed to insert cgu memory");
+ ltq_soc_init();
- if (request_mem_region(ltq_cgu_resource.start,
- resource_size(&ltq_cgu_resource), "cgu") < 0)
- panic("Failed to request cgu memory");
-
- ltq_cgu_membase = ioremap_nocache(ltq_cgu_resource.start,
- resource_size(&ltq_cgu_resource));
- if (!ltq_cgu_membase) {
- pr_err("Failed to remap cgu memory\n");
- unreachable();
- }
- clk = clk_get(0, "cpu");
- mips_hpt_frequency = clk_get_rate(clk) / ltq_get_counter_resolution();
+ clk = clk_get_cpu();
+ mips_hpt_frequency = clk_get_rate(clk) / get_counter_resolution();
write_c0_compare(read_c0_count());
+ pr_info("CPU Clock: %ldMHz\n", clk_get_rate(clk) / 1000000);
clk_put(clk);
}
diff --git a/arch/mips/lantiq/clk.h b/arch/mips/lantiq/clk.h
index 3328925f2c3..fa670602b91 100644
--- a/arch/mips/lantiq/clk.h
+++ b/arch/mips/lantiq/clk.h
@@ -9,10 +9,70 @@
#ifndef _LTQ_CLK_H__
#define _LTQ_CLK_H__
-extern void clk_init(void);
+#include <linux/clkdev.h>
-extern unsigned long ltq_get_cpu_hz(void);
-extern unsigned long ltq_get_fpi_hz(void);
-extern unsigned long ltq_get_io_region_clock(void);
+/* clock speeds */
+#define CLOCK_33M 33333333
+#define CLOCK_60M 60000000
+#define CLOCK_62_5M 62500000
+#define CLOCK_83M 83333333
+#define CLOCK_83_5M 83500000
+#define CLOCK_98_304M 98304000
+#define CLOCK_100M 100000000
+#define CLOCK_111M 111111111
+#define CLOCK_125M 125000000
+#define CLOCK_133M 133333333
+#define CLOCK_150M 150000000
+#define CLOCK_166M 166666666
+#define CLOCK_167M 166666667
+#define CLOCK_196_608M 196608000
+#define CLOCK_200M 200000000
+#define CLOCK_250M 250000000
+#define CLOCK_266M 266666666
+#define CLOCK_300M 300000000
+#define CLOCK_333M 333333333
+#define CLOCK_393M 393215332
+#define CLOCK_400M 400000000
+#define CLOCK_500M 500000000
+#define CLOCK_600M 600000000
+
+/* clock out speeds */
+#define CLOCK_32_768K 32768
+#define CLOCK_1_536M 1536000
+#define CLOCK_2_5M 2500000
+#define CLOCK_12M 12000000
+#define CLOCK_24M 24000000
+#define CLOCK_25M 25000000
+#define CLOCK_30M 30000000
+#define CLOCK_40M 40000000
+#define CLOCK_48M 48000000
+#define CLOCK_50M 50000000
+#define CLOCK_60M 60000000
+
+struct clk {
+ struct clk_lookup cl;
+ unsigned long rate;
+ unsigned long *rates;
+ unsigned int module;
+ unsigned int bits;
+ unsigned long (*get_rate) (void);
+ int (*enable) (struct clk *clk);
+ void (*disable) (struct clk *clk);
+ int (*activate) (struct clk *clk);
+ void (*deactivate) (struct clk *clk);
+ void (*reboot) (struct clk *clk);
+};
+
+extern void clkdev_add_static(unsigned long cpu, unsigned long fpi,
+ unsigned long io);
+
+extern unsigned long ltq_danube_cpu_hz(void);
+extern unsigned long ltq_danube_fpi_hz(void);
+
+extern unsigned long ltq_ar9_cpu_hz(void);
+extern unsigned long ltq_ar9_fpi_hz(void);
+
+extern unsigned long ltq_vr9_cpu_hz(void);
+extern unsigned long ltq_vr9_fpi_hz(void);
#endif
diff --git a/arch/mips/lantiq/devices.c b/arch/mips/lantiq/devices.c
deleted file mode 100644
index de1cb2bcd79..00000000000
--- a/arch/mips/lantiq/devices.c
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/reboot.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-#include <linux/etherdevice.h>
-#include <linux/time.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-
-#include <lantiq_soc.h>
-
-#include "devices.h"
-
-/* nor flash */
-static struct resource ltq_nor_resource = {
- .name = "nor",
- .start = LTQ_FLASH_START,
- .end = LTQ_FLASH_START + LTQ_FLASH_MAX - 1,
- .flags = IORESOURCE_MEM,
-};
-
-static struct platform_device ltq_nor = {
- .name = "ltq_nor",
- .resource = &ltq_nor_resource,
- .num_resources = 1,
-};
-
-void __init ltq_register_nor(struct physmap_flash_data *data)
-{
- ltq_nor.dev.platform_data = data;
- platform_device_register(&ltq_nor);
-}
-
-/* watchdog */
-static struct resource ltq_wdt_resource = {
- .name = "watchdog",
- .start = LTQ_WDT_BASE_ADDR,
- .end = LTQ_WDT_BASE_ADDR + LTQ_WDT_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
-void __init ltq_register_wdt(void)
-{
- platform_device_register_simple("ltq_wdt", 0, &ltq_wdt_resource, 1);
-}
-
-/* asc ports */
-static struct resource ltq_asc0_resources[] = {
- {
- .name = "asc0",
- .start = LTQ_ASC0_BASE_ADDR,
- .end = LTQ_ASC0_BASE_ADDR + LTQ_ASC_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- IRQ_RES(tx, LTQ_ASC_TIR(0)),
- IRQ_RES(rx, LTQ_ASC_RIR(0)),
- IRQ_RES(err, LTQ_ASC_EIR(0)),
-};
-
-static struct resource ltq_asc1_resources[] = {
- {
- .name = "asc1",
- .start = LTQ_ASC1_BASE_ADDR,
- .end = LTQ_ASC1_BASE_ADDR + LTQ_ASC_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- IRQ_RES(tx, LTQ_ASC_TIR(1)),
- IRQ_RES(rx, LTQ_ASC_RIR(1)),
- IRQ_RES(err, LTQ_ASC_EIR(1)),
-};
-
-void __init ltq_register_asc(int port)
-{
- switch (port) {
- case 0:
- platform_device_register_simple("ltq_asc", 0,
- ltq_asc0_resources, ARRAY_SIZE(ltq_asc0_resources));
- break;
- case 1:
- platform_device_register_simple("ltq_asc", 1,
- ltq_asc1_resources, ARRAY_SIZE(ltq_asc1_resources));
- break;
- default:
- break;
- }
-}
-
-#ifdef CONFIG_PCI
-/* pci */
-static struct platform_device ltq_pci = {
- .name = "ltq_pci",
- .num_resources = 0,
-};
-
-void __init ltq_register_pci(struct ltq_pci_data *data)
-{
- ltq_pci.dev.platform_data = data;
- platform_device_register(&ltq_pci);
-}
-#else
-void __init ltq_register_pci(struct ltq_pci_data *data)
-{
- pr_err("kernel is compiled without PCI support\n");
-}
-#endif
diff --git a/arch/mips/lantiq/devices.h b/arch/mips/lantiq/devices.h
deleted file mode 100644
index 2947bb19a52..00000000000
--- a/arch/mips/lantiq/devices.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#ifndef _LTQ_DEVICES_H__
-#define _LTQ_DEVICES_H__
-
-#include <lantiq_platform.h>
-#include <linux/mtd/physmap.h>
-
-#define IRQ_RES(resname, irq) \
- {.name = #resname, .start = (irq), .flags = IORESOURCE_IRQ}
-
-extern void ltq_register_nor(struct physmap_flash_data *data);
-extern void ltq_register_wdt(void);
-extern void ltq_register_asc(int port);
-extern void ltq_register_pci(struct ltq_pci_data *data);
-
-#endif
diff --git a/arch/mips/lantiq/dts/Makefile b/arch/mips/lantiq/dts/Makefile
new file mode 100644
index 00000000000..674fca45f72
--- /dev/null
+++ b/arch/mips/lantiq/dts/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_DT_EASY50712) := easy50712.dtb.o
+
+$(obj)/%.dtb: $(obj)/%.dts
+ $(call if_changed,dtc)
diff --git a/arch/mips/lantiq/dts/danube.dtsi b/arch/mips/lantiq/dts/danube.dtsi
new file mode 100644
index 00000000000..3a4520f009c
--- /dev/null
+++ b/arch/mips/lantiq/dts/danube.dtsi
@@ -0,0 +1,105 @@
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "lantiq,xway", "lantiq,danube";
+
+ cpus {
+ cpu@0 {
+ compatible = "mips,mips24Kc";
+ };
+ };
+
+ biu@1F800000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "lantiq,biu", "simple-bus";
+ reg = <0x1F800000 0x800000>;
+ ranges = <0x0 0x1F800000 0x7FFFFF>;
+
+ icu0: icu@80200 {
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "lantiq,icu";
+ reg = <0x80200 0x120>;
+ };
+
+ watchdog@803F0 {
+ compatible = "lantiq,wdt";
+ reg = <0x803F0 0x10>;
+ };
+ };
+
+ sram@1F000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "lantiq,sram";
+ reg = <0x1F000000 0x800000>;
+ ranges = <0x0 0x1F000000 0x7FFFFF>;
+
+ eiu0: eiu@101000 {
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ interrupt-parent;
+ compatible = "lantiq,eiu-xway";
+ reg = <0x101000 0x1000>;
+ };
+
+ pmu0: pmu@102000 {
+ compatible = "lantiq,pmu-xway";
+ reg = <0x102000 0x1000>;
+ };
+
+ cgu0: cgu@103000 {
+ compatible = "lantiq,cgu-xway";
+ reg = <0x103000 0x1000>;
+ #clock-cells = <1>;
+ };
+
+ rcu0: rcu@203000 {
+ compatible = "lantiq,rcu-xway";
+ reg = <0x203000 0x1000>;
+ };
+ };
+
+ fpi@10000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "lantiq,fpi", "simple-bus";
+ ranges = <0x0 0x10000000 0xEEFFFFF>;
+ reg = <0x10000000 0xEF00000>;
+
+ gptu@E100A00 {
+ compatible = "lantiq,gptu-xway";
+ reg = <0xE100A00 0x100>;
+ };
+
+ serial@E100C00 {
+ compatible = "lantiq,asc";
+ reg = <0xE100C00 0x400>;
+ interrupt-parent = <&icu0>;
+ interrupts = <112 113 114>;
+ };
+
+ dma0: dma@E104100 {
+ compatible = "lantiq,dma-xway";
+ reg = <0xE104100 0x800>;
+ };
+
+ ebu0: ebu@E105300 {
+ compatible = "lantiq,ebu-xway";
+ reg = <0xE105300 0x100>;
+ };
+
+ pci0: pci@E105400 {
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ compatible = "lantiq,pci-xway";
+ bus-range = <0x0 0x0>;
+ ranges = <0x2000000 0 0x8000000 0x8000000 0 0x2000000 /* pci memory */
+ 0x1000000 0 0x00000000 0xAE00000 0 0x200000>; /* io space */
+ reg = <0x7000000 0x8000 /* config space */
+ 0xE105400 0x400>; /* pci bridge */
+ };
+ };
+};
diff --git a/arch/mips/lantiq/dts/easy50712.dts b/arch/mips/lantiq/dts/easy50712.dts
new file mode 100644
index 00000000000..68c17310bc8
--- /dev/null
+++ b/arch/mips/lantiq/dts/easy50712.dts
@@ -0,0 +1,113 @@
+/dts-v1/;
+
+/include/ "danube.dtsi"
+
+/ {
+ chosen {
+ bootargs = "console=ttyLTQ0,115200 init=/etc/preinit";
+ };
+
+ memory@0 {
+ reg = <0x0 0x2000000>;
+ };
+
+ fpi@10000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ localbus@0 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0x0 0x3ffffff /* addrsel0 */
+ 1 0 0x4000000 0x4000010>; /* addsel1 */
+ compatible = "lantiq,localbus", "simple-bus";
+
+ nor-boot@0 {
+ compatible = "lantiq,nor";
+ bank-width = <2>;
+ reg = <0 0x0 0x2000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "uboot";
+ reg = <0x00000 0x10000>; /* 64 KB */
+ };
+
+ partition@10000 {
+ label = "uboot_env";
+ reg = <0x10000 0x10000>; /* 64 KB */
+ };
+
+ partition@20000 {
+ label = "linux";
+ reg = <0x20000 0x3d0000>;
+ };
+
+ partition@400000 {
+ label = "rootfs";
+ reg = <0x400000 0x400000>;
+ };
+ };
+ };
+
+ gpio: pinmux@E100B10 {
+ compatible = "lantiq,pinctrl-xway";
+ pinctrl-names = "default";
+ pinctrl-0 = <&state_default>;
+
+ #gpio-cells = <2>;
+ gpio-controller;
+ reg = <0xE100B10 0xA0>;
+
+ state_default: pinmux {
+ stp {
+ lantiq,groups = "stp";
+ lantiq,function = "stp";
+ };
+ exin {
+ lantiq,groups = "exin1";
+ lantiq,function = "exin";
+ };
+ pci {
+ lantiq,groups = "gnt1";
+ lantiq,function = "pci";
+ };
+ conf_out {
+ lantiq,pins = "io4", "io5", "io6"; /* stp */
+ lantiq,open-drain;
+ lantiq,pull = <0>;
+ };
+ };
+ };
+
+ etop@E180000 {
+ compatible = "lantiq,etop-xway";
+ reg = <0xE180000 0x40000>;
+ interrupt-parent = <&icu0>;
+ interrupts = <73 78>;
+ phy-mode = "rmii";
+ mac-address = [ 00 11 22 33 44 55 ];
+ };
+
+ stp0: stp@E100BB0 {
+ #gpio-cells = <2>;
+ compatible = "lantiq,gpio-stp-xway";
+ gpio-controller;
+ reg = <0xE100BB0 0x40>;
+
+ lantiq,shadow = <0xfff>;
+ lantiq,groups = <0x3>;
+ };
+
+ pci@E105400 {
+ lantiq,bus-clock = <33333333>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+ 0x7000 0 0 1 &icu0 29 1 // slot 14, irq 29
+ >;
+ gpios-reset = <&gpio 21 0>;
+ req-mask = <0x1>; /* GNT1 */
+ };
+
+ };
+};
diff --git a/arch/mips/lantiq/early_printk.c b/arch/mips/lantiq/early_printk.c
index 972e05f8763..9b28d0940ef 100644
--- a/arch/mips/lantiq/early_printk.c
+++ b/arch/mips/lantiq/early_printk.c
@@ -6,17 +6,16 @@
* Copyright (C) 2010 John Crispin <blogic@openwrt.org>
*/
-#include <linux/init.h>
#include <linux/cpu.h>
-
-#include <lantiq.h>
#include <lantiq_soc.h>
-/* no ioremap possible at this early stage, lets use KSEG1 instead */
-#define LTQ_ASC_BASE KSEG1ADDR(LTQ_ASC1_BASE_ADDR)
#define ASC_BUF 1024
-#define LTQ_ASC_FSTAT ((u32 *)(LTQ_ASC_BASE + 0x0048))
-#define LTQ_ASC_TBUF ((u32 *)(LTQ_ASC_BASE + 0x0020))
+#define LTQ_ASC_FSTAT ((u32 *)(LTQ_EARLY_ASC + 0x0048))
+#ifdef __BIG_ENDIAN
+#define LTQ_ASC_TBUF ((u32 *)(LTQ_EARLY_ASC + 0x0020 + 3))
+#else
+#define LTQ_ASC_TBUF ((u32 *)(LTQ_EARLY_ASC + 0x0020))
+#endif
#define TXMASK 0x3F00
#define TXOFFSET 8
@@ -27,7 +26,7 @@ void prom_putchar(char c)
local_irq_save(flags);
do { } while ((ltq_r32(LTQ_ASC_FSTAT) & TXMASK) >> TXOFFSET);
if (c == '\n')
- ltq_w32('\r', LTQ_ASC_TBUF);
- ltq_w32(c, LTQ_ASC_TBUF);
+ ltq_w8('\r', LTQ_ASC_TBUF);
+ ltq_w8(c, LTQ_ASC_TBUF);
local_irq_restore(flags);
}
diff --git a/arch/mips/lantiq/falcon/Makefile b/arch/mips/lantiq/falcon/Makefile
new file mode 100644
index 00000000000..ff220f97693
--- /dev/null
+++ b/arch/mips/lantiq/falcon/Makefile
@@ -0,0 +1 @@
+obj-y := prom.o reset.o sysctrl.o
diff --git a/arch/mips/lantiq/falcon/prom.c b/arch/mips/lantiq/falcon/prom.c
new file mode 100644
index 00000000000..c1d278f05a3
--- /dev/null
+++ b/arch/mips/lantiq/falcon/prom.c
@@ -0,0 +1,87 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <asm/io.h>
+
+#include <lantiq_soc.h>
+
+#include "../prom.h"
+
+#define SOC_FALCON "Falcon"
+#define SOC_FALCON_D "Falcon-D"
+#define SOC_FALCON_V "Falcon-V"
+#define SOC_FALCON_M "Falcon-M"
+
+#define COMP_FALCON "lantiq,falcon"
+
+#define PART_SHIFT 12
+#define PART_MASK 0x0FFFF000
+#define REV_SHIFT 28
+#define REV_MASK 0xF0000000
+#define SREV_SHIFT 22
+#define SREV_MASK 0x03C00000
+#define TYPE_SHIFT 26
+#define TYPE_MASK 0x3C000000
+
+/* reset, nmi and ejtag exception vectors */
+#define BOOT_REG_BASE (KSEG1 | 0x1F200000)
+#define BOOT_RVEC (BOOT_REG_BASE | 0x00)
+#define BOOT_NVEC (BOOT_REG_BASE | 0x04)
+#define BOOT_EVEC (BOOT_REG_BASE | 0x08)
+
+void __init ltq_soc_nmi_setup(void)
+{
+ extern void (*nmi_handler)(void);
+
+ ltq_w32((unsigned long)&nmi_handler, (void *)BOOT_NVEC);
+}
+
+void __init ltq_soc_ejtag_setup(void)
+{
+ extern void (*ejtag_debug_handler)(void);
+
+ ltq_w32((unsigned long)&ejtag_debug_handler, (void *)BOOT_EVEC);
+}
+
+void __init ltq_soc_detect(struct ltq_soc_info *i)
+{
+ u32 type;
+ i->partnum = (ltq_r32(FALCON_CHIPID) & PART_MASK) >> PART_SHIFT;
+ i->rev = (ltq_r32(FALCON_CHIPID) & REV_MASK) >> REV_SHIFT;
+ i->srev = ((ltq_r32(FALCON_CHIPCONF) & SREV_MASK) >> SREV_SHIFT);
+ i->compatible = COMP_FALCON;
+ i->type = SOC_TYPE_FALCON;
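+ /* compose a human readable revision string, e.g. A12 or B11 */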
+ sprintf(i->rev_type, "%c%d%d", (i->srev & 0x4) ? ('B') : ('A'),
+ i->rev & 0x7, (i->srev & 0x3) + 1);
+
+ switch (i->partnum) {
+ case SOC_ID_FALCON:
+ type = (ltq_r32(FALCON_CHIPTYPE) & TYPE_MASK) >> TYPE_SHIFT;
+ switch (type) {
+ case 0:
+ i->name = SOC_FALCON_D;
+ break;
+ case 1:
+ i->name = SOC_FALCON_V;
+ break;
+ case 2:
+ i->name = SOC_FALCON_M;
+ break;
+ default:
+ i->name = SOC_FALCON;
+ break;
+ }
+ break;
+
+ default:
+ unreachable();
+ break;
+ }
+}
diff --git a/arch/mips/lantiq/falcon/reset.c b/arch/mips/lantiq/falcon/reset.c
new file mode 100644
index 00000000000..56824825342
--- /dev/null
+++ b/arch/mips/lantiq/falcon/reset.c
@@ -0,0 +1,90 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/pm.h>
+#include <asm/reboot.h>
+#include <linux/export.h>
+
+#include <lantiq_soc.h>
+
+/* CPU0 Reset Source Register */
+#define SYS1_CPU0RS 0x0040
+/* reset cause mask */
+#define CPU0RS_MASK 0x0003
+/* CPU0 Boot Mode Register */
+#define SYS1_BM 0x00a0
+/* boot mode mask */
+#define BM_MASK 0x0005
+
+/* allow platform code to find out what source we booted from */
+unsigned char ltq_boot_select(void)
+{
+ return ltq_sys1_r32(SYS1_BM) & BM_MASK;
+}
+
+/* allow the watchdog driver to find out what the boot reason was */
+int ltq_reset_cause(void)
+{
+ return ltq_sys1_r32(SYS1_CPU0RS) & CPU0RS_MASK;
+}
+EXPORT_SYMBOL_GPL(ltq_reset_cause);
+
+#define BOOT_REG_BASE (KSEG1 | 0x1F200000)
+#define BOOT_PW1_REG (BOOT_REG_BASE | 0x20)
+#define BOOT_PW2_REG (BOOT_REG_BASE | 0x24)
+#define BOOT_PW1 0x4C545100
+#define BOOT_PW2 0x0051544C
+
+#define WDT_REG_BASE (KSEG1 | 0x1F8803F0)
+#define WDT_PW1 0x00BE0000
+#define WDT_PW2 0x00DC0000
+
+static void machine_restart(char *command)
+{
+ local_irq_disable();
+
+ /* reboot magic */
+ ltq_w32(BOOT_PW1, (void *)BOOT_PW1_REG); /* 'LTQ\0' */
+ ltq_w32(BOOT_PW2, (void *)BOOT_PW2_REG); /* '\0QTL' */
+ ltq_w32(0, (void *)BOOT_REG_BASE); /* reset Bootreg RVEC */
+
+ /* watchdog magic */
+ ltq_w32(WDT_PW1, (void *)WDT_REG_BASE);
+ ltq_w32(WDT_PW2 |
+ (0x3 << 26) | /* PWL */
+ (0x2 << 24) | /* CLKDIV */
+ (0x1 << 31) | /* enable */
+ (1), /* reload */
+ (void *)WDT_REG_BASE);
+ unreachable();
+}
+
+static void machine_halt(void)
+{
+ local_irq_disable();
+ unreachable();
+}
+
+static void machine_power_off(void)
+{
+ local_irq_disable();
+ unreachable();
+}
+
+static int __init mips_reboot_setup(void)
+{
+ _machine_restart = machine_restart;
+ _machine_halt = machine_halt;
+ pm_power_off = machine_power_off;
+ return 0;
+}
+
+arch_initcall(mips_reboot_setup);
diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c
new file mode 100644
index 00000000000..ba0123d13d4
--- /dev/null
+++ b/arch/mips/lantiq/falcon/sysctrl.c
@@ -0,0 +1,260 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2011 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/ioport.h>
+#include <linux/export.h>
+#include <linux/clkdev.h>
+#include <linux/of_address.h>
+#include <asm/delay.h>
+
+#include <lantiq_soc.h>
+
+#include "../clk.h"
+
+/* infrastructure control register */
+#define SYS1_INFRAC 0x00bc
+/* Configuration fuses for drivers and pll */
+#define STATUS_CONFIG 0x0040
+
+/* GPE frequency selection */
+#define GPPC_OFFSET 24
+#define GPEFREQ_MASK 0x00000C0
+#define GPEFREQ_OFFSET 10
+/* Clock status register */
+#define SYSCTL_CLKS 0x0000
+/* Clock enable register */
+#define SYSCTL_CLKEN 0x0004
+/* Clock clear register */
+#define SYSCTL_CLKCLR 0x0008
+/* Activation Status Register */
+#define SYSCTL_ACTS 0x0020
+/* Activation Register */
+#define SYSCTL_ACT 0x0024
+/* Deactivation Register */
+#define SYSCTL_DEACT 0x0028
+/* reboot Register */
+#define SYSCTL_RBT 0x002c
+/* CPU0 Clock Control Register */
+#define SYS1_CPU0CC 0x0040
+/* HRST_OUT_N Control Register */
+#define SYS1_HRSTOUTC 0x00c0
+/* clock divider bit */
+#define CPU0CC_CPUDIV 0x0001
+
+/* Activation Status Register */
+#define ACTS_ASC1_ACT 0x00000800
+#define ACTS_I2C_ACT 0x00004000
+#define ACTS_P0 0x00010000
+#define ACTS_P1 0x00010000
+#define ACTS_P2 0x00020000
+#define ACTS_P3 0x00020000
+#define ACTS_P4 0x00040000
+#define ACTS_PADCTRL0 0x00100000
+#define ACTS_PADCTRL1 0x00100000
+#define ACTS_PADCTRL2 0x00200000
+#define ACTS_PADCTRL3 0x00200000
+#define ACTS_PADCTRL4 0x00400000
+
+#define sysctl_w32(m, x, y) ltq_w32((x), sysctl_membase[m] + (y))
+#define sysctl_r32(m, x) ltq_r32(sysctl_membase[m] + (x))
+#define sysctl_w32_mask(m, clear, set, reg) \
+ sysctl_w32(m, (sysctl_r32(m, reg) & ~(clear)) | (set), reg)
+
+#define status_w32(x, y) ltq_w32((x), status_membase + (y))
+#define status_r32(x) ltq_r32(status_membase + (x))
+
+static void __iomem *sysctl_membase[3], *status_membase;
+void __iomem *ltq_sys1_membase, *ltq_ebu_membase;
+
+void falcon_trigger_hrst(int level)
+{
+ sysctl_w32(SYSCTL_SYS1, level & 1, SYS1_HRSTOUTC);
+}
+
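+/* busy-wait until the selected module bits match 'test' or ~1M polls expire */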
+static inline void sysctl_wait(struct clk *clk,
+ unsigned int test, unsigned int reg)
+{
+ int err = 1000000;
+
+ do {} while (--err && ((sysctl_r32(clk->module, reg)
+ & clk->bits) != test));
+ if (!err)
+ pr_err("module de/activation failed %d %08X %08X %08X\n",
+ clk->module, clk->bits, test,
+ sysctl_r32(clk->module, reg) & clk->bits);
+}
+
+static int sysctl_activate(struct clk *clk)
+{
+ sysctl_w32(clk->module, clk->bits, SYSCTL_CLKEN);
+ sysctl_w32(clk->module, clk->bits, SYSCTL_ACT);
+ sysctl_wait(clk, clk->bits, SYSCTL_ACTS);
+ return 0;
+}
+
+static void sysctl_deactivate(struct clk *clk)
+{
+ sysctl_w32(clk->module, clk->bits, SYSCTL_CLKCLR);
+ sysctl_w32(clk->module, clk->bits, SYSCTL_DEACT);
+ sysctl_wait(clk, 0, SYSCTL_ACTS);
+}
+
+static int sysctl_clken(struct clk *clk)
+{
+ sysctl_w32(clk->module, clk->bits, SYSCTL_CLKEN);
+ sysctl_wait(clk, clk->bits, SYSCTL_CLKS);
+ return 0;
+}
+
+static void sysctl_clkdis(struct clk *clk)
+{
+ sysctl_w32(clk->module, clk->bits, SYSCTL_CLKCLR);
+ sysctl_wait(clk, 0, SYSCTL_CLKS);
+}
+
+static void sysctl_reboot(struct clk *clk)
+{
+ unsigned int act;
+ unsigned int bits;
+
+ act = sysctl_r32(clk->module, SYSCTL_ACT);
+ bits = ~act & clk->bits;
+ if (bits != 0) {
+ sysctl_w32(clk->module, bits, SYSCTL_CLKEN);
+ sysctl_w32(clk->module, bits, SYSCTL_ACT);
+ sysctl_wait(clk, bits, SYSCTL_ACTS);
+ }
+ sysctl_w32(clk->module, act & clk->bits, SYSCTL_RBT);
+ sysctl_wait(clk, clk->bits, SYSCTL_ACTS);
+}
+
+/* enable the ONU core */
+static void falcon_gpe_enable(void)
+{
+ unsigned int freq;
+ unsigned int status;
+
+ /* check if the clock is already enabled */
+ status = sysctl_r32(SYSCTL_SYS1, SYS1_INFRAC);
+ if (status & (1 << (GPPC_OFFSET + 1)))
+ return;
+
+ if (status_r32(STATUS_CONFIG) == 0)
+ freq = 1; /* use 625MHz on unfused chip */
+ else
+ freq = (status_r32(STATUS_CONFIG) &
+ GPEFREQ_MASK) >>
+ GPEFREQ_OFFSET;
+
+ /* apply new frequency */
+ sysctl_w32_mask(SYSCTL_SYS1, 7 << (GPPC_OFFSET + 1),
+ freq << (GPPC_OFFSET + 2), SYS1_INFRAC);
+ udelay(1);
+
+ /* enable new frequency */
+ sysctl_w32_mask(SYSCTL_SYS1, 0, 1 << (GPPC_OFFSET + 1), SYS1_INFRAC);
+ udelay(1);
+}
+
+static inline void clkdev_add_sys(const char *dev, unsigned int module,
+ unsigned int bits)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ clk->cl.dev_id = dev;
+ clk->cl.con_id = NULL;
+ clk->cl.clk = clk;
+ clk->module = module;
+ clk->activate = sysctl_activate;
+ clk->deactivate = sysctl_deactivate;
+ clk->enable = sysctl_clken;
+ clk->disable = sysctl_clkdis;
+ clk->reboot = sysctl_reboot;
+ clkdev_add(&clk->cl);
+}
+
+void __init ltq_soc_init(void)
+{
+ struct device_node *np_status =
+ of_find_compatible_node(NULL, NULL, "lantiq,status-falcon");
+ struct device_node *np_ebu =
+ of_find_compatible_node(NULL, NULL, "lantiq,ebu-falcon");
+ struct device_node *np_sys1 =
+ of_find_compatible_node(NULL, NULL, "lantiq,sys1-falcon");
+ struct device_node *np_syseth =
+ of_find_compatible_node(NULL, NULL, "lantiq,syseth-falcon");
+ struct device_node *np_sysgpe =
+ of_find_compatible_node(NULL, NULL, "lantiq,sysgpe-falcon");
+ struct resource res_status, res_ebu, res_sys[3];
+ int i;
+
+ /* check if all the core register ranges are available */
+ if (!np_status || !np_ebu || !np_sys1 || !np_syseth || !np_sysgpe)
+ panic("Failed to load core nodes from devicetree");
+
+ if (of_address_to_resource(np_status, 0, &res_status) ||
+ of_address_to_resource(np_ebu, 0, &res_ebu) ||
+ of_address_to_resource(np_sys1, 0, &res_sys[0]) ||
+ of_address_to_resource(np_syseth, 0, &res_sys[1]) ||
+ of_address_to_resource(np_sysgpe, 0, &res_sys[2]))
+ panic("Failed to get core resources");
+
+ if ((request_mem_region(res_status.start, resource_size(&res_status),
+ res_status.name) < 0) ||
+ (request_mem_region(res_ebu.start, resource_size(&res_ebu),
+ res_ebu.name) < 0) ||
+ (request_mem_region(res_sys[0].start,
+ resource_size(&res_sys[0]),
+ res_sys[0].name) < 0) ||
+ (request_mem_region(res_sys[1].start,
+ resource_size(&res_sys[1]),
+ res_sys[1].name) < 0) ||
+ (request_mem_region(res_sys[2].start,
+ resource_size(&res_sys[2]),
+ res_sys[2].name) < 0))
+ pr_err("Failed to request core reources");
+
+ status_membase = ioremap_nocache(res_status.start,
+ resource_size(&res_status));
+ ltq_ebu_membase = ioremap_nocache(res_ebu.start,
+ resource_size(&res_ebu));
+
+ if (!status_membase || !ltq_ebu_membase)
+ panic("Failed to remap core resources");
+
+ for (i = 0; i < 3; i++) {
+ sysctl_membase[i] = ioremap_nocache(res_sys[i].start,
+ resource_size(&res_sys[i]));
+ if (!sysctl_membase[i])
+ panic("Failed to remap sysctrl resources");
+ }
+ ltq_sys1_membase = sysctl_membase[0];
+
+ falcon_gpe_enable();
+
+ /* get our 3 static rates for cpu, fpi and io clocks */
+ if (ltq_sys1_r32(SYS1_CPU0CC) & CPU0CC_CPUDIV)
+ clkdev_add_static(CLOCK_200M, CLOCK_100M, CLOCK_200M);
+ else
+ clkdev_add_static(CLOCK_400M, CLOCK_100M, CLOCK_200M);
+
+ /* add our clock domains */
+ clkdev_add_sys("1d810000.gpio", SYSCTL_SYSETH, ACTS_P0);
+ clkdev_add_sys("1d810100.gpio", SYSCTL_SYSETH, ACTS_P2);
+ clkdev_add_sys("1e800100.gpio", SYSCTL_SYS1, ACTS_P1);
+ clkdev_add_sys("1e800200.gpio", SYSCTL_SYS1, ACTS_P3);
+ clkdev_add_sys("1e800300.gpio", SYSCTL_SYS1, ACTS_P4);
+ clkdev_add_sys("1db01000.pad", SYSCTL_SYSETH, ACTS_PADCTRL0);
+ clkdev_add_sys("1db02000.pad", SYSCTL_SYSETH, ACTS_PADCTRL2);
+ clkdev_add_sys("1e800400.pad", SYSCTL_SYS1, ACTS_PADCTRL1);
+ clkdev_add_sys("1e800500.pad", SYSCTL_SYS1, ACTS_PADCTRL3);
+ clkdev_add_sys("1e800600.pad", SYSCTL_SYS1, ACTS_PADCTRL4);
+ clkdev_add_sys("1e100C00.serial", SYSCTL_SYS1, ACTS_ASC1_ACT);
+ clkdev_add_sys("1e200000.i2c", SYSCTL_SYS1, ACTS_I2C_ACT);
+}
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index d673731c538..57c1a4e5140 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -9,6 +9,11 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/irqdomain.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/bootinfo.h>
#include <asm/irq_cpu.h>
@@ -16,7 +21,7 @@
#include <lantiq_soc.h>
#include <irq.h>
-/* register definitions */
+/* register definitions - internal irqs */
#define LTQ_ICU_IM0_ISR 0x0000
#define LTQ_ICU_IM0_IER 0x0008
#define LTQ_ICU_IM0_IOSR 0x0010
@@ -25,6 +30,7 @@
#define LTQ_ICU_IM1_ISR 0x0028
#define LTQ_ICU_OFFSET (LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR)
+/* register definitions - external irqs */
#define LTQ_EIU_EXIN_C 0x0000
#define LTQ_EIU_EXIN_INIC 0x0004
#define LTQ_EIU_EXIN_INEN 0x000C
@@ -37,10 +43,14 @@
#define LTQ_EIU_IR4 (INT_NUM_IM1_IRL0 + 1)
#define LTQ_EIU_IR5 (INT_NUM_IM1_IRL0 + 2)
#define LTQ_EIU_IR6 (INT_NUM_IM2_IRL0 + 30)
-
+#define XWAY_EXIN_COUNT 3
#define MAX_EIU 6
-/* irqs generated by device attached to the EBU need to be acked in
+/* the performance counter */
+#define LTQ_PERF_IRQ (INT_NUM_IM4_IRL0 + 31)
+
+/*
+ * irqs generated by devices attached to the EBU need to be acked in
* a special manner
*/
#define LTQ_ICU_EBU_IRQ 22
@@ -51,6 +61,17 @@
#define ltq_eiu_w32(x, y) ltq_w32((x), ltq_eiu_membase + (y))
#define ltq_eiu_r32(x) ltq_r32(ltq_eiu_membase + (x))
+/* our 2 ipi interrupts for VSMP */
+#define MIPS_CPU_IPI_RESCHED_IRQ 0
+#define MIPS_CPU_IPI_CALL_IRQ 1
+
+/* we have a cascade of 8 irqs */
+#define MIPS_CPU_IRQ_CASCADE 8
+
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+int gic_present;
+#endif
+
static unsigned short ltq_eiu_irq[MAX_EIU] = {
LTQ_EIU_IR0,
LTQ_EIU_IR1,
@@ -60,64 +81,51 @@ static unsigned short ltq_eiu_irq[MAX_EIU] = {
LTQ_EIU_IR5,
};
-static struct resource ltq_icu_resource = {
- .name = "icu",
- .start = LTQ_ICU_BASE_ADDR,
- .end = LTQ_ICU_BASE_ADDR + LTQ_ICU_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
-static struct resource ltq_eiu_resource = {
- .name = "eiu",
- .start = LTQ_EIU_BASE_ADDR,
- .end = LTQ_EIU_BASE_ADDR + LTQ_ICU_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
+static int exin_avail;
static void __iomem *ltq_icu_membase;
static void __iomem *ltq_eiu_membase;
void ltq_disable_irq(struct irq_data *d)
{
u32 ier = LTQ_ICU_IM0_IER;
- int irq_nr = d->irq - INT_NUM_IRQ0;
+ int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
- ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
- irq_nr %= INT_NUM_IM_OFFSET;
- ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier);
+ ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
+ offset %= INT_NUM_IM_OFFSET;
+ ltq_icu_w32(ltq_icu_r32(ier) & ~BIT(offset), ier);
}
void ltq_mask_and_ack_irq(struct irq_data *d)
{
u32 ier = LTQ_ICU_IM0_IER;
u32 isr = LTQ_ICU_IM0_ISR;
- int irq_nr = d->irq - INT_NUM_IRQ0;
+ int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
- ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
- isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
- irq_nr %= INT_NUM_IM_OFFSET;
- ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier);
- ltq_icu_w32((1 << irq_nr), isr);
+ ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
+ isr += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
+ offset %= INT_NUM_IM_OFFSET;
+ ltq_icu_w32(ltq_icu_r32(ier) & ~BIT(offset), ier);
+ ltq_icu_w32(BIT(offset), isr);
}
static void ltq_ack_irq(struct irq_data *d)
{
u32 isr = LTQ_ICU_IM0_ISR;
- int irq_nr = d->irq - INT_NUM_IRQ0;
+ int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
- isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
- irq_nr %= INT_NUM_IM_OFFSET;
- ltq_icu_w32((1 << irq_nr), isr);
+ isr += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
+ offset %= INT_NUM_IM_OFFSET;
+ ltq_icu_w32(BIT(offset), isr);
}
void ltq_enable_irq(struct irq_data *d)
{
u32 ier = LTQ_ICU_IM0_IER;
- int irq_nr = d->irq - INT_NUM_IRQ0;
+ int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
- ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET);
- irq_nr %= INT_NUM_IM_OFFSET;
- ltq_icu_w32(ltq_icu_r32(ier) | (1 << irq_nr), ier);
+ ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
+ offset %= INT_NUM_IM_OFFSET;
+ ltq_icu_w32(ltq_icu_r32(ier) | BIT(offset), ier);
}
static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
@@ -126,15 +134,15 @@ static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
ltq_enable_irq(d);
for (i = 0; i < MAX_EIU; i++) {
- if (d->irq == ltq_eiu_irq[i]) {
+ if (d->hwirq == ltq_eiu_irq[i]) {
/* low level - we should really handle set_type */
ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
(0x6 << (i * 4)), LTQ_EIU_EXIN_C);
/* clear all pending */
- ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~(1 << i),
+ ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~BIT(i),
LTQ_EIU_EXIN_INIC);
/* enable */
- ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | (1 << i),
+ ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i),
LTQ_EIU_EXIN_INEN);
break;
}
@@ -149,9 +157,9 @@ static void ltq_shutdown_eiu_irq(struct irq_data *d)
ltq_disable_irq(d);
for (i = 0; i < MAX_EIU; i++) {
- if (d->irq == ltq_eiu_irq[i]) {
+ if (d->hwirq == ltq_eiu_irq[i]) {
/* disable */
- ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i),
+ ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
LTQ_EIU_EXIN_INEN);
break;
}
@@ -188,14 +196,15 @@ static void ltq_hw_irqdispatch(int module)
if (irq == 0)
return;
- /* silicon bug causes only the msb set to 1 to be valid. all
+ /*
+ * silicon bug causes only the msb set to 1 to be valid. all
* other bits might be bogus
*/
irq = __fls(irq);
- do_IRQ((int)irq + INT_NUM_IM0_IRL0 + (INT_NUM_IM_OFFSET * module));
+ do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module));
/* if this is a EBU irq, we need to ack it or get a deadlock */
- if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0))
+ if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
LTQ_EBU_PCC_ISTAT);
}
@@ -216,6 +225,47 @@ static void ltq_hw5_irqdispatch(void)
do_IRQ(MIPS_CPU_TIMER_IRQ);
}
+#ifdef CONFIG_MIPS_MT_SMP
+void __init arch_init_ipiirq(int irq, struct irqaction *action)
+{
+ setup_irq(irq, action);
+ irq_set_handler(irq, handle_percpu_irq);
+}
+
+static void ltq_sw0_irqdispatch(void)
+{
+ do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
+}
+
+static void ltq_sw1_irqdispatch(void)
+{
+ do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
+}
+static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
+{
+ scheduler_ipi();
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
+{
+ smp_call_function_interrupt();
+ return IRQ_HANDLED;
+}
+
+static struct irqaction irq_resched = {
+ .handler = ipi_resched_interrupt,
+ .flags = IRQF_PERCPU,
+ .name = "IPI_resched"
+};
+
+static struct irqaction irq_call = {
+ .handler = ipi_call_interrupt,
+ .flags = IRQF_PERCPU,
+ .name = "IPI_call"
+};
+#endif
+
asmlinkage void plat_irq_dispatch(void)
{
unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
@@ -238,45 +288,75 @@ out:
return;
}
+static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+ struct irq_chip *chip = &ltq_irq_type;
+ int i;
+
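+ /* external irq lines get the EIU chip, all others the plain ICU chip */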
+ for (i = 0; i < exin_avail; i++)
+ if (hw == ltq_eiu_irq[i])
+ chip = &ltq_eiu_type;
+
+ irq_set_chip_and_handler(hw, chip, handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops irq_domain_ops = {
+ .xlate = irq_domain_xlate_onetwocell,
+ .map = icu_map,
+};
+
static struct irqaction cascade = {
.handler = no_action,
.name = "cascade",
};
-void __init arch_init_irq(void)
+int __init icu_of_init(struct device_node *node, struct device_node *parent)
{
+ struct device_node *eiu_node;
+ struct resource res;
int i;
- if (insert_resource(&iomem_resource, &ltq_icu_resource) < 0)
- panic("Failed to insert icu memory");
+ if (of_address_to_resource(node, 0, &res))
+ panic("Failed to get icu memory range");
- if (request_mem_region(ltq_icu_resource.start,
- resource_size(&ltq_icu_resource), "icu") < 0)
- panic("Failed to request icu memory");
+ if (request_mem_region(res.start, resource_size(&res), res.name) < 0)
+ pr_err("Failed to request icu memory");
- ltq_icu_membase = ioremap_nocache(ltq_icu_resource.start,
- resource_size(&ltq_icu_resource));
+ ltq_icu_membase = ioremap_nocache(res.start, resource_size(&res));
if (!ltq_icu_membase)
panic("Failed to remap icu memory");
- if (insert_resource(&iomem_resource, &ltq_eiu_resource) < 0)
- panic("Failed to insert eiu memory");
-
- if (request_mem_region(ltq_eiu_resource.start,
- resource_size(&ltq_eiu_resource), "eiu") < 0)
- panic("Failed to request eiu memory");
-
- ltq_eiu_membase = ioremap_nocache(ltq_eiu_resource.start,
- resource_size(&ltq_eiu_resource));
- if (!ltq_eiu_membase)
- panic("Failed to remap eiu memory");
+ /* the external interrupts are optional and xway only */
+ eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu");
+ if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
+ /* find out how many external irq sources we have */
+ const __be32 *count = of_get_property(node,
+ "lantiq,count", NULL);
+
+ if (count)
+ exin_avail = *count;
+ if (exin_avail > MAX_EIU)
+ exin_avail = MAX_EIU;
+
+ if (request_mem_region(res.start, resource_size(&res),
+ res.name) < 0)
+ pr_err("Failed to request eiu memory");
+
+ ltq_eiu_membase = ioremap_nocache(res.start,
+ resource_size(&res));
+ if (!ltq_eiu_membase)
+ panic("Failed to remap eiu memory");
+ }
- /* make sure all irqs are turned off by default */
- for (i = 0; i < 5; i++)
+ /* turn off all irqs by default */
+ for (i = 0; i < 5; i++) {
+ /* make sure all irqs are turned off by default */
ltq_icu_w32(0, LTQ_ICU_IM0_IER + (i * LTQ_ICU_OFFSET));
-
- /* clear all possibly pending interrupts */
- ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET));
+ /* clear all possibly pending interrupts */
+ ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET));
+ }
mips_cpu_irq_init();
@@ -293,20 +373,19 @@ void __init arch_init_irq(void)
set_vi_handler(7, ltq_hw5_irqdispatch);
}
- for (i = INT_NUM_IRQ0;
- i <= (INT_NUM_IRQ0 + (5 * INT_NUM_IM_OFFSET)); i++)
- if ((i == LTQ_EIU_IR0) || (i == LTQ_EIU_IR1) ||
- (i == LTQ_EIU_IR2))
- irq_set_chip_and_handler(i, &ltq_eiu_type,
- handle_level_irq);
- /* EIU3-5 only exist on ar9 and vr9 */
- else if (((i == LTQ_EIU_IR3) || (i == LTQ_EIU_IR4) ||
- (i == LTQ_EIU_IR5)) && (ltq_is_ar9() || ltq_is_vr9()))
- irq_set_chip_and_handler(i, &ltq_eiu_type,
- handle_level_irq);
- else
- irq_set_chip_and_handler(i, &ltq_irq_type,
- handle_level_irq);
+ irq_domain_add_linear(node, 6 * INT_NUM_IM_OFFSET,
+ &irq_domain_ops, 0);
+
+#if defined(CONFIG_MIPS_MT_SMP)
+ if (cpu_has_vint) {
+ pr_info("Setting up IPI vectored interrupts\n");
+ set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ltq_sw0_irqdispatch);
+ set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ltq_sw1_irqdispatch);
+ }
+ arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ,
+ &irq_resched);
+ arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ, &irq_call);
+#endif
#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
@@ -315,9 +394,23 @@ void __init arch_init_irq(void)
set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#endif
+
+ /* tell oprofile which irq to use */
+ cp0_perfcount_irq = LTQ_PERF_IRQ;
+ return 0;
}
unsigned int __cpuinit get_c0_compare_int(void)
{
return CP0_LEGACY_COMPARE_IRQ;
}
+
+static struct of_device_id __initdata of_irq_ids[] = {
+ { .compatible = "lantiq,icu", .data = icu_of_init },
+ {},
+};
+
+void __init arch_init_irq(void)
+{
+ of_irq_init(of_irq_ids);
+}
diff --git a/arch/mips/lantiq/machtypes.h b/arch/mips/lantiq/machtypes.h
deleted file mode 100644
index 7e01b8c484e..00000000000
--- a/arch/mips/lantiq/machtypes.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#ifndef _LANTIQ_MACH_H__
-#define _LANTIQ_MACH_H__
-
-#include <asm/mips_machine.h>
-
-enum lantiq_mach_type {
- LTQ_MACH_GENERIC = 0,
- LTQ_MACH_EASY50712, /* Danube evaluation board */
- LTQ_MACH_EASY50601, /* Amazon SE evaluation board */
-};
-
-#endif
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index e34fcfd0d5c..d185e8477fd 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -8,6 +8,7 @@
#include <linux/export.h>
#include <linux/clk.h>
+#include <linux/of_platform.h>
#include <asm/bootinfo.h>
#include <asm/time.h>
@@ -16,19 +17,15 @@
#include "prom.h"
#include "clk.h"
-static struct ltq_soc_info soc_info;
-
-unsigned int ltq_get_cpu_ver(void)
-{
- return soc_info.rev;
-}
-EXPORT_SYMBOL(ltq_get_cpu_ver);
+/* access to the ebu needs to be locked between different drivers */
+DEFINE_SPINLOCK(ebu_lock);
+EXPORT_SYMBOL_GPL(ebu_lock);
-unsigned int ltq_get_soc_type(void)
-{
- return soc_info.type;
-}
-EXPORT_SYMBOL(ltq_get_soc_type);
+/*
+ * this struct is filled by the soc specific detection code and holds
+ * information about the specific soc type, revision and name
+ */
+static struct ltq_soc_info soc_info;
const char *get_system_type(void)
{
@@ -45,27 +42,62 @@ static void __init prom_init_cmdline(void)
char **argv = (char **) KSEG1ADDR(fw_arg1);
int i;
+ arcs_cmdline[0] = '\0';
+
for (i = 0; i < argc; i++) {
- char *p = (char *) KSEG1ADDR(argv[i]);
+ char *p = (char *) KSEG1ADDR(argv[i]);
- if (p && *p) {
+ if (CPHYSADDR(p) && *p) {
strlcat(arcs_cmdline, p, sizeof(arcs_cmdline));
strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline));
}
}
}
-void __init prom_init(void)
+void __init plat_mem_setup(void)
{
- struct clk *clk;
+ ioport_resource.start = IOPORT_RESOURCE_START;
+ ioport_resource.end = IOPORT_RESOURCE_END;
+ iomem_resource.start = IOMEM_RESOURCE_START;
+ iomem_resource.end = IOMEM_RESOURCE_END;
+
+ set_io_port_base((unsigned long) KSEG1);
+ /*
+ * Load the builtin devicetree. This causes the chosen node to be
+ * parsed, resulting in our memory appearing
+ */
+ __dt_setup_arch(&__dtb_start);
+}
+
+void __init prom_init(void)
+{
+ /* call the soc specific detection code and get it to fill soc_info */
ltq_soc_detect(&soc_info);
- clk_init();
- clk = clk_get(0, "cpu");
- snprintf(soc_info.sys_type, LTQ_SYS_TYPE_LEN - 1, "%s rev1.%d",
- soc_info.name, soc_info.rev);
- clk_put(clk);
+ snprintf(soc_info.sys_type, LTQ_SYS_TYPE_LEN - 1, "%s rev %s",
+ soc_info.name, soc_info.rev_type);
soc_info.sys_type[LTQ_SYS_TYPE_LEN - 1] = '\0';
pr_info("SoC: %s\n", soc_info.sys_type);
prom_init_cmdline();
+
+#if defined(CONFIG_MIPS_MT_SMP)
+ if (register_vsmp_smp_ops())
+ panic("failed to register_vsmp_smp_ops()");
+#endif
}
+
+int __init plat_of_setup(void)
+{
+ static struct of_device_id of_ids[3];
+
+ if (!of_have_populated_dt())
+ panic("device tree not present");
+
+ strncpy(of_ids[0].compatible, soc_info.compatible,
+ sizeof(of_ids[0].compatible));
+ strncpy(of_ids[1].compatible, "simple-bus",
+ sizeof(of_ids[1].compatible));
+ return of_platform_bus_probe(NULL, of_ids, NULL);
+}
+
+arch_initcall(plat_of_setup);
diff --git a/arch/mips/lantiq/prom.h b/arch/mips/lantiq/prom.h
index b4229d94280..a3fa1a2bfaa 100644
--- a/arch/mips/lantiq/prom.h
+++ b/arch/mips/lantiq/prom.h
@@ -10,16 +10,22 @@
#define _LTQ_PROM_H__
#define LTQ_SYS_TYPE_LEN 0x100
+#define LTQ_SYS_REV_LEN 0x10
struct ltq_soc_info {
unsigned char *name;
unsigned int rev;
+ unsigned char rev_type[LTQ_SYS_REV_LEN];
+ unsigned int srev;
unsigned int partnum;
unsigned int type;
unsigned char sys_type[LTQ_SYS_TYPE_LEN];
+ unsigned char *compatible;
};
extern void ltq_soc_detect(struct ltq_soc_info *i);
-extern void ltq_soc_setup(void);
+extern void ltq_soc_init(void);
+
+extern struct boot_param_header __dtb_start;
#endif
diff --git a/arch/mips/lantiq/setup.c b/arch/mips/lantiq/setup.c
deleted file mode 100644
index 1ff6c9d6cb9..00000000000
--- a/arch/mips/lantiq/setup.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <asm/bootinfo.h>
-
-#include <lantiq_soc.h>
-
-#include "machtypes.h"
-#include "devices.h"
-#include "prom.h"
-
-void __init plat_mem_setup(void)
-{
- /* assume 16M as default incase uboot fails to pass proper ramsize */
- unsigned long memsize = 16;
- char **envp = (char **) KSEG1ADDR(fw_arg2);
-
- ioport_resource.start = IOPORT_RESOURCE_START;
- ioport_resource.end = IOPORT_RESOURCE_END;
- iomem_resource.start = IOMEM_RESOURCE_START;
- iomem_resource.end = IOMEM_RESOURCE_END;
-
- set_io_port_base((unsigned long) KSEG1);
-
- while (*envp) {
- char *e = (char *)KSEG1ADDR(*envp);
- if (!strncmp(e, "memsize=", 8)) {
- e += 8;
- if (strict_strtoul(e, 0, &memsize))
- pr_warn("bad memsize specified\n");
- }
- envp++;
- }
- memsize *= 1024 * 1024;
- add_memory_region(0x00000000, memsize, BOOT_MEM_RAM);
-}
-
-static int __init
-lantiq_setup(void)
-{
- ltq_soc_setup();
- mips_machine_setup();
- return 0;
-}
-
-arch_initcall(lantiq_setup);
-
-static void __init
-lantiq_generic_init(void)
-{
- /* Nothing to do */
-}
-
-MIPS_MACHINE(LTQ_MACH_GENERIC,
- "Generic",
- "Generic Lantiq based board",
- lantiq_generic_init);
diff --git a/arch/mips/lantiq/xway/Kconfig b/arch/mips/lantiq/xway/Kconfig
deleted file mode 100644
index 2b857de3662..00000000000
--- a/arch/mips/lantiq/xway/Kconfig
+++ /dev/null
@@ -1,23 +0,0 @@
-if SOC_XWAY
-
-menu "MIPS Machine"
-
-config LANTIQ_MACH_EASY50712
- bool "Easy50712 - Danube"
- default y
-
-endmenu
-
-endif
-
-if SOC_AMAZON_SE
-
-menu "MIPS Machine"
-
-config LANTIQ_MACH_EASY50601
- bool "Easy50601 - Amazon SE"
- default y
-
-endmenu
-
-endif
diff --git a/arch/mips/lantiq/xway/Makefile b/arch/mips/lantiq/xway/Makefile
index c517f2e7756..dc3194f6ee4 100644
--- a/arch/mips/lantiq/xway/Makefile
+++ b/arch/mips/lantiq/xway/Makefile
@@ -1,7 +1 @@
-obj-y := pmu.o ebu.o reset.o gpio.o gpio_stp.o gpio_ebu.o devices.o dma.o
-
-obj-$(CONFIG_SOC_XWAY) += clk-xway.o prom-xway.o setup-xway.o
-obj-$(CONFIG_SOC_AMAZON_SE) += clk-ase.o prom-ase.o setup-ase.o
-
-obj-$(CONFIG_LANTIQ_MACH_EASY50712) += mach-easy50712.o
-obj-$(CONFIG_LANTIQ_MACH_EASY50601) += mach-easy50601.o
+obj-y := prom.o sysctrl.o clk.o reset.o gpio.o dma.o
diff --git a/arch/mips/lantiq/xway/clk-ase.c b/arch/mips/lantiq/xway/clk-ase.c
deleted file mode 100644
index 652258309c9..00000000000
--- a/arch/mips/lantiq/xway/clk-ase.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/io.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/clk.h>
-
-#include <asm/time.h>
-#include <asm/irq.h>
-#include <asm/div64.h>
-
-#include <lantiq_soc.h>
-
-/* cgu registers */
-#define LTQ_CGU_SYS 0x0010
-
-unsigned int ltq_get_io_region_clock(void)
-{
- return CLOCK_133M;
-}
-EXPORT_SYMBOL(ltq_get_io_region_clock);
-
-unsigned int ltq_get_fpi_bus_clock(int fpi)
-{
- return CLOCK_133M;
-}
-EXPORT_SYMBOL(ltq_get_fpi_bus_clock);
-
-unsigned int ltq_get_cpu_hz(void)
-{
- if (ltq_cgu_r32(LTQ_CGU_SYS) & (1 << 5))
- return CLOCK_266M;
- else
- return CLOCK_133M;
-}
-EXPORT_SYMBOL(ltq_get_cpu_hz);
-
-unsigned int ltq_get_fpi_hz(void)
-{
- return CLOCK_133M;
-}
-EXPORT_SYMBOL(ltq_get_fpi_hz);
diff --git a/arch/mips/lantiq/xway/clk-xway.c b/arch/mips/lantiq/xway/clk-xway.c
deleted file mode 100644
index 696b1a3e064..00000000000
--- a/arch/mips/lantiq/xway/clk-xway.c
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/io.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/clk.h>
-
-#include <asm/time.h>
-#include <asm/irq.h>
-#include <asm/div64.h>
-
-#include <lantiq_soc.h>
-
-static unsigned int ltq_ram_clocks[] = {
- CLOCK_167M, CLOCK_133M, CLOCK_111M, CLOCK_83M };
-#define DDR_HZ ltq_ram_clocks[ltq_cgu_r32(LTQ_CGU_SYS) & 0x3]
-
-#define BASIC_FREQUENCY_1 35328000
-#define BASIC_FREQUENCY_2 36000000
-#define BASIS_REQUENCY_USB 12000000
-
-#define GET_BITS(x, msb, lsb) \
- (((x) & ((1 << ((msb) + 1)) - 1)) >> (lsb))
-
-#define LTQ_CGU_PLL0_CFG 0x0004
-#define LTQ_CGU_PLL1_CFG 0x0008
-#define LTQ_CGU_PLL2_CFG 0x000C
-#define LTQ_CGU_SYS 0x0010
-#define LTQ_CGU_UPDATE 0x0014
-#define LTQ_CGU_IF_CLK 0x0018
-#define LTQ_CGU_OSC_CON 0x001C
-#define LTQ_CGU_SMD 0x0020
-#define LTQ_CGU_CT1SR 0x0028
-#define LTQ_CGU_CT2SR 0x002C
-#define LTQ_CGU_PCMCR 0x0030
-#define LTQ_CGU_PCI_CR 0x0034
-#define LTQ_CGU_PD_PC 0x0038
-#define LTQ_CGU_FMR 0x003C
-
-#define CGU_PLL0_PHASE_DIVIDER_ENABLE \
- (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 31))
-#define CGU_PLL0_BYPASS \
- (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 30))
-#define CGU_PLL0_CFG_DSMSEL \
- (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 28))
-#define CGU_PLL0_CFG_FRAC_EN \
- (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 27))
-#define CGU_PLL1_SRC \
- (ltq_cgu_r32(LTQ_CGU_PLL1_CFG) & (1 << 31))
-#define CGU_PLL2_PHASE_DIVIDER_ENABLE \
- (ltq_cgu_r32(LTQ_CGU_PLL2_CFG) & (1 << 20))
-#define CGU_SYS_FPI_SEL (1 << 6)
-#define CGU_SYS_DDR_SEL 0x3
-#define CGU_PLL0_SRC (1 << 29)
-
-#define CGU_PLL0_CFG_PLLK GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 26, 17)
-#define CGU_PLL0_CFG_PLLN GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 12, 6)
-#define CGU_PLL0_CFG_PLLM GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 5, 2)
-#define CGU_PLL2_SRC GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL2_CFG), 18, 17)
-#define CGU_PLL2_CFG_INPUT_DIV GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL2_CFG), 16, 13)
-
-static unsigned int ltq_get_pll0_fdiv(void);
-
-static inline unsigned int get_input_clock(int pll)
-{
- switch (pll) {
- case 0:
- if (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & CGU_PLL0_SRC)
- return BASIS_REQUENCY_USB;
- else if (CGU_PLL0_PHASE_DIVIDER_ENABLE)
- return BASIC_FREQUENCY_1;
- else
- return BASIC_FREQUENCY_2;
- case 1:
- if (CGU_PLL1_SRC)
- return BASIS_REQUENCY_USB;
- else if (CGU_PLL0_PHASE_DIVIDER_ENABLE)
- return BASIC_FREQUENCY_1;
- else
- return BASIC_FREQUENCY_2;
- case 2:
- switch (CGU_PLL2_SRC) {
- case 0:
- return ltq_get_pll0_fdiv();
- case 1:
- return CGU_PLL2_PHASE_DIVIDER_ENABLE ?
- BASIC_FREQUENCY_1 :
- BASIC_FREQUENCY_2;
- case 2:
- return BASIS_REQUENCY_USB;
- }
- default:
- return 0;
- }
-}
-
-static inline unsigned int cal_dsm(int pll, unsigned int num, unsigned int den)
-{
- u64 res, clock = get_input_clock(pll);
-
- res = num * clock;
- do_div(res, den);
- return res;
-}
-
-static inline unsigned int mash_dsm(int pll, unsigned int M, unsigned int N,
- unsigned int K)
-{
- unsigned int num = ((N + 1) << 10) + K;
- unsigned int den = (M + 1) << 10;
-
- return cal_dsm(pll, num, den);
-}
-
-static inline unsigned int ssff_dsm_1(int pll, unsigned int M, unsigned int N,
- unsigned int K)
-{
- unsigned int num = ((N + 1) << 11) + K + 512;
- unsigned int den = (M + 1) << 11;
-
- return cal_dsm(pll, num, den);
-}
-
-static inline unsigned int ssff_dsm_2(int pll, unsigned int M, unsigned int N,
- unsigned int K)
-{
- unsigned int num = K >= 512 ?
- ((N + 1) << 12) + K - 512 : ((N + 1) << 12) + K + 3584;
- unsigned int den = (M + 1) << 12;
-
- return cal_dsm(pll, num, den);
-}
-
-static inline unsigned int dsm(int pll, unsigned int M, unsigned int N,
- unsigned int K, unsigned int dsmsel, unsigned int phase_div_en)
-{
- if (!dsmsel)
- return mash_dsm(pll, M, N, K);
- else if (!phase_div_en)
- return mash_dsm(pll, M, N, K);
- else
- return ssff_dsm_2(pll, M, N, K);
-}
-
-static inline unsigned int ltq_get_pll0_fosc(void)
-{
- if (CGU_PLL0_BYPASS)
- return get_input_clock(0);
- else
- return !CGU_PLL0_CFG_FRAC_EN
- ? dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN, 0,
- CGU_PLL0_CFG_DSMSEL,
- CGU_PLL0_PHASE_DIVIDER_ENABLE)
- : dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN,
- CGU_PLL0_CFG_PLLK, CGU_PLL0_CFG_DSMSEL,
- CGU_PLL0_PHASE_DIVIDER_ENABLE);
-}
-
-static unsigned int ltq_get_pll0_fdiv(void)
-{
- unsigned int div = CGU_PLL2_CFG_INPUT_DIV + 1;
-
- return (ltq_get_pll0_fosc() + (div >> 1)) / div;
-}
-
-unsigned int ltq_get_io_region_clock(void)
-{
- unsigned int ret = ltq_get_pll0_fosc();
-
- switch (ltq_cgu_r32(LTQ_CGU_PLL2_CFG) & CGU_SYS_DDR_SEL) {
- default:
- case 0:
- return (ret + 1) / 2;
- case 1:
- return (ret * 2 + 2) / 5;
- case 2:
- return (ret + 1) / 3;
- case 3:
- return (ret + 2) / 4;
- }
-}
-EXPORT_SYMBOL(ltq_get_io_region_clock);
-
-unsigned int ltq_get_fpi_bus_clock(int fpi)
-{
- unsigned int ret = ltq_get_io_region_clock();
-
- if ((fpi == 2) && (ltq_cgu_r32(LTQ_CGU_SYS) & CGU_SYS_FPI_SEL))
- ret >>= 1;
- return ret;
-}
-EXPORT_SYMBOL(ltq_get_fpi_bus_clock);
-
-unsigned int ltq_get_cpu_hz(void)
-{
- switch (ltq_cgu_r32(LTQ_CGU_SYS) & 0xc) {
- case 0:
- return CLOCK_333M;
- case 4:
- return DDR_HZ;
- case 8:
- return DDR_HZ << 1;
- default:
- return DDR_HZ >> 1;
- }
-}
-EXPORT_SYMBOL(ltq_get_cpu_hz);
-
-unsigned int ltq_get_fpi_hz(void)
-{
- unsigned int ddr_clock = DDR_HZ;
-
- if (ltq_cgu_r32(LTQ_CGU_SYS) & 0x40)
- return ddr_clock >> 1;
- return ddr_clock;
-}
-EXPORT_SYMBOL(ltq_get_fpi_hz);
diff --git a/arch/mips/lantiq/xway/clk.c b/arch/mips/lantiq/xway/clk.c
new file mode 100644
index 00000000000..9aa17f79a74
--- /dev/null
+++ b/arch/mips/lantiq/xway/clk.c
@@ -0,0 +1,151 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/io.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+
+#include <asm/time.h>
+#include <asm/irq.h>
+#include <asm/div64.h>
+
+#include <lantiq_soc.h>
+
+#include "../clk.h"
+
+static unsigned int ram_clocks[] = {
+ CLOCK_167M, CLOCK_133M, CLOCK_111M, CLOCK_83M };
+#define DDR_HZ ram_clocks[ltq_cgu_r32(CGU_SYS) & 0x3]
+
+/* legacy xway clock */
+#define CGU_SYS 0x10
+
+/* vr9 clock */
+#define CGU_SYS_VR9 0x0c
+#define CGU_IF_CLK_VR9 0x24
+
+unsigned long ltq_danube_fpi_hz(void)
+{
+ unsigned long ddr_clock = DDR_HZ;
+
+ if (ltq_cgu_r32(CGU_SYS) & 0x40)
+ return ddr_clock >> 1;
+ return ddr_clock;
+}
+
+unsigned long ltq_danube_cpu_hz(void)
+{
+ switch (ltq_cgu_r32(CGU_SYS) & 0xc) {
+ case 0:
+ return CLOCK_333M;
+ case 4:
+ return DDR_HZ;
+ case 8:
+ return DDR_HZ << 1;
+ default:
+ return DDR_HZ >> 1;
+ }
+}
+
+unsigned long ltq_ar9_sys_hz(void)
+{
+ if (((ltq_cgu_r32(CGU_SYS) >> 3) & 0x3) == 0x2)
+ return CLOCK_393M;
+ return CLOCK_333M;
+}
+
+unsigned long ltq_ar9_fpi_hz(void)
+{
+ unsigned long sys = ltq_ar9_sys_hz();
+
+ if (ltq_cgu_r32(CGU_SYS) & BIT(0))
+ return sys;
+ return sys >> 1;
+}
+
+unsigned long ltq_ar9_cpu_hz(void)
+{
+ if (ltq_cgu_r32(CGU_SYS) & BIT(2))
+ return ltq_ar9_fpi_hz();
+ else
+ return ltq_ar9_sys_hz();
+}
+
+unsigned long ltq_vr9_cpu_hz(void)
+{
+ unsigned int cpu_sel;
+ unsigned long clk;
+
+ cpu_sel = (ltq_cgu_r32(CGU_SYS_VR9) >> 4) & 0xf;
+
+ switch (cpu_sel) {
+ case 0:
+ clk = CLOCK_600M;
+ break;
+ case 1:
+ clk = CLOCK_500M;
+ break;
+ case 2:
+ clk = CLOCK_393M;
+ break;
+ case 3:
+ clk = CLOCK_333M;
+ break;
+ case 5:
+ case 6:
+ clk = CLOCK_196_608M;
+ break;
+ case 7:
+ clk = CLOCK_167M;
+ break;
+ case 4:
+ case 8:
+ case 9:
+ clk = CLOCK_125M;
+ break;
+ default:
+ clk = 0;
+ break;
+ }
+
+ return clk;
+}
+
+unsigned long ltq_vr9_fpi_hz(void)
+{
+ unsigned int ocp_sel, cpu_clk;
+ unsigned long clk;
+
+ cpu_clk = ltq_vr9_cpu_hz();
+ ocp_sel = ltq_cgu_r32(CGU_SYS_VR9) & 0x3;
+
+ switch (ocp_sel) {
+ case 0:
+ /* OCP ratio 1 */
+ clk = cpu_clk;
+ break;
+ case 2:
+ /* OCP ratio 2 */
+ clk = cpu_clk / 2;
+ break;
+ case 3:
+ /* OCP ratio 2.5 */
+ clk = (cpu_clk * 2) / 5;
+ break;
+ case 4:
+ /* OCP ratio 3 */
+ clk = cpu_clk / 3;
+ break;
+ default:
+ clk = 0;
+ break;
+ }
+
+ return clk;
+}
diff --git a/arch/mips/lantiq/xway/devices.c b/arch/mips/lantiq/xway/devices.c
deleted file mode 100644
index d614aa7ff07..00000000000
--- a/arch/mips/lantiq/xway/devices.c
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/mtd/physmap.h>
-#include <linux/kernel.h>
-#include <linux/reboot.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-#include <linux/etherdevice.h>
-#include <linux/time.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-
-#include <lantiq_soc.h>
-#include <lantiq_irq.h>
-#include <lantiq_platform.h>
-
-#include "devices.h"
-
-/* gpio */
-static struct resource ltq_gpio_resource[] = {
- {
- .name = "gpio0",
- .start = LTQ_GPIO0_BASE_ADDR,
- .end = LTQ_GPIO0_BASE_ADDR + LTQ_GPIO_SIZE - 1,
- .flags = IORESOURCE_MEM,
- }, {
- .name = "gpio1",
- .start = LTQ_GPIO1_BASE_ADDR,
- .end = LTQ_GPIO1_BASE_ADDR + LTQ_GPIO_SIZE - 1,
- .flags = IORESOURCE_MEM,
- }, {
- .name = "gpio2",
- .start = LTQ_GPIO2_BASE_ADDR,
- .end = LTQ_GPIO2_BASE_ADDR + LTQ_GPIO_SIZE - 1,
- .flags = IORESOURCE_MEM,
- }
-};
-
-void __init ltq_register_gpio(void)
-{
- platform_device_register_simple("ltq_gpio", 0,
- &ltq_gpio_resource[0], 1);
- platform_device_register_simple("ltq_gpio", 1,
- &ltq_gpio_resource[1], 1);
-
- /* AR9 and VR9 have an extra gpio block */
- if (ltq_is_ar9() || ltq_is_vr9()) {
- platform_device_register_simple("ltq_gpio", 2,
- &ltq_gpio_resource[2], 1);
- }
-}
-
-/* serial to parallel conversion */
-static struct resource ltq_stp_resource = {
- .name = "stp",
- .start = LTQ_STP_BASE_ADDR,
- .end = LTQ_STP_BASE_ADDR + LTQ_STP_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
-void __init ltq_register_gpio_stp(void)
-{
- platform_device_register_simple("ltq_stp", 0, &ltq_stp_resource, 1);
-}
-
-/* asc ports - amazon se has its own serial mapping */
-static struct resource ltq_ase_asc_resources[] = {
- {
- .name = "asc0",
- .start = LTQ_ASC1_BASE_ADDR,
- .end = LTQ_ASC1_BASE_ADDR + LTQ_ASC_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- IRQ_RES(tx, LTQ_ASC_ASE_TIR),
- IRQ_RES(rx, LTQ_ASC_ASE_RIR),
- IRQ_RES(err, LTQ_ASC_ASE_EIR),
-};
-
-void __init ltq_register_ase_asc(void)
-{
- platform_device_register_simple("ltq_asc", 0,
- ltq_ase_asc_resources, ARRAY_SIZE(ltq_ase_asc_resources));
-}
-
-/* ethernet */
-static struct resource ltq_etop_resources = {
- .name = "etop",
- .start = LTQ_ETOP_BASE_ADDR,
- .end = LTQ_ETOP_BASE_ADDR + LTQ_ETOP_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
-static struct platform_device ltq_etop = {
- .name = "ltq_etop",
- .resource = &ltq_etop_resources,
- .num_resources = 1,
-};
-
-void __init
-ltq_register_etop(struct ltq_eth_data *eth)
-{
- if (eth) {
- ltq_etop.dev.platform_data = eth;
- platform_device_register(&ltq_etop);
- }
-}
diff --git a/arch/mips/lantiq/xway/devices.h b/arch/mips/lantiq/xway/devices.h
deleted file mode 100644
index e90493471bc..00000000000
--- a/arch/mips/lantiq/xway/devices.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#ifndef _LTQ_DEVICES_XWAY_H__
-#define _LTQ_DEVICES_XWAY_H__
-
-#include "../devices.h"
-#include <linux/phy.h>
-
-extern void ltq_register_gpio(void);
-extern void ltq_register_gpio_stp(void);
-extern void ltq_register_ase_asc(void);
-extern void ltq_register_etop(struct ltq_eth_data *eth);
-
-#endif
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index b210e936c7c..55d2c4fa471 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -19,7 +19,8 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
-#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/clk.h>
#include <lantiq_soc.h>
#include <xway_dma.h>
@@ -55,13 +56,6 @@
#define ltq_dma_w32_mask(x, y, z) ltq_w32_mask(x, y, \
ltq_dma_membase + (z))
-static struct resource ltq_dma_resource = {
- .name = "dma",
- .start = LTQ_DMA_BASE_ADDR,
- .end = LTQ_DMA_BASE_ADDR + LTQ_DMA_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
static void __iomem *ltq_dma_membase;
void
@@ -215,27 +209,28 @@ ltq_dma_init_port(int p)
}
EXPORT_SYMBOL_GPL(ltq_dma_init_port);
-int __init
-ltq_dma_init(void)
+static int __devinit
+ltq_dma_init(struct platform_device *pdev)
{
+ struct clk *clk;
+ struct resource *res;
int i;
- /* insert and request the memory region */
- if (insert_resource(&iomem_resource, &ltq_dma_resource) < 0)
- panic("Failed to insert dma memory");
-
- if (request_mem_region(ltq_dma_resource.start,
- resource_size(&ltq_dma_resource), "dma") < 0)
- panic("Failed to request dma memory");
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ panic("Failed to get dma resource");
/* remap dma register range */
- ltq_dma_membase = ioremap_nocache(ltq_dma_resource.start,
- resource_size(&ltq_dma_resource));
+ ltq_dma_membase = devm_request_and_ioremap(&pdev->dev, res);
if (!ltq_dma_membase)
- panic("Failed to remap dma memory");
+ panic("Failed to remap dma resource");
/* power up and reset the dma engine */
- ltq_pmu_enable(PMU_DMA);
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ panic("Failed to get dma clock");
+
+ clk_enable(clk);
ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL);
/* disable all interrupts */
@@ -248,7 +243,29 @@ ltq_dma_init(void)
ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL);
ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
}
+ dev_info(&pdev->dev, "init done\n");
return 0;
}
-postcore_initcall(ltq_dma_init);
+static const struct of_device_id dma_match[] = {
+ { .compatible = "lantiq,dma-xway" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dma_match);
+
+static struct platform_driver dma_driver = {
+ .probe = ltq_dma_init,
+ .driver = {
+ .name = "dma-xway",
+ .owner = THIS_MODULE,
+ .of_match_table = dma_match,
+ },
+};
+
+int __init
+dma_init(void)
+{
+ return platform_driver_register(&dma_driver);
+}
+
+postcore_initcall(dma_init);
diff --git a/arch/mips/lantiq/xway/ebu.c b/arch/mips/lantiq/xway/ebu.c
deleted file mode 100644
index 862e3e83068..00000000000
--- a/arch/mips/lantiq/xway/ebu.c
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * EBU - the external bus unit attaches PCI, NOR and NAND
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ioport.h>
-
-#include <lantiq_soc.h>
-
-/* all access to the ebu must be locked */
-DEFINE_SPINLOCK(ebu_lock);
-EXPORT_SYMBOL_GPL(ebu_lock);
-
-static struct resource ltq_ebu_resource = {
- .name = "ebu",
- .start = LTQ_EBU_BASE_ADDR,
- .end = LTQ_EBU_BASE_ADDR + LTQ_EBU_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
-/* remapped base addr of the clock unit and external bus unit */
-void __iomem *ltq_ebu_membase;
-
-static int __init lantiq_ebu_init(void)
-{
- /* insert and request the memory region */
- if (insert_resource(&iomem_resource, &ltq_ebu_resource) < 0)
- panic("Failed to insert ebu memory");
-
- if (request_mem_region(ltq_ebu_resource.start,
- resource_size(&ltq_ebu_resource), "ebu") < 0)
- panic("Failed to request ebu memory");
-
- /* remap ebu register range */
- ltq_ebu_membase = ioremap_nocache(ltq_ebu_resource.start,
- resource_size(&ltq_ebu_resource));
- if (!ltq_ebu_membase)
- panic("Failed to remap ebu memory");
-
- /* make sure to unprotect the memory region where flash is located */
- ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0);
- return 0;
-}
-
-postcore_initcall(lantiq_ebu_init);
diff --git a/arch/mips/lantiq/xway/gpio.c b/arch/mips/lantiq/xway/gpio.c
index c429a5bc080..2ab39e93d9b 100644
--- a/arch/mips/lantiq/xway/gpio.c
+++ b/arch/mips/lantiq/xway/gpio.c
@@ -36,18 +36,6 @@ struct ltq_gpio {
static struct ltq_gpio ltq_gpio_port[MAX_PORTS];
-int gpio_to_irq(unsigned int gpio)
-{
- return -EINVAL;
-}
-EXPORT_SYMBOL(gpio_to_irq);
-
-int irq_to_gpio(unsigned int gpio)
-{
- return -EINVAL;
-}
-EXPORT_SYMBOL(irq_to_gpio);
-
int ltq_gpio_request(unsigned int pin, unsigned int alt0,
unsigned int alt1, unsigned int dir, const char *name)
{
diff --git a/arch/mips/lantiq/xway/gpio_ebu.c b/arch/mips/lantiq/xway/gpio_ebu.c
deleted file mode 100644
index aae17170472..00000000000
--- a/arch/mips/lantiq/xway/gpio_ebu.c
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/platform_device.h>
-#include <linux/mutex.h>
-#include <linux/gpio.h>
-#include <linux/io.h>
-
-#include <lantiq_soc.h>
-
-/*
- * By attaching hardware latches to the EBU it is possible to create output
- * only gpios. This driver configures a special memory address, which when
- * written to outputs 16 bit to the latches.
- */
-
-#define LTQ_EBU_BUSCON 0x1e7ff /* 16 bit access, slowest timing */
-#define LTQ_EBU_WP 0x80000000 /* write protect bit */
-
-/* we keep a shadow value of the last value written to the ebu */
-static int ltq_ebu_gpio_shadow = 0x0;
-static void __iomem *ltq_ebu_gpio_membase;
-
-static void ltq_ebu_apply(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ebu_lock, flags);
- ltq_ebu_w32(LTQ_EBU_BUSCON, LTQ_EBU_BUSCON1);
- *((__u16 *)ltq_ebu_gpio_membase) = ltq_ebu_gpio_shadow;
- ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);
- spin_unlock_irqrestore(&ebu_lock, flags);
-}
-
-static void ltq_ebu_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- if (value)
- ltq_ebu_gpio_shadow |= (1 << offset);
- else
- ltq_ebu_gpio_shadow &= ~(1 << offset);
- ltq_ebu_apply();
-}
-
-static int ltq_ebu_direction_output(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- ltq_ebu_set(chip, offset, value);
-
- return 0;
-}
-
-static struct gpio_chip ltq_ebu_chip = {
- .label = "ltq_ebu",
- .direction_output = ltq_ebu_direction_output,
- .set = ltq_ebu_set,
- .base = 72,
- .ngpio = 16,
- .can_sleep = 1,
- .owner = THIS_MODULE,
-};
-
-static int ltq_ebu_probe(struct platform_device *pdev)
-{
- int ret = 0;
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- if (!res) {
- dev_err(&pdev->dev, "failed to get memory resource\n");
- return -ENOENT;
- }
-
- res = devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), dev_name(&pdev->dev));
- if (!res) {
- dev_err(&pdev->dev, "failed to request memory resource\n");
- return -EBUSY;
- }
-
- ltq_ebu_gpio_membase = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!ltq_ebu_gpio_membase) {
- dev_err(&pdev->dev, "Failed to ioremap mem region\n");
- return -ENOMEM;
- }
-
- /* grab the default shadow value passed form the platform code */
- ltq_ebu_gpio_shadow = (unsigned int) pdev->dev.platform_data;
-
- /* tell the ebu controller which memory address we will be using */
- ltq_ebu_w32(pdev->resource->start | 0x1, LTQ_EBU_ADDRSEL1);
-
- /* write protect the region */
- ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);
-
- ret = gpiochip_add(&ltq_ebu_chip);
- if (!ret)
- ltq_ebu_apply();
- return ret;
-}
-
-static struct platform_driver ltq_ebu_driver = {
- .probe = ltq_ebu_probe,
- .driver = {
- .name = "ltq_ebu",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init ltq_ebu_init(void)
-{
- int ret = platform_driver_register(&ltq_ebu_driver);
-
- if (ret)
- pr_info("ltq_ebu : Error registering platform driver!");
- return ret;
-}
-
-postcore_initcall(ltq_ebu_init);
diff --git a/arch/mips/lantiq/xway/gpio_stp.c b/arch/mips/lantiq/xway/gpio_stp.c
deleted file mode 100644
index fd07d87adaa..00000000000
--- a/arch/mips/lantiq/xway/gpio_stp.c
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2007 John Crispin <blogic@openwrt.org>
- *
- */
-
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/platform_device.h>
-#include <linux/mutex.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-
-#include <lantiq_soc.h>
-
-#define LTQ_STP_CON0 0x00
-#define LTQ_STP_CON1 0x04
-#define LTQ_STP_CPU0 0x08
-#define LTQ_STP_CPU1 0x0C
-#define LTQ_STP_AR 0x10
-
-#define LTQ_STP_CON_SWU (1 << 31)
-#define LTQ_STP_2HZ 0
-#define LTQ_STP_4HZ (1 << 23)
-#define LTQ_STP_8HZ (2 << 23)
-#define LTQ_STP_10HZ (3 << 23)
-#define LTQ_STP_SPEED_MASK (0xf << 23)
-#define LTQ_STP_UPD_FPI (1 << 31)
-#define LTQ_STP_UPD_MASK (3 << 30)
-#define LTQ_STP_ADSL_SRC (3 << 24)
-
-#define LTQ_STP_GROUP0 (1 << 0)
-
-#define LTQ_STP_RISING 0
-#define LTQ_STP_FALLING (1 << 26)
-#define LTQ_STP_EDGE_MASK (1 << 26)
-
-#define ltq_stp_r32(reg) __raw_readl(ltq_stp_membase + reg)
-#define ltq_stp_w32(val, reg) __raw_writel(val, ltq_stp_membase + reg)
-#define ltq_stp_w32_mask(clear, set, reg) \
- ltq_w32((ltq_r32(ltq_stp_membase + reg) & ~(clear)) | (set), \
- ltq_stp_membase + (reg))
-
-static int ltq_stp_shadow = 0xffff;
-static void __iomem *ltq_stp_membase;
-
-static void ltq_stp_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- if (value)
- ltq_stp_shadow |= (1 << offset);
- else
- ltq_stp_shadow &= ~(1 << offset);
- ltq_stp_w32(ltq_stp_shadow, LTQ_STP_CPU0);
-}
-
-static int ltq_stp_direction_output(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- ltq_stp_set(chip, offset, value);
-
- return 0;
-}
-
-static struct gpio_chip ltq_stp_chip = {
- .label = "ltq_stp",
- .direction_output = ltq_stp_direction_output,
- .set = ltq_stp_set,
- .base = 48,
- .ngpio = 24,
- .can_sleep = 1,
- .owner = THIS_MODULE,
-};
-
-static int ltq_stp_hw_init(void)
-{
- /* the 3 pins used to control the external stp */
- ltq_gpio_request(4, 1, 0, 1, "stp-st");
- ltq_gpio_request(5, 1, 0, 1, "stp-d");
- ltq_gpio_request(6, 1, 0, 1, "stp-sh");
-
- /* sane defaults */
- ltq_stp_w32(0, LTQ_STP_AR);
- ltq_stp_w32(0, LTQ_STP_CPU0);
- ltq_stp_w32(0, LTQ_STP_CPU1);
- ltq_stp_w32(LTQ_STP_CON_SWU, LTQ_STP_CON0);
- ltq_stp_w32(0, LTQ_STP_CON1);
-
- /* rising or falling edge */
- ltq_stp_w32_mask(LTQ_STP_EDGE_MASK, LTQ_STP_FALLING, LTQ_STP_CON0);
-
- /* per default stp 15-0 are set */
- ltq_stp_w32_mask(0, LTQ_STP_GROUP0, LTQ_STP_CON1);
-
- /* stp is updated periodically by the FPI bus */
- ltq_stp_w32_mask(LTQ_STP_UPD_MASK, LTQ_STP_UPD_FPI, LTQ_STP_CON1);
-
- /* set stp update speed */
- ltq_stp_w32_mask(LTQ_STP_SPEED_MASK, LTQ_STP_8HZ, LTQ_STP_CON1);
-
- /* tell the hardware that pins (leds) 0 and 1 are controlled
- * by the dsl arc
- */
- ltq_stp_w32_mask(0, LTQ_STP_ADSL_SRC, LTQ_STP_CON0);
-
- ltq_pmu_enable(PMU_LED);
- return 0;
-}
-
-static int __devinit ltq_stp_probe(struct platform_device *pdev)
-{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- int ret = 0;
-
- if (!res)
- return -ENOENT;
- res = devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), dev_name(&pdev->dev));
- if (!res) {
- dev_err(&pdev->dev, "failed to request STP memory\n");
- return -EBUSY;
- }
- ltq_stp_membase = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!ltq_stp_membase) {
- dev_err(&pdev->dev, "failed to remap STP memory\n");
- return -ENOMEM;
- }
- ret = gpiochip_add(&ltq_stp_chip);
- if (!ret)
- ret = ltq_stp_hw_init();
-
- return ret;
-}
-
-static struct platform_driver ltq_stp_driver = {
- .probe = ltq_stp_probe,
- .driver = {
- .name = "ltq_stp",
- .owner = THIS_MODULE,
- },
-};
-
-int __init ltq_stp_init(void)
-{
- int ret = platform_driver_register(&ltq_stp_driver);
-
- if (ret)
- pr_info("ltq_stp: error registering platform driver");
- return ret;
-}
-
-postcore_initcall(ltq_stp_init);
diff --git a/arch/mips/lantiq/xway/mach-easy50601.c b/arch/mips/lantiq/xway/mach-easy50601.c
deleted file mode 100644
index d5aaf637ab1..00000000000
--- a/arch/mips/lantiq/xway/mach-easy50601.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/physmap.h>
-#include <linux/input.h>
-
-#include <lantiq.h>
-
-#include "../machtypes.h"
-#include "devices.h"
-
-static struct mtd_partition easy50601_partitions[] = {
- {
- .name = "uboot",
- .offset = 0x0,
- .size = 0x10000,
- },
- {
- .name = "uboot_env",
- .offset = 0x10000,
- .size = 0x10000,
- },
- {
- .name = "linux",
- .offset = 0x20000,
- .size = 0xE0000,
- },
- {
- .name = "rootfs",
- .offset = 0x100000,
- .size = 0x300000,
- },
-};
-
-static struct physmap_flash_data easy50601_flash_data = {
- .nr_parts = ARRAY_SIZE(easy50601_partitions),
- .parts = easy50601_partitions,
-};
-
-static void __init easy50601_init(void)
-{
- ltq_register_nor(&easy50601_flash_data);
-}
-
-MIPS_MACHINE(LTQ_MACH_EASY50601,
- "EASY50601",
- "EASY50601 Eval Board",
- easy50601_init);
diff --git a/arch/mips/lantiq/xway/mach-easy50712.c b/arch/mips/lantiq/xway/mach-easy50712.c
deleted file mode 100644
index ea5027b3239..00000000000
--- a/arch/mips/lantiq/xway/mach-easy50712.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/physmap.h>
-#include <linux/input.h>
-#include <linux/phy.h>
-
-#include <lantiq_soc.h>
-#include <irq.h>
-
-#include "../machtypes.h"
-#include "devices.h"
-
-static struct mtd_partition easy50712_partitions[] = {
- {
- .name = "uboot",
- .offset = 0x0,
- .size = 0x10000,
- },
- {
- .name = "uboot_env",
- .offset = 0x10000,
- .size = 0x10000,
- },
- {
- .name = "linux",
- .offset = 0x20000,
- .size = 0xe0000,
- },
- {
- .name = "rootfs",
- .offset = 0x100000,
- .size = 0x300000,
- },
-};
-
-static struct physmap_flash_data easy50712_flash_data = {
- .nr_parts = ARRAY_SIZE(easy50712_partitions),
- .parts = easy50712_partitions,
-};
-
-static struct ltq_pci_data ltq_pci_data = {
- .clock = PCI_CLOCK_INT,
- .gpio = PCI_GNT1 | PCI_REQ1,
- .irq = {
- [14] = INT_NUM_IM0_IRL0 + 22,
- },
-};
-
-static struct ltq_eth_data ltq_eth_data = {
- .mii_mode = PHY_INTERFACE_MODE_MII,
-};
-
-static void __init easy50712_init(void)
-{
- ltq_register_gpio_stp();
- ltq_register_nor(&easy50712_flash_data);
- ltq_register_pci(&ltq_pci_data);
- ltq_register_etop(&ltq_eth_data);
-}
-
-MIPS_MACHINE(LTQ_MACH_EASY50712,
- "EASY50712",
- "EASY50712 Eval Board",
- easy50712_init);
diff --git a/arch/mips/lantiq/xway/pmu.c b/arch/mips/lantiq/xway/pmu.c
deleted file mode 100644
index fe85361e032..00000000000
--- a/arch/mips/lantiq/xway/pmu.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ioport.h>
-
-#include <lantiq_soc.h>
-
-/* PMU - the power management unit allows us to turn part of the core
- * on and off
- */
-
-/* the enable / disable registers */
-#define LTQ_PMU_PWDCR 0x1C
-#define LTQ_PMU_PWDSR 0x20
-
-#define ltq_pmu_w32(x, y) ltq_w32((x), ltq_pmu_membase + (y))
-#define ltq_pmu_r32(x) ltq_r32(ltq_pmu_membase + (x))
-
-static struct resource ltq_pmu_resource = {
- .name = "pmu",
- .start = LTQ_PMU_BASE_ADDR,
- .end = LTQ_PMU_BASE_ADDR + LTQ_PMU_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
-static void __iomem *ltq_pmu_membase;
-
-void ltq_pmu_enable(unsigned int module)
-{
- int err = 1000000;
-
- ltq_pmu_w32(ltq_pmu_r32(LTQ_PMU_PWDCR) & ~module, LTQ_PMU_PWDCR);
- do {} while (--err && (ltq_pmu_r32(LTQ_PMU_PWDSR) & module));
-
- if (!err)
- panic("activating PMU module failed!");
-}
-EXPORT_SYMBOL(ltq_pmu_enable);
-
-void ltq_pmu_disable(unsigned int module)
-{
- ltq_pmu_w32(ltq_pmu_r32(LTQ_PMU_PWDCR) | module, LTQ_PMU_PWDCR);
-}
-EXPORT_SYMBOL(ltq_pmu_disable);
-
-int __init ltq_pmu_init(void)
-{
- if (insert_resource(&iomem_resource, &ltq_pmu_resource) < 0)
- panic("Failed to insert pmu memory");
-
- if (request_mem_region(ltq_pmu_resource.start,
- resource_size(&ltq_pmu_resource), "pmu") < 0)
- panic("Failed to request pmu memory");
-
- ltq_pmu_membase = ioremap_nocache(ltq_pmu_resource.start,
- resource_size(&ltq_pmu_resource));
- if (!ltq_pmu_membase)
- panic("Failed to remap pmu memory");
- return 0;
-}
-
-core_initcall(ltq_pmu_init);
diff --git a/arch/mips/lantiq/xway/prom-ase.c b/arch/mips/lantiq/xway/prom-ase.c
deleted file mode 100644
index ae4959ae865..00000000000
--- a/arch/mips/lantiq/xway/prom-ase.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/export.h>
-#include <linux/clk.h>
-#include <asm/bootinfo.h>
-#include <asm/time.h>
-
-#include <lantiq_soc.h>
-
-#include "../prom.h"
-
-#define SOC_AMAZON_SE "Amazon_SE"
-
-#define PART_SHIFT 12
-#define PART_MASK 0x0FFFFFFF
-#define REV_SHIFT 28
-#define REV_MASK 0xF0000000
-
-void __init ltq_soc_detect(struct ltq_soc_info *i)
-{
- i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT;
- i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT;
- switch (i->partnum) {
- case SOC_ID_AMAZON_SE:
- i->name = SOC_AMAZON_SE;
- i->type = SOC_TYPE_AMAZON_SE;
- break;
-
- default:
- unreachable();
- break;
- }
-}
diff --git a/arch/mips/lantiq/xway/prom-xway.c b/arch/mips/lantiq/xway/prom-xway.c
deleted file mode 100644
index 2228133ca35..00000000000
--- a/arch/mips/lantiq/xway/prom-xway.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/export.h>
-#include <linux/clk.h>
-#include <asm/bootinfo.h>
-#include <asm/time.h>
-
-#include <lantiq_soc.h>
-
-#include "../prom.h"
-
-#define SOC_DANUBE "Danube"
-#define SOC_TWINPASS "Twinpass"
-#define SOC_AR9 "AR9"
-
-#define PART_SHIFT 12
-#define PART_MASK 0x0FFFFFFF
-#define REV_SHIFT 28
-#define REV_MASK 0xF0000000
-
-void __init ltq_soc_detect(struct ltq_soc_info *i)
-{
- i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT;
- i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT;
- switch (i->partnum) {
- case SOC_ID_DANUBE1:
- case SOC_ID_DANUBE2:
- i->name = SOC_DANUBE;
- i->type = SOC_TYPE_DANUBE;
- break;
-
- case SOC_ID_TWINPASS:
- i->name = SOC_TWINPASS;
- i->type = SOC_TYPE_DANUBE;
- break;
-
- case SOC_ID_ARX188:
- case SOC_ID_ARX168:
- case SOC_ID_ARX182:
- i->name = SOC_AR9;
- i->type = SOC_TYPE_AR9;
- break;
-
- default:
- unreachable();
- break;
- }
-}
diff --git a/arch/mips/lantiq/xway/prom.c b/arch/mips/lantiq/xway/prom.c
new file mode 100644
index 00000000000..248429ab262
--- /dev/null
+++ b/arch/mips/lantiq/xway/prom.c
@@ -0,0 +1,115 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/export.h>
+#include <linux/clk.h>
+#include <asm/bootinfo.h>
+#include <asm/time.h>
+
+#include <lantiq_soc.h>
+
+#include "../prom.h"
+
+#define SOC_DANUBE "Danube"
+#define SOC_TWINPASS "Twinpass"
+#define SOC_AMAZON_SE "Amazon_SE"
+#define SOC_AR9 "AR9"
+#define SOC_GR9 "GR9"
+#define SOC_VR9 "VR9"
+
+#define COMP_DANUBE "lantiq,danube"
+#define COMP_TWINPASS "lantiq,twinpass"
+#define COMP_AMAZON_SE "lantiq,ase"
+#define COMP_AR9 "lantiq,ar9"
+#define COMP_GR9 "lantiq,gr9"
+#define COMP_VR9 "lantiq,vr9"
+
+#define PART_SHIFT 12
+#define PART_MASK 0x0FFFFFFF
+#define REV_SHIFT 28
+#define REV_MASK 0xF0000000
+
+void __init ltq_soc_detect(struct ltq_soc_info *i)
+{
+ i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT;
+ i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT;
+ sprintf(i->rev_type, "1.%d", i->rev);
+ switch (i->partnum) {
+ case SOC_ID_DANUBE1:
+ case SOC_ID_DANUBE2:
+ i->name = SOC_DANUBE;
+ i->type = SOC_TYPE_DANUBE;
+ i->compatible = COMP_DANUBE;
+ break;
+
+ case SOC_ID_TWINPASS:
+ i->name = SOC_TWINPASS;
+ i->type = SOC_TYPE_DANUBE;
+ i->compatible = COMP_TWINPASS;
+ break;
+
+ case SOC_ID_ARX188:
+ case SOC_ID_ARX168_1:
+ case SOC_ID_ARX168_2:
+ case SOC_ID_ARX182:
+ i->name = SOC_AR9;
+ i->type = SOC_TYPE_AR9;
+ i->compatible = COMP_AR9;
+ break;
+
+ case SOC_ID_GRX188:
+ case SOC_ID_GRX168:
+ i->name = SOC_GR9;
+ i->type = SOC_TYPE_AR9;
+ i->compatible = COMP_GR9;
+ break;
+
+ case SOC_ID_AMAZON_SE_1:
+ case SOC_ID_AMAZON_SE_2:
+#ifdef CONFIG_PCI
+ panic("ase is only supported for non pci kernels");
+#endif
+ i->name = SOC_AMAZON_SE;
+ i->type = SOC_TYPE_AMAZON_SE;
+ i->compatible = COMP_AMAZON_SE;
+ break;
+
+ case SOC_ID_VRX282:
+ case SOC_ID_VRX268:
+ case SOC_ID_VRX288:
+ i->name = SOC_VR9;
+ i->type = SOC_TYPE_VR9;
+ i->compatible = COMP_VR9;
+ break;
+
+ case SOC_ID_GRX268:
+ case SOC_ID_GRX288:
+ i->name = SOC_GR9;
+ i->type = SOC_TYPE_VR9;
+ i->compatible = COMP_GR9;
+ break;
+
+ case SOC_ID_VRX268_2:
+ case SOC_ID_VRX288_2:
+ i->name = SOC_VR9;
+ i->type = SOC_TYPE_VR9_2;
+ i->compatible = COMP_VR9;
+ break;
+
+ case SOC_ID_GRX282_2:
+ case SOC_ID_GRX288_2:
+ i->name = SOC_GR9;
+ i->type = SOC_TYPE_VR9_2;
+ i->compatible = COMP_GR9;
+ break;
+
+ default:
+ unreachable();
+ break;
+ }
+}
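
Editorial note, not part of the patch: the new prom.c decodes the part number
and revision from the MPS CHIPID register using the PART_*/REV_* masks defined
above. The stand-alone sketch below reproduces that bit-field extraction on a
made-up register value so it can be compiled and run outside the kernel.

/* user-space illustration of the CHIPID decoding done in ltq_soc_detect() */
#include <stdio.h>

#define PART_SHIFT	12
#define PART_MASK	0x0FFFFFFF
#define REV_SHIFT	28
#define REV_MASK	0xF0000000

int main(void)
{
	unsigned int chipid = 0x10129083;	/* hypothetical register value */
	unsigned int partnum = (chipid & PART_MASK) >> PART_SHIFT;
	unsigned int rev = (chipid & REV_MASK) >> REV_SHIFT;

	printf("partnum=0x%x rev=1.%u\n", partnum, rev);
	return 0;
}
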
diff --git a/arch/mips/lantiq/xway/reset.c b/arch/mips/lantiq/xway/reset.c
index 8b66bd87f0c..22c55f73aa9 100644
--- a/arch/mips/lantiq/xway/reset.c
+++ b/arch/mips/lantiq/xway/reset.c
@@ -11,26 +11,31 @@
#include <linux/ioport.h>
#include <linux/pm.h>
#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
#include <asm/reboot.h>
#include <lantiq_soc.h>
+#include "../prom.h"
+
#define ltq_rcu_w32(x, y) ltq_w32((x), ltq_rcu_membase + (y))
#define ltq_rcu_r32(x) ltq_r32(ltq_rcu_membase + (x))
-/* register definitions */
-#define LTQ_RCU_RST 0x0010
-#define LTQ_RCU_RST_ALL 0x40000000
-
-#define LTQ_RCU_RST_STAT 0x0014
-#define LTQ_RCU_STAT_SHIFT 26
+/* reset request register */
+#define RCU_RST_REQ 0x0010
+/* reset status register */
+#define RCU_RST_STAT 0x0014
-static struct resource ltq_rcu_resource = {
- .name = "rcu",
- .start = LTQ_RCU_BASE_ADDR,
- .end = LTQ_RCU_BASE_ADDR + LTQ_RCU_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
+/* reboot bit */
+#define RCU_RD_SRST BIT(30)
+/* reset cause */
+#define RCU_STAT_SHIFT 26
+/* boot selection */
+#define RCU_BOOT_SEL_SHIFT 26
+#define RCU_BOOT_SEL_MASK 0x7
/* remapped base addr of the reset control unit */
static void __iomem *ltq_rcu_membase;
@@ -38,48 +43,64 @@ static void __iomem *ltq_rcu_membase;
/* This function is used by the watchdog driver */
int ltq_reset_cause(void)
{
- u32 val = ltq_rcu_r32(LTQ_RCU_RST_STAT);
- return val >> LTQ_RCU_STAT_SHIFT;
+ u32 val = ltq_rcu_r32(RCU_RST_STAT);
+ return val >> RCU_STAT_SHIFT;
}
EXPORT_SYMBOL_GPL(ltq_reset_cause);
+/* allow platform code to find out what source we booted from */
+unsigned char ltq_boot_select(void)
+{
+ u32 val = ltq_rcu_r32(RCU_RST_STAT);
+ return (val >> RCU_BOOT_SEL_SHIFT) & RCU_BOOT_SEL_MASK;
+}
+
+/* reset an IO domain for u microseconds */
+void ltq_reset_once(unsigned int module, ulong u)
+{
+ ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) | module, RCU_RST_REQ);
+ udelay(u);
+ ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) & ~module, RCU_RST_REQ);
+}
+
static void ltq_machine_restart(char *command)
{
- pr_notice("System restart\n");
local_irq_disable();
- ltq_rcu_w32(ltq_rcu_r32(LTQ_RCU_RST) | LTQ_RCU_RST_ALL, LTQ_RCU_RST);
+ ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) | RCU_RD_SRST, RCU_RST_REQ);
unreachable();
}
static void ltq_machine_halt(void)
{
- pr_notice("System halted.\n");
local_irq_disable();
unreachable();
}
static void ltq_machine_power_off(void)
{
- pr_notice("Please turn off the power now.\n");
local_irq_disable();
unreachable();
}
static int __init mips_reboot_setup(void)
{
- /* insert and request the memory region */
- if (insert_resource(&iomem_resource, &ltq_rcu_resource) < 0)
- panic("Failed to insert rcu memory");
+ struct resource res;
+ struct device_node *np =
+ of_find_compatible_node(NULL, NULL, "lantiq,rcu-xway");
+
+ /* check if all the reset register range is available */
+ if (!np)
+ panic("Failed to load reset resources from devicetree");
+
+ if (of_address_to_resource(np, 0, &res))
+ panic("Failed to get rcu memory range");
- if (request_mem_region(ltq_rcu_resource.start,
- resource_size(&ltq_rcu_resource), "rcu") < 0)
- panic("Failed to request rcu memory");
+ if (request_mem_region(res.start, resource_size(&res), res.name) < 0)
+ pr_err("Failed to request rcu memory");
- /* remap rcu register range */
- ltq_rcu_membase = ioremap_nocache(ltq_rcu_resource.start,
- resource_size(&ltq_rcu_resource));
+ ltq_rcu_membase = ioremap_nocache(res.start, resource_size(&res));
if (!ltq_rcu_membase)
- panic("Failed to remap rcu memory");
+ panic("Failed to remap core memory");
_machine_restart = ltq_machine_restart;
_machine_halt = ltq_machine_halt;
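
Editorial note, not part of the patch: a minimal sketch of how board code
might use the RCU helpers added above. RCU_RST_USB is an assumed reset-request
bit chosen only for illustration, and the prototypes of ltq_reset_once() and
ltq_boot_select() are assumed to be visible via <lantiq_soc.h>.

#include <linux/bitops.h>
#include <linux/kernel.h>

#include <lantiq_soc.h>

#define RCU_RST_USB	BIT(4)	/* illustrative assumption, not a real define */

static void example_usb_reset(void)
{
	/* hold the assumed USB reset bit for 5000 microseconds, then release */
	ltq_reset_once(RCU_RST_USB, 5000);

	/* report which boot source strapping the RCU latched */
	pr_info("boot select: %d\n", ltq_boot_select());
}
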
diff --git a/arch/mips/lantiq/xway/setup-ase.c b/arch/mips/lantiq/xway/setup-ase.c
deleted file mode 100644
index f6f326798a3..00000000000
--- a/arch/mips/lantiq/xway/setup-ase.c
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
- */
-
-#include <lantiq_soc.h>
-
-#include "../prom.h"
-#include "devices.h"
-
-void __init ltq_soc_setup(void)
-{
- ltq_register_ase_asc();
- ltq_register_gpio();
- ltq_register_wdt();
-}
diff --git a/arch/mips/lantiq/xway/setup-xway.c b/arch/mips/lantiq/xway/setup-xway.c
deleted file mode 100644
index c292f643a85..00000000000
--- a/arch/mips/lantiq/xway/setup-xway.c
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
- */
-
-#include <lantiq_soc.h>
-
-#include "../prom.h"
-#include "devices.h"
-
-void __init ltq_soc_setup(void)
-{
- ltq_register_asc(0);
- ltq_register_asc(1);
- ltq_register_gpio();
- ltq_register_wdt();
-}
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
new file mode 100644
index 00000000000..83780f7c842
--- /dev/null
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -0,0 +1,371 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2011-2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/ioport.h>
+#include <linux/export.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+
+#include <lantiq_soc.h>
+
+#include "../clk.h"
+#include "../prom.h"
+
+/* clock control register */
+#define CGU_IFCCR 0x0018
+/* system clock register */
+#define CGU_SYS 0x0010
+/* pci control register */
+#define CGU_PCICR 0x0034
+/* ephy configuration register */
+#define CGU_EPHY 0x10
+/* power control register */
+#define PMU_PWDCR 0x1C
+/* power status register */
+#define PMU_PWDSR 0x20
+/* power control register */
+#define PMU_PWDCR1 0x24
+/* power status register */
+#define PMU_PWDSR1 0x28
+/* power control register */
+#define PWDCR(x) ((x) ? (PMU_PWDCR1) : (PMU_PWDCR))
+/* power status register */
+#define PWDSR(x) ((x) ? (PMU_PWDSR1) : (PMU_PWDSR))
+
+/* clock gates that we can en/disable */
+#define PMU_USB0_P BIT(0)
+#define PMU_PCI BIT(4)
+#define PMU_DMA BIT(5)
+#define PMU_USB0 BIT(6)
+#define PMU_ASC0 BIT(7)
+#define PMU_EPHY BIT(7) /* ase */
+#define PMU_SPI BIT(8)
+#define PMU_DFE BIT(9)
+#define PMU_EBU BIT(10)
+#define PMU_STP BIT(11)
+#define PMU_GPT BIT(12)
+#define PMU_AHBS BIT(13) /* vr9 */
+#define PMU_FPI BIT(14)
+#define PMU_AHBM BIT(15)
+#define PMU_ASC1 BIT(17)
+#define PMU_PPE_QSB BIT(18)
+#define PMU_PPE_SLL01 BIT(19)
+#define PMU_PPE_TC BIT(21)
+#define PMU_PPE_EMA BIT(22)
+#define PMU_PPE_DPLUM BIT(23)
+#define PMU_PPE_DPLUS BIT(24)
+#define PMU_USB1_P BIT(26)
+#define PMU_USB1 BIT(27)
+#define PMU_SWITCH BIT(28)
+#define PMU_PPE_TOP BIT(29)
+#define PMU_GPHY BIT(30)
+#define PMU_PCIE_CLK BIT(31)
+
+#define PMU1_PCIE_PHY BIT(0)
+#define PMU1_PCIE_CTL BIT(1)
+#define PMU1_PCIE_PDI BIT(4)
+#define PMU1_PCIE_MSI BIT(5)
+
+#define pmu_w32(x, y) ltq_w32((x), pmu_membase + (y))
+#define pmu_r32(x) ltq_r32(pmu_membase + (x))
+
+static void __iomem *pmu_membase;
+void __iomem *ltq_cgu_membase;
+void __iomem *ltq_ebu_membase;
+
+/* legacy function kept alive to ease clkdev transition */
+void ltq_pmu_enable(unsigned int module)
+{
+ int err = 1000000;
+
+ pmu_w32(pmu_r32(PMU_PWDCR) & ~module, PMU_PWDCR);
+ do {} while (--err && (pmu_r32(PMU_PWDSR) & module));
+
+ if (!err)
+ panic("activating PMU module failed!");
+}
+EXPORT_SYMBOL(ltq_pmu_enable);
+
+/* legacy function kept alive to ease clkdev transition */
+void ltq_pmu_disable(unsigned int module)
+{
+ pmu_w32(pmu_r32(PMU_PWDCR) | module, PMU_PWDCR);
+}
+EXPORT_SYMBOL(ltq_pmu_disable);
+
+/* enable a hw clock */
+static int cgu_enable(struct clk *clk)
+{
+ ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) | clk->bits, CGU_IFCCR);
+ return 0;
+}
+
+/* disable a hw clock */
+static void cgu_disable(struct clk *clk)
+{
+ ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) & ~clk->bits, CGU_IFCCR);
+}
+
+/* enable a clock gate */
+static int pmu_enable(struct clk *clk)
+{
+ int retry = 1000000;
+
+ pmu_w32(pmu_r32(PWDCR(clk->module)) & ~clk->bits,
+ PWDCR(clk->module));
+ do {} while (--retry && (pmu_r32(PWDSR(clk->module)) & clk->bits));
+
+ if (!retry)
+ panic("activating PMU module failed!\n");
+
+ return 0;
+}
+
+/* disable a clock gate */
+static void pmu_disable(struct clk *clk)
+{
+ pmu_w32(pmu_r32(PWDCR(clk->module)) | clk->bits,
+ PWDCR(clk->module));
+}
+
+/* the pci enable helper */
+static int pci_enable(struct clk *clk)
+{
+ unsigned int ifccr = ltq_cgu_r32(CGU_IFCCR);
+ /* set bus clock speed */
+ if (of_machine_is_compatible("lantiq,ar9")) {
+ ifccr &= ~0x1f00000;
+ if (clk->rate == CLOCK_33M)
+ ifccr |= 0xe00000;
+ else
+ ifccr |= 0x700000; /* 62.5M */
+ } else {
+ ifccr &= ~0xf00000;
+ if (clk->rate == CLOCK_33M)
+ ifccr |= 0x800000;
+ else
+ ifccr |= 0x400000; /* 62.5M */
+ }
+ ltq_cgu_w32(ifccr, CGU_IFCCR);
+ pmu_enable(clk);
+ return 0;
+}
+
+/* enable the external clock as a source */
+static int pci_ext_enable(struct clk *clk)
+{
+ ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) & ~(1 << 16),
+ CGU_IFCCR);
+ ltq_cgu_w32((1 << 30), CGU_PCICR);
+ return 0;
+}
+
+/* disable the external clock as a source */
+static void pci_ext_disable(struct clk *clk)
+{
+ ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) | (1 << 16),
+ CGU_IFCCR);
+ ltq_cgu_w32((1 << 31) | (1 << 30), CGU_PCICR);
+}
+
+/* enable a clockout source */
+static int clkout_enable(struct clk *clk)
+{
+ int i;
+
+ /* get the correct rate */
+ for (i = 0; i < 4; i++) {
+ if (clk->rates[i] == clk->rate) {
+ int shift = 14 - (2 * clk->module);
+ unsigned int ifccr = ltq_cgu_r32(CGU_IFCCR);
+
+ ifccr &= ~(3 << shift);
+ ifccr |= i << shift;
+ ltq_cgu_w32(ifccr, CGU_IFCCR);
+ return 0;
+ }
+ }
+ return -1;
+}
+
+/* manage the clock gates via PMU */
+static void clkdev_add_pmu(const char *dev, const char *con,
+ unsigned int module, unsigned int bits)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ clk->cl.dev_id = dev;
+ clk->cl.con_id = con;
+ clk->cl.clk = clk;
+ clk->enable = pmu_enable;
+ clk->disable = pmu_disable;
+ clk->module = module;
+ clk->bits = bits;
+ clkdev_add(&clk->cl);
+}
+
+/* manage the clock generator */
+static void clkdev_add_cgu(const char *dev, const char *con,
+ unsigned int bits)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ clk->cl.dev_id = dev;
+ clk->cl.con_id = con;
+ clk->cl.clk = clk;
+ clk->enable = cgu_enable;
+ clk->disable = cgu_disable;
+ clk->bits = bits;
+ clkdev_add(&clk->cl);
+}
+
+/* pci needs its own enable function as the setup is a bit more complex */
+static unsigned long valid_pci_rates[] = {CLOCK_33M, CLOCK_62_5M, 0};
+
+static void clkdev_add_pci(void)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+ struct clk *clk_ext = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ /* main pci clock */
+ clk->cl.dev_id = "17000000.pci";
+ clk->cl.con_id = NULL;
+ clk->cl.clk = clk;
+ clk->rate = CLOCK_33M;
+ clk->rates = valid_pci_rates;
+ clk->enable = pci_enable;
+ clk->disable = pmu_disable;
+ clk->module = 0;
+ clk->bits = PMU_PCI;
+ clkdev_add(&clk->cl);
+
+ /* use internal/external bus clock */
+ clk_ext->cl.dev_id = "17000000.pci";
+ clk_ext->cl.con_id = "external";
+ clk_ext->cl.clk = clk_ext;
+ clk_ext->enable = pci_ext_enable;
+ clk_ext->disable = pci_ext_disable;
+ clkdev_add(&clk_ext->cl);
+}
+
+/* xway socs can generate clocks on gpio pins */
+static unsigned long valid_clkout_rates[4][5] = {
+ {CLOCK_32_768K, CLOCK_1_536M, CLOCK_2_5M, CLOCK_12M, 0},
+ {CLOCK_40M, CLOCK_12M, CLOCK_24M, CLOCK_48M, 0},
+ {CLOCK_25M, CLOCK_40M, CLOCK_30M, CLOCK_60M, 0},
+ {CLOCK_12M, CLOCK_50M, CLOCK_32_768K, CLOCK_25M, 0},
+};
+
+static void clkdev_add_clkout(void)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ struct clk *clk;
+ char *name;
+
+ name = kzalloc(sizeof("clkout0"), GFP_KERNEL);
+ sprintf(name, "clkout%d", i);
+
+ clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+ clk->cl.dev_id = "1f103000.cgu";
+ clk->cl.con_id = name;
+ clk->cl.clk = clk;
+ clk->rate = 0;
+ clk->rates = valid_clkout_rates[i];
+ clk->enable = clkout_enable;
+ clk->module = i;
+ clkdev_add(&clk->cl);
+ }
+}
+
+/* bring up all register ranges that we need for basic system control */
+void __init ltq_soc_init(void)
+{
+ struct resource res_pmu, res_cgu, res_ebu;
+ struct device_node *np_pmu =
+ of_find_compatible_node(NULL, NULL, "lantiq,pmu-xway");
+ struct device_node *np_cgu =
+ of_find_compatible_node(NULL, NULL, "lantiq,cgu-xway");
+ struct device_node *np_ebu =
+ of_find_compatible_node(NULL, NULL, "lantiq,ebu-xway");
+
+ /* check if all the core register ranges are available */
+ if (!np_pmu || !np_cgu || !np_ebu)
+ panic("Failed to load core nodess from devicetree");
+
+ if (of_address_to_resource(np_pmu, 0, &res_pmu) ||
+ of_address_to_resource(np_cgu, 0, &res_cgu) ||
+ of_address_to_resource(np_ebu, 0, &res_ebu))
+ panic("Failed to get core resources");
+
+ if ((request_mem_region(res_pmu.start, resource_size(&res_pmu),
+ res_pmu.name) < 0) ||
+ (request_mem_region(res_cgu.start, resource_size(&res_cgu),
+ res_cgu.name) < 0) ||
+ (request_mem_region(res_ebu.start, resource_size(&res_ebu),
+ res_ebu.name) < 0))
+ pr_err("Failed to request core reources");
+
+ pmu_membase = ioremap_nocache(res_pmu.start, resource_size(&res_pmu));
+ ltq_cgu_membase = ioremap_nocache(res_cgu.start,
+ resource_size(&res_cgu));
+ ltq_ebu_membase = ioremap_nocache(res_ebu.start,
+ resource_size(&res_ebu));
+ if (!pmu_membase || !ltq_cgu_membase || !ltq_ebu_membase)
+ panic("Failed to remap core resources");
+
+ /* make sure to unprotect the memory region where flash is located */
+ ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0);
+
+ /* add our generic xway clocks */
+ clkdev_add_pmu("10000000.fpi", NULL, 0, PMU_FPI);
+ clkdev_add_pmu("1e100400.serial", NULL, 0, PMU_ASC0);
+ clkdev_add_pmu("1e100a00.gptu", NULL, 0, PMU_GPT);
+ clkdev_add_pmu("1e100bb0.stp", NULL, 0, PMU_STP);
+ clkdev_add_pmu("1e104100.dma", NULL, 0, PMU_DMA);
+ clkdev_add_pmu("1e100800.spi", NULL, 0, PMU_SPI);
+ clkdev_add_pmu("1e105300.ebu", NULL, 0, PMU_EBU);
+ clkdev_add_clkout();
+
+ /* add the soc dependent clocks */
+ if (!of_machine_is_compatible("lantiq,vr9"))
+ clkdev_add_pmu("1e180000.etop", NULL, 0, PMU_PPE);
+
+ if (!of_machine_is_compatible("lantiq,ase")) {
+ clkdev_add_pmu("1e100c00.serial", NULL, 0, PMU_ASC1);
+ clkdev_add_pci();
+ }
+
+ if (of_machine_is_compatible("lantiq,ase")) {
+ if (ltq_cgu_r32(CGU_SYS) & (1 << 5))
+ clkdev_add_static(CLOCK_266M, CLOCK_133M, CLOCK_133M);
+ else
+ clkdev_add_static(CLOCK_133M, CLOCK_133M, CLOCK_133M);
+ clkdev_add_cgu("1e180000.etop", "ephycgu", CGU_EPHY);
+ clkdev_add_pmu("1e180000.etop", "ephy", 0, PMU_EPHY);
+ } else if (of_machine_is_compatible("lantiq,vr9")) {
+ clkdev_add_static(ltq_vr9_cpu_hz(), ltq_vr9_fpi_hz(),
+ ltq_vr9_fpi_hz());
+ clkdev_add_pmu("1d900000.pcie", "phy", 1, PMU1_PCIE_PHY);
+ clkdev_add_pmu("1d900000.pcie", "bus", 0, PMU_PCIE_CLK);
+ clkdev_add_pmu("1d900000.pcie", "msi", 1, PMU1_PCIE_MSI);
+ clkdev_add_pmu("1d900000.pcie", "pdi", 1, PMU1_PCIE_PDI);
+ clkdev_add_pmu("1d900000.pcie", "ctl", 1, PMU1_PCIE_CTL);
+ clkdev_add_pmu("1d900000.pcie", "ahb", 0, PMU_AHBM | PMU_AHBS);
+ } else if (of_machine_is_compatible("lantiq,ar9")) {
+ clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(),
+ ltq_ar9_fpi_hz());
+ clkdev_add_pmu("1e180000.etop", "switch", 0, PMU_SWITCH);
+ } else {
+ clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(),
+ ltq_danube_fpi_hz());
+ }
+}
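
Editorial note, not part of the patch: a sketch of a driver probe consuming
one of the gate clocks registered by ltq_soc_init() above. It assumes the
platform device is named "1e100bb0.stp", matching the dev_id passed to
clkdev_add_pmu() in this file; the probe body is illustrative only.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_stp_probe(struct platform_device *pdev)
{
	struct clk *clk = clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* clk_enable() ends up in pmu_enable() and clears the PWDCR gate bit */
	return clk_enable(clk);
}
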
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 47037ec5589..44e69e7a451 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -21,6 +21,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
+#include <asm/traps.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
@@ -248,6 +249,11 @@ static void __cpuinit probe_octeon(void)
}
}
+static void __cpuinit octeon_cache_error_setup(void)
+{
+ extern char except_vec2_octeon;
+ set_handler(0x100, &except_vec2_octeon, 0x80);
+}
/**
* Setup the Octeon cache flush routines
@@ -255,12 +261,6 @@ static void __cpuinit probe_octeon(void)
*/
void __cpuinit octeon_cache_init(void)
{
- extern unsigned long ebase;
- extern char except_vec2_octeon;
-
- memcpy((void *)(ebase + 0x100), &except_vec2_octeon, 0x80);
- octeon_flush_cache_sigtramp(ebase + 0x100);
-
probe_octeon();
shm_align_mask = PAGE_SIZE - 1;
@@ -280,6 +280,8 @@ void __cpuinit octeon_cache_init(void)
build_clear_page();
build_copy_page();
+
+ board_cache_error_setup = octeon_cache_error_setup;
}
/**
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index bda8eb26ece..5109be96d98 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -32,7 +32,7 @@
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
-
+#include <asm/traps.h>
/*
* Special Variant of smp_call_function for use by cache functions:
@@ -1385,10 +1385,8 @@ static int __init setcoherentio(char *str)
__setup("coherentio", setcoherentio);
#endif
-void __cpuinit r4k_cache_init(void)
+static void __cpuinit r4k_cache_error_setup(void)
{
- extern void build_clear_page(void);
- extern void build_copy_page(void);
extern char __weak except_vec2_generic;
extern char __weak except_vec2_sb1;
struct cpuinfo_mips *c = &current_cpu_data;
@@ -1403,6 +1401,13 @@ void __cpuinit r4k_cache_init(void)
set_uncached_handler(0x100, &except_vec2_generic, 0x80);
break;
}
+}
+
+void __cpuinit r4k_cache_init(void)
+{
+ extern void build_clear_page(void);
+ extern void build_copy_page(void);
+ struct cpuinfo_mips *c = &current_cpu_data;
probe_pcache();
setup_scache();
@@ -1465,4 +1470,5 @@ void __cpuinit r4k_cache_init(void)
local_r4k___flush_cache_all(NULL);
#endif
coherency_setup();
+ board_cache_error_setup = r4k_cache_error_setup;
}
diff --git a/arch/mips/oprofile/Makefile b/arch/mips/oprofile/Makefile
index 29f2f13eb31..1208c280f77 100644
--- a/arch/mips/oprofile/Makefile
+++ b/arch/mips/oprofile/Makefile
@@ -1,5 +1,3 @@
-ccflags-y := -Werror
-
obj-$(CONFIG_OPROFILE) += oprofile.o
DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 54759f1669d..baba3bcaa3c 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -298,6 +298,11 @@ static void reset_counters(void *arg)
}
}
+static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
+{
+ return mipsxx_perfcount_handler();
+}
+
static int __init mipsxx_init(void)
{
int counters;
@@ -374,6 +379,10 @@ static int __init mipsxx_init(void)
save_perf_irq = perf_irq;
perf_irq = mipsxx_perfcount_handler;
+ if ((cp0_perfcount_irq >= 0) && (cp0_compare_irq != cp0_perfcount_irq))
+ return request_irq(cp0_perfcount_irq, mipsxx_perfcount_int,
+ 0, "Perfcounter", save_perf_irq);
+
return 0;
}
@@ -381,6 +390,9 @@ static void mipsxx_exit(void)
{
int counters = op_model_mipsxx_ops.num_counters;
+ if ((cp0_perfcount_irq >= 0) && (cp0_compare_irq != cp0_perfcount_irq))
+ free_irq(cp0_perfcount_irq, save_perf_irq);
+
counters = counters_per_cpu_to_total(counters);
on_each_cpu(reset_counters, (void *)(long)counters, 1);
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index c3ac4b086eb..c703f43a991 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -19,7 +19,8 @@ obj-$(CONFIG_BCM47XX) += pci-bcm47xx.o
obj-$(CONFIG_BCM63XX) += pci-bcm63xx.o fixup-bcm63xx.o \
ops-bcm63xx.o
obj-$(CONFIG_MIPS_ALCHEMY) += pci-alchemy.o
-obj-$(CONFIG_SOC_AR724X) += pci-ath724x.o
+obj-$(CONFIG_SOC_AR71XX) += pci-ar71xx.o
+obj-$(CONFIG_PCI_AR724X) += pci-ar724x.o
#
# These are still pretty much in the old state, watch, go blind.
@@ -41,7 +42,8 @@ obj-$(CONFIG_SIBYTE_SB1250) += fixup-sb1250.o pci-sb1250.o
obj-$(CONFIG_SIBYTE_BCM112X) += fixup-sb1250.o pci-sb1250.o
obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1480.o pci-bcm1480ht.o
obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o
-obj-$(CONFIG_SOC_XWAY) += pci-lantiq.o ops-lantiq.o
+obj-$(CONFIG_LANTIQ) += fixup-lantiq.o
+obj-$(CONFIG_PCI_LANTIQ) += pci-lantiq.o ops-lantiq.o
obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o
obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o
obj-$(CONFIG_TANBAC_TB0287) += fixup-tb0287.o
diff --git a/arch/mips/pci/fixup-lantiq.c b/arch/mips/pci/fixup-lantiq.c
new file mode 100644
index 00000000000..6c829df28dc
--- /dev/null
+++ b/arch/mips/pci/fixup-lantiq.c
@@ -0,0 +1,40 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+
+int (*ltq_pci_plat_arch_init)(struct pci_dev *dev) = NULL;
+int (*ltq_pci_plat_dev_init)(struct pci_dev *dev) = NULL;
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ if (ltq_pci_plat_arch_init)
+ return ltq_pci_plat_arch_init(dev);
+
+ if (ltq_pci_plat_dev_init)
+ return ltq_pci_plat_dev_init(dev);
+
+ return 0;
+}
+
+int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct of_irq dev_irq;
+ int irq;
+
+ if (of_irq_map_pci(dev, &dev_irq)) {
+ dev_err(&dev->dev, "trying to map irq for unknown slot:%d pin:%d\n",
+ slot, pin);
+ return 0;
+ }
+ irq = irq_create_of_mapping(dev_irq.controller, dev_irq.specifier,
+ dev_irq.size);
+ dev_info(&dev->dev, "SLOT:%d PIN:%d IRQ:%d\n", slot, pin, irq);
+ return irq;
+}
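
Editorial note, not part of the patch: a sketch of how board support code
might hook the callback pointer exported by fixup-lantiq.c above. The fixup
body and the example_* function names are assumptions made for illustration.

#include <linux/pci.h>

extern int (*ltq_pci_plat_dev_init)(struct pci_dev *dev);

static int example_board_pci_fixup(struct pci_dev *dev)
{
	/* a board would typically set up reset or power lines here */
	dev_info(&dev->dev, "board fixup for %04x:%04x\n",
		 dev->vendor, dev->device);
	return 0;
}

static void example_board_pci_setup(void)
{
	ltq_pci_plat_dev_init = example_board_pci_fixup;
}
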
diff --git a/arch/mips/pci/ops-loongson2.c b/arch/mips/pci/ops-loongson2.c
index d657ee0bc13..afd221122d2 100644
--- a/arch/mips/pci/ops-loongson2.c
+++ b/arch/mips/pci/ops-loongson2.c
@@ -15,6 +15,7 @@
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/export.h>
#include <loongson.h>
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c
new file mode 100644
index 00000000000..1552522b871
--- /dev/null
+++ b/arch/mips/pci/pci-ar71xx.c
@@ -0,0 +1,375 @@
+/*
+ * Atheros AR71xx PCI host controller driver
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Parts of this file are based on Atheros' 2.6.15 BSP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/resource.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/interrupt.h>
+
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/pci.h>
+
+#define AR71XX_PCI_MEM_BASE 0x10000000
+#define AR71XX_PCI_MEM_SIZE 0x08000000
+
+#define AR71XX_PCI_WIN0_OFFS 0x10000000
+#define AR71XX_PCI_WIN1_OFFS 0x11000000
+#define AR71XX_PCI_WIN2_OFFS 0x12000000
+#define AR71XX_PCI_WIN3_OFFS 0x13000000
+#define AR71XX_PCI_WIN4_OFFS 0x14000000
+#define AR71XX_PCI_WIN5_OFFS 0x15000000
+#define AR71XX_PCI_WIN6_OFFS 0x16000000
+#define AR71XX_PCI_WIN7_OFFS 0x07000000
+
+#define AR71XX_PCI_CFG_BASE \
+ (AR71XX_PCI_MEM_BASE + AR71XX_PCI_WIN7_OFFS + 0x10000)
+#define AR71XX_PCI_CFG_SIZE 0x100
+
+#define AR71XX_PCI_REG_CRP_AD_CBE 0x00
+#define AR71XX_PCI_REG_CRP_WRDATA 0x04
+#define AR71XX_PCI_REG_CRP_RDDATA 0x08
+#define AR71XX_PCI_REG_CFG_AD 0x0c
+#define AR71XX_PCI_REG_CFG_CBE 0x10
+#define AR71XX_PCI_REG_CFG_WRDATA 0x14
+#define AR71XX_PCI_REG_CFG_RDDATA 0x18
+#define AR71XX_PCI_REG_PCI_ERR 0x1c
+#define AR71XX_PCI_REG_PCI_ERR_ADDR 0x20
+#define AR71XX_PCI_REG_AHB_ERR 0x24
+#define AR71XX_PCI_REG_AHB_ERR_ADDR 0x28
+
+#define AR71XX_PCI_CRP_CMD_WRITE 0x00010000
+#define AR71XX_PCI_CRP_CMD_READ 0x00000000
+#define AR71XX_PCI_CFG_CMD_READ 0x0000000a
+#define AR71XX_PCI_CFG_CMD_WRITE 0x0000000b
+
+#define AR71XX_PCI_INT_CORE BIT(4)
+#define AR71XX_PCI_INT_DEV2 BIT(2)
+#define AR71XX_PCI_INT_DEV1 BIT(1)
+#define AR71XX_PCI_INT_DEV0 BIT(0)
+
+#define AR71XX_PCI_IRQ_COUNT 5
+
+static DEFINE_SPINLOCK(ar71xx_pci_lock);
+static void __iomem *ar71xx_pcicfg_base;
+
+/* Byte lane enable bits */
+static const u8 ar71xx_pci_ble_table[4][4] = {
+ {0x0, 0xf, 0xf, 0xf},
+ {0xe, 0xd, 0xb, 0x7},
+ {0xc, 0xf, 0x3, 0xf},
+ {0xf, 0xf, 0xf, 0xf},
+};
+
+static const u32 ar71xx_pci_read_mask[8] = {
+ 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0
+};
+
+static inline u32 ar71xx_pci_get_ble(int where, int size, int local)
+{
+ u32 t;
+
+ t = ar71xx_pci_ble_table[size & 3][where & 3];
+ BUG_ON(t == 0xf);
+ t <<= (local) ? 20 : 4;
+
+ return t;
+}
+
+static inline u32 ar71xx_pci_bus_addr(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ u32 ret;
+
+ if (!bus->number) {
+ /* type 0 */
+ ret = (1 << PCI_SLOT(devfn)) | (PCI_FUNC(devfn) << 8) |
+ (where & ~3);
+ } else {
+ /* type 1 */
+ ret = (bus->number << 16) | (PCI_SLOT(devfn) << 11) |
+ (PCI_FUNC(devfn) << 8) | (where & ~3) | 1;
+ }
+
+ return ret;
+}
+
+static int ar71xx_pci_check_error(int quiet)
+{
+ void __iomem *base = ar71xx_pcicfg_base;
+ u32 pci_err;
+ u32 ahb_err;
+
+ pci_err = __raw_readl(base + AR71XX_PCI_REG_PCI_ERR) & 3;
+ if (pci_err) {
+ if (!quiet) {
+ u32 addr;
+
+ addr = __raw_readl(base + AR71XX_PCI_REG_PCI_ERR_ADDR);
+ pr_crit("ar71xx: %s bus error %d at addr 0x%x\n",
+ "PCI", pci_err, addr);
+ }
+
+ /* clear PCI error status */
+ __raw_writel(pci_err, base + AR71XX_PCI_REG_PCI_ERR);
+ }
+
+ ahb_err = __raw_readl(base + AR71XX_PCI_REG_AHB_ERR) & 1;
+ if (ahb_err) {
+ if (!quiet) {
+ u32 addr;
+
+ addr = __raw_readl(base + AR71XX_PCI_REG_AHB_ERR_ADDR);
+ pr_crit("ar71xx: %s bus error %d at addr 0x%x\n",
+ "AHB", ahb_err, addr);
+ }
+
+ /* clear AHB error status */
+ __raw_writel(ahb_err, base + AR71XX_PCI_REG_AHB_ERR);
+ }
+
+ return !!(ahb_err | pci_err);
+}
+
+static inline void ar71xx_pci_local_write(int where, int size, u32 value)
+{
+ void __iomem *base = ar71xx_pcicfg_base;
+ u32 ad_cbe;
+
+ value = value << (8 * (where & 3));
+
+ ad_cbe = AR71XX_PCI_CRP_CMD_WRITE | (where & ~3);
+ ad_cbe |= ar71xx_pci_get_ble(where, size, 1);
+
+ __raw_writel(ad_cbe, base + AR71XX_PCI_REG_CRP_AD_CBE);
+ __raw_writel(value, base + AR71XX_PCI_REG_CRP_WRDATA);
+}
+
+static inline int ar71xx_pci_set_cfgaddr(struct pci_bus *bus,
+ unsigned int devfn,
+ int where, int size, u32 cmd)
+{
+ void __iomem *base = ar71xx_pcicfg_base;
+ u32 addr;
+
+ addr = ar71xx_pci_bus_addr(bus, devfn, where);
+
+ __raw_writel(addr, base + AR71XX_PCI_REG_CFG_AD);
+ __raw_writel(cmd | ar71xx_pci_get_ble(where, size, 0),
+ base + AR71XX_PCI_REG_CFG_CBE);
+
+ return ar71xx_pci_check_error(1);
+}
+
+static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ void __iomem *base = ar71xx_pcicfg_base;
+ unsigned long flags;
+ u32 data;
+ int err;
+ int ret;
+
+ ret = PCIBIOS_SUCCESSFUL;
+ data = ~0;
+
+ spin_lock_irqsave(&ar71xx_pci_lock, flags);
+
+ err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size,
+ AR71XX_PCI_CFG_CMD_READ);
+ if (err)
+ ret = PCIBIOS_DEVICE_NOT_FOUND;
+ else
+ data = __raw_readl(base + AR71XX_PCI_REG_CFG_RDDATA);
+
+ spin_unlock_irqrestore(&ar71xx_pci_lock, flags);
+
+ *value = (data >> (8 * (where & 3))) & ar71xx_pci_read_mask[size & 7];
+
+ return ret;
+}
+
+static int ar71xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ void __iomem *base = ar71xx_pcicfg_base;
+ unsigned long flags;
+ int err;
+ int ret;
+
+ value = value << (8 * (where & 3));
+ ret = PCIBIOS_SUCCESSFUL;
+
+ spin_lock_irqsave(&ar71xx_pci_lock, flags);
+
+ err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size,
+ AR71XX_PCI_CFG_CMD_WRITE);
+ if (err)
+ ret = PCIBIOS_DEVICE_NOT_FOUND;
+ else
+ __raw_writel(value, base + AR71XX_PCI_REG_CFG_WRDATA);
+
+ spin_unlock_irqrestore(&ar71xx_pci_lock, flags);
+
+ return ret;
+}
+
+static struct pci_ops ar71xx_pci_ops = {
+ .read = ar71xx_pci_read_config,
+ .write = ar71xx_pci_write_config,
+};
+
+static struct resource ar71xx_pci_io_resource = {
+ .name = "PCI IO space",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_IO,
+};
+
+static struct resource ar71xx_pci_mem_resource = {
+ .name = "PCI memory space",
+ .start = AR71XX_PCI_MEM_BASE,
+ .end = AR71XX_PCI_MEM_BASE + AR71XX_PCI_MEM_SIZE - 1,
+ .flags = IORESOURCE_MEM
+};
+
+static struct pci_controller ar71xx_pci_controller = {
+ .pci_ops = &ar71xx_pci_ops,
+ .mem_resource = &ar71xx_pci_mem_resource,
+ .io_resource = &ar71xx_pci_io_resource,
+};
+
+static void ar71xx_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ void __iomem *base = ath79_reset_base;
+ u32 pending;
+
+ pending = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_STATUS) &
+ __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+
+ if (pending & AR71XX_PCI_INT_DEV0)
+ generic_handle_irq(ATH79_PCI_IRQ(0));
+
+ else if (pending & AR71XX_PCI_INT_DEV1)
+ generic_handle_irq(ATH79_PCI_IRQ(1));
+
+ else if (pending & AR71XX_PCI_INT_DEV2)
+ generic_handle_irq(ATH79_PCI_IRQ(2));
+
+ else if (pending & AR71XX_PCI_INT_CORE)
+ generic_handle_irq(ATH79_PCI_IRQ(4));
+
+ else
+ spurious_interrupt();
+}
+
+static void ar71xx_pci_irq_unmask(struct irq_data *d)
+{
+ unsigned int irq = d->irq - ATH79_PCI_IRQ_BASE;
+ void __iomem *base = ath79_reset_base;
+ u32 t;
+
+ t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+ __raw_writel(t | (1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+
+ /* flush write */
+ __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+}
+
+static void ar71xx_pci_irq_mask(struct irq_data *d)
+{
+ unsigned int irq = d->irq - ATH79_PCI_IRQ_BASE;
+ void __iomem *base = ath79_reset_base;
+ u32 t;
+
+ t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+ __raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+
+ /* flush write */
+ __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+}
+
+static struct irq_chip ar71xx_pci_irq_chip = {
+ .name = "AR71XX PCI",
+ .irq_mask = ar71xx_pci_irq_mask,
+ .irq_unmask = ar71xx_pci_irq_unmask,
+ .irq_mask_ack = ar71xx_pci_irq_mask,
+};
+
+static __init void ar71xx_pci_irq_init(void)
+{
+ void __iomem *base = ath79_reset_base;
+ int i;
+
+ __raw_writel(0, base + AR71XX_RESET_REG_PCI_INT_ENABLE);
+ __raw_writel(0, base + AR71XX_RESET_REG_PCI_INT_STATUS);
+
+ BUILD_BUG_ON(ATH79_PCI_IRQ_COUNT < AR71XX_PCI_IRQ_COUNT);
+
+ for (i = ATH79_PCI_IRQ_BASE;
+ i < ATH79_PCI_IRQ_BASE + AR71XX_PCI_IRQ_COUNT; i++)
+ irq_set_chip_and_handler(i, &ar71xx_pci_irq_chip,
+ handle_level_irq);
+
+ irq_set_chained_handler(ATH79_CPU_IRQ_IP2, ar71xx_pci_irq_handler);
+}
+
+static __init void ar71xx_pci_reset(void)
+{
+ void __iomem *ddr_base = ath79_ddr_base;
+
+ ath79_device_reset_set(AR71XX_RESET_PCI_BUS | AR71XX_RESET_PCI_CORE);
+ mdelay(100);
+
+ ath79_device_reset_clear(AR71XX_RESET_PCI_BUS | AR71XX_RESET_PCI_CORE);
+ mdelay(100);
+
+ __raw_writel(AR71XX_PCI_WIN0_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN0);
+ __raw_writel(AR71XX_PCI_WIN1_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN1);
+ __raw_writel(AR71XX_PCI_WIN2_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN2);
+ __raw_writel(AR71XX_PCI_WIN3_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN3);
+ __raw_writel(AR71XX_PCI_WIN4_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN4);
+ __raw_writel(AR71XX_PCI_WIN5_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN5);
+ __raw_writel(AR71XX_PCI_WIN6_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN6);
+ __raw_writel(AR71XX_PCI_WIN7_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN7);
+
+ mdelay(100);
+}
+
+__init int ar71xx_pcibios_init(void)
+{
+ u32 t;
+
+ ar71xx_pcicfg_base = ioremap(AR71XX_PCI_CFG_BASE, AR71XX_PCI_CFG_SIZE);
+ if (ar71xx_pcicfg_base == NULL)
+ return -ENOMEM;
+
+ ar71xx_pci_reset();
+
+ /* setup COMMAND register */
+ t = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE
+ | PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK;
+ ar71xx_pci_local_write(PCI_COMMAND, 4, t);
+
+ /* clear bus errors */
+ ar71xx_pci_check_error(1);
+
+ ar71xx_pci_irq_init();
+
+ register_pci_controller(&ar71xx_pci_controller);
+
+ return 0;
+}
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
new file mode 100644
index 00000000000..414a7459858
--- /dev/null
+++ b/arch/mips/pci/pci-ar724x.c
@@ -0,0 +1,292 @@
+/*
+ * Atheros AR724X PCI host controller driver
+ *
+ * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
+ * Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include <asm/mach-ath79/pci.h>
+
+#define AR724X_PCI_CFG_BASE 0x14000000
+#define AR724X_PCI_CFG_SIZE 0x1000
+#define AR724X_PCI_CTRL_BASE (AR71XX_APB_BASE + 0x000f0000)
+#define AR724X_PCI_CTRL_SIZE 0x100
+
+#define AR724X_PCI_MEM_BASE 0x10000000
+#define AR724X_PCI_MEM_SIZE 0x08000000
+
+#define AR724X_PCI_REG_INT_STATUS 0x4c
+#define AR724X_PCI_REG_INT_MASK 0x50
+
+#define AR724X_PCI_INT_DEV0 BIT(14)
+
+#define AR724X_PCI_IRQ_COUNT 1
+
+#define AR7240_BAR0_WAR_VALUE 0xffff
+
+static DEFINE_SPINLOCK(ar724x_pci_lock);
+static void __iomem *ar724x_pci_devcfg_base;
+static void __iomem *ar724x_pci_ctrl_base;
+
+static u32 ar724x_pci_bar0_value;
+static bool ar724x_pci_bar0_is_cached;
+
+static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, uint32_t *value)
+{
+ unsigned long flags;
+ void __iomem *base;
+ u32 data;
+
+ if (devfn)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ base = ar724x_pci_devcfg_base;
+
+ spin_lock_irqsave(&ar724x_pci_lock, flags);
+ data = __raw_readl(base + (where & ~3));
+
+ switch (size) {
+ case 1:
+ if (where & 1)
+ data >>= 8;
+ if (where & 2)
+ data >>= 16;
+ data &= 0xff;
+ break;
+ case 2:
+ if (where & 2)
+ data >>= 16;
+ data &= 0xffff;
+ break;
+ case 4:
+ break;
+ default:
+ spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+
+ if (where == PCI_BASE_ADDRESS_0 && size == 4 &&
+ ar724x_pci_bar0_is_cached) {
+ /* use the cached value */
+ *value = ar724x_pci_bar0_value;
+ } else {
+ *value = data;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, uint32_t value)
+{
+ unsigned long flags;
+ void __iomem *base;
+ u32 data;
+ int s;
+
+ if (devfn)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (soc_is_ar7240() && where == PCI_BASE_ADDRESS_0 && size == 4) {
+ if (value != 0xffffffff) {
+ /*
+ * WAR for a hw issue. If the BAR0 register of the
+ * device is set to the proper base address, the
+ * memory space of the device is not accessible.
+ *
+ * Cache the intended value so it can be read back,
+ * and write a SoC specific constant value to the
+ * BAR0 register in order to make the device memory
+ * accessible.
+ */
+ ar724x_pci_bar0_is_cached = true;
+ ar724x_pci_bar0_value = value;
+
+ value = AR7240_BAR0_WAR_VALUE;
+ } else {
+ ar724x_pci_bar0_is_cached = false;
+ }
+ }
+
+ base = ar724x_pci_devcfg_base;
+
+ spin_lock_irqsave(&ar724x_pci_lock, flags);
+ data = __raw_readl(base + (where & ~3));
+
+ switch (size) {
+ case 1:
+ s = ((where & 3) * 8);
+ data &= ~(0xff << s);
+ data |= ((value & 0xff) << s);
+ break;
+ case 2:
+ s = ((where & 2) * 8);
+ data &= ~(0xffff << s);
+ data |= ((value & 0xffff) << s);
+ break;
+ case 4:
+ data = value;
+ break;
+ default:
+ spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ __raw_writel(data, base + (where & ~3));
+ /* flush write */
+ __raw_readl(base + (where & ~3));
+ spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops ar724x_pci_ops = {
+ .read = ar724x_pci_read,
+ .write = ar724x_pci_write,
+};
+
+static struct resource ar724x_io_resource = {
+ .name = "PCI IO space",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_IO,
+};
+
+static struct resource ar724x_mem_resource = {
+ .name = "PCI memory space",
+ .start = AR724X_PCI_MEM_BASE,
+ .end = AR724X_PCI_MEM_BASE + AR724X_PCI_MEM_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct pci_controller ar724x_pci_controller = {
+ .pci_ops = &ar724x_pci_ops,
+ .io_resource = &ar724x_io_resource,
+ .mem_resource = &ar724x_mem_resource,
+};
+
+static void ar724x_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ void __iomem *base;
+ u32 pending;
+
+ base = ar724x_pci_ctrl_base;
+
+ pending = __raw_readl(base + AR724X_PCI_REG_INT_STATUS) &
+ __raw_readl(base + AR724X_PCI_REG_INT_MASK);
+
+ if (pending & AR724X_PCI_INT_DEV0)
+ generic_handle_irq(ATH79_PCI_IRQ(0));
+
+ else
+ spurious_interrupt();
+}
+
+static void ar724x_pci_irq_unmask(struct irq_data *d)
+{
+ void __iomem *base;
+ u32 t;
+
+ base = ar724x_pci_ctrl_base;
+
+ switch (d->irq) {
+ case ATH79_PCI_IRQ(0):
+ t = __raw_readl(base + AR724X_PCI_REG_INT_MASK);
+ __raw_writel(t | AR724X_PCI_INT_DEV0,
+ base + AR724X_PCI_REG_INT_MASK);
+ /* flush write */
+ __raw_readl(base + AR724X_PCI_REG_INT_MASK);
+ }
+}
+
+static void ar724x_pci_irq_mask(struct irq_data *d)
+{
+ void __iomem *base;
+ u32 t;
+
+ base = ar724x_pci_ctrl_base;
+
+ switch (d->irq) {
+ case ATH79_PCI_IRQ(0):
+ t = __raw_readl(base + AR724X_PCI_REG_INT_MASK);
+ __raw_writel(t & ~AR724X_PCI_INT_DEV0,
+ base + AR724X_PCI_REG_INT_MASK);
+
+ /* flush write */
+ __raw_readl(base + AR724X_PCI_REG_INT_MASK);
+
+ t = __raw_readl(base + AR724X_PCI_REG_INT_STATUS);
+ __raw_writel(t | AR724X_PCI_INT_DEV0,
+ base + AR724X_PCI_REG_INT_STATUS);
+
+ /* flush write */
+ __raw_readl(base + AR724X_PCI_REG_INT_STATUS);
+ }
+}
+
+static struct irq_chip ar724x_pci_irq_chip = {
+ .name = "AR724X PCI ",
+ .irq_mask = ar724x_pci_irq_mask,
+ .irq_unmask = ar724x_pci_irq_unmask,
+ .irq_mask_ack = ar724x_pci_irq_mask,
+};
+
+static void __init ar724x_pci_irq_init(int irq)
+{
+ void __iomem *base;
+ int i;
+
+ base = ar724x_pci_ctrl_base;
+
+ __raw_writel(0, base + AR724X_PCI_REG_INT_MASK);
+ __raw_writel(0, base + AR724X_PCI_REG_INT_STATUS);
+
+ BUILD_BUG_ON(ATH79_PCI_IRQ_COUNT < AR724X_PCI_IRQ_COUNT);
+
+ for (i = ATH79_PCI_IRQ_BASE;
+ i < ATH79_PCI_IRQ_BASE + AR724X_PCI_IRQ_COUNT; i++)
+ irq_set_chip_and_handler(i, &ar724x_pci_irq_chip,
+ handle_level_irq);
+
+ irq_set_chained_handler(irq, ar724x_pci_irq_handler);
+}
+
+int __init ar724x_pcibios_init(int irq)
+{
+ int ret;
+
+ ret = -ENOMEM;
+
+ ar724x_pci_devcfg_base = ioremap(AR724X_PCI_CFG_BASE,
+ AR724X_PCI_CFG_SIZE);
+ if (ar724x_pci_devcfg_base == NULL)
+ goto err;
+
+ ar724x_pci_ctrl_base = ioremap(AR724X_PCI_CTRL_BASE,
+ AR724X_PCI_CTRL_SIZE);
+ if (ar724x_pci_ctrl_base == NULL)
+ goto err_unmap_devcfg;
+
+ ar724x_pci_irq_init(irq);
+ register_pci_controller(&ar724x_pci_controller);
+
+ return PCIBIOS_SUCCESSFUL;
+
+err_unmap_devcfg:
+ iounmap(ar724x_pci_devcfg_base);
+err:
+ return ret;
+}
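
Editorial note, not part of the patch: ar724x_pci_write() above merges 1- and
2-byte config writes into the surrounding 32-bit word with a shift-and-mask
read-modify-write. The stand-alone sketch below reproduces that merge step on
hypothetical values so it can be tried in user space.

#include <stdint.h>
#include <stdio.h>

/* merge a 1/2/4 byte config write at offset 'where' into the old 32-bit word */
static uint32_t merge_cfg_write(uint32_t old, int where, int size, uint32_t value)
{
	int s;

	switch (size) {
	case 1:
		s = (where & 3) * 8;
		return (old & ~(0xffu << s)) | ((value & 0xff) << s);
	case 2:
		s = (where & 2) * 8;
		return (old & ~(0xffffu << s)) | ((value & 0xffff) << s);
	default:
		return value;
	}
}

int main(void)
{
	/* write the byte 0xab at offset 1 of a word that read back as 0x11223344 */
	printf("0x%08x\n", merge_cfg_write(0x11223344, 1, 1, 0xab));
	return 0;
}
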
diff --git a/arch/mips/pci/pci-ath724x.c b/arch/mips/pci/pci-ath724x.c
deleted file mode 100644
index a4dd24a4130..00000000000
--- a/arch/mips/pci/pci-ath724x.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Atheros 724x PCI support
- *
- * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#include <linux/pci.h>
-#include <asm/mach-ath79/pci-ath724x.h>
-
-#define reg_read(_phys) (*(unsigned int *) KSEG1ADDR(_phys))
-#define reg_write(_phys, _val) ((*(unsigned int *) KSEG1ADDR(_phys)) = (_val))
-
-#define ATH724X_PCI_DEV_BASE 0x14000000
-#define ATH724X_PCI_MEM_BASE 0x10000000
-#define ATH724X_PCI_MEM_SIZE 0x08000000
-
-static DEFINE_SPINLOCK(ath724x_pci_lock);
-static struct ath724x_pci_data *pci_data;
-static int pci_data_size;
-
-static int ath724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
- int size, uint32_t *value)
-{
- unsigned long flags, addr, tval, mask;
-
- if (devfn)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (where & (size - 1))
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- spin_lock_irqsave(&ath724x_pci_lock, flags);
-
- switch (size) {
- case 1:
- addr = where & ~3;
- mask = 0xff000000 >> ((where % 4) * 8);
- tval = reg_read(ATH724X_PCI_DEV_BASE + addr);
- tval = tval & ~mask;
- *value = (tval >> ((4 - (where % 4))*8));
- break;
- case 2:
- addr = where & ~3;
- mask = 0xffff0000 >> ((where % 4)*8);
- tval = reg_read(ATH724X_PCI_DEV_BASE + addr);
- tval = tval & ~mask;
- *value = (tval >> ((4 - (where % 4))*8));
- break;
- case 4:
- *value = reg_read(ATH724X_PCI_DEV_BASE + where);
- break;
- default:
- spin_unlock_irqrestore(&ath724x_pci_lock, flags);
-
- return PCIBIOS_BAD_REGISTER_NUMBER;
- }
-
- spin_unlock_irqrestore(&ath724x_pci_lock, flags);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int ath724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
- int size, uint32_t value)
-{
- unsigned long flags, tval, addr, mask;
-
- if (devfn)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (where & (size - 1))
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- spin_lock_irqsave(&ath724x_pci_lock, flags);
-
- switch (size) {
- case 1:
- addr = (ATH724X_PCI_DEV_BASE + where) & ~3;
- mask = 0xff000000 >> ((where % 4)*8);
- tval = reg_read(addr);
- tval = tval & ~mask;
- tval |= (value << ((4 - (where % 4))*8)) & mask;
- reg_write(addr, tval);
- break;
- case 2:
- addr = (ATH724X_PCI_DEV_BASE + where) & ~3;
- mask = 0xffff0000 >> ((where % 4)*8);
- tval = reg_read(addr);
- tval = tval & ~mask;
- tval |= (value << ((4 - (where % 4))*8)) & mask;
- reg_write(addr, tval);
- break;
- case 4:
- reg_write((ATH724X_PCI_DEV_BASE + where), value);
- break;
- default:
- spin_unlock_irqrestore(&ath724x_pci_lock, flags);
-
- return PCIBIOS_BAD_REGISTER_NUMBER;
- }
-
- spin_unlock_irqrestore(&ath724x_pci_lock, flags);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static struct pci_ops ath724x_pci_ops = {
- .read = ath724x_pci_read,
- .write = ath724x_pci_write,
-};
-
-static struct resource ath724x_io_resource = {
- .name = "PCI IO space",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_IO,
-};
-
-static struct resource ath724x_mem_resource = {
- .name = "PCI memory space",
- .start = ATH724X_PCI_MEM_BASE,
- .end = ATH724X_PCI_MEM_BASE + ATH724X_PCI_MEM_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
-static struct pci_controller ath724x_pci_controller = {
- .pci_ops = &ath724x_pci_ops,
- .io_resource = &ath724x_io_resource,
- .mem_resource = &ath724x_mem_resource,
-};
-
-void ath724x_pci_add_data(struct ath724x_pci_data *data, int size)
-{
- pci_data = data;
- pci_data_size = size;
-}
-
-int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
-{
- unsigned int devfn = dev->devfn;
- int irq = -1;
-
- if (devfn > pci_data_size - 1)
- return irq;
-
- irq = pci_data[devfn].irq;
-
- return irq;
-}
-
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
- unsigned int devfn = dev->devfn;
-
- if (devfn > pci_data_size - 1)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- dev->dev.platform_data = pci_data[devfn].pdata;
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int __init ath724x_pcibios_init(void)
-{
- register_pci_controller(&ath724x_pci_controller);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-arch_initcall(ath724x_pcibios_init);
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index 030c77e7926..ea453532a33 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -13,8 +13,12 @@
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
-#include <linux/export.h>
-#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
#include <asm/pci.h>
#include <asm/gpio.h>
@@ -22,17 +26,9 @@
#include <lantiq_soc.h>
#include <lantiq_irq.h>
-#include <lantiq_platform.h>
#include "pci-lantiq.h"
-#define LTQ_PCI_CFG_BASE 0x17000000
-#define LTQ_PCI_CFG_SIZE 0x00008000
-#define LTQ_PCI_MEM_BASE 0x18000000
-#define LTQ_PCI_MEM_SIZE 0x02000000
-#define LTQ_PCI_IO_BASE 0x1AE00000
-#define LTQ_PCI_IO_SIZE 0x00200000
-
#define PCI_CR_FCI_ADDR_MAP0 0x00C0
#define PCI_CR_FCI_ADDR_MAP1 0x00C4
#define PCI_CR_FCI_ADDR_MAP2 0x00C8
@@ -68,79 +64,27 @@
#define ltq_pci_cfg_w32(x, y) ltq_w32((x), ltq_pci_mapped_cfg + (y))
#define ltq_pci_cfg_r32(x) ltq_r32(ltq_pci_mapped_cfg + (x))
-struct ltq_pci_gpio_map {
- int pin;
- int alt0;
- int alt1;
- int dir;
- char *name;
-};
-
-/* the pci core can make use of the following gpios */
-static struct ltq_pci_gpio_map ltq_pci_gpio_map[] = {
- { 0, 1, 0, 0, "pci-exin0" },
- { 1, 1, 0, 0, "pci-exin1" },
- { 2, 1, 0, 0, "pci-exin2" },
- { 39, 1, 0, 0, "pci-exin3" },
- { 10, 1, 0, 0, "pci-exin4" },
- { 9, 1, 0, 0, "pci-exin5" },
- { 30, 1, 0, 1, "pci-gnt1" },
- { 23, 1, 0, 1, "pci-gnt2" },
- { 19, 1, 0, 1, "pci-gnt3" },
- { 38, 1, 0, 1, "pci-gnt4" },
- { 29, 1, 0, 0, "pci-req1" },
- { 31, 1, 0, 0, "pci-req2" },
- { 3, 1, 0, 0, "pci-req3" },
- { 37, 1, 0, 0, "pci-req4" },
-};
-
__iomem void *ltq_pci_mapped_cfg;
static __iomem void *ltq_pci_membase;
-int (*ltqpci_plat_dev_init)(struct pci_dev *dev) = NULL;
-
-/* Since the PCI REQ pins can be reused for other functionality, make it
- possible to exclude those from interpretation by the PCI controller */
-static int ltq_pci_req_mask = 0xf;
-
-static int *ltq_pci_irq_map;
-
-struct pci_ops ltq_pci_ops = {
+static int reset_gpio;
+static struct clk *clk_pci, *clk_external;
+static struct resource pci_io_resource;
+static struct resource pci_mem_resource;
+static struct pci_ops pci_ops = {
.read = ltq_pci_read_config_dword,
.write = ltq_pci_write_config_dword
};
-static struct resource pci_io_resource = {
- .name = "pci io space",
- .start = LTQ_PCI_IO_BASE,
- .end = LTQ_PCI_IO_BASE + LTQ_PCI_IO_SIZE - 1,
- .flags = IORESOURCE_IO
-};
-
-static struct resource pci_mem_resource = {
- .name = "pci memory space",
- .start = LTQ_PCI_MEM_BASE,
- .end = LTQ_PCI_MEM_BASE + LTQ_PCI_MEM_SIZE - 1,
- .flags = IORESOURCE_MEM
-};
-
-static struct pci_controller ltq_pci_controller = {
- .pci_ops = &ltq_pci_ops,
+static struct pci_controller pci_controller = {
+ .pci_ops = &pci_ops,
.mem_resource = &pci_mem_resource,
.mem_offset = 0x00000000UL,
.io_resource = &pci_io_resource,
.io_offset = 0x00000000UL,
};
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
- if (ltqpci_plat_dev_init)
- return ltqpci_plat_dev_init(dev);
-
- return 0;
-}
-
-static u32 ltq_calc_bar11mask(void)
+static inline u32 ltq_calc_bar11mask(void)
{
u32 mem, bar11mask;
@@ -151,48 +95,42 @@ static u32 ltq_calc_bar11mask(void)
return bar11mask;
}
-static void ltq_pci_setup_gpio(int gpio)
-{
- int i;
- for (i = 0; i < ARRAY_SIZE(ltq_pci_gpio_map); i++) {
- if (gpio & (1 << i)) {
- ltq_gpio_request(ltq_pci_gpio_map[i].pin,
- ltq_pci_gpio_map[i].alt0,
- ltq_pci_gpio_map[i].alt1,
- ltq_pci_gpio_map[i].dir,
- ltq_pci_gpio_map[i].name);
- }
- }
- ltq_gpio_request(21, 0, 0, 1, "pci-reset");
- ltq_pci_req_mask = (gpio >> PCI_REQ_SHIFT) & PCI_REQ_MASK;
-}
-
-static int __devinit ltq_pci_startup(struct ltq_pci_data *conf)
+static int __devinit ltq_pci_startup(struct platform_device *pdev)
{
+ struct device_node *node = pdev->dev.of_node;
+ const __be32 *req_mask, *bus_clk;
u32 temp_buffer;
- /* set clock to 33Mhz */
- if (ltq_is_ar9()) {
- ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0x1f00000, LTQ_CGU_IFCCR);
- ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0xe00000, LTQ_CGU_IFCCR);
- } else {
- ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR);
- ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR);
+ /* get our clocks */
+ clk_pci = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk_pci)) {
+ dev_err(&pdev->dev, "failed to get pci clock\n");
+ return PTR_ERR(clk_pci);
}
- /* external or internal clock ? */
- if (conf->clock) {
- ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~(1 << 16),
- LTQ_CGU_IFCCR);
- ltq_cgu_w32((1 << 30), LTQ_CGU_PCICR);
- } else {
- ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | (1 << 16),
- LTQ_CGU_IFCCR);
- ltq_cgu_w32((1 << 31) | (1 << 30), LTQ_CGU_PCICR);
+ clk_external = clk_get(&pdev->dev, "external");
+ if (IS_ERR(clk_external)) {
+ clk_put(clk_pci);
+ dev_err(&pdev->dev, "failed to get external pci clock\n");
+ return PTR_ERR(clk_external);
}
- /* setup pci clock and gpis used by pci */
- ltq_pci_setup_gpio(conf->gpio);
+ /* read the bus speed that we want */
+ bus_clk = of_get_property(node, "lantiq,bus-clock", NULL);
+ if (bus_clk)
+ clk_set_rate(clk_pci, *bus_clk);
+
+ /* and enable the clocks */
+ clk_enable(clk_pci);
+ if (of_find_property(node, "lantiq,external-clock", NULL))
+ clk_enable(clk_external);
+ else
+ clk_disable(clk_external);
+
+ /* setup reset gpio used by pci */
+ reset_gpio = of_get_named_gpio(node, "gpio-reset", 0);
+ if (reset_gpio > 0)
+ devm_gpio_request(&pdev->dev, reset_gpio, "pci-reset");
/* enable auto-switching between PCI and EBU */
ltq_pci_w32(0xa, PCI_CR_CLK_CTRL);
@@ -205,7 +143,12 @@ static int __devinit ltq_pci_startup(struct ltq_pci_data *conf)
/* enable external 2 PCI masters */
temp_buffer = ltq_pci_r32(PCI_CR_PC_ARB);
- temp_buffer &= (~(ltq_pci_req_mask << 16));
+ /* setup the request mask */
+ req_mask = of_get_property(node, "req-mask", NULL);
+ if (req_mask)
+ temp_buffer &= ~((*req_mask & 0xf) << 16);
+ else
+ temp_buffer &= ~0xf0000;
/* enable internal arbiter */
temp_buffer |= (1 << INTERNAL_ARB_ENABLE_BIT);
 /* enable internal PCI master request */
@@ -249,47 +192,55 @@ static int __devinit ltq_pci_startup(struct ltq_pci_data *conf)
ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, LTQ_EBU_PCC_IEN);
/* toggle reset pin */
- __gpio_set_value(21, 0);
- wmb();
- mdelay(1);
- __gpio_set_value(21, 1);
- return 0;
-}
-
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- if (ltq_pci_irq_map[slot])
- return ltq_pci_irq_map[slot];
- printk(KERN_ERR "lq_pci: trying to map irq for unknown slot %d\n",
- slot);
-
+ if (reset_gpio > 0) {
+ __gpio_set_value(reset_gpio, 0);
+ wmb();
+ mdelay(1);
+ __gpio_set_value(reset_gpio, 1);
+ }
return 0;
}
static int __devinit ltq_pci_probe(struct platform_device *pdev)
{
- struct ltq_pci_data *ltq_pci_data =
- (struct ltq_pci_data *) pdev->dev.platform_data;
+ struct resource *res_cfg, *res_bridge;
pci_clear_flags(PCI_PROBE_ONLY);
- ltq_pci_irq_map = ltq_pci_data->irq;
- ltq_pci_membase = ioremap_nocache(PCI_CR_BASE_ADDR, PCI_CR_SIZE);
- ltq_pci_mapped_cfg =
- ioremap_nocache(LTQ_PCI_CFG_BASE, LTQ_PCI_CFG_BASE);
- ltq_pci_controller.io_map_base =
- (unsigned long)ioremap(LTQ_PCI_IO_BASE, LTQ_PCI_IO_SIZE - 1);
- ltq_pci_startup(ltq_pci_data);
- register_pci_controller(&ltq_pci_controller);
+ res_cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ res_bridge = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res_cfg || !res_bridge) {
+ dev_err(&pdev->dev, "missing memory reources\n");
+ return -EINVAL;
+ }
+
+ ltq_pci_membase = devm_request_and_ioremap(&pdev->dev, res_bridge);
+ ltq_pci_mapped_cfg = devm_request_and_ioremap(&pdev->dev, res_cfg);
+
+ if (!ltq_pci_membase || !ltq_pci_mapped_cfg) {
+ dev_err(&pdev->dev, "failed to remap resources\n");
+ return -ENOMEM;
+ }
+
+ ltq_pci_startup(pdev);
+
+ pci_load_of_ranges(&pci_controller, pdev->dev.of_node);
+ register_pci_controller(&pci_controller);
return 0;
}
-static struct platform_driver
-ltq_pci_driver = {
+static const struct of_device_id ltq_pci_match[] = {
+ { .compatible = "lantiq,pci-xway" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltq_pci_match);
+
+static struct platform_driver ltq_pci_driver = {
.probe = ltq_pci_probe,
.driver = {
- .name = "ltq_pci",
+ .name = "pci-xway",
.owner = THIS_MODULE,
+ .of_match_table = ltq_pci_match,
},
};
@@ -297,7 +248,7 @@ int __init pcibios_init(void)
{
int ret = platform_driver_register(&ltq_pci_driver);
if (ret)
- printk(KERN_INFO "ltq_pci: Error registering platfom driver!");
+ pr_info("pci-xway: Error registering platform driver!");
return ret;
}
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index 0514866fa92..271e8c4a54c 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
+#include <linux/of_address.h>
#include <asm/cpu-info.h>
@@ -114,9 +115,63 @@ static void __devinit pcibios_scanbus(struct pci_controller *hose)
pci_bus_assign_resources(bus);
pci_enable_bridges(bus);
}
+ bus->dev.of_node = hose->of_node;
}
}
+#ifdef CONFIG_OF
+void __devinit pci_load_of_ranges(struct pci_controller *hose,
+ struct device_node *node)
+{
+ const __be32 *ranges;
+ int rlen;
+ int pna = of_n_addr_cells(node);
+ int np = pna + 5;
+
+ pr_info("PCI host bridge %s ranges:\n", node->full_name);
+ ranges = of_get_property(node, "ranges", &rlen);
+ if (ranges == NULL)
+ return;
+ hose->of_node = node;
+
+ while ((rlen -= np * 4) >= 0) {
+ u32 pci_space;
+ struct resource *res = NULL;
+ u64 addr, size;
+
+ pci_space = be32_to_cpup(&ranges[0]);
+ addr = of_translate_address(node, ranges + 3);
+ size = of_read_number(ranges + pna + 3, 2);
+ ranges += np;
+ switch ((pci_space >> 24) & 0x3) {
+ case 1: /* PCI IO space */
+ pr_info(" IO 0x%016llx..0x%016llx\n",
+ addr, addr + size - 1);
+ hose->io_map_base =
+ (unsigned long)ioremap(addr, size);
+ res = hose->io_resource;
+ res->flags = IORESOURCE_IO;
+ break;
+ case 2: /* PCI Memory space */
+ case 3: /* PCI 64 bits Memory space */
+ pr_info(" MEM 0x%016llx..0x%016llx\n",
+ addr, addr + size - 1);
+ res = hose->mem_resource;
+ res->flags = IORESOURCE_MEM;
+ break;
+ }
+ if (res != NULL) {
+ res->start = addr;
+ res->name = node->full_name;
+ res->end = res->start + size - 1;
+ res->parent = NULL;
+ res->sibling = NULL;
+ res->child = NULL;
+ }
+ }
+}
+#endif
+
static DEFINE_MUTEX(pci_scan_mutex);
void __devinit register_pci_controller(struct pci_controller *hose)
diff --git a/arch/mips/pmc-sierra/yosemite/Makefile b/arch/mips/pmc-sierra/yosemite/Makefile
index 02f5fb94ea2..5af95ec3319 100644
--- a/arch/mips/pmc-sierra/yosemite/Makefile
+++ b/arch/mips/pmc-sierra/yosemite/Makefile
@@ -5,5 +5,3 @@
obj-y += irq.o prom.o py-console.o setup.o
obj-$(CONFIG_SMP) += smp.o
-
-ccflags-y := -Werror
diff --git a/arch/mips/pmc-sierra/yosemite/setup.c b/arch/mips/pmc-sierra/yosemite/setup.c
index 3498ac9c35a..b6472fc88a9 100644
--- a/arch/mips/pmc-sierra/yosemite/setup.c
+++ b/arch/mips/pmc-sierra/yosemite/setup.c
@@ -27,6 +27,7 @@
#include <linux/bcd.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
diff --git a/arch/mips/powertv/Makefile b/arch/mips/powertv/Makefile
index 348d2e850ef..39ca9f8d63a 100644
--- a/arch/mips/powertv/Makefile
+++ b/arch/mips/powertv/Makefile
@@ -27,5 +27,3 @@ obj-y += init.o ioremap.o memory.o powertv_setup.o reset.o time.o \
asic/ pci/
obj-$(CONFIG_USB) += powertv-usb.o
-
-ccflags-y := -Wall
diff --git a/arch/mips/powertv/asic/Makefile b/arch/mips/powertv/asic/Makefile
index d810a33182a..35dcc53eb25 100644
--- a/arch/mips/powertv/asic/Makefile
+++ b/arch/mips/powertv/asic/Makefile
@@ -19,5 +19,3 @@
obj-y += asic-calliope.o asic-cronus.o asic-gaia.o asic-zeus.o \
asic_devices.o asic_int.o irq_asic.o prealloc-calliope.o \
prealloc-cronus.o prealloc-cronuslite.o prealloc-gaia.o prealloc-zeus.o
-
-ccflags-y := -Wall -Werror
diff --git a/arch/mips/powertv/pci/Makefile b/arch/mips/powertv/pci/Makefile
index 5783201cd2c..2610a6af5b2 100644
--- a/arch/mips/powertv/pci/Makefile
+++ b/arch/mips/powertv/pci/Makefile
@@ -17,5 +17,3 @@
#
obj-$(CONFIG_PCI) += fixup-powertv.o
-
-ccflags-y := -Wall -Werror
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
index a969eb82663..ea774285e6c 100644
--- a/arch/mips/rb532/devices.c
+++ b/arch/mips/rb532/devices.c
@@ -15,6 +15,7 @@
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/ctype.h>
#include <linux/string.h>
diff --git a/arch/mips/sni/setup.c b/arch/mips/sni/setup.c
index d16b462154c..413f17f8e89 100644
--- a/arch/mips/sni/setup.c
+++ b/arch/mips/sni/setup.c
@@ -10,6 +10,7 @@
*/
#include <linux/eisa.h>
#include <linux/init.h>
+#include <linux/export.h>
#include <linux/console.h>
#include <linux/fb.h>
#include <linux/screen_info.h>
diff --git a/arch/mn10300/include/asm/kvm_para.h b/arch/mn10300/include/asm/kvm_para.h
new file mode 100644
index 00000000000..14fab8f0b95
--- /dev/null
+++ b/arch/mn10300/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 4932247d078..49765b53f63 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -19,6 +19,8 @@ config OPENRISC
select GENERIC_CPU_DEVICES
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
config MMU
def_bool y
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index c936483bc8e..3f35c38d7b6 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -66,3 +66,4 @@ generic-y += topology.h
generic-y += types.h
generic-y += ucontext.h
generic-y += user.h
+generic-y += word-at-a-time.h
diff --git a/arch/openrisc/include/asm/kvm_para.h b/arch/openrisc/include/asm/kvm_para.h
new file mode 100644
index 00000000000..14fab8f0b95
--- /dev/null
+++ b/arch/openrisc/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index f5abaa0ffc3..ab2e7a198a4 100644
--- a/arch/openrisc/include/asm/uaccess.h
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -313,42 +313,12 @@ clear_user(void *addr, unsigned long size)
return size;
}
-extern int __strncpy_from_user(char *dst, const char *src, long count);
+#define user_addr_max() \
+ (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
-static inline long strncpy_from_user(char *dst, const char *src, long count)
-{
- if (access_ok(VERIFY_READ, src, 1))
- return __strncpy_from_user(dst, src, count);
- return -EFAULT;
-}
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 for error
- */
-
-extern int __strnlen_user(const char *str, long len, unsigned long top);
-
-/*
- * Returns the length of the string at str (including the null byte),
- * or 0 if we hit a page we can't access,
- * or something > len if we didn't find a null byte.
- *
- * The `top' parameter to __strnlen_user is to make sure that
- * we can never overflow from the user area into kernel space.
- */
-static inline long strnlen_user(const char __user *str, long len)
-{
- unsigned long top = (unsigned long)get_fs();
- unsigned long res = 0;
-
- if (__addr_ok(str))
- res = __strnlen_user(str, len, top);
-
- return res;
-}
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
-#define strlen_user(str) strnlen_user(str, TASK_SIZE-1)
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
#endif /* __ASM_OPENRISC_UACCESS_H */
diff --git a/arch/openrisc/lib/string.S b/arch/openrisc/lib/string.S
index 465f04bc7de..c09fee7dec1 100644
--- a/arch/openrisc/lib/string.S
+++ b/arch/openrisc/lib/string.S
@@ -103,102 +103,3 @@ __clear_user:
.section __ex_table, "a"
.long 9b, 99b // write fault
.previous
-
-/*
- * long strncpy_from_user(char *dst, const char *src, long count)
- *
- *
- */
- .global __strncpy_from_user
-__strncpy_from_user:
- l.addi r1,r1,-16
- l.sw 0(r1),r6
- l.sw 4(r1),r5
- l.sw 8(r1),r4
- l.sw 12(r1),r3
-
- l.addi r11,r5,0
-2: l.sfeq r5,r0
- l.bf 1f
- l.addi r5,r5,-1
-8: l.lbz r6,0(r4)
- l.sfeq r6,r0
- l.bf 1f
-9: l.sb 0(r3),r6
- l.addi r3,r3,1
- l.j 2b
- l.addi r4,r4,1
-1:
- l.lwz r6,0(r1)
- l.addi r5,r5,1
- l.sub r11,r11,r5 // r11 holds the return value
-
- l.lwz r6,0(r1)
- l.lwz r5,4(r1)
- l.lwz r4,8(r1)
- l.lwz r3,12(r1)
- l.jr r9
- l.addi r1,r1,16
-
- .section .fixup, "ax"
-99:
- l.movhi r11,hi(-EFAULT)
- l.ori r11,r11,lo(-EFAULT)
-
- l.lwz r6,0(r1)
- l.lwz r5,4(r1)
- l.lwz r4,8(r1)
- l.lwz r3,12(r1)
- l.jr r9
- l.addi r1,r1,16
- .previous
-
- .section __ex_table, "a"
- .long 8b, 99b // read fault
- .previous
-
-/*
- * extern int __strnlen_user(const char *str, long len, unsigned long top);
- *
- *
- * RTRN: - length of a string including NUL termination character
- * - on page fault 0
- */
-
- .global __strnlen_user
-__strnlen_user:
- l.addi r1,r1,-8
- l.sw 0(r1),r6
- l.sw 4(r1),r3
-
- l.addi r11,r0,0
-2: l.sfeq r11,r4
- l.bf 1f
- l.addi r11,r11,1
-8: l.lbz r6,0(r3)
- l.sfeq r6,r0
- l.bf 1f
- l.sfgeu r3,r5 // are we over the top ?
- l.bf 99f
- l.j 2b
- l.addi r3,r3,1
-
-1:
- l.lwz r6,0(r1)
- l.lwz r3,4(r1)
- l.jr r9
- l.addi r1,r1,8
-
- .section .fixup, "ax"
-99:
- l.addi r11,r0,0
-
- l.lwz r6,0(r1)
- l.lwz r3,4(r1)
- l.jr r9
- l.addi r1,r1,8
- .previous
-
- .section __ex_table, "a"
- .long 8b, 99b // read fault
- .previous
diff --git a/arch/parisc/include/asm/kvm_para.h b/arch/parisc/include/asm/kvm_para.h
new file mode 100644
index 00000000000..14fab8f0b95
--- /dev/null
+++ b/arch/parisc/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 00b9874e224..050cb371a69 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -135,6 +135,8 @@ config PPC
select GENERIC_CMOS_UPDATE
select GENERIC_TIME_VSYSCALL
select GENERIC_CLOCKEVENTS
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
config EARLY_PRINTK
bool
diff --git a/arch/powerpc/boot/dts/mpc8569mds.dts b/arch/powerpc/boot/dts/mpc8569mds.dts
index 7e283c891b7..fe0d60935e9 100644
--- a/arch/powerpc/boot/dts/mpc8569mds.dts
+++ b/arch/powerpc/boot/dts/mpc8569mds.dts
@@ -119,6 +119,7 @@
sdhc@2e000 {
status = "disabled";
sdhci,1-bit-only;
+ bus-width = <1>;
};
par_io@e0100 {
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index b9219e99bd2..50d82c8a037 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -168,6 +168,7 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_LWSYNC ASM_CONST(0x0000000008000000)
#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000010000000)
#define CPU_FTR_INDEXED_DCR ASM_CONST(0x0000000020000000)
+#define CPU_FTR_EMB_HV ASM_CONST(0x0000000040000000)
/*
* Add the 64-bit processor unique features in the top half of the word;
@@ -376,7 +377,8 @@ extern const char *powerpc_base_platform;
#define CPU_FTRS_47X (CPU_FTRS_440x6)
#define CPU_FTRS_E200 (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \
CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \
- CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE)
+ CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE | \
+ CPU_FTR_DEBUG_LVL_EXC)
#define CPU_FTRS_E500 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
CPU_FTR_NOEXECUTE)
@@ -385,15 +387,15 @@ extern const char *powerpc_base_platform;
CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_E500MC (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
- CPU_FTR_DBELL)
+ CPU_FTR_DBELL | CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
- CPU_FTR_DEBUG_LVL_EXC)
+ CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
#define CPU_FTRS_E6500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
- CPU_FTR_DEBUG_LVL_EXC)
+ CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
/* 64-bit CPUs */
@@ -486,8 +488,10 @@ enum {
CPU_FTRS_E200 |
#endif
#ifdef CONFIG_E500
- CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC |
- CPU_FTRS_E5500 | CPU_FTRS_E6500 |
+ CPU_FTRS_E500 | CPU_FTRS_E500_2 |
+#endif
+#ifdef CONFIG_PPC_E500MC
+ CPU_FTRS_E500MC | CPU_FTRS_E5500 | CPU_FTRS_E6500 |
#endif
0,
};
@@ -531,9 +535,12 @@ enum {
CPU_FTRS_E200 &
#endif
#ifdef CONFIG_E500
- CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC &
- CPU_FTRS_E5500 & CPU_FTRS_E6500 &
+ CPU_FTRS_E500 & CPU_FTRS_E500_2 &
+#endif
+#ifdef CONFIG_PPC_E500MC
+ CPU_FTRS_E500MC & CPU_FTRS_E5500 & CPU_FTRS_E6500 &
#endif
+ ~CPU_FTR_EMB_HV & /* can be removed at runtime */
CPU_FTRS_POSSIBLE,
};
#endif /* __powerpc64__ */
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index efa74ac44a3..154c067761b 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -19,6 +19,9 @@
#define PPC_DBELL_MSG_BRDCAST (0x04000000)
#define PPC_DBELL_TYPE(x) (((x) & 0xf) << (63-36))
+#define PPC_DBELL_TYPE_MASK PPC_DBELL_TYPE(0xf)
+#define PPC_DBELL_LPID(x) ((x) << (63 - 49))
+#define PPC_DBELL_PIR_MASK 0x3fff
enum ppc_dbell {
PPC_DBELL = 0, /* doorbell */
PPC_DBELL_CRIT = 1, /* critical doorbell */
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 61225238819..423cf9eaf4a 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -133,6 +133,16 @@
#define H_PP1 (1UL<<(63-62))
#define H_PP2 (1UL<<(63-63))
+/* Flags for H_REGISTER_VPA subfunction field */
+#define H_VPA_FUNC_SHIFT (63-18) /* Bit posn of subfunction code */
+#define H_VPA_FUNC_MASK 7UL
+#define H_VPA_REG_VPA 1UL /* Register Virtual Processor Area */
+#define H_VPA_REG_DTL 2UL /* Register Dispatch Trace Log */
+#define H_VPA_REG_SLB 3UL /* Register SLB shadow buffer */
+#define H_VPA_DEREG_VPA 5UL /* Deregister Virtual Processor Area */
+#define H_VPA_DEREG_DTL 6UL /* Deregister Dispatch Trace Log */
+#define H_VPA_DEREG_SLB 7UL /* Deregister SLB shadow buffer */
+
/* VASI States */
#define H_VASI_INVALID 0
#define H_VASI_ENABLED 1
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 51010bfc792..c9aac24b02e 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -33,6 +33,7 @@
extern void __replay_interrupt(unsigned int vector);
extern void timer_interrupt(struct pt_regs *);
+extern void performance_monitor_exception(struct pt_regs *regs);
#ifdef CONFIG_PPC64
#include <asm/paca.h>
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index b921c3f4892..1bea4d8ea6f 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -277,6 +277,7 @@ struct kvm_sync_regs {
#define KVM_CPU_E500V2 2
#define KVM_CPU_3S_32 3
#define KVM_CPU_3S_64 4
+#define KVM_CPU_E500MC 5
/* for KVM_CAP_SPAPR_TCE */
struct kvm_create_spapr_tce {
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 7b1f0e0fc65..76fdcfef088 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -20,6 +20,16 @@
#ifndef __POWERPC_KVM_ASM_H__
#define __POWERPC_KVM_ASM_H__
+#ifdef __ASSEMBLY__
+#ifdef CONFIG_64BIT
+#define PPC_STD(sreg, offset, areg) std sreg, (offset)(areg)
+#define PPC_LD(treg, offset, areg) ld treg, (offset)(areg)
+#else
+#define PPC_STD(sreg, offset, areg) stw sreg, (offset+4)(areg)
+#define PPC_LD(treg, offset, areg) lwz treg, (offset+4)(areg)
+#endif
+#endif
+
/* IVPR must be 64KiB-aligned. */
#define VCPU_SIZE_ORDER 4
#define VCPU_SIZE_LOG (VCPU_SIZE_ORDER + 12)
@@ -48,6 +58,14 @@
#define BOOKE_INTERRUPT_SPE_FP_DATA 33
#define BOOKE_INTERRUPT_SPE_FP_ROUND 34
#define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
+#define BOOKE_INTERRUPT_DOORBELL 36
+#define BOOKE_INTERRUPT_DOORBELL_CRITICAL 37
+
+/* booke_hv */
+#define BOOKE_INTERRUPT_GUEST_DBELL 38
+#define BOOKE_INTERRUPT_GUEST_DBELL_CRIT 39
+#define BOOKE_INTERRUPT_HV_SYSCALL 40
+#define BOOKE_INTERRUPT_HV_PRIV 41
/* book3s */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index fd07f43d662..f0e0c6a66d9 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -453,4 +453,7 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
#define INS_DCBZ 0x7c0007ec
+/* LPIDs we support with this build -- runtime limit may be lower */
+#define KVMPPC_NR_LPIDS (LPID_RSVD + 1)
+
#endif /* __ASM_KVM_BOOK3S_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 1f2f5b6156b..88609b23b77 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -79,6 +79,9 @@ struct kvmppc_host_state {
u8 napping;
#ifdef CONFIG_KVM_BOOK3S_64_HV
+ u8 hwthread_req;
+ u8 hwthread_state;
+
struct kvm_vcpu *kvm_vcpu;
struct kvmppc_vcore *kvm_vcore;
unsigned long xics_phys;
@@ -122,4 +125,9 @@ struct kvmppc_book3s_shadow_vcpu {
#endif /*__ASSEMBLY__ */
+/* Values for kvm_state */
+#define KVM_HWTHREAD_IN_KERNEL 0
+#define KVM_HWTHREAD_IN_NAP 1
+#define KVM_HWTHREAD_IN_KVM 2
+
#endif /* __ASM_KVM_BOOK3S_ASM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index a90e0918877..b7cd3356a53 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -23,6 +23,9 @@
#include <linux/types.h>
#include <linux/kvm_host.h>
+/* LPIDs we support with this build -- runtime limit may be lower */
+#define KVMPPC_NR_LPIDS 64
+
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
vcpu->arch.gpr[num] = val;
diff --git a/arch/powerpc/include/asm/kvm_booke_hv_asm.h b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
new file mode 100644
index 00000000000..30a600fa1b6
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_KVM_BOOKE_HV_ASM_H
+#define ASM_KVM_BOOKE_HV_ASM_H
+
+#ifdef __ASSEMBLY__
+
+/*
+ * All exceptions from guest state must go through KVM
+ * (except for those which are delivered directly to the guest) --
+ * there are no exceptions for which we fall through directly to
+ * the normal host handler.
+ *
+ * Expected inputs (normal exceptions):
+ * SCRATCH0 = saved r10
+ * r10 = thread struct
+ * r11 = appropriate SRR1 variant (currently used as scratch)
+ * r13 = saved CR
+ * *(r10 + THREAD_NORMSAVE(0)) = saved r11
+ * *(r10 + THREAD_NORMSAVE(2)) = saved r13
+ *
+ * Expected inputs (crit/mcheck/debug exceptions):
+ * appropriate SCRATCH = saved r8
+ * r8 = exception level stack frame
+ * r9 = *(r8 + _CCR) = saved CR
+ * r11 = appropriate SRR1 variant (currently used as scratch)
+ * *(r8 + GPR9) = saved r9
+ * *(r8 + GPR10) = saved r10 (r10 not yet clobbered)
+ * *(r8 + GPR11) = saved r11
+ */
+.macro DO_KVM intno srr1
+#ifdef CONFIG_KVM_BOOKE_HV
+BEGIN_FTR_SECTION
+ mtocrf 0x80, r11 /* check MSR[GS] without clobbering reg */
+ bf 3, kvmppc_resume_\intno\()_\srr1
+ b kvmppc_handler_\intno\()_\srr1
+kvmppc_resume_\intno\()_\srr1:
+END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+#endif
+.endm
+
+#endif /*__ASSEMBLY__ */
+#endif /* ASM_KVM_BOOKE_HV_ASM_H */
diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h
deleted file mode 100644
index 8cd50a51427..00000000000
--- a/arch/powerpc/include/asm/kvm_e500.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Author: Yu Liu, <yu.liu@freescale.com>
- *
- * Description:
- * This file is derived from arch/powerpc/include/asm/kvm_44x.h,
- * by Hollis Blanchard <hollisb@us.ibm.com>.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_KVM_E500_H__
-#define __ASM_KVM_E500_H__
-
-#include <linux/kvm_host.h>
-
-#define BOOKE_INTERRUPT_SIZE 36
-
-#define E500_PID_NUM 3
-#define E500_TLB_NUM 2
-
-#define E500_TLB_VALID 1
-#define E500_TLB_DIRTY 2
-
-struct tlbe_ref {
- pfn_t pfn;
- unsigned int flags; /* E500_TLB_* */
-};
-
-struct tlbe_priv {
- struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
-};
-
-struct vcpu_id_table;
-
-struct kvmppc_e500_tlb_params {
- int entries, ways, sets;
-};
-
-struct kvmppc_vcpu_e500 {
- /* Unmodified copy of the guest's TLB -- shared with host userspace. */
- struct kvm_book3e_206_tlb_entry *gtlb_arch;
-
- /* Starting entry number in gtlb_arch[] */
- int gtlb_offset[E500_TLB_NUM];
-
- /* KVM internal information associated with each guest TLB entry */
- struct tlbe_priv *gtlb_priv[E500_TLB_NUM];
-
- struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM];
-
- unsigned int gtlb_nv[E500_TLB_NUM];
-
- /*
- * information associated with each host TLB entry --
- * TLB1 only for now. If/when guest TLB1 entries can be
- * mapped with host TLB0, this will be used for that too.
- *
- * We don't want to use this for guest TLB0 because then we'd
- * have the overhead of doing the translation again even if
- * the entry is still in the guest TLB (e.g. we swapped out
- * and back, and our host TLB entries got evicted).
- */
- struct tlbe_ref *tlb_refs[E500_TLB_NUM];
- unsigned int host_tlb1_nv;
-
- u32 host_pid[E500_PID_NUM];
- u32 pid[E500_PID_NUM];
- u32 svr;
-
- /* vcpu id table */
- struct vcpu_id_table *idt;
-
- u32 l1csr0;
- u32 l1csr1;
- u32 hid0;
- u32 hid1;
- u32 tlb0cfg;
- u32 tlb1cfg;
- u64 mcar;
-
- struct page **shared_tlb_pages;
- int num_shared_tlb_pages;
-
- struct kvm_vcpu vcpu;
-};
-
-static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
-{
- return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu);
-}
-
-#endif /* __ASM_KVM_E500_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 52eb9c1f4fe..d848cdc4971 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -82,7 +82,7 @@ struct kvm_vcpu;
struct lppaca;
struct slb_shadow;
-struct dtl;
+struct dtl_entry;
struct kvm_vm_stat {
u32 remote_tlb_flush;
@@ -106,6 +106,8 @@ struct kvm_vcpu_stat {
u32 dec_exits;
u32 ext_intr_exits;
u32 halt_wakeup;
+ u32 dbell_exits;
+ u32 gdbell_exits;
#ifdef CONFIG_PPC_BOOK3S
u32 pf_storage;
u32 pf_instruc;
@@ -140,6 +142,7 @@ enum kvm_exit_types {
EMULATED_TLBSX_EXITS,
EMULATED_TLBWE_EXITS,
EMULATED_RFI_EXITS,
+ EMULATED_RFCI_EXITS,
DEC_EXITS,
EXT_INTR_EXITS,
HALT_WAKEUP,
@@ -147,6 +150,8 @@ enum kvm_exit_types {
FP_UNAVAIL,
DEBUG_EXITS,
TIMEINGUEST,
+ DBELL_EXITS,
+ GDBELL_EXITS,
__NUMBER_OF_KVM_EXIT_TYPES
};
@@ -217,10 +222,10 @@ struct kvm_arch_memory_slot {
};
struct kvm_arch {
+ unsigned int lpid;
#ifdef CONFIG_KVM_BOOK3S_64_HV
unsigned long hpt_virt;
struct revmap_entry *revmap;
- unsigned int lpid;
unsigned int host_lpid;
unsigned long host_lpcr;
unsigned long sdr1;
@@ -232,7 +237,6 @@ struct kvm_arch {
unsigned long vrma_slb_v;
int rma_setup_done;
int using_mmu_notifiers;
- struct list_head spapr_tce_tables;
spinlock_t slot_phys_lock;
unsigned long *slot_phys[KVM_MEM_SLOTS_NUM];
int slot_npages[KVM_MEM_SLOTS_NUM];
@@ -240,6 +244,9 @@ struct kvm_arch {
struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
struct kvmppc_linear_info *hpt_li;
#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#ifdef CONFIG_PPC_BOOK3S_64
+ struct list_head spapr_tce_tables;
+#endif
};
/*
@@ -263,6 +270,9 @@ struct kvmppc_vcore {
struct list_head runnable_threads;
spinlock_t lock;
wait_queue_head_t wq;
+ u64 stolen_tb;
+ u64 preempt_tb;
+ struct kvm_vcpu *runner;
};
#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
@@ -274,6 +284,19 @@ struct kvmppc_vcore {
#define VCORE_EXITING 2
#define VCORE_SLEEPING 3
+/*
+ * Struct used to manage memory for a virtual processor area
+ * registered by a PAPR guest. There are three types of area
+ * that a guest can register.
+ */
+struct kvmppc_vpa {
+ void *pinned_addr; /* Address in kernel linear mapping */
+ void *pinned_end; /* End of region */
+ unsigned long next_gpa; /* Guest phys addr for update */
+ unsigned long len; /* Number of bytes required */
+ u8 update_pending; /* 1 => update pinned_addr from next_gpa */
+};
+
struct kvmppc_pte {
ulong eaddr;
u64 vpage;
@@ -345,6 +368,17 @@ struct kvm_vcpu_arch {
u64 vsr[64];
#endif
+#ifdef CONFIG_KVM_BOOKE_HV
+ u32 host_mas4;
+ u32 host_mas6;
+ u32 shadow_epcr;
+ u32 epcr;
+ u32 shadow_msrp;
+ u32 eplc;
+ u32 epsc;
+ u32 oldpir;
+#endif
+
#ifdef CONFIG_PPC_BOOK3S
/* For Gekko paired singles */
u32 qpr[32];
@@ -370,6 +404,7 @@ struct kvm_vcpu_arch {
#endif
u32 vrsave; /* also USPRG0 */
u32 mmucr;
+ /* shadow_msr is unused for BookE HV */
ulong shadow_msr;
ulong csrr0;
ulong csrr1;
@@ -426,8 +461,12 @@ struct kvm_vcpu_arch {
ulong fault_esr;
ulong queued_dear;
ulong queued_esr;
+ u32 tlbcfg[4];
+ u32 mmucfg;
+ u32 epr;
#endif
gpa_t paddr_accessed;
+ gva_t vaddr_accessed;
u8 io_gpr; /* GPR used as IO source/target */
u8 mmio_is_bigendian;
@@ -453,11 +492,6 @@ struct kvm_vcpu_arch {
u8 prodded;
u32 last_inst;
- struct lppaca *vpa;
- struct slb_shadow *slb_shadow;
- struct dtl *dtl;
- struct dtl *dtl_end;
-
wait_queue_head_t *wqp;
struct kvmppc_vcore *vcore;
int ret;
@@ -482,6 +516,14 @@ struct kvm_vcpu_arch {
struct task_struct *run_task;
struct kvm_run *kvm_run;
pgd_t *pgdir;
+
+ spinlock_t vpa_update_lock;
+ struct kvmppc_vpa vpa;
+ struct kvmppc_vpa dtl;
+ struct dtl_entry *dtl_ptr;
+ unsigned long dtl_index;
+ u64 stolen_logged;
+ struct kvmppc_vpa slb_shadow;
#endif
};
@@ -498,4 +540,6 @@ struct kvm_vcpu_arch {
#define KVM_MMIO_REG_QPR 0x0040
#define KVM_MMIO_REG_FQPR 0x0060
+#define __KVM_HAVE_ARCH_WQP
+
#endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
index 7b754e74300..c18916bff68 100644
--- a/arch/powerpc/include/asm/kvm_para.h
+++ b/arch/powerpc/include/asm/kvm_para.h
@@ -206,6 +206,11 @@ static inline unsigned int kvm_arch_para_features(void)
return r;
}
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+
#endif /* __KERNEL__ */
#endif /* __POWERPC_KVM_PARA_H__ */
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 9d6dee0f7d4..f68c22fa2fc 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -95,7 +95,7 @@ extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
@@ -107,8 +107,10 @@ extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int op, int *advance);
-extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
-extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
+extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
+ ulong val);
+extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
+ ulong *val);
extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);
@@ -126,6 +128,8 @@ extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce *args);
+extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ unsigned long ioba, unsigned long tce);
extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
struct kvm_allocate_rma *rma);
extern struct kvmppc_linear_info *kvm_alloc_rma(void);
@@ -138,6 +142,11 @@ extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem);
+extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
+ struct kvm_ppc_smmu_info *info);
+
+extern int kvmppc_bookehv_init(void);
+extern void kvmppc_bookehv_exit(void);
/*
* Cuts out inst bits with ordering according to spec.
@@ -204,4 +213,9 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
struct kvm_dirty_tlb *cfg);
+long kvmppc_alloc_lpid(void);
+void kvmppc_claim_lpid(long lpid);
+void kvmppc_free_lpid(long lpid);
+void kvmppc_init_lpid(unsigned long nr_lpids);
+
#endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index cdb5421877e..eeabcdbc30f 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -104,6 +104,8 @@
#define MAS4_TSIZED_MASK 0x00000f80 /* Default TSIZE */
#define MAS4_TSIZED_SHIFT 7
+#define MAS5_SGS 0x80000000
+
#define MAS6_SPID0 0x3FFF0000
#define MAS6_SPID1 0x00007FFE
#define MAS6_ISIZE(x) MAS1_TSIZE(x)
@@ -118,6 +120,10 @@
#define MAS7_RPN 0xFFFFFFFF
+#define MAS8_TGS 0x80000000 /* Guest space */
+#define MAS8_VF 0x40000000 /* Virtualization Fault */
+#define MAS8_TLPID 0x000000ff
+
/* Bit definitions for MMUCFG */
#define MMUCFG_MAVN 0x00000003 /* MMU Architecture Version Number */
#define MMUCFG_MAVN_V1 0x00000000 /* v1.0 */
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 55e85631c42..413a5eaef56 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -240,6 +240,9 @@ struct thread_struct {
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
void* kvm_shadow_vcpu; /* KVM internal data */
#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
+#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
+ struct kvm_vcpu *kvm_vcpu;
+#endif
#ifdef CONFIG_PPC64
unsigned long dscr;
int dscr_inherit;
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 9d7f0fb6902..f0cb7f461b9 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -257,7 +257,9 @@
#define LPCR_LPES_SH 2
#define LPCR_RMI 0x00000002 /* real mode is cache inhibit */
#define LPCR_HDICE 0x00000001 /* Hyp Decr enable (HV,PR,EE) */
+#ifndef SPRN_LPID
#define SPRN_LPID 0x13F /* Logical Partition Identifier */
+#endif
#define LPID_RSVD 0x3ff /* Reserved LPID for partn switching */
#define SPRN_HMER 0x150 /* Hardware m? error recovery */
#define SPRN_HMEER 0x151 /* Hardware m? enable error recovery */
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 8a97aa7289d..2d916c4982c 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -56,18 +56,30 @@
#define SPRN_SPRG7W 0x117 /* Special Purpose Register General 7 Write */
#define SPRN_EPCR 0x133 /* Embedded Processor Control Register */
#define SPRN_DBCR2 0x136 /* Debug Control Register 2 */
+#define SPRN_MSRP 0x137 /* MSR Protect Register */
#define SPRN_IAC3 0x13A /* Instruction Address Compare 3 */
#define SPRN_IAC4 0x13B /* Instruction Address Compare 4 */
#define SPRN_DVC1 0x13E /* Data Value Compare Register 1 */
#define SPRN_DVC2 0x13F /* Data Value Compare Register 2 */
+#define SPRN_LPID 0x152 /* Logical Partition ID */
#define SPRN_MAS8 0x155 /* MMU Assist Register 8 */
#define SPRN_TLB0PS 0x158 /* TLB 0 Page Size Register */
#define SPRN_TLB1PS 0x159 /* TLB 1 Page Size Register */
#define SPRN_MAS5_MAS6 0x15c /* MMU Assist Register 5 || 6 */
#define SPRN_MAS8_MAS1 0x15d /* MMU Assist Register 8 || 1 */
#define SPRN_EPTCFG 0x15e /* Embedded Page Table Config */
+#define SPRN_GSPRG0 0x170 /* Guest SPRG0 */
+#define SPRN_GSPRG1 0x171 /* Guest SPRG1 */
+#define SPRN_GSPRG2 0x172 /* Guest SPRG2 */
+#define SPRN_GSPRG3 0x173 /* Guest SPRG3 */
#define SPRN_MAS7_MAS3 0x174 /* MMU Assist Register 7 || 3 */
#define SPRN_MAS0_MAS1 0x175 /* MMU Assist Register 0 || 1 */
+#define SPRN_GSRR0 0x17A /* Guest SRR0 */
+#define SPRN_GSRR1 0x17B /* Guest SRR1 */
+#define SPRN_GEPR 0x17C /* Guest EPR */
+#define SPRN_GDEAR 0x17D /* Guest DEAR */
+#define SPRN_GPIR 0x17E /* Guest PIR */
+#define SPRN_GESR 0x17F /* Guest Exception Syndrome Register */
#define SPRN_IVOR0 0x190 /* Interrupt Vector Offset Register 0 */
#define SPRN_IVOR1 0x191 /* Interrupt Vector Offset Register 1 */
#define SPRN_IVOR2 0x192 /* Interrupt Vector Offset Register 2 */
@@ -88,6 +100,13 @@
#define SPRN_IVOR39 0x1B1 /* Interrupt Vector Offset Register 39 */
#define SPRN_IVOR40 0x1B2 /* Interrupt Vector Offset Register 40 */
#define SPRN_IVOR41 0x1B3 /* Interrupt Vector Offset Register 41 */
+#define SPRN_GIVOR2 0x1B8 /* Guest IVOR2 */
+#define SPRN_GIVOR3 0x1B9 /* Guest IVOR3 */
+#define SPRN_GIVOR4 0x1BA /* Guest IVOR4 */
+#define SPRN_GIVOR8 0x1BB /* Guest IVOR8 */
+#define SPRN_GIVOR13 0x1BC /* Guest IVOR13 */
+#define SPRN_GIVOR14 0x1BD /* Guest IVOR14 */
+#define SPRN_GIVPR 0x1BF /* Guest IVPR */
#define SPRN_SPEFSCR 0x200 /* SPE & Embedded FP Status & Control */
#define SPRN_BBEAR 0x201 /* Branch Buffer Entry Address Register */
#define SPRN_BBTAR 0x202 /* Branch Buffer Target Address Register */
@@ -240,6 +259,10 @@
#define MCSR_LDG 0x00002000UL /* Guarded Load */
#define MCSR_TLBSYNC 0x00000002UL /* Multiple tlbsyncs detected */
#define MCSR_BSL2_ERR 0x00000001UL /* Backside L2 cache error */
+
+#define MSRP_UCLEP 0x04000000 /* Protect MSR[UCLE] */
+#define MSRP_DEP 0x00000200 /* Protect MSR[DE] */
+#define MSRP_PMMP 0x00000004 /* Protect MSR[PMM] */
#endif
#ifdef CONFIG_E200
@@ -594,6 +617,17 @@
#define SPRN_EPCR_DMIUH 0x00400000 /* Disable MAS Interrupt updates
* for hypervisor */
+/* Bit definitions for EPLC/EPSC */
+#define EPC_EPR 0x80000000 /* 1 = user, 0 = kernel */
+#define EPC_EPR_SHIFT 31
+#define EPC_EAS 0x40000000 /* Address Space */
+#define EPC_EAS_SHIFT 30
+#define EPC_EGS 0x20000000 /* 1 = guest, 0 = hypervisor */
+#define EPC_EGS_SHIFT 29
+#define EPC_ELPID 0x00ff0000
+#define EPC_ELPID_SHIFT 16
+#define EPC_EPID 0x00003fff
+#define EPC_EPID_SHIFT 0
/*
* The IBM-403 is an even more odd special case, as it is much
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 1a6320290d2..200d763a0a6 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -17,6 +17,7 @@ extern struct task_struct *_switch(struct thread_struct *prev,
struct thread_struct *next);
extern void giveup_fpu(struct task_struct *);
+extern void load_up_fpu(void);
extern void disable_kernel_fp(void);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 2136f58a54e..3b4b4a8da92 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -23,6 +23,7 @@
extern unsigned long tb_ticks_per_jiffy;
extern unsigned long tb_ticks_per_usec;
extern unsigned long tb_ticks_per_sec;
+extern struct clock_event_device decrementer_clockevent;
struct rtc_time;
extern void to_tm(int tim, struct rtc_time * tm);
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index bd0fb849515..17bb40cad5b 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -40,6 +40,8 @@
#define segment_eq(a, b) ((a).seg == (b).seg)
+#define user_addr_max() (get_fs().seg)
+
#ifdef __powerpc64__
/*
* This check is sufficient because there is a large enough
@@ -453,42 +455,9 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
return size;
}
-extern int __strncpy_from_user(char *dst, const char __user *src, long count);
-
-static inline long strncpy_from_user(char *dst, const char __user *src,
- long count)
-{
- might_sleep();
- if (likely(access_ok(VERIFY_READ, src, 1)))
- return __strncpy_from_user(dst, src, count);
- return -EFAULT;
-}
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 for error
- */
-extern int __strnlen_user(const char __user *str, long len, unsigned long top);
-
-/*
- * Returns the length of the string at str (including the null byte),
- * or 0 if we hit a page we can't access,
- * or something > len if we didn't find a null byte.
- *
- * The `top' parameter to __strnlen_user is to make sure that
- * we can never overflow from the user area into kernel space.
- */
-static inline int strnlen_user(const char __user *str, long len)
-{
- unsigned long top = current->thread.fs.seg;
-
- if ((unsigned long)str > top)
- return 0;
- return __strnlen_user(str, len, top);
-}
-
-#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
+extern long strncpy_from_user(char *dst, const char __user *src, long count);
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
new file mode 100644
index 00000000000..d0b6d4ac6dd
--- /dev/null
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+/*
+ * Word-at-a-time interfaces for PowerPC.
+ */
+
+#include <linux/kernel.h>
+#include <asm/asm-compat.h>
+
+struct word_at_a_time {
+ const unsigned long high_bits, low_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) }
+
+/* Bit set in the bytes that have a zero */
+static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c)
+{
+ unsigned long mask = (val & c->low_bits) + c->low_bits;
+ return ~(mask | rhs);
+}
+
+#define create_zero_mask(mask) (mask)
+
+static inline long find_zero(unsigned long mask)
+{
+ long leading_zero_bits;
+
+ asm (PPC_CNTLZL "%0,%1" : "=r" (leading_zero_bits) : "r" (mask));
+ return leading_zero_bits >> 3;
+}
+
+static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+{
+ unsigned long rhs = val | c->low_bits;
+ *data = rhs;
+ return (val + c->high_bits) & ~rhs;
+}
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 4554dc2fe85..52c7ad78242 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -116,6 +116,9 @@ int main(void)
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu));
#endif
+#ifdef CONFIG_KVM_BOOKE_HV
+ DEFINE(THREAD_KVM_VCPU, offsetof(struct thread_struct, kvm_vcpu));
+#endif
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
@@ -383,6 +386,7 @@ int main(void)
#ifdef CONFIG_KVM
DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
+ DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid));
DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
@@ -425,9 +429,11 @@ int main(void)
DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4));
DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6));
+ DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
+ DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
+
/* book3s */
#ifdef CONFIG_KVM_BOOK3S_64_HV
- DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
@@ -440,9 +446,9 @@ int main(void)
DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v));
DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
+ DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
#endif
#ifdef CONFIG_PPC_BOOK3S
- DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
@@ -457,7 +463,6 @@ int main(void)
DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions));
DEFINE(VCPU_CEDED, offsetof(struct kvm_vcpu, arch.ceded));
DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
- DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa));
DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
@@ -533,6 +538,8 @@ int main(void)
HSTATE_FIELD(HSTATE_NAPPING, napping);
#ifdef CONFIG_KVM_BOOK3S_64_HV
+ HSTATE_FIELD(HSTATE_HWTHREAD_REQ, hwthread_req);
+ HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state);
HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
@@ -593,6 +600,12 @@ int main(void)
DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr));
#endif
+#ifdef CONFIG_KVM_BOOKE_HV
+ DEFINE(VCPU_HOST_MAS4, offsetof(struct kvm_vcpu, arch.host_mas4));
+ DEFINE(VCPU_HOST_MAS6, offsetof(struct kvm_vcpu, arch.host_mas6));
+ DEFINE(VCPU_EPLC, offsetof(struct kvm_vcpu, arch.eplc));
+#endif
+
#ifdef CONFIG_KVM_EXIT_TIMING
DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
arch.timing_exit.tv32.tbu));
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index 8053db02b85..69fdd2322a6 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -73,6 +73,7 @@ _GLOBAL(__setup_cpu_e500v2)
mtlr r4
blr
_GLOBAL(__setup_cpu_e500mc)
+ mr r5, r4
mflr r4
bl __e500_icache_setup
bl __e500_dcache_setup
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index f7bed44ee16..1c06d297154 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -63,11 +63,13 @@ BEGIN_FTR_SECTION
GET_PACA(r13)
#ifdef CONFIG_KVM_BOOK3S_64_HV
- lbz r0,PACAPROCSTART(r13)
- cmpwi r0,0x80
- bne 1f
- li r0,1
- stb r0,PACAPROCSTART(r13)
+ li r0,KVM_HWTHREAD_IN_KERNEL
+ stb r0,HSTATE_HWTHREAD_STATE(r13)
+ /* Order setting hwthread_state vs. testing hwthread_req */
+ sync
+ lbz r0,HSTATE_HWTHREAD_REQ(r13)
+ cmpwi r0,0
+ beq 1f
b kvm_start_guest
1:
#endif
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 22d608e8bb7..7a2e5e421ab 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -248,10 +248,11 @@ _ENTRY(_start);
interrupt_base:
/* Critical Input Interrupt */
- CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
+ CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)
/* Machine Check Interrupt */
- CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
+ CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \
+ machine_check_exception)
MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)
/* Data Storage Interrupt */
@@ -261,7 +262,8 @@ interrupt_base:
INSTRUCTION_STORAGE_EXCEPTION
/* External Input Interrupt */
- EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
+ EXCEPTION(0x0500, BOOKE_INTERRUPT_EXTERNAL, ExternalInput, \
+ do_IRQ, EXC_XFER_LITE)
/* Alignment Interrupt */
ALIGNMENT_EXCEPTION
@@ -273,29 +275,32 @@ interrupt_base:
#ifdef CONFIG_PPC_FPU
FP_UNAVAILABLE_EXCEPTION
#else
- EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2010, BOOKE_INTERRUPT_FP_UNAVAIL, \
+ FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
#endif
/* System Call Interrupt */
START_EXCEPTION(SystemCall)
- NORMAL_EXCEPTION_PROLOG
+ NORMAL_EXCEPTION_PROLOG(BOOKE_INTERRUPT_SYSCALL)
EXC_XFER_EE_LITE(0x0c00, DoSyscall)
/* Auxiliary Processor Unavailable Interrupt */
- EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2020, BOOKE_INTERRUPT_AP_UNAVAIL, \
+ AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
/* Decrementer Interrupt */
DECREMENTER_EXCEPTION
/* Fixed Internal Timer Interrupt */
/* TODO: Add FIT support */
- EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, \
+ unknown_exception, EXC_XFER_EE)
/* Watchdog Timer Interrupt */
/* TODO: Add watchdog support */
#ifdef CONFIG_BOOKE_WDT
- CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
+ CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, WatchdogException)
#else
- CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
+ CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, unknown_exception)
#endif
/* Data TLB Error Interrupt */
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index 0e4175388f4..5f051eeb93a 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -2,6 +2,9 @@
#define __HEAD_BOOKE_H__
#include <asm/ptrace.h> /* for STACK_FRAME_REGS_MARKER */
+#include <asm/kvm_asm.h>
+#include <asm/kvm_booke_hv_asm.h>
+
/*
* Macros used for common Book-e exception handling
*/
@@ -28,14 +31,15 @@
*/
#define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4))
-#define NORMAL_EXCEPTION_PROLOG \
+#define NORMAL_EXCEPTION_PROLOG(intno) \
mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \
mfspr r10, SPRN_SPRG_THREAD; \
stw r11, THREAD_NORMSAVE(0)(r10); \
stw r13, THREAD_NORMSAVE(2)(r10); \
mfcr r13; /* save CR in r13 for now */\
- mfspr r11,SPRN_SRR1; /* check whether user or kernel */\
- andi. r11,r11,MSR_PR; \
+ mfspr r11, SPRN_SRR1; \
+ DO_KVM BOOKE_INTERRUPT_##intno SPRN_SRR1; \
+ andi. r11, r11, MSR_PR; /* check whether user or kernel */\
mr r11, r1; \
beq 1f; \
/* if from user, start at top of this thread's kernel stack */ \
@@ -113,7 +117,7 @@
* registers as the normal prolog above. Instead we use a portion of the
* critical/machine check exception stack at low physical addresses.
*/
-#define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, exc_level_srr0, exc_level_srr1) \
+#define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, intno, exc_level_srr0, exc_level_srr1) \
mtspr SPRN_SPRG_WSCRATCH_##exc_level,r8; \
BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);/* r8 points to the exc_level stack*/ \
stw r9,GPR9(r8); /* save various registers */\
@@ -121,8 +125,9 @@
stw r10,GPR10(r8); \
stw r11,GPR11(r8); \
stw r9,_CCR(r8); /* save CR on stack */\
- mfspr r10,exc_level_srr1; /* check whether user or kernel */\
- andi. r10,r10,MSR_PR; \
+ mfspr r11,exc_level_srr1; /* check whether user or kernel */\
+ DO_KVM BOOKE_INTERRUPT_##intno exc_level_srr1; \
+ andi. r11,r11,MSR_PR; \
mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\
lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
addi r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame */\
@@ -162,12 +167,30 @@
SAVE_4GPRS(3, r11); \
SAVE_2GPRS(7, r11)
-#define CRITICAL_EXCEPTION_PROLOG \
- EXC_LEVEL_EXCEPTION_PROLOG(CRIT, SPRN_CSRR0, SPRN_CSRR1)
+#define CRITICAL_EXCEPTION_PROLOG(intno) \
+ EXC_LEVEL_EXCEPTION_PROLOG(CRIT, intno, SPRN_CSRR0, SPRN_CSRR1)
#define DEBUG_EXCEPTION_PROLOG \
- EXC_LEVEL_EXCEPTION_PROLOG(DBG, SPRN_DSRR0, SPRN_DSRR1)
+ EXC_LEVEL_EXCEPTION_PROLOG(DBG, DEBUG, SPRN_DSRR0, SPRN_DSRR1)
#define MCHECK_EXCEPTION_PROLOG \
- EXC_LEVEL_EXCEPTION_PROLOG(MC, SPRN_MCSRR0, SPRN_MCSRR1)
+ EXC_LEVEL_EXCEPTION_PROLOG(MC, MACHINE_CHECK, \
+ SPRN_MCSRR0, SPRN_MCSRR1)
+
+/*
+ * Guest Doorbell -- this is a bit odd in that it uses GSRR0/1 despite
+ * being delivered to the host. This exception can only happen
+ * inside a KVM guest -- so we just handle up to the DO_KVM rather
+ * than try to fit this into one of the existing prolog macros.
+ */
+#define GUEST_DOORBELL_EXCEPTION \
+ START_EXCEPTION(GuestDoorbell); \
+ mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \
+ mfspr r10, SPRN_SPRG_THREAD; \
+ stw r11, THREAD_NORMSAVE(0)(r10); \
+ mfspr r11, SPRN_SRR1; \
+ stw r13, THREAD_NORMSAVE(2)(r10); \
+ mfcr r13; /* save CR in r13 for now */\
+ DO_KVM BOOKE_INTERRUPT_GUEST_DBELL SPRN_GSRR1; \
+ trap
/*
* Exception vectors.
@@ -181,16 +204,16 @@ label:
.long func; \
.long ret_from_except_full
-#define EXCEPTION(n, label, hdlr, xfer) \
+#define EXCEPTION(n, intno, label, hdlr, xfer) \
START_EXCEPTION(label); \
- NORMAL_EXCEPTION_PROLOG; \
+ NORMAL_EXCEPTION_PROLOG(intno); \
addi r3,r1,STACK_FRAME_OVERHEAD; \
xfer(n, hdlr)
-#define CRITICAL_EXCEPTION(n, label, hdlr) \
- START_EXCEPTION(label); \
- CRITICAL_EXCEPTION_PROLOG; \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
+#define CRITICAL_EXCEPTION(n, intno, label, hdlr) \
+ START_EXCEPTION(label); \
+ CRITICAL_EXCEPTION_PROLOG(intno); \
+ addi r3,r1,STACK_FRAME_OVERHEAD; \
EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
NOCOPY, crit_transfer_to_handler, \
ret_from_crit_exc)
@@ -302,7 +325,7 @@ label:
#define DEBUG_CRIT_EXCEPTION \
START_EXCEPTION(DebugCrit); \
- CRITICAL_EXCEPTION_PROLOG; \
+ CRITICAL_EXCEPTION_PROLOG(DEBUG); \
\
/* \
* If there is a single step or branch-taken exception in an \
@@ -355,7 +378,7 @@ label:
#define DATA_STORAGE_EXCEPTION \
START_EXCEPTION(DataStorage) \
- NORMAL_EXCEPTION_PROLOG; \
+ NORMAL_EXCEPTION_PROLOG(DATA_STORAGE); \
mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \
stw r5,_ESR(r11); \
mfspr r4,SPRN_DEAR; /* Grab the DEAR */ \
@@ -363,7 +386,7 @@ label:
#define INSTRUCTION_STORAGE_EXCEPTION \
START_EXCEPTION(InstructionStorage) \
- NORMAL_EXCEPTION_PROLOG; \
+ NORMAL_EXCEPTION_PROLOG(INST_STORAGE); \
mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \
stw r5,_ESR(r11); \
mr r4,r12; /* Pass SRR0 as arg2 */ \
@@ -372,7 +395,7 @@ label:
#define ALIGNMENT_EXCEPTION \
START_EXCEPTION(Alignment) \
- NORMAL_EXCEPTION_PROLOG; \
+ NORMAL_EXCEPTION_PROLOG(ALIGNMENT); \
mfspr r4,SPRN_DEAR; /* Grab the DEAR and save it */ \
stw r4,_DEAR(r11); \
addi r3,r1,STACK_FRAME_OVERHEAD; \
@@ -380,7 +403,7 @@ label:
#define PROGRAM_EXCEPTION \
START_EXCEPTION(Program) \
- NORMAL_EXCEPTION_PROLOG; \
+ NORMAL_EXCEPTION_PROLOG(PROGRAM); \
mfspr r4,SPRN_ESR; /* Grab the ESR and save it */ \
stw r4,_ESR(r11); \
addi r3,r1,STACK_FRAME_OVERHEAD; \
@@ -388,7 +411,7 @@ label:
#define DECREMENTER_EXCEPTION \
START_EXCEPTION(Decrementer) \
- NORMAL_EXCEPTION_PROLOG; \
+ NORMAL_EXCEPTION_PROLOG(DECREMENTER); \
lis r0,TSR_DIS@h; /* Setup the DEC interrupt mask */ \
mtspr SPRN_TSR,r0; /* Clear the DEC interrupt */ \
addi r3,r1,STACK_FRAME_OVERHEAD; \
@@ -396,7 +419,7 @@ label:
#define FP_UNAVAILABLE_EXCEPTION \
START_EXCEPTION(FloatingPointUnavailable) \
- NORMAL_EXCEPTION_PROLOG; \
+ NORMAL_EXCEPTION_PROLOG(FP_UNAVAIL); \
beq 1f; \
bl load_up_fpu; /* if from user, just load it up */ \
b fast_exception_return; \
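
The head_booke.h hunks above thread an interrupt number into each exception prolog so the new DO_KVM hook can divert exceptions that were taken while a guest was running. As a rough C model of that control flow (a sketch only: the MSR_GS bit value, the handler names and the test harness are assumptions, not the kernel's actual macro expansion):

#include <stdio.h>

#define MSR_GS (1u << 28)  /* assumed guest-state bit position, for illustration only */

static void kvm_exit_handler(int intno) { printf("KVM owns exception %d\n", intno); }
static void host_prolog(int intno)      { printf("host prolog for exception %d\n", intno); }

/* Conceptual model of NORMAL_EXCEPTION_PROLOG(intno) followed by DO_KVM:
 * on E.HV hardware the saved SRR1 is checked for MSR[GS]; if the interrupted
 * context was a guest, control goes to the KVM exit path instead of the
 * normal Linux prolog. */
static void booke_exception_entry(int intno, unsigned long saved_srr1)
{
	if (saved_srr1 & MSR_GS)
		kvm_exit_handler(intno);
	else
		host_prolog(intno);
}

int main(void)
{
	booke_exception_entry(3, MSR_GS); /* pretend the exception was taken in a guest */
	booke_exception_entry(3, 0);      /* same exception taken in the host */
	return 0;
}
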
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index de80e0f9a2b..1f4434a3860 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -301,19 +301,20 @@ _ENTRY(__early_start)
interrupt_base:
/* Critical Input Interrupt */
- CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)
+ CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)
/* Machine Check Interrupt */
#ifdef CONFIG_E200
/* no RFMCI, MCSRRs on E200 */
- CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
+ CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \
+ machine_check_exception)
#else
MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
#endif
/* Data Storage Interrupt */
START_EXCEPTION(DataStorage)
- NORMAL_EXCEPTION_PROLOG
+ NORMAL_EXCEPTION_PROLOG(DATA_STORAGE)
mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
stw r5,_ESR(r11)
mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
@@ -328,7 +329,7 @@ interrupt_base:
INSTRUCTION_STORAGE_EXCEPTION
/* External Input Interrupt */
- EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)
+ EXCEPTION(0x0500, EXTERNAL, ExternalInput, do_IRQ, EXC_XFER_LITE)
/* Alignment Interrupt */
ALIGNMENT_EXCEPTION
@@ -342,32 +343,36 @@ interrupt_base:
#else
#ifdef CONFIG_E200
/* E200 treats 'normal' floating point instructions as FP Unavail exception */
- EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE)
+ EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \
+ program_check_exception, EXC_XFER_EE)
#else
- EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \
+ unknown_exception, EXC_XFER_EE)
#endif
#endif
/* System Call Interrupt */
START_EXCEPTION(SystemCall)
- NORMAL_EXCEPTION_PROLOG
+ NORMAL_EXCEPTION_PROLOG(SYSCALL)
EXC_XFER_EE_LITE(0x0c00, DoSyscall)
/* Auxiliary Processor Unavailable Interrupt */
- EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, \
+ unknown_exception, EXC_XFER_EE)
/* Decrementer Interrupt */
DECREMENTER_EXCEPTION
/* Fixed Internal Timer Interrupt */
/* TODO: Add FIT support */
- EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x3100, FIT, FixedIntervalTimer, \
+ unknown_exception, EXC_XFER_EE)
/* Watchdog Timer Interrupt */
#ifdef CONFIG_BOOKE_WDT
- CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException)
+ CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, WatchdogException)
#else
- CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception)
+ CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, unknown_exception)
#endif
/* Data TLB Error Interrupt */
@@ -375,10 +380,16 @@ interrupt_base:
mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
mfspr r10, SPRN_SPRG_THREAD
stw r11, THREAD_NORMSAVE(0)(r10)
+#ifdef CONFIG_KVM_BOOKE_HV
+BEGIN_FTR_SECTION
+ mfspr r11, SPRN_SRR1
+END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+#endif
stw r12, THREAD_NORMSAVE(1)(r10)
stw r13, THREAD_NORMSAVE(2)(r10)
mfcr r13
stw r13, THREAD_NORMSAVE(3)(r10)
+ DO_KVM BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1
mfspr r10, SPRN_DEAR /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
@@ -463,10 +474,16 @@ interrupt_base:
mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
mfspr r10, SPRN_SPRG_THREAD
stw r11, THREAD_NORMSAVE(0)(r10)
+#ifdef CONFIG_KVM_BOOKE_HV
+BEGIN_FTR_SECTION
+ mfspr r11, SPRN_SRR1
+END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+#endif
stw r12, THREAD_NORMSAVE(1)(r10)
stw r13, THREAD_NORMSAVE(2)(r10)
mfcr r13
stw r13, THREAD_NORMSAVE(3)(r10)
+ DO_KVM BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1
mfspr r10, SPRN_SRR0 /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
@@ -538,36 +555,54 @@ interrupt_base:
#ifdef CONFIG_SPE
/* SPE Unavailable */
START_EXCEPTION(SPEUnavailable)
- NORMAL_EXCEPTION_PROLOG
+ NORMAL_EXCEPTION_PROLOG(SPE_UNAVAIL)
bne load_up_spe
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE_LITE(0x2010, KernelSPE)
#else
- EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, \
+ unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */
/* SPE Floating Point Data */
#ifdef CONFIG_SPE
- EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);
+ EXCEPTION(0x2030, SPE_FP_DATA, SPEFloatingPointData, \
+ SPEFloatingPointException, EXC_XFER_EE);
/* SPE Floating Point Round */
- EXCEPTION(0x2050, SPEFloatingPointRound, SPEFloatingPointRoundException, EXC_XFER_EE)
+ EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
+ SPEFloatingPointRoundException, EXC_XFER_EE)
#else
- EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
- EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData, \
+ unknown_exception, EXC_XFER_EE)
+ EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
+ unknown_exception, EXC_XFER_EE)
#endif /* CONFIG_SPE */
/* Performance Monitor */
- EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
+ EXCEPTION(0x2060, PERFORMANCE_MONITOR, PerformanceMonitor, \
+ performance_monitor_exception, EXC_XFER_STD)
- EXCEPTION(0x2070, Doorbell, doorbell_exception, EXC_XFER_STD)
+ EXCEPTION(0x2070, DOORBELL, Doorbell, doorbell_exception, EXC_XFER_STD)
- CRITICAL_EXCEPTION(0x2080, CriticalDoorbell, unknown_exception)
+ CRITICAL_EXCEPTION(0x2080, DOORBELL_CRITICAL, \
+ CriticalDoorbell, unknown_exception)
/* Debug Interrupt */
DEBUG_DEBUG_EXCEPTION
DEBUG_CRIT_EXCEPTION
+ GUEST_DOORBELL_EXCEPTION
+
+ CRITICAL_EXCEPTION(0, GUEST_DBELL_CRIT, CriticalGuestDoorbell, \
+ unknown_exception)
+
+ /* Hypercall */
+ EXCEPTION(0, HV_SYSCALL, Hypercall, unknown_exception, EXC_XFER_EE)
+
+ /* Embedded Hypervisor Privilege */
+ EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception, EXC_XFER_EE)
+
/*
* Local functions
*/
@@ -871,8 +906,31 @@ _GLOBAL(__setup_e500mc_ivors)
mtspr SPRN_IVOR36,r3
li r3,CriticalDoorbell@l
mtspr SPRN_IVOR37,r3
+
+ /*
+ * We only want to touch IVOR38-41 if we're running on hardware
+ * that supports category E.HV. The architectural way to determine
+ * this is MMUCFG[LPIDSIZE].
+ */
+ mfspr r3, SPRN_MMUCFG
+ andis. r3, r3, MMUCFG_LPIDSIZE@h
+ beq no_hv
+ li r3,GuestDoorbell@l
+ mtspr SPRN_IVOR38,r3
+ li r3,CriticalGuestDoorbell@l
+ mtspr SPRN_IVOR39,r3
+ li r3,Hypercall@l
+ mtspr SPRN_IVOR40,r3
+ li r3,Ehvpriv@l
+ mtspr SPRN_IVOR41,r3
+skip_hv_ivors:
sync
blr
+no_hv:
+ lwz r3, CPU_SPEC_FEATURES(r5)
+ rlwinm r3, r3, 0, ~CPU_FTR_EMB_HV
+ stw r3, CPU_SPEC_FEATURES(r5)
+ b skip_hv_ivors
#ifdef CONFIG_SPE
/*
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 0cdc9a39283..7140d838339 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -16,6 +16,7 @@
#include <asm/asm-offsets.h>
#include <asm/ppc-opcode.h>
#include <asm/hw_irq.h>
+#include <asm/kvm_book3s_asm.h>
#undef DEBUG
@@ -81,6 +82,12 @@ _GLOBAL(power7_idle)
std r9,_MSR(r1)
std r1,PACAR1(r13)
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ /* Tell KVM we're napping */
+ li r4,KVM_HWTHREAD_IN_NAP
+ stb r4,HSTATE_HWTHREAD_STATE(r13)
+#endif
+
/* Magic NAP mode enter sequence */
std r0,0(r1)
ptesync
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 786a2700ec2..3e4031581c6 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -85,8 +85,6 @@ EXPORT_SYMBOL(csum_tcpudp_magic);
EXPORT_SYMBOL(__copy_tofrom_user);
EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(__strnlen_user);
EXPORT_SYMBOL(copy_page);
#if defined(CONFIG_PCI) && defined(CONFIG_PPC32)
@@ -190,3 +188,7 @@ EXPORT_SYMBOL(__arch_hweight16);
EXPORT_SYMBOL(__arch_hweight32);
EXPORT_SYMBOL(__arch_hweight64);
#endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+EXPORT_SYMBOL_GPL(mmu_psize_defs);
+#endif
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 2c42cd72d0f..99a995c2a3f 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -100,7 +100,7 @@ static int decrementer_set_next_event(unsigned long evt,
static void decrementer_set_mode(enum clock_event_mode mode,
struct clock_event_device *dev);
-static struct clock_event_device decrementer_clockevent = {
+struct clock_event_device decrementer_clockevent = {
.name = "decrementer",
.rating = 200,
.irq = 0,
@@ -108,6 +108,7 @@ static struct clock_event_device decrementer_clockevent = {
.set_mode = decrementer_set_mode,
.features = CLOCK_EVT_FEAT_ONESHOT,
};
+EXPORT_SYMBOL(decrementer_clockevent);
DEFINE_PER_CPU(u64, decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 7b612a76c70..50e7dbc7356 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -29,15 +29,18 @@
#include <asm/kvm_ppc.h>
#include "44x_tlb.h"
+#include "booke.h"
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
+ kvmppc_booke_vcpu_load(vcpu, cpu);
kvmppc_44x_tlb_load(vcpu);
}
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
kvmppc_44x_tlb_put(vcpu);
+ kvmppc_booke_vcpu_put(vcpu);
}
int kvmppc_core_check_processor_compat(void)
@@ -160,6 +163,15 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
}
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+ return 0;
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+}
+
static int __init kvmppc_44x_init(void)
{
int r;
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
index 549bb2c9a47..c8c61578fdf 100644
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -37,22 +37,19 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
- int dcrn;
- int ra;
- int rb;
- int rc;
- int rs;
- int rt;
- int ws;
+ int dcrn = get_dcrn(inst);
+ int ra = get_ra(inst);
+ int rb = get_rb(inst);
+ int rc = get_rc(inst);
+ int rs = get_rs(inst);
+ int rt = get_rt(inst);
+ int ws = get_ws(inst);
switch (get_op(inst)) {
case 31:
switch (get_xop(inst)) {
case XOP_MFDCR:
- dcrn = get_dcrn(inst);
- rt = get_rt(inst);
-
/* The guest may access CPR0 registers to determine the timebase
* frequency, and it must know the real host frequency because it
* can directly access the timebase registers.
@@ -88,9 +85,6 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case XOP_MTDCR:
- dcrn = get_dcrn(inst);
- rs = get_rs(inst);
-
/* emulate some access in kernel */
switch (dcrn) {
case DCRN_CPR0_CONFIG_ADDR:
@@ -108,17 +102,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case XOP_TLBWE:
- ra = get_ra(inst);
- rs = get_rs(inst);
- ws = get_ws(inst);
emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws);
break;
case XOP_TLBSX:
- rt = get_rt(inst);
- ra = get_ra(inst);
- rb = get_rb(inst);
- rc = get_rc(inst);
emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc);
break;
@@ -141,41 +128,41 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
int emulated = EMULATE_DONE;
switch (sprn) {
case SPRN_PID:
- kvmppc_set_pid(vcpu, kvmppc_get_gpr(vcpu, rs)); break;
+ kvmppc_set_pid(vcpu, spr_val); break;
case SPRN_MMUCR:
- vcpu->arch.mmucr = kvmppc_get_gpr(vcpu, rs); break;
+ vcpu->arch.mmucr = spr_val; break;
case SPRN_CCR0:
- vcpu->arch.ccr0 = kvmppc_get_gpr(vcpu, rs); break;
+ vcpu->arch.ccr0 = spr_val; break;
case SPRN_CCR1:
- vcpu->arch.ccr1 = kvmppc_get_gpr(vcpu, rs); break;
+ vcpu->arch.ccr1 = spr_val; break;
default:
- emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
+ emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
}
return emulated;
}
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
int emulated = EMULATE_DONE;
switch (sprn) {
case SPRN_PID:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.pid); break;
+ *spr_val = vcpu->arch.pid; break;
case SPRN_MMUCR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucr); break;
+ *spr_val = vcpu->arch.mmucr; break;
case SPRN_CCR0:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr0); break;
+ *spr_val = vcpu->arch.ccr0; break;
case SPRN_CCR1:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr1); break;
+ *spr_val = vcpu->arch.ccr1; break;
default:
- emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
+ emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
}
return emulated;
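
The mtspr/mfspr changes above (and the matching book3s_emulate.c hunk later in this diff) move GPR access out of the per-core code: the core hooks now take the SPR value directly. A hedged sketch of how a generic dispatcher is assumed to drive the new signatures follows; the wrapper names are illustrative, and the snippet relies on the same <asm/kvm_ppc.h> helpers as the file above rather than being standalone.

/* Illustrative only; not the kernel's actual emulate.c code. */
static int emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
	/* read the GPR once here, pass only the value down */
	return kvmppc_core_emulate_mtspr(vcpu, sprn, kvmppc_get_gpr(vcpu, rs));
}

static int emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
	ulong val;
	int ret = kvmppc_core_emulate_mfspr(vcpu, sprn, &val);

	/* only commit the result to the GPR if the core code handled it */
	if (ret == EMULATE_DONE)
		kvmppc_set_gpr(vcpu, rt, val);
	return ret;
}
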
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 8f64709ae33..f4dacb9c57f 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -90,6 +90,9 @@ config KVM_BOOK3S_64_PR
depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV
select KVM_BOOK3S_PR
+config KVM_BOOKE_HV
+ bool
+
config KVM_440
bool "KVM support for PowerPC 440 processors"
depends on EXPERIMENTAL && 44x
@@ -106,7 +109,7 @@ config KVM_440
config KVM_EXIT_TIMING
bool "Detailed exit timing"
- depends on KVM_440 || KVM_E500
+ depends on KVM_440 || KVM_E500V2 || KVM_E500MC
---help---
Calculate elapsed time for every exit/enter cycle. A per-vcpu
report is available in debugfs kvm/vm#_vcpu#_timing.
@@ -115,14 +118,29 @@ config KVM_EXIT_TIMING
If unsure, say N.
-config KVM_E500
- bool "KVM support for PowerPC E500 processors"
- depends on EXPERIMENTAL && E500
+config KVM_E500V2
+ bool "KVM support for PowerPC E500v2 processors"
+ depends on EXPERIMENTAL && E500 && !PPC_E500MC
select KVM
select KVM_MMIO
---help---
Support running unmodified E500 guest kernels in virtual machines on
- E500 host processors.
+ E500v2 host processors.
+
+ This module provides access to the hardware capabilities through
+ a character device node named /dev/kvm.
+
+ If unsure, say N.
+
+config KVM_E500MC
+ bool "KVM support for PowerPC E500MC/E5500 processors"
+ depends on EXPERIMENTAL && PPC_E500MC
+ select KVM
+ select KVM_MMIO
+ select KVM_BOOKE_HV
+ ---help---
+ Support running unmodified E500MC/E5500 (32-bit) guest kernels in
+ virtual machines on E500MC/E5500 host processors.
This module provides access to the hardware capabilities through
a character device node named /dev/kvm.
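
For reference, a build that wants the new e500mc support would end up with a .config along these lines (an assumed fragment, derived only from the dependencies and selects shown in the hunk above):

CONFIG_EXPERIMENTAL=y
CONFIG_PPC_E500MC=y
CONFIG_KVM=y
CONFIG_KVM_MMIO=y
CONFIG_KVM_BOOKE_HV=y
CONFIG_KVM_E500MC=y
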
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 3688aeecc4b..c2a08636e6d 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -36,7 +36,17 @@ kvm-e500-objs := \
e500.o \
e500_tlb.o \
e500_emulate.o
-kvm-objs-$(CONFIG_KVM_E500) := $(kvm-e500-objs)
+kvm-objs-$(CONFIG_KVM_E500V2) := $(kvm-e500-objs)
+
+kvm-e500mc-objs := \
+ $(common-objs-y) \
+ booke.o \
+ booke_emulate.o \
+ bookehv_interrupts.o \
+ e500mc.o \
+ e500_tlb.o \
+ e500_emulate.o
+kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
../../../virt/kvm/coalesced_mmio.o \
@@ -44,6 +54,7 @@ kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
book3s_paired_singles.o \
book3s_pr.o \
book3s_pr_papr.o \
+ book3s_64_vio_hv.o \
book3s_emulate.o \
book3s_interrupts.o \
book3s_mmu_hpte.o \
@@ -68,6 +79,7 @@ kvm-book3s_64-module-objs := \
powerpc.o \
emulate.o \
book3s.o \
+ book3s_64_vio.o \
$(kvm-book3s_64-objs-y)
kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-module-objs)
@@ -88,7 +100,8 @@ kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs)
kvm-objs := $(kvm-objs-m) $(kvm-objs-y)
obj-$(CONFIG_KVM_440) += kvm.o
-obj-$(CONFIG_KVM_E500) += kvm.o
+obj-$(CONFIG_KVM_E500V2) += kvm.o
+obj-$(CONFIG_KVM_E500MC) += kvm.o
obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 7d54f4ed6d9..3f2a8360c85 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -258,7 +258,7 @@ static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
return true;
}
-void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
+int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
unsigned long *pending = &vcpu->arch.pending_exceptions;
unsigned long old_pending = vcpu->arch.pending_exceptions;
@@ -283,12 +283,17 @@ void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
/* Tell the guest about our interrupt status */
kvmppc_update_int_pending(vcpu, *pending, old_pending);
+
+ return 0;
}
pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
ulong mp_pa = vcpu->arch.magic_page_pa;
+ if (!(vcpu->arch.shared->msr & MSR_SF))
+ mp_pa = (uint32_t)mp_pa;
+
/* Magic page override */
if (unlikely(mp_pa) &&
unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) ==
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index c3beaeef3f6..80a57751758 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -36,13 +36,11 @@
/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970 63
-#define NR_LPIDS (LPID_RSVD + 1)
-unsigned long lpid_inuse[BITS_TO_LONGS(NR_LPIDS)];
long kvmppc_alloc_hpt(struct kvm *kvm)
{
unsigned long hpt;
- unsigned long lpid;
+ long lpid;
struct revmap_entry *rev;
struct kvmppc_linear_info *li;
@@ -72,14 +70,9 @@ long kvmppc_alloc_hpt(struct kvm *kvm)
}
kvm->arch.revmap = rev;
- /* Allocate the guest's logical partition ID */
- do {
- lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS);
- if (lpid >= NR_LPIDS) {
- pr_err("kvm_alloc_hpt: No LPIDs free\n");
- goto out_freeboth;
- }
- } while (test_and_set_bit(lpid, lpid_inuse));
+ lpid = kvmppc_alloc_lpid();
+ if (lpid < 0)
+ goto out_freeboth;
kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18);
kvm->arch.lpid = lpid;
@@ -96,7 +89,7 @@ long kvmppc_alloc_hpt(struct kvm *kvm)
void kvmppc_free_hpt(struct kvm *kvm)
{
- clear_bit(kvm->arch.lpid, lpid_inuse);
+ kvmppc_free_lpid(kvm->arch.lpid);
vfree(kvm->arch.revmap);
if (kvm->arch.hpt_li)
kvm_release_hpt(kvm->arch.hpt_li);
@@ -171,8 +164,7 @@ int kvmppc_mmu_hv_init(void)
if (!cpu_has_feature(CPU_FTR_HVMODE))
return -EINVAL;
- memset(lpid_inuse, 0, sizeof(lpid_inuse));
-
+ /* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */
if (cpu_has_feature(CPU_FTR_ARCH_206)) {
host_lpid = mfspr(SPRN_LPID); /* POWER7 */
rsvd_lpid = LPID_RSVD;
@@ -181,9 +173,11 @@ int kvmppc_mmu_hv_init(void)
rsvd_lpid = MAX_LPID_970;
}
- set_bit(host_lpid, lpid_inuse);
+ kvmppc_init_lpid(rsvd_lpid + 1);
+
+ kvmppc_claim_lpid(host_lpid);
/* rsvd_lpid is reserved for use in partition switching */
- set_bit(rsvd_lpid, lpid_inuse);
+ kvmppc_claim_lpid(rsvd_lpid);
return 0;
}
@@ -452,7 +446,7 @@ static int instruction_is_store(unsigned int instr)
}
static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned long gpa, int is_store)
+ unsigned long gpa, gva_t ea, int is_store)
{
int ret;
u32 last_inst;
@@ -499,6 +493,7 @@ static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
*/
vcpu->arch.paddr_accessed = gpa;
+ vcpu->arch.vaddr_accessed = ea;
return kvmppc_emulate_mmio(run, vcpu);
}
@@ -552,7 +547,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* No memslot means it's an emulated MMIO region */
if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1));
- return kvmppc_hv_emulate_mmio(run, vcpu, gpa,
+ return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
dsisr & DSISR_ISSTORE);
}
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index f2e6e48ea46..56b983e7b73 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -90,8 +90,6 @@ slb_exit_skip_ ## num:
or r10, r10, r12
slbie r10
- isync
-
/* Fill SLB with our shadow */
lbz r12, SVCPU_SLB_MAX(r3)
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
new file mode 100644
index 00000000000..72ffc899c08
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -0,0 +1,150 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
+ * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/highmem.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/hugetlb.h>
+#include <linux/list.h>
+#include <linux/anon_inodes.h>
+
+#include <asm/tlbflush.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu-hash64.h>
+#include <asm/hvcall.h>
+#include <asm/synch.h>
+#include <asm/ppc-opcode.h>
+#include <asm/kvm_host.h>
+#include <asm/udbg.h>
+
+#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
+
+static long kvmppc_stt_npages(unsigned long window_size)
+{
+ return ALIGN((window_size >> SPAPR_TCE_SHIFT)
+ * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
+}
+
+static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
+{
+ struct kvm *kvm = stt->kvm;
+ int i;
+
+ mutex_lock(&kvm->lock);
+ list_del(&stt->list);
+ for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
+ __free_page(stt->pages[i]);
+ kfree(stt);
+ mutex_unlock(&kvm->lock);
+
+ kvm_put_kvm(kvm);
+}
+
+static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
+ struct page *page;
+
+ if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
+ return VM_FAULT_SIGBUS;
+
+ page = stt->pages[vmf->pgoff];
+ get_page(page);
+ vmf->page = page;
+ return 0;
+}
+
+static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
+ .fault = kvm_spapr_tce_fault,
+};
+
+static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ vma->vm_ops = &kvm_spapr_tce_vm_ops;
+ return 0;
+}
+
+static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
+{
+ struct kvmppc_spapr_tce_table *stt = filp->private_data;
+
+ release_spapr_tce_table(stt);
+ return 0;
+}
+
+static struct file_operations kvm_spapr_tce_fops = {
+ .mmap = kvm_spapr_tce_mmap,
+ .release = kvm_spapr_tce_release,
+};
+
+long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
+ struct kvm_create_spapr_tce *args)
+{
+ struct kvmppc_spapr_tce_table *stt = NULL;
+ long npages;
+ int ret = -ENOMEM;
+ int i;
+
+ /* Check this LIOBN hasn't been previously allocated */
+ list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
+ if (stt->liobn == args->liobn)
+ return -EBUSY;
+ }
+
+ npages = kvmppc_stt_npages(args->window_size);
+
+ stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
+ GFP_KERNEL);
+ if (!stt)
+ goto fail;
+
+ stt->liobn = args->liobn;
+ stt->window_size = args->window_size;
+ stt->kvm = kvm;
+
+ for (i = 0; i < npages; i++) {
+ stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!stt->pages[i])
+ goto fail;
+ }
+
+ kvm_get_kvm(kvm);
+
+ mutex_lock(&kvm->lock);
+ list_add(&stt->list, &kvm->arch.spapr_tce_tables);
+
+ mutex_unlock(&kvm->lock);
+
+ return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
+ stt, O_RDWR);
+
+fail:
+ if (stt) {
+ for (i = 0; i < npages; i++)
+ if (stt->pages[i])
+ __free_page(stt->pages[i]);
+
+ kfree(stt);
+ }
+ return ret;
+}
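
book3s_64_vio.c is new here and carries the slow-path plumbing: kvm_vm_ioctl_create_spapr_tce() builds the TCE table and hands userspace an fd whose pages can be mmap()ed. A hedged userspace sketch of that flow follows; the helper is hypothetical and assumes a <linux/kvm.h> that already exposes KVM_CREATE_SPAPR_TCE, plus 4K TCE pages (SPAPR_TCE_SHIFT == 12) to mirror kvmppc_stt_npages() above.

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

/* Hypothetical helper: create a TCE table for one LIOBN and map it so the
 * TCEs written via H_PUT_TCE in the kernel are visible to userspace. */
static uint64_t *map_tce_table(int vmfd, uint64_t liobn, uint32_t window_size)
{
	struct kvm_create_spapr_tce args = {
		.liobn = liobn,
		.window_size = window_size,	/* DMA window size in bytes */
	};
	int tcefd = ioctl(vmfd, KVM_CREATE_SPAPR_TCE, &args);
	size_t len;
	void *table;

	if (tcefd < 0) {
		perror("KVM_CREATE_SPAPR_TCE");
		return NULL;
	}
	/* one 64-bit TCE per 4K page of the window, as in kvmppc_stt_npages() */
	len = (size_t)(window_size >> 12) * sizeof(uint64_t);
	table = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, tcefd, 0);
	return table == MAP_FAILED ? NULL : table;
}
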
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index ea0f8c537c2..30c2f3b134c 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -38,6 +38,9 @@
#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
+/* WARNING: This will be called in real-mode on HV KVM and virtual
+ * mode on PR KVM
+ */
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba, unsigned long tce)
{
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 135663a3e4f..b9a989dc76c 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -87,6 +87,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
+ int rt = get_rt(inst);
+ int rs = get_rs(inst);
+ int ra = get_ra(inst);
+ int rb = get_rb(inst);
switch (get_op(inst)) {
case 19:
@@ -106,21 +110,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
case 31:
switch (get_xop(inst)) {
case OP_31_XOP_MFMSR:
- kvmppc_set_gpr(vcpu, get_rt(inst),
- vcpu->arch.shared->msr);
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
break;
case OP_31_XOP_MTMSRD:
{
- ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
+ ulong rs_val = kvmppc_get_gpr(vcpu, rs);
if (inst & 0x10000) {
- vcpu->arch.shared->msr &= ~(MSR_RI | MSR_EE);
- vcpu->arch.shared->msr |= rs & (MSR_RI | MSR_EE);
+ ulong new_msr = vcpu->arch.shared->msr;
+ new_msr &= ~(MSR_RI | MSR_EE);
+ new_msr |= rs_val & (MSR_RI | MSR_EE);
+ vcpu->arch.shared->msr = new_msr;
} else
- kvmppc_set_msr(vcpu, rs);
+ kvmppc_set_msr(vcpu, rs_val);
break;
}
case OP_31_XOP_MTMSR:
- kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
+ kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
break;
case OP_31_XOP_MFSR:
{
@@ -130,7 +135,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (vcpu->arch.mmu.mfsrin) {
u32 sr;
sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
- kvmppc_set_gpr(vcpu, get_rt(inst), sr);
+ kvmppc_set_gpr(vcpu, rt, sr);
}
break;
}
@@ -138,29 +143,29 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
{
int srnum;
- srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf;
+ srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
if (vcpu->arch.mmu.mfsrin) {
u32 sr;
sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
- kvmppc_set_gpr(vcpu, get_rt(inst), sr);
+ kvmppc_set_gpr(vcpu, rt, sr);
}
break;
}
case OP_31_XOP_MTSR:
vcpu->arch.mmu.mtsrin(vcpu,
(inst >> 16) & 0xf,
- kvmppc_get_gpr(vcpu, get_rs(inst)));
+ kvmppc_get_gpr(vcpu, rs));
break;
case OP_31_XOP_MTSRIN:
vcpu->arch.mmu.mtsrin(vcpu,
- (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
- kvmppc_get_gpr(vcpu, get_rs(inst)));
+ (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
+ kvmppc_get_gpr(vcpu, rs));
break;
case OP_31_XOP_TLBIE:
case OP_31_XOP_TLBIEL:
{
bool large = (inst & 0x00200000) ? true : false;
- ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst));
+ ulong addr = kvmppc_get_gpr(vcpu, rb);
vcpu->arch.mmu.tlbie(vcpu, addr, large);
break;
}
@@ -171,15 +176,15 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_FAIL;
vcpu->arch.mmu.slbmte(vcpu,
- kvmppc_get_gpr(vcpu, get_rs(inst)),
- kvmppc_get_gpr(vcpu, get_rb(inst)));
+ kvmppc_get_gpr(vcpu, rs),
+ kvmppc_get_gpr(vcpu, rb));
break;
case OP_31_XOP_SLBIE:
if (!vcpu->arch.mmu.slbie)
return EMULATE_FAIL;
vcpu->arch.mmu.slbie(vcpu,
- kvmppc_get_gpr(vcpu, get_rb(inst)));
+ kvmppc_get_gpr(vcpu, rb));
break;
case OP_31_XOP_SLBIA:
if (!vcpu->arch.mmu.slbia)
@@ -191,22 +196,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (!vcpu->arch.mmu.slbmfee) {
emulated = EMULATE_FAIL;
} else {
- ulong t, rb;
+ ulong t, rb_val;
- rb = kvmppc_get_gpr(vcpu, get_rb(inst));
- t = vcpu->arch.mmu.slbmfee(vcpu, rb);
- kvmppc_set_gpr(vcpu, get_rt(inst), t);
+ rb_val = kvmppc_get_gpr(vcpu, rb);
+ t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
+ kvmppc_set_gpr(vcpu, rt, t);
}
break;
case OP_31_XOP_SLBMFEV:
if (!vcpu->arch.mmu.slbmfev) {
emulated = EMULATE_FAIL;
} else {
- ulong t, rb;
+ ulong t, rb_val;
- rb = kvmppc_get_gpr(vcpu, get_rb(inst));
- t = vcpu->arch.mmu.slbmfev(vcpu, rb);
- kvmppc_set_gpr(vcpu, get_rt(inst), t);
+ rb_val = kvmppc_get_gpr(vcpu, rb);
+ t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
+ kvmppc_set_gpr(vcpu, rt, t);
}
break;
case OP_31_XOP_DCBA:
@@ -214,17 +219,17 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case OP_31_XOP_DCBZ:
{
- ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
- ulong ra = 0;
+ ulong rb_val = kvmppc_get_gpr(vcpu, rb);
+ ulong ra_val = 0;
ulong addr, vaddr;
u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
u32 dsisr;
int r;
- if (get_ra(inst))
- ra = kvmppc_get_gpr(vcpu, get_ra(inst));
+ if (ra)
+ ra_val = kvmppc_get_gpr(vcpu, ra);
- addr = (ra + rb) & ~31ULL;
+ addr = (ra_val + rb_val) & ~31ULL;
if (!(vcpu->arch.shared->msr & MSR_SF))
addr &= 0xffffffff;
vaddr = addr;
@@ -313,10 +318,9 @@ static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
return bat;
}
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
int emulated = EMULATE_DONE;
- ulong spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
case SPRN_SDR1:
@@ -428,7 +432,7 @@ unprivileged:
return emulated;
}
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
int emulated = EMULATE_DONE;
@@ -441,46 +445,46 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);
if (sprn % 2)
- kvmppc_set_gpr(vcpu, rt, bat->raw >> 32);
+ *spr_val = bat->raw >> 32;
else
- kvmppc_set_gpr(vcpu, rt, bat->raw);
+ *spr_val = bat->raw;
break;
}
case SPRN_SDR1:
if (!spr_allowed(vcpu, PRIV_HYPER))
goto unprivileged;
- kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
+ *spr_val = to_book3s(vcpu)->sdr1;
break;
case SPRN_DSISR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dsisr);
+ *spr_val = vcpu->arch.shared->dsisr;
break;
case SPRN_DAR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar);
+ *spr_val = vcpu->arch.shared->dar;
break;
case SPRN_HIOR:
- kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior);
+ *spr_val = to_book3s(vcpu)->hior;
break;
case SPRN_HID0:
- kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]);
+ *spr_val = to_book3s(vcpu)->hid[0];
break;
case SPRN_HID1:
- kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
+ *spr_val = to_book3s(vcpu)->hid[1];
break;
case SPRN_HID2:
case SPRN_HID2_GEKKO:
- kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
+ *spr_val = to_book3s(vcpu)->hid[2];
break;
case SPRN_HID4:
case SPRN_HID4_GEKKO:
- kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
+ *spr_val = to_book3s(vcpu)->hid[4];
break;
case SPRN_HID5:
- kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
+ *spr_val = to_book3s(vcpu)->hid[5];
break;
case SPRN_CFAR:
case SPRN_PURR:
- kvmppc_set_gpr(vcpu, rt, 0);
+ *spr_val = 0;
break;
case SPRN_GQR0:
case SPRN_GQR1:
@@ -490,8 +494,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
case SPRN_GQR5:
case SPRN_GQR6:
case SPRN_GQR7:
- kvmppc_set_gpr(vcpu, rt,
- to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]);
+ *spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
break;
case SPRN_THRM1:
case SPRN_THRM2:
@@ -506,7 +509,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
case SPRN_PMC3_GEKKO:
case SPRN_PMC4_GEKKO:
case SPRN_WPAR_GEKKO:
- kvmppc_set_gpr(vcpu, rt, 0);
+ *spr_val = 0;
break;
default:
unprivileged:
@@ -565,23 +568,22 @@ u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
ulong dar = 0;
- ulong ra;
+ ulong ra = get_ra(inst);
+ ulong rb = get_rb(inst);
switch (get_op(inst)) {
case OP_LFS:
case OP_LFD:
case OP_STFD:
case OP_STFS:
- ra = get_ra(inst);
if (ra)
dar = kvmppc_get_gpr(vcpu, ra);
dar += (s32)((s16)inst);
break;
case 31:
- ra = get_ra(inst);
if (ra)
dar = kvmppc_get_gpr(vcpu, ra);
- dar += kvmppc_get_gpr(vcpu, get_rb(inst));
+ dar += kvmppc_get_gpr(vcpu, rb);
break;
default:
printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 108d1f58017..c6af1d62383 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -60,12 +60,20 @@ static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu);
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
local_paca->kvm_hstate.kvm_vcpu = vcpu;
- local_paca->kvm_hstate.kvm_vcore = vcpu->arch.vcore;
+ local_paca->kvm_hstate.kvm_vcore = vc;
+ if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
+ vc->stolen_tb += mftb() - vc->preempt_tb;
}
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+ if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
+ vc->preempt_tb = mftb();
}
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
@@ -134,6 +142,22 @@ static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
vpa->yield_count = 1;
}
+/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
+struct reg_vpa {
+ u32 dummy;
+ union {
+ u16 hword;
+ u32 word;
+ } length;
+};
+
+static int vpa_is_registered(struct kvmppc_vpa *vpap)
+{
+ if (vpap->update_pending)
+ return vpap->next_gpa != 0;
+ return vpap->pinned_addr != NULL;
+}
+
static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
unsigned long flags,
unsigned long vcpuid, unsigned long vpa)
@@ -142,88 +166,182 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
unsigned long len, nb;
void *va;
struct kvm_vcpu *tvcpu;
- int err = H_PARAMETER;
+ int err;
+ int subfunc;
+ struct kvmppc_vpa *vpap;
tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
if (!tvcpu)
return H_PARAMETER;
- flags >>= 63 - 18;
- flags &= 7;
- if (flags == 0 || flags == 4)
- return H_PARAMETER;
- if (flags < 4) {
- if (vpa & 0x7f)
+ subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
+ if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
+ subfunc == H_VPA_REG_SLB) {
+ /* Registering new area - address must be cache-line aligned */
+ if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
return H_PARAMETER;
- if (flags >= 2 && !tvcpu->arch.vpa)
- return H_RESOURCE;
- /* registering new area; convert logical addr to real */
+
+ /* convert logical addr to kernel addr and read length */
va = kvmppc_pin_guest_page(kvm, vpa, &nb);
if (va == NULL)
return H_PARAMETER;
- if (flags <= 1)
- len = *(unsigned short *)(va + 4);
+ if (subfunc == H_VPA_REG_VPA)
+ len = ((struct reg_vpa *)va)->length.hword;
else
- len = *(unsigned int *)(va + 4);
- if (len > nb)
- goto out_unpin;
- switch (flags) {
- case 1: /* register VPA */
- if (len < 640)
- goto out_unpin;
- if (tvcpu->arch.vpa)
- kvmppc_unpin_guest_page(kvm, vcpu->arch.vpa);
- tvcpu->arch.vpa = va;
- init_vpa(vcpu, va);
- break;
- case 2: /* register DTL */
- if (len < 48)
- goto out_unpin;
- len -= len % 48;
- if (tvcpu->arch.dtl)
- kvmppc_unpin_guest_page(kvm, vcpu->arch.dtl);
- tvcpu->arch.dtl = va;
- tvcpu->arch.dtl_end = va + len;
+ len = ((struct reg_vpa *)va)->length.word;
+ kvmppc_unpin_guest_page(kvm, va);
+
+ /* Check length */
+ if (len > nb || len < sizeof(struct reg_vpa))
+ return H_PARAMETER;
+ } else {
+ vpa = 0;
+ len = 0;
+ }
+
+ err = H_PARAMETER;
+ vpap = NULL;
+ spin_lock(&tvcpu->arch.vpa_update_lock);
+
+ switch (subfunc) {
+ case H_VPA_REG_VPA: /* register VPA */
+ if (len < sizeof(struct lppaca))
break;
- case 3: /* register SLB shadow buffer */
- if (len < 16)
- goto out_unpin;
- if (tvcpu->arch.slb_shadow)
- kvmppc_unpin_guest_page(kvm, vcpu->arch.slb_shadow);
- tvcpu->arch.slb_shadow = va;
+ vpap = &tvcpu->arch.vpa;
+ err = 0;
+ break;
+
+ case H_VPA_REG_DTL: /* register DTL */
+ if (len < sizeof(struct dtl_entry))
break;
- }
- } else {
- switch (flags) {
- case 5: /* unregister VPA */
- if (tvcpu->arch.slb_shadow || tvcpu->arch.dtl)
- return H_RESOURCE;
- if (!tvcpu->arch.vpa)
- break;
- kvmppc_unpin_guest_page(kvm, tvcpu->arch.vpa);
- tvcpu->arch.vpa = NULL;
+ len -= len % sizeof(struct dtl_entry);
+
+ /* Check that they have previously registered a VPA */
+ err = H_RESOURCE;
+ if (!vpa_is_registered(&tvcpu->arch.vpa))
break;
- case 6: /* unregister DTL */
- if (!tvcpu->arch.dtl)
- break;
- kvmppc_unpin_guest_page(kvm, tvcpu->arch.dtl);
- tvcpu->arch.dtl = NULL;
+
+ vpap = &tvcpu->arch.dtl;
+ err = 0;
+ break;
+
+ case H_VPA_REG_SLB: /* register SLB shadow buffer */
+ /* Check that they have previously registered a VPA */
+ err = H_RESOURCE;
+ if (!vpa_is_registered(&tvcpu->arch.vpa))
break;
- case 7: /* unregister SLB shadow buffer */
- if (!tvcpu->arch.slb_shadow)
- break;
- kvmppc_unpin_guest_page(kvm, tvcpu->arch.slb_shadow);
- tvcpu->arch.slb_shadow = NULL;
+
+ vpap = &tvcpu->arch.slb_shadow;
+ err = 0;
+ break;
+
+ case H_VPA_DEREG_VPA: /* deregister VPA */
+ /* Check they don't still have a DTL or SLB buf registered */
+ err = H_RESOURCE;
+ if (vpa_is_registered(&tvcpu->arch.dtl) ||
+ vpa_is_registered(&tvcpu->arch.slb_shadow))
break;
- }
+
+ vpap = &tvcpu->arch.vpa;
+ err = 0;
+ break;
+
+ case H_VPA_DEREG_DTL: /* deregister DTL */
+ vpap = &tvcpu->arch.dtl;
+ err = 0;
+ break;
+
+ case H_VPA_DEREG_SLB: /* deregister SLB shadow buffer */
+ vpap = &tvcpu->arch.slb_shadow;
+ err = 0;
+ break;
+ }
+
+ if (vpap) {
+ vpap->next_gpa = vpa;
+ vpap->len = len;
+ vpap->update_pending = 1;
}
- return H_SUCCESS;
- out_unpin:
- kvmppc_unpin_guest_page(kvm, va);
+ spin_unlock(&tvcpu->arch.vpa_update_lock);
+
return err;
}
+static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
+{
+ void *va;
+ unsigned long nb;
+
+ vpap->update_pending = 0;
+ va = NULL;
+ if (vpap->next_gpa) {
+ va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
+ if (nb < vpap->len) {
+ /*
+ * If it's now too short, it must be that userspace
+ * has changed the mappings underlying guest memory,
+ * so unregister the region.
+ */
+ kvmppc_unpin_guest_page(kvm, va);
+ va = NULL;
+ }
+ }
+ if (vpap->pinned_addr)
+ kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
+ vpap->pinned_addr = va;
+ if (va)
+ vpap->pinned_end = va + vpap->len;
+}
+
+static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ spin_lock(&vcpu->arch.vpa_update_lock);
+ if (vcpu->arch.vpa.update_pending) {
+ kvmppc_update_vpa(kvm, &vcpu->arch.vpa);
+ init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
+ }
+ if (vcpu->arch.dtl.update_pending) {
+ kvmppc_update_vpa(kvm, &vcpu->arch.dtl);
+ vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
+ vcpu->arch.dtl_index = 0;
+ }
+ if (vcpu->arch.slb_shadow.update_pending)
+ kvmppc_update_vpa(kvm, &vcpu->arch.slb_shadow);
+ spin_unlock(&vcpu->arch.vpa_update_lock);
+}
+
+static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
+ struct kvmppc_vcore *vc)
+{
+ struct dtl_entry *dt;
+ struct lppaca *vpa;
+ unsigned long old_stolen;
+
+ dt = vcpu->arch.dtl_ptr;
+ vpa = vcpu->arch.vpa.pinned_addr;
+ old_stolen = vcpu->arch.stolen_logged;
+ vcpu->arch.stolen_logged = vc->stolen_tb;
+ if (!dt || !vpa)
+ return;
+ memset(dt, 0, sizeof(struct dtl_entry));
+ dt->dispatch_reason = 7;
+ dt->processor_id = vc->pcpu + vcpu->arch.ptid;
+ dt->timebase = mftb();
+ dt->enqueue_to_dispatch_time = vc->stolen_tb - old_stolen;
+ dt->srr0 = kvmppc_get_pc(vcpu);
+ dt->srr1 = vcpu->arch.shregs.msr;
+ ++dt;
+ if (dt == vcpu->arch.dtl.pinned_end)
+ dt = vcpu->arch.dtl.pinned_addr;
+ vcpu->arch.dtl_ptr = dt;
+ /* order writing *dt vs. writing vpa->dtl_idx */
+ smp_wmb();
+ vpa->dtl_idx = ++vcpu->arch.dtl_index;
+}
+
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
unsigned long req = kvmppc_get_gpr(vcpu, 3);
@@ -468,6 +586,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
/* default to host PVR, since we can't spoof it */
vcpu->arch.pvr = mfspr(SPRN_PVR);
kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
+ spin_lock_init(&vcpu->arch.vpa_update_lock);
kvmppc_mmu_book3s_hv_init(vcpu);
@@ -486,6 +605,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
INIT_LIST_HEAD(&vcore->runnable_threads);
spin_lock_init(&vcore->lock);
init_waitqueue_head(&vcore->wq);
+ vcore->preempt_tb = mftb();
}
kvm->arch.vcores[core] = vcore;
}
@@ -498,6 +618,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
++vcore->num_threads;
spin_unlock(&vcore->lock);
vcpu->arch.vcore = vcore;
+ vcpu->arch.stolen_logged = vcore->stolen_tb;
vcpu->arch.cpu_type = KVM_CPU_3S_64;
kvmppc_sanity_check(vcpu);
@@ -512,12 +633,14 @@ out:
void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.dtl)
- kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl);
- if (vcpu->arch.slb_shadow)
- kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow);
- if (vcpu->arch.vpa)
- kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa);
+ spin_lock(&vcpu->arch.vpa_update_lock);
+ if (vcpu->arch.dtl.pinned_addr)
+ kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl.pinned_addr);
+ if (vcpu->arch.slb_shadow.pinned_addr)
+ kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow.pinned_addr);
+ if (vcpu->arch.vpa.pinned_addr)
+ kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa.pinned_addr);
+ spin_unlock(&vcpu->arch.vpa_update_lock);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vcpu);
}
@@ -569,6 +692,45 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
list_del(&vcpu->arch.run_list);
}
+static int kvmppc_grab_hwthread(int cpu)
+{
+ struct paca_struct *tpaca;
+ long timeout = 1000;
+
+ tpaca = &paca[cpu];
+
+ /* Ensure the thread won't go into the kernel if it wakes */
+ tpaca->kvm_hstate.hwthread_req = 1;
+
+ /*
+ * If the thread is already executing in the kernel (e.g. handling
+ * a stray interrupt), wait for it to get back to nap mode.
+ * The smp_mb() is to ensure that our setting of hwthread_req
+ * is visible before we look at hwthread_state, so if this
+ * races with the code at system_reset_pSeries and the thread
+ * misses our setting of hwthread_req, we are sure to see its
+ * setting of hwthread_state, and vice versa.
+ */
+ smp_mb();
+ while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
+ if (--timeout <= 0) {
+ pr_err("KVM: couldn't grab cpu %d\n", cpu);
+ return -EBUSY;
+ }
+ udelay(1);
+ }
+ return 0;
+}
+
+static void kvmppc_release_hwthread(int cpu)
+{
+ struct paca_struct *tpaca;
+
+ tpaca = &paca[cpu];
+ tpaca->kvm_hstate.hwthread_req = 0;
+ tpaca->kvm_hstate.kvm_vcpu = NULL;
+}
+
static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
{
int cpu;
@@ -588,8 +750,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
smp_wmb();
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
if (vcpu->arch.ptid) {
- tpaca->cpu_start = 0x80;
- wmb();
+ kvmppc_grab_hwthread(cpu);
xics_wake_cpu(cpu);
++vc->n_woken;
}
@@ -639,7 +800,7 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
struct kvm_vcpu *vcpu, *vcpu0, *vnext;
long ret;
u64 now;
- int ptid;
+ int ptid, i;
/* don't start if any threads have a signal pending */
list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
@@ -681,17 +842,29 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
vc->nap_count = 0;
vc->entry_exit_count = 0;
vc->vcore_state = VCORE_RUNNING;
+ vc->stolen_tb += mftb() - vc->preempt_tb;
vc->in_guest = 0;
vc->pcpu = smp_processor_id();
vc->napping_threads = 0;
- list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
kvmppc_start_thread(vcpu);
+ if (vcpu->arch.vpa.update_pending ||
+ vcpu->arch.slb_shadow.update_pending ||
+ vcpu->arch.dtl.update_pending)
+ kvmppc_update_vpas(vcpu);
+ kvmppc_create_dtl_entry(vcpu, vc);
+ }
+ /* Grab any remaining hw threads so they can't go into the kernel */
+ for (i = ptid; i < threads_per_core; ++i)
+ kvmppc_grab_hwthread(vc->pcpu + i);
preempt_disable();
spin_unlock(&vc->lock);
kvm_guest_enter();
__kvmppc_vcore_entry(NULL, vcpu0);
+ for (i = 0; i < threads_per_core; ++i)
+ kvmppc_release_hwthread(vc->pcpu + i);
spin_lock(&vc->lock);
/* disable sending of IPIs on virtual external irqs */
@@ -737,6 +910,7 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
spin_lock(&vc->lock);
out:
vc->vcore_state = VCORE_INACTIVE;
+ vc->preempt_tb = mftb();
list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
arch.run_list) {
if (vcpu->arch.ret != RESUME_GUEST) {
@@ -835,6 +1009,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
spin_lock(&vc->lock);
continue;
}
+ vc->runner = vcpu;
n_ceded = 0;
list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
n_ceded += v->arch.ceded;
@@ -854,6 +1029,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
wake_up(&v->arch.cpu_run);
}
}
+ vc->runner = NULL;
}
if (signal_pending(current)) {
@@ -917,115 +1093,6 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
return r;
}
-static long kvmppc_stt_npages(unsigned long window_size)
-{
- return ALIGN((window_size >> SPAPR_TCE_SHIFT)
- * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
-}
-
-static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
-{
- struct kvm *kvm = stt->kvm;
- int i;
-
- mutex_lock(&kvm->lock);
- list_del(&stt->list);
- for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
- __free_page(stt->pages[i]);
- kfree(stt);
- mutex_unlock(&kvm->lock);
-
- kvm_put_kvm(kvm);
-}
-
-static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
- struct page *page;
-
- if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
- return VM_FAULT_SIGBUS;
-
- page = stt->pages[vmf->pgoff];
- get_page(page);
- vmf->page = page;
- return 0;
-}
-
-static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
- .fault = kvm_spapr_tce_fault,
-};
-
-static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
-{
- vma->vm_ops = &kvm_spapr_tce_vm_ops;
- return 0;
-}
-
-static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
-{
- struct kvmppc_spapr_tce_table *stt = filp->private_data;
-
- release_spapr_tce_table(stt);
- return 0;
-}
-
-static struct file_operations kvm_spapr_tce_fops = {
- .mmap = kvm_spapr_tce_mmap,
- .release = kvm_spapr_tce_release,
-};
-
-long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
- struct kvm_create_spapr_tce *args)
-{
- struct kvmppc_spapr_tce_table *stt = NULL;
- long npages;
- int ret = -ENOMEM;
- int i;
-
- /* Check this LIOBN hasn't been previously allocated */
- list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
- if (stt->liobn == args->liobn)
- return -EBUSY;
- }
-
- npages = kvmppc_stt_npages(args->window_size);
-
- stt = kzalloc(sizeof(*stt) + npages* sizeof(struct page *),
- GFP_KERNEL);
- if (!stt)
- goto fail;
-
- stt->liobn = args->liobn;
- stt->window_size = args->window_size;
- stt->kvm = kvm;
-
- for (i = 0; i < npages; i++) {
- stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!stt->pages[i])
- goto fail;
- }
-
- kvm_get_kvm(kvm);
-
- mutex_lock(&kvm->lock);
- list_add(&stt->list, &kvm->arch.spapr_tce_tables);
-
- mutex_unlock(&kvm->lock);
-
- return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
- stt, O_RDWR);
-
-fail:
- if (stt) {
- for (i = 0; i < npages; i++)
- if (stt->pages[i])
- __free_page(stt->pages[i]);
-
- kfree(stt);
- }
- return ret;
-}
/* Work out RMLS (real mode limit selector) field value for a given RMA size.
Assumes POWER7 or PPC970. */
@@ -1108,6 +1175,38 @@ long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
return fd;
}
+static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
+ int linux_psize)
+{
+ struct mmu_psize_def *def = &mmu_psize_defs[linux_psize];
+
+ if (!def->shift)
+ return;
+ (*sps)->page_shift = def->shift;
+ (*sps)->slb_enc = def->sllp;
+ (*sps)->enc[0].page_shift = def->shift;
+ (*sps)->enc[0].pte_enc = def->penc;
+ (*sps)++;
+}
+
+int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
+{
+ struct kvm_ppc_one_seg_page_size *sps;
+
+ info->flags = KVM_PPC_PAGE_SIZES_REAL;
+ if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
+ info->flags |= KVM_PPC_1T_SEGMENTS;
+ info->slb_size = mmu_slb_size;
+
+	/* We only support these sizes for now, and no multi-size segments */
+ sps = &info->sps[0];
+ kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K);
+ kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K);
+ kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M);
+
+ return 0;
+}
+
/*
* Get (and clear) the dirty memory log for a memory slot.
*/
@@ -1404,12 +1503,12 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_FAIL;
}
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
return EMULATE_FAIL;
}
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
return EMULATE_FAIL;
}
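
Beyond the VPA and TCE rework, book3s_hv.c also gains kvm_vm_ioctl_get_smmu_info(), which reports the segment page sizes assembled by kvmppc_add_seg_page_size() above. A hedged sketch of how userspace might consume it; it assumes <linux/kvm.h> already defines KVM_PPC_GET_SMMU_INFO, KVM_PPC_PAGE_SIZES_MAX_SZ and struct kvm_ppc_smmu_info with the fields used above.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative only: print the MMU geometry the host advertises for guests. */
static int dump_smmu_info(int vmfd)
{
	struct kvm_ppc_smmu_info info;
	unsigned int i;

	if (ioctl(vmfd, KVM_PPC_GET_SMMU_INFO, &info) < 0) {
		perror("KVM_PPC_GET_SMMU_INFO");
		return -1;
	}
	printf("slb_size=%u, 1T segments: %s\n", info.slb_size,
	       (info.flags & KVM_PPC_1T_SEGMENTS) ? "yes" : "no");
	for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
		if (!info.sps[i].page_shift)
			continue;
		printf("  segment page shift %u (slb_enc 0x%x)\n",
		       info.sps[i].page_shift, info.sps[i].slb_enc);
	}
	return 0;
}
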
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index d3fb4df02c4..84035a528c8 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -68,19 +68,24 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
rotldi r10,r10,16
mtmsrd r10,1
- /* Save host PMU registers and load guest PMU registers */
+ /* Save host PMU registers */
/* R4 is live here (vcpu pointer) but not r3 or r5 */
li r3, 1
sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
mfspr r7, SPRN_MMCR0 /* save MMCR0 */
mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */
+ mfspr r6, SPRN_MMCRA
+BEGIN_FTR_SECTION
+ /* On P7, clear MMCRA in order to disable SDAR updates */
+ li r5, 0
+ mtspr SPRN_MMCRA, r5
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
isync
ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
lbz r5, LPPACA_PMCINUSE(r3)
cmpwi r5, 0
beq 31f /* skip if not */
mfspr r5, SPRN_MMCR1
- mfspr r6, SPRN_MMCRA
std r7, HSTATE_MMCR(r13)
std r5, HSTATE_MMCR + 8(r13)
std r6, HSTATE_MMCR + 16(r13)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index b70bf22a3ff..a84aafce2a1 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -26,6 +26,7 @@
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
+#include <asm/kvm_book3s_asm.h>
/*****************************************************************************
* *
@@ -82,6 +83,7 @@ _GLOBAL(kvmppc_hv_entry_trampoline)
#define XICS_XIRR 4
#define XICS_QIRR 0xc
+#define XICS_IPI 2 /* interrupt source # for IPIs */
/*
* We come in here when wakened from nap mode on a secondary hw thread.
@@ -94,26 +96,54 @@ kvm_start_guest:
subi r1,r1,STACK_FRAME_OVERHEAD
ld r2,PACATOC(r13)
- /* were we napping due to cede? */
- lbz r0,HSTATE_NAPPING(r13)
- cmpwi r0,0
- bne kvm_end_cede
+ li r0,KVM_HWTHREAD_IN_KVM
+ stb r0,HSTATE_HWTHREAD_STATE(r13)
- /* get vcpu pointer */
- ld r4, HSTATE_KVM_VCPU(r13)
+ /* NV GPR values from power7_idle() will no longer be valid */
+ li r0,1
+ stb r0,PACA_NAPSTATELOST(r13)
- /* We got here with an IPI; clear it */
- ld r5, HSTATE_XICS_PHYS(r13)
- li r0, 0xff
- li r6, XICS_QIRR
- li r7, XICS_XIRR
- lwzcix r8, r5, r7 /* ack the interrupt */
+ /* get vcpu pointer, NULL if we have no vcpu to run */
+ ld r4,HSTATE_KVM_VCPU(r13)
+ cmpdi cr1,r4,0
+
+ /* Check the wake reason in SRR1 to see why we got here */
+ mfspr r3,SPRN_SRR1
+ rlwinm r3,r3,44-31,0x7 /* extract wake reason field */
+ cmpwi r3,4 /* was it an external interrupt? */
+ bne 27f
+
+ /*
+ * External interrupt - for now assume it is an IPI, since we
+ * should never get any other interrupts sent to offline threads.
+ * Only do this for secondary threads.
+ */
+ beq cr1,25f
+ lwz r3,VCPU_PTID(r4)
+ cmpwi r3,0
+ beq 27f
+25: ld r5,HSTATE_XICS_PHYS(r13)
+ li r0,0xff
+ li r6,XICS_QIRR
+ li r7,XICS_XIRR
+ lwzcix r8,r5,r7 /* get and ack the interrupt */
sync
- stbcix r0, r5, r6 /* clear it */
- stwcix r8, r5, r7 /* EOI it */
+ clrldi. r9,r8,40 /* get interrupt source ID. */
+ beq 27f /* none there? */
+ cmpwi r9,XICS_IPI
+ bne 26f
+ stbcix r0,r5,r6 /* clear IPI */
+26: stwcix r8,r5,r7 /* EOI the interrupt */
- /* NV GPR values from power7_idle() will no longer be valid */
- stb r0, PACA_NAPSTATELOST(r13)
+27: /* XXX should handle hypervisor maintenance interrupts etc. here */
+
+ /* if we have no vcpu to run, go back to sleep */
+ beq cr1,kvm_no_guest
+
+ /* were we napping due to cede? */
+ lbz r0,HSTATE_NAPPING(r13)
+ cmpwi r0,0
+ bne kvm_end_cede
.global kvmppc_hv_entry
kvmppc_hv_entry:
@@ -129,24 +159,15 @@ kvmppc_hv_entry:
mflr r0
std r0, HSTATE_VMHANDLER(r13)
- ld r14, VCPU_GPR(r14)(r4)
- ld r15, VCPU_GPR(r15)(r4)
- ld r16, VCPU_GPR(r16)(r4)
- ld r17, VCPU_GPR(r17)(r4)
- ld r18, VCPU_GPR(r18)(r4)
- ld r19, VCPU_GPR(r19)(r4)
- ld r20, VCPU_GPR(r20)(r4)
- ld r21, VCPU_GPR(r21)(r4)
- ld r22, VCPU_GPR(r22)(r4)
- ld r23, VCPU_GPR(r23)(r4)
- ld r24, VCPU_GPR(r24)(r4)
- ld r25, VCPU_GPR(r25)(r4)
- ld r26, VCPU_GPR(r26)(r4)
- ld r27, VCPU_GPR(r27)(r4)
- ld r28, VCPU_GPR(r28)(r4)
- ld r29, VCPU_GPR(r29)(r4)
- ld r30, VCPU_GPR(r30)(r4)
- ld r31, VCPU_GPR(r31)(r4)
+ /* Set partition DABR */
+ /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
+ li r5,3
+ ld r6,VCPU_DABR(r4)
+ mtspr SPRN_DABRX,r5
+ mtspr SPRN_DABR,r6
+BEGIN_FTR_SECTION
+ isync
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* Load guest PMU registers */
/* R4 is live here (vcpu pointer) */
@@ -185,6 +206,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
/* Load up FP, VMX and VSX registers */
bl kvmppc_load_fp
+ ld r14, VCPU_GPR(r14)(r4)
+ ld r15, VCPU_GPR(r15)(r4)
+ ld r16, VCPU_GPR(r16)(r4)
+ ld r17, VCPU_GPR(r17)(r4)
+ ld r18, VCPU_GPR(r18)(r4)
+ ld r19, VCPU_GPR(r19)(r4)
+ ld r20, VCPU_GPR(r20)(r4)
+ ld r21, VCPU_GPR(r21)(r4)
+ ld r22, VCPU_GPR(r22)(r4)
+ ld r23, VCPU_GPR(r23)(r4)
+ ld r24, VCPU_GPR(r24)(r4)
+ ld r25, VCPU_GPR(r25)(r4)
+ ld r26, VCPU_GPR(r26)(r4)
+ ld r27, VCPU_GPR(r27)(r4)
+ ld r28, VCPU_GPR(r28)(r4)
+ ld r29, VCPU_GPR(r29)(r4)
+ ld r30, VCPU_GPR(r30)(r4)
+ ld r31, VCPU_GPR(r31)(r4)
+
BEGIN_FTR_SECTION
/* Switch DSCR to guest value */
ld r5, VCPU_DSCR(r4)
@@ -226,12 +266,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
mtspr SPRN_DAR, r5
mtspr SPRN_DSISR, r6
- /* Set partition DABR */
- li r5,3
- ld r6,VCPU_DABR(r4)
- mtspr SPRN_DABRX,r5
- mtspr SPRN_DABR,r6
-
BEGIN_FTR_SECTION
/* Restore AMR and UAMOR, set AMOR to all 1s */
ld r5,VCPU_AMR(r4)
@@ -925,12 +959,6 @@ BEGIN_FTR_SECTION
mtspr SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
- /* Restore host DABR and DABRX */
- ld r5,HSTATE_DABR(r13)
- li r6,7
- mtspr SPRN_DABR,r5
- mtspr SPRN_DABRX,r6
-
/* Switch DSCR back to host value */
BEGIN_FTR_SECTION
mfspr r8, SPRN_DSCR
@@ -969,6 +997,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
std r5, VCPU_SPRG2(r9)
std r6, VCPU_SPRG3(r9)
+ /* save FP state */
+ mr r3, r9
+ bl .kvmppc_save_fp
+
/* Increment yield count if they have a VPA */
ld r8, VCPU_VPA(r9) /* do they have a VPA? */
cmpdi r8, 0
@@ -983,6 +1015,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
mfspr r4, SPRN_MMCR0 /* save MMCR0 */
mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
+ mfspr r6, SPRN_MMCRA
+BEGIN_FTR_SECTION
+ /* On P7, clear MMCRA in order to disable SDAR updates */
+ li r7, 0
+ mtspr SPRN_MMCRA, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
isync
beq 21f /* if no VPA, save PMU stuff anyway */
lbz r7, LPPACA_PMCINUSE(r8)
@@ -991,7 +1029,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
b 22f
21: mfspr r5, SPRN_MMCR1
- mfspr r6, SPRN_MMCRA
std r4, VCPU_MMCR(r9)
std r5, VCPU_MMCR + 8(r9)
std r6, VCPU_MMCR + 16(r9)
@@ -1016,17 +1053,20 @@ BEGIN_FTR_SECTION
stw r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
22:
- /* save FP state */
- mr r3, r9
- bl .kvmppc_save_fp
/* Secondary threads go off to take a nap on POWER7 */
BEGIN_FTR_SECTION
- lwz r0,VCPU_PTID(r3)
+ lwz r0,VCPU_PTID(r9)
cmpwi r0,0
bne secondary_nap
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+ /* Restore host DABR and DABRX */
+ ld r5,HSTATE_DABR(r13)
+ li r6,7
+ mtspr SPRN_DABR,r5
+ mtspr SPRN_DABRX,r6
+
/*
* Reload DEC. HDEC interrupts were disabled when
* we reloaded the host's LPCR value.
@@ -1363,7 +1403,12 @@ bounce_ext_interrupt:
_GLOBAL(kvmppc_h_set_dabr)
std r4,VCPU_DABR(r3)
- mtspr SPRN_DABR,r4
+ /* Work around P7 bug where DABR can get corrupted on mtspr */
+1: mtspr SPRN_DABR,r4
+ mfspr r5, SPRN_DABR
+ cmpd r4, r5
+ bne 1b
+ isync
li r3,0
blr
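The loop above writes DABR and re-reads it until the value sticks. A sketch of the same pattern in C, with hypothetical read/write wrappers used purely for illustration:

/* Sketch of the POWER7 DABR write/verify retry above (wrapper names hypothetical). */
static void set_dabr_checked(unsigned long dabr)
{
	do {
		write_dabr(dabr);		/* mtspr SPRN_DABR */
	} while (read_dabr() != dabr);		/* mfspr and compare; retry if corrupted */
	/* followed by an isync to order later storage accesses */
}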
@@ -1445,8 +1490,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
* Take a nap until a decrementer or external interrupt occurs,
* with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
*/
- li r0,0x80
- stb r0,PACAPROCSTART(r13)
+ li r0,1
+ stb r0,HSTATE_HWTHREAD_REQ(r13)
mfspr r5,SPRN_LPCR
ori r5,r5,LPCR_PECE0 | LPCR_PECE1
mtspr SPRN_LPCR,r5
@@ -1463,26 +1508,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
kvm_end_cede:
/* Woken by external or decrementer interrupt */
ld r1, HSTATE_HOST_R1(r13)
- ld r2, PACATOC(r13)
- /* If we're a secondary thread and we got here by an IPI, ack it */
- ld r4,HSTATE_KVM_VCPU(r13)
- lwz r3,VCPU_PTID(r4)
- cmpwi r3,0
- beq 27f
- mfspr r3,SPRN_SRR1
- rlwinm r3,r3,44-31,0x7 /* extract wake reason field */
- cmpwi r3,4 /* was it an external interrupt? */
- bne 27f
- ld r5, HSTATE_XICS_PHYS(r13)
- li r0,0xff
- li r6,XICS_QIRR
- li r7,XICS_XIRR
- lwzcix r8,r5,r7 /* ack the interrupt */
- sync
- stbcix r0,r5,r6 /* clear it */
- stwcix r8,r5,r7 /* EOI it */
-27:
/* load up FP state */
bl kvmppc_load_fp
@@ -1580,12 +1606,17 @@ secondary_nap:
stwcx. r3, 0, r4
bne 51b
+kvm_no_guest:
+ li r0, KVM_HWTHREAD_IN_NAP
+ stb r0, HSTATE_HWTHREAD_STATE(r13)
+ li r0, 0
+ std r0, HSTATE_KVM_VCPU(r13)
+
li r3, LPCR_PECE0
mfspr r4, SPRN_LPCR
rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
mtspr SPRN_LPCR, r4
isync
- li r0, 0
std r0, HSTATE_SCRATCH0(r13)
ptesync
ld r0, HSTATE_SCRATCH0(r13)
@@ -1599,8 +1630,8 @@ secondary_nap:
* r3 = vcpu pointer
*/
_GLOBAL(kvmppc_save_fp)
- mfmsr r9
- ori r8,r9,MSR_FP
+ mfmsr r5
+ ori r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
oris r8,r8,MSR_VEC@h
@@ -1649,7 +1680,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
mfspr r6,SPRN_VRSAVE
stw r6,VCPU_VRSAVE(r3)
- mtmsrd r9
+ mtmsrd r5
isync
blr
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 7759053d391..a1baec340f7 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -120,6 +120,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
if (msr & MSR_POW) {
if (!vcpu->arch.pending_exceptions) {
kvm_vcpu_block(vcpu);
+ clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
vcpu->stat.halt_wakeup++;
/* Unset POW bit after we woke up */
@@ -144,6 +145,21 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
}
}
+ /*
+ * When switching from 32-bit to 64-bit mode, we may have a stale
+ * 32-bit magic page around which we need to flush. Typically the
+ * 32-bit magic page will be instantiated when calling into RTAS.
+ * Note: we assume that such a transition only happens while in
+ * kernel mode, i.e., we never transition from user 32-bit to
+ * kernel 64-bit with a 32-bit magic page around.
+ */
+ if (vcpu->arch.magic_page_pa &&
+ !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
+ /* going from RTAS to normal kernel code */
+ kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
+ ~0xFFFUL);
+ }
+
/* Preload FPU if it's enabled */
if (vcpu->arch.shared->msr & MSR_FP)
kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
@@ -251,6 +267,9 @@ static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
ulong mp_pa = vcpu->arch.magic_page_pa;
+ if (!(vcpu->arch.shared->msr & MSR_SF))
+ mp_pa = (uint32_t)mp_pa;
+
if (unlikely(mp_pa) &&
unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
return 1;
@@ -351,6 +370,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* MMIO */
vcpu->stat.mmio_exits++;
vcpu->arch.paddr_accessed = pte.raddr;
+ vcpu->arch.vaddr_accessed = pte.eaddr;
r = kvmppc_emulate_mmio(run, vcpu);
if ( r == RESUME_HOST_NV )
r = RESUME_HOST;
@@ -528,6 +548,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
run->exit_reason = KVM_EXIT_UNKNOWN;
run->ready_for_interrupt_injection = 1;
+ /* We get here with MSR.EE=0, so enable it to be a nice citizen */
+ __hard_irq_enable();
+
trace_kvm_book3s_exit(exit_nr, vcpu);
preempt_enable();
kvm_resched(vcpu);
@@ -617,10 +640,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
/* We're good on these - the host merely wanted to get our attention */
case BOOK3S_INTERRUPT_DECREMENTER:
+ case BOOK3S_INTERRUPT_HV_DECREMENTER:
vcpu->stat.dec_exits++;
r = RESUME_GUEST;
break;
case BOOK3S_INTERRUPT_EXTERNAL:
+ case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
+ case BOOK3S_INTERRUPT_EXTERNAL_HV:
vcpu->stat.ext_intr_exits++;
r = RESUME_GUEST;
break;
@@ -628,6 +654,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = RESUME_GUEST;
break;
case BOOK3S_INTERRUPT_PROGRAM:
+ case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
{
enum emulation_result er;
struct kvmppc_book3s_shadow_vcpu *svcpu;
@@ -1131,6 +1158,31 @@ out:
return r;
}
+#ifdef CONFIG_PPC64
+int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
+{
+ /* No flags */
+ info->flags = 0;
+
+ /* SLB is always 64 entries */
+ info->slb_size = 64;
+
+ /* Standard 4k base page size segment */
+ info->sps[0].page_shift = 12;
+ info->sps[0].slb_enc = 0;
+ info->sps[0].enc[0].page_shift = 12;
+ info->sps[0].enc[0].pte_enc = 0;
+
+ /* Standard 16M large page size segment */
+ info->sps[1].page_shift = 24;
+ info->sps[1].slb_enc = SLB_VSID_L;
+ info->sps[1].enc[0].page_shift = 24;
+ info->sps[1].enc[0].pte_enc = 0;
+
+ return 0;
+}
+#endif /* CONFIG_PPC64 */
+
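Userspace reads this structure through the KVM_PPC_GET_SMMU_INFO VM ioctl; a hedged sketch of that call (error handling elided, ioctl and struct names assumed from the KVM headers of this series):

/* Hedged userspace sketch: dump the MMU info advertised by the code above. */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

static void dump_smmu_info(int vm_fd)
{
	struct kvm_ppc_smmu_info info;

	if (ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &info) < 0)
		return;

	/* With the PR implementation above: sps[0] is the 4k segment, sps[1] is 16M. */
	printf("slb_size=%u base_shift=%u large_shift=%u\n",
	       info.slb_size, info.sps[0].page_shift, info.sps[1].page_shift);
}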
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem)
{
@@ -1144,11 +1196,18 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
int kvmppc_core_init_vm(struct kvm *kvm)
{
+#ifdef CONFIG_PPC64
+ INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+#endif
+
return 0;
}
void kvmppc_core_destroy_vm(struct kvm *kvm)
{
+#ifdef CONFIG_PPC64
+ WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
+#endif
}
static int kvmppc_book3s_init(void)
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index b9589324797..3ff9013d6e7 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -15,6 +15,8 @@
* published by the Free Software Foundation.
*/
+#include <linux/anon_inodes.h>
+
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
@@ -98,6 +100,83 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
+/* Request defs for kvmppc_h_pr_bulk_remove() */
+#define H_BULK_REMOVE_TYPE 0xc000000000000000ULL
+#define H_BULK_REMOVE_REQUEST 0x4000000000000000ULL
+#define H_BULK_REMOVE_RESPONSE 0x8000000000000000ULL
+#define H_BULK_REMOVE_END 0xc000000000000000ULL
+#define H_BULK_REMOVE_CODE 0x3000000000000000ULL
+#define H_BULK_REMOVE_SUCCESS 0x0000000000000000ULL
+#define H_BULK_REMOVE_NOT_FOUND 0x1000000000000000ULL
+#define H_BULK_REMOVE_PARM 0x2000000000000000ULL
+#define H_BULK_REMOVE_HW 0x3000000000000000ULL
+#define H_BULK_REMOVE_RC 0x0c00000000000000ULL
+#define H_BULK_REMOVE_FLAGS 0x0300000000000000ULL
+#define H_BULK_REMOVE_ABSOLUTE 0x0000000000000000ULL
+#define H_BULK_REMOVE_ANDCOND 0x0100000000000000ULL
+#define H_BULK_REMOVE_AVPN 0x0200000000000000ULL
+#define H_BULK_REMOVE_PTEX 0x00ffffffffffffffULL
+#define H_BULK_REMOVE_MAX_BATCH 4
+
+static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
+{
+ int i;
+ int paramnr = 4;
+ int ret = H_SUCCESS;
+
+ for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
+ unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
+ unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);
+ unsigned long pteg, rb, flags;
+ unsigned long pte[2];
+ unsigned long v = 0;
+
+ if ((tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
+ break; /* Exit success */
+ } else if ((tsh & H_BULK_REMOVE_TYPE) !=
+ H_BULK_REMOVE_REQUEST) {
+ ret = H_PARAMETER;
+ break; /* Exit fail */
+ }
+
+ tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
+ tsh |= H_BULK_REMOVE_RESPONSE;
+
+ if ((tsh & H_BULK_REMOVE_ANDCOND) &&
+ (tsh & H_BULK_REMOVE_AVPN)) {
+ tsh |= H_BULK_REMOVE_PARM;
+ kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
+ ret = H_PARAMETER;
+ break; /* Exit fail */
+ }
+
+ pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
+ copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+
+ /* tsl = AVPN */
+ flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26;
+
+ if ((pte[0] & HPTE_V_VALID) == 0 ||
+ ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != tsl) ||
+ ((flags & H_ANDCOND) && (pte[0] & tsl) != 0)) {
+ tsh |= H_BULK_REMOVE_NOT_FOUND;
+ } else {
+ /* Splat the pteg in (userland) hpt */
+ copy_to_user((void __user *)pteg, &v, sizeof(v));
+
+ rb = compute_tlbie_rb(pte[0], pte[1],
+ tsh & H_BULK_REMOVE_PTEX);
+ vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
+ tsh |= H_BULK_REMOVE_SUCCESS;
+ tsh |= (pte[1] & (HPTE_R_C | HPTE_R_R)) << 43;
+ }
+ kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
+ }
+ kvmppc_set_gpr(vcpu, 3, ret);
+
+ return EMULATE_DONE;
+}
+
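For clarity, one request doubleword ("tsh") passed to the handler above decomposes under the masks defined at the top of this block as in the following sketch (plain bit manipulation, no kernel interfaces):

/* Decompose one H_BULK_REMOVE request word (illustrative only). */
static void decode_bulk_remove_entry(unsigned long long tsh)
{
	unsigned long long type  = tsh & H_BULK_REMOVE_TYPE;	/* request/response/end */
	unsigned long long flags = tsh & H_BULK_REMOVE_FLAGS;	/* absolute/andcond/avpn */
	unsigned long long ptex  = tsh & H_BULK_REMOVE_PTEX;	/* HPT entry index */

	if (type == H_BULK_REMOVE_END)
		return;			/* terminator: fewer than 4 entries were used */
	if (type != H_BULK_REMOVE_REQUEST)
		return;			/* malformed entry, handler returns H_PARAMETER */
	(void)flags;
	(void)ptex;
}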
static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
{
unsigned long flags = kvmppc_get_gpr(vcpu, 4);
@@ -134,6 +213,20 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
+static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
+{
+ unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
+ unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
+ unsigned long tce = kvmppc_get_gpr(vcpu, 6);
+ long rc;
+
+ rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
+ if (rc == H_TOO_HARD)
+ return EMULATE_FAIL;
+ kvmppc_set_gpr(vcpu, 3, rc);
+ return EMULATE_DONE;
+}
+
int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
{
switch (cmd) {
@@ -144,12 +237,12 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
case H_PROTECT:
return kvmppc_h_pr_protect(vcpu);
case H_BULK_REMOVE:
- /* We just flush all PTEs, so user space can
- handle the HPT modifications */
- kvmppc_mmu_pte_flush(vcpu, 0, 0);
- break;
+ return kvmppc_h_pr_bulk_remove(vcpu);
+ case H_PUT_TCE:
+ return kvmppc_h_pr_put_tce(vcpu);
case H_CEDE:
kvm_vcpu_block(vcpu);
+ clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
vcpu->stat.halt_wakeup++;
return EMULATE_DONE;
}
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 6e6e9cef34a..798491a268b 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -128,24 +128,25 @@ no_dcbz32_on:
/* First clear RI in our current MSR value */
li r0, MSR_RI
andc r6, r6, r0
- MTMSR_EERI(r6)
- mtsrr0 r9
- mtsrr1 r4
PPC_LL r0, SVCPU_R0(r3)
PPC_LL r1, SVCPU_R1(r3)
PPC_LL r2, SVCPU_R2(r3)
- PPC_LL r4, SVCPU_R4(r3)
PPC_LL r5, SVCPU_R5(r3)
- PPC_LL r6, SVCPU_R6(r3)
PPC_LL r7, SVCPU_R7(r3)
PPC_LL r8, SVCPU_R8(r3)
- PPC_LL r9, SVCPU_R9(r3)
PPC_LL r10, SVCPU_R10(r3)
PPC_LL r11, SVCPU_R11(r3)
PPC_LL r12, SVCPU_R12(r3)
PPC_LL r13, SVCPU_R13(r3)
+ MTMSR_EERI(r6)
+ mtsrr0 r9
+ mtsrr1 r4
+
+ PPC_LL r4, SVCPU_R4(r3)
+ PPC_LL r6, SVCPU_R6(r3)
+ PPC_LL r9, SVCPU_R9(r3)
PPC_LL r3, (SVCPU_R3)(r3)
RFI
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index ee9e1ee9c85..72f13f4a06e 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -17,6 +17,8 @@
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
* Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
+ * Scott Wood <scottwood@freescale.com>
+ * Varun Sethi <varun.sethi@freescale.com>
*/
#include <linux/errno.h>
@@ -30,9 +32,12 @@
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
-#include "timing.h"
#include <asm/cacheflush.h>
+#include <asm/dbell.h>
+#include <asm/hw_irq.h>
+#include <asm/irq.h>
+#include "timing.h"
#include "booke.h"
unsigned long kvmppc_booke_handlers;
@@ -55,6 +60,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "dec", VCPU_STAT(dec_exits) },
{ "ext_intr", VCPU_STAT(ext_intr_exits) },
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
+ { "doorbell", VCPU_STAT(dbell_exits) },
+ { "guest doorbell", VCPU_STAT(gdbell_exits) },
{ NULL }
};
@@ -121,6 +128,10 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
u32 old_msr = vcpu->arch.shared->msr;
+#ifdef CONFIG_KVM_BOOKE_HV
+ new_msr |= MSR_GS;
+#endif
+
vcpu->arch.shared->msr = new_msr;
kvmppc_mmu_msr_notify(vcpu, old_msr);
@@ -195,17 +206,87 @@ void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}
+static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+ mtspr(SPRN_GSRR0, srr0);
+ mtspr(SPRN_GSRR1, srr1);
+#else
+ vcpu->arch.shared->srr0 = srr0;
+ vcpu->arch.shared->srr1 = srr1;
+#endif
+}
+
+static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
+{
+ vcpu->arch.csrr0 = srr0;
+ vcpu->arch.csrr1 = srr1;
+}
+
+static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
+{
+ if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
+ vcpu->arch.dsrr0 = srr0;
+ vcpu->arch.dsrr1 = srr1;
+ } else {
+ set_guest_csrr(vcpu, srr0, srr1);
+ }
+}
+
+static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
+{
+ vcpu->arch.mcsrr0 = srr0;
+ vcpu->arch.mcsrr1 = srr1;
+}
+
+static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+ return mfspr(SPRN_GDEAR);
+#else
+ return vcpu->arch.shared->dar;
+#endif
+}
+
+static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+ mtspr(SPRN_GDEAR, dear);
+#else
+ vcpu->arch.shared->dar = dear;
+#endif
+}
+
+static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+ return mfspr(SPRN_GESR);
+#else
+ return vcpu->arch.shared->esr;
+#endif
+}
+
+static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
+{
+#ifdef CONFIG_KVM_BOOKE_HV
+ mtspr(SPRN_GESR, esr);
+#else
+ vcpu->arch.shared->esr = esr;
+#endif
+}
+
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
unsigned int priority)
{
int allowed = 0;
- ulong uninitialized_var(msr_mask);
+ ulong msr_mask = 0;
bool update_esr = false, update_dear = false;
ulong crit_raw = vcpu->arch.shared->critical;
ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
bool crit;
bool keep_irq = false;
+ enum int_class int_class;
/* Truncate crit indicators in 32 bit mode */
if (!(vcpu->arch.shared->msr & MSR_SF)) {
@@ -241,46 +322,85 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
case BOOKE_IRQPRIO_AP_UNAVAIL:
case BOOKE_IRQPRIO_ALIGNMENT:
allowed = 1;
- msr_mask = MSR_CE|MSR_ME|MSR_DE;
+ msr_mask = MSR_CE | MSR_ME | MSR_DE;
+ int_class = INT_CLASS_NONCRIT;
break;
case BOOKE_IRQPRIO_CRITICAL:
- case BOOKE_IRQPRIO_WATCHDOG:
+ case BOOKE_IRQPRIO_DBELL_CRIT:
allowed = vcpu->arch.shared->msr & MSR_CE;
+ allowed = allowed && !crit;
msr_mask = MSR_ME;
+ int_class = INT_CLASS_CRIT;
break;
case BOOKE_IRQPRIO_MACHINE_CHECK:
allowed = vcpu->arch.shared->msr & MSR_ME;
- msr_mask = 0;
+ allowed = allowed && !crit;
+ int_class = INT_CLASS_MC;
break;
case BOOKE_IRQPRIO_DECREMENTER:
case BOOKE_IRQPRIO_FIT:
keep_irq = true;
/* fall through */
case BOOKE_IRQPRIO_EXTERNAL:
+ case BOOKE_IRQPRIO_DBELL:
allowed = vcpu->arch.shared->msr & MSR_EE;
allowed = allowed && !crit;
- msr_mask = MSR_CE|MSR_ME|MSR_DE;
+ msr_mask = MSR_CE | MSR_ME | MSR_DE;
+ int_class = INT_CLASS_NONCRIT;
break;
case BOOKE_IRQPRIO_DEBUG:
allowed = vcpu->arch.shared->msr & MSR_DE;
+ allowed = allowed && !crit;
msr_mask = MSR_ME;
+ int_class = INT_CLASS_CRIT;
break;
}
if (allowed) {
- vcpu->arch.shared->srr0 = vcpu->arch.pc;
- vcpu->arch.shared->srr1 = vcpu->arch.shared->msr;
+ switch (int_class) {
+ case INT_CLASS_NONCRIT:
+ set_guest_srr(vcpu, vcpu->arch.pc,
+ vcpu->arch.shared->msr);
+ break;
+ case INT_CLASS_CRIT:
+ set_guest_csrr(vcpu, vcpu->arch.pc,
+ vcpu->arch.shared->msr);
+ break;
+ case INT_CLASS_DBG:
+ set_guest_dsrr(vcpu, vcpu->arch.pc,
+ vcpu->arch.shared->msr);
+ break;
+ case INT_CLASS_MC:
+ set_guest_mcsrr(vcpu, vcpu->arch.pc,
+ vcpu->arch.shared->msr);
+ break;
+ }
+
vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
if (update_esr == true)
- vcpu->arch.shared->esr = vcpu->arch.queued_esr;
+ set_guest_esr(vcpu, vcpu->arch.queued_esr);
if (update_dear == true)
- vcpu->arch.shared->dar = vcpu->arch.queued_dear;
+ set_guest_dear(vcpu, vcpu->arch.queued_dear);
kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);
if (!keep_irq)
clear_bit(priority, &vcpu->arch.pending_exceptions);
}
+#ifdef CONFIG_KVM_BOOKE_HV
+ /*
+ * If an interrupt is pending but masked, raise a guest doorbell
+ * so that we are notified when the guest enables the relevant
+ * MSR bit.
+ */
+ if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
+ kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
+ if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
+ kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
+ if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
+ kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
+#endif
+
return allowed;
}
@@ -305,7 +425,7 @@ static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
}
priority = __ffs(*pending);
- while (priority <= BOOKE_IRQPRIO_MAX) {
+ while (priority < BOOKE_IRQPRIO_MAX) {
if (kvmppc_booke_irqprio_deliver(vcpu, priority))
break;
@@ -319,8 +439,9 @@ static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
}
/* Check pending exceptions and deliver one, if possible. */
-void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
+int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
+ int r = 0;
WARN_ON_ONCE(!irqs_disabled());
kvmppc_core_check_exceptions(vcpu);
@@ -328,16 +449,60 @@ void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
if (vcpu->arch.shared->msr & MSR_WE) {
local_irq_enable();
kvm_vcpu_block(vcpu);
+ clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
local_irq_disable();
kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
- kvmppc_core_check_exceptions(vcpu);
+ r = 1;
};
+
+ return r;
+}
+
+/*
+ * Common checks before entering the guest world. Call with interrupts
+ * disabled.
+ *
+ * Returns !0 if a signal is pending.
+ */
+static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
+{
+ int r = 0;
+
+ WARN_ON_ONCE(!irqs_disabled());
+ while (true) {
+ if (need_resched()) {
+ local_irq_enable();
+ cond_resched();
+ local_irq_disable();
+ continue;
+ }
+
+ if (signal_pending(current)) {
+ r = 1;
+ break;
+ }
+
+ if (kvmppc_core_prepare_to_enter(vcpu)) {
+ /* interrupts were enabled in the meantime, so we are
+ back at square one */
+ continue;
+ }
+
+ break;
+ }
+
+ return r;
}
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
int ret;
+#ifdef CONFIG_PPC_FPU
+ unsigned int fpscr;
+ int fpexc_mode;
+ u64 fpr[32];
+#endif
if (!vcpu->arch.sane) {
kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -345,17 +510,53 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
}
local_irq_disable();
-
- kvmppc_core_prepare_to_enter(vcpu);
-
- if (signal_pending(current)) {
+ if (kvmppc_prepare_to_enter(vcpu)) {
kvm_run->exit_reason = KVM_EXIT_INTR;
ret = -EINTR;
goto out;
}
kvm_guest_enter();
+
+#ifdef CONFIG_PPC_FPU
+ /* Save userspace FPU state in stack */
+ enable_kernel_fp();
+ memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
+ fpscr = current->thread.fpscr.val;
+ fpexc_mode = current->thread.fpexc_mode;
+
+ /* Restore guest FPU state to thread */
+ memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
+ current->thread.fpscr.val = vcpu->arch.fpscr;
+
+ /*
+ * Since we can't trap on MSR_FP in GS-mode, we consider the guest
+ * as always using the FPU. Kernel usage of FP (via
+ * enable_kernel_fp()) in this thread must not occur while
+ * vcpu->fpu_active is set.
+ */
+ vcpu->fpu_active = 1;
+
+ kvmppc_load_guest_fp(vcpu);
+#endif
+
ret = __kvmppc_vcpu_run(kvm_run, vcpu);
+
+#ifdef CONFIG_PPC_FPU
+ kvmppc_save_guest_fp(vcpu);
+
+ vcpu->fpu_active = 0;
+
+ /* Save guest FPU state from thread */
+ memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
+ vcpu->arch.fpscr = current->thread.fpscr.val;
+
+ /* Restore userspace FPU state from stack */
+ memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
+ current->thread.fpscr.val = fpscr;
+ current->thread.fpexc_mode = fpexc_mode;
+#endif
+
kvm_guest_exit();
out:
@@ -363,6 +564,84 @@ out:
return ret;
}
+static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er;
+
+ er = kvmppc_emulate_instruction(run, vcpu);
+ switch (er) {
+ case EMULATE_DONE:
+ /* don't overwrite subtypes, just account kvm_stats */
+ kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
+ /* Future optimization: only reload non-volatiles if
+ * they were actually modified by emulation. */
+ return RESUME_GUEST_NV;
+
+ case EMULATE_DO_DCR:
+ run->exit_reason = KVM_EXIT_DCR;
+ return RESUME_HOST;
+
+ case EMULATE_FAIL:
+ printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
+ __func__, vcpu->arch.pc, vcpu->arch.last_inst);
+ /* For debugging, encode the failing instruction and
+ * report it to userspace. */
+ run->hw.hardware_exit_reason = ~0ULL << 32;
+ run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
+ kvmppc_core_queue_program(vcpu, ESR_PIL);
+ return RESUME_HOST;
+
+ default:
+ BUG();
+ }
+}
+
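The EMULATE_FAIL branch above encodes the failing opcode into hardware_exit_reason; userspace can unpack it roughly like this (a sketch based only on the encoding visible above):

/* Sketch: recover the failing instruction reported by emulation_exit(). */
#include <linux/kvm.h>

static unsigned int emul_fail_inst(struct kvm_run *run)
{
	/* Upper 32 bits are all ones (~0ULL << 32); the low 32 bits hold the opcode. */
	if ((run->hw.hardware_exit_reason >> 32) != 0xffffffffULL)
		return 0;	/* not an emulation-failure report */
	return (unsigned int)run->hw.hardware_exit_reason;
}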
+static void kvmppc_fill_pt_regs(struct pt_regs *regs)
+{
+ ulong r1, ip, msr, lr;
+
+ asm("mr %0, 1" : "=r"(r1));
+ asm("mflr %0" : "=r"(lr));
+ asm("mfmsr %0" : "=r"(msr));
+ asm("bl 1f; 1: mflr %0" : "=r"(ip));
+
+ memset(regs, 0, sizeof(*regs));
+ regs->gpr[1] = r1;
+ regs->nip = ip;
+ regs->msr = msr;
+ regs->link = lr;
+}
+
+static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
+ unsigned int exit_nr)
+{
+ struct pt_regs regs;
+
+ switch (exit_nr) {
+ case BOOKE_INTERRUPT_EXTERNAL:
+ kvmppc_fill_pt_regs(&regs);
+ do_IRQ(&regs);
+ break;
+ case BOOKE_INTERRUPT_DECREMENTER:
+ kvmppc_fill_pt_regs(&regs);
+ timer_interrupt(&regs);
+ break;
+#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64)
+ case BOOKE_INTERRUPT_DOORBELL:
+ kvmppc_fill_pt_regs(&regs);
+ doorbell_exception(&regs);
+ break;
+#endif
+ case BOOKE_INTERRUPT_MACHINE_CHECK:
+ /* FIXME */
+ break;
+ case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
+ kvmppc_fill_pt_regs(&regs);
+ performance_monitor_exception(&regs);
+ break;
+ }
+}
+
/**
* kvmppc_handle_exit
*
@@ -371,12 +650,14 @@ out:
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int exit_nr)
{
- enum emulation_result er;
int r = RESUME_HOST;
/* update before a new last_exit_type is rewritten */
kvmppc_update_timing_stats(vcpu);
+ /* restart interrupts if they were meant for the host */
+ kvmppc_restart_interrupt(vcpu, exit_nr);
+
local_irq_enable();
run->exit_reason = KVM_EXIT_UNKNOWN;
@@ -386,62 +667,74 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
case BOOKE_INTERRUPT_MACHINE_CHECK:
printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
kvmppc_dump_vcpu(vcpu);
+ /* For debugging, send invalid exit reason to user space */
+ run->hw.hardware_exit_reason = ~1ULL << 32;
+ run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
r = RESUME_HOST;
break;
case BOOKE_INTERRUPT_EXTERNAL:
kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
- if (need_resched())
- cond_resched();
r = RESUME_GUEST;
break;
case BOOKE_INTERRUPT_DECREMENTER:
- /* Since we switched IVPR back to the host's value, the host
- * handled this interrupt the moment we enabled interrupts.
- * Now we just offer it a chance to reschedule the guest. */
kvmppc_account_exit(vcpu, DEC_EXITS);
- if (need_resched())
- cond_resched();
r = RESUME_GUEST;
break;
+ case BOOKE_INTERRUPT_DOORBELL:
+ kvmppc_account_exit(vcpu, DBELL_EXITS);
+ r = RESUME_GUEST;
+ break;
+
+ case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
+ kvmppc_account_exit(vcpu, GDBELL_EXITS);
+
+ /*
+ * We are here because there is a pending guest interrupt
+ * which could not be delivered as MSR_CE or MSR_ME was not
+ * set. Once we break from here we will retry delivery.
+ */
+ r = RESUME_GUEST;
+ break;
+
+ case BOOKE_INTERRUPT_GUEST_DBELL:
+ kvmppc_account_exit(vcpu, GDBELL_EXITS);
+
+ /*
+ * We are here because there is a pending guest interrupt
+ * which could not be delivered as MSR_EE was not set. Once
+ * we break from here we will retry delivery.
+ */
+ r = RESUME_GUEST;
+ break;
+
+ case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
+ r = RESUME_GUEST;
+ break;
+
+ case BOOKE_INTERRUPT_HV_PRIV:
+ r = emulation_exit(run, vcpu);
+ break;
+
case BOOKE_INTERRUPT_PROGRAM:
- if (vcpu->arch.shared->msr & MSR_PR) {
- /* Program traps generated by user-level software must be handled
- * by the guest kernel. */
+ if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
+ /*
+ * Program traps generated by user-level software must
+ * be handled by the guest kernel.
+ *
+ * In GS mode, hypervisor privileged instructions trap
+ * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
+ * actual program interrupts, handled by the guest.
+ */
kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
r = RESUME_GUEST;
kvmppc_account_exit(vcpu, USR_PR_INST);
break;
}
- er = kvmppc_emulate_instruction(run, vcpu);
- switch (er) {
- case EMULATE_DONE:
- /* don't overwrite subtypes, just account kvm_stats */
- kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
- /* Future optimization: only reload non-volatiles if
- * they were actually modified by emulation. */
- r = RESUME_GUEST_NV;
- break;
- case EMULATE_DO_DCR:
- run->exit_reason = KVM_EXIT_DCR;
- r = RESUME_HOST;
- break;
- case EMULATE_FAIL:
- /* XXX Deliver Program interrupt to guest. */
- printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
- __func__, vcpu->arch.pc, vcpu->arch.last_inst);
- /* For debugging, encode the failing instruction and
- * report it to userspace. */
- run->hw.hardware_exit_reason = ~0ULL << 32;
- run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
- r = RESUME_HOST;
- break;
- default:
- BUG();
- }
+ r = emulation_exit(run, vcpu);
break;
case BOOKE_INTERRUPT_FP_UNAVAIL:
@@ -506,6 +799,21 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = RESUME_GUEST;
break;
+#ifdef CONFIG_KVM_BOOKE_HV
+ case BOOKE_INTERRUPT_HV_SYSCALL:
+ if (!(vcpu->arch.shared->msr & MSR_PR)) {
+ kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
+ } else {
+ /*
+ * hcall from guest userspace -- send privileged
+ * instruction program check.
+ */
+ kvmppc_core_queue_program(vcpu, ESR_PPR);
+ }
+
+ r = RESUME_GUEST;
+ break;
+#else
case BOOKE_INTERRUPT_SYSCALL:
if (!(vcpu->arch.shared->msr & MSR_PR) &&
(((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
@@ -519,6 +827,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_account_exit(vcpu, SYSCALL_EXITS);
r = RESUME_GUEST;
break;
+#endif
case BOOKE_INTERRUPT_DTLB_MISS: {
unsigned long eaddr = vcpu->arch.fault_dear;
@@ -526,7 +835,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
gpa_t gpaddr;
gfn_t gfn;
-#ifdef CONFIG_KVM_E500
+#ifdef CONFIG_KVM_E500V2
if (!(vcpu->arch.shared->msr & MSR_PR) &&
(eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
kvmppc_map_magic(vcpu);
@@ -567,6 +876,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* Guest has mapped and accessed a page which is not
* actually RAM. */
vcpu->arch.paddr_accessed = gpaddr;
+ vcpu->arch.vaddr_accessed = eaddr;
r = kvmppc_emulate_mmio(run, vcpu);
kvmppc_account_exit(vcpu, MMIO_EXITS);
}
@@ -634,15 +944,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
BUG();
}
- local_irq_disable();
-
- kvmppc_core_prepare_to_enter(vcpu);
-
+ /*
+ * To avoid clobbering exit_reason, only check for signals if we
+ * aren't already exiting to userspace for some other reason.
+ */
if (!(r & RESUME_HOST)) {
- /* To avoid clobbering exit_reason, only check for signals if
- * we aren't already exiting to userspace for some other
- * reason. */
- if (signal_pending(current)) {
+ local_irq_disable();
+ if (kvmppc_prepare_to_enter(vcpu)) {
run->exit_reason = KVM_EXIT_INTR;
r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
kvmppc_account_exit(vcpu, SIGNAL_EXITS);
@@ -659,12 +967,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
int r;
vcpu->arch.pc = 0;
- vcpu->arch.shared->msr = 0;
- vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
vcpu->arch.shared->pir = vcpu->vcpu_id;
kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
+ kvmppc_set_msr(vcpu, 0);
+#ifndef CONFIG_KVM_BOOKE_HV
+ vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
vcpu->arch.shadow_pid = 1;
+ vcpu->arch.shared->msr = 0;
+#endif
/* Eye-catching numbers so we know if the guest takes an interrupt
* before it's programmed its own IVPR/IVORs. */
@@ -745,8 +1056,8 @@ static void get_sregs_base(struct kvm_vcpu *vcpu,
sregs->u.e.csrr0 = vcpu->arch.csrr0;
sregs->u.e.csrr1 = vcpu->arch.csrr1;
sregs->u.e.mcsr = vcpu->arch.mcsr;
- sregs->u.e.esr = vcpu->arch.shared->esr;
- sregs->u.e.dear = vcpu->arch.shared->dar;
+ sregs->u.e.esr = get_guest_esr(vcpu);
+ sregs->u.e.dear = get_guest_dear(vcpu);
sregs->u.e.tsr = vcpu->arch.tsr;
sregs->u.e.tcr = vcpu->arch.tcr;
sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
@@ -763,8 +1074,8 @@ static int set_sregs_base(struct kvm_vcpu *vcpu,
vcpu->arch.csrr0 = sregs->u.e.csrr0;
vcpu->arch.csrr1 = sregs->u.e.csrr1;
vcpu->arch.mcsr = sregs->u.e.mcsr;
- vcpu->arch.shared->esr = sregs->u.e.esr;
- vcpu->arch.shared->dar = sregs->u.e.dear;
+ set_guest_esr(vcpu, sregs->u.e.esr);
+ set_guest_dear(vcpu, sregs->u.e.dear);
vcpu->arch.vrsave = sregs->u.e.vrsave;
kvmppc_set_tcr(vcpu, sregs->u.e.tcr);
@@ -932,15 +1243,6 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
{
}
-int kvmppc_core_init_vm(struct kvm *kvm)
-{
- return 0;
-}
-
-void kvmppc_core_destroy_vm(struct kvm *kvm)
-{
-}
-
void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
vcpu->arch.tcr = new_tcr;
@@ -968,8 +1270,19 @@ void kvmppc_decrementer_func(unsigned long data)
kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}
+void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ current->thread.kvm_vcpu = vcpu;
+}
+
+void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ current->thread.kvm_vcpu = NULL;
+}
+
int __init kvmppc_booke_init(void)
{
+#ifndef CONFIG_KVM_BOOKE_HV
unsigned long ivor[16];
unsigned long max_ivor = 0;
int i;
@@ -1012,7 +1325,7 @@ int __init kvmppc_booke_init(void)
}
flush_icache_range(kvmppc_booke_handlers,
kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
-
+#endif /* !BOOKE_HV */
return 0;
}
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 2fe202705a3..ba61974c1e2 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_ppc.h>
+#include <asm/switch_to.h>
#include "timing.h"
/* interrupt priortity ordering */
@@ -48,7 +49,20 @@
#define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19
/* Internal pseudo-irqprio for level triggered externals */
#define BOOKE_IRQPRIO_EXTERNAL_LEVEL 20
-#define BOOKE_IRQPRIO_MAX 20
+#define BOOKE_IRQPRIO_DBELL 21
+#define BOOKE_IRQPRIO_DBELL_CRIT 22
+#define BOOKE_IRQPRIO_MAX 23
+
+#define BOOKE_IRQMASK_EE ((1 << BOOKE_IRQPRIO_EXTERNAL_LEVEL) | \
+ (1 << BOOKE_IRQPRIO_PERFORMANCE_MONITOR) | \
+ (1 << BOOKE_IRQPRIO_DBELL) | \
+ (1 << BOOKE_IRQPRIO_DECREMENTER) | \
+ (1 << BOOKE_IRQPRIO_FIT) | \
+ (1 << BOOKE_IRQPRIO_EXTERNAL))
+
+#define BOOKE_IRQMASK_CE ((1 << BOOKE_IRQPRIO_DBELL_CRIT) | \
+ (1 << BOOKE_IRQPRIO_WATCHDOG) | \
+ (1 << BOOKE_IRQPRIO_CRITICAL))
extern unsigned long kvmppc_booke_handlers;
@@ -61,8 +75,8 @@ void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
-int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
-int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
+int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
+int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
/* low-level asm code to transfer guest state */
void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu);
@@ -71,4 +85,46 @@ void kvmppc_save_guest_spe(struct kvm_vcpu *vcpu);
/* high-level function, manages flags, host state */
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu);
+void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu);
+
+enum int_class {
+ INT_CLASS_NONCRIT,
+ INT_CLASS_CRIT,
+ INT_CLASS_MC,
+ INT_CLASS_DBG,
+};
+
+void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);
+
+/*
+ * Load up the guest vcpu FP state if it is needed.
+ * It also sets MSR_FP in the thread so that the host knows
+ * we are holding the FPU, and the host can then help to save the
+ * guest vcpu FP state if other threads need to use the FPU.
+ * This simulates an FP unavailable fault.
+ *
+ * It must be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+ if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) {
+ load_up_fpu();
+ current->thread.regs->msr |= MSR_FP;
+ }
+#endif
+}
+
+/*
+ * Save the guest vcpu FP state into the thread struct.
+ * It must be called with preemption disabled.
+ */
+static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+ if (vcpu->fpu_active && (current->thread.regs->msr & MSR_FP))
+ giveup_fpu(current);
+#endif
+}
#endif /* __KVM_BOOKE_H__ */
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 3e652da3653..6c76397f2af 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -40,8 +40,8 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
- int rs;
- int rt;
+ int rs = get_rs(inst);
+ int rt = get_rt(inst);
switch (get_op(inst)) {
case 19:
@@ -62,19 +62,16 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
switch (get_xop(inst)) {
case OP_31_XOP_MFMSR:
- rt = get_rt(inst);
kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
break;
case OP_31_XOP_MTMSR:
- rs = get_rs(inst);
kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
break;
case OP_31_XOP_WRTEE:
- rs = get_rs(inst);
vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
| (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
@@ -99,22 +96,32 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
-int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+/*
+ * NOTE: some of these registers are not emulated on BOOKE_HV (GS-mode).
+ * Their backing store is in real registers, and these functions
+ * will return the wrong result if called for them in another context
+ * (such as debugging).
+ */
+int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
int emulated = EMULATE_DONE;
- ulong spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
case SPRN_DEAR:
- vcpu->arch.shared->dar = spr_val; break;
+ vcpu->arch.shared->dar = spr_val;
+ break;
case SPRN_ESR:
- vcpu->arch.shared->esr = spr_val; break;
+ vcpu->arch.shared->esr = spr_val;
+ break;
case SPRN_DBCR0:
- vcpu->arch.dbcr0 = spr_val; break;
+ vcpu->arch.dbcr0 = spr_val;
+ break;
case SPRN_DBCR1:
- vcpu->arch.dbcr1 = spr_val; break;
+ vcpu->arch.dbcr1 = spr_val;
+ break;
case SPRN_DBSR:
- vcpu->arch.dbsr &= ~spr_val; break;
+ vcpu->arch.dbsr &= ~spr_val;
+ break;
case SPRN_TSR:
kvmppc_clr_tsr_bits(vcpu, spr_val);
break;
@@ -122,20 +129,29 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
kvmppc_set_tcr(vcpu, spr_val);
break;
- /* Note: SPRG4-7 are user-readable. These values are
- * loaded into the real SPRGs when resuming the
- * guest. */
+ /*
+ * Note: SPRG4-7 are user-readable.
+ * These values are loaded into the real SPRGs when resuming the
+ * guest (PR-mode only).
+ */
case SPRN_SPRG4:
- vcpu->arch.shared->sprg4 = spr_val; break;
+ vcpu->arch.shared->sprg4 = spr_val;
+ break;
case SPRN_SPRG5:
- vcpu->arch.shared->sprg5 = spr_val; break;
+ vcpu->arch.shared->sprg5 = spr_val;
+ break;
case SPRN_SPRG6:
- vcpu->arch.shared->sprg6 = spr_val; break;
+ vcpu->arch.shared->sprg6 = spr_val;
+ break;
case SPRN_SPRG7:
- vcpu->arch.shared->sprg7 = spr_val; break;
+ vcpu->arch.shared->sprg7 = spr_val;
+ break;
case SPRN_IVPR:
vcpu->arch.ivpr = spr_val;
+#ifdef CONFIG_KVM_BOOKE_HV
+ mtspr(SPRN_GIVPR, spr_val);
+#endif
break;
case SPRN_IVOR0:
vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val;
@@ -145,6 +161,9 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
break;
case SPRN_IVOR2:
vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val;
+#ifdef CONFIG_KVM_BOOKE_HV
+ mtspr(SPRN_GIVOR2, spr_val);
+#endif
break;
case SPRN_IVOR3:
vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val;
@@ -163,6 +182,9 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
break;
case SPRN_IVOR8:
vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val;
+#ifdef CONFIG_KVM_BOOKE_HV
+ mtspr(SPRN_GIVOR8, spr_val);
+#endif
break;
case SPRN_IVOR9:
vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val;
@@ -193,75 +215,83 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
return emulated;
}
-int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
int emulated = EMULATE_DONE;
switch (sprn) {
case SPRN_IVPR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break;
+ *spr_val = vcpu->arch.ivpr;
+ break;
case SPRN_DEAR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break;
+ *spr_val = vcpu->arch.shared->dar;
+ break;
case SPRN_ESR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->esr); break;
+ *spr_val = vcpu->arch.shared->esr;
+ break;
case SPRN_DBCR0:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break;
+ *spr_val = vcpu->arch.dbcr0;
+ break;
case SPRN_DBCR1:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break;
+ *spr_val = vcpu->arch.dbcr1;
+ break;
case SPRN_DBSR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break;
+ *spr_val = vcpu->arch.dbsr;
+ break;
case SPRN_TSR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.tsr); break;
+ *spr_val = vcpu->arch.tsr;
+ break;
case SPRN_TCR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.tcr); break;
+ *spr_val = vcpu->arch.tcr;
+ break;
case SPRN_IVOR0:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
break;
case SPRN_IVOR1:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
break;
case SPRN_IVOR2:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
break;
case SPRN_IVOR3:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
break;
case SPRN_IVOR4:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
break;
case SPRN_IVOR5:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
break;
case SPRN_IVOR6:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
break;
case SPRN_IVOR7:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
break;
case SPRN_IVOR8:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
break;
case SPRN_IVOR9:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
break;
case SPRN_IVOR10:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
break;
case SPRN_IVOR11:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
break;
case SPRN_IVOR12:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
break;
case SPRN_IVOR13:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
break;
case SPRN_IVOR14:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
break;
case SPRN_IVOR15:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
break;
default:
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index c8c4b878795..8feec2ff392 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -419,13 +419,13 @@ lightweight_exit:
* written directly to the shared area, so we
* need to reload them here with the guest's values.
*/
- lwz r3, VCPU_SHARED_SPRG4(r5)
+ PPC_LD(r3, VCPU_SHARED_SPRG4, r5)
mtspr SPRN_SPRG4W, r3
- lwz r3, VCPU_SHARED_SPRG5(r5)
+ PPC_LD(r3, VCPU_SHARED_SPRG5, r5)
mtspr SPRN_SPRG5W, r3
- lwz r3, VCPU_SHARED_SPRG6(r5)
+ PPC_LD(r3, VCPU_SHARED_SPRG6, r5)
mtspr SPRN_SPRG6W, r3
- lwz r3, VCPU_SHARED_SPRG7(r5)
+ PPC_LD(r3, VCPU_SHARED_SPRG7, r5)
mtspr SPRN_SPRG7W, r3
#ifdef CONFIG_KVM_EXIT_TIMING
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
new file mode 100644
index 00000000000..6048a00515d
--- /dev/null
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -0,0 +1,597 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ *
+ * Author: Varun Sethi <varun.sethi@freescale.com>
+ * Author: Scott Wood <scottwood@freescale.com>
+ *
+ * This file is derived from arch/powerpc/kvm/booke_interrupts.S
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/kvm_asm.h>
+#include <asm/reg.h>
+#include <asm/mmu-44x.h>
+#include <asm/page.h>
+#include <asm/asm-compat.h>
+#include <asm/asm-offsets.h>
+#include <asm/bitsperlong.h>
+#include <asm/thread_info.h>
+
+#include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */
+
+#define GET_VCPU(vcpu, thread) \
+ PPC_LL vcpu, THREAD_KVM_VCPU(thread)
+
+#define LONGBYTES (BITS_PER_LONG / 8)
+
+#define VCPU_GPR(n) (VCPU_GPRS + (n * LONGBYTES))
+#define VCPU_GUEST_SPRG(n) (VCPU_GUEST_SPRGS + (n * LONGBYTES))
+
+/* The host stack layout: */
+#define HOST_R1 (0 * LONGBYTES) /* Implied by stwu. */
+#define HOST_CALLEE_LR (1 * LONGBYTES)
+#define HOST_RUN (2 * LONGBYTES) /* struct kvm_run */
+/*
+ * r2 is special: it holds 'current', and it is made nonvolatile in the
+ * kernel with the -ffixed-r2 gcc option.
+ */
+#define HOST_R2 (3 * LONGBYTES)
+#define HOST_CR (4 * LONGBYTES)
+#define HOST_NV_GPRS (5 * LONGBYTES)
+#define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * LONGBYTES))
+#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + LONGBYTES)
+#define HOST_STACK_SIZE ((HOST_MIN_STACK_SIZE + 15) & ~15) /* Align. */
+#define HOST_STACK_LR (HOST_STACK_SIZE + LONGBYTES) /* In caller stack frame. */
+
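Read as a C structure, the host stack frame defined above looks roughly as follows (hypothetical mirror for illustration; LONGBYTES is 4 or 8 depending on BITS_PER_LONG, and HOST_STACK_LR lives in the caller's frame, not here):

/* Hypothetical C mirror of the host stack frame laid out by the defines above. */
struct bookehv_host_frame {
	unsigned long r1_backchain;	/* HOST_R1: implied by stwu		*/
	unsigned long callee_lr;	/* HOST_CALLEE_LR			*/
	unsigned long kvm_run;		/* HOST_RUN: struct kvm_run pointer	*/
	unsigned long r2;		/* HOST_R2: nonvolatile 'current'	*/
	unsigned long cr;		/* HOST_CR				*/
	unsigned long nv_gprs[18];	/* HOST_NV_GPRS: r14..r31		*/
	/* rounded up to a 16-byte multiple to give HOST_STACK_SIZE */
};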
+#define NEED_EMU 0x00000001 /* emulation -- save nv regs */
+#define NEED_DEAR 0x00000002 /* save faulting DEAR */
+#define NEED_ESR 0x00000004 /* save faulting ESR */
+
+/*
+ * On entry:
+ * r4 = vcpu, r5 = srr0, r6 = srr1
+ * saved in vcpu: cr, ctr, r3-r13
+ */
+.macro kvm_handler_common intno, srr0, flags
+ /* Restore host stack pointer */
+ PPC_STL r1, VCPU_GPR(r1)(r4)
+ PPC_STL r2, VCPU_GPR(r2)(r4)
+ PPC_LL r1, VCPU_HOST_STACK(r4)
+ PPC_LL r2, HOST_R2(r1)
+
+ mfspr r10, SPRN_PID
+ lwz r8, VCPU_HOST_PID(r4)
+ PPC_LL r11, VCPU_SHARED(r4)
+ PPC_STL r14, VCPU_GPR(r14)(r4) /* We need a non-volatile GPR. */
+ li r14, \intno
+
+ stw r10, VCPU_GUEST_PID(r4)
+ mtspr SPRN_PID, r8
+
+#ifdef CONFIG_KVM_EXIT_TIMING
+ /* save exit time */
+1: mfspr r7, SPRN_TBRU
+ mfspr r8, SPRN_TBRL
+ mfspr r9, SPRN_TBRU
+ cmpw r9, r7
+ stw r8, VCPU_TIMING_EXIT_TBL(r4)
+ bne- 1b
+ stw r9, VCPU_TIMING_EXIT_TBU(r4)
+#endif
+
+ oris r8, r6, MSR_CE@h
+ PPC_STD(r6, VCPU_SHARED_MSR, r11)
+ ori r8, r8, MSR_ME | MSR_RI
+ PPC_STL r5, VCPU_PC(r4)
+
+ /*
+ * Make sure CE/ME/RI are set (if appropriate for exception type)
+ * whether or not the guest had them set. Since mfmsr/mtmsr are
+ * somewhat expensive, skip in the common case where the guest
+ * had all these bits set (and thus they're still set if
+ * appropriate for the exception type).
+ */
+ cmpw r6, r8
+ beq 1f
+ mfmsr r7
+ .if \srr0 != SPRN_MCSRR0 && \srr0 != SPRN_CSRR0
+ oris r7, r7, MSR_CE@h
+ .endif
+ .if \srr0 != SPRN_MCSRR0
+ ori r7, r7, MSR_ME | MSR_RI
+ .endif
+ mtmsr r7
+1:
+
+ .if \flags & NEED_EMU
+ /*
+ * This assumes you have external PID support.
+ * To support a bookehv CPU without external PID, you'll
+ * need to look up the TLB entry and create a temporary mapping.
+ *
+ * FIXME: we don't currently handle if the lwepx faults. PR-mode
+ * booke doesn't handle it either. Since Linux doesn't use
+ * broadcast tlbivax anymore, the only way this should happen is
+ * if the guest maps its memory execute-but-not-read, or if we
+ * somehow take a TLB miss in the middle of this entry code and
+ * evict the relevant entry. On e500mc, all kernel lowmem is
+ * bolted into TLB1 large page mappings, and we don't use
+ * broadcast invalidates, so we should not take a TLB miss here.
+ *
+ * Later we'll need to deal with faults here. Disallowing guest
+ * mappings that are execute-but-not-read could be an option on
+ * e500mc, but not on chips with an LRAT if it is used.
+ */
+
+ mfspr r3, SPRN_EPLC /* will already have correct ELPID and EGS */
+ PPC_STL r15, VCPU_GPR(r15)(r4)
+ PPC_STL r16, VCPU_GPR(r16)(r4)
+ PPC_STL r17, VCPU_GPR(r17)(r4)
+ PPC_STL r18, VCPU_GPR(r18)(r4)
+ PPC_STL r19, VCPU_GPR(r19)(r4)
+ mr r8, r3
+ PPC_STL r20, VCPU_GPR(r20)(r4)
+ rlwimi r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS
+ PPC_STL r21, VCPU_GPR(r21)(r4)
+ rlwimi r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR
+ PPC_STL r22, VCPU_GPR(r22)(r4)
+ rlwimi r8, r10, EPC_EPID_SHIFT, EPC_EPID
+ PPC_STL r23, VCPU_GPR(r23)(r4)
+ PPC_STL r24, VCPU_GPR(r24)(r4)
+ PPC_STL r25, VCPU_GPR(r25)(r4)
+ PPC_STL r26, VCPU_GPR(r26)(r4)
+ PPC_STL r27, VCPU_GPR(r27)(r4)
+ PPC_STL r28, VCPU_GPR(r28)(r4)
+ PPC_STL r29, VCPU_GPR(r29)(r4)
+ PPC_STL r30, VCPU_GPR(r30)(r4)
+ PPC_STL r31, VCPU_GPR(r31)(r4)
+ mtspr SPRN_EPLC, r8
+
+ /* disable preemption, so we are sure we hit the fixup handler */
+#ifdef CONFIG_PPC64
+ clrrdi r8,r1,THREAD_SHIFT
+#else
+ rlwinm r8,r1,0,0,31-THREAD_SHIFT /* current thread_info */
+#endif
+ li r7, 1
+ stw r7, TI_PREEMPT(r8)
+
+ isync
+
+ /*
+ * In case the read goes wrong, we catch it and write an invalid value
+ * in LAST_INST instead.
+ */
+1: lwepx r9, 0, r5
+2:
+.section .fixup, "ax"
+3: li r9, KVM_INST_FETCH_FAILED
+ b 2b
+.previous
+.section __ex_table,"a"
+ PPC_LONG_ALIGN
+ PPC_LONG 1b,3b
+.previous
+
+ mtspr SPRN_EPLC, r3
+ li r7, 0
+ stw r7, TI_PREEMPT(r8)
+ stw r9, VCPU_LAST_INST(r4)
+ .endif
+
+ .if \flags & NEED_ESR
+ mfspr r8, SPRN_ESR
+ PPC_STL r8, VCPU_FAULT_ESR(r4)
+ .endif
+
+ .if \flags & NEED_DEAR
+ mfspr r9, SPRN_DEAR
+ PPC_STL r9, VCPU_FAULT_DEAR(r4)
+ .endif
+
+ b kvmppc_resume_host
+.endm
+
+/*
+ * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
+ */
+.macro kvm_handler intno srr0, srr1, flags
+_GLOBAL(kvmppc_handler_\intno\()_\srr1)
+ GET_VCPU(r11, r10)
+ PPC_STL r3, VCPU_GPR(r3)(r11)
+ mfspr r3, SPRN_SPRG_RSCRATCH0
+ PPC_STL r4, VCPU_GPR(r4)(r11)
+ PPC_LL r4, THREAD_NORMSAVE(0)(r10)
+ PPC_STL r5, VCPU_GPR(r5)(r11)
+ stw r13, VCPU_CR(r11)
+ mfspr r5, \srr0
+ PPC_STL r3, VCPU_GPR(r10)(r11)
+ PPC_LL r3, THREAD_NORMSAVE(2)(r10)
+ PPC_STL r6, VCPU_GPR(r6)(r11)
+ PPC_STL r4, VCPU_GPR(r11)(r11)
+ mfspr r6, \srr1
+ PPC_STL r7, VCPU_GPR(r7)(r11)
+ PPC_STL r8, VCPU_GPR(r8)(r11)
+ PPC_STL r9, VCPU_GPR(r9)(r11)
+ PPC_STL r3, VCPU_GPR(r13)(r11)
+ mfctr r7
+ PPC_STL r12, VCPU_GPR(r12)(r11)
+ PPC_STL r7, VCPU_CTR(r11)
+ mr r4, r11
+ kvm_handler_common \intno, \srr0, \flags
+.endm
+
+.macro kvm_lvl_handler intno scratch srr0, srr1, flags
+_GLOBAL(kvmppc_handler_\intno\()_\srr1)
+ mfspr r10, SPRN_SPRG_THREAD
+ GET_VCPU(r11, r10)
+ PPC_STL r3, VCPU_GPR(r3)(r11)
+ mfspr r3, \scratch
+ PPC_STL r4, VCPU_GPR(r4)(r11)
+ PPC_LL r4, GPR9(r8)
+ PPC_STL r5, VCPU_GPR(r5)(r11)
+ stw r9, VCPU_CR(r11)
+ mfspr r5, \srr0
+ PPC_STL r3, VCPU_GPR(r8)(r11)
+ PPC_LL r3, GPR10(r8)
+ PPC_STL r6, VCPU_GPR(r6)(r11)
+ PPC_STL r4, VCPU_GPR(r9)(r11)
+ mfspr r6, \srr1
+ PPC_LL r4, GPR11(r8)
+ PPC_STL r7, VCPU_GPR(r7)(r11)
+ PPC_STL r3, VCPU_GPR(r10)(r11)
+ mfctr r7
+ PPC_STL r12, VCPU_GPR(r12)(r11)
+ PPC_STL r13, VCPU_GPR(r13)(r11)
+ PPC_STL r4, VCPU_GPR(r11)(r11)
+ PPC_STL r7, VCPU_CTR(r11)
+ mr r4, r11
+ kvm_handler_common \intno, \srr0, \flags
+.endm
+
+kvm_lvl_handler BOOKE_INTERRUPT_CRITICAL, \
+ SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
+kvm_lvl_handler BOOKE_INTERRUPT_MACHINE_CHECK, \
+ SPRN_SPRG_RSCRATCH_MC, SPRN_MCSRR0, SPRN_MCSRR1, 0
+kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, \
+ SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR)
+kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR
+kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \
+ SPRN_SRR0, SPRN_SRR1, (NEED_DEAR | NEED_ESR)
+kvm_handler BOOKE_INTERRUPT_PROGRAM, SPRN_SRR0, SPRN_SRR1, NEED_ESR
+kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_DECREMENTER, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_FIT, SPRN_SRR0, SPRN_SRR1, 0
+kvm_lvl_handler BOOKE_INTERRUPT_WATCHDOG, \
+ SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
+kvm_handler BOOKE_INTERRUPT_DTLB_MISS, \
+ SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
+kvm_handler BOOKE_INTERRUPT_ITLB_MISS, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_SPE_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_SPE_FP_ROUND, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_DOORBELL, SPRN_SRR0, SPRN_SRR1, 0
+kvm_lvl_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, \
+ SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
+kvm_handler BOOKE_INTERRUPT_HV_PRIV, SPRN_SRR0, SPRN_SRR1, NEED_EMU
+kvm_handler BOOKE_INTERRUPT_HV_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
+kvm_handler BOOKE_INTERRUPT_GUEST_DBELL, SPRN_GSRR0, SPRN_GSRR1, 0
+kvm_lvl_handler BOOKE_INTERRUPT_GUEST_DBELL_CRIT, \
+ SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
+kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
+ SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
+kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
+ SPRN_SPRG_RSCRATCH_DBG, SPRN_DSRR0, SPRN_DSRR1, 0
+
+
+/* Registers:
+ * SPRG_SCRATCH0: guest r10
+ * r4: vcpu pointer
+ * r11: vcpu->arch.shared
+ * r14: KVM exit number
+ */
+_GLOBAL(kvmppc_resume_host)
+ /* Save remaining volatile guest register state to vcpu. */
+ mfspr r3, SPRN_VRSAVE
+ PPC_STL r0, VCPU_GPR(r0)(r4)
+ mflr r5
+ mfspr r6, SPRN_SPRG4
+ PPC_STL r5, VCPU_LR(r4)
+ mfspr r7, SPRN_SPRG5
+ stw r3, VCPU_VRSAVE(r4)
+ PPC_STD(r6, VCPU_SHARED_SPRG4, r11)
+ mfspr r8, SPRN_SPRG6
+ PPC_STD(r7, VCPU_SHARED_SPRG5, r11)
+ mfspr r9, SPRN_SPRG7
+ PPC_STD(r8, VCPU_SHARED_SPRG6, r11)
+ mfxer r3
+ PPC_STD(r9, VCPU_SHARED_SPRG7, r11)
+
+ /* save guest MAS registers and restore host mas4 & mas6 */
+ mfspr r5, SPRN_MAS0
+ PPC_STL r3, VCPU_XER(r4)
+ mfspr r6, SPRN_MAS1
+ stw r5, VCPU_SHARED_MAS0(r11)
+ mfspr r7, SPRN_MAS2
+ stw r6, VCPU_SHARED_MAS1(r11)
+ PPC_STD(r7, VCPU_SHARED_MAS2, r11)
+ mfspr r5, SPRN_MAS3
+ mfspr r6, SPRN_MAS4
+ stw r5, VCPU_SHARED_MAS7_3+4(r11)
+ mfspr r7, SPRN_MAS6
+ stw r6, VCPU_SHARED_MAS4(r11)
+ mfspr r5, SPRN_MAS7
+ lwz r6, VCPU_HOST_MAS4(r4)
+ stw r7, VCPU_SHARED_MAS6(r11)
+ lwz r8, VCPU_HOST_MAS6(r4)
+ mtspr SPRN_MAS4, r6
+ stw r5, VCPU_SHARED_MAS7_3+0(r11)
+ mtspr SPRN_MAS6, r8
+ /* Enable MAS register updates via exception */
+ mfspr r3, SPRN_EPCR
+ rlwinm r3, r3, 0, ~SPRN_EPCR_DMIUH
+ mtspr SPRN_EPCR, r3
+ isync
+
+ /* Switch to kernel stack and jump to handler. */
+ PPC_LL r3, HOST_RUN(r1)
+ mr r5, r14 /* intno */
+ mr r14, r4 /* Save vcpu pointer. */
+ bl kvmppc_handle_exit
+
+ /* Restore vcpu pointer and the nonvolatiles we used. */
+ mr r4, r14
+ PPC_LL r14, VCPU_GPR(r14)(r4)
+
+ andi. r5, r3, RESUME_FLAG_NV
+ beq skip_nv_load
+ PPC_LL r15, VCPU_GPR(r15)(r4)
+ PPC_LL r16, VCPU_GPR(r16)(r4)
+ PPC_LL r17, VCPU_GPR(r17)(r4)
+ PPC_LL r18, VCPU_GPR(r18)(r4)
+ PPC_LL r19, VCPU_GPR(r19)(r4)
+ PPC_LL r20, VCPU_GPR(r20)(r4)
+ PPC_LL r21, VCPU_GPR(r21)(r4)
+ PPC_LL r22, VCPU_GPR(r22)(r4)
+ PPC_LL r23, VCPU_GPR(r23)(r4)
+ PPC_LL r24, VCPU_GPR(r24)(r4)
+ PPC_LL r25, VCPU_GPR(r25)(r4)
+ PPC_LL r26, VCPU_GPR(r26)(r4)
+ PPC_LL r27, VCPU_GPR(r27)(r4)
+ PPC_LL r28, VCPU_GPR(r28)(r4)
+ PPC_LL r29, VCPU_GPR(r29)(r4)
+ PPC_LL r30, VCPU_GPR(r30)(r4)
+ PPC_LL r31, VCPU_GPR(r31)(r4)
+skip_nv_load:
+ /* Should we return to the guest? */
+ andi. r5, r3, RESUME_FLAG_HOST
+ beq lightweight_exit
+
+ srawi r3, r3, 2 /* Shift -ERR back down. */
+
+heavyweight_exit:
+ /* Not returning to guest. */
+ PPC_LL r5, HOST_STACK_LR(r1)
+ lwz r6, HOST_CR(r1)
+
+ /*
+ * We already saved guest volatile register state; now save the
+ * non-volatiles.
+ */
+
+ PPC_STL r15, VCPU_GPR(r15)(r4)
+ PPC_STL r16, VCPU_GPR(r16)(r4)
+ PPC_STL r17, VCPU_GPR(r17)(r4)
+ PPC_STL r18, VCPU_GPR(r18)(r4)
+ PPC_STL r19, VCPU_GPR(r19)(r4)
+ PPC_STL r20, VCPU_GPR(r20)(r4)
+ PPC_STL r21, VCPU_GPR(r21)(r4)
+ PPC_STL r22, VCPU_GPR(r22)(r4)
+ PPC_STL r23, VCPU_GPR(r23)(r4)
+ PPC_STL r24, VCPU_GPR(r24)(r4)
+ PPC_STL r25, VCPU_GPR(r25)(r4)
+ PPC_STL r26, VCPU_GPR(r26)(r4)
+ PPC_STL r27, VCPU_GPR(r27)(r4)
+ PPC_STL r28, VCPU_GPR(r28)(r4)
+ PPC_STL r29, VCPU_GPR(r29)(r4)
+ PPC_STL r30, VCPU_GPR(r30)(r4)
+ PPC_STL r31, VCPU_GPR(r31)(r4)
+
+ /* Load host non-volatile register state from host stack. */
+ PPC_LL r14, HOST_NV_GPR(r14)(r1)
+ PPC_LL r15, HOST_NV_GPR(r15)(r1)
+ PPC_LL r16, HOST_NV_GPR(r16)(r1)
+ PPC_LL r17, HOST_NV_GPR(r17)(r1)
+ PPC_LL r18, HOST_NV_GPR(r18)(r1)
+ PPC_LL r19, HOST_NV_GPR(r19)(r1)
+ PPC_LL r20, HOST_NV_GPR(r20)(r1)
+ PPC_LL r21, HOST_NV_GPR(r21)(r1)
+ PPC_LL r22, HOST_NV_GPR(r22)(r1)
+ PPC_LL r23, HOST_NV_GPR(r23)(r1)
+ PPC_LL r24, HOST_NV_GPR(r24)(r1)
+ PPC_LL r25, HOST_NV_GPR(r25)(r1)
+ PPC_LL r26, HOST_NV_GPR(r26)(r1)
+ PPC_LL r27, HOST_NV_GPR(r27)(r1)
+ PPC_LL r28, HOST_NV_GPR(r28)(r1)
+ PPC_LL r29, HOST_NV_GPR(r29)(r1)
+ PPC_LL r30, HOST_NV_GPR(r30)(r1)
+ PPC_LL r31, HOST_NV_GPR(r31)(r1)
+
+ /* Return to kvm_vcpu_run(). */
+ mtlr r5
+ mtcr r6
+ addi r1, r1, HOST_STACK_SIZE
+ /* r3 still contains the return code from kvmppc_handle_exit(). */
+ blr
+
+/* Registers:
+ * r3: kvm_run pointer
+ * r4: vcpu pointer
+ */
+_GLOBAL(__kvmppc_vcpu_run)
+ stwu r1, -HOST_STACK_SIZE(r1)
+ PPC_STL r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */
+
+ /* Save host state to stack. */
+ PPC_STL r3, HOST_RUN(r1)
+ mflr r3
+ mfcr r5
+ PPC_STL r3, HOST_STACK_LR(r1)
+
+ stw r5, HOST_CR(r1)
+
+ /* Save host non-volatile register state to stack. */
+ PPC_STL r14, HOST_NV_GPR(r14)(r1)
+ PPC_STL r15, HOST_NV_GPR(r15)(r1)
+ PPC_STL r16, HOST_NV_GPR(r16)(r1)
+ PPC_STL r17, HOST_NV_GPR(r17)(r1)
+ PPC_STL r18, HOST_NV_GPR(r18)(r1)
+ PPC_STL r19, HOST_NV_GPR(r19)(r1)
+ PPC_STL r20, HOST_NV_GPR(r20)(r1)
+ PPC_STL r21, HOST_NV_GPR(r21)(r1)
+ PPC_STL r22, HOST_NV_GPR(r22)(r1)
+ PPC_STL r23, HOST_NV_GPR(r23)(r1)
+ PPC_STL r24, HOST_NV_GPR(r24)(r1)
+ PPC_STL r25, HOST_NV_GPR(r25)(r1)
+ PPC_STL r26, HOST_NV_GPR(r26)(r1)
+ PPC_STL r27, HOST_NV_GPR(r27)(r1)
+ PPC_STL r28, HOST_NV_GPR(r28)(r1)
+ PPC_STL r29, HOST_NV_GPR(r29)(r1)
+ PPC_STL r30, HOST_NV_GPR(r30)(r1)
+ PPC_STL r31, HOST_NV_GPR(r31)(r1)
+
+ /* Load guest non-volatiles. */
+ PPC_LL r14, VCPU_GPR(r14)(r4)
+ PPC_LL r15, VCPU_GPR(r15)(r4)
+ PPC_LL r16, VCPU_GPR(r16)(r4)
+ PPC_LL r17, VCPU_GPR(r17)(r4)
+ PPC_LL r18, VCPU_GPR(r18)(r4)
+ PPC_LL r19, VCPU_GPR(r19)(r4)
+ PPC_LL r20, VCPU_GPR(r20)(r4)
+ PPC_LL r21, VCPU_GPR(r21)(r4)
+ PPC_LL r22, VCPU_GPR(r22)(r4)
+ PPC_LL r23, VCPU_GPR(r23)(r4)
+ PPC_LL r24, VCPU_GPR(r24)(r4)
+ PPC_LL r25, VCPU_GPR(r25)(r4)
+ PPC_LL r26, VCPU_GPR(r26)(r4)
+ PPC_LL r27, VCPU_GPR(r27)(r4)
+ PPC_LL r28, VCPU_GPR(r28)(r4)
+ PPC_LL r29, VCPU_GPR(r29)(r4)
+ PPC_LL r30, VCPU_GPR(r30)(r4)
+ PPC_LL r31, VCPU_GPR(r31)(r4)
+
+
+lightweight_exit:
+ PPC_STL r2, HOST_R2(r1)
+
+ mfspr r3, SPRN_PID
+ stw r3, VCPU_HOST_PID(r4)
+ lwz r3, VCPU_GUEST_PID(r4)
+ mtspr SPRN_PID, r3
+
+ PPC_LL r11, VCPU_SHARED(r4)
+ /* Disable MAS register updates via exception */
+ mfspr r3, SPRN_EPCR
+ oris r3, r3, SPRN_EPCR_DMIUH@h
+ mtspr SPRN_EPCR, r3
+ isync
+ /* Save host mas4 and mas6 and load guest MAS registers */
+ mfspr r3, SPRN_MAS4
+ stw r3, VCPU_HOST_MAS4(r4)
+ mfspr r3, SPRN_MAS6
+ stw r3, VCPU_HOST_MAS6(r4)
+ lwz r3, VCPU_SHARED_MAS0(r11)
+ lwz r5, VCPU_SHARED_MAS1(r11)
+ PPC_LD(r6, VCPU_SHARED_MAS2, r11)
+ lwz r7, VCPU_SHARED_MAS7_3+4(r11)
+ lwz r8, VCPU_SHARED_MAS4(r11)
+ mtspr SPRN_MAS0, r3
+ mtspr SPRN_MAS1, r5
+ mtspr SPRN_MAS2, r6
+ mtspr SPRN_MAS3, r7
+ mtspr SPRN_MAS4, r8
+ lwz r3, VCPU_SHARED_MAS6(r11)
+ lwz r5, VCPU_SHARED_MAS7_3+0(r11)
+ mtspr SPRN_MAS6, r3
+ mtspr SPRN_MAS7, r5
+
+ /*
+ * Host interrupt handlers may have clobbered these guest-readable
+ * SPRGs, so we need to reload them here with the guest's values.
+ */
+ lwz r3, VCPU_VRSAVE(r4)
+ PPC_LD(r5, VCPU_SHARED_SPRG4, r11)
+ mtspr SPRN_VRSAVE, r3
+ PPC_LD(r6, VCPU_SHARED_SPRG5, r11)
+ mtspr SPRN_SPRG4W, r5
+ PPC_LD(r7, VCPU_SHARED_SPRG6, r11)
+ mtspr SPRN_SPRG5W, r6
+ PPC_LD(r8, VCPU_SHARED_SPRG7, r11)
+ mtspr SPRN_SPRG6W, r7
+ mtspr SPRN_SPRG7W, r8
+
+ /* Load some guest volatiles. */
+ PPC_LL r3, VCPU_LR(r4)
+ PPC_LL r5, VCPU_XER(r4)
+ PPC_LL r6, VCPU_CTR(r4)
+ lwz r7, VCPU_CR(r4)
+ PPC_LL r8, VCPU_PC(r4)
+ PPC_LD(r9, VCPU_SHARED_MSR, r11)
+ PPC_LL r0, VCPU_GPR(r0)(r4)
+ PPC_LL r1, VCPU_GPR(r1)(r4)
+ PPC_LL r2, VCPU_GPR(r2)(r4)
+ PPC_LL r10, VCPU_GPR(r10)(r4)
+ PPC_LL r11, VCPU_GPR(r11)(r4)
+ PPC_LL r12, VCPU_GPR(r12)(r4)
+ PPC_LL r13, VCPU_GPR(r13)(r4)
+ mtlr r3
+ mtxer r5
+ mtctr r6
+ mtsrr0 r8
+ mtsrr1 r9
+
+#ifdef CONFIG_KVM_EXIT_TIMING
+ /* save enter time */
+1:
+ mfspr r6, SPRN_TBRU
+ mfspr r9, SPRN_TBRL
+ mfspr r8, SPRN_TBRU
+ cmpw r8, r6
+ stw r9, VCPU_TIMING_LAST_ENTER_TBL(r4)
+ bne 1b
+ stw r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
+#endif
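+ /*
+ * Illustrative note on the loop above: TBU is read, then TBL, then
+ * TBU again; if the two TBU reads differ (e.g. TBL wrapped from
+ * 0xffffffff to 0 and carried into TBU between them), the sequence
+ * retries, so the stored 64-bit enter timestamp is never torn.
+ */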
+
+ /*
+ * Don't execute any instruction that can change CR after
+ * the mtcr below.
+ */
+ mtcr r7
+
+ /* Finish loading guest volatiles and jump to guest. */
+ PPC_LL r5, VCPU_GPR(r5)(r4)
+ PPC_LL r6, VCPU_GPR(r6)(r4)
+ PPC_LL r7, VCPU_GPR(r7)(r4)
+ PPC_LL r8, VCPU_GPR(r8)(r4)
+ PPC_LL r9, VCPU_GPR(r9)(r4)
+
+ PPC_LL r3, VCPU_GPR(r3)(r4)
+ PPC_LL r4, VCPU_GPR(r4)(r4)
+ rfi
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index ddcd896fa2f..b479ed77c51 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -20,11 +20,282 @@
#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/tlbflush.h>
-#include <asm/kvm_e500.h>
#include <asm/kvm_ppc.h>
+#include "../mm/mmu_decl.h"
#include "booke.h"
-#include "e500_tlb.h"
+#include "e500.h"
+
+struct id {
+ unsigned long val;
+ struct id **pentry;
+};
+
+#define NUM_TIDS 256
+
+/*
+ * This table provides mappings from:
+ * (guestAS,guestTID,guestPR) --> ID of physical cpu
+ * guestAS [0..1]
+ * guestTID [0..255]
+ * guestPR [0..1]
+ * ID [1..255]
+ * Each vcpu keeps one vcpu_id_table.
+ */
+struct vcpu_id_table {
+ struct id id[2][NUM_TIDS][2];
+};
+
+/*
+ * This table provides the reverse mapping of vcpu_id_table:
+ * ID --> address of vcpu_id_table item.
+ * Each physical core has one pcpu_id_table.
+ */
+struct pcpu_id_table {
+ struct id *entry[NUM_TIDS];
+};
+
+static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);
+
+/* This variable keeps the last used shadow ID on the local core.
+ * The valid range of shadow IDs is [1..255]. */
+static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);
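+
+/*
+ * Worked example (hypothetical values): if the guest runs with AS=0,
+ * TID=17, PR=0 and this core previously handed out shadow id 42 for
+ * that combination, then idt->id[0][17][0].val == 42 and
+ * pcpu_sids.entry[42] points back at that struct id. A mapping is
+ * trusted only while both directions still agree; once sid 42 is
+ * reused for another entry on this core, the stale forward entry no
+ * longer matches and is treated as invalid.
+ */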
+
+/*
+ * Allocate a free shadow id and set up a valid sid mapping in the given entry.
+ * A mapping is only valid when the vcpu_id_table and pcpu_id_table entries match.
+ *
+ * The caller must have preemption disabled, and keep it that way until
+ * it has finished with the returned shadow id (either written into the
+ * TLB or arch.shadow_pid, or discarded).
+ */
+static inline int local_sid_setup_one(struct id *entry)
+{
+ unsigned long sid;
+ int ret = -1;
+
+ sid = ++(__get_cpu_var(pcpu_last_used_sid));
+ if (sid < NUM_TIDS) {
+ __get_cpu_var(pcpu_sids).entry[sid] = entry;
+ entry->val = sid;
+ entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
+ ret = sid;
+ }
+
+ /*
+ * If sid == NUM_TIDS, we've run out of sids. We return -1, and
+ * the caller will invalidate everything and start over.
+ *
+ * sid > NUM_TIDS indicates a race, which we disable preemption to
+ * avoid.
+ */
+ WARN_ON(sid > NUM_TIDS);
+
+ return ret;
+}
+
+/*
+ * Check if the given entry contains a valid shadow id mapping.
+ * An ID mapping is considered valid only if
+ * both the vcpu and the pcpu know about it.
+ *
+ * The caller must have preemption disabled, and keep it that way until
+ * it has finished with the returned shadow id (either written into the
+ * TLB or arch.shadow_pid, or discarded).
+ */
+static inline int local_sid_lookup(struct id *entry)
+{
+ if (entry && entry->val != 0 &&
+ __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
+ entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
+ return entry->val;
+ return -1;
+}
+
+/* Invalidate all id mappings on local core -- call with preempt disabled */
+static inline void local_sid_destroy_all(void)
+{
+ __get_cpu_var(pcpu_last_used_sid) = 0;
+ memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
+}
+
+static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
+ return vcpu_e500->idt;
+}
+
+static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ kfree(vcpu_e500->idt);
+ vcpu_e500->idt = NULL;
+}
+
+/* Map guest pid to shadow.
+ * We use PID to hold the shadow of the current non-zero guest PID,
+ * and PID1 to hold the shadow of guest PID 0, so that guest tlbes
+ * with TID=0 can be accessed at any time. */
+static void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ preempt_disable();
+ vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
+ get_cur_as(&vcpu_e500->vcpu),
+ get_cur_pid(&vcpu_e500->vcpu),
+ get_cur_pr(&vcpu_e500->vcpu), 1);
+ vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
+ get_cur_as(&vcpu_e500->vcpu), 0,
+ get_cur_pr(&vcpu_e500->vcpu), 1);
+ preempt_enable();
+}
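+
+/*
+ * Example (hypothetical values): with guest AS=0, PID=3, PR=0, the
+ * code above stores the sid for (0, 3, 0) in arch.shadow_pid and the
+ * sid for (0, 0, 0) in arch.shadow_pid1, matching the PID/PID1 split
+ * described in the comment above so that TID=0 guest mappings stay
+ * reachable regardless of the guest's current PID.
+ */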
+
+/* Invalidate all mappings on vcpu */
+static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));
+
+ /* Update shadow pid when mappings are changed */
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+}
+
+/* Invalidate one ID mapping on vcpu */
+static inline void kvmppc_e500_id_table_reset_one(
+ struct kvmppc_vcpu_e500 *vcpu_e500,
+ int as, int pid, int pr)
+{
+ struct vcpu_id_table *idt = vcpu_e500->idt;
+
+ BUG_ON(as >= 2);
+ BUG_ON(pid >= NUM_TIDS);
+ BUG_ON(pr >= 2);
+
+ idt->id[as][pid][pr].val = 0;
+ idt->id[as][pid][pr].pentry = NULL;
+
+ /* Update shadow pid when mappings are changed */
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+}
+
+/*
+ * Map guest (vcpu,AS,ID,PR) to physical core shadow id.
+ * This function first looks up whether a valid mapping exists;
+ * if not, it creates a new one.
+ *
+ * The caller must have preemption disabled, and keep it that way until
+ * it has finished with the returned shadow id (either written into the
+ * TLB or arch.shadow_pid, or discarded).
+ */
+unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
+ unsigned int as, unsigned int gid,
+ unsigned int pr, int avoid_recursion)
+{
+ struct vcpu_id_table *idt = vcpu_e500->idt;
+ int sid;
+
+ BUG_ON(as >= 2);
+ BUG_ON(gid >= NUM_TIDS);
+ BUG_ON(pr >= 2);
+
+ sid = local_sid_lookup(&idt->id[as][gid][pr]);
+
+ while (sid <= 0) {
+ /* No mapping yet */
+ sid = local_sid_setup_one(&idt->id[as][gid][pr]);
+ if (sid <= 0) {
+ _tlbil_all();
+ local_sid_destroy_all();
+ }
+
+ /* Update shadow pid when mappings are changed */
+ if (!avoid_recursion)
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+ }
+
+ return sid;
+}
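+
+/*
+ * Sketch of the exhaustion path above: once this core has handed out
+ * sids 1..255, local_sid_setup_one() returns -1, the host TLB is
+ * flushed with _tlbil_all(), local_sid_destroy_all() resets the
+ * per-core state, and the next loop iteration starts handing out
+ * sids again from 1.
+ */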
+
+unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
+ struct kvm_book3e_206_tlb_entry *gtlbe)
+{
+ return kvmppc_e500_get_sid(to_e500(vcpu), get_tlb_ts(gtlbe),
+ get_tlb_tid(gtlbe), get_cur_pr(vcpu), 0);
+}
+
+void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+ if (vcpu->arch.pid != pid) {
+ vcpu_e500->pid[0] = vcpu->arch.pid = pid;
+ kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+ }
+}
+
+/* gtlbe must not be mapped by more than one host tlbe */
+void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
+ struct kvm_book3e_206_tlb_entry *gtlbe)
+{
+ struct vcpu_id_table *idt = vcpu_e500->idt;
+ unsigned int pr, tid, ts, pid;
+ u32 val, eaddr;
+ unsigned long flags;
+
+ ts = get_tlb_ts(gtlbe);
+ tid = get_tlb_tid(gtlbe);
+
+ preempt_disable();
+
+ /* One guest ID may be mapped to two shadow IDs */
+ for (pr = 0; pr < 2; pr++) {
+ /*
+ * The shadow PID can have a valid mapping on at most one
+ * host CPU. In the common case, it will be valid on this
+ * CPU, in which case we do a local invalidation of the
+ * specific address.
+ *
+ * If the shadow PID is not valid on the current host CPU,
+ * we invalidate the entire shadow PID.
+ */
+ pid = local_sid_lookup(&idt->id[ts][tid][pr]);
+ if (pid <= 0) {
+ kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
+ continue;
+ }
+
+ /*
+ * The guest is invalidating a 4K entry which is in a PID
+ * that has a valid shadow mapping on this host CPU. We
+ * search the host TLB to invalidate its shadow TLB entry,
+ * similar to __tlbil_va except that we need to look in AS1.
+ */
+ val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
+ eaddr = get_tlb_eaddr(gtlbe);
+
+ local_irq_save(flags);
+
+ mtspr(SPRN_MAS6, val);
+ asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
+ val = mfspr(SPRN_MAS1);
+ if (val & MAS1_VALID) {
+ mtspr(SPRN_MAS1, val & ~MAS1_VALID);
+ asm volatile("tlbwe");
+ }
+
+ local_irq_restore(flags);
+ }
+
+ preempt_enable();
+}
+
+void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ kvmppc_e500_id_table_reset_all(vcpu_e500);
+}
+
+void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
+{
+ /* Recalc shadow pid since MSR changes */
+ kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
+}
void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
@@ -36,17 +307,20 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
- kvmppc_e500_tlb_load(vcpu, cpu);
+ kvmppc_booke_vcpu_load(vcpu, cpu);
+
+ /* Shadow PID may be expired on local core */
+ kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
- kvmppc_e500_tlb_put(vcpu);
-
#ifdef CONFIG_SPE
if (vcpu->arch.shadow_msr & MSR_SPE)
kvmppc_vcpu_disable_spe(vcpu);
#endif
+
+ kvmppc_booke_vcpu_put(vcpu);
}
int kvmppc_core_check_processor_compat(void)
@@ -61,6 +335,23 @@ int kvmppc_core_check_processor_compat(void)
return r;
}
+static void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ struct kvm_book3e_206_tlb_entry *tlbe;
+
+ /* Insert large initial mapping for guest. */
+ tlbe = get_entry(vcpu_e500, 1, 0);
+ tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
+ tlbe->mas2 = 0;
+ tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK;
+
+ /* 4K map for serial output. Used by kernel wrapper. */
+ tlbe = get_entry(vcpu_e500, 1, 1);
+ tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
+ tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
+ tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
+}
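+
+/*
+ * For reference, the masking above makes the second entry cover the
+ * single 4K page 0xe0004000..0xe0004fff, i.e. the page containing the
+ * 0xe0004500 serial address used by the guest kernel wrapper.
+ */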
+
int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -76,32 +367,6 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
return 0;
}
-/* 'linear_address' is actually an encoding of AS|PID|EADDR . */
-int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
- struct kvm_translation *tr)
-{
- int index;
- gva_t eaddr;
- u8 pid;
- u8 as;
-
- eaddr = tr->linear_address;
- pid = (tr->linear_address >> 32) & 0xff;
- as = (tr->linear_address >> 40) & 0x1;
-
- index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
- if (index < 0) {
- tr->valid = 0;
- return 0;
- }
-
- tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
- /* XXX what does "writeable" and "usermode" even mean? */
- tr->valid = 1;
-
- return 0;
-}
-
void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -115,19 +380,6 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;
- sregs->u.e.mas0 = vcpu->arch.shared->mas0;
- sregs->u.e.mas1 = vcpu->arch.shared->mas1;
- sregs->u.e.mas2 = vcpu->arch.shared->mas2;
- sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
- sregs->u.e.mas4 = vcpu->arch.shared->mas4;
- sregs->u.e.mas6 = vcpu->arch.shared->mas6;
-
- sregs->u.e.mmucfg = mfspr(SPRN_MMUCFG);
- sregs->u.e.tlbcfg[0] = vcpu_e500->tlb0cfg;
- sregs->u.e.tlbcfg[1] = vcpu_e500->tlb1cfg;
- sregs->u.e.tlbcfg[2] = 0;
- sregs->u.e.tlbcfg[3] = 0;
-
sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
@@ -135,11 +387,13 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
kvmppc_get_sregs_ivor(vcpu, sregs);
+ kvmppc_get_sregs_e500_tlb(vcpu, sregs);
}
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ int ret;
if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
@@ -147,14 +401,9 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
}
- if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
- vcpu->arch.shared->mas0 = sregs->u.e.mas0;
- vcpu->arch.shared->mas1 = sregs->u.e.mas1;
- vcpu->arch.shared->mas2 = sregs->u.e.mas2;
- vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
- vcpu->arch.shared->mas4 = sregs->u.e.mas4;
- vcpu->arch.shared->mas6 = sregs->u.e.mas6;
- }
+ ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
+ if (ret < 0)
+ return ret;
if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
return 0;
@@ -193,9 +442,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
if (err)
goto free_vcpu;
+ if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
+ goto uninit_vcpu;
+
err = kvmppc_e500_tlb_init(vcpu_e500);
if (err)
- goto uninit_vcpu;
+ goto uninit_id;
vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO);
if (!vcpu->arch.shared)
@@ -205,6 +457,8 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
uninit_tlb:
kvmppc_e500_tlb_uninit(vcpu_e500);
+uninit_id:
+ kvmppc_e500_id_table_free(vcpu_e500);
uninit_vcpu:
kvm_vcpu_uninit(vcpu);
free_vcpu:
@@ -218,11 +472,21 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
free_page((unsigned long)vcpu->arch.shared);
- kvm_vcpu_uninit(vcpu);
kvmppc_e500_tlb_uninit(vcpu_e500);
+ kvmppc_e500_id_table_free(vcpu_e500);
+ kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
}
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+ return 0;
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+}
+
static int __init kvmppc_e500_init(void)
{
int r, i;
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
new file mode 100644
index 00000000000..aa8b81428bf
--- /dev/null
+++ b/arch/powerpc/kvm/e500.h
@@ -0,0 +1,306 @@
+/*
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Yu Liu <yu.liu@freescale.com>
+ * Scott Wood <scottwood@freescale.com>
+ * Ashish Kalra <ashish.kalra@freescale.com>
+ * Varun Sethi <varun.sethi@freescale.com>
+ *
+ * Description:
+ * This file is based on arch/powerpc/kvm/44x_tlb.h and
+ * arch/powerpc/include/asm/kvm_44x.h by Hollis Blanchard <hollisb@us.ibm.com>,
+ * Copyright IBM Corp. 2007-2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef KVM_E500_H
+#define KVM_E500_H
+
+#include <linux/kvm_host.h>
+#include <asm/mmu-book3e.h>
+#include <asm/tlb.h>
+
+#define E500_PID_NUM 3
+#define E500_TLB_NUM 2
+
+#define E500_TLB_VALID 1
+#define E500_TLB_DIRTY 2
+#define E500_TLB_BITMAP 4
+
+struct tlbe_ref {
+ pfn_t pfn;
+ unsigned int flags; /* E500_TLB_* */
+};
+
+struct tlbe_priv {
+ struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
+};
+
+#ifdef CONFIG_KVM_E500V2
+struct vcpu_id_table;
+#endif
+
+struct kvmppc_e500_tlb_params {
+ int entries, ways, sets;
+};
+
+struct kvmppc_vcpu_e500 {
+ struct kvm_vcpu vcpu;
+
+ /* Unmodified copy of the guest's TLB -- shared with host userspace. */
+ struct kvm_book3e_206_tlb_entry *gtlb_arch;
+
+ /* Starting entry number in gtlb_arch[] */
+ int gtlb_offset[E500_TLB_NUM];
+
+ /* KVM internal information associated with each guest TLB entry */
+ struct tlbe_priv *gtlb_priv[E500_TLB_NUM];
+
+ struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM];
+
+ unsigned int gtlb_nv[E500_TLB_NUM];
+
+ /*
+ * information associated with each host TLB entry --
+ * TLB1 only for now. If/when guest TLB1 entries can be
+ * mapped with host TLB0, this will be used for that too.
+ *
+ * We don't want to use this for guest TLB0 because then we'd
+ * have the overhead of doing the translation again even if
+ * the entry is still in the guest TLB (e.g. we swapped out
+ * and back, and our host TLB entries got evicted).
+ */
+ struct tlbe_ref *tlb_refs[E500_TLB_NUM];
+ unsigned int host_tlb1_nv;
+
+ u32 svr;
+ u32 l1csr0;
+ u32 l1csr1;
+ u32 hid0;
+ u32 hid1;
+ u64 mcar;
+
+ struct page **shared_tlb_pages;
+ int num_shared_tlb_pages;
+
+ u64 *g2h_tlb1_map;
+ unsigned int *h2g_tlb1_rmap;
+
+ /* Minimum and maximum address mapped by TLB1 */
+ unsigned long tlb1_min_eaddr;
+ unsigned long tlb1_max_eaddr;
+
+#ifdef CONFIG_KVM_E500V2
+ u32 pid[E500_PID_NUM];
+
+ /* vcpu id table */
+ struct vcpu_id_table *idt;
+#endif
+};
+
+static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
+{
+ return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu);
+}
+
+
+/* This geometry is the legacy default -- can be overridden by userspace */
+#define KVM_E500_TLB0_WAY_SIZE 128
+#define KVM_E500_TLB0_WAY_NUM 2
+
+#define KVM_E500_TLB0_SIZE (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM)
+#define KVM_E500_TLB1_SIZE 16
+
+#define index_of(tlbsel, esel) (((tlbsel) << 16) | ((esel) & 0xFFFF))
+#define tlbsel_of(index) ((index) >> 16)
+#define esel_of(index) ((index) & 0xFFFF)
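+
+/*
+ * For example, index_of(1, 5) packs TLB1 entry 5 as 0x10005;
+ * tlbsel_of() and esel_of() recover the 1 and the 5 respectively.
+ */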
+
+#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
+#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW)
+#define MAS2_ATTRIB_MASK \
+ (MAS2_X0 | MAS2_X1)
+#define MAS3_ATTRIB_MASK \
+ (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
+ | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)
+
+int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500,
+ ulong value);
+int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu);
+int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu);
+int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb);
+int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int rt, int ra, int rb);
+int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb);
+int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500);
+void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500);
+
+void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+
+
+#ifdef CONFIG_KVM_E500V2
+unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
+ unsigned int as, unsigned int gid,
+ unsigned int pr, int avoid_recursion);
+#endif
+
+/* TLB helper functions */
+static inline unsigned int
+get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+ return (tlbe->mas1 >> 7) & 0x1f;
+}
+
+static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+ return tlbe->mas2 & 0xfffff000;
+}
+
+static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+ unsigned int pgsize = get_tlb_size(tlbe);
+ return 1ULL << 10 << pgsize;
+}
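+
+/*
+ * Example, assuming BOOK3E_PAGESZ_4K is 2 in this log2-of-KB
+ * encoding: get_tlb_bytes() then returns 1ULL << 10 << 2 = 4096.
+ */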
+
+static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+ u64 bytes = get_tlb_bytes(tlbe);
+ return get_tlb_eaddr(tlbe) + bytes - 1;
+}
+
+static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+ return tlbe->mas7_3 & ~0xfffULL;
+}
+
+static inline unsigned int
+get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+ return (tlbe->mas1 >> 16) & 0xff;
+}
+
+static inline unsigned int
+get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+ return (tlbe->mas1 >> 12) & 0x1;
+}
+
+static inline unsigned int
+get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+ return (tlbe->mas1 >> 31) & 0x1;
+}
+
+static inline unsigned int
+get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+ return (tlbe->mas1 >> 30) & 0x1;
+}
+
+static inline unsigned int
+get_tlb_tsize(const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+ return (tlbe->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
+}
+
+static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.pid & 0xff;
+}
+
+static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu)
+{
+ return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS));
+}
+
+static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
+{
+ return !!(vcpu->arch.shared->msr & MSR_PR);
+}
+
+static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu)
+{
+ return (vcpu->arch.shared->mas6 >> 16) & 0xff;
+}
+
+static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.shared->mas6 & 0x1;
+}
+
+static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
+{
+ /*
+ * The manual says tlbsel is 2 bits wide.
+ * Since we only have two TLBs, only the lower bit is used.
+ */
+ return (vcpu->arch.shared->mas0 >> 28) & 0x1;
+}
+
+static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.shared->mas0 & 0xfff;
+}
+
+static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu)
+{
+ return (vcpu->arch.shared->mas0 >> 16) & 0xfff;
+}
+
+static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
+ const struct kvm_book3e_206_tlb_entry *tlbe)
+{
+ gpa_t gpa;
+
+ if (!get_tlb_v(tlbe))
+ return 0;
+
+#ifndef CONFIG_KVM_BOOKE_HV
+ /* Does it match current guest AS? */
+ /* XXX what about IS != DS? */
+ if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
+ return 0;
+#endif
+
+ gpa = get_tlb_raddr(tlbe);
+ if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
+ /* Mapping is not for RAM. */
+ return 0;
+
+ return 1;
+}
+
+static inline struct kvm_book3e_206_tlb_entry *get_entry(
+ struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
+{
+ int offset = vcpu_e500->gtlb_offset[tlbsel];
+ return &vcpu_e500->gtlb_arch[offset + entry];
+}
+
+void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
+ struct kvm_book3e_206_tlb_entry *gtlbe);
+void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500);
+
+#ifdef CONFIG_KVM_BOOKE_HV
+#define kvmppc_e500_get_tlb_stid(vcpu, gtlbe) get_tlb_tid(gtlbe)
+#define get_tlbmiss_tid(vcpu) get_cur_pid(vcpu)
+#define get_tlb_sts(gtlbe) (gtlbe->mas1 & MAS1_TS)
+#else
+unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
+ struct kvm_book3e_206_tlb_entry *gtlbe);
+
+static inline unsigned int get_tlbmiss_tid(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ unsigned int tidseld = (vcpu->arch.shared->mas4 >> 16) & 0xf;
+
+ return vcpu_e500->pid[tidseld];
+}
+
+/* Force TS=1 for all guest mappings. */
+#define get_tlb_sts(gtlbe) (MAS1_TS)
+#endif /* !BOOKE_HV */
+
+#endif /* KVM_E500_H */
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 6d0b2bd54fb..8b99e076dc8 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -14,27 +14,96 @@
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
-#include <asm/kvm_e500.h>
+#include <asm/dbell.h>
#include "booke.h"
-#include "e500_tlb.h"
+#include "e500.h"
+#define XOP_MSGSND 206
+#define XOP_MSGCLR 238
#define XOP_TLBIVAX 786
#define XOP_TLBSX 914
#define XOP_TLBRE 946
#define XOP_TLBWE 978
+#define XOP_TLBILX 18
+
+#ifdef CONFIG_KVM_E500MC
+static int dbell2prio(ulong param)
+{
+ int msg = param & PPC_DBELL_TYPE_MASK;
+ int prio = -1;
+
+ switch (msg) {
+ case PPC_DBELL_TYPE(PPC_DBELL):
+ prio = BOOKE_IRQPRIO_DBELL;
+ break;
+ case PPC_DBELL_TYPE(PPC_DBELL_CRIT):
+ prio = BOOKE_IRQPRIO_DBELL_CRIT;
+ break;
+ default:
+ break;
+ }
+
+ return prio;
+}
+
+static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb)
+{
+ ulong param = vcpu->arch.gpr[rb];
+ int prio = dbell2prio(param);
+
+ if (prio < 0)
+ return EMULATE_FAIL;
+
+ clear_bit(prio, &vcpu->arch.pending_exceptions);
+ return EMULATE_DONE;
+}
+
+static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
+{
+ ulong param = vcpu->arch.gpr[rb];
+ int prio = dbell2prio(param);
+ int pir = param & PPC_DBELL_PIR_MASK;
+ int i;
+ struct kvm_vcpu *cvcpu;
+
+ if (prio < 0)
+ return EMULATE_FAIL;
+
+ kvm_for_each_vcpu(i, cvcpu, vcpu->kvm) {
+ int cpir = cvcpu->arch.shared->pir;
+ if ((param & PPC_DBELL_MSG_BRDCAST) || (cpir == pir)) {
+ set_bit(prio, &cvcpu->arch.pending_exceptions);
+ kvm_vcpu_kick(cvcpu);
+ }
+ }
+
+ return EMULATE_DONE;
+}
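+
+/*
+ * Illustrative behaviour of the emulation above: a msgsnd whose
+ * parameter has PPC_DBELL_MSG_BRDCAST set queues the doorbell on
+ * every vcpu of the VM; otherwise it is queued only on the vcpu
+ * whose shared->pir matches the PIR field of the parameter.
+ */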
+#endif
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
- int ra;
- int rb;
+ int ra = get_ra(inst);
+ int rb = get_rb(inst);
+ int rt = get_rt(inst);
switch (get_op(inst)) {
case 31:
switch (get_xop(inst)) {
+#ifdef CONFIG_KVM_E500MC
+ case XOP_MSGSND:
+ emulated = kvmppc_e500_emul_msgsnd(vcpu, rb);
+ break;
+
+ case XOP_MSGCLR:
+ emulated = kvmppc_e500_emul_msgclr(vcpu, rb);
+ break;
+#endif
+
case XOP_TLBRE:
emulated = kvmppc_e500_emul_tlbre(vcpu);
break;
@@ -44,13 +113,14 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case XOP_TLBSX:
- rb = get_rb(inst);
emulated = kvmppc_e500_emul_tlbsx(vcpu,rb);
break;
+ case XOP_TLBILX:
+ emulated = kvmppc_e500_emul_tlbilx(vcpu, rt, ra, rb);
+ break;
+
case XOP_TLBIVAX:
- ra = get_ra(inst);
- rb = get_rb(inst);
emulated = kvmppc_e500_emul_tlbivax(vcpu, ra, rb);
break;
@@ -70,52 +140,63 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int emulated = EMULATE_DONE;
- ulong spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
+#ifndef CONFIG_KVM_BOOKE_HV
case SPRN_PID:
kvmppc_set_pid(vcpu, spr_val);
break;
case SPRN_PID1:
if (spr_val != 0)
return EMULATE_FAIL;
- vcpu_e500->pid[1] = spr_val; break;
+ vcpu_e500->pid[1] = spr_val;
+ break;
case SPRN_PID2:
if (spr_val != 0)
return EMULATE_FAIL;
- vcpu_e500->pid[2] = spr_val; break;
+ vcpu_e500->pid[2] = spr_val;
+ break;
case SPRN_MAS0:
- vcpu->arch.shared->mas0 = spr_val; break;
+ vcpu->arch.shared->mas0 = spr_val;
+ break;
case SPRN_MAS1:
- vcpu->arch.shared->mas1 = spr_val; break;
+ vcpu->arch.shared->mas1 = spr_val;
+ break;
case SPRN_MAS2:
- vcpu->arch.shared->mas2 = spr_val; break;
+ vcpu->arch.shared->mas2 = spr_val;
+ break;
case SPRN_MAS3:
vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
vcpu->arch.shared->mas7_3 |= spr_val;
break;
case SPRN_MAS4:
- vcpu->arch.shared->mas4 = spr_val; break;
+ vcpu->arch.shared->mas4 = spr_val;
+ break;
case SPRN_MAS6:
- vcpu->arch.shared->mas6 = spr_val; break;
+ vcpu->arch.shared->mas6 = spr_val;
+ break;
case SPRN_MAS7:
vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
break;
+#endif
case SPRN_L1CSR0:
vcpu_e500->l1csr0 = spr_val;
vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
break;
case SPRN_L1CSR1:
- vcpu_e500->l1csr1 = spr_val; break;
+ vcpu_e500->l1csr1 = spr_val;
+ break;
case SPRN_HID0:
- vcpu_e500->hid0 = spr_val; break;
+ vcpu_e500->hid0 = spr_val;
+ break;
case SPRN_HID1:
- vcpu_e500->hid1 = spr_val; break;
+ vcpu_e500->hid1 = spr_val;
+ break;
case SPRN_MMUCSR0:
emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
@@ -135,81 +216,112 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
case SPRN_IVOR35:
vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
break;
-
+#ifdef CONFIG_KVM_BOOKE_HV
+ case SPRN_IVOR36:
+ vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = spr_val;
+ break;
+ case SPRN_IVOR37:
+ vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = spr_val;
+ break;
+#endif
default:
- emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
+ emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
}
return emulated;
}
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int emulated = EMULATE_DONE;
- unsigned long val;
switch (sprn) {
+#ifndef CONFIG_KVM_BOOKE_HV
case SPRN_PID:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[0]); break;
+ *spr_val = vcpu_e500->pid[0];
+ break;
case SPRN_PID1:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[1]); break;
+ *spr_val = vcpu_e500->pid[1];
+ break;
case SPRN_PID2:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break;
+ *spr_val = vcpu_e500->pid[2];
+ break;
case SPRN_MAS0:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas0); break;
+ *spr_val = vcpu->arch.shared->mas0;
+ break;
case SPRN_MAS1:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas1); break;
+ *spr_val = vcpu->arch.shared->mas1;
+ break;
case SPRN_MAS2:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas2); break;
+ *spr_val = vcpu->arch.shared->mas2;
+ break;
case SPRN_MAS3:
- val = (u32)vcpu->arch.shared->mas7_3;
- kvmppc_set_gpr(vcpu, rt, val);
+ *spr_val = (u32)vcpu->arch.shared->mas7_3;
break;
case SPRN_MAS4:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas4); break;
+ *spr_val = vcpu->arch.shared->mas4;
+ break;
case SPRN_MAS6:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas6); break;
+ *spr_val = vcpu->arch.shared->mas6;
+ break;
case SPRN_MAS7:
- val = vcpu->arch.shared->mas7_3 >> 32;
- kvmppc_set_gpr(vcpu, rt, val);
+ *spr_val = vcpu->arch.shared->mas7_3 >> 32;
break;
+#endif
case SPRN_TLB0CFG:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break;
+ *spr_val = vcpu->arch.tlbcfg[0];
+ break;
case SPRN_TLB1CFG:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb1cfg); break;
+ *spr_val = vcpu->arch.tlbcfg[1];
+ break;
case SPRN_L1CSR0:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break;
+ *spr_val = vcpu_e500->l1csr0;
+ break;
case SPRN_L1CSR1:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break;
+ *spr_val = vcpu_e500->l1csr1;
+ break;
case SPRN_HID0:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break;
+ *spr_val = vcpu_e500->hid0;
+ break;
case SPRN_HID1:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break;
+ *spr_val = vcpu_e500->hid1;
+ break;
case SPRN_SVR:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->svr); break;
+ *spr_val = vcpu_e500->svr;
+ break;
case SPRN_MMUCSR0:
- kvmppc_set_gpr(vcpu, rt, 0); break;
+ *spr_val = 0;
+ break;
case SPRN_MMUCFG:
- kvmppc_set_gpr(vcpu, rt, mfspr(SPRN_MMUCFG)); break;
+ *spr_val = vcpu->arch.mmucfg;
+ break;
/* extra exceptions */
case SPRN_IVOR32:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
break;
case SPRN_IVOR33:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
break;
case SPRN_IVOR34:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
break;
case SPRN_IVOR35:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]);
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
+ break;
+#ifdef CONFIG_KVM_BOOKE_HV
+ case SPRN_IVOR36:
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
+ break;
+ case SPRN_IVOR37:
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
break;
+#endif
default:
- emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
+ emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
}
return emulated;
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 6e53e4164de..c510fc96130 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -2,6 +2,9 @@
* Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Yu Liu, yu.liu@freescale.com
+ * Scott Wood, scottwood@freescale.com
+ * Ashish Kalra, ashish.kalra@freescale.com
+ * Varun Sethi, varun.sethi@freescale.com
*
* Description:
* This file is based on arch/powerpc/kvm/44x_tlb.c,
@@ -26,210 +29,15 @@
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>
-#include <asm/kvm_e500.h>
-#include "../mm/mmu_decl.h"
-#include "e500_tlb.h"
+#include "e500.h"
#include "trace.h"
#include "timing.h"
#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
-struct id {
- unsigned long val;
- struct id **pentry;
-};
-
-#define NUM_TIDS 256
-
-/*
- * This table provide mappings from:
- * (guestAS,guestTID,guestPR) --> ID of physical cpu
- * guestAS [0..1]
- * guestTID [0..255]
- * guestPR [0..1]
- * ID [1..255]
- * Each vcpu keeps one vcpu_id_table.
- */
-struct vcpu_id_table {
- struct id id[2][NUM_TIDS][2];
-};
-
-/*
- * This table provide reversed mappings of vcpu_id_table:
- * ID --> address of vcpu_id_table item.
- * Each physical core has one pcpu_id_table.
- */
-struct pcpu_id_table {
- struct id *entry[NUM_TIDS];
-};
-
-static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);
-
-/* This variable keeps last used shadow ID on local core.
- * The valid range of shadow ID is [1..255] */
-static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);
-
static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];
-static struct kvm_book3e_206_tlb_entry *get_entry(
- struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
-{
- int offset = vcpu_e500->gtlb_offset[tlbsel];
- return &vcpu_e500->gtlb_arch[offset + entry];
-}
-
-/*
- * Allocate a free shadow id and setup a valid sid mapping in given entry.
- * A mapping is only valid when vcpu_id_table and pcpu_id_table are match.
- *
- * The caller must have preemption disabled, and keep it that way until
- * it has finished with the returned shadow id (either written into the
- * TLB or arch.shadow_pid, or discarded).
- */
-static inline int local_sid_setup_one(struct id *entry)
-{
- unsigned long sid;
- int ret = -1;
-
- sid = ++(__get_cpu_var(pcpu_last_used_sid));
- if (sid < NUM_TIDS) {
- __get_cpu_var(pcpu_sids).entry[sid] = entry;
- entry->val = sid;
- entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
- ret = sid;
- }
-
- /*
- * If sid == NUM_TIDS, we've run out of sids. We return -1, and
- * the caller will invalidate everything and start over.
- *
- * sid > NUM_TIDS indicates a race, which we disable preemption to
- * avoid.
- */
- WARN_ON(sid > NUM_TIDS);
-
- return ret;
-}
-
-/*
- * Check if given entry contain a valid shadow id mapping.
- * An ID mapping is considered valid only if
- * both vcpu and pcpu know this mapping.
- *
- * The caller must have preemption disabled, and keep it that way until
- * it has finished with the returned shadow id (either written into the
- * TLB or arch.shadow_pid, or discarded).
- */
-static inline int local_sid_lookup(struct id *entry)
-{
- if (entry && entry->val != 0 &&
- __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
- entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
- return entry->val;
- return -1;
-}
-
-/* Invalidate all id mappings on local core -- call with preempt disabled */
-static inline void local_sid_destroy_all(void)
-{
- __get_cpu_var(pcpu_last_used_sid) = 0;
- memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
-}
-
-static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
- vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
- return vcpu_e500->idt;
-}
-
-static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
- kfree(vcpu_e500->idt);
-}
-
-/* Invalidate all mappings on vcpu */
-static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
- memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));
-
- /* Update shadow pid when mappings are changed */
- kvmppc_e500_recalc_shadow_pid(vcpu_e500);
-}
-
-/* Invalidate one ID mapping on vcpu */
-static inline void kvmppc_e500_id_table_reset_one(
- struct kvmppc_vcpu_e500 *vcpu_e500,
- int as, int pid, int pr)
-{
- struct vcpu_id_table *idt = vcpu_e500->idt;
-
- BUG_ON(as >= 2);
- BUG_ON(pid >= NUM_TIDS);
- BUG_ON(pr >= 2);
-
- idt->id[as][pid][pr].val = 0;
- idt->id[as][pid][pr].pentry = NULL;
-
- /* Update shadow pid when mappings are changed */
- kvmppc_e500_recalc_shadow_pid(vcpu_e500);
-}
-
-/*
- * Map guest (vcpu,AS,ID,PR) to physical core shadow id.
- * This function first lookup if a valid mapping exists,
- * if not, then creates a new one.
- *
- * The caller must have preemption disabled, and keep it that way until
- * it has finished with the returned shadow id (either written into the
- * TLB or arch.shadow_pid, or discarded).
- */
-static unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
- unsigned int as, unsigned int gid,
- unsigned int pr, int avoid_recursion)
-{
- struct vcpu_id_table *idt = vcpu_e500->idt;
- int sid;
-
- BUG_ON(as >= 2);
- BUG_ON(gid >= NUM_TIDS);
- BUG_ON(pr >= 2);
-
- sid = local_sid_lookup(&idt->id[as][gid][pr]);
-
- while (sid <= 0) {
- /* No mapping yet */
- sid = local_sid_setup_one(&idt->id[as][gid][pr]);
- if (sid <= 0) {
- _tlbil_all();
- local_sid_destroy_all();
- }
-
- /* Update shadow pid when mappings are changed */
- if (!avoid_recursion)
- kvmppc_e500_recalc_shadow_pid(vcpu_e500);
- }
-
- return sid;
-}
-
-/* Map guest pid to shadow.
- * We use PID to keep shadow of current guest non-zero PID,
- * and use PID1 to keep shadow of guest zero PID.
- * So that guest tlbe with TID=0 can be accessed at any time */
-void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
- preempt_disable();
- vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
- get_cur_as(&vcpu_e500->vcpu),
- get_cur_pid(&vcpu_e500->vcpu),
- get_cur_pr(&vcpu_e500->vcpu), 1);
- vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
- get_cur_as(&vcpu_e500->vcpu), 0,
- get_cur_pr(&vcpu_e500->vcpu), 1);
- preempt_enable();
-}
-
static inline unsigned int gtlb0_get_next_victim(
struct kvmppc_vcpu_e500 *vcpu_e500)
{
@@ -258,6 +66,7 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
/* Mask off reserved bits. */
mas3 &= MAS3_ATTRIB_MASK;
+#ifndef CONFIG_KVM_BOOKE_HV
if (!usermode) {
/* Guest is in supervisor mode,
* so we need to translate guest
@@ -265,8 +74,9 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
mas3 &= ~E500_TLB_USER_PERM_MASK;
mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
}
-
- return mas3 | E500_TLB_SUPER_PERM_MASK;
+ mas3 |= E500_TLB_SUPER_PERM_MASK;
+#endif
+ return mas3;
}
static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
@@ -292,7 +102,16 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
+#ifdef CONFIG_KVM_BOOKE_HV
+ mtspr(SPRN_MAS8, stlbe->mas8);
+#endif
asm volatile("isync; tlbwe" : : : "memory");
+
+#ifdef CONFIG_KVM_BOOKE_HV
+ /* Must clear mas8 for other host tlbwe's */
+ mtspr(SPRN_MAS8, 0);
+ isync();
+#endif
local_irq_restore(flags);
trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
@@ -337,6 +156,7 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
}
}
+#ifdef CONFIG_KVM_E500V2
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -361,75 +181,41 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu)
__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
preempt_enable();
}
-
-void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
-{
- struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-
- /* Shadow PID may be expired on local core */
- kvmppc_e500_recalc_shadow_pid(vcpu_e500);
-}
-
-void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
-{
-}
+#endif
static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500,
int tlbsel, int esel)
{
struct kvm_book3e_206_tlb_entry *gtlbe =
get_entry(vcpu_e500, tlbsel, esel);
- struct vcpu_id_table *idt = vcpu_e500->idt;
- unsigned int pr, tid, ts, pid;
- u32 val, eaddr;
- unsigned long flags;
-
- ts = get_tlb_ts(gtlbe);
- tid = get_tlb_tid(gtlbe);
-
- preempt_disable();
-
- /* One guest ID may be mapped to two shadow IDs */
- for (pr = 0; pr < 2; pr++) {
- /*
- * The shadow PID can have a valid mapping on at most one
- * host CPU. In the common case, it will be valid on this
- * CPU, in which case (for TLB0) we do a local invalidation
- * of the specific address.
- *
- * If the shadow PID is not valid on the current host CPU, or
- * if we're invalidating a TLB1 entry, we invalidate the
- * entire shadow PID.
- */
- if (tlbsel == 1 ||
- (pid = local_sid_lookup(&idt->id[ts][tid][pr])) <= 0) {
- kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
- continue;
- }
- /*
- * The guest is invalidating a TLB0 entry which is in a PID
- * that has a valid shadow mapping on this host CPU. We
- * search host TLB0 to invalidate it's shadow TLB entry,
- * similar to __tlbil_va except that we need to look in AS1.
- */
- val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
- eaddr = get_tlb_eaddr(gtlbe);
+ if (tlbsel == 1 &&
+ vcpu_e500->gtlb_priv[1][esel].ref.flags & E500_TLB_BITMAP) {
+ u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
+ int hw_tlb_indx;
+ unsigned long flags;
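+
+ /*
+ * Illustration of the bit walk below: tmp == 0x14 selects bitmap
+ * positions 2 and 4; tmp & -tmp isolates the lowest set bit,
+ * __ilog2_u64() turns it into an index (converted with
+ * to_htlb1_esel() before the tlbwe), and tmp &= tmp - 1 clears it.
+ */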
local_irq_save(flags);
-
- mtspr(SPRN_MAS6, val);
- asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
- val = mfspr(SPRN_MAS1);
- if (val & MAS1_VALID) {
- mtspr(SPRN_MAS1, val & ~MAS1_VALID);
+ while (tmp) {
+ hw_tlb_indx = __ilog2_u64(tmp & -tmp);
+ mtspr(SPRN_MAS0,
+ MAS0_TLBSEL(1) |
+ MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
+ mtspr(SPRN_MAS1, 0);
asm volatile("tlbwe");
+ vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
+ tmp &= tmp - 1;
}
-
+ mb();
+ vcpu_e500->g2h_tlb1_map[esel] = 0;
+ vcpu_e500->gtlb_priv[1][esel].ref.flags &= ~E500_TLB_BITMAP;
local_irq_restore(flags);
+
+ return;
}
- preempt_enable();
+ /* Guest tlbe is backed by at most one host tlbe per shadow pid. */
+ kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
}
static int tlb0_set_base(gva_t addr, int sets, int ways)
@@ -475,6 +261,9 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
set_base = gtlb0_set_base(vcpu_e500, eaddr);
size = vcpu_e500->gtlb_params[0].ways;
} else {
+ if (eaddr < vcpu_e500->tlb1_min_eaddr ||
+ eaddr > vcpu_e500->tlb1_max_eaddr)
+ return -1;
set_base = 0;
}
@@ -530,6 +319,16 @@ static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
}
}
+static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ if (vcpu_e500->g2h_tlb1_map)
+ memset(vcpu_e500->g2h_tlb1_map, 0,
+ sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
+ if (vcpu_e500->h2g_tlb1_rmap)
+ memset(vcpu_e500->h2g_tlb1_rmap, 0,
+ sizeof(unsigned int) * host_tlb_params[1].entries);
+}
+
static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
int tlbsel = 0;
@@ -547,7 +346,7 @@ static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
int stlbsel = 1;
int i;
- kvmppc_e500_id_table_reset_all(vcpu_e500);
+ kvmppc_e500_tlbil_all(vcpu_e500);
for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
struct tlbe_ref *ref =
@@ -562,19 +361,18 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
unsigned int eaddr, int as)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- unsigned int victim, pidsel, tsized;
+ unsigned int victim, tsized;
int tlbsel;
/* since we only have two TLBs, only lower bit is used. */
tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
- pidsel = (vcpu->arch.shared->mas4 >> 16) & 0xf;
tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;
vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
- | MAS1_TID(vcpu_e500->pid[pidsel])
+ | MAS1_TID(get_tlbmiss_tid(vcpu))
| MAS1_TSIZE(tsized);
vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
| (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
@@ -586,23 +384,26 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
/* TID must be supplied by the caller */
static inline void kvmppc_e500_setup_stlbe(
- struct kvmppc_vcpu_e500 *vcpu_e500,
+ struct kvm_vcpu *vcpu,
struct kvm_book3e_206_tlb_entry *gtlbe,
int tsize, struct tlbe_ref *ref, u64 gvaddr,
struct kvm_book3e_206_tlb_entry *stlbe)
{
pfn_t pfn = ref->pfn;
+ u32 pr = vcpu->arch.shared->msr & MSR_PR;
BUG_ON(!(ref->flags & E500_TLB_VALID));
- /* Force TS=1 IPROT=0 for all guest mappings. */
- stlbe->mas1 = MAS1_TSIZE(tsize) | MAS1_TS | MAS1_VALID;
- stlbe->mas2 = (gvaddr & MAS2_EPN)
- | e500_shadow_mas2_attrib(gtlbe->mas2,
- vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
- stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT)
- | e500_shadow_mas3_attrib(gtlbe->mas7_3,
- vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
+ /* Force IPROT=0 for all guest mappings. */
+ stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
+ stlbe->mas2 = (gvaddr & MAS2_EPN) |
+ e500_shadow_mas2_attrib(gtlbe->mas2, pr);
+ stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
+ e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
+
+#ifdef CONFIG_KVM_BOOKE_HV
+ stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
+#endif
}
static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
@@ -736,7 +537,8 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
kvmppc_e500_ref_release(ref);
kvmppc_e500_ref_setup(ref, gtlbe, pfn);
- kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, ref, gvaddr, stlbe);
+ kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
+ ref, gvaddr, stlbe);
}
/* XXX only map the one-one case, for now use TLB0 */
@@ -760,7 +562,7 @@ static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
/* XXX for both one-one and one-to-many , for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
- struct kvm_book3e_206_tlb_entry *stlbe)
+ struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
struct tlbe_ref *ref;
unsigned int victim;
@@ -773,15 +575,74 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
ref = &vcpu_e500->tlb_refs[1][victim];
kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref);
+ vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << victim;
+ vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
+ if (vcpu_e500->h2g_tlb1_rmap[victim]) {
+ unsigned int idx = vcpu_e500->h2g_tlb1_rmap[victim];
+ vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << victim);
+ }
+ vcpu_e500->h2g_tlb1_rmap[victim] = esel;
+
return victim;
}
-void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
+static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ int size = vcpu_e500->gtlb_params[1].entries;
+ unsigned int offset;
+ gva_t eaddr;
+ int i;
+
+ vcpu_e500->tlb1_min_eaddr = ~0UL;
+ vcpu_e500->tlb1_max_eaddr = 0;
+ offset = vcpu_e500->gtlb_offset[1];
+
+ for (i = 0; i < size; i++) {
+ struct kvm_book3e_206_tlb_entry *tlbe =
+ &vcpu_e500->gtlb_arch[offset + i];
+
+ if (!get_tlb_v(tlbe))
+ continue;
+
+ eaddr = get_tlb_eaddr(tlbe);
+ vcpu_e500->tlb1_min_eaddr =
+ min(vcpu_e500->tlb1_min_eaddr, eaddr);
+
+ eaddr = get_tlb_end(tlbe);
+ vcpu_e500->tlb1_max_eaddr =
+ max(vcpu_e500->tlb1_max_eaddr, eaddr);
+ }
+}
+
+static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500,
+ struct kvm_book3e_206_tlb_entry *gtlbe)
{
+ unsigned long start, end, size;
+
+ size = get_tlb_bytes(gtlbe);
+ start = get_tlb_eaddr(gtlbe) & ~(size - 1);
+ end = start + size - 1;
+
+ return vcpu_e500->tlb1_min_eaddr == start ||
+ vcpu_e500->tlb1_max_eaddr == end;
+}
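+
+/*
+ * Example (hypothetical addresses): if a guest TLB1 entry maps the
+ * 256MB range 0x10000000..0x1fffffff and tlb1_min_eaddr happens to be
+ * 0x10000000 (or tlb1_max_eaddr is 0x1fffffff), dropping that entry
+ * may shrink the cached range, so the caller rescans TLB1 via
+ * kvmppc_recalc_tlb1map_range(); otherwise the range is untouched.
+ */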
+
+/* This function is supposed to be called when adding a new valid tlb entry */
+static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu,
+ struct kvm_book3e_206_tlb_entry *gtlbe)
+{
+ unsigned long start, end, size;
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- /* Recalc shadow pid since MSR changes */
- kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+ if (!get_tlb_v(gtlbe))
+ return;
+
+ size = get_tlb_bytes(gtlbe);
+ start = get_tlb_eaddr(gtlbe) & ~(size - 1);
+ end = start + size - 1;
+
+ vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start);
+ vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end);
}
static inline int kvmppc_e500_gtlbe_invalidate(
@@ -794,6 +655,9 @@ static inline int kvmppc_e500_gtlbe_invalidate(
if (unlikely(get_tlb_iprot(gtlbe)))
return -1;
+ if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
+ kvmppc_recalc_tlb1map_range(vcpu_e500);
+
gtlbe->mas1 = 0;
return 0;
@@ -811,7 +675,7 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);
/* Invalidate all vcpu id mappings */
- kvmppc_e500_id_table_reset_all(vcpu_e500);
+ kvmppc_e500_tlbil_all(vcpu_e500);
return EMULATE_DONE;
}
@@ -844,7 +708,59 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
}
/* Invalidate all vcpu id mappings */
- kvmppc_e500_id_table_reset_all(vcpu_e500);
+ kvmppc_e500_tlbil_all(vcpu_e500);
+
+ return EMULATE_DONE;
+}
+
+static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
+ int pid, int rt)
+{
+ struct kvm_book3e_206_tlb_entry *tlbe;
+ int tid, esel;
+
+ /* invalidate all entries */
+ for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) {
+ tlbe = get_entry(vcpu_e500, tlbsel, esel);
+ tid = get_tlb_tid(tlbe);
+ if (rt == 0 || tid == pid) {
+ inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
+ kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
+ }
+ }
+}
+
+static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid,
+ int ra, int rb)
+{
+ int tlbsel, esel;
+ gva_t ea;
+
+ ea = kvmppc_get_gpr(&vcpu_e500->vcpu, rb);
+ if (ra)
+ ea += kvmppc_get_gpr(&vcpu_e500->vcpu, ra);
+
+ for (tlbsel = 0; tlbsel < 2; tlbsel++) {
+ esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1);
+ if (esel >= 0) {
+ inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
+ kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
+ break;
+ }
+ }
+}
+
+int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int rt, int ra, int rb)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ int pid = get_cur_spid(vcpu);
+
+ if (rt == 0 || rt == 1) {
+ tlbilx_all(vcpu_e500, 0, pid, rt);
+ tlbilx_all(vcpu_e500, 1, pid, rt);
+ } else if (rt == 3) {
+ tlbilx_one(vcpu_e500, pid, ra, rb);
+ }
return EMULATE_DONE;
}
@@ -929,9 +845,7 @@ static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
int stid;
preempt_disable();
- stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe),
- get_tlb_tid(gtlbe),
- get_cur_pr(&vcpu_e500->vcpu), 0);
+ stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);
stlbe->mas1 |= MAS1_TID(stid);
write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
@@ -941,16 +855,21 @@ static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- struct kvm_book3e_206_tlb_entry *gtlbe;
- int tlbsel, esel;
+ struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
+ int tlbsel, esel, stlbsel, sesel;
+ int recal = 0;
tlbsel = get_tlb_tlbsel(vcpu);
esel = get_tlb_esel(vcpu, tlbsel);
gtlbe = get_entry(vcpu_e500, tlbsel, esel);
- if (get_tlb_v(gtlbe))
+ if (get_tlb_v(gtlbe)) {
inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
+ if ((tlbsel == 1) &&
+ kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
+ recal = 1;
+ }
gtlbe->mas1 = vcpu->arch.shared->mas1;
gtlbe->mas2 = vcpu->arch.shared->mas2;
@@ -959,10 +878,20 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
gtlbe->mas2, gtlbe->mas7_3);
+ if (tlbsel == 1) {
+ /*
+ * If a valid tlb1 entry is overwritten then recalculate the
+ * min/max TLB1 map address range; otherwise there is no need
+ * to scan the tlb1 array.
+ */
+ if (recal)
+ kvmppc_recalc_tlb1map_range(vcpu_e500);
+ else
+ kvmppc_set_tlb1map_range(vcpu, gtlbe);
+ }
+
/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
if (tlbe_is_host_safe(vcpu, gtlbe)) {
- struct kvm_book3e_206_tlb_entry stlbe;
- int stlbsel, sesel;
u64 eaddr;
u64 raddr;
@@ -989,7 +918,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
* are mapped on the fly. */
stlbsel = 1;
sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
- raddr >> PAGE_SHIFT, gtlbe, &stlbe);
+ raddr >> PAGE_SHIFT, gtlbe, &stlbe, esel);
break;
default:
@@ -1003,6 +932,48 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
+static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
+ gva_t eaddr, unsigned int pid, int as)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ int esel, tlbsel;
+
+ for (tlbsel = 0; tlbsel < 2; tlbsel++) {
+ esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
+ if (esel >= 0)
+ return index_of(tlbsel, esel);
+ }
+
+ return -1;
+}
+
+/* 'linear_address' is actually an encoding of AS|PID|EADDR . */
+int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
+ struct kvm_translation *tr)
+{
+ int index;
+ gva_t eaddr;
+ u8 pid;
+ u8 as;
+
+ eaddr = tr->linear_address;
+ pid = (tr->linear_address >> 32) & 0xff;
+ as = (tr->linear_address >> 40) & 0x1;
+
+ index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
+ if (index < 0) {
+ tr->valid = 0;
+ return 0;
+ }
+
+ tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
+	/* XXX what do "writeable" and "usermode" even mean? */
+ tr->valid = 1;
+
+ return 0;
+}
+
+
int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
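[The kvmppc_core_vcpu_translate() hunk above decodes KVM_TRANSLATE's linear_address as EADDR in the low bits, PID at bit 32 and AS at bit 40. A minimal userspace sketch of the matching encoder, assuming that layout; the helper name pack_translation is illustrative and not part of the patch:]

#include <stdint.h>
#include <linux/kvm.h>

/* Pack AS|PID|EADDR the way kvmppc_core_vcpu_translate() unpacks it. */
static void pack_translation(struct kvm_translation *tr,
			     uint32_t eaddr, uint8_t pid, uint8_t as)
{
	tr->linear_address = (uint64_t)eaddr |
			     ((uint64_t)pid << 32) |
			     ((uint64_t)(as & 1) << 40);
}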
@@ -1066,7 +1037,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
sesel = 0; /* unused */
priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
- kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K,
+ kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
&priv->ref, eaddr, &stlbe);
break;
@@ -1075,7 +1046,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
stlbsel = 1;
sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
- gtlbe, &stlbe);
+ gtlbe, &stlbe, esel);
break;
}
@@ -1087,52 +1058,13 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
}
-int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
- gva_t eaddr, unsigned int pid, int as)
-{
- struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- int esel, tlbsel;
-
- for (tlbsel = 0; tlbsel < 2; tlbsel++) {
- esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
- if (esel >= 0)
- return index_of(tlbsel, esel);
- }
-
- return -1;
-}
-
-void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
-{
- struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-
- if (vcpu->arch.pid != pid) {
- vcpu_e500->pid[0] = vcpu->arch.pid = pid;
- kvmppc_e500_recalc_shadow_pid(vcpu_e500);
- }
-}
-
-void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
- struct kvm_book3e_206_tlb_entry *tlbe;
-
- /* Insert large initial mapping for guest. */
- tlbe = get_entry(vcpu_e500, 1, 0);
- tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
- tlbe->mas2 = 0;
- tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK;
-
- /* 4K map for serial output. Used by kernel wrapper. */
- tlbe = get_entry(vcpu_e500, 1, 1);
- tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
- tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
- tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
-}
-
static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
{
int i;
+ clear_tlb1_bitmap(vcpu_e500);
+ kfree(vcpu_e500->g2h_tlb1_map);
+
clear_tlb_refs(vcpu_e500);
kfree(vcpu_e500->gtlb_priv[0]);
kfree(vcpu_e500->gtlb_priv[1]);
@@ -1155,6 +1087,36 @@ static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu_e500->gtlb_arch = NULL;
}
+void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+ sregs->u.e.mas0 = vcpu->arch.shared->mas0;
+ sregs->u.e.mas1 = vcpu->arch.shared->mas1;
+ sregs->u.e.mas2 = vcpu->arch.shared->mas2;
+ sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
+ sregs->u.e.mas4 = vcpu->arch.shared->mas4;
+ sregs->u.e.mas6 = vcpu->arch.shared->mas6;
+
+ sregs->u.e.mmucfg = vcpu->arch.mmucfg;
+ sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0];
+ sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1];
+ sregs->u.e.tlbcfg[2] = 0;
+ sregs->u.e.tlbcfg[3] = 0;
+}
+
+int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+ if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
+ vcpu->arch.shared->mas0 = sregs->u.e.mas0;
+ vcpu->arch.shared->mas1 = sregs->u.e.mas1;
+ vcpu->arch.shared->mas2 = sregs->u.e.mas2;
+ vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
+ vcpu->arch.shared->mas4 = sregs->u.e.mas4;
+ vcpu->arch.shared->mas6 = sregs->u.e.mas6;
+ }
+
+ return 0;
+}
+
int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
struct kvm_config_tlb *cfg)
{
@@ -1163,6 +1125,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
char *virt;
struct page **pages;
struct tlbe_priv *privs[2] = {};
+ u64 *g2h_bitmap = NULL;
size_t array_len;
u32 sets;
int num_pages, ret, i;
@@ -1224,10 +1187,16 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
if (!privs[0] || !privs[1])
goto err_put_page;
+ g2h_bitmap = kzalloc(sizeof(u64) * params.tlb_sizes[1],
+ GFP_KERNEL);
+ if (!g2h_bitmap)
+ goto err_put_page;
+
free_gtlb(vcpu_e500);
vcpu_e500->gtlb_priv[0] = privs[0];
vcpu_e500->gtlb_priv[1] = privs[1];
+ vcpu_e500->g2h_tlb1_map = g2h_bitmap;
vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
(virt + (cfg->array & (PAGE_SIZE - 1)));
@@ -1238,14 +1207,16 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
vcpu_e500->gtlb_offset[0] = 0;
vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];
- vcpu_e500->tlb0cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
+ vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE;
+
+ vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
if (params.tlb_sizes[0] <= 2048)
- vcpu_e500->tlb0cfg |= params.tlb_sizes[0];
- vcpu_e500->tlb0cfg |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;
+ vcpu->arch.tlbcfg[0] |= params.tlb_sizes[0];
+ vcpu->arch.tlbcfg[0] |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;
- vcpu_e500->tlb1cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
- vcpu_e500->tlb1cfg |= params.tlb_sizes[1];
- vcpu_e500->tlb1cfg |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;
+ vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
+ vcpu->arch.tlbcfg[1] |= params.tlb_sizes[1];
+ vcpu->arch.tlbcfg[1] |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;
vcpu_e500->shared_tlb_pages = pages;
vcpu_e500->num_shared_tlb_pages = num_pages;
@@ -1256,6 +1227,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
vcpu_e500->gtlb_params[1].sets = 1;
+ kvmppc_recalc_tlb1map_range(vcpu_e500);
return 0;
err_put_page:
@@ -1274,13 +1246,14 @@ int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
struct kvm_dirty_tlb *dirty)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-
+ kvmppc_recalc_tlb1map_range(vcpu_e500);
clear_tlb_refs(vcpu_e500);
return 0;
}
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
+ struct kvm_vcpu *vcpu = &vcpu_e500->vcpu;
int entry_size = sizeof(struct kvm_book3e_206_tlb_entry);
int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE;
@@ -1357,22 +1330,32 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
if (!vcpu_e500->gtlb_priv[1])
goto err;
- if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
+ vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(unsigned int) *
+ vcpu_e500->gtlb_params[1].entries,
+ GFP_KERNEL);
+ if (!vcpu_e500->g2h_tlb1_map)
+ goto err;
+
+ vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
+ host_tlb_params[1].entries,
+ GFP_KERNEL);
+ if (!vcpu_e500->h2g_tlb1_rmap)
goto err;
/* Init TLB configuration register */
- vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) &
+ vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
- vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[0].entries;
- vcpu_e500->tlb0cfg |=
+ vcpu->arch.tlbcfg[0] |= vcpu_e500->gtlb_params[0].entries;
+ vcpu->arch.tlbcfg[0] |=
vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT;
- vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) &
+ vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) &
~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
- vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[1].entries;
- vcpu_e500->tlb0cfg |=
+ vcpu->arch.tlbcfg[1] |= vcpu_e500->gtlb_params[1].entries;
+ vcpu->arch.tlbcfg[1] |=
vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT;
+ kvmppc_recalc_tlb1map_range(vcpu_e500);
return 0;
err:
@@ -1385,8 +1368,7 @@ err:
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
free_gtlb(vcpu_e500);
- kvmppc_e500_id_table_free(vcpu_e500);
-
+ kfree(vcpu_e500->h2g_tlb1_rmap);
kfree(vcpu_e500->tlb_refs[0]);
kfree(vcpu_e500->tlb_refs[1]);
}
diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h
deleted file mode 100644
index 5c6d2d7bf05..00000000000
--- a/arch/powerpc/kvm/e500_tlb.h
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Author: Yu Liu, yu.liu@freescale.com
- *
- * Description:
- * This file is based on arch/powerpc/kvm/44x_tlb.h,
- * by Hollis Blanchard <hollisb@us.ibm.com>.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- */
-
-#ifndef __KVM_E500_TLB_H__
-#define __KVM_E500_TLB_H__
-
-#include <linux/kvm_host.h>
-#include <asm/mmu-book3e.h>
-#include <asm/tlb.h>
-#include <asm/kvm_e500.h>
-
-/* This geometry is the legacy default -- can be overridden by userspace */
-#define KVM_E500_TLB0_WAY_SIZE 128
-#define KVM_E500_TLB0_WAY_NUM 2
-
-#define KVM_E500_TLB0_SIZE (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM)
-#define KVM_E500_TLB1_SIZE 16
-
-#define index_of(tlbsel, esel) (((tlbsel) << 16) | ((esel) & 0xFFFF))
-#define tlbsel_of(index) ((index) >> 16)
-#define esel_of(index) ((index) & 0xFFFF)
-
-#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
-#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW)
-#define MAS2_ATTRIB_MASK \
- (MAS2_X0 | MAS2_X1)
-#define MAS3_ATTRIB_MASK \
- (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
- | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)
-
-extern void kvmppc_dump_tlbs(struct kvm_vcpu *);
-extern int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *, ulong);
-extern int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *);
-extern int kvmppc_e500_emul_tlbre(struct kvm_vcpu *);
-extern int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *, int, int);
-extern int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *, int);
-extern int kvmppc_e500_tlb_search(struct kvm_vcpu *, gva_t, unsigned int, int);
-extern void kvmppc_e500_tlb_put(struct kvm_vcpu *);
-extern void kvmppc_e500_tlb_load(struct kvm_vcpu *, int);
-extern int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *);
-extern void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *);
-extern void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *);
-extern void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *);
-
-/* TLB helper functions */
-static inline unsigned int
-get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
- return (tlbe->mas1 >> 7) & 0x1f;
-}
-
-static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
- return tlbe->mas2 & 0xfffff000;
-}
-
-static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
- unsigned int pgsize = get_tlb_size(tlbe);
- return 1ULL << 10 << pgsize;
-}
-
-static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
- u64 bytes = get_tlb_bytes(tlbe);
- return get_tlb_eaddr(tlbe) + bytes - 1;
-}
-
-static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
- return tlbe->mas7_3 & ~0xfffULL;
-}
-
-static inline unsigned int
-get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
- return (tlbe->mas1 >> 16) & 0xff;
-}
-
-static inline unsigned int
-get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
- return (tlbe->mas1 >> 12) & 0x1;
-}
-
-static inline unsigned int
-get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
- return (tlbe->mas1 >> 31) & 0x1;
-}
-
-static inline unsigned int
-get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe)
-{
- return (tlbe->mas1 >> 30) & 0x1;
-}
-
-static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.pid & 0xff;
-}
-
-static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu)
-{
- return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS));
-}
-
-static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
-{
- return !!(vcpu->arch.shared->msr & MSR_PR);
-}
-
-static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu)
-{
- return (vcpu->arch.shared->mas6 >> 16) & 0xff;
-}
-
-static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.shared->mas6 & 0x1;
-}
-
-static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
-{
- /*
- * Manual says that tlbsel has 2 bits wide.
- * Since we only have two TLBs, only lower bit is used.
- */
- return (vcpu->arch.shared->mas0 >> 28) & 0x1;
-}
-
-static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.shared->mas0 & 0xfff;
-}
-
-static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu)
-{
- return (vcpu->arch.shared->mas0 >> 16) & 0xfff;
-}
-
-static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
- const struct kvm_book3e_206_tlb_entry *tlbe)
-{
- gpa_t gpa;
-
- if (!get_tlb_v(tlbe))
- return 0;
-
- /* Does it match current guest AS? */
- /* XXX what about IS != DS? */
- if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
- return 0;
-
- gpa = get_tlb_raddr(tlbe);
- if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
- /* Mapping is not for RAM. */
- return 0;
-
- return 1;
-}
-
-#endif /* __KVM_E500_TLB_H__ */
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
new file mode 100644
index 00000000000..fe6c1de6b70
--- /dev/null
+++ b/arch/powerpc/kvm/e500mc.c
@@ -0,0 +1,342 @@
+/*
+ * Copyright (C) 2010 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Varun Sethi, <varun.sethi@freescale.com>
+ *
+ * Description:
+ * This file is derived from arch/powerpc/kvm/e500.c,
+ * by Yu Liu <yu.liu@freescale.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/export.h>
+
+#include <asm/reg.h>
+#include <asm/cputable.h>
+#include <asm/tlbflush.h>
+#include <asm/kvm_ppc.h>
+#include <asm/dbell.h>
+
+#include "booke.h"
+#include "e500.h"
+
+void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type)
+{
+ enum ppc_dbell dbell_type;
+ unsigned long tag;
+
+ switch (type) {
+ case INT_CLASS_NONCRIT:
+ dbell_type = PPC_G_DBELL;
+ break;
+ case INT_CLASS_CRIT:
+ dbell_type = PPC_G_DBELL_CRIT;
+ break;
+ case INT_CLASS_MC:
+ dbell_type = PPC_G_DBELL_MC;
+ break;
+ default:
+ WARN_ONCE(1, "%s: unknown int type %d\n", __func__, type);
+ return;
+ }
+
+ tag = PPC_DBELL_LPID(vcpu->kvm->arch.lpid) | vcpu->vcpu_id;
+ mb();
+ ppc_msgsnd(dbell_type, 0, tag);
+}
+
+/* gtlbe must not be mapped by more than one host tlb entry */
+void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
+ struct kvm_book3e_206_tlb_entry *gtlbe)
+{
+ unsigned int tid, ts;
+ u32 val, eaddr, lpid;
+ unsigned long flags;
+
+ ts = get_tlb_ts(gtlbe);
+ tid = get_tlb_tid(gtlbe);
+ lpid = vcpu_e500->vcpu.kvm->arch.lpid;
+
+ /* We search the host TLB to invalidate its shadow TLB entry */
+ val = (tid << 16) | ts;
+ eaddr = get_tlb_eaddr(gtlbe);
+
+ local_irq_save(flags);
+
+ mtspr(SPRN_MAS6, val);
+ mtspr(SPRN_MAS5, MAS5_SGS | lpid);
+
+ asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr));
+ val = mfspr(SPRN_MAS1);
+ if (val & MAS1_VALID) {
+ mtspr(SPRN_MAS1, val & ~MAS1_VALID);
+ asm volatile("tlbwe");
+ }
+ mtspr(SPRN_MAS5, 0);
+ /* NOTE: tlbsx also updates mas8, so clear it for host tlbwe */
+ mtspr(SPRN_MAS8, 0);
+ isync();
+
+ local_irq_restore(flags);
+}
+
+void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ mtspr(SPRN_MAS5, MAS5_SGS | vcpu_e500->vcpu.kvm->arch.lpid);
+ asm volatile("tlbilxlpid");
+ mtspr(SPRN_MAS5, 0);
+ local_irq_restore(flags);
+}
+
+void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
+{
+ vcpu->arch.pid = pid;
+}
+
+void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
+{
+}
+
+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+ kvmppc_booke_vcpu_load(vcpu, cpu);
+
+ mtspr(SPRN_LPID, vcpu->kvm->arch.lpid);
+ mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);
+ mtspr(SPRN_GPIR, vcpu->vcpu_id);
+ mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp);
+ mtspr(SPRN_EPLC, vcpu->arch.eplc);
+ mtspr(SPRN_EPSC, vcpu->arch.epsc);
+
+ mtspr(SPRN_GIVPR, vcpu->arch.ivpr);
+ mtspr(SPRN_GIVOR2, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
+ mtspr(SPRN_GIVOR8, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
+ mtspr(SPRN_GSPRG0, (unsigned long)vcpu->arch.shared->sprg0);
+ mtspr(SPRN_GSPRG1, (unsigned long)vcpu->arch.shared->sprg1);
+ mtspr(SPRN_GSPRG2, (unsigned long)vcpu->arch.shared->sprg2);
+ mtspr(SPRN_GSPRG3, (unsigned long)vcpu->arch.shared->sprg3);
+
+ mtspr(SPRN_GSRR0, vcpu->arch.shared->srr0);
+ mtspr(SPRN_GSRR1, vcpu->arch.shared->srr1);
+
+ mtspr(SPRN_GEPR, vcpu->arch.epr);
+ mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
+ mtspr(SPRN_GESR, vcpu->arch.shared->esr);
+
+ if (vcpu->arch.oldpir != mfspr(SPRN_PIR))
+ kvmppc_e500_tlbil_all(vcpu_e500);
+
+ kvmppc_load_guest_fp(vcpu);
+}
+
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.eplc = mfspr(SPRN_EPLC);
+ vcpu->arch.epsc = mfspr(SPRN_EPSC);
+
+ vcpu->arch.shared->sprg0 = mfspr(SPRN_GSPRG0);
+ vcpu->arch.shared->sprg1 = mfspr(SPRN_GSPRG1);
+ vcpu->arch.shared->sprg2 = mfspr(SPRN_GSPRG2);
+ vcpu->arch.shared->sprg3 = mfspr(SPRN_GSPRG3);
+
+ vcpu->arch.shared->srr0 = mfspr(SPRN_GSRR0);
+ vcpu->arch.shared->srr1 = mfspr(SPRN_GSRR1);
+
+ vcpu->arch.epr = mfspr(SPRN_GEPR);
+ vcpu->arch.shared->dar = mfspr(SPRN_GDEAR);
+ vcpu->arch.shared->esr = mfspr(SPRN_GESR);
+
+ vcpu->arch.oldpir = mfspr(SPRN_PIR);
+
+ kvmppc_booke_vcpu_put(vcpu);
+}
+
+int kvmppc_core_check_processor_compat(void)
+{
+ int r;
+
+ if (strcmp(cur_cpu_spec->cpu_name, "e500mc") == 0)
+ r = 0;
+ else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0)
+ r = 0;
+ else
+ r = -ENOTSUPP;
+
+ return r;
+}
+
+int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+ vcpu->arch.shadow_epcr = SPRN_EPCR_DSIGS | SPRN_EPCR_DGTMI | \
+ SPRN_EPCR_DUVD;
+ vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_DEP | MSRP_PMMP;
+ vcpu->arch.eplc = EPC_EGS | (vcpu->kvm->arch.lpid << EPC_ELPID_SHIFT);
+ vcpu->arch.epsc = vcpu->arch.eplc;
+
+ vcpu->arch.pvr = mfspr(SPRN_PVR);
+ vcpu_e500->svr = mfspr(SPRN_SVR);
+
+ vcpu->arch.cpu_type = KVM_CPU_E500MC;
+
+ return 0;
+}
+
+void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+ sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_PM |
+ KVM_SREGS_E_PC;
+ sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL;
+
+ sregs->u.e.impl.fsl.features = 0;
+ sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
+ sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
+ sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;
+
+ kvmppc_get_sregs_e500_tlb(vcpu, sregs);
+
+ sregs->u.e.ivor_high[3] =
+ vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
+ sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
+ sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
+
+ kvmppc_get_sregs_ivor(vcpu, sregs);
+}
+
+int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ int ret;
+
+ if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
+ vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
+ vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
+ vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
+ }
+
+ ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
+ if (ret < 0)
+ return ret;
+
+ if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
+ return 0;
+
+ if (sregs->u.e.features & KVM_SREGS_E_PM) {
+ vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
+ sregs->u.e.ivor_high[3];
+ }
+
+ if (sregs->u.e.features & KVM_SREGS_E_PC) {
+ vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] =
+ sregs->u.e.ivor_high[4];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] =
+ sregs->u.e.ivor_high[5];
+ }
+
+ return kvmppc_set_sregs_ivor(vcpu, sregs);
+}
+
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500;
+ struct kvm_vcpu *vcpu;
+ int err;
+
+ vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+ if (!vcpu_e500) {
+ err = -ENOMEM;
+ goto out;
+ }
+ vcpu = &vcpu_e500->vcpu;
+
+	/* Invalid PIR value -- this LPID doesn't have valid state on any cpu */
+ vcpu->arch.oldpir = 0xffffffff;
+
+ err = kvm_vcpu_init(vcpu, kvm, id);
+ if (err)
+ goto free_vcpu;
+
+ err = kvmppc_e500_tlb_init(vcpu_e500);
+ if (err)
+ goto uninit_vcpu;
+
+ vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+ if (!vcpu->arch.shared)
+ goto uninit_tlb;
+
+ return vcpu;
+
+uninit_tlb:
+ kvmppc_e500_tlb_uninit(vcpu_e500);
+uninit_vcpu:
+ kvm_vcpu_uninit(vcpu);
+
+free_vcpu:
+ kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
+out:
+ return ERR_PTR(err);
+}
+
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+ free_page((unsigned long)vcpu->arch.shared);
+ kvmppc_e500_tlb_uninit(vcpu_e500);
+ kvm_vcpu_uninit(vcpu);
+ kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
+}
+
+int kvmppc_core_init_vm(struct kvm *kvm)
+{
+ int lpid;
+
+ lpid = kvmppc_alloc_lpid();
+ if (lpid < 0)
+ return lpid;
+
+ kvm->arch.lpid = lpid;
+ return 0;
+}
+
+void kvmppc_core_destroy_vm(struct kvm *kvm)
+{
+ kvmppc_free_lpid(kvm->arch.lpid);
+}
+
+static int __init kvmppc_e500mc_init(void)
+{
+ int r;
+
+ r = kvmppc_booke_init();
+ if (r)
+ return r;
+
+ kvmppc_init_lpid(64);
+ kvmppc_claim_lpid(0); /* host */
+
+ return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
+}
+
+static void __exit kvmppc_e500mc_exit(void)
+{
+ kvmppc_booke_exit();
+}
+
+module_init(kvmppc_e500mc_init);
+module_exit(kvmppc_e500mc_exit);
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 968f4010188..f90e86dea7a 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
+#include <linux/clockchips.h>
#include <asm/reg.h>
#include <asm/time.h>
@@ -35,7 +36,9 @@
#define OP_TRAP 3
#define OP_TRAP_64 2
+#define OP_31_XOP_TRAP 4
#define OP_31_XOP_LWZX 23
+#define OP_31_XOP_TRAP_64 68
#define OP_31_XOP_LBZX 87
#define OP_31_XOP_STWX 151
#define OP_31_XOP_STBX 215
@@ -102,8 +105,12 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
*/
dec_time = vcpu->arch.dec;
- dec_time *= 1000;
- do_div(dec_time, tb_ticks_per_usec);
+ /*
+	 * The guest timebase ticks at the same frequency as the host decrementer,
+	 * so reuse the host decrementer calculations for decrementer emulation.
+ */
+ dec_time = dec_time << decrementer_clockevent.shift;
+ do_div(dec_time, decrementer_clockevent.mult);
dec_nsec = do_div(dec_time, NSEC_PER_SEC);
hrtimer_start(&vcpu->arch.dec_timer,
ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
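[For reference, the conversion introduced above follows from how a clockevent's (mult, shift) pair is defined: ticks = (ns * mult) >> shift, so ns = (ticks << shift) / mult. A standalone sketch of the same arithmetic; the helper name dec_ticks_to_time is illustrative, and like the patch it assumes ticks << shift fits in 64 bits:]

#include <linux/types.h>
#include <linux/math64.h>	/* do_div() */
#include <linux/time.h>		/* NSEC_PER_SEC */

/* Convert decrementer ticks to a (seconds, nanoseconds) pair for ktime_set(). */
static void dec_ticks_to_time(u64 ticks, u32 mult, u32 shift,
			      u64 *secs, unsigned long *nsecs)
{
	u64 ns = ticks << shift;

	do_div(ns, mult);		   /* ticks -> nanoseconds */
	*nsecs = do_div(ns, NSEC_PER_SEC); /* remainder: sub-second part */
	*secs = ns;			   /* quotient: whole seconds */
}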
@@ -141,14 +148,13 @@ u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
u32 inst = kvmppc_get_last_inst(vcpu);
- u32 ea;
- int ra;
- int rb;
- int rs;
- int rt;
- int sprn;
+ int ra = get_ra(inst);
+ int rs = get_rs(inst);
+ int rt = get_rt(inst);
+ int sprn = get_sprn(inst);
enum emulation_result emulated = EMULATE_DONE;
int advance = 1;
+ ulong spr_val = 0;
/* this default type might be overwritten by subcategories */
kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
@@ -170,173 +176,143 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
case 31:
switch (get_xop(inst)) {
+ case OP_31_XOP_TRAP:
+#ifdef CONFIG_64BIT
+ case OP_31_XOP_TRAP_64:
+#endif
+#ifdef CONFIG_PPC_BOOK3S
+ kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
+#else
+ kvmppc_core_queue_program(vcpu,
+ vcpu->arch.shared->esr | ESR_PTR);
+#endif
+ advance = 0;
+ break;
case OP_31_XOP_LWZX:
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
break;
case OP_31_XOP_LBZX:
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
break;
case OP_31_XOP_LBZUX:
- rt = get_rt(inst);
- ra = get_ra(inst);
- rb = get_rb(inst);
-
- ea = kvmppc_get_gpr(vcpu, rb);
- if (ra)
- ea += kvmppc_get_gpr(vcpu, ra);
-
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
- kvmppc_set_gpr(vcpu, ra, ea);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_31_XOP_STWX:
- rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
4, 1);
break;
case OP_31_XOP_STBX:
- rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
1, 1);
break;
case OP_31_XOP_STBUX:
- rs = get_rs(inst);
- ra = get_ra(inst);
- rb = get_rb(inst);
-
- ea = kvmppc_get_gpr(vcpu, rb);
- if (ra)
- ea += kvmppc_get_gpr(vcpu, ra);
-
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
1, 1);
- kvmppc_set_gpr(vcpu, rs, ea);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_31_XOP_LHAX:
- rt = get_rt(inst);
emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
break;
case OP_31_XOP_LHZX:
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
break;
case OP_31_XOP_LHZUX:
- rt = get_rt(inst);
- ra = get_ra(inst);
- rb = get_rb(inst);
-
- ea = kvmppc_get_gpr(vcpu, rb);
- if (ra)
- ea += kvmppc_get_gpr(vcpu, ra);
-
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
- kvmppc_set_gpr(vcpu, ra, ea);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_31_XOP_MFSPR:
- sprn = get_sprn(inst);
- rt = get_rt(inst);
-
switch (sprn) {
case SPRN_SRR0:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0);
+ spr_val = vcpu->arch.shared->srr0;
break;
case SPRN_SRR1:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1);
+ spr_val = vcpu->arch.shared->srr1;
break;
case SPRN_PVR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
+ spr_val = vcpu->arch.pvr;
+ break;
case SPRN_PIR:
- kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
+ spr_val = vcpu->vcpu_id;
+ break;
case SPRN_MSSSR0:
- kvmppc_set_gpr(vcpu, rt, 0); break;
+ spr_val = 0;
+ break;
/* Note: mftb and TBRL/TBWL are user-accessible, so
* the guest can always access the real TB anyways.
* In fact, we probably will never see these traps. */
case SPRN_TBWL:
- kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
+ spr_val = get_tb() >> 32;
+ break;
case SPRN_TBWU:
- kvmppc_set_gpr(vcpu, rt, get_tb()); break;
+ spr_val = get_tb();
+ break;
case SPRN_SPRG0:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0);
+ spr_val = vcpu->arch.shared->sprg0;
break;
case SPRN_SPRG1:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1);
+ spr_val = vcpu->arch.shared->sprg1;
break;
case SPRN_SPRG2:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2);
+ spr_val = vcpu->arch.shared->sprg2;
break;
case SPRN_SPRG3:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3);
+ spr_val = vcpu->arch.shared->sprg3;
break;
/* Note: SPRG4-7 are user-readable, so we don't get
* a trap. */
case SPRN_DEC:
- {
- kvmppc_set_gpr(vcpu, rt,
- kvmppc_get_dec(vcpu, get_tb()));
+ spr_val = kvmppc_get_dec(vcpu, get_tb());
break;
- }
default:
- emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
- if (emulated == EMULATE_FAIL) {
- printk("mfspr: unknown spr %x\n", sprn);
- kvmppc_set_gpr(vcpu, rt, 0);
+ emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
+ &spr_val);
+ if (unlikely(emulated == EMULATE_FAIL)) {
+ printk(KERN_INFO "mfspr: unknown spr "
+ "0x%x\n", sprn);
}
break;
}
+ kvmppc_set_gpr(vcpu, rt, spr_val);
kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
break;
case OP_31_XOP_STHX:
- rs = get_rs(inst);
- ra = get_ra(inst);
- rb = get_rb(inst);
-
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
2, 1);
break;
case OP_31_XOP_STHUX:
- rs = get_rs(inst);
- ra = get_ra(inst);
- rb = get_rb(inst);
-
- ea = kvmppc_get_gpr(vcpu, rb);
- if (ra)
- ea += kvmppc_get_gpr(vcpu, ra);
-
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
2, 1);
- kvmppc_set_gpr(vcpu, ra, ea);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_31_XOP_MTSPR:
- sprn = get_sprn(inst);
- rs = get_rs(inst);
+ spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
case SPRN_SRR0:
- vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs);
+ vcpu->arch.shared->srr0 = spr_val;
break;
case SPRN_SRR1:
- vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs);
+ vcpu->arch.shared->srr1 = spr_val;
break;
/* XXX We need to context-switch the timebase for
@@ -347,27 +323,29 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
case SPRN_MSSSR0: break;
case SPRN_DEC:
- vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
+ vcpu->arch.dec = spr_val;
kvmppc_emulate_dec(vcpu);
break;
case SPRN_SPRG0:
- vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs);
+ vcpu->arch.shared->sprg0 = spr_val;
break;
case SPRN_SPRG1:
- vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs);
+ vcpu->arch.shared->sprg1 = spr_val;
break;
case SPRN_SPRG2:
- vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs);
+ vcpu->arch.shared->sprg2 = spr_val;
break;
case SPRN_SPRG3:
- vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs);
+ vcpu->arch.shared->sprg3 = spr_val;
break;
default:
- emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
+ emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
+ spr_val);
if (emulated == EMULATE_FAIL)
- printk("mtspr: unknown spr %x\n", sprn);
+ printk(KERN_INFO "mtspr: unknown spr "
+ "0x%x\n", sprn);
break;
}
kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
@@ -382,7 +360,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
break;
case OP_31_XOP_LWBRX:
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
break;
@@ -390,25 +367,16 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
break;
case OP_31_XOP_STWBRX:
- rs = get_rs(inst);
- ra = get_ra(inst);
- rb = get_rb(inst);
-
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
4, 0);
break;
case OP_31_XOP_LHBRX:
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
break;
case OP_31_XOP_STHBRX:
- rs = get_rs(inst);
- ra = get_ra(inst);
- rb = get_rb(inst);
-
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
2, 0);
@@ -421,99 +389,78 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
break;
case OP_LWZ:
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
break;
case OP_LWZU:
- ra = get_ra(inst);
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_LBZ:
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
break;
case OP_LBZU:
- ra = get_ra(inst);
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_STW:
- rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
4, 1);
break;
case OP_STWU:
- ra = get_ra(inst);
- rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
4, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_STB:
- rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
1, 1);
break;
case OP_STBU:
- ra = get_ra(inst);
- rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
1, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_LHZ:
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
break;
case OP_LHZU:
- ra = get_ra(inst);
- rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_LHA:
- rt = get_rt(inst);
emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
break;
case OP_LHAU:
- ra = get_ra(inst);
- rt = get_rt(inst);
emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_STH:
- rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
2, 1);
break;
case OP_STHU:
- ra = get_ra(inst);
- rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
2, 1);
- kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
default:
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 00d7e345b3f..1493c8de947 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -43,6 +43,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
v->requests;
}
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+ return 1;
+}
+
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
int nr = kvmppc_get_gpr(vcpu, 11);
@@ -74,7 +79,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
}
case HC_VENDOR_KVM | KVM_HC_FEATURES:
r = HC_EV_SUCCESS;
-#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500)
+#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
/* XXX Missing magic page on 44x */
r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif
@@ -109,6 +114,11 @@ int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
goto out;
#endif
+#ifdef CONFIG_KVM_BOOKE_HV
+ if (!cpu_has_feature(CPU_FTR_EMB_HV))
+ goto out;
+#endif
+
r = true;
out:
@@ -225,7 +235,7 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_PPC_PAIRED_SINGLES:
case KVM_CAP_PPC_OSI:
case KVM_CAP_PPC_GET_PVINFO:
-#ifdef CONFIG_KVM_E500
+#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
case KVM_CAP_SW_TLB:
#endif
r = 1;
@@ -234,10 +244,12 @@ int kvm_dev_ioctl_check_extension(long ext)
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
#endif
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CAP_SPAPR_TCE:
r = 1;
break;
+#endif /* CONFIG_PPC_BOOK3S_64 */
+#ifdef CONFIG_KVM_BOOK3S_64_HV
case KVM_CAP_PPC_SMT:
r = threads_per_core;
break;
@@ -267,6 +279,11 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
+#ifdef CONFIG_PPC_BOOK3S_64
+ case KVM_CAP_PPC_GET_SMMU_INFO:
+ r = 1;
+ break;
+#endif
default:
r = 0;
break;
@@ -588,21 +605,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
return r;
}
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
-{
- int me;
- int cpu = vcpu->cpu;
-
- me = get_cpu();
- if (waitqueue_active(vcpu->arch.wqp)) {
- wake_up_interruptible(vcpu->arch.wqp);
- vcpu->stat.halt_wakeup++;
- } else if (cpu != me && cpu != -1) {
- smp_send_reschedule(vcpu->cpu);
- }
- put_cpu();
-}
-
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
if (irq->irq == KVM_INTERRUPT_UNSET) {
@@ -611,6 +613,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
}
kvmppc_core_queue_external(vcpu, irq);
+
kvm_vcpu_kick(vcpu);
return 0;
@@ -633,7 +636,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
r = 0;
vcpu->arch.papr_enabled = true;
break;
-#ifdef CONFIG_KVM_E500
+#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
case KVM_CAP_SW_TLB: {
struct kvm_config_tlb cfg;
void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
@@ -710,7 +713,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
break;
}
-#ifdef CONFIG_KVM_E500
+#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
case KVM_DIRTY_TLB: {
struct kvm_dirty_tlb dirty;
r = -EFAULT;
@@ -720,7 +723,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
break;
}
#endif
-
default:
r = -EINVAL;
}
@@ -777,7 +779,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
break;
}
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CREATE_SPAPR_TCE: {
struct kvm_create_spapr_tce create_tce;
struct kvm *kvm = filp->private_data;
@@ -788,7 +790,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
goto out;
}
+#endif /* CONFIG_PPC_BOOK3S_64 */
+#ifdef CONFIG_KVM_BOOK3S_64_HV
case KVM_ALLOCATE_RMA: {
struct kvm *kvm = filp->private_data;
struct kvm_allocate_rma rma;
@@ -800,6 +804,18 @@ long kvm_arch_vm_ioctl(struct file *filp,
}
#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#ifdef CONFIG_PPC_BOOK3S_64
+ case KVM_PPC_GET_SMMU_INFO: {
+ struct kvm *kvm = filp->private_data;
+ struct kvm_ppc_smmu_info info;
+
+ memset(&info, 0, sizeof(info));
+ r = kvm_vm_ioctl_get_smmu_info(kvm, &info);
+ if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
+ r = -EFAULT;
+ break;
+ }
+#endif /* CONFIG_PPC_BOOK3S_64 */
default:
r = -ENOTTY;
}
@@ -808,6 +824,40 @@ out:
return r;
}
+static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
+static unsigned long nr_lpids;
+
+long kvmppc_alloc_lpid(void)
+{
+ long lpid;
+
+ do {
+ lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
+ if (lpid >= nr_lpids) {
+ pr_err("%s: No LPIDs free\n", __func__);
+ return -ENOMEM;
+ }
+ } while (test_and_set_bit(lpid, lpid_inuse));
+
+ return lpid;
+}
+
+void kvmppc_claim_lpid(long lpid)
+{
+ set_bit(lpid, lpid_inuse);
+}
+
+void kvmppc_free_lpid(long lpid)
+{
+ clear_bit(lpid, lpid_inuse);
+}
+
+void kvmppc_init_lpid(unsigned long nr_lpids_param)
+{
+ nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
+ memset(lpid_inuse, 0, sizeof(lpid_inuse));
+}
+
int kvm_arch_init(void *opaque)
{
return 0;
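[The LPID allocator added above is lockless: find_first_zero_bit() picks a candidate and test_and_set_bit() claims it atomically, retrying if another caller won the race. Its intended use is the pattern already visible in e500mc.c earlier in this diff; a condensed sketch, with the example_* names being illustrative only:]

#include <linux/kvm_host.h>

static int example_init_vm(struct kvm *kvm)
{
	long lpid = kvmppc_alloc_lpid();	/* -ENOMEM if the bitmap is full */

	if (lpid < 0)
		return lpid;
	kvm->arch.lpid = lpid;
	return 0;
}

static void example_destroy_vm(struct kvm *kvm)
{
	kvmppc_free_lpid(kvm->arch.lpid);	/* return the LPID to the pool */
}

[As in kvmppc_e500mc_init() above, kvmppc_init_lpid() and kvmppc_claim_lpid(0) are expected to run once at module load, before any allocation.]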
diff --git a/arch/powerpc/kvm/timing.h b/arch/powerpc/kvm/timing.h
index 8167d42a776..bf191e72b2d 100644
--- a/arch/powerpc/kvm/timing.h
+++ b/arch/powerpc/kvm/timing.h
@@ -93,6 +93,12 @@ static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type)
case SIGNAL_EXITS:
vcpu->stat.signal_exits++;
break;
+ case DBELL_EXITS:
+ vcpu->stat.dbell_exits++;
+ break;
+ case GDBELL_EXITS:
+ vcpu->stat.gdbell_exits++;
+ break;
}
}
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
index 455881a5563..093d6316435 100644
--- a/arch/powerpc/lib/string.S
+++ b/arch/powerpc/lib/string.S
@@ -160,48 +160,3 @@ _GLOBAL(__clear_user)
PPC_LONG 1b,91b
PPC_LONG 8b,92b
.text
-
-_GLOBAL(__strncpy_from_user)
- addi r6,r3,-1
- addi r4,r4,-1
- cmpwi 0,r5,0
- beq 2f
- mtctr r5
-1: lbzu r0,1(r4)
- cmpwi 0,r0,0
- stbu r0,1(r6)
- bdnzf 2,1b /* dec ctr, branch if ctr != 0 && !cr0.eq */
- beq 3f
-2: addi r6,r6,1
-3: subf r3,r3,r6
- blr
-99: li r3,-EFAULT
- blr
-
- .section __ex_table,"a"
- PPC_LONG 1b,99b
- .text
-
-/* r3 = str, r4 = len (> 0), r5 = top (highest addr) */
-_GLOBAL(__strnlen_user)
- addi r7,r3,-1
- subf r6,r7,r5 /* top+1 - str */
- cmplw 0,r4,r6
- bge 0f
- mr r6,r4
-0: mtctr r6 /* ctr = min(len, top - str) */
-1: lbzu r0,1(r7) /* get next byte */
- cmpwi 0,r0,0
- bdnzf 2,1b /* loop if --ctr != 0 && byte != 0 */
- addi r7,r7,1
- subf r3,r3,r7 /* number of bytes we have looked at */
- beqlr /* return if we found a 0 byte */
- cmpw 0,r3,r4 /* did we look at all len bytes? */
- blt 99f /* if not, must have hit top */
- addi r3,r4,1 /* return len + 1 to indicate no null found */
- blr
-99: li r3,0 /* bad address, return 0 */
- blr
-
- .section __ex_table,"a"
- PPC_LONG 1b,99b
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 1d75c92ea8f..66519d263da 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -151,7 +151,7 @@ static void
spufs_evict_inode(struct inode *inode)
{
struct spufs_inode_info *ei = SPUFS_I(inode);
- end_writeback(inode);
+ clear_inode(inode);
if (ei->i_ctx)
put_spu_context(ei->i_ctx);
if (ei->i_gang)
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 6a2cb560e96..73dae8b9b77 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -115,7 +115,7 @@ static struct inode *hypfs_make_inode(struct super_block *sb, umode_t mode)
static void hypfs_evict_inode(struct inode *inode)
{
- end_writeback(inode);
+ clear_inode(inode);
kfree(inode->i_private);
}
diff --git a/arch/s390/include/asm/kvm.h b/arch/s390/include/asm/kvm.h
index 96076676e22..bdcbe0f8dd7 100644
--- a/arch/s390/include/asm/kvm.h
+++ b/arch/s390/include/asm/kvm.h
@@ -52,4 +52,9 @@ struct kvm_sync_regs {
__u32 acrs[16]; /* access registers */
__u64 crs[16]; /* control registers */
};
+
+#define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
+#define KVM_REG_S390_EPOCHDIFF (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x2)
+#define KVM_REG_S390_CPU_TIMER (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x3)
+#define KVM_REG_S390_CLOCK_COMP (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x4)
#endif
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 7343872890a..dd17537b9a9 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -148,6 +148,7 @@ struct kvm_vcpu_stat {
u32 instruction_sigp_restart;
u32 diagnose_10;
u32 diagnose_44;
+ u32 diagnose_9c;
};
struct kvm_s390_io_info {
diff --git a/arch/s390/include/asm/kvm_para.h b/arch/s390/include/asm/kvm_para.h
index 6964db226f8..a9883296103 100644
--- a/arch/s390/include/asm/kvm_para.h
+++ b/arch/s390/include/asm/kvm_para.h
@@ -149,6 +149,11 @@ static inline unsigned int kvm_arch_para_features(void)
return 0;
}
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+
#endif
#endif /* __S390_KVM_PARA_H */
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index fed7bee650a..bf238c55740 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -48,6 +48,7 @@ int sclp_cpu_deconfigure(u8 cpu);
void sclp_facilities_detect(void);
unsigned long long sclp_get_rnmax(void);
unsigned long long sclp_get_rzm(void);
+u8 sclp_get_fac85(void);
int sclp_sdias_blk_count(void);
int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
int sclp_chp_configure(struct chp_id chpid);
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index a353f0ea45c..b23d9ac77df 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -47,9 +47,30 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
{
VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
vcpu->stat.diagnose_44++;
- vcpu_put(vcpu);
- yield();
- vcpu_load(vcpu);
+ kvm_vcpu_on_spin(vcpu);
+ return 0;
+}
+
+static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_vcpu *tcpu;
+ int tid;
+ int i;
+
+ tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
+ vcpu->stat.diagnose_9c++;
+ VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);
+
+ if (tid == vcpu->vcpu_id)
+ return 0;
+
+ kvm_for_each_vcpu(i, tcpu, kvm)
+ if (tcpu->vcpu_id == tid) {
+ kvm_vcpu_yield_to(tcpu);
+ break;
+ }
+
return 0;
}
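[For context, diagnose 0x9c is issued by the guest with the target CPU address in the register named by the instruction's r1 field, which is what the handler above extracts via (ipa & 0xf0) >> 4. A hedged sketch of the guest side; the exact inline-asm form is an assumption and not part of this patch:]

/* Guest-side directed yield: ask the hypervisor to run the target CPU. */
static inline void guest_diag_9c(unsigned int target_cpu_addr)
{
	asm volatile("diag %0,0,0x9c"
		     : : "d" (target_cpu_addr));
}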
@@ -89,6 +110,8 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
return diag_release_pages(vcpu);
case 0x44:
return __diag_time_slice_end(vcpu);
+ case 0x9c:
+ return __diag_time_slice_end_directed(vcpu);
case 0x308:
return __diag_ipl_functions(vcpu);
default:
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 361456577c6..979cbe55bf5 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -101,6 +101,7 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
}
static intercept_handler_t instruction_handlers[256] = {
+ [0x01] = kvm_s390_handle_01,
[0x83] = kvm_s390_handle_diag,
[0xae] = kvm_s390_handle_sigp,
[0xb2] = kvm_s390_handle_b2,
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 217ce44395a..664766d0c83 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -28,6 +28,7 @@
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
+#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -74,6 +75,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
{ "diagnose_10", VCPU_STAT(diagnose_10) },
{ "diagnose_44", VCPU_STAT(diagnose_44) },
+ { "diagnose_9c", VCPU_STAT(diagnose_9c) },
{ NULL }
};
@@ -133,8 +135,16 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_S390_UCONTROL:
#endif
case KVM_CAP_SYNC_REGS:
+ case KVM_CAP_ONE_REG:
r = 1;
break;
+ case KVM_CAP_NR_VCPUS:
+ case KVM_CAP_MAX_VCPUS:
+ r = KVM_MAX_VCPUS;
+ break;
+ case KVM_CAP_S390_COW:
+ r = sclp_get_fac85() & 0x2;
+ break;
default:
r = 0;
}
@@ -423,6 +433,71 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
return 0;
}
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+ /* kvm common code refers to this, but never calls it */
+ BUG();
+ return 0;
+}
+
+static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
+ struct kvm_one_reg *reg)
+{
+ int r = -EINVAL;
+
+ switch (reg->id) {
+ case KVM_REG_S390_TODPR:
+ r = put_user(vcpu->arch.sie_block->todpr,
+ (u32 __user *)reg->addr);
+ break;
+ case KVM_REG_S390_EPOCHDIFF:
+ r = put_user(vcpu->arch.sie_block->epoch,
+ (u64 __user *)reg->addr);
+ break;
+ case KVM_REG_S390_CPU_TIMER:
+ r = put_user(vcpu->arch.sie_block->cputm,
+ (u64 __user *)reg->addr);
+ break;
+ case KVM_REG_S390_CLOCK_COMP:
+ r = put_user(vcpu->arch.sie_block->ckc,
+ (u64 __user *)reg->addr);
+ break;
+ default:
+ break;
+ }
+
+ return r;
+}
+
+static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
+ struct kvm_one_reg *reg)
+{
+ int r = -EINVAL;
+
+ switch (reg->id) {
+ case KVM_REG_S390_TODPR:
+ r = get_user(vcpu->arch.sie_block->todpr,
+ (u32 __user *)reg->addr);
+ break;
+ case KVM_REG_S390_EPOCHDIFF:
+ r = get_user(vcpu->arch.sie_block->epoch,
+ (u64 __user *)reg->addr);
+ break;
+ case KVM_REG_S390_CPU_TIMER:
+ r = get_user(vcpu->arch.sie_block->cputm,
+ (u64 __user *)reg->addr);
+ break;
+ case KVM_REG_S390_CLOCK_COMP:
+ r = get_user(vcpu->arch.sie_block->ckc,
+ (u64 __user *)reg->addr);
+ break;
+ default:
+ break;
+ }
+
+ return r;
+}
+
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
kvm_s390_vcpu_initial_reset(vcpu);
@@ -753,6 +828,18 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
case KVM_S390_INITIAL_RESET:
r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
break;
+ case KVM_SET_ONE_REG:
+ case KVM_GET_ONE_REG: {
+ struct kvm_one_reg reg;
+ r = -EFAULT;
+ if (copy_from_user(&reg, argp, sizeof(reg)))
+ break;
+ if (ioctl == KVM_SET_ONE_REG)
+ r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
+ else
+ r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
+ break;
+ }
#ifdef CONFIG_KVM_S390_UCONTROL
case KVM_S390_UCAS_MAP: {
struct kvm_s390_ucas_mapping ucasmap;
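[From userspace, the new s390 registers are reached through the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls wired up above. A minimal sketch reading the TOD programmable register; the helper name get_todpr and the open vcpu_fd are assumptions, while the register id comes from the asm/kvm.h hunk earlier in this diff:]

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int get_todpr(int vcpu_fd, uint32_t *todpr)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_TODPR,
		.addr = (uint64_t)(unsigned long)todpr,	/* userspace buffer */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	/* 0 on success */
}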
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index ff28f9d1c9e..2294377975e 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -79,6 +79,7 @@ int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
/* implemented in priv.c */
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
+int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
/* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index e5a45dbd26a..68a6b2ed16b 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -380,3 +380,34 @@ int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
}
+static int handle_sckpf(struct kvm_vcpu *vcpu)
+{
+ u32 value;
+
+ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+ return kvm_s390_inject_program_int(vcpu,
+ PGM_PRIVILEGED_OPERATION);
+
+ if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
+ return kvm_s390_inject_program_int(vcpu,
+ PGM_SPECIFICATION);
+
+ value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
+ vcpu->arch.sie_block->todpr = value;
+
+ return 0;
+}
+
+static intercept_handler_t x01_handlers[256] = {
+ [0x07] = handle_sckpf,
+};
+
+int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
+{
+ intercept_handler_t handler;
+
+ handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
+ if (handler)
+ return handler(vcpu);
+ return -EOPNOTSUPP;
+}
diff --git a/arch/score/include/asm/kvm_para.h b/arch/score/include/asm/kvm_para.h
new file mode 100644
index 00000000000..14fab8f0b95
--- /dev/null
+++ b/arch/score/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/sh/include/asm/kvm_para.h b/arch/sh/include/asm/kvm_para.h
new file mode 100644
index 00000000000..14fab8f0b95
--- /dev/null
+++ b/arch/sh/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 15e9e05740d..83bd051754e 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -35,6 +35,7 @@ config SPARC
select GENERIC_CMOS_UPDATE
select GENERIC_CLOCKEVENTS
select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
config SPARC32
def_bool !64BIT
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 2c2e38821f6..67f83e0a0d6 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -21,3 +21,4 @@ generic-y += div64.h
generic-y += local64.h
generic-y += irq_regs.h
generic-y += local.h
+generic-y += word-at-a-time.h
diff --git a/arch/sparc/include/asm/kvm_para.h b/arch/sparc/include/asm/kvm_para.h
new file mode 100644
index 00000000000..14fab8f0b95
--- /dev/null
+++ b/arch/sparc/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 59586b57ef1..53a28dd59f5 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -16,6 +16,8 @@
#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+
#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE
@@ -304,24 +306,8 @@ static inline unsigned long clear_user(void __user *addr, unsigned long n)
return n;
}
-extern long __strlen_user(const char __user *);
-extern long __strnlen_user(const char __user *, long len);
-
-static inline long strlen_user(const char __user *str)
-{
- if (!access_ok(VERIFY_READ, str, 0))
- return 0;
- else
- return __strlen_user(str);
-}
-
-static inline long strnlen_user(const char __user *str, long len)
-{
- if (!access_ok(VERIFY_READ, str, 0))
- return 0;
- else
- return __strnlen_user(str, len);
-}
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
#endif /* __ASSEMBLY__ */
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index dcdfb89cbf3..7c831d848b4 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -17,6 +17,8 @@
#ifndef __ASSEMBLY__
+#include <asm/processor.h>
+
/*
* Sparc64 is segmented, though more like the M68K than the I386.
* We use the secondary ASI to address user memory, which references a
@@ -257,11 +259,9 @@ extern unsigned long __must_check __clear_user(void __user *, unsigned long);
#define clear_user __clear_user
-extern long __strlen_user(const char __user *);
-extern long __strnlen_user(const char __user *, long len);
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
-#define strlen_user __strlen_user
-#define strnlen_user __strnlen_user
#define __copy_to_user_inatomic ___copy_to_user
#define __copy_from_user_inatomic ___copy_from_user
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 943d98dc4cd..dff4096f3de 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -10,7 +10,6 @@ lib-y += strlen.o
lib-y += checksum_$(BITS).o
lib-$(CONFIG_SPARC32) += blockops.o
lib-y += memscan_$(BITS).o memcmp.o strncmp_$(BITS).o
-lib-y += strlen_user_$(BITS).o
lib-$(CONFIG_SPARC32) += divdi3.o udivdi3.o
lib-$(CONFIG_SPARC32) += copy_user.o locks.o
lib-$(CONFIG_SPARC64) += atomic_64.o
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 6b278abdb63..3b31218cafc 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -15,8 +15,6 @@
/* string functions */
EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(__strlen_user);
-EXPORT_SYMBOL(__strnlen_user);
EXPORT_SYMBOL(strncmp);
/* mem* functions */
diff --git a/arch/sparc/lib/strlen_user_32.S b/arch/sparc/lib/strlen_user_32.S
deleted file mode 100644
index 8c8a371df3c..00000000000
--- a/arch/sparc/lib/strlen_user_32.S
+++ /dev/null
@@ -1,109 +0,0 @@
-/* strlen_user.S: Sparc optimized strlen_user code
- *
- * Return length of string in userspace including terminating 0
- * or 0 for error
- *
- * Copyright (C) 1991,1996 Free Software Foundation
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-
-#define LO_MAGIC 0x01010101
-#define HI_MAGIC 0x80808080
-
-10:
- ldub [%o0], %o5
- cmp %o5, 0
- be 1f
- add %o0, 1, %o0
- andcc %o0, 3, %g0
- be 4f
- or %o4, %lo(HI_MAGIC), %o3
-11:
- ldub [%o0], %o5
- cmp %o5, 0
- be 2f
- add %o0, 1, %o0
- andcc %o0, 3, %g0
- be 5f
- sethi %hi(LO_MAGIC), %o4
-12:
- ldub [%o0], %o5
- cmp %o5, 0
- be 3f
- add %o0, 1, %o0
- b 13f
- or %o4, %lo(LO_MAGIC), %o2
-1:
- retl
- mov 1, %o0
-2:
- retl
- mov 2, %o0
-3:
- retl
- mov 3, %o0
-
- .align 4
- .global __strlen_user, __strnlen_user
-__strlen_user:
- sethi %hi(32768), %o1
-__strnlen_user:
- mov %o1, %g1
- mov %o0, %o1
- andcc %o0, 3, %g0
- bne 10b
- sethi %hi(HI_MAGIC), %o4
- or %o4, %lo(HI_MAGIC), %o3
-4:
- sethi %hi(LO_MAGIC), %o4
-5:
- or %o4, %lo(LO_MAGIC), %o2
-13:
- ld [%o0], %o5
-2:
- sub %o5, %o2, %o4
- andcc %o4, %o3, %g0
- bne 82f
- add %o0, 4, %o0
- sub %o0, %o1, %g2
-81: cmp %g2, %g1
- blu 13b
- mov %o0, %o4
- ba,a 1f
-
- /* Check every byte. */
-82: srl %o5, 24, %g5
- andcc %g5, 0xff, %g0
- be 1f
- add %o0, -3, %o4
- srl %o5, 16, %g5
- andcc %g5, 0xff, %g0
- be 1f
- add %o4, 1, %o4
- srl %o5, 8, %g5
- andcc %g5, 0xff, %g0
- be 1f
- add %o4, 1, %o4
- andcc %o5, 0xff, %g0
- bne 81b
- sub %o0, %o1, %g2
-
- add %o4, 1, %o4
-1:
- retl
- sub %o4, %o1, %o0
-
- .section .fixup,#alloc,#execinstr
- .align 4
-9:
- retl
- clr %o0
-
- .section __ex_table,#alloc
- .align 4
-
- .word 10b, 9b
- .word 11b, 9b
- .word 12b, 9b
- .word 13b, 9b
diff --git a/arch/sparc/lib/strlen_user_64.S b/arch/sparc/lib/strlen_user_64.S
deleted file mode 100644
index c3df71fa492..00000000000
--- a/arch/sparc/lib/strlen_user_64.S
+++ /dev/null
@@ -1,97 +0,0 @@
-/* strlen_user.S: Sparc64 optimized strlen_user code
- *
- * Return length of string in userspace including terminating 0
- * or 0 for error
- *
- * Copyright (C) 1991,1996 Free Software Foundation
- * Copyright (C) 1996,1999 David S. Miller (davem@redhat.com)
- * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-
-#include <linux/linkage.h>
-#include <asm/asi.h>
-
-#define LO_MAGIC 0x01010101
-#define HI_MAGIC 0x80808080
-
- .align 4
-ENTRY(__strlen_user)
- sethi %hi(32768), %o1
-ENTRY(__strnlen_user)
- mov %o1, %g1
- mov %o0, %o1
- andcc %o0, 3, %g0
- be,pt %icc, 9f
- sethi %hi(HI_MAGIC), %o4
-10: lduba [%o0] %asi, %o5
- brz,pn %o5, 21f
- add %o0, 1, %o0
- andcc %o0, 3, %g0
- be,pn %icc, 4f
- or %o4, %lo(HI_MAGIC), %o3
-11: lduba [%o0] %asi, %o5
- brz,pn %o5, 22f
- add %o0, 1, %o0
- andcc %o0, 3, %g0
- be,pt %icc, 13f
- srl %o3, 7, %o2
-12: lduba [%o0] %asi, %o5
- brz,pn %o5, 23f
- add %o0, 1, %o0
- ba,pt %icc, 2f
-15: lda [%o0] %asi, %o5
-9: or %o4, %lo(HI_MAGIC), %o3
-4: srl %o3, 7, %o2
-13: lda [%o0] %asi, %o5
-2: sub %o5, %o2, %o4
- andcc %o4, %o3, %g0
- bne,pn %icc, 82f
- add %o0, 4, %o0
- sub %o0, %o1, %g2
-81: cmp %g2, %g1
- blu,pt %icc, 13b
- mov %o0, %o4
- ba,a,pt %xcc, 1f
-
- /* Check every byte. */
-82: srl %o5, 24, %g7
- andcc %g7, 0xff, %g0
- be,pn %icc, 1f
- add %o0, -3, %o4
- srl %o5, 16, %g7
- andcc %g7, 0xff, %g0
- be,pn %icc, 1f
- add %o4, 1, %o4
- srl %o5, 8, %g7
- andcc %g7, 0xff, %g0
- be,pn %icc, 1f
- add %o4, 1, %o4
- andcc %o5, 0xff, %g0
- bne,pt %icc, 81b
- sub %o0, %o1, %g2
- add %o4, 1, %o4
-1: retl
- sub %o4, %o1, %o0
-21: retl
- mov 1, %o0
-22: retl
- mov 2, %o0
-23: retl
- mov 3, %o0
-ENDPROC(__strlen_user)
-ENDPROC(__strnlen_user)
-
- .section .fixup,#alloc,#execinstr
- .align 4
-30:
- retl
- clr %o0
-
- .section __ex_table,"a"
- .align 4
-
- .word 10b, 30b
- .word 11b, 30b
- .word 12b, 30b
- .word 15b, 30b
- .word 13b, 30b
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 6ad6219fc47..fe128816c44 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -48,6 +48,14 @@ config NEED_PER_CPU_PAGE_FIRST_CHUNK
config SYS_SUPPORTS_HUGETLBFS
def_bool y
+# Support for additional huge page sizes besides HPAGE_SIZE.
+# The software support is currently only present in the TILE-Gx
+# hypervisor. TILEPro in any case does not support page sizes
+# larger than the default HPAGE_SIZE.
+config HUGETLB_SUPER_PAGES
+ depends on HUGETLB_PAGE && TILEGX
+ def_bool y
+
# FIXME: tilegx can implement a more efficient rwsem.
config RWSEM_GENERIC_SPINLOCK
def_bool y
@@ -107,16 +115,14 @@ config HVC_TILE
select HVC_DRIVER
def_bool y
-# Please note: TILE-Gx support is not yet finalized; this is
-# the preliminary support. TILE-Gx drivers are only provided
-# with the alpha or beta test versions for Tilera customers.
config TILEGX
- depends on EXPERIMENTAL
bool "Building with TILE-Gx (64-bit) compiler and toolchain"
+config TILEPRO
+ def_bool !TILEGX
+
config 64BIT
- depends on TILEGX
- def_bool y
+ def_bool TILEGX
config ARCH_DEFCONFIG
string
@@ -137,6 +143,31 @@ config NR_CPUS
smaller kernel memory footprint results from using a smaller
value on chips with fewer tiles.
+if TILEGX
+
+choice
+ prompt "Kernel page size"
+ default PAGE_SIZE_64KB
+ help
+ This lets you select the page size of the kernel. For best
+ performance on memory-intensive applications, a page size of 64KB
+ is recommended. For workloads involving many small files, many
+ connections, etc., it may be better to select 16KB, which uses
+ memory more efficiently at some cost in TLB performance.
+
+ Note that this option is TILE-Gx specific; currently
+ TILEPro page size is set by rebuilding the hypervisor.
+
+config PAGE_SIZE_16KB
+ bool "16KB"
+
+config PAGE_SIZE_64KB
+ bool "64KB"
+
+endchoice
+
+endif
+
source "kernel/Kconfig.hz"
config KEXEC
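
The page-size help text above trades TLB reach against memory efficiency; a quick arithmetic sketch makes the trade-off concrete (the 32-entry DTLB used below is a hypothetical figure for illustration, not taken from this patch):

    /* Rough TLB-reach arithmetic for the Kconfig page-size choice above.
     * The 32-entry DTLB is an assumed figure, purely for illustration. */
    #include <stdio.h>

    int main(void)
    {
            const unsigned long dtlb_entries = 32;  /* assumed, not from this patch */

            printf("reach @16KB pages: %lu KB\n", dtlb_entries * 16);   /* 512 KB  */
            printf("reach @64KB pages: %lu KB\n", dtlb_entries * 64);   /* 2048 KB */
            return 0;
    }
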
diff --git a/arch/tile/Makefile b/arch/tile/Makefile
index 9520bc5a4b7..e20b0a0b64a 100644
--- a/arch/tile/Makefile
+++ b/arch/tile/Makefile
@@ -34,7 +34,12 @@ LIBGCC_PATH := \
$(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
# Provide the path to use for "make defconfig".
-KBUILD_DEFCONFIG := $(ARCH)_defconfig
+# We default to the newer TILE-Gx architecture if only "tile" is given.
+ifeq ($(ARCH),tile)
+ KBUILD_DEFCONFIG := tilegx_defconfig
+else
+ KBUILD_DEFCONFIG := $(ARCH)_defconfig
+endif
# Used as a file extension when useful, e.g. head_$(BITS).o
# Not needed for (e.g.) "$(CC) -m32" since the compiler automatically
diff --git a/arch/tile/include/arch/spr_def_32.h b/arch/tile/include/arch/spr_def_32.h
index bbc1f4c924e..78bbce2fb19 100644
--- a/arch/tile/include/arch/spr_def_32.h
+++ b/arch/tile/include/arch/spr_def_32.h
@@ -65,6 +65,31 @@
#define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
#define SPR_EX_CONTEXT_2_1__ICS_MASK 0x4
#define SPR_FAIL 0x4e09
+#define SPR_IDN_AVAIL_EN 0x3e05
+#define SPR_IDN_CA_DATA 0x0b00
+#define SPR_IDN_DATA_AVAIL 0x0b03
+#define SPR_IDN_DEADLOCK_TIMEOUT 0x3406
+#define SPR_IDN_DEMUX_CA_COUNT 0x0a05
+#define SPR_IDN_DEMUX_COUNT_0 0x0a06
+#define SPR_IDN_DEMUX_COUNT_1 0x0a07
+#define SPR_IDN_DEMUX_CTL 0x0a08
+#define SPR_IDN_DEMUX_QUEUE_SEL 0x0a0a
+#define SPR_IDN_DEMUX_STATUS 0x0a0b
+#define SPR_IDN_DEMUX_WRITE_FIFO 0x0a0c
+#define SPR_IDN_DIRECTION_PROTECT 0x2e05
+#define SPR_IDN_PENDING 0x0a0e
+#define SPR_IDN_REFILL_EN 0x0e05
+#define SPR_IDN_SP_FIFO_DATA 0x0a0f
+#define SPR_IDN_SP_FIFO_SEL 0x0a10
+#define SPR_IDN_SP_FREEZE 0x0a11
+#define SPR_IDN_SP_FREEZE__SP_FRZ_MASK 0x1
+#define SPR_IDN_SP_FREEZE__DEMUX_FRZ_MASK 0x2
+#define SPR_IDN_SP_FREEZE__NON_DEST_EXT_MASK 0x4
+#define SPR_IDN_SP_STATE 0x0a12
+#define SPR_IDN_TAG_0 0x0a13
+#define SPR_IDN_TAG_1 0x0a14
+#define SPR_IDN_TAG_VALID 0x0a15
+#define SPR_IDN_TILE_COORD 0x0a16
#define SPR_INTCTRL_0_STATUS 0x4a07
#define SPR_INTCTRL_1_STATUS 0x4807
#define SPR_INTCTRL_2_STATUS 0x4607
@@ -87,12 +112,36 @@
#define SPR_INTERRUPT_MASK_SET_1_1 0x480e
#define SPR_INTERRUPT_MASK_SET_2_0 0x460c
#define SPR_INTERRUPT_MASK_SET_2_1 0x460d
+#define SPR_MPL_AUX_PERF_COUNT_SET_0 0x6000
+#define SPR_MPL_AUX_PERF_COUNT_SET_1 0x6001
+#define SPR_MPL_AUX_PERF_COUNT_SET_2 0x6002
#define SPR_MPL_DMA_CPL_SET_0 0x5800
#define SPR_MPL_DMA_CPL_SET_1 0x5801
#define SPR_MPL_DMA_CPL_SET_2 0x5802
#define SPR_MPL_DMA_NOTIFY_SET_0 0x3800
#define SPR_MPL_DMA_NOTIFY_SET_1 0x3801
#define SPR_MPL_DMA_NOTIFY_SET_2 0x3802
+#define SPR_MPL_IDN_ACCESS_SET_0 0x0a00
+#define SPR_MPL_IDN_ACCESS_SET_1 0x0a01
+#define SPR_MPL_IDN_ACCESS_SET_2 0x0a02
+#define SPR_MPL_IDN_AVAIL_SET_0 0x3e00
+#define SPR_MPL_IDN_AVAIL_SET_1 0x3e01
+#define SPR_MPL_IDN_AVAIL_SET_2 0x3e02
+#define SPR_MPL_IDN_CA_SET_0 0x3a00
+#define SPR_MPL_IDN_CA_SET_1 0x3a01
+#define SPR_MPL_IDN_CA_SET_2 0x3a02
+#define SPR_MPL_IDN_COMPLETE_SET_0 0x1200
+#define SPR_MPL_IDN_COMPLETE_SET_1 0x1201
+#define SPR_MPL_IDN_COMPLETE_SET_2 0x1202
+#define SPR_MPL_IDN_FIREWALL_SET_0 0x2e00
+#define SPR_MPL_IDN_FIREWALL_SET_1 0x2e01
+#define SPR_MPL_IDN_FIREWALL_SET_2 0x2e02
+#define SPR_MPL_IDN_REFILL_SET_0 0x0e00
+#define SPR_MPL_IDN_REFILL_SET_1 0x0e01
+#define SPR_MPL_IDN_REFILL_SET_2 0x0e02
+#define SPR_MPL_IDN_TIMER_SET_0 0x3400
+#define SPR_MPL_IDN_TIMER_SET_1 0x3401
+#define SPR_MPL_IDN_TIMER_SET_2 0x3402
#define SPR_MPL_INTCTRL_0_SET_0 0x4a00
#define SPR_MPL_INTCTRL_0_SET_1 0x4a01
#define SPR_MPL_INTCTRL_0_SET_2 0x4a02
@@ -102,6 +151,9 @@
#define SPR_MPL_INTCTRL_2_SET_0 0x4600
#define SPR_MPL_INTCTRL_2_SET_1 0x4601
#define SPR_MPL_INTCTRL_2_SET_2 0x4602
+#define SPR_MPL_PERF_COUNT_SET_0 0x4200
+#define SPR_MPL_PERF_COUNT_SET_1 0x4201
+#define SPR_MPL_PERF_COUNT_SET_2 0x4202
#define SPR_MPL_SN_ACCESS_SET_0 0x0800
#define SPR_MPL_SN_ACCESS_SET_1 0x0801
#define SPR_MPL_SN_ACCESS_SET_2 0x0802
@@ -181,6 +233,7 @@
#define SPR_UDN_DEMUX_STATUS 0x0c0d
#define SPR_UDN_DEMUX_WRITE_FIFO 0x0c0e
#define SPR_UDN_DIRECTION_PROTECT 0x3005
+#define SPR_UDN_PENDING 0x0c10
#define SPR_UDN_REFILL_EN 0x1005
#define SPR_UDN_SP_FIFO_DATA 0x0c11
#define SPR_UDN_SP_FIFO_SEL 0x0c12
@@ -195,6 +248,9 @@
#define SPR_UDN_TAG_3 0x0c18
#define SPR_UDN_TAG_VALID 0x0c19
#define SPR_UDN_TILE_COORD 0x0c1a
+#define SPR_WATCH_CTL 0x4209
+#define SPR_WATCH_MASK 0x420a
+#define SPR_WATCH_VAL 0x420b
#endif /* !defined(__ARCH_SPR_DEF_H__) */
diff --git a/arch/tile/include/arch/spr_def_64.h b/arch/tile/include/arch/spr_def_64.h
index cd3e5f95d5f..0da86faa337 100644
--- a/arch/tile/include/arch/spr_def_64.h
+++ b/arch/tile/include/arch/spr_def_64.h
@@ -52,6 +52,13 @@
#define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
#define SPR_EX_CONTEXT_2_1__ICS_MASK 0x4
#define SPR_FAIL 0x2707
+#define SPR_IDN_AVAIL_EN 0x1a05
+#define SPR_IDN_DATA_AVAIL 0x0a80
+#define SPR_IDN_DEADLOCK_TIMEOUT 0x1806
+#define SPR_IDN_DEMUX_COUNT_0 0x0a05
+#define SPR_IDN_DEMUX_COUNT_1 0x0a06
+#define SPR_IDN_DIRECTION_PROTECT 0x1405
+#define SPR_IDN_PENDING 0x0a08
#define SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK 0x1
#define SPR_INTCTRL_0_STATUS 0x2505
#define SPR_INTCTRL_1_STATUS 0x2405
@@ -88,9 +95,27 @@
#define SPR_IPI_MASK_SET_0 0x1f0a
#define SPR_IPI_MASK_SET_1 0x1e0a
#define SPR_IPI_MASK_SET_2 0x1d0a
+#define SPR_MPL_AUX_PERF_COUNT_SET_0 0x2100
+#define SPR_MPL_AUX_PERF_COUNT_SET_1 0x2101
+#define SPR_MPL_AUX_PERF_COUNT_SET_2 0x2102
#define SPR_MPL_AUX_TILE_TIMER_SET_0 0x1700
#define SPR_MPL_AUX_TILE_TIMER_SET_1 0x1701
#define SPR_MPL_AUX_TILE_TIMER_SET_2 0x1702
+#define SPR_MPL_IDN_ACCESS_SET_0 0x0a00
+#define SPR_MPL_IDN_ACCESS_SET_1 0x0a01
+#define SPR_MPL_IDN_ACCESS_SET_2 0x0a02
+#define SPR_MPL_IDN_AVAIL_SET_0 0x1a00
+#define SPR_MPL_IDN_AVAIL_SET_1 0x1a01
+#define SPR_MPL_IDN_AVAIL_SET_2 0x1a02
+#define SPR_MPL_IDN_COMPLETE_SET_0 0x0500
+#define SPR_MPL_IDN_COMPLETE_SET_1 0x0501
+#define SPR_MPL_IDN_COMPLETE_SET_2 0x0502
+#define SPR_MPL_IDN_FIREWALL_SET_0 0x1400
+#define SPR_MPL_IDN_FIREWALL_SET_1 0x1401
+#define SPR_MPL_IDN_FIREWALL_SET_2 0x1402
+#define SPR_MPL_IDN_TIMER_SET_0 0x1800
+#define SPR_MPL_IDN_TIMER_SET_1 0x1801
+#define SPR_MPL_IDN_TIMER_SET_2 0x1802
#define SPR_MPL_INTCTRL_0_SET_0 0x2500
#define SPR_MPL_INTCTRL_0_SET_1 0x2501
#define SPR_MPL_INTCTRL_0_SET_2 0x2502
@@ -100,6 +125,21 @@
#define SPR_MPL_INTCTRL_2_SET_0 0x2300
#define SPR_MPL_INTCTRL_2_SET_1 0x2301
#define SPR_MPL_INTCTRL_2_SET_2 0x2302
+#define SPR_MPL_IPI_0 0x1f04
+#define SPR_MPL_IPI_0_SET_0 0x1f00
+#define SPR_MPL_IPI_0_SET_1 0x1f01
+#define SPR_MPL_IPI_0_SET_2 0x1f02
+#define SPR_MPL_IPI_1 0x1e04
+#define SPR_MPL_IPI_1_SET_0 0x1e00
+#define SPR_MPL_IPI_1_SET_1 0x1e01
+#define SPR_MPL_IPI_1_SET_2 0x1e02
+#define SPR_MPL_IPI_2 0x1d04
+#define SPR_MPL_IPI_2_SET_0 0x1d00
+#define SPR_MPL_IPI_2_SET_1 0x1d01
+#define SPR_MPL_IPI_2_SET_2 0x1d02
+#define SPR_MPL_PERF_COUNT_SET_0 0x2000
+#define SPR_MPL_PERF_COUNT_SET_1 0x2001
+#define SPR_MPL_PERF_COUNT_SET_2 0x2002
#define SPR_MPL_UDN_ACCESS_SET_0 0x0b00
#define SPR_MPL_UDN_ACCESS_SET_1 0x0b01
#define SPR_MPL_UDN_ACCESS_SET_2 0x0b02
@@ -167,6 +207,9 @@
#define SPR_UDN_DEMUX_COUNT_2 0x0b07
#define SPR_UDN_DEMUX_COUNT_3 0x0b08
#define SPR_UDN_DIRECTION_PROTECT 0x1505
+#define SPR_UDN_PENDING 0x0b0a
+#define SPR_WATCH_MASK 0x200a
+#define SPR_WATCH_VAL 0x200b
#endif /* !defined(__ARCH_SPR_DEF_H__) */
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index 0bb42642343..143473e3a0b 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -2,6 +2,7 @@ include include/asm-generic/Kbuild.asm
header-y += ../arch/
+header-y += cachectl.h
header-y += ucontext.h
header-y += hardwall.h
@@ -21,7 +22,6 @@ generic-y += ipcbuf.h
generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += local.h
-generic-y += module.h
generic-y += msgbuf.h
generic-y += mutex.h
generic-y += param.h
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 54d1da826f9..e7fb5cfb959 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -303,7 +303,14 @@ void __init_atomic_per_cpu(void);
void __atomic_fault_unlock(int *lock_ptr);
#endif
+/* Return a pointer to the lock for the given address. */
+int *__atomic_hashed_lock(volatile void *v);
+
/* Private helper routines in lib/atomic_asm_32.S */
+struct __get_user {
+ unsigned long val;
+ int err;
+};
extern struct __get_user __atomic_cmpxchg(volatile int *p,
int *lock, int o, int n);
extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
@@ -319,6 +326,9 @@ extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
int *lock, u64 o, u64 n);
+/* Return failure from the atomic wrappers. */
+struct __get_user __atomic_bad_address(int __user *addr);
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_TILE_ATOMIC_32_H */
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h
index 16f1fa51fea..bd186c4eaa5 100644
--- a/arch/tile/include/asm/bitops.h
+++ b/arch/tile/include/asm/bitops.h
@@ -77,6 +77,11 @@ static inline int ffs(int x)
return __builtin_ffs(x);
}
+static inline int fls64(__u64 w)
+{
+ return (sizeof(__u64) * 8) - __builtin_clzll(w);
+}
+
/**
* fls - find last set bit in word
* @x: the word to search
@@ -90,12 +95,7 @@ static inline int ffs(int x)
*/
static inline int fls(int x)
{
- return (sizeof(int) * 8) - __builtin_clz(x);
-}
-
-static inline int fls64(__u64 w)
-{
- return (sizeof(__u64) * 8) - __builtin_clzll(w);
+ return fls64((unsigned int) x);
}
static inline unsigned int __arch_hweight32(unsigned int w)
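
Moving fls64() ahead of fls() lets fls() become a thin wrapper that zero-extends its argument and reuses the clzll computation. A standalone sketch of the same arithmetic, with an explicit zero check added here since __builtin_clzll(0) is undefined:

    /* Minimal userspace sketch of the clzll-based fls64()/fls() pair above. */
    #include <assert.h>

    static int my_fls64(unsigned long long w)
    {
            return w ? 64 - __builtin_clzll(w) : 0;     /* zero handled explicitly here */
    }

    static int my_fls(int x)
    {
            return my_fls64((unsigned int)x);           /* zero-extend, then reuse fls64 */
    }

    int main(void)
    {
            assert(my_fls(0x20) == 6);                  /* bit 5 set -> result 6 */
            assert(my_fls64(1ULL << 40) == 41);
            assert(my_fls(0) == 0);
            return 0;
    }
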
diff --git a/arch/tile/include/asm/byteorder.h b/arch/tile/include/asm/byteorder.h
index 9558416d578..fb72ecf4921 100644
--- a/arch/tile/include/asm/byteorder.h
+++ b/arch/tile/include/asm/byteorder.h
@@ -1 +1,21 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#if defined (__BIG_ENDIAN__)
+#include <linux/byteorder/big_endian.h>
+#elif defined (__LITTLE_ENDIAN__)
#include <linux/byteorder/little_endian.h>
+#else
+#error "__BIG_ENDIAN__ or __LITTLE_ENDIAN__ must be defined."
+#endif
diff --git a/arch/tile/include/asm/cachectl.h b/arch/tile/include/asm/cachectl.h
new file mode 100644
index 00000000000..af4c9f9154d
--- /dev/null
+++ b/arch/tile/include/asm/cachectl.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ASM_TILE_CACHECTL_H
+#define _ASM_TILE_CACHECTL_H
+
+/*
+ * Options for cacheflush system call.
+ *
+ * The ICACHE flush is performed on all cores currently running the
+ * current process's address space. The intent is for user
+ * applications to be able to modify code, invoke the system call,
+ * then allow arbitrary other threads in the same address space to see
+ * the newly-modified code. Passing a length of CHIP_L1I_CACHE_SIZE()
+ * or more invalidates the entire icache on all cores in the address
+ * spaces. (Note: currently this option invalidates the entire icache
+ * regardless of the requested address and length, but we may choose
+ * to honor the arguments at some point.)
+ *
+ * Flush and invalidation of memory can normally be performed with the
+ * __insn_flush(), __insn_inv(), and __insn_finv() instructions from
+ * userspace. The DCACHE option to the system call allows userspace
+ * to flush the entire L1+L2 data cache from the core. In this case,
+ * the address and length arguments are not used. The DCACHE flush is
+ * restricted to the current core, not all cores in the address space.
+ */
+#define ICACHE (1<<0) /* invalidate L1 instruction cache */
+#define DCACHE (1<<1) /* flush and invalidate data cache */
+#define BCACHE (ICACHE|DCACHE) /* flush both caches */
+
+#endif /* _ASM_TILE_CACHECTL_H */
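
The comment block above spells out the intended userspace flow: patch instructions, then invalidate the icache across every core running the address space. A hedged sketch of that flow follows; the __NR_cacheflush syscall-number macro and the wrapper name are assumptions made for illustration, while the ICACHE/DCACHE/BCACHE flags and the (addr, len, flags) argument order come from this patch:

    /* Hypothetical userspace use of the cacheflush flags defined above. */
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>

    #define ICACHE (1<<0)           /* mirrors <asm/cachectl.h> above */
    #define DCACHE (1<<1)
    #define BCACHE (ICACHE|DCACHE)

    static int flush_generated_code(void *buf, const void *code, size_t len)
    {
            memcpy(buf, code, len);                 /* patch in the new instructions */
    #ifdef __NR_cacheflush                          /* assumed syscall number macro */
            return (int)syscall(__NR_cacheflush, buf, len, ICACHE);
    #else
            return -1;                              /* syscall not available here */
    #endif
    }
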
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index 4b4b28969a6..69adc08d36a 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -242,9 +242,6 @@ long compat_sys_fallocate(int fd, int mode,
long compat_sys_sched_rr_get_interval(compat_pid_t pid,
struct compat_timespec __user *interval);
-/* Tilera Linux syscalls that don't have "compat" versions. */
-#define compat_sys_flush_cache sys_flush_cache
-
/* These are the intvec_64.S trampolines. */
long _compat_sys_execve(const char __user *path,
const compat_uptr_t __user *argv,
diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h
index 623a6bb741c..d16d006d660 100644
--- a/arch/tile/include/asm/elf.h
+++ b/arch/tile/include/asm/elf.h
@@ -44,7 +44,11 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#else
#define ELF_CLASS ELFCLASS32
#endif
+#ifdef __BIG_ENDIAN__
+#define ELF_DATA ELFDATA2MSB
+#else
#define ELF_DATA ELFDATA2LSB
+#endif
/*
* There seems to be a bug in how compat_binfmt_elf.c works: it
@@ -59,6 +63,7 @@ enum { ELF_ARCH = CHIP_ELF_TYPE() };
*/
#define elf_check_arch(x) \
((x)->e_ident[EI_CLASS] == ELF_CLASS && \
+ (x)->e_ident[EI_DATA] == ELF_DATA && \
(x)->e_machine == CHIP_ELF_TYPE())
/* The module loader only handles a few relocation types. */
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h
index d03ec124a59..5909ac3d721 100644
--- a/arch/tile/include/asm/futex.h
+++ b/arch/tile/include/asm/futex.h
@@ -28,29 +28,81 @@
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
+#include <asm/atomic.h>
-extern struct __get_user futex_set(u32 __user *v, int i);
-extern struct __get_user futex_add(u32 __user *v, int n);
-extern struct __get_user futex_or(u32 __user *v, int n);
-extern struct __get_user futex_andn(u32 __user *v, int n);
-extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n);
+/*
+ * Support macros for futex operations. Do not use these macros directly.
+ * They assume "ret", "val", "oparg", and "uaddr" in the lexical context.
+ * __futex_cmpxchg() additionally assumes "oldval".
+ */
+
+#ifdef __tilegx__
+
+#define __futex_asm(OP) \
+ asm("1: {" #OP " %1, %3, %4; movei %0, 0 }\n" \
+ ".pushsection .fixup,\"ax\"\n" \
+ "0: { movei %0, %5; j 9f }\n" \
+ ".section __ex_table,\"a\"\n" \
+ ".quad 1b, 0b\n" \
+ ".popsection\n" \
+ "9:" \
+ : "=r" (ret), "=r" (val), "+m" (*(uaddr)) \
+ : "r" (uaddr), "r" (oparg), "i" (-EFAULT))
+
+#define __futex_set() __futex_asm(exch4)
+#define __futex_add() __futex_asm(fetchadd4)
+#define __futex_or() __futex_asm(fetchor4)
+#define __futex_andn() ({ oparg = ~oparg; __futex_asm(fetchand4); })
+#define __futex_cmpxchg() \
+ ({ __insn_mtspr(SPR_CMPEXCH_VALUE, oldval); __futex_asm(cmpexch4); })
+
+#define __futex_xor() \
+ ({ \
+ u32 oldval, n = oparg; \
+ if ((ret = __get_user(oldval, uaddr)) == 0) { \
+ do { \
+ oparg = oldval ^ n; \
+ __futex_cmpxchg(); \
+ } while (ret == 0 && oldval != val); \
+ } \
+ })
+
+/* No need to prefetch, since the atomic ops go to the home cache anyway. */
+#define __futex_prolog()
-#ifndef __tilegx__
-extern struct __get_user futex_xor(u32 __user *v, int n);
#else
-static inline struct __get_user futex_xor(u32 __user *uaddr, int n)
-{
- struct __get_user asm_ret = __get_user_4(uaddr);
- if (!asm_ret.err) {
- int oldval, newval;
- do {
- oldval = asm_ret.val;
- newval = oldval ^ n;
- asm_ret = futex_cmpxchg(uaddr, oldval, newval);
- } while (asm_ret.err == 0 && oldval != asm_ret.val);
+
+#define __futex_call(FN) \
+ { \
+ struct __get_user gu = FN((u32 __force *)uaddr, lock, oparg); \
+ val = gu.val; \
+ ret = gu.err; \
}
- return asm_ret;
-}
+
+#define __futex_set() __futex_call(__atomic_xchg)
+#define __futex_add() __futex_call(__atomic_xchg_add)
+#define __futex_or() __futex_call(__atomic_or)
+#define __futex_andn() __futex_call(__atomic_andn)
+#define __futex_xor() __futex_call(__atomic_xor)
+
+#define __futex_cmpxchg() \
+ { \
+ struct __get_user gu = __atomic_cmpxchg((u32 __force *)uaddr, \
+ lock, oldval, oparg); \
+ val = gu.val; \
+ ret = gu.err; \
+ }
+
+/*
+ * Find the lock pointer for the atomic calls to use, and issue a
+ * prefetch to the user address to bring it into cache. Similar to
+ * __atomic_setup(), but we can't do a read into the L1 since it might
+ * fault; instead we do a prefetch into the L2.
+ */
+#define __futex_prolog() \
+ int *lock; \
+ __insn_prefetch(uaddr); \
+ lock = __atomic_hashed_lock((int __force *)uaddr)
#endif
static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
@@ -59,8 +111,12 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
- int ret;
- struct __get_user asm_ret;
+ int uninitialized_var(val), ret;
+
+ __futex_prolog();
+
+ /* The 32-bit futex code makes this assumption, so validate it here. */
+ BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
@@ -71,46 +127,45 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
- asm_ret = futex_set(uaddr, oparg);
+ __futex_set();
break;
case FUTEX_OP_ADD:
- asm_ret = futex_add(uaddr, oparg);
+ __futex_add();
break;
case FUTEX_OP_OR:
- asm_ret = futex_or(uaddr, oparg);
+ __futex_or();
break;
case FUTEX_OP_ANDN:
- asm_ret = futex_andn(uaddr, oparg);
+ __futex_andn();
break;
case FUTEX_OP_XOR:
- asm_ret = futex_xor(uaddr, oparg);
+ __futex_xor();
break;
default:
- asm_ret.err = -ENOSYS;
+ ret = -ENOSYS;
+ break;
}
pagefault_enable();
- ret = asm_ret.err;
-
if (!ret) {
switch (cmp) {
case FUTEX_OP_CMP_EQ:
- ret = (asm_ret.val == cmparg);
+ ret = (val == cmparg);
break;
case FUTEX_OP_CMP_NE:
- ret = (asm_ret.val != cmparg);
+ ret = (val != cmparg);
break;
case FUTEX_OP_CMP_LT:
- ret = (asm_ret.val < cmparg);
+ ret = (val < cmparg);
break;
case FUTEX_OP_CMP_GE:
- ret = (asm_ret.val >= cmparg);
+ ret = (val >= cmparg);
break;
case FUTEX_OP_CMP_LE:
- ret = (asm_ret.val <= cmparg);
+ ret = (val <= cmparg);
break;
case FUTEX_OP_CMP_GT:
- ret = (asm_ret.val > cmparg);
+ ret = (val > cmparg);
break;
default:
ret = -ENOSYS;
@@ -120,22 +175,20 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
}
static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
+ u32 oldval, u32 oparg)
{
- struct __get_user asm_ret;
+ int ret, val;
+
+ __futex_prolog();
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
- asm_ret = futex_cmpxchg(uaddr, oldval, newval);
- *uval = asm_ret.val;
- return asm_ret.err;
-}
+ __futex_cmpxchg();
-#ifndef __tilegx__
-/* Return failure from the atomic wrappers. */
-struct __get_user __atomic_bad_address(int __user *addr);
-#endif
+ *uval = val;
+ return ret;
+}
#endif /* !__ASSEMBLY__ */
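
Worth noting how the tilegx __futex_xor() above works: with no single fetch-and-xor instruction, it loops on compare-exchange until the word is unchanged between read and swap. The same pattern written with the generic GCC/Clang __atomic builtins, as a sketch of the idiom rather than the kernel's code, and without the -EFAULT handling the real macros carry:

    #include <stdint.h>

    /* Retry-on-conflict xor, mirroring the structure of __futex_xor() above. */
    static uint32_t atomic_xor_return_old(uint32_t *addr, uint32_t n)
    {
            uint32_t old = __atomic_load_n(addr, __ATOMIC_RELAXED);

            /* On failure the CAS refreshes 'old' with the current value. */
            while (!__atomic_compare_exchange_n(addr, &old, old ^ n,
                                                0 /* strong */,
                                                __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
                    ;
            return old;
    }
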
diff --git a/arch/tile/include/asm/hardwall.h b/arch/tile/include/asm/hardwall.h
index 2ac422848c7..47514a58d68 100644
--- a/arch/tile/include/asm/hardwall.h
+++ b/arch/tile/include/asm/hardwall.h
@@ -11,12 +11,14 @@
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
- * Provide methods for the HARDWALL_FILE for accessing the UDN.
+ * Provide methods for access control of per-cpu resources like
+ * UDN, IDN, or IPI.
*/
#ifndef _ASM_TILE_HARDWALL_H
#define _ASM_TILE_HARDWALL_H
+#include <arch/chip.h>
#include <linux/ioctl.h>
#define HARDWALL_IOCTL_BASE 0xa2
@@ -24,8 +26,9 @@
/*
* The HARDWALL_CREATE() ioctl is a macro with a "size" argument.
* The resulting ioctl value is passed to the kernel in conjunction
- * with a pointer to a little-endian bitmask of cpus, which must be
- * physically in a rectangular configuration on the chip.
+ * with a pointer to a standard kernel bitmask of cpus.
+ * For network resources (UDN or IDN) the bitmask must physically
+ * represent a rectangular configuration on the chip.
* The "size" is the number of bytes of cpu mask data.
*/
#define _HARDWALL_CREATE 1
@@ -44,13 +47,7 @@
#define HARDWALL_GET_ID \
_IO(HARDWALL_IOCTL_BASE, _HARDWALL_GET_ID)
-#ifndef __KERNEL__
-
-/* This is the canonical name expected by userspace. */
-#define HARDWALL_FILE "/dev/hardwall"
-
-#else
-
+#ifdef __KERNEL__
/* /proc hooks for hardwall. */
struct proc_dir_entry;
#ifdef CONFIG_HARDWALL
@@ -59,7 +56,6 @@ int proc_pid_hardwall(struct task_struct *task, char *buffer);
#else
static inline void proc_tile_hardwall_init(struct proc_dir_entry *root) {}
#endif
-
#endif
#endif /* _ASM_TILE_HARDWALL_H */
diff --git a/arch/tile/include/asm/hugetlb.h b/arch/tile/include/asm/hugetlb.h
index d396d180516..b2042380a5a 100644
--- a/arch/tile/include/asm/hugetlb.h
+++ b/arch/tile/include/asm/hugetlb.h
@@ -106,4 +106,25 @@ static inline void arch_release_hugepage(struct page *page)
{
}
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+ struct page *page, int writable)
+{
+ size_t pagesize = huge_page_size(hstate_vma(vma));
+ if (pagesize != PUD_SIZE && pagesize != PMD_SIZE)
+ entry = pte_mksuper(entry);
+ return entry;
+}
+#define arch_make_huge_pte arch_make_huge_pte
+
+/* Sizes to scale up page size for PTEs with HV_PTE_SUPER bit. */
+enum {
+ HUGE_SHIFT_PGDIR = 0,
+ HUGE_SHIFT_PMD = 1,
+ HUGE_SHIFT_PAGE = 2,
+ HUGE_SHIFT_ENTRIES
+};
+extern int huge_shift[HUGE_SHIFT_ENTRIES];
+#endif
+
#endif /* _ASM_TILE_HUGETLB_H */
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 5db0ce54284..b4e96fef2cf 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -28,10 +28,10 @@
*/
#if CHIP_HAS_AUX_PERF_COUNTERS()
#define LINUX_MASKABLE_INTERRUPTS_HI \
- (~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
+ (~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
#else
#define LINUX_MASKABLE_INTERRUPTS_HI \
- (~(INT_MASK_HI(INT_PERF_COUNT)))
+ (~(INT_MASK_HI(INT_PERF_COUNT)))
#endif
#else
@@ -90,6 +90,14 @@
__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
} while (0)
+#define interrupt_mask_save_mask() \
+ (__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_0) | \
+ (((unsigned long long)__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_1))<<32))
+#define interrupt_mask_restore_mask(mask) do { \
+ unsigned long long __m = (mask); \
+ __insn_mtspr(SPR_INTERRUPT_MASK_K_0, (unsigned long)(__m)); \
+ __insn_mtspr(SPR_INTERRUPT_MASK_K_1, (unsigned long)(__m>>32)); \
+} while (0)
#else
#define interrupt_mask_set(n) \
__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
@@ -101,6 +109,10 @@
__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
#define interrupt_mask_reset_mask(mask) \
__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
+#define interrupt_mask_save_mask() \
+ __insn_mfspr(SPR_INTERRUPT_MASK_K)
+#define interrupt_mask_restore_mask(mask) \
+ __insn_mtspr(SPR_INTERRUPT_MASK_K, (mask))
#endif
/*
@@ -122,7 +134,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
/* Disable all interrupts, including NMIs. */
#define arch_local_irq_disable_all() \
- interrupt_mask_set_mask(-1UL)
+ interrupt_mask_set_mask(-1ULL)
/* Re-enable all maskable interrupts. */
#define arch_local_irq_enable() \
@@ -179,7 +191,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
#ifdef __tilegx__
#if INT_MEM_ERROR != 0
-# error Fix IRQ_DISABLED() macro
+# error Fix IRQS_DISABLED() macro
#endif
/* Return 0 or 1 to indicate whether interrupts are currently disabled. */
@@ -207,9 +219,10 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
mtspr SPR_INTERRUPT_MASK_SET_K, tmp
/* Enable interrupts. */
-#define IRQ_ENABLE(tmp0, tmp1) \
+#define IRQ_ENABLE_LOAD(tmp0, tmp1) \
GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
- ld tmp0, tmp0; \
+ ld tmp0, tmp0
+#define IRQ_ENABLE_APPLY(tmp0, tmp1) \
mtspr SPR_INTERRUPT_MASK_RESET_K, tmp0
#else /* !__tilegx__ */
@@ -253,17 +266,22 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp
/* Enable interrupts. */
-#define IRQ_ENABLE(tmp0, tmp1) \
+#define IRQ_ENABLE_LOAD(tmp0, tmp1) \
GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
{ \
lw tmp0, tmp0; \
addi tmp1, tmp0, 4 \
}; \
- lw tmp1, tmp1; \
+ lw tmp1, tmp1
+#define IRQ_ENABLE_APPLY(tmp0, tmp1) \
mtspr SPR_INTERRUPT_MASK_RESET_K_0, tmp0; \
mtspr SPR_INTERRUPT_MASK_RESET_K_1, tmp1
#endif
+#define IRQ_ENABLE(tmp0, tmp1) \
+ IRQ_ENABLE_LOAD(tmp0, tmp1); \
+ IRQ_ENABLE_APPLY(tmp0, tmp1)
+
/*
* Do the CPU's IRQ-state tracing from assembly code. We call a
* C function, but almost everywhere we do, we don't mind clobbering
diff --git a/arch/tile/include/asm/kexec.h b/arch/tile/include/asm/kexec.h
index c11a6cc73bb..fc98ccfc98a 100644
--- a/arch/tile/include/asm/kexec.h
+++ b/arch/tile/include/asm/kexec.h
@@ -19,12 +19,24 @@
#include <asm/page.h>
+#ifndef __tilegx__
/* Maximum physical address we can use pages from. */
#define KEXEC_SOURCE_MEMORY_LIMIT TASK_SIZE
/* Maximum address we can reach in physical address mode. */
#define KEXEC_DESTINATION_MEMORY_LIMIT TASK_SIZE
/* Maximum address we can use for the control code buffer. */
#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
+#else
+/* We need to limit the memory below PGDIR_SIZE since
+ * we only setup page table for [0, PGDIR_SIZE) before final kexec.
+ */
+/* Maximum physical address we can use pages from. */
+#define KEXEC_SOURCE_MEMORY_LIMIT PGDIR_SIZE
+/* Maximum address we can reach in physical address mode. */
+#define KEXEC_DESTINATION_MEMORY_LIMIT PGDIR_SIZE
+/* Maximum address we can use for the control code buffer. */
+#define KEXEC_CONTROL_MEMORY_LIMIT PGDIR_SIZE
+#endif
#define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE
diff --git a/arch/tile/include/asm/kvm_para.h b/arch/tile/include/asm/kvm_para.h
new file mode 100644
index 00000000000..14fab8f0b95
--- /dev/null
+++ b/arch/tile/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/tile/include/asm/mmu.h b/arch/tile/include/asm/mmu.h
index 92f94c77b6e..e2c78909679 100644
--- a/arch/tile/include/asm/mmu.h
+++ b/arch/tile/include/asm/mmu.h
@@ -21,7 +21,7 @@ struct mm_context {
* Written under the mmap_sem semaphore; read without the
* semaphore but atomically, but it is conservatively set.
*/
- unsigned int priority_cached;
+ unsigned long priority_cached;
};
typedef struct mm_context mm_context_t;
diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h
index 15fb2464112..37f0b741dee 100644
--- a/arch/tile/include/asm/mmu_context.h
+++ b/arch/tile/include/asm/mmu_context.h
@@ -30,11 +30,15 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
return 0;
}
-/* Note that arch/tile/kernel/head.S also calls hv_install_context() */
+/*
+ * Note that arch/tile/kernel/head_NN.S and arch/tile/mm/migrate_NN.S
+ * also call hv_install_context().
+ */
static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
{
/* FIXME: DIRECTIO should not always be set. FIXME. */
- int rc = hv_install_context(__pa(pgdir), prot, asid, HV_CTX_DIRECTIO);
+ int rc = hv_install_context(__pa(pgdir), prot, asid,
+ HV_CTX_DIRECTIO | CTX_PAGE_FLAG);
if (rc < 0)
panic("hv_install_context failed: %d", rc);
}
diff --git a/arch/tile/include/asm/module.h b/arch/tile/include/asm/module.h
new file mode 100644
index 00000000000..44ed07ccd3d
--- /dev/null
+++ b/arch/tile/include/asm/module.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ASM_TILE_MODULE_H
+#define _ASM_TILE_MODULE_H
+
+#include <arch/chip.h>
+
+#include <asm-generic/module.h>
+
+/* We can't use modules built with different page sizes. */
+#if defined(CONFIG_PAGE_SIZE_16KB)
+# define MODULE_PGSZ " 16KB"
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+# define MODULE_PGSZ " 64KB"
+#else
+# define MODULE_PGSZ ""
+#endif
+
+/* We don't really support no-SMP so tag if someone tries. */
+#ifdef CONFIG_SMP
+#define MODULE_NOSMP ""
+#else
+#define MODULE_NOSMP " nosmp"
+#endif
+
+#define MODULE_ARCH_VERMAGIC CHIP_ARCH_NAME MODULE_PGSZ MODULE_NOSMP
+
+#endif /* _ASM_TILE_MODULE_H */
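
The effect of the new vermagic string is easiest to see with concrete values. Assuming CHIP_ARCH_NAME expands to "tilegx" (its definition lives in <arch/chip.h> and is not shown in this patch) and a CONFIG_PAGE_SIZE_64KB, CONFIG_SMP build:

    /* Illustration only: CHIP_ARCH_NAME's real expansion is not in this patch. */
    #define CHIP_ARCH_NAME  "tilegx"        /* assumed */
    #define MODULE_PGSZ     " 64KB"         /* CONFIG_PAGE_SIZE_64KB selected */
    #define MODULE_NOSMP    ""              /* CONFIG_SMP enabled */

    #define MODULE_ARCH_VERMAGIC CHIP_ARCH_NAME MODULE_PGSZ MODULE_NOSMP
    /* -> "tilegx 64KB"; a module built against 16KB pages carries
     *    "tilegx 16KB" instead and is rejected at load time. */
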
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index db93518fac0..9d9131e5c55 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -20,8 +20,17 @@
#include <arch/chip.h>
/* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
-#define PAGE_SHIFT HV_LOG2_PAGE_SIZE_SMALL
-#define HPAGE_SHIFT HV_LOG2_PAGE_SIZE_LARGE
+#if defined(CONFIG_PAGE_SIZE_16KB)
+#define PAGE_SHIFT 14
+#define CTX_PAGE_FLAG HV_CTX_PG_SM_16K
+#elif defined(CONFIG_PAGE_SIZE_64KB)
+#define PAGE_SHIFT 16
+#define CTX_PAGE_FLAG HV_CTX_PG_SM_64K
+#else
+#define PAGE_SHIFT HV_LOG2_DEFAULT_PAGE_SIZE_SMALL
+#define CTX_PAGE_FLAG 0
+#endif
+#define HPAGE_SHIFT HV_LOG2_DEFAULT_PAGE_SIZE_LARGE
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
@@ -78,8 +87,7 @@ typedef HV_PTE pgprot_t;
/*
* User L2 page tables are managed as one L2 page table per page,
* because we use the page allocator for them. This keeps the allocation
- * simple and makes it potentially useful to implement HIGHPTE at some point.
- * However, it's also inefficient, since L2 page tables are much smaller
+ * simple, but it's also inefficient, since L2 page tables are much smaller
* than pages (currently 2KB vs 64KB). So we should revisit this.
*/
typedef struct page *pgtable_t;
@@ -128,7 +136,7 @@ static inline __attribute_const__ int get_order(unsigned long size)
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-#define HUGE_MAX_HSTATE 2
+#define HUGE_MAX_HSTATE 6
#ifdef CONFIG_HUGETLB_PAGE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
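
Plugging the 64KB Kconfig choice into the new PAGE_SHIFT selection gives the numbers quoted in the L2-page-table comment above. HPAGE_SHIFT == 24 (16MB huge pages) is an assumption about the hypervisor default large-page size, not something this hunk states:

    /* Worked numbers for CONFIG_PAGE_SIZE_64KB. */
    #define PAGE_SHIFT          16
    #define HPAGE_SHIFT         24                          /* assumed hypervisor default */
    #define PAGE_SIZE           (1UL << PAGE_SHIFT)         /* 64 KB */
    #define HPAGE_SIZE          (1UL << HPAGE_SHIFT)        /* 16 MB */
    #define HUGETLB_PAGE_ORDER  (HPAGE_SHIFT - PAGE_SHIFT)  /* 8, i.e. 256 pages per huge page */
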
diff --git a/arch/tile/include/asm/pgalloc.h b/arch/tile/include/asm/pgalloc.h
index e919c0bdc22..1b902508b66 100644
--- a/arch/tile/include/asm/pgalloc.h
+++ b/arch/tile/include/asm/pgalloc.h
@@ -19,24 +19,24 @@
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
+#include <asm/page.h>
#include <hv/hypervisor.h>
/* Bits for the size of the second-level page table. */
-#define L2_KERNEL_PGTABLE_SHIFT \
- (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL + HV_LOG2_PTE_SIZE)
+#define L2_KERNEL_PGTABLE_SHIFT _HV_LOG2_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
+
+/* How big is a kernel L2 page table? */
+#define L2_KERNEL_PGTABLE_SIZE (1UL << L2_KERNEL_PGTABLE_SHIFT)
/* We currently allocate user L2 page tables by page (unlike kernel L2s). */
-#if L2_KERNEL_PGTABLE_SHIFT < HV_LOG2_PAGE_SIZE_SMALL
-#define L2_USER_PGTABLE_SHIFT HV_LOG2_PAGE_SIZE_SMALL
+#if L2_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
+#define L2_USER_PGTABLE_SHIFT PAGE_SHIFT
#else
#define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT
#endif
/* How many pages do we need, as an "order", for a user L2 page table? */
-#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - HV_LOG2_PAGE_SIZE_SMALL)
-
-/* How big is a kernel L2 page table? */
-#define L2_KERNEL_PGTABLE_SIZE (1 << L2_KERNEL_PGTABLE_SHIFT)
+#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - PAGE_SHIFT)
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
@@ -50,14 +50,14 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
static inline void pmd_populate_kernel(struct mm_struct *mm,
pmd_t *pmd, pte_t *ptep)
{
- set_pmd(pmd, ptfn_pmd(__pa(ptep) >> HV_LOG2_PAGE_TABLE_ALIGN,
+ set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(__pa(ptep)),
__pgprot(_PAGE_PRESENT)));
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
pgtable_t page)
{
- set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(page_to_pfn(page)),
+ set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(PFN_PHYS(page_to_pfn(page))),
__pgprot(_PAGE_PRESENT)));
}
@@ -68,8 +68,20 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
-extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address);
-extern void pte_free(struct mm_struct *mm, struct page *pte);
+extern pgtable_t pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
+ int order);
+extern void pgtable_free(struct mm_struct *mm, struct page *pte, int order);
+
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+{
+ return pgtable_alloc_one(mm, address, L2_USER_PGTABLE_ORDER);
+}
+
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
+{
+ pgtable_free(mm, pte, L2_USER_PGTABLE_ORDER);
+}
#define pmd_pgtable(pmd) pmd_page(pmd)
@@ -85,8 +97,13 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
pte_free(mm, virt_to_page(pte));
}
-extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
- unsigned long address);
+extern void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
+ unsigned long address, int order);
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
+ unsigned long address)
+{
+ __pgtable_free_tlb(tlb, pte, address, L2_USER_PGTABLE_ORDER);
+}
#define check_pgt_cache() do { } while (0)
@@ -104,19 +121,44 @@ void shatter_pmd(pmd_t *pmd);
void shatter_huge_page(unsigned long addr);
#ifdef __tilegx__
-/* We share a single page allocator for both L1 and L2 page tables. */
-#if HV_L1_SIZE != HV_L2_SIZE
-# error Rework assumption that L1 and L2 page tables are same size.
-#endif
-#define L1_USER_PGTABLE_ORDER L2_USER_PGTABLE_ORDER
+
#define pud_populate(mm, pud, pmd) \
pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd))
-#define pmd_alloc_one(mm, addr) \
- ((pmd_t *)page_to_virt(pte_alloc_one((mm), (addr))))
-#define pmd_free(mm, pmdp) \
- pte_free((mm), virt_to_page(pmdp))
-#define __pmd_free_tlb(tlb, pmdp, address) \
- __pte_free_tlb((tlb), virt_to_page(pmdp), (address))
+
+/* Bits for the size of the L1 (intermediate) page table. */
+#define L1_KERNEL_PGTABLE_SHIFT _HV_LOG2_L1_SIZE(HPAGE_SHIFT)
+
+/* How big is a kernel L1 page table? */

+#define L1_KERNEL_PGTABLE_SIZE (1UL << L1_KERNEL_PGTABLE_SHIFT)
+
+/* We currently allocate L1 page tables by page. */
+#if L1_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
+#define L1_USER_PGTABLE_SHIFT PAGE_SHIFT
+#else
+#define L1_USER_PGTABLE_SHIFT L1_KERNEL_PGTABLE_SHIFT
#endif
+/* How many pages do we need, as an "order", for an L1 page table? */
+#define L1_USER_PGTABLE_ORDER (L1_USER_PGTABLE_SHIFT - PAGE_SHIFT)
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ struct page *p = pgtable_alloc_one(mm, address, L1_USER_PGTABLE_ORDER);
+ return (pmd_t *)page_to_virt(p);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
+{
+ pgtable_free(mm, virt_to_page(pmdp), L1_USER_PGTABLE_ORDER);
+}
+
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
+ unsigned long address)
+{
+ __pgtable_free_tlb(tlb, virt_to_page(pmdp), address,
+ L1_USER_PGTABLE_ORDER);
+}
+
+#endif /* __tilegx__ */
+
#endif /* _ASM_TILE_PGALLOC_H */
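
The pgalloc rework above funnels user pte pages and (on tilegx) pmd pages through one order-based allocator. Tracing the shifts for the 64KB-page case shows why the order usually collapses to zero; the 8-byte pte_t and 16MB huge page below are assumptions about the configuration, but the resulting 2KB kernel L2 table matches the comment in <asm/page.h>:

    /* Worked example of the order used by pte_alloc_one() above, assuming
     * PAGE_SHIFT=16, HPAGE_SHIFT=24 and sizeof(pte_t)==8 (log2 == 3). */
    #define PAGE_SHIFT              16
    #define HPAGE_SHIFT             24                          /* assumed */
    #define LOG2_PTE_SIZE           3                           /* assumed */

    /* Entries per L2 table times entry size: (24 - 16) + 3 = 11 -> 2KB. */
    #define L2_KERNEL_PGTABLE_SHIFT (HPAGE_SHIFT - PAGE_SHIFT + LOG2_PTE_SIZE)

    #if L2_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
    #define L2_USER_PGTABLE_SHIFT   PAGE_SHIFT                  /* round up to a full page */
    #else
    #define L2_USER_PGTABLE_SHIFT   L2_KERNEL_PGTABLE_SHIFT
    #endif

    /* 16 - 16 = 0: a user L2 table is a single order-0 page. */
    #define L2_USER_PGTABLE_ORDER   (L2_USER_PGTABLE_SHIFT - PAGE_SHIFT)
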
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index 67490910774..73b1a4c9ad0 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -27,8 +27,10 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
+#include <linux/pfn.h>
#include <asm/processor.h>
#include <asm/fixmap.h>
+#include <asm/page.h>
struct mm_struct;
struct vm_area_struct;
@@ -69,6 +71,7 @@ extern void set_page_homes(void);
#define _PAGE_PRESENT HV_PTE_PRESENT
#define _PAGE_HUGE_PAGE HV_PTE_PAGE
+#define _PAGE_SUPER_PAGE HV_PTE_SUPER
#define _PAGE_READABLE HV_PTE_READABLE
#define _PAGE_WRITABLE HV_PTE_WRITABLE
#define _PAGE_EXECUTABLE HV_PTE_EXECUTABLE
@@ -85,6 +88,7 @@ extern void set_page_homes(void);
#define _PAGE_ALL (\
_PAGE_PRESENT | \
_PAGE_HUGE_PAGE | \
+ _PAGE_SUPER_PAGE | \
_PAGE_READABLE | \
_PAGE_WRITABLE | \
_PAGE_EXECUTABLE | \
@@ -162,7 +166,7 @@ extern void set_page_homes(void);
(pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val }
/* Just setting the PFN to zero suffices. */
-#define pte_pgprot(x) hv_pte_set_pfn((x), 0)
+#define pte_pgprot(x) hv_pte_set_pa((x), 0)
/*
* For PTEs and PDEs, we must clear the Present bit first when
@@ -187,6 +191,7 @@ static inline void __pte_clear(pte_t *ptep)
* Undefined behaviour if not..
*/
#define pte_present hv_pte_get_present
+#define pte_mknotpresent hv_pte_clear_present
#define pte_user hv_pte_get_user
#define pte_read hv_pte_get_readable
#define pte_dirty hv_pte_get_dirty
@@ -194,6 +199,7 @@ static inline void __pte_clear(pte_t *ptep)
#define pte_write hv_pte_get_writable
#define pte_exec hv_pte_get_executable
#define pte_huge hv_pte_get_page
+#define pte_super hv_pte_get_super
#define pte_rdprotect hv_pte_clear_readable
#define pte_exprotect hv_pte_clear_executable
#define pte_mkclean hv_pte_clear_dirty
@@ -206,6 +212,7 @@ static inline void __pte_clear(pte_t *ptep)
#define pte_mkyoung hv_pte_set_accessed
#define pte_mkwrite hv_pte_set_writable
#define pte_mkhuge hv_pte_set_page
+#define pte_mksuper hv_pte_set_super
#define pte_special(pte) 0
#define pte_mkspecial(pte) (pte)
@@ -261,7 +268,7 @@ static inline int pte_none(pte_t pte)
static inline unsigned long pte_pfn(pte_t pte)
{
- return hv_pte_get_pfn(pte);
+ return PFN_DOWN(hv_pte_get_pa(pte));
}
/* Set or get the remote cache cpu in a pgprot with remote caching. */
@@ -270,7 +277,7 @@ extern int get_remote_cache_cpu(pgprot_t prot);
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
- return hv_pte_set_pfn(prot, pfn);
+ return hv_pte_set_pa(prot, PFN_PHYS(pfn));
}
/* Support for priority mappings. */
@@ -312,7 +319,7 @@ extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next);
*/
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
- return pfn_pte(hv_pte_get_pfn(pte), newprot);
+ return pfn_pte(pte_pfn(pte), newprot);
}
/*
@@ -335,13 +342,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
*/
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-#if defined(CONFIG_HIGHPTE)
-extern pte_t *pte_offset_map(pmd_t *, unsigned long address);
-#define pte_unmap(pte) kunmap_atomic(pte)
-#else
#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
#define pte_unmap(pte) do { } while (0)
-#endif
/* Clear a non-executable kernel PTE and flush it from the TLB. */
#define kpte_clear_flush(ptep, vaddr) \
@@ -410,6 +412,46 @@ static inline unsigned long pmd_index(unsigned long address)
return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ return ptep_test_and_clear_young(vma, address, pmdp_ptep(pmdp));
+}
+
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ ptep_set_wrprotect(mm, address, pmdp_ptep(pmdp));
+}
+
+
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ return pte_pmd(ptep_get_and_clear(mm, address, pmdp_ptep(pmdp)));
+}
+
+static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+ set_pte(pmdp_ptep(pmdp), pmd_pte(pmdval));
+}
+
+#define set_pmd_at(mm, addr, pmdp, pmdval) __set_pmd(pmdp, pmdval)
+
+/* Create a pmd from a PTFN. */
+static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
+{
+ return pte_pmd(hv_pte_set_ptfn(prot, ptfn));
+}
+
+/* Return the page-table frame number (ptfn) that a pmd_t points at. */
+#define pmd_ptfn(pmd) hv_pte_get_ptfn(pmd_pte(pmd))
+
/*
* A given kernel pmd_t maps to a specific virtual address (either a
* kernel huge page or a kernel pte_t table). Since kernel pte_t
@@ -430,7 +472,48 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
* OK for pte_lockptr(), since we just end up with potentially one
* lock being used for several pte_t arrays.
*/
-#define pmd_page(pmd) pfn_to_page(HV_PTFN_TO_PFN(pmd_ptfn(pmd)))
+#define pmd_page(pmd) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pmd_ptfn(pmd))))
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ __pte_clear(pmdp_ptep(pmdp));
+}
+
+#define pmd_mknotpresent(pmd) pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
+#define pmd_young(pmd) pte_young(pmd_pte(pmd))
+#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
+#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_write(pmd) pte_write(pmd_pte(pmd))
+#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+#define pmd_huge_page(pmd) pte_huge(pmd_pte(pmd))
+#define pmd_mkhuge(pmd) pte_pmd(pte_mkhuge(pmd_pte(pmd)))
+#define __HAVE_ARCH_PMD_WRITE
+
+#define pfn_pmd(pfn, pgprot) pte_pmd(pfn_pte((pfn), (pgprot)))
+#define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd))
+#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+ return pfn_pmd(pmd_pfn(pmd), newprot);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define has_transparent_hugepage() 1
+#define pmd_trans_huge pmd_huge_page
+
+static inline pmd_t pmd_mksplitting(pmd_t pmd)
+{
+ return pte_pmd(hv_pte_set_client2(pmd_pte(pmd)));
+}
+
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+ return hv_pte_get_client2(pmd_pte(pmd));
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
* The pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
@@ -448,17 +531,13 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
-static inline int pmd_huge_page(pmd_t pmd)
-{
- return pmd_val(pmd) & _PAGE_HUGE_PAGE;
-}
-
#include <asm-generic/pgtable.h>
/* Support /proc/NN/pgtable API. */
struct seq_file;
int arch_proc_pgtable_show(struct seq_file *m, struct mm_struct *mm,
- unsigned long vaddr, pte_t *ptep, void **datap);
+ unsigned long vaddr, unsigned long pagesize,
+ pte_t *ptep, void **datap);
#endif /* !__ASSEMBLY__ */
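
Note the single pattern behind the block of pmd_* wrappers added above: unwrap to a pte, reuse the pte/hv_pte operation, wrap back, with pte_pmd()/pmd_pte() being identity macros on tilegx and struct (un)wrapping on tilepro. A generic sketch of that idiom, where the *_sketch_t types and the dirty bit are stand-ins rather than the kernel's definitions:

    /* Sketch of the wrap/unwrap idiom used by the pmd_* macros above. */
    typedef struct { unsigned long val; } pte_sketch_t;
    typedef struct { pte_sketch_t pte; } pmd_sketch_t;  /* 32-bit style: pmd wraps a pte */

    #define sk_pmd_pte(pmd)  ((pmd).pte)
    #define sk_pte_pmd(pte)  ((pmd_sketch_t){ (pte) })

    static pte_sketch_t sk_pte_mkdirty(pte_sketch_t pte)
    {
            pte.val |= 1UL << 0;                        /* pretend bit 0 is the dirty bit */
            return pte;
    }

    /* Every pmd op is just: unwrap, apply the pte op, wrap again. */
    #define sk_pmd_mkdirty(pmd)  sk_pte_pmd(sk_pte_mkdirty(sk_pmd_pte(pmd)))
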
diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h
index 9f98529761f..4ce4a7a99c2 100644
--- a/arch/tile/include/asm/pgtable_32.h
+++ b/arch/tile/include/asm/pgtable_32.h
@@ -20,11 +20,12 @@
* The level-1 index is defined by the huge page size. A PGD is composed
* of PTRS_PER_PGD pgd_t's and is the top level of the page table.
*/
-#define PGDIR_SHIFT HV_LOG2_PAGE_SIZE_LARGE
-#define PGDIR_SIZE HV_PAGE_SIZE_LARGE
+#define PGDIR_SHIFT HPAGE_SHIFT
+#define PGDIR_SIZE HPAGE_SIZE
#define PGDIR_MASK (~(PGDIR_SIZE-1))
-#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
-#define SIZEOF_PGD (PTRS_PER_PGD * sizeof(pgd_t))
+#define PTRS_PER_PGD _HV_L1_ENTRIES(HPAGE_SHIFT)
+#define PGD_INDEX(va) _HV_L1_INDEX(va, HPAGE_SHIFT)
+#define SIZEOF_PGD _HV_L1_SIZE(HPAGE_SHIFT)
/*
* The level-2 index is defined by the difference between the huge
@@ -33,8 +34,9 @@
* Note that the hypervisor docs use PTE for what we call pte_t, so
* this nomenclature is somewhat confusing.
*/
-#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
-#define SIZEOF_PTE (PTRS_PER_PTE * sizeof(pte_t))
+#define PTRS_PER_PTE _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
+#define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
+#define SIZEOF_PTE _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
#ifndef __ASSEMBLY__
@@ -111,24 +113,14 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
return pte;
}
-static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
-{
- set_pte(&pmdp->pud.pgd, pmdval.pud.pgd);
-}
-
-/* Create a pmd from a PTFN. */
-static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
-{
- return (pmd_t){ { hv_pte_set_ptfn(prot, ptfn) } };
-}
-
-/* Return the page-table frame number (ptfn) that a pmd_t points at. */
-#define pmd_ptfn(pmd) hv_pte_get_ptfn((pmd).pud.pgd)
-
-static inline void pmd_clear(pmd_t *pmdp)
-{
- __pte_clear(&pmdp->pud.pgd);
-}
+/*
+ * pmds are wrappers around pgds, which are the same as ptes.
+ * It's often convenient to "cast" back and forth and use the pte methods,
+ * which are the methods supplied by the hypervisor.
+ */
+#define pmd_pte(pmd) ((pmd).pud.pgd)
+#define pmdp_ptep(pmdp) (&(pmdp)->pud.pgd)
+#define pte_pmd(pte) ((pmd_t){ { (pte) } })
#endif /* __ASSEMBLY__ */
diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h
index fd80328523b..2492fa5478e 100644
--- a/arch/tile/include/asm/pgtable_64.h
+++ b/arch/tile/include/asm/pgtable_64.h
@@ -21,17 +21,19 @@
#define PGDIR_SIZE HV_L1_SPAN
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD HV_L0_ENTRIES
-#define SIZEOF_PGD (PTRS_PER_PGD * sizeof(pgd_t))
+#define PGD_INDEX(va) HV_L0_INDEX(va)
+#define SIZEOF_PGD HV_L0_SIZE
/*
* The level-1 index is defined by the huge page size. A PMD is composed
* of PTRS_PER_PMD pgd_t's and is the middle level of the page table.
*/
-#define PMD_SHIFT HV_LOG2_PAGE_SIZE_LARGE
-#define PMD_SIZE HV_PAGE_SIZE_LARGE
+#define PMD_SHIFT HPAGE_SHIFT
+#define PMD_SIZE HPAGE_SIZE
#define PMD_MASK (~(PMD_SIZE-1))
-#define PTRS_PER_PMD (1 << (PGDIR_SHIFT - PMD_SHIFT))
-#define SIZEOF_PMD (PTRS_PER_PMD * sizeof(pmd_t))
+#define PTRS_PER_PMD _HV_L1_ENTRIES(HPAGE_SHIFT)
+#define PMD_INDEX(va) _HV_L1_INDEX(va, HPAGE_SHIFT)
+#define SIZEOF_PMD _HV_L1_SIZE(HPAGE_SHIFT)
/*
* The level-2 index is defined by the difference between the huge
@@ -40,17 +42,19 @@
* Note that the hypervisor docs use PTE for what we call pte_t, so
* this nomenclature is somewhat confusing.
*/
-#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
-#define SIZEOF_PTE (PTRS_PER_PTE * sizeof(pte_t))
+#define PTRS_PER_PTE _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
+#define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
+#define SIZEOF_PTE _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
/*
- * Align the vmalloc area to an L2 page table, and leave a guard page
- * at the beginning and end. The vmalloc code also puts in an internal
+ * Align the vmalloc area to an L2 page table. Omit guard pages at
+ * the beginning and end for simplicity (particularly in the per-cpu
+ * memory allocation code). The vmalloc code puts in an internal
* guard page between each allocation.
*/
#define _VMALLOC_END HUGE_VMAP_BASE
-#define VMALLOC_END (_VMALLOC_END - PAGE_SIZE)
-#define VMALLOC_START (_VMALLOC_START + PAGE_SIZE)
+#define VMALLOC_END _VMALLOC_END
+#define VMALLOC_START _VMALLOC_START
#define HUGE_VMAP_END (HUGE_VMAP_BASE + PGDIR_SIZE)
@@ -98,7 +102,7 @@ static inline int pud_bad(pud_t pud)
* A pud_t points to a pmd_t array. Since we can have multiple per
* page, we don't have a one-to-one mapping of pud_t's to pages.
*/
-#define pud_page(pud) pfn_to_page(HV_PTFN_TO_PFN(pud_ptfn(pud)))
+#define pud_page(pud) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pud_ptfn(pud))))
static inline unsigned long pud_index(unsigned long address)
{
@@ -108,28 +112,6 @@ static inline unsigned long pud_index(unsigned long address)
#define pmd_offset(pud, address) \
((pmd_t *)pud_page_vaddr(*(pud)) + pmd_index(address))
-static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
-{
- set_pte(pmdp, pmdval);
-}
-
-/* Create a pmd from a PTFN and pgprot. */
-static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
-{
- return hv_pte_set_ptfn(prot, ptfn);
-}
-
-/* Return the page-table frame number (ptfn) that a pmd_t points at. */
-static inline unsigned long pmd_ptfn(pmd_t pmd)
-{
- return hv_pte_get_ptfn(pmd);
-}
-
-static inline void pmd_clear(pmd_t *pmdp)
-{
- __pte_clear(pmdp);
-}
-
/* Normalize an address to having the correct high bits set. */
#define pgd_addr_normalize pgd_addr_normalize
static inline unsigned long pgd_addr_normalize(unsigned long addr)
@@ -170,6 +152,13 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
return hv_pte(__insn_exch(&ptep->val, 0UL));
}
+/*
+ * pmds are the same as pgds and ptes, so converting is a no-op.
+ */
+#define pmd_pte(pmd) (pmd)
+#define pmdp_ptep(pmdp) (pmdp)
+#define pte_pmd(pte) (pte)
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_TILE_PGTABLE_64_H */
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index 15cd8a4a06c..8c4dd9ff91e 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -76,6 +76,17 @@ struct async_tlb {
#ifdef CONFIG_HARDWALL
struct hardwall_info;
+struct hardwall_task {
+ /* Which hardwall is this task tied to? (or NULL if none) */
+ struct hardwall_info *info;
+ /* Chains this task into the list at info->task_head. */
+ struct list_head list;
+};
+#ifdef __tilepro__
+#define HARDWALL_TYPES 1 /* udn */
+#else
+#define HARDWALL_TYPES 3 /* udn, idn, and ipi */
+#endif
#endif
struct thread_struct {
@@ -116,10 +127,8 @@ struct thread_struct {
unsigned long dstream_pf;
#endif
#ifdef CONFIG_HARDWALL
- /* Is this task tied to an activated hardwall? */
- struct hardwall_info *hardwall;
- /* Chains this task into the list at hardwall->list. */
- struct list_head hardwall_list;
+ /* Hardwall information for various resources. */
+ struct hardwall_task hardwall[HARDWALL_TYPES];
#endif
#if CHIP_HAS_TILE_DMA()
/* Async DMA TLB fault information */
diff --git a/arch/tile/include/asm/setup.h b/arch/tile/include/asm/setup.h
index e58613e0752..c67eb70ea78 100644
--- a/arch/tile/include/asm/setup.h
+++ b/arch/tile/include/asm/setup.h
@@ -41,15 +41,15 @@ void restrict_dma_mpls(void);
#ifdef CONFIG_HARDWALL
/* User-level network management functions */
void reset_network_state(void);
-void grant_network_mpls(void);
-void restrict_network_mpls(void);
struct task_struct;
-int hardwall_deactivate(struct task_struct *task);
+void hardwall_switch_tasks(struct task_struct *prev, struct task_struct *next);
+void hardwall_deactivate_all(struct task_struct *task);
+int hardwall_ipi_valid(int cpu);
/* Hook hardwall code into changes in affinity. */
#define arch_set_cpus_allowed(p, new_mask) do { \
- if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \
- hardwall_deactivate(p); \
+ if (!cpumask_equal(&p->cpus_allowed, new_mask)) \
+ hardwall_deactivate_all(p); \
} while (0)
#endif
diff --git a/arch/tile/include/asm/syscalls.h b/arch/tile/include/asm/syscalls.h
index 3b5507c31ea..06f0464cfed 100644
--- a/arch/tile/include/asm/syscalls.h
+++ b/arch/tile/include/asm/syscalls.h
@@ -43,7 +43,8 @@ long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi,
u32 len, int advice);
int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi,
u32 len_lo, u32 len_hi, int advice);
-long sys_flush_cache(void);
+long sys_cacheflush(unsigned long addr, unsigned long len,
+ unsigned long flags);
#ifndef __tilegx__ /* No mmap() in the 32-bit kernel. */
#define sys_mmap sys_mmap
#endif
diff --git a/arch/tile/include/asm/tlbflush.h b/arch/tile/include/asm/tlbflush.h
index 96199d214fb..dcf91b25a1e 100644
--- a/arch/tile/include/asm/tlbflush.h
+++ b/arch/tile/include/asm/tlbflush.h
@@ -38,16 +38,11 @@ DECLARE_PER_CPU(int, current_asid);
/* The hypervisor tells us what ASIDs are available to us. */
extern int min_asid, max_asid;
-static inline unsigned long hv_page_size(const struct vm_area_struct *vma)
-{
- return (vma->vm_flags & VM_HUGETLB) ? HPAGE_SIZE : PAGE_SIZE;
-}
-
/* Pass as vma pointer for non-executable mapping, if no vma available. */
-#define FLUSH_NONEXEC ((const struct vm_area_struct *)-1UL)
+#define FLUSH_NONEXEC ((struct vm_area_struct *)-1UL)
/* Flush a single user page on this cpu. */
-static inline void local_flush_tlb_page(const struct vm_area_struct *vma,
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr,
unsigned long page_size)
{
@@ -60,7 +55,7 @@ static inline void local_flush_tlb_page(const struct vm_area_struct *vma,
}
/* Flush range of user pages on this cpu. */
-static inline void local_flush_tlb_pages(const struct vm_area_struct *vma,
+static inline void local_flush_tlb_pages(struct vm_area_struct *vma,
unsigned long addr,
unsigned long page_size,
unsigned long len)
@@ -117,10 +112,10 @@ extern void flush_tlb_all(void);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
-extern void flush_tlb_page(const struct vm_area_struct *, unsigned long);
-extern void flush_tlb_page_mm(const struct vm_area_struct *,
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+extern void flush_tlb_page_mm(struct vm_area_struct *,
struct mm_struct *, unsigned long);
-extern void flush_tlb_range(const struct vm_area_struct *,
+extern void flush_tlb_range(struct vm_area_struct *,
unsigned long start, unsigned long end);
#define flush_tlb() flush_tlb_current_task()
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index ef34d2caa5b..c3dd275f25e 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -114,45 +114,75 @@ struct exception_table_entry {
extern int fixup_exception(struct pt_regs *regs);
/*
- * We return the __get_user_N function results in a structure,
- * thus in r0 and r1. If "err" is zero, "val" is the result
- * of the read; otherwise, "err" is -EFAULT.
- *
- * We rarely need 8-byte values on a 32-bit architecture, but
- * we size the structure to accommodate. In practice, for the
- * the smaller reads, we can zero the high word for free, and
- * the caller will ignore it by virtue of casting anyway.
+ * Support macros for __get_user().
+ *
+ * Implementation note: The "case 8" logic of casting to the type of
+ * the result of subtracting the value from itself is basically a way
+ * of keeping all integer types the same, but casting any pointers to
+ * ptrdiff_t, i.e. also an integer type. This way there are no
+ * questionable casts seen by the compiler on an ILP32 platform.
+ *
+ * Note that __get_user() and __put_user() assume proper alignment.
*/
-struct __get_user {
- unsigned long long val;
- int err;
-};
-/*
- * FIXME: we should express these as inline extended assembler, since
- * they're fundamentally just a variable dereference and some
- * supporting exception_table gunk. Note that (a la i386) we can
- * extend the copy_to_user and copy_from_user routines to call into
- * such extended assembler routines, though we will have to use a
- * different return code in that case (1, 2, or 4, rather than -EFAULT).
- */
-extern struct __get_user __get_user_1(const void __user *);
-extern struct __get_user __get_user_2(const void __user *);
-extern struct __get_user __get_user_4(const void __user *);
-extern struct __get_user __get_user_8(const void __user *);
-extern int __put_user_1(long, void __user *);
-extern int __put_user_2(long, void __user *);
-extern int __put_user_4(long, void __user *);
-extern int __put_user_8(long long, void __user *);
-
-/* Unimplemented routines to cause linker failures */
-extern struct __get_user __get_user_bad(void);
-extern int __put_user_bad(void);
+#ifdef __LP64__
+#define _ASM_PTR ".quad"
+#else
+#define _ASM_PTR ".long"
+#endif
+
+#define __get_user_asm(OP, x, ptr, ret) \
+ asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n" \
+ ".pushsection .fixup,\"ax\"\n" \
+ "0: { movei %1, 0; movei %0, %3 }\n" \
+ "j 9f\n" \
+ ".section __ex_table,\"a\"\n" \
+ _ASM_PTR " 1b, 0b\n" \
+ ".popsection\n" \
+ "9:" \
+ : "=r" (ret), "=r" (x) \
+ : "r" (ptr), "i" (-EFAULT))
+
+#ifdef __tilegx__
+#define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret)
+#define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret)
+#define __get_user_4(x, ptr, ret) __get_user_asm(ld4u, x, ptr, ret)
+#define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret)
+#else
+#define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
+#define __get_user_2(x, ptr, ret) __get_user_asm(lh_u, x, ptr, ret)
+#define __get_user_4(x, ptr, ret) __get_user_asm(lw, x, ptr, ret)
+#ifdef __LITTLE_ENDIAN
+#define __lo32(a, b) a
+#define __hi32(a, b) b
+#else
+#define __lo32(a, b) b
+#define __hi32(a, b) a
+#endif
+#define __get_user_8(x, ptr, ret) \
+ ({ \
+ unsigned int __a, __b; \
+ asm volatile("1: { lw %1, %3; addi %2, %3, 4 }\n" \
+ "2: { lw %2, %2; movei %0, 0 }\n" \
+ ".pushsection .fixup,\"ax\"\n" \
+ "0: { movei %1, 0; movei %2, 0 }\n" \
+ "{ movei %0, %4; j 9f }\n" \
+ ".section __ex_table,\"a\"\n" \
+ ".word 1b, 0b\n" \
+ ".word 2b, 0b\n" \
+ ".popsection\n" \
+ "9:" \
+ : "=r" (ret), "=r" (__a), "=&r" (__b) \
+ : "r" (ptr), "i" (-EFAULT)); \
+ (x) = (__typeof(x))(__typeof((x)-(x))) \
+ (((u64)__hi32(__a, __b) << 32) | \
+ __lo32(__a, __b)); \
+ })
+#endif
+
+extern int __get_user_bad(void)
+ __attribute__((warning("sizeof __get_user argument not 1, 2, 4 or 8")));
-/*
- * Careful: we have to cast the result to the type of the pointer
- * for sign reasons.
- */
/**
* __get_user: - Get a simple variable from user space, with less checking.
* @x: Variable to store result.
@@ -174,30 +204,62 @@ extern int __put_user_bad(void);
* function.
*/
#define __get_user(x, ptr) \
-({ struct __get_user __ret; \
- __typeof__(*(ptr)) const __user *__gu_addr = (ptr); \
- __chk_user_ptr(__gu_addr); \
- switch (sizeof(*(__gu_addr))) { \
- case 1: \
- __ret = __get_user_1(__gu_addr); \
- break; \
- case 2: \
- __ret = __get_user_2(__gu_addr); \
- break; \
- case 4: \
- __ret = __get_user_4(__gu_addr); \
- break; \
- case 8: \
- __ret = __get_user_8(__gu_addr); \
- break; \
- default: \
- __ret = __get_user_bad(); \
- break; \
- } \
- (x) = (__typeof__(*__gu_addr)) (__typeof__(*__gu_addr - *__gu_addr)) \
- __ret.val; \
- __ret.err; \
-})
+ ({ \
+ int __ret; \
+ __chk_user_ptr(ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: __get_user_1(x, ptr, __ret); break; \
+ case 2: __get_user_2(x, ptr, __ret); break; \
+ case 4: __get_user_4(x, ptr, __ret); break; \
+ case 8: __get_user_8(x, ptr, __ret); break; \
+ default: __ret = __get_user_bad(); break; \
+ } \
+ __ret; \
+ })
+
+/* Support macros for __put_user(). */
+
+#define __put_user_asm(OP, x, ptr, ret) \
+ asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n" \
+ ".pushsection .fixup,\"ax\"\n" \
+ "0: { movei %0, %3; j 9f }\n" \
+ ".section __ex_table,\"a\"\n" \
+ _ASM_PTR " 1b, 0b\n" \
+ ".popsection\n" \
+ "9:" \
+ : "=r" (ret) \
+ : "r" (ptr), "r" (x), "i" (-EFAULT))
+
+#ifdef __tilegx__
+#define __put_user_1(x, ptr, ret) __put_user_asm(st1, x, ptr, ret)
+#define __put_user_2(x, ptr, ret) __put_user_asm(st2, x, ptr, ret)
+#define __put_user_4(x, ptr, ret) __put_user_asm(st4, x, ptr, ret)
+#define __put_user_8(x, ptr, ret) __put_user_asm(st, x, ptr, ret)
+#else
+#define __put_user_1(x, ptr, ret) __put_user_asm(sb, x, ptr, ret)
+#define __put_user_2(x, ptr, ret) __put_user_asm(sh, x, ptr, ret)
+#define __put_user_4(x, ptr, ret) __put_user_asm(sw, x, ptr, ret)
+#define __put_user_8(x, ptr, ret) \
+ ({ \
+ u64 __x = (__typeof((x)-(x)))(x); \
+ int __lo = (int) __x, __hi = (int) (__x >> 32); \
+ asm volatile("1: { sw %1, %2; addi %0, %1, 4 }\n" \
+ "2: { sw %0, %3; movei %0, 0 }\n" \
+ ".pushsection .fixup,\"ax\"\n" \
+ "0: { movei %0, %4; j 9f }\n" \
+ ".section __ex_table,\"a\"\n" \
+ ".word 1b, 0b\n" \
+ ".word 2b, 0b\n" \
+ ".popsection\n" \
+ "9:" \
+ : "=&r" (ret) \
+ : "r" (ptr), "r" (__lo32(__lo, __hi)), \
+ "r" (__hi32(__lo, __hi)), "i" (-EFAULT)); \
+ })
+#endif
+
+extern int __put_user_bad(void)
+ __attribute__((warning("sizeof __put_user argument not 1, 2, 4 or 8")));
/**
* __put_user: - Write a simple value into user space, with less checking.
@@ -217,39 +279,19 @@ extern int __put_user_bad(void);
* function.
*
* Returns zero on success, or -EFAULT on error.
- *
- * Implementation note: The "case 8" logic of casting to the type of
- * the result of subtracting the value from itself is basically a way
- * of keeping all integer types the same, but casting any pointers to
- * ptrdiff_t, i.e. also an integer type. This way there are no
- * questionable casts seen by the compiler on an ILP32 platform.
*/
#define __put_user(x, ptr) \
({ \
- int __pu_err = 0; \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- typeof(*__pu_addr) __pu_val = (x); \
- __chk_user_ptr(__pu_addr); \
- switch (sizeof(__pu_val)) { \
- case 1: \
- __pu_err = __put_user_1((long)__pu_val, __pu_addr); \
- break; \
- case 2: \
- __pu_err = __put_user_2((long)__pu_val, __pu_addr); \
- break; \
- case 4: \
- __pu_err = __put_user_4((long)__pu_val, __pu_addr); \
- break; \
- case 8: \
- __pu_err = \
- __put_user_8((__typeof__(__pu_val - __pu_val))__pu_val,\
- __pu_addr); \
- break; \
- default: \
- __pu_err = __put_user_bad(); \
- break; \
+ int __ret; \
+ __chk_user_ptr(ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: __put_user_1(x, ptr, __ret); break; \
+ case 2: __put_user_2(x, ptr, __ret); break; \
+ case 4: __put_user_4(x, ptr, __ret); break; \
+ case 8: __put_user_8(x, ptr, __ret); break; \
+ default: __ret = __put_user_bad(); break; \
} \
- __pu_err; \
+ __ret; \
})
/*
@@ -378,7 +420,7 @@ static inline unsigned long __must_check copy_from_user(void *to,
/**
* __copy_in_user() - copy data within user space, with less checking.
* @to: Destination address, in user space.
- * @from: Source address, in kernel space.
+ * @from: Source address, in user space.
* @n: Number of bytes to copy.
*
* Context: User context only. This function may sleep.
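A minimal usage sketch for the reworked accessors follows; it is not part of the patch, and example_read_u32() is a hypothetical helper. It assumes the caller has already validated the pointer (e.g. via access_ok()), since __get_user()/__put_user() deliberately skip that check.

static int example_read_u32(const u32 __user *uptr, u32 *out)
{
	u32 val;

	/* sizeof(*uptr) == 4, so this expands to __get_user_4(). */
	if (__get_user(val, uptr))
		return -EFAULT;	/* the fixup path zeroed val and set the error */
	*out = val;
	return 0;
}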
diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h
index f70bf1c541f..a017246ca0c 100644
--- a/arch/tile/include/asm/unistd.h
+++ b/arch/tile/include/asm/unistd.h
@@ -24,8 +24,8 @@
#include <asm-generic/unistd.h>
/* Additional Tilera-specific syscalls. */
-#define __NR_flush_cache (__NR_arch_specific_syscall + 1)
-__SYSCALL(__NR_flush_cache, sys_flush_cache)
+#define __NR_cacheflush (__NR_arch_specific_syscall + 1)
+__SYSCALL(__NR_cacheflush, sys_cacheflush)
#ifndef __tilegx__
/* "Fast" syscalls provide atomic support for 32-bit chips. */
diff --git a/arch/tile/include/hv/drv_xgbe_intf.h b/arch/tile/include/hv/drv_xgbe_intf.h
index f13188ac281..2a20b266d94 100644
--- a/arch/tile/include/hv/drv_xgbe_intf.h
+++ b/arch/tile/include/hv/drv_xgbe_intf.h
@@ -460,7 +460,7 @@ typedef void* lepp_comp_t;
* linux's "MAX_SKB_FRAGS", and presumably over-estimates by one, for
* our page size of exactly 65536. We add one for a "body" fragment.
*/
-#define LEPP_MAX_FRAGS (65536 / HV_PAGE_SIZE_SMALL + 2 + 1)
+#define LEPP_MAX_FRAGS (65536 / HV_DEFAULT_PAGE_SIZE_SMALL + 2 + 1)
/** Total number of bytes needed for an lepp_tso_cmd_t. */
#define LEPP_TSO_CMD_SIZE(num_frags, header_size) \
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h
index 72ec1e972f1..ccd847e2347 100644
--- a/arch/tile/include/hv/hypervisor.h
+++ b/arch/tile/include/hv/hypervisor.h
@@ -17,8 +17,8 @@
* The hypervisor's public API.
*/
-#ifndef _TILE_HV_H
-#define _TILE_HV_H
+#ifndef _HV_HV_H
+#define _HV_HV_H
#include <arch/chip.h>
@@ -42,25 +42,45 @@
*/
#define HV_L1_SPAN (__HV_SIZE_ONE << HV_LOG2_L1_SPAN)
-/** The log2 of the size of small pages, in bytes. This value should
- * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
+/** The log2 of the initial size of small pages, in bytes.
+ * See HV_DEFAULT_PAGE_SIZE_SMALL.
*/
-#define HV_LOG2_PAGE_SIZE_SMALL 16
+#define HV_LOG2_DEFAULT_PAGE_SIZE_SMALL 16
-/** The size of small pages, in bytes. This value should be verified
+/** The initial size of small pages, in bytes. This value should be verified
* at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
+ * It may also be modified when installing a new context.
*/
-#define HV_PAGE_SIZE_SMALL (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_SMALL)
+#define HV_DEFAULT_PAGE_SIZE_SMALL \
+ (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_SMALL)
-/** The log2 of the size of large pages, in bytes. This value should be
- * verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
+/** The log2 of the initial size of large pages, in bytes.
+ * See HV_DEFAULT_PAGE_SIZE_LARGE.
*/
-#define HV_LOG2_PAGE_SIZE_LARGE 24
+#define HV_LOG2_DEFAULT_PAGE_SIZE_LARGE 24
-/** The size of large pages, in bytes. This value should be verified
+/** The initial size of large pages, in bytes. This value should be verified
* at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
+ * It may also be modified when installing a new context.
*/
-#define HV_PAGE_SIZE_LARGE (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_LARGE)
+#define HV_DEFAULT_PAGE_SIZE_LARGE \
+ (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_LARGE)
+
+#if CHIP_VA_WIDTH() > 32
+
+/** The log2 of the initial size of jumbo pages, in bytes.
+ * See HV_DEFAULT_PAGE_SIZE_JUMBO.
+ */
+#define HV_LOG2_DEFAULT_PAGE_SIZE_JUMBO 32
+
+/** The initial size of jumbo pages, in bytes. This value should
+ * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO).
+ * It may also be modified when installing a new context.
+ */
+#define HV_DEFAULT_PAGE_SIZE_JUMBO \
+ (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_JUMBO)
+
+#endif
/** The log2 of the granularity at which page tables must be aligned;
* in other words, the CPA for a page table must have this many zero
@@ -280,8 +300,11 @@
#define HV_DISPATCH_GET_IPI_PTE 56
#endif
+/** hv_set_pte_super_shift */
+#define HV_DISPATCH_SET_PTE_SUPER_SHIFT 57
+
/** One more than the largest dispatch value */
-#define _HV_DISPATCH_END 57
+#define _HV_DISPATCH_END 58
#ifndef __ASSEMBLER__
@@ -401,7 +424,18 @@ typedef enum {
* that the temperature has hit an upper limit and is no longer being
* accurately tracked.
*/
- HV_SYSCONF_BOARD_TEMP = 6
+ HV_SYSCONF_BOARD_TEMP = 6,
+
+ /** Legal page size bitmask for hv_install_context().
+ * For example, if 16KB and 64KB small pages are supported,
+ * it would return "HV_CTX_PG_SM_16K | HV_CTX_PG_SM_64K".
+ */
+ HV_SYSCONF_VALID_PAGE_SIZES = 7,
+
+ /** The size of jumbo pages, in bytes.
+ * If no jumbo pages are available, zero will be returned.
+ */
+ HV_SYSCONF_PAGE_SIZE_JUMBO = 8,
} HV_SysconfQuery;
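A hedged sketch of consuming the new sysconf key before requesting a non-default page size; the helper name is hypothetical, and the return value of hv_sysconf() is assumed to fit in a plain long here.

static int example_supports_64k_small_pages(void)
{
	/* Per the comment above, this is a bitmask of HV_CTX_PG_SM_* flags. */
	long valid = hv_sysconf(HV_SYSCONF_VALID_PAGE_SIZES);

	return (valid & HV_CTX_PG_SM_64K) != 0;
}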
@@ -474,7 +508,19 @@ typedef enum {
HV_CONFSTR_SWITCH_CONTROL = 14,
/** Chip revision level. */
- HV_CONFSTR_CHIP_REV = 15
+ HV_CONFSTR_CHIP_REV = 15,
+
+ /** CPU module part number. */
+ HV_CONFSTR_CPUMOD_PART_NUM = 16,
+
+ /** CPU module serial number. */
+ HV_CONFSTR_CPUMOD_SERIAL_NUM = 17,
+
+ /** CPU module revision level. */
+ HV_CONFSTR_CPUMOD_REV = 18,
+
+ /** Human-readable CPU module description. */
+ HV_CONFSTR_CPUMOD_DESC = 19
} HV_ConfstrQuery;
@@ -494,11 +540,16 @@ int hv_confstr(HV_ConfstrQuery query, HV_VirtAddr buf, int len);
/** Tile coordinate */
typedef struct
{
+#ifndef __BIG_ENDIAN__
/** X coordinate, relative to supervisor's top-left coordinate */
int x;
/** Y coordinate, relative to supervisor's top-left coordinate */
int y;
+#else
+ int y;
+ int x;
+#endif
} HV_Coord;
@@ -649,6 +700,12 @@ void hv_set_rtc(HV_RTCTime time);
* new page table does not need to contain any mapping for the
* hv_install_context address itself.
*
+ * At most one HV_CTX_PG_SM_* flag may be specified in "flags";
+ * if multiple flags are specified, HV_EINVAL is returned.
+ * Specifying none of the flags results in using the default page size.
+ * All cores participating in a given client must request the same
+ * page size, or the results are undefined.
+ *
* @param page_table Root of the page table.
* @param access PTE providing info on how to read the page table. This
* value must be consistent between multiple tiles sharing a page table,
@@ -667,8 +724,36 @@ int hv_install_context(HV_PhysAddr page_table, HV_PTE access, HV_ASID asid,
#define HV_CTX_DIRECTIO 0x1 /**< Direct I/O requests are accepted from
PL0. */
+#define HV_CTX_PG_SM_4K 0x10 /**< Use 4K small pages, if available. */
+#define HV_CTX_PG_SM_16K 0x20 /**< Use 16K small pages, if available. */
+#define HV_CTX_PG_SM_64K 0x40 /**< Use 64K small pages, if available. */
+#define HV_CTX_PG_SM_MASK 0xf0 /**< Mask of all possible small pages. */
+
#ifndef __ASSEMBLER__
+
+/** Set the number of pages ganged together by HV_PTE_SUPER at a
+ * particular level of the page table.
+ *
+ * The current TILE-Gx hardware only supports powers of four
+ * (i.e. log2_count must be a multiple of two), and the requested
+ * "super" page size must be less than the span of the next level in
+ * the page table. The largest size that can be requested is 64GB.
+ *
+ * The shift value is initially "0" for all page table levels,
+ * indicating that the HV_PTE_SUPER bit is effectively ignored.
+ *
+ * If you change the count from one non-zero value to another, the
+ * hypervisor will flush the entire TLB and TSB to avoid confusion.
+ *
+ * @param level Page table level (0, 1, or 2)
+ * @param log2_count Base-2 log of the number of pages to gang together,
+ * i.e. how much to shift left the base page size for the super page size.
+ * @return Zero on success, or a hypervisor error code on failure.
+ */
+int hv_set_pte_super_shift(int level, int log2_count);
+
+
/** Value returned from hv_inquire_context(). */
typedef struct
{
@@ -986,8 +1071,13 @@ HV_VirtAddrRange hv_inquire_virtual(int idx);
/** A range of ASID values. */
typedef struct
{
+#ifndef __BIG_ENDIAN__
HV_ASID start; /**< First ASID in the range. */
unsigned int size; /**< Number of ASIDs. Zero for an invalid range. */
+#else
+ unsigned int size; /**< Number of ASIDs. Zero for an invalid range. */
+ HV_ASID start; /**< First ASID in the range. */
+#endif
} HV_ASIDRange;
/** Returns information about a range of ASIDs.
@@ -1238,11 +1328,14 @@ HV_Errno hv_set_command_line(HV_VirtAddr buf, int length);
* with the existing priority pages) or "red/black" (if they don't).
* The bitmask provides information on which parts of the cache
* have been used for pinned pages so far on this tile; if (1 << N)
- * appears in the bitmask, that indicates that a page has been marked
- * "priority" whose PFN equals N, mod 8.
+ * appears in the bitmask, that indicates that a 4KB region of the
+ * cache starting at (N * 4KB) is in use by a "priority" page.
+ * The portion of cache used by a particular page can be computed
+ * by taking the page's PA, modulo CHIP_L2_CACHE_SIZE(), and setting
+ * all the "4KB" bits corresponding to the actual page size.
* @param bitmask A bitmap of priority page set values
*/
-void hv_set_caching(unsigned int bitmask);
+void hv_set_caching(unsigned long bitmask);
/** Zero out a specified number of pages.
@@ -1308,6 +1401,7 @@ typedef enum
/** Message recipient. */
typedef struct
{
+#ifndef __BIG_ENDIAN__
/** X coordinate, relative to supervisor's top-left coordinate */
unsigned int x:11;
@@ -1316,6 +1410,11 @@ typedef struct
/** Status of this recipient */
HV_Recip_State state:10;
+#else /* __BIG_ENDIAN__ */
+ HV_Recip_State state:10;
+ unsigned int y:11;
+ unsigned int x:11;
+#endif
} HV_Recipient;
/** Send a message to a set of recipients.
@@ -1851,12 +1950,12 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
#define HV_PTE_INDEX_USER 10 /**< Page is user-accessible */
#define HV_PTE_INDEX_ACCESSED 11 /**< Page has been accessed */
#define HV_PTE_INDEX_DIRTY 12 /**< Page has been written */
- /* Bits 13-15 are reserved for
+ /* Bits 13-14 are reserved for
future use. */
+#define HV_PTE_INDEX_SUPER 15 /**< Pages ganged together for TLB */
#define HV_PTE_INDEX_MODE 16 /**< Page mode; see HV_PTE_MODE_xxx */
#define HV_PTE_MODE_BITS 3 /**< Number of bits in mode */
- /* Bit 19 is reserved for
- future use. */
+#define HV_PTE_INDEX_CLIENT2 19 /**< Page client state 2 */
#define HV_PTE_INDEX_LOTAR 20 /**< Page's LOTAR; must be high bits
of word */
#define HV_PTE_LOTAR_BITS 12 /**< Number of bits in a LOTAR */
@@ -1869,15 +1968,6 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
of word */
#define HV_PTE_PTFN_BITS 29 /**< Number of bits in a PTFN */
-/** Position of the PFN field within the PTE (subset of the PTFN). */
-#define HV_PTE_INDEX_PFN (HV_PTE_INDEX_PTFN + (HV_LOG2_PAGE_SIZE_SMALL - \
- HV_LOG2_PAGE_TABLE_ALIGN))
-
-/** Length of the PFN field within the PTE (subset of the PTFN). */
-#define HV_PTE_INDEX_PFN_BITS (HV_PTE_INDEX_PTFN_BITS - \
- (HV_LOG2_PAGE_SIZE_SMALL - \
- HV_LOG2_PAGE_TABLE_ALIGN))
-
/*
* Legal values for the PTE's mode field
*/
@@ -1957,7 +2047,10 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
/** Does this PTE map a page?
*
- * If this bit is set in the level-1 page table, the entry should be
+ * If this bit is set in a level-0 page table, the entry should be
+ * interpreted as a level-2 page table entry mapping a jumbo page.
+ *
+ * If this bit is set in a level-1 page table, the entry should be
* interpreted as a level-2 page table entry mapping a large page.
*
* This bit should not be modified by the client while PRESENT is set, as
@@ -1967,6 +2060,18 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
*/
#define HV_PTE_PAGE (__HV_PTE_ONE << HV_PTE_INDEX_PAGE)
+/** Does this PTE implicitly reference multiple pages?
+ *
+ * If this bit is set in the page table (either in the level-2 page table,
+ * or in a higher level page table in conjunction with the PAGE bit)
+ * then the PTE specifies a range of contiguous pages, not a single page.
+ * The hv_set_pte_super_shift() call allows you to specify the count for
+ * each level of the page table.
+ *
+ * Note: this bit is not supported on TILEPro systems.
+ */
+#define HV_PTE_SUPER (__HV_PTE_ONE << HV_PTE_INDEX_SUPER)
+
/** Is this a global (non-ASID) mapping?
*
* If this bit is set, the translations established by this PTE will
@@ -2046,6 +2151,13 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
*/
#define HV_PTE_CLIENT1 (__HV_PTE_ONE << HV_PTE_INDEX_CLIENT1)
+/** Client-private bit in PTE.
+ *
+ * This bit is guaranteed not to be inspected or modified by the
+ * hypervisor.
+ */
+#define HV_PTE_CLIENT2 (__HV_PTE_ONE << HV_PTE_INDEX_CLIENT2)
+
/** Non-coherent (NC) bit in PTE.
*
* If this bit is set, the mapping that is set up will be non-coherent
@@ -2178,8 +2290,10 @@ hv_pte_clear_##name(HV_PTE pte) \
*/
_HV_BIT(present, PRESENT)
_HV_BIT(page, PAGE)
+_HV_BIT(super, SUPER)
_HV_BIT(client0, CLIENT0)
_HV_BIT(client1, CLIENT1)
+_HV_BIT(client2, CLIENT2)
_HV_BIT(migrating, MIGRATING)
_HV_BIT(nc, NC)
_HV_BIT(readable, READABLE)
@@ -2222,40 +2336,11 @@ hv_pte_set_mode(HV_PTE pte, unsigned int val)
*
* This field contains the upper bits of the CPA (client physical
* address) of the target page; the complete CPA is this field with
- * HV_LOG2_PAGE_SIZE_SMALL zero bits appended to it.
- *
- * For PTEs in a level-1 page table where the Page bit is set, the
- * CPA must be aligned modulo the large page size.
- */
-static __inline unsigned int
-hv_pte_get_pfn(const HV_PTE pte)
-{
- return pte.val >> HV_PTE_INDEX_PFN;
-}
-
-
-/** Set the page frame number into a PTE. See hv_pte_get_pfn. */
-static __inline HV_PTE
-hv_pte_set_pfn(HV_PTE pte, unsigned int val)
-{
- /*
- * Note that the use of "PTFN" in the next line is intentional; we
- * don't want any garbage lower bits left in that field.
- */
- pte.val &= ~(((1ULL << HV_PTE_PTFN_BITS) - 1) << HV_PTE_INDEX_PTFN);
- pte.val |= (__hv64) val << HV_PTE_INDEX_PFN;
- return pte;
-}
-
-/** Get the page table frame number from the PTE.
- *
- * This field contains the upper bits of the CPA (client physical
- * address) of the target page table; the complete CPA is this field with
- * with HV_PAGE_TABLE_ALIGN zero bits appended to it.
+ * HV_LOG2_PAGE_TABLE_ALIGN zero bits appended to it.
*
- * For PTEs in a level-1 page table when the Page bit is not set, the
- * CPA must be aligned modulo the stricter of HV_PAGE_TABLE_ALIGN and
- * the level-2 page table size.
+ * For all PTEs in the lowest-level page table, and for all PTEs with
+ * the Page bit set in all page tables, the CPA must be aligned modulo
+ * the relevant page size.
*/
static __inline unsigned long
hv_pte_get_ptfn(const HV_PTE pte)
@@ -2263,7 +2348,6 @@ hv_pte_get_ptfn(const HV_PTE pte)
return pte.val >> HV_PTE_INDEX_PTFN;
}
-
/** Set the page table frame number into a PTE. See hv_pte_get_ptfn. */
static __inline HV_PTE
hv_pte_set_ptfn(HV_PTE pte, unsigned long val)
@@ -2273,6 +2357,20 @@ hv_pte_set_ptfn(HV_PTE pte, unsigned long val)
return pte;
}
+/** Get the client physical address from the PTE. See hv_pte_set_ptfn. */
+static __inline HV_PhysAddr
+hv_pte_get_pa(const HV_PTE pte)
+{
+ return (__hv64) hv_pte_get_ptfn(pte) << HV_LOG2_PAGE_TABLE_ALIGN;
+}
+
+/** Set the client physical address into a PTE. See hv_pte_get_ptfn. */
+static __inline HV_PTE
+hv_pte_set_pa(HV_PTE pte, HV_PhysAddr pa)
+{
+ return hv_pte_set_ptfn(pte, pa >> HV_LOG2_PAGE_TABLE_ALIGN);
+}
+
/** Get the remote tile caching this page.
*
@@ -2308,28 +2406,20 @@ hv_pte_set_lotar(HV_PTE pte, unsigned int val)
#endif /* !__ASSEMBLER__ */
-/** Converts a client physical address to a pfn. */
-#define HV_CPA_TO_PFN(p) ((p) >> HV_LOG2_PAGE_SIZE_SMALL)
-
-/** Converts a pfn to a client physical address. */
-#define HV_PFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_SIZE_SMALL)
-
/** Converts a client physical address to a ptfn. */
#define HV_CPA_TO_PTFN(p) ((p) >> HV_LOG2_PAGE_TABLE_ALIGN)
/** Converts a ptfn to a client physical address. */
#define HV_PTFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_TABLE_ALIGN)
-/** Converts a ptfn to a pfn. */
-#define HV_PTFN_TO_PFN(p) \
- ((p) >> (HV_LOG2_PAGE_SIZE_SMALL - HV_LOG2_PAGE_TABLE_ALIGN))
-
-/** Converts a pfn to a ptfn. */
-#define HV_PFN_TO_PTFN(p) \
- ((p) << (HV_LOG2_PAGE_SIZE_SMALL - HV_LOG2_PAGE_TABLE_ALIGN))
-
#if CHIP_VA_WIDTH() > 32
+/*
+ * Note that we currently do not allow customizing the page size
+ * of the L0 pages, but fix them at 4GB, so we do not use the
+ * "_HV_xxx" nomenclature for the L0 macros.
+ */
+
/** Log number of HV_PTE entries in L0 page table */
#define HV_LOG2_L0_ENTRIES (CHIP_VA_WIDTH() - HV_LOG2_L1_SPAN)
@@ -2359,69 +2449,104 @@ hv_pte_set_lotar(HV_PTE pte, unsigned int val)
#endif /* CHIP_VA_WIDTH() > 32 */
/** Log number of HV_PTE entries in L1 page table */
-#define HV_LOG2_L1_ENTRIES (HV_LOG2_L1_SPAN - HV_LOG2_PAGE_SIZE_LARGE)
+#define _HV_LOG2_L1_ENTRIES(log2_page_size_large) \
+ (HV_LOG2_L1_SPAN - log2_page_size_large)
/** Number of HV_PTE entries in L1 page table */
-#define HV_L1_ENTRIES (1 << HV_LOG2_L1_ENTRIES)
+#define _HV_L1_ENTRIES(log2_page_size_large) \
+ (1 << _HV_LOG2_L1_ENTRIES(log2_page_size_large))
/** Log size of L1 page table in bytes */
-#define HV_LOG2_L1_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L1_ENTRIES)
+#define _HV_LOG2_L1_SIZE(log2_page_size_large) \
+ (HV_LOG2_PTE_SIZE + _HV_LOG2_L1_ENTRIES(log2_page_size_large))
/** Size of L1 page table in bytes */
-#define HV_L1_SIZE (1 << HV_LOG2_L1_SIZE)
+#define _HV_L1_SIZE(log2_page_size_large) \
+ (1 << _HV_LOG2_L1_SIZE(log2_page_size_large))
/** Log number of HV_PTE entries in level-2 page table */
-#define HV_LOG2_L2_ENTRIES (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL)
+#define _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small) \
+ (log2_page_size_large - log2_page_size_small)
/** Number of HV_PTE entries in level-2 page table */
-#define HV_L2_ENTRIES (1 << HV_LOG2_L2_ENTRIES)
+#define _HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) \
+ (1 << _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small))
/** Log size of level-2 page table in bytes */
-#define HV_LOG2_L2_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L2_ENTRIES)
+#define _HV_LOG2_L2_SIZE(log2_page_size_large, log2_page_size_small) \
+ (HV_LOG2_PTE_SIZE + \
+ _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small))
/** Size of level-2 page table in bytes */
-#define HV_L2_SIZE (1 << HV_LOG2_L2_SIZE)
+#define _HV_L2_SIZE(log2_page_size_large, log2_page_size_small) \
+ (1 << _HV_LOG2_L2_SIZE(log2_page_size_large, log2_page_size_small))
#ifdef __ASSEMBLER__
#if CHIP_VA_WIDTH() > 32
/** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
- (((va) >> HV_LOG2_PAGE_SIZE_LARGE) & (HV_L1_ENTRIES - 1))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+ (((va) >> log2_page_size_large) & (_HV_L1_ENTRIES(log2_page_size_large) - 1))
#else /* CHIP_VA_WIDTH() > 32 */
/** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
- (((va) >> HV_LOG2_PAGE_SIZE_LARGE))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+ (((va) >> log2_page_size_large))
#endif /* CHIP_VA_WIDTH() > 32 */
/** Index in level-2 page table for a specific VA */
-#define HV_L2_INDEX(va) \
- (((va) >> HV_LOG2_PAGE_SIZE_SMALL) & (HV_L2_ENTRIES - 1))
+#define _HV_L2_INDEX(va, log2_page_size_large, log2_page_size_small) \
+ (((va) >> log2_page_size_small) & \
+ (_HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) - 1))
#else /* __ASSEMBLER __ */
#if CHIP_VA_WIDTH() > 32
/** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
- (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_LARGE) & (HV_L1_ENTRIES - 1))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+ (((HV_VirtAddr)(va) >> log2_page_size_large) & \
+ (_HV_L1_ENTRIES(log2_page_size_large) - 1))
#else /* CHIP_VA_WIDTH() > 32 */
/** Index in L1 for a specific VA */
-#define HV_L1_INDEX(va) \
- (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_LARGE))
+#define _HV_L1_INDEX(va, log2_page_size_large) \
+ (((HV_VirtAddr)(va) >> log2_page_size_large))
#endif /* CHIP_VA_WIDTH() > 32 */
/** Index in level-2 page table for a specific VA */
-#define HV_L2_INDEX(va) \
- (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_SMALL) & (HV_L2_ENTRIES - 1))
+#define _HV_L2_INDEX(va, log2_page_size_large, log2_page_size_small) \
+ (((HV_VirtAddr)(va) >> log2_page_size_small) & \
+ (_HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) - 1))
#endif /* __ASSEMBLER __ */
-#endif /* _TILE_HV_H */
+/** Position of the PFN field within the PTE (subset of the PTFN). */
+#define _HV_PTE_INDEX_PFN(log2_page_size) \
+ (HV_PTE_INDEX_PTFN + (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+/** Length of the PFN field within the PTE (subset of the PTFN). */
+#define _HV_PTE_INDEX_PFN_BITS(log2_page_size) \
+ (HV_PTE_INDEX_PTFN_BITS - (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+/** Converts a client physical address to a pfn. */
+#define _HV_CPA_TO_PFN(p, log2_page_size) ((p) >> log2_page_size)
+
+/** Converts a pfn to a client physical address. */
+#define _HV_PFN_TO_CPA(p, log2_page_size) \
+ (((HV_PhysAddr)(p)) << log2_page_size)
+
+/** Converts a ptfn to a pfn. */
+#define _HV_PTFN_TO_PFN(p, log2_page_size) \
+ ((p) >> (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+/** Converts a pfn to a ptfn. */
+#define _HV_PFN_TO_PTFN(p, log2_page_size) \
+ ((p) << (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
+
+#endif /* _HV_HV_H */
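To show how the parameterized replacements for the old fixed-size macros are meant to be used, here is a brief sketch; it is not in the patch, and the 64KB (log2 == 16) value is only the assumed default that a real client would confirm via hv_sysconf().

#define EXAMPLE_LOG2_PAGE_SIZE_SMALL 16	/* assumed default; query at runtime */

static inline HV_PhysAddr example_pfn_to_cpa(unsigned long pfn)
{
	return _HV_PFN_TO_CPA(pfn, EXAMPLE_LOG2_PAGE_SIZE_SMALL);
}

static inline unsigned long example_cpa_to_pfn(HV_PhysAddr cpa)
{
	return _HV_CPA_TO_PFN(cpa, EXAMPLE_LOG2_PAGE_SIZE_SMALL);
}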
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile
index 0d826faf8f3..5de99248d8d 100644
--- a/arch/tile/kernel/Makefile
+++ b/arch/tile/kernel/Makefile
@@ -9,10 +9,9 @@ obj-y := backtrace.o entry.o irq.o messaging.o \
intvec_$(BITS).o regs_$(BITS).o tile-desc_$(BITS).o
obj-$(CONFIG_HARDWALL) += hardwall.o
-obj-$(CONFIG_TILEGX) += futex_64.o
obj-$(CONFIG_COMPAT) += compat.o compat_signal.o
obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel_$(BITS).o
obj-$(CONFIG_PCI) += pci.o
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index ec91568df88..133c4b56a99 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -100,8 +100,9 @@ STD_ENTRY(smp_nap)
*/
STD_ENTRY(_cpu_idle)
movei r1, 1
+ IRQ_ENABLE_LOAD(r2, r3)
mtspr INTERRUPT_CRITICAL_SECTION, r1
- IRQ_ENABLE(r2, r3) /* unmask, but still with ICS set */
+ IRQ_ENABLE_APPLY(r2, r3) /* unmask, but still with ICS set */
mtspr INTERRUPT_CRITICAL_SECTION, zero
.global _cpu_idle_nap
_cpu_idle_nap:
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index 8c41891aab3..20273ee37de 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -33,59 +33,157 @@
/*
- * This data structure tracks the rectangle data, etc., associated
- * one-to-one with a "struct file *" from opening HARDWALL_FILE.
+ * Implement a per-cpu "hardwall" resource class such as UDN or IPI.
+ * We use "hardwall" nomenclature throughout for historical reasons.
+ * The lock here controls access to the list data structure as well as
+ * to the items on the list.
+ */
+struct hardwall_type {
+ int index;
+ int is_xdn;
+ int is_idn;
+ int disabled;
+ const char *name;
+ struct list_head list;
+ spinlock_t lock;
+ struct proc_dir_entry *proc_dir;
+};
+
+enum hardwall_index {
+ HARDWALL_UDN = 0,
+#ifndef __tilepro__
+ HARDWALL_IDN = 1,
+ HARDWALL_IPI = 2,
+#endif
+ _HARDWALL_TYPES
+};
+
+static struct hardwall_type hardwall_types[] = {
+ { /* user-space access to UDN */
+ 0,
+ 1,
+ 0,
+ 0,
+ "udn",
+ LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
+ __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock),
+ NULL
+ },
+#ifndef __tilepro__
+ { /* user-space access to IDN */
+ 1,
+ 1,
+ 1,
+ 1, /* disabled pending hypervisor support */
+ "idn",
+ LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
+ __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock),
+ NULL
+ },
+ { /* access to user-space IPI */
+ 2,
+ 0,
+ 0,
+ 0,
+ "ipi",
+ LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
+ __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock),
+ NULL
+ },
+#endif
+};
+
+/*
+ * This data structure tracks the cpu data, etc., associated
+ * one-to-one with a "struct file *" from opening a hardwall device file.
* Note that the file's private data points back to this structure.
*/
struct hardwall_info {
- struct list_head list; /* "rectangles" list */
+ struct list_head list; /* for hardwall_types.list */
struct list_head task_head; /* head of tasks in this hardwall */
- struct cpumask cpumask; /* cpus in the rectangle */
+ struct hardwall_type *type; /* type of this resource */
+ struct cpumask cpumask; /* cpus reserved */
+ int id; /* integer id for this hardwall */
+ int teardown_in_progress; /* are we tearing this one down? */
+
+ /* Remaining fields only valid for user-network resources. */
int ulhc_x; /* upper left hand corner x coord */
int ulhc_y; /* upper left hand corner y coord */
int width; /* rectangle width */
int height; /* rectangle height */
- int id; /* integer id for this hardwall */
- int teardown_in_progress; /* are we tearing this one down? */
+#if CHIP_HAS_REV1_XDN()
+ atomic_t xdn_pending_count; /* cores in phase 1 of drain */
+#endif
};
-/* Currently allocated hardwall rectangles */
-static LIST_HEAD(rectangles);
/* /proc/tile/hardwall */
static struct proc_dir_entry *hardwall_proc_dir;
/* Functions to manage files in /proc/tile/hardwall. */
-static void hardwall_add_proc(struct hardwall_info *rect);
-static void hardwall_remove_proc(struct hardwall_info *rect);
-
-/*
- * Guard changes to the hardwall data structures.
- * This could be finer grained (e.g. one lock for the list of hardwall
- * rectangles, then separate embedded locks for each one's list of tasks),
- * but there are subtle correctness issues when trying to start with
- * a task's "hardwall" pointer and lock the correct rectangle's embedded
- * lock in the presence of a simultaneous deactivation, so it seems
- * easier to have a single lock, given that none of these data
- * structures are touched very frequently during normal operation.
- */
-static DEFINE_SPINLOCK(hardwall_lock);
+static void hardwall_add_proc(struct hardwall_info *);
+static void hardwall_remove_proc(struct hardwall_info *);
/* Allow disabling UDN access. */
-static int udn_disabled;
static int __init noudn(char *str)
{
pr_info("User-space UDN access is disabled\n");
- udn_disabled = 1;
+ hardwall_types[HARDWALL_UDN].disabled = 1;
return 0;
}
early_param("noudn", noudn);
+#ifndef __tilepro__
+/* Allow disabling IDN access. */
+static int __init noidn(char *str)
+{
+ pr_info("User-space IDN access is disabled\n");
+ hardwall_types[HARDWALL_IDN].disabled = 1;
+ return 0;
+}
+early_param("noidn", noidn);
+
+/* Allow disabling IPI access. */
+static int __init noipi(char *str)
+{
+ pr_info("User-space IPI access is disabled\n");
+ hardwall_types[HARDWALL_IPI].disabled = 1;
+ return 0;
+}
+early_param("noipi", noipi);
+#endif
+
/*
- * Low-level primitives
+ * Low-level primitives for UDN/IDN
*/
+#ifdef __tilepro__
+#define mtspr_XDN(hwt, name, val) \
+ do { (void)(hwt); __insn_mtspr(SPR_UDN_##name, (val)); } while (0)
+#define mtspr_MPL_XDN(hwt, name, val) \
+ do { (void)(hwt); __insn_mtspr(SPR_MPL_UDN_##name, (val)); } while (0)
+#define mfspr_XDN(hwt, name) \
+ ((void)(hwt), __insn_mfspr(SPR_UDN_##name))
+#else
+#define mtspr_XDN(hwt, name, val) \
+ do { \
+ if ((hwt)->is_idn) \
+ __insn_mtspr(SPR_IDN_##name, (val)); \
+ else \
+ __insn_mtspr(SPR_UDN_##name, (val)); \
+ } while (0)
+#define mtspr_MPL_XDN(hwt, name, val) \
+ do { \
+ if ((hwt)->is_idn) \
+ __insn_mtspr(SPR_MPL_IDN_##name, (val)); \
+ else \
+ __insn_mtspr(SPR_MPL_UDN_##name, (val)); \
+ } while (0)
+#define mfspr_XDN(hwt, name) \
+ ((hwt)->is_idn ? __insn_mfspr(SPR_IDN_##name) : __insn_mfspr(SPR_UDN_##name))
+#endif
+
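For orientation, a minimal sketch of the per-type lock/list discipline the rest of the file now follows; example_count_hardwalls() is hypothetical and not added by this patch.

static int example_count_hardwalls(struct hardwall_type *hwt)
{
	struct hardwall_info *info;
	unsigned long flags;
	int n = 0;

	/* hwt->lock guards both the list head and the entries on it. */
	spin_lock_irqsave(&hwt->lock, flags);
	list_for_each_entry(info, &hwt->list, list)
		++n;
	spin_unlock_irqrestore(&hwt->lock, flags);
	return n;
}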
/* Set a CPU bit if the CPU is online. */
#define cpu_online_set(cpu, dst) do { \
if (cpu_online(cpu)) \
@@ -101,7 +199,7 @@ static int contains(struct hardwall_info *r, int x, int y)
}
/* Compute the rectangle parameters and validate the cpumask. */
-static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
+static int check_rectangle(struct hardwall_info *r, struct cpumask *mask)
{
int x, y, cpu, ulhc, lrhc;
@@ -114,8 +212,6 @@ static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
r->ulhc_y = cpu_y(ulhc);
r->width = cpu_x(lrhc) - r->ulhc_x + 1;
r->height = cpu_y(lrhc) - r->ulhc_y + 1;
- cpumask_copy(&r->cpumask, mask);
- r->id = ulhc; /* The ulhc cpu id can be the hardwall id. */
/* Width and height must be positive */
if (r->width <= 0 || r->height <= 0)
@@ -128,7 +224,7 @@ static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
return -EINVAL;
/*
- * Note that offline cpus can't be drained when this UDN
+ * Note that offline cpus can't be drained when this user network
* rectangle eventually closes. We used to detect this
* situation and print a warning, but it annoyed users and
* they ignored it anyway, so now we just return without a
@@ -137,16 +233,6 @@ static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
return 0;
}
-/* Do the two given rectangles overlap on any cpu? */
-static int overlaps(struct hardwall_info *a, struct hardwall_info *b)
-{
- return a->ulhc_x + a->width > b->ulhc_x && /* A not to the left */
- b->ulhc_x + b->width > a->ulhc_x && /* B not to the left */
- a->ulhc_y + a->height > b->ulhc_y && /* A not above */
- b->ulhc_y + b->height > a->ulhc_y; /* B not above */
-}
-
-
/*
* Hardware management of hardwall setup, teardown, trapping,
* and enabling/disabling PL0 access to the networks.
@@ -157,23 +243,35 @@ enum direction_protect {
N_PROTECT = (1 << 0),
E_PROTECT = (1 << 1),
S_PROTECT = (1 << 2),
- W_PROTECT = (1 << 3)
+ W_PROTECT = (1 << 3),
+ C_PROTECT = (1 << 4),
};
-static void enable_firewall_interrupts(void)
+static inline int xdn_which_interrupt(struct hardwall_type *hwt)
+{
+#ifndef __tilepro__
+ if (hwt->is_idn)
+ return INT_IDN_FIREWALL;
+#endif
+ return INT_UDN_FIREWALL;
+}
+
+static void enable_firewall_interrupts(struct hardwall_type *hwt)
{
- arch_local_irq_unmask_now(INT_UDN_FIREWALL);
+ arch_local_irq_unmask_now(xdn_which_interrupt(hwt));
}
-static void disable_firewall_interrupts(void)
+static void disable_firewall_interrupts(struct hardwall_type *hwt)
{
- arch_local_irq_mask_now(INT_UDN_FIREWALL);
+ arch_local_irq_mask_now(xdn_which_interrupt(hwt));
}
/* Set up hardwall on this cpu based on the passed hardwall_info. */
-static void hardwall_setup_ipi_func(void *info)
+static void hardwall_setup_func(void *info)
{
struct hardwall_info *r = info;
+ struct hardwall_type *hwt = r->type;
+
int cpu = smp_processor_id();
int x = cpu % smp_width;
int y = cpu / smp_width;
@@ -187,13 +285,12 @@ static void hardwall_setup_ipi_func(void *info)
if (y == r->ulhc_y + r->height - 1)
bits |= S_PROTECT;
BUG_ON(bits == 0);
- __insn_mtspr(SPR_UDN_DIRECTION_PROTECT, bits);
- enable_firewall_interrupts();
-
+ mtspr_XDN(hwt, DIRECTION_PROTECT, bits);
+ enable_firewall_interrupts(hwt);
}
/* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */
-static void hardwall_setup(struct hardwall_info *r)
+static void hardwall_protect_rectangle(struct hardwall_info *r)
{
int x, y, cpu, delta;
struct cpumask rect_cpus;
@@ -217,37 +314,50 @@ static void hardwall_setup(struct hardwall_info *r)
}
/* Then tell all the cpus to set up their protection SPR */
- on_each_cpu_mask(&rect_cpus, hardwall_setup_ipi_func, r, 1);
+ on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1);
}
void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
{
struct hardwall_info *rect;
+ struct hardwall_type *hwt;
struct task_struct *p;
struct siginfo info;
- int x, y;
int cpu = smp_processor_id();
int found_processes;
unsigned long flags;
-
struct pt_regs *old_regs = set_irq_regs(regs);
+
irq_enter();
+ /* Figure out which network trapped. */
+ switch (fault_num) {
+#ifndef __tilepro__
+ case INT_IDN_FIREWALL:
+ hwt = &hardwall_types[HARDWALL_IDN];
+ break;
+#endif
+ case INT_UDN_FIREWALL:
+ hwt = &hardwall_types[HARDWALL_UDN];
+ break;
+ default:
+ BUG();
+ }
+ BUG_ON(hwt->disabled);
+
/* This tile trapped a network access; find the rectangle. */
- x = cpu % smp_width;
- y = cpu / smp_width;
- spin_lock_irqsave(&hardwall_lock, flags);
- list_for_each_entry(rect, &rectangles, list) {
- if (contains(rect, x, y))
+ spin_lock_irqsave(&hwt->lock, flags);
+ list_for_each_entry(rect, &hwt->list, list) {
+ if (cpumask_test_cpu(cpu, &rect->cpumask))
break;
}
/*
* It shouldn't be possible not to find this cpu on the
* rectangle list, since only cpus in rectangles get hardwalled.
- * The hardwall is only removed after the UDN is drained.
+ * The hardwall is only removed after the user network is drained.
*/
- BUG_ON(&rect->list == &rectangles);
+ BUG_ON(&rect->list == &hwt->list);
/*
* If we already started teardown on this hardwall, don't worry;
@@ -255,30 +365,32 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
* to quiesce.
*/
if (rect->teardown_in_progress) {
- pr_notice("cpu %d: detected hardwall violation %#lx"
+ pr_notice("cpu %d: detected %s hardwall violation %#lx"
" while teardown already in progress\n",
- cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT));
+ cpu, hwt->name,
+ (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
goto done;
}
/*
* Kill off any process that is activated in this rectangle.
* We bypass security to deliver the signal, since it must be
- * one of the activated processes that generated the UDN
+ * one of the activated processes that generated the user network
* message that caused this trap, and all the activated
* processes shared a single open file so are pretty tightly
* bound together from a security point of view to begin with.
*/
rect->teardown_in_progress = 1;
wmb(); /* Ensure visibility of rectangle before notifying processes. */
- pr_notice("cpu %d: detected hardwall violation %#lx...\n",
- cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT));
+ pr_notice("cpu %d: detected %s hardwall violation %#lx...\n",
+ cpu, hwt->name, (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_HARDWALL;
found_processes = 0;
- list_for_each_entry(p, &rect->task_head, thread.hardwall_list) {
- BUG_ON(p->thread.hardwall != rect);
+ list_for_each_entry(p, &rect->task_head,
+ thread.hardwall[hwt->index].list) {
+ BUG_ON(p->thread.hardwall[hwt->index].info != rect);
if (!(p->flags & PF_EXITING)) {
found_processes = 1;
pr_notice("hardwall: killing %d\n", p->pid);
@@ -289,7 +401,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
pr_notice("hardwall: no associated processes!\n");
done:
- spin_unlock_irqrestore(&hardwall_lock, flags);
+ spin_unlock_irqrestore(&hwt->lock, flags);
/*
* We have to disable firewall interrupts now, or else when we
@@ -298,48 +410,87 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
* haven't yet drained the network, and that would allow packets
* to cross out of the hardwall region.
*/
- disable_firewall_interrupts();
+ disable_firewall_interrupts(hwt);
irq_exit();
set_irq_regs(old_regs);
}
-/* Allow access from user space to the UDN. */
-void grant_network_mpls(void)
+/* Allow access from user space to the user network. */
+void grant_hardwall_mpls(struct hardwall_type *hwt)
{
- __insn_mtspr(SPR_MPL_UDN_ACCESS_SET_0, 1);
- __insn_mtspr(SPR_MPL_UDN_AVAIL_SET_0, 1);
- __insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_0, 1);
- __insn_mtspr(SPR_MPL_UDN_TIMER_SET_0, 1);
+#ifndef __tilepro__
+ if (!hwt->is_xdn) {
+ __insn_mtspr(SPR_MPL_IPI_0_SET_0, 1);
+ return;
+ }
+#endif
+ mtspr_MPL_XDN(hwt, ACCESS_SET_0, 1);
+ mtspr_MPL_XDN(hwt, AVAIL_SET_0, 1);
+ mtspr_MPL_XDN(hwt, COMPLETE_SET_0, 1);
+ mtspr_MPL_XDN(hwt, TIMER_SET_0, 1);
#if !CHIP_HAS_REV1_XDN()
- __insn_mtspr(SPR_MPL_UDN_REFILL_SET_0, 1);
- __insn_mtspr(SPR_MPL_UDN_CA_SET_0, 1);
+ mtspr_MPL_XDN(hwt, REFILL_SET_0, 1);
+ mtspr_MPL_XDN(hwt, CA_SET_0, 1);
#endif
}
-/* Deny access from user space to the UDN. */
-void restrict_network_mpls(void)
+/* Deny access from user space to the user network. */
+void restrict_hardwall_mpls(struct hardwall_type *hwt)
{
- __insn_mtspr(SPR_MPL_UDN_ACCESS_SET_1, 1);
- __insn_mtspr(SPR_MPL_UDN_AVAIL_SET_1, 1);
- __insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_1, 1);
- __insn_mtspr(SPR_MPL_UDN_TIMER_SET_1, 1);
+#ifndef __tilepro__
+ if (!hwt->is_xdn) {
+ __insn_mtspr(SPR_MPL_IPI_0_SET_1, 1);
+ return;
+ }
+#endif
+ mtspr_MPL_XDN(hwt, ACCESS_SET_1, 1);
+ mtspr_MPL_XDN(hwt, AVAIL_SET_1, 1);
+ mtspr_MPL_XDN(hwt, COMPLETE_SET_1, 1);
+ mtspr_MPL_XDN(hwt, TIMER_SET_1, 1);
#if !CHIP_HAS_REV1_XDN()
- __insn_mtspr(SPR_MPL_UDN_REFILL_SET_1, 1);
- __insn_mtspr(SPR_MPL_UDN_CA_SET_1, 1);
+ mtspr_MPL_XDN(hwt, REFILL_SET_1, 1);
+ mtspr_MPL_XDN(hwt, CA_SET_1, 1);
#endif
}
+/* Restrict or deny as necessary for the task we're switching to. */
+void hardwall_switch_tasks(struct task_struct *prev,
+ struct task_struct *next)
+{
+ int i;
+ for (i = 0; i < HARDWALL_TYPES; ++i) {
+ if (prev->thread.hardwall[i].info != NULL) {
+ if (next->thread.hardwall[i].info == NULL)
+ restrict_hardwall_mpls(&hardwall_types[i]);
+ } else if (next->thread.hardwall[i].info != NULL) {
+ grant_hardwall_mpls(&hardwall_types[i]);
+ }
+ }
+}
+
+/* Does this task have the right to IPI the given cpu? */
+int hardwall_ipi_valid(int cpu)
+{
+#ifdef __tilegx__
+ struct hardwall_info *info =
+ current->thread.hardwall[HARDWALL_IPI].info;
+ return info && cpumask_test_cpu(cpu, &info->cpumask);
+#else
+ return 0;
+#endif
+}
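The obvious consumers of these two helpers sit outside this file; the sketch below is illustrative only, both function names are hypothetical, and the real call sites are not shown in this diff.

/* From the arch context-switch path, with "next" about to run. */
static void example_switch_hook(struct task_struct *prev,
				struct task_struct *next)
{
	/* Grants or restricts each resource's MPLs based on ownership. */
	hardwall_switch_tasks(prev, next);
}

/* Guard before delivering a user-space IPI to "cpu". */
static int example_try_user_ipi(int cpu)
{
	if (!hardwall_ipi_valid(cpu))
		return -EPERM;	/* cpu is outside this task's IPI hardwall */
	/* ... actual IPI delivery elided ... */
	return 0;
}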
/*
- * Code to create, activate, deactivate, and destroy hardwall rectangles.
+ * Code to create, activate, deactivate, and destroy hardwall resources.
*/
-/* Create a hardwall for the given rectangle */
-static struct hardwall_info *hardwall_create(
- size_t size, const unsigned char __user *bits)
+/* Create a hardwall for the given resource */
+static struct hardwall_info *hardwall_create(struct hardwall_type *hwt,
+ size_t size,
+ const unsigned char __user *bits)
{
- struct hardwall_info *iter, *rect;
+ struct hardwall_info *iter, *info;
struct cpumask mask;
unsigned long flags;
int rc;
@@ -370,55 +521,62 @@ static struct hardwall_info *hardwall_create(
}
}
- /* Allocate a new rectangle optimistically. */
- rect = kmalloc(sizeof(struct hardwall_info),
+ /* Allocate a new hardwall_info optimistically. */
+ info = kmalloc(sizeof(struct hardwall_info),
GFP_KERNEL | __GFP_ZERO);
- if (rect == NULL)
+ if (info == NULL)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&rect->task_head);
+ INIT_LIST_HEAD(&info->task_head);
+ info->type = hwt;
/* Compute the rectangle size and validate that it's plausible. */
- rc = setup_rectangle(rect, &mask);
- if (rc != 0) {
- kfree(rect);
- return ERR_PTR(rc);
+ cpumask_copy(&info->cpumask, &mask);
+ info->id = find_first_bit(cpumask_bits(&mask), nr_cpumask_bits);
+ if (hwt->is_xdn) {
+ rc = check_rectangle(info, &mask);
+ if (rc != 0) {
+ kfree(info);
+ return ERR_PTR(rc);
+ }
}
/* Confirm it doesn't overlap and add it to the list. */
- spin_lock_irqsave(&hardwall_lock, flags);
- list_for_each_entry(iter, &rectangles, list) {
- if (overlaps(iter, rect)) {
- spin_unlock_irqrestore(&hardwall_lock, flags);
- kfree(rect);
+ spin_lock_irqsave(&hwt->lock, flags);
+ list_for_each_entry(iter, &hwt->list, list) {
+ if (cpumask_intersects(&iter->cpumask, &info->cpumask)) {
+ spin_unlock_irqrestore(&hwt->lock, flags);
+ kfree(info);
return ERR_PTR(-EBUSY);
}
}
- list_add_tail(&rect->list, &rectangles);
- spin_unlock_irqrestore(&hardwall_lock, flags);
+ list_add_tail(&info->list, &hwt->list);
+ spin_unlock_irqrestore(&hwt->lock, flags);
/* Set up appropriate hardwalling on all affected cpus. */
- hardwall_setup(rect);
+ if (hwt->is_xdn)
+ hardwall_protect_rectangle(info);
/* Create a /proc/tile/hardwall entry. */
- hardwall_add_proc(rect);
+ hardwall_add_proc(info);
- return rect;
+ return info;
}
/* Activate a given hardwall on this cpu for this process. */
-static int hardwall_activate(struct hardwall_info *rect)
+static int hardwall_activate(struct hardwall_info *info)
{
- int cpu, x, y;
+ int cpu;
unsigned long flags;
struct task_struct *p = current;
struct thread_struct *ts = &p->thread;
+ struct hardwall_type *hwt;
- /* Require a rectangle. */
- if (rect == NULL)
+ /* Require a hardwall. */
+ if (info == NULL)
return -ENODATA;
- /* Not allowed to activate a rectangle that is being torn down. */
- if (rect->teardown_in_progress)
+ /* Not allowed to activate a hardwall that is being torn down. */
+ if (info->teardown_in_progress)
return -EINVAL;
/*
@@ -428,78 +586,87 @@ static int hardwall_activate(struct hardwall_info *rect)
if (cpumask_weight(&p->cpus_allowed) != 1)
return -EPERM;
- /* Make sure we are bound to a cpu in this rectangle. */
+ /* Make sure we are bound to a cpu assigned to this resource. */
cpu = smp_processor_id();
BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
- x = cpu_x(cpu);
- y = cpu_y(cpu);
- if (!contains(rect, x, y))
+ if (!cpumask_test_cpu(cpu, &info->cpumask))
return -EINVAL;
/* If we are already bound to this hardwall, it's a no-op. */
- if (ts->hardwall) {
- BUG_ON(ts->hardwall != rect);
+ hwt = info->type;
+ if (ts->hardwall[hwt->index].info) {
+ BUG_ON(ts->hardwall[hwt->index].info != info);
return 0;
}
- /* Success! This process gets to use the user networks on this cpu. */
- ts->hardwall = rect;
- spin_lock_irqsave(&hardwall_lock, flags);
- list_add(&ts->hardwall_list, &rect->task_head);
- spin_unlock_irqrestore(&hardwall_lock, flags);
- grant_network_mpls();
- printk(KERN_DEBUG "Pid %d (%s) activated for hardwall: cpu %d\n",
- p->pid, p->comm, cpu);
+ /* Success! This process gets to use the resource on this cpu. */
+ ts->hardwall[hwt->index].info = info;
+ spin_lock_irqsave(&hwt->lock, flags);
+ list_add(&ts->hardwall[hwt->index].list, &info->task_head);
+ spin_unlock_irqrestore(&hwt->lock, flags);
+ grant_hardwall_mpls(hwt);
+ printk(KERN_DEBUG "Pid %d (%s) activated for %s hardwall: cpu %d\n",
+ p->pid, p->comm, hwt->name, cpu);
return 0;
}
/*
- * Deactivate a task's hardwall. Must hold hardwall_lock.
+ * Deactivate a task's hardwall. Must hold lock for hardwall_type.
* This method may be called from free_task(), so we don't want to
* rely on too many fields of struct task_struct still being valid.
* We assume the cpus_allowed, pid, and comm fields are still valid.
*/
-static void _hardwall_deactivate(struct task_struct *task)
+static void _hardwall_deactivate(struct hardwall_type *hwt,
+ struct task_struct *task)
{
struct thread_struct *ts = &task->thread;
if (cpumask_weight(&task->cpus_allowed) != 1) {
- pr_err("pid %d (%s) releasing networks with"
+ pr_err("pid %d (%s) releasing %s hardwall with"
" an affinity mask containing %d cpus!\n",
- task->pid, task->comm,
+ task->pid, task->comm, hwt->name,
cpumask_weight(&task->cpus_allowed));
BUG();
}
- BUG_ON(ts->hardwall == NULL);
- ts->hardwall = NULL;
- list_del(&ts->hardwall_list);
+ BUG_ON(ts->hardwall[hwt->index].info == NULL);
+ ts->hardwall[hwt->index].info = NULL;
+ list_del(&ts->hardwall[hwt->index].list);
if (task == current)
- restrict_network_mpls();
+ restrict_hardwall_mpls(hwt);
}
/* Deactivate a task's hardwall. */
-int hardwall_deactivate(struct task_struct *task)
+static int hardwall_deactivate(struct hardwall_type *hwt,
+ struct task_struct *task)
{
unsigned long flags;
int activated;
- spin_lock_irqsave(&hardwall_lock, flags);
- activated = (task->thread.hardwall != NULL);
+ spin_lock_irqsave(&hwt->lock, flags);
+ activated = (task->thread.hardwall[hwt->index].info != NULL);
if (activated)
- _hardwall_deactivate(task);
- spin_unlock_irqrestore(&hardwall_lock, flags);
+ _hardwall_deactivate(hwt, task);
+ spin_unlock_irqrestore(&hwt->lock, flags);
if (!activated)
return -EINVAL;
- printk(KERN_DEBUG "Pid %d (%s) deactivated for hardwall: cpu %d\n",
- task->pid, task->comm, smp_processor_id());
+ printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n",
+ task->pid, task->comm, hwt->name, smp_processor_id());
return 0;
}
-/* Stop a UDN switch before draining the network. */
-static void stop_udn_switch(void *ignored)
+void hardwall_deactivate_all(struct task_struct *task)
+{
+ int i;
+ for (i = 0; i < HARDWALL_TYPES; ++i)
+ if (task->thread.hardwall[i].info)
+ hardwall_deactivate(&hardwall_types[i], task);
+}
+
+/* Stop the switch before draining the network. */
+static void stop_xdn_switch(void *arg)
{
#if !CHIP_HAS_REV1_XDN()
/* Freeze the switch and the demux. */
@@ -507,13 +674,71 @@ static void stop_udn_switch(void *ignored)
SPR_UDN_SP_FREEZE__SP_FRZ_MASK |
SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK |
SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK);
+#else
+ /*
+ * Drop all packets bound for the core or off the edge.
+ * We rely on the normal hardwall protection setup code
+ * to have set the low four bits to trigger firewall interrupts,
+ * and shift those bits up to trigger "drop on send" semantics,
+ * plus adding "drop on send to core" for all switches.
+ * In practice it seems the switches latch the DIRECTION_PROTECT
+ * SPR so they won't start dropping if they're already
+ * delivering the last message to the core, but it doesn't
+ * hurt to enable it here.
+ */
+ struct hardwall_type *hwt = arg;
+ unsigned long protect = mfspr_XDN(hwt, DIRECTION_PROTECT);
+ mtspr_XDN(hwt, DIRECTION_PROTECT, (protect | C_PROTECT) << 5);
#endif
}
+static void empty_xdn_demuxes(struct hardwall_type *hwt)
+{
+#ifndef __tilepro__
+ if (hwt->is_idn) {
+ while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 0))
+ (void) __tile_idn0_receive();
+ while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 1))
+ (void) __tile_idn1_receive();
+ return;
+ }
+#endif
+ while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
+ (void) __tile_udn0_receive();
+ while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
+ (void) __tile_udn1_receive();
+ while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
+ (void) __tile_udn2_receive();
+ while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
+ (void) __tile_udn3_receive();
+}
+
/* Drain all the state from a stopped switch. */
-static void drain_udn_switch(void *ignored)
+static void drain_xdn_switch(void *arg)
{
-#if !CHIP_HAS_REV1_XDN()
+ struct hardwall_info *info = arg;
+ struct hardwall_type *hwt = info->type;
+
+#if CHIP_HAS_REV1_XDN()
+ /*
+ * The switches have been configured to drop any messages
+ * destined for cores (or off the edge of the rectangle).
+ * But the current message may continue to be delivered,
+ * so we wait until all the cores have finished any pending
+ * messages before we stop draining.
+ */
+ int pending = mfspr_XDN(hwt, PENDING);
+ while (pending--) {
+ empty_xdn_demuxes(hwt);
+ if (hwt->is_idn)
+ __tile_idn_send(0);
+ else
+ __tile_udn_send(0);
+ }
+ atomic_dec(&info->xdn_pending_count);
+ while (atomic_read(&info->xdn_pending_count))
+ empty_xdn_demuxes(hwt);
+#else
int i;
int from_tile_words, ca_count;
@@ -533,15 +758,7 @@ static void drain_udn_switch(void *ignored)
(void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO);
/* Empty out demuxes. */
- while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
- (void) __tile_udn0_receive();
- while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
- (void) __tile_udn1_receive();
- while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
- (void) __tile_udn2_receive();
- while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
- (void) __tile_udn3_receive();
- BUG_ON((__insn_mfspr(SPR_UDN_DATA_AVAIL) & 0xF) != 0);
+ empty_xdn_demuxes(hwt);
/* Empty out catch all. */
ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT);
@@ -563,21 +780,25 @@ static void drain_udn_switch(void *ignored)
#endif
}
-/* Reset random UDN state registers at boot up and during hardwall teardown. */
-void reset_network_state(void)
+/* Reset random XDN state registers at boot up and during hardwall teardown. */
+static void reset_xdn_network_state(struct hardwall_type *hwt)
{
-#if !CHIP_HAS_REV1_XDN()
- /* Reset UDN coordinates to their standard value */
- unsigned int cpu = smp_processor_id();
- unsigned int x = cpu % smp_width;
- unsigned int y = cpu / smp_width;
-#endif
-
- if (udn_disabled)
+ if (hwt->disabled)
return;
+ /* Clear out other random registers so we have a clean slate. */
+ mtspr_XDN(hwt, DIRECTION_PROTECT, 0);
+ mtspr_XDN(hwt, AVAIL_EN, 0);
+ mtspr_XDN(hwt, DEADLOCK_TIMEOUT, 0);
+
#if !CHIP_HAS_REV1_XDN()
- __insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
+ /* Reset UDN coordinates to their standard value */
+ {
+ unsigned int cpu = smp_processor_id();
+ unsigned int x = cpu % smp_width;
+ unsigned int y = cpu / smp_width;
+ __insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
+ }
/* Set demux tags to predefined values and enable them. */
__insn_mtspr(SPR_UDN_TAG_VALID, 0xf);
@@ -585,56 +806,50 @@ void reset_network_state(void)
__insn_mtspr(SPR_UDN_TAG_1, (1 << 1));
__insn_mtspr(SPR_UDN_TAG_2, (1 << 2));
__insn_mtspr(SPR_UDN_TAG_3, (1 << 3));
-#endif
- /* Clear out other random registers so we have a clean slate. */
- __insn_mtspr(SPR_UDN_AVAIL_EN, 0);
- __insn_mtspr(SPR_UDN_DEADLOCK_TIMEOUT, 0);
-#if !CHIP_HAS_REV1_XDN()
+ /* Set other rev0 random registers to a clean state. */
__insn_mtspr(SPR_UDN_REFILL_EN, 0);
__insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0);
__insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0);
-#endif
/* Start the switch and demux. */
-#if !CHIP_HAS_REV1_XDN()
__insn_mtspr(SPR_UDN_SP_FREEZE, 0);
#endif
}
-/* Restart a UDN switch after draining. */
-static void restart_udn_switch(void *ignored)
+void reset_network_state(void)
{
- reset_network_state();
-
- /* Disable firewall interrupts. */
- __insn_mtspr(SPR_UDN_DIRECTION_PROTECT, 0);
- disable_firewall_interrupts();
+ reset_xdn_network_state(&hardwall_types[HARDWALL_UDN]);
+#ifndef __tilepro__
+ reset_xdn_network_state(&hardwall_types[HARDWALL_IDN]);
+#endif
}
-/* Build a struct cpumask containing all valid tiles in bounding rectangle. */
-static void fill_mask(struct hardwall_info *r, struct cpumask *result)
+/* Restart an XDN switch after draining. */
+static void restart_xdn_switch(void *arg)
{
- int x, y, cpu;
+ struct hardwall_type *hwt = arg;
- cpumask_clear(result);
+#if CHIP_HAS_REV1_XDN()
+ /* One last drain step to avoid races with injection and draining. */
+ empty_xdn_demuxes(hwt);
+#endif
- cpu = r->ulhc_y * smp_width + r->ulhc_x;
- for (y = 0; y < r->height; ++y, cpu += smp_width - r->width) {
- for (x = 0; x < r->width; ++x, ++cpu)
- cpu_online_set(cpu, result);
- }
+ reset_xdn_network_state(hwt);
+
+ /* Disable firewall interrupts. */
+ disable_firewall_interrupts(hwt);
}
/* Last reference to a hardwall is gone, so clear the network. */
-static void hardwall_destroy(struct hardwall_info *rect)
+static void hardwall_destroy(struct hardwall_info *info)
{
struct task_struct *task;
+ struct hardwall_type *hwt;
unsigned long flags;
- struct cpumask mask;
- /* Make sure this file actually represents a rectangle. */
- if (rect == NULL)
+ /* Make sure this file actually represents a hardwall. */
+ if (info == NULL)
return;
/*
@@ -644,39 +859,53 @@ static void hardwall_destroy(struct hardwall_info *rect)
* deactivate any remaining tasks before freeing the
* hardwall_info object itself.
*/
- spin_lock_irqsave(&hardwall_lock, flags);
- list_for_each_entry(task, &rect->task_head, thread.hardwall_list)
- _hardwall_deactivate(task);
- spin_unlock_irqrestore(&hardwall_lock, flags);
-
- /* Drain the UDN. */
- printk(KERN_DEBUG "Clearing hardwall rectangle %dx%d %d,%d\n",
- rect->width, rect->height, rect->ulhc_x, rect->ulhc_y);
- fill_mask(rect, &mask);
- on_each_cpu_mask(&mask, stop_udn_switch, NULL, 1);
- on_each_cpu_mask(&mask, drain_udn_switch, NULL, 1);
+ hwt = info->type;
+ info->teardown_in_progress = 1;
+ spin_lock_irqsave(&hwt->lock, flags);
+ list_for_each_entry(task, &info->task_head,
+ thread.hardwall[hwt->index].list)
+ _hardwall_deactivate(hwt, task);
+ spin_unlock_irqrestore(&hwt->lock, flags);
+
+ if (hwt->is_xdn) {
+ /* Configure the switches for draining the user network. */
+ printk(KERN_DEBUG
+ "Clearing %s hardwall rectangle %dx%d %d,%d\n",
+ hwt->name, info->width, info->height,
+ info->ulhc_x, info->ulhc_y);
+ on_each_cpu_mask(&info->cpumask, stop_xdn_switch, hwt, 1);
+
+ /* Drain the network. */
+#if CHIP_HAS_REV1_XDN()
+ atomic_set(&info->xdn_pending_count,
+ cpumask_weight(&info->cpumask));
+ on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 0);
+#else
+ on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 1);
+#endif
- /* Restart switch and disable firewall. */
- on_each_cpu_mask(&mask, restart_udn_switch, NULL, 1);
+ /* Restart switch and disable firewall. */
+ on_each_cpu_mask(&info->cpumask, restart_xdn_switch, hwt, 1);
+ }
/* Remove the /proc/tile/hardwall entry. */
- hardwall_remove_proc(rect);
-
- /* Now free the rectangle from the list. */
- spin_lock_irqsave(&hardwall_lock, flags);
- BUG_ON(!list_empty(&rect->task_head));
- list_del(&rect->list);
- spin_unlock_irqrestore(&hardwall_lock, flags);
- kfree(rect);
+ hardwall_remove_proc(info);
+
+ /* Now free the hardwall from the list. */
+ spin_lock_irqsave(&hwt->lock, flags);
+ BUG_ON(!list_empty(&info->task_head));
+ list_del(&info->list);
+ spin_unlock_irqrestore(&hwt->lock, flags);
+ kfree(info);
}
static int hardwall_proc_show(struct seq_file *sf, void *v)
{
- struct hardwall_info *rect = sf->private;
+ struct hardwall_info *info = sf->private;
char buf[256];
- int rc = cpulist_scnprintf(buf, sizeof(buf), &rect->cpumask);
+ int rc = cpulist_scnprintf(buf, sizeof(buf), &info->cpumask);
buf[rc++] = '\n';
seq_write(sf, buf, rc);
return 0;
@@ -695,31 +924,45 @@ static const struct file_operations hardwall_proc_fops = {
.release = single_release,
};
-static void hardwall_add_proc(struct hardwall_info *rect)
+static void hardwall_add_proc(struct hardwall_info *info)
{
char buf[64];
- snprintf(buf, sizeof(buf), "%d", rect->id);
- proc_create_data(buf, 0444, hardwall_proc_dir,
- &hardwall_proc_fops, rect);
+ snprintf(buf, sizeof(buf), "%d", info->id);
+ proc_create_data(buf, 0444, info->type->proc_dir,
+ &hardwall_proc_fops, info);
}
-static void hardwall_remove_proc(struct hardwall_info *rect)
+static void hardwall_remove_proc(struct hardwall_info *info)
{
char buf[64];
- snprintf(buf, sizeof(buf), "%d", rect->id);
- remove_proc_entry(buf, hardwall_proc_dir);
+ snprintf(buf, sizeof(buf), "%d", info->id);
+ remove_proc_entry(buf, info->type->proc_dir);
}
int proc_pid_hardwall(struct task_struct *task, char *buffer)
{
- struct hardwall_info *rect = task->thread.hardwall;
- return rect ? sprintf(buffer, "%d\n", rect->id) : 0;
+ int i;
+ int n = 0;
+ for (i = 0; i < HARDWALL_TYPES; ++i) {
+ struct hardwall_info *info = task->thread.hardwall[i].info;
+ if (info)
+ n += sprintf(&buffer[n], "%s: %d\n",
+ info->type->name, info->id);
+ }
+ return n;
}
void proc_tile_hardwall_init(struct proc_dir_entry *root)
{
- if (!udn_disabled)
- hardwall_proc_dir = proc_mkdir("hardwall", root);
+ int i;
+ for (i = 0; i < HARDWALL_TYPES; ++i) {
+ struct hardwall_type *hwt = &hardwall_types[i];
+ if (hwt->disabled)
+ continue;
+ if (hardwall_proc_dir == NULL)
+ hardwall_proc_dir = proc_mkdir("hardwall", root);
+ hwt->proc_dir = proc_mkdir(hwt->name, hardwall_proc_dir);
+ }
}
@@ -729,34 +972,45 @@ void proc_tile_hardwall_init(struct proc_dir_entry *root)
static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b)
{
- struct hardwall_info *rect = file->private_data;
+ struct hardwall_info *info = file->private_data;
+ int minor = iminor(file->f_mapping->host);
+ struct hardwall_type *hwt;
if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE)
return -EINVAL;
+ BUILD_BUG_ON(HARDWALL_TYPES != _HARDWALL_TYPES);
+ BUILD_BUG_ON(HARDWALL_TYPES !=
+ sizeof(hardwall_types)/sizeof(hardwall_types[0]));
+
+ if (minor < 0 || minor >= HARDWALL_TYPES)
+ return -EINVAL;
+ hwt = &hardwall_types[minor];
+ WARN_ON(info && hwt != info->type);
+
switch (_IOC_NR(a)) {
case _HARDWALL_CREATE:
- if (udn_disabled)
+ if (hwt->disabled)
return -ENOSYS;
- if (rect != NULL)
+ if (info != NULL)
return -EALREADY;
- rect = hardwall_create(_IOC_SIZE(a),
- (const unsigned char __user *)b);
- if (IS_ERR(rect))
- return PTR_ERR(rect);
- file->private_data = rect;
+ info = hardwall_create(hwt, _IOC_SIZE(a),
+ (const unsigned char __user *)b);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+ file->private_data = info;
return 0;
case _HARDWALL_ACTIVATE:
- return hardwall_activate(rect);
+ return hardwall_activate(info);
case _HARDWALL_DEACTIVATE:
- if (current->thread.hardwall != rect)
+ if (current->thread.hardwall[hwt->index].info != info)
return -EINVAL;
- return hardwall_deactivate(current);
+ return hardwall_deactivate(hwt, current);
case _HARDWALL_GET_ID:
- return rect ? rect->id : -EINVAL;
+ return info ? info->id : -EINVAL;
default:
return -EINVAL;
@@ -775,26 +1029,28 @@ static long hardwall_compat_ioctl(struct file *file,
/* The user process closed the file; revoke access to user networks. */
static int hardwall_flush(struct file *file, fl_owner_t owner)
{
- struct hardwall_info *rect = file->private_data;
+ struct hardwall_info *info = file->private_data;
struct task_struct *task, *tmp;
unsigned long flags;
- if (rect) {
+ if (info) {
/*
* NOTE: if multiple threads are activated on this hardwall
* file, the other threads will continue having access to the
- * UDN until they are context-switched out and back in again.
+ * user network until they are context-switched out and back
+ * in again.
*
* NOTE: A NULL files pointer means the task is being torn
* down, so in that case we also deactivate it.
*/
- spin_lock_irqsave(&hardwall_lock, flags);
- list_for_each_entry_safe(task, tmp, &rect->task_head,
- thread.hardwall_list) {
+ struct hardwall_type *hwt = info->type;
+ spin_lock_irqsave(&hwt->lock, flags);
+ list_for_each_entry_safe(task, tmp, &info->task_head,
+ thread.hardwall[hwt->index].list) {
if (task->files == owner || task->files == NULL)
- _hardwall_deactivate(task);
+ _hardwall_deactivate(hwt, task);
}
- spin_unlock_irqrestore(&hardwall_lock, flags);
+ spin_unlock_irqrestore(&hwt->lock, flags);
}
return 0;
@@ -824,11 +1080,11 @@ static int __init dev_hardwall_init(void)
int rc;
dev_t dev;
- rc = alloc_chrdev_region(&dev, 0, 1, "hardwall");
+ rc = alloc_chrdev_region(&dev, 0, HARDWALL_TYPES, "hardwall");
if (rc < 0)
return rc;
cdev_init(&hardwall_dev, &dev_hardwall_fops);
- rc = cdev_add(&hardwall_dev, dev, 1);
+ rc = cdev_add(&hardwall_dev, dev, HARDWALL_TYPES);
if (rc < 0)
return rc;
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S
index 1a39b7c1c87..f71bfeeaf1a 100644
--- a/arch/tile/kernel/head_32.S
+++ b/arch/tile/kernel/head_32.S
@@ -69,7 +69,7 @@ ENTRY(_start)
}
{
moveli lr, lo16(1f)
- move r5, zero
+ moveli r5, CTX_PAGE_FLAG
}
{
auli lr, lr, ha16(1f)
@@ -141,11 +141,11 @@ ENTRY(empty_zero_page)
.macro PTE va, cpa, bits1, no_org=0
.ifeq \no_org
- .org swapper_pg_dir + HV_L1_INDEX(\va) * HV_PTE_SIZE
+ .org swapper_pg_dir + PGD_INDEX(\va) * HV_PTE_SIZE
.endif
.word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \
(HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
- .word (\bits1) | (HV_CPA_TO_PFN(\cpa) << (HV_PTE_INDEX_PFN - 32))
+ .word (\bits1) | (HV_CPA_TO_PTFN(\cpa) << (HV_PTE_INDEX_PTFN - 32))
.endm
__PAGE_ALIGNED_DATA
@@ -166,7 +166,7 @@ ENTRY(swapper_pg_dir)
/* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */
PTE MEM_SV_INTRPT, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
(1 << (HV_PTE_INDEX_EXECUTABLE - 32))
- .org swapper_pg_dir + HV_L1_SIZE
+ .org swapper_pg_dir + PGDIR_SIZE
END(swapper_pg_dir)
/*
diff --git a/arch/tile/kernel/head_64.S b/arch/tile/kernel/head_64.S
index 6bc3a932fe4..f9a2734f7b8 100644
--- a/arch/tile/kernel/head_64.S
+++ b/arch/tile/kernel/head_64.S
@@ -114,7 +114,7 @@ ENTRY(_start)
shl16insli r0, r0, hw0(swapper_pg_dir - PAGE_OFFSET)
}
{
- move r3, zero
+ moveli r3, CTX_PAGE_FLAG
j hv_install_context
}
1:
@@ -210,19 +210,19 @@ ENTRY(empty_zero_page)
.macro PTE cpa, bits1
.quad HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED |\
HV_PTE_GLOBAL | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE) |\
- (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN)
+ (\bits1) | (HV_CPA_TO_PTFN(\cpa) << HV_PTE_INDEX_PTFN)
.endm
__PAGE_ALIGNED_DATA
.align PAGE_SIZE
ENTRY(swapper_pg_dir)
- .org swapper_pg_dir + HV_L0_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
+ .org swapper_pg_dir + PGD_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
.Lsv_data_pmd:
.quad 0 /* PTE temp_data_pmd - PAGE_OFFSET, 0 */
- .org swapper_pg_dir + HV_L0_INDEX(MEM_SV_START) * HV_PTE_SIZE
+ .org swapper_pg_dir + PGD_INDEX(MEM_SV_START) * HV_PTE_SIZE
.Lsv_code_pmd:
.quad 0 /* PTE temp_code_pmd - PAGE_OFFSET, 0 */
- .org swapper_pg_dir + HV_L0_SIZE
+ .org swapper_pg_dir + SIZEOF_PGD
END(swapper_pg_dir)
.align HV_PAGE_TABLE_ALIGN
@@ -233,11 +233,11 @@ ENTRY(temp_data_pmd)
* permissions later.
*/
.set addr, 0
- .rept HV_L1_ENTRIES
+ .rept PTRS_PER_PMD
PTE addr, HV_PTE_READABLE | HV_PTE_WRITABLE
- .set addr, addr + HV_PAGE_SIZE_LARGE
+ .set addr, addr + HPAGE_SIZE
.endr
- .org temp_data_pmd + HV_L1_SIZE
+ .org temp_data_pmd + SIZEOF_PMD
END(temp_data_pmd)
.align HV_PAGE_TABLE_ALIGN
@@ -248,11 +248,11 @@ ENTRY(temp_code_pmd)
* permissions later.
*/
.set addr, 0
- .rept HV_L1_ENTRIES
+ .rept PTRS_PER_PMD
PTE addr, HV_PTE_READABLE | HV_PTE_EXECUTABLE
- .set addr, addr + HV_PAGE_SIZE_LARGE
+ .set addr, addr + HPAGE_SIZE
.endr
- .org temp_code_pmd + HV_L1_SIZE
+ .org temp_code_pmd + SIZEOF_PMD
END(temp_code_pmd)
/*
diff --git a/arch/tile/kernel/hvglue.lds b/arch/tile/kernel/hvglue.lds
index 2b7cd0a659a..d44c5a67a1e 100644
--- a/arch/tile/kernel/hvglue.lds
+++ b/arch/tile/kernel/hvglue.lds
@@ -55,4 +55,5 @@ hv_store_mapping = TEXT_OFFSET + 0x106a0;
hv_inquire_realpa = TEXT_OFFSET + 0x106c0;
hv_flush_all = TEXT_OFFSET + 0x106e0;
hv_get_ipi_pte = TEXT_OFFSET + 0x10700;
-hv_glue_internals = TEXT_OFFSET + 0x10720;
+hv_set_pte_super_shift = TEXT_OFFSET + 0x10720;
+hv_glue_internals = TEXT_OFFSET + 0x10740;
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 30ae76e50c4..7c06d597ffd 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -220,7 +220,9 @@ intvec_\vecname:
* This routine saves just the first four registers, plus the
* stack context so we can do proper backtracing right away,
* and defers to handle_interrupt to save the rest.
- * The backtracer needs pc, ex1, lr, sp, r52, and faultnum.
+ * The backtracer needs pc, ex1, lr, sp, r52, and faultnum,
+ * and needs sp set to its final location at the bottom of
+ * the stack frame.
*/
addli r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
wh64 r0 /* cache line 7 */
@@ -450,23 +452,6 @@ intvec_\vecname:
push_reg r5, r52
st r52, r4
- /* Load tp with our per-cpu offset. */
-#ifdef CONFIG_SMP
- {
- mfspr r20, SPR_SYSTEM_SAVE_K_0
- moveli r21, hw2_last(__per_cpu_offset)
- }
- {
- shl16insli r21, r21, hw1(__per_cpu_offset)
- bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
- }
- shl16insli r21, r21, hw0(__per_cpu_offset)
- shl3add r20, r20, r21
- ld tp, r20
-#else
- move tp, zero
-#endif
-
/*
* If we will be returning to the kernel, we will need to
* reset the interrupt masks to the state they had before.
@@ -489,6 +474,44 @@ intvec_\vecname:
.endif
st r21, r32
+ /*
+ * we've captured enough state to the stack (including in
+ * particular our EX_CONTEXT state) that we can now release
+ * the interrupt critical section and replace it with our
+ * standard "interrupts disabled" mask value. This allows
+ * synchronous interrupts (and profile interrupts) to punch
+ * through from this point onwards.
+ *
+ * It's important that no code before this point touch memory
+ * other than our own stack (to keep the invariant that this
+ * is all that gets touched under ICS), and that no code after
+ * this point reference any interrupt-specific SPR, in particular
+ * the EX_CONTEXT_K_ values.
+ */
+ .ifc \function,handle_nmi
+ IRQ_DISABLE_ALL(r20)
+ .else
+ IRQ_DISABLE(r20, r21)
+ .endif
+ mtspr INTERRUPT_CRITICAL_SECTION, zero
+
+ /* Load tp with our per-cpu offset. */
+#ifdef CONFIG_SMP
+ {
+ mfspr r20, SPR_SYSTEM_SAVE_K_0
+ moveli r21, hw2_last(__per_cpu_offset)
+ }
+ {
+ shl16insli r21, r21, hw1(__per_cpu_offset)
+ bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
+ }
+ shl16insli r21, r21, hw0(__per_cpu_offset)
+ shl3add r20, r20, r21
+ ld tp, r20
+#else
+ move tp, zero
+#endif
+
#ifdef __COLLECT_LINKER_FEEDBACK__
/*
* Notify the feedback routines that we were in the
@@ -513,21 +536,6 @@ intvec_\vecname:
#endif
/*
- * we've captured enough state to the stack (including in
- * particular our EX_CONTEXT state) that we can now release
- * the interrupt critical section and replace it with our
- * standard "interrupts disabled" mask value. This allows
- * synchronous interrupts (and profile interrupts) to punch
- * through from this point onwards.
- */
- .ifc \function,handle_nmi
- IRQ_DISABLE_ALL(r20)
- .else
- IRQ_DISABLE(r20, r21)
- .endif
- mtspr INTERRUPT_CRITICAL_SECTION, zero
-
- /*
* Prepare the first 256 stack bytes to be rapidly accessible
* without having to fetch the background data.
*/
@@ -736,9 +744,10 @@ STD_ENTRY(interrupt_return)
beqzt r30, .Lrestore_regs
j 3f
2: TRACE_IRQS_ON
+ IRQ_ENABLE_LOAD(r20, r21)
movei r0, 1
mtspr INTERRUPT_CRITICAL_SECTION, r0
- IRQ_ENABLE(r20, r21)
+ IRQ_ENABLE_APPLY(r20, r21)
beqzt r30, .Lrestore_regs
3:
@@ -755,7 +764,6 @@ STD_ENTRY(interrupt_return)
* that will save some cycles if this turns out to be a syscall.
*/
.Lrestore_regs:
- FEEDBACK_REENTER(interrupt_return) /* called from elsewhere */
/*
* Rotate so we have one high bit and one low bit to test.
@@ -1249,7 +1257,7 @@ STD_ENTRY(fill_ra_stack)
int_hand INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
int_hand INT_DTLB_MISS, DTLB_MISS, do_page_fault
int_hand INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
- int_hand INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr
+ int_hand INT_IDN_FIREWALL, IDN_FIREWALL, do_hardwall_trap
int_hand INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
int_hand INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
int_hand INT_IDN_TIMER, IDN_TIMER, bad_intr
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
index 6255f2eab11..f0b54a93471 100644
--- a/arch/tile/kernel/machine_kexec.c
+++ b/arch/tile/kernel/machine_kexec.c
@@ -31,6 +31,8 @@
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/checksum.h>
+#include <asm/tlbflush.h>
+#include <asm/homecache.h>
#include <hv/hypervisor.h>
@@ -222,11 +224,22 @@ struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order)
return alloc_pages_node(0, gfp_mask, order);
}
+/*
+ * Address range in which pa=va mapping is set in setup_quasi_va_is_pa().
+ * For tilepro, PAGE_OFFSET is used since this is the largest possible value
+ * for tilepro, while for tilegx, we limit it to the entire middle-level page
+ * table, which we assume has been allocated and is undoubtedly large enough.
+ */
+#ifndef __tilegx__
+#define QUASI_VA_IS_PA_ADDR_RANGE PAGE_OFFSET
+#else
+#define QUASI_VA_IS_PA_ADDR_RANGE PGDIR_SIZE
+#endif
+
static void setup_quasi_va_is_pa(void)
{
- HV_PTE *pgtable;
HV_PTE pte;
- int i;
+ unsigned long i;
/*
* Flush our TLB to prevent conflicts between the previous contents
@@ -234,16 +247,22 @@ static void setup_quasi_va_is_pa(void)
*/
local_flush_tlb_all();
- /* setup VA is PA, at least up to PAGE_OFFSET */
-
- pgtable = (HV_PTE *)current->mm->pgd;
+ /*
+ * Set up a VA == PA mapping, at least up to QUASI_VA_IS_PA_ADDR_RANGE.
+ * Note that here we assume each level-1 page table entry maps
+ * HPAGE_SIZE.
+ */
pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE);
pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
-
- for (i = 0; i < pgd_index(PAGE_OFFSET); i++) {
+ for (i = 0; i < (QUASI_VA_IS_PA_ADDR_RANGE >> HPAGE_SHIFT); i++) {
+ unsigned long vaddr = i << HPAGE_SHIFT;
+ pgd_t *pgd = pgd_offset(current->mm, vaddr);
+ pud_t *pud = pud_offset(pgd, vaddr);
+ pte_t *ptep = (pte_t *) pmd_offset(pud, vaddr);
unsigned long pfn = i << (HPAGE_SHIFT - PAGE_SHIFT);
+
if (pfn_valid(pfn))
- __set_pte(&pgtable[i], pfn_pte(pfn, pte));
+ __set_pte(ptep, pfn_pte(pfn, pte));
}
}
@@ -251,6 +270,7 @@ static void setup_quasi_va_is_pa(void)
void machine_kexec(struct kimage *image)
{
void *reboot_code_buffer;
+ pte_t *ptep;
void (*rnk)(unsigned long, void *, unsigned long)
__noreturn;
@@ -266,8 +286,10 @@ void machine_kexec(struct kimage *image)
*/
homecache_change_page_home(image->control_code_page, 0,
smp_processor_id());
- reboot_code_buffer = vmap(&image->control_code_page, 1, 0,
- __pgprot(_PAGE_KERNEL | _PAGE_EXECUTABLE));
+ reboot_code_buffer = page_address(image->control_code_page);
+ BUG_ON(reboot_code_buffer == NULL);
+ ptep = virt_to_pte(NULL, (unsigned long)reboot_code_buffer);
+ __set_pte(ptep, pte_mkexec(*ptep));
memcpy(reboot_code_buffer, relocate_new_kernel,
relocate_new_kernel_size);
__flush_icache_range(
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index 98d47692010..001cbfa10ac 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -159,7 +159,17 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
switch (ELF_R_TYPE(rel[i].r_info)) {
-#define MUNGE(func) (*location = ((*location & ~func(-1)) | func(value)))
+#ifdef __LITTLE_ENDIAN
+# define MUNGE(func) \
+ (*location = ((*location & ~func(-1)) | func(value)))
+#else
+/*
+ * Instructions are always little-endian, so when we read them as data,
+ * we have to swap them around before and after modifying them.
+ */
+# define MUNGE(func) \
+ (*location = swab64((swab64(*location) & ~func(-1)) | func(value)))
+#endif
#ifndef __tilegx__
case R_TILE_32:
diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c
index 446a7f52cc1..dafc447b512 100644
--- a/arch/tile/kernel/proc.c
+++ b/arch/tile/kernel/proc.c
@@ -22,6 +22,7 @@
#include <linux/proc_fs.h>
#include <linux/sysctl.h>
#include <linux/hardirq.h>
+#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <asm/unaligned.h>
#include <asm/pgtable.h>
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index f572c19c408..ba1023d8a02 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -128,10 +128,10 @@ void arch_release_thread_info(struct thread_info *info)
* Calling deactivate here just frees up the data structures.
* If the task we're freeing held the last reference to a
* hardwall fd, it would have been released prior to this point
- * anyway via exit_files(), and "hardwall" would be NULL by now.
+ * anyway via exit_files(), and the hardwall_task.info pointers
+ * would be NULL by now.
*/
- if (info->task->thread.hardwall)
- hardwall_deactivate(info->task);
+ hardwall_deactivate_all(info->task);
#endif
if (step_state) {
@@ -245,7 +245,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
#ifdef CONFIG_HARDWALL
/* New thread does not own any networks. */
- p->thread.hardwall = NULL;
+ memset(&p->thread.hardwall[0], 0,
+ sizeof(struct hardwall_task) * HARDWALL_TYPES);
#endif
@@ -515,12 +516,7 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
#ifdef CONFIG_HARDWALL
/* Enable or disable access to the network registers appropriately. */
- if (prev->thread.hardwall != NULL) {
- if (next->thread.hardwall == NULL)
- restrict_network_mpls();
- } else if (next->thread.hardwall != NULL) {
- grant_network_mpls();
- }
+ hardwall_switch_tasks(prev, next);
#endif
/*
diff --git a/arch/tile/kernel/relocate_kernel.S b/arch/tile/kernel/relocate_kernel_32.S
index 010b418515f..010b418515f 100644
--- a/arch/tile/kernel/relocate_kernel.S
+++ b/arch/tile/kernel/relocate_kernel_32.S
diff --git a/arch/tile/kernel/relocate_kernel_64.S b/arch/tile/kernel/relocate_kernel_64.S
new file mode 100644
index 00000000000..1c09a4f5a4e
--- /dev/null
+++ b/arch/tile/kernel/relocate_kernel_64.S
@@ -0,0 +1,260 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ *
+ * copy new kernel into place and then call hv_reexec
+ *
+ */
+
+#include <linux/linkage.h>
+#include <arch/chip.h>
+#include <asm/page.h>
+#include <hv/hypervisor.h>
+
+#undef RELOCATE_NEW_KERNEL_VERBOSE
+
+STD_ENTRY(relocate_new_kernel)
+
+ move r30, r0 /* page list */
+ move r31, r1 /* address of page we are on */
+ move r32, r2 /* start address of new kernel */
+
+ shrui r1, r1, PAGE_SHIFT
+ addi r1, r1, 1
+ shli sp, r1, PAGE_SHIFT
+ addi sp, sp, -8
+ /* we now have a stack (whether we need one or not) */
+
+ moveli r40, hw2_last(hv_console_putc)
+ shl16insli r40, r40, hw1(hv_console_putc)
+ shl16insli r40, r40, hw0(hv_console_putc)
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+ moveli r0, 'r'
+ jalr r40
+
+ moveli r0, '_'
+ jalr r40
+
+ moveli r0, 'n'
+ jalr r40
+
+ moveli r0, '_'
+ jalr r40
+
+ moveli r0, 'k'
+ jalr r40
+
+ moveli r0, '\n'
+ jalr r40
+#endif
+
+ /*
+ * Throughout this code, r30 is a pointer to the element of the
+ * page list we are working on.
+ *
+ * Normally we get to the next element of the page list by
+ * incrementing r30 by eight. The exception is if the element
+ * on the page list is an IND_INDIRECTION in which case we use
+ * the element with the low bits masked off as the new value
+ * of r30.
+ *
+ * To get this started, we need the value passed to us (which
+ * will always be an IND_INDIRECTION) in memory somewhere with
+ * r30 pointing at it. To do that, we push the value passed
+ * to us on the stack and make r30 point to it.
+ */
+
+ st sp, r30
+ move r30, sp
+ addi sp, sp, -16
+
+#if CHIP_HAS_CBOX_HOME_MAP()
+ /*
+ * On TILE-GX, we need to flush all tiles' caches, since we may
+ * have been doing hash-for-home caching there. Note that we
+ * must do this _after_ we're completely done modifying any memory
+ * other than our output buffer (which we know is locally cached).
+ * We want the caches to be fully clean when we do the reexec,
+ * because the hypervisor is going to do this flush again at that
+ * point, and we don't want that second flush to overwrite any memory.
+ */
+ {
+ move r0, zero /* cache_pa */
+ moveli r1, hw2_last(HV_FLUSH_EVICT_L2)
+ }
+ {
+ shl16insli r1, r1, hw1(HV_FLUSH_EVICT_L2)
+ movei r2, -1 /* cache_cpumask; -1 means all client tiles */
+ }
+ {
+ shl16insli r1, r1, hw0(HV_FLUSH_EVICT_L2) /* cache_control */
+ move r3, zero /* tlb_va */
+ }
+ {
+ move r4, zero /* tlb_length */
+ move r5, zero /* tlb_pgsize */
+ }
+ {
+ move r6, zero /* tlb_cpumask */
+ move r7, zero /* asids */
+ }
+ {
+ moveli r20, hw2_last(hv_flush_remote)
+ move r8, zero /* asidcount */
+ }
+ shl16insli r20, r20, hw1(hv_flush_remote)
+ shl16insli r20, r20, hw0(hv_flush_remote)
+
+ jalr r20
+#endif
+
+ /* r33 is destination pointer, default to zero */
+
+ moveli r33, 0
+
+.Lloop: ld r10, r30
+
+ andi r9, r10, 0xf /* low 4 bits tell us what type it is */
+ xor r10, r10, r9 /* r10 is now value with low 4 bits stripped */
+
+ cmpeqi r0, r9, 0x1 /* IND_DESTINATION */
+ beqzt r0, .Ltry2
+
+ move r33, r10
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+ moveli r0, 'd'
+ jalr r40
+#endif
+
+ addi r30, r30, 8
+ j .Lloop
+
+.Ltry2:
+ cmpeqi r0, r9, 0x2 /* IND_INDIRECTION */
+ beqzt r0, .Ltry4
+
+ move r30, r10
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+ moveli r0, 'i'
+ jalr r40
+#endif
+
+ j .Lloop
+
+.Ltry4:
+ cmpeqi r0, r9, 0x4 /* IND_DONE */
+ beqzt r0, .Ltry8
+
+ mf
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+ moveli r0, 'D'
+ jalr r40
+ moveli r0, '\n'
+ jalr r40
+#endif
+
+ move r0, r32
+
+ moveli r41, hw2_last(hv_reexec)
+ shl16insli r41, r41, hw1(hv_reexec)
+ shl16insli r41, r41, hw0(hv_reexec)
+
+ jalr r41
+
+ /* we should not get here */
+
+ moveli r0, '?'
+ jalr r40
+ moveli r0, '\n'
+ jalr r40
+
+ j .Lhalt
+
+.Ltry8: cmpeqi r0, r9, 0x8 /* IND_SOURCE */
+ beqz r0, .Lerr /* unknown type */
+
+ /* copy page at r10 to page at r33 */
+
+ move r11, r33
+
+ moveli r0, hw2_last(PAGE_SIZE)
+ shl16insli r0, r0, hw1(PAGE_SIZE)
+ shl16insli r0, r0, hw0(PAGE_SIZE)
+ add r33, r33, r0
+
+ /* copy word at r10 to word at r11 until r11 equals r33 */
+
+ /* We know the page size must be a multiple of 8 words (64 bytes),
+ * so we can unroll 8 times safely without any edge-case checking.
+ *
+ * Issue a flush of the destination every 8 words to avoid
+ * incoherence when starting the new kernel. (Now this is
+ * just good paranoia because the hv_reexec call will also
+ * take care of this.)
+ */
+
+1:
+ { ld r0, r10; addi r10, r10, 8 }
+ { st r11, r0; addi r11, r11, 8 }
+ { ld r0, r10; addi r10, r10, 8 }
+ { st r11, r0; addi r11, r11, 8 }
+ { ld r0, r10; addi r10, r10, 8 }
+ { st r11, r0; addi r11, r11, 8 }
+ { ld r0, r10; addi r10, r10, 8 }
+ { st r11, r0; addi r11, r11, 8 }
+ { ld r0, r10; addi r10, r10, 8 }
+ { st r11, r0; addi r11, r11, 8 }
+ { ld r0, r10; addi r10, r10, 8 }
+ { st r11, r0; addi r11, r11, 8 }
+ { ld r0, r10; addi r10, r10, 8 }
+ { st r11, r0; addi r11, r11, 8 }
+ { ld r0, r10; addi r10, r10, 8 }
+ { st r11, r0 }
+ { flush r11 ; addi r11, r11, 8 }
+
+ cmpeq r0, r33, r11
+ beqzt r0, 1b
+
+#ifdef RELOCATE_NEW_KERNEL_VERBOSE
+ moveli r0, 's'
+ jalr r40
+#endif
+
+ addi r30, r30, 8
+ j .Lloop
+
+
+.Lerr: moveli r0, 'e'
+ jalr r40
+ moveli r0, 'r'
+ jalr r40
+ moveli r0, 'r'
+ jalr r40
+ moveli r0, '\n'
+ jalr r40
+.Lhalt:
+ moveli r41, hw2_last(hv_halt)
+ shl16insli r41, r41, hw1(hv_halt)
+ shl16insli r41, r41, hw0(hv_halt)
+
+ jalr r41
+ STD_ENDPROC(relocate_new_kernel)
+
+ .section .rodata,"a"
+
+ .globl relocate_new_kernel_size
+relocate_new_kernel_size:
+ .long .Lend_relocate_new_kernel - relocate_new_kernel
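The page-list walk implemented by relocate_new_kernel above follows the standard kexec encoding: the low four bits of each entry select IND_DESTINATION (0x1), IND_INDIRECTION (0x2), IND_DONE (0x4), or IND_SOURCE (0x8), and the remaining bits are an address. The following is a minimal C sketch of that loop for readers who prefer not to trace the assembly; it is illustrative only, and the function name and 64 KB page-size constant are assumptions rather than part of the patch.

#include <stdint.h>
#include <string.h>

#define IND_DESTINATION 0x1     /* "copies go here from now on" */
#define IND_INDIRECTION 0x2     /* "the list continues in this page" */
#define IND_DONE        0x4     /* "end of list; reexec the new kernel" */
#define IND_SOURCE      0x8     /* "copy this page to the destination" */

#define SKETCH_PAGE_SIZE 65536UL        /* assumption: 64 KB pages */

static void walk_kexec_page_list(const uint64_t *entry)
{
        char *dest = NULL;

        for (;;) {
                uint64_t e = *entry;
                uint64_t addr = e & ~0xfUL;     /* strip the low type bits */

                if (e & IND_DESTINATION) {
                        dest = (char *)(uintptr_t)addr;  /* r33 in the assembly */
                        entry++;
                } else if (e & IND_INDIRECTION) {
                        entry = (const uint64_t *)(uintptr_t)addr;  /* new r30 */
                } else if (e & IND_SOURCE) {
                        memcpy(dest, (const void *)(uintptr_t)addr,
                               SKETCH_PAGE_SIZE);
                        dest += SKETCH_PAGE_SIZE;
                        entry++;
                } else if (e & IND_DONE) {
                        return; /* the real code calls hv_reexec(start) here */
                } else {
                        return; /* unknown type: the real code prints "err" and halts */
                }
        }
}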
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 98d80eb49dd..6098ccc59be 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -28,6 +28,7 @@
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/timex.h>
+#include <linux/hugetlb.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
@@ -49,9 +50,6 @@ char chip_model[64] __write_once;
struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
-/* We only create bootmem data on node 0. */
-static bootmem_data_t __initdata node0_bdata;
-
/* Information on the NUMA nodes that we compute early */
unsigned long __cpuinitdata node_start_pfn[MAX_NUMNODES];
unsigned long __cpuinitdata node_end_pfn[MAX_NUMNODES];
@@ -534,37 +532,96 @@ static void __init setup_memory(void)
#endif
}
-static void __init setup_bootmem_allocator(void)
+/*
+ * On 32-bit machines, we only put bootmem on the low controller,
+ * since PAs > 4GB can't be used in bootmem. In principle one could
+ * imagine, e.g., multiple 1 GB controllers all of which could support
+ * bootmem, but in practice using controllers this small isn't a
+ * particularly interesting scenario, so we just keep it simple and
+ * use only the first controller for bootmem on 32-bit machines.
+ */
+static inline int node_has_bootmem(int nid)
{
- unsigned long bootmap_size, first_alloc_pfn, last_alloc_pfn;
+#ifdef CONFIG_64BIT
+ return 1;
+#else
+ return nid == 0;
+#endif
+}
- /* Provide a node 0 bdata. */
- NODE_DATA(0)->bdata = &node0_bdata;
+static inline unsigned long alloc_bootmem_pfn(int nid,
+ unsigned long size,
+ unsigned long goal)
+{
+ void *kva = __alloc_bootmem_node(NODE_DATA(nid), size,
+ PAGE_SIZE, goal);
+ unsigned long pfn = kaddr_to_pfn(kva);
+ BUG_ON(goal && PFN_PHYS(pfn) != goal);
+ return pfn;
+}
-#ifdef CONFIG_PCI
- /* Don't let boot memory alias the PCI region. */
- last_alloc_pfn = min(max_low_pfn, pci_reserve_start_pfn);
+static void __init setup_bootmem_allocator_node(int i)
+{
+ unsigned long start, end, mapsize, mapstart;
+
+ if (node_has_bootmem(i)) {
+ NODE_DATA(i)->bdata = &bootmem_node_data[i];
+ } else {
+ /* Share controller zero's bdata for now. */
+ NODE_DATA(i)->bdata = &bootmem_node_data[0];
+ return;
+ }
+
+ /* Skip up to after the bss in node 0. */
+ start = (i == 0) ? min_low_pfn : node_start_pfn[i];
+
+ /* Only lowmem, if we're a HIGHMEM build. */
+#ifdef CONFIG_HIGHMEM
+ end = node_lowmem_end_pfn[i];
#else
- last_alloc_pfn = max_low_pfn;
+ end = node_end_pfn[i];
#endif
- /*
- * Initialize the boot-time allocator (with low memory only):
- * The first argument says where to put the bitmap, and the
- * second says where the end of allocatable memory is.
- */
- bootmap_size = init_bootmem(min_low_pfn, last_alloc_pfn);
+ /* No memory here. */
+ if (end == start)
+ return;
+
+ /* Figure out where the bootmem bitmap is located. */
+ mapsize = bootmem_bootmap_pages(end - start);
+ if (i == 0) {
+ /* Use some space right before the heap on node 0. */
+ mapstart = start;
+ start += mapsize;
+ } else {
+ /* Allocate bitmap on node 0 to avoid page table issues. */
+ mapstart = alloc_bootmem_pfn(0, PFN_PHYS(mapsize), 0);
+ }
+ /* Initialize a node. */
+ init_bootmem_node(NODE_DATA(i), mapstart, start, end);
+
+ /* Free all the space back into the allocator. */
+ free_bootmem(PFN_PHYS(start), PFN_PHYS(end - start));
+
+#if defined(CONFIG_PCI)
/*
- * Let the bootmem allocator use all the space we've given it
- * except for its own bitmap.
+ * Throw away any memory aliased by the PCI region. FIXME: this
+ * is a temporary hack to work around bug 10502, and needs to be
+ * fixed properly.
*/
- first_alloc_pfn = min_low_pfn + PFN_UP(bootmap_size);
- if (first_alloc_pfn >= last_alloc_pfn)
- early_panic("Not enough memory on controller 0 for bootmem\n");
+ if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start)
+ reserve_bootmem(PFN_PHYS(pci_reserve_start_pfn),
+ PFN_PHYS(pci_reserve_end_pfn -
+ pci_reserve_start_pfn),
+ BOOTMEM_EXCLUSIVE);
+#endif
+}
- free_bootmem(PFN_PHYS(first_alloc_pfn),
- PFN_PHYS(last_alloc_pfn - first_alloc_pfn));
+static void __init setup_bootmem_allocator(void)
+{
+ int i;
+ for (i = 0; i < MAX_NUMNODES; ++i)
+ setup_bootmem_allocator_node(i);
#ifdef CONFIG_KEXEC
if (crashk_res.start != crashk_res.end)
@@ -595,14 +652,6 @@ static int __init percpu_size(void)
return size;
}
-static inline unsigned long alloc_bootmem_pfn(int size, unsigned long goal)
-{
- void *kva = __alloc_bootmem(size, PAGE_SIZE, goal);
- unsigned long pfn = kaddr_to_pfn(kva);
- BUG_ON(goal && PFN_PHYS(pfn) != goal);
- return pfn;
-}
-
static void __init zone_sizes_init(void)
{
unsigned long zones_size[MAX_NR_ZONES] = { 0 };
@@ -640,21 +689,22 @@ static void __init zone_sizes_init(void)
* though, there'll be no lowmem, so we just alloc_bootmem
* the memmap. There will be no percpu memory either.
*/
- if (__pfn_to_highbits(start) == 0) {
- /* In low PAs, allocate via bootmem. */
+ if (i != 0 && cpu_isset(i, isolnodes)) {
+ node_memmap_pfn[i] =
+ alloc_bootmem_pfn(0, memmap_size, 0);
+ BUG_ON(node_percpu[i] != 0);
+ } else if (node_has_bootmem(start)) {
unsigned long goal = 0;
node_memmap_pfn[i] =
- alloc_bootmem_pfn(memmap_size, goal);
+ alloc_bootmem_pfn(i, memmap_size, 0);
if (kdata_huge)
goal = PFN_PHYS(lowmem_end) - node_percpu[i];
if (node_percpu[i])
node_percpu_pfn[i] =
- alloc_bootmem_pfn(node_percpu[i], goal);
- } else if (cpu_isset(i, isolnodes)) {
- node_memmap_pfn[i] = alloc_bootmem_pfn(memmap_size, 0);
- BUG_ON(node_percpu[i] != 0);
+ alloc_bootmem_pfn(i, node_percpu[i],
+ goal);
} else {
- /* In high PAs, just reserve some pages. */
+ /* In non-bootmem zones, just reserve some pages. */
node_memmap_pfn[i] = node_free_pfn[i];
node_free_pfn[i] += PFN_UP(memmap_size);
if (!kdata_huge) {
@@ -678,16 +728,9 @@ static void __init zone_sizes_init(void)
zones_size[ZONE_NORMAL] = end - start;
#endif
- /*
- * Everyone shares node 0's bootmem allocator, but
- * we use alloc_remap(), above, to put the actual
- * struct page array on the individual controllers,
- * which is most of the data that we actually care about.
- * We can't place bootmem allocators on the other
- * controllers since the bootmem allocator can only
- * operate on 32-bit physical addresses.
- */
- NODE_DATA(i)->bdata = NODE_DATA(0)->bdata;
+ /* Take zone metadata from controller 0 if we're isolnode. */
+ if (node_isset(i, isolnodes))
+ NODE_DATA(i)->bdata = &bootmem_node_data[0];
free_area_init_node(i, zones_size, start, NULL);
printk(KERN_DEBUG " Normal zone: %ld per-cpu pages\n",
@@ -870,6 +913,22 @@ subsys_initcall(topology_init);
#endif /* CONFIG_NUMA */
+/*
+ * Initialize hugepage support on this cpu. We do this on all cores
+ * early in boot: before argument parsing for the boot cpu, and after
+ * argument parsing but before the init functions run on the secondaries.
+ * So the values we set up here in the hypervisor may be overridden on
+ * the boot cpu as arguments are parsed.
+ */
+static __cpuinit void init_super_pages(void)
+{
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+ int i;
+ for (i = 0; i < HUGE_SHIFT_ENTRIES; ++i)
+ hv_set_pte_super_shift(i, huge_shift[i]);
+#endif
+}
+
/**
* setup_cpu() - Do all necessary per-cpu, tile-specific initialization.
* @boot: Is this the boot cpu?
@@ -924,6 +983,8 @@ void __cpuinit setup_cpu(int boot)
/* Reset the network state on this cpu. */
reset_network_state();
#endif
+
+ init_super_pages();
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -1412,13 +1473,13 @@ void __init setup_per_cpu_areas(void)
for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {
/* Update the vmalloc mapping and page home. */
- pte_t *ptep =
- virt_to_pte(NULL, (unsigned long)ptr + i);
+ unsigned long addr = (unsigned long)ptr + i;
+ pte_t *ptep = virt_to_pte(NULL, addr);
pte_t pte = *ptep;
BUG_ON(pfn != pte_pfn(pte));
pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
pte = set_remote_cache_cpu(pte, cpu);
- set_pte(ptep, pte);
+ set_pte_at(&init_mm, addr, ptep, pte);
/* Update the lowmem mapping for consistency. */
lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
@@ -1431,7 +1492,7 @@ void __init setup_per_cpu_areas(void)
BUG_ON(pte_huge(*ptep));
}
BUG_ON(pfn != pte_pfn(*ptep));
- set_pte(ptep, pte);
+ set_pte_at(&init_mm, lowmem_va, ptep, pte);
}
}
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index 89529c9f060..27742e87e25 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -172,9 +172,6 @@ static tile_bundle_bits rewrite_load_store_unaligned(
return (tilepro_bundle_bits) 0;
}
-#ifndef __LITTLE_ENDIAN
-# error We assume little-endian representation with copy_xx_user size 2 here
-#endif
/* Handle unaligned load/store */
if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
unsigned short val_16;
@@ -195,8 +192,19 @@ static tile_bundle_bits rewrite_load_store_unaligned(
state->update = 1;
}
} else {
+ unsigned short val_16;
val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
- err = copy_to_user(addr, &val, size);
+ switch (size) {
+ case 2:
+ val_16 = val;
+ err = copy_to_user(addr, &val_16, sizeof(val_16));
+ break;
+ case 4:
+ err = copy_to_user(addr, &val, sizeof(val));
+ break;
+ default:
+ BUG();
+ }
}
if (err) {
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 91da0f72195..cbc73a8b8fe 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -203,7 +203,7 @@ void __init ipi_init(void)
if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
panic("Failed to initialize IPI for cpu %d\n", cpu);
- offset = hv_pte_get_pfn(pte) << PAGE_SHIFT;
+ offset = PFN_PHYS(pte_pfn(pte));
ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte);
}
#endif
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c
index cb44ba7ccd2..b08095b402d 100644
--- a/arch/tile/kernel/sys.c
+++ b/arch/tile/kernel/sys.c
@@ -32,11 +32,17 @@
#include <asm/syscalls.h>
#include <asm/pgtable.h>
#include <asm/homecache.h>
+#include <asm/cachectl.h>
#include <arch/chip.h>
-SYSCALL_DEFINE0(flush_cache)
+SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, len,
+ unsigned long, flags)
{
- homecache_evict(cpumask_of(smp_processor_id()));
+ if (flags & DCACHE)
+ homecache_evict(cpumask_of(smp_processor_id()));
+ if (flags & ICACHE)
+ flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(current->mm),
+ 0, 0, 0, NULL, NULL, 0);
return 0;
}
diff --git a/arch/tile/kernel/sysfs.c b/arch/tile/kernel/sysfs.c
index 71ae728e9d0..e25b0a89c18 100644
--- a/arch/tile/kernel/sysfs.c
+++ b/arch/tile/kernel/sysfs.c
@@ -93,6 +93,10 @@ HV_CONF_ATTR(mezz_part, HV_CONFSTR_MEZZ_PART_NUM)
HV_CONF_ATTR(mezz_serial, HV_CONFSTR_MEZZ_SERIAL_NUM)
HV_CONF_ATTR(mezz_revision, HV_CONFSTR_MEZZ_REV)
HV_CONF_ATTR(mezz_description, HV_CONFSTR_MEZZ_DESC)
+HV_CONF_ATTR(cpumod_part, HV_CONFSTR_CPUMOD_PART_NUM)
+HV_CONF_ATTR(cpumod_serial, HV_CONFSTR_CPUMOD_SERIAL_NUM)
+HV_CONF_ATTR(cpumod_revision, HV_CONFSTR_CPUMOD_REV)
+HV_CONF_ATTR(cpumod_description, HV_CONFSTR_CPUMOD_DESC)
HV_CONF_ATTR(switch_control, HV_CONFSTR_SWITCH_CONTROL)
static struct attribute *board_attrs[] = {
@@ -104,6 +108,10 @@ static struct attribute *board_attrs[] = {
&dev_attr_mezz_serial.attr,
&dev_attr_mezz_revision.attr,
&dev_attr_mezz_description.attr,
+ &dev_attr_cpumod_part.attr,
+ &dev_attr_cpumod_serial.attr,
+ &dev_attr_cpumod_revision.attr,
+ &dev_attr_cpumod_description.attr,
&dev_attr_switch_control.attr,
NULL
};
diff --git a/arch/tile/kernel/tlb.c b/arch/tile/kernel/tlb.c
index a5f241c24ca..3fd54d5bbd4 100644
--- a/arch/tile/kernel/tlb.c
+++ b/arch/tile/kernel/tlb.c
@@ -15,6 +15,7 @@
#include <linux/cpumask.h>
#include <linux/module.h>
+#include <linux/hugetlb.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
#include <hv/hypervisor.h>
@@ -49,25 +50,25 @@ void flush_tlb_current_task(void)
flush_tlb_mm(current->mm);
}
-void flush_tlb_page_mm(const struct vm_area_struct *vma, struct mm_struct *mm,
+void flush_tlb_page_mm(struct vm_area_struct *vma, struct mm_struct *mm,
unsigned long va)
{
- unsigned long size = hv_page_size(vma);
+ unsigned long size = vma_kernel_pagesize(vma);
int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
flush_remote(0, cache, mm_cpumask(mm),
va, size, size, mm_cpumask(mm), NULL, 0);
}
-void flush_tlb_page(const struct vm_area_struct *vma, unsigned long va)
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
flush_tlb_page_mm(vma, vma->vm_mm, va);
}
EXPORT_SYMBOL(flush_tlb_page);
-void flush_tlb_range(const struct vm_area_struct *vma,
+void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- unsigned long size = hv_page_size(vma);
+ unsigned long size = vma_kernel_pagesize(vma);
struct mm_struct *mm = vma->vm_mm;
int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
flush_remote(0, cache, mm_cpumask(mm), start, end - start, size,
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 73cff814ac5..5b19a23c890 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -195,6 +195,25 @@ static int special_ill(bundle_bits bundle, int *sigp, int *codep)
return 1;
}
+static const char *const int_name[] = {
+ [INT_MEM_ERROR] = "Memory error",
+ [INT_ILL] = "Illegal instruction",
+ [INT_GPV] = "General protection violation",
+ [INT_UDN_ACCESS] = "UDN access",
+ [INT_IDN_ACCESS] = "IDN access",
+#if CHIP_HAS_SN()
+ [INT_SN_ACCESS] = "SN access",
+#endif
+ [INT_SWINT_3] = "Software interrupt 3",
+ [INT_SWINT_2] = "Software interrupt 2",
+ [INT_SWINT_0] = "Software interrupt 0",
+ [INT_UNALIGN_DATA] = "Unaligned data",
+ [INT_DOUBLE_FAULT] = "Double fault",
+#ifdef __tilegx__
+ [INT_ILL_TRANS] = "Illegal virtual address",
+#endif
+};
+
void __kprobes do_trap(struct pt_regs *regs, int fault_num,
unsigned long reason)
{
@@ -211,10 +230,17 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
* current process and hope for the best.
*/
if (!user_mode(regs)) {
+ const char *name;
if (fixup_exception(regs)) /* only UNALIGN_DATA in practice */
return;
- pr_alert("Kernel took bad trap %d at PC %#lx\n",
- fault_num, regs->pc);
+ if (fault_num >= 0 &&
+ fault_num < sizeof(int_name)/sizeof(int_name[0]) &&
+ int_name[fault_num] != NULL)
+ name = int_name[fault_num];
+ else
+ name = "Unknown interrupt";
+ pr_alert("Kernel took bad trap %d (%s) at PC %#lx\n",
+ fault_num, name, regs->pc);
if (fault_num == INT_GPV)
pr_alert("GPV_REASON is %#lx\n", reason);
show_regs(regs);
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index 771b251b409..f5cada70c3c 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -18,7 +18,6 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
-#include <asm/futex.h>
#include <arch/chip.h>
/* See <asm/atomic_32.h> */
@@ -50,7 +49,7 @@ int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;
#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
-static inline int *__atomic_hashed_lock(volatile void *v)
+int *__atomic_hashed_lock(volatile void *v)
{
/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
@@ -191,47 +190,6 @@ u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
EXPORT_SYMBOL(_atomic64_cmpxchg);
-static inline int *__futex_setup(int __user *v)
-{
- /*
- * Issue a prefetch to the counter to bring it into cache.
- * As for __atomic_setup, but we can't do a read into the L1
- * since it might fault; instead we do a prefetch into the L2.
- */
- __insn_prefetch(v);
- return __atomic_hashed_lock((int __force *)v);
-}
-
-struct __get_user futex_set(u32 __user *v, int i)
-{
- return __atomic_xchg((int __force *)v, __futex_setup(v), i);
-}
-
-struct __get_user futex_add(u32 __user *v, int n)
-{
- return __atomic_xchg_add((int __force *)v, __futex_setup(v), n);
-}
-
-struct __get_user futex_or(u32 __user *v, int n)
-{
- return __atomic_or((int __force *)v, __futex_setup(v), n);
-}
-
-struct __get_user futex_andn(u32 __user *v, int n)
-{
- return __atomic_andn((int __force *)v, __futex_setup(v), n);
-}
-
-struct __get_user futex_xor(u32 __user *v, int n)
-{
- return __atomic_xor((int __force *)v, __futex_setup(v), n);
-}
-
-struct __get_user futex_cmpxchg(u32 __user *v, int o, int n)
-{
- return __atomic_cmpxchg((int __force *)v, __futex_setup(v), o, n);
-}
-
/*
* If any of the atomic or futex routines hit a bad address (not in
* the page tables at kernel PL) this routine is called. The futex
@@ -323,7 +281,4 @@ void __init __init_atomic_per_cpu(void)
BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
-
- /* The futex code makes this assumption, so we validate it here. */
- BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
}
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index 2a81d32de0d..dd5f0a33fda 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -18,14 +18,6 @@
/* arch/tile/lib/usercopy.S */
#include <linux/uaccess.h>
-EXPORT_SYMBOL(__get_user_1);
-EXPORT_SYMBOL(__get_user_2);
-EXPORT_SYMBOL(__get_user_4);
-EXPORT_SYMBOL(__get_user_8);
-EXPORT_SYMBOL(__put_user_1);
-EXPORT_SYMBOL(__put_user_2);
-EXPORT_SYMBOL(__put_user_4);
-EXPORT_SYMBOL(__put_user_8);
EXPORT_SYMBOL(strnlen_user_asm);
EXPORT_SYMBOL(strncpy_from_user_asm);
EXPORT_SYMBOL(clear_user_asm);
diff --git a/arch/tile/lib/memchr_64.c b/arch/tile/lib/memchr_64.c
index 84fdc8d8e73..6f867dbf7c5 100644
--- a/arch/tile/lib/memchr_64.c
+++ b/arch/tile/lib/memchr_64.c
@@ -15,6 +15,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/module.h>
+#include "string-endian.h"
void *memchr(const void *s, int c, size_t n)
{
@@ -39,11 +40,8 @@ void *memchr(const void *s, int c, size_t n)
/* Read the first word, but munge it so that bytes before the array
* will not match goal.
- *
- * Note that this shift count expression works because we know
- * shift counts are taken mod 64.
*/
- before_mask = (1ULL << (s_int << 3)) - 1;
+ before_mask = MASK(s_int);
v = (*p | before_mask) ^ (goal & before_mask);
/* Compute the address of the last byte. */
@@ -65,7 +63,7 @@ void *memchr(const void *s, int c, size_t n)
/* We found a match, but it might be in a byte past the end
* of the array.
*/
- ret = ((char *)p) + (__insn_ctz(bits) >> 3);
+ ret = ((char *)p) + (CFZ(bits) >> 3);
return (ret <= last_byte_ptr) ? ret : NULL;
}
EXPORT_SYMBOL(memchr);
diff --git a/arch/tile/lib/memcpy_64.c b/arch/tile/lib/memcpy_64.c
index 3fab9a6a2bb..c79b8e7c682 100644
--- a/arch/tile/lib/memcpy_64.c
+++ b/arch/tile/lib/memcpy_64.c
@@ -15,7 +15,6 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/module.h>
-#define __memcpy memcpy
/* EXPORT_SYMBOL() is in arch/tile/lib/exports.c since this should be asm. */
/* Must be 8 bytes in size. */
@@ -188,6 +187,7 @@ int USERCOPY_FUNC(void *__restrict dstv, const void *__restrict srcv, size_t n)
/* n != 0 if we get here. Write out any trailing bytes. */
dst1 = (char *)dst8;
+#ifndef __BIG_ENDIAN__
if (n & 4) {
ST4((uint32_t *)dst1, final);
dst1 += 4;
@@ -202,11 +202,30 @@ int USERCOPY_FUNC(void *__restrict dstv, const void *__restrict srcv, size_t n)
}
if (n)
ST1((uint8_t *)dst1, final);
+#else
+ if (n & 4) {
+ ST4((uint32_t *)dst1, final >> 32);
+ dst1 += 4;
+ } else {
+ final >>= 32;
+ }
+ if (n & 2) {
+ ST2((uint16_t *)dst1, final >> 16);
+ dst1 += 2;
+ } else {
+ final >>= 16;
+ }
+ if (n & 1)
+ ST1((uint8_t *)dst1, final >> 8);
+#endif
return RETVAL;
}
-
#ifdef USERCOPY_FUNC
#undef ST1
#undef ST2
diff --git a/arch/tile/lib/memcpy_tile64.c b/arch/tile/lib/memcpy_tile64.c
index b2fe15e0107..3bc4b4e40d9 100644
--- a/arch/tile/lib/memcpy_tile64.c
+++ b/arch/tile/lib/memcpy_tile64.c
@@ -160,7 +160,7 @@ retry_source:
break;
if (get_remote_cache_cpu(src_pte) == smp_processor_id())
break;
- src_page = pfn_to_page(hv_pte_get_pfn(src_pte));
+ src_page = pfn_to_page(pte_pfn(src_pte));
get_page(src_page);
if (pte_val(src_pte) != pte_val(*src_ptep)) {
put_page(src_page);
@@ -168,7 +168,7 @@ retry_source:
}
if (pte_huge(src_pte)) {
/* Adjust the PTE to correspond to a small page */
- int pfn = hv_pte_get_pfn(src_pte);
+ int pfn = pte_pfn(src_pte);
pfn += (((unsigned long)source & (HPAGE_SIZE-1))
>> PAGE_SHIFT);
src_pte = pfn_pte(pfn, src_pte);
@@ -188,7 +188,7 @@ retry_dest:
put_page(src_page);
break;
}
- dst_page = pfn_to_page(hv_pte_get_pfn(dst_pte));
+ dst_page = pfn_to_page(pte_pfn(dst_pte));
if (dst_page == src_page) {
/*
* Source and dest are on the same page; this
@@ -206,7 +206,7 @@ retry_dest:
}
if (pte_huge(dst_pte)) {
/* Adjust the PTE to correspond to a small page */
- int pfn = hv_pte_get_pfn(dst_pte);
+ int pfn = pte_pfn(dst_pte);
pfn += (((unsigned long)dest & (HPAGE_SIZE-1))
>> PAGE_SHIFT);
dst_pte = pfn_pte(pfn, dst_pte);
diff --git a/arch/tile/lib/strchr_64.c b/arch/tile/lib/strchr_64.c
index 617a9273aaa..f39f9dc422b 100644
--- a/arch/tile/lib/strchr_64.c
+++ b/arch/tile/lib/strchr_64.c
@@ -15,8 +15,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/module.h>
-
-#undef strchr
+#include "string-endian.h"
char *strchr(const char *s, int c)
{
@@ -33,13 +32,9 @@ char *strchr(const char *s, int c)
* match neither zero nor goal (we make sure the high bit of each
* byte is 1, and the low 7 bits are all the opposite of the goal
* byte).
- *
- * Note that this shift count expression works because we know shift
- * counts are taken mod 64.
*/
- const uint64_t before_mask = (1ULL << (s_int << 3)) - 1;
- uint64_t v = (*p | before_mask) ^
- (goal & __insn_v1shrsi(before_mask, 1));
+ const uint64_t before_mask = MASK(s_int);
+ uint64_t v = (*p | before_mask) ^ (goal & __insn_v1shrui(before_mask, 1));
uint64_t zero_matches, goal_matches;
while (1) {
@@ -55,8 +50,8 @@ char *strchr(const char *s, int c)
v = *++p;
}
- z = __insn_ctz(zero_matches);
- g = __insn_ctz(goal_matches);
+ z = CFZ(zero_matches);
+ g = CFZ(goal_matches);
/* If we found c before '\0' we got a match. Note that if c == '\0'
* then g == z, and we correctly return the address of the '\0'
diff --git a/arch/tile/lib/string-endian.h b/arch/tile/lib/string-endian.h
new file mode 100644
index 00000000000..c0eed7ce69c
--- /dev/null
+++ b/arch/tile/lib/string-endian.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ *
+ * Provide a mask based on the pointer alignment that
+ * sets up non-zero bytes before the beginning of the string.
+ * The MASK expression works because shift counts are taken mod 64.
+ * Also, specify how to count "first" and "last" bits
+ * when the bits have been read as a word.
+ */
+
+#include <asm/byteorder.h>
+
+#ifdef __LITTLE_ENDIAN
+#define MASK(x) (__insn_shl(1ULL, (x << 3)) - 1)
+#define NULMASK(x) ((2ULL << x) - 1)
+#define CFZ(x) __insn_ctz(x)
+#define REVCZ(x) __insn_clz(x)
+#else
+#define MASK(x) (__insn_shl(-2LL, ((-x << 3) - 1)))
+#define NULMASK(x) (-2LL << (63 - x))
+#define CFZ(x) __insn_clz(x)
+#define REVCZ(x) __insn_ctz(x)
+#endif
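The string routines touched in this patch (memchr, strchr, strlen) all rely on the trick these macros support: read the string one aligned 64-bit word at a time, and use MASK() to force the bytes that precede the possibly-unaligned start of the string to be non-zero so they can never match a NUL or the goal byte. Below is a minimal little-endian-only C sketch of the strlen() variant; it is illustrative only, substituting a plain byte scan for the __insn_v1cmpeqi/CFZ intrinsics.

#include <stddef.h>
#include <stdint.h>

/* Little-endian variant of MASK(): ones in every byte that lies before
 * the start of the string within its aligned 64-bit word. */
#define LE_MASK(offset) ((1ULL << ((offset) << 3)) - 1)

static size_t strlen_sketch(const char *s)
{
        const uintptr_t s_int = (uintptr_t)s;
        const uint64_t *p = (const uint64_t *)(s_int & ~(uintptr_t)7);

        /* OR in the mask so the bytes before s become 0xff and can never
         * look like the terminating NUL.  Reading the whole aligned word
         * never crosses a page boundary, which is what makes this safe. */
        uint64_t v = *p | LE_MASK(s_int & 7);

        for (;;) {
                int i;
                for (i = 0; i < 8; i++)  /* plain scan instead of v1cmpeqi/CFZ */
                        if (((v >> (8 * i)) & 0xff) == 0)
                                return (size_t)((const char *)p + i - s);
                v = *++p;
        }
}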
diff --git a/arch/tile/lib/strlen_64.c b/arch/tile/lib/strlen_64.c
index 1c92d46202a..9583fc3361f 100644
--- a/arch/tile/lib/strlen_64.c
+++ b/arch/tile/lib/strlen_64.c
@@ -15,8 +15,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/module.h>
-
-#undef strlen
+#include "string-endian.h"
size_t strlen(const char *s)
{
@@ -24,15 +23,13 @@ size_t strlen(const char *s)
const uintptr_t s_int = (uintptr_t) s;
const uint64_t *p = (const uint64_t *)(s_int & -8);
- /* Read the first word, but force bytes before the string to be nonzero.
- * This expression works because we know shift counts are taken mod 64.
- */
- uint64_t v = *p | ((1ULL << (s_int << 3)) - 1);
+ /* Read and MASK the first word. */
+ uint64_t v = *p | MASK(s_int);
uint64_t bits;
while ((bits = __insn_v1cmpeqi(v, 0)) == 0)
v = *++p;
- return ((const char *)p) + (__insn_ctz(bits) >> 3) - s;
+ return ((const char *)p) + (CFZ(bits) >> 3) - s;
}
EXPORT_SYMBOL(strlen);
diff --git a/arch/tile/lib/usercopy_32.S b/arch/tile/lib/usercopy_32.S
index 979f76d8374..b62d002af00 100644
--- a/arch/tile/lib/usercopy_32.S
+++ b/arch/tile/lib/usercopy_32.S
@@ -19,82 +19,6 @@
/* Access user memory, but use MMU to avoid propagating kernel exceptions. */
- .pushsection .fixup,"ax"
-
-get_user_fault:
- { move r0, zero; move r1, zero }
- { movei r2, -EFAULT; jrp lr }
- ENDPROC(get_user_fault)
-
-put_user_fault:
- { movei r0, -EFAULT; jrp lr }
- ENDPROC(put_user_fault)
-
- .popsection
-
-/*
- * __get_user_N functions take a pointer in r0, and return 0 in r2
- * on success, with the value in r0; or else -EFAULT in r2.
- */
-#define __get_user_N(bytes, LOAD) \
- STD_ENTRY(__get_user_##bytes); \
-1: { LOAD r0, r0; move r1, zero; move r2, zero }; \
- jrp lr; \
- STD_ENDPROC(__get_user_##bytes); \
- .pushsection __ex_table,"a"; \
- .word 1b, get_user_fault; \
- .popsection
-
-__get_user_N(1, lb_u)
-__get_user_N(2, lh_u)
-__get_user_N(4, lw)
-
-/*
- * __get_user_8 takes a pointer in r0, and returns 0 in r2
- * on success, with the value in r0/r1; or else -EFAULT in r2.
- */
- STD_ENTRY(__get_user_8);
-1: { lw r0, r0; addi r1, r0, 4 };
-2: { lw r1, r1; move r2, zero };
- jrp lr;
- STD_ENDPROC(__get_user_8);
- .pushsection __ex_table,"a";
- .word 1b, get_user_fault;
- .word 2b, get_user_fault;
- .popsection
-
-/*
- * __put_user_N functions take a value in r0 and a pointer in r1,
- * and return 0 in r0 on success or -EFAULT on failure.
- */
-#define __put_user_N(bytes, STORE) \
- STD_ENTRY(__put_user_##bytes); \
-1: { STORE r1, r0; move r0, zero }; \
- jrp lr; \
- STD_ENDPROC(__put_user_##bytes); \
- .pushsection __ex_table,"a"; \
- .word 1b, put_user_fault; \
- .popsection
-
-__put_user_N(1, sb)
-__put_user_N(2, sh)
-__put_user_N(4, sw)
-
-/*
- * __put_user_8 takes a value in r0/r1 and a pointer in r2,
- * and returns 0 in r0 on success or -EFAULT on failure.
- */
-STD_ENTRY(__put_user_8)
-1: { sw r2, r0; addi r2, r2, 4 }
-2: { sw r2, r1; move r0, zero }
- jrp lr
- STD_ENDPROC(__put_user_8)
- .pushsection __ex_table,"a"
- .word 1b, put_user_fault
- .word 2b, put_user_fault
- .popsection
-
-
/*
* strnlen_user_asm takes the pointer in r0, and the length bound in r1.
* It returns the length, including the terminating NUL, or zero on exception.
diff --git a/arch/tile/lib/usercopy_64.S b/arch/tile/lib/usercopy_64.S
index 2ff44f87b78..adb2dbbc70c 100644
--- a/arch/tile/lib/usercopy_64.S
+++ b/arch/tile/lib/usercopy_64.S
@@ -19,55 +19,6 @@
/* Access user memory, but use MMU to avoid propagating kernel exceptions. */
- .pushsection .fixup,"ax"
-
-get_user_fault:
- { movei r1, -EFAULT; move r0, zero }
- jrp lr
- ENDPROC(get_user_fault)
-
-put_user_fault:
- { movei r0, -EFAULT; jrp lr }
- ENDPROC(put_user_fault)
-
- .popsection
-
-/*
- * __get_user_N functions take a pointer in r0, and return 0 in r1
- * on success, with the value in r0; or else -EFAULT in r1.
- */
-#define __get_user_N(bytes, LOAD) \
- STD_ENTRY(__get_user_##bytes); \
-1: { LOAD r0, r0; move r1, zero }; \
- jrp lr; \
- STD_ENDPROC(__get_user_##bytes); \
- .pushsection __ex_table,"a"; \
- .quad 1b, get_user_fault; \
- .popsection
-
-__get_user_N(1, ld1u)
-__get_user_N(2, ld2u)
-__get_user_N(4, ld4u)
-__get_user_N(8, ld)
-
-/*
- * __put_user_N functions take a value in r0 and a pointer in r1,
- * and return 0 in r0 on success or -EFAULT on failure.
- */
-#define __put_user_N(bytes, STORE) \
- STD_ENTRY(__put_user_##bytes); \
-1: { STORE r1, r0; move r0, zero }; \
- jrp lr; \
- STD_ENDPROC(__put_user_##bytes); \
- .pushsection __ex_table,"a"; \
- .quad 1b, put_user_fault; \
- .popsection
-
-__put_user_N(1, st1)
-__put_user_N(2, st2)
-__put_user_N(4, st4)
-__put_user_N(8, st)
-
/*
* strnlen_user_asm takes the pointer in r0, and the length bound in r1.
* It returns the length, including the terminating NUL, or zero on exception.
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 22e58f51ed2..84ce7abbf5a 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -187,7 +187,7 @@ static pgd_t *get_current_pgd(void)
HV_Context ctx = hv_inquire_context();
unsigned long pgd_pfn = ctx.page_table >> PAGE_SHIFT;
struct page *pgd_page = pfn_to_page(pgd_pfn);
- BUG_ON(PageHighMem(pgd_page)); /* oops, HIGHPTE? */
+ BUG_ON(PageHighMem(pgd_page));
return (pgd_t *) __va(ctx.page_table);
}
@@ -273,11 +273,15 @@ static int handle_page_fault(struct pt_regs *regs,
int si_code;
int is_kernel_mode;
pgd_t *pgd;
+ unsigned int flags;
/* on TILE, protection faults are always writes */
if (!is_page_fault)
write = 1;
+ flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+ (write ? FAULT_FLAG_WRITE : 0));
+
is_kernel_mode = (EX1_PL(regs->ex1) != USER_PL);
tsk = validate_current();
@@ -382,6 +386,8 @@ static int handle_page_fault(struct pt_regs *regs,
vma = NULL; /* happy compiler */
goto bad_area_nosemaphore;
}
+
+retry:
down_read(&mm->mmap_sem);
}
@@ -429,7 +435,11 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, write);
+ fault = handle_mm_fault(mm, vma, address, flags);
+
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return 0;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -437,10 +447,22 @@ good_area:
goto do_sigbus;
BUG();
}
- if (fault & VM_FAULT_MAJOR)
- tsk->maj_flt++;
- else
- tsk->min_flt++;
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR)
+ tsk->maj_flt++;
+ else
+ tsk->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+ /*
+ * No need to up_read(&mm->mmap_sem) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+ goto retry;
+ }
+ }
#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
/*
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 499f73770b0..dbcbdf7b8aa 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -30,6 +30,7 @@
#include <linux/cache.h>
#include <linux/smp.h>
#include <linux/module.h>
+#include <linux/hugetlb.h>
#include <asm/page.h>
#include <asm/sections.h>
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 42cfcba4e1e..812e2d03797 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -27,85 +27,161 @@
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
+#include <asm/setup.h>
+
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+
+/*
+ * Provide an additional huge page size (in addition to the regular default
+ * huge page size) if no "hugepagesz" arguments are specified.
+ * Note that it must be smaller than the default huge page size so
+ * that these pages can be allocated on demand from the buddy allocator.
+ * You can change this to 64K (on a 16K build), 256K, 1M, or 4M,
+ * or not define it at all.
+ */
+#define ADDITIONAL_HUGE_SIZE (1024 * 1024UL)
+
+/* "Extra" page-size multipliers, one per level of the page table. */
+int huge_shift[HUGE_SHIFT_ENTRIES] = {
+#ifdef ADDITIONAL_HUGE_SIZE
+#define ADDITIONAL_HUGE_SHIFT __builtin_ctzl(ADDITIONAL_HUGE_SIZE / PAGE_SIZE)
+ [HUGE_SHIFT_PAGE] = ADDITIONAL_HUGE_SHIFT
+#endif
+};
+
+/*
+ * This routine is a hybrid of pte_alloc_map() and pte_alloc_kernel().
+ * It assumes that L2 PTEs are never in HIGHMEM (we don't support that).
+ * It locks the user pagetable, and bumps up the mm->nr_ptes field,
+ * but otherwise allocates the page table using the kernel versions.
+ */
+static pte_t *pte_alloc_hugetlb(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long address)
+{
+ pte_t *new;
+
+ if (pmd_none(*pmd)) {
+ new = pte_alloc_one_kernel(mm, address);
+ if (!new)
+ return NULL;
+
+ smp_wmb(); /* See comment in __pte_alloc */
+
+ spin_lock(&mm->page_table_lock);
+ if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
+ mm->nr_ptes++;
+ pmd_populate_kernel(mm, pmd, new);
+ new = NULL;
+ } else
+ VM_BUG_ON(pmd_trans_splitting(*pmd));
+ spin_unlock(&mm->page_table_lock);
+ if (new)
+ pte_free_kernel(mm, new);
+ }
+
+ return pte_offset_kernel(pmd, address);
+}
+#endif
pte_t *huge_pte_alloc(struct mm_struct *mm,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
pud_t *pud;
- pte_t *pte = NULL;
- /* We do not yet support multiple huge page sizes. */
- BUG_ON(sz != PMD_SIZE);
+ addr &= -sz; /* Mask off any low bits in the address. */
pgd = pgd_offset(mm, addr);
pud = pud_alloc(mm, pgd, addr);
- if (pud)
- pte = (pte_t *) pmd_alloc(mm, pud, addr);
- BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
- return pte;
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+ if (sz >= PGDIR_SIZE) {
+ BUG_ON(sz != PGDIR_SIZE &&
+ sz != PGDIR_SIZE << huge_shift[HUGE_SHIFT_PGDIR]);
+ return (pte_t *)pud;
+ } else {
+ pmd_t *pmd = pmd_alloc(mm, pud, addr);
+ if (sz >= PMD_SIZE) {
+ BUG_ON(sz != PMD_SIZE &&
+ sz != (PMD_SIZE << huge_shift[HUGE_SHIFT_PMD]));
+ return (pte_t *)pmd;
+ }
+ else {
+ if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE])
+ panic("Unexpected page size %#lx\n", sz);
+ return pte_alloc_hugetlb(mm, pmd, addr);
+ }
+ }
+#else
+ BUG_ON(sz != PMD_SIZE);
+ return (pte_t *) pmd_alloc(mm, pud, addr);
+#endif
}
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+static pte_t *get_pte(pte_t *base, int index, int level)
{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd = NULL;
-
- pgd = pgd_offset(mm, addr);
- if (pgd_present(*pgd)) {
- pud = pud_offset(pgd, addr);
- if (pud_present(*pud))
- pmd = pmd_offset(pud, addr);
+ pte_t *ptep = base + index;
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+ if (!pte_present(*ptep) && huge_shift[level] != 0) {
+ unsigned long mask = -1UL << huge_shift[level];
+ pte_t *super_ptep = base + (index & mask);
+ pte_t pte = *super_ptep;
+ if (pte_present(pte) && pte_super(pte))
+ ptep = super_ptep;
}
- return (pte_t *) pmd;
+#endif
+ return ptep;
}
-#ifdef HUGETLB_TEST
-struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
- int write)
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
- unsigned long start = address;
- int length = 1;
- int nr;
- struct page *page;
- struct vm_area_struct *vma;
-
- vma = find_vma(mm, addr);
- if (!vma || !is_vm_hugetlb_page(vma))
- return ERR_PTR(-EINVAL);
-
- pte = huge_pte_offset(mm, address);
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+ pte_t *pte;
+#endif
- /* hugetlb should be locked, and hence, prefaulted */
- WARN_ON(!pte || pte_none(*pte));
+ /* Get the top-level page table entry. */
+ pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);
+ if (!pgd_present(*pgd))
+ return NULL;
- page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
+ /* We don't have four levels. */
+ pud = pud_offset(pgd, addr);
+#ifndef __PAGETABLE_PUD_FOLDED
+# error support fourth page table level
+#endif
- WARN_ON(!PageHead(page));
+ /* Check for an L0 huge PTE, if we have three levels. */
+#ifndef __PAGETABLE_PMD_FOLDED
+ if (pud_huge(*pud))
+ return (pte_t *)pud;
- return page;
-}
-
-int pmd_huge(pmd_t pmd)
-{
- return 0;
-}
+ pmd = (pmd_t *)get_pte((pte_t *)pud_page_vaddr(*pud),
+ pmd_index(addr), 1);
+ if (!pmd_present(*pmd))
+ return NULL;
+#else
+ pmd = pmd_offset(pud, addr);
+#endif
-int pud_huge(pud_t pud)
-{
- return 0;
-}
+ /* Check for an L1 huge PTE. */
+ if (pmd_huge(*pmd))
+ return (pte_t *)pmd;
+
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+ /* Check for an L2 huge PTE. */
+ pte = get_pte((pte_t *)pmd_page_vaddr(*pmd), pte_index(addr), 2);
+ if (!pte_present(*pte))
+ return NULL;
+ if (pte_super(*pte))
+ return pte;
+#endif
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int write)
-{
return NULL;
}
-#else
-
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
int write)
{
@@ -149,8 +225,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
return 0;
}
-#endif
-
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
unsigned long addr, unsigned long len,
@@ -322,21 +396,102 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
return hugetlb_get_unmapped_area_topdown(file, addr, len,
pgoff, flags);
}
+#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
-static __init int setup_hugepagesz(char *opt)
+#ifdef CONFIG_HUGETLB_SUPER_PAGES
+static __init int __setup_hugepagesz(unsigned long ps)
{
- unsigned long ps = memparse(opt, &opt);
- if (ps == PMD_SIZE) {
- hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
- } else if (ps == PUD_SIZE) {
- hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+ int log_ps = __builtin_ctzl(ps);
+ int level, base_shift;
+
+ if ((1UL << log_ps) != ps || (log_ps & 1) != 0) {
+ pr_warn("Not enabling %ld byte huge pages;"
+ " must be a power of four.\n", ps);
+ return -EINVAL;
+ }
+
+ if (ps > 64*1024*1024*1024UL) {
+ pr_warn("Not enabling %ld MB huge pages;"
+ " largest legal value is 64 GB .\n", ps >> 20);
+ return -EINVAL;
+ } else if (ps >= PUD_SIZE) {
+ static long hv_jpage_size;
+ if (hv_jpage_size == 0)
+ hv_jpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO);
+ if (hv_jpage_size != PUD_SIZE) {
+ pr_warn("Not enabling >= %ld MB huge pages:"
+ " hypervisor reports size %ld\n",
+ PUD_SIZE >> 20, hv_jpage_size);
+ return -EINVAL;
+ }
+ level = 0;
+ base_shift = PUD_SHIFT;
+ } else if (ps >= PMD_SIZE) {
+ level = 1;
+ base_shift = PMD_SHIFT;
+ } else if (ps > PAGE_SIZE) {
+ level = 2;
+ base_shift = PAGE_SHIFT;
} else {
- pr_err("hugepagesz: Unsupported page size %lu M\n",
- ps >> 20);
- return 0;
+ pr_err("hugepagesz: huge page size %ld too small\n", ps);
+ return -EINVAL;
}
- return 1;
+
+ if (log_ps != base_shift) {
+ int shift_val = log_ps - base_shift;
+ if (huge_shift[level] != 0) {
+ int old_shift = base_shift + huge_shift[level];
+ pr_warn("Not enabling %ld MB huge pages;"
+ " already have size %ld MB.\n",
+ ps >> 20, (1UL << old_shift) >> 20);
+ return -EINVAL;
+ }
+ if (hv_set_pte_super_shift(level, shift_val) != 0) {
+ pr_warn("Not enabling %ld MB huge pages;"
+ " no hypervisor support.\n", ps >> 20);
+ return -EINVAL;
+ }
+ printk(KERN_DEBUG "Enabled %ld MB huge pages\n", ps >> 20);
+ huge_shift[level] = shift_val;
+ }
+
+ hugetlb_add_hstate(log_ps - PAGE_SHIFT);
+
+ return 0;
+}
+
+static bool saw_hugepagesz;
+
+static __init int setup_hugepagesz(char *opt)
+{
+ if (!saw_hugepagesz) {
+ saw_hugepagesz = true;
+ memset(huge_shift, 0, sizeof(huge_shift));
+ }
+ return __setup_hugepagesz(memparse(opt, NULL));
}
__setup("hugepagesz=", setup_hugepagesz);
-#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
+#ifdef ADDITIONAL_HUGE_SIZE
+/*
+ * Provide an additional huge page size if no "hugepagesz" args are given.
+ * In that case, all the cores have properly set up their hv super_shift
+ * already, but we need to notify the hugetlb code to enable the
+ * new huge page size from the Linux point of view.
+ */
+static __init int add_default_hugepagesz(void)
+{
+ if (!saw_hugepagesz) {
+ BUILD_BUG_ON(ADDITIONAL_HUGE_SIZE >= PMD_SIZE ||
+ ADDITIONAL_HUGE_SIZE <= PAGE_SIZE);
+ BUILD_BUG_ON((PAGE_SIZE << ADDITIONAL_HUGE_SHIFT) !=
+ ADDITIONAL_HUGE_SIZE);
+ BUILD_BUG_ON(ADDITIONAL_HUGE_SHIFT & 1);
+ hugetlb_add_hstate(ADDITIONAL_HUGE_SHIFT);
+ }
+ return 0;
+}
+arch_initcall(add_default_hugepagesz);
+#endif
+
+#endif /* CONFIG_HUGETLB_SUPER_PAGES */
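
The size classification performed by __setup_hugepagesz() above is easy to try outside the kernel. The following user-space sketch mirrors only that part of the logic, with PAGE_SIZE, PMD_SIZE and PUD_SIZE passed in as hypothetical example values rather than the real tilegx constants: a requested size must be a power of four, and it maps to a page-table level plus an "extra shift" over that level's native size.

#include <stdio.h>

struct hpage_levels {
	unsigned long page_size, pmd_size, pud_size;	/* example geometry */
};

static int classify_hugepagesz(unsigned long ps, const struct hpage_levels *l,
			       int *level, int *extra_shift)
{
	int log_ps = __builtin_ctzl(ps);
	int base_shift;

	/* Power of four: exactly one bit set, at an even bit position. */
	if ((1UL << log_ps) != ps || (log_ps & 1) != 0)
		return -1;

	if (ps >= l->pud_size) {
		*level = 0;
		base_shift = __builtin_ctzl(l->pud_size);
	} else if (ps >= l->pmd_size) {
		*level = 1;
		base_shift = __builtin_ctzl(l->pmd_size);
	} else if (ps > l->page_size) {
		*level = 2;
		base_shift = __builtin_ctzl(l->page_size);
	} else {
		return -1;	/* too small to be a huge page */
	}

	*extra_shift = log_ps - base_shift;	/* 0 means the level's native size */
	return 0;
}

int main(void)
{
	/* Hypothetical geometry: 64 KB pages, 64 MB PMDs, 64 GB PUDs. */
	struct hpage_levels l = { 64UL << 10, 64UL << 20, 64UL << 30 };
	unsigned long sizes[] = { 1UL << 20, 16UL << 20, 64UL << 20, 2UL << 20 };
	int level, shift;

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		if (classify_hugepagesz(sizes[i], &l, &level, &shift) == 0)
			printf("%lu KB -> level %d, extra shift %d\n",
			       sizes[i] >> 10, level, shift);
		else
			printf("%lu KB rejected\n", sizes[i] >> 10);
	}
	return 0;
}

With that geometry the sketch accepts 1 MB, 16 MB and 64 MB (the last with an extra shift of 0, i.e. the PMD's own size) and rejects 2 MB, whose log2 is odd and therefore not a power of four.
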
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 6a9d20ddc34..630dd2ce2af 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -82,7 +82,7 @@ static int num_l2_ptes[MAX_NUMNODES];
static void init_prealloc_ptes(int node, int pages)
{
- BUG_ON(pages & (HV_L2_ENTRIES-1));
+ BUG_ON(pages & (PTRS_PER_PTE - 1));
if (pages) {
num_l2_ptes[node] = pages;
l2_ptes[node] = __alloc_bootmem(pages * sizeof(pte_t),
@@ -131,14 +131,9 @@ static void __init assign_pte(pmd_t *pmd, pte_t *page_table)
#ifdef __tilegx__
-#if HV_L1_SIZE != HV_L2_SIZE
-# error Rework assumption that L1 and L2 page tables are same size.
-#endif
-
-/* Since pmd_t arrays and pte_t arrays are the same size, just use casts. */
static inline pmd_t *alloc_pmd(void)
{
- return (pmd_t *)alloc_pte();
+ return __alloc_bootmem(L1_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
}
static inline void assign_pmd(pud_t *pud, pmd_t *pmd)
@@ -444,6 +439,7 @@ static pgd_t pgtables[PTRS_PER_PGD]
*/
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
+ unsigned long long irqmask;
unsigned long address, pfn;
pmd_t *pmd;
pte_t *pte;
@@ -633,10 +629,13 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
* - install pgtables[] as the real page table
* - flush the TLB so the new page table takes effect
*/
+ irqmask = interrupt_mask_save_mask();
+ interrupt_mask_set_mask(-1ULL);
rc = flush_and_install_context(__pa(pgtables),
init_pgprot((unsigned long)pgtables),
__get_cpu_var(current_asid),
cpumask_bits(my_cpu_mask));
+ interrupt_mask_restore_mask(irqmask);
BUG_ON(rc != 0);
/* Copy the page table back to the normal swapper_pg_dir. */
@@ -699,6 +698,7 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
#endif /* CONFIG_HIGHMEM */
+#ifndef CONFIG_64BIT
static void __init init_free_pfn_range(unsigned long start, unsigned long end)
{
unsigned long pfn;
@@ -771,6 +771,7 @@ static void __init set_non_bootmem_pages_init(void)
init_free_pfn_range(start, end);
}
}
+#endif
/*
* paging_init() sets up the page tables - note that all of lowmem is
@@ -807,7 +808,7 @@ void __init paging_init(void)
* changing init_mm once we get up and running, and there's no
* need for e.g. vmalloc_sync_all().
*/
- BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END));
+ BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END - 1));
pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START);
assign_pmd(pud, alloc_pmd());
#endif
@@ -859,8 +860,10 @@ void __init mem_init(void)
/* this will put all bootmem onto the freelists */
totalram_pages += free_all_bootmem();
+#ifndef CONFIG_64BIT
/* count all remaining LOWMEM and give all HIGHMEM to page allocator */
set_non_bootmem_pages_init();
+#endif
codesize = (unsigned long)&_etext - (unsigned long)&_text;
datasize = (unsigned long)&_end - (unsigned long)&_sdata;
diff --git a/arch/tile/mm/migrate.h b/arch/tile/mm/migrate.h
index cd45a0837fa..91683d97917 100644
--- a/arch/tile/mm/migrate.h
+++ b/arch/tile/mm/migrate.h
@@ -24,6 +24,9 @@
/*
* This function is used as a helper when setting up the initial
* page table (swapper_pg_dir).
+ *
+ * You must mask ALL interrupts prior to invoking this code, since
+ * you can't legally touch the stack during the cache flush.
*/
extern int flush_and_install_context(HV_PhysAddr page_table, HV_PTE access,
HV_ASID asid,
@@ -39,6 +42,9 @@ extern int flush_and_install_context(HV_PhysAddr page_table, HV_PTE access,
*
* Note that any non-NULL pointers must not point to the page that
* is handled by the stack_pte itself.
+ *
+ * You must mask ALL interrupts prior to invoking this code, since
+ * you can't legally touch the stack during the cache flush.
*/
extern int homecache_migrate_stack_and_flush(pte_t stack_pte, unsigned long va,
size_t length, pte_t *stack_ptep,
diff --git a/arch/tile/mm/migrate_32.S b/arch/tile/mm/migrate_32.S
index ac01a7cdf77..5305814bf18 100644
--- a/arch/tile/mm/migrate_32.S
+++ b/arch/tile/mm/migrate_32.S
@@ -40,8 +40,7 @@
#define FRAME_R32 16
#define FRAME_R33 20
#define FRAME_R34 24
-#define FRAME_R35 28
-#define FRAME_SIZE 32
+#define FRAME_SIZE 28
@@ -66,12 +65,11 @@
#define r_my_cpumask r5
/* Locals (callee-save); must not be more than FRAME_xxx above. */
-#define r_save_ics r30
-#define r_context_lo r31
-#define r_context_hi r32
-#define r_access_lo r33
-#define r_access_hi r34
-#define r_asid r35
+#define r_context_lo r30
+#define r_context_hi r31
+#define r_access_lo r32
+#define r_access_hi r33
+#define r_asid r34
STD_ENTRY(flush_and_install_context)
/*
@@ -104,11 +102,7 @@ STD_ENTRY(flush_and_install_context)
sw r_tmp, r33
addi r_tmp, sp, FRAME_R34
}
- {
- sw r_tmp, r34
- addi r_tmp, sp, FRAME_R35
- }
- sw r_tmp, r35
+ sw r_tmp, r34
/* Move some arguments to callee-save registers. */
{
@@ -121,13 +115,6 @@ STD_ENTRY(flush_and_install_context)
}
move r_asid, r_asid_in
- /* Disable interrupts, since we can't use our stack. */
- {
- mfspr r_save_ics, INTERRUPT_CRITICAL_SECTION
- movei r_tmp, 1
- }
- mtspr INTERRUPT_CRITICAL_SECTION, r_tmp
-
/* First, flush our L2 cache. */
{
move r0, zero /* cache_pa */
@@ -163,7 +150,7 @@ STD_ENTRY(flush_and_install_context)
}
{
move r4, r_asid
- movei r5, HV_CTX_DIRECTIO
+ moveli r5, HV_CTX_DIRECTIO | CTX_PAGE_FLAG
}
jal hv_install_context
bnz r0, .Ldone
@@ -175,9 +162,6 @@ STD_ENTRY(flush_and_install_context)
}
.Ldone:
- /* Reset interrupts back how they were before. */
- mtspr INTERRUPT_CRITICAL_SECTION, r_save_ics
-
/* Restore the callee-saved registers and return. */
addli lr, sp, FRAME_SIZE
{
@@ -202,10 +186,6 @@ STD_ENTRY(flush_and_install_context)
}
{
lw r34, r_tmp
- addli r_tmp, sp, FRAME_R35
- }
- {
- lw r35, r_tmp
addi sp, sp, FRAME_SIZE
}
jrp lr
diff --git a/arch/tile/mm/migrate_64.S b/arch/tile/mm/migrate_64.S
index e76fea688be..1d15b10833d 100644
--- a/arch/tile/mm/migrate_64.S
+++ b/arch/tile/mm/migrate_64.S
@@ -38,8 +38,7 @@
#define FRAME_R30 16
#define FRAME_R31 24
#define FRAME_R32 32
-#define FRAME_R33 40
-#define FRAME_SIZE 48
+#define FRAME_SIZE 40
@@ -60,10 +59,9 @@
#define r_my_cpumask r3
/* Locals (callee-save); must not be more than FRAME_xxx above. */
-#define r_save_ics r30
-#define r_context r31
-#define r_access r32
-#define r_asid r33
+#define r_context r30
+#define r_access r31
+#define r_asid r32
/*
* Caller-save locals and frame constants are the same as
@@ -93,11 +91,7 @@ STD_ENTRY(flush_and_install_context)
st r_tmp, r31
addi r_tmp, sp, FRAME_R32
}
- {
- st r_tmp, r32
- addi r_tmp, sp, FRAME_R33
- }
- st r_tmp, r33
+ st r_tmp, r32
/* Move some arguments to callee-save registers. */
{
@@ -106,13 +100,6 @@ STD_ENTRY(flush_and_install_context)
}
move r_asid, r_asid_in
- /* Disable interrupts, since we can't use our stack. */
- {
- mfspr r_save_ics, INTERRUPT_CRITICAL_SECTION
- movei r_tmp, 1
- }
- mtspr INTERRUPT_CRITICAL_SECTION, r_tmp
-
/* First, flush our L2 cache. */
{
move r0, zero /* cache_pa */
@@ -147,7 +134,7 @@ STD_ENTRY(flush_and_install_context)
}
{
move r2, r_asid
- movei r3, HV_CTX_DIRECTIO
+ moveli r3, HV_CTX_DIRECTIO | CTX_PAGE_FLAG
}
jal hv_install_context
bnez r0, 1f
@@ -158,10 +145,7 @@ STD_ENTRY(flush_and_install_context)
jal hv_flush_all
}
-1: /* Reset interrupts back how they were before. */
- mtspr INTERRUPT_CRITICAL_SECTION, r_save_ics
-
- /* Restore the callee-saved registers and return. */
+1: /* Restore the callee-saved registers and return. */
addli lr, sp, FRAME_SIZE
{
ld lr, lr
@@ -177,10 +161,6 @@ STD_ENTRY(flush_and_install_context)
}
{
ld r32, r_tmp
- addli r_tmp, sp, FRAME_R33
- }
- {
- ld r33, r_tmp
addi sp, sp, FRAME_SIZE
}
jrp lr
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 2410aa899b3..345edfed9fc 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -132,15 +132,6 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
}
-#if defined(CONFIG_HIGHPTE)
-pte_t *_pte_offset_map(pmd_t *dir, unsigned long address)
-{
- pte_t *pte = kmap_atomic(pmd_page(*dir)) +
- (pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK;
- return &pte[pte_index(address)];
-}
-#endif
-
/**
* shatter_huge_page() - ensure a given address is mapped by a small page.
*
@@ -289,33 +280,26 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
#define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER)
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
+ int order)
{
gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
struct page *p;
-#if L2_USER_PGTABLE_ORDER > 0
int i;
-#endif
-
-#ifdef CONFIG_HIGHPTE
- flags |= __GFP_HIGHMEM;
-#endif
p = alloc_pages(flags, L2_USER_PGTABLE_ORDER);
if (p == NULL)
return NULL;
-#if L2_USER_PGTABLE_ORDER > 0
/*
* Make every page have a page_count() of one, not just the first.
* We don't use __GFP_COMP since it doesn't look like it works
* correctly with tlb_remove_page().
*/
- for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
+ for (i = 1; i < order; ++i) {
init_page_count(p+i);
inc_zone_page_state(p+i, NR_PAGETABLE);
}
-#endif
pgtable_page_ctor(p);
return p;
@@ -326,28 +310,28 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
* process). We have to correct whatever pte_alloc_one() did before
* returning the pages to the allocator.
*/
-void pte_free(struct mm_struct *mm, struct page *p)
+void pgtable_free(struct mm_struct *mm, struct page *p, int order)
{
int i;
pgtable_page_dtor(p);
__free_page(p);
- for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
+ for (i = 1; i < order; ++i) {
__free_page(p+i);
dec_zone_page_state(p+i, NR_PAGETABLE);
}
}
-void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
- unsigned long address)
+void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
+ unsigned long address, int order)
{
int i;
pgtable_page_dtor(pte);
tlb_remove_page(tlb, pte);
- for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
+ for (i = 1; i < order; ++i) {
tlb_remove_page(tlb, pte + i);
dec_zone_page_state(pte + i, NR_PAGETABLE);
}
@@ -490,7 +474,7 @@ void set_pte(pte_t *ptep, pte_t pte)
/* Can this mm load a PTE with cached_priority set? */
static inline int mm_is_priority_cached(struct mm_struct *mm)
{
- return mm->context.priority_cached;
+ return mm->context.priority_cached != 0;
}
/*
@@ -500,8 +484,8 @@ static inline int mm_is_priority_cached(struct mm_struct *mm)
void start_mm_caching(struct mm_struct *mm)
{
if (!mm_is_priority_cached(mm)) {
- mm->context.priority_cached = -1U;
- hv_set_caching(-1U);
+ mm->context.priority_cached = -1UL;
+ hv_set_caching(-1UL);
}
}
@@ -516,7 +500,7 @@ void start_mm_caching(struct mm_struct *mm)
* Presumably we'll come back later and have more luck and clear
* the value then; for now we'll just keep the cache marked for priority.
*/
-static unsigned int update_priority_cached(struct mm_struct *mm)
+static unsigned long update_priority_cached(struct mm_struct *mm)
{
if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) {
struct vm_area_struct *vm;
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 55c0661e2b5..097091059aa 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -121,15 +121,8 @@ LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc
LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt))
-CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
-define cmd_vmlinux__
- $(CC) $(CFLAGS_vmlinux) -o $@ \
- -Wl,-T,$(vmlinux-lds) $(vmlinux-init) \
- -Wl,--start-group $(vmlinux-main) -Wl,--end-group \
- -lutil \
- $(filter-out $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o \
- FORCE ,$^) ; rm -f linux
-endef
+# Used by link-vmlinux.sh which has special support for um link
+export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
# When cleaning we don't include .config, so we don't include
# TT or skas makefiles and don't clean skas_ptregs.h.
diff --git a/arch/um/include/asm/kvm_para.h b/arch/um/include/asm/kvm_para.h
new file mode 100644
index 00000000000..14fab8f0b95
--- /dev/null
+++ b/arch/um/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/unicore32/include/asm/kvm_para.h b/arch/unicore32/include/asm/kvm_para.h
new file mode 100644
index 00000000000..14fab8f0b95
--- /dev/null
+++ b/arch/unicore32/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild
index 0e9dec6cadd..e5287d8517a 100644
--- a/arch/x86/Kbuild
+++ b/arch/x86/Kbuild
@@ -1,4 +1,3 @@
-
obj-$(CONFIG_KVM) += kvm/
# Xen paravirtualization support
@@ -7,6 +6,7 @@ obj-$(CONFIG_XEN) += xen/
# lguest paravirtualization support
obj-$(CONFIG_LGUEST_GUEST) += lguest/
+obj-y += realmode/
obj-y += kernel/
obj-y += mm/
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 66cc380bebf..d700811785e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -32,6 +32,7 @@ config X86
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_FRAME_POINTERS
select HAVE_DMA_ATTRS
+ select HAVE_DMA_CONTIGUOUS if !SWIOTLB
select HAVE_KRETPROBES
select HAVE_OPTPROBES
select HAVE_FTRACE_MCOUNT_RECORD
@@ -92,6 +93,8 @@ config X86
select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
select GENERIC_TIME_VSYSCALL if X86_64
select KTIME_SCALAR if X86_32
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
config INSTRUCTION_DECODER
def_bool (KPROBES || PERF_EVENTS || UPROBES)
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 610001d385d..0c44630d178 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -29,7 +29,7 @@
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mpspec.h>
-#include <asm/trampoline.h>
+#include <asm/realmode.h>
#define COMPILER_DEPENDENT_INT64 long long
#define COMPILER_DEPENDENT_UINT64 unsigned long long
@@ -117,11 +117,8 @@ static inline void acpi_disable_pci(void)
/* Low-level suspend routine. */
extern int acpi_suspend_lowlevel(void);
-extern const unsigned char acpi_wakeup_code[];
-#define acpi_wakeup_address (__pa(TRAMPOLINE_SYM(acpi_wakeup_code)))
-
-/* early initialization routine */
-extern void acpi_reserve_wakeup_memory(void);
+/* Physical address to resume after wakeup */
+#define acpi_wakeup_address ((unsigned long)(real_mode_header->wakeup_start))
/*
* Check if the CPU can handle C2 and deeper
diff --git a/arch/x86/include/asm/dma-contiguous.h b/arch/x86/include/asm/dma-contiguous.h
new file mode 100644
index 00000000000..c0924165997
--- /dev/null
+++ b/arch/x86/include/asm/dma-contiguous.h
@@ -0,0 +1,13 @@
+#ifndef ASMX86_DMA_CONTIGUOUS_H
+#define ASMX86_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <asm-generic/dma-contiguous.h>
+
+static inline void
+dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
+
+#endif
+#endif
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 61c0bd25845..f7b4c7903e7 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -13,6 +13,7 @@
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>
+#include <linux/dma-contiguous.h>
#ifdef CONFIG_ISA
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
@@ -62,6 +63,10 @@ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
struct dma_attrs *attrs);
+extern void dma_generic_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ struct dma_attrs *attrs);
+
#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index c222e1a1b12..1ac46c22dd5 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -200,7 +200,7 @@ typedef u32 __attribute__((vector_size(16))) sse128_t;
/* Type, address-of, and value of an instruction's operand. */
struct operand {
- enum { OP_REG, OP_MEM, OP_IMM, OP_XMM, OP_NONE } type;
+ enum { OP_REG, OP_MEM, OP_IMM, OP_XMM, OP_MM, OP_NONE } type;
unsigned int bytes;
union {
unsigned long orig_val;
@@ -213,12 +213,14 @@ struct operand {
unsigned seg;
} mem;
unsigned xmm;
+ unsigned mm;
} addr;
union {
unsigned long val;
u64 val64;
char valptr[sizeof(unsigned long) + 2];
sse128_t vec_val;
+ u64 mm_val;
};
};
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e5b97be12d2..db7c1f2709a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -173,6 +173,9 @@ enum {
#define DR7_FIXED_1 0x00000400
#define DR7_VOLATILE 0xffff23ff
+/* apic attention bits */
+#define KVM_APIC_CHECK_VAPIC 0
+
/*
* We don't want allocation failures within the mmu code, so we preallocate
* enough memory for a single page fault in a cache.
@@ -238,8 +241,6 @@ struct kvm_mmu_page {
#endif
int write_flooding_count;
-
- struct rcu_head rcu;
};
struct kvm_pio_request {
@@ -338,6 +339,7 @@ struct kvm_vcpu_arch {
u64 efer;
u64 apic_base;
struct kvm_lapic *apic; /* kernel irqchip context */
+ unsigned long apic_attention;
int32_t apic_arb_prio;
int mp_state;
int sipi_vector;
@@ -537,8 +539,6 @@ struct kvm_arch {
u64 hv_guest_os_id;
u64 hv_hypercall;
- atomic_t reader_counter;
-
#ifdef CONFIG_KVM_MMU_AUDIT
int audit_point;
#endif
@@ -713,8 +713,9 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
-int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
- struct kvm_memory_slot *slot);
+void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset, unsigned long mask);
void kvm_mmu_zap_all(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 183922e13de..63ab1661d00 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -95,6 +95,14 @@ struct kvm_vcpu_pv_apf_data {
extern void kvmclock_init(void);
extern int kvm_register_clock(char *txt);
+#ifdef CONFIG_KVM_CLOCK
+bool kvm_check_and_clear_guest_paused(void);
+#else
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+#endif /* CONFIG_KVM_CLOCK */
/* This instruction is vmcall. On non-VT architectures, it will generate a
* trap that we will then rewrite to the appropriate instruction.
@@ -173,14 +181,16 @@ static inline int kvm_para_available(void)
if (boot_cpu_data.cpuid_level < 0)
return 0; /* So we don't blow up on old processors */
- cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
- memcpy(signature + 0, &ebx, 4);
- memcpy(signature + 4, &ecx, 4);
- memcpy(signature + 8, &edx, 4);
- signature[12] = 0;
+ if (cpu_has_hypervisor) {
+ cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
+ memcpy(signature + 0, &ebx, 4);
+ memcpy(signature + 4, &ecx, 4);
+ memcpy(signature + 8, &edx, 4);
+ signature[12] = 0;
- if (strcmp(signature, "KVMKVMKVM") == 0)
- return 1;
+ if (strcmp(signature, "KVMKVMKVM") == 0)
+ return 1;
+ }
return 0;
}
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index effff47a3c8..43876f16caf 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -31,6 +31,56 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
ptep->pte_low = pte.pte_low;
}
+#define pmd_read_atomic pmd_read_atomic
+/*
+ * pte_offset_map_lock on 32-bit PAE kernels was reading the pmd_t with
+ * a "*pmdp" dereference done by gcc. The problem is that, in certain places
+ * where pte_offset_map_lock is called, concurrent page faults are
+ * allowed if the mmap_sem is held for reading. An example is mincore
+ * vs page faults vs MADV_DONTNEED. On the page fault side
+ * pmd_populate rightfully does a set_64bit, but if we're reading the
+ * pmd_t with a "*pmdp" on the mincore side, an SMP race can happen
+ * because gcc will not read the 64 bits of the pmd atomically. To fix
+ * this, all places running pte_offset_map_lock() while holding the
+ * mmap_sem in read mode shall read the pmdp pointer using this
+ * function to know if the pmd is null or not, and in turn to know if
+ * they can run pte_offset_map_lock or pmd_trans_huge or other pmd
+ * operations.
+ *
+ * Without THP, if the mmap_sem is held for reading, the pmd can only
+ * transition from null to not null while pmd_read_atomic runs, so
+ * there is no need to literally read it atomically.
+ *
+ * With THP, if the mmap_sem is held for reading, the pmd can become
+ * THP or null or point to a pte (and in turn become "stable") at any
+ * time under pmd_read_atomic, so it's mandatory to read it atomically
+ * with cmpxchg8b.
+ */
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+{
+ pmdval_t ret;
+ u32 *tmp = (u32 *)pmdp;
+
+ ret = (pmdval_t) (*tmp);
+ if (ret) {
+ /*
+ * If the low part is null, we must not read the high part
+ * or we can end up with a partial pmd.
+ */
+ smp_rmb();
+ ret |= ((pmdval_t)*(tmp + 1)) << 32;
+ }
+
+ return (pmd_t) { ret };
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+{
+ return (pmd_t) { atomic64_read((atomic64_t *)pmdp) };
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
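
The pmd_read_atomic() comment above is about a plain C-level 64-bit dereference being split by the compiler into two non-atomic 32-bit loads. As a rough user-space illustration (not kernel code), the sketch below reproduces the shape of the !THP variant: the writer publishes the whole 64-bit value with a single atomic store, standing in for set_64bit(), and the reader looks at the high half only after seeing a non-zero low half. It assumes a 64-bit little-endian host and uses GCC __atomic builtins in place of set_64bit() and smp_rmb().

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Stands in for the pmd entry; all-zero means "none". */
static union {
	uint64_t whole;
	uint32_t half[2];	/* little-endian: half[0] is the low word */
} slot;

static void *writer(void *arg)
{
	(void)arg;
	/* One atomic 64-bit store, like pmd_populate()'s set_64bit(). */
	__atomic_store_n(&slot.whole, 0x1234567890abcdefULL, __ATOMIC_RELEASE);
	return NULL;
}

static uint64_t read_low_then_high(void)
{
	uint64_t ret;

	ret = __atomic_load_n(&slot.half[0], __ATOMIC_RELAXED);
	if (ret) {
		/* Low half is non-zero, so the high half is stable now. */
		__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* ~smp_rmb() */
		ret |= (uint64_t)__atomic_load_n(&slot.half[1], __ATOMIC_RELAXED) << 32;
	}
	return ret;	/* either 0 ("none") or the complete value */
}

int main(void)
{
	pthread_t t;
	uint64_t v;

	pthread_create(&t, NULL, writer, NULL);
	do {
		v = read_low_then_high();
	} while (v == 0);
	pthread_join(t, NULL);
	printf("read %#llx\n", (unsigned long long)v);
	return 0;
}
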
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 7745b257f03..39bc5777211 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -544,13 +544,16 @@ static inline void load_sp0(struct tss_struct *tss,
* enable), so that any CPU's that boot up
* after us can get the correct flags.
*/
-extern unsigned long mmu_cr4_features;
+extern unsigned long mmu_cr4_features;
+extern u32 *trampoline_cr4_features;
static inline void set_in_cr4(unsigned long mask)
{
unsigned long cr4;
mmu_cr4_features |= mask;
+ if (trampoline_cr4_features)
+ *trampoline_cr4_features = mmu_cr4_features;
cr4 = read_cr4();
cr4 |= mask;
write_cr4(cr4);
@@ -561,6 +564,8 @@ static inline void clear_in_cr4(unsigned long mask)
unsigned long cr4;
mmu_cr4_features &= ~mask;
+ if (trampoline_cr4_features)
+ *trampoline_cr4_features = mmu_cr4_features;
cr4 = read_cr4();
cr4 &= ~mask;
write_cr4(cr4);
diff --git a/arch/x86/include/asm/pvclock-abi.h b/arch/x86/include/asm/pvclock-abi.h
index 35f2d1948ad..6167fd79818 100644
--- a/arch/x86/include/asm/pvclock-abi.h
+++ b/arch/x86/include/asm/pvclock-abi.h
@@ -40,5 +40,6 @@ struct pvclock_wall_clock {
} __attribute__((__packed__));
#define PVCLOCK_TSC_STABLE_BIT (1 << 0)
+#define PVCLOCK_GUEST_STOPPED (1 << 1)
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PVCLOCK_ABI_H */
diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
new file mode 100644
index 00000000000..fce3f4ae5bd
--- /dev/null
+++ b/arch/x86/include/asm/realmode.h
@@ -0,0 +1,62 @@
+#ifndef _ARCH_X86_REALMODE_H
+#define _ARCH_X86_REALMODE_H
+
+#include <linux/types.h>
+#include <asm/io.h>
+
+/* This must match data at realmode.S */
+struct real_mode_header {
+ u32 text_start;
+ u32 ro_end;
+ /* SMP trampoline */
+ u32 trampoline_start;
+ u32 trampoline_status;
+ u32 trampoline_header;
+#ifdef CONFIG_X86_64
+ u32 trampoline_pgd;
+#endif
+ /* ACPI S3 wakeup */
+#ifdef CONFIG_ACPI_SLEEP
+ u32 wakeup_start;
+ u32 wakeup_header;
+#endif
+ /* APM/BIOS reboot */
+#ifdef CONFIG_X86_32
+ u32 machine_real_restart_asm;
+#endif
+};
+
+/* This must match data at trampoline_32/64.S */
+struct trampoline_header {
+#ifdef CONFIG_X86_32
+ u32 start;
+ u16 gdt_pad;
+ u16 gdt_limit;
+ u32 gdt_base;
+#else
+ u64 start;
+ u64 efer;
+ u32 cr4;
+#endif
+};
+
+extern struct real_mode_header *real_mode_header;
+extern unsigned char real_mode_blob_end[];
+
+extern unsigned long init_rsp;
+extern unsigned long initial_code;
+extern unsigned long initial_gs;
+
+extern unsigned char real_mode_blob[];
+extern unsigned char real_mode_relocs[];
+
+#ifdef CONFIG_X86_32
+extern unsigned char startup_32_smp[];
+extern unsigned char boot_gdt[];
+#else
+extern unsigned char secondary_startup_64[];
+#endif
+
+extern void __init setup_real_mode(void);
+
+#endif /* _ARCH_X86_REALMODE_H */
diff --git a/arch/x86/include/asm/sta2x11.h b/arch/x86/include/asm/sta2x11.h
new file mode 100644
index 00000000000..e9d32df89cc
--- /dev/null
+++ b/arch/x86/include/asm/sta2x11.h
@@ -0,0 +1,12 @@
+/*
+ * Header file for STMicroelectronics ConneXt (STA2X11) IOHub
+ */
+#ifndef __ASM_STA2X11_H
+#define __ASM_STA2X11_H
+
+#include <linux/pci.h>
+
+/* This needs to be called from the MFD to configure its sub-devices */
+struct sta2x11_instance *sta2x11_get_instance(struct pci_dev *pdev);
+
+#endif /* __ASM_STA2X11_H */
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h
deleted file mode 100644
index feca3118a73..00000000000
--- a/arch/x86/include/asm/trampoline.h
+++ /dev/null
@@ -1,39 +0,0 @@
-#ifndef _ASM_X86_TRAMPOLINE_H
-#define _ASM_X86_TRAMPOLINE_H
-
-#ifndef __ASSEMBLY__
-
-#include <linux/types.h>
-#include <asm/io.h>
-
-/*
- * Trampoline 80x86 program as an array. These are in the init rodata
- * segment, but that's okay, because we only care about the relative
- * addresses of the symbols.
- */
-extern const unsigned char x86_trampoline_start [];
-extern const unsigned char x86_trampoline_end [];
-extern unsigned char *x86_trampoline_base;
-
-extern unsigned long init_rsp;
-extern unsigned long initial_code;
-extern unsigned long initial_gs;
-
-extern void __init setup_trampolines(void);
-
-extern const unsigned char trampoline_data[];
-extern const unsigned char trampoline_status[];
-
-#define TRAMPOLINE_SYM(x) \
- ((void *)(x86_trampoline_base + \
- ((const unsigned char *)(x) - x86_trampoline_start)))
-
-/* Address of the SMP trampoline */
-static inline unsigned long trampoline_address(void)
-{
- return virt_to_phys(TRAMPOLINE_SYM(trampoline_data));
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_X86_TRAMPOLINE_H */
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 851fe0dc13b..04cd6882308 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -32,6 +32,7 @@
#define segment_eq(a, b) ((a).seg == (b).seg)
+#define user_addr_max() (current_thread_info()->addr_limit.seg)
#define __addr_ok(addr) \
((unsigned long __force)(addr) < \
(current_thread_info()->addr_limit.seg))
@@ -565,6 +566,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
+
/*
* movsl can be slow when source and dest are not both 8-byte aligned
*/
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 8084bc73b18..576e39bca6a 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -213,23 +213,6 @@ static inline unsigned long __must_check copy_from_user(void *to,
return n;
}
-/**
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
- * Context: User context only. This function may sleep.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
- */
-#define strlen_user(str) strnlen_user(str, LONG_MAX)
-
-long strnlen_user(const char __user *str, long n);
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index fcd4b6f3ef0..8e796fbbf9c 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -208,9 +208,6 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
}
}
-__must_check long strnlen_user(const char __user *str, long n);
-__must_check long __strnlen_user(const char __user *str, long n);
-__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
index ae03facfadd..5b238981542 100644
--- a/arch/x86/include/asm/word-at-a-time.h
+++ b/arch/x86/include/asm/word-at-a-time.h
@@ -10,6 +10,11 @@
* bit count instruction, that might be better than the multiply
* and shift, for example.
*/
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
#ifdef CONFIG_64BIT
@@ -37,10 +42,31 @@ static inline long count_masked_bytes(long mask)
#endif
-/* Return the high bit set in the first byte that is a zero */
-static inline unsigned long has_zero(unsigned long a)
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
{
- return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80);
+ return count_masked_bytes(mask);
}
/*
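
The new word-at-a-time helpers are easiest to see in a strlen-style loop. The sketch below is a user-space approximation, not the kernel's strnlen_user(): it reuses the has_zero() mask computation and the WORD_AT_A_TIME_CONSTANTS layout from the hunk above, but replaces the create_zero_mask()/find_zero() pair with a simplified __builtin_ctzl()-based find_zero(), and it assumes a little-endian machine plus a zero-padded buffer so that reading one whole word past the NUL is harmless.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define REPEAT_BYTE(x) ((~0UL / 0xff) * (x))

struct word_at_a_time {
	unsigned long one_bits, high_bits;
};
#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }

/* Nonzero result has 0x80 set in (at least) the first zero byte. */
static unsigned long has_zero(unsigned long a, unsigned long *bits,
			      const struct word_at_a_time *c)
{
	unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
	*bits = mask;
	return mask;
}

/* Byte index of the first zero byte (simplified; the kernel uses a multiply). */
static unsigned long find_zero(unsigned long mask)
{
	return __builtin_ctzl(mask) >> 3;
}

static size_t word_strlen(const char *s)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	unsigned long w, bits;
	size_t len = 0;

	for (;; len += sizeof(w)) {
		memcpy(&w, s + len, sizeof(w));		/* one word per iteration */
		if (has_zero(w, &bits, &constants))
			return len + find_zero(bits);
	}
}

int main(void)
{
	char *buf = calloc(1, 64);	/* zero padding past the string */

	strcpy(buf, "word at a time");
	printf("%zu == %zu\n", word_strlen(buf), strlen(buf));
	free(buf);
	return 0;
}
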
diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h
index 1df35417c41..cc146d51449 100644
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -6,6 +6,7 @@ enum ipi_vector {
XEN_CALL_FUNCTION_VECTOR,
XEN_CALL_FUNCTION_SINGLE_VECTOR,
XEN_SPIN_UNLOCK_VECTOR,
+ XEN_IRQ_WORK_VECTOR,
XEN_NR_IPIS,
};
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index c34f96c2f7a..93971e841dd 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -44,6 +44,7 @@ extern unsigned long machine_to_phys_nr;
extern unsigned long get_phys_to_machine(unsigned long pfn);
extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
+extern bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
extern unsigned long set_phys_range_identity(unsigned long pfn_s,
unsigned long pfn_e);
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 9bba5b79902..8215e5652d9 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -35,7 +35,6 @@ obj-y += tsc.o io_delay.o rtc.o
obj-y += pci-iommu_table.o
obj-y += resource.o
-obj-y += trampoline.o trampoline_$(BITS).o
obj-y += process.o
obj-y += i387.o xsave.o
obj-y += ptrace.o
@@ -48,7 +47,6 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += cpu/
obj-y += acpi/
obj-y += reboot.o
-obj-$(CONFIG_X86_32) += reboot_32.o
obj-$(CONFIG_X86_MSR) += msr.o
obj-$(CONFIG_X86_CPUID) += cpuid.o
obj-$(CONFIG_PCI) += early-quirks.o
diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile
index 6f35260bb3e..163b2258147 100644
--- a/arch/x86/kernel/acpi/Makefile
+++ b/arch/x86/kernel/acpi/Makefile
@@ -1,14 +1,7 @@
-subdir- := realmode
-
obj-$(CONFIG_ACPI) += boot.o
-obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup_rm.o wakeup_$(BITS).o
+obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup_$(BITS).o
ifneq ($(CONFIG_ACPI_PROCESSOR),)
obj-y += cstate.o
endif
-$(obj)/wakeup_rm.o: $(obj)/realmode/wakeup.bin
-
-$(obj)/realmode/wakeup.bin: FORCE
- $(Q)$(MAKE) $(build)=$(obj)/realmode
-
diff --git a/arch/x86/kernel/acpi/realmode/.gitignore b/arch/x86/kernel/acpi/realmode/.gitignore
deleted file mode 100644
index 58f1f48a58f..00000000000
--- a/arch/x86/kernel/acpi/realmode/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-wakeup.bin
-wakeup.elf
-wakeup.lds
diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
deleted file mode 100644
index 6a564ac67ef..00000000000
--- a/arch/x86/kernel/acpi/realmode/Makefile
+++ /dev/null
@@ -1,59 +0,0 @@
-#
-# arch/x86/kernel/acpi/realmode/Makefile
-#
-# This file is subject to the terms and conditions of the GNU General Public
-# License. See the file "COPYING" in the main directory of this archive
-# for more details.
-#
-
-always := wakeup.bin
-targets := wakeup.elf wakeup.lds
-
-wakeup-y += wakeup.o wakemain.o video-mode.o copy.o bioscall.o regs.o
-
-# The link order of the video-*.o modules can matter. In particular,
-# video-vga.o *must* be listed first, followed by video-vesa.o.
-# Hardware-specific drivers should follow in the order they should be
-# probed, and video-bios.o should typically be last.
-wakeup-y += video-vga.o
-wakeup-y += video-vesa.o
-wakeup-y += video-bios.o
-
-targets += $(wakeup-y)
-
-bootsrc := $(src)/../../../boot
-
-# ---------------------------------------------------------------------------
-
-# How to compile the 16-bit code. Note we always compile for -march=i386,
-# that way we can complain to the user if the CPU is insufficient.
-# Compile with _SETUP since this is similar to the boot-time setup code.
-KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
- -I$(srctree)/$(bootsrc) \
- $(cflags-y) \
- -Wall -Wstrict-prototypes \
- -march=i386 -mregparm=3 \
- -include $(srctree)/$(bootsrc)/code16gcc.h \
- -fno-strict-aliasing -fomit-frame-pointer \
- $(call cc-option, -ffreestanding) \
- $(call cc-option, -fno-toplevel-reorder,\
- $(call cc-option, -fno-unit-at-a-time)) \
- $(call cc-option, -fno-stack-protector) \
- $(call cc-option, -mpreferred-stack-boundary=2)
-KBUILD_CFLAGS += $(call cc-option, -m32)
-KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
-GCOV_PROFILE := n
-
-WAKEUP_OBJS = $(addprefix $(obj)/,$(wakeup-y))
-
-LDFLAGS_wakeup.elf := -T
-
-CPPFLAGS_wakeup.lds += -P -C
-
-$(obj)/wakeup.elf: $(obj)/wakeup.lds $(WAKEUP_OBJS) FORCE
- $(call if_changed,ld)
-
-OBJCOPYFLAGS_wakeup.bin := -O binary
-
-$(obj)/wakeup.bin: $(obj)/wakeup.elf FORCE
- $(call if_changed,objcopy)
diff --git a/arch/x86/kernel/acpi/realmode/bioscall.S b/arch/x86/kernel/acpi/realmode/bioscall.S
deleted file mode 100644
index f51eb0bb56c..00000000000
--- a/arch/x86/kernel/acpi/realmode/bioscall.S
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/bioscall.S"
diff --git a/arch/x86/kernel/acpi/realmode/copy.S b/arch/x86/kernel/acpi/realmode/copy.S
deleted file mode 100644
index dc59ebee69d..00000000000
--- a/arch/x86/kernel/acpi/realmode/copy.S
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/copy.S"
diff --git a/arch/x86/kernel/acpi/realmode/regs.c b/arch/x86/kernel/acpi/realmode/regs.c
deleted file mode 100644
index 6206033ba20..00000000000
--- a/arch/x86/kernel/acpi/realmode/regs.c
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/regs.c"
diff --git a/arch/x86/kernel/acpi/realmode/video-bios.c b/arch/x86/kernel/acpi/realmode/video-bios.c
deleted file mode 100644
index 7deabc144a2..00000000000
--- a/arch/x86/kernel/acpi/realmode/video-bios.c
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/video-bios.c"
diff --git a/arch/x86/kernel/acpi/realmode/video-mode.c b/arch/x86/kernel/acpi/realmode/video-mode.c
deleted file mode 100644
index 328ad209f11..00000000000
--- a/arch/x86/kernel/acpi/realmode/video-mode.c
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/video-mode.c"
diff --git a/arch/x86/kernel/acpi/realmode/video-vesa.c b/arch/x86/kernel/acpi/realmode/video-vesa.c
deleted file mode 100644
index 9dbb9672226..00000000000
--- a/arch/x86/kernel/acpi/realmode/video-vesa.c
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/video-vesa.c"
diff --git a/arch/x86/kernel/acpi/realmode/video-vga.c b/arch/x86/kernel/acpi/realmode/video-vga.c
deleted file mode 100644
index bcc81255f37..00000000000
--- a/arch/x86/kernel/acpi/realmode/video-vga.c
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../boot/video-vga.c"
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.lds.S b/arch/x86/kernel/acpi/realmode/wakeup.lds.S
deleted file mode 100644
index d4f8010a5b1..00000000000
--- a/arch/x86/kernel/acpi/realmode/wakeup.lds.S
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * wakeup.ld
- *
- * Linker script for the real-mode wakeup code
- */
-#undef i386
-#include "wakeup.h"
-
-OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
-OUTPUT_ARCH(i386)
-ENTRY(_start)
-
-SECTIONS
-{
- . = 0;
- .jump : {
- *(.jump)
- } = 0x90909090
-
- . = WAKEUP_HEADER_OFFSET;
- .header : {
- *(.header)
- }
-
- . = ALIGN(16);
- .text : {
- *(.text*)
- } = 0x90909090
-
- . = ALIGN(16);
- .rodata : {
- *(.rodata*)
- }
-
- .videocards : {
- video_cards = .;
- *(.videocards)
- video_cards_end = .;
- }
-
- . = ALIGN(16);
- .data : {
- *(.data*)
- }
-
- . = ALIGN(16);
- .bss : {
- __bss_start = .;
- *(.bss)
- __bss_end = .;
- }
-
- .signature : {
- *(.signature)
- }
-
- _end = .;
-
- /DISCARD/ : {
- *(.note*)
- }
-}
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 146a49c763a..95bf99de905 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -14,8 +14,9 @@
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
+#include <asm/realmode.h>
-#include "realmode/wakeup.h"
+#include "../../realmode/rm/wakeup.h"
#include "sleep.h"
unsigned long acpi_realmode_flags;
@@ -36,13 +37,9 @@ asmlinkage void acpi_enter_s3(void)
*/
int acpi_suspend_lowlevel(void)
{
- struct wakeup_header *header;
- /* address in low memory of the wakeup routine. */
- char *acpi_realmode;
+ struct wakeup_header *header =
+ (struct wakeup_header *) __va(real_mode_header->wakeup_header);
- acpi_realmode = TRAMPOLINE_SYM(acpi_wakeup_code);
-
- header = (struct wakeup_header *)(acpi_realmode + WAKEUP_HEADER_OFFSET);
if (header->signature != WAKEUP_HEADER_SIGNATURE) {
printk(KERN_ERR "wakeup header does not match\n");
return -EINVAL;
@@ -50,27 +47,6 @@ int acpi_suspend_lowlevel(void)
header->video_mode = saved_video_mode;
- header->wakeup_jmp_seg = acpi_wakeup_address >> 4;
-
- /*
- * Set up the wakeup GDT. We set these up as Big Real Mode,
- * that is, with limits set to 4 GB. At least the Lenovo
- * Thinkpad X61 is known to need this for the video BIOS
- * initialization quirk to work; this is likely to also
- * be the case for other laptops or integrated video devices.
- */
-
- /* GDT[0]: GDT self-pointer */
- header->wakeup_gdt[0] =
- (u64)(sizeof(header->wakeup_gdt) - 1) +
- ((u64)__pa(&header->wakeup_gdt) << 16);
- /* GDT[1]: big real mode-like code segment */
- header->wakeup_gdt[1] =
- GDT_ENTRY(0x809b, acpi_wakeup_address, 0xfffff);
- /* GDT[2]: big real mode-like data segment */
- header->wakeup_gdt[2] =
- GDT_ENTRY(0x8093, acpi_wakeup_address, 0xfffff);
-
#ifndef CONFIG_64BIT
store_gdt((struct desc_ptr *)&header->pmode_gdt);
@@ -95,7 +71,6 @@ int acpi_suspend_lowlevel(void)
header->pmode_cr3 = (u32)__pa(&initial_page_table);
saved_magic = 0x12345678;
#else /* CONFIG_64BIT */
- header->trampoline_segment = trampoline_address() >> 4;
#ifdef CONFIG_SMP
stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
early_gdt_descr.address =
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h
index d68677a2a01..5653a5791ec 100644
--- a/arch/x86/kernel/acpi/sleep.h
+++ b/arch/x86/kernel/acpi/sleep.h
@@ -2,8 +2,8 @@
* Variables and functions used by the code in sleep.c
*/
-#include <asm/trampoline.h>
#include <linux/linkage.h>
+#include <asm/realmode.h>
extern unsigned long saved_video_mode;
extern long saved_magic;
diff --git a/arch/x86/kernel/acpi/wakeup_rm.S b/arch/x86/kernel/acpi/wakeup_rm.S
deleted file mode 100644
index 63b8ab524f2..00000000000
--- a/arch/x86/kernel/acpi/wakeup_rm.S
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Wrapper script for the realmode binary as a transport object
- * before copying to low memory.
- */
-#include <asm/page_types.h>
-
- .section ".x86_trampoline","a"
- .balign PAGE_SIZE
- .globl acpi_wakeup_code
-acpi_wakeup_code:
- .incbin "arch/x86/kernel/acpi/realmode/wakeup.bin"
- .size acpi_wakeup_code, .-acpi_wakeup_code
diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mcheck/mce-apei.c
index 507ea58688e..cd8b166a173 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-apei.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-apei.c
@@ -42,7 +42,8 @@ void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err)
struct mce m;
/* Only corrected MC is reported */
- if (!corrected)
+ if (!corrected || !(mem_err->validation_bits &
+ CPER_MEM_VALID_PHYSICAL_ADDRESS))
return;
mce_setup(&m);
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 0c82091b165..413c2ced887 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -126,6 +126,16 @@ static struct severity {
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
USER
),
+ MCESEV(
+ KEEP, "HT thread notices Action required: instruction fetch error",
+ SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
+ MCGMASK(MCG_STATUS_EIPV, 0)
+ ),
+ MCESEV(
+ AR, "Action required: instruction fetch error",
+ SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
+ USER
+ ),
#endif
MCESEV(
PANIC, "Action required: unknown MCACOD",
@@ -165,15 +175,19 @@ static struct severity {
};
/*
- * If the EIPV bit is set, it means the saved IP is the
- * instruction which caused the MCE.
+ * If mcgstatus indicated that ip/cs on the stack were
+ * no good, then "m->cs" will be zero and we will have
+ * to assume the worst case (IN_KERNEL) as we actually
+ * have no idea what we were executing when the machine
+ * check hit.
+ * If we do have a good "m->cs" (or a faked one in the
+ * case we were executing in VM86 mode) we can use it to
+ * distinguish an exception taken in user mode from one
+ * taken in the kernel.
*/
static int error_context(struct mce *m)
{
- if (m->mcgstatus & MCG_STATUS_EIPV)
- return (m->ip && (m->cs & 3) == 3) ? IN_USER : IN_KERNEL;
- /* Unknown, assume kernel */
- return IN_KERNEL;
+ return ((m->cs & 3) == 3) ? IN_USER : IN_KERNEL;
}
int mce_severity(struct mce *m, int tolerant, char **msg)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index aaa056f3169..b4180f425fb 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -437,6 +437,14 @@ static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
m->ip = regs->ip;
m->cs = regs->cs;
+
+ /*
+ * When in VM86 mode, always make the cs look like ring 3.
+ * This is a lie, but it's better than passing
+ * the additional vm86 bit around everywhere.
+ */
+ if (v8086_mode(regs))
+ m->cs |= 3;
}
/* Use accurate RIP reporting if available. */
if (rip_msr)
@@ -641,16 +649,18 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
* Do a quick check if any of the events requires a panic.
* This decides if we keep the events around or clear them.
*/
-static int mce_no_way_out(struct mce *m, char **msg)
+static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp)
{
- int i;
+ int i, ret = 0;
for (i = 0; i < banks; i++) {
m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
+ if (m->status & MCI_STATUS_VAL)
+ __set_bit(i, validp);
if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
- return 1;
+ ret = 1;
}
- return 0;
+ return ret;
}
/*
@@ -1013,6 +1023,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
*/
int kill_it = 0;
DECLARE_BITMAP(toclear, MAX_NR_BANKS);
+ DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
char *msg = "Unknown";
atomic_inc(&mce_entry);
@@ -1027,7 +1038,8 @@ void do_machine_check(struct pt_regs *regs, long error_code)
final = &__get_cpu_var(mces_seen);
*final = m;
- no_way_out = mce_no_way_out(&m, &msg);
+ memset(valid_banks, 0, sizeof(valid_banks));
+ no_way_out = mce_no_way_out(&m, &msg, valid_banks);
barrier();
@@ -1047,6 +1059,8 @@ void do_machine_check(struct pt_regs *regs, long error_code)
order = mce_start(&no_way_out);
for (i = 0; i < banks; i++) {
__clear_bit(i, toclear);
+ if (!test_bit(i, valid_banks))
+ continue;
if (!mce_banks[i].ctl)
continue;
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 62d61e9976e..41857970517 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -113,7 +113,9 @@ static void __init __e820_add_region(struct e820map *e820x, u64 start, u64 size,
int x = e820x->nr_map;
if (x >= ARRAY_SIZE(e820x->map)) {
- printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
+ printk(KERN_ERR "e820: too many entries; ignoring [mem %#010llx-%#010llx]\n",
+ (unsigned long long) start,
+ (unsigned long long) (start + size - 1));
return;
}
@@ -133,19 +135,19 @@ static void __init e820_print_type(u32 type)
switch (type) {
case E820_RAM:
case E820_RESERVED_KERN:
- printk(KERN_CONT "(usable)");
+ printk(KERN_CONT "usable");
break;
case E820_RESERVED:
- printk(KERN_CONT "(reserved)");
+ printk(KERN_CONT "reserved");
break;
case E820_ACPI:
- printk(KERN_CONT "(ACPI data)");
+ printk(KERN_CONT "ACPI data");
break;
case E820_NVS:
- printk(KERN_CONT "(ACPI NVS)");
+ printk(KERN_CONT "ACPI NVS");
break;
case E820_UNUSABLE:
- printk(KERN_CONT "(unusable)");
+ printk(KERN_CONT "unusable");
break;
default:
printk(KERN_CONT "type %u", type);
@@ -158,10 +160,10 @@ void __init e820_print_map(char *who)
int i;
for (i = 0; i < e820.nr_map; i++) {
- printk(KERN_INFO " %s: %016Lx - %016Lx ", who,
+ printk(KERN_INFO "%s: [mem %#018Lx-%#018Lx] ", who,
(unsigned long long) e820.map[i].addr,
(unsigned long long)
- (e820.map[i].addr + e820.map[i].size));
+ (e820.map[i].addr + e820.map[i].size - 1));
e820_print_type(e820.map[i].type);
printk(KERN_CONT "\n");
}
@@ -428,9 +430,8 @@ static u64 __init __e820_update_range(struct e820map *e820x, u64 start,
size = ULLONG_MAX - start;
end = start + size;
- printk(KERN_DEBUG "e820 update range: %016Lx - %016Lx ",
- (unsigned long long) start,
- (unsigned long long) end);
+ printk(KERN_DEBUG "e820: update [mem %#010Lx-%#010Lx] ",
+ (unsigned long long) start, (unsigned long long) (end - 1));
e820_print_type(old_type);
printk(KERN_CONT " ==> ");
e820_print_type(new_type);
@@ -509,9 +510,8 @@ u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type,
size = ULLONG_MAX - start;
end = start + size;
- printk(KERN_DEBUG "e820 remove range: %016Lx - %016Lx ",
- (unsigned long long) start,
- (unsigned long long) end);
+ printk(KERN_DEBUG "e820: remove [mem %#010Lx-%#010Lx] ",
+ (unsigned long long) start, (unsigned long long) (end - 1));
if (checktype)
e820_print_type(old_type);
printk(KERN_CONT "\n");
@@ -567,7 +567,7 @@ void __init update_e820(void)
if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map))
return;
e820.nr_map = nr_map;
- printk(KERN_INFO "modified physical RAM map:\n");
+ printk(KERN_INFO "e820: modified physical RAM map:\n");
e820_print_map("modified");
}
static void __init update_e820_saved(void)
@@ -637,8 +637,8 @@ __init void e820_setup_gap(void)
if (!found) {
gapstart = (max_pfn << PAGE_SHIFT) + 1024*1024;
printk(KERN_ERR
- "PCI: Warning: Cannot find a gap in the 32bit address range\n"
- "PCI: Unassigned devices with 32bit resource registers may break!\n");
+ "e820: cannot find a gap in the 32bit address range\n"
+ "e820: PCI devices with unassigned 32bit BARs may break!\n");
}
#endif
@@ -648,8 +648,8 @@ __init void e820_setup_gap(void)
pci_mem_start = gapstart;
printk(KERN_INFO
- "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
- pci_mem_start, gapstart, gapsize);
+ "e820: [mem %#010lx-%#010lx] available for PCI devices\n",
+ gapstart, gapstart + gapsize - 1);
}
/**
@@ -667,7 +667,7 @@ void __init parse_e820_ext(struct setup_data *sdata)
extmap = (struct e820entry *)(sdata->data);
__append_e820_map(extmap, entries);
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
- printk(KERN_INFO "extended physical RAM map:\n");
+ printk(KERN_INFO "e820: extended physical RAM map:\n");
e820_print_map("extended");
}
@@ -734,7 +734,7 @@ u64 __init early_reserve_e820(u64 size, u64 align)
addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
if (addr) {
e820_update_range_saved(addr, size, E820_RAM, E820_RESERVED);
- printk(KERN_INFO "update e820_saved for early_reserve_e820\n");
+ printk(KERN_INFO "e820: update e820_saved for early_reserve_e820\n");
update_e820_saved();
}
@@ -784,7 +784,7 @@ static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
if (last_pfn > max_arch_pfn)
last_pfn = max_arch_pfn;
- printk(KERN_INFO "last_pfn = %#lx max_arch_pfn = %#lx\n",
+ printk(KERN_INFO "e820: last_pfn = %#lx max_arch_pfn = %#lx\n",
last_pfn, max_arch_pfn);
return last_pfn;
}
@@ -888,7 +888,7 @@ void __init finish_e820_parsing(void)
early_panic("Invalid user supplied memory map");
e820.nr_map = nr;
- printk(KERN_INFO "user-defined physical RAM map:\n");
+ printk(KERN_INFO "e820: user-defined physical RAM map:\n");
e820_print_map("user");
}
}
@@ -996,8 +996,9 @@ void __init e820_reserve_resources_late(void)
end = MAX_RESOURCE_SIZE;
if (start >= end)
continue;
- printk(KERN_DEBUG "reserve RAM buffer: %016llx - %016llx ",
- start, end);
+ printk(KERN_DEBUG
+ "e820: reserve RAM buffer [mem %#010llx-%#010llx]\n",
+ start, end);
reserve_region_with_split(&iomem_resource, start, end,
"RAM buffer");
}
@@ -1047,7 +1048,7 @@ void __init setup_memory_map(void)
who = x86_init.resources.memory_setup();
memcpy(&e820_saved, &e820, sizeof(struct e820map));
- printk(KERN_INFO "BIOS-provided physical RAM map:\n");
+ printk(KERN_INFO "e820: BIOS-provided physical RAM map:\n");
e820_print_map(who);
}
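The recurring change in e820.c is cosmetic but easy to get wrong when reproduced elsewhere: ranges are now printed as inclusive [mem start-end] with end = start + size - 1, using %#010llx (0x prefix included in a zero-padded 10-character field). A tiny sketch of the two conventions side by side:

#include <stdio.h>

int main(void)
{
        unsigned long long start = 0x100000ULL, size = 0x200000ULL;

        /* old style: exclusive end, plain %016llx padding */
        printf("old: %016llx - %016llx\n", start, start + size);
        /* new style: inclusive end, subsystem prefix, %#010llx */
        printf("e820: [mem %#010llx-%#010llx]\n", start, start + size - 1);
        return 0;
}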
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 51ff18616d5..c18f59d1010 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -14,7 +14,6 @@
#include <asm/sections.h>
#include <asm/e820.h>
#include <asm/page.h>
-#include <asm/trampoline.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/bios_ebda.h>
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 3a3b779f41d..037df57a99a 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -24,7 +24,6 @@
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820.h>
-#include <asm/trampoline.h>
#include <asm/bios_ebda.h>
static void __init zap_identity_mappings(void)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 463c9797ca6..d42ab17b739 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -274,10 +274,7 @@ num_subarch_entries = (. - subarch_entries) / 4
* If cpu hotplug is not supported then this code can go in init section
* which will be freed later
*/
-
__CPUINIT
-
-#ifdef CONFIG_SMP
ENTRY(startup_32_smp)
cld
movl $(__BOOT_DS),%eax
@@ -288,7 +285,7 @@ ENTRY(startup_32_smp)
movl pa(stack_start),%ecx
movl %eax,%ss
leal -__PAGE_OFFSET(%ecx),%esp
-#endif /* CONFIG_SMP */
+
default_entry:
/*
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 7a40f244732..94bf9cc2c7e 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -139,10 +139,6 @@ ident_complete:
/* Fixup phys_base */
addq %rbp, phys_base(%rip)
- /* Fixup trampoline */
- addq %rbp, trampoline_level4_pgt + 0(%rip)
- addq %rbp, trampoline_level4_pgt + (511*8)(%rip)
-
/* Due to ENTRY(), sometimes the empty space gets filled with
* zeros. Better take a jmp than relying on empty space being
* filled with 0x90 (nop)
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index f8492da65bf..086eb58c6e8 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -22,6 +22,7 @@
#include <asm/msr.h>
#include <asm/apic.h>
#include <linux/percpu.h>
+#include <linux/hardirq.h>
#include <asm/x86_init.h>
#include <asm/reboot.h>
@@ -114,6 +115,25 @@ static void kvm_get_preset_lpj(void)
preset_lpj = lpj;
}
+bool kvm_check_and_clear_guest_paused(void)
+{
+ bool ret = false;
+ struct pvclock_vcpu_time_info *src;
+
+ /*
+ * per_cpu() is safe here because this function is only called from
+ * timer functions where preemption is already disabled.
+ */
+ WARN_ON(!in_atomic());
+ src = &__get_cpu_var(hv_clock);
+ if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
+ __this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED);
+ ret = true;
+ }
+
+ return ret;
+}
+
static struct clocksource kvm_clock = {
.name = "kvm-clock",
.read = kvm_clock_get_cycles,
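kvm_check_and_clear_guest_paused() above is a check-and-clear accessor: it reports whether the host flagged the guest as stopped and clears the flag in the same call, so a caller such as the watchdog only consumes each pause event once. A minimal sketch of the same idiom on a plain flags word; the per-cpu access and preemption requirements are kernel-specific and omitted, and the bit value below is assumed for illustration only:

#include <stdbool.h>
#include <stdio.h>

#define PVCLOCK_GUEST_STOPPED (1 << 1)   /* assumed bit value, illustration only */

struct vcpu_time_info { unsigned char flags; };

static bool check_and_clear_guest_paused(struct vcpu_time_info *src)
{
        if (!(src->flags & PVCLOCK_GUEST_STOPPED))
                return false;
        src->flags &= ~PVCLOCK_GUEST_STOPPED;   /* consume the event */
        return true;
}

int main(void)
{
        struct vcpu_time_info t = { .flags = PVCLOCK_GUEST_STOPPED };

        printf("%d\n", check_and_clear_guest_paused(&t));  /* 1: flag seen and cleared */
        printf("%d\n", check_and_clear_guest_paused(&t));  /* 0: already consumed */
        return 0;
}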
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index b02d4dd6b8a..d2b56489d70 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -27,7 +27,6 @@
#include <asm/proto.h>
#include <asm/bios_ebda.h>
#include <asm/e820.h>
-#include <asm/trampoline.h>
#include <asm/setup.h>
#include <asm/smp.h>
@@ -568,8 +567,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
struct mpf_intel *mpf;
unsigned long mem;
- apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n",
- bp, length);
+ apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
+ base, base + length - 1);
BUILD_BUG_ON(sizeof(*mpf) != 16);
while (length > 0) {
@@ -584,8 +583,10 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
#endif
mpf_found = mpf;
- printk(KERN_INFO "found SMP MP-table at [%p] %llx\n",
- mpf, (u64)virt_to_phys(mpf));
+ printk(KERN_INFO "found SMP MP-table at [mem %#010llx-%#010llx] mapped at [%p]\n",
+ (unsigned long long) virt_to_phys(mpf),
+ (unsigned long long) virt_to_phys(mpf) +
+ sizeof(*mpf) - 1, mpf);
mem = virt_to_phys(mpf);
memblock_reserve(mem, sizeof(*mpf));
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 3003250ac51..62c9457ccd2 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -100,14 +100,18 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
struct dma_attrs *attrs)
{
unsigned long dma_mask;
- struct page *page;
+ struct page *page = NULL;
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
dma_addr_t addr;
dma_mask = dma_alloc_coherent_mask(dev, flag);
flag |= __GFP_ZERO;
again:
- page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
+ if (!(flag & GFP_ATOMIC))
+ page = dma_alloc_from_contiguous(dev, count, get_order(size));
+ if (!page)
+ page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
if (!page)
return NULL;
@@ -127,6 +131,16 @@ again:
return page_address(page);
}
+void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_addr, struct dma_attrs *attrs)
+{
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct page *page = virt_to_page(vaddr);
+
+ if (!dma_release_from_contiguous(dev, page, count))
+ free_pages((unsigned long)vaddr, get_order(size));
+}
+
/*
* See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
* parameter documentation.
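The pattern introduced in dma_generic_alloc_coherent() above is: try the contiguous (CMA) allocator only when sleeping is allowed, fall back to the normal page allocator if CMA declines, and mirror that on the free path by offering the pages back to CMA before free_pages(). A hedged sketch of the allocation control flow, with hypothetical try_contiguous_alloc() and buddy_alloc() helpers standing in for dma_alloc_from_contiguous() and alloc_pages_node():

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

/* Hypothetical back ends; only the ordering and fallback matter here. */
static void *try_contiguous_alloc(size_t size) { (void)size; return NULL; } /* may refuse */
static void *buddy_alloc(size_t size)          { return malloc(size); }

static void *alloc_coherent(size_t size, bool atomic)
{
        void *p = NULL;

        if (!atomic)                       /* CMA may sleep, so skip it for GFP_ATOMIC */
                p = try_contiguous_alloc(size);
        if (!p)                            /* fall back to the regular allocator */
                p = buddy_alloc(size);
        return p;
}

int main(void)
{
        void *p = alloc_coherent(4096, false);
        free(p);
        return 0;
}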
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index f96050685b4..871be4a84c7 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -74,12 +74,6 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
return nents;
}
-static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_addr, struct dma_attrs *attrs)
-{
- free_pages((unsigned long)vaddr, get_order(size));
-}
-
static void nommu_sync_single_for_device(struct device *dev,
dma_addr_t addr, size_t size,
enum dma_data_direction dir)
@@ -97,7 +91,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
struct dma_map_ops nommu_dma_ops = {
.alloc = dma_generic_alloc_coherent,
- .free = nommu_free_coherent,
+ .free = dma_generic_free_coherent,
.map_sg = nommu_map_sg,
.map_page = nommu_map_page,
.sync_single_for_device = nommu_sync_single_for_device,
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 77215c23fba..79c45af8160 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -24,6 +24,7 @@
#ifdef CONFIG_X86_32
# include <linux/ctype.h>
# include <linux/mc146818rtc.h>
+# include <asm/realmode.h>
#else
# include <asm/x86_init.h>
#endif
@@ -156,15 +157,10 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
return 0;
}
-extern const unsigned char machine_real_restart_asm[];
-extern const u64 machine_real_restart_gdt[3];
-
void machine_real_restart(unsigned int type)
{
- void *restart_va;
- unsigned long restart_pa;
- void (*restart_lowmem)(unsigned int);
- u64 *lowmem_gdt;
+ void (*restart_lowmem)(unsigned int) = (void (*)(unsigned int))
+ real_mode_header->machine_real_restart_asm;
local_irq_disable();
@@ -195,21 +191,6 @@ void machine_real_restart(unsigned int type)
* too. */
*((unsigned short *)0x472) = reboot_mode;
- /* Patch the GDT in the low memory trampoline */
- lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
-
- restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
- restart_pa = virt_to_phys(restart_va);
- restart_lowmem = (void (*)(unsigned int))restart_pa;
-
- /* GDT[0]: GDT self-pointer */
- lowmem_gdt[0] =
- (u64)(sizeof(machine_real_restart_gdt) - 1) +
- ((u64)virt_to_phys(lowmem_gdt) << 16);
- /* GDT[1]: 64K real mode code segment */
- lowmem_gdt[1] =
- GDT_ENTRY(0x009b, restart_pa, 0xffff);
-
/* Jump to the identity-mapped low memory code */
restart_lowmem(type);
}
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 366c688d619..16be6dc14db 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -49,6 +49,7 @@
#include <asm/pci-direct.h>
#include <linux/init_ohci1394_dma.h>
#include <linux/kvm_para.h>
+#include <linux/dma-contiguous.h>
#include <linux/errno.h>
#include <linux/kernel.h>
@@ -72,7 +73,7 @@
#include <asm/mtrr.h>
#include <asm/apic.h>
-#include <asm/trampoline.h>
+#include <asm/realmode.h>
#include <asm/e820.h>
#include <asm/mpspec.h>
#include <asm/setup.h>
@@ -333,8 +334,8 @@ static void __init relocate_initrd(void)
memblock_reserve(ramdisk_here, area_size);
initrd_start = ramdisk_here + PAGE_OFFSET;
initrd_end = initrd_start + ramdisk_size;
- printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n",
- ramdisk_here, ramdisk_here + ramdisk_size);
+ printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
+ ramdisk_here, ramdisk_here + ramdisk_size - 1);
q = (char *)initrd_start;
@@ -365,8 +366,8 @@ static void __init relocate_initrd(void)
/* high pages is not converted by early_res_to_bootmem */
ramdisk_image = boot_params.hdr.ramdisk_image;
ramdisk_size = boot_params.hdr.ramdisk_size;
- printk(KERN_INFO "Move RAMDISK from %016llx - %016llx to"
- " %08llx - %08llx\n",
+ printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
+ " [mem %#010llx-%#010llx]\n",
ramdisk_image, ramdisk_image + ramdisk_size - 1,
ramdisk_here, ramdisk_here + ramdisk_size - 1);
}
@@ -391,8 +392,8 @@ static void __init reserve_initrd(void)
ramdisk_size, end_of_lowmem>>1);
}
- printk(KERN_INFO "RAMDISK: %08llx - %08llx\n", ramdisk_image,
- ramdisk_end);
+ printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
+ ramdisk_end - 1);
if (ramdisk_end <= end_of_lowmem) {
@@ -905,10 +906,10 @@ void __init setup_arch(char **cmdline_p)
setup_bios_corruption_check();
#endif
- printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n",
- max_pfn_mapped<<PAGE_SHIFT);
+ printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
+ (max_pfn_mapped<<PAGE_SHIFT) - 1);
- setup_trampolines();
+ setup_real_mode();
init_gbpages();
@@ -925,6 +926,7 @@ void __init setup_arch(char **cmdline_p)
}
#endif
memblock.current_limit = get_max_mapped();
+ dma_contiguous_reserve(0);
/*
* NOTE: On x86-32, only from this point on, fixmaps are ready for use.
@@ -966,6 +968,8 @@ void __init setup_arch(char **cmdline_p)
if (boot_cpu_data.cpuid_level >= 0) {
/* A CPU has %cr4 if and only if it has CPUID */
mmu_cr4_features = read_cr4();
+ if (trampoline_cr4_features)
+ *trampoline_cr4_features = mmu_cr4_features;
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 433529e29be..f56f96da77f 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -57,7 +57,7 @@
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/idle.h>
-#include <asm/trampoline.h>
+#include <asm/realmode.h>
#include <asm/cpu.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
@@ -73,6 +73,8 @@
#include <asm/smpboot_hooks.h>
#include <asm/i8259.h>
+#include <asm/realmode.h>
+
/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
@@ -660,8 +662,12 @@ static void __cpuinit announce_cpu(int cpu, int apicid)
*/
static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
{
+ volatile u32 *trampoline_status =
+ (volatile u32 *) __va(real_mode_header->trampoline_status);
+ /* start_ip had better be page-aligned! */
+ unsigned long start_ip = real_mode_header->trampoline_start;
+
unsigned long boot_error = 0;
- unsigned long start_ip;
int timeout;
alternatives_smp_switch(1);
@@ -684,9 +690,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
initial_code = (unsigned long)start_secondary;
stack_start = idle->thread.sp;
- /* start_ip had better be page-aligned! */
- start_ip = trampoline_address();
-
/* So we see what's up */
announce_cpu(cpu, apicid);
@@ -749,8 +752,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
pr_debug("CPU%d: has booted.\n", cpu);
} else {
boot_error = 1;
- if (*(volatile u32 *)TRAMPOLINE_SYM(trampoline_status)
- == 0xA5A5A5A5)
+ if (*trampoline_status == 0xA5A5A5A5)
/* trampoline started but...? */
pr_err("CPU%d: Stuck ??\n", cpu);
else
@@ -776,7 +778,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
}
/* mark "stuck" area as not stuck */
- *(volatile u32 *)TRAMPOLINE_SYM(trampoline_status) = 0;
+ *trampoline_status = 0;
if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
/*
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 6410744ac5c..f84fe00fad4 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -32,7 +32,7 @@
#include <linux/mm.h>
#include <linux/tboot.h>
-#include <asm/trampoline.h>
+#include <asm/realmode.h>
#include <asm/processor.h>
#include <asm/bootparam.h>
#include <asm/pgtable.h>
@@ -44,7 +44,7 @@
#include <asm/e820.h>
#include <asm/io.h>
-#include "acpi/realmode/wakeup.h"
+#include "../realmode/rm/wakeup.h"
/* Global pointer to shared data; NULL means no measured launch. */
struct tboot *tboot __read_mostly;
@@ -201,7 +201,8 @@ static int tboot_setup_sleep(void)
add_mac_region(e820.map[i].addr, e820.map[i].size);
}
- tboot->acpi_sinfo.kernel_s3_resume_vector = acpi_wakeup_address;
+ tboot->acpi_sinfo.kernel_s3_resume_vector =
+ real_mode_header->wakeup_start;
return 0;
}
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
deleted file mode 100644
index a73b61055ad..00000000000
--- a/arch/x86/kernel/trampoline.c
+++ /dev/null
@@ -1,42 +0,0 @@
-#include <linux/io.h>
-#include <linux/memblock.h>
-
-#include <asm/trampoline.h>
-#include <asm/cacheflush.h>
-#include <asm/pgtable.h>
-
-unsigned char *x86_trampoline_base;
-
-void __init setup_trampolines(void)
-{
- phys_addr_t mem;
- size_t size = PAGE_ALIGN(x86_trampoline_end - x86_trampoline_start);
-
- /* Has to be in very low memory so we can execute real-mode AP code. */
- mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
- if (!mem)
- panic("Cannot allocate trampoline\n");
-
- x86_trampoline_base = __va(mem);
- memblock_reserve(mem, size);
-
- printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
- x86_trampoline_base, (unsigned long long)mem, size);
-
- memcpy(x86_trampoline_base, x86_trampoline_start, size);
-}
-
-/*
- * setup_trampolines() gets called very early, to guarantee the
- * availability of low memory. This is before the proper kernel page
- * tables are set up, so we cannot set page permissions in that
- * function. Thus, we use an arch_initcall instead.
- */
-static int __init configure_trampolines(void)
-{
- size_t size = PAGE_ALIGN(x86_trampoline_end - x86_trampoline_start);
-
- set_memory_x((unsigned long)x86_trampoline_base, size >> PAGE_SHIFT);
- return 0;
-}
-arch_initcall(configure_trampolines);
diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
deleted file mode 100644
index 451c0a7ef7f..00000000000
--- a/arch/x86/kernel/trampoline_32.S
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- *
- * Trampoline.S Derived from Setup.S by Linus Torvalds
- *
- * 4 Jan 1997 Michael Chastain: changed to gnu as.
- *
- * This is only used for booting secondary CPUs in SMP machine
- *
- * Entry: CS:IP point to the start of our code, we are
- * in real mode with no stack, but the rest of the
- * trampoline page to make our stack and everything else
- * is a mystery.
- *
- * We jump into arch/x86/kernel/head_32.S.
- *
- * On entry to trampoline_data, the processor is in real mode
- * with 16-bit addressing and 16-bit data. CS has some value
- * and IP is zero. Thus, data addresses need to be absolute
- * (no relocation) and are taken with regard to r_base.
- *
- * If you work on this file, check the object module with
- * objdump --reloc to make sure there are no relocation
- * entries except for:
- *
- * TYPE VALUE
- * R_386_32 startup_32_smp
- * R_386_32 boot_gdt
- */
-
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/segment.h>
-#include <asm/page_types.h>
-
-#ifdef CONFIG_SMP
-
- .section ".x86_trampoline","a"
- .balign PAGE_SIZE
- .code16
-
-ENTRY(trampoline_data)
-r_base = .
- wbinvd # Needed for NUMA-Q should be harmless for others
- mov %cs, %ax # Code and data in the same place
- mov %ax, %ds
-
- cli # We should be safe anyway
-
- movl $0xA5A5A5A5, trampoline_status - r_base
- # write marker for master knows we're running
-
- /* GDT tables in non default location kernel can be beyond 16MB and
- * lgdt will not be able to load the address as in real mode default
- * operand size is 16bit. Use lgdtl instead to force operand size
- * to 32 bit.
- */
-
- lidtl boot_idt_descr - r_base # load idt with 0, 0
- lgdtl boot_gdt_descr - r_base # load gdt with whatever is appropriate
-
- xor %ax, %ax
- inc %ax # protected mode (PE) bit
- lmsw %ax # into protected mode
- # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
-
- # These need to be in the same 64K segment as the above;
- # hence we don't use the boot_gdt_descr defined in head.S
-boot_gdt_descr:
- .word __BOOT_DS + 7 # gdt limit
- .long boot_gdt - __PAGE_OFFSET # gdt base
-
-boot_idt_descr:
- .word 0 # idt limit = 0
- .long 0 # idt base = 0L
-
-ENTRY(trampoline_status)
- .long 0
-
-.globl trampoline_end
-trampoline_end:
-
-#endif /* CONFIG_SMP */
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 0f703f10901..22a1530146a 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -197,18 +197,6 @@ SECTIONS
INIT_DATA_SECTION(16)
- /*
- * Code and data for a variety of lowlevel trampolines, to be
- * copied into base memory (< 1 MiB) during initialization.
- * Since it is copied early, the main copy can be discarded
- * afterwards.
- */
- .x86_trampoline : AT(ADDR(.x86_trampoline) - LOAD_OFFSET) {
- x86_trampoline_start = .;
- *(.x86_trampoline)
- x86_trampoline_end = .;
- }
-
.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
__x86_cpu_dev_start = .;
*(.x86_cpu_dev.init)
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 1a7fe868f37..a28f338843e 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -36,6 +36,7 @@ config KVM
select TASKSTATS
select TASK_DELAY_ACCT
select PERF_EVENTS
+ select HAVE_KVM_MSI
---help---
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 9fed5bedaad..7df1c6d839f 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -247,7 +247,8 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
/* cpuid 7.0.ebx */
const u32 kvm_supported_word9_x86_features =
- F(FSGSBASE) | F(BMI1) | F(AVX2) | F(SMEP) | F(BMI2) | F(ERMS);
+ F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
+ F(BMI2) | F(ERMS) | F(RTM);
/* all calls to cpuid_count() should be made on the same cpu */
get_cpu();
@@ -397,7 +398,7 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
case KVM_CPUID_SIGNATURE: {
char signature[12] = "KVMKVMKVM\0\0";
u32 *sigptr = (u32 *)signature;
- entry->eax = 0;
+ entry->eax = KVM_CPUID_FEATURES;
entry->ebx = sigptr[0];
entry->ecx = sigptr[1];
entry->edx = sigptr[2];
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 83756223f8a..f95d242ee9f 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -142,6 +142,10 @@
#define Src2FS (OpFS << Src2Shift)
#define Src2GS (OpGS << Src2Shift)
#define Src2Mask (OpMask << Src2Shift)
+#define Mmx ((u64)1 << 40) /* MMX Vector instruction */
+#define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
+#define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */
+#define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
#define X2(x...) x, x
#define X3(x...) X2(x), x
@@ -557,6 +561,29 @@ static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
+/*
+ * x86 defines three classes of vector instructions: explicitly
+ * aligned, explicitly unaligned, and the rest, which change behaviour
+ * depending on whether they're AVX encoded or not.
+ *
+ * Also included is CMPXCHG16B which is not a vector instruction, yet it is
+ * subject to the same check.
+ */
+static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
+{
+ if (likely(size < 16))
+ return false;
+
+ if (ctxt->d & Aligned)
+ return true;
+ else if (ctxt->d & Unaligned)
+ return false;
+ else if (ctxt->d & Avx)
+ return false;
+ else
+ return true;
+}
+
static int __linearize(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
unsigned size, bool write, bool fetch,
@@ -621,6 +648,8 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
}
if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
la &= (u32)-1;
+ if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
+ return emulate_gp(ctxt, 0);
*linear = la;
return X86EMUL_CONTINUE;
bad:
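The new check added to __linearize() above rejects misaligned accesses for instruction classes marked Aligned; since size is a power of two at that point (16 for the SSE forms), (la & (size - 1)) != 0 is the usual cheap test for "address is not a multiple of size". A one-file sketch of that test:

#include <stdbool.h>
#include <assert.h>

/* True if addr is a multiple of size; size must be a power of two. */
static bool is_aligned(unsigned long addr, unsigned long size)
{
        return (addr & (size - 1)) == 0;
}

int main(void)
{
        assert(is_aligned(0x1000, 16));
        assert(!is_aligned(0x1008, 16));   /* an 8-byte offset fails a 16-byte requirement */
        return 0;
}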
@@ -859,6 +888,40 @@ static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
ctxt->ops->put_fpu(ctxt);
}
+static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
+{
+ ctxt->ops->get_fpu(ctxt);
+ switch (reg) {
+ case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
+ case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
+ case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
+ case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
+ case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
+ case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
+ case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
+ case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
+ default: BUG();
+ }
+ ctxt->ops->put_fpu(ctxt);
+}
+
+static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
+{
+ ctxt->ops->get_fpu(ctxt);
+ switch (reg) {
+ case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
+ case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
+ case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
+ case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
+ case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
+ case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
+ case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
+ case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
+ default: BUG();
+ }
+ ctxt->ops->put_fpu(ctxt);
+}
+
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
@@ -875,6 +938,13 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
read_sse_reg(ctxt, &op->vec_val, reg);
return;
}
+ if (ctxt->d & Mmx) {
+ reg &= 7;
+ op->type = OP_MM;
+ op->bytes = 8;
+ op->addr.mm = reg;
+ return;
+ }
op->type = OP_REG;
if (ctxt->d & ByteOp) {
@@ -902,7 +972,6 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REG.B */
}
- ctxt->modrm = insn_fetch(u8, ctxt);
ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
ctxt->modrm_rm |= (ctxt->modrm & 0x07);
@@ -920,6 +989,12 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
return rc;
}
+ if (ctxt->d & Mmx) {
+ op->type = OP_MM;
+ op->bytes = 8;
+ op->addr.xmm = ctxt->modrm_rm & 7;
+ return rc;
+ }
fetch_register_operand(op);
return rc;
}
@@ -1387,6 +1462,9 @@ static int writeback(struct x86_emulate_ctxt *ctxt)
case OP_XMM:
write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
break;
+ case OP_MM:
+ write_mmx_reg(ctxt, &ctxt->dst.mm_val, ctxt->dst.addr.mm);
+ break;
case OP_NONE:
/* no writeback */
break;
@@ -2790,7 +2868,7 @@ static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
static int em_mov(struct x86_emulate_ctxt *ctxt)
{
- ctxt->dst.val = ctxt->src.val;
+ memcpy(ctxt->dst.valptr, ctxt->src.valptr, ctxt->op_bytes);
return X86EMUL_CONTINUE;
}
@@ -2870,12 +2948,6 @@ static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}
-static int em_movdqu(struct x86_emulate_ctxt *ctxt)
-{
- memcpy(&ctxt->dst.vec_val, &ctxt->src.vec_val, ctxt->op_bytes);
- return X86EMUL_CONTINUE;
-}
-
static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
int rc;
@@ -3061,35 +3133,13 @@ static int em_btc(struct x86_emulate_ctxt *ctxt)
static int em_bsf(struct x86_emulate_ctxt *ctxt)
{
- u8 zf;
-
- __asm__ ("bsf %2, %0; setz %1"
- : "=r"(ctxt->dst.val), "=q"(zf)
- : "r"(ctxt->src.val));
-
- ctxt->eflags &= ~X86_EFLAGS_ZF;
- if (zf) {
- ctxt->eflags |= X86_EFLAGS_ZF;
- /* Disable writeback. */
- ctxt->dst.type = OP_NONE;
- }
+ emulate_2op_SrcV_nobyte(ctxt, "bsf");
return X86EMUL_CONTINUE;
}
static int em_bsr(struct x86_emulate_ctxt *ctxt)
{
- u8 zf;
-
- __asm__ ("bsr %2, %0; setz %1"
- : "=r"(ctxt->dst.val), "=q"(zf)
- : "r"(ctxt->src.val));
-
- ctxt->eflags &= ~X86_EFLAGS_ZF;
- if (zf) {
- ctxt->eflags |= X86_EFLAGS_ZF;
- /* Disable writeback. */
- ctxt->dst.type = OP_NONE;
- }
+ emulate_2op_SrcV_nobyte(ctxt, "bsr");
return X86EMUL_CONTINUE;
}
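em_bsf() and em_bsr() now reuse the generic emulate_2op_SrcV_nobyte() macro instead of open-coded asm; the semantics being emulated are that BSF/BSR report the lowest/highest set bit of the source and set ZF when the source is zero, in which case this emulator leaves the destination untouched. A small C sketch of that behaviour using GCC builtins:

#include <stdbool.h>
#include <stdio.h>

/* Returns true (ZF set) if src is zero; otherwise writes the bit index to *dst. */
static bool bsf(unsigned long src, unsigned long *dst)
{
        if (src == 0)
                return true;               /* ZF=1, destination left unchanged */
        *dst = __builtin_ctzl(src);        /* lowest set bit */
        return false;
}

static bool bsr(unsigned long src, unsigned long *dst)
{
        if (src == 0)
                return true;
        *dst = 8 * sizeof(src) - 1 - __builtin_clzl(src);   /* highest set bit */
        return false;
}

int main(void)
{
        unsigned long d = 0;

        if (!bsf(0x18, &d))
                printf("bsf(0x18) -> %lu\n", d);   /* 3 */
        if (!bsr(0x18, &d))
                printf("bsr(0x18) -> %lu\n", d);   /* 4 */
        printf("zf on zero: %d\n", bsf(0, &d));    /* 1 */
        return 0;
}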
@@ -3286,8 +3336,8 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
.check_perm = (_p) }
#define N D(0)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
-#define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
-#define GD(_f, _g) { .flags = ((_f) | GroupDual), .u.gdual = (_g) }
+#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
+#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define II(_f, _e, _i) \
{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
@@ -3307,25 +3357,25 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
static struct opcode group7_rm1[] = {
- DI(SrcNone | ModRM | Priv, monitor),
- DI(SrcNone | ModRM | Priv, mwait),
+ DI(SrcNone | Priv, monitor),
+ DI(SrcNone | Priv, mwait),
N, N, N, N, N, N,
};
static struct opcode group7_rm3[] = {
- DIP(SrcNone | ModRM | Prot | Priv, vmrun, check_svme_pa),
- II(SrcNone | ModRM | Prot | VendorSpecific, em_vmmcall, vmmcall),
- DIP(SrcNone | ModRM | Prot | Priv, vmload, check_svme_pa),
- DIP(SrcNone | ModRM | Prot | Priv, vmsave, check_svme_pa),
- DIP(SrcNone | ModRM | Prot | Priv, stgi, check_svme),
- DIP(SrcNone | ModRM | Prot | Priv, clgi, check_svme),
- DIP(SrcNone | ModRM | Prot | Priv, skinit, check_svme),
- DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme),
+ DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
+ II(SrcNone | Prot | VendorSpecific, em_vmmcall, vmmcall),
+ DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
+ DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
+ DIP(SrcNone | Prot | Priv, stgi, check_svme),
+ DIP(SrcNone | Prot | Priv, clgi, check_svme),
+ DIP(SrcNone | Prot | Priv, skinit, check_svme),
+ DIP(SrcNone | Prot | Priv, invlpga, check_svme),
};
static struct opcode group7_rm7[] = {
N,
- DIP(SrcNone | ModRM, rdtscp, check_rdtsc),
+ DIP(SrcNone, rdtscp, check_rdtsc),
N, N, N, N, N, N,
};
@@ -3341,81 +3391,86 @@ static struct opcode group1[] = {
};
static struct opcode group1A[] = {
- I(DstMem | SrcNone | ModRM | Mov | Stack, em_pop), N, N, N, N, N, N, N,
+ I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
};
static struct opcode group3[] = {
- I(DstMem | SrcImm | ModRM, em_test),
- I(DstMem | SrcImm | ModRM, em_test),
- I(DstMem | SrcNone | ModRM | Lock, em_not),
- I(DstMem | SrcNone | ModRM | Lock, em_neg),
- I(SrcMem | ModRM, em_mul_ex),
- I(SrcMem | ModRM, em_imul_ex),
- I(SrcMem | ModRM, em_div_ex),
- I(SrcMem | ModRM, em_idiv_ex),
+ I(DstMem | SrcImm, em_test),
+ I(DstMem | SrcImm, em_test),
+ I(DstMem | SrcNone | Lock, em_not),
+ I(DstMem | SrcNone | Lock, em_neg),
+ I(SrcMem, em_mul_ex),
+ I(SrcMem, em_imul_ex),
+ I(SrcMem, em_div_ex),
+ I(SrcMem, em_idiv_ex),
};
static struct opcode group4[] = {
- I(ByteOp | DstMem | SrcNone | ModRM | Lock, em_grp45),
- I(ByteOp | DstMem | SrcNone | ModRM | Lock, em_grp45),
+ I(ByteOp | DstMem | SrcNone | Lock, em_grp45),
+ I(ByteOp | DstMem | SrcNone | Lock, em_grp45),
N, N, N, N, N, N,
};
static struct opcode group5[] = {
- I(DstMem | SrcNone | ModRM | Lock, em_grp45),
- I(DstMem | SrcNone | ModRM | Lock, em_grp45),
- I(SrcMem | ModRM | Stack, em_grp45),
- I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
- I(SrcMem | ModRM | Stack, em_grp45),
- I(SrcMemFAddr | ModRM | ImplicitOps, em_grp45),
- I(SrcMem | ModRM | Stack, em_grp45), N,
+ I(DstMem | SrcNone | Lock, em_grp45),
+ I(DstMem | SrcNone | Lock, em_grp45),
+ I(SrcMem | Stack, em_grp45),
+ I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
+ I(SrcMem | Stack, em_grp45),
+ I(SrcMemFAddr | ImplicitOps, em_grp45),
+ I(SrcMem | Stack, em_grp45), N,
};
static struct opcode group6[] = {
- DI(ModRM | Prot, sldt),
- DI(ModRM | Prot, str),
- DI(ModRM | Prot | Priv, lldt),
- DI(ModRM | Prot | Priv, ltr),
+ DI(Prot, sldt),
+ DI(Prot, str),
+ DI(Prot | Priv, lldt),
+ DI(Prot | Priv, ltr),
N, N, N, N,
};
static struct group_dual group7 = { {
- DI(ModRM | Mov | DstMem | Priv, sgdt),
- DI(ModRM | Mov | DstMem | Priv, sidt),
- II(ModRM | SrcMem | Priv, em_lgdt, lgdt),
- II(ModRM | SrcMem | Priv, em_lidt, lidt),
- II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
- II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw),
- II(SrcMem | ModRM | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
+ DI(Mov | DstMem | Priv, sgdt),
+ DI(Mov | DstMem | Priv, sidt),
+ II(SrcMem | Priv, em_lgdt, lgdt),
+ II(SrcMem | Priv, em_lidt, lidt),
+ II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
+ II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
+ II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
}, {
- I(SrcNone | ModRM | Priv | VendorSpecific, em_vmcall),
+ I(SrcNone | Priv | VendorSpecific, em_vmcall),
EXT(0, group7_rm1),
N, EXT(0, group7_rm3),
- II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
- II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7),
+ II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
+ II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
+ EXT(0, group7_rm7),
} };
static struct opcode group8[] = {
N, N, N, N,
- I(DstMem | SrcImmByte | ModRM, em_bt),
- I(DstMem | SrcImmByte | ModRM | Lock | PageTable, em_bts),
- I(DstMem | SrcImmByte | ModRM | Lock, em_btr),
- I(DstMem | SrcImmByte | ModRM | Lock | PageTable, em_btc),
+ I(DstMem | SrcImmByte, em_bt),
+ I(DstMem | SrcImmByte | Lock | PageTable, em_bts),
+ I(DstMem | SrcImmByte | Lock, em_btr),
+ I(DstMem | SrcImmByte | Lock | PageTable, em_btc),
};
static struct group_dual group9 = { {
- N, I(DstMem64 | ModRM | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
+ N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
N, N, N, N, N, N, N, N,
} };
static struct opcode group11[] = {
- I(DstMem | SrcImm | ModRM | Mov | PageTable, em_mov),
+ I(DstMem | SrcImm | Mov | PageTable, em_mov),
X7(D(Undefined)),
};
static struct gprefix pfx_0f_6f_0f_7f = {
- N, N, N, I(Sse, em_movdqu),
+ I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
+};
+
+static struct gprefix pfx_vmovntpx = {
+ I(0, em_mov), N, N, N,
};
static struct opcode opcode_table[256] = {
@@ -3464,10 +3519,10 @@ static struct opcode opcode_table[256] = {
/* 0x70 - 0x7F */
X16(D(SrcImmByte)),
/* 0x80 - 0x87 */
- G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
- G(DstMem | SrcImm | ModRM | Group, group1),
- G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
- G(DstMem | SrcImmByte | ModRM | Group, group1),
+ G(ByteOp | DstMem | SrcImm, group1),
+ G(DstMem | SrcImm, group1),
+ G(ByteOp | DstMem | SrcImm | No64, group1),
+ G(DstMem | SrcImmByte, group1),
I2bv(DstMem | SrcReg | ModRM, em_test),
I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
/* 0x88 - 0x8F */
@@ -3549,7 +3604,8 @@ static struct opcode twobyte_table[256] = {
IIP(ModRM | SrcMem | Priv | Op3264, em_cr_write, cr_write, check_cr_write),
IIP(ModRM | SrcMem | Priv | Op3264, em_dr_write, dr_write, check_dr_write),
N, N, N, N,
- N, N, N, N, N, N, N, N,
+ N, N, N, GP(ModRM | DstMem | SrcReg | Sse | Mov | Aligned, &pfx_vmovntpx),
+ N, N, N, N,
/* 0x30 - 0x3F */
II(ImplicitOps | Priv, em_wrmsr, wrmsr),
IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
@@ -3897,17 +3953,16 @@ done_prefixes:
}
ctxt->d = opcode.flags;
+ if (ctxt->d & ModRM)
+ ctxt->modrm = insn_fetch(u8, ctxt);
+
while (ctxt->d & GroupMask) {
switch (ctxt->d & GroupMask) {
case Group:
- ctxt->modrm = insn_fetch(u8, ctxt);
- --ctxt->_eip;
goffset = (ctxt->modrm >> 3) & 7;
opcode = opcode.u.group[goffset];
break;
case GroupDual:
- ctxt->modrm = insn_fetch(u8, ctxt);
- --ctxt->_eip;
goffset = (ctxt->modrm >> 3) & 7;
if ((ctxt->modrm >> 6) == 3)
opcode = opcode.u.gdual->mod3[goffset];
@@ -3960,6 +4015,8 @@ done_prefixes:
if (ctxt->d & Sse)
ctxt->op_bytes = 16;
+ else if (ctxt->d & Mmx)
+ ctxt->op_bytes = 8;
/* ModRM and SIB bytes. */
if (ctxt->d & ModRM) {
@@ -4030,6 +4087,35 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
return false;
}
+static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
+{
+ bool fault = false;
+
+ ctxt->ops->get_fpu(ctxt);
+ asm volatile("1: fwait \n\t"
+ "2: \n\t"
+ ".pushsection .fixup,\"ax\" \n\t"
+ "3: \n\t"
+ "movb $1, %[fault] \n\t"
+ "jmp 2b \n\t"
+ ".popsection \n\t"
+ _ASM_EXTABLE(1b, 3b)
+ : [fault]"+qm"(fault));
+ ctxt->ops->put_fpu(ctxt);
+
+ if (unlikely(fault))
+ return emulate_exception(ctxt, MF_VECTOR, 0, false);
+
+ return X86EMUL_CONTINUE;
+}
+
+static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
+ struct operand *op)
+{
+ if (op->type == OP_MM)
+ read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
+}
+
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
struct x86_emulate_ops *ops = ctxt->ops;
@@ -4054,18 +4140,31 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
goto done;
}
- if ((ctxt->d & Sse)
- && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)
- || !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
+ if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
+ || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
rc = emulate_ud(ctxt);
goto done;
}
- if ((ctxt->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
+ if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
rc = emulate_nm(ctxt);
goto done;
}
+ if (ctxt->d & Mmx) {
+ rc = flush_pending_x87_faults(ctxt);
+ if (rc != X86EMUL_CONTINUE)
+ goto done;
+ /*
+ * Now that we know the fpu is exception safe, we can fetch
+ * operands from it.
+ */
+ fetch_possible_mmx_operand(ctxt, &ctxt->src);
+ fetch_possible_mmx_operand(ctxt, &ctxt->src2);
+ if (!(ctxt->d & Mov))
+ fetch_possible_mmx_operand(ctxt, &ctxt->dst);
+ }
+
if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_PRE_EXCEPT);
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index d68f99df690..adba28f88d1 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -34,7 +34,6 @@
#include <linux/kvm_host.h>
#include <linux/slab.h>
-#include <linux/workqueue.h>
#include "irq.h"
#include "i8254.h"
@@ -249,7 +248,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
/* in this case, we had multiple outstanding pit interrupts
* that we needed to inject. Reinject
*/
- queue_work(ps->pit->wq, &ps->pit->expired);
+ queue_kthread_work(&ps->pit->worker, &ps->pit->expired);
ps->irq_ack = 1;
spin_unlock(&ps->inject_lock);
}
@@ -270,7 +269,7 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
static void destroy_pit_timer(struct kvm_pit *pit)
{
hrtimer_cancel(&pit->pit_state.pit_timer.timer);
- cancel_work_sync(&pit->expired);
+ flush_kthread_work(&pit->expired);
}
static bool kpit_is_periodic(struct kvm_timer *ktimer)
@@ -284,7 +283,7 @@ static struct kvm_timer_ops kpit_ops = {
.is_periodic = kpit_is_periodic,
};
-static void pit_do_work(struct work_struct *work)
+static void pit_do_work(struct kthread_work *work)
{
struct kvm_pit *pit = container_of(work, struct kvm_pit, expired);
struct kvm *kvm = pit->kvm;
@@ -328,7 +327,7 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
if (ktimer->reinject || !atomic_read(&ktimer->pending)) {
atomic_inc(&ktimer->pending);
- queue_work(pt->wq, &pt->expired);
+ queue_kthread_work(&pt->worker, &pt->expired);
}
if (ktimer->t_ops->is_periodic(ktimer)) {
@@ -353,7 +352,7 @@ static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
/* TODO: the new value only takes effect after the timer is retriggered */
hrtimer_cancel(&pt->timer);
- cancel_work_sync(&ps->pit->expired);
+ flush_kthread_work(&ps->pit->expired);
pt->period = interval;
ps->is_periodic = is_period;
@@ -669,6 +668,8 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
{
struct kvm_pit *pit;
struct kvm_kpit_state *pit_state;
+ struct pid *pid;
+ pid_t pid_nr;
int ret;
pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL);
@@ -685,14 +686,20 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
mutex_lock(&pit->pit_state.lock);
spin_lock_init(&pit->pit_state.inject_lock);
- pit->wq = create_singlethread_workqueue("kvm-pit-wq");
- if (!pit->wq) {
+ pid = get_pid(task_tgid(current));
+ pid_nr = pid_vnr(pid);
+ put_pid(pid);
+
+ init_kthread_worker(&pit->worker);
+ pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker,
+ "kvm-pit/%d", pid_nr);
+ if (IS_ERR(pit->worker_task)) {
mutex_unlock(&pit->pit_state.lock);
kvm_free_irq_source_id(kvm, pit->irq_source_id);
kfree(pit);
return NULL;
}
- INIT_WORK(&pit->expired, pit_do_work);
+ init_kthread_work(&pit->expired, pit_do_work);
kvm->arch.vpit = pit;
pit->kvm = kvm;
@@ -736,7 +743,7 @@ fail:
kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
kvm_free_irq_source_id(kvm, pit->irq_source_id);
- destroy_workqueue(pit->wq);
+ kthread_stop(pit->worker_task);
kfree(pit);
return NULL;
}
@@ -756,10 +763,10 @@ void kvm_free_pit(struct kvm *kvm)
mutex_lock(&kvm->arch.vpit->pit_state.lock);
timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
hrtimer_cancel(timer);
- cancel_work_sync(&kvm->arch.vpit->expired);
+ flush_kthread_work(&kvm->arch.vpit->expired);
+ kthread_stop(kvm->arch.vpit->worker_task);
kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id);
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
- destroy_workqueue(kvm->arch.vpit->wq);
kfree(kvm->arch.vpit);
}
}
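The i8254 conversion above replaces the per-PIT workqueue with a kthread_worker: one dedicated "kvm-pit/<pid>" thread that work items are queued to and flushed against, which also makes the thread's scheduling policy controllable. A rough userspace analogue of that single-worker pattern with POSIX threads (one pending work slot, queue and flush operations); this is only a sketch of the idea, not the kernel API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct worker {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        void          (*pending)(void);   /* at most one queued work item */
        bool            running;
        bool            stop;
};

static void *worker_fn(void *arg)
{
        struct worker *w = arg;

        pthread_mutex_lock(&w->lock);
        while (!w->stop) {
                void (*fn)(void) = w->pending;

                if (!fn) {
                        pthread_cond_wait(&w->cond, &w->lock);
                        continue;
                }
                w->pending = NULL;
                w->running = true;
                pthread_mutex_unlock(&w->lock);
                fn();                               /* run the work without the lock held */
                pthread_mutex_lock(&w->lock);
                w->running = false;
                pthread_cond_broadcast(&w->cond);   /* wake any flusher */
        }
        pthread_mutex_unlock(&w->lock);
        return NULL;
}

static void queue_work(struct worker *w, void (*fn)(void))
{
        pthread_mutex_lock(&w->lock);
        w->pending = fn;
        pthread_cond_broadcast(&w->cond);
        pthread_mutex_unlock(&w->lock);
}

static void flush_work(struct worker *w)
{
        pthread_mutex_lock(&w->lock);
        while (w->pending || w->running)            /* wait until queued work has finished */
                pthread_cond_wait(&w->cond, &w->lock);
        pthread_mutex_unlock(&w->lock);
}

static void tick(void) { puts("pit expired"); }

int main(void)
{
        struct worker w = { .lock = PTHREAD_MUTEX_INITIALIZER, .cond = PTHREAD_COND_INITIALIZER };
        pthread_t t;

        pthread_create(&t, NULL, worker_fn, &w);
        queue_work(&w, tick);
        flush_work(&w);

        pthread_mutex_lock(&w.lock);
        w.stop = true;                              /* analogous to kthread_stop() */
        pthread_cond_broadcast(&w.cond);
        pthread_mutex_unlock(&w.lock);
        pthread_join(t, NULL);
        return 0;
}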
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
index 51a97426e79..fdf40425ea1 100644
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -1,6 +1,8 @@
#ifndef __I8254_H
#define __I8254_H
+#include <linux/kthread.h>
+
#include "iodev.h"
struct kvm_kpit_channel_state {
@@ -39,8 +41,9 @@ struct kvm_pit {
struct kvm_kpit_state pit_state;
int irq_source_id;
struct kvm_irq_mask_notifier mask_notifier;
- struct workqueue_struct *wq;
- struct work_struct expired;
+ struct kthread_worker worker;
+ struct task_struct *worker_task;
+ struct kthread_work expired;
};
#define KVM_PIT_BASE_ADDRESS 0x40
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 858432287ab..93c15743f1e 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -92,6 +92,11 @@ static inline int apic_test_and_clear_vector(int vec, void *bitmap)
return test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
+static inline int apic_test_vector(int vec, void *bitmap)
+{
+ return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
+}
+
static inline void apic_set_vector(int vec, void *bitmap)
{
set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
@@ -480,7 +485,6 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
static void apic_set_eoi(struct kvm_lapic *apic)
{
int vector = apic_find_highest_isr(apic);
- int trigger_mode;
/*
* Not every EOI write has a corresponding ISR bit set;
* one example is when the kernel checks the timer during setup_IO_APIC
@@ -491,12 +495,15 @@ static void apic_set_eoi(struct kvm_lapic *apic)
apic_clear_vector(vector, apic->regs + APIC_ISR);
apic_update_ppr(apic);
- if (apic_test_and_clear_vector(vector, apic->regs + APIC_TMR))
- trigger_mode = IOAPIC_LEVEL_TRIG;
- else
- trigger_mode = IOAPIC_EDGE_TRIG;
- if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI))
+ if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
+ kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
+ int trigger_mode;
+ if (apic_test_vector(vector, apic->regs + APIC_TMR))
+ trigger_mode = IOAPIC_LEVEL_TRIG;
+ else
+ trigger_mode = IOAPIC_EDGE_TRIG;
kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
+ }
kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
@@ -1081,6 +1088,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
apic_update_ppr(apic);
vcpu->arch.apic_arb_prio = 0;
+ vcpu->arch.apic_attention = 0;
apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
"0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
@@ -1280,7 +1288,7 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
u32 data;
void *vapic;
- if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
+ if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
return;
vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
@@ -1297,7 +1305,7 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
struct kvm_lapic *apic;
void *vapic;
- if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
+ if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
return;
apic = vcpu->arch.apic;
@@ -1317,10 +1325,11 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
- if (!irqchip_in_kernel(vcpu->kvm))
- return;
-
vcpu->arch.apic->vapic_addr = vapic_addr;
+ if (vapic_addr)
+ __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
+ else
+ __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
}
int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4cb16426884..72102e0ab7c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -135,8 +135,6 @@ module_param(dbg, bool, 0644);
#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
| PT64_NX_MASK)
-#define PTE_LIST_EXT 4
-
#define ACC_EXEC_MASK 1
#define ACC_WRITE_MASK PT_WRITABLE_MASK
#define ACC_USER_MASK PT_USER_MASK
@@ -151,6 +149,9 @@ module_param(dbg, bool, 0644);
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
+/* make pte_list_desc fit well in cache line */
+#define PTE_LIST_EXT 3
+
struct pte_list_desc {
u64 *sptes[PTE_LIST_EXT];
struct pte_list_desc *more;
@@ -550,19 +551,29 @@ static u64 mmu_spte_get_lockless(u64 *sptep)
static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
{
- rcu_read_lock();
- atomic_inc(&vcpu->kvm->arch.reader_counter);
-
- /* Increase the counter before walking shadow page table */
- smp_mb__after_atomic_inc();
+ /*
+ * Prevent page table teardown by making any free-er wait during
+ * kvm_flush_remote_tlbs() IPI to all active vcpus.
+ */
+ local_irq_disable();
+ vcpu->mode = READING_SHADOW_PAGE_TABLES;
+ /*
+ * Make sure a following spte read is not reordered ahead of the write
+ * to vcpu->mode.
+ */
+ smp_mb();
}
static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
- /* Decrease the counter after walking shadow page table finished */
- smp_mb__before_atomic_dec();
- atomic_dec(&vcpu->kvm->arch.reader_counter);
- rcu_read_unlock();
+ /*
+ * Make sure the write to vcpu->mode is not reordered in front of
+ * reads to sptes. If it is, kvm_commit_zap_page() can see us
+ * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
+ */
+ smp_mb();
+ vcpu->mode = OUTSIDE_GUEST_MODE;
+ local_irq_enable();
}
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
@@ -841,32 +852,6 @@ static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
return count;
}
-static u64 *pte_list_next(unsigned long *pte_list, u64 *spte)
-{
- struct pte_list_desc *desc;
- u64 *prev_spte;
- int i;
-
- if (!*pte_list)
- return NULL;
- else if (!(*pte_list & 1)) {
- if (!spte)
- return (u64 *)*pte_list;
- return NULL;
- }
- desc = (struct pte_list_desc *)(*pte_list & ~1ul);
- prev_spte = NULL;
- while (desc) {
- for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
- if (prev_spte == spte)
- return desc->sptes[i];
- prev_spte = desc->sptes[i];
- }
- desc = desc->more;
- }
- return NULL;
-}
-
static void
pte_list_desc_remove_entry(unsigned long *pte_list, struct pte_list_desc *desc,
int i, struct pte_list_desc *prev_desc)
@@ -987,11 +972,6 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
return pte_list_add(vcpu, spte, rmapp);
}
-static u64 *rmap_next(unsigned long *rmapp, u64 *spte)
-{
- return pte_list_next(rmapp, spte);
-}
-
static void rmap_remove(struct kvm *kvm, u64 *spte)
{
struct kvm_mmu_page *sp;
@@ -1004,106 +984,201 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
pte_list_remove(spte, rmapp);
}
+/*
+ * Used by the following functions to iterate through the sptes linked by a
+ * rmap. All fields are private and not assumed to be used outside.
+ */
+struct rmap_iterator {
+ /* private fields */
+ struct pte_list_desc *desc; /* holds the sptep if not NULL */
+ int pos; /* index of the sptep */
+};
+
+/*
+ * Iteration must be started by this function. This should also be used after
+ * removing/dropping sptes from the rmap link because in such cases the
+ * information in the iterator may not be valid.
+ *
+ * Returns sptep if found, NULL otherwise.
+ */
+static u64 *rmap_get_first(unsigned long rmap, struct rmap_iterator *iter)
+{
+ if (!rmap)
+ return NULL;
+
+ if (!(rmap & 1)) {
+ iter->desc = NULL;
+ return (u64 *)rmap;
+ }
+
+ iter->desc = (struct pte_list_desc *)(rmap & ~1ul);
+ iter->pos = 0;
+ return iter->desc->sptes[iter->pos];
+}
+
+/*
+ * Must be used with a valid iterator: e.g. after rmap_get_first().
+ *
+ * Returns sptep if found, NULL otherwise.
+ */
+static u64 *rmap_get_next(struct rmap_iterator *iter)
+{
+ if (iter->desc) {
+ if (iter->pos < PTE_LIST_EXT - 1) {
+ u64 *sptep;
+
+ ++iter->pos;
+ sptep = iter->desc->sptes[iter->pos];
+ if (sptep)
+ return sptep;
+ }
+
+ iter->desc = iter->desc->more;
+
+ if (iter->desc) {
+ iter->pos = 0;
+ /* desc->sptes[0] cannot be NULL */
+ return iter->desc->sptes[iter->pos];
+ }
+ }
+
+ return NULL;
+}
+
static void drop_spte(struct kvm *kvm, u64 *sptep)
{
if (mmu_spte_clear_track_bits(sptep))
rmap_remove(kvm, sptep);
}
-int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
- struct kvm_memory_slot *slot)
+static int __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level)
{
- unsigned long *rmapp;
- u64 *spte;
- int i, write_protected = 0;
-
- rmapp = __gfn_to_rmap(gfn, PT_PAGE_TABLE_LEVEL, slot);
- spte = rmap_next(rmapp, NULL);
- while (spte) {
- BUG_ON(!(*spte & PT_PRESENT_MASK));
- rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
- if (is_writable_pte(*spte)) {
- mmu_spte_update(spte, *spte & ~PT_WRITABLE_MASK);
- write_protected = 1;
+ u64 *sptep;
+ struct rmap_iterator iter;
+ int write_protected = 0;
+
+ for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
+ BUG_ON(!(*sptep & PT_PRESENT_MASK));
+ rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
+
+ if (!is_writable_pte(*sptep)) {
+ sptep = rmap_get_next(&iter);
+ continue;
}
- spte = rmap_next(rmapp, spte);
- }
- /* check for huge page mappings */
- for (i = PT_DIRECTORY_LEVEL;
- i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
- rmapp = __gfn_to_rmap(gfn, i, slot);
- spte = rmap_next(rmapp, NULL);
- while (spte) {
- BUG_ON(!(*spte & PT_PRESENT_MASK));
- BUG_ON(!is_large_pte(*spte));
- pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
- if (is_writable_pte(*spte)) {
- drop_spte(kvm, spte);
- --kvm->stat.lpages;
- spte = NULL;
- write_protected = 1;
- }
- spte = rmap_next(rmapp, spte);
+ if (level == PT_PAGE_TABLE_LEVEL) {
+ mmu_spte_update(sptep, *sptep & ~PT_WRITABLE_MASK);
+ sptep = rmap_get_next(&iter);
+ } else {
+ BUG_ON(!is_large_pte(*sptep));
+ drop_spte(kvm, sptep);
+ --kvm->stat.lpages;
+ sptep = rmap_get_first(*rmapp, &iter);
}
+
+ write_protected = 1;
}
return write_protected;
}
+/**
+ * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
+ * @kvm: kvm instance
+ * @slot: slot to protect
+ * @gfn_offset: start of the BITS_PER_LONG pages we care about
+ * @mask: indicates which pages we should protect
+ *
+ * Used when we do not need to care about huge page mappings: e.g. during dirty
+ * logging we do not have any such mappings.
+ */
+void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset, unsigned long mask)
+{
+ unsigned long *rmapp;
+
+ while (mask) {
+ rmapp = &slot->rmap[gfn_offset + __ffs(mask)];
+ __rmap_write_protect(kvm, rmapp, PT_PAGE_TABLE_LEVEL);
+
+ /* clear the first set bit */
+ mask &= mask - 1;
+ }
+}
+
static int rmap_write_protect(struct kvm *kvm, u64 gfn)
{
struct kvm_memory_slot *slot;
+ unsigned long *rmapp;
+ int i;
+ int write_protected = 0;
slot = gfn_to_memslot(kvm, gfn);
- return kvm_mmu_rmap_write_protect(kvm, gfn, slot);
+
+ for (i = PT_PAGE_TABLE_LEVEL;
+ i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+ rmapp = __gfn_to_rmap(gfn, i, slot);
+ write_protected |= __rmap_write_protect(kvm, rmapp, i);
+ }
+
+ return write_protected;
}
static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
unsigned long data)
{
- u64 *spte;
+ u64 *sptep;
+ struct rmap_iterator iter;
int need_tlb_flush = 0;
- while ((spte = rmap_next(rmapp, NULL))) {
- BUG_ON(!(*spte & PT_PRESENT_MASK));
- rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
- drop_spte(kvm, spte);
+ while ((sptep = rmap_get_first(*rmapp, &iter))) {
+ BUG_ON(!(*sptep & PT_PRESENT_MASK));
+ rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", sptep, *sptep);
+
+ drop_spte(kvm, sptep);
need_tlb_flush = 1;
}
+
return need_tlb_flush;
}
static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
unsigned long data)
{
+ u64 *sptep;
+ struct rmap_iterator iter;
int need_flush = 0;
- u64 *spte, new_spte;
+ u64 new_spte;
pte_t *ptep = (pte_t *)data;
pfn_t new_pfn;
WARN_ON(pte_huge(*ptep));
new_pfn = pte_pfn(*ptep);
- spte = rmap_next(rmapp, NULL);
- while (spte) {
- BUG_ON(!is_shadow_present_pte(*spte));
- rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
+
+ for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
+ BUG_ON(!is_shadow_present_pte(*sptep));
+ rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", sptep, *sptep);
+
need_flush = 1;
+
if (pte_write(*ptep)) {
- drop_spte(kvm, spte);
- spte = rmap_next(rmapp, NULL);
+ drop_spte(kvm, sptep);
+ sptep = rmap_get_first(*rmapp, &iter);
} else {
- new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
+ new_spte = *sptep & ~PT64_BASE_ADDR_MASK;
new_spte |= (u64)new_pfn << PAGE_SHIFT;
new_spte &= ~PT_WRITABLE_MASK;
new_spte &= ~SPTE_HOST_WRITEABLE;
new_spte &= ~shadow_accessed_mask;
- mmu_spte_clear_track_bits(spte);
- mmu_spte_set(spte, new_spte);
- spte = rmap_next(rmapp, spte);
+
+ mmu_spte_clear_track_bits(sptep);
+ mmu_spte_set(sptep, new_spte);
+ sptep = rmap_get_next(&iter);
}
}
+
if (need_flush)
kvm_flush_remote_tlbs(kvm);
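The rmap_get_first()/rmap_get_next() helpers added earlier in this hunk replace the stateless rmap_next() walk: the rmap word is a tagged pointer (low bit clear means the word is itself the single spte pointer, low bit set means it points to a pte_list_desc chain), and the iterator carries the current descriptor and position so callers can restart cleanly after dropping an entry. A standalone sketch of the same encoding and walk, with plain ints standing in for sptes:

#include <stdio.h>

#define PTE_LIST_EXT 3

struct pte_list_desc {
        int *sptes[PTE_LIST_EXT];
        struct pte_list_desc *more;
};

struct rmap_iterator {
        struct pte_list_desc *desc;   /* NULL when the rmap holds a single entry */
        int pos;
};

static int *rmap_get_first(unsigned long rmap, struct rmap_iterator *iter)
{
        if (!rmap)
                return NULL;
        if (!(rmap & 1)) {                    /* low bit clear: rmap is the pointer itself */
                iter->desc = NULL;
                return (int *)rmap;
        }
        iter->desc = (struct pte_list_desc *)(rmap & ~1ul);
        iter->pos = 0;
        return iter->desc->sptes[0];
}

static int *rmap_get_next(struct rmap_iterator *iter)
{
        if (!iter->desc)
                return NULL;                  /* single-entry rmap: nothing follows */
        if (iter->pos < PTE_LIST_EXT - 1 && iter->desc->sptes[iter->pos + 1])
                return iter->desc->sptes[++iter->pos];
        iter->desc = iter->desc->more;        /* move to the next descriptor, if any */
        iter->pos = 0;
        return iter->desc ? iter->desc->sptes[0] : NULL;
}

int main(void)
{
        int a = 1, b = 2, c = 3, d = 4;
        struct pte_list_desc d2 = { { &d, NULL, NULL }, NULL };
        struct pte_list_desc d1 = { { &a, &b, &c }, &d2 };
        struct rmap_iterator iter;
        unsigned long rmap = (unsigned long)&d1 | 1;   /* low bit set: descriptor chain */

        for (int *p = rmap_get_first(rmap, &iter); p; p = rmap_get_next(&iter))
                printf("%d\n", *p);                    /* prints 1 2 3 4 */
        return 0;
}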
@@ -1162,7 +1237,8 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
unsigned long data)
{
- u64 *spte;
+ u64 *sptep;
+ struct rmap_iterator iter;
int young = 0;
/*
@@ -1175,25 +1251,24 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
if (!shadow_accessed_mask)
return kvm_unmap_rmapp(kvm, rmapp, data);
- spte = rmap_next(rmapp, NULL);
- while (spte) {
- int _young;
- u64 _spte = *spte;
- BUG_ON(!(_spte & PT_PRESENT_MASK));
- _young = _spte & PT_ACCESSED_MASK;
- if (_young) {
+ for (sptep = rmap_get_first(*rmapp, &iter); sptep;
+ sptep = rmap_get_next(&iter)) {
+ BUG_ON(!(*sptep & PT_PRESENT_MASK));
+
+ if (*sptep & PT_ACCESSED_MASK) {
young = 1;
- clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+ clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)sptep);
}
- spte = rmap_next(rmapp, spte);
}
+
return young;
}
static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
unsigned long data)
{
- u64 *spte;
+ u64 *sptep;
+ struct rmap_iterator iter;
int young = 0;
/*
@@ -1204,16 +1279,14 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
if (!shadow_accessed_mask)
goto out;
- spte = rmap_next(rmapp, NULL);
- while (spte) {
- u64 _spte = *spte;
- BUG_ON(!(_spte & PT_PRESENT_MASK));
- young = _spte & PT_ACCESSED_MASK;
- if (young) {
+ for (sptep = rmap_get_first(*rmapp, &iter); sptep;
+ sptep = rmap_get_next(&iter)) {
+ BUG_ON(!(*sptep & PT_PRESENT_MASK));
+
+ if (*sptep & PT_ACCESSED_MASK) {
young = 1;
break;
}
- spte = rmap_next(rmapp, spte);
}
out:
return young;
@@ -1865,10 +1938,11 @@ static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
{
- u64 *parent_pte;
+ u64 *sptep;
+ struct rmap_iterator iter;
- while ((parent_pte = pte_list_next(&sp->parent_ptes, NULL)))
- drop_parent_pte(sp, parent_pte);
+ while ((sptep = rmap_get_first(sp->parent_ptes, &iter)))
+ drop_parent_pte(sp, sptep);
}
static int mmu_zap_unsync_children(struct kvm *kvm,
@@ -1925,30 +1999,6 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
return ret;
}
-static void kvm_mmu_isolate_pages(struct list_head *invalid_list)
-{
- struct kvm_mmu_page *sp;
-
- list_for_each_entry(sp, invalid_list, link)
- kvm_mmu_isolate_page(sp);
-}
-
-static void free_pages_rcu(struct rcu_head *head)
-{
- struct kvm_mmu_page *next, *sp;
-
- sp = container_of(head, struct kvm_mmu_page, rcu);
- while (sp) {
- if (!list_empty(&sp->link))
- next = list_first_entry(&sp->link,
- struct kvm_mmu_page, link);
- else
- next = NULL;
- kvm_mmu_free_page(sp);
- sp = next;
- }
-}
-
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
struct list_head *invalid_list)
{
@@ -1957,17 +2007,17 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
if (list_empty(invalid_list))
return;
- kvm_flush_remote_tlbs(kvm);
-
- if (atomic_read(&kvm->arch.reader_counter)) {
- kvm_mmu_isolate_pages(invalid_list);
- sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
- list_del_init(invalid_list);
+ /*
+ * wmb: make sure everyone sees our modifications to the page tables
+ * rmb: make sure we see changes to vcpu->mode
+ */
+ smp_mb();
- trace_kvm_mmu_delay_free_pages(sp);
- call_rcu(&sp->rcu, free_pages_rcu);
- return;
- }
+ /*
+ * Wait for all vcpus to exit guest mode and/or lockless shadow
+ * page table walks.
+ */
+ kvm_flush_remote_tlbs(kvm);
do {
sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
@@ -1975,7 +2025,6 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
kvm_mmu_isolate_page(sp);
kvm_mmu_free_page(sp);
} while (!list_empty(invalid_list));
-
}
/*
@@ -3554,7 +3603,7 @@ static bool detect_write_flooding(struct kvm_mmu_page *sp)
* Skip write-flooding detected for the sp whose level is 1, because
* it can become unsync, then the guest page is not write-protected.
*/
- if (sp->role.level == 1)
+ if (sp->role.level == PT_PAGE_TABLE_LEVEL)
return false;
return ++sp->write_flooding_count >= 3;
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index 715da5a19a5..7d7d0b9e23e 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -192,7 +192,8 @@ static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
{
struct kvm_memory_slot *slot;
unsigned long *rmapp;
- u64 *spte;
+ u64 *sptep;
+ struct rmap_iterator iter;
if (sp->role.direct || sp->unsync || sp->role.invalid)
return;
@@ -200,13 +201,12 @@ static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
slot = gfn_to_memslot(kvm, sp->gfn);
rmapp = &slot->rmap[sp->gfn - slot->base_gfn];
- spte = rmap_next(rmapp, NULL);
- while (spte) {
- if (is_writable_pte(*spte))
+ for (sptep = rmap_get_first(*rmapp, &iter); sptep;
+ sptep = rmap_get_next(&iter)) {
+ if (is_writable_pte(*sptep))
audit_printk(kvm, "shadow page has writable "
"mappings: gfn %llx role %x\n",
sp->gfn, sp->role.word);
- spte = rmap_next(rmapp, spte);
}
}
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index df5a70311be..34f970937ef 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -658,7 +658,7 @@ static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{
int offset = 0;
- WARN_ON(sp->role.level != 1);
+ WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);
if (PTTYPE == 32)
offset = sp->role.quadrant << PT64_LEVEL_BITS;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e334389e1c7..f75af406b26 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -22,6 +22,7 @@
#include "x86.h"
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
@@ -42,6 +43,12 @@
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
+static const struct x86_cpu_id svm_cpu_id[] = {
+ X86_FEATURE_MATCH(X86_FEATURE_SVM),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
+
#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1
@@ -3240,6 +3247,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
svm_clear_vintr(svm);
svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
mark_dirty(svm->vmcb, VMCB_INTR);
+ ++svm->vcpu.stat.irq_window_exits;
/*
* If the user space waits to inject interrupts, exit as soon as
* possible
@@ -3247,7 +3255,6 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
if (!irqchip_in_kernel(svm->vcpu.kvm) &&
kvm_run->request_interrupt_window &&
!kvm_cpu_has_interrupt(&svm->vcpu)) {
- ++svm->vcpu.stat.irq_window_exits;
kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
return 0;
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4ff0ab9bc3c..32eb5886629 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -27,6 +27,7 @@
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
+#include <linux/mod_devicetable.h>
#include <linux/ftrace_event.h>
#include <linux/slab.h>
#include <linux/tboot.h>
@@ -51,6 +52,12 @@
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
+static const struct x86_cpu_id vmx_cpu_id[] = {
+ X86_FEATURE_MATCH(X86_FEATURE_VMX),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
+
static bool __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);
@@ -386,6 +393,9 @@ struct vcpu_vmx {
struct {
int loaded;
u16 fs_sel, gs_sel, ldt_sel;
+#ifdef CONFIG_X86_64
+ u16 ds_sel, es_sel;
+#endif
int gs_ldt_reload_needed;
int fs_reload_needed;
} host_state;
@@ -1411,6 +1421,11 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
}
#ifdef CONFIG_X86_64
+ savesegment(ds, vmx->host_state.ds_sel);
+ savesegment(es, vmx->host_state.es_sel);
+#endif
+
+#ifdef CONFIG_X86_64
vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
@@ -1450,6 +1465,19 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
}
if (vmx->host_state.fs_reload_needed)
loadsegment(fs, vmx->host_state.fs_sel);
+#ifdef CONFIG_X86_64
+ if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
+ loadsegment(ds, vmx->host_state.ds_sel);
+ loadsegment(es, vmx->host_state.es_sel);
+ }
+#else
+ /*
+ * The sysexit path does not restore ds/es, so we must set them to
+ * a reasonable value ourselves.
+ */
+ loadsegment(ds, __USER_DS);
+ loadsegment(es, __USER_DS);
+#endif
reload_tss();
#ifdef CONFIG_X86_64
wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
@@ -3633,8 +3661,18 @@ static void vmx_set_constant_host_state(void)
vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
+#ifdef CONFIG_X86_64
+ /*
+ * Load null selectors, so we can avoid reloading them in
+ * __vmx_load_host_state(), in case userspace uses the null selectors
+ * too (the expected case).
+ */
+ vmcs_write16(HOST_DS_SELECTOR, 0);
+ vmcs_write16(HOST_ES_SELECTOR, 0);
+#else
vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
+#endif
vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
@@ -6256,7 +6294,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
}
}
- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
vmx->loaded_vmcs->launched = 1;
vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
@@ -6343,7 +6380,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
return &vmx->vcpu;
free_vmcs:
- free_vmcs(vmx->loaded_vmcs->vmcs);
+ free_loaded_vmcs(vmx->loaded_vmcs);
free_msrs:
kfree(vmx->guest_msrs);
uninit_vcpu:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 185a2b823a2..be6d54929fa 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2147,6 +2147,7 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_ASYNC_PF:
case KVM_CAP_GET_TSC_KHZ:
case KVM_CAP_PCI_2_3:
+ case KVM_CAP_KVMCLOCK_CTRL:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
@@ -2597,6 +2598,23 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
return r;
}
+/*
+ * kvm_set_guest_paused() indicates to the guest kernel that it has been
+ * stopped by the hypervisor. This function will be called from the host only.
+ * EINVAL is returned when the host attempts to set the flag for a guest that
+ * does not support pv clocks.
+ */
+static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
+{
+ struct pvclock_vcpu_time_info *src = &vcpu->arch.hv_clock;
+ if (!vcpu->arch.time_page)
+ return -EINVAL;
+ src->flags |= PVCLOCK_GUEST_STOPPED;
+ mark_page_dirty(vcpu->kvm, vcpu->arch.time >> PAGE_SHIFT);
+ kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+ return 0;
+}
+
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -2873,6 +2891,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = vcpu->arch.virtual_tsc_khz;
goto out;
}
+ case KVM_KVMCLOCK_CTRL: {
+ r = kvm_set_guest_paused(vcpu);
+ goto out;
+ }
default:
r = -EINVAL;
}
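
The new KVM_KVMCLOCK_CTRL case above takes no payload; user space issues the ioctl on a vcpu fd after pausing the guest so kvmclock can flag the stop to the guest kernel. A minimal host-side sketch (it assumes a vcpu fd obtained via KVM_CREATE_VCPU and headers that already carry this patch; it is not taken from any particular VMM):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Tell the paused guest's kernel that the stop was intentional.
 * vcpu_fd is assumed to come from ioctl(vm_fd, KVM_CREATE_VCPU, ...).
 */
static int notify_guest_paused(int vcpu_fd)
{
#ifdef KVM_KVMCLOCK_CTRL
        if (ioctl(vcpu_fd, KVM_KVMCLOCK_CTRL, 0) < 0) {
                perror("KVM_KVMCLOCK_CTRL");    /* EINVAL: no pvclock in guest */
                return -1;
        }
        return 0;
#else
        (void)vcpu_fd;
        fprintf(stderr, "KVM_KVMCLOCK_CTRL not in this kernel's headers\n");
        return -1;
#endif
}
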
@@ -3045,57 +3067,32 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
}
/**
- * write_protect_slot - write protect a slot for dirty logging
- * @kvm: the kvm instance
- * @memslot: the slot we protect
- * @dirty_bitmap: the bitmap indicating which pages are dirty
- * @nr_dirty_pages: the number of dirty pages
+ * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
+ * @kvm: kvm instance
+ * @log: slot id and address to which we copy the log
*
- * We have two ways to find all sptes to protect:
- * 1. Use kvm_mmu_slot_remove_write_access() which walks all shadow pages and
- * checks ones that have a spte mapping a page in the slot.
- * 2. Use kvm_mmu_rmap_write_protect() for each gfn found in the bitmap.
+ * We need to keep in mind that VCPU threads can write to the bitmap
+ * concurrently. So, to avoid losing data, we keep the following order for
+ * each bit:
*
- * Generally speaking, if there are not so many dirty pages compared to the
- * number of shadow pages, we should use the latter.
+ * 1. Take a snapshot of the bit and clear it if needed.
+ * 2. Write protect the corresponding page.
+ * 3. Flush TLB's if needed.
+ * 4. Copy the snapshot to the userspace.
*
- * Note that letting others write into a page marked dirty in the old bitmap
- * by using the remaining tlb entry is not a problem. That page will become
- * write protected again when we flush the tlb and then be reported dirty to
- * the user space by copying the old bitmap.
- */
-static void write_protect_slot(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- unsigned long *dirty_bitmap,
- unsigned long nr_dirty_pages)
-{
- spin_lock(&kvm->mmu_lock);
-
- /* Not many dirty pages compared to # of shadow pages. */
- if (nr_dirty_pages < kvm->arch.n_used_mmu_pages) {
- unsigned long gfn_offset;
-
- for_each_set_bit(gfn_offset, dirty_bitmap, memslot->npages) {
- unsigned long gfn = memslot->base_gfn + gfn_offset;
-
- kvm_mmu_rmap_write_protect(kvm, gfn, memslot);
- }
- kvm_flush_remote_tlbs(kvm);
- } else
- kvm_mmu_slot_remove_write_access(kvm, memslot->id);
-
- spin_unlock(&kvm->mmu_lock);
-}
-
-/*
- * Get (and clear) the dirty memory log for a memory slot.
+ * Between 2 and 3, the guest may write to the page using the remaining TLB
+ * entry. This is not a problem because the page will be reported dirty at
+ * step 4 using the snapshot taken before and step 3 ensures that successive
+ * writes will be logged for the next call.
*/
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
- struct kvm_dirty_log *log)
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
int r;
struct kvm_memory_slot *memslot;
- unsigned long n, nr_dirty_pages;
+ unsigned long n, i;
+ unsigned long *dirty_bitmap;
+ unsigned long *dirty_bitmap_buffer;
+ bool is_dirty = false;
mutex_lock(&kvm->slots_lock);
@@ -3104,49 +3101,42 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
goto out;
memslot = id_to_memslot(kvm->memslots, log->slot);
+
+ dirty_bitmap = memslot->dirty_bitmap;
r = -ENOENT;
- if (!memslot->dirty_bitmap)
+ if (!dirty_bitmap)
goto out;
n = kvm_dirty_bitmap_bytes(memslot);
- nr_dirty_pages = memslot->nr_dirty_pages;
- /* If nothing is dirty, don't bother messing with page tables. */
- if (nr_dirty_pages) {
- struct kvm_memslots *slots, *old_slots;
- unsigned long *dirty_bitmap, *dirty_bitmap_head;
+ dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
+ memset(dirty_bitmap_buffer, 0, n);
- dirty_bitmap = memslot->dirty_bitmap;
- dirty_bitmap_head = memslot->dirty_bitmap_head;
- if (dirty_bitmap == dirty_bitmap_head)
- dirty_bitmap_head += n / sizeof(long);
- memset(dirty_bitmap_head, 0, n);
+ spin_lock(&kvm->mmu_lock);
- r = -ENOMEM;
- slots = kmemdup(kvm->memslots, sizeof(*kvm->memslots), GFP_KERNEL);
- if (!slots)
- goto out;
+ for (i = 0; i < n / sizeof(long); i++) {
+ unsigned long mask;
+ gfn_t offset;
- memslot = id_to_memslot(slots, log->slot);
- memslot->nr_dirty_pages = 0;
- memslot->dirty_bitmap = dirty_bitmap_head;
- update_memslots(slots, NULL);
+ if (!dirty_bitmap[i])
+ continue;
- old_slots = kvm->memslots;
- rcu_assign_pointer(kvm->memslots, slots);
- synchronize_srcu_expedited(&kvm->srcu);
- kfree(old_slots);
+ is_dirty = true;
- write_protect_slot(kvm, memslot, dirty_bitmap, nr_dirty_pages);
+ mask = xchg(&dirty_bitmap[i], 0);
+ dirty_bitmap_buffer[i] = mask;
- r = -EFAULT;
- if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
- goto out;
- } else {
- r = -EFAULT;
- if (clear_user(log->dirty_bitmap, n))
- goto out;
+ offset = i * BITS_PER_LONG;
+ kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask);
}
+ if (is_dirty)
+ kvm_flush_remote_tlbs(kvm);
+
+ spin_unlock(&kvm->mmu_lock);
+
+ r = -EFAULT;
+ if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
+ goto out;
r = 0;
out:
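
The ordering spelled out in the comment above (snapshot-and-clear, write-protect, flush, copy out) rests on the per-word xchg() snapshot. A compressed user-space sketch of just that snapshot step, with GCC's __atomic_exchange_n() standing in for the kernel's xchg() and the write-protect/flush steps reduced to comments:

#include <stdio.h>
#include <string.h>

#define WORDS 4

/* dirty_bitmap is written concurrently by "vcpu threads";
 * dirty_snapshot is private to the caller of get_dirty_log(). */
static unsigned long dirty_bitmap[WORDS];
static unsigned long dirty_snapshot[WORDS];

static int get_dirty_log(void)
{
        int i, is_dirty = 0;

        memset(dirty_snapshot, 0, sizeof(dirty_snapshot));

        for (i = 0; i < WORDS; i++) {
                unsigned long mask;

                if (!dirty_bitmap[i])
                        continue;

                is_dirty = 1;
                /* Step 1: atomically take the bits and clear them, so a
                 * racing setter lands either in this snapshot or in the
                 * next call -- never lost. */
                mask = __atomic_exchange_n(&dirty_bitmap[i], 0,
                                           __ATOMIC_SEQ_CST);
                dirty_snapshot[i] = mask;

                /* Step 2 would write-protect the pages in 'mask' here;
                 * step 3 flushes TLBs once after the loop. */
        }
        /* Step 4: hand dirty_snapshot to the caller (copy_to_user in KVM). */
        return is_dirty;
}

int main(void)
{
        dirty_bitmap[1] = 0x5;
        printf("dirty=%d word1=%#lx\n", get_dirty_log(), dirty_snapshot[1]);
        return 0;
}
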
@@ -3728,9 +3718,8 @@ struct read_write_emulator_ops {
static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
{
if (vcpu->mmio_read_completed) {
- memcpy(val, vcpu->mmio_data, bytes);
trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
- vcpu->mmio_phys_addr, *(u64 *)val);
+ vcpu->mmio_fragments[0].gpa, *(u64 *)val);
vcpu->mmio_read_completed = 0;
return 1;
}
@@ -3766,8 +3755,9 @@ static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
void *val, int bytes)
{
- memcpy(vcpu->mmio_data, val, bytes);
- memcpy(vcpu->run->mmio.data, vcpu->mmio_data, 8);
+ struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
+
+ memcpy(vcpu->run->mmio.data, frag->data, frag->len);
return X86EMUL_CONTINUE;
}
@@ -3794,10 +3784,7 @@ static int emulator_read_write_onepage(unsigned long addr, void *val,
gpa_t gpa;
int handled, ret;
bool write = ops->write;
-
- if (ops->read_write_prepare &&
- ops->read_write_prepare(vcpu, val, bytes))
- return X86EMUL_CONTINUE;
+ struct kvm_mmio_fragment *frag;
ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
@@ -3823,15 +3810,19 @@ mmio:
bytes -= handled;
val += handled;
- vcpu->mmio_needed = 1;
- vcpu->run->exit_reason = KVM_EXIT_MMIO;
- vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
- vcpu->mmio_size = bytes;
- vcpu->run->mmio.len = min(vcpu->mmio_size, 8);
- vcpu->run->mmio.is_write = vcpu->mmio_is_write = write;
- vcpu->mmio_index = 0;
+ while (bytes) {
+ unsigned now = min(bytes, 8U);
- return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
+ frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
+ frag->gpa = gpa;
+ frag->data = val;
+ frag->len = now;
+
+ gpa += now;
+ val += now;
+ bytes -= now;
+ }
+ return X86EMUL_CONTINUE;
}
int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
@@ -3840,10 +3831,18 @@ int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
struct read_write_emulator_ops *ops)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+ gpa_t gpa;
+ int rc;
+
+ if (ops->read_write_prepare &&
+ ops->read_write_prepare(vcpu, val, bytes))
+ return X86EMUL_CONTINUE;
+
+ vcpu->mmio_nr_fragments = 0;
/* Crossing a page boundary? */
if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
- int rc, now;
+ int now;
now = -addr & ~PAGE_MASK;
rc = emulator_read_write_onepage(addr, val, now, exception,
@@ -3856,8 +3855,25 @@ int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
bytes -= now;
}
- return emulator_read_write_onepage(addr, val, bytes, exception,
- vcpu, ops);
+ rc = emulator_read_write_onepage(addr, val, bytes, exception,
+ vcpu, ops);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+ if (!vcpu->mmio_nr_fragments)
+ return rc;
+
+ gpa = vcpu->mmio_fragments[0].gpa;
+
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_cur_fragment = 0;
+
+ vcpu->run->mmio.len = vcpu->mmio_fragments[0].len;
+ vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
+ vcpu->run->exit_reason = KVM_EXIT_MMIO;
+ vcpu->run->mmio.phys_addr = gpa;
+
+ return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
}
static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
@@ -5263,10 +5279,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_deliver_pmi(vcpu);
}
- r = kvm_mmu_reload(vcpu);
- if (unlikely(r))
- goto out;
-
if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
inject_pending_event(vcpu);
@@ -5282,6 +5294,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
}
}
+ r = kvm_mmu_reload(vcpu);
+ if (unlikely(r)) {
+ kvm_x86_ops->cancel_injection(vcpu);
+ goto out;
+ }
+
preempt_disable();
kvm_x86_ops->prepare_guest_switch(vcpu);
@@ -5456,33 +5474,55 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
return r;
}
+/*
+ * Implements the following, as a state machine:
+ *
+ * read:
+ * for each fragment
+ * write gpa, len
+ * exit
+ * copy data
+ * execute insn
+ *
+ * write:
+ * for each fragment
+ * write gpa, len
+ * copy data
+ * exit
+ */
static int complete_mmio(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
+ struct kvm_mmio_fragment *frag;
int r;
if (!(vcpu->arch.pio.count || vcpu->mmio_needed))
return 1;
if (vcpu->mmio_needed) {
- vcpu->mmio_needed = 0;
+ /* Complete previous fragment */
+ frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment++];
if (!vcpu->mmio_is_write)
- memcpy(vcpu->mmio_data + vcpu->mmio_index,
- run->mmio.data, 8);
- vcpu->mmio_index += 8;
- if (vcpu->mmio_index < vcpu->mmio_size) {
- run->exit_reason = KVM_EXIT_MMIO;
- run->mmio.phys_addr = vcpu->mmio_phys_addr + vcpu->mmio_index;
- memcpy(run->mmio.data, vcpu->mmio_data + vcpu->mmio_index, 8);
- run->mmio.len = min(vcpu->mmio_size - vcpu->mmio_index, 8);
- run->mmio.is_write = vcpu->mmio_is_write;
- vcpu->mmio_needed = 1;
- return 0;
+ memcpy(frag->data, run->mmio.data, frag->len);
+ if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
+ vcpu->mmio_needed = 0;
+ if (vcpu->mmio_is_write)
+ return 1;
+ vcpu->mmio_read_completed = 1;
+ goto done;
}
+ /* Initiate next fragment */
+ ++frag;
+ run->exit_reason = KVM_EXIT_MMIO;
+ run->mmio.phys_addr = frag->gpa;
if (vcpu->mmio_is_write)
- return 1;
- vcpu->mmio_read_completed = 1;
+ memcpy(run->mmio.data, frag->data, frag->len);
+ run->mmio.len = frag->len;
+ run->mmio.is_write = vcpu->mmio_is_write;
+ return 0;
+
}
+done:
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
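
To make the fragment bookkeeping above concrete: emulator_read_write_onepage() now chops an MMIO access into fragments of at most 8 bytes, and complete_mmio() replays them one KVM_EXIT_MMIO at a time. A stand-alone sketch of just the splitting rule (the local struct mirrors the gpa/data/len fields of kvm_mmio_fragment, but nothing here is KVM code):

#include <stdio.h>
#include <stdint.h>

struct mmio_frag {              /* shape of kvm_mmio_fragment */
        uint64_t gpa;
        void *data;
        unsigned len;
};

/* Split [gpa, gpa+bytes) into at most 8-byte fragments, as the emulator
 * does before handing them to user space one exit at a time. */
static int split_mmio(uint64_t gpa, void *val, unsigned bytes,
                      struct mmio_frag *frags, int max_frags)
{
        int n = 0;

        while (bytes) {
                unsigned now = bytes < 8 ? bytes : 8;

                if (n == max_frags)
                        return -1;
                frags[n].gpa  = gpa;
                frags[n].data = val;
                frags[n].len  = now;
                n++;

                gpa   += now;
                val    = (char *)val + now;
                bytes -= now;
        }
        return n;
}

int main(void)
{
        char buf[10];
        struct mmio_frag frags[4];
        int i, n = split_mmio(0xfee00000, buf, sizeof(buf), frags, 4);

        for (i = 0; i < n; i++) /* expect 8 + 2 for a 10-byte access */
                printf("frag %d: gpa=%#llx len=%u\n", i,
                       (unsigned long long)frags[i].gpa, frags[i].len);
        return 0;
}
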
@@ -6399,21 +6439,9 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
kvm_cpu_has_interrupt(vcpu));
}
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
- int me;
- int cpu = vcpu->cpu;
-
- if (waitqueue_active(&vcpu->wq)) {
- wake_up_interruptible(&vcpu->wq);
- ++vcpu->stat.halt_wakeup;
- }
-
- me = get_cpu();
- if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
- if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
- smp_send_reschedule(cpu);
- put_cpu();
+ return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index cb80c293cdd..3d1134ddb88 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -64,7 +64,7 @@ static inline int is_pse(struct kvm_vcpu *vcpu)
static inline int is_paging(struct kvm_vcpu *vcpu)
{
- return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
+ return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}
static inline u32 bit(int bitno)
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index 2e4e4b02c37..f61ee67ec00 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -43,100 +43,3 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
return len;
}
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
-
-/*
- * Do a strncpy, return length of string without final '\0'.
- * 'count' is the user-supplied count (return 'count' if we
- * hit it), 'max' is the address space maximum (and we return
- * -EFAULT if we hit it).
- */
-static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
-{
- long res = 0;
-
- /*
- * Truncate 'max' to the user-specified limit, so that
- * we only have one limit we need to check in the loop
- */
- if (max > count)
- max = count;
-
- while (max >= sizeof(unsigned long)) {
- unsigned long c, mask;
-
- /* Fall back to byte-at-a-time if we get a page fault */
- if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
- break;
- mask = has_zero(c);
- if (mask) {
- mask = (mask - 1) & ~mask;
- mask >>= 7;
- *(unsigned long *)(dst+res) = c & mask;
- return res + count_masked_bytes(mask);
- }
- *(unsigned long *)(dst+res) = c;
- res += sizeof(unsigned long);
- max -= sizeof(unsigned long);
- }
-
- while (max) {
- char c;
-
- if (unlikely(__get_user(c,src+res)))
- return -EFAULT;
- dst[res] = c;
- if (!c)
- return res;
- res++;
- max--;
- }
-
- /*
- * Uhhuh. We hit 'max'. But was that the user-specified maximum
- * too? If so, that's ok - we got as much as the user asked for.
- */
- if (res >= count)
- return res;
-
- /*
- * Nope: we hit the address space limit, and we still had more
- * characters the caller would have wanted. That's an EFAULT.
- */
- return -EFAULT;
-}
-
-/**
- * strncpy_from_user: - Copy a NUL terminated string from userspace.
- * @dst: Destination address, in kernel space. This buffer must be at
- * least @count bytes long.
- * @src: Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-long
-strncpy_from_user(char *dst, const char __user *src, long count)
-{
- unsigned long max_addr, src_addr;
-
- if (unlikely(count <= 0))
- return 0;
-
- max_addr = current_thread_info()->addr_limit.seg;
- src_addr = (unsigned long)src;
- if (likely(src_addr < max_addr)) {
- unsigned long max = max_addr - src_addr;
- return do_strncpy_from_user(dst, src, count, max);
- }
- return -EFAULT;
-}
-EXPORT_SYMBOL(strncpy_from_user);
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 883b216c60b..1781b2f950e 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -95,47 +95,6 @@ __clear_user(void __user *to, unsigned long n)
}
EXPORT_SYMBOL(__clear_user);
-/**
- * strnlen_user: - Get the size of a string in user space.
- * @s: The string to measure.
- * @n: The maximum valid length
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- * If the string is too long, returns a value greater than @n.
- */
-long strnlen_user(const char __user *s, long n)
-{
- unsigned long mask = -__addr_ok(s);
- unsigned long res, tmp;
-
- might_fault();
-
- __asm__ __volatile__(
- " testl %0, %0\n"
- " jz 3f\n"
- " andl %0,%%ecx\n"
- "0: repne; scasb\n"
- " setne %%al\n"
- " subl %%ecx,%0\n"
- " addl %0,%%eax\n"
- "1:\n"
- ".section .fixup,\"ax\"\n"
- "2: xorl %%eax,%%eax\n"
- " jmp 1b\n"
- "3: movb $1,%%al\n"
- " jmp 1b\n"
- ".previous\n"
- _ASM_EXTABLE(0b,2b)
- :"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp)
- :"0" (n), "1" (s), "2" (0), "3" (mask)
- :"cc");
- return res & mask;
-}
-EXPORT_SYMBOL(strnlen_user);
-
#ifdef CONFIG_X86_INTEL_USERCOPY
static unsigned long
__copy_user_intel(void __user *to, const void *from, unsigned long size)
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 0d0326f388c..e5b130bc2d0 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -52,54 +52,6 @@ unsigned long clear_user(void __user *to, unsigned long n)
}
EXPORT_SYMBOL(clear_user);
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 on exception, a value greater than N if too long
- */
-
-long __strnlen_user(const char __user *s, long n)
-{
- long res = 0;
- char c;
-
- while (1) {
- if (res>n)
- return n+1;
- if (__get_user(c, s))
- return 0;
- if (!c)
- return res+1;
- res++;
- s++;
- }
-}
-EXPORT_SYMBOL(__strnlen_user);
-
-long strnlen_user(const char __user *s, long n)
-{
- if (!access_ok(VERIFY_READ, s, 1))
- return 0;
- return __strnlen_user(s, n);
-}
-EXPORT_SYMBOL(strnlen_user);
-
-long strlen_user(const char __user *s)
-{
- long res = 0;
- char c;
-
- for (;;) {
- if (get_user(c, s))
- return 0;
- if (!c)
- return res+1;
- res++;
- s++;
- }
-}
-EXPORT_SYMBOL(strlen_user);
-
unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
{
if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 319b6f2fb8b..97141c26a13 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -84,8 +84,9 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en
pgt_buf_end = pgt_buf_start;
pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
- printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
- end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
+ printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n",
+ end - 1, pgt_buf_start << PAGE_SHIFT,
+ (pgt_buf_top << PAGE_SHIFT) - 1);
}
void __init native_pagetable_reserve(u64 start, u64 end)
@@ -132,7 +133,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
int nr_range, i;
int use_pse, use_gbpages;
- printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
+ printk(KERN_INFO "init_memory_mapping: [mem %#010lx-%#010lx]\n",
+ start, end - 1);
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
/*
@@ -251,8 +253,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
}
for (i = 0; i < nr_range; i++)
- printk(KERN_DEBUG " %010lx - %010lx page %s\n",
- mr[i].start, mr[i].end,
+ printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
+ mr[i].start, mr[i].end - 1,
(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
(mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
@@ -350,8 +352,8 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
* create a kernel page fault:
*/
#ifdef CONFIG_DEBUG_PAGEALLOC
- printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
- begin, end);
+ printk(KERN_INFO "debug: unmapping init [mem %#010lx-%#010lx]\n",
+ begin, end - 1);
set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
/*
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 19d3fa08b11..2d125be1bae 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -141,8 +141,8 @@ static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
/* whine about and ignore invalid blks */
if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
- pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n",
- nid, start, end);
+ pr_warning("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
+ nid, start, end - 1);
return 0;
}
@@ -210,8 +210,8 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
start = roundup(start, ZONE_ALIGN);
- printk(KERN_INFO "Initmem setup node %d %016Lx-%016Lx\n",
- nid, start, end);
+ printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
+ nid, start, end - 1);
/*
* Allocate node data. Try remap allocator first, node-local
@@ -232,7 +232,7 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
}
/* report and initialize */
- printk(KERN_INFO " NODE_DATA [%016Lx - %016Lx]%s\n",
+ printk(KERN_INFO " NODE_DATA [mem %#010Lx-%#010Lx]%s\n",
nd_pa, nd_pa + nd_size - 1, remapped ? " (remapped)" : "");
tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
if (!remapped && tnid != nid)
@@ -291,14 +291,14 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
*/
if (bi->end > bj->start && bi->start < bj->end) {
if (bi->nid != bj->nid) {
- pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n",
- bi->nid, bi->start, bi->end,
- bj->nid, bj->start, bj->end);
+ pr_err("NUMA: node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
+ bi->nid, bi->start, bi->end - 1,
+ bj->nid, bj->start, bj->end - 1);
return -EINVAL;
}
- pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n",
- bi->nid, bi->start, bi->end,
- bj->start, bj->end);
+ pr_warning("NUMA: Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
+ bi->nid, bi->start, bi->end - 1,
+ bj->start, bj->end - 1);
}
/*
@@ -320,9 +320,9 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
}
if (k < mi->nr_blks)
continue;
- printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%Lx,%Lx)\n",
- bi->nid, bi->start, bi->end, bj->start, bj->end,
- start, end);
+ printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
+ bi->nid, bi->start, bi->end - 1, bj->start,
+ bj->end - 1, start, end - 1);
bi->start = start;
bi->end = end;
numa_remove_memblk_from(j--, mi);
@@ -616,8 +616,8 @@ static int __init dummy_numa_init(void)
{
printk(KERN_INFO "%s\n",
numa_off ? "NUMA turned off" : "No NUMA configuration found");
- printk(KERN_INFO "Faking a node at %016Lx-%016Lx\n",
- 0LLU, PFN_PHYS(max_pfn));
+ printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
+ 0LLU, PFN_PHYS(max_pfn) - 1);
node_set(0, numa_nodes_parsed);
numa_add_memblk(0, 0, PFN_PHYS(max_pfn));
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index 871dd886817..dbbbb47260c 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -68,8 +68,8 @@ static int __init emu_setup_memblk(struct numa_meminfo *ei,
numa_remove_memblk_from(phys_blk, pi);
}
- printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
- eb->start, eb->end, (eb->end - eb->start) >> 20);
+ printk(KERN_INFO "Faking node %d at [mem %#018Lx-%#018Lx] (%LuMB)\n",
+ nid, eb->start, eb->end - 1, (eb->end - eb->start) >> 20);
return 0;
}
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index bea6e573e02..3d68ef6d226 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -225,9 +225,8 @@ static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
page = pfn_to_page(pfn);
type = get_page_memtype(page);
if (type != -1) {
- printk(KERN_INFO "reserve_ram_pages_type failed "
- "0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
- start, end, type, req_type);
+ printk(KERN_INFO "reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%lx, req 0x%lx\n",
+ start, end - 1, type, req_type);
if (new_type)
*new_type = type;
@@ -330,9 +329,9 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
err = rbt_memtype_check_insert(new, new_type);
if (err) {
- printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
- "track %s, req %s\n",
- start, end, cattr_name(new->type), cattr_name(req_type));
+ printk(KERN_INFO "reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
+ start, end - 1,
+ cattr_name(new->type), cattr_name(req_type));
kfree(new);
spin_unlock(&memtype_lock);
@@ -341,8 +340,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
spin_unlock(&memtype_lock);
- dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
- start, end, cattr_name(new->type), cattr_name(req_type),
+ dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
+ start, end - 1, cattr_name(new->type), cattr_name(req_type),
new_type ? cattr_name(*new_type) : "-");
return err;
@@ -376,14 +375,14 @@ int free_memtype(u64 start, u64 end)
spin_unlock(&memtype_lock);
if (!entry) {
- printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
- current->comm, current->pid, start, end);
+ printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
+ current->comm, current->pid, start, end - 1);
return -EINVAL;
}
kfree(entry);
- dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
+ dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);
return 0;
}
@@ -507,9 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
while (cursor < to) {
if (!devmem_is_allowed(pfn)) {
- printk(KERN_INFO
- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
- current->comm, from, to);
+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
+ current->comm, from, to - 1);
return 0;
}
cursor += PAGE_SIZE;
@@ -570,12 +568,11 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
size;
if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
- printk(KERN_INFO
- "%s:%d ioremap_change_attr failed %s "
- "for %Lx-%Lx\n",
+ printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
+ "for [mem %#010Lx-%#010Lx]\n",
current->comm, current->pid,
cattr_name(flags),
- base, (unsigned long long)(base + size));
+ base, (unsigned long long)(base + size-1));
return -EINVAL;
}
return 0;
@@ -607,12 +604,11 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
flags = lookup_memtype(paddr);
if (want_flags != flags) {
- printk(KERN_WARNING
- "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
+ printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
current->comm, current->pid,
cattr_name(want_flags),
(unsigned long long)paddr,
- (unsigned long long)(paddr + size),
+ (unsigned long long)(paddr + size - 1),
cattr_name(flags));
*vma_prot = __pgprot((pgprot_val(*vma_prot) &
(~_PAGE_CACHE_MASK)) |
@@ -630,11 +626,11 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
!is_new_memtype_allowed(paddr, size, want_flags, flags)) {
free_memtype(paddr, paddr + size);
printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
- " for %Lx-%Lx, got %s\n",
+ " for [mem %#010Lx-%#010Lx], got %s\n",
current->comm, current->pid,
cattr_name(want_flags),
(unsigned long long)paddr,
- (unsigned long long)(paddr + size),
+ (unsigned long long)(paddr + size - 1),
cattr_name(flags));
return -EINVAL;
}
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index efb5b4b9371..732af3a9618 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -176,8 +176,9 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
return;
}
- printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm,
- start, end);
+ printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
+ node, pxm,
+ (unsigned long long) start, (unsigned long long) end - 1);
}
void __init acpi_numa_arch_fixup(void) {}
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 7415aa92791..56ab74989cf 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -64,6 +64,10 @@ static int xen_register_pirq(u32 gsi, int gsi_override, int triggering,
int shareable = 0;
char *name;
+ irq = xen_irq_from_gsi(gsi);
+ if (irq > 0)
+ return irq;
+
if (set_pirq)
pirq = gsi;
diff --git a/arch/x86/realmode/Makefile b/arch/x86/realmode/Makefile
new file mode 100644
index 00000000000..94f7fbe97b0
--- /dev/null
+++ b/arch/x86/realmode/Makefile
@@ -0,0 +1,18 @@
+#
+# arch/x86/realmode/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+#
+
+subdir- := rm
+
+obj-y += init.o
+obj-y += rmpiggy.o
+
+$(obj)/rmpiggy.o: $(obj)/rm/realmode.bin
+
+$(obj)/rm/realmode.bin: FORCE
+ $(Q)$(MAKE) $(build)=$(obj)/rm $@
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
new file mode 100644
index 00000000000..cbca565af5b
--- /dev/null
+++ b/arch/x86/realmode/init.c
@@ -0,0 +1,115 @@
+#include <linux/io.h>
+#include <linux/memblock.h>
+
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#include <asm/realmode.h>
+
+struct real_mode_header *real_mode_header;
+u32 *trampoline_cr4_features;
+
+void __init setup_real_mode(void)
+{
+ phys_addr_t mem;
+ u16 real_mode_seg;
+ u32 *rel;
+ u32 count;
+ u32 *ptr;
+ u16 *seg;
+ int i;
+ unsigned char *base;
+ struct trampoline_header *trampoline_header;
+ size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
+#ifdef CONFIG_X86_64
+ u64 *trampoline_pgd;
+ u64 efer;
+#endif
+
+ /* Has to be in very low memory so we can execute real-mode AP code. */
+ mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
+ if (!mem)
+ panic("Cannot allocate trampoline\n");
+
+ base = __va(mem);
+ memblock_reserve(mem, size);
+ real_mode_header = (struct real_mode_header *) base;
+ printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
+ base, (unsigned long long)mem, size);
+
+ memcpy(base, real_mode_blob, size);
+
+ real_mode_seg = __pa(base) >> 4;
+ rel = (u32 *) real_mode_relocs;
+
+ /* 16-bit segment relocations. */
+ count = rel[0];
+ rel = &rel[1];
+ for (i = 0; i < count; i++) {
+ seg = (u16 *) (base + rel[i]);
+ *seg = real_mode_seg;
+ }
+
+ /* 32-bit linear relocations. */
+ count = rel[i];
+ rel = &rel[i + 1];
+ for (i = 0; i < count; i++) {
+ ptr = (u32 *) (base + rel[i]);
+ *ptr += __pa(base);
+ }
+
+ /* Must be performed *after* relocation. */
+ trampoline_header = (struct trampoline_header *)
+ __va(real_mode_header->trampoline_header);
+
+#ifdef CONFIG_X86_32
+ trampoline_header->start = __pa(startup_32_smp);
+ trampoline_header->gdt_limit = __BOOT_DS + 7;
+ trampoline_header->gdt_base = __pa(boot_gdt);
+#else
+ /*
+ * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
+ * so we need to mask it out.
+ */
+ rdmsrl(MSR_EFER, efer);
+ trampoline_header->efer = efer & ~EFER_LMA;
+
+ trampoline_header->start = (u64) secondary_startup_64;
+ trampoline_cr4_features = &trampoline_header->cr4;
+ *trampoline_cr4_features = read_cr4();
+
+ trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
+ trampoline_pgd[0] = __pa(level3_ident_pgt) + _KERNPG_TABLE;
+ trampoline_pgd[511] = __pa(level3_kernel_pgt) + _KERNPG_TABLE;
+#endif
+}
+
+/*
+ * setup_real_mode() gets called very early, to guarantee the
+ * availability of low memory. This is before the proper kernel page
+ * tables are set up, so we cannot set page permissions in that
+ * function. Thus, we use an arch_initcall instead.
+ */
+static int __init set_real_mode_permissions(void)
+{
+ unsigned char *base = (unsigned char *) real_mode_header;
+ size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
+
+ size_t ro_size =
+ PAGE_ALIGN(real_mode_header->ro_end) -
+ __pa(base);
+
+ size_t text_size =
+ PAGE_ALIGN(real_mode_header->ro_end) -
+ real_mode_header->text_start;
+
+ unsigned long text_start =
+ (unsigned long) __va(real_mode_header->text_start);
+
+ set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
+ set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
+ set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
+
+ return 0;
+}
+
+arch_initcall(set_real_mode_permissions);
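
setup_real_mode() above consumes the blob emitted by 'relocs --realmode' as two back-to-back tables of 32-bit offsets. The layout below is inferred from the two relocation loops in that function, not from the relocs tool itself, and apply_relocs() is only an illustrative reader:

#include <stdint.h>

/*
 * Inferred stream layout (all 32-bit words):
 *
 *   [count16][off0]...[offN-1]   offsets of 16-bit segment fields
 *   [count32][off0]...[offM-1]   offsets of 32-bit linear addresses
 *
 * Each offset is relative to the start of the real-mode blob.
 */
static void apply_relocs(uint8_t *base, const uint32_t *rel,
                         uint16_t seg, uint32_t phys_base)
{
        uint32_t i, count;

        count = *rel++;                         /* 16-bit segment relocs */
        for (i = 0; i < count; i++)
                *(uint16_t *)(base + rel[i]) = seg;
        rel += count;

        count = *rel++;                         /* 32-bit linear relocs */
        for (i = 0; i < count; i++)
                *(uint32_t *)(base + rel[i]) += phys_base;
}
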
diff --git a/arch/x86/realmode/rm/.gitignore b/arch/x86/realmode/rm/.gitignore
new file mode 100644
index 00000000000..b6ed3a2555c
--- /dev/null
+++ b/arch/x86/realmode/rm/.gitignore
@@ -0,0 +1,3 @@
+pasyms.h
+realmode.lds
+realmode.relocs
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
new file mode 100644
index 00000000000..5b84a2d3088
--- /dev/null
+++ b/arch/x86/realmode/rm/Makefile
@@ -0,0 +1,82 @@
+#
+# arch/x86/realmode/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+#
+
+always := realmode.bin realmode.relocs
+
+wakeup-objs := wakeup_asm.o wakemain.o video-mode.o
+wakeup-objs += copy.o bioscall.o regs.o
+# The link order of the video-*.o modules can matter. In particular,
+# video-vga.o *must* be listed first, followed by video-vesa.o.
+# Hardware-specific drivers should follow in the order they should be
+# probed, and video-bios.o should typically be last.
+wakeup-objs += video-vga.o
+wakeup-objs += video-vesa.o
+wakeup-objs += video-bios.o
+
+realmode-y += header.o
+realmode-y += trampoline_$(BITS).o
+realmode-y += stack.o
+realmode-$(CONFIG_X86_32) += reboot_32.o
+realmode-$(CONFIG_ACPI_SLEEP) += $(wakeup-objs)
+
+targets += $(realmode-y)
+
+REALMODE_OBJS = $(addprefix $(obj)/,$(realmode-y))
+
+sed-pasyms := -n -r -e 's/^([0-9a-fA-F]+) [ABCDGRSTVW] (.+)$$/pa_\2 = \2;/p'
+
+quiet_cmd_pasyms = PASYMS $@
+ cmd_pasyms = $(NM) $(filter-out FORCE,$^) | \
+ sed $(sed-pasyms) | sort | uniq > $@
+
+targets += pasyms.h
+$(obj)/pasyms.h: $(REALMODE_OBJS) FORCE
+ $(call if_changed,pasyms)
+
+targets += realmode.lds
+$(obj)/realmode.lds: $(obj)/pasyms.h
+
+LDFLAGS_realmode.elf := --emit-relocs -T
+CPPFLAGS_realmode.lds += -P -C -I$(obj)
+
+targets += realmode.elf
+$(obj)/realmode.elf: $(obj)/realmode.lds $(REALMODE_OBJS) FORCE
+ $(call if_changed,ld)
+
+OBJCOPYFLAGS_realmode.bin := -O binary
+
+targets += realmode.bin
+$(obj)/realmode.bin: $(obj)/realmode.elf $(obj)/realmode.relocs
+ $(call if_changed,objcopy)
+
+quiet_cmd_relocs = RELOCS $@
+ cmd_relocs = arch/x86/tools/relocs --realmode $< > $@
+
+targets += realmode.relocs
+$(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
+ $(call if_changed,relocs)
+
+# ---------------------------------------------------------------------------
+
+# How to compile the 16-bit code. Note we always compile for -march=i386,
+# that way we can complain to the user if the CPU is insufficient.
+KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
+ -I$(srctree)/arch/x86/boot \
+ -DDISABLE_BRANCH_PROFILING \
+ -Wall -Wstrict-prototypes \
+ -march=i386 -mregparm=3 \
+ -include $(srctree)/$(src)/../../boot/code16gcc.h \
+ -fno-strict-aliasing -fomit-frame-pointer \
+ $(call cc-option, -ffreestanding) \
+ $(call cc-option, -fno-toplevel-reorder,\
+ $(call cc-option, -fno-unit-at-a-time)) \
+ $(call cc-option, -fno-stack-protector) \
+ $(call cc-option, -mpreferred-stack-boundary=2)
+KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+GCOV_PROFILE := n
diff --git a/arch/x86/realmode/rm/bioscall.S b/arch/x86/realmode/rm/bioscall.S
new file mode 100644
index 00000000000..16162d19791
--- /dev/null
+++ b/arch/x86/realmode/rm/bioscall.S
@@ -0,0 +1 @@
+#include "../../boot/bioscall.S"
diff --git a/arch/x86/realmode/rm/copy.S b/arch/x86/realmode/rm/copy.S
new file mode 100644
index 00000000000..b785e6f38fd
--- /dev/null
+++ b/arch/x86/realmode/rm/copy.S
@@ -0,0 +1 @@
+#include "../../boot/copy.S"
diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
new file mode 100644
index 00000000000..fadf48378ad
--- /dev/null
+++ b/arch/x86/realmode/rm/header.S
@@ -0,0 +1,41 @@
+/*
+ * Real-mode blob header; this should match realmode.h and be
+ * readonly; for mutable data instead add pointers into the .data
+ * or .bss sections as appropriate.
+ */
+
+#include <linux/linkage.h>
+#include <asm/page_types.h>
+
+#include "realmode.h"
+
+ .section ".header", "a"
+
+ .balign 16
+GLOBAL(real_mode_header)
+ .long pa_text_start
+ .long pa_ro_end
+ /* SMP trampoline */
+ .long pa_trampoline_start
+ .long pa_trampoline_status
+ .long pa_trampoline_header
+#ifdef CONFIG_X86_64
+ .long pa_trampoline_pgd;
+#endif
+ /* ACPI S3 wakeup */
+#ifdef CONFIG_ACPI_SLEEP
+ .long pa_wakeup_start
+ .long pa_wakeup_header
+#endif
+ /* APM/BIOS reboot */
+#ifdef CONFIG_X86_32
+ .long pa_machine_real_restart_asm
+#endif
+END(real_mode_header)
+
+ /* End signature, used to verify integrity */
+ .section ".signature","a"
+ .balign 4
+GLOBAL(end_signature)
+ .long REALMODE_END_SIGNATURE
+END(end_signature)
diff --git a/arch/x86/realmode/rm/realmode.h b/arch/x86/realmode/rm/realmode.h
new file mode 100644
index 00000000000..d74cff6350e
--- /dev/null
+++ b/arch/x86/realmode/rm/realmode.h
@@ -0,0 +1,21 @@
+#ifndef ARCH_X86_REALMODE_RM_REALMODE_H
+#define ARCH_X86_REALMODE_RM_REALMODE_H
+
+#ifdef __ASSEMBLY__
+
+/*
+ * 16-bit ljmpw to the real_mode_seg
+ *
+ * This must be open-coded since gas will choke on using a
+ * relocatable symbol for the segment portion.
+ */
+#define LJMPW_RM(to) .byte 0xea ; .word (to), real_mode_seg
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * Signature at the end of the realmode region
+ */
+#define REALMODE_END_SIGNATURE 0x65a22c82
+
+#endif /* ARCH_X86_REALMODE_RM_REALMODE_H */
diff --git a/arch/x86/realmode/rm/realmode.lds.S b/arch/x86/realmode/rm/realmode.lds.S
new file mode 100644
index 00000000000..86b2e8d6b1f
--- /dev/null
+++ b/arch/x86/realmode/rm/realmode.lds.S
@@ -0,0 +1,76 @@
+/*
+ * realmode.lds.S
+ *
+ * Linker script for the real-mode code
+ */
+
+#include <asm/page_types.h>
+
+#undef i386
+
+OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+OUTPUT_ARCH(i386)
+
+SECTIONS
+{
+ real_mode_seg = 0;
+
+ . = 0;
+ .header : {
+ pa_real_mode_base = .;
+ *(.header)
+ }
+
+ . = ALIGN(4);
+ .rodata : {
+ *(.rodata)
+ *(.rodata.*)
+ . = ALIGN(16);
+ video_cards = .;
+ *(.videocards)
+ video_cards_end = .;
+ }
+
+ . = ALIGN(PAGE_SIZE);
+ pa_text_start = .;
+ .text : {
+ *(.text)
+ *(.text.*)
+ }
+
+ .text32 : {
+ *(.text32)
+ *(.text32.*)
+ }
+
+ .text64 : {
+ *(.text64)
+ *(.text64.*)
+ }
+ pa_ro_end = .;
+
+ . = ALIGN(PAGE_SIZE);
+ .data : {
+ *(.data)
+ *(.data.*)
+ }
+
+ . = ALIGN(128);
+ .bss : {
+ *(.bss*)
+ }
+
+ /* End signature for integrity checking */
+ . = ALIGN(4);
+ .signature : {
+ *(.signature)
+ }
+
+ /DISCARD/ : {
+ *(.note*)
+ *(.debug*)
+ *(.eh_frame*)
+ }
+
+#include "pasyms.h"
+}
diff --git a/arch/x86/kernel/reboot_32.S b/arch/x86/realmode/rm/reboot_32.S
index 1d5c46df0d7..114044876b3 100644
--- a/arch/x86/kernel/reboot_32.S
+++ b/arch/x86/realmode/rm/reboot_32.S
@@ -2,6 +2,7 @@
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/page_types.h>
+#include "realmode.h"
/*
* The following code and data reboots the machine by switching to real
@@ -13,34 +14,20 @@
*
* This code is called with the restart type (0 = BIOS, 1 = APM) in %eax.
*/
- .section ".x86_trampoline","a"
- .balign 16
+ .section ".text32", "ax"
.code32
-ENTRY(machine_real_restart_asm)
-r_base = .
- /* Get our own relocated address */
- call 1f
-1: popl %ebx
- subl $(1b - r_base), %ebx
-
- /* Compute the equivalent real-mode segment */
- movl %ebx, %ecx
- shrl $4, %ecx
-
- /* Patch post-real-mode segment jump */
- movw (dispatch_table - r_base)(%ebx,%eax,2),%ax
- movw %ax, (101f - r_base)(%ebx)
- movw %cx, (102f - r_base)(%ebx)
+ .balign 16
+ENTRY(machine_real_restart_asm)
/* Set up the IDT for real mode. */
- lidtl (machine_real_restart_idt - r_base)(%ebx)
+ lidtl pa_machine_real_restart_idt
/*
* Set up a GDT from which we can load segment descriptors for real
* mode. The GDT is not used in real mode; it is just needed here to
* prepare the descriptors.
*/
- lgdtl (machine_real_restart_gdt - r_base)(%ebx)
+ lgdtl pa_machine_real_restart_gdt
/*
* Load the data segment registers with 16-bit compatible values
@@ -51,7 +38,7 @@ r_base = .
movl %ecx, %fs
movl %ecx, %gs
movl %ecx, %ss
- ljmpl $8, $1f - r_base
+ ljmpw $8, $1f
/*
* This is 16-bit protected mode code to disable paging and the cache,
@@ -76,27 +63,29 @@ r_base = .
*
* Most of this work is probably excessive, but it is what is tested.
*/
+ .text
.code16
+
+ .balign 16
+machine_real_restart_asm16:
1:
xorl %ecx, %ecx
- movl %cr0, %eax
- andl $0x00000011, %eax
- orl $0x60000000, %eax
- movl %eax, %cr0
+ movl %cr0, %edx
+ andl $0x00000011, %edx
+ orl $0x60000000, %edx
+ movl %edx, %cr0
movl %ecx, %cr3
movl %cr0, %edx
- andl $0x60000000, %edx /* If no cache bits -> no wbinvd */
+ testl $0x60000000, %edx /* If no cache bits -> no wbinvd */
jz 2f
wbinvd
2:
- andb $0x10, %al
- movl %eax, %cr0
- .byte 0xea /* ljmpw */
-101: .word 0 /* Offset */
-102: .word 0 /* Segment */
-
-bios:
- ljmpw $0xf000, $0xfff0
+ andb $0x10, %dl
+ movl %edx, %cr0
+ LJMPW_RM(3f)
+3:
+ andw %ax, %ax
+ jz bios
apm:
movw $0x1000, %ax
@@ -106,26 +95,34 @@ apm:
movw $0x0001, %bx
movw $0x0003, %cx
int $0x15
+ /* This should never return... */
-END(machine_real_restart_asm)
+bios:
+ ljmpw $0xf000, $0xfff0
- .balign 16
- /* These must match <asm/reboot.h */
-dispatch_table:
- .word bios - r_base
- .word apm - r_base
-END(dispatch_table)
+ .section ".rodata", "a"
- .balign 16
-machine_real_restart_idt:
+ .balign 16
+GLOBAL(machine_real_restart_idt)
.word 0xffff /* Length - real mode default value */
.long 0 /* Base - real mode default value */
END(machine_real_restart_idt)
- .balign 16
-ENTRY(machine_real_restart_gdt)
- .quad 0 /* Self-pointer, filled in by PM code */
- .quad 0 /* 16-bit code segment, filled in by PM code */
+ .balign 16
+GLOBAL(machine_real_restart_gdt)
+ /* Self-pointer */
+ .word 0xffff /* Length - real mode default value */
+ .long pa_machine_real_restart_gdt
+ .word 0
+
+ /*
+ * 16-bit code segment pointing to real_mode_seg
+ * Selector value 8
+ */
+ .word 0xffff /* Limit */
+ .long 0x9b000000 + pa_real_mode_base
+ .word 0
+
/*
* 16-bit data segment with the selector value 16 = 0x10 and
* base value 0x100; since this is consistent with real mode
diff --git a/arch/x86/realmode/rm/regs.c b/arch/x86/realmode/rm/regs.c
new file mode 100644
index 00000000000..fbb15b9f9ca
--- /dev/null
+++ b/arch/x86/realmode/rm/regs.c
@@ -0,0 +1 @@
+#include "../../boot/regs.c"
diff --git a/arch/x86/realmode/rm/stack.S b/arch/x86/realmode/rm/stack.S
new file mode 100644
index 00000000000..867ae87adfa
--- /dev/null
+++ b/arch/x86/realmode/rm/stack.S
@@ -0,0 +1,19 @@
+/*
+ * Common heap and stack allocations
+ */
+
+#include <linux/linkage.h>
+
+ .data
+GLOBAL(HEAP)
+ .long rm_heap
+GLOBAL(heap_end)
+ .long rm_stack
+
+ .bss
+ .balign 16
+GLOBAL(rm_heap)
+ .space 2048
+GLOBAL(rm_stack)
+ .space 2048
+GLOBAL(rm_stack_end)
diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
new file mode 100644
index 00000000000..c1b2791183e
--- /dev/null
+++ b/arch/x86/realmode/rm/trampoline_32.S
@@ -0,0 +1,74 @@
+/*
+ *
+ * Trampoline.S Derived from Setup.S by Linus Torvalds
+ *
+ * 4 Jan 1997 Michael Chastain: changed to gnu as.
+ *
+ * This is only used for booting secondary CPUs in an SMP machine
+ *
+ * Entry: CS:IP point to the start of our code, we are
+ * in real mode with no stack, but the rest of the
+ * trampoline page to make our stack and everything else
+ * is a mystery.
+ *
+ * We jump into arch/x86/kernel/head_32.S.
+ *
+ * On entry to trampoline_start, the processor is in real mode
+ * with 16-bit addressing and 16-bit data. CS has some value
+ * and IP is zero. Thus, we load CS to the physical segment
+ * of the real mode code before doing anything further.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/segment.h>
+#include <asm/page_types.h>
+#include "realmode.h"
+
+ .text
+ .code16
+
+ .balign PAGE_SIZE
+ENTRY(trampoline_start)
+ wbinvd # Needed for NUMA-Q; should be harmless for others
+
+ LJMPW_RM(1f)
+1:
+ mov %cs, %ax # Code and data in the same place
+ mov %ax, %ds
+
+ cli # We should be safe anyway
+
+ movl tr_start, %eax # where we need to go
+
+ movl $0xA5A5A5A5, trampoline_status
+ # write marker so the master knows we're running
+
+ /*
+ * GDT tables in non default location kernel can be beyond 16MB and
+ * lgdt will not be able to load the address as in real mode default
+ * operand size is 16bit. Use lgdtl instead to force operand size
+ * to 32 bit.
+ */
+ lidtl tr_idt # load idt with 0, 0
+ lgdtl tr_gdt # load gdt with whatever is appropriate
+
+ movw $1, %dx # protected mode (PE) bit
+ lmsw %dx # into protected mode
+
+ ljmpl $__BOOT_CS, $pa_startup_32
+
+ .section ".text32","ax"
+ .code32
+ENTRY(startup_32) # note: also used from wakeup_asm.S
+ jmp *%eax
+
+ .bss
+ .balign 8
+GLOBAL(trampoline_header)
+ tr_start: .space 4
+ tr_gdt_pad: .space 2
+ tr_gdt: .space 6
+END(trampoline_header)
+
+#include "trampoline_common.S"
diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
index 09ff51799e9..bb360dc39d2 100644
--- a/arch/x86/kernel/trampoline_64.S
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -5,12 +5,12 @@
* 4 Jan 1997 Michael Chastain: changed to gnu as.
* 15 Sept 2005 Eric Biederman: 64bit PIC support
*
- * Entry: CS:IP point to the start of our code, we are
- * in real mode with no stack, but the rest of the
+ * Entry: CS:IP point to the start of our code, we are
+ * in real mode with no stack, but the rest of the
* trampoline page to make our stack and everything else
* is a mystery.
*
- * On entry to trampoline_data, the processor is in real mode
+ * On entry to trampoline_start, the processor is in real mode
* with 16-bit addressing and 16-bit data. CS has some value
* and IP is zero. Thus, data addresses need to be absolute
* (no relocation) and are taken with regard to r_base.
@@ -31,43 +31,33 @@
#include <asm/msr.h>
#include <asm/segment.h>
#include <asm/processor-flags.h>
+#include "realmode.h"
- .section ".x86_trampoline","a"
- .balign PAGE_SIZE
+ .text
.code16
-ENTRY(trampoline_data)
-r_base = .
+ .balign PAGE_SIZE
+ENTRY(trampoline_start)
cli # We should be safe anyway
wbinvd
+
+ LJMPW_RM(1f)
+1:
mov %cs, %ax # Code and data in the same place
mov %ax, %ds
mov %ax, %es
mov %ax, %ss
+ movl $0xA5A5A5A5, trampoline_status
+ # write marker so the master knows we're running
- movl $0xA5A5A5A5, trampoline_status - r_base
- # write marker for master knows we're running
-
- # Setup stack
- movw $(trampoline_stack_end - r_base), %sp
+ # Setup stack
+ movl $rm_stack_end, %esp
call verify_cpu # Verify the cpu supports long mode
testl %eax, %eax # Check for return code
jnz no_longmode
- mov %cs, %ax
- movzx %ax, %esi # Find the 32bit trampoline location
- shll $4, %esi
-
- # Fixup the absolute vectors
- leal (startup_32 - r_base)(%esi), %eax
- movl %eax, startup_32_vector - r_base
- leal (startup_64 - r_base)(%esi), %eax
- movl %eax, startup_64_vector - r_base
- leal (tgdt - r_base)(%esi), %eax
- movl %eax, (tgdt + 2 - r_base)
-
/*
* GDT tables in non default location kernel can be beyond 16MB and
* lgdt will not be able to load the address as in real mode default
@@ -75,36 +65,49 @@ r_base = .
* to 32 bit.
*/
- lidtl tidt - r_base # load idt with 0, 0
- lgdtl tgdt - r_base # load gdt with whatever is appropriate
+ lidtl tr_idt # load idt with 0, 0
+ lgdtl tr_gdt # load gdt with whatever is appropriate
+
+ movw $__KERNEL_DS, %dx # Data segment descriptor
- mov $X86_CR0_PE, %ax # protected mode (PE) bit
- lmsw %ax # into protected mode
+ # Enable protected mode
+ movl $X86_CR0_PE, %eax # protected mode (PE) bit
+ movl %eax, %cr0 # into protected mode
# flush prefetch and jump to startup_32
- ljmpl *(startup_32_vector - r_base)
+ ljmpl $__KERNEL32_CS, $pa_startup_32
+no_longmode:
+ hlt
+ jmp no_longmode
+#include "../kernel/verify_cpu.S"
+
+ .section ".text32","ax"
.code32
.balign 4
-startup_32:
- movl $__KERNEL_DS, %eax # Initialize the %ds segment register
- movl %eax, %ds
-
- movl $X86_CR4_PAE, %eax
+ENTRY(startup_32)
+ movl %edx, %ss
+ addl $pa_real_mode_base, %esp
+ movl %edx, %ds
+ movl %edx, %es
+ movl %edx, %fs
+ movl %edx, %gs
+
+ movl pa_tr_cr4, %eax
movl %eax, %cr4 # Enable PAE mode
- # Setup trampoline 4 level pagetables
- leal (trampoline_level4_pgt - r_base)(%esi), %eax
+ # Setup trampoline 4 level pagetables
+ movl $pa_trampoline_pgd, %eax
movl %eax, %cr3
+ # Set up EFER
+ movl pa_tr_efer, %eax
+ movl pa_tr_efer + 4, %edx
movl $MSR_EFER, %ecx
- movl $(1 << _EFER_LME), %eax # Enable Long Mode
- xorl %edx, %edx
wrmsr
# Enable paging and in turn activate Long Mode
- # Enable protected mode
- movl $(X86_CR0_PG | X86_CR0_PE), %eax
+ movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
movl %eax, %cr0
/*
@@ -113,59 +116,38 @@ startup_32:
* EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we use
* the new gdt/idt that has __KERNEL_CS with CS.L = 1.
*/
- ljmp *(startup_64_vector - r_base)(%esi)
+ ljmpl $__KERNEL_CS, $pa_startup_64
+ .section ".text64","ax"
.code64
.balign 4
-startup_64:
+ENTRY(startup_64)
# Now jump into the kernel using virtual addresses
- movq $secondary_startup_64, %rax
- jmp *%rax
-
- .code16
-no_longmode:
- hlt
- jmp no_longmode
-#include "verify_cpu.S"
-
- .balign 4
- # Careful these need to be in the same 64K segment as the above;
-tidt:
- .word 0 # idt limit = 0
- .word 0, 0 # idt base = 0L
+ jmpq *tr_start(%rip)
+ .section ".rodata","a"
# Duplicate the global descriptor table
# so the kernel can live anywhere
- .balign 4
-tgdt:
- .short tgdt_end - tgdt # gdt limit
- .long tgdt - r_base
- .short 0
+ .balign 16
+ .globl tr_gdt
+tr_gdt:
+ .short tr_gdt_end - tr_gdt - 1 # gdt limit
+ .long pa_tr_gdt
+ .short 0
.quad 0x00cf9b000000ffff # __KERNEL32_CS
.quad 0x00af9b000000ffff # __KERNEL_CS
.quad 0x00cf93000000ffff # __KERNEL_DS
-tgdt_end:
+tr_gdt_end:
- .balign 4
-startup_32_vector:
- .long startup_32 - r_base
- .word __KERNEL32_CS, 0
+ .bss
+ .balign PAGE_SIZE
+GLOBAL(trampoline_pgd) .space PAGE_SIZE
- .balign 4
-startup_64_vector:
- .long startup_64 - r_base
- .word __KERNEL_CS, 0
+ .balign 8
+GLOBAL(trampoline_header)
+ tr_start: .space 8
+ GLOBAL(tr_efer) .space 8
+ GLOBAL(tr_cr4) .space 4
+END(trampoline_header)
- .balign 4
-ENTRY(trampoline_status)
- .long 0
-
-trampoline_stack:
- .org 0x1000
-trampoline_stack_end:
-ENTRY(trampoline_level4_pgt)
- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
- .fill 510,8,0
- .quad level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
-
-ENTRY(trampoline_end)
+#include "trampoline_common.S"
diff --git a/arch/x86/realmode/rm/trampoline_common.S b/arch/x86/realmode/rm/trampoline_common.S
new file mode 100644
index 00000000000..b1ecdb9692a
--- /dev/null
+++ b/arch/x86/realmode/rm/trampoline_common.S
@@ -0,0 +1,7 @@
+ .section ".rodata","a"
+ .balign 16
+tr_idt: .fill 1, 6, 0
+
+ .bss
+ .balign 4
+GLOBAL(trampoline_status) .space 4
diff --git a/arch/x86/realmode/rm/video-bios.c b/arch/x86/realmode/rm/video-bios.c
new file mode 100644
index 00000000000..848b25aaf11
--- /dev/null
+++ b/arch/x86/realmode/rm/video-bios.c
@@ -0,0 +1 @@
+#include "../../boot/video-bios.c"
diff --git a/arch/x86/realmode/rm/video-mode.c b/arch/x86/realmode/rm/video-mode.c
new file mode 100644
index 00000000000..2a98b7e2368
--- /dev/null
+++ b/arch/x86/realmode/rm/video-mode.c
@@ -0,0 +1 @@
+#include "../../boot/video-mode.c"
diff --git a/arch/x86/realmode/rm/video-vesa.c b/arch/x86/realmode/rm/video-vesa.c
new file mode 100644
index 00000000000..413edddb51e
--- /dev/null
+++ b/arch/x86/realmode/rm/video-vesa.c
@@ -0,0 +1 @@
+#include "../../boot/video-vesa.c"
diff --git a/arch/x86/realmode/rm/video-vga.c b/arch/x86/realmode/rm/video-vga.c
new file mode 100644
index 00000000000..3085f5c9d28
--- /dev/null
+++ b/arch/x86/realmode/rm/video-vga.c
@@ -0,0 +1 @@
+#include "../../boot/video-vga.c"
diff --git a/arch/x86/kernel/acpi/realmode/wakemain.c b/arch/x86/realmode/rm/wakemain.c
index 883962d9eef..91405d515ec 100644
--- a/arch/x86/kernel/acpi/realmode/wakemain.c
+++ b/arch/x86/realmode/rm/wakemain.c
@@ -65,7 +65,8 @@ void main(void)
{
/* Kill machine if structures are wrong */
if (wakeup_header.real_magic != 0x12345678)
- while (1);
+ while (1)
+ ;
if (wakeup_header.realmode_flags & 4)
send_morse("...-");
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.h b/arch/x86/realmode/rm/wakeup.h
index 97a29e1430e..9317e0042f2 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.h
+++ b/arch/x86/realmode/rm/wakeup.h
@@ -12,9 +12,8 @@
/* This must match data at wakeup.S */
struct wakeup_header {
u16 video_mode; /* Video mode number */
- u16 _jmp1; /* ljmpl opcode, 32-bit only */
u32 pmode_entry; /* Protected mode resume point, 32-bit only */
- u16 _jmp2; /* CS value, 32-bit only */
+ u16 pmode_cs;
u32 pmode_cr0; /* Protected mode cr0 */
u32 pmode_cr3; /* Protected mode cr3 */
u32 pmode_cr4; /* Protected mode cr4 */
@@ -26,12 +25,6 @@ struct wakeup_header {
u32 pmode_behavior; /* Wakeup routine behavior flags */
u32 realmode_flags;
u32 real_magic;
- u16 trampoline_segment; /* segment with trampoline code, 64-bit only */
- u8 _pad1;
- u8 wakeup_jmp;
- u16 wakeup_jmp_off;
- u16 wakeup_jmp_seg;
- u64 wakeup_gdt[3];
u32 signature; /* To check we have correct structure */
} __attribute__((__packed__));
@@ -40,7 +33,6 @@ extern struct wakeup_header wakeup_header;
#define WAKEUP_HEADER_OFFSET 8
#define WAKEUP_HEADER_SIGNATURE 0x51ee1111
-#define WAKEUP_END_SIGNATURE 0x65a22c82
/* Wakeup behavior bits */
#define WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE 0
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/realmode/rm/wakeup_asm.S
index b4fd836e405..8905166b0bb 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.S
+++ b/arch/x86/realmode/rm/wakeup_asm.S
@@ -1,50 +1,47 @@
/*
* ACPI wakeup real mode startup stub
*/
+#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/msr-index.h>
#include <asm/page_types.h>
#include <asm/pgtable_types.h>
#include <asm/processor-flags.h>
+#include "realmode.h"
#include "wakeup.h"
.code16
- .section ".jump", "ax"
- .globl _start
-_start:
- cli
- jmp wakeup_code
/* This should match the structure in wakeup.h */
- .section ".header", "a"
- .globl wakeup_header
-wakeup_header:
-video_mode: .short 0 /* Video mode number */
-pmode_return: .byte 0x66, 0xea /* ljmpl */
- .long 0 /* offset goes here */
- .short __KERNEL_CS
-pmode_cr0: .long 0 /* Saved %cr0 */
-pmode_cr3: .long 0 /* Saved %cr3 */
-pmode_cr4: .long 0 /* Saved %cr4 */
-pmode_efer: .quad 0 /* Saved EFER */
-pmode_gdt: .quad 0
-pmode_misc_en: .quad 0 /* Saved MISC_ENABLE MSR */
-pmode_behavior: .long 0 /* Wakeup behavior flags */
-realmode_flags: .long 0
-real_magic: .long 0
-trampoline_segment: .word 0
-_pad1: .byte 0
-wakeup_jmp: .byte 0xea /* ljmpw */
-wakeup_jmp_off: .word 3f
-wakeup_jmp_seg: .word 0
-wakeup_gdt: .quad 0, 0, 0
-signature: .long WAKEUP_HEADER_SIGNATURE
+ .section ".data", "aw"
+
+ .balign 16
+GLOBAL(wakeup_header)
+ video_mode: .short 0 /* Video mode number */
+ pmode_entry: .long 0
+ pmode_cs: .short __KERNEL_CS
+ pmode_cr0: .long 0 /* Saved %cr0 */
+ pmode_cr3: .long 0 /* Saved %cr3 */
+ pmode_cr4: .long 0 /* Saved %cr4 */
+ pmode_efer: .quad 0 /* Saved EFER */
+ pmode_gdt: .quad 0
+ pmode_misc_en: .quad 0 /* Saved MISC_ENABLE MSR */
+ pmode_behavior: .long 0 /* Wakeup behavior flags */
+ realmode_flags: .long 0
+ real_magic: .long 0
+ signature: .long WAKEUP_HEADER_SIGNATURE
+END(wakeup_header)
.text
.code16
-wakeup_code:
+
+ .balign 16
+ENTRY(wakeup_start)
+ cli
cld
+ LJMPW_RM(3f)
+3:
/* Apparently some dimwit BIOS programmers don't know how to
program a PM to RM transition, and we might end up here with
junk in the data segment descriptor registers. The only way
@@ -54,8 +51,7 @@ wakeup_code:
movl %cr0, %eax
orb $X86_CR0_PE, %al
movl %eax, %cr0
- jmp 1f
-1: ljmpw $8, $2f
+ ljmpw $8, $2f
2:
movw %cx, %ds
movw %cx, %es
@@ -65,16 +61,18 @@ wakeup_code:
andb $~X86_CR0_PE, %al
movl %eax, %cr0
- jmp wakeup_jmp
+ LJMPW_RM(3f)
3:
/* Set up segments */
movw %cs, %ax
+ movw %ax, %ss
+ movl $rm_stack_end, %esp
movw %ax, %ds
movw %ax, %es
- movw %ax, %ss
- lidtl wakeup_idt
+ movw %ax, %fs
+ movw %ax, %gs
- movl $wakeup_stack_end, %esp
+ lidtl wakeup_idt
/* Clear the EFLAGS */
pushl $0
@@ -87,7 +85,7 @@ wakeup_code:
/* Check we really have everything... */
movl end_signature, %eax
- cmpl $WAKEUP_END_SIGNATURE, %eax
+ cmpl $REALMODE_END_SIGNATURE, %eax
jne bogus_real_magic
/* Call the C code */
@@ -128,14 +126,13 @@ wakeup_code:
lgdtl pmode_gdt
/* This really couldn't... */
- movl pmode_cr0, %eax
- movl %eax, %cr0
- jmp pmode_return
+ movl pmode_entry, %eax
+ movl pmode_cr0, %ecx
+ movl %ecx, %cr0
+ ljmpl $__KERNEL_CS, $pa_startup_32
+ /* -> jmp *%eax in trampoline_32.S */
#else
- pushw $0
- pushw trampoline_segment
- pushw $0
- lret
+ jmp trampoline_start
#endif
bogus_real_magic:
@@ -143,28 +140,38 @@ bogus_real_magic:
hlt
jmp 1b
- .data
+ .section ".rodata","a"
+
+ /*
+ * Set up the wakeup GDT. We set these up as Big Real Mode,
+ * that is, with limits set to 4 GB. At least the Lenovo
+ * Thinkpad X61 is known to need this for the video BIOS
+ * initialization quirk to work; this is likely to also
+ * be the case for other laptops or integrated video devices.
+ */
+
+ .balign 16
+GLOBAL(wakeup_gdt)
+ .word 3*8-1 /* Self-descriptor */
+ .long pa_wakeup_gdt
+ .word 0
+
+ .word 0xffff /* 16-bit code segment @ real_mode_base */
+ .long 0x9b000000 + pa_real_mode_base
+ .word 0x008f /* big real mode */
+
+ .word 0xffff /* 16-bit data segment @ real_mode_base */
+ .long 0x93000000 + pa_real_mode_base
+ .word 0x008f /* big real mode */
+END(wakeup_gdt)
+
+ .section ".rodata","a"
.balign 8
/* This is the standard real-mode IDT */
-wakeup_idt:
+ .balign 16
+GLOBAL(wakeup_idt)
.word 0xffff /* limit */
.long 0 /* address */
.word 0
-
- .globl HEAP, heap_end
-HEAP:
- .long wakeup_heap
-heap_end:
- .long wakeup_stack
-
- .bss
-wakeup_heap:
- .space 2048
-wakeup_stack:
- .space 2048
-wakeup_stack_end:
-
- .section ".signature","a"
-end_signature:
- .long WAKEUP_END_SIGNATURE
+END(wakeup_idt)
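
For reference, the "big real mode" descriptors in wakeup_gdt above follow the standard 8-byte GDT entry encoding. Taking the code segment as an example: the leading .word 0xffff is limit[15:0]; the .long 0x9b000000 + pa_real_mode_base packs base[23:0] = pa_real_mode_base into its low three bytes and the access byte 0x9b (present, DPL 0, execute/read code) into its top byte; the trailing .word 0x008f supplies limit[19:16] = 0xf with the granularity bit set and base[31:24] = 0. With granularity set, the 20-bit limit of 0xfffff covers 4 GiB while the default operand size stays 16-bit, which is exactly the big-real-mode shape the comment describes; the data descriptor differs only in its access byte (0x93, read/write data).
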
diff --git a/arch/x86/realmode/rmpiggy.S b/arch/x86/realmode/rmpiggy.S
new file mode 100644
index 00000000000..204c6ece0e9
--- /dev/null
+++ b/arch/x86/realmode/rmpiggy.S
@@ -0,0 +1,20 @@
+/*
+ * Wrapper script for the realmode binary as a transport object
+ * before copying to low memory.
+ */
+#include <linux/linkage.h>
+#include <asm/page_types.h>
+
+ .section ".init.data","aw"
+
+ .balign PAGE_SIZE
+
+GLOBAL(real_mode_blob)
+ .incbin "arch/x86/realmode/rm/realmode.bin"
+END(real_mode_blob)
+
+GLOBAL(real_mode_blob_end);
+
+GLOBAL(real_mode_relocs)
+ .incbin "arch/x86/realmode/rm/realmode.relocs"
+END(real_mode_relocs)
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index b685296d446..5a1847d6193 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -78,6 +78,13 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
static const char * const sym_regex_realmode[S_NSYMTYPES] = {
/*
+ * These symbols are known to be relative, even if the linker marks them
+ * as absolute (typically defined outside any section in the linker script.)
+ */
+ [S_REL] =
+ "^pa_",
+
+/*
* These are 16-bit segment symbols when compiling 16-bit code.
*/
[S_SEG] =
diff --git a/arch/x86/xen/debugfs.c b/arch/x86/xen/debugfs.c
index ef1db1900d8..c8377fb26cd 100644
--- a/arch/x86/xen/debugfs.c
+++ b/arch/x86/xen/debugfs.c
@@ -19,107 +19,3 @@ struct dentry * __init xen_init_debugfs(void)
return d_xen_debug;
}
-struct array_data
-{
- void *array;
- unsigned elements;
-};
-
-static int u32_array_open(struct inode *inode, struct file *file)
-{
- file->private_data = NULL;
- return nonseekable_open(inode, file);
-}
-
-static size_t format_array(char *buf, size_t bufsize, const char *fmt,
- u32 *array, unsigned array_size)
-{
- size_t ret = 0;
- unsigned i;
-
- for(i = 0; i < array_size; i++) {
- size_t len;
-
- len = snprintf(buf, bufsize, fmt, array[i]);
- len++; /* ' ' or '\n' */
- ret += len;
-
- if (buf) {
- buf += len;
- bufsize -= len;
- buf[-1] = (i == array_size-1) ? '\n' : ' ';
- }
- }
-
- ret++; /* \0 */
- if (buf)
- *buf = '\0';
-
- return ret;
-}
-
-static char *format_array_alloc(const char *fmt, u32 *array, unsigned array_size)
-{
- size_t len = format_array(NULL, 0, fmt, array, array_size);
- char *ret;
-
- ret = kmalloc(len, GFP_KERNEL);
- if (ret == NULL)
- return NULL;
-
- format_array(ret, len, fmt, array, array_size);
- return ret;
-}
-
-static ssize_t u32_array_read(struct file *file, char __user *buf, size_t len,
- loff_t *ppos)
-{
- struct inode *inode = file->f_path.dentry->d_inode;
- struct array_data *data = inode->i_private;
- size_t size;
-
- if (*ppos == 0) {
- if (file->private_data) {
- kfree(file->private_data);
- file->private_data = NULL;
- }
-
- file->private_data = format_array_alloc("%u", data->array, data->elements);
- }
-
- size = 0;
- if (file->private_data)
- size = strlen(file->private_data);
-
- return simple_read_from_buffer(buf, len, ppos, file->private_data, size);
-}
-
-static int xen_array_release(struct inode *inode, struct file *file)
-{
- kfree(file->private_data);
-
- return 0;
-}
-
-static const struct file_operations u32_array_fops = {
- .owner = THIS_MODULE,
- .open = u32_array_open,
- .release= xen_array_release,
- .read = u32_array_read,
- .llseek = no_llseek,
-};
-
-struct dentry *xen_debugfs_create_u32_array(const char *name, umode_t mode,
- struct dentry *parent,
- u32 *array, unsigned elements)
-{
- struct array_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
-
- if (data == NULL)
- return NULL;
-
- data->array = array;
- data->elements = elements;
-
- return debugfs_create_file(name, mode, parent, data, &u32_array_fops);
-}
diff --git a/arch/x86/xen/debugfs.h b/arch/x86/xen/debugfs.h
index 78d25499be5..12ebf3325c7 100644
--- a/arch/x86/xen/debugfs.h
+++ b/arch/x86/xen/debugfs.h
@@ -3,8 +3,4 @@
struct dentry * __init xen_init_debugfs(void);
-struct dentry *xen_debugfs_create_u32_array(const char *name, umode_t mode,
- struct dentry *parent,
- u32 *array, unsigned elements);
-
#endif /* _XEN_DEBUGFS_H */
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index c0f5facdb10..75f33b2a593 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -42,6 +42,7 @@
#include <xen/page.h>
#include <xen/hvm.h>
#include <xen/hvc-console.h>
+#include <xen/acpi.h>
#include <asm/paravirt.h>
#include <asm/apic.h>
@@ -75,6 +76,7 @@
#include "xen-ops.h"
#include "mmu.h"
+#include "smp.h"
#include "multicalls.h"
EXPORT_SYMBOL_GPL(hypercall_page);
@@ -883,6 +885,14 @@ static void set_xen_basic_apic_ops(void)
apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
apic->set_apic_id = xen_set_apic_id;
apic->get_apic_id = xen_get_apic_id;
+
+#ifdef CONFIG_SMP
+ apic->send_IPI_allbutself = xen_send_IPI_allbutself;
+ apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself;
+ apic->send_IPI_mask = xen_send_IPI_mask;
+ apic->send_IPI_all = xen_send_IPI_all;
+ apic->send_IPI_self = xen_send_IPI_self;
+#endif
}
#endif
@@ -1340,7 +1350,6 @@ asmlinkage void __init xen_start_kernel(void)
xen_raw_console_write("mapping kernel into physical memory\n");
pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
- xen_ident_map_ISA();
/* Allocate and initialize top and mid mfn levels for p2m structure */
xen_build_mfn_list_list();
@@ -1400,6 +1409,8 @@ asmlinkage void __init xen_start_kernel(void)
/* Make sure ACS will be enabled */
pci_request_acs();
+
+ xen_acpi_sleep_register();
}
#ifdef CONFIG_PCI
/* PCI BIOS service won't work from a PV guest. */
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 3506cd4f9a4..3a73785631c 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1933,29 +1933,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
#endif
}
-void __init xen_ident_map_ISA(void)
-{
- unsigned long pa;
-
- /*
- * If we're dom0, then linear map the ISA machine addresses into
- * the kernel's address space.
- */
- if (!xen_initial_domain())
- return;
-
- xen_raw_printk("Xen: setup ISA identity maps\n");
-
- for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) {
- pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO);
-
- if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0))
- BUG();
- }
-
- xen_flush_tlb();
-}
-
static void __init xen_post_allocator_init(void)
{
pv_mmu_ops.set_pte = xen_set_pte;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 1b267e75158..ffd08c414e9 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -499,16 +499,18 @@ static bool alloc_p2m(unsigned long pfn)
return true;
}
-static bool __init __early_alloc_p2m(unsigned long pfn)
+static bool __init early_alloc_p2m_middle(unsigned long pfn, bool check_boundary)
{
unsigned topidx, mididx, idx;
+ unsigned long *p2m;
+ unsigned long *mid_mfn_p;
topidx = p2m_top_index(pfn);
mididx = p2m_mid_index(pfn);
idx = p2m_index(pfn);
/* Pfff.. No boundary cross-over, lets get out. */
- if (!idx)
+ if (!idx && check_boundary)
return false;
WARN(p2m_top[topidx][mididx] == p2m_identity,
@@ -522,24 +524,66 @@ static bool __init __early_alloc_p2m(unsigned long pfn)
return false;
/* Boundary cross-over for the edges: */
- if (idx) {
- unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
- unsigned long *mid_mfn_p;
+ p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
- p2m_init(p2m);
+ p2m_init(p2m);
- p2m_top[topidx][mididx] = p2m;
+ p2m_top[topidx][mididx] = p2m;
- /* For save/restore we need to MFN of the P2M saved */
-
- mid_mfn_p = p2m_top_mfn_p[topidx];
- WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
- "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
- topidx, mididx);
- mid_mfn_p[mididx] = virt_to_mfn(p2m);
+ /* For save/restore we need the MFN of the saved P2M */
+
+ mid_mfn_p = p2m_top_mfn_p[topidx];
+ WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
+ "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
+ topidx, mididx);
+ mid_mfn_p[mididx] = virt_to_mfn(p2m);
+
+ return true;
+}
+
+static bool __init early_alloc_p2m(unsigned long pfn)
+{
+ unsigned topidx = p2m_top_index(pfn);
+ unsigned long *mid_mfn_p;
+ unsigned long **mid;
+
+ mid = p2m_top[topidx];
+ mid_mfn_p = p2m_top_mfn_p[topidx];
+ if (mid == p2m_mid_missing) {
+ mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
+
+ p2m_mid_init(mid);
+
+ p2m_top[topidx] = mid;
+ BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
}
- return idx != 0;
+ /* And the save/restore P2M tables.. */
+ if (mid_mfn_p == p2m_mid_missing_mfn) {
+ mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ p2m_mid_mfn_init(mid_mfn_p);
+
+ p2m_top_mfn_p[topidx] = mid_mfn_p;
+ p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
+ /* Note: we don't set mid_mfn_p[midix] here,
+ * look in early_alloc_p2m_middle */
+ }
+ return true;
+}
+bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+ if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
+ if (!early_alloc_p2m(pfn))
+ return false;
+
+ if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/))
+ return false;
+
+ if (!__set_phys_to_machine(pfn, mfn))
+ return false;
+ }
+
+ return true;
}
unsigned long __init set_phys_range_identity(unsigned long pfn_s,
unsigned long pfn_e)
@@ -559,35 +603,11 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s,
pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE));
pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
{
- unsigned topidx = p2m_top_index(pfn);
- unsigned long *mid_mfn_p;
- unsigned long **mid;
-
- mid = p2m_top[topidx];
- mid_mfn_p = p2m_top_mfn_p[topidx];
- if (mid == p2m_mid_missing) {
- mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
-
- p2m_mid_init(mid);
-
- p2m_top[topidx] = mid;
-
- BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
- }
- /* And the save/restore P2M tables.. */
- if (mid_mfn_p == p2m_mid_missing_mfn) {
- mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
- p2m_mid_mfn_init(mid_mfn_p);
-
- p2m_top_mfn_p[topidx] = mid_mfn_p;
- p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
- /* Note: we don't set mid_mfn_p[midix] here,
- * look in __early_alloc_p2m */
- }
+ WARN_ON(!early_alloc_p2m(pfn));
}
- __early_alloc_p2m(pfn_s);
- __early_alloc_p2m(pfn_e);
+ early_alloc_p2m_middle(pfn_s, true);
+ early_alloc_p2m_middle(pfn_e, true);
for (pfn = pfn_s; pfn < pfn_e; pfn++)
if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 1ba8dff2675..3ebba0753d3 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -26,7 +26,6 @@
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
-
#include "xen-ops.h"
#include "vdso.h"
@@ -84,8 +83,8 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
-static unsigned long __init xen_release_chunk(unsigned long start,
- unsigned long end)
+static unsigned long __init xen_do_chunk(unsigned long start,
+ unsigned long end, bool release)
{
struct xen_memory_reservation reservation = {
.address_bits = 0,
@@ -96,30 +95,138 @@ static unsigned long __init xen_release_chunk(unsigned long start,
unsigned long pfn;
int ret;
- for(pfn = start; pfn < end; pfn++) {
+ for (pfn = start; pfn < end; pfn++) {
+ unsigned long frame;
unsigned long mfn = pfn_to_mfn(pfn);
- /* Make sure pfn exists to start with */
- if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
- continue;
-
- set_xen_guest_handle(reservation.extent_start, &mfn);
+ if (release) {
+ /* Make sure pfn exists to start with */
+ if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
+ continue;
+ frame = mfn;
+ } else {
+ if (mfn != INVALID_P2M_ENTRY)
+ continue;
+ frame = pfn;
+ }
+ set_xen_guest_handle(reservation.extent_start, &frame);
reservation.nr_extents = 1;
- ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+ ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap,
&reservation);
- WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
+ WARN(ret != 1, "Failed to %s pfn %lx err=%d\n",
+ release ? "release" : "populate", pfn, ret);
+
if (ret == 1) {
- __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+ if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) {
+ if (release)
+ break;
+ set_xen_guest_handle(reservation.extent_start, &frame);
+ reservation.nr_extents = 1;
+ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+ &reservation);
+ break;
+ }
len++;
- }
+ } else
+ break;
}
- printk(KERN_INFO "Freeing %lx-%lx pfn range: %lu pages freed\n",
- start, end, len);
+ if (len)
+ printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n",
+ release ? "Freeing" : "Populating",
+ start, end, len,
+ release ? "freed" : "added");
return len;
}
+static unsigned long __init xen_release_chunk(unsigned long start,
+ unsigned long end)
+{
+ return xen_do_chunk(start, end, true);
+}
+
+static unsigned long __init xen_populate_chunk(
+ const struct e820entry *list, size_t map_size,
+ unsigned long max_pfn, unsigned long *last_pfn,
+ unsigned long credits_left)
+{
+ const struct e820entry *entry;
+ unsigned int i;
+ unsigned long done = 0;
+ unsigned long dest_pfn;
+
+ for (i = 0, entry = list; i < map_size; i++, entry++) {
+ unsigned long credits = credits_left;
+ unsigned long s_pfn;
+ unsigned long e_pfn;
+ unsigned long pfns;
+ long capacity;
+
+ if (credits <= 0)
+ break;
+
+ if (entry->type != E820_RAM)
+ continue;
+
+ e_pfn = PFN_UP(entry->addr + entry->size);
+
+ /* We only care about E820 after the xen_start_info->nr_pages */
+ if (e_pfn <= max_pfn)
+ continue;
+
+ s_pfn = PFN_DOWN(entry->addr);
+ /* If the E820 falls within the nr_pages, we want to start
+ * at the nr_pages PFN.
+ * If that would mean going past the E820 entry, skip it
+ */
+ if (s_pfn <= max_pfn) {
+ capacity = e_pfn - max_pfn;
+ dest_pfn = max_pfn;
+ } else {
+ /* last_pfn MUST be within E820_RAM regions */
+ if (*last_pfn && e_pfn >= *last_pfn)
+ s_pfn = *last_pfn;
+ capacity = e_pfn - s_pfn;
+ dest_pfn = s_pfn;
+ }
+ /* If we had filled this E820_RAM entry, go to the next one. */
+ if (capacity <= 0)
+ continue;
+
+ if (credits > capacity)
+ credits = capacity;
+
+ pfns = xen_do_chunk(dest_pfn, dest_pfn + credits, false);
+ done += pfns;
+ credits_left -= pfns;
+ *last_pfn = (dest_pfn + pfns);
+ }
+ return done;
+}
+
+static void __init xen_set_identity_and_release_chunk(
+ unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
+ unsigned long *released, unsigned long *identity)
+{
+ unsigned long pfn;
+
+ /*
+ * If the PFNs are currently mapped, the VA mapping also needs
+ * to be updated to be 1:1.
+ */
+ for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
+ (void)HYPERVISOR_update_va_mapping(
+ (unsigned long)__va(pfn << PAGE_SHIFT),
+ mfn_pte(pfn, PAGE_KERNEL_IO), 0);
+
+ if (start_pfn < nr_pages)
+ *released += xen_release_chunk(
+ start_pfn, min(end_pfn, nr_pages));
+
+ *identity += set_phys_range_identity(start_pfn, end_pfn);
+}
+
static unsigned long __init xen_set_identity_and_release(
const struct e820entry *list, size_t map_size, unsigned long nr_pages)
{
@@ -142,7 +249,6 @@ static unsigned long __init xen_set_identity_and_release(
*/
for (i = 0, entry = list; i < map_size; i++, entry++) {
phys_addr_t end = entry->addr + entry->size;
-
if (entry->type == E820_RAM || i == map_size - 1) {
unsigned long start_pfn = PFN_DOWN(start);
unsigned long end_pfn = PFN_UP(end);
@@ -150,20 +256,19 @@ static unsigned long __init xen_set_identity_and_release(
if (entry->type == E820_RAM)
end_pfn = PFN_UP(entry->addr);
- if (start_pfn < end_pfn) {
- if (start_pfn < nr_pages)
- released += xen_release_chunk(
- start_pfn, min(end_pfn, nr_pages));
+ if (start_pfn < end_pfn)
+ xen_set_identity_and_release_chunk(
+ start_pfn, end_pfn, nr_pages,
+ &released, &identity);
- identity += set_phys_range_identity(
- start_pfn, end_pfn);
- }
start = end;
}
}
- printk(KERN_INFO "Released %lu pages of unused memory\n", released);
- printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
+ if (released)
+ printk(KERN_INFO "Released %lu pages of unused memory\n", released);
+ if (identity)
+ printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
return released;
}
@@ -217,7 +322,9 @@ char * __init xen_memory_setup(void)
int rc;
struct xen_memory_map memmap;
unsigned long max_pages;
+ unsigned long last_pfn = 0;
unsigned long extra_pages = 0;
+ unsigned long populated;
int i;
int op;
@@ -257,9 +364,20 @@ char * __init xen_memory_setup(void)
*/
xen_released_pages = xen_set_identity_and_release(
map, memmap.nr_entries, max_pfn);
- extra_pages += xen_released_pages;
/*
+ * Populate back the non-RAM pages and E820 gaps that had been
+ * released. */
+ populated = xen_populate_chunk(map, memmap.nr_entries,
+ max_pfn, &last_pfn, xen_released_pages);
+
+ extra_pages += (xen_released_pages - populated);
+
+ if (last_pfn > max_pfn) {
+ max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
+ mem_end = PFN_PHYS(max_pfn);
+ }
+ /*
* Clamp the amount of extra memory to a EXTRA_MEM_RATIO
* factor the base size. On non-highmem systems, the base
* size is the full initial memory allocation; on highmem it
@@ -272,7 +390,6 @@ char * __init xen_memory_setup(void)
*/
extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
extra_pages);
-
i = 0;
while (i < memmap.nr_entries) {
u64 addr = map[i].addr;
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 3700945ed0d..afb250d22a6 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
+#include <linux/irq_work.h>
#include <asm/paravirt.h>
#include <asm/desc.h>
@@ -41,10 +42,12 @@ cpumask_var_t xen_cpu_initialized_map;
static DEFINE_PER_CPU(int, xen_resched_irq);
static DEFINE_PER_CPU(int, xen_callfunc_irq);
static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
+static DEFINE_PER_CPU(int, xen_irq_work);
static DEFINE_PER_CPU(int, xen_debug_irq) = -1;
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
+static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
/*
* Reschedule call back.
@@ -143,6 +146,17 @@ static int xen_smp_intr_init(unsigned int cpu)
goto fail;
per_cpu(xen_callfuncsingle_irq, cpu) = rc;
+ callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
+ rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
+ cpu,
+ xen_irq_work_interrupt,
+ IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+ callfunc_name,
+ NULL);
+ if (rc < 0)
+ goto fail;
+ per_cpu(xen_irq_work, cpu) = rc;
+
return 0;
fail:
@@ -155,6 +169,8 @@ static int xen_smp_intr_init(unsigned int cpu)
if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
NULL);
+ if (per_cpu(xen_irq_work, cpu) >= 0)
+ unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
return rc;
}
@@ -407,6 +423,7 @@ static void xen_cpu_die(unsigned int cpu)
unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
xen_uninit_lock_cpu(cpu);
xen_teardown_timer(cpu);
@@ -469,8 +486,8 @@ static void xen_smp_send_reschedule(int cpu)
xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}
-static void xen_send_IPI_mask(const struct cpumask *mask,
- enum ipi_vector vector)
+static void __xen_send_IPI_mask(const struct cpumask *mask,
+ int vector)
{
unsigned cpu;
@@ -482,7 +499,7 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
int cpu;
- xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
+ __xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
/* Make sure other vcpus get a chance to run if they need to. */
for_each_cpu(cpu, mask) {
@@ -495,10 +512,86 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
static void xen_smp_send_call_function_single_ipi(int cpu)
{
- xen_send_IPI_mask(cpumask_of(cpu),
+ __xen_send_IPI_mask(cpumask_of(cpu),
XEN_CALL_FUNCTION_SINGLE_VECTOR);
}
+static inline int xen_map_vector(int vector)
+{
+ int xen_vector;
+
+ switch (vector) {
+ case RESCHEDULE_VECTOR:
+ xen_vector = XEN_RESCHEDULE_VECTOR;
+ break;
+ case CALL_FUNCTION_VECTOR:
+ xen_vector = XEN_CALL_FUNCTION_VECTOR;
+ break;
+ case CALL_FUNCTION_SINGLE_VECTOR:
+ xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
+ break;
+ case IRQ_WORK_VECTOR:
+ xen_vector = XEN_IRQ_WORK_VECTOR;
+ break;
+ default:
+ xen_vector = -1;
+ printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
+ vector);
+ }
+
+ return xen_vector;
+}
+
+void xen_send_IPI_mask(const struct cpumask *mask,
+ int vector)
+{
+ int xen_vector = xen_map_vector(vector);
+
+ if (xen_vector >= 0)
+ __xen_send_IPI_mask(mask, xen_vector);
+}
+
+void xen_send_IPI_all(int vector)
+{
+ int xen_vector = xen_map_vector(vector);
+
+ if (xen_vector >= 0)
+ __xen_send_IPI_mask(cpu_online_mask, xen_vector);
+}
+
+void xen_send_IPI_self(int vector)
+{
+ int xen_vector = xen_map_vector(vector);
+
+ if (xen_vector >= 0)
+ xen_send_IPI_one(smp_processor_id(), xen_vector);
+}
+
+void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
+ int vector)
+{
+ unsigned cpu;
+ unsigned int this_cpu = smp_processor_id();
+
+ if (!(num_online_cpus() > 1))
+ return;
+
+ for_each_cpu_and(cpu, mask, cpu_online_mask) {
+ if (this_cpu == cpu)
+ continue;
+
+ xen_smp_send_call_function_single_ipi(cpu);
+ }
+}
+
+void xen_send_IPI_allbutself(int vector)
+{
+ int xen_vector = xen_map_vector(vector);
+
+ if (xen_vector >= 0)
+ xen_send_IPI_mask_allbutself(cpu_online_mask, xen_vector);
+}
+
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
irq_enter();
@@ -519,6 +612,16 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
+{
+ irq_enter();
+ irq_work_run();
+ inc_irq_stat(apic_irq_work_irqs);
+ irq_exit();
+
+ return IRQ_HANDLED;
+}
+
static const struct smp_ops xen_smp_ops __initconst = {
.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
.smp_prepare_cpus = xen_smp_prepare_cpus,
@@ -565,6 +668,7 @@ static void xen_hvm_cpu_die(unsigned int cpu)
unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
+ unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
native_cpu_die(cpu);
}
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
new file mode 100644
index 00000000000..8981a76d081
--- /dev/null
+++ b/arch/x86/xen/smp.h
@@ -0,0 +1,12 @@
+#ifndef _XEN_SMP_H
+
+extern void xen_send_IPI_mask(const struct cpumask *mask,
+ int vector);
+extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
+ int vector);
+extern void xen_send_IPI_allbutself(int vector);
+extern void physflat_send_IPI_allbutself(int vector);
+extern void xen_send_IPI_all(int vector);
+extern void xen_send_IPI_self(int vector);
+
+#endif
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index d69cc6c3f80..83e866d714c 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -440,12 +440,12 @@ static int __init xen_spinlock_debugfs(void)
debugfs_create_u64("time_total", 0444, d_spin_debug,
&spinlock_stats.time_total);
- xen_debugfs_create_u32_array("histo_total", 0444, d_spin_debug,
- spinlock_stats.histo_spin_total, HISTO_BUCKETS + 1);
- xen_debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug,
- spinlock_stats.histo_spin_spinning, HISTO_BUCKETS + 1);
- xen_debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
- spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);
+ debugfs_create_u32_array("histo_total", 0444, d_spin_debug,
+ spinlock_stats.histo_spin_total, HISTO_BUCKETS + 1);
+ debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug,
+ spinlock_stats.histo_spin_spinning, HISTO_BUCKETS + 1);
+ debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
+ spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);
return 0;
}
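
The three histogram files above now go through the generic debugfs u32-array helper instead of the Xen-private copy removed from debugfs.c. A hedged, self-contained sketch of the same pattern outside Xen follows; the names are made up for illustration, the return value of the helper is deliberately ignored, and error handling is elided for brevity.

#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/kernel.h>

static u32 example_histogram[16];	/* filled in elsewhere by the driver */

static int __init example_debugfs_init(void)
{
	struct dentry *dir = debugfs_create_dir("example-stats", NULL);

	/* Expose the whole array as a single space-separated read-only file. */
	debugfs_create_u32_array("histogram", 0444, dir, example_histogram,
				 ARRAY_SIZE(example_histogram));
	return 0;
}
late_initcall(example_debugfs_init);
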
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 45c0c0667bd..202d4c15015 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -28,7 +28,6 @@ void xen_setup_shared_info(void);
void xen_build_mfn_list_list(void);
void xen_setup_machphys_mapping(void);
pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
-void xen_ident_map_ISA(void);
void xen_reserve_top(void);
extern unsigned long xen_max_p2m_pfn;
diff --git a/arch/xtensa/include/asm/kvm_para.h b/arch/xtensa/include/asm/kvm_para.h
new file mode 100644
index 00000000000..14fab8f0b95
--- /dev/null
+++ b/arch/xtensa/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/drivers/Makefile b/drivers/Makefile
index 0ee98d50f97..2ba29ffef2c 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_SFI) += sfi/
# PnP must come after ACPI since it will eventually need to check if acpi
# was used and do nothing if so
obj-$(CONFIG_PNP) += pnp/
-obj-$(CONFIG_ARM_AMBA) += amba/
+obj-y += amba/
# Many drivers will want to use DMA so this has to be made available
# really early.
obj-$(CONFIG_DMA_ENGINE) += dma/
diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
index 8cf6c46e99f..6680df36b96 100644
--- a/drivers/acpi/bgrt.c
+++ b/drivers/acpi/bgrt.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/sysfs.h>
+#include <linux/io.h>
#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 06527c52661..74ee4ab577b 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -93,11 +93,9 @@ static int acpi_sleep_prepare(u32 acpi_state)
#ifdef CONFIG_ACPI_SLEEP
/* do we have a wakeup address for S2 and S3? */
if (acpi_state == ACPI_STATE_S3) {
- if (!acpi_wakeup_address) {
+ if (!acpi_wakeup_address)
return -EFAULT;
- }
- acpi_set_firmware_waking_vector(
- (acpi_physical_address)acpi_wakeup_address);
+ acpi_set_firmware_waking_vector(acpi_wakeup_address);
}
ACPI_FLUSH_CPU_CACHE();
diff --git a/drivers/amba/Makefile b/drivers/amba/Makefile
index 40fe74097be..66e81c2f1e3 100644
--- a/drivers/amba/Makefile
+++ b/drivers/amba/Makefile
@@ -1,2 +1,2 @@
-obj-y += bus.o
-
+obj-$(CONFIG_ARM_AMBA) += bus.o
+obj-$(CONFIG_TEGRA_AHB) += tegra-ahb.o
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
new file mode 100644
index 00000000000..aa0b1f16052
--- /dev/null
+++ b/drivers/amba/tegra-ahb.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ * Jay Cheng <jacheng@nvidia.com>
+ * James Wylder <james.wylder@motorola.com>
+ * Benoit Goby <benoit@android.com>
+ * Colin Cross <ccross@android.com>
+ * Hiroshi DOYU <hdoyu@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#define DRV_NAME "tegra-ahb"
+
+#define AHB_ARBITRATION_DISABLE 0x00
+#define AHB_ARBITRATION_PRIORITY_CTRL 0x04
+#define AHB_PRIORITY_WEIGHT(x) (((x) & 0x7) << 29)
+#define PRIORITY_SELECT_USB BIT(6)
+#define PRIORITY_SELECT_USB2 BIT(18)
+#define PRIORITY_SELECT_USB3 BIT(17)
+
+#define AHB_GIZMO_AHB_MEM 0x0c
+#define ENB_FAST_REARBITRATE BIT(2)
+#define DONT_SPLIT_AHB_WR BIT(7)
+
+#define AHB_GIZMO_APB_DMA 0x10
+#define AHB_GIZMO_IDE 0x18
+#define AHB_GIZMO_USB 0x1c
+#define AHB_GIZMO_AHB_XBAR_BRIDGE 0x20
+#define AHB_GIZMO_CPU_AHB_BRIDGE 0x24
+#define AHB_GIZMO_COP_AHB_BRIDGE 0x28
+#define AHB_GIZMO_XBAR_APB_CTLR 0x2c
+#define AHB_GIZMO_VCP_AHB_BRIDGE 0x30
+#define AHB_GIZMO_NAND 0x3c
+#define AHB_GIZMO_SDMMC4 0x44
+#define AHB_GIZMO_XIO 0x48
+#define AHB_GIZMO_BSEV 0x60
+#define AHB_GIZMO_BSEA 0x70
+#define AHB_GIZMO_NOR 0x74
+#define AHB_GIZMO_USB2 0x78
+#define AHB_GIZMO_USB3 0x7c
+#define IMMEDIATE BIT(18)
+
+#define AHB_GIZMO_SDMMC1 0x80
+#define AHB_GIZMO_SDMMC2 0x84
+#define AHB_GIZMO_SDMMC3 0x88
+#define AHB_MEM_PREFETCH_CFG_X 0xd8
+#define AHB_ARBITRATION_XBAR_CTRL 0xdc
+#define AHB_MEM_PREFETCH_CFG3 0xe0
+#define AHB_MEM_PREFETCH_CFG4 0xe4
+#define AHB_MEM_PREFETCH_CFG1 0xec
+#define AHB_MEM_PREFETCH_CFG2 0xf0
+#define PREFETCH_ENB BIT(31)
+#define MST_ID(x) (((x) & 0x1f) << 26)
+#define AHBDMA_MST_ID MST_ID(5)
+#define USB_MST_ID MST_ID(6)
+#define USB2_MST_ID MST_ID(18)
+#define USB3_MST_ID MST_ID(17)
+#define ADDR_BNDRY(x) (((x) & 0xf) << 21)
+#define INACTIVITY_TIMEOUT(x) (((x) & 0xffff) << 0)
+
+#define AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID 0xf8
+
+#define AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE BIT(17)
+
+static struct platform_driver tegra_ahb_driver;
+
+static const u32 tegra_ahb_gizmo[] = {
+ AHB_ARBITRATION_DISABLE,
+ AHB_ARBITRATION_PRIORITY_CTRL,
+ AHB_GIZMO_AHB_MEM,
+ AHB_GIZMO_APB_DMA,
+ AHB_GIZMO_IDE,
+ AHB_GIZMO_USB,
+ AHB_GIZMO_AHB_XBAR_BRIDGE,
+ AHB_GIZMO_CPU_AHB_BRIDGE,
+ AHB_GIZMO_COP_AHB_BRIDGE,
+ AHB_GIZMO_XBAR_APB_CTLR,
+ AHB_GIZMO_VCP_AHB_BRIDGE,
+ AHB_GIZMO_NAND,
+ AHB_GIZMO_SDMMC4,
+ AHB_GIZMO_XIO,
+ AHB_GIZMO_BSEV,
+ AHB_GIZMO_BSEA,
+ AHB_GIZMO_NOR,
+ AHB_GIZMO_USB2,
+ AHB_GIZMO_USB3,
+ AHB_GIZMO_SDMMC1,
+ AHB_GIZMO_SDMMC2,
+ AHB_GIZMO_SDMMC3,
+ AHB_MEM_PREFETCH_CFG_X,
+ AHB_ARBITRATION_XBAR_CTRL,
+ AHB_MEM_PREFETCH_CFG3,
+ AHB_MEM_PREFETCH_CFG4,
+ AHB_MEM_PREFETCH_CFG1,
+ AHB_MEM_PREFETCH_CFG2,
+ AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID,
+};
+
+struct tegra_ahb {
+ void __iomem *regs;
+ struct device *dev;
+ u32 ctx[0];
+};
+
+static inline u32 gizmo_readl(struct tegra_ahb *ahb, u32 offset)
+{
+ return readl(ahb->regs + offset);
+}
+
+static inline void gizmo_writel(struct tegra_ahb *ahb, u32 value, u32 offset)
+{
+ writel(value, ahb->regs + offset);
+}
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+static int tegra_ahb_match_by_smmu(struct device *dev, void *data)
+{
+ struct tegra_ahb *ahb = dev_get_drvdata(dev);
+ struct device_node *dn = data;
+
+ return (ahb->dev->of_node == dn) ? 1 : 0;
+}
+
+int tegra_ahb_enable_smmu(struct device_node *dn)
+{
+ struct device *dev;
+ u32 val;
+ struct tegra_ahb *ahb;
+
+ dev = driver_find_device(&tegra_ahb_driver.driver, NULL, dn,
+ tegra_ahb_match_by_smmu);
+ if (!dev)
+ return -EPROBE_DEFER;
+ ahb = dev_get_drvdata(dev);
+ val = gizmo_readl(ahb, AHB_ARBITRATION_XBAR_CTRL);
+ val |= AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE;
+ gizmo_writel(ahb, val, AHB_ARBITRATION_XBAR_CTRL);
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ahb_enable_smmu);
+#endif
+
+static int tegra_ahb_suspend(struct device *dev)
+{
+ int i;
+ struct tegra_ahb *ahb = dev_get_drvdata(dev);
+
+ for (i = 0; i < ARRAY_SIZE(tegra_ahb_gizmo); i++)
+ ahb->ctx[i] = gizmo_readl(ahb, tegra_ahb_gizmo[i]);
+ return 0;
+}
+
+static int tegra_ahb_resume(struct device *dev)
+{
+ int i;
+ struct tegra_ahb *ahb = dev_get_drvdata(dev);
+
+ for (i = 0; i < ARRAY_SIZE(tegra_ahb_gizmo); i++)
+ gizmo_writel(ahb, ahb->ctx[i], tegra_ahb_gizmo[i]);
+ return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(tegra_ahb_pm,
+ tegra_ahb_suspend,
+ tegra_ahb_resume, NULL);
+
+static void tegra_ahb_gizmo_init(struct tegra_ahb *ahb)
+{
+ u32 val;
+
+ val = gizmo_readl(ahb, AHB_GIZMO_AHB_MEM);
+ val |= ENB_FAST_REARBITRATE | IMMEDIATE | DONT_SPLIT_AHB_WR;
+ gizmo_writel(ahb, val, AHB_GIZMO_AHB_MEM);
+
+ val = gizmo_readl(ahb, AHB_GIZMO_USB);
+ val |= IMMEDIATE;
+ gizmo_writel(ahb, val, AHB_GIZMO_USB);
+
+ val = gizmo_readl(ahb, AHB_GIZMO_USB2);
+ val |= IMMEDIATE;
+ gizmo_writel(ahb, val, AHB_GIZMO_USB2);
+
+ val = gizmo_readl(ahb, AHB_GIZMO_USB3);
+ val |= IMMEDIATE;
+ gizmo_writel(ahb, val, AHB_GIZMO_USB3);
+
+ val = gizmo_readl(ahb, AHB_ARBITRATION_PRIORITY_CTRL);
+ val |= PRIORITY_SELECT_USB |
+ PRIORITY_SELECT_USB2 |
+ PRIORITY_SELECT_USB3 |
+ AHB_PRIORITY_WEIGHT(7);
+ gizmo_writel(ahb, val, AHB_ARBITRATION_PRIORITY_CTRL);
+
+ val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG1);
+ val &= ~MST_ID(~0);
+ val |= PREFETCH_ENB |
+ AHBDMA_MST_ID |
+ ADDR_BNDRY(0xc) |
+ INACTIVITY_TIMEOUT(0x1000);
+ gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG1);
+
+ val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG2);
+ val &= ~MST_ID(~0);
+ val |= PREFETCH_ENB |
+ USB_MST_ID |
+ ADDR_BNDRY(0xc) |
+ INACTIVITY_TIMEOUT(0x1000);
+ gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG2);
+
+ val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG3);
+ val &= ~MST_ID(~0);
+ val |= PREFETCH_ENB |
+ USB3_MST_ID |
+ ADDR_BNDRY(0xc) |
+ INACTIVITY_TIMEOUT(0x1000);
+ gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG3);
+
+ val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG4);
+ val &= ~MST_ID(~0);
+ val |= PREFETCH_ENB |
+ USB2_MST_ID |
+ ADDR_BNDRY(0xc) |
+ INACTIVITY_TIMEOUT(0x1000);
+ gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG4);
+}
+
+static int __devinit tegra_ahb_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct tegra_ahb *ahb;
+ size_t bytes;
+
+ bytes = sizeof(*ahb) + sizeof(u32) * ARRAY_SIZE(tegra_ahb_gizmo);
+ ahb = devm_kzalloc(&pdev->dev, bytes, GFP_KERNEL);
+ if (!ahb)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ ahb->regs = devm_request_and_ioremap(&pdev->dev, res);
+ if (!ahb->regs)
+ return -EBUSY;
+
+ ahb->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ahb);
+ tegra_ahb_gizmo_init(ahb);
+ return 0;
+}
+
+static int __devexit tegra_ahb_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id tegra_ahb_of_match[] __devinitconst = {
+ { .compatible = "nvidia,tegra30-ahb", },
+ { .compatible = "nvidia,tegra20-ahb", },
+ {},
+};
+
+static struct platform_driver tegra_ahb_driver = {
+ .probe = tegra_ahb_probe,
+ .remove = __devexit_p(tegra_ahb_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = tegra_ahb_of_match,
+ .pm = &tegra_ahb_pm,
+ },
+};
+module_platform_driver(tegra_ahb_driver);
+
+MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
+MODULE_DESCRIPTION("Tegra AHB driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
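
tegra_ahb_enable_smmu() above is exported for the SMMU driver to call once translation is about to be enabled; when the AHB device has not probed yet, driver_find_device() finds nothing and the function returns -EPROBE_DEFER so the caller can retry later. A hedged consumer sketch follows; the "nvidia,ahb" phandle property and the example_smmu_probe() wrapper are assumptions for illustration only, not part of this patch.

#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>

int tegra_ahb_enable_smmu(struct device_node *dn);	/* from the driver above */

static int example_smmu_probe(struct platform_device *pdev)
{
	struct device_node *ahb;
	int err;

	/* Assumed DT link from the SMMU node to the AHB node. */
	ahb = of_parse_phandle(pdev->dev.of_node, "nvidia,ahb", 0);
	if (!ahb)
		return -ENODEV;

	err = tegra_ahb_enable_smmu(ahb);
	of_node_put(ahb);
	if (err)
		return err;	/* -EPROBE_DEFER until tegra-ahb has probed */

	/* ... continue with normal SMMU setup ... */
	return 0;
}
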
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 7336d4a7ab3..24712adf69d 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -553,6 +553,7 @@ struct mv_host_priv {
#if defined(CONFIG_HAVE_CLK)
struct clk *clk;
+ struct clk **port_clks;
#endif
/*
* These consistent DMA memory pools give us guaranteed
@@ -4027,6 +4028,9 @@ static int mv_platform_probe(struct platform_device *pdev)
struct resource *res;
int n_ports = 0;
int rc;
+#if defined(CONFIG_HAVE_CLK)
+ int port;
+#endif
ata_print_version_once(&pdev->dev, DRV_VERSION);
@@ -4054,6 +4058,13 @@ static int mv_platform_probe(struct platform_device *pdev)
if (!host || !hpriv)
return -ENOMEM;
+#if defined(CONFIG_HAVE_CLK)
+ hpriv->port_clks = devm_kzalloc(&pdev->dev,
+ sizeof(struct clk *) * n_ports,
+ GFP_KERNEL);
+ if (!hpriv->port_clks)
+ return -ENOMEM;
+#endif
host->private_data = hpriv;
hpriv->n_ports = n_ports;
hpriv->board_idx = chip_soc;
@@ -4066,9 +4077,17 @@ static int mv_platform_probe(struct platform_device *pdev)
#if defined(CONFIG_HAVE_CLK)
hpriv->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(hpriv->clk))
- dev_notice(&pdev->dev, "cannot get clkdev\n");
+ dev_notice(&pdev->dev, "cannot get optional clkdev\n");
else
- clk_enable(hpriv->clk);
+ clk_prepare_enable(hpriv->clk);
+
+ for (port = 0; port < n_ports; port++) {
+ char port_number[16];
+ sprintf(port_number, "%d", port);
+ hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
+ if (!IS_ERR(hpriv->port_clks[port]))
+ clk_prepare_enable(hpriv->port_clks[port]);
+ }
#endif
/*
@@ -4098,9 +4117,15 @@ static int mv_platform_probe(struct platform_device *pdev)
err:
#if defined(CONFIG_HAVE_CLK)
if (!IS_ERR(hpriv->clk)) {
- clk_disable(hpriv->clk);
+ clk_disable_unprepare(hpriv->clk);
clk_put(hpriv->clk);
}
+ for (port = 0; port < n_ports; port++) {
+ if (!IS_ERR(hpriv->port_clks[port])) {
+ clk_disable_unprepare(hpriv->port_clks[port]);
+ clk_put(hpriv->port_clks[port]);
+ }
+ }
#endif
return rc;
@@ -4119,14 +4144,21 @@ static int __devexit mv_platform_remove(struct platform_device *pdev)
struct ata_host *host = platform_get_drvdata(pdev);
#if defined(CONFIG_HAVE_CLK)
struct mv_host_priv *hpriv = host->private_data;
+ int port;
#endif
ata_host_detach(host);
#if defined(CONFIG_HAVE_CLK)
if (!IS_ERR(hpriv->clk)) {
- clk_disable(hpriv->clk);
+ clk_disable_unprepare(hpriv->clk);
clk_put(hpriv->clk);
}
+ for (port = 0; port < host->n_ports; port++) {
+ if (!IS_ERR(hpriv->port_clks[port])) {
+ clk_disable_unprepare(hpriv->port_clks[port]);
+ clk_put(hpriv->port_clks[port]);
+ }
+ }
#endif
return 0;
}
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 9aa618acfe9..9b21469482a 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -192,4 +192,93 @@ config DMA_SHARED_BUFFER
APIs extension; the file's descriptor can then be passed on to other
driver.
+config CMA
+ bool "Contiguous Memory Allocator (EXPERIMENTAL)"
+ depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK && EXPERIMENTAL
+ select MIGRATION
+ help
+ This enables the Contiguous Memory Allocator which allows drivers
+ to allocate big physically-contiguous blocks of memory for use with
+ hardware components that do not support I/O mapping or scatter-gather.
+
+ For more information see <include/linux/dma-contiguous.h>.
+ If unsure, say "n".
+
+if CMA
+
+config CMA_DEBUG
+ bool "CMA debug messages (DEVELOPMENT)"
+ depends on DEBUG_KERNEL
+ help
+ Turns on debug messages in CMA. This produces KERN_DEBUG
+ messages for every CMA call as well as various messages while
+ processing calls such as dma_alloc_from_contiguous().
+ This option does not affect warning and error messages.
+
+comment "Default contiguous memory area size:"
+
+config CMA_SIZE_MBYTES
+ int "Size in Mega Bytes"
+ depends on !CMA_SIZE_SEL_PERCENTAGE
+ default 16
+ help
+ Defines the size (in MiB) of the default memory area for Contiguous
+ Memory Allocator.
+
+config CMA_SIZE_PERCENTAGE
+ int "Percentage of total memory"
+ depends on !CMA_SIZE_SEL_MBYTES
+ default 10
+ help
+ Defines the size of the default memory area for Contiguous Memory
+ Allocator as a percentage of the total memory in the system.
+
+choice
+ prompt "Selected region size"
+ default CMA_SIZE_SEL_ABSOLUTE
+
+config CMA_SIZE_SEL_MBYTES
+ bool "Use mega bytes value only"
+
+config CMA_SIZE_SEL_PERCENTAGE
+ bool "Use percentage value only"
+
+config CMA_SIZE_SEL_MIN
+ bool "Use lower value (minimum)"
+
+config CMA_SIZE_SEL_MAX
+ bool "Use higher value (maximum)"
+
+endchoice
+
+config CMA_ALIGNMENT
+ int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
+ range 4 9
+ default 8
+ help
+ DMA mapping framework by default aligns all buffers to the smallest
+ PAGE_SIZE order which is greater than or equal to the requested buffer
+ size. This works well for buffers up to a few hundred kilobytes, but
+ for larger buffers it is just a waste of memory. With this parameter you can
+ specify the maximum PAGE_SIZE order for contiguous buffers. Larger
+ buffers will be aligned only to this specified order. The order is
+ expressed as a power of two multiplied by the PAGE_SIZE.
+
+ For example, if your system defaults to 4KiB pages, the order value
+ of 8 means that the buffers will be aligned up to 1MiB only.
+
+ If unsure, leave the default value "8".
+
+config CMA_AREAS
+ int "Maximum count of the CMA device-private areas"
+ default 7
+ help
+ CMA allows creating CMA areas for particular devices. This parameter
+ sets the maximum number of such device private CMA areas in the
+ system.
+
+ If unsure, leave the default value "7".
+
+endif
+
endmenu
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index b6d1b9c4200..5aa2d703d19 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -6,6 +6,7 @@ obj-y := core.o bus.o dd.o syscore.o \
attribute_container.o transport_class.o \
topology.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
+obj-$(CONFIG_CMA) += dma-contiguous.o
obj-y += power/
obj-$(CONFIG_HAS_DMA) += dma-mapping.o
obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 05c64c11bad..24e88fe29ec 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -44,8 +44,26 @@ static int dma_buf_release(struct inode *inode, struct file *file)
return 0;
}
+static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
+{
+ struct dma_buf *dmabuf;
+
+ if (!is_dma_buf_file(file))
+ return -EINVAL;
+
+ dmabuf = file->private_data;
+
+ /* check for overflowing the buffer's size */
+ if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+ dmabuf->size >> PAGE_SHIFT)
+ return -EINVAL;
+
+ return dmabuf->ops->mmap(dmabuf, vma);
+}
+
static const struct file_operations dma_buf_fops = {
.release = dma_buf_release,
+ .mmap = dma_buf_mmap_internal,
};
/*
@@ -82,7 +100,8 @@ struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
|| !ops->unmap_dma_buf
|| !ops->release
|| !ops->kmap_atomic
- || !ops->kmap)) {
+ || !ops->kmap
+ || !ops->mmap)) {
return ERR_PTR(-EINVAL);
}
@@ -406,3 +425,81 @@ void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);
+
+
+/**
+ * dma_buf_mmap - Set up a userspace mmap with the given vma
+ * @dmabuf: [in] buffer that should back the vma
+ * @vma: [in] vma for the mmap
+ * @pgoff: [in] offset in pages where this mmap should start within the
+ * dma-buf buffer.
+ *
+ * This function adjusts the passed-in vma so that it points at the file of the
+ * dma_buf operation. It also adjusts the starting pgoff and does bounds
+ * checking on the size of the vma. Then it calls the exporter's mmap function to
+ * set up the mapping.
+ *
+ * Can return negative error values, returns 0 on success.
+ */
+int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
+ unsigned long pgoff)
+{
+ if (WARN_ON(!dmabuf || !vma))
+ return -EINVAL;
+
+ /* check for offset overflow */
+ if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
+ return -EOVERFLOW;
+
+ /* check for overflowing the buffer's size */
+ if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+ dmabuf->size >> PAGE_SHIFT)
+ return -EINVAL;
+
+ /* readjust the vma */
+ if (vma->vm_file)
+ fput(vma->vm_file);
+
+ vma->vm_file = dmabuf->file;
+ get_file(vma->vm_file);
+
+ vma->vm_pgoff = pgoff;
+
+ return dmabuf->ops->mmap(dmabuf, vma);
+}
+EXPORT_SYMBOL_GPL(dma_buf_mmap);
+
+/**
+ * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
+ * address space. Same restrictions as for vmap and friends apply.
+ * @dmabuf: [in] buffer to vmap
+ *
+ * This call may fail due to lack of virtual mapping address space.
+ * These calls are optional in drivers. The intended use for them
+ * is mapping objects linearly into kernel space for frequently used objects.
+ * Please attempt to use kmap/kunmap before thinking about these interfaces.
+ */
+void *dma_buf_vmap(struct dma_buf *dmabuf)
+{
+ if (WARN_ON(!dmabuf))
+ return NULL;
+
+ if (dmabuf->ops->vmap)
+ return dmabuf->ops->vmap(dmabuf);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(dma_buf_vmap);
+
+/**
+ * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
+ * @dmabuf: [in] buffer to vunmap
+ */
+void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+ if (WARN_ON(!dmabuf))
+ return;
+
+ if (dmabuf->ops->vunmap)
+ dmabuf->ops->vunmap(dmabuf, vaddr);
+}
+EXPORT_SYMBOL_GPL(dma_buf_vunmap);
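
With this change dma_buf_export() rejects exporters that do not provide an mmap op, and both dma_buf_mmap() and the new file-level dma_buf_mmap_internal() reach dmabuf->ops->mmap(dmabuf, vma) only after the vma has been bounds-checked against dmabuf->size. A hedged exporter-side sketch of such a callback for a physically contiguous buffer is shown below; struct my_buffer and my_dmabuf_mmap() are hypothetical and not part of this patch.

#include <linux/dma-buf.h>
#include <linux/mm.h>

/* Hypothetical exporter bookkeeping for one contiguous buffer. */
struct my_buffer {
	phys_addr_t paddr;	/* physical base of the backing memory */
	size_t size;
};

static int my_dmabuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct my_buffer *buf = dmabuf->priv;

	/* The core has already checked vm_pgoff + length against dmabuf->size. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start,
			       (buf->paddr >> PAGE_SHIFT) + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

An importer that wants the buffer in userspace then only needs dma_buf_mmap(dmabuf, vma, 0) from its own mmap path and never touches the exporter's backing storage directly.
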
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index bb0025c510b..1b85949e3d2 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -10,6 +10,7 @@
struct dma_coherent_mem {
void *virt_base;
dma_addr_t device_base;
+ phys_addr_t pfn_base;
int size;
int flags;
unsigned long *bitmap;
@@ -44,6 +45,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
dev->dma_mem->virt_base = mem_base;
dev->dma_mem->device_base = device_addr;
+ dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
dev->dma_mem->size = pages;
dev->dma_mem->flags = flags;
@@ -176,3 +178,43 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);
+
+/**
+ * dma_mmap_from_coherent() - try to mmap the memory allocated from
+ * per-device coherent memory pool to userspace
+ * @dev: device from which the memory was allocated
+ * @vma: vm_area for the userspace memory
+ * @vaddr: cpu address returned by dma_alloc_from_coherent
+ * @size: size of the memory buffer allocated by dma_alloc_from_coherent
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if we correctly mapped the memory, or 0 if
+ * dma_release_coherent() should proceed with mapping memory from
+ * generic pools.
+ */
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *vaddr, size_t size, int *ret)
+{
+ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+ if (mem && vaddr >= mem->virt_base && vaddr + size <=
+ (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+ unsigned long off = vma->vm_pgoff;
+ int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+ int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ int count = size >> PAGE_SHIFT;
+
+ *ret = -ENXIO;
+ if (off < count && user_count <= count - off) {
+ unsigned pfn = mem->pfn_base + start + off;
+ *ret = remap_pfn_range(vma, vma->vm_start, pfn,
+ user_count << PAGE_SHIFT,
+ vma->vm_page_prot);
+ }
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(dma_mmap_from_coherent);
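
dma_mmap_from_coherent() is meant to sit at the top of an architecture's dma_mmap implementation: it returns 1 when the allocation came from the per-device coherent pool (leaving the remap result in *ret) and 0 when the caller should continue with its generic mapping path. A hedged sketch of that calling convention follows; example_dma_mmap() and arch_dma_mmap_fallback() are placeholders rather than real kernel symbols.

#include <linux/dma-mapping.h>
#include <linux/mm.h>

int arch_dma_mmap_fallback(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

static int example_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret;

	/* 1 => the per-device pool handled it; ret holds the remap result. */
	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	return arch_dma_mmap_fallback(dev, vma, cpu_addr, dma_addr, size);
}
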
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
new file mode 100644
index 00000000000..78efb0306a4
--- /dev/null
+++ b/drivers/base/dma-contiguous.c
@@ -0,0 +1,401 @@
+/*
+ * Contiguous Memory Allocator for DMA mapping framework
+ * Copyright (c) 2010-2011 by Samsung Electronics.
+ * Written by:
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ * Michal Nazarewicz <mina86@mina86.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License or (at your option) any later version of the license.
+ */
+
+#define pr_fmt(fmt) "cma: " fmt
+
+#ifdef CONFIG_CMA_DEBUG
+#ifndef DEBUG
+# define DEBUG
+#endif
+#endif
+
+#include <asm/page.h>
+#include <asm/dma-contiguous.h>
+
+#include <linux/memblock.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/page-isolation.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/mm_types.h>
+#include <linux/dma-contiguous.h>
+
+#ifndef SZ_1M
+#define SZ_1M (1 << 20)
+#endif
+
+struct cma {
+ unsigned long base_pfn;
+ unsigned long count;
+ unsigned long *bitmap;
+};
+
+struct cma *dma_contiguous_default_area;
+
+#ifdef CONFIG_CMA_SIZE_MBYTES
+#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
+#else
+#define CMA_SIZE_MBYTES 0
+#endif
+
+/*
+ * Default global CMA area size can be defined in kernel's .config.
+ * This is useful mainly for distro maintainers to create a kernel
+ * that works correctly for most supported systems.
+ * The size can be set in bytes or as a percentage of the total memory
+ * in the system.
+ *
+ * Users, who want to set the size of global CMA area for their system
+ * should use cma= kernel parameter.
+ */
+static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M;
+static long size_cmdline = -1;
+
+static int __init early_cma(char *p)
+{
+ pr_debug("%s(%s)\n", __func__, p);
+ size_cmdline = memparse(p, &p);
+ return 0;
+}
+early_param("cma", early_cma);
+
+#ifdef CONFIG_CMA_SIZE_PERCENTAGE
+
+static unsigned long __init __maybe_unused cma_early_percent_memory(void)
+{
+ struct memblock_region *reg;
+ unsigned long total_pages = 0;
+
+ /*
+ * We cannot use memblock_phys_mem_size() here, because
+ * memblock_analyze() has not been called yet.
+ */
+ for_each_memblock(memory, reg)
+ total_pages += memblock_region_memory_end_pfn(reg) -
+ memblock_region_memory_base_pfn(reg);
+
+ return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
+}
+
+#else
+
+static inline __maybe_unused unsigned long cma_early_percent_memory(void)
+{
+ return 0;
+}
+
+#endif
+
+/**
+ * dma_contiguous_reserve() - reserve area for contiguous memory handling
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory from the early allocator. It should be
+ * called by arch-specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory.
+ */
+void __init dma_contiguous_reserve(phys_addr_t limit)
+{
+ unsigned long selected_size = 0;
+
+ pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
+
+ if (size_cmdline != -1) {
+ selected_size = size_cmdline;
+ } else {
+#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
+ selected_size = size_bytes;
+#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
+ selected_size = cma_early_percent_memory();
+#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
+ selected_size = min(size_bytes, cma_early_percent_memory());
+#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
+ selected_size = max(size_bytes, cma_early_percent_memory());
+#endif
+ }
+
+ if (selected_size) {
+ pr_debug("%s: reserving %ld MiB for global area\n", __func__,
+ selected_size / SZ_1M);
+
+ dma_declare_contiguous(NULL, selected_size, 0, limit);
+ }
+};
+
+static DEFINE_MUTEX(cma_mutex);
+
+static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
+{
+ unsigned long pfn = base_pfn;
+ unsigned i = count >> pageblock_order;
+ struct zone *zone;
+
+ WARN_ON_ONCE(!pfn_valid(pfn));
+ zone = page_zone(pfn_to_page(pfn));
+
+ do {
+ unsigned j;
+ base_pfn = pfn;
+ for (j = pageblock_nr_pages; j; --j, pfn++) {
+ WARN_ON_ONCE(!pfn_valid(pfn));
+ if (page_zone(pfn_to_page(pfn)) != zone)
+ return -EINVAL;
+ }
+ init_cma_reserved_pageblock(pfn_to_page(base_pfn));
+ } while (--i);
+ return 0;
+}
+
+static __init struct cma *cma_create_area(unsigned long base_pfn,
+ unsigned long count)
+{
+ int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+ struct cma *cma;
+ int ret = -ENOMEM;
+
+ pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
+
+ cma = kmalloc(sizeof *cma, GFP_KERNEL);
+ if (!cma)
+ return ERR_PTR(-ENOMEM);
+
+ cma->base_pfn = base_pfn;
+ cma->count = count;
+ cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+
+ if (!cma->bitmap)
+ goto no_mem;
+
+ ret = cma_activate_area(base_pfn, count);
+ if (ret)
+ goto error;
+
+ pr_debug("%s: returned %p\n", __func__, (void *)cma);
+ return cma;
+
+error:
+ kfree(cma->bitmap);
+no_mem:
+ kfree(cma);
+ return ERR_PTR(ret);
+}
+
+static struct cma_reserved {
+ phys_addr_t start;
+ unsigned long size;
+ struct device *dev;
+} cma_reserved[MAX_CMA_AREAS] __initdata;
+static unsigned cma_reserved_count __initdata;
+
+static int __init cma_init_reserved_areas(void)
+{
+ struct cma_reserved *r = cma_reserved;
+ unsigned i = cma_reserved_count;
+
+ pr_debug("%s()\n", __func__);
+
+ for (; i; --i, ++r) {
+ struct cma *cma;
+ cma = cma_create_area(PFN_DOWN(r->start),
+ r->size >> PAGE_SHIFT);
+ if (!IS_ERR(cma))
+ dev_set_cma_area(r->dev, cma);
+ }
+ return 0;
+}
+core_initcall(cma_init_reserved_areas);
+
+/**
+ * dma_declare_contiguous() - reserve area for contiguous memory handling
+ * for a particular device
+ * @dev: Pointer to device structure.
+ * @size: Size of the reserved memory.
+ * @base: Start address of the reserved memory (optional, 0 for any).
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory for the specified device. It should be
+ * called by board-specific code while the early allocator (memblock or
+ * bootmem) is still active.
+ */
+int __init dma_declare_contiguous(struct device *dev, unsigned long size,
+ phys_addr_t base, phys_addr_t limit)
+{
+ struct cma_reserved *r = &cma_reserved[cma_reserved_count];
+ unsigned long alignment;
+
+ pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
+ (unsigned long)size, (unsigned long)base,
+ (unsigned long)limit);
+
+ /* Sanity checks */
+ if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
+ pr_err("Not enough slots for CMA reserved regions!\n");
+ return -ENOSPC;
+ }
+
+ if (!size)
+ return -EINVAL;
+
+ /* Sanitise input arguments */
+ alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order);
+ base = ALIGN(base, alignment);
+ size = ALIGN(size, alignment);
+ limit &= ~(alignment - 1);
+
+ /* Reserve memory */
+ if (base) {
+ if (memblock_is_region_reserved(base, size) ||
+ memblock_reserve(base, size) < 0) {
+ base = -EBUSY;
+ goto err;
+ }
+ } else {
+ /*
+ * Use __memblock_alloc_base() since
+ * memblock_alloc_base() panic()s.
+ */
+ phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
+ if (!addr) {
+ base = -ENOMEM;
+ goto err;
+ } else if (addr + size > ~(unsigned long)0) {
+ memblock_free(addr, size);
+ base = -EINVAL;
+ goto err;
+ } else {
+ base = addr;
+ }
+ }
+
+ /*
+ * Each reserved area must be initialised later, when more kernel
+ * subsystems (like slab allocator) are available.
+ */
+ r->start = base;
+ r->size = size;
+ r->dev = dev;
+ cma_reserved_count++;
+ pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M,
+ (unsigned long)base);
+
+ /* Architecture specific contiguous memory fixup. */
+ dma_contiguous_early_fixup(base, size);
+ return 0;
+err:
+ pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M);
+ return base;
+}
+
+/**
+ * dma_alloc_from_contiguous() - allocate pages from contiguous area
+ * @dev: Pointer to device for which the allocation is performed.
+ * @count: Requested number of pages.
+ * @align: Requested alignment of pages (in PAGE_SIZE order).
+ *
+ * This function allocates a memory buffer for the specified device. It uses
+ * the device-specific contiguous memory area if available, or the default
+ * global one. Requires the architecture-specific dev_get_cma_area() helper
+ * function.
+ */
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+ unsigned int align)
+{
+ unsigned long mask, pfn, pageno, start = 0;
+ struct cma *cma = dev_get_cma_area(dev);
+ int ret;
+
+ if (!cma || !cma->count)
+ return NULL;
+
+ if (align > CONFIG_CMA_ALIGNMENT)
+ align = CONFIG_CMA_ALIGNMENT;
+
+ pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
+ count, align);
+
+ if (!count)
+ return NULL;
+
+ mask = (1 << align) - 1;
+
+ mutex_lock(&cma_mutex);
+
+ for (;;) {
+ pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
+ start, count, mask);
+ if (pageno >= cma->count) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ pfn = cma->base_pfn + pageno;
+ ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+ if (ret == 0) {
+ bitmap_set(cma->bitmap, pageno, count);
+ break;
+ } else if (ret != -EBUSY) {
+ goto error;
+ }
+ pr_debug("%s(): memory range at %p is busy, retrying\n",
+ __func__, pfn_to_page(pfn));
+ /* try again with a bit different memory target */
+ start = pageno + mask + 1;
+ }
+
+ mutex_unlock(&cma_mutex);
+
+ pr_debug("%s(): returned %p\n", __func__, pfn_to_page(pfn));
+ return pfn_to_page(pfn);
+error:
+ mutex_unlock(&cma_mutex);
+ return NULL;
+}
+
+/**
+ * dma_release_from_contiguous() - release allocated pages
+ * @dev: Pointer to device for which the pages were allocated.
+ * @pages: Allocated pages.
+ * @count: Number of allocated pages.
+ *
+ * This function releases memory allocated by dma_alloc_from_contiguous().
+ * It returns false when the provided pages do not belong to the contiguous
+ * area, and true otherwise.
+ */
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+ int count)
+{
+ struct cma *cma = dev_get_cma_area(dev);
+ unsigned long pfn;
+
+ if (!cma || !pages)
+ return false;
+
+ pr_debug("%s(page %p)\n", __func__, (void *)pages);
+
+ pfn = page_to_pfn(pages);
+
+ if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
+ return false;
+
+ VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
+
+ mutex_lock(&cma_mutex);
+ bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
+ free_contig_range(pfn, count);
+ mutex_unlock(&cma_mutex);
+
+ return true;
+}
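For illustration, a hypothetical driver-side use of the two entry points
above; the device pointer and size handling are illustrative only:

#include <linux/dma-contiguous.h>

static struct page *example_alloc_buffer(struct device *dev, size_t size)
{
	int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/* align is given in PAGE_SIZE order; 0 means single-page alignment */
	return dma_alloc_from_contiguous(dev, count, 0);
}

static void example_free_buffer(struct device *dev, struct page *pages,
				size_t size)
{
	int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (!dma_release_from_contiguous(dev, pages, count))
		pr_warn("buffer was not allocated from the CMA area\n");
}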
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 90aa2a11a93..af1a177216f 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -592,11 +592,9 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
{
int n;
- n = nodelist_scnprintf(buf, PAGE_SIZE, node_states[state]);
- if (n > 0 && PAGE_SIZE > n + 1) {
- *(buf + n++) = '\n';
- *(buf + n++) = '\0';
- }
+ n = nodelist_scnprintf(buf, PAGE_SIZE-2, node_states[state]);
+ buf[n++] = '\n';
+ buf[n] = '\0';
return n;
}
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 165e1febae5..4864407e3fc 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -12,6 +12,7 @@ config HAVE_MACH_CLKDEV
config COMMON_CLK
bool
select HAVE_CLK_PREPARE
+ select CLKDEV_LOOKUP
---help---
The common clock framework is a single definition of struct
clk, useful across many platforms, as well as an
@@ -22,17 +23,6 @@ config COMMON_CLK
menu "Common Clock Framework"
depends on COMMON_CLK
-config COMMON_CLK_DISABLE_UNUSED
- bool "Disabled unused clocks at boot"
- depends on COMMON_CLK
- ---help---
- Traverses the entire clock tree and disables any clocks that are
- enabled in hardware but have not been enabled by any device drivers.
- This saves power and keeps the software model of the clock in line
- with reality.
-
- If in doubt, say "N".
-
config COMMON_CLK_DEBUG
bool "DebugFS representation of clock tree"
depends on COMMON_CLK
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 1f736bc11c4..b9a5158a30b 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -1,4 +1,7 @@
obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o
obj-$(CONFIG_COMMON_CLK) += clk.o clk-fixed-rate.o clk-gate.o \
- clk-mux.o clk-divider.o
+ clk-mux.o clk-divider.o clk-fixed-factor.o
+# SoCs specific
+obj-$(CONFIG_ARCH_MXS) += mxs/
+obj-$(CONFIG_PLAT_SPEAR) += spear/
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index d5ac6a75ea5..8ea11b44452 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -45,7 +45,6 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
return parent_rate / div;
}
-EXPORT_SYMBOL_GPL(clk_divider_recalc_rate);
/*
* The reverse of DIV_ROUND_UP: The maximum number which
@@ -68,8 +67,8 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
if (divider->flags & CLK_DIVIDER_ONE_BASED)
maxdiv--;
- if (!best_parent_rate) {
- parent_rate = __clk_get_rate(__clk_get_parent(hw->clk));
+ if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
+ parent_rate = *best_parent_rate;
bestdiv = DIV_ROUND_UP(parent_rate, rate);
bestdiv = bestdiv == 0 ? 1 : bestdiv;
bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
@@ -109,24 +108,18 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
int div;
div = clk_divider_bestdiv(hw, rate, prate);
- if (prate)
- return *prate / div;
- else {
- unsigned long r;
- r = __clk_get_rate(__clk_get_parent(hw->clk));
- return r / div;
- }
+ return *prate / div;
}
-EXPORT_SYMBOL_GPL(clk_divider_round_rate);
-static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate)
+static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
struct clk_divider *divider = to_clk_divider(hw);
unsigned int div;
unsigned long flags = 0;
u32 val;
- div = __clk_get_rate(__clk_get_parent(hw->clk)) / rate;
+ div = parent_rate / rate;
if (!(divider->flags & CLK_DIVIDER_ONE_BASED))
div--;
@@ -147,15 +140,26 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate)
return 0;
}
-EXPORT_SYMBOL_GPL(clk_divider_set_rate);
-struct clk_ops clk_divider_ops = {
+const struct clk_ops clk_divider_ops = {
.recalc_rate = clk_divider_recalc_rate,
.round_rate = clk_divider_round_rate,
.set_rate = clk_divider_set_rate,
};
EXPORT_SYMBOL_GPL(clk_divider_ops);
+/**
+ * clk_register_divider - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
struct clk *clk_register_divider(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
@@ -163,38 +167,34 @@ struct clk *clk_register_divider(struct device *dev, const char *name,
{
struct clk_divider *div;
struct clk *clk;
+ struct clk_init_data init;
+ /* allocate the divider */
div = kzalloc(sizeof(struct clk_divider), GFP_KERNEL);
-
if (!div) {
pr_err("%s: could not allocate divider clk\n", __func__);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
+ init.name = name;
+ init.ops = &clk_divider_ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name: NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
/* struct clk_divider assignments */
div->reg = reg;
div->shift = shift;
div->width = width;
div->flags = clk_divider_flags;
div->lock = lock;
+ div->hw.init = &init;
- if (parent_name) {
- div->parent[0] = kstrdup(parent_name, GFP_KERNEL);
- if (!div->parent[0])
- goto out;
- }
-
- clk = clk_register(dev, name,
- &clk_divider_ops, &div->hw,
- div->parent,
- (parent_name ? 1 : 0),
- flags);
- if (clk)
- return clk;
+ /* register the clock */
+ clk = clk_register(dev, &div->hw);
-out:
- kfree(div->parent[0]);
- kfree(div);
+ if (IS_ERR(clk))
+ kfree(div);
- return NULL;
+ return clk;
}
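For illustration, a hypothetical provider registering a divider through the
helper above; the register offset, field layout and clock names are made up:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_clk_lock);

static void __init example_register_uart_div(void __iomem *base)
{
	struct clk *clk;

	/* 4-bit divider field at bits [3:0] of a hypothetical register */
	clk = clk_register_divider(NULL, "uart_div", "osc", 0,
				   base + 0x10, 0, 4, 0, &example_clk_lock);
	if (IS_ERR(clk))
		pr_err("failed to register uart_div: %ld\n", PTR_ERR(clk));
}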
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
new file mode 100644
index 00000000000..c8c003e217a
--- /dev/null
+++ b/drivers/clk/clk-fixed-factor.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Standard functionality for the common clock API.
+ */
+#include <linux/module.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+/*
+ * DOC: basic fixed multiplier and divider clock that cannot gate
+ *
+ * Traits of this clock:
+ * prepare - clk_prepare only ensures that parents are prepared
+ * enable - clk_enable only ensures that parents are enabled
+ * rate - rate is fixed. clk->rate = parent->rate / div * mult
+ * parent - fixed parent. No clk_set_parent support
+ */
+
+#define to_clk_fixed_factor(_hw) container_of(_hw, struct clk_fixed_factor, hw)
+
+static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
+
+ return parent_rate * fix->mult / fix->div;
+}
+
+static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
+
+ if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+ unsigned long best_parent;
+
+ best_parent = (rate / fix->mult) * fix->div;
+ *prate = __clk_round_rate(__clk_get_parent(hw->clk),
+ best_parent);
+ }
+
+ return (*prate / fix->div) * fix->mult;
+}
+
+static int clk_factor_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return 0;
+}
+
+struct clk_ops clk_fixed_factor_ops = {
+ .round_rate = clk_factor_round_rate,
+ .set_rate = clk_factor_set_rate,
+ .recalc_rate = clk_factor_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_fixed_factor_ops);
+
+struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ unsigned int mult, unsigned int div)
+{
+ struct clk_fixed_factor *fix;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ fix = kmalloc(sizeof(*fix), GFP_KERNEL);
+ if (!fix) {
+ pr_err("%s: could not allocate fixed factor clk\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* struct clk_fixed_factor assignments */
+ fix->mult = mult;
+ fix->div = div;
+ fix->hw.init = &init;
+
+ init.name = name;
+ init.ops = &clk_fixed_factor_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clk = clk_register(dev, &fix->hw);
+
+ if (IS_ERR(clk))
+ kfree(fix);
+
+ return clk;
+}
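For illustration, a hypothetical fixed-factor registration; with a 24 MHz
parent and mult/div = 1/750 the resulting rate is 24000000 * 1 / 750 =
32000 Hz, the same ratio the i.MX23/28 code later in this patch uses for
clk32k_div:

#include <linux/clk-provider.h>
#include <linux/err.h>

static void __init example_register_clk32k_div(void)
{
	struct clk *clk;

	/* rate = parent_rate * mult / div = 24 MHz * 1 / 750 = 32 kHz */
	clk = clk_register_fixed_factor(NULL, "clk32k_div", "ref_xtal",
					0, 1, 750);
	if (IS_ERR(clk))
		pr_err("failed to register clk32k_div: %ld\n", PTR_ERR(clk));
}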
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index 90c79fb5d1b..cbd24622978 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -32,51 +32,50 @@ static unsigned long clk_fixed_rate_recalc_rate(struct clk_hw *hw,
{
return to_clk_fixed_rate(hw)->fixed_rate;
}
-EXPORT_SYMBOL_GPL(clk_fixed_rate_recalc_rate);
-struct clk_ops clk_fixed_rate_ops = {
+const struct clk_ops clk_fixed_rate_ops = {
.recalc_rate = clk_fixed_rate_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_fixed_rate_ops);
+/**
+ * clk_register_fixed_rate - register fixed-rate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned long fixed_rate)
{
struct clk_fixed_rate *fixed;
- char **parent_names = NULL;
- u8 len;
+ struct clk *clk;
+ struct clk_init_data init;
+ /* allocate fixed-rate clock */
fixed = kzalloc(sizeof(struct clk_fixed_rate), GFP_KERNEL);
-
if (!fixed) {
pr_err("%s: could not allocate fixed clk\n", __func__);
return ERR_PTR(-ENOMEM);
}
+ init.name = name;
+ init.ops = &clk_fixed_rate_ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name: NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
/* struct clk_fixed_rate assignments */
fixed->fixed_rate = fixed_rate;
+ fixed->hw.init = &init;
- if (parent_name) {
- parent_names = kmalloc(sizeof(char *), GFP_KERNEL);
-
- if (! parent_names)
- goto out;
+ /* register the clock */
+ clk = clk_register(dev, &fixed->hw);
- len = sizeof(char) * strlen(parent_name);
-
- parent_names[0] = kmalloc(len, GFP_KERNEL);
-
- if (!parent_names[0])
- goto out;
-
- strncpy(parent_names[0], parent_name, len);
- }
+ if (IS_ERR(clk))
+ kfree(fixed);
-out:
- return clk_register(dev, name,
- &clk_fixed_rate_ops, &fixed->hw,
- parent_names,
- (parent_name ? 1 : 0),
- flags);
+ return clk;
}
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index b5902e2ef2f..578465e04be 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -28,32 +28,38 @@
#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
-static void clk_gate_set_bit(struct clk_gate *gate)
+/*
+ * It works on the following logic:
+ *
+ * For enabling clock, enable = 1
+ * set2dis = 1 -> clear bit -> set = 0
+ * set2dis = 0 -> set bit -> set = 1
+ *
+ * For disabling clock, enable = 0
+ * set2dis = 1 -> set bit -> set = 1
+ * set2dis = 0 -> clear bit -> set = 0
+ *
+ * So, result is always: enable xor set2dis.
+ */
+static void clk_gate_endisable(struct clk_hw *hw, int enable)
{
- u32 reg;
+ struct clk_gate *gate = to_clk_gate(hw);
+ int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0;
unsigned long flags = 0;
+ u32 reg;
+
+ set ^= enable;
if (gate->lock)
spin_lock_irqsave(gate->lock, flags);
reg = readl(gate->reg);
- reg |= BIT(gate->bit_idx);
- writel(reg, gate->reg);
-
- if (gate->lock)
- spin_unlock_irqrestore(gate->lock, flags);
-}
-
-static void clk_gate_clear_bit(struct clk_gate *gate)
-{
- u32 reg;
- unsigned long flags = 0;
- if (gate->lock)
- spin_lock_irqsave(gate->lock, flags);
+ if (set)
+ reg |= BIT(gate->bit_idx);
+ else
+ reg &= ~BIT(gate->bit_idx);
- reg = readl(gate->reg);
- reg &= ~BIT(gate->bit_idx);
writel(reg, gate->reg);
if (gate->lock)
@@ -62,27 +68,15 @@ static void clk_gate_clear_bit(struct clk_gate *gate)
static int clk_gate_enable(struct clk_hw *hw)
{
- struct clk_gate *gate = to_clk_gate(hw);
-
- if (gate->flags & CLK_GATE_SET_TO_DISABLE)
- clk_gate_clear_bit(gate);
- else
- clk_gate_set_bit(gate);
+ clk_gate_endisable(hw, 1);
return 0;
}
-EXPORT_SYMBOL_GPL(clk_gate_enable);
static void clk_gate_disable(struct clk_hw *hw)
{
- struct clk_gate *gate = to_clk_gate(hw);
-
- if (gate->flags & CLK_GATE_SET_TO_DISABLE)
- clk_gate_set_bit(gate);
- else
- clk_gate_clear_bit(gate);
+ clk_gate_endisable(hw, 0);
}
-EXPORT_SYMBOL_GPL(clk_gate_disable);
static int clk_gate_is_enabled(struct clk_hw *hw)
{
@@ -99,15 +93,25 @@ static int clk_gate_is_enabled(struct clk_hw *hw)
return reg ? 1 : 0;
}
-EXPORT_SYMBOL_GPL(clk_gate_is_enabled);
-struct clk_ops clk_gate_ops = {
+const struct clk_ops clk_gate_ops = {
.enable = clk_gate_enable,
.disable = clk_gate_disable,
.is_enabled = clk_gate_is_enabled,
};
EXPORT_SYMBOL_GPL(clk_gate_ops);
+/**
+ * clk_register_gate - register a gate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of this clock's parent
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
struct clk *clk_register_gate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 bit_idx,
@@ -115,36 +119,32 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
{
struct clk_gate *gate;
struct clk *clk;
+ struct clk_init_data init;
+ /* allocate the gate */
gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
-
if (!gate) {
pr_err("%s: could not allocate gated clk\n", __func__);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
+ init.name = name;
+ init.ops = &clk_gate_ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name: NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
/* struct clk_gate assignments */
gate->reg = reg;
gate->bit_idx = bit_idx;
gate->flags = clk_gate_flags;
gate->lock = lock;
+ gate->hw.init = &init;
- if (parent_name) {
- gate->parent[0] = kstrdup(parent_name, GFP_KERNEL);
- if (!gate->parent[0])
- goto out;
- }
+ clk = clk_register(dev, &gate->hw);
+
+ if (IS_ERR(clk))
+ kfree(gate);
- clk = clk_register(dev, name,
- &clk_gate_ops, &gate->hw,
- gate->parent,
- (parent_name ? 1 : 0),
- flags);
- if (clk)
- return clk;
-out:
- kfree(gate->parent[0]);
- kfree(gate);
-
- return NULL;
+ return clk;
}
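For illustration, a hypothetical gate whose control bit disables the clock
when set, i.e. the CLK_GATE_SET_TO_DISABLE case handled by
clk_gate_endisable() above; the register offset, bit index and names are
made up:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_gate_lock);

static void __init example_register_spi_gate(void __iomem *base)
{
	struct clk *clk;

	clk = clk_register_gate(NULL, "spi_gate", "ahb", 0,
				base + 0x20, 3, CLK_GATE_SET_TO_DISABLE,
				&example_gate_lock);
	if (IS_ERR(clk))
		pr_err("failed to register spi_gate: %ld\n", PTR_ERR(clk));
}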
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index c71ad1f41a9..fd36a8ea73d 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -55,7 +55,6 @@ static u8 clk_mux_get_parent(struct clk_hw *hw)
return val;
}
-EXPORT_SYMBOL_GPL(clk_mux_get_parent);
static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
@@ -82,35 +81,47 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
return 0;
}
-EXPORT_SYMBOL_GPL(clk_mux_set_parent);
-struct clk_ops clk_mux_ops = {
+const struct clk_ops clk_mux_ops = {
.get_parent = clk_mux_get_parent,
.set_parent = clk_mux_set_parent,
};
EXPORT_SYMBOL_GPL(clk_mux_ops);
struct clk *clk_register_mux(struct device *dev, const char *name,
- char **parent_names, u8 num_parents, unsigned long flags,
+ const char **parent_names, u8 num_parents, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
u8 clk_mux_flags, spinlock_t *lock)
{
struct clk_mux *mux;
+ struct clk *clk;
+ struct clk_init_data init;
- mux = kmalloc(sizeof(struct clk_mux), GFP_KERNEL);
-
+ /* allocate the mux */
+ mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
if (!mux) {
pr_err("%s: could not allocate mux clk\n", __func__);
return ERR_PTR(-ENOMEM);
}
+ init.name = name;
+ init.ops = &clk_mux_ops;
+ init.flags = flags;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
/* struct clk_mux assignments */
mux->reg = reg;
mux->shift = shift;
mux->width = width;
mux->flags = clk_mux_flags;
mux->lock = lock;
+ mux->hw.init = &init;
+
+ clk = clk_register(dev, &mux->hw);
+
+ if (IS_ERR(clk))
+ kfree(mux);
- return clk_register(dev, name, &clk_mux_ops, &mux->hw,
- parent_names, num_parents, flags);
+ return clk;
}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 9cf6f59e3e1..687b00d67c8 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -194,9 +194,8 @@ static int __init clk_debug_init(void)
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk *clk) { return 0; }
-#endif /* CONFIG_COMMON_CLK_DEBUG */
+#endif
-#ifdef CONFIG_COMMON_CLK_DISABLE_UNUSED
/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
{
@@ -246,9 +245,6 @@ static int clk_disable_unused(void)
return 0;
}
late_initcall(clk_disable_unused);
-#else
-static inline int clk_disable_unused(struct clk *clk) { return 0; }
-#endif /* CONFIG_COMMON_CLK_DISABLE_UNUSED */
/*** helper functions ***/
@@ -287,7 +283,7 @@ unsigned long __clk_get_rate(struct clk *clk)
unsigned long ret;
if (!clk) {
- ret = -EINVAL;
+ ret = 0;
goto out;
}
@@ -297,7 +293,7 @@ unsigned long __clk_get_rate(struct clk *clk)
goto out;
if (!clk->parent)
- ret = -ENODEV;
+ ret = 0;
out:
return ret;
@@ -562,7 +558,7 @@ EXPORT_SYMBOL_GPL(clk_enable);
* @clk: the clk whose rate is being returned
*
* Simply returns the cached rate of the clk. Does not query the hardware. If
- * clk is NULL then returns -EINVAL.
+ * clk is NULL then returns 0.
*/
unsigned long clk_get_rate(struct clk *clk)
{
@@ -584,18 +580,22 @@ EXPORT_SYMBOL_GPL(clk_get_rate);
*/
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
- unsigned long unused;
+ unsigned long parent_rate = 0;
if (!clk)
return -EINVAL;
- if (!clk->ops->round_rate)
- return clk->rate;
+ if (!clk->ops->round_rate) {
+ if (clk->flags & CLK_SET_RATE_PARENT)
+ return __clk_round_rate(clk->parent, rate);
+ else
+ return clk->rate;
+ }
- if (clk->flags & CLK_SET_RATE_PARENT)
- return clk->ops->round_rate(clk->hw, rate, &unused);
- else
- return clk->ops->round_rate(clk->hw, rate, NULL);
+ if (clk->parent)
+ parent_rate = clk->parent->rate;
+
+ return clk->ops->round_rate(clk->hw, rate, &parent_rate);
}
/**
@@ -765,25 +765,41 @@ static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
struct clk *top = clk;
- unsigned long best_parent_rate = clk->parent->rate;
+ unsigned long best_parent_rate = 0;
unsigned long new_rate;
- if (!clk->ops->round_rate && !(clk->flags & CLK_SET_RATE_PARENT)) {
- clk->new_rate = clk->rate;
+ /* sanity */
+ if (IS_ERR_OR_NULL(clk))
+ return NULL;
+
+ /* save parent rate, if it exists */
+ if (clk->parent)
+ best_parent_rate = clk->parent->rate;
+
+ /* never propagate up to the parent */
+ if (!(clk->flags & CLK_SET_RATE_PARENT)) {
+ if (!clk->ops->round_rate) {
+ clk->new_rate = clk->rate;
+ return NULL;
+ }
+ new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
+ goto out;
+ }
+
+ /* need clk->parent from here on out */
+ if (!clk->parent) {
+ pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
return NULL;
}
- if (!clk->ops->round_rate && (clk->flags & CLK_SET_RATE_PARENT)) {
+ if (!clk->ops->round_rate) {
top = clk_calc_new_rates(clk->parent, rate);
- new_rate = clk->new_rate = clk->parent->new_rate;
+ new_rate = clk->parent->new_rate;
goto out;
}
- if (clk->flags & CLK_SET_RATE_PARENT)
- new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
- else
- new_rate = clk->ops->round_rate(clk->hw, rate, NULL);
+ new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
if (best_parent_rate != clk->parent->rate) {
top = clk_calc_new_rates(clk->parent, best_parent_rate);
@@ -839,7 +855,7 @@ static void clk_change_rate(struct clk *clk)
old_rate = clk->rate;
if (clk->ops->set_rate)
- clk->ops->set_rate(clk->hw, clk->new_rate);
+ clk->ops->set_rate(clk->hw, clk->new_rate, clk->parent->rate);
if (clk->ops->recalc_rate)
clk->rate = clk->ops->recalc_rate(clk->hw,
@@ -859,38 +875,19 @@ static void clk_change_rate(struct clk *clk)
* @clk: the clk whose rate is being changed
* @rate: the new rate for clk
*
- * In the simplest case clk_set_rate will only change the rate of clk.
- *
- * If clk has the CLK_SET_RATE_GATE flag set and it is enabled this call
- * will fail; only when the clk is disabled will it be able to change
- * its rate.
+ * In the simplest case clk_set_rate will only adjust the rate of clk.
*
- * Setting the CLK_SET_RATE_PARENT flag allows clk_set_rate to
- * recursively propagate up to clk's parent; whether or not this happens
- * depends on the outcome of clk's .round_rate implementation. If
- * *parent_rate is 0 after calling .round_rate then upstream parent
- * propagation is ignored. If *parent_rate comes back with a new rate
- * for clk's parent then we propagate up to clk's parent and set it's
- * rate. Upward propagation will continue until either a clk does not
- * support the CLK_SET_RATE_PARENT flag or .round_rate stops requesting
- * changes to clk's parent_rate. If there is a failure during upstream
- * propagation then clk_set_rate will unwind and restore each clk's rate
- * that had been successfully changed. Afterwards a rate change abort
- * notification will be propagated downstream, starting from the clk
- * that failed.
+ * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
+ * propagate up to clk's parent; whether or not this happens depends on the
+ * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
+ * after calling .round_rate then upstream parent propagation is ignored. If
+ * *parent_rate comes back with a new rate for clk's parent then we propagate
+ * up to clk's parent and set its rate. Upward propagation will continue
+ * until either a clk does not support the CLK_SET_RATE_PARENT flag or
+ * .round_rate stops requesting changes to clk's parent_rate.
*
- * At the end of all of the rate setting, clk_set_rate internally calls
- * __clk_recalc_rates and propagates the rate changes downstream,
- * starting from the highest clk whose rate was changed. This has the
- * added benefit of propagating post-rate change notifiers.
- *
- * Note that while post-rate change and rate change abort notifications
- * are guaranteed to be sent to a clk only once per call to
- * clk_set_rate, pre-change notifications will be sent for every clk
- * whose rate is changed. Stacking pre-change notifications is noisy
- * for the drivers subscribed to them, but this allows drivers to react
- * to intermediate clk rate changes up until the point where the final
- * rate is achieved at the end of upstream propagation.
+ * Rate changes are accomplished via tree traversal that also recalculates the
+ * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
*
* Returns 0 on success, -EERROR otherwise.
*/
@@ -906,6 +903,11 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
if (rate == clk->rate)
goto out;
+ if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
+ ret = -EBUSY;
+ goto out;
+ }
+
/* calculate new rates and get the topmost changed clock */
top = clk_calc_new_rates(clk, rate);
if (!top) {
@@ -1175,40 +1177,41 @@ EXPORT_SYMBOL_GPL(clk_set_parent);
*
* Initializes the lists in struct clk, queries the hardware for the
* parent and rate and sets them both.
- *
- * Any struct clk passed into __clk_init must have the following members
- * populated:
- * .name
- * .ops
- * .hw
- * .parent_names
- * .num_parents
- * .flags
- *
- * Essentially, everything that would normally be passed into clk_register is
- * assumed to be initialized already in __clk_init. The other members may be
- * populated, but are optional.
- *
- * __clk_init is only exposed via clk-private.h and is intended for use with
- * very large numbers of clocks that need to be statically initialized. It is
- * a layering violation to include clk-private.h from any code which implements
- * a clock's .ops; as such any statically initialized clock data MUST be in a
- * separate C file from the logic that implements it's operations.
*/
-void __clk_init(struct device *dev, struct clk *clk)
+int __clk_init(struct device *dev, struct clk *clk)
{
- int i;
+ int i, ret = 0;
struct clk *orphan;
struct hlist_node *tmp, *tmp2;
if (!clk)
- return;
+ return -EINVAL;
mutex_lock(&prepare_lock);
/* check to see if a clock with this name is already registered */
- if (__clk_lookup(clk->name))
+ if (__clk_lookup(clk->name)) {
+ pr_debug("%s: clk %s already initialized\n",
+ __func__, clk->name);
+ ret = -EEXIST;
+ goto out;
+ }
+
+ /* check that clk_ops are sane. See Documentation/clk.txt */
+ if (clk->ops->set_rate &&
+ !(clk->ops->round_rate && clk->ops->recalc_rate)) {
+ pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
+ __func__, clk->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (clk->ops->set_parent && !clk->ops->get_parent) {
+ pr_warning("%s: %s must implement .get_parent & .set_parent\n",
+ __func__, clk->name);
+ ret = -EINVAL;
goto out;
+ }
/* throw a WARN if any entries in parent_names are NULL */
for (i = 0; i < clk->num_parents; i++)
@@ -1302,48 +1305,130 @@ void __clk_init(struct device *dev, struct clk *clk)
out:
mutex_unlock(&prepare_lock);
- return;
+ return ret;
}
/**
+ * __clk_register - register a clock and return a cookie.
+ *
+ * Same as clk_register, except that the .clk field inside hw shall point to a
+ * preallocated (generally statically allocated) struct clk. None of the fields
+ * of the struct clk need to be initialized.
+ *
+ * The data pointed to by the .init and .clk fields shall NOT be marked as
+ * init data.
+ *
+ * __clk_register is only exposed via clk-private.h and is intended for use with
+ * very large numbers of clocks that need to be statically initialized. It is
+ * a layering violation to include clk-private.h from any code which implements
+ * a clock's .ops; as such any statically initialized clock data MUST be in a
+ * separate C file from the logic that implements its operations. Returns a
+ * valid struct clk pointer on success, or an ERR_PTR() encoded error code on
+ * failure.
+ */
+struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
+{
+ int ret;
+ struct clk *clk;
+
+ clk = hw->clk;
+ clk->name = hw->init->name;
+ clk->ops = hw->init->ops;
+ clk->hw = hw;
+ clk->flags = hw->init->flags;
+ clk->parent_names = hw->init->parent_names;
+ clk->num_parents = hw->init->num_parents;
+
+ ret = __clk_init(dev, clk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk;
+}
+EXPORT_SYMBOL_GPL(__clk_register);
+
+/**
* clk_register - allocate a new clock, register it and return an opaque cookie
* @dev: device that is registering this clock
- * @name: clock name
- * @ops: operations this clock supports
* @hw: link to hardware-specific clock data
- * @parent_names: array of string names for all possible parents
- * @num_parents: number of possible parents
- * @flags: framework-level hints and quirks
*
* clk_register is the primary interface for populating the clock tree with new
* clock nodes. It returns a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with the
- * rest of the clock API.
+ * rest of the clock API. In the event of an error clk_register will return an
+ * error code; drivers must test for an error code after calling clk_register.
*/
-struct clk *clk_register(struct device *dev, const char *name,
- const struct clk_ops *ops, struct clk_hw *hw,
- char **parent_names, u8 num_parents, unsigned long flags)
+struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
+ int i, ret;
struct clk *clk;
clk = kzalloc(sizeof(*clk), GFP_KERNEL);
- if (!clk)
- return NULL;
+ if (!clk) {
+ pr_err("%s: could not allocate clk\n", __func__);
+ ret = -ENOMEM;
+ goto fail_out;
+ }
- clk->name = name;
- clk->ops = ops;
+ clk->name = kstrdup(hw->init->name, GFP_KERNEL);
+ if (!clk->name) {
+ pr_err("%s: could not allocate clk->name\n", __func__);
+ ret = -ENOMEM;
+ goto fail_name;
+ }
+ clk->ops = hw->init->ops;
clk->hw = hw;
- clk->flags = flags;
- clk->parent_names = parent_names;
- clk->num_parents = num_parents;
+ clk->flags = hw->init->flags;
+ clk->num_parents = hw->init->num_parents;
hw->clk = clk;
- __clk_init(dev, clk);
+ /* allocate local copy in case parent_names is __initdata */
+ clk->parent_names = kzalloc((sizeof(char*) * clk->num_parents),
+ GFP_KERNEL);
- return clk;
+ if (!clk->parent_names) {
+ pr_err("%s: could not allocate clk->parent_names\n", __func__);
+ ret = -ENOMEM;
+ goto fail_parent_names;
+ }
+
+ /* copy each string name in case parent_names is __initdata */
+ for (i = 0; i < clk->num_parents; i++) {
+ clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
+ GFP_KERNEL);
+ if (!clk->parent_names[i]) {
+ pr_err("%s: could not copy parent_names\n", __func__);
+ ret = -ENOMEM;
+ goto fail_parent_names_copy;
+ }
+ }
+
+ ret = __clk_init(dev, clk);
+ if (!ret)
+ return clk;
+
+fail_parent_names_copy:
+ while (--i >= 0)
+ kfree(clk->parent_names[i]);
+ kfree(clk->parent_names);
+fail_parent_names:
+ kfree(clk->name);
+fail_name:
+ kfree(clk);
+fail_out:
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);
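For illustration, the registration pattern the basic clock types above follow
with the reworked clk_register(dev, hw) interface; clk_foo and its single
recalc_rate op are hypothetical. The clk_init_data may live on the stack
because clk_register() copies the name and parent names:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/slab.h>

struct clk_foo {
	struct clk_hw hw;
	void __iomem *reg;
};

static unsigned long clk_foo_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	/* hypothetical hardware: fixed divide-by-two post-divider */
	return parent_rate / 2;
}

static const struct clk_ops clk_foo_ops = {
	.recalc_rate = clk_foo_recalc_rate,
};

struct clk *clk_register_foo(struct device *dev, const char *name,
			     const char *parent_name, void __iomem *reg)
{
	struct clk_foo *foo;
	struct clk_init_data init;
	struct clk *clk;

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &clk_foo_ops;
	init.flags = 0;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.num_parents = parent_name ? 1 : 0;

	foo->reg = reg;
	foo->hw.init = &init;

	clk = clk_register(dev, &foo->hw);
	if (IS_ERR(clk))
		kfree(foo);

	return clk;
}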
+/**
+ * clk_unregister - unregister a currently registered clock
+ * @clk: clock to unregister
+ *
+ * Currently unimplemented.
+ */
+void clk_unregister(struct clk *clk) {}
+EXPORT_SYMBOL_GPL(clk_unregister);
+
/*** clk rate change notifiers ***/
/**
diff --git a/drivers/clk/mxs/Makefile b/drivers/clk/mxs/Makefile
new file mode 100644
index 00000000000..7bedeec0852
--- /dev/null
+++ b/drivers/clk/mxs/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for mxs specific clk
+#
+
+obj-y += clk.o clk-pll.o clk-ref.o clk-div.o clk-frac.o
+
+obj-$(CONFIG_SOC_IMX23) += clk-imx23.o
+obj-$(CONFIG_SOC_IMX28) += clk-imx28.o
diff --git a/drivers/clk/mxs/clk-div.c b/drivers/clk/mxs/clk-div.c
new file mode 100644
index 00000000000..90e1da93877
--- /dev/null
+++ b/drivers/clk/mxs/clk-div.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+/**
+ * struct clk_div - mxs integer divider clock
+ * @divider: the parent class
+ * @ops: pointer to clk_ops of parent class
+ * @reg: register address
+ * @busy: busy bit shift
+ *
+ * The mxs divider clock is a subclass of the basic clk_divider with an
+ * additional busy bit.
+ */
+struct clk_div {
+ struct clk_divider divider;
+ const struct clk_ops *ops;
+ void __iomem *reg;
+ u8 busy;
+};
+
+static inline struct clk_div *to_clk_div(struct clk_hw *hw)
+{
+ struct clk_divider *divider = container_of(hw, struct clk_divider, hw);
+
+ return container_of(divider, struct clk_div, divider);
+}
+
+static unsigned long clk_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_div *div = to_clk_div(hw);
+
+ return div->ops->recalc_rate(&div->divider.hw, parent_rate);
+}
+
+static long clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_div *div = to_clk_div(hw);
+
+ return div->ops->round_rate(&div->divider.hw, rate, prate);
+}
+
+static int clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_div *div = to_clk_div(hw);
+ int ret;
+
+ ret = div->ops->set_rate(&div->divider.hw, rate, parent_rate);
+ if (!ret)
+ ret = mxs_clk_wait(div->reg, div->busy);
+
+ return ret;
+}
+
+static struct clk_ops clk_div_ops = {
+ .recalc_rate = clk_div_recalc_rate,
+ .round_rate = clk_div_round_rate,
+ .set_rate = clk_div_set_rate,
+};
+
+struct clk *mxs_clk_div(const char *name, const char *parent_name,
+ void __iomem *reg, u8 shift, u8 width, u8 busy)
+{
+ struct clk_div *div;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_div_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = (parent_name ? &parent_name: NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ div->reg = reg;
+ div->busy = busy;
+
+ div->divider.reg = reg;
+ div->divider.shift = shift;
+ div->divider.width = width;
+ div->divider.flags = CLK_DIVIDER_ONE_BASED;
+ div->divider.lock = &mxs_lock;
+ div->divider.hw.init = &init;
+ div->ops = &clk_divider_ops;
+
+ clk = clk_register(NULL, &div->divider.hw);
+ if (IS_ERR(clk))
+ kfree(div);
+
+ return clk;
+}
diff --git a/drivers/clk/mxs/clk-frac.c b/drivers/clk/mxs/clk-frac.c
new file mode 100644
index 00000000000..e6aa6b567d6
--- /dev/null
+++ b/drivers/clk/mxs/clk-frac.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+/**
+ * struct clk_frac - mxs fractional divider clock
+ * @hw: clk_hw for the fractional divider clock
+ * @reg: register address
+ * @shift: the divider bit shift
+ * @width: the divider bit width
+ * @busy: busy bit shift
+ *
+ * The clock is an adjustable fractional divider with a busy bit that is
+ * polled after the divider has been adjusted.
+ */
+struct clk_frac {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 shift;
+ u8 width;
+ u8 busy;
+};
+
+#define to_clk_frac(_hw) container_of(_hw, struct clk_frac, hw)
+
+static unsigned long clk_frac_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_frac *frac = to_clk_frac(hw);
+ u32 div;
+
+ div = readl_relaxed(frac->reg) >> frac->shift;
+ div &= (1 << frac->width) - 1;
+
+ return (parent_rate >> frac->width) * div;
+}
+
+static long clk_frac_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_frac *frac = to_clk_frac(hw);
+ unsigned long parent_rate = *prate;
+ u32 div;
+ u64 tmp;
+
+ if (rate > parent_rate)
+ return -EINVAL;
+
+ tmp = rate;
+ tmp <<= frac->width;
+ do_div(tmp, parent_rate);
+ div = tmp;
+
+ if (!div)
+ return -EINVAL;
+
+ return (parent_rate >> frac->width) * div;
+}
+
+static int clk_frac_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_frac *frac = to_clk_frac(hw);
+ unsigned long flags;
+ u32 div, val;
+ u64 tmp;
+
+ if (rate > parent_rate)
+ return -EINVAL;
+
+ tmp = rate;
+ tmp <<= frac->width;
+ do_div(tmp, parent_rate);
+ div = tmp;
+
+ if (!div)
+ return -EINVAL;
+
+ spin_lock_irqsave(&mxs_lock, flags);
+
+ val = readl_relaxed(frac->reg);
+ val &= ~(((1 << frac->width) - 1) << frac->shift);
+ val |= div << frac->shift;
+ writel_relaxed(val, frac->reg);
+
+ spin_unlock_irqrestore(&mxs_lock, flags);
+
+ return mxs_clk_wait(frac->reg, frac->busy);
+}
+
+static struct clk_ops clk_frac_ops = {
+ .recalc_rate = clk_frac_recalc_rate,
+ .round_rate = clk_frac_round_rate,
+ .set_rate = clk_frac_set_rate,
+};
+
+struct clk *mxs_clk_frac(const char *name, const char *parent_name,
+ void __iomem *reg, u8 shift, u8 width, u8 busy)
+{
+ struct clk_frac *frac;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ frac = kzalloc(sizeof(*frac), GFP_KERNEL);
+ if (!frac)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_frac_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = (parent_name ? &parent_name: NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ frac->reg = reg;
+ frac->shift = shift;
+ frac->width = width;
+ frac->busy = busy;
+ frac->hw.init = &init;
+
+ clk = clk_register(NULL, &frac->hw);
+ if (IS_ERR(clk))
+ kfree(frac);
+
+ return clk;
+}
diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c
new file mode 100644
index 00000000000..f7be225f544
--- /dev/null
+++ b/drivers/clk/mxs/clk-imx23.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <mach/common.h>
+#include <mach/mx23.h>
+#include "clk.h"
+
+#define DIGCTRL MX23_IO_ADDRESS(MX23_DIGCTL_BASE_ADDR)
+#define CLKCTRL MX23_IO_ADDRESS(MX23_CLKCTRL_BASE_ADDR)
+#define PLLCTRL0 (CLKCTRL + 0x0000)
+#define CPU (CLKCTRL + 0x0020)
+#define HBUS (CLKCTRL + 0x0030)
+#define XBUS (CLKCTRL + 0x0040)
+#define XTAL (CLKCTRL + 0x0050)
+#define PIX (CLKCTRL + 0x0060)
+#define SSP (CLKCTRL + 0x0070)
+#define GPMI (CLKCTRL + 0x0080)
+#define SPDIF (CLKCTRL + 0x0090)
+#define EMI (CLKCTRL + 0x00a0)
+#define SAIF (CLKCTRL + 0x00c0)
+#define TV (CLKCTRL + 0x00d0)
+#define ETM (CLKCTRL + 0x00e0)
+#define FRAC (CLKCTRL + 0x00f0)
+#define CLKSEQ (CLKCTRL + 0x0110)
+
+#define BP_CPU_INTERRUPT_WAIT 12
+#define BP_CLKSEQ_BYPASS_SAIF 0
+#define BP_CLKSEQ_BYPASS_SSP 5
+#define BP_SAIF_DIV_FRAC_EN 16
+#define BP_FRAC_IOFRAC 24
+
+static void __init clk_misc_init(void)
+{
+ u32 val;
+
+ /* Gate off cpu clock in WFI for power saving */
+ __mxs_setl(1 << BP_CPU_INTERRUPT_WAIT, CPU);
+
+ /* Clear BYPASS for SAIF */
+ __mxs_clrl(1 << BP_CLKSEQ_BYPASS_SAIF, CLKSEQ);
+
+ /* SAIF has to use frac div for functional operation */
+ val = readl_relaxed(SAIF);
+ val |= 1 << BP_SAIF_DIV_FRAC_EN;
+ writel_relaxed(val, SAIF);
+
+ /*
+ * Source the ssp clock from ref_io rather than ref_xtal,
+ * as ref_xtal only provides 24 MHz at maximum.
+ */
+ __mxs_clrl(1 << BP_CLKSEQ_BYPASS_SSP, CLKSEQ);
+
+ /*
+ * 480 MHz seems too high to be the ssp clock source directly,
+ * so set frac to get a 288 MHz ref_io.
+ */
+ __mxs_clrl(0x3f << BP_FRAC_IOFRAC, FRAC);
+ __mxs_setl(30 << BP_FRAC_IOFRAC, FRAC);
+}
+
+static struct clk_lookup uart_lookups[] __initdata = {
+ { .dev_id = "duart", },
+ { .dev_id = "mxs-auart.0", },
+ { .dev_id = "mxs-auart.1", },
+ { .dev_id = "8006c000.serial", },
+ { .dev_id = "8006e000.serial", },
+ { .dev_id = "80070000.serial", },
+};
+
+static struct clk_lookup hbus_lookups[] __initdata = {
+ { .dev_id = "imx23-dma-apbh", },
+ { .dev_id = "80004000.dma-apbh", },
+};
+
+static struct clk_lookup xbus_lookups[] __initdata = {
+ { .dev_id = "duart", .con_id = "apb_pclk"},
+ { .dev_id = "80070000.serial", .con_id = "apb_pclk"},
+ { .dev_id = "imx23-dma-apbx", },
+ { .dev_id = "80024000.dma-apbx", },
+};
+
+static struct clk_lookup ssp_lookups[] __initdata = {
+ { .dev_id = "imx23-mmc.0", },
+ { .dev_id = "imx23-mmc.1", },
+ { .dev_id = "80010000.ssp", },
+ { .dev_id = "80034000.ssp", },
+};
+
+static struct clk_lookup lcdif_lookups[] __initdata = {
+ { .dev_id = "imx23-fb", },
+ { .dev_id = "80030000.lcdif", },
+};
+
+static struct clk_lookup gpmi_lookups[] __initdata = {
+ { .dev_id = "imx23-gpmi-nand", },
+ { .dev_id = "8000c000.gpmi", },
+};
+
+static const char *sel_pll[] __initconst = { "pll", "ref_xtal", };
+static const char *sel_cpu[] __initconst = { "ref_cpu", "ref_xtal", };
+static const char *sel_pix[] __initconst = { "ref_pix", "ref_xtal", };
+static const char *sel_io[] __initconst = { "ref_io", "ref_xtal", };
+static const char *cpu_sels[] __initconst = { "cpu_pll", "cpu_xtal", };
+static const char *emi_sels[] __initconst = { "emi_pll", "emi_xtal", };
+
+enum imx23_clk {
+ ref_xtal, pll, ref_cpu, ref_emi, ref_pix, ref_io, saif_sel,
+ lcdif_sel, gpmi_sel, ssp_sel, emi_sel, cpu, etm_sel, cpu_pll,
+ cpu_xtal, hbus, xbus, lcdif_div, ssp_div, gpmi_div, emi_pll,
+ emi_xtal, etm_div, saif_div, clk32k_div, rtc, adc, spdif_div,
+ clk32k, dri, pwm, filt, uart, ssp, gpmi, spdif, emi, saif,
+ lcdif, etm, usb, usb_pwr,
+ clk_max
+};
+
+static struct clk *clks[clk_max];
+
+static enum imx23_clk clks_init_on[] __initdata = {
+ cpu, hbus, xbus, emi, uart,
+};
+
+int __init mx23_clocks_init(void)
+{
+ int i;
+
+ clk_misc_init();
+
+ clks[ref_xtal] = mxs_clk_fixed("ref_xtal", 24000000);
+ clks[pll] = mxs_clk_pll("pll", "ref_xtal", PLLCTRL0, 16, 480000000);
+ clks[ref_cpu] = mxs_clk_ref("ref_cpu", "pll", FRAC, 0);
+ clks[ref_emi] = mxs_clk_ref("ref_emi", "pll", FRAC, 1);
+ clks[ref_pix] = mxs_clk_ref("ref_pix", "pll", FRAC, 2);
+ clks[ref_io] = mxs_clk_ref("ref_io", "pll", FRAC, 3);
+ clks[saif_sel] = mxs_clk_mux("saif_sel", CLKSEQ, 0, 1, sel_pll, ARRAY_SIZE(sel_pll));
+ clks[lcdif_sel] = mxs_clk_mux("lcdif_sel", CLKSEQ, 1, 1, sel_pix, ARRAY_SIZE(sel_pix));
+ clks[gpmi_sel] = mxs_clk_mux("gpmi_sel", CLKSEQ, 4, 1, sel_io, ARRAY_SIZE(sel_io));
+ clks[ssp_sel] = mxs_clk_mux("ssp_sel", CLKSEQ, 5, 1, sel_io, ARRAY_SIZE(sel_io));
+ clks[emi_sel] = mxs_clk_mux("emi_sel", CLKSEQ, 6, 1, emi_sels, ARRAY_SIZE(emi_sels));
+ clks[cpu] = mxs_clk_mux("cpu", CLKSEQ, 7, 1, cpu_sels, ARRAY_SIZE(cpu_sels));
+ clks[etm_sel] = mxs_clk_mux("etm_sel", CLKSEQ, 8, 1, sel_cpu, ARRAY_SIZE(sel_cpu));
+ clks[cpu_pll] = mxs_clk_div("cpu_pll", "ref_cpu", CPU, 0, 6, 28);
+ clks[cpu_xtal] = mxs_clk_div("cpu_xtal", "ref_xtal", CPU, 16, 10, 29);
+ clks[hbus] = mxs_clk_div("hbus", "cpu", HBUS, 0, 5, 29);
+ clks[xbus] = mxs_clk_div("xbus", "ref_xtal", XBUS, 0, 10, 31);
+ clks[lcdif_div] = mxs_clk_div("lcdif_div", "lcdif_sel", PIX, 0, 12, 29);
+ clks[ssp_div] = mxs_clk_div("ssp_div", "ssp_sel", SSP, 0, 9, 29);
+ clks[gpmi_div] = mxs_clk_div("gpmi_div", "gpmi_sel", GPMI, 0, 10, 29);
+ clks[emi_pll] = mxs_clk_div("emi_pll", "ref_emi", EMI, 0, 6, 28);
+ clks[emi_xtal] = mxs_clk_div("emi_xtal", "ref_xtal", EMI, 8, 4, 29);
+ clks[etm_div] = mxs_clk_div("etm_div", "etm_sel", ETM, 0, 6, 29);
+ clks[saif_div] = mxs_clk_frac("saif_div", "saif_sel", SAIF, 0, 16, 29);
+ clks[clk32k_div] = mxs_clk_fixed_factor("clk32k_div", "ref_xtal", 1, 750);
+ clks[rtc] = mxs_clk_fixed_factor("rtc", "ref_xtal", 1, 768);
+ clks[adc] = mxs_clk_fixed_factor("adc", "clk32k", 1, 16);
+ clks[spdif_div] = mxs_clk_fixed_factor("spdif_div", "pll", 1, 4);
+ clks[clk32k] = mxs_clk_gate("clk32k", "clk32k_div", XTAL, 26);
+ clks[dri] = mxs_clk_gate("dri", "ref_xtal", XTAL, 28);
+ clks[pwm] = mxs_clk_gate("pwm", "ref_xtal", XTAL, 29);
+ clks[filt] = mxs_clk_gate("filt", "ref_xtal", XTAL, 30);
+ clks[uart] = mxs_clk_gate("uart", "ref_xtal", XTAL, 31);
+ clks[ssp] = mxs_clk_gate("ssp", "ssp_div", SSP, 31);
+ clks[gpmi] = mxs_clk_gate("gpmi", "gpmi_div", GPMI, 31);
+ clks[spdif] = mxs_clk_gate("spdif", "spdif_div", SPDIF, 31);
+ clks[emi] = mxs_clk_gate("emi", "emi_sel", EMI, 31);
+ clks[saif] = mxs_clk_gate("saif", "saif_div", SAIF, 31);
+ clks[lcdif] = mxs_clk_gate("lcdif", "lcdif_div", PIX, 31);
+ clks[etm] = mxs_clk_gate("etm", "etm_div", ETM, 31);
+ clks[usb] = mxs_clk_gate("usb", "usb_pwr", DIGCTRL, 2);
+ clks[usb_pwr] = clk_register_gate(NULL, "usb_pwr", "pll", 0, PLLCTRL0, 18, 0, &mxs_lock);
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++)
+ if (IS_ERR(clks[i])) {
+ pr_err("i.MX23 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clks[i]));
+ return PTR_ERR(clks[i]);
+ }
+
+ clk_register_clkdev(clks[clk32k], NULL, "timrot");
+ clk_register_clkdevs(clks[hbus], hbus_lookups, ARRAY_SIZE(hbus_lookups));
+ clk_register_clkdevs(clks[xbus], xbus_lookups, ARRAY_SIZE(xbus_lookups));
+ clk_register_clkdevs(clks[uart], uart_lookups, ARRAY_SIZE(uart_lookups));
+ clk_register_clkdevs(clks[ssp], ssp_lookups, ARRAY_SIZE(ssp_lookups));
+ clk_register_clkdevs(clks[gpmi], gpmi_lookups, ARRAY_SIZE(gpmi_lookups));
+ clk_register_clkdevs(clks[lcdif], lcdif_lookups, ARRAY_SIZE(lcdif_lookups));
+
+ for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
+ clk_prepare_enable(clks[clks_init_on[i]]);
+
+ mxs_timer_init(MX23_INT_TIMER0);
+
+ return 0;
+}
diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c
new file mode 100644
index 00000000000..2826a2606a2
--- /dev/null
+++ b/drivers/clk/mxs/clk-imx28.c
@@ -0,0 +1,338 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <mach/common.h>
+#include <mach/mx28.h>
+#include "clk.h"
+
+#define CLKCTRL MX28_IO_ADDRESS(MX28_CLKCTRL_BASE_ADDR)
+#define PLL0CTRL0 (CLKCTRL + 0x0000)
+#define PLL1CTRL0 (CLKCTRL + 0x0020)
+#define PLL2CTRL0 (CLKCTRL + 0x0040)
+#define CPU (CLKCTRL + 0x0050)
+#define HBUS (CLKCTRL + 0x0060)
+#define XBUS (CLKCTRL + 0x0070)
+#define XTAL (CLKCTRL + 0x0080)
+#define SSP0 (CLKCTRL + 0x0090)
+#define SSP1 (CLKCTRL + 0x00a0)
+#define SSP2 (CLKCTRL + 0x00b0)
+#define SSP3 (CLKCTRL + 0x00c0)
+#define GPMI (CLKCTRL + 0x00d0)
+#define SPDIF (CLKCTRL + 0x00e0)
+#define EMI (CLKCTRL + 0x00f0)
+#define SAIF0 (CLKCTRL + 0x0100)
+#define SAIF1 (CLKCTRL + 0x0110)
+#define LCDIF (CLKCTRL + 0x0120)
+#define ETM (CLKCTRL + 0x0130)
+#define ENET (CLKCTRL + 0x0140)
+#define FLEXCAN (CLKCTRL + 0x0160)
+#define FRAC0 (CLKCTRL + 0x01b0)
+#define FRAC1 (CLKCTRL + 0x01c0)
+#define CLKSEQ (CLKCTRL + 0x01d0)
+
+#define BP_CPU_INTERRUPT_WAIT 12
+#define BP_SAIF_DIV_FRAC_EN 16
+#define BP_ENET_DIV_TIME 21
+#define BP_ENET_SLEEP 31
+#define BP_CLKSEQ_BYPASS_SAIF0 0
+#define BP_CLKSEQ_BYPASS_SSP0 3
+#define BP_FRAC0_IO1FRAC 16
+#define BP_FRAC0_IO0FRAC 24
+
+#define DIGCTRL MX28_IO_ADDRESS(MX28_DIGCTL_BASE_ADDR)
+#define BP_SAIF_CLKMUX 10
+
+/*
+ * HW_SAIF_CLKMUX_SEL:
+ * DIRECT(0x0): SAIF0 clock pins selected for SAIF0 input clocks, and SAIF1
+ * clock pins selected for SAIF1 input clocks.
+ * CROSSINPUT(0x1): SAIF1 clock inputs selected for SAIF0 input clocks, and
+ * SAIF0 clock inputs selected for SAIF1 input clocks.
+ * EXTMSTR0(0x2): SAIF0 clock pin selected for both SAIF0 and SAIF1 input
+ * clocks.
+ * EXTMSTR1(0x3): SAIF1 clock pin selected for both SAIF0 and SAIF1 input
+ * clocks.
+ */
+int mxs_saif_clkmux_select(unsigned int clkmux)
+{
+ if (clkmux > 0x3)
+ return -EINVAL;
+
+ __mxs_clrl(0x3 << BP_SAIF_CLKMUX, DIGCTRL);
+ __mxs_setl(clkmux << BP_SAIF_CLKMUX, DIGCTRL);
+
+ return 0;
+}
+
+static void __init clk_misc_init(void)
+{
+ u32 val;
+
+ /* Gate off cpu clock in WFI for power saving */
+ __mxs_setl(1 << BP_CPU_INTERRUPT_WAIT, CPU);
+
+ /* 0 is a bad default value for a divider */
+ __mxs_setl(1 << BP_ENET_DIV_TIME, ENET);
+
+ /* Clear BYPASS for SAIF */
+ __mxs_clrl(0x3 << BP_CLKSEQ_BYPASS_SAIF0, CLKSEQ);
+
+ /* SAIF has to use frac div for functional operation */
+ val = readl_relaxed(SAIF0);
+ val |= 1 << BP_SAIF_DIV_FRAC_EN;
+ writel_relaxed(val, SAIF0);
+
+ val = readl_relaxed(SAIF1);
+ val |= 1 << BP_SAIF_DIV_FRAC_EN;
+ writel_relaxed(val, SAIF1);
+
+ /* Extra fec clock setting */
+ val = readl_relaxed(ENET);
+ val &= ~(1 << BP_ENET_SLEEP);
+ writel_relaxed(val, ENET);
+
+ /*
+ * Source the ssp clock from ref_io rather than ref_xtal,
+ * as ref_xtal only provides 24 MHz at maximum.
+ */
+ __mxs_clrl(0xf << BP_CLKSEQ_BYPASS_SSP0, CLKSEQ);
+
+ /*
+ * 480 MHz seems too high to be the ssp clock source directly,
+ * so set frac0 to get a 288 MHz ref_io0.
+ */
+ val = readl_relaxed(FRAC0);
+ val &= ~(0x3f << BP_FRAC0_IO0FRAC);
+ val |= 30 << BP_FRAC0_IO0FRAC;
+ writel_relaxed(val, FRAC0);
+}
+
+static struct clk_lookup uart_lookups[] __initdata = {
+ { .dev_id = "duart", },
+ { .dev_id = "mxs-auart.0", },
+ { .dev_id = "mxs-auart.1", },
+ { .dev_id = "mxs-auart.2", },
+ { .dev_id = "mxs-auart.3", },
+ { .dev_id = "mxs-auart.4", },
+ { .dev_id = "8006a000.serial", },
+ { .dev_id = "8006c000.serial", },
+ { .dev_id = "8006e000.serial", },
+ { .dev_id = "80070000.serial", },
+ { .dev_id = "80072000.serial", },
+ { .dev_id = "80074000.serial", },
+};
+
+static struct clk_lookup hbus_lookups[] __initdata = {
+ { .dev_id = "imx28-dma-apbh", },
+ { .dev_id = "80004000.dma-apbh", },
+};
+
+static struct clk_lookup xbus_lookups[] __initdata = {
+ { .dev_id = "duart", .con_id = "apb_pclk"},
+ { .dev_id = "80074000.serial", .con_id = "apb_pclk"},
+ { .dev_id = "imx28-dma-apbx", },
+ { .dev_id = "80024000.dma-apbx", },
+};
+
+static struct clk_lookup ssp0_lookups[] __initdata = {
+ { .dev_id = "imx28-mmc.0", },
+ { .dev_id = "80010000.ssp", },
+};
+
+static struct clk_lookup ssp1_lookups[] __initdata = {
+ { .dev_id = "imx28-mmc.1", },
+ { .dev_id = "80012000.ssp", },
+};
+
+static struct clk_lookup ssp2_lookups[] __initdata = {
+ { .dev_id = "imx28-mmc.2", },
+ { .dev_id = "80014000.ssp", },
+};
+
+static struct clk_lookup ssp3_lookups[] __initdata = {
+ { .dev_id = "imx28-mmc.3", },
+ { .dev_id = "80016000.ssp", },
+};
+
+static struct clk_lookup lcdif_lookups[] __initdata = {
+ { .dev_id = "imx28-fb", },
+ { .dev_id = "80030000.lcdif", },
+};
+
+static struct clk_lookup gpmi_lookups[] __initdata = {
+ { .dev_id = "imx28-gpmi-nand", },
+ { .dev_id = "8000c000.gpmi", },
+};
+
+static struct clk_lookup fec_lookups[] __initdata = {
+ { .dev_id = "imx28-fec.0", },
+ { .dev_id = "imx28-fec.1", },
+ { .dev_id = "800f0000.ethernet", },
+ { .dev_id = "800f4000.ethernet", },
+};
+
+static struct clk_lookup can0_lookups[] __initdata = {
+ { .dev_id = "flexcan.0", },
+ { .dev_id = "80032000.can", },
+};
+
+static struct clk_lookup can1_lookups[] __initdata = {
+ { .dev_id = "flexcan.1", },
+ { .dev_id = "80034000.can", },
+};
+
+static struct clk_lookup saif0_lookups[] __initdata = {
+ { .dev_id = "mxs-saif.0", },
+ { .dev_id = "80042000.saif", },
+};
+
+static struct clk_lookup saif1_lookups[] __initdata = {
+ { .dev_id = "mxs-saif.1", },
+ { .dev_id = "80046000.saif", },
+};
+
+static const char *sel_cpu[] __initconst = { "ref_cpu", "ref_xtal", };
+static const char *sel_io0[] __initconst = { "ref_io0", "ref_xtal", };
+static const char *sel_io1[] __initconst = { "ref_io1", "ref_xtal", };
+static const char *sel_pix[] __initconst = { "ref_pix", "ref_xtal", };
+static const char *sel_gpmi[] __initconst = { "ref_gpmi", "ref_xtal", };
+static const char *sel_pll0[] __initconst = { "pll0", "ref_xtal", };
+static const char *cpu_sels[] __initconst = { "cpu_pll", "cpu_xtal", };
+static const char *emi_sels[] __initconst = { "emi_pll", "emi_xtal", };
+static const char *ptp_sels[] __initconst = { "ref_xtal", "pll0", };
+
+enum imx28_clk {
+ ref_xtal, pll0, pll1, pll2, ref_cpu, ref_emi, ref_io0, ref_io1,
+ ref_pix, ref_hsadc, ref_gpmi, saif0_sel, saif1_sel, gpmi_sel,
+ ssp0_sel, ssp1_sel, ssp2_sel, ssp3_sel, emi_sel, etm_sel,
+ lcdif_sel, cpu, ptp_sel, cpu_pll, cpu_xtal, hbus, xbus,
+ ssp0_div, ssp1_div, ssp2_div, ssp3_div, gpmi_div, emi_pll,
+ emi_xtal, lcdif_div, etm_div, ptp, saif0_div, saif1_div,
+ clk32k_div, rtc, lradc, spdif_div, clk32k, pwm, uart, ssp0,
+ ssp1, ssp2, ssp3, gpmi, spdif, emi, saif0, saif1, lcdif, etm,
+ fec, can0, can1, usb0, usb1, usb0_pwr, usb1_pwr, enet_out,
+ clk_max
+};
+
+static struct clk *clks[clk_max];
+
+static enum imx28_clk clks_init_on[] __initdata = {
+ cpu, hbus, xbus, emi, uart,
+};
+
+int __init mx28_clocks_init(void)
+{
+ int i;
+
+ clk_misc_init();
+
+ clks[ref_xtal] = mxs_clk_fixed("ref_xtal", 24000000);
+ clks[pll0] = mxs_clk_pll("pll0", "ref_xtal", PLL0CTRL0, 17, 480000000);
+ clks[pll1] = mxs_clk_pll("pll1", "ref_xtal", PLL1CTRL0, 17, 480000000);
+ clks[pll2] = mxs_clk_pll("pll2", "ref_xtal", PLL2CTRL0, 23, 50000000);
+ clks[ref_cpu] = mxs_clk_ref("ref_cpu", "pll0", FRAC0, 0);
+ clks[ref_emi] = mxs_clk_ref("ref_emi", "pll0", FRAC0, 1);
+ clks[ref_io0] = mxs_clk_ref("ref_io0", "pll0", FRAC0, 2);
+ clks[ref_io1] = mxs_clk_ref("ref_io1", "pll0", FRAC0, 3);
+ clks[ref_pix] = mxs_clk_ref("ref_pix", "pll0", FRAC1, 0);
+ clks[ref_hsadc] = mxs_clk_ref("ref_hsadc", "pll0", FRAC1, 1);
+ clks[ref_gpmi] = mxs_clk_ref("ref_gpmi", "pll0", FRAC1, 2);
+ clks[saif0_sel] = mxs_clk_mux("saif0_sel", CLKSEQ, 0, 1, sel_pll0, ARRAY_SIZE(sel_pll0));
+ clks[saif1_sel] = mxs_clk_mux("saif1_sel", CLKSEQ, 1, 1, sel_pll0, ARRAY_SIZE(sel_pll0));
+ clks[gpmi_sel] = mxs_clk_mux("gpmi_sel", CLKSEQ, 2, 1, sel_gpmi, ARRAY_SIZE(sel_gpmi));
+ clks[ssp0_sel] = mxs_clk_mux("ssp0_sel", CLKSEQ, 3, 1, sel_io0, ARRAY_SIZE(sel_io0));
+ clks[ssp1_sel] = mxs_clk_mux("ssp1_sel", CLKSEQ, 4, 1, sel_io0, ARRAY_SIZE(sel_io0));
+ clks[ssp2_sel] = mxs_clk_mux("ssp2_sel", CLKSEQ, 5, 1, sel_io1, ARRAY_SIZE(sel_io1));
+ clks[ssp3_sel] = mxs_clk_mux("ssp3_sel", CLKSEQ, 6, 1, sel_io1, ARRAY_SIZE(sel_io1));
+ clks[emi_sel] = mxs_clk_mux("emi_sel", CLKSEQ, 7, 1, emi_sels, ARRAY_SIZE(emi_sels));
+ clks[etm_sel] = mxs_clk_mux("etm_sel", CLKSEQ, 8, 1, sel_cpu, ARRAY_SIZE(sel_cpu));
+ clks[lcdif_sel] = mxs_clk_mux("lcdif_sel", CLKSEQ, 14, 1, sel_pix, ARRAY_SIZE(sel_pix));
+ clks[cpu] = mxs_clk_mux("cpu", CLKSEQ, 18, 1, cpu_sels, ARRAY_SIZE(cpu_sels));
+ clks[ptp_sel] = mxs_clk_mux("ptp_sel", ENET, 19, 1, ptp_sels, ARRAY_SIZE(ptp_sels));
+ clks[cpu_pll] = mxs_clk_div("cpu_pll", "ref_cpu", CPU, 0, 6, 28);
+ clks[cpu_xtal] = mxs_clk_div("cpu_xtal", "ref_xtal", CPU, 16, 10, 29);
+ clks[hbus] = mxs_clk_div("hbus", "cpu", HBUS, 0, 5, 31);
+ clks[xbus] = mxs_clk_div("xbus", "ref_xtal", XBUS, 0, 10, 31);
+ clks[ssp0_div] = mxs_clk_div("ssp0_div", "ssp0_sel", SSP0, 0, 9, 29);
+ clks[ssp1_div] = mxs_clk_div("ssp1_div", "ssp1_sel", SSP1, 0, 9, 29);
+ clks[ssp2_div] = mxs_clk_div("ssp2_div", "ssp2_sel", SSP2, 0, 9, 29);
+ clks[ssp3_div] = mxs_clk_div("ssp3_div", "ssp3_sel", SSP3, 0, 9, 29);
+ clks[gpmi_div] = mxs_clk_div("gpmi_div", "gpmi_sel", GPMI, 0, 10, 29);
+ clks[emi_pll] = mxs_clk_div("emi_pll", "ref_emi", EMI, 0, 6, 28);
+ clks[emi_xtal] = mxs_clk_div("emi_xtal", "ref_xtal", EMI, 8, 4, 29);
+ clks[lcdif_div] = mxs_clk_div("lcdif_div", "lcdif_sel", LCDIF, 0, 13, 29);
+ clks[etm_div] = mxs_clk_div("etm_div", "etm_sel", ETM, 0, 7, 29);
+ clks[ptp] = mxs_clk_div("ptp", "ptp_sel", ENET, 21, 6, 27);
+ clks[saif0_div] = mxs_clk_frac("saif0_div", "saif0_sel", SAIF0, 0, 16, 29);
+ clks[saif1_div] = mxs_clk_frac("saif1_div", "saif1_sel", SAIF1, 0, 16, 29);
+ clks[clk32k_div] = mxs_clk_fixed_factor("clk32k_div", "ref_xtal", 1, 750);
+ clks[rtc] = mxs_clk_fixed_factor("rtc", "ref_xtal", 1, 768);
+ clks[lradc] = mxs_clk_fixed_factor("lradc", "clk32k", 1, 16);
+ clks[spdif_div] = mxs_clk_fixed_factor("spdif_div", "pll0", 1, 4);
+ clks[clk32k] = mxs_clk_gate("clk32k", "clk32k_div", XTAL, 26);
+ clks[pwm] = mxs_clk_gate("pwm", "ref_xtal", XTAL, 29);
+ clks[uart] = mxs_clk_gate("uart", "ref_xtal", XTAL, 31);
+ clks[ssp0] = mxs_clk_gate("ssp0", "ssp0_div", SSP0, 31);
+ clks[ssp1] = mxs_clk_gate("ssp1", "ssp1_div", SSP1, 31);
+ clks[ssp2] = mxs_clk_gate("ssp2", "ssp2_div", SSP2, 31);
+ clks[ssp3] = mxs_clk_gate("ssp3", "ssp3_div", SSP3, 31);
+ clks[gpmi] = mxs_clk_gate("gpmi", "gpmi_div", GPMI, 31);
+ clks[spdif] = mxs_clk_gate("spdif", "spdif_div", SPDIF, 31);
+ clks[emi] = mxs_clk_gate("emi", "emi_sel", EMI, 31);
+ clks[saif0] = mxs_clk_gate("saif0", "saif0_div", SAIF0, 31);
+ clks[saif1] = mxs_clk_gate("saif1", "saif1_div", SAIF1, 31);
+ clks[lcdif] = mxs_clk_gate("lcdif", "lcdif_div", LCDIF, 31);
+ clks[etm] = mxs_clk_gate("etm", "etm_div", ETM, 31);
+ clks[fec] = mxs_clk_gate("fec", "hbus", ENET, 30);
+ clks[can0] = mxs_clk_gate("can0", "ref_xtal", FLEXCAN, 30);
+ clks[can1] = mxs_clk_gate("can1", "ref_xtal", FLEXCAN, 28);
+ clks[usb0] = mxs_clk_gate("usb0", "usb0_pwr", DIGCTRL, 2);
+ clks[usb1] = mxs_clk_gate("usb1", "usb1_pwr", DIGCTRL, 16);
+ clks[usb0_pwr] = clk_register_gate(NULL, "usb0_pwr", "pll0", 0, PLL0CTRL0, 18, 0, &mxs_lock);
+ clks[usb1_pwr] = clk_register_gate(NULL, "usb1_pwr", "pll1", 0, PLL1CTRL0, 18, 0, &mxs_lock);
+ clks[enet_out] = clk_register_gate(NULL, "enet_out", "pll2", 0, ENET, 18, 0, &mxs_lock);
+
+ for (i = 0; i < ARRAY_SIZE(clks); i++)
+ if (IS_ERR(clks[i])) {
+ pr_err("i.MX28 clk %d: register failed with %ld\n",
+ i, PTR_ERR(clks[i]));
+ return PTR_ERR(clks[i]);
+ }
+
+ clk_register_clkdev(clks[clk32k], NULL, "timrot");
+ clk_register_clkdev(clks[enet_out], NULL, "enet_out");
+ clk_register_clkdevs(clks[hbus], hbus_lookups, ARRAY_SIZE(hbus_lookups));
+ clk_register_clkdevs(clks[xbus], xbus_lookups, ARRAY_SIZE(xbus_lookups));
+ clk_register_clkdevs(clks[uart], uart_lookups, ARRAY_SIZE(uart_lookups));
+ clk_register_clkdevs(clks[ssp0], ssp0_lookups, ARRAY_SIZE(ssp0_lookups));
+ clk_register_clkdevs(clks[ssp1], ssp1_lookups, ARRAY_SIZE(ssp1_lookups));
+ clk_register_clkdevs(clks[ssp2], ssp2_lookups, ARRAY_SIZE(ssp2_lookups));
+ clk_register_clkdevs(clks[ssp3], ssp3_lookups, ARRAY_SIZE(ssp3_lookups));
+ clk_register_clkdevs(clks[gpmi], gpmi_lookups, ARRAY_SIZE(gpmi_lookups));
+ clk_register_clkdevs(clks[saif0], saif0_lookups, ARRAY_SIZE(saif0_lookups));
+ clk_register_clkdevs(clks[saif1], saif1_lookups, ARRAY_SIZE(saif1_lookups));
+ clk_register_clkdevs(clks[lcdif], lcdif_lookups, ARRAY_SIZE(lcdif_lookups));
+ clk_register_clkdevs(clks[fec], fec_lookups, ARRAY_SIZE(fec_lookups));
+ clk_register_clkdevs(clks[can0], can0_lookups, ARRAY_SIZE(can0_lookups));
+ clk_register_clkdevs(clks[can1], can1_lookups, ARRAY_SIZE(can1_lookups));
+
+ for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
+ clk_prepare_enable(clks[clks_init_on[i]]);
+
+ mxs_timer_init(MX28_INT_TIMER0);
+
+ return 0;
+}
diff --git a/drivers/clk/mxs/clk-pll.c b/drivers/clk/mxs/clk-pll.c
new file mode 100644
index 00000000000..fadae41833e
--- /dev/null
+++ b/drivers/clk/mxs/clk-pll.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+/**
+ * struct clk_pll - mxs pll clock
+ * @hw: clk_hw for the pll
+ * @base: base address of the pll
+ * @power: the shift of power bit
+ * @rate: the clock rate of the pll
+ *
+ * The mxs pll is a fixed-rate clock with power and gate control, and
+ * the gate bit shift is always 31.
+ */
+struct clk_pll {
+ struct clk_hw hw;
+ void __iomem *base;
+ u8 power;
+ unsigned long rate;
+};
+
+#define to_clk_pll(_hw) container_of(_hw, struct clk_pll, hw)
+
+static int clk_pll_prepare(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+
+ writel_relaxed(1 << pll->power, pll->base + SET);
+
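+	/* brief delay, presumably to let the PLL stabilize after power-up */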
+ udelay(10);
+
+ return 0;
+}
+
+static void clk_pll_unprepare(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+
+ writel_relaxed(1 << pll->power, pll->base + CLR);
+}
+
+static int clk_pll_enable(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+
+ writel_relaxed(1 << 31, pll->base + CLR);
+
+ return 0;
+}
+
+static void clk_pll_disable(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+
+ writel_relaxed(1 << 31, pll->base + SET);
+}
+
+static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+
+ return pll->rate;
+}
+
+static const struct clk_ops clk_pll_ops = {
+ .prepare = clk_pll_prepare,
+ .unprepare = clk_pll_unprepare,
+ .enable = clk_pll_enable,
+ .disable = clk_pll_disable,
+ .recalc_rate = clk_pll_recalc_rate,
+};
+
+struct clk *mxs_clk_pll(const char *name, const char *parent_name,
+ void __iomem *base, u8 power, unsigned long rate)
+{
+ struct clk_pll *pll;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_pll_ops;
+ init.flags = 0;
+	init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ pll->base = base;
+ pll->rate = rate;
+ pll->power = power;
+ pll->hw.init = &init;
+
+ clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
diff --git a/drivers/clk/mxs/clk-ref.c b/drivers/clk/mxs/clk-ref.c
new file mode 100644
index 00000000000..4adeed6c2f9
--- /dev/null
+++ b/drivers/clk/mxs/clk-ref.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "clk.h"
+
+/**
+ * struct clk_ref - mxs reference clock
+ * @hw: clk_hw for the reference clock
+ * @reg: register address
+ * @idx: the index of the reference clock within the same register
+ *
+ * The mxs reference clock is sourced from a pll. Every 4 reference clocks
+ * share one register, and @idx is used to identify them. Each reference
+ * clock has a gate control and a fractional divider. The rate is calculated
+ * as pll rate * (18 / FRAC), where FRAC = 18 ~ 35.
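+ * For example, with pll = 480 MHz and FRAC = 30 the reference clock runs
+ * at 480 * 18 / 30 = 288 MHz.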
+ */
+struct clk_ref {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 idx;
+};
+
+#define to_clk_ref(_hw) container_of(_hw, struct clk_ref, hw)
+
+static int clk_ref_enable(struct clk_hw *hw)
+{
+ struct clk_ref *ref = to_clk_ref(hw);
+
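+	/* the gate bit is the MSB of this ref clock's byte; clear it to ungate */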
+ writel_relaxed(1 << ((ref->idx + 1) * 8 - 1), ref->reg + CLR);
+
+ return 0;
+}
+
+static void clk_ref_disable(struct clk_hw *hw)
+{
+ struct clk_ref *ref = to_clk_ref(hw);
+
+ writel_relaxed(1 << ((ref->idx + 1) * 8 - 1), ref->reg + SET);
+}
+
+static unsigned long clk_ref_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_ref *ref = to_clk_ref(hw);
+ u64 tmp = parent_rate;
+ u8 frac = (readl_relaxed(ref->reg) >> (ref->idx * 8)) & 0x3f;
+
+ tmp *= 18;
+ do_div(tmp, frac);
+
+ return tmp;
+}
+
+static long clk_ref_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ unsigned long parent_rate = *prate;
+ u64 tmp = parent_rate;
+ u8 frac;
+
+ tmp = tmp * 18 + rate / 2;
+ do_div(tmp, rate);
+ frac = tmp;
+
+ if (frac < 18)
+ frac = 18;
+ else if (frac > 35)
+ frac = 35;
+
+ tmp = parent_rate;
+ tmp *= 18;
+ do_div(tmp, frac);
+
+ return tmp;
+}
+
+static int clk_ref_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_ref *ref = to_clk_ref(hw);
+ unsigned long flags;
+ u64 tmp = parent_rate;
+ u32 val;
+ u8 frac, shift = ref->idx * 8;
+
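+	/* round parent_rate * 18 / rate to the nearest integer, clamp to [18, 35] */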
+ tmp = tmp * 18 + rate / 2;
+ do_div(tmp, rate);
+ frac = tmp;
+
+ if (frac < 18)
+ frac = 18;
+ else if (frac > 35)
+ frac = 35;
+
+ spin_lock_irqsave(&mxs_lock, flags);
+
+ val = readl_relaxed(ref->reg);
+ val &= ~(0x3f << shift);
+ val |= frac << shift;
+ writel_relaxed(val, ref->reg);
+
+ spin_unlock_irqrestore(&mxs_lock, flags);
+
+ return 0;
+}
+
+static const struct clk_ops clk_ref_ops = {
+ .enable = clk_ref_enable,
+ .disable = clk_ref_disable,
+ .recalc_rate = clk_ref_recalc_rate,
+ .round_rate = clk_ref_round_rate,
+ .set_rate = clk_ref_set_rate,
+};
+
+struct clk *mxs_clk_ref(const char *name, const char *parent_name,
+ void __iomem *reg, u8 idx)
+{
+ struct clk_ref *ref;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!ref)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_ref_ops;
+ init.flags = 0;
+	init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ ref->reg = reg;
+ ref->idx = idx;
+ ref->hw.init = &init;
+
+ clk = clk_register(NULL, &ref->hw);
+ if (IS_ERR(clk))
+ kfree(ref);
+
+ return clk;
+}
diff --git a/drivers/clk/mxs/clk.c b/drivers/clk/mxs/clk.c
new file mode 100644
index 00000000000..b24d56067c8
--- /dev/null
+++ b/drivers/clk/mxs/clk.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+
+DEFINE_SPINLOCK(mxs_lock);
+
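+/* wait for the bit at @shift in @reg to clear, giving up after 10 ms */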
+int mxs_clk_wait(void __iomem *reg, u8 shift)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(10);
+
+ while (readl_relaxed(reg) & (1 << shift))
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ return 0;
+}
diff --git a/drivers/clk/mxs/clk.h b/drivers/clk/mxs/clk.h
new file mode 100644
index 00000000000..81421e28e69
--- /dev/null
+++ b/drivers/clk/mxs/clk.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#ifndef __MXS_CLK_H
+#define __MXS_CLK_H
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+
+#define SET 0x4
+#define CLR 0x8
+
+extern spinlock_t mxs_lock;
+
+int mxs_clk_wait(void __iomem *reg, u8 shift);
+
+struct clk *mxs_clk_pll(const char *name, const char *parent_name,
+ void __iomem *base, u8 power, unsigned long rate);
+
+struct clk *mxs_clk_ref(const char *name, const char *parent_name,
+ void __iomem *reg, u8 idx);
+
+struct clk *mxs_clk_div(const char *name, const char *parent_name,
+ void __iomem *reg, u8 shift, u8 width, u8 busy);
+
+struct clk *mxs_clk_frac(const char *name, const char *parent_name,
+ void __iomem *reg, u8 shift, u8 width, u8 busy);
+
+static inline struct clk *mxs_clk_fixed(const char *name, int rate)
+{
+ return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
+}
+
+static inline struct clk *mxs_clk_gate(const char *name,
+ const char *parent_name, void __iomem *reg, u8 shift)
+{
+ return clk_register_gate(NULL, name, parent_name, CLK_SET_RATE_PARENT,
+ reg, shift, CLK_GATE_SET_TO_DISABLE,
+ &mxs_lock);
+}
+
+static inline struct clk *mxs_clk_mux(const char *name, void __iomem *reg,
+ u8 shift, u8 width, const char **parent_names, int num_parents)
+{
+ return clk_register_mux(NULL, name, parent_names, num_parents,
+ CLK_SET_RATE_PARENT, reg, shift, width,
+ 0, &mxs_lock);
+}
+
+static inline struct clk *mxs_clk_fixed_factor(const char *name,
+ const char *parent_name, unsigned int mult, unsigned int div)
+{
+ return clk_register_fixed_factor(NULL, name, parent_name,
+ CLK_SET_RATE_PARENT, mult, div);
+}
+
+#endif /* __MXS_CLK_H */
diff --git a/drivers/clk/spear/Makefile b/drivers/clk/spear/Makefile
new file mode 100644
index 00000000000..cdb425d3b8e
--- /dev/null
+++ b/drivers/clk/spear/Makefile
@@ -0,0 +1,10 @@
+#
+# SPEAr Clock specific Makefile
+#
+
+obj-y += clk.o clk-aux-synth.o clk-frac-synth.o clk-gpt-synth.o clk-vco-pll.o
+
+obj-$(CONFIG_ARCH_SPEAR3XX) += spear3xx_clock.o
+obj-$(CONFIG_ARCH_SPEAR6XX) += spear6xx_clock.o
+obj-$(CONFIG_MACH_SPEAR1310) += spear1310_clock.o
+obj-$(CONFIG_MACH_SPEAR1340) += spear1340_clock.o
diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c
new file mode 100644
index 00000000000..af34074e702
--- /dev/null
+++ b/drivers/clk/spear/clk-aux-synth.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Auxiliary Synthesizer clock implementation
+ */
+
+#define pr_fmt(fmt) "clk-aux-synth: " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "clk.h"
+
+/*
+ * DOC: Auxiliary Synthesizer clock
+ *
+ * The aux synthesizer produces different rates for different values of
+ * eq, x and y.
+ *
+ * Fout from the synthesizer is given by one of two equations:
+ * Fout1 = (Fin * X/Y)/2 EQ1
+ * Fout2 = Fin * X/Y EQ2
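+ *
+ * e.g. (illustrative) with Fin = 500 MHz, X = 2, Y = 4 and EQ1:
+ * Fout = (500 * 2 / 4) / 2 = 125 MHz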
+ */
+
+#define to_clk_aux(_hw) container_of(_hw, struct clk_aux, hw)
+
+static struct aux_clk_masks default_aux_masks = {
+ .eq_sel_mask = AUX_EQ_SEL_MASK,
+ .eq_sel_shift = AUX_EQ_SEL_SHIFT,
+ .eq1_mask = AUX_EQ1_SEL,
+ .eq2_mask = AUX_EQ2_SEL,
+ .xscale_sel_mask = AUX_XSCALE_MASK,
+ .xscale_sel_shift = AUX_XSCALE_SHIFT,
+ .yscale_sel_mask = AUX_YSCALE_MASK,
+ .yscale_sel_shift = AUX_YSCALE_SHIFT,
+ .enable_bit = AUX_SYNT_ENB,
+};
+
+static unsigned long aux_calc_rate(struct clk_hw *hw, unsigned long prate,
+ int index)
+{
+ struct clk_aux *aux = to_clk_aux(hw);
+ struct aux_rate_tbl *rtbl = aux->rtbl;
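+	/* a zero eq field selects EQ1 (extra divide by 2), non-zero selects EQ2 */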
+ u8 eq = rtbl[index].eq ? 1 : 2;
+
+ return (((prate / 10000) * rtbl[index].xscale) /
+ (rtbl[index].yscale * eq)) * 10000;
+}
+
+static long clk_aux_round_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long *prate)
+{
+ struct clk_aux *aux = to_clk_aux(hw);
+ int unused;
+
+ return clk_round_rate_index(hw, drate, *prate, aux_calc_rate,
+ aux->rtbl_cnt, &unused);
+}
+
+static unsigned long clk_aux_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_aux *aux = to_clk_aux(hw);
+ unsigned int num = 1, den = 1, val, eqn;
+ unsigned long flags = 0;
+
+ if (aux->lock)
+ spin_lock_irqsave(aux->lock, flags);
+
+ val = readl_relaxed(aux->reg);
+
+ if (aux->lock)
+ spin_unlock_irqrestore(aux->lock, flags);
+
+ eqn = (val >> aux->masks->eq_sel_shift) & aux->masks->eq_sel_mask;
+ if (eqn == aux->masks->eq1_mask)
+ den = 2;
+
+ /* calculate numerator */
+ num = (val >> aux->masks->xscale_sel_shift) &
+ aux->masks->xscale_sel_mask;
+
+ /* calculate denominator */
+ den *= (val >> aux->masks->yscale_sel_shift) &
+ aux->masks->yscale_sel_mask;
+
+ if (!den)
+ return 0;
+
+ return (((parent_rate / 10000) * num) / den) * 10000;
+}
+
+/* Configures new clock rate of aux */
+static int clk_aux_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct clk_aux *aux = to_clk_aux(hw);
+ struct aux_rate_tbl *rtbl = aux->rtbl;
+ unsigned long val, flags = 0;
+ int i;
+
+ clk_round_rate_index(hw, drate, prate, aux_calc_rate, aux->rtbl_cnt,
+ &i);
+
+ if (aux->lock)
+ spin_lock_irqsave(aux->lock, flags);
+
+ val = readl_relaxed(aux->reg) &
+ ~(aux->masks->eq_sel_mask << aux->masks->eq_sel_shift);
+ val |= (rtbl[i].eq & aux->masks->eq_sel_mask) <<
+ aux->masks->eq_sel_shift;
+ val &= ~(aux->masks->xscale_sel_mask << aux->masks->xscale_sel_shift);
+ val |= (rtbl[i].xscale & aux->masks->xscale_sel_mask) <<
+ aux->masks->xscale_sel_shift;
+ val &= ~(aux->masks->yscale_sel_mask << aux->masks->yscale_sel_shift);
+ val |= (rtbl[i].yscale & aux->masks->yscale_sel_mask) <<
+ aux->masks->yscale_sel_shift;
+ writel_relaxed(val, aux->reg);
+
+ if (aux->lock)
+ spin_unlock_irqrestore(aux->lock, flags);
+
+ return 0;
+}
+
+static struct clk_ops clk_aux_ops = {
+ .recalc_rate = clk_aux_recalc_rate,
+ .round_rate = clk_aux_round_rate,
+ .set_rate = clk_aux_set_rate,
+};
+
+struct clk *clk_register_aux(const char *aux_name, const char *gate_name,
+ const char *parent_name, unsigned long flags, void __iomem *reg,
+ struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
+ u8 rtbl_cnt, spinlock_t *lock, struct clk **gate_clk)
+{
+ struct clk_aux *aux;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ if (!aux_name || !parent_name || !reg || !rtbl || !rtbl_cnt) {
+ pr_err("Invalid arguments passed");
+ return ERR_PTR(-EINVAL);
+ }
+
+ aux = kzalloc(sizeof(*aux), GFP_KERNEL);
+ if (!aux) {
+ pr_err("could not allocate aux clk\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* struct clk_aux assignments */
+ if (!masks)
+ aux->masks = &default_aux_masks;
+ else
+ aux->masks = masks;
+
+ aux->reg = reg;
+ aux->rtbl = rtbl;
+ aux->rtbl_cnt = rtbl_cnt;
+ aux->lock = lock;
+ aux->hw.init = &init;
+
+ init.name = aux_name;
+ init.ops = &clk_aux_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clk = clk_register(NULL, &aux->hw);
+ if (IS_ERR_OR_NULL(clk))
+ goto free_aux;
+
+ if (gate_name) {
+ struct clk *tgate_clk;
+
+ tgate_clk = clk_register_gate(NULL, gate_name, aux_name, 0, reg,
+ aux->masks->enable_bit, 0, lock);
+ if (IS_ERR_OR_NULL(tgate_clk))
+ goto free_aux;
+
+ if (gate_clk)
+ *gate_clk = tgate_clk;
+ }
+
+ return clk;
+
+free_aux:
+ kfree(aux);
+ pr_err("clk register failed\n");
+
+ return NULL;
+}
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c
new file mode 100644
index 00000000000..4dbdb3fe18e
--- /dev/null
+++ b/drivers/clk/spear/clk-frac-synth.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Fractional Synthesizer clock implementation
+ */
+
+#define pr_fmt(fmt) "clk-frac-synth: " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "clk.h"
+
+#define DIV_FACTOR_MASK 0x1FFFF
+
+/*
+ * DOC: Fractional Synthesizer clock
+ *
+ * Fout from the synthesizer is given by:
+ *
+ * Fout = Fin / (2 * div), where div is the division factor.
+ *
+ * div is a 17-bit fixed-point value: bits 16-14 hold the integer part and
+ * bits 13-0 hold the fractional part, i.e. div = (bits 16-14).(bits 13-0)
+ * in binary.
+ *
+ * Fout = Fin/(2 * div)
+ * Fout = ((Fin / 10000)/(2 * div)) * 10000
+ * Fout = (2^14 * (Fin / 10000)/(2^14 * (2 * div))) * 10000
+ * Fout = (((Fin / 10000) << 14)/(2 * (div << 14))) * 10000
+ *
+ * div << 14 is simply the 17-bit value written to the register.
+ * Max error due to scaling down by 10000 is 10 KHz
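+ *
+ * e.g. a register value of 0x14000 encodes div = 0x14000 / 2^14 = 5.0, so
+ * with Fin = 250 MHz: Fout = 250 / (2 * 5) = 25 MHz.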
+ */
+
+#define to_clk_frac(_hw) container_of(_hw, struct clk_frac, hw)
+
+static unsigned long frac_calc_rate(struct clk_hw *hw, unsigned long prate,
+ int index)
+{
+ struct clk_frac *frac = to_clk_frac(hw);
+ struct frac_rate_tbl *rtbl = frac->rtbl;
+
+ prate /= 10000;
+ prate <<= 14;
+ prate /= (2 * rtbl[index].div);
+ prate *= 10000;
+
+ return prate;
+}
+
+static long clk_frac_round_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long *prate)
+{
+ struct clk_frac *frac = to_clk_frac(hw);
+ int unused;
+
+ return clk_round_rate_index(hw, drate, *prate, frac_calc_rate,
+ frac->rtbl_cnt, &unused);
+}
+
+static unsigned long clk_frac_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_frac *frac = to_clk_frac(hw);
+ unsigned long flags = 0;
+ unsigned int div = 1, val;
+
+ if (frac->lock)
+ spin_lock_irqsave(frac->lock, flags);
+
+ val = readl_relaxed(frac->reg);
+
+ if (frac->lock)
+ spin_unlock_irqrestore(frac->lock, flags);
+
+ div = val & DIV_FACTOR_MASK;
+
+ if (!div)
+ return 0;
+
+ parent_rate = parent_rate / 10000;
+
+ parent_rate = (parent_rate << 14) / (2 * div);
+ return parent_rate * 10000;
+}
+
+/* Configures new clock rate of frac */
+static int clk_frac_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct clk_frac *frac = to_clk_frac(hw);
+ struct frac_rate_tbl *rtbl = frac->rtbl;
+ unsigned long flags = 0, val;
+ int i;
+
+ clk_round_rate_index(hw, drate, prate, frac_calc_rate, frac->rtbl_cnt,
+ &i);
+
+ if (frac->lock)
+ spin_lock_irqsave(frac->lock, flags);
+
+ val = readl_relaxed(frac->reg) & ~DIV_FACTOR_MASK;
+ val |= rtbl[i].div & DIV_FACTOR_MASK;
+ writel_relaxed(val, frac->reg);
+
+ if (frac->lock)
+ spin_unlock_irqrestore(frac->lock, flags);
+
+ return 0;
+}
+
+struct clk_ops clk_frac_ops = {
+ .recalc_rate = clk_frac_recalc_rate,
+ .round_rate = clk_frac_round_rate,
+ .set_rate = clk_frac_set_rate,
+};
+
+struct clk *clk_register_frac(const char *name, const char *parent_name,
+ unsigned long flags, void __iomem *reg,
+ struct frac_rate_tbl *rtbl, u8 rtbl_cnt, spinlock_t *lock)
+{
+ struct clk_init_data init;
+ struct clk_frac *frac;
+ struct clk *clk;
+
+ if (!name || !parent_name || !reg || !rtbl || !rtbl_cnt) {
+ pr_err("Invalid arguments passed");
+ return ERR_PTR(-EINVAL);
+ }
+
+ frac = kzalloc(sizeof(*frac), GFP_KERNEL);
+ if (!frac) {
+ pr_err("could not allocate frac clk\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* struct clk_frac assignments */
+ frac->reg = reg;
+ frac->rtbl = rtbl;
+ frac->rtbl_cnt = rtbl_cnt;
+ frac->lock = lock;
+ frac->hw.init = &init;
+
+ init.name = name;
+ init.ops = &clk_frac_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clk = clk_register(NULL, &frac->hw);
+ if (!IS_ERR_OR_NULL(clk))
+ return clk;
+
+ pr_err("clk register failed\n");
+ kfree(frac);
+
+ return NULL;
+}
diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c
new file mode 100644
index 00000000000..b471c9762a9
--- /dev/null
+++ b/drivers/clk/spear/clk-gpt-synth.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * General Purpose Timer Synthesizer clock implementation
+ */
+
+#define pr_fmt(fmt) "clk-gpt-synth: " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "clk.h"
+
+#define GPT_MSCALE_MASK 0xFFF
+#define GPT_NSCALE_SHIFT 12
+#define GPT_NSCALE_MASK 0xF
+
+/*
+ * DOC: General Purpose Timer Synthesizer clock
+ *
+ * Calculates gpt synth clk rate for different values of mscale and nscale
+ *
+ * Fout= Fin/((2 ^ (N+1)) * (M+1))
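+ *
+ * e.g. (illustrative) with N = 0 and M = 1: Fout = Fin / (2^1 * 2) = Fin / 4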
+ */
+
+#define to_clk_gpt(_hw) container_of(_hw, struct clk_gpt, hw)
+
+static unsigned long gpt_calc_rate(struct clk_hw *hw, unsigned long prate,
+ int index)
+{
+ struct clk_gpt *gpt = to_clk_gpt(hw);
+ struct gpt_rate_tbl *rtbl = gpt->rtbl;
+
+ prate /= ((1 << (rtbl[index].nscale + 1)) * (rtbl[index].mscale + 1));
+
+ return prate;
+}
+
+static long clk_gpt_round_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long *prate)
+{
+ struct clk_gpt *gpt = to_clk_gpt(hw);
+ int unused;
+
+ return clk_round_rate_index(hw, drate, *prate, gpt_calc_rate,
+ gpt->rtbl_cnt, &unused);
+}
+
+static unsigned long clk_gpt_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_gpt *gpt = to_clk_gpt(hw);
+ unsigned long flags = 0;
+ unsigned int div = 1, val;
+
+ if (gpt->lock)
+ spin_lock_irqsave(gpt->lock, flags);
+
+ val = readl_relaxed(gpt->reg);
+
+ if (gpt->lock)
+ spin_unlock_irqrestore(gpt->lock, flags);
+
+ div += val & GPT_MSCALE_MASK;
+ div *= 1 << (((val >> GPT_NSCALE_SHIFT) & GPT_NSCALE_MASK) + 1);
+
+ if (!div)
+ return 0;
+
+ return parent_rate / div;
+}
+
+/* Configures new clock rate of gpt */
+static int clk_gpt_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct clk_gpt *gpt = to_clk_gpt(hw);
+ struct gpt_rate_tbl *rtbl = gpt->rtbl;
+ unsigned long flags = 0, val;
+ int i;
+
+ clk_round_rate_index(hw, drate, prate, gpt_calc_rate, gpt->rtbl_cnt,
+ &i);
+
+ if (gpt->lock)
+ spin_lock_irqsave(gpt->lock, flags);
+
+ val = readl(gpt->reg) & ~GPT_MSCALE_MASK;
+ val &= ~(GPT_NSCALE_MASK << GPT_NSCALE_SHIFT);
+
+ val |= rtbl[i].mscale & GPT_MSCALE_MASK;
+ val |= (rtbl[i].nscale & GPT_NSCALE_MASK) << GPT_NSCALE_SHIFT;
+
+ writel_relaxed(val, gpt->reg);
+
+ if (gpt->lock)
+ spin_unlock_irqrestore(gpt->lock, flags);
+
+ return 0;
+}
+
+static struct clk_ops clk_gpt_ops = {
+ .recalc_rate = clk_gpt_recalc_rate,
+ .round_rate = clk_gpt_round_rate,
+ .set_rate = clk_gpt_set_rate,
+};
+
+struct clk *clk_register_gpt(const char *name, const char *parent_name, unsigned
+ long flags, void __iomem *reg, struct gpt_rate_tbl *rtbl, u8
+ rtbl_cnt, spinlock_t *lock)
+{
+ struct clk_init_data init;
+ struct clk_gpt *gpt;
+ struct clk *clk;
+
+ if (!name || !parent_name || !reg || !rtbl || !rtbl_cnt) {
+ pr_err("Invalid arguments passed");
+ return ERR_PTR(-EINVAL);
+ }
+
+ gpt = kzalloc(sizeof(*gpt), GFP_KERNEL);
+ if (!gpt) {
+ pr_err("could not allocate gpt clk\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* struct clk_gpt assignments */
+ gpt->reg = reg;
+ gpt->rtbl = rtbl;
+ gpt->rtbl_cnt = rtbl_cnt;
+ gpt->lock = lock;
+ gpt->hw.init = &init;
+
+ init.name = name;
+ init.ops = &clk_gpt_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clk = clk_register(NULL, &gpt->hw);
+ if (!IS_ERR_OR_NULL(clk))
+ return clk;
+
+ pr_err("clk register failed\n");
+ kfree(gpt);
+
+ return NULL;
+}
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c
new file mode 100644
index 00000000000..dcd4bdf4b0d
--- /dev/null
+++ b/drivers/clk/spear/clk-vco-pll.c
@@ -0,0 +1,363 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * VCO-PLL clock implementation
+ */
+
+#define pr_fmt(fmt) "clk-vco-pll: " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "clk.h"
+
+/*
+ * DOC: VCO-PLL clock
+ *
+ * VCO and PLL rate are derived from following equations:
+ *
+ * In normal mode
+ * vco = (2 * M[15:8] * Fin)/N
+ *
+ * In Dithered mode
+ * vco = (2 * M[15:0] * Fin)/(256 * N)
+ *
+ * pll_rate = vco/2^p
+ *
+ * vco and pll are very closely bound to each other: vco programs the mode,
+ * m and n values, pll programs the p value, and both share common
+ * enable/disable logic.
+ *
+ * clk_register_vco_pll() registers instances of both vco & pll.
+ * CLK_SET_RATE_PARENT flag is forced for pll, as it will always pass its
+ * set_rate to vco. A single rate table exists for both the clocks, which
+ * configures m, n and p.
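+ *
+ * e.g. in normal mode with Fin = 24 MHz, M = 0x7D (125), N = 6 and p = 3:
+ * vco = (2 * 125 * 24) / 6 = 1000 MHz and pll = 1000 / 2^3 = 125 MHz.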
+ */
+
+/* PLL_CTR register masks */
+#define PLL_MODE_NORMAL 0
+#define PLL_MODE_FRACTION 1
+#define PLL_MODE_DITH_DSM 2
+#define PLL_MODE_DITH_SSM 3
+#define PLL_MODE_MASK 3
+#define PLL_MODE_SHIFT 3
+#define PLL_ENABLE 2
+
+#define PLL_LOCK_SHIFT 0
+#define PLL_LOCK_MASK 1
+
+/* PLL FRQ register masks */
+#define PLL_NORM_FDBK_M_MASK 0xFF
+#define PLL_NORM_FDBK_M_SHIFT 24
+#define PLL_DITH_FDBK_M_MASK 0xFFFF
+#define PLL_DITH_FDBK_M_SHIFT 16
+#define PLL_DIV_P_MASK 0x7
+#define PLL_DIV_P_SHIFT 8
+#define PLL_DIV_N_MASK 0xFF
+#define PLL_DIV_N_SHIFT 0
+
+#define to_clk_vco(_hw) container_of(_hw, struct clk_vco, hw)
+#define to_clk_pll(_hw) container_of(_hw, struct clk_pll, hw)
+
+/* Calculates pll clk rate for specific value of mode, m, n and p */
+static unsigned long pll_calc_rate(struct pll_rate_tbl *rtbl,
+ unsigned long prate, int index, unsigned long *pll_rate)
+{
+ unsigned long rate = prate;
+ unsigned int mode;
+
+ mode = rtbl[index].mode ? 256 : 1;
+ rate = (((2 * rate / 10000) * rtbl[index].m) / (mode * rtbl[index].n));
+
+ if (pll_rate)
+ *pll_rate = (rate / (1 << rtbl[index].p)) * 10000;
+
+ return rate * 10000;
+}
+
+static long clk_pll_round_rate_index(struct clk_hw *hw, unsigned long drate,
+ unsigned long *prate, int *index)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ unsigned long prev_rate, vco_prev_rate, rate = 0;
+ unsigned long vco_parent_rate =
+ __clk_get_rate(__clk_get_parent(__clk_get_parent(hw->clk)));
+
+ if (!prate) {
+		pr_err("%s: prate is required for pll clk\n", __func__);
+ return -EINVAL;
+ }
+
+ for (*index = 0; *index < pll->vco->rtbl_cnt; (*index)++) {
+ prev_rate = rate;
+ vco_prev_rate = *prate;
+ *prate = pll_calc_rate(pll->vco->rtbl, vco_parent_rate, *index,
+ &rate);
+ if (drate < rate) {
+ /* previous clock was best */
+ if (*index) {
+ rate = prev_rate;
+ *prate = vco_prev_rate;
+ (*index)--;
+ }
+ break;
+ }
+ }
+
+ return rate;
+}
+
+static long clk_pll_round_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long *prate)
+{
+ int unused;
+
+ return clk_pll_round_rate_index(hw, drate, prate, &unused);
+}
+
+static unsigned long clk_pll_recalc_rate(struct clk_hw *hw, unsigned long
+ parent_rate)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ unsigned long flags = 0;
+ unsigned int p;
+
+ if (pll->vco->lock)
+ spin_lock_irqsave(pll->vco->lock, flags);
+
+ p = readl_relaxed(pll->vco->cfg_reg);
+
+ if (pll->vco->lock)
+ spin_unlock_irqrestore(pll->vco->lock, flags);
+
+ p = (p >> PLL_DIV_P_SHIFT) & PLL_DIV_P_MASK;
+
+ return parent_rate / (1 << p);
+}
+
+static int clk_pll_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ struct pll_rate_tbl *rtbl = pll->vco->rtbl;
+ unsigned long flags = 0, val;
+ int i;
+
+ clk_pll_round_rate_index(hw, drate, NULL, &i);
+
+ if (pll->vco->lock)
+ spin_lock_irqsave(pll->vco->lock, flags);
+
+ val = readl_relaxed(pll->vco->cfg_reg);
+ val &= ~(PLL_DIV_P_MASK << PLL_DIV_P_SHIFT);
+ val |= (rtbl[i].p & PLL_DIV_P_MASK) << PLL_DIV_P_SHIFT;
+ writel_relaxed(val, pll->vco->cfg_reg);
+
+ if (pll->vco->lock)
+ spin_unlock_irqrestore(pll->vco->lock, flags);
+
+ return 0;
+}
+
+static struct clk_ops clk_pll_ops = {
+ .recalc_rate = clk_pll_recalc_rate,
+ .round_rate = clk_pll_round_rate,
+ .set_rate = clk_pll_set_rate,
+};
+
+static inline unsigned long vco_calc_rate(struct clk_hw *hw,
+ unsigned long prate, int index)
+{
+ struct clk_vco *vco = to_clk_vco(hw);
+
+ return pll_calc_rate(vco->rtbl, prate, index, NULL);
+}
+
+static long clk_vco_round_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long *prate)
+{
+ struct clk_vco *vco = to_clk_vco(hw);
+ int unused;
+
+ return clk_round_rate_index(hw, drate, *prate, vco_calc_rate,
+ vco->rtbl_cnt, &unused);
+}
+
+static unsigned long clk_vco_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_vco *vco = to_clk_vco(hw);
+ unsigned long flags = 0;
+ unsigned int num = 2, den = 0, val, mode = 0;
+
+ if (vco->lock)
+ spin_lock_irqsave(vco->lock, flags);
+
+ mode = (readl_relaxed(vco->mode_reg) >> PLL_MODE_SHIFT) & PLL_MODE_MASK;
+
+ val = readl_relaxed(vco->cfg_reg);
+
+ if (vco->lock)
+ spin_unlock_irqrestore(vco->lock, flags);
+
+ den = (val >> PLL_DIV_N_SHIFT) & PLL_DIV_N_MASK;
+
+ /* calculate numerator & denominator */
+ if (!mode) {
+ /* Normal mode */
+ num *= (val >> PLL_NORM_FDBK_M_SHIFT) & PLL_NORM_FDBK_M_MASK;
+ } else {
+ /* Dithered mode */
+ num *= (val >> PLL_DITH_FDBK_M_SHIFT) & PLL_DITH_FDBK_M_MASK;
+ den *= 256;
+ }
+
+ if (!den) {
+ WARN(1, "%s: denominator can't be zero\n", __func__);
+ return 0;
+ }
+
+ return (((parent_rate / 10000) * num) / den) * 10000;
+}
+
+/* Configures new clock rate of vco */
+static int clk_vco_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct clk_vco *vco = to_clk_vco(hw);
+ struct pll_rate_tbl *rtbl = vco->rtbl;
+ unsigned long flags = 0, val;
+ int i;
+
+ clk_round_rate_index(hw, drate, prate, vco_calc_rate, vco->rtbl_cnt,
+ &i);
+
+ if (vco->lock)
+ spin_lock_irqsave(vco->lock, flags);
+
+ val = readl_relaxed(vco->mode_reg);
+ val &= ~(PLL_MODE_MASK << PLL_MODE_SHIFT);
+ val |= (rtbl[i].mode & PLL_MODE_MASK) << PLL_MODE_SHIFT;
+ writel_relaxed(val, vco->mode_reg);
+
+ val = readl_relaxed(vco->cfg_reg);
+ val &= ~(PLL_DIV_N_MASK << PLL_DIV_N_SHIFT);
+ val |= (rtbl[i].n & PLL_DIV_N_MASK) << PLL_DIV_N_SHIFT;
+
+ val &= ~(PLL_DITH_FDBK_M_MASK << PLL_DITH_FDBK_M_SHIFT);
+ if (rtbl[i].mode)
+ val |= (rtbl[i].m & PLL_DITH_FDBK_M_MASK) <<
+ PLL_DITH_FDBK_M_SHIFT;
+ else
+ val |= (rtbl[i].m & PLL_NORM_FDBK_M_MASK) <<
+ PLL_NORM_FDBK_M_SHIFT;
+
+ writel_relaxed(val, vco->cfg_reg);
+
+ if (vco->lock)
+ spin_unlock_irqrestore(vco->lock, flags);
+
+ return 0;
+}
+
+static struct clk_ops clk_vco_ops = {
+ .recalc_rate = clk_vco_recalc_rate,
+ .round_rate = clk_vco_round_rate,
+ .set_rate = clk_vco_set_rate,
+};
+
+struct clk *clk_register_vco_pll(const char *vco_name, const char *pll_name,
+ const char *vco_gate_name, const char *parent_name,
+ unsigned long flags, void __iomem *mode_reg, void __iomem
+ *cfg_reg, struct pll_rate_tbl *rtbl, u8 rtbl_cnt,
+ spinlock_t *lock, struct clk **pll_clk,
+ struct clk **vco_gate_clk)
+{
+ struct clk_vco *vco;
+ struct clk_pll *pll;
+ struct clk *vco_clk, *tpll_clk, *tvco_gate_clk;
+ struct clk_init_data vco_init, pll_init;
+ const char **vco_parent_name;
+
+ if (!vco_name || !pll_name || !parent_name || !mode_reg || !cfg_reg ||
+ !rtbl || !rtbl_cnt) {
+ pr_err("Invalid arguments passed");
+ return ERR_PTR(-EINVAL);
+ }
+
+ vco = kzalloc(sizeof(*vco), GFP_KERNEL);
+ if (!vco) {
+ pr_err("could not allocate vco clk\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll) {
+ pr_err("could not allocate pll clk\n");
+ goto free_vco;
+ }
+
+ /* struct clk_vco assignments */
+ vco->mode_reg = mode_reg;
+ vco->cfg_reg = cfg_reg;
+ vco->rtbl = rtbl;
+ vco->rtbl_cnt = rtbl_cnt;
+ vco->lock = lock;
+ vco->hw.init = &vco_init;
+
+ pll->vco = vco;
+ pll->hw.init = &pll_init;
+
+ if (vco_gate_name) {
+ tvco_gate_clk = clk_register_gate(NULL, vco_gate_name,
+ parent_name, 0, mode_reg, PLL_ENABLE, 0, lock);
+ if (IS_ERR_OR_NULL(tvco_gate_clk))
+ goto free_pll;
+
+ if (vco_gate_clk)
+ *vco_gate_clk = tvco_gate_clk;
+ vco_parent_name = &vco_gate_name;
+ } else {
+ vco_parent_name = &parent_name;
+ }
+
+ vco_init.name = vco_name;
+ vco_init.ops = &clk_vco_ops;
+ vco_init.flags = flags;
+ vco_init.parent_names = vco_parent_name;
+ vco_init.num_parents = 1;
+
+ pll_init.name = pll_name;
+ pll_init.ops = &clk_pll_ops;
+ pll_init.flags = CLK_SET_RATE_PARENT;
+ pll_init.parent_names = &vco_name;
+ pll_init.num_parents = 1;
+
+ vco_clk = clk_register(NULL, &vco->hw);
+ if (IS_ERR_OR_NULL(vco_clk))
+ goto free_pll;
+
+ tpll_clk = clk_register(NULL, &pll->hw);
+ if (IS_ERR_OR_NULL(tpll_clk))
+ goto free_pll;
+
+ if (pll_clk)
+ *pll_clk = tpll_clk;
+
+ return vco_clk;
+
+free_pll:
+ kfree(pll);
+free_vco:
+ kfree(vco);
+
+ pr_err("Failed to register vco pll clock\n");
+
+ return ERR_PTR(-ENOMEM);
+}
diff --git a/drivers/clk/spear/clk.c b/drivers/clk/spear/clk.c
new file mode 100644
index 00000000000..376d4e5ff32
--- /dev/null
+++ b/drivers/clk/spear/clk.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * SPEAr clk - Common routines
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/types.h>
+#include "clk.h"
+
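+/*
+ * Walk an ascending rate table and return the best rate not exceeding drate,
+ * storing its table index in *index; if even the first entry is above drate,
+ * that first entry is returned.
+ */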
+long clk_round_rate_index(struct clk_hw *hw, unsigned long drate,
+ unsigned long parent_rate, clk_calc_rate calc_rate, u8 rtbl_cnt,
+ int *index)
+{
+ unsigned long prev_rate, rate = 0;
+
+ for (*index = 0; *index < rtbl_cnt; (*index)++) {
+ prev_rate = rate;
+ rate = calc_rate(hw, parent_rate, *index);
+ if (drate < rate) {
+ /* previous clock was best */
+ if (*index) {
+ rate = prev_rate;
+ (*index)--;
+ }
+ break;
+ }
+ }
+
+ return rate;
+}
diff --git a/drivers/clk/spear/clk.h b/drivers/clk/spear/clk.h
new file mode 100644
index 00000000000..3321c46a071
--- /dev/null
+++ b/drivers/clk/spear/clk.h
@@ -0,0 +1,134 @@
+/*
+ * Clock framework definitions for SPEAr platform
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __SPEAR_CLK_H
+#define __SPEAR_CLK_H
+
+#include <linux/clk-provider.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+/* Auxiliary Synth clk */
+/* Default masks */
+#define AUX_EQ_SEL_SHIFT 30
+#define AUX_EQ_SEL_MASK 1
+#define AUX_EQ1_SEL 0
+#define AUX_EQ2_SEL 1
+#define AUX_XSCALE_SHIFT 16
+#define AUX_XSCALE_MASK 0xFFF
+#define AUX_YSCALE_SHIFT 0
+#define AUX_YSCALE_MASK 0xFFF
+#define AUX_SYNT_ENB 31
+
+struct aux_clk_masks {
+ u32 eq_sel_mask;
+ u32 eq_sel_shift;
+ u32 eq1_mask;
+ u32 eq2_mask;
+ u32 xscale_sel_mask;
+ u32 xscale_sel_shift;
+ u32 yscale_sel_mask;
+ u32 yscale_sel_shift;
+ u32 enable_bit;
+};
+
+struct aux_rate_tbl {
+ u16 xscale;
+ u16 yscale;
+ u8 eq;
+};
+
+struct clk_aux {
+ struct clk_hw hw;
+ void __iomem *reg;
+ struct aux_clk_masks *masks;
+ struct aux_rate_tbl *rtbl;
+ u8 rtbl_cnt;
+ spinlock_t *lock;
+};
+
+/* Fractional Synth clk */
+struct frac_rate_tbl {
+ u32 div;
+};
+
+struct clk_frac {
+ struct clk_hw hw;
+ void __iomem *reg;
+ struct frac_rate_tbl *rtbl;
+ u8 rtbl_cnt;
+ spinlock_t *lock;
+};
+
+/* GPT clk */
+struct gpt_rate_tbl {
+ u16 mscale;
+ u16 nscale;
+};
+
+struct clk_gpt {
+ struct clk_hw hw;
+ void __iomem *reg;
+ struct gpt_rate_tbl *rtbl;
+ u8 rtbl_cnt;
+ spinlock_t *lock;
+};
+
+/* VCO-PLL clk */
+struct pll_rate_tbl {
+ u8 mode;
+ u16 m;
+ u8 n;
+ u8 p;
+};
+
+struct clk_vco {
+ struct clk_hw hw;
+ void __iomem *mode_reg;
+ void __iomem *cfg_reg;
+ struct pll_rate_tbl *rtbl;
+ u8 rtbl_cnt;
+ spinlock_t *lock;
+};
+
+struct clk_pll {
+ struct clk_hw hw;
+ struct clk_vco *vco;
+ const char *parent[1];
+ spinlock_t *lock;
+};
+
+typedef unsigned long (*clk_calc_rate)(struct clk_hw *hw, unsigned long prate,
+ int index);
+
+/* clk register routines */
+struct clk *clk_register_aux(const char *aux_name, const char *gate_name,
+ const char *parent_name, unsigned long flags, void __iomem *reg,
+ struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
+ u8 rtbl_cnt, spinlock_t *lock, struct clk **gate_clk);
+struct clk *clk_register_frac(const char *name, const char *parent_name,
+ unsigned long flags, void __iomem *reg,
+ struct frac_rate_tbl *rtbl, u8 rtbl_cnt, spinlock_t *lock);
+struct clk *clk_register_gpt(const char *name, const char *parent_name, unsigned
+ long flags, void __iomem *reg, struct gpt_rate_tbl *rtbl, u8
+ rtbl_cnt, spinlock_t *lock);
+struct clk *clk_register_vco_pll(const char *vco_name, const char *pll_name,
+ const char *vco_gate_name, const char *parent_name,
+ unsigned long flags, void __iomem *mode_reg, void __iomem
+ *cfg_reg, struct pll_rate_tbl *rtbl, u8 rtbl_cnt,
+ spinlock_t *lock, struct clk **pll_clk,
+ struct clk **vco_gate_clk);
+
+long clk_round_rate_index(struct clk_hw *hw, unsigned long drate,
+ unsigned long parent_rate, clk_calc_rate calc_rate, u8 rtbl_cnt,
+ int *index);
+
+#endif /* __SPEAR_CLK_H */
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
new file mode 100644
index 00000000000..42b68df9aee
--- /dev/null
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -0,0 +1,1106 @@
+/*
+ * arch/arm/mach-spear13xx/spear1310_clock.c
+ *
+ * SPEAr1310 machine clock framework source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock_types.h>
+#include <mach/spear.h>
+#include "clk.h"
+
+/* PLL related registers and bit values */
+#define SPEAR1310_PLL_CFG (VA_MISC_BASE + 0x210)
+ /* PLL_CFG bit values */
+ #define SPEAR1310_CLCD_SYNT_CLK_MASK 1
+ #define SPEAR1310_CLCD_SYNT_CLK_SHIFT 31
+ #define SPEAR1310_RAS_SYNT2_3_CLK_MASK 2
+ #define SPEAR1310_RAS_SYNT2_3_CLK_SHIFT 29
+ #define SPEAR1310_RAS_SYNT_CLK_MASK 2
+ #define SPEAR1310_RAS_SYNT0_1_CLK_SHIFT 27
+ #define SPEAR1310_PLL_CLK_MASK 2
+ #define SPEAR1310_PLL3_CLK_SHIFT 24
+ #define SPEAR1310_PLL2_CLK_SHIFT 22
+ #define SPEAR1310_PLL1_CLK_SHIFT 20
+
+#define SPEAR1310_PLL1_CTR (VA_MISC_BASE + 0x214)
+#define SPEAR1310_PLL1_FRQ (VA_MISC_BASE + 0x218)
+#define SPEAR1310_PLL2_CTR (VA_MISC_BASE + 0x220)
+#define SPEAR1310_PLL2_FRQ (VA_MISC_BASE + 0x224)
+#define SPEAR1310_PLL3_CTR (VA_MISC_BASE + 0x22C)
+#define SPEAR1310_PLL3_FRQ (VA_MISC_BASE + 0x230)
+#define SPEAR1310_PLL4_CTR (VA_MISC_BASE + 0x238)
+#define SPEAR1310_PLL4_FRQ (VA_MISC_BASE + 0x23C)
+#define SPEAR1310_PERIP_CLK_CFG (VA_MISC_BASE + 0x244)
+ /* PERIP_CLK_CFG bit values */
+ #define SPEAR1310_GPT_OSC24_VAL 0
+ #define SPEAR1310_GPT_APB_VAL 1
+ #define SPEAR1310_GPT_CLK_MASK 1
+ #define SPEAR1310_GPT3_CLK_SHIFT 11
+ #define SPEAR1310_GPT2_CLK_SHIFT 10
+ #define SPEAR1310_GPT1_CLK_SHIFT 9
+ #define SPEAR1310_GPT0_CLK_SHIFT 8
+ #define SPEAR1310_UART_CLK_PLL5_VAL 0
+ #define SPEAR1310_UART_CLK_OSC24_VAL 1
+ #define SPEAR1310_UART_CLK_SYNT_VAL 2
+ #define SPEAR1310_UART_CLK_MASK 2
+ #define SPEAR1310_UART_CLK_SHIFT 4
+
+ #define SPEAR1310_AUX_CLK_PLL5_VAL 0
+ #define SPEAR1310_AUX_CLK_SYNT_VAL 1
+ #define SPEAR1310_CLCD_CLK_MASK 2
+ #define SPEAR1310_CLCD_CLK_SHIFT 2
+ #define SPEAR1310_C3_CLK_MASK 1
+ #define SPEAR1310_C3_CLK_SHIFT 1
+
+#define SPEAR1310_GMAC_CLK_CFG (VA_MISC_BASE + 0x248)
+ #define SPEAR1310_GMAC_PHY_IF_SEL_MASK 3
+ #define SPEAR1310_GMAC_PHY_IF_SEL_SHIFT 4
+ #define SPEAR1310_GMAC_PHY_CLK_MASK 1
+ #define SPEAR1310_GMAC_PHY_CLK_SHIFT 3
+ #define SPEAR1310_GMAC_PHY_INPUT_CLK_MASK 2
+ #define SPEAR1310_GMAC_PHY_INPUT_CLK_SHIFT 1
+
+#define SPEAR1310_I2S_CLK_CFG (VA_MISC_BASE + 0x24C)
+ /* I2S_CLK_CFG register mask */
+ #define SPEAR1310_I2S_SCLK_X_MASK 0x1F
+ #define SPEAR1310_I2S_SCLK_X_SHIFT 27
+ #define SPEAR1310_I2S_SCLK_Y_MASK 0x1F
+ #define SPEAR1310_I2S_SCLK_Y_SHIFT 22
+ #define SPEAR1310_I2S_SCLK_EQ_SEL_SHIFT 21
+ #define SPEAR1310_I2S_SCLK_SYNTH_ENB 20
+ #define SPEAR1310_I2S_PRS1_CLK_X_MASK 0xFF
+ #define SPEAR1310_I2S_PRS1_CLK_X_SHIFT 12
+ #define SPEAR1310_I2S_PRS1_CLK_Y_MASK 0xFF
+ #define SPEAR1310_I2S_PRS1_CLK_Y_SHIFT 4
+ #define SPEAR1310_I2S_PRS1_EQ_SEL_SHIFT 3
+ #define SPEAR1310_I2S_REF_SEL_MASK 1
+ #define SPEAR1310_I2S_REF_SHIFT 2
+ #define SPEAR1310_I2S_SRC_CLK_MASK 2
+ #define SPEAR1310_I2S_SRC_CLK_SHIFT 0
+
+#define SPEAR1310_C3_CLK_SYNT (VA_MISC_BASE + 0x250)
+#define SPEAR1310_UART_CLK_SYNT (VA_MISC_BASE + 0x254)
+#define SPEAR1310_GMAC_CLK_SYNT (VA_MISC_BASE + 0x258)
+#define SPEAR1310_SDHCI_CLK_SYNT (VA_MISC_BASE + 0x25C)
+#define SPEAR1310_CFXD_CLK_SYNT (VA_MISC_BASE + 0x260)
+#define SPEAR1310_ADC_CLK_SYNT (VA_MISC_BASE + 0x264)
+#define SPEAR1310_AMBA_CLK_SYNT (VA_MISC_BASE + 0x268)
+#define SPEAR1310_CLCD_CLK_SYNT (VA_MISC_BASE + 0x270)
+#define SPEAR1310_RAS_CLK_SYNT0 (VA_MISC_BASE + 0x280)
+#define SPEAR1310_RAS_CLK_SYNT1 (VA_MISC_BASE + 0x288)
+#define SPEAR1310_RAS_CLK_SYNT2 (VA_MISC_BASE + 0x290)
+#define SPEAR1310_RAS_CLK_SYNT3 (VA_MISC_BASE + 0x298)
+ /* Check Fractional synthesizer reg masks */
+
+#define SPEAR1310_PERIP1_CLK_ENB (VA_MISC_BASE + 0x300)
+ /* PERIP1_CLK_ENB register masks */
+ #define SPEAR1310_RTC_CLK_ENB 31
+ #define SPEAR1310_ADC_CLK_ENB 30
+ #define SPEAR1310_C3_CLK_ENB 29
+ #define SPEAR1310_JPEG_CLK_ENB 28
+ #define SPEAR1310_CLCD_CLK_ENB 27
+ #define SPEAR1310_DMA_CLK_ENB 25
+ #define SPEAR1310_GPIO1_CLK_ENB 24
+ #define SPEAR1310_GPIO0_CLK_ENB 23
+ #define SPEAR1310_GPT1_CLK_ENB 22
+ #define SPEAR1310_GPT0_CLK_ENB 21
+ #define SPEAR1310_I2S0_CLK_ENB 20
+ #define SPEAR1310_I2S1_CLK_ENB 19
+ #define SPEAR1310_I2C0_CLK_ENB 18
+ #define SPEAR1310_SSP_CLK_ENB 17
+ #define SPEAR1310_UART_CLK_ENB 15
+ #define SPEAR1310_PCIE_SATA_2_CLK_ENB 14
+ #define SPEAR1310_PCIE_SATA_1_CLK_ENB 13
+ #define SPEAR1310_PCIE_SATA_0_CLK_ENB 12
+ #define SPEAR1310_UOC_CLK_ENB 11
+ #define SPEAR1310_UHC1_CLK_ENB 10
+ #define SPEAR1310_UHC0_CLK_ENB 9
+ #define SPEAR1310_GMAC_CLK_ENB 8
+ #define SPEAR1310_CFXD_CLK_ENB 7
+ #define SPEAR1310_SDHCI_CLK_ENB 6
+ #define SPEAR1310_SMI_CLK_ENB 5
+ #define SPEAR1310_FSMC_CLK_ENB 4
+ #define SPEAR1310_SYSRAM0_CLK_ENB 3
+ #define SPEAR1310_SYSRAM1_CLK_ENB 2
+ #define SPEAR1310_SYSROM_CLK_ENB 1
+ #define SPEAR1310_BUS_CLK_ENB 0
+
+#define SPEAR1310_PERIP2_CLK_ENB (VA_MISC_BASE + 0x304)
+ /* PERIP2_CLK_ENB register masks */
+ #define SPEAR1310_THSENS_CLK_ENB 8
+ #define SPEAR1310_I2S_REF_PAD_CLK_ENB 7
+ #define SPEAR1310_ACP_CLK_ENB 6
+ #define SPEAR1310_GPT3_CLK_ENB 5
+ #define SPEAR1310_GPT2_CLK_ENB 4
+ #define SPEAR1310_KBD_CLK_ENB 3
+ #define SPEAR1310_CPU_DBG_CLK_ENB 2
+ #define SPEAR1310_DDR_CORE_CLK_ENB 1
+ #define SPEAR1310_DDR_CTRL_CLK_ENB 0
+
+#define SPEAR1310_RAS_CLK_ENB (VA_MISC_BASE + 0x310)
+ /* RAS_CLK_ENB register masks */
+ #define SPEAR1310_SYNT3_CLK_ENB 17
+ #define SPEAR1310_SYNT2_CLK_ENB 16
+ #define SPEAR1310_SYNT1_CLK_ENB 15
+ #define SPEAR1310_SYNT0_CLK_ENB 14
+ #define SPEAR1310_PCLK3_CLK_ENB 13
+ #define SPEAR1310_PCLK2_CLK_ENB 12
+ #define SPEAR1310_PCLK1_CLK_ENB 11
+ #define SPEAR1310_PCLK0_CLK_ENB 10
+ #define SPEAR1310_PLL3_CLK_ENB 9
+ #define SPEAR1310_PLL2_CLK_ENB 8
+ #define SPEAR1310_C125M_PAD_CLK_ENB 7
+ #define SPEAR1310_C30M_CLK_ENB 6
+ #define SPEAR1310_C48M_CLK_ENB 5
+ #define SPEAR1310_OSC_25M_CLK_ENB 4
+ #define SPEAR1310_OSC_32K_CLK_ENB 3
+ #define SPEAR1310_OSC_24M_CLK_ENB 2
+ #define SPEAR1310_PCLK_CLK_ENB 1
+ #define SPEAR1310_ACLK_CLK_ENB 0
+
+/* RAS Area Control Register */
+#define SPEAR1310_RAS_CTRL_REG0 (VA_SPEAR1310_RAS_BASE + 0x000)
+ #define SPEAR1310_SSP1_CLK_MASK 3
+ #define SPEAR1310_SSP1_CLK_SHIFT 26
+ #define SPEAR1310_TDM_CLK_MASK 1
+ #define SPEAR1310_TDM2_CLK_SHIFT 24
+ #define SPEAR1310_TDM1_CLK_SHIFT 23
+ #define SPEAR1310_I2C_CLK_MASK 1
+ #define SPEAR1310_I2C7_CLK_SHIFT 22
+ #define SPEAR1310_I2C6_CLK_SHIFT 21
+ #define SPEAR1310_I2C5_CLK_SHIFT 20
+ #define SPEAR1310_I2C4_CLK_SHIFT 19
+ #define SPEAR1310_I2C3_CLK_SHIFT 18
+ #define SPEAR1310_I2C2_CLK_SHIFT 17
+ #define SPEAR1310_I2C1_CLK_SHIFT 16
+ #define SPEAR1310_GPT64_CLK_MASK 1
+ #define SPEAR1310_GPT64_CLK_SHIFT 15
+ #define SPEAR1310_RAS_UART_CLK_MASK 1
+ #define SPEAR1310_UART5_CLK_SHIFT 14
+ #define SPEAR1310_UART4_CLK_SHIFT 13
+ #define SPEAR1310_UART3_CLK_SHIFT 12
+ #define SPEAR1310_UART2_CLK_SHIFT 11
+ #define SPEAR1310_UART1_CLK_SHIFT 10
+ #define SPEAR1310_PCI_CLK_MASK 1
+ #define SPEAR1310_PCI_CLK_SHIFT 0
+
+#define SPEAR1310_RAS_CTRL_REG1 (VA_SPEAR1310_RAS_BASE + 0x004)
+ #define SPEAR1310_PHY_CLK_MASK 0x3
+ #define SPEAR1310_RMII_PHY_CLK_SHIFT 0
+ #define SPEAR1310_SMII_RGMII_PHY_CLK_SHIFT 2
+
+#define SPEAR1310_RAS_SW_CLK_CTRL (VA_SPEAR1310_RAS_BASE + 0x0148)
+ #define SPEAR1310_CAN1_CLK_ENB 25
+ #define SPEAR1310_CAN0_CLK_ENB 24
+ #define SPEAR1310_GPT64_CLK_ENB 23
+ #define SPEAR1310_SSP1_CLK_ENB 22
+ #define SPEAR1310_I2C7_CLK_ENB 21
+ #define SPEAR1310_I2C6_CLK_ENB 20
+ #define SPEAR1310_I2C5_CLK_ENB 19
+ #define SPEAR1310_I2C4_CLK_ENB 18
+ #define SPEAR1310_I2C3_CLK_ENB 17
+ #define SPEAR1310_I2C2_CLK_ENB 16
+ #define SPEAR1310_I2C1_CLK_ENB 15
+ #define SPEAR1310_UART5_CLK_ENB 14
+ #define SPEAR1310_UART4_CLK_ENB 13
+ #define SPEAR1310_UART3_CLK_ENB 12
+ #define SPEAR1310_UART2_CLK_ENB 11
+ #define SPEAR1310_UART1_CLK_ENB 10
+ #define SPEAR1310_RS485_1_CLK_ENB 9
+ #define SPEAR1310_RS485_0_CLK_ENB 8
+ #define SPEAR1310_TDM2_CLK_ENB 7
+ #define SPEAR1310_TDM1_CLK_ENB 6
+ #define SPEAR1310_PCI_CLK_ENB 5
+ #define SPEAR1310_GMII_CLK_ENB 4
+ #define SPEAR1310_MII2_CLK_ENB 3
+ #define SPEAR1310_MII1_CLK_ENB 2
+ #define SPEAR1310_MII0_CLK_ENB 1
+ #define SPEAR1310_ESRAM_CLK_ENB 0
+
+static DEFINE_SPINLOCK(_lock);
+
+/* pll rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll_rtbl[] = {
+ /* PCLK 24MHz */
+ {.mode = 0, .m = 0x83, .n = 0x04, .p = 0x5}, /* vco 1572, pll 49.125 MHz */
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x3}, /* vco 1000, pll 125 MHz */
+ {.mode = 0, .m = 0x64, .n = 0x06, .p = 0x1}, /* vco 800, pll 400 MHz */
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x1}, /* vco 1000, pll 500 MHz */
+ {.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x1}, /* vco 1328, pll 664 MHz */
+ {.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x1}, /* vco 1600, pll 800 MHz */
+	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1 GHz, pll 1 GHz */
+};
+
+/* vco-pll4 rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll4_rtbl[] = {
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x2}, /* vco 1000, pll 250 MHz */
+ {.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x2}, /* vco 1328, pll 332 MHz */
+ {.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x2}, /* vco 1600, pll 400 MHz */
+	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1 GHz, pll 1 GHz */
+};
+
+/* aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl aux_rtbl[] = {
+ /* For VCO1div2 = 500 MHz */
+ {.xscale = 10, .yscale = 204, .eq = 0}, /* 12.29 MHz */
+ {.xscale = 4, .yscale = 21, .eq = 0}, /* 48 MHz */
+ {.xscale = 2, .yscale = 6, .eq = 0}, /* 83 MHz */
+ {.xscale = 2, .yscale = 4, .eq = 0}, /* 125 MHz */
+ {.xscale = 1, .yscale = 3, .eq = 1}, /* 166 MHz */
+ {.xscale = 1, .yscale = 2, .eq = 1}, /* 250 MHz */
+};
+
+/* gmac rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl gmac_rtbl[] = {
+ /* For gmac phy input clk */
+ {.xscale = 2, .yscale = 6, .eq = 0}, /* divided by 6 */
+ {.xscale = 2, .yscale = 4, .eq = 0}, /* divided by 4 */
+ {.xscale = 1, .yscale = 3, .eq = 1}, /* divided by 3 */
+ {.xscale = 1, .yscale = 2, .eq = 1}, /* divided by 2 */
+};
+
+/* clcd rate configuration table, in ascending order of rates */
+static struct frac_rate_tbl clcd_rtbl[] = {
+ {.div = 0x14000}, /* 25 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x1284B}, /* 27 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x0D8D3}, /* 58 MHz, for vco1div4 = 393 MHz */
+ {.div = 0x0B72C}, /* 58 MHz, for vco1div4 = 332 MHz */
+ {.div = 0x089EE}, /* 58 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x06f1C}, /* 72 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x06E58}, /* 58 MHz, for vco1div4 = 200 MHz */
+ {.div = 0x06c1B}, /* 74 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x04A12}, /* 108 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x0378E}, /* 144 MHz, for vco1div4 = 250 MHz */
+};
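+
+/*
+ * The fractional divider values above appear to encode the ratio
+ * parent / rate in steps of 1/0x2000, i.e. rate ~= parent * 0x2000 / div:
+ * div = 0x14000 with vco1div4 = 250 MHz gives 250 * 0x2000 / 0x14000 =
+ * 25 MHz, and div = 0x0D8D3 with 393 MHz gives ~58 MHz. This encoding is
+ * inferred from the entries above, not from the synthesizer databook.
+ */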
+
+/* i2s prescaler1 masks */
+static struct aux_clk_masks i2s_prs1_masks = {
+ .eq_sel_mask = AUX_EQ_SEL_MASK,
+ .eq_sel_shift = SPEAR1310_I2S_PRS1_EQ_SEL_SHIFT,
+ .eq1_mask = AUX_EQ1_SEL,
+ .eq2_mask = AUX_EQ2_SEL,
+ .xscale_sel_mask = SPEAR1310_I2S_PRS1_CLK_X_MASK,
+ .xscale_sel_shift = SPEAR1310_I2S_PRS1_CLK_X_SHIFT,
+ .yscale_sel_mask = SPEAR1310_I2S_PRS1_CLK_Y_MASK,
+ .yscale_sel_shift = SPEAR1310_I2S_PRS1_CLK_Y_SHIFT,
+};
+
+/* i2s sclk (bit clock) synthesizer masks */
+static struct aux_clk_masks i2s_sclk_masks = {
+ .eq_sel_mask = AUX_EQ_SEL_MASK,
+ .eq_sel_shift = SPEAR1310_I2S_SCLK_EQ_SEL_SHIFT,
+ .eq1_mask = AUX_EQ1_SEL,
+ .eq2_mask = AUX_EQ2_SEL,
+ .xscale_sel_mask = SPEAR1310_I2S_SCLK_X_MASK,
+ .xscale_sel_shift = SPEAR1310_I2S_SCLK_X_SHIFT,
+ .yscale_sel_mask = SPEAR1310_I2S_SCLK_Y_MASK,
+ .yscale_sel_shift = SPEAR1310_I2S_SCLK_Y_SHIFT,
+ .enable_bit = SPEAR1310_I2S_SCLK_SYNTH_ENB,
+};
+
+/* i2s prs1 aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl i2s_prs1_rtbl[] = {
+ /* For parent clk = 49.152 MHz */
+ {.xscale = 1, .yscale = 2, .eq = 0}, /* 12.288 MHz */
+};
+
+/* i2s sclk aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl i2s_sclk_rtbl[] = {
+ /* For i2s_ref_clk = 12.288 MHz */
+ {.xscale = 1, .yscale = 4, .eq = 0}, /* 1.53 MHz */
+ {.xscale = 1, .yscale = 2, .eq = 0}, /* 3.07 MHz */
+};
+
+/* adc rate configuration table, in ascending order of rates */
+/* possible adc range is 2.5 MHz to 20 MHz. */
+static struct aux_rate_tbl adc_rtbl[] = {
+ /* For ahb = 166.67 MHz */
+ {.xscale = 1, .yscale = 31, .eq = 0}, /* 2.68 MHz */
+ {.xscale = 2, .yscale = 21, .eq = 0}, /* 7.94 MHz */
+ {.xscale = 4, .yscale = 21, .eq = 0}, /* 15.87 MHz */
+ {.xscale = 10, .yscale = 42, .eq = 0}, /* 19.84 MHz */
+};
+
+/* General synth rate configuration table, in ascending order of rates */
+static struct frac_rate_tbl gen_rtbl[] = {
+ /* For vco1div4 = 250 MHz */
+ {.div = 0x14000}, /* 25 MHz */
+ {.div = 0x0A000}, /* 50 MHz */
+ {.div = 0x05000}, /* 100 MHz */
+ {.div = 0x02000}, /* 250 MHz */
+};
+
+/* clock parents */
+static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", };
+static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", };
+static const char *uart0_parents[] = { "pll5_clk", "uart_synth_gate_clk", };
+static const char *c3_parents[] = { "pll5_clk", "c3_synth_gate_clk", };
+static const char *gmac_phy_input_parents[] = { "gmii_125m_pad_clk", "pll2_clk",
+ "osc_25m_clk", };
+static const char *gmac_phy_parents[] = { "gmac_phy_input_mux_clk",
+ "gmac_phy_synth_gate_clk", };
+static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", };
+static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_synth_clk", };
+static const char *i2s_src_parents[] = { "vco1div2_clk", "none", "pll3_clk",
+ "i2s_src_pad_clk", };
+static const char *i2s_ref_parents[] = { "i2s_src_mux_clk", "i2s_prs1_clk", };
+static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
+ "pll3_clk", };
+static const char *gen_synth2_3_parents[] = { "vco1div4_clk", "vco3div2_clk",
+ "pll2_clk", };
+static const char *rmii_phy_parents[] = { "ras_tx50_clk", "none",
+ "ras_pll2_clk", "ras_synth0_clk", };
+static const char *smii_rgmii_phy_parents[] = { "none", "ras_tx125_clk",
+ "ras_pll2_clk", "ras_synth0_clk", };
+static const char *uart_parents[] = { "ras_apb_clk", "gen_synth3_clk", };
+static const char *i2c_parents[] = { "ras_apb_clk", "gen_synth1_clk", };
+static const char *ssp1_parents[] = { "ras_apb_clk", "gen_synth1_clk",
+ "ras_plclk0_clk", };
+static const char *pci_parents[] = { "ras_pll3_clk", "gen_synth2_clk", };
+static const char *tdm_parents[] = { "ras_pll3_clk", "gen_synth1_clk", };
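+
+/*
+ * The position of each name in the parent arrays above presumably matches
+ * the value programmed into the corresponding mux field; "none" entries
+ * mark selector values with no usable parent for that consumer (e.g.
+ * selector 1 of rmii_phy_parents and selector 0 of smii_rgmii_phy_parents).
+ */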
+
+void __init spear1310_clk_init(void)
+{
+ struct clk *clk, *clk1;
+
+ clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+ clk_register_clkdev(clk, "apb_pclk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
+ 32000);
+ clk_register_clkdev(clk, "osc_32k_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT,
+ 24000000);
+ clk_register_clkdev(clk, "osc_24m_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_25m_clk", NULL, CLK_IS_ROOT,
+ 25000000);
+ clk_register_clkdev(clk, "osc_25m_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "gmii_125m_pad_clk", NULL,
+ CLK_IS_ROOT, 125000000);
+ clk_register_clkdev(clk, "gmii_125m_pad_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
+ CLK_IS_ROOT, 12288000);
+ clk_register_clkdev(clk, "i2s_src_pad_clk", NULL);
+
+ /* clock derived from 32 KHz osc clk */
+ clk = clk_register_gate(NULL, "rtc-spear", "osc_32k_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_RTC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "fc900000.rtc");
+
+ /* clock derived from 24 or 25 MHz osc clk */
+ /* vco-pll */
+ clk = clk_register_mux(NULL, "vco1_mux_clk", vco_parents,
+ ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
+ SPEAR1310_PLL1_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "vco1_mux_clk", NULL);
+ clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mux_clk",
+ 0, SPEAR1310_PLL1_CTR, SPEAR1310_PLL1_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco1_clk", NULL);
+ clk_register_clkdev(clk1, "pll1_clk", NULL);
+
+ clk = clk_register_mux(NULL, "vco2_mux_clk", vco_parents,
+ ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
+ SPEAR1310_PLL2_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "vco2_mux_clk", NULL);
+ clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mux_clk",
+ 0, SPEAR1310_PLL2_CTR, SPEAR1310_PLL2_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco2_clk", NULL);
+ clk_register_clkdev(clk1, "pll2_clk", NULL);
+
+ clk = clk_register_mux(NULL, "vco3_mux_clk", vco_parents,
+ ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
+ SPEAR1310_PLL3_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "vco3_mux_clk", NULL);
+ clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mux_clk",
+ 0, SPEAR1310_PLL3_CTR, SPEAR1310_PLL3_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco3_clk", NULL);
+ clk_register_clkdev(clk1, "pll3_clk", NULL);
+
+ clk = clk_register_vco_pll("vco4_clk", "pll4_clk", NULL, "osc_24m_clk",
+ 0, SPEAR1310_PLL4_CTR, SPEAR1310_PLL4_FRQ, pll4_rtbl,
+ ARRAY_SIZE(pll4_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco4_clk", NULL);
+ clk_register_clkdev(clk1, "pll4_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "pll5_clk", "osc_24m_clk", 0,
+ 48000000);
+ clk_register_clkdev(clk, "pll5_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "pll6_clk", "osc_25m_clk", 0,
+ 25000000);
+ clk_register_clkdev(clk, "pll6_clk", NULL);
+
+ /* vco div n clocks */
+ clk = clk_register_fixed_factor(NULL, "vco1div2_clk", "vco1_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, "vco1div2_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "vco1div4_clk", "vco1_clk", 0, 1,
+ 4);
+ clk_register_clkdev(clk, "vco1div4_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "vco2div2_clk", "vco2_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, "vco2div2_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "vco3div2_clk", "vco3_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, "vco3div2_clk", NULL);
+
+ /* peripherals */
+ clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1,
+ 128);
+ clk = clk_register_gate(NULL, "thermal_gate_clk", "thermal_clk", 0,
+ SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_THSENS_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_thermal");
+
+ /* clock derived from pll4 clk */
+ clk = clk_register_fixed_factor(NULL, "ddr_clk", "pll4_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, "ddr_clk", NULL);
+
+ /* clock derived from pll1 clk */
+ clk = clk_register_fixed_factor(NULL, "cpu_clk", "pll1_clk", 0, 1, 2);
+ clk_register_clkdev(clk, "cpu_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "wdt_clk", "cpu_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, NULL, "ec800620.wdt");
+
+ clk = clk_register_fixed_factor(NULL, "ahb_clk", "pll1_clk", 0, 1,
+ 6);
+ clk_register_clkdev(clk, "ahb_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "apb_clk", "pll1_clk", 0, 1,
+ 12);
+ clk_register_clkdev(clk, "apb_clk", NULL);
+
+ /* gpt clocks */
+ clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt_parents,
+ ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+ SPEAR1310_GPT0_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gpt0_mux_clk", NULL);
+ clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mux_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "gpt0");
+
+ clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt_parents,
+ ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+ SPEAR1310_GPT1_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
+ clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "gpt1");
+
+ clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt_parents,
+ ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+ SPEAR1310_GPT2_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
+ clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+ SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT2_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "gpt2");
+
+ clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt_parents,
+ ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+ SPEAR1310_GPT3_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
+ clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+ SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT3_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "gpt3");
+
+ /* others */
+ clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
+ "vco1div2_clk", 0, SPEAR1310_UART_CLK_SYNT, NULL,
+ aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "uart_synth_clk", NULL);
+ clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+ ARRAY_SIZE(uart0_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+ SPEAR1310_UART_CLK_SHIFT, SPEAR1310_UART_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uart0_clk", "uart0_mux_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UART_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0000000.serial");
+
+ clk = clk_register_aux("sdhci_synth_clk", "sdhci_synth_gate_clk",
+ "vco1div2_clk", 0, SPEAR1310_SDHCI_CLK_SYNT, NULL,
+ aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "sdhci_synth_clk", NULL);
+ clk_register_clkdev(clk1, "sdhci_synth_gate_clk", NULL);
+
+ clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_synth_gate_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SDHCI_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b3000000.sdhci");
+
+ clk = clk_register_aux("cfxd_synth_clk", "cfxd_synth_gate_clk",
+ "vco1div2_clk", 0, SPEAR1310_CFXD_CLK_SYNT, NULL,
+ aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "cfxd_synth_clk", NULL);
+ clk_register_clkdev(clk1, "cfxd_synth_gate_clk", NULL);
+
+ clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_synth_gate_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CFXD_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b2800000.cf");
+ clk_register_clkdev(clk, NULL, "arasan_xd");
+
+ clk = clk_register_aux("c3_synth_clk", "c3_synth_gate_clk",
+ "vco1div2_clk", 0, SPEAR1310_C3_CLK_SYNT, NULL,
+ aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "c3_synth_clk", NULL);
+ clk_register_clkdev(clk1, "c3_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "c3_mux_clk", c3_parents,
+ ARRAY_SIZE(c3_parents), 0, SPEAR1310_PERIP_CLK_CFG,
+ SPEAR1310_C3_CLK_SHIFT, SPEAR1310_C3_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "c3_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "c3_clk", "c3_mux_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_C3_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "c3");
+
+ /* gmac */
+ clk = clk_register_mux(NULL, "gmac_phy_input_mux_clk",
+ gmac_phy_input_parents,
+ ARRAY_SIZE(gmac_phy_input_parents), 0,
+ SPEAR1310_GMAC_CLK_CFG,
+ SPEAR1310_GMAC_PHY_INPUT_CLK_SHIFT,
+ SPEAR1310_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gmac_phy_input_mux_clk", NULL);
+
+ clk = clk_register_aux("gmac_phy_synth_clk", "gmac_phy_synth_gate_clk",
+ "gmac_phy_input_mux_clk", 0, SPEAR1310_GMAC_CLK_SYNT,
+ NULL, gmac_rtbl, ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "gmac_phy_synth_clk", NULL);
+ clk_register_clkdev(clk1, "gmac_phy_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "gmac_phy_mux_clk", gmac_phy_parents,
+ ARRAY_SIZE(gmac_phy_parents), 0,
+ SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GMAC_PHY_CLK_SHIFT,
+ SPEAR1310_GMAC_PHY_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "stmmacphy.0");
+
+ /* clcd */
+ clk = clk_register_mux(NULL, "clcd_synth_mux_clk", clcd_synth_parents,
+ ARRAY_SIZE(clcd_synth_parents), 0,
+ SPEAR1310_CLCD_CLK_SYNT, SPEAR1310_CLCD_SYNT_CLK_SHIFT,
+ SPEAR1310_CLCD_SYNT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "clcd_synth_mux_clk", NULL);
+
+ clk = clk_register_frac("clcd_synth_clk", "clcd_synth_mux_clk", 0,
+ SPEAR1310_CLCD_CLK_SYNT, clcd_rtbl,
+ ARRAY_SIZE(clcd_rtbl), &_lock);
+ clk_register_clkdev(clk, "clcd_synth_clk", NULL);
+
+ clk = clk_register_mux(NULL, "clcd_pixel_mux_clk", clcd_pixel_parents,
+ ARRAY_SIZE(clcd_pixel_parents), 0,
+ SPEAR1310_PERIP_CLK_CFG, SPEAR1310_CLCD_CLK_SHIFT,
+ SPEAR1310_CLCD_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "clcd_pixel_clk", NULL);
+
+ clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mux_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CLCD_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "clcd_clk", NULL);
+
+ /* i2s */
+ clk = clk_register_mux(NULL, "i2s_src_mux_clk", i2s_src_parents,
+ ARRAY_SIZE(i2s_src_parents), 0, SPEAR1310_I2S_CLK_CFG,
+ SPEAR1310_I2S_SRC_CLK_SHIFT, SPEAR1310_I2S_SRC_CLK_MASK,
+ 0, &_lock);
+ clk_register_clkdev(clk, "i2s_src_clk", NULL);
+
+ clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mux_clk", 0,
+ SPEAR1310_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl,
+ ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL);
+ clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
+
+ clk = clk_register_mux(NULL, "i2s_ref_mux_clk", i2s_ref_parents,
+ ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1310_I2S_CLK_CFG,
+ SPEAR1310_I2S_REF_SHIFT, SPEAR1310_I2S_REF_SEL_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2s_ref_clk", NULL);
+
+ clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mux_clk", 0,
+ SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_I2S_REF_PAD_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL);
+
+ clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gate_clk",
+ "i2s_ref_pad_clk", 0, SPEAR1310_I2S_CLK_CFG,
+ &i2s_sclk_masks, i2s_sclk_rtbl,
+ ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "i2s_sclk_clk", NULL);
+ clk_register_clkdev(clk1, "i2s_sclk_gate_clk", NULL);
+
+ /* clock derived from ahb clk */
+ clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_I2C0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0280000.i2c");
+
+ clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_DMA_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "ea800000.dma");
+ clk_register_clkdev(clk, NULL, "eb000000.dma");
+
+ clk = clk_register_gate(NULL, "jpeg_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_JPEG_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b2000000.jpeg");
+
+ clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GMAC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e2000000.eth");
+
+ clk = clk_register_gate(NULL, "fsmc_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_FSMC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b0000000.flash");
+
+ clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SMI_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "ea000000.flash");
+
+ clk = clk_register_gate(NULL, "usbh0_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UHC0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "usbh.0_clk", NULL);
+
+ clk = clk_register_gate(NULL, "usbh1_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UHC1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "usbh.1_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uoc_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UOC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "uoc");
+
+ clk = clk_register_gate(NULL, "pcie_sata_0_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_PCIE_SATA_0_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, NULL, "dw_pcie.0");
+ clk_register_clkdev(clk, NULL, "ahci.0");
+
+ clk = clk_register_gate(NULL, "pcie_sata_1_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_PCIE_SATA_1_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, NULL, "dw_pcie.1");
+ clk_register_clkdev(clk, NULL, "ahci.1");
+
+ clk = clk_register_gate(NULL, "pcie_sata_2_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_PCIE_SATA_2_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, NULL, "dw_pcie.2");
+ clk_register_clkdev(clk, NULL, "ahci.2");
+
+ clk = clk_register_gate(NULL, "sysram0_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SYSRAM0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "sysram0_clk", NULL);
+
+ clk = clk_register_gate(NULL, "sysram1_clk", "ahb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SYSRAM1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "sysram1_clk", NULL);
+
+ clk = clk_register_aux("adc_synth_clk", "adc_synth_gate_clk", "ahb_clk",
+ 0, SPEAR1310_ADC_CLK_SYNT, NULL, adc_rtbl,
+ ARRAY_SIZE(adc_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "adc_synth_clk", NULL);
+ clk_register_clkdev(clk1, "adc_synth_gate_clk", NULL);
+
+ clk = clk_register_gate(NULL, "adc_clk", "adc_synth_gate_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_ADC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "adc_clk");
+
+ /* clock derived from apb clk */
+ clk = clk_register_gate(NULL, "ssp0_clk", "apb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SSP_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0100000.spi");
+
+ clk = clk_register_gate(NULL, "gpio0_clk", "apb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPIO0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0600000.gpio");
+
+ clk = clk_register_gate(NULL, "gpio1_clk", "apb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPIO1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0680000.gpio");
+
+ clk = clk_register_gate(NULL, "i2s0_clk", "apb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_I2S0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0180000.i2s");
+
+ clk = clk_register_gate(NULL, "i2s1_clk", "apb_clk", 0,
+ SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_I2S1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0200000.i2s");
+
+ clk = clk_register_gate(NULL, "kbd_clk", "apb_clk", 0,
+ SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_KBD_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0300000.kbd");
+
+ /* RAS clks */
+ clk = clk_register_mux(NULL, "gen_synth0_1_mux_clk",
+ gen_synth0_1_parents, ARRAY_SIZE(gen_synth0_1_parents),
+ 0, SPEAR1310_PLL_CFG, SPEAR1310_RAS_SYNT0_1_CLK_SHIFT,
+ SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gen_synth0_1_clk", NULL);
+
+ clk = clk_register_mux(NULL, "gen_synth2_3_mux_clk",
+ gen_synth2_3_parents, ARRAY_SIZE(gen_synth2_3_parents),
+ 0, SPEAR1310_PLL_CFG, SPEAR1310_RAS_SYNT2_3_CLK_SHIFT,
+ SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gen_synth2_3_clk", NULL);
+
+ clk = clk_register_frac("gen_synth0_clk", "gen_synth0_1_clk", 0,
+ SPEAR1310_RAS_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+ &_lock);
+ clk_register_clkdev(clk, "gen_synth0_clk", NULL);
+
+ clk = clk_register_frac("gen_synth1_clk", "gen_synth0_1_clk", 0,
+ SPEAR1310_RAS_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+ &_lock);
+ clk_register_clkdev(clk, "gen_synth1_clk", NULL);
+
+ clk = clk_register_frac("gen_synth2_clk", "gen_synth2_3_clk", 0,
+ SPEAR1310_RAS_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+ &_lock);
+ clk_register_clkdev(clk, "gen_synth2_clk", NULL);
+
+ clk = clk_register_frac("gen_synth3_clk", "gen_synth2_3_clk", 0,
+ SPEAR1310_RAS_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+ &_lock);
+ clk_register_clkdev(clk, "gen_synth3_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_osc_24m_clk", "osc_24m_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_24M_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_osc_24m_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_osc_25m_clk", "osc_25m_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_25M_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_osc_25m_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_osc_32k_clk", "osc_32k_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_32K_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_osc_32k_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_pll2_clk", "pll2_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_PLL2_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_pll2_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_pll3_clk", "pll3_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_PLL3_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_pll3_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_tx125_clk", "gmii_125m_pad_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_C125M_PAD_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_tx125_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "ras_30m_fixed_clk", "pll5_clk", 0,
+ 30000000);
+ clk = clk_register_gate(NULL, "ras_30m_clk", "ras_30m_fixed_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_C30M_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_30m_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "ras_48m_fixed_clk", "pll5_clk", 0,
+ 48000000);
+ clk = clk_register_gate(NULL, "ras_48m_clk", "ras_48m_fixed_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_C48M_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_48m_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_ahb_clk", "ahb_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_ACLK_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_ahb_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_apb_clk", "apb_clk", 0,
+ SPEAR1310_RAS_CLK_ENB, SPEAR1310_PCLK_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ras_apb_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "ras_plclk0_clk", NULL, CLK_IS_ROOT,
+ 50000000);
+
+ clk = clk_register_fixed_rate(NULL, "ras_tx50_clk", NULL, CLK_IS_ROOT,
+ 50000000);
+
+ clk = clk_register_gate(NULL, "can0_clk", "apb_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_CAN0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "c_can_platform.0");
+
+ clk = clk_register_gate(NULL, "can1_clk", "apb_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_CAN1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "c_can_platform.1");
+
+ clk = clk_register_gate(NULL, "ras_smii0_clk", "ras_ahb_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_MII0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5c400000.eth");
+
+ clk = clk_register_gate(NULL, "ras_smii1_clk", "ras_ahb_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_MII1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5c500000.eth");
+
+ clk = clk_register_gate(NULL, "ras_smii2_clk", "ras_ahb_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_MII2_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5c600000.eth");
+
+ clk = clk_register_gate(NULL, "ras_rgmii_clk", "ras_ahb_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_GMII_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5c700000.eth");
+
+ clk = clk_register_mux(NULL, "smii_rgmii_phy_mux_clk",
+ smii_rgmii_phy_parents,
+ ARRAY_SIZE(smii_rgmii_phy_parents), 0,
+ SPEAR1310_RAS_CTRL_REG1,
+ SPEAR1310_SMII_RGMII_PHY_CLK_SHIFT,
+ SPEAR1310_PHY_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "stmmacphy.1");
+ clk_register_clkdev(clk, NULL, "stmmacphy.2");
+ clk_register_clkdev(clk, NULL, "stmmacphy.4");
+
+ clk = clk_register_mux(NULL, "rmii_phy_mux_clk", rmii_phy_parents,
+ ARRAY_SIZE(rmii_phy_parents), 0,
+ SPEAR1310_RAS_CTRL_REG1, SPEAR1310_RMII_PHY_CLK_SHIFT,
+ SPEAR1310_PHY_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "stmmacphy.3");
+
+ clk = clk_register_mux(NULL, "uart1_mux_clk", uart_parents,
+ ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_UART1_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+ 0, &_lock);
+ clk_register_clkdev(clk, "uart1_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uart1_clk", "uart1_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5c800000.serial");
+
+ clk = clk_register_mux(NULL, "uart2_mux_clk", uart_parents,
+ ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_UART2_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+ 0, &_lock);
+ clk_register_clkdev(clk, "uart2_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uart2_clk", "uart2_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART2_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5c900000.serial");
+
+ clk = clk_register_mux(NULL, "uart3_mux_clk", uart_parents,
+ ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_UART3_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+ 0, &_lock);
+ clk_register_clkdev(clk, "uart3_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uart3_clk", "uart3_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART3_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5ca00000.serial");
+
+ clk = clk_register_mux(NULL, "uart4_mux_clk", uart_parents,
+ ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_UART4_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+ 0, &_lock);
+ clk_register_clkdev(clk, "uart4_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uart4_clk", "uart4_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART4_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5cb00000.serial");
+
+ clk = clk_register_mux(NULL, "uart5_mux_clk", uart_parents,
+ ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_UART5_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
+ 0, &_lock);
+ clk_register_clkdev(clk, "uart5_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uart5_clk", "uart5_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART5_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5cc00000.serial");
+
+ clk = clk_register_mux(NULL, "i2c1_mux_clk", i2c_parents,
+ ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_I2C1_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2c1_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "i2c1_clk", "i2c1_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5cd00000.i2c");
+
+ clk = clk_register_mux(NULL, "i2c2_mux_clk", i2c_parents,
+ ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_I2C2_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2c2_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "i2c2_clk", "i2c2_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C2_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5ce00000.i2c");
+
+ clk = clk_register_mux(NULL, "i2c3_mux_clk", i2c_parents,
+ ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_I2C3_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2c3_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "i2c3_clk", "i2c3_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C3_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5cf00000.i2c");
+
+ clk = clk_register_mux(NULL, "i2c4_mux_clk", i2c_parents,
+ ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_I2C4_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2c4_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "i2c4_clk", "i2c4_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C4_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5d000000.i2c");
+
+ clk = clk_register_mux(NULL, "i2c5_mux_clk", i2c_parents,
+ ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_I2C5_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2c5_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "i2c5_clk", "i2c5_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C5_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5d100000.i2c");
+
+ clk = clk_register_mux(NULL, "i2c6_mux_clk", i2c_parents,
+ ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_I2C6_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2c6_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "i2c6_clk", "i2c6_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C6_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5d200000.i2c");
+
+ clk = clk_register_mux(NULL, "i2c7_mux_clk", i2c_parents,
+ ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_I2C7_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2c7_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "i2c7_clk", "i2c7_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C7_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5d300000.i2c");
+
+ clk = clk_register_mux(NULL, "ssp1_mux_clk", ssp1_parents,
+ ARRAY_SIZE(ssp1_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_SSP1_CLK_SHIFT, SPEAR1310_SSP1_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ssp1_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ssp1_clk", "ssp1_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_SSP1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "5d400000.spi");
+
+ clk = clk_register_mux(NULL, "pci_mux_clk", pci_parents,
+ ARRAY_SIZE(pci_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_PCI_CLK_SHIFT, SPEAR1310_PCI_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "pci_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "pci_clk", "pci_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_PCI_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "pci");
+
+ clk = clk_register_mux(NULL, "tdm1_mux_clk", tdm_parents,
+ ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_TDM1_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "tdm1_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "tdm1_clk", "tdm1_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "tdm_hdlc.0");
+
+ clk = clk_register_mux(NULL, "tdm2_mux_clk", tdm_parents,
+ ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
+ SPEAR1310_TDM2_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "tdm2_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "tdm2_clk", "tdm2_mux_clk", 0,
+ SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM2_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "tdm_hdlc.1");
+}
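+
+/*
+ * Consumer-side sketch (illustrative only, not part of this driver): with
+ * the clkdev lookups registered above, a peripheral driver can obtain and
+ * enable its clock through the generic clk API. The device name matched
+ * below comes from the registrations in spear1310_clk_init(); the probe
+ * context itself is hypothetical.
+ *
+ *	struct clk *clk;
+ *
+ *	clk = clk_get(&pdev->dev, NULL);   (matches e.g. "e0000000.serial")
+ *	if (IS_ERR(clk))
+ *		return PTR_ERR(clk);
+ *	clk_prepare_enable(clk);
+ *	...
+ *	clk_disable_unprepare(clk);
+ *	clk_put(clk);
+ */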
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
new file mode 100644
index 00000000000..f130919d5bf
--- /dev/null
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -0,0 +1,964 @@
+/*
+ * drivers/clk/spear/spear1340_clock.c
+ *
+ * SPEAr1340 machine clock framework source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock_types.h>
+#include <mach/spear.h>
+#include "clk.h"
+
+/* Clock Configuration Registers */
+#define SPEAR1340_SYS_CLK_CTRL (VA_MISC_BASE + 0x200)
+ #define SPEAR1340_HCLK_SRC_SEL_SHIFT 27
+ #define SPEAR1340_HCLK_SRC_SEL_MASK 1
+ #define SPEAR1340_SCLK_SRC_SEL_SHIFT 23
+ #define SPEAR1340_SCLK_SRC_SEL_MASK 3
+
+/* PLL related registers and bit values */
+#define SPEAR1340_PLL_CFG (VA_MISC_BASE + 0x210)
+ /* PLL_CFG bit values */
+ #define SPEAR1340_CLCD_SYNT_CLK_MASK 1
+ #define SPEAR1340_CLCD_SYNT_CLK_SHIFT 31
+ #define SPEAR1340_GEN_SYNT2_3_CLK_SHIFT 29
+ #define SPEAR1340_GEN_SYNT_CLK_MASK 2
+ #define SPEAR1340_GEN_SYNT0_1_CLK_SHIFT 27
+ #define SPEAR1340_PLL_CLK_MASK 2
+ #define SPEAR1340_PLL3_CLK_SHIFT 24
+ #define SPEAR1340_PLL2_CLK_SHIFT 22
+ #define SPEAR1340_PLL1_CLK_SHIFT 20
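+ /*
+  * Note: despite the _MASK suffix, the values above (1 or 2) look like
+  * mux field widths in bits - one bit selects between two parents, two
+  * bits among up to four - which matches the sizes of the parent arrays
+  * used with the corresponding muxes below. This reading is inferred
+  * from the code, not from the SoC manual.
+  */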
+
+#define SPEAR1340_PLL1_CTR (VA_MISC_BASE + 0x214)
+#define SPEAR1340_PLL1_FRQ (VA_MISC_BASE + 0x218)
+#define SPEAR1340_PLL2_CTR (VA_MISC_BASE + 0x220)
+#define SPEAR1340_PLL2_FRQ (VA_MISC_BASE + 0x224)
+#define SPEAR1340_PLL3_CTR (VA_MISC_BASE + 0x22C)
+#define SPEAR1340_PLL3_FRQ (VA_MISC_BASE + 0x230)
+#define SPEAR1340_PLL4_CTR (VA_MISC_BASE + 0x238)
+#define SPEAR1340_PLL4_FRQ (VA_MISC_BASE + 0x23C)
+#define SPEAR1340_PERIP_CLK_CFG (VA_MISC_BASE + 0x244)
+ /* PERIP_CLK_CFG bit values */
+ #define SPEAR1340_SPDIF_CLK_MASK 1
+ #define SPEAR1340_SPDIF_OUT_CLK_SHIFT 15
+ #define SPEAR1340_SPDIF_IN_CLK_SHIFT 14
+ #define SPEAR1340_GPT3_CLK_SHIFT 13
+ #define SPEAR1340_GPT2_CLK_SHIFT 12
+ #define SPEAR1340_GPT_CLK_MASK 1
+ #define SPEAR1340_GPT1_CLK_SHIFT 9
+ #define SPEAR1340_GPT0_CLK_SHIFT 8
+ #define SPEAR1340_UART_CLK_MASK 2
+ #define SPEAR1340_UART1_CLK_SHIFT 6
+ #define SPEAR1340_UART0_CLK_SHIFT 4
+ #define SPEAR1340_CLCD_CLK_MASK 2
+ #define SPEAR1340_CLCD_CLK_SHIFT 2
+ #define SPEAR1340_C3_CLK_MASK 1
+ #define SPEAR1340_C3_CLK_SHIFT 1
+
+#define SPEAR1340_GMAC_CLK_CFG (VA_MISC_BASE + 0x248)
+ #define SPEAR1340_GMAC_PHY_CLK_MASK 1
+ #define SPEAR1340_GMAC_PHY_CLK_SHIFT 2
+ #define SPEAR1340_GMAC_PHY_INPUT_CLK_MASK 2
+ #define SPEAR1340_GMAC_PHY_INPUT_CLK_SHIFT 0
+
+#define SPEAR1340_I2S_CLK_CFG (VA_MISC_BASE + 0x24C)
+ /* I2S_CLK_CFG register mask */
+ #define SPEAR1340_I2S_SCLK_X_MASK 0x1F
+ #define SPEAR1340_I2S_SCLK_X_SHIFT 27
+ #define SPEAR1340_I2S_SCLK_Y_MASK 0x1F
+ #define SPEAR1340_I2S_SCLK_Y_SHIFT 22
+ #define SPEAR1340_I2S_SCLK_EQ_SEL_SHIFT 21
+ #define SPEAR1340_I2S_SCLK_SYNTH_ENB 20
+ #define SPEAR1340_I2S_PRS1_CLK_X_MASK 0xFF
+ #define SPEAR1340_I2S_PRS1_CLK_X_SHIFT 12
+ #define SPEAR1340_I2S_PRS1_CLK_Y_MASK 0xFF
+ #define SPEAR1340_I2S_PRS1_CLK_Y_SHIFT 4
+ #define SPEAR1340_I2S_PRS1_EQ_SEL_SHIFT 3
+ #define SPEAR1340_I2S_REF_SEL_MASK 1
+ #define SPEAR1340_I2S_REF_SHIFT 2
+ #define SPEAR1340_I2S_SRC_CLK_MASK 2
+ #define SPEAR1340_I2S_SRC_CLK_SHIFT 0
+
+#define SPEAR1340_C3_CLK_SYNT (VA_MISC_BASE + 0x250)
+#define SPEAR1340_UART0_CLK_SYNT (VA_MISC_BASE + 0x254)
+#define SPEAR1340_UART1_CLK_SYNT (VA_MISC_BASE + 0x258)
+#define SPEAR1340_GMAC_CLK_SYNT (VA_MISC_BASE + 0x25C)
+#define SPEAR1340_SDHCI_CLK_SYNT (VA_MISC_BASE + 0x260)
+#define SPEAR1340_CFXD_CLK_SYNT (VA_MISC_BASE + 0x264)
+#define SPEAR1340_ADC_CLK_SYNT (VA_MISC_BASE + 0x270)
+#define SPEAR1340_AMBA_CLK_SYNT (VA_MISC_BASE + 0x274)
+#define SPEAR1340_CLCD_CLK_SYNT (VA_MISC_BASE + 0x27C)
+#define SPEAR1340_SYS_CLK_SYNT (VA_MISC_BASE + 0x284)
+#define SPEAR1340_GEN_CLK_SYNT0 (VA_MISC_BASE + 0x28C)
+#define SPEAR1340_GEN_CLK_SYNT1 (VA_MISC_BASE + 0x294)
+#define SPEAR1340_GEN_CLK_SYNT2 (VA_MISC_BASE + 0x29C)
+#define SPEAR1340_GEN_CLK_SYNT3 (VA_MISC_BASE + 0x304)
+#define SPEAR1340_PERIP1_CLK_ENB (VA_MISC_BASE + 0x30C)
+ #define SPEAR1340_RTC_CLK_ENB 31
+ #define SPEAR1340_ADC_CLK_ENB 30
+ #define SPEAR1340_C3_CLK_ENB 29
+ #define SPEAR1340_CLCD_CLK_ENB 27
+ #define SPEAR1340_DMA_CLK_ENB 25
+ #define SPEAR1340_GPIO1_CLK_ENB 24
+ #define SPEAR1340_GPIO0_CLK_ENB 23
+ #define SPEAR1340_GPT1_CLK_ENB 22
+ #define SPEAR1340_GPT0_CLK_ENB 21
+ #define SPEAR1340_I2S_PLAY_CLK_ENB 20
+ #define SPEAR1340_I2S_REC_CLK_ENB 19
+ #define SPEAR1340_I2C0_CLK_ENB 18
+ #define SPEAR1340_SSP_CLK_ENB 17
+ #define SPEAR1340_UART0_CLK_ENB 15
+ #define SPEAR1340_PCIE_SATA_CLK_ENB 12
+ #define SPEAR1340_UOC_CLK_ENB 11
+ #define SPEAR1340_UHC1_CLK_ENB 10
+ #define SPEAR1340_UHC0_CLK_ENB 9
+ #define SPEAR1340_GMAC_CLK_ENB 8
+ #define SPEAR1340_CFXD_CLK_ENB 7
+ #define SPEAR1340_SDHCI_CLK_ENB 6
+ #define SPEAR1340_SMI_CLK_ENB 5
+ #define SPEAR1340_FSMC_CLK_ENB 4
+ #define SPEAR1340_SYSRAM0_CLK_ENB 3
+ #define SPEAR1340_SYSRAM1_CLK_ENB 2
+ #define SPEAR1340_SYSROM_CLK_ENB 1
+ #define SPEAR1340_BUS_CLK_ENB 0
+
+#define SPEAR1340_PERIP2_CLK_ENB (VA_MISC_BASE + 0x310)
+ #define SPEAR1340_THSENS_CLK_ENB 8
+ #define SPEAR1340_I2S_REF_PAD_CLK_ENB 7
+ #define SPEAR1340_ACP_CLK_ENB 6
+ #define SPEAR1340_GPT3_CLK_ENB 5
+ #define SPEAR1340_GPT2_CLK_ENB 4
+ #define SPEAR1340_KBD_CLK_ENB 3
+ #define SPEAR1340_CPU_DBG_CLK_ENB 2
+ #define SPEAR1340_DDR_CORE_CLK_ENB 1
+ #define SPEAR1340_DDR_CTRL_CLK_ENB 0
+
+#define SPEAR1340_PERIP3_CLK_ENB (VA_MISC_BASE + 0x314)
+ #define SPEAR1340_PLGPIO_CLK_ENB 18
+ #define SPEAR1340_VIDEO_DEC_CLK_ENB 16
+ #define SPEAR1340_VIDEO_ENC_CLK_ENB 15
+ #define SPEAR1340_SPDIF_OUT_CLK_ENB 13
+ #define SPEAR1340_SPDIF_IN_CLK_ENB 12
+ #define SPEAR1340_VIDEO_IN_CLK_ENB 11
+ #define SPEAR1340_CAM0_CLK_ENB 10
+ #define SPEAR1340_CAM1_CLK_ENB 9
+ #define SPEAR1340_CAM2_CLK_ENB 8
+ #define SPEAR1340_CAM3_CLK_ENB 7
+ #define SPEAR1340_MALI_CLK_ENB 6
+ #define SPEAR1340_CEC0_CLK_ENB 5
+ #define SPEAR1340_CEC1_CLK_ENB 4
+ #define SPEAR1340_PWM_CLK_ENB 3
+ #define SPEAR1340_I2C1_CLK_ENB 2
+ #define SPEAR1340_UART1_CLK_ENB 1
+
+static DEFINE_SPINLOCK(_lock);
+
+/* pll rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll_rtbl[] = {
+ /* PCLK 24MHz */
+ {.mode = 0, .m = 0x83, .n = 0x04, .p = 0x5}, /* vco 1572, pll 49.125 MHz */
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x3}, /* vco 1000, pll 125 MHz */
+ {.mode = 0, .m = 0x64, .n = 0x06, .p = 0x1}, /* vco 800, pll 400 MHz */
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x1}, /* vco 1000, pll 500 MHz */
+ {.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x1}, /* vco 1328, pll 664 MHz */
+ {.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x1}, /* vco 1600, pll 800 MHz */
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1 GHz, pll 1 GHz */
+ {.mode = 0, .m = 0x96, .n = 0x06, .p = 0x0}, /* vco 1200, pll 1200 MHz */
+};
+
+/* vco-pll4 rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll4_rtbl[] = {
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x2}, /* vco 1000, pll 250 MHz */
+ {.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x2}, /* vco 1328, pll 332 MHz */
+ {.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x2}, /* vco 1600, pll 400 MHz */
+ {.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1 GHz, pll 1 GHz */
+};
+
+/*
+ * All the entries below generate 166 MHz for
+ * different values of vco1div2.
+ */
+static struct frac_rate_tbl amba_synth_rtbl[] = {
+ {.div = 0x06062}, /* for vco1div2 = 500 MHz */
+ {.div = 0x04D1B}, /* for vco1div2 = 400 MHz */
+ {.div = 0x04000}, /* for vco1div2 = 332 MHz */
+ {.div = 0x03031}, /* for vco1div2 = 250 MHz */
+ {.div = 0x0268D}, /* for vco1div2 = 200 MHz */
+};
+
+/*
+ * Synthesizer clock derived from vco1div2. This clock is one of the
+ * possible clocks to feed the cpu directly.
+ * This synthesizer can be programmed to run the cpu at different clock
+ * frequencies.
+ * The following table provides configuration values to let the cpu run
+ * at 200, 250, 332, 400 or 500 MHz, considering different possibilities
+ * of the input (vco1div2) clock.
+ *
+ * --------------------------------------------------------------------
+ * vco1div2(MHz) fout(MHz) cpuclk = fout/2 div
+ * --------------------------------------------------------------------
+ * 400 200 100 0x04000
+ * 400 250 125 0x03333
+ * 400 332 166 0x0268D
+ * 400 400 200 0x02000
+ * --------------------------------------------------------------------
+ * 500 200 100 0x05000
+ * 500 250 125 0x04000
+ * 500 332 166 0x03031
+ * 500 400 200 0x02800
+ * 500 500 250 0x02000
+ * --------------------------------------------------------------------
+ * 664 200 100 0x06a38
+ * 664 250 125 0x054FD
+ * 664 332 166 0x04000
+ * 664 400 200 0x0351E
+ * 664 500 250 0x02A7E
+ * --------------------------------------------------------------------
+ * 800 200 100 0x08000
+ * 800 250 125 0x06666
+ * 800 332 166 0x04D18
+ * 800 400 200 0x04000
+ * 800 500 250 0x03333
+ * --------------------------------------------------------------------
+ * sys rate configuration table is in descending order of divisor.
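+ *
+ * A worked numeric check of this div encoding follows the table below.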
+ */
+static struct frac_rate_tbl sys_synth_rtbl[] = {
+ {.div = 0x08000},
+ {.div = 0x06a38},
+ {.div = 0x06666},
+ {.div = 0x054FD},
+ {.div = 0x05000},
+ {.div = 0x04D18},
+ {.div = 0x04000},
+ {.div = 0x0351E},
+ {.div = 0x03333},
+ {.div = 0x03031},
+ {.div = 0x02A7E},
+ {.div = 0x02800},
+ {.div = 0x0268D},
+ {.div = 0x02000},
+};
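+
+/*
+ * Worked check of the table above: the tabulated values are consistent
+ * with fout ~= vco1div2 * 0x2000 / div and cpuclk = fout / 2. For
+ * example, vco1div2 = 400 MHz with div = 0x3333 gives
+ * 400 * 0x2000 / 0x3333 ~= 250 MHz, i.e. a 125 MHz cpu clock, while
+ * div = 0x2000 passes 400 MHz through unscaled for a 200 MHz cpu clock.
+ * The encoding is inferred from the table, not from the databook.
+ */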
+
+/* aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl aux_rtbl[] = {
+ /* For VCO1div2 = 500 MHz */
+ {.xscale = 10, .yscale = 204, .eq = 0}, /* 12.29 MHz */
+ {.xscale = 4, .yscale = 21, .eq = 0}, /* 48 MHz */
+ {.xscale = 2, .yscale = 6, .eq = 0}, /* 83 MHz */
+ {.xscale = 2, .yscale = 4, .eq = 0}, /* 125 MHz */
+ {.xscale = 1, .yscale = 3, .eq = 1}, /* 166 MHz */
+ {.xscale = 1, .yscale = 2, .eq = 1}, /* 250 MHz */
+};
+
+/* gmac rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl gmac_rtbl[] = {
+ /* For gmac phy input clk */
+ {.xscale = 2, .yscale = 6, .eq = 0}, /* divided by 6 */
+ {.xscale = 2, .yscale = 4, .eq = 0}, /* divided by 4 */
+ {.xscale = 1, .yscale = 3, .eq = 1}, /* divided by 3 */
+ {.xscale = 1, .yscale = 2, .eq = 1}, /* divided by 2 */
+};
+
+/* clcd rate configuration table, in ascending order of rates */
+static struct frac_rate_tbl clcd_rtbl[] = {
+ {.div = 0x14000}, /* 25 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x1284B}, /* 27 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x0D8D3}, /* 58 MHz, for vco1div4 = 393 MHz */
+ {.div = 0x0B72C}, /* 58 MHz, for vco1div4 = 332 MHz */
+ {.div = 0x089EE}, /* 58 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x07BA0}, /* 65 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x06f1C}, /* 72 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x06E58}, /* 58 MHz, for vco1div4 = 200 MHz */
+ {.div = 0x06c1B}, /* 74 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x04A12}, /* 108 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x0378E}, /* 144 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x0360D}, /* 148 MHz, for vco1div4 = 250 MHz */
+ {.div = 0x035E0}, /* 148.5 MHz, for vco1div4 = 250 MHz */
+};
+
+/* i2s prescaler1 masks */
+static struct aux_clk_masks i2s_prs1_masks = {
+ .eq_sel_mask = AUX_EQ_SEL_MASK,
+ .eq_sel_shift = SPEAR1340_I2S_PRS1_EQ_SEL_SHIFT,
+ .eq1_mask = AUX_EQ1_SEL,
+ .eq2_mask = AUX_EQ2_SEL,
+ .xscale_sel_mask = SPEAR1340_I2S_PRS1_CLK_X_MASK,
+ .xscale_sel_shift = SPEAR1340_I2S_PRS1_CLK_X_SHIFT,
+ .yscale_sel_mask = SPEAR1340_I2S_PRS1_CLK_Y_MASK,
+ .yscale_sel_shift = SPEAR1340_I2S_PRS1_CLK_Y_SHIFT,
+};
+
+/* i2s sclk (bit clock) synthesizer masks */
+static struct aux_clk_masks i2s_sclk_masks = {
+ .eq_sel_mask = AUX_EQ_SEL_MASK,
+ .eq_sel_shift = SPEAR1340_I2S_SCLK_EQ_SEL_SHIFT,
+ .eq1_mask = AUX_EQ1_SEL,
+ .eq2_mask = AUX_EQ2_SEL,
+ .xscale_sel_mask = SPEAR1340_I2S_SCLK_X_MASK,
+ .xscale_sel_shift = SPEAR1340_I2S_SCLK_X_SHIFT,
+ .yscale_sel_mask = SPEAR1340_I2S_SCLK_Y_MASK,
+ .yscale_sel_shift = SPEAR1340_I2S_SCLK_Y_SHIFT,
+ .enable_bit = SPEAR1340_I2S_SCLK_SYNTH_ENB,
+};
+
+/* i2s prs1 aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl i2s_prs1_rtbl[] = {
+ /* For parent clk = 49.152 MHz */
+ {.xscale = 1, .yscale = 12, .eq = 0}, /* 2.048 MHz, smp freq = 8 kHz */
+ {.xscale = 11, .yscale = 96, .eq = 0}, /* 2.816 MHz, smp freq = 11 kHz */
+ {.xscale = 1, .yscale = 6, .eq = 0}, /* 4.096 MHz, smp freq = 16 kHz */
+ {.xscale = 11, .yscale = 48, .eq = 0}, /* 5.632 MHz, smp freq = 22 kHz */
+
+ /*
+ * with parent clk = 49.152 MHz, the generated freq is 8.192 MHz, smp freq = 32 kHz
+ * with parent clk = 12.288 MHz, the generated freq is 2.048 MHz, smp freq = 8 kHz
+ */
+ {.xscale = 1, .yscale = 3, .eq = 0},
+
+ /* For parent clk = 49.152 MHz */
+ {.xscale = 17, .yscale = 37, .eq = 0}, /* 11.289 MHz, smp freq = 44 kHz */
+ {.xscale = 1, .yscale = 2, .eq = 0}, /* 12.288 MHz, smp freq = 48 kHz */
+};
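+
+/*
+ * The sample-rate comments above line up with the prescaler output being
+ * 256 times the audio sample frequency: xscale = 1, yscale = 12 with a
+ * 49.152 MHz parent gives 49.152 / (2 * 12) = 2.048 MHz, and
+ * 2.048 MHz / 256 = 8 kHz. The factor of 256 is inferred from the table
+ * entries, not from the I2S block documentation.
+ */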
+
+/* i2s sclk aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl i2s_sclk_rtbl[] = {
+ /* For sclk = ref_clk * x/2/y */
+ {.xscale = 1, .yscale = 4, .eq = 0},
+ {.xscale = 1, .yscale = 2, .eq = 0},
+};
+
+/* adc rate configuration table, in ascending order of rates */
+/* possible adc range is 2.5 MHz to 20 MHz. */
+static struct aux_rate_tbl adc_rtbl[] = {
+ /* For ahb = 166.67 MHz */
+ {.xscale = 1, .yscale = 31, .eq = 0}, /* 2.68 MHz */
+ {.xscale = 2, .yscale = 21, .eq = 0}, /* 7.94 MHz */
+ {.xscale = 4, .yscale = 21, .eq = 0}, /* 15.87 MHz */
+ {.xscale = 10, .yscale = 42, .eq = 0}, /* 19.84 MHz */
+};
+
+/* General synth rate configuration table, in ascending order of rates */
+static struct frac_rate_tbl gen_rtbl[] = {
+ /* For vco1div4 = 250 MHz */
+ {.div = 0x1624E}, /* 22.5792 MHz */
+ {.div = 0x14585}, /* 24.576 MHz */
+ {.div = 0x14000}, /* 25 MHz */
+ {.div = 0x0B127}, /* 45.1584 MHz */
+ {.div = 0x0A000}, /* 50 MHz */
+ {.div = 0x061A8}, /* 81.92 MHz */
+ {.div = 0x05000}, /* 100 MHz */
+ {.div = 0x02800}, /* 200 MHz */
+ {.div = 0x02620}, /* 210 MHz */
+ {.div = 0x02460}, /* 220 MHz */
+ {.div = 0x022C0}, /* 230 MHz */
+ {.div = 0x02160}, /* 240 MHz */
+ {.div = 0x02000}, /* 250 MHz */
+};
+
+/* clock parents */
+static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", };
+static const char *sys_parents[] = { "none", "pll1_clk", "none", "none",
+ "sys_synth_clk", "none", "pll2_clk", "pll3_clk", };
+static const char *ahb_parents[] = { "cpu_div3_clk", "amba_synth_clk", };
+static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", };
+static const char *uart0_parents[] = { "pll5_clk", "osc_24m_clk",
+ "uart0_synth_gate_clk", };
+static const char *uart1_parents[] = { "pll5_clk", "osc_24m_clk",
+ "uart1_synth_gate_clk", };
+static const char *c3_parents[] = { "pll5_clk", "c3_synth_gate_clk", };
+static const char *gmac_phy_input_parents[] = { "gmii_125m_pad_clk", "pll2_clk",
+ "osc_25m_clk", };
+static const char *gmac_phy_parents[] = { "gmac_phy_input_mux_clk",
+ "gmac_phy_synth_gate_clk", };
+static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", };
+static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_synth_clk", };
+static const char *i2s_src_parents[] = { "vco1div2_clk", "pll2_clk", "pll3_clk",
+ "i2s_src_pad_clk", };
+static const char *i2s_ref_parents[] = { "i2s_src_mux_clk", "i2s_prs1_clk", };
+static const char *spdif_out_parents[] = { "i2s_src_pad_clk", "gen_synth2_clk",
+};
+static const char *spdif_in_parents[] = { "pll2_clk", "gen_synth3_clk", };
+
+static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
+ "pll3_clk", };
+static const char *gen_synth2_3_parents[] = { "vco1div4_clk", "vco3div2_clk",
+ "pll2_clk", };
+
+void __init spear1340_clk_init(void)
+{
+ struct clk *clk, *clk1;
+
+ clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+ clk_register_clkdev(clk, "apb_pclk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
+ 32000);
+ clk_register_clkdev(clk, "osc_32k_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT,
+ 24000000);
+ clk_register_clkdev(clk, "osc_24m_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_25m_clk", NULL, CLK_IS_ROOT,
+ 25000000);
+ clk_register_clkdev(clk, "osc_25m_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "gmii_125m_pad_clk", NULL,
+ CLK_IS_ROOT, 125000000);
+ clk_register_clkdev(clk, "gmii_125m_pad_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
+ CLK_IS_ROOT, 12288000);
+ clk_register_clkdev(clk, "i2s_src_pad_clk", NULL);
+
+ /* clock derived from 32 KHz osc clk */
+ clk = clk_register_gate(NULL, "rtc-spear", "osc_32k_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_RTC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "fc900000.rtc");
+
+ /* clock derived from 24 or 25 MHz osc clk */
+ /* vco-pll */
+ clk = clk_register_mux(NULL, "vco1_mux_clk", vco_parents,
+ ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
+ SPEAR1340_PLL1_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "vco1_mux_clk", NULL);
+ clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mux_clk",
+ 0, SPEAR1340_PLL1_CTR, SPEAR1340_PLL1_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco1_clk", NULL);
+ clk_register_clkdev(clk1, "pll1_clk", NULL);
+
+ clk = clk_register_mux(NULL, "vco2_mux_clk", vco_parents,
+ ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
+ SPEAR1340_PLL2_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "vco2_mux_clk", NULL);
+ clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mux_clk",
+ 0, SPEAR1340_PLL2_CTR, SPEAR1340_PLL2_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco2_clk", NULL);
+ clk_register_clkdev(clk1, "pll2_clk", NULL);
+
+ clk = clk_register_mux(NULL, "vco3_mux_clk", vco_parents,
+ ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
+ SPEAR1340_PLL3_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "vco3_mux_clk", NULL);
+ clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mux_clk",
+ 0, SPEAR1340_PLL3_CTR, SPEAR1340_PLL3_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco3_clk", NULL);
+ clk_register_clkdev(clk1, "pll3_clk", NULL);
+
+ clk = clk_register_vco_pll("vco4_clk", "pll4_clk", NULL, "osc_24m_clk",
+ 0, SPEAR1340_PLL4_CTR, SPEAR1340_PLL4_FRQ, pll4_rtbl,
+ ARRAY_SIZE(pll4_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco4_clk", NULL);
+ clk_register_clkdev(clk1, "pll4_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "pll5_clk", "osc_24m_clk", 0,
+ 48000000);
+ clk_register_clkdev(clk, "pll5_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "pll6_clk", "osc_25m_clk", 0,
+ 25000000);
+ clk_register_clkdev(clk, "pll6_clk", NULL);
+
+ /* vco div n clocks */
+ clk = clk_register_fixed_factor(NULL, "vco1div2_clk", "vco1_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, "vco1div2_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "vco1div4_clk", "vco1_clk", 0, 1,
+ 4);
+ clk_register_clkdev(clk, "vco1div4_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "vco2div2_clk", "vco2_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, "vco2div2_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "vco3div2_clk", "vco3_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, "vco3div2_clk", NULL);
+
+ /* peripherals */
+ clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1,
+ 128);
+ clk = clk_register_gate(NULL, "thermal_gate_clk", "thermal_clk", 0,
+ SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_THSENS_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_thermal");
+
+ /* clock derived from pll4 clk */
+ clk = clk_register_fixed_factor(NULL, "ddr_clk", "pll4_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, "ddr_clk", NULL);
+
+ /* clock derived from pll1 clk */
+ clk = clk_register_frac("sys_synth_clk", "vco1div2_clk", 0,
+ SPEAR1340_SYS_CLK_SYNT, sys_synth_rtbl,
+ ARRAY_SIZE(sys_synth_rtbl), &_lock);
+ clk_register_clkdev(clk, "sys_synth_clk", NULL);
+
+ clk = clk_register_frac("amba_synth_clk", "vco1div2_clk", 0,
+ SPEAR1340_AMBA_CLK_SYNT, amba_synth_rtbl,
+ ARRAY_SIZE(amba_synth_rtbl), &_lock);
+ clk_register_clkdev(clk, "amba_synth_clk", NULL);
+
+ clk = clk_register_mux(NULL, "sys_mux_clk", sys_parents,
+ ARRAY_SIZE(sys_parents), 0, SPEAR1340_SYS_CLK_CTRL,
+ SPEAR1340_SCLK_SRC_SEL_SHIFT,
+ SPEAR1340_SCLK_SRC_SEL_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "sys_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "cpu_clk", "sys_mux_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, "cpu_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "cpu_div3_clk", "cpu_clk", 0, 1,
+ 3);
+ clk_register_clkdev(clk, "cpu_div3_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "wdt_clk", "cpu_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, NULL, "ec800620.wdt");
+
+ clk = clk_register_mux(NULL, "ahb_clk", ahb_parents,
+ ARRAY_SIZE(ahb_parents), 0, SPEAR1340_SYS_CLK_CTRL,
+ SPEAR1340_HCLK_SRC_SEL_SHIFT,
+ SPEAR1340_HCLK_SRC_SEL_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "ahb_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "apb_clk", "ahb_clk", 0, 1,
+ 2);
+ clk_register_clkdev(clk, "apb_clk", NULL);
+
+ /* gpt clocks */
+ clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt_parents,
+ ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+ SPEAR1340_GPT0_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gpt0_mux_clk", NULL);
+ clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mux_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "gpt0");
+
+ clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt_parents,
+ ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+ SPEAR1340_GPT1_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
+ clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "gpt1");
+
+ clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt_parents,
+ ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+ SPEAR1340_GPT2_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
+ clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+ SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT2_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "gpt2");
+
+ clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt_parents,
+ ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+ SPEAR1340_GPT3_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
+ clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+ SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT3_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "gpt3");
+
+ /* others */
+ clk = clk_register_aux("uart0_synth_clk", "uart0_synth_gate_clk",
+ "vco1div2_clk", 0, SPEAR1340_UART0_CLK_SYNT, NULL,
+ aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "uart0_synth_clk", NULL);
+ clk_register_clkdev(clk1, "uart0_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+ ARRAY_SIZE(uart0_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+ SPEAR1340_UART0_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uart0_clk", "uart0_mux_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0000000.serial");
+
+ clk = clk_register_aux("uart1_synth_clk", "uart1_synth_gate_clk",
+ "vco1div2_clk", 0, SPEAR1340_UART1_CLK_SYNT, NULL,
+ aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "uart1_synth_clk", NULL);
+ clk_register_clkdev(clk1, "uart1_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "uart1_mux_clk", uart1_parents,
+ ARRAY_SIZE(uart1_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+ SPEAR1340_UART1_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "uart1_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uart1_clk", "uart1_mux_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b4100000.serial");
+
+ clk = clk_register_aux("sdhci_synth_clk", "sdhci_synth_gate_clk",
+ "vco1div2_clk", 0, SPEAR1340_SDHCI_CLK_SYNT, NULL,
+ aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "sdhci_synth_clk", NULL);
+ clk_register_clkdev(clk1, "sdhci_synth_gate_clk", NULL);
+
+ clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_synth_gate_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SDHCI_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b3000000.sdhci");
+
+ clk = clk_register_aux("cfxd_synth_clk", "cfxd_synth_gate_clk",
+ "vco1div2_clk", 0, SPEAR1340_CFXD_CLK_SYNT, NULL,
+ aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "cfxd_synth_clk", NULL);
+ clk_register_clkdev(clk1, "cfxd_synth_gate_clk", NULL);
+
+ clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_synth_gate_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CFXD_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b2800000.cf");
+ clk_register_clkdev(clk, NULL, "arasan_xd");
+
+ clk = clk_register_aux("c3_synth_clk", "c3_synth_gate_clk",
+ "vco1div2_clk", 0, SPEAR1340_C3_CLK_SYNT, NULL,
+ aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "c3_synth_clk", NULL);
+ clk_register_clkdev(clk1, "c3_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "c3_mux_clk", c3_parents,
+ ARRAY_SIZE(c3_parents), 0, SPEAR1340_PERIP_CLK_CFG,
+ SPEAR1340_C3_CLK_SHIFT, SPEAR1340_C3_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "c3_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "c3_clk", "c3_mux_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_C3_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "c3");
+
+ /* gmac */
+ clk = clk_register_mux(NULL, "gmac_phy_input_mux_clk",
+ gmac_phy_input_parents,
+ ARRAY_SIZE(gmac_phy_input_parents), 0,
+ SPEAR1340_GMAC_CLK_CFG,
+ SPEAR1340_GMAC_PHY_INPUT_CLK_SHIFT,
+ SPEAR1340_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gmac_phy_input_mux_clk", NULL);
+
+ clk = clk_register_aux("gmac_phy_synth_clk", "gmac_phy_synth_gate_clk",
+ "gmac_phy_input_mux_clk", 0, SPEAR1340_GMAC_CLK_SYNT,
+ NULL, gmac_rtbl, ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "gmac_phy_synth_clk", NULL);
+ clk_register_clkdev(clk1, "gmac_phy_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "gmac_phy_mux_clk", gmac_phy_parents,
+ ARRAY_SIZE(gmac_phy_parents), 0,
+ SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GMAC_PHY_CLK_SHIFT,
+ SPEAR1340_GMAC_PHY_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "stmmacphy.0");
+
+ /* clcd */
+ clk = clk_register_mux(NULL, "clcd_synth_mux_clk", clcd_synth_parents,
+ ARRAY_SIZE(clcd_synth_parents), 0,
+ SPEAR1340_CLCD_CLK_SYNT, SPEAR1340_CLCD_SYNT_CLK_SHIFT,
+ SPEAR1340_CLCD_SYNT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "clcd_synth_mux_clk", NULL);
+
+ clk = clk_register_frac("clcd_synth_clk", "clcd_synth_mux_clk", 0,
+ SPEAR1340_CLCD_CLK_SYNT, clcd_rtbl,
+ ARRAY_SIZE(clcd_rtbl), &_lock);
+ clk_register_clkdev(clk, "clcd_synth_clk", NULL);
+
+ clk = clk_register_mux(NULL, "clcd_pixel_mux_clk", clcd_pixel_parents,
+ ARRAY_SIZE(clcd_pixel_parents), 0,
+ SPEAR1340_PERIP_CLK_CFG, SPEAR1340_CLCD_CLK_SHIFT,
+ SPEAR1340_CLCD_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "clcd_pixel_clk", NULL);
+
+ clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mux_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CLCD_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "clcd_clk", NULL);
+
+ /* i2s */
+ clk = clk_register_mux(NULL, "i2s_src_mux_clk", i2s_src_parents,
+ ARRAY_SIZE(i2s_src_parents), 0, SPEAR1340_I2S_CLK_CFG,
+ SPEAR1340_I2S_SRC_CLK_SHIFT, SPEAR1340_I2S_SRC_CLK_MASK,
+ 0, &_lock);
+ clk_register_clkdev(clk, "i2s_src_clk", NULL);
+
+ clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mux_clk", 0,
+ SPEAR1340_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl,
+ ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL);
+ clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
+
+ clk = clk_register_mux(NULL, "i2s_ref_mux_clk", i2s_ref_parents,
+ ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1340_I2S_CLK_CFG,
+ SPEAR1340_I2S_REF_SHIFT, SPEAR1340_I2S_REF_SEL_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "i2s_ref_clk", NULL);
+
+ clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mux_clk", 0,
+ SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_I2S_REF_PAD_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL);
+
+ clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gate_clk",
+ "i2s_ref_mux_clk", 0, SPEAR1340_I2S_CLK_CFG,
+ &i2s_sclk_masks, i2s_sclk_rtbl,
+ ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "i2s_sclk_clk", NULL);
+ clk_register_clkdev(clk1, "i2s_sclk_gate_clk", NULL);
+
+ /* clock derived from ahb clk */
+ clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2C0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0280000.i2c");
+
+ clk = clk_register_gate(NULL, "i2c1_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2C1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b4000000.i2c");
+
+ clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_DMA_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "ea800000.dma");
+ clk_register_clkdev(clk, NULL, "eb000000.dma");
+
+ clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GMAC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e2000000.eth");
+
+ clk = clk_register_gate(NULL, "fsmc_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_FSMC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b0000000.flash");
+
+ clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SMI_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "ea000000.flash");
+
+ clk = clk_register_gate(NULL, "usbh0_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UHC0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "usbh.0_clk", NULL);
+
+ clk = clk_register_gate(NULL, "usbh1_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UHC1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "usbh.1_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uoc_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UOC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "uoc");
+
+ clk = clk_register_gate(NULL, "pcie_sata_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_PCIE_SATA_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, NULL, "dw_pcie");
+ clk_register_clkdev(clk, NULL, "ahci");
+
+ clk = clk_register_gate(NULL, "sysram0_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SYSRAM0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "sysram0_clk", NULL);
+
+ clk = clk_register_gate(NULL, "sysram1_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SYSRAM1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, "sysram1_clk", NULL);
+
+ clk = clk_register_aux("adc_synth_clk", "adc_synth_gate_clk", "ahb_clk",
+ 0, SPEAR1340_ADC_CLK_SYNT, NULL, adc_rtbl,
+ ARRAY_SIZE(adc_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "adc_synth_clk", NULL);
+ clk_register_clkdev(clk1, "adc_synth_gate_clk", NULL);
+
+ clk = clk_register_gate(NULL, "adc_clk", "adc_synth_gate_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_ADC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "adc_clk");
+
+ /* clock derived from apb clk */
+ clk = clk_register_gate(NULL, "ssp_clk", "apb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SSP_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0100000.spi");
+
+ clk = clk_register_gate(NULL, "gpio0_clk", "apb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPIO0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0600000.gpio");
+
+ clk = clk_register_gate(NULL, "gpio1_clk", "apb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPIO1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0680000.gpio");
+
+ clk = clk_register_gate(NULL, "i2s_play_clk", "apb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2S_PLAY_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b2400000.i2s");
+
+ clk = clk_register_gate(NULL, "i2s_rec_clk", "apb_clk", 0,
+ SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2S_REC_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "b2000000.i2s");
+
+ clk = clk_register_gate(NULL, "kbd_clk", "apb_clk", 0,
+ SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_KBD_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "e0300000.kbd");
+
+ /* RAS clks */
+ clk = clk_register_mux(NULL, "gen_synth0_1_mux_clk",
+ gen_synth0_1_parents, ARRAY_SIZE(gen_synth0_1_parents),
+ 0, SPEAR1340_PLL_CFG, SPEAR1340_GEN_SYNT0_1_CLK_SHIFT,
+ SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gen_synth0_1_clk", NULL);
+
+ clk = clk_register_mux(NULL, "gen_synth2_3_mux_clk",
+ gen_synth2_3_parents, ARRAY_SIZE(gen_synth2_3_parents),
+ 0, SPEAR1340_PLL_CFG, SPEAR1340_GEN_SYNT2_3_CLK_SHIFT,
+ SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gen_synth2_3_clk", NULL);
+
+ clk = clk_register_frac("gen_synth0_clk", "gen_synth0_1_clk", 0,
+ SPEAR1340_GEN_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+ &_lock);
+ clk_register_clkdev(clk, "gen_synth0_clk", NULL);
+
+ clk = clk_register_frac("gen_synth1_clk", "gen_synth0_1_clk", 0,
+ SPEAR1340_GEN_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+ &_lock);
+ clk_register_clkdev(clk, "gen_synth1_clk", NULL);
+
+ clk = clk_register_frac("gen_synth2_clk", "gen_synth2_3_clk", 0,
+ SPEAR1340_GEN_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+ &_lock);
+ clk_register_clkdev(clk, "gen_synth2_clk", NULL);
+
+ clk = clk_register_frac("gen_synth3_clk", "gen_synth2_3_clk", 0,
+ SPEAR1340_GEN_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl),
+ &_lock);
+ clk_register_clkdev(clk, "gen_synth3_clk", NULL);
+
+ clk = clk_register_gate(NULL, "mali_clk", "gen_synth3_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_MALI_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "mali");
+
+ clk = clk_register_gate(NULL, "cec0_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CEC0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_cec.0");
+
+ clk = clk_register_gate(NULL, "cec1_clk", "ahb_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CEC1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_cec.1");
+
+ clk = clk_register_mux(NULL, "spdif_out_mux_clk", spdif_out_parents,
+ ARRAY_SIZE(spdif_out_parents), 0,
+ SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_OUT_CLK_SHIFT,
+ SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "spdif_out_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "spdif_out_clk", "spdif_out_mux_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_OUT_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, NULL, "spdif-out");
+
+ clk = clk_register_mux(NULL, "spdif_in_mux_clk", spdif_in_parents,
+ ARRAY_SIZE(spdif_in_parents), 0,
+ SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_IN_CLK_SHIFT,
+ SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "spdif_in_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "spdif_in_clk", "spdif_in_mux_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_IN_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spdif-in");
+
+ clk = clk_register_gate(NULL, "acp_clk", "acp_mux_clk", 0,
+ SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_ACP_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "acp_clk");
+
+ clk = clk_register_gate(NULL, "plgpio_clk", "plgpio_mux_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PLGPIO_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "plgpio");
+
+ clk = clk_register_gate(NULL, "video_dec_clk", "video_dec_mux_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_DEC_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, NULL, "video_dec");
+
+ clk = clk_register_gate(NULL, "video_enc_clk", "video_enc_mux_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_ENC_CLK_ENB,
+ 0, &_lock);
+ clk_register_clkdev(clk, NULL, "video_enc");
+
+ clk = clk_register_gate(NULL, "video_in_clk", "video_in_mux_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_IN_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_vip");
+
+ clk = clk_register_gate(NULL, "cam0_clk", "cam0_mux_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM0_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_camif.0");
+
+ clk = clk_register_gate(NULL, "cam1_clk", "cam1_mux_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM1_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_camif.1");
+
+ clk = clk_register_gate(NULL, "cam2_clk", "cam2_mux_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM2_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_camif.2");
+
+ clk = clk_register_gate(NULL, "cam3_clk", "cam3_mux_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM3_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "spear_camif.3");
+
+ clk = clk_register_gate(NULL, "pwm_clk", "pwm_mux_clk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PWM_CLK_ENB, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "pwm");
+}
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
new file mode 100644
index 00000000000..440bb3e4c97
--- /dev/null
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -0,0 +1,612 @@
+/*
+ * SPEAr3xx machines clock framework source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock_types.h>
+#include <mach/misc_regs.h>
+#include "clk.h"
+
+static DEFINE_SPINLOCK(_lock);
+
+#define PLL1_CTR (MISC_BASE + 0x008)
+#define PLL1_FRQ (MISC_BASE + 0x00C)
+#define PLL2_CTR (MISC_BASE + 0x014)
+#define PLL2_FRQ (MISC_BASE + 0x018)
+#define PLL_CLK_CFG (MISC_BASE + 0x020)
+ /* PLL_CLK_CFG register masks */
+ #define MCTR_CLK_SHIFT 28
+ #define MCTR_CLK_MASK 3
+
+#define CORE_CLK_CFG (MISC_BASE + 0x024)
+ /* CORE CLK CFG register masks */
+ #define GEN_SYNTH2_3_CLK_SHIFT 18
+ #define GEN_SYNTH2_3_CLK_MASK 1
+
+ #define HCLK_RATIO_SHIFT 10
+ #define HCLK_RATIO_MASK 2
+ #define PCLK_RATIO_SHIFT 8
+ #define PCLK_RATIO_MASK 2
+
+#define PERIP_CLK_CFG (MISC_BASE + 0x028)
+ /* PERIP_CLK_CFG register masks */
+ #define UART_CLK_SHIFT 4
+ #define UART_CLK_MASK 1
+ #define FIRDA_CLK_SHIFT 5
+ #define FIRDA_CLK_MASK 2
+ #define GPT0_CLK_SHIFT 8
+ #define GPT1_CLK_SHIFT 11
+ #define GPT2_CLK_SHIFT 12
+ #define GPT_CLK_MASK 1
+
+#define PERIP1_CLK_ENB (MISC_BASE + 0x02C)
+ /* PERIP1_CLK_ENB register masks */
+ #define UART_CLK_ENB 3
+ #define SSP_CLK_ENB 5
+ #define I2C_CLK_ENB 7
+ #define JPEG_CLK_ENB 8
+ #define FIRDA_CLK_ENB 10
+ #define GPT1_CLK_ENB 11
+ #define GPT2_CLK_ENB 12
+ #define ADC_CLK_ENB 15
+ #define RTC_CLK_ENB 17
+ #define GPIO_CLK_ENB 18
+ #define DMA_CLK_ENB 19
+ #define SMI_CLK_ENB 21
+ #define GMAC_CLK_ENB 23
+ #define USBD_CLK_ENB 24
+ #define USBH_CLK_ENB 25
+ #define C3_CLK_ENB 31
+
+#define RAS_CLK_ENB (MISC_BASE + 0x034)
+ #define RAS_AHB_CLK_ENB 0
+ #define RAS_PLL1_CLK_ENB 1
+ #define RAS_APB_CLK_ENB 2
+ #define RAS_32K_CLK_ENB 3
+ #define RAS_24M_CLK_ENB 4
+ #define RAS_48M_CLK_ENB 5
+ #define RAS_PLL2_CLK_ENB 7
+ #define RAS_SYNT0_CLK_ENB 8
+ #define RAS_SYNT1_CLK_ENB 9
+ #define RAS_SYNT2_CLK_ENB 10
+ #define RAS_SYNT3_CLK_ENB 11
+
+#define PRSC0_CLK_CFG (MISC_BASE + 0x044)
+#define PRSC1_CLK_CFG (MISC_BASE + 0x048)
+#define PRSC2_CLK_CFG (MISC_BASE + 0x04C)
+#define AMEM_CLK_CFG (MISC_BASE + 0x050)
+ #define AMEM_CLK_ENB 0
+
+#define CLCD_CLK_SYNT (MISC_BASE + 0x05C)
+#define FIRDA_CLK_SYNT (MISC_BASE + 0x060)
+#define UART_CLK_SYNT (MISC_BASE + 0x064)
+#define GMAC_CLK_SYNT (MISC_BASE + 0x068)
+#define GEN0_CLK_SYNT (MISC_BASE + 0x06C)
+#define GEN1_CLK_SYNT (MISC_BASE + 0x070)
+#define GEN2_CLK_SYNT (MISC_BASE + 0x074)
+#define GEN3_CLK_SYNT (MISC_BASE + 0x078)
+
+/* pll rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll_rtbl[] = {
+ {.mode = 0, .m = 0x53, .n = 0x0C, .p = 0x1}, /* vco 332 & pll 166 MHz */
+ {.mode = 0, .m = 0x85, .n = 0x0C, .p = 0x1}, /* vco 532 & pll 266 MHz */
+ {.mode = 0, .m = 0xA6, .n = 0x0C, .p = 0x1}, /* vco 664 & pll 332 MHz */
+};
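+
+/*
+ * The rates quoted above follow from the usual SPEAr VCO/PLL relation
+ * (assumed here, consistent with every table entry):
+ *
+ * vco = (2 * m * Fosc) / n, pll = vco / (1 << p)
+ *
+ * e.g. with the 24 MHz oscillator: 2 * 0x53 * 24 MHz / 0x0C = 332 MHz,
+ * and dividing by 2 (p = 1) gives the 166 MHz pll rate of the first row.
+ */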
+
+/* aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl aux_rtbl[] = {
+ /* For PLL1 = 332 MHz */
+ {.xscale = 2, .yscale = 27, .eq = 0}, /* 12.296 MHz */
+ {.xscale = 2, .yscale = 8, .eq = 0}, /* 41.5 MHz */
+ {.xscale = 2, .yscale = 4, .eq = 0}, /* 83 MHz */
+ {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
+};
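+
+/*
+ * Assuming the auxiliary synthesizer relation rate = (Fin * xscale) /
+ * (2 * yscale) for eq = 0 and rate = (Fin * xscale) / yscale for eq = 1,
+ * which matches all of the rates quoted above, the first entry gives
+ * 332 MHz * 2 / (2 * 27) ~= 12.296 MHz from a 332 MHz pll1.
+ */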
+
+/* gpt rate configuration table, in ascending order of rates */
+static struct gpt_rate_tbl gpt_rtbl[] = {
+ /* For pll1 = 332 MHz */
+ {.mscale = 4, .nscale = 0}, /* 41.5 MHz */
+ {.mscale = 2, .nscale = 0}, /* 55.3 MHz */
+ {.mscale = 1, .nscale = 0}, /* 83 MHz */
+};
+
+/* clock parents */
+static const char *uart0_parents[] = { "pll3_48m_clk", "uart_synth_gate_clk", };
+static const char *firda_parents[] = { "pll3_48m_clk", "firda_synth_gate_clk",
+};
+static const char *gpt0_parents[] = { "pll3_48m_clk", "gpt0_synth_clk", };
+static const char *gpt1_parents[] = { "pll3_48m_clk", "gpt1_synth_clk", };
+static const char *gpt2_parents[] = { "pll3_48m_clk", "gpt2_synth_clk", };
+static const char *gen2_3_parents[] = { "pll1_clk", "pll2_clk", };
+static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none",
+ "pll2_clk", };
+
+#ifdef CONFIG_MACH_SPEAR300
+static void __init spear300_clk_init(void)
+{
+ struct clk *clk;
+
+ clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_48m_clk", 0,
+ 1, 1);
+ clk_register_clkdev(clk, NULL, "60000000.clcd");
+
+ clk = clk_register_fixed_factor(NULL, "fsmc_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "94000000.flash");
+
+ clk = clk_register_fixed_factor(NULL, "sdhci_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "70000000.sdhci");
+
+ clk = clk_register_fixed_factor(NULL, "gpio1_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "a9000000.gpio");
+
+ clk = clk_register_fixed_factor(NULL, "kbd_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "a0000000.kbd");
+}
+#endif
+
+/* spear310 machine specific clock initialization */
+#ifdef CONFIG_MACH_SPEAR310
+static void __init spear310_clk_init(void)
+{
+ struct clk *clk;
+
+ clk = clk_register_fixed_factor(NULL, "emi_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, "emi", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "fsmc_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "44000000.flash");
+
+ clk = clk_register_fixed_factor(NULL, "tdm_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "tdm");
+
+ clk = clk_register_fixed_factor(NULL, "uart1_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "b2000000.serial");
+
+ clk = clk_register_fixed_factor(NULL, "uart2_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "b2080000.serial");
+
+ clk = clk_register_fixed_factor(NULL, "uart3_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "b2100000.serial");
+
+ clk = clk_register_fixed_factor(NULL, "uart4_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "b2180000.serial");
+
+ clk = clk_register_fixed_factor(NULL, "uart5_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "b2200000.serial");
+}
+#endif
+
+/* spear320 machine specific clock initialization */
+#ifdef CONFIG_MACH_SPEAR320
+ #define SMII_PCLK_SHIFT 18
+ #define SMII_PCLK_MASK 2
+ #define SMII_PCLK_VAL_PAD 0x0
+ #define SMII_PCLK_VAL_PLL2 0x1
+ #define SMII_PCLK_VAL_SYNTH0 0x2
+ #define SDHCI_PCLK_SHIFT 15
+ #define SDHCI_PCLK_MASK 1
+ #define SDHCI_PCLK_VAL_48M 0x0
+ #define SDHCI_PCLK_VAL_SYNTH3 0x1
+ #define I2S_REF_PCLK_SHIFT 8
+ #define I2S_REF_PCLK_MASK 1
+ #define I2S_REF_PCLK_SYNTH_VAL 0x1
+ #define I2S_REF_PCLK_PLL2_VAL 0x0
+ #define UART1_PCLK_SHIFT 6
+ #define UART1_PCLK_MASK 1
+ #define SPEAR320_UARTX_PCLK_VAL_SYNTH1 0x0
+ #define SPEAR320_UARTX_PCLK_VAL_APB 0x1
+
+static const char *i2s_ref_parents[] = { "ras_pll2_clk",
+ "ras_gen2_synth_gate_clk", };
+static const char *sdhci_parents[] = { "ras_pll3_48m_clk",
+ "ras_gen3_synth_gate_clk",
+};
+static const char *smii0_parents[] = { "smii_125m_pad", "ras_pll2_clk",
+ "ras_gen0_synth_gate_clk", };
+static const char *uartx_parents[] = { "ras_gen1_synth_gate_clk", "ras_apb_clk",
+};
+
+static void __init spear320_clk_init(void)
+{
+ struct clk *clk;
+
+ clk = clk_register_fixed_rate(NULL, "smii_125m_pad_clk", NULL,
+ CLK_IS_ROOT, 125000000);
+ clk_register_clkdev(clk, "smii_125m_pad", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_48m_clk", 0,
+ 1, 1);
+ clk_register_clkdev(clk, NULL, "90000000.clcd");
+
+ clk = clk_register_fixed_factor(NULL, "emi_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, "emi", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "fsmc_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "4c000000.flash");
+
+ clk = clk_register_fixed_factor(NULL, "i2c1_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "a7000000.i2c");
+
+ clk = clk_register_fixed_factor(NULL, "pwm_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, "pwm", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "ssp1_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "a5000000.spi");
+
+ clk = clk_register_fixed_factor(NULL, "ssp2_clk", "ras_ahb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "a6000000.spi");
+
+ clk = clk_register_fixed_factor(NULL, "can0_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "c_can_platform.0");
+
+ clk = clk_register_fixed_factor(NULL, "can1_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "c_can_platform.1");
+
+ clk = clk_register_fixed_factor(NULL, "i2s_clk", "ras_apb_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "i2s");
+
+ clk = clk_register_mux(NULL, "i2s_ref_clk", i2s_ref_parents,
+ ARRAY_SIZE(i2s_ref_parents), 0, SPEAR320_CONTROL_REG,
+ I2S_REF_PCLK_SHIFT, I2S_REF_PCLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "i2s_ref_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "i2s_sclk", "i2s_ref_clk", 0, 1,
+ 4);
+ clk_register_clkdev(clk, "i2s_sclk", NULL);
+
+ clk = clk_register_mux(NULL, "rs485_clk", uartx_parents,
+ ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+ SPEAR320_RS485_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "a9300000.serial");
+
+ clk = clk_register_mux(NULL, "sdhci_clk", sdhci_parents,
+ ARRAY_SIZE(sdhci_parents), 0, SPEAR320_CONTROL_REG,
+ SDHCI_PCLK_SHIFT, SDHCI_PCLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "70000000.sdhci");
+
+ clk = clk_register_mux(NULL, "smii_pclk", smii0_parents,
+ ARRAY_SIZE(smii0_parents), 0, SPEAR320_CONTROL_REG,
+ SMII_PCLK_SHIFT, SMII_PCLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "smii_pclk");
+
+ clk = clk_register_fixed_factor(NULL, "smii_clk", "smii_pclk", 0, 1, 1);
+ clk_register_clkdev(clk, NULL, "smii");
+
+ clk = clk_register_mux(NULL, "uart1_clk", uartx_parents,
+ ARRAY_SIZE(uartx_parents), 0, SPEAR320_CONTROL_REG,
+ UART1_PCLK_SHIFT, UART1_PCLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "a3000000.serial");
+
+ clk = clk_register_mux(NULL, "uart2_clk", uartx_parents,
+ ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+ SPEAR320_UART2_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "a4000000.serial");
+
+ clk = clk_register_mux(NULL, "uart3_clk", uartx_parents,
+ ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+ SPEAR320_UART3_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "a9100000.serial");
+
+ clk = clk_register_mux(NULL, "uart4_clk", uartx_parents,
+ ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+ SPEAR320_UART4_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "a9200000.serial");
+
+ clk = clk_register_mux(NULL, "uart5_clk", uartx_parents,
+ ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+ SPEAR320_UART5_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "60000000.serial");
+
+ clk = clk_register_mux(NULL, "uart6_clk", uartx_parents,
+ ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
+ SPEAR320_UART6_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, NULL, "60100000.serial");
+}
+#endif
+
+void __init spear3xx_clk_init(void)
+{
+ struct clk *clk, *clk1;
+
+ clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+ clk_register_clkdev(clk, "apb_pclk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
+ 32000);
+ clk_register_clkdev(clk, "osc_32k_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT,
+ 24000000);
+ clk_register_clkdev(clk, "osc_24m_clk", NULL);
+
+ /* clock derived from 32 KHz osc clk */
+ clk = clk_register_gate(NULL, "rtc-spear", "osc_32k_clk", 0,
+ PERIP1_CLK_ENB, RTC_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "fc900000.rtc");
+
+ /* clock derived from 24 MHz osc clk */
+ clk = clk_register_fixed_rate(NULL, "pll3_48m_clk", "osc_24m_clk", 0,
+ 48000000);
+ clk_register_clkdev(clk, "pll3_48m_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "wdt_clk", "osc_24m_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "fc880000.wdt");
+
+ clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL,
+ "osc_24m_clk", 0, PLL1_CTR, PLL1_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco1_clk", NULL);
+ clk_register_clkdev(clk1, "pll1_clk", NULL);
+
+ clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL,
+ "osc_24m_clk", 0, PLL2_CTR, PLL2_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco2_clk", NULL);
+ clk_register_clkdev(clk1, "pll2_clk", NULL);
+
+ /* clock derived from pll1 clk */
+ clk = clk_register_fixed_factor(NULL, "cpu_clk", "pll1_clk", 0, 1, 1);
+ clk_register_clkdev(clk, "cpu_clk", NULL);
+
+ clk = clk_register_divider(NULL, "ahb_clk", "pll1_clk",
+ CLK_SET_RATE_PARENT, CORE_CLK_CFG, HCLK_RATIO_SHIFT,
+ HCLK_RATIO_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "ahb_clk", NULL);
+
+ clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
+ "pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "uart_synth_clk", NULL);
+ clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+ ARRAY_SIZE(uart0_parents), 0, PERIP_CLK_CFG,
+ UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uart0", "uart0_mux_clk", 0,
+ PERIP1_CLK_ENB, UART_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "d0000000.serial");
+
+ clk = clk_register_aux("firda_synth_clk", "firda_synth_gate_clk",
+ "pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "firda_synth_clk", NULL);
+ clk_register_clkdev(clk1, "firda_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "firda_mux_clk", firda_parents,
+ ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
+ FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "firda_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "firda_clk", "firda_mux_clk", 0,
+ PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "firda");
+
+ /* gpt clocks */
+ clk_register_gpt("gpt0_synth_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
+ gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+ clk = clk_register_mux(NULL, "gpt0_clk", gpt0_parents,
+ ARRAY_SIZE(gpt0_parents), 0, PERIP_CLK_CFG,
+ GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "gpt0");
+
+ clk_register_gpt("gpt1_synth_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
+ gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+ clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt1_parents,
+ ARRAY_SIZE(gpt1_parents), 0, PERIP_CLK_CFG,
+ GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
+ clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+ PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "gpt1");
+
+ clk_register_gpt("gpt2_synth_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
+ gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+ clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt2_parents,
+ ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
+ GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
+ clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+ PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "gpt2");
+
+ /* general synths clocks */
+ clk = clk_register_aux("gen0_synth_clk", "gen0_synth_gate_clk",
+ "pll1_clk", 0, GEN0_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "gen0_synth_clk", NULL);
+ clk_register_clkdev(clk1, "gen0_synth_gate_clk", NULL);
+
+ clk = clk_register_aux("gen1_synth_clk", "gen1_synth_gate_clk",
+ "pll1_clk", 0, GEN1_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "gen1_synth_clk", NULL);
+ clk_register_clkdev(clk1, "gen1_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "gen2_3_parent_clk", gen2_3_parents,
+ ARRAY_SIZE(gen2_3_parents), 0, CORE_CLK_CFG,
+ GEN_SYNTH2_3_CLK_SHIFT, GEN_SYNTH2_3_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "gen2_3_parent_clk", NULL);
+
+ clk = clk_register_aux("gen2_synth_clk", "gen2_synth_gate_clk",
+ "gen2_3_parent_clk", 0, GEN2_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "gen2_synth_clk", NULL);
+ clk_register_clkdev(clk1, "gen2_synth_gate_clk", NULL);
+
+ clk = clk_register_aux("gen3_synth_clk", "gen3_synth_gate_clk",
+ "gen2_3_parent_clk", 0, GEN3_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "gen3_synth_clk", NULL);
+ clk_register_clkdev(clk1, "gen3_synth_gate_clk", NULL);
+
+ /* clock derived from pll3 clk */
+ clk = clk_register_gate(NULL, "usbh_clk", "pll3_48m_clk", 0,
+ PERIP1_CLK_ENB, USBH_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "usbh_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "usbh.0_clk", "usbh_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, "usbh.0_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "usbh.1_clk", "usbh_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, "usbh.1_clk", NULL);
+
+ clk = clk_register_gate(NULL, "usbd_clk", "pll3_48m_clk", 0,
+ PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "designware_udc");
+
+ /* clock derived from ahb clk */
+ clk = clk_register_fixed_factor(NULL, "ahbmult2_clk", "ahb_clk", 0, 2,
+ 1);
+ clk_register_clkdev(clk, "ahbmult2_clk", NULL);
+
+ clk = clk_register_mux(NULL, "ddr_clk", ddr_parents,
+ ARRAY_SIZE(ddr_parents), 0, PLL_CLK_CFG, MCTR_CLK_SHIFT,
+ MCTR_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "ddr_clk", NULL);
+
+ clk = clk_register_divider(NULL, "apb_clk", "ahb_clk",
+ CLK_SET_RATE_PARENT, CORE_CLK_CFG, PCLK_RATIO_SHIFT,
+ PCLK_RATIO_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "apb_clk", NULL);
+
+ clk = clk_register_gate(NULL, "amem_clk", "ahb_clk", 0, AMEM_CLK_CFG,
+ AMEM_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "amem_clk", NULL);
+
+ clk = clk_register_gate(NULL, "c3_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ C3_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "c3_clk");
+
+ clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ DMA_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "fc400000.dma");
+
+ clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ GMAC_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "e0800000.eth");
+
+ clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ I2C_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "d0180000.i2c");
+
+ clk = clk_register_gate(NULL, "jpeg_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ JPEG_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "jpeg");
+
+ clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ SMI_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "fc000000.flash");
+
+ /* clock derived from apb clk */
+ clk = clk_register_gate(NULL, "adc_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ ADC_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "adc");
+
+ clk = clk_register_gate(NULL, "gpio0_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ GPIO_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "fc980000.gpio");
+
+ clk = clk_register_gate(NULL, "ssp0_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ SSP_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "d0100000.spi");
+
+ /* RAS clk enable */
+ clk = clk_register_gate(NULL, "ras_ahb_clk", "ahb_clk", 0, RAS_CLK_ENB,
+ RAS_AHB_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_ahb_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_apb_clk", "apb_clk", 0, RAS_CLK_ENB,
+ RAS_APB_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_apb_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_32k_clk", "osc_32k_clk", 0,
+ RAS_CLK_ENB, RAS_32K_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_32k_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_24m_clk", "osc_24m_clk", 0,
+ RAS_CLK_ENB, RAS_24M_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_24m_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_pll1_clk", "pll1_clk", 0,
+ RAS_CLK_ENB, RAS_PLL1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_pll1_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_pll2_clk", "pll2_clk", 0,
+ RAS_CLK_ENB, RAS_PLL2_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_pll2_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_pll3_48m_clk", "pll3_48m_clk", 0,
+ RAS_CLK_ENB, RAS_48M_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_pll3_48m_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_gen0_synth_gate_clk",
+ "gen0_synth_gate_clk", 0, RAS_CLK_ENB,
+ RAS_SYNT0_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_gen0_synth_gate_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_gen1_synth_gate_clk",
+ "gen1_synth_gate_clk", 0, RAS_CLK_ENB,
+ RAS_SYNT1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_gen1_synth_gate_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_gen2_synth_gate_clk",
+ "gen2_synth_gate_clk", 0, RAS_CLK_ENB,
+ RAS_SYNT2_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_gen2_synth_gate_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_gen3_synth_gate_clk",
+ "gen3_synth_gate_clk", 0, RAS_CLK_ENB,
+ RAS_SYNT3_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_gen3_synth_gate_clk", NULL);
+
+ if (of_machine_is_compatible("st,spear300"))
+ spear300_clk_init();
+ else if (of_machine_is_compatible("st,spear310"))
+ spear310_clk_init();
+ else if (of_machine_is_compatible("st,spear320"))
+ spear320_clk_init();
+}
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
new file mode 100644
index 00000000000..f9a20b38230
--- /dev/null
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -0,0 +1,342 @@
+/*
+ * SPEAr6xx machines clock framework source file
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/spinlock_types.h>
+#include <mach/misc_regs.h>
+#include "clk.h"
+
+static DEFINE_SPINLOCK(_lock);
+
+#define PLL1_CTR (MISC_BASE + 0x008)
+#define PLL1_FRQ (MISC_BASE + 0x00C)
+#define PLL2_CTR (MISC_BASE + 0x014)
+#define PLL2_FRQ (MISC_BASE + 0x018)
+#define PLL_CLK_CFG (MISC_BASE + 0x020)
+ /* PLL_CLK_CFG register masks */
+ #define MCTR_CLK_SHIFT 28
+ #define MCTR_CLK_MASK 3
+
+#define CORE_CLK_CFG (MISC_BASE + 0x024)
+ /* CORE CLK CFG register masks */
+ #define HCLK_RATIO_SHIFT 10
+ #define HCLK_RATIO_MASK 2
+ #define PCLK_RATIO_SHIFT 8
+ #define PCLK_RATIO_MASK 2
+
+#define PERIP_CLK_CFG (MISC_BASE + 0x028)
+ /* PERIP_CLK_CFG register masks */
+ #define CLCD_CLK_SHIFT 2
+ #define CLCD_CLK_MASK 2
+ #define UART_CLK_SHIFT 4
+ #define UART_CLK_MASK 1
+ #define FIRDA_CLK_SHIFT 5
+ #define FIRDA_CLK_MASK 2
+ #define GPT0_CLK_SHIFT 8
+ #define GPT1_CLK_SHIFT 10
+ #define GPT2_CLK_SHIFT 11
+ #define GPT3_CLK_SHIFT 12
+ #define GPT_CLK_MASK 1
+
+#define PERIP1_CLK_ENB (MISC_BASE + 0x02C)
+ /* PERIP1_CLK_ENB register masks */
+ #define UART0_CLK_ENB 3
+ #define UART1_CLK_ENB 4
+ #define SSP0_CLK_ENB 5
+ #define SSP1_CLK_ENB 6
+ #define I2C_CLK_ENB 7
+ #define JPEG_CLK_ENB 8
+ #define FSMC_CLK_ENB 9
+ #define FIRDA_CLK_ENB 10
+ #define GPT2_CLK_ENB 11
+ #define GPT3_CLK_ENB 12
+ #define GPIO2_CLK_ENB 13
+ #define SSP2_CLK_ENB 14
+ #define ADC_CLK_ENB 15
+ #define GPT1_CLK_ENB 11
+ #define RTC_CLK_ENB 17
+ #define GPIO1_CLK_ENB 18
+ #define DMA_CLK_ENB 19
+ #define SMI_CLK_ENB 21
+ #define CLCD_CLK_ENB 22
+ #define GMAC_CLK_ENB 23
+ #define USBD_CLK_ENB 24
+ #define USBH0_CLK_ENB 25
+ #define USBH1_CLK_ENB 26
+
+#define PRSC0_CLK_CFG (MISC_BASE + 0x044)
+#define PRSC1_CLK_CFG (MISC_BASE + 0x048)
+#define PRSC2_CLK_CFG (MISC_BASE + 0x04C)
+
+#define CLCD_CLK_SYNT (MISC_BASE + 0x05C)
+#define FIRDA_CLK_SYNT (MISC_BASE + 0x060)
+#define UART_CLK_SYNT (MISC_BASE + 0x064)
+
+/* vco rate configuration table, in ascending order of rates */
+static struct pll_rate_tbl pll_rtbl[] = {
+ {.mode = 0, .m = 0x53, .n = 0x0F, .p = 0x1}, /* vco 332 & pll 166 MHz */
+ {.mode = 0, .m = 0x85, .n = 0x0F, .p = 0x1}, /* vco 532 & pll 266 MHz */
+ {.mode = 0, .m = 0xA6, .n = 0x0F, .p = 0x1}, /* vco 664 & pll 332 MHz */
+};
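+
+/*
+ * Same m and p values as on SPEAr3xx, but n is raised from 0x0C to 0x0F so
+ * that the 30 MHz oscillator used here still yields the 332/532/664 MHz
+ * vco rates above (assuming the same relation vco = (2 * m * Fosc) / n):
+ * 2 * 0x53 * 30 MHz / 0x0F = 332 MHz.
+ */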
+
+/* aux rate configuration table, in ascending order of rates */
+static struct aux_rate_tbl aux_rtbl[] = {
+ /* For PLL1 = 332 MHz */
+ {.xscale = 2, .yscale = 8, .eq = 0}, /* 41.5 MHz */
+ {.xscale = 2, .yscale = 4, .eq = 0}, /* 83 MHz */
+ {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
+};
+
+static const char *clcd_parents[] = { "pll3_48m_clk", "clcd_synth_gate_clk", };
+static const char *firda_parents[] = { "pll3_48m_clk", "firda_synth_gate_clk",
+};
+static const char *uart_parents[] = { "pll3_48m_clk", "uart_synth_gate_clk", };
+static const char *gpt0_1_parents[] = { "pll3_48m_clk", "gpt0_1_synth_clk", };
+static const char *gpt2_parents[] = { "pll3_48m_clk", "gpt2_synth_clk", };
+static const char *gpt3_parents[] = { "pll3_48m_clk", "gpt3_synth_clk", };
+static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none",
+ "pll2_clk", };
+
+/* gpt rate configuration table, in ascending order of rates */
+static struct gpt_rate_tbl gpt_rtbl[] = {
+ /* For pll1 = 332 MHz */
+ {.mscale = 4, .nscale = 0}, /* 41.5 MHz */
+ {.mscale = 2, .nscale = 0}, /* 55.3 MHz */
+ {.mscale = 1, .nscale = 0}, /* 83 MHz */
+};
+
+void __init spear6xx_clk_init(void)
+{
+ struct clk *clk, *clk1;
+
+ clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+ clk_register_clkdev(clk, "apb_pclk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
+ 32000);
+ clk_register_clkdev(clk, "osc_32k_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "osc_30m_clk", NULL, CLK_IS_ROOT,
+ 30000000);
+ clk_register_clkdev(clk, "osc_30m_clk", NULL);
+
+ /* clock derived from 32 KHz osc clk */
+ clk = clk_register_gate(NULL, "rtc_spear", "osc_32k_clk", 0,
+ PERIP1_CLK_ENB, RTC_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "rtc-spear");
+
+ /* clock derived from 30 MHz osc clk */
+ clk = clk_register_fixed_rate(NULL, "pll3_48m_clk", "osc_24m_clk", 0,
+ 48000000);
+ clk_register_clkdev(clk, "pll3_48m_clk", NULL);
+
+ clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "osc_30m_clk",
+ 0, PLL1_CTR, PLL1_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl),
+ &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco1_clk", NULL);
+ clk_register_clkdev(clk1, "pll1_clk", NULL);
+
+ clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL,
+ "osc_30m_clk", 0, PLL2_CTR, PLL2_FRQ, pll_rtbl,
+ ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk_register_clkdev(clk, "vco2_clk", NULL);
+ clk_register_clkdev(clk1, "pll2_clk", NULL);
+
+ clk = clk_register_fixed_factor(NULL, "wdt_clk", "osc_30m_clk", 0, 1,
+ 1);
+ clk_register_clkdev(clk, NULL, "wdt");
+
+ /* clock derived from pll1 clk */
+ clk = clk_register_fixed_factor(NULL, "cpu_clk", "pll1_clk", 0, 1, 1);
+ clk_register_clkdev(clk, "cpu_clk", NULL);
+
+ clk = clk_register_divider(NULL, "ahb_clk", "pll1_clk",
+ CLK_SET_RATE_PARENT, CORE_CLK_CFG, HCLK_RATIO_SHIFT,
+ HCLK_RATIO_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "ahb_clk", NULL);
+
+ clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
+ "pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "uart_synth_clk", NULL);
+ clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "uart_mux_clk", uart_parents,
+ ARRAY_SIZE(uart_parents), 0, PERIP_CLK_CFG,
+ UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "uart_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "uart0", "uart_mux_clk", 0,
+ PERIP1_CLK_ENB, UART0_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "d0000000.serial");
+
+ clk = clk_register_gate(NULL, "uart1", "uart_mux_clk", 0,
+ PERIP1_CLK_ENB, UART1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "d0080000.serial");
+
+ clk = clk_register_aux("firda_synth_clk", "firda_synth_gate_clk",
+ "pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "firda_synth_clk", NULL);
+ clk_register_clkdev(clk1, "firda_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "firda_mux_clk", firda_parents,
+ ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
+ FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "firda_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "firda_clk", "firda_mux_clk", 0,
+ PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "firda");
+
+ clk = clk_register_aux("clcd_synth_clk", "clcd_synth_gate_clk",
+ "pll1_clk", 0, CLCD_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "clcd_synth_clk", NULL);
+ clk_register_clkdev(clk1, "clcd_synth_gate_clk", NULL);
+
+ clk = clk_register_mux(NULL, "clcd_mux_clk", clcd_parents,
+ ARRAY_SIZE(clcd_parents), 0, PERIP_CLK_CFG,
+ CLCD_CLK_SHIFT, CLCD_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "clcd_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "clcd_clk", "clcd_mux_clk", 0,
+ PERIP1_CLK_ENB, CLCD_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "clcd");
+
+ /* gpt clocks */
+ clk = clk_register_gpt("gpt0_1_synth_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
+ gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+ clk_register_clkdev(clk, "gpt0_1_synth_clk", NULL);
+
+ clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt0_1_parents,
+ ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
+ GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "gpt0");
+
+ clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt0_1_parents,
+ ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
+ GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+ PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "gpt1");
+
+ clk = clk_register_gpt("gpt2_synth_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
+ gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+ clk_register_clkdev(clk, "gpt2_synth_clk", NULL);
+
+ clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt2_parents,
+ ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
+ GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+ PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "gpt2");
+
+ clk = clk_register_gpt("gpt3_synth_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
+ gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+ clk_register_clkdev(clk, "gpt3_synth_clk", NULL);
+
+ clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt3_parents,
+ ARRAY_SIZE(gpt3_parents), 0, PERIP_CLK_CFG,
+ GPT3_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
+
+ clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+ PERIP1_CLK_ENB, GPT3_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "gpt3");
+
+ /* clock derived from pll3 clk */
+ clk = clk_register_gate(NULL, "usbh0_clk", "pll3_48m_clk", 0,
+ PERIP1_CLK_ENB, USBH0_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "usbh.0_clk");
+
+ clk = clk_register_gate(NULL, "usbh1_clk", "pll3_48m_clk", 0,
+ PERIP1_CLK_ENB, USBH1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "usbh.1_clk");
+
+ clk = clk_register_gate(NULL, "usbd_clk", "pll3_48m_clk", 0,
+ PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "designware_udc");
+
+ /* clock derived from ahb clk */
+ clk = clk_register_fixed_factor(NULL, "ahbmult2_clk", "ahb_clk", 0, 2,
+ 1);
+ clk_register_clkdev(clk, "ahbmult2_clk", NULL);
+
+ clk = clk_register_mux(NULL, "ddr_clk", ddr_parents,
+ ARRAY_SIZE(ddr_parents),
+ 0, PLL_CLK_CFG, MCTR_CLK_SHIFT, MCTR_CLK_MASK, 0,
+ &_lock);
+ clk_register_clkdev(clk, "ddr_clk", NULL);
+
+ clk = clk_register_divider(NULL, "apb_clk", "ahb_clk",
+ CLK_SET_RATE_PARENT, CORE_CLK_CFG, PCLK_RATIO_SHIFT,
+ PCLK_RATIO_MASK, 0, &_lock);
+ clk_register_clkdev(clk, "apb_clk", NULL);
+
+ clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ DMA_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "fc400000.dma");
+
+ clk = clk_register_gate(NULL, "fsmc_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ FSMC_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "d1800000.flash");
+
+ clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ GMAC_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "gmac");
+
+ clk = clk_register_gate(NULL, "i2c_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ I2C_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "d0200000.i2c");
+
+ clk = clk_register_gate(NULL, "jpeg_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ JPEG_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "jpeg");
+
+ clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
+ SMI_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "fc000000.flash");
+
+ /* clock derived from apb clk */
+ clk = clk_register_gate(NULL, "adc_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ ADC_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "adc");
+
+ clk = clk_register_fixed_factor(NULL, "gpio0_clk", "apb_clk", 0, 1, 1);
+ clk_register_clkdev(clk, NULL, "f0100000.gpio");
+
+ clk = clk_register_gate(NULL, "gpio1_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ GPIO1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "fc980000.gpio");
+
+ clk = clk_register_gate(NULL, "gpio2_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ GPIO2_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "d8100000.gpio");
+
+ clk = clk_register_gate(NULL, "ssp0_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ SSP0_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "ssp-pl022.0");
+
+ clk = clk_register_gate(NULL, "ssp1_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ SSP1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "ssp-pl022.1");
+
+ clk = clk_register_gate(NULL, "ssp2_clk", "apb_clk", 0, PERIP1_CLK_ENB,
+ SSP2_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, NULL, "ssp-pl022.2");
+}
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index e6ecc5f2394..1cc6b3f3e26 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -16,6 +16,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/clk.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
@@ -79,6 +80,7 @@ struct crypto_priv {
void __iomem *reg;
void __iomem *sram;
int irq;
+ struct clk *clk;
struct task_struct *queue_th;
/* the lock protects queue and eng_st */
@@ -1053,6 +1055,12 @@ static int mv_probe(struct platform_device *pdev)
if (ret)
goto err_thread;
+ /* Not all platforms can gate the clock, so it is not
+ an error if the clock does not exist. */
+ cp->clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(cp->clk))
+ clk_prepare_enable(cp->clk);
+
writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
@@ -1118,6 +1126,12 @@ static int mv_remove(struct platform_device *pdev)
memset(cp->sram, 0, cp->sram_size);
iounmap(cp->sram);
iounmap(cp->reg);
+
+ if (!IS_ERR(cp->clk)) {
+ clk_disable_unprepare(cp->clk);
+ clk_put(cp->clk);
+ }
+
kfree(cp);
cpg = NULL;
return 0;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ef378b5b17e..aadeb5be9db 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -238,6 +238,7 @@ config IMX_DMA
config MXS_DMA
bool "MXS DMA support"
depends on SOC_IMX23 || SOC_IMX28
+ select STMP_DEVICE
select DMA_ENGINE
help
Support the MXS DMA engine. This engine including APBH-DMA
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 3d704abd791..49ecbbb8932 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -95,10 +95,14 @@ static struct amba_driver pl08x_amba_driver;
* struct vendor_data - vendor-specific config parameters for PL08x derivatives
* @channels: the number of channels available in this variant
* @dualmaster: whether this version supports dual AHB masters or not.
+ * @nomadik: whether the channels have Nomadik security extension bits
+ * that must be checked for permission before use, and whether some
+ * registers are missing on this variant
*/
struct vendor_data {
u8 channels;
bool dualmaster;
+ bool nomadik;
};
/*
@@ -385,7 +389,7 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
spin_lock_irqsave(&ch->lock, flags);
- if (!ch->serving) {
+ if (!ch->locked && !ch->serving) {
ch->serving = virt_chan;
ch->signal = -1;
spin_unlock_irqrestore(&ch->lock, flags);
@@ -1324,7 +1328,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
int ret, tmp;
dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
- __func__, sgl->length, plchan->name);
+ __func__, sg_dma_len(sgl), plchan->name);
txd = pl08x_get_txd(plchan, flags);
if (!txd) {
@@ -1378,11 +1382,11 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
dsg->len = sg_dma_len(sg);
if (direction == DMA_MEM_TO_DEV) {
- dsg->src_addr = sg_phys(sg);
+ dsg->src_addr = sg_dma_address(sg);
dsg->dst_addr = slave_addr;
} else {
dsg->src_addr = slave_addr;
- dsg->dst_addr = sg_phys(sg);
+ dsg->dst_addr = sg_dma_address(sg);
}
}
@@ -1484,6 +1488,9 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
*/
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
+ /* The Nomadik variant does not have the config register */
+ if (pl08x->vd->nomadik)
+ return;
writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
}
@@ -1616,7 +1623,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
__func__, err);
writel(err, pl08x->base + PL080_ERR_CLEAR);
}
- tc = readl(pl08x->base + PL080_INT_STATUS);
+ tc = readl(pl08x->base + PL080_TC_STATUS);
if (tc)
writel(tc, pl08x->base + PL080_TC_CLEAR);
@@ -1773,8 +1780,10 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
spin_lock_irqsave(&ch->lock, flags);
virt_chan = ch->serving;
- seq_printf(s, "%d\t\t%s\n",
- ch->id, virt_chan ? virt_chan->name : "(none)");
+ seq_printf(s, "%d\t\t%s%s\n",
+ ch->id,
+ virt_chan ? virt_chan->name : "(none)",
+ ch->locked ? " LOCKED" : "");
spin_unlock_irqrestore(&ch->lock, flags);
}
@@ -1918,7 +1927,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
}
/* Initialize physical channels */
- pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)),
+ pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
GFP_KERNEL);
if (!pl08x->phy_chans) {
dev_err(&adev->dev, "%s failed to allocate "
@@ -1933,8 +1942,23 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
ch->id = i;
ch->base = pl08x->base + PL080_Cx_BASE(i);
spin_lock_init(&ch->lock);
- ch->serving = NULL;
ch->signal = -1;
+
+ /*
+ * Nomadik variants can have channels that are locked
+ * down for the secure world only. Lock up these channels
+ * by perpetually serving a dummy virtual channel.
+ */
+ if (vd->nomadik) {
+ u32 val;
+
+ val = readl(ch->base + PL080_CH_CONFIG);
+ if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
+ dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
+ ch->locked = true;
+ }
+ }
+
dev_dbg(&adev->dev, "physical channel %d is %s\n",
i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
}
@@ -2017,6 +2041,12 @@ static struct vendor_data vendor_pl080 = {
.dualmaster = true,
};
+static struct vendor_data vendor_nomadik = {
+ .channels = 8,
+ .dualmaster = true,
+ .nomadik = true,
+};
+
static struct vendor_data vendor_pl081 = {
.channels = 2,
.dualmaster = false,
@@ -2037,9 +2067,9 @@ static struct amba_id pl08x_ids[] = {
},
/* Nomadik 8815 PL080 variant */
{
- .id = 0x00280880,
+ .id = 0x00280080,
.mask = 0x00ffffff,
- .data = &vendor_pl080,
+ .data = &vendor_nomadik,
},
{ 0, 0 },
};
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index bf0d7e4e345..7292aa87b2d 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -39,7 +39,6 @@
*/
#define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO)
-#define ATC_DEFAULT_CTRLA (0)
#define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \
|ATC_DIF(AT_DMA_MEM_IF))
@@ -574,7 +573,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
return NULL;
}
- ctrla = ATC_DEFAULT_CTRLA;
ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
| ATC_SRC_ADDR_MODE_INCR
| ATC_DST_ADDR_MODE_INCR
@@ -585,13 +583,13 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
* of the most common optimization.
*/
if (!((src | dest | len) & 3)) {
- ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
+ ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
src_width = dst_width = 2;
} else if (!((src | dest | len) & 1)) {
- ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
+ ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
src_width = dst_width = 1;
} else {
- ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
+ ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
src_width = dst_width = 0;
}
@@ -668,7 +666,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
return NULL;
}
- ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
+ ctrla = ATC_SCSIZE(sconfig->src_maxburst)
+ | ATC_DCSIZE(sconfig->dst_maxburst);
ctrlb = ATC_IEN;
switch (direction) {
@@ -796,12 +795,12 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
enum dma_transfer_direction direction)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
- struct at_dma_slave *atslave = chan->private;
struct dma_slave_config *sconfig = &atchan->dma_sconfig;
u32 ctrla;
/* prepare common CRTLA value */
- ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla
+ ctrla = ATC_SCSIZE(sconfig->src_maxburst)
+ | ATC_DCSIZE(sconfig->dst_maxburst)
| ATC_DST_WIDTH(reg_width)
| ATC_SRC_WIDTH(reg_width)
| period_len >> reg_width;
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 897a8bcaec9..8a6c8e8b294 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -87,7 +87,26 @@
/* Bitfields in CTRLA */
#define ATC_BTSIZE_MAX 0xFFFFUL /* Maximum Buffer Transfer Size */
#define ATC_BTSIZE(x) (ATC_BTSIZE_MAX & (x)) /* Buffer Transfer Size */
-/* Chunck Tranfer size definitions are in at_hdmac.h */
+#define ATC_SCSIZE_MASK (0x7 << 16) /* Source Chunk Transfer Size */
+#define ATC_SCSIZE(x) (ATC_SCSIZE_MASK & ((x) << 16))
+#define ATC_SCSIZE_1 (0x0 << 16)
+#define ATC_SCSIZE_4 (0x1 << 16)
+#define ATC_SCSIZE_8 (0x2 << 16)
+#define ATC_SCSIZE_16 (0x3 << 16)
+#define ATC_SCSIZE_32 (0x4 << 16)
+#define ATC_SCSIZE_64 (0x5 << 16)
+#define ATC_SCSIZE_128 (0x6 << 16)
+#define ATC_SCSIZE_256 (0x7 << 16)
+#define ATC_DCSIZE_MASK (0x7 << 20) /* Destination Chunk Transfer Size */
+#define ATC_DCSIZE(x) (ATC_DCSIZE_MASK & ((x) << 20))
+#define ATC_DCSIZE_1 (0x0 << 20)
+#define ATC_DCSIZE_4 (0x1 << 20)
+#define ATC_DCSIZE_8 (0x2 << 20)
+#define ATC_DCSIZE_16 (0x3 << 20)
+#define ATC_DCSIZE_32 (0x4 << 20)
+#define ATC_DCSIZE_64 (0x5 << 20)
+#define ATC_DCSIZE_128 (0x6 << 20)
+#define ATC_DCSIZE_256 (0x7 << 20)
#define ATC_SRC_WIDTH_MASK (0x3 << 24) /* Source Single Transfer Size */
#define ATC_SRC_WIDTH(x) ((x) << 24)
#define ATC_SRC_WIDTH_BYTE (0x0 << 24)
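
The new ATC_SCSIZE()/ATC_DCSIZE() helpers pack an already-encoded chunk-size code (0..7, standing for 1..256 transfers per chunk) into bits 16..18 and 20..22 of CTRLA, which is how the at_hdmac hunks above build ctrla from the slave configuration. A minimal sketch of the composition, assuming the maxburst values were converted to this register encoding earlier in the driver; the helper name is hypothetical:

	static u32 atc_chunk_ctrla(unsigned int src_code, unsigned int dst_code)
	{
		/* mask-and-shift per the at_hdmac_regs.h definitions above */
		return ATC_SCSIZE(src_code) | ATC_DCSIZE(dst_code);
	}
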
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 750925f9638..e67b4e06a91 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1033,7 +1033,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (!sgl)
goto out;
- if (sgl->length == 0)
+ if (sg_dma_len(sgl) == 0)
goto out;
spin_lock_irqsave(&cohc->lock, flg);
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index 6c0e2d4c668..780e0429b38 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -270,10 +270,10 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
if (dir == DMA_MEM_TO_DEV)
/* increment source address */
- src = sg_phys(sg);
+ src = sg_dma_address(sg);
else
/* increment destination address */
- dst = sg_phys(sg);
+ dst = sg_dma_address(sg);
bytes_to_transfer = sg_dma_len(sg);
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 7439079f5ee..e23dc82d43a 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -742,7 +743,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct dw_desc *desc;
u32 len, dlen, mem;
- mem = sg_phys(sg);
+ mem = sg_dma_address(sg);
len = sg_dma_len(sg);
if (!((mem | len) & 7))
@@ -809,7 +810,7 @@ slave_sg_todev_fill_desc:
struct dw_desc *desc;
u32 len, dlen, mem;
- mem = sg_phys(sg);
+ mem = sg_dma_address(sg);
len = sg_dma_len(sg);
if (!((mem | len) & 7))
@@ -1429,7 +1430,7 @@ static int __init dw_probe(struct platform_device *pdev)
err = PTR_ERR(dw->clk);
goto err_clk;
}
- clk_enable(dw->clk);
+ clk_prepare_enable(dw->clk);
/* force dma off, just in case */
dw_dma_off(dw);
@@ -1510,7 +1511,7 @@ static int __init dw_probe(struct platform_device *pdev)
return 0;
err_irq:
- clk_disable(dw->clk);
+ clk_disable_unprepare(dw->clk);
clk_put(dw->clk);
err_clk:
iounmap(dw->regs);
@@ -1540,7 +1541,7 @@ static int __exit dw_remove(struct platform_device *pdev)
channel_clear_bit(dw, CH_EN, dwc->mask);
}
- clk_disable(dw->clk);
+ clk_disable_unprepare(dw->clk);
clk_put(dw->clk);
iounmap(dw->regs);
@@ -1559,7 +1560,7 @@ static void dw_shutdown(struct platform_device *pdev)
struct dw_dma *dw = platform_get_drvdata(pdev);
dw_dma_off(platform_get_drvdata(pdev));
- clk_disable(dw->clk);
+ clk_disable_unprepare(dw->clk);
}
static int dw_suspend_noirq(struct device *dev)
@@ -1568,7 +1569,7 @@ static int dw_suspend_noirq(struct device *dev)
struct dw_dma *dw = platform_get_drvdata(pdev);
dw_dma_off(platform_get_drvdata(pdev));
- clk_disable(dw->clk);
+ clk_disable_unprepare(dw->clk);
return 0;
}
@@ -1578,7 +1579,7 @@ static int dw_resume_noirq(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct dw_dma *dw = platform_get_drvdata(pdev);
- clk_enable(dw->clk);
+ clk_prepare_enable(dw->clk);
dma_writel(dw, CFG, DW_CFG_DMA_EN);
return 0;
}
@@ -1592,12 +1593,21 @@ static const struct dev_pm_ops dw_dev_pm_ops = {
.poweroff_noirq = dw_suspend_noirq,
};
+#ifdef CONFIG_OF
+static const struct of_device_id dw_dma_id_table[] = {
+ { .compatible = "snps,dma-spear1340" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dw_dma_id_table);
+#endif
+
static struct platform_driver dw_driver = {
.remove = __exit_p(dw_remove),
.shutdown = dw_shutdown,
.driver = {
.name = "dw_dmac",
.pm = &dw_dev_pm_ops,
+ .of_match_table = of_match_ptr(dw_dma_id_table),
},
};
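
Two independent conversions run through the dw_dmac hunks: clk_enable()/clk_disable() become clk_prepare_enable()/clk_disable_unprepare(), since the common clock framework splits the sleepable prepare step from the atomic enable step, and an OF match table is added so the driver can bind from the device tree. A minimal sketch of the clock half of that pattern in a generic probe path, assuming <linux/clk.h> and <linux/platform_device.h>; every name below is hypothetical:

	static int foo_probe(struct platform_device *pdev)
	{
		struct clk *clk;
		int ret;

		clk = clk_get(&pdev->dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_prepare_enable(clk);	/* prepare (may sleep) + enable */
		if (ret) {
			clk_put(clk);
			return ret;
		}

		/* ... map registers, request the IRQ, register the device ... */
		return 0;
	}

The teardown side mirrors this with clk_disable_unprepare() followed by clk_put(), as the error and remove paths above do.
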
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index f6e9b572b99..c64917ec313 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -71,6 +71,7 @@
#define M2M_CONTROL_TM_SHIFT 13
#define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT)
+#define M2M_CONTROL_NFBINT BIT(21)
#define M2M_CONTROL_RSS_SHIFT 22
#define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT)
@@ -79,7 +80,22 @@
#define M2M_CONTROL_PWSC_SHIFT 25
#define M2M_INTERRUPT 0x0004
-#define M2M_INTERRUPT_DONEINT BIT(1)
+#define M2M_INTERRUPT_MASK 6
+
+#define M2M_STATUS 0x000c
+#define M2M_STATUS_CTL_SHIFT 1
+#define M2M_STATUS_CTL_IDLE (0 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_STALL (1 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_MEMRD (2 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_MEMWR (3 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_BWCWAIT (4 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_CTL_MASK (7 << M2M_STATUS_CTL_SHIFT)
+#define M2M_STATUS_BUF_SHIFT 4
+#define M2M_STATUS_BUF_NO (0 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_BUF_ON (1 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_BUF_NEXT (2 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_BUF_MASK (3 << M2M_STATUS_BUF_SHIFT)
+#define M2M_STATUS_DONE BIT(6)
#define M2M_BCR0 0x0010
#define M2M_BCR1 0x0014
@@ -426,15 +442,6 @@ static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
/*
* M2M DMA implementation
- *
- * For the M2M transfers we don't use NFB at all. This is because it simply
- * doesn't work well with memcpy transfers. When you submit both buffers it is
- * extremely unlikely that you get an NFB interrupt, but it instead reports
- * DONE interrupt and both buffers are already transferred which means that we
- * weren't able to update the next buffer.
- *
- * So for now we "simulate" NFB by just submitting buffer after buffer
- * without double buffering.
*/
static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
@@ -543,6 +550,11 @@ static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
m2m_fill_desc(edmac);
control |= M2M_CONTROL_DONEINT;
+ if (ep93xx_dma_advance_active(edmac)) {
+ m2m_fill_desc(edmac);
+ control |= M2M_CONTROL_NFBINT;
+ }
+
/*
* Now we can finally enable the channel. For M2M channel this must be
* done _after_ the BCRx registers are programmed.
@@ -560,32 +572,89 @@ static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
}
}
+/*
+ * According to EP93xx User's Guide, we should receive DONE interrupt when all
+ * M2M DMA controller transactions complete normally. This is not always the
+ * case - sometimes EP93xx M2M DMA asserts DONE interrupt when the DMA channel
+ * is still running (channel Buffer FSM in DMA_BUF_ON state, and channel
+ * Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA operation).
+ * In effect, disabling the channel when only the DONE bit is set could stop
+ * a currently running DMA transfer. To avoid this, we use the Buffer FSM and
+ * Control FSM to check the current state of the DMA channel.
+ */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
+ u32 status = readl(edmac->regs + M2M_STATUS);
+ u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
+ u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
+ bool done = status & M2M_STATUS_DONE;
+ bool last_done;
u32 control;
+ struct ep93xx_dma_desc *desc;
- if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT))
+ /* Accept only DONE and NFB interrupts */
+ if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
return INTERRUPT_UNKNOWN;
- /* Clear the DONE bit */
- writel(0, edmac->regs + M2M_INTERRUPT);
+ if (done) {
+ /* Clear the DONE bit */
+ writel(0, edmac->regs + M2M_INTERRUPT);
+ }
- /* Disable interrupts and the channel */
- control = readl(edmac->regs + M2M_CONTROL);
- control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE);
- writel(control, edmac->regs + M2M_CONTROL);
+ /*
+ * Check whether we are done with descriptors or not. This, together
+ * with DMA channel state, determines action to take in interrupt.
+ */
+ desc = ep93xx_dma_get_active(edmac);
+ last_done = !desc || desc->txd.cookie;
/*
- * Since we only get DONE interrupt we have to find out ourselves
- * whether there still is something to process. So we try to advance
- * the chain an see whether it succeeds.
+ * Use M2M DMA Buffer FSM and Control FSM to check current state of
+ * DMA channel. Using DONE and NFB bits from channel status register
+ * or bits from channel interrupt register is not reliable.
*/
- if (ep93xx_dma_advance_active(edmac)) {
- edmac->edma->hw_submit(edmac);
- return INTERRUPT_NEXT_BUFFER;
+ if (!last_done &&
+ (buf_fsm == M2M_STATUS_BUF_NO ||
+ buf_fsm == M2M_STATUS_BUF_ON)) {
+ /*
+ * Two buffers are ready for update when Buffer FSM is in
+ * DMA_NO_BUF state. Only one buffer can be prepared without
+ * disabling the channel or polling the DONE bit.
+ * To simplify things, always prepare only one buffer.
+ */
+ if (ep93xx_dma_advance_active(edmac)) {
+ m2m_fill_desc(edmac);
+ if (done && !edmac->chan.private) {
+ /* Software trigger for memcpy channel */
+ control = readl(edmac->regs + M2M_CONTROL);
+ control |= M2M_CONTROL_START;
+ writel(control, edmac->regs + M2M_CONTROL);
+ }
+ return INTERRUPT_NEXT_BUFFER;
+ } else {
+ last_done = true;
+ }
+ }
+
+ /*
+ * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
+ * and Control FSM is in DMA_STALL state.
+ */
+ if (last_done &&
+ buf_fsm == M2M_STATUS_BUF_NO &&
+ ctl_fsm == M2M_STATUS_CTL_STALL) {
+ /* Disable interrupts and the channel */
+ control = readl(edmac->regs + M2M_CONTROL);
+ control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
+ | M2M_CONTROL_ENABLE);
+ writel(control, edmac->regs + M2M_CONTROL);
+ return INTERRUPT_DONE;
}
- return INTERRUPT_DONE;
+ /*
+ * Nothing to do this time.
+ */
+ return INTERRUPT_NEXT_BUFFER;
}
/*
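
The comment block and handler rewrite above reduce to one rule: the DONE bit by itself does not prove the channel has stopped, so the channel is only disabled once the Buffer FSM reports DMA_NO_BUF and the Control FSM reports DMA_STALL. A hypothetical helper condensing that check, built from the M2M_STATUS_* fields this patch introduces:

	static bool m2m_channel_is_idle(u32 status)
	{
		/* both FSMs must report idle before the channel may be disabled */
		return (status & M2M_STATUS_BUF_MASK) == M2M_STATUS_BUF_NO &&
		       (status & M2M_STATUS_CTL_MASK) == M2M_STATUS_CTL_STALL;
	}
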
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index bb787d8e152..fcfeb3cd8d3 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -227,7 +227,7 @@ static inline int imxdma_sg_next(struct imxdma_desc *d)
struct scatterlist *sg = d->sg;
unsigned long now;
- now = min(d->len, sg->length);
+ now = min(d->len, sg_dma_len(sg));
if (d->len != IMX_DMA_LENGTH_LOOP)
d->len -= now;
@@ -763,16 +763,16 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
for_each_sg(sgl, sg, sg_len, i) {
- dma_length += sg->length;
+ dma_length += sg_dma_len(sg);
}
switch (imxdmac->word_size) {
case DMA_SLAVE_BUSWIDTH_4_BYTES:
- if (sgl->length & 3 || sgl->dma_address & 3)
+ if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
return NULL;
break;
case DMA_SLAVE_BUSWIDTH_2_BYTES:
- if (sgl->length & 1 || sgl->dma_address & 1)
+ if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
return NULL;
break;
case DMA_SLAVE_BUSWIDTH_1_BYTE:
@@ -831,13 +831,13 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
imxdmac->sg_list[i].page_link = 0;
imxdmac->sg_list[i].offset = 0;
imxdmac->sg_list[i].dma_address = dma_addr;
- imxdmac->sg_list[i].length = period_len;
+ sg_dma_len(&imxdmac->sg_list[i]) = period_len;
dma_addr += period_len;
}
/* close the loop */
imxdmac->sg_list[periods].offset = 0;
- imxdmac->sg_list[periods].length = 0;
+ sg_dma_len(&imxdmac->sg_list[periods]) = 0;
imxdmac->sg_list[periods].page_link =
((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
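
The recurring substitution in this and the neighbouring drivers — sg->length becomes sg_dma_len(sg), sg_phys(sg) becomes sg_dma_address(sg) — matters because after dma_map_sg() the address and length the DMA engine must program live in the scatterlist's DMA view (an IOMMU may remap or merge segments), not in the CPU-side fields. A minimal sketch of walking a mapped list with the DMA-view accessors, assuming <linux/scatterlist.h>; the function and parameter names are illustrative only:

	static void show_mapped_sg(struct device *dev, struct scatterlist *sgl,
				   int nents)
	{
		struct scatterlist *sg;
		int i;

		/* nents is the count returned by dma_map_sg(), not the CPU count */
		for_each_sg(sgl, sg, nents, i)
			dev_dbg(dev, "seg %d: dma 0x%llx len %u\n", i,
				(unsigned long long)sg_dma_address(sg),
				sg_dma_len(sg));
	}
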
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d3e38e28bb6..fb4f4990f5e 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -24,7 +24,7 @@
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
-#include <linux/wait.h>
+#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
@@ -271,6 +271,7 @@ struct sdma_channel {
enum dma_status status;
unsigned int chn_count;
unsigned int chn_real_count;
+ struct tasklet_struct tasklet;
};
#define IMX_DMA_SG_LOOP BIT(0)
@@ -322,8 +323,9 @@ struct sdma_engine {
struct sdma_context_data *context;
dma_addr_t context_phys;
struct dma_device dma_device;
- struct clk *clk;
- struct mutex channel_0_lock;
+ struct clk *clk_ipg;
+ struct clk *clk_ahb;
+ spinlock_t channel_0_lock;
struct sdma_script_start_addrs *script_addrs;
};
@@ -401,19 +403,27 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
}
/*
- * sdma_run_channel - run a channel and wait till it's done
+ * sdma_run_channel0 - run a channel and wait till it's done
*/
-static int sdma_run_channel(struct sdma_channel *sdmac)
+static int sdma_run_channel0(struct sdma_engine *sdma)
{
- struct sdma_engine *sdma = sdmac->sdma;
- int channel = sdmac->channel;
int ret;
+ unsigned long timeout = 500;
- init_completion(&sdmac->done);
+ sdma_enable_channel(sdma, 0);
- sdma_enable_channel(sdma, channel);
+ while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) {
+ if (timeout-- <= 0)
+ break;
+ udelay(1);
+ }
- ret = wait_for_completion_timeout(&sdmac->done, HZ);
+ if (ret) {
+ /* Clear the interrupt status */
+ writel_relaxed(ret, sdma->regs + SDMA_H_INTR);
+ } else {
+ dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
+ }
return ret ? 0 : -ETIMEDOUT;
}
@@ -425,17 +435,17 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
void *buf_virt;
dma_addr_t buf_phys;
int ret;
-
- mutex_lock(&sdma->channel_0_lock);
+ unsigned long flags;
buf_virt = dma_alloc_coherent(NULL,
size,
&buf_phys, GFP_KERNEL);
if (!buf_virt) {
- ret = -ENOMEM;
- goto err_out;
+ return -ENOMEM;
}
+ spin_lock_irqsave(&sdma->channel_0_lock, flags);
+
bd0->mode.command = C0_SETPM;
bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
bd0->mode.count = size / 2;
@@ -444,12 +454,11 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
memcpy(buf_virt, buf, size);
- ret = sdma_run_channel(&sdma->channel[0]);
+ ret = sdma_run_channel0(sdma);
- dma_free_coherent(NULL, size, buf_virt, buf_phys);
+ spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
-err_out:
- mutex_unlock(&sdma->channel_0_lock);
+ dma_free_coherent(NULL, size, buf_virt, buf_phys);
return ret;
}
@@ -534,13 +543,11 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
sdmac->desc.callback(sdmac->desc.callback_param);
}
-static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
+static void sdma_tasklet(unsigned long data)
{
- complete(&sdmac->done);
+ struct sdma_channel *sdmac = (struct sdma_channel *) data;
- /* not interested in channel 0 interrupts */
- if (sdmac->channel == 0)
- return;
+ complete(&sdmac->done);
if (sdmac->flags & IMX_DMA_SG_LOOP)
sdma_handle_channel_loop(sdmac);
@@ -554,13 +561,15 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
unsigned long stat;
stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
+ /* not interested in channel 0 interrupts */
+ stat &= ~1;
writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
while (stat) {
int channel = fls(stat) - 1;
struct sdma_channel *sdmac = &sdma->channel[channel];
- mxc_sdma_handle_channel(sdmac);
+ tasklet_schedule(&sdmac->tasklet);
__clear_bit(channel, &stat);
}
@@ -659,6 +668,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
struct sdma_context_data *context = sdma->context;
struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
int ret;
+ unsigned long flags;
if (sdmac->direction == DMA_DEV_TO_MEM) {
load_address = sdmac->pc_from_device;
@@ -676,7 +686,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
- mutex_lock(&sdma->channel_0_lock);
+ spin_lock_irqsave(&sdma->channel_0_lock, flags);
memset(context, 0, sizeof(*context));
context->channel_state.pc = load_address;
@@ -695,10 +705,9 @@ static int sdma_load_context(struct sdma_channel *sdmac)
bd0->mode.count = sizeof(*context) / 4;
bd0->buffer_addr = sdma->context_phys;
bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
+ ret = sdma_run_channel0(sdma);
- ret = sdma_run_channel(&sdma->channel[0]);
-
- mutex_unlock(&sdma->channel_0_lock);
+ spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
return ret;
}
@@ -859,7 +868,8 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
sdmac->peripheral_type = data->peripheral_type;
sdmac->event_id0 = data->dma_request;
- clk_enable(sdmac->sdma->clk);
+ clk_enable(sdmac->sdma->clk_ipg);
+ clk_enable(sdmac->sdma->clk_ahb);
ret = sdma_request_channel(sdmac);
if (ret)
@@ -896,7 +906,8 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
- clk_disable(sdma->clk);
+ clk_disable(sdma->clk_ipg);
+ clk_disable(sdma->clk_ahb);
}
static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
@@ -938,7 +949,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
bd->buffer_addr = sg->dma_address;
- count = sg->length;
+ count = sg_dma_len(sg);
if (count > 0xffff) {
dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
@@ -1169,12 +1180,14 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
addr = (void *)header + header->script_addrs_start;
ram_code = (void *)header + header->ram_code_start;
- clk_enable(sdma->clk);
+ clk_enable(sdma->clk_ipg);
+ clk_enable(sdma->clk_ahb);
/* download the RAM image for SDMA */
sdma_load_script(sdma, ram_code,
header->ram_code_size,
addr->ram_code_start_addr);
- clk_disable(sdma->clk);
+ clk_disable(sdma->clk_ipg);
+ clk_disable(sdma->clk_ahb);
sdma_add_scripts(sdma, addr);
@@ -1216,7 +1229,8 @@ static int __init sdma_init(struct sdma_engine *sdma)
return -ENODEV;
}
- clk_enable(sdma->clk);
+ clk_enable(sdma->clk_ipg);
+ clk_enable(sdma->clk_ahb);
/* Be sure SDMA has not started yet */
writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
@@ -1269,12 +1283,14 @@ static int __init sdma_init(struct sdma_engine *sdma)
/* Initializes channel's priorities */
sdma_set_channel_priority(&sdma->channel[0], 7);
- clk_disable(sdma->clk);
+ clk_disable(sdma->clk_ipg);
+ clk_disable(sdma->clk_ahb);
return 0;
err_dma_alloc:
- clk_disable(sdma->clk);
+ clk_disable(sdma->clk_ipg);
+ clk_disable(sdma->clk_ahb);
dev_err(sdma->dev, "initialisation failed with %d\n", ret);
return ret;
}
@@ -1297,7 +1313,7 @@ static int __init sdma_probe(struct platform_device *pdev)
if (!sdma)
return -ENOMEM;
- mutex_init(&sdma->channel_0_lock);
+ spin_lock_init(&sdma->channel_0_lock);
sdma->dev = &pdev->dev;
@@ -1313,12 +1329,21 @@ static int __init sdma_probe(struct platform_device *pdev)
goto err_request_region;
}
- sdma->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(sdma->clk)) {
- ret = PTR_ERR(sdma->clk);
+ sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(sdma->clk_ipg)) {
+ ret = PTR_ERR(sdma->clk_ipg);
goto err_clk;
}
+ sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(sdma->clk_ahb)) {
+ ret = PTR_ERR(sdma->clk_ahb);
+ goto err_clk;
+ }
+
+ clk_prepare(sdma->clk_ipg);
+ clk_prepare(sdma->clk_ahb);
+
sdma->regs = ioremap(iores->start, resource_size(iores));
if (!sdma->regs) {
ret = -ENOMEM;
@@ -1359,6 +1384,8 @@ static int __init sdma_probe(struct platform_device *pdev)
dma_cookie_init(&sdmac->chan);
sdmac->channel = i;
+ tasklet_init(&sdmac->tasklet, sdma_tasklet,
+ (unsigned long) sdmac);
/*
* Add the channel to the DMAC list. Do not add channel 0 though
* because we need it internally in the SDMA driver. This also means
@@ -1426,7 +1453,6 @@ err_alloc:
err_request_irq:
iounmap(sdma->regs);
err_ioremap:
- clk_put(sdma->clk);
err_clk:
release_mem_region(iores->start, resource_size(iores));
err_request_region:
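
Two related moves in the imx-sdma hunks: channel 0 is now driven synchronously by polling SDMA_H_INTR under a spinlock, so that path no longer sleeps, and per-channel completion work is pushed out of the hard interrupt handler into a tasklet. A minimal sketch of the irq-to-tasklet split, with every name below hypothetical:

	struct foo_chan {
		struct tasklet_struct	tasklet;
		struct completion	done;
	};

	static void foo_tasklet(unsigned long data)
	{
		struct foo_chan *c = (struct foo_chan *)data;

		complete(&c->done);	/* callbacks, descriptor bookkeeping, ... */
	}

	static irqreturn_t foo_irq(int irq, void *dev_id)
	{
		struct foo_chan *c = dev_id;

		/* ack the hardware here, then defer the heavier work */
		tasklet_schedule(&c->tasklet);
		return IRQ_HANDLED;
	}

The tasklet is bound once at setup with tasklet_init(&c->tasklet, foo_tasklet, (unsigned long)c), exactly as the probe hunk above does per channel.
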
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index c900ca7aaec..222e907bfaa 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -394,11 +394,11 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
}
}
/*Populate CTL_HI values*/
- ctl_hi.ctlx.block_ts = get_block_ts(sg->length,
+ ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg),
desc->width,
midc->dma->block_size);
/*Populate SAR and DAR values*/
- sg_phy_addr = sg_phys(sg);
+ sg_phy_addr = sg_dma_address(sg);
if (desc->dirn == DMA_MEM_TO_DEV) {
lli_bloc_desc->sar = sg_phy_addr;
lli_bloc_desc->dar = mids->dma_slave.dst_addr;
@@ -747,7 +747,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
txd = intel_mid_dma_prep_memcpy(chan,
mids->dma_slave.dst_addr,
mids->dma_slave.src_addr,
- sgl->length,
+ sg_dma_len(sgl),
flags);
return txd;
} else {
@@ -759,7 +759,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
sg_len, direction, flags);
- txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags);
+ txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags);
if (NULL == txd) {
pr_err("MDMA: Prep memcpy failed\n");
return NULL;
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 62e3f8ec246..5ec72044ea4 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1715,7 +1715,7 @@ static int __init ipu_probe(struct platform_device *pdev)
}
/* Make sure IPU HSP clock is running */
- clk_enable(ipu_data.ipu_clk);
+ clk_prepare_enable(ipu_data.ipu_clk);
/* Disable all interrupts */
idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_1);
@@ -1747,7 +1747,7 @@ static int __init ipu_probe(struct platform_device *pdev)
err_idmac_init:
err_attach_irq:
ipu_irq_detach_irq(&ipu_data, pdev);
- clk_disable(ipu_data.ipu_clk);
+ clk_disable_unprepare(ipu_data.ipu_clk);
clk_put(ipu_data.ipu_clk);
err_clk_get:
iounmap(ipu_data.reg_ic);
@@ -1765,7 +1765,7 @@ static int __exit ipu_remove(struct platform_device *pdev)
ipu_idmac_exit(ipu);
ipu_irq_detach_irq(ipu, pdev);
- clk_disable(ipu->ipu_clk);
+ clk_disable_unprepare(ipu->ipu_clk);
clk_put(ipu->ipu_clk);
iounmap(ipu->reg_ic);
iounmap(ipu->reg_ipu);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index fa5d55fea46..0b12e68bf79 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -25,6 +25,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
+#include <linux/clk.h>
#include <plat/mv_xor.h>
#include "dmaengine.h"
@@ -1307,11 +1308,25 @@ static int mv_xor_shared_probe(struct platform_device *pdev)
if (dram)
mv_xor_conf_mbus_windows(msp, dram);
+ /* Not all platforms can gate the clock, so it is not
+ * an error if the clock does not exist.
+ */
+ msp->clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(msp->clk))
+ clk_prepare_enable(msp->clk);
+
return 0;
}
static int mv_xor_shared_remove(struct platform_device *pdev)
{
+ struct mv_xor_shared_private *msp = platform_get_drvdata(pdev);
+
+ if (!IS_ERR(msp->clk)) {
+ clk_disable_unprepare(msp->clk);
+ clk_put(msp->clk);
+ }
+
return 0;
}
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index 654876b7ba1..a5b422f5a8a 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -55,6 +55,7 @@
struct mv_xor_shared_private {
void __iomem *xor_base;
void __iomem *xor_high_base;
+ struct clk *clk;
};
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 655d4ce6ed0..c96ab15319f 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -22,11 +22,14 @@
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <linux/fsl/mxs-dma.h>
+#include <linux/stmp_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <asm/irq.h>
#include <mach/mxs.h>
-#include <mach/common.h>
#include "dmaengine.h"
@@ -36,12 +39,8 @@
* dma can program the controller registers of peripheral devices.
*/
-#define MXS_DMA_APBH 0
-#define MXS_DMA_APBX 1
-#define dma_is_apbh() (mxs_dma->dev_id == MXS_DMA_APBH)
-
-#define APBH_VERSION_LATEST 3
-#define apbh_is_old() (mxs_dma->version < APBH_VERSION_LATEST)
+#define dma_is_apbh(mxs_dma) ((mxs_dma)->type == MXS_DMA_APBH)
+#define apbh_is_old(mxs_dma) ((mxs_dma)->dev_id == IMX23_DMA)
#define HW_APBHX_CTRL0 0x000
#define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29)
@@ -51,13 +50,14 @@
#define HW_APBHX_CTRL2 0x020
#define HW_APBHX_CHANNEL_CTRL 0x030
#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16
-#define HW_APBH_VERSION (cpu_is_mx23() ? 0x3f0 : 0x800)
-#define HW_APBX_VERSION 0x800
-#define BP_APBHX_VERSION_MAJOR 24
-#define HW_APBHX_CHn_NXTCMDAR(n) \
- (((dma_is_apbh() && apbh_is_old()) ? 0x050 : 0x110) + (n) * 0x70)
-#define HW_APBHX_CHn_SEMA(n) \
- (((dma_is_apbh() && apbh_is_old()) ? 0x080 : 0x140) + (n) * 0x70)
+/*
+ * The offset of the NXTCMDAR register differs with both dma type and version,
+ * while the per-channel stride is always 0x70.
+ */
+#define HW_APBHX_CHn_NXTCMDAR(d, n) \
+ (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70)
+#define HW_APBHX_CHn_SEMA(d, n) \
+ (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70)
/*
* ccw bits definitions
@@ -121,9 +121,19 @@ struct mxs_dma_chan {
#define MXS_DMA_CHANNELS 16
#define MXS_DMA_CHANNELS_MASK 0xffff
+enum mxs_dma_devtype {
+ MXS_DMA_APBH,
+ MXS_DMA_APBX,
+};
+
+enum mxs_dma_id {
+ IMX23_DMA,
+ IMX28_DMA,
+};
+
struct mxs_dma_engine {
- int dev_id;
- unsigned int version;
+ enum mxs_dma_id dev_id;
+ enum mxs_dma_devtype type;
void __iomem *base;
struct clk *clk;
struct dma_device dma_device;
@@ -131,17 +141,86 @@ struct mxs_dma_engine {
struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
};
+struct mxs_dma_type {
+ enum mxs_dma_id id;
+ enum mxs_dma_devtype type;
+};
+
+static struct mxs_dma_type mxs_dma_types[] = {
+ {
+ .id = IMX23_DMA,
+ .type = MXS_DMA_APBH,
+ }, {
+ .id = IMX23_DMA,
+ .type = MXS_DMA_APBX,
+ }, {
+ .id = IMX28_DMA,
+ .type = MXS_DMA_APBH,
+ }, {
+ .id = IMX28_DMA,
+ .type = MXS_DMA_APBX,
+ }
+};
+
+static struct platform_device_id mxs_dma_ids[] = {
+ {
+ .name = "imx23-dma-apbh",
+ .driver_data = (kernel_ulong_t) &mxs_dma_types[0],
+ }, {
+ .name = "imx23-dma-apbx",
+ .driver_data = (kernel_ulong_t) &mxs_dma_types[1],
+ }, {
+ .name = "imx28-dma-apbh",
+ .driver_data = (kernel_ulong_t) &mxs_dma_types[2],
+ }, {
+ .name = "imx28-dma-apbx",
+ .driver_data = (kernel_ulong_t) &mxs_dma_types[3],
+ }, {
+ /* end of list */
+ }
+};
+
+static const struct of_device_id mxs_dma_dt_ids[] = {
+ { .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], },
+ { .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], },
+ { .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], },
+ { .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids);
+
+static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct mxs_dma_chan, chan);
+}
+
+int mxs_dma_is_apbh(struct dma_chan *chan)
+{
+ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+
+ return dma_is_apbh(mxs_dma);
+}
+
+int mxs_dma_is_apbx(struct dma_chan *chan)
+{
+ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+
+ return !dma_is_apbh(mxs_dma);
+}
+
static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
{
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int chan_id = mxs_chan->chan.chan_id;
- if (dma_is_apbh() && apbh_is_old())
+ if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
- mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+ mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
else
writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
- mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);
+ mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
}
static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
@@ -151,10 +230,10 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
/* set cmd_addr up */
writel(mxs_chan->ccw_phys,
- mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
+ mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));
/* write 1 to SEMA to kick off the channel */
- writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id));
+ writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
}
static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
@@ -168,12 +247,12 @@ static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
int chan_id = mxs_chan->chan.chan_id;
/* freeze the channel */
- if (dma_is_apbh() && apbh_is_old())
+ if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
writel(1 << chan_id,
- mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+ mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
else
writel(1 << chan_id,
- mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR);
+ mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
mxs_chan->status = DMA_PAUSED;
}
@@ -184,21 +263,16 @@ static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
int chan_id = mxs_chan->chan.chan_id;
/* unfreeze the channel */
- if (dma_is_apbh() && apbh_is_old())
+ if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
writel(1 << chan_id,
- mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
+ mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR);
else
writel(1 << chan_id,
- mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_CLR_ADDR);
+ mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);
mxs_chan->status = DMA_IN_PROGRESS;
}
-static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
-{
- return container_of(chan, struct mxs_dma_chan, chan);
-}
-
static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
return dma_cookie_assign(tx);
@@ -220,11 +294,11 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
/* completion status */
stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1);
stat1 &= MXS_DMA_CHANNELS_MASK;
- writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + MXS_CLR_ADDR);
+ writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);
/* error status */
stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2);
- writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + MXS_CLR_ADDR);
+ writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);
/*
* When both completion and error of termination bits set at the
@@ -415,9 +489,9 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
} else {
for_each_sg(sgl, sg, sg_len, i) {
- if (sg->length > MAX_XFER_BYTES) {
+ if (sg_dma_len(sg) > MAX_XFER_BYTES) {
dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
- sg->length, MAX_XFER_BYTES);
+ sg_dma_len(sg), MAX_XFER_BYTES);
goto err_out;
}
@@ -425,7 +499,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
ccw->bufaddr = sg->dma_address;
- ccw->xfer_bytes = sg->length;
+ ccw->xfer_bytes = sg_dma_len(sg);
ccw->bits = 0;
ccw->bits |= CCW_CHAIN;
@@ -567,27 +641,21 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
if (ret)
return ret;
- ret = mxs_reset_block(mxs_dma->base);
+ ret = stmp_reset_block(mxs_dma->base);
if (ret)
goto err_out;
- /* only major version matters */
- mxs_dma->version = readl(mxs_dma->base +
- ((mxs_dma->dev_id == MXS_DMA_APBX) ?
- HW_APBX_VERSION : HW_APBH_VERSION)) >>
- BP_APBHX_VERSION_MAJOR;
-
/* enable apbh burst */
- if (dma_is_apbh()) {
+ if (dma_is_apbh(mxs_dma)) {
writel(BM_APBH_CTRL0_APB_BURST_EN,
- mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+ mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
writel(BM_APBH_CTRL0_APB_BURST8_EN,
- mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
+ mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
}
/* enable irq for all the channels */
writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
- mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR);
+ mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET);
err_out:
clk_disable_unprepare(mxs_dma->clk);
@@ -596,8 +664,9 @@ err_out:
static int __init mxs_dma_probe(struct platform_device *pdev)
{
- const struct platform_device_id *id_entry =
- platform_get_device_id(pdev);
+ const struct platform_device_id *id_entry;
+ const struct of_device_id *of_id;
+ const struct mxs_dma_type *dma_type;
struct mxs_dma_engine *mxs_dma;
struct resource *iores;
int ret, i;
@@ -606,7 +675,15 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
if (!mxs_dma)
return -ENOMEM;
- mxs_dma->dev_id = id_entry->driver_data;
+ of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev);
+ if (of_id)
+ id_entry = of_id->data;
+ else
+ id_entry = platform_get_device_id(pdev);
+
+ dma_type = (struct mxs_dma_type *)id_entry->driver_data;
+ mxs_dma->type = dma_type->type;
+ mxs_dma->dev_id = dma_type->id;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -689,23 +766,12 @@ err_request_region:
return ret;
}
-static struct platform_device_id mxs_dma_type[] = {
- {
- .name = "mxs-dma-apbh",
- .driver_data = MXS_DMA_APBH,
- }, {
- .name = "mxs-dma-apbx",
- .driver_data = MXS_DMA_APBX,
- }, {
- /* end of list */
- }
-};
-
static struct platform_driver mxs_dma_driver = {
.driver = {
.name = "mxs-dma",
+ .of_match_table = mxs_dma_dt_ids,
},
- .id_table = mxs_dma_type,
+ .id_table = mxs_dma_ids,
};
static int __init mxs_dma_module_init(void)
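
Besides the device-tree matching, the rework above exports mxs_dma_is_apbh()/mxs_dma_is_apbx(), which lets client drivers query the controller type from a dmaengine filter function instead of relying on the removed global macros. A hypothetical client-side filter as an illustration (not part of this patch):

	static bool foo_apbh_filter(struct dma_chan *chan, void *param)
	{
		/* accept only channels that belong to an APBH instance */
		return mxs_dma_is_apbh(chan);
	}
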
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 65c0495a6d4..987ab5cd261 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -621,7 +621,7 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
goto err_desc_get;
desc->regs.dev_addr = reg;
- desc->regs.mem_addr = sg_phys(sg);
+ desc->regs.mem_addr = sg_dma_address(sg);
desc->regs.size = sg_dma_len(sg);
desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index fa3fb21e60b..cbcc28e79be 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -21,7 +21,6 @@
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 2ed1ac3513f..000d309602b 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2362,7 +2362,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
}
sg[periods].offset = 0;
- sg[periods].length = 0;
+ sg_dma_len(&sg[periods]) = 0;
sg[periods].page_link =
((unsigned long)sg | 0x01) & ~0x02;
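
Note that the cyclic-prep hunks here and in imx-dma assign through sg_dma_len(): the accessor expands to a plain structure field (sg->dma_length or sg->length, depending on CONFIG_NEED_SG_DMA_LENGTH), so it works as an lvalue when a driver hand-builds a scatterlist for period chaining. A minimal illustration, with buf_dma and period_len as assumed inputs and the function name hypothetical:

	static void fill_one_period(struct scatterlist *sg, dma_addr_t buf_dma,
				    unsigned int period_len)
	{
		sg_init_table(sg, 1);
		sg_dma_address(sg) = buf_dma;	/* plain struct fields, assignable */
		sg_dma_len(sg) = period_len;
	}
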
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 7ef73c919c5..7be9b7288e9 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -715,25 +715,6 @@ static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
input_addr_to_dram_addr(mci, input_addr));
}
-/*
- * Find the minimum and maximum InputAddr values that map to the given @csrow.
- * Pass back these values in *input_addr_min and *input_addr_max.
- */
-static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
- u64 *input_addr_min, u64 *input_addr_max)
-{
- struct amd64_pvt *pvt;
- u64 base, mask;
-
- pvt = mci->pvt_info;
- BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt));
-
- get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
-
- *input_addr_min = base & ~mask;
- *input_addr_max = base | mask;
-}
-
/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
u32 *page, u32 *offset)
@@ -1058,6 +1039,37 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
int channel, csrow;
u32 page, offset;
+ error_address_to_page_and_offset(sys_addr, &page, &offset);
+
+ /*
+ * Find out which node the error address belongs to. This may be
+ * different from the node that detected the error.
+ */
+ src_mci = find_mc_by_sys_addr(mci, sys_addr);
+ if (!src_mci) {
+ amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
+ (unsigned long)sys_addr);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offset, syndrome,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "failed to map error addr to a node",
+ NULL);
+ return;
+ }
+
+ /* Now map the sys_addr to a CSROW */
+ csrow = sys_addr_to_csrow(src_mci, sys_addr);
+ if (csrow < 0) {
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offset, syndrome,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "failed to map error addr to a csrow",
+ NULL);
+ return;
+ }
+
/* CHIPKILL enabled */
if (pvt->nbcfg & NBCFG_CHIPKILL) {
channel = get_channel_from_ecc_syndrome(mci, syndrome);
@@ -1067,9 +1079,15 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
* 2 DIMMs is in error. So we need to ID 'both' of them
* as suspect.
*/
- amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible "
- "error reporting race\n", syndrome);
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+ amd64_mc_warn(src_mci, "unknown syndrome 0x%04x - "
+ "possible error reporting race\n",
+ syndrome);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offset, syndrome,
+ csrow, -1, -1,
+ EDAC_MOD_STR,
+ "unknown syndrome - possible error reporting race",
+ NULL);
return;
}
} else {
@@ -1084,28 +1102,10 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
channel = ((sys_addr & BIT(3)) != 0);
}
- /*
- * Find out which node the error address belongs to. This may be
- * different from the node that detected the error.
- */
- src_mci = find_mc_by_sys_addr(mci, sys_addr);
- if (!src_mci) {
- amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
- (unsigned long)sys_addr);
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
- return;
- }
-
- /* Now map the sys_addr to a CSROW */
- csrow = sys_addr_to_csrow(src_mci, sys_addr);
- if (csrow < 0) {
- edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
- } else {
- error_address_to_page_and_offset(sys_addr, &page, &offset);
-
- edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
- channel, EDAC_MOD_STR);
- }
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, src_mci,
+ page, offset, syndrome,
+ csrow, channel, -1,
+ EDAC_MOD_STR, "", NULL);
}
static int ddr2_cs_size(unsigned i, bool dct_width)
@@ -1611,15 +1611,20 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
u32 page, offset;
int nid, csrow, chan = 0;
+ error_address_to_page_and_offset(sys_addr, &page, &offset);
+
csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
if (csrow < 0) {
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offset, syndrome,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "failed to map error addr to a csrow",
+ NULL);
return;
}
- error_address_to_page_and_offset(sys_addr, &page, &offset);
-
/*
* We need the syndromes for channel detection only when we're
* ganged. Otherwise @chan should already contain the channel at
@@ -1628,16 +1633,10 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
if (dct_ganging_enabled(pvt))
chan = get_channel_from_ecc_syndrome(mci, syndrome);
- if (chan >= 0)
- edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
- EDAC_MOD_STR);
- else
- /*
- * Channel unknown, report all channels on this CSROW as failed.
- */
- for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
- edac_mc_handle_ce(mci, page, offset, syndrome,
- csrow, chan, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offset, syndrome,
+ csrow, chan, -1,
+ EDAC_MOD_STR, "", NULL);
}
/*
@@ -1918,7 +1917,12 @@ static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
/* Ensure that the Error Address is VALID */
if (!(m->status & MCI_STATUS_ADDRV)) {
amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 0, 0, 0,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "HW has no ERROR_ADDRESS available",
+ NULL);
return;
}
@@ -1942,11 +1946,17 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
if (!(m->status & MCI_STATUS_ADDRV)) {
amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
- edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ 0, 0, 0,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "HW has no ERROR_ADDRESS available",
+ NULL);
return;
}
sys_addr = get_error_address(m);
+ error_address_to_page_and_offset(sys_addr, &page, &offset);
/*
* Find out which node the error address belongs to. This may be
@@ -1956,7 +1966,11 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
if (!src_mci) {
amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
(unsigned long)sys_addr);
- edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, offset, 0,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "ERROR ADDRESS NOT mapped to a MC", NULL);
return;
}
@@ -1966,10 +1980,17 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
if (csrow < 0) {
amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
(unsigned long)sys_addr);
- edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, offset, 0,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "ERROR ADDRESS NOT mapped to CS",
+ NULL);
} else {
- error_address_to_page_and_offset(sys_addr, &page, &offset);
- edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, offset, 0,
+ csrow, -1, -1,
+ EDAC_MOD_STR, "", NULL);
}
}
@@ -2171,7 +2192,7 @@ static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
- debugf0(" nr_pages= %u channel-count = %d\n",
+ debugf0(" nr_pages/channel= %u channel-count = %d\n",
nr_pages, pvt->channel_count);
return nr_pages;
@@ -2185,9 +2206,12 @@ static int init_csrows(struct mem_ctl_info *mci)
{
struct csrow_info *csrow;
struct amd64_pvt *pvt = mci->pvt_info;
- u64 input_addr_min, input_addr_max, sys_addr, base, mask;
+ u64 base, mask;
u32 val;
- int i, empty = 1;
+ int i, j, empty = 1;
+ enum mem_type mtype;
+ enum edac_type edac_mode;
+ int nr_pages = 0;
amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
@@ -2211,41 +2235,32 @@ static int init_csrows(struct mem_ctl_info *mci)
empty = 0;
if (csrow_enabled(i, 0, pvt))
- csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
+ nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
if (csrow_enabled(i, 1, pvt))
- csrow->nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
- find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
- sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
- csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
- sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
- csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
+ nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
get_cs_base_and_mask(pvt, i, 0, &base, &mask);
- csrow->page_mask = ~mask;
/* 8 bytes of resolution */
- csrow->mtype = amd64_determine_memory_type(pvt, i);
+ mtype = amd64_determine_memory_type(pvt, i);
debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
- debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
- (unsigned long)input_addr_min,
- (unsigned long)input_addr_max);
- debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n",
- (unsigned long)sys_addr, csrow->page_mask);
- debugf1(" nr_pages: %u first_page: 0x%lx "
- "last_page: 0x%lx\n",
- (unsigned)csrow->nr_pages,
- csrow->first_page, csrow->last_page);
+ debugf1(" nr_pages: %u\n", nr_pages * pvt->channel_count);
/*
* determine whether CHIPKILL or JUST ECC or NO ECC is operating
*/
if (pvt->nbcfg & NBCFG_ECC_ENABLE)
- csrow->edac_mode =
- (pvt->nbcfg & NBCFG_CHIPKILL) ?
- EDAC_S4ECD4ED : EDAC_SECDED;
+ edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
+ EDAC_S4ECD4ED : EDAC_SECDED;
else
- csrow->edac_mode = EDAC_NONE;
+ edac_mode = EDAC_NONE;
+
+ for (j = 0; j < pvt->channel_count; j++) {
+ csrow->channels[j].dimm->mtype = mtype;
+ csrow->channels[j].dimm->edac_mode = edac_mode;
+ csrow->channels[j].dimm->nr_pages = nr_pages;
+ }
}
return empty;
@@ -2540,6 +2555,7 @@ static int amd64_init_one_instance(struct pci_dev *F2)
struct amd64_pvt *pvt = NULL;
struct amd64_family_type *fam_type = NULL;
struct mem_ctl_info *mci = NULL;
+ struct edac_mc_layer layers[2];
int err = 0, ret;
u8 nid = get_node_id(F2);
@@ -2574,7 +2590,13 @@ static int amd64_init_one_instance(struct pci_dev *F2)
goto err_siblings;
ret = -ENOMEM;
- mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = pvt->csels[0].b_cnt;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = pvt->channel_count;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
if (!mci)
goto err_siblings;
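
The allocator change at the end of this hunk is the core of the EDAC rework in this series: instead of edac_mc_alloc(pvt_size, nr_csrows, nr_chans, idx), the controller is described as a list of layers and the private-data size moves to the last argument. A minimal sketch for a hypothetical controller with eight chip selects and two channels; the function name is illustrative and the private size is left at zero:

	static struct mem_ctl_info *foo_alloc_mci(void)
	{
		struct edac_mc_layer layers[2];

		layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
		layers[0].size = 8;	/* chip selects (csrows) */
		layers[0].is_virt_csrow = true;
		layers[1].type = EDAC_MC_LAYER_CHANNEL;
		layers[1].size = 2;	/* channels per csrow */
		layers[1].is_virt_csrow = false;

		/* last argument is the driver-private size, 0 if unused */
		return edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
	}
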
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index f8fd3c807bd..9774d443fa5 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -29,7 +29,6 @@
edac_mc_chipset_printk(mci, level, "amd76x", fmt, ##arg)
#define AMD76X_NR_CSROWS 8
-#define AMD76X_NR_CHANS 1
#define AMD76X_NR_DIMMS 4
/* AMD 76x register addresses - device 0 function 0 - PCI bridge */
@@ -146,8 +145,10 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
if (handle_errors) {
row = (info->ecc_mode_status >> 4) & 0xf;
- edac_mc_handle_ue(mci, mci->csrows[row].first_page, 0,
- row, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ mci->csrows[row].first_page, 0, 0,
+ row, 0, -1,
+ mci->ctl_name, "", NULL);
}
}
@@ -159,8 +160,10 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
if (handle_errors) {
row = info->ecc_mode_status & 0xf;
- edac_mc_handle_ce(mci, mci->csrows[row].first_page, 0,
- 0, row, 0, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ mci->csrows[row].first_page, 0, 0,
+ row, 0, -1,
+ mci->ctl_name, "", NULL);
}
}
@@ -186,11 +189,13 @@ static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
enum edac_type edac_mode)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
u32 mba, mba_base, mba_mask, dms;
int index;
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
+ dimm = csrow->channels[0].dimm;
/* find the DRAM Chip Select Base address and mask */
pci_read_config_dword(pdev,
@@ -203,13 +208,13 @@ static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms);
csrow->first_page = mba_base >> PAGE_SHIFT;
- csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+ dimm->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
+ csrow->last_page = csrow->first_page + dimm->nr_pages - 1;
csrow->page_mask = mba_mask >> PAGE_SHIFT;
- csrow->grain = csrow->nr_pages << PAGE_SHIFT;
- csrow->mtype = MEM_RDDR;
- csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
- csrow->edac_mode = edac_mode;
+ dimm->grain = dimm->nr_pages << PAGE_SHIFT;
+ dimm->mtype = MEM_RDDR;
+ dimm->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
+ dimm->edac_mode = edac_mode;
}
}
@@ -230,7 +235,8 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
EDAC_SECDED,
EDAC_SECDED
};
- struct mem_ctl_info *mci = NULL;
+ struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
u32 ems;
u32 ems_mode;
struct amd76x_error_info discard;
@@ -238,11 +244,17 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
debugf0("%s()\n", __func__);
pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
ems_mode = (ems >> 10) & 0x3;
- mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS, 0);
- if (mci == NULL) {
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = AMD76X_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = 1;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
+
+ if (mci == NULL)
return -ENOMEM;
- }
debugf0("%s(): mci = %p\n", __func__, mci);
mci->dev = &pdev->dev;
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index 9a6a274e692..69ee6aab5c7 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -48,8 +48,9 @@ static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
syndrome = (ar & 0x000000001fe00000ul) >> 21;
/* TODO: Decoding of the error address */
- edac_mc_handle_ce(mci, csrow->first_page + pfn, offset,
- syndrome, 0, chan, "");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ csrow->first_page + pfn, offset, syndrome,
+ 0, chan, -1, "", "", NULL);
}
static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
@@ -69,7 +70,9 @@ static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
offset = address & ~PAGE_MASK;
/* TODO: Decoding of the error address */
- edac_mc_handle_ue(mci, csrow->first_page + pfn, offset, 0, "");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ csrow->first_page + pfn, offset, 0,
+ 0, chan, -1, "", "", NULL);
}
static void cell_edac_check(struct mem_ctl_info *mci)
@@ -124,8 +127,11 @@ static void cell_edac_check(struct mem_ctl_info *mci)
static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
{
struct csrow_info *csrow = &mci->csrows[0];
+ struct dimm_info *dimm;
struct cell_edac_priv *priv = mci->pvt_info;
struct device_node *np;
+ int j;
+ u32 nr_pages;
for (np = NULL;
(np = of_find_node_by_name(np, "memory")) != NULL;) {
@@ -140,15 +146,20 @@ static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
if (of_node_to_nid(np) != priv->node)
continue;
csrow->first_page = r.start >> PAGE_SHIFT;
- csrow->nr_pages = resource_size(&r) >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
- csrow->mtype = MEM_XDR;
- csrow->edac_mode = EDAC_SECDED;
+ nr_pages = resource_size(&r) >> PAGE_SHIFT;
+ csrow->last_page = csrow->first_page + nr_pages - 1;
+
+ for (j = 0; j < csrow->nr_channels; j++) {
+ dimm = csrow->channels[j].dimm;
+ dimm->mtype = MEM_XDR;
+ dimm->edac_mode = EDAC_SECDED;
+ dimm->nr_pages = nr_pages / csrow->nr_channels;
+ }
dev_dbg(mci->dev,
"Initialized on node %d, chanmask=0x%x,"
" first_page=0x%lx, nr_pages=0x%x\n",
priv->node, priv->chanmask,
- csrow->first_page, csrow->nr_pages);
+ csrow->first_page, nr_pages);
break;
}
}
@@ -157,9 +168,10 @@ static int __devinit cell_edac_probe(struct platform_device *pdev)
{
struct cbe_mic_tm_regs __iomem *regs;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct cell_edac_priv *priv;
u64 reg;
- int rc, chanmask;
+ int rc, chanmask, num_chans;
regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id));
if (regs == NULL)
@@ -184,8 +196,16 @@ static int __devinit cell_edac_probe(struct platform_device *pdev)
in_be64(&regs->mic_fir));
/* Allocate & init EDAC MC data structure */
- mci = edac_mc_alloc(sizeof(struct cell_edac_priv), 1,
- chanmask == 3 ? 2 : 1, pdev->id);
+ num_chans = chanmask == 3 ? 2 : 1;
+
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = 1;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = num_chans;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,
+ sizeof(struct cell_edac_priv));
if (mci == NULL)
return -ENOMEM;
priv = mci->pvt_info;
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index a774c0ddaf5..e22030a9de6 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -329,9 +329,10 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
{
struct cpc925_mc_pdata *pdata = mci->pvt_info;
struct csrow_info *csrow;
- int index;
+ struct dimm_info *dimm;
+ int index, j;
u32 mbmr, mbbar, bba;
- unsigned long row_size, last_nr_pages = 0;
+ unsigned long row_size, nr_pages, last_nr_pages = 0;
get_total_mem(pdata);
@@ -350,36 +351,41 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
row_size = bba * (1UL << 28); /* 256M */
csrow->first_page = last_nr_pages;
- csrow->nr_pages = row_size >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+ nr_pages = row_size >> PAGE_SHIFT;
+ csrow->last_page = csrow->first_page + nr_pages - 1;
last_nr_pages = csrow->last_page + 1;
- csrow->mtype = MEM_RDDR;
- csrow->edac_mode = EDAC_SECDED;
-
- switch (csrow->nr_channels) {
- case 1: /* Single channel */
- csrow->grain = 32; /* four-beat burst of 32 bytes */
- break;
- case 2: /* Dual channel */
- default:
- csrow->grain = 64; /* four-beat burst of 64 bytes */
- break;
- }
-
- switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
- case 6: /* 0110, no way to differentiate X8 VS X16 */
- case 5: /* 0101 */
- case 8: /* 1000 */
- csrow->dtype = DEV_X16;
- break;
- case 7: /* 0111 */
- case 9: /* 1001 */
- csrow->dtype = DEV_X8;
- break;
- default:
- csrow->dtype = DEV_UNKNOWN;
- break;
+ for (j = 0; j < csrow->nr_channels; j++) {
+ dimm = csrow->channels[j].dimm;
+
+ dimm->nr_pages = nr_pages / csrow->nr_channels;
+ dimm->mtype = MEM_RDDR;
+ dimm->edac_mode = EDAC_SECDED;
+
+ switch (csrow->nr_channels) {
+ case 1: /* Single channel */
+ dimm->grain = 32; /* four-beat burst of 32 bytes */
+ break;
+ case 2: /* Dual channel */
+ default:
+ dimm->grain = 64; /* four-beat burst of 64 bytes */
+ break;
+ }
+
+ switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
+ case 6: /* 0110, no way to differentiate X8 VS X16 */
+ case 5: /* 0101 */
+ case 8: /* 1000 */
+ dimm->dtype = DEV_X16;
+ break;
+ case 7: /* 0111 */
+ case 9: /* 1001 */
+ dimm->dtype = DEV_X8;
+ break;
+ default:
+ dimm->dtype = DEV_UNKNOWN;
+ break;
+ }
}
}
}
@@ -549,13 +555,18 @@ static void cpc925_mc_check(struct mem_ctl_info *mci)
if (apiexcp & CECC_EXCP_DETECTED) {
cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n");
channel = cpc925_mc_find_channel(mci, syndrome);
- edac_mc_handle_ce(mci, pfn, offset, syndrome,
- csrow, channel, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ pfn, offset, syndrome,
+ csrow, channel, -1,
+ mci->ctl_name, "", NULL);
}
if (apiexcp & UECC_EXCP_DETECTED) {
cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
- edac_mc_handle_ue(mci, pfn, offset, csrow, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ pfn, offset, 0,
+ csrow, -1, -1,
+ mci->ctl_name, "", NULL);
}
cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n");
@@ -927,6 +938,7 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
{
static int edac_mc_idx;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
void __iomem *vbase;
struct cpc925_mc_pdata *pdata;
struct resource *r;
@@ -962,9 +974,16 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
goto err2;
}
- nr_channels = cpc925_mc_get_channels(vbase);
- mci = edac_mc_alloc(sizeof(struct cpc925_mc_pdata),
- CPC925_NR_CSROWS, nr_channels + 1, edac_mc_idx);
+ nr_channels = cpc925_mc_get_channels(vbase) + 1;
+
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = CPC925_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = nr_channels;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
+ sizeof(struct cpc925_mc_pdata));
if (!mci) {
cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n");
res = -ENOMEM;
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 41223261ede..3186512c973 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -4,7 +4,11 @@
* This file may be distributed under the terms of the
* GNU General Public License.
*
- * See "enum e752x_chips" below for supported chipsets
+ * Implement support for the e7520, E7525, e7320 and i3100 memory controllers.
+ *
+ * Datasheets:
+ * http://www.intel.in/content/www/in/en/chipsets/e7525-memory-controller-hub-datasheet.html
+ * ftp://download.intel.com/design/intarch/datashts/31345803.pdf
*
* Written by Tom Zimmerman
*
@@ -13,8 +17,6 @@
* Wang Zhenyu at intel.com
* Dave Jiang at mvista.com
*
- * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
- *
*/
#include <linux/module.h>
@@ -187,6 +189,25 @@ enum e752x_chips {
I3100 = 3
};
+/*
+ * These chips support single-rank and dual-rank memories only.
+ *
+ * On e752x chips, the odd rows are present only on dual-rank memories.
+ * Dividing the rank by two will provide the dimm#
+ *
+ * i3100 MC has a different mapping: it supports only 4 ranks.
+ *
+ * The mapping is (from 1 to n):
+ * slot single-ranked double-ranked
+ * dimm #1 -> rank #4 NA
+ * dimm #2 -> rank #3 NA
+ * dimm #3 -> rank #2 Ranks 2 and 3
+ * dimm #4 -> rank #1 Ranks 1 and 4
+ *
+ * FIXME: The current mapping for i3100 considers that it supports up to 8
+ * ranks/chanel, but datasheet says that the MC supports only 4 ranks.
+ */
+
struct e752x_pvt {
struct pci_dev *bridge_ck;
struct pci_dev *dev_d0f0;
@@ -350,8 +371,10 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
channel = !(error_one & 1);
/* e752x mc reads 34:6 of the DRAM linear address */
- edac_mc_handle_ce(mci, page, offset_in_page(sec1_add << 4),
- sec1_syndrome, row, channel, "e752x CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offset_in_page(sec1_add << 4), sec1_syndrome,
+ row, channel, -1,
+ "e752x CE", "", NULL);
}
static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
@@ -385,9 +408,12 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
edac_mc_find_csrow_by_page(mci, block_page);
/* e752x mc reads 34:6 of the DRAM linear address */
- edac_mc_handle_ue(mci, block_page,
- offset_in_page(error_2b << 4),
- row, "e752x UE from Read");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ block_page,
+ offset_in_page(error_2b << 4), 0,
+ row, -1, -1,
+ "e752x UE from Read", "", NULL);
+
}
if (error_one & 0x0404) {
error_2b = scrb_add;
@@ -401,9 +427,11 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
edac_mc_find_csrow_by_page(mci, block_page);
/* e752x mc reads 34:6 of the DRAM linear address */
- edac_mc_handle_ue(mci, block_page,
- offset_in_page(error_2b << 4),
- row, "e752x UE from Scruber");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ block_page,
+ offset_in_page(error_2b << 4), 0,
+ row, -1, -1,
+ "e752x UE from Scruber", "", NULL);
}
}
@@ -426,7 +454,9 @@ static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
return;
debugf3("%s()\n", __func__);
- edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1,
+ "e752x UE log memory write", "", NULL);
}
static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
@@ -1044,7 +1074,7 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */
int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
u8 value;
- u32 dra, drc, cumul_size;
+ u32 dra, drc, cumul_size, i, nr_pages;
dra = 0;
for (index = 0; index < 4; index++) {
@@ -1053,7 +1083,7 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
dra |= dra_reg << (index * 8);
}
pci_read_config_dword(pdev, E752X_DRC, &drc);
- drc_chan = dual_channel_active(ddrcsr);
+ drc_chan = dual_channel_active(ddrcsr) ? 1 : 0;
drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */
drc_ddim = (drc >> 20) & 0x3;
@@ -1078,26 +1108,33 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
+ nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
- csrow->mtype = MEM_RDDR; /* only one type supported */
- csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
-
- /*
- * if single channel or x8 devices then SECDED
- * if dual channel and x4 then S4ECD4ED
- */
- if (drc_ddim) {
- if (drc_chan && mem_dev) {
- csrow->edac_mode = EDAC_S4ECD4ED;
- mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
- } else {
- csrow->edac_mode = EDAC_SECDED;
- mci->edac_cap |= EDAC_FLAG_SECDED;
- }
- } else
- csrow->edac_mode = EDAC_NONE;
+
+ for (i = 0; i < csrow->nr_channels; i++) {
+ struct dimm_info *dimm = csrow->channels[i].dimm;
+
+ debugf3("Initializing rank at (%i,%i)\n", index, i);
+ dimm->nr_pages = nr_pages / csrow->nr_channels;
+ dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */
+ dimm->mtype = MEM_RDDR; /* only one type supported */
+ dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+ /*
+ * if single channel or x8 devices then SECDED
+ * if dual channel and x4 then S4ECD4ED
+ */
+ if (drc_ddim) {
+ if (drc_chan && mem_dev) {
+ dimm->edac_mode = EDAC_S4ECD4ED;
+ mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+ } else {
+ dimm->edac_mode = EDAC_SECDED;
+ mci->edac_cap |= EDAC_FLAG_SECDED;
+ }
+ } else
+ dimm->edac_mode = EDAC_NONE;
+ }
}
}
@@ -1226,6 +1263,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
u16 pci_data;
u8 stat8;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct e752x_pvt *pvt;
u16 ddrcsr;
int drc_chan; /* Number of channels 0=1chan,1=2chan */
@@ -1252,11 +1290,15 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
/* Dual channel = 1, Single channel = 0 */
drc_chan = dual_channel_active(ddrcsr);
- mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1, 0);
-
- if (mci == NULL) {
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = E752X_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = drc_chan + 1;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
+ if (mci == NULL)
return -ENOMEM;
- }
debugf3("%s(): init mci\n", __func__);
mci->mtype_cap = MEM_FLAG_RDDR;
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 68dea87b72e..9a9c1a54679 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -10,6 +10,9 @@
* Based on work by Dan Hollis <goemon at anime dot net> and others.
* http://www.anime.net/~goemon/linux-ecc/
*
+ * Datasheet:
+ * http://www.intel.com/content/www/us/en/chipsets/e7501-chipset-memory-controller-hub-datasheet.html
+ *
* Contributors:
* Eric Biederman (Linux Networx)
* Tom Zimmerman (Linux Networx)
@@ -71,7 +74,7 @@
#endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */
#define E7XXX_NR_CSROWS 8 /* number of csrows */
-#define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? */
+#define E7XXX_NR_DIMMS 8 /* 2 channels, 4 dimms/channel */
/* E7XXX register addresses - device 0 function 0 */
#define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */
@@ -216,13 +219,15 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
row = edac_mc_find_csrow_by_page(mci, page);
/* convert syndrome to channel */
channel = e7xxx_find_channel(syndrome);
- edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, "e7xxx CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, page, 0, syndrome,
+ row, channel, -1, "e7xxx CE", "", NULL);
}
static void process_ce_no_info(struct mem_ctl_info *mci)
{
debugf3("%s()\n", __func__);
- edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0, -1, -1, -1,
+ "e7xxx CE log register overflow", "", NULL);
}
static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
@@ -236,13 +241,17 @@ static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
/* FIXME - should use PAGE_SHIFT */
block_page = error_2b >> 6; /* convert to 4k address */
row = edac_mc_find_csrow_by_page(mci, block_page);
- edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE");
+
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, block_page, 0, 0,
+ row, -1, -1, "e7xxx UE", "", NULL);
}
static void process_ue_no_info(struct mem_ctl_info *mci)
{
debugf3("%s()\n", __func__);
- edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow");
+
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, -1, -1, -1,
+ "e7xxx UE log register overflow", "", NULL);
}
static void e7xxx_get_error_info(struct mem_ctl_info *mci,
@@ -347,11 +356,12 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
int dev_idx, u32 drc)
{
unsigned long last_cumul_size;
- int index;
+ int index, j;
u8 value;
- u32 dra, cumul_size;
+ u32 dra, cumul_size, nr_pages;
int drc_chan, drc_drbg, drc_ddim, mem_dev;
struct csrow_info *csrow;
+ struct dimm_info *dimm;
pci_read_config_dword(pdev, E7XXX_DRA, &dra);
drc_chan = dual_channel_active(drc, dev_idx);
@@ -379,26 +389,32 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
+ nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
- csrow->mtype = MEM_RDDR; /* only one type supported */
- csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
-
- /*
- * if single channel or x8 devices then SECDED
- * if dual channel and x4 then S4ECD4ED
- */
- if (drc_ddim) {
- if (drc_chan && mem_dev) {
- csrow->edac_mode = EDAC_S4ECD4ED;
- mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
- } else {
- csrow->edac_mode = EDAC_SECDED;
- mci->edac_cap |= EDAC_FLAG_SECDED;
- }
- } else
- csrow->edac_mode = EDAC_NONE;
+
+ for (j = 0; j < drc_chan + 1; j++) {
+ dimm = csrow->channels[j].dimm;
+
+ dimm->nr_pages = nr_pages / (drc_chan + 1);
+ dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */
+ dimm->mtype = MEM_RDDR; /* only one type supported */
+ dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+ /*
+ * if single channel or x8 devices then SECDED
+ * if dual channel and x4 then S4ECD4ED
+ */
+ if (drc_ddim) {
+ if (drc_chan && mem_dev) {
+ dimm->edac_mode = EDAC_S4ECD4ED;
+ mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+ } else {
+ dimm->edac_mode = EDAC_SECDED;
+ mci->edac_cap |= EDAC_FLAG_SECDED;
+ }
+ } else
+ dimm->edac_mode = EDAC_NONE;
+ }
}
}
@@ -406,6 +422,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
{
u16 pci_data;
struct mem_ctl_info *mci = NULL;
+ struct edac_mc_layer layers[2];
struct e7xxx_pvt *pvt = NULL;
u32 drc;
int drc_chan;
@@ -416,8 +433,21 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
pci_read_config_dword(pdev, E7XXX_DRC, &drc);
drc_chan = dual_channel_active(drc, dev_idx);
- mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1, 0);
-
+ /*
+ * According to the datasheet, this device has a maximum of
+ * 4 DIMMs per channel, either single-rank or dual-rank. So, the
+ * total number of DIMMs is 8 (E7XXX_NR_DIMMS).
+ * That means that DIMMs are mapped as csrows, and the channel
+ * maps the rank. So, an error on either channel should be
+ * attributed to the same dimm.
+ */
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = E7XXX_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = drc_chan + 1;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (mci == NULL)
return -ENOMEM;
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 5b739411d62..117490d4f83 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -447,8 +447,10 @@ static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
#endif /* CONFIG_PCI */
-extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
- unsigned nr_chans, int edac_index);
+struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
+ unsigned n_layers,
+ struct edac_mc_layer *layers,
+ unsigned sz_pvt);
extern int edac_mc_add_mc(struct mem_ctl_info *mci);
extern void edac_mc_free(struct mem_ctl_info *mci);
extern struct mem_ctl_info *edac_mc_find(int idx);
@@ -456,35 +458,17 @@ extern struct mem_ctl_info *find_mci_by_dev(struct device *dev);
extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
unsigned long page);
-
-/*
- * The no info errors are used when error overflows are reported.
- * There are a limited number of error logging registers that can
- * be exausted. When all registers are exhausted and an additional
- * error occurs then an error overflow register records that an
- * error occurred and the type of error, but doesn't have any
- * further information. The ce/ue versions make for cleaner
- * reporting logic and function interface - reduces conditional
- * statement clutter and extra function arguments.
- */
-extern void edac_mc_handle_ce(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page,
- unsigned long syndrome, int row, int channel,
- const char *msg);
-extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
- const char *msg);
-extern void edac_mc_handle_ue(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page, int row,
- const char *msg);
-extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
- const char *msg);
-extern void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci, unsigned int csrow,
- unsigned int channel0, unsigned int channel1,
- char *msg);
-extern void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, unsigned int csrow,
- unsigned int channel, char *msg);
+void edac_mc_handle_error(const enum hw_event_mc_err_type type,
+ struct mem_ctl_info *mci,
+ const unsigned long page_frame_number,
+ const unsigned long offset_in_page,
+ const unsigned long syndrome,
+ const int layer0,
+ const int layer1,
+ const int layer2,
+ const char *msg,
+ const char *other_detail,
+ const void *mcelog);
/*
* edac_device APIs
@@ -496,6 +480,7 @@ extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
int inst_nr, int block_nr, const char *msg);
extern int edac_device_alloc_index(void);
+extern const char *edac_layer_name[];
/*
* edac_pci APIs
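
As a quick orientation for the consolidated reporting call declared above, a hedged example of how a converted driver reports one corrected and one uncorrected event; the pfn/offset/syndrome/csrow/channel variables and the message strings are placeholders, and -1 marks a layer whose position is not known:

        /* corrected error with known csrow and channel (third layer unused) */
        edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
                             pfn, offset, syndrome,
                             csrow, channel, -1,
                             "example CE", "", NULL);

        /* uncorrected error where only the csrow could be determined */
        edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
                             pfn, offset, 0,
                             csrow, -1, -1,
                             "example UE", "", NULL);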
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 45b8f4bdd77..ee3f1f810c1 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -79,7 +79,7 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
unsigned total_size;
unsigned count;
unsigned instance, block, attr;
- void *pvt;
+ void *pvt, *p;
int err;
debugf4("%s() instances=%d blocks=%d\n",
@@ -92,35 +92,30 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
* to be at least as stringent as what the compiler would
* provide if we could simply hardcode everything into a single struct.
*/
- dev_ctl = (struct edac_device_ctl_info *)NULL;
+ p = NULL;
+ dev_ctl = edac_align_ptr(&p, sizeof(*dev_ctl), 1);
/* Calc the 'end' offset past end of ONE ctl_info structure
* which will become the start of the 'instance' array
*/
- dev_inst = edac_align_ptr(&dev_ctl[1], sizeof(*dev_inst));
+ dev_inst = edac_align_ptr(&p, sizeof(*dev_inst), nr_instances);
/* Calc the 'end' offset past the instance array within the ctl_info
* which will become the start of the block array
*/
- dev_blk = edac_align_ptr(&dev_inst[nr_instances], sizeof(*dev_blk));
+ count = nr_instances * nr_blocks;
+ dev_blk = edac_align_ptr(&p, sizeof(*dev_blk), count);
/* Calc the 'end' offset past the dev_blk array
* which will become the start of the attrib array, if any.
*/
- count = nr_instances * nr_blocks;
- dev_attrib = edac_align_ptr(&dev_blk[count], sizeof(*dev_attrib));
-
- /* Check for case of when an attribute array is specified */
- if (nr_attrib > 0) {
- /* calc how many nr_attrib we need */
+ /* calc how many nr_attrib we need */
+ if (nr_attrib > 0)
count *= nr_attrib;
+ dev_attrib = edac_align_ptr(&p, sizeof(*dev_attrib), count);
- /* Calc the 'end' offset past the attributes array */
- pvt = edac_align_ptr(&dev_attrib[count], sz_private);
- } else {
- /* no attribute array specified */
- pvt = edac_align_ptr(dev_attrib, sz_private);
- }
+ /* Calc the 'end' offset past the attributes array */
+ pvt = edac_align_ptr(&p, sz_private, 1);
/* 'pvt' now points to where the private data area is.
* At this point 'pvt' (like dev_inst,dev_blk and dev_attrib)
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index feef7733fae..10f375032e9 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -43,9 +43,26 @@ static void edac_mc_dump_channel(struct rank_info *chan)
{
debugf4("\tchannel = %p\n", chan);
debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
- debugf4("\tchannel->ce_count = %d\n", chan->ce_count);
- debugf4("\tchannel->label = '%s'\n", chan->label);
debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
+ debugf4("\tchannel->dimm = %p\n", chan->dimm);
+}
+
+static void edac_mc_dump_dimm(struct dimm_info *dimm)
+{
+ int i;
+
+ debugf4("\tdimm = %p\n", dimm);
+ debugf4("\tdimm->label = '%s'\n", dimm->label);
+ debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
+ debugf4("\tdimm location ");
+ for (i = 0; i < dimm->mci->n_layers; i++) {
+ printk(KERN_CONT "%d", dimm->location[i]);
+ if (i < dimm->mci->n_layers - 1)
+ printk(KERN_CONT ".");
+ }
+ printk(KERN_CONT "\n");
+ debugf4("\tdimm->grain = %d\n", dimm->grain);
+ debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
}
static void edac_mc_dump_csrow(struct csrow_info *csrow)
@@ -55,7 +72,6 @@ static void edac_mc_dump_csrow(struct csrow_info *csrow)
debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page);
debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
- debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels);
debugf4("\tcsrow->channels = %p\n", csrow->channels);
debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
@@ -70,6 +86,8 @@ static void edac_mc_dump_mci(struct mem_ctl_info *mci)
debugf4("\tmci->edac_check = %p\n", mci->edac_check);
debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
mci->nr_csrows, mci->csrows);
+ debugf3("\tmci->nr_dimms = %d, dimms = %p\n",
+ mci->tot_dimms, mci->dimms);
debugf3("\tdev = %p\n", mci->dev);
debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name);
debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
@@ -101,18 +119,37 @@ const char *edac_mem_types[] = {
};
EXPORT_SYMBOL_GPL(edac_mem_types);
-/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
- * Adjust 'ptr' so that its alignment is at least as stringent as what the
- * compiler would provide for X and return the aligned result.
+/**
+ * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
+ * @p: pointer to a pointer with the memory offset to be used. At
+ * return, this will be incremented to point to the next offset
+ * @size: Size of the data structure to be reserved
+ * @n_elems: Number of elements that should be reserved
*
* If 'size' is a constant, the compiler will optimize this whole function
- * down to either a no-op or the addition of a constant to the value of 'ptr'.
+ * down to either a no-op or the addition of a constant to the value of '*p'.
+ *
+ * The 'p' pointer is needed so that successive calls keep advancing
+ * through memory to the proper offsets when a structure is allocated
+ * together with its embedded structures, as edac_device_alloc_ctl_info()
+ * does above, for example.
+ *
+ * At return, the pointer 'p' will be incremented to be used on a next call
+ * to this function.
*/
-void *edac_align_ptr(void *ptr, unsigned size)
+void *edac_align_ptr(void **p, unsigned size, int n_elems)
{
unsigned align, r;
+ void *ptr = *p;
+
+ *p += size * n_elems;
- /* Here we assume that the alignment of a "long long" is the most
+ /*
+ * 'p' can possibly be an unaligned item X such that sizeof(X) is
+ * 'size'. Adjust 'p' so that its alignment is at least as
+ * stringent as what the compiler would provide for X and return
+ * the aligned result.
+ * Here we assume that the alignment of a "long long" is the most
* stringent alignment that the compiler will ever provide by default.
* As far as I know, this is a reasonable assumption.
*/
@@ -132,14 +169,18 @@ void *edac_align_ptr(void *ptr, unsigned size)
if (r == 0)
return (char *)ptr;
+ *p += align - r;
+
return (void *)(((unsigned long)ptr) + align - r);
}
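
To make the two-pass allocation scheme built on edac_align_ptr() easier to follow, here is a condensed, hedged restatement of the pattern used by edac_mc_alloc() below (trimmed to one embedded array; tot_csrows and sz_pvt stand in for the real counts and are not literal patch code):

        void *ptr = NULL;               /* pass 1 offsets are relative to 0 */
        struct mem_ctl_info *mci;
        struct csrow_info *csi;
        void *pvt;
        unsigned size;

        /* pass 1: record aligned offsets while '*ptr' advances */
        mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
        csi = edac_align_ptr(&ptr, sizeof(*csi), tot_csrows);
        pvt = edac_align_ptr(&ptr, sz_pvt, 1);
        size = ((unsigned long)pvt) + sz_pvt;   /* total bytes needed */

        /* pass 2: one allocation, then rebase each offset onto it */
        mci = kzalloc(size, GFP_KERNEL);
        if (!mci)
                return NULL;
        csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi));
        pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;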
/**
- * edac_mc_alloc: Allocate a struct mem_ctl_info structure
- * @size_pvt: size of private storage needed
- * @nr_csrows: Number of CWROWS needed for this MC
- * @nr_chans: Number of channels for the MC
+ * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure
+ * @mc_num: Memory controller number
+ * @n_layers: Number of MC hierarchy layers
+ * @layers: Describes each layer as seen by the Memory Controller
+ * @sz_pvt: size of private storage needed
+ *
*
* Everything is kmalloc'ed as one big chunk - more efficient.
* Only can be used if all structures have the same lifetime - otherwise
@@ -147,32 +188,77 @@ void *edac_align_ptr(void *ptr, unsigned size)
*
* Use edac_mc_free() to free mc structures allocated by this function.
*
+ * NOTE: drivers handle multi-rank memories in different ways: in some
+ * drivers, one multi-rank memory stick is mapped as one entry, while, in
+ * others, a single multi-rank memory stick would be mapped into several
+ * entries. Currently, this function will allocate multiple struct dimm_info
+ * in such scenarios, as grouping the multiple ranks requires changes to the drivers.
+ *
* Returns:
- * NULL allocation failed
- * struct mem_ctl_info pointer
+ * On failure: NULL
+ * On success: struct mem_ctl_info pointer
*/
-struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
- unsigned nr_chans, int edac_index)
+struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
+ unsigned n_layers,
+ struct edac_mc_layer *layers,
+ unsigned sz_pvt)
{
struct mem_ctl_info *mci;
- struct csrow_info *csi, *csrow;
+ struct edac_mc_layer *layer;
+ struct csrow_info *csi, *csr;
struct rank_info *chi, *chp, *chan;
- void *pvt;
- unsigned size;
- int row, chn;
- int err;
+ struct dimm_info *dimm;
+ u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
+ unsigned pos[EDAC_MAX_LAYERS];
+ unsigned size, tot_dimms = 1, count = 1;
+ unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
+ void *pvt, *p, *ptr = NULL;
+ int i, j, err, row, chn, n, len;
+ bool per_rank = false;
+
+ BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
+ /*
+ * Calculate the total number of DIMMs and of csrows/cschannels needed
+ * while in the old API emulation mode
+ */
+ for (i = 0; i < n_layers; i++) {
+ tot_dimms *= layers[i].size;
+ if (layers[i].is_virt_csrow)
+ tot_csrows *= layers[i].size;
+ else
+ tot_channels *= layers[i].size;
+
+ if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT)
+ per_rank = true;
+ }
/* Figure out the offsets of the various items from the start of an mc
* structure. We want the alignment of each item to be at least as
* stringent as what the compiler would provide if we could simply
* hardcode everything into a single struct.
*/
- mci = (struct mem_ctl_info *)0;
- csi = edac_align_ptr(&mci[1], sizeof(*csi));
- chi = edac_align_ptr(&csi[nr_csrows], sizeof(*chi));
- pvt = edac_align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
+ mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
+ layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
+ csi = edac_align_ptr(&ptr, sizeof(*csi), tot_csrows);
+ chi = edac_align_ptr(&ptr, sizeof(*chi), tot_csrows * tot_channels);
+ dimm = edac_align_ptr(&ptr, sizeof(*dimm), tot_dimms);
+ for (i = 0; i < n_layers; i++) {
+ count *= layers[i].size;
+ debugf4("%s: errcount layer %d size %d\n", __func__, i, count);
+ ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
+ ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
+ tot_errcount += 2 * count;
+ }
+
+ debugf4("%s: allocating %d error counters\n", __func__, tot_errcount);
+ pvt = edac_align_ptr(&ptr, sz_pvt, 1);
size = ((unsigned long)pvt) + sz_pvt;
+ debugf1("%s(): allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
+ __func__, size,
+ tot_dimms,
+ per_rank ? "ranks" : "dimms",
+ tot_csrows * tot_channels);
mci = kzalloc(size, GFP_KERNEL);
if (mci == NULL)
return NULL;
@@ -180,28 +266,103 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
/* Adjust pointers so they point within the memory we just allocated
* rather than an imaginary chunk of memory located at address 0.
*/
+ layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi));
chi = (struct rank_info *)(((char *)mci) + ((unsigned long)chi));
+ dimm = (struct dimm_info *)(((char *)mci) + ((unsigned long)dimm));
+ for (i = 0; i < n_layers; i++) {
+ mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
+ mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
+ }
pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;
/* setup index and various internal pointers */
- mci->mc_idx = edac_index;
+ mci->mc_idx = mc_num;
mci->csrows = csi;
+ mci->dimms = dimm;
+ mci->tot_dimms = tot_dimms;
mci->pvt_info = pvt;
- mci->nr_csrows = nr_csrows;
-
- for (row = 0; row < nr_csrows; row++) {
- csrow = &csi[row];
- csrow->csrow_idx = row;
- csrow->mci = mci;
- csrow->nr_channels = nr_chans;
- chp = &chi[row * nr_chans];
- csrow->channels = chp;
+ mci->n_layers = n_layers;
+ mci->layers = layer;
+ memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
+ mci->nr_csrows = tot_csrows;
+ mci->num_cschannel = tot_channels;
+ mci->mem_is_per_rank = per_rank;
- for (chn = 0; chn < nr_chans; chn++) {
+ /*
+ * Fill the csrow struct
+ */
+ for (row = 0; row < tot_csrows; row++) {
+ csr = &csi[row];
+ csr->csrow_idx = row;
+ csr->mci = mci;
+ csr->nr_channels = tot_channels;
+ chp = &chi[row * tot_channels];
+ csr->channels = chp;
+
+ for (chn = 0; chn < tot_channels; chn++) {
chan = &chp[chn];
chan->chan_idx = chn;
- chan->csrow = csrow;
+ chan->csrow = csr;
+ }
+ }
+
+ /*
+ * Fill the dimm struct
+ */
+ memset(&pos, 0, sizeof(pos));
+ row = 0;
+ chn = 0;
+ debugf4("%s: initializing %d %s\n", __func__, tot_dimms,
+ per_rank ? "ranks" : "dimms");
+ for (i = 0; i < tot_dimms; i++) {
+ chan = &csi[row].channels[chn];
+ dimm = EDAC_DIMM_PTR(layer, mci->dimms, n_layers,
+ pos[0], pos[1], pos[2]);
+ dimm->mci = mci;
+
+ debugf2("%s: %d: %s%zd (%d:%d:%d): row %d, chan %d\n", __func__,
+ i, per_rank ? "rank" : "dimm", (dimm - mci->dimms),
+ pos[0], pos[1], pos[2], row, chn);
+
+ /*
+ * Copy DIMM location and initialize it.
+ */
+ len = sizeof(dimm->label);
+ p = dimm->label;
+ n = snprintf(p, len, "mc#%u", mc_num);
+ p += n;
+ len -= n;
+ for (j = 0; j < n_layers; j++) {
+ n = snprintf(p, len, "%s#%u",
+ edac_layer_name[layers[j].type],
+ pos[j]);
+ p += n;
+ len -= n;
+ dimm->location[j] = pos[j];
+
+ if (len <= 0)
+ break;
+ }
+
+ /* Link it to the csrows old API data */
+ chan->dimm = dimm;
+ dimm->csrow = row;
+ dimm->cschannel = chn;
+
+ /* Increment csrow location */
+ row++;
+ if (row == tot_csrows) {
+ row = 0;
+ chn++;
+ }
+
+ /* Increment dimm location */
+ for (j = n_layers - 1; j >= 0; j--) {
+ pos[j]++;
+ if (pos[j] < layers[j].size)
+ break;
+ pos[j] = 0;
}
}
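
The pos[] bookkeeping in the loop above enumerates every rank/dimm exactly once: the last layer is incremented first and a wrap carries into the previous layer, like an odometer. A small stand-alone demonstration (hedged, not patch code) for a hypothetical 3-csrow x 2-channel geometry, which visits (0,0), (0,1), (1,0), (1,1), (2,0), (2,1):

        #include <stdio.h>

        /* stand-alone demo of the pos[] advance used above (2 layers: 3 x 2) */
        int main(void)
        {
                const int size[2] = { 3, 2 };
                int pos[2] = { 0, 0 };
                int i, j;

                for (i = 0; i < size[0] * size[1]; i++) {
                        printf("dimm %d -> (%d,%d)\n", i, pos[0], pos[1]);

                        /* increment like an odometer: last layer first, carry on wrap */
                        for (j = 1; j >= 0; j--) {
                                pos[j]++;
                                if (pos[j] < size[j])
                                        break;
                                pos[j] = 0;
                        }
                }
                return 0;
        }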
@@ -490,7 +651,6 @@ EXPORT_SYMBOL(edac_mc_find);
* edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
* create sysfs entries associated with mci structure
* @mci: pointer to the mci structure to be added to the list
- * @mc_idx: A unique numeric identifier to be assigned to the 'mci' structure.
*
* Return:
* 0 Success
@@ -517,6 +677,8 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
edac_mc_dump_channel(&mci->csrows[i].
channels[j]);
}
+ for (i = 0; i < mci->tot_dimms; i++)
+ edac_mc_dump_dimm(&mci->dimms[i]);
}
#endif
mutex_lock(&mem_ctls_mutex);
@@ -636,15 +798,19 @@ static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
{
struct csrow_info *csrows = mci->csrows;
- int row, i;
+ int row, i, j, n;
debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page);
row = -1;
for (i = 0; i < mci->nr_csrows; i++) {
struct csrow_info *csrow = &csrows[i];
-
- if (csrow->nr_pages == 0)
+ n = 0;
+ for (j = 0; j < csrow->nr_channels; j++) {
+ struct dimm_info *dimm = csrow->channels[j].dimm;
+ n += dimm->nr_pages;
+ }
+ if (n == 0)
continue;
debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) "
@@ -670,249 +836,307 @@ int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
}
EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
-/* FIXME - setable log (warning/emerg) levels */
-/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
-void edac_mc_handle_ce(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page, unsigned long syndrome,
- int row, int channel, const char *msg)
-{
- unsigned long remapped_page;
+const char *edac_layer_name[] = {
+ [EDAC_MC_LAYER_BRANCH] = "branch",
+ [EDAC_MC_LAYER_CHANNEL] = "channel",
+ [EDAC_MC_LAYER_SLOT] = "slot",
+ [EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
+};
+EXPORT_SYMBOL_GPL(edac_layer_name);
- debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
+static void edac_inc_ce_error(struct mem_ctl_info *mci,
+ bool enable_per_layer_report,
+ const int pos[EDAC_MAX_LAYERS])
+{
+ int i, index = 0;
- /* FIXME - maybe make panic on INTERNAL ERROR an option */
- if (row >= mci->nr_csrows || row < 0) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: row out of range "
- "(%d >= %d)\n", row, mci->nr_csrows);
- edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
- return;
- }
+ mci->ce_mc++;
- if (channel >= mci->csrows[row].nr_channels || channel < 0) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: channel out of range "
- "(%d >= %d)\n", channel,
- mci->csrows[row].nr_channels);
- edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
+ if (!enable_per_layer_report) {
+ mci->ce_noinfo_count++;
return;
}
- if (edac_mc_get_log_ce())
- /* FIXME - put in DIMM location */
- edac_mc_printk(mci, KERN_WARNING,
- "CE page 0x%lx, offset 0x%lx, grain %d, syndrome "
- "0x%lx, row %d, channel %d, label \"%s\": %s\n",
- page_frame_number, offset_in_page,
- mci->csrows[row].grain, syndrome, row, channel,
- mci->csrows[row].channels[channel].label, msg);
-
- mci->ce_count++;
- mci->csrows[row].ce_count++;
- mci->csrows[row].channels[channel].ce_count++;
-
- if (mci->scrub_mode & SCRUB_SW_SRC) {
- /*
- * Some MC's can remap memory so that it is still available
- * at a different address when PCI devices map into memory.
- * MC's that can't do this lose the memory where PCI devices
- * are mapped. This mapping is MC dependent and so we call
- * back into the MC driver for it to map the MC page to
- * a physical (CPU) page which can then be mapped to a virtual
- * page - which can then be scrubbed.
- */
- remapped_page = mci->ctl_page_to_phys ?
- mci->ctl_page_to_phys(mci, page_frame_number) :
- page_frame_number;
+ for (i = 0; i < mci->n_layers; i++) {
+ if (pos[i] < 0)
+ break;
+ index += pos[i];
+ mci->ce_per_layer[i][index]++;
- edac_mc_scrub_block(remapped_page, offset_in_page,
- mci->csrows[row].grain);
+ if (i < mci->n_layers - 1)
+ index *= mci->layers[i + 1].size;
}
}
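
The per-layer counter indexing above flattens the (layer0, layer1, ...) position into a single array index per layer. A hedged worked example for two layers of sizes 4 (csrow) and 2 (channel) at position (3,1): the csrow-layer counter uses index 3 and the channel-layer counter uses index 3 * 2 + 1 = 7, which is exactly what the loop computes step by step:

        /* illustrative trace of the loop for pos = {3, 1}, layer sizes {4, 2} */
        index = 0;
        index += pos[0];                        /* index = 3, bumps ce_per_layer[0][3] */
        index *= mci->layers[1].size;           /* index = 3 * 2 = 6 */
        index += pos[1];                        /* index = 7, bumps ce_per_layer[1][7] */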
-EXPORT_SYMBOL_GPL(edac_mc_handle_ce);
-void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg)
+static void edac_inc_ue_error(struct mem_ctl_info *mci,
+ bool enable_per_layer_report,
+ const int pos[EDAC_MAX_LAYERS])
{
- if (edac_mc_get_log_ce())
- edac_mc_printk(mci, KERN_WARNING,
- "CE - no information available: %s\n", msg);
+ int i, index = 0;
- mci->ce_noinfo_count++;
- mci->ce_count++;
-}
-EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info);
+ mci->ue_mc++;
-void edac_mc_handle_ue(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page, int row, const char *msg)
-{
- int len = EDAC_MC_LABEL_LEN * 4;
- char labels[len + 1];
- char *pos = labels;
- int chan;
- int chars;
-
- debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
-
- /* FIXME - maybe make panic on INTERNAL ERROR an option */
- if (row >= mci->nr_csrows || row < 0) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: row out of range "
- "(%d >= %d)\n", row, mci->nr_csrows);
- edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
+ if (!enable_per_layer_report) {
+ mci->ue_noinfo_count++;
return;
}
- chars = snprintf(pos, len + 1, "%s",
- mci->csrows[row].channels[0].label);
- len -= chars;
- pos += chars;
+ for (i = 0; i < mci->n_layers; i++) {
+ if (pos[i] < 0)
+ break;
+ index += pos[i];
+ mci->ue_per_layer[i][index]++;
- for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
- chan++) {
- chars = snprintf(pos, len + 1, ":%s",
- mci->csrows[row].channels[chan].label);
- len -= chars;
- pos += chars;
+ if (i < mci->n_layers - 1)
+ index *= mci->layers[i + 1].size;
}
+}
- if (edac_mc_get_log_ue())
- edac_mc_printk(mci, KERN_EMERG,
- "UE page 0x%lx, offset 0x%lx, grain %d, row %d, "
- "labels \"%s\": %s\n", page_frame_number,
- offset_in_page, mci->csrows[row].grain, row,
- labels, msg);
+static void edac_ce_error(struct mem_ctl_info *mci,
+ const int pos[EDAC_MAX_LAYERS],
+ const char *msg,
+ const char *location,
+ const char *label,
+ const char *detail,
+ const char *other_detail,
+ const bool enable_per_layer_report,
+ const unsigned long page_frame_number,
+ const unsigned long offset_in_page,
+ u32 grain)
+{
+ unsigned long remapped_page;
- if (edac_mc_get_panic_on_ue())
- panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, "
- "row %d, labels \"%s\": %s\n", mci->mc_idx,
- page_frame_number, offset_in_page,
- mci->csrows[row].grain, row, labels, msg);
+ if (edac_mc_get_log_ce()) {
+ if (other_detail && *other_detail)
+ edac_mc_printk(mci, KERN_WARNING,
+ "CE %s on %s (%s%s - %s)\n",
+ msg, label, location,
+ detail, other_detail);
+ else
+ edac_mc_printk(mci, KERN_WARNING,
+ "CE %s on %s (%s%s)\n",
+ msg, label, location,
+ detail);
+ }
+ edac_inc_ce_error(mci, enable_per_layer_report, pos);
- mci->ue_count++;
- mci->csrows[row].ue_count++;
+ if (mci->scrub_mode & SCRUB_SW_SRC) {
+ /*
+ * Some memory controllers (called MCs below) can remap
+ * memory so that it is still available at a different
+ * address when PCI devices map into memory.
+ * MC's that can't do this, lose the memory where PCI
+ * devices are mapped. This mapping is MC-dependent
+ * and so we call back into the MC driver for it to
+ * map the MC page to a physical (CPU) page which can
+ * then be mapped to a virtual page - which can then
+ * be scrubbed.
+ */
+ remapped_page = mci->ctl_page_to_phys ?
+ mci->ctl_page_to_phys(mci, page_frame_number) :
+ page_frame_number;
+
+ edac_mc_scrub_block(remapped_page,
+ offset_in_page, grain);
+ }
}
-EXPORT_SYMBOL_GPL(edac_mc_handle_ue);
-void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg)
+static void edac_ue_error(struct mem_ctl_info *mci,
+ const int pos[EDAC_MAX_LAYERS],
+ const char *msg,
+ const char *location,
+ const char *label,
+ const char *detail,
+ const char *other_detail,
+ const bool enable_per_layer_report)
{
- if (edac_mc_get_panic_on_ue())
- panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);
+ if (edac_mc_get_log_ue()) {
+ if (other_detail && *other_detail)
+ edac_mc_printk(mci, KERN_WARNING,
+ "UE %s on %s (%s%s - %s)\n",
+ msg, label, location, detail,
+ other_detail);
+ else
+ edac_mc_printk(mci, KERN_WARNING,
+ "UE %s on %s (%s%s)\n",
+ msg, label, location, detail);
+ }
- if (edac_mc_get_log_ue())
- edac_mc_printk(mci, KERN_WARNING,
- "UE - no information available: %s\n", msg);
- mci->ue_noinfo_count++;
- mci->ue_count++;
+ if (edac_mc_get_panic_on_ue()) {
+ if (other_detail && *other_detail)
+ panic("UE %s on %s (%s%s - %s)\n",
+ msg, label, location, detail, other_detail);
+ else
+ panic("UE %s on %s (%s%s)\n",
+ msg, label, location, detail);
+ }
+
+ edac_inc_ue_error(mci, enable_per_layer_report, pos);
}
-EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info);
-/*************************************************************
- * On Fully Buffered DIMM modules, this help function is
- * called to process UE events
- */
-void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci,
- unsigned int csrow,
- unsigned int channela,
- unsigned int channelb, char *msg)
+#define OTHER_LABEL " or "
+void edac_mc_handle_error(const enum hw_event_mc_err_type type,
+ struct mem_ctl_info *mci,
+ const unsigned long page_frame_number,
+ const unsigned long offset_in_page,
+ const unsigned long syndrome,
+ const int layer0,
+ const int layer1,
+ const int layer2,
+ const char *msg,
+ const char *other_detail,
+ const void *mcelog)
{
- int len = EDAC_MC_LABEL_LEN * 4;
- char labels[len + 1];
- char *pos = labels;
- int chars;
+ /* FIXME: too much for stack: move it to some pre-allocated area */
+ char detail[80], location[80];
+ char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms];
+ char *p;
+ int row = -1, chan = -1;
+ int pos[EDAC_MAX_LAYERS] = { layer0, layer1, layer2 };
+ int i;
+ u32 grain;
+ bool enable_per_layer_report = false;
- if (csrow >= mci->nr_csrows) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: row out of range (%d >= %d)\n",
- csrow, mci->nr_csrows);
- edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
- return;
- }
+ debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
- if (channela >= mci->csrows[csrow].nr_channels) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: channel-a out of range "
- "(%d >= %d)\n",
- channela, mci->csrows[csrow].nr_channels);
- edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
- return;
+ /*
+ * Check if the event report is consistent and if the memory
+ * location is known. If it is known, enable_per_layer_report will be
+ * true, the DIMM(s) label info will be filled and the per-layer
+ * error counters will be incremented.
+ */
+ for (i = 0; i < mci->n_layers; i++) {
+ if (pos[i] >= (int)mci->layers[i].size) {
+ if (type == HW_EVENT_ERR_CORRECTED)
+ p = "CE";
+ else
+ p = "UE";
+
+ edac_mc_printk(mci, KERN_ERR,
+ "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
+ edac_layer_name[mci->layers[i].type],
+ pos[i], mci->layers[i].size);
+ /*
+ * Instead of just returning it, let's use what's
+ * known about the error. The increment routines and
+ * the DIMM filter logic will do the right thing by
+ * pointing the likely damaged DIMMs.
+ */
+ pos[i] = -1;
+ }
+ if (pos[i] >= 0)
+ enable_per_layer_report = true;
}
- if (channelb >= mci->csrows[csrow].nr_channels) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: channel-b out of range "
- "(%d >= %d)\n",
- channelb, mci->csrows[csrow].nr_channels);
- edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
- return;
- }
+ /*
+ * Get the dimm label/grain that applies to the match criteria.
+ * As the error algorithm may not be able to point to just one memory
+ * stick, the logic here will get all possible labels that could
+ * potentially be affected by the error.
+ * On FB-DIMM memory controllers, for uncorrected errors, it is common
+ * to have only the MC channel and the MC dimm (also called "branch")
+ * but the channel is not known, as the memory is arranged in pairs,
+ * where each memory belongs to a separate channel within the same
+ * branch.
+ */
+ grain = 0;
+ p = label;
+ *p = '\0';
+ for (i = 0; i < mci->tot_dimms; i++) {
+ struct dimm_info *dimm = &mci->dimms[i];
- mci->ue_count++;
- mci->csrows[csrow].ue_count++;
+ if (layer0 >= 0 && layer0 != dimm->location[0])
+ continue;
+ if (layer1 >= 0 && layer1 != dimm->location[1])
+ continue;
+ if (layer2 >= 0 && layer2 != dimm->location[2])
+ continue;
- /* Generate the DIMM labels from the specified channels */
- chars = snprintf(pos, len + 1, "%s",
- mci->csrows[csrow].channels[channela].label);
- len -= chars;
- pos += chars;
- chars = snprintf(pos, len + 1, "-%s",
- mci->csrows[csrow].channels[channelb].label);
+ /* get the max grain, over the error match range */
+ if (dimm->grain > grain)
+ grain = dimm->grain;
- if (edac_mc_get_log_ue())
- edac_mc_printk(mci, KERN_EMERG,
- "UE row %d, channel-a= %d channel-b= %d "
- "labels \"%s\": %s\n", csrow, channela, channelb,
- labels, msg);
+ /*
+ * If the error is memory-controller wide, there's no need to
+ * seek for the affected DIMMs because the whole
+ * channel/memory controller/... may be affected.
+ * Also, don't show errors for empty DIMM slots.
+ */
+ if (enable_per_layer_report && dimm->nr_pages) {
+ if (p != label) {
+ strcpy(p, OTHER_LABEL);
+ p += strlen(OTHER_LABEL);
+ }
+ strcpy(p, dimm->label);
+ p += strlen(p);
+ *p = '\0';
+
+ /*
+ * get csrow/channel of the DIMM, in order to allow
+ * incrementing the compat API counters
+ */
+ debugf4("%s: %s csrows map: (%d,%d)\n",
+ __func__,
+ mci->mem_is_per_rank ? "rank" : "dimm",
+ dimm->csrow, dimm->cschannel);
+
+ if (row == -1)
+ row = dimm->csrow;
+ else if (row >= 0 && row != dimm->csrow)
+ row = -2;
+
+ if (chan == -1)
+ chan = dimm->cschannel;
+ else if (chan >= 0 && chan != dimm->cschannel)
+ chan = -2;
+ }
+ }
- if (edac_mc_get_panic_on_ue())
- panic("UE row %d, channel-a= %d channel-b= %d "
- "labels \"%s\": %s\n", csrow, channela,
- channelb, labels, msg);
-}
-EXPORT_SYMBOL(edac_mc_handle_fbd_ue);
+ if (!enable_per_layer_report) {
+ strcpy(label, "any memory");
+ } else {
+ debugf4("%s: csrow/channel to increment: (%d,%d)\n",
+ __func__, row, chan);
+ if (p == label)
+ strcpy(label, "unknown memory");
+ if (type == HW_EVENT_ERR_CORRECTED) {
+ if (row >= 0) {
+ mci->csrows[row].ce_count++;
+ if (chan >= 0)
+ mci->csrows[row].channels[chan].ce_count++;
+ }
+ } else
+ if (row >= 0)
+ mci->csrows[row].ue_count++;
+ }
-/*************************************************************
- * On Fully Buffered DIMM modules, this help function is
- * called to process CE events
- */
-void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci,
- unsigned int csrow, unsigned int channel, char *msg)
-{
+ /* Fill the RAM location data */
+ p = location;
+ for (i = 0; i < mci->n_layers; i++) {
+ if (pos[i] < 0)
+ continue;
- /* Ensure boundary values */
- if (csrow >= mci->nr_csrows) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: row out of range (%d >= %d)\n",
- csrow, mci->nr_csrows);
- edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
- return;
- }
- if (channel >= mci->csrows[csrow].nr_channels) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: channel out of range (%d >= %d)\n",
- channel, mci->csrows[csrow].nr_channels);
- edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
- return;
+ p += sprintf(p, "%s:%d ",
+ edac_layer_name[mci->layers[i].type],
+ pos[i]);
}
- if (edac_mc_get_log_ce())
- /* FIXME - put in DIMM location */
- edac_mc_printk(mci, KERN_WARNING,
- "CE row %d, channel %d, label \"%s\": %s\n",
- csrow, channel,
- mci->csrows[csrow].channels[channel].label, msg);
+ /* Memory type dependent details about the error */
+ if (type == HW_EVENT_ERR_CORRECTED) {
+ snprintf(detail, sizeof(detail),
+ "page:0x%lx offset:0x%lx grain:%d syndrome:0x%lx",
+ page_frame_number, offset_in_page,
+ grain, syndrome);
+ edac_ce_error(mci, pos, msg, location, label, detail,
+ other_detail, enable_per_layer_report,
+ page_frame_number, offset_in_page, grain);
+ } else {
+ snprintf(detail, sizeof(detail),
+ "page:0x%lx offset:0x%lx grain:%d",
+ page_frame_number, offset_in_page, grain);
- mci->ce_count++;
- mci->csrows[csrow].ce_count++;
- mci->csrows[csrow].channels[channel].ce_count++;
+ edac_ue_error(mci, pos, msg, location, label, detail,
+ other_detail, enable_per_layer_report);
+ }
}
-EXPORT_SYMBOL(edac_mc_handle_fbd_ce);
+EXPORT_SYMBOL_GPL(edac_mc_handle_error);
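
Putting the pieces of edac_mc_handle_error() together, a hedged illustration of the log line a two-layer controller would emit for a corrected error at csrow 2, channel 1; the values and driver message are hypothetical and the exact prefix comes from edac_mc_printk():

        /*
         * label    : "mc#0csrow#2channel#1"   (built at edac_mc_alloc() time)
         * location : "csrow:2 channel:1 "     (built from the layer positions)
         * detail   : "page:0x1234 offset:0x40 grain:32 syndrome:0x0"
         *
         * resulting line (approximately):
         *   EDAC MC0: CE example CE on mc#0csrow#2channel#1
         *             (csrow:2 channel:1 page:0x1234 offset:0x40 grain:32 syndrome:0x0)
         */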
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index e9a28f576d1..f6a29b0eedc 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -144,25 +144,31 @@ static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data,
static ssize_t csrow_size_show(struct csrow_info *csrow, char *data,
int private)
{
- return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages));
+ int i;
+ u32 nr_pages = 0;
+
+ for (i = 0; i < csrow->nr_channels; i++)
+ nr_pages += csrow->channels[i].dimm->nr_pages;
+
+ return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
}
static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data,
int private)
{
- return sprintf(data, "%s\n", mem_types[csrow->mtype]);
+ return sprintf(data, "%s\n", mem_types[csrow->channels[0].dimm->mtype]);
}
static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data,
int private)
{
- return sprintf(data, "%s\n", dev_types[csrow->dtype]);
+ return sprintf(data, "%s\n", dev_types[csrow->channels[0].dimm->dtype]);
}
static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data,
int private)
{
- return sprintf(data, "%s\n", edac_caps[csrow->edac_mode]);
+ return sprintf(data, "%s\n", edac_caps[csrow->channels[0].dimm->edac_mode]);
}
/* show/store functions for DIMM Label attributes */
@@ -170,11 +176,11 @@ static ssize_t channel_dimm_label_show(struct csrow_info *csrow,
char *data, int channel)
{
/* if field has not been initialized, there is nothing to send */
- if (!csrow->channels[channel].label[0])
+ if (!csrow->channels[channel].dimm->label[0])
return 0;
return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
- csrow->channels[channel].label);
+ csrow->channels[channel].dimm->label);
}
static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
@@ -184,8 +190,8 @@ static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
ssize_t max_size = 0;
max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
- strncpy(csrow->channels[channel].label, data, max_size);
- csrow->channels[channel].label[max_size] = '\0';
+ strncpy(csrow->channels[channel].dimm->label, data, max_size);
+ csrow->channels[channel].dimm->label[max_size] = '\0';
return max_size;
}
@@ -419,8 +425,8 @@ static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
mci->ue_noinfo_count = 0;
mci->ce_noinfo_count = 0;
- mci->ue_count = 0;
- mci->ce_count = 0;
+ mci->ue_mc = 0;
+ mci->ce_mc = 0;
for (row = 0; row < mci->nr_csrows; row++) {
struct csrow_info *ri = &mci->csrows[row];
@@ -489,12 +495,12 @@ static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
/* default attribute files for the MCI object */
static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
{
- return sprintf(data, "%d\n", mci->ue_count);
+ return sprintf(data, "%d\n", mci->ue_mc);
}
static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
{
- return sprintf(data, "%d\n", mci->ce_count);
+ return sprintf(data, "%d\n", mci->ce_mc);
}
static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
@@ -519,16 +525,16 @@ static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
{
- int total_pages, csrow_idx;
+ int total_pages = 0, csrow_idx, j;
- for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows;
- csrow_idx++) {
+ for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
struct csrow_info *csrow = &mci->csrows[csrow_idx];
- if (!csrow->nr_pages)
- continue;
+ for (j = 0; j < csrow->nr_channels; j++) {
+ struct dimm_info *dimm = csrow->channels[j].dimm;
- total_pages += csrow->nr_pages;
+ total_pages += dimm->nr_pages;
+ }
}
return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
@@ -900,7 +906,7 @@ static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
*/
int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
{
- int i;
+ int i, j;
int err;
struct csrow_info *csrow;
struct kobject *kobj_mci = &mci->edac_mci_kobj;
@@ -934,10 +940,13 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
/* Make directories for each CSROW object under the mc<id> kobject
*/
for (i = 0; i < mci->nr_csrows; i++) {
+ int nr_pages = 0;
+
csrow = &mci->csrows[i];
+ for (j = 0; j < csrow->nr_channels; j++)
+ nr_pages += csrow->channels[j].dimm->nr_pages;
- /* Only expose populated CSROWs */
- if (csrow->nr_pages > 0) {
+ if (nr_pages > 0) {
err = edac_create_csrow_object(mci, csrow, i);
if (err) {
debugf1("%s() failure: create csrow %d obj\n",
@@ -949,12 +958,15 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
return 0;
- /* CSROW error: backout what has already been registered, */
fail1:
for (i--; i >= 0; i--) {
- if (csrow->nr_pages > 0) {
+ int nr_pages = 0;
+
+ csrow = &mci->csrows[i];
+ for (j = 0; j < csrow->nr_channels; j++)
+ nr_pages += csrow->channels[j].dimm->nr_pages;
+ if (nr_pages > 0)
kobject_put(&mci->csrows[i].kobj);
- }
}
/* remove the mci instance's attributes, if any */
@@ -973,14 +985,20 @@ fail0:
*/
void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
{
- int i;
+ struct csrow_info *csrow;
+ int i, j;
debugf0("%s()\n", __func__);
/* remove all csrow kobjects */
debugf4("%s() unregister this mci kobj\n", __func__);
for (i = 0; i < mci->nr_csrows; i++) {
- if (mci->csrows[i].nr_pages > 0) {
+ int nr_pages = 0;
+
+ csrow = &mci->csrows[i];
+ for (j = 0; j < csrow->nr_channels; j++)
+ nr_pages += csrow->channels[j].dimm->nr_pages;
+ if (nr_pages > 0) {
debugf0("%s() unreg csrow-%d\n", __func__, i);
kobject_put(&mci->csrows[i].kobj);
}
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 00f81b47a51..0ea7d14cb93 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -50,7 +50,7 @@ extern void edac_device_reset_delay_period(struct edac_device_ctl_info
*edac_dev, unsigned long value);
extern void edac_mc_reset_delay_period(int value);
-extern void *edac_align_ptr(void *ptr, unsigned size);
+extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
/*
* EDAC PCI functions
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 63af1c5673d..f1ac8664988 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -42,13 +42,13 @@ struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
const char *edac_pci_name)
{
struct edac_pci_ctl_info *pci;
- void *pvt;
+ void *p = NULL, *pvt;
unsigned int size;
debugf1("%s()\n", __func__);
- pci = (struct edac_pci_ctl_info *)0;
- pvt = edac_align_ptr(&pci[1], sz_pvt);
+ pci = edac_align_ptr(&p, sizeof(*pci), 1);
+ pvt = edac_align_ptr(&p, 1, sz_pvt);
size = ((unsigned long)pvt) + sz_pvt;
/* Alloc the needed control struct memory */
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index 277689a6884..8ad1744faac 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -245,7 +245,9 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
return 1;
if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1,
+ "UE overwrote CE", "", NULL);
info->errsts = info->errsts2;
}
@@ -256,10 +258,15 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
row = edac_mc_find_csrow_by_page(mci, pfn);
if (info->errsts & I3000_ERRSTS_UE)
- edac_mc_handle_ue(mci, pfn, offset, row, "i3000 UE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ pfn, offset, 0,
+ row, -1, -1,
+ "i3000 UE", "", NULL);
else
- edac_mc_handle_ce(mci, pfn, offset, info->derrsyn, row,
- multi_chan ? channel : 0, "i3000 CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ pfn, offset, info->derrsyn,
+ row, multi_chan ? channel : 0, -1,
+ "i3000 CE", "", NULL);
return 1;
}
@@ -304,9 +311,10 @@ static int i3000_is_interleaved(const unsigned char *c0dra,
static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc;
- int i;
+ int i, j;
struct mem_ctl_info *mci = NULL;
- unsigned long last_cumul_size;
+ struct edac_mc_layer layers[2];
+ unsigned long last_cumul_size, nr_pages;
int interleaved, nr_channels;
unsigned char dra[I3000_RANKS / 2], drb[I3000_RANKS];
unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2];
@@ -347,7 +355,14 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
*/
interleaved = i3000_is_interleaved(c0dra, c1dra, c0drb, c1drb);
nr_channels = interleaved ? 2 : 1;
- mci = edac_mc_alloc(0, I3000_RANKS / nr_channels, nr_channels, 0);
+
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = I3000_RANKS / nr_channels;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = nr_channels;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (!mci)
return -ENOMEM;
@@ -386,19 +401,23 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
cumul_size <<= 1;
debugf3("MC: %s(): (%d) cumul_size 0x%x\n",
__func__, i, cumul_size);
- if (cumul_size == last_cumul_size) {
- csrow->mtype = MEM_EMPTY;
+ if (cumul_size == last_cumul_size)
continue;
- }
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
+ nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = I3000_DEAP_GRAIN;
- csrow->mtype = MEM_DDR2;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = EDAC_UNKNOWN;
+
+ for (j = 0; j < nr_channels; j++) {
+ struct dimm_info *dimm = csrow->channels[j].dimm;
+
+ dimm->nr_pages = nr_pages / nr_channels;
+ dimm->grain = I3000_DEAP_GRAIN;
+ dimm->mtype = MEM_DDR2;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = EDAC_UNKNOWN;
+ }
}
/*
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 046808c6357..bbe43ef7182 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -23,6 +23,7 @@
#define PCI_DEVICE_ID_INTEL_3200_HB 0x29f0
+#define I3200_DIMMS 4
#define I3200_RANKS 8
#define I3200_RANKS_PER_CHANNEL 4
#define I3200_CHANNELS 2
@@ -217,21 +218,25 @@ static void i3200_process_error_info(struct mem_ctl_info *mci,
return;
if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1, "UE overwrote CE", "", NULL);
info->errsts = info->errsts2;
}
for (channel = 0; channel < nr_channels; channel++) {
log = info->eccerrlog[channel];
if (log & I3200_ECCERRLOG_UE) {
- edac_mc_handle_ue(mci, 0, 0,
- eccerrlog_row(channel, log),
- "i3200 UE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ 0, 0, 0,
+ eccerrlog_row(channel, log),
+ -1, -1,
+ "i3000 UE", "", NULL);
} else if (log & I3200_ECCERRLOG_CE) {
- edac_mc_handle_ce(mci, 0, 0,
- eccerrlog_syndrome(log),
- eccerrlog_row(channel, log), 0,
- "i3200 CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 0, 0, eccerrlog_syndrome(log),
+ eccerrlog_row(channel, log),
+ -1, -1,
+ "i3200 CE", "", NULL);
}
}
}
@@ -319,9 +324,9 @@ static unsigned long drb_to_nr_pages(
static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc;
- int i;
+ int i, j;
struct mem_ctl_info *mci = NULL;
- unsigned long last_page;
+ struct edac_mc_layer layers[2];
u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL];
bool stacked;
void __iomem *window;
@@ -336,8 +341,14 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
i3200_get_drbs(window, drbs);
nr_channels = how_many_channels(pdev);
- mci = edac_mc_alloc(sizeof(struct i3200_priv), I3200_RANKS,
- nr_channels, 0);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = I3200_DIMMS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = nr_channels;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ sizeof(struct i3200_priv));
if (!mci)
return -ENOMEM;
@@ -366,7 +377,6 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
* cumulative; the last one will contain the total memory
* contained in all ranks.
*/
- last_page = -1UL;
for (i = 0; i < mci->nr_csrows; i++) {
unsigned long nr_pages;
struct csrow_info *csrow = &mci->csrows[i];
@@ -375,20 +385,18 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
i / I3200_RANKS_PER_CHANNEL,
i % I3200_RANKS_PER_CHANNEL);
- if (nr_pages == 0) {
- csrow->mtype = MEM_EMPTY;
+ if (nr_pages == 0)
continue;
- }
- csrow->first_page = last_page + 1;
- last_page += nr_pages;
- csrow->last_page = last_page;
- csrow->nr_pages = nr_pages;
+ for (j = 0; j < nr_channels; j++) {
+ struct dimm_info *dimm = csrow->channels[j].dimm;
- csrow->grain = nr_pages << PAGE_SHIFT;
- csrow->mtype = MEM_DDR2;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = EDAC_UNKNOWN;
+ dimm->nr_pages = nr_pages / nr_channels;
+ dimm->grain = nr_pages << PAGE_SHIFT;
+ dimm->mtype = MEM_DDR2;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = EDAC_UNKNOWN;
+ }
}
i3200_clear_error_info(mci);
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index a2680d8e744..11ea835f155 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -270,7 +270,8 @@
#define MTR3 0x8C
#define NUM_MTRS 4
-#define CHANNELS_PER_BRANCH (2)
+#define CHANNELS_PER_BRANCH 2
+#define MAX_BRANCHES 2
/* Defines to extract the vaious fields from the
* MTRx - Memory Technology Registers
@@ -473,7 +474,6 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
char msg[EDAC_MC_LABEL_LEN + 1 + 160];
char *specific = NULL;
u32 allErrors;
- int branch;
int channel;
int bank;
int rank;
@@ -485,8 +485,7 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
if (!allErrors)
return; /* if no error, return now */
- branch = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd);
- channel = branch;
+ channel = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd);
/* Use the NON-Recoverable macros to extract data */
bank = NREC_BANK(info->nrecmema);
@@ -495,9 +494,9 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
ras = NREC_RAS(info->nrecmemb);
cas = NREC_CAS(info->nrecmemb);
- debugf0("\t\tCSROW= %d Channels= %d,%d (Branch= %d "
- "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
- rank, channel, channel + 1, branch >> 1, bank,
+ debugf0("\t\tCSROW= %d Channel= %d "
+ "(DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+ rank, channel, bank,
rdwr ? "Write" : "Read", ras, cas);
/* Only 1 bit will be on */
@@ -533,13 +532,14 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
/* Form out message */
snprintf(msg, sizeof(msg),
- "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d CAS=%d "
- "FATAL Err=0x%x (%s))",
- branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
- allErrors, specific);
+ "Bank=%d RAS=%d CAS=%d FATAL Err=0x%x (%s)",
+ bank, ras, cas, allErrors, specific);
/* Call the helper to output message */
- edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+ edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 0, 0, 0,
+ channel >> 1, channel & 1, rank,
+ rdwr ? "Write error" : "Read error",
+ msg, NULL);
}
/*
@@ -633,13 +633,14 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
/* Form out message */
snprintf(msg, sizeof(msg),
- "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
- "CAS=%d, UE Err=0x%x (%s))",
- branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
- ue_errors, specific);
+ "Rank=%d Bank=%d RAS=%d CAS=%d, UE Err=0x%x (%s)",
+ rank, bank, ras, cas, ue_errors, specific);
/* Call the helper to output message */
- edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ channel >> 1, -1, rank,
+ rdwr ? "Write error" : "Read error",
+ msg, NULL);
}
/* Check correctable errors */
@@ -685,13 +686,16 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
/* Form out message */
snprintf(msg, sizeof(msg),
- "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
+ "Rank=%d Bank=%d RDWR=%s RAS=%d "
"CAS=%d, CE Err=0x%x (%s))", branch >> 1, bank,
rdwr ? "Write" : "Read", ras, cas, ce_errors,
specific);
/* Call the helper to output message */
- edac_mc_handle_fbd_ce(mci, rank, channel, msg);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+ channel >> 1, channel % 2, rank,
+ rdwr ? "Write error" : "Read error",
+ msg, NULL);
}
if (!misc_messages)
@@ -731,11 +735,12 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
/* Form out message */
snprintf(msg, sizeof(msg),
- "(Branch=%d Err=%#x (%s))", branch >> 1,
- misc_errors, specific);
+ "Err=%#x (%s)", misc_errors, specific);
/* Call the helper to output message */
- edac_mc_handle_fbd_ce(mci, 0, 0, msg);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+ branch >> 1, -1, -1,
+ "Misc error", msg, NULL);
}
}
@@ -956,14 +961,14 @@ static int determine_amb_present_reg(struct i5000_pvt *pvt, int channel)
*
* return the proper MTR register as determine by the csrow and channel desired
*/
-static int determine_mtr(struct i5000_pvt *pvt, int csrow, int channel)
+static int determine_mtr(struct i5000_pvt *pvt, int slot, int channel)
{
int mtr;
if (channel < CHANNELS_PER_BRANCH)
- mtr = pvt->b0_mtr[csrow >> 1];
+ mtr = pvt->b0_mtr[slot];
else
- mtr = pvt->b1_mtr[csrow >> 1];
+ mtr = pvt->b1_mtr[slot];
return mtr;
}
@@ -988,37 +993,34 @@ static void decode_mtr(int slot_row, u16 mtr)
debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
}
-static void handle_channel(struct i5000_pvt *pvt, int csrow, int channel,
+static void handle_channel(struct i5000_pvt *pvt, int slot, int channel,
struct i5000_dimm_info *dinfo)
{
int mtr;
int amb_present_reg;
int addrBits;
- mtr = determine_mtr(pvt, csrow, channel);
+ mtr = determine_mtr(pvt, slot, channel);
if (MTR_DIMMS_PRESENT(mtr)) {
amb_present_reg = determine_amb_present_reg(pvt, channel);
- /* Determine if there is a DIMM present in this DIMM slot */
- if (amb_present_reg & (1 << (csrow >> 1))) {
+ /* Determine if there is a DIMM present in this DIMM slot */
+ if (amb_present_reg) {
dinfo->dual_rank = MTR_DIMM_RANK(mtr);
- if (!((dinfo->dual_rank == 0) &&
- ((csrow & 0x1) == 0x1))) {
- /* Start with the number of bits for a Bank
- * on the DRAM */
- addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
- /* Add thenumber of ROW bits */
- addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
- /* add the number of COLUMN bits */
- addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
-
- addrBits += 6; /* add 64 bits per DIMM */
- addrBits -= 20; /* divide by 2^^20 */
- addrBits -= 3; /* 8 bits per bytes */
-
- dinfo->megabytes = 1 << addrBits;
- }
+ /* Start with the number of bits for a Bank
+ * on the DRAM */
+ addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
+ /* Add the number of ROW bits */
+ addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
+ /* add the number of COLUMN bits */
+ addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
+
+ addrBits += 6; /* add 64 bits per DIMM */
+ addrBits -= 20; /* divide by 2^20 */
+ addrBits -= 3; /* 8 bits per byte */
+
+ dinfo->megabytes = 1 << addrBits;
}
}
}
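As a worked example of the address-bit arithmetic above (a reading of the code, not a value from any datasheet): a DIMM with 4 banks (2 address bits), 14 row bits and 10 column bits gives addrBits = 2 + 14 + 10 + 6 - 20 - 3 = 9, so dinfo->megabytes = 1 << 9 = 512. The +6 accounts for the 64-bit width of each access, the -3 converts bits to bytes, and the -20 converts bytes to megabytes.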
@@ -1032,10 +1034,9 @@ static void handle_channel(struct i5000_pvt *pvt, int csrow, int channel,
static void calculate_dimm_size(struct i5000_pvt *pvt)
{
struct i5000_dimm_info *dinfo;
- int csrow, max_csrows;
+ int slot, channel, branch;
char *p, *mem_buffer;
int space, n;
- int channel;
/* ================= Generate some debug output ================= */
space = PAGE_SIZE;
@@ -1046,22 +1047,17 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
return;
}
- n = snprintf(p, space, "\n");
- p += n;
- space -= n;
-
- /* Scan all the actual CSROWS (which is # of DIMMS * 2)
+ /* Scan all the actual slots
* and calculate the information for each DIMM
- * Start with the highest csrow first, to display it first
- * and work toward the 0th csrow
+ * Start with the highest slot first, to display it first
+ * and work toward the 0th slot
*/
- max_csrows = pvt->maxdimmperch * 2;
- for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
+ for (slot = pvt->maxdimmperch - 1; slot >= 0; slot--) {
- /* on an odd csrow, first output a 'boundary' marker,
+ /* on an odd slot, first output a 'boundary' marker,
* then reset the message buffer */
- if (csrow & 0x1) {
- n = snprintf(p, space, "---------------------------"
+ if (slot & 0x1) {
+ n = snprintf(p, space, "--------------------------"
"--------------------------------");
p += n;
space -= n;
@@ -1069,30 +1065,39 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
p = mem_buffer;
space = PAGE_SIZE;
}
- n = snprintf(p, space, "csrow %2d ", csrow);
+ n = snprintf(p, space, "slot %2d ", slot);
p += n;
space -= n;
for (channel = 0; channel < pvt->maxch; channel++) {
- dinfo = &pvt->dimm_info[csrow][channel];
- handle_channel(pvt, csrow, channel, dinfo);
- n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
+ dinfo = &pvt->dimm_info[slot][channel];
+ handle_channel(pvt, slot, channel, dinfo);
+ if (dinfo->megabytes)
+ n = snprintf(p, space, "%4d MB %dR| ",
+ dinfo->megabytes, dinfo->dual_rank + 1);
+ else
+ n = snprintf(p, space, "%4d MB | ", 0);
p += n;
space -= n;
}
- n = snprintf(p, space, "\n");
p += n;
space -= n;
+ debugf2("%s\n", mem_buffer);
+ p = mem_buffer;
+ space = PAGE_SIZE;
}
/* Output the last bottom 'boundary' marker */
- n = snprintf(p, space, "---------------------------"
- "--------------------------------\n");
+ n = snprintf(p, space, "--------------------------"
+ "--------------------------------");
p += n;
space -= n;
+ debugf2("%s\n", mem_buffer);
+ p = mem_buffer;
+ space = PAGE_SIZE;
/* now output the 'channel' labels */
- n = snprintf(p, space, " ");
+ n = snprintf(p, space, " ");
p += n;
space -= n;
for (channel = 0; channel < pvt->maxch; channel++) {
@@ -1100,9 +1105,17 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
p += n;
space -= n;
}
- n = snprintf(p, space, "\n");
+ debugf2("%s\n", mem_buffer);
+ p = mem_buffer;
+ space = PAGE_SIZE;
+
+ n = snprintf(p, space, " ");
p += n;
- space -= n;
+ for (branch = 0; branch < MAX_BRANCHES; branch++) {
+ n = snprintf(p, space, " branch %d | ", branch);
+ p += n;
+ space -= n;
+ }
/* output the last message and free buffer */
debugf2("%s\n", mem_buffer);
@@ -1235,13 +1248,13 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
static int i5000_init_csrows(struct mem_ctl_info *mci)
{
struct i5000_pvt *pvt;
- struct csrow_info *p_csrow;
+ struct dimm_info *dimm;
int empty, channel_count;
int max_csrows;
- int mtr, mtr1;
+ int mtr;
int csrow_megs;
int channel;
- int csrow;
+ int slot;
pvt = mci->pvt_info;
@@ -1250,43 +1263,40 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
empty = 1; /* Assume NO memory */
- for (csrow = 0; csrow < max_csrows; csrow++) {
- p_csrow = &mci->csrows[csrow];
-
- p_csrow->csrow_idx = csrow;
-
- /* use branch 0 for the basis */
- mtr = pvt->b0_mtr[csrow >> 1];
- mtr1 = pvt->b1_mtr[csrow >> 1];
-
- /* if no DIMMS on this row, continue */
- if (!MTR_DIMMS_PRESENT(mtr) && !MTR_DIMMS_PRESENT(mtr1))
- continue;
+ /*
+ * FIXME: The memory layout used to map slot/channel into the
+ * real memory architecture is weird: branch+slot are "csrows"
+ * and channel is channel. That required an extra array (dimm_info)
+ * to map the dimms. A good cleanup would be to remove this array,
+ * and do a loop here with branch, channel, slot
+ */
+ for (slot = 0; slot < max_csrows; slot++) {
+ for (channel = 0; channel < pvt->maxch; channel++) {
- /* FAKE OUT VALUES, FIXME */
- p_csrow->first_page = 0 + csrow * 20;
- p_csrow->last_page = 9 + csrow * 20;
- p_csrow->page_mask = 0xFFF;
+ mtr = determine_mtr(pvt, slot, channel);
- p_csrow->grain = 8;
+ if (!MTR_DIMMS_PRESENT(mtr))
+ continue;
- csrow_megs = 0;
- for (channel = 0; channel < pvt->maxch; channel++) {
- csrow_megs += pvt->dimm_info[csrow][channel].megabytes;
- }
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+ channel / MAX_BRANCHES,
+ channel % MAX_BRANCHES, slot);
- p_csrow->nr_pages = csrow_megs << 8;
+ csrow_megs = pvt->dimm_info[slot][channel].megabytes;
+ dimm->grain = 8;
- /* Assume DDR2 for now */
- p_csrow->mtype = MEM_FB_DDR2;
+ /* Assume DDR2 for now */
+ dimm->mtype = MEM_FB_DDR2;
- /* ask what device type on this row */
- if (MTR_DRAM_WIDTH(mtr))
- p_csrow->dtype = DEV_X8;
- else
- p_csrow->dtype = DEV_X4;
+ /* ask what device type on this row */
+ if (MTR_DRAM_WIDTH(mtr))
+ dimm->dtype = DEV_X8;
+ else
+ dimm->dtype = DEV_X4;
- p_csrow->edac_mode = EDAC_S8ECD8ED;
+ dimm->edac_mode = EDAC_S8ECD8ED;
+ dimm->nr_pages = csrow_megs << 8;
+ }
empty = 0;
}
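The EDAC_DIMM_PTR() lookup above indexes the flat mci->dimms array by per-layer coordinates, in the same order the layers are declared in i5000_probe1() further down (branch, channel within branch, slot). A sketch of how the loop variables map onto those coordinates, assuming only what this hunk shows; strictly the divisor is CHANNELS_PER_BRANCH, which happens to equal MAX_BRANCHES (both are 2), so the division below matches the channel >> 1 / channel & 1 forms used in the error handlers:

	/* sketch: translate the (channel, slot) loop variables into layer coordinates */
	static struct dimm_info *example_dimm(struct mem_ctl_info *mci,
					      int channel, int slot)
	{
		int branch = channel / MAX_BRANCHES;		/* same as channel >> 1 */
		int chan_in_branch = channel % MAX_BRANCHES;	/* same as channel & 1 */

		return EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
				     branch, chan_in_branch, slot);
	}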
@@ -1317,7 +1327,7 @@ static void i5000_enable_error_reporting(struct mem_ctl_info *mci)
}
/*
- * i5000_get_dimm_and_channel_counts(pdev, &num_csrows, &num_channels)
+ * i5000_get_dimm_and_channel_counts(pdev, &nr_csrows, &num_channels)
*
* ask the device how many channels are present and how many CSROWS
* as well
@@ -1332,7 +1342,7 @@ static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev,
* supported on this memory controller
*/
pci_read_config_byte(pdev, MAXDIMMPERCH, &value);
- *num_dimms_per_channel = (int)value *2;
+ *num_dimms_per_channel = (int)value;
pci_read_config_byte(pdev, MAXCH, &value);
*num_channels = (int)value;
@@ -1348,10 +1358,10 @@ static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev,
static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[3];
struct i5000_pvt *pvt;
int num_channels;
int num_dimms_per_channel;
- int num_csrows;
debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
__FILE__, __func__,
@@ -1377,14 +1387,22 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
*/
i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
&num_channels);
- num_csrows = num_dimms_per_channel * 2;
- debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
- __func__, num_channels, num_dimms_per_channel, num_csrows);
+ debugf0("MC: %s(): Number of Branches=2 Channels= %d DIMMS= %d\n",
+ __func__, num_channels, num_dimms_per_channel);
/* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
+ layers[0].type = EDAC_MC_LAYER_BRANCH;
+ layers[0].size = MAX_BRANCHES;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = num_channels / MAX_BRANCHES;
+ layers[1].is_virt_csrow = false;
+ layers[2].type = EDAC_MC_LAYER_SLOT;
+ layers[2].size = num_dimms_per_channel;
+ layers[2].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (mci == NULL)
return -ENOMEM;
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index d500749464e..e9e7c2a29dc 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -14,6 +14,11 @@
* rows for each respective channel are laid out one after another,
* the first half belonging to channel 0, the second half belonging
* to channel 1.
+ *
+ * This driver is for DDR2 DIMMs, and it uses chip selects to address the
+ * individual ranks. However, instead of exposing memory as ranks, it reports
+ * it as DIMMs. An internal table keeps the association between ranks
+ * and DIMMs.
*/
#include <linux/module.h>
#include <linux/init.h>
@@ -410,14 +415,6 @@ static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow)
return csrow / priv->ranksperchan;
}
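The header comment added earlier in this file describes the layout this helper relies on: the first priv->ranksperchan chip-select rows belong to channel 0 and the remainder to channel 1. The matching rank lookup is not part of this hunk; a sketch of it, assuming it is simply the remainder of the same split:

	/* assumed companion to i5100_csrow_to_chan() above: rank within the channel */
	static unsigned example_csrow_to_rank(const struct mem_ctl_info *mci, int csrow)
	{
		const struct i5100_priv *priv = mci->pvt_info;

		return csrow % priv->ranksperchan;
	}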
-static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
- int chan, int rank)
-{
- const struct i5100_priv *priv = mci->pvt_info;
-
- return chan * priv->ranksperchan + rank;
-}
-
static void i5100_handle_ce(struct mem_ctl_info *mci,
int chan,
unsigned bank,
@@ -427,17 +424,17 @@ static void i5100_handle_ce(struct mem_ctl_info *mci,
unsigned ras,
const char *msg)
{
- const int csrow = i5100_rank_to_csrow(mci, chan, rank);
+ char detail[80];
- printk(KERN_ERR
- "CE chan %d, bank %u, rank %u, syndrome 0x%lx, "
- "cas %u, ras %u, csrow %u, label \"%s\": %s\n",
- chan, bank, rank, syndrome, cas, ras,
- csrow, mci->csrows[csrow].channels[0].label, msg);
+ /* Form out message */
+ snprintf(detail, sizeof(detail),
+ "bank %u, cas %u, ras %u\n",
+ bank, cas, ras);
- mci->ce_count++;
- mci->csrows[csrow].ce_count++;
- mci->csrows[csrow].channels[0].ce_count++;
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 0, 0, syndrome,
+ chan, rank, -1,
+ msg, detail, NULL);
}
static void i5100_handle_ue(struct mem_ctl_info *mci,
@@ -449,16 +446,17 @@ static void i5100_handle_ue(struct mem_ctl_info *mci,
unsigned ras,
const char *msg)
{
- const int csrow = i5100_rank_to_csrow(mci, chan, rank);
+ char detail[80];
- printk(KERN_ERR
- "UE chan %d, bank %u, rank %u, syndrome 0x%lx, "
- "cas %u, ras %u, csrow %u, label \"%s\": %s\n",
- chan, bank, rank, syndrome, cas, ras,
- csrow, mci->csrows[csrow].channels[0].label, msg);
+ /* Form out message */
+ snprintf(detail, sizeof(detail),
+ "bank %u, cas %u, ras %u\n",
+ bank, cas, ras);
- mci->ue_count++;
- mci->csrows[csrow].ue_count++;
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ 0, 0, syndrome,
+ chan, rank, -1,
+ msg, detail, NULL);
}
static void i5100_read_log(struct mem_ctl_info *mci, int chan,
@@ -835,10 +833,10 @@ static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
{
int i;
- unsigned long total_pages = 0UL;
struct i5100_priv *priv = mci->pvt_info;
- for (i = 0; i < mci->nr_csrows; i++) {
+ for (i = 0; i < mci->tot_dimms; i++) {
+ struct dimm_info *dimm;
const unsigned long npages = i5100_npages(mci, i);
const unsigned chan = i5100_csrow_to_chan(mci, i);
const unsigned rank = i5100_csrow_to_rank(mci, i);
@@ -846,33 +844,23 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
if (!npages)
continue;
- /*
- * FIXME: these two are totally bogus -- I don't see how to
- * map them correctly to this structure...
- */
- mci->csrows[i].first_page = total_pages;
- mci->csrows[i].last_page = total_pages + npages - 1;
- mci->csrows[i].page_mask = 0UL;
-
- mci->csrows[i].nr_pages = npages;
- mci->csrows[i].grain = 32;
- mci->csrows[i].csrow_idx = i;
- mci->csrows[i].dtype =
- (priv->mtr[chan][rank].width == 4) ? DEV_X4 : DEV_X8;
- mci->csrows[i].ue_count = 0;
- mci->csrows[i].ce_count = 0;
- mci->csrows[i].mtype = MEM_RDDR2;
- mci->csrows[i].edac_mode = EDAC_SECDED;
- mci->csrows[i].mci = mci;
- mci->csrows[i].nr_channels = 1;
- mci->csrows[i].channels[0].chan_idx = 0;
- mci->csrows[i].channels[0].ce_count = 0;
- mci->csrows[i].channels[0].csrow = mci->csrows + i;
- snprintf(mci->csrows[i].channels[0].label,
- sizeof(mci->csrows[i].channels[0].label),
- "DIMM%u", i5100_rank_to_slot(mci, chan, rank));
-
- total_pages += npages;
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+ chan, rank, 0);
+
+ dimm->nr_pages = npages;
+ if (npages) {
+ dimm->grain = 32;
+ dimm->dtype = (priv->mtr[chan][rank].width == 4) ?
+ DEV_X4 : DEV_X8;
+ dimm->mtype = MEM_RDDR2;
+ dimm->edac_mode = EDAC_SECDED;
+ snprintf(dimm->label, sizeof(dimm->label),
+ "DIMM%u",
+ i5100_rank_to_slot(mci, chan, rank));
+ }
+
+ debugf2("dimm channel %d, rank %d, size %ld\n",
+ chan, rank, (long)PAGES_TO_MiB(npages));
}
}
@@ -881,6 +869,7 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
{
int rc;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct i5100_priv *priv;
struct pci_dev *ch0mm, *ch1mm;
int ret = 0;
@@ -941,7 +930,14 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
goto bail_ch1;
}
- mci = edac_mc_alloc(sizeof(*priv), ranksperch * 2, 1, 0);
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = 2;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = ranksperch;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ sizeof(*priv));
if (!mci) {
ret = -ENOMEM;
goto bail_disable_ch1;
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 1869a1018fb..6640c29e188 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -18,6 +18,10 @@
* Intel 5400 Chipset Memory Controller Hub (MCH) - Datasheet
* http://developer.intel.com/design/chipsets/datashts/313070.htm
*
+ * This Memory Controller manages DDR2 FB-DIMMs. It has 2 branches, each with
+ * 2 channels operating in lockstep no-mirror mode. Each channel can have up to
+ * 4 DIMMs, each with up to 8 GB.
+ *
*/
#include <linux/module.h>
@@ -44,12 +48,10 @@
edac_mc_chipset_printk(mci, level, "i5400", fmt, ##arg)
/* Limits for i5400 */
-#define NUM_MTRS_PER_BRANCH 4
+#define MAX_BRANCHES 2
#define CHANNELS_PER_BRANCH 2
-#define MAX_DIMMS_PER_CHANNEL NUM_MTRS_PER_BRANCH
-#define MAX_CHANNELS 4
-/* max possible csrows per channel */
-#define MAX_CSROWS (MAX_DIMMS_PER_CHANNEL)
+#define DIMMS_PER_CHANNEL 4
+#define MAX_CHANNELS (MAX_BRANCHES * CHANNELS_PER_BRANCH)
/* Device 16,
* Function 0: System Address
@@ -347,16 +349,16 @@ struct i5400_pvt {
u16 mir0, mir1;
- u16 b0_mtr[NUM_MTRS_PER_BRANCH]; /* Memory Technlogy Reg */
+ u16 b0_mtr[DIMMS_PER_CHANNEL]; /* Memory Technology Reg */
u16 b0_ambpresent0; /* Branch 0, Channel 0 */
u16 b0_ambpresent1; /* Branch 0, Channel 1 */
- u16 b1_mtr[NUM_MTRS_PER_BRANCH]; /* Memory Technlogy Reg */
+ u16 b1_mtr[DIMMS_PER_CHANNEL]; /* Memory Technology Reg */
u16 b1_ambpresent0; /* Branch 1, Channel 8 */
u16 b1_ambpresent1; /* Branch 1, Channel 1 */
/* DIMM information matrix, allocating architecture maximums */
- struct i5400_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS];
+ struct i5400_dimm_info dimm_info[DIMMS_PER_CHANNEL][MAX_CHANNELS];
/* Actual values for this controller */
int maxch; /* Max channels */
@@ -532,13 +534,15 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
int ras, cas;
int errnum;
char *type = NULL;
+ enum hw_event_mc_err_type tp_event = HW_EVENT_ERR_UNCORRECTED;
if (!allErrors)
return; /* if no error, return now */
- if (allErrors & ERROR_FAT_MASK)
+ if (allErrors & ERROR_FAT_MASK) {
type = "FATAL";
- else if (allErrors & FERR_NF_UNCORRECTABLE)
+ tp_event = HW_EVENT_ERR_FATAL;
+ } else if (allErrors & FERR_NF_UNCORRECTABLE)
type = "NON-FATAL uncorrected";
else
type = "NON-FATAL recoverable";
@@ -556,7 +560,7 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
ras = nrec_ras(info);
cas = nrec_cas(info);
- debugf0("\t\tCSROW= %d Channels= %d,%d (Branch= %d "
+ debugf0("\t\tDIMM= %d Channels= %d,%d (Branch= %d "
"DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
rank, channel, channel + 1, branch >> 1, bank,
buf_id, rdwr_str(rdwr), ras, cas);
@@ -566,13 +570,13 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
/* Form out message */
snprintf(msg, sizeof(msg),
- "%s (Branch=%d DRAM-Bank=%d Buffer ID = %d RDWR=%s "
- "RAS=%d CAS=%d %s Err=0x%lx (%s))",
- type, branch >> 1, bank, buf_id, rdwr_str(rdwr), ras, cas,
- type, allErrors, error_name[errnum]);
+ "Bank=%d Buffer ID = %d RAS=%d CAS=%d Err=0x%lx (%s)",
+ bank, buf_id, ras, cas, allErrors, error_name[errnum]);
- /* Call the helper to output message */
- edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+ edac_mc_handle_error(tp_event, mci, 0, 0, 0,
+ branch >> 1, -1, rank,
+ rdwr ? "Write error" : "Read error",
+ msg, NULL);
}
/*
@@ -630,7 +634,7 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
/* Only 1 bit will be on */
errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
- debugf0("\t\tCSROW= %d Channel= %d (Branch %d "
+ debugf0("\t\tDIMM= %d Channel= %d (Branch %d "
"DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
rank, channel, branch >> 1, bank,
rdwr_str(rdwr), ras, cas);
@@ -642,8 +646,10 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
branch >> 1, bank, rdwr_str(rdwr), ras, cas,
allErrors, error_name[errnum]);
- /* Call the helper to output message */
- edac_mc_handle_fbd_ce(mci, rank, channel, msg);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+ branch >> 1, channel % 2, rank,
+ rdwr ? "Write error" : "Read error",
+ msg, NULL);
return;
}
@@ -831,8 +837,8 @@ static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx)
/*
* determine_amb_present
*
- * the information is contained in NUM_MTRS_PER_BRANCH different
- * registers determining which of the NUM_MTRS_PER_BRANCH requires
+ * the information is contained in DIMMS_PER_CHANNEL different
+ * registers determining which of the DIMMS_PER_CHANNEL requires
* knowing which channel is in question
*
* 2 branches, each with 2 channels
@@ -861,11 +867,11 @@ static int determine_amb_present_reg(struct i5400_pvt *pvt, int channel)
}
/*
- * determine_mtr(pvt, csrow, channel)
+ * determine_mtr(pvt, dimm, channel)
*
- * return the proper MTR register as determine by the csrow and desired channel
+ * return the proper MTR register as determine by the dimm and desired channel
*/
-static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel)
+static int determine_mtr(struct i5400_pvt *pvt, int dimm, int channel)
{
int mtr;
int n;
@@ -873,11 +879,11 @@ static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel)
/* There is one MTR for each slot pair of FB-DIMMs,
Each slot pair may be at branch 0 or branch 1.
*/
- n = csrow;
+ n = dimm;
- if (n >= NUM_MTRS_PER_BRANCH) {
- debugf0("ERROR: trying to access an invalid csrow: %d\n",
- csrow);
+ if (n >= DIMMS_PER_CHANNEL) {
+ debugf0("ERROR: trying to access an invalid dimm: %d\n",
+ dimm);
return 0;
}
@@ -913,19 +919,19 @@ static void decode_mtr(int slot_row, u16 mtr)
debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
}
-static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel,
+static void handle_channel(struct i5400_pvt *pvt, int dimm, int channel,
struct i5400_dimm_info *dinfo)
{
int mtr;
int amb_present_reg;
int addrBits;
- mtr = determine_mtr(pvt, csrow, channel);
+ mtr = determine_mtr(pvt, dimm, channel);
if (MTR_DIMMS_PRESENT(mtr)) {
amb_present_reg = determine_amb_present_reg(pvt, channel);
/* Determine if there is a DIMM present in this DIMM slot */
- if (amb_present_reg & (1 << csrow)) {
+ if (amb_present_reg & (1 << dimm)) {
/* Start with the number of bits for a Bank
* on the DRAM */
addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
@@ -954,10 +960,10 @@ static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel,
static void calculate_dimm_size(struct i5400_pvt *pvt)
{
struct i5400_dimm_info *dinfo;
- int csrow, max_csrows;
+ int dimm, max_dimms;
char *p, *mem_buffer;
int space, n;
- int channel;
+ int channel, branch;
/* ================= Generate some debug output ================= */
space = PAGE_SIZE;
@@ -968,32 +974,32 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
return;
}
- /* Scan all the actual CSROWS
+ /* Scan all the actual DIMMS
* and calculate the information for each DIMM
- * Start with the highest csrow first, to display it first
- * and work toward the 0th csrow
+ * Start with the highest dimm first, to display it first
+ * and work toward the 0th dimm
*/
- max_csrows = pvt->maxdimmperch;
- for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
+ max_dimms = pvt->maxdimmperch;
+ for (dimm = max_dimms - 1; dimm >= 0; dimm--) {
- /* on an odd csrow, first output a 'boundary' marker,
+ /* on an odd dimm, first output a 'boundary' marker,
* then reset the message buffer */
- if (csrow & 0x1) {
+ if (dimm & 0x1) {
n = snprintf(p, space, "---------------------------"
- "--------------------------------");
+ "-------------------------------");
p += n;
space -= n;
debugf2("%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
}
- n = snprintf(p, space, "csrow %2d ", csrow);
+ n = snprintf(p, space, "dimm %2d ", dimm);
p += n;
space -= n;
for (channel = 0; channel < pvt->maxch; channel++) {
- dinfo = &pvt->dimm_info[csrow][channel];
- handle_channel(pvt, csrow, channel, dinfo);
+ dinfo = &pvt->dimm_info[dimm][channel];
+ handle_channel(pvt, dimm, channel, dinfo);
n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
p += n;
space -= n;
@@ -1005,7 +1011,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
/* Output the last bottom 'boundary' marker */
n = snprintf(p, space, "---------------------------"
- "--------------------------------");
+ "-------------------------------");
p += n;
space -= n;
debugf2("%s\n", mem_buffer);
@@ -1013,7 +1019,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
space = PAGE_SIZE;
/* now output the 'channel' labels */
- n = snprintf(p, space, " ");
+ n = snprintf(p, space, " ");
p += n;
space -= n;
for (channel = 0; channel < pvt->maxch; channel++) {
@@ -1022,6 +1028,19 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
space -= n;
}
+ space -= n;
+ debugf2("%s\n", mem_buffer);
+ p = mem_buffer;
+ space = PAGE_SIZE;
+
+ n = snprintf(p, space, " ");
+ p += n;
+ for (branch = 0; branch < MAX_BRANCHES; branch++) {
+ n = snprintf(p, space, " branch %d | ", branch);
+ p += n;
+ space -= n;
+ }
+
/* output the last message and free buffer */
debugf2("%s\n", mem_buffer);
kfree(mem_buffer);
@@ -1080,7 +1099,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
/* Get the set of MTR[0-3] regs by each branch */
- for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) {
+ for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) {
int where = MTR0 + (slot_row * sizeof(u16));
/* Branch 0 set of MTR registers */
@@ -1105,7 +1124,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
/* Read and dump branch 0's MTRs */
debugf2("\nMemory Technology Registers:\n");
debugf2(" Branch 0:\n");
- for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++)
+ for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
pci_read_config_word(pvt->branch_0, AMBPRESENT_0,
@@ -1122,7 +1141,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
} else {
/* Read and dump branch 1's MTRs */
debugf2(" Branch 1:\n");
- for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++)
+ for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
pci_read_config_word(pvt->branch_1, AMBPRESENT_0,
@@ -1141,7 +1160,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
}
/*
- * i5400_init_csrows Initialize the 'csrows' table within
+ * i5400_init_dimms Initialize the 'dimms' table within
* the mci control structure with the
* addressing of memory.
*
@@ -1149,64 +1168,68 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
* 0 success
* 1 no actual memory found on this MC
*/
-static int i5400_init_csrows(struct mem_ctl_info *mci)
+static int i5400_init_dimms(struct mem_ctl_info *mci)
{
struct i5400_pvt *pvt;
- struct csrow_info *p_csrow;
- int empty, channel_count;
- int max_csrows;
+ struct dimm_info *dimm;
+ int ndimms, channel_count;
+ int max_dimms;
int mtr;
- int csrow_megs;
- int channel;
- int csrow;
+ int size_mb;
+ int channel, slot;
pvt = mci->pvt_info;
channel_count = pvt->maxch;
- max_csrows = pvt->maxdimmperch;
+ max_dimms = pvt->maxdimmperch;
- empty = 1; /* Assume NO memory */
+ ndimms = 0;
- for (csrow = 0; csrow < max_csrows; csrow++) {
- p_csrow = &mci->csrows[csrow];
-
- p_csrow->csrow_idx = csrow;
-
- /* use branch 0 for the basis */
- mtr = determine_mtr(pvt, csrow, 0);
-
- /* if no DIMMS on this row, continue */
- if (!MTR_DIMMS_PRESENT(mtr))
- continue;
-
- /* FAKE OUT VALUES, FIXME */
- p_csrow->first_page = 0 + csrow * 20;
- p_csrow->last_page = 9 + csrow * 20;
- p_csrow->page_mask = 0xFFF;
-
- p_csrow->grain = 8;
-
- csrow_megs = 0;
- for (channel = 0; channel < pvt->maxch; channel++)
- csrow_megs += pvt->dimm_info[csrow][channel].megabytes;
-
- p_csrow->nr_pages = csrow_megs << 8;
-
- /* Assume DDR2 for now */
- p_csrow->mtype = MEM_FB_DDR2;
-
- /* ask what device type on this row */
- if (MTR_DRAM_WIDTH(mtr))
- p_csrow->dtype = DEV_X8;
- else
- p_csrow->dtype = DEV_X4;
-
- p_csrow->edac_mode = EDAC_S8ECD8ED;
-
- empty = 0;
+ /*
+ * FIXME: remove pvt->dimm_info[slot][channel] and use the 3
+ * layers here.
+ */
+ for (channel = 0; channel < mci->layers[0].size * mci->layers[1].size;
+ channel++) {
+ for (slot = 0; slot < mci->layers[2].size; slot++) {
+ mtr = determine_mtr(pvt, slot, channel);
+
+ /* if no DIMMS on this slot, continue */
+ if (!MTR_DIMMS_PRESENT(mtr))
+ continue;
+
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+ channel / 2, channel % 2, slot);
+
+ size_mb = pvt->dimm_info[slot][channel].megabytes;
+
+ debugf2("%s: dimm%zd (branch %d channel %d slot %d): %d.%03d GB\n",
+ __func__, dimm - mci->dimms,
+ channel / 2, channel % 2, slot,
+ size_mb / 1000, size_mb % 1000);
+
+ dimm->nr_pages = size_mb << 8;
+ dimm->grain = 8;
+ dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4;
+ dimm->mtype = MEM_FB_DDR2;
+ /*
+ * The ECC mechanism is SDDC (aka SECC), which
+ * is similar to Chipkill.
+ */
+ dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ?
+ EDAC_S8ECD8ED : EDAC_S4ECD4ED;
+ ndimms++;
+ }
}
- return empty;
+ /*
+ * When only one DIMM is present, it should be at location (0,0,0).
+ * In such single-DIMM mode, the SDDC algorithm degrades to SECDED+.
+ */
+ if (ndimms == 1)
+ mci->dimms[0].edac_mode = EDAC_SECDED;
+
+ return (ndimms == 0);
}
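The dimm->nr_pages = size_mb << 8 conversion above assumes 4 KiB pages: one MiB holds 2^(20 - PAGE_SHIFT) = 256 of them. A sketch making that assumption explicit (PAGE_SHIFT of 12 is assumed here; the driver itself hard-codes the shift by 8):

	/* sketch: MiB -> 4 KiB pages, equivalent to the size_mb << 8 used above */
	static unsigned long example_mib_to_pages(unsigned int size_mb)
	{
		return (unsigned long)size_mb << (20 - 12);
	}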
/*
@@ -1242,9 +1265,7 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
struct i5400_pvt *pvt;
- int num_channels;
- int num_dimms_per_channel;
- int num_csrows;
+ struct edac_mc_layer layers[3];
if (dev_idx >= ARRAY_SIZE(i5400_devs))
return -EINVAL;
@@ -1258,23 +1279,21 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
if (PCI_FUNC(pdev->devfn) != 0)
return -ENODEV;
- /* As we don't have a motherboard identification routine to determine
- * actual number of slots/dimms per channel, we thus utilize the
- * resource as specified by the chipset. Thus, we might have
- * have more DIMMs per channel than actually on the mobo, but this
- * allows the driver to support up to the chipset max, without
- * some fancy mobo determination.
+ /*
+ * allocate a new MC control structure
+ *
+ * This driver uses the DIMM slot as "csrow" and the rest as "channel".
*/
- num_dimms_per_channel = MAX_DIMMS_PER_CHANNEL;
- num_channels = MAX_CHANNELS;
- num_csrows = num_dimms_per_channel;
-
- debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
- __func__, num_channels, num_dimms_per_channel, num_csrows);
-
- /* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
-
+ layers[0].type = EDAC_MC_LAYER_BRANCH;
+ layers[0].size = MAX_BRANCHES;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = CHANNELS_PER_BRANCH;
+ layers[1].is_virt_csrow = false;
+ layers[2].type = EDAC_MC_LAYER_SLOT;
+ layers[2].size = DIMMS_PER_CHANNEL;
+ layers[2].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (mci == NULL)
return -ENOMEM;
@@ -1284,8 +1303,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
pvt = mci->pvt_info;
pvt->system_address = pdev; /* Record this device in our private */
- pvt->maxch = num_channels;
- pvt->maxdimmperch = num_dimms_per_channel;
+ pvt->maxch = MAX_CHANNELS;
+ pvt->maxdimmperch = DIMMS_PER_CHANNEL;
/* 'get' the pci devices we want to reserve for our use */
if (i5400_get_devices(mci, dev_idx))
@@ -1307,13 +1326,13 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
/* Set the function pointer to an actual operation function */
mci->edac_check = i5400_check_error;
- /* initialize the MC control structure 'csrows' table
+ /* initialize the MC control structure 'dimms' table
* with the mapping and control information */
- if (i5400_init_csrows(mci)) {
+ if (i5400_init_dimms(mci)) {
debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
- " because i5400_init_csrows() returned nonzero "
+ " because i5400_init_dimms() returned nonzero "
"value\n");
- mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
+ mci->edac_cap = EDAC_FLAG_NONE; /* no dimms found */
} else {
debugf1("MC: Enable error reporting now\n");
i5400_enable_error_reporting(mci);
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 3bafa3bca14..97c22fd650e 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -464,17 +464,14 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
FERR_FAT_FBD, error_reg);
snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
- "FATAL (Branch=%d DRAM-Bank=%d %s "
- "RAS=%d CAS=%d Err=0x%lx (%s))",
- branch, bank,
- is_wr ? "RDWR" : "RD",
- ras, cas,
- errors, specific);
-
- /* Call the helper to output message */
- edac_mc_handle_fbd_ue(mci, rank, branch << 1,
- (branch << 1) + 1,
- pvt->tmp_prt_buffer);
+ "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s))",
+ bank, ras, cas, errors, specific);
+
+ edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 0, 0, 0,
+ branch, -1, rank,
+ is_wr ? "Write error" : "Read error",
+ pvt->tmp_prt_buffer, NULL);
+
}
/* read in the 1st NON-FATAL error register */
@@ -513,23 +510,14 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
/* Form out message */
snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
- "Corrected error (Branch=%d, Channel %d), "
- " DRAM-Bank=%d %s "
- "RAS=%d CAS=%d, CE Err=0x%lx, Syndrome=0x%08x(%s))",
- branch, channel,
- bank,
- is_wr ? "RDWR" : "RD",
- ras, cas,
- errors, syndrome, specific);
-
- /*
- * Call the helper to output message
- * NOTE: Errors are reported per-branch, and not per-channel
- * Currently, we don't know how to identify the right
- * channel.
- */
- edac_mc_handle_fbd_ce(mci, rank, channel,
- pvt->tmp_prt_buffer);
+ "DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s))",
+ bank, ras, cas, errors, specific);
+
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0,
+ syndrome,
+ branch >> 1, channel % 2, rank,
+ is_wr ? "Write error" : "Read error",
+ pvt->tmp_prt_buffer, NULL);
}
return;
}
@@ -617,8 +605,7 @@ static void i7300_enable_error_reporting(struct mem_ctl_info *mci)
static int decode_mtr(struct i7300_pvt *pvt,
int slot, int ch, int branch,
struct i7300_dimm_info *dinfo,
- struct csrow_info *p_csrow,
- u32 *nr_pages)
+ struct dimm_info *dimm)
{
int mtr, ans, addrBits, channel;
@@ -650,7 +637,6 @@ static int decode_mtr(struct i7300_pvt *pvt,
addrBits -= 3; /* 8 bits per bytes */
dinfo->megabytes = 1 << addrBits;
- *nr_pages = dinfo->megabytes << 8;
debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
@@ -663,11 +649,6 @@ static int decode_mtr(struct i7300_pvt *pvt,
debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
debugf2("\t\tSIZE: %d MB\n", dinfo->megabytes);
- p_csrow->grain = 8;
- p_csrow->mtype = MEM_FB_DDR2;
- p_csrow->csrow_idx = slot;
- p_csrow->page_mask = 0;
-
/*
* The type of error detection actually depends of the
* mode of operation. When it is just one single memory chip, at
@@ -677,15 +658,18 @@ static int decode_mtr(struct i7300_pvt *pvt,
* See datasheet Sections 7.3.6 to 7.3.8
*/
+ dimm->nr_pages = MiB_TO_PAGES(dinfo->megabytes);
+ dimm->grain = 8;
+ dimm->mtype = MEM_FB_DDR2;
if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
- p_csrow->edac_mode = EDAC_SECDED;
+ dimm->edac_mode = EDAC_SECDED;
debugf2("\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
} else {
debugf2("\t\tECC code is on Lockstep mode\n");
if (MTR_DRAM_WIDTH(mtr) == 8)
- p_csrow->edac_mode = EDAC_S8ECD8ED;
+ dimm->edac_mode = EDAC_S8ECD8ED;
else
- p_csrow->edac_mode = EDAC_S4ECD4ED;
+ dimm->edac_mode = EDAC_S4ECD4ED;
}
/* ask what device type on this row */
@@ -694,9 +678,9 @@ static int decode_mtr(struct i7300_pvt *pvt,
IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
"enhanced" : "normal");
- p_csrow->dtype = DEV_X8;
+ dimm->dtype = DEV_X8;
} else
- p_csrow->dtype = DEV_X4;
+ dimm->dtype = DEV_X4;
return mtr;
}
@@ -774,11 +758,10 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
{
struct i7300_pvt *pvt;
struct i7300_dimm_info *dinfo;
- struct csrow_info *p_csrow;
int rc = -ENODEV;
int mtr;
int ch, branch, slot, channel;
- u32 last_page = 0, nr_pages;
+ struct dimm_info *dimm;
pvt = mci->pvt_info;
@@ -809,25 +792,23 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
where,
&pvt->mtr[slot][branch]);
- for (ch = 0; ch < MAX_BRANCHES; ch++) {
+ for (ch = 0; ch < MAX_CH_PER_BRANCH; ch++) {
int channel = to_channel(ch, branch);
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+ mci->n_layers, branch, ch, slot);
+
dinfo = &pvt->dimm_info[slot][channel];
- p_csrow = &mci->csrows[slot];
mtr = decode_mtr(pvt, slot, ch, branch,
- dinfo, p_csrow, &nr_pages);
+ dinfo, dimm);
+
/* if no DIMMS on this row, continue */
if (!MTR_DIMMS_PRESENT(mtr))
continue;
- /* Update per_csrow memory count */
- p_csrow->nr_pages += nr_pages;
- p_csrow->first_page = last_page;
- last_page += nr_pages;
- p_csrow->last_page = last_page;
-
rc = 0;
+
}
}
}
@@ -1042,10 +1023,8 @@ static int __devinit i7300_init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[3];
struct i7300_pvt *pvt;
- int num_channels;
- int num_dimms_per_channel;
- int num_csrows;
int rc;
/* wake up device */
@@ -1062,23 +1041,17 @@ static int __devinit i7300_init_one(struct pci_dev *pdev,
if (PCI_FUNC(pdev->devfn) != 0)
return -ENODEV;
- /* As we don't have a motherboard identification routine to determine
- * actual number of slots/dimms per channel, we thus utilize the
- * resource as specified by the chipset. Thus, we might have
- * have more DIMMs per channel than actually on the mobo, but this
- * allows the driver to support up to the chipset max, without
- * some fancy mobo determination.
- */
- num_dimms_per_channel = MAX_SLOTS;
- num_channels = MAX_CHANNELS;
- num_csrows = MAX_SLOTS * MAX_CHANNELS;
-
- debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
- __func__, num_channels, num_dimms_per_channel, num_csrows);
-
/* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
-
+ layers[0].type = EDAC_MC_LAYER_BRANCH;
+ layers[0].size = MAX_BRANCHES;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = MAX_CH_PER_BRANCH;
+ layers[1].is_virt_csrow = true;
+ layers[2].type = EDAC_MC_LAYER_SLOT;
+ layers[2].size = MAX_SLOTS;
+ layers[2].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (mci == NULL)
return -ENOMEM;
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 7f1dfcc4e59..d27778f65a5 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -221,7 +221,9 @@ struct i7core_inject {
};
struct i7core_channel {
- u32 ranks;
+ bool is_3dimms_present;
+ bool is_single_4rank;
+ bool has_4rank;
u32 dimms;
};
@@ -257,7 +259,6 @@ struct i7core_pvt {
struct i7core_channel channel[NUM_CHANS];
int ce_count_available;
- int csrow_map[NUM_CHANS][MAX_DIMMS];
/* ECC corrected errors counts per udimm */
unsigned long udimm_ce_count[MAX_DIMMS];
@@ -492,116 +493,15 @@ static void free_i7core_dev(struct i7core_dev *i7core_dev)
/****************************************************************************
Memory check routines
****************************************************************************/
-static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
- unsigned func)
-{
- struct i7core_dev *i7core_dev = get_i7core_dev(socket);
- int i;
-
- if (!i7core_dev)
- return NULL;
-
- for (i = 0; i < i7core_dev->n_devs; i++) {
- if (!i7core_dev->pdev[i])
- continue;
-
- if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
- PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
- return i7core_dev->pdev[i];
- }
- }
-
- return NULL;
-}
-
-/**
- * i7core_get_active_channels() - gets the number of channels and csrows
- * @socket: Quick Path Interconnect socket
- * @channels: Number of channels that will be returned
- * @csrows: Number of csrows found
- *
- * Since EDAC core needs to know in advance the number of available channels
- * and csrows, in order to allocate memory for csrows/channels, it is needed
- * to run two similar steps. At the first step, implemented on this function,
- * it checks the number of csrows/channels present at one socket.
- * this is used in order to properly allocate the size of mci components.
- *
- * It should be noticed that none of the current available datasheets explain
- * or even mention how csrows are seen by the memory controller. So, we need
- * to add a fake description for csrows.
- * So, this driver is attributing one DIMM memory for one csrow.
- */
-static int i7core_get_active_channels(const u8 socket, unsigned *channels,
- unsigned *csrows)
-{
- struct pci_dev *pdev = NULL;
- int i, j;
- u32 status, control;
-
- *channels = 0;
- *csrows = 0;
-
- pdev = get_pdev_slot_func(socket, 3, 0);
- if (!pdev) {
- i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
- socket);
- return -ENODEV;
- }
-
- /* Device 3 function 0 reads */
- pci_read_config_dword(pdev, MC_STATUS, &status);
- pci_read_config_dword(pdev, MC_CONTROL, &control);
-
- for (i = 0; i < NUM_CHANS; i++) {
- u32 dimm_dod[3];
- /* Check if the channel is active */
- if (!(control & (1 << (8 + i))))
- continue;
-
- /* Check if the channel is disabled */
- if (status & (1 << i))
- continue;
-
- pdev = get_pdev_slot_func(socket, i + 4, 1);
- if (!pdev) {
- i7core_printk(KERN_ERR, "Couldn't find socket %d "
- "fn %d.%d!!!\n",
- socket, i + 4, 1);
- return -ENODEV;
- }
- /* Devices 4-6 function 1 */
- pci_read_config_dword(pdev,
- MC_DOD_CH_DIMM0, &dimm_dod[0]);
- pci_read_config_dword(pdev,
- MC_DOD_CH_DIMM1, &dimm_dod[1]);
- pci_read_config_dword(pdev,
- MC_DOD_CH_DIMM2, &dimm_dod[2]);
- (*channels)++;
-
- for (j = 0; j < 3; j++) {
- if (!DIMM_PRESENT(dimm_dod[j]))
- continue;
- (*csrows)++;
- }
- }
-
- debugf0("Number of active channels on socket %d: %d\n",
- socket, *channels);
-
- return 0;
-}
-
-static int get_dimm_config(const struct mem_ctl_info *mci)
+static int get_dimm_config(struct mem_ctl_info *mci)
{
struct i7core_pvt *pvt = mci->pvt_info;
- struct csrow_info *csr;
struct pci_dev *pdev;
int i, j;
- int csrow = 0;
- unsigned long last_page = 0;
enum edac_type mode;
enum mem_type mtype;
+ struct dimm_info *dimm;
/* Get data from the MC register, function 0 */
pdev = pvt->pci_mcr[0];
@@ -657,21 +557,20 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
pci_read_config_dword(pvt->pci_ch[i][0],
MC_CHANNEL_DIMM_INIT_PARAMS, &data);
- pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ?
- 4 : 2;
+
+ if (data & THREE_DIMMS_PRESENT)
+ pvt->channel[i].is_3dimms_present = true;
+
+ if (data & SINGLE_QUAD_RANK_PRESENT)
+ pvt->channel[i].is_single_4rank = true;
+
+ if (data & QUAD_RANK_PRESENT)
+ pvt->channel[i].has_4rank = true;
if (data & REGISTERED_DIMM)
mtype = MEM_RDDR3;
else
mtype = MEM_DDR3;
-#if 0
- if (data & THREE_DIMMS_PRESENT)
- pvt->channel[i].dimms = 3;
- else if (data & SINGLE_QUAD_RANK_PRESENT)
- pvt->channel[i].dimms = 1;
- else
- pvt->channel[i].dimms = 2;
-#endif
/* Devices 4-6 function 1 */
pci_read_config_dword(pvt->pci_ch[i][1],
@@ -682,11 +581,13 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
MC_DOD_CH_DIMM2, &dimm_dod[2]);
debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
- "%d ranks, %cDIMMs\n",
+ "%s%s%s%cDIMMs\n",
i,
RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
data,
- pvt->channel[i].ranks,
+ pvt->channel[i].is_3dimms_present ? "3DIMMS " : "",
+ pvt->channel[i].is_3dimms_present ? "SINGLE_4R " : "",
+ pvt->channel[i].has_4rank ? "HAS_4R " : "",
(data & REGISTERED_DIMM) ? 'R' : 'U');
for (j = 0; j < 3; j++) {
@@ -696,6 +597,8 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
if (!DIMM_PRESENT(dimm_dod[j]))
continue;
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+ i, j, 0);
banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
@@ -704,8 +607,6 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
/* DDR3 has 8 I/O banks */
size = (rows * cols * banks * ranks) >> (20 - 3);
- pvt->channel[i].dimms++;
-
debugf0("\tdimm %d %d Mb offset: %x, "
"bank: %d, rank: %d, row: %#x, col: %#x\n",
j, size,
@@ -714,44 +615,28 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
npages = MiB_TO_PAGES(size);
- csr = &mci->csrows[csrow];
- csr->first_page = last_page + 1;
- last_page += npages;
- csr->last_page = last_page;
- csr->nr_pages = npages;
-
- csr->page_mask = 0;
- csr->grain = 8;
- csr->csrow_idx = csrow;
- csr->nr_channels = 1;
-
- csr->channels[0].chan_idx = i;
- csr->channels[0].ce_count = 0;
-
- pvt->csrow_map[i][j] = csrow;
+ dimm->nr_pages = npages;
switch (banks) {
case 4:
- csr->dtype = DEV_X4;
+ dimm->dtype = DEV_X4;
break;
case 8:
- csr->dtype = DEV_X8;
+ dimm->dtype = DEV_X8;
break;
case 16:
- csr->dtype = DEV_X16;
+ dimm->dtype = DEV_X16;
break;
default:
- csr->dtype = DEV_UNKNOWN;
+ dimm->dtype = DEV_UNKNOWN;
}
- csr->edac_mode = mode;
- csr->mtype = mtype;
- snprintf(csr->channels[0].label,
- sizeof(csr->channels[0].label),
- "CPU#%uChannel#%u_DIMM#%u",
- pvt->i7core_dev->socket, i, j);
-
- csrow++;
+ snprintf(dimm->label, sizeof(dimm->label),
+ "CPU#%uChannel#%u_DIMM#%u",
+ pvt->i7core_dev->socket, i, j);
+ dimm->grain = 8;
+ dimm->edac_mode = mode;
+ dimm->mtype = mtype;
}
pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
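For the size computed earlier in this hunk: rows, cols, banks and ranks are element counts, so their product is the number of 64-bit (8-byte) locations, and shifting right by (20 - 3) turns that into MiB. For example, 16384 rows x 2048 columns x 8 banks x 2 ranks = 2^29 locations, and 2^29 >> 17 = 4096 MB, which MiB_TO_PAGES() then records as 4096 << 8 = 1048576 four-KiB pages (assuming 4 KiB pages; a reading of the code above, not a datasheet figure).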
@@ -1567,22 +1452,16 @@ error:
/****************************************************************************
Error check routines
****************************************************************************/
-static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
+static void i7core_rdimm_update_errcount(struct mem_ctl_info *mci,
const int chan,
const int dimm,
const int add)
{
- char *msg;
- struct i7core_pvt *pvt = mci->pvt_info;
- int row = pvt->csrow_map[chan][dimm], i;
+ int i;
for (i = 0; i < add; i++) {
- msg = kasprintf(GFP_KERNEL, "Corrected error "
- "(Socket=%d channel=%d dimm=%d)",
- pvt->i7core_dev->socket, chan, dimm);
-
- edac_mc_handle_fbd_ce(mci, row, 0, msg);
- kfree (msg);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+ chan, dimm, -1, "error", "", NULL);
}
}
@@ -1623,11 +1502,11 @@ static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
/*updated the edac core */
if (add0 != 0)
- i7core_rdimm_update_csrow(mci, chan, 0, add0);
+ i7core_rdimm_update_errcount(mci, chan, 0, add0);
if (add1 != 0)
- i7core_rdimm_update_csrow(mci, chan, 1, add1);
+ i7core_rdimm_update_errcount(mci, chan, 1, add1);
if (add2 != 0)
- i7core_rdimm_update_csrow(mci, chan, 2, add2);
+ i7core_rdimm_update_errcount(mci, chan, 2, add2);
}
@@ -1747,20 +1626,30 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
const struct mce *m)
{
struct i7core_pvt *pvt = mci->pvt_info;
- char *type, *optype, *err, *msg;
+ char *type, *optype, *err, msg[80];
+ enum hw_event_mc_err_type tp_event;
unsigned long error = m->status & 0x1ff0000l;
+ bool uncorrected_error = m->mcgstatus & 1ll << 61;
+ bool ripv = m->mcgstatus & 1;
u32 optypenum = (m->status >> 4) & 0x07;
u32 core_err_cnt = (m->status >> 38) & 0x7fff;
u32 dimm = (m->misc >> 16) & 0x3;
u32 channel = (m->misc >> 18) & 0x3;
u32 syndrome = m->misc >> 32;
u32 errnum = find_first_bit(&error, 32);
- int csrow;
- if (m->mcgstatus & 1)
- type = "FATAL";
- else
- type = "NON_FATAL";
+ if (uncorrected_error) {
+ if (ripv) {
+ type = "FATAL";
+ tp_event = HW_EVENT_ERR_FATAL;
+ } else {
+ type = "NON_FATAL";
+ tp_event = HW_EVENT_ERR_UNCORRECTED;
+ }
+ } else {
+ type = "CORRECTED";
+ tp_event = HW_EVENT_ERR_CORRECTED;
+ }
switch (optypenum) {
case 0:
@@ -1815,27 +1704,20 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
err = "unknown";
}
- /* FIXME: should convert addr into bank and rank information */
- msg = kasprintf(GFP_ATOMIC,
- "%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
- "syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
- type, (long long) m->addr, m->cpu, dimm, channel,
- syndrome, core_err_cnt, (long long)m->status,
- (long long)m->misc, optype, err);
-
- debugf0("%s", msg);
-
- csrow = pvt->csrow_map[channel][dimm];
+ snprintf(msg, sizeof(msg), "count=%d %s", core_err_cnt, optype);
- /* Call the helper to output message */
- if (m->mcgstatus & 1)
- edac_mc_handle_fbd_ue(mci, csrow, 0,
- 0 /* FIXME: should be channel here */, msg);
- else if (!pvt->is_registered)
- edac_mc_handle_fbd_ce(mci, csrow,
- 0 /* FIXME: should be channel here */, msg);
-
- kfree(msg);
+ /*
+ * Call the helper to output message
+ * FIXME: what to do if core_err_cnt > 1? Currently, it generates
+ * only one event
+ */
+ if (uncorrected_error || !pvt->is_registered)
+ edac_mc_handle_error(tp_event, mci,
+ m->addr >> PAGE_SHIFT,
+ m->addr & ~PAGE_MASK,
+ syndrome,
+ channel, dimm, -1,
+ err, msg, m);
}
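The classification above is taken directly from the machine-check record: bit 61 of mcgstatus flags an uncorrected error and bit 0 is RIPV, and the pair is folded into the three event severities. A compact sketch of that mapping, with the bit positions taken from this patch rather than from the SDM:

	/* sketch of the severity mapping used by i7core_mce_output_error() above */
	static enum hw_event_mc_err_type example_classify(const struct mce *m)
	{
		bool uncorrected_error = m->mcgstatus & (1ull << 61);
		bool ripv = m->mcgstatus & 1;

		if (!uncorrected_error)
			return HW_EVENT_ERR_CORRECTED;

		return ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED;
	}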
/*
@@ -2252,15 +2134,19 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
{
struct mem_ctl_info *mci;
struct i7core_pvt *pvt;
- int rc, channels, csrows;
-
- /* Check the number of active and not disabled channels */
- rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows);
- if (unlikely(rc < 0))
- return rc;
+ int rc;
+ struct edac_mc_layer layers[2];
/* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket);
+
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = NUM_CHANS;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = MAX_DIMMS;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(i7core_dev->socket, ARRAY_SIZE(layers), layers,
+ sizeof(*pvt));
if (unlikely(!mci))
return -ENOMEM;
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 3bf2b2f490e..52072c28a8a 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -12,7 +12,7 @@
* 440GX fix by Jason Uhlenkott <juhlenko@akamai.com>.
*
* Written with reference to 82443BX Host Bridge Datasheet:
- * http://download.intel.com/design/chipsets/datashts/29063301.pdf
+ * http://download.intel.com/design/chipsets/datashts/29063301.pdf
* references to this document given in [].
*
* This module doesn't support the 440LX, but it may be possible to
@@ -156,19 +156,19 @@ static int i82443bxgx_edacmc_process_error_info(struct mem_ctl_info *mci,
if (info->eap & I82443BXGX_EAP_OFFSET_SBE) {
error_found = 1;
if (handle_errors)
- edac_mc_handle_ce(mci, page, pageoffset,
- /* 440BX/GX don't make syndrome information
- * available */
- 0, edac_mc_find_csrow_by_page(mci, page), 0,
- mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, pageoffset, 0,
+ edac_mc_find_csrow_by_page(mci, page),
+ 0, -1, mci->ctl_name, "", NULL);
}
if (info->eap & I82443BXGX_EAP_OFFSET_MBE) {
error_found = 1;
if (handle_errors)
- edac_mc_handle_ue(mci, page, pageoffset,
- edac_mc_find_csrow_by_page(mci, page),
- mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, pageoffset, 0,
+ edac_mc_find_csrow_by_page(mci, page),
+ 0, -1, mci->ctl_name, "", NULL);
}
return error_found;
@@ -189,6 +189,7 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
enum mem_type mtype)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
int index;
u8 drbar, dramc;
u32 row_base, row_high_limit, row_high_limit_last;
@@ -197,6 +198,8 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
row_high_limit_last = 0;
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
+ dimm = csrow->channels[0].dimm;
+
pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n",
mci->mc_idx, __FILE__, __func__, index, drbar);
@@ -217,14 +220,14 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
row_base = row_high_limit_last;
csrow->first_page = row_base >> PAGE_SHIFT;
csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
- csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
+ dimm->nr_pages = csrow->last_page - csrow->first_page + 1;
/* EAP reports in 4kilobyte granularity [61] */
- csrow->grain = 1 << 12;
- csrow->mtype = mtype;
+ dimm->grain = 1 << 12;
+ dimm->mtype = mtype;
/* I don't think 440BX can tell you device type? FIXME? */
- csrow->dtype = DEV_UNKNOWN;
+ dimm->dtype = DEV_UNKNOWN;
/* Mode is global to all rows on 440BX */
- csrow->edac_mode = edac_mode;
+ dimm->edac_mode = edac_mode;
row_high_limit_last = row_high_limit;
}
}
@@ -232,6 +235,7 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
u8 dramc;
u32 nbxcfg, ecc_mode;
enum mem_type mtype;
@@ -245,8 +249,13 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
if (pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg))
return -EIO;
- mci = edac_mc_alloc(0, I82443BXGX_NR_CSROWS, I82443BXGX_NR_CHANS, 0);
-
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = I82443BXGX_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = I82443BXGX_NR_CHANS;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (mci == NULL)
return -ENOMEM;
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index c779092d18d..08045059d10 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -99,6 +99,7 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
struct i82860_error_info *info,
int handle_errors)
{
+ struct dimm_info *dimm;
int row;
if (!(info->errsts2 & 0x0003))
@@ -108,18 +109,25 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
return 1;
if ((info->errsts ^ info->errsts2) & 0x0003) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1, "UE overwrote CE", "", NULL);
info->errsts = info->errsts2;
}
info->eap >>= PAGE_SHIFT;
row = edac_mc_find_csrow_by_page(mci, info->eap);
+ dimm = mci->csrows[row].channels[0].dimm;
if (info->errsts & 0x0002)
- edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ info->eap, 0, 0,
+ dimm->location[0], dimm->location[1], -1,
+ "i82860 UE", "", NULL);
else
- edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 0,
- "i82860 UE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ info->eap, 0, info->derrsyn,
+ dimm->location[0], dimm->location[1], -1,
+ "i82860 CE", "", NULL);
return 1;
}
@@ -140,6 +148,7 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
u16 value;
u32 cumul_size;
struct csrow_info *csrow;
+ struct dimm_info *dimm;
int index;
pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim);
@@ -153,6 +162,8 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
*/
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
+ dimm = csrow->channels[0].dimm;
+
pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
cumul_size = (value & I82860_GBA_MASK) <<
(I82860_GBA_SHIFT - PAGE_SHIFT);
@@ -164,30 +175,38 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
+ dimm->nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 12; /* I82860_EAP has 4KiB reolution */
- csrow->mtype = MEM_RMBS;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
+ dimm->grain = 1 << 12; /* I82860_EAP has 4KiB resolution */
+ dimm->mtype = MEM_RMBS;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
}
}
static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct i82860_error_info discard;
- /* RDRAM has channels but these don't map onto the abstractions that
- edac uses.
- The device groups from the GRA registers seem to map reasonably
- well onto the notion of a chip select row.
- There are 16 GRA registers and since the name is associated with
- the channel and the GRA registers map to physical devices so we are
- going to make 1 channel for group.
+ /*
+ * RDRAM has channels but these don't map onto the csrow abstraction.
+ * According to the datasheet, there are 2 Rambus channels, supporting
+ * up to 16 direct RDRAM devices.
+ * The device groups from the GRA registers seem to map reasonably
+ * well onto the notion of a chip select row.
+ * There are 16 GRA registers; since each name is associated with a
+ * channel and the GRA registers map to physical devices, we model
+ * one channel per group.
*/
- mci = edac_mc_alloc(0, 16, 1, 0);
-
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = 2;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = 8;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (!mci)
return -ENOMEM;
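
The series also replaces the legacy per-error helpers throughout. As a hedged reference for the conversions below, assuming the argument order used by every edac_mc_handle_error() call in this diff (error type, mci, page, offset, syndrome, one position per allocation layer, then message, detail string and an optional architecture-specific log), a legacy correctable-error report maps roughly like this:

	/* legacy call removed by this series */
	edac_mc_handle_ce(mci, page, offset, syndrome, row, channel, "label");

	/* replacement; -1 marks a layer whose position could not be determined */
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
			     page, offset, syndrome,
			     row, channel, -1,		/* top/mid/low layer positions */
			     "label", "", NULL);	/* msg, other detail, arch log */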
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 10f15d85fb5..b613e31c16e 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -38,7 +38,8 @@
#endif /* PCI_DEVICE_ID_INTEL_82875_6 */
/* four csrows in dual channel, eight in single channel */
-#define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans))
+#define I82875P_NR_DIMMS 8
+#define I82875P_NR_CSROWS(nr_chans) (I82875P_NR_DIMMS / (nr_chans))
/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */
#define I82875P_EAP 0x58 /* Error Address Pointer (32b)
@@ -235,7 +236,9 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci,
return 1;
if ((info->errsts ^ info->errsts2) & 0x0081) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1,
+ "UE overwrote CE", "", NULL);
info->errsts = info->errsts2;
}
@@ -243,11 +246,15 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci,
row = edac_mc_find_csrow_by_page(mci, info->eap);
if (info->errsts & 0x0080)
- edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ info->eap, 0, 0,
+ row, -1, -1,
+ "i82875p UE", "", NULL);
else
- edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
- multi_chan ? (info->des & 0x1) : 0,
- "i82875p CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ info->eap, 0, info->derrsyn,
+ row, multi_chan ? (info->des & 0x1) : 0,
+ -1, "i82875p CE", "", NULL);
return 1;
}
@@ -342,11 +349,13 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci,
void __iomem * ovrfl_window, u32 drc)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
+ unsigned nr_chans = dual_channel_active(drc) + 1;
unsigned long last_cumul_size;
u8 value;
u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
- u32 cumul_size;
- int index;
+ u32 cumul_size, nr_pages;
+ int index, j;
drc_ddim = (drc >> 18) & 0x1;
last_cumul_size = 0;
@@ -369,12 +378,18 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci,
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
+ nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */
- csrow->mtype = MEM_DDR;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
+
+ for (j = 0; j < nr_chans; j++) {
+ dimm = csrow->channels[j].dimm;
+
+ dimm->nr_pages = nr_pages / nr_chans;
+ dimm->grain = 1 << 12; /* I82875P_EAP has 4KiB resolution */
+ dimm->mtype = MEM_DDR;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
+ }
}
}
@@ -382,6 +397,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc = -ENODEV;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct i82875p_pvt *pvt;
struct pci_dev *ovrfl_pdev;
void __iomem *ovrfl_window;
@@ -397,9 +413,14 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
return -ENODEV;
drc = readl(ovrfl_window + I82875P_DRC);
nr_chans = dual_channel_active(drc) + 1;
- mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
- nr_chans, 0);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = I82875P_NR_CSROWS(nr_chans);
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = nr_chans;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (!mci) {
rc = -ENOMEM;
goto fail0;
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 0cd8368f88f..433332c7cdb 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -29,7 +29,8 @@
#define PCI_DEVICE_ID_INTEL_82975_0 0x277c
#endif /* PCI_DEVICE_ID_INTEL_82975_0 */
-#define I82975X_NR_CSROWS(nr_chans) (8/(nr_chans))
+#define I82975X_NR_DIMMS 8
+#define I82975X_NR_CSROWS(nr_chans) (I82975X_NR_DIMMS / (nr_chans))
/* Intel 82975X register addresses - device 0 function 0 - DRAM Controller */
#define I82975X_EAP 0x58 /* Dram Error Address Pointer (32b)
@@ -287,7 +288,8 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
return 1;
if ((info->errsts ^ info->errsts2) & 0x0003) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1, "UE overwrote CE", "", NULL);
info->errsts = info->errsts2;
}
@@ -309,13 +311,18 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
chan = (mci->csrows[row].nr_channels == 1) ? 0 : info->eap & 1;
offst = info->eap
& ((1 << PAGE_SHIFT) -
- (1 << mci->csrows[row].grain));
+ (1 << mci->csrows[row].channels[chan].dimm->grain));
if (info->errsts & 0x0002)
- edac_mc_handle_ue(mci, page, offst , row, "i82975x UE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, offst, 0,
+ row, -1, -1,
+ "i82975x UE", "", NULL);
else
- edac_mc_handle_ce(mci, page, offst, info->derrsyn, row,
- chan, "i82975x CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offst, info->derrsyn,
+ row, chan ? chan : 0, -1,
+ "i82975x CE", "", NULL);
return 1;
}
@@ -370,8 +377,10 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
struct csrow_info *csrow;
unsigned long last_cumul_size;
u8 value;
- u32 cumul_size;
+ u32 cumul_size, nr_pages;
int index, chan;
+ struct dimm_info *dimm;
+ enum dev_type dtype;
last_cumul_size = 0;
@@ -400,28 +409,33 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
cumul_size);
+ nr_pages = cumul_size - last_cumul_size;
+ if (!nr_pages)
+ continue;
+
/*
* Initialise dram labels
* index values:
* [0-7] for single-channel; i.e. csrow->nr_channels = 1
* [0-3] for dual-channel; i.e. csrow->nr_channels = 2
*/
- for (chan = 0; chan < csrow->nr_channels; chan++)
- strncpy(csrow->channels[chan].label,
+ dtype = i82975x_dram_type(mch_window, index);
+ for (chan = 0; chan < csrow->nr_channels; chan++) {
+ dimm = mci->csrows[index].channels[chan].dimm;
+
+ dimm->nr_pages = nr_pages / csrow->nr_channels;
+ strncpy(csrow->channels[chan].dimm->label,
labels[(index >> 1) + (chan * 2)],
EDAC_MC_LABEL_LEN);
-
- if (cumul_size == last_cumul_size)
- continue; /* not populated */
+ dimm->grain = 1 << 7; /* 128Byte cache-line resolution */
+ dimm->dtype = i82975x_dram_type(mch_window, index);
+ dimm->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
+ dimm->edac_mode = EDAC_SECDED; /* only supported */
+ }
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 7; /* 128Byte cache-line resolution */
- csrow->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
- csrow->dtype = i82975x_dram_type(mch_window, index);
- csrow->edac_mode = EDAC_SECDED; /* only supported */
}
}
@@ -463,6 +477,7 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc = -ENODEV;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct i82975x_pvt *pvt;
void __iomem *mch_window;
u32 mchbar;
@@ -531,8 +546,13 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
chans = dual_channel_active(mch_window) + 1;
/* assuming only one controller, index thus is 0 */
- mci = edac_mc_alloc(sizeof(*pvt), I82975X_NR_CSROWS(chans),
- chans, 0);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = I82975X_NR_DIMMS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = I82975X_NR_CSROWS(chans);
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (!mci) {
rc = -ENOMEM;
goto fail1;
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 73464a62adf..4c402353ba9 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -854,12 +854,16 @@ static void mpc85xx_mc_check(struct mem_ctl_info *mci)
mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
if (err_detect & DDR_EDE_SBE)
- edac_mc_handle_ce(mci, pfn, err_addr & ~PAGE_MASK,
- syndrome, row_index, 0, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ pfn, err_addr & ~PAGE_MASK, syndrome,
+ row_index, 0, -1,
+ mci->ctl_name, "", NULL);
if (err_detect & DDR_EDE_MBE)
- edac_mc_handle_ue(mci, pfn, err_addr & ~PAGE_MASK,
- row_index, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ pfn, err_addr & ~PAGE_MASK, syndrome,
+ row_index, 0, -1,
+ mci->ctl_name, "", NULL);
out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
}
@@ -883,6 +887,7 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
{
struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
struct csrow_info *csrow;
+ struct dimm_info *dimm;
u32 sdram_ctl;
u32 sdtype;
enum mem_type mtype;
@@ -929,6 +934,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
u32 end;
csrow = &mci->csrows[index];
+ dimm = csrow->channels[0].dimm;
+
cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
(index * MPC85XX_MC_CS_BNDS_OFS));
@@ -944,19 +951,21 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
csrow->first_page = start;
csrow->last_page = end;
- csrow->nr_pages = end + 1 - start;
- csrow->grain = 8;
- csrow->mtype = mtype;
- csrow->dtype = DEV_UNKNOWN;
+
+ dimm->nr_pages = end + 1 - start;
+ dimm->grain = 8;
+ dimm->mtype = mtype;
+ dimm->dtype = DEV_UNKNOWN;
if (sdram_ctl & DSC_X32_EN)
- csrow->dtype = DEV_X32;
- csrow->edac_mode = EDAC_SECDED;
+ dimm->dtype = DEV_X32;
+ dimm->edac_mode = EDAC_SECDED;
}
}
static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct mpc85xx_mc_pdata *pdata;
struct resource r;
u32 sdram_ctl;
@@ -965,7 +974,13 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL))
return -ENOMEM;
- mci = edac_mc_alloc(sizeof(*pdata), 4, 1, edac_mc_idx);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = 4;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = 1;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers, sizeof(*pdata));
if (!mci) {
devres_release_group(&op->dev, mpc85xx_mc_err_probe);
return -ENOMEM;
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index 7e5ff367705..b0bb5a3d252 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -611,12 +611,17 @@ static void mv64x60_mc_check(struct mem_ctl_info *mci)
/* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */
if (!(reg & 0x1))
- edac_mc_handle_ce(mci, err_addr >> PAGE_SHIFT,
- err_addr & PAGE_MASK, syndrome, 0, 0,
- mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ err_addr >> PAGE_SHIFT,
+ err_addr & PAGE_MASK, syndrome,
+ 0, 0, -1,
+ mci->ctl_name, "", NULL);
else /* 2 bit error, UE */
- edac_mc_handle_ue(mci, err_addr >> PAGE_SHIFT,
- err_addr & PAGE_MASK, 0, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ err_addr >> PAGE_SHIFT,
+ err_addr & PAGE_MASK, 0,
+ 0, 0, -1,
+ mci->ctl_name, "", NULL);
/* clear the error */
out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
@@ -656,6 +661,8 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci,
struct mv64x60_mc_pdata *pdata)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
+
u32 devtype;
u32 ctl;
@@ -664,35 +671,36 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci,
ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
csrow = &mci->csrows[0];
- csrow->first_page = 0;
- csrow->nr_pages = pdata->total_mem >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
- csrow->grain = 8;
+ dimm = csrow->channels[0].dimm;
+
+ dimm->nr_pages = pdata->total_mem >> PAGE_SHIFT;
+ dimm->grain = 8;
- csrow->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;
+ dimm->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;
devtype = (ctl >> 20) & 0x3;
switch (devtype) {
case 0x0:
- csrow->dtype = DEV_X32;
+ dimm->dtype = DEV_X32;
break;
case 0x2: /* could be X8 too, but no way to tell */
- csrow->dtype = DEV_X16;
+ dimm->dtype = DEV_X16;
break;
case 0x3:
- csrow->dtype = DEV_X4;
+ dimm->dtype = DEV_X4;
break;
default:
- csrow->dtype = DEV_UNKNOWN;
+ dimm->dtype = DEV_UNKNOWN;
break;
}
- csrow->edac_mode = EDAC_SECDED;
+ dimm->edac_mode = EDAC_SECDED;
}
static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct mv64x60_mc_pdata *pdata;
struct resource *r;
u32 ctl;
@@ -701,7 +709,14 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
if (!devres_open_group(&pdev->dev, mv64x60_mc_err_probe, GFP_KERNEL))
return -ENOMEM;
- mci = edac_mc_alloc(sizeof(struct mv64x60_mc_pdata), 1, 1, edac_mc_idx);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = 1;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = 1;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
+ sizeof(struct mv64x60_mc_pdata));
if (!mci) {
printk(KERN_ERR "%s: No memory for CPU err\n", __func__);
devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c
index 7f71ee43674..b095a906a99 100644
--- a/drivers/edac/pasemi_edac.c
+++ b/drivers/edac/pasemi_edac.c
@@ -110,15 +110,16 @@ static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta)
/* uncorrectable/multi-bit errors */
if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS |
MCDEBUG_ERRSTA_RFL_STATUS)) {
- edac_mc_handle_ue(mci, mci->csrows[cs].first_page, 0,
- cs, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ mci->csrows[cs].first_page, 0, 0,
+ cs, 0, -1, mci->ctl_name, "", NULL);
}
/* correctable/single-bit errors */
- if (errsta & MCDEBUG_ERRSTA_SBE_STATUS) {
- edac_mc_handle_ce(mci, mci->csrows[cs].first_page, 0,
- 0, cs, 0, mci->ctl_name);
- }
+ if (errsta & MCDEBUG_ERRSTA_SBE_STATUS)
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ mci->csrows[cs].first_page, 0, 0,
+ cs, 0, -1, mci->ctl_name, "", NULL);
}
static void pasemi_edac_check(struct mem_ctl_info *mci)
@@ -135,11 +136,13 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
enum edac_type edac_mode)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
u32 rankcfg;
int index;
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
+ dimm = csrow->channels[0].dimm;
pci_read_config_dword(pdev,
MCDRAM_RANKCFG + (index * 12),
@@ -151,20 +154,20 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
switch ((rankcfg & MCDRAM_RANKCFG_TYPE_SIZE_M) >>
MCDRAM_RANKCFG_TYPE_SIZE_S) {
case 0:
- csrow->nr_pages = 128 << (20 - PAGE_SHIFT);
+ dimm->nr_pages = 128 << (20 - PAGE_SHIFT);
break;
case 1:
- csrow->nr_pages = 256 << (20 - PAGE_SHIFT);
+ dimm->nr_pages = 256 << (20 - PAGE_SHIFT);
break;
case 2:
case 3:
- csrow->nr_pages = 512 << (20 - PAGE_SHIFT);
+ dimm->nr_pages = 512 << (20 - PAGE_SHIFT);
break;
case 4:
- csrow->nr_pages = 1024 << (20 - PAGE_SHIFT);
+ dimm->nr_pages = 1024 << (20 - PAGE_SHIFT);
break;
case 5:
- csrow->nr_pages = 2048 << (20 - PAGE_SHIFT);
+ dimm->nr_pages = 2048 << (20 - PAGE_SHIFT);
break;
default:
edac_mc_printk(mci, KERN_ERR,
@@ -174,13 +177,13 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
}
csrow->first_page = last_page_in_mmc;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
- last_page_in_mmc += csrow->nr_pages;
+ csrow->last_page = csrow->first_page + dimm->nr_pages - 1;
+ last_page_in_mmc += dimm->nr_pages;
csrow->page_mask = 0;
- csrow->grain = PASEMI_EDAC_ERROR_GRAIN;
- csrow->mtype = MEM_DDR;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = edac_mode;
+ dimm->grain = PASEMI_EDAC_ERROR_GRAIN;
+ dimm->mtype = MEM_DDR;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = edac_mode;
}
return 0;
}
@@ -189,6 +192,7 @@ static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct mem_ctl_info *mci = NULL;
+ struct edac_mc_layer layers[2];
u32 errctl1, errcor, scrub, mcen;
pci_read_config_dword(pdev, MCCFG_MCEN, &mcen);
@@ -205,9 +209,14 @@ static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
MCDEBUG_ERRCTL1_RFL_LOG_EN;
pci_write_config_dword(pdev, MCDEBUG_ERRCTL1, errctl1);
- mci = edac_mc_alloc(0, PASEMI_EDAC_NR_CSROWS, PASEMI_EDAC_NR_CHANS,
- system_mmc_id++);
-
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = PASEMI_EDAC_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = PASEMI_EDAC_NR_CHANS;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(system_mmc_id++, ARRAY_SIZE(layers), layers,
+ 0);
if (mci == NULL)
return -ENOMEM;
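
Where a chip-select row spans more than one channel (i82875p and i82975x above, ppc4xx and x38 below), the conversions share one further idiom: the page count decoded for the row is split evenly across the per-channel DIMM structures. A condensed sketch, with nr_pages and the attribute values standing in for the driver-specific decode:

	int j;

	for (j = 0; j < csrow->nr_channels; j++) {
		struct dimm_info *dimm = csrow->channels[j].dimm;

		dimm->nr_pages = nr_pages / csrow->nr_channels;
		dimm->grain = grain;		/* driver-specific error resolution */
		dimm->mtype = mtype;
		dimm->dtype = dtype;
		dimm->edac_mode = edac_mode;
	}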
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index d427c69bb8b..f3f9fed06ad 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -727,7 +727,10 @@ ppc4xx_edac_handle_ce(struct mem_ctl_info *mci,
for (row = 0; row < mci->nr_csrows; row++)
if (ppc4xx_edac_check_bank_error(status, row))
- edac_mc_handle_ce_no_info(mci, message);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 0, 0, 0,
+ row, 0, -1,
+ message, "", NULL);
}
/**
@@ -755,7 +758,10 @@ ppc4xx_edac_handle_ue(struct mem_ctl_info *mci,
for (row = 0; row < mci->nr_csrows; row++)
if (ppc4xx_edac_check_bank_error(status, row))
- edac_mc_handle_ue(mci, page, offset, row, message);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, offset, 0,
+ row, 0, -1,
+ message, "", NULL);
}
/**
@@ -895,9 +901,8 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
enum mem_type mtype;
enum dev_type dtype;
enum edac_type edac_mode;
- int row;
- u32 mbxcf, size;
- static u32 ppc4xx_last_page;
+ int row, j;
+ u32 mbxcf, size, nr_pages;
/* Establish the memory type and width */
@@ -948,7 +953,7 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
case SDRAM_MBCF_SZ_2GB:
case SDRAM_MBCF_SZ_4GB:
case SDRAM_MBCF_SZ_8GB:
- csi->nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size);
+ nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size);
break;
default:
ppc4xx_edac_mc_printk(KERN_ERR, mci,
@@ -959,10 +964,6 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
goto done;
}
- csi->first_page = ppc4xx_last_page;
- csi->last_page = csi->first_page + csi->nr_pages - 1;
- csi->page_mask = 0;
-
/*
* It's unclear exactly what grain should be set to
* here. The SDRAM_ECCES register allows resolution of
@@ -975,15 +976,17 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
* possible values would be the PLB width (16), the
* page size (PAGE_SIZE) or the memory width (2 or 4).
*/
+ for (j = 0; j < csi->nr_channels; j++) {
+ struct dimm_info *dimm = csi->channels[j].dimm;
- csi->grain = 1;
-
- csi->mtype = mtype;
- csi->dtype = dtype;
+ dimm->nr_pages = nr_pages / csi->nr_channels;
+ dimm->grain = 1;
- csi->edac_mode = edac_mode;
+ dimm->mtype = mtype;
+ dimm->dtype = dtype;
- ppc4xx_last_page += csi->nr_pages;
+ dimm->edac_mode = edac_mode;
+ }
}
done:
@@ -1236,6 +1239,7 @@ static int __devinit ppc4xx_edac_probe(struct platform_device *op)
dcr_host_t dcr_host;
const struct device_node *np = op->dev.of_node;
struct mem_ctl_info *mci = NULL;
+ struct edac_mc_layer layers[2];
static int ppc4xx_edac_instance;
/*
@@ -1281,12 +1285,14 @@ static int __devinit ppc4xx_edac_probe(struct platform_device *op)
* controller instance and perform the appropriate
* initialization.
*/
-
- mci = edac_mc_alloc(sizeof(struct ppc4xx_edac_pdata),
- ppc4xx_edac_nr_csrows,
- ppc4xx_edac_nr_chans,
- ppc4xx_edac_instance);
-
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = ppc4xx_edac_nr_csrows;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = ppc4xx_edac_nr_chans;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(ppc4xx_edac_instance, ARRAY_SIZE(layers), layers,
+ sizeof(struct ppc4xx_edac_pdata));
if (mci == NULL) {
ppc4xx_edac_printk(KERN_ERR, "%s: "
"Failed to allocate EDAC MC instance!\n",
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index 6d908ad72d6..e1cacd164f3 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -179,10 +179,11 @@ static int r82600_process_error_info(struct mem_ctl_info *mci,
error_found = 1;
if (handle_errors)
- edac_mc_handle_ce(mci, page, 0, /* not avail */
- syndrome,
- edac_mc_find_csrow_by_page(mci, page),
- 0, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, 0, syndrome,
+ edac_mc_find_csrow_by_page(mci, page),
+ 0, -1,
+ mci->ctl_name, "", NULL);
}
if (info->eapr & BIT(1)) { /* UE? */
@@ -190,9 +191,11 @@ static int r82600_process_error_info(struct mem_ctl_info *mci,
if (handle_errors)
/* 82600 doesn't give enough info */
- edac_mc_handle_ue(mci, page, 0,
- edac_mc_find_csrow_by_page(mci, page),
- mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, 0, 0,
+ edac_mc_find_csrow_by_page(mci, page),
+ 0, -1,
+ mci->ctl_name, "", NULL);
}
return error_found;
@@ -216,6 +219,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
u8 dramcr)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
int index;
u8 drbar; /* SDRAM Row Boundary Address Register */
u32 row_high_limit, row_high_limit_last;
@@ -227,6 +231,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
+ dimm = csrow->channels[0].dimm;
/* find the DRAM Chip Select Base address and mask */
pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
@@ -247,16 +252,17 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
csrow->first_page = row_base >> PAGE_SHIFT;
csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
- csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
+
+ dimm->nr_pages = csrow->last_page - csrow->first_page + 1;
/* Error address is top 19 bits - so granularity is *
* 14 bits */
- csrow->grain = 1 << 14;
- csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
+ dimm->grain = 1 << 14;
+ dimm->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
/* FIXME - check that this is unknowable with this chipset */
- csrow->dtype = DEV_UNKNOWN;
+ dimm->dtype = DEV_UNKNOWN;
/* Mode is global on 82600 */
- csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
+ dimm->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
row_high_limit_last = row_high_limit;
}
}
@@ -264,6 +270,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
u8 dramcr;
u32 eapr;
u32 scrub_disabled;
@@ -278,8 +285,13 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
debugf2("%s(): sdram refresh rate = %#0x\n", __func__,
sdram_refresh_rate);
debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
- mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS, 0);
-
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = R82600_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = R82600_NR_CHANS;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (mci == NULL)
return -ENOMEM;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 123204f8e23..4adaf4b7da9 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -314,8 +314,6 @@ struct sbridge_pvt {
struct sbridge_info info;
struct sbridge_channel channel[NUM_CHANNELS];
- int csrow_map[NUM_CHANNELS][MAX_DIMMS];
-
/* Memory type detection */
bool is_mirrored, is_lockstep, is_close_pg;
@@ -487,29 +485,14 @@ static struct pci_dev *get_pdev_slot_func(u8 bus, unsigned slot,
}
/**
- * sbridge_get_active_channels() - gets the number of channels and csrows
+ * check_if_ecc_is_active() - Checks if ECC is active
* bus: Device bus
- * @channels: Number of channels that will be returned
- * @csrows: Number of csrows found
- *
- * Since EDAC core needs to know in advance the number of available channels
- * and csrows, in order to allocate memory for csrows/channels, it is needed
- * to run two similar steps. At the first step, implemented on this function,
- * it checks the number of csrows/channels present at one socket, identified
- * by the associated PCI bus.
- * this is used in order to properly allocate the size of mci components.
- * Note: one csrow is one dimm.
*/
-static int sbridge_get_active_channels(const u8 bus, unsigned *channels,
- unsigned *csrows)
+static int check_if_ecc_is_active(const u8 bus)
{
struct pci_dev *pdev = NULL;
- int i, j;
u32 mcmtr;
- *channels = 0;
- *csrows = 0;
-
pdev = get_pdev_slot_func(bus, 15, 0);
if (!pdev) {
sbridge_printk(KERN_ERR, "Couldn't find PCI device "
@@ -523,41 +506,14 @@ static int sbridge_get_active_channels(const u8 bus, unsigned *channels,
sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n");
return -ENODEV;
}
-
- for (i = 0; i < NUM_CHANNELS; i++) {
- u32 mtr;
-
- /* Device 15 functions 2 - 5 */
- pdev = get_pdev_slot_func(bus, 15, 2 + i);
- if (!pdev) {
- sbridge_printk(KERN_ERR, "Couldn't find PCI device "
- "%2x.%02d.%d!!!\n",
- bus, 15, 2 + i);
- return -ENODEV;
- }
- (*channels)++;
-
- for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
- pci_read_config_dword(pdev, mtr_regs[j], &mtr);
- debugf1("Bus#%02x channel #%d MTR%d = %x\n", bus, i, j, mtr);
- if (IS_DIMM_PRESENT(mtr))
- (*csrows)++;
- }
- }
-
- debugf0("Number of active channels: %d, number of active dimms: %d\n",
- *channels, *csrows);
-
return 0;
}
-static int get_dimm_config(const struct mem_ctl_info *mci)
+static int get_dimm_config(struct mem_ctl_info *mci)
{
struct sbridge_pvt *pvt = mci->pvt_info;
- struct csrow_info *csr;
+ struct dimm_info *dimm;
int i, j, banks, ranks, rows, cols, size, npages;
- int csrow = 0;
- unsigned long last_page = 0;
u32 reg;
enum edac_type mode;
enum mem_type mtype;
@@ -616,6 +572,8 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
u32 mtr;
for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+ i, j, 0);
pci_read_config_dword(pvt->pci_tad[i],
mtr_regs[j], &mtr);
debugf4("Channel #%d MTR%d = %x\n", i, j, mtr);
@@ -634,29 +592,15 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
pvt->sbridge_dev->mc, i, j,
size, npages,
banks, ranks, rows, cols);
- csr = &mci->csrows[csrow];
-
- csr->first_page = last_page;
- csr->last_page = last_page + npages - 1;
- csr->page_mask = 0UL; /* Unused */
- csr->nr_pages = npages;
- csr->grain = 32;
- csr->csrow_idx = csrow;
- csr->dtype = (banks == 8) ? DEV_X8 : DEV_X4;
- csr->ce_count = 0;
- csr->ue_count = 0;
- csr->mtype = mtype;
- csr->edac_mode = mode;
- csr->nr_channels = 1;
- csr->channels[0].chan_idx = i;
- csr->channels[0].ce_count = 0;
- pvt->csrow_map[i][j] = csrow;
- snprintf(csr->channels[0].label,
- sizeof(csr->channels[0].label),
+
+ dimm->nr_pages = npages;
+ dimm->grain = 32;
+ dimm->dtype = (banks == 8) ? DEV_X8 : DEV_X4;
+ dimm->mtype = mtype;
+ dimm->edac_mode = mode;
+ snprintf(dimm->label, sizeof(dimm->label),
"CPU_SrcID#%u_Channel#%u_DIMM#%u",
pvt->sbridge_dev->source_id, i, j);
- last_page += npages;
- csrow++;
}
}
}
@@ -844,11 +788,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
u8 *socket,
long *channel_mask,
u8 *rank,
- char *area_type)
+ char **area_type, char *msg)
{
struct mem_ctl_info *new_mci;
struct sbridge_pvt *pvt = mci->pvt_info;
- char msg[256];
int n_rir, n_sads, n_tads, sad_way, sck_xch;
int sad_interl, idx, base_ch;
int interleave_mode;
@@ -870,12 +813,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
*/
if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
if (addr >= (u64)pvt->tohm) {
sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
@@ -892,7 +833,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
limit = SAD_LIMIT(reg);
if (limit <= prv) {
sprintf(msg, "Can't discover the memory socket");
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
if (addr <= limit)
@@ -901,10 +841,9 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
}
if (n_sads == MAX_SAD) {
sprintf(msg, "Can't discover the memory socket");
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
- area_type = get_dram_attr(reg);
+ *area_type = get_dram_attr(reg);
interleave_mode = INTERLEAVE_MODE(reg);
pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads],
@@ -942,7 +881,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
break;
default:
sprintf(msg, "Can't discover socket interleave");
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
*socket = sad_interleave[idx];
@@ -957,7 +895,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
if (!new_mci) {
sprintf(msg, "Struct for socket #%u wasn't initialized",
*socket);
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
mci = new_mci;
@@ -973,7 +910,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
limit = TAD_LIMIT(reg);
if (limit <= prv) {
sprintf(msg, "Can't discover the memory channel");
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
if (addr <= limit)
@@ -1013,7 +949,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
break;
default:
sprintf(msg, "Can't discover the TAD target");
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
*channel_mask = 1 << base_ch;
@@ -1027,7 +962,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
break;
default:
sprintf(msg, "Invalid mirror set. Can't decode addr");
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
} else
@@ -1055,7 +989,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
if (offset > addr) {
sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
offset, addr);
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
addr -= offset;
@@ -1095,7 +1028,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
if (n_rir == MAX_RIR_RANGES) {
sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
ch_addr);
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
rir_way = RIR_WAY(reg);
@@ -1409,7 +1341,8 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
{
struct mem_ctl_info *new_mci;
struct sbridge_pvt *pvt = mci->pvt_info;
- char *type, *optype, *msg, *recoverable_msg;
+ enum hw_event_mc_err_type tp_event;
+ char *type, *optype, msg[256];
bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
bool overflow = GET_BITFIELD(m->status, 62, 62);
bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
@@ -1421,13 +1354,21 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
u32 optypenum = GET_BITFIELD(m->status, 4, 6);
long channel_mask, first_channel;
u8 rank, socket;
- int csrow, rc, dimm;
- char *area_type = "Unknown";
-
- if (ripv)
- type = "NON_FATAL";
- else
- type = "FATAL";
+ int rc, dimm;
+ char *area_type = NULL;
+
+ if (uncorrected_error) {
+ if (ripv) {
+ type = "FATAL";
+ tp_event = HW_EVENT_ERR_FATAL;
+ } else {
+ type = "NON_FATAL";
+ tp_event = HW_EVENT_ERR_UNCORRECTED;
+ }
+ } else {
+ type = "CORRECTED";
+ tp_event = HW_EVENT_ERR_CORRECTED;
+ }
/*
* According with Table 15-9 of the Intel Architecture spec vol 3A,
@@ -1445,19 +1386,19 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
} else {
switch (optypenum) {
case 0:
- optype = "generic undef request";
+ optype = "generic undef request error";
break;
case 1:
- optype = "memory read";
+ optype = "memory read error";
break;
case 2:
- optype = "memory write";
+ optype = "memory write error";
break;
case 3:
- optype = "addr/cmd";
+ optype = "addr/cmd error";
break;
case 4:
- optype = "memory scrubbing";
+ optype = "memory scrubbing error";
break;
default:
optype = "reserved";
@@ -1466,13 +1407,13 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
}
rc = get_memory_error_data(mci, m->addr, &socket,
- &channel_mask, &rank, area_type);
+ &channel_mask, &rank, &area_type, msg);
if (rc < 0)
- return;
+ goto err_parsing;
new_mci = get_mci_for_node_id(socket);
if (!new_mci) {
- edac_mc_handle_ce_no_info(mci, "Error: socket got corrupted!");
- return;
+ strcpy(msg, "Error: socket got corrupted!");
+ goto err_parsing;
}
mci = new_mci;
pvt = mci->pvt_info;
@@ -1486,45 +1427,39 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
else
dimm = 2;
- csrow = pvt->csrow_map[first_channel][dimm];
-
- if (uncorrected_error && recoverable)
- recoverable_msg = " recoverable";
- else
- recoverable_msg = "";
/*
- * FIXME: What should we do with "channel" information on mcelog?
- * Probably, we can just discard it, as the channel information
- * comes from the get_memory_error_data() address decoding
+ * FIXME: On some memory configurations (mirror, lockstep), the
+ * Memory Controller can't point the error to a single DIMM. The
+ * EDAC core should be handling the channel mask, in order to point
+ * to the group of DIMMs where the error may be happening.
*/
- msg = kasprintf(GFP_ATOMIC,
- "%d %s error(s): %s on %s area %s%s: cpu=%d Err=%04x:%04x (ch=%d), "
- "addr = 0x%08llx => socket=%d, Channel=%ld(mask=%ld), rank=%d\n",
- core_err_cnt,
- area_type,
- optype,
- type,
- recoverable_msg,
- overflow ? "OVERFLOW" : "",
- m->cpu,
- mscod, errcode,
- channel, /* 1111b means not specified */
- (long long) m->addr,
- socket,
- first_channel, /* This is the real channel on SB */
- channel_mask,
- rank);
+ snprintf(msg, sizeof(msg),
+ "count:%d%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d",
+ core_err_cnt,
+ overflow ? " OVERFLOW" : "",
+ (uncorrected_error && recoverable) ? " recoverable" : "",
+ area_type,
+ mscod, errcode,
+ socket,
+ channel_mask,
+ rank);
debugf0("%s", msg);
+ /* FIXME: need support for channel mask */
+
/* Call the helper to output message */
- if (uncorrected_error)
- edac_mc_handle_fbd_ue(mci, csrow, 0, 0, msg);
- else
- edac_mc_handle_fbd_ce(mci, csrow, 0, msg);
+ edac_mc_handle_error(tp_event, mci,
+ m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
+ channel, dimm, -1,
+ optype, msg, m);
+ return;
+err_parsing:
+ edac_mc_handle_error(tp_event, mci, 0, 0, 0,
+ -1, -1, -1,
+ msg, "", m);
- kfree(msg);
}
/*
@@ -1683,16 +1618,25 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct sbridge_pvt *pvt;
- int rc, channels, csrows;
+ int rc;
/* Check the number of active and not disabled channels */
- rc = sbridge_get_active_channels(sbridge_dev->bus, &channels, &csrows);
+ rc = check_if_ecc_is_active(sbridge_dev->bus);
if (unlikely(rc < 0))
return rc;
/* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, sbridge_dev->mc);
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = NUM_CHANNELS;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = MAX_DIMMS;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
+ sizeof(*pvt));
+
if (unlikely(!mci))
return -ENOMEM;
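
sb_edac is the one driver in this series that addresses DIMMs by per-layer coordinates rather than through a csrow, via the EDAC_DIMM_PTR() macro seen in get_dimm_config() above. A hedged sketch of that usage, on the assumption that the macro resolves the (channel, slot, unused) coordinates against mci->layers into a flat index into mci->dimms; source_id stands in for the socket identifier kept in the driver's private data:

	/* channel i, slot j; the third coordinate is unused on a two-layer MC */
	dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);

	dimm->nr_pages = npages;
	dimm->grain = 32;
	dimm->mtype = mtype;
	dimm->edac_mode = mode;
	snprintf(dimm->label, sizeof(dimm->label),
		 "CPU_SrcID#%u_Channel#%u_DIMM#%u", source_id, i, j);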
diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c
index e99d0097618..7bb4614730d 100644
--- a/drivers/edac/tile_edac.c
+++ b/drivers/edac/tile_edac.c
@@ -71,7 +71,10 @@ static void tile_edac_check(struct mem_ctl_info *mci)
if (mem_error.sbe_count != priv->ce_count) {
dev_dbg(mci->dev, "ECC CE err on node %d\n", priv->node);
priv->ce_count = mem_error.sbe_count;
- edac_mc_handle_ce(mci, 0, 0, 0, 0, 0, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 0, 0, 0,
+ 0, 0, -1,
+ mci->ctl_name, "", NULL);
}
}
@@ -84,6 +87,7 @@ static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
struct csrow_info *csrow = &mci->csrows[0];
struct tile_edac_priv *priv = mci->pvt_info;
struct mshim_mem_info mem_info;
+ struct dimm_info *dimm = csrow->channels[0].dimm;
if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info,
sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) !=
@@ -93,27 +97,25 @@ static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
}
if (mem_info.mem_ecc)
- csrow->edac_mode = EDAC_SECDED;
+ dimm->edac_mode = EDAC_SECDED;
else
- csrow->edac_mode = EDAC_NONE;
+ dimm->edac_mode = EDAC_NONE;
switch (mem_info.mem_type) {
case DDR2:
- csrow->mtype = MEM_DDR2;
+ dimm->mtype = MEM_DDR2;
break;
case DDR3:
- csrow->mtype = MEM_DDR3;
+ dimm->mtype = MEM_DDR3;
break;
default:
return -1;
}
- csrow->first_page = 0;
- csrow->nr_pages = mem_info.mem_size >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
- csrow->grain = TILE_EDAC_ERROR_GRAIN;
- csrow->dtype = DEV_UNKNOWN;
+ dimm->nr_pages = mem_info.mem_size >> PAGE_SHIFT;
+ dimm->grain = TILE_EDAC_ERROR_GRAIN;
+ dimm->dtype = DEV_UNKNOWN;
return 0;
}
@@ -123,6 +125,7 @@ static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
char hv_file[32];
int hv_devhdl;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct tile_edac_priv *priv;
int rc;
@@ -132,8 +135,14 @@ static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
return -EINVAL;
/* A TILE MC has a single channel and one chip-select row. */
- mci = edac_mc_alloc(sizeof(struct tile_edac_priv),
- TILE_EDAC_NR_CSROWS, TILE_EDAC_NR_CHANS, pdev->id);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = TILE_EDAC_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = TILE_EDAC_NR_CHANS;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,
+ sizeof(struct tile_edac_priv));
if (mci == NULL)
return -ENOMEM;
priv = mci->pvt_info;
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index a438297389e..1ac7962d63e 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -215,19 +215,26 @@ static void x38_process_error_info(struct mem_ctl_info *mci,
return;
if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1,
+ "UE overwrote CE", "", NULL);
info->errsts = info->errsts2;
}
for (channel = 0; channel < x38_channel_num; channel++) {
log = info->eccerrlog[channel];
if (log & X38_ECCERRLOG_UE) {
- edac_mc_handle_ue(mci, 0, 0,
- eccerrlog_row(channel, log), "x38 UE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ 0, 0, 0,
+ eccerrlog_row(channel, log),
+ -1, -1,
+ "x38 UE", "", NULL);
} else if (log & X38_ECCERRLOG_CE) {
- edac_mc_handle_ce(mci, 0, 0,
- eccerrlog_syndrome(log),
- eccerrlog_row(channel, log), 0, "x38 CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 0, 0, eccerrlog_syndrome(log),
+ eccerrlog_row(channel, log),
+ -1, -1,
+ "x38 CE", "", NULL);
}
}
}
@@ -317,9 +324,9 @@ static unsigned long drb_to_nr_pages(
static int x38_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc;
- int i;
+ int i, j;
struct mem_ctl_info *mci = NULL;
- unsigned long last_page;
+ struct edac_mc_layer layers[2];
u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL];
bool stacked;
void __iomem *window;
@@ -335,7 +342,13 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
how_many_channel(pdev);
/* FIXME: unconventional pvt_info usage */
- mci = edac_mc_alloc(0, X38_RANKS, x38_channel_num, 0);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = X38_RANKS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = x38_channel_num;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (!mci)
return -ENOMEM;
@@ -363,7 +376,6 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
* cumulative; the last one will contain the total memory
* contained in all ranks.
*/
- last_page = -1UL;
for (i = 0; i < mci->nr_csrows; i++) {
unsigned long nr_pages;
struct csrow_info *csrow = &mci->csrows[i];
@@ -372,20 +384,18 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
i / X38_RANKS_PER_CHANNEL,
i % X38_RANKS_PER_CHANNEL);
- if (nr_pages == 0) {
- csrow->mtype = MEM_EMPTY;
+ if (nr_pages == 0)
continue;
- }
- csrow->first_page = last_page + 1;
- last_page += nr_pages;
- csrow->last_page = last_page;
- csrow->nr_pages = nr_pages;
+ for (j = 0; j < x38_channel_num; j++) {
+ struct dimm_info *dimm = csrow->channels[j].dimm;
- csrow->grain = nr_pages << PAGE_SHIFT;
- csrow->mtype = MEM_DDR2;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = EDAC_UNKNOWN;
+ dimm->nr_pages = nr_pages / x38_channel_num;
+ dimm->grain = nr_pages << PAGE_SHIFT;
+ dimm->mtype = MEM_DDR2;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = EDAC_UNKNOWN;
+ }
}
x38_clear_error_info(mci);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index aa3642cb820..c4067d0141f 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -114,6 +114,14 @@ config GPIO_EP93XX
depends on ARCH_EP93XX
select GPIO_GENERIC
+config GPIO_MM_LANTIQ
+ bool "Lantiq Memory mapped GPIOs"
+ depends on LANTIQ && SOC_XWAY
+ help
+ This enables support for memory mapped GPIOs on the External Bus Unit
+ (EBU) found on Lantiq SoCs. The GPIOs are output only, as they are
+ created by attaching a 16-bit latch to the bus.
+
config GPIO_MPC5200
def_bool y
depends on PPC_MPC52xx
@@ -167,6 +175,14 @@ config GPIO_PXA
help
Say yes here to support the PXA GPIO device
+config GPIO_STA2X11
+ bool "STA2x11/ConneXt GPIO support"
+ depends on MFD_STA2X11
+ select GENERIC_IRQ_CHIP
+ help
+ Say yes here to support the STA2x11/ConneXt GPIO device.
+ The GPIO module has 128 GPIO pins with alternate functions.
+
config GPIO_XILINX
bool "Xilinx GPIO support"
depends on PPC_OF || MICROBLAZE
@@ -180,13 +196,13 @@ config GPIO_VR41XX
Say yes here to support the NEC VR4100 series General-purpose I/O Uint
config GPIO_SCH
- tristate "Intel SCH/TunnelCreek GPIO"
+ tristate "Intel SCH/TunnelCreek/Centerton GPIO"
depends on PCI && X86
select MFD_CORE
select LPC_SCH
help
- Say yes here to support GPIO interface on Intel Poulsbo SCH
- or Intel Tunnel Creek processor.
+ Say yes here to support GPIO interface on Intel Poulsbo SCH,
+ Intel Tunnel Creek processor or Intel Centerton processor.
The Intel SCH contains a total of 14 GPIO pins. Ten GPIOs are
powered by the core power rail and are turned off during sleep
modes (S3 and higher). The remaining four GPIOs are powered by
@@ -195,6 +211,22 @@ config GPIO_SCH
system from the Suspend-to-RAM state.
The Intel Tunnel Creek processor has 5 GPIOs powered by the
core power rail and 9 from suspend power supply.
+ The Intel Centerton processor has a total of 30 GPIO pins.
+ Twenty-one are powered by the core power rail and 9 from the
+ suspend power supply.
+
+config GPIO_ICH
+ tristate "Intel ICH GPIO"
+ depends on PCI && X86
+ select MFD_CORE
+ select LPC_ICH
+ help
+ Say yes here to support the GPIO functionality of a number of Intel
+ ICH-based chipsets. Currently supported devices: ICH6, ICH7, ICH8,
+ ICH9, ICH10, Series 5/3400 (e.g. Ibex Peak), Series 6/C200 (e.g.
+ Cougar Point), NM10 (Tiger Point), and 3100 (Whitmore Lake).
+
+ If unsure, say N.
config GPIO_VX855
tristate "VIA VX855/VX875 GPIO"
@@ -334,6 +366,16 @@ config GPIO_STMPE
This enables support for the GPIOs found on the STMPE I/O
Expanders.
+config GPIO_STP_XWAY
+ bool "XWAY STP GPIOs"
+ depends on SOC_XWAY
+ help
+ This enables support for the Serial To Parallel (STP) unit found on
+ XWAY SoCs. The STP allows the SoC to drive a cascade of shift registers
+ that can be up to 24 bits wide. This peripheral is aimed at driving LEDs.
+ Some of the GPIOs/LEDs can be updated automatically by the SoC with DSL
+ and PHY status.
+
config GPIO_TC3589X
bool "TC3589X GPIOs"
depends on MFD_TC3589X
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 07a79e24540..0f55662002c 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_ARCH_DAVINCI) += gpio-davinci.o
obj-$(CONFIG_GPIO_EM) += gpio-em.o
obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
obj-$(CONFIG_GPIO_GE_FPGA) += gpio-ge.o
+obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o
@@ -32,6 +33,7 @@ obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
obj-$(CONFIG_GPIO_MC9S08DZ60) += gpio-mc9s08dz60.o
obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o
obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
+obj-$(CONFIG_GPIO_MM_LANTIQ) += gpio-mm-lantiq.o
obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o
@@ -51,7 +53,9 @@ obj-$(CONFIG_PLAT_SAMSUNG) += gpio-samsung.o
obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
obj-$(CONFIG_GPIO_SCH) += gpio-sch.o
obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o
+obj-$(CONFIG_GPIO_STA2X11) += gpio-sta2x11.o
obj-$(CONFIG_GPIO_STMPE) += gpio-stmpe.o
+obj-$(CONFIG_GPIO_STP_XWAY) += gpio-stp-xway.o
obj-$(CONFIG_GPIO_SX150X) += gpio-sx150x.o
obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
new file mode 100644
index 00000000000..b7c06517403
--- /dev/null
+++ b/drivers/gpio/gpio-ich.c
@@ -0,0 +1,419 @@
+/*
+ * Intel ICH6-10, Series 5 and 6 GPIO driver
+ *
+ * Copyright (C) 2010 Extreme Engineering Solutions.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/lpc_ich.h>
+
+#define DRV_NAME "gpio_ich"
+
+/*
+ * GPIO register offsets in GPIO I/O space.
+ * Each chunk of 32 GPIOs is manipulated via its own USE_SELx, IO_SELx, and
+ * LVLx registers. Logic in the read/write functions takes a register and
+ * an absolute bit number and determines the proper register offset and bit
+ * number in that register. For example, to read the value of GPIO bit 50
+ * the code would access offset ichx_regs[2(=GPIO_LVL)][1(=50/32)],
+ * bit 18 (50%32).
+ */
+enum GPIO_REG {
+ GPIO_USE_SEL = 0,
+ GPIO_IO_SEL,
+ GPIO_LVL,
+};
+
+static const u8 ichx_regs[3][3] = {
+ {0x00, 0x30, 0x40}, /* USE_SEL[1-3] offsets */
+ {0x04, 0x34, 0x44}, /* IO_SEL[1-3] offsets */
+ {0x0c, 0x38, 0x48}, /* LVL[1-3] offsets */
+};
+
+#define ICHX_WRITE(val, reg, base_res) outl(val, (reg) + (base_res)->start)
+#define ICHX_READ(reg, base_res) inl((reg) + (base_res)->start)
+
+struct ichx_desc {
+ /* Max GPIO pins the chipset can have */
+ uint ngpio;
+
+ /* Whether the chipset has GPIO in GPE0_STS in the PM IO region */
+ bool uses_gpe0;
+
+ /* USE_SEL is bogus on some chipsets, eg 3100 */
+ u32 use_sel_ignore[3];
+
+ /* Some chipsets have quirks, let these use their own request/get */
+ int (*request)(struct gpio_chip *chip, unsigned offset);
+ int (*get)(struct gpio_chip *chip, unsigned offset);
+};
+
+static struct {
+ spinlock_t lock;
+ struct platform_device *dev;
+ struct gpio_chip chip;
+ struct resource *gpio_base; /* GPIO IO base */
+ struct resource *pm_base; /* Power Management IO base */
+ struct ichx_desc *desc; /* Pointer to chipset-specific description */
+ u32 orig_gpio_ctrl; /* Orig CTRL value, used to restore on exit */
+} ichx_priv;
+
+static int modparam_gpiobase = -1; /* dynamic */
+module_param_named(gpiobase, modparam_gpiobase, int, 0444);
+MODULE_PARM_DESC(gpiobase, "The GPIO number base. -1 means dynamic, "
+ "which is the default.");
+
+static int ichx_write_bit(int reg, unsigned nr, int val, int verify)
+{
+ unsigned long flags;
+ u32 data, tmp;
+ int reg_nr = nr / 32;
+ int bit = nr & 0x1f;
+ int ret = 0;
+
+ spin_lock_irqsave(&ichx_priv.lock, flags);
+
+ data = ICHX_READ(ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
+ if (val)
+ data |= 1 << bit;
+ else
+ data &= ~(1 << bit);
+ ICHX_WRITE(data, ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
+ tmp = ICHX_READ(ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
+ if (verify && data != tmp)
+ ret = -EPERM;
+
+ spin_unlock_irqrestore(&ichx_priv.lock, flags);
+
+ return ret;
+}
+
+static int ichx_read_bit(int reg, unsigned nr)
+{
+ unsigned long flags;
+ u32 data;
+ int reg_nr = nr / 32;
+ int bit = nr & 0x1f;
+
+ spin_lock_irqsave(&ichx_priv.lock, flags);
+
+ data = ICHX_READ(ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
+
+ spin_unlock_irqrestore(&ichx_priv.lock, flags);
+
+ return data & (1 << bit) ? 1 : 0;
+}
+
+static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
+{
+ /*
+ * Try setting pin as an input and verify it worked since many pins
+ * are output-only.
+ */
+ if (ichx_write_bit(GPIO_IO_SEL, nr, 1, 1))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ichx_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
+ int val)
+{
+ /* Set GPIO output value. */
+ ichx_write_bit(GPIO_LVL, nr, val, 0);
+
+ /*
+ * Try setting pin as an output and verify it worked since many pins
+ * are input-only.
+ */
+ if (ichx_write_bit(GPIO_IO_SEL, nr, 0, 1))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ichx_gpio_get(struct gpio_chip *chip, unsigned nr)
+{
+ return ichx_read_bit(GPIO_LVL, nr);
+}
+
+static int ich6_gpio_get(struct gpio_chip *chip, unsigned nr)
+{
+ unsigned long flags;
+ u32 data;
+
+ /*
+ * GPI 0 - 15 need to be read from the power management registers on
+ * a ICH6/3100 bridge.
+ */
+ if (nr < 16) {
+ if (!ichx_priv.pm_base)
+ return -ENXIO;
+
+ spin_lock_irqsave(&ichx_priv.lock, flags);
+
+ /* GPI 0 - 15 are latched, write 1 to clear */
+ ICHX_WRITE(1 << (16 + nr), 0, ichx_priv.pm_base);
+ data = ICHX_READ(0, ichx_priv.pm_base);
+
+ spin_unlock_irqrestore(&ichx_priv.lock, flags);
+
+ return (data >> 16) & (1 << nr) ? 1 : 0;
+ } else {
+ return ichx_gpio_get(chip, nr);
+ }
+}
+
+static int ichx_gpio_request(struct gpio_chip *chip, unsigned nr)
+{
+ /*
+ * Note we assume the BIOS properly set a bridge's USE value. Some
+ * chips (e.g. Intel 3100) have bogus USE values though, so first see if
+ * the chipset's USE value can be trusted for this specific bit.
+ * If it can't be trusted, assume that the pin can be used as a GPIO.
+ */
+ if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f)))
+ return 1;
+
+ return ichx_read_bit(GPIO_USE_SEL, nr) ? 0 : -ENODEV;
+}
+
+static int ich6_gpio_request(struct gpio_chip *chip, unsigned nr)
+{
+ /*
+ * Fixups for bits 16 and 17 are necessary on the Intel ICH6/3100
+ * bridge as they are controlled by USE register bits 0 and 1. See
+ * "Table 704 GPIO_USE_SEL1 register" in the i3100 datasheet for
+ * additional info.
+ */
+ if (nr == 16 || nr == 17)
+ nr -= 16;
+
+ return ichx_gpio_request(chip, nr);
+}
+
+static void ichx_gpio_set(struct gpio_chip *chip, unsigned nr, int val)
+{
+ ichx_write_bit(GPIO_LVL, nr, val, 0);
+}
+
+static void __devinit ichx_gpiolib_setup(struct gpio_chip *chip)
+{
+ chip->owner = THIS_MODULE;
+ chip->label = DRV_NAME;
+ chip->dev = &ichx_priv.dev->dev;
+
+ /* Allow chip-specific overrides of request()/get() */
+ chip->request = ichx_priv.desc->request ?
+ ichx_priv.desc->request : ichx_gpio_request;
+ chip->get = ichx_priv.desc->get ?
+ ichx_priv.desc->get : ichx_gpio_get;
+
+ chip->set = ichx_gpio_set;
+ chip->direction_input = ichx_gpio_direction_input;
+ chip->direction_output = ichx_gpio_direction_output;
+ chip->base = modparam_gpiobase;
+ chip->ngpio = ichx_priv.desc->ngpio;
+ chip->can_sleep = 0;
+ chip->dbg_show = NULL;
+}
+
+/* ICH6-based, 631xesb-based */
+static struct ichx_desc ich6_desc = {
+ /* Bridges using the ICH6 controller need fixups for GPIO 0 - 17 */
+ .request = ich6_gpio_request,
+ .get = ich6_gpio_get,
+
+ /* GPIO 0-15 are read in the GPE0_STS PM register */
+ .uses_gpe0 = true,
+
+ .ngpio = 50,
+};
+
+/* Intel 3100 */
+static struct ichx_desc i3100_desc = {
+ /*
+ * Bits 16,17, 20 of USE_SEL and bit 16 of USE_SEL2 always read 0 on
+ * the Intel 3100. See "Table 712. GPIO Summary Table" of 3100
+ * Datasheet for more info.
+ */
+ .use_sel_ignore = {0x00130000, 0x00010000, 0x0},
+
+ /* The 3100 needs fixups for GPIO 0 - 17 */
+ .request = ich6_gpio_request,
+ .get = ich6_gpio_get,
+
+ /* GPIO 0-15 are read in the GPE0_STS PM register */
+ .uses_gpe0 = true,
+
+ .ngpio = 50,
+};
+
+/* ICH7 and ICH8-based */
+static struct ichx_desc ich7_desc = {
+ .ngpio = 50,
+};
+
+/* ICH9-based */
+static struct ichx_desc ich9_desc = {
+ .ngpio = 61,
+};
+
+/* ICH10-based - Consumer/corporate versions have different amount of GPIO */
+static struct ichx_desc ich10_cons_desc = {
+ .ngpio = 61,
+};
+static struct ichx_desc ich10_corp_desc = {
+ .ngpio = 72,
+};
+
+/* Intel 5 series, 6 series, 3400 series, and C200 series */
+static struct ichx_desc intel5_desc = {
+ .ngpio = 76,
+};
+
+static int __devinit ichx_gpio_probe(struct platform_device *pdev)
+{
+ struct resource *res_base, *res_pm;
+ int err;
+ struct lpc_ich_info *ich_info = pdev->dev.platform_data;
+
+ if (!ich_info)
+ return -ENODEV;
+
+ ichx_priv.dev = pdev;
+
+ switch (ich_info->gpio_version) {
+ case ICH_I3100_GPIO:
+ ichx_priv.desc = &i3100_desc;
+ break;
+ case ICH_V5_GPIO:
+ ichx_priv.desc = &intel5_desc;
+ break;
+ case ICH_V6_GPIO:
+ ichx_priv.desc = &ich6_desc;
+ break;
+ case ICH_V7_GPIO:
+ ichx_priv.desc = &ich7_desc;
+ break;
+ case ICH_V9_GPIO:
+ ichx_priv.desc = &ich9_desc;
+ break;
+ case ICH_V10CORP_GPIO:
+ ichx_priv.desc = &ich10_corp_desc;
+ break;
+ case ICH_V10CONS_GPIO:
+ ichx_priv.desc = &ich10_cons_desc;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ res_base = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_GPIO);
+ if (!res_base || !res_base->start || !res_base->end)
+ return -ENODEV;
+
+ if (!request_region(res_base->start, resource_size(res_base),
+ pdev->name))
+ return -EBUSY;
+
+ ichx_priv.gpio_base = res_base;
+
+ /*
+ * If necessary, determine the I/O address of ACPI/power management
+ * registers which are needed to read the GPE0 register for GPI pins
+ * 0 - 15 on some chipsets.
+ */
+ if (!ichx_priv.desc->uses_gpe0)
+ goto init;
+
+ res_pm = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_GPE0);
+ if (!res_pm) {
+ pr_warn("ACPI BAR is unavailable, GPI 0 - 15 unavailable\n");
+ goto init;
+ }
+
+ if (!request_region(res_pm->start, resource_size(res_pm),
+ pdev->name)) {
+ pr_warn("ACPI BAR is busy, GPI 0 - 15 unavailable\n");
+ goto init;
+ }
+
+ ichx_priv.pm_base = res_pm;
+
+init:
+ ichx_gpiolib_setup(&ichx_priv.chip);
+ err = gpiochip_add(&ichx_priv.chip);
+ if (err) {
+ pr_err("Failed to register GPIOs\n");
+ goto add_err;
+ }
+
+ pr_info("GPIO from %d to %d on %s\n", ichx_priv.chip.base,
+ ichx_priv.chip.base + ichx_priv.chip.ngpio - 1, DRV_NAME);
+
+ return 0;
+
+add_err:
+ release_region(ichx_priv.gpio_base->start,
+ resource_size(ichx_priv.gpio_base));
+ if (ichx_priv.pm_base)
+ release_region(ichx_priv.pm_base->start,
+ resource_size(ichx_priv.pm_base));
+ return err;
+}
+
+static int __devexit ichx_gpio_remove(struct platform_device *pdev)
+{
+ int err;
+
+ err = gpiochip_remove(&ichx_priv.chip);
+ if (err) {
+ dev_err(&pdev->dev, "%s failed, %d\n",
+ "gpiochip_remove()", err);
+ return err;
+ }
+
+ release_region(ichx_priv.gpio_base->start,
+ resource_size(ichx_priv.gpio_base));
+ if (ichx_priv.pm_base)
+ release_region(ichx_priv.pm_base->start,
+ resource_size(ichx_priv.pm_base));
+
+ return 0;
+}
+
+static struct platform_driver ichx_gpio_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ },
+ .probe = ichx_gpio_probe,
+ .remove = __devexit_p(ichx_gpio_remove),
+};
+
+module_platform_driver(ichx_gpio_driver);
+
+MODULE_AUTHOR("Peter Tyser <ptyser@xes-inc.com>");
+MODULE_DESCRIPTION("GPIO interface for Intel ICH series");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:"DRV_NAME);
diff --git a/drivers/gpio/gpio-mm-lantiq.c b/drivers/gpio/gpio-mm-lantiq.c
new file mode 100644
index 00000000000..2983dfbd066
--- /dev/null
+++ b/drivers/gpio/gpio-mm-lantiq.c
@@ -0,0 +1,158 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include <lantiq_soc.h>
+
+/*
+ * By attaching hardware latches to the EBU it is possible to create output-
+ * only GPIOs. This driver configures a special memory address which, when
+ * written to, drives 16 bits out to the latches.
+ */
+
+#define LTQ_EBU_BUSCON 0x1e7ff /* 16 bit access, slowest timing */
+#define LTQ_EBU_WP 0x80000000 /* write protect bit */
+
+struct ltq_mm {
+ struct of_mm_gpio_chip mmchip;
+ u16 shadow; /* shadow the latches state */
+};
+
+/**
+ * ltq_mm_apply() - write the shadow value to the ebu address.
+ * @chip: Pointer to our private data structure.
+ *
+ * Write the shadow value to the EBU to set the gpios. We need to set the
+ * global EBU lock to make sure that PCI/MTD don't break.
+ */
+static void ltq_mm_apply(struct ltq_mm *chip)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ebu_lock, flags);
+ ltq_ebu_w32(LTQ_EBU_BUSCON, LTQ_EBU_BUSCON1);
+ __raw_writew(chip->shadow, chip->mmchip.regs);
+ ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);
+ spin_unlock_irqrestore(&ebu_lock, flags);
+}
+
+/**
+ * ltq_mm_set() - gpio_chip->set - set gpios.
+ * @gc: Pointer to gpio_chip device structure.
+ * @offset: GPIO signal number.
+ * @value: Value to be written to the specified signal.
+ *
+ * Set the shadow value and call ltq_mm_apply.
+ */
+static void ltq_mm_set(struct gpio_chip *gc, unsigned offset, int value)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct ltq_mm *chip =
+ container_of(mm_gc, struct ltq_mm, mmchip);
+
+ if (value)
+ chip->shadow |= (1 << offset);
+ else
+ chip->shadow &= ~(1 << offset);
+ ltq_mm_apply(chip);
+}
+
+/**
+ * ltq_mm_dir_out() - gpio_chip->dir_out - set gpio direction.
+ * @gc: Pointer to gpio_chip device structure.
+ * @offset: GPIO signal number.
+ * @value: Value to be written to the specified signal.
+ *
+ * Same as ltq_mm_set, always returns 0.
+ */
+static int ltq_mm_dir_out(struct gpio_chip *gc, unsigned offset, int value)
+{
+ ltq_mm_set(gc, offset, value);
+
+ return 0;
+}
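Consumers see these latched pins as ordinary output-only GPIOs, so they go through the usual gpiolib calls. Below is a minimal, hypothetical consumer sketch; the GPIO number 200 and the label are invented for illustration and assume the chip registered with a base that covers that number:

#include <linux/gpio.h>

/* Hypothetical board code, illustrative only. */
static int example_latch_led_init(void)
{
	int err = gpio_request(200, "front-panel-led");

	if (err)
		return err;

	/*
	 * Ends up in ltq_mm_dir_out(): updates the shadow word and
	 * writes it out to the latches through the EBU.
	 */
	return gpio_direction_output(200, 1);
}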
+
+/**
+ * ltq_mm_save_regs() - Set initial values of GPIO pins
+ * @mm_gc: pointer to memory mapped GPIO chip structure
+ */
+static void ltq_mm_save_regs(struct of_mm_gpio_chip *mm_gc)
+{
+ struct ltq_mm *chip =
+ container_of(mm_gc, struct ltq_mm, mmchip);
+
+ /* tell the ebu controller which memory address we will be using */
+ ltq_ebu_w32(CPHYSADDR(chip->mmchip.regs) | 0x1, LTQ_EBU_ADDRSEL1);
+
+ ltq_mm_apply(chip);
+}
+
+static int ltq_mm_probe(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct ltq_mm *chip;
+ const __be32 *shadow;
+ int ret = 0;
+
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get memory resource\n");
+ return -ENOENT;
+ }
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->mmchip.gc.ngpio = 16;
+ chip->mmchip.gc.label = "gpio-mm-ltq";
+ chip->mmchip.gc.direction_output = ltq_mm_dir_out;
+ chip->mmchip.gc.set = ltq_mm_set;
+ chip->mmchip.save_regs = ltq_mm_save_regs;
+
+ /* store the shadow value if one was passed by the devicetree */
+ shadow = of_get_property(pdev->dev.of_node, "lantiq,shadow", NULL);
+ if (shadow)
+ chip->shadow = be32_to_cpu(*shadow);
+
+ ret = of_mm_gpiochip_add(pdev->dev.of_node, &chip->mmchip);
+ if (ret)
+ kfree(chip);
+ return ret;
+}
+
+static const struct of_device_id ltq_mm_match[] = {
+ { .compatible = "lantiq,gpio-mm" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltq_mm_match);
+
+static struct platform_driver ltq_mm_driver = {
+ .probe = ltq_mm_probe,
+ .driver = {
+ .name = "gpio-mm-ltq",
+ .owner = THIS_MODULE,
+ .of_match_table = ltq_mm_match,
+ },
+};
+
+static int __init ltq_mm_init(void)
+{
+ return platform_driver_register(&ltq_mm_driver);
+}
+
+subsys_initcall(ltq_mm_init);
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index b4136501abd..39e49566996 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -25,23 +25,25 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/basic_mmio_gpio.h>
#include <linux/module.h>
-#include <mach/mxs.h>
#define MXS_SET 0x4
#define MXS_CLR 0x8
-#define PINCTRL_DOUT(n) ((cpu_is_mx23() ? 0x0500 : 0x0700) + (n) * 0x10)
-#define PINCTRL_DIN(n) ((cpu_is_mx23() ? 0x0600 : 0x0900) + (n) * 0x10)
-#define PINCTRL_DOE(n) ((cpu_is_mx23() ? 0x0700 : 0x0b00) + (n) * 0x10)
-#define PINCTRL_PIN2IRQ(n) ((cpu_is_mx23() ? 0x0800 : 0x1000) + (n) * 0x10)
-#define PINCTRL_IRQEN(n) ((cpu_is_mx23() ? 0x0900 : 0x1100) + (n) * 0x10)
-#define PINCTRL_IRQLEV(n) ((cpu_is_mx23() ? 0x0a00 : 0x1200) + (n) * 0x10)
-#define PINCTRL_IRQPOL(n) ((cpu_is_mx23() ? 0x0b00 : 0x1300) + (n) * 0x10)
-#define PINCTRL_IRQSTAT(n) ((cpu_is_mx23() ? 0x0c00 : 0x1400) + (n) * 0x10)
+#define PINCTRL_DOUT(p) ((is_imx23_gpio(p) ? 0x0500 : 0x0700) + (p->id) * 0x10)
+#define PINCTRL_DIN(p) ((is_imx23_gpio(p) ? 0x0600 : 0x0900) + (p->id) * 0x10)
+#define PINCTRL_DOE(p) ((is_imx23_gpio(p) ? 0x0700 : 0x0b00) + (p->id) * 0x10)
+#define PINCTRL_PIN2IRQ(p) ((is_imx23_gpio(p) ? 0x0800 : 0x1000) + (p->id) * 0x10)
+#define PINCTRL_IRQEN(p) ((is_imx23_gpio(p) ? 0x0900 : 0x1100) + (p->id) * 0x10)
+#define PINCTRL_IRQLEV(p) ((is_imx23_gpio(p) ? 0x0a00 : 0x1200) + (p->id) * 0x10)
+#define PINCTRL_IRQPOL(p) ((is_imx23_gpio(p) ? 0x0b00 : 0x1300) + (p->id) * 0x10)
+#define PINCTRL_IRQSTAT(p) ((is_imx23_gpio(p) ? 0x0c00 : 0x1400) + (p->id) * 0x10)
#define GPIO_INT_FALL_EDGE 0x0
#define GPIO_INT_LOW_LEV 0x1
@@ -52,14 +54,30 @@
#define irq_to_gpio(irq) ((irq) - MXS_GPIO_IRQ_START)
+enum mxs_gpio_id {
+ IMX23_GPIO,
+ IMX28_GPIO,
+};
+
struct mxs_gpio_port {
void __iomem *base;
int id;
int irq;
int virtual_irq_start;
struct bgpio_chip bgc;
+ enum mxs_gpio_id devid;
};
+static inline int is_imx23_gpio(struct mxs_gpio_port *port)
+{
+ return port->devid == IMX23_GPIO;
+}
+
+static inline int is_imx28_gpio(struct mxs_gpio_port *port)
+{
+ return port->devid == IMX28_GPIO;
+}
+
/* Note: This driver assumes 32 GPIOs are handled in one register */
static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
@@ -89,21 +107,21 @@ static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
}
/* set level or edge */
- pin_addr = port->base + PINCTRL_IRQLEV(port->id);
+ pin_addr = port->base + PINCTRL_IRQLEV(port);
if (edge & GPIO_INT_LEV_MASK)
writel(pin_mask, pin_addr + MXS_SET);
else
writel(pin_mask, pin_addr + MXS_CLR);
/* set polarity */
- pin_addr = port->base + PINCTRL_IRQPOL(port->id);
+ pin_addr = port->base + PINCTRL_IRQPOL(port);
if (edge & GPIO_INT_POL_MASK)
writel(pin_mask, pin_addr + MXS_SET);
else
writel(pin_mask, pin_addr + MXS_CLR);
writel(1 << (gpio & 0x1f),
- port->base + PINCTRL_IRQSTAT(port->id) + MXS_CLR);
+ port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
return 0;
}
@@ -117,8 +135,8 @@ static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
desc->irq_data.chip->irq_ack(&desc->irq_data);
- irq_stat = readl(port->base + PINCTRL_IRQSTAT(port->id)) &
- readl(port->base + PINCTRL_IRQEN(port->id));
+ irq_stat = readl(port->base + PINCTRL_IRQSTAT(port)) &
+ readl(port->base + PINCTRL_IRQEN(port));
while (irq_stat != 0) {
int irqoffset = fls(irq_stat) - 1;
@@ -164,8 +182,8 @@ static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port)
ct->chip.irq_unmask = irq_gc_mask_set_bit;
ct->chip.irq_set_type = mxs_gpio_set_irq_type;
ct->chip.irq_set_wake = mxs_gpio_set_wake_irq;
- ct->regs.ack = PINCTRL_IRQSTAT(port->id) + MXS_CLR;
- ct->regs.mask = PINCTRL_IRQEN(port->id);
+ ct->regs.ack = PINCTRL_IRQSTAT(port) + MXS_CLR;
+ ct->regs.mask = PINCTRL_IRQEN(port);
irq_setup_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST, 0);
}
@@ -179,60 +197,83 @@ static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
return port->virtual_irq_start + offset;
}
+static struct platform_device_id mxs_gpio_ids[] = {
+ {
+ .name = "imx23-gpio",
+ .driver_data = IMX23_GPIO,
+ }, {
+ .name = "imx28-gpio",
+ .driver_data = IMX28_GPIO,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, mxs_gpio_ids);
+
+static const struct of_device_id mxs_gpio_dt_ids[] = {
+ { .compatible = "fsl,imx23-gpio", .data = (void *) IMX23_GPIO, },
+ { .compatible = "fsl,imx28-gpio", .data = (void *) IMX28_GPIO, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_gpio_dt_ids);
+
static int __devinit mxs_gpio_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id =
+ of_match_device(mxs_gpio_dt_ids, &pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *parent;
static void __iomem *base;
struct mxs_gpio_port *port;
struct resource *iores = NULL;
int err;
- port = kzalloc(sizeof(struct mxs_gpio_port), GFP_KERNEL);
+ port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
- port->id = pdev->id;
+ if (np) {
+ port->id = of_alias_get_id(np, "gpio");
+ if (port->id < 0)
+ return port->id;
+ port->devid = (enum mxs_gpio_id) of_id->data;
+ } else {
+ port->id = pdev->id;
+ port->devid = pdev->id_entry->driver_data;
+ }
port->virtual_irq_start = MXS_GPIO_IRQ_START + port->id * 32;
+ port->irq = platform_get_irq(pdev, 0);
+ if (port->irq < 0)
+ return port->irq;
+
/*
* map memory region only once, as all the gpio ports
* share the same one
*/
if (!base) {
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iores) {
- err = -ENODEV;
- goto out_kfree;
- }
-
- if (!request_mem_region(iores->start, resource_size(iores),
- pdev->name)) {
- err = -EBUSY;
- goto out_kfree;
- }
-
- base = ioremap(iores->start, resource_size(iores));
- if (!base) {
- err = -ENOMEM;
- goto out_release_mem;
+ if (np) {
+ parent = of_get_parent(np);
+ base = of_iomap(parent, 0);
+ of_node_put(parent);
+ } else {
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_request_and_ioremap(&pdev->dev, iores);
}
+ if (!base)
+ return -EADDRNOTAVAIL;
}
port->base = base;
- port->irq = platform_get_irq(pdev, 0);
- if (port->irq < 0) {
- err = -EINVAL;
- goto out_iounmap;
- }
-
/*
* select the pin interrupt functionality but initially
* disable the interrupts
*/
- writel(~0U, port->base + PINCTRL_PIN2IRQ(port->id));
- writel(0, port->base + PINCTRL_IRQEN(port->id));
+ writel(~0U, port->base + PINCTRL_PIN2IRQ(port));
+ writel(0, port->base + PINCTRL_IRQEN(port));
/* clear address has to be used to clear IRQSTAT bits */
- writel(~0U, port->base + PINCTRL_IRQSTAT(port->id) + MXS_CLR);
+ writel(~0U, port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
/* gpio-mxs can be a generic irq chip */
mxs_gpio_init_gc(port);
@@ -242,41 +283,32 @@ static int __devinit mxs_gpio_probe(struct platform_device *pdev)
irq_set_handler_data(port->irq, port);
err = bgpio_init(&port->bgc, &pdev->dev, 4,
- port->base + PINCTRL_DIN(port->id),
- port->base + PINCTRL_DOUT(port->id), NULL,
- port->base + PINCTRL_DOE(port->id), NULL, 0);
+ port->base + PINCTRL_DIN(port),
+ port->base + PINCTRL_DOUT(port), NULL,
+ port->base + PINCTRL_DOE(port), NULL, 0);
if (err)
- goto out_iounmap;
+ return err;
port->bgc.gc.to_irq = mxs_gpio_to_irq;
port->bgc.gc.base = port->id * 32;
err = gpiochip_add(&port->bgc.gc);
- if (err)
- goto out_bgpio_remove;
+ if (err) {
+ bgpio_remove(&port->bgc);
+ return err;
+ }
return 0;
-
-out_bgpio_remove:
- bgpio_remove(&port->bgc);
-out_iounmap:
- if (iores)
- iounmap(port->base);
-out_release_mem:
- if (iores)
- release_mem_region(iores->start, resource_size(iores));
-out_kfree:
- kfree(port);
- dev_info(&pdev->dev, "%s failed with errno %d\n", __func__, err);
- return err;
}
static struct platform_driver mxs_gpio_driver = {
.driver = {
.name = "gpio-mxs",
.owner = THIS_MODULE,
+ .of_match_table = mxs_gpio_dt_ids,
},
.probe = mxs_gpio_probe,
+ .id_table = mxs_gpio_ids,
};
static int __init mxs_gpio_init(void)
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 421f6af0f99..7bb00448e13 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -2454,6 +2454,12 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = {
},
}, {
.chip = {
+ .base = EXYNOS5_GPC4(0),
+ .ngpio = EXYNOS5_GPIO_C4_NR,
+ .label = "GPC4",
+ },
+ }, {
+ .chip = {
.base = EXYNOS5_GPD0(0),
.ngpio = EXYNOS5_GPIO_D0_NR,
.label = "GPD0",
@@ -2826,8 +2832,11 @@ static __init void exynos5_gpiolib_init(void)
goto err_ioremap1;
}
+ /* need to set base address for gpc4 */
+ exynos5_gpios_1[11].base = gpio_base1 + 0x2E0;
+
/* need to set base address for gpx */
- chip = &exynos5_gpios_1[20];
+ chip = &exynos5_gpios_1[21];
gpx_base = gpio_base1 + 0xC00;
for (i = 0; i < 4; i++, chip++, gpx_base += 0x20)
chip->base = gpx_base;
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 8cadf4d683a..424dce8e3f3 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -232,6 +232,14 @@ static int __devinit sch_gpio_probe(struct platform_device *pdev)
sch_gpio_resume.ngpio = 9;
break;
+ case PCI_DEVICE_ID_INTEL_CENTERTON_ILB:
+ sch_gpio_core.base = 0;
+ sch_gpio_core.ngpio = 21;
+
+ sch_gpio_resume.base = 21;
+ sch_gpio_resume.ngpio = 9;
+ break;
+
default:
return -ENODEV;
}
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
new file mode 100644
index 00000000000..38416be8ba1
--- /dev/null
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -0,0 +1,435 @@
+/*
+ * STMicroelectronics ConneXt (STA2X11) GPIO driver
+ *
+ * Copyright 2012 ST Microelectronics (Alessandro Rubini)
+ * Based on gpio-ml-ioh.c, Copyright 2010 OKI Semiconductors Ltd.
+ * Also based on previous sta2x11 work, Copyright 2011 Wind River Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/sta2x11-mfd.h>
+
+struct gsta_regs {
+ u32 dat; /* 0x00 */
+ u32 dats;
+ u32 datc;
+ u32 pdis;
+ u32 dir; /* 0x10 */
+ u32 dirs;
+ u32 dirc;
+ u32 unused_1c;
+ u32 afsela; /* 0x20 */
+ u32 unused_24[7];
+ u32 rimsc; /* 0x40 */
+ u32 fimsc;
+ u32 is;
+ u32 ic;
+};
+
+struct gsta_gpio {
+ spinlock_t lock;
+ struct device *dev;
+ void __iomem *reg_base;
+ struct gsta_regs __iomem *regs[GSTA_NR_BLOCKS];
+ struct gpio_chip gpio;
+ int irq_base;
+ /* FIXME: save the whole config here (AF, ...) */
+ unsigned irq_type[GSTA_NR_GPIO];
+};
+
+static inline struct gsta_regs __iomem *__regs(struct gsta_gpio *chip, int nr)
+{
+ return chip->regs[nr / GSTA_GPIO_PER_BLOCK];
+}
+
+static inline u32 __bit(int nr)
+{
+ return 1U << (nr % GSTA_GPIO_PER_BLOCK);
+}
+
+/*
+ * gpio methods
+ */
+
+static void gsta_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
+{
+ struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+ struct gsta_regs __iomem *regs = __regs(chip, nr);
+ u32 bit = __bit(nr);
+
+ if (val)
+ writel(bit, &regs->dats);
+ else
+ writel(bit, &regs->datc);
+}
+
+static int gsta_gpio_get(struct gpio_chip *gpio, unsigned nr)
+{
+ struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+ struct gsta_regs __iomem *regs = __regs(chip, nr);
+ u32 bit = __bit(nr);
+
+ return readl(&regs->dat) & bit;
+}
+
+static int gsta_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
+ int val)
+{
+ struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+ struct gsta_regs __iomem *regs = __regs(chip, nr);
+ u32 bit = __bit(nr);
+
+ writel(bit, &regs->dirs);
+ /* Data register after direction, otherwise pullup/down is selected */
+ if (val)
+ writel(bit, &regs->dats);
+ else
+ writel(bit, &regs->datc);
+ return 0;
+}
+
+static int gsta_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
+{
+ struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+ struct gsta_regs __iomem *regs = __regs(chip, nr);
+ u32 bit = __bit(nr);
+
+ writel(bit, &regs->dirc);
+ return 0;
+}
+
+static int gsta_gpio_to_irq(struct gpio_chip *gpio, unsigned offset)
+{
+ struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
+ return chip->irq_base + offset;
+}
+
+static void gsta_gpio_setup(struct gsta_gpio *chip) /* called from probe */
+{
+ struct gpio_chip *gpio = &chip->gpio;
+
+ /*
+ * ARCH_NR_GPIOS is currently 256 and dynamic allocation starts
+ * from the end. However, for compatibility, we need the first
+ * ConneXt device to start from gpio 0: it's the main chipset
+ * on most boards so documents and drivers assume gpio0..gpio127
+ */
+ static int gpio_base;
+
+ gpio->label = dev_name(chip->dev);
+ gpio->owner = THIS_MODULE;
+ gpio->direction_input = gsta_gpio_direction_input;
+ gpio->get = gsta_gpio_get;
+ gpio->direction_output = gsta_gpio_direction_output;
+ gpio->set = gsta_gpio_set;
+ gpio->dbg_show = NULL;
+ gpio->base = gpio_base;
+ gpio->ngpio = GSTA_NR_GPIO;
+ gpio->can_sleep = 0;
+ gpio->to_irq = gsta_gpio_to_irq;
+
+ /*
+ * After the first device, turn to dynamic gpio numbers.
+ * For example, with ARCH_NR_GPIOS = 256 we can fit two cards
+ */
+ if (!gpio_base)
+ gpio_base = -1;
+}
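The static gpio_base above gives the first ConneXt instance the fixed base 0 and every later instance a dynamic base (-1). A standalone illustration of that pattern, with names local to the sketch:

#include <stdio.h>

/* First caller gets 0, every later caller gets -1 (dynamic allocation). */
static int pick_gpio_base(void)
{
	static int gpio_base;
	int base = gpio_base;

	if (!gpio_base)
		gpio_base = -1;

	return base;
}

int main(void)
{
	printf("%d %d %d\n", pick_gpio_base(), pick_gpio_base(), pick_gpio_base());
	return 0;	/* prints: 0 -1 -1 */
}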
+
+/*
+ * Special method: alternate functions and pullup/pulldown. This is only
+ * invoked on startup to configure gpio's according to platform data.
+ * FIXME : this functionality shall be managed (and exported to other drivers)
+ * via the pin control subsystem.
+ */
+static void gsta_set_config(struct gsta_gpio *chip, int nr, unsigned cfg)
+{
+ struct gsta_regs __iomem *regs = __regs(chip, nr);
+ unsigned long flags;
+ u32 bit = __bit(nr);
+ u32 val;
+ int err = 0;
+
+ pr_info("%s: %p %i %i\n", __func__, chip, nr, cfg);
+
+ if (cfg == PINMUX_TYPE_NONE)
+ return;
+
+ /* Alternate function or not? */
+ spin_lock_irqsave(&chip->lock, flags);
+ val = readl(&regs->afsela);
+ if (cfg == PINMUX_TYPE_FUNCTION)
+ val |= bit;
+ else
+ val &= ~bit;
+ writel(val | bit, &regs->afsela);
+ if (cfg == PINMUX_TYPE_FUNCTION) {
+ spin_unlock_irqrestore(&chip->lock, flags);
+ return;
+ }
+
+ /* not alternate function: set details */
+ switch (cfg) {
+ case PINMUX_TYPE_OUTPUT_LOW:
+ writel(bit, &regs->dirs);
+ writel(bit, &regs->datc);
+ break;
+ case PINMUX_TYPE_OUTPUT_HIGH:
+ writel(bit, &regs->dirs);
+ writel(bit, &regs->dats);
+ break;
+ case PINMUX_TYPE_INPUT:
+ writel(bit, &regs->dirc);
+ val = readl(&regs->pdis) | bit;
+ writel(val, &regs->pdis);
+ break;
+ case PINMUX_TYPE_INPUT_PULLUP:
+ writel(bit, &regs->dirc);
+ val = readl(&regs->pdis) & ~bit;
+ writel(val, &regs->pdis);
+ writel(bit, &regs->dats);
+ break;
+ case PINMUX_TYPE_INPUT_PULLDOWN:
+ writel(bit, &regs->dirc);
+ val = readl(&regs->pdis) & ~bit;
+ writel(val, &regs->pdis);
+ writel(bit, &regs->datc);
+ break;
+ default:
+ err = 1;
+ }
+ spin_unlock_irqrestore(&chip->lock, flags);
+ if (err)
+ pr_err("%s: chip %p, pin %i, cfg %i is invalid\n",
+ __func__, chip, nr, cfg);
+}
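gsta_set_config() is fed one pinconfig entry per pin from the MFD-provided platform data in the probe routine further down. A hedged sketch of what such a table might look like on a board: the pin roles are invented, and it is assumed that pinconfig is a fixed-size array indexed by GPIO number (as the probe loop suggests) whose unset entries default to PINMUX_TYPE_NONE:

/* Hypothetical board data, illustrative only. */
static struct sta2x11_gpio_pdata example_gpio_pdata = {
	.pinconfig = {
		[0] = PINMUX_TYPE_OUTPUT_HIGH,	/* e.g. a status LED, on at boot */
		[1] = PINMUX_TYPE_INPUT_PULLUP,	/* e.g. an active-low push button */
		[2] = PINMUX_TYPE_FUNCTION,	/* pin handed to an alternate function */
		/* remaining pins assumed to stay at PINMUX_TYPE_NONE (untouched) */
	},
};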
+
+/*
+ * Irq methods
+ */
+
+static void gsta_irq_disable(struct irq_data *data)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ struct gsta_gpio *chip = gc->private;
+ int nr = data->irq - chip->irq_base;
+ struct gsta_regs __iomem *regs = __regs(chip, nr);
+ u32 bit = __bit(nr);
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->lock, flags);
+ if (chip->irq_type[nr] & IRQ_TYPE_EDGE_RISING) {
+ val = readl(&regs->rimsc) & ~bit;
+ writel(val, &regs->rimsc);
+ }
+ if (chip->irq_type[nr] & IRQ_TYPE_EDGE_FALLING) {
+ val = readl(&regs->fimsc) & ~bit;
+ writel(val, &regs->fimsc);
+ }
+ spin_unlock_irqrestore(&chip->lock, flags);
+ return;
+}
+
+static void gsta_irq_enable(struct irq_data *data)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+ struct gsta_gpio *chip = gc->private;
+ int nr = data->irq - chip->irq_base;
+ struct gsta_regs __iomem *regs = __regs(chip, nr);
+ u32 bit = __bit(nr);
+ u32 val;
+ int type;
+ unsigned long flags;
+
+ type = chip->irq_type[nr];
+
+ spin_lock_irqsave(&chip->lock, flags);
+ val = readl(&regs->rimsc);
+ if (type & IRQ_TYPE_EDGE_RISING)
+ writel(val | bit, &regs->rimsc);
+ else
+ writel(val & ~bit, &regs->rimsc);
+ val = readl(&regs->fimsc);
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ writel(val | bit, &regs->fimsc);
+ else
+ writel(val & ~bit, &regs->fimsc);
+ spin_unlock_irqrestore(&chip->lock, flags);
+ return;
+}
+
+static int gsta_irq_type(struct irq_data *d, unsigned int type)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct gsta_gpio *chip = gc->private;
+ int nr = d->irq - chip->irq_base;
+
+ /* We only support edge interrupts */
+ if (!(type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))) {
+ pr_debug("%s: unsupported type 0x%x\n", __func__, type);
+ return -EINVAL;
+ }
+
+ chip->irq_type[nr] = type; /* used for enable/disable */
+
+ gsta_irq_enable(d);
+ return 0;
+}
+
+static irqreturn_t gsta_gpio_handler(int irq, void *dev_id)
+{
+ struct gsta_gpio *chip = dev_id;
+ struct gsta_regs __iomem *regs;
+ u32 is;
+ int i, nr, base;
+ irqreturn_t ret = IRQ_NONE;
+
+ for (i = 0; i < GSTA_NR_BLOCKS; i++) {
+ regs = chip->regs[i];
+ base = chip->irq_base + i * GSTA_GPIO_PER_BLOCK;
+ while ((is = readl(&regs->is))) {
+ nr = __ffs(is);
+ irq = base + nr;
+ generic_handle_irq(irq);
+ writel(1 << nr, &regs->ic);
+ ret = IRQ_HANDLED;
+ }
+ }
+ return ret;
+}
+
+static __devinit void gsta_alloc_irq_chip(struct gsta_gpio *chip)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+
+ gc = irq_alloc_generic_chip(KBUILD_MODNAME, 1, chip->irq_base,
+ chip->reg_base, handle_simple_irq);
+ gc->private = chip;
+ ct = gc->chip_types;
+
+ ct->chip.irq_set_type = gsta_irq_type;
+ ct->chip.irq_disable = gsta_irq_disable;
+ ct->chip.irq_enable = gsta_irq_enable;
+
+ /* FIXME: this makes at most 32 interrupts. Request 0 for now */
+ irq_setup_generic_chip(gc, 0 /* IRQ_MSK(GSTA_GPIO_PER_BLOCK) */, 0,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+
+ /* Set up all 128 interrupts: code from setup_generic_chip */
+ {
+ struct irq_chip_type *ct = gc->chip_types;
+ int i, j;
+ for (j = 0; j < GSTA_NR_GPIO; j++) {
+ i = chip->irq_base + j;
+ irq_set_chip_and_handler(i, &ct->chip, ct->handler);
+ irq_set_chip_data(i, gc);
+ irq_modify_status(i, IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+ }
+ gc->irq_cnt = i - gc->irq_base;
+ }
+}
+
+/* The platform device used here is instantiated by the MFD device */
+static int __devinit gsta_probe(struct platform_device *dev)
+{
+ int i, err;
+ struct pci_dev *pdev;
+ struct sta2x11_gpio_pdata *gpio_pdata;
+ struct gsta_gpio *chip;
+ struct resource *res;
+
+ pdev = *(struct pci_dev **)(dev->dev.platform_data);
+ gpio_pdata = dev_get_platdata(&pdev->dev);
+
+ if (gpio_pdata == NULL) {
+ dev_err(&dev->dev, "no gpio config\n");
+ return -ENODEV;
+ }
+ pr_debug("gpio config: %p\n", gpio_pdata);
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+
+ chip = devm_kzalloc(&dev->dev, sizeof(*chip), GFP_KERNEL);
+ chip->dev = &dev->dev;
+ chip->reg_base = devm_request_and_ioremap(&dev->dev, res);
+
+ for (i = 0; i < GSTA_NR_BLOCKS; i++) {
+ chip->regs[i] = chip->reg_base + i * 4096;
+ /* disable all irqs */
+ writel(0, &chip->regs[i]->rimsc);
+ writel(0, &chip->regs[i]->fimsc);
+ writel(~0, &chip->regs[i]->ic);
+ }
+ spin_lock_init(&chip->lock);
+ gsta_gpio_setup(chip);
+ for (i = 0; i < GSTA_NR_GPIO; i++)
+ gsta_set_config(chip, i, gpio_pdata->pinconfig[i]);
+
+ /* 384 was used in previous code: keep it for compatibility with other drivers */
+ err = irq_alloc_descs(-1, 384, GSTA_NR_GPIO, NUMA_NO_NODE);
+ if (err < 0) {
+ dev_warn(&dev->dev, "sta2x11 gpio: Can't get irq base (%i)\n",
+ -err);
+ return err;
+ }
+ chip->irq_base = err;
+ gsta_alloc_irq_chip(chip);
+
+ err = request_irq(pdev->irq, gsta_gpio_handler,
+ IRQF_SHARED, KBUILD_MODNAME, chip);
+ if (err < 0) {
+ dev_err(&dev->dev, "sta2x11 gpio: Can't request irq (%i)\n",
+ -err);
+ goto err_free_descs;
+ }
+
+ err = gpiochip_add(&chip->gpio);
+ if (err < 0) {
+ dev_err(&dev->dev, "sta2x11 gpio: Can't register (%i)\n",
+ -err);
+ goto err_free_irq;
+ }
+
+ platform_set_drvdata(dev, chip);
+ return 0;
+
+err_free_irq:
+ free_irq(pdev->irq, chip);
+err_free_descs:
+ irq_free_descs(chip->irq_base, GSTA_NR_GPIO);
+ return err;
+}
+
+static struct platform_driver sta2x11_gpio_platform_driver = {
+ .driver = {
+ .name = "sta2x11-gpio",
+ .owner = THIS_MODULE,
+ },
+ .probe = gsta_probe,
+};
+
+module_platform_driver(sta2x11_gpio_platform_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("sta2x11_gpio GPIO driver");
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
new file mode 100644
index 00000000000..e35096bf3cf
--- /dev/null
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -0,0 +1,301 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/of_platform.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/of_gpio.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <lantiq_soc.h>
+
+/*
+ * The Serial To Parallel (STP) is found on MIPS-based Lantiq SoCs. It is a
+ * peripheral controller used to drive external shift register cascades. At most
+ * 3 groups of 8 bits can be driven. The hardware is able to allow the DSL modem
+ * to drive the 2 LSBs of the cascade automatically.
+ */
+
+/* control register 0 */
+#define XWAY_STP_CON0 0x00
+/* control register 1 */
+#define XWAY_STP_CON1 0x04
+/* data register 0 */
+#define XWAY_STP_CPU0 0x08
+/* data register 1 */
+#define XWAY_STP_CPU1 0x0C
+/* access register */
+#define XWAY_STP_AR 0x10
+
+/* software or hardware update select bit */
+#define XWAY_STP_CON_SWU BIT(31)
+
+/* automatic update rates */
+#define XWAY_STP_2HZ 0
+#define XWAY_STP_4HZ BIT(23)
+#define XWAY_STP_8HZ BIT(24)
+#define XWAY_STP_10HZ (BIT(24) | BIT(23))
+#define XWAY_STP_SPEED_MASK (0xf << 23)
+
+/* clock source for automatic update */
+#define XWAY_STP_UPD_FPI BIT(31)
+#define XWAY_STP_UPD_MASK (BIT(31) | BIT(30))
+
+/* let the adsl core drive the 2 LSBs */
+#define XWAY_STP_ADSL_SHIFT 24
+#define XWAY_STP_ADSL_MASK 0x3
+
+/* 2 groups of 3 bits can be driven by the phys */
+#define XWAY_STP_PHY_MASK 0x3
+#define XWAY_STP_PHY1_SHIFT 27
+#define XWAY_STP_PHY2_SHIFT 15
+
+/* STP has 3 groups of 8 bits */
+#define XWAY_STP_GROUP0 BIT(0)
+#define XWAY_STP_GROUP1 BIT(1)
+#define XWAY_STP_GROUP2 BIT(2)
+#define XWAY_STP_GROUP_MASK (0x7)
+
+/* Edge configuration bits */
+#define XWAY_STP_FALLING BIT(26)
+#define XWAY_STP_EDGE_MASK BIT(26)
+
+#define xway_stp_r32(m, reg) __raw_readl(m + reg)
+#define xway_stp_w32(m, val, reg) __raw_writel(val, m + reg)
+#define xway_stp_w32_mask(m, clear, set, reg) \
+ ltq_w32((ltq_r32(m + reg) & ~(clear)) | (set), \
+ m + reg)
+
+struct xway_stp {
+ struct gpio_chip gc;
+ void __iomem *virt;
+ u32 edge; /* rising or falling edge triggered shift register */
+ u16 shadow; /* shadow the shift registers state */
+ u8 groups; /* we can drive 1-3 groups of 8bit each */
+ u8 dsl; /* the 2 LSBs can be driven by the dsl core */
+ u8 phy1; /* 3 bits can be driven by phy1 */
+ u8 phy2; /* 3 bits can be driven by phy2 */
+ u8 reserved; /* mask out the hw driven bits in gpio_request */
+};
+
+/**
+ * xway_stp_set() - gpio_chip->set - set gpios.
+ * @gc: Pointer to gpio_chip device structure.
+ * @gpio: GPIO signal number.
+ * @val: Value to be written to specified signal.
+ *
+ * Set the shadow value and update the STP shift-register outputs.
+ */
+static void xway_stp_set(struct gpio_chip *gc, unsigned gpio, int val)
+{
+ struct xway_stp *chip =
+ container_of(gc, struct xway_stp, gc);
+
+ if (val)
+ chip->shadow |= BIT(gpio);
+ else
+ chip->shadow &= ~BIT(gpio);
+ xway_stp_w32(chip->virt, chip->shadow, XWAY_STP_CPU0);
+ xway_stp_w32_mask(chip->virt, 0, XWAY_STP_CON_SWU, XWAY_STP_CON0);
+}
+
+/**
+ * xway_stp_dir_out() - gpio_chip->dir_out - set gpio direction.
+ * @gc: Pointer to gpio_chip device structure.
+ * @gpio: GPIO signal number.
+ * @val: Value to be written to specified signal.
+ *
+ * Same as xway_stp_set, always returns 0.
+ */
+static int xway_stp_dir_out(struct gpio_chip *gc, unsigned gpio, int val)
+{
+ xway_stp_set(gc, gpio, val);
+
+ return 0;
+}
+
+/**
+ * xway_stp_request() - gpio_chip->request
+ * @gc: Pointer to gpio_chip device structure.
+ * @gpio: GPIO signal number.
+ *
+ * Reject requests for pins that are driven by the hardware.
+ */
+static int xway_stp_request(struct gpio_chip *gc, unsigned gpio)
+{
+ struct xway_stp *chip =
+ container_of(gc, struct xway_stp, gc);
+
+ if ((gpio < 8) && (chip->reserved & BIT(gpio))) {
+ dev_err(gc->dev, "GPIO %d is driven by hardware\n", gpio);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+ * xway_stp_hw_init() - Configure the STP unit with sane defaults
+ * @chip: Pointer to the xway_stp chip structure (holds the remapped registers)
+ */
+static int xway_stp_hw_init(struct xway_stp *chip)
+{
+ /* sane defaults */
+ xway_stp_w32(chip->virt, 0, XWAY_STP_AR);
+ xway_stp_w32(chip->virt, 0, XWAY_STP_CPU0);
+ xway_stp_w32(chip->virt, 0, XWAY_STP_CPU1);
+ xway_stp_w32(chip->virt, XWAY_STP_CON_SWU, XWAY_STP_CON0);
+ xway_stp_w32(chip->virt, 0, XWAY_STP_CON1);
+
+ /* apply edge trigger settings for the shift register */
+ xway_stp_w32_mask(chip->virt, XWAY_STP_EDGE_MASK,
+ chip->edge, XWAY_STP_CON0);
+
+ /* apply led group settings */
+ xway_stp_w32_mask(chip->virt, XWAY_STP_GROUP_MASK,
+ chip->groups, XWAY_STP_CON1);
+
+ /* tell the hardware which pins are controlled by the dsl modem */
+ xway_stp_w32_mask(chip->virt,
+ XWAY_STP_ADSL_MASK << XWAY_STP_ADSL_SHIFT,
+ chip->dsl << XWAY_STP_ADSL_SHIFT,
+ XWAY_STP_CON0);
+
+ /* tell the hardware which pins are controlled by the phys */
+ xway_stp_w32_mask(chip->virt,
+ XWAY_STP_PHY_MASK << XWAY_STP_PHY1_SHIFT,
+ chip->phy1 << XWAY_STP_PHY1_SHIFT,
+ XWAY_STP_CON0);
+ xway_stp_w32_mask(chip->virt,
+ XWAY_STP_PHY_MASK << XWAY_STP_PHY2_SHIFT,
+ chip->phy2 << XWAY_STP_PHY2_SHIFT,
+ XWAY_STP_CON1);
+
+ /* mask out the hw driven bits in gpio_request */
+ chip->reserved = (chip->phy2 << 5) | (chip->phy1 << 2) | chip->dsl;
+
+ /*
+ * if we have pins that are driven by hw, we need to tell the stp what
+ * clock to use as a timer.
+ */
+ if (chip->reserved)
+ xway_stp_w32_mask(chip->virt, XWAY_STP_UPD_MASK,
+ XWAY_STP_UPD_FPI, XWAY_STP_CON1);
+
+ return 0;
+}
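To make the reserved-pin bookkeeping above concrete, here is an illustrative standalone computation with made-up property values (dsl, phy1 and phy2 all set to 0x3); xway_stp_request() below rejects exactly the outputs flagged this way:

#include <stdio.h>

int main(void)
{
	unsigned int dsl = 0x3, phy1 = 0x3, phy2 = 0x3;	/* illustrative values */
	unsigned int reserved = (phy2 << 5) | (phy1 << 2) | dsl;
	unsigned int gpio;

	for (gpio = 0; gpio < 8; gpio++)
		if (reserved & (1u << gpio))
			printf("output %u is hardware driven\n", gpio);

	return 0;	/* prints outputs 0, 1, 2, 3, 5 and 6 for these values */
}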
+
+static int __devinit xway_stp_probe(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ const __be32 *shadow, *groups, *dsl, *phy;
+ struct xway_stp *chip;
+ struct clk *clk;
+ int ret = 0;
+
+ if (!res) {
+ dev_err(&pdev->dev, "failed to request STP resource\n");
+ return -ENOENT;
+ }
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->virt = devm_request_and_ioremap(&pdev->dev, res);
+ if (!chip->virt) {
+ dev_err(&pdev->dev, "failed to remap STP memory\n");
+ return -ENOMEM;
+ }
+ chip->gc.dev = &pdev->dev;
+ chip->gc.label = "stp-xway";
+ chip->gc.direction_output = xway_stp_dir_out;
+ chip->gc.set = xway_stp_set;
+ chip->gc.request = xway_stp_request;
+ chip->gc.base = -1;
+ chip->gc.owner = THIS_MODULE;
+
+ /* store the shadow value if one was passed by the devicetree */
+ shadow = of_get_property(pdev->dev.of_node, "lantiq,shadow", NULL);
+ if (shadow)
+ chip->shadow = be32_to_cpu(*shadow);
+
+ /* find out which gpio groups should be enabled */
+ groups = of_get_property(pdev->dev.of_node, "lantiq,groups", NULL);
+ if (groups)
+ chip->groups = be32_to_cpu(*groups) & XWAY_STP_GROUP_MASK;
+ else
+ chip->groups = XWAY_STP_GROUP0;
+ chip->gc.ngpio = fls(chip->groups) * 8;
+
+ /* find out which gpios are controlled by the dsl core */
+ dsl = of_get_property(pdev->dev.of_node, "lantiq,dsl", NULL);
+ if (dsl)
+ chip->dsl = be32_to_cpu(*dsl) & XWAY_STP_ADSL_MASK;
+
+ /* find out which gpios are controlled by the phys */
+ if (of_machine_is_compatible("lantiq,ar9") ||
+ of_machine_is_compatible("lantiq,gr9") ||
+ of_machine_is_compatible("lantiq,vr9")) {
+ phy = of_get_property(pdev->dev.of_node, "lantiq,phy1", NULL);
+ if (phy)
+ chip->phy1 = be32_to_cpu(*phy) & XWAY_STP_PHY_MASK;
+ phy = of_get_property(pdev->dev.of_node, "lantiq,phy2", NULL);
+ if (phy)
+ chip->phy2 = be32_to_cpu(*phy) & XWAY_STP_PHY_MASK;
+ }
+
+ /* check which edge trigger we should use, default to a falling edge */
+ if (!of_find_property(pdev->dev.of_node, "lantiq,rising", NULL))
+ chip->edge = XWAY_STP_FALLING;
+
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Failed to get clock\n");
+ return PTR_ERR(clk);
+ }
+ clk_enable(clk);
+
+ ret = xway_stp_hw_init(chip);
+ if (!ret)
+ ret = gpiochip_add(&chip->gc);
+
+ if (!ret)
+ dev_info(&pdev->dev, "Init done\n");
+
+ return ret;
+}
+
+static const struct of_device_id xway_stp_match[] = {
+ { .compatible = "lantiq,gpio-stp-xway" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xway_stp_match);
+
+static struct platform_driver xway_stp_driver = {
+ .probe = xway_stp_probe,
+ .driver = {
+ .name = "gpio-stp-xway",
+ .owner = THIS_MODULE,
+ .of_match_table = xway_stp_match,
+ },
+};
+
+int __init xway_stp_init(void)
+{
+ return platform_driver_register(&xway_stp_driver);
+}
+
+subsys_initcall(xway_stp_init);
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index 7eef648a335..c1ad2884f2e 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -18,14 +18,27 @@
#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
+#include <linux/platform_device.h>
#include <linux/mfd/tps65910.h>
+#include <linux/of_device.h>
+
+struct tps65910_gpio {
+ struct gpio_chip gpio_chip;
+ struct tps65910 *tps65910;
+};
+
+static inline struct tps65910_gpio *to_tps65910_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct tps65910_gpio, gpio_chip);
+}
static int tps65910_gpio_get(struct gpio_chip *gc, unsigned offset)
{
- struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
- uint8_t val;
+ struct tps65910_gpio *tps65910_gpio = to_tps65910_gpio(gc);
+ struct tps65910 *tps65910 = tps65910_gpio->tps65910;
+ unsigned int val;
- tps65910->read(tps65910, TPS65910_GPIO0 + offset, 1, &val);
+ tps65910_reg_read(tps65910, TPS65910_GPIO0 + offset, &val);
if (val & GPIO_STS_MASK)
return 1;
@@ -36,83 +49,170 @@ static int tps65910_gpio_get(struct gpio_chip *gc, unsigned offset)
static void tps65910_gpio_set(struct gpio_chip *gc, unsigned offset,
int value)
{
- struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+ struct tps65910_gpio *tps65910_gpio = to_tps65910_gpio(gc);
+ struct tps65910 *tps65910 = tps65910_gpio->tps65910;
if (value)
- tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
+ tps65910_reg_set_bits(tps65910, TPS65910_GPIO0 + offset,
GPIO_SET_MASK);
else
- tps65910_clear_bits(tps65910, TPS65910_GPIO0 + offset,
+ tps65910_reg_clear_bits(tps65910, TPS65910_GPIO0 + offset,
GPIO_SET_MASK);
}
static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset,
int value)
{
- struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+ struct tps65910_gpio *tps65910_gpio = to_tps65910_gpio(gc);
+ struct tps65910 *tps65910 = tps65910_gpio->tps65910;
/* Set the initial value */
tps65910_gpio_set(gc, offset, value);
- return tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
+ return tps65910_reg_set_bits(tps65910, TPS65910_GPIO0 + offset,
GPIO_CFG_MASK);
}
static int tps65910_gpio_input(struct gpio_chip *gc, unsigned offset)
{
- struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+ struct tps65910_gpio *tps65910_gpio = to_tps65910_gpio(gc);
+ struct tps65910 *tps65910 = tps65910_gpio->tps65910;
- return tps65910_clear_bits(tps65910, TPS65910_GPIO0 + offset,
+ return tps65910_reg_clear_bits(tps65910, TPS65910_GPIO0 + offset,
GPIO_CFG_MASK);
}
-void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base)
+#ifdef CONFIG_OF
+static struct tps65910_board *tps65910_parse_dt_for_gpio(struct device *dev,
+ struct tps65910 *tps65910, int chip_ngpio)
{
+ struct tps65910_board *tps65910_board = tps65910->of_plat_data;
+ unsigned int prop_array[TPS6591X_MAX_NUM_GPIO];
+ int ngpio = min(chip_ngpio, TPS6591X_MAX_NUM_GPIO);
int ret;
- struct tps65910_board *board_data;
+ int idx;
+
+ tps65910_board->gpio_base = -1;
+ ret = of_property_read_u32_array(tps65910->dev->of_node,
+ "ti,en-gpio-sleep", prop_array, ngpio);
+ if (ret < 0) {
+ dev_dbg(dev, "ti,en-gpio-sleep not specified\n");
+ return tps65910_board;
+ }
- if (!gpio_base)
- return;
+ for (idx = 0; idx < ngpio; idx++)
+ tps65910_board->en_gpio_sleep[idx] = (prop_array[idx] != 0);
- tps65910->gpio.owner = THIS_MODULE;
- tps65910->gpio.label = tps65910->i2c_client->name;
- tps65910->gpio.dev = tps65910->dev;
- tps65910->gpio.base = gpio_base;
+ return tps65910_board;
+}
+#else
+static struct tps65910_board *tps65910_parse_dt_for_gpio(struct device *dev,
+ struct tps65910 *tps65910, int chip_ngpio)
+{
+ return NULL;
+}
+#endif
+
+static int __devinit tps65910_gpio_probe(struct platform_device *pdev)
+{
+ struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
+ struct tps65910_board *pdata = dev_get_platdata(tps65910->dev);
+ struct tps65910_gpio *tps65910_gpio;
+ int ret;
+ int i;
+
+ tps65910_gpio = devm_kzalloc(&pdev->dev,
+ sizeof(*tps65910_gpio), GFP_KERNEL);
+ if (!tps65910_gpio) {
+ dev_err(&pdev->dev, "Could not allocate tps65910_gpio\n");
+ return -ENOMEM;
+ }
+
+ tps65910_gpio->tps65910 = tps65910;
+
+ tps65910_gpio->gpio_chip.owner = THIS_MODULE;
+ tps65910_gpio->gpio_chip.label = tps65910->i2c_client->name;
switch(tps65910_chip_id(tps65910)) {
case TPS65910:
- tps65910->gpio.ngpio = TPS65910_NUM_GPIO;
+ tps65910_gpio->gpio_chip.ngpio = TPS65910_NUM_GPIO;
break;
case TPS65911:
- tps65910->gpio.ngpio = TPS65911_NUM_GPIO;
+ tps65910_gpio->gpio_chip.ngpio = TPS65911_NUM_GPIO;
break;
default:
- return;
+ return -EINVAL;
+ }
+ tps65910_gpio->gpio_chip.can_sleep = 1;
+ tps65910_gpio->gpio_chip.direction_input = tps65910_gpio_input;
+ tps65910_gpio->gpio_chip.direction_output = tps65910_gpio_output;
+ tps65910_gpio->gpio_chip.set = tps65910_gpio_set;
+ tps65910_gpio->gpio_chip.get = tps65910_gpio_get;
+ tps65910_gpio->gpio_chip.dev = &pdev->dev;
+ if (pdata && pdata->gpio_base)
+ tps65910_gpio->gpio_chip.base = pdata->gpio_base;
+ else
+ tps65910_gpio->gpio_chip.base = -1;
+
+ if (!pdata && tps65910->dev->of_node)
+ pdata = tps65910_parse_dt_for_gpio(&pdev->dev, tps65910,
+ tps65910_gpio->gpio_chip.ngpio);
+
+ if (!pdata)
+ goto skip_init;
+
+ /* Configure sleep control for gpios if provided */
+ for (i = 0; i < tps65910_gpio->gpio_chip.ngpio; ++i) {
+ if (!pdata->en_gpio_sleep[i])
+ continue;
+
+ ret = tps65910_reg_set_bits(tps65910,
+ TPS65910_GPIO0 + i, GPIO_SLEEP_MASK);
+ if (ret < 0)
+ dev_warn(tps65910->dev,
+ "GPIO Sleep setting failed with err %d\n", ret);
}
- tps65910->gpio.can_sleep = 1;
-
- tps65910->gpio.direction_input = tps65910_gpio_input;
- tps65910->gpio.direction_output = tps65910_gpio_output;
- tps65910->gpio.set = tps65910_gpio_set;
- tps65910->gpio.get = tps65910_gpio_get;
-
- /* Configure sleep control for gpios */
- board_data = dev_get_platdata(tps65910->dev);
- if (board_data) {
- int i;
- for (i = 0; i < tps65910->gpio.ngpio; ++i) {
- if (board_data->en_gpio_sleep[i]) {
- ret = tps65910_set_bits(tps65910,
- TPS65910_GPIO0 + i, GPIO_SLEEP_MASK);
- if (ret < 0)
- dev_warn(tps65910->dev,
- "GPIO Sleep setting failed\n");
- }
- }
+
+skip_init:
+ ret = gpiochip_add(&tps65910_gpio->gpio_chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+ return ret;
}
- ret = gpiochip_add(&tps65910->gpio);
+ platform_set_drvdata(pdev, tps65910_gpio);
+
+ return ret;
+}
+
+static int __devexit tps65910_gpio_remove(struct platform_device *pdev)
+{
+ struct tps65910_gpio *tps65910_gpio = platform_get_drvdata(pdev);
- if (ret)
- dev_warn(tps65910->dev, "GPIO registration failed: %d\n", ret);
+ return gpiochip_remove(&tps65910_gpio->gpio_chip);
}
+
+static struct platform_driver tps65910_gpio_driver = {
+ .driver.name = "tps65910-gpio",
+ .driver.owner = THIS_MODULE,
+ .probe = tps65910_gpio_probe,
+ .remove = __devexit_p(tps65910_gpio_remove),
+};
+
+static int __init tps65910_gpio_init(void)
+{
+ return platform_driver_register(&tps65910_gpio_driver);
+}
+subsys_initcall(tps65910_gpio_init);
+
+static void __exit tps65910_gpio_exit(void)
+{
+ platform_driver_unregister(&tps65910_gpio_driver);
+}
+module_exit(tps65910_gpio_exit);
+
+MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
+MODULE_AUTHOR("Jorge Eduardo Candelaria jedu@slimlogic.co.uk>");
+MODULE_DESCRIPTION("GPIO interface for TPS65910/TPS6511 PMICs");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps65910-gpio");
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index deb949e75ec..e56a2165641 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -102,10 +102,8 @@ static int wm831x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
struct wm831x *wm831x = wm831x_gpio->wm831x;
- if (!wm831x->irq_base)
- return -EINVAL;
-
- return wm831x->irq_base + WM831X_IRQ_GPIO_1 + offset;
+ return irq_create_mapping(wm831x->irq_domain,
+ WM831X_IRQ_GPIO_1 + offset);
}
static int wm831x_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 5f13c62e64b..5a3bb3d738d 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -49,7 +49,6 @@ config I2C_CHARDEV
config I2C_MUX
tristate "I2C bus multiplexing support"
- depends on EXPERIMENTAL
help
Say Y here if you want the I2C core to support the ability to
handle multiplexed I2C bus topologies, by presenting each
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 94468a64ce3..7244c8be606 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -445,20 +445,6 @@ config I2C_IOP3XX
This driver can also be built as a module. If so, the module
will be called i2c-iop3xx.
-config I2C_IXP2000
- tristate "IXP2000 GPIO-Based I2C Interface (DEPRECATED)"
- depends on ARCH_IXP2000
- select I2C_ALGOBIT
- help
- Say Y here if you have an Intel IXP2000 (2400, 2800, 2850) based
- system and are using GPIO lines for an I2C bus.
-
- This support is also available as a module. If so, the module
- will be called i2c-ixp2000.
-
- This driver is deprecated and will be dropped soon. Use i2c-gpio
- instead.
-
config I2C_MPC
tristate "MPC107/824x/85xx/512x/52xx/83xx/86xx"
depends on PPC
@@ -483,6 +469,7 @@ config I2C_MV64XXX
config I2C_MXS
tristate "Freescale i.MX28 I2C interface"
depends on SOC_IMX28
+ select STMP_DEVICE
help
Say Y here if you want to use the I2C bus controller on
the Freescale i.MX28 processors.
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 569567b0d02..ce3c2be7fb4 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -44,7 +44,6 @@ obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o
obj-$(CONFIG_I2C_IMX) += i2c-imx.o
obj-$(CONFIG_I2C_INTEL_MID) += i2c-intel-mid.o
obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o
-obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o
obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o
obj-$(CONFIG_I2C_MXS) += i2c-mxs.o
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index a76d85fa3ad..79b4bcb3b85 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -755,7 +755,7 @@ static int davinci_i2c_remove(struct platform_device *pdev)
dev->clk = NULL;
davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, 0);
- free_irq(IRQ_I2C, dev);
+ free_irq(dev->irq, dev);
iounmap(dev->base);
kfree(dev);
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index df879924100..1e48bec80ed 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -164,9 +164,15 @@ static char *abort_sources[] = {
u32 dw_readl(struct dw_i2c_dev *dev, int offset)
{
- u32 value = readl(dev->base + offset);
+ u32 value;
- if (dev->swab)
+ if (dev->accessor_flags & ACCESS_16BIT)
+ value = readw(dev->base + offset) |
+ (readw(dev->base + offset + 2) << 16);
+ else
+ value = readl(dev->base + offset);
+
+ if (dev->accessor_flags & ACCESS_SWAP)
return swab32(value);
else
return value;
@@ -174,10 +180,15 @@ u32 dw_readl(struct dw_i2c_dev *dev, int offset)
void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset)
{
- if (dev->swab)
+ if (dev->accessor_flags & ACCESS_SWAP)
b = swab32(b);
- writel(b, dev->base + offset);
+ if (dev->accessor_flags & ACCESS_16BIT) {
+ writew((u16)b, dev->base + offset);
+ writew((u16)(b >> 16), dev->base + offset + 2);
+ } else {
+ writel(b, dev->base + offset);
+ }
}
static u32
@@ -251,14 +262,14 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
input_clock_khz = dev->get_clk_rate_khz(dev);
- /* Configure register endianess access */
reg = dw_readl(dev, DW_IC_COMP_TYPE);
if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) {
- dev->swab = 1;
- reg = DW_IC_COMP_TYPE_VALUE;
- }
-
- if (reg != DW_IC_COMP_TYPE_VALUE) {
+ /* Configure register endianess access */
+ dev->accessor_flags |= ACCESS_SWAP;
+ } else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) {
+ /* Configure register access mode 16bit */
+ dev->accessor_flags |= ACCESS_16BIT;
+ } else if (reg != DW_IC_COMP_TYPE_VALUE) {
dev_err(dev->dev, "Unknown Synopsys component type: "
"0x%08x\n", reg);
return -ENODEV;
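The component-type check above distinguishes three register layouts from a single ID read: a native 32-bit bus, a byte-swapped bus, and a 16-bit-only bus where only the low half of the ID register is visible. A standalone sketch of the same decision logic; COMP_TYPE_VALUE mirrors the driver's DW_IC_COMP_TYPE_VALUE and is assumed here to be 0x44570140, and the swab helper is local to the sketch:

#include <stdint.h>
#include <stdio.h>

#define COMP_TYPE_VALUE 0x44570140u	/* assumed Synopsys ID value */

static uint32_t swab32_example(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0xff00) |
	       ((v << 8) & 0xff0000) | (v << 24);
}

/* Decide the register access mode from the raw ID read, as i2c_dw_init() does. */
static const char *classify(uint32_t reg)
{
	if (reg == COMP_TYPE_VALUE)
		return "native 32-bit access";
	if (reg == swab32_example(COMP_TYPE_VALUE))
		return "byte-swapped access (ACCESS_SWAP)";
	if (reg == (COMP_TYPE_VALUE & 0x0000ffff))
		return "16-bit access (ACCESS_16BIT)";
	return "unknown component";
}

int main(void)
{
	printf("%s\n", classify(0x44570140));	/* native */
	printf("%s\n", classify(0x40015744));	/* swapped */
	printf("%s\n", classify(0x00000140));	/* 16-bit */
	return 0;
}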
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 02d1a2ddd85..9c1840ee09c 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -82,7 +82,7 @@ struct dw_i2c_dev {
unsigned int status;
u32 abort_source;
int irq;
- int swab;
+ u32 accessor_flags;
struct i2c_adapter adapter;
u32 functionality;
u32 master_cfg;
@@ -90,6 +90,9 @@ struct dw_i2c_dev {
unsigned int rx_fifo_depth;
};
+#define ACCESS_SWAP 0x00000001
+#define ACCESS_16BIT 0x00000002
+
extern u32 dw_readl(struct dw_i2c_dev *dev, int offset);
extern void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset);
extern int i2c_dw_init(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 4ba589ab861..0506fef8dc0 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -36,6 +36,7 @@
#include <linux/interrupt.h>
#include <linux/of_i2c.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include "i2c-designware-core.h"
@@ -95,7 +96,7 @@ static int __devinit dw_i2c_probe(struct platform_device *pdev)
r = -ENODEV;
goto err_free_mem;
}
- clk_enable(dev->clk);
+ clk_prepare_enable(dev->clk);
dev->functionality =
I2C_FUNC_I2C |
@@ -155,7 +156,7 @@ err_free_irq:
err_iounmap:
iounmap(dev->base);
err_unuse_clocks:
- clk_disable(dev->clk);
+ clk_disable_unprepare(dev->clk);
clk_put(dev->clk);
dev->clk = NULL;
err_free_mem:
@@ -177,7 +178,7 @@ static int __devexit dw_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&dev->adapter);
put_device(&pdev->dev);
- clk_disable(dev->clk);
+ clk_disable_unprepare(dev->clk);
clk_put(dev->clk);
dev->clk = NULL;
@@ -198,6 +199,31 @@ static const struct of_device_id dw_i2c_of_match[] = {
MODULE_DEVICE_TABLE(of, dw_i2c_of_match);
#endif
+#ifdef CONFIG_PM
+static int dw_i2c_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(i_dev->clk);
+
+ return 0;
+}
+
+static int dw_i2c_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
+
+ clk_prepare_enable(i_dev->clk);
+ i2c_dw_init(i_dev);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(dw_i2c_dev_pm_ops, dw_i2c_suspend, dw_i2c_resume);
+
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:i2c_designware");
@@ -207,6 +233,7 @@ static struct platform_driver dw_i2c_driver = {
.name = "i2c_designware",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(dw_i2c_of_match),
+ .pm = &dw_i2c_dev_pm_ops,
},
};
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index c811289b61e..2f74ae872e1 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -263,11 +263,6 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap)
init_waitqueue_head(&pch_event);
}
-static inline bool ktime_lt(const ktime_t cmp1, const ktime_t cmp2)
-{
- return cmp1.tv64 < cmp2.tv64;
-}
-
/**
* pch_i2c_wait_for_bus_idle() - check the status of bus.
* @adap: Pointer to struct i2c_algo_pch_data.
@@ -317,33 +312,6 @@ static void pch_i2c_start(struct i2c_algo_pch_data *adap)
}
/**
- * pch_i2c_wait_for_xfer_complete() - initiates a wait for the tx complete event
- * @adap: Pointer to struct i2c_algo_pch_data.
- */
-static s32 pch_i2c_wait_for_xfer_complete(struct i2c_algo_pch_data *adap)
-{
- long ret;
- ret = wait_event_timeout(pch_event,
- (adap->pch_event_flag != 0), msecs_to_jiffies(1000));
-
- if (ret == 0) {
- pch_err(adap, "timeout: %x\n", adap->pch_event_flag);
- adap->pch_event_flag = 0;
- return -ETIMEDOUT;
- }
-
- if (adap->pch_event_flag & I2C_ERROR_MASK) {
- pch_err(adap, "error bits set: %x\n", adap->pch_event_flag);
- adap->pch_event_flag = 0;
- return -EIO;
- }
-
- adap->pch_event_flag = 0;
-
- return 0;
-}
-
-/**
* pch_i2c_getack() - to confirm ACK/NACK
* @adap: Pointer to struct i2c_algo_pch_data.
*/
@@ -373,6 +341,40 @@ static void pch_i2c_stop(struct i2c_algo_pch_data *adap)
pch_clrbit(adap->pch_base_address, PCH_I2CCTL, PCH_START);
}
+static int pch_i2c_wait_for_check_xfer(struct i2c_algo_pch_data *adap)
+{
+ long ret;
+
+ ret = wait_event_timeout(pch_event,
+ (adap->pch_event_flag != 0), msecs_to_jiffies(1000));
+ if (!ret) {
+ pch_err(adap, "%s:wait-event timeout\n", __func__);
+ adap->pch_event_flag = 0;
+ pch_i2c_stop(adap);
+ pch_i2c_init(adap);
+ return -ETIMEDOUT;
+ }
+
+ if (adap->pch_event_flag & I2C_ERROR_MASK) {
+ pch_err(adap, "Lost Arbitration\n");
+ adap->pch_event_flag = 0;
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMAL_BIT);
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
+ pch_i2c_init(adap);
+ return -EAGAIN;
+ }
+
+ adap->pch_event_flag = 0;
+
+ if (pch_i2c_getack(adap)) {
+ pch_dbg(adap, "Receive NACK for slave address "
+ "setting\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
/**
* pch_i2c_repstart() - generate repeated start condition in normal mode
* @adap: Pointer to struct i2c_algo_pch_data.
@@ -427,27 +429,12 @@ static s32 pch_i2c_writebytes(struct i2c_adapter *i2c_adap,
if (first)
pch_i2c_start(adap);
- rtn = pch_i2c_wait_for_xfer_complete(adap);
- if (rtn == 0) {
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave address"
- "setting\n");
- return -EIO;
- }
- addr_8_lsb = (addr & I2C_ADDR_MSK);
- iowrite32(addr_8_lsb, p + PCH_I2CDR);
- } else if (rtn == -EIO) { /* Arbitration Lost */
- pch_err(adap, "Lost Arbitration\n");
- pch_clrbit(adap->pch_base_address, PCH_I2CSR,
- I2CMAL_BIT);
- pch_clrbit(adap->pch_base_address, PCH_I2CSR,
- I2CMIF_BIT);
- pch_i2c_init(adap);
- return -EAGAIN;
- } else { /* wait-event timeout */
- pch_i2c_stop(adap);
- return -ETIME;
- }
+ rtn = pch_i2c_wait_for_check_xfer(adap);
+ if (rtn)
+ return rtn;
+
+ addr_8_lsb = (addr & I2C_ADDR_MSK);
+ iowrite32(addr_8_lsb, p + PCH_I2CDR);
} else {
/* set 7 bit slave address and R/W bit as 0 */
iowrite32(addr << 1, p + PCH_I2CDR);
@@ -455,44 +442,21 @@ static s32 pch_i2c_writebytes(struct i2c_adapter *i2c_adap,
pch_i2c_start(adap);
}
- rtn = pch_i2c_wait_for_xfer_complete(adap);
- if (rtn == 0) {
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave address"
- "setting\n");
- return -EIO;
- }
- } else if (rtn == -EIO) { /* Arbitration Lost */
- pch_err(adap, "Lost Arbitration\n");
- pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMAL_BIT);
- pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
- pch_i2c_init(adap);
- return -EAGAIN;
- } else { /* wait-event timeout */
- pch_i2c_stop(adap);
- return -ETIME;
- }
+ rtn = pch_i2c_wait_for_check_xfer(adap);
+ if (rtn)
+ return rtn;
for (wrcount = 0; wrcount < length; ++wrcount) {
/* write buffer value to I2C data register */
iowrite32(buf[wrcount], p + PCH_I2CDR);
pch_dbg(adap, "writing %x to Data register\n", buf[wrcount]);
- rtn = pch_i2c_wait_for_xfer_complete(adap);
- if (rtn == 0) {
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave address"
- "setting\n");
- return -EIO;
- }
- pch_clrbit(adap->pch_base_address, PCH_I2CSR,
- I2CMCF_BIT);
- pch_clrbit(adap->pch_base_address, PCH_I2CSR,
- I2CMIF_BIT);
- } else { /* wait-event timeout */
- pch_i2c_stop(adap);
- return -ETIME;
- }
+ rtn = pch_i2c_wait_for_check_xfer(adap);
+ if (rtn)
+ return rtn;
+
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMCF_BIT);
+ pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
}
/* check if this is the last message */
@@ -580,50 +544,21 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
if (first)
pch_i2c_start(adap);
- rtn = pch_i2c_wait_for_xfer_complete(adap);
- if (rtn == 0) {
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave address"
- "setting\n");
- return -EIO;
- }
- addr_8_lsb = (addr & I2C_ADDR_MSK);
- iowrite32(addr_8_lsb, p + PCH_I2CDR);
- } else if (rtn == -EIO) { /* Arbitration Lost */
- pch_err(adap, "Lost Arbitration\n");
- pch_clrbit(adap->pch_base_address, PCH_I2CSR,
- I2CMAL_BIT);
- pch_clrbit(adap->pch_base_address, PCH_I2CSR,
- I2CMIF_BIT);
- pch_i2c_init(adap);
- return -EAGAIN;
- } else { /* wait-event timeout */
- pch_i2c_stop(adap);
- return -ETIME;
- }
+ rtn = pch_i2c_wait_for_check_xfer(adap);
+ if (rtn)
+ return rtn;
+
+ addr_8_lsb = (addr & I2C_ADDR_MSK);
+ iowrite32(addr_8_lsb, p + PCH_I2CDR);
+
pch_i2c_restart(adap);
- rtn = pch_i2c_wait_for_xfer_complete(adap);
- if (rtn == 0) {
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave address"
- "setting\n");
- return -EIO;
- }
- addr_2_msb |= I2C_RD;
- iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK,
- p + PCH_I2CDR);
- } else if (rtn == -EIO) { /* Arbitration Lost */
- pch_err(adap, "Lost Arbitration\n");
- pch_clrbit(adap->pch_base_address, PCH_I2CSR,
- I2CMAL_BIT);
- pch_clrbit(adap->pch_base_address, PCH_I2CSR,
- I2CMIF_BIT);
- pch_i2c_init(adap);
- return -EAGAIN;
- } else { /* wait-event timeout */
- pch_i2c_stop(adap);
- return -ETIME;
- }
+
+ rtn = pch_i2c_wait_for_check_xfer(adap);
+ if (rtn)
+ return rtn;
+
+ addr_2_msb |= I2C_RD;
+ iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR);
} else {
/* 7 address bits + R/W bit */
addr = (((addr) << 1) | (I2C_RD));
@@ -634,23 +569,9 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
if (first)
pch_i2c_start(adap);
- rtn = pch_i2c_wait_for_xfer_complete(adap);
- if (rtn == 0) {
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave address"
- "setting\n");
- return -EIO;
- }
- } else if (rtn == -EIO) { /* Arbitration Lost */
- pch_err(adap, "Lost Arbitration\n");
- pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMAL_BIT);
- pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
- pch_i2c_init(adap);
- return -EAGAIN;
- } else { /* wait-event timeout */
- pch_i2c_stop(adap);
- return -ETIME;
- }
+ rtn = pch_i2c_wait_for_check_xfer(adap);
+ if (rtn)
+ return rtn;
if (length == 0) {
pch_i2c_stop(adap);
@@ -669,18 +590,9 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
if (loop != 1)
read_index++;
- rtn = pch_i2c_wait_for_xfer_complete(adap);
- if (rtn == 0) {
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave"
- "address setting\n");
- return -EIO;
- }
- } else { /* wait-event timeout */
- pch_i2c_stop(adap);
- return -ETIME;
- }
-
+ rtn = pch_i2c_wait_for_check_xfer(adap);
+ if (rtn)
+ return rtn;
} /* end for */
pch_i2c_sendnack(adap);
@@ -690,17 +602,9 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
if (length != 1)
read_index++;
- rtn = pch_i2c_wait_for_xfer_complete(adap);
- if (rtn == 0) {
- if (pch_i2c_getack(adap)) {
- pch_dbg(adap, "Receive NACK for slave"
- "address setting\n");
- return -EIO;
- }
- } else { /* wait-event timeout */
- pch_i2c_stop(adap);
- return -ETIME;
- }
+ rtn = pch_i2c_wait_for_check_xfer(adap);
+ if (rtn)
+ return rtn;
if (last)
pch_i2c_stop(adap);
@@ -790,7 +694,7 @@ static s32 pch_i2c_xfer(struct i2c_adapter *i2c_adap,
ret = mutex_lock_interruptible(&pch_mutex);
if (ret)
- return -ERESTARTSYS;
+ return ret;
if (adap->p_adapter_info->pch_i2c_suspended) {
mutex_unlock(&pch_mutex);
@@ -909,7 +813,7 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
pch_adap->owner = THIS_MODULE;
pch_adap->class = I2C_CLASS_HWMON;
- strcpy(pch_adap->name, KBUILD_MODNAME);
+ strlcpy(pch_adap->name, KBUILD_MODNAME, sizeof(pch_adap->name));
pch_adap->algo = &pch_algorithm;
pch_adap->algo_data = &adap_info->pch_data[i];
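
Switching from strcpy() to strlcpy() bounds the copy to the size of the destination, so an overlong source string can no longer overrun the fixed-size i2c_adapter name field, and the result is always NUL-terminated. A minimal sketch; the wrapper function is illustrative only:

/* Illustrative only: a bounded, always NUL-terminated copy into the
 * fixed-size adapter name. */
static void pch_set_adapter_name(struct i2c_adapter *adap)
{
	strlcpy(adap->name, KBUILD_MODNAME, sizeof(adap->name));
}
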
@@ -963,7 +867,7 @@ static void __devexit pch_i2c_remove(struct pci_dev *pdev)
pci_iounmap(pdev, adap_info->pch_data[0].pch_base_address);
for (i = 0; i < adap_info->ch_num; i++)
- adap_info->pch_data[i].pch_base_address = 0;
+ adap_info->pch_data[i].pch_base_address = NULL;
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index c0330a41db0..e62d2d93862 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -190,12 +190,7 @@ static int __devinit i2c_gpio_probe(struct platform_device *pdev)
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
- /*
- * If "dev->id" is negative we consider it as zero.
- * The reason to do so is to avoid sysfs names that only make
- * sense when there are multiple adapters.
- */
- adap->nr = (pdev->id != -1) ? pdev->id : 0;
+ adap->nr = pdev->id;
ret = i2c_bit_add_numbered_bus(adap);
if (ret)
goto err_add_bus;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 56bce9a8bcb..8d6b504d65c 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -512,7 +512,7 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
}
/* Setup i2c_imx driver structure */
- strcpy(i2c_imx->adapter.name, pdev->name);
+ strlcpy(i2c_imx->adapter.name, pdev->name, sizeof(i2c_imx->adapter.name));
i2c_imx->adapter.owner = THIS_MODULE;
i2c_imx->adapter.algo = &i2c_imx_algo;
i2c_imx->adapter.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-ixp2000.c b/drivers/i2c/busses/i2c-ixp2000.c
deleted file mode 100644
index 5d263f9014d..00000000000
--- a/drivers/i2c/busses/i2c-ixp2000.c
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * drivers/i2c/busses/i2c-ixp2000.c
- *
- * I2C adapter for IXP2000 systems using GPIOs for I2C bus
- *
- * Author: Deepak Saxena <dsaxena@plexity.net>
- * Based on IXDP2400 code by: Naeem M. Afzal <naeem.m.afzal@intel.com>
- * Made generic by: Jeff Daly <jeffrey.daly@intel.com>
- *
- * Copyright (c) 2003-2004 MontaVista Software Inc.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
- * From Jeff Daly:
- *
- * I2C adapter driver for Intel IXDP2xxx platforms. This should work for any
- * IXP2000 platform if it uses the HW GPIO in the same manner. Basically,
- * SDA and SCL GPIOs have external pullups. Setting the respective GPIO to
- * an input will make the signal a '1' via the pullup. Setting them to
- * outputs will pull them down.
- *
- * The GPIOs are open drain signals and are used as configuration strap inputs
- * during power-up so there's generally a buffer on the board that needs to be
- * 'enabled' to drive the GPIOs.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-#include <linux/slab.h>
-
-#include <mach/hardware.h> /* Pick up IXP2000-specific bits */
-#include <mach/gpio-ixp2000.h>
-
-static inline int ixp2000_scl_pin(void *data)
-{
- return ((struct ixp2000_i2c_pins*)data)->scl_pin;
-}
-
-static inline int ixp2000_sda_pin(void *data)
-{
- return ((struct ixp2000_i2c_pins*)data)->sda_pin;
-}
-
-
-static void ixp2000_bit_setscl(void *data, int val)
-{
- int i = 5000;
-
- if (val) {
- gpio_line_config(ixp2000_scl_pin(data), GPIO_IN);
- while(!gpio_line_get(ixp2000_scl_pin(data)) && i--);
- } else {
- gpio_line_config(ixp2000_scl_pin(data), GPIO_OUT);
- }
-}
-
-static void ixp2000_bit_setsda(void *data, int val)
-{
- if (val) {
- gpio_line_config(ixp2000_sda_pin(data), GPIO_IN);
- } else {
- gpio_line_config(ixp2000_sda_pin(data), GPIO_OUT);
- }
-}
-
-static int ixp2000_bit_getscl(void *data)
-{
- return gpio_line_get(ixp2000_scl_pin(data));
-}
-
-static int ixp2000_bit_getsda(void *data)
-{
- return gpio_line_get(ixp2000_sda_pin(data));
-}
-
-struct ixp2000_i2c_data {
- struct ixp2000_i2c_pins *gpio_pins;
- struct i2c_adapter adapter;
- struct i2c_algo_bit_data algo_data;
-};
-
-static int ixp2000_i2c_remove(struct platform_device *plat_dev)
-{
- struct ixp2000_i2c_data *drv_data = platform_get_drvdata(plat_dev);
-
- platform_set_drvdata(plat_dev, NULL);
-
- i2c_del_adapter(&drv_data->adapter);
-
- kfree(drv_data);
-
- return 0;
-}
-
-static int ixp2000_i2c_probe(struct platform_device *plat_dev)
-{
- int err;
- struct ixp2000_i2c_pins *gpio = plat_dev->dev.platform_data;
- struct ixp2000_i2c_data *drv_data =
- kzalloc(sizeof(struct ixp2000_i2c_data), GFP_KERNEL);
-
- if (!drv_data)
- return -ENOMEM;
- drv_data->gpio_pins = gpio;
-
- drv_data->algo_data.data = gpio;
- drv_data->algo_data.setsda = ixp2000_bit_setsda;
- drv_data->algo_data.setscl = ixp2000_bit_setscl;
- drv_data->algo_data.getsda = ixp2000_bit_getsda;
- drv_data->algo_data.getscl = ixp2000_bit_getscl;
- drv_data->algo_data.udelay = 6;
- drv_data->algo_data.timeout = HZ;
-
- strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name,
- sizeof(drv_data->adapter.name));
- drv_data->adapter.algo_data = &drv_data->algo_data,
-
- drv_data->adapter.dev.parent = &plat_dev->dev;
-
- gpio_line_config(gpio->sda_pin, GPIO_IN);
- gpio_line_config(gpio->scl_pin, GPIO_IN);
- gpio_line_set(gpio->scl_pin, 0);
- gpio_line_set(gpio->sda_pin, 0);
-
- if ((err = i2c_bit_add_bus(&drv_data->adapter)) != 0) {
- dev_err(&plat_dev->dev, "Could not install, error %d\n", err);
- kfree(drv_data);
- return err;
- }
-
- platform_set_drvdata(plat_dev, drv_data);
-
- return 0;
-}
-
-static struct platform_driver ixp2000_i2c_driver = {
- .probe = ixp2000_i2c_probe,
- .remove = ixp2000_i2c_remove,
- .driver = {
- .name = "IXP2000-I2C",
- .owner = THIS_MODULE,
- },
-};
-
-module_platform_driver(ixp2000_i2c_driver);
-
-MODULE_AUTHOR ("Deepak Saxena <dsaxena@plexity.net>");
-MODULE_DESCRIPTION("IXP2000 GPIO-based I2C bus driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:IXP2000-I2C");
-
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 206caacd30d..b76731edbf1 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -64,6 +64,9 @@ struct mpc_i2c {
struct i2c_adapter adap;
int irq;
u32 real_clk;
+#ifdef CONFIG_PM
+ u8 fdr, dfsrr;
+#endif
};
struct mpc_i2c_divider {
@@ -703,6 +706,30 @@ static int __devexit fsl_i2c_remove(struct platform_device *op)
return 0;
};
+#ifdef CONFIG_PM
+static int mpc_i2c_suspend(struct device *dev)
+{
+ struct mpc_i2c *i2c = dev_get_drvdata(dev);
+
+ i2c->fdr = readb(i2c->base + MPC_I2C_FDR);
+ i2c->dfsrr = readb(i2c->base + MPC_I2C_DFSRR);
+
+ return 0;
+}
+
+static int mpc_i2c_resume(struct device *dev)
+{
+ struct mpc_i2c *i2c = dev_get_drvdata(dev);
+
+ writeb(i2c->fdr, i2c->base + MPC_I2C_FDR);
+ writeb(i2c->dfsrr, i2c->base + MPC_I2C_DFSRR);
+
+ return 0;
+}
+
+SIMPLE_DEV_PM_OPS(mpc_i2c_pm_ops, mpc_i2c_suspend, mpc_i2c_resume);
+#endif
+
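
The new callbacks save the frequency-divider (FDR) and digital-filter (DFSRR) registers, which the controller can lose across suspend, and write them back on resume; SIMPLE_DEV_PM_OPS then bundles the pair into a dev_pm_ops that the platform driver references below. A minimal sketch of the same pattern for a hypothetical driver; all of the foo_* names and FOO_CFG are made up:

#ifdef CONFIG_PM
/* Illustrative only: foo_priv, foo_pm_ops and FOO_CFG are made-up names. */
static int foo_suspend(struct device *dev)
{
	struct foo_priv *p = dev_get_drvdata(dev);

	p->saved_cfg = readb(p->base + FOO_CFG);
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_priv *p = dev_get_drvdata(dev);

	writeb(p->saved_cfg, p->base + FOO_CFG);
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
#endif
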
static struct mpc_i2c_data mpc_i2c_data_512x __devinitdata = {
.setup = mpc_i2c_setup_512x,
};
@@ -747,6 +774,9 @@ static struct platform_driver mpc_i2c_driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
.of_match_table = mpc_i2c_of_match,
+#ifdef CONFIG_PM
+ .pm = &mpc_i2c_pm_ops,
+#endif
},
};
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 7fa73eed84a..04eb441b6ce 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -27,8 +27,10 @@
#include <linux/jiffies.h>
#include <linux/io.h>
#include <linux/pinctrl/consumer.h>
-
-#include <mach/common.h>
+#include <linux/stmp_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_i2c.h>
#define DRIVER_NAME "mxs-i2c"
@@ -112,13 +114,9 @@ struct mxs_i2c_dev {
struct i2c_adapter adapter;
};
-/*
- * TODO: check if calls to here are really needed. If not, we could get rid of
- * mxs_reset_block and the mach-dependency. Needs an I2C analyzer, probably.
- */
static void mxs_i2c_reset(struct mxs_i2c_dev *i2c)
{
- mxs_reset_block(i2c->regs);
+ stmp_reset_block(i2c->regs);
writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
writel(MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE,
i2c->regs + MXS_I2C_QUEUECTRL_SET);
@@ -371,6 +369,7 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev)
adap->algo = &mxs_i2c_algo;
adap->dev.parent = dev;
adap->nr = pdev->id;
+ adap->dev.of_node = pdev->dev.of_node;
i2c_set_adapdata(adap, i2c);
err = i2c_add_numbered_adapter(adap);
if (err) {
@@ -380,6 +379,8 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev)
return err;
}
+ of_i2c_register_devices(adap);
+
return 0;
}
@@ -399,10 +400,17 @@ static int __devexit mxs_i2c_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id mxs_i2c_dt_ids[] = {
+ { .compatible = "fsl,imx28-i2c", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_i2c_dt_ids);
+
static struct platform_driver mxs_i2c_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .of_match_table = mxs_i2c_dt_ids,
},
.remove = __devexit_p(mxs_i2c_remove),
};
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 18068dee48f..75194c579b6 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -55,6 +55,7 @@
#include <linux/i2c-ocores.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/of_i2c.h>
struct ocores_i2c {
void __iomem *base;
@@ -343,6 +344,8 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
if (pdata) {
for (i = 0; i < pdata->num_devices; i++)
i2c_new_device(&i2c->adap, pdata->devices + i);
+ } else {
+ of_i2c_register_devices(&i2c->adap);
}
return 0;
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 2adbf1a8fde..675878f49f7 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -171,7 +171,7 @@ static int __devinit i2c_pca_pf_probe(struct platform_device *pdev)
i2c->io_size = resource_size(res);
i2c->irq = irq;
- i2c->adap.nr = pdev->id >= 0 ? pdev->id : 0;
+ i2c->adap.nr = pdev->id;
i2c->adap.owner = THIS_MODULE;
snprintf(i2c->adap.name, sizeof(i2c->adap.name),
"PCA9564/PCA9665 at 0x%08lx",
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index f6733267fa9..a997c7d3f95 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1131,11 +1131,6 @@ static int i2c_pxa_probe(struct platform_device *dev)
spin_lock_init(&i2c->lock);
init_waitqueue_head(&i2c->wait);
- /*
- * If "dev->id" is negative we consider it as zero.
- * The reason to do so is to avoid sysfs names that only make
- * sense when there are multiple adapters.
- */
i2c->adap.nr = dev->id;
snprintf(i2c->adap.name, sizeof(i2c->adap.name), "pxa_i2c-i2c.%u",
i2c->adap.nr);
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 737f7218a32..fa0b1349087 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -44,8 +44,12 @@
#include <plat/regs-iic.h>
#include <plat/iic.h>
-/* i2c controller state */
+/* Treat S3C2410 as baseline hardware, anything else is supported via quirks */
+#define QUIRK_S3C2440 (1 << 0)
+#define QUIRK_HDMIPHY (1 << 1)
+#define QUIRK_NO_GPIO (1 << 2)
+/* i2c controller state */
enum s3c24xx_i2c_state {
STATE_IDLE,
STATE_START,
@@ -54,14 +58,10 @@ enum s3c24xx_i2c_state {
STATE_STOP
};
-enum s3c24xx_i2c_type {
- TYPE_S3C2410,
- TYPE_S3C2440,
-};
-
struct s3c24xx_i2c {
spinlock_t lock;
wait_queue_head_t wait;
+ unsigned int quirks;
unsigned int suspended:1;
struct i2c_msg *msg;
@@ -88,26 +88,45 @@ struct s3c24xx_i2c {
#endif
};
-/* default platform data removed, dev should always carry data. */
+static struct platform_device_id s3c24xx_driver_ids[] = {
+ {
+ .name = "s3c2410-i2c",
+ .driver_data = 0,
+ }, {
+ .name = "s3c2440-i2c",
+ .driver_data = QUIRK_S3C2440,
+ }, {
+ .name = "s3c2440-hdmiphy-i2c",
+ .driver_data = QUIRK_S3C2440 | QUIRK_HDMIPHY | QUIRK_NO_GPIO,
+ }, { },
+};
+MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
+
+#ifdef CONFIG_OF
+static const struct of_device_id s3c24xx_i2c_match[] = {
+ { .compatible = "samsung,s3c2410-i2c", .data = (void *)0 },
+ { .compatible = "samsung,s3c2440-i2c", .data = (void *)QUIRK_S3C2440 },
+ { .compatible = "samsung,s3c2440-hdmiphy-i2c",
+ .data = (void *)(QUIRK_S3C2440 | QUIRK_HDMIPHY | QUIRK_NO_GPIO) },
+ {},
+};
+MODULE_DEVICE_TABLE(of, s3c24xx_i2c_match);
+#endif
-/* s3c24xx_i2c_is2440()
+/* s3c24xx_get_device_quirks
*
- * return true is this is an s3c2440
+ * Get controller type either from device tree or platform device variant.
*/
-static inline int s3c24xx_i2c_is2440(struct s3c24xx_i2c *i2c)
+static inline unsigned int s3c24xx_get_device_quirks(struct platform_device *pdev)
{
- struct platform_device *pdev = to_platform_device(i2c->dev);
- enum s3c24xx_i2c_type type;
-
-#ifdef CONFIG_OF
- if (i2c->dev->of_node)
- return of_device_is_compatible(i2c->dev->of_node,
- "samsung,s3c2440-i2c");
-#endif
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+		match = of_match_node(s3c24xx_i2c_match, pdev->dev.of_node);
+ return (unsigned int)match->data;
+ }
- type = platform_get_device_id(pdev)->driver_data;
- return type == TYPE_S3C2440;
+ return platform_get_device_id(pdev)->driver_data;
}
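
With the quirk bitmask, one probe path serves every controller variant: the platform_device_id's driver_data and the of_device_id's .data carry the same flag word, and s3c24xx_get_device_quirks() returns whichever applies. The rest of the driver then tests individual bits instead of comparing an enum, roughly as in this illustrative helper:

/* Illustrative only: variant checks become simple bit tests. */
static inline bool s3c24xx_i2c_is2440(struct s3c24xx_i2c *i2c)
{
	return i2c->quirks & QUIRK_S3C2440;
}
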
/* s3c24xx_i2c_master_complete
@@ -471,6 +490,13 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
unsigned long iicstat;
int timeout = 400;
+	/* The timeout for the HDMIPHY bus is reduced to 10 ms because a
+	 * hang-up there is expected; waiting the full 400 ms would only
+	 * stall the system needlessly.
+	 */
+ if (i2c->quirks & QUIRK_HDMIPHY)
+ timeout = 10;
+
while (timeout-- > 0) {
iicstat = readl(i2c->regs + S3C2410_IICSTAT);
@@ -480,6 +506,15 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
msleep(1);
}
+ /* hang-up of bus dedicated for HDMIPHY occurred, resetting */
+ if (i2c->quirks & QUIRK_HDMIPHY) {
+ writel(0, i2c->regs + S3C2410_IICCON);
+ writel(0, i2c->regs + S3C2410_IICSTAT);
+ writel(0, i2c->regs + S3C2410_IICDS);
+
+ return 0;
+ }
+
return -ETIMEDOUT;
}
@@ -676,7 +711,7 @@ static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got)
writel(iiccon, i2c->regs + S3C2410_IICCON);
- if (s3c24xx_i2c_is2440(i2c)) {
+ if (i2c->quirks & QUIRK_S3C2440) {
unsigned long sda_delay;
if (pdata->sda_delay) {
@@ -761,6 +796,9 @@ static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
{
int idx, gpio, ret;
+ if (i2c->quirks & QUIRK_NO_GPIO)
+ return 0;
+
for (idx = 0; idx < 2; idx++) {
gpio = of_get_gpio(i2c->dev->of_node, idx);
if (!gpio_is_valid(gpio)) {
@@ -785,6 +823,10 @@ free_gpio:
static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
{
unsigned int idx;
+
+ if (i2c->quirks & QUIRK_NO_GPIO)
+ return;
+
for (idx = 0; idx < 2; idx++)
gpio_free(i2c->gpios[idx]);
}
@@ -906,6 +948,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
goto err_noclk;
}
+ i2c->quirks = s3c24xx_get_device_quirks(pdev);
if (pdata)
memcpy(i2c->pdata, pdata, sizeof(*pdata));
else
@@ -1110,28 +1153,6 @@ static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
/* device driver for platform bus bits */
-static struct platform_device_id s3c24xx_driver_ids[] = {
- {
- .name = "s3c2410-i2c",
- .driver_data = TYPE_S3C2410,
- }, {
- .name = "s3c2440-i2c",
- .driver_data = TYPE_S3C2440,
- }, { },
-};
-MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
-
-#ifdef CONFIG_OF
-static const struct of_device_id s3c24xx_i2c_match[] = {
- { .compatible = "samsung,s3c2410-i2c" },
- { .compatible = "samsung,s3c2440-i2c" },
- {},
-};
-MODULE_DEVICE_TABLE(of, s3c24xx_i2c_match);
-#else
-#define s3c24xx_i2c_match NULL
-#endif
-
static struct platform_driver s3c24xx_i2c_driver = {
.probe = s3c24xx_i2c_probe,
.remove = s3c24xx_i2c_remove,
@@ -1140,7 +1161,7 @@ static struct platform_driver s3c24xx_i2c_driver = {
.owner = THIS_MODULE,
.name = "s3c-i2c",
.pm = S3C24XX_DEV_PM_OPS,
- .of_match_table = s3c24xx_i2c_match,
+ .of_match_table = of_match_ptr(s3c24xx_i2c_match),
},
};
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 675c9692d14..8110ca45f34 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -27,6 +27,7 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
+#include <linux/of_i2c.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
@@ -653,6 +654,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
adap->dev.parent = &dev->dev;
adap->retries = 5;
adap->nr = dev->id;
+ adap->dev.of_node = dev->dev.of_node;
strlcpy(adap->name, dev->name, sizeof(adap->name));
@@ -667,6 +669,8 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
dev_info(&dev->dev, "I2C adapter %d with bus speed %lu Hz\n",
adap->nr, pd->bus_speed);
+
+ of_i2c_register_devices(adap);
return 0;
err_all:
@@ -710,11 +714,18 @@ static const struct dev_pm_ops sh_mobile_i2c_dev_pm_ops = {
.runtime_resume = sh_mobile_i2c_runtime_nop,
};
+static const struct of_device_id sh_mobile_i2c_dt_ids[] __devinitconst = {
+ { .compatible = "renesas,rmobile-iic", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sh_mobile_i2c_dt_ids);
+
static struct platform_driver sh_mobile_i2c_driver = {
.driver = {
.name = "i2c-sh_mobile",
.owner = THIS_MODULE,
.pm = &sh_mobile_i2c_dev_pm_ops,
+ .of_match_table = sh_mobile_i2c_dt_ids,
},
.probe = sh_mobile_i2c_probe,
.remove = sh_mobile_i2c_remove,
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 55e5ea62cce..8b2e555a956 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -401,8 +401,6 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
disable_irq_nosync(i2c_dev->irq);
i2c_dev->irq_disabled = 1;
}
-
- complete(&i2c_dev->msg_complete);
goto err;
}
@@ -411,7 +409,6 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
i2c_dev->msg_err |= I2C_ERR_NO_ACK;
if (status & I2C_INT_ARBITRATION_LOST)
i2c_dev->msg_err |= I2C_ERR_ARBITRATION_LOST;
- complete(&i2c_dev->msg_complete);
goto err;
}
@@ -429,14 +426,14 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
}
+ i2c_writel(i2c_dev, status, I2C_INT_STATUS);
+ if (i2c_dev->is_dvc)
+ dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
+
if (status & I2C_INT_PACKET_XFER_COMPLETE) {
BUG_ON(i2c_dev->msg_buf_remaining);
complete(&i2c_dev->msg_complete);
}
-
- i2c_writel(i2c_dev, status, I2C_INT_STATUS);
- if (i2c_dev->is_dvc)
- dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
return IRQ_HANDLED;
err:
/* An error occurred, mask all interrupts */
@@ -446,6 +443,8 @@ err:
i2c_writel(i2c_dev, status, I2C_INT_STATUS);
if (i2c_dev->is_dvc)
dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
+
+ complete(&i2c_dev->msg_complete);
return IRQ_HANDLED;
}
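
The reordering makes the handler acknowledge I2C_INT_STATUS (and the DVC done bit) before signalling msg_complete, and funnels every error path through a single complete() at the end, so the waiting thread can no longer run ahead of the acknowledgement or miss a wakeup on errors. A generic sketch of the ack-then-complete pattern; the foo_* names and registers are illustrative, not Tegra ones:

/* Illustrative only: acknowledge hardware status before waking the thread
 * that sleeps on the completion. */
static irqreturn_t foo_isr(int irq, void *dev_id)
{
	struct foo_dev *fd = dev_id;
	u32 status = readl(fd->base + FOO_INT_STATUS);

	writel(status, fd->base + FOO_INT_STATUS);	/* ack first ... */
	if (status & FOO_INT_DONE)
		complete(&fd->msg_complete);		/* ... then wake */

	return IRQ_HANDLED;
}
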
@@ -476,12 +475,15 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
packet_header = msg->len - 1;
i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
- packet_header = msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT;
- packet_header |= I2C_HEADER_IE_ENABLE;
+ packet_header = I2C_HEADER_IE_ENABLE;
if (!stop)
packet_header |= I2C_HEADER_REPEAT_START;
- if (msg->flags & I2C_M_TEN)
+ if (msg->flags & I2C_M_TEN) {
+ packet_header |= msg->addr;
packet_header |= I2C_HEADER_10BIT_ADDR;
+ } else {
+ packet_header |= msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT;
+ }
if (msg->flags & I2C_M_IGNORE_NAK)
packet_header |= I2C_HEADER_CONT_ON_NAK;
if (msg->flags & I2C_M_RD)
@@ -557,7 +559,7 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
static u32 tegra_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR;
}
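
For 10-bit targets the slave address now sits in the low bits of the packet header together with I2C_HEADER_10BIT_ADDR, while 7-bit addresses keep the I2C_HEADER_SLAVE_ADDR_SHIFT placement; advertising I2C_FUNC_10BIT_ADDR lets the I2C core route such clients to this adapter. A hypothetical helper (not in the driver) that captures the header layout:

/* Illustrative only: build the address portion of a Tegra packet header. */
static u32 tegra_i2c_addr_header(struct i2c_msg *msg)
{
	u32 hdr = I2C_HEADER_IE_ENABLE;

	if (msg->flags & I2C_M_TEN)
		hdr |= msg->addr | I2C_HEADER_10BIT_ADDR;
	else
		hdr |= msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT;

	return hdr;
}
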
static const struct i2c_algorithm tegra_i2c_algo = {
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
index f585aead50c..eec20db6246 100644
--- a/drivers/i2c/busses/i2c-versatile.c
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -104,13 +104,8 @@ static int i2c_versatile_probe(struct platform_device *dev)
i2c->algo = i2c_versatile_algo;
i2c->algo.data = i2c;
- if (dev->id >= 0) {
- /* static bus numbering */
- i2c->adap.nr = dev->id;
- ret = i2c_bit_add_numbered_bus(&i2c->adap);
- } else
- /* dynamic bus numbering */
- ret = i2c_bit_add_bus(&i2c->adap);
+ i2c->adap.nr = dev->id;
+ ret = i2c_bit_add_numbered_bus(&i2c->adap);
if (ret >= 0) {
platform_set_drvdata(dev, i2c);
of_i2c_register_devices(&i2c->adap);
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 2bded7647ef..641d0e5e330 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -40,6 +40,7 @@
#include <linux/i2c-xiic.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/of_i2c.h>
#define DRIVER_NAME "xiic-i2c"
@@ -705,8 +706,6 @@ static int __devinit xiic_i2c_probe(struct platform_device *pdev)
goto resource_missing;
pdata = (struct xiic_i2c_platform_data *) pdev->dev.platform_data;
- if (!pdata)
- return -EINVAL;
i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
if (!i2c)
@@ -730,6 +729,7 @@ static int __devinit xiic_i2c_probe(struct platform_device *pdev)
i2c->adap = xiic_adapter;
i2c_set_adapdata(&i2c->adap, i2c);
i2c->adap.dev.parent = &pdev->dev;
+ i2c->adap.dev.of_node = pdev->dev.of_node;
xiic_reinit(i2c);
@@ -748,9 +748,13 @@ static int __devinit xiic_i2c_probe(struct platform_device *pdev)
goto add_adapter_failed;
}
- /* add in known devices to the bus */
- for (i = 0; i < pdata->num_devices; i++)
- i2c_new_device(&i2c->adap, pdata->devices + i);
+ if (pdata) {
+ /* add in known devices to the bus */
+ for (i = 0; i < pdata->num_devices; i++)
+ i2c_new_device(&i2c->adap, pdata->devices + i);
+ }
+
+ of_i2c_register_devices(&i2c->adap);
return 0;
@@ -795,12 +799,21 @@ static int __devexit xiic_i2c_remove(struct platform_device* pdev)
return 0;
}
+#if defined(CONFIG_OF)
+static const struct of_device_id xiic_of_match[] __devinitconst = {
+ { .compatible = "xlnx,xps-iic-2.00.a", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xiic_of_match);
+#endif
+
static struct platform_driver xiic_i2c_driver = {
.probe = xiic_i2c_probe,
.remove = __devexit_p(xiic_i2c_remove),
.driver = {
.owner = THIS_MODULE,
.name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(xiic_of_match),
},
};
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index feb7dc35918..a6ad32bc0a9 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -772,6 +772,23 @@ struct device_type i2c_adapter_type = {
};
EXPORT_SYMBOL_GPL(i2c_adapter_type);
+/**
+ * i2c_verify_adapter - return parameter as i2c_adapter or NULL
+ * @dev: device, probably from some driver model iterator
+ *
+ * When traversing the driver model tree, perhaps using driver model
+ * iterators like @device_for_each_child(), you can't assume very much
+ * about the nodes you find. Use this function to avoid oopses caused
+ * by wrongly treating some non-I2C device as an i2c_adapter.
+ */
+struct i2c_adapter *i2c_verify_adapter(struct device *dev)
+{
+ return (dev->type == &i2c_adapter_type)
+ ? to_i2c_adapter(dev)
+ : NULL;
+}
+EXPORT_SYMBOL(i2c_verify_adapter);
+
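
i2c_verify_adapter() plays the same role for adapters that i2c_verify_client() plays for clients: it type-checks a bare struct device before the caller downcasts it. A sketch of one possible use from a driver-model iteration; the callback name is made up:

/* Illustrative only: pick out adapters among arbitrary child devices. */
static int show_if_adapter(struct device *dev, void *data)
{
	struct i2c_adapter *adap = i2c_verify_adapter(dev);

	if (adap)
		dev_info(dev, "i2c adapter %d\n", i2c_adapter_id(adap));
	return 0;
}

/* ... device_for_each_child(parent_dev, NULL, show_if_adapter); */
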
#ifdef CONFIG_I2C_COMPAT
static struct class_compat *i2c_adapter_compat_class;
#endif
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index d7a4833be41..1038c381aea 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -24,6 +24,8 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
+#include <linux/of.h>
+#include <linux/of_i2c.h>
/* multiplexer per channel data */
struct i2c_mux_priv {
@@ -31,11 +33,11 @@ struct i2c_mux_priv {
struct i2c_algorithm algo;
struct i2c_adapter *parent;
- void *mux_dev; /* the mux chip/device */
+ void *mux_priv; /* the mux chip/device */
u32 chan_id; /* the channel id */
- int (*select)(struct i2c_adapter *, void *mux_dev, u32 chan_id);
- int (*deselect)(struct i2c_adapter *, void *mux_dev, u32 chan_id);
+ int (*select)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
+ int (*deselect)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
};
static int i2c_mux_master_xfer(struct i2c_adapter *adap,
@@ -47,11 +49,11 @@ static int i2c_mux_master_xfer(struct i2c_adapter *adap,
/* Switch to the right mux port and perform the transfer. */
- ret = priv->select(parent, priv->mux_dev, priv->chan_id);
+ ret = priv->select(parent, priv->mux_priv, priv->chan_id);
if (ret >= 0)
ret = parent->algo->master_xfer(parent, msgs, num);
if (priv->deselect)
- priv->deselect(parent, priv->mux_dev, priv->chan_id);
+ priv->deselect(parent, priv->mux_priv, priv->chan_id);
return ret;
}
@@ -67,12 +69,12 @@ static int i2c_mux_smbus_xfer(struct i2c_adapter *adap,
/* Select the right mux port and perform the transfer. */
- ret = priv->select(parent, priv->mux_dev, priv->chan_id);
+ ret = priv->select(parent, priv->mux_priv, priv->chan_id);
if (ret >= 0)
ret = parent->algo->smbus_xfer(parent, addr, flags,
read_write, command, size, data);
if (priv->deselect)
- priv->deselect(parent, priv->mux_dev, priv->chan_id);
+ priv->deselect(parent, priv->mux_priv, priv->chan_id);
return ret;
}
@@ -87,7 +89,8 @@ static u32 i2c_mux_functionality(struct i2c_adapter *adap)
}
struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
- void *mux_dev, u32 force_nr, u32 chan_id,
+ struct device *mux_dev,
+ void *mux_priv, u32 force_nr, u32 chan_id,
int (*select) (struct i2c_adapter *,
void *, u32),
int (*deselect) (struct i2c_adapter *,
@@ -102,7 +105,7 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
/* Set up private adapter data */
priv->parent = parent;
- priv->mux_dev = mux_dev;
+ priv->mux_priv = mux_priv;
priv->chan_id = chan_id;
priv->select = select;
priv->deselect = deselect;
@@ -124,6 +127,25 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
priv->adap.algo_data = priv;
priv->adap.dev.parent = &parent->dev;
+	/*
+	 * Try to populate the mux adapter's of_node; this is skipped at
+	 * run time when the mux device has no of_node (e.g. !CONFIG_OF).
+	 */
+ if (mux_dev->of_node) {
+ struct device_node *child;
+ u32 reg;
+
+ for_each_child_of_node(mux_dev->of_node, child) {
+ ret = of_property_read_u32(child, "reg", &reg);
+ if (ret)
+ continue;
+ if (chan_id == reg) {
+ priv->adap.dev.of_node = child;
+ break;
+ }
+ }
+ }
+
if (force_nr) {
priv->adap.nr = force_nr;
ret = i2c_add_numbered_adapter(&priv->adap);
@@ -141,6 +163,8 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
dev_info(&parent->dev, "Added multiplexed i2c bus %d\n",
i2c_adapter_id(&priv->adap));
+ of_i2c_register_devices(&priv->adap);
+
return &priv->adap;
}
EXPORT_SYMBOL_GPL(i2c_add_mux_adapter);
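
Because i2c_add_mux_adapter() now also receives the mux's struct device, the core can search that device's DT children for a node whose reg property matches the channel id, attach it to the child adapter, and register the I2C devices described beneath it; existing callers (see the pca954x and pca9541 hunks below) only gain one extra argument. A hedged sketch of a caller using the new signature; struct my_mux and the select/deselect callbacks are illustrative:

/* Illustrative only: struct my_mux and the callbacks are made-up names. */
static int my_mux_add_channel(struct i2c_client *client,
			      struct my_mux *mux, u32 chan)
{
	struct i2c_adapter *parent = to_i2c_adapter(client->dev.parent);

	mux->adap[chan] = i2c_add_mux_adapter(parent, &client->dev, mux,
					      0 /* dynamic bus number */, chan,
					      my_mux_select, my_mux_deselect);
	return mux->adap[chan] ? 0 : -ENODEV;
}
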
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 90b7a016389..beb2491db27 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -15,7 +15,7 @@ config I2C_MUX_GPIO
through GPIO pins.
This driver can also be built as a module. If so, the module
- will be called gpio-i2cmux.
+ will be called i2c-mux-gpio.
config I2C_MUX_PCA9541
tristate "NXP PCA9541 I2C Master Selector"
@@ -25,7 +25,7 @@ config I2C_MUX_PCA9541
I2C Master Selector.
This driver can also be built as a module. If so, the module
- will be called pca9541.
+ will be called i2c-mux-pca9541.
config I2C_MUX_PCA954x
tristate "Philips PCA954x I2C Mux/switches"
@@ -35,6 +35,6 @@ config I2C_MUX_PCA954x
I2C mux/switch devices.
This driver can also be built as a module. If so, the module
- will be called pca954x.
+ will be called i2c-mux-pca954x.
endmenu
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index 4640436ea61..5826249b29c 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -1,8 +1,8 @@
#
# Makefile for multiplexer I2C chip drivers.
-obj-$(CONFIG_I2C_MUX_GPIO) += gpio-i2cmux.o
-obj-$(CONFIG_I2C_MUX_PCA9541) += pca9541.o
-obj-$(CONFIG_I2C_MUX_PCA954x) += pca954x.o
+obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o
+obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o
+obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o
ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
diff --git a/drivers/i2c/muxes/gpio-i2cmux.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index e5fa695eb0f..68b1f8ec343 100644
--- a/drivers/i2c/muxes/gpio-i2cmux.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -10,7 +10,7 @@
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
-#include <linux/gpio-i2cmux.h>
+#include <linux/i2c-mux-gpio.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -20,10 +20,10 @@
struct gpiomux {
struct i2c_adapter *parent;
struct i2c_adapter **adap; /* child busses */
- struct gpio_i2cmux_platform_data data;
+ struct i2c_mux_gpio_platform_data data;
};
-static void gpiomux_set(const struct gpiomux *mux, unsigned val)
+static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val)
{
int i;
@@ -31,28 +31,28 @@ static void gpiomux_set(const struct gpiomux *mux, unsigned val)
gpio_set_value(mux->data.gpios[i], val & (1 << i));
}
-static int gpiomux_select(struct i2c_adapter *adap, void *data, u32 chan)
+static int i2c_mux_gpio_select(struct i2c_adapter *adap, void *data, u32 chan)
{
struct gpiomux *mux = data;
- gpiomux_set(mux, mux->data.values[chan]);
+ i2c_mux_gpio_set(mux, mux->data.values[chan]);
return 0;
}
-static int gpiomux_deselect(struct i2c_adapter *adap, void *data, u32 chan)
+static int i2c_mux_gpio_deselect(struct i2c_adapter *adap, void *data, u32 chan)
{
struct gpiomux *mux = data;
- gpiomux_set(mux, mux->data.idle);
+ i2c_mux_gpio_set(mux, mux->data.idle);
return 0;
}
-static int __devinit gpiomux_probe(struct platform_device *pdev)
+static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev)
{
struct gpiomux *mux;
- struct gpio_i2cmux_platform_data *pdata;
+ struct i2c_mux_gpio_platform_data *pdata;
struct i2c_adapter *parent;
int (*deselect) (struct i2c_adapter *, void *, u32);
unsigned initial_state;
@@ -86,16 +86,16 @@ static int __devinit gpiomux_probe(struct platform_device *pdev)
goto alloc_failed2;
}
- if (pdata->idle != GPIO_I2CMUX_NO_IDLE) {
+ if (pdata->idle != I2C_MUX_GPIO_NO_IDLE) {
initial_state = pdata->idle;
- deselect = gpiomux_deselect;
+ deselect = i2c_mux_gpio_deselect;
} else {
initial_state = pdata->values[0];
deselect = NULL;
}
for (i = 0; i < pdata->n_gpios; i++) {
- ret = gpio_request(pdata->gpios[i], "gpio-i2cmux");
+ ret = gpio_request(pdata->gpios[i], "i2c-mux-gpio");
if (ret)
goto err_request_gpio;
gpio_direction_output(pdata->gpios[i],
@@ -105,8 +105,8 @@ static int __devinit gpiomux_probe(struct platform_device *pdev)
for (i = 0; i < pdata->n_values; i++) {
u32 nr = pdata->base_nr ? (pdata->base_nr + i) : 0;
- mux->adap[i] = i2c_add_mux_adapter(parent, mux, nr, i,
- gpiomux_select, deselect);
+ mux->adap[i] = i2c_add_mux_adapter(parent, &pdev->dev, mux, nr, i,
+ i2c_mux_gpio_select, deselect);
if (!mux->adap[i]) {
ret = -ENODEV;
dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
@@ -137,7 +137,7 @@ alloc_failed:
return ret;
}
-static int __devexit gpiomux_remove(struct platform_device *pdev)
+static int __devexit i2c_mux_gpio_remove(struct platform_device *pdev)
{
struct gpiomux *mux = platform_get_drvdata(pdev);
int i;
@@ -156,18 +156,18 @@ static int __devexit gpiomux_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_driver gpiomux_driver = {
- .probe = gpiomux_probe,
- .remove = __devexit_p(gpiomux_remove),
+static struct platform_driver i2c_mux_gpio_driver = {
+ .probe = i2c_mux_gpio_probe,
+ .remove = __devexit_p(i2c_mux_gpio_remove),
.driver = {
.owner = THIS_MODULE,
- .name = "gpio-i2cmux",
+ .name = "i2c-mux-gpio",
},
};
-module_platform_driver(gpiomux_driver);
+module_platform_driver(i2c_mux_gpio_driver);
MODULE_DESCRIPTION("GPIO-based I2C multiplexer driver");
MODULE_AUTHOR("Peter Korsgaard <peter.korsgaard@barco.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:gpio-i2cmux");
+MODULE_ALIAS("platform:i2c-mux-gpio");
diff --git a/drivers/i2c/muxes/pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index e0df9b6c66b..8aacde1516a 100644
--- a/drivers/i2c/muxes/pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -353,7 +353,8 @@ static int pca9541_probe(struct i2c_client *client,
force = 0;
if (pdata)
force = pdata->modes[0].adap_id;
- data->mux_adap = i2c_add_mux_adapter(adap, client, force, 0,
+ data->mux_adap = i2c_add_mux_adapter(adap, &client->dev, client,
+ force, 0,
pca9541_select_chan,
pca9541_release_chan);
diff --git a/drivers/i2c/muxes/pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 0e37ef27aa1..f2dfe0d8fcc 100644
--- a/drivers/i2c/muxes/pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -226,7 +226,7 @@ static int pca954x_probe(struct i2c_client *client,
}
data->virt_adaps[num] =
- i2c_add_mux_adapter(adap, client,
+ i2c_add_mux_adapter(adap, &client->dev, client,
force, num, pca954x_select_chan,
(pdata && pdata->modes[num].deselect_on_exit)
? pca954x_deselect_mux : NULL);
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 29fe1b2be1c..7f7b72464a3 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -311,7 +311,15 @@ static void pxa27x_keypad_scan_direct(struct pxa27x_keypad *keypad)
if (pdata->enable_rotary0 || pdata->enable_rotary1)
pxa27x_keypad_scan_rotary(keypad);
- new_state = KPDK_DK(kpdk) & keypad->direct_key_mask;
+	/*
+	 * KPDK_DK only reports the raw key pin level, which is board
+	 * dependent; on some boards the direct keys are active low.
+	 */
+ if (pdata->direct_key_low_active)
+ new_state = ~KPDK_DK(kpdk) & keypad->direct_key_mask;
+ else
+ new_state = KPDK_DK(kpdk) & keypad->direct_key_mask;
+
bits_changed = keypad->direct_key_state ^ new_state;
if (bits_changed == 0)
@@ -383,7 +391,14 @@ static void pxa27x_keypad_config(struct pxa27x_keypad *keypad)
if (pdata->direct_key_num > direct_key_num)
direct_key_num = pdata->direct_key_num;
- keypad->direct_key_mask = ((2 << direct_key_num) - 1) & ~mask;
+	/*
+	 * Direct key usage may not start from KP_DKIN0; if the platform
+	 * supplies an explicit mask, use it instead of the computed one.
+	 */
+ if (pdata->direct_key_mask)
+ keypad->direct_key_mask = pdata->direct_key_mask;
+ else
+ keypad->direct_key_mask = ((1 << direct_key_num) - 1) & ~mask;
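
The fallback mask also fixes an off-by-one: for direct_key_num keys, (2 << direct_key_num) - 1 selects one key too many, while (1 << direct_key_num) - 1 covers exactly KP_DKIN0..KP_DKIN(direct_key_num - 1). A quick worked check with illustrative values:

/*
 * Worked example, direct_key_num == 2:
 *   old: (2 << 2) - 1 = 0x7 -> three direct keys selected (one too many)
 *   new: (1 << 2) - 1 = 0x3 -> exactly KP_DKIN0 and KP_DKIN1
 */
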
/* enable direct key */
if (direct_key_num)
@@ -399,7 +414,7 @@ static int pxa27x_keypad_open(struct input_dev *dev)
struct pxa27x_keypad *keypad = input_get_drvdata(dev);
/* Enable unit clock */
- clk_enable(keypad->clk);
+ clk_prepare_enable(keypad->clk);
pxa27x_keypad_config(keypad);
return 0;
@@ -410,7 +425,7 @@ static void pxa27x_keypad_close(struct input_dev *dev)
struct pxa27x_keypad *keypad = input_get_drvdata(dev);
/* Disable clock unit */
- clk_disable(keypad->clk);
+ clk_disable_unprepare(keypad->clk);
}
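
Under the common clock framework a clock must be prepared (which may sleep) before it can be enabled, and the combined helpers keep the two steps paired across open/close and suspend/resume. A minimal sketch of the pairing; the wrapper function is illustrative only:

/* Illustrative only: the prepare/enable pairing; clk_prepare_enable()
 * may sleep, so call it from process context. */
static int keypad_clk_example(struct clk *clk)
{
	int ret = clk_prepare_enable(clk);

	if (ret)
		return ret;

	/* ... use the hardware ... */

	clk_disable_unprepare(clk);
	return 0;
}
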
#ifdef CONFIG_PM
@@ -419,10 +434,14 @@ static int pxa27x_keypad_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
- clk_disable(keypad->clk);
-
+	/*
+	 * If the keypad is used as a wakeup source, the clock must stay
+	 * enabled; otherwise key presses cannot be detected.
+	 */
if (device_may_wakeup(&pdev->dev))
enable_irq_wake(keypad->irq);
+ else
+ clk_disable_unprepare(keypad->clk);
return 0;
}
@@ -433,19 +452,24 @@ static int pxa27x_keypad_resume(struct device *dev)
struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
struct input_dev *input_dev = keypad->input_dev;
- if (device_may_wakeup(&pdev->dev))
+	/*
+	 * If the keypad is used as a wakeup source, the clock was never
+	 * turned off, so there is no need to reconfigure it.
+	 */
+ if (device_may_wakeup(&pdev->dev)) {
disable_irq_wake(keypad->irq);
+ } else {
+ mutex_lock(&input_dev->mutex);
- mutex_lock(&input_dev->mutex);
+ if (input_dev->users) {
+ /* Enable unit clock */
+ clk_prepare_enable(keypad->clk);
+ pxa27x_keypad_config(keypad);
+ }
- if (input_dev->users) {
- /* Enable unit clock */
- clk_enable(keypad->clk);
- pxa27x_keypad_config(keypad);
+ mutex_unlock(&input_dev->mutex);
}
- mutex_unlock(&input_dev->mutex);
-
return 0;
}
diff --git a/drivers/input/misc/wm831x-on.c b/drivers/input/misc/wm831x-on.c
index 47f18d6bce4..6790a812a1d 100644
--- a/drivers/input/misc/wm831x-on.c
+++ b/drivers/input/misc/wm831x-on.c
@@ -73,7 +73,7 @@ static int __devinit wm831x_on_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_on *wm831x_on;
- int irq = platform_get_irq(pdev, 0);
+ int irq = wm831x_irq(wm831x, platform_get_irq(pdev, 0));
int ret;
wm831x_on = kzalloc(sizeof(struct wm831x_on), GFP_KERNEL);
diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c
index 4bc851a9dc3..e83410721e3 100644
--- a/drivers/input/touchscreen/wm831x-ts.c
+++ b/drivers/input/touchscreen/wm831x-ts.c
@@ -260,15 +260,16 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev)
* If we have a direct IRQ use it, otherwise use the interrupt
* from the WM831x IRQ controller.
*/
+ wm831x_ts->data_irq = wm831x_irq(wm831x,
+ platform_get_irq_byname(pdev,
+ "TCHDATA"));
if (pdata && pdata->data_irq)
wm831x_ts->data_irq = pdata->data_irq;
- else
- wm831x_ts->data_irq = platform_get_irq_byname(pdev, "TCHDATA");
+ wm831x_ts->pd_irq = wm831x_irq(wm831x,
+ platform_get_irq_byname(pdev, "TCHPD"));
if (pdata && pdata->pd_irq)
wm831x_ts->pd_irq = pdata->pd_irq;
- else
- wm831x_ts->pd_irq = platform_get_irq_byname(pdev, "TCHPD");
if (pdata)
wm831x_ts->pressure = pdata->pressure;
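
On these WM831x child devices platform_get_irq() returns an index local to the WM831x interrupt controller rather than a Linux interrupt number, so it has to be passed through wm831x_irq() before request_irq()/request_threaded_irq() can use it. A sketch of the idiom; the wrapper, flags and name string are illustrative:

/* Illustrative only: map the chip-local index to a virq, then request it. */
static int wm831x_child_irq_example(struct wm831x *wm831x,
				    struct platform_device *pdev,
				    irq_handler_t thread_fn, void *dev_id)
{
	int irq = wm831x_irq(wm831x, platform_get_irq(pdev, 0));

	return request_threaded_irq(irq, NULL, thread_fn,
				    IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				    "wm831x-child", dev_id);
}
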
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index c69843742bb..34089372753 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -162,4 +162,25 @@ config TEGRA_IOMMU_SMMU
space through the SMMU (System Memory Management Unit)
hardware included on Tegra SoCs.
+config EXYNOS_IOMMU
+ bool "Exynos IOMMU Support"
+ depends on ARCH_EXYNOS && EXYNOS_DEV_SYSMMU
+ select IOMMU_API
+ help
+	  Support for the IOMMU (System MMU) of the Samsung Exynos application
+	  processor family. This enables H/W multimedia accelerators to see
+	  non-contiguous physical memory chunks as linear memory in their
+	  address spaces.
+
+ If unsure, say N here.
+
+config EXYNOS_IOMMU_DEBUG
+ bool "Debugging log for Exynos IOMMU"
+ depends on EXYNOS_IOMMU
+ help
+	  Select this to see detailed log messages that show what
+	  happens in the IOMMU driver.
+
+	  Say N unless you need kernel log messages for IOMMU debugging.
+
endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 3e5e82ae9f0..76e54ef796d 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o
obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
+obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
new file mode 100644
index 00000000000..9a114b9ff17
--- /dev/null
+++ b/drivers/iommu/exynos-iommu.c
@@ -0,0 +1,1076 @@
+/* linux/drivers/iommu/exynos_iommu.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/iommu.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/export.h>
+
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+
+#include <mach/sysmmu.h>
+
+/* We do not consider supersection mapping (16MB) */
+#define SECT_ORDER 20
+#define LPAGE_ORDER 16
+#define SPAGE_ORDER 12
+
+#define SECT_SIZE (1 << SECT_ORDER)
+#define LPAGE_SIZE (1 << LPAGE_ORDER)
+#define SPAGE_SIZE (1 << SPAGE_ORDER)
+
+#define SECT_MASK (~(SECT_SIZE - 1))
+#define LPAGE_MASK (~(LPAGE_SIZE - 1))
+#define SPAGE_MASK (~(SPAGE_SIZE - 1))
+
+#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
+#define lv1ent_page(sent) ((*(sent) & 3) == 1)
+#define lv1ent_section(sent) ((*(sent) & 3) == 2)
+
+#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
+#define lv2ent_small(pent) ((*(pent) & 2) == 2)
+#define lv2ent_large(pent) ((*(pent) & 3) == 1)
+
+#define section_phys(sent) (*(sent) & SECT_MASK)
+#define section_offs(iova) ((iova) & 0xFFFFF)
+#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
+#define lpage_offs(iova) ((iova) & 0xFFFF)
+#define spage_phys(pent) (*(pent) & SPAGE_MASK)
+#define spage_offs(iova) ((iova) & 0xFFF)
+
+#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
+#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)
+
+#define NUM_LV1ENTRIES 4096
+#define NUM_LV2ENTRIES 256
+
+#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))
+
+#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
+
+#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)
+
+#define mk_lv1ent_sect(pa) ((pa) | 2)
+#define mk_lv1ent_page(pa) ((pa) | 1)
+#define mk_lv2ent_lpage(pa) ((pa) | 1)
+#define mk_lv2ent_spage(pa) ((pa) | 2)
+
+#define CTRL_ENABLE 0x5
+#define CTRL_BLOCK 0x7
+#define CTRL_DISABLE 0x0
+
+#define REG_MMU_CTRL 0x000
+#define REG_MMU_CFG 0x004
+#define REG_MMU_STATUS 0x008
+#define REG_MMU_FLUSH 0x00C
+#define REG_MMU_FLUSH_ENTRY 0x010
+#define REG_PT_BASE_ADDR 0x014
+#define REG_INT_STATUS 0x018
+#define REG_INT_CLEAR 0x01C
+
+#define REG_PAGE_FAULT_ADDR 0x024
+#define REG_AW_FAULT_ADDR 0x028
+#define REG_AR_FAULT_ADDR 0x02C
+#define REG_DEFAULT_SLAVE_ADDR 0x030
+
+#define REG_MMU_VERSION 0x034
+
+#define REG_PB0_SADDR 0x04C
+#define REG_PB0_EADDR 0x050
+#define REG_PB1_SADDR 0x054
+#define REG_PB1_EADDR 0x058
+
+static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
+{
+ return pgtable + lv1ent_offset(iova);
+}
+
+static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
+{
+ return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
+}
+
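
The macros above describe a two-level table: a 4096-entry first level whose entries are either 1 MiB sections or pointers to 256-entry second-level tables holding 64 KiB large pages or 4 KiB small pages. As a rough illustration of how an IOVA would be resolved with these helpers (the driver's real iova_to_phys callback is not shown in this hunk; the function below is a sketch only):

/* Illustrative only: walk the two-level page table defined above. */
static unsigned long example_iova_to_phys(unsigned long *pgtable,
					  unsigned long iova)
{
	unsigned long *sent = section_entry(pgtable, iova);

	if (lv1ent_section(sent))
		return section_phys(sent) + section_offs(iova);

	if (lv1ent_page(sent)) {
		unsigned long *pent = page_entry(sent, iova);

		if (lv2ent_large(pent))
			return lpage_phys(pent) + lpage_offs(iova);
		if (lv2ent_small(pent))
			return spage_phys(pent) + spage_offs(iova);
	}

	return 0;	/* unmapped */
}
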
+enum exynos_sysmmu_inttype {
+ SYSMMU_PAGEFAULT,
+ SYSMMU_AR_MULTIHIT,
+ SYSMMU_AW_MULTIHIT,
+ SYSMMU_BUSERROR,
+ SYSMMU_AR_SECURITY,
+ SYSMMU_AR_ACCESS,
+ SYSMMU_AW_SECURITY,
+ SYSMMU_AW_PROTECTION, /* 7 */
+ SYSMMU_FAULT_UNKNOWN,
+ SYSMMU_FAULTS_NUM
+};
+
+/*
+ * @itype: type of fault.
+ * @pgtable_base: the physical address of page table base. This is 0 if @itype
+ * is SYSMMU_BUSERROR.
+ * @fault_addr: the device (virtual) address that the System MMU tried to
+ *		translate. This is 0 if @itype is SYSMMU_BUSERROR.
+ */
+typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
+ unsigned long pgtable_base, unsigned long fault_addr);
+
+static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
+ REG_PAGE_FAULT_ADDR,
+ REG_AR_FAULT_ADDR,
+ REG_AW_FAULT_ADDR,
+ REG_DEFAULT_SLAVE_ADDR,
+ REG_AR_FAULT_ADDR,
+ REG_AR_FAULT_ADDR,
+ REG_AW_FAULT_ADDR,
+ REG_AW_FAULT_ADDR
+};
+
+static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
+ "PAGE FAULT",
+ "AR MULTI-HIT FAULT",
+ "AW MULTI-HIT FAULT",
+ "BUS ERROR",
+ "AR SECURITY PROTECTION FAULT",
+ "AR ACCESS PROTECTION FAULT",
+ "AW SECURITY PROTECTION FAULT",
+ "AW ACCESS PROTECTION FAULT",
+ "UNKNOWN FAULT"
+};
+
+struct exynos_iommu_domain {
+ struct list_head clients; /* list of sysmmu_drvdata.node */
+ unsigned long *pgtable; /* lv1 page table, 16KB */
+ short *lv2entcnt; /* free lv2 entry counter for each section */
+ spinlock_t lock; /* lock for this structure */
+ spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
+};
+
+struct sysmmu_drvdata {
+ struct list_head node; /* entry of exynos_iommu_domain.clients */
+ struct device *sysmmu; /* System MMU's device descriptor */
+ struct device *dev; /* Owner of system MMU */
+ char *dbgname;
+ int nsfrs;
+ void __iomem **sfrbases;
+ struct clk *clk[2];
+ int activations;
+ rwlock_t lock;
+ struct iommu_domain *domain;
+ sysmmu_fault_handler_t fault_handler;
+ unsigned long pgtable;
+};
+
+static bool set_sysmmu_active(struct sysmmu_drvdata *data)
+{
+ /* return true if the System MMU was not active previously
+ and it needs to be initialized */
+ return ++data->activations == 1;
+}
+
+static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
+{
+	/* return true if the System MMU needs to be disabled */
+ BUG_ON(data->activations < 1);
+ return --data->activations == 0;
+}
+
+static bool is_sysmmu_active(struct sysmmu_drvdata *data)
+{
+ return data->activations > 0;
+}
+
+static void sysmmu_unblock(void __iomem *sfrbase)
+{
+ __raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
+}
+
+static bool sysmmu_block(void __iomem *sfrbase)
+{
+ int i = 120;
+
+ __raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
+ while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
+ --i;
+
+ if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
+ sysmmu_unblock(sfrbase);
+ return false;
+ }
+
+ return true;
+}
+
+static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
+{
+ __raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
+}
+
+static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
+ unsigned long iova)
+{
+ __raw_writel((iova & SPAGE_MASK) | 1, sfrbase + REG_MMU_FLUSH_ENTRY);
+}
+
+static void __sysmmu_set_ptbase(void __iomem *sfrbase,
+ unsigned long pgd)
+{
+ __raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */
+ __raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);
+
+ __sysmmu_tlb_invalidate(sfrbase);
+}
+
+static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
+ unsigned long size, int idx)
+{
+ __raw_writel(base, sfrbase + REG_PB0_SADDR + idx * 8);
+ __raw_writel(size - 1 + base, sfrbase + REG_PB0_EADDR + idx * 8);
+}
+
+void exynos_sysmmu_set_prefbuf(struct device *dev,
+ unsigned long base0, unsigned long size0,
+ unsigned long base1, unsigned long size1)
+{
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ unsigned long flags;
+ int i;
+
+ BUG_ON((base0 + size0) <= base0);
+ BUG_ON((size1 > 0) && ((base1 + size1) <= base1));
+
+ read_lock_irqsave(&data->lock, flags);
+ if (!is_sysmmu_active(data))
+ goto finish;
+
+ for (i = 0; i < data->nsfrs; i++) {
+ if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
+ if (!sysmmu_block(data->sfrbases[i]))
+ continue;
+
+ if (size1 == 0) {
+ if (size0 <= SZ_128K) {
+ base1 = base0;
+ size1 = size0;
+ } else {
+ size1 = size0 -
+ ALIGN(size0 / 2, SZ_64K);
+ size0 = size0 - size1;
+ base1 = base0 + size0;
+ }
+ }
+
+ __sysmmu_set_prefbuf(
+ data->sfrbases[i], base0, size0, 0);
+ __sysmmu_set_prefbuf(
+ data->sfrbases[i], base1, size1, 1);
+
+ sysmmu_unblock(data->sfrbases[i]);
+ }
+ }
+finish:
+ read_unlock_irqrestore(&data->lock, flags);
+}
+
+static void __set_fault_handler(struct sysmmu_drvdata *data,
+ sysmmu_fault_handler_t handler)
+{
+ unsigned long flags;
+
+ write_lock_irqsave(&data->lock, flags);
+ data->fault_handler = handler;
+ write_unlock_irqrestore(&data->lock, flags);
+}
+
+void exynos_sysmmu_set_fault_handler(struct device *dev,
+ sysmmu_fault_handler_t handler)
+{
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+
+ __set_fault_handler(data, handler);
+}
+
+static int default_fault_handler(enum exynos_sysmmu_inttype itype,
+ unsigned long pgtable_base, unsigned long fault_addr)
+{
+ unsigned long *ent;
+
+ if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
+ itype = SYSMMU_FAULT_UNKNOWN;
+
+	pr_err("%s occurred at 0x%lx (page table base: 0x%lx)\n",
+ sysmmu_fault_name[itype], fault_addr, pgtable_base);
+
+ ent = section_entry(__va(pgtable_base), fault_addr);
+ pr_err("\tLv1 entry: 0x%lx\n", *ent);
+
+ if (lv1ent_page(ent)) {
+ ent = page_entry(ent, fault_addr);
+ pr_err("\t Lv2 entry: 0x%lx\n", *ent);
+ }
+
+ pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");
+
+ BUG();
+
+ return 0;
+}
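
default_fault_handler() deliberately ends in BUG() because a System MMU fault is unrecoverable without help from the owning driver; a client can install something gentler through exynos_sysmmu_set_fault_handler(). Returning 0 from the handler tells the interrupt path that the fault was dealt with, so it clears the interrupt bit before unblocking the MMU. A hypothetical, logging-only replacement:

/* Illustrative only: log the fault and report it as handled. */
static int my_sysmmu_fault_handler(enum exynos_sysmmu_inttype itype,
				   unsigned long pgtable_base,
				   unsigned long fault_addr)
{
	pr_warn("sysmmu fault %d at %#lx (pgtable %#lx)\n",
		itype, fault_addr, pgtable_base);
	return 0;	/* 0: the IRQ path clears the interrupt bit */
}

/* exynos_sysmmu_set_fault_handler(dev, my_sysmmu_fault_handler); */
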
+
+static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
+{
+	/* The System MMU is blocked while a fault interrupt is being handled. */
+ struct sysmmu_drvdata *data = dev_id;
+ struct resource *irqres;
+ struct platform_device *pdev;
+ enum exynos_sysmmu_inttype itype;
+ unsigned long addr = -1;
+
+ int i, ret = -ENOSYS;
+
+ read_lock(&data->lock);
+
+ WARN_ON(!is_sysmmu_active(data));
+
+ pdev = to_platform_device(data->sysmmu);
+ for (i = 0; i < (pdev->num_resources / 2); i++) {
+ irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+ if (irqres && ((int)irqres->start == irq))
+ break;
+ }
+
+ if (i == pdev->num_resources) {
+ itype = SYSMMU_FAULT_UNKNOWN;
+ } else {
+ itype = (enum exynos_sysmmu_inttype)
+ __ffs(__raw_readl(data->sfrbases[i] + REG_INT_STATUS));
+ if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
+ itype = SYSMMU_FAULT_UNKNOWN;
+ else
+ addr = __raw_readl(
+ data->sfrbases[i] + fault_reg_offset[itype]);
+ }
+
+ if (data->domain)
+ ret = report_iommu_fault(data->domain, data->dev,
+ addr, itype);
+
+ if ((ret == -ENOSYS) && data->fault_handler) {
+ unsigned long base = data->pgtable;
+ if (itype != SYSMMU_FAULT_UNKNOWN)
+ base = __raw_readl(
+ data->sfrbases[i] + REG_PT_BASE_ADDR);
+ ret = data->fault_handler(itype, base, addr);
+ }
+
+ if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
+ __raw_writel(1 << itype, data->sfrbases[i] + REG_INT_CLEAR);
+ else
+ dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
+ data->dbgname, sysmmu_fault_name[itype]);
+
+ if (itype != SYSMMU_FAULT_UNKNOWN)
+ sysmmu_unblock(data->sfrbases[i]);
+
+ read_unlock(&data->lock);
+
+ return IRQ_HANDLED;
+}
+
+static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
+{
+ unsigned long flags;
+ bool disabled = false;
+ int i;
+
+ write_lock_irqsave(&data->lock, flags);
+
+ if (!set_sysmmu_inactive(data))
+ goto finish;
+
+ for (i = 0; i < data->nsfrs; i++)
+ __raw_writel(CTRL_DISABLE, data->sfrbases[i] + REG_MMU_CTRL);
+
+ if (data->clk[1])
+ clk_disable(data->clk[1]);
+ if (data->clk[0])
+ clk_disable(data->clk[0]);
+
+ disabled = true;
+ data->pgtable = 0;
+ data->domain = NULL;
+finish:
+ write_unlock_irqrestore(&data->lock, flags);
+
+ if (disabled)
+ dev_dbg(data->sysmmu, "(%s) Disabled\n", data->dbgname);
+ else
+ dev_dbg(data->sysmmu, "(%s) %d times left to be disabled\n",
+ data->dbgname, data->activations);
+
+ return disabled;
+}
+
+/* __exynos_sysmmu_enable: Enables the System MMU
+ *
+ * Returns a negative error code if the System MMU could not be enabled,
+ * 0 if it has just been enabled, and 1 if it was already enabled.
+ */
+static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
+ unsigned long pgtable, struct iommu_domain *domain)
+{
+ int i, ret = 0;
+ unsigned long flags;
+
+ write_lock_irqsave(&data->lock, flags);
+
+ if (!set_sysmmu_active(data)) {
+ if (WARN_ON(pgtable != data->pgtable)) {
+ ret = -EBUSY;
+ set_sysmmu_inactive(data);
+ } else {
+ ret = 1;
+ }
+
+ dev_dbg(data->sysmmu, "(%s) Already enabled\n", data->dbgname);
+ goto finish;
+ }
+
+ if (data->clk[0])
+ clk_enable(data->clk[0]);
+ if (data->clk[1])
+ clk_enable(data->clk[1]);
+
+ data->pgtable = pgtable;
+
+ for (i = 0; i < data->nsfrs; i++) {
+ __sysmmu_set_ptbase(data->sfrbases[i], pgtable);
+
+ if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
+ /* System MMU version is 3.x */
+ __raw_writel((1 << 12) | (2 << 28),
+ data->sfrbases[i] + REG_MMU_CFG);
+ __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 0);
+ __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 1);
+ }
+
+ __raw_writel(CTRL_ENABLE, data->sfrbases[i] + REG_MMU_CTRL);
+ }
+
+ data->domain = domain;
+
+ dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname);
+finish:
+ write_unlock_irqrestore(&data->lock, flags);
+
+ return ret;
+}
+
+int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
+{
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ int ret;
+
+ BUG_ON(!memblock_is_memory(pgtable));
+
+ ret = pm_runtime_get_sync(data->sysmmu);
+ if (ret < 0) {
+ dev_dbg(data->sysmmu, "(%s) Failed to enable\n", data->dbgname);
+ return ret;
+ }
+
+ ret = __exynos_sysmmu_enable(data, pgtable, NULL);
+ if (WARN_ON(ret < 0)) {
+ pm_runtime_put(data->sysmmu);
+ dev_err(data->sysmmu,
+ "(%s) Already enabled with page table %#lx\n",
+ data->dbgname, data->pgtable);
+ } else {
+ data->dev = dev;
+ }
+
+ return ret;
+}
+
+bool exynos_sysmmu_disable(struct device *dev)
+{
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ bool disabled;
+
+ disabled = __exynos_sysmmu_disable(data);
+ pm_runtime_put(data->sysmmu);
+
+ return disabled;
+}
+
+static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova)
+{
+ unsigned long flags;
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+
+ read_lock_irqsave(&data->lock, flags);
+
+ if (is_sysmmu_active(data)) {
+ int i;
+ for (i = 0; i < data->nsfrs; i++) {
+ if (sysmmu_block(data->sfrbases[i])) {
+ __sysmmu_tlb_invalidate_entry(
+ data->sfrbases[i], iova);
+ sysmmu_unblock(data->sfrbases[i]);
+ }
+ }
+ } else {
+ dev_dbg(data->sysmmu,
+ "(%s) Disabled. Skipping invalidating TLB.\n",
+ data->dbgname);
+ }
+
+ read_unlock_irqrestore(&data->lock, flags);
+}
+
+void exynos_sysmmu_tlb_invalidate(struct device *dev)
+{
+ unsigned long flags;
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+
+ read_lock_irqsave(&data->lock, flags);
+
+ if (is_sysmmu_active(data)) {
+ int i;
+ for (i = 0; i < data->nsfrs; i++) {
+ if (sysmmu_block(data->sfrbases[i])) {
+ __sysmmu_tlb_invalidate(data->sfrbases[i]);
+ sysmmu_unblock(data->sfrbases[i]);
+ }
+ }
+ } else {
+ dev_dbg(data->sysmmu,
+ "(%s) Disabled. Skipping invalidating TLB.\n",
+ data->dbgname);
+ }
+
+ read_unlock_irqrestore(&data->lock, flags);
+}
+
+static int exynos_sysmmu_probe(struct platform_device *pdev)
+{
+ int i, ret;
+ struct device *dev;
+ struct sysmmu_drvdata *data;
+
+ dev = &pdev->dev;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_dbg(dev, "Not enough memory\n");
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ ret = dev_set_drvdata(dev, data);
+ if (ret) {
+ dev_dbg(dev, "Unable to initialize driver data\n");
+ goto err_init;
+ }
+
+ data->nsfrs = pdev->num_resources / 2;
+ data->sfrbases = kmalloc(sizeof(*data->sfrbases) * data->nsfrs,
+ GFP_KERNEL);
+ if (data->sfrbases == NULL) {
+ dev_dbg(dev, "Not enough memory\n");
+ ret = -ENOMEM;
+ goto err_init;
+ }
+
+ for (i = 0; i < data->nsfrs; i++) {
+ struct resource *res;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!res) {
+ dev_dbg(dev, "Unable to find IOMEM region\n");
+ ret = -ENOENT;
+ goto err_res;
+ }
+
+ data->sfrbases[i] = ioremap(res->start, resource_size(res));
+ if (!data->sfrbases[i]) {
+ dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n",
+ res->start);
+ ret = -ENOENT;
+ goto err_res;
+ }
+ }
+
+ for (i = 0; i < data->nsfrs; i++) {
+ ret = platform_get_irq(pdev, i);
+ if (ret <= 0) {
+ dev_dbg(dev, "Unable to find IRQ resource\n");
+ goto err_irq;
+ }
+
+ ret = request_irq(ret, exynos_sysmmu_irq, 0,
+ dev_name(dev), data);
+ if (ret) {
+ dev_dbg(dev, "Unable to register interrupt handler\n");
+ goto err_irq;
+ }
+ }
+
+ if (dev_get_platdata(dev)) {
+ char *deli, *beg;
+ struct sysmmu_platform_data *platdata = dev_get_platdata(dev);
+
+ beg = platdata->clockname;
+
+ for (deli = beg; (*deli != '\0') && (*deli != ','); deli++)
+ /* NOTHING */;
+
+ if (*deli == '\0')
+ deli = NULL;
+ else
+ *deli = '\0';
+
+ data->clk[0] = clk_get(dev, beg);
+ if (IS_ERR(data->clk[0])) {
+ data->clk[0] = NULL;
+ dev_dbg(dev, "No clock descriptor registered\n");
+ }
+
+ if (data->clk[0] && deli) {
+ *deli = ',';
+ data->clk[1] = clk_get(dev, deli + 1);
+ if (IS_ERR(data->clk[1]))
+ data->clk[1] = NULL;
+ }
+
+ data->dbgname = platdata->dbgname;
+ }
+
+ data->sysmmu = dev;
+ rwlock_init(&data->lock);
+ INIT_LIST_HEAD(&data->node);
+
+ __set_fault_handler(data, &default_fault_handler);
+
+ if (dev->parent)
+ pm_runtime_enable(dev);
+
+ dev_dbg(dev, "(%s) Initialized\n", data->dbgname);
+ return 0;
+err_irq:
+ while (i-- > 0) {
+ int irq;
+
+ irq = platform_get_irq(pdev, i);
+ free_irq(irq, data);
+ }
+err_res:
+ while (data->nsfrs-- > 0)
+ iounmap(data->sfrbases[data->nsfrs]);
+ kfree(data->sfrbases);
+err_init:
+ kfree(data);
+err_alloc:
+ dev_err(dev, "Failed to initialize\n");
+ return ret;
+}
+
+static struct platform_driver exynos_sysmmu_driver = {
+ .probe = exynos_sysmmu_probe,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "exynos-sysmmu",
+ }
+};
+
+static inline void pgtable_flush(void *vastart, void *vaend)
+{
+ dmac_flush_range(vastart, vaend);
+ outer_flush_range(virt_to_phys(vastart),
+ virt_to_phys(vaend));
+}
+
+static int exynos_iommu_domain_init(struct iommu_domain *domain)
+{
+ struct exynos_iommu_domain *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pgtable = (unsigned long *)__get_free_pages(
+ GFP_KERNEL | __GFP_ZERO, 2);
+ if (!priv->pgtable)
+ goto err_pgtable;
+
+ priv->lv2entcnt = (short *)__get_free_pages(
+ GFP_KERNEL | __GFP_ZERO, 1);
+ if (!priv->lv2entcnt)
+ goto err_counter;
+
+ pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
+
+ spin_lock_init(&priv->lock);
+ spin_lock_init(&priv->pgtablelock);
+ INIT_LIST_HEAD(&priv->clients);
+
+ domain->priv = priv;
+ return 0;
+
+err_counter:
+ free_pages((unsigned long)priv->pgtable, 2);
+err_pgtable:
+ kfree(priv);
+ return -ENOMEM;
+}
+
+static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
+{
+ struct exynos_iommu_domain *priv = domain->priv;
+ struct sysmmu_drvdata *data;
+ unsigned long flags;
+ int i;
+
+ WARN_ON(!list_empty(&priv->clients));
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ list_for_each_entry(data, &priv->clients, node) {
+ while (!exynos_sysmmu_disable(data->dev))
+ ; /* until System MMU is actually disabled */
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ for (i = 0; i < NUM_LV1ENTRIES; i++)
+ if (lv1ent_page(priv->pgtable + i))
+ kfree(__va(lv2table_base(priv->pgtable + i)));
+
+ free_pages((unsigned long)priv->pgtable, 2);
+ free_pages((unsigned long)priv->lv2entcnt, 1);
+ kfree(domain->priv);
+ domain->priv = NULL;
+}
+
+static int exynos_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ struct exynos_iommu_domain *priv = domain->priv;
+ unsigned long flags;
+ int ret;
+
+ ret = pm_runtime_get_sync(data->sysmmu);
+ if (ret < 0)
+ return ret;
+
+ ret = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ ret = __exynos_sysmmu_enable(data, __pa(priv->pgtable), domain);
+
+ if (ret == 0) {
+ /* 'data->node' must not already be in priv->clients */
+ BUG_ON(!list_empty(&data->node));
+ data->dev = dev;
+ list_add_tail(&data->node, &priv->clients);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (ret < 0) {
+ dev_err(dev, "%s: Failed to attach IOMMU with pgtable %#lx\n",
+ __func__, __pa(priv->pgtable));
+ pm_runtime_put(data->sysmmu);
+ } else if (ret > 0) {
+ dev_dbg(dev, "%s: IOMMU with pgtable 0x%lx already attached\n",
+ __func__, __pa(priv->pgtable));
+ } else {
+ dev_dbg(dev, "%s: Attached new IOMMU with pgtable 0x%lx\n",
+ __func__, __pa(priv->pgtable));
+ }
+
+ return ret;
+}
+
+static void exynos_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
+ struct exynos_iommu_domain *priv = domain->priv;
+ struct list_head *pos;
+ unsigned long flags;
+ bool found = false;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ list_for_each(pos, &priv->clients) {
+ if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ goto finish;
+
+ if (__exynos_sysmmu_disable(data)) {
+ dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
+ __func__, __pa(priv->pgtable));
+ list_del(&data->node);
+ INIT_LIST_HEAD(&data->node);
+
+ } else {
+ dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed",
+ __func__, __pa(priv->pgtable));
+ }
+
+finish:
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (found)
+ pm_runtime_put(data->sysmmu);
+}
+
+static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
+ short *pgcounter)
+{
+ if (lv1ent_fault(sent)) {
+ unsigned long *pent;
+
+ pent = kzalloc(LV2TABLE_SIZE, GFP_ATOMIC);
+ BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
+ if (!pent)
+ return NULL;
+
+ *sent = mk_lv1ent_page(__pa(pent));
+ *pgcounter = NUM_LV2ENTRIES;
+ pgtable_flush(pent, pent + NUM_LV2ENTRIES);
+ pgtable_flush(sent, sent + 1);
+ }
+
+ return page_entry(sent, iova);
+}
+
+static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt)
+{
+ if (lv1ent_section(sent))
+ return -EADDRINUSE;
+
+ if (lv1ent_page(sent)) {
+ if (*pgcnt != NUM_LV2ENTRIES)
+ return -EADDRINUSE;
+
+ kfree(page_entry(sent, 0));
+
+ *pgcnt = 0;
+ }
+
+ *sent = mk_lv1ent_sect(paddr);
+
+ pgtable_flush(sent, sent + 1);
+
+ return 0;
+}
+
+static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
+ short *pgcnt)
+{
+ if (size == SPAGE_SIZE) {
+ if (!lv2ent_fault(pent))
+ return -EADDRINUSE;
+
+ *pent = mk_lv2ent_spage(paddr);
+ pgtable_flush(pent, pent + 1);
+ *pgcnt -= 1;
+ } else { /* size == LPAGE_SIZE */
+ int i;
+ for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
+ if (!lv2ent_fault(pent)) {
+ memset(pent, 0, sizeof(*pent) * i);
+ return -EADDRINUSE;
+ }
+
+ *pent = mk_lv2ent_lpage(paddr);
+ }
+ pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
+ *pgcnt -= SPAGES_PER_LPAGE;
+ }
+
+ return 0;
+}
+
+static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ struct exynos_iommu_domain *priv = domain->priv;
+ unsigned long *entry;
+ unsigned long flags;
+ int ret = -ENOMEM;
+
+ BUG_ON(priv->pgtable == NULL);
+
+ spin_lock_irqsave(&priv->pgtablelock, flags);
+
+ entry = section_entry(priv->pgtable, iova);
+
+ if (size == SECT_SIZE) {
+ ret = lv1set_section(entry, paddr,
+ &priv->lv2entcnt[lv1ent_offset(iova)]);
+ } else {
+ unsigned long *pent;
+
+ pent = alloc_lv2entry(entry, iova,
+ &priv->lv2entcnt[lv1ent_offset(iova)]);
+
+ if (!pent)
+ ret = -ENOMEM;
+ else
+ ret = lv2set_page(pent, paddr, size,
+ &priv->lv2entcnt[lv1ent_offset(iova)]);
+ }
+
+ if (ret) {
+ pr_debug("%s: Failed to map iova 0x%lx/0x%zx bytes\n",
+ __func__, iova, size);
+ }
+
+ spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+ return ret;
+}
+
+static size_t exynos_iommu_unmap(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ struct exynos_iommu_domain *priv = domain->priv;
+ struct sysmmu_drvdata *data;
+ unsigned long flags;
+ unsigned long *ent;
+
+ BUG_ON(priv->pgtable == NULL);
+
+ spin_lock_irqsave(&priv->pgtablelock, flags);
+
+ ent = section_entry(priv->pgtable, iova);
+
+ if (lv1ent_section(ent)) {
+ BUG_ON(size < SECT_SIZE);
+
+ *ent = 0;
+ pgtable_flush(ent, ent + 1);
+ size = SECT_SIZE;
+ goto done;
+ }
+
+ if (unlikely(lv1ent_fault(ent))) {
+ if (size > SECT_SIZE)
+ size = SECT_SIZE;
+ goto done;
+ }
+
+ /* lv1ent_page(sent) == true here */
+
+ ent = page_entry(ent, iova);
+
+ if (unlikely(lv2ent_fault(ent))) {
+ size = SPAGE_SIZE;
+ goto done;
+ }
+
+ if (lv2ent_small(ent)) {
+ *ent = 0;
+ size = SPAGE_SIZE;
+ priv->lv2entcnt[lv1ent_offset(iova)] += 1;
+ goto done;
+ }
+
+ /* lv1ent_large(ent) == true here */
+ BUG_ON(size < LPAGE_SIZE);
+
+ memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
+
+ size = LPAGE_SIZE;
+ priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
+done:
+ spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ list_for_each_entry(data, &priv->clients, node)
+ sysmmu_tlb_invalidate_entry(data->dev, iova);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+
+ return size;
+}
+
+static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
+ unsigned long iova)
+{
+ struct exynos_iommu_domain *priv = domain->priv;
+ unsigned long *entry;
+ unsigned long flags;
+ phys_addr_t phys = 0;
+
+ spin_lock_irqsave(&priv->pgtablelock, flags);
+
+ entry = section_entry(priv->pgtable, iova);
+
+ if (lv1ent_section(entry)) {
+ phys = section_phys(entry) + section_offs(iova);
+ } else if (lv1ent_page(entry)) {
+ entry = page_entry(entry, iova);
+
+ if (lv2ent_large(entry))
+ phys = lpage_phys(entry) + lpage_offs(iova);
+ else if (lv2ent_small(entry))
+ phys = spage_phys(entry) + spage_offs(iova);
+ }
+
+ spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+ return phys;
+}
+
+static struct iommu_ops exynos_iommu_ops = {
+ .domain_init = &exynos_iommu_domain_init,
+ .domain_destroy = &exynos_iommu_domain_destroy,
+ .attach_dev = &exynos_iommu_attach_device,
+ .detach_dev = &exynos_iommu_detach_device,
+ .map = &exynos_iommu_map,
+ .unmap = &exynos_iommu_unmap,
+ .iova_to_phys = &exynos_iommu_iova_to_phys,
+ .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
+};
+
+static int __init exynos_iommu_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&exynos_sysmmu_driver);
+
+ if (ret == 0)
+ bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
+
+ return ret;
+}
+subsys_initcall(exynos_iommu_init);
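The exynos_iommu_ops table above is what ties this driver into the generic IOMMU API, so client (master) drivers are expected to go through linux/iommu.h rather than call the sysmmu functions directly. A minimal sketch of such a consumer is shown below; it assumes 'dev' is a platform device whose dev->archdata.iommu has already been wired up by platform code, and the 4 KiB mapping corresponds to one small-page (SPAGE_SIZE) entry. This illustrates the API flow only and is not part of this patch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>

/* Illustrative only: map one page for DMA through the System MMU. */
static int demo_map_one_page(struct device *dev, phys_addr_t paddr,
			     unsigned long iova)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return -ENOMEM;

	/* Ends up in exynos_iommu_attach_device() above. */
	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto err_free;

	/* exynos_iommu_map() fills a single small-page (4 KiB) entry. */
	ret = iommu_map(domain, iova, paddr, 4096, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto err_detach;

	/* ... perform DMA through 'iova' here ... */

	iommu_unmap(domain, iova, 4096);	/* also invalidates the TLBs */

err_detach:
	iommu_detach_device(domain, dev);
err_free:
	iommu_domain_free(domain);
	return ret;
}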
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index bf2fbaad5e2..b12af2ff8c5 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1907,6 +1907,15 @@ static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
+static inline void unlink_domain_info(struct device_domain_info *info)
+{
+ assert_spin_locked(&device_domain_lock);
+ list_del(&info->link);
+ list_del(&info->global);
+ if (info->dev)
+ info->dev->dev.archdata.iommu = NULL;
+}
+
static void domain_remove_dev_info(struct dmar_domain *domain)
{
struct device_domain_info *info;
@@ -1917,10 +1926,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
while (!list_empty(&domain->devices)) {
info = list_entry(domain->devices.next,
struct device_domain_info, link);
- list_del(&info->link);
- list_del(&info->global);
- if (info->dev)
- info->dev->dev.archdata.iommu = NULL;
+ unlink_domain_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags);
iommu_disable_dev_iotlb(info);
@@ -2287,12 +2293,6 @@ static int domain_add_dev_info(struct dmar_domain *domain,
if (!info)
return -ENOMEM;
- ret = domain_context_mapping(domain, pdev, translation);
- if (ret) {
- free_devinfo_mem(info);
- return ret;
- }
-
info->segment = pci_domain_nr(pdev->bus);
info->bus = pdev->bus->number;
info->devfn = pdev->devfn;
@@ -2305,6 +2305,15 @@ static int domain_add_dev_info(struct dmar_domain *domain,
pdev->dev.archdata.iommu = info;
spin_unlock_irqrestore(&device_domain_lock, flags);
+ ret = domain_context_mapping(domain, pdev, translation);
+ if (ret) {
+ spin_lock_irqsave(&device_domain_lock, flags);
+ unlink_domain_info(info);
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ free_devinfo_mem(info);
+ return ret;
+ }
+
return 0;
}
@@ -3728,10 +3737,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
if (info->segment == pci_domain_nr(pdev->bus) &&
info->bus == pdev->bus->number &&
info->devfn == pdev->devfn) {
- list_del(&info->link);
- list_del(&info->global);
- if (info->dev)
- info->dev->dev.archdata.iommu = NULL;
+ unlink_domain_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags);
iommu_disable_dev_iotlb(info);
@@ -3786,11 +3792,7 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
while (!list_empty(&domain->devices)) {
info = list_entry(domain->devices.next,
struct device_domain_info, link);
- list_del(&info->link);
- list_del(&info->global);
- if (info->dev)
- info->dev->dev.archdata.iommu = NULL;
-
+ unlink_domain_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags1);
iommu_disable_dev_iotlb(info);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index ff4b8cfda58..04cb8c88d74 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -50,6 +50,19 @@ config LEDS_LM3530
controlled manually or using PWM input or using ambient
light automatically.
+config LEDS_LM3533
+ tristate "LED support for LM3533"
+ depends on LEDS_CLASS
+ depends on MFD_LM3533
+ help
+ This option enables support for the LEDs on National Semiconductor /
+ TI LM3533 Lighting Power chips.
+
+ The LEDs can be controlled directly, through PWM input, or by the
+ ambient-light-sensor interface. The chip supports
+ hardware-accelerated blinking with maximum on and off periods of 9.8
+ and 77 seconds respectively.
+
config LEDS_LOCOMO
tristate "LED Support for Locomo device"
depends on LEDS_CLASS
@@ -259,6 +272,14 @@ config LEDS_DA903X
This option enables support for on-chip LED drivers found
on Dialog Semiconductor DA9030/DA9034 PMICs.
+config LEDS_DA9052
+ tristate "Dialog DA9052/DA9053 LEDS"
+ depends on LEDS_CLASS
+ depends on PMIC_DA9052
+ help
+ This option enables support for on-chip LED drivers found
+ on Dialog Semiconductor DA9052-BC and DA9053-AA/Bx PMICs.
+
config LEDS_DAC124S085
tristate "LED Support for DAC124S085 SPI DAC"
depends on LEDS_CLASS
@@ -471,4 +492,12 @@ config LEDS_TRIGGER_DEFAULT_ON
comment "iptables trigger is under Netfilter config (LED target)"
depends on LEDS_TRIGGERS
+config LEDS_TRIGGER_TRANSIENT
+ tristate "LED Transient Trigger"
+ depends on LEDS_TRIGGERS
+ help
+ This allows one-time activation of a transient state on
+ GPIO/PWM based hardware.
+ If unsure, say Y.
+
endif # NEW_LEDS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 890481cb09f..f8958cd6cf6 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_LEDS_ATMEL_PWM) += leds-atmel-pwm.o
obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o
obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
obj-$(CONFIG_LEDS_LM3530) += leds-lm3530.o
+obj-$(CONFIG_LEDS_LM3533) += leds-lm3533.o
obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o
obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o
@@ -31,6 +32,7 @@ obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
obj-$(CONFIG_LEDS_PCA9633) += leds-pca9633.o
obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
+obj-$(CONFIG_LEDS_DA9052) += leds-da9052.o
obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o
obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o
obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
@@ -56,3 +58,4 @@ obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o
obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT) += ledtrig-backlight.o
obj-$(CONFIG_LEDS_TRIGGER_GPIO) += ledtrig-gpio.o
obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
+obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT) += ledtrig-transient.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 5bff8439dc6..8ee92c81aec 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -44,23 +44,18 @@ static ssize_t led_brightness_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ unsigned long state;
ssize_t ret = -EINVAL;
- char *after;
- unsigned long state = simple_strtoul(buf, &after, 10);
- size_t count = after - buf;
- if (isspace(*after))
- count++;
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
- if (count == size) {
- ret = count;
+ if (state == LED_OFF)
+ led_trigger_remove(led_cdev);
+ led_set_brightness(led_cdev, state);
- if (state == LED_OFF)
- led_trigger_remove(led_cdev);
- led_set_brightness(led_cdev, state);
- }
-
- return ret;
+ return size;
}
static ssize_t led_max_brightness_show(struct device *dev,
diff --git a/drivers/leds/leds-da9052.c b/drivers/leds/leds-da9052.c
new file mode 100644
index 00000000000..58a5244c437
--- /dev/null
+++ b/drivers/leds/leds-da9052.c
@@ -0,0 +1,214 @@
+/*
+ * LED Driver for Dialog DA9052 PMICs.
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/da9052/reg.h>
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/pdata.h>
+
+#define DA9052_OPENDRAIN_OUTPUT 2
+#define DA9052_SET_HIGH_LVL_OUTPUT (1 << 3)
+#define DA9052_MASK_UPPER_NIBBLE 0xF0
+#define DA9052_MASK_LOWER_NIBBLE 0x0F
+#define DA9052_NIBBLE_SHIFT 4
+#define DA9052_MAX_BRIGHTNESS 0x5f
+
+struct da9052_led {
+ struct led_classdev cdev;
+ struct work_struct work;
+ struct da9052 *da9052;
+ unsigned char led_index;
+ unsigned char id;
+ int brightness;
+};
+
+static unsigned char led_reg[] = {
+ DA9052_LED_CONT_4_REG,
+ DA9052_LED_CONT_5_REG,
+};
+
+static int da9052_set_led_brightness(struct da9052_led *led)
+{
+ u8 val;
+ int error;
+
+ val = (led->brightness & 0x7f) | DA9052_LED_CONT_DIM;
+
+ error = da9052_reg_write(led->da9052, led_reg[led->led_index], val);
+ if (error < 0)
+ dev_err(led->da9052->dev, "Failed to set led brightness, %d\n",
+ error);
+ return error;
+}
+
+static void da9052_led_work(struct work_struct *work)
+{
+ struct da9052_led *led = container_of(work, struct da9052_led, work);
+
+ da9052_set_led_brightness(led);
+}
+
+static void da9052_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct da9052_led *led;
+
+ led = container_of(led_cdev, struct da9052_led, cdev);
+ led->brightness = value;
+ schedule_work(&led->work);
+}
+
+static int da9052_configure_leds(struct da9052 *da9052)
+{
+ int error;
+ unsigned char register_value = DA9052_OPENDRAIN_OUTPUT
+ | DA9052_SET_HIGH_LVL_OUTPUT;
+
+ error = da9052_reg_update(da9052, DA9052_GPIO_14_15_REG,
+ DA9052_MASK_LOWER_NIBBLE,
+ register_value);
+
+ if (error < 0) {
+ dev_err(da9052->dev, "Failed to write GPIO 14-15 reg, %d\n",
+ error);
+ return error;
+ }
+
+ error = da9052_reg_update(da9052, DA9052_GPIO_14_15_REG,
+ DA9052_MASK_UPPER_NIBBLE,
+ register_value << DA9052_NIBBLE_SHIFT);
+ if (error < 0)
+ dev_err(da9052->dev, "Failed to write GPIO 14-15 reg, %d\n",
+ error);
+
+ return error;
+}
+
+static int __devinit da9052_led_probe(struct platform_device *pdev)
+{
+ struct da9052_pdata *pdata;
+ struct da9052 *da9052;
+ struct led_platform_data *pled;
+ struct da9052_led *led = NULL;
+ int error = -ENODEV;
+ int i;
+
+ da9052 = dev_get_drvdata(pdev->dev.parent);
+ pdata = da9052->dev->platform_data;
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "No platform data\n");
+ goto err;
+ }
+
+ pled = pdata->pled;
+ if (pled == NULL) {
+ dev_err(&pdev->dev, "No platform data for LED\n");
+ goto err;
+ }
+
+ led = devm_kzalloc(&pdev->dev,
+ sizeof(struct da9052_led) * pled->num_leds,
+ GFP_KERNEL);
+ if (led == NULL) {
+ dev_err(&pdev->dev, "Failed to alloc memory\n");
+ error = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < pled->num_leds; i++) {
+ led[i].cdev.name = pled->leds[i].name;
+ led[i].cdev.brightness_set = da9052_led_set;
+ led[i].cdev.brightness = LED_OFF;
+ led[i].cdev.max_brightness = DA9052_MAX_BRIGHTNESS;
+ led[i].brightness = LED_OFF;
+ led[i].led_index = pled->leds[i].flags;
+ led[i].da9052 = dev_get_drvdata(pdev->dev.parent);
+ INIT_WORK(&led[i].work, da9052_led_work);
+
+ error = led_classdev_register(pdev->dev.parent, &led[i].cdev);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to register led %d\n",
+ led[i].led_index);
+ goto err_register;
+ }
+
+ error = da9052_set_led_brightness(&led[i]);
+ if (error) {
+ dev_err(&pdev->dev, "Unable to init led %d\n",
+ led[i].led_index);
+ continue;
+ }
+ }
+ error = da9052_configure_leds(led->da9052);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to configure GPIO LEDs: %d\n", error);
+ goto err_register;
+ }
+
+ platform_set_drvdata(pdev, led);
+
+ return 0;
+
+err_register:
+ for (i = i - 1; i >= 0; i--) {
+ led_classdev_unregister(&led[i].cdev);
+ cancel_work_sync(&led[i].work);
+ }
+err:
+ return error;
+}
+
+static int __devexit da9052_led_remove(struct platform_device *pdev)
+{
+ struct da9052_led *led = platform_get_drvdata(pdev);
+ struct da9052_pdata *pdata;
+ struct da9052 *da9052;
+ struct led_platform_data *pled;
+ int i;
+
+ da9052 = dev_get_drvdata(pdev->dev.parent);
+ pdata = da9052->dev->platform_data;
+ pled = pdata->pled;
+
+ for (i = 0; i < pled->num_leds; i++) {
+ led[i].brightness = 0;
+ da9052_set_led_brightness(&led[i]);
+ led_classdev_unregister(&led[i].cdev);
+ cancel_work_sync(&led[i].work);
+ }
+
+ return 0;
+}
+
+static struct platform_driver da9052_led_driver = {
+ .driver = {
+ .name = "da9052-leds",
+ .owner = THIS_MODULE,
+ },
+ .probe = da9052_led_probe,
+ .remove = __devexit_p(da9052_led_remove),
+};
+
+module_platform_driver(da9052_led_driver);
+
+MODULE_AUTHOR("Dialog Semiconductor Ltd <dchen@diasemi.com>");
+MODULE_DESCRIPTION("LED driver for Dialog DA9052 PMIC");
+MODULE_LICENSE("GPL");
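For context, the probe above pulls its LED descriptions out of the parent DA9052 MFD's platform data (pdata->pled), and each led_info.flags value is reused as the index into led_reg[] (LED4/LED5). A hypothetical board-file fragment that would satisfy those expectations might look like this; the names, triggers and the way struct da9052_pdata gets registered are assumptions, not part of this patch.

#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/mfd/da9052/pdata.h>

/* Hypothetical board support: the two LEDs behind DA9052 GPIO14/GPIO15. */
static struct led_info board_da9052_leds[] = {
	{
		.name			= "da9052:led4",
		.default_trigger	= "heartbeat",
		.flags			= 0,	/* index into led_reg[] above */
	},
	{
		.name			= "da9052:led5",
		.flags			= 1,
	},
};

static struct led_platform_data board_da9052_pled = {
	.num_leds	= ARRAY_SIZE(board_da9052_leds),
	.leds		= board_da9052_leds,
};

/* Board code would point da9052_pdata.pled at board_da9052_pled before the
 * DA9052 MFD core instantiates its "da9052-leds" cell (assumed wiring). */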
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index 968fd5fef4f..84ba6de8039 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -113,6 +113,18 @@ struct lm3530_data {
bool enable;
};
+/*
+ * struct lm3530_als_data
+ * @config : value of ALS configuration register
+ * @imp_sel : value of ALS resistor select register
+ * @zone : values of ALS ZB(Zone Boundary) registers
+ */
+struct lm3530_als_data {
+ u8 config;
+ u8 imp_sel;
+ u8 zones[LM3530_ALS_ZB_MAX];
+};
+
static const u8 lm3530_reg[LM3530_REG_MAX] = {
LM3530_GEN_CONFIG,
LM3530_ALS_CONFIG,
@@ -141,29 +153,65 @@ static int lm3530_get_mode_from_str(const char *str)
return -1;
}
+static void lm3530_als_configure(struct lm3530_platform_data *pdata,
+ struct lm3530_als_data *als)
+{
+ int i;
+ u32 als_vmin, als_vmax, als_vstep;
+
+ if (pdata->als_vmax == 0) {
+ pdata->als_vmin = 0;
+ pdata->als_vmax = LM3530_ALS_WINDOW_mV;
+ }
+
+ als_vmin = pdata->als_vmin;
+ als_vmax = pdata->als_vmax;
+
+ if ((als_vmax - als_vmin) > LM3530_ALS_WINDOW_mV)
+ pdata->als_vmax = als_vmax = als_vmin + LM3530_ALS_WINDOW_mV;
+
+ /* n zone boundary makes n+1 zones */
+ als_vstep = (als_vmax - als_vmin) / (LM3530_ALS_ZB_MAX + 1);
+
+ for (i = 0; i < LM3530_ALS_ZB_MAX; i++)
+ als->zones[i] = (((als_vmin + LM3530_ALS_OFFSET_mV) +
+ als_vstep + (i * als_vstep)) * LED_FULL) / 1000;
+
+ als->config =
+ (pdata->als_avrg_time << LM3530_ALS_AVG_TIME_SHIFT) |
+ (LM3530_ENABLE_ALS) |
+ (pdata->als_input_mode << LM3530_ALS_SEL_SHIFT);
+
+ als->imp_sel =
+ (pdata->als1_resistor_sel << LM3530_ALS1_IMP_SHIFT) |
+ (pdata->als2_resistor_sel << LM3530_ALS2_IMP_SHIFT);
+}
+
static int lm3530_init_registers(struct lm3530_data *drvdata)
{
int ret = 0;
int i;
u8 gen_config;
- u8 als_config = 0;
u8 brt_ramp;
- u8 als_imp_sel = 0;
u8 brightness;
u8 reg_val[LM3530_REG_MAX];
- u8 zones[LM3530_ALS_ZB_MAX];
- u32 als_vmin, als_vmax, als_vstep;
struct lm3530_platform_data *pdata = drvdata->pdata;
struct i2c_client *client = drvdata->client;
struct lm3530_pwm_data *pwm = &pdata->pwm_data;
+ struct lm3530_als_data als;
+
+ memset(&als, 0, sizeof(struct lm3530_als_data));
gen_config = (pdata->brt_ramp_law << LM3530_RAMP_LAW_SHIFT) |
((pdata->max_current & 7) << LM3530_MAX_CURR_SHIFT);
switch (drvdata->mode) {
case LM3530_BL_MODE_MANUAL:
+ gen_config |= LM3530_ENABLE_I2C;
+ break;
case LM3530_BL_MODE_ALS:
gen_config |= LM3530_ENABLE_I2C;
+ lm3530_als_configure(pdata, &als);
break;
case LM3530_BL_MODE_PWM:
gen_config |= LM3530_ENABLE_PWM | LM3530_ENABLE_PWM_SIMPLE |
@@ -171,38 +219,6 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
break;
}
- if (drvdata->mode == LM3530_BL_MODE_ALS) {
- if (pdata->als_vmax == 0) {
- pdata->als_vmin = 0;
- pdata->als_vmax = LM3530_ALS_WINDOW_mV;
- }
-
- als_vmin = pdata->als_vmin;
- als_vmax = pdata->als_vmax;
-
- if ((als_vmax - als_vmin) > LM3530_ALS_WINDOW_mV)
- pdata->als_vmax = als_vmax =
- als_vmin + LM3530_ALS_WINDOW_mV;
-
- /* n zone boundary makes n+1 zones */
- als_vstep = (als_vmax - als_vmin) / (LM3530_ALS_ZB_MAX + 1);
-
- for (i = 0; i < LM3530_ALS_ZB_MAX; i++)
- zones[i] = (((als_vmin + LM3530_ALS_OFFSET_mV) +
- als_vstep + (i * als_vstep)) * LED_FULL)
- / 1000;
-
- als_config =
- (pdata->als_avrg_time << LM3530_ALS_AVG_TIME_SHIFT) |
- (LM3530_ENABLE_ALS) |
- (pdata->als_input_mode << LM3530_ALS_SEL_SHIFT);
-
- als_imp_sel =
- (pdata->als1_resistor_sel << LM3530_ALS1_IMP_SHIFT) |
- (pdata->als2_resistor_sel << LM3530_ALS2_IMP_SHIFT);
-
- }
-
brt_ramp = (pdata->brt_ramp_fall << LM3530_BRT_RAMP_FALL_SHIFT) |
(pdata->brt_ramp_rise << LM3530_BRT_RAMP_RISE_SHIFT);
@@ -215,14 +231,14 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
brightness = drvdata->led_dev.max_brightness;
reg_val[0] = gen_config; /* LM3530_GEN_CONFIG */
- reg_val[1] = als_config; /* LM3530_ALS_CONFIG */
+ reg_val[1] = als.config; /* LM3530_ALS_CONFIG */
reg_val[2] = brt_ramp; /* LM3530_BRT_RAMP_RATE */
- reg_val[3] = als_imp_sel; /* LM3530_ALS_IMP_SELECT */
+ reg_val[3] = als.imp_sel; /* LM3530_ALS_IMP_SELECT */
reg_val[4] = brightness; /* LM3530_BRT_CTRL_REG */
- reg_val[5] = zones[0]; /* LM3530_ALS_ZB0_REG */
- reg_val[6] = zones[1]; /* LM3530_ALS_ZB1_REG */
- reg_val[7] = zones[2]; /* LM3530_ALS_ZB2_REG */
- reg_val[8] = zones[3]; /* LM3530_ALS_ZB3_REG */
+ reg_val[5] = als.zones[0]; /* LM3530_ALS_ZB0_REG */
+ reg_val[6] = als.zones[1]; /* LM3530_ALS_ZB1_REG */
+ reg_val[7] = als.zones[2]; /* LM3530_ALS_ZB2_REG */
+ reg_val[8] = als.zones[3]; /* LM3530_ALS_ZB3_REG */
reg_val[9] = LM3530_DEF_ZT_0; /* LM3530_ALS_Z0T_REG */
reg_val[10] = LM3530_DEF_ZT_1; /* LM3530_ALS_Z1T_REG */
reg_val[11] = LM3530_DEF_ZT_2; /* LM3530_ALS_Z2T_REG */
diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c
new file mode 100644
index 00000000000..f56b6e7ffda
--- /dev/null
+++ b/drivers/leds/leds-lm3533.c
@@ -0,0 +1,785 @@
+/*
+ * leds-lm3533.c -- LM3533 LED driver
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+#include <linux/mfd/core.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include <linux/mfd/lm3533.h>
+
+
+#define LM3533_LVCTRLBANK_MIN 2
+#define LM3533_LVCTRLBANK_MAX 5
+#define LM3533_LVCTRLBANK_COUNT 4
+#define LM3533_RISEFALLTIME_MAX 7
+#define LM3533_ALS_CHANNEL_LV_MIN 1
+#define LM3533_ALS_CHANNEL_LV_MAX 2
+
+#define LM3533_REG_CTRLBANK_BCONF_BASE 0x1b
+#define LM3533_REG_PATTERN_ENABLE 0x28
+#define LM3533_REG_PATTERN_LOW_TIME_BASE 0x71
+#define LM3533_REG_PATTERN_HIGH_TIME_BASE 0x72
+#define LM3533_REG_PATTERN_RISETIME_BASE 0x74
+#define LM3533_REG_PATTERN_FALLTIME_BASE 0x75
+
+#define LM3533_REG_PATTERN_STEP 0x10
+
+#define LM3533_REG_CTRLBANK_BCONF_MAPPING_MASK 0x04
+#define LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK 0x02
+#define LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK 0x01
+
+#define LM3533_LED_FLAG_PATTERN_ENABLE 1
+
+
+struct lm3533_led {
+ struct lm3533 *lm3533;
+ struct lm3533_ctrlbank cb;
+ struct led_classdev cdev;
+ int id;
+
+ struct mutex mutex;
+ unsigned long flags;
+
+ struct work_struct work;
+ u8 new_brightness;
+};
+
+
+static inline struct lm3533_led *to_lm3533_led(struct led_classdev *cdev)
+{
+ return container_of(cdev, struct lm3533_led, cdev);
+}
+
+static inline int lm3533_led_get_ctrlbank_id(struct lm3533_led *led)
+{
+ return led->id + 2;
+}
+
+static inline u8 lm3533_led_get_lv_reg(struct lm3533_led *led, u8 base)
+{
+ return base + led->id;
+}
+
+static inline u8 lm3533_led_get_pattern(struct lm3533_led *led)
+{
+ return led->id;
+}
+
+static inline u8 lm3533_led_get_pattern_reg(struct lm3533_led *led,
+ u8 base)
+{
+ return base + lm3533_led_get_pattern(led) * LM3533_REG_PATTERN_STEP;
+}
+
+static int lm3533_led_pattern_enable(struct lm3533_led *led, int enable)
+{
+ u8 mask;
+ u8 val;
+ int pattern;
+ int state;
+ int ret = 0;
+
+ dev_dbg(led->cdev.dev, "%s - %d\n", __func__, enable);
+
+ mutex_lock(&led->mutex);
+
+ state = test_bit(LM3533_LED_FLAG_PATTERN_ENABLE, &led->flags);
+ if ((enable && state) || (!enable && !state))
+ goto out;
+
+ pattern = lm3533_led_get_pattern(led);
+ mask = 1 << (2 * pattern);
+
+ if (enable)
+ val = mask;
+ else
+ val = 0;
+
+ ret = lm3533_update(led->lm3533, LM3533_REG_PATTERN_ENABLE, val, mask);
+ if (ret) {
+ dev_err(led->cdev.dev, "failed to enable pattern %d (%d)\n",
+ pattern, enable);
+ goto out;
+ }
+
+ __change_bit(LM3533_LED_FLAG_PATTERN_ENABLE, &led->flags);
+out:
+ mutex_unlock(&led->mutex);
+
+ return ret;
+}
+
+static void lm3533_led_work(struct work_struct *work)
+{
+ struct lm3533_led *led = container_of(work, struct lm3533_led, work);
+
+ dev_dbg(led->cdev.dev, "%s - %u\n", __func__, led->new_brightness);
+
+ if (led->new_brightness == 0)
+ lm3533_led_pattern_enable(led, 0); /* disable blink */
+
+ lm3533_ctrlbank_set_brightness(&led->cb, led->new_brightness);
+}
+
+static void lm3533_led_set(struct led_classdev *cdev,
+ enum led_brightness value)
+{
+ struct lm3533_led *led = to_lm3533_led(cdev);
+
+ dev_dbg(led->cdev.dev, "%s - %d\n", __func__, value);
+
+ led->new_brightness = value;
+ schedule_work(&led->work);
+}
+
+static enum led_brightness lm3533_led_get(struct led_classdev *cdev)
+{
+ struct lm3533_led *led = to_lm3533_led(cdev);
+ u8 val;
+ int ret;
+
+ ret = lm3533_ctrlbank_get_brightness(&led->cb, &val);
+ if (ret)
+ return ret;
+
+ dev_dbg(led->cdev.dev, "%s - %u\n", __func__, val);
+
+ return val;
+}
+
+/* Pattern generator defines (delays in us). */
+#define LM3533_LED_DELAY1_VMIN 0x00
+#define LM3533_LED_DELAY2_VMIN 0x3d
+#define LM3533_LED_DELAY3_VMIN 0x80
+
+#define LM3533_LED_DELAY1_VMAX (LM3533_LED_DELAY2_VMIN - 1)
+#define LM3533_LED_DELAY2_VMAX (LM3533_LED_DELAY3_VMIN - 1)
+#define LM3533_LED_DELAY3_VMAX 0xff
+
+#define LM3533_LED_DELAY1_TMIN 16384U
+#define LM3533_LED_DELAY2_TMIN 1130496U
+#define LM3533_LED_DELAY3_TMIN 10305536U
+
+#define LM3533_LED_DELAY1_TMAX 999424U
+#define LM3533_LED_DELAY2_TMAX 9781248U
+#define LM3533_LED_DELAY3_TMAX 76890112U
+
+/* t_step = (t_max - t_min) / (v_max - v_min) */
+#define LM3533_LED_DELAY1_TSTEP 16384
+#define LM3533_LED_DELAY2_TSTEP 131072
+#define LM3533_LED_DELAY3_TSTEP 524288
+
+/* Delay limits for hardware accelerated blinking (in ms). */
+#define LM3533_LED_DELAY_ON_MAX \
+ ((LM3533_LED_DELAY2_TMAX + LM3533_LED_DELAY2_TSTEP / 2) / 1000)
+#define LM3533_LED_DELAY_OFF_MAX \
+ ((LM3533_LED_DELAY3_TMAX + LM3533_LED_DELAY3_TSTEP / 2) / 1000)
+
+/*
+ * Returns linear map of *t from [t_min,t_max] to [v_min,v_max] with a step
+ * size of t_step, where
+ *
+ * t_step = (t_max - t_min) / (v_max - v_min)
+ *
+ * and updates *t to reflect the mapped value.
+ */
+static u8 time_to_val(unsigned *t, unsigned t_min, unsigned t_step,
+ u8 v_min, u8 v_max)
+{
+ unsigned val;
+
+ val = (*t + t_step / 2 - t_min) / t_step + v_min;
+
+ *t = t_step * (val - v_min) + t_min;
+
+ return (u8)val;
+}
+
+/*
+ * Returns time code corresponding to *delay (in ms) and updates *delay to
+ * reflect actual hardware delay.
+ *
+ * Hardware supports 256 discrete delay times, divided into three groups with
+ * the following ranges and step-sizes:
+ *
+ * [ 16, 999] [0x00, 0x3e] step 16 ms
+ * [ 1130, 9781] [0x3d, 0x7f] step 131 ms
+ * [10306, 76890] [0x80, 0xff] step 524 ms
+ *
+ * Note that delay group 3 is only available for delay_off.
+ */
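+/*
+ * Worked example (illustrative): a requested delay of 500 ms (500000 us)
+ * falls into group 1, so time_to_val() yields
+ *
+ *	val = (500000 + 16384/2 - 16384) / 16384 = 30 (0x1e)
+ *
+ * and the delay reported back to the caller becomes
+ * (16384 * 30 + 16384 + 500) / 1000 = 508 ms.
+ */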
+static u8 lm3533_led_get_hw_delay(unsigned *delay)
+{
+ unsigned t;
+ u8 val;
+
+ t = *delay * 1000;
+
+ if (t >= (LM3533_LED_DELAY2_TMAX + LM3533_LED_DELAY3_TMIN) / 2) {
+ t = clamp(t, LM3533_LED_DELAY3_TMIN, LM3533_LED_DELAY3_TMAX);
+ val = time_to_val(&t, LM3533_LED_DELAY3_TMIN,
+ LM3533_LED_DELAY3_TSTEP,
+ LM3533_LED_DELAY3_VMIN,
+ LM3533_LED_DELAY3_VMAX);
+ } else if (t >= (LM3533_LED_DELAY1_TMAX + LM3533_LED_DELAY2_TMIN) / 2) {
+ t = clamp(t, LM3533_LED_DELAY2_TMIN, LM3533_LED_DELAY2_TMAX);
+ val = time_to_val(&t, LM3533_LED_DELAY2_TMIN,
+ LM3533_LED_DELAY2_TSTEP,
+ LM3533_LED_DELAY2_VMIN,
+ LM3533_LED_DELAY2_VMAX);
+ } else {
+ t = clamp(t, LM3533_LED_DELAY1_TMIN, LM3533_LED_DELAY1_TMAX);
+ val = time_to_val(&t, LM3533_LED_DELAY1_TMIN,
+ LM3533_LED_DELAY1_TSTEP,
+ LM3533_LED_DELAY1_VMIN,
+ LM3533_LED_DELAY1_VMAX);
+ }
+
+ *delay = (t + 500) / 1000;
+
+ return val;
+}
+
+/*
+ * Set delay register base to *delay (in ms) and update *delay to reflect
+ * actual hardware delay used.
+ */
+static int lm3533_led_delay_set(struct lm3533_led *led, u8 base,
+ unsigned long *delay)
+{
+ unsigned t;
+ u8 val;
+ u8 reg;
+ int ret;
+
+ t = (unsigned)*delay;
+
+ /* Delay group 3 is only available for low time (delay off). */
+ if (base != LM3533_REG_PATTERN_LOW_TIME_BASE)
+ t = min(t, LM3533_LED_DELAY2_TMAX / 1000);
+
+ val = lm3533_led_get_hw_delay(&t);
+
+ dev_dbg(led->cdev.dev, "%s - %lu: %u (0x%02x)\n", __func__,
+ *delay, t, val);
+ reg = lm3533_led_get_pattern_reg(led, base);
+ ret = lm3533_write(led->lm3533, reg, val);
+ if (ret)
+ dev_err(led->cdev.dev, "failed to set delay (%02x)\n", reg);
+
+ *delay = t;
+
+ return ret;
+}
+
+static int lm3533_led_delay_on_set(struct lm3533_led *led, unsigned long *t)
+{
+ return lm3533_led_delay_set(led, LM3533_REG_PATTERN_HIGH_TIME_BASE, t);
+}
+
+static int lm3533_led_delay_off_set(struct lm3533_led *led, unsigned long *t)
+{
+ return lm3533_led_delay_set(led, LM3533_REG_PATTERN_LOW_TIME_BASE, t);
+}
+
+static int lm3533_led_blink_set(struct led_classdev *cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct lm3533_led *led = to_lm3533_led(cdev);
+ int ret;
+
+ dev_dbg(led->cdev.dev, "%s - on = %lu, off = %lu\n", __func__,
+ *delay_on, *delay_off);
+
+ if (*delay_on > LM3533_LED_DELAY_ON_MAX ||
+ *delay_off > LM3533_LED_DELAY_OFF_MAX)
+ return -EINVAL;
+
+ if (*delay_on == 0 && *delay_off == 0) {
+ *delay_on = 500;
+ *delay_off = 500;
+ }
+
+ ret = lm3533_led_delay_on_set(led, delay_on);
+ if (ret)
+ return ret;
+
+ ret = lm3533_led_delay_off_set(led, delay_off);
+ if (ret)
+ return ret;
+
+ return lm3533_led_pattern_enable(led, 1);
+}
+
+static ssize_t show_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", led->id);
+}
+
+/*
+ * Pattern generator rise/fall times:
+ *
+ * 0 - 2048 us (default)
+ * 1 - 262 ms
+ * 2 - 524 ms
+ * 3 - 1.049 s
+ * 4 - 2.097 s
+ * 5 - 4.194 s
+ * 6 - 8.389 s
+ * 7 - 16.78 s
+ */
+static ssize_t show_risefalltime(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, u8 base)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ ssize_t ret;
+ u8 reg;
+ u8 val;
+
+ reg = lm3533_led_get_pattern_reg(led, base);
+ ret = lm3533_read(led->lm3533, reg, &val);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%x\n", val);
+}
+
+static ssize_t show_risetime(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return show_risefalltime(dev, attr, buf,
+ LM3533_REG_PATTERN_RISETIME_BASE);
+}
+
+static ssize_t show_falltime(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return show_risefalltime(dev, attr, buf,
+ LM3533_REG_PATTERN_FALLTIME_BASE);
+}
+
+static ssize_t store_risefalltime(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, u8 base)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ u8 val;
+ u8 reg;
+ int ret;
+
+ if (kstrtou8(buf, 0, &val) || val > LM3533_RISEFALLTIME_MAX)
+ return -EINVAL;
+
+ reg = lm3533_led_get_pattern_reg(led, base);
+ ret = lm3533_write(led->lm3533, reg, val);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t store_risetime(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return store_risefalltime(dev, attr, buf, len,
+ LM3533_REG_PATTERN_RISETIME_BASE);
+}
+
+static ssize_t store_falltime(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return store_risefalltime(dev, attr, buf, len,
+ LM3533_REG_PATTERN_FALLTIME_BASE);
+}
+
+static ssize_t show_als_channel(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ unsigned channel;
+ u8 reg;
+ u8 val;
+ int ret;
+
+ reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+ ret = lm3533_read(led->lm3533, reg, &val);
+ if (ret)
+ return ret;
+
+ channel = (val & LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK) + 1;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", channel);
+}
+
+static ssize_t store_als_channel(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ unsigned channel;
+ u8 reg;
+ u8 val;
+ u8 mask;
+ int ret;
+
+ if (kstrtouint(buf, 0, &channel))
+ return -EINVAL;
+
+ if (channel < LM3533_ALS_CHANNEL_LV_MIN ||
+ channel > LM3533_ALS_CHANNEL_LV_MAX)
+ return -EINVAL;
+
+ reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+ mask = LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK;
+ val = channel - 1;
+
+ ret = lm3533_update(led->lm3533, reg, val, mask);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t show_als_en(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ bool enable;
+ u8 reg;
+ u8 val;
+ int ret;
+
+ reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+ ret = lm3533_read(led->lm3533, reg, &val);
+ if (ret)
+ return ret;
+
+ enable = val & LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", enable);
+}
+
+static ssize_t store_als_en(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ unsigned enable;
+ u8 reg;
+ u8 mask;
+ u8 val;
+ int ret;
+
+ if (kstrtouint(buf, 0, &enable))
+ return -EINVAL;
+
+ reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+ mask = LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK;
+
+ if (enable)
+ val = mask;
+ else
+ val = 0;
+
+ ret = lm3533_update(led->lm3533, reg, val, mask);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t show_linear(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ u8 reg;
+ u8 val;
+ int linear;
+ int ret;
+
+ reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+ ret = lm3533_read(led->lm3533, reg, &val);
+ if (ret)
+ return ret;
+
+ if (val & LM3533_REG_CTRLBANK_BCONF_MAPPING_MASK)
+ linear = 1;
+ else
+ linear = 0;
+
+ return scnprintf(buf, PAGE_SIZE, "%x\n", linear);
+}
+
+static ssize_t store_linear(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ unsigned long linear;
+ u8 reg;
+ u8 mask;
+ u8 val;
+ int ret;
+
+ if (kstrtoul(buf, 0, &linear))
+ return -EINVAL;
+
+ reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+ mask = LM3533_REG_CTRLBANK_BCONF_MAPPING_MASK;
+
+ if (linear)
+ val = mask;
+ else
+ val = 0;
+
+ ret = lm3533_update(led->lm3533, reg, val, mask);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t show_pwm(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ u8 val;
+ int ret;
+
+ ret = lm3533_ctrlbank_get_pwm(&led->cb, &val);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t store_pwm(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ u8 val;
+ int ret;
+
+ if (kstrtou8(buf, 0, &val))
+ return -EINVAL;
+
+ ret = lm3533_ctrlbank_set_pwm(&led->cb, val);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static LM3533_ATTR_RW(als_channel);
+static LM3533_ATTR_RW(als_en);
+static LM3533_ATTR_RW(falltime);
+static LM3533_ATTR_RO(id);
+static LM3533_ATTR_RW(linear);
+static LM3533_ATTR_RW(pwm);
+static LM3533_ATTR_RW(risetime);
+
+static struct attribute *lm3533_led_attributes[] = {
+ &dev_attr_als_channel.attr,
+ &dev_attr_als_en.attr,
+ &dev_attr_falltime.attr,
+ &dev_attr_id.attr,
+ &dev_attr_linear.attr,
+ &dev_attr_pwm.attr,
+ &dev_attr_risetime.attr,
+ NULL,
+};
+
+static umode_t lm3533_led_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ umode_t mode = attr->mode;
+
+ if (attr == &dev_attr_als_channel.attr ||
+ attr == &dev_attr_als_en.attr) {
+ if (!led->lm3533->have_als)
+ mode = 0;
+ }
+
+ return mode;
+}
+
+static struct attribute_group lm3533_led_attribute_group = {
+ .is_visible = lm3533_led_attr_is_visible,
+ .attrs = lm3533_led_attributes
+};
+
+static int __devinit lm3533_led_setup(struct lm3533_led *led,
+ struct lm3533_led_platform_data *pdata)
+{
+ int ret;
+
+ ret = lm3533_ctrlbank_set_max_current(&led->cb, pdata->max_current);
+ if (ret)
+ return ret;
+
+ return lm3533_ctrlbank_set_pwm(&led->cb, pdata->pwm);
+}
+
+static int __devinit lm3533_led_probe(struct platform_device *pdev)
+{
+ struct lm3533 *lm3533;
+ struct lm3533_led_platform_data *pdata;
+ struct lm3533_led *led;
+ int ret;
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ lm3533 = dev_get_drvdata(pdev->dev.parent);
+ if (!lm3533)
+ return -EINVAL;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ if (pdev->id < 0 || pdev->id >= LM3533_LVCTRLBANK_COUNT) {
+ dev_err(&pdev->dev, "illegal LED id %d\n", pdev->id);
+ return -EINVAL;
+ }
+
+ led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ led->lm3533 = lm3533;
+ led->cdev.name = pdata->name;
+ led->cdev.default_trigger = pdata->default_trigger;
+ led->cdev.brightness_set = lm3533_led_set;
+ led->cdev.brightness_get = lm3533_led_get;
+ led->cdev.blink_set = lm3533_led_blink_set;
+ led->cdev.brightness = LED_OFF;
+ led->id = pdev->id;
+
+ mutex_init(&led->mutex);
+ INIT_WORK(&led->work, lm3533_led_work);
+
+ /* The class framework makes a callback to get brightness during
+ * registration, so use the parent device (for error reporting) until
+ * registration completes.
+ */
+ led->cb.lm3533 = lm3533;
+ led->cb.id = lm3533_led_get_ctrlbank_id(led);
+ led->cb.dev = lm3533->dev;
+
+ platform_set_drvdata(pdev, led);
+
+ ret = led_classdev_register(pdev->dev.parent, &led->cdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register LED %d\n", pdev->id);
+ return ret;
+ }
+
+ led->cb.dev = led->cdev.dev;
+
+ ret = sysfs_create_group(&led->cdev.dev->kobj,
+ &lm3533_led_attribute_group);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to create sysfs attributes\n");
+ goto err_unregister;
+ }
+
+ ret = lm3533_led_setup(led, pdata);
+ if (ret)
+ goto err_sysfs_remove;
+
+ ret = lm3533_ctrlbank_enable(&led->cb);
+ if (ret)
+ goto err_sysfs_remove;
+
+ return 0;
+
+err_sysfs_remove:
+ sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group);
+err_unregister:
+ led_classdev_unregister(&led->cdev);
+ flush_work_sync(&led->work);
+
+ return ret;
+}
+
+static int __devexit lm3533_led_remove(struct platform_device *pdev)
+{
+ struct lm3533_led *led = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ lm3533_ctrlbank_disable(&led->cb);
+ sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group);
+ led_classdev_unregister(&led->cdev);
+ flush_work_sync(&led->work);
+
+ return 0;
+}
+
+static void lm3533_led_shutdown(struct platform_device *pdev)
+{
+
+ struct lm3533_led *led = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ lm3533_ctrlbank_disable(&led->cb);
+ lm3533_led_set(&led->cdev, LED_OFF); /* disable blink */
+ flush_work_sync(&led->work);
+}
+
+static struct platform_driver lm3533_led_driver = {
+ .driver = {
+ .name = "lm3533-leds",
+ .owner = THIS_MODULE,
+ },
+ .probe = lm3533_led_probe,
+ .remove = __devexit_p(lm3533_led_remove),
+ .shutdown = lm3533_led_shutdown,
+};
+module_platform_driver(lm3533_led_driver);
+
+MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
+MODULE_DESCRIPTION("LM3533 LED driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lm3533-leds");
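As with the DA9052 driver earlier in this series, the per-LED configuration arrives through platform data: the probe reads pdata->name, ->default_trigger, ->max_current and ->pwm, and derives the control-bank id from pdev->id. A hypothetical descriptor for the first low-voltage bank (pdev->id 0) is sketched below; the current value's unit and the way the parent LM3533 MFD cell carries this pdata are assumptions.

#include <linux/mfd/lm3533.h>

/* Hypothetical board data for one LM3533 low-voltage current sink. */
static struct lm3533_led_platform_data board_lm3533_led = {
	.name			= "lm3533:indicator",
	.default_trigger	= "heartbeat",
	.max_current		= 19800,	/* assumed to be in uA */
	.pwm			= 0,		/* PWM-input enable mask; 0 = off */
};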
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 410a723b869..23815624f35 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -193,9 +193,14 @@ static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
/* move current engine to direct mode and remember the state */
ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT);
+ if (ret)
+ return ret;
+
/* Mode change requires min 500 us delay. 1 - 2 ms with margin */
usleep_range(1000, 2000);
- ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode);
+ ret = lp5521_read(client, LP5521_REG_OP_MODE, &mode);
+ if (ret)
+ return ret;
/* For loading, all the engines to load mode */
lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
@@ -211,8 +216,7 @@ static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
LP5521_PROG_MEM_SIZE,
pattern);
- ret |= lp5521_write(client, LP5521_REG_OP_MODE, mode);
- return ret;
+ return lp5521_write(client, LP5521_REG_OP_MODE, mode);
}
static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr)
@@ -785,7 +789,7 @@ static int __devinit lp5521_probe(struct i2c_client *client,
* LP5521_REG_ENABLE register will not have any effect - strange!
*/
ret = lp5521_read(client, LP5521_REG_R_CURRENT, &buf);
- if (buf != LP5521_REG_R_CURR_DEFAULT) {
+ if (ret || buf != LP5521_REG_R_CURR_DEFAULT) {
dev_err(&client->dev, "error in resetting chip\n");
goto fail2;
}
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index 8bc49154155..4cc6a2e3df3 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -280,7 +280,7 @@ static int __devinit mc13783_led_probe(struct platform_device *pdev)
return -EINVAL;
}
- led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
+ led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
if (led == NULL) {
dev_err(&pdev->dev, "failed to alloc memory\n");
return -ENOMEM;
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index dcc3bc3d38d..5f462dbf0db 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -101,11 +101,16 @@ static const struct i2c_device_id pca955x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pca955x_id);
-struct pca955x_led {
+struct pca955x {
+ struct mutex lock;
+ struct pca955x_led *leds;
struct pca955x_chipdef *chipdef;
struct i2c_client *client;
+};
+
+struct pca955x_led {
+ struct pca955x *pca955x;
struct work_struct work;
- spinlock_t lock;
enum led_brightness brightness;
struct led_classdev led_cdev;
int led_num; /* 0 .. 15 potentially */
@@ -140,7 +145,7 @@ static inline u8 pca955x_ledsel(u8 oldval, int led_num, int state)
*/
static void pca955x_write_psc(struct i2c_client *client, int n, u8 val)
{
- struct pca955x_led *pca955x = i2c_get_clientdata(client);
+ struct pca955x *pca955x = i2c_get_clientdata(client);
i2c_smbus_write_byte_data(client,
pca95xx_num_input_regs(pca955x->chipdef->bits) + 2*n,
@@ -156,7 +161,7 @@ static void pca955x_write_psc(struct i2c_client *client, int n, u8 val)
*/
static void pca955x_write_pwm(struct i2c_client *client, int n, u8 val)
{
- struct pca955x_led *pca955x = i2c_get_clientdata(client);
+ struct pca955x *pca955x = i2c_get_clientdata(client);
i2c_smbus_write_byte_data(client,
pca95xx_num_input_regs(pca955x->chipdef->bits) + 1 + 2*n,
@@ -169,7 +174,7 @@ static void pca955x_write_pwm(struct i2c_client *client, int n, u8 val)
*/
static void pca955x_write_ls(struct i2c_client *client, int n, u8 val)
{
- struct pca955x_led *pca955x = i2c_get_clientdata(client);
+ struct pca955x *pca955x = i2c_get_clientdata(client);
i2c_smbus_write_byte_data(client,
pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n,
@@ -182,7 +187,7 @@ static void pca955x_write_ls(struct i2c_client *client, int n, u8 val)
*/
static u8 pca955x_read_ls(struct i2c_client *client, int n)
{
- struct pca955x_led *pca955x = i2c_get_clientdata(client);
+ struct pca955x *pca955x = i2c_get_clientdata(client);
return (u8) i2c_smbus_read_byte_data(client,
pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n);
@@ -190,18 +195,23 @@ static u8 pca955x_read_ls(struct i2c_client *client, int n)
static void pca955x_led_work(struct work_struct *work)
{
- struct pca955x_led *pca955x;
+ struct pca955x_led *pca955x_led;
+ struct pca955x *pca955x;
u8 ls;
int chip_ls; /* which LSx to use (0-3 potentially) */
int ls_led; /* which set of bits within LSx to use (0-3) */
- pca955x = container_of(work, struct pca955x_led, work);
- chip_ls = pca955x->led_num / 4;
- ls_led = pca955x->led_num % 4;
+ pca955x_led = container_of(work, struct pca955x_led, work);
+ pca955x = pca955x_led->pca955x;
+
+ chip_ls = pca955x_led->led_num / 4;
+ ls_led = pca955x_led->led_num % 4;
+
+ mutex_lock(&pca955x->lock);
ls = pca955x_read_ls(pca955x->client, chip_ls);
- switch (pca955x->brightness) {
+ switch (pca955x_led->brightness) {
case LED_FULL:
ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_ON);
break;
@@ -219,12 +229,15 @@ static void pca955x_led_work(struct work_struct *work)
* OFF, HALF, or FULL. But, this is probably better than
* just turning off for all other values.
*/
- pca955x_write_pwm(pca955x->client, 1, 255-pca955x->brightness);
+ pca955x_write_pwm(pca955x->client, 1,
+ 255 - pca955x_led->brightness);
ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK1);
break;
}
pca955x_write_ls(pca955x->client, chip_ls, ls);
+
+ mutex_unlock(&pca955x->lock);
}
static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness value)
@@ -233,7 +246,6 @@ static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness v
pca955x = container_of(led_cdev, struct pca955x_led, led_cdev);
- spin_lock(&pca955x->lock);
pca955x->brightness = value;
/*
@@ -241,14 +253,13 @@ static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness v
* can sleep.
*/
schedule_work(&pca955x->work);
-
- spin_unlock(&pca955x->lock);
}
static int __devinit pca955x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct pca955x_led *pca955x;
+ struct pca955x *pca955x;
+ struct pca955x_led *pca955x_led;
struct pca955x_chipdef *chip;
struct i2c_adapter *adapter;
struct led_platform_data *pdata;
@@ -282,39 +293,48 @@ static int __devinit pca955x_probe(struct i2c_client *client,
}
}
- pca955x = kzalloc(sizeof(*pca955x) * chip->bits, GFP_KERNEL);
+ pca955x = kzalloc(sizeof(*pca955x), GFP_KERNEL);
if (!pca955x)
return -ENOMEM;
+ pca955x->leds = kzalloc(sizeof(*pca955x_led) * chip->bits, GFP_KERNEL);
+ if (!pca955x->leds) {
+ err = -ENOMEM;
+ goto exit_nomem;
+ }
+
i2c_set_clientdata(client, pca955x);
+ mutex_init(&pca955x->lock);
+ pca955x->client = client;
+ pca955x->chipdef = chip;
+
for (i = 0; i < chip->bits; i++) {
- pca955x[i].chipdef = chip;
- pca955x[i].client = client;
- pca955x[i].led_num = i;
+ pca955x_led = &pca955x->leds[i];
+ pca955x_led->led_num = i;
+ pca955x_led->pca955x = pca955x;
/* Platform data can specify LED names and default triggers */
if (pdata) {
if (pdata->leds[i].name)
- snprintf(pca955x[i].name,
- sizeof(pca955x[i].name), "pca955x:%s",
- pdata->leds[i].name);
+ snprintf(pca955x_led->name,
+ sizeof(pca955x_led->name), "pca955x:%s",
+ pdata->leds[i].name);
if (pdata->leds[i].default_trigger)
- pca955x[i].led_cdev.default_trigger =
+ pca955x_led->led_cdev.default_trigger =
pdata->leds[i].default_trigger;
} else {
- snprintf(pca955x[i].name, sizeof(pca955x[i].name),
+ snprintf(pca955x_led->name, sizeof(pca955x_led->name),
"pca955x:%d", i);
}
- spin_lock_init(&pca955x[i].lock);
-
- pca955x[i].led_cdev.name = pca955x[i].name;
- pca955x[i].led_cdev.brightness_set = pca955x_led_set;
+ pca955x_led->led_cdev.name = pca955x_led->name;
+ pca955x_led->led_cdev.brightness_set = pca955x_led_set;
- INIT_WORK(&pca955x[i].work, pca955x_led_work);
+ INIT_WORK(&pca955x_led->work, pca955x_led_work);
- err = led_classdev_register(&client->dev, &pca955x[i].led_cdev);
+ err = led_classdev_register(&client->dev,
+ &pca955x_led->led_cdev);
if (err < 0)
goto exit;
}
@@ -337,10 +357,12 @@ static int __devinit pca955x_probe(struct i2c_client *client,
exit:
while (i--) {
- led_classdev_unregister(&pca955x[i].led_cdev);
- cancel_work_sync(&pca955x[i].work);
+ led_classdev_unregister(&pca955x->leds[i].led_cdev);
+ cancel_work_sync(&pca955x->leds[i].work);
}
+ kfree(pca955x->leds);
+exit_nomem:
kfree(pca955x);
return err;
@@ -348,14 +370,15 @@ exit:
static int __devexit pca955x_remove(struct i2c_client *client)
{
- struct pca955x_led *pca955x = i2c_get_clientdata(client);
+ struct pca955x *pca955x = i2c_get_clientdata(client);
int i;
for (i = 0; i < pca955x->chipdef->bits; i++) {
- led_classdev_unregister(&pca955x[i].led_cdev);
- cancel_work_sync(&pca955x[i].work);
+ led_classdev_unregister(&pca955x->leds[i].led_cdev);
+ cancel_work_sync(&pca955x->leds[i].work);
}
+ kfree(pca955x->leds);
kfree(pca955x);
return 0;
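
The restructuring above splits the pca955x state into one chip-level object holding the mutex and an array of per-LED entries, but the struct definitions themselves fall outside this excerpt. A minimal sketch of the layout these hunks imply, with field names inferred from the usage shown here rather than copied from the driver header:

struct pca955x_led {
	struct pca955x *pca955x;	/* back-pointer used by the work handler */
	struct work_struct work;
	struct led_classdev led_cdev;
	int led_num;			/* 0 .. chip->bits - 1 */
	char name[32];
	enum led_brightness brightness;
};

struct pca955x {
	struct mutex lock;		/* serializes LS/PWM register updates */
	struct pca955x_led *leds;	/* array of chip->bits entries */
	struct pca955x_chipdef *chipdef;
	struct i2c_client *client;
};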
diff --git a/drivers/leds/ledtrig-backlight.c b/drivers/leds/ledtrig-backlight.c
index 2b513a2ad7d..e2726867c5d 100644
--- a/drivers/leds/ledtrig-backlight.c
+++ b/drivers/leds/ledtrig-backlight.c
@@ -120,6 +120,7 @@ static void bl_trig_activate(struct led_classdev *led)
ret = fb_register_client(&n->notifier);
if (ret)
dev_err(led->dev, "unable to register backlight trigger\n");
+ led->activated = true;
return;
@@ -133,10 +134,11 @@ static void bl_trig_deactivate(struct led_classdev *led)
struct bl_trig_notifier *n =
(struct bl_trig_notifier *) led->trigger_data;
- if (n) {
+ if (led->activated) {
device_remove_file(led->dev, &dev_attr_inverted);
fb_unregister_client(&n->notifier);
kfree(n);
+ led->activated = false;
}
}
diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c
index ecc4bf3f37a..f057c101b89 100644
--- a/drivers/leds/ledtrig-gpio.c
+++ b/drivers/leds/ledtrig-gpio.c
@@ -200,6 +200,7 @@ static void gpio_trig_activate(struct led_classdev *led)
gpio_data->led = led;
led->trigger_data = gpio_data;
INIT_WORK(&gpio_data->work, gpio_trig_work);
+ led->activated = true;
return;
@@ -217,7 +218,7 @@ static void gpio_trig_deactivate(struct led_classdev *led)
{
struct gpio_trig_data *gpio_data = led->trigger_data;
- if (gpio_data) {
+ if (led->activated) {
device_remove_file(led->dev, &dev_attr_gpio);
device_remove_file(led->dev, &dev_attr_inverted);
device_remove_file(led->dev, &dev_attr_desired_brightness);
@@ -225,6 +226,7 @@ static void gpio_trig_deactivate(struct led_classdev *led)
if (gpio_data->gpio != 0)
free_irq(gpio_to_irq(gpio_data->gpio), led);
kfree(gpio_data);
+ led->activated = false;
}
}
diff --git a/drivers/leds/ledtrig-heartbeat.c b/drivers/leds/ledtrig-heartbeat.c
index 759c0bba4a8..41dc76db431 100644
--- a/drivers/leds/ledtrig-heartbeat.c
+++ b/drivers/leds/ledtrig-heartbeat.c
@@ -18,6 +18,7 @@
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/leds.h>
+#include <linux/reboot.h>
#include "leds.h"
struct heartbeat_trig_data {
@@ -83,15 +84,17 @@ static void heartbeat_trig_activate(struct led_classdev *led_cdev)
led_heartbeat_function, (unsigned long) led_cdev);
heartbeat_data->phase = 0;
led_heartbeat_function(heartbeat_data->timer.data);
+ led_cdev->activated = true;
}
static void heartbeat_trig_deactivate(struct led_classdev *led_cdev)
{
struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data;
- if (heartbeat_data) {
+ if (led_cdev->activated) {
del_timer_sync(&heartbeat_data->timer);
kfree(heartbeat_data);
+ led_cdev->activated = false;
}
}
@@ -101,13 +104,38 @@ static struct led_trigger heartbeat_led_trigger = {
.deactivate = heartbeat_trig_deactivate,
};
+static int heartbeat_reboot_notifier(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ led_trigger_unregister(&heartbeat_led_trigger);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block heartbeat_reboot_nb = {
+ .notifier_call = heartbeat_reboot_notifier,
+};
+
+static struct notifier_block heartbeat_panic_nb = {
+ .notifier_call = heartbeat_reboot_notifier,
+};
+
static int __init heartbeat_trig_init(void)
{
- return led_trigger_register(&heartbeat_led_trigger);
+ int rc = led_trigger_register(&heartbeat_led_trigger);
+
+ if (!rc) {
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &heartbeat_panic_nb);
+ register_reboot_notifier(&heartbeat_reboot_nb);
+ }
+ return rc;
}
static void __exit heartbeat_trig_exit(void)
{
+ unregister_reboot_notifier(&heartbeat_reboot_nb);
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &heartbeat_panic_nb);
led_trigger_unregister(&heartbeat_led_trigger);
}
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index 328c64c0841..9010f7abaf2 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -31,21 +31,17 @@ static ssize_t led_delay_on_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
- int ret = -EINVAL;
- char *after;
- unsigned long state = simple_strtoul(buf, &after, 10);
- size_t count = after - buf;
-
- if (isspace(*after))
- count++;
-
- if (count == size) {
- led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
- led_cdev->blink_delay_on = state;
- ret = count;
- }
+ unsigned long state;
+ ssize_t ret = -EINVAL;
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
- return ret;
+ led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
+ led_cdev->blink_delay_on = state;
+
+ return size;
}
static ssize_t led_delay_off_show(struct device *dev,
@@ -60,21 +56,17 @@ static ssize_t led_delay_off_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
- int ret = -EINVAL;
- char *after;
- unsigned long state = simple_strtoul(buf, &after, 10);
- size_t count = after - buf;
-
- if (isspace(*after))
- count++;
-
- if (count == size) {
- led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
- led_cdev->blink_delay_off = state;
- ret = count;
- }
+ unsigned long state;
+ ssize_t ret = -EINVAL;
- return ret;
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
+ led_cdev->blink_delay_off = state;
+
+ return size;
}
static DEVICE_ATTR(delay_on, 0644, led_delay_on_show, led_delay_on_store);
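
Replacing simple_strtoul() with kstrtoul() makes both sysfs stores reject malformed input instead of silently truncating it. A small sketch, assuming a kernel context, of the behaviour the new parser gives:

#include <linux/kernel.h>	/* kstrtoul() */

/*
 * "100\n" succeeds; "100junk" now returns -EINVAL and an out-of-range
 * value returns -ERANGE, where simple_strtoul() silently stopped at the
 * first non-digit.
 */
static int parse_delay(const char *buf, unsigned long *delay)
{
	return kstrtoul(buf, 10, delay);
}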
@@ -95,8 +87,7 @@ static void timer_trig_activate(struct led_classdev *led_cdev)
led_blink_set(led_cdev, &led_cdev->blink_delay_on,
&led_cdev->blink_delay_off);
-
- led_cdev->trigger_data = (void *)1;
+ led_cdev->activated = true;
return;
@@ -106,9 +97,10 @@ err_out_delayon:
static void timer_trig_deactivate(struct led_classdev *led_cdev)
{
- if (led_cdev->trigger_data) {
+ if (led_cdev->activated) {
device_remove_file(led_cdev->dev, &dev_attr_delay_on);
device_remove_file(led_cdev->dev, &dev_attr_delay_off);
+ led_cdev->activated = false;
}
/* Stop blinking */
diff --git a/drivers/leds/ledtrig-transient.c b/drivers/leds/ledtrig-transient.c
new file mode 100644
index 00000000000..83179f435e1
--- /dev/null
+++ b/drivers/leds/ledtrig-transient.c
@@ -0,0 +1,237 @@
+/*
+ * LED Kernel Transient Trigger
+ *
+ * Copyright (C) 2012 Shuah Khan <shuahkhan@gmail.com>
+ *
+ * Based on Richard Purdie's ledtrig-timer.c and Atsushi Nemoto's
+ * ledtrig-heartbeat.c
+ * Design and use-case input from Jonas Bonn <jonas@southpole.se> and
+ * Neil Brown <neilb@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+/*
+ * Transient trigger allows one-shot timer activation. Please refer to
+ * Documentation/leds/ledtrig-transient.txt for details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/leds.h>
+#include "leds.h"
+
+struct transient_trig_data {
+ int activate;
+ int state;
+ int restore_state;
+ unsigned long duration;
+ struct timer_list timer;
+};
+
+static void transient_timer_function(unsigned long data)
+{
+ struct led_classdev *led_cdev = (struct led_classdev *) data;
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+
+ transient_data->activate = 0;
+ led_set_brightness(led_cdev, transient_data->restore_state);
+}
+
+static ssize_t transient_activate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+
+ return sprintf(buf, "%d\n", transient_data->activate);
+}
+
+static ssize_t transient_activate_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ unsigned long state;
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ if (state != 1 && state != 0)
+ return -EINVAL;
+
+ /* cancel the running timer */
+ if (state == 0 && transient_data->activate == 1) {
+ del_timer(&transient_data->timer);
+ transient_data->activate = state;
+ led_set_brightness(led_cdev, transient_data->restore_state);
+ return size;
+ }
+
+ /* start timer if there is no active timer */
+ if (state == 1 && transient_data->activate == 0 &&
+ transient_data->duration != 0) {
+ transient_data->activate = state;
+ led_set_brightness(led_cdev, transient_data->state);
+ transient_data->restore_state =
+ (transient_data->state == LED_FULL) ? LED_OFF : LED_FULL;
+ mod_timer(&transient_data->timer,
+ jiffies + transient_data->duration);
+ }
+
+ /*
+ * state == 0 && transient_data->activate == 0:
+ *   timer is not active - just return.
+ * state == 1 && transient_data->activate == 1:
+ *   timer is already active - just return.
+ */
+
+ return size;
+}
+
+static ssize_t transient_duration_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+
+ return sprintf(buf, "%lu\n", transient_data->duration);
+}
+
+static ssize_t transient_duration_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ unsigned long state;
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ transient_data->duration = state;
+ return size;
+}
+
+static ssize_t transient_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ int state;
+
+ state = (transient_data->state == LED_FULL) ? 1 : 0;
+ return sprintf(buf, "%d\n", state);
+}
+
+static ssize_t transient_state_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ unsigned long state;
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ if (state != 1 && state != 0)
+ return -EINVAL;
+
+ transient_data->state = (state == 1) ? LED_FULL : LED_OFF;
+ return size;
+}
+
+static DEVICE_ATTR(activate, 0644, transient_activate_show,
+ transient_activate_store);
+static DEVICE_ATTR(duration, 0644, transient_duration_show,
+ transient_duration_store);
+static DEVICE_ATTR(state, 0644, transient_state_show, transient_state_store);
+
+static void transient_trig_activate(struct led_classdev *led_cdev)
+{
+ int rc;
+ struct transient_trig_data *tdata;
+
+ tdata = kzalloc(sizeof(struct transient_trig_data), GFP_KERNEL);
+ if (!tdata) {
+ dev_err(led_cdev->dev,
+ "unable to allocate transient trigger\n");
+ return;
+ }
+ led_cdev->trigger_data = tdata;
+
+ rc = device_create_file(led_cdev->dev, &dev_attr_activate);
+ if (rc)
+ goto err_out;
+
+ rc = device_create_file(led_cdev->dev, &dev_attr_duration);
+ if (rc)
+ goto err_out_duration;
+
+ rc = device_create_file(led_cdev->dev, &dev_attr_state);
+ if (rc)
+ goto err_out_state;
+
+ setup_timer(&tdata->timer, transient_timer_function,
+ (unsigned long) led_cdev);
+ led_cdev->activated = true;
+
+ return;
+
+err_out_state:
+ device_remove_file(led_cdev->dev, &dev_attr_duration);
+err_out_duration:
+ device_remove_file(led_cdev->dev, &dev_attr_activate);
+err_out:
+ dev_err(led_cdev->dev, "unable to register transient trigger\n");
+ led_cdev->trigger_data = NULL;
+ kfree(tdata);
+}
+
+static void transient_trig_deactivate(struct led_classdev *led_cdev)
+{
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+
+ if (led_cdev->activated) {
+ del_timer_sync(&transient_data->timer);
+ led_set_brightness(led_cdev, transient_data->restore_state);
+ device_remove_file(led_cdev->dev, &dev_attr_activate);
+ device_remove_file(led_cdev->dev, &dev_attr_duration);
+ device_remove_file(led_cdev->dev, &dev_attr_state);
+ led_cdev->trigger_data = NULL;
+ led_cdev->activated = false;
+ kfree(transient_data);
+ }
+}
+
+static struct led_trigger transient_trigger = {
+ .name = "transient",
+ .activate = transient_trig_activate,
+ .deactivate = transient_trig_deactivate,
+};
+
+static int __init transient_trig_init(void)
+{
+ return led_trigger_register(&transient_trigger);
+}
+
+static void __exit transient_trig_exit(void)
+{
+ led_trigger_unregister(&transient_trigger);
+}
+
+module_init(transient_trig_init);
+module_exit(transient_trig_exit);
+
+MODULE_AUTHOR("Shuah Khan <shuahkhan@gmail.com>");
+MODULE_DESCRIPTION("Transient LED trigger");
+MODULE_LICENSE("GPL");
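
For context, a userspace sketch of how the three attributes created above are meant to be driven; the LED name and sysfs paths are placeholders, and the duration is in raw jiffies as consumed by mod_timer() in the trigger:

#include <stdio.h>

/* Illustrative only: arm a one-shot pulse on a hypothetical "myled" device. */
static int write_attr(const char *attr, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/leds/myled/%s", attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	write_attr("trigger", "transient");	/* select the trigger */
	write_attr("state", "1");		/* drive LED_FULL while active */
	write_attr("duration", "1000");		/* timer length, in jiffies */
	write_attr("activate", "1");		/* start the one-shot timer */
	return 0;
}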
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index 02d54a057b6..f13643d3135 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -511,7 +511,7 @@ static void mx3_camera_activate(struct mx3_camera_dev *mx3_cam,
/* ipu_csi_init_interface() */
csi_reg_write(mx3_cam, conf, CSI_SENS_CONF);
- clk_enable(mx3_cam->clk);
+ clk_prepare_enable(mx3_cam->clk);
rate = clk_round_rate(mx3_cam->clk, mx3_cam->mclk);
dev_dbg(icd->parent, "Set SENS_CONF to %x, rate %ld\n", conf, rate);
if (rate)
@@ -552,7 +552,7 @@ static void mx3_camera_remove_device(struct soc_camera_device *icd)
*ichan = NULL;
}
- clk_disable(mx3_cam->clk);
+ clk_disable_unprepare(mx3_cam->clk);
mx3_cam->icd = NULL;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index f4b4dad7739..e129c820df7 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -106,6 +106,19 @@ config UCB1400_CORE
To compile this driver as a module, choose M here: the
module will be called ucb1400_core.
+config MFD_LM3533
+ tristate "LM3533 Lighting Power chip"
+ depends on I2C
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ Say yes here to enable support for National Semiconductor / TI
+ LM3533 Lighting Power chips.
+
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the LED,
+ backlight or ambient-light-sensor functionality of the device.
+
config TPS6105X
tristate "TPS61050/61052 Boost Converters"
depends on I2C
@@ -177,8 +190,8 @@ config MFD_TPS65910
bool "TPS65910 Power Management chip"
depends on I2C=y && GPIOLIB
select MFD_CORE
- select GPIO_TPS65910
select REGMAP_I2C
+ select IRQ_DOMAIN
help
if you say yes here you get support for the TPS65910 series of
Power Management chips.
@@ -409,6 +422,19 @@ config PMIC_ADP5520
individual components like LCD backlight, LEDs, GPIOs and Keypad
under the corresponding menus.
+config MFD_MAX77693
+ bool "Maxim Semiconductor MAX77693 PMIC Support"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ Say yes here to add support for the Maxim Semiconductor MAX77693.
+ This is a companion Power Management IC with Flash, Haptic, Charger,
+ and MUIC (Micro USB Interface Controller) controls on chip.
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
config MFD_MAX8925
bool "Maxim Semiconductor MAX8925 PMIC Support"
depends on I2C=y && GENERIC_HARDIRQS
@@ -454,9 +480,9 @@ config MFD_S5M_CORE
of the device
config MFD_WM8400
- tristate "Support Wolfson Microelectronics WM8400"
+ bool "Support Wolfson Microelectronics WM8400"
select MFD_CORE
- depends on I2C
+ depends on I2C=y
select REGMAP_I2C
help
Support for the Wolfson Microelectronics WM8400 PMIC and audio
@@ -473,6 +499,7 @@ config MFD_WM831X_I2C
select MFD_CORE
select MFD_WM831X
select REGMAP_I2C
+ select IRQ_DOMAIN
depends on I2C=y && GENERIC_HARDIRQS
help
Support for the Wolfson Microelectronics WM831x and WM832x PMICs
@@ -485,6 +512,7 @@ config MFD_WM831X_SPI
select MFD_CORE
select MFD_WM831X
select REGMAP_SPI
+ select IRQ_DOMAIN
depends on SPI_MASTER && GENERIC_HARDIRQS
help
Support for the Wolfson Microelectronics WM831x and WM832x PMICs
@@ -597,17 +625,32 @@ config MFD_MC13783
tristate
config MFD_MC13XXX
- tristate "Support Freescale MC13783 and MC13892"
- depends on SPI_MASTER
+ tristate
+ depends on SPI_MASTER || I2C
select MFD_CORE
select MFD_MC13783
help
- Support for the Freescale (Atlas) PMIC and audio CODECs
- MC13783 and MC13892.
- This driver provides common support for accessing the device,
+ Enable support for the Freescale MC13783 and MC13892 PMICs.
+ This driver provides common support for accessing the device;
additional drivers must be enabled in order to use the
functionality of the device.
+config MFD_MC13XXX_SPI
+ tristate "Freescale MC13783 and MC13892 SPI interface"
+ depends on SPI_MASTER
+ select REGMAP_SPI
+ select MFD_MC13XXX
+ help
+ Select this if your MC13xxx is connected via an SPI bus.
+
+config MFD_MC13XXX_I2C
+ tristate "Freescale MC13892 I2C interface"
+ depends on I2C
+ select REGMAP_I2C
+ select MFD_MC13XXX
+ help
+ Select this if your MC13xxx is connected via an I2C bus.
+
config ABX500_CORE
bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions"
default y if ARCH_U300 || ARCH_U8500
@@ -651,7 +694,7 @@ config EZX_PCAP
config AB8500_CORE
bool "ST-Ericsson AB8500 Mixed Signal Power Management chip"
- depends on GENERIC_HARDIRQS && ABX500_CORE
+ depends on GENERIC_HARDIRQS && ABX500_CORE && MFD_DB8500_PRCMU
select MFD_CORE
help
Select this option to enable access to AB8500 power management
@@ -722,6 +765,16 @@ config LPC_SCH
LPC bridge function of the Intel SCH provides support for
System Management Bus and General Purpose I/O.
+config LPC_ICH
+ tristate "Intel ICH LPC"
+ depends on PCI
+ select MFD_CORE
+ help
+ The LPC bridge function of the Intel ICH provides support for
+ many functional units. This driver provides needed support for
+ other drivers to control these functions, currently GPIO and
+ watchdog.
+
config MFD_RDC321X
tristate "Support for RDC-R321x southbridge"
select MFD_CORE
@@ -854,6 +907,11 @@ config MFD_RC5T583
Additional drivers must be enabled in order to use the
different functionality of the device.
+config MFD_STA2X11
+ bool "STA2X11 multi function device support"
+ depends on STA2X11
+ select MFD_CORE
+
config MFD_ANATOP
bool "Support for Freescale i.MX on-chip ANATOP controller"
depends on SOC_IMX6Q
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 43672b87805..75f6ed68a4b 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_MFD_DAVINCI_VOICECODEC) += davinci_voicecodec.o
obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
obj-$(CONFIG_MFD_TI_SSP) += ti-ssp.o
+obj-$(CONFIG_MFD_STA2X11) += sta2x11-mfd.o
obj-$(CONFIG_MFD_STMPE) += stmpe.o
obj-$(CONFIG_STMPE_I2C) += stmpe-i2c.o
obj-$(CONFIG_STMPE_SPI) += stmpe-spi.o
@@ -54,6 +55,8 @@ obj-$(CONFIG_TWL6030_PWM) += twl6030-pwm.o
obj-$(CONFIG_TWL6040_CORE) += twl6040-core.o twl6040-irq.o
obj-$(CONFIG_MFD_MC13XXX) += mc13xxx-core.o
+obj-$(CONFIG_MFD_MC13XXX_SPI) += mc13xxx-spi.o
+obj-$(CONFIG_MFD_MC13XXX_I2C) += mc13xxx-i2c.o
obj-$(CONFIG_MFD_CORE) += mfd-core.o
@@ -75,6 +78,7 @@ obj-$(CONFIG_PMIC_DA9052) += da9052-core.o
obj-$(CONFIG_MFD_DA9052_SPI) += da9052-spi.o
obj-$(CONFIG_MFD_DA9052_I2C) += da9052-i2c.o
+obj-$(CONFIG_MFD_MAX77693) += max77693.o max77693-irq.o
max8925-objs := max8925-core.o max8925-i2c.o
obj-$(CONFIG_MFD_MAX8925) += max8925.o
obj-$(CONFIG_MFD_MAX8997) += max8997.o max8997-irq.o
@@ -87,15 +91,15 @@ obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
obj-$(CONFIG_ABX500_CORE) += abx500-core.o
obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
-obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o
obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o
obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_MFD_DB8500_PRCMU) += db8500-prcmu.o
-# ab8500-i2c need to come after db8500-prcmu (which provides the channel)
-obj-$(CONFIG_AB8500_I2C_CORE) += ab8500-i2c.o
+# ab8500-core needs to come after db8500-prcmu (which provides the channel)
+obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o
obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
obj-$(CONFIG_LPC_SCH) += lpc_sch.o
+obj-$(CONFIG_LPC_ICH) += lpc_ich.o
obj-$(CONFIG_MFD_RDC321X) += rdc321x-southbridge.o
obj-$(CONFIG_MFD_JANZ_CMODIO) += janz-cmodio.o
obj-$(CONFIG_MFD_JZ4740_ADC) += jz4740-adc.o
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 1f08704f7ae..dac0e299860 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -18,7 +18,10 @@
#include <linux/mfd/core.h>
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/dbx500-prcmu.h>
#include <linux/regulator/ab8500.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
/*
* Interrupt register offsets
@@ -91,12 +94,24 @@
#define AB8500_IT_MASK23_REG 0x56
#define AB8500_IT_MASK24_REG 0x57
+/*
+ * latch hierarchy registers
+ */
+#define AB8500_IT_LATCHHIER1_REG 0x60
+#define AB8500_IT_LATCHHIER2_REG 0x61
+#define AB8500_IT_LATCHHIER3_REG 0x62
+
+#define AB8500_IT_LATCHHIER_NUM 3
+
#define AB8500_REV_REG 0x80
#define AB8500_IC_NAME_REG 0x82
#define AB8500_SWITCH_OFF_STATUS 0x00
#define AB8500_TURN_ON_STATUS 0x00
+static bool no_bm; /* No battery management */
+module_param(no_bm, bool, S_IRUGO);
+
#define AB9540_MODEM_CTRL2_REG 0x23
#define AB9540_MODEM_CTRL2_SWDBBRSTN_BIT BIT(2)
@@ -125,6 +140,41 @@ static const char ab8500_version_str[][7] = {
[AB8500_VERSION_AB8540] = "AB8540",
};
+static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data)
+{
+ int ret;
+
+ ret = prcmu_abb_write((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
+ if (ret < 0)
+ dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
+ return ret;
+}
+
+static int ab8500_i2c_write_masked(struct ab8500 *ab8500, u16 addr, u8 mask,
+ u8 data)
+{
+ int ret;
+
+ ret = prcmu_abb_write_masked((u8)(addr >> 8), (u8)(addr & 0xFF), &data,
+ &mask, 1);
+ if (ret < 0)
+ dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
+ return ret;
+}
+
+static int ab8500_i2c_read(struct ab8500 *ab8500, u16 addr)
+{
+ int ret;
+ u8 data;
+
+ ret = prcmu_abb_read((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
+ if (ret < 0) {
+ dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
+ return ret;
+ }
+ return (int)data;
+}
+
static int ab8500_get_chip_id(struct device *dev)
{
struct ab8500 *ab8500;
@@ -161,9 +211,13 @@ static int set_register_interruptible(struct ab8500 *ab8500, u8 bank,
static int ab8500_set_register(struct device *dev, u8 bank,
u8 reg, u8 value)
{
+ int ret;
struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
- return set_register_interruptible(ab8500, bank, reg, value);
+ atomic_inc(&ab8500->transfer_ongoing);
+ ret = set_register_interruptible(ab8500, bank, reg, value);
+ atomic_dec(&ab8500->transfer_ongoing);
+ return ret;
}
static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
@@ -192,9 +246,13 @@ static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
static int ab8500_get_register(struct device *dev, u8 bank,
u8 reg, u8 *value)
{
+ int ret;
struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
- return get_register_interruptible(ab8500, bank, reg, value);
+ atomic_inc(&ab8500->transfer_ongoing);
+ ret = get_register_interruptible(ab8500, bank, reg, value);
+ atomic_dec(&ab8500->transfer_ongoing);
+ return ret;
}
static int mask_and_set_register_interruptible(struct ab8500 *ab8500, u8 bank,
@@ -241,11 +299,14 @@ out:
static int ab8500_mask_and_set_register(struct device *dev,
u8 bank, u8 reg, u8 bitmask, u8 bitvalues)
{
+ int ret;
struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
- return mask_and_set_register_interruptible(ab8500, bank, reg,
- bitmask, bitvalues);
-
+ atomic_inc(&ab8500->transfer_ongoing);
+ ret = mask_and_set_register_interruptible(ab8500, bank, reg,
+ bitmask, bitvalues);
+ atomic_dec(&ab8500->transfer_ongoing);
+ return ret;
}
static struct abx500_ops ab8500_ops = {
@@ -264,6 +325,7 @@ static void ab8500_irq_lock(struct irq_data *data)
struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
mutex_lock(&ab8500->irq_lock);
+ atomic_inc(&ab8500->transfer_ongoing);
}
static void ab8500_irq_sync_unlock(struct irq_data *data)
@@ -292,7 +354,7 @@ static void ab8500_irq_sync_unlock(struct irq_data *data)
reg = AB8500_IT_MASK1_REG + ab8500->irq_reg_offset[i];
set_register_interruptible(ab8500, AB8500_INTERRUPT, reg, new);
}
-
+ atomic_dec(&ab8500->transfer_ongoing);
mutex_unlock(&ab8500->irq_lock);
}
@@ -325,6 +387,90 @@ static struct irq_chip ab8500_irq_chip = {
.irq_unmask = ab8500_irq_unmask,
};
+static int ab8500_handle_hierarchical_line(struct ab8500 *ab8500,
+ int latch_offset, u8 latch_val)
+{
+ int int_bit = __ffs(latch_val);
+ int line, i;
+
+ do {
+ int_bit = __ffs(latch_val);
+
+ for (i = 0; i < ab8500->mask_size; i++)
+ if (ab8500->irq_reg_offset[i] == latch_offset)
+ break;
+
+ if (i >= ab8500->mask_size) {
+ dev_err(ab8500->dev, "Register offset 0x%2x not declared\n",
+ latch_offset);
+ return -ENXIO;
+ }
+
+ line = (i << 3) + int_bit;
+ latch_val &= ~(1 << int_bit);
+
+ handle_nested_irq(ab8500->irq_base + line);
+ } while (latch_val);
+
+ return 0;
+}
+
+static int ab8500_handle_hierarchical_latch(struct ab8500 *ab8500,
+ int hier_offset, u8 hier_val)
+{
+ int latch_bit, status;
+ u8 latch_offset, latch_val;
+
+ do {
+ latch_bit = __ffs(hier_val);
+ latch_offset = (hier_offset << 3) + latch_bit;
+
+ /* Fix inconsistent ITFromLatch25 bit mapping... */
+ if (unlikely(latch_offset == 17))
+ latch_offset = 24;
+
+ status = get_register_interruptible(ab8500,
+ AB8500_INTERRUPT,
+ AB8500_IT_LATCH1_REG + latch_offset,
+ &latch_val);
+ if (status < 0 || latch_val == 0)
+ goto discard;
+
+ status = ab8500_handle_hierarchical_line(ab8500,
+ latch_offset, latch_val);
+ if (status < 0)
+ return status;
+discard:
+ hier_val &= ~(1 << latch_bit);
+ } while (hier_val);
+
+ return 0;
+}
+
+static irqreturn_t ab8500_hierarchical_irq(int irq, void *dev)
+{
+ struct ab8500 *ab8500 = dev;
+ u8 i;
+
+ dev_vdbg(ab8500->dev, "interrupt\n");
+
+ /* Hierarchical interrupt version */
+ for (i = 0; i < AB8500_IT_LATCHHIER_NUM; i++) {
+ int status;
+ u8 hier_val;
+
+ status = get_register_interruptible(ab8500, AB8500_INTERRUPT,
+ AB8500_IT_LATCHHIER1_REG + i, &hier_val);
+ if (status < 0 || hier_val == 0)
+ continue;
+
+ status = ab8500_handle_hierarchical_latch(ab8500, i, hier_val);
+ if (status < 0)
+ break;
+ }
+ return IRQ_HANDLED;
+}
+
static irqreturn_t ab8500_irq(int irq, void *dev)
{
struct ab8500 *ab8500 = dev;
@@ -332,6 +478,8 @@ static irqreturn_t ab8500_irq(int irq, void *dev)
dev_vdbg(ab8500->dev, "interrupt\n");
+ atomic_inc(&ab8500->transfer_ongoing);
+
for (i = 0; i < ab8500->mask_size; i++) {
int regoffset = ab8500->irq_reg_offset[i];
int status;
@@ -355,9 +503,10 @@ static irqreturn_t ab8500_irq(int irq, void *dev)
handle_nested_irq(ab8500->irq_base + line);
value &= ~(1 << bit);
+
} while (value);
}
-
+ atomic_dec(&ab8500->transfer_ongoing);
return IRQ_HANDLED;
}
@@ -411,6 +560,14 @@ static void ab8500_irq_remove(struct ab8500 *ab8500)
}
}
+int ab8500_suspend(struct ab8500 *ab8500)
+{
+ if (atomic_read(&ab8500->transfer_ongoing))
+ return -EINVAL;
+ else
+ return 0;
+}
+
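
The transfer_ongoing counter wrapped around every register access lets a platform suspend path refuse to sleep while a PRCMU transfer is in flight. A hedged sketch of such a caller; the hook name is illustrative and the actual wiring is outside this excerpt:

/* Assumes <linux/mfd/abx500/ab8500.h> for struct ab8500 and ab8500_suspend(). */
static int example_ab8500_pm_suspend(struct device *dev)
{
	struct ab8500 *ab8500 = dev_get_drvdata(dev);

	/* returns -EINVAL while transfer_ongoing != 0, 0 otherwise */
	return ab8500_suspend(ab8500);
}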
/* AB8500 GPIO Resources */
static struct resource __devinitdata ab8500_gpio_resources[] = {
{
@@ -744,6 +901,39 @@ static struct resource __devinitdata ab8500_usb_resources[] = {
},
};
+static struct resource __devinitdata ab8505_iddet_resources[] = {
+ {
+ .name = "KeyDeglitch",
+ .start = AB8505_INT_KEYDEGLITCH,
+ .end = AB8505_INT_KEYDEGLITCH,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "KP",
+ .start = AB8505_INT_KP,
+ .end = AB8505_INT_KP,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "IKP",
+ .start = AB8505_INT_IKP,
+ .end = AB8505_INT_IKP,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "IKR",
+ .start = AB8505_INT_IKR,
+ .end = AB8505_INT_IKR,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "KeyStuck",
+ .start = AB8505_INT_KEYSTUCK,
+ .end = AB8505_INT_KEYSTUCK,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
static struct resource __devinitdata ab8500_temp_resources[] = {
{
.name = "AB8500_TEMP_WARM",
@@ -778,35 +968,11 @@ static struct mfd_cell __devinitdata abx500_common_devs[] = {
.resources = ab8500_rtc_resources,
},
{
- .name = "ab8500-charger",
- .num_resources = ARRAY_SIZE(ab8500_charger_resources),
- .resources = ab8500_charger_resources,
- },
- {
- .name = "ab8500-btemp",
- .num_resources = ARRAY_SIZE(ab8500_btemp_resources),
- .resources = ab8500_btemp_resources,
- },
- {
- .name = "ab8500-fg",
- .num_resources = ARRAY_SIZE(ab8500_fg_resources),
- .resources = ab8500_fg_resources,
- },
- {
- .name = "ab8500-chargalg",
- .num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
- .resources = ab8500_chargalg_resources,
- },
- {
.name = "ab8500-acc-det",
.num_resources = ARRAY_SIZE(ab8500_av_acc_detect_resources),
.resources = ab8500_av_acc_detect_resources,
},
{
- .name = "ab8500-codec",
- },
-
- {
.name = "ab8500-poweron-key",
.num_resources = ARRAY_SIZE(ab8500_poweronkey_db_resources),
.resources = ab8500_poweronkey_db_resources,
@@ -834,6 +1000,29 @@ static struct mfd_cell __devinitdata abx500_common_devs[] = {
},
};
+static struct mfd_cell __devinitdata ab8500_bm_devs[] = {
+ {
+ .name = "ab8500-charger",
+ .num_resources = ARRAY_SIZE(ab8500_charger_resources),
+ .resources = ab8500_charger_resources,
+ },
+ {
+ .name = "ab8500-btemp",
+ .num_resources = ARRAY_SIZE(ab8500_btemp_resources),
+ .resources = ab8500_btemp_resources,
+ },
+ {
+ .name = "ab8500-fg",
+ .num_resources = ARRAY_SIZE(ab8500_fg_resources),
+ .resources = ab8500_fg_resources,
+ },
+ {
+ .name = "ab8500-chargalg",
+ .num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
+ .resources = ab8500_chargalg_resources,
+ },
+};
+
static struct mfd_cell __devinitdata ab8500_devs[] = {
{
.name = "ab8500-gpio",
@@ -845,6 +1034,9 @@ static struct mfd_cell __devinitdata ab8500_devs[] = {
.num_resources = ARRAY_SIZE(ab8500_usb_resources),
.resources = ab8500_usb_resources,
},
+ {
+ .name = "ab8500-codec",
+ },
};
static struct mfd_cell __devinitdata ab9540_devs[] = {
@@ -858,6 +1050,18 @@ static struct mfd_cell __devinitdata ab9540_devs[] = {
.num_resources = ARRAY_SIZE(ab8500_usb_resources),
.resources = ab8500_usb_resources,
},
+ {
+ .name = "ab9540-codec",
+ },
+};
+
+/* Device list common to ab9540 and ab8505 */
+static struct mfd_cell __devinitdata ab9540_ab8505_devs[] = {
+ {
+ .name = "ab-iddet",
+ .num_resources = ARRAY_SIZE(ab8505_iddet_resources),
+ .resources = ab8505_iddet_resources,
+ },
};
static ssize_t show_chip_id(struct device *dev,
@@ -1003,18 +1207,66 @@ static struct attribute_group ab9540_attr_group = {
.attrs = ab9540_sysfs_entries,
};
-int __devinit ab8500_init(struct ab8500 *ab8500, enum ab8500_version version)
+static const struct of_device_id ab8500_match[] = {
+ {
+ .compatible = "stericsson,ab8500",
+ .data = (void *)AB8500_VERSION_AB8500,
+ },
+ {},
+};
+
+static int __devinit ab8500_probe(struct platform_device *pdev)
{
- struct ab8500_platform_data *plat = dev_get_platdata(ab8500->dev);
+ struct ab8500_platform_data *plat = dev_get_platdata(&pdev->dev);
+ const struct platform_device_id *platid = platform_get_device_id(pdev);
+ enum ab8500_version version = AB8500_VERSION_UNDEFINED;
+ struct device_node *np = pdev->dev.of_node;
+ struct ab8500 *ab8500;
+ struct resource *resource;
int ret;
int i;
u8 value;
+ ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
+ if (!ab8500)
+ return -ENOMEM;
+
if (plat)
ab8500->irq_base = plat->irq_base;
+ else if (np)
+ ret = of_property_read_u32(np, "stericsson,irq-base", &ab8500->irq_base);
+
+ if (!ab8500->irq_base) {
+ dev_info(&pdev->dev, "couldn't find irq-base\n");
+ ret = -EINVAL;
+ goto out_free_ab8500;
+ }
+
+ ab8500->dev = &pdev->dev;
+
+ resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!resource) {
+ ret = -ENODEV;
+ goto out_free_ab8500;
+ }
+
+ ab8500->irq = resource->start;
+
+ ab8500->read = ab8500_i2c_read;
+ ab8500->write = ab8500_i2c_write;
+ ab8500->write_masked = ab8500_i2c_write_masked;
mutex_init(&ab8500->lock);
mutex_init(&ab8500->irq_lock);
+ atomic_set(&ab8500->transfer_ongoing, 0);
+
+ platform_set_drvdata(pdev, ab8500);
+
+ if (platid)
+ version = platid->driver_data;
+ else if (np)
+ version = (unsigned int)
+ of_match_device(ab8500_match, &pdev->dev)->data;
if (version != AB8500_VERSION_UNDEFINED)
ab8500->version = version;
@@ -1022,7 +1274,7 @@ int __devinit ab8500_init(struct ab8500 *ab8500, enum ab8500_version version)
ret = get_register_interruptible(ab8500, AB8500_MISC,
AB8500_IC_NAME_REG, &value);
if (ret < 0)
- return ret;
+ goto out_free_ab8500;
ab8500->version = value;
}
@@ -1030,7 +1282,7 @@ int __devinit ab8500_init(struct ab8500 *ab8500, enum ab8500_version version)
ret = get_register_interruptible(ab8500, AB8500_MISC,
AB8500_REV_REG, &value);
if (ret < 0)
- return ret;
+ goto out_free_ab8500;
ab8500->chip_id = value;
@@ -1105,30 +1357,57 @@ int __devinit ab8500_init(struct ab8500 *ab8500, enum ab8500_version version)
if (ret)
goto out_freeoldmask;
- ret = request_threaded_irq(ab8500->irq, NULL, ab8500_irq,
- IRQF_ONESHOT | IRQF_NO_SUSPEND,
- "ab8500", ab8500);
+ /*
+ * Activate this feature only in ab9540 until tests are done
+ * on ab8500 1p2 or later.
+ */
+ if (is_ab9540(ab8500))
+ ret = request_threaded_irq(ab8500->irq, NULL,
+ ab8500_hierarchical_irq,
+ IRQF_ONESHOT | IRQF_NO_SUSPEND,
+ "ab8500", ab8500);
+ else
+ ret = request_threaded_irq(ab8500->irq, NULL,
+ ab8500_irq,
+ IRQF_ONESHOT | IRQF_NO_SUSPEND,
+ "ab8500", ab8500);
if (ret)
goto out_removeirq;
}
- ret = mfd_add_devices(ab8500->dev, 0, abx500_common_devs,
- ARRAY_SIZE(abx500_common_devs), NULL,
- ab8500->irq_base);
+ if (!np) {
+ ret = mfd_add_devices(ab8500->dev, 0, abx500_common_devs,
+ ARRAY_SIZE(abx500_common_devs), NULL,
+ ab8500->irq_base);
- if (ret)
- goto out_freeirq;
+ if (ret)
+ goto out_freeirq;
+
+ if (is_ab9540(ab8500))
+ ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
+ ARRAY_SIZE(ab9540_devs), NULL,
+ ab8500->irq_base);
+ else
+ ret = mfd_add_devices(ab8500->dev, 0, ab8500_devs,
+ ARRAY_SIZE(ab8500_devs), NULL,
+ ab8500->irq_base);
+ if (ret)
+ goto out_freeirq;
- if (is_ab9540(ab8500))
- ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
- ARRAY_SIZE(ab9540_devs), NULL,
- ab8500->irq_base);
- else
- ret = mfd_add_devices(ab8500->dev, 0, ab8500_devs,
- ARRAY_SIZE(ab9540_devs), NULL,
- ab8500->irq_base);
- if (ret)
- goto out_freeirq;
+ if (is_ab9540(ab8500) || is_ab8505(ab8500))
+ ret = mfd_add_devices(ab8500->dev, 0, ab9540_ab8505_devs,
+ ARRAY_SIZE(ab9540_ab8505_devs), NULL,
+ ab8500->irq_base);
+ if (ret)
+ goto out_freeirq;
+ }
+
+ if (!no_bm) {
+ /* Add battery management devices */
+ ret = mfd_add_devices(ab8500->dev, 0, ab8500_bm_devs,
+ ARRAY_SIZE(ab8500_bm_devs), NULL,
+ ab8500->irq_base);
+ if (ret)
+ dev_err(ab8500->dev, "error adding bm devices\n");
+ }
if (is_ab9540(ab8500))
ret = sysfs_create_group(&ab8500->dev->kobj,
@@ -1151,12 +1430,16 @@ out_freeoldmask:
kfree(ab8500->oldmask);
out_freemask:
kfree(ab8500->mask);
+out_free_ab8500:
+ kfree(ab8500);
return ret;
}
-int __devexit ab8500_exit(struct ab8500 *ab8500)
+static int __devexit ab8500_remove(struct platform_device *pdev)
{
+ struct ab8500 *ab8500 = platform_get_drvdata(pdev);
+
if (is_ab9540(ab8500))
sysfs_remove_group(&ab8500->dev->kobj, &ab9540_attr_group);
else
@@ -1168,10 +1451,42 @@ int __devexit ab8500_exit(struct ab8500 *ab8500)
}
kfree(ab8500->oldmask);
kfree(ab8500->mask);
+ kfree(ab8500);
return 0;
}
+static const struct platform_device_id ab8500_id[] = {
+ { "ab8500-core", AB8500_VERSION_AB8500 },
+ { "ab8505-i2c", AB8500_VERSION_AB8505 },
+ { "ab9540-i2c", AB8500_VERSION_AB9540 },
+ { "ab8540-i2c", AB8500_VERSION_AB8540 },
+ { }
+};
+
+static struct platform_driver ab8500_core_driver = {
+ .driver = {
+ .name = "ab8500-core",
+ .owner = THIS_MODULE,
+ .of_match_table = ab8500_match,
+ },
+ .probe = ab8500_probe,
+ .remove = __devexit_p(ab8500_remove),
+ .id_table = ab8500_id,
+};
+
+static int __init ab8500_core_init(void)
+{
+ return platform_driver_register(&ab8500_core_driver);
+}
+
+static void __exit ab8500_core_exit(void)
+{
+ platform_driver_unregister(&ab8500_core_driver);
+}
+arch_initcall(ab8500_core_init);
+module_exit(ab8500_core_exit);
+
MODULE_AUTHOR("Mattias Wallin, Srinidhi Kasagar, Rabin Vincent");
MODULE_DESCRIPTION("AB8500 MFD core");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 9a0211aa889..50c4c89ab22 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -608,10 +608,16 @@ static int __devexit ab8500_debug_remove(struct platform_device *plf)
return 0;
}
+static const struct of_device_id ab8500_debug_match[] = {
+ { .compatible = "stericsson,ab8500-debug", },
+ {}
+};
+
static struct platform_driver ab8500_debug_driver = {
.driver = {
.name = "ab8500-debug",
.owner = THIS_MODULE,
+ .of_match_table = ab8500_debug_match,
},
.probe = ab8500_debug_probe,
.remove = __devexit_p(ab8500_debug_remove)
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index c39fc716e1d..b86fd8e1ec3 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -584,7 +584,7 @@ static int __devinit ab8500_gpadc_probe(struct platform_device *pdev)
gpadc->irq = platform_get_irq_byname(pdev, "SW_CONV_END");
if (gpadc->irq < 0) {
- dev_err(gpadc->dev, "failed to get platform irq-%d\n",
+ dev_err(&pdev->dev, "failed to get platform irq-%d\n",
gpadc->irq);
ret = gpadc->irq;
goto fail;
@@ -648,12 +648,18 @@ static int __devexit ab8500_gpadc_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ab8500_gpadc_match[] = {
+ { .compatible = "stericsson,ab8500-gpadc", },
+ {}
+};
+
static struct platform_driver ab8500_gpadc_driver = {
.probe = ab8500_gpadc_probe,
.remove = __devexit_p(ab8500_gpadc_remove),
.driver = {
.name = "ab8500-gpadc",
.owner = THIS_MODULE,
+ .of_match_table = ab8500_gpadc_match,
},
};
diff --git a/drivers/mfd/ab8500-i2c.c b/drivers/mfd/ab8500-i2c.c
deleted file mode 100644
index b83045f102b..00000000000
--- a/drivers/mfd/ab8500-i2c.c
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2010
- * Author: Mattias Wallin <mattias.wallin@stericsson.com> for ST-Ericsson.
- * License Terms: GNU General Public License v2
- * This file was based on drivers/mfd/ab8500-spi.c
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/abx500/ab8500.h>
-#include <linux/mfd/dbx500-prcmu.h>
-
-static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data)
-{
- int ret;
-
- ret = prcmu_abb_write((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
- if (ret < 0)
- dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
- return ret;
-}
-
-static int ab8500_i2c_write_masked(struct ab8500 *ab8500, u16 addr, u8 mask,
- u8 data)
-{
- int ret;
-
- ret = prcmu_abb_write_masked((u8)(addr >> 8), (u8)(addr & 0xFF), &data,
- &mask, 1);
- if (ret < 0)
- dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
- return ret;
-}
-
-static int ab8500_i2c_read(struct ab8500 *ab8500, u16 addr)
-{
- int ret;
- u8 data;
-
- ret = prcmu_abb_read((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
- if (ret < 0) {
- dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
- return ret;
- }
- return (int)data;
-}
-
-static int __devinit ab8500_i2c_probe(struct platform_device *plf)
-{
- const struct platform_device_id *platid = platform_get_device_id(plf);
- struct ab8500 *ab8500;
- struct resource *resource;
- int ret;
-
- ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
- if (!ab8500)
- return -ENOMEM;
-
- ab8500->dev = &plf->dev;
-
- resource = platform_get_resource(plf, IORESOURCE_IRQ, 0);
- if (!resource) {
- kfree(ab8500);
- return -ENODEV;
- }
-
- ab8500->irq = resource->start;
-
- ab8500->read = ab8500_i2c_read;
- ab8500->write = ab8500_i2c_write;
- ab8500->write_masked = ab8500_i2c_write_masked;
-
- platform_set_drvdata(plf, ab8500);
-
- ret = ab8500_init(ab8500, platid->driver_data);
- if (ret)
- kfree(ab8500);
-
-
- return ret;
-}
-
-static int __devexit ab8500_i2c_remove(struct platform_device *plf)
-{
- struct ab8500 *ab8500 = platform_get_drvdata(plf);
-
- ab8500_exit(ab8500);
- kfree(ab8500);
-
- return 0;
-}
-
-static const struct platform_device_id ab8500_id[] = {
- { "ab8500-i2c", AB8500_VERSION_AB8500 },
- { "ab8505-i2c", AB8500_VERSION_AB8505 },
- { "ab9540-i2c", AB8500_VERSION_AB9540 },
- { "ab8540-i2c", AB8500_VERSION_AB8540 },
- { }
-};
-
-static struct platform_driver ab8500_i2c_driver = {
- .driver = {
- .name = "ab8500-i2c",
- .owner = THIS_MODULE,
- },
- .probe = ab8500_i2c_probe,
- .remove = __devexit_p(ab8500_i2c_remove),
- .id_table = ab8500_id,
-};
-
-static int __init ab8500_i2c_init(void)
-{
- return platform_driver_register(&ab8500_i2c_driver);
-}
-
-static void __exit ab8500_i2c_exit(void)
-{
- platform_driver_unregister(&ab8500_i2c_driver);
-}
-arch_initcall(ab8500_i2c_init);
-module_exit(ab8500_i2c_exit);
-
-MODULE_AUTHOR("Mattias WALLIN <mattias.wallin@stericsson.com");
-MODULE_DESCRIPTION("AB8500 Core access via PRCMU I2C");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index c28d4eb1eff..5a3e51ccf25 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -61,10 +61,16 @@ static int __devexit ab8500_sysctrl_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ab8500_sysctrl_match[] = {
+ { .compatible = "stericsson,ab8500-sysctrl", },
+ {}
+};
+
static struct platform_driver ab8500_sysctrl_driver = {
.driver = {
.name = "ab8500-sysctrl",
.owner = THIS_MODULE,
+ .of_match_table = ab8500_sysctrl_match,
},
.probe = ab8500_sysctrl_probe,
.remove = __devexit_p(ab8500_sysctrl_remove),
diff --git a/drivers/mfd/anatop-mfd.c b/drivers/mfd/anatop-mfd.c
index 2af42480635..6da06341f6c 100644
--- a/drivers/mfd/anatop-mfd.c
+++ b/drivers/mfd/anatop-mfd.c
@@ -41,39 +41,26 @@
#include <linux/of_address.h>
#include <linux/mfd/anatop.h>
-u32 anatop_get_bits(struct anatop *adata, u32 addr, int bit_shift,
- int bit_width)
+u32 anatop_read_reg(struct anatop *adata, u32 addr)
{
- u32 val, mask;
-
- if (bit_width == 32)
- mask = ~0;
- else
- mask = (1 << bit_width) - 1;
-
- val = readl(adata->ioreg + addr);
- val = (val >> bit_shift) & mask;
-
- return val;
+ return readl(adata->ioreg + addr);
}
-EXPORT_SYMBOL_GPL(anatop_get_bits);
+EXPORT_SYMBOL_GPL(anatop_read_reg);
-void anatop_set_bits(struct anatop *adata, u32 addr, int bit_shift,
- int bit_width, u32 data)
+void anatop_write_reg(struct anatop *adata, u32 addr, u32 data, u32 mask)
{
- u32 val, mask;
+ u32 val;
- if (bit_width == 32)
- mask = ~0;
- else
- mask = (1 << bit_width) - 1;
+ data &= mask;
spin_lock(&adata->reglock);
- val = readl(adata->ioreg + addr) & ~(mask << bit_shift);
- writel((data << bit_shift) | val, adata->ioreg + addr);
+ val = readl(adata->ioreg + addr);
+ val &= ~mask;
+ val |= data;
+ writel(val, adata->ioreg + addr);
spin_unlock(&adata->reglock);
}
-EXPORT_SYMBOL_GPL(anatop_set_bits);
+EXPORT_SYMBOL_GPL(anatop_write_reg);
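
anatop_get_bits()/anatop_set_bits() give way to whole-register accessors that take an explicit mask, so callers now shift values and masks themselves. A sketch of the translation, assuming a 3-bit field at bit 9 (the field position is illustrative):

/* old: anatop_set_bits(adata, reg, 9, 3, val); */
static void example_update_field(struct anatop *adata, u32 reg, u32 val)
{
	u32 mask = 0x7 << 9;

	anatop_write_reg(adata, reg, val << 9, mask);
}

/* old: return anatop_get_bits(adata, reg, 9, 3); */
static u32 example_read_field(struct anatop *adata, u32 reg)
{
	return (anatop_read_reg(adata, reg) >> 9) & 0x7;
}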
static const struct of_device_id of_anatop_match[] = {
{ .compatible = "fsl,imx6q-anatop", },
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 1582c3d9525..383421bf576 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -353,12 +353,28 @@ static int asic3_gpio_irq_type(struct irq_data *data, unsigned int type)
return 0;
}
+static int asic3_gpio_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ struct asic3 *asic = irq_data_get_irq_chip_data(data);
+ u32 bank, index;
+ u16 bit;
+
+ bank = asic3_irq_to_bank(asic, data->irq);
+ index = asic3_irq_to_index(asic, data->irq);
+ bit = 1<<index;
+
+ asic3_set_register(asic, bank + ASIC3_GPIO_SLEEP_MASK, bit, !on);
+
+ return 0;
+}
+
static struct irq_chip asic3_gpio_irq_chip = {
.name = "ASIC3-GPIO",
.irq_ack = asic3_mask_gpio_irq,
.irq_mask = asic3_mask_gpio_irq,
.irq_unmask = asic3_unmask_gpio_irq,
.irq_set_type = asic3_gpio_irq_type,
+ .irq_set_wake = asic3_gpio_irq_set_wake,
};
static struct irq_chip asic3_irq_chip = {
@@ -529,7 +545,7 @@ static int asic3_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct asic3 *asic = container_of(chip, struct asic3, gpio);
- return (offset < ASIC3_NUM_GPIOS) ? asic->irq_base + offset : -ENXIO;
+ return asic->irq_base + offset;
}
static __init int asic3_gpio_probe(struct platform_device *pdev,
@@ -894,10 +910,13 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
asic3_mmc_resources[0].start >>= asic->bus_shift;
asic3_mmc_resources[0].end >>= asic->bus_shift;
- ret = mfd_add_devices(&pdev->dev, pdev->id,
+ if (pdata->clock_rate) {
+ ds1wm_pdata.clock_rate = pdata->clock_rate;
+ ret = mfd_add_devices(&pdev->dev, pdev->id,
&asic3_cell_ds1wm, 1, mem, asic->irq_base);
- if (ret < 0)
- goto out;
+ if (ret < 0)
+ goto out;
+ }
if (mem_sdio && (irq >= 0)) {
ret = mfd_add_devices(&pdev->dev, pdev->id,
@@ -1000,6 +1019,9 @@ static int __init asic3_probe(struct platform_device *pdev)
asic3_mfd_probe(pdev, pdata, mem);
+ asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
+ (ASIC3_EXTCF_CF0_BUF_EN|ASIC3_EXTCF_CF0_PWAIT_EN), 1);
+
dev_info(asic->dev, "ASIC3 Core driver\n");
return 0;
@@ -1021,6 +1043,9 @@ static int __devexit asic3_remove(struct platform_device *pdev)
int ret;
struct asic3 *asic = platform_get_drvdata(pdev);
+ asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
+ (ASIC3_EXTCF_CF0_BUF_EN|ASIC3_EXTCF_CF0_PWAIT_EN), 0);
+
asic3_mfd_remove(pdev);
ret = asic3_gpio_remove(pdev);
diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c
index 315fef5d466..3419e726de4 100644
--- a/drivers/mfd/cs5535-mfd.c
+++ b/drivers/mfd/cs5535-mfd.c
@@ -186,18 +186,7 @@ static struct pci_driver cs5535_mfd_driver = {
.remove = __devexit_p(cs5535_mfd_remove),
};
-static int __init cs5535_mfd_init(void)
-{
- return pci_register_driver(&cs5535_mfd_driver);
-}
-
-static void __exit cs5535_mfd_exit(void)
-{
- pci_unregister_driver(&cs5535_mfd_driver);
-}
-
-module_init(cs5535_mfd_init);
-module_exit(cs5535_mfd_exit);
+module_pci_driver(cs5535_mfd_driver);
MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
MODULE_DESCRIPTION("MFD driver for CS5535/CS5536 southbridge's ISA PCI device");
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
index 7776aff4626..1f1313c9057 100644
--- a/drivers/mfd/da9052-core.c
+++ b/drivers/mfd/da9052-core.c
@@ -318,6 +318,135 @@ static bool da9052_reg_volatile(struct device *dev, unsigned int reg)
}
}
+/*
+ * The TBAT look-up table is computed from the R90 register (8-bit register)
+ * reading as below. The battery temperature is in milli-degrees Centigrade:
+ * TBAT = (1/(t1 + 1/298) - 273) * 1000 mC
+ * where t1 = (1/B) * ln((ADCval * 2.5)/(R25*ITBAT*255))
+ * Default values are R25 = 10e3, B = 3380, ITBAT = 50e-6
+ * Example:
+ * R25 = 10e3, B = 3380, ITBAT = 50e-6, ADCVAL = 62 gives
+ * TBAT = 20015 milli-degrees Centigrade
+ */
+static const int32_t tbat_lookup[255] = {
+ 183258, 144221, 124334, 111336, 101826, 94397, 88343, 83257,
+ 78889, 75071, 71688, 68656, 65914, 63414, 61120, 59001,
+ 570366, 55204, 53490, 51881, 50364, 48931, 47574, 46285,
+ 45059, 43889, 42772, 41703, 40678, 39694, 38748, 37838,
+ 36961, 36115, 35297, 34507, 33743, 33002, 32284, 31588,
+ 30911, 30254, 29615, 28994, 28389, 27799, 27225, 26664,
+ 26117, 25584, 25062, 24553, 24054, 23567, 23091, 22624,
+ 22167, 21719, 21281, 20851, 20429, 20015, 19610, 19211,
+ 18820, 18436, 18058, 17688, 17323, 16965, 16612, 16266,
+ 15925, 15589, 15259, 14933, 14613, 14298, 13987, 13681,
+ 13379, 13082, 12788, 12499, 12214, 11933, 11655, 11382,
+ 11112, 10845, 10582, 10322, 10066, 9812, 9562, 9315,
+ 9071, 8830, 8591, 8356, 8123, 7893, 7665, 7440,
+ 7218, 6998, 6780, 6565, 6352, 6141, 5933, 5726,
+ 5522, 5320, 5120, 4922, 4726, 4532, 4340, 4149,
+ 3961, 3774, 3589, 3406, 3225, 3045, 2867, 2690,
+ 2516, 2342, 2170, 2000, 1831, 1664, 1498, 1334,
+ 1171, 1009, 849, 690, 532, 376, 221, 67,
+ -84, -236, -386, -535, -683, -830, -975, -1119,
+ -1263, -1405, -1546, -1686, -1825, -1964, -2101, -2237,
+ -2372, -2506, -2639, -2771, -2902, -3033, -3162, -3291,
+ -3418, -3545, -3671, -3796, -3920, -4044, -4166, -4288,
+ -4409, -4529, -4649, -4767, -4885, -5002, -5119, -5235,
+ -5349, -5464, -5577, -5690, -5802, -5913, -6024, -6134,
+ -6244, -6352, -6461, -6568, -6675, -6781, -6887, -6992,
+ -7096, -7200, -7303, -7406, -7508, -7609, -7710, -7810,
+ -7910, -8009, -8108, -8206, -8304, -8401, -8497, -8593,
+ -8689, -8784, -8878, -8972, -9066, -9159, -9251, -9343,
+ -9435, -9526, -9617, -9707, -9796, -9886, -9975, -10063,
+ -10151, -10238, -10325, -10412, -10839, -10923, -11007, -11090,
+ -11173, -11256, -11338, -11420, -11501, -11583, -11663, -11744,
+ -11823, -11903, -11982
+};
+
+static const u8 chan_mux[DA9052_ADC_VBBAT + 1] = {
+ [DA9052_ADC_VDDOUT] = DA9052_ADC_MAN_MUXSEL_VDDOUT,
+ [DA9052_ADC_ICH] = DA9052_ADC_MAN_MUXSEL_ICH,
+ [DA9052_ADC_TBAT] = DA9052_ADC_MAN_MUXSEL_TBAT,
+ [DA9052_ADC_VBAT] = DA9052_ADC_MAN_MUXSEL_VBAT,
+ [DA9052_ADC_IN4] = DA9052_ADC_MAN_MUXSEL_AD4,
+ [DA9052_ADC_IN5] = DA9052_ADC_MAN_MUXSEL_AD5,
+ [DA9052_ADC_IN6] = DA9052_ADC_MAN_MUXSEL_AD6,
+ [DA9052_ADC_VBBAT] = DA9052_ADC_MAN_MUXSEL_VBBAT
+};
+
+int da9052_adc_manual_read(struct da9052 *da9052, unsigned char channel)
+{
+ int ret;
+ unsigned short calc_data;
+ unsigned short data;
+ unsigned char mux_sel;
+
+ if (channel > DA9052_ADC_VBBAT)
+ return -EINVAL;
+
+ mutex_lock(&da9052->auxadc_lock);
+
+ /* Channel gets activated on enabling the Conversion bit */
+ mux_sel = chan_mux[channel] | DA9052_ADC_MAN_MAN_CONV;
+
+ ret = da9052_reg_write(da9052, DA9052_ADC_MAN_REG, mux_sel);
+ if (ret < 0)
+ goto err;
+
+ /* Wait for an interrupt */
+ if (!wait_for_completion_timeout(&da9052->done,
+ msecs_to_jiffies(500))) {
+ dev_err(da9052->dev,
+ "timeout waiting for ADC conversion interrupt\n");
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ ret = da9052_reg_read(da9052, DA9052_ADC_RES_H_REG);
+ if (ret < 0)
+ goto err;
+
+ calc_data = (unsigned short)ret;
+ data = calc_data << 2;
+
+ ret = da9052_reg_read(da9052, DA9052_ADC_RES_L_REG);
+ if (ret < 0)
+ goto err;
+
+ calc_data = (unsigned short)(ret & DA9052_ADC_RES_LSB);
+ data |= calc_data;
+
+ ret = data;
+
+err:
+ mutex_unlock(&da9052->auxadc_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(da9052_adc_manual_read);
+
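
A short sketch of how a sub-driver might consume the manual-read helper exported above; the caller and its error handling are illustrative only:

/* Read the battery-voltage channel. */
static int example_read_vbat(struct da9052 *da9052)
{
	/* raw 10-bit conversion (RES_H << 2 | low bits of RES_L),
	 * or a negative errno such as -ETIMEDOUT on failure */
	return da9052_adc_manual_read(da9052, DA9052_ADC_VBAT);
}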
+static irqreturn_t da9052_auxadc_irq(int irq, void *irq_data)
+{
+ struct da9052 *da9052 = irq_data;
+
+ complete(&da9052->done);
+
+ return IRQ_HANDLED;
+}
+
+int da9052_adc_read_temp(struct da9052 *da9052)
+{
+ int tbat;
+
+ tbat = da9052_reg_read(da9052, DA9052_TBAT_RES_REG);
+ if (tbat <= 0)
+ return tbat;
+
+ /* ARRAY_SIZE check is not needed since TBAT is an 8-bit register */
+ return tbat_lookup[tbat - 1];
+}
+EXPORT_SYMBOL_GPL(da9052_adc_read_temp);
+
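
For reference, a userspace sketch of the formula the look-up table encodes; the result for a given ADC value lands close to the example in the comment, with small differences down to rounding conventions:

#include <math.h>

/* Compile with -lm.  tbat_millicelsius(62) comes out around 20000 mC. */
static long tbat_millicelsius(unsigned int adcval)
{
	const double R25 = 10e3, B = 3380.0, ITBAT = 50e-6;
	double t1, tbat;

	t1 = (1.0 / B) * log((adcval * 2.5) / (R25 * ITBAT * 255.0));
	tbat = (1.0 / (t1 + 1.0 / 298.0) - 273.0) * 1000.0;
	return (long)tbat;
}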
static struct resource da9052_rtc_resource = {
.name = "ALM",
.start = DA9052_IRQ_ALARM,
@@ -646,6 +775,9 @@ int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id)
struct irq_desc *desc;
int ret;
+ mutex_init(&da9052->auxadc_lock);
+ init_completion(&da9052->done);
+
if (pdata && pdata->init != NULL)
pdata->init(da9052);
@@ -665,6 +797,12 @@ int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id)
da9052->irq_base = regmap_irq_chip_get_base(da9052->irq_data);
+ ret = request_threaded_irq(DA9052_IRQ_ADC_EOM, NULL, da9052_auxadc_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "adc irq", da9052);
+ if (ret != 0)
+ dev_err(da9052->dev, "DA9052 ADC IRQ failed ret=%d\n", ret);
+
ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info,
ARRAY_SIZE(da9052_subdev_info), NULL, 0);
if (ret)
@@ -673,6 +811,7 @@ int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id)
return 0;
err:
+ free_irq(DA9052_IRQ_ADC_EOM, da9052);
mfd_remove_devices(da9052->dev);
regmap_err:
return ret;
@@ -680,6 +819,7 @@ regmap_err:
void da9052_device_exit(struct da9052 *da9052)
{
+ free_irq(DA9052_IRQ_ADC_EOM, da9052);
regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);
mfd_remove_devices(da9052->dev);
}
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index 36b88e39549..82c9d645028 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -22,6 +22,11 @@
#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>
+#ifdef CONFIG_OF
+#include <linux/of.h>
+#include <linux/of_device.h>
+#endif
+
static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
{
int reg_val, ret;
@@ -41,13 +46,31 @@ static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
return 0;
}
+static struct i2c_device_id da9052_i2c_id[] = {
+ {"da9052", DA9052},
+ {"da9053-aa", DA9053_AA},
+ {"da9053-ba", DA9053_BA},
+ {"da9053-bb", DA9053_BB},
+ {}
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id dialog_dt_ids[] = {
+ { .compatible = "dlg,da9052", .data = &da9052_i2c_id[0] },
+ { .compatible = "dlg,da9053-aa", .data = &da9052_i2c_id[1] },
+ { .compatible = "dlg,da9053-ab", .data = &da9052_i2c_id[2] },
+ { .compatible = "dlg,da9053-bb", .data = &da9052_i2c_id[3] },
+ { /* sentinel */ }
+};
+#endif
+
static int __devinit da9052_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct da9052 *da9052;
int ret;
- da9052 = kzalloc(sizeof(struct da9052), GFP_KERNEL);
+ da9052 = devm_kzalloc(&client->dev, sizeof(struct da9052), GFP_KERNEL);
if (!da9052)
return -ENOMEM;
@@ -55,8 +78,7 @@ static int __devinit da9052_i2c_probe(struct i2c_client *client,
I2C_FUNC_SMBUS_BYTE_DATA)) {
dev_info(&client->dev, "Error in %s:i2c_check_functionality\n",
__func__);
- ret = -ENODEV;
- goto err;
+ return -ENODEV;
}
da9052->dev = &client->dev;
@@ -64,29 +86,39 @@ static int __devinit da9052_i2c_probe(struct i2c_client *client,
i2c_set_clientdata(client, da9052);
- da9052->regmap = regmap_init_i2c(client, &da9052_regmap_config);
+ da9052->regmap = devm_regmap_init_i2c(client, &da9052_regmap_config);
if (IS_ERR(da9052->regmap)) {
ret = PTR_ERR(da9052->regmap);
dev_err(&client->dev, "Failed to allocate register map: %d\n",
ret);
- goto err;
+ return ret;
}
ret = da9052_i2c_enable_multiwrite(da9052);
if (ret < 0)
- goto err_regmap;
+ return ret;
+
+#ifdef CONFIG_OF
+ if (!id) {
+ struct device_node *np = client->dev.of_node;
+ const struct of_device_id *deviceid;
+
+ deviceid = of_match_node(dialog_dt_ids, np);
+ id = (const struct i2c_device_id *)deviceid->data;
+ }
+#endif
+
+ if (!id) {
+ ret = -ENODEV;
+ dev_err(&client->dev, "id is null.\n");
+ return ret;
+ }
ret = da9052_device_init(da9052, id->driver_data);
if (ret != 0)
- goto err_regmap;
+ return ret;
return 0;
-
-err_regmap:
- regmap_exit(da9052->regmap);
-err:
- kfree(da9052);
- return ret;
}
static int __devexit da9052_i2c_remove(struct i2c_client *client)
@@ -94,20 +126,9 @@ static int __devexit da9052_i2c_remove(struct i2c_client *client)
struct da9052 *da9052 = i2c_get_clientdata(client);
da9052_device_exit(da9052);
- regmap_exit(da9052->regmap);
- kfree(da9052);
-
return 0;
}
-static struct i2c_device_id da9052_i2c_id[] = {
- {"da9052", DA9052},
- {"da9053-aa", DA9053_AA},
- {"da9053-ba", DA9053_BA},
- {"da9053-bb", DA9053_BB},
- {}
-};
-
static struct i2c_driver da9052_i2c_driver = {
.probe = da9052_i2c_probe,
.remove = __devexit_p(da9052_i2c_remove),
@@ -115,6 +136,9 @@ static struct i2c_driver da9052_i2c_driver = {
.driver = {
.name = "da9052",
.owner = THIS_MODULE,
+#ifdef CONFIG_OF
+ .of_match_table = dialog_dt_ids,
+#endif
},
};
diff --git a/drivers/mfd/da9052-spi.c b/drivers/mfd/da9052-spi.c
index 6faf149e8d9..dbeadc5a643 100644
--- a/drivers/mfd/da9052-spi.c
+++ b/drivers/mfd/da9052-spi.c
@@ -25,8 +25,9 @@ static int __devinit da9052_spi_probe(struct spi_device *spi)
{
int ret;
const struct spi_device_id *id = spi_get_device_id(spi);
- struct da9052 *da9052 = kzalloc(sizeof(struct da9052), GFP_KERNEL);
+ struct da9052 *da9052;
+ da9052 = devm_kzalloc(&spi->dev, sizeof(struct da9052), GFP_KERNEL);
if (!da9052)
return -ENOMEM;
@@ -42,25 +43,19 @@ static int __devinit da9052_spi_probe(struct spi_device *spi)
da9052_regmap_config.read_flag_mask = 1;
da9052_regmap_config.write_flag_mask = 0;
- da9052->regmap = regmap_init_spi(spi, &da9052_regmap_config);
+ da9052->regmap = devm_regmap_init_spi(spi, &da9052_regmap_config);
if (IS_ERR(da9052->regmap)) {
ret = PTR_ERR(da9052->regmap);
dev_err(&spi->dev, "Failed to allocate register map: %d\n",
ret);
- goto err;
+ return ret;
}
ret = da9052_device_init(da9052, id->driver_data);
if (ret != 0)
- goto err_regmap;
+ return ret;
return 0;
-
-err_regmap:
- regmap_exit(da9052->regmap);
-err:
- kfree(da9052);
- return ret;
}
static int __devexit da9052_spi_remove(struct spi_device *spi)
@@ -68,9 +63,6 @@ static int __devexit da9052_spi_remove(struct spi_device *spi)
struct da9052 *da9052 = dev_get_drvdata(&spi->dev);
da9052_device_exit(da9052);
- regmap_exit(da9052->regmap);
- kfree(da9052);
-
return 0;
}
@@ -88,7 +80,6 @@ static struct spi_driver da9052_spi_driver = {
.id_table = da9052_spi_id,
.driver = {
.name = "da9052",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
};
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 5be32489714..671c8bc14bb 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2720,6 +2720,7 @@ static struct regulator_consumer_supply db8500_vape_consumers[] = {
REGULATOR_SUPPLY("v-i2c", "nmk-i2c.1"),
REGULATOR_SUPPLY("v-i2c", "nmk-i2c.2"),
REGULATOR_SUPPLY("v-i2c", "nmk-i2c.3"),
+ REGULATOR_SUPPLY("v-i2c", "nmk-i2c.4"),
/* "v-mmc" changed to "vcore" in the mainline kernel */
REGULATOR_SUPPLY("vcore", "sdi0"),
REGULATOR_SUPPLY("vcore", "sdi1"),
@@ -2958,9 +2959,10 @@ static struct mfd_cell db8500_prcmu_devs[] = {
* prcmu_fw_init - arch init call for the Linux PRCMU fw init logic
*
*/
-static int __init db8500_prcmu_probe(struct platform_device *pdev)
+static int __devinit db8500_prcmu_probe(struct platform_device *pdev)
{
- int err = 0;
+ struct device_node *np = pdev->dev.of_node;
+ int irq = 0, err = 0;
if (ux500_is_svp())
return -ENODEV;
@@ -2970,8 +2972,14 @@ static int __init db8500_prcmu_probe(struct platform_device *pdev)
/* Clean up the mailbox interrupts after pre-kernel code. */
writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR);
- err = request_threaded_irq(IRQ_DB8500_PRCMU1, prcmu_irq_handler,
- prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
+ if (np)
+ irq = platform_get_irq(pdev, 0);
+
+ if (!np || irq <= 0)
+ irq = IRQ_DB8500_PRCMU1;
+
+ err = request_threaded_irq(irq, prcmu_irq_handler,
+ prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
if (err < 0) {
pr_err("prcmu: Failed to allocate IRQ_DB8500_PRCMU1.\n");
err = -EBUSY;
@@ -2981,14 +2989,16 @@ static int __init db8500_prcmu_probe(struct platform_device *pdev)
if (cpu_is_u8500v20_or_later())
prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
- err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
- ARRAY_SIZE(db8500_prcmu_devs), NULL,
- 0);
+ if (!np) {
+ err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
+ ARRAY_SIZE(db8500_prcmu_devs), NULL, 0);
+ if (err) {
+ pr_err("prcmu: Failed to add subdevices\n");
+ return err;
+ }
+ }
- if (err)
- pr_err("prcmu: Failed to add subdevices\n");
- else
- pr_info("DB8500 PRCMU initialized\n");
+ pr_info("DB8500 PRCMU initialized\n");
no_irq_return:
return err;
@@ -2999,11 +3009,12 @@ static struct platform_driver db8500_prcmu_driver = {
.name = "db8500-prcmu",
.owner = THIS_MODULE,
},
+ .probe = db8500_prcmu_probe,
};
static int __init db8500_prcmu_init(void)
{
- return platform_driver_probe(&db8500_prcmu_driver, db8500_prcmu_probe);
+ return platform_driver_register(&db8500_prcmu_driver);
}
arch_initcall(db8500_prcmu_init);
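
One note on the registration change above: platform_driver_probe() only binds devices that are already registered when it is called and then drops the probe routine, which is why the probe could previously be __init. Switching to platform_driver_register() keeps the probe available so a PRCMU device created later, for example from the device tree, can still bind; hence the probe becomes __devinit and asks the platform device for its interrupt first, falling back to the static IRQ_DB8500_PRCMU1. A stripped-down sketch of that fallback, where FOO_STATIC_IRQ, foo_irq_handler and foo_irq_thread are hypothetical stand-ins:

static int __devinit foo_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	int irq = 0;

	/* Prefer the interrupt described by the device tree node ... */
	if (np)
		irq = platform_get_irq(pdev, 0);

	/* ... and fall back to the board's static number otherwise. */
	if (!np || irq <= 0)
		irq = FOO_STATIC_IRQ;

	return request_threaded_irq(irq, foo_irq_handler, foo_irq_thread,
				    IRQF_NO_SUSPEND, "foo", NULL);
}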
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
index b76657eb0c5..59df5584cb5 100644
--- a/drivers/mfd/intel_msic.c
+++ b/drivers/mfd/intel_msic.c
@@ -406,7 +406,7 @@ static int __devinit intel_msic_probe(struct platform_device *pdev)
return -ENXIO;
}
- msic = kzalloc(sizeof(*msic), GFP_KERNEL);
+ msic = devm_kzalloc(&pdev->dev, sizeof(*msic), GFP_KERNEL);
if (!msic)
return -ENOMEM;
@@ -421,21 +421,13 @@ static int __devinit intel_msic_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "failed to get SRAM iomem resource\n");
- ret = -ENODEV;
- goto fail_free_msic;
+ return -ENODEV;
}
- res = request_mem_region(res->start, resource_size(res), pdev->name);
- if (!res) {
- ret = -EBUSY;
- goto fail_free_msic;
- }
-
- msic->irq_base = ioremap_nocache(res->start, resource_size(res));
+ msic->irq_base = devm_request_and_ioremap(&pdev->dev, res);
if (!msic->irq_base) {
dev_err(&pdev->dev, "failed to map SRAM memory\n");
- ret = -ENOMEM;
- goto fail_release_region;
+ return -ENOMEM;
}
platform_set_drvdata(pdev, msic);
@@ -443,7 +435,7 @@ static int __devinit intel_msic_probe(struct platform_device *pdev)
ret = intel_msic_init_devices(msic);
if (ret) {
dev_err(&pdev->dev, "failed to initialize MSIC devices\n");
- goto fail_unmap_mem;
+ return ret;
}
dev_info(&pdev->dev, "Intel MSIC version %c%d (vendor %#x)\n",
@@ -451,27 +443,14 @@ static int __devinit intel_msic_probe(struct platform_device *pdev)
msic->vendor);
return 0;
-
-fail_unmap_mem:
- iounmap(msic->irq_base);
-fail_release_region:
- release_mem_region(res->start, resource_size(res));
-fail_free_msic:
- kfree(msic);
-
- return ret;
}
static int __devexit intel_msic_remove(struct platform_device *pdev)
{
struct intel_msic *msic = platform_get_drvdata(pdev);
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
intel_msic_remove_devices(msic);
platform_set_drvdata(pdev, NULL);
- iounmap(msic->irq_base);
- release_mem_region(res->start, resource_size(res));
- kfree(msic);
return 0;
}
diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
index a9223ed1b7c..2ea99989551 100644
--- a/drivers/mfd/janz-cmodio.c
+++ b/drivers/mfd/janz-cmodio.c
@@ -283,23 +283,8 @@ static struct pci_driver cmodio_pci_driver = {
.remove = __devexit_p(cmodio_pci_remove),
};
-/*
- * Module Init / Exit
- */
-
-static int __init cmodio_init(void)
-{
- return pci_register_driver(&cmodio_pci_driver);
-}
-
-static void __exit cmodio_exit(void)
-{
- pci_unregister_driver(&cmodio_pci_driver);
-}
+module_pci_driver(cmodio_pci_driver);
MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
MODULE_DESCRIPTION("Janz CMOD-IO PCI MODULbus Carrier Board Driver");
MODULE_LICENSE("GPL");
-
-module_init(cmodio_init);
-module_exit(cmodio_exit);
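
module_pci_driver() generates the module init/exit boilerplate removed above; it is built on the generic module_driver() helper. Roughly, and leaving the exact expansion to <linux/pci.h>, the one-liner is equivalent to:

static int __init cmodio_pci_driver_init(void)
{
	return pci_register_driver(&cmodio_pci_driver);
}
module_init(cmodio_pci_driver_init);

static void __exit cmodio_pci_driver_exit(void)
{
	pci_unregister_driver(&cmodio_pci_driver);
}
module_exit(cmodio_pci_driver_exit);

The same macro replaces the hand-rolled init/exit pair in lpc_sch.c further down.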
diff --git a/drivers/mfd/lm3533-core.c b/drivers/mfd/lm3533-core.c
new file mode 100644
index 00000000000..0b2879b87fd
--- /dev/null
+++ b/drivers/mfd/lm3533-core.c
@@ -0,0 +1,667 @@
+/*
+ * lm3533-core.c -- LM3533 Core
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/regmap.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <linux/mfd/lm3533.h>
+
+
+#define LM3533_BOOST_OVP_MASK 0x06
+#define LM3533_BOOST_OVP_SHIFT 1
+
+#define LM3533_BOOST_FREQ_MASK 0x01
+#define LM3533_BOOST_FREQ_SHIFT 0
+
+#define LM3533_BL_ID_MASK 1
+#define LM3533_LED_ID_MASK 3
+#define LM3533_BL_ID_MAX 1
+#define LM3533_LED_ID_MAX 3
+
+#define LM3533_HVLED_ID_MAX 2
+#define LM3533_LVLED_ID_MAX 5
+
+#define LM3533_REG_OUTPUT_CONF1 0x10
+#define LM3533_REG_OUTPUT_CONF2 0x11
+#define LM3533_REG_BOOST_PWM 0x2c
+
+#define LM3533_REG_MAX 0xb2
+
+
+static struct mfd_cell lm3533_als_devs[] = {
+ {
+ .name = "lm3533-als",
+ .id = -1,
+ },
+};
+
+static struct mfd_cell lm3533_bl_devs[] = {
+ {
+ .name = "lm3533-backlight",
+ .id = 0,
+ },
+ {
+ .name = "lm3533-backlight",
+ .id = 1,
+ },
+};
+
+static struct mfd_cell lm3533_led_devs[] = {
+ {
+ .name = "lm3533-leds",
+ .id = 0,
+ },
+ {
+ .name = "lm3533-leds",
+ .id = 1,
+ },
+ {
+ .name = "lm3533-leds",
+ .id = 2,
+ },
+ {
+ .name = "lm3533-leds",
+ .id = 3,
+ },
+};
+
+int lm3533_read(struct lm3533 *lm3533, u8 reg, u8 *val)
+{
+ int tmp;
+ int ret;
+
+ ret = regmap_read(lm3533->regmap, reg, &tmp);
+ if (ret < 0) {
+ dev_err(lm3533->dev, "failed to read register %02x: %d\n",
+ reg, ret);
+ return ret;
+ }
+
+ *val = tmp;
+
+ dev_dbg(lm3533->dev, "read [%02x]: %02x\n", reg, *val);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_read);
+
+int lm3533_write(struct lm3533 *lm3533, u8 reg, u8 val)
+{
+ int ret;
+
+ dev_dbg(lm3533->dev, "write [%02x]: %02x\n", reg, val);
+
+ ret = regmap_write(lm3533->regmap, reg, val);
+ if (ret < 0) {
+ dev_err(lm3533->dev, "failed to write register %02x: %d\n",
+ reg, ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_write);
+
+int lm3533_update(struct lm3533 *lm3533, u8 reg, u8 val, u8 mask)
+{
+ int ret;
+
+ dev_dbg(lm3533->dev, "update [%02x]: %02x/%02x\n", reg, val, mask);
+
+ ret = regmap_update_bits(lm3533->regmap, reg, mask, val);
+ if (ret < 0) {
+ dev_err(lm3533->dev, "failed to update register %02x: %d\n",
+ reg, ret);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_update);
+
+static int lm3533_set_boost_freq(struct lm3533 *lm3533,
+ enum lm3533_boost_freq freq)
+{
+ int ret;
+
+ ret = lm3533_update(lm3533, LM3533_REG_BOOST_PWM,
+ freq << LM3533_BOOST_FREQ_SHIFT,
+ LM3533_BOOST_FREQ_MASK);
+ if (ret)
+ dev_err(lm3533->dev, "failed to set boost frequency\n");
+
+ return ret;
+}
+
+
+static int lm3533_set_boost_ovp(struct lm3533 *lm3533,
+ enum lm3533_boost_ovp ovp)
+{
+ int ret;
+
+ ret = lm3533_update(lm3533, LM3533_REG_BOOST_PWM,
+ ovp << LM3533_BOOST_OVP_SHIFT,
+ LM3533_BOOST_OVP_MASK);
+ if (ret)
+ dev_err(lm3533->dev, "failed to set boost ovp\n");
+
+ return ret;
+}
+
+/*
+ * HVLED output config -- output hvled controlled by backlight bl
+ */
+static int lm3533_set_hvled_config(struct lm3533 *lm3533, u8 hvled, u8 bl)
+{
+ u8 val;
+ u8 mask;
+ int shift;
+ int ret;
+
+ if (hvled == 0 || hvled > LM3533_HVLED_ID_MAX)
+ return -EINVAL;
+
+ if (bl > LM3533_BL_ID_MAX)
+ return -EINVAL;
+
+ shift = hvled - 1;
+ mask = LM3533_BL_ID_MASK << shift;
+ val = bl << shift;
+
+ ret = lm3533_update(lm3533, LM3533_REG_OUTPUT_CONF1, val, mask);
+ if (ret)
+ dev_err(lm3533->dev, "failed to set hvled config\n");
+
+ return ret;
+}
+
+/*
+ * LVLED output config -- output lvled controlled by LED led
+ */
+static int lm3533_set_lvled_config(struct lm3533 *lm3533, u8 lvled, u8 led)
+{
+ u8 reg;
+ u8 val;
+ u8 mask;
+ int shift;
+ int ret;
+
+ if (lvled == 0 || lvled > LM3533_LVLED_ID_MAX)
+ return -EINVAL;
+
+ if (led > LM3533_LED_ID_MAX)
+ return -EINVAL;
+
+ if (lvled < 4) {
+ reg = LM3533_REG_OUTPUT_CONF1;
+ shift = 2 * lvled;
+ } else {
+ reg = LM3533_REG_OUTPUT_CONF2;
+ shift = 2 * (lvled - 4);
+ }
+
+ mask = LM3533_LED_ID_MASK << shift;
+ val = led << shift;
+
+ ret = lm3533_update(lm3533, reg, val, mask);
+ if (ret)
+ dev_err(lm3533->dev, "failed to set lvled config\n");
+
+ return ret;
+}
+
+static void lm3533_enable(struct lm3533 *lm3533)
+{
+ if (gpio_is_valid(lm3533->gpio_hwen))
+ gpio_set_value(lm3533->gpio_hwen, 1);
+}
+
+static void lm3533_disable(struct lm3533 *lm3533)
+{
+ if (gpio_is_valid(lm3533->gpio_hwen))
+ gpio_set_value(lm3533->gpio_hwen, 0);
+}
+
+enum lm3533_attribute_type {
+ LM3533_ATTR_TYPE_BACKLIGHT,
+ LM3533_ATTR_TYPE_LED,
+};
+
+struct lm3533_device_attribute {
+ struct device_attribute dev_attr;
+ enum lm3533_attribute_type type;
+ union {
+ struct {
+ u8 id;
+ } output;
+ } u;
+};
+
+#define to_lm3533_dev_attr(_attr) \
+ container_of(_attr, struct lm3533_device_attribute, dev_attr)
+
+static ssize_t show_output(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm3533 *lm3533 = dev_get_drvdata(dev);
+ struct lm3533_device_attribute *lattr = to_lm3533_dev_attr(attr);
+ int id = lattr->u.output.id;
+ u8 reg;
+ u8 val;
+ u8 mask;
+ int shift;
+ int ret;
+
+ if (lattr->type == LM3533_ATTR_TYPE_BACKLIGHT) {
+ reg = LM3533_REG_OUTPUT_CONF1;
+ shift = id - 1;
+ mask = LM3533_BL_ID_MASK << shift;
+ } else {
+ if (id < 4) {
+ reg = LM3533_REG_OUTPUT_CONF1;
+ shift = 2 * id;
+ } else {
+ reg = LM3533_REG_OUTPUT_CONF2;
+ shift = 2 * (id - 4);
+ }
+ mask = LM3533_LED_ID_MASK << shift;
+ }
+
+ ret = lm3533_read(lm3533, reg, &val);
+ if (ret)
+ return ret;
+
+ val = (val & mask) >> shift;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t store_output(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct lm3533 *lm3533 = dev_get_drvdata(dev);
+ struct lm3533_device_attribute *lattr = to_lm3533_dev_attr(attr);
+ int id = lattr->u.output.id;
+ u8 val;
+ int ret;
+
+ if (kstrtou8(buf, 0, &val))
+ return -EINVAL;
+
+ if (lattr->type == LM3533_ATTR_TYPE_BACKLIGHT)
+ ret = lm3533_set_hvled_config(lm3533, id, val);
+ else
+ ret = lm3533_set_lvled_config(lm3533, id, val);
+
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+#define LM3533_OUTPUT_ATTR(_name, _mode, _show, _store, _type, _id) \
+ struct lm3533_device_attribute lm3533_dev_attr_##_name = \
+ { .dev_attr = __ATTR(_name, _mode, _show, _store), \
+ .type = _type, \
+ .u.output = { .id = _id }, }
+
+#define LM3533_OUTPUT_ATTR_RW(_name, _type, _id) \
+ LM3533_OUTPUT_ATTR(output_##_name, S_IRUGO | S_IWUSR, \
+ show_output, store_output, _type, _id)
+
+#define LM3533_OUTPUT_HVLED_ATTR_RW(_nr) \
+ LM3533_OUTPUT_ATTR_RW(hvled##_nr, LM3533_ATTR_TYPE_BACKLIGHT, _nr)
+#define LM3533_OUTPUT_LVLED_ATTR_RW(_nr) \
+ LM3533_OUTPUT_ATTR_RW(lvled##_nr, LM3533_ATTR_TYPE_LED, _nr)
+/*
+ * Output config:
+ *
+ * output_hvled<nr> 0-1
+ * output_lvled<nr> 0-3
+ */
+static LM3533_OUTPUT_HVLED_ATTR_RW(1);
+static LM3533_OUTPUT_HVLED_ATTR_RW(2);
+static LM3533_OUTPUT_LVLED_ATTR_RW(1);
+static LM3533_OUTPUT_LVLED_ATTR_RW(2);
+static LM3533_OUTPUT_LVLED_ATTR_RW(3);
+static LM3533_OUTPUT_LVLED_ATTR_RW(4);
+static LM3533_OUTPUT_LVLED_ATTR_RW(5);
+
+static struct attribute *lm3533_attributes[] = {
+ &lm3533_dev_attr_output_hvled1.dev_attr.attr,
+ &lm3533_dev_attr_output_hvled2.dev_attr.attr,
+ &lm3533_dev_attr_output_lvled1.dev_attr.attr,
+ &lm3533_dev_attr_output_lvled2.dev_attr.attr,
+ &lm3533_dev_attr_output_lvled3.dev_attr.attr,
+ &lm3533_dev_attr_output_lvled4.dev_attr.attr,
+ &lm3533_dev_attr_output_lvled5.dev_attr.attr,
+ NULL,
+};
+
+#define to_dev_attr(_attr) \
+ container_of(_attr, struct device_attribute, attr)
+
+static umode_t lm3533_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct lm3533 *lm3533 = dev_get_drvdata(dev);
+ struct device_attribute *dattr = to_dev_attr(attr);
+ struct lm3533_device_attribute *lattr = to_lm3533_dev_attr(dattr);
+ enum lm3533_attribute_type type = lattr->type;
+ umode_t mode = attr->mode;
+
+ if (!lm3533->have_backlights && type == LM3533_ATTR_TYPE_BACKLIGHT)
+ mode = 0;
+ else if (!lm3533->have_leds && type == LM3533_ATTR_TYPE_LED)
+ mode = 0;
+
+ return mode;
+};
+
+static struct attribute_group lm3533_attribute_group = {
+ .is_visible = lm3533_attr_is_visible,
+ .attrs = lm3533_attributes
+};
+
+static int __devinit lm3533_device_als_init(struct lm3533 *lm3533)
+{
+ struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+ int ret;
+
+ if (!pdata->als)
+ return 0;
+
+ lm3533_als_devs[0].platform_data = pdata->als;
+ lm3533_als_devs[0].pdata_size = sizeof(*pdata->als);
+
+ ret = mfd_add_devices(lm3533->dev, 0, lm3533_als_devs, 1, NULL, 0);
+ if (ret) {
+ dev_err(lm3533->dev, "failed to add ALS device\n");
+ return ret;
+ }
+
+ lm3533->have_als = 1;
+
+ return 0;
+}
+
+static int __devinit lm3533_device_bl_init(struct lm3533 *lm3533)
+{
+ struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+ int i;
+ int ret;
+
+ if (!pdata->backlights || pdata->num_backlights == 0)
+ return 0;
+
+ if (pdata->num_backlights > ARRAY_SIZE(lm3533_bl_devs))
+ pdata->num_backlights = ARRAY_SIZE(lm3533_bl_devs);
+
+ for (i = 0; i < pdata->num_backlights; ++i) {
+ lm3533_bl_devs[i].platform_data = &pdata->backlights[i];
+ lm3533_bl_devs[i].pdata_size = sizeof(pdata->backlights[i]);
+ }
+
+ ret = mfd_add_devices(lm3533->dev, 0, lm3533_bl_devs,
+ pdata->num_backlights, NULL, 0);
+ if (ret) {
+ dev_err(lm3533->dev, "failed to add backlight devices\n");
+ return ret;
+ }
+
+ lm3533->have_backlights = 1;
+
+ return 0;
+}
+
+static int __devinit lm3533_device_led_init(struct lm3533 *lm3533)
+{
+ struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+ int i;
+ int ret;
+
+ if (!pdata->leds || pdata->num_leds == 0)
+ return 0;
+
+ if (pdata->num_leds > ARRAY_SIZE(lm3533_led_devs))
+ pdata->num_leds = ARRAY_SIZE(lm3533_led_devs);
+
+ for (i = 0; i < pdata->num_leds; ++i) {
+ lm3533_led_devs[i].platform_data = &pdata->leds[i];
+ lm3533_led_devs[i].pdata_size = sizeof(pdata->leds[i]);
+ }
+
+ ret = mfd_add_devices(lm3533->dev, 0, lm3533_led_devs,
+ pdata->num_leds, NULL, 0);
+ if (ret) {
+ dev_err(lm3533->dev, "failed to add LED devices\n");
+ return ret;
+ }
+
+ lm3533->have_leds = 1;
+
+ return 0;
+}
+
+static int __devinit lm3533_device_setup(struct lm3533 *lm3533,
+ struct lm3533_platform_data *pdata)
+{
+ int ret;
+
+ ret = lm3533_set_boost_freq(lm3533, pdata->boost_freq);
+ if (ret)
+ return ret;
+
+ ret = lm3533_set_boost_ovp(lm3533, pdata->boost_ovp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int __devinit lm3533_device_init(struct lm3533 *lm3533)
+{
+ struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
+ int ret;
+
+ dev_dbg(lm3533->dev, "%s\n", __func__);
+
+ if (!pdata) {
+ dev_err(lm3533->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ lm3533->gpio_hwen = pdata->gpio_hwen;
+
+ dev_set_drvdata(lm3533->dev, lm3533);
+
+ if (gpio_is_valid(lm3533->gpio_hwen)) {
+ ret = gpio_request_one(lm3533->gpio_hwen, GPIOF_OUT_INIT_LOW,
+ "lm3533-hwen");
+ if (ret < 0) {
+ dev_err(lm3533->dev,
+ "failed to request HWEN GPIO %d\n",
+ lm3533->gpio_hwen);
+ return ret;
+ }
+ }
+
+ lm3533_enable(lm3533);
+
+ ret = lm3533_device_setup(lm3533, pdata);
+ if (ret)
+ goto err_disable;
+
+ lm3533_device_als_init(lm3533);
+ lm3533_device_bl_init(lm3533);
+ lm3533_device_led_init(lm3533);
+
+ ret = sysfs_create_group(&lm3533->dev->kobj, &lm3533_attribute_group);
+ if (ret < 0) {
+ dev_err(lm3533->dev, "failed to create sysfs attributes\n");
+ goto err_unregister;
+ }
+
+ return 0;
+
+err_unregister:
+ mfd_remove_devices(lm3533->dev);
+err_disable:
+ lm3533_disable(lm3533);
+ if (gpio_is_valid(lm3533->gpio_hwen))
+ gpio_free(lm3533->gpio_hwen);
+
+ return ret;
+}
+
+static void __devexit lm3533_device_exit(struct lm3533 *lm3533)
+{
+ dev_dbg(lm3533->dev, "%s\n", __func__);
+
+ sysfs_remove_group(&lm3533->dev->kobj, &lm3533_attribute_group);
+
+ mfd_remove_devices(lm3533->dev);
+ lm3533_disable(lm3533);
+ if (gpio_is_valid(lm3533->gpio_hwen))
+ gpio_free(lm3533->gpio_hwen);
+}
+
+static bool lm3533_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case 0x10 ... 0x2c:
+ case 0x30 ... 0x38:
+ case 0x40 ... 0x45:
+ case 0x50 ... 0x57:
+ case 0x60 ... 0x6e:
+ case 0x70 ... 0x75:
+ case 0x80 ... 0x85:
+ case 0x90 ... 0x95:
+ case 0xa0 ... 0xa5:
+ case 0xb0 ... 0xb2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool lm3533_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case 0x34 ... 0x36: /* zone */
+ case 0x37 ... 0x38: /* adc */
+ case 0xb0 ... 0xb1: /* fault */
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool lm3533_precious_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case 0x34: /* zone */
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = LM3533_REG_MAX,
+ .readable_reg = lm3533_readable_register,
+ .volatile_reg = lm3533_volatile_register,
+ .precious_reg = lm3533_precious_register,
+};
+
+static int __devinit lm3533_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct lm3533 *lm3533;
+ int ret;
+
+ dev_dbg(&i2c->dev, "%s\n", __func__);
+
+ lm3533 = devm_kzalloc(&i2c->dev, sizeof(*lm3533), GFP_KERNEL);
+ if (!lm3533)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, lm3533);
+
+ lm3533->regmap = devm_regmap_init_i2c(i2c, &regmap_config);
+ if (IS_ERR(lm3533->regmap))
+ return PTR_ERR(lm3533->regmap);
+
+ lm3533->dev = &i2c->dev;
+ lm3533->irq = i2c->irq;
+
+ ret = lm3533_device_init(lm3533);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int __devexit lm3533_i2c_remove(struct i2c_client *i2c)
+{
+ struct lm3533 *lm3533 = i2c_get_clientdata(i2c);
+
+ dev_dbg(&i2c->dev, "%s\n", __func__);
+
+ lm3533_device_exit(lm3533);
+
+ return 0;
+}
+
+static const struct i2c_device_id lm3533_i2c_ids[] = {
+ { "lm3533", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, lm3533_i2c_ids);
+
+static struct i2c_driver lm3533_i2c_driver = {
+ .driver = {
+ .name = "lm3533",
+ .owner = THIS_MODULE,
+ },
+ .id_table = lm3533_i2c_ids,
+ .probe = lm3533_i2c_probe,
+ .remove = __devexit_p(lm3533_i2c_remove),
+};
+
+static int __init lm3533_i2c_init(void)
+{
+ return i2c_add_driver(&lm3533_i2c_driver);
+}
+subsys_initcall(lm3533_i2c_init);
+
+static void __exit lm3533_i2c_exit(void)
+{
+ i2c_del_driver(&lm3533_i2c_driver);
+}
+module_exit(lm3533_i2c_exit);
+
+MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
+MODULE_DESCRIPTION("LM3533 Core");
+MODULE_LICENSE("GPL");
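
The shift/mask arithmetic in lm3533_set_hvled_config() and lm3533_set_lvled_config() packs one-bit backlight fields and two-bit LED fields into the two output-configuration registers. A small stand-alone program mirroring the LVLED computation shows the values written for one example mapping (LVLED5 routed to LED bank 2); the hexadecimal register addresses are the driver's own LM3533_REG_OUTPUT_CONF1/2 values:

#include <stdio.h>

/* Mirrors the reg/shift/mask computation of lm3533_set_lvled_config(). */
int main(void)
{
	unsigned int lvled = 5, led = 2;	/* example: LVLED5 -> LED bank 2 */
	unsigned int reg, shift, mask, val;

	if (lvled < 4) {
		reg = 0x10;			/* LM3533_REG_OUTPUT_CONF1 */
		shift = 2 * lvled;
	} else {
		reg = 0x11;			/* LM3533_REG_OUTPUT_CONF2 */
		shift = 2 * (lvled - 4);
	}
	mask = 0x3 << shift;			/* LM3533_LED_ID_MASK */
	val = led << shift;

	printf("reg %#04x val %#04x mask %#04x\n", reg, val, mask);
	return 0;	/* prints: reg 0x11 val 0x08 mask 0x0c */
}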
diff --git a/drivers/mfd/lm3533-ctrlbank.c b/drivers/mfd/lm3533-ctrlbank.c
new file mode 100644
index 00000000000..a4cb7a5220a
--- /dev/null
+++ b/drivers/mfd/lm3533-ctrlbank.c
@@ -0,0 +1,148 @@
+/*
+ * lm3533-ctrlbank.c -- LM3533 Generic Control Bank interface
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+
+#include <linux/mfd/lm3533.h>
+
+
+#define LM3533_MAX_CURRENT_MIN 5000
+#define LM3533_MAX_CURRENT_MAX 29800
+#define LM3533_MAX_CURRENT_STEP 800
+
+#define LM3533_BRIGHTNESS_MAX 255
+#define LM3533_PWM_MAX 0x3f
+
+#define LM3533_REG_PWM_BASE 0x14
+#define LM3533_REG_MAX_CURRENT_BASE 0x1f
+#define LM3533_REG_CTRLBANK_ENABLE 0x27
+#define LM3533_REG_BRIGHTNESS_BASE 0x40
+
+
+static inline u8 lm3533_ctrlbank_get_reg(struct lm3533_ctrlbank *cb, u8 base)
+{
+ return base + cb->id;
+}
+
+int lm3533_ctrlbank_enable(struct lm3533_ctrlbank *cb)
+{
+ u8 mask;
+ int ret;
+
+ dev_dbg(cb->dev, "%s - %d\n", __func__, cb->id);
+
+ mask = 1 << cb->id;
+ ret = lm3533_update(cb->lm3533, LM3533_REG_CTRLBANK_ENABLE,
+ mask, mask);
+ if (ret)
+ dev_err(cb->dev, "failed to enable ctrlbank %d\n", cb->id);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_enable);
+
+int lm3533_ctrlbank_disable(struct lm3533_ctrlbank *cb)
+{
+ u8 mask;
+ int ret;
+
+ dev_dbg(cb->dev, "%s - %d\n", __func__, cb->id);
+
+ mask = 1 << cb->id;
+ ret = lm3533_update(cb->lm3533, LM3533_REG_CTRLBANK_ENABLE, 0, mask);
+ if (ret)
+ dev_err(cb->dev, "failed to disable ctrlbank %d\n", cb->id);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_disable);
+
+/*
+ * Full-scale current.
+ *
+ * imax 5000 - 29800 uA (800 uA step)
+ */
+int lm3533_ctrlbank_set_max_current(struct lm3533_ctrlbank *cb, u16 imax)
+{
+ u8 reg;
+ u8 val;
+ int ret;
+
+ if (imax < LM3533_MAX_CURRENT_MIN || imax > LM3533_MAX_CURRENT_MAX)
+ return -EINVAL;
+
+ val = (imax - LM3533_MAX_CURRENT_MIN) / LM3533_MAX_CURRENT_STEP;
+
+ reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_MAX_CURRENT_BASE);
+ ret = lm3533_write(cb->lm3533, reg, val);
+ if (ret)
+ dev_err(cb->dev, "failed to set max current\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_max_current);
+
+#define lm3533_ctrlbank_set(_name, _NAME) \
+int lm3533_ctrlbank_set_##_name(struct lm3533_ctrlbank *cb, u8 val) \
+{ \
+ u8 reg; \
+ int ret; \
+ \
+ if (val > LM3533_##_NAME##_MAX) \
+ return -EINVAL; \
+ \
+ reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_##_NAME##_BASE); \
+ ret = lm3533_write(cb->lm3533, reg, val); \
+ if (ret) \
+ dev_err(cb->dev, "failed to set " #_name "\n"); \
+ \
+ return ret; \
+} \
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_##_name);
+
+#define lm3533_ctrlbank_get(_name, _NAME) \
+int lm3533_ctrlbank_get_##_name(struct lm3533_ctrlbank *cb, u8 *val) \
+{ \
+ u8 reg; \
+ int ret; \
+ \
+ reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_##_NAME##_BASE); \
+ ret = lm3533_read(cb->lm3533, reg, val); \
+ if (ret) \
+ dev_err(cb->dev, "failed to get " #_name "\n"); \
+ \
+ return ret; \
+} \
+EXPORT_SYMBOL_GPL(lm3533_ctrlbank_get_##_name);
+
+lm3533_ctrlbank_set(brightness, BRIGHTNESS);
+lm3533_ctrlbank_get(brightness, BRIGHTNESS);
+
+/*
+ * PWM-input control mask:
+ *
+ * bit 5 - PWM-input enabled in Zone 4
+ * bit 4 - PWM-input enabled in Zone 3
+ * bit 3 - PWM-input enabled in Zone 2
+ * bit 2 - PWM-input enabled in Zone 1
+ * bit 1 - PWM-input enabled in Zone 0
+ * bit 0 - PWM-input enabled
+ */
+lm3533_ctrlbank_set(pwm, PWM);
+lm3533_ctrlbank_get(pwm, PWM);
+
+
+MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
+MODULE_DESCRIPTION("LM3533 Control Bank interface");
+MODULE_LICENSE("GPL");
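
The full-scale current comment above boils down to a linear scaling: register value = (imax - 5000 uA) / 800 uA. A stand-alone example of the conversion performed by lm3533_ctrlbank_set_max_current(), using an arbitrary 19400 uA request:

#include <stdio.h>

#define LM3533_MAX_CURRENT_MIN	5000
#define LM3533_MAX_CURRENT_MAX	29800
#define LM3533_MAX_CURRENT_STEP	800

/* Mirrors the scaling in lm3533_ctrlbank_set_max_current(). */
int main(void)
{
	unsigned int imax = 19400;	/* requested full-scale current, uA */
	unsigned int val;

	if (imax < LM3533_MAX_CURRENT_MIN || imax > LM3533_MAX_CURRENT_MAX)
		return 1;

	val = (imax - LM3533_MAX_CURRENT_MIN) / LM3533_MAX_CURRENT_STEP;
	printf("imax %u uA -> register value %u (0x%02x)\n", imax, val, val);
	return 0;	/* prints: imax 19400 uA -> register value 18 (0x12) */
}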
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
new file mode 100644
index 00000000000..027cc8f8613
--- /dev/null
+++ b/drivers/mfd/lpc_ich.c
@@ -0,0 +1,888 @@
+/*
+ * lpc_ich.c - LPC interface for Intel ICH
+ *
+ * LPC bridge function of the Intel ICH contains many other
+ * functional units, such as Interrupt controllers, Timers,
+ * Power Management, System Management, GPIO, RTC, and LPC
+ * Configuration Registers.
+ *
+ * This driver is derived from lpc_sch.
+ *
+ * Copyright (c) 2011 Extreme Engineering Solution, Inc.
+ * Author: Aaron Sierra <asierra@xes-inc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * This driver supports the following I/O Controller hubs:
+ * (See the Intel documentation on http://developer.intel.com.)
+ * document number 290655-003, 290677-014: 82801AA (ICH), 82801AB (ICH0)
+ * document number 290687-002, 298242-027: 82801BA (ICH2)
+ * document number 290733-003, 290739-013: 82801CA (ICH3-S)
+ * document number 290716-001, 290718-007: 82801CAM (ICH3-M)
+ * document number 290744-001, 290745-025: 82801DB (ICH4)
+ * document number 252337-001, 252663-008: 82801DBM (ICH4-M)
+ * document number 273599-001, 273645-002: 82801E (C-ICH)
+ * document number 252516-001, 252517-028: 82801EB (ICH5), 82801ER (ICH5R)
+ * document number 300641-004, 300884-013: 6300ESB
+ * document number 301473-002, 301474-026: 82801F (ICH6)
+ * document number 313082-001, 313075-006: 631xESB, 632xESB
+ * document number 307013-003, 307014-024: 82801G (ICH7)
+ * document number 322896-001, 322897-001: NM10
+ * document number 313056-003, 313057-017: 82801H (ICH8)
+ * document number 316972-004, 316973-012: 82801I (ICH9)
+ * document number 319973-002, 319974-002: 82801J (ICH10)
+ * document number 322169-001, 322170-003: 5 Series, 3400 Series (PCH)
+ * document number 320066-003, 320257-008: EP80597 (IICH)
+ * document number 324645-001, 324646-001: Cougar Point (CPT)
+ * document number TBD : Patsburg (PBG)
+ * document number TBD : DH89xxCC
+ * document number TBD : Panther Point
+ * document number TBD : Lynx Point
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/lpc_ich.h>
+
+#define ACPIBASE 0x40
+#define ACPIBASE_GPE_OFF 0x28
+#define ACPIBASE_GPE_END 0x2f
+#define ACPIBASE_SMI_OFF 0x30
+#define ACPIBASE_SMI_END 0x33
+#define ACPIBASE_TCO_OFF 0x60
+#define ACPIBASE_TCO_END 0x7f
+#define ACPICTRL 0x44
+
+#define ACPIBASE_GCS_OFF 0x3410
+#define ACPIBASE_GCS_END 0x3414
+
+#define GPIOBASE 0x48
+#define GPIOCTRL 0x4C
+
+#define RCBABASE 0xf0
+
+#define wdt_io_res(i) wdt_res(0, i)
+#define wdt_mem_res(i) wdt_res(ICH_RES_MEM_OFF, i)
+#define wdt_res(b, i) (&wdt_ich_res[(b) + (i)])
+
+static int lpc_ich_acpi_save = -1;
+static int lpc_ich_gpio_save = -1;
+
+static struct resource wdt_ich_res[] = {
+ /* ACPI - TCO */
+ {
+ .flags = IORESOURCE_IO,
+ },
+ /* ACPI - SMI */
+ {
+ .flags = IORESOURCE_IO,
+ },
+ /* GCS */
+ {
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource gpio_ich_res[] = {
+ /* GPIO */
+ {
+ .flags = IORESOURCE_IO,
+ },
+ /* ACPI - GPE0 */
+ {
+ .flags = IORESOURCE_IO,
+ },
+};
+
+enum lpc_cells {
+ LPC_WDT = 0,
+ LPC_GPIO,
+};
+
+static struct mfd_cell lpc_ich_cells[] = {
+ [LPC_WDT] = {
+ .name = "iTCO_wdt",
+ .num_resources = ARRAY_SIZE(wdt_ich_res),
+ .resources = wdt_ich_res,
+ .ignore_resource_conflicts = true,
+ },
+ [LPC_GPIO] = {
+ .name = "gpio_ich",
+ .num_resources = ARRAY_SIZE(gpio_ich_res),
+ .resources = gpio_ich_res,
+ .ignore_resource_conflicts = true,
+ },
+};
+
+/* chipset related info */
+enum lpc_chipsets {
+ LPC_ICH = 0, /* ICH */
+ LPC_ICH0, /* ICH0 */
+ LPC_ICH2, /* ICH2 */
+ LPC_ICH2M, /* ICH2-M */
+ LPC_ICH3, /* ICH3-S */
+ LPC_ICH3M, /* ICH3-M */
+ LPC_ICH4, /* ICH4 */
+ LPC_ICH4M, /* ICH4-M */
+ LPC_CICH, /* C-ICH */
+ LPC_ICH5, /* ICH5 & ICH5R */
+ LPC_6300ESB, /* 6300ESB */
+ LPC_ICH6, /* ICH6 & ICH6R */
+ LPC_ICH6M, /* ICH6-M */
+ LPC_ICH6W, /* ICH6W & ICH6RW */
+ LPC_631XESB, /* 631xESB/632xESB */
+ LPC_ICH7, /* ICH7 & ICH7R */
+ LPC_ICH7DH, /* ICH7DH */
+ LPC_ICH7M, /* ICH7-M & ICH7-U */
+ LPC_ICH7MDH, /* ICH7-M DH */
+ LPC_NM10, /* NM10 */
+ LPC_ICH8, /* ICH8 & ICH8R */
+ LPC_ICH8DH, /* ICH8DH */
+ LPC_ICH8DO, /* ICH8DO */
+ LPC_ICH8M, /* ICH8M */
+ LPC_ICH8ME, /* ICH8M-E */
+ LPC_ICH9, /* ICH9 */
+ LPC_ICH9R, /* ICH9R */
+ LPC_ICH9DH, /* ICH9DH */
+ LPC_ICH9DO, /* ICH9DO */
+ LPC_ICH9M, /* ICH9M */
+ LPC_ICH9ME, /* ICH9M-E */
+ LPC_ICH10, /* ICH10 */
+ LPC_ICH10R, /* ICH10R */
+ LPC_ICH10D, /* ICH10D */
+ LPC_ICH10DO, /* ICH10DO */
+ LPC_PCH, /* PCH Desktop Full Featured */
+ LPC_PCHM, /* PCH Mobile Full Featured */
+ LPC_P55, /* P55 */
+ LPC_PM55, /* PM55 */
+ LPC_H55, /* H55 */
+ LPC_QM57, /* QM57 */
+ LPC_H57, /* H57 */
+ LPC_HM55, /* HM55 */
+ LPC_Q57, /* Q57 */
+ LPC_HM57, /* HM57 */
+ LPC_PCHMSFF, /* PCH Mobile SFF Full Featured */
+ LPC_QS57, /* QS57 */
+ LPC_3400, /* 3400 */
+ LPC_3420, /* 3420 */
+ LPC_3450, /* 3450 */
+ LPC_EP80579, /* EP80579 */
+ LPC_CPT, /* Cougar Point */
+ LPC_CPTD, /* Cougar Point Desktop */
+ LPC_CPTM, /* Cougar Point Mobile */
+ LPC_PBG, /* Patsburg */
+ LPC_DH89XXCC, /* DH89xxCC */
+ LPC_PPT, /* Panther Point */
+ LPC_LPT, /* Lynx Point */
+};
+
+struct lpc_ich_info lpc_chipset_info[] __devinitdata = {
+ [LPC_ICH] = {
+ .name = "ICH",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH0] = {
+ .name = "ICH0",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH2] = {
+ .name = "ICH2",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH2M] = {
+ .name = "ICH2-M",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH3] = {
+ .name = "ICH3-S",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH3M] = {
+ .name = "ICH3-M",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH4] = {
+ .name = "ICH4",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH4M] = {
+ .name = "ICH4-M",
+ .iTCO_version = 1,
+ },
+ [LPC_CICH] = {
+ .name = "C-ICH",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH5] = {
+ .name = "ICH5 or ICH5R",
+ .iTCO_version = 1,
+ },
+ [LPC_6300ESB] = {
+ .name = "6300ESB",
+ .iTCO_version = 1,
+ },
+ [LPC_ICH6] = {
+ .name = "ICH6 or ICH6R",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V6_GPIO,
+ },
+ [LPC_ICH6M] = {
+ .name = "ICH6-M",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V6_GPIO,
+ },
+ [LPC_ICH6W] = {
+ .name = "ICH6W or ICH6RW",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V6_GPIO,
+ },
+ [LPC_631XESB] = {
+ .name = "631xESB/632xESB",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V6_GPIO,
+ },
+ [LPC_ICH7] = {
+ .name = "ICH7 or ICH7R",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_ICH7DH] = {
+ .name = "ICH7DH",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_ICH7M] = {
+ .name = "ICH7-M or ICH7-U",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_ICH7MDH] = {
+ .name = "ICH7-M DH",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_NM10] = {
+ .name = "NM10",
+ .iTCO_version = 2,
+ },
+ [LPC_ICH8] = {
+ .name = "ICH8 or ICH8R",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_ICH8DH] = {
+ .name = "ICH8DH",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_ICH8DO] = {
+ .name = "ICH8DO",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_ICH8M] = {
+ .name = "ICH8M",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_ICH8ME] = {
+ .name = "ICH8M-E",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V7_GPIO,
+ },
+ [LPC_ICH9] = {
+ .name = "ICH9",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V9_GPIO,
+ },
+ [LPC_ICH9R] = {
+ .name = "ICH9R",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V9_GPIO,
+ },
+ [LPC_ICH9DH] = {
+ .name = "ICH9DH",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V9_GPIO,
+ },
+ [LPC_ICH9DO] = {
+ .name = "ICH9DO",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V9_GPIO,
+ },
+ [LPC_ICH9M] = {
+ .name = "ICH9M",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V9_GPIO,
+ },
+ [LPC_ICH9ME] = {
+ .name = "ICH9M-E",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V9_GPIO,
+ },
+ [LPC_ICH10] = {
+ .name = "ICH10",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V10CONS_GPIO,
+ },
+ [LPC_ICH10R] = {
+ .name = "ICH10R",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V10CONS_GPIO,
+ },
+ [LPC_ICH10D] = {
+ .name = "ICH10D",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V10CORP_GPIO,
+ },
+ [LPC_ICH10DO] = {
+ .name = "ICH10DO",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V10CORP_GPIO,
+ },
+ [LPC_PCH] = {
+ .name = "PCH Desktop Full Featured",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_PCHM] = {
+ .name = "PCH Mobile Full Featured",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_P55] = {
+ .name = "P55",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_PM55] = {
+ .name = "PM55",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_H55] = {
+ .name = "H55",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_QM57] = {
+ .name = "QM57",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_H57] = {
+ .name = "H57",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_HM55] = {
+ .name = "HM55",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_Q57] = {
+ .name = "Q57",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_HM57] = {
+ .name = "HM57",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_PCHMSFF] = {
+ .name = "PCH Mobile SFF Full Featured",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_QS57] = {
+ .name = "QS57",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_3400] = {
+ .name = "3400",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_3420] = {
+ .name = "3420",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_3450] = {
+ .name = "3450",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_EP80579] = {
+ .name = "EP80579",
+ .iTCO_version = 2,
+ },
+ [LPC_CPT] = {
+ .name = "Cougar Point",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_CPTD] = {
+ .name = "Cougar Point Desktop",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_CPTM] = {
+ .name = "Cougar Point Mobile",
+ .iTCO_version = 2,
+ .gpio_version = ICH_V5_GPIO,
+ },
+ [LPC_PBG] = {
+ .name = "Patsburg",
+ .iTCO_version = 2,
+ },
+ [LPC_DH89XXCC] = {
+ .name = "DH89xxCC",
+ .iTCO_version = 2,
+ },
+ [LPC_PPT] = {
+ .name = "Panther Point",
+ .iTCO_version = 2,
+ },
+ [LPC_LPT] = {
+ .name = "Lynx Point",
+ .iTCO_version = 2,
+ },
+};
+
+/*
+ * This table exports the supported PCI ids via MODULE_DEVICE_TABLE
+ * and serves as the id_table of lpc_ich_driver below.  Only the LPC
+ * bridge function is claimed here; the other functional units of the
+ * I/O Controller Hub are handled by their own drivers.
+ */
+static DEFINE_PCI_DEVICE_TABLE(lpc_ich_ids) = {
+ { PCI_VDEVICE(INTEL, 0x2410), LPC_ICH},
+ { PCI_VDEVICE(INTEL, 0x2420), LPC_ICH0},
+ { PCI_VDEVICE(INTEL, 0x2440), LPC_ICH2},
+ { PCI_VDEVICE(INTEL, 0x244c), LPC_ICH2M},
+ { PCI_VDEVICE(INTEL, 0x2480), LPC_ICH3},
+ { PCI_VDEVICE(INTEL, 0x248c), LPC_ICH3M},
+ { PCI_VDEVICE(INTEL, 0x24c0), LPC_ICH4},
+ { PCI_VDEVICE(INTEL, 0x24cc), LPC_ICH4M},
+ { PCI_VDEVICE(INTEL, 0x2450), LPC_CICH},
+ { PCI_VDEVICE(INTEL, 0x24d0), LPC_ICH5},
+ { PCI_VDEVICE(INTEL, 0x25a1), LPC_6300ESB},
+ { PCI_VDEVICE(INTEL, 0x2640), LPC_ICH6},
+ { PCI_VDEVICE(INTEL, 0x2641), LPC_ICH6M},
+ { PCI_VDEVICE(INTEL, 0x2642), LPC_ICH6W},
+ { PCI_VDEVICE(INTEL, 0x2670), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2671), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2672), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2673), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2674), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2675), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2676), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2677), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2678), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x2679), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267a), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267b), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267c), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267d), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267e), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x267f), LPC_631XESB},
+ { PCI_VDEVICE(INTEL, 0x27b8), LPC_ICH7},
+ { PCI_VDEVICE(INTEL, 0x27b0), LPC_ICH7DH},
+ { PCI_VDEVICE(INTEL, 0x27b9), LPC_ICH7M},
+ { PCI_VDEVICE(INTEL, 0x27bd), LPC_ICH7MDH},
+ { PCI_VDEVICE(INTEL, 0x27bc), LPC_NM10},
+ { PCI_VDEVICE(INTEL, 0x2810), LPC_ICH8},
+ { PCI_VDEVICE(INTEL, 0x2812), LPC_ICH8DH},
+ { PCI_VDEVICE(INTEL, 0x2814), LPC_ICH8DO},
+ { PCI_VDEVICE(INTEL, 0x2815), LPC_ICH8M},
+ { PCI_VDEVICE(INTEL, 0x2811), LPC_ICH8ME},
+ { PCI_VDEVICE(INTEL, 0x2918), LPC_ICH9},
+ { PCI_VDEVICE(INTEL, 0x2916), LPC_ICH9R},
+ { PCI_VDEVICE(INTEL, 0x2912), LPC_ICH9DH},
+ { PCI_VDEVICE(INTEL, 0x2914), LPC_ICH9DO},
+ { PCI_VDEVICE(INTEL, 0x2919), LPC_ICH9M},
+ { PCI_VDEVICE(INTEL, 0x2917), LPC_ICH9ME},
+ { PCI_VDEVICE(INTEL, 0x3a18), LPC_ICH10},
+ { PCI_VDEVICE(INTEL, 0x3a16), LPC_ICH10R},
+ { PCI_VDEVICE(INTEL, 0x3a1a), LPC_ICH10D},
+ { PCI_VDEVICE(INTEL, 0x3a14), LPC_ICH10DO},
+ { PCI_VDEVICE(INTEL, 0x3b00), LPC_PCH},
+ { PCI_VDEVICE(INTEL, 0x3b01), LPC_PCHM},
+ { PCI_VDEVICE(INTEL, 0x3b02), LPC_P55},
+ { PCI_VDEVICE(INTEL, 0x3b03), LPC_PM55},
+ { PCI_VDEVICE(INTEL, 0x3b06), LPC_H55},
+ { PCI_VDEVICE(INTEL, 0x3b07), LPC_QM57},
+ { PCI_VDEVICE(INTEL, 0x3b08), LPC_H57},
+ { PCI_VDEVICE(INTEL, 0x3b09), LPC_HM55},
+ { PCI_VDEVICE(INTEL, 0x3b0a), LPC_Q57},
+ { PCI_VDEVICE(INTEL, 0x3b0b), LPC_HM57},
+ { PCI_VDEVICE(INTEL, 0x3b0d), LPC_PCHMSFF},
+ { PCI_VDEVICE(INTEL, 0x3b0f), LPC_QS57},
+ { PCI_VDEVICE(INTEL, 0x3b12), LPC_3400},
+ { PCI_VDEVICE(INTEL, 0x3b14), LPC_3420},
+ { PCI_VDEVICE(INTEL, 0x3b16), LPC_3450},
+ { PCI_VDEVICE(INTEL, 0x5031), LPC_EP80579},
+ { PCI_VDEVICE(INTEL, 0x1c41), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c42), LPC_CPTD},
+ { PCI_VDEVICE(INTEL, 0x1c43), LPC_CPTM},
+ { PCI_VDEVICE(INTEL, 0x1c44), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c45), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c46), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c47), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c48), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c49), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4a), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4b), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4c), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4d), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4e), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c4f), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c50), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c51), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c52), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c53), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c54), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c55), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c56), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c57), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c58), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c59), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5a), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5b), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5c), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5d), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5e), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1c5f), LPC_CPT},
+ { PCI_VDEVICE(INTEL, 0x1d40), LPC_PBG},
+ { PCI_VDEVICE(INTEL, 0x1d41), LPC_PBG},
+ { PCI_VDEVICE(INTEL, 0x2310), LPC_DH89XXCC},
+ { PCI_VDEVICE(INTEL, 0x1e40), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e41), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e42), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e43), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e44), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e45), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e46), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e47), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e48), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e49), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4a), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4b), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4c), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4d), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4e), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e4f), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e50), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e51), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e52), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e53), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e54), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e55), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e56), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e57), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e58), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e59), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5a), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5b), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5c), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5d), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5e), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x1e5f), LPC_PPT},
+ { PCI_VDEVICE(INTEL, 0x8c40), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c41), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c42), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c43), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c44), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c45), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c46), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c47), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c48), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c49), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c4a), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c4b), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c4c), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c4d), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c4e), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c4f), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c50), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c51), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c52), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c53), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c54), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c55), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c56), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c57), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c58), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c59), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c5a), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c5b), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c5c), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c5d), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c5e), LPC_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c5f), LPC_LPT},
+ { 0, }, /* End of list */
+};
+MODULE_DEVICE_TABLE(pci, lpc_ich_ids);
+
+static void lpc_ich_restore_config_space(struct pci_dev *dev)
+{
+ if (lpc_ich_acpi_save >= 0) {
+ pci_write_config_byte(dev, ACPICTRL, lpc_ich_acpi_save);
+ lpc_ich_acpi_save = -1;
+ }
+
+ if (lpc_ich_gpio_save >= 0) {
+ pci_write_config_byte(dev, GPIOCTRL, lpc_ich_gpio_save);
+ lpc_ich_gpio_save = -1;
+ }
+}
+
+static void __devinit lpc_ich_enable_acpi_space(struct pci_dev *dev)
+{
+ u8 reg_save;
+
+ pci_read_config_byte(dev, ACPICTRL, &reg_save);
+ pci_write_config_byte(dev, ACPICTRL, reg_save | 0x10);
+ lpc_ich_acpi_save = reg_save;
+}
+
+static void __devinit lpc_ich_enable_gpio_space(struct pci_dev *dev)
+{
+ u8 reg_save;
+
+ pci_read_config_byte(dev, GPIOCTRL, &reg_save);
+ pci_write_config_byte(dev, GPIOCTRL, reg_save | 0x10);
+ lpc_ich_gpio_save = reg_save;
+}
+
+static void __devinit lpc_ich_finalize_cell(struct mfd_cell *cell,
+ const struct pci_device_id *id)
+{
+ cell->platform_data = &lpc_chipset_info[id->driver_data];
+ cell->pdata_size = sizeof(struct lpc_ich_info);
+}
+
+static int __devinit lpc_ich_init_gpio(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ u32 base_addr_cfg;
+ u32 base_addr;
+ int ret;
+ bool acpi_conflict = false;
+ struct resource *res;
+
+ /* Setup power management base register */
+ pci_read_config_dword(dev, ACPIBASE, &base_addr_cfg);
+ base_addr = base_addr_cfg & 0x0000ff80;
+ if (!base_addr) {
+ dev_err(&dev->dev, "I/O space for ACPI uninitialized\n");
+ lpc_ich_cells[LPC_GPIO].num_resources--;
+ goto gpe0_done;
+ }
+
+ res = &gpio_ich_res[ICH_RES_GPE0];
+ res->start = base_addr + ACPIBASE_GPE_OFF;
+ res->end = base_addr + ACPIBASE_GPE_END;
+ ret = acpi_check_resource_conflict(res);
+ if (ret) {
+ /*
+ * This isn't fatal for the GPIO, but we have to make sure that
+ * the platform_device subsystem doesn't see this resource
+ * or it will register an invalid region.
+ */
+ lpc_ich_cells[LPC_GPIO].num_resources--;
+ acpi_conflict = true;
+ } else {
+ lpc_ich_enable_acpi_space(dev);
+ }
+
+gpe0_done:
+ /* Setup GPIO base register */
+ pci_read_config_dword(dev, GPIOBASE, &base_addr_cfg);
+ base_addr = base_addr_cfg & 0x0000ff80;
+ if (!base_addr) {
+ dev_err(&dev->dev, "I/O space for GPIO uninitialized\n");
+ ret = -ENODEV;
+ goto gpio_done;
+ }
+
+	/* Older devices provide fewer GPIOs and have a smaller resource size. */
+ res = &gpio_ich_res[ICH_RES_GPIO];
+ res->start = base_addr;
+ switch (lpc_chipset_info[id->driver_data].gpio_version) {
+ case ICH_V5_GPIO:
+ case ICH_V10CORP_GPIO:
+ res->end = res->start + 128 - 1;
+ break;
+ default:
+ res->end = res->start + 64 - 1;
+ break;
+ }
+
+ ret = acpi_check_resource_conflict(res);
+ if (ret) {
+ /* this isn't necessarily fatal for the GPIO */
+ acpi_conflict = true;
+ goto gpio_done;
+ }
+ lpc_ich_enable_gpio_space(dev);
+
+ lpc_ich_finalize_cell(&lpc_ich_cells[LPC_GPIO], id);
+ ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_GPIO],
+ 1, NULL, 0);
+
+gpio_done:
+ if (acpi_conflict)
+ pr_warn("Resource conflict(s) found affecting %s\n",
+ lpc_ich_cells[LPC_GPIO].name);
+ return ret;
+}
+
+static int __devinit lpc_ich_init_wdt(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ u32 base_addr_cfg;
+ u32 base_addr;
+ int ret;
+ bool acpi_conflict = false;
+ struct resource *res;
+
+ /* Setup power management base register */
+ pci_read_config_dword(dev, ACPIBASE, &base_addr_cfg);
+ base_addr = base_addr_cfg & 0x0000ff80;
+ if (!base_addr) {
+ dev_err(&dev->dev, "I/O space for ACPI uninitialized\n");
+ ret = -ENODEV;
+ goto wdt_done;
+ }
+
+ res = wdt_io_res(ICH_RES_IO_TCO);
+ res->start = base_addr + ACPIBASE_TCO_OFF;
+ res->end = base_addr + ACPIBASE_TCO_END;
+ ret = acpi_check_resource_conflict(res);
+ if (ret) {
+ acpi_conflict = true;
+ goto wdt_done;
+ }
+
+ res = wdt_io_res(ICH_RES_IO_SMI);
+ res->start = base_addr + ACPIBASE_SMI_OFF;
+ res->end = base_addr + ACPIBASE_SMI_END;
+ ret = acpi_check_resource_conflict(res);
+ if (ret) {
+ acpi_conflict = true;
+ goto wdt_done;
+ }
+ lpc_ich_enable_acpi_space(dev);
+
+ /*
+ * Get the Memory-Mapped GCS register. To get access to it
+ * we have to read RCBA from PCI Config space 0xf0 and use
+ * it as base. GCS = RCBA + ICH6_GCS(0x3410).
+ */
+ if (lpc_chipset_info[id->driver_data].iTCO_version == 2) {
+ pci_read_config_dword(dev, RCBABASE, &base_addr_cfg);
+ base_addr = base_addr_cfg & 0xffffc000;
+ if (!(base_addr_cfg & 1)) {
+ pr_err("RCBA is disabled by hardware/BIOS, "
+ "device disabled\n");
+ ret = -ENODEV;
+ goto wdt_done;
+ }
+ res = wdt_mem_res(ICH_RES_MEM_GCS);
+ res->start = base_addr + ACPIBASE_GCS_OFF;
+ res->end = base_addr + ACPIBASE_GCS_END;
+ ret = acpi_check_resource_conflict(res);
+ if (ret) {
+ acpi_conflict = true;
+ goto wdt_done;
+ }
+ }
+
+ lpc_ich_finalize_cell(&lpc_ich_cells[LPC_WDT], id);
+ ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_WDT],
+ 1, NULL, 0);
+
+wdt_done:
+ if (acpi_conflict)
+ pr_warn("Resource conflict(s) found affecting %s\n",
+ lpc_ich_cells[LPC_WDT].name);
+ return ret;
+}
+
+static int __devinit lpc_ich_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ int ret;
+ bool cell_added = false;
+
+ ret = lpc_ich_init_wdt(dev, id);
+ if (!ret)
+ cell_added = true;
+
+ ret = lpc_ich_init_gpio(dev, id);
+ if (!ret)
+ cell_added = true;
+
+ /*
+	 * The probe is considered successful if at least one of the cells
+	 * could be registered; fail only if none of them made it.
+ */
+ if (!cell_added) {
+ lpc_ich_restore_config_space(dev);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void __devexit lpc_ich_remove(struct pci_dev *dev)
+{
+ mfd_remove_devices(&dev->dev);
+ lpc_ich_restore_config_space(dev);
+}
+
+static struct pci_driver lpc_ich_driver = {
+ .name = "lpc_ich",
+ .id_table = lpc_ich_ids,
+ .probe = lpc_ich_probe,
+ .remove = __devexit_p(lpc_ich_remove),
+};
+
+static int __init lpc_ich_init(void)
+{
+ return pci_register_driver(&lpc_ich_driver);
+}
+
+static void __exit lpc_ich_exit(void)
+{
+ pci_unregister_driver(&lpc_ich_driver);
+}
+
+module_init(lpc_ich_init);
+module_exit(lpc_ich_exit);
+
+MODULE_AUTHOR("Aaron Sierra <asierra@xes-inc.com>");
+MODULE_DESCRIPTION("LPC interface for Intel ICH");
+MODULE_LICENSE("GPL");
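
Both lpc_ich_init_wdt() and lpc_ich_init_gpio() recover an I/O base by masking the ACPIBASE (or GPIOBASE) configuration dword with 0x0000ff80 and then add fixed offsets for the individual register blocks. A stand-alone illustration of the TCO range derived from an arbitrary example ACPIBASE value of 0x00000501:

#include <stdio.h>

#define ACPIBASE_TCO_OFF	0x60
#define ACPIBASE_TCO_END	0x7f

/* Mirrors the base-address extraction in lpc_ich_init_wdt(). */
int main(void)
{
	unsigned int base_addr_cfg = 0x00000501;	/* example config value */
	unsigned int base_addr = base_addr_cfg & 0x0000ff80;

	printf("ACPI I/O base %#06x, TCO range %#06x-%#06x\n",
	       base_addr, base_addr + ACPIBASE_TCO_OFF,
	       base_addr + ACPIBASE_TCO_END);
	return 0;	/* prints: ACPI I/O base 0x0500, TCO range 0x0560-0x057f */
}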
diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
index abc421364a4..9f20abc5e39 100644
--- a/drivers/mfd/lpc_sch.c
+++ b/drivers/mfd/lpc_sch.c
@@ -36,6 +36,7 @@
#define GPIOBASE 0x44
#define GPIO_IO_SIZE 64
+#define GPIO_IO_SIZE_CENTERTON 128
#define WDTBASE 0x84
#define WDT_IO_SIZE 64
@@ -68,7 +69,7 @@ static struct resource wdt_sch_resource = {
static struct mfd_cell tunnelcreek_cells[] = {
{
- .name = "tunnelcreek_wdt",
+ .name = "ie6xx_wdt",
.num_resources = 1,
.resources = &wdt_sch_resource,
},
@@ -77,6 +78,7 @@ static struct mfd_cell tunnelcreek_cells[] = {
static DEFINE_PCI_DEVICE_TABLE(lpc_sch_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ITC_LPC) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CENTERTON_ILB) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, lpc_sch_ids);
@@ -115,7 +117,11 @@ static int __devinit lpc_sch_probe(struct pci_dev *dev,
}
gpio_sch_resource.start = base_addr;
- gpio_sch_resource.end = base_addr + GPIO_IO_SIZE - 1;
+
+ if (id->device == PCI_DEVICE_ID_INTEL_CENTERTON_ILB)
+ gpio_sch_resource.end = base_addr + GPIO_IO_SIZE_CENTERTON - 1;
+ else
+ gpio_sch_resource.end = base_addr + GPIO_IO_SIZE - 1;
for (i=0; i < ARRAY_SIZE(lpc_sch_cells); i++)
lpc_sch_cells[i].id = id->device;
@@ -125,7 +131,8 @@ static int __devinit lpc_sch_probe(struct pci_dev *dev,
if (ret)
goto out_dev;
- if (id->device == PCI_DEVICE_ID_INTEL_ITC_LPC) {
+ if (id->device == PCI_DEVICE_ID_INTEL_ITC_LPC
+ || id->device == PCI_DEVICE_ID_INTEL_CENTERTON_ILB) {
pci_read_config_dword(dev, WDTBASE, &base_addr_cfg);
if (!(base_addr_cfg & (1 << 31))) {
dev_err(&dev->dev, "Decode of the WDT I/O range disabled\n");
@@ -167,18 +174,7 @@ static struct pci_driver lpc_sch_driver = {
.remove = __devexit_p(lpc_sch_remove),
};
-static int __init lpc_sch_init(void)
-{
- return pci_register_driver(&lpc_sch_driver);
-}
-
-static void __exit lpc_sch_exit(void)
-{
- pci_unregister_driver(&lpc_sch_driver);
-}
-
-module_init(lpc_sch_init);
-module_exit(lpc_sch_exit);
+module_pci_driver(lpc_sch_driver);
MODULE_AUTHOR("Denis Turischev <denis@compulab.co.il>");
MODULE_DESCRIPTION("LPC interface for Intel Poulsbo SCH");
diff --git a/drivers/mfd/max77693-irq.c b/drivers/mfd/max77693-irq.c
new file mode 100644
index 00000000000..2b403569e0a
--- /dev/null
+++ b/drivers/mfd/max77693-irq.c
@@ -0,0 +1,309 @@
+/*
+ * max77693-irq.c - Interrupt controller support for MAX77693
+ *
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * SangYoung Son <hello.son@samsung.com>
+ *
+ * This program is not provided / owned by Maxim Integrated Products.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This driver is based on max8997-irq.c
+ */
+
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-private.h>
+
+static const u8 max77693_mask_reg[] = {
+ [LED_INT] = MAX77693_LED_REG_FLASH_INT_MASK,
+ [TOPSYS_INT] = MAX77693_PMIC_REG_TOPSYS_INT_MASK,
+ [CHG_INT] = MAX77693_CHG_REG_CHG_INT_MASK,
+ [MUIC_INT1] = MAX77693_MUIC_REG_INTMASK1,
+ [MUIC_INT2] = MAX77693_MUIC_REG_INTMASK2,
+ [MUIC_INT3] = MAX77693_MUIC_REG_INTMASK3,
+};
+
+static struct regmap *max77693_get_regmap(struct max77693_dev *max77693,
+ enum max77693_irq_source src)
+{
+ switch (src) {
+ case LED_INT ... CHG_INT:
+ return max77693->regmap;
+ case MUIC_INT1 ... MUIC_INT3:
+ return max77693->regmap_muic;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+struct max77693_irq_data {
+ int mask;
+ enum max77693_irq_source group;
+};
+
+#define DECLARE_IRQ(idx, _group, _mask) \
+ [(idx)] = { .group = (_group), .mask = (_mask) }
+static const struct max77693_irq_data max77693_irqs[] = {
+ DECLARE_IRQ(MAX77693_LED_IRQ_FLED2_OPEN, LED_INT, 1 << 0),
+ DECLARE_IRQ(MAX77693_LED_IRQ_FLED2_SHORT, LED_INT, 1 << 1),
+ DECLARE_IRQ(MAX77693_LED_IRQ_FLED1_OPEN, LED_INT, 1 << 2),
+ DECLARE_IRQ(MAX77693_LED_IRQ_FLED1_SHORT, LED_INT, 1 << 3),
+ DECLARE_IRQ(MAX77693_LED_IRQ_MAX_FLASH, LED_INT, 1 << 4),
+
+ DECLARE_IRQ(MAX77693_TOPSYS_IRQ_T120C_INT, TOPSYS_INT, 1 << 0),
+ DECLARE_IRQ(MAX77693_TOPSYS_IRQ_T140C_INT, TOPSYS_INT, 1 << 1),
+ DECLARE_IRQ(MAX77693_TOPSYS_IRQ_LOWSYS_INT, TOPSYS_INT, 1 << 3),
+
+ DECLARE_IRQ(MAX77693_CHG_IRQ_BYP_I, CHG_INT, 1 << 0),
+ DECLARE_IRQ(MAX77693_CHG_IRQ_THM_I, CHG_INT, 1 << 2),
+ DECLARE_IRQ(MAX77693_CHG_IRQ_BAT_I, CHG_INT, 1 << 3),
+ DECLARE_IRQ(MAX77693_CHG_IRQ_CHG_I, CHG_INT, 1 << 4),
+ DECLARE_IRQ(MAX77693_CHG_IRQ_CHGIN_I, CHG_INT, 1 << 6),
+
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC, MUIC_INT1, 1 << 0),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC_LOW, MUIC_INT1, 1 << 1),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC_ERR, MUIC_INT1, 1 << 2),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC1K, MUIC_INT1, 1 << 3),
+
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_CHGTYP, MUIC_INT2, 1 << 0),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_CHGDETREUN, MUIC_INT2, 1 << 1),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_DCDTMR, MUIC_INT2, 1 << 2),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_DXOVP, MUIC_INT2, 1 << 3),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_VBVOLT, MUIC_INT2, 1 << 4),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_VIDRM, MUIC_INT2, 1 << 5),
+
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_EOC, MUIC_INT3, 1 << 0),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_CGMBC, MUIC_INT3, 1 << 1),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_OVP, MUIC_INT3, 1 << 2),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_MBCCHG_ERR, MUIC_INT3, 1 << 3),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_CHG_ENABLED, MUIC_INT3, 1 << 4),
+ DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_BAT_DET, MUIC_INT3, 1 << 5),
+};
+
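+/*
+ * Standard slow-bus irq_chip pattern: the mask/unmask callbacks below only
+ * update the irq_masks_cur cache while the bus lock is held, and the cached
+ * values are written out over I2C in irq_bus_sync_unlock(), where sleeping
+ * is allowed.
+ */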
+static void max77693_irq_lock(struct irq_data *data)
+{
+ struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
+
+ mutex_lock(&max77693->irqlock);
+}
+
+static void max77693_irq_sync_unlock(struct irq_data *data)
+{
+ struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
+ int i;
+
+ for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
+ u8 mask_reg = max77693_mask_reg[i];
+ struct regmap *map = max77693_get_regmap(max77693, i);
+
+ if (mask_reg == MAX77693_REG_INVALID ||
+ IS_ERR_OR_NULL(map))
+ continue;
+ max77693->irq_masks_cache[i] = max77693->irq_masks_cur[i];
+
+ max77693_write_reg(map, max77693_mask_reg[i],
+ max77693->irq_masks_cur[i]);
+ }
+
+ mutex_unlock(&max77693->irqlock);
+}
+
+static inline const struct max77693_irq_data *
+irq_to_max77693_irq(struct max77693_dev *max77693, int irq)
+{
+ return &max77693_irqs[irq];
+}
+
+static void max77693_irq_mask(struct irq_data *data)
+{
+ struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
+ const struct max77693_irq_data *irq_data =
+ irq_to_max77693_irq(max77693, data->irq);
+
+ if (irq_data->group >= MUIC_INT1 && irq_data->group <= MUIC_INT3)
+ max77693->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
+ else
+ max77693->irq_masks_cur[irq_data->group] |= irq_data->mask;
+}
+
+static void max77693_irq_unmask(struct irq_data *data)
+{
+ struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
+ const struct max77693_irq_data *irq_data =
+ irq_to_max77693_irq(max77693, data->irq);
+
+ if (irq_data->group >= MUIC_INT1 && irq_data->group <= MUIC_INT3)
+ max77693->irq_masks_cur[irq_data->group] |= irq_data->mask;
+ else
+ max77693->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
+}
+
+static struct irq_chip max77693_irq_chip = {
+ .name = "max77693",
+ .irq_bus_lock = max77693_irq_lock,
+ .irq_bus_sync_unlock = max77693_irq_sync_unlock,
+ .irq_mask = max77693_irq_mask,
+ .irq_unmask = max77693_irq_unmask,
+};
+
+#define MAX77693_IRQSRC_CHG (1 << 0)
+#define MAX77693_IRQSRC_TOP (1 << 1)
+#define MAX77693_IRQSRC_FLASH (1 << 2)
+#define MAX77693_IRQSRC_MUIC (1 << 3)
+static irqreturn_t max77693_irq_thread(int irq, void *data)
+{
+ struct max77693_dev *max77693 = data;
+ u8 irq_reg[MAX77693_IRQ_GROUP_NR] = {};
+ u8 irq_src;
+ int ret;
+ int i, cur_irq;
+
+ ret = max77693_read_reg(max77693->regmap, MAX77693_PMIC_REG_INTSRC,
+ &irq_src);
+ if (ret < 0) {
+ dev_err(max77693->dev, "Failed to read interrupt source: %d\n",
+ ret);
+ return IRQ_NONE;
+ }
+
+ if (irq_src & MAX77693_IRQSRC_CHG)
+ /* CHG_INT */
+ ret = max77693_read_reg(max77693->regmap, MAX77693_CHG_REG_CHG_INT,
+ &irq_reg[CHG_INT]);
+
+ if (irq_src & MAX77693_IRQSRC_TOP)
+ /* TOPSYS_INT */
+ ret = max77693_read_reg(max77693->regmap,
+ MAX77693_PMIC_REG_TOPSYS_INT, &irq_reg[TOPSYS_INT]);
+
+ if (irq_src & MAX77693_IRQSRC_FLASH)
+ /* LED_INT */
+ ret = max77693_read_reg(max77693->regmap,
+ MAX77693_LED_REG_FLASH_INT, &irq_reg[LED_INT]);
+
+ if (irq_src & MAX77693_IRQSRC_MUIC)
+ /* MUIC INT1 ~ INT3 */
+ max77693_bulk_read(max77693->regmap, MAX77693_MUIC_REG_INT1,
+ MAX77693_NUM_IRQ_MUIC_REGS, &irq_reg[MUIC_INT1]);
+
+ /* Apply masking */
+ for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
+ if (i >= MUIC_INT1 && i <= MUIC_INT3)
+ irq_reg[i] &= max77693->irq_masks_cur[i];
+ else
+ irq_reg[i] &= ~max77693->irq_masks_cur[i];
+ }
+
+ /* Report */
+ for (i = 0; i < MAX77693_IRQ_NR; i++) {
+ if (irq_reg[max77693_irqs[i].group] & max77693_irqs[i].mask) {
+ cur_irq = irq_find_mapping(max77693->irq_domain, i);
+ if (cur_irq)
+ handle_nested_irq(cur_irq);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
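+/*
+ * Called from the core driver's resume hook: re-run the threaded handler
+ * once so that an interrupt which fired while the chip IRQ was acting as a
+ * wakeup source gets serviced rather than lost.
+ */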
+int max77693_irq_resume(struct max77693_dev *max77693)
+{
+ if (max77693->irq)
+ max77693_irq_thread(0, max77693);
+
+ return 0;
+}
+
+static int max77693_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct max77693_dev *max77693 = d->host_data;
+
+ irq_set_chip_data(irq, max77693);
+ irq_set_chip_and_handler(irq, &max77693_irq_chip, handle_edge_irq);
+ irq_set_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ irq_set_noprobe(irq);
+#endif
+ return 0;
+}
+
+static struct irq_domain_ops max77693_irq_domain_ops = {
+ .map = max77693_irq_domain_map,
+};
+
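+/*
+ * max77693_irq_init() below creates a linear domain covering MAX77693_IRQ_NR
+ * interrupts. Sketch of how a sub-device driver is expected to consume one
+ * of them (illustrative, not part of this patch):
+ *	virq = irq_find_mapping(max77693->irq_domain, MAX77693_CHG_IRQ_CHGIN_I);
+ * followed by requesting virq as a nested threaded interrupt.
+ */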
+int max77693_irq_init(struct max77693_dev *max77693)
+{
+ struct irq_domain *domain;
+ int i;
+ int ret;
+
+ mutex_init(&max77693->irqlock);
+
+ /* Mask individual interrupt sources */
+ for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
+ struct regmap *map;
+ /* MUIC IRQ 0:MASK 1:NOT MASK */
+ /* Other IRQ 1:MASK 0:NOT MASK */
+ if (i >= MUIC_INT1 && i <= MUIC_INT3) {
+ max77693->irq_masks_cur[i] = 0x00;
+ max77693->irq_masks_cache[i] = 0x00;
+ } else {
+ max77693->irq_masks_cur[i] = 0xff;
+ max77693->irq_masks_cache[i] = 0xff;
+ }
+ map = max77693_get_regmap(max77693, i);
+
+ if (IS_ERR_OR_NULL(map))
+ continue;
+ if (max77693_mask_reg[i] == MAX77693_REG_INVALID)
+ continue;
+ if (i >= MUIC_INT1 && i <= MUIC_INT3)
+ max77693_write_reg(map, max77693_mask_reg[i], 0x00);
+ else
+ max77693_write_reg(map, max77693_mask_reg[i], 0xff);
+ }
+
+ domain = irq_domain_add_linear(NULL, MAX77693_IRQ_NR,
+ &max77693_irq_domain_ops, max77693);
+ if (!domain) {
+ dev_err(max77693->dev, "could not create irq domain\n");
+ return -ENODEV;
+ }
+ max77693->irq_domain = domain;
+
+ ret = request_threaded_irq(max77693->irq, NULL, max77693_irq_thread,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "max77693-irq", max77693);
+
+ if (ret)
+ dev_err(max77693->dev, "Failed to request IRQ %d: %d\n",
+ max77693->irq, ret);
+
+ return ret;
+}
+
+void max77693_irq_exit(struct max77693_dev *max77693)
+{
+ if (max77693->irq)
+ free_irq(max77693->irq, max77693);
+}
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
new file mode 100644
index 00000000000..e9e4278722f
--- /dev/null
+++ b/drivers/mfd/max77693.c
@@ -0,0 +1,249 @@
+/*
+ * max77693.c - mfd core driver for the MAX 77693
+ *
+ * Copyright (C) 2012 Samsung Electronics
+ * SangYoung Son <hello.son@samsung.com>
+ *
+ * This program is not provided / owned by Maxim Integrated Products.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This driver is based on max8997.c
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include <linux/mutex.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-private.h>
+#include <linux/regulator/machine.h>
+#include <linux/regmap.h>
+
+#define I2C_ADDR_PMIC (0xCC >> 1) /* Charger, Flash LED */
+#define I2C_ADDR_MUIC (0x4A >> 1)
+#define I2C_ADDR_HAPTIC (0x90 >> 1)
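+/*
+ * The charger, flash LED and top-system registers answer on the main client
+ * address, while the MUIC and haptic blocks sit behind their own slave
+ * addresses; probe() below instantiates dummy clients for the latter two
+ * with i2c_new_dummy().
+ */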
+
+static struct mfd_cell max77693_devs[] = {
+ { .name = "max77693-pmic", },
+ { .name = "max77693-charger", },
+ { .name = "max77693-flash", },
+ { .name = "max77693-muic", },
+ { .name = "max77693-haptic", },
+};
+
+int max77693_read_reg(struct regmap *map, u8 reg, u8 *dest)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(map, reg, &val);
+ *dest = val;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(max77693_read_reg);
+
+int max77693_bulk_read(struct regmap *map, u8 reg, int count, u8 *buf)
+{
+ int ret;
+
+ ret = regmap_bulk_read(map, reg, buf, count);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(max77693_bulk_read);
+
+int max77693_write_reg(struct regmap *map, u8 reg, u8 value)
+{
+ int ret;
+
+ ret = regmap_write(map, reg, value);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(max77693_write_reg);
+
+int max77693_bulk_write(struct regmap *map, u8 reg, int count, u8 *buf)
+{
+ int ret;
+
+ ret = regmap_bulk_write(map, reg, buf, count);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(max77693_bulk_write);
+
+int max77693_update_reg(struct regmap *map, u8 reg, u8 val, u8 mask)
+{
+ int ret;
+
+ ret = regmap_update_bits(map, reg, mask, val);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(max77693_update_reg);
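+/*
+ * Usage sketch for the wrappers above (illustrative): sub-device drivers
+ * handed the parent struct max77693_dev access chip registers through them,
+ * e.g.
+ *	u8 id;
+ *	max77693_read_reg(max77693->regmap, MAX77693_PMIC_REG_PMIC_ID2, &id);
+ * exactly as probe() below does when checking the device ID.
+ */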
+
+static const struct regmap_config max77693_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX77693_PMIC_REG_END,
+};
+
+static int max77693_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct max77693_dev *max77693;
+ struct max77693_platform_data *pdata = i2c->dev.platform_data;
+ u8 reg_data;
+ int ret = 0;
+
+ max77693 = devm_kzalloc(&i2c->dev,
+ sizeof(struct max77693_dev), GFP_KERNEL);
+ if (max77693 == NULL)
+ return -ENOMEM;
+
+ max77693->regmap = devm_regmap_init_i2c(i2c, &max77693_regmap_config);
+ if (IS_ERR(max77693->regmap)) {
+ ret = PTR_ERR(max77693->regmap);
+ dev_err(max77693->dev,"failed to allocate register map: %d\n",
+ ret);
+ goto err_regmap;
+ }
+
+ i2c_set_clientdata(i2c, max77693);
+ max77693->dev = &i2c->dev;
+ max77693->i2c = i2c;
+ max77693->irq = i2c->irq;
+ max77693->type = id->driver_data;
+
+ if (!pdata) {
+ ret = -EINVAL;
+ goto err_regmap;
+ }
+
+ max77693->wakeup = pdata->wakeup;
+
+ mutex_init(&max77693->iolock);
+
+ if (max77693_read_reg(max77693->regmap,
+ MAX77693_PMIC_REG_PMIC_ID2, &reg_data) < 0) {
+ dev_err(max77693->dev, "device not found on this channel\n");
+ ret = -ENODEV;
+ goto err_regmap;
+ } else
+ dev_info(max77693->dev, "device ID: 0x%x\n", reg_data);
+
+ max77693->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
+ i2c_set_clientdata(max77693->muic, max77693);
+
+ max77693->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
+ i2c_set_clientdata(max77693->haptic, max77693);
+
+ ret = max77693_irq_init(max77693);
+ if (ret < 0)
+ goto err_mfd;
+
+ pm_runtime_set_active(max77693->dev);
+
+ ret = mfd_add_devices(max77693->dev, -1, max77693_devs,
+ ARRAY_SIZE(max77693_devs), NULL, 0);
+ if (ret < 0)
+ goto err_mfd;
+
+ device_init_wakeup(max77693->dev, pdata->wakeup);
+
+ return ret;
+
+err_mfd:
+ i2c_unregister_device(max77693->muic);
+ i2c_unregister_device(max77693->haptic);
+err_regmap:
+
+ return ret;
+}
+
+static int max77693_i2c_remove(struct i2c_client *i2c)
+{
+ struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
+
+ mfd_remove_devices(max77693->dev);
+ i2c_unregister_device(max77693->muic);
+ i2c_unregister_device(max77693->haptic);
+
+ return 0;
+}
+
+static const struct i2c_device_id max77693_i2c_id[] = {
+ { "max77693", TYPE_MAX77693 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max77693_i2c_id);
+
+static int max77693_suspend(struct device *dev)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
+
+ if (device_may_wakeup(dev))
+ irq_set_irq_wake(max77693->irq, 1);
+ return 0;
+}
+
+static int max77693_resume(struct device *dev)
+{
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
+
+ if (device_may_wakeup(dev))
+ irq_set_irq_wake(max77693->irq, 0);
+ return max77693_irq_resume(max77693);
+}
+
+static const struct dev_pm_ops max77693_pm = {
+ .suspend = max77693_suspend,
+ .resume = max77693_resume,
+};
+
+static struct i2c_driver max77693_i2c_driver = {
+ .driver = {
+ .name = "max77693",
+ .owner = THIS_MODULE,
+ .pm = &max77693_pm,
+ },
+ .probe = max77693_i2c_probe,
+ .remove = max77693_i2c_remove,
+ .id_table = max77693_i2c_id,
+};
+
+static int __init max77693_i2c_init(void)
+{
+ return i2c_add_driver(&max77693_i2c_driver);
+}
+/* init early so consumer devices can complete system boot */
+subsys_initcall(max77693_i2c_init);
+
+static void __exit max77693_i2c_exit(void)
+{
+ i2c_del_driver(&max77693_i2c_driver);
+}
+module_exit(max77693_i2c_exit);
+
+MODULE_DESCRIPTION("MAXIM 77693 multi-function core driver");
+MODULE_AUTHOR("SangYoung, Son <hello.son@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index 738722cdeca..f0ea3b8b3e4 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -15,24 +15,13 @@
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
-#include <linux/spi/spi.h>
#include <linux/mfd/core.h>
#include <linux/mfd/mc13xxx.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
-struct mc13xxx {
- struct spi_device *spidev;
- struct mutex lock;
- int irq;
- int flags;
-
- irq_handler_t irqhandler[MC13XXX_NUM_IRQ];
- void *irqdata[MC13XXX_NUM_IRQ];
-
- int adcflags;
-};
+#include "mc13xxx.h"
#define MC13XXX_IRQSTAT0 0
#define MC13XXX_IRQSTAT0_ADCDONEI (1 << 0)
@@ -139,34 +128,29 @@ struct mc13xxx {
#define MC13XXX_ADC2 45
-#define MC13XXX_NUMREGS 0x3f
-
void mc13xxx_lock(struct mc13xxx *mc13xxx)
{
if (!mutex_trylock(&mc13xxx->lock)) {
- dev_dbg(&mc13xxx->spidev->dev, "wait for %s from %pf\n",
+ dev_dbg(mc13xxx->dev, "wait for %s from %pf\n",
__func__, __builtin_return_address(0));
mutex_lock(&mc13xxx->lock);
}
- dev_dbg(&mc13xxx->spidev->dev, "%s from %pf\n",
+ dev_dbg(mc13xxx->dev, "%s from %pf\n",
__func__, __builtin_return_address(0));
}
EXPORT_SYMBOL(mc13xxx_lock);
void mc13xxx_unlock(struct mc13xxx *mc13xxx)
{
- dev_dbg(&mc13xxx->spidev->dev, "%s from %pf\n",
+ dev_dbg(mc13xxx->dev, "%s from %pf\n",
__func__, __builtin_return_address(0));
mutex_unlock(&mc13xxx->lock);
}
EXPORT_SYMBOL(mc13xxx_unlock);
-#define MC13XXX_REGOFFSET_SHIFT 25
int mc13xxx_reg_read(struct mc13xxx *mc13xxx, unsigned int offset, u32 *val)
{
- struct spi_transfer t;
- struct spi_message m;
int ret;
BUG_ON(!mutex_is_locked(&mc13xxx->lock));
@@ -174,84 +158,35 @@ int mc13xxx_reg_read(struct mc13xxx *mc13xxx, unsigned int offset, u32 *val)
if (offset > MC13XXX_NUMREGS)
return -EINVAL;
- *val = offset << MC13XXX_REGOFFSET_SHIFT;
-
- memset(&t, 0, sizeof(t));
-
- t.tx_buf = val;
- t.rx_buf = val;
- t.len = sizeof(u32);
-
- spi_message_init(&m);
- spi_message_add_tail(&t, &m);
-
- ret = spi_sync(mc13xxx->spidev, &m);
-
- /* error in message.status implies error return from spi_sync */
- BUG_ON(!ret && m.status);
+ ret = regmap_read(mc13xxx->regmap, offset, val);
+ dev_vdbg(mc13xxx->dev, "[0x%02x] -> 0x%06x\n", offset, *val);
- if (ret)
- return ret;
-
- *val &= 0xffffff;
-
- dev_vdbg(&mc13xxx->spidev->dev, "[0x%02x] -> 0x%06x\n", offset, *val);
-
- return 0;
+ return ret;
}
EXPORT_SYMBOL(mc13xxx_reg_read);
int mc13xxx_reg_write(struct mc13xxx *mc13xxx, unsigned int offset, u32 val)
{
- u32 buf;
- struct spi_transfer t;
- struct spi_message m;
- int ret;
-
BUG_ON(!mutex_is_locked(&mc13xxx->lock));
- dev_vdbg(&mc13xxx->spidev->dev, "[0x%02x] <- 0x%06x\n", offset, val);
+ dev_vdbg(mc13xxx->dev, "[0x%02x] <- 0x%06x\n", offset, val);
if (offset > MC13XXX_NUMREGS || val > 0xffffff)
return -EINVAL;
- buf = 1 << 31 | offset << MC13XXX_REGOFFSET_SHIFT | val;
-
- memset(&t, 0, sizeof(t));
-
- t.tx_buf = &buf;
- t.rx_buf = &buf;
- t.len = sizeof(u32);
-
- spi_message_init(&m);
- spi_message_add_tail(&t, &m);
-
- ret = spi_sync(mc13xxx->spidev, &m);
-
- BUG_ON(!ret && m.status);
-
- if (ret)
- return ret;
-
- return 0;
+ return regmap_write(mc13xxx->regmap, offset, val);
}
EXPORT_SYMBOL(mc13xxx_reg_write);
int mc13xxx_reg_rmw(struct mc13xxx *mc13xxx, unsigned int offset,
u32 mask, u32 val)
{
- int ret;
- u32 valread;
-
+ BUG_ON(!mutex_is_locked(&mc13xxx->lock));
BUG_ON(val & ~mask);
+ dev_vdbg(mc13xxx->dev, "[0x%02x] <- 0x%06x (mask: 0x%06x)\n",
+ offset, val, mask);
- ret = mc13xxx_reg_read(mc13xxx, offset, &valread);
- if (ret)
- return ret;
-
- valread = (valread & ~mask) | val;
-
- return mc13xxx_reg_write(mc13xxx, offset, valread);
+ return regmap_update_bits(mc13xxx->regmap, offset, mask, val);
}
EXPORT_SYMBOL(mc13xxx_reg_rmw);
@@ -439,7 +374,7 @@ static int mc13xxx_irq_handle(struct mc13xxx *mc13xxx,
if (handled == IRQ_HANDLED)
num_handled++;
} else {
- dev_err(&mc13xxx->spidev->dev,
+ dev_err(mc13xxx->dev,
"BUG: irq %u but no handler\n",
baseirq + irq);
@@ -475,25 +410,23 @@ static irqreturn_t mc13xxx_irq_thread(int irq, void *data)
return IRQ_RETVAL(handled);
}
-enum mc13xxx_id {
- MC13XXX_ID_MC13783,
- MC13XXX_ID_MC13892,
- MC13XXX_ID_INVALID,
-};
-
static const char *mc13xxx_chipname[] = {
[MC13XXX_ID_MC13783] = "mc13783",
[MC13XXX_ID_MC13892] = "mc13892",
};
#define maskval(reg, mask) (((reg) & (mask)) >> __ffs(mask))
-static int mc13xxx_identify(struct mc13xxx *mc13xxx, enum mc13xxx_id *id)
+static int mc13xxx_identify(struct mc13xxx *mc13xxx)
{
u32 icid;
u32 revision;
- const char *name;
int ret;
+ /*
+ * Get the generation ID from register 46, as apparently some older
+ * IC revisions only have this info at this location. Newer ICs seem to
+ * have both.
+ */
ret = mc13xxx_reg_read(mc13xxx, 46, &icid);
if (ret)
return ret;
@@ -502,26 +435,23 @@ static int mc13xxx_identify(struct mc13xxx *mc13xxx, enum mc13xxx_id *id)
switch (icid) {
case 2:
- *id = MC13XXX_ID_MC13783;
- name = "mc13783";
+ mc13xxx->ictype = MC13XXX_ID_MC13783;
break;
case 7:
- *id = MC13XXX_ID_MC13892;
- name = "mc13892";
+ mc13xxx->ictype = MC13XXX_ID_MC13892;
break;
default:
- *id = MC13XXX_ID_INVALID;
+ mc13xxx->ictype = MC13XXX_ID_INVALID;
break;
}
- if (*id == MC13XXX_ID_MC13783 || *id == MC13XXX_ID_MC13892) {
+ if (mc13xxx->ictype == MC13XXX_ID_MC13783 ||
+ mc13xxx->ictype == MC13XXX_ID_MC13892) {
ret = mc13xxx_reg_read(mc13xxx, MC13XXX_REVISION, &revision);
- if (ret)
- return ret;
- dev_info(&mc13xxx->spidev->dev, "%s: rev: %d.%d, "
+ dev_info(mc13xxx->dev, "%s: rev: %d.%d, "
"fin: %d, fab: %d, icid: %d/%d\n",
- mc13xxx_chipname[*id],
+ mc13xxx_chipname[mc13xxx->ictype],
maskval(revision, MC13XXX_REVISION_REVFULL),
maskval(revision, MC13XXX_REVISION_REVMETAL),
maskval(revision, MC13XXX_REVISION_FIN),
@@ -530,26 +460,12 @@ static int mc13xxx_identify(struct mc13xxx *mc13xxx, enum mc13xxx_id *id)
maskval(revision, MC13XXX_REVISION_ICIDCODE));
}
- if (*id != MC13XXX_ID_INVALID) {
- const struct spi_device_id *devid =
- spi_get_device_id(mc13xxx->spidev);
- if (!devid || devid->driver_data != *id)
- dev_warn(&mc13xxx->spidev->dev, "device id doesn't "
- "match auto detection!\n");
- }
-
- return 0;
+ return (mc13xxx->ictype == MC13XXX_ID_INVALID) ? -ENODEV : 0;
}
static const char *mc13xxx_get_chipname(struct mc13xxx *mc13xxx)
{
- const struct spi_device_id *devid =
- spi_get_device_id(mc13xxx->spidev);
-
- if (!devid)
- return NULL;
-
- return mc13xxx_chipname[devid->driver_data];
+ return mc13xxx_chipname[mc13xxx->ictype];
}
int mc13xxx_get_flags(struct mc13xxx *mc13xxx)
@@ -592,7 +508,7 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
};
init_completion(&adcdone_data.done);
- dev_dbg(&mc13xxx->spidev->dev, "%s\n", __func__);
+ dev_dbg(mc13xxx->dev, "%s\n", __func__);
mc13xxx_lock(mc13xxx);
@@ -637,7 +553,8 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
adc1 |= ato << MC13783_ADC1_ATO_SHIFT;
if (atox)
adc1 |= MC13783_ADC1_ATOX;
- dev_dbg(&mc13xxx->spidev->dev, "%s: request irq\n", __func__);
+
+ dev_dbg(mc13xxx->dev, "%s: request irq\n", __func__);
mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_ADCDONE,
mc13xxx_handler_adcdone, __func__, &adcdone_data);
mc13xxx_irq_ack(mc13xxx, MC13XXX_IRQ_ADCDONE);
@@ -695,7 +612,7 @@ static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx,
if (!cell.name)
return -ENOMEM;
- return mfd_add_devices(&mc13xxx->spidev->dev, -1, &cell, 1, NULL, 0);
+ return mfd_add_devices(mc13xxx->dev, -1, &cell, 1, NULL, 0);
}
static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format)
@@ -706,7 +623,7 @@ static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format)
#ifdef CONFIG_OF
static int mc13xxx_probe_flags_dt(struct mc13xxx *mc13xxx)
{
- struct device_node *np = mc13xxx->spidev->dev.of_node;
+ struct device_node *np = mc13xxx->dev->of_node;
if (!np)
return -ENODEV;
@@ -732,55 +649,15 @@ static inline int mc13xxx_probe_flags_dt(struct mc13xxx *mc13xxx)
}
#endif
-static const struct spi_device_id mc13xxx_device_id[] = {
- {
- .name = "mc13783",
- .driver_data = MC13XXX_ID_MC13783,
- }, {
- .name = "mc13892",
- .driver_data = MC13XXX_ID_MC13892,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(spi, mc13xxx_device_id);
-
-static const struct of_device_id mc13xxx_dt_ids[] = {
- { .compatible = "fsl,mc13783", .data = (void *) MC13XXX_ID_MC13783, },
- { .compatible = "fsl,mc13892", .data = (void *) MC13XXX_ID_MC13892, },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
-
-static int mc13xxx_probe(struct spi_device *spi)
+int mc13xxx_common_init(struct mc13xxx *mc13xxx,
+ struct mc13xxx_platform_data *pdata, int irq)
{
- const struct of_device_id *of_id;
- struct spi_driver *sdrv = to_spi_driver(spi->dev.driver);
- struct mc13xxx *mc13xxx;
- struct mc13xxx_platform_data *pdata = dev_get_platdata(&spi->dev);
- enum mc13xxx_id id;
int ret;
- of_id = of_match_device(mc13xxx_dt_ids, &spi->dev);
- if (of_id)
- sdrv->id_table = &mc13xxx_device_id[(enum mc13xxx_id) of_id->data];
-
- mc13xxx = kzalloc(sizeof(*mc13xxx), GFP_KERNEL);
- if (!mc13xxx)
- return -ENOMEM;
-
- dev_set_drvdata(&spi->dev, mc13xxx);
- spi->mode = SPI_MODE_0 | SPI_CS_HIGH;
- spi->bits_per_word = 32;
- spi_setup(spi);
-
- mc13xxx->spidev = spi;
-
- mutex_init(&mc13xxx->lock);
mc13xxx_lock(mc13xxx);
- ret = mc13xxx_identify(mc13xxx, &id);
- if (ret || id == MC13XXX_ID_INVALID)
+ ret = mc13xxx_identify(mc13xxx);
+ if (ret)
goto err_revision;
/* mask all irqs */
@@ -792,18 +669,19 @@ static int mc13xxx_probe(struct spi_device *spi)
if (ret)
goto err_mask;
- ret = request_threaded_irq(spi->irq, NULL, mc13xxx_irq_thread,
+ ret = request_threaded_irq(irq, NULL, mc13xxx_irq_thread,
IRQF_ONESHOT | IRQF_TRIGGER_HIGH, "mc13xxx", mc13xxx);
if (ret) {
err_mask:
err_revision:
mc13xxx_unlock(mc13xxx);
- dev_set_drvdata(&spi->dev, NULL);
kfree(mc13xxx);
return ret;
}
+ mc13xxx->irq = irq;
+
mc13xxx_unlock(mc13xxx);
if (mc13xxx_probe_flags_dt(mc13xxx) < 0 && pdata)
@@ -838,42 +716,19 @@ err_revision:
return 0;
}
+EXPORT_SYMBOL_GPL(mc13xxx_common_init);
-static int __devexit mc13xxx_remove(struct spi_device *spi)
+void mc13xxx_common_cleanup(struct mc13xxx *mc13xxx)
{
- struct mc13xxx *mc13xxx = dev_get_drvdata(&spi->dev);
+ free_irq(mc13xxx->irq, mc13xxx);
- free_irq(mc13xxx->spidev->irq, mc13xxx);
+ mfd_remove_devices(mc13xxx->dev);
- mfd_remove_devices(&spi->dev);
+ regmap_exit(mc13xxx->regmap);
kfree(mc13xxx);
-
- return 0;
-}
-
-static struct spi_driver mc13xxx_driver = {
- .id_table = mc13xxx_device_id,
- .driver = {
- .name = "mc13xxx",
- .owner = THIS_MODULE,
- .of_match_table = mc13xxx_dt_ids,
- },
- .probe = mc13xxx_probe,
- .remove = __devexit_p(mc13xxx_remove),
-};
-
-static int __init mc13xxx_init(void)
-{
- return spi_register_driver(&mc13xxx_driver);
-}
-subsys_initcall(mc13xxx_init);
-
-static void __exit mc13xxx_exit(void)
-{
- spi_unregister_driver(&mc13xxx_driver);
}
-module_exit(mc13xxx_exit);
+EXPORT_SYMBOL_GPL(mc13xxx_common_cleanup);
MODULE_DESCRIPTION("Core driver for Freescale MC13XXX PMIC");
MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
new file mode 100644
index 00000000000..d22501dad6a
--- /dev/null
+++ b/drivers/mfd/mc13xxx-i2c.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2009-2010 Creative Product Design
+ * Marc Reilly marc@cpdesign.com.au
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/mc13xxx.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+
+#include "mc13xxx.h"
+
+static const struct i2c_device_id mc13xxx_i2c_device_id[] = {
+ {
+ .name = "mc13892",
+ .driver_data = MC13XXX_ID_MC13892,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(i2c, mc13xxx_i2c_device_id);
+
+static const struct of_device_id mc13xxx_dt_ids[] = {
+ {
+ .compatible = "fsl,mc13892",
+ .data = (void *) &mc13xxx_i2c_device_id[0],
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
+
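+/*
+ * The PMIC exposes 8-bit register addresses holding 24-bit values, hence
+ * the 8/24 split below; no register caching is used at this stage.
+ */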
+static struct regmap_config mc13xxx_regmap_i2c_config = {
+ .reg_bits = 8,
+ .val_bits = 24,
+
+ .max_register = MC13XXX_NUMREGS,
+
+ .cache_type = REGCACHE_NONE,
+};
+
+static int mc13xxx_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct of_device_id *of_id;
+ struct i2c_driver *idrv = to_i2c_driver(client->dev.driver);
+ struct mc13xxx *mc13xxx;
+ struct mc13xxx_platform_data *pdata = dev_get_platdata(&client->dev);
+ int ret;
+
+ of_id = of_match_device(mc13xxx_dt_ids, &client->dev);
+ if (of_id)
+ idrv->id_table = (const struct i2c_device_id*) of_id->data;
+
+ mc13xxx = kzalloc(sizeof(*mc13xxx), GFP_KERNEL);
+ if (!mc13xxx)
+ return -ENOMEM;
+
+ dev_set_drvdata(&client->dev, mc13xxx);
+
+ mc13xxx->dev = &client->dev;
+ mutex_init(&mc13xxx->lock);
+
+ mc13xxx->regmap = regmap_init_i2c(client, &mc13xxx_regmap_i2c_config);
+ if (IS_ERR(mc13xxx->regmap)) {
+ ret = PTR_ERR(mc13xxx->regmap);
+ dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
+ ret);
+ dev_set_drvdata(&client->dev, NULL);
+ kfree(mc13xxx);
+ return ret;
+ }
+
+ ret = mc13xxx_common_init(mc13xxx, pdata, client->irq);
+
+ if (ret == 0 && (id->driver_data != mc13xxx->ictype))
+ dev_warn(mc13xxx->dev,
+ "device id doesn't match auto detection!\n");
+
+ return ret;
+}
+
+static int __devexit mc13xxx_i2c_remove(struct i2c_client *client)
+{
+ struct mc13xxx *mc13xxx = dev_get_drvdata(&client->dev);
+
+ mc13xxx_common_cleanup(mc13xxx);
+
+ return 0;
+}
+
+static struct i2c_driver mc13xxx_i2c_driver = {
+ .id_table = mc13xxx_i2c_device_id,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "mc13xxx",
+ .of_match_table = mc13xxx_dt_ids,
+ },
+ .probe = mc13xxx_i2c_probe,
+ .remove = __devexit_p(mc13xxx_i2c_remove),
+};
+
+static int __init mc13xxx_i2c_init(void)
+{
+ return i2c_add_driver(&mc13xxx_i2c_driver);
+}
+subsys_initcall(mc13xxx_i2c_init);
+
+static void __exit mc13xxx_i2c_exit(void)
+{
+ i2c_del_driver(&mc13xxx_i2c_driver);
+}
+module_exit(mc13xxx_i2c_exit);
+
+MODULE_DESCRIPTION("i2c driver for Freescale MC13XXX PMIC");
+MODULE_AUTHOR("Marc Reilly <marc@cpdesign.com.au");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
new file mode 100644
index 00000000000..3fcdab3eb8e
--- /dev/null
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2009-2010 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * loosely based on an earlier driver that has
+ * Copyright 2009 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/mc13xxx.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+
+#include "mc13xxx.h"
+
+static const struct spi_device_id mc13xxx_device_id[] = {
+ {
+ .name = "mc13783",
+ .driver_data = MC13XXX_ID_MC13783,
+ }, {
+ .name = "mc13892",
+ .driver_data = MC13XXX_ID_MC13892,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(spi, mc13xxx_device_id);
+
+static const struct of_device_id mc13xxx_dt_ids[] = {
+ { .compatible = "fsl,mc13783", .data = (void *) MC13XXX_ID_MC13783, },
+ { .compatible = "fsl,mc13892", .data = (void *) MC13XXX_ID_MC13892, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
+
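+/*
+ * 7 address bits plus 1 pad bit in front of a 24-bit payload reproduce the
+ * 32-bit SPI frame that the removed open-coded spi_sync() path in
+ * mc13xxx-core.c built by hand.
+ */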
+static struct regmap_config mc13xxx_regmap_spi_config = {
+ .reg_bits = 7,
+ .pad_bits = 1,
+ .val_bits = 24,
+
+ .max_register = MC13XXX_NUMREGS,
+
+ .cache_type = REGCACHE_NONE,
+};
+
+static int mc13xxx_spi_probe(struct spi_device *spi)
+{
+ const struct of_device_id *of_id;
+ struct spi_driver *sdrv = to_spi_driver(spi->dev.driver);
+ struct mc13xxx *mc13xxx;
+ struct mc13xxx_platform_data *pdata = dev_get_platdata(&spi->dev);
+ int ret;
+
+ of_id = of_match_device(mc13xxx_dt_ids, &spi->dev);
+ if (of_id)
+ sdrv->id_table = &mc13xxx_device_id[(enum mc13xxx_id) of_id->data];
+
+ mc13xxx = kzalloc(sizeof(*mc13xxx), GFP_KERNEL);
+ if (!mc13xxx)
+ return -ENOMEM;
+
+ dev_set_drvdata(&spi->dev, mc13xxx);
+ spi->mode = SPI_MODE_0 | SPI_CS_HIGH;
+ spi->bits_per_word = 32;
+
+ mc13xxx->dev = &spi->dev;
+ mutex_init(&mc13xxx->lock);
+
+ mc13xxx->regmap = regmap_init_spi(spi, &mc13xxx_regmap_spi_config);
+ if (IS_ERR(mc13xxx->regmap)) {
+ ret = PTR_ERR(mc13xxx->regmap);
+ dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
+ ret);
+ dev_set_drvdata(&spi->dev, NULL);
+ kfree(mc13xxx);
+ return ret;
+ }
+
+ ret = mc13xxx_common_init(mc13xxx, pdata, spi->irq);
+
+ if (ret) {
+ dev_set_drvdata(&spi->dev, NULL);
+ } else {
+ const struct spi_device_id *devid =
+ spi_get_device_id(spi);
+ if (!devid || devid->driver_data != mc13xxx->ictype)
+ dev_warn(mc13xxx->dev,
+ "device id doesn't match auto detection!\n");
+ }
+
+ return ret;
+}
+
+static int __devexit mc13xxx_spi_remove(struct spi_device *spi)
+{
+ struct mc13xxx *mc13xxx = dev_get_drvdata(&spi->dev);
+
+ mc13xxx_common_cleanup(mc13xxx);
+
+ return 0;
+}
+
+static struct spi_driver mc13xxx_spi_driver = {
+ .id_table = mc13xxx_device_id,
+ .driver = {
+ .name = "mc13xxx",
+ .owner = THIS_MODULE,
+ .of_match_table = mc13xxx_dt_ids,
+ },
+ .probe = mc13xxx_spi_probe,
+ .remove = __devexit_p(mc13xxx_spi_remove),
+};
+
+static int __init mc13xxx_init(void)
+{
+ return spi_register_driver(&mc13xxx_spi_driver);
+}
+subsys_initcall(mc13xxx_init);
+
+static void __exit mc13xxx_exit(void)
+{
+ spi_unregister_driver(&mc13xxx_spi_driver);
+}
+module_exit(mc13xxx_exit);
+
+MODULE_DESCRIPTION("Core driver for Freescale MC13XXX PMIC");
+MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/mc13xxx.h b/drivers/mfd/mc13xxx.h
new file mode 100644
index 00000000000..bbba06feea0
--- /dev/null
+++ b/drivers/mfd/mc13xxx.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2012 Creative Product Design
+ * Marc Reilly <marc@cpdesign.com.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#ifndef __DRIVERS_MFD_MC13XXX_H
+#define __DRIVERS_MFD_MC13XXX_H
+
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/mfd/mc13xxx.h>
+
+enum mc13xxx_id {
+ MC13XXX_ID_MC13783,
+ MC13XXX_ID_MC13892,
+ MC13XXX_ID_INVALID,
+};
+
+#define MC13XXX_NUMREGS 0x3f
+
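+/*
+ * One instance per chip: the bus glue (mc13xxx-i2c.c or mc13xxx-spi.c)
+ * allocates it, fills in dev and regmap, and hands it to
+ * mc13xxx_common_init(); mc13xxx_common_cleanup() tears it down again.
+ */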
+struct mc13xxx {
+ struct regmap *regmap;
+
+ struct device *dev;
+ enum mc13xxx_id ictype;
+
+ struct mutex lock;
+ int irq;
+ int flags;
+
+ irq_handler_t irqhandler[MC13XXX_NUM_IRQ];
+ void *irqdata[MC13XXX_NUM_IRQ];
+
+ int adcflags;
+};
+
+int mc13xxx_common_init(struct mc13xxx *mc13xxx,
+ struct mc13xxx_platform_data *pdata, int irq);
+
+void mc13xxx_common_cleanup(struct mc13xxx *mc13xxx);
+
+#endif /* __DRIVERS_MFD_MC13XXX_H */
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 189c2f07b83..29c122bf28e 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -204,7 +204,7 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
return -ENOENT;
}
- pcf = kzalloc(sizeof(*pcf), GFP_KERNEL);
+ pcf = devm_kzalloc(&client->dev, sizeof(*pcf), GFP_KERNEL);
if (!pcf)
return -ENOMEM;
@@ -212,12 +212,11 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
mutex_init(&pcf->lock);
- pcf->regmap = regmap_init_i2c(client, &pcf50633_regmap_config);
+ pcf->regmap = devm_regmap_init_i2c(client, &pcf50633_regmap_config);
if (IS_ERR(pcf->regmap)) {
ret = PTR_ERR(pcf->regmap);
- dev_err(pcf->dev, "Failed to allocate register map: %d\n",
- ret);
- goto err_free;
+ dev_err(pcf->dev, "Failed to allocate register map: %d\n", ret);
+ return ret;
}
i2c_set_clientdata(client, pcf);
@@ -228,7 +227,7 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
if (version < 0 || variant < 0) {
dev_err(pcf->dev, "Unable to probe pcf50633\n");
ret = -ENODEV;
- goto err_regmap;
+ return ret;
}
dev_info(pcf->dev, "Probed device version %d variant %d\n",
@@ -237,16 +236,11 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
pcf50633_irq_init(pcf, client->irq);
/* Create sub devices */
- pcf50633_client_dev_register(pcf, "pcf50633-input",
- &pcf->input_pdev);
- pcf50633_client_dev_register(pcf, "pcf50633-rtc",
- &pcf->rtc_pdev);
- pcf50633_client_dev_register(pcf, "pcf50633-mbc",
- &pcf->mbc_pdev);
- pcf50633_client_dev_register(pcf, "pcf50633-adc",
- &pcf->adc_pdev);
- pcf50633_client_dev_register(pcf, "pcf50633-backlight",
- &pcf->bl_pdev);
+ pcf50633_client_dev_register(pcf, "pcf50633-input", &pcf->input_pdev);
+ pcf50633_client_dev_register(pcf, "pcf50633-rtc", &pcf->rtc_pdev);
+ pcf50633_client_dev_register(pcf, "pcf50633-mbc", &pcf->mbc_pdev);
+ pcf50633_client_dev_register(pcf, "pcf50633-adc", &pcf->adc_pdev);
+ pcf50633_client_dev_register(pcf, "pcf50633-backlight", &pcf->bl_pdev);
for (i = 0; i < PCF50633_NUM_REGULATORS; i++) {
@@ -274,13 +268,6 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
pdata->probe_done(pcf);
return 0;
-
-err_regmap:
- regmap_exit(pcf->regmap);
-err_free:
- kfree(pcf);
-
- return ret;
}
static int __devexit pcf50633_remove(struct i2c_client *client)
@@ -300,9 +287,6 @@ static int __devexit pcf50633_remove(struct i2c_client *client)
for (i = 0; i < PCF50633_NUM_REGULATORS; i++)
platform_device_unregister(pcf->regulator_pdev[i]);
- regmap_exit(pcf->regmap);
- kfree(pcf);
-
return 0;
}
diff --git a/drivers/mfd/rc5t583.c b/drivers/mfd/rc5t583.c
index 44afae0a69c..cdc1df7fa0e 100644
--- a/drivers/mfd/rc5t583.c
+++ b/drivers/mfd/rc5t583.c
@@ -75,6 +75,7 @@ static struct deepsleep_control_data deepsleep_data[] = {
(RC5T583_EXT_PWRREQ1_CONTROL | RC5T583_EXT_PWRREQ2_CONTROL)
static struct mfd_cell rc5t583_subdevs[] = {
+ {.name = "rc5t583-gpio",},
{.name = "rc5t583-regulator",},
{.name = "rc5t583-rtc", },
{.name = "rc5t583-key", }
@@ -267,7 +268,7 @@ static int __devinit rc5t583_i2c_probe(struct i2c_client *i2c,
rc5t583->dev = &i2c->dev;
i2c_set_clientdata(i2c, rc5t583);
- rc5t583->regmap = regmap_init_i2c(i2c, &rc5t583_regmap_config);
+ rc5t583->regmap = devm_regmap_init_i2c(i2c, &rc5t583_regmap_config);
if (IS_ERR(rc5t583->regmap)) {
ret = PTR_ERR(rc5t583->regmap);
dev_err(&i2c->dev, "regmap initialization failed: %d\n", ret);
@@ -276,7 +277,7 @@ static int __devinit rc5t583_i2c_probe(struct i2c_client *i2c,
ret = rc5t583_clear_ext_power_req(rc5t583, pdata);
if (ret < 0)
- goto err_irq_init;
+ return ret;
if (i2c->irq) {
ret = rc5t583_irq_init(rc5t583, i2c->irq, pdata->irq_base);
@@ -299,8 +300,6 @@ static int __devinit rc5t583_i2c_probe(struct i2c_client *i2c,
err_add_devs:
if (irq_init_success)
rc5t583_irq_exit(rc5t583);
-err_irq_init:
- regmap_exit(rc5t583->regmap);
return ret;
}
@@ -310,7 +309,6 @@ static int __devexit rc5t583_i2c_remove(struct i2c_client *i2c)
mfd_remove_devices(rc5t583->dev);
rc5t583_irq_exit(rc5t583);
- regmap_exit(rc5t583->regmap);
return 0;
}
diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c
index 809bd4a6108..685d61e431a 100644
--- a/drivers/mfd/rdc321x-southbridge.c
+++ b/drivers/mfd/rdc321x-southbridge.c
@@ -108,18 +108,7 @@ static struct pci_driver rdc321x_sb_driver = {
.remove = __devexit_p(rdc321x_sb_remove),
};
-static int __init rdc321x_sb_init(void)
-{
- return pci_register_driver(&rdc321x_sb_driver);
-}
-
-static void __exit rdc321x_sb_exit(void)
-{
- pci_unregister_driver(&rdc321x_sb_driver);
-}
-
-module_init(rdc321x_sb_init);
-module_exit(rdc321x_sb_exit);
+module_pci_driver(rdc321x_sb_driver);
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/s5m-core.c b/drivers/mfd/s5m-core.c
index 48949d998d1..dd170307e60 100644
--- a/drivers/mfd/s5m-core.c
+++ b/drivers/mfd/s5m-core.c
@@ -114,12 +114,12 @@ static int s5m87xx_i2c_probe(struct i2c_client *i2c,
s5m87xx->wakeup = pdata->wakeup;
}
- s5m87xx->regmap = regmap_init_i2c(i2c, &s5m_regmap_config);
+ s5m87xx->regmap = devm_regmap_init_i2c(i2c, &s5m_regmap_config);
if (IS_ERR(s5m87xx->regmap)) {
ret = PTR_ERR(s5m87xx->regmap);
dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
ret);
- goto err;
+ return ret;
}
s5m87xx->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
@@ -159,7 +159,6 @@ err:
mfd_remove_devices(s5m87xx->dev);
s5m_irq_exit(s5m87xx);
i2c_unregister_device(s5m87xx->rtc);
- regmap_exit(s5m87xx->regmap);
return ret;
}
@@ -170,7 +169,6 @@ static int s5m87xx_i2c_remove(struct i2c_client *i2c)
mfd_remove_devices(s5m87xx->dev);
s5m_irq_exit(s5m87xx);
i2c_unregister_device(s5m87xx->rtc);
- regmap_exit(s5m87xx->regmap);
return 0;
}
diff --git a/drivers/mfd/sta2x11-mfd.c b/drivers/mfd/sta2x11-mfd.c
new file mode 100644
index 00000000000..d31fed07aef
--- /dev/null
+++ b/drivers/mfd/sta2x11-mfd.c
@@ -0,0 +1,467 @@
+/*
+ * Copyright (c) 2009-2011 Wind River Systems, Inc.
+ * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/sta2x11-mfd.h>
+
+#include <asm/sta2x11.h>
+
+/* This describes the STA2X11 MFD chip for us; we may have several */
+struct sta2x11_mfd {
+ struct sta2x11_instance *instance;
+ spinlock_t lock;
+ struct list_head list;
+ void __iomem *sctl_regs;
+ void __iomem *apbreg_regs;
+};
+
+static LIST_HEAD(sta2x11_mfd_list);
+
+/* Three functions to act on the list */
+static struct sta2x11_mfd *sta2x11_mfd_find(struct pci_dev *pdev)
+{
+ struct sta2x11_instance *instance;
+ struct sta2x11_mfd *mfd;
+
+ if (!pdev && !list_empty(&sta2x11_mfd_list)) {
+ pr_warning("%s: Unspecified device, "
+ "using first instance\n", __func__);
+ return list_entry(sta2x11_mfd_list.next,
+ struct sta2x11_mfd, list);
+ }
+
+ instance = sta2x11_get_instance(pdev);
+ if (!instance)
+ return NULL;
+ list_for_each_entry(mfd, &sta2x11_mfd_list, list) {
+ if (mfd->instance == instance)
+ return mfd;
+ }
+ return NULL;
+}
+
+static int __devinit sta2x11_mfd_add(struct pci_dev *pdev, gfp_t flags)
+{
+ struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
+ struct sta2x11_instance *instance;
+
+ if (mfd)
+ return -EBUSY;
+ instance = sta2x11_get_instance(pdev);
+ if (!instance)
+ return -EINVAL;
+ mfd = kzalloc(sizeof(*mfd), flags);
+ if (!mfd)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&mfd->list);
+ spin_lock_init(&mfd->lock);
+ mfd->instance = instance;
+ list_add(&mfd->list, &sta2x11_mfd_list);
+ return 0;
+}
+
+static int __devexit mfd_remove(struct pci_dev *pdev)
+{
+ struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
+
+ if (!mfd)
+ return -ENODEV;
+ list_del(&mfd->list);
+ kfree(mfd);
+ return 0;
+}
+
+/* These two functions are exported and are not expected to fail */
+u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+{
+ struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
+ u32 r;
+ unsigned long flags;
+
+ if (!mfd) {
+ dev_warn(&pdev->dev, ": can't access sctl regs\n");
+ return 0;
+ }
+ if (!mfd->sctl_regs) {
+ dev_warn(&pdev->dev, ": system ctl not initialized\n");
+ return 0;
+ }
+ spin_lock_irqsave(&mfd->lock, flags);
+ r = readl(mfd->sctl_regs + reg);
+ r &= ~mask;
+ r |= val;
+ if (mask)
+ writel(r, mfd->sctl_regs + reg);
+ spin_unlock_irqrestore(&mfd->lock, flags);
+ return r;
+}
+EXPORT_SYMBOL(sta2x11_sctl_mask);
+
+u32 sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+{
+ struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
+ u32 r;
+ unsigned long flags;
+
+ if (!mfd) {
+ dev_warn(&pdev->dev, ": can't access apb regs\n");
+ return 0;
+ }
+ if (!mfd->apbreg_regs) {
+ dev_warn(&pdev->dev, ": apb bridge not initialized\n");
+ return 0;
+ }
+ spin_lock_irqsave(&mfd->lock, flags);
+ r = readl(mfd->apbreg_regs + reg);
+ r &= ~mask;
+ r |= val;
+ if (mask)
+ writel(r, mfd->apbreg_regs + reg);
+ spin_unlock_irqrestore(&mfd->lock, flags);
+ return r;
+}
+EXPORT_SYMBOL(sta2x11_apbreg_mask);
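+/*
+ * Usage sketch (illustrative): client drivers on the same STA2X11 instance
+ * read-modify-write the shared registers through the two helpers above,
+ * e.g.
+ *	sta2x11_sctl_mask(pdev, SCTL_SCPEREN0, mask, bits);
+ * calling either helper with mask == 0 and val == 0 skips the write and
+ * simply returns the current register value.
+ */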
+
+/* Two debugfs files, for our registers (FIXME: one instance only) */
+#define REG(regname) {.name = #regname, .offset = SCTL_ ## regname}
+static struct debugfs_reg32 sta2x11_sctl_regs[] = {
+ REG(SCCTL), REG(ARMCFG), REG(SCPLLCTL), REG(SCPLLFCTRL),
+ REG(SCRESFRACT), REG(SCRESCTRL1), REG(SCRESXTRL2), REG(SCPEREN0),
+ REG(SCPEREN1), REG(SCPEREN2), REG(SCGRST), REG(SCPCIPMCR1),
+ REG(SCPCIPMCR2), REG(SCPCIPMSR1), REG(SCPCIPMSR2), REG(SCPCIPMSR3),
+ REG(SCINTREN), REG(SCRISR), REG(SCCLKSTAT0), REG(SCCLKSTAT1),
+ REG(SCCLKSTAT2), REG(SCRSTSTA),
+};
+#undef REG
+
+static struct debugfs_regset32 sctl_regset = {
+ .regs = sta2x11_sctl_regs,
+ .nregs = ARRAY_SIZE(sta2x11_sctl_regs),
+};
+
+#define REG(regname) {.name = #regname, .offset = regname}
+static struct debugfs_reg32 sta2x11_apbreg_regs[] = {
+ REG(APBREG_BSR), REG(APBREG_PAER), REG(APBREG_PWAC), REG(APBREG_PRAC),
+ REG(APBREG_PCG), REG(APBREG_PUR), REG(APBREG_EMU_PCG),
+};
+#undef REG
+
+static struct debugfs_regset32 apbreg_regset = {
+ .regs = sta2x11_apbreg_regs,
+ .nregs = ARRAY_SIZE(sta2x11_apbreg_regs),
+};
+
+static struct dentry *sta2x11_sctl_debugfs;
+static struct dentry *sta2x11_apbreg_debugfs;
+
+/* Probe for the two platform devices */
+static int sta2x11_sctl_probe(struct platform_device *dev)
+{
+ struct pci_dev **pdev;
+ struct sta2x11_mfd *mfd;
+ struct resource *res;
+
+ pdev = dev->dev.platform_data;
+ mfd = sta2x11_mfd_find(*pdev);
+ if (!mfd)
+ return -ENODEV;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOMEM;
+
+ if (!request_mem_region(res->start, resource_size(res),
+ "sta2x11-sctl"))
+ return -EBUSY;
+
+ mfd->sctl_regs = ioremap(res->start, resource_size(res));
+ if (!mfd->sctl_regs) {
+ release_mem_region(res->start, resource_size(res));
+ return -ENOMEM;
+ }
+ sctl_regset.base = mfd->sctl_regs;
+ sta2x11_sctl_debugfs = debugfs_create_regset32("sta2x11-sctl",
+ S_IFREG | S_IRUGO,
+ NULL, &sctl_regset);
+ return 0;
+}
+
+static int sta2x11_apbreg_probe(struct platform_device *dev)
+{
+ struct pci_dev **pdev;
+ struct sta2x11_mfd *mfd;
+ struct resource *res;
+
+ pdev = dev->dev.platform_data;
+ dev_dbg(&dev->dev, "%s: pdata is %p\n", __func__, pdev);
+ dev_dbg(&dev->dev, "%s: *pdata is %p\n", __func__, *pdev);
+
+ mfd = sta2x11_mfd_find(*pdev);
+ if (!mfd)
+ return -ENODEV;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOMEM;
+
+ if (!request_mem_region(res->start, resource_size(res),
+ "sta2x11-apbreg"))
+ return -EBUSY;
+
+ mfd->apbreg_regs = ioremap(res->start, resource_size(res));
+ if (!mfd->apbreg_regs) {
+ release_mem_region(res->start, resource_size(res));
+ return -ENOMEM;
+ }
+ dev_dbg(&dev->dev, "%s: regbase %p\n", __func__, mfd->apbreg_regs);
+
+ apbreg_regset.base = mfd->apbreg_regs;
+ sta2x11_apbreg_debugfs = debugfs_create_regset32("sta2x11-apbreg",
+ S_IFREG | S_IRUGO,
+ NULL, &apbreg_regset);
+ return 0;
+}
+
+/* The two platform drivers */
+static struct platform_driver sta2x11_sctl_platform_driver = {
+ .driver = {
+ .name = "sta2x11-sctl",
+ .owner = THIS_MODULE,
+ },
+ .probe = sta2x11_sctl_probe,
+};
+
+static int __init sta2x11_sctl_init(void)
+{
+ pr_info("%s\n", __func__);
+ return platform_driver_register(&sta2x11_sctl_platform_driver);
+}
+
+static struct platform_driver sta2x11_platform_driver = {
+ .driver = {
+ .name = "sta2x11-apbreg",
+ .owner = THIS_MODULE,
+ },
+ .probe = sta2x11_apbreg_probe,
+};
+
+static int __init sta2x11_apbreg_init(void)
+{
+ pr_info("%s\n", __func__);
+ return platform_driver_register(&sta2x11_platform_driver);
+}
+
+/*
+ * What follows is the PCI device that hosts the above two pdevs.
+ * Each logic block is 4kB and they are all consecutive: we use this info.
+ */
+
+/* Bar 0 */
+enum bar0_cells {
+ STA2X11_GPIO_0 = 0,
+ STA2X11_GPIO_1,
+ STA2X11_GPIO_2,
+ STA2X11_GPIO_3,
+ STA2X11_SCTL,
+ STA2X11_SCR,
+ STA2X11_TIME,
+};
+/* Bar 1 */
+enum bar1_cells {
+ STA2X11_APBREG = 0,
+};
+#define CELL_4K(_name, _cell) { \
+ .name = _name, \
+ .start = _cell * 4096, .end = _cell * 4096 + 4095, \
+ .flags = IORESOURCE_MEM, \
+ }
+
+static const __devinitconst struct resource gpio_resources[] = {
+ {
+ .name = "sta2x11_gpio", /* 4 consecutive cells, 1 driver */
+ .start = 0,
+ .end = (4 * 4096) - 1,
+ .flags = IORESOURCE_MEM,
+ }
+};
+static const __devinitconst struct resource sctl_resources[] = {
+ CELL_4K("sta2x11-sctl", STA2X11_SCTL),
+};
+static const __devinitconst struct resource scr_resources[] = {
+ CELL_4K("sta2x11-scr", STA2X11_SCR),
+};
+static const __devinitconst struct resource time_resources[] = {
+ CELL_4K("sta2x11-time", STA2X11_TIME),
+};
+
+static const __devinitconst struct resource apbreg_resources[] = {
+ CELL_4K("sta2x11-apbreg", STA2X11_APBREG),
+};
+
+#define DEV(_name, _r) \
+ { .name = _name, .num_resources = ARRAY_SIZE(_r), .resources = _r, }
+
+static __devinitdata struct mfd_cell sta2x11_mfd_bar0[] = {
+ DEV("sta2x11-gpio", gpio_resources), /* offset 0: we add pdata later */
+ DEV("sta2x11-sctl", sctl_resources),
+ DEV("sta2x11-scr", scr_resources),
+ DEV("sta2x11-time", time_resources),
+};
+
+static __devinitdata struct mfd_cell sta2x11_mfd_bar1[] = {
+ DEV("sta2x11-apbreg", apbreg_resources),
+};
+
+static int sta2x11_mfd_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static int sta2x11_mfd_resume(struct pci_dev *pdev)
+{
+ int err;
+
+ pci_set_power_state(pdev, 0);
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+ pci_restore_state(pdev);
+
+ return 0;
+}
+
+static int __devinit sta2x11_mfd_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
+{
+ int err, i;
+ struct sta2x11_gpio_pdata *gpio_data;
+
+ dev_info(&pdev->dev, "%s\n", __func__);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Can't enable device.\n");
+ return err;
+ }
+
+ err = pci_enable_msi(pdev);
+ if (err)
+ dev_info(&pdev->dev, "Enable msi failed\n");
+
+ /* Read gpio config data as pci device's platform data */
+ gpio_data = dev_get_platdata(&pdev->dev);
+ if (!gpio_data)
+ dev_warn(&pdev->dev, "no gpio configuration\n");
+
+ dev_dbg(&pdev->dev, "%s, gpio_data = %p (%p)\n", __func__,
+ gpio_data, &gpio_data);
+ dev_dbg(&pdev->dev, "%s, pdev = %p (%p)\n", __func__,
+ pdev, &pdev);
+
+ /* platform data is the pci device for all of them */
+ for (i = 0; i < ARRAY_SIZE(sta2x11_mfd_bar0); i++) {
+ sta2x11_mfd_bar0[i].pdata_size = sizeof(pdev);
+ sta2x11_mfd_bar0[i].platform_data = &pdev;
+ }
+ sta2x11_mfd_bar1[0].pdata_size = sizeof(pdev);
+ sta2x11_mfd_bar1[0].platform_data = &pdev;
+
+ /* Record this pdev before mfd_add_devices: their probe looks for it */
+ sta2x11_mfd_add(pdev, GFP_ATOMIC);
+
+
+ err = mfd_add_devices(&pdev->dev, -1,
+ sta2x11_mfd_bar0,
+ ARRAY_SIZE(sta2x11_mfd_bar0),
+ &pdev->resource[0],
+ 0);
+ if (err) {
+ dev_err(&pdev->dev, "mfd_add_devices[0] failed: %d\n", err);
+ goto err_disable;
+ }
+
+ err = mfd_add_devices(&pdev->dev, -1,
+ sta2x11_mfd_bar1,
+ ARRAY_SIZE(sta2x11_mfd_bar1),
+ &pdev->resource[1],
+ 0);
+ if (err) {
+ dev_err(&pdev->dev, "mfd_add_devices[1] failed: %d\n", err);
+ goto err_disable;
+ }
+
+ return 0;
+
+err_disable:
+ mfd_remove_devices(&pdev->dev);
+ pci_disable_device(pdev);
+ pci_disable_msi(pdev);
+ return err;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(sta2x11_mfd_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_GPIO)},
+ {0,},
+};
+
+static struct pci_driver sta2x11_mfd_driver = {
+ .name = "sta2x11-mfd",
+ .id_table = sta2x11_mfd_tbl,
+ .probe = sta2x11_mfd_probe,
+ .suspend = sta2x11_mfd_suspend,
+ .resume = sta2x11_mfd_resume,
+};
+
+static int __init sta2x11_mfd_init(void)
+{
+ pr_info("%s\n", __func__);
+ return pci_register_driver(&sta2x11_mfd_driver);
+}
+
+/*
+ * All of this must be ready before "normal" devices like MMCI appear.
+ * But MFD (the pci device) can't be too early. The following choice
+ * prepares the platform drivers very early and probes the PCI device later,
+ * but before other PCI devices.
+ */
+subsys_initcall(sta2x11_apbreg_init);
+subsys_initcall(sta2x11_sctl_init);
+rootfs_initcall(sta2x11_mfd_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Wind River");
+MODULE_DESCRIPTION("STA2x11 mfd for GPIO, SCTL and APBREG");
+MODULE_DEVICE_TABLE(pci, sta2x11_mfd_tbl);
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index b58c43c7ea9..afd459013ec 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -122,7 +122,6 @@ MODULE_DEVICE_TABLE(spi, stmpe_id);
static struct spi_driver stmpe_spi_driver = {
.driver = {
.name = "stmpe-spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &stmpe_dev_pm_ops,
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index 47f802bf184..396b9d1b6bd 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -283,27 +283,24 @@ static int __devinit tps65090_i2c_probe(struct i2c_client *client,
}
}
- tps65090->rmap = regmap_init_i2c(tps65090->client,
- &tps65090_regmap_config);
+ tps65090->rmap = devm_regmap_init_i2c(tps65090->client,
+ &tps65090_regmap_config);
if (IS_ERR(tps65090->rmap)) {
- dev_err(&client->dev, "regmap_init failed with err: %ld\n",
- PTR_ERR(tps65090->rmap));
+ ret = PTR_ERR(tps65090->rmap);
+ dev_err(&client->dev, "regmap_init failed with err: %d\n", ret);
goto err_irq_exit;
- };
+ }
ret = mfd_add_devices(tps65090->dev, -1, tps65090s,
ARRAY_SIZE(tps65090s), NULL, 0);
if (ret) {
dev_err(&client->dev, "add mfd devices failed with err: %d\n",
ret);
- goto err_regmap_exit;
+ goto err_irq_exit;
}
return 0;
-err_regmap_exit:
- regmap_exit(tps65090->rmap);
-
err_irq_exit:
if (client->irq)
free_irq(client->irq, tps65090);
@@ -316,29 +313,34 @@ static int __devexit tps65090_i2c_remove(struct i2c_client *client)
struct tps65090 *tps65090 = i2c_get_clientdata(client);
mfd_remove_devices(tps65090->dev);
- regmap_exit(tps65090->rmap);
if (client->irq)
free_irq(client->irq, tps65090);
return 0;
}
-#ifdef CONFIG_PM
-static int tps65090_i2c_suspend(struct i2c_client *client, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int tps65090_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
if (client->irq)
disable_irq(client->irq);
return 0;
}
-static int tps65090_i2c_resume(struct i2c_client *client)
+static int tps65090_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
if (client->irq)
enable_irq(client->irq);
return 0;
}
#endif
+static const struct dev_pm_ops tps65090_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tps65090_suspend, tps65090_resume)
+};
+
static const struct i2c_device_id tps65090_id_table[] = {
{ "tps65090", 0 },
{ },
@@ -349,13 +351,10 @@ static struct i2c_driver tps65090_driver = {
.driver = {
.name = "tps65090",
.owner = THIS_MODULE,
+ .pm = &tps65090_pm_ops,
},
.probe = tps65090_i2c_probe,
.remove = __devexit_p(tps65090_i2c_remove),
-#ifdef CONFIG_PM
- .suspend = tps65090_i2c_suspend,
- .resume = tps65090_i2c_resume,
-#endif
.id_table = tps65090_id_table,
};
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index f7d854e4cc6..db194e433c0 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -96,7 +96,7 @@ EXPORT_SYMBOL_GPL(tps65217_reg_write);
* @val: Value to write.
* @level: Password protected level
*/
-int tps65217_update_bits(struct tps65217 *tps, unsigned int reg,
+static int tps65217_update_bits(struct tps65217 *tps, unsigned int reg,
unsigned int mask, unsigned int val, unsigned int level)
{
int ret;
@@ -150,7 +150,7 @@ static int __devinit tps65217_probe(struct i2c_client *client,
return -ENOMEM;
tps->pdata = pdata;
- tps->regmap = regmap_init_i2c(client, &tps65217_regmap_config);
+ tps->regmap = devm_regmap_init_i2c(client, &tps65217_regmap_config);
if (IS_ERR(tps->regmap)) {
ret = PTR_ERR(tps->regmap);
dev_err(tps->dev, "Failed to allocate register map: %d\n",
@@ -163,9 +163,9 @@ static int __devinit tps65217_probe(struct i2c_client *client,
ret = tps65217_reg_read(tps, TPS65217_REG_CHIPID, &version);
if (ret < 0) {
- dev_err(tps->dev, "Failed to read revision"
- " register: %d\n", ret);
- goto err_regmap;
+ dev_err(tps->dev, "Failed to read revision register: %d\n",
+ ret);
+ return ret;
}
dev_info(tps->dev, "TPS65217 ID %#x version 1.%d\n",
@@ -190,11 +190,6 @@ static int __devinit tps65217_probe(struct i2c_client *client,
}
return 0;
-
-err_regmap:
- regmap_exit(tps->regmap);
-
- return ret;
}
static int __devexit tps65217_remove(struct i2c_client *client)
@@ -205,8 +200,6 @@ static int __devexit tps65217_remove(struct i2c_client *client)
for (i = 0; i < TPS65217_NUM_REGULATOR; i++)
platform_device_unregister(tps->regulator_pdev[i]);
- regmap_exit(tps->regmap);
-
return 0;
}
diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c
index c9ed5c00a62..09aab3e4776 100644
--- a/drivers/mfd/tps65910-irq.c
+++ b/drivers/mfd/tps65910-irq.c
@@ -20,15 +20,10 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/gpio.h>
#include <linux/mfd/tps65910.h>
-static inline int irq_to_tps65910_irq(struct tps65910 *tps65910,
- int irq)
-{
- return (irq - tps65910->irq_base);
-}
-
/*
* This is a threaded IRQ handler so can access I2C/SPI. Since all
* interrupts are clear on read the IRQ line will be reasserted and
@@ -41,28 +36,28 @@ static inline int irq_to_tps65910_irq(struct tps65910 *tps65910,
static irqreturn_t tps65910_irq(int irq, void *irq_data)
{
struct tps65910 *tps65910 = irq_data;
+ unsigned int reg;
u32 irq_sts;
u32 irq_mask;
- u8 reg;
int i;
- tps65910->read(tps65910, TPS65910_INT_STS, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_STS, &reg);
irq_sts = reg;
- tps65910->read(tps65910, TPS65910_INT_STS2, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_STS2, &reg);
irq_sts |= reg << 8;
switch (tps65910_chip_id(tps65910)) {
case TPS65911:
- tps65910->read(tps65910, TPS65910_INT_STS3, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_STS3, &reg);
irq_sts |= reg << 16;
}
- tps65910->read(tps65910, TPS65910_INT_MSK, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_MSK, &reg);
irq_mask = reg;
- tps65910->read(tps65910, TPS65910_INT_MSK2, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_MSK2, &reg);
irq_mask |= reg << 8;
switch (tps65910_chip_id(tps65910)) {
case TPS65911:
- tps65910->read(tps65910, TPS65910_INT_MSK3, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_MSK3, &reg);
irq_mask |= reg << 16;
}
@@ -76,19 +71,19 @@ static irqreturn_t tps65910_irq(int irq, void *irq_data)
if (!(irq_sts & (1 << i)))
continue;
- handle_nested_irq(tps65910->irq_base + i);
+ handle_nested_irq(irq_find_mapping(tps65910->domain, i));
}
/* Write the STS register back to clear IRQs we handled */
reg = irq_sts & 0xFF;
irq_sts >>= 8;
- tps65910->write(tps65910, TPS65910_INT_STS, 1, &reg);
+ tps65910_reg_write(tps65910, TPS65910_INT_STS, reg);
reg = irq_sts & 0xFF;
- tps65910->write(tps65910, TPS65910_INT_STS2, 1, &reg);
+ tps65910_reg_write(tps65910, TPS65910_INT_STS2, reg);
switch (tps65910_chip_id(tps65910)) {
case TPS65911:
reg = irq_sts >> 8;
- tps65910->write(tps65910, TPS65910_INT_STS3, 1, &reg);
+ tps65910_reg_write(tps65910, TPS65910_INT_STS3, reg);
}
return IRQ_HANDLED;
@@ -105,27 +100,27 @@ static void tps65910_irq_sync_unlock(struct irq_data *data)
{
struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
u32 reg_mask;
- u8 reg;
+ unsigned int reg;
- tps65910->read(tps65910, TPS65910_INT_MSK, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_MSK, &reg);
reg_mask = reg;
- tps65910->read(tps65910, TPS65910_INT_MSK2, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_MSK2, &reg);
reg_mask |= reg << 8;
switch (tps65910_chip_id(tps65910)) {
case TPS65911:
- tps65910->read(tps65910, TPS65910_INT_MSK3, 1, &reg);
+ tps65910_reg_read(tps65910, TPS65910_INT_MSK3, &reg);
reg_mask |= reg << 16;
}
if (tps65910->irq_mask != reg_mask) {
reg = tps65910->irq_mask & 0xFF;
- tps65910->write(tps65910, TPS65910_INT_MSK, 1, &reg);
+ tps65910_reg_write(tps65910, TPS65910_INT_MSK, reg);
reg = tps65910->irq_mask >> 8 & 0xFF;
- tps65910->write(tps65910, TPS65910_INT_MSK2, 1, &reg);
+ tps65910_reg_write(tps65910, TPS65910_INT_MSK2, reg);
switch (tps65910_chip_id(tps65910)) {
case TPS65911:
reg = tps65910->irq_mask >> 16;
- tps65910->write(tps65910, TPS65910_INT_MSK3, 1, &reg);
+ tps65910_reg_write(tps65910, TPS65910_INT_MSK3, reg);
}
}
mutex_unlock(&tps65910->irq_lock);
@@ -135,14 +130,14 @@ static void tps65910_irq_enable(struct irq_data *data)
{
struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
- tps65910->irq_mask &= ~( 1 << irq_to_tps65910_irq(tps65910, data->irq));
+ tps65910->irq_mask &= ~(1 << data->hwirq);
}
static void tps65910_irq_disable(struct irq_data *data)
{
struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
- tps65910->irq_mask |= ( 1 << irq_to_tps65910_irq(tps65910, data->irq));
+ tps65910->irq_mask |= (1 << data->hwirq);
}
#ifdef CONFIG_PM_SLEEP
@@ -164,10 +159,35 @@ static struct irq_chip tps65910_irq_chip = {
.irq_set_wake = tps65910_irq_set_wake,
};
+static int tps65910_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct tps65910 *tps65910 = h->host_data;
+
+ irq_set_chip_data(virq, tps65910);
+ irq_set_chip_and_handler(virq, &tps65910_irq_chip, handle_edge_irq);
+ irq_set_nested_thread(virq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(virq, IRQF_VALID);
+#else
+ irq_set_noprobe(virq);
+#endif
+
+ return 0;
+}
+
+static struct irq_domain_ops tps65910_domain_ops = {
+ .map = tps65910_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
int tps65910_irq_init(struct tps65910 *tps65910, int irq,
struct tps65910_platform_data *pdata)
{
- int ret, cur_irq;
+ int ret;
int flags = IRQF_ONESHOT;
if (!irq) {
@@ -175,17 +195,11 @@ int tps65910_irq_init(struct tps65910 *tps65910, int irq,
return -EINVAL;
}
- if (!pdata || !pdata->irq_base) {
- dev_warn(tps65910->dev, "No interrupt support, no IRQ base\n");
+ if (!pdata) {
+ dev_warn(tps65910->dev, "No interrupt support, no pdata\n");
return -EINVAL;
}
- tps65910->irq_mask = 0xFFFFFF;
-
- mutex_init(&tps65910->irq_lock);
- tps65910->chip_irq = irq;
- tps65910->irq_base = pdata->irq_base;
-
switch (tps65910_chip_id(tps65910)) {
case TPS65910:
tps65910->irq_num = TPS65910_NUM_IRQ;
@@ -195,22 +209,36 @@ int tps65910_irq_init(struct tps65910 *tps65910, int irq,
break;
}
- /* Register with genirq */
- for (cur_irq = tps65910->irq_base;
- cur_irq < tps65910->irq_num + tps65910->irq_base;
- cur_irq++) {
- irq_set_chip_data(cur_irq, tps65910);
- irq_set_chip_and_handler(cur_irq, &tps65910_irq_chip,
- handle_edge_irq);
- irq_set_nested_thread(cur_irq, 1);
-
- /* ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
- set_irq_flags(cur_irq, IRQF_VALID);
-#else
- irq_set_noprobe(cur_irq);
-#endif
+ if (pdata->irq_base > 0) {
+ pdata->irq_base = irq_alloc_descs(pdata->irq_base, 0,
+ tps65910->irq_num, -1);
+ if (pdata->irq_base < 0) {
+ dev_warn(tps65910->dev, "Failed to alloc IRQs: %d\n",
+ pdata->irq_base);
+ return pdata->irq_base;
+ }
+ }
+
+ tps65910->irq_mask = 0xFFFFFF;
+
+ mutex_init(&tps65910->irq_lock);
+ tps65910->chip_irq = irq;
+ tps65910->irq_base = pdata->irq_base;
+
+ if (pdata->irq_base > 0)
+ tps65910->domain = irq_domain_add_legacy(tps65910->dev->of_node,
+ tps65910->irq_num,
+ pdata->irq_base,
+ 0,
+ &tps65910_domain_ops, tps65910);
+ else
+ tps65910->domain = irq_domain_add_linear(tps65910->dev->of_node,
+ tps65910->irq_num,
+ &tps65910_domain_ops, tps65910);
+
+ if (!tps65910->domain) {
+ dev_err(tps65910->dev, "Failed to create IRQ domain\n");
+ return -ENOMEM;
}
ret = request_threaded_irq(irq, NULL, tps65910_irq, flags,
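The tps65910-irq.c hunk above drops the fixed irq_base bookkeeping in favour of an irq_domain: hardware IRQ numbers are mapped to Linux virqs through a .map callback, and the threaded handler dispatches with handle_nested_irq(irq_find_mapping(...)). A minimal sketch of that pattern follows; it is illustrative only, and the mychip_* names are hypothetical rather than part of this patch.

/*
 * Sketch: use a legacy domain when a platform irq_base was provided,
 * otherwise a linear domain, and let .map() set up each virq on creation.
 */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

struct mychip {
	struct device *dev;
	struct irq_domain *domain;
	int num_irqs;
};

static struct irq_chip mychip_irq_chip;	/* .irq_enable/.irq_disable etc. */

static int mychip_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	irq_set_chip_data(virq, h->host_data);
	irq_set_chip_and_handler(virq, &mychip_irq_chip, handle_edge_irq);
	irq_set_nested_thread(virq, 1);
	irq_set_noprobe(virq);
	return 0;
}

static const struct irq_domain_ops mychip_domain_ops = {
	.map	= mychip_irq_map,
	.xlate	= irq_domain_xlate_twocell,
};

static int mychip_irq_init(struct mychip *chip, int irq_base)
{
	if (irq_base > 0)
		chip->domain = irq_domain_add_legacy(chip->dev->of_node,
						     chip->num_irqs, irq_base,
						     0, &mychip_domain_ops,
						     chip);
	else
		chip->domain = irq_domain_add_linear(chip->dev->of_node,
						     chip->num_irqs,
						     &mychip_domain_ops,
						     chip);
	if (!chip->domain)
		return -ENOMEM;

	/* In the threaded handler, hwirq i is then dispatched with:
	 *	handle_nested_irq(irq_find_mapping(chip->domain, i));
	 */
	return 0;
}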
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index bf2b25ebf2c..be9e07b7732 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -19,13 +19,16 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/gpio.h>
#include <linux/mfd/core.h>
#include <linux/regmap.h>
#include <linux/mfd/tps65910.h>
+#include <linux/of_device.h>
static struct mfd_cell tps65910s[] = {
{
+ .name = "tps65910-gpio",
+ },
+ {
.name = "tps65910-pmic",
},
{
@@ -37,30 +40,6 @@ static struct mfd_cell tps65910s[] = {
};
-static int tps65910_i2c_read(struct tps65910 *tps65910, u8 reg,
- int bytes, void *dest)
-{
- return regmap_bulk_read(tps65910->regmap, reg, dest, bytes);
-}
-
-static int tps65910_i2c_write(struct tps65910 *tps65910, u8 reg,
- int bytes, void *src)
-{
- return regmap_bulk_write(tps65910->regmap, reg, src, bytes);
-}
-
-int tps65910_set_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
-{
- return regmap_update_bits(tps65910->regmap, reg, mask, mask);
-}
-EXPORT_SYMBOL_GPL(tps65910_set_bits);
-
-int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
-{
- return regmap_update_bits(tps65910->regmap, reg, mask, 0);
-}
-EXPORT_SYMBOL_GPL(tps65910_clear_bits);
-
static bool is_volatile_reg(struct device *dev, unsigned int reg)
{
struct tps65910 *tps65910 = dev_get_drvdata(dev);
@@ -85,80 +64,197 @@ static const struct regmap_config tps65910_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.volatile_reg = is_volatile_reg,
- .max_register = TPS65910_MAX_REGISTER,
- .num_reg_defaults_raw = TPS65910_MAX_REGISTER,
+ .max_register = TPS65910_MAX_REGISTER - 1,
.cache_type = REGCACHE_RBTREE,
};
-static int tps65910_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int __devinit tps65910_sleepinit(struct tps65910 *tps65910,
+ struct tps65910_board *pmic_pdata)
+{
+ struct device *dev = NULL;
+ int ret = 0;
+
+ dev = tps65910->dev;
+
+ if (!pmic_pdata->en_dev_slp)
+ return 0;
+
+ /* enabling SLEEP device state */
+ ret = tps65910_reg_set_bits(tps65910, TPS65910_DEVCTRL,
+ DEVCTRL_DEV_SLP_MASK);
+ if (ret < 0) {
+ dev_err(dev, "set dev_slp failed: %d\n", ret);
+ goto err_sleep_init;
+ }
+
+ /* Return if there is no sleep keepon data. */
+ if (!pmic_pdata->slp_keepon)
+ return 0;
+
+ if (pmic_pdata->slp_keepon->therm_keepon) {
+ ret = tps65910_reg_set_bits(tps65910,
+ TPS65910_SLEEP_KEEP_RES_ON,
+ SLEEP_KEEP_RES_ON_THERM_KEEPON_MASK);
+ if (ret < 0) {
+ dev_err(dev, "set therm_keepon failed: %d\n", ret);
+ goto disable_dev_slp;
+ }
+ }
+
+ if (pmic_pdata->slp_keepon->clkout32k_keepon) {
+ ret = tps65910_reg_set_bits(tps65910,
+ TPS65910_SLEEP_KEEP_RES_ON,
+ SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_MASK);
+ if (ret < 0) {
+ dev_err(dev, "set clkout32k_keepon failed: %d\n", ret);
+ goto disable_dev_slp;
+ }
+ }
+
+ if (pmic_pdata->slp_keepon->i2chs_keepon) {
+ ret = tps65910_reg_set_bits(tps65910,
+ TPS65910_SLEEP_KEEP_RES_ON,
+ SLEEP_KEEP_RES_ON_I2CHS_KEEPON_MASK);
+ if (ret < 0) {
+ dev_err(dev, "set i2chs_keepon failed: %d\n", ret);
+ goto disable_dev_slp;
+ }
+ }
+
+ return 0;
+
+disable_dev_slp:
+ tps65910_reg_clear_bits(tps65910, TPS65910_DEVCTRL,
+ DEVCTRL_DEV_SLP_MASK);
+
+err_sleep_init:
+ return ret;
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id tps65910_of_match[] = {
+ { .compatible = "ti,tps65910", .data = (void *)TPS65910},
+ { .compatible = "ti,tps65911", .data = (void *)TPS65911},
+ { },
+};
+MODULE_DEVICE_TABLE(of, tps65910_of_match);
+
+static struct tps65910_board *tps65910_parse_dt(struct i2c_client *client,
+ int *chip_id)
+{
+ struct device_node *np = client->dev.of_node;
+ struct tps65910_board *board_info;
+ unsigned int prop;
+ const struct of_device_id *match;
+ int ret = 0;
+
+ match = of_match_device(tps65910_of_match, &client->dev);
+ if (!match) {
+ dev_err(&client->dev, "Failed to find matching dt id\n");
+ return NULL;
+ }
+
+ *chip_id = (int)match->data;
+
+ board_info = devm_kzalloc(&client->dev, sizeof(*board_info),
+ GFP_KERNEL);
+ if (!board_info) {
+ dev_err(&client->dev, "Failed to allocate pdata\n");
+ return NULL;
+ }
+
+ ret = of_property_read_u32(np, "ti,vmbch-threshold", &prop);
+ if (!ret)
+ board_info->vmbch_threshold = prop;
+ else if (*chip_id == TPS65911)
+ dev_warn(&client->dev, "VMBCH-Threshold not specified");
+
+ ret = of_property_read_u32(np, "ti,vmbch2-threshold", &prop);
+ if (!ret)
+ board_info->vmbch2_threshold = prop;
+ else if (*chip_id == TPS65911)
+ dev_warn(&client->dev, "VMBCH2-Threshold not specified");
+
+ board_info->irq = client->irq;
+ board_info->irq_base = -1;
+
+ return board_info;
+}
+#else
+static inline
+struct tps65910_board *tps65910_parse_dt(struct i2c_client *client,
+ int *chip_id)
+{
+ return NULL;
+}
+#endif
+
+static __devinit int tps65910_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
{
struct tps65910 *tps65910;
struct tps65910_board *pmic_plat_data;
+ struct tps65910_board *of_pmic_plat_data = NULL;
struct tps65910_platform_data *init_data;
int ret = 0;
+ int chip_id = id->driver_data;
pmic_plat_data = dev_get_platdata(&i2c->dev);
+
+ if (!pmic_plat_data && i2c->dev.of_node) {
+ pmic_plat_data = tps65910_parse_dt(i2c, &chip_id);
+ of_pmic_plat_data = pmic_plat_data;
+ }
+
if (!pmic_plat_data)
return -EINVAL;
- init_data = kzalloc(sizeof(struct tps65910_platform_data), GFP_KERNEL);
+ init_data = devm_kzalloc(&i2c->dev, sizeof(*init_data), GFP_KERNEL);
if (init_data == NULL)
return -ENOMEM;
- tps65910 = kzalloc(sizeof(struct tps65910), GFP_KERNEL);
- if (tps65910 == NULL) {
- kfree(init_data);
+ tps65910 = devm_kzalloc(&i2c->dev, sizeof(*tps65910), GFP_KERNEL);
+ if (tps65910 == NULL)
return -ENOMEM;
- }
+ tps65910->of_plat_data = of_pmic_plat_data;
i2c_set_clientdata(i2c, tps65910);
tps65910->dev = &i2c->dev;
tps65910->i2c_client = i2c;
- tps65910->id = id->driver_data;
- tps65910->read = tps65910_i2c_read;
- tps65910->write = tps65910_i2c_write;
+ tps65910->id = chip_id;
mutex_init(&tps65910->io_mutex);
- tps65910->regmap = regmap_init_i2c(i2c, &tps65910_regmap_config);
+ tps65910->regmap = devm_regmap_init_i2c(i2c, &tps65910_regmap_config);
if (IS_ERR(tps65910->regmap)) {
ret = PTR_ERR(tps65910->regmap);
dev_err(&i2c->dev, "regmap initialization failed: %d\n", ret);
- goto regmap_err;
+ return ret;
}
ret = mfd_add_devices(tps65910->dev, -1,
tps65910s, ARRAY_SIZE(tps65910s),
NULL, 0);
- if (ret < 0)
- goto err;
+ if (ret < 0) {
+ dev_err(&i2c->dev, "mfd_add_devices failed: %d\n", ret);
+ return ret;
+ }
init_data->irq = pmic_plat_data->irq;
init_data->irq_base = pmic_plat_data->irq_base;
- tps65910_gpio_init(tps65910, pmic_plat_data->gpio_base);
-
tps65910_irq_init(tps65910, init_data->irq, init_data);
- kfree(init_data);
- return ret;
+ tps65910_sleepinit(tps65910, pmic_plat_data);
-err:
- regmap_exit(tps65910->regmap);
-regmap_err:
- kfree(tps65910);
- kfree(init_data);
return ret;
}
-static int tps65910_i2c_remove(struct i2c_client *i2c)
+static __devexit int tps65910_i2c_remove(struct i2c_client *i2c)
{
struct tps65910 *tps65910 = i2c_get_clientdata(i2c);
tps65910_irq_exit(tps65910);
mfd_remove_devices(tps65910->dev);
- regmap_exit(tps65910->regmap);
- kfree(tps65910);
return 0;
}
@@ -175,9 +271,10 @@ static struct i2c_driver tps65910_i2c_driver = {
.driver = {
.name = "tps65910",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(tps65910_of_match),
},
.probe = tps65910_i2c_probe,
- .remove = tps65910_i2c_remove,
+ .remove = __devexit_p(tps65910_i2c_remove),
.id_table = tps65910_i2c_id,
};
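Several of the hunks above (tps65217, tps65910, twl6040, wm8350) convert kzalloc()/regmap_init_i2c() plus manual error unwinding into devm_kzalloc()/devm_regmap_init_i2c(). A minimal probe() sketch of that managed-resource pattern; the mychip_* names and register widths are hypothetical.

/*
 * Sketch: devm_* allocations and devm_regmap_init_i2c() are released
 * automatically on probe failure or device unbind, so the explicit
 * kfree()/regmap_exit() error paths removed above are no longer needed.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/slab.h>

struct mychip {
	struct device *dev;
	struct regmap *regmap;
};

static const struct regmap_config mychip_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

static int mychip_probe(struct i2c_client *i2c,
			const struct i2c_device_id *id)
{
	struct mychip *chip;

	chip = devm_kzalloc(&i2c->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = &i2c->dev;
	chip->regmap = devm_regmap_init_i2c(i2c, &mychip_regmap_config);
	if (IS_ERR(chip->regmap))
		return PTR_ERR(chip->regmap);	/* nothing to unwind */

	i2c_set_clientdata(i2c, chip);
	return 0;
}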
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 5d656e81435..ad733d76207 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -757,6 +757,7 @@ int twl4030_init_irq(struct device *dev, int irq_num)
dev_err(dev, "could not claim irq%d: %d\n", irq_num, status);
goto fail_rqirq;
}
+ enable_irq_wake(irq_num);
return irq_base;
fail_rqirq:
diff --git a/drivers/mfd/twl6040-core.c b/drivers/mfd/twl6040-core.c
index 2d6bedadca0..4ded9e7aa24 100644
--- a/drivers/mfd/twl6040-core.c
+++ b/drivers/mfd/twl6040-core.c
@@ -27,7 +27,12 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
+#include <linux/err.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/i2c.h>
@@ -35,8 +40,24 @@
#include <linux/err.h>
#include <linux/mfd/core.h>
#include <linux/mfd/twl6040.h>
+#include <linux/regulator/consumer.h>
#define VIBRACTRL_MEMBER(reg) ((reg == TWL6040_REG_VIBCTLL) ? 0 : 1)
+#define TWL6040_NUM_SUPPLIES (2)
+
+static bool twl6040_has_vibra(struct twl6040_platform_data *pdata,
+ struct device_node *node)
+{
+ if (pdata && pdata->vibra)
+ return true;
+
+#ifdef CONFIG_OF
+ if (of_find_node_by_name(node, "vibra"))
+ return true;
+#endif
+
+ return false;
+}
int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg)
{
@@ -502,17 +523,18 @@ static int __devinit twl6040_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct twl6040_platform_data *pdata = client->dev.platform_data;
+ struct device_node *node = client->dev.of_node;
struct twl6040 *twl6040;
struct mfd_cell *cell = NULL;
- int ret, children = 0;
+ int irq, ret, children = 0;
- if (!pdata) {
+ if (!pdata && !node) {
dev_err(&client->dev, "Platform data is missing\n");
return -EINVAL;
}
/* In order to operate correctly we need valid interrupt config */
- if (!client->irq || !pdata->irq_base) {
+ if (!client->irq) {
dev_err(&client->dev, "Invalid IRQ configuration\n");
return -EINVAL;
}
@@ -524,7 +546,7 @@ static int __devinit twl6040_probe(struct i2c_client *client,
goto err;
}
- twl6040->regmap = regmap_init_i2c(client, &twl6040_regmap_config);
+ twl6040->regmap = devm_regmap_init_i2c(client, &twl6040_regmap_config);
if (IS_ERR(twl6040->regmap)) {
ret = PTR_ERR(twl6040->regmap);
goto err;
@@ -532,9 +554,23 @@ static int __devinit twl6040_probe(struct i2c_client *client,
i2c_set_clientdata(client, twl6040);
+ twl6040->supplies[0].supply = "vio";
+ twl6040->supplies[1].supply = "v2v1";
+ ret = regulator_bulk_get(&client->dev, TWL6040_NUM_SUPPLIES,
+ twl6040->supplies);
+ if (ret != 0) {
+ dev_err(&client->dev, "Failed to get supplies: %d\n", ret);
+ goto regulator_get_err;
+ }
+
+ ret = regulator_bulk_enable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
+ if (ret != 0) {
+ dev_err(&client->dev, "Failed to enable supplies: %d\n", ret);
+ goto power_err;
+ }
+
twl6040->dev = &client->dev;
twl6040->irq = client->irq;
- twl6040->irq_base = pdata->irq_base;
mutex_init(&twl6040->mutex);
mutex_init(&twl6040->io_mutex);
@@ -543,22 +579,26 @@ static int __devinit twl6040_probe(struct i2c_client *client,
twl6040->rev = twl6040_reg_read(twl6040, TWL6040_REG_ASICREV);
/* ERRATA: Automatic power-up is not possible in ES1.0 */
- if (twl6040_get_revid(twl6040) > TWL6040_REV_ES1_0)
- twl6040->audpwron = pdata->audpwron_gpio;
- else
+ if (twl6040_get_revid(twl6040) > TWL6040_REV_ES1_0) {
+ if (pdata)
+ twl6040->audpwron = pdata->audpwron_gpio;
+ else
+ twl6040->audpwron = of_get_named_gpio(node,
+ "ti,audpwron-gpio", 0);
+ } else
twl6040->audpwron = -EINVAL;
if (gpio_is_valid(twl6040->audpwron)) {
ret = gpio_request_one(twl6040->audpwron, GPIOF_OUT_INIT_LOW,
"audpwron");
if (ret)
- goto gpio1_err;
+ goto gpio_err;
}
/* codec interrupt */
ret = twl6040_irq_init(twl6040);
if (ret)
- goto gpio2_err;
+ goto irq_init_err;
ret = request_threaded_irq(twl6040->irq_base + TWL6040_IRQ_READY,
NULL, twl6040_naudint_handler, 0,
@@ -572,22 +612,27 @@ static int __devinit twl6040_probe(struct i2c_client *client,
/* dual-access registers controlled by I2C only */
twl6040_set_bits(twl6040, TWL6040_REG_ACCCTL, TWL6040_I2CSEL);
- if (pdata->codec) {
- int irq = twl6040->irq_base + TWL6040_IRQ_PLUG;
-
- cell = &twl6040->cells[children];
- cell->name = "twl6040-codec";
- twl6040_codec_rsrc[0].start = irq;
- twl6040_codec_rsrc[0].end = irq;
- cell->resources = twl6040_codec_rsrc;
- cell->num_resources = ARRAY_SIZE(twl6040_codec_rsrc);
+ /*
+ * The main functionality of twl6040 is to provide audio on OMAP4+ systems.
+ * We can add the ASoC codec child whenever this driver has been loaded.
+ * The ASoC codec can work without pdata; pass the platform_data only if
+ * it has been provided.
+ */
+ irq = twl6040->irq_base + TWL6040_IRQ_PLUG;
+ cell = &twl6040->cells[children];
+ cell->name = "twl6040-codec";
+ twl6040_codec_rsrc[0].start = irq;
+ twl6040_codec_rsrc[0].end = irq;
+ cell->resources = twl6040_codec_rsrc;
+ cell->num_resources = ARRAY_SIZE(twl6040_codec_rsrc);
+ if (pdata && pdata->codec) {
cell->platform_data = pdata->codec;
cell->pdata_size = sizeof(*pdata->codec);
- children++;
}
+ children++;
- if (pdata->vibra) {
- int irq = twl6040->irq_base + TWL6040_IRQ_VIB;
+ if (twl6040_has_vibra(pdata, node)) {
+ irq = twl6040->irq_base + TWL6040_IRQ_VIB;
cell = &twl6040->cells[children];
cell->name = "twl6040-vibra";
@@ -596,21 +641,17 @@ static int __devinit twl6040_probe(struct i2c_client *client,
cell->resources = twl6040_vibra_rsrc;
cell->num_resources = ARRAY_SIZE(twl6040_vibra_rsrc);
- cell->platform_data = pdata->vibra;
- cell->pdata_size = sizeof(*pdata->vibra);
+ if (pdata && pdata->vibra) {
+ cell->platform_data = pdata->vibra;
+ cell->pdata_size = sizeof(*pdata->vibra);
+ }
children++;
}
- if (children) {
- ret = mfd_add_devices(&client->dev, -1, twl6040->cells,
- children, NULL, 0);
- if (ret)
- goto mfd_err;
- } else {
- dev_err(&client->dev, "No platform data found for children\n");
- ret = -ENODEV;
+ ret = mfd_add_devices(&client->dev, -1, twl6040->cells, children,
+ NULL, 0);
+ if (ret)
goto mfd_err;
- }
return 0;
@@ -618,12 +659,15 @@ mfd_err:
free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040);
irq_err:
twl6040_irq_exit(twl6040);
-gpio2_err:
+irq_init_err:
if (gpio_is_valid(twl6040->audpwron))
gpio_free(twl6040->audpwron);
-gpio1_err:
+gpio_err:
+ regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
+power_err:
+ regulator_bulk_free(TWL6040_NUM_SUPPLIES, twl6040->supplies);
+regulator_get_err:
i2c_set_clientdata(client, NULL);
- regmap_exit(twl6040->regmap);
err:
return ret;
}
@@ -643,7 +687,9 @@ static int __devexit twl6040_remove(struct i2c_client *client)
mfd_remove_devices(&client->dev);
i2c_set_clientdata(client, NULL);
- regmap_exit(twl6040->regmap);
+
+ regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
+ regulator_bulk_free(TWL6040_NUM_SUPPLIES, twl6040->supplies);
return 0;
}
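The twl6040-core.c hunk above adds the vio and v2v1 supplies through the regulator_bulk_* helpers and tears them down again on error and in remove(). A minimal sketch of that usage; the supply names match the patch, everything else (mychip_*) is hypothetical.

#include <linux/device.h>
#include <linux/regulator/consumer.h>

#define MYCHIP_NUM_SUPPLIES	2

static int mychip_power_on(struct device *dev,
			   struct regulator_bulk_data *supplies)
{
	int ret;

	supplies[0].supply = "vio";
	supplies[1].supply = "v2v1";

	ret = regulator_bulk_get(dev, MYCHIP_NUM_SUPPLIES, supplies);
	if (ret)
		return ret;

	ret = regulator_bulk_enable(MYCHIP_NUM_SUPPLIES, supplies);
	if (ret)
		regulator_bulk_free(MYCHIP_NUM_SUPPLIES, supplies);

	return ret;
}

static void mychip_power_off(struct regulator_bulk_data *supplies)
{
	/* mirror the remove() path above: disable, then release */
	regulator_bulk_disable(MYCHIP_NUM_SUPPLIES, supplies);
	regulator_bulk_free(MYCHIP_NUM_SUPPLIES, supplies);
}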
diff --git a/drivers/mfd/twl6040-irq.c b/drivers/mfd/twl6040-irq.c
index b3f8ddaa28a..4b42543da22 100644
--- a/drivers/mfd/twl6040-irq.c
+++ b/drivers/mfd/twl6040-irq.c
@@ -23,7 +23,10 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/err.h>
#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/mfd/core.h>
#include <linux/mfd/twl6040.h>
@@ -138,7 +141,8 @@ static irqreturn_t twl6040_irq_thread(int irq, void *data)
int twl6040_irq_init(struct twl6040 *twl6040)
{
- int cur_irq, ret;
+ struct device_node *node = twl6040->dev->of_node;
+ int i, nr_irqs, irq_base, ret;
u8 val;
mutex_init(&twl6040->irq_mutex);
@@ -148,21 +152,31 @@ int twl6040_irq_init(struct twl6040 *twl6040)
twl6040->irq_masks_cache = TWL6040_ALLINT_MSK;
twl6040_reg_write(twl6040, TWL6040_REG_INTMR, TWL6040_ALLINT_MSK);
+ nr_irqs = ARRAY_SIZE(twl6040_irqs);
+
+ irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
+ if (IS_ERR_VALUE(irq_base)) {
+ dev_err(twl6040->dev, "Failed to allocate IRQ descs\n");
+ return irq_base;
+ }
+ twl6040->irq_base = irq_base;
+
+ irq_domain_add_legacy(node, ARRAY_SIZE(twl6040_irqs), irq_base, 0,
+ &irq_domain_simple_ops, NULL);
+
/* Register them with genirq */
- for (cur_irq = twl6040->irq_base;
- cur_irq < twl6040->irq_base + ARRAY_SIZE(twl6040_irqs);
- cur_irq++) {
- irq_set_chip_data(cur_irq, twl6040);
- irq_set_chip_and_handler(cur_irq, &twl6040_irq_chip,
+ for (i = irq_base; i < irq_base + nr_irqs; i++) {
+ irq_set_chip_data(i, twl6040);
+ irq_set_chip_and_handler(i, &twl6040_irq_chip,
handle_level_irq);
- irq_set_nested_thread(cur_irq, 1);
+ irq_set_nested_thread(i, 1);
/* ARM needs us to explicitly flag the IRQ as valid
* and will set them noprobe when we do so. */
#ifdef CONFIG_ARM
- set_irq_flags(cur_irq, IRQF_VALID);
+ set_irq_flags(i, IRQF_VALID);
#else
- irq_set_noprobe(cur_irq);
+ irq_set_noprobe(i);
#endif
}
diff --git a/drivers/mfd/vx855.c b/drivers/mfd/vx855.c
index b73cc15e008..872aff21e4b 100644
--- a/drivers/mfd/vx855.c
+++ b/drivers/mfd/vx855.c
@@ -131,17 +131,7 @@ static struct pci_driver vx855_pci_driver = {
.remove = __devexit_p(vx855_remove),
};
-static int vx855_init(void)
-{
- return pci_register_driver(&vx855_pci_driver);
-}
-module_init(vx855_init);
-
-static void vx855_exit(void)
-{
- pci_unregister_driver(&vx855_pci_driver);
-}
-module_exit(vx855_exit);
+module_pci_driver(vx855_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <HaraldWelte@viatech.com>");
diff --git a/drivers/mfd/wm831x-auxadc.c b/drivers/mfd/wm831x-auxadc.c
index 87210954a06..6ee3018d865 100644
--- a/drivers/mfd/wm831x-auxadc.c
+++ b/drivers/mfd/wm831x-auxadc.c
@@ -280,11 +280,11 @@ void wm831x_auxadc_init(struct wm831x *wm831x)
mutex_init(&wm831x->auxadc_lock);
INIT_LIST_HEAD(&wm831x->auxadc_pending);
- if (wm831x->irq && wm831x->irq_base) {
+ if (wm831x->irq) {
wm831x->auxadc_read = wm831x_auxadc_read_irq;
- ret = request_threaded_irq(wm831x->irq_base +
- WM831X_IRQ_AUXADC_DATA,
+ ret = request_threaded_irq(wm831x_irq(wm831x,
+ WM831X_IRQ_AUXADC_DATA),
NULL, wm831x_auxadc_irq, 0,
"auxadc", wm831x);
if (ret < 0) {
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 838056c3493..946698fd2dc 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -614,8 +614,15 @@ int wm831x_set_bits(struct wm831x *wm831x, unsigned short reg,
}
EXPORT_SYMBOL_GPL(wm831x_set_bits);
+static struct resource wm831x_io_parent = {
+ .start = 0,
+ .end = 0xffffffff,
+ .flags = IORESOURCE_IO,
+};
+
static struct resource wm831x_dcdc1_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_DC1_CONTROL_1,
.end = WM831X_DC1_DVS_CONTROL,
.flags = IORESOURCE_IO,
@@ -637,6 +644,7 @@ static struct resource wm831x_dcdc1_resources[] = {
static struct resource wm831x_dcdc2_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_DC2_CONTROL_1,
.end = WM831X_DC2_DVS_CONTROL,
.flags = IORESOURCE_IO,
@@ -657,6 +665,7 @@ static struct resource wm831x_dcdc2_resources[] = {
static struct resource wm831x_dcdc3_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_DC3_CONTROL_1,
.end = WM831X_DC3_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -671,6 +680,7 @@ static struct resource wm831x_dcdc3_resources[] = {
static struct resource wm831x_dcdc4_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_DC4_CONTROL,
.end = WM831X_DC4_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -685,6 +695,7 @@ static struct resource wm831x_dcdc4_resources[] = {
static struct resource wm8320_dcdc4_buck_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_DC4_CONTROL,
.end = WM832X_DC4_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -707,6 +718,7 @@ static struct resource wm831x_gpio_resources[] = {
static struct resource wm831x_isink1_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_CURRENT_SINK_1,
.end = WM831X_CURRENT_SINK_1,
.flags = IORESOURCE_IO,
@@ -720,6 +732,7 @@ static struct resource wm831x_isink1_resources[] = {
static struct resource wm831x_isink2_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_CURRENT_SINK_2,
.end = WM831X_CURRENT_SINK_2,
.flags = IORESOURCE_IO,
@@ -733,6 +746,7 @@ static struct resource wm831x_isink2_resources[] = {
static struct resource wm831x_ldo1_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO1_CONTROL,
.end = WM831X_LDO1_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -747,6 +761,7 @@ static struct resource wm831x_ldo1_resources[] = {
static struct resource wm831x_ldo2_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO2_CONTROL,
.end = WM831X_LDO2_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -761,6 +776,7 @@ static struct resource wm831x_ldo2_resources[] = {
static struct resource wm831x_ldo3_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO3_CONTROL,
.end = WM831X_LDO3_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -775,6 +791,7 @@ static struct resource wm831x_ldo3_resources[] = {
static struct resource wm831x_ldo4_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO4_CONTROL,
.end = WM831X_LDO4_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -789,6 +806,7 @@ static struct resource wm831x_ldo4_resources[] = {
static struct resource wm831x_ldo5_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO5_CONTROL,
.end = WM831X_LDO5_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -803,6 +821,7 @@ static struct resource wm831x_ldo5_resources[] = {
static struct resource wm831x_ldo6_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO6_CONTROL,
.end = WM831X_LDO6_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -817,6 +836,7 @@ static struct resource wm831x_ldo6_resources[] = {
static struct resource wm831x_ldo7_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO7_CONTROL,
.end = WM831X_LDO7_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -831,6 +851,7 @@ static struct resource wm831x_ldo7_resources[] = {
static struct resource wm831x_ldo8_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO8_CONTROL,
.end = WM831X_LDO8_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -845,6 +866,7 @@ static struct resource wm831x_ldo8_resources[] = {
static struct resource wm831x_ldo9_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO9_CONTROL,
.end = WM831X_LDO9_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -859,6 +881,7 @@ static struct resource wm831x_ldo9_resources[] = {
static struct resource wm831x_ldo10_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO10_CONTROL,
.end = WM831X_LDO10_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -873,6 +896,7 @@ static struct resource wm831x_ldo10_resources[] = {
static struct resource wm831x_ldo11_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_LDO11_ON_CONTROL,
.end = WM831X_LDO11_SLEEP_CONTROL,
.flags = IORESOURCE_IO,
@@ -974,6 +998,7 @@ static struct resource wm831x_rtc_resources[] = {
static struct resource wm831x_status1_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_STATUS_LED_1,
.end = WM831X_STATUS_LED_1,
.flags = IORESOURCE_IO,
@@ -982,6 +1007,7 @@ static struct resource wm831x_status1_resources[] = {
static struct resource wm831x_status2_resources[] = {
{
+ .parent = &wm831x_io_parent,
.start = WM831X_STATUS_LED_2,
.end = WM831X_STATUS_LED_2,
.flags = IORESOURCE_IO,
@@ -1787,27 +1813,27 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
case WM8310:
ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8310_devs, ARRAY_SIZE(wm8310_devs),
- NULL, wm831x->irq_base);
+ NULL, 0);
break;
case WM8311:
ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8311_devs, ARRAY_SIZE(wm8311_devs),
- NULL, wm831x->irq_base);
+ NULL, 0);
if (!pdata || !pdata->disable_touch)
mfd_add_devices(wm831x->dev, wm831x_num,
touch_devs, ARRAY_SIZE(touch_devs),
- NULL, wm831x->irq_base);
+ NULL, 0);
break;
case WM8312:
ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8312_devs, ARRAY_SIZE(wm8312_devs),
- NULL, wm831x->irq_base);
+ NULL, 0);
if (!pdata || !pdata->disable_touch)
mfd_add_devices(wm831x->dev, wm831x_num,
touch_devs, ARRAY_SIZE(touch_devs),
- NULL, wm831x->irq_base);
+ NULL, 0);
break;
case WM8320:
@@ -1816,7 +1842,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
case WM8326:
ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8320_devs, ARRAY_SIZE(wm8320_devs),
- NULL, wm831x->irq_base);
+ NULL, 0);
break;
default:
@@ -1841,7 +1867,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
if (ret & WM831X_XTAL_ENA) {
ret = mfd_add_devices(wm831x->dev, wm831x_num,
rtc_devs, ARRAY_SIZE(rtc_devs),
- NULL, wm831x->irq_base);
+ NULL, 0);
if (ret != 0) {
dev_err(wm831x->dev, "Failed to add RTC: %d\n", ret);
goto err_irq;
@@ -1854,7 +1880,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
/* Treat errors as non-critical */
ret = mfd_add_devices(wm831x->dev, wm831x_num, backlight_devs,
ARRAY_SIZE(backlight_devs), NULL,
- wm831x->irq_base);
+ 0);
if (ret < 0)
dev_err(wm831x->dev, "Failed to add backlight: %d\n",
ret);
@@ -1883,8 +1909,7 @@ void wm831x_device_exit(struct wm831x *wm831x)
{
wm831x_otp_exit(wm831x);
mfd_remove_devices(wm831x->dev);
- if (wm831x->irq_base)
- free_irq(wm831x->irq_base + WM831X_IRQ_AUXADC_DATA, wm831x);
+ free_irq(wm831x_irq(wm831x, WM831X_IRQ_AUXADC_DATA), wm831x);
wm831x_irq_exit(wm831x);
}
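The wm831x-core.c hunk above gives every child IORESOURCE_IO register range a common parent resource, presumably so the ranges form one resource hierarchy instead of a set of unrelated roots, and it stops passing wm831x->irq_base to mfd_add_devices() now that IRQs are resolved through the IRQ domain. A minimal sketch of that resource layout, with hypothetical register values:

#include <linux/ioport.h>

/* one catch-all parent for all register-range resources */
static struct resource mychip_io_parent = {
	.start = 0,
	.end   = 0xffffffff,
	.flags = IORESOURCE_IO,
};

static struct resource mychip_child_resources[] = {
	{
		.parent = &mychip_io_parent,
		.start  = 0x40,		/* hypothetical register range */
		.end    = 0x48,
		.flags  = IORESOURCE_IO,
	},
};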
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index bec4d053916..804e56ec99e 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -18,6 +18,7 @@
#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
@@ -328,7 +329,7 @@ static inline int irq_data_to_status_reg(struct wm831x_irq_data *irq_data)
static inline struct wm831x_irq_data *irq_to_wm831x_irq(struct wm831x *wm831x,
int irq)
{
- return &wm831x_irqs[irq - wm831x->irq_base];
+ return &wm831x_irqs[irq];
}
static void wm831x_irq_lock(struct irq_data *data)
@@ -374,7 +375,7 @@ static void wm831x_irq_enable(struct irq_data *data)
{
struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
- data->irq);
+ data->hwirq);
wm831x->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
}
@@ -383,7 +384,7 @@ static void wm831x_irq_disable(struct irq_data *data)
{
struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
- data->irq);
+ data->hwirq);
wm831x->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
}
@@ -393,7 +394,7 @@ static int wm831x_irq_set_type(struct irq_data *data, unsigned int type)
struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
int irq;
- irq = data->irq - wm831x->irq_base;
+ irq = data->hwirq;
if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) {
/* Ignore internal-only IRQs */
@@ -412,22 +413,25 @@ static int wm831x_irq_set_type(struct irq_data *data, unsigned int type)
* do the update here as we can be called with the bus lock
* held.
*/
+ wm831x->gpio_level_low[irq] = false;
+ wm831x->gpio_level_high[irq] = false;
switch (type) {
case IRQ_TYPE_EDGE_BOTH:
wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_INT_MODE;
- wm831x->gpio_level[irq] = false;
break;
case IRQ_TYPE_EDGE_RISING:
wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_POL;
- wm831x->gpio_level[irq] = false;
break;
case IRQ_TYPE_EDGE_FALLING:
wm831x->gpio_update[irq] = 0x10000;
- wm831x->gpio_level[irq] = false;
break;
case IRQ_TYPE_LEVEL_HIGH:
wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_POL;
- wm831x->gpio_level[irq] = true;
+ wm831x->gpio_level_high[irq] = true;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ wm831x->gpio_update[irq] = 0x10000;
+ wm831x->gpio_level_low[irq] = true;
break;
default:
return -EINVAL;
@@ -469,9 +473,11 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
* descriptors.
*/
if (primary & WM831X_TCHPD_INT)
- handle_nested_irq(wm831x->irq_base + WM831X_IRQ_TCHPD);
+ handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+ WM831X_IRQ_TCHPD));
if (primary & WM831X_TCHDATA_INT)
- handle_nested_irq(wm831x->irq_base + WM831X_IRQ_TCHDATA);
+ handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+ WM831X_IRQ_TCHDATA));
primary &= ~(WM831X_TCHDATA_EINT | WM831X_TCHPD_EINT);
for (i = 0; i < ARRAY_SIZE(wm831x_irqs); i++) {
@@ -507,16 +513,29 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
}
if (*status & wm831x_irqs[i].mask)
- handle_nested_irq(wm831x->irq_base + i);
+ handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+ i));
/* Simulate an edge triggered IRQ by polling the input
* status. This is sucky but improves interoperability.
*/
if (primary == WM831X_GP_INT &&
- wm831x->gpio_level[i - WM831X_IRQ_GPIO_1]) {
+ wm831x->gpio_level_high[i - WM831X_IRQ_GPIO_1]) {
ret = wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL);
while (ret & 1 << (i - WM831X_IRQ_GPIO_1)) {
- handle_nested_irq(wm831x->irq_base + i);
+ handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+ i));
+ ret = wm831x_reg_read(wm831x,
+ WM831X_GPIO_LEVEL);
+ }
+ }
+
+ if (primary == WM831X_GP_INT &&
+ wm831x->gpio_level_low[i - WM831X_IRQ_GPIO_1]) {
+ ret = wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL);
+ while (!(ret & 1 << (i - WM831X_IRQ_GPIO_1))) {
+ handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
+ i));
ret = wm831x_reg_read(wm831x,
WM831X_GPIO_LEVEL);
}
@@ -527,10 +546,34 @@ out:
return IRQ_HANDLED;
}
+static int wm831x_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_data(virq, h->host_data);
+ irq_set_chip_and_handler(virq, &wm831x_irq_chip, handle_edge_irq);
+ irq_set_nested_thread(virq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(virq, IRQF_VALID);
+#else
+ irq_set_noprobe(virq);
+#endif
+
+ return 0;
+}
+
+static struct irq_domain_ops wm831x_irq_domain_ops = {
+ .map = wm831x_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
int wm831x_irq_init(struct wm831x *wm831x, int irq)
{
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
- int i, cur_irq, ret;
+ struct irq_domain *domain;
+ int i, ret, irq_base;
mutex_init(&wm831x->irq_lock);
@@ -543,18 +586,33 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
}
/* Try to dynamically allocate IRQs if no base is specified */
- if (!pdata || !pdata->irq_base)
- wm831x->irq_base = -1;
+ if (pdata && pdata->irq_base) {
+ irq_base = irq_alloc_descs(pdata->irq_base, 0,
+ WM831X_NUM_IRQS, 0);
+ if (irq_base < 0) {
+ dev_warn(wm831x->dev, "Failed to allocate IRQs: %d\n",
+ irq_base);
+ irq_base = 0;
+ }
+ } else {
+ irq_base = 0;
+ }
+
+ if (irq_base)
+ domain = irq_domain_add_legacy(wm831x->dev->of_node,
+ ARRAY_SIZE(wm831x_irqs),
+ irq_base, 0,
+ &wm831x_irq_domain_ops,
+ wm831x);
else
- wm831x->irq_base = pdata->irq_base;
+ domain = irq_domain_add_linear(wm831x->dev->of_node,
+ ARRAY_SIZE(wm831x_irqs),
+ &wm831x_irq_domain_ops,
+ wm831x);
- wm831x->irq_base = irq_alloc_descs(wm831x->irq_base, 0,
- WM831X_NUM_IRQS, 0);
- if (wm831x->irq_base < 0) {
- dev_warn(wm831x->dev, "Failed to allocate IRQs: %d\n",
- wm831x->irq_base);
- wm831x->irq_base = 0;
- return 0;
+ if (!domain) {
+ dev_warn(wm831x->dev, "Failed to allocate IRQ domain\n");
+ return -EINVAL;
}
if (pdata && pdata->irq_cmos)
@@ -565,38 +623,22 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
wm831x_set_bits(wm831x, WM831X_IRQ_CONFIG,
WM831X_IRQ_OD, i);
- /* Try to flag /IRQ as a wake source; there are a number of
- * unconditional wake sources in the PMIC so this isn't
- * conditional but we don't actually care *too* much if it
- * fails.
- */
- ret = enable_irq_wake(irq);
- if (ret != 0) {
- dev_warn(wm831x->dev, "Can't enable IRQ as wake source: %d\n",
- ret);
- }
-
wm831x->irq = irq;
-
- /* Register them with genirq */
- for (cur_irq = wm831x->irq_base;
- cur_irq < ARRAY_SIZE(wm831x_irqs) + wm831x->irq_base;
- cur_irq++) {
- irq_set_chip_data(cur_irq, wm831x);
- irq_set_chip_and_handler(cur_irq, &wm831x_irq_chip,
- handle_edge_irq);
- irq_set_nested_thread(cur_irq, 1);
-
- /* ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
- set_irq_flags(cur_irq, IRQF_VALID);
-#else
- irq_set_noprobe(cur_irq);
-#endif
- }
+ wm831x->irq_domain = domain;
if (irq) {
+ /* Try to flag /IRQ as a wake source; there are a number of
+ * unconditional wake sources in the PMIC so this isn't
+ * conditional but we don't actually care *too* much if it
+ * fails.
+ */
+ ret = enable_irq_wake(irq);
+ if (ret != 0) {
+ dev_warn(wm831x->dev,
+ "Can't enable IRQ as wake source: %d\n",
+ ret);
+ }
+
ret = request_threaded_irq(irq, NULL, wm831x_irq_thread,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
"wm831x", wm831x);
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index dd1caaac55e..8a9b11ca076 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -20,6 +20,7 @@
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/regmap.h>
#include <linux/workqueue.h>
#include <linux/mfd/wm8350/core.h>
@@ -74,7 +75,7 @@ static int wm8350_phys_read(struct wm8350 *wm8350, u8 reg, int num_regs,
int bytes = num_regs * 2;
dev_dbg(wm8350->dev, "volatile read\n");
- ret = wm8350->read_dev(wm8350, reg, bytes, (char *)dest);
+ ret = regmap_raw_read(wm8350->regmap, reg, dest, bytes);
for (i = reg; i < reg + num_regs; i++) {
/* Cache is CPU endian */
@@ -96,9 +97,6 @@ static int wm8350_read(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *dest)
int ret = 0;
int bytes = num_regs * 2;
- if (wm8350->read_dev == NULL)
- return -ENODEV;
-
if ((reg + num_regs - 1) > WM8350_MAX_REGISTER) {
dev_err(wm8350->dev, "invalid reg %x\n",
reg + num_regs - 1);
@@ -149,9 +147,6 @@ static int wm8350_write(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *src)
int end = reg + num_regs;
int bytes = num_regs * 2;
- if (wm8350->write_dev == NULL)
- return -ENODEV;
-
if ((reg + num_regs - 1) > WM8350_MAX_REGISTER) {
dev_err(wm8350->dev, "invalid reg %x\n",
reg + num_regs - 1);
@@ -182,7 +177,7 @@ static int wm8350_write(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *src)
}
/* Actually write it out */
- return wm8350->write_dev(wm8350, reg, bytes, (char *)src);
+ return regmap_raw_write(wm8350->regmap, reg, src, bytes);
}
/*
@@ -515,9 +510,8 @@ static int wm8350_create_cache(struct wm8350 *wm8350, int type, int mode)
* a PMIC so the device may not be in a virgin state and we
* can't rely on the silicon values.
*/
- ret = wm8350->read_dev(wm8350, 0,
- sizeof(u16) * (WM8350_MAX_REGISTER + 1),
- wm8350->reg_cache);
+ ret = regmap_raw_read(wm8350->regmap, 0, wm8350->reg_cache,
+ sizeof(u16) * (WM8350_MAX_REGISTER + 1));
if (ret < 0) {
dev_err(wm8350->dev,
"failed to read initial cache values\n");
@@ -570,35 +564,30 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq,
struct wm8350_platform_data *pdata)
{
int ret;
- u16 id1, id2, mask_rev;
- u16 cust_id, mode, chip_rev;
+ unsigned int id1, id2, mask_rev;
+ unsigned int cust_id, mode, chip_rev;
dev_set_drvdata(wm8350->dev, wm8350);
/* get WM8350 revision and config mode */
- ret = wm8350->read_dev(wm8350, WM8350_RESET_ID, sizeof(id1), &id1);
+ ret = regmap_read(wm8350->regmap, WM8350_RESET_ID, &id1);
if (ret != 0) {
dev_err(wm8350->dev, "Failed to read ID: %d\n", ret);
goto err;
}
- ret = wm8350->read_dev(wm8350, WM8350_ID, sizeof(id2), &id2);
+ ret = regmap_read(wm8350->regmap, WM8350_ID, &id2);
if (ret != 0) {
dev_err(wm8350->dev, "Failed to read ID: %d\n", ret);
goto err;
}
- ret = wm8350->read_dev(wm8350, WM8350_REVISION, sizeof(mask_rev),
- &mask_rev);
+ ret = regmap_read(wm8350->regmap, WM8350_REVISION, &mask_rev);
if (ret != 0) {
dev_err(wm8350->dev, "Failed to read revision: %d\n", ret);
goto err;
}
- id1 = be16_to_cpu(id1);
- id2 = be16_to_cpu(id2);
- mask_rev = be16_to_cpu(mask_rev);
-
if (id1 != 0x6143) {
dev_err(wm8350->dev,
"Device with ID %x is not a WM8350\n", id1);
diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
index d955faaf27c..a68aceb4e48 100644
--- a/drivers/mfd/wm8350-i2c.c
+++ b/drivers/mfd/wm8350-i2c.c
@@ -15,47 +15,18 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/mfd/wm8350/core.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
-static int wm8350_i2c_read_device(struct wm8350 *wm8350, char reg,
- int bytes, void *dest)
-{
- int ret;
-
- ret = i2c_master_send(wm8350->i2c_client, &reg, 1);
- if (ret < 0)
- return ret;
- ret = i2c_master_recv(wm8350->i2c_client, dest, bytes);
- if (ret < 0)
- return ret;
- if (ret != bytes)
- return -EIO;
- return 0;
-}
-
-static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
- int bytes, void *src)
-{
- /* we add 1 byte for device register */
- u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
- int ret;
-
- if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
- return -EINVAL;
-
- msg[0] = reg;
- memcpy(&msg[1], src, bytes);
- ret = i2c_master_send(wm8350->i2c_client, msg, bytes + 1);
- if (ret < 0)
- return ret;
- if (ret != bytes + 1)
- return -EIO;
- return 0;
-}
+static const struct regmap_config wm8350_regmap = {
+ .reg_bits = 8,
+ .val_bits = 16,
+};
static int wm8350_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
@@ -67,20 +38,18 @@ static int wm8350_i2c_probe(struct i2c_client *i2c,
if (wm8350 == NULL)
return -ENOMEM;
+ wm8350->regmap = devm_regmap_init_i2c(i2c, &wm8350_regmap);
+ if (IS_ERR(wm8350->regmap)) {
+ ret = PTR_ERR(wm8350->regmap);
+ dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+
i2c_set_clientdata(i2c, wm8350);
wm8350->dev = &i2c->dev;
- wm8350->i2c_client = i2c;
- wm8350->read_dev = wm8350_i2c_read_device;
- wm8350->write_dev = wm8350_i2c_write_device;
-
- ret = wm8350_device_init(wm8350, i2c->irq, i2c->dev.platform_data);
- if (ret < 0)
- goto err;
-
- return ret;
-err:
- return ret;
+ return wm8350_device_init(wm8350, i2c->irq, i2c->dev.platform_data);
}
static int wm8350_i2c_remove(struct i2c_client *i2c)
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 1189a17f0f2..4b7d378551d 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -23,136 +23,16 @@
#include <linux/regmap.h>
#include <linux/slab.h>
-static struct {
- u16 readable; /* Mask of readable bits */
- u16 writable; /* Mask of writable bits */
- u16 vol; /* Mask of volatile bits */
- int is_codec; /* Register controlled by codec reset */
- u16 default_val; /* Value on reset */
-} reg_data[] = {
- { 0xFFFF, 0xFFFF, 0x0000, 0, 0x6172 }, /* R0 */
- { 0x7000, 0x0000, 0x8000, 0, 0x0000 }, /* R1 */
- { 0xFF17, 0xFF17, 0x0000, 0, 0x0000 }, /* R2 */
- { 0xEBF3, 0xEBF3, 0x0000, 1, 0x6000 }, /* R3 */
- { 0x3CF3, 0x3CF3, 0x0000, 1, 0x0000 }, /* R4 */
- { 0xF1F8, 0xF1F8, 0x0000, 1, 0x4050 }, /* R5 */
- { 0xFC1F, 0xFC1F, 0x0000, 1, 0x4000 }, /* R6 */
- { 0xDFDE, 0xDFDE, 0x0000, 1, 0x01C8 }, /* R7 */
- { 0xFCFC, 0xFCFC, 0x0000, 1, 0x0000 }, /* R8 */
- { 0xEFFF, 0xEFFF, 0x0000, 1, 0x0040 }, /* R9 */
- { 0xEFFF, 0xEFFF, 0x0000, 1, 0x0040 }, /* R10 */
- { 0x27F7, 0x27F7, 0x0000, 1, 0x0004 }, /* R11 */
- { 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R12 */
- { 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R13 */
- { 0x1FEF, 0x1FEF, 0x0000, 1, 0x0000 }, /* R14 */
- { 0x0163, 0x0163, 0x0000, 1, 0x0100 }, /* R15 */
- { 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R16 */
- { 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R17 */
- { 0x1FFF, 0x0FFF, 0x0000, 1, 0x0000 }, /* R18 */
- { 0xFFFF, 0xFFFF, 0x0000, 1, 0x1000 }, /* R19 */
- { 0xFFFF, 0xFFFF, 0x0000, 1, 0x1010 }, /* R20 */
- { 0xFFFF, 0xFFFF, 0x0000, 1, 0x1010 }, /* R21 */
- { 0x0FDD, 0x0FDD, 0x0000, 1, 0x8000 }, /* R22 */
- { 0x1FFF, 0x1FFF, 0x0000, 1, 0x0800 }, /* R23 */
- { 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R24 */
- { 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R25 */
- { 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R26 */
- { 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R27 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R28 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R29 */
- { 0x0000, 0x0077, 0x0000, 1, 0x0066 }, /* R30 */
- { 0x0000, 0x0033, 0x0000, 1, 0x0022 }, /* R31 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0079 }, /* R32 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0079 }, /* R33 */
- { 0x0000, 0x0003, 0x0000, 1, 0x0003 }, /* R34 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0003 }, /* R35 */
- { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R36 */
- { 0x0000, 0x003F, 0x0000, 1, 0x0100 }, /* R37 */
- { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R38 */
- { 0x0000, 0x000F, 0x0000, 0, 0x0000 }, /* R39 */
- { 0x0000, 0x00FF, 0x0000, 1, 0x0000 }, /* R40 */
- { 0x0000, 0x01B7, 0x0000, 1, 0x0000 }, /* R41 */
- { 0x0000, 0x01B7, 0x0000, 1, 0x0000 }, /* R42 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R43 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R44 */
- { 0x0000, 0x00FD, 0x0000, 1, 0x0000 }, /* R45 */
- { 0x0000, 0x00FD, 0x0000, 1, 0x0000 }, /* R46 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R47 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R48 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R49 */
- { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R50 */
- { 0x0000, 0x01B3, 0x0000, 1, 0x0180 }, /* R51 */
- { 0x0000, 0x0077, 0x0000, 1, 0x0000 }, /* R52 */
- { 0x0000, 0x0077, 0x0000, 1, 0x0000 }, /* R53 */
- { 0x0000, 0x00FF, 0x0000, 1, 0x0000 }, /* R54 */
- { 0x0000, 0x0001, 0x0000, 1, 0x0000 }, /* R55 */
- { 0x0000, 0x003F, 0x0000, 1, 0x0000 }, /* R56 */
- { 0x0000, 0x004F, 0x0000, 1, 0x0000 }, /* R57 */
- { 0x0000, 0x00FD, 0x0000, 1, 0x0000 }, /* R58 */
- { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R59 */
- { 0x1FFF, 0x1FFF, 0x0000, 1, 0x0000 }, /* R60 */
- { 0xFFFF, 0xFFFF, 0x0000, 1, 0x0000 }, /* R61 */
- { 0x03FF, 0x03FF, 0x0000, 1, 0x0000 }, /* R62 */
- { 0x007F, 0x007F, 0x0000, 1, 0x0000 }, /* R63 */
- { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R64 */
- { 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R65 */
- { 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R66 */
- { 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R67 */
- { 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R68 */
- { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R69 */
- { 0xFFFF, 0xFFFF, 0x0000, 0, 0x4400 }, /* R70 */
- { 0x23FF, 0x23FF, 0x0000, 0, 0x0000 }, /* R71 */
- { 0xFFFF, 0xFFFF, 0x0000, 0, 0x4400 }, /* R72 */
- { 0x23FF, 0x23FF, 0x0000, 0, 0x0000 }, /* R73 */
- { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R74 */
- { 0x000E, 0x000E, 0x0000, 0, 0x0008 }, /* R75 */
- { 0xE00F, 0xE00F, 0x0000, 0, 0x0000 }, /* R76 */
- { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R77 */
- { 0x03C0, 0x03C0, 0x0000, 0, 0x02C0 }, /* R78 */
- { 0xFFFF, 0x0000, 0xffff, 0, 0x0000 }, /* R79 */
- { 0xFFFF, 0xFFFF, 0x0000, 0, 0x0000 }, /* R80 */
- { 0xFFFF, 0x0000, 0xffff, 0, 0x0000 }, /* R81 */
- { 0x2BFF, 0x0000, 0xffff, 0, 0x0000 }, /* R82 */
- { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R83 */
- { 0x80FF, 0x80FF, 0x0000, 0, 0x00ff }, /* R84 */
-};
-
-static int wm8400_read(struct wm8400 *wm8400, u8 reg, int num_regs, u16 *dest)
+static bool wm8400_volatile(struct device *dev, unsigned int reg)
{
- int i, ret = 0;
-
- BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache));
-
- /* If there are any volatile reads then read back the entire block */
- for (i = reg; i < reg + num_regs; i++)
- if (reg_data[i].vol) {
- ret = regmap_bulk_read(wm8400->regmap, reg, dest,
- num_regs);
- return ret;
- }
-
- /* Otherwise use the cache */
- memcpy(dest, &wm8400->reg_cache[reg], num_regs * sizeof(u16));
-
- return 0;
-}
-
-static int wm8400_write(struct wm8400 *wm8400, u8 reg, int num_regs,
- u16 *src)
-{
- int ret, i;
-
- BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache));
-
- for (i = 0; i < num_regs; i++) {
- BUG_ON(!reg_data[reg + i].writable);
- wm8400->reg_cache[reg + i] = src[i];
- ret = regmap_write(wm8400->regmap, reg, src[i]);
- if (ret != 0)
- return ret;
+ switch (reg) {
+ case WM8400_INTERRUPT_STATUS_1:
+ case WM8400_INTERRUPT_LEVELS:
+ case WM8400_SHUTDOWN_REASON:
+ return true;
+ default:
+ return false;
}
-
- return 0;
}
/**
@@ -165,13 +45,12 @@ static int wm8400_write(struct wm8400 *wm8400, u8 reg, int num_regs,
*/
u16 wm8400_reg_read(struct wm8400 *wm8400, u8 reg)
{
- u16 val;
-
- mutex_lock(&wm8400->io_lock);
-
- wm8400_read(wm8400, reg, 1, &val);
+ unsigned int val;
+ int ret;
- mutex_unlock(&wm8400->io_lock);
+ ret = regmap_read(wm8400->regmap, reg, &val);
+ if (ret < 0)
+ return ret;
return val;
}
@@ -179,63 +58,10 @@ EXPORT_SYMBOL_GPL(wm8400_reg_read);
int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data)
{
- int ret;
-
- mutex_lock(&wm8400->io_lock);
-
- ret = wm8400_read(wm8400, reg, count, data);
-
- mutex_unlock(&wm8400->io_lock);
-
- return ret;
+ return regmap_bulk_read(wm8400->regmap, reg, data, count);
}
EXPORT_SYMBOL_GPL(wm8400_block_read);
-/**
- * wm8400_set_bits - Bitmask write
- *
- * @wm8400: Pointer to wm8400 control structure
- * @reg: Register to access
- * @mask: Mask of bits to change
- * @val: Value to set for masked bits
- */
-int wm8400_set_bits(struct wm8400 *wm8400, u8 reg, u16 mask, u16 val)
-{
- u16 tmp;
- int ret;
-
- mutex_lock(&wm8400->io_lock);
-
- ret = wm8400_read(wm8400, reg, 1, &tmp);
- tmp = (tmp & ~mask) | val;
- if (ret == 0)
- ret = wm8400_write(wm8400, reg, 1, &tmp);
-
- mutex_unlock(&wm8400->io_lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(wm8400_set_bits);
-
-/**
- * wm8400_reset_codec_reg_cache - Reset cached codec registers to
- * their default values.
- */
-void wm8400_reset_codec_reg_cache(struct wm8400 *wm8400)
-{
- int i;
-
- mutex_lock(&wm8400->io_lock);
-
- /* Reset all codec registers to their initial value */
- for (i = 0; i < ARRAY_SIZE(wm8400->reg_cache); i++)
- if (reg_data[i].is_codec)
- wm8400->reg_cache[i] = reg_data[i].default_val;
-
- mutex_unlock(&wm8400->io_lock);
-}
-EXPORT_SYMBOL_GPL(wm8400_reset_codec_reg_cache);
-
static int wm8400_register_codec(struct wm8400 *wm8400)
{
struct mfd_cell cell = {
@@ -257,44 +83,24 @@ static int wm8400_register_codec(struct wm8400 *wm8400)
static int wm8400_init(struct wm8400 *wm8400,
struct wm8400_platform_data *pdata)
{
- u16 reg;
- int ret, i;
-
- mutex_init(&wm8400->io_lock);
+ unsigned int reg;
+ int ret;
dev_set_drvdata(wm8400->dev, wm8400);
/* Check that this is actually a WM8400 */
- ret = regmap_read(wm8400->regmap, WM8400_RESET_ID, &i);
+ ret = regmap_read(wm8400->regmap, WM8400_RESET_ID, &reg);
if (ret != 0) {
dev_err(wm8400->dev, "Chip ID register read failed\n");
return -EIO;
}
- if (i != reg_data[WM8400_RESET_ID].default_val) {
- dev_err(wm8400->dev, "Device is not a WM8400, ID is %x\n", i);
+ if (reg != 0x6172) {
+ dev_err(wm8400->dev, "Device is not a WM8400, ID is %x\n",
+ reg);
return -ENODEV;
}
- /* We don't know what state the hardware is in and since this
- * is a PMIC we can't reset it safely so initialise the register
- * cache from the hardware.
- */
- ret = regmap_raw_read(wm8400->regmap, 0, wm8400->reg_cache,
- ARRAY_SIZE(wm8400->reg_cache));
- if (ret != 0) {
- dev_err(wm8400->dev, "Register cache read failed\n");
- return -EIO;
- }
- for (i = 0; i < ARRAY_SIZE(wm8400->reg_cache); i++)
- wm8400->reg_cache[i] = be16_to_cpu(wm8400->reg_cache[i]);
-
- /* If the codec is in reset use hard coded values */
- if (!(wm8400->reg_cache[WM8400_POWER_MANAGEMENT_1] & WM8400_CODEC_ENA))
- for (i = 0; i < ARRAY_SIZE(wm8400->reg_cache); i++)
- if (reg_data[i].is_codec)
- wm8400->reg_cache[i] = reg_data[i].default_val;
-
- ret = wm8400_read(wm8400, WM8400_ID, 1, &reg);
+ ret = regmap_read(wm8400->regmap, WM8400_ID, &reg);
if (ret != 0) {
dev_err(wm8400->dev, "ID register read failed: %d\n", ret);
return ret;
@@ -334,8 +140,22 @@ static const struct regmap_config wm8400_regmap_config = {
.reg_bits = 8,
.val_bits = 16,
.max_register = WM8400_REGISTER_COUNT - 1,
+
+ .volatile_reg = wm8400_volatile,
+
+ .cache_type = REGCACHE_RBTREE,
};
+/**
+ * wm8400_reset_codec_reg_cache - Reset cached codec registers to
+ * their default values.
+ */
+void wm8400_reset_codec_reg_cache(struct wm8400 *wm8400)
+{
+ regmap_reinit_cache(wm8400->regmap, &wm8400_regmap_config);
+}
+EXPORT_SYMBOL_GPL(wm8400_reset_codec_reg_cache);
+
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
static int wm8400_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
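The wm8400-core.c hunk above deletes the hand-rolled register cache and access table in favour of regmap: only the interrupt and status registers are declared volatile, everything else is cached with REGCACHE_RBTREE, and the codec-register cache reset shrinks to a regmap_reinit_cache() call. A minimal sketch of that configuration, with hypothetical register numbers:

#include <linux/device.h>
#include <linux/regmap.h>

#define MYCHIP_STATUS_REG	0x50	/* hypothetical volatile register */
#define MYCHIP_MAX_REGISTER	0x54	/* hypothetical last register */

static bool mychip_volatile(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case MYCHIP_STATUS_REG:
		return true;		/* never served from the cache */
	default:
		return false;
	}
}

static const struct regmap_config mychip_regmap_config = {
	.reg_bits	= 8,
	.val_bits	= 16,
	.max_register	= MYCHIP_MAX_REGISTER,
	.volatile_reg	= mychip_volatile,
	.cache_type	= REGCACHE_RBTREE,
};

/* Drop the cached values back to the config defaults: */
static void mychip_reset_cache(struct regmap *map)
{
	regmap_reinit_cache(map, &mychip_regmap_config);
}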
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 9d7ca1e978f..1e321d34977 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -500,7 +500,8 @@ static __devinit int wm8994_device_init(struct wm8994 *wm8994, int irq)
ret);
goto err_enable;
}
- wm8994->revision = ret;
+ wm8994->revision = ret & WM8994_CHIP_REV_MASK;
+ wm8994->cust_id = (ret & WM8994_CUST_ID_MASK) >> WM8994_CUST_ID_SHIFT;
switch (wm8994->type) {
case WM8994:
@@ -553,8 +554,8 @@ static __devinit int wm8994_device_init(struct wm8994 *wm8994, int irq)
break;
}
- dev_info(wm8994->dev, "%s revision %c\n", devname,
- 'A' + wm8994->revision);
+ dev_info(wm8994->dev, "%s revision %c CUST_ID %02x\n", devname,
+ 'A' + wm8994->revision, wm8994->cust_id);
switch (wm8994->type) {
case WM1811:
@@ -732,23 +733,7 @@ static struct i2c_driver wm8994_i2c_driver = {
.id_table = wm8994_i2c_id,
};
-static int __init wm8994_i2c_init(void)
-{
- int ret;
-
- ret = i2c_add_driver(&wm8994_i2c_driver);
- if (ret != 0)
- pr_err("Failed to register wm8994 I2C driver: %d\n", ret);
-
- return ret;
-}
-module_init(wm8994_i2c_init);
-
-static void __exit wm8994_i2c_exit(void)
-{
- i2c_del_driver(&wm8994_i2c_driver);
-}
-module_exit(wm8994_i2c_exit);
+module_i2c_driver(wm8994_i2c_driver);
MODULE_DESCRIPTION("Core support for the WM8994 audio CODEC");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/wm8994-regmap.c b/drivers/mfd/wm8994-regmap.c
index bfd25af6ecb..52e9e294494 100644
--- a/drivers/mfd/wm8994-regmap.c
+++ b/drivers/mfd/wm8994-regmap.c
@@ -1122,7 +1122,6 @@ static bool wm8994_volatile_register(struct device *dev, unsigned int reg)
case WM8994_RATE_STATUS:
case WM8958_MIC_DETECT_3:
case WM8994_DC_SERVO_4E:
- case WM8994_CHIP_REVISION:
case WM8994_INTERRUPT_STATUS_1:
case WM8994_INTERRUPT_STATUS_2:
return true;
diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c
index d7a9aa14e5d..042a8fe4efa 100644
--- a/drivers/misc/ab8500-pwm.c
+++ b/drivers/misc/ab8500-pwm.c
@@ -142,10 +142,16 @@ static int __devexit ab8500_pwm_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ab8500_pwm_match[] = {
+ { .compatible = "stericsson,ab8500-pwm", },
+ {}
+};
+
static struct platform_driver ab8500_pwm_driver = {
.driver = {
.name = "ab8500-pwm",
.owner = THIS_MODULE,
+ .of_match_table = ab8500_pwm_match,
},
.probe = ab8500_pwm_probe,
.remove = __devexit_p(ab8500_pwm_remove),
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index dabec556ebb..dd2d374dcc7 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -384,7 +384,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
md = mmc_blk_get(bdev->bd_disk);
if (!md) {
err = -EINVAL;
- goto cmd_done;
+ goto cmd_err;
}
card = md->queue.card;
@@ -483,6 +483,7 @@ cmd_rel_host:
cmd_done:
mmc_blk_put(md);
+cmd_err:
kfree(idata->buf);
kfree(idata);
return err;
@@ -1283,7 +1284,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
int ret = 1, disable_multi = 0, retry = 0, type;
enum mmc_blk_status status;
struct mmc_queue_req *mq_rq;
- struct request *req;
+ struct request *req = rqc;
struct mmc_async_req *areq;
if (!rqc && !mq->mqrq_prev->req)
@@ -1291,6 +1292,16 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
do {
if (rqc) {
+ /*
+ * When 4KB native sector is enabled, only reads and writes that
+ * are a multiple of 8 blocks are allowed
+ */
+ if ((brq->data.blocks & 0x07) &&
+ (card->ext_csd.data_sector_size == 4096)) {
+ pr_err("%s: Transfer size is not 4KB sector size aligned\n",
+ req->rq_disk->disk_name);
+ goto cmd_abort;
+ }
mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
areq = &mq->mqrq_cur->mmc_active;
} else
@@ -1538,7 +1549,12 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
"mmcblk%d%s", md->name_idx, subname ? subname : "");
- blk_queue_logical_block_size(md->queue.queue, 512);
+ if (mmc_card_mmc(card))
+ blk_queue_logical_block_size(md->queue.queue,
+ card->ext_csd.data_sector_size);
+ else
+ blk_queue_logical_block_size(md->queue.queue, 512);
+
set_capacity(md->disk, size);
if (mmc_host_cmd23(card->host)) {
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 996f8e36e23..e360a979857 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -96,7 +96,7 @@ static int mmc_queue_thread(void *d)
* on any queue on this host, and attempt to issue it. This may
* not be the queue we were asked to process.
*/
-static void mmc_request(struct request_queue *q)
+static void mmc_request_fn(struct request_queue *q)
{
struct mmc_queue *mq = q->queuedata;
struct request *req;
@@ -171,12 +171,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
limit = *mmc_dev(host)->dma_mask;
mq->card = card;
- mq->queue = blk_init_queue(mmc_request, lock);
+ mq->queue = blk_init_queue(mmc_request_fn, lock);
if (!mq->queue)
return -ENOMEM;
- memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
- memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
mq->mqrq_cur = mqrq_cur;
mq->mqrq_prev = mqrq_prev;
mq->queue->queuedata = mq;
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index c60cee92a2b..9b68933f27e 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -122,6 +122,7 @@ static int mmc_bus_remove(struct device *dev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
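+/* Bus suspend/resume handlers are only needed when system sleep support is enabled */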
static int mmc_bus_suspend(struct device *dev)
{
struct mmc_driver *drv = to_mmc_driver(dev->driver);
@@ -143,6 +144,7 @@ static int mmc_bus_resume(struct device *dev)
ret = drv->resume(card);
return ret;
}
+#endif
#ifdef CONFIG_PM_RUNTIME
diff --git a/drivers/mmc/core/cd-gpio.c b/drivers/mmc/core/cd-gpio.c
index 2c14be73254..f13e38decea 100644
--- a/drivers/mmc/core/cd-gpio.c
+++ b/drivers/mmc/core/cd-gpio.c
@@ -73,6 +73,9 @@ void mmc_cd_gpio_free(struct mmc_host *host)
{
struct mmc_cd_gpio *cd = host->hotplug.handler_priv;
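+ /* Nothing to free if GPIO card detect was never set up for this host */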
+ if (!cd)
+ return;
+
free_irq(host->hotplug.irq, host);
gpio_free(cd->gpio);
kfree(cd);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index ba821fe70bc..0b6141d29db 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -42,6 +42,7 @@
#include "sdio_ops.h"
static struct workqueue_struct *workqueue;
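+/* Card init frequencies, in Hz, tried in descending order while scanning for a card */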
+static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
/*
* Enabling software CRCs on the data blocks can be a significant (30%)
@@ -1157,6 +1158,9 @@ static void mmc_power_up(struct mmc_host *host)
{
int bit;
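+ /* Nothing to do if the host is already powered up */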
+ if (host->ios.power_mode == MMC_POWER_ON)
+ return;
+
mmc_host_clk_hold(host);
/* If ocr is set, we use it */
@@ -1199,6 +1203,10 @@ static void mmc_power_up(struct mmc_host *host)
void mmc_power_off(struct mmc_host *host)
{
int err = 0;
+
+ if (host->ios.power_mode == MMC_POWER_OFF)
+ return;
+
mmc_host_clk_hold(host);
host->ios.clock = 0;
@@ -2005,7 +2013,6 @@ EXPORT_SYMBOL(mmc_detect_card_removed);
void mmc_rescan(struct work_struct *work)
{
- static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
struct mmc_host *host =
container_of(work, struct mmc_host, detect.work);
int i;
@@ -2044,8 +2051,12 @@ void mmc_rescan(struct work_struct *work)
*/
mmc_bus_put(host);
- if (host->ops->get_cd && host->ops->get_cd(host) == 0)
+ if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
+ mmc_claim_host(host);
+ mmc_power_off(host);
+ mmc_release_host(host);
goto out;
+ }
mmc_claim_host(host);
for (i = 0; i < ARRAY_SIZE(freqs); i++) {
@@ -2063,7 +2074,8 @@ void mmc_rescan(struct work_struct *work)
void mmc_start_host(struct mmc_host *host)
{
- mmc_power_off(host);
+ host->f_init = max(freqs[0], host->f_min);
+ mmc_power_up(host);
mmc_detect_change(host, 0);
}
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 54df5adc041..2d4a4b74675 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -235,6 +235,36 @@ static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
return err;
}
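+/*
+ * Work out the fastest interface timing supported by both the card
+ * (EXT_CSD card type bits) and the host, and record it in hs_max_dtr
+ * together with the masked card type.
+ */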
+static void mmc_select_card_type(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u8 card_type = card->ext_csd.raw_card_type & EXT_CSD_CARD_TYPE_MASK;
+ unsigned int caps = host->caps, caps2 = host->caps2;
+ unsigned int hs_max_dtr = 0;
+
+ if (card_type & EXT_CSD_CARD_TYPE_26)
+ hs_max_dtr = MMC_HIGH_26_MAX_DTR;
+
+ if (caps & MMC_CAP_MMC_HIGHSPEED &&
+ card_type & EXT_CSD_CARD_TYPE_52)
+ hs_max_dtr = MMC_HIGH_52_MAX_DTR;
+
+ if ((caps & MMC_CAP_1_8V_DDR &&
+ card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) ||
+ (caps & MMC_CAP_1_2V_DDR &&
+ card_type & EXT_CSD_CARD_TYPE_DDR_1_2V))
+ hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
+
+ if ((caps2 & MMC_CAP2_HS200_1_8V_SDR &&
+ card_type & EXT_CSD_CARD_TYPE_SDR_1_8V) ||
+ (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
+ card_type & EXT_CSD_CARD_TYPE_SDR_1_2V))
+ hs_max_dtr = MMC_HS200_MAX_DTR;
+
+ card->ext_csd.hs_max_dtr = hs_max_dtr;
+ card->ext_csd.card_type = card_type;
+}
+
/*
* Decode extended CSD.
*/
@@ -284,56 +314,9 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
mmc_card_set_blockaddr(card);
}
+
card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
- switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
- case EXT_CSD_CARD_TYPE_SDR_ALL:
- case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_8V:
- case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_2V:
- case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_52:
- card->ext_csd.hs_max_dtr = 200000000;
- card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_200;
- break;
- case EXT_CSD_CARD_TYPE_SDR_1_2V_ALL:
- case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_8V:
- case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_2V:
- case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_52:
- card->ext_csd.hs_max_dtr = 200000000;
- card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_2V;
- break;
- case EXT_CSD_CARD_TYPE_SDR_1_8V_ALL:
- case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_8V:
- case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_2V:
- case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_52:
- card->ext_csd.hs_max_dtr = 200000000;
- card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_8V;
- break;
- case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
- EXT_CSD_CARD_TYPE_26:
- card->ext_csd.hs_max_dtr = 52000000;
- card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_52;
- break;
- case EXT_CSD_CARD_TYPE_DDR_1_2V | EXT_CSD_CARD_TYPE_52 |
- EXT_CSD_CARD_TYPE_26:
- card->ext_csd.hs_max_dtr = 52000000;
- card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_2V;
- break;
- case EXT_CSD_CARD_TYPE_DDR_1_8V | EXT_CSD_CARD_TYPE_52 |
- EXT_CSD_CARD_TYPE_26:
- card->ext_csd.hs_max_dtr = 52000000;
- card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_8V;
- break;
- case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
- card->ext_csd.hs_max_dtr = 52000000;
- break;
- case EXT_CSD_CARD_TYPE_26:
- card->ext_csd.hs_max_dtr = 26000000;
- break;
- default:
- /* MMC v4 spec says this cannot happen */
- pr_warning("%s: card is mmc v4 but doesn't "
- "support any high-speed modes.\n",
- mmc_hostname(card->host));
- }
+ mmc_select_card_type(card);
card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
card->ext_csd.raw_erase_timeout_mult =
@@ -533,6 +516,8 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
} else {
card->ext_csd.data_tag_unit_size = 0;
}
+ } else {
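+ /* Cards whose EXT_CSD does not report a native sector size use 512-byte sectors */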
+ card->ext_csd.data_sector_size = 512;
}
out:
@@ -556,14 +541,10 @@ static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
err = mmc_get_ext_csd(card, &bw_ext_csd);
if (err || bw_ext_csd == NULL) {
- if (bus_width != MMC_BUS_WIDTH_1)
- err = -EINVAL;
+ err = -EINVAL;
goto out;
}
- if (bus_width == MMC_BUS_WIDTH_1)
- goto out;
-
/* only compare read only fields */
err = !((card->ext_csd.raw_partition_support ==
bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
@@ -736,6 +717,10 @@ static int mmc_select_powerclass(struct mmc_card *card,
card->ext_csd.generic_cmd6_time);
}
+ if (err)
+ pr_err("%s: power class selection for ext_csd_bus_width %d"
+ " failed\n", mmc_hostname(card->host), bus_width);
+
return err;
}
@@ -745,7 +730,7 @@ static int mmc_select_powerclass(struct mmc_card *card,
*/
static int mmc_select_hs200(struct mmc_card *card)
{
- int idx, err = 0;
+ int idx, err = -EINVAL;
struct mmc_host *host;
static unsigned ext_csd_bits[] = {
EXT_CSD_BUS_WIDTH_4,
@@ -761,10 +746,12 @@ static int mmc_select_hs200(struct mmc_card *card)
host = card->host;
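+ /* Prefer 1.2V SDR signalling when both card and host support it, otherwise fall back to 1.8V */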
if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
- host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
- if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0))
- err = mmc_set_signal_voltage(host,
- MMC_SIGNAL_VOLTAGE_180, 0);
+ host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0);
+
+ if (err && card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V &&
+ host->caps2 & MMC_CAP2_HS200_1_8V_SDR)
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, 0);
/* If fails try again during next card power cycle */
if (err)
@@ -1117,9 +1104,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
err = mmc_select_powerclass(card, ext_csd_bits, ext_csd);
if (err)
- pr_warning("%s: power class selection to bus width %d"
- " failed\n", mmc_hostname(card->host),
- 1 << bus_width);
+ goto err;
}
/*
@@ -1151,10 +1136,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
err = mmc_select_powerclass(card, ext_csd_bits[idx][0],
ext_csd);
if (err)
- pr_warning("%s: power class selection to "
- "bus width %d failed\n",
- mmc_hostname(card->host),
- 1 << bus_width);
+ goto err;
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
@@ -1182,10 +1164,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
err = mmc_select_powerclass(card, ext_csd_bits[idx][1],
ext_csd);
if (err)
- pr_warning("%s: power class selection to "
- "bus width %d ddr %d failed\n",
- mmc_hostname(card->host),
- 1 << bus_width, ddr);
+ goto err;
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 2c7c83f832d..13d0e95380a 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -947,7 +947,7 @@ static int mmc_sdio_resume(struct mmc_host *host)
}
if (!err && host->sdio_irqs)
- mmc_signal_sdio_irq(host);
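+ /* Wake the SDIO IRQ thread directly; sdio_irq_pending is left unset so all functions get re-checked */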
+ wake_up_process(host->sdio_irq_thread);
mmc_release_host(host);
/*
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index f573e7f9f74..3d8ceb4084d 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -28,18 +28,20 @@
#include "sdio_ops.h"
-static int process_sdio_pending_irqs(struct mmc_card *card)
+static int process_sdio_pending_irqs(struct mmc_host *host)
{
+ struct mmc_card *card = host->card;
int i, ret, count;
unsigned char pending;
struct sdio_func *func;
/*
* Optimization, if there is only 1 function interrupt registered
- * call irq handler directly
+ * and we know an IRQ was signaled then call irq handler directly.
+ * Otherwise do the full probe.
*/
func = card->sdio_single_irq;
- if (func) {
+ if (func && host->sdio_irq_pending) {
func->irq_handler(func);
return 1;
}
@@ -116,7 +118,8 @@ static int sdio_irq_thread(void *_host)
ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
if (ret)
break;
- ret = process_sdio_pending_irqs(host->card);
+ ret = process_sdio_pending_irqs(host);
+ host->sdio_irq_pending = false;
mmc_release_host(host);
/*
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 2bc06e7344d..aa131b32e3b 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -278,10 +278,13 @@ choice
Choose which driver to use for the Atmel MCI Silicon
config MMC_AT91
- tristate "AT91 SD/MMC Card Interface support"
+ tristate "AT91 SD/MMC Card Interface support (DEPRECATED)"
depends on ARCH_AT91
help
- This selects the AT91 MCI controller.
+ This selects the AT91 MCI controller. This driver will
+ be removed soon (for more information, have a look at
+ Documentation/feature-removal-schedule.txt). Please use
+ MMC_ATMEL_MCI instead.
If unsure, say N.
@@ -307,16 +310,6 @@ config MMC_ATMELMCI_DMA
If unsure, say N.
-config MMC_IMX
- tristate "Motorola i.MX Multimedia Card Interface support"
- depends on ARCH_MX1
- help
- This selects the Motorola i.MX Multimedia card Interface.
- If you have a i.MX platform with a Multimedia Card slot,
- say Y or M here.
-
- If unsure, say N.
-
config MMC_MSM
tristate "Qualcomm SDCC Controller Support"
depends on MMC && ARCH_MSM
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 3e7e26d0807..8922b06be92 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -4,7 +4,6 @@
obj-$(CONFIG_MMC_ARMMMCI) += mmci.o
obj-$(CONFIG_MMC_PXA) += pxamci.o
-obj-$(CONFIG_MMC_IMX) += imxmmc.o
obj-$(CONFIG_MMC_MXC) += mxcmmc.o
obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index e94476beca1..420aca642b1 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -45,19 +45,19 @@
#define ATMCI_DMA_THRESHOLD 16
enum {
- EVENT_CMD_COMPLETE = 0,
+ EVENT_CMD_RDY = 0,
EVENT_XFER_COMPLETE,
- EVENT_DATA_COMPLETE,
+ EVENT_NOTBUSY,
EVENT_DATA_ERROR,
};
enum atmel_mci_state {
STATE_IDLE = 0,
STATE_SENDING_CMD,
- STATE_SENDING_DATA,
- STATE_DATA_BUSY,
+ STATE_DATA_XFER,
+ STATE_WAITING_NOTBUSY,
STATE_SENDING_STOP,
- STATE_DATA_ERROR,
+ STATE_END_REQUEST,
};
enum atmci_xfer_dir {
@@ -78,6 +78,9 @@ struct atmel_mci_caps {
bool has_highspeed;
bool has_rwproof;
bool has_odd_clk_div;
+ bool has_bad_data_ordering;
+ bool need_reset_after_xfer;
+ bool need_blksz_mul_4;
};
struct atmel_mci_dma {
@@ -91,6 +94,11 @@ struct atmel_mci_dma {
* @regs: Pointer to MMIO registers.
* @sg: Scatterlist entry currently being processed by PIO or PDC code.
* @pio_offset: Offset into the current scatterlist entry.
+ * @buffer: Buffer used if we don't have the r/w proof capability. There
+ * is not enough time to switch PDC buffers, so a single buffer is used
+ * for the whole transaction.
+ * @buf_size: size of the buffer.
+ * @phys_buf_addr: buffer address needed for pdc.
* @cur_slot: The slot which is currently using the controller.
* @mrq: The request currently being processed on @cur_slot,
* or NULL if the controller is idle.
@@ -116,6 +124,7 @@ struct atmel_mci_dma {
* @queue: List of slots waiting for access to the controller.
* @need_clock_update: Update the clock rate before the next request.
* @need_reset: Reset controller before next request.
+ * @timer: Software timeout timer to compensate for the data timeout error flag, which may never rise.
* @mode_reg: Value of the MR register.
* @cfg_reg: Value of the CFG register.
* @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
@@ -166,6 +175,9 @@ struct atmel_mci {
struct scatterlist *sg;
unsigned int pio_offset;
+ unsigned int *buffer;
+ unsigned int buf_size;
+ dma_addr_t buf_phys_addr;
struct atmel_mci_slot *cur_slot;
struct mmc_request *mrq;
@@ -189,6 +201,7 @@ struct atmel_mci {
bool need_clock_update;
bool need_reset;
+ struct timer_list timer;
u32 mode_reg;
u32 cfg_reg;
unsigned long bus_hz;
@@ -480,6 +493,32 @@ err:
dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
+static inline unsigned int atmci_get_version(struct atmel_mci *host)
+{
+ return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
+}
+
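+/*
+ * Software timeout handler: on controllers where the data timeout error
+ * flag may never rise, fail the current request, flag the controller for
+ * reset and kick the tasklet to finish the request.
+ */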
+static void atmci_timeout_timer(unsigned long data)
+{
+ struct atmel_mci *host;
+
+ host = (struct atmel_mci *)data;
+
+ dev_dbg(&host->pdev->dev, "software timeout\n");
+
+ if (host->mrq->cmd->data) {
+ host->mrq->cmd->data->error = -ETIMEDOUT;
+ host->data = NULL;
+ } else {
+ host->mrq->cmd->error = -ETIMEDOUT;
+ host->cmd = NULL;
+ }
+ host->need_reset = 1;
+ host->state = STATE_END_REQUEST;
+ smp_wmb();
+ tasklet_schedule(&host->tasklet);
+}
+
static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
unsigned int ns)
{
@@ -591,6 +630,7 @@ static void atmci_send_command(struct atmel_mci *host,
static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
{
+ dev_dbg(&host->pdev->dev, "send stop command\n");
atmci_send_command(host, data->stop, host->stop_cmdr);
atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
}
@@ -603,6 +643,7 @@ static void atmci_pdc_set_single_buf(struct atmel_mci *host,
enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
{
u32 pointer_reg, counter_reg;
+ unsigned int buf_size;
if (dir == XFER_RECEIVE) {
pointer_reg = ATMEL_PDC_RPR;
@@ -617,8 +658,15 @@ static void atmci_pdc_set_single_buf(struct atmel_mci *host,
counter_reg += ATMEL_PDC_SCND_BUF_OFF;
}
- atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
- if (host->data_size <= sg_dma_len(host->sg)) {
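+ /* Without the r/w proof capability the PDC uses the single bounce buffer instead of the scatterlist */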
+ if (!host->caps.has_rwproof) {
+ buf_size = host->buf_size;
+ atmci_writel(host, pointer_reg, host->buf_phys_addr);
+ } else {
+ buf_size = sg_dma_len(host->sg);
+ atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
+ }
+
+ if (host->data_size <= buf_size) {
if (host->data_size & 0x3) {
/* If size is different from modulo 4, transfer bytes */
atmci_writel(host, counter_reg, host->data_size);
@@ -670,7 +718,20 @@ static void atmci_pdc_cleanup(struct atmel_mci *host)
*/
static void atmci_pdc_complete(struct atmel_mci *host)
{
+ int transfer_size = host->data->blocks * host->data->blksz;
+ int i;
+
atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
+
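+ /* Reads that went through the bounce buffer: swap bytes within each word if the IP has bad data ordering, then copy back to the scatterlist */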
+ if ((!host->caps.has_rwproof)
+ && (host->data->flags & MMC_DATA_READ)) {
+ if (host->caps.has_bad_data_ordering)
+ for (i = 0; i < transfer_size; i++)
+ host->buffer[i] = swab32(host->buffer[i]);
+ sg_copy_from_buffer(host->data->sg, host->data->sg_len,
+ host->buffer, transfer_size);
+ }
+
atmci_pdc_cleanup(host);
/*
@@ -678,9 +739,10 @@ static void atmci_pdc_complete(struct atmel_mci *host)
* to send the stop command or waiting for NBUSY in this case.
*/
if (host->data) {
+ dev_dbg(&host->pdev->dev,
+ "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
tasklet_schedule(&host->tasklet);
- atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
}
@@ -716,6 +778,8 @@ static void atmci_dma_complete(void *arg)
* to send the stop command or waiting for NBUSY in this case.
*/
if (data) {
+ dev_dbg(&host->pdev->dev,
+ "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
tasklet_schedule(&host->tasklet);
@@ -791,6 +855,7 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
u32 iflags, tmp;
unsigned int sg_len;
enum dma_data_direction dir;
+ int i;
data->error = -EINPROGRESS;
@@ -806,7 +871,7 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
} else {
dir = DMA_TO_DEVICE;
- iflags |= ATMCI_ENDTX | ATMCI_TXBUFE;
+ iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
}
/* Set BLKLEN */
@@ -818,6 +883,16 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
/* Configure PDC */
host->data_size = data->blocks * data->blksz;
sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir);
+
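+ /* Writes without r/w proof are staged in the bounce buffer, with each 32-bit word byte-swapped on IPs with bad data ordering */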
+ if ((!host->caps.has_rwproof)
+ && (host->data->flags & MMC_DATA_WRITE)) {
+ sg_copy_to_buffer(host->data->sg, host->data->sg_len,
+ host->buffer, host->data_size);
+ if (host->caps.has_bad_data_ordering)
+ for (i = 0; i < host->data_size; i++)
+ host->buffer[i] = swab32(host->buffer[i]);
+ }
+
if (host->data_size)
atmci_pdc_set_both_buf(host,
((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT));
@@ -931,6 +1006,8 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
static void atmci_stop_transfer(struct atmel_mci *host)
{
+ dev_dbg(&host->pdev->dev,
+ "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
@@ -940,8 +1017,7 @@ static void atmci_stop_transfer(struct atmel_mci *host)
*/
static void atmci_stop_transfer_pdc(struct atmel_mci *host)
{
- atmci_set_pending(host, EVENT_XFER_COMPLETE);
- atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+ atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
}
static void atmci_stop_transfer_dma(struct atmel_mci *host)
@@ -953,6 +1029,8 @@ static void atmci_stop_transfer_dma(struct atmel_mci *host)
atmci_dma_cleanup(host);
} else {
/* Data transfer was stopped by the interrupt handler */
+ dev_dbg(&host->pdev->dev,
+ "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
@@ -977,9 +1055,12 @@ static void atmci_start_request(struct atmel_mci *host,
host->pending_events = 0;
host->completed_events = 0;
+ host->cmd_status = 0;
host->data_status = 0;
- if (host->need_reset) {
+ dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode);
+
+ if (host->need_reset || host->caps.need_reset_after_xfer) {
iflags = atmci_readl(host, ATMCI_IMR);
iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
@@ -994,7 +1075,7 @@ static void atmci_start_request(struct atmel_mci *host,
iflags = atmci_readl(host, ATMCI_IMR);
if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
- dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
+ dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
iflags);
if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
@@ -1043,6 +1124,8 @@ static void atmci_start_request(struct atmel_mci *host,
* prepared yet.)
*/
atmci_writel(host, ATMCI_IER, iflags);
+
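+ /* Arm the software timeout in case the controller never reports a data timeout by itself */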
+ mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000));
}
static void atmci_queue_request(struct atmel_mci *host,
@@ -1057,6 +1140,7 @@ static void atmci_queue_request(struct atmel_mci *host,
host->state = STATE_SENDING_CMD;
atmci_start_request(host, slot);
} else {
+ dev_dbg(&host->pdev->dev, "queue request\n");
list_add_tail(&slot->queue_node, &host->queue);
}
spin_unlock_bh(&host->lock);
@@ -1069,6 +1153,7 @@ static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
struct mmc_data *data;
WARN_ON(slot->mrq);
+ dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
/*
* We may "know" the card is gone even though there's still an
@@ -1308,6 +1393,8 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
host->state = STATE_IDLE;
}
+ del_timer(&host->timer);
+
spin_unlock(&host->lock);
mmc_request_done(prev_mmc, mrq);
spin_lock(&host->lock);
@@ -1330,21 +1417,13 @@ static void atmci_command_complete(struct atmel_mci *host,
cmd->error = -EILSEQ;
else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))
cmd->error = -EIO;
- else
- cmd->error = 0;
-
- if (cmd->error) {
- dev_dbg(&host->pdev->dev,
- "command error: status=0x%08x\n", status);
-
- if (cmd->data) {
- host->stop_transfer(host);
- host->data = NULL;
- atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY
- | ATMCI_TXRDY | ATMCI_RXRDY
- | ATMCI_DATA_ERROR_FLAGS);
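+ /* Some IP revisions only support block sizes that are a multiple of 4 */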
+ else if (host->mrq->data && (host->mrq->data->blksz & 3)) {
+ if (host->caps.need_blksz_mul_4) {
+ cmd->error = -EINVAL;
+ host->need_reset = 1;
}
- }
+ } else
+ cmd->error = 0;
}
static void atmci_detect_change(unsigned long data)
@@ -1407,23 +1486,21 @@ static void atmci_detect_change(unsigned long data)
break;
case STATE_SENDING_CMD:
mrq->cmd->error = -ENOMEDIUM;
- if (!mrq->data)
- break;
- /* fall through */
- case STATE_SENDING_DATA:
+ if (mrq->data)
+ host->stop_transfer(host);
+ break;
+ case STATE_DATA_XFER:
mrq->data->error = -ENOMEDIUM;
host->stop_transfer(host);
break;
- case STATE_DATA_BUSY:
- case STATE_DATA_ERROR:
- if (mrq->data->error == -EINPROGRESS)
- mrq->data->error = -ENOMEDIUM;
- if (!mrq->stop)
- break;
- /* fall through */
+ case STATE_WAITING_NOTBUSY:
+ mrq->data->error = -ENOMEDIUM;
+ break;
case STATE_SENDING_STOP:
mrq->stop->error = -ENOMEDIUM;
break;
+ case STATE_END_REQUEST:
+ break;
}
atmci_request_end(host, mrq);
@@ -1451,7 +1528,6 @@ static void atmci_tasklet_func(unsigned long priv)
struct atmel_mci *host = (struct atmel_mci *)priv;
struct mmc_request *mrq = host->mrq;
struct mmc_data *data = host->data;
- struct mmc_command *cmd = host->cmd;
enum atmel_mci_state state = host->state;
enum atmel_mci_state prev_state;
u32 status;
@@ -1467,107 +1543,186 @@ static void atmci_tasklet_func(unsigned long priv)
do {
prev_state = state;
+ dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state);
switch (state) {
case STATE_IDLE:
break;
case STATE_SENDING_CMD:
+ /*
+ * The command has been sent and we are waiting for the
+ * command-ready event. Three next states are possible:
+ * END_REQUEST by default, WAITING_NOTBUSY if the command
+ * needs it, or DATA_XFER if there is data.
+ */
+ dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
if (!atmci_test_and_clear_pending(host,
- EVENT_CMD_COMPLETE))
+ EVENT_CMD_RDY))
break;
+ dev_dbg(&host->pdev->dev, "set completed cmd ready\n");
host->cmd = NULL;
- atmci_set_completed(host, EVENT_CMD_COMPLETE);
+ atmci_set_completed(host, EVENT_CMD_RDY);
atmci_command_complete(host, mrq->cmd);
- if (!mrq->data || cmd->error) {
- atmci_request_end(host, host->mrq);
- goto unlock;
- }
+ if (mrq->data) {
+ dev_dbg(&host->pdev->dev,
+ "command with data transfer");
+ /*
+ * If there is a command error don't start
+ * data transfer.
+ */
+ if (mrq->cmd->error) {
+ host->stop_transfer(host);
+ host->data = NULL;
+ atmci_writel(host, ATMCI_IDR,
+ ATMCI_TXRDY | ATMCI_RXRDY
+ | ATMCI_DATA_ERROR_FLAGS);
+ state = STATE_END_REQUEST;
+ } else
+ state = STATE_DATA_XFER;
+ } else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
+ dev_dbg(&host->pdev->dev,
+ "command response need waiting notbusy");
+ atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+ state = STATE_WAITING_NOTBUSY;
+ } else
+ state = STATE_END_REQUEST;
- prev_state = state = STATE_SENDING_DATA;
- /* fall through */
+ break;
- case STATE_SENDING_DATA:
+ case STATE_DATA_XFER:
if (atmci_test_and_clear_pending(host,
EVENT_DATA_ERROR)) {
- host->stop_transfer(host);
- if (data->stop)
- atmci_send_stop_cmd(host, data);
- state = STATE_DATA_ERROR;
+ dev_dbg(&host->pdev->dev, "set completed data error\n");
+ atmci_set_completed(host, EVENT_DATA_ERROR);
+ state = STATE_END_REQUEST;
break;
}
+ /*
+ * A data transfer is in progress. The event expected to
+ * move to the next state depends on the transfer type
+ * (PDC or DMA). Once the transfer is done we can move on
+ * to the next step: WAITING_NOTBUSY in the write case,
+ * or directly SENDING_STOP in the read case.
+ */
+ dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n");
if (!atmci_test_and_clear_pending(host,
EVENT_XFER_COMPLETE))
break;
+ dev_dbg(&host->pdev->dev,
+ "(%s) set completed xfer complete\n",
+ __func__);
atmci_set_completed(host, EVENT_XFER_COMPLETE);
- prev_state = state = STATE_DATA_BUSY;
- /* fall through */
- case STATE_DATA_BUSY:
- if (!atmci_test_and_clear_pending(host,
- EVENT_DATA_COMPLETE))
- break;
-
- host->data = NULL;
- atmci_set_completed(host, EVENT_DATA_COMPLETE);
- status = host->data_status;
- if (unlikely(status & ATMCI_DATA_ERROR_FLAGS)) {
- if (status & ATMCI_DTOE) {
- dev_dbg(&host->pdev->dev,
- "data timeout error\n");
- data->error = -ETIMEDOUT;
- } else if (status & ATMCI_DCRCE) {
- dev_dbg(&host->pdev->dev,
- "data CRC error\n");
- data->error = -EILSEQ;
- } else {
- dev_dbg(&host->pdev->dev,
- "data FIFO error (status=%08x)\n",
- status);
- data->error = -EIO;
- }
+ if (host->data->flags & MMC_DATA_WRITE) {
+ atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+ state = STATE_WAITING_NOTBUSY;
+ } else if (host->mrq->stop) {
+ atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
+ atmci_send_stop_cmd(host, data);
+ state = STATE_SENDING_STOP;
} else {
+ host->data = NULL;
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
- atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS);
+ state = STATE_END_REQUEST;
}
+ break;
- if (!data->stop) {
- atmci_request_end(host, host->mrq);
- goto unlock;
- }
+ case STATE_WAITING_NOTBUSY:
+ /*
+ * We can be in this state for two reasons: a command that
+ * requires waiting for the not-busy signal (stop command
+ * included), or a write operation. In the latter case,
+ * we need to send a stop command.
+ */
+ dev_dbg(&host->pdev->dev, "FSM: not busy?\n");
+ if (!atmci_test_and_clear_pending(host,
+ EVENT_NOTBUSY))
+ break;
- prev_state = state = STATE_SENDING_STOP;
- if (!data->error)
- atmci_send_stop_cmd(host, data);
- /* fall through */
+ dev_dbg(&host->pdev->dev, "set completed not busy\n");
+ atmci_set_completed(host, EVENT_NOTBUSY);
+
+ if (host->data) {
+ /*
+ * For some commands such as CMD53, even if
+ * there is a data transfer, there is no stop
+ * command to send.
+ */
+ if (host->mrq->stop) {
+ atmci_writel(host, ATMCI_IER,
+ ATMCI_CMDRDY);
+ atmci_send_stop_cmd(host, data);
+ state = STATE_SENDING_STOP;
+ } else {
+ host->data = NULL;
+ data->bytes_xfered = data->blocks
+ * data->blksz;
+ data->error = 0;
+ state = STATE_END_REQUEST;
+ }
+ } else
+ state = STATE_END_REQUEST;
+ break;
case STATE_SENDING_STOP:
+ /*
+ * In this state, it is important to set host->data to
+ * NULL (which is tested in the waiting notbusy state)
+ * in order to go to the end request state instead of
+ * sending stop again.
+ */
+ dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
if (!atmci_test_and_clear_pending(host,
- EVENT_CMD_COMPLETE))
+ EVENT_CMD_RDY))
break;
+ dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");
host->cmd = NULL;
+ host->data = NULL;
+ data->bytes_xfered = data->blocks * data->blksz;
+ data->error = 0;
atmci_command_complete(host, mrq->stop);
- atmci_request_end(host, host->mrq);
- goto unlock;
+ if (mrq->stop->error) {
+ host->stop_transfer(host);
+ atmci_writel(host, ATMCI_IDR,
+ ATMCI_TXRDY | ATMCI_RXRDY
+ | ATMCI_DATA_ERROR_FLAGS);
+ state = STATE_END_REQUEST;
+ } else {
+ atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
+ state = STATE_WAITING_NOTBUSY;
+ }
+ break;
- case STATE_DATA_ERROR:
- if (!atmci_test_and_clear_pending(host,
- EVENT_XFER_COMPLETE))
- break;
+ case STATE_END_REQUEST:
+ atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY
+ | ATMCI_DATA_ERROR_FLAGS);
+ status = host->data_status;
+ if (unlikely(status)) {
+ host->stop_transfer(host);
+ host->data = NULL;
+ if (status & ATMCI_DTOE) {
+ data->error = -ETIMEDOUT;
+ } else if (status & ATMCI_DCRCE) {
+ data->error = -EILSEQ;
+ } else {
+ data->error = -EIO;
+ }
+ }
- state = STATE_DATA_BUSY;
+ atmci_request_end(host, host->mrq);
+ state = STATE_IDLE;
break;
}
} while (state != prev_state);
host->state = state;
-unlock:
spin_unlock(&host->lock);
}
@@ -1620,9 +1775,6 @@ static void atmci_read_data_pio(struct atmel_mci *host)
| ATMCI_DATA_ERROR_FLAGS));
host->data_status = status;
data->bytes_xfered += nbytes;
- smp_wmb();
- atmci_set_pending(host, EVENT_DATA_ERROR);
- tasklet_schedule(&host->tasklet);
return;
}
} while (status & ATMCI_RXRDY);
@@ -1691,9 +1843,6 @@ static void atmci_write_data_pio(struct atmel_mci *host)
| ATMCI_DATA_ERROR_FLAGS));
host->data_status = status;
data->bytes_xfered += nbytes;
- smp_wmb();
- atmci_set_pending(host, EVENT_DATA_ERROR);
- tasklet_schedule(&host->tasklet);
return;
}
} while (status & ATMCI_TXRDY);
@@ -1711,16 +1860,6 @@ done:
atmci_set_pending(host, EVENT_XFER_COMPLETE);
}
-static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status)
-{
- atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
-
- host->cmd_status = status;
- smp_wmb();
- atmci_set_pending(host, EVENT_CMD_COMPLETE);
- tasklet_schedule(&host->tasklet);
-}
-
static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
{
int i;
@@ -1748,17 +1887,21 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
break;
if (pending & ATMCI_DATA_ERROR_FLAGS) {
+ dev_dbg(&host->pdev->dev, "IRQ: data error\n");
atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
- | ATMCI_RXRDY | ATMCI_TXRDY);
- pending &= atmci_readl(host, ATMCI_IMR);
+ | ATMCI_RXRDY | ATMCI_TXRDY
+ | ATMCI_ENDRX | ATMCI_ENDTX
+ | ATMCI_RXBUFF | ATMCI_TXBUFE);
host->data_status = status;
+ dev_dbg(&host->pdev->dev, "set pending data error\n");
smp_wmb();
atmci_set_pending(host, EVENT_DATA_ERROR);
tasklet_schedule(&host->tasklet);
}
if (pending & ATMCI_TXBUFE) {
+ dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n");
atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
/*
@@ -1774,6 +1917,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
atmci_pdc_complete(host);
}
} else if (pending & ATMCI_ENDTX) {
+ dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n");
atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
if (host->data_size) {
@@ -1784,6 +1928,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
}
if (pending & ATMCI_RXBUFF) {
+ dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n");
atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
/*
@@ -1799,6 +1944,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
atmci_pdc_complete(host);
}
} else if (pending & ATMCI_ENDRX) {
+ dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n");
atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
if (host->data_size) {
@@ -1808,23 +1954,44 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
}
}
+ /*
+ * The first MCI IPs, mainly the ones with a PDC, have
+ * issues with the not-busy signal: it is never raised after
+ * a data transmission unless a stop command has been sent.
+ * The appropriate workaround is to use the BLKE signal.
+ */
+ if (pending & ATMCI_BLKE) {
+ dev_dbg(&host->pdev->dev, "IRQ: blke\n");
+ atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
+ smp_wmb();
+ dev_dbg(&host->pdev->dev, "set pending notbusy\n");
+ atmci_set_pending(host, EVENT_NOTBUSY);
+ tasklet_schedule(&host->tasklet);
+ }
if (pending & ATMCI_NOTBUSY) {
- atmci_writel(host, ATMCI_IDR,
- ATMCI_DATA_ERROR_FLAGS | ATMCI_NOTBUSY);
- if (!host->data_status)
- host->data_status = status;
+ dev_dbg(&host->pdev->dev, "IRQ: not_busy\n");
+ atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
smp_wmb();
- atmci_set_pending(host, EVENT_DATA_COMPLETE);
+ dev_dbg(&host->pdev->dev, "set pending notbusy\n");
+ atmci_set_pending(host, EVENT_NOTBUSY);
tasklet_schedule(&host->tasklet);
}
+
if (pending & ATMCI_RXRDY)
atmci_read_data_pio(host);
if (pending & ATMCI_TXRDY)
atmci_write_data_pio(host);
- if (pending & ATMCI_CMDRDY)
- atmci_cmd_interrupt(host, status);
+ if (pending & ATMCI_CMDRDY) {
+ dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n");
+ atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
+ host->cmd_status = status;
+ smp_wmb();
+ dev_dbg(&host->pdev->dev, "set pending cmd rdy\n");
+ atmci_set_pending(host, EVENT_CMD_RDY);
+ tasklet_schedule(&host->tasklet);
+ }
if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
atmci_sdio_interrupt(host, status);
@@ -1877,13 +2044,26 @@ static int __init atmci_init_slot(struct atmel_mci *host,
mmc->caps |= MMC_CAP_SDIO_IRQ;
if (host->caps.has_highspeed)
mmc->caps |= MMC_CAP_SD_HIGHSPEED;
- if (slot_data->bus_width >= 4)
+ /*
+ * Without the read/write proof capability, it is strongly suggested
+ * to use only one data bit, to prevent FIFO underruns and overruns
+ * which would corrupt data.
+ */
+ if ((slot_data->bus_width >= 4) && host->caps.has_rwproof)
mmc->caps |= MMC_CAP_4_BIT_DATA;
- mmc->max_segs = 64;
- mmc->max_req_size = 32768 * 512;
- mmc->max_blk_size = 32768;
- mmc->max_blk_count = 512;
+ if (atmci_get_version(host) < 0x200) {
+ mmc->max_segs = 256;
+ mmc->max_blk_size = 4095;
+ mmc->max_blk_count = 256;
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs;
+ } else {
+ mmc->max_segs = 64;
+ mmc->max_req_size = 32768 * 512;
+ mmc->max_blk_size = 32768;
+ mmc->max_blk_count = 512;
+ }
/* Assume card is present initially */
set_bit(ATMCI_CARD_PRESENT, &slot->flags);
@@ -2007,11 +2187,6 @@ static bool atmci_configure_dma(struct atmel_mci *host)
}
}
-static inline unsigned int atmci_get_version(struct atmel_mci *host)
-{
- return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
-}
-
/*
* HSMCI (High Speed MCI) module is not fully compatible with MCI module.
* HSMCI provides DMA support and a new config register but no more supports
@@ -2032,6 +2207,9 @@ static void __init atmci_get_cap(struct atmel_mci *host)
host->caps.has_highspeed = 0;
host->caps.has_rwproof = 0;
host->caps.has_odd_clk_div = 0;
+ host->caps.has_bad_data_ordering = 1;
+ host->caps.need_reset_after_xfer = 1;
+ host->caps.need_blksz_mul_4 = 1;
/* keep only major version number */
switch (version & 0xf00) {
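+ /* Deliberate fall-through: newer IP revisions also clear the quirks of the older ones below */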
@@ -2051,7 +2229,11 @@ static void __init atmci_get_cap(struct atmel_mci *host)
host->caps.has_highspeed = 1;
case 0x200:
host->caps.has_rwproof = 1;
+ host->caps.need_blksz_mul_4 = 0;
case 0x100:
+ host->caps.has_bad_data_ordering = 0;
+ host->caps.need_reset_after_xfer = 0;
+ case 0x0:
break;
default:
host->caps.has_pdc = 0;
@@ -2138,14 +2320,20 @@ static int __init atmci_probe(struct platform_device *pdev)
if (pdata->slot[0].bus_width) {
ret = atmci_init_slot(host, &pdata->slot[0],
0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
- if (!ret)
+ if (!ret) {
nr_slots++;
+ host->buf_size = host->slot[0]->mmc->max_req_size;
+ }
}
if (pdata->slot[1].bus_width) {
ret = atmci_init_slot(host, &pdata->slot[1],
1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
- if (!ret)
+ if (!ret) {
nr_slots++;
+ if (host->slot[1]->mmc->max_req_size > host->buf_size)
+ host->buf_size =
+ host->slot[1]->mmc->max_req_size;
+ }
}
if (!nr_slots) {
@@ -2153,6 +2341,19 @@ static int __init atmci_probe(struct platform_device *pdev)
goto err_init_slot;
}
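+ /* Without the r/w proof capability, allocate a coherent bounce buffer big enough for the largest request */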
+ if (!host->caps.has_rwproof) {
+ host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size,
+ &host->buf_phys_addr,
+ GFP_KERNEL);
+ if (!host->buffer) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "buffer allocation failed\n");
+ goto err_init_slot;
+ }
+ }
+
+ setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
+
dev_info(&pdev->dev,
"Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
host->mapbase, irq, nr_slots);
@@ -2179,6 +2380,10 @@ static int __exit atmci_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
+ if (host->buffer)
+ dma_free_coherent(&pdev->dev, host->buf_size,
+ host->buffer, host->buf_phys_addr);
+
for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
if (host->slot[i])
atmci_cleanup_slot(host->slot[i], i);
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index c1f3673ae1e..7cf6c624bf7 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1533,4 +1533,5 @@ module_exit(davinci_mmcsd_exit);
MODULE_AUTHOR("Texas Instruments India");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller");
+MODULE_ALIAS("platform:davinci_mmc");
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index ab3fc461710..9bbf45f8c53 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -100,8 +100,6 @@ struct dw_mci_slot {
int last_detect_state;
};
-static struct workqueue_struct *dw_mci_card_workqueue;
-
#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
@@ -859,10 +857,10 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
int_mask = mci_readl(host, INTMASK);
if (enb) {
mci_writel(host, INTMASK,
- (int_mask | (1 << SDMMC_INT_SDIO(slot->id))));
+ (int_mask | SDMMC_INT_SDIO(slot->id)));
} else {
mci_writel(host, INTMASK,
- (int_mask & ~(1 << SDMMC_INT_SDIO(slot->id))));
+ (int_mask & ~SDMMC_INT_SDIO(slot->id)));
}
}
@@ -1605,7 +1603,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
if (pending & SDMMC_INT_CD) {
mci_writel(host, RINTSTS, SDMMC_INT_CD);
- queue_work(dw_mci_card_workqueue, &host->card_work);
+ queue_work(host->card_workqueue, &host->card_work);
}
/* Handle SDIO Interrupts */
@@ -1844,7 +1842,7 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
* Card may have been plugged in prior to boot so we
* need to run the detect tasklet
*/
- queue_work(dw_mci_card_workqueue, &host->card_work);
+ queue_work(host->card_workqueue, &host->card_work);
return 0;
}
@@ -2021,9 +2019,9 @@ int dw_mci_probe(struct dw_mci *host)
mci_writel(host, CLKSRC, 0);
tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
- dw_mci_card_workqueue = alloc_workqueue("dw-mci-card",
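+ /* The card-detect workqueue is per host so that multiple controller instances do not interfere */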
+ host->card_workqueue = alloc_workqueue("dw-mci-card",
WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
- if (!dw_mci_card_workqueue)
+ if (!host->card_workqueue)
goto err_dmaunmap;
INIT_WORK(&host->card_work, dw_mci_work_routine_card);
ret = request_irq(host->irq, dw_mci_interrupt, host->irq_flags, "dw-mci", host);
@@ -2085,7 +2083,7 @@ err_init_slot:
free_irq(host->irq, host);
err_workqueue:
- destroy_workqueue(dw_mci_card_workqueue);
+ destroy_workqueue(host->card_workqueue);
err_dmaunmap:
if (host->use_dma && host->dma_ops->exit)
@@ -2119,7 +2117,7 @@ void dw_mci_remove(struct dw_mci *host)
mci_writel(host, CLKSRC, 0);
free_irq(host->irq, host);
- destroy_workqueue(dw_mci_card_workqueue);
+ destroy_workqueue(host->card_workqueue);
dma_free_coherent(&host->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
if (host->use_dma && host->dma_ops->exit)
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
deleted file mode 100644
index ea0f3cedef2..00000000000
--- a/drivers/mmc/host/imxmmc.c
+++ /dev/null
@@ -1,1169 +0,0 @@
-/*
- * linux/drivers/mmc/host/imxmmc.c - Motorola i.MX MMCI driver
- *
- * Copyright (C) 2004 Sascha Hauer, Pengutronix <sascha@saschahauer.de>
- * Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
- *
- * derived from pxamci.c by Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/blkdev.h>
-#include <linux/dma-mapping.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/card.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include <asm/dma.h>
-#include <asm/irq.h>
-#include <asm/sizes.h>
-#include <mach/mmc.h>
-#include <mach/imx-dma.h>
-
-#include "imxmmc.h"
-
-#define DRIVER_NAME "imx-mmc"
-
-#define IMXMCI_INT_MASK_DEFAULT (INT_MASK_BUF_READY | INT_MASK_DATA_TRAN | \
- INT_MASK_WRITE_OP_DONE | INT_MASK_END_CMD_RES | \
- INT_MASK_AUTO_CARD_DETECT | INT_MASK_DAT0_EN | INT_MASK_SDIO)
-
-struct imxmci_host {
- struct mmc_host *mmc;
- spinlock_t lock;
- struct resource *res;
- void __iomem *base;
- int irq;
- imx_dmach_t dma;
- volatile unsigned int imask;
- unsigned int power_mode;
- unsigned int present;
- struct imxmmc_platform_data *pdata;
-
- struct mmc_request *req;
- struct mmc_command *cmd;
- struct mmc_data *data;
-
- struct timer_list timer;
- struct tasklet_struct tasklet;
- unsigned int status_reg;
- unsigned long pending_events;
- /* Next two fields are there for CPU driven transfers to overcome SDHC deficiencies */
- u16 *data_ptr;
- unsigned int data_cnt;
- atomic_t stuck_timeout;
-
- unsigned int dma_nents;
- unsigned int dma_size;
- unsigned int dma_dir;
- int dma_allocated;
-
- unsigned char actual_bus_width;
-
- int prev_cmd_code;
-
- struct clk *clk;
-};
-
-#define IMXMCI_PEND_IRQ_b 0
-#define IMXMCI_PEND_DMA_END_b 1
-#define IMXMCI_PEND_DMA_ERR_b 2
-#define IMXMCI_PEND_WAIT_RESP_b 3
-#define IMXMCI_PEND_DMA_DATA_b 4
-#define IMXMCI_PEND_CPU_DATA_b 5
-#define IMXMCI_PEND_CARD_XCHG_b 6
-#define IMXMCI_PEND_SET_INIT_b 7
-#define IMXMCI_PEND_STARTED_b 8
-
-#define IMXMCI_PEND_IRQ_m (1 << IMXMCI_PEND_IRQ_b)
-#define IMXMCI_PEND_DMA_END_m (1 << IMXMCI_PEND_DMA_END_b)
-#define IMXMCI_PEND_DMA_ERR_m (1 << IMXMCI_PEND_DMA_ERR_b)
-#define IMXMCI_PEND_WAIT_RESP_m (1 << IMXMCI_PEND_WAIT_RESP_b)
-#define IMXMCI_PEND_DMA_DATA_m (1 << IMXMCI_PEND_DMA_DATA_b)
-#define IMXMCI_PEND_CPU_DATA_m (1 << IMXMCI_PEND_CPU_DATA_b)
-#define IMXMCI_PEND_CARD_XCHG_m (1 << IMXMCI_PEND_CARD_XCHG_b)
-#define IMXMCI_PEND_SET_INIT_m (1 << IMXMCI_PEND_SET_INIT_b)
-#define IMXMCI_PEND_STARTED_m (1 << IMXMCI_PEND_STARTED_b)
-
-static void imxmci_stop_clock(struct imxmci_host *host)
-{
- int i = 0;
- u16 reg;
-
- reg = readw(host->base + MMC_REG_STR_STP_CLK);
- writew(reg & ~STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
- while (i < 0x1000) {
- if (!(i & 0x7f)) {
- reg = readw(host->base + MMC_REG_STR_STP_CLK);
- writew(reg | STR_STP_CLK_STOP_CLK,
- host->base + MMC_REG_STR_STP_CLK);
- }
-
- reg = readw(host->base + MMC_REG_STATUS);
- if (!(reg & STATUS_CARD_BUS_CLK_RUN)) {
- /* Check twice before cut */
- reg = readw(host->base + MMC_REG_STATUS);
- if (!(reg & STATUS_CARD_BUS_CLK_RUN))
- return;
- }
-
- i++;
- }
- dev_dbg(mmc_dev(host->mmc), "imxmci_stop_clock blocked, no luck\n");
-}
-
-static int imxmci_start_clock(struct imxmci_host *host)
-{
- unsigned int trials = 0;
- unsigned int delay_limit = 128;
- unsigned long flags;
- u16 reg;
-
- reg = readw(host->base + MMC_REG_STR_STP_CLK);
- writew(reg & ~STR_STP_CLK_STOP_CLK, host->base + MMC_REG_STR_STP_CLK);
-
- clear_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
-
- /*
- * Command start of the clock, this usually succeeds in less
- * then 6 delay loops, but during card detection (low clockrate)
- * it takes up to 5000 delay loops and sometimes fails for the first time
- */
- reg = readw(host->base + MMC_REG_STR_STP_CLK);
- writew(reg | STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
-
- do {
- unsigned int delay = delay_limit;
-
- while (delay--) {
- reg = readw(host->base + MMC_REG_STATUS);
- if (reg & STATUS_CARD_BUS_CLK_RUN) {
- /* Check twice before cut */
- reg = readw(host->base + MMC_REG_STATUS);
- if (reg & STATUS_CARD_BUS_CLK_RUN)
- return 0;
- }
-
- if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
- return 0;
- }
-
- local_irq_save(flags);
- /*
- * Ensure, that request is not doubled under all possible circumstances.
- * It is possible, that cock running state is missed, because some other
- * IRQ or schedule delays this function execution and the clocks has
- * been already stopped by other means (response processing, SDHC HW)
- */
- if (!test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) {
- reg = readw(host->base + MMC_REG_STR_STP_CLK);
- writew(reg | STR_STP_CLK_START_CLK,
- host->base + MMC_REG_STR_STP_CLK);
- }
- local_irq_restore(flags);
-
- } while (++trials < 256);
-
- dev_err(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n");
-
- return -1;
-}
-
-static void imxmci_softreset(struct imxmci_host *host)
-{
- int i;
-
- /* reset sequence */
- writew(0x08, host->base + MMC_REG_STR_STP_CLK);
- writew(0x0D, host->base + MMC_REG_STR_STP_CLK);
-
- for (i = 0; i < 8; i++)
- writew(0x05, host->base + MMC_REG_STR_STP_CLK);
-
- writew(0xff, host->base + MMC_REG_RES_TO);
- writew(512, host->base + MMC_REG_BLK_LEN);
- writew(1, host->base + MMC_REG_NOB);
-}
-
-static int imxmci_busy_wait_for_status(struct imxmci_host *host,
- unsigned int *pstat, unsigned int stat_mask,
- int timeout, const char *where)
-{
- int loops = 0;
-
- while (!(*pstat & stat_mask)) {
- loops += 2;
- if (loops >= timeout) {
- dev_dbg(mmc_dev(host->mmc), "busy wait timeout in %s, STATUS = 0x%x (0x%x)\n",
- where, *pstat, stat_mask);
- return -1;
- }
- udelay(2);
- *pstat |= readw(host->base + MMC_REG_STATUS);
- }
- if (!loops)
- return 0;
-
- /* The busy-wait is expected there for clock <8MHz due to SDHC hardware flaws */
- if (!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock >= 8000000))
- dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
- loops, where, *pstat, stat_mask);
- return loops;
-}
-
-static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
-{
- unsigned int nob = data->blocks;
- unsigned int blksz = data->blksz;
- unsigned int datasz = nob * blksz;
- int i;
-
- if (data->flags & MMC_DATA_STREAM)
- nob = 0xffff;
-
- host->data = data;
- data->bytes_xfered = 0;
-
- writew(nob, host->base + MMC_REG_NOB);
- writew(blksz, host->base + MMC_REG_BLK_LEN);
-
- /*
- * DMA cannot be used for small block sizes, we have to use CPU driven transfers otherwise.
- * We are in big troubles for non-512 byte transfers according to note in the paragraph
- * 20.6.7 of User Manual anyway, but we need to be able to transfer SCR at least.
- * The situation is even more complex in reality. The SDHC in not able to handle wll
- * partial FIFO fills and reads. The length has to be rounded up to burst size multiple.
- * This is required for SCR read at least.
- */
- if (datasz < 512) {
- host->dma_size = datasz;
- if (data->flags & MMC_DATA_READ) {
- host->dma_dir = DMA_FROM_DEVICE;
-
- /* Hack to enable read SCR */
- writew(1, host->base + MMC_REG_NOB);
- writew(512, host->base + MMC_REG_BLK_LEN);
- } else {
- host->dma_dir = DMA_TO_DEVICE;
- }
-
- /* Convert back to virtual address */
- host->data_ptr = (u16 *)sg_virt(data->sg);
- host->data_cnt = 0;
-
- clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
- set_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);
-
- return;
- }
-
- if (data->flags & MMC_DATA_READ) {
- host->dma_dir = DMA_FROM_DEVICE;
- host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, host->dma_dir);
-
- imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
- host->res->start + MMC_REG_BUFFER_ACCESS,
- DMA_MODE_READ);
-
- /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/
- CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN;
- } else {
- host->dma_dir = DMA_TO_DEVICE;
-
- host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, host->dma_dir);
-
- imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
- host->res->start + MMC_REG_BUFFER_ACCESS,
- DMA_MODE_WRITE);
-
- /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/
- CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN;
- }
-
-#if 1 /* This code is there only for consistency checking and can be disabled in future */
- host->dma_size = 0;
- for (i = 0; i < host->dma_nents; i++)
- host->dma_size += data->sg[i].length;
-
- if (datasz > host->dma_size) {
- dev_err(mmc_dev(host->mmc), "imxmci_setup_data datasz 0x%x > 0x%x dm_size\n",
- datasz, host->dma_size);
- }
-#endif
-
- host->dma_size = datasz;
-
- wmb();
-
- set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
- clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);
-
- /* start DMA engine for read, write is delayed after initial response */
- if (host->dma_dir == DMA_FROM_DEVICE)
- imx_dma_enable(host->dma);
-}
-
-static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, unsigned int cmdat)
-{
- unsigned long flags;
- u32 imask;
-
- WARN_ON(host->cmd != NULL);
- host->cmd = cmd;
-
- /* Ensure, that clock are stopped else command programming and start fails */
- imxmci_stop_clock(host);
-
- if (cmd->flags & MMC_RSP_BUSY)
- cmdat |= CMD_DAT_CONT_BUSY;
-
- switch (mmc_resp_type(cmd)) {
- case MMC_RSP_R1: /* short CRC, OPCODE */
- case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
- cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R1;
- break;
- case MMC_RSP_R2: /* long 136 bit + CRC */
- cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R2;
- break;
- case MMC_RSP_R3: /* short */
- cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R3;
- break;
- default:
- break;
- }
-
- if (test_and_clear_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events))
- cmdat |= CMD_DAT_CONT_INIT; /* This command needs init */
-
- if (host->actual_bus_width == MMC_BUS_WIDTH_4)
- cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;
-
- writew(cmd->opcode, host->base + MMC_REG_CMD);
- writew(cmd->arg >> 16, host->base + MMC_REG_ARGH);
- writew(cmd->arg & 0xffff, host->base + MMC_REG_ARGL);
- writew(cmdat, host->base + MMC_REG_CMD_DAT_CONT);
-
- atomic_set(&host->stuck_timeout, 0);
- set_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events);
-
-
- imask = IMXMCI_INT_MASK_DEFAULT;
- imask &= ~INT_MASK_END_CMD_RES;
- if (cmdat & CMD_DAT_CONT_DATA_ENABLE) {
- /* imask &= ~INT_MASK_BUF_READY; */
- imask &= ~INT_MASK_DATA_TRAN;
- if (cmdat & CMD_DAT_CONT_WRITE)
- imask &= ~INT_MASK_WRITE_OP_DONE;
- if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
- imask &= ~INT_MASK_BUF_READY;
- }
-
- spin_lock_irqsave(&host->lock, flags);
- host->imask = imask;
- writew(host->imask, host->base + MMC_REG_INT_MASK);
- spin_unlock_irqrestore(&host->lock, flags);
-
- dev_dbg(mmc_dev(host->mmc), "CMD%02d (0x%02x) mask set to 0x%04x\n",
- cmd->opcode, cmd->opcode, imask);
-
- imxmci_start_clock(host);
-}
-
-static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *req)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&host->lock, flags);
-
- host->pending_events &= ~(IMXMCI_PEND_WAIT_RESP_m | IMXMCI_PEND_DMA_END_m |
- IMXMCI_PEND_DMA_DATA_m | IMXMCI_PEND_CPU_DATA_m);
-
- host->imask = IMXMCI_INT_MASK_DEFAULT;
- writew(host->imask, host->base + MMC_REG_INT_MASK);
-
- spin_unlock_irqrestore(&host->lock, flags);
-
- if (req && req->cmd)
- host->prev_cmd_code = req->cmd->opcode;
-
- host->req = NULL;
- host->cmd = NULL;
- host->data = NULL;
- mmc_request_done(host->mmc, req);
-}
-
-static int imxmci_finish_data(struct imxmci_host *host, unsigned int stat)
-{
- struct mmc_data *data = host->data;
- int data_error;
-
- if (test_and_clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
- imx_dma_disable(host->dma);
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents,
- host->dma_dir);
- }
-
- if (stat & STATUS_ERR_MASK) {
- dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n", stat);
- if (stat & (STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR))
- data->error = -EILSEQ;
- else if (stat & STATUS_TIME_OUT_READ)
- data->error = -ETIMEDOUT;
- else
- data->error = -EIO;
- } else {
- data->bytes_xfered = host->dma_size;
- }
-
- data_error = data->error;
-
- host->data = NULL;
-
- return data_error;
-}
-
-static int imxmci_cmd_done(struct imxmci_host *host, unsigned int stat)
-{
- struct mmc_command *cmd = host->cmd;
- int i;
- u32 a, b, c;
- struct mmc_data *data = host->data;
-
- if (!cmd)
- return 0;
-
- host->cmd = NULL;
-
- if (stat & STATUS_TIME_OUT_RESP) {
- dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
- cmd->error = -ETIMEDOUT;
- } else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
- dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
- cmd->error = -EILSEQ;
- }
-
- if (cmd->flags & MMC_RSP_PRESENT) {
- if (cmd->flags & MMC_RSP_136) {
- for (i = 0; i < 4; i++) {
- a = readw(host->base + MMC_REG_RES_FIFO);
- b = readw(host->base + MMC_REG_RES_FIFO);
- cmd->resp[i] = a << 16 | b;
- }
- } else {
- a = readw(host->base + MMC_REG_RES_FIFO);
- b = readw(host->base + MMC_REG_RES_FIFO);
- c = readw(host->base + MMC_REG_RES_FIFO);
- cmd->resp[0] = a << 24 | b << 8 | c >> 8;
- }
- }
-
- dev_dbg(mmc_dev(host->mmc), "RESP 0x%08x, 0x%08x, 0x%08x, 0x%08x, error %d\n",
- cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3], cmd->error);
-
- if (data && !cmd->error && !(stat & STATUS_ERR_MASK)) {
- if (host->req->data->flags & MMC_DATA_WRITE) {
-
- /* Wait for FIFO to be empty before starting DMA write */
-
- stat = readw(host->base + MMC_REG_STATUS);
- if (imxmci_busy_wait_for_status(host, &stat,
- STATUS_APPL_BUFF_FE,
- 40, "imxmci_cmd_done DMA WR") < 0) {
- cmd->error = -EIO;
- imxmci_finish_data(host, stat);
- if (host->req)
- imxmci_finish_request(host, host->req);
- dev_warn(mmc_dev(host->mmc), "STATUS = 0x%04x\n",
- stat);
- return 0;
- }
-
- if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
- imx_dma_enable(host->dma);
- }
- } else {
- struct mmc_request *req;
- imxmci_stop_clock(host);
- req = host->req;
-
- if (data)
- imxmci_finish_data(host, stat);
-
- if (req)
- imxmci_finish_request(host, req);
- else
- dev_warn(mmc_dev(host->mmc), "imxmci_cmd_done: no request to finish\n");
- }
-
- return 1;
-}
-
-static int imxmci_data_done(struct imxmci_host *host, unsigned int stat)
-{
- struct mmc_data *data = host->data;
- int data_error;
-
- if (!data)
- return 0;
-
- data_error = imxmci_finish_data(host, stat);
-
- if (host->req->stop) {
- imxmci_stop_clock(host);
- imxmci_start_cmd(host, host->req->stop, 0);
- } else {
- struct mmc_request *req;
- req = host->req;
- if (req)
- imxmci_finish_request(host, req);
- else
- dev_warn(mmc_dev(host->mmc), "imxmci_data_done: no request to finish\n");
- }
-
- return 1;
-}
-
-static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
-{
- int i;
- int burst_len;
- int trans_done = 0;
- unsigned int stat = *pstat;
-
- if (host->actual_bus_width != MMC_BUS_WIDTH_4)
- burst_len = 16;
- else
- burst_len = 64;
-
- /* This is unfortunately required */
- dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n",
- stat);
-
- udelay(20); /* required for clocks < 8MHz*/
-
- if (host->dma_dir == DMA_FROM_DEVICE) {
- imxmci_busy_wait_for_status(host, &stat,
- STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE |
- STATUS_TIME_OUT_READ,
- 50, "imxmci_cpu_driven_data read");
-
- while ((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) &&
- !(stat & STATUS_TIME_OUT_READ) &&
- (host->data_cnt < 512)) {
-
- udelay(20); /* required for clocks < 8MHz*/
-
- for (i = burst_len; i >= 2 ; i -= 2) {
- u16 data;
- data = readw(host->base + MMC_REG_BUFFER_ACCESS);
- udelay(10); /* required for clocks < 8MHz*/
- if (host->data_cnt+2 <= host->dma_size) {
- *(host->data_ptr++) = data;
- } else {
- if (host->data_cnt < host->dma_size)
- *(u8 *)(host->data_ptr) = data;
- }
- host->data_cnt += 2;
- }
-
- stat = readw(host->base + MMC_REG_STATUS);
-
- dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n",
- host->data_cnt, burst_len, stat);
- }
-
- if ((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512))
- trans_done = 1;
-
- if (host->dma_size & 0x1ff)
- stat &= ~STATUS_CRC_READ_ERR;
-
- if (stat & STATUS_TIME_OUT_READ) {
- dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read timeout STATUS = 0x%x\n",
- stat);
- trans_done = -1;
- }
-
- } else {
- imxmci_busy_wait_for_status(host, &stat,
- STATUS_APPL_BUFF_FE,
- 20, "imxmci_cpu_driven_data write");
-
- while ((stat & STATUS_APPL_BUFF_FE) &&
- (host->data_cnt < host->dma_size)) {
- if (burst_len >= host->dma_size - host->data_cnt) {
- burst_len = host->dma_size - host->data_cnt;
- host->data_cnt = host->dma_size;
- trans_done = 1;
- } else {
- host->data_cnt += burst_len;
- }
-
- for (i = burst_len; i > 0 ; i -= 2)
- writew(*(host->data_ptr++), host->base + MMC_REG_BUFFER_ACCESS);
-
- stat = readw(host->base + MMC_REG_STATUS);
-
- dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data write burst %d STATUS = 0x%x\n",
- burst_len, stat);
- }
- }
-
- *pstat = stat;
-
- return trans_done;
-}
-
-static void imxmci_dma_irq(int dma, void *devid)
-{
- struct imxmci_host *host = devid;
- u32 stat = readw(host->base + MMC_REG_STATUS);
-
- atomic_set(&host->stuck_timeout, 0);
- host->status_reg = stat;
- set_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
- tasklet_schedule(&host->tasklet);
-}
-
-static irqreturn_t imxmci_irq(int irq, void *devid)
-{
- struct imxmci_host *host = devid;
- u32 stat = readw(host->base + MMC_REG_STATUS);
- int handled = 1;
-
- writew(host->imask | INT_MASK_SDIO | INT_MASK_AUTO_CARD_DETECT,
- host->base + MMC_REG_INT_MASK);
-
- atomic_set(&host->stuck_timeout, 0);
- host->status_reg = stat;
- set_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
- set_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
- tasklet_schedule(&host->tasklet);
-
- return IRQ_RETVAL(handled);
-}
-
-static void imxmci_tasklet_fnc(unsigned long data)
-{
- struct imxmci_host *host = (struct imxmci_host *)data;
- u32 stat;
- unsigned int data_dir_mask = 0; /* STATUS_WR_CRC_ERROR_CODE_MASK */
- int timeout = 0;
-
- if (atomic_read(&host->stuck_timeout) > 4) {
- char *what;
- timeout = 1;
- stat = readw(host->base + MMC_REG_STATUS);
- host->status_reg = stat;
- if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
- if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
- what = "RESP+DMA";
- else
- what = "RESP";
- else
- if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
- if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events))
- what = "DATA";
- else
- what = "DMA";
- else
- what = "???";
-
- dev_err(mmc_dev(host->mmc),
- "%s TIMEOUT, hardware stucked STATUS = 0x%04x IMASK = 0x%04x\n",
- what, stat,
- readw(host->base + MMC_REG_INT_MASK));
- dev_err(mmc_dev(host->mmc),
- "CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n",
- readw(host->base + MMC_REG_CMD_DAT_CONT),
- readw(host->base + MMC_REG_BLK_LEN),
- readw(host->base + MMC_REG_NOB),
- CCR(host->dma));
- dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n",
- host->cmd ? host->cmd->opcode : 0,
- host->prev_cmd_code,
- 1 << host->actual_bus_width, host->dma_size);
- }
-
- if (!host->present || timeout)
- host->status_reg = STATUS_TIME_OUT_RESP | STATUS_TIME_OUT_READ |
- STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR;
-
- if (test_bit(IMXMCI_PEND_IRQ_b, &host->pending_events) || timeout) {
- clear_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
-
- stat = readw(host->base + MMC_REG_STATUS);
- /*
- * This is not required in theory, but there is a chance of missing a flag
- * that is cleared automatically by the mask write; Freescale's original code
- * keeps the stat from IRQ time, so we do the same.
- */
- stat |= host->status_reg;
-
- if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
- stat &= ~STATUS_CRC_READ_ERR;
-
- if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
- imxmci_busy_wait_for_status(host, &stat,
- STATUS_END_CMD_RESP | STATUS_ERR_MASK,
- 20, "imxmci_tasklet_fnc resp (ERRATUM #4)");
- }
-
- if (stat & (STATUS_END_CMD_RESP | STATUS_ERR_MASK)) {
- if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
- imxmci_cmd_done(host, stat);
- if (host->data && (stat & STATUS_ERR_MASK))
- imxmci_data_done(host, stat);
- }
-
- if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) {
- stat |= readw(host->base + MMC_REG_STATUS);
- if (imxmci_cpu_driven_data(host, &stat)) {
- if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
- imxmci_cmd_done(host, stat);
- atomic_clear_mask(IMXMCI_PEND_IRQ_m|IMXMCI_PEND_CPU_DATA_m,
- &host->pending_events);
- imxmci_data_done(host, stat);
- }
- }
- }
-
- if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events) &&
- !test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
-
- stat = readw(host->base + MMC_REG_STATUS);
- /* Same as above */
- stat |= host->status_reg;
-
- if (host->dma_dir == DMA_TO_DEVICE)
- data_dir_mask = STATUS_WRITE_OP_DONE;
- else
- data_dir_mask = STATUS_DATA_TRANS_DONE;
-
- if (stat & data_dir_mask) {
- clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
- imxmci_data_done(host, stat);
- }
- }
-
- if (test_and_clear_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events)) {
-
- if (host->cmd)
- imxmci_cmd_done(host, STATUS_TIME_OUT_RESP);
-
- if (host->data)
- imxmci_data_done(host, STATUS_TIME_OUT_READ |
- STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR);
-
- if (host->req)
- imxmci_finish_request(host, host->req);
-
- mmc_detect_change(host->mmc, msecs_to_jiffies(100));
-
- }
-}
-
-static void imxmci_request(struct mmc_host *mmc, struct mmc_request *req)
-{
- struct imxmci_host *host = mmc_priv(mmc);
- unsigned int cmdat;
-
- WARN_ON(host->req != NULL);
-
- host->req = req;
-
- cmdat = 0;
-
- if (req->data) {
- imxmci_setup_data(host, req->data);
-
- cmdat |= CMD_DAT_CONT_DATA_ENABLE;
-
- if (req->data->flags & MMC_DATA_WRITE)
- cmdat |= CMD_DAT_CONT_WRITE;
-
- if (req->data->flags & MMC_DATA_STREAM)
- cmdat |= CMD_DAT_CONT_STREAM_BLOCK;
- }
-
- imxmci_start_cmd(host, req->cmd, cmdat);
-}
-
-#define CLK_RATE 19200000
-
-static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
-{
- struct imxmci_host *host = mmc_priv(mmc);
- int prescaler;
-
- if (ios->bus_width == MMC_BUS_WIDTH_4) {
- host->actual_bus_width = MMC_BUS_WIDTH_4;
- imx_gpio_mode(PB11_PF_SD_DAT3);
- BLR(host->dma) = 0; /* burst 64 byte read/write */
- } else {
- host->actual_bus_width = MMC_BUS_WIDTH_1;
- imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
- BLR(host->dma) = 16; /* burst 16 byte read/write */
- }
-
- if (host->power_mode != ios->power_mode) {
- switch (ios->power_mode) {
- case MMC_POWER_OFF:
- break;
- case MMC_POWER_UP:
- set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
- break;
- case MMC_POWER_ON:
- break;
- }
- host->power_mode = ios->power_mode;
- }
-
- if (ios->clock) {
- unsigned int clk;
- u16 reg;
-
- /* The prescaler is 5 when PERCLK2 equals 96 MHz,
- * giving 96 MHz / 5 = 19.2 MHz
- */
- clk = clk_get_rate(host->clk);
- prescaler = (clk + (CLK_RATE * 7) / 8) / CLK_RATE;
- switch (prescaler) {
- case 0:
- case 1: prescaler = 0;
- break;
- case 2: prescaler = 1;
- break;
- case 3: prescaler = 2;
- break;
- case 4: prescaler = 4;
- break;
- default:
- case 5: prescaler = 5;
- break;
- }
-
- dev_dbg(mmc_dev(host->mmc), "PERCLK2 %d MHz -> prescaler %d\n",
- clk, prescaler);
-
- for (clk = 0; clk < 8; clk++) {
- int x;
- x = CLK_RATE / (1 << clk);
- if (x <= ios->clock)
- break;
- }
-
- /* enable controller */
- reg = readw(host->base + MMC_REG_STR_STP_CLK);
- writew(reg | STR_STP_CLK_ENABLE,
- host->base + MMC_REG_STR_STP_CLK);
-
- imxmci_stop_clock(host);
- writew((prescaler << 3) | clk, host->base + MMC_REG_CLK_RATE);
- /*
- * To our understanding, the clock should not be started here, because that would
- * kick off the SDHC sequencer and send the last or a random command to the card
- */
- /* imxmci_start_clock(host); */
-
- dev_dbg(mmc_dev(host->mmc),
- "MMC_CLK_RATE: 0x%08x\n",
- readw(host->base + MMC_REG_CLK_RATE));
- } else {
- imxmci_stop_clock(host);
- }
-}
-
-static int imxmci_get_ro(struct mmc_host *mmc)
-{
- struct imxmci_host *host = mmc_priv(mmc);
-
- if (host->pdata && host->pdata->get_ro)
- return !!host->pdata->get_ro(mmc_dev(mmc));
- /*
- * Board doesn't support read only detection; let the mmc core
- * decide what to do.
- */
- return -ENOSYS;
-}
-
-
-static const struct mmc_host_ops imxmci_ops = {
- .request = imxmci_request,
- .set_ios = imxmci_set_ios,
- .get_ro = imxmci_get_ro,
-};
-
-static void imxmci_check_status(unsigned long data)
-{
- struct imxmci_host *host = (struct imxmci_host *)data;
-
- if (host->pdata && host->pdata->card_present &&
- host->pdata->card_present(mmc_dev(host->mmc)) != host->present) {
- host->present ^= 1;
- dev_info(mmc_dev(host->mmc), "card %s\n",
- host->present ? "inserted" : "removed");
-
- set_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events);
- tasklet_schedule(&host->tasklet);
- }
-
- if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events) ||
- test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
- atomic_inc(&host->stuck_timeout);
- if (atomic_read(&host->stuck_timeout) > 4)
- tasklet_schedule(&host->tasklet);
- } else {
- atomic_set(&host->stuck_timeout, 0);
-
- }
-
- mod_timer(&host->timer, jiffies + (HZ>>1));
-}
-
-static int __init imxmci_probe(struct platform_device *pdev)
-{
- struct mmc_host *mmc;
- struct imxmci_host *host = NULL;
- struct resource *r;
- int ret = 0, irq;
- u16 rev_no;
-
- pr_info("i.MX mmc driver\n");
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irq = platform_get_irq(pdev, 0);
- if (!r || irq < 0)
- return -ENXIO;
-
- r = request_mem_region(r->start, resource_size(r), pdev->name);
- if (!r)
- return -EBUSY;
-
- mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev);
- if (!mmc) {
- ret = -ENOMEM;
- goto out;
- }
-
- mmc->ops = &imxmci_ops;
- mmc->f_min = 150000;
- mmc->f_max = CLK_RATE/2;
- mmc->ocr_avail = MMC_VDD_32_33;
- mmc->caps = MMC_CAP_4_BIT_DATA;
-
- /* MMC core transfer sizes tunable parameters */
- mmc->max_segs = 64;
- mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */
- mmc->max_req_size = 64*512; /* default PAGE_CACHE_SIZE */
- mmc->max_blk_size = 2048;
- mmc->max_blk_count = 65535;
-
- host = mmc_priv(mmc);
- host->base = ioremap(r->start, resource_size(r));
- if (!host->base) {
- ret = -ENOMEM;
- goto out;
- }
-
- host->mmc = mmc;
- host->dma_allocated = 0;
- host->pdata = pdev->dev.platform_data;
- if (!host->pdata)
- dev_warn(&pdev->dev, "No platform data provided!\n");
-
- spin_lock_init(&host->lock);
- host->res = r;
- host->irq = irq;
-
- host->clk = clk_get(&pdev->dev, "perclk2");
- if (IS_ERR(host->clk)) {
- ret = PTR_ERR(host->clk);
- goto out;
- }
- clk_enable(host->clk);
-
- imx_gpio_mode(PB8_PF_SD_DAT0);
- imx_gpio_mode(PB9_PF_SD_DAT1);
- imx_gpio_mode(PB10_PF_SD_DAT2);
- /* Configured as GPIO with pull-up to ensure the right MMC card mode */
- /* Switched to PB11_PF_SD_DAT3 if a 4-bit bus is configured */
- imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
- /* imx_gpio_mode(PB11_PF_SD_DAT3); */
- imx_gpio_mode(PB12_PF_SD_CLK);
- imx_gpio_mode(PB13_PF_SD_CMD);
-
- imxmci_softreset(host);
-
- rev_no = readw(host->base + MMC_REG_REV_NO);
- if (rev_no != 0x390) {
- dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
- readw(host->base + MMC_REG_REV_NO));
- goto out;
- }
-
- /* recommended in data sheet */
- writew(0x2db4, host->base + MMC_REG_READ_TO);
-
- host->imask = IMXMCI_INT_MASK_DEFAULT;
- writew(host->imask, host->base + MMC_REG_INT_MASK);
-
- host->dma = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_LOW);
- if(host->dma < 0) {
- dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n");
- ret = -EBUSY;
- goto out;
- }
- host->dma_allocated = 1;
- imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host);
- RSSR(host->dma) = DMA_REQ_SDHC;
-
- tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host);
- host->status_reg=0;
- host->pending_events=0;
-
- ret = request_irq(host->irq, imxmci_irq, 0, DRIVER_NAME, host);
- if (ret)
- goto out;
-
- if (host->pdata && host->pdata->card_present)
- host->present = host->pdata->card_present(mmc_dev(mmc));
- else /* if there is no way to detect, assume the card is present */
- host->present = 1;
-
- init_timer(&host->timer);
- host->timer.data = (unsigned long)host;
- host->timer.function = imxmci_check_status;
- add_timer(&host->timer);
- mod_timer(&host->timer, jiffies + (HZ >> 1));
-
- platform_set_drvdata(pdev, mmc);
-
- mmc_add_host(mmc);
-
- return 0;
-
-out:
- if (host) {
- if (host->dma_allocated) {
- imx_dma_free(host->dma);
- host->dma_allocated = 0;
- }
- if (host->clk) {
- clk_disable(host->clk);
- clk_put(host->clk);
- }
- if (host->base)
- iounmap(host->base);
- }
- if (mmc)
- mmc_free_host(mmc);
- release_mem_region(r->start, resource_size(r));
- return ret;
-}
-
-static int __exit imxmci_remove(struct platform_device *pdev)
-{
- struct mmc_host *mmc = platform_get_drvdata(pdev);
-
- platform_set_drvdata(pdev, NULL);
-
- if (mmc) {
- struct imxmci_host *host = mmc_priv(mmc);
-
- tasklet_disable(&host->tasklet);
-
- del_timer_sync(&host->timer);
- mmc_remove_host(mmc);
-
- free_irq(host->irq, host);
- iounmap(host->base);
- if (host->dma_allocated) {
- imx_dma_free(host->dma);
- host->dma_allocated = 0;
- }
-
- tasklet_kill(&host->tasklet);
-
- clk_disable(host->clk);
- clk_put(host->clk);
-
- release_mem_region(host->res->start, resource_size(host->res));
-
- mmc_free_host(mmc);
- }
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int imxmci_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct mmc_host *mmc = platform_get_drvdata(dev);
- int ret = 0;
-
- if (mmc)
- ret = mmc_suspend_host(mmc);
-
- return ret;
-}
-
-static int imxmci_resume(struct platform_device *dev)
-{
- struct mmc_host *mmc = platform_get_drvdata(dev);
- struct imxmci_host *host;
- int ret = 0;
-
- if (mmc) {
- host = mmc_priv(mmc);
- if (host)
- set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
- ret = mmc_resume_host(mmc);
- }
-
- return ret;
-}
-#else
-#define imxmci_suspend NULL
-#define imxmci_resume NULL
-#endif /* CONFIG_PM */
-
-static struct platform_driver imxmci_driver = {
- .remove = __exit_p(imxmci_remove),
- .suspend = imxmci_suspend,
- .resume = imxmci_resume,
- .driver = {
- .name = DRIVER_NAME,
- .owner = THIS_MODULE,
- }
-};
-
-static int __init imxmci_init(void)
-{
- return platform_driver_probe(&imxmci_driver, imxmci_probe);
-}
-
-static void __exit imxmci_exit(void)
-{
- platform_driver_unregister(&imxmci_driver);
-}
-
-module_init(imxmci_init);
-module_exit(imxmci_exit);
-
-MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
-MODULE_AUTHOR("Sascha Hauer, Pengutronix");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:imx-mmc");
diff --git a/drivers/mmc/host/imxmmc.h b/drivers/mmc/host/imxmmc.h
deleted file mode 100644
index 09d5d4ee3a7..00000000000
--- a/drivers/mmc/host/imxmmc.h
+++ /dev/null
@@ -1,64 +0,0 @@
-#define MMC_REG_STR_STP_CLK 0x00
-#define MMC_REG_STATUS 0x04
-#define MMC_REG_CLK_RATE 0x08
-#define MMC_REG_CMD_DAT_CONT 0x0C
-#define MMC_REG_RES_TO 0x10
-#define MMC_REG_READ_TO 0x14
-#define MMC_REG_BLK_LEN 0x18
-#define MMC_REG_NOB 0x1C
-#define MMC_REG_REV_NO 0x20
-#define MMC_REG_INT_MASK 0x24
-#define MMC_REG_CMD 0x28
-#define MMC_REG_ARGH 0x2C
-#define MMC_REG_ARGL 0x30
-#define MMC_REG_RES_FIFO 0x34
-#define MMC_REG_BUFFER_ACCESS 0x38
-
-#define STR_STP_CLK_IPG_CLK_GATE_DIS (1<<15)
-#define STR_STP_CLK_IPG_PERCLK_GATE_DIS (1<<14)
-#define STR_STP_CLK_ENDIAN (1<<5)
-#define STR_STP_CLK_RESET (1<<3)
-#define STR_STP_CLK_ENABLE (1<<2)
-#define STR_STP_CLK_START_CLK (1<<1)
-#define STR_STP_CLK_STOP_CLK (1<<0)
-#define STATUS_CARD_PRESENCE (1<<15)
-#define STATUS_SDIO_INT_ACTIVE (1<<14)
-#define STATUS_END_CMD_RESP (1<<13)
-#define STATUS_WRITE_OP_DONE (1<<12)
-#define STATUS_DATA_TRANS_DONE (1<<11)
-#define STATUS_WR_CRC_ERROR_CODE_MASK (3<<10)
-#define STATUS_CARD_BUS_CLK_RUN (1<<8)
-#define STATUS_APPL_BUFF_FF (1<<7)
-#define STATUS_APPL_BUFF_FE (1<<6)
-#define STATUS_RESP_CRC_ERR (1<<5)
-#define STATUS_CRC_READ_ERR (1<<3)
-#define STATUS_CRC_WRITE_ERR (1<<2)
-#define STATUS_TIME_OUT_RESP (1<<1)
-#define STATUS_TIME_OUT_READ (1<<0)
-#define STATUS_ERR_MASK 0x2f
-#define CLK_RATE_PRESCALER(x) ((x) & 0x7)
-#define CLK_RATE_CLK_RATE(x) (((x) & 0x7) << 3)
-#define CMD_DAT_CONT_CMD_RESP_LONG_OFF (1<<12)
-#define CMD_DAT_CONT_STOP_READWAIT (1<<11)
-#define CMD_DAT_CONT_START_READWAIT (1<<10)
-#define CMD_DAT_CONT_BUS_WIDTH_1 (0<<8)
-#define CMD_DAT_CONT_BUS_WIDTH_4 (2<<8)
-#define CMD_DAT_CONT_INIT (1<<7)
-#define CMD_DAT_CONT_BUSY (1<<6)
-#define CMD_DAT_CONT_STREAM_BLOCK (1<<5)
-#define CMD_DAT_CONT_WRITE (1<<4)
-#define CMD_DAT_CONT_DATA_ENABLE (1<<3)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R1 (1)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R2 (2)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R3 (3)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R4 (4)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R5 (5)
-#define CMD_DAT_CONT_RESPONSE_FORMAT_R6 (6)
-#define INT_MASK_AUTO_CARD_DETECT (1<<6)
-#define INT_MASK_DAT0_EN (1<<5)
-#define INT_MASK_SDIO (1<<4)
-#define INT_MASK_BUF_READY (1<<3)
-#define INT_MASK_END_CMD_RES (1<<2)
-#define INT_MASK_WRITE_OP_DONE (1<<1)
-#define INT_MASK_DATA_TRAN (1<<0)
-#define INT_ALL (0x7f)
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index b6f38421d54..f0fcce40cd8 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -15,6 +15,7 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
@@ -25,6 +26,7 @@
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
@@ -1207,21 +1209,76 @@ static const struct mmc_host_ops mmci_ops = {
.get_cd = mmci_get_cd,
};
+#ifdef CONFIG_OF
+static void mmci_dt_populate_generic_pdata(struct device_node *np,
+ struct mmci_platform_data *pdata)
+{
+ int bus_width = 0;
+
+ pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
+ if (!pdata->gpio_wp)
+ pdata->gpio_wp = -1;
+
+ pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);
+ if (!pdata->gpio_cd)
+ pdata->gpio_cd = -1;
+
+ if (of_get_property(np, "cd-inverted", NULL))
+ pdata->cd_invert = true;
+ else
+ pdata->cd_invert = false;
+
+ of_property_read_u32(np, "max-frequency", &pdata->f_max);
+ if (!pdata->f_max)
+ pr_warn("%s has no 'max-frequency' property\n", np->full_name);
+
+ if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
+ pdata->capabilities |= MMC_CAP_MMC_HIGHSPEED;
+ if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
+ pdata->capabilities |= MMC_CAP_SD_HIGHSPEED;
+
+ of_property_read_u32(np, "bus-width", &bus_width);
+ switch (bus_width) {
+ case 0:
+ /* No bus-width supplied. */
+ break;
+ case 4:
+ pdata->capabilities |= MMC_CAP_4_BIT_DATA;
+ break;
+ case 8:
+ pdata->capabilities |= MMC_CAP_8_BIT_DATA;
+ break;
+ default:
+ pr_warn("%s: Unsupported bus width\n", np->full_name);
+ }
+}
+#else
+static void mmci_dt_populate_generic_pdata(struct device_node *np,
+ struct mmci_platform_data *pdata)
+{
+ return;
+}
+#endif
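+
+/*
+ * Illustrative only, not part of this patch: a device tree node that
+ * mmci_dt_populate_generic_pdata() above can parse might look roughly like
+ * the following (node name, GPIO specifiers and values are invented; the
+ * property names are the ones read above):
+ *
+ *	mmc {
+ *		wp-gpios = <&gpio 1 0>;
+ *		cd-gpios = <&gpio 2 0>;
+ *		cd-inverted;
+ *		max-frequency = <50000000>;
+ *		bus-width = <4>;
+ *		mmc-cap-sd-highspeed;
+ *	};
+ */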
+
static int __devinit mmci_probe(struct amba_device *dev,
const struct amba_id *id)
{
struct mmci_platform_data *plat = dev->dev.platform_data;
+ struct device_node *np = dev->dev.of_node;
struct variant_data *variant = id->data;
struct mmci_host *host;
struct mmc_host *mmc;
int ret;
- /* must have platform data */
- if (!plat) {
- ret = -EINVAL;
- goto out;
+ /* Must have platform data or Device Tree. */
+ if (!plat && !np) {
+ dev_err(&dev->dev, "No plat data or DT found\n");
+ return -EINVAL;
}
+ if (np)
+ mmci_dt_populate_generic_pdata(np, plat);
+
ret = amba_request_regions(dev, DRIVER_NAME);
if (ret)
goto out;
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index eeb8cd125b0..3b9136c1a47 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -19,6 +19,7 @@
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/irq.h>
+#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/mmc/host.h>
@@ -51,6 +52,7 @@ struct mvsd_host {
struct device *dev;
struct resource *res;
int irq;
+ struct clk *clk;
int gpio_card_detect;
int gpio_write_protect;
};
@@ -770,6 +772,13 @@ static int __init mvsd_probe(struct platform_device *pdev)
} else
host->irq = irq;
+ /* Not all platforms can gate the clock, so it is not
+ an error if the clock does not exist. */
+ host->clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(host->clk)) {
+ clk_prepare_enable(host->clk);
+ }
+
if (mvsd_data->gpio_card_detect) {
ret = gpio_request(mvsd_data->gpio_card_detect,
DRIVER_NAME " cd");
@@ -854,6 +863,11 @@ static int __exit mvsd_remove(struct platform_device *pdev)
mvsd_power_down(host);
iounmap(host->base);
release_resource(host->res);
+
+ if (!IS_ERR(host->clk)) {
+ clk_disable_unprepare(host->clk);
+ clk_put(host->clk);
+ }
mmc_free_host(mmc);
}
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index b2058b43232..28ed52d58f7 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -136,7 +136,8 @@ struct mxcmci_host {
u16 rev_no;
unsigned int cmdat;
- struct clk *clk;
+ struct clk *clk_ipg;
+ struct clk *clk_per;
int clock;
@@ -672,7 +673,7 @@ static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
{
unsigned int divider;
int prescaler = 0;
- unsigned int clk_in = clk_get_rate(host->clk);
+ unsigned int clk_in = clk_get_rate(host->clk_per);
while (prescaler <= 0x800) {
for (divider = 1; divider <= 0xF; divider++) {
@@ -900,12 +901,20 @@ static int mxcmci_probe(struct platform_device *pdev)
host->res = r;
host->irq = irq;
- host->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(host->clk)) {
- ret = PTR_ERR(host->clk);
+ host->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(host->clk_ipg)) {
+ ret = PTR_ERR(host->clk_ipg);
goto out_iounmap;
}
- clk_enable(host->clk);
+
+ host->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(host->clk_per)) {
+ ret = PTR_ERR(host->clk_per);
+ goto out_iounmap;
+ }
+
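+	/*
+	 * Note (editorial, hedged): clk_per supplies the rate used for the SD bus
+	 * clock divider (see mxcmci_set_clk_rate and the f_min/f_max setup), while
+	 * clk_ipg is, on i.MX, typically the register interface clock; both are
+	 * enabled here before the controller is touched.
+	 */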
+ clk_prepare_enable(host->clk_per);
+ clk_prepare_enable(host->clk_ipg);
mxcmci_softreset(host);
@@ -917,8 +926,8 @@ static int mxcmci_probe(struct platform_device *pdev)
goto out_clk_put;
}
- mmc->f_min = clk_get_rate(host->clk) >> 16;
- mmc->f_max = clk_get_rate(host->clk) >> 1;
+ mmc->f_min = clk_get_rate(host->clk_per) >> 16;
+ mmc->f_max = clk_get_rate(host->clk_per) >> 1;
/* recommended in data sheet */
writew(0x2db4, host->base + MMC_REG_READ_TO);
@@ -967,8 +976,8 @@ out_free_dma:
if (host->dma)
dma_release_channel(host->dma);
out_clk_put:
- clk_disable(host->clk);
- clk_put(host->clk);
+ clk_disable_unprepare(host->clk_per);
+ clk_disable_unprepare(host->clk_ipg);
out_iounmap:
iounmap(host->base);
out_free:
@@ -999,8 +1008,8 @@ static int mxcmci_remove(struct platform_device *pdev)
if (host->dma)
dma_release_channel(host->dma);
- clk_disable(host->clk);
- clk_put(host->clk);
+ clk_disable_unprepare(host->clk_per);
+ clk_disable_unprepare(host->clk_ipg);
release_mem_region(host->res->start, resource_size(host->res));
@@ -1018,7 +1027,8 @@ static int mxcmci_suspend(struct device *dev)
if (mmc)
ret = mmc_suspend_host(mmc);
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk_per);
+ clk_disable_unprepare(host->clk_ipg);
return ret;
}
@@ -1029,7 +1039,8 @@ static int mxcmci_resume(struct device *dev)
struct mxcmci_host *host = mmc_priv(mmc);
int ret = 0;
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk_per);
+ clk_prepare_enable(host->clk_ipg);
if (mmc)
ret = mmc_resume_host(mmc);
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index bb03ddda481..34a90266ab1 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -23,6 +23,9 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -40,18 +43,15 @@
#include <linux/module.h>
#include <linux/fsl/mxs-dma.h>
#include <linux/pinctrl/consumer.h>
-
-#include <mach/mxs.h>
-#include <mach/common.h>
-#include <mach/mmc.h>
+#include <linux/stmp_device.h>
+#include <linux/mmc/mxs-mmc.h>
#define DRIVER_NAME "mxs-mmc"
/* card detect polling timeout */
#define MXS_MMC_DETECT_TIMEOUT (HZ/2)
-#define SSP_VERSION_LATEST 4
-#define ssp_is_old() (host->version < SSP_VERSION_LATEST)
+#define ssp_is_old(host) ((host)->devid == IMX23_MMC)
/* SSP registers */
#define HW_SSP_CTRL0 0x000
@@ -86,14 +86,14 @@
#define BM_SSP_BLOCK_SIZE_BLOCK_COUNT (0xffffff << 4)
#define BP_SSP_BLOCK_SIZE_BLOCK_SIZE (0)
#define BM_SSP_BLOCK_SIZE_BLOCK_SIZE (0xf)
-#define HW_SSP_TIMING (ssp_is_old() ? 0x050 : 0x070)
+#define HW_SSP_TIMING(h) (ssp_is_old(h) ? 0x050 : 0x070)
#define BP_SSP_TIMING_TIMEOUT (16)
#define BM_SSP_TIMING_TIMEOUT (0xffff << 16)
#define BP_SSP_TIMING_CLOCK_DIVIDE (8)
#define BM_SSP_TIMING_CLOCK_DIVIDE (0xff << 8)
#define BP_SSP_TIMING_CLOCK_RATE (0)
#define BM_SSP_TIMING_CLOCK_RATE (0xff)
-#define HW_SSP_CTRL1 (ssp_is_old() ? 0x060 : 0x080)
+#define HW_SSP_CTRL1(h) (ssp_is_old(h) ? 0x060 : 0x080)
#define BM_SSP_CTRL1_SDIO_IRQ (1 << 31)
#define BM_SSP_CTRL1_SDIO_IRQ_EN (1 << 30)
#define BM_SSP_CTRL1_RESP_ERR_IRQ (1 << 29)
@@ -116,15 +116,13 @@
#define BM_SSP_CTRL1_WORD_LENGTH (0xf << 4)
#define BP_SSP_CTRL1_SSP_MODE (0)
#define BM_SSP_CTRL1_SSP_MODE (0xf)
-#define HW_SSP_SDRESP0 (ssp_is_old() ? 0x080 : 0x0a0)
-#define HW_SSP_SDRESP1 (ssp_is_old() ? 0x090 : 0x0b0)
-#define HW_SSP_SDRESP2 (ssp_is_old() ? 0x0a0 : 0x0c0)
-#define HW_SSP_SDRESP3 (ssp_is_old() ? 0x0b0 : 0x0d0)
-#define HW_SSP_STATUS (ssp_is_old() ? 0x0c0 : 0x100)
+#define HW_SSP_SDRESP0(h) (ssp_is_old(h) ? 0x080 : 0x0a0)
+#define HW_SSP_SDRESP1(h) (ssp_is_old(h) ? 0x090 : 0x0b0)
+#define HW_SSP_SDRESP2(h) (ssp_is_old(h) ? 0x0a0 : 0x0c0)
+#define HW_SSP_SDRESP3(h) (ssp_is_old(h) ? 0x0b0 : 0x0d0)
+#define HW_SSP_STATUS(h) (ssp_is_old(h) ? 0x0c0 : 0x100)
#define BM_SSP_STATUS_CARD_DETECT (1 << 28)
#define BM_SSP_STATUS_SDIO_IRQ (1 << 17)
-#define HW_SSP_VERSION (cpu_is_mx23() ? 0x110 : 0x130)
-#define BP_SSP_VERSION_MAJOR (24)
#define BF_SSP(value, field) (((value) << BP_SSP_##field) & BM_SSP_##field)
@@ -139,6 +137,11 @@
#define SSP_PIO_NUM 3
+enum mxs_mmc_id {
+ IMX23_MMC,
+ IMX28_MMC,
+};
+
struct mxs_mmc_host {
struct mmc_host *mmc;
struct mmc_request *mrq;
@@ -146,9 +149,7 @@ struct mxs_mmc_host {
struct mmc_data *data;
void __iomem *base;
- int irq;
- struct resource *res;
- struct resource *dma_res;
+ int dma_channel;
struct clk *clk;
unsigned int clk_rate;
@@ -158,32 +159,28 @@ struct mxs_mmc_host {
enum dma_transfer_direction slave_dirn;
u32 ssp_pio_words[SSP_PIO_NUM];
- unsigned int version;
+ enum mxs_mmc_id devid;
unsigned char bus_width;
spinlock_t lock;
int sdio_irq_en;
+ int wp_gpio;
};
static int mxs_mmc_get_ro(struct mmc_host *mmc)
{
struct mxs_mmc_host *host = mmc_priv(mmc);
- struct mxs_mmc_platform_data *pdata =
- mmc_dev(host->mmc)->platform_data;
-
- if (!pdata)
- return -EFAULT;
- if (!gpio_is_valid(pdata->wp_gpio))
+ if (!gpio_is_valid(host->wp_gpio))
return -EINVAL;
- return gpio_get_value(pdata->wp_gpio);
+ return gpio_get_value(host->wp_gpio);
}
static int mxs_mmc_get_cd(struct mmc_host *mmc)
{
struct mxs_mmc_host *host = mmc_priv(mmc);
- return !(readl(host->base + HW_SSP_STATUS) &
+ return !(readl(host->base + HW_SSP_STATUS(host)) &
BM_SSP_STATUS_CARD_DETECT);
}
@@ -191,7 +188,7 @@ static void mxs_mmc_reset(struct mxs_mmc_host *host)
{
u32 ctrl0, ctrl1;
- mxs_reset_block(host->base);
+ stmp_reset_block(host->base);
ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
@@ -207,7 +204,7 @@ static void mxs_mmc_reset(struct mxs_mmc_host *host)
writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
BF_SSP(2, TIMING_CLOCK_DIVIDE) |
BF_SSP(0, TIMING_CLOCK_RATE),
- host->base + HW_SSP_TIMING);
+ host->base + HW_SSP_TIMING(host));
if (host->sdio_irq_en) {
ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
@@ -215,7 +212,7 @@ static void mxs_mmc_reset(struct mxs_mmc_host *host)
}
writel(ctrl0, host->base + HW_SSP_CTRL0);
- writel(ctrl1, host->base + HW_SSP_CTRL1);
+ writel(ctrl1, host->base + HW_SSP_CTRL1(host));
}
static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
@@ -229,12 +226,12 @@ static void mxs_mmc_request_done(struct mxs_mmc_host *host)
if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
if (mmc_resp_type(cmd) & MMC_RSP_136) {
- cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0);
- cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1);
- cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2);
- cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3);
+ cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0(host));
+ cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1(host));
+ cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2(host));
+ cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3(host));
} else {
- cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0);
+ cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0(host));
}
}
@@ -277,9 +274,9 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
spin_lock(&host->lock);
- stat = readl(host->base + HW_SSP_CTRL1);
+ stat = readl(host->base + HW_SSP_CTRL1(host));
writel(stat & MXS_MMC_IRQ_BITS,
- host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
+ host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR);
if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
mmc_signal_sdio_irq(host->mmc);
@@ -485,7 +482,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
blocks = 1;
/* xfer count, block size and count need to be set differently */
- if (ssp_is_old()) {
+ if (ssp_is_old(host)) {
ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
@@ -509,10 +506,10 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
/* set the timeout count */
timeout = mxs_ns_to_ssp_ticks(host->clk_rate, data->timeout_ns);
- val = readl(host->base + HW_SSP_TIMING);
+ val = readl(host->base + HW_SSP_TIMING(host));
val &= ~(BM_SSP_TIMING_TIMEOUT);
val |= BF_SSP(timeout, TIMING_TIMEOUT);
- writel(val, host->base + HW_SSP_TIMING);
+ writel(val, host->base + HW_SSP_TIMING(host));
/* pio */
host->ssp_pio_words[0] = ctrl0;
@@ -598,11 +595,11 @@ static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate)
ssp_sck = ssp_clk / clock_divide / (1 + clock_rate);
- val = readl(host->base + HW_SSP_TIMING);
+ val = readl(host->base + HW_SSP_TIMING(host));
val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE);
val |= BF_SSP(clock_divide, TIMING_CLOCK_DIVIDE);
val |= BF_SSP(clock_rate, TIMING_CLOCK_RATE);
- writel(val, host->base + HW_SSP_TIMING);
+ writel(val, host->base + HW_SSP_TIMING(host));
host->clk_rate = ssp_sck;
@@ -637,18 +634,19 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
if (enable) {
writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
- host->base + HW_SSP_CTRL0 + MXS_SET_ADDR);
+ host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
- host->base + HW_SSP_CTRL1 + MXS_SET_ADDR);
+ host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET);
- if (readl(host->base + HW_SSP_STATUS) & BM_SSP_STATUS_SDIO_IRQ)
+ if (readl(host->base + HW_SSP_STATUS(host)) &
+ BM_SSP_STATUS_SDIO_IRQ)
mmc_signal_sdio_irq(host->mmc);
} else {
writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
- host->base + HW_SSP_CTRL0 + MXS_CLR_ADDR);
+ host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
- host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR);
+ host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR);
}
spin_unlock_irqrestore(&host->lock, flags);
@@ -669,7 +667,7 @@ static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param)
if (!mxs_dma_is_apbh(chan))
return false;
- if (chan->chan_id != host->dma_res->start)
+ if (chan->chan_id != host->dma_channel)
return false;
chan->private = &host->dma_data;
@@ -677,11 +675,34 @@ static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param)
return true;
}
+static struct platform_device_id mxs_mmc_ids[] = {
+ {
+ .name = "imx23-mmc",
+ .driver_data = IMX23_MMC,
+ }, {
+ .name = "imx28-mmc",
+ .driver_data = IMX28_MMC,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, mxs_mmc_ids);
+
+static const struct of_device_id mxs_mmc_dt_ids[] = {
+ { .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_MMC, },
+ { .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_MMC, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);
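+
+/*
+ * Illustrative only, not part of this patch: a device tree node matching the
+ * table above and the properties read in mxs_mmc_probe() might look roughly
+ * like this (node name and values are invented, property names are real):
+ *
+ *	ssp0 {
+ *		compatible = "fsl,imx28-mmc";
+ *		bus-width = <4>;
+ *		wp-gpios = <&gpio2 12 0>;
+ *		fsl,ssp-dma-channel = <0>;
+ *	};
+ */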
+
static int mxs_mmc_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id =
+ of_match_device(mxs_mmc_dt_ids, &pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
struct mxs_mmc_host *host;
struct mmc_host *mmc;
- struct resource *iores, *dmares, *r;
+ struct resource *iores, *dmares;
struct mxs_mmc_platform_data *pdata;
struct pinctrl *pinctrl;
int ret = 0, irq_err, irq_dma;
@@ -691,46 +712,51 @@ static int mxs_mmc_probe(struct platform_device *pdev)
dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
irq_err = platform_get_irq(pdev, 0);
irq_dma = platform_get_irq(pdev, 1);
- if (!iores || !dmares || irq_err < 0 || irq_dma < 0)
+ if (!iores || irq_err < 0 || irq_dma < 0)
return -EINVAL;
- r = request_mem_region(iores->start, resource_size(iores), pdev->name);
- if (!r)
- return -EBUSY;
-
mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
- if (!mmc) {
- ret = -ENOMEM;
- goto out_release_mem;
- }
+ if (!mmc)
+ return -ENOMEM;
host = mmc_priv(mmc);
- host->base = ioremap(r->start, resource_size(r));
+ host->base = devm_request_and_ioremap(&pdev->dev, iores);
if (!host->base) {
- ret = -ENOMEM;
+ ret = -EADDRNOTAVAIL;
goto out_mmc_free;
}
- /* only the major version matters */
- host->version = readl(host->base + HW_SSP_VERSION) >>
- BP_SSP_VERSION_MAJOR;
+ if (np) {
+ host->devid = (enum mxs_mmc_id) of_id->data;
+ /*
+ * TODO: This is a temporary solution and should be changed
+ * to use generic DMA binding later when the helpers get in.
+ */
+ ret = of_property_read_u32(np, "fsl,ssp-dma-channel",
+ &host->dma_channel);
+ if (ret) {
+ dev_err(mmc_dev(host->mmc),
+ "failed to get dma channel\n");
+ goto out_mmc_free;
+ }
+ } else {
+ host->devid = pdev->id_entry->driver_data;
+ host->dma_channel = dmares->start;
+ }
host->mmc = mmc;
- host->res = r;
- host->dma_res = dmares;
- host->irq = irq_err;
host->sdio_irq_en = 0;
pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
if (IS_ERR(pinctrl)) {
ret = PTR_ERR(pinctrl);
- goto out_iounmap;
+ goto out_mmc_free;
}
host->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
ret = PTR_ERR(host->clk);
- goto out_iounmap;
+ goto out_mmc_free;
}
clk_prepare_enable(host->clk);
@@ -752,11 +778,20 @@ static int mxs_mmc_probe(struct platform_device *pdev)
MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;
pdata = mmc_dev(host->mmc)->platform_data;
- if (pdata) {
+ if (!pdata) {
+ u32 bus_width = 0;
+ of_property_read_u32(np, "bus-width", &bus_width);
+ if (bus_width == 4)
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+ else if (bus_width == 8)
+ mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
+ host->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
+ } else {
if (pdata->flags & SLOTF_8_BIT_CAPABLE)
mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
if (pdata->flags & SLOTF_4_BIT_CAPABLE)
mmc->caps |= MMC_CAP_4_BIT_DATA;
+ host->wp_gpio = pdata->wp_gpio;
}
mmc->f_min = 400000;
@@ -765,13 +800,14 @@ static int mxs_mmc_probe(struct platform_device *pdev)
mmc->max_segs = 52;
mmc->max_blk_size = 1 << 0xf;
- mmc->max_blk_count = (ssp_is_old()) ? 0xff : 0xffffff;
- mmc->max_req_size = (ssp_is_old()) ? 0xffff : 0xffffffff;
+ mmc->max_blk_count = (ssp_is_old(host)) ? 0xff : 0xffffff;
+ mmc->max_req_size = (ssp_is_old(host)) ? 0xffff : 0xffffffff;
mmc->max_seg_size = dma_get_max_seg_size(host->dmach->device->dev);
platform_set_drvdata(pdev, mmc);
- ret = request_irq(host->irq, mxs_mmc_irq_handler, 0, DRIVER_NAME, host);
+ ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
+ DRIVER_NAME, host);
if (ret)
goto out_free_dma;
@@ -779,26 +815,20 @@ static int mxs_mmc_probe(struct platform_device *pdev)
ret = mmc_add_host(mmc);
if (ret)
- goto out_free_irq;
+ goto out_free_dma;
dev_info(mmc_dev(host->mmc), "initialized\n");
return 0;
-out_free_irq:
- free_irq(host->irq, host);
out_free_dma:
if (host->dmach)
dma_release_channel(host->dmach);
out_clk_put:
clk_disable_unprepare(host->clk);
clk_put(host->clk);
-out_iounmap:
- iounmap(host->base);
out_mmc_free:
mmc_free_host(mmc);
-out_release_mem:
- release_mem_region(iores->start, resource_size(iores));
return ret;
}
@@ -806,12 +836,9 @@ static int mxs_mmc_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
struct mxs_mmc_host *host = mmc_priv(mmc);
- struct resource *res = host->res;
mmc_remove_host(mmc);
- free_irq(host->irq, host);
-
platform_set_drvdata(pdev, NULL);
if (host->dmach)
@@ -820,12 +847,8 @@ static int mxs_mmc_remove(struct platform_device *pdev)
clk_disable_unprepare(host->clk);
clk_put(host->clk);
- iounmap(host->base);
-
mmc_free_host(mmc);
- release_mem_region(res->start, resource_size(res));
-
return 0;
}
@@ -865,11 +888,13 @@ static const struct dev_pm_ops mxs_mmc_pm_ops = {
static struct platform_driver mxs_mmc_driver = {
.probe = mxs_mmc_probe,
.remove = mxs_mmc_remove,
+ .id_table = mxs_mmc_ids,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &mxs_mmc_pm_ops,
+ .of_match_table = mxs_mmc_dt_ids,
#endif
},
};
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 887c0e598cf..552196c764d 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -169,11 +169,11 @@ struct mmc_omap_host {
struct timer_list clk_timer;
spinlock_t clk_lock; /* for changing enabled state */
unsigned int fclk_enabled:1;
+ struct workqueue_struct *mmc_omap_wq;
struct omap_mmc_platform_data *pdata;
};
-static struct workqueue_struct *mmc_omap_wq;
static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot)
{
@@ -291,7 +291,7 @@ static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled)
host->next_slot = new_slot;
host->mmc = new_slot->mmc;
spin_unlock_irqrestore(&host->slot_lock, flags);
- queue_work(mmc_omap_wq, &host->slot_release_work);
+ queue_work(host->mmc_omap_wq, &host->slot_release_work);
return;
}
@@ -459,7 +459,7 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
}
host->stop_data = data;
- queue_work(mmc_omap_wq, &host->send_stop_work);
+ queue_work(host->mmc_omap_wq, &host->send_stop_work);
}
static void
@@ -639,7 +639,7 @@ mmc_omap_cmd_timer(unsigned long data)
OMAP_MMC_WRITE(host, IE, 0);
disable_irq(host->irq);
host->abort = 1;
- queue_work(mmc_omap_wq, &host->cmd_abort_work);
+ queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
}
spin_unlock_irqrestore(&host->slot_lock, flags);
}
@@ -828,7 +828,7 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
host->abort = 1;
OMAP_MMC_WRITE(host, IE, 0);
disable_irq_nosync(host->irq);
- queue_work(mmc_omap_wq, &host->cmd_abort_work);
+ queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
return IRQ_HANDLED;
}
@@ -1389,13 +1389,13 @@ static void mmc_omap_remove_slot(struct mmc_omap_slot *slot)
tasklet_kill(&slot->cover_tasklet);
del_timer_sync(&slot->cover_timer);
- flush_workqueue(mmc_omap_wq);
+ flush_workqueue(slot->host->mmc_omap_wq);
mmc_remove_host(mmc);
mmc_free_host(mmc);
}
-static int __init mmc_omap_probe(struct platform_device *pdev)
+static int __devinit mmc_omap_probe(struct platform_device *pdev)
{
struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
struct mmc_omap_host *host = NULL;
@@ -1497,6 +1497,10 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
+ host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
+ if (!host->mmc_omap_wq)
+ goto err_plat_cleanup;
+
return 0;
err_plat_cleanup:
@@ -1518,7 +1522,7 @@ err_free_mem_region:
return ret;
}
-static int mmc_omap_remove(struct platform_device *pdev)
+static int __devexit mmc_omap_remove(struct platform_device *pdev)
{
struct mmc_omap_host *host = platform_get_drvdata(pdev);
int i;
@@ -1542,6 +1546,7 @@ static int mmc_omap_remove(struct platform_device *pdev)
iounmap(host->virt_base);
release_mem_region(pdev->resource[0].start,
pdev->resource[0].end - pdev->resource[0].start + 1);
+ destroy_workqueue(host->mmc_omap_wq);
kfree(host);
@@ -1599,7 +1604,8 @@ static int mmc_omap_resume(struct platform_device *pdev)
#endif
static struct platform_driver mmc_omap_driver = {
- .remove = mmc_omap_remove,
+ .probe = mmc_omap_probe,
+ .remove = __devexit_p(mmc_omap_remove),
.suspend = mmc_omap_suspend,
.resume = mmc_omap_resume,
.driver = {
@@ -1608,29 +1614,7 @@ static struct platform_driver mmc_omap_driver = {
},
};
-static int __init mmc_omap_init(void)
-{
- int ret;
-
- mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
- if (!mmc_omap_wq)
- return -ENOMEM;
-
- ret = platform_driver_probe(&mmc_omap_driver, mmc_omap_probe);
- if (ret)
- destroy_workqueue(mmc_omap_wq);
- return ret;
-}
-
-static void __exit mmc_omap_exit(void)
-{
- platform_driver_unregister(&mmc_omap_driver);
- destroy_workqueue(mmc_omap_wq);
-}
-
-module_init(mmc_omap_init);
-module_exit(mmc_omap_exit);
-
+module_platform_driver(mmc_omap_driver);
MODULE_DESCRIPTION("OMAP Multimedia Card driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 56d4499d438..9a7a60aeb19 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -85,12 +85,14 @@
#define BRR_ENABLE (1 << 5)
#define DTO_ENABLE (1 << 20)
#define INIT_STREAM (1 << 1)
+#define ACEN_ACMD12 (1 << 2)
#define DP_SELECT (1 << 21)
#define DDIR (1 << 4)
#define DMA_EN 0x1
#define MSBS (1 << 5)
#define BCE (1 << 1)
#define FOUR_BIT (1 << 1)
+#define DDR (1 << 19)
#define DW8 (1 << 5)
#define CC 0x1
#define TC 0x02
@@ -115,6 +117,7 @@
#define OMAP_MMC_MAX_CLOCK 52000000
#define DRIVER_NAME "omap_hsmmc"
+#define AUTO_CMD12 (1 << 0) /* Auto CMD12 support */
/*
* One controller can have multiple slots, like on some omap boards using
* omap.c controller driver. Luckily this is not currently done on any known
@@ -167,7 +170,6 @@ struct omap_hsmmc_host {
int use_dma, dma_ch;
int dma_line_tx, dma_line_rx;
int slot_id;
- int got_dbclk;
int response_busy;
int context_loss;
int vdd;
@@ -175,6 +177,7 @@ struct omap_hsmmc_host {
int reqs_blocked;
int use_reg;
int req_in_progress;
+ unsigned int flags;
struct omap_hsmmc_next next_data;
struct omap_mmc_platform_data *pdata;
@@ -520,6 +523,10 @@ static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host)
u32 con;
con = OMAP_HSMMC_READ(host->base, CON);
+ if (ios->timing == MMC_TIMING_UHS_DDR50)
+ con |= DDR; /* configure in DDR mode */
+ else
+ con &= ~DDR;
switch (ios->bus_width) {
case MMC_BUS_WIDTH_8:
OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
@@ -766,6 +773,8 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
cmdtype = 0x3;
cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22);
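+	/*
+	 * Editorial note (hedged): ACEN_ACMD12 asks the controller to issue the
+	 * stop command (CMD12) by itself after multi-block transfers, which is
+	 * why omap_hsmmc_xfer_done below only sends data->stop manually when
+	 * AUTO_CMD12 is not set or the transfer ended in error.
+	 */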
+ if ((host->flags & AUTO_CMD12) && mmc_op_multi(cmd->opcode))
+ cmdreg |= ACEN_ACMD12;
if (data) {
cmdreg |= DP_SELECT | MSBS | BCE;
@@ -796,11 +805,12 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
{
int dma_ch;
+ unsigned long flags;
- spin_lock(&host->irq_lock);
+ spin_lock_irqsave(&host->irq_lock, flags);
host->req_in_progress = 0;
dma_ch = host->dma_ch;
- spin_unlock(&host->irq_lock);
+ spin_unlock_irqrestore(&host->irq_lock, flags);
omap_hsmmc_disable_irq(host);
/* Do not complete the request if DMA is still in progress */
@@ -837,11 +847,14 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
else
data->bytes_xfered = 0;
- if (!data->stop) {
+ if (data->stop && ((!(host->flags & AUTO_CMD12)) || data->error)) {
+ omap_hsmmc_start_command(host, data->stop, NULL);
+ } else {
+ if (data->stop)
+ data->stop->resp[0] = OMAP_HSMMC_READ(host->base,
+ RSP76);
omap_hsmmc_request_done(host, data->mrq);
- return;
}
- omap_hsmmc_start_command(host, data->stop, NULL);
}
/*
@@ -874,13 +887,14 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
{
int dma_ch;
+ unsigned long flags;
host->data->error = errno;
- spin_lock(&host->irq_lock);
+ spin_lock_irqsave(&host->irq_lock, flags);
dma_ch = host->dma_ch;
host->dma_ch = -1;
- spin_unlock(&host->irq_lock);
+ spin_unlock_irqrestore(&host->irq_lock, flags);
if (host->use_dma && dma_ch != -1) {
dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
@@ -1082,7 +1096,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
/* Disable the clocks */
pm_runtime_put_sync(host->dev);
- if (host->got_dbclk)
+ if (host->dbclk)
clk_disable(host->dbclk);
/* Turn the power off */
@@ -1093,7 +1107,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1,
vdd);
pm_runtime_get_sync(host->dev);
- if (host->got_dbclk)
+ if (host->dbclk)
clk_enable(host->dbclk);
if (ret != 0)
@@ -1234,6 +1248,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
struct omap_hsmmc_host *host = cb_data;
struct mmc_data *data;
int dma_ch, req_in_progress;
+ unsigned long flags;
if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n",
@@ -1241,9 +1256,9 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
return;
}
- spin_lock(&host->irq_lock);
+ spin_lock_irqsave(&host->irq_lock, flags);
if (host->dma_ch < 0) {
- spin_unlock(&host->irq_lock);
+ spin_unlock_irqrestore(&host->irq_lock, flags);
return;
}
@@ -1253,7 +1268,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
/* Fire up the next transfer. */
omap_hsmmc_config_dma_params(host, data,
data->sg + host->dma_sg_idx);
- spin_unlock(&host->irq_lock);
+ spin_unlock_irqrestore(&host->irq_lock, flags);
return;
}
@@ -1264,7 +1279,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
req_in_progress = host->req_in_progress;
dma_ch = host->dma_ch;
host->dma_ch = -1;
- spin_unlock(&host->irq_lock);
+ spin_unlock_irqrestore(&host->irq_lock, flags);
omap_free_dma(dma_ch);
@@ -1766,7 +1781,7 @@ static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
pdata->slots[0].nonremovable = true;
pdata->slots[0].no_regulator_off_init = true;
}
- of_property_read_u32(np, "ti,bus-width", &bus_width);
+ of_property_read_u32(np, "bus-width", &bus_width);
if (bus_width == 4)
pdata->slots[0].caps |= MMC_CAP_4_BIT_DATA;
else if (bus_width == 8)
@@ -1844,6 +1859,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
host->mapbase = res->start + pdata->reg_offset;
host->base = ioremap(host->mapbase, SZ_4K);
host->power_mode = MMC_POWER_OFF;
+ host->flags = AUTO_CMD12;
host->next_data.cookie = 1;
platform_set_drvdata(pdev, host);
@@ -1885,21 +1901,17 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
omap_hsmmc_context_save(host);
- if (cpu_is_omap2430()) {
- host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
- /*
- * MMC can still work without debounce clock.
- */
- if (IS_ERR(host->dbclk))
- dev_warn(mmc_dev(host->mmc),
- "Failed to get debounce clock\n");
- else
- host->got_dbclk = 1;
-
- if (host->got_dbclk)
- if (clk_enable(host->dbclk) != 0)
- dev_dbg(mmc_dev(host->mmc), "Enabling debounce"
- " clk failed\n");
+ host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
+ /*
+ * MMC can still work without the debounce clock.
+ */
+ if (IS_ERR(host->dbclk)) {
+ dev_warn(mmc_dev(host->mmc), "Failed to get debounce clk\n");
+ host->dbclk = NULL;
+ } else if (clk_enable(host->dbclk) != 0) {
+ dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n");
+ clk_put(host->dbclk);
+ host->dbclk = NULL;
}
/* Since we do only SG emulation, we can have as many segs
@@ -1969,7 +1981,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
ret = request_threaded_irq(mmc_slot(host).card_detect_irq,
NULL,
omap_hsmmc_detect,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
mmc_hostname(mmc), host);
if (ret) {
dev_dbg(mmc_dev(host->mmc),
@@ -2019,7 +2031,7 @@ err_irq:
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
clk_put(host->fclk);
- if (host->got_dbclk) {
+ if (host->dbclk) {
clk_disable(host->dbclk);
clk_put(host->dbclk);
}
@@ -2030,7 +2042,9 @@ err1:
err_alloc:
omap_hsmmc_gpio_free(pdata);
err:
- release_mem_region(res->start, resource_size(res));
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
return ret;
}
@@ -2052,7 +2066,7 @@ static int __devexit omap_hsmmc_remove(struct platform_device *pdev)
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
clk_put(host->fclk);
- if (host->got_dbclk) {
+ if (host->dbclk) {
clk_disable(host->dbclk);
clk_put(host->dbclk);
}
@@ -2110,7 +2124,7 @@ static int omap_hsmmc_suspend(struct device *dev)
OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
}
- if (host->got_dbclk)
+ if (host->dbclk)
clk_disable(host->dbclk);
err:
pm_runtime_put_sync(host->dev);
@@ -2131,7 +2145,7 @@ static int omap_hsmmc_resume(struct device *dev)
pm_runtime_get_sync(host->dev);
- if (host->got_dbclk)
+ if (host->dbclk)
clk_enable(host->dbclk);
if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index d190d04636a..ebbe984e5d0 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -71,6 +71,9 @@ struct pltfm_imx_data {
enum imx_esdhc_type devtype;
struct pinctrl *pinctrl;
struct esdhc_platform_data boarddata;
+ struct clk *clk_ipg;
+ struct clk *clk_ahb;
+ struct clk *clk_per;
};
static struct platform_device_id imx_esdhc_devtype[] = {
@@ -404,7 +407,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
if (!np)
return -ENODEV;
- if (of_get_property(np, "fsl,card-wired", NULL))
+ if (of_get_property(np, "non-removable", NULL))
boarddata->cd_type = ESDHC_CD_PERMANENT;
if (of_get_property(np, "fsl,cd-controller", NULL))
@@ -439,7 +442,6 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_host *host;
struct esdhc_platform_data *boarddata;
- struct clk *clk;
int err;
struct pltfm_imx_data *imx_data;
@@ -460,14 +462,29 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
imx_data->devtype = pdev->id_entry->driver_data;
pltfm_host->priv = imx_data;
- clk = clk_get(mmc_dev(host->mmc), NULL);
- if (IS_ERR(clk)) {
- dev_err(mmc_dev(host->mmc), "clk err\n");
- err = PTR_ERR(clk);
+ imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(imx_data->clk_ipg)) {
+ err = PTR_ERR(imx_data->clk_ipg);
goto err_clk_get;
}
- clk_prepare_enable(clk);
- pltfm_host->clk = clk;
+
+ imx_data->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(imx_data->clk_ahb)) {
+ err = PTR_ERR(imx_data->clk_ahb);
+ goto err_clk_get;
+ }
+
+ imx_data->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(imx_data->clk_per)) {
+ err = PTR_ERR(imx_data->clk_per);
+ goto err_clk_get;
+ }
+
+ pltfm_host->clk = imx_data->clk_per;
+
+ clk_prepare_enable(imx_data->clk_per);
+ clk_prepare_enable(imx_data->clk_ipg);
+ clk_prepare_enable(imx_data->clk_ahb);
imx_data->pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
if (IS_ERR(imx_data->pinctrl)) {
@@ -567,8 +584,9 @@ no_card_detect_irq:
no_card_detect_pin:
no_board_data:
pin_err:
- clk_disable_unprepare(pltfm_host->clk);
- clk_put(pltfm_host->clk);
+ clk_disable_unprepare(imx_data->clk_per);
+ clk_disable_unprepare(imx_data->clk_ipg);
+ clk_disable_unprepare(imx_data->clk_ahb);
err_clk_get:
kfree(imx_data);
err_imx_data:
@@ -594,8 +612,10 @@ static int __devexit sdhci_esdhc_imx_remove(struct platform_device *pdev)
gpio_free(boarddata->cd_gpio);
}
- clk_disable_unprepare(pltfm_host->clk);
- clk_put(pltfm_host->clk);
+ clk_disable_unprepare(imx_data->clk_per);
+ clk_disable_unprepare(imx_data->clk_ipg);
+ clk_disable_unprepare(imx_data->clk_ahb);
+
kfree(imx_data);
sdhci_pltfm_free(pdev);
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index c5c2a48bdd9..d9a4ef4f1ed 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -42,7 +42,8 @@ static struct sdhci_ops sdhci_pltfm_ops = {
#ifdef CONFIG_OF
static bool sdhci_of_wp_inverted(struct device_node *np)
{
- if (of_get_property(np, "sdhci,wp-inverted", NULL))
+ if (of_get_property(np, "sdhci,wp-inverted", NULL) ||
+ of_get_property(np, "wp-inverted", NULL))
return true;
/* Old device trees don't have the wp-inverted property. */
@@ -59,13 +60,16 @@ void sdhci_get_of_property(struct platform_device *pdev)
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
const __be32 *clk;
+ u32 bus_width;
int size;
if (of_device_is_available(np)) {
if (of_get_property(np, "sdhci,auto-cmd12", NULL))
host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
- if (of_get_property(np, "sdhci,1-bit-only", NULL))
+ if (of_get_property(np, "sdhci,1-bit-only", NULL) ||
+ (of_property_read_u32(np, "bus-width", &bus_width) == 0 &&
+ bus_width == 1))
host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
if (sdhci_of_wp_inverted(np))
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 6dfa82e03c7..1fe32dfa7cd 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -75,8 +75,6 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
struct spear_sdhci *sdhci;
int ret;
- BUG_ON(pdev == NULL);
-
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iomem) {
ret = -ENOMEM;
@@ -84,18 +82,18 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
goto err;
}
- if (!request_mem_region(iomem->start, resource_size(iomem),
- "spear-sdhci")) {
+ if (!devm_request_mem_region(&pdev->dev, iomem->start,
+ resource_size(iomem), "spear-sdhci")) {
ret = -EBUSY;
dev_dbg(&pdev->dev, "cannot request region\n");
goto err;
}
- sdhci = kzalloc(sizeof(*sdhci), GFP_KERNEL);
+ sdhci = devm_kzalloc(&pdev->dev, sizeof(*sdhci), GFP_KERNEL);
if (!sdhci) {
ret = -ENOMEM;
dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n");
- goto err_kzalloc;
+ goto err;
}
/* clk enable */
@@ -103,13 +101,13 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
if (IS_ERR(sdhci->clk)) {
ret = PTR_ERR(sdhci->clk);
dev_dbg(&pdev->dev, "Error getting clock\n");
- goto err_clk_get;
+ goto err;
}
ret = clk_enable(sdhci->clk);
if (ret) {
dev_dbg(&pdev->dev, "Error enabling clock\n");
- goto err_clk_enb;
+ goto put_clk;
}
/* overwrite platform_data */
@@ -124,7 +122,7 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
if (IS_ERR(host)) {
ret = PTR_ERR(host);
dev_dbg(&pdev->dev, "error allocating host\n");
- goto err_alloc_host;
+ goto disable_clk;
}
host->hw_name = "sdhci";
@@ -132,17 +130,18 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
host->irq = platform_get_irq(pdev, 0);
host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
- host->ioaddr = ioremap(iomem->start, resource_size(iomem));
+ host->ioaddr = devm_ioremap(&pdev->dev, iomem->start,
+ resource_size(iomem));
if (!host->ioaddr) {
ret = -ENOMEM;
dev_dbg(&pdev->dev, "failed to remap registers\n");
- goto err_ioremap;
+ goto free_host;
}
ret = sdhci_add_host(host);
if (ret) {
dev_dbg(&pdev->dev, "error adding host\n");
- goto err_add_host;
+ goto free_host;
}
platform_set_drvdata(pdev, host);
@@ -161,11 +160,12 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
if (sdhci->data->card_power_gpio >= 0) {
int val = 0;
- ret = gpio_request(sdhci->data->card_power_gpio, "sdhci");
+ ret = devm_gpio_request(&pdev->dev,
+ sdhci->data->card_power_gpio, "sdhci");
if (ret < 0) {
dev_dbg(&pdev->dev, "gpio request fail: %d\n",
sdhci->data->card_power_gpio);
- goto err_pgpio_request;
+ goto set_drvdata;
}
if (sdhci->data->power_always_enb)
@@ -177,60 +177,48 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
if (ret) {
dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
sdhci->data->card_power_gpio);
- goto err_pgpio_direction;
+ goto set_drvdata;
}
}
if (sdhci->data->card_int_gpio >= 0) {
- ret = gpio_request(sdhci->data->card_int_gpio, "sdhci");
+ ret = devm_gpio_request(&pdev->dev, sdhci->data->card_int_gpio,
+ "sdhci");
if (ret < 0) {
dev_dbg(&pdev->dev, "gpio request fail: %d\n",
sdhci->data->card_int_gpio);
- goto err_igpio_request;
+ goto set_drvdata;
}
ret = gpio_direction_input(sdhci->data->card_int_gpio);
if (ret) {
dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
sdhci->data->card_int_gpio);
- goto err_igpio_direction;
+ goto set_drvdata;
}
- ret = request_irq(gpio_to_irq(sdhci->data->card_int_gpio),
+ ret = devm_request_irq(&pdev->dev,
+ gpio_to_irq(sdhci->data->card_int_gpio),
sdhci_gpio_irq, IRQF_TRIGGER_LOW,
mmc_hostname(host->mmc), pdev);
if (ret) {
dev_dbg(&pdev->dev, "gpio request irq fail: %d\n",
sdhci->data->card_int_gpio);
- goto err_igpio_request_irq;
+ goto set_drvdata;
}
}
return 0;
-err_igpio_request_irq:
-err_igpio_direction:
- if (sdhci->data->card_int_gpio >= 0)
- gpio_free(sdhci->data->card_int_gpio);
-err_igpio_request:
-err_pgpio_direction:
- if (sdhci->data->card_power_gpio >= 0)
- gpio_free(sdhci->data->card_power_gpio);
-err_pgpio_request:
+set_drvdata:
platform_set_drvdata(pdev, NULL);
sdhci_remove_host(host, 1);
-err_add_host:
- iounmap(host->ioaddr);
-err_ioremap:
+free_host:
sdhci_free_host(host);
-err_alloc_host:
+disable_clk:
clk_disable(sdhci->clk);
-err_clk_enb:
+put_clk:
clk_put(sdhci->clk);
-err_clk_get:
- kfree(sdhci);
-err_kzalloc:
- release_mem_region(iomem->start, resource_size(iomem));
err:
dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret);
return ret;
@@ -239,35 +227,19 @@ err:
static int __devexit sdhci_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
- struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
- int dead;
+ int dead = 0;
u32 scratch;
- if (sdhci->data) {
- if (sdhci->data->card_int_gpio >= 0) {
- free_irq(gpio_to_irq(sdhci->data->card_int_gpio), pdev);
- gpio_free(sdhci->data->card_int_gpio);
- }
-
- if (sdhci->data->card_power_gpio >= 0)
- gpio_free(sdhci->data->card_power_gpio);
- }
-
platform_set_drvdata(pdev, NULL);
- dead = 0;
scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
if (scratch == (u32)-1)
dead = 1;
sdhci_remove_host(host, dead);
- iounmap(host->ioaddr);
sdhci_free_host(host);
clk_disable(sdhci->clk);
clk_put(sdhci->clk);
- kfree(sdhci);
- if (iomem)
- release_mem_region(iomem->start, resource_size(iomem));
return 0;
}
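The sdhci-spear conversion above leans on managed (devm_*) resources: anything obtained through devm_* is released automatically when probe fails or the device is unbound, which is why most of the hand-rolled error labels disappear. A minimal sketch of the pattern, under hypothetical names (example_probe, "example"):

    static int example_probe(struct platform_device *pdev)
    {
    	struct resource *iomem;
    	void __iomem *base;

    	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    	if (!iomem)
    		return -ENOENT;

    	if (!devm_request_mem_region(&pdev->dev, iomem->start,
    				     resource_size(iomem), "example"))
    		return -EBUSY;

    	base = devm_ioremap(&pdev->dev, iomem->start, resource_size(iomem));
    	if (!base)
    		return -ENOMEM;

    	/* no release_mem_region()/iounmap() on the error path or in remove():
    	   the devm core undoes both when probe fails or the device goes away */
    	return 0;
    }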
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index ff5a1699193..b38d8a78f6a 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -32,8 +32,13 @@
#include "sdhci-pltfm.h"
+/* Tegra SDHOST controller vendor register definitions */
+#define SDHCI_TEGRA_VENDOR_MISC_CTRL 0x120
+#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20
+
#define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0)
#define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1)
+#define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2)
struct sdhci_tegra_soc_data {
struct sdhci_pltfm_data *pdata;
@@ -120,6 +125,25 @@ static irqreturn_t carddetect_irq(int irq, void *data)
return IRQ_HANDLED;
};
+static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_tegra *tegra_host = pltfm_host->priv;
+ const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
+
+ if (!(mask & SDHCI_RESET_ALL))
+ return;
+
+ /* Erratum: Enable SDHCI spec v3.00 support */
+ if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300) {
+ u32 misc_ctrl;
+
+ misc_ctrl = sdhci_readb(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
+ misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
+ sdhci_writeb(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
+ }
+}
+
static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -148,6 +172,7 @@ static struct sdhci_ops tegra_sdhci_ops = {
.read_w = tegra_sdhci_readw,
.write_l = tegra_sdhci_writel,
.platform_8bit_width = tegra_sdhci_8bit,
+ .platform_reset_exit = tegra_sdhci_reset_exit,
};
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
@@ -178,6 +203,7 @@ static struct sdhci_pltfm_data sdhci_tegra30_pdata = {
static struct sdhci_tegra_soc_data soc_data_tegra30 = {
.pdata = &sdhci_tegra30_pdata,
+ .nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300,
};
#endif
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index ccefdebeff1..e626732aff7 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -680,8 +680,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
}
if (count >= 0xF) {
- pr_warning("%s: Too large timeout requested for CMD%d!\n",
- mmc_hostname(host->mmc), cmd->opcode);
+ pr_warning("%s: Too large timeout 0x%x requested for CMD%d!\n",
+ mmc_hostname(host->mmc), count, cmd->opcode);
count = 0xE;
}
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index b5401e35574..c03456f1700 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -19,9 +19,9 @@
#include <linux/mtd/cfi.h>
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>
+#include <linux/of.h>
#include <lantiq_soc.h>
-#include <lantiq_platform.h>
/*
* The NOR flash is connected to the same external bus unit (EBU) as PCI.
@@ -44,8 +44,9 @@ struct ltq_mtd {
struct map_info *map;
};
-static char ltq_map_name[] = "ltq_nor";
-static const char *ltq_probe_types[] __devinitconst = { "cmdlinepart", NULL };
+static const char ltq_map_name[] = "ltq_nor";
+static const char *ltq_probe_types[] __devinitconst = {
+ "cmdlinepart", "ofpart", NULL };
static map_word
ltq_read16(struct map_info *map, unsigned long adr)
@@ -108,42 +109,38 @@ ltq_copy_to(struct map_info *map, unsigned long to,
spin_unlock_irqrestore(&ebu_lock, flags);
}
-static int __init
+static int __devinit
ltq_mtd_probe(struct platform_device *pdev)
{
- struct physmap_flash_data *ltq_mtd_data = dev_get_platdata(&pdev->dev);
+ struct mtd_part_parser_data ppdata;
struct ltq_mtd *ltq_mtd;
- struct resource *res;
struct cfi_private *cfi;
int err;
+ if (of_machine_is_compatible("lantiq,falcon") &&
+ (ltq_boot_select() != BS_FLASH)) {
+ dev_err(&pdev->dev, "invalid bootstrap options\n");
+ return -ENODEV;
+ }
+
ltq_mtd = kzalloc(sizeof(struct ltq_mtd), GFP_KERNEL);
platform_set_drvdata(pdev, ltq_mtd);
ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!ltq_mtd->res) {
- dev_err(&pdev->dev, "failed to get memory resource");
+ dev_err(&pdev->dev, "failed to get memory resource\n");
err = -ENOENT;
goto err_out;
}
- res = devm_request_mem_region(&pdev->dev, ltq_mtd->res->start,
- resource_size(ltq_mtd->res), dev_name(&pdev->dev));
- if (!ltq_mtd->res) {
- dev_err(&pdev->dev, "failed to request mem resource");
- err = -EBUSY;
- goto err_out;
- }
-
ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL);
- ltq_mtd->map->phys = res->start;
- ltq_mtd->map->size = resource_size(res);
- ltq_mtd->map->virt = devm_ioremap_nocache(&pdev->dev,
- ltq_mtd->map->phys, ltq_mtd->map->size);
+ ltq_mtd->map->phys = ltq_mtd->res->start;
+ ltq_mtd->map->size = resource_size(ltq_mtd->res);
+ ltq_mtd->map->virt = devm_request_and_ioremap(&pdev->dev, ltq_mtd->res);
if (!ltq_mtd->map->virt) {
- dev_err(&pdev->dev, "failed to ioremap!\n");
- err = -ENOMEM;
- goto err_free;
+ dev_err(&pdev->dev, "failed to remap mem resource\n");
+ err = -EBUSY;
+ goto err_out;
}
ltq_mtd->map->name = ltq_map_name;
@@ -169,9 +166,9 @@ ltq_mtd_probe(struct platform_device *pdev)
cfi->addr_unlock1 ^= 1;
cfi->addr_unlock2 ^= 1;
- err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types, NULL,
- ltq_mtd_data->parts,
- ltq_mtd_data->nr_parts);
+ ppdata.of_node = pdev->dev.of_node;
+ err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types,
+ &ppdata, NULL, 0);
if (err) {
dev_err(&pdev->dev, "failed to add partitions\n");
goto err_destroy;
@@ -204,32 +201,23 @@ ltq_mtd_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ltq_mtd_match[] = {
+ { .compatible = "lantiq,nor" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltq_mtd_match);
+
static struct platform_driver ltq_mtd_driver = {
+ .probe = ltq_mtd_probe,
.remove = __devexit_p(ltq_mtd_remove),
.driver = {
- .name = "ltq_nor",
+ .name = "ltq-nor",
.owner = THIS_MODULE,
+ .of_match_table = ltq_mtd_match,
},
};
-static int __init
-init_ltq_mtd(void)
-{
- int ret = platform_driver_probe(&ltq_mtd_driver, ltq_mtd_probe);
-
- if (ret)
- pr_err("ltq_nor: error registering platform driver");
- return ret;
-}
-
-static void __exit
-exit_ltq_mtd(void)
-{
- platform_driver_unregister(&ltq_mtd_driver);
-}
-
-module_init(init_ltq_mtd);
-module_exit(exit_ltq_mtd);
+module_platform_driver(ltq_mtd_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
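module_platform_driver() above replaces the open-coded init/exit pair: it expands to module_init()/module_exit() stubs that only register and unregister the driver. A minimal sketch under hypothetical names (all example_* identifiers are placeholders):

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int __devinit example_probe(struct platform_device *pdev)
    {
    	return 0;	/* placeholder: real probe work goes here */
    }

    static int __devexit example_remove(struct platform_device *pdev)
    {
    	return 0;
    }

    static struct platform_driver example_driver = {
    	.probe	= example_probe,
    	.remove	= __devexit_p(example_remove),
    	.driver	= {
    		.name	= "example",
    		.owner	= THIS_MODULE,
    	},
    };
    module_platform_driver(example_driver);	/* register on load, unregister on unload */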
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index cc0678a967c..9e374e9bd29 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -690,7 +690,7 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
if (chip == -1) {
/* Disable the NFC clock */
if (host->clk_act) {
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
host->clk_act = 0;
}
return;
@@ -698,7 +698,7 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
if (!host->clk_act) {
/* Enable the NFC clock */
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
host->clk_act = 1;
}
@@ -1078,7 +1078,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
goto eclk;
}
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
host->clk_act = 1;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
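The mxc_nand hunks switch to clk_prepare_enable()/clk_disable_unprepare(), the pairing required by the common clock framework (prepare may sleep, enable must not). A minimal sketch of the chip-select gating above, assuming a hypothetical struct example_host with the same clk/clk_act fields:

    #include <linux/clk.h>

    struct example_host {
    	struct clk *clk;
    	int clk_act;
    };

    static void example_select_chip(struct example_host *host, int chip)
    {
    	if (chip == -1) {
    		/* no chip selected: gate the controller clock */
    		if (host->clk_act) {
    			clk_disable_unprepare(host->clk);
    			host->clk_act = 0;
    		}
    		return;
    	}

    	if (!host->clk_act) {
    		/* first access after idle: prepare (may sleep) + enable */
    		clk_prepare_enable(host->clk);
    		host->clk_act = 1;
    	}
    }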
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 0f50ef38b87..513dc88a05c 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -17,6 +17,8 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/err.h>
#include <asm/io.h>
#include <asm/sizes.h>
#include <mach/hardware.h>
@@ -79,6 +81,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
struct nand_chip *nc;
struct orion_nand_data *board;
struct resource *res;
+ struct clk *clk;
void __iomem *io_base;
int ret = 0;
u32 val = 0;
@@ -155,6 +158,14 @@ static int __init orion_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mtd);
+ /* Not all platforms can gate the clock, so it is not
+ an error if the clock does not exist. */

+ clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(clk)) {
+ clk_prepare_enable(clk);
+ clk_put(clk);
+ }
+
if (nand_scan(mtd, 1)) {
ret = -ENXIO;
goto no_dev;
@@ -184,6 +195,7 @@ static int __devexit orion_nand_remove(struct platform_device *pdev)
{
struct mtd_info *mtd = platform_get_drvdata(pdev);
struct nand_chip *nc = mtd->priv;
+ struct clk *clk;
nand_release(mtd);
@@ -191,6 +203,12 @@ static int __devexit orion_nand_remove(struct platform_device *pdev)
kfree(nc);
+ clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(clk)) {
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+ }
+
return 0;
}
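The clock in orion_nand is deliberately optional, as the added comment says: a clk_get() failure is not a probe error. A minimal sketch of that idiom, with example_enable_optional_clk() as a hypothetical name:

    #include <linux/clk.h>
    #include <linux/err.h>

    static void example_enable_optional_clk(struct device *dev)
    {
    	struct clk *clk = clk_get(dev, NULL);

    	if (!IS_ERR(clk)) {
    		clk_prepare_enable(clk);
    		clk_put(clk);	/* the clock stays enabled; only the handle is dropped */
    	}
    	/* IS_ERR(clk): this platform has no gateable clock, nothing to do */
    }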
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index ec03b401620..9c755db6b16 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1131,7 +1131,6 @@ static irqreturn_t
e100rxtx_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
- struct net_local *np = netdev_priv(dev);
unsigned long irqbits;
/*
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 8f2cf8c09e2..ff7f4c5115a 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -207,7 +207,8 @@ struct fec_enet_private {
struct net_device *netdev;
- struct clk *clk;
+ struct clk *clk_ipg;
+ struct clk *clk_ahb;
/* The saved address of a sent-in-place packet/buffer, for skfree(). */
unsigned char *tx_bounce[TX_RING_SIZE];
@@ -1065,7 +1066,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
* Reference Manual has an error on this, and gets fixed on i.MX6Q
* document.
*/
- fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000);
+ fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ahb), 5000000);
if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
fep->phy_speed--;
fep->phy_speed <<= 1;
@@ -1618,12 +1619,20 @@ fec_probe(struct platform_device *pdev)
goto failed_pin;
}
- fep->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(fep->clk)) {
- ret = PTR_ERR(fep->clk);
+ fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(fep->clk_ipg)) {
+ ret = PTR_ERR(fep->clk_ipg);
goto failed_clk;
}
- clk_prepare_enable(fep->clk);
+
+ fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(fep->clk_ahb)) {
+ ret = PTR_ERR(fep->clk_ahb);
+ goto failed_clk;
+ }
+
+ clk_prepare_enable(fep->clk_ahb);
+ clk_prepare_enable(fep->clk_ipg);
ret = fec_enet_init(ndev);
if (ret)
@@ -1646,8 +1655,8 @@ failed_register:
fec_enet_mii_remove(fep);
failed_mii_init:
failed_init:
- clk_disable_unprepare(fep->clk);
- clk_put(fep->clk);
+ clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
failed_pin:
failed_clk:
for (i = 0; i < FEC_IRQ_NUM; i++) {
@@ -1680,8 +1689,8 @@ fec_drv_remove(struct platform_device *pdev)
if (irq > 0)
free_irq(irq, ndev);
}
- clk_disable_unprepare(fep->clk);
- clk_put(fep->clk);
+ clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
iounmap(fep->hwp);
free_netdev(ndev);
@@ -1705,7 +1714,8 @@ fec_suspend(struct device *dev)
fec_stop(ndev);
netif_device_detach(ndev);
}
- clk_disable_unprepare(fep->clk);
+ clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
return 0;
}
@@ -1716,7 +1726,8 @@ fec_resume(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
- clk_prepare_enable(fep->clk);
+ clk_prepare_enable(fep->clk_ahb);
+ clk_prepare_enable(fep->clk_ipg);
if (netif_running(ndev)) {
fec_restart(ndev, fep->full_duplex);
netif_device_attach(ndev);
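fec.c now takes its two bus clocks by name and via devm_clk_get(), so the error paths no longer need clk_put(). A minimal sketch, with example_get_clks() as a hypothetical helper and the "ipg"/"ahb" names taken from the hunk above:

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int example_get_clks(struct platform_device *pdev,
    			    struct clk **ipg, struct clk **ahb)
    {
    	*ipg = devm_clk_get(&pdev->dev, "ipg");
    	if (IS_ERR(*ipg))
    		return PTR_ERR(*ipg);

    	*ahb = devm_clk_get(&pdev->dev, "ahb");
    	if (IS_ERR(*ahb))
    		return PTR_ERR(*ahb);

    	/* both clocks are device-managed: no clk_put() on failure or remove */
    	clk_prepare_enable(*ahb);
    	clk_prepare_enable(*ipg);
    	return 0;
    }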
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index c8950da60e6..04d901d0ff6 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -57,6 +57,7 @@
#include <linux/types.h>
#include <linux/inet_lro.h>
#include <linux/slab.h>
+#include <linux/clk.h>
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";
@@ -289,10 +290,10 @@ struct mv643xx_eth_shared_private {
/*
* Hardware-specific parameters.
*/
- unsigned int t_clk;
int extended_rx_coal_limit;
int tx_bw_control;
int tx_csum_limit;
+
};
#define TX_BW_CONTROL_ABSENT 0
@@ -431,6 +432,12 @@ struct mv643xx_eth_private {
int tx_desc_sram_size;
int txq_count;
struct tx_queue txq[8];
+
+ /*
+ * Hardware-specific parameters.
+ */
+ struct clk *clk;
+ unsigned int t_clk;
};
@@ -1010,7 +1017,7 @@ static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
int mtu;
int bucket_size;
- token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
+ token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
if (token_rate > 1023)
token_rate = 1023;
@@ -1042,7 +1049,7 @@ static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
int token_rate;
int bucket_size;
- token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000);
+ token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
if (token_rate > 1023)
token_rate = 1023;
@@ -1309,7 +1316,7 @@ static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
temp = (val & 0x003fff00) >> 8;
temp *= 64000000;
- do_div(temp, mp->shared->t_clk);
+ do_div(temp, mp->t_clk);
return (unsigned int)temp;
}
@@ -1319,7 +1326,7 @@ static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
u64 temp;
u32 val;
- temp = (u64)usec * mp->shared->t_clk;
+ temp = (u64)usec * mp->t_clk;
temp += 31999999;
do_div(temp, 64000000);
@@ -1345,7 +1352,7 @@ static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
temp *= 64000000;
- do_div(temp, mp->shared->t_clk);
+ do_div(temp, mp->t_clk);
return (unsigned int)temp;
}
@@ -1354,7 +1361,7 @@ static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
{
u64 temp;
- temp = (u64)usec * mp->shared->t_clk;
+ temp = (u64)usec * mp->t_clk;
temp += 31999999;
do_div(temp, 64000000);
@@ -2663,10 +2670,6 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
if (dram)
mv643xx_eth_conf_mbus_windows(msp, dram);
- /*
- * Detect hardware parameters.
- */
- msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
pd->tx_csum_limit : 9 * 1024;
infer_hw_params(msp);
@@ -2891,6 +2894,18 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
mp->dev = dev;
+ /*
+ * Get the clk rate, if there is one, otherwise use the default.
+ */
+ mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0"));
+ if (!IS_ERR(mp->clk)) {
+ clk_prepare_enable(mp->clk);
+ mp->t_clk = clk_get_rate(mp->clk);
+ } else {
+ mp->t_clk = 133000000;
+ printk(KERN_WARNING "Unable to get clock\n");
+ }
+
set_params(mp, pd);
netif_set_real_num_tx_queues(dev, mp->txq_count);
netif_set_real_num_rx_queues(dev, mp->rxq_count);
@@ -2979,6 +2994,11 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
if (mp->phy != NULL)
phy_detach(mp->phy);
cancel_work_sync(&mp->tx_timeout_task);
+
+ if (!IS_ERR(mp->clk)) {
+ clk_disable_unprepare(mp->clk);
+ clk_put(mp->clk);
+ }
free_netdev(mp->dev);
platform_set_drvdata(pdev, NULL);
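mv643xx_eth moves the bus-clock rate (t_clk) from platform data into the per-port private struct and reads it from the clock framework when a clock is registered, keeping 133 MHz as the fallback. A minimal sketch of that lookup, with example_get_t_clk() as a hypothetical name:

    static unsigned int example_get_t_clk(struct platform_device *pdev,
    				      struct clk **clkp)
    {
    	/* per-port clocks are named "0"/"1" after the platform device id */
    	*clkp = clk_get(&pdev->dev, pdev->id ? "1" : "0");
    	if (!IS_ERR(*clkp)) {
    		clk_prepare_enable(*clkp);
    		return clk_get_rate(*clkp);	/* handle is kept for remove() */
    	}

    	dev_warn(&pdev->dev, "unable to get clock, assuming 133 MHz\n");
    	return 133000000;
    }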
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index b42252c4bec..1b173a6145d 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -51,7 +51,7 @@ config TI_DAVINCI_CPDMA
config TI_CPSW
tristate "TI CPSW Switch Support"
- depends on ARM && (ARCH_DAVINCI || SOC_OMAPAM33XX)
+ depends on ARM && (ARCH_DAVINCI || SOC_AM33XX)
select TI_DAVINCI_CPDMA
select TI_DAVINCI_MDIO
---help---
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c
index f37fbeb66a4..1e173f35767 100644
--- a/drivers/of/of_i2c.c
+++ b/drivers/of/of_i2c.c
@@ -90,8 +90,22 @@ struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
if (!dev)
return NULL;
- return to_i2c_client(dev);
+ return i2c_verify_client(dev);
}
EXPORT_SYMBOL(of_find_i2c_device_by_node);
+/* must call put_device() when done with returned i2c_adapter device */
+struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&i2c_bus_type, NULL, node,
+ of_dev_node_match);
+ if (!dev)
+ return NULL;
+
+ return i2c_verify_adapter(dev);
+}
+EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
+
MODULE_LICENSE("GPL");
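of_find_i2c_adapter_by_node() hands back a device reference taken by bus_find_device(), so callers must balance it with put_device(), as the comment above notes. A minimal usage sketch under a hypothetical example_use_adapter():

    #include <linux/i2c.h>
    #include <linux/of.h>
    #include <linux/of_i2c.h>

    static int example_use_adapter(struct device_node *np)
    {
    	struct i2c_adapter *adap = of_find_i2c_adapter_by_node(np);

    	if (!adap)
    		return -ENODEV;

    	dev_info(&adap->dev, "found adapter %s\n", adap->name);

    	put_device(&adap->dev);	/* drop the bus_find_device() reference */
    	return 0;
    }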
diff --git a/drivers/of/of_pci_irq.c b/drivers/of/of_pci_irq.c
index 93125163dea..67705381321 100644
--- a/drivers/of/of_pci_irq.c
+++ b/drivers/of/of_pci_irq.c
@@ -15,7 +15,7 @@
* PCI tree until a device-node is found, at which point it will finish
* resolving using the OF tree walking.
*/
-int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
+int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq)
{
struct device_node *dn, *ppnode;
struct pci_dev *ppdev;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 8f169002dc7..447e83472c0 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2370,7 +2370,7 @@ void pci_enable_acs(struct pci_dev *dev)
* number is always 0 (see the Implementation Note in section 2.2.8.1 of
* the PCI Express Base Specification, Revision 2.1)
*/
-u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
+u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
{
int slot;
diff --git a/drivers/pinctrl/spear/Kconfig b/drivers/pinctrl/spear/Kconfig
index 6a2596b4f35..91558791e76 100644
--- a/drivers/pinctrl/spear/Kconfig
+++ b/drivers/pinctrl/spear/Kconfig
@@ -31,4 +31,14 @@ config PINCTRL_SPEAR320
depends on MACH_SPEAR320
select PINCTRL_SPEAR3XX
+config PINCTRL_SPEAR1310
+ bool "ST Microelectronics SPEAr1310 SoC pin controller driver"
+ depends on MACH_SPEAR1310
+ select PINCTRL_SPEAR
+
+config PINCTRL_SPEAR1340
+ bool "ST Microelectronics SPEAr1340 SoC pin controller driver"
+ depends on MACH_SPEAR1340
+ select PINCTRL_SPEAR
+
endif
diff --git a/drivers/pinctrl/spear/Makefile b/drivers/pinctrl/spear/Makefile
index 15dcb85da22..b28a7ba2244 100644
--- a/drivers/pinctrl/spear/Makefile
+++ b/drivers/pinctrl/spear/Makefile
@@ -5,3 +5,5 @@ obj-$(CONFIG_PINCTRL_SPEAR3XX) += pinctrl-spear3xx.o
obj-$(CONFIG_PINCTRL_SPEAR300) += pinctrl-spear300.o
obj-$(CONFIG_PINCTRL_SPEAR310) += pinctrl-spear310.o
obj-$(CONFIG_PINCTRL_SPEAR320) += pinctrl-spear320.o
+obj-$(CONFIG_PINCTRL_SPEAR1310) += pinctrl-spear1310.o
+obj-$(CONFIG_PINCTRL_SPEAR1340) += pinctrl-spear1340.o
diff --git a/drivers/pinctrl/spear/pinctrl-spear.h b/drivers/pinctrl/spear/pinctrl-spear.h
index 47a6b5b72f9..9155783bb47 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.h
+++ b/drivers/pinctrl/spear/pinctrl-spear.h
@@ -139,4 +139,255 @@ void __devinit pmx_init_addr(struct spear_pinctrl_machdata *machdata, u16 reg);
int __devinit spear_pinctrl_probe(struct platform_device *pdev,
struct spear_pinctrl_machdata *machdata);
int __devexit spear_pinctrl_remove(struct platform_device *pdev);
+
+#define SPEAR_PIN_0_TO_101 \
+ PINCTRL_PIN(0, "PLGPIO0"), \
+ PINCTRL_PIN(1, "PLGPIO1"), \
+ PINCTRL_PIN(2, "PLGPIO2"), \
+ PINCTRL_PIN(3, "PLGPIO3"), \
+ PINCTRL_PIN(4, "PLGPIO4"), \
+ PINCTRL_PIN(5, "PLGPIO5"), \
+ PINCTRL_PIN(6, "PLGPIO6"), \
+ PINCTRL_PIN(7, "PLGPIO7"), \
+ PINCTRL_PIN(8, "PLGPIO8"), \
+ PINCTRL_PIN(9, "PLGPIO9"), \
+ PINCTRL_PIN(10, "PLGPIO10"), \
+ PINCTRL_PIN(11, "PLGPIO11"), \
+ PINCTRL_PIN(12, "PLGPIO12"), \
+ PINCTRL_PIN(13, "PLGPIO13"), \
+ PINCTRL_PIN(14, "PLGPIO14"), \
+ PINCTRL_PIN(15, "PLGPIO15"), \
+ PINCTRL_PIN(16, "PLGPIO16"), \
+ PINCTRL_PIN(17, "PLGPIO17"), \
+ PINCTRL_PIN(18, "PLGPIO18"), \
+ PINCTRL_PIN(19, "PLGPIO19"), \
+ PINCTRL_PIN(20, "PLGPIO20"), \
+ PINCTRL_PIN(21, "PLGPIO21"), \
+ PINCTRL_PIN(22, "PLGPIO22"), \
+ PINCTRL_PIN(23, "PLGPIO23"), \
+ PINCTRL_PIN(24, "PLGPIO24"), \
+ PINCTRL_PIN(25, "PLGPIO25"), \
+ PINCTRL_PIN(26, "PLGPIO26"), \
+ PINCTRL_PIN(27, "PLGPIO27"), \
+ PINCTRL_PIN(28, "PLGPIO28"), \
+ PINCTRL_PIN(29, "PLGPIO29"), \
+ PINCTRL_PIN(30, "PLGPIO30"), \
+ PINCTRL_PIN(31, "PLGPIO31"), \
+ PINCTRL_PIN(32, "PLGPIO32"), \
+ PINCTRL_PIN(33, "PLGPIO33"), \
+ PINCTRL_PIN(34, "PLGPIO34"), \
+ PINCTRL_PIN(35, "PLGPIO35"), \
+ PINCTRL_PIN(36, "PLGPIO36"), \
+ PINCTRL_PIN(37, "PLGPIO37"), \
+ PINCTRL_PIN(38, "PLGPIO38"), \
+ PINCTRL_PIN(39, "PLGPIO39"), \
+ PINCTRL_PIN(40, "PLGPIO40"), \
+ PINCTRL_PIN(41, "PLGPIO41"), \
+ PINCTRL_PIN(42, "PLGPIO42"), \
+ PINCTRL_PIN(43, "PLGPIO43"), \
+ PINCTRL_PIN(44, "PLGPIO44"), \
+ PINCTRL_PIN(45, "PLGPIO45"), \
+ PINCTRL_PIN(46, "PLGPIO46"), \
+ PINCTRL_PIN(47, "PLGPIO47"), \
+ PINCTRL_PIN(48, "PLGPIO48"), \
+ PINCTRL_PIN(49, "PLGPIO49"), \
+ PINCTRL_PIN(50, "PLGPIO50"), \
+ PINCTRL_PIN(51, "PLGPIO51"), \
+ PINCTRL_PIN(52, "PLGPIO52"), \
+ PINCTRL_PIN(53, "PLGPIO53"), \
+ PINCTRL_PIN(54, "PLGPIO54"), \
+ PINCTRL_PIN(55, "PLGPIO55"), \
+ PINCTRL_PIN(56, "PLGPIO56"), \
+ PINCTRL_PIN(57, "PLGPIO57"), \
+ PINCTRL_PIN(58, "PLGPIO58"), \
+ PINCTRL_PIN(59, "PLGPIO59"), \
+ PINCTRL_PIN(60, "PLGPIO60"), \
+ PINCTRL_PIN(61, "PLGPIO61"), \
+ PINCTRL_PIN(62, "PLGPIO62"), \
+ PINCTRL_PIN(63, "PLGPIO63"), \
+ PINCTRL_PIN(64, "PLGPIO64"), \
+ PINCTRL_PIN(65, "PLGPIO65"), \
+ PINCTRL_PIN(66, "PLGPIO66"), \
+ PINCTRL_PIN(67, "PLGPIO67"), \
+ PINCTRL_PIN(68, "PLGPIO68"), \
+ PINCTRL_PIN(69, "PLGPIO69"), \
+ PINCTRL_PIN(70, "PLGPIO70"), \
+ PINCTRL_PIN(71, "PLGPIO71"), \
+ PINCTRL_PIN(72, "PLGPIO72"), \
+ PINCTRL_PIN(73, "PLGPIO73"), \
+ PINCTRL_PIN(74, "PLGPIO74"), \
+ PINCTRL_PIN(75, "PLGPIO75"), \
+ PINCTRL_PIN(76, "PLGPIO76"), \
+ PINCTRL_PIN(77, "PLGPIO77"), \
+ PINCTRL_PIN(78, "PLGPIO78"), \
+ PINCTRL_PIN(79, "PLGPIO79"), \
+ PINCTRL_PIN(80, "PLGPIO80"), \
+ PINCTRL_PIN(81, "PLGPIO81"), \
+ PINCTRL_PIN(82, "PLGPIO82"), \
+ PINCTRL_PIN(83, "PLGPIO83"), \
+ PINCTRL_PIN(84, "PLGPIO84"), \
+ PINCTRL_PIN(85, "PLGPIO85"), \
+ PINCTRL_PIN(86, "PLGPIO86"), \
+ PINCTRL_PIN(87, "PLGPIO87"), \
+ PINCTRL_PIN(88, "PLGPIO88"), \
+ PINCTRL_PIN(89, "PLGPIO89"), \
+ PINCTRL_PIN(90, "PLGPIO90"), \
+ PINCTRL_PIN(91, "PLGPIO91"), \
+ PINCTRL_PIN(92, "PLGPIO92"), \
+ PINCTRL_PIN(93, "PLGPIO93"), \
+ PINCTRL_PIN(94, "PLGPIO94"), \
+ PINCTRL_PIN(95, "PLGPIO95"), \
+ PINCTRL_PIN(96, "PLGPIO96"), \
+ PINCTRL_PIN(97, "PLGPIO97"), \
+ PINCTRL_PIN(98, "PLGPIO98"), \
+ PINCTRL_PIN(99, "PLGPIO99"), \
+ PINCTRL_PIN(100, "PLGPIO100"), \
+ PINCTRL_PIN(101, "PLGPIO101")
+
+#define SPEAR_PIN_102_TO_245 \
+ PINCTRL_PIN(102, "PLGPIO102"), \
+ PINCTRL_PIN(103, "PLGPIO103"), \
+ PINCTRL_PIN(104, "PLGPIO104"), \
+ PINCTRL_PIN(105, "PLGPIO105"), \
+ PINCTRL_PIN(106, "PLGPIO106"), \
+ PINCTRL_PIN(107, "PLGPIO107"), \
+ PINCTRL_PIN(108, "PLGPIO108"), \
+ PINCTRL_PIN(109, "PLGPIO109"), \
+ PINCTRL_PIN(110, "PLGPIO110"), \
+ PINCTRL_PIN(111, "PLGPIO111"), \
+ PINCTRL_PIN(112, "PLGPIO112"), \
+ PINCTRL_PIN(113, "PLGPIO113"), \
+ PINCTRL_PIN(114, "PLGPIO114"), \
+ PINCTRL_PIN(115, "PLGPIO115"), \
+ PINCTRL_PIN(116, "PLGPIO116"), \
+ PINCTRL_PIN(117, "PLGPIO117"), \
+ PINCTRL_PIN(118, "PLGPIO118"), \
+ PINCTRL_PIN(119, "PLGPIO119"), \
+ PINCTRL_PIN(120, "PLGPIO120"), \
+ PINCTRL_PIN(121, "PLGPIO121"), \
+ PINCTRL_PIN(122, "PLGPIO122"), \
+ PINCTRL_PIN(123, "PLGPIO123"), \
+ PINCTRL_PIN(124, "PLGPIO124"), \
+ PINCTRL_PIN(125, "PLGPIO125"), \
+ PINCTRL_PIN(126, "PLGPIO126"), \
+ PINCTRL_PIN(127, "PLGPIO127"), \
+ PINCTRL_PIN(128, "PLGPIO128"), \
+ PINCTRL_PIN(129, "PLGPIO129"), \
+ PINCTRL_PIN(130, "PLGPIO130"), \
+ PINCTRL_PIN(131, "PLGPIO131"), \
+ PINCTRL_PIN(132, "PLGPIO132"), \
+ PINCTRL_PIN(133, "PLGPIO133"), \
+ PINCTRL_PIN(134, "PLGPIO134"), \
+ PINCTRL_PIN(135, "PLGPIO135"), \
+ PINCTRL_PIN(136, "PLGPIO136"), \
+ PINCTRL_PIN(137, "PLGPIO137"), \
+ PINCTRL_PIN(138, "PLGPIO138"), \
+ PINCTRL_PIN(139, "PLGPIO139"), \
+ PINCTRL_PIN(140, "PLGPIO140"), \
+ PINCTRL_PIN(141, "PLGPIO141"), \
+ PINCTRL_PIN(142, "PLGPIO142"), \
+ PINCTRL_PIN(143, "PLGPIO143"), \
+ PINCTRL_PIN(144, "PLGPIO144"), \
+ PINCTRL_PIN(145, "PLGPIO145"), \
+ PINCTRL_PIN(146, "PLGPIO146"), \
+ PINCTRL_PIN(147, "PLGPIO147"), \
+ PINCTRL_PIN(148, "PLGPIO148"), \
+ PINCTRL_PIN(149, "PLGPIO149"), \
+ PINCTRL_PIN(150, "PLGPIO150"), \
+ PINCTRL_PIN(151, "PLGPIO151"), \
+ PINCTRL_PIN(152, "PLGPIO152"), \
+ PINCTRL_PIN(153, "PLGPIO153"), \
+ PINCTRL_PIN(154, "PLGPIO154"), \
+ PINCTRL_PIN(155, "PLGPIO155"), \
+ PINCTRL_PIN(156, "PLGPIO156"), \
+ PINCTRL_PIN(157, "PLGPIO157"), \
+ PINCTRL_PIN(158, "PLGPIO158"), \
+ PINCTRL_PIN(159, "PLGPIO159"), \
+ PINCTRL_PIN(160, "PLGPIO160"), \
+ PINCTRL_PIN(161, "PLGPIO161"), \
+ PINCTRL_PIN(162, "PLGPIO162"), \
+ PINCTRL_PIN(163, "PLGPIO163"), \
+ PINCTRL_PIN(164, "PLGPIO164"), \
+ PINCTRL_PIN(165, "PLGPIO165"), \
+ PINCTRL_PIN(166, "PLGPIO166"), \
+ PINCTRL_PIN(167, "PLGPIO167"), \
+ PINCTRL_PIN(168, "PLGPIO168"), \
+ PINCTRL_PIN(169, "PLGPIO169"), \
+ PINCTRL_PIN(170, "PLGPIO170"), \
+ PINCTRL_PIN(171, "PLGPIO171"), \
+ PINCTRL_PIN(172, "PLGPIO172"), \
+ PINCTRL_PIN(173, "PLGPIO173"), \
+ PINCTRL_PIN(174, "PLGPIO174"), \
+ PINCTRL_PIN(175, "PLGPIO175"), \
+ PINCTRL_PIN(176, "PLGPIO176"), \
+ PINCTRL_PIN(177, "PLGPIO177"), \
+ PINCTRL_PIN(178, "PLGPIO178"), \
+ PINCTRL_PIN(179, "PLGPIO179"), \
+ PINCTRL_PIN(180, "PLGPIO180"), \
+ PINCTRL_PIN(181, "PLGPIO181"), \
+ PINCTRL_PIN(182, "PLGPIO182"), \
+ PINCTRL_PIN(183, "PLGPIO183"), \
+ PINCTRL_PIN(184, "PLGPIO184"), \
+ PINCTRL_PIN(185, "PLGPIO185"), \
+ PINCTRL_PIN(186, "PLGPIO186"), \
+ PINCTRL_PIN(187, "PLGPIO187"), \
+ PINCTRL_PIN(188, "PLGPIO188"), \
+ PINCTRL_PIN(189, "PLGPIO189"), \
+ PINCTRL_PIN(190, "PLGPIO190"), \
+ PINCTRL_PIN(191, "PLGPIO191"), \
+ PINCTRL_PIN(192, "PLGPIO192"), \
+ PINCTRL_PIN(193, "PLGPIO193"), \
+ PINCTRL_PIN(194, "PLGPIO194"), \
+ PINCTRL_PIN(195, "PLGPIO195"), \
+ PINCTRL_PIN(196, "PLGPIO196"), \
+ PINCTRL_PIN(197, "PLGPIO197"), \
+ PINCTRL_PIN(198, "PLGPIO198"), \
+ PINCTRL_PIN(199, "PLGPIO199"), \
+ PINCTRL_PIN(200, "PLGPIO200"), \
+ PINCTRL_PIN(201, "PLGPIO201"), \
+ PINCTRL_PIN(202, "PLGPIO202"), \
+ PINCTRL_PIN(203, "PLGPIO203"), \
+ PINCTRL_PIN(204, "PLGPIO204"), \
+ PINCTRL_PIN(205, "PLGPIO205"), \
+ PINCTRL_PIN(206, "PLGPIO206"), \
+ PINCTRL_PIN(207, "PLGPIO207"), \
+ PINCTRL_PIN(208, "PLGPIO208"), \
+ PINCTRL_PIN(209, "PLGPIO209"), \
+ PINCTRL_PIN(210, "PLGPIO210"), \
+ PINCTRL_PIN(211, "PLGPIO211"), \
+ PINCTRL_PIN(212, "PLGPIO212"), \
+ PINCTRL_PIN(213, "PLGPIO213"), \
+ PINCTRL_PIN(214, "PLGPIO214"), \
+ PINCTRL_PIN(215, "PLGPIO215"), \
+ PINCTRL_PIN(216, "PLGPIO216"), \
+ PINCTRL_PIN(217, "PLGPIO217"), \
+ PINCTRL_PIN(218, "PLGPIO218"), \
+ PINCTRL_PIN(219, "PLGPIO219"), \
+ PINCTRL_PIN(220, "PLGPIO220"), \
+ PINCTRL_PIN(221, "PLGPIO221"), \
+ PINCTRL_PIN(222, "PLGPIO222"), \
+ PINCTRL_PIN(223, "PLGPIO223"), \
+ PINCTRL_PIN(224, "PLGPIO224"), \
+ PINCTRL_PIN(225, "PLGPIO225"), \
+ PINCTRL_PIN(226, "PLGPIO226"), \
+ PINCTRL_PIN(227, "PLGPIO227"), \
+ PINCTRL_PIN(228, "PLGPIO228"), \
+ PINCTRL_PIN(229, "PLGPIO229"), \
+ PINCTRL_PIN(230, "PLGPIO230"), \
+ PINCTRL_PIN(231, "PLGPIO231"), \
+ PINCTRL_PIN(232, "PLGPIO232"), \
+ PINCTRL_PIN(233, "PLGPIO233"), \
+ PINCTRL_PIN(234, "PLGPIO234"), \
+ PINCTRL_PIN(235, "PLGPIO235"), \
+ PINCTRL_PIN(236, "PLGPIO236"), \
+ PINCTRL_PIN(237, "PLGPIO237"), \
+ PINCTRL_PIN(238, "PLGPIO238"), \
+ PINCTRL_PIN(239, "PLGPIO239"), \
+ PINCTRL_PIN(240, "PLGPIO240"), \
+ PINCTRL_PIN(241, "PLGPIO241"), \
+ PINCTRL_PIN(242, "PLGPIO242"), \
+ PINCTRL_PIN(243, "PLGPIO243"), \
+ PINCTRL_PIN(244, "PLGPIO244"), \
+ PINCTRL_PIN(245, "PLGPIO245")
+
#endif /* __PINMUX_SPEAR_H__ */
diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c
new file mode 100644
index 00000000000..fff168be7f0
--- /dev/null
+++ b/drivers/pinctrl/spear/pinctrl-spear1310.c
@@ -0,0 +1,2198 @@
+/*
+ * Driver for the ST Microelectronics SPEAr1310 pinmux
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "pinctrl-spear.h"
+
+#define DRIVER_NAME "spear1310-pinmux"
+
+/* pins */
+static const struct pinctrl_pin_desc spear1310_pins[] = {
+ SPEAR_PIN_0_TO_101,
+ SPEAR_PIN_102_TO_245,
+};
+
+/* registers */
+#define PERIP_CFG 0x32C
+ #define MCIF_SEL_SHIFT 3
+ #define MCIF_SEL_SD (0x1 << MCIF_SEL_SHIFT)
+ #define MCIF_SEL_CF (0x2 << MCIF_SEL_SHIFT)
+ #define MCIF_SEL_XD (0x3 << MCIF_SEL_SHIFT)
+ #define MCIF_SEL_MASK (0x3 << MCIF_SEL_SHIFT)
+
+#define PCIE_SATA_CFG 0x3A4
+ #define PCIE_SATA2_SEL_PCIE (0 << 31)
+ #define PCIE_SATA1_SEL_PCIE (0 << 30)
+ #define PCIE_SATA0_SEL_PCIE (0 << 29)
+ #define PCIE_SATA2_SEL_SATA (1 << 31)
+ #define PCIE_SATA1_SEL_SATA (1 << 30)
+ #define PCIE_SATA0_SEL_SATA (1 << 29)
+ #define SATA2_CFG_TX_CLK_EN (1 << 27)
+ #define SATA2_CFG_RX_CLK_EN (1 << 26)
+ #define SATA2_CFG_POWERUP_RESET (1 << 25)
+ #define SATA2_CFG_PM_CLK_EN (1 << 24)
+ #define SATA1_CFG_TX_CLK_EN (1 << 23)
+ #define SATA1_CFG_RX_CLK_EN (1 << 22)
+ #define SATA1_CFG_POWERUP_RESET (1 << 21)
+ #define SATA1_CFG_PM_CLK_EN (1 << 20)
+ #define SATA0_CFG_TX_CLK_EN (1 << 19)
+ #define SATA0_CFG_RX_CLK_EN (1 << 18)
+ #define SATA0_CFG_POWERUP_RESET (1 << 17)
+ #define SATA0_CFG_PM_CLK_EN (1 << 16)
+ #define PCIE2_CFG_DEVICE_PRESENT (1 << 11)
+ #define PCIE2_CFG_POWERUP_RESET (1 << 10)
+ #define PCIE2_CFG_CORE_CLK_EN (1 << 9)
+ #define PCIE2_CFG_AUX_CLK_EN (1 << 8)
+ #define PCIE1_CFG_DEVICE_PRESENT (1 << 7)
+ #define PCIE1_CFG_POWERUP_RESET (1 << 6)
+ #define PCIE1_CFG_CORE_CLK_EN (1 << 5)
+ #define PCIE1_CFG_AUX_CLK_EN (1 << 4)
+ #define PCIE0_CFG_DEVICE_PRESENT (1 << 3)
+ #define PCIE0_CFG_POWERUP_RESET (1 << 2)
+ #define PCIE0_CFG_CORE_CLK_EN (1 << 1)
+ #define PCIE0_CFG_AUX_CLK_EN (1 << 0)
+
+#define PAD_FUNCTION_EN_0 0x650
+ #define PMX_UART0_MASK (1 << 1)
+ #define PMX_I2C0_MASK (1 << 2)
+ #define PMX_I2S0_MASK (1 << 3)
+ #define PMX_SSP0_MASK (1 << 4)
+ #define PMX_CLCD1_MASK (1 << 5)
+ #define PMX_EGPIO00_MASK (1 << 6)
+ #define PMX_EGPIO01_MASK (1 << 7)
+ #define PMX_EGPIO02_MASK (1 << 8)
+ #define PMX_EGPIO03_MASK (1 << 9)
+ #define PMX_EGPIO04_MASK (1 << 10)
+ #define PMX_EGPIO05_MASK (1 << 11)
+ #define PMX_EGPIO06_MASK (1 << 12)
+ #define PMX_EGPIO07_MASK (1 << 13)
+ #define PMX_EGPIO08_MASK (1 << 14)
+ #define PMX_EGPIO09_MASK (1 << 15)
+ #define PMX_SMI_MASK (1 << 16)
+ #define PMX_NAND8_MASK (1 << 17)
+ #define PMX_GMIICLK_MASK (1 << 18)
+ #define PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK (1 << 19)
+ #define PMX_RXCLK_RDV_TXEN_D03_MASK (1 << 20)
+ #define PMX_GMIID47_MASK (1 << 21)
+ #define PMX_MDC_MDIO_MASK (1 << 22)
+ #define PMX_MCI_DATA8_15_MASK (1 << 23)
+ #define PMX_NFAD23_MASK (1 << 24)
+ #define PMX_NFAD24_MASK (1 << 25)
+ #define PMX_NFAD25_MASK (1 << 26)
+ #define PMX_NFCE3_MASK (1 << 27)
+ #define PMX_NFWPRT3_MASK (1 << 28)
+ #define PMX_NFRSTPWDWN0_MASK (1 << 29)
+ #define PMX_NFRSTPWDWN1_MASK (1 << 30)
+ #define PMX_NFRSTPWDWN2_MASK (1 << 31)
+
+#define PAD_FUNCTION_EN_1 0x654
+ #define PMX_NFRSTPWDWN3_MASK (1 << 0)
+ #define PMX_SMINCS2_MASK (1 << 1)
+ #define PMX_SMINCS3_MASK (1 << 2)
+ #define PMX_CLCD2_MASK (1 << 3)
+ #define PMX_KBD_ROWCOL68_MASK (1 << 4)
+ #define PMX_EGPIO10_MASK (1 << 5)
+ #define PMX_EGPIO11_MASK (1 << 6)
+ #define PMX_EGPIO12_MASK (1 << 7)
+ #define PMX_EGPIO13_MASK (1 << 8)
+ #define PMX_EGPIO14_MASK (1 << 9)
+ #define PMX_EGPIO15_MASK (1 << 10)
+ #define PMX_UART0_MODEM_MASK (1 << 11)
+ #define PMX_GPT0_TMR0_MASK (1 << 12)
+ #define PMX_GPT0_TMR1_MASK (1 << 13)
+ #define PMX_GPT1_TMR0_MASK (1 << 14)
+ #define PMX_GPT1_TMR1_MASK (1 << 15)
+ #define PMX_I2S1_MASK (1 << 16)
+ #define PMX_KBD_ROWCOL25_MASK (1 << 17)
+ #define PMX_NFIO8_15_MASK (1 << 18)
+ #define PMX_KBD_COL1_MASK (1 << 19)
+ #define PMX_NFCE1_MASK (1 << 20)
+ #define PMX_KBD_COL0_MASK (1 << 21)
+ #define PMX_NFCE2_MASK (1 << 22)
+ #define PMX_KBD_ROW1_MASK (1 << 23)
+ #define PMX_NFWPRT1_MASK (1 << 24)
+ #define PMX_KBD_ROW0_MASK (1 << 25)
+ #define PMX_NFWPRT2_MASK (1 << 26)
+ #define PMX_MCIDATA0_MASK (1 << 27)
+ #define PMX_MCIDATA1_MASK (1 << 28)
+ #define PMX_MCIDATA2_MASK (1 << 29)
+ #define PMX_MCIDATA3_MASK (1 << 30)
+ #define PMX_MCIDATA4_MASK (1 << 31)
+
+#define PAD_FUNCTION_EN_2 0x658
+ #define PMX_MCIDATA5_MASK (1 << 0)
+ #define PMX_MCIDATA6_MASK (1 << 1)
+ #define PMX_MCIDATA7_MASK (1 << 2)
+ #define PMX_MCIDATA1SD_MASK (1 << 3)
+ #define PMX_MCIDATA2SD_MASK (1 << 4)
+ #define PMX_MCIDATA3SD_MASK (1 << 5)
+ #define PMX_MCIADDR0ALE_MASK (1 << 6)
+ #define PMX_MCIADDR1CLECLK_MASK (1 << 7)
+ #define PMX_MCIADDR2_MASK (1 << 8)
+ #define PMX_MCICECF_MASK (1 << 9)
+ #define PMX_MCICEXD_MASK (1 << 10)
+ #define PMX_MCICESDMMC_MASK (1 << 11)
+ #define PMX_MCICDCF1_MASK (1 << 12)
+ #define PMX_MCICDCF2_MASK (1 << 13)
+ #define PMX_MCICDXD_MASK (1 << 14)
+ #define PMX_MCICDSDMMC_MASK (1 << 15)
+ #define PMX_MCIDATADIR_MASK (1 << 16)
+ #define PMX_MCIDMARQWP_MASK (1 << 17)
+ #define PMX_MCIIORDRE_MASK (1 << 18)
+ #define PMX_MCIIOWRWE_MASK (1 << 19)
+ #define PMX_MCIRESETCF_MASK (1 << 20)
+ #define PMX_MCICS0CE_MASK (1 << 21)
+ #define PMX_MCICFINTR_MASK (1 << 22)
+ #define PMX_MCIIORDY_MASK (1 << 23)
+ #define PMX_MCICS1_MASK (1 << 24)
+ #define PMX_MCIDMAACK_MASK (1 << 25)
+ #define PMX_MCISDCMD_MASK (1 << 26)
+ #define PMX_MCILEDS_MASK (1 << 27)
+ #define PMX_TOUCH_XY_MASK (1 << 28)
+ #define PMX_SSP0_CS0_MASK (1 << 29)
+ #define PMX_SSP0_CS1_2_MASK (1 << 30)
+
+/* combined macros */
+#define PMX_GMII_MASK (PMX_GMIICLK_MASK | \
+ PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK | \
+ PMX_RXCLK_RDV_TXEN_D03_MASK | \
+ PMX_GMIID47_MASK | PMX_MDC_MDIO_MASK)
+
+#define PMX_EGPIO_0_GRP_MASK (PMX_EGPIO00_MASK | PMX_EGPIO01_MASK | \
+ PMX_EGPIO02_MASK | \
+ PMX_EGPIO03_MASK | PMX_EGPIO04_MASK | \
+ PMX_EGPIO05_MASK | PMX_EGPIO06_MASK | \
+ PMX_EGPIO07_MASK | PMX_EGPIO08_MASK | \
+ PMX_EGPIO09_MASK)
+#define PMX_EGPIO_1_GRP_MASK (PMX_EGPIO10_MASK | PMX_EGPIO11_MASK | \
+ PMX_EGPIO12_MASK | PMX_EGPIO13_MASK | \
+ PMX_EGPIO14_MASK | PMX_EGPIO15_MASK)
+
+#define PMX_KEYBOARD_6X6_MASK (PMX_KBD_ROW0_MASK | PMX_KBD_ROW1_MASK | \
+ PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL0_MASK | \
+ PMX_KBD_COL1_MASK)
+
+#define PMX_NAND8BIT_0_MASK (PMX_NAND8_MASK | PMX_NFAD23_MASK | \
+ PMX_NFAD24_MASK | PMX_NFAD25_MASK | \
+ PMX_NFWPRT3_MASK | PMX_NFRSTPWDWN0_MASK | \
+ PMX_NFRSTPWDWN1_MASK | PMX_NFRSTPWDWN2_MASK | \
+ PMX_NFCE3_MASK)
+#define PMX_NAND8BIT_1_MASK PMX_NFRSTPWDWN3_MASK
+
+#define PMX_NAND16BIT_1_MASK (PMX_KBD_ROWCOL25_MASK | PMX_NFIO8_15_MASK)
+#define PMX_NAND_4CHIPS_MASK (PMX_NFCE1_MASK | PMX_NFCE2_MASK | \
+ PMX_NFWPRT1_MASK | PMX_NFWPRT2_MASK | \
+ PMX_KBD_ROW0_MASK | PMX_KBD_ROW1_MASK | \
+ PMX_KBD_COL0_MASK | PMX_KBD_COL1_MASK)
+
+#define PMX_MCIFALL_1_MASK 0xF8000000
+#define PMX_MCIFALL_2_MASK 0x0FFFFFFF
+
+#define PMX_PCI_REG1_MASK (PMX_SMINCS2_MASK | PMX_SMINCS3_MASK | \
+ PMX_CLCD2_MASK | PMX_KBD_ROWCOL68_MASK | \
+ PMX_EGPIO_1_GRP_MASK | PMX_GPT0_TMR0_MASK | \
+ PMX_GPT0_TMR1_MASK | PMX_GPT1_TMR0_MASK | \
+ PMX_GPT1_TMR1_MASK | PMX_I2S1_MASK | \
+ PMX_NFCE2_MASK)
+#define PMX_PCI_REG2_MASK (PMX_TOUCH_XY_MASK | PMX_SSP0_CS0_MASK | \
+ PMX_SSP0_CS1_2_MASK)
+
+#define PMX_SMII_0_1_2_MASK (PMX_CLCD2_MASK | PMX_KBD_ROWCOL68_MASK)
+#define PMX_RGMII_REG0_MASK (PMX_MCI_DATA8_15_MASK | \
+ PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK | \
+ PMX_GMIID47_MASK)
+#define PMX_RGMII_REG1_MASK (PMX_KBD_ROWCOL68_MASK | PMX_EGPIO_1_GRP_MASK |\
+ PMX_KBD_ROW1_MASK | PMX_NFWPRT1_MASK | \
+ PMX_KBD_ROW0_MASK | PMX_NFWPRT2_MASK)
+#define PMX_RGMII_REG2_MASK (PMX_TOUCH_XY_MASK | PMX_SSP0_CS0_MASK | \
+ PMX_SSP0_CS1_2_MASK)
+
+#define PCIE_CFG_VAL(x) (PCIE_SATA##x##_SEL_PCIE | \
+ PCIE##x##_CFG_AUX_CLK_EN | \
+ PCIE##x##_CFG_CORE_CLK_EN | \
+ PCIE##x##_CFG_POWERUP_RESET | \
+ PCIE##x##_CFG_DEVICE_PRESENT)
+#define SATA_CFG_VAL(x) (PCIE_SATA##x##_SEL_SATA | \
+ SATA##x##_CFG_PM_CLK_EN | \
+ SATA##x##_CFG_POWERUP_RESET | \
+ SATA##x##_CFG_RX_CLK_EN | \
+ SATA##x##_CFG_TX_CLK_EN)
+
+/* Pad multiplexing for i2c0 device */
+static const unsigned i2c0_pins[] = { 102, 103 };
+static struct spear_muxreg i2c0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_I2C0_MASK,
+ .val = PMX_I2C0_MASK,
+ },
+};
+
+static struct spear_modemux i2c0_modemux[] = {
+ {
+ .muxregs = i2c0_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c0_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c0_pingroup = {
+ .name = "i2c0_grp",
+ .pins = i2c0_pins,
+ .npins = ARRAY_SIZE(i2c0_pins),
+ .modemuxs = i2c0_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c0_modemux),
+};
+
+static const char *const i2c0_grps[] = { "i2c0_grp" };
+static struct spear_function i2c0_function = {
+ .name = "i2c0",
+ .groups = i2c0_grps,
+ .ngroups = ARRAY_SIZE(i2c0_grps),
+};
+
+/* Pad multiplexing for ssp0 device */
+static const unsigned ssp0_pins[] = { 109, 110, 111, 112 };
+static struct spear_muxreg ssp0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_SSP0_MASK,
+ .val = PMX_SSP0_MASK,
+ },
+};
+
+static struct spear_modemux ssp0_modemux[] = {
+ {
+ .muxregs = ssp0_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp0_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp0_pingroup = {
+ .name = "ssp0_grp",
+ .pins = ssp0_pins,
+ .npins = ARRAY_SIZE(ssp0_pins),
+ .modemuxs = ssp0_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp0_modemux),
+};
+
+/* Pad multiplexing for ssp0_cs0 device */
+static const unsigned ssp0_cs0_pins[] = { 96 };
+static struct spear_muxreg ssp0_cs0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_SSP0_CS0_MASK,
+ .val = PMX_SSP0_CS0_MASK,
+ },
+};
+
+static struct spear_modemux ssp0_cs0_modemux[] = {
+ {
+ .muxregs = ssp0_cs0_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp0_cs0_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp0_cs0_pingroup = {
+ .name = "ssp0_cs0_grp",
+ .pins = ssp0_cs0_pins,
+ .npins = ARRAY_SIZE(ssp0_cs0_pins),
+ .modemuxs = ssp0_cs0_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp0_cs0_modemux),
+};
+
+/* ssp0_cs1_2 device */
+static const unsigned ssp0_cs1_2_pins[] = { 94, 95 };
+static struct spear_muxreg ssp0_cs1_2_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_SSP0_CS1_2_MASK,
+ .val = PMX_SSP0_CS1_2_MASK,
+ },
+};
+
+static struct spear_modemux ssp0_cs1_2_modemux[] = {
+ {
+ .muxregs = ssp0_cs1_2_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp0_cs1_2_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp0_cs1_2_pingroup = {
+ .name = "ssp0_cs1_2_grp",
+ .pins = ssp0_cs1_2_pins,
+ .npins = ARRAY_SIZE(ssp0_cs1_2_pins),
+ .modemuxs = ssp0_cs1_2_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp0_cs1_2_modemux),
+};
+
+static const char *const ssp0_grps[] = { "ssp0_grp", "ssp0_cs0_grp",
+ "ssp0_cs1_2_grp" };
+static struct spear_function ssp0_function = {
+ .name = "ssp0",
+ .groups = ssp0_grps,
+ .ngroups = ARRAY_SIZE(ssp0_grps),
+};
+
+/* Pad multiplexing for i2s0 device */
+static const unsigned i2s0_pins[] = { 104, 105, 106, 107, 108 };
+static struct spear_muxreg i2s0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_I2S0_MASK,
+ .val = PMX_I2S0_MASK,
+ },
+};
+
+static struct spear_modemux i2s0_modemux[] = {
+ {
+ .muxregs = i2s0_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2s0_muxreg),
+ },
+};
+
+static struct spear_pingroup i2s0_pingroup = {
+ .name = "i2s0_grp",
+ .pins = i2s0_pins,
+ .npins = ARRAY_SIZE(i2s0_pins),
+ .modemuxs = i2s0_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2s0_modemux),
+};
+
+static const char *const i2s0_grps[] = { "i2s0_grp" };
+static struct spear_function i2s0_function = {
+ .name = "i2s0",
+ .groups = i2s0_grps,
+ .ngroups = ARRAY_SIZE(i2s0_grps),
+};
+
+/* Pad multiplexing for i2s1 device */
+static const unsigned i2s1_pins[] = { 0, 1, 2, 3 };
+static struct spear_muxreg i2s1_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_I2S1_MASK,
+ .val = PMX_I2S1_MASK,
+ },
+};
+
+static struct spear_modemux i2s1_modemux[] = {
+ {
+ .muxregs = i2s1_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2s1_muxreg),
+ },
+};
+
+static struct spear_pingroup i2s1_pingroup = {
+ .name = "i2s1_grp",
+ .pins = i2s1_pins,
+ .npins = ARRAY_SIZE(i2s1_pins),
+ .modemuxs = i2s1_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2s1_modemux),
+};
+
+static const char *const i2s1_grps[] = { "i2s1_grp" };
+static struct spear_function i2s1_function = {
+ .name = "i2s1",
+ .groups = i2s1_grps,
+ .ngroups = ARRAY_SIZE(i2s1_grps),
+};
+
+/* Pad multiplexing for clcd device */
+static const unsigned clcd_pins[] = { 113, 114, 115, 116, 117, 118, 119, 120,
+ 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+ 135, 136, 137, 138, 139, 140, 141, 142 };
+static struct spear_muxreg clcd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_CLCD1_MASK,
+ .val = PMX_CLCD1_MASK,
+ },
+};
+
+static struct spear_modemux clcd_modemux[] = {
+ {
+ .muxregs = clcd_muxreg,
+ .nmuxregs = ARRAY_SIZE(clcd_muxreg),
+ },
+};
+
+static struct spear_pingroup clcd_pingroup = {
+ .name = "clcd_grp",
+ .pins = clcd_pins,
+ .npins = ARRAY_SIZE(clcd_pins),
+ .modemuxs = clcd_modemux,
+ .nmodemuxs = ARRAY_SIZE(clcd_modemux),
+};
+
+static const unsigned clcd_high_res_pins[] = { 30, 31, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53 };
+static struct spear_muxreg clcd_high_res_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_CLCD2_MASK,
+ .val = PMX_CLCD2_MASK,
+ },
+};
+
+static struct spear_modemux clcd_high_res_modemux[] = {
+ {
+ .muxregs = clcd_high_res_muxreg,
+ .nmuxregs = ARRAY_SIZE(clcd_high_res_muxreg),
+ },
+};
+
+static struct spear_pingroup clcd_high_res_pingroup = {
+ .name = "clcd_high_res_grp",
+ .pins = clcd_high_res_pins,
+ .npins = ARRAY_SIZE(clcd_high_res_pins),
+ .modemuxs = clcd_high_res_modemux,
+ .nmodemuxs = ARRAY_SIZE(clcd_high_res_modemux),
+};
+
+static const char *const clcd_grps[] = { "clcd_grp", "clcd_high_res" };
+static struct spear_function clcd_function = {
+ .name = "clcd",
+ .groups = clcd_grps,
+ .ngroups = ARRAY_SIZE(clcd_grps),
+};
+
+static const unsigned arm_gpio_pins[] = { 18, 19, 20, 21, 22, 23, 143, 144, 145,
+ 146, 147, 148, 149, 150, 151, 152 };
+static struct spear_muxreg arm_gpio_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_EGPIO_0_GRP_MASK,
+ .val = PMX_EGPIO_0_GRP_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_EGPIO_1_GRP_MASK,
+ .val = PMX_EGPIO_1_GRP_MASK,
+ },
+};
+
+static struct spear_modemux arm_gpio_modemux[] = {
+ {
+ .muxregs = arm_gpio_muxreg,
+ .nmuxregs = ARRAY_SIZE(arm_gpio_muxreg),
+ },
+};
+
+static struct spear_pingroup arm_gpio_pingroup = {
+ .name = "arm_gpio_grp",
+ .pins = arm_gpio_pins,
+ .npins = ARRAY_SIZE(arm_gpio_pins),
+ .modemuxs = arm_gpio_modemux,
+ .nmodemuxs = ARRAY_SIZE(arm_gpio_modemux),
+};
+
+static const char *const arm_gpio_grps[] = { "arm_gpio_grp" };
+static struct spear_function arm_gpio_function = {
+ .name = "arm_gpio",
+ .groups = arm_gpio_grps,
+ .ngroups = ARRAY_SIZE(arm_gpio_grps),
+};
+
+/* Pad multiplexing for smi 2 chips device */
+static const unsigned smi_2_chips_pins[] = { 153, 154, 155, 156, 157 };
+static struct spear_muxreg smi_2_chips_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_SMI_MASK,
+ .val = PMX_SMI_MASK,
+ },
+};
+
+static struct spear_modemux smi_2_chips_modemux[] = {
+ {
+ .muxregs = smi_2_chips_muxreg,
+ .nmuxregs = ARRAY_SIZE(smi_2_chips_muxreg),
+ },
+};
+
+static struct spear_pingroup smi_2_chips_pingroup = {
+ .name = "smi_2_chips_grp",
+ .pins = smi_2_chips_pins,
+ .npins = ARRAY_SIZE(smi_2_chips_pins),
+ .modemuxs = smi_2_chips_modemux,
+ .nmodemuxs = ARRAY_SIZE(smi_2_chips_modemux),
+};
+
+static const unsigned smi_4_chips_pins[] = { 54, 55 };
+static struct spear_muxreg smi_4_chips_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_SMI_MASK,
+ .val = PMX_SMI_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK,
+ .val = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK,
+ },
+};
+
+static struct spear_modemux smi_4_chips_modemux[] = {
+ {
+ .muxregs = smi_4_chips_muxreg,
+ .nmuxregs = ARRAY_SIZE(smi_4_chips_muxreg),
+ },
+};
+
+static struct spear_pingroup smi_4_chips_pingroup = {
+ .name = "smi_4_chips_grp",
+ .pins = smi_4_chips_pins,
+ .npins = ARRAY_SIZE(smi_4_chips_pins),
+ .modemuxs = smi_4_chips_modemux,
+ .nmodemuxs = ARRAY_SIZE(smi_4_chips_modemux),
+};
+
+static const char *const smi_grps[] = { "smi_2_chips_grp", "smi_4_chips_grp" };
+static struct spear_function smi_function = {
+ .name = "smi",
+ .groups = smi_grps,
+ .ngroups = ARRAY_SIZE(smi_grps),
+};
+
+/* Pad multiplexing for gmii device */
+static const unsigned gmii_pins[] = { 173, 174, 175, 176, 177, 178, 179, 180,
+ 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
+ 195, 196, 197, 198, 199, 200 };
+static struct spear_muxreg gmii_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_GMII_MASK,
+ .val = PMX_GMII_MASK,
+ },
+};
+
+static struct spear_modemux gmii_modemux[] = {
+ {
+ .muxregs = gmii_muxreg,
+ .nmuxregs = ARRAY_SIZE(gmii_muxreg),
+ },
+};
+
+static struct spear_pingroup gmii_pingroup = {
+ .name = "gmii_grp",
+ .pins = gmii_pins,
+ .npins = ARRAY_SIZE(gmii_pins),
+ .modemuxs = gmii_modemux,
+ .nmodemuxs = ARRAY_SIZE(gmii_modemux),
+};
+
+static const char *const gmii_grps[] = { "gmii_grp" };
+static struct spear_function gmii_function = {
+ .name = "gmii",
+ .groups = gmii_grps,
+ .ngroups = ARRAY_SIZE(gmii_grps),
+};
+
+/* Pad multiplexing for rgmii device */
+static const unsigned rgmii_pins[] = { 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 175,
+ 180, 181, 182, 183, 185, 188, 193, 194, 195, 196, 197, 198, 211, 212 };
+static struct spear_muxreg rgmii_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_RGMII_REG0_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_RGMII_REG1_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_RGMII_REG2_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux rgmii_modemux[] = {
+ {
+ .muxregs = rgmii_muxreg,
+ .nmuxregs = ARRAY_SIZE(rgmii_muxreg),
+ },
+};
+
+static struct spear_pingroup rgmii_pingroup = {
+ .name = "rgmii_grp",
+ .pins = rgmii_pins,
+ .npins = ARRAY_SIZE(rgmii_pins),
+ .modemuxs = rgmii_modemux,
+ .nmodemuxs = ARRAY_SIZE(rgmii_modemux),
+};
+
+static const char *const rgmii_grps[] = { "rgmii_grp" };
+static struct spear_function rgmii_function = {
+ .name = "rgmii",
+ .groups = rgmii_grps,
+ .ngroups = ARRAY_SIZE(rgmii_grps),
+};
+
+/* Pad multiplexing for smii_0_1_2 device */
+static const unsigned smii_0_1_2_pins[] = { 24, 25, 26, 27, 28, 29, 30, 31, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53, 54, 55 };
+static struct spear_muxreg smii_0_1_2_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_SMII_0_1_2_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux smii_0_1_2_modemux[] = {
+ {
+ .muxregs = smii_0_1_2_muxreg,
+ .nmuxregs = ARRAY_SIZE(smii_0_1_2_muxreg),
+ },
+};
+
+static struct spear_pingroup smii_0_1_2_pingroup = {
+ .name = "smii_0_1_2_grp",
+ .pins = smii_0_1_2_pins,
+ .npins = ARRAY_SIZE(smii_0_1_2_pins),
+ .modemuxs = smii_0_1_2_modemux,
+ .nmodemuxs = ARRAY_SIZE(smii_0_1_2_modemux),
+};
+
+static const char *const smii_0_1_2_grps[] = { "smii_0_1_2_grp" };
+static struct spear_function smii_0_1_2_function = {
+ .name = "smii_0_1_2",
+ .groups = smii_0_1_2_grps,
+ .ngroups = ARRAY_SIZE(smii_0_1_2_grps),
+};
+
+/* Pad multiplexing for ras_mii_txclk device */
+static const unsigned ras_mii_txclk_pins[] = { 98, 99 };
+static struct spear_muxreg ras_mii_txclk_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_NFCE2_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux ras_mii_txclk_modemux[] = {
+ {
+ .muxregs = ras_mii_txclk_muxreg,
+ .nmuxregs = ARRAY_SIZE(ras_mii_txclk_muxreg),
+ },
+};
+
+static struct spear_pingroup ras_mii_txclk_pingroup = {
+ .name = "ras_mii_txclk_grp",
+ .pins = ras_mii_txclk_pins,
+ .npins = ARRAY_SIZE(ras_mii_txclk_pins),
+ .modemuxs = ras_mii_txclk_modemux,
+ .nmodemuxs = ARRAY_SIZE(ras_mii_txclk_modemux),
+};
+
+static const char *const ras_mii_txclk_grps[] = { "ras_mii_txclk_grp" };
+static struct spear_function ras_mii_txclk_function = {
+ .name = "ras_mii_txclk",
+ .groups = ras_mii_txclk_grps,
+ .ngroups = ARRAY_SIZE(ras_mii_txclk_grps),
+};
+
+/* Pad multiplexing for nand 8bit device (cs0 only) */
+static const unsigned nand_8bit_pins[] = { 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82,
+ 83, 84, 85, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+ 170, 171, 172, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
+ 212 };
+static struct spear_muxreg nand_8bit_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_NAND8BIT_0_MASK,
+ .val = PMX_NAND8BIT_0_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_NAND8BIT_1_MASK,
+ .val = PMX_NAND8BIT_1_MASK,
+ },
+};
+
+static struct spear_modemux nand_8bit_modemux[] = {
+ {
+ .muxregs = nand_8bit_muxreg,
+ .nmuxregs = ARRAY_SIZE(nand_8bit_muxreg),
+ },
+};
+
+static struct spear_pingroup nand_8bit_pingroup = {
+ .name = "nand_8bit_grp",
+ .pins = nand_8bit_pins,
+ .npins = ARRAY_SIZE(nand_8bit_pins),
+ .modemuxs = nand_8bit_modemux,
+ .nmodemuxs = ARRAY_SIZE(nand_8bit_modemux),
+};
+
+/* Pad multiplexing for nand 16bit device */
+static const unsigned nand_16bit_pins[] = { 201, 202, 203, 204, 207, 208, 209,
+ 210 };
+static struct spear_muxreg nand_16bit_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_NAND16BIT_1_MASK,
+ .val = PMX_NAND16BIT_1_MASK,
+ },
+};
+
+static struct spear_modemux nand_16bit_modemux[] = {
+ {
+ .muxregs = nand_16bit_muxreg,
+ .nmuxregs = ARRAY_SIZE(nand_16bit_muxreg),
+ },
+};
+
+static struct spear_pingroup nand_16bit_pingroup = {
+ .name = "nand_16bit_grp",
+ .pins = nand_16bit_pins,
+ .npins = ARRAY_SIZE(nand_16bit_pins),
+ .modemuxs = nand_16bit_modemux,
+ .nmodemuxs = ARRAY_SIZE(nand_16bit_modemux),
+};
+
+/* Pad multiplexing for nand 4 chips */
+static const unsigned nand_4_chips_pins[] = { 205, 206, 211, 212 };
+static struct spear_muxreg nand_4_chips_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_NAND_4CHIPS_MASK,
+ .val = PMX_NAND_4CHIPS_MASK,
+ },
+};
+
+static struct spear_modemux nand_4_chips_modemux[] = {
+ {
+ .muxregs = nand_4_chips_muxreg,
+ .nmuxregs = ARRAY_SIZE(nand_4_chips_muxreg),
+ },
+};
+
+static struct spear_pingroup nand_4_chips_pingroup = {
+ .name = "nand_4_chips_grp",
+ .pins = nand_4_chips_pins,
+ .npins = ARRAY_SIZE(nand_4_chips_pins),
+ .modemuxs = nand_4_chips_modemux,
+ .nmodemuxs = ARRAY_SIZE(nand_4_chips_modemux),
+};
+
+static const char *const nand_grps[] = { "nand_8bit_grp", "nand_16bit_grp",
+ "nand_4_chips_grp" };
+static struct spear_function nand_function = {
+ .name = "nand",
+ .groups = nand_grps,
+ .ngroups = ARRAY_SIZE(nand_grps),
+};
+
+/* Pad multiplexing for keyboard_6x6 device */
+static const unsigned keyboard_6x6_pins[] = { 201, 202, 203, 204, 205, 206, 207,
+ 208, 209, 210, 211, 212 };
+static struct spear_muxreg keyboard_6x6_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_KEYBOARD_6X6_MASK | PMX_NFIO8_15_MASK |
+ PMX_NFCE1_MASK | PMX_NFCE2_MASK | PMX_NFWPRT1_MASK |
+ PMX_NFWPRT2_MASK,
+ .val = PMX_KEYBOARD_6X6_MASK,
+ },
+};
+
+static struct spear_modemux keyboard_6x6_modemux[] = {
+ {
+ .muxregs = keyboard_6x6_muxreg,
+ .nmuxregs = ARRAY_SIZE(keyboard_6x6_muxreg),
+ },
+};
+
+static struct spear_pingroup keyboard_6x6_pingroup = {
+ .name = "keyboard_6x6_grp",
+ .pins = keyboard_6x6_pins,
+ .npins = ARRAY_SIZE(keyboard_6x6_pins),
+ .modemuxs = keyboard_6x6_modemux,
+ .nmodemuxs = ARRAY_SIZE(keyboard_6x6_modemux),
+};
+
+/* Pad multiplexing for keyboard_rowcol6_8 device */
+static const unsigned keyboard_rowcol6_8_pins[] = { 24, 25, 26, 27, 28, 29 };
+static struct spear_muxreg keyboard_rowcol6_8_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_KBD_ROWCOL68_MASK,
+ .val = PMX_KBD_ROWCOL68_MASK,
+ },
+};
+
+static struct spear_modemux keyboard_rowcol6_8_modemux[] = {
+ {
+ .muxregs = keyboard_rowcol6_8_muxreg,
+ .nmuxregs = ARRAY_SIZE(keyboard_rowcol6_8_muxreg),
+ },
+};
+
+static struct spear_pingroup keyboard_rowcol6_8_pingroup = {
+ .name = "keyboard_rowcol6_8_grp",
+ .pins = keyboard_rowcol6_8_pins,
+ .npins = ARRAY_SIZE(keyboard_rowcol6_8_pins),
+ .modemuxs = keyboard_rowcol6_8_modemux,
+ .nmodemuxs = ARRAY_SIZE(keyboard_rowcol6_8_modemux),
+};
+
+static const char *const keyboard_grps[] = { "keyboard_6x6_grp",
+ "keyboard_rowcol6_8_grp" };
+static struct spear_function keyboard_function = {
+ .name = "keyboard",
+ .groups = keyboard_grps,
+ .ngroups = ARRAY_SIZE(keyboard_grps),
+};
+
+/* Pad multiplexing for uart0 device */
+static const unsigned uart0_pins[] = { 100, 101 };
+static struct spear_muxreg uart0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_UART0_MASK,
+ .val = PMX_UART0_MASK,
+ },
+};
+
+static struct spear_modemux uart0_modemux[] = {
+ {
+ .muxregs = uart0_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart0_muxreg),
+ },
+};
+
+static struct spear_pingroup uart0_pingroup = {
+ .name = "uart0_grp",
+ .pins = uart0_pins,
+ .npins = ARRAY_SIZE(uart0_pins),
+ .modemuxs = uart0_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart0_modemux),
+};
+
+/* Pad multiplexing for uart0_modem device */
+static const unsigned uart0_modem_pins[] = { 12, 13, 14, 15, 16, 17 };
+static struct spear_muxreg uart0_modem_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_UART0_MODEM_MASK,
+ .val = PMX_UART0_MODEM_MASK,
+ },
+};
+
+static struct spear_modemux uart0_modem_modemux[] = {
+ {
+ .muxregs = uart0_modem_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart0_modem_muxreg),
+ },
+};
+
+static struct spear_pingroup uart0_modem_pingroup = {
+ .name = "uart0_modem_grp",
+ .pins = uart0_modem_pins,
+ .npins = ARRAY_SIZE(uart0_modem_pins),
+ .modemuxs = uart0_modem_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart0_modem_modemux),
+};
+
+static const char *const uart0_grps[] = { "uart0_grp", "uart0_modem_grp" };
+static struct spear_function uart0_function = {
+ .name = "uart0",
+ .groups = uart0_grps,
+ .ngroups = ARRAY_SIZE(uart0_grps),
+};
+
+/* Pad multiplexing for gpt0_tmr0 device */
+static const unsigned gpt0_tmr0_pins[] = { 10, 11 };
+static struct spear_muxreg gpt0_tmr0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_GPT0_TMR0_MASK,
+ .val = PMX_GPT0_TMR0_MASK,
+ },
+};
+
+static struct spear_modemux gpt0_tmr0_modemux[] = {
+ {
+ .muxregs = gpt0_tmr0_muxreg,
+ .nmuxregs = ARRAY_SIZE(gpt0_tmr0_muxreg),
+ },
+};
+
+static struct spear_pingroup gpt0_tmr0_pingroup = {
+ .name = "gpt0_tmr0_grp",
+ .pins = gpt0_tmr0_pins,
+ .npins = ARRAY_SIZE(gpt0_tmr0_pins),
+ .modemuxs = gpt0_tmr0_modemux,
+ .nmodemuxs = ARRAY_SIZE(gpt0_tmr0_modemux),
+};
+
+/* Pad multiplexing for gpt0_tmr1 device */
+static const unsigned gpt0_tmr1_pins[] = { 8, 9 };
+static struct spear_muxreg gpt0_tmr1_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_GPT0_TMR1_MASK,
+ .val = PMX_GPT0_TMR1_MASK,
+ },
+};
+
+static struct spear_modemux gpt0_tmr1_modemux[] = {
+ {
+ .muxregs = gpt0_tmr1_muxreg,
+ .nmuxregs = ARRAY_SIZE(gpt0_tmr1_muxreg),
+ },
+};
+
+static struct spear_pingroup gpt0_tmr1_pingroup = {
+ .name = "gpt0_tmr1_grp",
+ .pins = gpt0_tmr1_pins,
+ .npins = ARRAY_SIZE(gpt0_tmr1_pins),
+ .modemuxs = gpt0_tmr1_modemux,
+ .nmodemuxs = ARRAY_SIZE(gpt0_tmr1_modemux),
+};
+
+static const char *const gpt0_grps[] = { "gpt0_tmr0_grp", "gpt0_tmr1_grp" };
+static struct spear_function gpt0_function = {
+ .name = "gpt0",
+ .groups = gpt0_grps,
+ .ngroups = ARRAY_SIZE(gpt0_grps),
+};
+
+/* Pad multiplexing for gpt1_tmr0 device */
+static const unsigned gpt1_tmr0_pins[] = { 6, 7 };
+static struct spear_muxreg gpt1_tmr0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_GPT1_TMR0_MASK,
+ .val = PMX_GPT1_TMR0_MASK,
+ },
+};
+
+static struct spear_modemux gpt1_tmr0_modemux[] = {
+ {
+ .muxregs = gpt1_tmr0_muxreg,
+ .nmuxregs = ARRAY_SIZE(gpt1_tmr0_muxreg),
+ },
+};
+
+static struct spear_pingroup gpt1_tmr0_pingroup = {
+ .name = "gpt1_tmr0_grp",
+ .pins = gpt1_tmr0_pins,
+ .npins = ARRAY_SIZE(gpt1_tmr0_pins),
+ .modemuxs = gpt1_tmr0_modemux,
+ .nmodemuxs = ARRAY_SIZE(gpt1_tmr0_modemux),
+};
+
+/* Pad multiplexing for gpt1_tmr1 device */
+static const unsigned gpt1_tmr1_pins[] = { 4, 5 };
+static struct spear_muxreg gpt1_tmr1_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_GPT1_TMR1_MASK,
+ .val = PMX_GPT1_TMR1_MASK,
+ },
+};
+
+static struct spear_modemux gpt1_tmr1_modemux[] = {
+ {
+ .muxregs = gpt1_tmr1_muxreg,
+ .nmuxregs = ARRAY_SIZE(gpt1_tmr1_muxreg),
+ },
+};
+
+static struct spear_pingroup gpt1_tmr1_pingroup = {
+ .name = "gpt1_tmr1_grp",
+ .pins = gpt1_tmr1_pins,
+ .npins = ARRAY_SIZE(gpt1_tmr1_pins),
+ .modemuxs = gpt1_tmr1_modemux,
+ .nmodemuxs = ARRAY_SIZE(gpt1_tmr1_modemux),
+};
+
+static const char *const gpt1_grps[] = { "gpt1_tmr1_grp", "gpt1_tmr0_grp" };
+static struct spear_function gpt1_function = {
+ .name = "gpt1",
+ .groups = gpt1_grps,
+ .ngroups = ARRAY_SIZE(gpt1_grps),
+};
+
+/* Pad multiplexing for mcif device */
+static const unsigned mcif_pins[] = { 86, 87, 88, 89, 90, 91, 92, 93, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
+ 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
+ 243, 244, 245 };
+#define MCIF_MUXREG \
+ { \
+ .reg = PAD_FUNCTION_EN_0, \
+ .mask = PMX_MCI_DATA8_15_MASK, \
+ .val = PMX_MCI_DATA8_15_MASK, \
+ }, { \
+ .reg = PAD_FUNCTION_EN_1, \
+ .mask = PMX_MCIFALL_1_MASK | PMX_NFWPRT1_MASK | \
+ PMX_NFWPRT2_MASK, \
+ .val = PMX_MCIFALL_1_MASK, \
+ }, { \
+ .reg = PAD_FUNCTION_EN_2, \
+ .mask = PMX_MCIFALL_2_MASK, \
+ .val = PMX_MCIFALL_2_MASK, \
+ }
+
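+/*
+ * The sdhci, cf and xd groups below share the mcif pads: each one applies
+ * MCIF_MUXREG and then programs the MCIF_SEL field of PERIP_CFG to route the
+ * pads to the selected controller (SD/MMC, CF or XD).
+ */
+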
+/* sdhci device */
+static struct spear_muxreg sdhci_muxreg[] = {
+ MCIF_MUXREG,
+ {
+ .reg = PERIP_CFG,
+ .mask = MCIF_SEL_MASK,
+ .val = MCIF_SEL_SD,
+ },
+};
+
+static struct spear_modemux sdhci_modemux[] = {
+ {
+ .muxregs = sdhci_muxreg,
+ .nmuxregs = ARRAY_SIZE(sdhci_muxreg),
+ },
+};
+
+static struct spear_pingroup sdhci_pingroup = {
+ .name = "sdhci_grp",
+ .pins = mcif_pins,
+ .npins = ARRAY_SIZE(mcif_pins),
+ .modemuxs = sdhci_modemux,
+ .nmodemuxs = ARRAY_SIZE(sdhci_modemux),
+};
+
+static const char *const sdhci_grps[] = { "sdhci_grp" };
+static struct spear_function sdhci_function = {
+ .name = "sdhci",
+ .groups = sdhci_grps,
+ .ngroups = ARRAY_SIZE(sdhci_grps),
+};
+
+/* cf device */
+static struct spear_muxreg cf_muxreg[] = {
+ MCIF_MUXREG,
+ {
+ .reg = PERIP_CFG,
+ .mask = MCIF_SEL_MASK,
+ .val = MCIF_SEL_CF,
+ },
+};
+
+static struct spear_modemux cf_modemux[] = {
+ {
+ .muxregs = cf_muxreg,
+ .nmuxregs = ARRAY_SIZE(cf_muxreg),
+ },
+};
+
+static struct spear_pingroup cf_pingroup = {
+ .name = "cf_grp",
+ .pins = mcif_pins,
+ .npins = ARRAY_SIZE(mcif_pins),
+ .modemuxs = cf_modemux,
+ .nmodemuxs = ARRAY_SIZE(cf_modemux),
+};
+
+static const char *const cf_grps[] = { "cf_grp" };
+static struct spear_function cf_function = {
+ .name = "cf",
+ .groups = cf_grps,
+ .ngroups = ARRAY_SIZE(cf_grps),
+};
+
+/* xd device */
+static struct spear_muxreg xd_muxreg[] = {
+ MCIF_MUXREG,
+ {
+ .reg = PERIP_CFG,
+ .mask = MCIF_SEL_MASK,
+ .val = MCIF_SEL_XD,
+ },
+};
+
+static struct spear_modemux xd_modemux[] = {
+ {
+ .muxregs = xd_muxreg,
+ .nmuxregs = ARRAY_SIZE(xd_muxreg),
+ },
+};
+
+static struct spear_pingroup xd_pingroup = {
+ .name = "xd_grp",
+ .pins = mcif_pins,
+ .npins = ARRAY_SIZE(mcif_pins),
+ .modemuxs = xd_modemux,
+ .nmodemuxs = ARRAY_SIZE(xd_modemux),
+};
+
+static const char *const xd_grps[] = { "xd_grp" };
+static struct spear_function xd_function = {
+ .name = "xd",
+ .groups = xd_grps,
+ .ngroups = ARRAY_SIZE(xd_grps),
+};
+
+/* Pad multiplexing for touch_xy device */
+static const unsigned touch_xy_pins[] = { 97 };
+static struct spear_muxreg touch_xy_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_TOUCH_XY_MASK,
+ .val = PMX_TOUCH_XY_MASK,
+ },
+};
+
+static struct spear_modemux touch_xy_modemux[] = {
+ {
+ .muxregs = touch_xy_muxreg,
+ .nmuxregs = ARRAY_SIZE(touch_xy_muxreg),
+ },
+};
+
+static struct spear_pingroup touch_xy_pingroup = {
+ .name = "touch_xy_grp",
+ .pins = touch_xy_pins,
+ .npins = ARRAY_SIZE(touch_xy_pins),
+ .modemuxs = touch_xy_modemux,
+ .nmodemuxs = ARRAY_SIZE(touch_xy_modemux),
+};
+
+static const char *const touch_xy_grps[] = { "touch_xy_grp" };
+static struct spear_function touch_xy_function = {
+ .name = "touchscreen",
+ .groups = touch_xy_grps,
+ .ngroups = ARRAY_SIZE(touch_xy_grps),
+};
+
+/* Pad multiplexing for uart1 device */
+/* Muxed with I2C */
+static const unsigned uart1_dis_i2c_pins[] = { 102, 103 };
+static struct spear_muxreg uart1_dis_i2c_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_I2C0_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux uart1_dis_i2c_modemux[] = {
+ {
+ .muxregs = uart1_dis_i2c_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart1_dis_i2c_muxreg),
+ },
+};
+
+static struct spear_pingroup uart_1_dis_i2c_pingroup = {
+ .name = "uart1_disable_i2c_grp",
+ .pins = uart1_dis_i2c_pins,
+ .npins = ARRAY_SIZE(uart1_dis_i2c_pins),
+ .modemuxs = uart1_dis_i2c_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart1_dis_i2c_modemux),
+};
+
+/* Muxed with SD/MMC */
+static const unsigned uart1_dis_sd_pins[] = { 214, 215 };
+static struct spear_muxreg uart1_dis_sd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_MCIDATA1_MASK |
+ PMX_MCIDATA2_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux uart1_dis_sd_modemux[] = {
+ {
+ .muxregs = uart1_dis_sd_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart1_dis_sd_muxreg),
+ },
+};
+
+static struct spear_pingroup uart_1_dis_sd_pingroup = {
+ .name = "uart1_disable_sd_grp",
+ .pins = uart1_dis_sd_pins,
+ .npins = ARRAY_SIZE(uart1_dis_sd_pins),
+ .modemuxs = uart1_dis_sd_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart1_dis_sd_modemux),
+};
+
+static const char *const uart1_grps[] = { "uart1_disable_i2c_grp",
+ "uart1_disable_sd_grp" };
+static struct spear_function uart1_function = {
+ .name = "uart1",
+ .groups = uart1_grps,
+ .ngroups = ARRAY_SIZE(uart1_grps),
+};
+
+/* Pad multiplexing for uart2_3 device */
+static const unsigned uart2_3_pins[] = { 104, 105, 106, 107 };
+static struct spear_muxreg uart2_3_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_I2S0_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux uart2_3_modemux[] = {
+ {
+ .muxregs = uart2_3_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart2_3_muxreg),
+ },
+};
+
+static struct spear_pingroup uart_2_3_pingroup = {
+ .name = "uart2_3_grp",
+ .pins = uart2_3_pins,
+ .npins = ARRAY_SIZE(uart2_3_pins),
+ .modemuxs = uart2_3_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart2_3_modemux),
+};
+
+static const char *const uart2_3_grps[] = { "uart2_3_grp" };
+static struct spear_function uart2_3_function = {
+ .name = "uart2_3",
+ .groups = uart2_3_grps,
+ .ngroups = ARRAY_SIZE(uart2_3_grps),
+};
+
+/* Pad multiplexing for uart4 device */
+static const unsigned uart4_pins[] = { 108, 113 };
+static struct spear_muxreg uart4_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_I2S0_MASK | PMX_CLCD1_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux uart4_modemux[] = {
+ {
+ .muxregs = uart4_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart4_muxreg),
+ },
+};
+
+static struct spear_pingroup uart_4_pingroup = {
+ .name = "uart4_grp",
+ .pins = uart4_pins,
+ .npins = ARRAY_SIZE(uart4_pins),
+ .modemuxs = uart4_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart4_modemux),
+};
+
+static const char *const uart4_grps[] = { "uart4_grp" };
+static struct spear_function uart4_function = {
+ .name = "uart4",
+ .groups = uart4_grps,
+ .ngroups = ARRAY_SIZE(uart4_grps),
+};
+
+/* Pad multiplexing for uart5 device */
+static const unsigned uart5_pins[] = { 114, 115 };
+static struct spear_muxreg uart5_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_CLCD1_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux uart5_modemux[] = {
+ {
+ .muxregs = uart5_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart5_muxreg),
+ },
+};
+
+static struct spear_pingroup uart_5_pingroup = {
+ .name = "uart5_grp",
+ .pins = uart5_pins,
+ .npins = ARRAY_SIZE(uart5_pins),
+ .modemuxs = uart5_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart5_modemux),
+};
+
+static const char *const uart5_grps[] = { "uart5_grp" };
+static struct spear_function uart5_function = {
+ .name = "uart5",
+ .groups = uart5_grps,
+ .ngroups = ARRAY_SIZE(uart5_grps),
+};
+
+/* Pad multiplexing for rs485_0_1_tdm_0_1 device */
+static const unsigned rs485_0_1_tdm_0_1_pins[] = { 116, 117, 118, 119, 120, 121,
+ 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
+ 136, 137 };
+static struct spear_muxreg rs485_0_1_tdm_0_1_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_CLCD1_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux rs485_0_1_tdm_0_1_modemux[] = {
+ {
+ .muxregs = rs485_0_1_tdm_0_1_muxreg,
+ .nmuxregs = ARRAY_SIZE(rs485_0_1_tdm_0_1_muxreg),
+ },
+};
+
+static struct spear_pingroup rs485_0_1_tdm_0_1_pingroup = {
+ .name = "rs485_0_1_tdm_0_1_grp",
+ .pins = rs485_0_1_tdm_0_1_pins,
+ .npins = ARRAY_SIZE(rs485_0_1_tdm_0_1_pins),
+ .modemuxs = rs485_0_1_tdm_0_1_modemux,
+ .nmodemuxs = ARRAY_SIZE(rs485_0_1_tdm_0_1_modemux),
+};
+
+static const char *const rs485_0_1_tdm_0_1_grps[] = { "rs485_0_1_tdm_0_1_grp" };
+static struct spear_function rs485_0_1_tdm_0_1_function = {
+ .name = "rs485_0_1_tdm_0_1",
+ .groups = rs485_0_1_tdm_0_1_grps,
+ .ngroups = ARRAY_SIZE(rs485_0_1_tdm_0_1_grps),
+};
+
+/* Pad multiplexing for i2c_1_2 device */
+static const unsigned i2c_1_2_pins[] = { 138, 139, 140, 141 };
+static struct spear_muxreg i2c_1_2_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_CLCD1_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c_1_2_modemux[] = {
+ {
+ .muxregs = i2c_1_2_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c_1_2_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c_1_2_pingroup = {
+ .name = "i2c_1_2_grp",
+ .pins = i2c_1_2_pins,
+ .npins = ARRAY_SIZE(i2c_1_2_pins),
+ .modemuxs = i2c_1_2_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c_1_2_modemux),
+};
+
+static const char *const i2c_1_2_grps[] = { "i2c_1_2_grp" };
+static struct spear_function i2c_1_2_function = {
+ .name = "i2c_1_2",
+ .groups = i2c_1_2_grps,
+ .ngroups = ARRAY_SIZE(i2c_1_2_grps),
+};
+
+/* Pad multiplexing for i2c3_dis_smi_clcd device */
+/* Muxed with SMI & CLCD */
+static const unsigned i2c3_dis_smi_clcd_pins[] = { 142, 153 };
+static struct spear_muxreg i2c3_dis_smi_clcd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_CLCD1_MASK | PMX_SMI_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c3_dis_smi_clcd_modemux[] = {
+ {
+ .muxregs = i2c3_dis_smi_clcd_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c3_dis_smi_clcd_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c3_dis_smi_clcd_pingroup = {
+ .name = "i2c3_dis_smi_clcd_grp",
+ .pins = i2c3_dis_smi_clcd_pins,
+ .npins = ARRAY_SIZE(i2c3_dis_smi_clcd_pins),
+ .modemuxs = i2c3_dis_smi_clcd_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c3_dis_smi_clcd_modemux),
+};
+
+/* Pad multiplexing for i2c3_dis_sd_i2s0 device */
+/* Muxed with SD/MMC & I2S1 */
+static const unsigned i2c3_dis_sd_i2s0_pins[] = { 0, 216 };
+static struct spear_muxreg i2c3_dis_sd_i2s0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_I2S1_MASK | PMX_MCIDATA3_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c3_dis_sd_i2s0_modemux[] = {
+ {
+ .muxregs = i2c3_dis_sd_i2s0_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c3_dis_sd_i2s0_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c3_dis_sd_i2s0_pingroup = {
+ .name = "i2c3_dis_sd_i2s0_grp",
+ .pins = i2c3_dis_sd_i2s0_pins,
+ .npins = ARRAY_SIZE(i2c3_dis_sd_i2s0_pins),
+ .modemuxs = i2c3_dis_sd_i2s0_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c3_dis_sd_i2s0_modemux),
+};
+
+static const char *const i2c3_grps[] = { "i2c3_dis_smi_clcd_grp",
+ "i2c3_dis_sd_i2s0_grp" };
+static struct spear_function i2c3_function = {
+ .name = "i2c3_i2s1",
+ .groups = i2c3_grps,
+ .ngroups = ARRAY_SIZE(i2c3_grps),
+};
+
+/* Pad multiplexing for i2c_4_5_dis_smi device */
+/* Muxed with SMI */
+static const unsigned i2c_4_5_dis_smi_pins[] = { 154, 155, 156, 157 };
+static struct spear_muxreg i2c_4_5_dis_smi_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_SMI_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c_4_5_dis_smi_modemux[] = {
+ {
+ .muxregs = i2c_4_5_dis_smi_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c_4_5_dis_smi_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c_4_5_dis_smi_pingroup = {
+ .name = "i2c_4_5_dis_smi_grp",
+ .pins = i2c_4_5_dis_smi_pins,
+ .npins = ARRAY_SIZE(i2c_4_5_dis_smi_pins),
+ .modemuxs = i2c_4_5_dis_smi_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c_4_5_dis_smi_modemux),
+};
+
+/* Pad multiplexing for i2c4_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned i2c4_dis_sd_pins[] = { 217, 218 };
+static struct spear_muxreg i2c4_dis_sd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_MCIDATA4_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_MCIDATA5_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c4_dis_sd_modemux[] = {
+ {
+ .muxregs = i2c4_dis_sd_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c4_dis_sd_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c4_dis_sd_pingroup = {
+ .name = "i2c4_dis_sd_grp",
+ .pins = i2c4_dis_sd_pins,
+ .npins = ARRAY_SIZE(i2c4_dis_sd_pins),
+ .modemuxs = i2c4_dis_sd_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c4_dis_sd_modemux),
+};
+
+/* Pad multiplexing for i2c5_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned i2c5_dis_sd_pins[] = { 219, 220 };
+static struct spear_muxreg i2c5_dis_sd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_MCIDATA6_MASK |
+ PMX_MCIDATA7_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c5_dis_sd_modemux[] = {
+ {
+ .muxregs = i2c5_dis_sd_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c5_dis_sd_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c5_dis_sd_pingroup = {
+ .name = "i2c5_dis_sd_grp",
+ .pins = i2c5_dis_sd_pins,
+ .npins = ARRAY_SIZE(i2c5_dis_sd_pins),
+ .modemuxs = i2c5_dis_sd_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c5_dis_sd_modemux),
+};
+
+static const char *const i2c_4_5_grps[] = { "i2c5_dis_sd_grp",
+ "i2c4_dis_sd_grp", "i2c_4_5_dis_smi_grp" };
+static struct spear_function i2c_4_5_function = {
+ .name = "i2c_4_5",
+ .groups = i2c_4_5_grps,
+ .ngroups = ARRAY_SIZE(i2c_4_5_grps),
+};
+
+/* Pad multiplexing for i2c_6_7_dis_kbd device */
+/* Muxed with KBD */
+static const unsigned i2c_6_7_dis_kbd_pins[] = { 207, 208, 209, 210 };
+static struct spear_muxreg i2c_6_7_dis_kbd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_KBD_ROWCOL25_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c_6_7_dis_kbd_modemux[] = {
+ {
+ .muxregs = i2c_6_7_dis_kbd_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c_6_7_dis_kbd_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c_6_7_dis_kbd_pingroup = {
+ .name = "i2c_6_7_dis_kbd_grp",
+ .pins = i2c_6_7_dis_kbd_pins,
+ .npins = ARRAY_SIZE(i2c_6_7_dis_kbd_pins),
+ .modemuxs = i2c_6_7_dis_kbd_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c_6_7_dis_kbd_modemux),
+};
+
+/* Pad multiplexing for i2c6_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned i2c6_dis_sd_pins[] = { 236, 237 };
+static struct spear_muxreg i2c6_dis_sd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_MCIIORDRE_MASK |
+ PMX_MCIIOWRWE_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c6_dis_sd_modemux[] = {
+ {
+ .muxregs = i2c6_dis_sd_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c6_dis_sd_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c6_dis_sd_pingroup = {
+ .name = "i2c6_dis_sd_grp",
+ .pins = i2c6_dis_sd_pins,
+ .npins = ARRAY_SIZE(i2c6_dis_sd_pins),
+ .modemuxs = i2c6_dis_sd_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c6_dis_sd_modemux),
+};
+
+/* Pad multiplexing for i2c7_dis_sd device */
+static const unsigned i2c7_dis_sd_pins[] = { 238, 239 };
+static struct spear_muxreg i2c7_dis_sd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_MCIRESETCF_MASK |
+ PMX_MCICS0CE_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux i2c7_dis_sd_modemux[] = {
+ {
+ .muxregs = i2c7_dis_sd_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c7_dis_sd_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c7_dis_sd_pingroup = {
+ .name = "i2c7_dis_sd_grp",
+ .pins = i2c7_dis_sd_pins,
+ .npins = ARRAY_SIZE(i2c7_dis_sd_pins),
+ .modemuxs = i2c7_dis_sd_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c7_dis_sd_modemux),
+};
+
+static const char *const i2c_6_7_grps[] = { "i2c6_dis_sd_grp",
+ "i2c7_dis_sd_grp", "i2c_6_7_dis_kbd_grp" };
+static struct spear_function i2c_6_7_function = {
+ .name = "i2c_6_7",
+ .groups = i2c_6_7_grps,
+ .ngroups = ARRAY_SIZE(i2c_6_7_grps),
+};
+
+/* Pad multiplexing for can0_dis_nor device */
+/* Muxed with NOR */
+static const unsigned can0_dis_nor_pins[] = { 56, 57 };
+static struct spear_muxreg can0_dis_nor_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_0,
+ .mask = PMX_NFRSTPWDWN2_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_NFRSTPWDWN3_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux can0_dis_nor_modemux[] = {
+ {
+ .muxregs = can0_dis_nor_muxreg,
+ .nmuxregs = ARRAY_SIZE(can0_dis_nor_muxreg),
+ },
+};
+
+static struct spear_pingroup can0_dis_nor_pingroup = {
+ .name = "can0_dis_nor_grp",
+ .pins = can0_dis_nor_pins,
+ .npins = ARRAY_SIZE(can0_dis_nor_pins),
+ .modemuxs = can0_dis_nor_modemux,
+ .nmodemuxs = ARRAY_SIZE(can0_dis_nor_modemux),
+};
+
+/* Pad multiplexing for can0_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned can0_dis_sd_pins[] = { 240, 241 };
+static struct spear_muxreg can0_dis_sd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_MCICFINTR_MASK | PMX_MCIIORDY_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux can0_dis_sd_modemux[] = {
+ {
+ .muxregs = can0_dis_sd_muxreg,
+ .nmuxregs = ARRAY_SIZE(can0_dis_sd_muxreg),
+ },
+};
+
+static struct spear_pingroup can0_dis_sd_pingroup = {
+ .name = "can0_dis_sd_grp",
+ .pins = can0_dis_sd_pins,
+ .npins = ARRAY_SIZE(can0_dis_sd_pins),
+ .modemuxs = can0_dis_sd_modemux,
+ .nmodemuxs = ARRAY_SIZE(can0_dis_sd_modemux),
+};
+
+static const char *const can0_grps[] = { "can0_dis_nor_grp", "can0_dis_sd_grp"
+};
+static struct spear_function can0_function = {
+ .name = "can0",
+ .groups = can0_grps,
+ .ngroups = ARRAY_SIZE(can0_grps),
+};
+
+/* Pad multiplexing for can1_dis_sd device */
+/* Muxed with SD/MMC */
+static const unsigned can1_dis_sd_pins[] = { 242, 243 };
+static struct spear_muxreg can1_dis_sd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_MCICS1_MASK | PMX_MCIDMAACK_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux can1_dis_sd_modemux[] = {
+ {
+ .muxregs = can1_dis_sd_muxreg,
+ .nmuxregs = ARRAY_SIZE(can1_dis_sd_muxreg),
+ },
+};
+
+static struct spear_pingroup can1_dis_sd_pingroup = {
+ .name = "can1_dis_sd_grp",
+ .pins = can1_dis_sd_pins,
+ .npins = ARRAY_SIZE(can1_dis_sd_pins),
+ .modemuxs = can1_dis_sd_modemux,
+ .nmodemuxs = ARRAY_SIZE(can1_dis_sd_modemux),
+};
+
+/* Pad multiplexing for can1_dis_kbd device */
+/* Muxed with KBD */
+static const unsigned can1_dis_kbd_pins[] = { 201, 202 };
+static struct spear_muxreg can1_dis_kbd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_KBD_ROWCOL25_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux can1_dis_kbd_modemux[] = {
+ {
+ .muxregs = can1_dis_kbd_muxreg,
+ .nmuxregs = ARRAY_SIZE(can1_dis_kbd_muxreg),
+ },
+};
+
+static struct spear_pingroup can1_dis_kbd_pingroup = {
+ .name = "can1_dis_kbd_grp",
+ .pins = can1_dis_kbd_pins,
+ .npins = ARRAY_SIZE(can1_dis_kbd_pins),
+ .modemuxs = can1_dis_kbd_modemux,
+ .nmodemuxs = ARRAY_SIZE(can1_dis_kbd_modemux),
+};
+
+static const char *const can1_grps[] = { "can1_dis_sd_grp", "can1_dis_kbd_grp"
+};
+static struct spear_function can1_function = {
+ .name = "can1",
+ .groups = can1_grps,
+ .ngroups = ARRAY_SIZE(can1_grps),
+};
+
+/* Pad multiplexing for pci device */
+static const unsigned pci_sata_pins[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 18,
+ 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99 };
+#define PCI_SATA_MUXREG \
+ { \
+ .reg = PAD_FUNCTION_EN_0, \
+ .mask = PMX_MCI_DATA8_15_MASK, \
+ .val = 0, \
+ }, { \
+ .reg = PAD_FUNCTION_EN_1, \
+ .mask = PMX_PCI_REG1_MASK, \
+ .val = 0, \
+ }, { \
+ .reg = PAD_FUNCTION_EN_2, \
+ .mask = PMX_PCI_REG2_MASK, \
+ .val = 0, \
+ }
+
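+/*
+ * The pcie0/1/2 and sata0/1/2 groups below share the pci_sata pads: each one
+ * applies PCI_SATA_MUXREG and then programs PCIE_SATA_CFG with the per-port
+ * PCIE_CFG_VAL(n) or SATA_CFG_VAL(n) value to select PCIe or SATA operation.
+ */
+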
+/* pad multiplexing for pcie0 device */
+static struct spear_muxreg pcie0_muxreg[] = {
+ PCI_SATA_MUXREG,
+ {
+ .reg = PCIE_SATA_CFG,
+ .mask = PCIE_CFG_VAL(0),
+ .val = PCIE_CFG_VAL(0),
+ },
+};
+
+static struct spear_modemux pcie0_modemux[] = {
+ {
+ .muxregs = pcie0_muxreg,
+ .nmuxregs = ARRAY_SIZE(pcie0_muxreg),
+ },
+};
+
+static struct spear_pingroup pcie0_pingroup = {
+ .name = "pcie0_grp",
+ .pins = pci_sata_pins,
+ .npins = ARRAY_SIZE(pci_sata_pins),
+ .modemuxs = pcie0_modemux,
+ .nmodemuxs = ARRAY_SIZE(pcie0_modemux),
+};
+
+/* pad multiplexing for pcie1 device */
+static struct spear_muxreg pcie1_muxreg[] = {
+ PCI_SATA_MUXREG,
+ {
+ .reg = PCIE_SATA_CFG,
+ .mask = PCIE_CFG_VAL(1),
+ .val = PCIE_CFG_VAL(1),
+ },
+};
+
+static struct spear_modemux pcie1_modemux[] = {
+ {
+ .muxregs = pcie1_muxreg,
+ .nmuxregs = ARRAY_SIZE(pcie1_muxreg),
+ },
+};
+
+static struct spear_pingroup pcie1_pingroup = {
+ .name = "pcie1_grp",
+ .pins = pci_sata_pins,
+ .npins = ARRAY_SIZE(pci_sata_pins),
+ .modemuxs = pcie1_modemux,
+ .nmodemuxs = ARRAY_SIZE(pcie1_modemux),
+};
+
+/* pad multiplexing for pcie2 device */
+static struct spear_muxreg pcie2_muxreg[] = {
+ PCI_SATA_MUXREG,
+ {
+ .reg = PCIE_SATA_CFG,
+ .mask = PCIE_CFG_VAL(2),
+ .val = PCIE_CFG_VAL(2),
+ },
+};
+
+static struct spear_modemux pcie2_modemux[] = {
+ {
+ .muxregs = pcie2_muxreg,
+ .nmuxregs = ARRAY_SIZE(pcie2_muxreg),
+ },
+};
+
+static struct spear_pingroup pcie2_pingroup = {
+ .name = "pcie2_grp",
+ .pins = pci_sata_pins,
+ .npins = ARRAY_SIZE(pci_sata_pins),
+ .modemuxs = pcie2_modemux,
+ .nmodemuxs = ARRAY_SIZE(pcie2_modemux),
+};
+
+static const char *const pci_grps[] = { "pcie0_grp", "pcie1_grp", "pcie2_grp" };
+static struct spear_function pci_function = {
+ .name = "pci",
+ .groups = pci_grps,
+ .ngroups = ARRAY_SIZE(pci_grps),
+};
+
+/* pad multiplexing for sata0 device */
+static struct spear_muxreg sata0_muxreg[] = {
+ PCI_SATA_MUXREG,
+ {
+ .reg = PCIE_SATA_CFG,
+ .mask = SATA_CFG_VAL(0),
+ .val = SATA_CFG_VAL(0),
+ },
+};
+
+static struct spear_modemux sata0_modemux[] = {
+ {
+ .muxregs = sata0_muxreg,
+ .nmuxregs = ARRAY_SIZE(sata0_muxreg),
+ },
+};
+
+static struct spear_pingroup sata0_pingroup = {
+ .name = "sata0_grp",
+ .pins = pci_sata_pins,
+ .npins = ARRAY_SIZE(pci_sata_pins),
+ .modemuxs = sata0_modemux,
+ .nmodemuxs = ARRAY_SIZE(sata0_modemux),
+};
+
+/* pad multiplexing for sata1 device */
+static struct spear_muxreg sata1_muxreg[] = {
+ PCI_SATA_MUXREG,
+ {
+ .reg = PCIE_SATA_CFG,
+ .mask = SATA_CFG_VAL(1),
+ .val = SATA_CFG_VAL(1),
+ },
+};
+
+static struct spear_modemux sata1_modemux[] = {
+ {
+ .muxregs = sata1_muxreg,
+ .nmuxregs = ARRAY_SIZE(sata1_muxreg),
+ },
+};
+
+static struct spear_pingroup sata1_pingroup = {
+ .name = "sata1_grp",
+ .pins = pci_sata_pins,
+ .npins = ARRAY_SIZE(pci_sata_pins),
+ .modemuxs = sata1_modemux,
+ .nmodemuxs = ARRAY_SIZE(sata1_modemux),
+};
+
+/* pad multiplexing for sata2 device */
+static struct spear_muxreg sata2_muxreg[] = {
+ PCI_SATA_MUXREG,
+ {
+ .reg = PCIE_SATA_CFG,
+ .mask = SATA_CFG_VAL(2),
+ .val = SATA_CFG_VAL(2),
+ },
+};
+
+static struct spear_modemux sata2_modemux[] = {
+ {
+ .muxregs = sata2_muxreg,
+ .nmuxregs = ARRAY_SIZE(sata2_muxreg),
+ },
+};
+
+static struct spear_pingroup sata2_pingroup = {
+ .name = "sata2_grp",
+ .pins = pci_sata_pins,
+ .npins = ARRAY_SIZE(pci_sata_pins),
+ .modemuxs = sata2_modemux,
+ .nmodemuxs = ARRAY_SIZE(sata2_modemux),
+};
+
+static const char *const sata_grps[] = { "sata0_grp", "sata1_grp", "sata2_grp"
+};
+static struct spear_function sata_function = {
+ .name = "sata",
+ .groups = sata_grps,
+ .ngroups = ARRAY_SIZE(sata_grps),
+};
+
+/* Pad multiplexing for ssp1_dis_kbd device */
+static const unsigned ssp1_dis_kbd_pins[] = { 203, 204, 205, 206 };
+static struct spear_muxreg ssp1_dis_kbd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL1_MASK |
+ PMX_KBD_COL0_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK |
+ PMX_NFCE2_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux ssp1_dis_kbd_modemux[] = {
+ {
+ .muxregs = ssp1_dis_kbd_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp1_dis_kbd_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp1_dis_kbd_pingroup = {
+ .name = "ssp1_dis_kbd_grp",
+ .pins = ssp1_dis_kbd_pins,
+ .npins = ARRAY_SIZE(ssp1_dis_kbd_pins),
+ .modemuxs = ssp1_dis_kbd_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp1_dis_kbd_modemux),
+};
+
+/* Pad multiplexing for ssp1_dis_sd device */
+static const unsigned ssp1_dis_sd_pins[] = { 224, 226, 227, 228 };
+static struct spear_muxreg ssp1_dis_sd_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_MCIADDR0ALE_MASK | PMX_MCIADDR2_MASK |
+ PMX_MCICECF_MASK | PMX_MCICEXD_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux ssp1_dis_sd_modemux[] = {
+ {
+ .muxregs = ssp1_dis_sd_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp1_dis_sd_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp1_dis_sd_pingroup = {
+ .name = "ssp1_dis_sd_grp",
+ .pins = ssp1_dis_sd_pins,
+ .npins = ARRAY_SIZE(ssp1_dis_sd_pins),
+ .modemuxs = ssp1_dis_sd_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp1_dis_sd_modemux),
+};
+
+static const char *const ssp1_grps[] = { "ssp1_dis_kbd_grp",
+ "ssp1_dis_sd_grp" };
+static struct spear_function ssp1_function = {
+ .name = "ssp1",
+ .groups = ssp1_grps,
+ .ngroups = ARRAY_SIZE(ssp1_grps),
+};
+
+/* Pad multiplexing for gpt64 device */
+static const unsigned gpt64_pins[] = { 230, 231, 232, 245 };
+static struct spear_muxreg gpt64_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PMX_MCICDCF1_MASK | PMX_MCICDCF2_MASK | PMX_MCICDXD_MASK
+ | PMX_MCILEDS_MASK,
+ .val = 0,
+ },
+};
+
+static struct spear_modemux gpt64_modemux[] = {
+ {
+ .muxregs = gpt64_muxreg,
+ .nmuxregs = ARRAY_SIZE(gpt64_muxreg),
+ },
+};
+
+static struct spear_pingroup gpt64_pingroup = {
+ .name = "gpt64_grp",
+ .pins = gpt64_pins,
+ .npins = ARRAY_SIZE(gpt64_pins),
+ .modemuxs = gpt64_modemux,
+ .nmodemuxs = ARRAY_SIZE(gpt64_modemux),
+};
+
+static const char *const gpt64_grps[] = { "gpt64_grp" };
+static struct spear_function gpt64_function = {
+ .name = "gpt64",
+ .groups = gpt64_grps,
+ .ngroups = ARRAY_SIZE(gpt64_grps),
+};
+
+/* pingroups */
+static struct spear_pingroup *spear1310_pingroups[] = {
+ &i2c0_pingroup,
+ &ssp0_pingroup,
+ &i2s0_pingroup,
+ &i2s1_pingroup,
+ &clcd_pingroup,
+ &clcd_high_res_pingroup,
+ &arm_gpio_pingroup,
+ &smi_2_chips_pingroup,
+ &smi_4_chips_pingroup,
+ &gmii_pingroup,
+ &rgmii_pingroup,
+ &smii_0_1_2_pingroup,
+ &ras_mii_txclk_pingroup,
+ &nand_8bit_pingroup,
+ &nand_16bit_pingroup,
+ &nand_4_chips_pingroup,
+ &keyboard_6x6_pingroup,
+ &keyboard_rowcol6_8_pingroup,
+ &uart0_pingroup,
+ &uart0_modem_pingroup,
+ &gpt0_tmr0_pingroup,
+ &gpt0_tmr1_pingroup,
+ &gpt1_tmr0_pingroup,
+ &gpt1_tmr1_pingroup,
+ &sdhci_pingroup,
+ &cf_pingroup,
+ &xd_pingroup,
+ &touch_xy_pingroup,
+ &ssp0_cs0_pingroup,
+ &ssp0_cs1_2_pingroup,
+ &uart_1_dis_i2c_pingroup,
+ &uart_1_dis_sd_pingroup,
+ &uart_2_3_pingroup,
+ &uart_4_pingroup,
+ &uart_5_pingroup,
+ &rs485_0_1_tdm_0_1_pingroup,
+ &i2c_1_2_pingroup,
+ &i2c3_dis_smi_clcd_pingroup,
+ &i2c3_dis_sd_i2s0_pingroup,
+ &i2c_4_5_dis_smi_pingroup,
+ &i2c4_dis_sd_pingroup,
+ &i2c5_dis_sd_pingroup,
+ &i2c_6_7_dis_kbd_pingroup,
+ &i2c6_dis_sd_pingroup,
+ &i2c7_dis_sd_pingroup,
+ &can0_dis_nor_pingroup,
+ &can0_dis_sd_pingroup,
+ &can1_dis_sd_pingroup,
+ &can1_dis_kbd_pingroup,
+ &pcie0_pingroup,
+ &pcie1_pingroup,
+ &pcie2_pingroup,
+ &sata0_pingroup,
+ &sata1_pingroup,
+ &sata2_pingroup,
+ &ssp1_dis_kbd_pingroup,
+ &ssp1_dis_sd_pingroup,
+ &gpt64_pingroup,
+};
+
+/* functions */
+static struct spear_function *spear1310_functions[] = {
+ &i2c0_function,
+ &ssp0_function,
+ &i2s0_function,
+ &i2s1_function,
+ &clcd_function,
+ &arm_gpio_function,
+ &smi_function,
+ &gmii_function,
+ &rgmii_function,
+ &smii_0_1_2_function,
+ &ras_mii_txclk_function,
+ &nand_function,
+ &keyboard_function,
+ &uart0_function,
+ &gpt0_function,
+ &gpt1_function,
+ &sdhci_function,
+ &cf_function,
+ &xd_function,
+ &touch_xy_function,
+ &uart1_function,
+ &uart2_3_function,
+ &uart4_function,
+ &uart5_function,
+ &rs485_0_1_tdm_0_1_function,
+ &i2c_1_2_function,
+ &i2c3_function,
+ &i2c_4_5_function,
+ &i2c_6_7_function,
+ &can0_function,
+ &can1_function,
+ &pci_function,
+ &sata_function,
+ &ssp1_function,
+ &gpt64_function,
+};
+
+static struct spear_pinctrl_machdata spear1310_machdata = {
+ .pins = spear1310_pins,
+ .npins = ARRAY_SIZE(spear1310_pins),
+ .groups = spear1310_pingroups,
+ .ngroups = ARRAY_SIZE(spear1310_pingroups),
+ .functions = spear1310_functions,
+ .nfunctions = ARRAY_SIZE(spear1310_functions),
+ .modes_supported = false,
+};
+
+static struct of_device_id spear1310_pinctrl_of_match[] __devinitdata = {
+ {
+ .compatible = "st,spear1310-pinmux",
+ },
+ {},
+};
+
+static int __devinit spear1310_pinctrl_probe(struct platform_device *pdev)
+{
+ return spear_pinctrl_probe(pdev, &spear1310_machdata);
+}
+
+static int __devexit spear1310_pinctrl_remove(struct platform_device *pdev)
+{
+ return spear_pinctrl_remove(pdev);
+}
+
+static struct platform_driver spear1310_pinctrl_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = spear1310_pinctrl_of_match,
+ },
+ .probe = spear1310_pinctrl_probe,
+ .remove = __devexit_p(spear1310_pinctrl_remove),
+};
+
+static int __init spear1310_pinctrl_init(void)
+{
+ return platform_driver_register(&spear1310_pinctrl_driver);
+}
+arch_initcall(spear1310_pinctrl_init);
+
+static void __exit spear1310_pinctrl_exit(void)
+{
+ platform_driver_unregister(&spear1310_pinctrl_driver);
+}
+module_exit(spear1310_pinctrl_exit);
+
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c
new file mode 100644
index 00000000000..a8ab2a6f51b
--- /dev/null
+++ b/drivers/pinctrl/spear/pinctrl-spear1340.c
@@ -0,0 +1,1989 @@
+/*
+ * Driver for the ST Microelectronics SPEAr1340 pinmux
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "pinctrl-spear.h"
+
+#define DRIVER_NAME "spear1340-pinmux"
+
+/* pins */
+static const struct pinctrl_pin_desc spear1340_pins[] = {
+ SPEAR_PIN_0_TO_101,
+ SPEAR_PIN_102_TO_245,
+ PINCTRL_PIN(246, "PLGPIO246"),
+ PINCTRL_PIN(247, "PLGPIO247"),
+ PINCTRL_PIN(248, "PLGPIO248"),
+ PINCTRL_PIN(249, "PLGPIO249"),
+ PINCTRL_PIN(250, "PLGPIO250"),
+ PINCTRL_PIN(251, "PLGPIO251"),
+};
+
+/* In SPEAr1340 there are two levels of pad muxing */
+/* - pads as gpio OR peripherals */
+#define PAD_FUNCTION_EN_1 0x668
+#define PAD_FUNCTION_EN_2 0x66C
+#define PAD_FUNCTION_EN_3 0x670
+#define PAD_FUNCTION_EN_4 0x674
+#define PAD_FUNCTION_EN_5 0x690
+#define PAD_FUNCTION_EN_6 0x694
+#define PAD_FUNCTION_EN_7 0x698
+#define PAD_FUNCTION_EN_8 0x69C
+
+/* - If peripherals, then primary OR alternate peripheral */
+#define PAD_SHARED_IP_EN_1 0x6A0
+#define PAD_SHARED_IP_EN_2 0x6A4
+
+/*
+ * Macros for the first level of pmx - pads as GPIO OR peripherals. There are
+ * 8 registers with 32 bits each for handling GPIO pads; register 8 has only
+ * 26 relevant bits.
+ */
+/* macros for configuring pads as GPIOs */
+#define PADS_AS_GPIO_REG0_MASK 0xFFFFFFFE
+#define PADS_AS_GPIO_REGS_MASK 0xFFFFFFFF
+#define PADS_AS_GPIO_REG7_MASK 0x07FFFFFF
+
+/* macros for configuring pads as peripherals */
+#define FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK 0x00000FFE
+#define UART0_ENH_AND_GPT_REG0_MASK 0x0003F000
+#define PWM1_AND_KBD_COL5_REG0_MASK 0x00040000
+#define I2C1_REG0_MASK 0x01080000
+#define SPDIF_IN_REG0_MASK 0x00100000
+#define PWM2_AND_GPT0_TMR0_CPT_REG0_MASK 0x00400000
+#define PWM3_AND_GPT0_TMR1_CLK_REG0_MASK 0x00800000
+#define PWM0_AND_SSP0_CS1_REG0_MASK 0x02000000
+#define VIP_AND_CAM3_REG0_MASK 0xFC200000
+#define VIP_AND_CAM3_REG1_MASK 0x0000000F
+#define VIP_REG1_MASK 0x00001EF0
+#define VIP_AND_CAM2_REG1_MASK 0x007FE100
+#define VIP_AND_CAM1_REG1_MASK 0xFF800000
+#define VIP_AND_CAM1_REG2_MASK 0x00000003
+#define VIP_AND_CAM0_REG2_MASK 0x00001FFC
+#define SMI_REG2_MASK 0x0021E000
+#define SSP0_REG2_MASK 0x001E0000
+#define TS_AND_SSP0_CS2_REG2_MASK 0x00400000
+#define UART0_REG2_MASK 0x01800000
+#define UART1_REG2_MASK 0x06000000
+#define I2S_IN_REG2_MASK 0xF8000000
+#define DEVS_GRP_AND_MIPHY_DBG_REG3_MASK 0x000001FE
+#define I2S_OUT_REG3_MASK 0x000001EF
+#define I2S_IN_REG3_MASK 0x00000010
+#define GMAC_REG3_MASK 0xFFFFFE00
+#define GMAC_REG4_MASK 0x0000001F
+#define DEVS_GRP_AND_MIPHY_DBG_REG4_MASK 0x7FFFFF20
+#define SSP0_CS3_REG4_MASK 0x00000020
+#define I2C0_REG4_MASK 0x000000C0
+#define CEC0_REG4_MASK 0x00000100
+#define CEC1_REG4_MASK 0x00000200
+#define SPDIF_OUT_REG4_MASK 0x00000400
+#define CLCD_REG4_MASK 0x7FFFF800
+#define CLCD_AND_ARM_TRACE_REG4_MASK 0x80000000
+#define CLCD_AND_ARM_TRACE_REG5_MASK 0xFFFFFFFF
+#define CLCD_AND_ARM_TRACE_REG6_MASK 0x00000001
+#define FSMC_PNOR_AND_MCIF_REG6_MASK 0x073FFFFE
+#define MCIF_REG6_MASK 0xF8C00000
+#define MCIF_REG7_MASK 0x000043FF
+#define FSMC_8BIT_REG7_MASK 0x07FFBC00
+
+/* other registers */
+#define PERIP_CFG 0x42C
+ /* PERIP_CFG register masks */
+ #define SSP_CS_CTL_HW 0
+ #define SSP_CS_CTL_SW 1
+ #define SSP_CS_CTL_MASK 1
+ #define SSP_CS_CTL_SHIFT 21
+ #define SSP_CS_VAL_MASK 1
+ #define SSP_CS_VAL_SHIFT 20
+ #define SSP_CS_SEL_CS0 0
+ #define SSP_CS_SEL_CS1 1
+ #define SSP_CS_SEL_CS2 2
+ #define SSP_CS_SEL_MASK 3
+ #define SSP_CS_SEL_SHIFT 18
+
+ #define I2S_CHNL_2_0 (0)
+ #define I2S_CHNL_3_1 (1)
+ #define I2S_CHNL_5_1 (2)
+ #define I2S_CHNL_7_1 (3)
+ #define I2S_CHNL_PLAY_SHIFT (4)
+ #define I2S_CHNL_PLAY_MASK (3 << 4)
+ #define I2S_CHNL_REC_SHIFT (6)
+ #define I2S_CHNL_REC_MASK (3 << 6)
+
+ #define SPDIF_OUT_ENB_MASK (1 << 2)
+ #define SPDIF_OUT_ENB_SHIFT 2
+
+ #define MCIF_SEL_SD 1
+ #define MCIF_SEL_CF 2
+ #define MCIF_SEL_XD 3
+ #define MCIF_SEL_MASK 3
+ #define MCIF_SEL_SHIFT 0
+
+#define GMAC_CLK_CFG 0x248
+ #define GMAC_PHY_IF_GMII_VAL (0 << 3)
+ #define GMAC_PHY_IF_RGMII_VAL (1 << 3)
+ #define GMAC_PHY_IF_SGMII_VAL (2 << 3)
+ #define GMAC_PHY_IF_RMII_VAL (4 << 3)
+ #define GMAC_PHY_IF_SEL_MASK (7 << 3)
+ #define GMAC_PHY_INPUT_ENB_VAL 0
+ #define GMAC_PHY_SYNT_ENB_VAL 1
+ #define GMAC_PHY_CLK_MASK 1
+ #define GMAC_PHY_CLK_SHIFT 2
+ #define GMAC_PHY_125M_PAD_VAL 0
+ #define GMAC_PHY_PLL2_VAL 1
+ #define GMAC_PHY_OSC3_VAL 2
+ #define GMAC_PHY_INPUT_CLK_MASK 3
+ #define GMAC_PHY_INPUT_CLK_SHIFT 0
+
+#define PCIE_SATA_CFG 0x424
+ /* PCIE CFG masks */
+ #define PCIE_CFG_DEVICE_PRESENT (1 << 11)
+ #define PCIE_CFG_POWERUP_RESET (1 << 10)
+ #define PCIE_CFG_CORE_CLK_EN (1 << 9)
+ #define PCIE_CFG_AUX_CLK_EN (1 << 8)
+ #define SATA_CFG_TX_CLK_EN (1 << 4)
+ #define SATA_CFG_RX_CLK_EN (1 << 3)
+ #define SATA_CFG_POWERUP_RESET (1 << 2)
+ #define SATA_CFG_PM_CLK_EN (1 << 1)
+ #define PCIE_SATA_SEL_PCIE (0)
+ #define PCIE_SATA_SEL_SATA (1)
+ #define SATA_PCIE_CFG_MASK 0xF1F
+ #define PCIE_CFG_VAL (PCIE_SATA_SEL_PCIE | PCIE_CFG_AUX_CLK_EN | \
+ PCIE_CFG_CORE_CLK_EN | PCIE_CFG_POWERUP_RESET |\
+ PCIE_CFG_DEVICE_PRESENT)
+ #define SATA_CFG_VAL (PCIE_SATA_SEL_SATA | SATA_CFG_PM_CLK_EN | \
+ SATA_CFG_POWERUP_RESET | SATA_CFG_RX_CLK_EN | \
+ SATA_CFG_TX_CLK_EN)
+
+/* Macros for second level of pmx - pads as primary OR alternate peripheral */
+/* Write 0 to enable FSMC_16_BIT */
+#define KBD_ROW_COL_MASK (1 << 0)
+
+/* Write 0 to enable UART0_ENH */
+#define GPT_MASK (1 << 1) /* Only clk & cpt */
+
+/* Write 0 to enable PWM1 */
+#define KBD_COL5_MASK (1 << 2)
+
+/* Write 0 to enable PWM2 */
+#define GPT0_TMR0_CPT_MASK (1 << 3) /* Only clk & cpt */
+
+/* Write 0 to enable PWM3 */
+#define GPT0_TMR1_CLK_MASK (1 << 4) /* Only clk & cpt */
+
+/* Write 0 to enable PWM0 */
+#define SSP0_CS1_MASK (1 << 5)
+
+/* Write 0 to enable VIP */
+#define CAM3_MASK (1 << 6)
+
+/* Write 0 to enable VIP */
+#define CAM2_MASK (1 << 7)
+
+/* Write 0 to enable VIP */
+#define CAM1_MASK (1 << 8)
+
+/* Write 0 to enable VIP */
+#define CAM0_MASK (1 << 9)
+
+/* Write 0 to enable TS */
+#define SSP0_CS2_MASK (1 << 10)
+
+/* Write 0 to enable FSMC PNOR */
+#define MCIF_MASK (1 << 11)
+
+/* Write 0 to enable CLCD */
+#define ARM_TRACE_MASK (1 << 12)
+
+/* Write 0 to enable I2S, SSP0_CS2, CEC0, 1, SPDIF out, CLCD */
+#define MIPHY_DBG_MASK (1 << 13)
+
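+/*
+ * A group that uses a shared pad programs both levels: PAD_SHARED_IP_EN_x
+ * selects the primary or alternate peripheral and PAD_FUNCTION_EN_x routes
+ * the pad to the peripheral instead of GPIO (see e.g. the fsmc_16bit and
+ * keyboard_row_col groups below).
+ */
+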
+/*
+ * Pad multiplexing for making all pads work as GPIOs. This is done to override
+ * the values passed by the bootloader and start from scratch.
+ */
+static const unsigned pads_as_gpio_pins[] = { 251 };
+static struct spear_muxreg pads_as_gpio_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PADS_AS_GPIO_REG0_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = PADS_AS_GPIO_REGS_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = PADS_AS_GPIO_REGS_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_4,
+ .mask = PADS_AS_GPIO_REGS_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = PADS_AS_GPIO_REGS_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_6,
+ .mask = PADS_AS_GPIO_REGS_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_7,
+ .mask = PADS_AS_GPIO_REGS_MASK,
+ .val = 0x0,
+ }, {
+ .reg = PAD_FUNCTION_EN_8,
+ .mask = PADS_AS_GPIO_REG7_MASK,
+ .val = 0x0,
+ },
+};
+
+static struct spear_modemux pads_as_gpio_modemux[] = {
+ {
+ .muxregs = pads_as_gpio_muxreg,
+ .nmuxregs = ARRAY_SIZE(pads_as_gpio_muxreg),
+ },
+};
+
+static struct spear_pingroup pads_as_gpio_pingroup = {
+ .name = "pads_as_gpio_grp",
+ .pins = pads_as_gpio_pins,
+ .npins = ARRAY_SIZE(pads_as_gpio_pins),
+ .modemuxs = pads_as_gpio_modemux,
+ .nmodemuxs = ARRAY_SIZE(pads_as_gpio_modemux),
+};
+
+static const char *const pads_as_gpio_grps[] = { "pads_as_gpio_grp" };
+static struct spear_function pads_as_gpio_function = {
+ .name = "pads_as_gpio",
+ .groups = pads_as_gpio_grps,
+ .ngroups = ARRAY_SIZE(pads_as_gpio_grps),
+};
+
+/* Pad multiplexing for fsmc_8bit device */
+static const unsigned fsmc_8bit_pins[] = { 233, 234, 235, 236, 238, 239, 240,
+ 241, 242, 243, 244, 245, 246, 247, 248, 249 };
+static struct spear_muxreg fsmc_8bit_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_8,
+ .mask = FSMC_8BIT_REG7_MASK,
+ .val = FSMC_8BIT_REG7_MASK,
+ }
+};
+
+static struct spear_modemux fsmc_8bit_modemux[] = {
+ {
+ .muxregs = fsmc_8bit_muxreg,
+ .nmuxregs = ARRAY_SIZE(fsmc_8bit_muxreg),
+ },
+};
+
+static struct spear_pingroup fsmc_8bit_pingroup = {
+ .name = "fsmc_8bit_grp",
+ .pins = fsmc_8bit_pins,
+ .npins = ARRAY_SIZE(fsmc_8bit_pins),
+ .modemuxs = fsmc_8bit_modemux,
+ .nmodemuxs = ARRAY_SIZE(fsmc_8bit_modemux),
+};
+
+/* Pad multiplexing for fsmc_16bit device */
+static const unsigned fsmc_16bit_pins[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+static struct spear_muxreg fsmc_16bit_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = KBD_ROW_COL_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
+ .val = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
+ },
+};
+
+static struct spear_modemux fsmc_16bit_modemux[] = {
+ {
+ .muxregs = fsmc_16bit_muxreg,
+ .nmuxregs = ARRAY_SIZE(fsmc_16bit_muxreg),
+ },
+};
+
+static struct spear_pingroup fsmc_16bit_pingroup = {
+ .name = "fsmc_16bit_grp",
+ .pins = fsmc_16bit_pins,
+ .npins = ARRAY_SIZE(fsmc_16bit_pins),
+ .modemuxs = fsmc_16bit_modemux,
+ .nmodemuxs = ARRAY_SIZE(fsmc_16bit_modemux),
+};
+
+/* pad multiplexing for fsmc_pnor device */
+static const unsigned fsmc_pnor_pins[] = { 192, 193, 194, 195, 196, 197, 198,
+ 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ 215, 216, 217 };
+static struct spear_muxreg fsmc_pnor_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = MCIF_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_7,
+ .mask = FSMC_PNOR_AND_MCIF_REG6_MASK,
+ .val = FSMC_PNOR_AND_MCIF_REG6_MASK,
+ },
+};
+
+static struct spear_modemux fsmc_pnor_modemux[] = {
+ {
+ .muxregs = fsmc_pnor_muxreg,
+ .nmuxregs = ARRAY_SIZE(fsmc_pnor_muxreg),
+ },
+};
+
+static struct spear_pingroup fsmc_pnor_pingroup = {
+ .name = "fsmc_pnor_grp",
+ .pins = fsmc_pnor_pins,
+ .npins = ARRAY_SIZE(fsmc_pnor_pins),
+ .modemuxs = fsmc_pnor_modemux,
+ .nmodemuxs = ARRAY_SIZE(fsmc_pnor_modemux),
+};
+
+static const char *const fsmc_grps[] = { "fsmc_8bit_grp", "fsmc_16bit_grp",
+ "fsmc_pnor_grp" };
+static struct spear_function fsmc_function = {
+ .name = "fsmc",
+ .groups = fsmc_grps,
+ .ngroups = ARRAY_SIZE(fsmc_grps),
+};
+
+/* pad multiplexing for keyboard rows-cols device */
+static const unsigned keyboard_row_col_pins[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10 };
+static struct spear_muxreg keyboard_row_col_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = KBD_ROW_COL_MASK,
+ .val = KBD_ROW_COL_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
+ .val = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
+ },
+};
+
+static struct spear_modemux keyboard_row_col_modemux[] = {
+ {
+ .muxregs = keyboard_row_col_muxreg,
+ .nmuxregs = ARRAY_SIZE(keyboard_row_col_muxreg),
+ },
+};
+
+static struct spear_pingroup keyboard_row_col_pingroup = {
+ .name = "keyboard_row_col_grp",
+ .pins = keyboard_row_col_pins,
+ .npins = ARRAY_SIZE(keyboard_row_col_pins),
+ .modemuxs = keyboard_row_col_modemux,
+ .nmodemuxs = ARRAY_SIZE(keyboard_row_col_modemux),
+};
+
+/* pad multiplexing for keyboard col5 device */
+static const unsigned keyboard_col5_pins[] = { 17 };
+static struct spear_muxreg keyboard_col5_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = KBD_COL5_MASK,
+ .val = KBD_COL5_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PWM1_AND_KBD_COL5_REG0_MASK,
+ .val = PWM1_AND_KBD_COL5_REG0_MASK,
+ },
+};
+
+static struct spear_modemux keyboard_col5_modemux[] = {
+ {
+ .muxregs = keyboard_col5_muxreg,
+ .nmuxregs = ARRAY_SIZE(keyboard_col5_muxreg),
+ },
+};
+
+static struct spear_pingroup keyboard_col5_pingroup = {
+ .name = "keyboard_col5_grp",
+ .pins = keyboard_col5_pins,
+ .npins = ARRAY_SIZE(keyboard_col5_pins),
+ .modemuxs = keyboard_col5_modemux,
+ .nmodemuxs = ARRAY_SIZE(keyboard_col5_modemux),
+};
+
+static const char *const keyboard_grps[] = { "keyboard_row_col_grp",
+ "keyboard_col5_grp" };
+static struct spear_function keyboard_function = {
+ .name = "keyboard",
+ .groups = keyboard_grps,
+ .ngroups = ARRAY_SIZE(keyboard_grps),
+};
+
+/* pad multiplexing for spdif_in device */
+static const unsigned spdif_in_pins[] = { 19 };
+static struct spear_muxreg spdif_in_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = SPDIF_IN_REG0_MASK,
+ .val = SPDIF_IN_REG0_MASK,
+ },
+};
+
+static struct spear_modemux spdif_in_modemux[] = {
+ {
+ .muxregs = spdif_in_muxreg,
+ .nmuxregs = ARRAY_SIZE(spdif_in_muxreg),
+ },
+};
+
+static struct spear_pingroup spdif_in_pingroup = {
+ .name = "spdif_in_grp",
+ .pins = spdif_in_pins,
+ .npins = ARRAY_SIZE(spdif_in_pins),
+ .modemuxs = spdif_in_modemux,
+ .nmodemuxs = ARRAY_SIZE(spdif_in_modemux),
+};
+
+static const char *const spdif_in_grps[] = { "spdif_in_grp" };
+static struct spear_function spdif_in_function = {
+ .name = "spdif_in",
+ .groups = spdif_in_grps,
+ .ngroups = ARRAY_SIZE(spdif_in_grps),
+};
+
+/* pad multiplexing for spdif_out device */
+static const unsigned spdif_out_pins[] = { 137 };
+static struct spear_muxreg spdif_out_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = SPDIF_OUT_REG4_MASK,
+ .val = SPDIF_OUT_REG4_MASK,
+ }, {
+ .reg = PERIP_CFG,
+ .mask = SPDIF_OUT_ENB_MASK,
+ .val = SPDIF_OUT_ENB_MASK,
+ }
+};
+
+static struct spear_modemux spdif_out_modemux[] = {
+ {
+ .muxregs = spdif_out_muxreg,
+ .nmuxregs = ARRAY_SIZE(spdif_out_muxreg),
+ },
+};
+
+static struct spear_pingroup spdif_out_pingroup = {
+ .name = "spdif_out_grp",
+ .pins = spdif_out_pins,
+ .npins = ARRAY_SIZE(spdif_out_pins),
+ .modemuxs = spdif_out_modemux,
+ .nmodemuxs = ARRAY_SIZE(spdif_out_modemux),
+};
+
+static const char *const spdif_out_grps[] = { "spdif_out_grp" };
+static struct spear_function spdif_out_function = {
+ .name = "spdif_out",
+ .groups = spdif_out_grps,
+ .ngroups = ARRAY_SIZE(spdif_out_grps),
+};
+
+/* pad multiplexing for gpt_0_1 device */
+static const unsigned gpt_0_1_pins[] = { 11, 12, 13, 14, 15, 16, 21, 22 };
+static struct spear_muxreg gpt_0_1_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = GPT_MASK | GPT0_TMR0_CPT_MASK | GPT0_TMR1_CLK_MASK,
+ .val = GPT_MASK | GPT0_TMR0_CPT_MASK | GPT0_TMR1_CLK_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = UART0_ENH_AND_GPT_REG0_MASK |
+ PWM2_AND_GPT0_TMR0_CPT_REG0_MASK |
+ PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
+ .val = UART0_ENH_AND_GPT_REG0_MASK |
+ PWM2_AND_GPT0_TMR0_CPT_REG0_MASK |
+ PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
+ },
+};
+
+static struct spear_modemux gpt_0_1_modemux[] = {
+ {
+ .muxregs = gpt_0_1_muxreg,
+ .nmuxregs = ARRAY_SIZE(gpt_0_1_muxreg),
+ },
+};
+
+static struct spear_pingroup gpt_0_1_pingroup = {
+ .name = "gpt_0_1_grp",
+ .pins = gpt_0_1_pins,
+ .npins = ARRAY_SIZE(gpt_0_1_pins),
+ .modemuxs = gpt_0_1_modemux,
+ .nmodemuxs = ARRAY_SIZE(gpt_0_1_modemux),
+};
+
+static const char *const gpt_0_1_grps[] = { "gpt_0_1_grp" };
+static struct spear_function gpt_0_1_function = {
+ .name = "gpt_0_1",
+ .groups = gpt_0_1_grps,
+ .ngroups = ARRAY_SIZE(gpt_0_1_grps),
+};
+
+/* pad multiplexing for pwm0 device */
+static const unsigned pwm0_pins[] = { 24 };
+static struct spear_muxreg pwm0_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = SSP0_CS1_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PWM0_AND_SSP0_CS1_REG0_MASK,
+ .val = PWM0_AND_SSP0_CS1_REG0_MASK,
+ },
+};
+
+static struct spear_modemux pwm0_modemux[] = {
+ {
+ .muxregs = pwm0_muxreg,
+ .nmuxregs = ARRAY_SIZE(pwm0_muxreg),
+ },
+};
+
+static struct spear_pingroup pwm0_pingroup = {
+ .name = "pwm0_grp",
+ .pins = pwm0_pins,
+ .npins = ARRAY_SIZE(pwm0_pins),
+ .modemuxs = pwm0_modemux,
+ .nmodemuxs = ARRAY_SIZE(pwm0_modemux),
+};
+
+/* pad multiplexing for pwm1 device */
+static const unsigned pwm1_pins[] = { 17 };
+static struct spear_muxreg pwm1_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = KBD_COL5_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PWM1_AND_KBD_COL5_REG0_MASK,
+ .val = PWM1_AND_KBD_COL5_REG0_MASK,
+ },
+};
+
+static struct spear_modemux pwm1_modemux[] = {
+ {
+ .muxregs = pwm1_muxreg,
+ .nmuxregs = ARRAY_SIZE(pwm1_muxreg),
+ },
+};
+
+static struct spear_pingroup pwm1_pingroup = {
+ .name = "pwm1_grp",
+ .pins = pwm1_pins,
+ .npins = ARRAY_SIZE(pwm1_pins),
+ .modemuxs = pwm1_modemux,
+ .nmodemuxs = ARRAY_SIZE(pwm1_modemux),
+};
+
+/* pad multiplexing for pwm2 device */
+static const unsigned pwm2_pins[] = { 21 };
+static struct spear_muxreg pwm2_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = GPT0_TMR0_CPT_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PWM2_AND_GPT0_TMR0_CPT_REG0_MASK,
+ .val = PWM2_AND_GPT0_TMR0_CPT_REG0_MASK,
+ },
+};
+
+static struct spear_modemux pwm2_modemux[] = {
+ {
+ .muxregs = pwm2_muxreg,
+ .nmuxregs = ARRAY_SIZE(pwm2_muxreg),
+ },
+};
+
+static struct spear_pingroup pwm2_pingroup = {
+ .name = "pwm2_grp",
+ .pins = pwm2_pins,
+ .npins = ARRAY_SIZE(pwm2_pins),
+ .modemuxs = pwm2_modemux,
+ .nmodemuxs = ARRAY_SIZE(pwm2_modemux),
+};
+
+/* pad multiplexing for pwm3 device */
+static const unsigned pwm3_pins[] = { 22 };
+static struct spear_muxreg pwm3_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = GPT0_TMR1_CLK_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
+ .val = PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
+ },
+};
+
+static struct spear_modemux pwm3_modemux[] = {
+ {
+ .muxregs = pwm3_muxreg,
+ .nmuxregs = ARRAY_SIZE(pwm3_muxreg),
+ },
+};
+
+static struct spear_pingroup pwm3_pingroup = {
+ .name = "pwm3_grp",
+ .pins = pwm3_pins,
+ .npins = ARRAY_SIZE(pwm3_pins),
+ .modemuxs = pwm3_modemux,
+ .nmodemuxs = ARRAY_SIZE(pwm3_modemux),
+};
+
+static const char *const pwm_grps[] = { "pwm0_grp", "pwm1_grp", "pwm2_grp",
+ "pwm3_grp" };
+static struct spear_function pwm_function = {
+ .name = "pwm",
+ .groups = pwm_grps,
+ .ngroups = ARRAY_SIZE(pwm_grps),
+};
+
+/* pad multiplexing for vip_mux device */
+static const unsigned vip_mux_pins[] = { 35, 36, 37, 38, 40, 41, 42, 43 };
+static struct spear_muxreg vip_mux_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = VIP_REG1_MASK,
+ .val = VIP_REG1_MASK,
+ },
+};
+
+static struct spear_modemux vip_mux_modemux[] = {
+ {
+ .muxregs = vip_mux_muxreg,
+ .nmuxregs = ARRAY_SIZE(vip_mux_muxreg),
+ },
+};
+
+static struct spear_pingroup vip_mux_pingroup = {
+ .name = "vip_mux_grp",
+ .pins = vip_mux_pins,
+ .npins = ARRAY_SIZE(vip_mux_pins),
+ .modemuxs = vip_mux_modemux,
+ .nmodemuxs = ARRAY_SIZE(vip_mux_modemux),
+};
+
+/* pad multiplexing for vip_mux_cam0 (disables cam0) device */
+static const unsigned vip_mux_cam0_pins[] = { 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 74, 75 };
+static struct spear_muxreg vip_mux_cam0_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = CAM0_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = VIP_AND_CAM0_REG2_MASK,
+ .val = VIP_AND_CAM0_REG2_MASK,
+ },
+};
+
+static struct spear_modemux vip_mux_cam0_modemux[] = {
+ {
+ .muxregs = vip_mux_cam0_muxreg,
+ .nmuxregs = ARRAY_SIZE(vip_mux_cam0_muxreg),
+ },
+};
+
+static struct spear_pingroup vip_mux_cam0_pingroup = {
+ .name = "vip_mux_cam0_grp",
+ .pins = vip_mux_cam0_pins,
+ .npins = ARRAY_SIZE(vip_mux_cam0_pins),
+ .modemuxs = vip_mux_cam0_modemux,
+ .nmodemuxs = ARRAY_SIZE(vip_mux_cam0_modemux),
+};
+
+/* pad multiplexing for vip_mux_cam1 (disables cam1) device */
+static const unsigned vip_mux_cam1_pins[] = { 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63, 64 };
+static struct spear_muxreg vip_mux_cam1_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = CAM1_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = VIP_AND_CAM1_REG1_MASK,
+ .val = VIP_AND_CAM1_REG1_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = VIP_AND_CAM1_REG2_MASK,
+ .val = VIP_AND_CAM1_REG2_MASK,
+ },
+};
+
+static struct spear_modemux vip_mux_cam1_modemux[] = {
+ {
+ .muxregs = vip_mux_cam1_muxreg,
+ .nmuxregs = ARRAY_SIZE(vip_mux_cam1_muxreg),
+ },
+};
+
+static struct spear_pingroup vip_mux_cam1_pingroup = {
+ .name = "vip_mux_cam1_grp",
+ .pins = vip_mux_cam1_pins,
+ .npins = ARRAY_SIZE(vip_mux_cam1_pins),
+ .modemuxs = vip_mux_cam1_modemux,
+ .nmodemuxs = ARRAY_SIZE(vip_mux_cam1_modemux),
+};
+
+/* pad multiplexing for vip_mux_cam2 (disables cam2) device */
+static const unsigned vip_mux_cam2_pins[] = { 39, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52, 53 };
+static struct spear_muxreg vip_mux_cam2_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = CAM2_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = VIP_AND_CAM2_REG1_MASK,
+ .val = VIP_AND_CAM2_REG1_MASK,
+ },
+};
+
+static struct spear_modemux vip_mux_cam2_modemux[] = {
+ {
+ .muxregs = vip_mux_cam2_muxreg,
+ .nmuxregs = ARRAY_SIZE(vip_mux_cam2_muxreg),
+ },
+};
+
+static struct spear_pingroup vip_mux_cam2_pingroup = {
+ .name = "vip_mux_cam2_grp",
+ .pins = vip_mux_cam2_pins,
+ .npins = ARRAY_SIZE(vip_mux_cam2_pins),
+ .modemuxs = vip_mux_cam2_modemux,
+ .nmodemuxs = ARRAY_SIZE(vip_mux_cam2_modemux),
+};
+
+/* pad multiplexing for vip_mux_cam3 (disables cam3) device */
+static const unsigned vip_mux_cam3_pins[] = { 20, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34 };
+static struct spear_muxreg vip_mux_cam3_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = CAM3_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = VIP_AND_CAM3_REG0_MASK,
+ .val = VIP_AND_CAM3_REG0_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = VIP_AND_CAM3_REG1_MASK,
+ .val = VIP_AND_CAM3_REG1_MASK,
+ },
+};
+
+static struct spear_modemux vip_mux_cam3_modemux[] = {
+ {
+ .muxregs = vip_mux_cam3_muxreg,
+ .nmuxregs = ARRAY_SIZE(vip_mux_cam3_muxreg),
+ },
+};
+
+static struct spear_pingroup vip_mux_cam3_pingroup = {
+ .name = "vip_mux_cam3_grp",
+ .pins = vip_mux_cam3_pins,
+ .npins = ARRAY_SIZE(vip_mux_cam3_pins),
+ .modemuxs = vip_mux_cam3_modemux,
+ .nmodemuxs = ARRAY_SIZE(vip_mux_cam3_modemux),
+};
+
+static const char *const vip_grps[] = { "vip_mux_grp", "vip_mux_cam0_grp" ,
+ "vip_mux_cam1_grp" , "vip_mux_cam2_grp", "vip_mux_cam3_grp" };
+static struct spear_function vip_function = {
+ .name = "vip",
+ .groups = vip_grps,
+ .ngroups = ARRAY_SIZE(vip_grps),
+};
+
+/* pad multiplexing for cam0 device */
+static const unsigned cam0_pins[] = { 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75
+};
+static struct spear_muxreg cam0_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = CAM0_MASK,
+ .val = CAM0_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = VIP_AND_CAM0_REG2_MASK,
+ .val = VIP_AND_CAM0_REG2_MASK,
+ },
+};
+
+static struct spear_modemux cam0_modemux[] = {
+ {
+ .muxregs = cam0_muxreg,
+ .nmuxregs = ARRAY_SIZE(cam0_muxreg),
+ },
+};
+
+static struct spear_pingroup cam0_pingroup = {
+ .name = "cam0_grp",
+ .pins = cam0_pins,
+ .npins = ARRAY_SIZE(cam0_pins),
+ .modemuxs = cam0_modemux,
+ .nmodemuxs = ARRAY_SIZE(cam0_modemux),
+};
+
+static const char *const cam0_grps[] = { "cam0_grp" };
+static struct spear_function cam0_function = {
+ .name = "cam0",
+ .groups = cam0_grps,
+ .ngroups = ARRAY_SIZE(cam0_grps),
+};
+
+/* pad multiplexing for cam1 device */
+static const unsigned cam1_pins[] = { 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
+};
+static struct spear_muxreg cam1_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = CAM1_MASK,
+ .val = CAM1_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = VIP_AND_CAM1_REG1_MASK,
+ .val = VIP_AND_CAM1_REG1_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = VIP_AND_CAM1_REG2_MASK,
+ .val = VIP_AND_CAM1_REG2_MASK,
+ },
+};
+
+static struct spear_modemux cam1_modemux[] = {
+ {
+ .muxregs = cam1_muxreg,
+ .nmuxregs = ARRAY_SIZE(cam1_muxreg),
+ },
+};
+
+static struct spear_pingroup cam1_pingroup = {
+ .name = "cam1_grp",
+ .pins = cam1_pins,
+ .npins = ARRAY_SIZE(cam1_pins),
+ .modemuxs = cam1_modemux,
+ .nmodemuxs = ARRAY_SIZE(cam1_modemux),
+};
+
+static const char *const cam1_grps[] = { "cam1_grp" };
+static struct spear_function cam1_function = {
+ .name = "cam1",
+ .groups = cam1_grps,
+ .ngroups = ARRAY_SIZE(cam1_grps),
+};
+
+/* pad multiplexing for cam2 device */
+static const unsigned cam2_pins[] = { 39, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53
+};
+static struct spear_muxreg cam2_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = CAM2_MASK,
+ .val = CAM2_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = VIP_AND_CAM2_REG1_MASK,
+ .val = VIP_AND_CAM2_REG1_MASK,
+ },
+};
+
+static struct spear_modemux cam2_modemux[] = {
+ {
+ .muxregs = cam2_muxreg,
+ .nmuxregs = ARRAY_SIZE(cam2_muxreg),
+ },
+};
+
+static struct spear_pingroup cam2_pingroup = {
+ .name = "cam2_grp",
+ .pins = cam2_pins,
+ .npins = ARRAY_SIZE(cam2_pins),
+ .modemuxs = cam2_modemux,
+ .nmodemuxs = ARRAY_SIZE(cam2_modemux),
+};
+
+static const char *const cam2_grps[] = { "cam2_grp" };
+static struct spear_function cam2_function = {
+ .name = "cam2",
+ .groups = cam2_grps,
+ .ngroups = ARRAY_SIZE(cam2_grps),
+};
+
+/* pad multiplexing for cam3 device */
+static const unsigned cam3_pins[] = { 20, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34
+};
+static struct spear_muxreg cam3_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = CAM3_MASK,
+ .val = CAM3_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = VIP_AND_CAM3_REG0_MASK,
+ .val = VIP_AND_CAM3_REG0_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_2,
+ .mask = VIP_AND_CAM3_REG1_MASK,
+ .val = VIP_AND_CAM3_REG1_MASK,
+ },
+};
+
+static struct spear_modemux cam3_modemux[] = {
+ {
+ .muxregs = cam3_muxreg,
+ .nmuxregs = ARRAY_SIZE(cam3_muxreg),
+ },
+};
+
+static struct spear_pingroup cam3_pingroup = {
+ .name = "cam3_grp",
+ .pins = cam3_pins,
+ .npins = ARRAY_SIZE(cam3_pins),
+ .modemuxs = cam3_modemux,
+ .nmodemuxs = ARRAY_SIZE(cam3_modemux),
+};
+
+static const char *const cam3_grps[] = { "cam3_grp" };
+static struct spear_function cam3_function = {
+ .name = "cam3",
+ .groups = cam3_grps,
+ .ngroups = ARRAY_SIZE(cam3_grps),
+};
+
+/* pad multiplexing for smi device */
+static const unsigned smi_pins[] = { 76, 77, 78, 79, 84 };
+static struct spear_muxreg smi_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = SMI_REG2_MASK,
+ .val = SMI_REG2_MASK,
+ },
+};
+
+static struct spear_modemux smi_modemux[] = {
+ {
+ .muxregs = smi_muxreg,
+ .nmuxregs = ARRAY_SIZE(smi_muxreg),
+ },
+};
+
+static struct spear_pingroup smi_pingroup = {
+ .name = "smi_grp",
+ .pins = smi_pins,
+ .npins = ARRAY_SIZE(smi_pins),
+ .modemuxs = smi_modemux,
+ .nmodemuxs = ARRAY_SIZE(smi_modemux),
+};
+
+static const char *const smi_grps[] = { "smi_grp" };
+static struct spear_function smi_function = {
+ .name = "smi",
+ .groups = smi_grps,
+ .ngroups = ARRAY_SIZE(smi_grps),
+};
+
+/* pad multiplexing for ssp0 device */
+static const unsigned ssp0_pins[] = { 80, 81, 82, 83 };
+static struct spear_muxreg ssp0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = SSP0_REG2_MASK,
+ .val = SSP0_REG2_MASK,
+ },
+};
+
+static struct spear_modemux ssp0_modemux[] = {
+ {
+ .muxregs = ssp0_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp0_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp0_pingroup = {
+ .name = "ssp0_grp",
+ .pins = ssp0_pins,
+ .npins = ARRAY_SIZE(ssp0_pins),
+ .modemuxs = ssp0_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp0_modemux),
+};
+
+/* pad multiplexing for ssp0_cs1 device */
+static const unsigned ssp0_cs1_pins[] = { 24 };
+static struct spear_muxreg ssp0_cs1_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = SSP0_CS1_MASK,
+ .val = SSP0_CS1_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = PWM0_AND_SSP0_CS1_REG0_MASK,
+ .val = PWM0_AND_SSP0_CS1_REG0_MASK,
+ },
+};
+
+static struct spear_modemux ssp0_cs1_modemux[] = {
+ {
+ .muxregs = ssp0_cs1_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp0_cs1_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp0_cs1_pingroup = {
+ .name = "ssp0_cs1_grp",
+ .pins = ssp0_cs1_pins,
+ .npins = ARRAY_SIZE(ssp0_cs1_pins),
+ .modemuxs = ssp0_cs1_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp0_cs1_modemux),
+};
+
+/* pad multiplexing for ssp0_cs2 device */
+static const unsigned ssp0_cs2_pins[] = { 85 };
+static struct spear_muxreg ssp0_cs2_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = SSP0_CS2_MASK,
+ .val = SSP0_CS2_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = TS_AND_SSP0_CS2_REG2_MASK,
+ .val = TS_AND_SSP0_CS2_REG2_MASK,
+ },
+};
+
+static struct spear_modemux ssp0_cs2_modemux[] = {
+ {
+ .muxregs = ssp0_cs2_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp0_cs2_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp0_cs2_pingroup = {
+ .name = "ssp0_cs2_grp",
+ .pins = ssp0_cs2_pins,
+ .npins = ARRAY_SIZE(ssp0_cs2_pins),
+ .modemuxs = ssp0_cs2_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp0_cs2_modemux),
+};
+
+/* pad multiplexing for ssp0_cs3 device */
+static const unsigned ssp0_cs3_pins[] = { 132 };
+static struct spear_muxreg ssp0_cs3_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = SSP0_CS3_REG4_MASK,
+ .val = SSP0_CS3_REG4_MASK,
+ },
+};
+
+static struct spear_modemux ssp0_cs3_modemux[] = {
+ {
+ .muxregs = ssp0_cs3_muxreg,
+ .nmuxregs = ARRAY_SIZE(ssp0_cs3_muxreg),
+ },
+};
+
+static struct spear_pingroup ssp0_cs3_pingroup = {
+ .name = "ssp0_cs3_grp",
+ .pins = ssp0_cs3_pins,
+ .npins = ARRAY_SIZE(ssp0_cs3_pins),
+ .modemuxs = ssp0_cs3_modemux,
+ .nmodemuxs = ARRAY_SIZE(ssp0_cs3_modemux),
+};
+
+static const char *const ssp0_grps[] = { "ssp0_grp", "ssp0_cs1_grp",
+ "ssp0_cs2_grp", "ssp0_cs3_grp" };
+static struct spear_function ssp0_function = {
+ .name = "ssp0",
+ .groups = ssp0_grps,
+ .ngroups = ARRAY_SIZE(ssp0_grps),
+};
+
+/* pad multiplexing for uart0 device */
+static const unsigned uart0_pins[] = { 86, 87 };
+static struct spear_muxreg uart0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = UART0_REG2_MASK,
+ .val = UART0_REG2_MASK,
+ },
+};
+
+static struct spear_modemux uart0_modemux[] = {
+ {
+ .muxregs = uart0_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart0_muxreg),
+ },
+};
+
+static struct spear_pingroup uart0_pingroup = {
+ .name = "uart0_grp",
+ .pins = uart0_pins,
+ .npins = ARRAY_SIZE(uart0_pins),
+ .modemuxs = uart0_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart0_modemux),
+};
+
+/* pad multiplexing for uart0_enh device */
+static const unsigned uart0_enh_pins[] = { 11, 12, 13, 14, 15, 16 };
+static struct spear_muxreg uart0_enh_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = GPT_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = UART0_ENH_AND_GPT_REG0_MASK,
+ .val = UART0_ENH_AND_GPT_REG0_MASK,
+ },
+};
+
+static struct spear_modemux uart0_enh_modemux[] = {
+ {
+ .muxregs = uart0_enh_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart0_enh_muxreg),
+ },
+};
+
+static struct spear_pingroup uart0_enh_pingroup = {
+ .name = "uart0_enh_grp",
+ .pins = uart0_enh_pins,
+ .npins = ARRAY_SIZE(uart0_enh_pins),
+ .modemuxs = uart0_enh_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart0_enh_modemux),
+};
+
+static const char *const uart0_grps[] = { "uart0_grp", "uart0_enh_grp" };
+static struct spear_function uart0_function = {
+ .name = "uart0",
+ .groups = uart0_grps,
+ .ngroups = ARRAY_SIZE(uart0_grps),
+};
+
+/* pad multiplexing for uart1 device */
+static const unsigned uart1_pins[] = { 88, 89 };
+static struct spear_muxreg uart1_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = UART1_REG2_MASK,
+ .val = UART1_REG2_MASK,
+ },
+};
+
+static struct spear_modemux uart1_modemux[] = {
+ {
+ .muxregs = uart1_muxreg,
+ .nmuxregs = ARRAY_SIZE(uart1_muxreg),
+ },
+};
+
+static struct spear_pingroup uart1_pingroup = {
+ .name = "uart1_grp",
+ .pins = uart1_pins,
+ .npins = ARRAY_SIZE(uart1_pins),
+ .modemuxs = uart1_modemux,
+ .nmodemuxs = ARRAY_SIZE(uart1_modemux),
+};
+
+static const char *const uart1_grps[] = { "uart1_grp" };
+static struct spear_function uart1_function = {
+ .name = "uart1",
+ .groups = uart1_grps,
+ .ngroups = ARRAY_SIZE(uart1_grps),
+};
+
+/* pad multiplexing for i2s_in device */
+static const unsigned i2s_in_pins[] = { 90, 91, 92, 93, 94, 99 };
+static struct spear_muxreg i2s_in_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_3,
+ .mask = I2S_IN_REG2_MASK,
+ .val = I2S_IN_REG2_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_4,
+ .mask = I2S_IN_REG3_MASK,
+ .val = I2S_IN_REG3_MASK,
+ },
+};
+
+static struct spear_modemux i2s_in_modemux[] = {
+ {
+ .muxregs = i2s_in_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2s_in_muxreg),
+ },
+};
+
+static struct spear_pingroup i2s_in_pingroup = {
+ .name = "i2s_in_grp",
+ .pins = i2s_in_pins,
+ .npins = ARRAY_SIZE(i2s_in_pins),
+ .modemuxs = i2s_in_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2s_in_modemux),
+};
+
+/* pad multiplexing for i2s_out device */
+static const unsigned i2s_out_pins[] = { 95, 96, 97, 98, 100, 101, 102, 103 };
+static struct spear_muxreg i2s_out_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_4,
+ .mask = I2S_OUT_REG3_MASK,
+ .val = I2S_OUT_REG3_MASK,
+ },
+};
+
+static struct spear_modemux i2s_out_modemux[] = {
+ {
+ .muxregs = i2s_out_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2s_out_muxreg),
+ },
+};
+
+static struct spear_pingroup i2s_out_pingroup = {
+ .name = "i2s_out_grp",
+ .pins = i2s_out_pins,
+ .npins = ARRAY_SIZE(i2s_out_pins),
+ .modemuxs = i2s_out_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2s_out_modemux),
+};
+
+static const char *const i2s_grps[] = { "i2s_in_grp", "i2s_out_grp" };
+static struct spear_function i2s_function = {
+ .name = "i2s",
+ .groups = i2s_grps,
+ .ngroups = ARRAY_SIZE(i2s_grps),
+};
+
+/* pad multiplexing for gmac device */
+static const unsigned gmac_pins[] = { 104, 105, 106, 107, 108, 109, 110, 111,
+ 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125,
+ 126, 127, 128, 129, 130, 131 };
+#define GMAC_MUXREG \
+ { \
+ .reg = PAD_FUNCTION_EN_4, \
+ .mask = GMAC_REG3_MASK, \
+ .val = GMAC_REG3_MASK, \
+ }, { \
+ .reg = PAD_FUNCTION_EN_5, \
+ .mask = GMAC_REG4_MASK, \
+ .val = GMAC_REG4_MASK, \
+ }
+
+/* pad multiplexing for gmii device */
+static struct spear_muxreg gmii_muxreg[] = {
+ GMAC_MUXREG,
+ {
+ .reg = GMAC_CLK_CFG,
+ .mask = GMAC_PHY_IF_SEL_MASK,
+ .val = GMAC_PHY_IF_GMII_VAL,
+ },
+};
+
+static struct spear_modemux gmii_modemux[] = {
+ {
+ .muxregs = gmii_muxreg,
+ .nmuxregs = ARRAY_SIZE(gmii_muxreg),
+ },
+};
+
+static struct spear_pingroup gmii_pingroup = {
+ .name = "gmii_grp",
+ .pins = gmac_pins,
+ .npins = ARRAY_SIZE(gmac_pins),
+ .modemuxs = gmii_modemux,
+ .nmodemuxs = ARRAY_SIZE(gmii_modemux),
+};
+
+/* pad multiplexing for rgmii device */
+static struct spear_muxreg rgmii_muxreg[] = {
+ GMAC_MUXREG,
+ {
+ .reg = GMAC_CLK_CFG,
+ .mask = GMAC_PHY_IF_SEL_MASK,
+ .val = GMAC_PHY_IF_RGMII_VAL,
+ },
+};
+
+static struct spear_modemux rgmii_modemux[] = {
+ {
+ .muxregs = rgmii_muxreg,
+ .nmuxregs = ARRAY_SIZE(rgmii_muxreg),
+ },
+};
+
+static struct spear_pingroup rgmii_pingroup = {
+ .name = "rgmii_grp",
+ .pins = gmac_pins,
+ .npins = ARRAY_SIZE(gmac_pins),
+ .modemuxs = rgmii_modemux,
+ .nmodemuxs = ARRAY_SIZE(rgmii_modemux),
+};
+
+/* pad multiplexing for rmii device */
+static struct spear_muxreg rmii_muxreg[] = {
+ GMAC_MUXREG,
+ {
+ .reg = GMAC_CLK_CFG,
+ .mask = GMAC_PHY_IF_SEL_MASK,
+ .val = GMAC_PHY_IF_RMII_VAL,
+ },
+};
+
+static struct spear_modemux rmii_modemux[] = {
+ {
+ .muxregs = rmii_muxreg,
+ .nmuxregs = ARRAY_SIZE(rmii_muxreg),
+ },
+};
+
+static struct spear_pingroup rmii_pingroup = {
+ .name = "rmii_grp",
+ .pins = gmac_pins,
+ .npins = ARRAY_SIZE(gmac_pins),
+ .modemuxs = rmii_modemux,
+ .nmodemuxs = ARRAY_SIZE(rmii_modemux),
+};
+
+/* pad multiplexing for sgmii device */
+static struct spear_muxreg sgmii_muxreg[] = {
+ GMAC_MUXREG,
+ {
+ .reg = GMAC_CLK_CFG,
+ .mask = GMAC_PHY_IF_SEL_MASK,
+ .val = GMAC_PHY_IF_SGMII_VAL,
+ },
+};
+
+static struct spear_modemux sgmii_modemux[] = {
+ {
+ .muxregs = sgmii_muxreg,
+ .nmuxregs = ARRAY_SIZE(sgmii_muxreg),
+ },
+};
+
+static struct spear_pingroup sgmii_pingroup = {
+ .name = "sgmii_grp",
+ .pins = gmac_pins,
+ .npins = ARRAY_SIZE(gmac_pins),
+ .modemuxs = sgmii_modemux,
+ .nmodemuxs = ARRAY_SIZE(sgmii_modemux),
+};
+
+static const char *const gmac_grps[] = { "gmii_grp", "rgmii_grp", "rmii_grp",
+ "sgmii_grp" };
+static struct spear_function gmac_function = {
+ .name = "gmac",
+ .groups = gmac_grps,
+ .ngroups = ARRAY_SIZE(gmac_grps),
+};
+
+/* pad multiplexing for i2c0 device */
+static const unsigned i2c0_pins[] = { 133, 134 };
+static struct spear_muxreg i2c0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = I2C0_REG4_MASK,
+ .val = I2C0_REG4_MASK,
+ },
+};
+
+static struct spear_modemux i2c0_modemux[] = {
+ {
+ .muxregs = i2c0_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c0_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c0_pingroup = {
+ .name = "i2c0_grp",
+ .pins = i2c0_pins,
+ .npins = ARRAY_SIZE(i2c0_pins),
+ .modemuxs = i2c0_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c0_modemux),
+};
+
+static const char *const i2c0_grps[] = { "i2c0_grp" };
+static struct spear_function i2c0_function = {
+ .name = "i2c0",
+ .groups = i2c0_grps,
+ .ngroups = ARRAY_SIZE(i2c0_grps),
+};
+
+/* pad multiplexing for i2c1 device */
+static const unsigned i2c1_pins[] = { 18, 23 };
+static struct spear_muxreg i2c1_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_1,
+ .mask = I2C1_REG0_MASK,
+ .val = I2C1_REG0_MASK,
+ },
+};
+
+static struct spear_modemux i2c1_modemux[] = {
+ {
+ .muxregs = i2c1_muxreg,
+ .nmuxregs = ARRAY_SIZE(i2c1_muxreg),
+ },
+};
+
+static struct spear_pingroup i2c1_pingroup = {
+ .name = "i2c1_grp",
+ .pins = i2c1_pins,
+ .npins = ARRAY_SIZE(i2c1_pins),
+ .modemuxs = i2c1_modemux,
+ .nmodemuxs = ARRAY_SIZE(i2c1_modemux),
+};
+
+static const char *const i2c1_grps[] = { "i2c1_grp" };
+static struct spear_function i2c1_function = {
+ .name = "i2c1",
+ .groups = i2c1_grps,
+ .ngroups = ARRAY_SIZE(i2c1_grps),
+};
+
+/* pad multiplexing for cec0 device */
+static const unsigned cec0_pins[] = { 135 };
+static struct spear_muxreg cec0_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = CEC0_REG4_MASK,
+ .val = CEC0_REG4_MASK,
+ },
+};
+
+static struct spear_modemux cec0_modemux[] = {
+ {
+ .muxregs = cec0_muxreg,
+ .nmuxregs = ARRAY_SIZE(cec0_muxreg),
+ },
+};
+
+static struct spear_pingroup cec0_pingroup = {
+ .name = "cec0_grp",
+ .pins = cec0_pins,
+ .npins = ARRAY_SIZE(cec0_pins),
+ .modemuxs = cec0_modemux,
+ .nmodemuxs = ARRAY_SIZE(cec0_modemux),
+};
+
+static const char *const cec0_grps[] = { "cec0_grp" };
+static struct spear_function cec0_function = {
+ .name = "cec0",
+ .groups = cec0_grps,
+ .ngroups = ARRAY_SIZE(cec0_grps),
+};
+
+/* pad multiplexing for cec1 device */
+static const unsigned cec1_pins[] = { 136 };
+static struct spear_muxreg cec1_muxreg[] = {
+ {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = CEC1_REG4_MASK,
+ .val = CEC1_REG4_MASK,
+ },
+};
+
+static struct spear_modemux cec1_modemux[] = {
+ {
+ .muxregs = cec1_muxreg,
+ .nmuxregs = ARRAY_SIZE(cec1_muxreg),
+ },
+};
+
+static struct spear_pingroup cec1_pingroup = {
+ .name = "cec1_grp",
+ .pins = cec1_pins,
+ .npins = ARRAY_SIZE(cec1_pins),
+ .modemuxs = cec1_modemux,
+ .nmodemuxs = ARRAY_SIZE(cec1_modemux),
+};
+
+static const char *const cec1_grps[] = { "cec1_grp" };
+static struct spear_function cec1_function = {
+ .name = "cec1",
+ .groups = cec1_grps,
+ .ngroups = ARRAY_SIZE(cec1_grps),
+};
+
+/* pad multiplexing for mcif devices */
+static const unsigned mcif_pins[] = { 193, 194, 195, 196, 197, 198, 199, 200,
+ 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
+ 229, 230, 231, 232, 237 };
+#define MCIF_MUXREG \
+ { \
+ .reg = PAD_SHARED_IP_EN_1, \
+ .mask = MCIF_MASK, \
+ .val = MCIF_MASK, \
+ }, { \
+ .reg = PAD_FUNCTION_EN_7, \
+ .mask = FSMC_PNOR_AND_MCIF_REG6_MASK | MCIF_REG6_MASK, \
+ .val = FSMC_PNOR_AND_MCIF_REG6_MASK | MCIF_REG6_MASK, \
+ }, { \
+ .reg = PAD_FUNCTION_EN_8, \
+ .mask = MCIF_REG7_MASK, \
+ .val = MCIF_REG7_MASK, \
+ }
+
+/* pad multiplexing for sdhci device */
+static struct spear_muxreg sdhci_muxreg[] = {
+ MCIF_MUXREG,
+ {
+ .reg = PERIP_CFG,
+ .mask = MCIF_SEL_MASK,
+ .val = MCIF_SEL_SD,
+ },
+};
+
+static struct spear_modemux sdhci_modemux[] = {
+ {
+ .muxregs = sdhci_muxreg,
+ .nmuxregs = ARRAY_SIZE(sdhci_muxreg),
+ },
+};
+
+static struct spear_pingroup sdhci_pingroup = {
+ .name = "sdhci_grp",
+ .pins = mcif_pins,
+ .npins = ARRAY_SIZE(mcif_pins),
+ .modemuxs = sdhci_modemux,
+ .nmodemuxs = ARRAY_SIZE(sdhci_modemux),
+};
+
+static const char *const sdhci_grps[] = { "sdhci_grp" };
+static struct spear_function sdhci_function = {
+ .name = "sdhci",
+ .groups = sdhci_grps,
+ .ngroups = ARRAY_SIZE(sdhci_grps),
+};
+
+/* pad multiplexing for cf device */
+static struct spear_muxreg cf_muxreg[] = {
+ MCIF_MUXREG,
+ {
+ .reg = PERIP_CFG,
+ .mask = MCIF_SEL_MASK,
+ .val = MCIF_SEL_CF,
+ },
+};
+
+static struct spear_modemux cf_modemux[] = {
+ {
+ .muxregs = cf_muxreg,
+ .nmuxregs = ARRAY_SIZE(cf_muxreg),
+ },
+};
+
+static struct spear_pingroup cf_pingroup = {
+ .name = "cf_grp",
+ .pins = mcif_pins,
+ .npins = ARRAY_SIZE(mcif_pins),
+ .modemuxs = cf_modemux,
+ .nmodemuxs = ARRAY_SIZE(cf_modemux),
+};
+
+static const char *const cf_grps[] = { "cf_grp" };
+static struct spear_function cf_function = {
+ .name = "cf",
+ .groups = cf_grps,
+ .ngroups = ARRAY_SIZE(cf_grps),
+};
+
+/* pad multiplexing for xd device */
+static struct spear_muxreg xd_muxreg[] = {
+ MCIF_MUXREG,
+ {
+ .reg = PERIP_CFG,
+ .mask = MCIF_SEL_MASK,
+ .val = MCIF_SEL_XD,
+ },
+};
+
+static struct spear_modemux xd_modemux[] = {
+ {
+ .muxregs = xd_muxreg,
+ .nmuxregs = ARRAY_SIZE(xd_muxreg),
+ },
+};
+
+static struct spear_pingroup xd_pingroup = {
+ .name = "xd_grp",
+ .pins = mcif_pins,
+ .npins = ARRAY_SIZE(mcif_pins),
+ .modemuxs = xd_modemux,
+ .nmodemuxs = ARRAY_SIZE(xd_modemux),
+};
+
+static const char *const xd_grps[] = { "xd_grp" };
+static struct spear_function xd_function = {
+ .name = "xd",
+ .groups = xd_grps,
+ .ngroups = ARRAY_SIZE(xd_grps),
+};
+
+/* pad multiplexing for clcd device */
+static const unsigned clcd_pins[] = { 138, 139, 140, 141, 142, 143, 144, 145,
+ 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+ 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173,
+ 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+ 188, 189, 190, 191 };
+static struct spear_muxreg clcd_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = ARM_TRACE_MASK | MIPHY_DBG_MASK,
+ .val = 0,
+ }, {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = CLCD_REG4_MASK | CLCD_AND_ARM_TRACE_REG4_MASK,
+ .val = CLCD_REG4_MASK | CLCD_AND_ARM_TRACE_REG4_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_6,
+ .mask = CLCD_AND_ARM_TRACE_REG5_MASK,
+ .val = CLCD_AND_ARM_TRACE_REG5_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_7,
+ .mask = CLCD_AND_ARM_TRACE_REG6_MASK,
+ .val = CLCD_AND_ARM_TRACE_REG6_MASK,
+ },
+};
+
+static struct spear_modemux clcd_modemux[] = {
+ {
+ .muxregs = clcd_muxreg,
+ .nmuxregs = ARRAY_SIZE(clcd_muxreg),
+ },
+};
+
+static struct spear_pingroup clcd_pingroup = {
+ .name = "clcd_grp",
+ .pins = clcd_pins,
+ .npins = ARRAY_SIZE(clcd_pins),
+ .modemuxs = clcd_modemux,
+ .nmodemuxs = ARRAY_SIZE(clcd_modemux),
+};
+
+static const char *const clcd_grps[] = { "clcd_grp" };
+static struct spear_function clcd_function = {
+ .name = "clcd",
+ .groups = clcd_grps,
+ .ngroups = ARRAY_SIZE(clcd_grps),
+};
+
+/* pad multiplexing for arm_trace device */
+static const unsigned arm_trace_pins[] = { 158, 159, 160, 161, 162, 163, 164,
+ 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
+ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+ 193, 194, 195, 196, 197, 198, 199, 200 };
+static struct spear_muxreg arm_trace_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = ARM_TRACE_MASK,
+ .val = ARM_TRACE_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = CLCD_AND_ARM_TRACE_REG4_MASK,
+ .val = CLCD_AND_ARM_TRACE_REG4_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_6,
+ .mask = CLCD_AND_ARM_TRACE_REG5_MASK,
+ .val = CLCD_AND_ARM_TRACE_REG5_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_7,
+ .mask = CLCD_AND_ARM_TRACE_REG6_MASK,
+ .val = CLCD_AND_ARM_TRACE_REG6_MASK,
+ },
+};
+
+static struct spear_modemux arm_trace_modemux[] = {
+ {
+ .muxregs = arm_trace_muxreg,
+ .nmuxregs = ARRAY_SIZE(arm_trace_muxreg),
+ },
+};
+
+static struct spear_pingroup arm_trace_pingroup = {
+ .name = "arm_trace_grp",
+ .pins = arm_trace_pins,
+ .npins = ARRAY_SIZE(arm_trace_pins),
+ .modemuxs = arm_trace_modemux,
+ .nmodemuxs = ARRAY_SIZE(arm_trace_modemux),
+};
+
+static const char *const arm_trace_grps[] = { "arm_trace_grp" };
+static struct spear_function arm_trace_function = {
+ .name = "arm_trace",
+ .groups = arm_trace_grps,
+ .ngroups = ARRAY_SIZE(arm_trace_grps),
+};
+
+/* pad multiplexing for miphy_dbg device */
+static const unsigned miphy_dbg_pins[] = { 96, 97, 98, 99, 100, 101, 102, 103,
+ 132, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
+ 148, 149, 150, 151, 152, 153, 154, 155, 156, 157 };
+static struct spear_muxreg miphy_dbg_muxreg[] = {
+ {
+ .reg = PAD_SHARED_IP_EN_1,
+ .mask = MIPHY_DBG_MASK,
+ .val = MIPHY_DBG_MASK,
+ }, {
+ .reg = PAD_FUNCTION_EN_5,
+ .mask = DEVS_GRP_AND_MIPHY_DBG_REG4_MASK,
+ .val = DEVS_GRP_AND_MIPHY_DBG_REG4_MASK,
+ },
+};
+
+static struct spear_modemux miphy_dbg_modemux[] = {
+ {
+ .muxregs = miphy_dbg_muxreg,
+ .nmuxregs = ARRAY_SIZE(miphy_dbg_muxreg),
+ },
+};
+
+static struct spear_pingroup miphy_dbg_pingroup = {
+ .name = "miphy_dbg_grp",
+ .pins = miphy_dbg_pins,
+ .npins = ARRAY_SIZE(miphy_dbg_pins),
+ .modemuxs = miphy_dbg_modemux,
+ .nmodemuxs = ARRAY_SIZE(miphy_dbg_modemux),
+};
+
+static const char *const miphy_dbg_grps[] = { "miphy_dbg_grp" };
+static struct spear_function miphy_dbg_function = {
+ .name = "miphy_dbg",
+ .groups = miphy_dbg_grps,
+ .ngroups = ARRAY_SIZE(miphy_dbg_grps),
+};
+
+/* pad multiplexing for pcie device */
+static const unsigned pcie_pins[] = { 250 };
+static struct spear_muxreg pcie_muxreg[] = {
+ {
+ .reg = PCIE_SATA_CFG,
+ .mask = SATA_PCIE_CFG_MASK,
+ .val = PCIE_CFG_VAL,
+ },
+};
+
+static struct spear_modemux pcie_modemux[] = {
+ {
+ .muxregs = pcie_muxreg,
+ .nmuxregs = ARRAY_SIZE(pcie_muxreg),
+ },
+};
+
+static struct spear_pingroup pcie_pingroup = {
+ .name = "pcie_grp",
+ .pins = pcie_pins,
+ .npins = ARRAY_SIZE(pcie_pins),
+ .modemuxs = pcie_modemux,
+ .nmodemuxs = ARRAY_SIZE(pcie_modemux),
+};
+
+static const char *const pcie_grps[] = { "pcie_grp" };
+static struct spear_function pcie_function = {
+ .name = "pcie",
+ .groups = pcie_grps,
+ .ngroups = ARRAY_SIZE(pcie_grps),
+};
+
+/* pad multiplexing for sata device */
+static const unsigned sata_pins[] = { 250 };
+static struct spear_muxreg sata_muxreg[] = {
+ {
+ .reg = PCIE_SATA_CFG,
+ .mask = SATA_PCIE_CFG_MASK,
+ .val = SATA_CFG_VAL,
+ },
+};
+
+static struct spear_modemux sata_modemux[] = {
+ {
+ .muxregs = sata_muxreg,
+ .nmuxregs = ARRAY_SIZE(sata_muxreg),
+ },
+};
+
+static struct spear_pingroup sata_pingroup = {
+ .name = "sata_grp",
+ .pins = sata_pins,
+ .npins = ARRAY_SIZE(sata_pins),
+ .modemuxs = sata_modemux,
+ .nmodemuxs = ARRAY_SIZE(sata_modemux),
+};
+
+static const char *const sata_grps[] = { "sata_grp" };
+static struct spear_function sata_function = {
+ .name = "sata",
+ .groups = sata_grps,
+ .ngroups = ARRAY_SIZE(sata_grps),
+};
+
+/* pingroups */
+static struct spear_pingroup *spear1340_pingroups[] = {
+ &pads_as_gpio_pingroup,
+ &fsmc_8bit_pingroup,
+ &fsmc_16bit_pingroup,
+ &fsmc_pnor_pingroup,
+ &keyboard_row_col_pingroup,
+ &keyboard_col5_pingroup,
+ &spdif_in_pingroup,
+ &spdif_out_pingroup,
+ &gpt_0_1_pingroup,
+ &pwm0_pingroup,
+ &pwm1_pingroup,
+ &pwm2_pingroup,
+ &pwm3_pingroup,
+ &vip_mux_pingroup,
+ &vip_mux_cam0_pingroup,
+ &vip_mux_cam1_pingroup,
+ &vip_mux_cam2_pingroup,
+ &vip_mux_cam3_pingroup,
+ &cam0_pingroup,
+ &cam1_pingroup,
+ &cam2_pingroup,
+ &cam3_pingroup,
+ &smi_pingroup,
+ &ssp0_pingroup,
+ &ssp0_cs1_pingroup,
+ &ssp0_cs2_pingroup,
+ &ssp0_cs3_pingroup,
+ &uart0_pingroup,
+ &uart0_enh_pingroup,
+ &uart1_pingroup,
+ &i2s_in_pingroup,
+ &i2s_out_pingroup,
+ &gmii_pingroup,
+ &rgmii_pingroup,
+ &rmii_pingroup,
+ &sgmii_pingroup,
+ &i2c0_pingroup,
+ &i2c1_pingroup,
+ &cec0_pingroup,
+ &cec1_pingroup,
+ &sdhci_pingroup,
+ &cf_pingroup,
+ &xd_pingroup,
+ &clcd_pingroup,
+ &arm_trace_pingroup,
+ &miphy_dbg_pingroup,
+ &pcie_pingroup,
+ &sata_pingroup,
+};
+
+/* functions */
+static struct spear_function *spear1340_functions[] = {
+ &pads_as_gpio_function,
+ &fsmc_function,
+ &keyboard_function,
+ &spdif_in_function,
+ &spdif_out_function,
+ &gpt_0_1_function,
+ &pwm_function,
+ &vip_function,
+ &cam0_function,
+ &cam1_function,
+ &cam2_function,
+ &cam3_function,
+ &smi_function,
+ &ssp0_function,
+ &uart0_function,
+ &uart1_function,
+ &i2s_function,
+ &gmac_function,
+ &i2c0_function,
+ &i2c1_function,
+ &cec0_function,
+ &cec1_function,
+ &sdhci_function,
+ &cf_function,
+ &xd_function,
+ &clcd_function,
+ &arm_trace_function,
+ &miphy_dbg_function,
+ &pcie_function,
+ &sata_function,
+};
+
+static struct spear_pinctrl_machdata spear1340_machdata = {
+ .pins = spear1340_pins,
+ .npins = ARRAY_SIZE(spear1340_pins),
+ .groups = spear1340_pingroups,
+ .ngroups = ARRAY_SIZE(spear1340_pingroups),
+ .functions = spear1340_functions,
+ .nfunctions = ARRAY_SIZE(spear1340_functions),
+ .modes_supported = false,
+};
+
+static struct of_device_id spear1340_pinctrl_of_match[] __devinitdata = {
+ {
+ .compatible = "st,spear1340-pinmux",
+ },
+ {},
+};
+
+static int __devinit spear1340_pinctrl_probe(struct platform_device *pdev)
+{
+ return spear_pinctrl_probe(pdev, &spear1340_machdata);
+}
+
+static int __devexit spear1340_pinctrl_remove(struct platform_device *pdev)
+{
+ return spear_pinctrl_remove(pdev);
+}
+
+static struct platform_driver spear1340_pinctrl_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = spear1340_pinctrl_of_match,
+ },
+ .probe = spear1340_pinctrl_probe,
+ .remove = __devexit_p(spear1340_pinctrl_remove),
+};
+
+static int __init spear1340_pinctrl_init(void)
+{
+ return platform_driver_register(&spear1340_pinctrl_driver);
+}
+arch_initcall(spear1340_pinctrl_init);
+
+static void __exit spear1340_pinctrl_exit(void)
+{
+ platform_driver_unregister(&spear1340_pinctrl_driver);
+}
+module_exit(spear1340_pinctrl_exit);
+
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.c b/drivers/pinctrl/spear/pinctrl-spear3xx.c
index 832049a8b1c..91c883bc46a 100644
--- a/drivers/pinctrl/spear/pinctrl-spear3xx.c
+++ b/drivers/pinctrl/spear/pinctrl-spear3xx.c
@@ -15,108 +15,7 @@
/* pins */
static const struct pinctrl_pin_desc spear3xx_pins[] = {
- PINCTRL_PIN(0, "PLGPIO0"),
- PINCTRL_PIN(1, "PLGPIO1"),
- PINCTRL_PIN(2, "PLGPIO2"),
- PINCTRL_PIN(3, "PLGPIO3"),
- PINCTRL_PIN(4, "PLGPIO4"),
- PINCTRL_PIN(5, "PLGPIO5"),
- PINCTRL_PIN(6, "PLGPIO6"),
- PINCTRL_PIN(7, "PLGPIO7"),
- PINCTRL_PIN(8, "PLGPIO8"),
- PINCTRL_PIN(9, "PLGPIO9"),
- PINCTRL_PIN(10, "PLGPIO10"),
- PINCTRL_PIN(11, "PLGPIO11"),
- PINCTRL_PIN(12, "PLGPIO12"),
- PINCTRL_PIN(13, "PLGPIO13"),
- PINCTRL_PIN(14, "PLGPIO14"),
- PINCTRL_PIN(15, "PLGPIO15"),
- PINCTRL_PIN(16, "PLGPIO16"),
- PINCTRL_PIN(17, "PLGPIO17"),
- PINCTRL_PIN(18, "PLGPIO18"),
- PINCTRL_PIN(19, "PLGPIO19"),
- PINCTRL_PIN(20, "PLGPIO20"),
- PINCTRL_PIN(21, "PLGPIO21"),
- PINCTRL_PIN(22, "PLGPIO22"),
- PINCTRL_PIN(23, "PLGPIO23"),
- PINCTRL_PIN(24, "PLGPIO24"),
- PINCTRL_PIN(25, "PLGPIO25"),
- PINCTRL_PIN(26, "PLGPIO26"),
- PINCTRL_PIN(27, "PLGPIO27"),
- PINCTRL_PIN(28, "PLGPIO28"),
- PINCTRL_PIN(29, "PLGPIO29"),
- PINCTRL_PIN(30, "PLGPIO30"),
- PINCTRL_PIN(31, "PLGPIO31"),
- PINCTRL_PIN(32, "PLGPIO32"),
- PINCTRL_PIN(33, "PLGPIO33"),
- PINCTRL_PIN(34, "PLGPIO34"),
- PINCTRL_PIN(35, "PLGPIO35"),
- PINCTRL_PIN(36, "PLGPIO36"),
- PINCTRL_PIN(37, "PLGPIO37"),
- PINCTRL_PIN(38, "PLGPIO38"),
- PINCTRL_PIN(39, "PLGPIO39"),
- PINCTRL_PIN(40, "PLGPIO40"),
- PINCTRL_PIN(41, "PLGPIO41"),
- PINCTRL_PIN(42, "PLGPIO42"),
- PINCTRL_PIN(43, "PLGPIO43"),
- PINCTRL_PIN(44, "PLGPIO44"),
- PINCTRL_PIN(45, "PLGPIO45"),
- PINCTRL_PIN(46, "PLGPIO46"),
- PINCTRL_PIN(47, "PLGPIO47"),
- PINCTRL_PIN(48, "PLGPIO48"),
- PINCTRL_PIN(49, "PLGPIO49"),
- PINCTRL_PIN(50, "PLGPIO50"),
- PINCTRL_PIN(51, "PLGPIO51"),
- PINCTRL_PIN(52, "PLGPIO52"),
- PINCTRL_PIN(53, "PLGPIO53"),
- PINCTRL_PIN(54, "PLGPIO54"),
- PINCTRL_PIN(55, "PLGPIO55"),
- PINCTRL_PIN(56, "PLGPIO56"),
- PINCTRL_PIN(57, "PLGPIO57"),
- PINCTRL_PIN(58, "PLGPIO58"),
- PINCTRL_PIN(59, "PLGPIO59"),
- PINCTRL_PIN(60, "PLGPIO60"),
- PINCTRL_PIN(61, "PLGPIO61"),
- PINCTRL_PIN(62, "PLGPIO62"),
- PINCTRL_PIN(63, "PLGPIO63"),
- PINCTRL_PIN(64, "PLGPIO64"),
- PINCTRL_PIN(65, "PLGPIO65"),
- PINCTRL_PIN(66, "PLGPIO66"),
- PINCTRL_PIN(67, "PLGPIO67"),
- PINCTRL_PIN(68, "PLGPIO68"),
- PINCTRL_PIN(69, "PLGPIO69"),
- PINCTRL_PIN(70, "PLGPIO70"),
- PINCTRL_PIN(71, "PLGPIO71"),
- PINCTRL_PIN(72, "PLGPIO72"),
- PINCTRL_PIN(73, "PLGPIO73"),
- PINCTRL_PIN(74, "PLGPIO74"),
- PINCTRL_PIN(75, "PLGPIO75"),
- PINCTRL_PIN(76, "PLGPIO76"),
- PINCTRL_PIN(77, "PLGPIO77"),
- PINCTRL_PIN(78, "PLGPIO78"),
- PINCTRL_PIN(79, "PLGPIO79"),
- PINCTRL_PIN(80, "PLGPIO80"),
- PINCTRL_PIN(81, "PLGPIO81"),
- PINCTRL_PIN(82, "PLGPIO82"),
- PINCTRL_PIN(83, "PLGPIO83"),
- PINCTRL_PIN(84, "PLGPIO84"),
- PINCTRL_PIN(85, "PLGPIO85"),
- PINCTRL_PIN(86, "PLGPIO86"),
- PINCTRL_PIN(87, "PLGPIO87"),
- PINCTRL_PIN(88, "PLGPIO88"),
- PINCTRL_PIN(89, "PLGPIO89"),
- PINCTRL_PIN(90, "PLGPIO90"),
- PINCTRL_PIN(91, "PLGPIO91"),
- PINCTRL_PIN(92, "PLGPIO92"),
- PINCTRL_PIN(93, "PLGPIO93"),
- PINCTRL_PIN(94, "PLGPIO94"),
- PINCTRL_PIN(95, "PLGPIO95"),
- PINCTRL_PIN(96, "PLGPIO96"),
- PINCTRL_PIN(97, "PLGPIO97"),
- PINCTRL_PIN(98, "PLGPIO98"),
- PINCTRL_PIN(99, "PLGPIO99"),
- PINCTRL_PIN(100, "PLGPIO100"),
- PINCTRL_PIN(101, "PLGPIO101"),
+ SPEAR_PIN_0_TO_101,
};
/* firda_pins */
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index ee79ce64d9d..57787d87d9a 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -1104,6 +1104,7 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
mutex_init(&dev->mutex);
+ memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
dev->backlight_dev = backlight_device_register("toshiba",
diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
index 987332b71d8..fc1ad955118 100644
--- a/drivers/power/wm831x_power.c
+++ b/drivers/power/wm831x_power.c
@@ -565,7 +565,7 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
goto err_usb;
}
- irq = platform_get_irq_byname(pdev, "SYSLO");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq,
IRQF_TRIGGER_RISING, "System power low",
power);
@@ -575,7 +575,7 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
goto err_battery;
}
- irq = platform_get_irq_byname(pdev, "PWR SRC");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
ret = request_threaded_irq(irq, NULL, wm831x_pwr_src_irq,
IRQF_TRIGGER_RISING, "Power source",
power);
@@ -586,7 +586,9 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
}
for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) {
- irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
+ irq = wm831x_irq(wm831x,
+ platform_get_irq_byname(pdev,
+ wm831x_bat_irqs[i]));
ret = request_threaded_irq(irq, NULL, wm831x_bat_irq,
IRQF_TRIGGER_RISING,
wm831x_bat_irqs[i],
@@ -606,10 +608,10 @@ err_bat_irq:
irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
free_irq(irq, power);
}
- irq = platform_get_irq_byname(pdev, "PWR SRC");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
free_irq(irq, power);
err_syslo:
- irq = platform_get_irq_byname(pdev, "SYSLO");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
free_irq(irq, power);
err_battery:
if (power->have_battery)
@@ -626,17 +628,20 @@ err_kmalloc:
static __devexit int wm831x_power_remove(struct platform_device *pdev)
{
struct wm831x_power *wm831x_power = platform_get_drvdata(pdev);
+ struct wm831x *wm831x = wm831x_power->wm831x;
int irq, i;
for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) {
- irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
+ irq = wm831x_irq(wm831x,
+ platform_get_irq_byname(pdev,
+ wm831x_bat_irqs[i]));
free_irq(irq, wm831x_power);
}
- irq = platform_get_irq_byname(pdev, "PWR SRC");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
free_irq(irq, wm831x_power);
- irq = platform_get_irq_byname(pdev, "SYSLO");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
free_irq(irq, wm831x_power);
if (wm831x_power->have_battery)
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 49b2112b048..3660bace123 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -47,7 +47,7 @@ static int anatop_set_voltage(struct regulator_dev *reg, int min_uV,
int max_uV, unsigned *selector)
{
struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
- u32 val, sel;
+ u32 val, sel, mask;
int uv;
uv = min_uV;
@@ -71,11 +71,10 @@ static int anatop_set_voltage(struct regulator_dev *reg, int min_uV,
val = anatop_reg->min_bit_val + sel;
*selector = sel;
dev_dbg(&reg->dev, "%s: calculated val %d\n", __func__, val);
- anatop_set_bits(anatop_reg->mfd,
- anatop_reg->control_reg,
- anatop_reg->vol_bit_shift,
- anatop_reg->vol_bit_width,
- val);
+ mask = ((1 << anatop_reg->vol_bit_width) - 1) <<
+ anatop_reg->vol_bit_shift;
+ val <<= anatop_reg->vol_bit_shift;
+ anatop_write_reg(anatop_reg->mfd, anatop_reg->control_reg, val, mask);
return 0;
}
@@ -88,10 +87,9 @@ static int anatop_get_voltage_sel(struct regulator_dev *reg)
if (!anatop_reg->control_reg)
return -ENOTSUPP;
- val = anatop_get_bits(anatop_reg->mfd,
- anatop_reg->control_reg,
- anatop_reg->vol_bit_shift,
- anatop_reg->vol_bit_width);
+ val = anatop_read_reg(anatop_reg->mfd, anatop_reg->control_reg);
+ val = (val & ((1 << anatop_reg->vol_bit_width) - 1)) >>
+ anatop_reg->vol_bit_shift;
return val - anatop_reg->min_bit_val;
}
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 4e01a423471..6bf864b4bdf 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -331,21 +331,16 @@ struct tps65910_reg {
static inline int tps65910_read(struct tps65910_reg *pmic, u8 reg)
{
- u8 val;
+ unsigned int val;
int err;
- err = pmic->mfd->read(pmic->mfd, reg, 1, &val);
+ err = tps65910_reg_read(pmic->mfd, reg, &val);
if (err)
return err;
return val;
}
-static inline int tps65910_write(struct tps65910_reg *pmic, u8 reg, u8 val)
-{
- return pmic->mfd->write(pmic->mfd, reg, 1, &val);
-}
-
static int tps65910_modify_bits(struct tps65910_reg *pmic, u8 reg,
u8 set_mask, u8 clear_mask)
{
@@ -362,7 +357,7 @@ static int tps65910_modify_bits(struct tps65910_reg *pmic, u8 reg,
data &= ~clear_mask;
data |= set_mask;
- err = tps65910_write(pmic, reg, data);
+ err = tps65910_reg_write(pmic->mfd, reg, data);
if (err)
dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
@@ -371,7 +366,7 @@ out:
return err;
}
-static int tps65910_reg_read(struct tps65910_reg *pmic, u8 reg)
+static int tps65910_reg_read_locked(struct tps65910_reg *pmic, u8 reg)
{
int data;
@@ -385,13 +380,13 @@ static int tps65910_reg_read(struct tps65910_reg *pmic, u8 reg)
return data;
}
-static int tps65910_reg_write(struct tps65910_reg *pmic, u8 reg, u8 val)
+static int tps65910_reg_write_locked(struct tps65910_reg *pmic, u8 reg, u8 val)
{
int err;
mutex_lock(&pmic->mutex);
- err = tps65910_write(pmic, reg, val);
+ err = tps65910_reg_write(pmic->mfd, reg, val);
if (err < 0)
dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
@@ -490,9 +485,9 @@ static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode)
LDO_ST_MODE_BIT);
case REGULATOR_MODE_IDLE:
value = LDO_ST_ON_BIT | LDO_ST_MODE_BIT;
- return tps65910_set_bits(mfd, reg, value);
+ return tps65910_reg_set_bits(mfd, reg, value);
case REGULATOR_MODE_STANDBY:
- return tps65910_clear_bits(mfd, reg, LDO_ST_ON_BIT);
+ return tps65910_reg_clear_bits(mfd, reg, LDO_ST_ON_BIT);
}
return -EINVAL;
@@ -507,7 +502,7 @@ static unsigned int tps65910_get_mode(struct regulator_dev *dev)
if (reg < 0)
return reg;
- value = tps65910_reg_read(pmic, reg);
+ value = tps65910_reg_read_locked(pmic, reg);
if (value < 0)
return value;
@@ -527,28 +522,28 @@ static int tps65910_get_voltage_dcdc_sel(struct regulator_dev *dev)
switch (id) {
case TPS65910_REG_VDD1:
- opvsel = tps65910_reg_read(pmic, TPS65910_VDD1_OP);
- mult = tps65910_reg_read(pmic, TPS65910_VDD1);
+ opvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD1_OP);
+ mult = tps65910_reg_read_locked(pmic, TPS65910_VDD1);
mult = (mult & VDD1_VGAIN_SEL_MASK) >> VDD1_VGAIN_SEL_SHIFT;
- srvsel = tps65910_reg_read(pmic, TPS65910_VDD1_SR);
+ srvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD1_SR);
sr = opvsel & VDD1_OP_CMD_MASK;
opvsel &= VDD1_OP_SEL_MASK;
srvsel &= VDD1_SR_SEL_MASK;
vselmax = 75;
break;
case TPS65910_REG_VDD2:
- opvsel = tps65910_reg_read(pmic, TPS65910_VDD2_OP);
- mult = tps65910_reg_read(pmic, TPS65910_VDD2);
+ opvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD2_OP);
+ mult = tps65910_reg_read_locked(pmic, TPS65910_VDD2);
mult = (mult & VDD2_VGAIN_SEL_MASK) >> VDD2_VGAIN_SEL_SHIFT;
- srvsel = tps65910_reg_read(pmic, TPS65910_VDD2_SR);
+ srvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD2_SR);
sr = opvsel & VDD2_OP_CMD_MASK;
opvsel &= VDD2_OP_SEL_MASK;
srvsel &= VDD2_SR_SEL_MASK;
vselmax = 75;
break;
case TPS65911_REG_VDDCTRL:
- opvsel = tps65910_reg_read(pmic, TPS65911_VDDCTRL_OP);
- srvsel = tps65910_reg_read(pmic, TPS65911_VDDCTRL_SR);
+ opvsel = tps65910_reg_read_locked(pmic, TPS65911_VDDCTRL_OP);
+ srvsel = tps65910_reg_read_locked(pmic, TPS65911_VDDCTRL_SR);
sr = opvsel & VDDCTRL_OP_CMD_MASK;
opvsel &= VDDCTRL_OP_SEL_MASK;
srvsel &= VDDCTRL_SR_SEL_MASK;
@@ -588,7 +583,7 @@ static int tps65910_get_voltage_sel(struct regulator_dev *dev)
if (reg < 0)
return reg;
- value = tps65910_reg_read(pmic, reg);
+ value = tps65910_reg_read_locked(pmic, reg);
if (value < 0)
return value;
@@ -625,7 +620,7 @@ static int tps65911_get_voltage_sel(struct regulator_dev *dev)
reg = pmic->get_ctrl_reg(id);
- value = tps65910_reg_read(pmic, reg);
+ value = tps65910_reg_read_locked(pmic, reg);
switch (id) {
case TPS65911_REG_LDO1:
@@ -670,7 +665,7 @@ static int tps65910_set_voltage_dcdc_sel(struct regulator_dev *dev,
tps65910_modify_bits(pmic, TPS65910_VDD1,
(dcdc_mult << VDD1_VGAIN_SEL_SHIFT),
VDD1_VGAIN_SEL_MASK);
- tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel);
+ tps65910_reg_write_locked(pmic, TPS65910_VDD1_OP, vsel);
break;
case TPS65910_REG_VDD2:
dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
@@ -681,11 +676,11 @@ static int tps65910_set_voltage_dcdc_sel(struct regulator_dev *dev,
tps65910_modify_bits(pmic, TPS65910_VDD2,
(dcdc_mult << VDD2_VGAIN_SEL_SHIFT),
VDD1_VGAIN_SEL_MASK);
- tps65910_reg_write(pmic, TPS65910_VDD2_OP, vsel);
+ tps65910_reg_write_locked(pmic, TPS65910_VDD2_OP, vsel);
break;
case TPS65911_REG_VDDCTRL:
vsel = selector + 3;
- tps65910_reg_write(pmic, TPS65911_VDDCTRL_OP, vsel);
+ tps65910_reg_write_locked(pmic, TPS65911_VDDCTRL_OP, vsel);
}
return 0;
@@ -936,10 +931,10 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
/* External EN1 control */
if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN1)
- ret = tps65910_set_bits(mfd,
+ ret = tps65910_reg_set_bits(mfd,
TPS65910_EN1_LDO_ASS + regoffs, bit_pos);
else
- ret = tps65910_clear_bits(mfd,
+ ret = tps65910_reg_clear_bits(mfd,
TPS65910_EN1_LDO_ASS + regoffs, bit_pos);
if (ret < 0) {
dev_err(mfd->dev,
@@ -949,10 +944,10 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
/* External EN2 control */
if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN2)
- ret = tps65910_set_bits(mfd,
+ ret = tps65910_reg_set_bits(mfd,
TPS65910_EN2_LDO_ASS + regoffs, bit_pos);
else
- ret = tps65910_clear_bits(mfd,
+ ret = tps65910_reg_clear_bits(mfd,
TPS65910_EN2_LDO_ASS + regoffs, bit_pos);
if (ret < 0) {
dev_err(mfd->dev,
@@ -964,10 +959,10 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
if ((tps65910_chip_id(mfd) == TPS65910) &&
(id >= TPS65910_REG_VDIG1)) {
if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3)
- ret = tps65910_set_bits(mfd,
+ ret = tps65910_reg_set_bits(mfd,
TPS65910_EN3_LDO_ASS + regoffs, bit_pos);
else
- ret = tps65910_clear_bits(mfd,
+ ret = tps65910_reg_clear_bits(mfd,
TPS65910_EN3_LDO_ASS + regoffs, bit_pos);
if (ret < 0) {
dev_err(mfd->dev,
@@ -979,10 +974,10 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
/* Return if no external control is selected */
if (!(ext_sleep_config & EXT_SLEEP_CONTROL)) {
/* Clear all sleep controls */
- ret = tps65910_clear_bits(mfd,
+ ret = tps65910_reg_clear_bits(mfd,
TPS65910_SLEEP_KEEP_LDO_ON + regoffs, bit_pos);
if (!ret)
- ret = tps65910_clear_bits(mfd,
+ ret = tps65910_reg_clear_bits(mfd,
TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
if (ret < 0)
dev_err(mfd->dev,
@@ -1001,32 +996,33 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
(tps65910_chip_id(mfd) == TPS65911))) {
int op_reg_add = pmic->get_ctrl_reg(id) + 1;
int sr_reg_add = pmic->get_ctrl_reg(id) + 2;
- int opvsel = tps65910_reg_read(pmic, op_reg_add);
- int srvsel = tps65910_reg_read(pmic, sr_reg_add);
+ int opvsel = tps65910_reg_read_locked(pmic, op_reg_add);
+ int srvsel = tps65910_reg_read_locked(pmic, sr_reg_add);
if (opvsel & VDD1_OP_CMD_MASK) {
u8 reg_val = srvsel & VDD1_OP_SEL_MASK;
- ret = tps65910_reg_write(pmic, op_reg_add, reg_val);
+ ret = tps65910_reg_write_locked(pmic, op_reg_add,
+ reg_val);
if (ret < 0) {
dev_err(mfd->dev,
"Error in configuring op register\n");
return ret;
}
}
- ret = tps65910_reg_write(pmic, sr_reg_add, 0);
+ ret = tps65910_reg_write_locked(pmic, sr_reg_add, 0);
if (ret < 0) {
dev_err(mfd->dev, "Error in settting sr register\n");
return ret;
}
}
- ret = tps65910_clear_bits(mfd,
+ ret = tps65910_reg_clear_bits(mfd,
TPS65910_SLEEP_KEEP_LDO_ON + regoffs, bit_pos);
if (!ret) {
if (ext_sleep_config & TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP)
- ret = tps65910_set_bits(mfd,
+ ret = tps65910_reg_set_bits(mfd,
TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
else
- ret = tps65910_clear_bits(mfd,
+ ret = tps65910_reg_clear_bits(mfd,
TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
}
if (ret < 0)
@@ -1177,7 +1173,7 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pmic);
/* Give control of all register to control port */
- tps65910_set_bits(pmic->mfd, TPS65910_DEVCTRL,
+ tps65910_reg_set_bits(pmic->mfd, TPS65910_DEVCTRL,
DEVCTRL_SR_CTL_I2C_SEL_MASK);
switch(tps65910_chip_id(tps65910)) {
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index a885911bb5f..099da11e989 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -535,7 +535,7 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
goto err;
}
- irq = platform_get_irq_byname(pdev, "UV");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
IRQF_TRIGGER_RISING, dcdc->name, dcdc);
if (ret != 0) {
@@ -544,7 +544,7 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
goto err_regulator;
}
- irq = platform_get_irq_byname(pdev, "HC");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "HC"));
ret = request_threaded_irq(irq, NULL, wm831x_dcdc_oc_irq,
IRQF_TRIGGER_RISING, dcdc->name, dcdc);
if (ret != 0) {
@@ -558,7 +558,8 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
return 0;
err_uv:
- free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
+ free_irq(wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV")),
+ dcdc);
err_regulator:
regulator_unregister(dcdc->regulator);
err:
@@ -570,11 +571,14 @@ err:
static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
{
struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
+ struct wm831x *wm831x = dcdc->wm831x;
platform_set_drvdata(pdev, NULL);
- free_irq(platform_get_irq_byname(pdev, "HC"), dcdc);
- free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
+ free_irq(wm831x_irq(wm831x, platform_get_irq_byname(pdev, "HC")),
+ dcdc);
+ free_irq(wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV")),
+ dcdc);
regulator_unregister(dcdc->regulator);
if (dcdc->dvs_gpio)
gpio_free(dcdc->dvs_gpio);
@@ -726,7 +730,7 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
goto err;
}
- irq = platform_get_irq_byname(pdev, "UV");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
IRQF_TRIGGER_RISING, dcdc->name, dcdc);
if (ret != 0) {
@@ -751,7 +755,8 @@ static __devexit int wm831x_buckp_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
+ free_irq(wm831x_irq(dcdc->wm831x, platform_get_irq_byname(pdev, "UV")),
+ dcdc);
regulator_unregister(dcdc->regulator);
return 0;
@@ -859,7 +864,7 @@ static __devinit int wm831x_boostp_probe(struct platform_device *pdev)
goto err;
}
- irq = platform_get_irq_byname(pdev, "UV");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
IRQF_TRIGGER_RISING, dcdc->name,
dcdc);
@@ -885,7 +890,8 @@ static __devexit int wm831x_boostp_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
+ free_irq(wm831x_irq(dcdc->wm831x, platform_get_irq_byname(pdev, "UV")),
+ dcdc);
regulator_unregister(dcdc->regulator);
return 0;
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index b50ab778b09..0d207c29771 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -202,7 +202,7 @@ static __devinit int wm831x_isink_probe(struct platform_device *pdev)
goto err;
}
- irq = platform_get_irq(pdev, 0);
+ irq = wm831x_irq(wm831x, platform_get_irq(pdev, 0));
ret = request_threaded_irq(irq, NULL, wm831x_isink_irq,
IRQF_TRIGGER_RISING, isink->name, isink);
if (ret != 0) {
@@ -227,7 +227,7 @@ static __devexit int wm831x_isink_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- free_irq(platform_get_irq(pdev, 0), isink);
+ free_irq(wm831x_irq(isink->wm831x, platform_get_irq(pdev, 0)), isink);
regulator_unregister(isink->regulator);
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index aa1f8b3fbe1..a9a28d8ac18 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -321,7 +321,7 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
goto err;
}
- irq = platform_get_irq_byname(pdev, "UV");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq,
IRQF_TRIGGER_RISING, ldo->name,
ldo);
@@ -347,7 +347,8 @@ static __devexit int wm831x_gp_ldo_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- free_irq(platform_get_irq_byname(pdev, "UV"), ldo);
+ free_irq(wm831x_irq(ldo->wm831x,
+ platform_get_irq_byname(pdev, "UV")), ldo);
regulator_unregister(ldo->regulator);
return 0;
@@ -582,7 +583,7 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
goto err;
}
- irq = platform_get_irq_byname(pdev, "UV");
+ irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq,
IRQF_TRIGGER_RISING, ldo->name, ldo);
if (ret != 0) {
@@ -605,7 +606,8 @@ static __devexit int wm831x_aldo_remove(struct platform_device *pdev)
{
struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
- free_irq(platform_get_irq_byname(pdev, "UV"), ldo);
+ free_irq(wm831x_irq(ldo->wm831x, platform_get_irq_byname(pdev, "UV")),
+ ldo);
regulator_unregister(ldo->regulator);
return 0;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 4161bfe462c..08cbdb900a1 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -620,27 +620,6 @@ config RTC_DRV_MSM6242
This driver can also be built as a module. If so, the module
will be called rtc-msm6242.
-config RTC_DRV_IMXDI
- tristate "Freescale IMX DryIce Real Time Clock"
- depends on ARCH_MX25
- depends on RTC_CLASS
- help
- Support for Freescale IMX DryIce RTC
-
- This driver can also be built as a module, if so, the module
- will be called "rtc-imxdi".
-
-config RTC_MXC
- tristate "Freescale MXC Real Time Clock"
- depends on ARCH_MXC
- depends on RTC_CLASS
- help
- If you say yes here you get support for the Freescale MXC
- RTC module.
-
- This driver can also be built as a module, if so, the module
- will be called "rtc-mxc".
-
config RTC_DRV_BQ4802
tristate "TI BQ4802"
help
@@ -738,6 +717,16 @@ config RTC_DRV_DAVINCI
This driver can also be built as a module. If so, the module
will be called rtc-davinci.
+config RTC_DRV_IMXDI
+ tristate "Freescale IMX DryIce Real Time Clock"
+ depends on SOC_IMX25
+ depends on RTC_CLASS
+ help
+ Support for Freescale IMX DryIce RTC
+
+ This driver can also be built as a module. If so, the module
+ will be called "rtc-imxdi".
+
config RTC_DRV_OMAP
tristate "TI OMAP1"
depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX
@@ -1087,4 +1076,15 @@ config RTC_DRV_LOONGSON1
This driver can also be built as a module. If so, the module
will be called rtc-ls1x.
+config RTC_DRV_MXC
+ tristate "Freescale MXC Real Time Clock"
+ depends on ARCH_MXC
+ depends on RTC_CLASS
+ help
+ If you say yes here you get support for the Freescale MXC
+ RTC module.
+
+ This driver can also be built as a module. If so, the module
+ will be called "rtc-mxc".
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 727ae7786e6..2973921c30d 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -61,7 +61,7 @@ obj-$(CONFIG_RTC_DRV_M41T94) += rtc-m41t94.o
obj-$(CONFIG_RTC_DRV_M48T35) += rtc-m48t35.o
obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o
obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
-obj-$(CONFIG_RTC_MXC) += rtc-mxc.o
+obj-$(CONFIG_RTC_DRV_MXC) += rtc-mxc.o
obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
obj-$(CONFIG_RTC_DRV_MAX8925) += rtc-max8925.o
obj-$(CONFIG_RTC_DRV_MAX8998) += rtc-max8998.o
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index c293d0cdb10..836710ce750 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -17,8 +17,7 @@
#include <linux/string.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
-
-
+#include <linux/rtc/ds1307.h>
/*
* We can't determine type by probing, but if we expect pre-Linux code
@@ -92,7 +91,8 @@ enum ds_type {
# define DS1337_BIT_A2I 0x02
# define DS1337_BIT_A1I 0x01
#define DS1339_REG_ALARM1_SECS 0x07
-#define DS1339_REG_TRICKLE 0x10
+
+#define DS13XX_TRICKLE_CHARGER_MAGIC 0xa0
#define RX8025_REG_CTRL1 0x0e
# define RX8025_BIT_2412 0x20
@@ -124,6 +124,7 @@ struct chip_desc {
unsigned alarm:1;
u16 nvram_offset;
u16 nvram_size;
+ u16 trickle_charger_reg;
};
static const struct chip_desc chips[last_ds_type] = {
@@ -140,6 +141,13 @@ static const struct chip_desc chips[last_ds_type] = {
},
[ds_1339] = {
.alarm = 1,
+ .trickle_charger_reg = 0x10,
+ },
+ [ds_1340] = {
+ .trickle_charger_reg = 0x08,
+ },
+ [ds_1388] = {
+ .trickle_charger_reg = 0x0a,
},
[ds_3231] = {
.alarm = 1,
@@ -619,6 +627,7 @@ static int __devinit ds1307_probe(struct i2c_client *client,
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
int want_irq = false;
unsigned char *buf;
+ struct ds1307_platform_data *pdata = client->dev.platform_data;
static const int bbsqi_bitpos[] = {
[ds_1337] = 0,
[ds_1339] = DS1339_BIT_BBSQI,
@@ -637,7 +646,10 @@ static int __devinit ds1307_probe(struct i2c_client *client,
ds1307->client = client;
ds1307->type = id->driver_data;
- ds1307->offset = 0;
+
+ if (pdata && pdata->trickle_charger_setup && chip->trickle_charger_reg)
+ i2c_smbus_write_byte_data(client, chip->trickle_charger_reg,
+ DS13XX_TRICKLE_CHARGER_MAGIC | pdata->trickle_charger_setup);
buf = ds1307->regs;
if (i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) {
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 14a42a1edc6..9602278ff98 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -127,7 +127,7 @@ static const struct attribute_group ep93xx_rtc_sysfs_files = {
.attrs = ep93xx_rtc_attrs,
};
-static int __init ep93xx_rtc_probe(struct platform_device *pdev)
+static int __devinit ep93xx_rtc_probe(struct platform_device *pdev)
{
struct ep93xx_rtc *ep93xx_rtc;
struct resource *res;
@@ -174,7 +174,7 @@ exit:
return err;
}
-static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
+static int __devexit ep93xx_rtc_remove(struct platform_device *pdev)
{
struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev);
@@ -186,31 +186,19 @@ static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
return 0;
}
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:ep93xx-rtc");
-
static struct platform_driver ep93xx_rtc_driver = {
.driver = {
.name = "ep93xx-rtc",
.owner = THIS_MODULE,
},
- .remove = __exit_p(ep93xx_rtc_remove),
+ .probe = ep93xx_rtc_probe,
+ .remove = __devexit_p(ep93xx_rtc_remove),
};
-static int __init ep93xx_rtc_init(void)
-{
- return platform_driver_probe(&ep93xx_rtc_driver, ep93xx_rtc_probe);
-}
-
-static void __exit ep93xx_rtc_exit(void)
-{
- platform_driver_unregister(&ep93xx_rtc_driver);
-}
+module_platform_driver(ep93xx_rtc_driver);
MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("EP93XX RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-
-module_init(ep93xx_rtc_init);
-module_exit(ep93xx_rtc_exit);
+MODULE_ALIAS("platform:ep93xx-rtc");
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index d93a9608b1f..891cd6c61d0 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -405,7 +405,7 @@ static int dryice_rtc_probe(struct platform_device *pdev)
imxdi->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(imxdi->clk))
return PTR_ERR(imxdi->clk);
- clk_enable(imxdi->clk);
+ clk_prepare_enable(imxdi->clk);
/*
* Initialize dryice hardware
@@ -470,7 +470,7 @@ static int dryice_rtc_probe(struct platform_device *pdev)
return 0;
err:
- clk_disable(imxdi->clk);
+ clk_disable_unprepare(imxdi->clk);
clk_put(imxdi->clk);
return rc;
@@ -487,7 +487,7 @@ static int __devexit dryice_rtc_remove(struct platform_device *pdev)
rtc_device_unregister(imxdi->rtc);
- clk_disable(imxdi->clk);
+ clk_disable_unprepare(imxdi->clk);
clk_put(imxdi->clk);
return 0;
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
index 63c72189c64..d5218553741 100644
--- a/drivers/rtc/rtc-lpc32xx.c
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -19,6 +19,7 @@
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/of.h>
/*
* Clock and Power control register offsets
@@ -386,13 +387,22 @@ static const struct dev_pm_ops lpc32xx_rtc_pm_ops = {
#define LPC32XX_RTC_PM_OPS NULL
#endif
+#ifdef CONFIG_OF
+static const struct of_device_id lpc32xx_rtc_match[] = {
+ { .compatible = "nxp,lpc3220-rtc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_rtc_match);
+#endif
+
static struct platform_driver lpc32xx_rtc_driver = {
.probe = lpc32xx_rtc_probe,
.remove = __devexit_p(lpc32xx_rtc_remove),
.driver = {
.name = RTC_NAME,
.owner = THIS_MODULE,
- .pm = LPC32XX_RTC_PM_OPS
+ .pm = LPC32XX_RTC_PM_OPS,
+ .of_match_table = of_match_ptr(lpc32xx_rtc_match),
},
};
diff --git a/drivers/rtc/rtc-m41t93.c b/drivers/rtc/rtc-m41t93.c
index 10f1c29436e..efab3d48cb1 100644
--- a/drivers/rtc/rtc-m41t93.c
+++ b/drivers/rtc/rtc-m41t93.c
@@ -48,6 +48,7 @@ static inline int m41t93_set_reg(struct spi_device *spi, u8 addr, u8 data)
static int m41t93_set_time(struct device *dev, struct rtc_time *tm)
{
struct spi_device *spi = to_spi_device(dev);
+ int tmp;
u8 buf[9] = {0x80}; /* write cmd + 8 data bytes */
u8 * const data = &buf[1]; /* ptr to first data byte */
@@ -62,6 +63,30 @@ static int m41t93_set_time(struct device *dev, struct rtc_time *tm)
return -EINVAL;
}
+ tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
+ if (tmp < 0)
+ return tmp;
+
+ if (tmp & M41T93_FLAG_OF) {
+ dev_warn(&spi->dev, "OF bit is set, resetting.\n");
+ m41t93_set_reg(spi, M41T93_REG_FLAGS, tmp & ~M41T93_FLAG_OF);
+
+ tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
+ if (tmp < 0) {
+ return tmp;
+ } else if (tmp & M41T93_FLAG_OF) {
+ /* OF cannot be immediately reset: oscillator has to be
+ * restarted. */
+ u8 reset_osc = buf[M41T93_REG_ST_SEC] | M41T93_FLAG_ST;
+
+ dev_warn(&spi->dev,
+ "OF bit is still set, kickstarting clock.\n");
+ m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
+ reset_osc &= ~M41T93_FLAG_ST;
+ m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
+ }
+ }
+
data[M41T93_REG_SSEC] = 0;
data[M41T93_REG_ST_SEC] = bin2bcd(tm->tm_sec);
data[M41T93_REG_MIN] = bin2bcd(tm->tm_min);
@@ -89,10 +114,7 @@ static int m41t93_get_time(struct device *dev, struct rtc_time *tm)
1. halt bit (HT) is set: the clock is running but update of readout
registers has been disabled due to power failure. This is normal
case after poweron. Time is valid after resetting HT bit.
- 2. oscillator fail bit (OF) is set. Oscillator has be stopped and
- time is invalid:
- a) OF can be immeditely reset.
- b) OF cannot be immediately reset: oscillator has to be restarted.
+ 2. oscillator fail bit (OF) is set: time is invalid.
*/
tmp = spi_w8r8(spi, M41T93_REG_ALM_HOUR_HT);
if (tmp < 0)
@@ -110,21 +132,7 @@ static int m41t93_get_time(struct device *dev, struct rtc_time *tm)
if (tmp & M41T93_FLAG_OF) {
ret = -EINVAL;
- dev_warn(&spi->dev, "OF bit is set, resetting.\n");
- m41t93_set_reg(spi, M41T93_REG_FLAGS, tmp & ~M41T93_FLAG_OF);
-
- tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
- if (tmp < 0)
- return tmp;
- else if (tmp & M41T93_FLAG_OF) {
- u8 reset_osc = buf[M41T93_REG_ST_SEC] | M41T93_FLAG_ST;
-
- dev_warn(&spi->dev,
- "OF bit is still set, kickstarting clock.\n");
- m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
- reset_osc &= ~M41T93_FLAG_ST;
- m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
- }
+ dev_warn(&spi->dev, "OF bit is set, write time to restart.\n");
}
if (tmp & M41T93_FLAG_BL)
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index bc0677de199..97a3284bb7c 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -64,6 +64,7 @@ struct pcf8563 {
* 1970...2069.
*/
int c_polarity; /* 0: MO_C=1 means 19xx, otherwise MO_C=1 means 20xx */
+ int voltage_low; /* indicates if a low voltage was detected */
};
/*
@@ -86,9 +87,11 @@ static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
return -EIO;
}
- if (buf[PCF8563_REG_SC] & PCF8563_SC_LV)
+ if (buf[PCF8563_REG_SC] & PCF8563_SC_LV) {
+ pcf8563->voltage_low = 1;
dev_info(&client->dev,
"low voltage detected, date/time is not reliable.\n");
+ }
dev_dbg(&client->dev,
"%s: raw data is st1=%02x, st2=%02x, sec=%02x, min=%02x, hr=%02x, "
@@ -173,6 +176,44 @@ static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm)
return 0;
}
+#ifdef CONFIG_RTC_INTF_DEV
+static int pcf8563_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+ struct pcf8563 *pcf8563 = i2c_get_clientdata(to_i2c_client(dev));
+ struct rtc_time tm;
+
+ switch (cmd) {
+ case RTC_VL_READ:
+ if (pcf8563->voltage_low)
+ dev_info(dev, "low voltage detected, date/time is not reliable.\n");
+
+ if (copy_to_user((void __user *)arg, &pcf8563->voltage_low,
+ sizeof(int)))
+ return -EFAULT;
+ return 0;
+ case RTC_VL_CLR:
+ /*
+ * Clear the VL bit in the seconds register in case
+ * the time has not been set already (which would
+ * have cleared it). This does not really matter
+ * because of the cached voltage_low value but do it
+ * anyway for consistency.
+ */
+ if (pcf8563_get_datetime(to_i2c_client(dev), &tm))
+ pcf8563_set_datetime(to_i2c_client(dev), &tm);
+
+ /* Clear the cached value. */
+ pcf8563->voltage_low = 0;
+
+ return 0;
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+#else
+#define pcf8563_rtc_ioctl NULL
+#endif
+
static int pcf8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
return pcf8563_get_datetime(to_i2c_client(dev), tm);
@@ -184,6 +225,7 @@ static int pcf8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
}
static const struct rtc_class_ops pcf8563_rtc_ops = {
+ .ioctl = pcf8563_rtc_ioctl,
.read_time = pcf8563_rtc_read_time,
.set_time = pcf8563_rtc_set_time,
};
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index f027c063fb2..cc0533994f6 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -220,17 +220,9 @@ static irqreturn_t pl031_interrupt(int irq, void *dev_id)
unsigned long events = 0;
rtcmis = readl(ldata->base + RTC_MIS);
- if (rtcmis) {
- writel(rtcmis, ldata->base + RTC_ICR);
-
- if (rtcmis & RTC_BIT_AI)
- events |= (RTC_AF | RTC_IRQF);
-
- /* Timer interrupt is only available in ST variants */
- if ((rtcmis & RTC_BIT_PI) &&
- (ldata->hw_designer == AMBA_VENDOR_ST))
- events |= (RTC_PF | RTC_IRQF);
-
+ if (rtcmis & RTC_BIT_AI) {
+ writel(RTC_BIT_AI, ldata->base + RTC_ICR);
+ events |= (RTC_AF | RTC_IRQF);
rtc_update_irq(ldata->rtc, 1, events);
return IRQ_HANDLED;
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 3f3a2975236..7e6af0b22f1 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -670,6 +670,7 @@ static int s3c_rtc_resume(struct platform_device *pdev)
#define s3c_rtc_resume NULL
#endif
+#ifdef CONFIG_OF
static struct s3c_rtc_drv_data s3c_rtc_drv_data_array[] = {
[TYPE_S3C2410] = { TYPE_S3C2410 },
[TYPE_S3C2416] = { TYPE_S3C2416 },
@@ -677,7 +678,6 @@ static struct s3c_rtc_drv_data s3c_rtc_drv_data_array[] = {
[TYPE_S3C64XX] = { TYPE_S3C64XX },
};
-#ifdef CONFIG_OF
static const struct of_device_id s3c_rtc_dt_match[] = {
{
.compatible = "samsung,s3c2410-rtc",
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index e38da0dc418..1f76320e545 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/slab.h>
@@ -519,6 +520,14 @@ static void spear_rtc_shutdown(struct platform_device *pdev)
clk_disable(config->clk);
}
+#ifdef CONFIG_OF
+static const struct of_device_id spear_rtc_id_table[] = {
+ { .compatible = "st,spear600-rtc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spear_rtc_id_table);
+#endif
+
static struct platform_driver spear_rtc_driver = {
.probe = spear_rtc_probe,
.remove = __devexit_p(spear_rtc_remove),
@@ -527,6 +536,7 @@ static struct platform_driver spear_rtc_driver = {
.shutdown = spear_rtc_shutdown,
.driver = {
.name = "rtc-spear",
+ .of_match_table = of_match_ptr(spear_rtc_id_table),
},
};
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 75259fe3860..c006025cecc 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -309,7 +309,8 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
struct resource *res;
int ret;
- info = kzalloc(sizeof(struct tegra_rtc_info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(struct tegra_rtc_info),
+ GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -317,29 +318,18 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
if (!res) {
dev_err(&pdev->dev,
"Unable to allocate resources for device.\n");
- ret = -EBUSY;
- goto err_free_info;
+ return -EBUSY;
}
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- dev_err(&pdev->dev,
- "Unable to request mem region for device.\n");
- ret = -EBUSY;
- goto err_free_info;
+ info->rtc_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!info->rtc_base) {
+ dev_err(&pdev->dev, "Unable to request mem region and grab IOs for device.\n");
+ return -EBUSY;
}
info->tegra_rtc_irq = platform_get_irq(pdev, 0);
- if (info->tegra_rtc_irq <= 0) {
- ret = -EBUSY;
- goto err_release_mem_region;
- }
-
- info->rtc_base = ioremap_nocache(res->start, resource_size(res));
- if (!info->rtc_base) {
- dev_err(&pdev->dev, "Unable to grab IOs for device.\n");
- ret = -EBUSY;
- goto err_release_mem_region;
- }
+ if (info->tegra_rtc_irq <= 0)
+ return -EBUSY;
/* set context info. */
info->pdev = pdev;
@@ -362,11 +352,12 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Unable to register device (err=%d).\n",
ret);
- goto err_iounmap;
+ return ret;
}
- ret = request_irq(info->tegra_rtc_irq, tegra_rtc_irq_handler,
- IRQF_TRIGGER_HIGH, "rtc alarm", &pdev->dev);
+ ret = devm_request_irq(&pdev->dev, info->tegra_rtc_irq,
+ tegra_rtc_irq_handler, IRQF_TRIGGER_HIGH,
+ "rtc alarm", &pdev->dev);
if (ret) {
dev_err(&pdev->dev,
"Unable to request interrupt for device (err=%d).\n",
@@ -380,12 +371,6 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
err_dev_unreg:
rtc_device_unregister(info->rtc_dev);
-err_iounmap:
- iounmap(info->rtc_base);
-err_release_mem_region:
- release_mem_region(res->start, resource_size(res));
-err_free_info:
- kfree(info);
return ret;
}
@@ -393,17 +378,8 @@ err_free_info:
static int __devexit tegra_rtc_remove(struct platform_device *pdev)
{
struct tegra_rtc_info *info = platform_get_drvdata(pdev);
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EBUSY;
- free_irq(info->tegra_rtc_irq, &pdev->dev);
rtc_device_unregister(info->rtc_dev);
- iounmap(info->rtc_base);
- release_mem_region(res->start, resource_size(res));
- kfree(info);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index 3b6e6a67e76..59c6245e042 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -396,7 +396,7 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_rtc *wm831x_rtc;
- int alm_irq = platform_get_irq_byname(pdev, "ALM");
+ int alm_irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "ALM"));
int ret = 0;
wm831x_rtc = devm_kzalloc(&pdev->dev, sizeof(*wm831x_rtc), GFP_KERNEL);
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 36506366158..766cb7b19b4 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -17,6 +17,7 @@
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
@@ -38,7 +39,8 @@ struct read_info_sccb {
u64 facilities; /* 48-55 */
u8 _reserved2[84 - 56]; /* 56-83 */
u8 fac84; /* 84 */
- u8 _reserved3[91 - 85]; /* 85-90 */
+ u8 fac85; /* 85 */
+ u8 _reserved3[91 - 86]; /* 86-90 */
u8 flags; /* 91 */
u8 _reserved4[100 - 92]; /* 92-99 */
u32 rnsize2; /* 100-103 */
@@ -51,6 +53,7 @@ static int __initdata early_read_info_sccb_valid;
u64 sclp_facilities;
static u8 sclp_fac84;
+static u8 sclp_fac85;
static unsigned long long rzm;
static unsigned long long rnmax;
@@ -112,6 +115,7 @@ void __init sclp_facilities_detect(void)
sccb = &early_read_info_sccb;
sclp_facilities = sccb->facilities;
sclp_fac84 = sccb->fac84;
+ sclp_fac85 = sccb->fac85;
rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
rzm <<= 20;
@@ -127,6 +131,12 @@ unsigned long long sclp_get_rzm(void)
return rzm;
}
+u8 sclp_get_fac85(void)
+{
+ return sclp_fac85;
+}
+EXPORT_SYMBOL_GPL(sclp_get_fac85);
+
/*
* This function will be called after sclp_facilities_detect(), which gets
* called from early.c code. Therefore the sccb should have valid contents.
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 00c024039c9..cd2fe350e72 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -311,7 +311,7 @@ config SPI_S3C24XX_FIQ
config SPI_S3C64XX
tristate "Samsung S3C64XX series type SPI"
- depends on (ARCH_S3C64XX || ARCH_S5P64X0 || ARCH_EXYNOS)
+ depends on (ARCH_S3C24XX || ARCH_S3C64XX || ARCH_S5P64X0 || ARCH_EXYNOS)
select S3C64XX_DMA if ARCH_S3C64XX
help
SPI driver for Samsung S3C64XX and newer SoCs.
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 69c9a6601f4..47877d68761 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -86,7 +86,8 @@ struct spi_imx_data {
struct completion xfer_done;
void __iomem *base;
int irq;
- struct clk *clk;
+ struct clk *clk_per;
+ struct clk *clk_ipg;
unsigned long spi_clk;
unsigned int count;
@@ -853,15 +854,22 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
goto out_free_irq;
}
- spi_imx->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(spi_imx->clk)) {
- dev_err(&pdev->dev, "unable to get clock\n");
- ret = PTR_ERR(spi_imx->clk);
+ spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(spi_imx->clk_ipg)) {
+ ret = PTR_ERR(spi_imx->clk_ipg);
goto out_free_irq;
}
- clk_enable(spi_imx->clk);
- spi_imx->spi_clk = clk_get_rate(spi_imx->clk);
+ spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(spi_imx->clk_per)) {
+ ret = PTR_ERR(spi_imx->clk_per);
+ goto out_free_irq;
+ }
+
+ clk_prepare_enable(spi_imx->clk_per);
+ clk_prepare_enable(spi_imx->clk_ipg);
+
+ spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
spi_imx->devtype_data->reset(spi_imx);
@@ -879,8 +887,8 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
return ret;
out_clk_put:
- clk_disable(spi_imx->clk);
- clk_put(spi_imx->clk);
+ clk_disable_unprepare(spi_imx->clk_per);
+ clk_disable_unprepare(spi_imx->clk_ipg);
out_free_irq:
free_irq(spi_imx->irq, spi_imx);
out_iounmap:
@@ -908,8 +916,8 @@ static int __devexit spi_imx_remove(struct platform_device *pdev)
spi_bitbang_stop(&spi_imx->bitbang);
writel(0, spi_imx->base + MXC_CSPICTRL);
- clk_disable(spi_imx->clk);
- clk_put(spi_imx->clk);
+ clk_disable_unprepare(spi_imx->clk_per);
+ clk_disable_unprepare(spi_imx->clk_ipg);
free_irq(spi_imx->irq, spi_imx);
iounmap(spi_imx->base);
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index e496f799b7a..dfd04e91fa6 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -16,8 +16,8 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spi/spi.h>
-#include <linux/spi/orion_spi.h>
#include <linux/module.h>
+#include <linux/clk.h>
#include <asm/unaligned.h>
#define DRIVER_NAME "orion_spi"
@@ -46,6 +46,7 @@ struct orion_spi {
unsigned int max_speed;
unsigned int min_speed;
struct orion_spi_info *spi_info;
+ struct clk *clk;
};
static struct workqueue_struct *orion_spi_wq;
@@ -104,7 +105,7 @@ static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed)
orion_spi = spi_master_get_devdata(spi->master);
- tclk_hz = orion_spi->spi_info->tclk;
+ tclk_hz = clk_get_rate(orion_spi->clk);
/*
* the supported rates are: 4,6,8...30
@@ -450,6 +451,7 @@ static int __init orion_spi_probe(struct platform_device *pdev)
struct orion_spi *spi;
struct resource *r;
struct orion_spi_info *spi_info;
+ unsigned long tclk_hz;
int status = 0;
spi_info = pdev->dev.platform_data;
@@ -476,19 +478,28 @@ static int __init orion_spi_probe(struct platform_device *pdev)
spi->master = master;
spi->spi_info = spi_info;
- spi->max_speed = DIV_ROUND_UP(spi_info->tclk, 4);
- spi->min_speed = DIV_ROUND_UP(spi_info->tclk, 30);
+ spi->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(spi->clk)) {
+ status = PTR_ERR(spi->clk);
+ goto out;
+ }
+
+ clk_prepare(spi->clk);
+ clk_enable(spi->clk);
+ tclk_hz = clk_get_rate(spi->clk);
+ spi->max_speed = DIV_ROUND_UP(tclk_hz, 4);
+ spi->min_speed = DIV_ROUND_UP(tclk_hz, 30);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL) {
status = -ENODEV;
- goto out;
+ goto out_rel_clk;
}
if (!request_mem_region(r->start, resource_size(r),
dev_name(&pdev->dev))) {
status = -EBUSY;
- goto out;
+ goto out_rel_clk;
}
spi->base = ioremap(r->start, SZ_1K);
@@ -508,7 +519,9 @@ static int __init orion_spi_probe(struct platform_device *pdev)
out_rel_mem:
release_mem_region(r->start, resource_size(r));
-
+out_rel_clk:
+ clk_disable_unprepare(spi->clk);
+ clk_put(spi->clk);
out:
spi_master_put(master);
return status;
@@ -526,6 +539,9 @@ static int __exit orion_spi_remove(struct platform_device *pdev)
cancel_work_sync(&spi->work);
+ clk_disable_unprepare(spi->clk);
+ clk_put(spi->clk);
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(r->start, resource_size(r));
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 4511420849b..e84dbecd099 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
+#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
@@ -363,11 +364,12 @@ static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
mutex_lock(&ashmem_mutex);
list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
- struct inode *inode = range->asma->file->f_dentry->d_inode;
loff_t start = range->pgstart * PAGE_SIZE;
- loff_t end = (range->pgend + 1) * PAGE_SIZE - 1;
+ loff_t end = (range->pgend + 1) * PAGE_SIZE;
- vmtruncate_range(inode, start, end);
+ do_fallocate(range->asma->file,
+ FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+ start, end - start);
range->purged = ASHMEM_WAS_PURGED;
lru_del(range);
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 59af3945ea8..65c7c62c7aa 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -633,7 +633,6 @@ static int ptmx_open(struct inode *inode, struct file *filp)
mutex_unlock(&devpts_mutex);
mutex_lock(&tty_mutex);
- mutex_lock(&devpts_mutex);
tty = tty_init_dev(ptm_driver, index);
if (IS_ERR(tty)) {
@@ -643,7 +642,6 @@ static int ptmx_open(struct inode *inode, struct file *filp)
/* The tty returned here is locked so we can safely
drop the mutex */
- mutex_unlock(&devpts_mutex);
mutex_unlock(&tty_mutex);
set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index ec206732f68..4ef747307ec 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -205,7 +205,8 @@ struct imx_port {
unsigned int irda_inv_rx:1;
unsigned int irda_inv_tx:1;
unsigned short trcv_delay; /* transceiver delay */
- struct clk *clk;
+ struct clk *clk_ipg;
+ struct clk *clk_per;
struct imx_uart_data *devdata;
};
@@ -673,7 +674,7 @@ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
* RFDIV is set such way to satisfy requested uartclk value
*/
val = TXTL << 10 | RXTL;
- ufcr_rfdiv = (clk_get_rate(sport->clk) + sport->port.uartclk / 2)
+ ufcr_rfdiv = (clk_get_rate(sport->clk_per) + sport->port.uartclk / 2)
/ sport->port.uartclk;
if(!ufcr_rfdiv)
@@ -1286,7 +1287,7 @@ imx_console_get_options(struct imx_port *sport, int *baud,
else
ucfr_rfdiv = 6 - ucfr_rfdiv;
- uartclk = clk_get_rate(sport->clk);
+ uartclk = clk_get_rate(sport->clk_per);
uartclk /= ucfr_rfdiv;
{ /*
@@ -1511,14 +1512,22 @@ static int serial_imx_probe(struct platform_device *pdev)
goto unmap;
}
- sport->clk = clk_get(&pdev->dev, "uart");
- if (IS_ERR(sport->clk)) {
- ret = PTR_ERR(sport->clk);
+ sport->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(sport->clk_ipg)) {
+ ret = PTR_ERR(sport->clk_ipg);
goto unmap;
}
- clk_prepare_enable(sport->clk);
- sport->port.uartclk = clk_get_rate(sport->clk);
+ sport->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(sport->clk_per)) {
+ ret = PTR_ERR(sport->clk_per);
+ goto unmap;
+ }
+
+ clk_prepare_enable(sport->clk_per);
+ clk_prepare_enable(sport->clk_ipg);
+
+ sport->port.uartclk = clk_get_rate(sport->clk_per);
imx_ports[sport->port.line] = sport;
@@ -1539,8 +1548,8 @@ deinit:
if (pdata && pdata->exit)
pdata->exit(pdev);
clkput:
- clk_disable_unprepare(sport->clk);
- clk_put(sport->clk);
+ clk_disable_unprepare(sport->clk_per);
+ clk_disable_unprepare(sport->clk_ipg);
unmap:
iounmap(sport->port.membase);
free:
@@ -1558,11 +1567,10 @@ static int serial_imx_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- if (sport) {
- uart_remove_one_port(&imx_reg, &sport->port);
- clk_disable_unprepare(sport->clk);
- clk_put(sport->clk);
- }
+ uart_remove_one_port(&imx_reg, &sport->port);
+
+ clk_disable_unprepare(sport->clk_per);
+ clk_disable_unprepare(sport->clk_ipg);
if (pdata && pdata->exit)
pdata->exit(pdev);
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index 96c1cacc736..02da071fe1e 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -31,16 +31,19 @@
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
-#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/gpio.h>
#include <lantiq_soc.h>
#define PORT_LTQ_ASC 111
#define MAXPORTS 2
#define UART_DUMMY_UER_RX 1
-#define DRVNAME "ltq_asc"
+#define DRVNAME "lantiq,asc"
#ifdef __BIG_ENDIAN
#define LTQ_ASC_TBUF (0x0020 + 3)
#define LTQ_ASC_RBUF (0x0024 + 3)
@@ -114,6 +117,9 @@ static DEFINE_SPINLOCK(ltq_asc_lock);
struct ltq_uart_port {
struct uart_port port;
+ /* clock used to derive divider */
+ struct clk *fpiclk;
+ /* clock gating of the ASC core */
struct clk *clk;
unsigned int tx_irq;
unsigned int rx_irq;
@@ -316,7 +322,9 @@ lqasc_startup(struct uart_port *port)
struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
int retval;
- port->uartclk = clk_get_rate(ltq_port->clk);
+ if (ltq_port->clk)
+ clk_enable(ltq_port->clk);
+ port->uartclk = clk_get_rate(ltq_port->fpiclk);
ltq_w32_mask(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET),
port->membase + LTQ_ASC_CLC);
@@ -382,6 +390,8 @@ lqasc_shutdown(struct uart_port *port)
port->membase + LTQ_ASC_RXFCON);
ltq_w32_mask(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU,
port->membase + LTQ_ASC_TXFCON);
+ if (ltq_port->clk)
+ clk_disable(ltq_port->clk);
}
static void
@@ -630,7 +640,7 @@ lqasc_console_setup(struct console *co, char *options)
port = &ltq_port->port;
- port->uartclk = clk_get_rate(ltq_port->clk);
+ port->uartclk = clk_get_rate(ltq_port->fpiclk);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -668,37 +678,32 @@ static struct uart_driver lqasc_reg = {
static int __init
lqasc_probe(struct platform_device *pdev)
{
+ struct device_node *node = pdev->dev.of_node;
struct ltq_uart_port *ltq_port;
struct uart_port *port;
- struct resource *mmres, *irqres;
- int tx_irq, rx_irq, err_irq;
- struct clk *clk;
+ struct resource *mmres, irqres[3];
+ int line = 0;
int ret;
mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!mmres || !irqres)
+ ret = of_irq_to_resource_table(node, irqres, 3);
+ if (!mmres || (ret != 3)) {
+ dev_err(&pdev->dev,
+ "failed to get memory/irq for serial port\n");
return -ENODEV;
+ }
- if (pdev->id >= MAXPORTS)
- return -EBUSY;
+ /* check if this is the console port */
+ if (mmres->start != CPHYSADDR(LTQ_EARLY_ASC))
+ line = 1;
- if (lqasc_port[pdev->id] != NULL)
+ if (lqasc_port[line]) {
+ dev_err(&pdev->dev, "port %d already allocated\n", line);
return -EBUSY;
-
- clk = clk_get(&pdev->dev, "fpi");
- if (IS_ERR(clk)) {
- pr_err("failed to get fpi clk\n");
- return -ENOENT;
}
- tx_irq = platform_get_irq_byname(pdev, "tx");
- rx_irq = platform_get_irq_byname(pdev, "rx");
- err_irq = platform_get_irq_byname(pdev, "err");
- if ((tx_irq < 0) | (rx_irq < 0) | (err_irq < 0))
- return -ENODEV;
-
- ltq_port = kzalloc(sizeof(struct ltq_uart_port), GFP_KERNEL);
+ ltq_port = devm_kzalloc(&pdev->dev, sizeof(struct ltq_uart_port),
+ GFP_KERNEL);
if (!ltq_port)
return -ENOMEM;
@@ -709,19 +714,26 @@ lqasc_probe(struct platform_device *pdev)
port->ops = &lqasc_pops;
port->fifosize = 16;
port->type = PORT_LTQ_ASC,
- port->line = pdev->id;
+ port->line = line;
port->dev = &pdev->dev;
-
- port->irq = tx_irq; /* unused, just to be backward-compatibe */
+ /* unused, just to be backward-compatible */
+ port->irq = irqres[0].start;
port->mapbase = mmres->start;
- ltq_port->clk = clk;
+ ltq_port->fpiclk = clk_get_fpi();
+ if (IS_ERR(ltq_port->fpiclk)) {
+ pr_err("failed to get fpi clk\n");
+ return -ENOENT;
+ }
- ltq_port->tx_irq = tx_irq;
- ltq_port->rx_irq = rx_irq;
- ltq_port->err_irq = err_irq;
+ /* not all asc ports have clock gates, let's ignore the return code */
+ ltq_port->clk = clk_get(&pdev->dev, NULL);
- lqasc_port[pdev->id] = ltq_port;
+ ltq_port->tx_irq = irqres[0].start;
+ ltq_port->rx_irq = irqres[1].start;
+ ltq_port->err_irq = irqres[2].start;
+
+ lqasc_port[line] = ltq_port;
platform_set_drvdata(pdev, ltq_port);
ret = uart_add_one_port(&lqasc_reg, port);
@@ -729,10 +741,17 @@ lqasc_probe(struct platform_device *pdev)
return ret;
}
+static const struct of_device_id ltq_asc_match[] = {
+ { .compatible = DRVNAME },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltq_asc_match);
+
static struct platform_driver lqasc_driver = {
.driver = {
.name = DRVNAME,
.owner = THIS_MODULE,
+ .of_match_table = ltq_asc_match,
},
};
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index 0be8a2f00d0..f76b1688c5c 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -31,6 +31,7 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/major.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index 4001eee6c08..92c00b24d0d 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -57,6 +57,7 @@
#include <linux/ioport.h>
#include <linux/irqflags.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/major.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 173a9000a6c..ba8be396a62 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -894,6 +894,23 @@ int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty)
tty_ldisc_enable(tty);
return 0;
}
+
+static void tty_ldisc_kill(struct tty_struct *tty)
+{
+ mutex_lock(&tty->ldisc_mutex);
+ /*
+ * Now kill off the ldisc
+ */
+ tty_ldisc_close(tty, tty->ldisc);
+ tty_ldisc_put(tty->ldisc);
+ /* Force an oops if we mess this up */
+ tty->ldisc = NULL;
+
+ /* Ensure the next open requests the N_TTY ldisc */
+ tty_set_termios_ldisc(tty, N_TTY);
+ mutex_unlock(&tty->ldisc_mutex);
+}
+
/**
* tty_ldisc_release - release line discipline
* @tty: tty being shut down
@@ -912,27 +929,19 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
* race with the set_ldisc code path.
*/
- tty_unlock(tty);
+ tty_unlock_pair(tty, o_tty);
tty_ldisc_halt(tty);
tty_ldisc_flush_works(tty);
- tty_lock(tty);
-
- mutex_lock(&tty->ldisc_mutex);
- /*
- * Now kill off the ldisc
- */
- tty_ldisc_close(tty, tty->ldisc);
- tty_ldisc_put(tty->ldisc);
- /* Force an oops if we mess this up */
- tty->ldisc = NULL;
+ if (o_tty) {
+ tty_ldisc_halt(o_tty);
+ tty_ldisc_flush_works(o_tty);
+ }
+ tty_lock_pair(tty, o_tty);
- /* Ensure the next open requests the N_TTY ldisc */
- tty_set_termios_ldisc(tty, N_TTY);
- mutex_unlock(&tty->ldisc_mutex);
- /* This will need doing differently if we need to lock */
+ tty_ldisc_kill(tty);
if (o_tty)
- tty_ldisc_release(o_tty, NULL);
+ tty_ldisc_kill(o_tty);
/* And the memory resources remaining (buffers, termios) will be
disposed of when the kref hits zero */
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index a797d51ecbe..c778ffe4e4e 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -32,7 +32,7 @@
#define ULPI_VIEWPORT_OFFSET 0x170
struct ehci_mxc_priv {
- struct clk *usbclk, *ahbclk, *phy1clk;
+ struct clk *usbclk, *ahbclk, *phyclk;
struct usb_hcd *hcd;
};
@@ -166,31 +166,26 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
}
/* enable clocks */
- priv->usbclk = clk_get(dev, "usb");
+ priv->usbclk = clk_get(dev, "ipg");
if (IS_ERR(priv->usbclk)) {
ret = PTR_ERR(priv->usbclk);
goto err_clk;
}
- clk_enable(priv->usbclk);
+ clk_prepare_enable(priv->usbclk);
- if (!cpu_is_mx35() && !cpu_is_mx25()) {
- priv->ahbclk = clk_get(dev, "usb_ahb");
- if (IS_ERR(priv->ahbclk)) {
- ret = PTR_ERR(priv->ahbclk);
- goto err_clk_ahb;
- }
- clk_enable(priv->ahbclk);
+ priv->ahbclk = clk_get(dev, "ahb");
+ if (IS_ERR(priv->ahbclk)) {
+ ret = PTR_ERR(priv->ahbclk);
+ goto err_clk_ahb;
}
+ clk_prepare_enable(priv->ahbclk);
/* "dr" device has its own clock on i.MX51 */
- if (cpu_is_mx51() && (pdev->id == 0)) {
- priv->phy1clk = clk_get(dev, "usb_phy1");
- if (IS_ERR(priv->phy1clk)) {
- ret = PTR_ERR(priv->phy1clk);
- goto err_clk_phy;
- }
- clk_enable(priv->phy1clk);
- }
+ priv->phyclk = clk_get(dev, "phy");
+ if (IS_ERR(priv->phyclk))
+ priv->phyclk = NULL;
+ if (priv->phyclk)
+ clk_prepare_enable(priv->phyclk);
/* call platform specific init function */
@@ -265,17 +260,15 @@ err_add:
if (pdata && pdata->exit)
pdata->exit(pdev);
err_init:
- if (priv->phy1clk) {
- clk_disable(priv->phy1clk);
- clk_put(priv->phy1clk);
- }
-err_clk_phy:
- if (priv->ahbclk) {
- clk_disable(priv->ahbclk);
- clk_put(priv->ahbclk);
+ if (priv->phyclk) {
+ clk_disable_unprepare(priv->phyclk);
+ clk_put(priv->phyclk);
}
+
+ clk_disable_unprepare(priv->ahbclk);
+ clk_put(priv->ahbclk);
err_clk_ahb:
- clk_disable(priv->usbclk);
+ clk_disable_unprepare(priv->usbclk);
clk_put(priv->usbclk);
err_clk:
iounmap(hcd->regs);
@@ -307,15 +300,14 @@ static int __exit ehci_mxc_drv_remove(struct platform_device *pdev)
usb_put_hcd(hcd);
platform_set_drvdata(pdev, NULL);
- clk_disable(priv->usbclk);
+ clk_disable_unprepare(priv->usbclk);
clk_put(priv->usbclk);
- if (priv->ahbclk) {
- clk_disable(priv->ahbclk);
- clk_put(priv->ahbclk);
- }
- if (priv->phy1clk) {
- clk_disable(priv->phy1clk);
- clk_put(priv->phy1clk);
+ clk_disable_unprepare(priv->ahbclk);
+ clk_put(priv->ahbclk);
+
+ if (priv->phyclk) {
+ clk_disable_unprepare(priv->phyclk);
+ clk_put(priv->phyclk);
}
kfree(priv);
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 6c6a5a3b4ea..82de1073aa5 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mbus.h>
+#include <linux/clk.h>
#include <plat/ehci-orion.h>
#define rdl(off) __raw_readl(hcd->regs + (off))
@@ -198,6 +199,7 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
struct resource *res;
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
+ struct clk *clk;
void __iomem *regs;
int irq, err;
@@ -238,6 +240,14 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
goto err2;
}
+ /* Not all platforms can gate the clock, so it is not
an error if the clock does not exist. */
+ clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(clk)) {
+ clk_prepare_enable(clk);
+ clk_put(clk);
+ }
+
hcd = usb_create_hcd(&ehci_orion_hc_driver,
&pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
@@ -301,12 +311,18 @@ err1:
static int __exit ehci_orion_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct clk *clk;
usb_remove_hcd(hcd);
iounmap(hcd->regs);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
+ clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(clk)) {
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+ }
return 0;
}
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 4a44bf83361..68548236ec4 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -722,8 +722,9 @@ static int tegra_ehci_probe(struct platform_device *pdev)
}
}
- tegra->phy = tegra_usb_phy_open(instance, hcd->regs, pdata->phy_config,
- TEGRA_USB_PHY_MODE_HOST);
+ tegra->phy = tegra_usb_phy_open(&pdev->dev, instance, hcd->regs,
+ pdata->phy_config,
+ TEGRA_USB_PHY_MODE_HOST);
if (IS_ERR(tegra->phy)) {
dev_err(&pdev->dev, "Failed to open USB phy\n");
err = -ENXIO;
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index af16884491e..fa2b0375031 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -184,6 +184,18 @@ config BACKLIGHT_GENERIC
known as the Corgi backlight driver. If you have a Sharp Zaurus
SL-C7xx, SL-Cxx00 or SL-6000x say y.
+config BACKLIGHT_LM3533
+ tristate "Backlight Driver for LM3533"
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on MFD_LM3533
+ help
+ Say Y to enable the backlight driver for National Semiconductor / TI
+ LM3533 Lighting Power chips.
+
+ The backlights can be controlled directly, through PWM input, or by
+ the ambient-light-sensor interface. The chip supports 256 brightness
+ levels.
+
config BACKLIGHT_LOCOMO
tristate "Sharp LOCOMO LCD/Backlight Driver"
depends on SHARP_LOCOMO
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 36855ae887d..a2ac9cfbaf6 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_BACKLIGHT_EP93XX) += ep93xx_bl.o
obj-$(CONFIG_BACKLIGHT_GENERIC) += generic_bl.o
obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o
obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
+obj-$(CONFIG_BACKLIGHT_LM3533) += lm3533_bl.o
obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o
obj-$(CONFIG_BACKLIGHT_LP855X) += lp855x_bl.o
obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index 4911ea7989c..df5db99af23 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -160,7 +160,7 @@ static ssize_t adp5520_store(struct device *dev, const char *buf,
unsigned long val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -214,7 +214,7 @@ static ssize_t adp5520_bl_daylight_max_store(struct device *dev,
struct adp5520_bl *data = dev_get_drvdata(dev);
int ret;
- ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+ ret = kstrtoul(buf, 10, &data->cached_daylight_max);
if (ret < 0)
return ret;
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 550dbf0bb89..77d1fdba597 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -222,7 +222,8 @@ static int __devinit adp8860_led_probe(struct i2c_client *client)
struct led_info *cur_led;
int ret, i;
- led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
+ led = devm_kzalloc(&client->dev, sizeof(*led) * pdata->num_leds,
+ GFP_KERNEL);
if (led == NULL) {
dev_err(&client->dev, "failed to alloc memory\n");
return -ENOMEM;
@@ -236,7 +237,7 @@ static int __devinit adp8860_led_probe(struct i2c_client *client)
if (ret) {
dev_err(&client->dev, "failed to write\n");
- goto err_free;
+ return ret;
}
for (i = 0; i < pdata->num_leds; ++i) {
@@ -291,9 +292,6 @@ static int __devinit adp8860_led_probe(struct i2c_client *client)
cancel_work_sync(&led[i].work);
}
- err_free:
- kfree(led);
-
return ret;
}
@@ -309,7 +307,6 @@ static int __devexit adp8860_led_remove(struct i2c_client *client)
cancel_work_sync(&data->led[i].work);
}
- kfree(data->led);
return 0;
}
#else
@@ -451,7 +448,7 @@ static ssize_t adp8860_store(struct device *dev, const char *buf,
unsigned long val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -501,7 +498,7 @@ static ssize_t adp8860_bl_l1_daylight_max_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct adp8860_bl *data = dev_get_drvdata(dev);
- int ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+ int ret = kstrtoul(buf, 10, &data->cached_daylight_max);
if (ret)
return ret;
@@ -608,7 +605,7 @@ static ssize_t adp8860_bl_ambient_light_zone_store(struct device *dev,
uint8_t reg_val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -675,13 +672,13 @@ static int __devinit adp8860_probe(struct i2c_client *client,
return -EINVAL;
}
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
ret = adp8860_read(client, ADP8860_MFDVID, &reg_val);
if (ret < 0)
- goto out2;
+ return ret;
switch (ADP8860_MANID(reg_val)) {
case ADP8863_MANUFID:
@@ -694,8 +691,7 @@ static int __devinit adp8860_probe(struct i2c_client *client,
break;
default:
dev_err(&client->dev, "failed to probe\n");
- ret = -ENODEV;
- goto out2;
+ return -ENODEV;
}
/* It's confirmed that the DEVID field is actually a REVID */
@@ -717,8 +713,7 @@ static int __devinit adp8860_probe(struct i2c_client *client,
&client->dev, data, &adp8860_bl_ops, &props);
if (IS_ERR(bl)) {
dev_err(&client->dev, "failed to register backlight\n");
- ret = PTR_ERR(bl);
- goto out2;
+ return PTR_ERR(bl);
}
bl->props.brightness = ADP8860_MAX_BRIGHTNESS;
@@ -756,8 +751,6 @@ out:
&adp8860_bl_attr_group);
out1:
backlight_device_unregister(bl);
-out2:
- kfree(data);
return ret;
}
@@ -776,7 +769,6 @@ static int __devexit adp8860_remove(struct i2c_client *client)
&adp8860_bl_attr_group);
backlight_device_unregister(data->bl);
- kfree(data);
return 0;
}
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 9be58c6f18f..edf7f91c8e6 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -244,8 +244,8 @@ static int __devinit adp8870_led_probe(struct i2c_client *client)
struct led_info *cur_led;
int ret, i;
-
- led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
+ led = devm_kzalloc(&client->dev, pdata->num_leds * sizeof(*led),
+ GFP_KERNEL);
if (led == NULL) {
dev_err(&client->dev, "failed to alloc memory\n");
return -ENOMEM;
@@ -253,17 +253,17 @@ static int __devinit adp8870_led_probe(struct i2c_client *client)
ret = adp8870_write(client, ADP8870_ISCLAW, pdata->led_fade_law);
if (ret)
- goto err_free;
+ return ret;
ret = adp8870_write(client, ADP8870_ISCT1,
(pdata->led_on_time & 0x3) << 6);
if (ret)
- goto err_free;
+ return ret;
ret = adp8870_write(client, ADP8870_ISCF,
FADE_VAL(pdata->led_fade_in, pdata->led_fade_out));
if (ret)
- goto err_free;
+ return ret;
for (i = 0; i < pdata->num_leds; ++i) {
cur_led = &pdata->leds[i];
@@ -317,9 +317,6 @@ static int __devinit adp8870_led_probe(struct i2c_client *client)
cancel_work_sync(&led[i].work);
}
- err_free:
- kfree(led);
-
return ret;
}
@@ -335,7 +332,6 @@ static int __devexit adp8870_led_remove(struct i2c_client *client)
cancel_work_sync(&data->led[i].work);
}
- kfree(data->led);
return 0;
}
#else
@@ -572,7 +568,7 @@ static ssize_t adp8870_store(struct device *dev, const char *buf,
unsigned long val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -652,7 +648,7 @@ static ssize_t adp8870_bl_l1_daylight_max_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct adp8870_bl *data = dev_get_drvdata(dev);
- int ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+ int ret = kstrtoul(buf, 10, &data->cached_daylight_max);
if (ret)
return ret;
@@ -794,7 +790,7 @@ static ssize_t adp8870_bl_ambient_light_zone_store(struct device *dev,
uint8_t reg_val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -874,7 +870,7 @@ static int __devinit adp8870_probe(struct i2c_client *client,
return -ENODEV;
}
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
@@ -894,8 +890,7 @@ static int __devinit adp8870_probe(struct i2c_client *client,
&client->dev, data, &adp8870_bl_ops, &props);
if (IS_ERR(bl)) {
dev_err(&client->dev, "failed to register backlight\n");
- ret = PTR_ERR(bl);
- goto out2;
+ return PTR_ERR(bl);
}
data->bl = bl;
@@ -930,8 +925,6 @@ out:
&adp8870_bl_attr_group);
out1:
backlight_device_unregister(bl);
-out2:
- kfree(data);
return ret;
}
@@ -950,7 +943,6 @@ static int __devexit adp8870_remove(struct i2c_client *client)
&adp8870_bl_attr_group);
backlight_device_unregister(data->bl);
- kfree(data);
return 0;
}
diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c
index 7bdadc79011..3729238e709 100644
--- a/drivers/video/backlight/ams369fg06.c
+++ b/drivers/video/backlight/ams369fg06.c
@@ -482,7 +482,7 @@ static int __devinit ams369fg06_probe(struct spi_device *spi)
struct backlight_device *bd = NULL;
struct backlight_properties props;
- lcd = kzalloc(sizeof(struct ams369fg06), GFP_KERNEL);
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct ams369fg06), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
@@ -492,7 +492,7 @@ static int __devinit ams369fg06_probe(struct spi_device *spi)
ret = spi_setup(spi);
if (ret < 0) {
dev_err(&spi->dev, "spi setup failed.\n");
- goto out_free_lcd;
+ return ret;
}
lcd->spi = spi;
@@ -501,15 +501,13 @@ static int __devinit ams369fg06_probe(struct spi_device *spi)
lcd->lcd_pd = spi->dev.platform_data;
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL\n");
- goto out_free_lcd;
+ return -EFAULT;
}
ld = lcd_device_register("ams369fg06", &spi->dev, lcd,
&ams369fg06_lcd_ops);
- if (IS_ERR(ld)) {
- ret = PTR_ERR(ld);
- goto out_free_lcd;
- }
+ if (IS_ERR(ld))
+ return PTR_ERR(ld);
lcd->ld = ld;
@@ -547,8 +545,6 @@ static int __devinit ams369fg06_probe(struct spi_device *spi)
out_lcd_unregister:
lcd_device_unregister(ld);
-out_free_lcd:
- kfree(lcd);
return ret;
}
@@ -559,7 +555,6 @@ static int __devexit ams369fg06_remove(struct spi_device *spi)
ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
backlight_device_unregister(lcd->bd);
lcd_device_unregister(lcd->ld);
- kfree(lcd);
return 0;
}
@@ -619,7 +614,6 @@ static void ams369fg06_shutdown(struct spi_device *spi)
static struct spi_driver ams369fg06_driver = {
.driver = {
.name = "ams369fg06",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ams369fg06_probe,
diff --git a/drivers/video/backlight/apple_bl.c b/drivers/video/backlight/apple_bl.c
index a523b255e12..9dc73ac3709 100644
--- a/drivers/video/backlight/apple_bl.c
+++ b/drivers/video/backlight/apple_bl.c
@@ -16,6 +16,8 @@
* get at the firmware code in order to figure out what it's actually doing.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -25,6 +27,7 @@
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/atomic.h>
+#include <linux/apple_bl.h>
static struct backlight_device *apple_backlight_device;
@@ -39,8 +42,6 @@ struct hw_data {
static const struct hw_data *hw_data;
-#define DRIVER "apple_backlight: "
-
/* Module parameters. */
static int debug;
module_param_named(debug, debug, int, 0644);
@@ -60,8 +61,7 @@ static int intel_chipset_send_intensity(struct backlight_device *bd)
int intensity = bd->props.brightness;
if (debug)
- printk(KERN_DEBUG DRIVER "setting brightness to %d\n",
- intensity);
+ pr_debug("setting brightness to %d\n", intensity);
intel_chipset_set_brightness(intensity);
return 0;
@@ -76,8 +76,7 @@ static int intel_chipset_get_intensity(struct backlight_device *bd)
intensity = inb(0xb3) >> 4;
if (debug)
- printk(KERN_DEBUG DRIVER "read brightness of %d\n",
- intensity);
+ pr_debug("read brightness of %d\n", intensity);
return intensity;
}
@@ -107,8 +106,7 @@ static int nvidia_chipset_send_intensity(struct backlight_device *bd)
int intensity = bd->props.brightness;
if (debug)
- printk(KERN_DEBUG DRIVER "setting brightness to %d\n",
- intensity);
+ pr_debug("setting brightness to %d\n", intensity);
nvidia_chipset_set_brightness(intensity);
return 0;
@@ -123,8 +121,7 @@ static int nvidia_chipset_get_intensity(struct backlight_device *bd)
intensity = inb(0x52f) >> 4;
if (debug)
- printk(KERN_DEBUG DRIVER "read brightness of %d\n",
- intensity);
+ pr_debug("read brightness of %d\n", intensity);
return intensity;
}
@@ -149,7 +146,7 @@ static int __devinit apple_bl_add(struct acpi_device *dev)
host = pci_get_bus_and_slot(0, 0);
if (!host) {
- printk(KERN_ERR DRIVER "unable to find PCI host\n");
+ pr_err("unable to find PCI host\n");
return -ENODEV;
}
@@ -161,7 +158,7 @@ static int __devinit apple_bl_add(struct acpi_device *dev)
pci_dev_put(host);
if (!hw_data) {
- printk(KERN_ERR DRIVER "unknown hardware\n");
+ pr_err("unknown hardware\n");
return -ENODEV;
}
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index bf5b1ece716..297db2fa91f 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -5,6 +5,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
@@ -123,7 +125,7 @@ static ssize_t backlight_store_power(struct device *dev,
rc = -ENXIO;
mutex_lock(&bd->ops_lock);
if (bd->ops) {
- pr_debug("backlight: set power to %lu\n", power);
+ pr_debug("set power to %lu\n", power);
if (bd->props.power != power) {
bd->props.power = power;
backlight_update_status(bd);
@@ -161,8 +163,7 @@ static ssize_t backlight_store_brightness(struct device *dev,
if (brightness > bd->props.max_brightness)
rc = -EINVAL;
else {
- pr_debug("backlight: set brightness to %lu\n",
- brightness);
+ pr_debug("set brightness to %lu\n", brightness);
bd->props.brightness = brightness;
backlight_update_status(bd);
rc = count;
@@ -378,8 +379,8 @@ static int __init backlight_class_init(void)
{
backlight_class = class_create(THIS_MODULE, "backlight");
if (IS_ERR(backlight_class)) {
- printk(KERN_WARNING "Unable to create backlight class; errno = %ld\n",
- PTR_ERR(backlight_class));
+ pr_warn("Unable to create backlight class; errno = %ld\n",
+ PTR_ERR(backlight_class));
return PTR_ERR(backlight_class);
}
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 6dab13fe562..23d732677ba 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -544,7 +544,7 @@ static int __devinit corgi_lcd_probe(struct spi_device *spi)
return -EINVAL;
}
- lcd = kzalloc(sizeof(struct corgi_lcd), GFP_KERNEL);
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct corgi_lcd), GFP_KERNEL);
if (!lcd) {
dev_err(&spi->dev, "failed to allocate memory\n");
return -ENOMEM;
@@ -554,10 +554,9 @@ static int __devinit corgi_lcd_probe(struct spi_device *spi)
lcd->lcd_dev = lcd_device_register("corgi_lcd", &spi->dev,
lcd, &corgi_lcd_ops);
- if (IS_ERR(lcd->lcd_dev)) {
- ret = PTR_ERR(lcd->lcd_dev);
- goto err_free_lcd;
- }
+ if (IS_ERR(lcd->lcd_dev))
+ return PTR_ERR(lcd->lcd_dev);
+
lcd->power = FB_BLANK_POWERDOWN;
lcd->mode = (pdata) ? pdata->init_mode : CORGI_LCD_MODE_VGA;
@@ -591,8 +590,6 @@ err_unregister_bl:
backlight_device_unregister(lcd->bl_dev);
err_unregister_lcd:
lcd_device_unregister(lcd->lcd_dev);
-err_free_lcd:
- kfree(lcd);
return ret;
}
@@ -613,7 +610,6 @@ static int __devexit corgi_lcd_remove(struct spi_device *spi)
corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_POWERDOWN);
lcd_device_unregister(lcd->lcd_dev);
- kfree(lcd);
return 0;
}
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
index 22489eb5f3e..37bae801e23 100644
--- a/drivers/video/backlight/cr_bllcd.c
+++ b/drivers/video/backlight/cr_bllcd.c
@@ -27,6 +27,8 @@
* Alan Hourihane <alanh-at-tungstengraphics-dot-com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -180,14 +182,13 @@ static int cr_backlight_probe(struct platform_device *pdev)
lpc_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
CRVML_DEVICE_LPC, NULL);
if (!lpc_dev) {
- printk("INTEL CARILLO RANCH LPC not found.\n");
+ pr_err("INTEL CARILLO RANCH LPC not found.\n");
return -ENODEV;
}
pci_read_config_byte(lpc_dev, CRVML_REG_GPIOEN, &dev_en);
if (!(dev_en & CRVML_GPIOEN_BIT)) {
- printk(KERN_ERR
- "Carillo Ranch GPIO device was not enabled.\n");
+ pr_err("Carillo Ranch GPIO device was not enabled.\n");
pci_dev_put(lpc_dev);
return -ENODEV;
}
@@ -270,7 +271,7 @@ static int __init cr_backlight_init(void)
return PTR_ERR(crp);
}
- printk("Carillo Ranch Backlight Driver Initialized.\n");
+ pr_info("Carillo Ranch Backlight Driver Initialized.\n");
return 0;
}
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 30e19681a30..573c7ece0fd 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -136,6 +136,7 @@ static int da903x_backlight_probe(struct platform_device *pdev)
da903x_write(data->da903x_dev, DA9034_WLED_CONTROL2,
DA9034_WLED_ISET(pdata->output_current));
+ memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = max_brightness;
bl = backlight_device_register(pdev->name, data->da903x_dev, data,
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
index 9ce6170c186..8c660fcd250 100644
--- a/drivers/video/backlight/generic_bl.c
+++ b/drivers/video/backlight/generic_bl.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -106,7 +108,7 @@ static int genericbl_probe(struct platform_device *pdev)
generic_backlight_device = bd;
- printk("Generic Backlight Driver Initialized.\n");
+ pr_info("Generic Backlight Driver Initialized.\n");
return 0;
}
@@ -120,7 +122,7 @@ static int genericbl_remove(struct platform_device *pdev)
backlight_device_unregister(bd);
- printk("Generic Backlight Driver Unloaded\n");
+ pr_info("Generic Backlight Driver Unloaded\n");
return 0;
}
diff --git a/drivers/video/backlight/ili9320.c b/drivers/video/backlight/ili9320.c
index 5118a9f029a..6c9399341bc 100644
--- a/drivers/video/backlight/ili9320.c
+++ b/drivers/video/backlight/ili9320.c
@@ -220,7 +220,7 @@ int __devinit ili9320_probe_spi(struct spi_device *spi,
/* allocate and initialse our state */
- ili = kzalloc(sizeof(struct ili9320), GFP_KERNEL);
+ ili = devm_kzalloc(&spi->dev, sizeof(struct ili9320), GFP_KERNEL);
if (ili == NULL) {
dev_err(dev, "no memory for device\n");
return -ENOMEM;
@@ -240,8 +240,7 @@ int __devinit ili9320_probe_spi(struct spi_device *spi,
lcd = lcd_device_register("ili9320", dev, ili, &ili9320_ops);
if (IS_ERR(lcd)) {
dev_err(dev, "failed to register lcd device\n");
- ret = PTR_ERR(lcd);
- goto err_free;
+ return PTR_ERR(lcd);
}
ili->lcd = lcd;
@@ -259,9 +258,6 @@ int __devinit ili9320_probe_spi(struct spi_device *spi,
err_unregister:
lcd_device_unregister(lcd);
- err_free:
- kfree(ili);
-
return ret;
}
@@ -272,7 +268,6 @@ int __devexit ili9320_remove(struct ili9320 *ili)
ili9320_power(ili, FB_BLANK_POWERDOWN);
lcd_device_unregister(ili->lcd);
- kfree(ili);
return 0;
}
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index 2f8af5d786a..16f593b6442 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/backlight.h>
#include <linux/device.h>
#include <linux/fb.h>
@@ -38,7 +40,7 @@ static int jornada_bl_get_brightness(struct backlight_device *bd)
ret = jornada_ssp_byte(GETBRIGHTNESS);
if (jornada_ssp_byte(GETBRIGHTNESS) != TXDUMMY) {
- printk(KERN_ERR "bl : get brightness timeout\n");
+ pr_err("get brightness timeout\n");
jornada_ssp_end();
return -ETIMEDOUT;
} else /* exchange txdummy for value */
@@ -59,7 +61,7 @@ static int jornada_bl_update_status(struct backlight_device *bd)
if ((bd->props.power != FB_BLANK_UNBLANK) || (bd->props.fb_blank != FB_BLANK_UNBLANK)) {
ret = jornada_ssp_byte(BRIGHTNESSOFF);
if (ret != TXDUMMY) {
- printk(KERN_INFO "bl : brightness off timeout\n");
+ pr_info("brightness off timeout\n");
/* turn off backlight */
PPSR &= ~PPC_LDD1;
PPDR |= PPC_LDD1;
@@ -70,7 +72,7 @@ static int jornada_bl_update_status(struct backlight_device *bd)
/* send command to our mcu */
if (jornada_ssp_byte(SETBRIGHTNESS) != TXDUMMY) {
- printk(KERN_INFO "bl : failed to set brightness\n");
+ pr_info("failed to set brightness\n");
ret = -ETIMEDOUT;
goto out;
}
@@ -81,7 +83,7 @@ static int jornada_bl_update_status(struct backlight_device *bd)
but due to physical layout it is equal to 0, so we simply
invert the value (MAX VALUE - NEW VALUE). */
if (jornada_ssp_byte(BL_MAX_BRIGHT - bd->props.brightness) != TXDUMMY) {
- printk(KERN_ERR "bl : set brightness failed\n");
+ pr_err("set brightness failed\n");
ret = -ETIMEDOUT;
}
@@ -113,7 +115,7 @@ static int jornada_bl_probe(struct platform_device *pdev)
if (IS_ERR(bd)) {
ret = PTR_ERR(bd);
- printk(KERN_ERR "bl : failed to register device, err=%x\n", ret);
+ pr_err("failed to register device, err=%x\n", ret);
return ret;
}
@@ -125,7 +127,7 @@ static int jornada_bl_probe(struct platform_device *pdev)
jornada_bl_update_status(bd);
platform_set_drvdata(pdev, bd);
- printk(KERN_INFO "HP Jornada 700 series backlight driver\n");
+ pr_info("HP Jornada 700 series backlight driver\n");
return 0;
}
diff --git a/drivers/video/backlight/jornada720_lcd.c b/drivers/video/backlight/jornada720_lcd.c
index 22d231a17e3..635b30523fd 100644
--- a/drivers/video/backlight/jornada720_lcd.c
+++ b/drivers/video/backlight/jornada720_lcd.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/device.h>
#include <linux/fb.h>
#include <linux/kernel.h>
@@ -44,7 +46,7 @@ static int jornada_lcd_get_contrast(struct lcd_device *dev)
jornada_ssp_start();
if (jornada_ssp_byte(GETCONTRAST) != TXDUMMY) {
- printk(KERN_ERR "lcd: get contrast failed\n");
+ pr_err("get contrast failed\n");
jornada_ssp_end();
return -ETIMEDOUT;
} else {
@@ -65,7 +67,7 @@ static int jornada_lcd_set_contrast(struct lcd_device *dev, int value)
/* push the new value */
if (jornada_ssp_byte(value) != TXDUMMY) {
- printk(KERN_ERR "lcd : set contrast failed\n");
+ pr_err("set contrast failed\n");
jornada_ssp_end();
return -ETIMEDOUT;
}
@@ -103,7 +105,7 @@ static int jornada_lcd_probe(struct platform_device *pdev)
if (IS_ERR(lcd_device)) {
ret = PTR_ERR(lcd_device);
- printk(KERN_ERR "lcd : failed to register device\n");
+ pr_err("failed to register device\n");
return ret;
}
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index 6022b67285e..40f606a8609 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -11,6 +11,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/delay.h>
@@ -159,7 +161,8 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
return -EINVAL;
}
- priv = kzalloc(sizeof(struct l4f00242t03_priv), GFP_KERNEL);
+ priv = devm_kzalloc(&spi->dev, sizeof(struct l4f00242t03_priv),
+ GFP_KERNEL);
if (priv == NULL) {
dev_err(&spi->dev, "No memory for this device.\n");
@@ -177,7 +180,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
if (ret) {
dev_err(&spi->dev,
"Unable to get the lcd l4f00242t03 reset gpio.\n");
- goto err;
+ return ret;
}
ret = gpio_request_one(pdata->data_enable_gpio, GPIOF_OUT_INIT_LOW,
@@ -185,7 +188,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
if (ret) {
dev_err(&spi->dev,
"Unable to get the lcd l4f00242t03 data en gpio.\n");
- goto err2;
+ goto err;
}
priv->io_reg = regulator_get(&spi->dev, "vdd");
@@ -193,7 +196,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
ret = PTR_ERR(priv->io_reg);
dev_err(&spi->dev, "%s: Unable to get the IO regulator\n",
__func__);
- goto err3;
+ goto err2;
}
priv->core_reg = regulator_get(&spi->dev, "vcore");
@@ -201,14 +204,14 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
ret = PTR_ERR(priv->core_reg);
dev_err(&spi->dev, "%s: Unable to get the core regulator\n",
__func__);
- goto err4;
+ goto err3;
}
priv->ld = lcd_device_register("l4f00242t03",
&spi->dev, priv, &l4f_ops);
if (IS_ERR(priv->ld)) {
ret = PTR_ERR(priv->ld);
- goto err5;
+ goto err4;
}
/* Init the LCD */
@@ -220,16 +223,14 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
return 0;
-err5:
- regulator_put(priv->core_reg);
err4:
- regulator_put(priv->io_reg);
+ regulator_put(priv->core_reg);
err3:
- gpio_free(pdata->data_enable_gpio);
+ regulator_put(priv->io_reg);
err2:
- gpio_free(pdata->reset_gpio);
+ gpio_free(pdata->data_enable_gpio);
err:
- kfree(priv);
+ gpio_free(pdata->reset_gpio);
return ret;
}
@@ -250,8 +251,6 @@ static int __devexit l4f00242t03_remove(struct spi_device *spi)
regulator_put(priv->io_reg);
regulator_put(priv->core_reg);
- kfree(priv);
-
return 0;
}
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 79c1b0d609a..a5d0d024bb9 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -5,6 +5,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
@@ -32,6 +34,8 @@ static int fb_notifier_callback(struct notifier_block *self,
case FB_EVENT_BLANK:
case FB_EVENT_MODE_CHANGE:
case FB_EVENT_MODE_CHANGE_ALL:
+ case FB_EARLY_EVENT_BLANK:
+ case FB_R_EARLY_EVENT_BLANK:
break;
default:
return 0;
@@ -46,6 +50,14 @@ static int fb_notifier_callback(struct notifier_block *self,
if (event == FB_EVENT_BLANK) {
if (ld->ops->set_power)
ld->ops->set_power(ld, *(int *)evdata->data);
+ } else if (event == FB_EARLY_EVENT_BLANK) {
+ if (ld->ops->early_set_power)
+ ld->ops->early_set_power(ld,
+ *(int *)evdata->data);
+ } else if (event == FB_R_EARLY_EVENT_BLANK) {
+ if (ld->ops->r_early_set_power)
+ ld->ops->r_early_set_power(ld,
+ *(int *)evdata->data);
} else {
if (ld->ops->set_mode)
ld->ops->set_mode(ld, evdata->data);
@@ -106,7 +118,7 @@ static ssize_t lcd_store_power(struct device *dev,
mutex_lock(&ld->ops_lock);
if (ld->ops && ld->ops->set_power) {
- pr_debug("lcd: set power to %lu\n", power);
+ pr_debug("set power to %lu\n", power);
ld->ops->set_power(ld, power);
rc = count;
}
@@ -142,7 +154,7 @@ static ssize_t lcd_store_contrast(struct device *dev,
mutex_lock(&ld->ops_lock);
if (ld->ops && ld->ops->set_contrast) {
- pr_debug("lcd: set contrast to %lu\n", contrast);
+ pr_debug("set contrast to %lu\n", contrast);
ld->ops->set_contrast(ld, contrast);
rc = count;
}
@@ -253,8 +265,8 @@ static int __init lcd_class_init(void)
{
lcd_class = class_create(THIS_MODULE, "lcd");
if (IS_ERR(lcd_class)) {
- printk(KERN_WARNING "Unable to create backlight class; errno = %ld\n",
- PTR_ERR(lcd_class));
+ pr_warn("Unable to create backlight class; errno = %ld\n",
+ PTR_ERR(lcd_class));
return PTR_ERR(lcd_class);
}
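
The pr_fmt() definition added at the top of lcd.c (and of l4f00242t03, omap1_bl and progear_bl in this series) makes every pr_err()/pr_warn()/pr_debug() call prefix its message with the module name, which is why the hand-written "lcd : " prefixes are dropped from the format strings. A small sketch of how the macro composes, using a hypothetical module rather than code from this patch:

/* must be defined before any header that pulls in printk.h */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init example_init(void)
{
	/* with KBUILD_MODNAME "example" this logs: "example: set power to 1" */
	pr_debug("set power to %d\n", 1);
	return 0;
}
module_init(example_init);

MODULE_LICENSE("GPL");
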
diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c
index efd352be21a..58f517fb7d4 100644
--- a/drivers/video/backlight/ld9040.c
+++ b/drivers/video/backlight/ld9040.c
@@ -707,7 +707,7 @@ static int ld9040_probe(struct spi_device *spi)
struct backlight_device *bd = NULL;
struct backlight_properties props;
- lcd = kzalloc(sizeof(struct ld9040), GFP_KERNEL);
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct ld9040), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
@@ -717,7 +717,7 @@ static int ld9040_probe(struct spi_device *spi)
ret = spi_setup(spi);
if (ret < 0) {
dev_err(&spi->dev, "spi setup failed.\n");
- goto out_free_lcd;
+ return ret;
}
lcd->spi = spi;
@@ -726,7 +726,7 @@ static int ld9040_probe(struct spi_device *spi)
lcd->lcd_pd = spi->dev.platform_data;
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL.\n");
- goto out_free_lcd;
+ return -EFAULT;
}
mutex_init(&lcd->lock);
@@ -734,13 +734,13 @@ static int ld9040_probe(struct spi_device *spi)
ret = regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
if (ret) {
dev_err(lcd->dev, "Failed to get regulators: %d\n", ret);
- goto out_free_lcd;
+ return ret;
}
ld = lcd_device_register("ld9040", &spi->dev, lcd, &ld9040_lcd_ops);
if (IS_ERR(ld)) {
ret = PTR_ERR(ld);
- goto out_free_lcd;
+ goto out_free_regulator;
}
lcd->ld = ld;
@@ -782,10 +782,9 @@ static int ld9040_probe(struct spi_device *spi)
out_unregister_lcd:
lcd_device_unregister(lcd->ld);
-out_free_lcd:
+out_free_regulator:
regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
- kfree(lcd);
return ret;
}
@@ -797,7 +796,6 @@ static int __devexit ld9040_remove(struct spi_device *spi)
backlight_device_unregister(lcd->bd);
lcd_device_unregister(lcd->ld);
regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
- kfree(lcd);
return 0;
}
@@ -846,7 +844,6 @@ static void ld9040_shutdown(struct spi_device *spi)
static struct spi_driver ld9040_driver = {
.driver = {
.name = "ld9040",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ld9040_probe,
diff --git a/drivers/video/backlight/lm3533_bl.c b/drivers/video/backlight/lm3533_bl.c
new file mode 100644
index 00000000000..bebeb63607d
--- /dev/null
+++ b/drivers/video/backlight/lm3533_bl.c
@@ -0,0 +1,423 @@
+/*
+ * lm3533-bl.c -- LM3533 Backlight driver
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/lm3533.h>
+
+
+#define LM3533_HVCTRLBANK_COUNT 2
+#define LM3533_BL_MAX_BRIGHTNESS 255
+
+#define LM3533_REG_CTRLBANK_AB_BCONF 0x1a
+
+
+struct lm3533_bl {
+ struct lm3533 *lm3533;
+ struct lm3533_ctrlbank cb;
+ struct backlight_device *bd;
+ int id;
+};
+
+
+static inline int lm3533_bl_get_ctrlbank_id(struct lm3533_bl *bl)
+{
+ return bl->id;
+}
+
+static int lm3533_bl_update_status(struct backlight_device *bd)
+{
+ struct lm3533_bl *bl = bl_get_data(bd);
+ int brightness = bd->props.brightness;
+
+ if (bd->props.power != FB_BLANK_UNBLANK)
+ brightness = 0;
+ if (bd->props.fb_blank != FB_BLANK_UNBLANK)
+ brightness = 0;
+
+ return lm3533_ctrlbank_set_brightness(&bl->cb, (u8)brightness);
+}
+
+static int lm3533_bl_get_brightness(struct backlight_device *bd)
+{
+ struct lm3533_bl *bl = bl_get_data(bd);
+ u8 val;
+ int ret;
+
+ ret = lm3533_ctrlbank_get_brightness(&bl->cb, &val);
+ if (ret)
+ return ret;
+
+ return val;
+}
+
+static const struct backlight_ops lm3533_bl_ops = {
+ .get_brightness = lm3533_bl_get_brightness,
+ .update_status = lm3533_bl_update_status,
+};
+
+static ssize_t show_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", bl->id);
+}
+
+static ssize_t show_als_channel(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ unsigned channel = lm3533_bl_get_ctrlbank_id(bl);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", channel);
+}
+
+static ssize_t show_als_en(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ int ctrlbank = lm3533_bl_get_ctrlbank_id(bl);
+ u8 val;
+ u8 mask;
+ bool enable;
+ int ret;
+
+ ret = lm3533_read(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, &val);
+ if (ret)
+ return ret;
+
+ mask = 1 << (2 * ctrlbank);
+ enable = val & mask;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", enable);
+}
+
+static ssize_t store_als_en(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ int ctrlbank = lm3533_bl_get_ctrlbank_id(bl);
+ int enable;
+ u8 val;
+ u8 mask;
+ int ret;
+
+ if (kstrtoint(buf, 0, &enable))
+ return -EINVAL;
+
+ mask = 1 << (2 * ctrlbank);
+
+ if (enable)
+ val = mask;
+ else
+ val = 0;
+
+ ret = lm3533_update(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, val,
+ mask);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t show_linear(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ u8 val;
+ u8 mask;
+ int linear;
+ int ret;
+
+ ret = lm3533_read(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, &val);
+ if (ret)
+ return ret;
+
+ mask = 1 << (2 * lm3533_bl_get_ctrlbank_id(bl) + 1);
+
+ if (val & mask)
+ linear = 1;
+ else
+ linear = 0;
+
+ return scnprintf(buf, PAGE_SIZE, "%x\n", linear);
+}
+
+static ssize_t store_linear(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ unsigned long linear;
+ u8 mask;
+ u8 val;
+ int ret;
+
+ if (kstrtoul(buf, 0, &linear))
+ return -EINVAL;
+
+ mask = 1 << (2 * lm3533_bl_get_ctrlbank_id(bl) + 1);
+
+ if (linear)
+ val = mask;
+ else
+ val = 0;
+
+ ret = lm3533_update(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, val,
+ mask);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t show_pwm(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ u8 val;
+ int ret;
+
+ ret = lm3533_ctrlbank_get_pwm(&bl->cb, &val);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t store_pwm(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ u8 val;
+ int ret;
+
+ if (kstrtou8(buf, 0, &val))
+ return -EINVAL;
+
+ ret = lm3533_ctrlbank_set_pwm(&bl->cb, val);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static LM3533_ATTR_RO(als_channel);
+static LM3533_ATTR_RW(als_en);
+static LM3533_ATTR_RO(id);
+static LM3533_ATTR_RW(linear);
+static LM3533_ATTR_RW(pwm);
+
+static struct attribute *lm3533_bl_attributes[] = {
+ &dev_attr_als_channel.attr,
+ &dev_attr_als_en.attr,
+ &dev_attr_id.attr,
+ &dev_attr_linear.attr,
+ &dev_attr_pwm.attr,
+ NULL,
+};
+
+static umode_t lm3533_bl_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ umode_t mode = attr->mode;
+
+ if (attr == &dev_attr_als_channel.attr ||
+ attr == &dev_attr_als_en.attr) {
+ if (!bl->lm3533->have_als)
+ mode = 0;
+ }
+
+ return mode;
+};
+
+static struct attribute_group lm3533_bl_attribute_group = {
+ .is_visible = lm3533_bl_attr_is_visible,
+ .attrs = lm3533_bl_attributes
+};
+
+static int __devinit lm3533_bl_setup(struct lm3533_bl *bl,
+ struct lm3533_bl_platform_data *pdata)
+{
+ int ret;
+
+ ret = lm3533_ctrlbank_set_max_current(&bl->cb, pdata->max_current);
+ if (ret)
+ return ret;
+
+ return lm3533_ctrlbank_set_pwm(&bl->cb, pdata->pwm);
+}
+
+static int __devinit lm3533_bl_probe(struct platform_device *pdev)
+{
+ struct lm3533 *lm3533;
+ struct lm3533_bl_platform_data *pdata;
+ struct lm3533_bl *bl;
+ struct backlight_device *bd;
+ struct backlight_properties props;
+ int ret;
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ lm3533 = dev_get_drvdata(pdev->dev.parent);
+ if (!lm3533)
+ return -EINVAL;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ if (pdev->id < 0 || pdev->id >= LM3533_HVCTRLBANK_COUNT) {
+ dev_err(&pdev->dev, "illegal backlight id %d\n", pdev->id);
+ return -EINVAL;
+ }
+
+ bl = kzalloc(sizeof(*bl), GFP_KERNEL);
+ if (!bl) {
+ dev_err(&pdev->dev,
+ "failed to allocate memory for backlight\n");
+ return -ENOMEM;
+ }
+
+ bl->lm3533 = lm3533;
+ bl->id = pdev->id;
+
+ bl->cb.lm3533 = lm3533;
+ bl->cb.id = lm3533_bl_get_ctrlbank_id(bl);
+ bl->cb.dev = NULL; /* until registered */
+
+ memset(&props, 0, sizeof(props));
+ props.type = BACKLIGHT_RAW;
+ props.max_brightness = LM3533_BL_MAX_BRIGHTNESS;
+ props.brightness = pdata->default_brightness;
+ bd = backlight_device_register(pdata->name, pdev->dev.parent, bl,
+ &lm3533_bl_ops, &props);
+ if (IS_ERR(bd)) {
+ dev_err(&pdev->dev, "failed to register backlight device\n");
+ ret = PTR_ERR(bd);
+ goto err_free;
+ }
+
+ bl->bd = bd;
+ bl->cb.dev = &bl->bd->dev;
+
+ platform_set_drvdata(pdev, bl);
+
+ ret = sysfs_create_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to create sysfs attributes\n");
+ goto err_unregister;
+ }
+
+ backlight_update_status(bd);
+
+ ret = lm3533_bl_setup(bl, pdata);
+ if (ret)
+ goto err_sysfs_remove;
+
+ ret = lm3533_ctrlbank_enable(&bl->cb);
+ if (ret)
+ goto err_sysfs_remove;
+
+ return 0;
+
+err_sysfs_remove:
+ sysfs_remove_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
+err_unregister:
+ backlight_device_unregister(bd);
+err_free:
+ kfree(bl);
+
+ return ret;
+}
+
+static int __devexit lm3533_bl_remove(struct platform_device *pdev)
+{
+ struct lm3533_bl *bl = platform_get_drvdata(pdev);
+ struct backlight_device *bd = bl->bd;
+
+ dev_dbg(&bd->dev, "%s\n", __func__);
+
+ bd->props.power = FB_BLANK_POWERDOWN;
+ bd->props.brightness = 0;
+
+ lm3533_ctrlbank_disable(&bl->cb);
+ sysfs_remove_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
+ backlight_device_unregister(bd);
+ kfree(bl);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int lm3533_bl_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct lm3533_bl *bl = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ return lm3533_ctrlbank_disable(&bl->cb);
+}
+
+static int lm3533_bl_resume(struct platform_device *pdev)
+{
+ struct lm3533_bl *bl = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ return lm3533_ctrlbank_enable(&bl->cb);
+}
+#else
+#define lm3533_bl_suspend NULL
+#define lm3533_bl_resume NULL
+#endif
+
+static void lm3533_bl_shutdown(struct platform_device *pdev)
+{
+ struct lm3533_bl *bl = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ lm3533_ctrlbank_disable(&bl->cb);
+}
+
+static struct platform_driver lm3533_bl_driver = {
+ .driver = {
+ .name = "lm3533-backlight",
+ .owner = THIS_MODULE,
+ },
+ .probe = lm3533_bl_probe,
+ .remove = __devexit_p(lm3533_bl_remove),
+ .shutdown = lm3533_bl_shutdown,
+ .suspend = lm3533_bl_suspend,
+ .resume = lm3533_bl_resume,
+};
+module_platform_driver(lm3533_bl_driver);
+
+MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
+MODULE_DESCRIPTION("LM3533 Backlight driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lm3533-backlight");
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
index 4161f9e3982..a9f2c36966f 100644
--- a/drivers/video/backlight/lms283gf05.c
+++ b/drivers/video/backlight/lms283gf05.c
@@ -168,7 +168,8 @@ static int __devinit lms283gf05_probe(struct spi_device *spi)
goto err;
}
- st = kzalloc(sizeof(struct lms283gf05_state), GFP_KERNEL);
+ st = devm_kzalloc(&spi->dev, sizeof(struct lms283gf05_state),
+ GFP_KERNEL);
if (st == NULL) {
dev_err(&spi->dev, "No memory for device state\n");
ret = -ENOMEM;
@@ -178,7 +179,7 @@ static int __devinit lms283gf05_probe(struct spi_device *spi)
ld = lcd_device_register("lms283gf05", &spi->dev, st, &lms_ops);
if (IS_ERR(ld)) {
ret = PTR_ERR(ld);
- goto err2;
+ goto err;
}
st->spi = spi;
@@ -193,8 +194,6 @@ static int __devinit lms283gf05_probe(struct spi_device *spi)
return 0;
-err2:
- kfree(st);
err:
if (pdata != NULL)
gpio_free(pdata->reset_gpio);
@@ -212,8 +211,6 @@ static int __devexit lms283gf05_remove(struct spi_device *spi)
if (pdata != NULL)
gpio_free(pdata->reset_gpio);
- kfree(st);
-
return 0;
}
diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c
index 333949ff326..6c0f1ac0d32 100644
--- a/drivers/video/backlight/ltv350qv.c
+++ b/drivers/video/backlight/ltv350qv.c
@@ -232,23 +232,20 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
struct lcd_device *ld;
int ret;
- lcd = kzalloc(sizeof(struct ltv350qv), GFP_KERNEL);
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct ltv350qv), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
lcd->spi = spi;
lcd->power = FB_BLANK_POWERDOWN;
- lcd->buffer = kzalloc(8, GFP_KERNEL);
- if (!lcd->buffer) {
- ret = -ENOMEM;
- goto out_free_lcd;
- }
+ lcd->buffer = devm_kzalloc(&spi->dev, 8, GFP_KERNEL);
+ if (!lcd->buffer)
+ return -ENOMEM;
ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops);
- if (IS_ERR(ld)) {
- ret = PTR_ERR(ld);
- goto out_free_buffer;
- }
+ if (IS_ERR(ld))
+ return PTR_ERR(ld);
+
lcd->ld = ld;
ret = ltv350qv_power(lcd, FB_BLANK_UNBLANK);
@@ -261,10 +258,6 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
out_unregister:
lcd_device_unregister(ld);
-out_free_buffer:
- kfree(lcd->buffer);
-out_free_lcd:
- kfree(lcd);
return ret;
}
@@ -274,8 +267,6 @@ static int __devexit ltv350qv_remove(struct spi_device *spi)
ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
lcd_device_unregister(lcd->ld);
- kfree(lcd->buffer);
- kfree(lcd);
return 0;
}
@@ -310,7 +301,6 @@ static void ltv350qv_shutdown(struct spi_device *spi)
static struct spi_driver ltv350qv_driver = {
.driver = {
.name = "ltv350qv",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index 0175bfb08a1..bfdc5fbeaa1 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -18,6 +18,8 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -168,7 +170,7 @@ static int omapbl_probe(struct platform_device *pdev)
dev->props.brightness = pdata->default_intensity;
omapbl_update_status(dev);
- printk(KERN_INFO "OMAP LCD backlight initialised\n");
+ pr_info("OMAP LCD backlight initialised\n");
return 0;
}
diff --git a/drivers/video/backlight/pcf50633-backlight.c b/drivers/video/backlight/pcf50633-backlight.c
index c65853cb974..c092159f438 100644
--- a/drivers/video/backlight/pcf50633-backlight.c
+++ b/drivers/video/backlight/pcf50633-backlight.c
@@ -111,6 +111,7 @@ static int __devinit pcf50633_bl_probe(struct platform_device *pdev)
if (!pcf_bl)
return -ENOMEM;
+ memset(&bl_props, 0, sizeof(bl_props));
bl_props.type = BACKLIGHT_RAW;
bl_props.max_brightness = 0x3f;
bl_props.power = FB_BLANK_UNBLANK;
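
The one-line memset() added here (and in wm831x_bl and panel-acx565akm below) zeroes the on-stack backlight_properties structure before it is passed to backlight_device_register(); without it, any field the driver does not set explicitly contains stack garbage. A hedged sketch of the registration pattern, with a hypothetical driver and illustrative values:

#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/fb.h>
#include <linux/string.h>

static struct backlight_device *example_bl_register(struct device *parent,
						    void *drvdata,
						    const struct backlight_ops *ops)
{
	struct backlight_properties props;

	memset(&props, 0, sizeof(props));	/* don't leak uninitialised fields */
	props.type = BACKLIGHT_RAW;
	props.max_brightness = 100;		/* illustrative maximum */
	props.power = FB_BLANK_UNBLANK;

	return backlight_device_register("example-bl", parent, drvdata,
					 ops, &props);
}
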
diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
index 6af183d6465..69b35f02929 100644
--- a/drivers/video/backlight/progear_bl.c
+++ b/drivers/video/backlight/progear_bl.c
@@ -15,6 +15,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -68,13 +70,13 @@ static int progearbl_probe(struct platform_device *pdev)
pmu_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, NULL);
if (!pmu_dev) {
- printk("ALI M7101 PMU not found.\n");
+ pr_err("ALI M7101 PMU not found.\n");
return -ENODEV;
}
sb_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
if (!sb_dev) {
- printk("ALI 1533 SB not found.\n");
+ pr_err("ALI 1533 SB not found.\n");
ret = -ENODEV;
goto put_pmu;
}
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index e264f55b257..6437ae474cf 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -741,7 +741,7 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
struct backlight_device *bd = NULL;
struct backlight_properties props;
- lcd = kzalloc(sizeof(struct s6e63m0), GFP_KERNEL);
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct s6e63m0), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
@@ -751,7 +751,7 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
ret = spi_setup(spi);
if (ret < 0) {
dev_err(&spi->dev, "spi setup failed.\n");
- goto out_free_lcd;
+ return ret;
}
lcd->spi = spi;
@@ -760,14 +760,12 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
lcd->lcd_pd = (struct lcd_platform_data *)spi->dev.platform_data;
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL.\n");
- goto out_free_lcd;
+ return -EFAULT;
}
ld = lcd_device_register("s6e63m0", &spi->dev, lcd, &s6e63m0_lcd_ops);
- if (IS_ERR(ld)) {
- ret = PTR_ERR(ld);
- goto out_free_lcd;
- }
+ if (IS_ERR(ld))
+ return PTR_ERR(ld);
lcd->ld = ld;
@@ -824,8 +822,6 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
out_lcd_unregister:
lcd_device_unregister(ld);
-out_free_lcd:
- kfree(lcd);
return ret;
}
@@ -838,7 +834,6 @@ static int __devexit s6e63m0_remove(struct spi_device *spi)
device_remove_file(&spi->dev, &dev_attr_gamma_mode);
backlight_device_unregister(lcd->bd);
lcd_device_unregister(lcd->ld);
- kfree(lcd);
return 0;
}
@@ -899,7 +894,6 @@ static void s6e63m0_shutdown(struct spi_device *spi)
static struct spi_driver s6e63m0_driver = {
.driver = {
.name = "s6e63m0",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = s6e63m0_probe,
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index 2368b8e5f89..02444d042cd 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -349,7 +349,7 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
if (err)
return err;
- lcd = kzalloc(sizeof(struct tdo24m), GFP_KERNEL);
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct tdo24m), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
@@ -357,11 +357,9 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
lcd->power = FB_BLANK_POWERDOWN;
lcd->mode = MODE_VGA; /* default to VGA */
- lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, GFP_KERNEL);
- if (lcd->buf == NULL) {
- kfree(lcd);
+ lcd->buf = devm_kzalloc(&spi->dev, TDO24M_SPI_BUFF_SIZE, GFP_KERNEL);
+ if (lcd->buf == NULL)
return -ENOMEM;
- }
m = &lcd->msg;
x = &lcd->xfer;
@@ -383,15 +381,13 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
break;
default:
dev_err(&spi->dev, "Unsupported model");
- goto out_free;
+ return -EINVAL;
}
lcd->lcd_dev = lcd_device_register("tdo24m", &spi->dev,
lcd, &tdo24m_ops);
- if (IS_ERR(lcd->lcd_dev)) {
- err = PTR_ERR(lcd->lcd_dev);
- goto out_free;
- }
+ if (IS_ERR(lcd->lcd_dev))
+ return PTR_ERR(lcd->lcd_dev);
dev_set_drvdata(&spi->dev, lcd);
err = tdo24m_power(lcd, FB_BLANK_UNBLANK);
@@ -402,9 +398,6 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
out_unregister:
lcd_device_unregister(lcd->lcd_dev);
-out_free:
- kfree(lcd->buf);
- kfree(lcd);
return err;
}
@@ -414,8 +407,6 @@ static int __devexit tdo24m_remove(struct spi_device *spi)
tdo24m_power(lcd, FB_BLANK_POWERDOWN);
lcd_device_unregister(lcd->lcd_dev);
- kfree(lcd->buf);
- kfree(lcd);
return 0;
}
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 2b241abced4..0d54e607e82 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -82,8 +82,11 @@ static int __devinit tosa_bl_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct backlight_properties props;
- struct tosa_bl_data *data = kzalloc(sizeof(struct tosa_bl_data), GFP_KERNEL);
+ struct tosa_bl_data *data;
int ret = 0;
+
+ data = devm_kzalloc(&client->dev, sizeof(struct tosa_bl_data),
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -92,7 +95,7 @@ static int __devinit tosa_bl_probe(struct i2c_client *client,
ret = gpio_request(TOSA_GPIO_BL_C20MA, "backlight");
if (ret) {
dev_dbg(&data->bl->dev, "Unable to request gpio!\n");
- goto err_gpio_bl;
+ return ret;
}
ret = gpio_direction_output(TOSA_GPIO_BL_C20MA, 0);
if (ret)
@@ -122,8 +125,6 @@ err_reg:
data->bl = NULL;
err_gpio_dir:
gpio_free(TOSA_GPIO_BL_C20MA);
-err_gpio_bl:
- kfree(data);
return ret;
}
@@ -136,8 +137,6 @@ static int __devexit tosa_bl_remove(struct i2c_client *client)
gpio_free(TOSA_GPIO_BL_C20MA);
- kfree(data);
-
return 0;
}
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 2231aec2391..47823b8efff 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -174,7 +174,8 @@ static int __devinit tosa_lcd_probe(struct spi_device *spi)
int ret;
struct tosa_lcd_data *data;
- data = kzalloc(sizeof(struct tosa_lcd_data), GFP_KERNEL);
+ data = devm_kzalloc(&spi->dev, sizeof(struct tosa_lcd_data),
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -187,7 +188,7 @@ static int __devinit tosa_lcd_probe(struct spi_device *spi)
ret = spi_setup(spi);
if (ret < 0)
- goto err_spi;
+ return ret;
data->spi = spi;
dev_set_drvdata(&spi->dev, data);
@@ -224,8 +225,6 @@ err_gpio_dir:
gpio_free(TOSA_GPIO_TG_ON);
err_gpio_tg:
dev_set_drvdata(&spi->dev, NULL);
-err_spi:
- kfree(data);
return ret;
}
@@ -242,7 +241,6 @@ static int __devexit tosa_lcd_remove(struct spi_device *spi)
gpio_free(TOSA_GPIO_TG_ON);
dev_set_drvdata(&spi->dev, NULL);
- kfree(data);
return 0;
}
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index 5d365deb5f8..9e5517a3a52 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -194,6 +194,7 @@ static int wm831x_backlight_probe(struct platform_device *pdev)
data->current_brightness = 0;
data->isink_reg = isink_reg;
+ memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = max_isel;
bl = backlight_device_register("wm831x", &pdev->dev, data,
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index c6ce416ab58..0dff12a1dae 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1046,20 +1046,29 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
int
fb_blank(struct fb_info *info, int blank)
{
- int ret = -EINVAL;
+ struct fb_event event;
+ int ret = -EINVAL, early_ret;
if (blank > FB_BLANK_POWERDOWN)
blank = FB_BLANK_POWERDOWN;
+ event.info = info;
+ event.data = &blank;
+
+ early_ret = fb_notifier_call_chain(FB_EARLY_EVENT_BLANK, &event);
+
if (info->fbops->fb_blank)
ret = info->fbops->fb_blank(blank, info);
- if (!ret) {
- struct fb_event event;
-
- event.info = info;
- event.data = &blank;
+ if (!ret)
fb_notifier_call_chain(FB_EVENT_BLANK, &event);
+ else {
+ /*
+ * If fb_blank() fails, revert the effects of
+ * the early blank event.
+ */
+ if (!early_ret)
+ fb_notifier_call_chain(FB_R_EARLY_EVENT_BLANK, &event);
}
return ret;
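
The reworked fb_blank() now notifies in two stages: FB_EARLY_EVENT_BLANK is sent before the driver's fb_blank hook runs, FB_EVENT_BLANK is sent once the hook succeeds, and if the hook fails the early notification is reverted with FB_R_EARLY_EVENT_BLANK (provided the early call itself succeeded). The lcd.c change earlier in this diff maps the two new events onto the early_set_power/r_early_set_power lcd_ops. A sketch of a hypothetical framebuffer notifier client consuming the new events (not the lcd class code itself):

#include <linux/fb.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int example_fb_notify(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	struct fb_event *evdata = data;
	int blank = *(int *)evdata->data;

	switch (event) {
	case FB_EARLY_EVENT_BLANK:
		/* runs before fb_blank() touches the hardware */
		pr_debug("preparing for blank state %d\n", blank);
		break;
	case FB_EVENT_BLANK:
		/* fb_blank() succeeded: apply the new power state */
		pr_debug("blank state %d applied\n", blank);
		break;
	case FB_R_EARLY_EVENT_BLANK:
		/* fb_blank() failed: undo whatever the early event did */
		pr_debug("reverting blank state %d\n", blank);
		break;
	}
	return 0;
}

static struct notifier_block example_fb_nb = {
	.notifier_call = example_fb_notify,
};

/* registered somewhere in init with fb_register_client(&example_fb_nb) */
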
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index f135dbead07..caad3689b4e 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -131,7 +131,9 @@ struct imxfb_rgb {
struct imxfb_info {
struct platform_device *pdev;
void __iomem *regs;
- struct clk *clk;
+ struct clk *clk_ipg;
+ struct clk *clk_ahb;
+ struct clk *clk_per;
/*
* These are the addresses we mapped
@@ -340,7 +342,7 @@ static int imxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
pr_debug("var->bits_per_pixel=%d\n", var->bits_per_pixel);
- lcd_clk = clk_get_rate(fbi->clk);
+ lcd_clk = clk_get_rate(fbi->clk_per);
tmp = var->pixclock * (unsigned long long)lcd_clk;
@@ -455,11 +457,17 @@ static int imxfb_bl_update_status(struct backlight_device *bl)
fbi->pwmr = (fbi->pwmr & ~0xFF) | brightness;
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
- clk_enable(fbi->clk);
+ if (bl->props.fb_blank != FB_BLANK_UNBLANK) {
+ clk_prepare_enable(fbi->clk_ipg);
+ clk_prepare_enable(fbi->clk_ahb);
+ clk_prepare_enable(fbi->clk_per);
+ }
writel(fbi->pwmr, fbi->regs + LCDC_PWMR);
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
- clk_disable(fbi->clk);
+ if (bl->props.fb_blank != FB_BLANK_UNBLANK) {
+ clk_disable_unprepare(fbi->clk_per);
+ clk_disable_unprepare(fbi->clk_ahb);
+ clk_disable_unprepare(fbi->clk_ipg);
+ }
return 0;
}
@@ -522,7 +530,9 @@ static void imxfb_enable_controller(struct imxfb_info *fbi)
*/
writel(RMCR_LCDC_EN_MX1, fbi->regs + LCDC_RMCR);
- clk_enable(fbi->clk);
+ clk_prepare_enable(fbi->clk_ipg);
+ clk_prepare_enable(fbi->clk_ahb);
+ clk_prepare_enable(fbi->clk_per);
if (fbi->backlight_power)
fbi->backlight_power(1);
@@ -539,7 +549,9 @@ static void imxfb_disable_controller(struct imxfb_info *fbi)
if (fbi->lcd_power)
fbi->lcd_power(0);
- clk_disable(fbi->clk);
+ clk_disable_unprepare(fbi->clk_per);
+ clk_disable_unprepare(fbi->clk_ipg);
+ clk_disable_unprepare(fbi->clk_ahb);
writel(0, fbi->regs + LCDC_RMCR);
}
@@ -770,10 +782,21 @@ static int __init imxfb_probe(struct platform_device *pdev)
goto failed_req;
}
- fbi->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(fbi->clk)) {
- ret = PTR_ERR(fbi->clk);
- dev_err(&pdev->dev, "unable to get clock: %d\n", ret);
+ fbi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(fbi->clk_ipg)) {
+ ret = PTR_ERR(fbi->clk_ipg);
+ goto failed_getclock;
+ }
+
+ fbi->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(fbi->clk_ahb)) {
+ ret = PTR_ERR(fbi->clk_ahb);
+ goto failed_getclock;
+ }
+
+ fbi->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(fbi->clk_per)) {
+ ret = PTR_ERR(fbi->clk_per);
goto failed_getclock;
}
@@ -858,7 +881,6 @@ failed_platform_init:
failed_map:
iounmap(fbi->regs);
failed_ioremap:
- clk_put(fbi->clk);
failed_getclock:
release_mem_region(res->start, resource_size(res));
failed_req:
@@ -895,8 +917,6 @@ static int __devexit imxfb_remove(struct platform_device *pdev)
iounmap(fbi->regs);
release_mem_region(res->start, resource_size(res));
- clk_disable(fbi->clk);
- clk_put(fbi->clk);
platform_set_drvdata(pdev, NULL);
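
imxfb (and mxc_w1, imx2_wdt and orion_wdt later in this diff) moves from clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare(), as the common clock framework requires: prepare may sleep, while enable must remain safe in atomic context. devm_clk_get() additionally drops the clk_put() calls from the error and remove paths. A minimal sketch of the combined pattern, assuming a hypothetical platform driver and clock name:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	/* managed lookup: no clk_put() needed in the error or remove paths */
	clk = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* prepare (may sleep) and enable in a single call */
	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, clk);
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	struct clk *clk = platform_get_drvdata(pdev);

	clk_disable_unprepare(clk);	/* mirrors clk_prepare_enable() */
	return 0;
}
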
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
index d26f37ac69d..74e7cf07850 100644
--- a/drivers/video/omap2/displays/panel-acx565akm.c
+++ b/drivers/video/omap2/displays/panel-acx565akm.c
@@ -532,6 +532,7 @@ static int acx_panel_probe(struct omap_dss_device *dssdev)
/*------- Backlight control --------*/
+ memset(&props, 0, sizeof(props));
props.fb_blank = FB_BLANK_UNBLANK;
props.power = FB_BLANK_UNBLANK;
props.type = BACKLIGHT_RAW;
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index a3b6a74c67a..1cc61a700fa 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -138,7 +138,7 @@ static int __devinit mxc_w1_probe(struct platform_device *pdev)
goto failed_ioremap;
}
- clk_enable(mdev->clk);
+ clk_prepare_enable(mdev->clk);
__raw_writeb(mdev->clkdiv, mdev->regs + MXC_W1_TIME_DIVIDER);
mdev->bus_master.data = mdev;
@@ -178,7 +178,7 @@ static int __devexit mxc_w1_remove(struct platform_device *pdev)
iounmap(mdev->regs);
release_mem_region(res->start, resource_size(res));
- clk_disable(mdev->clk);
+ clk_disable_unprepare(mdev->clk);
clk_put(mdev->clk);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index a18bf6358eb..d92d7488be1 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -565,6 +565,7 @@ config INTEL_SCU_WATCHDOG
config ITCO_WDT
tristate "Intel TCO Timer/Watchdog"
depends on (X86 || IA64) && PCI
+ select LPC_ICH
---help---
Hardware driver for the intel TCO timer based watchdog devices.
These drivers are included in the Intel 82801 I/O Controller
diff --git a/drivers/watchdog/iTCO_vendor.h b/drivers/watchdog/iTCO_vendor.h
index 9e27e6422f6..3c57b45537a 100644
--- a/drivers/watchdog/iTCO_vendor.h
+++ b/drivers/watchdog/iTCO_vendor.h
@@ -1,8 +1,8 @@
/* iTCO Vendor Specific Support hooks */
#ifdef CONFIG_ITCO_VENDOR_SUPPORT
-extern void iTCO_vendor_pre_start(unsigned long, unsigned int);
-extern void iTCO_vendor_pre_stop(unsigned long);
-extern void iTCO_vendor_pre_keepalive(unsigned long, unsigned int);
+extern void iTCO_vendor_pre_start(struct resource *, unsigned int);
+extern void iTCO_vendor_pre_stop(struct resource *);
+extern void iTCO_vendor_pre_keepalive(struct resource *, unsigned int);
extern void iTCO_vendor_pre_set_heartbeat(unsigned int);
extern int iTCO_vendor_check_noreboot_on(void);
#else
diff --git a/drivers/watchdog/iTCO_vendor_support.c b/drivers/watchdog/iTCO_vendor_support.c
index 2721d29ce24..b6b2f90b5d4 100644
--- a/drivers/watchdog/iTCO_vendor_support.c
+++ b/drivers/watchdog/iTCO_vendor_support.c
@@ -35,11 +35,6 @@
#include "iTCO_vendor.h"
-/* iTCO defines */
-#define SMI_EN (acpibase + 0x30) /* SMI Control and Enable Register */
-#define TCOBASE (acpibase + 0x60) /* TCO base address */
-#define TCO1_STS (TCOBASE + 0x04) /* TCO1 Status Register */
-
/* List of vendor support modes */
/* SuperMicro Pentium 3 Era 370SSE+-OEM1/P3TSSE */
#define SUPERMICRO_OLD_BOARD 1
@@ -82,24 +77,24 @@ MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default="
* 20.6 seconds.
*/
-static void supermicro_old_pre_start(unsigned long acpibase)
+static void supermicro_old_pre_start(struct resource *smires)
{
unsigned long val32;
/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
- val32 = inl(SMI_EN);
+ val32 = inl(smires->start);
val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
- outl(val32, SMI_EN); /* Needed to activate watchdog */
+ outl(val32, smires->start); /* Needed to activate watchdog */
}
-static void supermicro_old_pre_stop(unsigned long acpibase)
+static void supermicro_old_pre_stop(struct resource *smires)
{
unsigned long val32;
/* Bit 13: TCO_EN -> 1 = Enables the TCO logic to generate SMI# */
- val32 = inl(SMI_EN);
+ val32 = inl(smires->start);
val32 |= 0x00002000; /* Turn on SMI clearing watchdog */
- outl(val32, SMI_EN); /* Needed to deactivate watchdog */
+ outl(val32, smires->start); /* Needed to deactivate watchdog */
}
/*
@@ -270,66 +265,66 @@ static void supermicro_new_pre_set_heartbeat(unsigned int heartbeat)
* Don't use this fix if you don't need to!!!
*/
-static void broken_bios_start(unsigned long acpibase)
+static void broken_bios_start(struct resource *smires)
{
unsigned long val32;
- val32 = inl(SMI_EN);
+ val32 = inl(smires->start);
/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI#
Bit 0: GBL_SMI_EN -> 0 = No SMI# will be generated by ICH. */
val32 &= 0xffffdffe;
- outl(val32, SMI_EN);
+ outl(val32, smires->start);
}
-static void broken_bios_stop(unsigned long acpibase)
+static void broken_bios_stop(struct resource *smires)
{
unsigned long val32;
- val32 = inl(SMI_EN);
+ val32 = inl(smires->start);
/* Bit 13: TCO_EN -> 1 = Enables TCO logic generating an SMI#
Bit 0: GBL_SMI_EN -> 1 = Turn global SMI on again. */
val32 |= 0x00002001;
- outl(val32, SMI_EN);
+ outl(val32, smires->start);
}
/*
* Generic Support Functions
*/
-void iTCO_vendor_pre_start(unsigned long acpibase,
+void iTCO_vendor_pre_start(struct resource *smires,
unsigned int heartbeat)
{
switch (vendorsupport) {
case SUPERMICRO_OLD_BOARD:
- supermicro_old_pre_start(acpibase);
+ supermicro_old_pre_start(smires);
break;
case SUPERMICRO_NEW_BOARD:
supermicro_new_pre_start(heartbeat);
break;
case BROKEN_BIOS:
- broken_bios_start(acpibase);
+ broken_bios_start(smires);
break;
}
}
EXPORT_SYMBOL(iTCO_vendor_pre_start);
-void iTCO_vendor_pre_stop(unsigned long acpibase)
+void iTCO_vendor_pre_stop(struct resource *smires)
{
switch (vendorsupport) {
case SUPERMICRO_OLD_BOARD:
- supermicro_old_pre_stop(acpibase);
+ supermicro_old_pre_stop(smires);
break;
case SUPERMICRO_NEW_BOARD:
supermicro_new_pre_stop();
break;
case BROKEN_BIOS:
- broken_bios_stop(acpibase);
+ broken_bios_stop(smires);
break;
}
}
EXPORT_SYMBOL(iTCO_vendor_pre_stop);
-void iTCO_vendor_pre_keepalive(unsigned long acpibase, unsigned int heartbeat)
+void iTCO_vendor_pre_keepalive(struct resource *smires, unsigned int heartbeat)
{
if (vendorsupport == SUPERMICRO_NEW_BOARD)
supermicro_new_pre_set_heartbeat(heartbeat);
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 9fecb95645a..741528b032e 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -66,316 +66,16 @@
#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
#include <linux/io.h> /* For inb/outb/... */
+#include <linux/mfd/core.h>
+#include <linux/mfd/lpc_ich.h>
#include "iTCO_vendor.h"
-/* TCO related info */
-enum iTCO_chipsets {
- TCO_ICH = 0, /* ICH */
- TCO_ICH0, /* ICH0 */
- TCO_ICH2, /* ICH2 */
- TCO_ICH2M, /* ICH2-M */
- TCO_ICH3, /* ICH3-S */
- TCO_ICH3M, /* ICH3-M */
- TCO_ICH4, /* ICH4 */
- TCO_ICH4M, /* ICH4-M */
- TCO_CICH, /* C-ICH */
- TCO_ICH5, /* ICH5 & ICH5R */
- TCO_6300ESB, /* 6300ESB */
- TCO_ICH6, /* ICH6 & ICH6R */
- TCO_ICH6M, /* ICH6-M */
- TCO_ICH6W, /* ICH6W & ICH6RW */
- TCO_631XESB, /* 631xESB/632xESB */
- TCO_ICH7, /* ICH7 & ICH7R */
- TCO_ICH7DH, /* ICH7DH */
- TCO_ICH7M, /* ICH7-M & ICH7-U */
- TCO_ICH7MDH, /* ICH7-M DH */
- TCO_NM10, /* NM10 */
- TCO_ICH8, /* ICH8 & ICH8R */
- TCO_ICH8DH, /* ICH8DH */
- TCO_ICH8DO, /* ICH8DO */
- TCO_ICH8M, /* ICH8M */
- TCO_ICH8ME, /* ICH8M-E */
- TCO_ICH9, /* ICH9 */
- TCO_ICH9R, /* ICH9R */
- TCO_ICH9DH, /* ICH9DH */
- TCO_ICH9DO, /* ICH9DO */
- TCO_ICH9M, /* ICH9M */
- TCO_ICH9ME, /* ICH9M-E */
- TCO_ICH10, /* ICH10 */
- TCO_ICH10R, /* ICH10R */
- TCO_ICH10D, /* ICH10D */
- TCO_ICH10DO, /* ICH10DO */
- TCO_PCH, /* PCH Desktop Full Featured */
- TCO_PCHM, /* PCH Mobile Full Featured */
- TCO_P55, /* P55 */
- TCO_PM55, /* PM55 */
- TCO_H55, /* H55 */
- TCO_QM57, /* QM57 */
- TCO_H57, /* H57 */
- TCO_HM55, /* HM55 */
- TCO_Q57, /* Q57 */
- TCO_HM57, /* HM57 */
- TCO_PCHMSFF, /* PCH Mobile SFF Full Featured */
- TCO_QS57, /* QS57 */
- TCO_3400, /* 3400 */
- TCO_3420, /* 3420 */
- TCO_3450, /* 3450 */
- TCO_EP80579, /* EP80579 */
- TCO_CPT, /* Cougar Point */
- TCO_CPTD, /* Cougar Point Desktop */
- TCO_CPTM, /* Cougar Point Mobile */
- TCO_PBG, /* Patsburg */
- TCO_DH89XXCC, /* DH89xxCC */
- TCO_PPT, /* Panther Point */
- TCO_LPT, /* Lynx Point */
-};
-
-static struct {
- char *name;
- unsigned int iTCO_version;
-} iTCO_chipset_info[] __devinitdata = {
- {"ICH", 1},
- {"ICH0", 1},
- {"ICH2", 1},
- {"ICH2-M", 1},
- {"ICH3-S", 1},
- {"ICH3-M", 1},
- {"ICH4", 1},
- {"ICH4-M", 1},
- {"C-ICH", 1},
- {"ICH5 or ICH5R", 1},
- {"6300ESB", 1},
- {"ICH6 or ICH6R", 2},
- {"ICH6-M", 2},
- {"ICH6W or ICH6RW", 2},
- {"631xESB/632xESB", 2},
- {"ICH7 or ICH7R", 2},
- {"ICH7DH", 2},
- {"ICH7-M or ICH7-U", 2},
- {"ICH7-M DH", 2},
- {"NM10", 2},
- {"ICH8 or ICH8R", 2},
- {"ICH8DH", 2},
- {"ICH8DO", 2},
- {"ICH8M", 2},
- {"ICH8M-E", 2},
- {"ICH9", 2},
- {"ICH9R", 2},
- {"ICH9DH", 2},
- {"ICH9DO", 2},
- {"ICH9M", 2},
- {"ICH9M-E", 2},
- {"ICH10", 2},
- {"ICH10R", 2},
- {"ICH10D", 2},
- {"ICH10DO", 2},
- {"PCH Desktop Full Featured", 2},
- {"PCH Mobile Full Featured", 2},
- {"P55", 2},
- {"PM55", 2},
- {"H55", 2},
- {"QM57", 2},
- {"H57", 2},
- {"HM55", 2},
- {"Q57", 2},
- {"HM57", 2},
- {"PCH Mobile SFF Full Featured", 2},
- {"QS57", 2},
- {"3400", 2},
- {"3420", 2},
- {"3450", 2},
- {"EP80579", 2},
- {"Cougar Point", 2},
- {"Cougar Point Desktop", 2},
- {"Cougar Point Mobile", 2},
- {"Patsburg", 2},
- {"DH89xxCC", 2},
- {"Panther Point", 2},
- {"Lynx Point", 2},
- {NULL, 0}
-};
-
-/*
- * This data only exists for exporting the supported PCI ids
- * via MODULE_DEVICE_TABLE. We do not actually register a
- * pci_driver, because the I/O Controller Hub has also other
- * functions that probably will be registered by other drivers.
- */
-static DEFINE_PCI_DEVICE_TABLE(iTCO_wdt_pci_tbl) = {
- { PCI_VDEVICE(INTEL, 0x2410), TCO_ICH},
- { PCI_VDEVICE(INTEL, 0x2420), TCO_ICH0},
- { PCI_VDEVICE(INTEL, 0x2440), TCO_ICH2},
- { PCI_VDEVICE(INTEL, 0x244c), TCO_ICH2M},
- { PCI_VDEVICE(INTEL, 0x2480), TCO_ICH3},
- { PCI_VDEVICE(INTEL, 0x248c), TCO_ICH3M},
- { PCI_VDEVICE(INTEL, 0x24c0), TCO_ICH4},
- { PCI_VDEVICE(INTEL, 0x24cc), TCO_ICH4M},
- { PCI_VDEVICE(INTEL, 0x2450), TCO_CICH},
- { PCI_VDEVICE(INTEL, 0x24d0), TCO_ICH5},
- { PCI_VDEVICE(INTEL, 0x25a1), TCO_6300ESB},
- { PCI_VDEVICE(INTEL, 0x2640), TCO_ICH6},
- { PCI_VDEVICE(INTEL, 0x2641), TCO_ICH6M},
- { PCI_VDEVICE(INTEL, 0x2642), TCO_ICH6W},
- { PCI_VDEVICE(INTEL, 0x2670), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2671), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2672), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2673), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2674), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2675), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2676), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2677), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2678), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x2679), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x267a), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x267b), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x267c), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x267d), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x267e), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x267f), TCO_631XESB},
- { PCI_VDEVICE(INTEL, 0x27b8), TCO_ICH7},
- { PCI_VDEVICE(INTEL, 0x27b0), TCO_ICH7DH},
- { PCI_VDEVICE(INTEL, 0x27b9), TCO_ICH7M},
- { PCI_VDEVICE(INTEL, 0x27bd), TCO_ICH7MDH},
- { PCI_VDEVICE(INTEL, 0x27bc), TCO_NM10},
- { PCI_VDEVICE(INTEL, 0x2810), TCO_ICH8},
- { PCI_VDEVICE(INTEL, 0x2812), TCO_ICH8DH},
- { PCI_VDEVICE(INTEL, 0x2814), TCO_ICH8DO},
- { PCI_VDEVICE(INTEL, 0x2815), TCO_ICH8M},
- { PCI_VDEVICE(INTEL, 0x2811), TCO_ICH8ME},
- { PCI_VDEVICE(INTEL, 0x2918), TCO_ICH9},
- { PCI_VDEVICE(INTEL, 0x2916), TCO_ICH9R},
- { PCI_VDEVICE(INTEL, 0x2912), TCO_ICH9DH},
- { PCI_VDEVICE(INTEL, 0x2914), TCO_ICH9DO},
- { PCI_VDEVICE(INTEL, 0x2919), TCO_ICH9M},
- { PCI_VDEVICE(INTEL, 0x2917), TCO_ICH9ME},
- { PCI_VDEVICE(INTEL, 0x3a18), TCO_ICH10},
- { PCI_VDEVICE(INTEL, 0x3a16), TCO_ICH10R},
- { PCI_VDEVICE(INTEL, 0x3a1a), TCO_ICH10D},
- { PCI_VDEVICE(INTEL, 0x3a14), TCO_ICH10DO},
- { PCI_VDEVICE(INTEL, 0x3b00), TCO_PCH},
- { PCI_VDEVICE(INTEL, 0x3b01), TCO_PCHM},
- { PCI_VDEVICE(INTEL, 0x3b02), TCO_P55},
- { PCI_VDEVICE(INTEL, 0x3b03), TCO_PM55},
- { PCI_VDEVICE(INTEL, 0x3b06), TCO_H55},
- { PCI_VDEVICE(INTEL, 0x3b07), TCO_QM57},
- { PCI_VDEVICE(INTEL, 0x3b08), TCO_H57},
- { PCI_VDEVICE(INTEL, 0x3b09), TCO_HM55},
- { PCI_VDEVICE(INTEL, 0x3b0a), TCO_Q57},
- { PCI_VDEVICE(INTEL, 0x3b0b), TCO_HM57},
- { PCI_VDEVICE(INTEL, 0x3b0d), TCO_PCHMSFF},
- { PCI_VDEVICE(INTEL, 0x3b0f), TCO_QS57},
- { PCI_VDEVICE(INTEL, 0x3b12), TCO_3400},
- { PCI_VDEVICE(INTEL, 0x3b14), TCO_3420},
- { PCI_VDEVICE(INTEL, 0x3b16), TCO_3450},
- { PCI_VDEVICE(INTEL, 0x5031), TCO_EP80579},
- { PCI_VDEVICE(INTEL, 0x1c41), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c42), TCO_CPTD},
- { PCI_VDEVICE(INTEL, 0x1c43), TCO_CPTM},
- { PCI_VDEVICE(INTEL, 0x1c44), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c45), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c46), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c47), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c48), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c49), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c4a), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c4b), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c4c), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c4d), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c4e), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c4f), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c50), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c51), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c52), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c53), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c54), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c55), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c56), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c57), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c58), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c59), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c5a), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c5b), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c5c), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c5d), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c5e), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1c5f), TCO_CPT},
- { PCI_VDEVICE(INTEL, 0x1d40), TCO_PBG},
- { PCI_VDEVICE(INTEL, 0x1d41), TCO_PBG},
- { PCI_VDEVICE(INTEL, 0x2310), TCO_DH89XXCC},
- { PCI_VDEVICE(INTEL, 0x1e40), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e41), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e42), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e43), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e44), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e45), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e46), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e47), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e48), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e49), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e4a), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e4b), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e4c), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e4d), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e4e), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e4f), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e50), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e51), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e52), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e53), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e54), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e55), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e56), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e57), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e58), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e59), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e5a), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e5b), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e5c), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e5d), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e5e), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x1e5f), TCO_PPT},
- { PCI_VDEVICE(INTEL, 0x8c40), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c41), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c42), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c43), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c44), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c45), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c46), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c47), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c48), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c49), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c4a), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c4b), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c4c), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c4d), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c4e), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c4f), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c50), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c51), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c52), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c53), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c54), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c55), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c56), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c57), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c58), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c59), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c5a), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c5b), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c5c), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c5d), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c5e), TCO_LPT},
- { PCI_VDEVICE(INTEL, 0x8c5f), TCO_LPT},
- { 0, }, /* End of list */
-};
-MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
-
/* Address definitions for the TCO */
/* TCO base address */
-#define TCOBASE (iTCO_wdt_private.ACPIBASE + 0x60)
+#define TCOBASE (iTCO_wdt_private.tco_res->start)
/* SMI Control and Enable Register */
-#define SMI_EN (iTCO_wdt_private.ACPIBASE + 0x30)
+#define SMI_EN (iTCO_wdt_private.smi_res->start)
#define TCO_RLD (TCOBASE + 0x00) /* TCO Timer Reload and Curr. Value */
#define TCOv1_TMR (TCOBASE + 0x01) /* TCOv1 Timer Initial Value */
@@ -393,19 +93,18 @@ static char expect_release;
static struct { /* this is private data for the iTCO_wdt device */
/* TCO version/generation */
unsigned int iTCO_version;
- /* The device's ACPIBASE address (TCOBASE = ACPIBASE+0x60) */
- unsigned long ACPIBASE;
+ struct resource *tco_res;
+ struct resource *smi_res;
+ struct resource *gcs_res;
/* NO_REBOOT flag is Memory-Mapped GCS register bit 5 (TCO version 2)*/
unsigned long __iomem *gcs;
/* the lock for io operations */
spinlock_t io_lock;
+ struct platform_device *dev;
/* the PCI-device */
struct pci_dev *pdev;
} iTCO_wdt_private;
-/* the watchdog platform device */
-static struct platform_device *iTCO_wdt_platform_device;
-
/* module parameters */
#define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat */
static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */
@@ -485,7 +184,7 @@ static int iTCO_wdt_start(void)
spin_lock(&iTCO_wdt_private.io_lock);
- iTCO_vendor_pre_start(iTCO_wdt_private.ACPIBASE, heartbeat);
+ iTCO_vendor_pre_start(iTCO_wdt_private.smi_res, heartbeat);
/* disable chipset's NO_REBOOT bit */
if (iTCO_wdt_unset_NO_REBOOT_bit()) {
@@ -519,7 +218,7 @@ static int iTCO_wdt_stop(void)
spin_lock(&iTCO_wdt_private.io_lock);
- iTCO_vendor_pre_stop(iTCO_wdt_private.ACPIBASE);
+ iTCO_vendor_pre_stop(iTCO_wdt_private.smi_res);
/* Bit 11: TCO Timer Halt -> 1 = The TCO timer is disabled */
val = inw(TCO1_CNT);
@@ -541,7 +240,7 @@ static int iTCO_wdt_keepalive(void)
{
spin_lock(&iTCO_wdt_private.io_lock);
- iTCO_vendor_pre_keepalive(iTCO_wdt_private.ACPIBASE, heartbeat);
+ iTCO_vendor_pre_keepalive(iTCO_wdt_private.smi_res, heartbeat);
/* Reload the timer by writing to the TCO Timer Counter register */
if (iTCO_wdt_private.iTCO_version == 2)
@@ -786,83 +485,120 @@ static struct miscdevice iTCO_wdt_miscdev = {
* Init & exit routines
*/
-static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
- const struct pci_device_id *ent, struct platform_device *dev)
+static void __devexit iTCO_wdt_cleanup(void)
+{
+ /* Stop the timer before we leave */
+ if (!nowayout)
+ iTCO_wdt_stop();
+
+ /* Deregister */
+ misc_deregister(&iTCO_wdt_miscdev);
+
+ /* release resources */
+ release_region(iTCO_wdt_private.tco_res->start,
+ resource_size(iTCO_wdt_private.tco_res));
+ release_region(iTCO_wdt_private.smi_res->start,
+ resource_size(iTCO_wdt_private.smi_res));
+ if (iTCO_wdt_private.iTCO_version == 2) {
+ iounmap(iTCO_wdt_private.gcs);
+ release_mem_region(iTCO_wdt_private.gcs_res->start,
+ resource_size(iTCO_wdt_private.gcs_res));
+ }
+
+ iTCO_wdt_private.tco_res = NULL;
+ iTCO_wdt_private.smi_res = NULL;
+ iTCO_wdt_private.gcs_res = NULL;
+ iTCO_wdt_private.gcs = NULL;
+}
+
+static int __devinit iTCO_wdt_probe(struct platform_device *dev)
{
- int ret;
- u32 base_address;
- unsigned long RCBA;
+ int ret = -ENODEV;
unsigned long val32;
+ struct lpc_ich_info *ich_info = dev->dev.platform_data;
+
+ if (!ich_info)
+ goto out;
+
+ spin_lock_init(&iTCO_wdt_private.io_lock);
+
+ iTCO_wdt_private.tco_res =
+ platform_get_resource(dev, IORESOURCE_IO, ICH_RES_IO_TCO);
+ if (!iTCO_wdt_private.tco_res)
+ goto out;
+
+ iTCO_wdt_private.smi_res =
+ platform_get_resource(dev, IORESOURCE_IO, ICH_RES_IO_SMI);
+ if (!iTCO_wdt_private.smi_res)
+ goto out;
+
+ iTCO_wdt_private.iTCO_version = ich_info->iTCO_version;
+ iTCO_wdt_private.dev = dev;
+ iTCO_wdt_private.pdev = to_pci_dev(dev->dev.parent);
/*
- * Find the ACPI/PM base I/O address which is the base
- * for the TCO registers (TCOBASE=ACPIBASE + 0x60)
- * ACPIBASE is bits [15:7] from 0x40-0x43
+ * Get the Memory-Mapped GCS register, we need it for the
+ * NO_REBOOT flag (TCO v2).
*/
- pci_read_config_dword(pdev, 0x40, &base_address);
- base_address &= 0x0000ff80;
- if (base_address == 0x00000000) {
- /* Something's wrong here, ACPIBASE has to be set */
- pr_err("failed to get TCOBASE address, device disabled by hardware/BIOS\n");
- return -ENODEV;
- }
- iTCO_wdt_private.iTCO_version =
- iTCO_chipset_info[ent->driver_data].iTCO_version;
- iTCO_wdt_private.ACPIBASE = base_address;
- iTCO_wdt_private.pdev = pdev;
-
- /* Get the Memory-Mapped GCS register, we need it for the
- NO_REBOOT flag (TCO v2). To get access to it you have to
- read RCBA from PCI Config space 0xf0 and use it as base.
- GCS = RCBA + ICH6_GCS(0x3410). */
if (iTCO_wdt_private.iTCO_version == 2) {
- pci_read_config_dword(pdev, 0xf0, &base_address);
- if ((base_address & 1) == 0) {
- pr_err("RCBA is disabled by hardware/BIOS, device disabled\n");
- ret = -ENODEV;
+ iTCO_wdt_private.gcs_res = platform_get_resource(dev,
+ IORESOURCE_MEM,
+ ICH_RES_MEM_GCS);
+
+ if (!iTCO_wdt_private.gcs_res)
+ goto out;
+
+ if (!request_mem_region(iTCO_wdt_private.gcs_res->start,
+ resource_size(iTCO_wdt_private.gcs_res), dev->name)) {
+ ret = -EBUSY;
goto out;
}
- RCBA = base_address & 0xffffc000;
- iTCO_wdt_private.gcs = ioremap((RCBA + 0x3410), 4);
+ iTCO_wdt_private.gcs = ioremap(iTCO_wdt_private.gcs_res->start,
+ resource_size(iTCO_wdt_private.gcs_res));
+ if (!iTCO_wdt_private.gcs) {
+ ret = -EIO;
+ goto unreg_gcs;
+ }
}
/* Check chipset's NO_REBOOT bit */
if (iTCO_wdt_unset_NO_REBOOT_bit() && iTCO_vendor_check_noreboot_on()) {
pr_info("unable to reset NO_REBOOT flag, device disabled by hardware/BIOS\n");
ret = -ENODEV; /* Cannot reset NO_REBOOT bit */
- goto out_unmap;
+ goto unmap_gcs;
}
/* Set the NO_REBOOT bit to prevent later reboots, just for sure */
iTCO_wdt_set_NO_REBOOT_bit();
/* The TCO logic uses the TCO_EN bit in the SMI_EN register */
- if (!request_region(SMI_EN, 4, "iTCO_wdt")) {
- pr_err("I/O address 0x%04lx already in use, device disabled\n",
+ if (!request_region(iTCO_wdt_private.smi_res->start,
+ resource_size(iTCO_wdt_private.smi_res), dev->name)) {
+ pr_err("I/O address 0x%04llx already in use, device disabled\n",
SMI_EN);
- ret = -EIO;
- goto out_unmap;
+ ret = -EBUSY;
+ goto unmap_gcs;
}
if (turn_SMI_watchdog_clear_off >= iTCO_wdt_private.iTCO_version) {
- /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
+ /*
+ * Bit 13: TCO_EN -> 0
+ * Disables TCO logic generating an SMI#
+ */
val32 = inl(SMI_EN);
val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
outl(val32, SMI_EN);
}
- /* The TCO I/O registers reside in a 32-byte range pointed to
- by the TCOBASE value */
- if (!request_region(TCOBASE, 0x20, "iTCO_wdt")) {
- pr_err("I/O address 0x%04lx already in use, device disabled\n",
+ if (!request_region(iTCO_wdt_private.tco_res->start,
+ resource_size(iTCO_wdt_private.tco_res), dev->name)) {
+ pr_err("I/O address 0x%04llx already in use, device disabled\n",
TCOBASE);
- ret = -EIO;
- goto unreg_smi_en;
+ ret = -EBUSY;
+ goto unreg_smi;
}
- pr_info("Found a %s TCO device (Version=%d, TCOBASE=0x%04lx)\n",
- iTCO_chipset_info[ent->driver_data].name,
- iTCO_chipset_info[ent->driver_data].iTCO_version,
- TCOBASE);
+ pr_info("Found a %s TCO device (Version=%d, TCOBASE=0x%04llx)\n",
+ ich_info->name, ich_info->iTCO_version, TCOBASE);
/* Clear out the (probably old) status */
outw(0x0008, TCO1_STS); /* Clear the Time Out Status bit */
@@ -883,7 +619,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
if (ret != 0) {
pr_err("cannot register miscdev on minor=%d (err=%d)\n",
WATCHDOG_MINOR, ret);
- goto unreg_region;
+ goto unreg_tco;
}
pr_info("initialized. heartbeat=%d sec (nowayout=%d)\n",
@@ -891,62 +627,31 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
return 0;
-unreg_region:
- release_region(TCOBASE, 0x20);
-unreg_smi_en:
- release_region(SMI_EN, 4);
-out_unmap:
+unreg_tco:
+ release_region(iTCO_wdt_private.tco_res->start,
+ resource_size(iTCO_wdt_private.tco_res));
+unreg_smi:
+ release_region(iTCO_wdt_private.smi_res->start,
+ resource_size(iTCO_wdt_private.smi_res));
+unmap_gcs:
if (iTCO_wdt_private.iTCO_version == 2)
iounmap(iTCO_wdt_private.gcs);
-out:
- iTCO_wdt_private.ACPIBASE = 0;
- return ret;
-}
-
-static void __devexit iTCO_wdt_cleanup(void)
-{
- /* Stop the timer before we leave */
- if (!nowayout)
- iTCO_wdt_stop();
-
- /* Deregister */
- misc_deregister(&iTCO_wdt_miscdev);
- release_region(TCOBASE, 0x20);
- release_region(SMI_EN, 4);
+unreg_gcs:
if (iTCO_wdt_private.iTCO_version == 2)
- iounmap(iTCO_wdt_private.gcs);
- pci_dev_put(iTCO_wdt_private.pdev);
- iTCO_wdt_private.ACPIBASE = 0;
-}
-
-static int __devinit iTCO_wdt_probe(struct platform_device *dev)
-{
- int ret = -ENODEV;
- int found = 0;
- struct pci_dev *pdev = NULL;
- const struct pci_device_id *ent;
-
- spin_lock_init(&iTCO_wdt_private.io_lock);
-
- for_each_pci_dev(pdev) {
- ent = pci_match_id(iTCO_wdt_pci_tbl, pdev);
- if (ent) {
- found++;
- ret = iTCO_wdt_init(pdev, ent, dev);
- if (!ret)
- break;
- }
- }
-
- if (!found)
- pr_info("No device detected\n");
+ release_mem_region(iTCO_wdt_private.gcs_res->start,
+ resource_size(iTCO_wdt_private.gcs_res));
+out:
+ iTCO_wdt_private.tco_res = NULL;
+ iTCO_wdt_private.smi_res = NULL;
+ iTCO_wdt_private.gcs_res = NULL;
+ iTCO_wdt_private.gcs = NULL;
return ret;
}
static int __devexit iTCO_wdt_remove(struct platform_device *dev)
{
- if (iTCO_wdt_private.ACPIBASE)
+ if (iTCO_wdt_private.tco_res || iTCO_wdt_private.smi_res)
iTCO_wdt_cleanup();
return 0;
@@ -977,23 +682,11 @@ static int __init iTCO_wdt_init_module(void)
if (err)
return err;
- iTCO_wdt_platform_device = platform_device_register_simple(DRV_NAME,
- -1, NULL, 0);
- if (IS_ERR(iTCO_wdt_platform_device)) {
- err = PTR_ERR(iTCO_wdt_platform_device);
- goto unreg_platform_driver;
- }
-
return 0;
-
-unreg_platform_driver:
- platform_driver_unregister(&iTCO_wdt_driver);
- return err;
}
static void __exit iTCO_wdt_cleanup_module(void)
{
- platform_device_unregister(iTCO_wdt_platform_device);
platform_driver_unregister(&iTCO_wdt_driver);
pr_info("Watchdog Module Unloaded\n");
}
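
With this rework iTCO_wdt no longer walks the PCI bus itself: the lpc_ich MFD driver (selected via the new LPC_ICH dependency in Kconfig above) identifies the chipset, creates the watchdog platform device, and publishes the TCO, SMI and GCS ranges as platform resources together with a struct lpc_ich_info in platform_data. A hedged sketch of that consumer side, with error handling trimmed and only the resource indices taken from this diff:

#include <linux/ioport.h>
#include <linux/mfd/lpc_ich.h>
#include <linux/platform_device.h>

static int example_tco_probe(struct platform_device *pdev)
{
	struct lpc_ich_info *info = pdev->dev.platform_data;
	struct resource *tco, *smi;

	if (!info)
		return -ENODEV;

	/* I/O ranges handed over by the lpc_ich MFD parent */
	tco = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_IO_TCO);
	smi = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_IO_SMI);
	if (!tco || !smi)
		return -ENODEV;

	if (!request_region(tco->start, resource_size(tco), pdev->name))
		return -EBUSY;

	/* ... program the TCO timer using tco->start as the register base ... */
	return 0;
}
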
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 7a2b734fcdc..bcfab2b00ad 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -121,7 +121,7 @@ static void imx2_wdt_start(void)
{
if (!test_and_set_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) {
/* at our first start we enable clock and do initialisations */
- clk_enable(imx2_wdt.clk);
+ clk_prepare_enable(imx2_wdt.clk);
imx2_wdt_setup();
} else /* delete the timer that pings the watchdog after close */
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c
index a9593a3a32a..2e74c3a8ee5 100644
--- a/drivers/watchdog/lantiq_wdt.c
+++ b/drivers/watchdog/lantiq_wdt.c
@@ -13,14 +13,15 @@
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
-#include <linux/platform_device.h>
+#include <linux/of_platform.h>
#include <linux/uaccess.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <lantiq.h>
+#include <lantiq_soc.h>
-/* Section 3.4 of the datasheet
+/*
+ * Section 3.4 of the datasheet
* The password sequence protects the WDT control register from unintended
* write actions, which might cause malfunction of the WDT.
*
@@ -70,7 +71,8 @@ ltq_wdt_disable(void)
{
/* write the first password magic */
ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR);
- /* write the second password magic with no config
+ /*
+ * write the second password magic with no config
* this turns the watchdog off
*/
ltq_w32(LTQ_WDT_PW2, ltq_wdt_membase + LTQ_WDT_CR);
@@ -184,7 +186,7 @@ static struct miscdevice ltq_wdt_miscdev = {
.fops = &ltq_wdt_fops,
};
-static int __init
+static int __devinit
ltq_wdt_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -194,28 +196,27 @@ ltq_wdt_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "cannot obtain I/O memory region");
return -ENOENT;
}
- res = devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), dev_name(&pdev->dev));
- if (!res) {
- dev_err(&pdev->dev, "cannot request I/O memory region");
- return -EBUSY;
- }
- ltq_wdt_membase = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
+
+ ltq_wdt_membase = devm_request_and_ioremap(&pdev->dev, res);
if (!ltq_wdt_membase) {
dev_err(&pdev->dev, "cannot remap I/O memory region\n");
return -ENOMEM;
}
/* we do not need to enable the clock as it is always running */
- clk = clk_get(&pdev->dev, "io");
- WARN_ON(!clk);
+ clk = clk_get_io();
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Failed to get clock\n");
+ return -ENOENT;
+ }
ltq_io_region_clk_rate = clk_get_rate(clk);
clk_put(clk);
+ /* find out if the watchdog caused the last reboot */
if (ltq_reset_cause() == LTQ_RST_CAUSE_WDTRST)
ltq_wdt_bootstatus = WDIOF_CARDRESET;
+ dev_info(&pdev->dev, "Init done\n");
return misc_register(&ltq_wdt_miscdev);
}
@@ -227,33 +228,26 @@ ltq_wdt_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ltq_wdt_match[] = {
+ { .compatible = "lantiq,wdt" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltq_wdt_match);
static struct platform_driver ltq_wdt_driver = {
+ .probe = ltq_wdt_probe,
.remove = __devexit_p(ltq_wdt_remove),
.driver = {
- .name = "ltq_wdt",
+ .name = "wdt",
.owner = THIS_MODULE,
+ .of_match_table = ltq_wdt_match,
},
};
-static int __init
-init_ltq_wdt(void)
-{
- return platform_driver_probe(&ltq_wdt_driver, ltq_wdt_probe);
-}
-
-static void __exit
-exit_ltq_wdt(void)
-{
- return platform_driver_unregister(&ltq_wdt_driver);
-}
-
-module_init(init_ltq_wdt);
-module_exit(exit_ltq_wdt);
+module_platform_driver(ltq_wdt_driver);
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
-
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Lantiq SoC Watchdog");
MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 788aa158e78..0f5736949c6 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -24,8 +24,8 @@
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/spinlock.h>
+#include <linux/clk.h>
#include <mach/bridge-regs.h>
-#include <plat/orion_wdt.h>
/*
* Watchdog timer block registers.
@@ -41,6 +41,7 @@
static bool nowayout = WATCHDOG_NOWAYOUT;
static int heartbeat = -1; /* module parameter (seconds) */
static unsigned int wdt_max_duration; /* (seconds) */
+static struct clk *clk;
static unsigned int wdt_tclk;
static void __iomem *wdt_reg;
static unsigned long wdt_status;
@@ -237,16 +238,16 @@ static struct miscdevice orion_wdt_miscdev = {
static int __devinit orion_wdt_probe(struct platform_device *pdev)
{
- struct orion_wdt_platform_data *pdata = pdev->dev.platform_data;
struct resource *res;
int ret;
- if (pdata) {
- wdt_tclk = pdata->tclk;
- } else {
- pr_err("misses platform data\n");
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ printk(KERN_ERR "Orion Watchdog missing clock\n");
return -ENODEV;
}
+ clk_prepare_enable(clk);
+ wdt_tclk = clk_get_rate(clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -282,6 +283,9 @@ static int __devexit orion_wdt_remove(struct platform_device *pdev)
if (!ret)
orion_wdt_miscdev.parent = NULL;
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+
return ret;
}
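With the platform-data path gone, orion_wdt now derives tclk from the common clock API. A minimal sketch of that acquire/use/release pairing, with illustrative names and slightly fuller error handling than the driver itself keeps:

/* Sketch only: obtain the device clock, keep it running while the
 * watchdog is in use, and undo both steps on removal. */
static struct clk *wdt_clk;
static unsigned long wdt_tclk_hz;

static int example_wdt_clk_init(struct device *dev)
{
	int ret;

	wdt_clk = clk_get(dev, NULL);		/* no con_id: default clock */
	if (IS_ERR(wdt_clk))
		return PTR_ERR(wdt_clk);

	ret = clk_prepare_enable(wdt_clk);	/* prepare + enable in one call */
	if (ret) {
		clk_put(wdt_clk);
		return ret;
	}

	wdt_tclk_hz = clk_get_rate(wdt_clk);	/* rate in Hz, used as tclk */
	return 0;
}

static void example_wdt_clk_exit(void)
{
	clk_disable_unprepare(wdt_clk);		/* mirror of clk_prepare_enable */
	clk_put(wdt_clk);
}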
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 9adc5be57b1..fc348863113 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -17,7 +17,7 @@ obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
obj-$(CONFIG_XEN_PVHVM) += platform-pci.o
obj-$(CONFIG_XEN_TMEM) += tmem.o
obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
-obj-$(CONFIG_XEN_DOM0) += pci.o
+obj-$(CONFIG_XEN_DOM0) += pci.o acpi.o
obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o
obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o
diff --git a/drivers/xen/acpi.c b/drivers/xen/acpi.c
new file mode 100644
index 00000000000..119d42a2bf5
--- /dev/null
+++ b/drivers/xen/acpi.c
@@ -0,0 +1,62 @@
+/******************************************************************************
+ * acpi.c
+ * acpi file for domain 0 kernel
+ *
+ * Copyright (c) 2011 Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+ * Copyright (c) 2011 Yu Ke ke.yu@intel.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <xen/acpi.h>
+#include <xen/interface/platform.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/hypervisor.h>
+
+int xen_acpi_notify_hypervisor_state(u8 sleep_state,
+ u32 pm1a_cnt, u32 pm1b_cnt)
+{
+ struct xen_platform_op op = {
+ .cmd = XENPF_enter_acpi_sleep,
+ .interface_version = XENPF_INTERFACE_VERSION,
+ .u = {
+ .enter_acpi_sleep = {
+ .pm1a_cnt_val = (u16)pm1a_cnt,
+ .pm1b_cnt_val = (u16)pm1b_cnt,
+ .sleep_state = sleep_state,
+ },
+ },
+ };
+
+ if ((pm1a_cnt & 0xffff0000) || (pm1b_cnt & 0xffff0000)) {
+ WARN(1, "Using more than 16bits of PM1A/B 0x%x/0x%x!"
+ "Email xen-devel@lists.xensource.com Thank you.\n", \
+ pm1a_cnt, pm1b_cnt);
+ return -1;
+ }
+
+ HYPERVISOR_dom0_op(&op);
+ return 1;
+}
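For context, a hypothetical caller of the new helper might look like the sketch below. The sleep state and PM1a/PM1b control values are placeholders (in practice they come from the ACPI tables); only xen_acpi_notify_hypervisor_state() itself is part of this patch.

/* Hypothetical sketch: forward an ACPI S3 request to the hypervisor.
 * Numeric values are placeholders, not taken from real firmware. */
static int example_enter_s3(void)
{
	u8 sleep_state = 3;		/* ACPI S3 (suspend-to-RAM) */
	u32 pm1a_cnt = 0x2400;		/* placeholder PM1a control value */
	u32 pm1b_cnt = 0;		/* no PM1b block on this machine */

	/* returns 1 on success, -1 if the values do not fit in 16 bits */
	return xen_acpi_notify_hypervisor_state(sleep_state,
						pm1a_cnt, pm1b_cnt);
}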
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 0a8a17cd80b..6908e4ce2a0 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -611,7 +611,7 @@ static void disable_pirq(struct irq_data *data)
disable_dynirq(data);
}
-static int find_irq_by_gsi(unsigned gsi)
+int xen_irq_from_gsi(unsigned gsi)
{
struct irq_info *info;
@@ -625,6 +625,7 @@ static int find_irq_by_gsi(unsigned gsi)
return -1;
}
+EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
/*
* Do not make any assumptions regarding the relationship between the
@@ -644,7 +645,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
mutex_lock(&irq_mapping_update_lock);
- irq = find_irq_by_gsi(gsi);
+ irq = xen_irq_from_gsi(gsi);
if (irq != -1) {
printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
irq, gsi);
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index f100ce20b16..0bfc1ef1125 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -38,6 +38,7 @@
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/hardirq.h>
#include <xen/xen.h>
#include <xen/interface/xen.h>
@@ -426,10 +427,8 @@ static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
nflags = *pflags;
do {
flags = nflags;
- if (flags & (GTF_reading|GTF_writing)) {
- printk(KERN_ALERT "WARNING: g.e. still in use!\n");
+ if (flags & (GTF_reading|GTF_writing))
return 0;
- }
} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);
return 1;
@@ -458,12 +457,103 @@ static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
return 1;
}
-int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
+static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
return gnttab_interface->end_foreign_access_ref(ref, readonly);
}
+
+int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
+{
+ if (_gnttab_end_foreign_access_ref(ref, readonly))
+ return 1;
+ pr_warn("WARNING: g.e. %#x still in use!\n", ref);
+ return 0;
+}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
+struct deferred_entry {
+ struct list_head list;
+ grant_ref_t ref;
+ bool ro;
+ uint16_t warn_delay;
+ struct page *page;
+};
+static LIST_HEAD(deferred_list);
+static void gnttab_handle_deferred(unsigned long);
+static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0);
+
+static void gnttab_handle_deferred(unsigned long unused)
+{
+ unsigned int nr = 10;
+ struct deferred_entry *first = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+ while (nr--) {
+ struct deferred_entry *entry
+ = list_first_entry(&deferred_list,
+ struct deferred_entry, list);
+
+ if (entry == first)
+ break;
+ list_del(&entry->list);
+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
+ if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
+ put_free_entry(entry->ref);
+ if (entry->page) {
+ pr_debug("freeing g.e. %#x (pfn %#lx)\n",
+ entry->ref, page_to_pfn(entry->page));
+ __free_page(entry->page);
+ } else
+ pr_info("freeing g.e. %#x\n", entry->ref);
+ kfree(entry);
+ entry = NULL;
+ } else {
+ if (!--entry->warn_delay)
+ pr_info("g.e. %#x still pending\n",
+ entry->ref);
+ if (!first)
+ first = entry;
+ }
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+ if (entry)
+ list_add_tail(&entry->list, &deferred_list);
+ else if (list_empty(&deferred_list))
+ break;
+ }
+ if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
+ deferred_timer.expires = jiffies + HZ;
+ add_timer(&deferred_timer);
+ }
+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
+}
+
+static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
+ struct page *page)
+{
+ struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ const char *what = KERN_WARNING "leaking";
+
+ if (entry) {
+ unsigned long flags;
+
+ entry->ref = ref;
+ entry->ro = readonly;
+ entry->page = page;
+ entry->warn_delay = 60;
+ spin_lock_irqsave(&gnttab_list_lock, flags);
+ list_add_tail(&entry->list, &deferred_list);
+ if (!timer_pending(&deferred_timer)) {
+ deferred_timer.expires = jiffies + HZ;
+ add_timer(&deferred_timer);
+ }
+ spin_unlock_irqrestore(&gnttab_list_lock, flags);
+ what = KERN_DEBUG "deferring";
+ }
+ printk("%s g.e. %#x (pfn %#lx)\n",
+ what, ref, page ? page_to_pfn(page) : -1);
+}
+
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
unsigned long page)
{
@@ -471,12 +561,9 @@ void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
put_free_entry(ref);
if (page != 0)
free_page(page);
- } else {
- /* XXX This needs to be fixed so that the ref and page are
- placed on a list to be freed up later. */
- printk(KERN_WARNING
- "WARNING: leaking g.e. and page still in use!\n");
- }
+ } else
+ gnttab_add_deferred(ref, readonly,
+ page ? virt_to_page(page) : NULL);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
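The deferral above replaces the old "leak and warn" behaviour: a grant reference the remote domain still holds is parked on deferred_list and retried from a timer instead of being dropped. A self-contained userspace analogue of that retry-list idea (plain C, not the kernel API, and without the locking and one-second timer):

#include <stdio.h>
#include <stdlib.h>

struct deferred {
	struct deferred *next;
	int id;
	int busy_for;	/* simulated: peer holds the resource this many rounds */
};

static struct deferred *deferred_head;

static int try_release(struct deferred *d)
{
	return d->busy_for-- <= 0;	/* returns 1 once the peer has let go */
}

static void end_access(int id, int busy_for)
{
	struct deferred *d = malloc(sizeof(*d));

	if (!d)
		return;			/* out of memory: nothing to track */
	d->id = id;
	d->busy_for = busy_for;
	if (try_release(d)) {
		printf("freed %d immediately\n", id);
		free(d);
		return;
	}
	d->next = deferred_head;	/* park it and retry later */
	deferred_head = d;
	printf("deferring %d\n", id);
}

static void handle_deferred(void)	/* the timer callback's job */
{
	struct deferred **pp = &deferred_head;

	while (*pp) {
		struct deferred *d = *pp;

		if (try_release(d)) {
			*pp = d->next;
			printf("freed %d from deferred list\n", d->id);
			free(d);
		} else {
			pp = &d->next;
		}
	}
}

int main(void)
{
	end_access(1, 0);	/* releasable right away */
	end_access(2, 2);	/* peer holds it for two more rounds */
	handle_deferred();	/* first retry: still busy */
	handle_deferred();	/* second retry: freed */
	return 0;
}

Running it prints "freed 1 immediately", "deferring 2", and then "freed 2 from deferred list" on the second retry pass, mirroring how the kernel code keeps re-arming its timer while entries remain pending.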
@@ -741,6 +828,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
struct page **pages, unsigned int count)
{
int i, ret;
+ bool lazy = false;
pte_t *pte;
unsigned long mfn;
@@ -751,6 +839,11 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
if (xen_feature(XENFEAT_auto_translated_physmap))
return ret;
+ if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+ arch_enter_lazy_mmu_mode();
+ lazy = true;
+ }
+
for (i = 0; i < count; i++) {
/* Do not add to override if the map failed. */
if (map_ops[i].status)
@@ -769,6 +862,9 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
return ret;
}
+ if (lazy)
+ arch_leave_lazy_mmu_mode();
+
return ret;
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);
@@ -777,6 +873,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
struct page **pages, unsigned int count, bool clear_pte)
{
int i, ret;
+ bool lazy = false;
ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
if (ret)
@@ -785,12 +882,20 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
if (xen_feature(XENFEAT_auto_translated_physmap))
return ret;
+ if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+ arch_enter_lazy_mmu_mode();
+ lazy = true;
+ }
+
for (i = 0; i < count; i++) {
ret = m2p_remove_override(pages[i], clear_pte);
if (ret)
return ret;
}
+ if (lazy)
+ arch_leave_lazy_mmu_mode();
+
return ret;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
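Both the map and unmap paths now bracket their per-page m2p override updates with lazy MMU mode so that, under paravirt, the page-table updates can be batched into fewer hypercalls. The guard reduces to the pattern below (kernel-context sketch, shown in isolation): batching is only entered from process context and only when no lazy section is already open.

	bool lazy = false;

	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
		arch_enter_lazy_mmu_mode();	/* start batching PTE updates */
		lazy = true;
	}

	/* ... per-page m2p_add_override()/m2p_remove_override() calls ... */

	if (lazy)
		arch_leave_lazy_mmu_mode();	/* flush the batched updates */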
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 0b48579a9cd..7ff2569e17a 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -29,6 +29,7 @@
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>
+#include <xen/xen.h>
#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 146c9489701..7d041cb6da2 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -105,6 +105,12 @@ static unsigned int selfballoon_interval __read_mostly = 5;
*/
static unsigned int selfballoon_min_usable_mb;
+/*
+ * Amount of RAM in MB to add to the target number of pages.
+ * Can be used to reserve some more room for caches and the like.
+ */
+static unsigned int selfballoon_reserved_mb;
+
static void selfballoon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process);
@@ -217,7 +223,8 @@ static void selfballoon_process(struct work_struct *work)
cur_pages = totalram_pages;
tgt_pages = cur_pages; /* default is no change */
goal_pages = percpu_counter_read_positive(&vm_committed_as) +
- totalreserve_pages;
+ totalreserve_pages +
+ MB2PAGES(selfballoon_reserved_mb);
#ifdef CONFIG_FRONTSWAP
/* allow space for frontswap pages to be repatriated */
if (frontswap_selfshrinking && frontswap_enabled)
@@ -397,6 +404,30 @@ static DEVICE_ATTR(selfballoon_min_usable_mb, S_IRUGO | S_IWUSR,
show_selfballoon_min_usable_mb,
store_selfballoon_min_usable_mb);
+SELFBALLOON_SHOW(selfballoon_reserved_mb, "%d\n",
+ selfballoon_reserved_mb);
+
+static ssize_t store_selfballoon_reserved_mb(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ err = strict_strtoul(buf, 10, &val);
+ if (err || val == 0)
+ return -EINVAL;
+ selfballoon_reserved_mb = val;
+ return count;
+}
+
+static DEVICE_ATTR(selfballoon_reserved_mb, S_IRUGO | S_IWUSR,
+ show_selfballoon_reserved_mb,
+ store_selfballoon_reserved_mb);
+
#ifdef CONFIG_FRONTSWAP
SELFBALLOON_SHOW(frontswap_selfshrinking, "%d\n", frontswap_selfshrinking);
@@ -480,6 +511,7 @@ static struct attribute *selfballoon_attrs[] = {
&dev_attr_selfballoon_downhysteresis.attr,
&dev_attr_selfballoon_uphysteresis.attr,
&dev_attr_selfballoon_min_usable_mb.attr,
+ &dev_attr_selfballoon_reserved_mb.attr,
#ifdef CONFIG_FRONTSWAP
&dev_attr_frontswap_selfshrinking.attr,
&dev_attr_frontswap_hysteresis.attr,
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index 2eff7a6aaa2..52fe7ad0766 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -234,3 +234,9 @@ int xb_init_comms(void)
return 0;
}
+
+void xb_deinit_comms(void)
+{
+ unbind_from_irqhandler(xenbus_irq, &xb_waitq);
+ xenbus_irq = 0;
+}
diff --git a/drivers/xen/xenbus/xenbus_comms.h b/drivers/xen/xenbus/xenbus_comms.h
index 6e42800fa49..c8abd3b8a6c 100644
--- a/drivers/xen/xenbus/xenbus_comms.h
+++ b/drivers/xen/xenbus/xenbus_comms.h
@@ -35,6 +35,7 @@
int xs_init(void);
int xb_init_comms(void);
+void xb_deinit_comms(void);
/* Low level routines. */
int xb_write(const void *data, unsigned len);
diff --git a/drivers/xen/xenbus/xenbus_dev_backend.c b/drivers/xen/xenbus/xenbus_dev_backend.c
index 3d3be78c109..be738c43104 100644
--- a/drivers/xen/xenbus/xenbus_dev_backend.c
+++ b/drivers/xen/xenbus/xenbus_dev_backend.c
@@ -8,7 +8,11 @@
#include <xen/xen.h>
#include <xen/page.h>
+#include <xen/xenbus.h>
#include <xen/xenbus_dev.h>
+#include <xen/grant_table.h>
+#include <xen/events.h>
+#include <asm/xen/hypervisor.h>
#include "xenbus_comms.h"
@@ -22,6 +26,50 @@ static int xenbus_backend_open(struct inode *inode, struct file *filp)
return nonseekable_open(inode, filp);
}
+static long xenbus_alloc(domid_t domid)
+{
+ struct evtchn_alloc_unbound arg;
+ int err = -EEXIST;
+
+ xs_suspend();
+
+ /* If xenstored_ready is nonzero, that means we have already talked to
+ * xenstore and set up watches. These watches will be restored by
+ * xs_resume, but that requires communication over the port established
+ * below that is not visible to anyone until the ioctl returns.
+ *
+ * This can be resolved by splitting the ioctl into two parts
+ * (postponing the resume until xenstored is active) but this is
+ * unnecessarily complex for the intended use where xenstored is only
+ * started once - so return -EEXIST if it's already running.
+ */
+ if (xenstored_ready)
+ goto out_err;
+
+ gnttab_grant_foreign_access_ref(GNTTAB_RESERVED_XENSTORE, domid,
+ virt_to_mfn(xen_store_interface), 0 /* writable */);
+
+ arg.dom = DOMID_SELF;
+ arg.remote_dom = domid;
+
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &arg);
+ if (err)
+ goto out_err;
+
+ if (xen_store_evtchn > 0)
+ xb_deinit_comms();
+
+ xen_store_evtchn = arg.port;
+
+ xs_resume();
+
+ return arg.port;
+
+ out_err:
+ xs_suspend_cancel();
+ return err;
+}
+
static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
if (!capable(CAP_SYS_ADMIN))
@@ -33,6 +81,9 @@ static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned l
return xen_store_evtchn;
return -ENODEV;
+ case IOCTL_XENBUS_BACKEND_SETUP:
+ return xenbus_alloc(data);
+
default:
return -ENOTTY;
}
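Taken together, the new ioctl suspends xenstore traffic, grants the xenstore ring page to the target domain, allocates an unbound event channel and switches comms to the new port. A hypothetical toolstack-side caller might look like the sketch below; the device node path and the userspace visibility of the header are assumptions, only IOCTL_XENBUS_BACKEND_SETUP itself appears in the patch.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xen/xenbus_dev.h>	/* IOCTL_XENBUS_BACKEND_SETUP; assumed exported */

/* Returns the unbound event channel port, or -1 with errno set. */
int example_setup_xenstore_domain(unsigned int domid)
{
	int fd, port;

	fd = open("/dev/xen/xenbus_backend", O_RDWR);	/* assumed node */
	if (fd < 0)
		return -1;
	port = ioctl(fd, IOCTL_XENBUS_BACKEND_SETUP, domid);
	close(fd);
	return port;
}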
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 014c8dd6296..57ccb7537da 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -448,7 +448,7 @@ void v9fs_evict_inode(struct inode *inode)
struct v9fs_inode *v9inode = V9FS_I(inode);
truncate_inode_pages(inode->i_mapping, 0);
- end_writeback(inode);
+ clear_inode(inode);
filemap_fdatawrite(inode->i_mapping);
#ifdef CONFIG_9P_FSCACHE
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index 88a4b0b5005..8bc4a59f4e7 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -264,7 +264,7 @@ affs_evict_inode(struct inode *inode)
}
invalidate_inode_buffers(inode);
- end_writeback(inode);
+ clear_inode(inode);
affs_free_prealloc(inode);
cache_page = (unsigned long)AFFS_I(inode)->i_lc;
if (cache_page) {
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index d890ae3b2ce..95cffd38239 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -423,7 +423,7 @@ void afs_evict_inode(struct inode *inode)
ASSERTCMP(inode->i_ino, ==, vnode->fid.vnode);
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
afs_give_up_callback(vnode);
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 6e488ebe778..8a4fed8ead3 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -100,7 +100,7 @@ static int autofs4_show_options(struct seq_file *m, struct dentry *root)
static void autofs4_evict_inode(struct inode *inode)
{
- end_writeback(inode);
+ clear_inode(inode);
kfree(inode->i_private);
}
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 37268c5bb98..1b35d6bd06b 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -292,7 +292,6 @@ static const struct inode_operations bad_inode_ops =
.getxattr = bad_inode_getxattr,
.listxattr = bad_inode_listxattr,
.removexattr = bad_inode_removexattr,
- /* truncate_range returns void */
};
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index e23dc7c8b88..9870417c26e 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -174,7 +174,7 @@ static void bfs_evict_inode(struct inode *inode)
truncate_inode_pages(&inode->i_data, 0);
invalidate_inode_buffers(inode);
- end_writeback(inode);
+ clear_inode(inode);
if (inode->i_nlink)
return;
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 613aa061823..790b3cddca6 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -505,7 +505,7 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode)
static void bm_evict_inode(struct inode *inode)
{
- end_writeback(inode);
+ clear_inode(inode);
kfree(inode->i_private);
}
diff --git a/fs/block_dev.c b/fs/block_dev.c
index ba11c30f302..c2bbe1fb132 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -487,7 +487,7 @@ static void bdev_evict_inode(struct inode *inode)
struct list_head *p;
truncate_inode_pages(&inode->i_data, 0);
invalidate_inode_buffers(inode); /* is it needed here? */
- end_writeback(inode);
+ clear_inode(inode);
spin_lock(&bdev_lock);
while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) {
__bd_forget(list_entry(p, struct inode, i_devices));
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 61b16c641ce..ceb7b9c9edc 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3756,7 +3756,7 @@ void btrfs_evict_inode(struct inode *inode)
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root, nr);
no_delete:
- end_writeback(inode);
+ clear_inode(inode);
return;
}
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 2b243af70aa..a08306a8bec 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -158,3 +158,23 @@ config CIFS_NFSD_EXPORT
depends on CIFS && EXPERIMENTAL && BROKEN
help
Allows NFS server to export a CIFS mounted share (nfsd over cifs)
+
+config CIFS_SMB2
+ bool "SMB2 network file system support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && INET && BROKEN
+ select NLS
+ select KEYS
+ select FSCACHE
+ select DNS_RESOLVER
+
+ help
+ This enables experimental support for the SMB2 (Server Message Block
+ version 2) protocol. The SMB2 protocol is the successor to the
+ popular CIFS and SMB network file sharing protocols. SMB2 is the
+ native file sharing mechanism for recent versions of Windows
+ operating systems (since Vista). SMB2 enablement will eventually
+	  allow users better performance, security and features than would be
+	  possible with cifs. Note that smb2 mount options are also simpler
+ (compared to cifs) due to protocol improvements.
+
+ Unless you are a developer or tester, say N.
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index 005d524c3a4..4b412754434 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_CIFS) += cifs.o
cifs-y := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o \
link.o misc.o netmisc.o smbencrypt.o transport.o asn1.o \
cifs_unicode.o nterr.o xattr.o cifsencrypt.o \
- readdir.o ioctl.o sess.o export.o
+ readdir.o ioctl.o sess.o export.o smb1ops.o
cifs-$(CONFIG_CIFS_ACL) += cifsacl.o
@@ -15,3 +15,5 @@ cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o
cifs-$(CONFIG_CIFS_DFS_UPCALL) += dns_resolve.o cifs_dfs_ref.o
cifs-$(CONFIG_CIFS_FSCACHE) += fscache.o cache.o
+
+cifs-$(CONFIG_CIFS_SMB2) += smb2ops.o
diff --git a/fs/cifs/README b/fs/cifs/README
index b7d782bab79..22ab7b5b8da 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -608,11 +608,6 @@ Stats Lists summary resource usage information as well as per
in the kernel configuration.
Configuration pseudo-files:
-MultiuserMount If set to one, more than one CIFS session to
- the same server ip address can be established
- if more than one uid accesses the same mount
- point and if the uids user/password mapping
- information is available. (default is 0)
PacketSigningEnabled If set to one, cifs packet signing is enabled
and will be used if the server requires
it. If set to two, cifs packet signing is
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 27046462941..e8140528ca5 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -57,19 +57,21 @@ cifs_dump_mem(char *label, void *data, int length)
}
}
-#ifdef CONFIG_CIFS_DEBUG2
void cifs_dump_detail(void *buf)
{
+#ifdef CONFIG_CIFS_DEBUG2
struct smb_hdr *smb = (struct smb_hdr *)buf;
cERROR(1, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d",
smb->Command, smb->Status.CifsError,
smb->Flags, smb->Flags2, smb->Mid, smb->Pid);
cERROR(1, "smb buf %p len %d", smb, smbCalcSize(smb));
+#endif /* CONFIG_CIFS_DEBUG2 */
}
void cifs_dump_mids(struct TCP_Server_Info *server)
{
+#ifdef CONFIG_CIFS_DEBUG2
struct list_head *tmp;
struct mid_q_entry *mid_entry;
@@ -102,8 +104,8 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
}
}
spin_unlock(&GlobalMid_Lock);
-}
#endif /* CONFIG_CIFS_DEBUG2 */
+}
#ifdef CONFIG_PROC_FS
static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
@@ -420,7 +422,6 @@ static struct proc_dir_entry *proc_fs_cifs;
static const struct file_operations cifsFYI_proc_fops;
static const struct file_operations cifs_lookup_cache_proc_fops;
static const struct file_operations traceSMB_proc_fops;
-static const struct file_operations cifs_multiuser_mount_proc_fops;
static const struct file_operations cifs_security_flags_proc_fops;
static const struct file_operations cifs_linux_ext_proc_fops;
@@ -440,8 +441,6 @@ cifs_proc_init(void)
proc_create("traceSMB", 0, proc_fs_cifs, &traceSMB_proc_fops);
proc_create("LinuxExtensionsEnabled", 0, proc_fs_cifs,
&cifs_linux_ext_proc_fops);
- proc_create("MultiuserMount", 0, proc_fs_cifs,
- &cifs_multiuser_mount_proc_fops);
proc_create("SecurityFlags", 0, proc_fs_cifs,
&cifs_security_flags_proc_fops);
proc_create("LookupCacheEnabled", 0, proc_fs_cifs,
@@ -460,7 +459,6 @@ cifs_proc_clean(void)
#ifdef CONFIG_CIFS_STATS
remove_proc_entry("Stats", proc_fs_cifs);
#endif
- remove_proc_entry("MultiuserMount", proc_fs_cifs);
remove_proc_entry("SecurityFlags", proc_fs_cifs);
remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs);
remove_proc_entry("LookupCacheEnabled", proc_fs_cifs);
@@ -617,52 +615,6 @@ static const struct file_operations traceSMB_proc_fops = {
.write = traceSMB_proc_write,
};
-static int cifs_multiuser_mount_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "%d\n", multiuser_mount);
- return 0;
-}
-
-static int cifs_multiuser_mount_proc_open(struct inode *inode, struct file *fh)
-{
- return single_open(fh, cifs_multiuser_mount_proc_show, NULL);
-}
-
-static ssize_t cifs_multiuser_mount_proc_write(struct file *file,
- const char __user *buffer, size_t count, loff_t *ppos)
-{
- char c;
- int rc;
- static bool warned;
-
- rc = get_user(c, buffer);
- if (rc)
- return rc;
- if (c == '0' || c == 'n' || c == 'N')
- multiuser_mount = 0;
- else if (c == '1' || c == 'y' || c == 'Y') {
- multiuser_mount = 1;
- if (!warned) {
- warned = true;
- printk(KERN_WARNING "CIFS VFS: The legacy multiuser "
- "mount code is scheduled to be deprecated in "
- "3.5. Please switch to using the multiuser "
- "mount option.");
- }
- }
-
- return count;
-}
-
-static const struct file_operations cifs_multiuser_mount_proc_fops = {
- .owner = THIS_MODULE,
- .open = cifs_multiuser_mount_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = cifs_multiuser_mount_proc_write,
-};
-
static int cifs_security_flags_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "0x%x\n", global_secflags);
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index 566e0ae8dc2..c0c68bb492d 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -24,10 +24,10 @@
#define _H_CIFS_DEBUG
void cifs_dump_mem(char *label, void *data, int length);
-#ifdef CONFIG_CIFS_DEBUG2
-#define DBG2 2
void cifs_dump_detail(void *);
void cifs_dump_mids(struct TCP_Server_Info *);
+#ifdef CONFIG_CIFS_DEBUG2
+#define DBG2 2
#else
#define DBG2 0
#endif
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 541ef81f6ae..8b6e344eb0b 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -56,7 +56,6 @@ int traceSMB = 0;
bool enable_oplocks = true;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
-unsigned int multiuser_mount = 0;
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
@@ -125,7 +124,7 @@ cifs_read_super(struct super_block *sb)
goto out_no_root;
}
- /* do that *after* d_alloc_root() - we want NULL ->d_op for root here */
+ /* do that *after* d_make_root() - we want NULL ->d_op for root here */
if (cifs_sb_master_tcon(cifs_sb)->nocase)
sb->s_d_op = &cifs_ci_dentry_ops;
else
@@ -272,7 +271,7 @@ static void
cifs_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
cifs_fscache_release_inode_cookie(inode);
}
@@ -329,6 +328,19 @@ cifs_show_security(struct seq_file *s, struct TCP_Server_Info *server)
seq_printf(s, "i");
}
+static void
+cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
+{
+ seq_printf(s, ",cache=");
+
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
+ seq_printf(s, "strict");
+ else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
+ seq_printf(s, "none");
+ else
+ seq_printf(s, "loose");
+}
+
/*
* cifs_show_options() is for displaying mount options in /proc/mounts.
* Not all settable options are displayed but most of the important
@@ -342,7 +354,9 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
struct sockaddr *srcaddr;
srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
+ seq_printf(s, ",vers=%s", tcon->ses->server->vals->version_string);
cifs_show_security(s, tcon->ses->server);
+ cifs_show_cache_flavor(s, cifs_sb);
seq_printf(s, ",unc=%s", tcon->treeName);
@@ -408,8 +422,6 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_printf(s, ",rwpidforward");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
seq_printf(s, ",forcemand");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
- seq_printf(s, ",directio");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
seq_printf(s, ",nouser_xattr");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
@@ -432,8 +444,6 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_printf(s, ",nostrictsync");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
seq_printf(s, ",noperm");
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
- seq_printf(s, ",strictcache");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
seq_printf(s, ",backupuid=%u", cifs_sb->mnt_backupuid);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
@@ -945,7 +955,6 @@ cifs_init_once(void *inode)
struct cifsInodeInfo *cifsi = inode;
inode_init_once(&cifsi->vfs_inode);
- INIT_LIST_HEAD(&cifsi->llist);
mutex_init(&cifsi->lock_mutex);
}
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 4ff6313f0a9..20350a93ed9 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -43,6 +43,7 @@
#define CIFS_MIN_RCV_POOL 4
+#define MAX_REOPEN_ATT	5 /* maximum number of attempts to reopen a file */
/*
* default attribute cache timeout (jiffies)
*/
@@ -150,6 +151,57 @@ struct cifs_cred {
*****************************************************************
*/
+enum smb_version {
+ Smb_1 = 1,
+ Smb_21,
+};
+
+struct mid_q_entry;
+struct TCP_Server_Info;
+struct cifsFileInfo;
+struct cifs_ses;
+
+struct smb_version_operations {
+ int (*send_cancel)(struct TCP_Server_Info *, void *,
+ struct mid_q_entry *);
+ bool (*compare_fids)(struct cifsFileInfo *, struct cifsFileInfo *);
+ /* setup request: allocate mid, sign message */
+ int (*setup_request)(struct cifs_ses *, struct kvec *, unsigned int,
+ struct mid_q_entry **);
+ /* check response: verify signature, map error */
+ int (*check_receive)(struct mid_q_entry *, struct TCP_Server_Info *,
+ bool);
+ void (*add_credits)(struct TCP_Server_Info *, const unsigned int);
+ void (*set_credits)(struct TCP_Server_Info *, const int);
+ int * (*get_credits_field)(struct TCP_Server_Info *);
+ /* data offset from read response message */
+ unsigned int (*read_data_offset)(char *);
+ /* data length from read response message */
+ unsigned int (*read_data_length)(char *);
+ /* map smb to linux error */
+ int (*map_error)(char *, bool);
+ /* find mid corresponding to the response message */
+ struct mid_q_entry * (*find_mid)(struct TCP_Server_Info *, char *);
+ void (*dump_detail)(void *);
+ /* verify the message */
+ int (*check_message)(char *, unsigned int);
+ bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
+};
+
+struct smb_version_values {
+ char *version_string;
+ __u32 large_lock_type;
+ __u32 exclusive_lock_type;
+ __u32 shared_lock_type;
+ __u32 unlock_lock_type;
+ size_t header_size;
+ size_t max_header_size;
+ size_t read_rsp_size;
+};
+
+#define HEADER_SIZE(server) (server->vals->header_size)
+#define MAX_HEADER_SIZE(server) (server->vals->max_header_size)
+
struct smb_vol {
char *username;
char *password;
@@ -205,6 +257,8 @@ struct smb_vol {
bool sockopt_tcp_nodelay:1;
unsigned short int port;
unsigned long actimeo; /* attribute cache timeout (jiffies) */
+ struct smb_version_operations *ops;
+ struct smb_version_values *vals;
char *prepath;
struct sockaddr_storage srcaddr; /* allow binding to a local IP */
struct nls_table *local_nls;
@@ -242,6 +296,8 @@ struct TCP_Server_Info {
int srv_count; /* reference counter */
/* 15 character server name + 0x20 16th byte indicating type = srv */
char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
+ struct smb_version_operations *ops;
+ struct smb_version_values *vals;
enum statusEnum tcpStatus; /* what we think the status is */
char *hostname; /* hostname portion of UNC string */
struct socket *ssocket;
@@ -321,16 +377,6 @@ in_flight(struct TCP_Server_Info *server)
return num;
}
-static inline int*
-get_credits_field(struct TCP_Server_Info *server)
-{
- /*
- * This will change to switch statement when we reserve slots for echos
- * and oplock breaks.
- */
- return &server->credits;
-}
-
static inline bool
has_credits(struct TCP_Server_Info *server, int *credits)
{
@@ -341,16 +387,16 @@ has_credits(struct TCP_Server_Info *server, int *credits)
return num > 0;
}
-static inline size_t
-header_size(void)
+static inline void
+add_credits(struct TCP_Server_Info *server, const unsigned int add)
{
- return sizeof(struct smb_hdr);
+ server->ops->add_credits(server, add);
}
-static inline size_t
-max_header_size(void)
+static inline void
+set_credits(struct TCP_Server_Info *server, const int val)
{
- return MAX_CIFS_HDR_SIZE;
+ server->ops->set_credits(server, val);
}
/*
@@ -547,8 +593,7 @@ struct cifsLockInfo {
__u64 offset;
__u64 length;
__u32 pid;
- __u8 type;
- __u16 netfid;
+ __u32 type;
};
/*
@@ -573,6 +618,10 @@ struct cifs_search_info {
struct cifsFileInfo {
struct list_head tlist; /* pointer to next fid owned by tcon */
struct list_head flist; /* next fid (file instance) for this inode */
+ struct list_head llist; /*
+ * brlocks held by this fid, protected by
+ * lock_mutex from cifsInodeInfo structure
+ */
unsigned int uid; /* allows finding which FileInfo structure */
__u32 pid; /* process id who opened file */
__u16 netfid; /* file id from remote */
@@ -615,9 +664,12 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
*/
struct cifsInodeInfo {
- struct list_head llist; /* brlocks for this inode */
bool can_cache_brlcks;
- struct mutex lock_mutex; /* protect two fields above */
+ struct mutex lock_mutex; /*
+					 * protects the field above and the
+					 * llist of every cifsFileInfo structure
+					 * on openFileList
+ */
/* BB add in lists for dirty pages i.e. write caching info for oplock */
struct list_head openFileList;
__u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
@@ -703,7 +755,6 @@ static inline void cifs_stats_bytes_read(struct cifs_tcon *tcon,
#endif
-struct mid_q_entry;
/*
* This is the prototype for the mid receive function. This function is for
@@ -1042,12 +1093,7 @@ GLOBAL_EXTERN atomic_t smBufAllocCount;
GLOBAL_EXTERN atomic_t midCount;
/* Misc globals */
-GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions
- to be established on existing mount if we
- have the uid/password or Kerberos credential
- or equivalent for current user */
-/* enable or disable oplocks */
-GLOBAL_EXTERN bool enable_oplocks;
+GLOBAL_EXTERN bool enable_oplocks; /* enable or disable oplocks */
GLOBAL_EXTERN unsigned int lookupCacheEnabled;
GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent
with more secure ntlmssp2 challenge/resp */
@@ -1074,4 +1120,11 @@ void cifs_oplock_break(struct work_struct *work);
extern const struct slow_work_ops cifs_oplock_break_ops;
extern struct workqueue_struct *cifsiod_wq;
+/* Operations for different SMB versions */
+#define SMB1_VERSION_STRING "1.0"
+extern struct smb_version_operations smb1_operations;
+extern struct smb_version_values smb1_values;
+#define SMB21_VERSION_STRING "2.1"
+extern struct smb_version_operations smb21_operations;
+extern struct smb_version_values smb21_values;
#endif /* _CIFS_GLOB_H */
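The smb_version_operations/smb_version_values pair turns per-dialect behaviour (find_mid, check_message, credit accounting, header sizes) into data hung off the server, so later hunks can call server->ops->... instead of fixed functions. A standalone illustration of that dispatch-table pattern, with invented names and sizes (not CIFS code):

#include <stdio.h>

struct proto_ops {
	const char *version_string;
	unsigned int (*header_size)(void);
};

static unsigned int v1_header_size(void) { return 32; }
static unsigned int v2_header_size(void) { return 64; }

static const struct proto_ops v1_ops = { "1.0", v1_header_size };
static const struct proto_ops v2_ops = { "2.1", v2_header_size };

struct server {
	const struct proto_ops *ops;	/* chosen once, at "mount" time */
};

int main(void)
{
	struct server old = { .ops = &v1_ops };
	struct server srv = { .ops = &v2_ops };

	/* generic code only ever goes through ->ops */
	printf("vers=%s header=%u\n",
	       old.ops->version_string, old.ops->header_size());
	printf("vers=%s header=%u\n",
	       srv.ops->version_string, srv.ops->header_size());
	return 0;
}

Selecting smb1_values or smb21_values at mount time works the same way: the transport core never needs to know which dialect it is speaking.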
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 96192c1e380..5ec21ecf798 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -78,6 +78,8 @@ extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
int * /* bytes returned */ , const int long_op);
extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
char *in_buf, int flags);
+extern int cifs_setup_request(struct cifs_ses *, struct kvec *, unsigned int,
+ struct mid_q_entry **);
extern int cifs_check_receive(struct mid_q_entry *mid,
struct TCP_Server_Info *server, bool log_error);
extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
@@ -88,9 +90,6 @@ extern int SendReceiveBlockingLock(const unsigned int xid,
struct smb_hdr *in_buf ,
struct smb_hdr *out_buf,
int *bytes_returned);
-extern void cifs_add_credits(struct TCP_Server_Info *server,
- const unsigned int add);
-extern void cifs_set_credits(struct TCP_Server_Info *server, const int val);
extern int checkSMB(char *buf, unsigned int length);
extern bool is_valid_oplock_break(char *, struct TCP_Server_Info *);
extern bool backup_cred(struct cifs_sb_info *);
@@ -192,11 +191,13 @@ extern int CIFSTCon(unsigned int xid, struct cifs_ses *ses,
extern int CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
const char *searchName, const struct nls_table *nls_codepage,
- __u16 *searchHandle, struct cifs_search_info *psrch_inf,
+ __u16 *searchHandle, __u16 search_flags,
+ struct cifs_search_info *psrch_inf,
int map, const char dirsep);
extern int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
- __u16 searchHandle, struct cifs_search_info *psrch_inf);
+ __u16 searchHandle, __u16 search_flags,
+ struct cifs_search_info *psrch_inf);
extern int CIFSFindClose(const int, struct cifs_tcon *tcon,
const __u16 search_handle);
@@ -464,6 +465,9 @@ extern int SMBencrypt(unsigned char *passwd, const unsigned char *c8,
/* asynchronous read support */
struct cifs_readdata {
+ struct kref refcount;
+ struct list_head list;
+ struct completion done;
struct cifsFileInfo *cfile;
struct address_space *mapping;
__u64 offset;
@@ -472,12 +476,13 @@ struct cifs_readdata {
int result;
struct list_head pages;
struct work_struct work;
+ int (*marshal_iov) (struct cifs_readdata *rdata,
+ unsigned int remaining);
unsigned int nr_iov;
struct kvec iov[1];
};
-struct cifs_readdata *cifs_readdata_alloc(unsigned int nr_pages);
-void cifs_readdata_free(struct cifs_readdata *rdata);
+void cifs_readdata_release(struct kref *refcount);
int cifs_async_readv(struct cifs_readdata *rdata);
/* asynchronous write support */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index da2f5446fa7..b5ad716b264 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -87,7 +87,6 @@ static struct {
#endif /* CIFS_POSIX */
/* Forward declarations */
-static void cifs_readv_complete(struct work_struct *work);
/* Mark as invalid, all open files on tree connections since they
were closed when session to server was lost */
@@ -461,7 +460,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
server->maxReq = min_t(unsigned int,
le16_to_cpu(rsp->MaxMpxCount),
cifs_max_pending);
- cifs_set_credits(server, server->maxReq);
+ set_credits(server, server->maxReq);
server->maxBuf = le16_to_cpu(rsp->MaxBufSize);
server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs);
/* even though we do not use raw we might as well set this
@@ -569,7 +568,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
little endian */
server->maxReq = min_t(unsigned int, le16_to_cpu(pSMBr->MaxMpxCount),
cifs_max_pending);
- cifs_set_credits(server, server->maxReq);
+ set_credits(server, server->maxReq);
/* probably no need to store and check maxvcs */
server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize);
server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
@@ -721,7 +720,7 @@ cifs_echo_callback(struct mid_q_entry *mid)
struct TCP_Server_Info *server = mid->callback_data;
DeleteMidQEntry(mid);
- cifs_add_credits(server, 1);
+ add_credits(server, 1);
}
int
@@ -1385,28 +1384,6 @@ openRetry:
return rc;
}
-struct cifs_readdata *
-cifs_readdata_alloc(unsigned int nr_pages)
-{
- struct cifs_readdata *rdata;
-
- /* readdata + 1 kvec for each page */
- rdata = kzalloc(sizeof(*rdata) +
- sizeof(struct kvec) * nr_pages, GFP_KERNEL);
- if (rdata != NULL) {
- INIT_WORK(&rdata->work, cifs_readv_complete);
- INIT_LIST_HEAD(&rdata->pages);
- }
- return rdata;
-}
-
-void
-cifs_readdata_free(struct cifs_readdata *rdata)
-{
- cifsFileInfo_put(rdata->cfile);
- kfree(rdata);
-}
-
/*
* Discard any remaining data in the current SMB. To do this, we borrow the
* current bigbuf.
@@ -1423,7 +1400,7 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
length = cifs_read_from_socket(server, server->bigbuf,
min_t(unsigned int, remaining,
- CIFSMaxBufSize + max_header_size()));
+ CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
if (length < 0)
return length;
server->total_read += length;
@@ -1434,38 +1411,14 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
return 0;
}
-static inline size_t
-read_rsp_size(void)
-{
- return sizeof(READ_RSP);
-}
-
-static inline unsigned int
-read_data_offset(char *buf)
-{
- READ_RSP *rsp = (READ_RSP *)buf;
- return le16_to_cpu(rsp->DataOffset);
-}
-
-static inline unsigned int
-read_data_length(char *buf)
-{
- READ_RSP *rsp = (READ_RSP *)buf;
- return (le16_to_cpu(rsp->DataLengthHigh) << 16) +
- le16_to_cpu(rsp->DataLength);
-}
-
static int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
int length, len;
- unsigned int data_offset, remaining, data_len;
+ unsigned int data_offset, data_len;
struct cifs_readdata *rdata = mid->callback_data;
char *buf = server->smallbuf;
unsigned int buflen = get_rfc1002_length(buf) + 4;
- u64 eof;
- pgoff_t eof_index;
- struct page *page, *tpage;
cFYI(1, "%s: mid=%llu offset=%llu bytes=%u", __func__,
mid->mid, rdata->offset, rdata->bytes);
@@ -1475,9 +1428,10 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
* can if there's not enough data. At this point, we've read down to
* the Mid.
*/
- len = min_t(unsigned int, buflen, read_rsp_size()) - header_size() + 1;
+ len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
+ HEADER_SIZE(server) + 1;
- rdata->iov[0].iov_base = buf + header_size() - 1;
+ rdata->iov[0].iov_base = buf + HEADER_SIZE(server) - 1;
rdata->iov[0].iov_len = len;
length = cifs_readv_from_socket(server, rdata->iov, 1, len);
@@ -1486,7 +1440,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
server->total_read += length;
/* Was the SMB read successful? */
- rdata->result = map_smb_to_linux_error(buf, false);
+ rdata->result = server->ops->map_error(buf, false);
if (rdata->result != 0) {
cFYI(1, "%s: server returned error %d", __func__,
rdata->result);
@@ -1494,14 +1448,15 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
}
/* Is there enough to get to the rest of the READ_RSP header? */
- if (server->total_read < read_rsp_size()) {
+ if (server->total_read < server->vals->read_rsp_size) {
cFYI(1, "%s: server returned short header. got=%u expected=%zu",
- __func__, server->total_read, read_rsp_size());
+ __func__, server->total_read,
+ server->vals->read_rsp_size);
rdata->result = -EIO;
return cifs_readv_discard(server, mid);
}
- data_offset = read_data_offset(buf) + 4;
+ data_offset = server->ops->read_data_offset(buf) + 4;
if (data_offset < server->total_read) {
/*
* win2k8 sometimes sends an offset of 0 when the read
@@ -1540,7 +1495,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
rdata->iov[0].iov_base, rdata->iov[0].iov_len);
/* how much data is in the response? */
- data_len = read_data_length(buf);
+ data_len = server->ops->read_data_length(buf);
if (data_offset + data_len > buflen) {
/* data_len is corrupt -- discard frame */
rdata->result = -EIO;
@@ -1548,64 +1503,8 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
}
/* marshal up the page array */
- len = 0;
- remaining = data_len;
- rdata->nr_iov = 1;
-
- /* determine the eof that the server (probably) has */
- eof = CIFS_I(rdata->mapping->host)->server_eof;
- eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
- cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
-
- list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
- if (remaining >= PAGE_CACHE_SIZE) {
- /* enough data to fill the page */
- rdata->iov[rdata->nr_iov].iov_base = kmap(page);
- rdata->iov[rdata->nr_iov].iov_len = PAGE_CACHE_SIZE;
- cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
- rdata->nr_iov, page->index,
- rdata->iov[rdata->nr_iov].iov_base,
- rdata->iov[rdata->nr_iov].iov_len);
- ++rdata->nr_iov;
- len += PAGE_CACHE_SIZE;
- remaining -= PAGE_CACHE_SIZE;
- } else if (remaining > 0) {
- /* enough for partial page, fill and zero the rest */
- rdata->iov[rdata->nr_iov].iov_base = kmap(page);
- rdata->iov[rdata->nr_iov].iov_len = remaining;
- cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
- rdata->nr_iov, page->index,
- rdata->iov[rdata->nr_iov].iov_base,
- rdata->iov[rdata->nr_iov].iov_len);
- memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
- '\0', PAGE_CACHE_SIZE - remaining);
- ++rdata->nr_iov;
- len += remaining;
- remaining = 0;
- } else if (page->index > eof_index) {
- /*
- * The VFS will not try to do readahead past the
- * i_size, but it's possible that we have outstanding
- * writes with gaps in the middle and the i_size hasn't
- * caught up yet. Populate those with zeroed out pages
- * to prevent the VFS from repeatedly attempting to
- * fill them until the writes are flushed.
- */
- zero_user(page, 0, PAGE_CACHE_SIZE);
- list_del(&page->lru);
- lru_cache_add_file(page);
- flush_dcache_page(page);
- SetPageUptodate(page);
- unlock_page(page);
- page_cache_release(page);
- } else {
- /* no need to hold page hostage */
- list_del(&page->lru);
- lru_cache_add_file(page);
- unlock_page(page);
- page_cache_release(page);
- }
- }
+ len = rdata->marshal_iov(rdata, data_len);
+ data_len -= len;
/* issue the read if we have any iovecs left to fill */
if (rdata->nr_iov > 1) {
@@ -1621,7 +1520,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
rdata->bytes = length;
cFYI(1, "total_read=%u buflen=%u remaining=%u", server->total_read,
- buflen, remaining);
+ buflen, data_len);
/* discard anything left over */
if (server->total_read < buflen)
@@ -1632,33 +1531,6 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
}
static void
-cifs_readv_complete(struct work_struct *work)
-{
- struct cifs_readdata *rdata = container_of(work,
- struct cifs_readdata, work);
- struct page *page, *tpage;
-
- list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
- list_del(&page->lru);
- lru_cache_add_file(page);
-
- if (rdata->result == 0) {
- kunmap(page);
- flush_dcache_page(page);
- SetPageUptodate(page);
- }
-
- unlock_page(page);
-
- if (rdata->result == 0)
- cifs_readpage_to_fscache(rdata->mapping->host, page);
-
- page_cache_release(page);
- }
- cifs_readdata_free(rdata);
-}
-
-static void
cifs_readv_callback(struct mid_q_entry *mid)
{
struct cifs_readdata *rdata = mid->callback_data;
@@ -1691,7 +1563,7 @@ cifs_readv_callback(struct mid_q_entry *mid)
queue_work(cifsiod_wq, &rdata->work);
DeleteMidQEntry(mid);
- cifs_add_credits(server, 1);
+ add_credits(server, 1);
}
/* cifs_async_readv - send an async write, and set up mid to handle result */
@@ -1744,12 +1616,15 @@ cifs_async_readv(struct cifs_readdata *rdata)
rdata->iov[0].iov_base = smb;
rdata->iov[0].iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;
+ kref_get(&rdata->refcount);
rc = cifs_call_async(tcon->ses->server, rdata->iov, 1,
cifs_readv_receive, cifs_readv_callback,
rdata, false);
if (rc == 0)
cifs_stats_inc(&tcon->num_reads);
+ else
+ kref_put(&rdata->refcount, cifs_readdata_release);
cifs_small_buf_release(smb);
return rc;
@@ -2135,7 +2010,7 @@ cifs_writev_callback(struct mid_q_entry *mid)
queue_work(cifsiod_wq, &wdata->work);
DeleteMidQEntry(mid);
- cifs_add_credits(tcon->ses->server, 1);
+ add_credits(tcon->ses->server, 1);
}
/* cifs_async_writev - send an async write, and set up mid to handle result */
@@ -4344,7 +4219,7 @@ int
CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
const char *searchName,
const struct nls_table *nls_codepage,
- __u16 *pnetfid,
+ __u16 *pnetfid, __u16 search_flags,
struct cifs_search_info *psrch_inf, int remap, const char dirsep)
{
/* level 257 SMB_ */
@@ -4416,8 +4291,7 @@ findFirstRetry:
cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
ATTR_DIRECTORY);
pSMB->SearchCount = cpu_to_le16(CIFSMaxBufSize/sizeof(FILE_UNIX_INFO));
- pSMB->SearchFlags = cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END |
- CIFS_SEARCH_RETURN_RESUME);
+ pSMB->SearchFlags = cpu_to_le16(search_flags);
pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
/* BB what should we set StorageType to? Does it matter? BB */
@@ -4487,8 +4361,8 @@ findFirstRetry:
return rc;
}
-int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
- __u16 searchHandle, struct cifs_search_info *psrch_inf)
+int CIFSFindNext(const int xid, struct cifs_tcon *tcon, __u16 searchHandle,
+ __u16 search_flags, struct cifs_search_info *psrch_inf)
{
TRANSACTION2_FNEXT_REQ *pSMB = NULL;
TRANSACTION2_FNEXT_RSP *pSMBr = NULL;
@@ -4531,8 +4405,7 @@ int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
cpu_to_le16(CIFSMaxBufSize / sizeof(FILE_UNIX_INFO));
pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
pSMB->ResumeKey = psrch_inf->resume_key;
- pSMB->SearchFlags =
- cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME);
+ pSMB->SearchFlags = cpu_to_le16(search_flags);
name_len = psrch_inf->resume_name_len;
params += name_len;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index e0b56d7a19c..ccafdedd0db 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1,7 +1,7 @@
/*
* fs/cifs/connect.c
*
- * Copyright (C) International Business Machines Corp., 2002,2009
+ * Copyright (C) International Business Machines Corp., 2002,2011
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -102,7 +102,7 @@ enum {
Opt_srcaddr, Opt_prefixpath,
Opt_iocharset, Opt_sockopt,
Opt_netbiosname, Opt_servern,
- Opt_ver, Opt_sec,
+ Opt_ver, Opt_vers, Opt_sec, Opt_cache,
/* Mount options to be ignored */
Opt_ignore,
@@ -210,9 +210,9 @@ static const match_table_t cifs_mount_option_tokens = {
{ Opt_netbiosname, "netbiosname=%s" },
{ Opt_servern, "servern=%s" },
{ Opt_ver, "ver=%s" },
- { Opt_ver, "vers=%s" },
- { Opt_ver, "version=%s" },
+ { Opt_vers, "vers=%s" },
{ Opt_sec, "sec=%s" },
+ { Opt_cache, "cache=%s" },
{ Opt_ignore, "cred" },
{ Opt_ignore, "credentials" },
@@ -261,6 +261,26 @@ static const match_table_t cifs_secflavor_tokens = {
{ Opt_sec_err, NULL }
};
+/* cache flavors */
+enum {
+ Opt_cache_loose,
+ Opt_cache_strict,
+ Opt_cache_none,
+ Opt_cache_err
+};
+
+static const match_table_t cifs_cacheflavor_tokens = {
+ { Opt_cache_loose, "loose" },
+ { Opt_cache_strict, "strict" },
+ { Opt_cache_none, "none" },
+ { Opt_cache_err, NULL }
+};
+
+static const match_table_t cifs_smb_version_tokens = {
+ { Smb_1, SMB1_VERSION_STRING },
+ { Smb_21, SMB21_VERSION_STRING },
+};
+
static int ip_connect(struct TCP_Server_Info *server);
static int generic_ip_connect(struct TCP_Server_Info *server);
static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
@@ -549,7 +569,7 @@ allocate_buffers(struct TCP_Server_Info *server)
}
} else if (server->large_buf) {
/* we are reusing a dirty large buf, clear its start */
- memset(server->bigbuf, 0, header_size());
+ memset(server->bigbuf, 0, HEADER_SIZE(server));
}
if (!server->smallbuf) {
@@ -563,7 +583,7 @@ allocate_buffers(struct TCP_Server_Info *server)
/* beginning of smb buffer is cleared in our buf_get */
} else {
/* if existing small buf clear beginning */
- memset(server->smallbuf, 0, header_size());
+ memset(server->smallbuf, 0, HEADER_SIZE(server));
}
return true;
@@ -764,25 +784,6 @@ is_smb_response(struct TCP_Server_Info *server, unsigned char type)
return false;
}
-static struct mid_q_entry *
-find_mid(struct TCP_Server_Info *server, char *buffer)
-{
- struct smb_hdr *buf = (struct smb_hdr *)buffer;
- struct mid_q_entry *mid;
-
- spin_lock(&GlobalMid_Lock);
- list_for_each_entry(mid, &server->pending_mid_q, qhead) {
- if (mid->mid == buf->Mid &&
- mid->mid_state == MID_REQUEST_SUBMITTED &&
- le16_to_cpu(mid->command) == buf->Command) {
- spin_unlock(&GlobalMid_Lock);
- return mid;
- }
- }
- spin_unlock(&GlobalMid_Lock);
- return NULL;
-}
-
void
dequeue_mid(struct mid_q_entry *mid, bool malformed)
{
@@ -934,7 +935,7 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
unsigned int pdu_length = get_rfc1002_length(buf);
/* make sure this will fit in a large buffer */
- if (pdu_length > CIFSMaxBufSize + max_header_size() - 4) {
+ if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) - 4) {
cERROR(1, "SMB response too long (%u bytes)",
pdu_length);
cifs_reconnect(server);
@@ -950,8 +951,8 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
}
/* now read the rest */
- length = cifs_read_from_socket(server, buf + header_size() - 1,
- pdu_length - header_size() + 1 + 4);
+ length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
+ pdu_length - HEADER_SIZE(server) + 1 + 4);
if (length < 0)
return length;
server->total_read += length;
@@ -967,7 +968,7 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
* 48 bytes is enough to display the header and a little bit
* into the payload for debugging purposes.
*/
- length = checkSMB(buf, server->total_read);
+ length = server->ops->check_message(buf, server->total_read);
if (length != 0)
cifs_dump_mem("Bad SMB: ", buf,
min_t(unsigned int, server->total_read, 48));
@@ -1025,7 +1026,7 @@ cifs_demultiplex_thread(void *p)
continue;
/* make sure we have enough to get to the MID */
- if (pdu_length < header_size() - 1 - 4) {
+ if (pdu_length < HEADER_SIZE(server) - 1 - 4) {
cERROR(1, "SMB response too short (%u bytes)",
pdu_length);
cifs_reconnect(server);
@@ -1035,12 +1036,12 @@ cifs_demultiplex_thread(void *p)
/* read down to the MID */
length = cifs_read_from_socket(server, buf + 4,
- header_size() - 1 - 4);
+ HEADER_SIZE(server) - 1 - 4);
if (length < 0)
continue;
server->total_read += length;
- mid_entry = find_mid(server, buf);
+ mid_entry = server->ops->find_mid(server, buf);
if (!mid_entry || !mid_entry->receive)
length = standard_receive3(server, mid_entry);
@@ -1057,12 +1058,13 @@ cifs_demultiplex_thread(void *p)
if (mid_entry != NULL) {
if (!mid_entry->multiRsp || mid_entry->multiEnd)
mid_entry->callback(mid_entry);
- } else if (!is_valid_oplock_break(buf, server)) {
+ } else if (!server->ops->is_oplock_break(buf, server)) {
cERROR(1, "No task to wake, unknown frame received! "
"NumMids %d", atomic_read(&midCount));
- cifs_dump_mem("Received Data is: ", buf, header_size());
+ cifs_dump_mem("Received Data is: ", buf,
+ HEADER_SIZE(server));
#ifdef CONFIG_CIFS_DEBUG2
- cifs_dump_detail(buf);
+ server->ops->dump_detail(buf);
cifs_dump_mids(server);
#endif /* CIFS_DEBUG2 */
@@ -1186,6 +1188,54 @@ static int cifs_parse_security_flavors(char *value,
}
static int
+cifs_parse_cache_flavor(char *value, struct smb_vol *vol)
+{
+ substring_t args[MAX_OPT_ARGS];
+
+ switch (match_token(value, cifs_cacheflavor_tokens, args)) {
+ case Opt_cache_loose:
+ vol->direct_io = false;
+ vol->strict_io = false;
+ break;
+ case Opt_cache_strict:
+ vol->direct_io = false;
+ vol->strict_io = true;
+ break;
+ case Opt_cache_none:
+ vol->direct_io = true;
+ vol->strict_io = false;
+ break;
+ default:
+ cERROR(1, "bad cache= option: %s", value);
+ return 1;
+ }
+ return 0;
+}
+
+static int
+cifs_parse_smb_version(char *value, struct smb_vol *vol)
+{
+ substring_t args[MAX_OPT_ARGS];
+
+ switch (match_token(value, cifs_smb_version_tokens, args)) {
+ case Smb_1:
+ vol->ops = &smb1_operations;
+ vol->vals = &smb1_values;
+ break;
+#ifdef CONFIG_CIFS_SMB2
+ case Smb_21:
+ vol->ops = &smb21_operations;
+ vol->vals = &smb21_values;
+ break;
+#endif
+ default:
+ cERROR(1, "Unknown vers= option specified: %s", value);
+ return 1;
+ }
+ return 0;
+}
+
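/*
 * Illustrative sketch, not part of the patch above: a user-space stand-in for
 * cifs_parse_cache_flavor(), showing how the three cache= flavors map onto the
 * direct_io/strict_io pair. Plain strcmp() replaces the kernel's match_token()
 * machinery, and struct vol_sketch is a stripped-down, hypothetical smb_vol.
 */
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct vol_sketch { bool direct_io; bool strict_io; };

static int parse_cache_flavor(const char *value, struct vol_sketch *vol)
{
	if (strcmp(value, "loose") == 0) {
		vol->direct_io = false;	/* go through the page cache */
		vol->strict_io = false;
	} else if (strcmp(value, "strict") == 0) {
		vol->direct_io = false;	/* cached, with strict coherency checks */
		vol->strict_io = true;
	} else if (strcmp(value, "none") == 0) {
		vol->direct_io = true;	/* bypass the page cache entirely */
		vol->strict_io = false;
	} else {
		fprintf(stderr, "bad cache= option: %s\n", value);
		return 1;
	}
	return 0;
}

int main(void)
{
	struct vol_sketch vol = { 0 };

	if (parse_cache_flavor("strict", &vol) == 0)
		printf("direct_io=%d strict_io=%d\n", vol.direct_io, vol.strict_io);
	return 0;
}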
+static int
cifs_parse_mount_options(const char *mountdata, const char *devname,
struct smb_vol *vol)
{
@@ -1203,6 +1253,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
char *string = NULL;
char *tmp_end, *value;
char delim;
+ bool cache_specified = false;
+ static bool cache_warned = false;
separator[0] = ',';
separator[1] = 0;
@@ -1236,6 +1288,10 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
vol->actimeo = CIFS_DEF_ACTIMEO;
+ /* FIXME: add autonegotiation -- for now, SMB1 is default */
+ vol->ops = &smb1_operations;
+ vol->vals = &smb1_values;
+
if (!mountdata)
goto cifs_parse_mount_err;
@@ -1414,10 +1470,20 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
vol->seal = 1;
break;
case Opt_direct:
- vol->direct_io = 1;
+ cache_specified = true;
+ vol->direct_io = true;
+ vol->strict_io = false;
+ cERROR(1, "The \"directio\" option will be removed in "
+ "3.7. Please switch to the \"cache=none\" "
+ "option.");
break;
case Opt_strictcache:
- vol->strict_io = 1;
+ cache_specified = true;
+ vol->direct_io = false;
+ vol->strict_io = true;
+ cERROR(1, "The \"strictcache\" option will be removed "
+ "in 3.7. Please switch to the \"cache=strict\" "
+ "option.");
break;
case Opt_noac:
printk(KERN_WARNING "CIFS: Mount option noac not "
@@ -1821,8 +1887,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
if (string == NULL)
goto out_nomem;
- if (strnicmp(string, "cifs", 4) == 0 ||
- strnicmp(string, "1", 1) == 0) {
+ if (strnicmp(string, "1", 1) == 0) {
/* This is the default */
break;
}
@@ -1830,6 +1895,14 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
printk(KERN_WARNING "CIFS: Invalid version"
" specified\n");
goto cifs_parse_mount_err;
+ case Opt_vers:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+
+ if (cifs_parse_smb_version(string, vol) != 0)
+ goto cifs_parse_mount_err;
+ break;
case Opt_sec:
string = match_strdup(args);
if (string == NULL)
@@ -1838,6 +1911,15 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
if (cifs_parse_security_flavors(string, vol) != 0)
goto cifs_parse_mount_err;
break;
+ case Opt_cache:
+ cache_specified = true;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+
+ if (cifs_parse_cache_flavor(string, vol) != 0)
+ goto cifs_parse_mount_err;
+ break;
default:
/*
* An option we don't recognize. Save it off for later
@@ -1881,6 +1963,14 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
printk(KERN_NOTICE "CIFS: ignoring forcegid mount option "
"specified with no gid= option.\n");
+ /* FIXME: remove this block in 3.7 */
+ if (!cache_specified && !cache_warned) {
+ cache_warned = true;
+ printk(KERN_NOTICE "CIFS: no cache= option specified, using "
+ "\"cache=loose\". This default will change "
+ "to \"cache=strict\" in 3.7.\n");
+ }
+
kfree(mountdata_copy);
return 0;
@@ -2041,6 +2131,9 @@ match_security(struct TCP_Server_Info *server, struct smb_vol *vol)
static int match_server(struct TCP_Server_Info *server, struct sockaddr *addr,
struct smb_vol *vol)
{
+ if ((server->vals != vol->vals) || (server->ops != vol->ops))
+ return 0;
+
if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
return 0;
@@ -2163,6 +2256,8 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
goto out_err;
}
+ tcp_ses->ops = volume_info->ops;
+ tcp_ses->vals = volume_info->vals;
cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
tcp_ses->hostname = extract_hostname(volume_info->UNC);
if (IS_ERR(tcp_ses->hostname)) {
@@ -3569,6 +3664,7 @@ cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
if (cifs_parse_mount_options(mount_data, devname, volume_info))
return -EINVAL;
+
if (volume_info->nullauth) {
cFYI(1, "Anonymous login");
kfree(volume_info->username);
@@ -4010,11 +4106,11 @@ int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses)
if (server->maxBuf != 0)
return 0;
- cifs_set_credits(server, 1);
+ set_credits(server, 1);
rc = CIFSSMBNegotiate(xid, ses);
if (rc == -EAGAIN) {
/* retry only once on 1st time connection */
- cifs_set_credits(server, 1);
+ set_credits(server, 1);
rc = CIFSSMBNegotiate(xid, ses);
if (rc == -EAGAIN)
rc = -EHOSTDOWN;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 81725e9286e..253170dfa71 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -264,6 +264,7 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
pCifsFile->tlink = cifs_get_tlink(tlink);
mutex_init(&pCifsFile->fh_mutex);
INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
+ INIT_LIST_HEAD(&pCifsFile->llist);
spin_lock(&cifs_file_list_lock);
list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
@@ -334,9 +335,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
* is closed anyway.
*/
mutex_lock(&cifsi->lock_mutex);
- list_for_each_entry_safe(li, tmp, &cifsi->llist, llist) {
- if (li->netfid != cifs_file->netfid)
- continue;
+ list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
list_del(&li->llist);
cifs_del_lock_waiters(li);
kfree(li);
@@ -645,7 +644,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
}
static struct cifsLockInfo *
-cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 netfid)
+cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
struct cifsLockInfo *lock =
kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
@@ -654,7 +653,6 @@ cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 netfid)
lock->offset = offset;
lock->length = length;
lock->type = type;
- lock->netfid = netfid;
lock->pid = current->tgid;
INIT_LIST_HEAD(&lock->blist);
init_waitqueue_head(&lock->block_q);
@@ -672,19 +670,20 @@ cifs_del_lock_waiters(struct cifsLockInfo *lock)
}
static bool
-__cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
- __u64 length, __u8 type, __u16 netfid,
- struct cifsLockInfo **conf_lock)
+cifs_find_fid_lock_conflict(struct cifsFileInfo *cfile, __u64 offset,
+ __u64 length, __u8 type, struct cifsFileInfo *cur,
+ struct cifsLockInfo **conf_lock)
{
- struct cifsLockInfo *li, *tmp;
+ struct cifsLockInfo *li;
+ struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
- list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
+ list_for_each_entry(li, &cfile->llist, llist) {
if (offset + length <= li->offset ||
offset >= li->offset + li->length)
continue;
- else if ((type & LOCKING_ANDX_SHARED_LOCK) &&
- ((netfid == li->netfid && current->tgid == li->pid) ||
- type == li->type))
+ else if ((type & server->vals->shared_lock_type) &&
+ ((server->ops->compare_fids(cur, cfile) &&
+ current->tgid == li->pid) || type == li->type))
continue;
else {
*conf_lock = li;
@@ -695,11 +694,23 @@ __cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
}
static bool
-cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
- struct cifsLockInfo **conf_lock)
+cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
+ __u8 type, struct cifsLockInfo **conf_lock)
{
- return __cifs_find_lock_conflict(cinode, lock->offset, lock->length,
- lock->type, lock->netfid, conf_lock);
+ bool rc = false;
+ struct cifsFileInfo *fid, *tmp;
+ struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+
+ spin_lock(&cifs_file_list_lock);
+ list_for_each_entry_safe(fid, tmp, &cinode->openFileList, flist) {
+ rc = cifs_find_fid_lock_conflict(fid, offset, length, type,
+ cfile, conf_lock);
+ if (rc)
+ break;
+ }
+ spin_unlock(&cifs_file_list_lock);
+
+ return rc;
}
/*
@@ -710,22 +721,24 @@ cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
* the server or 1 otherwise.
*/
static int
-cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
- __u8 type, __u16 netfid, struct file_lock *flock)
+cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
+ __u8 type, struct file_lock *flock)
{
int rc = 0;
struct cifsLockInfo *conf_lock;
+ struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+ struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
bool exist;
mutex_lock(&cinode->lock_mutex);
- exist = __cifs_find_lock_conflict(cinode, offset, length, type, netfid,
- &conf_lock);
+ exist = cifs_find_lock_conflict(cfile, offset, length, type,
+ &conf_lock);
if (exist) {
flock->fl_start = conf_lock->offset;
flock->fl_end = conf_lock->offset + conf_lock->length - 1;
flock->fl_pid = conf_lock->pid;
- if (conf_lock->type & LOCKING_ANDX_SHARED_LOCK)
+ if (conf_lock->type & server->vals->shared_lock_type)
flock->fl_type = F_RDLCK;
else
flock->fl_type = F_WRLCK;
@@ -739,10 +752,11 @@ cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
}
static void
-cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
+cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
+ struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
mutex_lock(&cinode->lock_mutex);
- list_add_tail(&lock->llist, &cinode->llist);
+ list_add_tail(&lock->llist, &cfile->llist);
mutex_unlock(&cinode->lock_mutex);
}
@@ -753,10 +767,11 @@ cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
* 3) -EACCESS, if there is a lock that prevents us and wait is false.
*/
static int
-cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
+cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
bool wait)
{
struct cifsLockInfo *conf_lock;
+ struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
bool exist;
int rc = 0;
@@ -764,9 +779,10 @@ try_again:
exist = false;
mutex_lock(&cinode->lock_mutex);
- exist = cifs_find_lock_conflict(cinode, lock, &conf_lock);
+ exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
+ lock->type, &conf_lock);
if (!exist && cinode->can_cache_brlcks) {
- list_add_tail(&lock->llist, &cinode->llist);
+ list_add_tail(&lock->llist, &cfile->llist);
mutex_unlock(&cinode->lock_mutex);
return rc;
}
@@ -888,7 +904,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
for (i = 0; i < 2; i++) {
cur = buf;
num = 0;
- list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
+ list_for_each_entry_safe(li, tmp, &cfile->llist, llist) {
if (li->type != types[i])
continue;
cur->Pid = cpu_to_le16(li->pid);
@@ -898,7 +914,8 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
if (++num == max_num) {
stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
- li->type, 0, num, buf);
+ (__u8)li->type, 0, num,
+ buf);
if (stored_rc)
rc = stored_rc;
cur = buf;
@@ -909,7 +926,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
if (num) {
stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
- types[i], 0, num, buf);
+ (__u8)types[i], 0, num, buf);
if (stored_rc)
rc = stored_rc;
}
@@ -1053,8 +1070,8 @@ cifs_push_locks(struct cifsFileInfo *cfile)
}
static void
-cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
- bool *wait_flag)
+cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
+ bool *wait_flag, struct TCP_Server_Info *server)
{
if (flock->fl_flags & FL_POSIX)
cFYI(1, "Posix");
@@ -1073,38 +1090,50 @@ cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
(~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
- *type = LOCKING_ANDX_LARGE_FILES;
+ *type = server->vals->large_lock_type;
if (flock->fl_type == F_WRLCK) {
cFYI(1, "F_WRLCK ");
+ *type |= server->vals->exclusive_lock_type;
*lock = 1;
} else if (flock->fl_type == F_UNLCK) {
cFYI(1, "F_UNLCK");
+ *type |= server->vals->unlock_lock_type;
*unlock = 1;
/* Check if unlock includes more than one lock range */
} else if (flock->fl_type == F_RDLCK) {
cFYI(1, "F_RDLCK");
- *type |= LOCKING_ANDX_SHARED_LOCK;
+ *type |= server->vals->shared_lock_type;
*lock = 1;
} else if (flock->fl_type == F_EXLCK) {
cFYI(1, "F_EXLCK");
+ *type |= server->vals->exclusive_lock_type;
*lock = 1;
} else if (flock->fl_type == F_SHLCK) {
cFYI(1, "F_SHLCK");
- *type |= LOCKING_ANDX_SHARED_LOCK;
+ *type |= server->vals->shared_lock_type;
*lock = 1;
} else
cFYI(1, "Unknown type of lock");
}
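/*
 * Illustrative sketch, not part of the patch above: how the per-dialect
 * lock-type fields consulted by cifs_read_flock() can be combined. The struct
 * is a toy stand-in for smb_version_values; the numeric constants mirror the
 * SMB1 LOCKING_ANDX_* values, quoted here only for illustration.
 */
#include <stdio.h>
#include <fcntl.h>

struct vals_sketch {
	unsigned int large_lock_type;
	unsigned int exclusive_lock_type;
	unsigned int shared_lock_type;
	unsigned int unlock_lock_type;
};

/* SMB1: LOCKING_ANDX_LARGE_FILES = 0x10, LOCKING_ANDX_SHARED_LOCK = 0x01 */
static const struct vals_sketch smb1 = { 0x10, 0, 0x01, 0 };

static unsigned int flock_to_smb_type(int fl_type, const struct vals_sketch *v)
{
	unsigned int type = v->large_lock_type;

	if (fl_type == F_WRLCK)
		type |= v->exclusive_lock_type;
	else if (fl_type == F_RDLCK)
		type |= v->shared_lock_type;
	else if (fl_type == F_UNLCK)
		type |= v->unlock_lock_type;
	return type;
}

int main(void)
{
	printf("F_RDLCK -> 0x%x\n", flock_to_smb_type(F_RDLCK, &smb1));
	printf("F_WRLCK -> 0x%x\n", flock_to_smb_type(F_WRLCK, &smb1));
	return 0;
}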
static int
-cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
+cifs_mandatory_lock(int xid, struct cifsFileInfo *cfile, __u64 offset,
+ __u64 length, __u32 type, int lock, int unlock, bool wait)
+{
+ return CIFSSMBLock(xid, tlink_tcon(cfile->tlink), cfile->netfid,
+ current->tgid, length, offset, unlock, lock,
+ (__u8)type, wait, 0);
+}
+
+static int
+cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
bool wait_flag, bool posix_lck, int xid)
{
int rc = 0;
__u64 length = 1 + flock->fl_end - flock->fl_start;
struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
- struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
+ struct TCP_Server_Info *server = tcon->ses->server;
__u16 netfid = cfile->netfid;
if (posix_lck) {
@@ -1114,7 +1143,7 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
if (!rc)
return rc;
- if (type & LOCKING_ANDX_SHARED_LOCK)
+ if (type & server->vals->shared_lock_type)
posix_lock_type = CIFS_RDLCK;
else
posix_lock_type = CIFS_WRLCK;
@@ -1124,38 +1153,35 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
return rc;
}
- rc = cifs_lock_test(cinode, flock->fl_start, length, type, netfid,
- flock);
+ rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
if (!rc)
return rc;
/* BB we could chain these into one lock request BB */
- rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
- flock->fl_start, 0, 1, type, 0, 0);
+ rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length, type,
+ 1, 0, false);
if (rc == 0) {
- rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
- length, flock->fl_start, 1, 0,
- type, 0, 0);
+ rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
+ type, 0, 1, false);
flock->fl_type = F_UNLCK;
if (rc != 0)
cERROR(1, "Error unlocking previously locked "
- "range %d during test of lock", rc);
+ "range %d during test of lock", rc);
return 0;
}
- if (type & LOCKING_ANDX_SHARED_LOCK) {
+ if (type & server->vals->shared_lock_type) {
flock->fl_type = F_WRLCK;
return 0;
}
- rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
- flock->fl_start, 0, 1,
- type | LOCKING_ANDX_SHARED_LOCK, 0, 0);
+ rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
+ type | server->vals->shared_lock_type, 1, 0,
+ false);
if (rc == 0) {
- rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
- length, flock->fl_start, 1, 0,
- type | LOCKING_ANDX_SHARED_LOCK,
- 0, 0);
+ rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
+ type | server->vals->shared_lock_type,
+ 0, 1, false);
flock->fl_type = F_RDLCK;
if (rc != 0)
cERROR(1, "Error unlocking previously locked "
@@ -1212,15 +1238,13 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
for (i = 0; i < 2; i++) {
cur = buf;
num = 0;
- list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
+ list_for_each_entry_safe(li, tmp, &cfile->llist, llist) {
if (flock->fl_start > li->offset ||
(flock->fl_start + length) <
(li->offset + li->length))
continue;
if (current->tgid != li->pid)
continue;
- if (cfile->netfid != li->netfid)
- continue;
if (types[i] != li->type)
continue;
if (!cinode->can_cache_brlcks) {
@@ -1233,7 +1257,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
cpu_to_le32((u32)(li->offset>>32));
/*
* We need to save a lock here to let us add
- * it again to the inode list if the unlock
+ * it again to the file's list if the unlock
* range request fails on the server.
*/
list_move(&li->llist, &tmp_llist);
@@ -1247,10 +1271,10 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
* We failed on the unlock range
* request - add all locks from
* the tmp list to the head of
- * the inode list.
+ * the file's list.
*/
cifs_move_llist(&tmp_llist,
- &cinode->llist);
+ &cfile->llist);
rc = stored_rc;
} else
/*
@@ -1265,7 +1289,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
} else {
/*
* We can cache brlock requests - simply remove
- * a lock from the inode list.
+ * a lock from the file's list.
*/
list_del(&li->llist);
cifs_del_lock_waiters(li);
@@ -1276,7 +1300,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
types[i], num, 0, buf);
if (stored_rc) {
- cifs_move_llist(&tmp_llist, &cinode->llist);
+ cifs_move_llist(&tmp_llist, &cfile->llist);
rc = stored_rc;
} else
cifs_free_llist(&tmp_llist);
@@ -1289,14 +1313,14 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
}
static int
-cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
+cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
{
int rc = 0;
__u64 length = 1 + flock->fl_end - flock->fl_start;
struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
- struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
+ struct TCP_Server_Info *server = tcon->ses->server;
__u16 netfid = cfile->netfid;
if (posix_lck) {
@@ -1306,7 +1330,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
if (!rc || rc < 0)
return rc;
- if (type & LOCKING_ANDX_SHARED_LOCK)
+ if (type & server->vals->shared_lock_type)
posix_lock_type = CIFS_RDLCK;
else
posix_lock_type = CIFS_WRLCK;
@@ -1323,24 +1347,24 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
if (lock) {
struct cifsLockInfo *lock;
- lock = cifs_lock_init(flock->fl_start, length, type, netfid);
+ lock = cifs_lock_init(flock->fl_start, length, type);
if (!lock)
return -ENOMEM;
- rc = cifs_lock_add_if(cinode, lock, wait_flag);
+ rc = cifs_lock_add_if(cfile, lock, wait_flag);
if (rc < 0)
kfree(lock);
if (rc <= 0)
goto out;
- rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
- flock->fl_start, 0, 1, type, wait_flag, 0);
+ rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
+ type, 1, 0, wait_flag);
if (rc) {
kfree(lock);
goto out;
}
- cifs_lock_add(cinode, lock);
+ cifs_lock_add(cfile, lock);
} else if (unlock)
rc = cifs_unlock_range(cfile, flock, xid);
@@ -1361,7 +1385,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
struct cifsInodeInfo *cinode;
struct cifsFileInfo *cfile;
__u16 netfid;
- __u8 type;
+ __u32 type;
rc = -EACCES;
xid = GetXid();
@@ -1370,11 +1394,13 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
"end: %lld", cmd, flock->fl_flags, flock->fl_type,
flock->fl_start, flock->fl_end);
- cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag);
-
- cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
cfile = (struct cifsFileInfo *)file->private_data;
tcon = tlink_tcon(cfile->tlink);
+
+ cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
+ tcon->ses->server);
+
+ cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
netfid = cfile->netfid;
cinode = CIFS_I(file->f_path.dentry->d_inode);
@@ -1539,10 +1565,11 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
bool fsuid_only)
{
- struct cifsFileInfo *open_file;
+ struct cifsFileInfo *open_file, *inv_file = NULL;
struct cifs_sb_info *cifs_sb;
bool any_available = false;
int rc;
+ unsigned int refind = 0;
/* Having a null inode here (because mapping->host was set to zero by
the VFS or MM) should not happen but we had reports of an oops (due to
@@ -1562,40 +1589,25 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
spin_lock(&cifs_file_list_lock);
refind_writable:
+ if (refind > MAX_REOPEN_ATT) {
+ spin_unlock(&cifs_file_list_lock);
+ return NULL;
+ }
list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
if (!any_available && open_file->pid != current->tgid)
continue;
if (fsuid_only && open_file->uid != current_fsuid())
continue;
if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
- cifsFileInfo_get(open_file);
-
if (!open_file->invalidHandle) {
/* found a good writable file */
+ cifsFileInfo_get(open_file);
spin_unlock(&cifs_file_list_lock);
return open_file;
+ } else {
+ if (!inv_file)
+ inv_file = open_file;
}
-
- spin_unlock(&cifs_file_list_lock);
-
- /* Had to unlock since following call can block */
- rc = cifs_reopen_file(open_file, false);
- if (!rc)
- return open_file;
-
- /* if it fails, try another handle if possible */
- cFYI(1, "wp failed on reopen file");
- cifsFileInfo_put(open_file);
-
- spin_lock(&cifs_file_list_lock);
-
- /* else we simply continue to the next entry. Thus
- we do not loop on reopen errors. If we
- can not reopen the file, for example if we
- reconnected to a server with another client
- racing to delete or lock the file we would not
- make progress if we restarted before the beginning
- of the loop here. */
}
}
/* couldn't find useable FH with same pid, try any available */
@@ -1603,7 +1615,30 @@ refind_writable:
any_available = true;
goto refind_writable;
}
+
+ if (inv_file) {
+ any_available = false;
+ cifsFileInfo_get(inv_file);
+ }
+
spin_unlock(&cifs_file_list_lock);
+
+ if (inv_file) {
+ rc = cifs_reopen_file(inv_file, false);
+ if (!rc)
+ return inv_file;
+ else {
+ spin_lock(&cifs_file_list_lock);
+ list_move_tail(&inv_file->flist,
+ &cifs_inode->openFileList);
+ spin_unlock(&cifs_file_list_lock);
+ cifsFileInfo_put(inv_file);
+ spin_lock(&cifs_file_list_lock);
+ ++refind;
+ goto refind_writable;
+ }
+ }
+
return NULL;
}
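/*
 * Illustrative sketch, not part of the patch above: the bounded-retry pattern
 * that find_writable_file() now follows -- prefer a valid handle, otherwise
 * remember one invalid candidate, try to revalidate it outside the lock, and
 * give up after a fixed number of attempts. Everything here is user-space
 * stand-in code; struct handle and try_reopen() are hypothetical.
 */
#include <stdio.h>
#include <stdbool.h>

#define MAX_REOPEN_ATTEMPTS 2	/* plays the role of MAX_REOPEN_ATT */

struct handle { bool writable; bool invalid; };

static bool try_reopen(struct handle *h)
{
	/* pretend the reopen fails; a real reopen may well succeed */
	return false;
}

static struct handle *find_writable(struct handle *h, int n)
{
	int attempts = 0;

	while (attempts <= MAX_REOPEN_ATTEMPTS) {
		struct handle *stale = NULL;

		for (int i = 0; i < n; i++) {
			if (!h[i].writable)
				continue;
			if (!h[i].invalid)
				return &h[i];	/* good handle: done */
			if (!stale)
				stale = &h[i];	/* remember one to revalidate */
		}
		if (!stale)
			return NULL;		/* nothing worth retrying */
		if (try_reopen(stale))
			return stale;
		attempts++;			/* reopen failed: bounded retry */
	}
	return NULL;
}

int main(void)
{
	struct handle files[2] = { { true, true }, { false, false } };

	printf("found: %s\n", find_writable(files, 2) ? "yes" : "no");
	return 0;
}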
@@ -2339,24 +2374,224 @@ ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
return cifs_user_writev(iocb, iov, nr_segs, pos);
}
+static struct cifs_readdata *
+cifs_readdata_alloc(unsigned int nr_vecs, work_func_t complete)
+{
+ struct cifs_readdata *rdata;
+
+ rdata = kzalloc(sizeof(*rdata) +
+ sizeof(struct kvec) * nr_vecs, GFP_KERNEL);
+ if (rdata != NULL) {
+ kref_init(&rdata->refcount);
+ INIT_LIST_HEAD(&rdata->list);
+ init_completion(&rdata->done);
+ INIT_WORK(&rdata->work, complete);
+ INIT_LIST_HEAD(&rdata->pages);
+ }
+ return rdata;
+}
+
+void
+cifs_readdata_release(struct kref *refcount)
+{
+ struct cifs_readdata *rdata = container_of(refcount,
+ struct cifs_readdata, refcount);
+
+ if (rdata->cfile)
+ cifsFileInfo_put(rdata->cfile);
+
+ kfree(rdata);
+}
+
+static int
+cifs_read_allocate_pages(struct list_head *list, unsigned int npages)
+{
+ int rc = 0;
+ struct page *page, *tpage;
+ unsigned int i;
+
+ for (i = 0; i < npages; i++) {
+ page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
+ if (!page) {
+ rc = -ENOMEM;
+ break;
+ }
+ list_add(&page->lru, list);
+ }
+
+ if (rc) {
+ list_for_each_entry_safe(page, tpage, list, lru) {
+ list_del(&page->lru);
+ put_page(page);
+ }
+ }
+ return rc;
+}
+
+static void
+cifs_uncached_readdata_release(struct kref *refcount)
+{
+ struct page *page, *tpage;
+ struct cifs_readdata *rdata = container_of(refcount,
+ struct cifs_readdata, refcount);
+
+ list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+ list_del(&page->lru);
+ put_page(page);
+ }
+ cifs_readdata_release(refcount);
+}
+
+static int
+cifs_retry_async_readv(struct cifs_readdata *rdata)
+{
+ int rc;
+
+ do {
+ if (rdata->cfile->invalidHandle) {
+ rc = cifs_reopen_file(rdata->cfile, true);
+ if (rc != 0)
+ continue;
+ }
+ rc = cifs_async_readv(rdata);
+ } while (rc == -EAGAIN);
+
+ return rc;
+}
+
+/**
+ * cifs_readdata_to_iov - copy data from pages in response to an iovec
+ * @rdata: the readdata response with list of pages holding data
+ * @iov: vector in which we should copy the data
+ * @nr_segs: number of segments in vector
+ * @offset: offset into file of the first iovec
+ * @copied: used to return the amount of data copied to the iov
+ *
+ * This function copies data from a list of pages in a readdata response into
+ * an array of iovecs. It will first calculate where the data should go
+ * based on the info in the readdata and then copy the data into that spot.
+ */
+static ssize_t
+cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
+ unsigned long nr_segs, loff_t offset, ssize_t *copied)
+{
+ int rc = 0;
+ struct iov_iter ii;
+ size_t pos = rdata->offset - offset;
+ struct page *page, *tpage;
+ ssize_t remaining = rdata->bytes;
+ unsigned char *pdata;
+
+ /* set up iov_iter and advance to the correct offset */
+ iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
+ iov_iter_advance(&ii, pos);
+
+ *copied = 0;
+ list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+ ssize_t copy;
+
+ /* copy a whole page or whatever's left */
+ copy = min_t(ssize_t, remaining, PAGE_SIZE);
+
+ /* ...but limit it to whatever space is left in the iov */
+ copy = min_t(ssize_t, copy, iov_iter_count(&ii));
+
+ /* go while there's data to be copied and no errors */
+ if (copy && !rc) {
+ pdata = kmap(page);
+ rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
+ (int)copy);
+ kunmap(page);
+ if (!rc) {
+ *copied += copy;
+ remaining -= copy;
+ iov_iter_advance(&ii, copy);
+ }
+ }
+
+ list_del(&page->lru);
+ put_page(page);
+ }
+
+ return rc;
+}
+
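/*
 * Illustrative sketch, not part of the patch above: a user-space analogue of
 * the copy done by cifs_readdata_to_iov() -- data that arrived for some file
 * offset is written into an iovec array at the matching position (the
 * rdata->offset - offset step in the patch), spilling across iovec segments
 * as needed. copy_to_iov() is a hypothetical helper, not a kernel API.
 */
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

static size_t copy_to_iov(struct iovec *iov, int nr_segs, size_t pos,
			  const char *src, size_t len)
{
	size_t copied = 0;

	for (int i = 0; i < nr_segs && copied < len; i++) {
		if (pos >= iov[i].iov_len) {	/* skip whole segments */
			pos -= iov[i].iov_len;
			continue;
		}
		size_t n = iov[i].iov_len - pos;
		if (n > len - copied)
			n = len - copied;
		memcpy((char *)iov[i].iov_base + pos, src + copied, n);
		copied += n;
		pos = 0;	/* later segments start at their beginning */
	}
	return copied;
}

int main(void)
{
	char a[4] = "", b[8] = "";
	struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };

	/* data belonging to file offset 2, read started at offset 0 */
	size_t n = copy_to_iov(iov, 2, 2, "HELLO", 5);
	printf("copied %zu bytes: %.2s|%.3s\n", n, a + 2, b);
	return 0;
}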
+static void
+cifs_uncached_readv_complete(struct work_struct *work)
+{
+ struct cifs_readdata *rdata = container_of(work,
+ struct cifs_readdata, work);
+
+ /* if the result is non-zero then the pages weren't kmapped */
+ if (rdata->result == 0) {
+ struct page *page;
+
+ list_for_each_entry(page, &rdata->pages, lru)
+ kunmap(page);
+ }
+
+ complete(&rdata->done);
+ kref_put(&rdata->refcount, cifs_uncached_readdata_release);
+}
+
+static int
+cifs_uncached_read_marshal_iov(struct cifs_readdata *rdata,
+ unsigned int remaining)
+{
+ int len = 0;
+ struct page *page, *tpage;
+
+ rdata->nr_iov = 1;
+ list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+ if (remaining >= PAGE_SIZE) {
+ /* enough data to fill the page */
+ rdata->iov[rdata->nr_iov].iov_base = kmap(page);
+ rdata->iov[rdata->nr_iov].iov_len = PAGE_SIZE;
+ cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
+ rdata->nr_iov, page->index,
+ rdata->iov[rdata->nr_iov].iov_base,
+ rdata->iov[rdata->nr_iov].iov_len);
+ ++rdata->nr_iov;
+ len += PAGE_SIZE;
+ remaining -= PAGE_SIZE;
+ } else if (remaining > 0) {
+ /* enough for partial page, fill and zero the rest */
+ rdata->iov[rdata->nr_iov].iov_base = kmap(page);
+ rdata->iov[rdata->nr_iov].iov_len = remaining;
+ cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
+ rdata->nr_iov, page->index,
+ rdata->iov[rdata->nr_iov].iov_base,
+ rdata->iov[rdata->nr_iov].iov_len);
+ memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
+ '\0', PAGE_SIZE - remaining);
+ ++rdata->nr_iov;
+ len += remaining;
+ remaining = 0;
+ } else {
+ /* no need to hold page hostage */
+ list_del(&page->lru);
+ put_page(page);
+ }
+ }
+
+ return len;
+}
+
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
unsigned long nr_segs, loff_t *poffset)
{
- int rc;
- int xid;
- ssize_t total_read;
- unsigned int bytes_read = 0;
+ ssize_t rc;
size_t len, cur_len;
- int iov_offset = 0;
+ ssize_t total_read = 0;
+ loff_t offset = *poffset;
+ unsigned int npages;
struct cifs_sb_info *cifs_sb;
- struct cifs_tcon *pTcon;
+ struct cifs_tcon *tcon;
struct cifsFileInfo *open_file;
- struct smb_com_read_rsp *pSMBr;
- struct cifs_io_parms io_parms;
- char *read_data;
- unsigned int rsize;
- __u32 pid;
+ struct cifs_readdata *rdata, *tmp;
+ struct list_head rdata_list;
+ pid_t pid;
if (!nr_segs)
return 0;
@@ -2365,14 +2600,10 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
if (!len)
return 0;
- xid = GetXid();
+ INIT_LIST_HEAD(&rdata_list);
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
-
- /* FIXME: set up handlers for larger reads and/or convert to async */
- rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
-
open_file = file->private_data;
- pTcon = tlink_tcon(open_file->tlink);
+ tcon = tlink_tcon(open_file->tlink);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
pid = open_file->pid;
@@ -2382,56 +2613,78 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
if ((file->f_flags & O_ACCMODE) == O_WRONLY)
cFYI(1, "attempting read on write only file instance");
- for (total_read = 0; total_read < len; total_read += bytes_read) {
- cur_len = min_t(const size_t, len - total_read, rsize);
- rc = -EAGAIN;
- read_data = NULL;
+ do {
+ cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
+ npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
- while (rc == -EAGAIN) {
- int buf_type = CIFS_NO_BUFFER;
- if (open_file->invalidHandle) {
- rc = cifs_reopen_file(open_file, true);
- if (rc != 0)
- break;
- }
- io_parms.netfid = open_file->netfid;
- io_parms.pid = pid;
- io_parms.tcon = pTcon;
- io_parms.offset = *poffset;
- io_parms.length = cur_len;
- rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
- &read_data, &buf_type);
- pSMBr = (struct smb_com_read_rsp *)read_data;
- if (read_data) {
- char *data_offset = read_data + 4 +
- le16_to_cpu(pSMBr->DataOffset);
- if (memcpy_toiovecend(iov, data_offset,
- iov_offset, bytes_read))
- rc = -EFAULT;
- if (buf_type == CIFS_SMALL_BUFFER)
- cifs_small_buf_release(read_data);
- else if (buf_type == CIFS_LARGE_BUFFER)
- cifs_buf_release(read_data);
- read_data = NULL;
- iov_offset += bytes_read;
- }
+ /* allocate a readdata struct */
+ rdata = cifs_readdata_alloc(npages,
+ cifs_uncached_readv_complete);
+ if (!rdata) {
+ rc = -ENOMEM;
+ goto error;
}
- if (rc || (bytes_read == 0)) {
- if (total_read) {
- break;
- } else {
- FreeXid(xid);
- return rc;
+ rc = cifs_read_allocate_pages(&rdata->pages, npages);
+ if (rc)
+ goto error;
+
+ rdata->cfile = cifsFileInfo_get(open_file);
+ rdata->offset = offset;
+ rdata->bytes = cur_len;
+ rdata->pid = pid;
+ rdata->marshal_iov = cifs_uncached_read_marshal_iov;
+
+ rc = cifs_retry_async_readv(rdata);
+error:
+ if (rc) {
+ kref_put(&rdata->refcount,
+ cifs_uncached_readdata_release);
+ break;
+ }
+
+ list_add_tail(&rdata->list, &rdata_list);
+ offset += cur_len;
+ len -= cur_len;
+ } while (len > 0);
+
+ /* if at least one read request send succeeded, then reset rc */
+ if (!list_empty(&rdata_list))
+ rc = 0;
+
+ /* the loop below should proceed in the order of increasing offsets */
+restart_loop:
+ list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
+ if (!rc) {
+ ssize_t copied;
+
+ /* FIXME: freezable sleep too? */
+ rc = wait_for_completion_killable(&rdata->done);
+ if (rc)
+ rc = -EINTR;
+ else if (rdata->result)
+ rc = rdata->result;
+ else {
+ rc = cifs_readdata_to_iov(rdata, iov,
+ nr_segs, *poffset,
+ &copied);
+ total_read += copied;
+ }
+
+ /* resend call if it's a retryable error */
+ if (rc == -EAGAIN) {
+ rc = cifs_retry_async_readv(rdata);
+ goto restart_loop;
}
- } else {
- cifs_stats_bytes_read(pTcon, bytes_read);
- *poffset += bytes_read;
}
+ list_del_init(&rdata->list);
+ kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}
- FreeXid(xid);
- return total_read;
+ cifs_stats_bytes_read(tcon, total_read);
+ *poffset += total_read;
+
+ return total_read ? total_read : rc;
}
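/*
 * Illustrative sketch, not part of the patch above: the request-splitting loop
 * at the top of the reworked cifs_iovec_read() -- a read of len bytes is cut
 * into chunks of at most rsize bytes, each backed by
 * DIV_ROUND_UP(cur_len, PAGE_SIZE) pages. Plain user-space arithmetic; the
 * rsize and page-size values are made up for the example.
 */
#include <stdio.h>

#define PAGE_SIZE	4096u
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long offset = 0, len = 130000, rsize = 61440;

	while (len > 0) {
		unsigned long cur_len = len < rsize ? len : rsize;
		unsigned long npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		printf("request: offset=%lu len=%lu pages=%lu\n",
		       offset, cur_len, npages);
		offset += cur_len;	/* next chunk starts where this ends */
		len -= cur_len;
	}
	return 0;
}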
ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
@@ -2606,6 +2859,100 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
return rc;
}
+static void
+cifs_readv_complete(struct work_struct *work)
+{
+ struct cifs_readdata *rdata = container_of(work,
+ struct cifs_readdata, work);
+ struct page *page, *tpage;
+
+ list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+ list_del(&page->lru);
+ lru_cache_add_file(page);
+
+ if (rdata->result == 0) {
+ kunmap(page);
+ flush_dcache_page(page);
+ SetPageUptodate(page);
+ }
+
+ unlock_page(page);
+
+ if (rdata->result == 0)
+ cifs_readpage_to_fscache(rdata->mapping->host, page);
+
+ page_cache_release(page);
+ }
+ kref_put(&rdata->refcount, cifs_readdata_release);
+}
+
+static int
+cifs_readpages_marshal_iov(struct cifs_readdata *rdata, unsigned int remaining)
+{
+ int len = 0;
+ struct page *page, *tpage;
+ u64 eof;
+ pgoff_t eof_index;
+
+ /* determine the eof that the server (probably) has */
+ eof = CIFS_I(rdata->mapping->host)->server_eof;
+ eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
+ cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
+
+ rdata->nr_iov = 1;
+ list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+ if (remaining >= PAGE_CACHE_SIZE) {
+ /* enough data to fill the page */
+ rdata->iov[rdata->nr_iov].iov_base = kmap(page);
+ rdata->iov[rdata->nr_iov].iov_len = PAGE_CACHE_SIZE;
+ cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
+ rdata->nr_iov, page->index,
+ rdata->iov[rdata->nr_iov].iov_base,
+ rdata->iov[rdata->nr_iov].iov_len);
+ ++rdata->nr_iov;
+ len += PAGE_CACHE_SIZE;
+ remaining -= PAGE_CACHE_SIZE;
+ } else if (remaining > 0) {
+ /* enough for partial page, fill and zero the rest */
+ rdata->iov[rdata->nr_iov].iov_base = kmap(page);
+ rdata->iov[rdata->nr_iov].iov_len = remaining;
+ cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
+ rdata->nr_iov, page->index,
+ rdata->iov[rdata->nr_iov].iov_base,
+ rdata->iov[rdata->nr_iov].iov_len);
+ memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
+ '\0', PAGE_CACHE_SIZE - remaining);
+ ++rdata->nr_iov;
+ len += remaining;
+ remaining = 0;
+ } else if (page->index > eof_index) {
+ /*
+ * The VFS will not try to do readahead past the
+ * i_size, but it's possible that we have outstanding
+ * writes with gaps in the middle and the i_size hasn't
+ * caught up yet. Populate those with zeroed out pages
+ * to prevent the VFS from repeatedly attempting to
+ * fill them until the writes are flushed.
+ */
+ zero_user(page, 0, PAGE_CACHE_SIZE);
+ list_del(&page->lru);
+ lru_cache_add_file(page);
+ flush_dcache_page(page);
+ SetPageUptodate(page);
+ unlock_page(page);
+ page_cache_release(page);
+ } else {
+ /* no need to hold page hostage */
+ list_del(&page->lru);
+ lru_cache_add_file(page);
+ unlock_page(page);
+ page_cache_release(page);
+ }
+ }
+
+ return len;
+}
+
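/*
 * Illustrative sketch, not part of the patch above: the eof_index arithmetic
 * used by cifs_readpages_marshal_iov(). The last page index the server is
 * believed to cover is (eof - 1) >> PAGE_SHIFT; pages with a higher index are
 * zero-filled instead of read. The sizes below are arbitrary examples.
 */
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4096-byte pages, as on x86 */

int main(void)
{
	unsigned long long sizes[] = { 0, 1, 4096, 4097, 123456 };

	for (int i = 0; i < 5; i++) {
		unsigned long long eof = sizes[i];
		unsigned long eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;

		printf("eof=%llu -> last covered page index %lu\n",
		       eof, eof_index);
	}
	return 0;
}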
static int cifs_readpages(struct file *file, struct address_space *mapping,
struct list_head *page_list, unsigned num_pages)
{
@@ -2708,7 +3055,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
nr_pages++;
}
- rdata = cifs_readdata_alloc(nr_pages);
+ rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
if (!rdata) {
/* best to give up if we're out of mem */
list_for_each_entry_safe(page, tpage, &tmplist, lru) {
@@ -2722,24 +3069,16 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
}
spin_lock(&cifs_file_list_lock);
- cifsFileInfo_get(open_file);
spin_unlock(&cifs_file_list_lock);
- rdata->cfile = open_file;
+ rdata->cfile = cifsFileInfo_get(open_file);
rdata->mapping = mapping;
rdata->offset = offset;
rdata->bytes = bytes;
rdata->pid = pid;
+ rdata->marshal_iov = cifs_readpages_marshal_iov;
list_splice_init(&tmplist, &rdata->pages);
- do {
- if (open_file->invalidHandle) {
- rc = cifs_reopen_file(open_file, true);
- if (rc != 0)
- continue;
- }
- rc = cifs_async_readv(rdata);
- } while (rc == -EAGAIN);
-
+ rc = cifs_retry_async_readv(rdata);
if (rc != 0) {
list_for_each_entry_safe(page, tpage, &rdata->pages,
lru) {
@@ -2748,9 +3087,11 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
unlock_page(page);
page_cache_release(page);
}
- cifs_readdata_free(rdata);
+ kref_put(&rdata->refcount, cifs_readdata_release);
break;
}
+
+ kref_put(&rdata->refcount, cifs_readdata_release);
}
return rc;
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 4221b5e48a4..6d2667f0c98 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -51,7 +51,15 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
cifs_sb = CIFS_SB(inode->i_sb);
switch (command) {
+ static bool warned = false;
case CIFS_IOC_CHECKUMOUNT:
+ if (!warned) {
+ warned = true;
+ cERROR(1, "the CIFS_IOC_CHECKMOUNT ioctl will "
+ "be deprecated in 3.7. Please "
+ "migrate away from the use of "
+ "umount.cifs");
+ }
cFYI(1, "User unmount attempted");
if (cifs_sb->mnt_uid == current_uid())
rc = 0;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index c29d1aa2c54..e2552d2b2e4 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -306,8 +306,6 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
const struct cifs_tcon *treeCon, int word_count
/* length of fixed section (word count) in two byte units */)
{
- struct list_head *temp_item;
- struct cifs_ses *ses;
char *temp = (char *) buffer;
memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */
@@ -337,51 +335,6 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
/* Uid is not converted */
buffer->Uid = treeCon->ses->Suid;
buffer->Mid = GetNextMid(treeCon->ses->server);
- if (multiuser_mount != 0) {
- /* For the multiuser case, there are few obvious technically */
- /* possible mechanisms to match the local linux user (uid) */
- /* to a valid remote smb user (smb_uid): */
- /* 1) Query Winbind (or other local pam/nss daemon */
- /* for userid/password/logon_domain or credential */
- /* 2) Query Winbind for uid to sid to username mapping */
- /* and see if we have a matching password for existing*/
- /* session for that user perhas getting password by */
- /* adding a new pam_cifs module that stores passwords */
- /* so that the cifs vfs can get at that for all logged*/
- /* on users */
- /* 3) (Which is the mechanism we have chosen) */
- /* Search through sessions to the same server for a */
- /* a match on the uid that was passed in on mount */
- /* with the current processes uid (or euid?) and use */
- /* that smb uid. If no existing smb session for */
- /* that uid found, use the default smb session ie */
- /* the smb session for the volume mounted which is */
- /* the same as would be used if the multiuser mount */
- /* flag were disabled. */
-
- /* BB Add support for establishing new tCon and SMB Session */
- /* with userid/password pairs found on the smb session */
- /* for other target tcp/ip addresses BB */
- if (current_fsuid() != treeCon->ses->linux_uid) {
- cFYI(1, "Multiuser mode and UID "
- "did not match tcon uid");
- spin_lock(&cifs_tcp_ses_lock);
- list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) {
- ses = list_entry(temp_item, struct cifs_ses, smb_ses_list);
- if (ses->linux_uid == current_fsuid()) {
- if (ses->server == treeCon->ses->server) {
- cFYI(1, "found matching uid substitute right smb_uid");
- buffer->Uid = ses->Suid;
- break;
- } else {
- /* BB eventually call cifs_setup_session here */
- cFYI(1, "local UID found but no smb sess with this server exists");
- }
- }
- }
- spin_unlock(&cifs_tcp_ses_lock);
- }
- }
}
if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
buffer->Flags2 |= SMBFLG2_DFS;
@@ -700,22 +653,3 @@ backup_cred(struct cifs_sb_info *cifs_sb)
return false;
}
-
-void
-cifs_add_credits(struct TCP_Server_Info *server, const unsigned int add)
-{
- spin_lock(&server->req_lock);
- server->credits += add;
- server->in_flight--;
- spin_unlock(&server->req_lock);
- wake_up(&server->request_q);
-}
-
-void
-cifs_set_credits(struct TCP_Server_Info *server, const int val)
-{
- spin_lock(&server->req_lock);
- server->credits = val;
- server->oplocks = val > 1 ? enable_oplocks : false;
- spin_unlock(&server->req_lock);
-}
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index e2bbc683e01..0a8224d1c4c 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -219,6 +219,7 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
static int initiate_cifs_search(const int xid, struct file *file)
{
+ __u16 search_flags;
int rc = 0;
char *full_path = NULL;
struct cifsFileInfo *cifsFile;
@@ -270,8 +271,12 @@ ffirst_retry:
cifsFile->srch_inf.info_level = SMB_FIND_FILE_DIRECTORY_INFO;
}
+ search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
+ if (backup_cred(cifs_sb))
+ search_flags |= CIFS_SEARCH_BACKUP_SEARCH;
+
rc = CIFSFindFirst(xid, pTcon, full_path, cifs_sb->local_nls,
- &cifsFile->netfid, &cifsFile->srch_inf,
+ &cifsFile->netfid, search_flags, &cifsFile->srch_inf,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR, CIFS_DIR_SEP(cifs_sb));
if (rc == 0)
@@ -502,11 +507,13 @@ static int cifs_save_resume_key(const char *current_entry,
static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
struct file *file, char **ppCurrentEntry, int *num_to_ret)
{
+ __u16 search_flags;
int rc = 0;
int pos_in_buf = 0;
loff_t first_entry_in_buffer;
loff_t index_to_find = file->f_pos;
struct cifsFileInfo *cifsFile = file->private_data;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
/* check if index in the buffer */
if ((cifsFile == NULL) || (ppCurrentEntry == NULL) ||
@@ -560,10 +567,14 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
cifsFile);
}
+ search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
+ if (backup_cred(cifs_sb))
+ search_flags |= CIFS_SEARCH_BACKUP_SEARCH;
+
while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) &&
(rc == 0) && !cifsFile->srch_inf.endOfSearch) {
cFYI(1, "calling findnext2");
- rc = CIFSFindNext(xid, pTcon, cifsFile->netfid,
+ rc = CIFSFindNext(xid, pTcon, cifsFile->netfid, search_flags,
&cifsFile->srch_inf);
/* FindFirst/Next set last_entry to NULL on malformed reply */
if (cifsFile->srch_inf.last_entry)
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
new file mode 100644
index 00000000000..d9d615fbed3
--- /dev/null
+++ b/fs/cifs/smb1ops.c
@@ -0,0 +1,154 @@
+/*
+ * SMB1 (CIFS) version specific operations
+ *
+ * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2 as published
+ * by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_debug.h"
+#include "cifspdu.h"
+
+/*
+ * An NT cancel request header looks just like the original request except:
+ *
+ * The Command is SMB_COM_NT_CANCEL
+ * The WordCount is zeroed out
+ * The ByteCount is zeroed out
+ *
+ * This function mangles an existing request buffer into a
+ * SMB_COM_NT_CANCEL request and then sends it.
+ */
+static int
+send_nt_cancel(struct TCP_Server_Info *server, void *buf,
+ struct mid_q_entry *mid)
+{
+ int rc = 0;
+ struct smb_hdr *in_buf = (struct smb_hdr *)buf;
+
+ /* -4 for RFC1001 length and +2 for BCC field */
+ in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2);
+ in_buf->Command = SMB_COM_NT_CANCEL;
+ in_buf->WordCount = 0;
+ put_bcc(0, in_buf);
+
+ mutex_lock(&server->srv_mutex);
+ rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
+ if (rc) {
+ mutex_unlock(&server->srv_mutex);
+ return rc;
+ }
+ rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
+ mutex_unlock(&server->srv_mutex);
+
+ cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
+ in_buf->Mid, rc);
+
+ return rc;
+}
+
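/*
 * Illustrative sketch, not part of the patch above: what send_nt_cancel()
 * does to the original request header -- same MID, command byte replaced by
 * SMB_COM_NT_CANCEL (0xa4), word count and byte count zeroed, and the RFC1001
 * length recomputed for a header-only frame. The struct here is a toy
 * stand-in for struct smb_hdr, not the real wire layout.
 */
#include <stdio.h>
#include <stdint.h>

#define SMB_COM_NT_CANCEL 0xa4

struct hdr_sketch {
	unsigned int frame_len;	/* RFC1001 length field */
	uint8_t  command;
	uint16_t mid;
	uint8_t  word_count;
	uint16_t byte_count;
};

static void make_nt_cancel(struct hdr_sketch *h, unsigned int header_size)
{
	/* -4 for the RFC1001 length itself, +2 for the (zero) byte count */
	h->frame_len = header_size - 4 + 2;
	h->command = SMB_COM_NT_CANCEL;
	h->word_count = 0;
	h->byte_count = 0;	/* the MID is deliberately left untouched */
}

int main(void)
{
	struct hdr_sketch h = { 100, 0x2e /* read request */, 42, 12, 60 };

	make_nt_cancel(&h, 32 /* pretend header size for the example */);
	printf("cmd=0x%x mid=%u wct=%u bcc=%u len=%u\n",
	       h.command, h.mid, h.word_count, h.byte_count, h.frame_len);
	return 0;
}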
+static bool
+cifs_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
+{
+ return ob1->netfid == ob2->netfid;
+}
+
+static unsigned int
+cifs_read_data_offset(char *buf)
+{
+ READ_RSP *rsp = (READ_RSP *)buf;
+ return le16_to_cpu(rsp->DataOffset);
+}
+
+static unsigned int
+cifs_read_data_length(char *buf)
+{
+ READ_RSP *rsp = (READ_RSP *)buf;
+ return (le16_to_cpu(rsp->DataLengthHigh) << 16) +
+ le16_to_cpu(rsp->DataLength);
+}
+
+static struct mid_q_entry *
+cifs_find_mid(struct TCP_Server_Info *server, char *buffer)
+{
+ struct smb_hdr *buf = (struct smb_hdr *)buffer;
+ struct mid_q_entry *mid;
+
+ spin_lock(&GlobalMid_Lock);
+ list_for_each_entry(mid, &server->pending_mid_q, qhead) {
+ if (mid->mid == buf->Mid &&
+ mid->mid_state == MID_REQUEST_SUBMITTED &&
+ le16_to_cpu(mid->command) == buf->Command) {
+ spin_unlock(&GlobalMid_Lock);
+ return mid;
+ }
+ }
+ spin_unlock(&GlobalMid_Lock);
+ return NULL;
+}
+
+static void
+cifs_add_credits(struct TCP_Server_Info *server, const unsigned int add)
+{
+ spin_lock(&server->req_lock);
+ server->credits += add;
+ server->in_flight--;
+ spin_unlock(&server->req_lock);
+ wake_up(&server->request_q);
+}
+
+static void
+cifs_set_credits(struct TCP_Server_Info *server, const int val)
+{
+ spin_lock(&server->req_lock);
+ server->credits = val;
+ server->oplocks = val > 1 ? enable_oplocks : false;
+ spin_unlock(&server->req_lock);
+}
+
+static int *
+cifs_get_credits_field(struct TCP_Server_Info *server)
+{
+ return &server->credits;
+}
+
+struct smb_version_operations smb1_operations = {
+ .send_cancel = send_nt_cancel,
+ .compare_fids = cifs_compare_fids,
+ .setup_request = cifs_setup_request,
+ .check_receive = cifs_check_receive,
+ .add_credits = cifs_add_credits,
+ .set_credits = cifs_set_credits,
+ .get_credits_field = cifs_get_credits_field,
+ .read_data_offset = cifs_read_data_offset,
+ .read_data_length = cifs_read_data_length,
+ .map_error = map_smb_to_linux_error,
+ .find_mid = cifs_find_mid,
+ .check_message = checkSMB,
+ .dump_detail = cifs_dump_detail,
+ .is_oplock_break = is_valid_oplock_break,
+};
+
+struct smb_version_values smb1_values = {
+ .version_string = SMB1_VERSION_STRING,
+ .large_lock_type = LOCKING_ANDX_LARGE_FILES,
+ .exclusive_lock_type = 0,
+ .shared_lock_type = LOCKING_ANDX_SHARED_LOCK,
+ .unlock_lock_type = 0,
+ .header_size = sizeof(struct smb_hdr),
+ .max_header_size = MAX_CIFS_HDR_SIZE,
+ .read_rsp_size = sizeof(READ_RSP),
+};
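/*
 * Illustrative sketch, not part of the patch above: the dispatch pattern that
 * smb1_operations/smb1_values enable -- the connection keeps a pointer to a
 * per-dialect ops table and calls through it instead of hard-coding SMB1
 * helpers. All types and functions below are toy stand-ins.
 */
#include <stdio.h>

struct version_ops {
	const char *(*dialect_name)(void);
	unsigned int (*header_size)(void);
};

static const char *smb1_name(void) { return "1"; }
static unsigned int smb1_hdr(void) { return 36; }	/* made-up size */

static const struct version_ops smb1_ops_sketch = {
	.dialect_name = smb1_name,
	.header_size  = smb1_hdr,
};

struct server_sketch {
	const struct version_ops *ops;	/* set once at mount time */
};

int main(void)
{
	struct server_sketch srv = { .ops = &smb1_ops_sketch };

	/* callers never name the dialect; they go through srv.ops */
	printf("vers=%s header=%u bytes\n",
	       srv.ops->dialect_name(), srv.ops->header_size());
	return 0;
}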
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
new file mode 100644
index 00000000000..f065e89756a
--- /dev/null
+++ b/fs/cifs/smb2ops.c
@@ -0,0 +1,27 @@
+/*
+ * SMB2 version specific operations
+ *
+ * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2 as published
+ * by the Free Software Foundation.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "cifsglob.h"
+
+struct smb_version_operations smb21_operations = {
+};
+
+struct smb_version_values smb21_values = {
+ .version_string = SMB21_VERSION_STRING,
+};
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 0961336513d..1b36ffe6a47 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -304,7 +304,8 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int optype,
static int
wait_for_free_request(struct TCP_Server_Info *server, const int optype)
{
- return wait_for_free_credits(server, optype, get_credits_field(server));
+ return wait_for_free_credits(server, optype,
+ server->ops->get_credits_field(server));
}
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
@@ -396,7 +397,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
rc = cifs_setup_async_request(server, iov, nvec, &mid);
if (rc) {
mutex_unlock(&server->srv_mutex);
- cifs_add_credits(server, 1);
+ add_credits(server, 1);
wake_up(&server->request_q);
return rc;
}
@@ -418,7 +419,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
return rc;
out_err:
delete_mid(mid);
- cifs_add_credits(server, 1);
+ add_credits(server, 1);
wake_up(&server->request_q);
return rc;
}
@@ -483,41 +484,11 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
return rc;
}
-/*
- * An NT cancel request header looks just like the original request except:
- *
- * The Command is SMB_COM_NT_CANCEL
- * The WordCount is zeroed out
- * The ByteCount is zeroed out
- *
- * This function mangles an existing request buffer into a
- * SMB_COM_NT_CANCEL request and then sends it.
- */
-static int
-send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
- struct mid_q_entry *mid)
+static inline int
+send_cancel(struct TCP_Server_Info *server, void *buf, struct mid_q_entry *mid)
{
- int rc = 0;
-
- /* -4 for RFC1001 length and +2 for BCC field */
- in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2);
- in_buf->Command = SMB_COM_NT_CANCEL;
- in_buf->WordCount = 0;
- put_bcc(0, in_buf);
-
- mutex_lock(&server->srv_mutex);
- rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
- if (rc) {
- mutex_unlock(&server->srv_mutex);
- return rc;
- }
- rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
- mutex_unlock(&server->srv_mutex);
-
- cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
- in_buf->Mid, rc);
-
- return rc;
+ return server->ops->send_cancel ?
+ server->ops->send_cancel(server, buf, mid) : 0;
}
int
@@ -544,7 +515,7 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
return map_smb_to_linux_error(mid->resp_buf, log_error);
}
-static int
+int
cifs_setup_request(struct cifs_ses *ses, struct kvec *iov,
unsigned int nvec, struct mid_q_entry **ret_mid)
{
@@ -607,12 +578,12 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
mutex_lock(&ses->server->srv_mutex);
- rc = cifs_setup_request(ses, iov, n_vec, &midQ);
+ rc = ses->server->ops->setup_request(ses, iov, n_vec, &midQ);
if (rc) {
mutex_unlock(&ses->server->srv_mutex);
cifs_small_buf_release(buf);
/* Update # of requests on wire to server */
- cifs_add_credits(ses->server, 1);
+ add_credits(ses->server, 1);
return rc;
}
@@ -636,13 +607,13 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
rc = wait_for_response(ses->server, midQ);
if (rc != 0) {
- send_nt_cancel(ses->server, (struct smb_hdr *)buf, midQ);
+ send_cancel(ses->server, buf, midQ);
spin_lock(&GlobalMid_Lock);
if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock);
cifs_small_buf_release(buf);
- cifs_add_credits(ses->server, 1);
+ add_credits(ses->server, 1);
return rc;
}
spin_unlock(&GlobalMid_Lock);
@@ -652,7 +623,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
rc = cifs_sync_mid_result(midQ, ses->server);
if (rc != 0) {
- cifs_add_credits(ses->server, 1);
+ add_credits(ses->server, 1);
return rc;
}
@@ -670,14 +641,15 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
else
*pRespBufType = CIFS_SMALL_BUFFER;
- rc = cifs_check_receive(midQ, ses->server, flags & CIFS_LOG_ERROR);
+ rc = ses->server->ops->check_receive(midQ, ses->server,
+ flags & CIFS_LOG_ERROR);
/* mark it so buf will not be freed by delete_mid */
if ((flags & CIFS_NO_RESP) == 0)
midQ->resp_buf = NULL;
out:
delete_mid(midQ);
- cifs_add_credits(ses->server, 1);
+ add_credits(ses->server, 1);
return rc;
}
@@ -727,7 +699,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
if (rc) {
mutex_unlock(&ses->server->srv_mutex);
/* Update # of requests on wire to server */
- cifs_add_credits(ses->server, 1);
+ add_credits(ses->server, 1);
return rc;
}
@@ -753,13 +725,13 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
rc = wait_for_response(ses->server, midQ);
if (rc != 0) {
- send_nt_cancel(ses->server, in_buf, midQ);
+ send_cancel(ses->server, in_buf, midQ);
spin_lock(&GlobalMid_Lock);
if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
/* no longer considered to be "in-flight" */
midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock);
- cifs_add_credits(ses->server, 1);
+ add_credits(ses->server, 1);
return rc;
}
spin_unlock(&GlobalMid_Lock);
@@ -767,7 +739,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
rc = cifs_sync_mid_result(midQ, ses->server);
if (rc != 0) {
- cifs_add_credits(ses->server, 1);
+ add_credits(ses->server, 1);
return rc;
}
@@ -783,7 +755,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
rc = cifs_check_receive(midQ, ses->server, 0);
out:
delete_mid(midQ);
- cifs_add_credits(ses->server, 1);
+ add_credits(ses->server, 1);
return rc;
}
@@ -898,7 +870,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
if (in_buf->Command == SMB_COM_TRANSACTION2) {
/* POSIX lock. We send a NT_CANCEL SMB to cause the
blocking lock to return. */
- rc = send_nt_cancel(ses->server, in_buf, midQ);
+ rc = send_cancel(ses->server, in_buf, midQ);
if (rc) {
delete_mid(midQ);
return rc;
@@ -919,7 +891,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
rc = wait_for_response(ses->server, midQ);
if (rc) {
- send_nt_cancel(ses->server, in_buf, midQ);
+ send_cancel(ses->server, in_buf, midQ);
spin_lock(&GlobalMid_Lock);
if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
/* no longer considered to be "in-flight" */
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 2870597b5c9..f1813120d75 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -244,7 +244,7 @@ static void coda_put_super(struct super_block *sb)
static void coda_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
coda_cache_clear_inode(inode);
}
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 5dfafdd1dbd..2340f6978d6 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -20,6 +20,7 @@
#include <linux/namei.h>
#include <linux/debugfs.h>
#include <linux/io.h>
+#include <linux/slab.h>
static ssize_t default_read_file(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
@@ -520,6 +521,133 @@ struct dentry *debugfs_create_blob(const char *name, umode_t mode,
}
EXPORT_SYMBOL_GPL(debugfs_create_blob);
+struct array_data {
+ void *array;
+ u32 elements;
+};
+
+static int u32_array_open(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+ return nonseekable_open(inode, file);
+}
+
+static size_t format_array(char *buf, size_t bufsize, const char *fmt,
+ u32 *array, u32 array_size)
+{
+ size_t ret = 0;
+ u32 i;
+
+ for (i = 0; i < array_size; i++) {
+ size_t len;
+
+ len = snprintf(buf, bufsize, fmt, array[i]);
+ len++; /* ' ' or '\n' */
+ ret += len;
+
+ if (buf) {
+ buf += len;
+ bufsize -= len;
+ buf[-1] = (i == array_size-1) ? '\n' : ' ';
+ }
+ }
+
+ ret++; /* \0 */
+ if (buf)
+ *buf = '\0';
+
+ return ret;
+}
+
+static char *format_array_alloc(const char *fmt, u32 *array,
+ u32 array_size)
+{
+ size_t len = format_array(NULL, 0, fmt, array, array_size);
+ char *ret;
+
+ ret = kmalloc(len, GFP_KERNEL);
+ if (ret == NULL)
+ return NULL;
+
+ format_array(ret, len, fmt, array, array_size);
+ return ret;
+}
+
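/*
 * Illustrative sketch, not part of the patch above: the two-pass sizing idiom
 * behind format_array()/format_array_alloc() -- the first pass runs with a
 * NULL buffer purely to measure, the second fills the freshly allocated
 * buffer. This user-space version leans on snprintf()'s "measure when size is
 * 0" behaviour instead of the kernel helpers.
 */
#include <stdio.h>
#include <stdlib.h>

static size_t format_u32s(char *buf, size_t bufsize, const unsigned *a, unsigned n)
{
	size_t used = 0;

	for (unsigned i = 0; i < n; i++) {
		int len = snprintf(buf ? buf + used : NULL,
				   buf ? bufsize - used : 0,
				   "%u%c", a[i], i == n - 1 ? '\n' : ' ');
		used += (size_t)len;
	}
	return used + 1;	/* room for the trailing NUL */
}

int main(void)
{
	unsigned vals[] = { 3, 14, 159 };
	size_t need = format_u32s(NULL, 0, vals, 3);	/* pass 1: measure */
	char *out = malloc(need);

	if (!out)
		return 1;
	format_u32s(out, need, vals, 3);		/* pass 2: format */
	fputs(out, stdout);
	free(out);
	return 0;
}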
+static ssize_t u32_array_read(struct file *file, char __user *buf, size_t len,
+ loff_t *ppos)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct array_data *data = inode->i_private;
+ size_t size;
+
+ if (*ppos == 0) {
+ if (file->private_data) {
+ kfree(file->private_data);
+ file->private_data = NULL;
+ }
+
+ file->private_data = format_array_alloc("%u", data->array,
+ data->elements);
+ }
+
+ size = 0;
+ if (file->private_data)
+ size = strlen(file->private_data);
+
+ return simple_read_from_buffer(buf, len, ppos,
+ file->private_data, size);
+}
+
+static int u32_array_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+
+ return 0;
+}
+
+static const struct file_operations u32_array_fops = {
+ .owner = THIS_MODULE,
+ .open = u32_array_open,
+ .release = u32_array_release,
+ .read = u32_array_read,
+ .llseek = no_llseek,
+};
+
+/**
+ * debugfs_create_u32_array - create a debugfs file that is used to read u32
+ * array.
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have.
+ * @parent: a pointer to the parent dentry for this file. This should be a
+ * directory dentry if set. If this parameter is %NULL, then the
+ * file will be created in the root of the debugfs filesystem.
+ * @array: u32 array that provides data.
+ * @elements: total number of elements in the array.
+ *
+ * This function creates a file in debugfs with the given name that exports
+ * @array as data. If @mode permits, the file can be read from.
+ * Writing is not supported. Seeking within the file is also not supported.
+ * Once the array is created, its size cannot be changed.
+ *
+ * The function returns a pointer to dentry on success. If debugfs is not
+ * enabled in the kernel, the value -%ENODEV will be returned.
+ */
+struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
+ struct dentry *parent,
+ u32 *array, u32 elements)
+{
+ struct array_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
+
+ if (data == NULL)
+ return NULL;
+
+ data->array = array;
+ data->elements = elements;
+
+ return debugfs_create_file(name, mode, parent, data, &u32_array_fops);
+}
+EXPORT_SYMBOL_GPL(debugfs_create_u32_array);
+
#ifdef CONFIG_HAS_IOMEM
/*
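A usage sketch for the new helper (not part of this patch; the "my_driver" module, its stats array and the directory name are hypothetical): a driver hands debugfs_create_u32_array() a u32 array and gets a read-only file that dumps the current values on every read.

#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/stat.h>

static u32 my_driver_stats[4];
static struct dentry *my_dir;

static int __init my_driver_debugfs_init(void)
{
	my_dir = debugfs_create_dir("my_driver", NULL);
	if (!my_dir)
		return -ENODEV;

	/* Readable by everyone; writes and seeks are not supported. */
	if (!debugfs_create_u32_array("stats", S_IRUGO, my_dir,
				      my_driver_stats,
				      ARRAY_SIZE(my_driver_stats)))
		return -ENODEV;
	return 0;
}
module_init(my_driver_debugfs_init);

static void __exit my_driver_debugfs_exit(void)
{
	debugfs_remove_recursive(my_dir);
}
module_exit(my_driver_debugfs_exit);

MODULE_LICENSE("GPL");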
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index 2dd946b636d..e879cf8ff0b 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -133,7 +133,7 @@ static int ecryptfs_statfs(struct dentry *dentry, struct kstatfs *buf)
static void ecryptfs_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
iput(ecryptfs_inode_to_lower(inode));
}
diff --git a/fs/exofs/Kbuild b/fs/exofs/Kbuild
index 352ba149d23..389ba8312d5 100644
--- a/fs/exofs/Kbuild
+++ b/fs/exofs/Kbuild
@@ -16,5 +16,5 @@
libore-y := ore.o ore_raid.o
obj-$(CONFIG_ORE) += libore.o
-exofs-y := inode.o file.o symlink.o namei.o dir.o super.o
+exofs-y := inode.o file.o symlink.o namei.o dir.o super.o sys.o
obj-$(CONFIG_EXOFS_FS) += exofs.o
diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h
index ca9d49665ef..fffe86fd7a4 100644
--- a/fs/exofs/exofs.h
+++ b/fs/exofs/exofs.h
@@ -56,6 +56,9 @@
struct exofs_dev {
struct ore_dev ored;
unsigned did;
+ unsigned urilen;
+ uint8_t *uri;
+ struct kobject ed_kobj;
};
/*
* our extension to the in-memory superblock
@@ -73,6 +76,7 @@ struct exofs_sb_info {
struct ore_layout layout; /* Default files layout */
struct ore_comp one_comp; /* id & cred of partition id=0*/
struct ore_components oc; /* comps for the partition */
+ struct kobject s_kobj; /* holds per-sbi kobject */
};
/*
@@ -176,6 +180,16 @@ void exofs_make_credential(u8 cred_a[OSD_CAP_LEN],
const struct osd_obj_id *obj);
int exofs_sbi_write_stats(struct exofs_sb_info *sbi);
+/* sys.c */
+int exofs_sysfs_init(void);
+void exofs_sysfs_uninit(void);
+int exofs_sysfs_sb_add(struct exofs_sb_info *sbi,
+ struct exofs_dt_device_info *dt_dev);
+void exofs_sysfs_sb_del(struct exofs_sb_info *sbi);
+int exofs_sysfs_odev_add(struct exofs_dev *edev,
+ struct exofs_sb_info *sbi);
+void exofs_sysfs_dbg_print(void);
+
/*********************
* operation vectors *
*********************/
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index ea5e1f97806..5badb0c039d 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -1473,7 +1473,7 @@ void exofs_evict_inode(struct inode *inode)
goto no_delete;
inode->i_size = 0;
- end_writeback(inode);
+ clear_inode(inode);
/* if we are deleting an obj that hasn't been created yet, wait.
* This also makes sure that create_done cannot be called with an
@@ -1503,5 +1503,5 @@ void exofs_evict_inode(struct inode *inode)
return;
no_delete:
- end_writeback(inode);
+ clear_inode(inode);
}
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 735ca06430a..433783624d1 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -472,6 +472,7 @@ static void exofs_put_super(struct super_block *sb)
_exofs_print_device("Unmounting", NULL, ore_comp_dev(&sbi->oc, 0),
sbi->one_comp.obj.partition);
+ exofs_sysfs_sb_del(sbi);
bdi_destroy(&sbi->bdi);
exofs_free_sbi(sbi);
sb->s_fs_info = NULL;
@@ -632,6 +633,12 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info *sbi,
memcpy(&sbi->oc.ods[numdevs], &sbi->oc.ods[0],
(numdevs - 1) * sizeof(sbi->oc.ods[0]));
+ /* create sysfs subdir under which we put the device table
* and the cluster layout. A superblock is identified by the string:
+ * "dev[0].osdname"_"pid"
+ */
+ exofs_sysfs_sb_add(sbi, &dt->dt_dev_table[0]);
+
for (i = 0; i < numdevs; i++) {
struct exofs_fscb fscb;
struct osd_dev_info odi;
@@ -657,6 +664,7 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info *sbi,
eds[i].ored.od = fscb_od;
++sbi->oc.numdevs;
fscb_od = NULL;
+ exofs_sysfs_odev_add(&eds[i], sbi);
continue;
}
@@ -682,6 +690,7 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info *sbi,
odi.osdname);
goto out;
}
+ exofs_sysfs_odev_add(&eds[i], sbi);
/* TODO: verify other information is correct and FS-uuid
* matches. Benny what did you say about device table
@@ -745,7 +754,6 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
sbi->one_comp.obj.partition = opts->pid;
sbi->one_comp.obj.id = 0;
exofs_make_credential(sbi->one_comp.cred, &sbi->one_comp.obj);
- sbi->oc.numdevs = 1;
sbi->oc.single_comp = EC_SINGLE_COMP;
sbi->oc.comps = &sbi->one_comp;
@@ -804,6 +812,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
goto free_sbi;
ore_comp_set_dev(&sbi->oc, 0, od);
+ sbi->oc.numdevs = 1;
}
__sbi_read_stats(sbi);
@@ -844,6 +853,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
goto free_sbi;
}
+ exofs_sysfs_dbg_print();
_exofs_print_device("Mounting", opts->dev_name,
ore_comp_dev(&sbi->oc, 0),
sbi->one_comp.obj.partition);
@@ -1023,6 +1033,9 @@ static int __init init_exofs(void)
if (err)
goto out_d;
+ /* We don't fail if sysfs creation failed */
+ exofs_sysfs_init();
+
return 0;
out_d:
destroy_inodecache();
@@ -1032,6 +1045,7 @@ out:
static void __exit exit_exofs(void)
{
+ exofs_sysfs_uninit();
unregister_filesystem(&exofs_type);
destroy_inodecache();
}
diff --git a/fs/exofs/sys.c b/fs/exofs/sys.c
new file mode 100644
index 00000000000..e32bc919e4e
--- /dev/null
+++ b/fs/exofs/sys.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2012
+ * Sachin Bhamare <sbhamare@panasas.com>
+ * Boaz Harrosh <bharrosh@panasas.com>
+ *
+ * This file is part of exofs.
+ *
+ * exofs is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License 2 as published by
+ * the Free Software Foundation.
+ *
+ * exofs is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with exofs; if not, write to the:
+ * Free Software Foundation <licensing@fsf.org>
+ */
+
+#include <linux/kobject.h>
+#include <linux/device.h>
+
+#include "exofs.h"
+
+struct odev_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct exofs_dev *, char *);
+ ssize_t (*store)(struct exofs_dev *, const char *, size_t);
+};
+
+static ssize_t odev_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct exofs_dev *edp = container_of(kobj, struct exofs_dev, ed_kobj);
+ struct odev_attr *a = container_of(attr, struct odev_attr, attr);
+
+ return a->show ? a->show(edp, buf) : 0;
+}
+
+static ssize_t odev_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t len)
+{
+ struct exofs_dev *edp = container_of(kobj, struct exofs_dev, ed_kobj);
+ struct odev_attr *a = container_of(attr, struct odev_attr, attr);
+
+ return a->store ? a->store(edp, buf, len) : len;
+}
+
+static const struct sysfs_ops odev_attr_ops = {
+ .show = odev_attr_show,
+ .store = odev_attr_store,
+};
+
+
+static struct kset *exofs_kset;
+
+static ssize_t osdname_show(struct exofs_dev *edp, char *buf)
+{
+ struct osd_dev *odev = edp->ored.od;
+ const struct osd_dev_info *odi = osduld_device_info(odev);
+
+ return snprintf(buf, odi->osdname_len + 1, "%s", odi->osdname);
+}
+
+static ssize_t systemid_show(struct exofs_dev *edp, char *buf)
+{
+ struct osd_dev *odev = edp->ored.od;
+ const struct osd_dev_info *odi = osduld_device_info(odev);
+
+ memcpy(buf, odi->systemid, odi->systemid_len);
+ return odi->systemid_len;
+}
+
+static ssize_t uri_show(struct exofs_dev *edp, char *buf)
+{
+ return snprintf(buf, edp->urilen, "%s", edp->uri);
+}
+
+static ssize_t uri_store(struct exofs_dev *edp, const char *buf, size_t len)
+{
+ edp->urilen = strlen(buf) + 1;
+ edp->uri = krealloc(edp->uri, edp->urilen, GFP_KERNEL);
+ strncpy(edp->uri, buf, edp->urilen);
+ return edp->urilen;
+}
+
+#define OSD_ATTR(name, mode, show, store) \
+ static struct odev_attr odev_attr_##name = \
+ __ATTR(name, mode, show, store)
+
+OSD_ATTR(osdname, S_IRUGO, osdname_show, NULL);
+OSD_ATTR(systemid, S_IRUGO, systemid_show, NULL);
+OSD_ATTR(uri, S_IRWXU, uri_show, uri_store);
+
+static struct attribute *odev_attrs[] = {
+ &odev_attr_osdname.attr,
+ &odev_attr_systemid.attr,
+ &odev_attr_uri.attr,
+ NULL,
+};
+
+static struct kobj_type odev_ktype = {
+ .default_attrs = odev_attrs,
+ .sysfs_ops = &odev_attr_ops,
+};
+
+static struct kobj_type uuid_ktype = {
+};
+
+void exofs_sysfs_dbg_print()
+{
+#ifdef CONFIG_EXOFS_DEBUG
+ struct kobject *k_name, *k_tmp;
+
+ list_for_each_entry_safe(k_name, k_tmp, &exofs_kset->list, entry) {
+ printk(KERN_INFO "%s: name %s ref %d\n",
+ __func__, kobject_name(k_name),
+ (int)atomic_read(&k_name->kref.refcount));
+ }
+#endif
+}
+/*
+ * This function removes all kobjects under exofs_kset
+ * At the end of it, exofs_kset kobject will have a refcount
+ * of 1 which gets decremented only on exofs module unload
+ */
+void exofs_sysfs_sb_del(struct exofs_sb_info *sbi)
+{
+ struct kobject *k_name, *k_tmp;
+ struct kobject *s_kobj = &sbi->s_kobj;
+
+ list_for_each_entry_safe(k_name, k_tmp, &exofs_kset->list, entry) {
+ /* Remove all that are children of this SBI */
+ if (k_name->parent == s_kobj)
+ kobject_put(k_name);
+ }
+ kobject_put(s_kobj);
+}
+
+/*
+ * This function creates sysfs entries to hold the current exofs cluster
+ * instance (uniquely identified by osdname,pid tuple).
+ * This function gets called once per exofs mount instance.
+ */
+int exofs_sysfs_sb_add(struct exofs_sb_info *sbi,
+ struct exofs_dt_device_info *dt_dev)
+{
+ struct kobject *s_kobj;
+ int retval = 0;
+ uint64_t pid = sbi->one_comp.obj.partition;
+
+ /* allocate new uuid dirent */
+ s_kobj = &sbi->s_kobj;
+ s_kobj->kset = exofs_kset;
+ retval = kobject_init_and_add(s_kobj, &uuid_ktype,
+ &exofs_kset->kobj, "%s_%llx", dt_dev->osdname, pid);
+ if (retval) {
+ EXOFS_ERR("ERROR: Failed to create sysfs entry for "
+ "uuid-%s_%llx => %d\n", dt_dev->osdname, pid, retval);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+int exofs_sysfs_odev_add(struct exofs_dev *edev, struct exofs_sb_info *sbi)
+{
+ struct kobject *d_kobj;
+ int retval = 0;
+
+ /* create osd device group which contains following attributes
+ * osdname, systemid & uri
+ */
+ d_kobj = &edev->ed_kobj;
+ d_kobj->kset = exofs_kset;
+ retval = kobject_init_and_add(d_kobj, &odev_ktype,
+ &sbi->s_kobj, "dev%u", edev->did);
+ if (retval) {
+ EXOFS_ERR("ERROR: Failed to create sysfs entry for "
+ "device dev%u\n", edev->did);
+ return retval;
+ }
+ return 0;
+}
+
+int exofs_sysfs_init(void)
+{
+ exofs_kset = kset_create_and_add("exofs", NULL, fs_kobj);
+ if (!exofs_kset) {
+ EXOFS_ERR("ERROR: kset_create_and_add exofs failed\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void exofs_sysfs_uninit(void)
+{
+ kset_unregister(exofs_kset);
+}
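For illustration only (userspace, not part of the patch): once a mount has registered its kobjects as above, the attributes appear under /sys/fs/exofs/<osdname>_<pid>/dev<did>/. The "myosd_0" superblock entry and "dev0" device below are hypothetical names following those patterns.

#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("/sys/fs/exofs/myosd_0/dev0/osdname", "r");

	if (!f) {
		perror("open osdname attribute");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("osdname: %s\n", buf);
	fclose(f);
	return 0;
}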
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 030c6d277e1..1c361399886 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -165,7 +165,6 @@ static void release_blocks(struct super_block *sb, int count)
struct ext2_sb_info *sbi = EXT2_SB(sb);
percpu_counter_add(&sbi->s_freeblocks_counter, count);
- sb->s_dirt = 1;
}
}
@@ -180,7 +179,6 @@ static void group_adjust_blocks(struct super_block *sb, int group_no,
free_blocks = le16_to_cpu(desc->bg_free_blocks_count);
desc->bg_free_blocks_count = cpu_to_le16(free_blocks + count);
spin_unlock(sb_bgl_lock(sbi, group_no));
- sb->s_dirt = 1;
mark_buffer_dirty(bh);
}
}
@@ -479,7 +477,7 @@ void ext2_discard_reservation(struct inode *inode)
}
/**
- * ext2_free_blocks_sb() -- Free given blocks and update quota and i_blocks
+ * ext2_free_blocks() -- Free given blocks and update quota and i_blocks
* @inode: inode
* @block: start physical block to free
* @count: number of blocks to free
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 8b15cf8cef3..c13eb7b91a1 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -81,7 +81,6 @@ static void ext2_release_inode(struct super_block *sb, int group, int dir)
spin_unlock(sb_bgl_lock(EXT2_SB(sb), group));
if (dir)
percpu_counter_dec(&EXT2_SB(sb)->s_dirs_counter);
- sb->s_dirt = 1;
mark_buffer_dirty(bh);
}
@@ -543,7 +542,6 @@ got:
}
spin_unlock(sb_bgl_lock(sbi, group));
- sb->s_dirt = 1;
mark_buffer_dirty(bh2);
if (test_opt(sb, GRPID)) {
inode->i_mode = mode;
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index f9fa95f8443..264d315f6c4 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -90,7 +90,7 @@ void ext2_evict_inode(struct inode * inode)
}
invalidate_inode_buffers(inode);
- end_writeback(inode);
+ clear_inode(inode);
ext2_discard_reservation(inode);
rsv = EXT2_I(inode)->i_block_alloc_info;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 38f816071dd..b3621cb7ea3 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -130,9 +130,6 @@ static void ext2_put_super (struct super_block * sb)
dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
- if (sb->s_dirt)
- ext2_write_super(sb);
-
ext2_xattr_put_super(sb);
if (!(sb->s_flags & MS_RDONLY)) {
struct ext2_super_block *es = sbi->s_es;
@@ -307,7 +304,6 @@ static const struct super_operations ext2_sops = {
.write_inode = ext2_write_inode,
.evict_inode = ext2_evict_inode,
.put_super = ext2_put_super,
- .write_super = ext2_write_super,
.sync_fs = ext2_sync_fs,
.statfs = ext2_statfs,
.remount_fs = ext2_remount,
@@ -358,11 +354,6 @@ static struct dentry *ext2_fh_to_parent(struct super_block *sb, struct fid *fid,
ext2_nfs_get_inode);
}
-/* Yes, most of these are left as NULL!!
- * A NULL value implies the default, which works with ext2-like file
- * systems, but can be improved upon.
- * Currently only get_parent is required.
- */
static const struct export_operations ext2_export_ops = {
.fh_to_dentry = ext2_fh_to_dentry,
.fh_to_parent = ext2_fh_to_parent,
@@ -1176,7 +1167,6 @@ static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es,
mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
if (wait)
sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
- sb->s_dirt = 0;
}
/*
@@ -1209,8 +1199,6 @@ void ext2_write_super(struct super_block *sb)
{
if (!(sb->s_flags & MS_RDONLY))
ext2_sync_fs(sb, 1);
- else
- sb->s_dirt = 0;
}
static int ext2_remount (struct super_block * sb, int * flags, char * data)
@@ -1456,7 +1444,6 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
struct buffer_head tmp_bh;
struct buffer_head *bh;
- mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
while (towrite > 0) {
tocopy = sb->s_blocksize - offset < towrite ?
sb->s_blocksize - offset : towrite;
@@ -1486,16 +1473,13 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
blk++;
}
out:
- if (len == towrite) {
- mutex_unlock(&inode->i_mutex);
+ if (len == towrite)
return err;
- }
if (inode->i_size < off+len-towrite)
i_size_write(inode, off+len-towrite);
inode->i_version++;
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
mark_inode_dirty(inode);
- mutex_unlock(&inode->i_mutex);
return len - towrite;
}
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 6dcafc7efdf..b6754dbbce3 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -339,7 +339,6 @@ static void ext2_xattr_update_super_block(struct super_block *sb)
spin_lock(&EXT2_SB(sb)->s_lock);
EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
spin_unlock(&EXT2_SB(sb)->s_lock);
- sb->s_dirt = 1;
mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
}
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index cc761ad8fa5..92490e9f85c 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -21,30 +21,15 @@
*
*/
+#include <linux/compat.h>
#include "ext3.h"
static unsigned char ext3_filetype_table[] = {
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};
-static int ext3_readdir(struct file *, void *, filldir_t);
static int ext3_dx_readdir(struct file * filp,
void * dirent, filldir_t filldir);
-static int ext3_release_dir (struct inode * inode,
- struct file * filp);
-
-const struct file_operations ext3_dir_operations = {
- .llseek = generic_file_llseek,
- .read = generic_read_dir,
- .readdir = ext3_readdir, /* we take BKL. needed?*/
- .unlocked_ioctl = ext3_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = ext3_compat_ioctl,
-#endif
- .fsync = ext3_sync_file, /* BKL held */
- .release = ext3_release_dir,
-};
-
static unsigned char get_dtype(struct super_block *sb, int filetype)
{
@@ -55,6 +40,25 @@ static unsigned char get_dtype(struct super_block *sb, int filetype)
return (ext3_filetype_table[filetype]);
}
+/**
+ * Check if the given dir-inode refers to an htree-indexed directory
+ * (or a directory which could potentially get converted to use htree
+ * indexing).
+ *
+ * Return 1 if it is a dx dir, 0 if not
+ */
+static int is_dx_dir(struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+
+ if (EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
+ EXT3_FEATURE_COMPAT_DIR_INDEX) &&
+ ((EXT3_I(inode)->i_flags & EXT3_INDEX_FL) ||
+ ((inode->i_size >> sb->s_blocksize_bits) == 1)))
+ return 1;
+
+ return 0;
+}
int ext3_check_dir_entry (const char * function, struct inode * dir,
struct ext3_dir_entry_2 * de,
@@ -94,18 +98,13 @@ static int ext3_readdir(struct file * filp,
unsigned long offset;
int i, stored;
struct ext3_dir_entry_2 *de;
- struct super_block *sb;
int err;
struct inode *inode = filp->f_path.dentry->d_inode;
+ struct super_block *sb = inode->i_sb;
int ret = 0;
int dir_has_error = 0;
- sb = inode->i_sb;
-
- if (EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
- EXT3_FEATURE_COMPAT_DIR_INDEX) &&
- ((EXT3_I(inode)->i_flags & EXT3_INDEX_FL) ||
- ((inode->i_size >> sb->s_blocksize_bits) == 1))) {
+ if (is_dx_dir(inode)) {
err = ext3_dx_readdir(filp, dirent, filldir);
if (err != ERR_BAD_DX_DIR) {
ret = err;
@@ -227,22 +226,87 @@ out:
return ret;
}
+static inline int is_32bit_api(void)
+{
+#ifdef CONFIG_COMPAT
+ return is_compat_task();
+#else
+ return (BITS_PER_LONG == 32);
+#endif
+}
+
/*
* These functions convert from the major/minor hash to an f_pos
- * value.
+ * value for dx directories
*
- * Currently we only use major hash numer. This is unfortunate, but
- * on 32-bit machines, the same VFS interface is used for lseek and
- * llseek, so if we use the 64 bit offset, then the 32-bit versions of
- * lseek/telldir/seekdir will blow out spectacularly, and from within
- * the ext2 low-level routine, we don't know if we're being called by
- * a 64-bit version of the system call or the 32-bit version of the
- * system call. Worse yet, NFSv2 only allows for a 32-bit readdir
- * cookie. Sigh.
+ * Upper layer (for example NFS) should specify FMODE_32BITHASH or
+ * FMODE_64BITHASH explicitly. On the other hand, we allow ext3 to be mounted
+ * directly on both 32-bit and 64-bit nodes, in which case neither
+ * FMODE_32BITHASH nor FMODE_64BITHASH is specified.
*/
-#define hash2pos(major, minor) (major >> 1)
-#define pos2maj_hash(pos) ((pos << 1) & 0xffffffff)
-#define pos2min_hash(pos) (0)
+static inline loff_t hash2pos(struct file *filp, __u32 major, __u32 minor)
+{
+ if ((filp->f_mode & FMODE_32BITHASH) ||
+ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
+ return major >> 1;
+ else
+ return ((__u64)(major >> 1) << 32) | (__u64)minor;
+}
+
+static inline __u32 pos2maj_hash(struct file *filp, loff_t pos)
+{
+ if ((filp->f_mode & FMODE_32BITHASH) ||
+ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
+ return (pos << 1) & 0xffffffff;
+ else
+ return ((pos >> 32) << 1) & 0xffffffff;
+}
+
+static inline __u32 pos2min_hash(struct file *filp, loff_t pos)
+{
+ if ((filp->f_mode & FMODE_32BITHASH) ||
+ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
+ return 0;
+ else
+ return pos & 0xffffffff;
+}
+
+/*
+ * Return 32- or 64-bit end-of-file for dx directories
+ */
+static inline loff_t ext3_get_htree_eof(struct file *filp)
+{
+ if ((filp->f_mode & FMODE_32BITHASH) ||
+ (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
+ return EXT3_HTREE_EOF_32BIT;
+ else
+ return EXT3_HTREE_EOF_64BIT;
+}
+
+
+/*
+ * ext3_dir_llseek() calls generic_file_llseek[_size]() to handle both
+ * non-htree and htree directories, where the "offset" is in terms
+ * of the filename hash value instead of the byte offset.
+ *
+ * Because we may return a 64-bit hash that is well beyond s_maxbytes,
+ * we need to pass the max hash as the maximum allowable offset in
+ * the htree directory case.
+ *
+ * NOTE: offsets obtained *before* ext3_set_inode_flag(dir, EXT3_INODE_INDEX)
+ * will be invalid once the directory was converted into a dx directory
+ */
+loff_t ext3_dir_llseek(struct file *file, loff_t offset, int origin)
+{
+ struct inode *inode = file->f_mapping->host;
+ int dx_dir = is_dx_dir(inode);
+
+ if (likely(dx_dir))
+ return generic_file_llseek_size(file, offset, origin,
+ ext3_get_htree_eof(file));
+ else
+ return generic_file_llseek(file, offset, origin);
+}
/*
* This structure holds the nodes of the red-black tree used to store
@@ -303,15 +367,16 @@ static void free_rb_tree_fname(struct rb_root *root)
}
-static struct dir_private_info *ext3_htree_create_dir_info(loff_t pos)
+static struct dir_private_info *ext3_htree_create_dir_info(struct file *filp,
+ loff_t pos)
{
struct dir_private_info *p;
p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL);
if (!p)
return NULL;
- p->curr_hash = pos2maj_hash(pos);
- p->curr_minor_hash = pos2min_hash(pos);
+ p->curr_hash = pos2maj_hash(filp, pos);
+ p->curr_minor_hash = pos2min_hash(filp, pos);
return p;
}
@@ -401,7 +466,7 @@ static int call_filldir(struct file * filp, void * dirent,
printk("call_filldir: called with null fname?!?\n");
return 0;
}
- curr_pos = hash2pos(fname->hash, fname->minor_hash);
+ curr_pos = hash2pos(filp, fname->hash, fname->minor_hash);
while (fname) {
error = filldir(dirent, fname->name,
fname->name_len, curr_pos,
@@ -426,13 +491,13 @@ static int ext3_dx_readdir(struct file * filp,
int ret;
if (!info) {
- info = ext3_htree_create_dir_info(filp->f_pos);
+ info = ext3_htree_create_dir_info(filp, filp->f_pos);
if (!info)
return -ENOMEM;
filp->private_data = info;
}
- if (filp->f_pos == EXT3_HTREE_EOF)
+ if (filp->f_pos == ext3_get_htree_eof(filp))
return 0; /* EOF */
/* Some one has messed with f_pos; reset the world */
@@ -440,8 +505,8 @@ static int ext3_dx_readdir(struct file * filp,
free_rb_tree_fname(&info->root);
info->curr_node = NULL;
info->extra_fname = NULL;
- info->curr_hash = pos2maj_hash(filp->f_pos);
- info->curr_minor_hash = pos2min_hash(filp->f_pos);
+ info->curr_hash = pos2maj_hash(filp, filp->f_pos);
+ info->curr_minor_hash = pos2min_hash(filp, filp->f_pos);
}
/*
@@ -473,7 +538,7 @@ static int ext3_dx_readdir(struct file * filp,
if (ret < 0)
return ret;
if (ret == 0) {
- filp->f_pos = EXT3_HTREE_EOF;
+ filp->f_pos = ext3_get_htree_eof(filp);
break;
}
info->curr_node = rb_first(&info->root);
@@ -493,7 +558,7 @@ static int ext3_dx_readdir(struct file * filp,
info->curr_minor_hash = fname->minor_hash;
} else {
if (info->next_hash == ~0) {
- filp->f_pos = EXT3_HTREE_EOF;
+ filp->f_pos = ext3_get_htree_eof(filp);
break;
}
info->curr_hash = info->next_hash;
@@ -512,3 +577,15 @@ static int ext3_release_dir (struct inode * inode, struct file * filp)
return 0;
}
+
+const struct file_operations ext3_dir_operations = {
+ .llseek = ext3_dir_llseek,
+ .read = generic_read_dir,
+ .readdir = ext3_readdir,
+ .unlocked_ioctl = ext3_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ext3_compat_ioctl,
+#endif
+ .fsync = ext3_sync_file,
+ .release = ext3_release_dir,
+};
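A standalone sketch (plain userspace C with assumed helper names, mirroring the FMODE_64BITHASH branch of the hash2pos()/pos2maj_hash()/pos2min_hash() helpers added above) showing how a (major, minor) hash pair round-trips through a 64-bit directory offset:

#include <stdint.h>
#include <stdio.h>

static uint64_t hash2pos64(uint32_t major, uint32_t minor)
{
	/* Drop the low bit of major, pack it into the high word. */
	return ((uint64_t)(major >> 1) << 32) | minor;
}

static uint32_t pos2maj64(uint64_t pos)
{
	return ((pos >> 32) << 1) & 0xffffffff;
}

static uint32_t pos2min64(uint64_t pos)
{
	return pos & 0xffffffff;
}

int main(void)
{
	uint32_t major = 0x89abcdee, minor = 0x01234567;
	uint64_t pos = hash2pos64(major, minor);

	/* Round-trips: maj comes back with its low bit cleared, min intact. */
	printf("pos=%#llx maj=%#x min=%#x\n",
	       (unsigned long long)pos, pos2maj64(pos), pos2min64(pos));
	return 0;
}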
diff --git a/fs/ext3/ext3.h b/fs/ext3/ext3.h
index 7977973a24f..e85ff15a060 100644
--- a/fs/ext3/ext3.h
+++ b/fs/ext3/ext3.h
@@ -920,7 +920,11 @@ struct dx_hash_info
u32 *seed;
};
-#define EXT3_HTREE_EOF 0x7fffffff
+
+/* 32 and 64 bit signed EOF for dx directories */
+#define EXT3_HTREE_EOF_32BIT ((1UL << (32 - 1)) - 1)
+#define EXT3_HTREE_EOF_64BIT ((1ULL << (64 - 1)) - 1)
+
/*
* Control parameters used by ext3_htree_next_block
diff --git a/fs/ext3/hash.c b/fs/ext3/hash.c
index d10231ddcf8..ede315cdf12 100644
--- a/fs/ext3/hash.c
+++ b/fs/ext3/hash.c
@@ -198,8 +198,8 @@ int ext3fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
return -1;
}
hash = hash & ~1;
- if (hash == (EXT3_HTREE_EOF << 1))
- hash = (EXT3_HTREE_EOF-1) << 1;
+ if (hash == (EXT3_HTREE_EOF_32BIT << 1))
+ hash = (EXT3_HTREE_EOF_32BIT - 1) << 1;
hinfo->hash = hash;
hinfo->minor_hash = minor_hash;
return 0;
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index e3c39e4cec1..082afd78b10 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -180,8 +180,7 @@ error_return:
* It's OK to put directory into a group unless
* it has too many directories already (max_dirs) or
* it has too few free inodes left (min_inodes) or
- * it has too few free blocks left (min_blocks) or
- * it's already running too large debt (max_debt).
+ * it has too few free blocks left (min_blocks).
* Parent's group is preferred, if it doesn't satisfy these
* conditions we search cyclically through the rest. If none
* of the groups look good we just look for a group with more
@@ -191,21 +190,16 @@ error_return:
* when we allocate an inode, within 0--255.
*/
-#define INODE_COST 64
-#define BLOCK_COST 256
-
static int find_group_orlov(struct super_block *sb, struct inode *parent)
{
int parent_group = EXT3_I(parent)->i_block_group;
struct ext3_sb_info *sbi = EXT3_SB(sb);
- struct ext3_super_block *es = sbi->s_es;
int ngroups = sbi->s_groups_count;
int inodes_per_group = EXT3_INODES_PER_GROUP(sb);
unsigned int freei, avefreei;
ext3_fsblk_t freeb, avefreeb;
- ext3_fsblk_t blocks_per_dir;
unsigned int ndirs;
- int max_debt, max_dirs, min_inodes;
+ int max_dirs, min_inodes;
ext3_grpblk_t min_blocks;
int group = -1, i;
struct ext3_group_desc *desc;
@@ -242,20 +236,10 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent)
goto fallback;
}
- blocks_per_dir = (le32_to_cpu(es->s_blocks_count) - freeb) / ndirs;
-
max_dirs = ndirs / ngroups + inodes_per_group / 16;
min_inodes = avefreei - inodes_per_group / 4;
min_blocks = avefreeb - EXT3_BLOCKS_PER_GROUP(sb) / 4;
- max_debt = EXT3_BLOCKS_PER_GROUP(sb) / max(blocks_per_dir, (ext3_fsblk_t)BLOCK_COST);
- if (max_debt * INODE_COST > inodes_per_group)
- max_debt = inodes_per_group / INODE_COST;
- if (max_debt > 255)
- max_debt = 255;
- if (max_debt == 0)
- max_debt = 1;
-
for (i = 0; i < ngroups; i++) {
group = (parent_group + i) % ngroups;
desc = ext3_get_group_desc (sb, group, NULL);
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index a09790a412b..9a4a5c48b1c 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -272,18 +272,18 @@ void ext3_evict_inode (struct inode *inode)
if (ext3_mark_inode_dirty(handle, inode)) {
/* If that failed, just dquot_drop() and be done with that */
dquot_drop(inode);
- end_writeback(inode);
+ clear_inode(inode);
} else {
ext3_xattr_delete_inode(handle, inode);
dquot_free_inode(inode);
dquot_drop(inode);
- end_writeback(inode);
+ clear_inode(inode);
ext3_free_inode(handle, inode);
}
ext3_journal_stop(handle);
return;
no_delete:
- end_writeback(inode);
+ clear_inode(inode);
dquot_drop(inode);
}
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 94ef7e61612..8c3a44b7c37 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -3015,7 +3015,6 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
(unsigned long long)off, (unsigned long long)len);
return -EIO;
}
- mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
bh = ext3_bread(handle, inode, blk, 1, &err);
if (!bh)
goto out;
@@ -3039,10 +3038,8 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
}
brelse(bh);
out:
- if (err) {
- mutex_unlock(&inode->i_mutex);
+ if (err)
return err;
- }
if (inode->i_size < off + len) {
i_size_write(inode, off + len);
EXT3_I(inode)->i_disksize = inode->i_size;
@@ -3050,7 +3047,6 @@ out:
inode->i_version++;
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
ext3_mark_inode_dirty(handle, inode);
- mutex_unlock(&inode->i_mutex);
return len;
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 436b4223df6..35b5954489e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1007,7 +1007,7 @@ static void destroy_inodecache(void)
void ext4_clear_inode(struct inode *inode)
{
invalidate_inode_buffers(inode);
- end_writeback(inode);
+ clear_inode(inode);
dquot_drop(inode);
ext4_discard_preallocations(inode);
if (EXT4_I(inode)->jinode) {
@@ -4758,7 +4758,6 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
return -EIO;
}
- mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
bh = ext4_bread(handle, inode, blk, 1, &err);
if (!bh)
goto out;
@@ -4774,16 +4773,13 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
err = ext4_handle_dirty_metadata(handle, NULL, bh);
brelse(bh);
out:
- if (err) {
- mutex_unlock(&inode->i_mutex);
+ if (err)
return err;
- }
if (inode->i_size < off + len) {
i_size_write(inode, off + len);
EXT4_I(inode)->i_disksize = inode->i_size;
ext4_mark_inode_dirty(handle, inode);
}
- mutex_unlock(&inode->i_mutex);
return len;
}
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 21687e31acc..b3d290c1b51 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -454,7 +454,7 @@ static void fat_evict_inode(struct inode *inode)
fat_truncate_blocks(inode, 0);
}
invalidate_inode_buffers(inode);
- end_writeback(inode);
+ clear_inode(inode);
fat_cache_inval_inode(inode);
fat_detach(inode);
}
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index cf9ef918a2a..ef67c95f12d 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -355,6 +355,6 @@ void
vxfs_evict_inode(struct inode *ip)
{
truncate_inode_pages(&ip->i_data, 0);
- end_writeback(ip);
+ clear_inode(ip);
call_rcu(&ip->i_rcu, vxfs_i_callback);
}
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 539f36cf3e4..8d2fb8c88cf 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -231,11 +231,8 @@ static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
static void inode_sync_complete(struct inode *inode)
{
- /*
- * Prevent speculative execution through
- * spin_unlock(&wb->list_lock);
- */
-
+ inode->i_state &= ~I_SYNC;
+ /* Waiters must see I_SYNC cleared before being woken up */
smp_mb();
wake_up_bit(&inode->i_state, __I_SYNC);
}
@@ -329,10 +326,12 @@ static int write_inode(struct inode *inode, struct writeback_control *wbc)
}
/*
- * Wait for writeback on an inode to complete.
+ * Wait for writeback on an inode to complete. Called with i_lock held.
+ * Caller must make sure inode cannot go away when we drop i_lock.
*/
-static void inode_wait_for_writeback(struct inode *inode,
- struct bdi_writeback *wb)
+static void __inode_wait_for_writeback(struct inode *inode)
+ __releases(inode->i_lock)
+ __acquires(inode->i_lock)
{
DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
wait_queue_head_t *wqh;
@@ -340,70 +339,119 @@ static void inode_wait_for_writeback(struct inode *inode,
wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
while (inode->i_state & I_SYNC) {
spin_unlock(&inode->i_lock);
- spin_unlock(&wb->list_lock);
__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
- spin_lock(&wb->list_lock);
spin_lock(&inode->i_lock);
}
}
/*
- * Write out an inode's dirty pages. Called under wb->list_lock and
- * inode->i_lock. Either the caller has an active reference on the inode or
- * the inode has I_WILL_FREE set.
- *
- * If `wait' is set, wait on the writeout.
- *
- * The whole writeout design is quite complex and fragile. We want to avoid
- * starvation of particular inodes when others are being redirtied, prevent
- * livelocks, etc.
+ * Wait for writeback on an inode to complete. Caller must have inode pinned.
*/
-static int
-writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
- struct writeback_control *wbc)
+void inode_wait_for_writeback(struct inode *inode)
{
- struct address_space *mapping = inode->i_mapping;
- long nr_to_write = wbc->nr_to_write;
- unsigned dirty;
- int ret;
+ spin_lock(&inode->i_lock);
+ __inode_wait_for_writeback(inode);
+ spin_unlock(&inode->i_lock);
+}
- assert_spin_locked(&wb->list_lock);
- assert_spin_locked(&inode->i_lock);
+/*
+ * Sleep until I_SYNC is cleared. This function must be called with i_lock
+ * held and drops it. It is aimed at callers not holding any inode reference
+ * so once i_lock is dropped, inode can go away.
+ */
+static void inode_sleep_on_writeback(struct inode *inode)
+ __releases(inode->i_lock)
+{
+ DEFINE_WAIT(wait);
+ wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
+ int sleep;
- if (!atomic_read(&inode->i_count))
- WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
- else
- WARN_ON(inode->i_state & I_WILL_FREE);
+ prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
+ sleep = inode->i_state & I_SYNC;
+ spin_unlock(&inode->i_lock);
+ if (sleep)
+ schedule();
+ finish_wait(wqh, &wait);
+}
- if (inode->i_state & I_SYNC) {
+/*
+ * Find proper writeback list for the inode depending on its current state and
+ * possibly also change of its state while we were doing writeback. Here we
+ * handle things such as livelock prevention or fairness of writeback among
+ * inodes. This function can be called only by the flusher thread - no one else
+ * processes all inodes in writeback lists and requeueing inodes behind flusher
+ * thread's back can have unexpected consequences.
+ */
+static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
+ struct writeback_control *wbc)
+{
+ if (inode->i_state & I_FREEING)
+ return;
+
+ /*
+ * Sync livelock prevention. Each inode is tagged and synced in one
+ * shot. If still dirty, it will be redirty_tail()'ed below. Update
+ * the dirty time to prevent enqueue and sync it again.
+ */
+ if ((inode->i_state & I_DIRTY) &&
+ (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
+ inode->dirtied_when = jiffies;
+
+ if (wbc->pages_skipped) {
/*
- * If this inode is locked for writeback and we are not doing
- * writeback-for-data-integrity, move it to b_more_io so that
- * writeback can proceed with the other inodes on s_io.
- *
- * We'll have another go at writing back this inode when we
- * completed a full scan of b_io.
+ * writeback is not making progress due to locked
+ * buffers. Skip this inode for now.
*/
- if (wbc->sync_mode != WB_SYNC_ALL) {
+ redirty_tail(inode, wb);
+ return;
+ }
+
+ if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
+ /*
+ * We didn't write back all the pages. nfs_writepages()
+ * sometimes bales out without doing anything.
+ */
+ if (wbc->nr_to_write <= 0) {
+ /* Slice used up. Queue for next turn. */
requeue_io(inode, wb);
- trace_writeback_single_inode_requeue(inode, wbc,
- nr_to_write);
- return 0;
+ } else {
+ /*
+ * Writeback blocked by something other than
+ * congestion. Delay the inode for some time to
+ * avoid spinning on the CPU (100% iowait)
+ * retrying writeback of the dirty page/inode
+ * that cannot be performed immediately.
+ */
+ redirty_tail(inode, wb);
}
-
+ } else if (inode->i_state & I_DIRTY) {
/*
- * It's a data-integrity sync. We must wait.
+ * Filesystems can dirty the inode during writeback operations,
+ * such as delayed allocation during submission or metadata
+ * updates after data IO completion.
*/
- inode_wait_for_writeback(inode, wb);
+ redirty_tail(inode, wb);
+ } else {
+ /* The inode is clean. Remove from writeback lists. */
+ list_del_init(&inode->i_wb_list);
}
+}
- BUG_ON(inode->i_state & I_SYNC);
+/*
+ * Write out an inode and its dirty pages. Do not update the writeback list
+ * linkage. That is left to the caller. The caller is also responsible for
+ * setting I_SYNC flag and calling inode_sync_complete() to clear it.
+ */
+static int
+__writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
+ struct writeback_control *wbc)
+{
+ struct address_space *mapping = inode->i_mapping;
+ long nr_to_write = wbc->nr_to_write;
+ unsigned dirty;
+ int ret;
- /* Set I_SYNC, reset I_DIRTY_PAGES */
- inode->i_state |= I_SYNC;
- inode->i_state &= ~I_DIRTY_PAGES;
- spin_unlock(&inode->i_lock);
- spin_unlock(&wb->list_lock);
+ WARN_ON(!(inode->i_state & I_SYNC));
ret = do_writepages(mapping, wbc);
@@ -424,6 +472,9 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
* write_inode()
*/
spin_lock(&inode->i_lock);
+ /* Clear I_DIRTY_PAGES if we've written out all dirty pages */
+ if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
+ inode->i_state &= ~I_DIRTY_PAGES;
dirty = inode->i_state & I_DIRTY;
inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
spin_unlock(&inode->i_lock);
@@ -433,60 +484,67 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
if (ret == 0)
ret = err;
}
+ trace_writeback_single_inode(inode, wbc, nr_to_write);
+ return ret;
+}
+
+/*
+ * Write out an inode's dirty pages. Either the caller has an active reference
+ * on the inode or the inode has I_WILL_FREE set.
+ *
+ * This function is designed for callers that write back a single inode,
+ * e.g. from within a filesystem. The flusher thread uses
+ * __writeback_single_inode() and does more elaborate writeback list
+ * handling in writeback_sb_inodes().
+ */
+static int
+writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
+ struct writeback_control *wbc)
+{
+ int ret = 0;
- spin_lock(&wb->list_lock);
spin_lock(&inode->i_lock);
- inode->i_state &= ~I_SYNC;
- if (!(inode->i_state & I_FREEING)) {
+ if (!atomic_read(&inode->i_count))
+ WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
+ else
+ WARN_ON(inode->i_state & I_WILL_FREE);
+
+ if (inode->i_state & I_SYNC) {
+ if (wbc->sync_mode != WB_SYNC_ALL)
+ goto out;
/*
- * Sync livelock prevention. Each inode is tagged and synced in
- * one shot. If still dirty, it will be redirty_tail()'ed below.
- * Update the dirty time to prevent enqueue and sync it again.
+ * It's a data-integrity sync. We must wait. Since callers hold
+ * inode reference or inode has I_WILL_FREE set, it cannot go
+ * away under us.
*/
- if ((inode->i_state & I_DIRTY) &&
- (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
- inode->dirtied_when = jiffies;
-
- if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
- /*
- * We didn't write back all the pages. nfs_writepages()
- * sometimes bales out without doing anything.
- */
- inode->i_state |= I_DIRTY_PAGES;
- if (wbc->nr_to_write <= 0) {
- /*
- * slice used up: queue for next turn
- */
- requeue_io(inode, wb);
- } else {
- /*
- * Writeback blocked by something other than
- * congestion. Delay the inode for some time to
- * avoid spinning on the CPU (100% iowait)
- * retrying writeback of the dirty page/inode
- * that cannot be performed immediately.
- */
- redirty_tail(inode, wb);
- }
- } else if (inode->i_state & I_DIRTY) {
- /*
- * Filesystems can dirty the inode during writeback
- * operations, such as delayed allocation during
- * submission or metadata updates after data IO
- * completion.
- */
- redirty_tail(inode, wb);
- } else {
- /*
- * The inode is clean. At this point we either have
- * a reference to the inode or it's on it's way out.
- * No need to add it back to the LRU.
- */
- list_del_init(&inode->i_wb_list);
- }
+ __inode_wait_for_writeback(inode);
}
+ WARN_ON(inode->i_state & I_SYNC);
+ /*
+ * Skip inode if it is clean. We don't want to mess with writeback
+ * lists in this function since the flusher thread may, for example, be
+ * doing sync in parallel and if we move the inode, it could get skipped. So
+ * here we make sure inode is on some writeback list and leave it there
+ * unless we have completely cleaned the inode.
+ */
+ if (!(inode->i_state & I_DIRTY))
+ goto out;
+ inode->i_state |= I_SYNC;
+ spin_unlock(&inode->i_lock);
+
+ ret = __writeback_single_inode(inode, wb, wbc);
+
+ spin_lock(&wb->list_lock);
+ spin_lock(&inode->i_lock);
+ /*
+ * If inode is clean, remove it from writeback lists. Otherwise don't
+ * touch it. See comment above for explanation.
+ */
+ if (!(inode->i_state & I_DIRTY))
+ list_del_init(&inode->i_wb_list);
+ spin_unlock(&wb->list_lock);
inode_sync_complete(inode);
- trace_writeback_single_inode(inode, wbc, nr_to_write);
+out:
+ spin_unlock(&inode->i_lock);
return ret;
}
@@ -580,29 +638,57 @@ static long writeback_sb_inodes(struct super_block *sb,
redirty_tail(inode, wb);
continue;
}
- __iget(inode);
+ if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
+ /*
+ * If this inode is locked for writeback and we are not
+ * doing writeback-for-data-integrity, move it to
+ * b_more_io so that writeback can proceed with the
+ * other inodes on s_io.
+ *
+ * We'll have another go at writing back this inode
+ * when we completed a full scan of b_io.
+ */
+ spin_unlock(&inode->i_lock);
+ requeue_io(inode, wb);
+ trace_writeback_sb_inodes_requeue(inode);
+ continue;
+ }
+ spin_unlock(&wb->list_lock);
+
+ /*
+ * We already requeued the inode if it had I_SYNC set and we
+ * are doing WB_SYNC_NONE writeback. So this catches only the
+ * WB_SYNC_ALL case.
+ */
+ if (inode->i_state & I_SYNC) {
+ /* Wait for I_SYNC. This function drops i_lock... */
+ inode_sleep_on_writeback(inode);
+ /* Inode may be gone, start again */
+ continue;
+ }
+ inode->i_state |= I_SYNC;
+ spin_unlock(&inode->i_lock);
+
write_chunk = writeback_chunk_size(wb->bdi, work);
wbc.nr_to_write = write_chunk;
wbc.pages_skipped = 0;
- writeback_single_inode(inode, wb, &wbc);
+ /*
+ * We use I_SYNC to pin the inode in memory. While it is set
+ * evict_inode() will wait so the inode cannot be freed.
+ */
+ __writeback_single_inode(inode, wb, &wbc);
work->nr_pages -= write_chunk - wbc.nr_to_write;
wrote += write_chunk - wbc.nr_to_write;
+ spin_lock(&wb->list_lock);
+ spin_lock(&inode->i_lock);
if (!(inode->i_state & I_DIRTY))
wrote++;
- if (wbc.pages_skipped) {
- /*
- * writeback is not making progress due to locked
- * buffers. Skip this inode for now.
- */
- redirty_tail(inode, wb);
- }
+ requeue_inode(inode, wb, &wbc);
+ inode_sync_complete(inode);
spin_unlock(&inode->i_lock);
- spin_unlock(&wb->list_lock);
- iput(inode);
- cond_resched();
- spin_lock(&wb->list_lock);
+ cond_resched_lock(&wb->list_lock);
/*
* bail out to wb_writeback() often enough to check
* background threshold and other termination conditions.
@@ -796,8 +882,10 @@ static long wb_writeback(struct bdi_writeback *wb,
trace_writeback_wait(wb->bdi, work);
inode = wb_inode(wb->b_more_io.prev);
spin_lock(&inode->i_lock);
- inode_wait_for_writeback(inode, wb);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&wb->list_lock);
+ /* This function drops i_lock... */
+ inode_sleep_on_writeback(inode);
+ spin_lock(&wb->list_lock);
}
}
spin_unlock(&wb->list_lock);
@@ -1331,7 +1419,6 @@ EXPORT_SYMBOL(sync_inodes_sb);
int write_inode_now(struct inode *inode, int sync)
{
struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
- int ret;
struct writeback_control wbc = {
.nr_to_write = LONG_MAX,
.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
@@ -1343,12 +1430,7 @@ int write_inode_now(struct inode *inode, int sync)
wbc.nr_to_write = 0;
might_sleep();
- spin_lock(&wb->list_lock);
- spin_lock(&inode->i_lock);
- ret = writeback_single_inode(inode, wb, &wbc);
- spin_unlock(&inode->i_lock);
- spin_unlock(&wb->list_lock);
- return ret;
+ return writeback_single_inode(inode, wb, &wbc);
}
EXPORT_SYMBOL(write_inode_now);
@@ -1365,15 +1447,7 @@ EXPORT_SYMBOL(write_inode_now);
*/
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
- struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
- int ret;
-
- spin_lock(&wb->list_lock);
- spin_lock(&inode->i_lock);
- ret = writeback_single_inode(inode, wb, wbc);
- spin_unlock(&inode->i_lock);
- spin_unlock(&wb->list_lock);
- return ret;
+ return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
}
EXPORT_SYMBOL(sync_inode);
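Note that the exported helpers keep their signatures: a filesystem calls sync_inode() or write_inode_now() exactly as before, and the wb->list_lock / i_lock handling now happens inside writeback_single_inode(). A minimal caller sketch (the "foofs" name is hypothetical):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/writeback.h>

static int foofs_flush_one(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= LONG_MAX,
	};

	/* Writes back dirty pages and inode metadata, waiting for the IO. */
	return sync_inode(inode, &wbc);
}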
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 26783eb2b1f..56f6dcf3076 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -122,7 +122,7 @@ static void fuse_destroy_inode(struct inode *inode)
static void fuse_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
if (inode->i_sb->s_flags & MS_ACTIVE) {
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 6172fa77ad5..713e621c240 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1554,7 +1554,7 @@ out_unlock:
out:
/* Case 3 starts here */
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
gfs2_dir_hash_inval(ip);
ip->i_gl->gl_object = NULL;
flush_delayed_work_sync(&ip->i_gl->gl_work);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 737dbeb6432..761ec06354b 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -532,7 +532,7 @@ out:
void hfs_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
if (HFS_IS_RSRC(inode) && HFS_I(inode)->rsrc_inode) {
HFS_I(HFS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
iput(HFS_I(inode)->rsrc_inode);
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index ceb1c281eef..a9bca4b8768 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -154,7 +154,7 @@ static void hfsplus_evict_inode(struct inode *inode)
{
dprint(DBG_INODE, "hfsplus_evict_inode: %lu\n", inode->i_ino);
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
if (HFSPLUS_IS_RSRC(inode)) {
HFSPLUS_I(HFSPLUS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
iput(HFSPLUS_I(inode)->rsrc_inode);
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 07c516bfea7..2afa5bbccf9 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -240,7 +240,7 @@ static struct inode *hostfs_alloc_inode(struct super_block *sb)
static void hostfs_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
if (HOSTFS_I(inode)->fd != -1) {
close_file(&HOSTFS_I(inode)->fd);
HOSTFS_I(inode)->fd = -1;
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index 3b2cec29972..b43066cbdc6 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -299,7 +299,7 @@ void hpfs_write_if_changed(struct inode *inode)
void hpfs_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
if (!inode->i_nlink) {
hpfs_lock(inode->i_sb);
hpfs_remove_fnode(inode->i_sb, inode->i_ino);
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index a80e45a690a..d4f93b52cec 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -614,7 +614,7 @@ static struct inode *hppfs_alloc_inode(struct super_block *sb)
void hppfs_evict_inode(struct inode *ino)
{
- end_writeback(ino);
+ clear_inode(ino);
dput(HPPFS_I(ino)->proc_dentry);
mntput(ino->i_sb->s_fs_info);
}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 001ef01d2fe..cc9281b6c62 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -393,7 +393,7 @@ static void truncate_hugepages(struct inode *inode, loff_t lstart)
static void hugetlbfs_evict_inode(struct inode *inode)
{
truncate_hugepages(inode, 0);
- end_writeback(inode);
+ clear_inode(inode);
}
static inline void
diff --git a/fs/inode.c b/fs/inode.c
index da93f7d160d..6bc8761cc33 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -486,7 +486,7 @@ void __remove_inode_hash(struct inode *inode)
}
EXPORT_SYMBOL(__remove_inode_hash);
-void end_writeback(struct inode *inode)
+void clear_inode(struct inode *inode)
{
might_sleep();
/*
@@ -500,11 +500,10 @@ void end_writeback(struct inode *inode)
BUG_ON(!list_empty(&inode->i_data.private_list));
BUG_ON(!(inode->i_state & I_FREEING));
BUG_ON(inode->i_state & I_CLEAR);
- inode_sync_wait(inode);
/* don't need i_lock here, no concurrent mods to i_state */
inode->i_state = I_FREEING | I_CLEAR;
}
-EXPORT_SYMBOL(end_writeback);
+EXPORT_SYMBOL(clear_inode);
/*
* Free the inode passed in, removing it from the lists it is still connected
@@ -531,12 +530,20 @@ static void evict(struct inode *inode)
inode_sb_list_del(inode);
+ /*
+ * Wait for flusher thread to be done with the inode so that filesystem
+ * does not start destroying it while writeback is still running. Since
+ * the inode has I_FREEING set, flusher thread won't start new work on
+ * the inode. We just have to wait for running writeback to finish.
+ */
+ inode_wait_for_writeback(inode);
+
if (op->evict_inode) {
op->evict_inode(inode);
} else {
if (inode->i_data.nrpages)
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
}
if (S_ISBLK(inode->i_mode) && inode->i_bdev)
bd_forget(inode);
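With end_writeback() renamed to clear_inode() (and the wait for running writeback moved into evict() above), the per-filesystem pattern stays the same. A minimal sketch for a hypothetical "foofs" with no on-disk cleanup:

#include <linux/fs.h>
#include <linux/mm.h>

static void foofs_evict_inode(struct inode *inode)
{
	/* Drop the page cache first ... */
	truncate_inode_pages(&inode->i_data, 0);
	/* ... then mark the inode I_FREEING | I_CLEAR. */
	clear_inode(inode);
}

static const struct super_operations foofs_sops = {
	.evict_inode	= foofs_evict_inode,
};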
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 05f0754f2b4..08c03044abd 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -508,20 +508,19 @@ int cleanup_journal_tail(journal_t *journal)
/*
* We need to make sure that any blocks that were recently written out
* --- perhaps by log_do_checkpoint() --- are flushed out before we
- * drop the transactions from the journal. It's unlikely this will be
- * necessary, especially with an appropriately sized journal, but we
- * need this to guarantee correctness. Fortunately
- * cleanup_journal_tail() doesn't get called all that often.
+ * drop the transactions from the journal. Similarly we need to be sure
+ * superblock makes it to disk before next transaction starts reusing
+ * freed space (otherwise we could replay some blocks of the new
+ * transaction thinking they belong to the old one). So we use
+ * WRITE_FLUSH_FUA. It's unlikely this will be necessary, especially
+ * with an appropriately sized journal, but we need this to guarantee
+ * correctness. Fortunately cleanup_journal_tail() doesn't get called
+ * all that often.
*/
- if (journal->j_flags & JFS_BARRIER)
- blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
+ journal_update_sb_log_tail(journal, first_tid, blocknr,
+ WRITE_FLUSH_FUA);
spin_lock(&journal->j_state_lock);
- if (!tid_gt(first_tid, journal->j_tail_sequence)) {
- spin_unlock(&journal->j_state_lock);
- /* Someone else cleaned up journal so return 0 */
- return 0;
- }
/* OK, update the superblock to recover the freed space.
* Physical blocks come first: have we wrapped beyond the end of
* the log? */
@@ -539,8 +538,6 @@ int cleanup_journal_tail(journal_t *journal)
journal->j_tail_sequence = first_tid;
journal->j_tail = blocknr;
spin_unlock(&journal->j_state_lock);
- if (!(journal->j_flags & JFS_ABORT))
- journal_update_superblock(journal, 1);
return 0;
}
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index f2b9a571f4c..52c15c77602 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -298,6 +298,7 @@ void journal_commit_transaction(journal_t *journal)
int tag_flag;
int i;
struct blk_plug plug;
+ int write_op = WRITE;
/*
* First job: lock down the current transaction and wait for
@@ -307,7 +308,16 @@ void journal_commit_transaction(journal_t *journal)
/* Do we need to erase the effects of a prior journal_flush? */
if (journal->j_flags & JFS_FLUSHED) {
jbd_debug(3, "super block updated\n");
- journal_update_superblock(journal, 1);
+ mutex_lock(&journal->j_checkpoint_mutex);
+ /*
+ * We hold j_checkpoint_mutex so tail cannot change under us.
+ * We don't need any special data guarantees for writing sb
+ * since journal is empty and it is ok for write to be
+ * flushed only with transaction commit.
+ */
+ journal_update_sb_log_tail(journal, journal->j_tail_sequence,
+ journal->j_tail, WRITE_SYNC);
+ mutex_unlock(&journal->j_checkpoint_mutex);
} else {
jbd_debug(3, "superblock not updated\n");
}
@@ -413,13 +423,16 @@ void journal_commit_transaction(journal_t *journal)
jbd_debug (3, "JBD: commit phase 2\n");
+ if (tid_geq(journal->j_commit_waited, commit_transaction->t_tid))
+ write_op = WRITE_SYNC;
+
/*
* Now start flushing things to disk, in the order they appear
* on the transaction lists. Data blocks go first.
*/
blk_start_plug(&plug);
err = journal_submit_data_buffers(journal, commit_transaction,
- WRITE_SYNC);
+ write_op);
blk_finish_plug(&plug);
/*
@@ -478,7 +491,7 @@ void journal_commit_transaction(journal_t *journal)
blk_start_plug(&plug);
- journal_write_revoke_records(journal, commit_transaction, WRITE_SYNC);
+ journal_write_revoke_records(journal, commit_transaction, write_op);
/*
* If we found any dirty or locked buffers, then we should have
@@ -649,7 +662,7 @@ start_journal_io:
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
bh->b_end_io = journal_end_buffer_io_sync;
- submit_bh(WRITE_SYNC, bh);
+ submit_bh(write_op, bh);
}
cond_resched();
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 0971e921780..425c2f2cf17 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -563,6 +563,8 @@ int log_wait_commit(journal_t *journal, tid_t tid)
spin_unlock(&journal->j_state_lock);
#endif
spin_lock(&journal->j_state_lock);
+ if (!tid_geq(journal->j_commit_waited, tid))
+ journal->j_commit_waited = tid;
while (tid_gt(tid, journal->j_commit_sequence)) {
jbd_debug(1, "JBD: want %d, j_commit_sequence=%d\n",
tid, journal->j_commit_sequence);
@@ -921,8 +923,33 @@ static int journal_reset(journal_t *journal)
journal->j_max_transaction_buffers = journal->j_maxlen / 4;
- /* Add the dynamic fields and write it to disk. */
- journal_update_superblock(journal, 1);
+ /*
+ * As a special case, if the on-disk copy is already marked as needing
+ * no recovery (s_start == 0), then we can safely defer the superblock
+ * update until the next commit by setting JFS_FLUSHED. This avoids
+ * attempting a write to a potential-readonly device.
+ */
+ if (sb->s_start == 0) {
+ jbd_debug(1,"JBD: Skipping superblock update on recovered sb "
+ "(start %u, seq %d, errno %d)\n",
+ journal->j_tail, journal->j_tail_sequence,
+ journal->j_errno);
+ journal->j_flags |= JFS_FLUSHED;
+ } else {
+ /* Lock here to make assertions happy... */
+ mutex_lock(&journal->j_checkpoint_mutex);
+ /*
+ * Update log tail information. We use WRITE_FUA since new
+ * transaction will start reusing journal space and so we
+ * must make sure information about current log tail is on
+ * disk before that.
+ */
+ journal_update_sb_log_tail(journal,
+ journal->j_tail_sequence,
+ journal->j_tail,
+ WRITE_FUA);
+ mutex_unlock(&journal->j_checkpoint_mutex);
+ }
return journal_start_thread(journal);
}
@@ -999,35 +1026,15 @@ int journal_create(journal_t *journal)
return journal_reset(journal);
}
-/**
- * void journal_update_superblock() - Update journal sb on disk.
- * @journal: The journal to update.
- * @wait: Set to '0' if you don't want to wait for IO completion.
- *
- * Update a journal's dynamic superblock fields and write it to disk,
- * optionally waiting for the IO to complete.
- */
-void journal_update_superblock(journal_t *journal, int wait)
+static void journal_write_superblock(journal_t *journal, int write_op)
{
- journal_superblock_t *sb = journal->j_superblock;
struct buffer_head *bh = journal->j_sb_buffer;
+ int ret;
- /*
- * As a special case, if the on-disk copy is already marked as needing
- * no recovery (s_start == 0) and there are no outstanding transactions
- * in the filesystem, then we can safely defer the superblock update
- * until the next commit by setting JFS_FLUSHED. This avoids
- * attempting a write to a potential-readonly device.
- */
- if (sb->s_start == 0 && journal->j_tail_sequence ==
- journal->j_transaction_sequence) {
- jbd_debug(1,"JBD: Skipping superblock update on recovered sb "
- "(start %u, seq %d, errno %d)\n",
- journal->j_tail, journal->j_tail_sequence,
- journal->j_errno);
- goto out;
- }
-
+ trace_journal_write_superblock(journal, write_op);
+ if (!(journal->j_flags & JFS_BARRIER))
+ write_op &= ~(REQ_FUA | REQ_FLUSH);
+ lock_buffer(bh);
if (buffer_write_io_error(bh)) {
char b[BDEVNAME_SIZE];
/*
@@ -1045,42 +1052,100 @@ void journal_update_superblock(journal_t *journal, int wait)
set_buffer_uptodate(bh);
}
+ get_bh(bh);
+ bh->b_end_io = end_buffer_write_sync;
+ ret = submit_bh(write_op, bh);
+ wait_on_buffer(bh);
+ if (buffer_write_io_error(bh)) {
+ clear_buffer_write_io_error(bh);
+ set_buffer_uptodate(bh);
+ ret = -EIO;
+ }
+ if (ret) {
+ char b[BDEVNAME_SIZE];
+ printk(KERN_ERR "JBD: Error %d detected "
+ "when updating journal superblock for %s.\n",
+ ret, journal_dev_name(journal, b));
+ }
+}
+
+/**
+ * journal_update_sb_log_tail() - Update log tail in journal sb on disk.
+ * @journal: The journal to update.
+ * @tail_tid: TID of the new transaction at the tail of the log
+ * @tail_block: The first block of the transaction at the tail of the log
+ * @write_op: With which operation should we write the journal sb
+ *
+ * Update a journal's superblock information about log tail and write it to
+ * disk, waiting for the IO to complete.
+ */
+void journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
+ unsigned int tail_block, int write_op)
+{
+ journal_superblock_t *sb = journal->j_superblock;
+
+ BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
+ jbd_debug(1,"JBD: updating superblock (start %u, seq %u)\n",
+ tail_block, tail_tid);
+
+ sb->s_sequence = cpu_to_be32(tail_tid);
+ sb->s_start = cpu_to_be32(tail_block);
+
+ journal_write_superblock(journal, write_op);
+
+ /* Log is no longer empty */
+ spin_lock(&journal->j_state_lock);
+ WARN_ON(!sb->s_sequence);
+ journal->j_flags &= ~JFS_FLUSHED;
+ spin_unlock(&journal->j_state_lock);
+}
+
+/**
+ * mark_journal_empty() - Mark the on-disk journal as empty.
+ * @journal: The journal to update.
+ *
+ * Update a journal's dynamic superblock fields to show that the journal is
+ * empty. Write the updated superblock to disk, waiting for the IO to complete.
+ */
+static void mark_journal_empty(journal_t *journal)
+{
+ journal_superblock_t *sb = journal->j_superblock;
+
+ BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
spin_lock(&journal->j_state_lock);
- jbd_debug(1,"JBD: updating superblock (start %u, seq %d, errno %d)\n",
- journal->j_tail, journal->j_tail_sequence, journal->j_errno);
+ jbd_debug(1, "JBD: Marking journal as empty (seq %d)\n",
+ journal->j_tail_sequence);
sb->s_sequence = cpu_to_be32(journal->j_tail_sequence);
- sb->s_start = cpu_to_be32(journal->j_tail);
- sb->s_errno = cpu_to_be32(journal->j_errno);
+ sb->s_start = cpu_to_be32(0);
spin_unlock(&journal->j_state_lock);
- BUFFER_TRACE(bh, "marking dirty");
- mark_buffer_dirty(bh);
- if (wait) {
- sync_dirty_buffer(bh);
- if (buffer_write_io_error(bh)) {
- char b[BDEVNAME_SIZE];
- printk(KERN_ERR "JBD: I/O error detected "
- "when updating journal superblock for %s.\n",
- journal_dev_name(journal, b));
- clear_buffer_write_io_error(bh);
- set_buffer_uptodate(bh);
- }
- } else
- write_dirty_buffer(bh, WRITE);
+ journal_write_superblock(journal, WRITE_FUA);
- trace_jbd_update_superblock_end(journal, wait);
-out:
- /* If we have just flushed the log (by marking s_start==0), then
- * any future commit will have to be careful to update the
- * superblock again to re-record the true start of the log. */
+ spin_lock(&journal->j_state_lock);
+ /* Log is empty */
+ journal->j_flags |= JFS_FLUSHED;
+ spin_unlock(&journal->j_state_lock);
+}
+
+/**
+ * journal_update_sb_errno() - Update the error field in the journal superblock.
+ * @journal: The journal to update.
+ *
+ * Update a journal's errno. Write the updated superblock to disk, waiting
+ * for the IO to complete.
+ */
+static void journal_update_sb_errno(journal_t *journal)
+{
+ journal_superblock_t *sb = journal->j_superblock;
spin_lock(&journal->j_state_lock);
- if (sb->s_start)
- journal->j_flags &= ~JFS_FLUSHED;
- else
- journal->j_flags |= JFS_FLUSHED;
+ jbd_debug(1, "JBD: updating superblock error (errno %d)\n",
+ journal->j_errno);
+ sb->s_errno = cpu_to_be32(journal->j_errno);
spin_unlock(&journal->j_state_lock);
+
+ journal_write_superblock(journal, WRITE_SYNC);
}
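
Taken together, the hunks above replace the single journal_update_superblock(wait) entry point with one low-level writer, journal_write_superblock(), plus callers that each touch only the fields they own and pick their own write flags: FUA where ordering against journal reuse matters, a plain sync write for the errno update. A hypothetical standalone sketch of that split, with toy_* stand-ins rather than the jbd types:

    #include <stdint.h>
    #include <stdio.h>

    struct toy_sb { uint32_t sequence, start, err; };
    enum toy_op { TOY_SYNC, TOY_FUA };

    /* Single place that performs the I/O, like journal_write_superblock(). */
    static void toy_write_sb(struct toy_sb *sb, enum toy_op op)
    {
        printf("sb write: seq=%u start=%u err=%u (%s)\n",
               sb->sequence, sb->start, sb->err,
               op == TOY_FUA ? "FUA" : "SYNC");
    }

    /* Thin wrappers: each updates only the fields it owns, then delegates. */
    static void toy_update_log_tail(struct toy_sb *sb, uint32_t tid, uint32_t blk)
    {
        sb->sequence = tid;
        sb->start = blk;
        toy_write_sb(sb, TOY_FUA);     /* tail must hit stable storage first */
    }

    static void toy_mark_empty(struct toy_sb *sb, uint32_t tid)
    {
        sb->sequence = tid;
        sb->start = 0;                 /* start == 0 means "needs no recovery" */
        toy_write_sb(sb, TOY_FUA);
    }

    static void toy_update_err(struct toy_sb *sb, uint32_t err)
    {
        sb->err = err;                 /* no ordering requirement here */
        toy_write_sb(sb, TOY_SYNC);
    }

    int main(void)
    {
        struct toy_sb sb = { 0, 0, 0 };

        toy_update_log_tail(&sb, 7, 1024);
        toy_mark_empty(&sb, 8);
        toy_update_err(&sb, 5);
        return 0;
    }
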
/*
@@ -1251,6 +1316,8 @@ int journal_destroy(journal_t *journal)
/* Force any old transactions to disk */
+ /* We cannot race with anybody but must keep assertions happy */
+ mutex_lock(&journal->j_checkpoint_mutex);
/* Totally anal locking here... */
spin_lock(&journal->j_list_lock);
while (journal->j_checkpoint_transactions != NULL) {
@@ -1266,16 +1333,14 @@ int journal_destroy(journal_t *journal)
if (journal->j_sb_buffer) {
if (!is_journal_aborted(journal)) {
- /* We can now mark the journal as empty. */
- journal->j_tail = 0;
journal->j_tail_sequence =
++journal->j_transaction_sequence;
- journal_update_superblock(journal, 1);
- } else {
+ mark_journal_empty(journal);
+ } else
err = -EIO;
- }
brelse(journal->j_sb_buffer);
}
+ mutex_unlock(&journal->j_checkpoint_mutex);
if (journal->j_inode)
iput(journal->j_inode);
@@ -1455,7 +1520,6 @@ int journal_flush(journal_t *journal)
{
int err = 0;
transaction_t *transaction = NULL;
- unsigned int old_tail;
spin_lock(&journal->j_state_lock);
@@ -1490,6 +1554,7 @@ int journal_flush(journal_t *journal)
if (is_journal_aborted(journal))
return -EIO;
+ mutex_lock(&journal->j_checkpoint_mutex);
cleanup_journal_tail(journal);
/* Finally, mark the journal as really needing no recovery.
@@ -1497,14 +1562,9 @@ int journal_flush(journal_t *journal)
* the magic code for a fully-recovered superblock. Any future
* commits of data to the journal will restore the current
* s_start value. */
+ mark_journal_empty(journal);
+ mutex_unlock(&journal->j_checkpoint_mutex);
spin_lock(&journal->j_state_lock);
- old_tail = journal->j_tail;
- journal->j_tail = 0;
- spin_unlock(&journal->j_state_lock);
- journal_update_superblock(journal, 1);
- spin_lock(&journal->j_state_lock);
- journal->j_tail = old_tail;
-
J_ASSERT(!journal->j_running_transaction);
J_ASSERT(!journal->j_committing_transaction);
J_ASSERT(!journal->j_checkpoint_transactions);
@@ -1544,8 +1604,12 @@ int journal_wipe(journal_t *journal, int write)
write ? "Clearing" : "Ignoring");
err = journal_skip_recovery(journal);
- if (write)
- journal_update_superblock(journal, 1);
+ if (write) {
+ /* Lock to make assertions happy... */
+ mutex_lock(&journal->j_checkpoint_mutex);
+ mark_journal_empty(journal);
+ mutex_unlock(&journal->j_checkpoint_mutex);
+ }
no_recovery:
return err;
@@ -1613,7 +1677,7 @@ static void __journal_abort_soft (journal_t *journal, int errno)
__journal_abort_hard(journal);
if (errno)
- journal_update_superblock(journal, 1);
+ journal_update_sb_errno(journal);
}
/**
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index b2a7e5244e3..febc10db5ce 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1433,8 +1433,6 @@ int journal_stop(handle_t *handle)
}
}
- if (handle->h_sync)
- transaction->t_synchronous_commit = 1;
current->journal_info = NULL;
spin_lock(&journal->j_state_lock);
spin_lock(&transaction->t_handle_lock);
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index bb6f993ebca..3d3092eda81 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -240,7 +240,7 @@ void jffs2_evict_inode (struct inode *inode)
jffs2_dbg(1, "%s(): ino #%lu mode %o\n",
__func__, inode->i_ino, inode->i_mode);
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
jffs2_do_clear_inode(c, f);
}
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 77b69b27f82..4692bf3ca8c 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -169,7 +169,7 @@ void jfs_evict_inode(struct inode *inode)
} else {
truncate_inode_pages(&inode->i_data, 0);
}
- end_writeback(inode);
+ clear_inode(inode);
dquot_drop(inode);
}
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index e3ab5e5a904..f1cb512c501 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -2175,7 +2175,7 @@ void logfs_evict_inode(struct inode *inode)
}
}
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
/* Cheaper version of write_inode. All changes are concealed in
* aliases, which are moved back. No write to the medium happens.
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index fcb05d2c6b5..2a503ad020d 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -32,7 +32,7 @@ static void minix_evict_inode(struct inode *inode)
minix_truncate(inode);
}
invalidate_inode_buffers(inode);
- end_writeback(inode);
+ clear_inode(inode);
if (!inode->i_nlink)
minix_free_inode(inode);
}
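
The jffs2, jfs, logfs, minix and (below) ncpfs hunks all make the same mechanical substitution of clear_inode() for end_writeback() in their ->evict_inode implementations. Simplified, the common shape they converge on looks roughly like this (per-filesystem teardown elided; only calls visible in the hunks above are used):

    static void example_evict_inode(struct inode *inode)
    {
            truncate_inode_pages(&inode->i_data, 0);  /* drop cached pages */
            clear_inode(inode);                       /* was end_writeback() */
            /* ...per-filesystem teardown: free blocks, drop quota, etc... */
    }
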
diff --git a/fs/namei.c b/fs/namei.c
index 93ff12b1a1d..c651f02c9fe 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1452,7 +1452,8 @@ EXPORT_SYMBOL(full_name_hash);
*/
static inline unsigned long hash_name(const char *name, unsigned int *hashp)
{
- unsigned long a, mask, hash, len;
+ unsigned long a, b, adata, bdata, mask, hash, len;
+ const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
hash = a = 0;
len = -sizeof(unsigned long);
@@ -1460,17 +1461,18 @@ static inline unsigned long hash_name(const char *name, unsigned int *hashp)
hash = (hash + a) * 9;
len += sizeof(unsigned long);
a = load_unaligned_zeropad(name+len);
- /* Do we have any NUL or '/' bytes in this word? */
- mask = has_zero(a) | has_zero(a ^ REPEAT_BYTE('/'));
- } while (!mask);
-
- /* The mask *below* the first high bit set */
- mask = (mask - 1) & ~mask;
- mask >>= 7;
- hash += a & mask;
+ b = a ^ REPEAT_BYTE('/');
+ } while (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants)));
+
+ adata = prep_zero_mask(a, adata, &constants);
+ bdata = prep_zero_mask(b, bdata, &constants);
+
+ mask = create_zero_mask(adata | bdata);
+
+ hash += a & zero_bytemask(mask);
*hashp = fold_hash(hash);
- return len + count_masked_bytes(mask);
+ return len + find_zero(mask);
}
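
The rewritten hash_name() scans the path a word at a time, stopping at the first byte that is either NUL or '/'; the '/' case is reduced to a zero-byte test by XORing the word with '/' repeated in every byte. A userspace illustration of that underlying bit trick, assuming a 64-bit little-endian word; this is the classic zero-byte test, not the kernel's word_at_a_time API:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ONES  0x0101010101010101ULL
    #define HIGHS 0x8080808080808080ULL
    #define REPEAT(c) (ONES * (unsigned char)(c))

    /* Nonzero iff some byte of x is zero (the classic zero-byte test). */
    static uint64_t has_zero_byte(uint64_t x)
    {
        return (x - ONES) & ~x & HIGHS;
    }

    int main(void)
    {
        const char *name = "usr/bin";              /* 7 chars + NUL = 8 bytes */
        uint64_t word;

        memcpy(&word, name, sizeof(word));         /* one word-sized load */
        uint64_t stop = has_zero_byte(word) |               /* NUL byte?  */
                        has_zero_byte(word ^ REPEAT('/'));  /* or a '/'?  */
        printf("stop mask: %#llx\n", (unsigned long long)stop);
        return 0;
    }
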
#else
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 87484fb8d17..333df07ae3b 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -292,7 +292,7 @@ static void
ncp_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
if (S_ISDIR(inode->i_mode)) {
DDPRINTK("ncp_evict_inode: put directory %ld\n", inode->i_ino);
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 2a0e6c59914..f90f4f5cd42 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -29,9 +29,20 @@ config NFS_FS
If unsure, say N.
+config NFS_V2
+ bool "NFS client support for NFS version 2"
+ depends on NFS_FS
+ default y
+ help
+ This option enables support for version 2 of the NFS protocol
+ (RFC 1094) in the kernel's NFS client.
+
+ If unsure, say Y.
+
config NFS_V3
bool "NFS client support for NFS version 3"
depends on NFS_FS
+ default y
help
This option enables support for version 3 of the NFS protocol
(RFC 1813) in the kernel's NFS client.
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index b58613d0abb..7ddd45d9f17 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -4,11 +4,12 @@
obj-$(CONFIG_NFS_FS) += nfs.o
-nfs-y := client.o dir.o file.o getroot.o inode.o super.o nfs2xdr.o \
- direct.o pagelist.o proc.o read.o symlink.o unlink.o \
+nfs-y := client.o dir.o file.o getroot.o inode.o super.o \
+ direct.o pagelist.o read.o symlink.o unlink.o \
write.o namespace.o mount_clnt.o \
dns_resolve.o cache_lib.o
nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
+nfs-$(CONFIG_NFS_V2) += proc.o nfs2xdr.o
nfs-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o
nfs-$(CONFIG_NFS_V3_ACL) += nfs3acl.o
nfs-$(CONFIG_NFS_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \
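
With NFSv2 made optional, proc.o and nfs2xdr.o are only built when CONFIG_NFS_V2 is set, and the RPC version table in fs/nfs/client.c (further down in this diff) gains a matching #ifdef so unbuilt versions simply stay NULL. The same conditional-registration pattern, reduced to a hypothetical standalone sketch:

    #include <stdio.h>

    #define TOY_CONFIG_NFS_V2 1   /* flip to 0 to model CONFIG_NFS_V2=n */

    static const char *toy_version[5] = {
    #if TOY_CONFIG_NFS_V2
        [2] = "nfs_version2",
    #endif
        [3] = "nfs_version3",
        [4] = "nfs_version4",
    };

    int main(void)
    {
        for (int v = 2; v <= 4; v++)
            printf("v%d: %s\n", v,
                   toy_version[v] ? toy_version[v] : "(not built)");
        return 0;
    }
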
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 7f6a23f0244..7ae8a608956 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -187,7 +187,6 @@ static void bl_end_io_read(struct bio *bio, int err)
struct parallel_io *par = bio->bi_private;
const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
- struct nfs_read_data *rdata = (struct nfs_read_data *)par->data;
do {
struct page *page = bvec->bv_page;
@@ -198,9 +197,12 @@ static void bl_end_io_read(struct bio *bio, int err)
SetPageUptodate(page);
} while (bvec >= bio->bi_io_vec);
if (!uptodate) {
- if (!rdata->pnfs_error)
- rdata->pnfs_error = -EIO;
- pnfs_set_lo_fail(rdata->lseg);
+ struct nfs_read_data *rdata = par->data;
+ struct nfs_pgio_header *header = rdata->header;
+
+ if (!header->pnfs_error)
+ header->pnfs_error = -EIO;
+ pnfs_set_lo_fail(header->lseg);
}
bio_put(bio);
put_parallel(par);
@@ -221,7 +223,7 @@ bl_end_par_io_read(void *data, int unused)
{
struct nfs_read_data *rdata = data;
- rdata->task.tk_status = rdata->pnfs_error;
+ rdata->task.tk_status = rdata->header->pnfs_error;
INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
schedule_work(&rdata->task.u.tk_work);
}
@@ -229,6 +231,7 @@ bl_end_par_io_read(void *data, int unused)
static enum pnfs_try_status
bl_read_pagelist(struct nfs_read_data *rdata)
{
+ struct nfs_pgio_header *header = rdata->header;
int i, hole;
struct bio *bio = NULL;
struct pnfs_block_extent *be = NULL, *cow_read = NULL;
@@ -239,7 +242,7 @@ bl_read_pagelist(struct nfs_read_data *rdata)
int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;
dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
- rdata->npages, f_offset, (unsigned int)rdata->args.count);
+ rdata->pages.npages, f_offset, (unsigned int)rdata->args.count);
par = alloc_parallel(rdata);
if (!par)
@@ -249,17 +252,17 @@ bl_read_pagelist(struct nfs_read_data *rdata)
isect = (sector_t) (f_offset >> SECTOR_SHIFT);
/* Code assumes extents are page-aligned */
- for (i = pg_index; i < rdata->npages; i++) {
+ for (i = pg_index; i < rdata->pages.npages; i++) {
if (!extent_length) {
/* We've used up the previous extent */
bl_put_extent(be);
bl_put_extent(cow_read);
bio = bl_submit_bio(READ, bio);
/* Get the next one */
- be = bl_find_get_extent(BLK_LSEG2EXT(rdata->lseg),
+ be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
isect, &cow_read);
if (!be) {
- rdata->pnfs_error = -EIO;
+ header->pnfs_error = -EIO;
goto out;
}
extent_length = be->be_length -
@@ -282,11 +285,12 @@ bl_read_pagelist(struct nfs_read_data *rdata)
struct pnfs_block_extent *be_read;
be_read = (hole && cow_read) ? cow_read : be;
- bio = bl_add_page_to_bio(bio, rdata->npages - i, READ,
+ bio = bl_add_page_to_bio(bio, rdata->pages.npages - i,
+ READ,
isect, pages[i], be_read,
bl_end_io_read, par);
if (IS_ERR(bio)) {
- rdata->pnfs_error = PTR_ERR(bio);
+ header->pnfs_error = PTR_ERR(bio);
bio = NULL;
goto out;
}
@@ -294,9 +298,9 @@ bl_read_pagelist(struct nfs_read_data *rdata)
isect += PAGE_CACHE_SECTORS;
extent_length -= PAGE_CACHE_SECTORS;
}
- if ((isect << SECTOR_SHIFT) >= rdata->inode->i_size) {
+ if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
rdata->res.eof = 1;
- rdata->res.count = rdata->inode->i_size - f_offset;
+ rdata->res.count = header->inode->i_size - f_offset;
} else {
rdata->res.count = (isect << SECTOR_SHIFT) - f_offset;
}
@@ -345,7 +349,6 @@ static void bl_end_io_write_zero(struct bio *bio, int err)
struct parallel_io *par = bio->bi_private;
const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
- struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;
do {
struct page *page = bvec->bv_page;
@@ -358,9 +361,12 @@ static void bl_end_io_write_zero(struct bio *bio, int err)
} while (bvec >= bio->bi_io_vec);
if (unlikely(!uptodate)) {
- if (!wdata->pnfs_error)
- wdata->pnfs_error = -EIO;
- pnfs_set_lo_fail(wdata->lseg);
+ struct nfs_write_data *data = par->data;
+ struct nfs_pgio_header *header = data->header;
+
+ if (!header->pnfs_error)
+ header->pnfs_error = -EIO;
+ pnfs_set_lo_fail(header->lseg);
}
bio_put(bio);
put_parallel(par);
@@ -370,12 +376,13 @@ static void bl_end_io_write(struct bio *bio, int err)
{
struct parallel_io *par = bio->bi_private;
const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
- struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;
+ struct nfs_write_data *data = par->data;
+ struct nfs_pgio_header *header = data->header;
if (!uptodate) {
- if (!wdata->pnfs_error)
- wdata->pnfs_error = -EIO;
- pnfs_set_lo_fail(wdata->lseg);
+ if (!header->pnfs_error)
+ header->pnfs_error = -EIO;
+ pnfs_set_lo_fail(header->lseg);
}
bio_put(bio);
put_parallel(par);
@@ -391,9 +398,9 @@ static void bl_write_cleanup(struct work_struct *work)
dprintk("%s enter\n", __func__);
task = container_of(work, struct rpc_task, u.tk_work);
wdata = container_of(task, struct nfs_write_data, task);
- if (likely(!wdata->pnfs_error)) {
+ if (likely(!wdata->header->pnfs_error)) {
/* Marks for LAYOUTCOMMIT */
- mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
+ mark_extents_written(BLK_LSEG2EXT(wdata->header->lseg),
wdata->args.offset, wdata->args.count);
}
pnfs_ld_write_done(wdata);
@@ -404,12 +411,12 @@ static void bl_end_par_io_write(void *data, int num_se)
{
struct nfs_write_data *wdata = data;
- if (unlikely(wdata->pnfs_error)) {
- bl_free_short_extents(&BLK_LSEG2EXT(wdata->lseg)->bl_inval,
+ if (unlikely(wdata->header->pnfs_error)) {
+ bl_free_short_extents(&BLK_LSEG2EXT(wdata->header->lseg)->bl_inval,
num_se);
}
- wdata->task.tk_status = wdata->pnfs_error;
+ wdata->task.tk_status = wdata->header->pnfs_error;
wdata->verf.committed = NFS_FILE_SYNC;
INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
schedule_work(&wdata->task.u.tk_work);
@@ -540,6 +547,7 @@ check_page:
static enum pnfs_try_status
bl_write_pagelist(struct nfs_write_data *wdata, int sync)
{
+ struct nfs_pgio_header *header = wdata->header;
int i, ret, npg_zero, pg_index, last = 0;
struct bio *bio = NULL;
struct pnfs_block_extent *be = NULL, *cow_read = NULL;
@@ -552,7 +560,7 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
pgoff_t index;
u64 temp;
int npg_per_block =
- NFS_SERVER(wdata->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;
+ NFS_SERVER(header->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;
dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);
/* At this point, wdata->pages is a (sequential) list of nfs_pages.
@@ -566,7 +574,7 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
/* At this point, have to be more careful with error handling */
isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
- be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), isect, &cow_read);
+ be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg), isect, &cow_read);
if (!be || !is_writable(be, isect)) {
dprintk("%s no matching extents!\n", __func__);
goto out_mds;
@@ -597,10 +605,10 @@ fill_invalid_ext:
dprintk("%s zero %dth page: index %lu isect %llu\n",
__func__, npg_zero, index,
(unsigned long long)isect);
- page = bl_find_get_zeroing_page(wdata->inode, index,
+ page = bl_find_get_zeroing_page(header->inode, index,
cow_read);
if (unlikely(IS_ERR(page))) {
- wdata->pnfs_error = PTR_ERR(page);
+ header->pnfs_error = PTR_ERR(page);
goto out;
} else if (page == NULL)
goto next_page;
@@ -612,7 +620,7 @@ fill_invalid_ext:
__func__, ret);
end_page_writeback(page);
page_cache_release(page);
- wdata->pnfs_error = ret;
+ header->pnfs_error = ret;
goto out;
}
if (likely(!bl_push_one_short_extent(be->be_inval)))
@@ -620,11 +628,11 @@ fill_invalid_ext:
else {
end_page_writeback(page);
page_cache_release(page);
- wdata->pnfs_error = -ENOMEM;
+ header->pnfs_error = -ENOMEM;
goto out;
}
/* FIXME: This should be done in bi_end_io */
- mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
+ mark_extents_written(BLK_LSEG2EXT(header->lseg),
page->index << PAGE_CACHE_SHIFT,
PAGE_CACHE_SIZE);
@@ -632,7 +640,7 @@ fill_invalid_ext:
isect, page, be,
bl_end_io_write_zero, par);
if (IS_ERR(bio)) {
- wdata->pnfs_error = PTR_ERR(bio);
+ header->pnfs_error = PTR_ERR(bio);
bio = NULL;
goto out;
}
@@ -647,16 +655,16 @@ next_page:
/* Middle pages */
pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
- for (i = pg_index; i < wdata->npages; i++) {
+ for (i = pg_index; i < wdata->pages.npages; i++) {
if (!extent_length) {
/* We've used up the previous extent */
bl_put_extent(be);
bio = bl_submit_bio(WRITE, bio);
/* Get the next one */
- be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg),
+ be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
isect, NULL);
if (!be || !is_writable(be, isect)) {
- wdata->pnfs_error = -EINVAL;
+ header->pnfs_error = -EINVAL;
goto out;
}
if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
@@ -664,7 +672,7 @@ next_page:
be->be_inval)))
par->bse_count++;
else {
- wdata->pnfs_error = -ENOMEM;
+ header->pnfs_error = -ENOMEM;
goto out;
}
}
@@ -677,15 +685,15 @@ next_page:
if (unlikely(ret)) {
dprintk("%s bl_mark_sectors_init fail %d\n",
__func__, ret);
- wdata->pnfs_error = ret;
+ header->pnfs_error = ret;
goto out;
}
}
- bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE,
+ bio = bl_add_page_to_bio(bio, wdata->pages.npages - i, WRITE,
isect, pages[i], be,
bl_end_io_write, par);
if (IS_ERR(bio)) {
- wdata->pnfs_error = PTR_ERR(bio);
+ header->pnfs_error = PTR_ERR(bio);
bio = NULL;
goto out;
}
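
Throughout the blocklayout hunks, fields that used to live on each nfs_read_data/nfs_write_data (pnfs_error, lseg, inode, npages) are now reached through a shared nfs_pgio_header via ->header, and the "first error wins" rule is applied to that shared header. A hypothetical sketch of that sharing, using toy_* stand-ins rather than the NFS structures:

    #include <stdio.h>

    /* One header shared by all the RPCs issued for a single pageio,
     * each RPC keeping only its own page count. */
    struct toy_header { int pnfs_error; const char *lseg; };
    struct toy_rpc    { struct toy_header *header; unsigned int npages; };

    static void toy_fail(struct toy_rpc *rpc, int err)
    {
        struct toy_header *hdr = rpc->header;

        if (!hdr->pnfs_error)      /* first error wins, as in the hunks above */
            hdr->pnfs_error = err;
    }

    int main(void)
    {
        struct toy_header hdr = { 0, "layout-segment" };
        struct toy_rpc a = { &hdr, 4 };
        struct toy_rpc b = { &hdr, 2 };

        toy_fail(&a, -5);          /* -EIO from one RPC */
        toy_fail(&b, -22);         /* a later error does not overwrite it */
        printf("shared pnfs_error: %d\n", hdr.pnfs_error);
        return 0;
    }
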
diff --git a/fs/nfs/blocklayout/blocklayoutdev.c b/fs/nfs/blocklayout/blocklayoutdev.c
index a5c88a554d9..c96554245cc 100644
--- a/fs/nfs/blocklayout/blocklayoutdev.c
+++ b/fs/nfs/blocklayout/blocklayoutdev.c
@@ -123,7 +123,7 @@ nfs4_blk_decode_device(struct nfs_server *server,
uint8_t *dataptr;
DECLARE_WAITQUEUE(wq, current);
int offset, len, i, rc;
- struct net *net = server->nfs_client->net;
+ struct net *net = server->nfs_client->cl_net;
struct nfs_net *nn = net_generic(net, nfs_net_id);
struct bl_dev_msg *reply = &nn->bl_mount_reply;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 60f7e4ec842..7d108753af8 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -65,7 +65,7 @@ static DECLARE_WAIT_QUEUE_HEAD(nfs_client_active_wq);
static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
{
int ret = 0;
- struct nfs_net *nn = net_generic(clp->net, nfs_net_id);
+ struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
if (clp->rpc_ops->version != 4 || minorversion != 0)
return ret;
@@ -90,7 +90,9 @@ static bool nfs4_disable_idmapping = true;
* RPC cruft for NFS
*/
static const struct rpc_version *nfs_version[5] = {
+#ifdef CONFIG_NFS_V2
[2] = &nfs_version2,
+#endif
#ifdef CONFIG_NFS_V3
[3] = &nfs_version3,
#endif
@@ -129,6 +131,7 @@ const struct rpc_program nfsacl_program = {
#endif /* CONFIG_NFS_V3_ACL */
struct nfs_client_initdata {
+ unsigned long init_flags;
const char *hostname;
const struct sockaddr *addr;
size_t addrlen;
@@ -172,7 +175,7 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
clp->cl_rpcclient = ERR_PTR(-EINVAL);
clp->cl_proto = cl_init->proto;
- clp->net = get_net(cl_init->net);
+ clp->cl_net = get_net(cl_init->net);
#ifdef CONFIG_NFS_V4
err = nfs_get_cb_ident_idr(clp, cl_init->minorversion);
@@ -182,7 +185,6 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
spin_lock_init(&clp->cl_lock);
INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
- clp->cl_boot_time = CURRENT_TIME;
clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
clp->cl_minorversion = cl_init->minorversion;
clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
@@ -207,6 +209,7 @@ static void nfs4_shutdown_session(struct nfs_client *clp)
if (nfs4_has_session(clp)) {
nfs4_deviceid_purge_client(clp);
nfs4_destroy_session(clp->cl_session);
+ nfs4_destroy_clientid(clp);
}
}
@@ -235,6 +238,9 @@ static void nfs4_shutdown_client(struct nfs_client *clp)
nfs_idmap_delete(clp);
rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
+ kfree(clp->cl_serverowner);
+ kfree(clp->cl_serverscope);
+ kfree(clp->cl_implid);
}
/* idr_remove_all is not needed as all id's are removed by nfs_put_client */
@@ -248,7 +254,7 @@ void nfs_cleanup_cb_ident_idr(struct net *net)
/* nfs_client_lock held */
static void nfs_cb_idr_remove_locked(struct nfs_client *clp)
{
- struct nfs_net *nn = net_generic(clp->net, nfs_net_id);
+ struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
if (clp->cl_cb_ident)
idr_remove(&nn->cb_ident_idr, clp->cl_cb_ident);
@@ -301,10 +307,8 @@ static void nfs_free_client(struct nfs_client *clp)
if (clp->cl_machine_cred != NULL)
put_rpccred(clp->cl_machine_cred);
- put_net(clp->net);
+ put_net(clp->cl_net);
kfree(clp->cl_hostname);
- kfree(clp->server_scope);
- kfree(clp->impl_id);
kfree(clp);
dprintk("<-- nfs_free_client()\n");
@@ -321,7 +325,7 @@ void nfs_put_client(struct nfs_client *clp)
return;
dprintk("--> nfs_put_client({%d})\n", atomic_read(&clp->cl_count));
- nn = net_generic(clp->net, nfs_net_id);
+ nn = net_generic(clp->cl_net, nfs_net_id);
if (atomic_dec_and_lock(&clp->cl_count, &nn->nfs_client_lock)) {
list_del(&clp->cl_share_link);
@@ -456,6 +460,8 @@ static bool nfs4_cb_match_client(const struct sockaddr *addr,
clp->cl_cons_state == NFS_CS_SESSION_INITING))
return false;
+ smp_rmb();
+
/* Match the version and minorversion */
if (clp->rpc_ops->version != 4 ||
clp->cl_minorversion != minorversion)
@@ -504,6 +510,47 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
return NULL;
}
+static bool nfs_client_init_is_complete(const struct nfs_client *clp)
+{
+ return clp->cl_cons_state != NFS_CS_INITING;
+}
+
+int nfs_wait_client_init_complete(const struct nfs_client *clp)
+{
+ return wait_event_killable(nfs_client_active_wq,
+ nfs_client_init_is_complete(clp));
+}
+
+/*
+ * Found an existing client. Make sure it's ready before returning.
+ */
+static struct nfs_client *
+nfs_found_client(const struct nfs_client_initdata *cl_init,
+ struct nfs_client *clp)
+{
+ int error;
+
+ error = nfs_wait_client_init_complete(clp);
+ if (error < 0) {
+ nfs_put_client(clp);
+ return ERR_PTR(-ERESTARTSYS);
+ }
+
+ if (clp->cl_cons_state < NFS_CS_READY) {
+ error = clp->cl_cons_state;
+ nfs_put_client(clp);
+ return ERR_PTR(error);
+ }
+
+ smp_rmb();
+
+ BUG_ON(clp->cl_cons_state != NFS_CS_READY);
+
+ dprintk("<-- %s found nfs_client %p for %s\n",
+ __func__, clp, cl_init->hostname ?: "");
+ return clp;
+}
+
/*
* Look up a client by IP address and protocol version
* - creates a new record if one doesn't yet exist
@@ -512,11 +559,9 @@ static struct nfs_client *
nfs_get_client(const struct nfs_client_initdata *cl_init,
const struct rpc_timeout *timeparms,
const char *ip_addr,
- rpc_authflavor_t authflavour,
- int noresvport)
+ rpc_authflavor_t authflavour)
{
struct nfs_client *clp, *new = NULL;
- int error;
struct nfs_net *nn = net_generic(cl_init->net, nfs_net_id);
dprintk("--> nfs_get_client(%s,v%u)\n",
@@ -527,60 +572,29 @@ nfs_get_client(const struct nfs_client_initdata *cl_init,
spin_lock(&nn->nfs_client_lock);
clp = nfs_match_client(cl_init);
- if (clp)
- goto found_client;
- if (new)
- goto install_client;
+ if (clp) {
+ spin_unlock(&nn->nfs_client_lock);
+ if (new)
+ nfs_free_client(new);
+ return nfs_found_client(cl_init, clp);
+ }
+ if (new) {
+ list_add(&new->cl_share_link, &nn->nfs_client_list);
+ spin_unlock(&nn->nfs_client_lock);
+ new->cl_flags = cl_init->init_flags;
+ return cl_init->rpc_ops->init_client(new,
+ timeparms, ip_addr,
+ authflavour);
+ }
spin_unlock(&nn->nfs_client_lock);
new = nfs_alloc_client(cl_init);
} while (!IS_ERR(new));
- dprintk("--> nfs_get_client() = %ld [failed]\n", PTR_ERR(new));
+ dprintk("<-- nfs_get_client() Failed to find %s (%ld)\n",
+ cl_init->hostname ?: "", PTR_ERR(new));
return new;
-
- /* install a new client and return with it unready */
-install_client:
- clp = new;
- list_add(&clp->cl_share_link, &nn->nfs_client_list);
- spin_unlock(&nn->nfs_client_lock);
-
- error = cl_init->rpc_ops->init_client(clp, timeparms, ip_addr,
- authflavour, noresvport);
- if (error < 0) {
- nfs_put_client(clp);
- return ERR_PTR(error);
- }
- dprintk("--> nfs_get_client() = %p [new]\n", clp);
- return clp;
-
- /* found an existing client
- * - make sure it's ready before returning
- */
-found_client:
- spin_unlock(&nn->nfs_client_lock);
-
- if (new)
- nfs_free_client(new);
-
- error = wait_event_killable(nfs_client_active_wq,
- clp->cl_cons_state < NFS_CS_INITING);
- if (error < 0) {
- nfs_put_client(clp);
- return ERR_PTR(-ERESTARTSYS);
- }
-
- if (clp->cl_cons_state < NFS_CS_READY) {
- error = clp->cl_cons_state;
- nfs_put_client(clp);
- return ERR_PTR(error);
- }
-
- BUG_ON(clp->cl_cons_state != NFS_CS_READY);
-
- dprintk("--> nfs_get_client() = %p [share]\n", clp);
- return clp;
}
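
The rewritten nfs_get_client() drops the found_client/install_client labels: the list lock is taken only to search, or to link in a client that was allocated while the lock was dropped, and both the wait-for-ready path and ->init_client() run without it. A hypothetical userspace sketch of that lookup-or-create loop (a pthread mutex stands in for the nfs_client_lock; readiness handling is omitted):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct toy_client { char host[64]; struct toy_client *next; };

    static struct toy_client *toy_list;
    static pthread_mutex_t toy_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct toy_client *toy_find(const char *host)
    {
        for (struct toy_client *c = toy_list; c; c = c->next)
            if (!strcmp(c->host, host))
                return c;
        return NULL;
    }

    static struct toy_client *toy_get_client(const char *host)
    {
        struct toy_client *clp, *new = NULL;

        do {
            pthread_mutex_lock(&toy_lock);
            clp = toy_find(host);
            if (clp) {                        /* found: drop the spare, share it */
                pthread_mutex_unlock(&toy_lock);
                free(new);
                return clp;                   /* real code waits for it to be ready */
            }
            if (new) {                        /* install the client allocated below */
                new->next = toy_list;
                toy_list = new;
                pthread_mutex_unlock(&toy_lock);
                return new;                   /* real code calls ->init_client() here */
            }
            pthread_mutex_unlock(&toy_lock);  /* never allocate under the lock */
            new = calloc(1, sizeof(*new));
            if (new)
                strncpy(new->host, host, sizeof(new->host) - 1);
        } while (new);

        return NULL;                          /* allocation failed */
    }

    int main(void)
    {
        struct toy_client *a = toy_get_client("server1");
        struct toy_client *b = toy_get_client("server1");

        return !(a && a == b);                /* the second call reuses the first */
    }
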
/*
@@ -588,27 +602,12 @@ found_client:
*/
void nfs_mark_client_ready(struct nfs_client *clp, int state)
{
+ smp_wmb();
clp->cl_cons_state = state;
wake_up_all(&nfs_client_active_wq);
}
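
The smp_wmb() added here pairs with the smp_rmb() calls inserted earlier in this diff (nfs4_cb_match_client() and nfs_found_client()): the client's fields must be visible before cl_cons_state reads READY, and a reader may only inspect them after observing that state. Roughly the same publish/observe ordering, expressed as a hypothetical sketch with C11 release/acquire atomics:

    #include <stdatomic.h>

    struct toy_clp { int rpc_version; _Atomic int cons_state; };

    #define TOY_CS_INITING 0
    #define TOY_CS_READY   1

    /* Writer: initialise the fields, then publish the state (release). */
    static void toy_mark_ready(struct toy_clp *clp, int version)
    {
        clp->rpc_version = version;
        atomic_store_explicit(&clp->cons_state, TOY_CS_READY,
                              memory_order_release);
    }

    /* Reader: check the state first (acquire), only then look at the fields. */
    static int toy_match(struct toy_clp *clp, int version)
    {
        if (atomic_load_explicit(&clp->cons_state,
                                 memory_order_acquire) != TOY_CS_READY)
            return 0;
        return clp->rpc_version == version;
    }

    int main(void)
    {
        struct toy_clp clp = { 0, TOY_CS_INITING };

        toy_mark_ready(&clp, 4);
        return toy_match(&clp, 4) ? 0 : 1;    /* 0 = matched */
    }
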
/*
- * With sessions, the client is not marked ready until after a
- * successful EXCHANGE_ID and CREATE_SESSION.
- *
- * Map errors cl_cons_state errors to EPROTONOSUPPORT to indicate
- * other versions of NFS can be tried.
- */
-int nfs4_check_client_ready(struct nfs_client *clp)
-{
- if (!nfs4_has_session(clp))
- return 0;
- if (clp->cl_cons_state < NFS_CS_READY)
- return -EPROTONOSUPPORT;
- return 0;
-}
-
-/*
* Initialise the timeout values for a connection
*/
static void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
@@ -654,12 +653,11 @@ static void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
*/
static int nfs_create_rpc_client(struct nfs_client *clp,
const struct rpc_timeout *timeparms,
- rpc_authflavor_t flavor,
- int discrtry, int noresvport)
+ rpc_authflavor_t flavor)
{
struct rpc_clnt *clnt = NULL;
struct rpc_create_args args = {
- .net = clp->net,
+ .net = clp->cl_net,
.protocol = clp->cl_proto,
.address = (struct sockaddr *)&clp->cl_addr,
.addrsize = clp->cl_addrlen,
@@ -670,9 +668,9 @@ static int nfs_create_rpc_client(struct nfs_client *clp,
.authflavor = flavor,
};
- if (discrtry)
+ if (test_bit(NFS_CS_DISCRTRY, &clp->cl_flags))
args.flags |= RPC_CLNT_CREATE_DISCRTRY;
- if (noresvport)
+ if (test_bit(NFS_CS_NORESVPORT, &clp->cl_flags))
args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
if (!IS_ERR(clp->cl_rpcclient))
@@ -713,7 +711,7 @@ static int nfs_start_lockd(struct nfs_server *server)
.nfs_version = clp->rpc_ops->version,
.noresvport = server->flags & NFS_MOUNT_NORESVPORT ?
1 : 0,
- .net = clp->net,
+ .net = clp->cl_net,
};
if (nlm_init.nfs_version > 3)
@@ -805,36 +803,43 @@ static int nfs_init_server_rpcclient(struct nfs_server *server,
return 0;
}
-/*
- * Initialise an NFS2 or NFS3 client
+/**
+ * nfs_init_client - Initialise an NFS2 or NFS3 client
+ *
+ * @clp: nfs_client to initialise
+ * @timeparms: timeout parameters for underlying RPC transport
+ * @ip_addr: IP presentation address (not used)
+ * @authflavour: authentication flavor for underlying RPC transport
+ *
+ * Returns pointer to an NFS client, or an ERR_PTR value.
*/
-int nfs_init_client(struct nfs_client *clp, const struct rpc_timeout *timeparms,
- const char *ip_addr, rpc_authflavor_t authflavour,
- int noresvport)
+struct nfs_client *nfs_init_client(struct nfs_client *clp,
+ const struct rpc_timeout *timeparms,
+ const char *ip_addr, rpc_authflavor_t authflavour)
{
int error;
if (clp->cl_cons_state == NFS_CS_READY) {
/* the client is already initialised */
dprintk("<-- nfs_init_client() = 0 [already %p]\n", clp);
- return 0;
+ return clp;
}
/*
* Create a client RPC handle for doing FSSTAT with UNIX auth only
* - RFC 2623, sec 2.3.2
*/
- error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX,
- 0, noresvport);
+ error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX);
if (error < 0)
goto error;
nfs_mark_client_ready(clp, NFS_CS_READY);
- return 0;
+ return clp;
error:
nfs_mark_client_ready(clp, error);
+ nfs_put_client(clp);
dprintk("<-- nfs_init_client() = xerror %d\n", error);
- return error;
+ return ERR_PTR(error);
}
/*
@@ -847,7 +852,7 @@ static int nfs_init_server(struct nfs_server *server,
.hostname = data->nfs_server.hostname,
.addr = (const struct sockaddr *)&data->nfs_server.address,
.addrlen = data->nfs_server.addrlen,
- .rpc_ops = &nfs_v2_clientops,
+ .rpc_ops = NULL,
.proto = data->nfs_server.protocol,
.net = data->net,
};
@@ -857,17 +862,28 @@ static int nfs_init_server(struct nfs_server *server,
dprintk("--> nfs_init_server()\n");
+ switch (data->version) {
+#ifdef CONFIG_NFS_V2
+ case 2:
+ cl_init.rpc_ops = &nfs_v2_clientops;
+ break;
+#endif
#ifdef CONFIG_NFS_V3
- if (data->version == 3)
+ case 3:
cl_init.rpc_ops = &nfs_v3_clientops;
+ break;
#endif
+ default:
+ return -EPROTONOSUPPORT;
+ }
nfs_init_timeout_values(&timeparms, data->nfs_server.protocol,
data->timeo, data->retrans);
+ if (data->flags & NFS_MOUNT_NORESVPORT)
+ set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
/* Allocate or find a client reference we can use */
- clp = nfs_get_client(&cl_init, &timeparms, NULL, RPC_AUTH_UNIX,
- data->flags & NFS_MOUNT_NORESVPORT);
+ clp = nfs_get_client(&cl_init, &timeparms, NULL, RPC_AUTH_UNIX);
if (IS_ERR(clp)) {
dprintk("<-- nfs_init_server() = error %ld\n", PTR_ERR(clp));
return PTR_ERR(clp);
@@ -880,7 +896,7 @@ static int nfs_init_server(struct nfs_server *server,
server->options = data->options;
server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP|
- NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME;
+ NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME|NFS_CAP_CHANGE_ATTR;
if (data->rsize)
server->rsize = nfs_block_size(data->rsize, NULL);
@@ -1048,7 +1064,7 @@ static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_serve
static void nfs_server_insert_lists(struct nfs_server *server)
{
struct nfs_client *clp = server->nfs_client;
- struct nfs_net *nn = net_generic(clp->net, nfs_net_id);
+ struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
spin_lock(&nn->nfs_client_lock);
list_add_tail_rcu(&server->client_link, &clp->cl_superblocks);
@@ -1065,7 +1081,7 @@ static void nfs_server_remove_lists(struct nfs_server *server)
if (clp == NULL)
return;
- nn = net_generic(clp->net, nfs_net_id);
+ nn = net_generic(clp->cl_net, nfs_net_id);
spin_lock(&nn->nfs_client_lock);
list_del_rcu(&server->client_link);
if (list_empty(&clp->cl_superblocks))
@@ -1333,21 +1349,27 @@ static int nfs4_init_client_minor_version(struct nfs_client *clp)
* so that the client back channel can find the
* nfs_client struct
*/
- clp->cl_cons_state = NFS_CS_SESSION_INITING;
+ nfs_mark_client_ready(clp, NFS_CS_SESSION_INITING);
}
#endif /* CONFIG_NFS_V4_1 */
return nfs4_init_callback(clp);
}
-/*
- * Initialise an NFS4 client record
+/**
+ * nfs4_init_client - Initialise an NFS4 client record
+ *
+ * @clp: nfs_client to initialise
+ * @timeparms: timeout parameters for underlying RPC transport
+ * @ip_addr: callback IP address in presentation format
+ * @authflavour: authentication flavor for underlying RPC transport
+ *
+ * Returns pointer to an NFS client, or an ERR_PTR value.
*/
-int nfs4_init_client(struct nfs_client *clp,
- const struct rpc_timeout *timeparms,
- const char *ip_addr,
- rpc_authflavor_t authflavour,
- int noresvport)
+struct nfs_client *nfs4_init_client(struct nfs_client *clp,
+ const struct rpc_timeout *timeparms,
+ const char *ip_addr,
+ rpc_authflavor_t authflavour)
{
char buf[INET6_ADDRSTRLEN + 1];
int error;
@@ -1355,14 +1377,14 @@ int nfs4_init_client(struct nfs_client *clp,
if (clp->cl_cons_state == NFS_CS_READY) {
/* the client is initialised already */
dprintk("<-- nfs4_init_client() = 0 [already %p]\n", clp);
- return 0;
+ return clp;
}
/* Check NFS protocol revision and initialize RPC op vector */
clp->rpc_ops = &nfs_v4_clientops;
- error = nfs_create_rpc_client(clp, timeparms, authflavour,
- 1, noresvport);
+ __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
+ error = nfs_create_rpc_client(clp, timeparms, authflavour);
if (error < 0)
goto error;
@@ -1395,12 +1417,13 @@ int nfs4_init_client(struct nfs_client *clp,
if (!nfs4_has_session(clp))
nfs_mark_client_ready(clp, NFS_CS_READY);
- return 0;
+ return clp;
error:
nfs_mark_client_ready(clp, error);
+ nfs_put_client(clp);
dprintk("<-- nfs4_init_client() = xerror %d\n", error);
- return error;
+ return ERR_PTR(error);
}
/*
@@ -1429,9 +1452,11 @@ static int nfs4_set_client(struct nfs_server *server,
dprintk("--> nfs4_set_client()\n");
+ if (server->flags & NFS_MOUNT_NORESVPORT)
+ set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+
/* Allocate or find a client reference we can use */
- clp = nfs_get_client(&cl_init, timeparms, ip_addr, authflavour,
- server->flags & NFS_MOUNT_NORESVPORT);
+ clp = nfs_get_client(&cl_init, timeparms, ip_addr, authflavour);
if (IS_ERR(clp)) {
error = PTR_ERR(clp);
goto error;
@@ -1465,8 +1490,8 @@ error:
* the MDS.
*/
struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
- const struct sockaddr *ds_addr,
- int ds_addrlen, int ds_proto)
+ const struct sockaddr *ds_addr, int ds_addrlen,
+ int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans)
{
struct nfs_client_initdata cl_init = {
.addr = ds_addr,
@@ -1474,14 +1499,9 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
.rpc_ops = &nfs_v4_clientops,
.proto = ds_proto,
.minorversion = mds_clp->cl_minorversion,
- .net = mds_clp->net,
- };
- struct rpc_timeout ds_timeout = {
- .to_initval = 15 * HZ,
- .to_maxval = 15 * HZ,
- .to_retries = 1,
- .to_exponential = 1,
+ .net = mds_clp->cl_net,
};
+ struct rpc_timeout ds_timeout;
struct nfs_client *clp;
/*
@@ -1489,8 +1509,9 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
* cl_ipaddr so as to use the same EXCHANGE_ID co_ownerid as the MDS
* (section 13.1 RFC 5661).
*/
+ nfs_init_timeout_values(&ds_timeout, ds_proto, ds_timeo, ds_retrans);
clp = nfs_get_client(&cl_init, &ds_timeout, mds_clp->cl_ipaddr,
- mds_clp->cl_rpcclient->cl_auth->au_flavor, 0);
+ mds_clp->cl_rpcclient->cl_auth->au_flavor);
dprintk("<-- %s %p\n", __func__, clp);
return clp;
@@ -1701,7 +1722,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
rpc_protocol(parent_server->client),
parent_server->client->cl_timeout,
parent_client->cl_mvops->minor_version,
- parent_client->net);
+ parent_client->cl_net);
if (error < 0)
goto error;
@@ -1805,6 +1826,7 @@ void nfs_clients_init(struct net *net)
idr_init(&nn->cb_ident_idr);
#endif
spin_lock_init(&nn->nfs_client_lock);
+ nn->boot_time = CURRENT_TIME;
}
#ifdef CONFIG_PROC_FS
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 89af1d26927..bd3a9601d32 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -316,6 +316,10 @@ out:
* nfs_client_return_marked_delegations - return previously marked delegations
* @clp: nfs_client to process
*
+ * Note that this function is designed to be called by the state
+ * manager thread. For this reason, it cannot flush the dirty data,
+ * since that could deadlock in case of a state recovery error.
+ *
* Returns zero on success, or a negative errno value.
*/
int nfs_client_return_marked_delegations(struct nfs_client *clp)
@@ -340,11 +344,9 @@ restart:
server);
rcu_read_unlock();
- if (delegation != NULL) {
- filemap_flush(inode->i_mapping);
+ if (delegation != NULL)
err = __nfs_inode_return_delegation(inode,
delegation, 0);
- }
iput(inode);
if (!err)
goto restart;
@@ -380,6 +382,10 @@ void nfs_inode_return_delegation_noreclaim(struct inode *inode)
* nfs_inode_return_delegation - synchronously return a delegation
* @inode: inode to process
*
+ * This routine will always flush any dirty data to disk on the
+ * assumption that if we need to return the delegation, then
+ * we should stop caching.
+ *
* Returns zero on success, or a negative errno value.
*/
int nfs_inode_return_delegation(struct inode *inode)
@@ -389,10 +395,10 @@ int nfs_inode_return_delegation(struct inode *inode)
struct nfs_delegation *delegation;
int err = 0;
+ nfs_wb_all(inode);
if (rcu_access_pointer(nfsi->delegation) != NULL) {
delegation = nfs_detach_delegation(nfsi, server);
if (delegation != NULL) {
- nfs_wb_all(inode);
err = __nfs_inode_return_delegation(inode, delegation, 1);
}
}
@@ -538,6 +544,8 @@ int nfs_async_inode_return_delegation(struct inode *inode,
struct nfs_client *clp = server->nfs_client;
struct nfs_delegation *delegation;
+ filemap_flush(inode->i_mapping);
+
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index cd6a7a8dada..72709c4193f 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -66,6 +66,7 @@ static inline int nfs_have_delegation(struct inode *inode, fmode_t flags)
static inline int nfs_inode_return_delegation(struct inode *inode)
{
+ nfs_wb_all(inode);
return 0;
}
#endif
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index eedd24d0ad2..0989a209968 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -475,6 +475,29 @@ different:
}
static
+bool nfs_use_readdirplus(struct inode *dir, struct file *filp)
+{
+ if (!nfs_server_capable(dir, NFS_CAP_READDIRPLUS))
+ return false;
+ if (test_and_clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(dir)->flags))
+ return true;
+ if (filp->f_pos == 0)
+ return true;
+ return false;
+}
+
+/*
+ * This function is called by the lookup code to request the use of
+ * readdirplus to accelerate any future lookups in the same
+ * directory.
+ */
+static
+void nfs_advise_use_readdirplus(struct inode *dir)
+{
+ set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(dir)->flags);
+}
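
The new heuristic uses READDIRPLUS at the start of a listing, or whenever a recent lookup in the directory has set the advisory NFS_INO_ADVISE_RDPLUS bit, which the next readdir consumes with test_and_clear_bit(). A toy standalone model of that handshake (a plain bool stands in for the inode flag bit):

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_dir { bool advise_rdplus; };

    static void toy_lookup_hit(struct toy_dir *dir)
    {
        dir->advise_rdplus = true;           /* nfs_advise_use_readdirplus() */
    }

    static bool toy_use_readdirplus(struct toy_dir *dir, long f_pos)
    {
        if (dir->advise_rdplus) {            /* test_and_clear_bit() in the patch */
            dir->advise_rdplus = false;
            return true;
        }
        return f_pos == 0;                   /* always start a listing with plus */
    }

    int main(void)
    {
        struct toy_dir d = { false };

        printf("%d\n", toy_use_readdirplus(&d, 0));   /* 1: start of listing */
        printf("%d\n", toy_use_readdirplus(&d, 100)); /* 0: mid-listing, no hint */
        toy_lookup_hit(&d);
        printf("%d\n", toy_use_readdirplus(&d, 100)); /* 1: a lookup advised it */
        return 0;
    }
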
+
+static
void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
{
struct qstr filename = QSTR_INIT(entry->name, entry->len);
@@ -871,7 +894,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
desc->file = filp;
desc->dir_cookie = &dir_ctx->dir_cookie;
desc->decode = NFS_PROTO(inode)->decode_dirent;
- desc->plus = NFS_USE_READDIRPLUS(inode);
+ desc->plus = nfs_use_readdirplus(inode, filp) ? 1 : 0;
nfs_block_sillyrename(dentry);
res = nfs_revalidate_mapping(inode, filp->f_mapping);
@@ -1111,7 +1134,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
if (!inode) {
if (nfs_neg_need_reval(dir, dentry, nd))
goto out_bad;
- goto out_valid;
+ goto out_valid_noent;
}
if (is_bad_inode(inode)) {
@@ -1140,7 +1163,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
if (fhandle == NULL || fattr == NULL)
goto out_error;
- error = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, &dentry->d_name, fhandle, fattr);
+ error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
if (error)
goto out_bad;
if (nfs_compare_fh(NFS_FH(inode), fhandle))
@@ -1153,6 +1176,9 @@ static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
out_set_verifier:
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
out_valid:
+ /* Success: notify readdir to use READDIRPLUS */
+ nfs_advise_use_readdirplus(dir);
+ out_valid_noent:
dput(parent);
dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is valid\n",
__func__, dentry->d_parent->d_name.name,
@@ -1296,7 +1322,7 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
parent = dentry->d_parent;
/* Protect against concurrent sillydeletes */
nfs_block_sillyrename(parent);
- error = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, &dentry->d_name, fhandle, fattr);
+ error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
if (error == -ENOENT)
goto no_entry;
if (error < 0) {
@@ -1308,6 +1334,9 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
if (IS_ERR(res))
goto out_unblock_sillyrename;
+ /* Success: notify readdir to use READDIRPLUS */
+ nfs_advise_use_readdirplus(dir);
+
no_entry:
res = d_materialise_unique(dentry, inode);
if (res != NULL) {
@@ -1643,7 +1672,7 @@ int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
if (dentry->d_inode)
goto out;
if (fhandle->size == 0) {
- error = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, &dentry->d_name, fhandle, fattr);
+ error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
if (error)
goto out_error;
}
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 481be7f7bdd..23d170bc44f 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -56,6 +56,7 @@
#include "internal.h"
#include "iostat.h"
+#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_VFS
@@ -81,16 +82,19 @@ struct nfs_direct_req {
struct completion completion; /* wait for i/o completion */
/* commit state */
- struct list_head rewrite_list; /* saved nfs_write_data structs */
- struct nfs_write_data * commit_data; /* special write_data for commits */
+ struct nfs_mds_commit_info mds_cinfo; /* Storage for cinfo */
+ struct pnfs_ds_commit_info ds_cinfo; /* Storage for cinfo */
+ struct work_struct work;
int flags;
#define NFS_ODIRECT_DO_COMMIT (1) /* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES (2) /* write verification failed */
struct nfs_writeverf verf; /* unstable write verifier */
};
+static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
+static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
-static const struct rpc_call_ops nfs_write_direct_ops;
+static void nfs_direct_write_schedule_work(struct work_struct *work);
static inline void get_dreq(struct nfs_direct_req *dreq)
{
@@ -124,22 +128,6 @@ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_
return -EINVAL;
}
-static void nfs_direct_dirty_pages(struct page **pages, unsigned int pgbase, size_t count)
-{
- unsigned int npages;
- unsigned int i;
-
- if (count == 0)
- return;
- pages += (pgbase >> PAGE_SHIFT);
- npages = (count + (pgbase & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
- for (i = 0; i < npages; i++) {
- struct page *page = pages[i];
- if (!PageCompound(page))
- set_page_dirty(page);
- }
-}
-
static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
unsigned int i;
@@ -147,26 +135,30 @@ static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
page_cache_release(pages[i]);
}
+void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
+ struct nfs_direct_req *dreq)
+{
+ cinfo->lock = &dreq->lock;
+ cinfo->mds = &dreq->mds_cinfo;
+ cinfo->ds = &dreq->ds_cinfo;
+ cinfo->dreq = dreq;
+ cinfo->completion_ops = &nfs_direct_commit_completion_ops;
+}
+
static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
struct nfs_direct_req *dreq;
- dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL);
+ dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
if (!dreq)
return NULL;
kref_init(&dreq->kref);
kref_get(&dreq->kref);
init_completion(&dreq->completion);
- INIT_LIST_HEAD(&dreq->rewrite_list);
- dreq->iocb = NULL;
- dreq->ctx = NULL;
- dreq->l_ctx = NULL;
+ INIT_LIST_HEAD(&dreq->mds_cinfo.list);
+ INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
spin_lock_init(&dreq->lock);
- atomic_set(&dreq->io_count, 0);
- dreq->count = 0;
- dreq->error = 0;
- dreq->flags = 0;
return dreq;
}
@@ -226,47 +218,80 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
nfs_direct_req_release(dreq);
}
-/*
- * We must hold a reference to all the pages in this direct read request
- * until the RPCs complete. This could be long *after* we are woken up in
- * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
- */
-static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
+static void nfs_direct_readpage_release(struct nfs_page *req)
{
- struct nfs_read_data *data = calldata;
-
- nfs_readpage_result(task, data);
+ dprintk("NFS: direct read done (%s/%lld %d@%lld)\n",
+ req->wb_context->dentry->d_inode->i_sb->s_id,
+ (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
+ req->wb_bytes,
+ (long long)req_offset(req));
+ nfs_release_request(req);
}
-static void nfs_direct_read_release(void *calldata)
+static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
+ unsigned long bytes = 0;
+ struct nfs_direct_req *dreq = hdr->dreq;
- struct nfs_read_data *data = calldata;
- struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
- int status = data->task.tk_status;
+ if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
+ goto out_put;
spin_lock(&dreq->lock);
- if (unlikely(status < 0)) {
- dreq->error = status;
- spin_unlock(&dreq->lock);
- } else {
- dreq->count += data->res.count;
- spin_unlock(&dreq->lock);
- nfs_direct_dirty_pages(data->pagevec,
- data->args.pgbase,
- data->res.count);
- }
- nfs_direct_release_pages(data->pagevec, data->npages);
+ if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
+ dreq->error = hdr->error;
+ else
+ dreq->count += hdr->good_bytes;
+ spin_unlock(&dreq->lock);
+ while (!list_empty(&hdr->pages)) {
+ struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+ struct page *page = req->wb_page;
+
+ if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
+ if (bytes > hdr->good_bytes)
+ zero_user(page, 0, PAGE_SIZE);
+ else if (hdr->good_bytes - bytes < PAGE_SIZE)
+ zero_user_segment(page,
+ hdr->good_bytes & ~PAGE_MASK,
+ PAGE_SIZE);
+ }
+ if (!PageCompound(page)) {
+ if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
+ if (bytes < hdr->good_bytes)
+ set_page_dirty(page);
+ } else
+ set_page_dirty(page);
+ }
+ bytes += req->wb_bytes;
+ nfs_list_remove_request(req);
+ nfs_direct_readpage_release(req);
+ }
+out_put:
if (put_dreq(dreq))
nfs_direct_complete(dreq);
- nfs_readdata_free(data);
+ hdr->release(hdr);
+}
+
+static void nfs_read_sync_pgio_error(struct list_head *head)
+{
+ struct nfs_page *req;
+
+ while (!list_empty(head)) {
+ req = nfs_list_entry(head->next);
+ nfs_list_remove_request(req);
+ nfs_release_request(req);
+ }
}
-static const struct rpc_call_ops nfs_read_direct_ops = {
- .rpc_call_prepare = nfs_read_prepare,
- .rpc_call_done = nfs_direct_read_result,
- .rpc_release = nfs_direct_read_release,
+static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
+{
+ get_dreq(hdr->dreq);
+}
+
+static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
+ .error_cleanup = nfs_read_sync_pgio_error,
+ .init_hdr = nfs_direct_pgio_init,
+ .completion = nfs_direct_read_completion,
};
/*
@@ -276,107 +301,82 @@ static const struct rpc_call_ops nfs_read_direct_ops = {
* handled automatically by nfs_direct_read_result(). Otherwise, if
* no requests have been sent, just return an error.
*/
-static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
+static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *desc,
const struct iovec *iov,
loff_t pos)
{
+ struct nfs_direct_req *dreq = desc->pg_dreq;
struct nfs_open_context *ctx = dreq->ctx;
struct inode *inode = ctx->dentry->d_inode;
unsigned long user_addr = (unsigned long)iov->iov_base;
size_t count = iov->iov_len;
size_t rsize = NFS_SERVER(inode)->rsize;
- struct rpc_task *task;
- struct rpc_message msg = {
- .rpc_cred = ctx->cred,
- };
- struct rpc_task_setup task_setup_data = {
- .rpc_client = NFS_CLIENT(inode),
- .rpc_message = &msg,
- .callback_ops = &nfs_read_direct_ops,
- .workqueue = nfsiod_workqueue,
- .flags = RPC_TASK_ASYNC,
- };
unsigned int pgbase;
int result;
ssize_t started = 0;
+ struct page **pagevec = NULL;
+ unsigned int npages;
do {
- struct nfs_read_data *data;
size_t bytes;
+ int i;
pgbase = user_addr & ~PAGE_MASK;
- bytes = min(rsize,count);
+ bytes = min(max_t(size_t, rsize, PAGE_SIZE), count);
result = -ENOMEM;
- data = nfs_readdata_alloc(nfs_page_array_len(pgbase, bytes));
- if (unlikely(!data))
+ npages = nfs_page_array_len(pgbase, bytes);
+ if (!pagevec)
+ pagevec = kmalloc(npages * sizeof(struct page *),
+ GFP_KERNEL);
+ if (!pagevec)
break;
-
down_read(&current->mm->mmap_sem);
result = get_user_pages(current, current->mm, user_addr,
- data->npages, 1, 0, data->pagevec, NULL);
+ npages, 1, 0, pagevec, NULL);
up_read(&current->mm->mmap_sem);
- if (result < 0) {
- nfs_readdata_free(data);
+ if (result < 0)
break;
- }
- if ((unsigned)result < data->npages) {
+ if ((unsigned)result < npages) {
bytes = result * PAGE_SIZE;
if (bytes <= pgbase) {
- nfs_direct_release_pages(data->pagevec, result);
- nfs_readdata_free(data);
+ nfs_direct_release_pages(pagevec, result);
break;
}
bytes -= pgbase;
- data->npages = result;
+ npages = result;
}
- get_dreq(dreq);
-
- data->req = (struct nfs_page *) dreq;
- data->inode = inode;
- data->cred = msg.rpc_cred;
- data->args.fh = NFS_FH(inode);
- data->args.context = ctx;
- data->args.lock_context = dreq->l_ctx;
- data->args.offset = pos;
- data->args.pgbase = pgbase;
- data->args.pages = data->pagevec;
- data->args.count = bytes;
- data->res.fattr = &data->fattr;
- data->res.eof = 0;
- data->res.count = bytes;
- nfs_fattr_init(&data->fattr);
- msg.rpc_argp = &data->args;
- msg.rpc_resp = &data->res;
-
- task_setup_data.task = &data->task;
- task_setup_data.callback_data = data;
- NFS_PROTO(inode)->read_setup(data, &msg);
-
- task = rpc_run_task(&task_setup_data);
- if (IS_ERR(task))
- break;
- rpc_put_task(task);
-
- dprintk("NFS: %5u initiated direct read call "
- "(req %s/%Ld, %zu bytes @ offset %Lu)\n",
- data->task.tk_pid,
- inode->i_sb->s_id,
- (long long)NFS_FILEID(inode),
- bytes,
- (unsigned long long)data->args.offset);
-
- started += bytes;
- user_addr += bytes;
- pos += bytes;
- /* FIXME: Remove this unnecessary math from final patch */
- pgbase += bytes;
- pgbase &= ~PAGE_MASK;
- BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
-
- count -= bytes;
- } while (count != 0);
+ for (i = 0; i < npages; i++) {
+ struct nfs_page *req;
+ unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
+ /* XXX do we need to do the eof zeroing found in async_filler? */
+ req = nfs_create_request(dreq->ctx, dreq->inode,
+ pagevec[i],
+ pgbase, req_len);
+ if (IS_ERR(req)) {
+ result = PTR_ERR(req);
+ break;
+ }
+ req->wb_index = pos >> PAGE_SHIFT;
+ req->wb_offset = pos & ~PAGE_MASK;
+ if (!nfs_pageio_add_request(desc, req)) {
+ result = desc->pg_error;
+ nfs_release_request(req);
+ break;
+ }
+ pgbase = 0;
+ bytes -= req_len;
+ started += req_len;
+ user_addr += req_len;
+ pos += req_len;
+ count -= req_len;
+ }
+ /* The nfs_page now hold references to these pages */
+ nfs_direct_release_pages(pagevec, npages);
+ } while (count != 0 && result >= 0);
+
+ kfree(pagevec);
if (started)
return started;
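
The scheduling loop above pins the user buffer with get_user_pages() and then carves it into nfs_page requests, where only the first request may start mid-page (pgbase) and each request is capped at the remainder of its page. The splitting arithmetic on its own, as a hypothetical standalone sketch with a fixed 4 KiB page size:

    #include <stddef.h>
    #include <stdio.h>

    #define TOY_PAGE_SIZE 4096UL
    #define TOY_PAGE_MASK (~(TOY_PAGE_SIZE - 1))

    /* Carve a user buffer into requests that never cross a page boundary. */
    static void toy_split(unsigned long user_addr, size_t count)
    {
        unsigned long pgbase = user_addr & ~TOY_PAGE_MASK;

        while (count) {
            size_t req_len = TOY_PAGE_SIZE - pgbase;

            if (req_len > count)
                req_len = count;
            printf("request: page offset %lu, length %zu\n", pgbase, req_len);
            pgbase = 0;            /* only the first request can start mid-page */
            count -= req_len;
        }
    }

    int main(void)
    {
        toy_split(0x1000 + 100, 9000);   /* 100 bytes into a page, ~2.2 pages */
        return 0;
    }
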
@@ -388,15 +388,19 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
unsigned long nr_segs,
loff_t pos)
{
+ struct nfs_pageio_descriptor desc;
ssize_t result = -EINVAL;
size_t requested_bytes = 0;
unsigned long seg;
+ nfs_pageio_init_read(&desc, dreq->inode,
+ &nfs_direct_read_completion_ops);
get_dreq(dreq);
+ desc.pg_dreq = dreq;
for (seg = 0; seg < nr_segs; seg++) {
const struct iovec *vec = &iov[seg];
- result = nfs_direct_read_schedule_segment(dreq, vec, pos);
+ result = nfs_direct_read_schedule_segment(&desc, vec, pos);
if (result < 0)
break;
requested_bytes += result;
@@ -405,6 +409,8 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
pos += vec->iov_len;
}
+ nfs_pageio_complete(&desc);
+
/*
* If no bytes were started, return the error, and let the
* generic layer handle the completion.
@@ -441,104 +447,64 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
if (!result)
result = nfs_direct_wait(dreq);
+ NFS_I(inode)->read_io += result;
out_release:
nfs_direct_req_release(dreq);
out:
return result;
}
-static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
-{
- while (!list_empty(&dreq->rewrite_list)) {
- struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
- list_del(&data->pages);
- nfs_direct_release_pages(data->pagevec, data->npages);
- nfs_writedata_free(data);
- }
-}
-
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
- struct inode *inode = dreq->inode;
- struct list_head *p;
- struct nfs_write_data *data;
- struct rpc_task *task;
- struct rpc_message msg = {
- .rpc_cred = dreq->ctx->cred,
- };
- struct rpc_task_setup task_setup_data = {
- .rpc_client = NFS_CLIENT(inode),
- .rpc_message = &msg,
- .callback_ops = &nfs_write_direct_ops,
- .workqueue = nfsiod_workqueue,
- .flags = RPC_TASK_ASYNC,
- };
+ struct nfs_pageio_descriptor desc;
+ struct nfs_page *req, *tmp;
+ LIST_HEAD(reqs);
+ struct nfs_commit_info cinfo;
+ LIST_HEAD(failed);
+
+ nfs_init_cinfo_from_dreq(&cinfo, dreq);
+ pnfs_recover_commit_reqs(dreq->inode, &reqs, &cinfo);
+ spin_lock(cinfo.lock);
+ nfs_scan_commit_list(&cinfo.mds->list, &reqs, &cinfo, 0);
+ spin_unlock(cinfo.lock);
dreq->count = 0;
get_dreq(dreq);
- list_for_each(p, &dreq->rewrite_list) {
- data = list_entry(p, struct nfs_write_data, pages);
-
- get_dreq(dreq);
-
- /* Use stable writes */
- data->args.stable = NFS_FILE_SYNC;
-
- /*
- * Reset data->res.
- */
- nfs_fattr_init(&data->fattr);
- data->res.count = data->args.count;
- memset(&data->verf, 0, sizeof(data->verf));
-
- /*
- * Reuse data->task; data->args should not have changed
- * since the original request was sent.
- */
- task_setup_data.task = &data->task;
- task_setup_data.callback_data = data;
- msg.rpc_argp = &data->args;
- msg.rpc_resp = &data->res;
- NFS_PROTO(inode)->write_setup(data, &msg);
-
- /*
- * We're called via an RPC callback, so BKL is already held.
- */
- task = rpc_run_task(&task_setup_data);
- if (!IS_ERR(task))
- rpc_put_task(task);
-
- dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
- data->task.tk_pid,
- inode->i_sb->s_id,
- (long long)NFS_FILEID(inode),
- data->args.count,
- (unsigned long long)data->args.offset);
+ nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE,
+ &nfs_direct_write_completion_ops);
+ desc.pg_dreq = dreq;
+
+ list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
+ if (!nfs_pageio_add_request(&desc, req)) {
+ nfs_list_add_request(req, &failed);
+ spin_lock(cinfo.lock);
+ dreq->flags = 0;
+ dreq->error = -EIO;
+ spin_unlock(cinfo.lock);
+ }
}
+ nfs_pageio_complete(&desc);
- if (put_dreq(dreq))
- nfs_direct_write_complete(dreq, inode);
-}
-
-static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
-{
- struct nfs_write_data *data = calldata;
+ while (!list_empty(&failed)) {
+ req = nfs_list_entry(failed.next);
+ nfs_list_remove_request(req);
+ nfs_unlock_and_release_request(req);
+ }
- /* Call the NFS version-specific code */
- NFS_PROTO(data->inode)->commit_done(task, data);
+ if (put_dreq(dreq))
+ nfs_direct_write_complete(dreq, dreq->inode);
}
-static void nfs_direct_commit_release(void *calldata)
+static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
- struct nfs_write_data *data = calldata;
- struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
+ struct nfs_direct_req *dreq = data->dreq;
+ struct nfs_commit_info cinfo;
+ struct nfs_page *req;
int status = data->task.tk_status;
+ nfs_init_cinfo_from_dreq(&cinfo, dreq);
if (status < 0) {
dprintk("NFS: %5u commit failed with error %d.\n",
- data->task.tk_pid, status);
+ data->task.tk_pid, status);
dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
} else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
@@ -546,62 +512,47 @@ static void nfs_direct_commit_release(void *calldata)
}
dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
- nfs_direct_write_complete(dreq, data->inode);
- nfs_commit_free(data);
+ while (!list_empty(&data->pages)) {
+ req = nfs_list_entry(data->pages.next);
+ nfs_list_remove_request(req);
+ if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
+ /* Note the rewrite will go through mds */
+ kref_get(&req->wb_kref);
+ nfs_mark_request_commit(req, NULL, &cinfo);
+ }
+ nfs_unlock_and_release_request(req);
+ }
+
+ if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
+ nfs_direct_write_complete(dreq, data->inode);
+}
+
+static void nfs_direct_error_cleanup(struct nfs_inode *nfsi)
+{
+ /* There is no lock to clear */
}
-static const struct rpc_call_ops nfs_commit_direct_ops = {
- .rpc_call_prepare = nfs_write_prepare,
- .rpc_call_done = nfs_direct_commit_result,
- .rpc_release = nfs_direct_commit_release,
+static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
+ .completion = nfs_direct_commit_complete,
+ .error_cleanup = nfs_direct_error_cleanup,
};
static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
- struct nfs_write_data *data = dreq->commit_data;
- struct rpc_task *task;
- struct rpc_message msg = {
- .rpc_argp = &data->args,
- .rpc_resp = &data->res,
- .rpc_cred = dreq->ctx->cred,
- };
- struct rpc_task_setup task_setup_data = {
- .task = &data->task,
- .rpc_client = NFS_CLIENT(dreq->inode),
- .rpc_message = &msg,
- .callback_ops = &nfs_commit_direct_ops,
- .callback_data = data,
- .workqueue = nfsiod_workqueue,
- .flags = RPC_TASK_ASYNC,
- };
-
- data->inode = dreq->inode;
- data->cred = msg.rpc_cred;
-
- data->args.fh = NFS_FH(data->inode);
- data->args.offset = 0;
- data->args.count = 0;
- data->args.context = dreq->ctx;
- data->args.lock_context = dreq->l_ctx;
- data->res.count = 0;
- data->res.fattr = &data->fattr;
- data->res.verf = &data->verf;
- nfs_fattr_init(&data->fattr);
-
- NFS_PROTO(data->inode)->commit_setup(data, &msg);
-
- /* Note: task.tk_ops->rpc_release will free dreq->commit_data */
- dreq->commit_data = NULL;
-
- dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
-
- task = rpc_run_task(&task_setup_data);
- if (!IS_ERR(task))
- rpc_put_task(task);
+ int res;
+ struct nfs_commit_info cinfo;
+ LIST_HEAD(mds_list);
+
+ nfs_init_cinfo_from_dreq(&cinfo, dreq);
+ nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
+ res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
+ if (res < 0) /* res == -ENOMEM */
+ nfs_direct_write_reschedule(dreq);
}
-static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
+static void nfs_direct_write_schedule_work(struct work_struct *work)
{
+ struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
int flags = dreq->flags;
dreq->flags = 0;
@@ -613,89 +564,32 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
nfs_direct_write_reschedule(dreq);
break;
default:
- if (dreq->commit_data != NULL)
- nfs_commit_free(dreq->commit_data);
- nfs_direct_free_writedata(dreq);
- nfs_zap_mapping(inode, inode->i_mapping);
+ nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
nfs_direct_complete(dreq);
}
}
-static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
+static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
- dreq->commit_data = nfs_commitdata_alloc();
- if (dreq->commit_data != NULL)
- dreq->commit_data->req = (struct nfs_page *) dreq;
+ schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */
}
+
#else
-static inline void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
+static void nfs_direct_write_schedule_work(struct work_struct *work)
{
- dreq->commit_data = NULL;
}
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
- nfs_direct_free_writedata(dreq);
nfs_zap_mapping(inode, inode->i_mapping);
nfs_direct_complete(dreq);
}
#endif
-static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
-{
- struct nfs_write_data *data = calldata;
-
- nfs_writeback_done(task, data);
-}
-
/*
* NB: Return the value of the first error return code. Subsequent
* errors after the first one are ignored.
*/
-static void nfs_direct_write_release(void *calldata)
-{
- struct nfs_write_data *data = calldata;
- struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
- int status = data->task.tk_status;
-
- spin_lock(&dreq->lock);
-
- if (unlikely(status < 0)) {
- /* An error has occurred, so we should not commit */
- dreq->flags = 0;
- dreq->error = status;
- }
- if (unlikely(dreq->error != 0))
- goto out_unlock;
-
- dreq->count += data->res.count;
-
- if (data->res.verf->committed != NFS_FILE_SYNC) {
- switch (dreq->flags) {
- case 0:
- memcpy(&dreq->verf, &data->verf, sizeof(dreq->verf));
- dreq->flags = NFS_ODIRECT_DO_COMMIT;
- break;
- case NFS_ODIRECT_DO_COMMIT:
- if (memcmp(&dreq->verf, &data->verf, sizeof(dreq->verf))) {
- dprintk("NFS: %5u write verify failed\n", data->task.tk_pid);
- dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
- }
- }
- }
-out_unlock:
- spin_unlock(&dreq->lock);
-
- if (put_dreq(dreq))
- nfs_direct_write_complete(dreq, data->inode);
-}
-
-static const struct rpc_call_ops nfs_write_direct_ops = {
- .rpc_call_prepare = nfs_write_prepare,
- .rpc_call_done = nfs_direct_write_result,
- .rpc_release = nfs_direct_write_release,
-};
-
/*
* For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
* operation. If nfs_writedata_alloc() or get_user_pages() fails,
@@ -703,132 +597,187 @@ static const struct rpc_call_ops nfs_write_direct_ops = {
* handled automatically by nfs_direct_write_result(). Otherwise, if
* no requests have been sent, just return an error.
*/
-static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
+static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *desc,
const struct iovec *iov,
- loff_t pos, int sync)
+ loff_t pos)
{
+ struct nfs_direct_req *dreq = desc->pg_dreq;
struct nfs_open_context *ctx = dreq->ctx;
struct inode *inode = ctx->dentry->d_inode;
unsigned long user_addr = (unsigned long)iov->iov_base;
size_t count = iov->iov_len;
- struct rpc_task *task;
- struct rpc_message msg = {
- .rpc_cred = ctx->cred,
- };
- struct rpc_task_setup task_setup_data = {
- .rpc_client = NFS_CLIENT(inode),
- .rpc_message = &msg,
- .callback_ops = &nfs_write_direct_ops,
- .workqueue = nfsiod_workqueue,
- .flags = RPC_TASK_ASYNC,
- };
size_t wsize = NFS_SERVER(inode)->wsize;
unsigned int pgbase;
int result;
ssize_t started = 0;
+ struct page **pagevec = NULL;
+ unsigned int npages;
do {
- struct nfs_write_data *data;
size_t bytes;
+ int i;
pgbase = user_addr & ~PAGE_MASK;
- bytes = min(wsize,count);
+ bytes = min(max_t(size_t, wsize, PAGE_SIZE), count);
result = -ENOMEM;
- data = nfs_writedata_alloc(nfs_page_array_len(pgbase, bytes));
- if (unlikely(!data))
+ npages = nfs_page_array_len(pgbase, bytes);
+ if (!pagevec)
+ pagevec = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
+ if (!pagevec)
break;
down_read(&current->mm->mmap_sem);
result = get_user_pages(current, current->mm, user_addr,
- data->npages, 0, 0, data->pagevec, NULL);
+ npages, 0, 0, pagevec, NULL);
up_read(&current->mm->mmap_sem);
- if (result < 0) {
- nfs_writedata_free(data);
+ if (result < 0)
break;
- }
- if ((unsigned)result < data->npages) {
+
+ if ((unsigned)result < npages) {
bytes = result * PAGE_SIZE;
if (bytes <= pgbase) {
- nfs_direct_release_pages(data->pagevec, result);
- nfs_writedata_free(data);
+ nfs_direct_release_pages(pagevec, result);
break;
}
bytes -= pgbase;
- data->npages = result;
+ npages = result;
}
- get_dreq(dreq);
-
- list_move_tail(&data->pages, &dreq->rewrite_list);
-
- data->req = (struct nfs_page *) dreq;
- data->inode = inode;
- data->cred = msg.rpc_cred;
- data->args.fh = NFS_FH(inode);
- data->args.context = ctx;
- data->args.lock_context = dreq->l_ctx;
- data->args.offset = pos;
- data->args.pgbase = pgbase;
- data->args.pages = data->pagevec;
- data->args.count = bytes;
- data->args.stable = sync;
- data->res.fattr = &data->fattr;
- data->res.count = bytes;
- data->res.verf = &data->verf;
- nfs_fattr_init(&data->fattr);
-
- task_setup_data.task = &data->task;
- task_setup_data.callback_data = data;
- msg.rpc_argp = &data->args;
- msg.rpc_resp = &data->res;
- NFS_PROTO(inode)->write_setup(data, &msg);
-
- task = rpc_run_task(&task_setup_data);
- if (IS_ERR(task))
- break;
- rpc_put_task(task);
-
- dprintk("NFS: %5u initiated direct write call "
- "(req %s/%Ld, %zu bytes @ offset %Lu)\n",
- data->task.tk_pid,
- inode->i_sb->s_id,
- (long long)NFS_FILEID(inode),
- bytes,
- (unsigned long long)data->args.offset);
+ for (i = 0; i < npages; i++) {
+ struct nfs_page *req;
+ unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
- started += bytes;
- user_addr += bytes;
- pos += bytes;
-
- /* FIXME: Remove this useless math from the final patch */
- pgbase += bytes;
- pgbase &= ~PAGE_MASK;
- BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
+ req = nfs_create_request(dreq->ctx, dreq->inode,
+ pagevec[i],
+ pgbase, req_len);
+ if (IS_ERR(req)) {
+ result = PTR_ERR(req);
+ break;
+ }
+ nfs_lock_request(req);
+ req->wb_index = pos >> PAGE_SHIFT;
+ req->wb_offset = pos & ~PAGE_MASK;
+ if (!nfs_pageio_add_request(desc, req)) {
+ result = desc->pg_error;
+ nfs_unlock_and_release_request(req);
+ break;
+ }
+ pgbase = 0;
+ bytes -= req_len;
+ started += req_len;
+ user_addr += req_len;
+ pos += req_len;
+ count -= req_len;
+ }
+ /* The nfs_page structures now hold references to these pages */
+ nfs_direct_release_pages(pagevec, npages);
+ } while (count != 0 && result >= 0);
- count -= bytes;
- } while (count != 0);
+ kfree(pagevec);
if (started)
return started;
return result < 0 ? (ssize_t) result : -EFAULT;
}
+static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
+{
+ struct nfs_direct_req *dreq = hdr->dreq;
+ struct nfs_commit_info cinfo;
+ int bit = -1;
+ struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+
+ if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
+ goto out_put;
+
+ nfs_init_cinfo_from_dreq(&cinfo, dreq);
+
+ spin_lock(&dreq->lock);
+
+ if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
+ dreq->flags = 0;
+ dreq->error = hdr->error;
+ }
+ if (dreq->error != 0)
+ bit = NFS_IOHDR_ERROR;
+ else {
+ dreq->count += hdr->good_bytes;
+ if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
+ dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+ bit = NFS_IOHDR_NEED_RESCHED;
+ } else if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
+ if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
+ bit = NFS_IOHDR_NEED_RESCHED;
+ else if (dreq->flags == 0) {
+ memcpy(&dreq->verf, &req->wb_verf,
+ sizeof(dreq->verf));
+ bit = NFS_IOHDR_NEED_COMMIT;
+ dreq->flags = NFS_ODIRECT_DO_COMMIT;
+ } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
+ if (memcmp(&dreq->verf, &req->wb_verf, sizeof(dreq->verf))) {
+ dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+ bit = NFS_IOHDR_NEED_RESCHED;
+ } else
+ bit = NFS_IOHDR_NEED_COMMIT;
+ }
+ }
+ }
+ spin_unlock(&dreq->lock);
+
+ while (!list_empty(&hdr->pages)) {
+ req = nfs_list_entry(hdr->pages.next);
+ nfs_list_remove_request(req);
+ switch (bit) {
+ case NFS_IOHDR_NEED_RESCHED:
+ case NFS_IOHDR_NEED_COMMIT:
+ kref_get(&req->wb_kref);
+ nfs_mark_request_commit(req, hdr->lseg, &cinfo);
+ }
+ nfs_unlock_and_release_request(req);
+ }
+
+out_put:
+ if (put_dreq(dreq))
+ nfs_direct_write_complete(dreq, hdr->inode);
+ hdr->release(hdr);
+}
+
+static void nfs_write_sync_pgio_error(struct list_head *head)
+{
+ struct nfs_page *req;
+
+ while (!list_empty(head)) {
+ req = nfs_list_entry(head->next);
+ nfs_list_remove_request(req);
+ nfs_unlock_and_release_request(req);
+ }
+}
+
+static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
+ .error_cleanup = nfs_write_sync_pgio_error,
+ .init_hdr = nfs_direct_pgio_init,
+ .completion = nfs_direct_write_completion,
+};
+
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
const struct iovec *iov,
unsigned long nr_segs,
- loff_t pos, int sync)
+ loff_t pos)
{
+ struct nfs_pageio_descriptor desc;
ssize_t result = 0;
size_t requested_bytes = 0;
unsigned long seg;
+ nfs_pageio_init_write(&desc, dreq->inode, FLUSH_COND_STABLE,
+ &nfs_direct_write_completion_ops);
+ desc.pg_dreq = dreq;
get_dreq(dreq);
for (seg = 0; seg < nr_segs; seg++) {
const struct iovec *vec = &iov[seg];
- result = nfs_direct_write_schedule_segment(dreq, vec,
- pos, sync);
+ result = nfs_direct_write_schedule_segment(&desc, vec, pos);
if (result < 0)
break;
requested_bytes += result;
@@ -836,6 +785,8 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
break;
pos += vec->iov_len;
}
+ nfs_pageio_complete(&desc);
+ NFS_I(dreq->inode)->write_io += desc.pg_bytes_written;
/*
* If no bytes were started, return the error, and let the
@@ -858,16 +809,10 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
ssize_t result = -ENOMEM;
struct inode *inode = iocb->ki_filp->f_mapping->host;
struct nfs_direct_req *dreq;
- size_t wsize = NFS_SERVER(inode)->wsize;
- int sync = NFS_UNSTABLE;
dreq = nfs_direct_req_alloc();
if (!dreq)
goto out;
- nfs_alloc_commit_data(dreq);
-
- if (dreq->commit_data == NULL || count <= wsize)
- sync = NFS_FILE_SYNC;
dreq->inode = inode;
dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
@@ -877,7 +822,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
if (!is_sync_kiocb(iocb))
dreq->iocb = iocb;
- result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, sync);
+ result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos);
if (!result)
result = nfs_direct_wait(dreq);
out_release:
@@ -997,10 +942,15 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
task_io_account_write(count);
retval = nfs_direct_write(iocb, iov, nr_segs, pos, count);
+ if (retval > 0) {
+ struct inode *inode = mapping->host;
- if (retval > 0)
iocb->ki_pos = pos + retval;
-
+ spin_lock(&inode->i_lock);
+ if (i_size_read(inode) < iocb->ki_pos)
+ i_size_write(inode, iocb->ki_pos);
+ spin_unlock(&inode->i_lock);
+ }
out:
return retval;
}
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index aa9b709fd32..56311ca5f9f 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -174,6 +174,13 @@ nfs_file_flush(struct file *file, fl_owner_t id)
if ((file->f_mode & FMODE_WRITE) == 0)
return 0;
+ /*
+ * If we're holding a write delegation, then just start the i/o
+ * but don't wait for completion (or send a commit).
+ */
+ if (nfs_have_delegation(inode, FMODE_WRITE))
+ return filemap_fdatawrite(file->f_mapping);
+
/* Flush writes to the server and return any errors */
return vfs_fsync(file, 0);
}
@@ -417,6 +424,7 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
if (status < 0)
return status;
+ NFS_I(mapping->host)->write_io += copied;
return copied;
}
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index ae65c16b367..c817787fbdb 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -64,23 +64,12 @@ void nfs_fscache_release_client_cookie(struct nfs_client *clp)
* either by the 'fsc=xxx' option to mount, or by inheriting it from the parent
* superblock across an automount point of some nature.
*/
-void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq,
- struct nfs_clone_mount *mntdata)
+void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int ulen)
{
struct nfs_fscache_key *key, *xkey;
struct nfs_server *nfss = NFS_SB(sb);
struct rb_node **p, *parent;
- int diff, ulen;
-
- if (uniq) {
- ulen = strlen(uniq);
- } else if (mntdata) {
- struct nfs_server *mnt_s = NFS_SB(mntdata->sb);
- if (mnt_s->fscache_key) {
- uniq = mnt_s->fscache_key->key.uniquifier;
- ulen = mnt_s->fscache_key->key.uniq_len;
- }
- }
+ int diff;
if (!uniq) {
uniq = "";
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index b9c572d0679..c5b11b53ff3 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -73,9 +73,7 @@ extern void nfs_fscache_unregister(void);
extern void nfs_fscache_get_client_cookie(struct nfs_client *);
extern void nfs_fscache_release_client_cookie(struct nfs_client *);
-extern void nfs_fscache_get_super_cookie(struct super_block *,
- const char *,
- struct nfs_clone_mount *);
+extern void nfs_fscache_get_super_cookie(struct super_block *, const char *, int);
extern void nfs_fscache_release_super_cookie(struct super_block *);
extern void nfs_fscache_init_inode_cookie(struct inode *);
@@ -172,12 +170,6 @@ static inline void nfs_fscache_unregister(void) {}
static inline void nfs_fscache_get_client_cookie(struct nfs_client *clp) {}
static inline void nfs_fscache_release_client_cookie(struct nfs_client *clp) {}
-static inline void nfs_fscache_get_super_cookie(
- struct super_block *sb,
- const char *uniq,
- struct nfs_clone_mount *mntdata)
-{
-}
static inline void nfs_fscache_release_super_cookie(struct super_block *sb) {}
static inline void nfs_fscache_init_inode_cookie(struct inode *inode) {}
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index 4ca6f5c8038..8abfb19bd3a 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -150,7 +150,7 @@ int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh)
goto out;
/* Start by getting the root filehandle from the server */
- ret = server->nfs_client->rpc_ops->getroot(server, mntfh, &fsinfo);
+ ret = nfs4_proc_get_rootfh(server, mntfh, &fsinfo);
if (ret < 0) {
dprintk("nfs4_get_rootfh: getroot error = %d\n", -ret);
goto out;
@@ -178,87 +178,4 @@ out:
return ret;
}
-/*
- * get an NFS4 root dentry from the root filehandle
- */
-struct dentry *nfs4_get_root(struct super_block *sb, struct nfs_fh *mntfh,
- const char *devname)
-{
- struct nfs_server *server = NFS_SB(sb);
- struct nfs_fattr *fattr = NULL;
- struct dentry *ret;
- struct inode *inode;
- void *name = kstrdup(devname, GFP_KERNEL);
- int error;
-
- dprintk("--> nfs4_get_root()\n");
-
- if (!name)
- return ERR_PTR(-ENOMEM);
-
- /* get the info about the server and filesystem */
- error = nfs4_server_capabilities(server, mntfh);
- if (error < 0) {
- dprintk("nfs_get_root: getcaps error = %d\n",
- -error);
- kfree(name);
- return ERR_PTR(error);
- }
-
- fattr = nfs_alloc_fattr();
- if (fattr == NULL) {
- kfree(name);
- return ERR_PTR(-ENOMEM);
- }
-
- /* get the actual root for this mount */
- error = server->nfs_client->rpc_ops->getattr(server, mntfh, fattr);
- if (error < 0) {
- dprintk("nfs_get_root: getattr error = %d\n", -error);
- ret = ERR_PTR(error);
- goto out;
- }
-
- if (fattr->valid & NFS_ATTR_FATTR_FSID &&
- !nfs_fsid_equal(&server->fsid, &fattr->fsid))
- memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
-
- inode = nfs_fhget(sb, mntfh, fattr);
- if (IS_ERR(inode)) {
- dprintk("nfs_get_root: get root inode failed\n");
- ret = ERR_CAST(inode);
- goto out;
- }
-
- error = nfs_superblock_set_dummy_root(sb, inode);
- if (error != 0) {
- ret = ERR_PTR(error);
- goto out;
- }
-
- /* root dentries normally start off anonymous and get spliced in later
- * if the dentry tree reaches them; however if the dentry already
- * exists, we'll pick it up at this point and use it as the root
- */
- ret = d_obtain_alias(inode);
- if (IS_ERR(ret)) {
- dprintk("nfs_get_root: get root dentry failed\n");
- goto out;
- }
-
- security_d_instantiate(ret, inode);
- spin_lock(&ret->d_lock);
- if (IS_ROOT(ret) && !(ret->d_flags & DCACHE_NFSFS_RENAMED)) {
- ret->d_fsdata = name;
- name = NULL;
- }
- spin_unlock(&ret->d_lock);
-out:
- if (name)
- kfree(name);
- nfs_free_fattr(fattr);
- dprintk("<-- nfs4_get_root()\n");
- return ret;
-}
-
#endif /* CONFIG_NFS_V4 */
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index ba3019f5934..b5b86a05059 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -415,7 +415,7 @@ static int __nfs_idmap_register(struct dentry *dir,
static void nfs_idmap_unregister(struct nfs_client *clp,
struct rpc_pipe *pipe)
{
- struct net *net = clp->net;
+ struct net *net = clp->cl_net;
struct super_block *pipefs_sb;
pipefs_sb = rpc_get_sb_net(net);
@@ -429,7 +429,7 @@ static int nfs_idmap_register(struct nfs_client *clp,
struct idmap *idmap,
struct rpc_pipe *pipe)
{
- struct net *net = clp->net;
+ struct net *net = clp->cl_net;
struct super_block *pipefs_sb;
int err = 0;
@@ -530,9 +530,25 @@ static struct nfs_client *nfs_get_client_for_event(struct net *net, int event)
struct nfs_net *nn = net_generic(net, nfs_net_id);
struct dentry *cl_dentry;
struct nfs_client *clp;
+ int err;
+restart:
spin_lock(&nn->nfs_client_lock);
list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) {
+ /* Wait for initialisation to finish */
+ if (clp->cl_cons_state == NFS_CS_INITING) {
+ atomic_inc(&clp->cl_count);
+ spin_unlock(&nn->nfs_client_lock);
+ err = nfs_wait_client_init_complete(clp);
+ nfs_put_client(clp);
+ if (err)
+ return NULL;
+ goto restart;
+ }
+ /* Skip nfs_clients that failed to initialise */
+ if (clp->cl_cons_state < 0)
+ continue;
+ smp_rmb();
if (clp->rpc_ops != &nfs_v4_clientops)
continue;
cl_dentry = clp->cl_idmap->idmap_pipe->dentry;
@@ -640,20 +656,16 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
struct idmap_msg *im;
struct idmap *idmap = (struct idmap *)aux;
struct key *key = cons->key;
- int ret;
+ int ret = -ENOMEM;
/* msg and im are freed in idmap_pipe_destroy_msg */
msg = kmalloc(sizeof(*msg), GFP_KERNEL);
- if (IS_ERR(msg)) {
- ret = PTR_ERR(msg);
+ if (!msg)
goto out0;
- }
im = kmalloc(sizeof(*im), GFP_KERNEL);
- if (IS_ERR(im)) {
- ret = PTR_ERR(im);
+ if (!im)
goto out1;
- }
ret = nfs_idmap_prepare_message(key->description, im, msg);
if (ret < 0)
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index e8bbfa5b350..2f6f78c4b42 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -121,7 +121,7 @@ static void nfs_clear_inode(struct inode *inode)
void nfs_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
nfs_clear_inode(inode);
}
@@ -285,9 +285,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
inode->i_mode = fattr->mode;
if ((fattr->valid & NFS_ATTR_FATTR_MODE) == 0
&& nfs_server_capable(inode, NFS_CAP_MODE))
- nfsi->cache_validity |= NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_ACCESS
- | NFS_INO_INVALID_ACL;
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
/* Why so? Because we want revalidate for devices/FIFOs, and
* that's precisely what we have in nfs_file_inode_operations.
*/
@@ -300,8 +298,6 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops;
inode->i_fop = &nfs_dir_operations;
inode->i_data.a_ops = &nfs_dir_aops;
- if (nfs_server_capable(inode, NFS_CAP_READDIRPLUS))
- set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
/* Deal with crossing mountpoints */
if (fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT ||
fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) {
@@ -327,6 +323,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
inode->i_gid = -2;
inode->i_blocks = 0;
memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
+ nfsi->write_io = 0;
+ nfsi->read_io = 0;
nfsi->read_cache_jiffies = fattr->time_start;
nfsi->attr_gencount = fattr->gencount;
@@ -337,24 +335,19 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
if (fattr->valid & NFS_ATTR_FATTR_MTIME)
inode->i_mtime = fattr->mtime;
else if (nfs_server_capable(inode, NFS_CAP_MTIME))
- nfsi->cache_validity |= NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_DATA;
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
if (fattr->valid & NFS_ATTR_FATTR_CTIME)
inode->i_ctime = fattr->ctime;
else if (nfs_server_capable(inode, NFS_CAP_CTIME))
- nfsi->cache_validity |= NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_ACCESS
- | NFS_INO_INVALID_ACL;
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
inode->i_version = fattr->change_attr;
else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR))
- nfsi->cache_validity |= NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_DATA;
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
if (fattr->valid & NFS_ATTR_FATTR_SIZE)
inode->i_size = nfs_size_to_loff_t(fattr->size);
else
nfsi->cache_validity |= NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_DATA
| NFS_INO_REVAL_PAGECACHE;
if (fattr->valid & NFS_ATTR_FATTR_NLINK)
set_nlink(inode, fattr->nlink);
@@ -363,15 +356,11 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
if (fattr->valid & NFS_ATTR_FATTR_OWNER)
inode->i_uid = fattr->uid;
else if (nfs_server_capable(inode, NFS_CAP_OWNER))
- nfsi->cache_validity |= NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_ACCESS
- | NFS_INO_INVALID_ACL;
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
if (fattr->valid & NFS_ATTR_FATTR_GROUP)
inode->i_gid = fattr->gid;
else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP))
- nfsi->cache_validity |= NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_ACCESS
- | NFS_INO_INVALID_ACL;
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
inode->i_blocks = fattr->du.nfs2.blocks;
if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
@@ -654,6 +643,7 @@ struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f
nfs_init_lock_context(&ctx->lock_context);
ctx->lock_context.open_context = ctx;
INIT_LIST_HEAD(&ctx->list);
+ ctx->mdsthreshold = NULL;
return ctx;
}
@@ -682,6 +672,7 @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
put_rpccred(ctx->cred);
dput(ctx->dentry);
nfs_sb_deactive(sb);
+ kfree(ctx->mdsthreshold);
kfree(ctx);
}
@@ -870,6 +861,15 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map
return 0;
}
+static bool nfs_mapping_need_revalidate_inode(struct inode *inode)
+{
+ if (nfs_have_delegated_attributes(inode))
+ return false;
+ return (NFS_I(inode)->cache_validity & NFS_INO_REVAL_PAGECACHE)
+ || nfs_attribute_timeout(inode)
+ || NFS_STALE(inode);
+}
+
/**
* nfs_revalidate_mapping - Revalidate the pagecache
* @inode - pointer to host inode
@@ -880,9 +880,7 @@ int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
struct nfs_inode *nfsi = NFS_I(inode);
int ret = 0;
- if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
- || nfs_attribute_cache_expired(inode)
- || NFS_STALE(inode)) {
+ if (nfs_mapping_need_revalidate_inode(inode)) {
ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
if (ret < 0)
goto out;
@@ -948,6 +946,8 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
unsigned long invalid = 0;
+ if (nfs_have_delegated_attributes(inode))
+ return 0;
/* Has the inode gone and changed behind our back? */
if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid)
return -EIO;
@@ -960,7 +960,7 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
/* Verify a few of the more important attributes */
if ((fattr->valid & NFS_ATTR_FATTR_MTIME) && !timespec_equal(&inode->i_mtime, &fattr->mtime))
- invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
+ invalid |= NFS_INO_INVALID_ATTR;
if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
cur_size = i_size_read(inode);
@@ -1279,14 +1279,26 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
nfs_display_fhandle_hash(NFS_FH(inode)),
atomic_read(&inode->i_count), fattr->valid);
- if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid)
- goto out_fileid;
+ if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid) {
+ printk(KERN_ERR "NFS: server %s error: fileid changed\n"
+ "fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
+ NFS_SERVER(inode)->nfs_client->cl_hostname,
+ inode->i_sb->s_id, (long long)nfsi->fileid,
+ (long long)fattr->fileid);
+ goto out_err;
+ }
/*
* Make sure the inode's type hasn't changed.
*/
- if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
- goto out_changed;
+ if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) {
+ /*
+ * Big trouble! The inode has become a different object.
+ */
+ printk(KERN_DEBUG "NFS: %s: inode %ld mode changed, %07o to %07o\n",
+ __func__, inode->i_ino, inode->i_mode, fattr->mode);
+ goto out_err;
+ }
server = NFS_SERVER(inode);
/* Update the fsid? */
@@ -1314,7 +1326,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
if (inode->i_version != fattr->change_attr) {
dprintk("NFS: change_attr change on server for file %s/%ld\n",
inode->i_sb->s_id, inode->i_ino);
- invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+ invalid |= NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_DATA
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_REVAL_PAGECACHE;
if (S_ISDIR(inode->i_mode))
nfs_force_lookup_revalidate(inode);
inode->i_version = fattr->change_attr;
@@ -1323,38 +1339,15 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
invalid |= save_cache_validity;
if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
- /* NFSv2/v3: Check if the mtime agrees */
- if (!timespec_equal(&inode->i_mtime, &fattr->mtime)) {
- dprintk("NFS: mtime change on server for file %s/%ld\n",
- inode->i_sb->s_id, inode->i_ino);
- invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
- if (S_ISDIR(inode->i_mode))
- nfs_force_lookup_revalidate(inode);
- memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
- }
+ memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
} else if (server->caps & NFS_CAP_MTIME)
invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_DATA
- | NFS_INO_REVAL_PAGECACHE
| NFS_INO_REVAL_FORCED);
if (fattr->valid & NFS_ATTR_FATTR_CTIME) {
- /* If ctime has changed we should definitely clear access+acl caches */
- if (!timespec_equal(&inode->i_ctime, &fattr->ctime)) {
- invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
- /* and probably clear data for a directory too as utimes can cause
- * havoc with our cache.
- */
- if (S_ISDIR(inode->i_mode)) {
- invalid |= NFS_INO_INVALID_DATA;
- nfs_force_lookup_revalidate(inode);
- }
- memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
- }
+ memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
} else if (server->caps & NFS_CAP_CTIME)
invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_ACCESS
- | NFS_INO_INVALID_ACL
| NFS_INO_REVAL_FORCED);
/* Check if our cached file size is stale */
@@ -1466,12 +1459,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
nfsi->cache_validity |= invalid;
return 0;
- out_changed:
- /*
- * Big trouble! The inode has become a different object.
- */
- printk(KERN_DEBUG "NFS: %s: inode %ld mode changed, %07o to %07o\n",
- __func__, inode->i_ino, inode->i_mode, fattr->mode);
out_err:
/*
* No need to worry about unhashing the dentry, as the
@@ -1480,13 +1467,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
*/
nfs_invalidate_inode(inode);
return -ESTALE;
-
- out_fileid:
- printk(KERN_ERR "NFS: server %s error: fileid changed\n"
- "fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
- NFS_SERVER(inode)->nfs_client->cl_hostname, inode->i_sb->s_id,
- (long long)nfsi->fileid, (long long)fattr->fileid);
- goto out_err;
}
@@ -1500,7 +1480,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
void nfs4_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
pnfs_return_layout(inode);
pnfs_destroy_layout(NFS_I(inode));
/* If we are holding a delegation, return it! */
@@ -1547,7 +1527,7 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
nfsi->delegation_state = 0;
init_rwsem(&nfsi->rwsem);
nfsi->layout = NULL;
- atomic_set(&nfsi->commits_outstanding, 0);
+ atomic_set(&nfsi->commit_info.rpcs_out, 0);
#endif
}
@@ -1559,9 +1539,9 @@ static void init_once(void *foo)
INIT_LIST_HEAD(&nfsi->open_files);
INIT_LIST_HEAD(&nfsi->access_cache_entry_lru);
INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
- INIT_LIST_HEAD(&nfsi->commit_list);
+ INIT_LIST_HEAD(&nfsi->commit_info.list);
nfsi->npages = 0;
- nfsi->ncommit = 0;
+ nfsi->commit_info.ncommit = 0;
atomic_set(&nfsi->silly_count, 1);
INIT_HLIST_HEAD(&nfsi->silly_list);
init_waitqueue_head(&nfsi->waitqueue);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index b777bdaba4c..1848a727559 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -103,6 +103,7 @@ struct nfs_parsed_mount_data {
unsigned int version;
unsigned int minorversion;
char *fscache_uniq;
+ bool need_mount;
struct {
struct sockaddr_storage address;
@@ -167,11 +168,13 @@ extern struct nfs_server *nfs_clone_server(struct nfs_server *,
struct nfs_fh *,
struct nfs_fattr *,
rpc_authflavor_t);
+extern int nfs_wait_client_init_complete(const struct nfs_client *clp);
extern void nfs_mark_client_ready(struct nfs_client *clp, int state);
-extern int nfs4_check_client_ready(struct nfs_client *clp);
extern struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
const struct sockaddr *ds_addr,
- int ds_addrlen, int ds_proto);
+ int ds_addrlen, int ds_proto,
+ unsigned int ds_timeo,
+ unsigned int ds_retrans);
#ifdef CONFIG_PROC_FS
extern int __init nfs_fs_proc_init(void);
extern void nfs_fs_proc_exit(void);
@@ -185,21 +188,11 @@ static inline void nfs_fs_proc_exit(void)
}
#endif
-/* nfs4namespace.c */
-#ifdef CONFIG_NFS_V4
-extern struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry);
-#else
-static inline
-struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry)
-{
- return ERR_PTR(-ENOENT);
-}
-#endif
-
/* callback_xdr.c */
extern struct svc_version nfs4_callback_version1;
extern struct svc_version nfs4_callback_version4;
+struct nfs_pageio_descriptor;
/* pagelist.c */
extern int __init nfs_init_nfspagecache(void);
extern void nfs_destroy_nfspagecache(void);
@@ -210,9 +203,13 @@ extern void nfs_destroy_writepagecache(void);
extern int __init nfs_init_directcache(void);
extern void nfs_destroy_directcache(void);
+extern bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount);
+extern void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr,
+ void (*release)(struct nfs_pgio_header *hdr));
+void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos);
/* nfs2xdr.c */
-extern int nfs_stat_to_errno(enum nfs_stat);
extern struct rpc_procinfo nfs_procedures[];
extern int nfs2_decode_dirent(struct xdr_stream *,
struct nfs_entry *, int);
@@ -237,14 +234,13 @@ extern const u32 nfs41_maxwrite_overhead;
extern struct rpc_procinfo nfs4_procedures[];
#endif
-extern int nfs4_init_ds_session(struct nfs_client *clp);
+extern int nfs4_init_ds_session(struct nfs_client *, unsigned long);
/* proc.c */
void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
-extern int nfs_init_client(struct nfs_client *clp,
+extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
const struct rpc_timeout *timeparms,
- const char *ip_addr, rpc_authflavor_t authflavour,
- int noresvport);
+ const char *ip_addr, rpc_authflavor_t authflavour);
/* dir.c */
extern int nfs_access_cache_shrinker(struct shrinker *shrink,
@@ -280,9 +276,10 @@ extern void nfs_sb_deactive(struct super_block *sb);
extern char *nfs_path(char **p, struct dentry *dentry,
char *buffer, ssize_t buflen);
extern struct vfsmount *nfs_d_automount(struct path *path);
-#ifdef CONFIG_NFS_V4
-rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *);
-#endif
+struct vfsmount *nfs_submount(struct nfs_server *, struct dentry *,
+ struct nfs_fh *, struct nfs_fattr *);
+struct vfsmount *nfs_do_submount(struct dentry *, struct nfs_fh *,
+ struct nfs_fattr *, rpc_authflavor_t);
/* getroot.c */
extern struct dentry *nfs_get_root(struct super_block *, struct nfs_fh *,
@@ -294,46 +291,73 @@ extern struct dentry *nfs4_get_root(struct super_block *, struct nfs_fh *,
extern int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh);
#endif
-struct nfs_pageio_descriptor;
+struct nfs_pgio_completion_ops;
/* read.c */
-extern int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt,
- const struct rpc_call_ops *call_ops);
+extern struct nfs_read_header *nfs_readhdr_alloc(void);
+extern void nfs_readhdr_free(struct nfs_pgio_header *hdr);
+extern void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
+ struct inode *inode,
+ const struct nfs_pgio_completion_ops *compl_ops);
+extern int nfs_initiate_read(struct rpc_clnt *clnt,
+ struct nfs_read_data *data,
+ const struct rpc_call_ops *call_ops, int flags);
extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
extern int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
- struct list_head *head);
-
+ struct nfs_pgio_header *hdr);
extern void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
- struct inode *inode);
+ struct inode *inode,
+ const struct nfs_pgio_completion_ops *compl_ops);
extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
extern void nfs_readdata_release(struct nfs_read_data *rdata);
/* write.c */
+extern void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
+ struct inode *inode, int ioflags,
+ const struct nfs_pgio_completion_ops *compl_ops);
+extern struct nfs_write_header *nfs_writehdr_alloc(void);
+extern void nfs_writehdr_free(struct nfs_pgio_header *hdr);
extern int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
- struct list_head *head);
+ struct nfs_pgio_header *hdr);
extern void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
- struct inode *inode, int ioflags);
+ struct inode *inode, int ioflags,
+ const struct nfs_pgio_completion_ops *compl_ops);
extern void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio);
extern void nfs_writedata_release(struct nfs_write_data *wdata);
-extern void nfs_commit_free(struct nfs_write_data *p);
-extern int nfs_initiate_write(struct nfs_write_data *data,
- struct rpc_clnt *clnt,
+extern void nfs_commit_free(struct nfs_commit_data *p);
+extern int nfs_initiate_write(struct rpc_clnt *clnt,
+ struct nfs_write_data *data,
const struct rpc_call_ops *call_ops,
- int how);
+ int how, int flags);
extern void nfs_write_prepare(struct rpc_task *task, void *calldata);
-extern int nfs_initiate_commit(struct nfs_write_data *data,
- struct rpc_clnt *clnt,
+extern void nfs_commit_prepare(struct rpc_task *task, void *calldata);
+extern int nfs_initiate_commit(struct rpc_clnt *clnt,
+ struct nfs_commit_data *data,
const struct rpc_call_ops *call_ops,
- int how);
-extern void nfs_init_commit(struct nfs_write_data *data,
+ int how, int flags);
+extern void nfs_init_commit(struct nfs_commit_data *data,
struct list_head *head,
- struct pnfs_layout_segment *lseg);
+ struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo);
+int nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
+ struct nfs_commit_info *cinfo, int max);
+int nfs_scan_commit(struct inode *inode, struct list_head *dst,
+ struct nfs_commit_info *cinfo);
+void nfs_mark_request_commit(struct nfs_page *req,
+ struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo);
+int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
+ int how, struct nfs_commit_info *cinfo);
void nfs_retry_commit(struct list_head *page_list,
- struct pnfs_layout_segment *lseg);
-void nfs_commit_clear_lock(struct nfs_inode *nfsi);
-void nfs_commitdata_release(void *data);
-void nfs_commit_release_pages(struct nfs_write_data *data);
-void nfs_request_add_commit_list(struct nfs_page *req, struct list_head *head);
-void nfs_request_remove_commit_list(struct nfs_page *req);
+ struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo);
+void nfs_commitdata_release(struct nfs_commit_data *data);
+void nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
+ struct nfs_commit_info *cinfo);
+void nfs_request_remove_commit_list(struct nfs_page *req,
+ struct nfs_commit_info *cinfo);
+void nfs_init_cinfo(struct nfs_commit_info *cinfo,
+ struct inode *inode,
+ struct nfs_direct_req *dreq);
#ifdef CONFIG_MIGRATION
extern int nfs_migrate_page(struct address_space *,
@@ -342,15 +366,16 @@ extern int nfs_migrate_page(struct address_space *,
#define nfs_migrate_page NULL
#endif
+/* direct.c */
+void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
+ struct nfs_direct_req *dreq);
+
/* nfs4proc.c */
extern void __nfs4_read_done_cb(struct nfs_read_data *);
-extern void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data);
-extern int nfs4_init_client(struct nfs_client *clp,
+extern struct nfs_client *nfs4_init_client(struct nfs_client *clp,
const struct rpc_timeout *timeparms,
const char *ip_addr,
- rpc_authflavor_t authflavour,
- int noresvport);
-extern void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data);
+ rpc_authflavor_t authflavour);
extern int _nfs4_call_sync(struct rpc_clnt *clnt,
struct nfs_server *server,
struct rpc_message *msg,
@@ -466,3 +491,15 @@ unsigned int nfs_page_array_len(unsigned int base, size_t len)
PAGE_SIZE - 1) >> PAGE_SHIFT;
}
+/*
+ * Convert a struct timespec into a 64-bit change attribute
+ *
+ * This does approximately the same thing as timespec_to_ns(),
+ * but for calculation efficiency, we multiply the seconds by
+ * 1024*1024*1024.
+ */
+static inline
+u64 nfs_timespec_to_change_attr(const struct timespec *ts)
+{
+ return ((u64)ts->tv_sec << 30) + ts->tv_nsec;
+}
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index d51868e5683..08b9c93675d 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -26,11 +26,6 @@ static LIST_HEAD(nfs_automount_list);
static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts);
int nfs_mountpoint_expiry_timeout = 500 * HZ;
-static struct vfsmount *nfs_do_submount(struct dentry *dentry,
- struct nfs_fh *fh,
- struct nfs_fattr *fattr,
- rpc_authflavor_t authflavor);
-
/*
* nfs_path - reconstruct the path given an arbitrary dentry
* @base - used to return pointer to the end of devname part of path
@@ -118,64 +113,6 @@ Elong:
return ERR_PTR(-ENAMETOOLONG);
}
-#ifdef CONFIG_NFS_V4
-rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors)
-{
- struct gss_api_mech *mech;
- struct xdr_netobj oid;
- int i;
- rpc_authflavor_t pseudoflavor = RPC_AUTH_UNIX;
-
- for (i = 0; i < flavors->num_flavors; i++) {
- struct nfs4_secinfo_flavor *flavor;
- flavor = &flavors->flavors[i];
-
- if (flavor->flavor == RPC_AUTH_NULL || flavor->flavor == RPC_AUTH_UNIX) {
- pseudoflavor = flavor->flavor;
- break;
- } else if (flavor->flavor == RPC_AUTH_GSS) {
- oid.len = flavor->gss.sec_oid4.len;
- oid.data = flavor->gss.sec_oid4.data;
- mech = gss_mech_get_by_OID(&oid);
- if (!mech)
- continue;
- pseudoflavor = gss_svc_to_pseudoflavor(mech, flavor->gss.service);
- gss_mech_put(mech);
- break;
- }
- }
-
- return pseudoflavor;
-}
-
-static struct rpc_clnt *nfs_lookup_mountpoint(struct inode *dir,
- struct qstr *name,
- struct nfs_fh *fh,
- struct nfs_fattr *fattr)
-{
- int err;
-
- if (NFS_PROTO(dir)->version == 4)
- return nfs4_proc_lookup_mountpoint(dir, name, fh, fattr);
-
- err = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, name, fh, fattr);
- if (err)
- return ERR_PTR(err);
- return rpc_clone_client(NFS_SERVER(dir)->client);
-}
-#else /* CONFIG_NFS_V4 */
-static inline struct rpc_clnt *nfs_lookup_mountpoint(struct inode *dir,
- struct qstr *name,
- struct nfs_fh *fh,
- struct nfs_fattr *fattr)
-{
- int err = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, name, fh, fattr);
- if (err)
- return ERR_PTR(err);
- return rpc_clone_client(NFS_SERVER(dir)->client);
-}
-#endif /* CONFIG_NFS_V4 */
-
/*
* nfs_d_automount - Handle crossing a mountpoint on the server
* @path - The mountpoint
@@ -191,10 +128,9 @@ static inline struct rpc_clnt *nfs_lookup_mountpoint(struct inode *dir,
struct vfsmount *nfs_d_automount(struct path *path)
{
struct vfsmount *mnt;
- struct dentry *parent;
+ struct nfs_server *server = NFS_SERVER(path->dentry->d_inode);
struct nfs_fh *fh = NULL;
struct nfs_fattr *fattr = NULL;
- struct rpc_clnt *client;
dprintk("--> nfs_d_automount()\n");
@@ -210,21 +146,7 @@ struct vfsmount *nfs_d_automount(struct path *path)
dprintk("%s: enter\n", __func__);
- /* Look it up again to get its attributes */
- parent = dget_parent(path->dentry);
- client = nfs_lookup_mountpoint(parent->d_inode, &path->dentry->d_name, fh, fattr);
- dput(parent);
- if (IS_ERR(client)) {
- mnt = ERR_CAST(client);
- goto out;
- }
-
- if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
- mnt = nfs_do_refmount(client, path->dentry);
- else
- mnt = nfs_do_submount(path->dentry, fh, fattr, client->cl_auth->au_flavor);
- rpc_shutdown_client(client);
-
+ mnt = server->nfs_client->rpc_ops->submount(server, path->dentry, fh, fattr);
if (IS_ERR(mnt))
goto out;
@@ -297,10 +219,8 @@ static struct vfsmount *nfs_do_clone_mount(struct nfs_server *server,
* @authflavor - security flavor to use when performing the mount
*
*/
-static struct vfsmount *nfs_do_submount(struct dentry *dentry,
- struct nfs_fh *fh,
- struct nfs_fattr *fattr,
- rpc_authflavor_t authflavor)
+struct vfsmount *nfs_do_submount(struct dentry *dentry, struct nfs_fh *fh,
+ struct nfs_fattr *fattr, rpc_authflavor_t authflavor)
{
struct nfs_clone_mount mountdata = {
.sb = dentry->d_sb,
@@ -333,3 +253,18 @@ out:
dprintk("<-- nfs_do_submount() = %p\n", mnt);
return mnt;
}
+
+struct vfsmount *nfs_submount(struct nfs_server *server, struct dentry *dentry,
+ struct nfs_fh *fh, struct nfs_fattr *fattr)
+{
+ int err;
+ struct dentry *parent = dget_parent(dentry);
+
+ /* Look it up again to get its attributes */
+ err = server->nfs_client->rpc_ops->lookup(parent->d_inode, &dentry->d_name, fh, fattr);
+ dput(parent);
+ if (err != 0)
+ return ERR_PTR(err);
+
+ return nfs_do_submount(dentry, fh, fattr, server->client->cl_auth->au_flavor);
+}
diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h
index aa14ec303e9..8a6394edb8b 100644
--- a/fs/nfs/netns.h
+++ b/fs/nfs/netns.h
@@ -1,3 +1,7 @@
+/*
+ * NFS-private data for each "struct net". Accessed with net_generic().
+ */
+
#ifndef __NFS_NETNS_H__
#define __NFS_NETNS_H__
@@ -20,6 +24,7 @@ struct nfs_net {
struct idr cb_ident_idr; /* Protected by nfs_client_lock */
#endif
spinlock_t nfs_client_lock;
+ struct timespec boot_time;
};
extern int nfs_net_id;
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 1f56000fabb..baf759bccd0 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -61,6 +61,7 @@
#define NFS_readdirres_sz (1)
#define NFS_statfsres_sz (1+NFS_info_sz)
+static int nfs_stat_to_errno(enum nfs_stat);
/*
* While encoding arguments, set up the reply buffer in advance to
@@ -313,6 +314,8 @@ static int decode_fattr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
p = xdr_decode_time(p, &fattr->atime);
p = xdr_decode_time(p, &fattr->mtime);
xdr_decode_time(p, &fattr->ctime);
+ fattr->change_attr = nfs_timespec_to_change_attr(&fattr->ctime);
+
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
@@ -1109,7 +1112,7 @@ static const struct {
* Returns a local errno value, or -EIO if the NFS status code is
* not recognized. This function is used jointly by NFSv2 and NFSv3.
*/
-int nfs_stat_to_errno(enum nfs_stat status)
+static int nfs_stat_to_errno(enum nfs_stat status)
{
int i;
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 75c68299358..2292a0fd2bf 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -142,7 +142,7 @@ nfs3_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
}
static int
-nfs3_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
+nfs3_proc_lookup(struct inode *dir, struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs3_diropargs arg = {
@@ -810,11 +810,13 @@ nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
static int nfs3_read_done(struct rpc_task *task, struct nfs_read_data *data)
{
- if (nfs3_async_handle_jukebox(task, data->inode))
+ struct inode *inode = data->header->inode;
+
+ if (nfs3_async_handle_jukebox(task, inode))
return -EAGAIN;
- nfs_invalidate_atime(data->inode);
- nfs_refresh_inode(data->inode, &data->fattr);
+ nfs_invalidate_atime(inode);
+ nfs_refresh_inode(inode, &data->fattr);
return 0;
}
@@ -830,10 +832,12 @@ static void nfs3_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_da
static int nfs3_write_done(struct rpc_task *task, struct nfs_write_data *data)
{
- if (nfs3_async_handle_jukebox(task, data->inode))
+ struct inode *inode = data->header->inode;
+
+ if (nfs3_async_handle_jukebox(task, inode))
return -EAGAIN;
if (task->tk_status >= 0)
- nfs_post_op_update_inode_force_wcc(data->inode, data->res.fattr);
+ nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
return 0;
}
@@ -847,7 +851,12 @@ static void nfs3_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_
rpc_call_start(task);
}
-static int nfs3_commit_done(struct rpc_task *task, struct nfs_write_data *data)
+static void nfs3_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
+{
+ rpc_call_start(task);
+}
+
+static int nfs3_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
{
if (nfs3_async_handle_jukebox(task, data->inode))
return -EAGAIN;
@@ -855,7 +864,7 @@ static int nfs3_commit_done(struct rpc_task *task, struct nfs_write_data *data)
return 0;
}
-static void nfs3_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
+static void nfs3_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
{
msg->rpc_proc = &nfs3_procedures[NFS3PROC_COMMIT];
}
@@ -875,6 +884,7 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
.file_inode_ops = &nfs3_file_inode_operations,
.file_ops = &nfs_file_operations,
.getroot = nfs3_proc_get_root,
+ .submount = nfs_submount,
.getattr = nfs3_proc_getattr,
.setattr = nfs3_proc_setattr,
.lookup = nfs3_proc_lookup,
@@ -906,6 +916,7 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
.write_rpc_prepare = nfs3_proc_write_rpc_prepare,
.write_done = nfs3_write_done,
.commit_setup = nfs3_proc_commit_setup,
+ .commit_rpc_prepare = nfs3_proc_commit_rpc_prepare,
.commit_done = nfs3_commit_done,
.lock = nfs3_proc_lock,
.clear_acl_cache = nfs3_forget_cached_acls,
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index a77cc9a3ce5..902de489ec9 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -86,6 +86,8 @@
XDR_QUADLEN(NFS_ACL_INLINE_BUFSIZE))
#define ACL3_setaclres_sz (1+NFS3_post_op_attr_sz)
+static int nfs3_stat_to_errno(enum nfs_stat);
+
/*
* Map file type to S_IFMT bits
*/
@@ -675,6 +677,7 @@ static int decode_fattr3(struct xdr_stream *xdr, struct nfs_fattr *fattr)
p = xdr_decode_nfstime3(p, &fattr->atime);
p = xdr_decode_nfstime3(p, &fattr->mtime);
xdr_decode_nfstime3(p, &fattr->ctime);
+ fattr->change_attr = nfs_timespec_to_change_attr(&fattr->ctime);
fattr->valid |= NFS_ATTR_FATTR_V3;
return 0;
@@ -725,12 +728,14 @@ static int decode_wcc_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
goto out_overflow;
fattr->valid |= NFS_ATTR_FATTR_PRESIZE
+ | NFS_ATTR_FATTR_PRECHANGE
| NFS_ATTR_FATTR_PREMTIME
| NFS_ATTR_FATTR_PRECTIME;
p = xdr_decode_size3(p, &fattr->pre_size);
p = xdr_decode_nfstime3(p, &fattr->pre_mtime);
xdr_decode_nfstime3(p, &fattr->pre_ctime);
+ fattr->pre_change_attr = nfs_timespec_to_change_attr(&fattr->pre_ctime);
return 0;
out_overflow:
@@ -1287,7 +1292,7 @@ static void nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req,
* };
*/
static void encode_commit3args(struct xdr_stream *xdr,
- const struct nfs_writeargs *args)
+ const struct nfs_commitargs *args)
{
__be32 *p;
@@ -1300,7 +1305,7 @@ static void encode_commit3args(struct xdr_stream *xdr,
static void nfs3_xdr_enc_commit3args(struct rpc_rqst *req,
struct xdr_stream *xdr,
- const struct nfs_writeargs *args)
+ const struct nfs_commitargs *args)
{
encode_commit3args(xdr, args);
}
@@ -1385,7 +1390,7 @@ static int nfs3_xdr_dec_getattr3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -1424,7 +1429,7 @@ static int nfs3_xdr_dec_setattr3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -1472,7 +1477,7 @@ out_default:
error = decode_post_op_attr(xdr, result->dir_attr);
if (unlikely(error))
goto out;
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -1513,7 +1518,7 @@ static int nfs3_xdr_dec_access3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -1554,7 +1559,7 @@ static int nfs3_xdr_dec_readlink3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -1636,7 +1641,7 @@ static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
out:
return error;
out_status:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -1706,7 +1711,7 @@ static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
out:
return error;
out_status:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -1770,7 +1775,7 @@ out_default:
error = decode_wcc_data(xdr, result->dir_attr);
if (unlikely(error))
goto out;
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -1809,7 +1814,7 @@ static int nfs3_xdr_dec_remove3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -1853,7 +1858,7 @@ static int nfs3_xdr_dec_rename3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -1896,7 +1901,7 @@ static int nfs3_xdr_dec_link3res(struct rpc_rqst *req, struct xdr_stream *xdr,
out:
return error;
out_status:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/**
@@ -2088,7 +2093,7 @@ out_default:
error = decode_post_op_attr(xdr, result->dir_attr);
if (unlikely(error))
goto out;
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -2156,7 +2161,7 @@ static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -2232,7 +2237,7 @@ static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -2295,7 +2300,7 @@ static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
/*
@@ -2319,7 +2324,7 @@ out_status:
*/
static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
struct xdr_stream *xdr,
- struct nfs_writeres *result)
+ struct nfs_commitres *result)
{
enum nfs_stat status;
int error;
@@ -2336,7 +2341,7 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
out:
return error;
out_status:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
#ifdef CONFIG_NFS_V3_ACL
@@ -2401,7 +2406,7 @@ static int nfs3_xdr_dec_getacl3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
@@ -2420,11 +2425,76 @@ static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
out:
return error;
out_default:
- return nfs_stat_to_errno(status);
+ return nfs3_stat_to_errno(status);
}
#endif /* CONFIG_NFS_V3_ACL */
+
+/*
+ * We need to translate between nfs status return values and
+ * the local errno values which may not be the same.
+ */
+static const struct {
+ int stat;
+ int errno;
+} nfs_errtbl[] = {
+ { NFS_OK, 0 },
+ { NFSERR_PERM, -EPERM },
+ { NFSERR_NOENT, -ENOENT },
+ { NFSERR_IO, -errno_NFSERR_IO},
+ { NFSERR_NXIO, -ENXIO },
+/* { NFSERR_EAGAIN, -EAGAIN }, */
+ { NFSERR_ACCES, -EACCES },
+ { NFSERR_EXIST, -EEXIST },
+ { NFSERR_XDEV, -EXDEV },
+ { NFSERR_NODEV, -ENODEV },
+ { NFSERR_NOTDIR, -ENOTDIR },
+ { NFSERR_ISDIR, -EISDIR },
+ { NFSERR_INVAL, -EINVAL },
+ { NFSERR_FBIG, -EFBIG },
+ { NFSERR_NOSPC, -ENOSPC },
+ { NFSERR_ROFS, -EROFS },
+ { NFSERR_MLINK, -EMLINK },
+ { NFSERR_NAMETOOLONG, -ENAMETOOLONG },
+ { NFSERR_NOTEMPTY, -ENOTEMPTY },
+ { NFSERR_DQUOT, -EDQUOT },
+ { NFSERR_STALE, -ESTALE },
+ { NFSERR_REMOTE, -EREMOTE },
+#ifdef EWFLUSH
+ { NFSERR_WFLUSH, -EWFLUSH },
+#endif
+ { NFSERR_BADHANDLE, -EBADHANDLE },
+ { NFSERR_NOT_SYNC, -ENOTSYNC },
+ { NFSERR_BAD_COOKIE, -EBADCOOKIE },
+ { NFSERR_NOTSUPP, -ENOTSUPP },
+ { NFSERR_TOOSMALL, -ETOOSMALL },
+ { NFSERR_SERVERFAULT, -EREMOTEIO },
+ { NFSERR_BADTYPE, -EBADTYPE },
+ { NFSERR_JUKEBOX, -EJUKEBOX },
+ { -1, -EIO }
+};
+
+/**
+ * nfs3_stat_to_errno - convert an NFS status code to a local errno
+ * @status: NFS status code to convert
+ *
+ * Returns a local errno value, or -EIO if the NFS status code is
+ * not recognized.
+ */
+static int nfs3_stat_to_errno(enum nfs_stat status)
+{
+ int i;
+
+ for (i = 0; nfs_errtbl[i].stat != -1; i++) {
+ if (nfs_errtbl[i].stat == (int)status)
+ return nfs_errtbl[i].errno;
+ }
+ dprintk("NFS: Unrecognized nfs status value: %u\n", status);
+ return nfs_errtbl[i].errno;
+}
+
+
#define PROC(proc, argtype, restype, timer) \
[NFS3PROC_##proc] = { \
.p_proc = NFS3PROC_##proc, \
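The added nfs3_stat_to_errno() performs a linear scan over a sentinel-terminated table, so any status the client does not recognize falls through to the final { -1, -EIO } entry. A minimal user-space sketch of that table-driven lookup follows; the numeric status values are placeholders, not the real NFS wire values.

/* Sketch of a sentinel-terminated status-to-errno table lookup.
 * Status numbers are illustrative placeholders only. */
#include <errno.h>
#include <stdio.h>

struct stat_errno_map {
	int stat;
	int errno_val;	/* not "errno": <errno.h> defines errno as a macro */
};

static const struct stat_errno_map errtbl[] = {
	{  0,  0       },	/* NFS_OK */
	{  1, -EPERM   },	/* placeholder for NFSERR_PERM */
	{  2, -ENOENT  },	/* placeholder for NFSERR_NOENT */
	{ -1, -EIO     },	/* sentinel: unknown status maps to -EIO */
};

static int stat_to_errno(int status)
{
	int i;

	for (i = 0; errtbl[i].stat != -1; i++)
		if (errtbl[i].stat == status)
			return errtbl[i].errno_val;
	return errtbl[i].errno_val;	/* loop stopped at the sentinel: -EIO */
}

int main(void)
{
	printf("%d %d\n", stat_to_errno(2), stat_to_errno(9999));
	return 0;
}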
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 8d75021020b..c6827f93ab5 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -24,6 +24,8 @@ enum nfs4_client_state {
NFS4CLNT_RECALL_SLOT,
NFS4CLNT_LEASE_CONFIRM,
NFS4CLNT_SERVER_SCOPE_MISMATCH,
+ NFS4CLNT_PURGE_STATE,
+ NFS4CLNT_BIND_CONN_TO_SESSION,
};
enum nfs4_session_state {
@@ -52,11 +54,6 @@ struct nfs4_minor_version_ops {
const struct nfs4_state_maintenance_ops *state_renewal_ops;
};
-struct nfs_unique_id {
- struct rb_node rb_node;
- __u64 id;
-};
-
#define NFS_SEQID_CONFIRMED 1
struct nfs_seqid_counter {
ktime_t create_time;
@@ -206,12 +203,18 @@ extern const struct dentry_operations nfs4_dentry_operations;
extern const struct inode_operations nfs4_dir_inode_operations;
/* nfs4namespace.c */
+rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *);
struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *, struct inode *, struct qstr *);
+struct vfsmount *nfs4_submount(struct nfs_server *, struct dentry *,
+ struct nfs_fh *, struct nfs_fattr *);
/* nfs4proc.c */
extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);
+extern int nfs4_proc_get_rootfh(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
+extern int nfs4_proc_bind_conn_to_session(struct nfs_client *, struct rpc_cred *cred);
extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred);
+extern int nfs4_destroy_clientid(struct nfs_client *clp);
extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc);
@@ -239,8 +242,8 @@ extern int nfs41_setup_sequence(struct nfs4_session *session,
struct rpc_task *task);
extern void nfs4_destroy_session(struct nfs4_session *session);
extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp);
-extern int nfs4_proc_create_session(struct nfs_client *);
-extern int nfs4_proc_destroy_session(struct nfs4_session *);
+extern int nfs4_proc_create_session(struct nfs_client *, struct rpc_cred *);
+extern int nfs4_proc_destroy_session(struct nfs4_session *, struct rpc_cred *);
extern int nfs4_init_session(struct nfs_server *server);
extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
struct nfs_fsinfo *fsinfo);
@@ -310,9 +313,9 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp);
#if defined(CONFIG_NFS_V4_1)
struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp);
struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp);
-extern void nfs4_schedule_session_recovery(struct nfs4_session *);
+extern void nfs4_schedule_session_recovery(struct nfs4_session *, int);
#else
-static inline void nfs4_schedule_session_recovery(struct nfs4_session *session)
+static inline void nfs4_schedule_session_recovery(struct nfs4_session *session, int err)
{
}
#endif /* CONFIG_NFS_V4_1 */
@@ -334,7 +337,7 @@ extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs
extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
extern void nfs41_handle_recall_slot(struct nfs_client *clp);
extern void nfs41_handle_server_scope(struct nfs_client *,
- struct server_scope **);
+ struct nfs41_server_scope **);
extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
extern void nfs4_select_rw_stateid(nfs4_stateid *, struct nfs4_state *,
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 5acfd9ea8a3..e1340293872 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -82,29 +82,76 @@ filelayout_get_dserver_offset(struct pnfs_layout_segment *lseg, loff_t offset)
BUG();
}
+static void filelayout_reset_write(struct nfs_write_data *data)
+{
+ struct nfs_pgio_header *hdr = data->header;
+ struct rpc_task *task = &data->task;
+
+ if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+ dprintk("%s Reset task %5u for i/o through MDS "
+ "(req %s/%lld, %u bytes @ offset %llu)\n", __func__,
+ data->task.tk_pid,
+ hdr->inode->i_sb->s_id,
+ (long long)NFS_FILEID(hdr->inode),
+ data->args.count,
+ (unsigned long long)data->args.offset);
+
+ task->tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
+ &hdr->pages,
+ hdr->completion_ops);
+ }
+}
+
+static void filelayout_reset_read(struct nfs_read_data *data)
+{
+ struct nfs_pgio_header *hdr = data->header;
+ struct rpc_task *task = &data->task;
+
+ if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+ dprintk("%s Reset task %5u for i/o through MDS "
+ "(req %s/%lld, %u bytes @ offset %llu)\n", __func__,
+ data->task.tk_pid,
+ hdr->inode->i_sb->s_id,
+ (long long)NFS_FILEID(hdr->inode),
+ data->args.count,
+ (unsigned long long)data->args.offset);
+
+ task->tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
+ &hdr->pages,
+ hdr->completion_ops);
+ }
+}
+
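Note that filelayout_reset_read() and filelayout_reset_write() guard the resend with test_and_set_bit(NFS_IOHDR_REDO, ...), so the redirect to the MDS is set up at most once per pageio header even if several component RPCs fail. A user-space analogue of that once-only guard, using a C11 atomic flag in place of the kernel's test_and_set_bit(), is sketched below.

/* Once-only guard sketch: only the first caller performs the reset. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag redo_flag = ATOMIC_FLAG_INIT;

static void reset_once(void)
{
	/* atomic_flag_test_and_set() returns the previous value, so only
	 * the first caller observes "not yet set" and does the work. */
	if (!atomic_flag_test_and_set(&redo_flag))
		printf("resending this header through the MDS\n");
}

int main(void)
{
	reset_once();
	reset_once();	/* second call is a no-op */
	return 0;
}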
static int filelayout_async_handle_error(struct rpc_task *task,
struct nfs4_state *state,
struct nfs_client *clp,
- int *reset)
+ struct pnfs_layout_segment *lseg)
{
- struct nfs_server *mds_server = NFS_SERVER(state->inode);
+ struct inode *inode = lseg->pls_layout->plh_inode;
+ struct nfs_server *mds_server = NFS_SERVER(inode);
+ struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
struct nfs_client *mds_client = mds_server->nfs_client;
+ struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
if (task->tk_status >= 0)
return 0;
- *reset = 0;
switch (task->tk_status) {
/* MDS state errors */
case -NFS4ERR_DELEG_REVOKED:
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
+ if (state == NULL)
+ break;
nfs_remove_bad_delegation(state->inode);
case -NFS4ERR_OPENMODE:
+ if (state == NULL)
+ break;
nfs4_schedule_stateid_recovery(mds_server, state);
goto wait_on_recovery;
case -NFS4ERR_EXPIRED:
- nfs4_schedule_stateid_recovery(mds_server, state);
+ if (state != NULL)
+ nfs4_schedule_stateid_recovery(mds_server, state);
nfs4_schedule_lease_recovery(mds_client);
goto wait_on_recovery;
/* DS session errors */
@@ -118,7 +165,7 @@ static int filelayout_async_handle_error(struct rpc_task *task,
dprintk("%s ERROR %d, Reset session. Exchangeid "
"flags 0x%x\n", __func__, task->tk_status,
clp->cl_exchange_flags);
- nfs4_schedule_session_recovery(clp->cl_session);
+ nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
break;
case -NFS4ERR_DELAY:
case -NFS4ERR_GRACE:
@@ -127,11 +174,48 @@ static int filelayout_async_handle_error(struct rpc_task *task,
break;
case -NFS4ERR_RETRY_UNCACHED_REP:
break;
+ /* Invalidate Layout errors */
+ case -NFS4ERR_PNFS_NO_LAYOUT:
+ case -ESTALE: /* mapped NFS4ERR_STALE */
+ case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */
+ case -EISDIR: /* mapped NFS4ERR_ISDIR */
+ case -NFS4ERR_FHEXPIRED:
+ case -NFS4ERR_WRONG_TYPE:
+ dprintk("%s Invalid layout error %d\n", __func__,
+ task->tk_status);
+ /*
+ * Destroy layout so new i/o will get a new layout.
+ * Layout will not be destroyed until all current lseg
+ * references are put. Mark layout as invalid to resend failed
+ * i/o and all i/o waiting on the slot table to the MDS until
+ * layout is destroyed and a new valid layout is obtained.
+ */
+ set_bit(NFS_LAYOUT_INVALID,
+ &NFS_I(inode)->layout->plh_flags);
+ pnfs_destroy_layout(NFS_I(inode));
+ rpc_wake_up(&tbl->slot_tbl_waitq);
+ goto reset;
+ /* RPC connection errors */
+ case -ECONNREFUSED:
+ case -EHOSTDOWN:
+ case -EHOSTUNREACH:
+ case -ENETUNREACH:
+ case -EIO:
+ case -ETIMEDOUT:
+ case -EPIPE:
+ dprintk("%s DS connection error %d\n", __func__,
+ task->tk_status);
+ if (!filelayout_test_devid_invalid(devid))
+ _pnfs_return_layout(inode);
+ filelayout_mark_devid_invalid(devid);
+ rpc_wake_up(&tbl->slot_tbl_waitq);
+ nfs4_ds_disconnect(clp);
+ /* fall through */
default:
- dprintk("%s DS error. Retry through MDS %d\n", __func__,
+reset:
+ dprintk("%s Retry through MDS. Error %d\n", __func__,
task->tk_status);
- *reset = 1;
- break;
+ return -NFS4ERR_RESET_TO_MDS;
}
out:
task->tk_status = 0;
@@ -148,18 +232,17 @@ wait_on_recovery:
static int filelayout_read_done_cb(struct rpc_task *task,
struct nfs_read_data *data)
{
- int reset = 0;
+ struct nfs_pgio_header *hdr = data->header;
+ int err;
- dprintk("%s DS read\n", __func__);
+ err = filelayout_async_handle_error(task, data->args.context->state,
+ data->ds_clp, hdr->lseg);
- if (filelayout_async_handle_error(task, data->args.context->state,
- data->ds_clp, &reset) == -EAGAIN) {
- dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
- __func__, data->ds_clp, data->ds_clp->cl_session);
- if (reset) {
- pnfs_set_lo_fail(data->lseg);
- nfs4_reset_read(task, data);
- }
+ switch (err) {
+ case -NFS4ERR_RESET_TO_MDS:
+ filelayout_reset_read(data);
+ return task->tk_status;
+ case -EAGAIN:
rpc_restart_call_prepare(task);
return -EAGAIN;
}
@@ -175,13 +258,15 @@ static int filelayout_read_done_cb(struct rpc_task *task,
static void
filelayout_set_layoutcommit(struct nfs_write_data *wdata)
{
- if (FILELAYOUT_LSEG(wdata->lseg)->commit_through_mds ||
+ struct nfs_pgio_header *hdr = wdata->header;
+
+ if (FILELAYOUT_LSEG(hdr->lseg)->commit_through_mds ||
wdata->res.verf->committed == NFS_FILE_SYNC)
return;
pnfs_set_layoutcommit(wdata);
- dprintk("%s ionde %lu pls_end_pos %lu\n", __func__, wdata->inode->i_ino,
- (unsigned long) NFS_I(wdata->inode)->layout->plh_lwb);
+ dprintk("%s ionde %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
+ (unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
}
/*
@@ -191,8 +276,14 @@ filelayout_set_layoutcommit(struct nfs_write_data *wdata)
*/
static void filelayout_read_prepare(struct rpc_task *task, void *data)
{
- struct nfs_read_data *rdata = (struct nfs_read_data *)data;
+ struct nfs_read_data *rdata = data;
+ if (filelayout_reset_to_mds(rdata->header->lseg)) {
+ dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
+ filelayout_reset_read(rdata);
+ rpc_exit(task, 0);
+ return;
+ }
rdata->read_done_cb = filelayout_read_done_cb;
if (nfs41_setup_sequence(rdata->ds_clp->cl_session,
@@ -205,42 +296,47 @@ static void filelayout_read_prepare(struct rpc_task *task, void *data)
static void filelayout_read_call_done(struct rpc_task *task, void *data)
{
- struct nfs_read_data *rdata = (struct nfs_read_data *)data;
+ struct nfs_read_data *rdata = data;
dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
+ if (test_bit(NFS_IOHDR_REDO, &rdata->header->flags) &&
+ task->tk_status == 0)
+ return;
+
/* Note this may cause RPC to be resent */
- rdata->mds_ops->rpc_call_done(task, data);
+ rdata->header->mds_ops->rpc_call_done(task, data);
}
static void filelayout_read_count_stats(struct rpc_task *task, void *data)
{
- struct nfs_read_data *rdata = (struct nfs_read_data *)data;
+ struct nfs_read_data *rdata = data;
- rpc_count_iostats(task, NFS_SERVER(rdata->inode)->client->cl_metrics);
+ rpc_count_iostats(task, NFS_SERVER(rdata->header->inode)->client->cl_metrics);
}
static void filelayout_read_release(void *data)
{
- struct nfs_read_data *rdata = (struct nfs_read_data *)data;
+ struct nfs_read_data *rdata = data;
- put_lseg(rdata->lseg);
- rdata->mds_ops->rpc_release(data);
+ nfs_put_client(rdata->ds_clp);
+ rdata->header->mds_ops->rpc_release(data);
}
static int filelayout_write_done_cb(struct rpc_task *task,
struct nfs_write_data *data)
{
- int reset = 0;
-
- if (filelayout_async_handle_error(task, data->args.context->state,
- data->ds_clp, &reset) == -EAGAIN) {
- dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
- __func__, data->ds_clp, data->ds_clp->cl_session);
- if (reset) {
- pnfs_set_lo_fail(data->lseg);
- nfs4_reset_write(task, data);
- }
+ struct nfs_pgio_header *hdr = data->header;
+ int err;
+
+ err = filelayout_async_handle_error(task, data->args.context->state,
+ data->ds_clp, hdr->lseg);
+
+ switch (err) {
+ case -NFS4ERR_RESET_TO_MDS:
+ filelayout_reset_write(data);
+ return task->tk_status;
+ case -EAGAIN:
rpc_restart_call_prepare(task);
return -EAGAIN;
}
@@ -250,7 +346,7 @@ static int filelayout_write_done_cb(struct rpc_task *task,
}
/* Fake up some data that will cause nfs_commit_release to retry the writes. */
-static void prepare_to_resend_writes(struct nfs_write_data *data)
+static void prepare_to_resend_writes(struct nfs_commit_data *data)
{
struct nfs_page *first = nfs_list_entry(data->pages.next);
@@ -261,19 +357,19 @@ static void prepare_to_resend_writes(struct nfs_write_data *data)
}
static int filelayout_commit_done_cb(struct rpc_task *task,
- struct nfs_write_data *data)
+ struct nfs_commit_data *data)
{
- int reset = 0;
-
- if (filelayout_async_handle_error(task, data->args.context->state,
- data->ds_clp, &reset) == -EAGAIN) {
- dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
- __func__, data->ds_clp, data->ds_clp->cl_session);
- if (reset) {
- prepare_to_resend_writes(data);
- pnfs_set_lo_fail(data->lseg);
- } else
- rpc_restart_call_prepare(task);
+ int err;
+
+ err = filelayout_async_handle_error(task, NULL, data->ds_clp,
+ data->lseg);
+
+ switch (err) {
+ case -NFS4ERR_RESET_TO_MDS:
+ prepare_to_resend_writes(data);
+ return -EAGAIN;
+ case -EAGAIN:
+ rpc_restart_call_prepare(task);
return -EAGAIN;
}
@@ -282,8 +378,14 @@ static int filelayout_commit_done_cb(struct rpc_task *task,
static void filelayout_write_prepare(struct rpc_task *task, void *data)
{
- struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+ struct nfs_write_data *wdata = data;
+ if (filelayout_reset_to_mds(wdata->header->lseg)) {
+ dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
+ filelayout_reset_write(wdata);
+ rpc_exit(task, 0);
+ return;
+ }
if (nfs41_setup_sequence(wdata->ds_clp->cl_session,
&wdata->args.seq_args, &wdata->res.seq_res,
task))
@@ -294,36 +396,66 @@ static void filelayout_write_prepare(struct rpc_task *task, void *data)
static void filelayout_write_call_done(struct rpc_task *task, void *data)
{
- struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+ struct nfs_write_data *wdata = data;
+
+ if (test_bit(NFS_IOHDR_REDO, &wdata->header->flags) &&
+ task->tk_status == 0)
+ return;
/* Note this may cause RPC to be resent */
- wdata->mds_ops->rpc_call_done(task, data);
+ wdata->header->mds_ops->rpc_call_done(task, data);
}
static void filelayout_write_count_stats(struct rpc_task *task, void *data)
{
- struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+ struct nfs_write_data *wdata = data;
- rpc_count_iostats(task, NFS_SERVER(wdata->inode)->client->cl_metrics);
+ rpc_count_iostats(task, NFS_SERVER(wdata->header->inode)->client->cl_metrics);
}
static void filelayout_write_release(void *data)
{
- struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+ struct nfs_write_data *wdata = data;
+
+ nfs_put_client(wdata->ds_clp);
+ wdata->header->mds_ops->rpc_release(data);
+}
+
+static void filelayout_commit_prepare(struct rpc_task *task, void *data)
+{
+ struct nfs_commit_data *wdata = data;
- put_lseg(wdata->lseg);
- wdata->mds_ops->rpc_release(data);
+ if (nfs41_setup_sequence(wdata->ds_clp->cl_session,
+ &wdata->args.seq_args, &wdata->res.seq_res,
+ task))
+ return;
+
+ rpc_call_start(task);
+}
+
+static void filelayout_write_commit_done(struct rpc_task *task, void *data)
+{
+ struct nfs_commit_data *wdata = data;
+
+ /* Note this may cause RPC to be resent */
+ wdata->mds_ops->rpc_call_done(task, data);
+}
+
+static void filelayout_commit_count_stats(struct rpc_task *task, void *data)
+{
+ struct nfs_commit_data *cdata = data;
+
+ rpc_count_iostats(task, NFS_SERVER(cdata->inode)->client->cl_metrics);
}
-static void filelayout_commit_release(void *data)
+static void filelayout_commit_release(void *calldata)
{
- struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+ struct nfs_commit_data *data = calldata;
- nfs_commit_release_pages(wdata);
- if (atomic_dec_and_test(&NFS_I(wdata->inode)->commits_outstanding))
- nfs_commit_clear_lock(NFS_I(wdata->inode));
- put_lseg(wdata->lseg);
- nfs_commitdata_release(wdata);
+ data->completion_ops->completion(data);
+ put_lseg(data->lseg);
+ nfs_put_client(data->ds_clp);
+ nfs_commitdata_release(data);
}
static const struct rpc_call_ops filelayout_read_call_ops = {
@@ -341,16 +473,17 @@ static const struct rpc_call_ops filelayout_write_call_ops = {
};
static const struct rpc_call_ops filelayout_commit_call_ops = {
- .rpc_call_prepare = filelayout_write_prepare,
- .rpc_call_done = filelayout_write_call_done,
- .rpc_count_stats = filelayout_write_count_stats,
+ .rpc_call_prepare = filelayout_commit_prepare,
+ .rpc_call_done = filelayout_write_commit_done,
+ .rpc_count_stats = filelayout_commit_count_stats,
.rpc_release = filelayout_commit_release,
};
static enum pnfs_try_status
filelayout_read_pagelist(struct nfs_read_data *data)
{
- struct pnfs_layout_segment *lseg = data->lseg;
+ struct nfs_pgio_header *hdr = data->header;
+ struct pnfs_layout_segment *lseg = hdr->lseg;
struct nfs4_pnfs_ds *ds;
loff_t offset = data->args.offset;
u32 j, idx;
@@ -358,25 +491,20 @@ filelayout_read_pagelist(struct nfs_read_data *data)
int status;
dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
- __func__, data->inode->i_ino,
+ __func__, hdr->inode->i_ino,
data->args.pgbase, (size_t)data->args.count, offset);
- if (test_bit(NFS_DEVICEID_INVALID, &FILELAYOUT_DEVID_NODE(lseg)->flags))
- return PNFS_NOT_ATTEMPTED;
-
/* Retrieve the correct rpc_client for the byte range */
j = nfs4_fl_calc_j_index(lseg, offset);
idx = nfs4_fl_calc_ds_index(lseg, j);
ds = nfs4_fl_prepare_ds(lseg, idx);
- if (!ds) {
- /* Either layout fh index faulty, or ds connect failed */
- set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
- set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
+ if (!ds)
return PNFS_NOT_ATTEMPTED;
- }
- dprintk("%s USE DS: %s\n", __func__, ds->ds_remotestr);
+ dprintk("%s USE DS: %s cl_count %d\n", __func__,
+ ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count));
/* No multipath support. Use first DS */
+ atomic_inc(&ds->ds_clp->cl_count);
data->ds_clp = ds->ds_clp;
fh = nfs4_fl_select_ds_fh(lseg, j);
if (fh)
@@ -386,8 +514,8 @@ filelayout_read_pagelist(struct nfs_read_data *data)
data->mds_offset = offset;
/* Perform an asynchronous read to ds */
- status = nfs_initiate_read(data, ds->ds_clp->cl_rpcclient,
- &filelayout_read_call_ops);
+ status = nfs_initiate_read(ds->ds_clp->cl_rpcclient, data,
+ &filelayout_read_call_ops, RPC_TASK_SOFTCONN);
BUG_ON(status != 0);
return PNFS_ATTEMPTED;
}
@@ -396,32 +524,26 @@ filelayout_read_pagelist(struct nfs_read_data *data)
static enum pnfs_try_status
filelayout_write_pagelist(struct nfs_write_data *data, int sync)
{
- struct pnfs_layout_segment *lseg = data->lseg;
+ struct nfs_pgio_header *hdr = data->header;
+ struct pnfs_layout_segment *lseg = hdr->lseg;
struct nfs4_pnfs_ds *ds;
loff_t offset = data->args.offset;
u32 j, idx;
struct nfs_fh *fh;
int status;
- if (test_bit(NFS_DEVICEID_INVALID, &FILELAYOUT_DEVID_NODE(lseg)->flags))
- return PNFS_NOT_ATTEMPTED;
-
/* Retrieve the correct rpc_client for the byte range */
j = nfs4_fl_calc_j_index(lseg, offset);
idx = nfs4_fl_calc_ds_index(lseg, j);
ds = nfs4_fl_prepare_ds(lseg, idx);
- if (!ds) {
- printk(KERN_ERR "NFS: %s: prepare_ds failed, use MDS\n",
- __func__);
- set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
- set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
+ if (!ds)
return PNFS_NOT_ATTEMPTED;
- }
- dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s\n", __func__,
- data->inode->i_ino, sync, (size_t) data->args.count, offset,
- ds->ds_remotestr);
+ dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d\n",
+ __func__, hdr->inode->i_ino, sync, (size_t) data->args.count,
+ offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count));
data->write_done_cb = filelayout_write_done_cb;
+ atomic_inc(&ds->ds_clp->cl_count);
data->ds_clp = ds->ds_clp;
fh = nfs4_fl_select_ds_fh(lseg, j);
if (fh)
@@ -433,8 +555,9 @@ filelayout_write_pagelist(struct nfs_write_data *data, int sync)
data->args.offset = filelayout_get_dserver_offset(lseg, offset);
/* Perform an asynchronous write */
- status = nfs_initiate_write(data, ds->ds_clp->cl_rpcclient,
- &filelayout_write_call_ops, sync);
+ status = nfs_initiate_write(ds->ds_clp->cl_rpcclient, data,
+ &filelayout_write_call_ops, sync,
+ RPC_TASK_SOFTCONN);
BUG_ON(status != 0);
return PNFS_ATTEMPTED;
}
@@ -650,10 +773,65 @@ filelayout_free_lseg(struct pnfs_layout_segment *lseg)
dprintk("--> %s\n", __func__);
nfs4_fl_put_deviceid(fl->dsaddr);
- kfree(fl->commit_buckets);
+ /* This assumes a single RW lseg */
+ if (lseg->pls_range.iomode == IOMODE_RW) {
+ struct nfs4_filelayout *flo;
+
+ flo = FILELAYOUT_FROM_HDR(lseg->pls_layout);
+ flo->commit_info.nbuckets = 0;
+ kfree(flo->commit_info.buckets);
+ flo->commit_info.buckets = NULL;
+ }
_filelayout_free_lseg(fl);
}
+static int
+filelayout_alloc_commit_info(struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo,
+ gfp_t gfp_flags)
+{
+ struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
+ struct pnfs_commit_bucket *buckets;
+ int size;
+
+ if (fl->commit_through_mds)
+ return 0;
+ if (cinfo->ds->nbuckets != 0) {
+ /* This assumes there is only one IOMODE_RW lseg. What
+ * we really want to do is have a layout_hdr level
+ * dictionary of <multipath_list4, fh> keys, each
+ * associated with a struct list_head, populated by calls
+ * to filelayout_write_pagelist().
+ * */
+ return 0;
+ }
+
+ size = (fl->stripe_type == STRIPE_SPARSE) ?
+ fl->dsaddr->ds_num : fl->dsaddr->stripe_count;
+
+ buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
+ gfp_flags);
+ if (!buckets)
+ return -ENOMEM;
+ else {
+ int i;
+
+ spin_lock(cinfo->lock);
+ if (cinfo->ds->nbuckets != 0)
+ kfree(buckets);
+ else {
+ cinfo->ds->buckets = buckets;
+ cinfo->ds->nbuckets = size;
+ for (i = 0; i < size; i++) {
+ INIT_LIST_HEAD(&buckets[i].written);
+ INIT_LIST_HEAD(&buckets[i].committing);
+ }
+ }
+ spin_unlock(cinfo->lock);
+ return 0;
+ }
+}
+
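filelayout_alloc_commit_info() allocates the bucket array with kcalloc() before taking cinfo->lock, then re-checks nbuckets under the lock and frees its own allocation if another writer got there first. A user-space sketch of that allocate-outside-the-lock, recheck-under-the-lock pattern is shown below; the mutex, bucket size, and names are illustrative, not the kernel API.

/* Allocate without the lock held, then install or discard under the lock. */
#include <pthread.h>
#include <stdlib.h>

struct commit_info {
	pthread_mutex_t lock;
	void *buckets;
	int nbuckets;
};

static int alloc_buckets(struct commit_info *ci, int size)
{
	void *buckets;

	if (ci->nbuckets != 0)		/* fast path: already allocated */
		return 0;

	buckets = calloc(size, 64);	/* 64 bytes per bucket: placeholder size */
	if (!buckets)
		return -1;

	pthread_mutex_lock(&ci->lock);
	if (ci->nbuckets != 0) {
		free(buckets);		/* another thread won the race */
	} else {
		ci->buckets = buckets;
		ci->nbuckets = size;
	}
	pthread_mutex_unlock(&ci->lock);
	return 0;
}

int main(void)
{
	struct commit_info ci = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };

	return alloc_buckets(&ci, 16);
}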
static struct pnfs_layout_segment *
filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
struct nfs4_layoutget_res *lgr,
@@ -673,29 +851,6 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
_filelayout_free_lseg(fl);
return NULL;
}
-
- /* This assumes there is only one IOMODE_RW lseg. What
- * we really want to do is have a layout_hdr level
- * dictionary of <multipath_list4, fh> keys, each
- * associated with a struct list_head, populated by calls
- * to filelayout_write_pagelist().
- * */
- if ((!fl->commit_through_mds) && (lgr->range.iomode == IOMODE_RW)) {
- int i;
- int size = (fl->stripe_type == STRIPE_SPARSE) ?
- fl->dsaddr->ds_num : fl->dsaddr->stripe_count;
-
- fl->commit_buckets = kcalloc(size, sizeof(struct nfs4_fl_commit_bucket), gfp_flags);
- if (!fl->commit_buckets) {
- filelayout_free_lseg(&fl->generic_hdr);
- return NULL;
- }
- fl->number_of_buckets = size;
- for (i = 0; i < size; i++) {
- INIT_LIST_HEAD(&fl->commit_buckets[i].written);
- INIT_LIST_HEAD(&fl->commit_buckets[i].committing);
- }
- }
return &fl->generic_hdr;
}
@@ -716,8 +871,8 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
!nfs_generic_pg_test(pgio, prev, req))
return false;
- p_stripe = (u64)prev->wb_index << PAGE_CACHE_SHIFT;
- r_stripe = (u64)req->wb_index << PAGE_CACHE_SHIFT;
+ p_stripe = (u64)req_offset(prev);
+ r_stripe = (u64)req_offset(req);
stripe_unit = FILELAYOUT_LSEG(pgio->pg_lseg)->stripe_unit;
do_div(p_stripe, stripe_unit);
@@ -732,6 +887,16 @@ filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
{
BUG_ON(pgio->pg_lseg != NULL);
+ if (req->wb_offset != req->wb_pgbase) {
+ /*
+ * Handling unaligned pages is difficult, because we have to
+ * somehow split a req in two in certain cases in the
+ * pg.test code. Avoid this by just not using pnfs
+ * in this case.
+ */
+ nfs_pageio_reset_read_mds(pgio);
+ return;
+ }
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
req->wb_context,
0,
@@ -747,8 +912,13 @@ static void
filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req)
{
+ struct nfs_commit_info cinfo;
+ int status;
+
BUG_ON(pgio->pg_lseg != NULL);
+ if (req->wb_offset != req->wb_pgbase)
+ goto out_mds;
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
req->wb_context,
0,
@@ -757,7 +927,17 @@ filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
GFP_NOFS);
/* If no lseg, fall back to write through mds */
if (pgio->pg_lseg == NULL)
- nfs_pageio_reset_write_mds(pgio);
+ goto out_mds;
+ nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
+ status = filelayout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
+ if (status < 0) {
+ put_lseg(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
+ goto out_mds;
+ }
+ return;
+out_mds:
+ nfs_pageio_reset_write_mds(pgio);
}
static const struct nfs_pageio_ops filelayout_pg_read_ops = {
@@ -784,43 +964,42 @@ static u32 select_bucket_index(struct nfs4_filelayout_segment *fl, u32 j)
* If this will make the bucket empty, it will need to put the lseg reference.
*/
static void
-filelayout_clear_request_commit(struct nfs_page *req)
+filelayout_clear_request_commit(struct nfs_page *req,
+ struct nfs_commit_info *cinfo)
{
struct pnfs_layout_segment *freeme = NULL;
- struct inode *inode = req->wb_context->dentry->d_inode;
- spin_lock(&inode->i_lock);
+ spin_lock(cinfo->lock);
if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
goto out;
+ cinfo->ds->nwritten--;
if (list_is_singular(&req->wb_list)) {
- struct pnfs_layout_segment *lseg;
+ struct pnfs_commit_bucket *bucket;
- /* From here we can find the bucket, but for the moment,
- * since there is only one relevant lseg...
- */
- list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
- if (lseg->pls_range.iomode == IOMODE_RW) {
- freeme = lseg;
- break;
- }
- }
+ bucket = list_first_entry(&req->wb_list,
+ struct pnfs_commit_bucket,
+ written);
+ freeme = bucket->wlseg;
+ bucket->wlseg = NULL;
}
out:
- nfs_request_remove_commit_list(req);
- spin_unlock(&inode->i_lock);
+ nfs_request_remove_commit_list(req, cinfo);
+ spin_unlock(cinfo->lock);
put_lseg(freeme);
}
static struct list_head *
filelayout_choose_commit_list(struct nfs_page *req,
- struct pnfs_layout_segment *lseg)
+ struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo)
{
struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
u32 i, j;
struct list_head *list;
+ struct pnfs_commit_bucket *buckets;
if (fl->commit_through_mds)
- return &NFS_I(req->wb_context->dentry->d_inode)->commit_list;
+ return &cinfo->mds->list;
/* Note that we are calling nfs4_fl_calc_j_index on each page
* that ends up being committed to a data server. An attractive
@@ -828,31 +1007,33 @@ filelayout_choose_commit_list(struct nfs_page *req,
* to store the value calculated in filelayout_write_pagelist
* and just use that here.
*/
- j = nfs4_fl_calc_j_index(lseg,
- (loff_t)req->wb_index << PAGE_CACHE_SHIFT);
+ j = nfs4_fl_calc_j_index(lseg, req_offset(req));
i = select_bucket_index(fl, j);
- list = &fl->commit_buckets[i].written;
+ buckets = cinfo->ds->buckets;
+ list = &buckets[i].written;
if (list_empty(list)) {
/* Non-empty buckets hold a reference on the lseg. That ref
* is normally transferred to the COMMIT call and released
* there. It could also be released if the last req is pulled
* off due to a rewrite, in which case it will be done in
- * filelayout_remove_commit_req
+ * filelayout_clear_request_commit
*/
- get_lseg(lseg);
+ buckets[i].wlseg = get_lseg(lseg);
}
set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
+ cinfo->ds->nwritten++;
return list;
}
static void
filelayout_mark_request_commit(struct nfs_page *req,
- struct pnfs_layout_segment *lseg)
+ struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo)
{
struct list_head *list;
- list = filelayout_choose_commit_list(req, lseg);
- nfs_request_add_commit_list(req, list);
+ list = filelayout_choose_commit_list(req, lseg, cinfo);
+ nfs_request_add_commit_list(req, list, cinfo);
}
static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
@@ -880,7 +1061,7 @@ select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
return flseg->fh_array[i];
}
-static int filelayout_initiate_commit(struct nfs_write_data *data, int how)
+static int filelayout_initiate_commit(struct nfs_commit_data *data, int how)
{
struct pnfs_layout_segment *lseg = data->lseg;
struct nfs4_pnfs_ds *ds;
@@ -890,135 +1071,138 @@ static int filelayout_initiate_commit(struct nfs_write_data *data, int how)
idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
ds = nfs4_fl_prepare_ds(lseg, idx);
if (!ds) {
- printk(KERN_ERR "NFS: %s: prepare_ds failed, use MDS\n",
- __func__);
- set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
- set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
prepare_to_resend_writes(data);
filelayout_commit_release(data);
return -EAGAIN;
}
- dprintk("%s ino %lu, how %d\n", __func__, data->inode->i_ino, how);
- data->write_done_cb = filelayout_commit_done_cb;
+ dprintk("%s ino %lu, how %d cl_count %d\n", __func__,
+ data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count));
+ data->commit_done_cb = filelayout_commit_done_cb;
+ atomic_inc(&ds->ds_clp->cl_count);
data->ds_clp = ds->ds_clp;
fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
if (fh)
data->args.fh = fh;
- return nfs_initiate_commit(data, ds->ds_clp->cl_rpcclient,
- &filelayout_commit_call_ops, how);
-}
-
-/*
- * This is only useful while we are using whole file layouts.
- */
-static struct pnfs_layout_segment *
-find_only_write_lseg_locked(struct inode *inode)
-{
- struct pnfs_layout_segment *lseg;
-
- list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list)
- if (lseg->pls_range.iomode == IOMODE_RW)
- return lseg;
- return NULL;
-}
-
-static struct pnfs_layout_segment *find_only_write_lseg(struct inode *inode)
-{
- struct pnfs_layout_segment *rv;
-
- spin_lock(&inode->i_lock);
- rv = find_only_write_lseg_locked(inode);
- if (rv)
- get_lseg(rv);
- spin_unlock(&inode->i_lock);
- return rv;
+ return nfs_initiate_commit(ds->ds_clp->cl_rpcclient, data,
+ &filelayout_commit_call_ops, how,
+ RPC_TASK_SOFTCONN);
}
static int
-filelayout_scan_ds_commit_list(struct nfs4_fl_commit_bucket *bucket, int max,
- spinlock_t *lock)
+transfer_commit_list(struct list_head *src, struct list_head *dst,
+ struct nfs_commit_info *cinfo, int max)
{
- struct list_head *src = &bucket->written;
- struct list_head *dst = &bucket->committing;
struct nfs_page *req, *tmp;
int ret = 0;
list_for_each_entry_safe(req, tmp, src, wb_list) {
if (!nfs_lock_request(req))
continue;
- if (cond_resched_lock(lock))
+ kref_get(&req->wb_kref);
+ if (cond_resched_lock(cinfo->lock))
list_safe_reset_next(req, tmp, wb_list);
- nfs_request_remove_commit_list(req);
+ nfs_request_remove_commit_list(req, cinfo);
clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
nfs_list_add_request(req, dst);
ret++;
- if (ret == max)
+ if ((ret == max) && !cinfo->dreq)
break;
}
return ret;
}
+static int
+filelayout_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
+ struct nfs_commit_info *cinfo,
+ int max)
+{
+ struct list_head *src = &bucket->written;
+ struct list_head *dst = &bucket->committing;
+ int ret;
+
+ ret = transfer_commit_list(src, dst, cinfo, max);
+ if (ret) {
+ cinfo->ds->nwritten -= ret;
+ cinfo->ds->ncommitting += ret;
+ bucket->clseg = bucket->wlseg;
+ if (list_empty(src))
+ bucket->wlseg = NULL;
+ else
+ get_lseg(bucket->clseg);
+ }
+ return ret;
+}
+
/* Move reqs from written to committing lists, returning count of number moved.
- * Note called with i_lock held.
+ * Note called with cinfo->lock held.
*/
-static int filelayout_scan_commit_lists(struct inode *inode, int max,
- spinlock_t *lock)
+static int filelayout_scan_commit_lists(struct nfs_commit_info *cinfo,
+ int max)
{
- struct pnfs_layout_segment *lseg;
- struct nfs4_filelayout_segment *fl;
int i, rv = 0, cnt;
- lseg = find_only_write_lseg_locked(inode);
- if (!lseg)
- goto out_done;
- fl = FILELAYOUT_LSEG(lseg);
- if (fl->commit_through_mds)
- goto out_done;
- for (i = 0; i < fl->number_of_buckets && max != 0; i++) {
- cnt = filelayout_scan_ds_commit_list(&fl->commit_buckets[i],
- max, lock);
+ for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) {
+ cnt = filelayout_scan_ds_commit_list(&cinfo->ds->buckets[i],
+ cinfo, max);
max -= cnt;
rv += cnt;
}
-out_done:
return rv;
}
+/* Pull everything off the committing lists and dump into @dst */
+static void filelayout_recover_commit_reqs(struct list_head *dst,
+ struct nfs_commit_info *cinfo)
+{
+ struct pnfs_commit_bucket *b;
+ int i;
+
+ /* NOTE: cinfo->lock is NOT held, relying on the fact that this is
+ * only called on a single thread per dreq.
+ * We can't take the lock here because we need to call put_lseg().
+ */
+ for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
+ if (transfer_commit_list(&b->written, dst, cinfo, 0)) {
+ BUG_ON(!list_empty(&b->written));
+ put_lseg(b->wlseg);
+ b->wlseg = NULL;
+ }
+ }
+ cinfo->ds->nwritten = 0;
+}
+
static unsigned int
-alloc_ds_commits(struct inode *inode, struct list_head *list)
+alloc_ds_commits(struct nfs_commit_info *cinfo, struct list_head *list)
{
- struct pnfs_layout_segment *lseg;
- struct nfs4_filelayout_segment *fl;
- struct nfs_write_data *data;
+ struct pnfs_ds_commit_info *fl_cinfo;
+ struct pnfs_commit_bucket *bucket;
+ struct nfs_commit_data *data;
int i, j;
unsigned int nreq = 0;
- /* Won't need this when non-whole file layout segments are supported
- * instead we will use a pnfs_layout_hdr structure */
- lseg = find_only_write_lseg(inode);
- if (!lseg)
- return 0;
- fl = FILELAYOUT_LSEG(lseg);
- for (i = 0; i < fl->number_of_buckets; i++) {
- if (list_empty(&fl->commit_buckets[i].committing))
+ fl_cinfo = cinfo->ds;
+ bucket = fl_cinfo->buckets;
+ for (i = 0; i < fl_cinfo->nbuckets; i++, bucket++) {
+ if (list_empty(&bucket->committing))
continue;
data = nfs_commitdata_alloc();
if (!data)
break;
data->ds_commit_index = i;
- data->lseg = lseg;
+ data->lseg = bucket->clseg;
+ bucket->clseg = NULL;
list_add(&data->pages, list);
nreq++;
}
/* Clean up on error */
- for (j = i; j < fl->number_of_buckets; j++) {
- if (list_empty(&fl->commit_buckets[i].committing))
+ for (j = i; j < fl_cinfo->nbuckets; j++, bucket++) {
+ if (list_empty(&bucket->committing))
continue;
- nfs_retry_commit(&fl->commit_buckets[i].committing, lseg);
- put_lseg(lseg); /* associated with emptying bucket */
+ nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo);
+ put_lseg(bucket->clseg);
+ bucket->clseg = NULL;
}
- put_lseg(lseg);
/* Caller will clean up entries put on list */
return nreq;
}
@@ -1026,9 +1210,9 @@ alloc_ds_commits(struct inode *inode, struct list_head *list)
/* This follows nfs_commit_list pretty closely */
static int
filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
- int how)
+ int how, struct nfs_commit_info *cinfo)
{
- struct nfs_write_data *data, *tmp;
+ struct nfs_commit_data *data, *tmp;
LIST_HEAD(list);
unsigned int nreq = 0;
@@ -1039,30 +1223,34 @@ filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
list_add(&data->pages, &list);
nreq++;
} else
- nfs_retry_commit(mds_pages, NULL);
+ nfs_retry_commit(mds_pages, NULL, cinfo);
}
- nreq += alloc_ds_commits(inode, &list);
+ nreq += alloc_ds_commits(cinfo, &list);
if (nreq == 0) {
- nfs_commit_clear_lock(NFS_I(inode));
+ cinfo->completion_ops->error_cleanup(NFS_I(inode));
goto out;
}
- atomic_add(nreq, &NFS_I(inode)->commits_outstanding);
+ atomic_add(nreq, &cinfo->mds->rpcs_out);
list_for_each_entry_safe(data, tmp, &list, pages) {
list_del_init(&data->pages);
if (!data->lseg) {
- nfs_init_commit(data, mds_pages, NULL);
- nfs_initiate_commit(data, NFS_CLIENT(inode),
- data->mds_ops, how);
+ nfs_init_commit(data, mds_pages, NULL, cinfo);
+ nfs_initiate_commit(NFS_CLIENT(inode), data,
+ data->mds_ops, how, 0);
} else {
- nfs_init_commit(data, &FILELAYOUT_LSEG(data->lseg)->commit_buckets[data->ds_commit_index].committing, data->lseg);
+ struct pnfs_commit_bucket *buckets;
+
+ buckets = cinfo->ds->buckets;
+ nfs_init_commit(data, &buckets[data->ds_commit_index].committing, data->lseg, cinfo);
filelayout_initiate_commit(data, how);
}
}
out:
+ cinfo->ds->ncommitting = 0;
return PNFS_ATTEMPTED;
}
@@ -1072,17 +1260,47 @@ filelayout_free_deveiceid_node(struct nfs4_deviceid_node *d)
nfs4_fl_free_deviceid(container_of(d, struct nfs4_file_layout_dsaddr, id_node));
}
+static struct pnfs_layout_hdr *
+filelayout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
+{
+ struct nfs4_filelayout *flo;
+
+ flo = kzalloc(sizeof(*flo), gfp_flags);
+ return &flo->generic_hdr;
+}
+
+static void
+filelayout_free_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+ kfree(FILELAYOUT_FROM_HDR(lo));
+}
+
+static struct pnfs_ds_commit_info *
+filelayout_get_ds_info(struct inode *inode)
+{
+ struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
+
+ if (layout == NULL)
+ return NULL;
+ else
+ return &FILELAYOUT_FROM_HDR(layout)->commit_info;
+}
+
static struct pnfs_layoutdriver_type filelayout_type = {
.id = LAYOUT_NFSV4_1_FILES,
.name = "LAYOUT_NFSV4_1_FILES",
.owner = THIS_MODULE,
+ .alloc_layout_hdr = filelayout_alloc_layout_hdr,
+ .free_layout_hdr = filelayout_free_layout_hdr,
.alloc_lseg = filelayout_alloc_lseg,
.free_lseg = filelayout_free_lseg,
.pg_read_ops = &filelayout_pg_read_ops,
.pg_write_ops = &filelayout_pg_write_ops,
+ .get_ds_info = &filelayout_get_ds_info,
.mark_request_commit = filelayout_mark_request_commit,
.clear_request_commit = filelayout_clear_request_commit,
.scan_commit_lists = filelayout_scan_commit_lists,
+ .recover_commit_reqs = filelayout_recover_commit_reqs,
.commit_pagelist = filelayout_commit_pagelist,
.read_pagelist = filelayout_read_pagelist,
.write_pagelist = filelayout_write_pagelist,
diff --git a/fs/nfs/nfs4filelayout.h b/fs/nfs/nfs4filelayout.h
index 21190bb1f5e..43fe802dd67 100644
--- a/fs/nfs/nfs4filelayout.h
+++ b/fs/nfs/nfs4filelayout.h
@@ -33,6 +33,13 @@
#include "pnfs.h"
/*
+ * Default data server connection timeout and retrans values.
+ * Set by the module parameters dataserver_timeo and dataserver_retrans.
+ */
+#define NFS4_DEF_DS_TIMEO 60
+#define NFS4_DEF_DS_RETRANS 5
+
+/*
* Field testing shows we need to support up to 4096 stripe indices.
* We store each index as a u8 (u32 on the wire) to keep the memory footprint
* reasonable. This in turn means we support a maximum of 256
@@ -41,6 +48,9 @@
#define NFS4_PNFS_MAX_STRIPE_CNT 4096
#define NFS4_PNFS_MAX_MULTI_CNT 256 /* 256 fit into a u8 stripe_index */
+/* error codes for internal use */
+#define NFS4ERR_RESET_TO_MDS 12001
+
enum stripetype4 {
STRIPE_SPARSE = 1,
STRIPE_DENSE = 2
@@ -62,23 +72,14 @@ struct nfs4_pnfs_ds {
atomic_t ds_count;
};
-/* nfs4_file_layout_dsaddr flags */
-#define NFS4_DEVICE_ID_NEG_ENTRY 0x00000001
-
struct nfs4_file_layout_dsaddr {
struct nfs4_deviceid_node id_node;
- unsigned long flags;
u32 stripe_count;
u8 *stripe_indices;
u32 ds_num;
struct nfs4_pnfs_ds *ds_list[1];
};
-struct nfs4_fl_commit_bucket {
- struct list_head written;
- struct list_head committing;
-};
-
struct nfs4_filelayout_segment {
struct pnfs_layout_segment generic_hdr;
u32 stripe_type;
@@ -89,10 +90,19 @@ struct nfs4_filelayout_segment {
struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */
unsigned int num_fh;
struct nfs_fh **fh_array;
- struct nfs4_fl_commit_bucket *commit_buckets; /* Sort commits to ds */
- int number_of_buckets;
};
+struct nfs4_filelayout {
+ struct pnfs_layout_hdr generic_hdr;
+ struct pnfs_ds_commit_info commit_info;
+};
+
+static inline struct nfs4_filelayout *
+FILELAYOUT_FROM_HDR(struct pnfs_layout_hdr *lo)
+{
+ return container_of(lo, struct nfs4_filelayout, generic_hdr);
+}
+
static inline struct nfs4_filelayout_segment *
FILELAYOUT_LSEG(struct pnfs_layout_segment *lseg)
{
@@ -107,6 +117,36 @@ FILELAYOUT_DEVID_NODE(struct pnfs_layout_segment *lseg)
return &FILELAYOUT_LSEG(lseg)->dsaddr->id_node;
}
+static inline void
+filelayout_mark_devid_invalid(struct nfs4_deviceid_node *node)
+{
+ u32 *p = (u32 *)&node->deviceid;
+
+ printk(KERN_WARNING "NFS: Deviceid [%x%x%x%x] marked out of use.\n",
+ p[0], p[1], p[2], p[3]);
+
+ set_bit(NFS_DEVICEID_INVALID, &node->flags);
+}
+
+static inline bool
+filelayout_test_layout_invalid(struct pnfs_layout_hdr *lo)
+{
+ return test_bit(NFS_LAYOUT_INVALID, &lo->plh_flags);
+}
+
+static inline bool
+filelayout_test_devid_invalid(struct nfs4_deviceid_node *node)
+{
+ return test_bit(NFS_DEVICEID_INVALID, &node->flags);
+}
+
+static inline bool
+filelayout_reset_to_mds(struct pnfs_layout_segment *lseg)
+{
+ return filelayout_test_devid_invalid(FILELAYOUT_DEVID_NODE(lseg)) ||
+ filelayout_test_layout_invalid(lseg->pls_layout);
+}
+
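The new inline helpers reduce the fall-back decision to a flag test: filelayout_reset_to_mds() is true whenever either the device ID node or the layout header carries its "invalid" bit. A small user-space sketch of the same predicate, with plain bit operations standing in for the kernel's test_bit()/set_bit() and illustrative bit positions, follows.

/* Sketch of the invalid-flag predicates; bit positions are illustrative. */
#include <stdbool.h>

#define DEVID_INVALID_BIT   0
#define LAYOUT_INVALID_BIT  1

struct devid  { unsigned long flags; };
struct layout { unsigned long flags; };

static bool devid_invalid(const struct devid *d)
{
	return d->flags & (1UL << DEVID_INVALID_BIT);
}

static bool layout_invalid(const struct layout *l)
{
	return l->flags & (1UL << LAYOUT_INVALID_BIT);
}

/* Mirror of filelayout_reset_to_mds(): fall back to the MDS when either
 * the device ID or the layout has been invalidated. */
static bool reset_to_mds(const struct devid *d, const struct layout *l)
{
	return devid_invalid(d) || layout_invalid(l);
}

int main(void)
{
	struct devid d = { 1UL << DEVID_INVALID_BIT };
	struct layout l = { 0 };

	return reset_to_mds(&d, &l) ? 0 : 1;
}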
extern struct nfs_fh *
nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j);
@@ -119,5 +159,6 @@ extern void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr);
extern void nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr);
struct nfs4_file_layout_dsaddr *
get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags);
+void nfs4_ds_disconnect(struct nfs_client *clp);
#endif /* FS_NFS_NFS4FILELAYOUT_H */
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index c9cff9adb2d..a1fab8da7f0 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -30,12 +30,16 @@
#include <linux/nfs_fs.h>
#include <linux/vmalloc.h>
+#include <linux/module.h>
#include "internal.h"
#include "nfs4filelayout.h"
#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+static unsigned int dataserver_timeo = NFS4_DEF_DS_TIMEO;
+static unsigned int dataserver_retrans = NFS4_DEF_DS_RETRANS;
+
/*
* Data server cache
*
@@ -145,6 +149,28 @@ _data_server_lookup_locked(const struct list_head *dsaddrs)
}
/*
+ * Look up a DS by its nfs_client pointer and zero the data server's client pointer.
+ */
+void nfs4_ds_disconnect(struct nfs_client *clp)
+{
+ struct nfs4_pnfs_ds *ds;
+ struct nfs_client *found = NULL;
+
+ dprintk("%s clp %p\n", __func__, clp);
+ spin_lock(&nfs4_ds_cache_lock);
+ list_for_each_entry(ds, &nfs4_data_server_cache, ds_node)
+ if (ds->ds_clp && ds->ds_clp == clp) {
+ found = ds->ds_clp;
+ ds->ds_clp = NULL;
+ }
+ spin_unlock(&nfs4_ds_cache_lock);
+ if (found) {
+ set_bit(NFS_CS_STOP_RENEW, &clp->cl_res_state);
+ nfs_put_client(clp);
+ }
+}
+
+/*
* Create an rpc connection to the nfs4_pnfs_ds data server
* Currently only supports IPv4 and IPv6 addresses
*/
@@ -165,8 +191,9 @@ nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds)
__func__, ds->ds_remotestr, da->da_remotestr);
clp = nfs4_set_ds_client(mds_srv->nfs_client,
- (struct sockaddr *)&da->da_addr,
- da->da_addrlen, IPPROTO_TCP);
+ (struct sockaddr *)&da->da_addr,
+ da->da_addrlen, IPPROTO_TCP,
+ dataserver_timeo, dataserver_retrans);
if (!IS_ERR(clp))
break;
}
@@ -176,28 +203,7 @@ nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds)
goto out;
}
- if ((clp->cl_exchange_flags & EXCHGID4_FLAG_MASK_PNFS) != 0) {
- if (!is_ds_client(clp)) {
- status = -ENODEV;
- goto out_put;
- }
- ds->ds_clp = clp;
- dprintk("%s [existing] server=%s\n", __func__,
- ds->ds_remotestr);
- goto out;
- }
-
- /*
- * Do not set NFS_CS_CHECK_LEASE_TIME instead set the DS lease to
- * be equal to the MDS lease. Renewal is scheduled in create_session.
- */
- spin_lock(&mds_srv->nfs_client->cl_lock);
- clp->cl_lease_time = mds_srv->nfs_client->cl_lease_time;
- spin_unlock(&mds_srv->nfs_client->cl_lock);
- clp->cl_last_renewal = jiffies;
-
- /* New nfs_client */
- status = nfs4_init_ds_session(clp);
+ status = nfs4_init_ds_session(clp, mds_srv->nfs_client->cl_lease_time);
if (status)
goto out_put;
@@ -602,7 +608,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags)
mp_count = be32_to_cpup(p); /* multipath count */
for (j = 0; j < mp_count; j++) {
- da = decode_ds_addr(NFS_SERVER(ino)->nfs_client->net,
+ da = decode_ds_addr(NFS_SERVER(ino)->nfs_client->cl_net,
&stream, gfp_flags);
if (da)
list_add_tail(&da->da_node, &dsaddrs);
@@ -791,48 +797,42 @@ nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j)
return flseg->fh_array[i];
}
-static void
-filelayout_mark_devid_negative(struct nfs4_file_layout_dsaddr *dsaddr,
- int err, const char *ds_remotestr)
-{
- u32 *p = (u32 *)&dsaddr->id_node.deviceid;
-
- printk(KERN_ERR "NFS: data server %s connection error %d."
- " Deviceid [%x%x%x%x] marked out of use.\n",
- ds_remotestr, err, p[0], p[1], p[2], p[3]);
-
- spin_lock(&nfs4_ds_cache_lock);
- dsaddr->flags |= NFS4_DEVICE_ID_NEG_ENTRY;
- spin_unlock(&nfs4_ds_cache_lock);
-}
-
struct nfs4_pnfs_ds *
nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
{
struct nfs4_file_layout_dsaddr *dsaddr = FILELAYOUT_LSEG(lseg)->dsaddr;
struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx];
+ struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
+
+ if (filelayout_test_devid_invalid(devid))
+ return NULL;
if (ds == NULL) {
printk(KERN_ERR "NFS: %s: No data server for offset index %d\n",
__func__, ds_idx);
- return NULL;
+ goto mark_dev_invalid;
}
if (!ds->ds_clp) {
struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
int err;
- if (dsaddr->flags & NFS4_DEVICE_ID_NEG_ENTRY) {
- /* Already tried to connect, don't try again */
- dprintk("%s Deviceid marked out of use\n", __func__);
- return NULL;
- }
err = nfs4_ds_connect(s, ds);
- if (err) {
- filelayout_mark_devid_negative(dsaddr, err,
- ds->ds_remotestr);
- return NULL;
- }
+ if (err)
+ goto mark_dev_invalid;
}
return ds;
+
+mark_dev_invalid:
+ filelayout_mark_devid_invalid(devid);
+ return NULL;
}
+
+module_param(dataserver_retrans, uint, 0644);
+MODULE_PARM_DESC(dataserver_retrans, "The number of times the NFSv4.1 client "
+ "retries a request before it attempts further "
+ " recovery action.");
+module_param(dataserver_timeo, uint, 0644);
+MODULE_PARM_DESC(dataserver_timeo, "The time (in tenths of a second) the "
+ "NFSv4.1 client waits for a response from a "
+ " data server before it retries an NFS request.");
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index a7f3dedc4ec..017b4b01a69 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -132,6 +132,35 @@ static size_t nfs_parse_server_name(char *string, size_t len,
return ret;
}
+rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors)
+{
+ struct gss_api_mech *mech;
+ struct xdr_netobj oid;
+ int i;
+ rpc_authflavor_t pseudoflavor = RPC_AUTH_UNIX;
+
+ for (i = 0; i < flavors->num_flavors; i++) {
+ struct nfs4_secinfo_flavor *flavor;
+ flavor = &flavors->flavors[i];
+
+ if (flavor->flavor == RPC_AUTH_NULL || flavor->flavor == RPC_AUTH_UNIX) {
+ pseudoflavor = flavor->flavor;
+ break;
+ } else if (flavor->flavor == RPC_AUTH_GSS) {
+ oid.len = flavor->gss.sec_oid4.len;
+ oid.data = flavor->gss.sec_oid4.data;
+ mech = gss_mech_get_by_OID(&oid);
+ if (!mech)
+ continue;
+ pseudoflavor = gss_svc_to_pseudoflavor(mech, flavor->gss.service);
+ gss_mech_put(mech);
+ break;
+ }
+ }
+
+ return pseudoflavor;
+}
+
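nfs_find_best_sec() walks the SECINFO reply in server order: it stops at the first AUTH_NULL or AUTH_UNIX entry, otherwise takes the first GSS flavor whose mechanism the client can resolve, and falls back to AUTH_UNIX if nothing matches. The sketch below mirrors that preference order in user space; the gss_id field, resolve_gss() helper, and pseudoflavor value are illustrative stand-ins for gss_mech_get_by_OID() and gss_svc_to_pseudoflavor().

/* Flavor-preference sketch: plain flavors win immediately, then the
 * first resolvable GSS flavor, then the AUTH_UNIX default. */
#include <stdbool.h>

enum { AUTH_NULL = 0, AUTH_UNIX = 1, AUTH_GSS = 6 };

struct flavor {
	int flavor;
	int gss_id;	/* stand-in for the GSS mechanism OID / pseudoflavor */
};

static bool resolve_gss(int gss_id)	/* stand-in for gss_mech_get_by_OID() */
{
	return gss_id != 0;
}

static int find_best_sec(const struct flavor *flavors, int n)
{
	int best = AUTH_UNIX;	/* default when nothing better is found */
	int i;

	for (i = 0; i < n; i++) {
		if (flavors[i].flavor == AUTH_NULL ||
		    flavors[i].flavor == AUTH_UNIX) {
			best = flavors[i].flavor;
			break;
		}
		if (flavors[i].flavor == AUTH_GSS &&
		    resolve_gss(flavors[i].gss_id)) {
			best = flavors[i].gss_id;  /* pretend this is the pseudoflavor */
			break;
		}
	}
	return best;
}

int main(void)
{
	const struct flavor list[] = { { AUTH_GSS, 390003 }, { AUTH_UNIX, 0 } };

	return find_best_sec(list, 2) == 390003 ? 0 : 1;
}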
static rpc_authflavor_t nfs4_negotiate_security(struct inode *inode, struct qstr *name)
{
struct page *page;
@@ -168,7 +197,7 @@ struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *clnt, struct inode *ino
rpc_authflavor_t flavor;
flavor = nfs4_negotiate_security(inode, name);
- if (flavor < 0)
+ if ((int)flavor < 0)
return ERR_PTR(flavor);
clone = rpc_clone_client(clnt);
@@ -300,7 +329,7 @@ out:
* @dentry - dentry of referral
*
*/
-struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry)
+static struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry)
{
struct vfsmount *mnt = ERR_PTR(-ENOMEM);
struct dentry *parent;
@@ -341,3 +370,25 @@ out:
dprintk("%s: done\n", __func__);
return mnt;
}
+
+struct vfsmount *nfs4_submount(struct nfs_server *server, struct dentry *dentry,
+ struct nfs_fh *fh, struct nfs_fattr *fattr)
+{
+ struct dentry *parent = dget_parent(dentry);
+ struct rpc_clnt *client;
+ struct vfsmount *mnt;
+
+ /* Look it up again to get its attributes and sec flavor */
+ client = nfs4_proc_lookup_mountpoint(parent->d_inode, &dentry->d_name, fh, fattr);
+ dput(parent);
+ if (IS_ERR(client))
+ return ERR_CAST(client);
+
+ if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
+ mnt = nfs_do_refmount(client, dentry);
+ else
+ mnt = nfs_do_submount(dentry, fh, fattr, client->cl_auth->au_flavor);
+
+ rpc_shutdown_client(client);
+ return mnt;
+}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ab985f6f0da..d48dbefa0e7 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -64,6 +64,7 @@
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
+#include "netns.h"
#define NFSDBG_FACILITY NFSDBG_PROC
@@ -80,6 +81,7 @@ static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
+static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
struct nfs_fattr *fattr, struct iattr *sattr,
@@ -101,6 +103,8 @@ static int nfs4_map_errors(int err)
case -NFS4ERR_BADOWNER:
case -NFS4ERR_BADNAME:
return -EINVAL;
+ case -NFS4ERR_SHARE_DENIED:
+ return -EACCES;
default:
dprintk("%s could not handle NFSv4 error %d\n",
__func__, -err);
@@ -304,7 +308,7 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc
case -NFS4ERR_SEQ_MISORDERED:
dprintk("%s ERROR: %d Reset session\n", __func__,
errorcode);
- nfs4_schedule_session_recovery(clp->cl_session);
+ nfs4_schedule_session_recovery(clp->cl_session, errorcode);
exception->retry = 1;
break;
#endif /* defined(CONFIG_NFS_V4_1) */
@@ -772,7 +776,7 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
struct nfs_inode *nfsi = NFS_I(dir);
spin_lock(&dir->i_lock);
- nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
if (!cinfo->atomic || cinfo->before != dir->i_version)
nfs_force_lookup_revalidate(dir);
dir->i_version = cinfo->after;
@@ -788,7 +792,6 @@ struct nfs4_opendata {
struct nfs4_string owner_name;
struct nfs4_string group_name;
struct nfs_fattr f_attr;
- struct nfs_fattr dir_attr;
struct dentry *dir;
struct dentry *dentry;
struct nfs4_state_owner *owner;
@@ -804,12 +807,10 @@ struct nfs4_opendata {
static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
p->o_res.f_attr = &p->f_attr;
- p->o_res.dir_attr = &p->dir_attr;
p->o_res.seqid = p->o_arg.seqid;
p->c_res.seqid = p->c_arg.seqid;
p->o_res.server = p->o_arg.server;
nfs_fattr_init(&p->f_attr);
- nfs_fattr_init(&p->dir_attr);
nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}
@@ -843,7 +844,6 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
p->o_arg.name = &dentry->d_name;
p->o_arg.server = server;
p->o_arg.bitmask = server->attr_bitmask;
- p->o_arg.dir_bitmask = server->cache_consistency_bitmask;
p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
if (attrs != NULL && attrs->ia_valid != 0) {
__be32 verf[2];
@@ -1332,7 +1332,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_DEADSESSION:
- nfs4_schedule_session_recovery(server->nfs_client->cl_session);
+ nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
goto out;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
@@ -1611,8 +1611,6 @@ static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
- nfs_refresh_inode(dir, o_res->dir_attr);
-
if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
status = _nfs4_proc_open_confirm(data);
if (status != 0)
@@ -1645,11 +1643,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
nfs_fattr_map_and_free_names(server, &data->f_attr);
- if (o_arg->open_flags & O_CREAT) {
+ if (o_arg->open_flags & O_CREAT)
update_changeattr(dir, &o_res->cinfo);
- nfs_post_op_update_inode(dir, o_res->dir_attr);
- } else
- nfs_refresh_inode(dir, o_res->dir_attr);
if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
server->caps &= ~NFS_CAP_POSIX_LOCK;
if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
@@ -1789,7 +1784,14 @@ static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct
/*
* Returns a referenced nfs4_state
*/
-static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res)
+static int _nfs4_do_open(struct inode *dir,
+ struct dentry *dentry,
+ fmode_t fmode,
+ int flags,
+ struct iattr *sattr,
+ struct rpc_cred *cred,
+ struct nfs4_state **res,
+ struct nfs4_threshold **ctx_th)
{
struct nfs4_state_owner *sp;
struct nfs4_state *state = NULL;
@@ -1814,6 +1816,11 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode
if (opendata == NULL)
goto err_put_state_owner;
+ if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
+ opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
+ if (!opendata->f_attr.mdsthreshold)
+ goto err_opendata_put;
+ }
if (dentry->d_inode != NULL)
opendata->state = nfs4_get_open_state(dentry->d_inode, sp);
@@ -1839,11 +1846,19 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode
nfs_setattr_update_inode(state->inode, sattr);
nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
}
+
+ if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server))
+ *ctx_th = opendata->f_attr.mdsthreshold;
+ else
+ kfree(opendata->f_attr.mdsthreshold);
+ opendata->f_attr.mdsthreshold = NULL;
+
nfs4_opendata_put(opendata);
nfs4_put_state_owner(sp);
*res = state;
return 0;
err_opendata_put:
+ kfree(opendata->f_attr.mdsthreshold);
nfs4_opendata_put(opendata);
err_put_state_owner:
nfs4_put_state_owner(sp);
@@ -1853,14 +1868,21 @@ out_err:
}
-static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred)
+static struct nfs4_state *nfs4_do_open(struct inode *dir,
+ struct dentry *dentry,
+ fmode_t fmode,
+ int flags,
+ struct iattr *sattr,
+ struct rpc_cred *cred,
+ struct nfs4_threshold **ctx_th)
{
struct nfs4_exception exception = { };
struct nfs4_state *res;
int status;
do {
- status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, &res);
+ status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred,
+ &res, ctx_th);
if (status == 0)
break;
/* NOTE: BAD_SEQID means the server and client disagree about the
@@ -2184,7 +2206,8 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags
struct nfs4_state *state;
/* Protect against concurrent sillydeletes */
- state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, ctx->cred);
+ state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr,
+ ctx->cred, &ctx->mdsthreshold);
if (IS_ERR(state))
return ERR_CAST(state);
ctx->state = state;
@@ -2354,8 +2377,8 @@ static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
/*
* get the file handle for the "/" directory on the server
*/
-static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
- struct nfs_fsinfo *info)
+int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fsinfo *info)
{
int minor_version = server->nfs_client->cl_minorversion;
int status = nfs4_lookup_root(server, fhandle, info);
@@ -2372,6 +2395,31 @@ static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
return nfs4_map_errors(status);
}
+static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
+ struct nfs_fsinfo *info)
+{
+ int error;
+ struct nfs_fattr *fattr = info->fattr;
+
+ error = nfs4_server_capabilities(server, mntfh);
+ if (error < 0) {
+ dprintk("nfs4_get_root: getcaps error = %d\n", -error);
+ return error;
+ }
+
+ error = nfs4_proc_getattr(server, mntfh, fattr);
+ if (error < 0) {
+ dprintk("nfs4_get_root: getattr error = %d\n", -error);
+ return error;
+ }
+
+ if (fattr->valid & NFS_ATTR_FATTR_FSID &&
+ !nfs_fsid_equal(&server->fsid, &fattr->fsid))
+ memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
+
+ return error;
+}
+
/*
* Get locations and (maybe) other attributes of a referral.
* Note that we'll actually follow the referral later when
@@ -2578,7 +2626,7 @@ out:
return err;
}
-static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
+static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
int status;
@@ -2761,7 +2809,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
fmode = ctx->mode;
}
sattr->ia_mode &= ~current_umask();
- state = nfs4_do_open(dir, de, fmode, flags, sattr, cred);
+ state = nfs4_do_open(dir, de, fmode, flags, sattr, cred, NULL);
d_drop(dentry);
if (IS_ERR(state)) {
status = PTR_ERR(state);
@@ -2783,7 +2831,6 @@ static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
struct nfs_removeargs args = {
.fh = NFS_FH(dir),
.name = *name,
- .bitmask = server->attr_bitmask,
};
struct nfs_removeres res = {
.server = server,
@@ -2793,19 +2840,11 @@ static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
.rpc_argp = &args,
.rpc_resp = &res,
};
- int status = -ENOMEM;
-
- res.dir_attr = nfs_alloc_fattr();
- if (res.dir_attr == NULL)
- goto out;
+ int status;
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
- if (status == 0) {
+ if (status == 0)
update_changeattr(dir, &res.cinfo);
- nfs_post_op_update_inode(dir, res.dir_attr);
- }
- nfs_free_fattr(res.dir_attr);
-out:
return status;
}
@@ -2827,7 +2866,6 @@ static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
struct nfs_removeargs *args = msg->rpc_argp;
struct nfs_removeres *res = msg->rpc_resp;
- args->bitmask = server->cache_consistency_bitmask;
res->server = server;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
nfs41_init_sequence(&args->seq_args, &res->seq_res, 1);
@@ -2852,7 +2890,6 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
return 0;
update_changeattr(dir, &res->cinfo);
- nfs_post_op_update_inode(dir, res->dir_attr);
return 1;
}
@@ -2863,7 +2900,6 @@ static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
struct nfs_renameres *res = msg->rpc_resp;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
- arg->bitmask = server->attr_bitmask;
res->server = server;
nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1);
}
@@ -2889,9 +2925,7 @@ static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
return 0;
update_changeattr(old_dir, &res->old_cinfo);
- nfs_post_op_update_inode(old_dir, res->old_fattr);
update_changeattr(new_dir, &res->new_cinfo);
- nfs_post_op_update_inode(new_dir, res->new_fattr);
return 1;
}
@@ -2904,7 +2938,6 @@ static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
.new_dir = NFS_FH(new_dir),
.old_name = old_name,
.new_name = new_name,
- .bitmask = server->attr_bitmask,
};
struct nfs_renameres res = {
.server = server,
@@ -2916,21 +2949,11 @@ static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
};
int status = -ENOMEM;
- res.old_fattr = nfs_alloc_fattr();
- res.new_fattr = nfs_alloc_fattr();
- if (res.old_fattr == NULL || res.new_fattr == NULL)
- goto out;
-
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (!status) {
update_changeattr(old_dir, &res.old_cinfo);
- nfs_post_op_update_inode(old_dir, res.old_fattr);
update_changeattr(new_dir, &res.new_cinfo);
- nfs_post_op_update_inode(new_dir, res.new_fattr);
}
-out:
- nfs_free_fattr(res.new_fattr);
- nfs_free_fattr(res.old_fattr);
return status;
}
@@ -2968,18 +2991,15 @@ static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *
int status = -ENOMEM;
res.fattr = nfs_alloc_fattr();
- res.dir_attr = nfs_alloc_fattr();
- if (res.fattr == NULL || res.dir_attr == NULL)
+ if (res.fattr == NULL)
goto out;
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (!status) {
update_changeattr(dir, &res.cinfo);
- nfs_post_op_update_inode(dir, res.dir_attr);
nfs_post_op_update_inode(inode, res.fattr);
}
out:
- nfs_free_fattr(res.dir_attr);
nfs_free_fattr(res.fattr);
return status;
}
@@ -3002,7 +3022,6 @@ struct nfs4_createdata {
struct nfs4_create_res res;
struct nfs_fh fh;
struct nfs_fattr fattr;
- struct nfs_fattr dir_fattr;
};
static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
@@ -3026,9 +3045,7 @@ static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
data->res.server = server;
data->res.fh = &data->fh;
data->res.fattr = &data->fattr;
- data->res.dir_fattr = &data->dir_fattr;
nfs_fattr_init(data->res.fattr);
- nfs_fattr_init(data->res.dir_fattr);
}
return data;
}
@@ -3039,7 +3056,6 @@ static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_
&data->arg.seq_args, &data->res.seq_res, 1);
if (status == 0) {
update_changeattr(dir, &data->res.dir_cinfo);
- nfs_post_op_update_inode(dir, data->res.dir_fattr);
status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
}
return status;
@@ -3335,12 +3351,12 @@ static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
void __nfs4_read_done_cb(struct nfs_read_data *data)
{
- nfs_invalidate_atime(data->inode);
+ nfs_invalidate_atime(data->header->inode);
}
static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
{
- struct nfs_server *server = NFS_SERVER(data->inode);
+ struct nfs_server *server = NFS_SERVER(data->header->inode);
if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
rpc_restart_call_prepare(task);
@@ -3375,7 +3391,7 @@ static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message
static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
{
- if (nfs4_setup_sequence(NFS_SERVER(data->inode),
+ if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
&data->args.seq_args,
&data->res.seq_res,
task))
@@ -3383,25 +3399,9 @@ static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_da
rpc_call_start(task);
}
-/* Reset the the nfs_read_data to send the read to the MDS. */
-void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data)
-{
- dprintk("%s Reset task for i/o through\n", __func__);
- put_lseg(data->lseg);
- data->lseg = NULL;
- /* offsets will differ in the dense stripe case */
- data->args.offset = data->mds_offset;
- data->ds_clp = NULL;
- data->args.fh = NFS_FH(data->inode);
- data->read_done_cb = nfs4_read_done_cb;
- task->tk_ops = data->mds_ops;
- rpc_task_reset_client(task, NFS_CLIENT(data->inode));
-}
-EXPORT_SYMBOL_GPL(nfs4_reset_read);
-
static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
{
- struct inode *inode = data->inode;
+ struct inode *inode = data->header->inode;
if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
rpc_restart_call_prepare(task);
@@ -3409,7 +3409,7 @@ static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data
}
if (task->tk_status >= 0) {
renew_lease(NFS_SERVER(inode), data->timestamp);
- nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
+ nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
}
return 0;
}
@@ -3422,32 +3422,30 @@ static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
nfs4_write_done_cb(task, data);
}
-/* Reset the the nfs_write_data to send the write to the MDS. */
-void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data)
+static
+bool nfs4_write_need_cache_consistency_data(const struct nfs_write_data *data)
{
- dprintk("%s Reset task for i/o through\n", __func__);
- put_lseg(data->lseg);
- data->lseg = NULL;
- data->ds_clp = NULL;
- data->write_done_cb = nfs4_write_done_cb;
- data->args.fh = NFS_FH(data->inode);
- data->args.bitmask = data->res.server->cache_consistency_bitmask;
- data->args.offset = data->mds_offset;
- data->res.fattr = &data->fattr;
- task->tk_ops = data->mds_ops;
- rpc_task_reset_client(task, NFS_CLIENT(data->inode));
+ const struct nfs_pgio_header *hdr = data->header;
+
+ /* Don't request attributes for pNFS or O_DIRECT writes */
+ if (data->ds_clp != NULL || hdr->dreq != NULL)
+ return false;
+ /* Otherwise, request attributes if and only if we don't hold
+ * a delegation
+ */
+ return nfs_have_delegation(hdr->inode, FMODE_READ) == 0;
}
-EXPORT_SYMBOL_GPL(nfs4_reset_write);
static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
- struct nfs_server *server = NFS_SERVER(data->inode);
+ struct nfs_server *server = NFS_SERVER(data->header->inode);
- if (data->lseg) {
+ if (!nfs4_write_need_cache_consistency_data(data)) {
data->args.bitmask = NULL;
data->res.fattr = NULL;
} else
data->args.bitmask = server->cache_consistency_bitmask;
+
if (!data->write_done_cb)
data->write_done_cb = nfs4_write_done_cb;
data->res.server = server;
@@ -3459,6 +3457,16 @@ static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_messag
static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
{
+ if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
+ &data->args.seq_args,
+ &data->res.seq_res,
+ task))
+ return;
+ rpc_call_start(task);
+}
+
+static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
+{
if (nfs4_setup_sequence(NFS_SERVER(data->inode),
&data->args.seq_args,
&data->res.seq_res,
@@ -3467,7 +3475,7 @@ static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_
rpc_call_start(task);
}
-static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *data)
+static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
{
struct inode *inode = data->inode;
@@ -3475,28 +3483,22 @@ static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *dat
rpc_restart_call_prepare(task);
return -EAGAIN;
}
- nfs_refresh_inode(inode, data->res.fattr);
return 0;
}
-static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
+static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
{
if (!nfs4_sequence_done(task, &data->res.seq_res))
return -EAGAIN;
- return data->write_done_cb(task, data);
+ return data->commit_done_cb(task, data);
}
-static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
+static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
{
struct nfs_server *server = NFS_SERVER(data->inode);
- if (data->lseg) {
- data->args.bitmask = NULL;
- data->res.fattr = NULL;
- } else
- data->args.bitmask = server->cache_consistency_bitmask;
- if (!data->write_done_cb)
- data->write_done_cb = nfs4_commit_done_cb;
+ if (data->commit_done_cb == NULL)
+ data->commit_done_cb = nfs4_commit_done_cb;
data->res.server = server;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
@@ -3905,7 +3907,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
case -NFS4ERR_SEQ_MISORDERED:
dprintk("%s ERROR %d, Reset session\n", __func__,
task->tk_status);
- nfs4_schedule_session_recovery(clp->cl_session);
+ nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
task->tk_status = 0;
return -EAGAIN;
#endif /* CONFIG_NFS_V4_1 */
@@ -3931,13 +3933,21 @@ wait_on_recovery:
return -EAGAIN;
}
-static void nfs4_construct_boot_verifier(struct nfs_client *clp,
- nfs4_verifier *bootverf)
+static void nfs4_init_boot_verifier(const struct nfs_client *clp,
+ nfs4_verifier *bootverf)
{
__be32 verf[2];
- verf[0] = htonl((u32)clp->cl_boot_time.tv_sec);
- verf[1] = htonl((u32)clp->cl_boot_time.tv_nsec);
+ if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
+ /* An impossible timestamp guarantees this value
+ * will never match a generated boot time. */
+ verf[0] = 0;
+ verf[1] = (__be32)(NSEC_PER_SEC + 1);
+ } else {
+ struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
+ verf[0] = (__be32)nn->boot_time.tv_sec;
+ verf[1] = (__be32)nn->boot_time.tv_nsec;
+ }
memcpy(bootverf->data, verf, sizeof(bootverf->data));
}
@@ -3960,7 +3970,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
int loop = 0;
int status;
- nfs4_construct_boot_verifier(clp, &sc_verifier);
+ nfs4_init_boot_verifier(clp, &sc_verifier);
for(;;) {
rcu_read_lock();
@@ -4104,7 +4114,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
data->args.fhandle = &data->fh;
data->args.stateid = &data->stateid;
- data->args.bitmask = server->attr_bitmask;
+ data->args.bitmask = server->cache_consistency_bitmask;
nfs_copy_fh(&data->fh, NFS_FH(inode));
nfs4_stateid_copy(&data->stateid, stateid);
data->res.fattr = &data->fattr;
@@ -4125,9 +4135,10 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
if (status != 0)
goto out;
status = data->rpc_status;
- if (status != 0)
- goto out;
- nfs_refresh_inode(inode, &data->fattr);
+ if (status == 0)
+ nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
+ else
+ nfs_refresh_inode(inode, &data->fattr);
out:
rpc_put_task(task);
return status;
@@ -4837,7 +4848,7 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_DEADSESSION:
- nfs4_schedule_session_recovery(server->nfs_client->cl_session);
+ nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
goto out;
case -ERESTARTSYS:
/*
@@ -5079,7 +5090,8 @@ out_inval:
}
static bool
-nfs41_same_server_scope(struct server_scope *a, struct server_scope *b)
+nfs41_same_server_scope(struct nfs41_server_scope *a,
+ struct nfs41_server_scope *b)
{
if (a->server_scope_sz == b->server_scope_sz &&
memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
@@ -5089,6 +5101,61 @@ nfs41_same_server_scope(struct server_scope *a, struct server_scope *b)
}
/*
+ * nfs4_proc_bind_conn_to_session()
+ *
+ * The 4.1 client currently uses the same TCP connection for the
+ * fore and backchannel.
+ */
+int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
+{
+ int status;
+ struct nfs41_bind_conn_to_session_res res;
+ struct rpc_message msg = {
+ .rpc_proc =
+ &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
+ .rpc_argp = clp,
+ .rpc_resp = &res,
+ .rpc_cred = cred,
+ };
+
+ dprintk("--> %s\n", __func__);
+ BUG_ON(clp == NULL);
+
+ res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
+ if (unlikely(res.session == NULL)) {
+ status = -ENOMEM;
+ goto out;
+ }
+
+ status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
+ if (status == 0) {
+ if (memcmp(res.session->sess_id.data,
+ clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
+ dprintk("NFS: %s: Session ID mismatch\n", __func__);
+ status = -EIO;
+ goto out_session;
+ }
+ if (res.dir != NFS4_CDFS4_BOTH) {
+ dprintk("NFS: %s: Unexpected direction from server\n",
+ __func__);
+ status = -EIO;
+ goto out_session;
+ }
+ if (res.use_conn_in_rdma_mode) {
+ dprintk("NFS: %s: Server returned RDMA mode = true\n",
+ __func__);
+ status = -EIO;
+ goto out_session;
+ }
+ }
+out_session:
+ kfree(res.session);
+out:
+ dprintk("<-- %s status= %d\n", __func__, status);
+ return status;
+}
+
+/*
* nfs4_proc_exchange_id()
*
* Since the clientid has expired, all compounds using sessions
@@ -5105,7 +5172,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
};
struct nfs41_exchange_id_res res = {
- .client = clp,
+ 0
};
int status;
struct rpc_message msg = {
@@ -5118,7 +5185,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
dprintk("--> %s\n", __func__);
BUG_ON(clp == NULL);
- nfs4_construct_boot_verifier(clp, &verifier);
+ nfs4_init_boot_verifier(clp, &verifier);
args.id_len = scnprintf(args.id, sizeof(args.id),
"%s/%s/%u",
@@ -5126,59 +5193,135 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
clp->cl_rpcclient->cl_nodename,
clp->cl_rpcclient->cl_auth->au_flavor);
- res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL);
- if (unlikely(!res.server_scope)) {
+ res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
+ GFP_NOFS);
+ if (unlikely(res.server_owner == NULL)) {
status = -ENOMEM;
goto out;
}
- res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_KERNEL);
- if (unlikely(!res.impl_id)) {
+ res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
+ GFP_NOFS);
+ if (unlikely(res.server_scope == NULL)) {
+ status = -ENOMEM;
+ goto out_server_owner;
+ }
+
+ res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
+ if (unlikely(res.impl_id == NULL)) {
status = -ENOMEM;
goto out_server_scope;
}
status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
- if (!status)
- status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
+ if (status == 0)
+ status = nfs4_check_cl_exchange_flags(res.flags);
+
+ if (status == 0) {
+ clp->cl_clientid = res.clientid;
+ clp->cl_exchange_flags = (res.flags & ~EXCHGID4_FLAG_CONFIRMED_R);
+ if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R))
+ clp->cl_seqid = res.seqid;
+
+ kfree(clp->cl_serverowner);
+ clp->cl_serverowner = res.server_owner;
+ res.server_owner = NULL;
- if (!status) {
/* use the most recent implementation id */
- kfree(clp->impl_id);
- clp->impl_id = res.impl_id;
- } else
- kfree(res.impl_id);
+ kfree(clp->cl_implid);
+ clp->cl_implid = res.impl_id;
- if (!status) {
- if (clp->server_scope &&
- !nfs41_same_server_scope(clp->server_scope,
+ if (clp->cl_serverscope != NULL &&
+ !nfs41_same_server_scope(clp->cl_serverscope,
res.server_scope)) {
dprintk("%s: server_scope mismatch detected\n",
__func__);
set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
- kfree(clp->server_scope);
- clp->server_scope = NULL;
+ kfree(clp->cl_serverscope);
+ clp->cl_serverscope = NULL;
}
- if (!clp->server_scope) {
- clp->server_scope = res.server_scope;
+ if (clp->cl_serverscope == NULL) {
+ clp->cl_serverscope = res.server_scope;
goto out;
}
- }
+ } else
+ kfree(res.impl_id);
+out_server_owner:
+ kfree(res.server_owner);
out_server_scope:
kfree(res.server_scope);
out:
- if (clp->impl_id)
+ if (clp->cl_implid != NULL)
dprintk("%s: Server Implementation ID: "
"domain: %s, name: %s, date: %llu,%u\n",
- __func__, clp->impl_id->domain, clp->impl_id->name,
- clp->impl_id->date.seconds,
- clp->impl_id->date.nseconds);
+ __func__, clp->cl_implid->domain, clp->cl_implid->name,
+ clp->cl_implid->date.seconds,
+ clp->cl_implid->date.nseconds);
dprintk("<-- %s status= %d\n", __func__, status);
return status;
}
+static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
+ struct rpc_cred *cred)
+{
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
+ .rpc_argp = clp,
+ .rpc_cred = cred,
+ };
+ int status;
+
+ status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
+ if (status)
+ pr_warn("NFS: Got error %d from the server %s on "
+ "DESTROY_CLIENTID.", status, clp->cl_hostname);
+ return status;
+}
+
+static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
+ struct rpc_cred *cred)
+{
+ unsigned int loop;
+ int ret;
+
+ for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
+ ret = _nfs4_proc_destroy_clientid(clp, cred);
+ switch (ret) {
+ case -NFS4ERR_DELAY:
+ case -NFS4ERR_CLIENTID_BUSY:
+ ssleep(1);
+ break;
+ default:
+ return ret;
+ }
+ }
+ return 0;
+}
+
+int nfs4_destroy_clientid(struct nfs_client *clp)
+{
+ struct rpc_cred *cred;
+ int ret = 0;
+
+ if (clp->cl_mvops->minor_version < 1)
+ goto out;
+ if (clp->cl_exchange_flags == 0)
+ goto out;
+ cred = nfs4_get_exchange_id_cred(clp);
+ ret = nfs4_proc_destroy_clientid(clp, cred);
+ if (cred)
+ put_rpccred(cred);
+ switch (ret) {
+ case 0:
+ case -NFS4ERR_STALE_CLIENTID:
+ clp->cl_exchange_flags = 0;
+ }
+out:
+ return ret;
+}
+
struct nfs4_get_lease_time_data {
struct nfs4_get_lease_time_args *args;
struct nfs4_get_lease_time_res *res;
@@ -5399,8 +5542,12 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
void nfs4_destroy_session(struct nfs4_session *session)
{
struct rpc_xprt *xprt;
+ struct rpc_cred *cred;
- nfs4_proc_destroy_session(session);
+ cred = nfs4_get_exchange_id_cred(session->clp);
+ nfs4_proc_destroy_session(session, cred);
+ if (cred)
+ put_rpccred(cred);
rcu_read_lock();
xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
@@ -5510,7 +5657,8 @@ static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
return nfs4_verify_back_channel_attrs(args, session);
}
-static int _nfs4_proc_create_session(struct nfs_client *clp)
+static int _nfs4_proc_create_session(struct nfs_client *clp,
+ struct rpc_cred *cred)
{
struct nfs4_session *session = clp->cl_session;
struct nfs41_create_session_args args = {
@@ -5524,6 +5672,7 @@ static int _nfs4_proc_create_session(struct nfs_client *clp)
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
.rpc_argp = &args,
.rpc_resp = &res,
+ .rpc_cred = cred,
};
int status;
@@ -5548,7 +5697,7 @@ static int _nfs4_proc_create_session(struct nfs_client *clp)
* It is the responsibility of the caller to verify the session is
* expired before calling this routine.
*/
-int nfs4_proc_create_session(struct nfs_client *clp)
+int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
{
int status;
unsigned *ptr;
@@ -5556,7 +5705,7 @@ int nfs4_proc_create_session(struct nfs_client *clp)
dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
- status = _nfs4_proc_create_session(clp);
+ status = _nfs4_proc_create_session(clp, cred);
if (status)
goto out;
@@ -5578,10 +5727,15 @@ out:
* Issue the over-the-wire RPC DESTROY_SESSION.
* The caller must serialize access to this routine.
*/
-int nfs4_proc_destroy_session(struct nfs4_session *session)
+int nfs4_proc_destroy_session(struct nfs4_session *session,
+ struct rpc_cred *cred)
{
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
+ .rpc_argp = session,
+ .rpc_cred = cred,
+ };
int status = 0;
- struct rpc_message msg;
dprintk("--> nfs4_proc_destroy_session\n");
@@ -5589,10 +5743,6 @@ int nfs4_proc_destroy_session(struct nfs4_session *session)
if (session->clp->cl_cons_state != NFS_CS_READY)
return status;
- msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION];
- msg.rpc_argp = session;
- msg.rpc_resp = NULL;
- msg.rpc_cred = NULL;
status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
if (status)
@@ -5604,53 +5754,79 @@ int nfs4_proc_destroy_session(struct nfs4_session *session)
return status;
}
+/*
+ * With sessions, the client is not marked ready until after a
+ * successful EXCHANGE_ID and CREATE_SESSION.
+ *
+ * Map cl_cons_state errors to EPROTONOSUPPORT to indicate
+ * other versions of NFS can be tried.
+ */
+static int nfs41_check_session_ready(struct nfs_client *clp)
+{
+ int ret;
+
+ if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
+ ret = nfs4_client_recover_expired_lease(clp);
+ if (ret)
+ return ret;
+ }
+ if (clp->cl_cons_state < NFS_CS_READY)
+ return -EPROTONOSUPPORT;
+ smp_rmb();
+ return 0;
+}
+
int nfs4_init_session(struct nfs_server *server)
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_session *session;
unsigned int rsize, wsize;
- int ret;
if (!nfs4_has_session(clp))
return 0;
session = clp->cl_session;
- if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
- return 0;
+ spin_lock(&clp->cl_lock);
+ if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
- rsize = server->rsize;
- if (rsize == 0)
- rsize = NFS_MAX_FILE_IO_SIZE;
- wsize = server->wsize;
- if (wsize == 0)
- wsize = NFS_MAX_FILE_IO_SIZE;
+ rsize = server->rsize;
+ if (rsize == 0)
+ rsize = NFS_MAX_FILE_IO_SIZE;
+ wsize = server->wsize;
+ if (wsize == 0)
+ wsize = NFS_MAX_FILE_IO_SIZE;
- session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
- session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
+ session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
+ session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
+ }
+ spin_unlock(&clp->cl_lock);
- ret = nfs4_recover_expired_lease(server);
- if (!ret)
- ret = nfs4_check_client_ready(clp);
- return ret;
+ return nfs41_check_session_ready(clp);
}
-int nfs4_init_ds_session(struct nfs_client *clp)
+int nfs4_init_ds_session(struct nfs_client *clp, unsigned long lease_time)
{
struct nfs4_session *session = clp->cl_session;
int ret;
- if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
- return 0;
-
- ret = nfs4_client_recover_expired_lease(clp);
- if (!ret)
- /* Test for the DS role */
- if (!is_ds_client(clp))
- ret = -ENODEV;
- if (!ret)
- ret = nfs4_check_client_ready(clp);
- return ret;
+ spin_lock(&clp->cl_lock);
+ if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
+ /*
+ * Do not set NFS_CS_CHECK_LEASE_TIME; instead set the
+ * DS lease to be equal to the MDS lease.
+ */
+ clp->cl_lease_time = lease_time;
+ clp->cl_last_renewal = jiffies;
+ }
+ spin_unlock(&clp->cl_lock);
+ ret = nfs41_check_session_ready(clp);
+ if (ret)
+ return ret;
+ /* Test for the DS role */
+ if (!is_ds_client(clp))
+ return -ENODEV;
+ return 0;
}
EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
@@ -6557,6 +6733,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
.file_inode_ops = &nfs4_file_inode_operations,
.file_ops = &nfs4_file_operations,
.getroot = nfs4_proc_get_root,
+ .submount = nfs4_submount,
.getattr = nfs4_proc_getattr,
.setattr = nfs4_proc_setattr,
.lookup = nfs4_proc_lookup,
@@ -6589,13 +6766,13 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
.write_rpc_prepare = nfs4_proc_write_rpc_prepare,
.write_done = nfs4_write_done,
.commit_setup = nfs4_proc_commit_setup,
+ .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
.commit_done = nfs4_commit_done,
.lock = nfs4_proc_lock,
.clear_acl_cache = nfs4_zap_acl_attr,
.close_context = nfs4_close_context,
.open_context = nfs4_atomic_open,
.init_client = nfs4_init_client,
- .secinfo = nfs4_proc_secinfo,
};
static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
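As a rough illustration of the ownership hand-off that _nfs4_do_open() adopts in the hunks above: the open path now allocates an mdsthreshold hint up front, hands it to the caller's context when pnfs_use_threshold() accepts it, and frees it on every other path (including the new err_opendata_put case). A minimal userspace sketch follows; struct threshold, use_threshold() and do_open() are illustrative stand-ins, not the kernel's pnfs_* helpers.

/* Illustrative-only model (not kernel code) of the allocate /
 * transfer-or-free pattern used by _nfs4_do_open() above. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct threshold { unsigned long rd_sz, wr_sz; };

/* stand-in for pnfs_use_threshold(): does the caller want the hint? */
static bool use_threshold(struct threshold **ctx_th, struct threshold *t)
{
	return ctx_th != NULL && t != NULL && (t->rd_sz || t->wr_sz);
}

static int do_open(struct threshold **ctx_th, bool server_supports_hint)
{
	struct threshold *t = NULL;

	if (ctx_th && server_supports_hint) {
		t = calloc(1, sizeof(*t));
		if (!t)
			return -1;	/* cf. the goto err_opendata_put path */
		t->rd_sz = 4096;	/* pretend the server returned a hint */
	}

	if (use_threshold(ctx_th, t))
		*ctx_th = t;		/* ownership moves to the open context */
	else
		free(t);		/* otherwise drop it, as the patch does */
	return 0;
}

int main(void)
{
	struct threshold *hint = NULL;

	if (do_open(&hint, true) == 0 && hint)
		printf("kept mdsthreshold hint: rd_sz=%lu\n", hint->rd_sz);
	free(hint);
	return 0;
}

In the patch itself the keep-or-free decision is pnfs_use_threshold(), and the error path added to err_opendata_put frees the hint so it cannot leak.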
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index dc484c0eae7..6930bec91bc 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -49,7 +49,7 @@
#include "nfs4_fs.h"
#include "delegation.h"
-#define NFSDBG_FACILITY NFSDBG_PROC
+#define NFSDBG_FACILITY NFSDBG_STATE
void
nfs4_renew_state(struct work_struct *work)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 7f0fcfc1fe9..c679b9ecef6 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -57,6 +57,8 @@
#include "internal.h"
#include "pnfs.h"
+#define NFSDBG_FACILITY NFSDBG_STATE
+
#define OPENOWNER_POOL_SIZE 8
const nfs4_stateid zero_stateid;
@@ -254,7 +256,7 @@ int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
goto out;
set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
do_confirm:
- status = nfs4_proc_create_session(clp);
+ status = nfs4_proc_create_session(clp, cred);
if (status != 0)
goto out;
clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
@@ -1106,6 +1108,8 @@ void nfs4_schedule_lease_recovery(struct nfs_client *clp)
return;
if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
+ dprintk("%s: scheduling lease recovery for server %s\n", __func__,
+ clp->cl_hostname);
nfs4_schedule_state_manager(clp);
}
EXPORT_SYMBOL_GPL(nfs4_schedule_lease_recovery);
@@ -1122,6 +1126,8 @@ static void nfs40_handle_cb_pathdown(struct nfs_client *clp)
{
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
nfs_expire_all_delegations(clp);
+ dprintk("%s: handling CB_PATHDOWN recovery for server %s\n", __func__,
+ clp->cl_hostname);
}
void nfs4_schedule_path_down_recovery(struct nfs_client *clp)
@@ -1158,6 +1164,8 @@ void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4
struct nfs_client *clp = server->nfs_client;
nfs4_state_mark_reclaim_nograce(clp, state);
+ dprintk("%s: scheduling stateid recovery for server %s\n", __func__,
+ clp->cl_hostname);
nfs4_schedule_state_manager(clp);
}
EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery);
@@ -1491,19 +1499,25 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_DEADSESSION:
- case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_SEQ_FALSE_RETRY:
case -NFS4ERR_SEQ_MISORDERED:
set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
/* Zero session reset errors */
break;
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
+ break;
case -EKEYEXPIRED:
/* Nothing we can do */
nfs4_warn_keyexpired(clp->cl_hostname);
break;
default:
+ dprintk("%s: failed to handle error %d for server %s\n",
+ __func__, error, clp->cl_hostname);
return error;
}
+ dprintk("%s: handled error %d for server %s\n", __func__, error,
+ clp->cl_hostname);
return 0;
}
@@ -1572,34 +1586,82 @@ out:
return nfs4_recovery_handle_error(clp, status);
}
+/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
+ * on EXCHANGE_ID for v4.1
+ */
+static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
+{
+ switch (status) {
+ case -NFS4ERR_SEQ_MISORDERED:
+ if (test_and_set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state))
+ return -ESERVERFAULT;
+ /* Lease confirmation error: retry after purging the lease */
+ ssleep(1);
+ case -NFS4ERR_CLID_INUSE:
+ case -NFS4ERR_STALE_CLIENTID:
+ clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+ break;
+ case -EACCES:
+ if (clp->cl_machine_cred == NULL)
+ return -EACCES;
+ /* Handle case where the user hasn't set up machine creds */
+ nfs4_clear_machine_cred(clp);
+ case -NFS4ERR_DELAY:
+ case -ETIMEDOUT:
+ case -EAGAIN:
+ ssleep(1);
+ break;
+
+ case -NFS4ERR_MINOR_VERS_MISMATCH:
+ if (clp->cl_cons_state == NFS_CS_SESSION_INITING)
+ nfs_mark_client_ready(clp, -EPROTONOSUPPORT);
+ dprintk("%s: exit with error %d for server %s\n",
+ __func__, -EPROTONOSUPPORT, clp->cl_hostname);
+ return -EPROTONOSUPPORT;
+ case -EKEYEXPIRED:
+ nfs4_warn_keyexpired(clp->cl_hostname);
+ case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
+ * in nfs4_exchange_id */
+ default:
+ dprintk("%s: exit with error %d for server %s\n", __func__,
+ status, clp->cl_hostname);
+ return status;
+ }
+ set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ dprintk("%s: handled error %d for server %s\n", __func__, status,
+ clp->cl_hostname);
+ return 0;
+}
+
static int nfs4_reclaim_lease(struct nfs_client *clp)
{
struct rpc_cred *cred;
const struct nfs4_state_recovery_ops *ops =
clp->cl_mvops->reboot_recovery_ops;
- int status = -ENOENT;
+ int status;
cred = ops->get_clid_cred(clp);
- if (cred != NULL) {
- status = ops->establish_clid(clp, cred);
- put_rpccred(cred);
- /* Handle case where the user hasn't set up machine creds */
- if (status == -EACCES && cred == clp->cl_machine_cred) {
- nfs4_clear_machine_cred(clp);
- status = -EAGAIN;
- }
- if (status == -NFS4ERR_MINOR_VERS_MISMATCH)
- status = -EPROTONOSUPPORT;
- }
- return status;
+ if (cred == NULL)
+ return -ENOENT;
+ status = ops->establish_clid(clp, cred);
+ put_rpccred(cred);
+ if (status != 0)
+ return nfs4_handle_reclaim_lease_error(clp, status);
+ return 0;
}
#ifdef CONFIG_NFS_V4_1
-void nfs4_schedule_session_recovery(struct nfs4_session *session)
+void nfs4_schedule_session_recovery(struct nfs4_session *session, int err)
{
struct nfs_client *clp = session->clp;
- set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+ switch (err) {
+ default:
+ set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+ break;
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
+ }
nfs4_schedule_lease_recovery(clp);
}
EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery);
@@ -1607,14 +1669,19 @@ EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery);
void nfs41_handle_recall_slot(struct nfs_client *clp)
{
set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
+ dprintk("%s: scheduling slot recall for server %s\n", __func__,
+ clp->cl_hostname);
nfs4_schedule_state_manager(clp);
}
static void nfs4_reset_all_state(struct nfs_client *clp)
{
if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
- clp->cl_boot_time = CURRENT_TIME;
+ set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
+ clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
nfs4_state_start_reclaim_nograce(clp);
+ dprintk("%s: scheduling reset of all state for server %s!\n",
+ __func__, clp->cl_hostname);
nfs4_schedule_state_manager(clp);
}
}
@@ -1623,33 +1690,50 @@ static void nfs41_handle_server_reboot(struct nfs_client *clp)
{
if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
nfs4_state_start_reclaim_reboot(clp);
+ dprintk("%s: server %s rebooted!\n", __func__,
+ clp->cl_hostname);
nfs4_schedule_state_manager(clp);
}
}
static void nfs41_handle_state_revoked(struct nfs_client *clp)
{
- /* Temporary */
nfs4_reset_all_state(clp);
+ dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
}
static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
{
/* This will need to handle layouts too */
nfs_expire_all_delegations(clp);
+ dprintk("%s: Recallable state revoked on server %s!\n", __func__,
+ clp->cl_hostname);
}
-static void nfs41_handle_cb_path_down(struct nfs_client *clp)
+static void nfs41_handle_backchannel_fault(struct nfs_client *clp)
{
nfs_expire_all_delegations(clp);
if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
nfs4_schedule_state_manager(clp);
+ dprintk("%s: server %s declared a backchannel fault\n", __func__,
+ clp->cl_hostname);
+}
+
+static void nfs41_handle_cb_path_down(struct nfs_client *clp)
+{
+ if (test_and_set_bit(NFS4CLNT_BIND_CONN_TO_SESSION,
+ &clp->cl_state) == 0)
+ nfs4_schedule_state_manager(clp);
}
void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
{
if (!flags)
return;
+
+ dprintk("%s: \"%s\" (client ID %llx) flags=0x%08x\n",
+ __func__, clp->cl_hostname, clp->cl_clientid, flags);
+
if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
nfs41_handle_server_reboot(clp);
if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
@@ -1659,18 +1743,21 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
nfs41_handle_state_revoked(clp);
if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
nfs41_handle_recallable_state_revoked(clp);
- if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
- SEQ4_STATUS_BACKCHANNEL_FAULT |
- SEQ4_STATUS_CB_PATH_DOWN_SESSION))
+ if (flags & SEQ4_STATUS_BACKCHANNEL_FAULT)
+ nfs41_handle_backchannel_fault(clp);
+ else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
+ SEQ4_STATUS_CB_PATH_DOWN_SESSION))
nfs41_handle_cb_path_down(clp);
}
static int nfs4_reset_session(struct nfs_client *clp)
{
+ struct rpc_cred *cred;
int status;
nfs4_begin_drain_session(clp);
- status = nfs4_proc_destroy_session(clp->cl_session);
+ cred = nfs4_get_exchange_id_cred(clp);
+ status = nfs4_proc_destroy_session(clp->cl_session, cred);
if (status && status != -NFS4ERR_BADSESSION &&
status != -NFS4ERR_DEADSESSION) {
status = nfs4_recovery_handle_error(clp, status);
@@ -1678,19 +1765,26 @@ static int nfs4_reset_session(struct nfs_client *clp)
}
memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
- status = nfs4_proc_create_session(clp);
+ status = nfs4_proc_create_session(clp, cred);
if (status) {
- status = nfs4_recovery_handle_error(clp, status);
+ dprintk("%s: session reset failed with status %d for server %s!\n",
+ __func__, status, clp->cl_hostname);
+ status = nfs4_handle_reclaim_lease_error(clp, status);
goto out;
}
clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
/* create_session negotiated new slot table */
clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
+ clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
+ dprintk("%s: session reset was successful for server %s!\n",
+ __func__, clp->cl_hostname);
/* Let the state manager reestablish state */
if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
nfs41_setup_state_renewal(clp);
out:
+ if (cred)
+ put_rpccred(cred);
return status;
}
@@ -1722,37 +1816,41 @@ static int nfs4_recall_slot(struct nfs_client *clp)
return 0;
}
-#else /* CONFIG_NFS_V4_1 */
-static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
-static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; }
-static int nfs4_recall_slot(struct nfs_client *clp) { return 0; }
-#endif /* CONFIG_NFS_V4_1 */
-
-/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
- * on EXCHANGE_ID for v4.1
- */
-static void nfs4_set_lease_expired(struct nfs_client *clp, int status)
+static int nfs4_bind_conn_to_session(struct nfs_client *clp)
{
- switch (status) {
- case -NFS4ERR_CLID_INUSE:
- case -NFS4ERR_STALE_CLIENTID:
- clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+ struct rpc_cred *cred;
+ int ret;
+
+ nfs4_begin_drain_session(clp);
+ cred = nfs4_get_exchange_id_cred(clp);
+ ret = nfs4_proc_bind_conn_to_session(clp, cred);
+ if (cred)
+ put_rpccred(cred);
+ clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
+ switch (ret) {
+ case 0:
+ dprintk("%s: bind_conn_to_session was successful for server %s!\n",
+ __func__, clp->cl_hostname);
break;
case -NFS4ERR_DELAY:
- case -ETIMEDOUT:
- case -EAGAIN:
ssleep(1);
+ set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
break;
-
- case -EKEYEXPIRED:
- nfs4_warn_keyexpired(clp->cl_hostname);
- case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
- * in nfs4_exchange_id */
default:
- return;
+ return nfs4_recovery_handle_error(clp, ret);
}
- set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ return 0;
}
+#else /* CONFIG_NFS_V4_1 */
+static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
+static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; }
+static int nfs4_recall_slot(struct nfs_client *clp) { return 0; }
+
+static int nfs4_bind_conn_to_session(struct nfs_client *clp)
+{
+ return 0;
+}
+#endif /* CONFIG_NFS_V4_1 */
static void nfs4_state_manager(struct nfs_client *clp)
{
@@ -1760,19 +1858,21 @@ static void nfs4_state_manager(struct nfs_client *clp)
/* Ensure exclusive access to NFSv4 state */
do {
+ if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
+ status = nfs4_reclaim_lease(clp);
+ if (status < 0)
+ goto out_error;
+ clear_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
+ set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ }
+
if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
/* We're going to have to re-establish a clientid */
status = nfs4_reclaim_lease(clp);
- if (status) {
- nfs4_set_lease_expired(clp, status);
- if (test_bit(NFS4CLNT_LEASE_EXPIRED,
- &clp->cl_state))
- continue;
- if (clp->cl_cons_state ==
- NFS_CS_SESSION_INITING)
- nfs_mark_client_ready(clp, status);
+ if (status < 0)
goto out_error;
- }
+ if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
+ continue;
clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
if (test_and_clear_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH,
@@ -1803,6 +1903,15 @@ static void nfs4_state_manager(struct nfs_client *clp)
goto out_error;
}
+ /* Send BIND_CONN_TO_SESSION */
+ if (test_and_clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION,
+ &clp->cl_state) && nfs4_has_session(clp)) {
+ status = nfs4_bind_conn_to_session(clp);
+ if (status < 0)
+ goto out_error;
+ continue;
+ }
+
/* First recover reboot state... */
if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
status = nfs4_do_reclaim(clp,
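The nfs4state.c hunks above split session recovery into two paths: NFS4ERR_CONN_NOT_BOUND_TO_SESSION now only schedules a BIND_CONN_TO_SESSION, while the other session-level errors still trigger a full session reset. A minimal, self-contained sketch of that dispatch is below; the enum values are local stand-ins, not the kernel's NFS4ERR_* codes or NFS4CLNT_* state bits.

/* Illustrative-only model (not kernel code) of the error dispatch
 * introduced in nfs4_schedule_session_recovery() and
 * nfs4_recovery_handle_error() above. */
#include <stdio.h>

enum nfs_err {			/* local stand-ins for -NFS4ERR_* codes */
	ERR_CONN_NOT_BOUND_TO_SESSION = 1,
	ERR_DEADSESSION,
	ERR_SEQ_MISORDERED,
};

enum recovery_action { SESSION_RESET, BIND_CONN_TO_SESSION };

static enum recovery_action schedule_session_recovery(enum nfs_err err)
{
	switch (err) {
	case ERR_CONN_NOT_BOUND_TO_SESSION:
		return BIND_CONN_TO_SESSION;	/* cheap: re-bind the connection */
	default:
		return SESSION_RESET;		/* DESTROY_SESSION + CREATE_SESSION */
	}
}

int main(void)
{
	enum nfs_err errs[] = { ERR_CONN_NOT_BOUND_TO_SESSION, ERR_DEADSESSION };

	for (unsigned i = 0; i < sizeof(errs) / sizeof(errs[0]); i++)
		printf("error %d -> %s\n", errs[i],
		       schedule_session_recovery(errs[i]) == BIND_CONN_TO_SESSION ?
		       "bind_conn_to_session" : "session reset");
	return 0;
}

The same distinction appears twice in the patch: nfs4_schedule_session_recovery() and nfs4_recovery_handle_error() both set NFS4CLNT_BIND_CONN_TO_SESSION for that one error and NFS4CLNT_SESSION_RESET otherwise, and the state manager sends the BIND_CONN_TO_SESSION before attempting any reclaim.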
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index c54aae364be..ee4a74db95d 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -53,9 +53,11 @@
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
+
#include "nfs4_fs.h"
#include "internal.h"
#include "pnfs.h"
+#include "netns.h"
#define NFSDBG_FACILITY NFSDBG_XDR
@@ -99,9 +101,12 @@ static int nfs4_stat_to_errno(int);
#define nfs4_path_maxsz (1 + ((3 + NFS4_MAXPATHLEN) >> 2))
#define nfs4_owner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
#define nfs4_group_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
+/* We support only one layout type per file system */
+#define decode_mdsthreshold_maxsz (1 + 1 + nfs4_fattr_bitmap_maxsz + 1 + 8)
/* This is based on getfattr, which uses the most attributes: */
#define nfs4_fattr_value_maxsz (1 + (1 + 2 + 2 + 4 + 2 + 1 + 1 + 2 + 2 + \
- 3 + 3 + 3 + nfs4_owner_maxsz + nfs4_group_maxsz))
+ 3 + 3 + 3 + nfs4_owner_maxsz + \
+ nfs4_group_maxsz + decode_mdsthreshold_maxsz))
#define nfs4_fattr_maxsz (nfs4_fattr_bitmap_maxsz + \
nfs4_fattr_value_maxsz)
#define decode_getattr_maxsz (op_decode_hdr_maxsz + nfs4_fattr_maxsz)
@@ -321,8 +326,20 @@ static int nfs4_stat_to_errno(int);
1 /* csr_flags */ + \
decode_channel_attrs_maxsz + \
decode_channel_attrs_maxsz)
+#define encode_bind_conn_to_session_maxsz (op_encode_hdr_maxsz + \
+ /* bctsa_sessid */ \
+ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
+ 1 /* bctsa_dir */ + \
+ 1 /* bctsa_use_conn_in_rdma_mode */)
+#define decode_bind_conn_to_session_maxsz (op_decode_hdr_maxsz + \
+ /* bctsr_sessid */ \
+ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
+ 1 /* bctsr_dir */ + \
+ 1 /* bctsr_use_conn_in_rdma_mode */)
#define encode_destroy_session_maxsz (op_encode_hdr_maxsz + 4)
#define decode_destroy_session_maxsz (op_decode_hdr_maxsz)
+#define encode_destroy_clientid_maxsz (op_encode_hdr_maxsz + 2)
+#define decode_destroy_clientid_maxsz (op_decode_hdr_maxsz)
#define encode_sequence_maxsz (op_encode_hdr_maxsz + \
XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 4)
#define decode_sequence_maxsz (op_decode_hdr_maxsz + \
@@ -421,30 +438,22 @@ static int nfs4_stat_to_errno(int);
#define NFS4_enc_commit_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
- encode_commit_maxsz + \
- encode_getattr_maxsz)
+ encode_commit_maxsz)
#define NFS4_dec_commit_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
- decode_commit_maxsz + \
- decode_getattr_maxsz)
+ decode_commit_maxsz)
#define NFS4_enc_open_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
- encode_savefh_maxsz + \
encode_open_maxsz + \
encode_getfh_maxsz + \
- encode_getattr_maxsz + \
- encode_restorefh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_open_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
- decode_savefh_maxsz + \
decode_open_maxsz + \
decode_getfh_maxsz + \
- decode_getattr_maxsz + \
- decode_restorefh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_open_confirm_sz \
(compound_encode_hdr_maxsz + \
@@ -595,47 +604,37 @@ static int nfs4_stat_to_errno(int);
#define NFS4_enc_remove_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
- encode_remove_maxsz + \
- encode_getattr_maxsz)
+ encode_remove_maxsz)
#define NFS4_dec_remove_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
- decode_remove_maxsz + \
- decode_getattr_maxsz)
+ decode_remove_maxsz)
#define NFS4_enc_rename_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_savefh_maxsz + \
encode_putfh_maxsz + \
- encode_rename_maxsz + \
- encode_getattr_maxsz + \
- encode_restorefh_maxsz + \
- encode_getattr_maxsz)
+ encode_rename_maxsz)
#define NFS4_dec_rename_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_savefh_maxsz + \
decode_putfh_maxsz + \
- decode_rename_maxsz + \
- decode_getattr_maxsz + \
- decode_restorefh_maxsz + \
- decode_getattr_maxsz)
+ decode_rename_maxsz)
#define NFS4_enc_link_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_savefh_maxsz + \
encode_putfh_maxsz + \
encode_link_maxsz + \
- decode_getattr_maxsz + \
encode_restorefh_maxsz + \
- decode_getattr_maxsz)
+ encode_getattr_maxsz)
#define NFS4_dec_link_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_savefh_maxsz + \
decode_putfh_maxsz + \
decode_link_maxsz + \
- decode_getattr_maxsz + \
decode_restorefh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_symlink_sz (compound_encode_hdr_maxsz + \
@@ -653,20 +652,14 @@ static int nfs4_stat_to_errno(int);
#define NFS4_enc_create_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
- encode_savefh_maxsz + \
encode_create_maxsz + \
encode_getfh_maxsz + \
- encode_getattr_maxsz + \
- encode_restorefh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_create_sz (compound_decode_hdr_maxsz + \
decode_sequence_maxsz + \
decode_putfh_maxsz + \
- decode_savefh_maxsz + \
decode_create_maxsz + \
decode_getfh_maxsz + \
- decode_getattr_maxsz + \
- decode_restorefh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_pathconf_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
@@ -738,6 +731,12 @@ static int nfs4_stat_to_errno(int);
decode_putfh_maxsz + \
decode_secinfo_maxsz)
#if defined(CONFIG_NFS_V4_1)
+#define NFS4_enc_bind_conn_to_session_sz \
+ (compound_encode_hdr_maxsz + \
+ encode_bind_conn_to_session_maxsz)
+#define NFS4_dec_bind_conn_to_session_sz \
+ (compound_decode_hdr_maxsz + \
+ decode_bind_conn_to_session_maxsz)
#define NFS4_enc_exchange_id_sz \
(compound_encode_hdr_maxsz + \
encode_exchange_id_maxsz)
@@ -754,6 +753,10 @@ static int nfs4_stat_to_errno(int);
encode_destroy_session_maxsz)
#define NFS4_dec_destroy_session_sz (compound_decode_hdr_maxsz + \
decode_destroy_session_maxsz)
+#define NFS4_enc_destroy_clientid_sz (compound_encode_hdr_maxsz + \
+ encode_destroy_clientid_maxsz)
+#define NFS4_dec_destroy_clientid_sz (compound_decode_hdr_maxsz + \
+ decode_destroy_clientid_maxsz)
#define NFS4_enc_sequence_sz \
(compound_decode_hdr_maxsz + \
encode_sequence_maxsz)
@@ -1103,7 +1106,7 @@ static void encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg
encode_nfs4_stateid(xdr, arg->stateid);
}
-static void encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr)
+static void encode_commit(struct xdr_stream *xdr, const struct nfs_commitargs *args, struct compound_hdr *hdr)
{
__be32 *p;
@@ -1194,6 +1197,16 @@ static void encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct c
bitmask[1] & nfs4_fattr_bitmap[1], hdr);
}
+static void encode_getfattr_open(struct xdr_stream *xdr, const u32 *bitmask,
+ struct compound_hdr *hdr)
+{
+ encode_getattr_three(xdr,
+ bitmask[0] & nfs4_fattr_bitmap[0],
+ bitmask[1] & nfs4_fattr_bitmap[1],
+ bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD,
+ hdr);
+}
+
static void encode_fsinfo(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr)
{
encode_getattr_three(xdr,
@@ -1678,6 +1691,20 @@ static void encode_secinfo(struct xdr_stream *xdr, const struct qstr *name, stru
#if defined(CONFIG_NFS_V4_1)
/* NFSv4.1 operations */
+static void encode_bind_conn_to_session(struct xdr_stream *xdr,
+ struct nfs4_session *session,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ encode_op_hdr(xdr, OP_BIND_CONN_TO_SESSION,
+ decode_bind_conn_to_session_maxsz, hdr);
+ encode_opaque_fixed(xdr, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
+ p = xdr_reserve_space(xdr, 8);
+ *p++ = cpu_to_be32(NFS4_CDFC4_BACK_OR_BOTH);
+ *p = 0; /* use_conn_in_rdma_mode = False */
+}
+
static void encode_exchange_id(struct xdr_stream *xdr,
struct nfs41_exchange_id_args *args,
struct compound_hdr *hdr)
@@ -1726,6 +1753,7 @@ static void encode_create_session(struct xdr_stream *xdr,
char machine_name[NFS4_MAX_MACHINE_NAME_LEN];
uint32_t len;
struct nfs_client *clp = args->client;
+ struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
u32 max_resp_sz_cached;
/*
@@ -1767,7 +1795,7 @@ static void encode_create_session(struct xdr_stream *xdr,
*p++ = cpu_to_be32(RPC_AUTH_UNIX); /* auth_sys */
/* authsys_parms rfc1831 */
- *p++ = cpu_to_be32((u32)clp->cl_boot_time.tv_nsec); /* stamp */
+ *p++ = (__be32)nn->boot_time.tv_nsec; /* stamp */
p = xdr_encode_opaque(p, machine_name, len);
*p++ = cpu_to_be32(0); /* UID */
*p++ = cpu_to_be32(0); /* GID */
@@ -1782,6 +1810,14 @@ static void encode_destroy_session(struct xdr_stream *xdr,
encode_opaque_fixed(xdr, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
}
+static void encode_destroy_clientid(struct xdr_stream *xdr,
+ uint64_t clientid,
+ struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_DESTROY_CLIENTID, decode_destroy_clientid_maxsz, hdr);
+ encode_uint64(xdr, clientid);
+}
+
static void encode_reclaim_complete(struct xdr_stream *xdr,
struct nfs41_reclaim_complete_args *args,
struct compound_hdr *hdr)
@@ -2064,7 +2100,6 @@ static void nfs4_xdr_enc_remove(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_remove(xdr, &args->name, &hdr);
- encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
@@ -2084,9 +2119,6 @@ static void nfs4_xdr_enc_rename(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_savefh(xdr, &hdr);
encode_putfh(xdr, args->new_dir, &hdr);
encode_rename(xdr, args->old_name, args->new_name, &hdr);
- encode_getfattr(xdr, args->bitmask, &hdr);
- encode_restorefh(xdr, &hdr);
- encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
@@ -2106,7 +2138,6 @@ static void nfs4_xdr_enc_link(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_savefh(xdr, &hdr);
encode_putfh(xdr, args->dir_fh, &hdr);
encode_link(xdr, args->name, &hdr);
- encode_getfattr(xdr, args->bitmask, &hdr);
encode_restorefh(xdr, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
@@ -2125,12 +2156,9 @@ static void nfs4_xdr_enc_create(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->dir_fh, &hdr);
- encode_savefh(xdr, &hdr);
encode_create(xdr, args, &hdr);
encode_getfh(xdr, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
- encode_restorefh(xdr, &hdr);
- encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
@@ -2191,12 +2219,9 @@ static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
- encode_savefh(xdr, &hdr);
encode_open(xdr, args, &hdr);
encode_getfh(xdr, &hdr);
- encode_getfattr(xdr, args->bitmask, &hdr);
- encode_restorefh(xdr, &hdr);
- encode_getfattr(xdr, args->dir_bitmask, &hdr);
+ encode_getfattr_open(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
@@ -2448,7 +2473,7 @@ static void nfs4_xdr_enc_write(struct rpc_rqst *req, struct xdr_stream *xdr,
* a COMMIT request
*/
static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr,
- struct nfs_writeargs *args)
+ struct nfs_commitargs *args)
{
struct compound_hdr hdr = {
.minorversion = nfs4_xdr_minorversion(&args->seq_args),
@@ -2458,8 +2483,6 @@ static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fh, &hdr);
encode_commit(xdr, args, &hdr);
- if (args->bitmask)
- encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
@@ -2602,8 +2625,8 @@ static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req,
encode_compound_hdr(xdr, req, &hdr);
encode_sequence(xdr, &args->seq_args, &hdr);
encode_putfh(xdr, args->fhandle, &hdr);
- encode_delegreturn(xdr, args->stateid, &hdr);
encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_delegreturn(xdr, args->stateid, &hdr);
encode_nops(&hdr);
}
@@ -2651,6 +2674,22 @@ static void nfs4_xdr_enc_secinfo(struct rpc_rqst *req,
#if defined(CONFIG_NFS_V4_1)
/*
+ * BIND_CONN_TO_SESSION request
+ */
+static void nfs4_xdr_enc_bind_conn_to_session(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_client *clp)
+{
+ struct compound_hdr hdr = {
+ .minorversion = clp->cl_mvops->minor_version,
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_bind_conn_to_session(xdr, clp->cl_session, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
* EXCHANGE_ID request
*/
static void nfs4_xdr_enc_exchange_id(struct rpc_rqst *req,
@@ -2699,6 +2738,22 @@ static void nfs4_xdr_enc_destroy_session(struct rpc_rqst *req,
}
/*
+ * a DESTROY_CLIENTID request
+ */
+static void nfs4_xdr_enc_destroy_clientid(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_client *clp)
+{
+ struct compound_hdr hdr = {
+ .minorversion = clp->cl_mvops->minor_version,
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_destroy_clientid(xdr, clp->cl_clientid, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
* a SEQUENCE request
*/
static void nfs4_xdr_enc_sequence(struct rpc_rqst *req, struct xdr_stream *xdr,
@@ -4102,7 +4157,7 @@ static int decode_verifier(struct xdr_stream *xdr, void *verifier)
return decode_opaque_fixed(xdr, verifier, NFS4_VERIFIER_SIZE);
}
-static int decode_commit(struct xdr_stream *xdr, struct nfs_writeres *res)
+static int decode_commit(struct xdr_stream *xdr, struct nfs_commitres *res)
{
int status;
@@ -4220,6 +4275,110 @@ xdr_error:
return status;
}
+static int decode_threshold_hint(struct xdr_stream *xdr,
+ uint32_t *bitmap,
+ uint64_t *res,
+ uint32_t hint_bit)
+{
+ __be32 *p;
+
+ *res = 0;
+ if (likely(bitmap[0] & hint_bit)) {
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, res);
+ }
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_first_threshold_item4(struct xdr_stream *xdr,
+ struct nfs4_threshold *res)
+{
+ __be32 *p, *savep;
+ uint32_t bitmap[3] = {0,}, attrlen;
+ int status;
+
+ /* layout type */
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p)) {
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+ }
+ res->l_type = be32_to_cpup(p);
+
+ /* thi_hintset bitmap */
+ status = decode_attr_bitmap(xdr, bitmap);
+ if (status < 0)
+ goto xdr_error;
+
+ /* thi_hintlist length */
+ status = decode_attr_length(xdr, &attrlen, &savep);
+ if (status < 0)
+ goto xdr_error;
+ /* thi_hintlist */
+ status = decode_threshold_hint(xdr, bitmap, &res->rd_sz, THRESHOLD_RD);
+ if (status < 0)
+ goto xdr_error;
+ status = decode_threshold_hint(xdr, bitmap, &res->wr_sz, THRESHOLD_WR);
+ if (status < 0)
+ goto xdr_error;
+ status = decode_threshold_hint(xdr, bitmap, &res->rd_io_sz,
+ THRESHOLD_RD_IO);
+ if (status < 0)
+ goto xdr_error;
+ status = decode_threshold_hint(xdr, bitmap, &res->wr_io_sz,
+ THRESHOLD_WR_IO);
+ if (status < 0)
+ goto xdr_error;
+
+ status = verify_attr_len(xdr, savep, attrlen);
+ res->bm = bitmap[0];
+
+ dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
+ __func__, res->bm, res->rd_sz, res->wr_sz, res->rd_io_sz,
+ res->wr_io_sz);
+xdr_error:
+ dprintk("%s ret=%d!\n", __func__, status);
+ return status;
+}
+
+/*
+ * Thresholds on pNFS direct I/O vs MDS I/O
+ */
+static int decode_attr_mdsthreshold(struct xdr_stream *xdr,
+ uint32_t *bitmap,
+ struct nfs4_threshold *res)
+{
+ __be32 *p;
+ int status = 0;
+ uint32_t num;
+
+ if (unlikely(bitmap[2] & (FATTR4_WORD2_MDSTHRESHOLD - 1U)))
+ return -EIO;
+ if (likely(bitmap[2] & FATTR4_WORD2_MDSTHRESHOLD)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ num = be32_to_cpup(p);
+ if (num == 0)
+ return 0;
+ if (num > 1)
+ printk(KERN_INFO "%s: Warning: Multiple pNFS layout "
+ "drivers per filesystem not supported\n",
+ __func__);
+
+ status = decode_first_threshold_item4(xdr, res);
+ }
+ return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
struct nfs_fattr *fattr, struct nfs_fh *fh,
struct nfs4_fs_locations *fs_loc,
@@ -4326,6 +4485,10 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
goto xdr_error;
fattr->valid |= status;
+ status = decode_attr_mdsthreshold(xdr, bitmap, fattr->mdsthreshold);
+ if (status < 0)
+ goto xdr_error;
+
xdr_error:
dprintk("%s: xdr returned %d\n", __func__, -status);
return status;
@@ -5156,7 +5319,6 @@ static int decode_exchange_id(struct xdr_stream *xdr,
uint32_t dummy;
char *dummy_str;
int status;
- struct nfs_client *clp = res->client;
uint32_t impl_id_count;
status = decode_op_hdr(xdr, OP_EXCHANGE_ID);
@@ -5166,36 +5328,39 @@ static int decode_exchange_id(struct xdr_stream *xdr,
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
- xdr_decode_hyper(p, &clp->cl_clientid);
+ xdr_decode_hyper(p, &res->clientid);
p = xdr_inline_decode(xdr, 12);
if (unlikely(!p))
goto out_overflow;
- clp->cl_seqid = be32_to_cpup(p++);
- clp->cl_exchange_flags = be32_to_cpup(p++);
+ res->seqid = be32_to_cpup(p++);
+ res->flags = be32_to_cpup(p++);
/* We ask for SP4_NONE */
dummy = be32_to_cpup(p);
if (dummy != SP4_NONE)
return -EIO;
- /* Throw away minor_id */
+ /* server_owner4.so_minor_id */
p = xdr_inline_decode(xdr, 8);
if (unlikely(!p))
goto out_overflow;
+ p = xdr_decode_hyper(p, &res->server_owner->minor_id);
- /* Throw away Major id */
+ /* server_owner4.so_major_id */
status = decode_opaque_inline(xdr, &dummy, &dummy_str);
if (unlikely(status))
return status;
+ if (unlikely(dummy > NFS4_OPAQUE_LIMIT))
+ return -EIO;
+ memcpy(res->server_owner->major_id, dummy_str, dummy);
+ res->server_owner->major_id_sz = dummy;
- /* Save server_scope */
+ /* server_scope4 */
status = decode_opaque_inline(xdr, &dummy, &dummy_str);
if (unlikely(status))
return status;
-
if (unlikely(dummy > NFS4_OPAQUE_LIMIT))
return -EIO;
-
memcpy(res->server_scope->server_scope, dummy_str, dummy);
res->server_scope->server_scope_sz = dummy;
@@ -5276,6 +5441,37 @@ static int decode_sessionid(struct xdr_stream *xdr, struct nfs4_sessionid *sid)
return decode_opaque_fixed(xdr, sid->data, NFS4_MAX_SESSIONID_LEN);
}
+static int decode_bind_conn_to_session(struct xdr_stream *xdr,
+ struct nfs41_bind_conn_to_session_res *res)
+{
+ __be32 *p;
+ int status;
+
+ status = decode_op_hdr(xdr, OP_BIND_CONN_TO_SESSION);
+ if (!status)
+ status = decode_sessionid(xdr, &res->session->sess_id);
+ if (unlikely(status))
+ return status;
+
+ /* dir flags, rdma mode bool */
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+
+ res->dir = be32_to_cpup(p++);
+ if (res->dir == 0 || res->dir > NFS4_CDFS4_BOTH)
+ return -EIO;
+ if (be32_to_cpup(p) == 0)
+ res->use_conn_in_rdma_mode = false;
+ else
+ res->use_conn_in_rdma_mode = true;
+
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
static int decode_create_session(struct xdr_stream *xdr,
struct nfs41_create_session_res *res)
{
@@ -5312,6 +5508,11 @@ static int decode_destroy_session(struct xdr_stream *xdr, void *dummy)
return decode_op_hdr(xdr, OP_DESTROY_SESSION);
}
+static int decode_destroy_clientid(struct xdr_stream *xdr, void *dummy)
+{
+ return decode_op_hdr(xdr, OP_DESTROY_CLIENTID);
+}
+
static int decode_reclaim_complete(struct xdr_stream *xdr, void *dummy)
{
return decode_op_hdr(xdr, OP_RECLAIM_COMPLETE);
@@ -5800,9 +6001,6 @@ static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
if (status)
goto out;
status = decode_remove(xdr, &res->cinfo);
- if (status)
- goto out;
- decode_getfattr(xdr, res->dir_attr, res->server);
out:
return status;
}
@@ -5832,15 +6030,6 @@ static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
if (status)
goto out;
status = decode_rename(xdr, &res->old_cinfo, &res->new_cinfo);
- if (status)
- goto out;
- /* Current FH is target directory */
- if (decode_getfattr(xdr, res->new_fattr, res->server))
- goto out;
- status = decode_restorefh(xdr);
- if (status)
- goto out;
- decode_getfattr(xdr, res->old_fattr, res->server);
out:
return status;
}
@@ -5876,8 +6065,6 @@ static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
* Note order: OP_LINK leaves the directory as the current
* filehandle.
*/
- if (decode_getfattr(xdr, res->dir_attr, res->server))
- goto out;
status = decode_restorefh(xdr);
if (status)
goto out;
@@ -5904,21 +6091,13 @@ static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_savefh(xdr);
- if (status)
- goto out;
status = decode_create(xdr, &res->dir_cinfo);
if (status)
goto out;
status = decode_getfh(xdr, res->fh);
if (status)
goto out;
- if (decode_getfattr(xdr, res->fattr, res->server))
- goto out;
- status = decode_restorefh(xdr);
- if (status)
- goto out;
- decode_getfattr(xdr, res->dir_fattr, res->server);
+ decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@@ -6075,19 +6254,12 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_putfh(xdr);
if (status)
goto out;
- status = decode_savefh(xdr);
- if (status)
- goto out;
status = decode_open(xdr, res);
if (status)
goto out;
if (decode_getfh(xdr, &res->fh) != 0)
goto out;
- if (decode_getfattr(xdr, res->f_attr, res->server) != 0)
- goto out;
- if (decode_restorefh(xdr) != 0)
- goto out;
- decode_getfattr(xdr, res->dir_attr, res->server);
+ decode_getfattr(xdr, res->f_attr, res->server);
out:
return status;
}
@@ -6353,7 +6525,7 @@ out:
* Decode COMMIT response
*/
static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
- struct nfs_writeres *res)
+ struct nfs_commitres *res)
{
struct compound_hdr hdr;
int status;
@@ -6368,10 +6540,6 @@ static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
if (status)
goto out;
status = decode_commit(xdr, res);
- if (status)
- goto out;
- if (res->fattr)
- decode_getfattr(xdr, res->fattr, res->server);
out:
return status;
}
@@ -6527,10 +6695,10 @@ static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp,
status = decode_putfh(xdr);
if (status != 0)
goto out;
- status = decode_delegreturn(xdr);
+ status = decode_getfattr(xdr, res->fattr, res->server);
if (status != 0)
goto out;
- decode_getfattr(xdr, res->fattr, res->server);
+ status = decode_delegreturn(xdr);
out:
return status;
}
@@ -6591,6 +6759,22 @@ out:
#if defined(CONFIG_NFS_V4_1)
/*
+ * Decode BIND_CONN_TO_SESSION response
+ */
+static int nfs4_xdr_dec_bind_conn_to_session(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ void *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (!status)
+ status = decode_bind_conn_to_session(xdr, res);
+ return status;
+}
+
+/*
* Decode EXCHANGE_ID response
*/
static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp,
@@ -6639,6 +6823,22 @@ static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp,
}
/*
+ * Decode DESTROY_CLIENTID response
+ */
+static int nfs4_xdr_dec_destroy_clientid(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ void *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (!status)
+ status = decode_destroy_clientid(xdr, res);
+ return status;
+}
+
+/*
* Decode SEQUENCE response
*/
static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp,
@@ -7085,6 +7285,9 @@ struct rpc_procinfo nfs4_procedures[] = {
PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid),
PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid),
PROC(GETDEVICELIST, enc_getdevicelist, dec_getdevicelist),
+ PROC(BIND_CONN_TO_SESSION,
+ enc_bind_conn_to_session, dec_bind_conn_to_session),
+ PROC(DESTROY_CLIENTID, enc_destroy_clientid, dec_destroy_clientid),
#endif /* CONFIG_NFS_V4_1 */
};
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index 4bff4a3dab4..b47277baeba 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -211,7 +211,7 @@ static void copy_single_comp(struct ore_components *oc, unsigned c,
memcpy(ocomp->cred, src_comp->oc_cap.cred, sizeof(ocomp->cred));
}
-int __alloc_objio_seg(unsigned numdevs, gfp_t gfp_flags,
+static int __alloc_objio_seg(unsigned numdevs, gfp_t gfp_flags,
struct objio_segment **pseg)
{
/* This is the in memory structure of the objio_segment
@@ -440,11 +440,12 @@ static void _read_done(struct ore_io_state *ios, void *private)
int objio_read_pagelist(struct nfs_read_data *rdata)
{
+ struct nfs_pgio_header *hdr = rdata->header;
struct objio_state *objios;
int ret;
- ret = objio_alloc_io_state(NFS_I(rdata->inode)->layout, true,
- rdata->lseg, rdata->args.pages, rdata->args.pgbase,
+ ret = objio_alloc_io_state(NFS_I(hdr->inode)->layout, true,
+ hdr->lseg, rdata->args.pages, rdata->args.pgbase,
rdata->args.offset, rdata->args.count, rdata,
GFP_KERNEL, &objios);
if (unlikely(ret))
@@ -483,12 +484,12 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
{
struct objio_state *objios = priv;
struct nfs_write_data *wdata = objios->oir.rpcdata;
+ struct address_space *mapping = wdata->header->inode->i_mapping;
pgoff_t index = offset / PAGE_SIZE;
- struct page *page = find_get_page(wdata->inode->i_mapping, index);
+ struct page *page = find_get_page(mapping, index);
if (!page) {
- page = find_or_create_page(wdata->inode->i_mapping,
- index, GFP_NOFS);
+ page = find_or_create_page(mapping, index, GFP_NOFS);
if (unlikely(!page)) {
dprintk("%s: grab_cache_page Failed index=0x%lx\n",
__func__, index);
@@ -518,11 +519,12 @@ static const struct _ore_r4w_op _r4w_op = {
int objio_write_pagelist(struct nfs_write_data *wdata, int how)
{
+ struct nfs_pgio_header *hdr = wdata->header;
struct objio_state *objios;
int ret;
- ret = objio_alloc_io_state(NFS_I(wdata->inode)->layout, false,
- wdata->lseg, wdata->args.pages, wdata->args.pgbase,
+ ret = objio_alloc_io_state(NFS_I(hdr->inode)->layout, false,
+ hdr->lseg, wdata->args.pages, wdata->args.pgbase,
wdata->args.offset, wdata->args.count, wdata, GFP_NOFS,
&objios);
if (unlikely(ret))
diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c
index 595c5fc21a1..87461354530 100644
--- a/fs/nfs/objlayout/objlayout.c
+++ b/fs/nfs/objlayout/objlayout.c
@@ -258,7 +258,7 @@ objlayout_read_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
if (status >= 0)
rdata->res.count = status;
else
- rdata->pnfs_error = status;
+ rdata->header->pnfs_error = status;
objlayout_iodone(oir);
/* must not use oir after this point */
@@ -279,12 +279,14 @@ objlayout_read_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
enum pnfs_try_status
objlayout_read_pagelist(struct nfs_read_data *rdata)
{
+ struct nfs_pgio_header *hdr = rdata->header;
+ struct inode *inode = hdr->inode;
loff_t offset = rdata->args.offset;
size_t count = rdata->args.count;
int err;
loff_t eof;
- eof = i_size_read(rdata->inode);
+ eof = i_size_read(inode);
if (unlikely(offset + count > eof)) {
if (offset >= eof) {
err = 0;
@@ -297,17 +299,17 @@ objlayout_read_pagelist(struct nfs_read_data *rdata)
}
rdata->res.eof = (offset + count) >= eof;
- _fix_verify_io_params(rdata->lseg, &rdata->args.pages,
+ _fix_verify_io_params(hdr->lseg, &rdata->args.pages,
&rdata->args.pgbase,
rdata->args.offset, rdata->args.count);
dprintk("%s: inode(%lx) offset 0x%llx count 0x%Zx eof=%d\n",
- __func__, rdata->inode->i_ino, offset, count, rdata->res.eof);
+ __func__, inode->i_ino, offset, count, rdata->res.eof);
err = objio_read_pagelist(rdata);
out:
if (unlikely(err)) {
- rdata->pnfs_error = err;
+ hdr->pnfs_error = err;
dprintk("%s: Returned Error %d\n", __func__, err);
return PNFS_NOT_ATTEMPTED;
}
@@ -340,7 +342,7 @@ objlayout_write_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
wdata->res.count = status;
wdata->verf.committed = oir->committed;
} else {
- wdata->pnfs_error = status;
+ wdata->header->pnfs_error = status;
}
objlayout_iodone(oir);
/* must not use oir after this point */
@@ -363,15 +365,16 @@ enum pnfs_try_status
objlayout_write_pagelist(struct nfs_write_data *wdata,
int how)
{
+ struct nfs_pgio_header *hdr = wdata->header;
int err;
- _fix_verify_io_params(wdata->lseg, &wdata->args.pages,
+ _fix_verify_io_params(hdr->lseg, &wdata->args.pages,
&wdata->args.pgbase,
wdata->args.offset, wdata->args.count);
err = objio_write_pagelist(wdata, how);
if (unlikely(err)) {
- wdata->pnfs_error = err;
+ hdr->pnfs_error = err;
dprintk("%s: Returned Error %d\n", __func__, err);
return PNFS_NOT_ATTEMPTED;
}
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index d21fceaa9f6..aed913c833f 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -26,6 +26,47 @@
static struct kmem_cache *nfs_page_cachep;
+bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
+{
+ p->npages = pagecount;
+ if (pagecount <= ARRAY_SIZE(p->page_array))
+ p->pagevec = p->page_array;
+ else {
+ p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
+ if (!p->pagevec)
+ p->npages = 0;
+ }
+ return p->pagevec != NULL;
+}
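/*
 * Editor's note: nfs_pgarray_set() uses the small inline page_array when it
 * is large enough and falls back to kcalloc() only for bigger requests.  The
 * matching cleanup therefore frees pagevec only when it is not the inline
 * array, exactly as nfs_readdata_release() does later in this patch:
 *
 *	if (rdata->pages.pagevec != rdata->pages.page_array)
 *		kfree(rdata->pages.pagevec);
 */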
+
+void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr,
+ void (*release)(struct nfs_pgio_header *hdr))
+{
+ hdr->req = nfs_list_entry(desc->pg_list.next);
+ hdr->inode = desc->pg_inode;
+ hdr->cred = hdr->req->wb_context->cred;
+ hdr->io_start = req_offset(hdr->req);
+ hdr->good_bytes = desc->pg_count;
+ hdr->dreq = desc->pg_dreq;
+ hdr->release = release;
+ hdr->completion_ops = desc->pg_completion_ops;
+ if (hdr->completion_ops->init_hdr)
+ hdr->completion_ops->init_hdr(hdr);
+}
+
+void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
+{
+ spin_lock(&hdr->lock);
+ if (pos < hdr->io_start + hdr->good_bytes) {
+ set_bit(NFS_IOHDR_ERROR, &hdr->flags);
+ clear_bit(NFS_IOHDR_EOF, &hdr->flags);
+ hdr->good_bytes = pos - hdr->io_start;
+ hdr->error = error;
+ }
+ spin_unlock(&hdr->lock);
+}
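/*
 * Editor's worked example for nfs_set_pgio_error(): with io_start = 0 and
 * good_bytes = 16384, an error reported at pos = 8192 trims good_bytes to
 * 8192 and sets NFS_IOHDR_ERROR; a later error at pos = 12288 is ignored
 * because it lies beyond the already-trimmed good range.
 */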
+
static inline struct nfs_page *
nfs_page_alloc(void)
{
@@ -76,12 +117,8 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
* long write-back delay. This will be adjusted in
* update_nfs_request below if the region is not locked. */
req->wb_page = page;
- atomic_set(&req->wb_complete, 0);
req->wb_index = page->index;
page_cache_get(page);
- BUG_ON(PagePrivate(page));
- BUG_ON(!PageLocked(page));
- BUG_ON(page->mapping->host != inode);
req->wb_offset = offset;
req->wb_pgbase = offset;
req->wb_bytes = count;
@@ -104,6 +141,15 @@ void nfs_unlock_request(struct nfs_page *req)
clear_bit(PG_BUSY, &req->wb_flags);
smp_mb__after_clear_bit();
wake_up_bit(&req->wb_flags, PG_BUSY);
+}
+
+/**
+ * nfs_unlock_and_release_request - Unlock request and release the nfs_page
+ * @req:
+ */
+void nfs_unlock_and_release_request(struct nfs_page *req)
+{
+ nfs_unlock_request(req);
nfs_release_request(req);
}
@@ -203,6 +249,7 @@ EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
struct inode *inode,
const struct nfs_pageio_ops *pg_ops,
+ const struct nfs_pgio_completion_ops *compl_ops,
size_t bsize,
int io_flags)
{
@@ -215,9 +262,11 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
desc->pg_recoalesce = 0;
desc->pg_inode = inode;
desc->pg_ops = pg_ops;
+ desc->pg_completion_ops = compl_ops;
desc->pg_ioflags = io_flags;
desc->pg_error = 0;
desc->pg_lseg = NULL;
+ desc->pg_dreq = NULL;
}
/**
@@ -241,12 +290,12 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
return false;
if (req->wb_context->state != prev->wb_context->state)
return false;
- if (req->wb_index != (prev->wb_index + 1))
- return false;
if (req->wb_pgbase != 0)
return false;
if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
return false;
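	/*
	 * Editor's note: coalescing is now keyed on byte contiguity rather
	 * than page-index adjacency, e.g. a request starting at offset 8192
	 * only coalesces with a predecessor that covered bytes 4096-8191.
	 */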
+ if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
+ return false;
return pgio->pg_ops->pg_test(pgio, prev, req);
}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 38512bcd2e9..b8323aa7b54 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -395,6 +395,9 @@ mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
dprintk("%s:Begin lo %p\n", __func__, lo);
if (list_empty(&lo->plh_segs)) {
+ /* Reset MDS Threshold I/O counters */
+ NFS_I(lo->plh_inode)->write_io = 0;
+ NFS_I(lo->plh_inode)->read_io = 0;
if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
put_layout_hdr_locked(lo);
return 0;
@@ -455,6 +458,7 @@ pnfs_destroy_layout(struct nfs_inode *nfsi)
spin_unlock(&nfsi->vfs_inode.i_lock);
pnfs_free_lseg_list(&tmp_list);
}
+EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
/*
* Called by the state manager to remove all layouts established under an
@@ -692,6 +696,7 @@ out:
dprintk("<-- %s status: %d\n", __func__, status);
return status;
}
+EXPORT_SYMBOL_GPL(_pnfs_return_layout);
bool pnfs_roc(struct inode *ino)
{
@@ -931,6 +936,81 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo,
}
/*
+ * Use mdsthreshold hints set at each OPEN to determine if I/O should go
+ * to the MDS or over pNFS
+ *
+ * The nfs_inode read_io and write_io fields are cumulative counters reset
+ * when there are no layout segments. Note that in pnfs_update_layout iomode
+ * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
+ * WRITE request.
+ *
+ * A return of true means use MDS I/O.
+ *
+ * From RFC 5661:
+ * If a file's size is smaller than the file size threshold, data accesses
+ * SHOULD be sent to the metadata server. If an I/O request has a length that
+ * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
+ * server. If both file size and I/O size are provided, the client SHOULD
+ * reach or exceed both thresholds before sending its read or write
+ * requests to the data server.
+ */
+static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
+ struct inode *ino, int iomode)
+{
+ struct nfs4_threshold *t = ctx->mdsthreshold;
+ struct nfs_inode *nfsi = NFS_I(ino);
+ loff_t fsize = i_size_read(ino);
+ bool size = false, size_set = false, io = false, io_set = false, ret = false;
+
+ if (t == NULL)
+ return ret;
+
+ dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
+ __func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);
+
+ switch (iomode) {
+ case IOMODE_READ:
+ if (t->bm & THRESHOLD_RD) {
+ dprintk("%s fsize %llu\n", __func__, fsize);
+ size_set = true;
+ if (fsize < t->rd_sz)
+ size = true;
+ }
+ if (t->bm & THRESHOLD_RD_IO) {
+ dprintk("%s nfsi->read_io %llu\n", __func__,
+ nfsi->read_io);
+ io_set = true;
+ if (nfsi->read_io < t->rd_io_sz)
+ io = true;
+ }
+ break;
+ case IOMODE_RW:
+ if (t->bm & THRESHOLD_WR) {
+ dprintk("%s fsize %llu\n", __func__, fsize);
+ size_set = true;
+ if (fsize < t->wr_sz)
+ size = true;
+ }
+ if (t->bm & THRESHOLD_WR_IO) {
+ dprintk("%s nfsi->write_io %llu\n", __func__,
+ nfsi->write_io);
+ io_set = true;
+ if (nfsi->write_io < t->wr_io_sz)
+ io = true;
+ }
+ break;
+ }
+ if (size_set && io_set) {
+ if (size && io)
+ ret = true;
+ } else if (size || io)
+ ret = true;
+
+ dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
+ return ret;
+}
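/*
 * Editor's sketch (not part of this patch): the read-side decision above
 * reduced to plain integers, for the case where both the size hint and the
 * I/O hint are set.  The helper name and parameters are illustrative only.
 */
static int example_use_mds_for_read(unsigned long long fsize,
				    unsigned long long read_io,
				    unsigned long long rd_sz,
				    unsigned long long rd_io_sz)
{
	int under_size = fsize < rd_sz;    /* file smaller than the size threshold */
	int under_io = read_io < rd_io_sz; /* cumulative read I/O under the threshold */

	/* both hints are set, so both must be under threshold to use the MDS */
	return under_size && under_io;
}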
+
+/*
* Layout segment is retrieved from the server if not cached.
* The appropriate layout segment is referenced and returned to the caller.
*/
@@ -957,6 +1037,10 @@ pnfs_update_layout(struct inode *ino,
if (!pnfs_enabled_sb(NFS_SERVER(ino)))
return NULL;
+
+ if (pnfs_within_mdsthreshold(ctx, ino, iomode))
+ return NULL;
+
spin_lock(&ino->i_lock);
lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
if (lo == NULL) {
@@ -1082,6 +1166,10 @@ pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *r
{
BUG_ON(pgio->pg_lseg != NULL);
+ if (req->wb_offset != req->wb_pgbase) {
+ nfs_pageio_reset_read_mds(pgio);
+ return;
+ }
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
req->wb_context,
req_offset(req),
@@ -1100,6 +1188,10 @@ pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *
{
BUG_ON(pgio->pg_lseg != NULL);
+ if (req->wb_offset != req->wb_pgbase) {
+ nfs_pageio_reset_write_mds(pgio);
+ return;
+ }
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
req->wb_context,
req_offset(req),
@@ -1113,26 +1205,31 @@ pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
bool
-pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
+pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
struct nfs_server *server = NFS_SERVER(inode);
struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
if (ld == NULL)
return false;
- nfs_pageio_init(pgio, inode, ld->pg_read_ops, server->rsize, 0);
+ nfs_pageio_init(pgio, inode, ld->pg_read_ops, compl_ops,
+ server->rsize, 0);
return true;
}
bool
-pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags)
+pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode,
+ int ioflags,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
struct nfs_server *server = NFS_SERVER(inode);
struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
if (ld == NULL)
return false;
- nfs_pageio_init(pgio, inode, ld->pg_write_ops, server->wsize, ioflags);
+ nfs_pageio_init(pgio, inode, ld->pg_write_ops, compl_ops,
+ server->wsize, ioflags);
return true;
}
@@ -1162,13 +1259,15 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
-static int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *head)
+int pnfs_write_done_resend_to_mds(struct inode *inode,
+ struct list_head *head,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
struct nfs_pageio_descriptor pgio;
LIST_HEAD(failed);
/* Resend all requests through the MDS */
- nfs_pageio_init_write_mds(&pgio, inode, FLUSH_STABLE);
+ nfs_pageio_init_write_mds(&pgio, inode, FLUSH_STABLE, compl_ops);
while (!list_empty(head)) {
struct nfs_page *req = nfs_list_entry(head->next);
@@ -1188,30 +1287,37 @@ static int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *
}
return 0;
}
+EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
+
+static void pnfs_ld_handle_write_error(struct nfs_write_data *data)
+{
+ struct nfs_pgio_header *hdr = data->header;
+
+ dprintk("pnfs write error = %d\n", hdr->pnfs_error);
+ if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
+ PNFS_LAYOUTRET_ON_ERROR) {
+ clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
+ pnfs_return_layout(hdr->inode);
+ }
+ if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
+ data->task.tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
+ &hdr->pages,
+ hdr->completion_ops);
+}
/*
* Called by non rpc-based layout drivers
*/
void pnfs_ld_write_done(struct nfs_write_data *data)
{
- if (likely(!data->pnfs_error)) {
+ struct nfs_pgio_header *hdr = data->header;
+
+ if (!hdr->pnfs_error) {
pnfs_set_layoutcommit(data);
- data->mds_ops->rpc_call_done(&data->task, data);
- } else {
- dprintk("pnfs write error = %d\n", data->pnfs_error);
- if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
- PNFS_LAYOUTRET_ON_ERROR) {
- /* Don't lo_commit on error, Server will needs to
- * preform a file recovery.
- */
- clear_bit(NFS_INO_LAYOUTCOMMIT,
- &NFS_I(data->inode)->flags);
- pnfs_return_layout(data->inode);
- }
- data->task.tk_status = pnfs_write_done_resend_to_mds(data->inode, &data->pages);
- }
- put_lseg(data->lseg);
- data->mds_ops->rpc_release(data);
+ hdr->mds_ops->rpc_call_done(&data->task, data);
+ } else
+ pnfs_ld_handle_write_error(data);
+ hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
@@ -1219,12 +1325,13 @@ static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
struct nfs_write_data *data)
{
- list_splice_tail_init(&data->pages, &desc->pg_list);
- if (data->req && list_empty(&data->req->wb_list))
- nfs_list_add_request(data->req, &desc->pg_list);
- nfs_pageio_reset_write_mds(desc);
- desc->pg_recoalesce = 1;
- put_lseg(data->lseg);
+ struct nfs_pgio_header *hdr = data->header;
+
+ if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+ list_splice_tail_init(&hdr->pages, &desc->pg_list);
+ nfs_pageio_reset_write_mds(desc);
+ desc->pg_recoalesce = 1;
+ }
nfs_writedata_release(data);
}
@@ -1234,23 +1341,18 @@ pnfs_try_to_write_data(struct nfs_write_data *wdata,
struct pnfs_layout_segment *lseg,
int how)
{
- struct inode *inode = wdata->inode;
+ struct nfs_pgio_header *hdr = wdata->header;
+ struct inode *inode = hdr->inode;
enum pnfs_try_status trypnfs;
struct nfs_server *nfss = NFS_SERVER(inode);
- wdata->mds_ops = call_ops;
- wdata->lseg = get_lseg(lseg);
+ hdr->mds_ops = call_ops;
dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
inode->i_ino, wdata->args.count, wdata->args.offset, how);
-
trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
- if (trypnfs == PNFS_NOT_ATTEMPTED) {
- put_lseg(wdata->lseg);
- wdata->lseg = NULL;
- } else
+ if (trypnfs != PNFS_NOT_ATTEMPTED)
nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
-
dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
return trypnfs;
}
@@ -1266,7 +1368,7 @@ pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *he
while (!list_empty(head)) {
enum pnfs_try_status trypnfs;
- data = list_entry(head->next, struct nfs_write_data, list);
+ data = list_first_entry(head, struct nfs_write_data, list);
list_del_init(&data->list);
trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how);
@@ -1276,43 +1378,82 @@ pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *he
put_lseg(lseg);
}
+static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
+{
+ put_lseg(hdr->lseg);
+ nfs_writehdr_free(hdr);
+}
+
int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
- LIST_HEAD(head);
+ struct nfs_write_header *whdr;
+ struct nfs_pgio_header *hdr;
int ret;
- ret = nfs_generic_flush(desc, &head);
- if (ret != 0) {
+ whdr = nfs_writehdr_alloc();
+ if (!whdr) {
+ desc->pg_completion_ops->error_cleanup(&desc->pg_list);
put_lseg(desc->pg_lseg);
desc->pg_lseg = NULL;
- return ret;
+ return -ENOMEM;
}
- pnfs_do_multiple_writes(desc, &head, desc->pg_ioflags);
- return 0;
+ hdr = &whdr->header;
+ nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
+ hdr->lseg = get_lseg(desc->pg_lseg);
+ atomic_inc(&hdr->refcnt);
+ ret = nfs_generic_flush(desc, hdr);
+ if (ret != 0) {
+ put_lseg(desc->pg_lseg);
+ desc->pg_lseg = NULL;
+ } else
+ pnfs_do_multiple_writes(desc, &hdr->rpc_list, desc->pg_ioflags);
+ if (atomic_dec_and_test(&hdr->refcnt))
+ hdr->completion_ops->completion(hdr);
+ return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
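/*
 * Editor's note: the atomic_inc()/atomic_dec_and_test() pair above keeps the
 * pgio header alive while its RPCs are dispatched; each nfs_read_data or
 * nfs_write_data allocated against the header takes its own reference (the
 * read side, nfs_readdata_alloc(), is visible later in this patch), and the
 * completion callback only runs once the last reference is dropped.
 */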
-static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
+int pnfs_read_done_resend_to_mds(struct inode *inode,
+ struct list_head *head,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
struct nfs_pageio_descriptor pgio;
+ LIST_HEAD(failed);
- put_lseg(data->lseg);
- data->lseg = NULL;
- dprintk("pnfs write error = %d\n", data->pnfs_error);
- if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
- PNFS_LAYOUTRET_ON_ERROR)
- pnfs_return_layout(data->inode);
-
- nfs_pageio_init_read_mds(&pgio, data->inode);
-
- while (!list_empty(&data->pages)) {
- struct nfs_page *req = nfs_list_entry(data->pages.next);
+ /* Resend all requests through the MDS */
+ nfs_pageio_init_read_mds(&pgio, inode, compl_ops);
+ while (!list_empty(head)) {
+ struct nfs_page *req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
- nfs_pageio_add_request(&pgio, req);
+ if (!nfs_pageio_add_request(&pgio, req))
+ nfs_list_add_request(req, &failed);
}
nfs_pageio_complete(&pgio);
+
+ if (!list_empty(&failed)) {
+ list_move(&failed, head);
+ return -EIO;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);
+
+static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
+{
+ struct nfs_pgio_header *hdr = data->header;
+
+ dprintk("pnfs read error = %d\n", hdr->pnfs_error);
+ if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
+ PNFS_LAYOUTRET_ON_ERROR) {
+ clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
+ pnfs_return_layout(hdr->inode);
+ }
+ if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
+ data->task.tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
+ &hdr->pages,
+ hdr->completion_ops);
}
/*
@@ -1320,13 +1461,14 @@ static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
*/
void pnfs_ld_read_done(struct nfs_read_data *data)
{
- if (likely(!data->pnfs_error)) {
+ struct nfs_pgio_header *hdr = data->header;
+
+ if (likely(!hdr->pnfs_error)) {
__nfs4_read_done_cb(data);
- data->mds_ops->rpc_call_done(&data->task, data);
+ hdr->mds_ops->rpc_call_done(&data->task, data);
} else
pnfs_ld_handle_read_error(data);
- put_lseg(data->lseg);
- data->mds_ops->rpc_release(data);
+ hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
@@ -1334,11 +1476,13 @@ static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
struct nfs_read_data *data)
{
- list_splice_tail_init(&data->pages, &desc->pg_list);
- if (data->req && list_empty(&data->req->wb_list))
- nfs_list_add_request(data->req, &desc->pg_list);
- nfs_pageio_reset_read_mds(desc);
- desc->pg_recoalesce = 1;
+ struct nfs_pgio_header *hdr = data->header;
+
+ if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+ list_splice_tail_init(&hdr->pages, &desc->pg_list);
+ nfs_pageio_reset_read_mds(desc);
+ desc->pg_recoalesce = 1;
+ }
nfs_readdata_release(data);
}
@@ -1350,23 +1494,19 @@ pnfs_try_to_read_data(struct nfs_read_data *rdata,
const struct rpc_call_ops *call_ops,
struct pnfs_layout_segment *lseg)
{
- struct inode *inode = rdata->inode;
+ struct nfs_pgio_header *hdr = rdata->header;
+ struct inode *inode = hdr->inode;
struct nfs_server *nfss = NFS_SERVER(inode);
enum pnfs_try_status trypnfs;
- rdata->mds_ops = call_ops;
- rdata->lseg = get_lseg(lseg);
+ hdr->mds_ops = call_ops;
dprintk("%s: Reading ino:%lu %u@%llu\n",
__func__, inode->i_ino, rdata->args.count, rdata->args.offset);
trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
- if (trypnfs == PNFS_NOT_ATTEMPTED) {
- put_lseg(rdata->lseg);
- rdata->lseg = NULL;
- } else {
+ if (trypnfs != PNFS_NOT_ATTEMPTED)
nfs_inc_stats(inode, NFSIOS_PNFS_READ);
- }
dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
return trypnfs;
}
@@ -1382,7 +1522,7 @@ pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *hea
while (!list_empty(head)) {
enum pnfs_try_status trypnfs;
- data = list_entry(head->next, struct nfs_read_data, list);
+ data = list_first_entry(head, struct nfs_read_data, list);
list_del_init(&data->list);
trypnfs = pnfs_try_to_read_data(data, call_ops, lseg);
@@ -1392,20 +1532,40 @@ pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *hea
put_lseg(lseg);
}
+static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
+{
+ put_lseg(hdr->lseg);
+ nfs_readhdr_free(hdr);
+}
+
int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
- LIST_HEAD(head);
+ struct nfs_read_header *rhdr;
+ struct nfs_pgio_header *hdr;
int ret;
- ret = nfs_generic_pagein(desc, &head);
- if (ret != 0) {
+ rhdr = nfs_readhdr_alloc();
+ if (!rhdr) {
+ desc->pg_completion_ops->error_cleanup(&desc->pg_list);
+ ret = -ENOMEM;
put_lseg(desc->pg_lseg);
desc->pg_lseg = NULL;
return ret;
}
- pnfs_do_multiple_reads(desc, &head);
- return 0;
+ hdr = &rhdr->header;
+ nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
+ hdr->lseg = get_lseg(desc->pg_lseg);
+ atomic_inc(&hdr->refcnt);
+ ret = nfs_generic_pagein(desc, hdr);
+ if (ret != 0) {
+ put_lseg(desc->pg_lseg);
+ desc->pg_lseg = NULL;
+ } else
+ pnfs_do_multiple_reads(desc, &hdr->rpc_list);
+ if (atomic_dec_and_test(&hdr->refcnt))
+ hdr->completion_ops->completion(hdr);
+ return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
@@ -1438,30 +1598,32 @@ EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
void
pnfs_set_layoutcommit(struct nfs_write_data *wdata)
{
- struct nfs_inode *nfsi = NFS_I(wdata->inode);
+ struct nfs_pgio_header *hdr = wdata->header;
+ struct inode *inode = hdr->inode;
+ struct nfs_inode *nfsi = NFS_I(inode);
loff_t end_pos = wdata->mds_offset + wdata->res.count;
bool mark_as_dirty = false;
- spin_lock(&nfsi->vfs_inode.i_lock);
+ spin_lock(&inode->i_lock);
if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
mark_as_dirty = true;
dprintk("%s: Set layoutcommit for inode %lu ",
- __func__, wdata->inode->i_ino);
+ __func__, inode->i_ino);
}
- if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &wdata->lseg->pls_flags)) {
+ if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) {
/* references matched in nfs4_layoutcommit_release */
- get_lseg(wdata->lseg);
+ get_lseg(hdr->lseg);
}
if (end_pos > nfsi->layout->plh_lwb)
nfsi->layout->plh_lwb = end_pos;
- spin_unlock(&nfsi->vfs_inode.i_lock);
+ spin_unlock(&inode->i_lock);
dprintk("%s: lseg %p end_pos %llu\n",
- __func__, wdata->lseg, nfsi->layout->plh_lwb);
+ __func__, hdr->lseg, nfsi->layout->plh_lwb);
/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
* will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
if (mark_as_dirty)
- mark_inode_dirty_sync(wdata->inode);
+ mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
@@ -1550,3 +1712,15 @@ out_free:
kfree(data);
goto out;
}
+
+struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
+{
+ struct nfs4_threshold *thp;
+
+ thp = kzalloc(sizeof(*thp), GFP_NOFS);
+ if (!thp) {
+ dprintk("%s mdsthreshold allocation failed\n", __func__);
+ return NULL;
+ }
+ return thp;
+}
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 442ebf68eee..29fd23c0efd 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -63,6 +63,7 @@ enum {
NFS_LAYOUT_BULK_RECALL, /* bulk recall affecting layout */
NFS_LAYOUT_ROC, /* some lseg had roc bit set */
NFS_LAYOUT_DESTROYED, /* no new use of layout allowed */
+ NFS_LAYOUT_INVALID, /* layout is being destroyed */
};
enum layoutdriver_policy_flags {
@@ -94,11 +95,20 @@ struct pnfs_layoutdriver_type {
const struct nfs_pageio_ops *pg_read_ops;
const struct nfs_pageio_ops *pg_write_ops;
+ struct pnfs_ds_commit_info *(*get_ds_info) (struct inode *inode);
void (*mark_request_commit) (struct nfs_page *req,
- struct pnfs_layout_segment *lseg);
- void (*clear_request_commit) (struct nfs_page *req);
- int (*scan_commit_lists) (struct inode *inode, int max, spinlock_t *lock);
- int (*commit_pagelist)(struct inode *inode, struct list_head *mds_pages, int how);
+ struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo);
+ void (*clear_request_commit) (struct nfs_page *req,
+ struct nfs_commit_info *cinfo);
+ int (*scan_commit_lists) (struct nfs_commit_info *cinfo,
+ int max);
+ void (*recover_commit_reqs) (struct list_head *list,
+ struct nfs_commit_info *cinfo);
+ int (*commit_pagelist)(struct inode *inode,
+ struct list_head *mds_pages,
+ int how,
+ struct nfs_commit_info *cinfo);
/*
* Return PNFS_ATTEMPTED to indicate the layout code has attempted
@@ -168,8 +178,10 @@ extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp);
void get_layout_hdr(struct pnfs_layout_hdr *lo);
void put_lseg(struct pnfs_layout_segment *lseg);
-bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *, struct inode *);
-bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *, struct inode *, int);
+bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *, struct inode *,
+ const struct nfs_pgio_completion_ops *);
+bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *, struct inode *,
+ int, const struct nfs_pgio_completion_ops *);
void set_pnfs_layoutdriver(struct nfs_server *, const struct nfs_fh *, u32);
void unset_pnfs_layoutdriver(struct nfs_server *);
@@ -211,6 +223,11 @@ struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
gfp_t gfp_flags);
void nfs4_deviceid_mark_client_invalid(struct nfs_client *clp);
+int pnfs_read_done_resend_to_mds(struct inode *inode, struct list_head *head,
+ const struct nfs_pgio_completion_ops *compl_ops);
+int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *head,
+ const struct nfs_pgio_completion_ops *compl_ops);
+struct nfs4_threshold *pnfs_mdsthreshold_alloc(void);
/* nfs4_deviceid_flags */
enum {
@@ -261,49 +278,66 @@ static inline int pnfs_enabled_sb(struct nfs_server *nfss)
}
static inline int
-pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how)
+pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how,
+ struct nfs_commit_info *cinfo)
{
- if (!test_and_clear_bit(NFS_INO_PNFS_COMMIT, &NFS_I(inode)->flags))
+ if (cinfo->ds == NULL || cinfo->ds->ncommitting == 0)
return PNFS_NOT_ATTEMPTED;
- return NFS_SERVER(inode)->pnfs_curr_ld->commit_pagelist(inode, mds_pages, how);
+ return NFS_SERVER(inode)->pnfs_curr_ld->commit_pagelist(inode, mds_pages, how, cinfo);
+}
+
+static inline struct pnfs_ds_commit_info *
+pnfs_get_ds_info(struct inode *inode)
+{
+ struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
+
+ if (ld == NULL || ld->get_ds_info == NULL)
+ return NULL;
+ return ld->get_ds_info(inode);
}
static inline bool
-pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo)
{
struct inode *inode = req->wb_context->dentry->d_inode;
struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
if (lseg == NULL || ld->mark_request_commit == NULL)
return false;
- ld->mark_request_commit(req, lseg);
+ ld->mark_request_commit(req, lseg, cinfo);
return true;
}
static inline bool
-pnfs_clear_request_commit(struct nfs_page *req)
+pnfs_clear_request_commit(struct nfs_page *req, struct nfs_commit_info *cinfo)
{
struct inode *inode = req->wb_context->dentry->d_inode;
struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
if (ld == NULL || ld->clear_request_commit == NULL)
return false;
- ld->clear_request_commit(req);
+ ld->clear_request_commit(req, cinfo);
return true;
}
static inline int
-pnfs_scan_commit_lists(struct inode *inode, int max, spinlock_t *lock)
+pnfs_scan_commit_lists(struct inode *inode, struct nfs_commit_info *cinfo,
+ int max)
{
- struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
- int ret;
-
- if (ld == NULL || ld->scan_commit_lists == NULL)
+ if (cinfo->ds == NULL || cinfo->ds->nwritten == 0)
return 0;
- ret = ld->scan_commit_lists(inode, max, lock);
- if (ret != 0)
- set_bit(NFS_INO_PNFS_COMMIT, &NFS_I(inode)->flags);
- return ret;
+ else
+ return NFS_SERVER(inode)->pnfs_curr_ld->scan_commit_lists(cinfo, max);
+}
+
+static inline void
+pnfs_recover_commit_reqs(struct inode *inode, struct list_head *list,
+ struct nfs_commit_info *cinfo)
+{
+ if (cinfo->ds == NULL || cinfo->ds->nwritten == 0)
+ return;
+ NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo);
}
/* Should the pNFS client commit and return the layout upon a setattr */
@@ -327,6 +361,14 @@ static inline int pnfs_return_layout(struct inode *ino)
return 0;
}
+static inline bool
+pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src,
+ struct nfs_server *nfss)
+{
+ return (dst && src && src->bm != 0 &&
+ nfss->pnfs_curr_ld->id == src->l_type);
+}
+
#ifdef NFS_DEBUG
void nfs4_print_deviceid(const struct nfs4_deviceid *dev_id);
#else
@@ -396,45 +438,74 @@ static inline void unset_pnfs_layoutdriver(struct nfs_server *s)
{
}
-static inline bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
+static inline bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
return false;
}
-static inline bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags)
+static inline bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
return false;
}
static inline int
-pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how)
+pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how,
+ struct nfs_commit_info *cinfo)
{
return PNFS_NOT_ATTEMPTED;
}
+static inline struct pnfs_ds_commit_info *
+pnfs_get_ds_info(struct inode *inode)
+{
+ return NULL;
+}
+
static inline bool
-pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo)
{
return false;
}
static inline bool
-pnfs_clear_request_commit(struct nfs_page *req)
+pnfs_clear_request_commit(struct nfs_page *req, struct nfs_commit_info *cinfo)
{
return false;
}
static inline int
-pnfs_scan_commit_lists(struct inode *inode, int max, spinlock_t *lock)
+pnfs_scan_commit_lists(struct inode *inode, struct nfs_commit_info *cinfo,
+ int max)
{
return 0;
}
+static inline void
+pnfs_recover_commit_reqs(struct inode *inode, struct list_head *list,
+ struct nfs_commit_info *cinfo)
+{
+}
+
static inline int pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
return 0;
}
+static inline bool
+pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src,
+ struct nfs_server *nfss)
+{
+ return false;
+}
+
+static inline struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
+{
+ return NULL;
+}
+
#endif /* CONFIG_NFS_V4_1 */
#endif /* FS_NFS_PNFS_H */
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index d6408b6437d..a706b6bcc28 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -178,7 +178,7 @@ nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
}
static int
-nfs_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
+nfs_proc_lookup(struct inode *dir, struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct nfs_diropargs arg = {
@@ -640,12 +640,14 @@ nfs_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data)
{
+ struct inode *inode = data->header->inode;
+
if (nfs_async_handle_expired_key(task))
return -EAGAIN;
- nfs_invalidate_atime(data->inode);
+ nfs_invalidate_atime(inode);
if (task->tk_status >= 0) {
- nfs_refresh_inode(data->inode, data->res.fattr);
+ nfs_refresh_inode(inode, data->res.fattr);
/* Emulate the eof flag, which isn't normally needed in NFSv2
* as it is guaranteed to always return the file attributes
*/
@@ -667,11 +669,13 @@ static void nfs_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_dat
static int nfs_write_done(struct rpc_task *task, struct nfs_write_data *data)
{
+ struct inode *inode = data->header->inode;
+
if (nfs_async_handle_expired_key(task))
return -EAGAIN;
if (task->tk_status >= 0)
- nfs_post_op_update_inode_force_wcc(data->inode, data->res.fattr);
+ nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
return 0;
}
@@ -687,8 +691,13 @@ static void nfs_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_d
rpc_call_start(task);
}
+static void nfs_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
+{
+ BUG();
+}
+
static void
-nfs_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
+nfs_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
{
BUG();
}
@@ -732,6 +741,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
.file_inode_ops = &nfs_file_inode_operations,
.file_ops = &nfs_file_operations,
.getroot = nfs_proc_get_root,
+ .submount = nfs_submount,
.getattr = nfs_proc_getattr,
.setattr = nfs_proc_setattr,
.lookup = nfs_proc_lookup,
@@ -763,6 +773,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
.write_rpc_prepare = nfs_proc_write_rpc_prepare,
.write_done = nfs_write_done,
.commit_setup = nfs_proc_commit_setup,
+ .commit_rpc_prepare = nfs_proc_commit_rpc_prepare,
.lock = nfs_proc_lock,
.lock_check_bounds = nfs_lock_check_bounds,
.close_context = nfs_close_context,
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 0a4be28c2ea..86ced783621 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -30,43 +30,73 @@
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
static const struct nfs_pageio_ops nfs_pageio_read_ops;
-static const struct rpc_call_ops nfs_read_partial_ops;
-static const struct rpc_call_ops nfs_read_full_ops;
+static const struct rpc_call_ops nfs_read_common_ops;
+static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static struct kmem_cache *nfs_rdata_cachep;
-struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
+struct nfs_read_header *nfs_readhdr_alloc(void)
{
- struct nfs_read_data *p;
-
- p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
- if (p) {
- INIT_LIST_HEAD(&p->pages);
- p->npages = pagecount;
- if (pagecount <= ARRAY_SIZE(p->page_array))
- p->pagevec = p->page_array;
- else {
- p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
- if (!p->pagevec) {
- kmem_cache_free(nfs_rdata_cachep, p);
- p = NULL;
- }
- }
+ struct nfs_read_header *rhdr;
+
+ rhdr = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
+ if (rhdr) {
+ struct nfs_pgio_header *hdr = &rhdr->header;
+
+ INIT_LIST_HEAD(&hdr->pages);
+ INIT_LIST_HEAD(&hdr->rpc_list);
+ spin_lock_init(&hdr->lock);
+ atomic_set(&hdr->refcnt, 0);
+ }
+ return rhdr;
+}
+
+static struct nfs_read_data *nfs_readdata_alloc(struct nfs_pgio_header *hdr,
+ unsigned int pagecount)
+{
+ struct nfs_read_data *data, *prealloc;
+
+ prealloc = &container_of(hdr, struct nfs_read_header, header)->rpc_data;
+ if (prealloc->header == NULL)
+ data = prealloc;
+ else
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ goto out;
+
+ if (nfs_pgarray_set(&data->pages, pagecount)) {
+ data->header = hdr;
+ atomic_inc(&hdr->refcnt);
+ } else {
+ if (data != prealloc)
+ kfree(data);
+ data = NULL;
}
- return p;
+out:
+ return data;
}
-void nfs_readdata_free(struct nfs_read_data *p)
+void nfs_readhdr_free(struct nfs_pgio_header *hdr)
{
- if (p && (p->pagevec != &p->page_array[0]))
- kfree(p->pagevec);
- kmem_cache_free(nfs_rdata_cachep, p);
+ struct nfs_read_header *rhdr = container_of(hdr, struct nfs_read_header, header);
+
+ kmem_cache_free(nfs_rdata_cachep, rhdr);
}
void nfs_readdata_release(struct nfs_read_data *rdata)
{
+ struct nfs_pgio_header *hdr = rdata->header;
+ struct nfs_read_header *read_header = container_of(hdr, struct nfs_read_header, header);
+
put_nfs_open_context(rdata->args.context);
- nfs_readdata_free(rdata);
+ if (rdata->pages.pagevec != rdata->pages.page_array)
+ kfree(rdata->pages.pagevec);
+ if (rdata != &read_header->rpc_data)
+ kfree(rdata);
+ else
+ rdata->header = NULL;
+ if (atomic_dec_and_test(&hdr->refcnt))
+ hdr->completion_ops->completion(hdr);
}
static
@@ -78,39 +108,11 @@ int nfs_return_empty_page(struct page *page)
return 0;
}
-static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
-{
- unsigned int remainder = data->args.count - data->res.count;
- unsigned int base = data->args.pgbase + data->res.count;
- unsigned int pglen;
- struct page **pages;
-
- if (data->res.eof == 0 || remainder == 0)
- return;
- /*
- * Note: "remainder" can never be negative, since we check for
- * this in the XDR code.
- */
- pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
- base &= ~PAGE_CACHE_MASK;
- pglen = PAGE_CACHE_SIZE - base;
- for (;;) {
- if (remainder <= pglen) {
- zero_user(*pages, base, remainder);
- break;
- }
- zero_user(*pages, base, pglen);
- pages++;
- remainder -= pglen;
- pglen = PAGE_CACHE_SIZE;
- base = 0;
- }
-}
-
void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
- struct inode *inode)
+ struct inode *inode,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
- nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops,
+ nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops, compl_ops,
NFS_SERVER(inode)->rsize, 0);
}
@@ -121,11 +123,12 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
-static void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
- struct inode *inode)
+void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
+ struct inode *inode,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
- if (!pnfs_pageio_init_read(pgio, inode))
- nfs_pageio_init_read_mds(pgio, inode);
+ if (!pnfs_pageio_init_read(pgio, inode, compl_ops))
+ nfs_pageio_init_read_mds(pgio, inode, compl_ops);
}
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
@@ -146,9 +149,10 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
if (len < PAGE_CACHE_SIZE)
zero_user_segment(page, len, PAGE_CACHE_SIZE);
- nfs_pageio_init_read(&pgio, inode);
+ nfs_pageio_init_read(&pgio, inode, &nfs_async_read_completion_ops);
nfs_pageio_add_request(&pgio, new);
nfs_pageio_complete(&pgio);
+ NFS_I(inode)->read_io += pgio.pg_bytes_written;
return 0;
}
@@ -169,16 +173,49 @@ static void nfs_readpage_release(struct nfs_page *req)
nfs_release_request(req);
}
-int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt,
- const struct rpc_call_ops *call_ops)
+/* Note: the I/O was page aligned */
+static void nfs_read_completion(struct nfs_pgio_header *hdr)
+{
+ unsigned long bytes = 0;
+
+ if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
+ goto out;
+ while (!list_empty(&hdr->pages)) {
+ struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+ struct page *page = req->wb_page;
+
+ if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
+ if (bytes > hdr->good_bytes)
+ zero_user(page, 0, PAGE_SIZE);
+ else if (hdr->good_bytes - bytes < PAGE_SIZE)
+ zero_user_segment(page,
+ hdr->good_bytes & ~PAGE_MASK,
+ PAGE_SIZE);
+ }
+ bytes += req->wb_bytes;
+ if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
+ if (bytes <= hdr->good_bytes)
+ SetPageUptodate(page);
+ } else
+ SetPageUptodate(page);
+ nfs_list_remove_request(req);
+ nfs_readpage_release(req);
+ }
+out:
+ hdr->release(hdr);
+}
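/*
 * Editor's worked example for the EOF zeroing above: with PAGE_SIZE = 4096
 * and hdr->good_bytes = 5000, the first page is left untouched (5000 - 0 is
 * not less than 4096), the second page is zeroed from offset
 * 5000 & ~PAGE_MASK = 904 to the end of the page, and any page lying
 * entirely beyond good_bytes is zeroed whole.
 */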
+
+int nfs_initiate_read(struct rpc_clnt *clnt,
+ struct nfs_read_data *data,
+ const struct rpc_call_ops *call_ops, int flags)
{
- struct inode *inode = data->inode;
+ struct inode *inode = data->header->inode;
int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
struct rpc_task *task;
struct rpc_message msg = {
.rpc_argp = &data->args,
.rpc_resp = &data->res,
- .rpc_cred = data->cred,
+ .rpc_cred = data->header->cred,
};
struct rpc_task_setup task_setup_data = {
.task = &data->task,
@@ -187,7 +224,7 @@ int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt,
.callback_ops = call_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
- .flags = RPC_TASK_ASYNC | swap_flags,
+ .flags = RPC_TASK_ASYNC | swap_flags | flags,
};
/* Set up the initial task struct. */
@@ -212,19 +249,15 @@ EXPORT_SYMBOL_GPL(nfs_initiate_read);
/*
* Set up the NFS read request struct
*/
-static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
+static void nfs_read_rpcsetup(struct nfs_read_data *data,
unsigned int count, unsigned int offset)
{
- struct inode *inode = req->wb_context->dentry->d_inode;
-
- data->req = req;
- data->inode = inode;
- data->cred = req->wb_context->cred;
+ struct nfs_page *req = data->header->req;
- data->args.fh = NFS_FH(inode);
+ data->args.fh = NFS_FH(data->header->inode);
data->args.offset = req_offset(req) + offset;
data->args.pgbase = req->wb_pgbase + offset;
- data->args.pages = data->pagevec;
+ data->args.pages = data->pages.pagevec;
data->args.count = count;
data->args.context = get_nfs_open_context(req->wb_context);
data->args.lock_context = req->wb_lock_context;
@@ -238,9 +271,9 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
static int nfs_do_read(struct nfs_read_data *data,
const struct rpc_call_ops *call_ops)
{
- struct inode *inode = data->args.context->dentry->d_inode;
+ struct inode *inode = data->header->inode;
- return nfs_initiate_read(data, NFS_CLIENT(inode), call_ops);
+ return nfs_initiate_read(NFS_CLIENT(inode), data, call_ops, 0);
}
static int
@@ -253,7 +286,7 @@ nfs_do_multiple_reads(struct list_head *head,
while (!list_empty(head)) {
int ret2;
- data = list_entry(head->next, struct nfs_read_data, list);
+ data = list_first_entry(head, struct nfs_read_data, list);
list_del_init(&data->list);
ret2 = nfs_do_read(data, call_ops);
@@ -275,6 +308,24 @@ nfs_async_read_error(struct list_head *head)
}
}
+static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
+ .error_cleanup = nfs_async_read_error,
+ .completion = nfs_read_completion,
+};
+
+static void nfs_pagein_error(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr)
+{
+ set_bit(NFS_IOHDR_REDO, &hdr->flags);
+ while (!list_empty(&hdr->rpc_list)) {
+ struct nfs_read_data *data = list_first_entry(&hdr->rpc_list,
+ struct nfs_read_data, list);
+ list_del(&data->list);
+ nfs_readdata_release(data);
+ }
+ desc->pg_completion_ops->error_cleanup(&desc->pg_list);
+}
+
/*
* Generate multiple requests to fill a single page.
*
@@ -288,93 +339,95 @@ nfs_async_read_error(struct list_head *head)
* won't see the new data until our attribute cache is updated. This is more
* or less conventional NFS client behavior.
*/
-static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc, struct list_head *res)
+static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr)
{
- struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
+ struct nfs_page *req = hdr->req;
struct page *page = req->wb_page;
struct nfs_read_data *data;
size_t rsize = desc->pg_bsize, nbytes;
unsigned int offset;
- int requests = 0;
- int ret = 0;
-
- nfs_list_remove_request(req);
offset = 0;
nbytes = desc->pg_count;
do {
size_t len = min(nbytes,rsize);
- data = nfs_readdata_alloc(1);
- if (!data)
- goto out_bad;
- data->pagevec[0] = page;
- nfs_read_rpcsetup(req, data, len, offset);
- list_add(&data->list, res);
- requests++;
+ data = nfs_readdata_alloc(hdr, 1);
+ if (!data) {
+ nfs_pagein_error(desc, hdr);
+ return -ENOMEM;
+ }
+ data->pages.pagevec[0] = page;
+ nfs_read_rpcsetup(data, len, offset);
+ list_add(&data->list, &hdr->rpc_list);
nbytes -= len;
offset += len;
- } while(nbytes != 0);
- atomic_set(&req->wb_complete, requests);
- desc->pg_rpc_callops = &nfs_read_partial_ops;
- return ret;
-out_bad:
- while (!list_empty(res)) {
- data = list_entry(res->next, struct nfs_read_data, list);
- list_del(&data->list);
- nfs_readdata_release(data);
- }
- nfs_readpage_release(req);
- return -ENOMEM;
+ } while (nbytes != 0);
+
+ nfs_list_remove_request(req);
+ nfs_list_add_request(req, &hdr->pages);
+ desc->pg_rpc_callops = &nfs_read_common_ops;
+ return 0;
}
-static int nfs_pagein_one(struct nfs_pageio_descriptor *desc, struct list_head *res)
+static int nfs_pagein_one(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr)
{
struct nfs_page *req;
struct page **pages;
- struct nfs_read_data *data;
+ struct nfs_read_data *data;
struct list_head *head = &desc->pg_list;
- int ret = 0;
- data = nfs_readdata_alloc(nfs_page_array_len(desc->pg_base,
- desc->pg_count));
+ data = nfs_readdata_alloc(hdr, nfs_page_array_len(desc->pg_base,
+ desc->pg_count));
if (!data) {
- nfs_async_read_error(head);
- ret = -ENOMEM;
- goto out;
+ nfs_pagein_error(desc, hdr);
+ return -ENOMEM;
}
- pages = data->pagevec;
+ pages = data->pages.pagevec;
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
- nfs_list_add_request(req, &data->pages);
+ nfs_list_add_request(req, &hdr->pages);
*pages++ = req->wb_page;
}
- req = nfs_list_entry(data->pages.next);
- nfs_read_rpcsetup(req, data, desc->pg_count, 0);
- list_add(&data->list, res);
- desc->pg_rpc_callops = &nfs_read_full_ops;
-out:
- return ret;
+ nfs_read_rpcsetup(data, desc->pg_count, 0);
+ list_add(&data->list, &hdr->rpc_list);
+ desc->pg_rpc_callops = &nfs_read_common_ops;
+ return 0;
}
-int nfs_generic_pagein(struct nfs_pageio_descriptor *desc, struct list_head *head)
+int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr)
{
if (desc->pg_bsize < PAGE_CACHE_SIZE)
- return nfs_pagein_multi(desc, head);
- return nfs_pagein_one(desc, head);
+ return nfs_pagein_multi(desc, hdr);
+ return nfs_pagein_one(desc, hdr);
}
static int nfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
- LIST_HEAD(head);
+ struct nfs_read_header *rhdr;
+ struct nfs_pgio_header *hdr;
int ret;
- ret = nfs_generic_pagein(desc, &head);
+ rhdr = nfs_readhdr_alloc();
+ if (!rhdr) {
+ desc->pg_completion_ops->error_cleanup(&desc->pg_list);
+ return -ENOMEM;
+ }
+ hdr = &rhdr->header;
+ nfs_pgheader_init(desc, hdr, nfs_readhdr_free);
+ atomic_inc(&hdr->refcnt);
+ ret = nfs_generic_pagein(desc, hdr);
if (ret == 0)
- ret = nfs_do_multiple_reads(&head, desc->pg_rpc_callops);
+ ret = nfs_do_multiple_reads(&hdr->rpc_list,
+ desc->pg_rpc_callops);
+ if (atomic_dec_and_test(&hdr->refcnt))
+ hdr->completion_ops->completion(hdr);
return ret;
}
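
nfs_generic_pg_readpages() now pins the pgio header with an extra reference while it queues RPCs, and the completion callback runs only when the last reference is dropped, whether that happens in the final RPC release or in the queueing path itself. A hedged sketch of that refcounting pattern using C11 atomics (illustrative names, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

struct hdr {
	atomic_int refcnt;
	void (*completion)(struct hdr *);
};

static void hdr_get(struct hdr *h)
{
	atomic_fetch_add(&h->refcnt, 1);
}

static void hdr_put(struct hdr *h)
{
	/* The completion runs exactly once, when the count reaches zero. */
	if (atomic_fetch_sub(&h->refcnt, 1) == 1)
		h->completion(h);
}

static void all_done(struct hdr *h)
{
	(void)h;
	printf("every outstanding RPC has finished\n");
}

int main(void)
{
	struct hdr h;

	atomic_init(&h.refcnt, 0);
	h.completion = all_done;

	hdr_get(&h);	/* the issuer's reference, as in nfs_generic_pg_readpages() */
	hdr_get(&h);	/* one reference per queued RPC */
	hdr_put(&h);	/* an RPC completes */
	hdr_put(&h);	/* issuer drops its reference: completion fires here */
	return 0;
}
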
@@ -389,20 +442,21 @@ static const struct nfs_pageio_ops nfs_pageio_read_ops = {
*/
int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
{
+ struct inode *inode = data->header->inode;
int status;
dprintk("NFS: %s: %5u, (status %d)\n", __func__, task->tk_pid,
task->tk_status);
- status = NFS_PROTO(data->inode)->read_done(task, data);
+ status = NFS_PROTO(inode)->read_done(task, data);
if (status != 0)
return status;
- nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count);
+ nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, data->res.count);
if (task->tk_status == -ESTALE) {
- set_bit(NFS_INO_STALE, &NFS_I(data->inode)->flags);
- nfs_mark_for_revalidate(data->inode);
+ set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
+ nfs_mark_for_revalidate(inode);
}
return 0;
}
@@ -412,15 +466,13 @@ static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data
struct nfs_readargs *argp = &data->args;
struct nfs_readres *resp = &data->res;
- if (resp->eof || resp->count == argp->count)
- return;
-
/* This is a short read! */
- nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
+ nfs_inc_stats(data->header->inode, NFSIOS_SHORTREAD);
/* Has the server at least made some progress? */
- if (resp->count == 0)
+ if (resp->count == 0) {
+ nfs_set_pgio_error(data->header, -EIO, argp->offset);
return;
-
+ }
/* Yes, so retry the read at the end of the data */
data->mds_offset += resp->count;
argp->offset += resp->count;
@@ -429,114 +481,46 @@ static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data
rpc_restart_call_prepare(task);
}
-/*
- * Handle a read reply that fills part of a page.
- */
-static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
+static void nfs_readpage_result_common(struct rpc_task *task, void *calldata)
{
struct nfs_read_data *data = calldata;
-
+ struct nfs_pgio_header *hdr = data->header;
+
+ /* Note the only returns of nfs_readpage_result are 0 and -EAGAIN */
if (nfs_readpage_result(task, data) != 0)
return;
if (task->tk_status < 0)
- return;
-
- nfs_readpage_truncate_uninitialised_page(data);
- nfs_readpage_retry(task, data);
+ nfs_set_pgio_error(hdr, task->tk_status, data->args.offset);
+ else if (data->res.eof) {
+ loff_t bound;
+
+ bound = data->args.offset + data->res.count;
+ spin_lock(&hdr->lock);
+ if (bound < hdr->io_start + hdr->good_bytes) {
+ set_bit(NFS_IOHDR_EOF, &hdr->flags);
+ clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
+ hdr->good_bytes = bound - hdr->io_start;
+ }
+ spin_unlock(&hdr->lock);
+ } else if (data->res.count != data->args.count)
+ nfs_readpage_retry(task, data);
}
-static void nfs_readpage_release_partial(void *calldata)
+static void nfs_readpage_release_common(void *calldata)
{
- struct nfs_read_data *data = calldata;
- struct nfs_page *req = data->req;
- struct page *page = req->wb_page;
- int status = data->task.tk_status;
-
- if (status < 0)
- set_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags);
-
- if (atomic_dec_and_test(&req->wb_complete)) {
- if (!test_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags))
- SetPageUptodate(page);
- nfs_readpage_release(req);
- }
nfs_readdata_release(calldata);
}
void nfs_read_prepare(struct rpc_task *task, void *calldata)
{
struct nfs_read_data *data = calldata;
- NFS_PROTO(data->inode)->read_rpc_prepare(task, data);
-}
-
-static const struct rpc_call_ops nfs_read_partial_ops = {
- .rpc_call_prepare = nfs_read_prepare,
- .rpc_call_done = nfs_readpage_result_partial,
- .rpc_release = nfs_readpage_release_partial,
-};
-
-static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
-{
- unsigned int count = data->res.count;
- unsigned int base = data->args.pgbase;
- struct page **pages;
-
- if (data->res.eof)
- count = data->args.count;
- if (unlikely(count == 0))
- return;
- pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
- base &= ~PAGE_CACHE_MASK;
- count += base;
- for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
- SetPageUptodate(*pages);
- if (count == 0)
- return;
- /* Was this a short read? */
- if (data->res.eof || data->res.count == data->args.count)
- SetPageUptodate(*pages);
-}
-
-/*
- * This is the callback from RPC telling us whether a reply was
- * received or some error occurred (timeout or socket shutdown).
- */
-static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
-{
- struct nfs_read_data *data = calldata;
-
- if (nfs_readpage_result(task, data) != 0)
- return;
- if (task->tk_status < 0)
- return;
- /*
- * Note: nfs_readpage_retry may change the values of
- * data->args. In the multi-page case, we therefore need
- * to ensure that we call nfs_readpage_set_pages_uptodate()
- * first.
- */
- nfs_readpage_truncate_uninitialised_page(data);
- nfs_readpage_set_pages_uptodate(data);
- nfs_readpage_retry(task, data);
-}
-
-static void nfs_readpage_release_full(void *calldata)
-{
- struct nfs_read_data *data = calldata;
-
- while (!list_empty(&data->pages)) {
- struct nfs_page *req = nfs_list_entry(data->pages.next);
-
- nfs_list_remove_request(req);
- nfs_readpage_release(req);
- }
- nfs_readdata_release(calldata);
+ NFS_PROTO(data->header->inode)->read_rpc_prepare(task, data);
}
-static const struct rpc_call_ops nfs_read_full_ops = {
+static const struct rpc_call_ops nfs_read_common_ops = {
.rpc_call_prepare = nfs_read_prepare,
- .rpc_call_done = nfs_readpage_result_full,
- .rpc_release = nfs_readpage_release_full,
+ .rpc_call_done = nfs_readpage_result_common,
+ .rpc_release = nfs_readpage_release_common,
};
/*
@@ -668,11 +652,12 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
if (ret == 0)
goto read_complete; /* all pages were read */
- nfs_pageio_init_read(&pgio, inode);
+ nfs_pageio_init_read(&pgio, inode, &nfs_async_read_completion_ops);
ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
nfs_pageio_complete(&pgio);
+ NFS_I(inode)->read_io += pgio.pg_bytes_written;
npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
@@ -684,7 +669,7 @@ out:
int __init nfs_init_readpagecache(void)
{
nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
- sizeof(struct nfs_read_data),
+ sizeof(struct nfs_read_header),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (nfs_rdata_cachep == NULL)
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 4ac7fca7e4b..ff656c02268 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -66,6 +66,7 @@
#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_VFS
+#define NFS_TEXT_DATA 1
#ifdef CONFIG_NFS_V3
#define NFS_DEFAULT_VERSION 3
@@ -277,12 +278,22 @@ static match_table_t nfs_vers_tokens = {
{ Opt_vers_err, NULL }
};
+struct nfs_mount_info {
+ void (*fill_super)(struct super_block *, struct nfs_mount_info *);
+ int (*set_security)(struct super_block *, struct dentry *, struct nfs_mount_info *);
+ struct nfs_parsed_mount_data *parsed;
+ struct nfs_clone_mount *cloned;
+ struct nfs_fh *mntfh;
+};
+
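+
+The new struct nfs_mount_info gathers the parsed options, the file handle and two per-flavour callbacks so one common mount path can serve plain, v4 and cloned superblocks. A simplified sketch of that callback-dispatch style (types and names are made up for illustration):
+
+#include <stdio.h>
+
+struct mount_info {
+	void (*fill_super)(struct mount_info *);
+	int  (*set_security)(struct mount_info *);
+	const char *label;	/* stands in for parsed/cloned/mntfh */
+};
+
+/* The shared mount path only ever works through the callbacks. */
+static int mount_common(struct mount_info *mi)
+{
+	mi->fill_super(mi);
+	return mi->set_security(mi);
+}
+
+static void fill_plain(struct mount_info *mi)
+{
+	printf("fill a fresh %s superblock\n", mi->label);
+}
+
+static int set_fresh_security(struct mount_info *mi)
+{
+	printf("apply security options for %s\n", mi->label);
+	return 0;
+}
+
+int main(void)
+{
+	struct mount_info mi = {
+		.fill_super = fill_plain,
+		.set_security = set_fresh_security,
+		.label = "nfs",
+	};
+
+	return mount_common(&mi);
+}
+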
static void nfs_umount_begin(struct super_block *);
static int nfs_statfs(struct dentry *, struct kstatfs *);
static int nfs_show_options(struct seq_file *, struct dentry *);
static int nfs_show_devname(struct seq_file *, struct dentry *);
static int nfs_show_path(struct seq_file *, struct dentry *);
static int nfs_show_stats(struct seq_file *, struct dentry *);
+static struct dentry *nfs_fs_mount_common(struct file_system_type *,
+ struct nfs_server *, int, const char *, struct nfs_mount_info *);
static struct dentry *nfs_fs_mount(struct file_system_type *,
int, const char *, void *);
static struct dentry *nfs_xdev_mount(struct file_system_type *fs_type,
@@ -323,12 +334,11 @@ static const struct super_operations nfs_sops = {
};
#ifdef CONFIG_NFS_V4
-static int nfs4_validate_text_mount_data(void *options,
+static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *);
+static int nfs4_validate_mount_data(void *options,
struct nfs_parsed_mount_data *args, const char *dev_name);
static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
- struct nfs_parsed_mount_data *data);
-static struct dentry *nfs4_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *raw_data);
+ struct nfs_mount_info *mount_info);
static struct dentry *nfs4_remote_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *raw_data);
static struct dentry *nfs4_xdev_mount(struct file_system_type *fs_type,
@@ -342,7 +352,7 @@ static void nfs4_kill_super(struct super_block *sb);
static struct file_system_type nfs4_fs_type = {
.owner = THIS_MODULE,
.name = "nfs4",
- .mount = nfs4_mount,
+ .mount = nfs_fs_mount,
.kill_sb = nfs4_kill_super,
.fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
};
@@ -786,8 +796,8 @@ static void show_pnfs(struct seq_file *m, struct nfs_server *server)
static void show_implementation_id(struct seq_file *m, struct nfs_server *nfss)
{
- if (nfss->nfs_client && nfss->nfs_client->impl_id) {
- struct nfs41_impl_id *impl_id = nfss->nfs_client->impl_id;
+ if (nfss->nfs_client && nfss->nfs_client->cl_implid) {
+ struct nfs41_impl_id *impl_id = nfss->nfs_client->cl_implid;
seq_printf(m, "\n\timpl_id:\tname='%s',domain='%s',"
"date='%llu,%u'",
impl_id->name, impl_id->domain,
@@ -938,7 +948,7 @@ static void nfs_umount_begin(struct super_block *sb)
rpc_killall_tasks(rpc);
}
-static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int version)
+static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(void)
{
struct nfs_parsed_mount_data *data;
@@ -953,8 +963,8 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int ve
data->nfs_server.protocol = XPRT_TRANSPORT_TCP;
data->auth_flavors[0] = RPC_AUTH_UNIX;
data->auth_flavor_len = 1;
- data->version = version;
data->minorversion = 0;
+ data->need_mount = true;
data->net = current->nsproxy->net_ns;
security_init_mnt_opts(&data->lsm_opts);
}
@@ -1674,8 +1684,8 @@ static int nfs_walk_authlist(struct nfs_parsed_mount_data *args,
* Use the remote server's MOUNT service to request the NFS file handle
* corresponding to the provided path.
*/
-static int nfs_try_mount(struct nfs_parsed_mount_data *args,
- struct nfs_fh *root_fh)
+static int nfs_request_mount(struct nfs_parsed_mount_data *args,
+ struct nfs_fh *root_fh)
{
rpc_authflavor_t server_authlist[NFS_MAX_SECFLAVORS];
unsigned int server_authlist_len = ARRAY_SIZE(server_authlist);
@@ -1738,6 +1748,26 @@ static int nfs_try_mount(struct nfs_parsed_mount_data *args,
return nfs_walk_authlist(args, &request);
}
+static struct dentry *nfs_try_mount(int flags, const char *dev_name,
+ struct nfs_mount_info *mount_info)
+{
+ int status;
+ struct nfs_server *server;
+
+ if (mount_info->parsed->need_mount) {
+ status = nfs_request_mount(mount_info->parsed, mount_info->mntfh);
+ if (status)
+ return ERR_PTR(status);
+ }
+
+ /* Get a volume representation */
+ server = nfs_create_server(mount_info->parsed, mount_info->mntfh);
+ if (IS_ERR(server))
+ return ERR_CAST(server);
+
+ return nfs_fs_mount_common(&nfs_fs_type, server, flags, dev_name, mount_info);
+}
+
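
nfs_try_mount() reports failure through the returned pointer itself via ERR_PTR()/ERR_CAST()/IS_ERR() rather than a separate status argument. A rough user-space rendering of that convention, simplified from the kernel's err.h:

#include <errno.h>
#include <stdio.h>

/*
 * Simplified from include/linux/err.h: small negative errno values are
 * encoded at the top of the address space, which no valid object occupies.
 */
#define MAX_ERRNO 4095

static void *ERR_PTR(long error)	{ return (void *)error; }
static long PTR_ERR(const void *ptr)	{ return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *create_server(int fail)
{
	static int server;	/* stand-in for a real server object */

	if (fail)
		return ERR_PTR(-ENOMEM);
	return &server;
}

int main(void)
{
	void *s = create_server(1);

	if (IS_ERR(s))
		printf("mount failed: %ld\n", PTR_ERR(s));
	return 0;
}
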
/*
* Split "dev_name" into "hostname:export_path".
*
@@ -1826,10 +1856,10 @@ out_path:
* + breaking back: trying proto=udp after proto=tcp, v2 after v3,
* mountproto=tcp after mountproto=udp, and so on
*/
-static int nfs_validate_mount_data(void *options,
- struct nfs_parsed_mount_data *args,
- struct nfs_fh *mntfh,
- const char *dev_name)
+static int nfs23_validate_mount_data(void *options,
+ struct nfs_parsed_mount_data *args,
+ struct nfs_fh *mntfh,
+ const char *dev_name)
{
struct nfs_mount_data *data = (struct nfs_mount_data *)options;
struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
@@ -1883,6 +1913,7 @@ static int nfs_validate_mount_data(void *options,
args->acregmax = data->acregmax;
args->acdirmin = data->acdirmin;
args->acdirmax = data->acdirmax;
+ args->need_mount = false;
memcpy(sap, &data->addr, sizeof(data->addr));
args->nfs_server.addrlen = sizeof(data->addr);
@@ -1934,43 +1965,8 @@ static int nfs_validate_mount_data(void *options,
}
break;
- default: {
- int status;
-
- if (nfs_parse_mount_options((char *)options, args) == 0)
- return -EINVAL;
-
- if (!nfs_verify_server_address(sap))
- goto out_no_address;
-
- if (args->version == 4)
-#ifdef CONFIG_NFS_V4
- return nfs4_validate_text_mount_data(options,
- args, dev_name);
-#else
- goto out_v4_not_compiled;
-#endif
-
- nfs_set_port(sap, &args->nfs_server.port, 0);
-
- nfs_set_mount_transport_protocol(args);
-
- status = nfs_parse_devname(dev_name,
- &args->nfs_server.hostname,
- PAGE_SIZE,
- &args->nfs_server.export_path,
- NFS_MAXPATHLEN);
- if (!status)
- status = nfs_try_mount(args, mntfh);
-
- kfree(args->nfs_server.export_path);
- args->nfs_server.export_path = NULL;
-
- if (status)
- return status;
-
- break;
- }
+ default:
+ return NFS_TEXT_DATA;
}
#ifndef CONFIG_NFS_V3
@@ -1999,12 +1995,6 @@ out_v3_not_compiled:
return -EPROTONOSUPPORT;
#endif /* !CONFIG_NFS_V3 */
-#ifndef CONFIG_NFS_V4
-out_v4_not_compiled:
- dfprintk(MOUNT, "NFS: NFSv4 is not compiled into kernel\n");
- return -EPROTONOSUPPORT;
-#endif /* !CONFIG_NFS_V4 */
-
out_nomem:
dfprintk(MOUNT, "NFS: not enough memory to handle mount options\n");
return -ENOMEM;
@@ -2018,6 +2008,82 @@ out_invalid_fh:
return -EINVAL;
}
+#ifdef CONFIG_NFS_V4
+static int nfs_validate_mount_data(struct file_system_type *fs_type,
+ void *options,
+ struct nfs_parsed_mount_data *args,
+ struct nfs_fh *mntfh,
+ const char *dev_name)
+{
+ if (fs_type == &nfs_fs_type)
+ return nfs23_validate_mount_data(options, args, mntfh, dev_name);
+ return nfs4_validate_mount_data(options, args, dev_name);
+}
+#else
+static int nfs_validate_mount_data(struct file_system_type *fs_type,
+ void *options,
+ struct nfs_parsed_mount_data *args,
+ struct nfs_fh *mntfh,
+ const char *dev_name)
+{
+ return nfs23_validate_mount_data(options, args, mntfh, dev_name);
+}
+#endif
+
+static int nfs_validate_text_mount_data(void *options,
+ struct nfs_parsed_mount_data *args,
+ const char *dev_name)
+{
+ int port = 0;
+ int max_namelen = PAGE_SIZE;
+ int max_pathlen = NFS_MAXPATHLEN;
+ struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
+
+ if (nfs_parse_mount_options((char *)options, args) == 0)
+ return -EINVAL;
+
+ if (!nfs_verify_server_address(sap))
+ goto out_no_address;
+
+ if (args->version == 4) {
+#ifdef CONFIG_NFS_V4
+ port = NFS_PORT;
+ max_namelen = NFS4_MAXNAMLEN;
+ max_pathlen = NFS4_MAXPATHLEN;
+ nfs_validate_transport_protocol(args);
+ nfs4_validate_mount_flags(args);
+#else
+ goto out_v4_not_compiled;
+#endif /* CONFIG_NFS_V4 */
+ } else
+ nfs_set_mount_transport_protocol(args);
+
+ nfs_set_port(sap, &args->nfs_server.port, port);
+
+ if (args->auth_flavor_len > 1)
+ goto out_bad_auth;
+
+ return nfs_parse_devname(dev_name,
+ &args->nfs_server.hostname,
+ max_namelen,
+ &args->nfs_server.export_path,
+ max_pathlen);
+
+#ifndef CONFIG_NFS_V4
+out_v4_not_compiled:
+ dfprintk(MOUNT, "NFS: NFSv4 is not compiled into kernel\n");
+ return -EPROTONOSUPPORT;
+#endif /* !CONFIG_NFS_V4 */
+
+out_no_address:
+ dfprintk(MOUNT, "NFS: mount program didn't pass remote address\n");
+ return -EINVAL;
+
+out_bad_auth:
+ dfprintk(MOUNT, "NFS: Too many RPC auth flavours specified\n");
+ return -EINVAL;
+}
+
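
The tail call to nfs_parse_devname() splits "hostname:/export/path" and enforces the per-version name and path limits chosen above. A much-simplified sketch of such a split; the real helper also handles bracketed IPv6 literals and hostname lists, which this ignores:

#include <stdio.h>
#include <string.h>

/*
 * Split "host:/export" at the last ':' and reject empty or oversized
 * parts.  Only an approximation of the kernel's parsing rules.
 */
static int parse_devname(const char *dev_name,
			 char *host, size_t hostlen,
			 char *path, size_t pathlen)
{
	const char *colon = strrchr(dev_name, ':');
	size_t namelen;

	if (colon == NULL || colon == dev_name || colon[1] == '\0')
		return -1;		/* not of the form "host:path" */
	namelen = (size_t)(colon - dev_name);
	if (namelen >= hostlen || strlen(colon + 1) >= pathlen)
		return -1;		/* hostname or export path too long */

	memcpy(host, dev_name, namelen);
	host[namelen] = '\0';
	strcpy(path, colon + 1);
	return 0;
}

int main(void)
{
	char host[256], path[1024];

	if (parse_devname("server.example.com:/export/home",
			  host, sizeof(host), path, sizeof(path)) == 0)
		printf("host=%s path=%s\n", host, path);
	return 0;
}
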
static int
nfs_compare_remount_data(struct nfs_server *nfss,
struct nfs_parsed_mount_data *data)
@@ -2129,8 +2195,9 @@ static inline void nfs_initialise_sb(struct super_block *sb)
* Finish setting up an NFS2/3 superblock
*/
static void nfs_fill_super(struct super_block *sb,
- struct nfs_parsed_mount_data *data)
+ struct nfs_mount_info *mount_info)
{
+ struct nfs_parsed_mount_data *data = mount_info->parsed;
struct nfs_server *server = NFS_SB(sb);
sb->s_blocksize_bits = 0;
@@ -2154,8 +2221,9 @@ static void nfs_fill_super(struct super_block *sb,
* Finish setting up a cloned NFS2/3 superblock
*/
static void nfs_clone_super(struct super_block *sb,
- const struct super_block *old_sb)
+ struct nfs_mount_info *mount_info)
{
+ const struct super_block *old_sb = mount_info->cloned->sb;
struct nfs_server *server = NFS_SB(sb);
sb->s_blocksize_bits = old_sb->s_blocksize_bits;
@@ -2278,52 +2346,70 @@ static int nfs_compare_super(struct super_block *sb, void *data)
return nfs_compare_mount_options(sb, server, mntflags);
}
+#ifdef CONFIG_NFS_FSCACHE
+static void nfs_get_cache_cookie(struct super_block *sb,
+ struct nfs_parsed_mount_data *parsed,
+ struct nfs_clone_mount *cloned)
+{
+ char *uniq = NULL;
+ int ulen = 0;
+
+ if (parsed && parsed->fscache_uniq) {
+ uniq = parsed->fscache_uniq;
+ ulen = strlen(parsed->fscache_uniq);
+ } else if (cloned) {
+ struct nfs_server *mnt_s = NFS_SB(cloned->sb);
+ if (mnt_s->fscache_key) {
+ uniq = mnt_s->fscache_key->key.uniquifier;
+ ulen = mnt_s->fscache_key->key.uniq_len;
+ };
+ }
+
+ nfs_fscache_get_super_cookie(sb, uniq, ulen);
+}
+#else
+static void nfs_get_cache_cookie(struct super_block *sb,
+ struct nfs_parsed_mount_data *parsed,
+ struct nfs_clone_mount *cloned)
+{
+}
+#endif
+
static int nfs_bdi_register(struct nfs_server *server)
{
return bdi_register_dev(&server->backing_dev_info, server->s_dev);
}
-static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *raw_data)
+static int nfs_set_sb_security(struct super_block *s, struct dentry *mntroot,
+ struct nfs_mount_info *mount_info)
+{
+ return security_sb_set_mnt_opts(s, &mount_info->parsed->lsm_opts);
+}
+
+static int nfs_clone_sb_security(struct super_block *s, struct dentry *mntroot,
+ struct nfs_mount_info *mount_info)
+{
+ /* clone any lsm security options from the parent to the new sb */
+ security_sb_clone_mnt_opts(mount_info->cloned->sb, s);
+ if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops)
+ return -ESTALE;
+ return 0;
+}
+
+static struct dentry *nfs_fs_mount_common(struct file_system_type *fs_type,
+ struct nfs_server *server,
+ int flags, const char *dev_name,
+ struct nfs_mount_info *mount_info)
{
- struct nfs_server *server = NULL;
struct super_block *s;
- struct nfs_parsed_mount_data *data;
- struct nfs_fh *mntfh;
struct dentry *mntroot = ERR_PTR(-ENOMEM);
int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
struct nfs_sb_mountdata sb_mntdata = {
.mntflags = flags,
+ .server = server,
};
int error;
- data = nfs_alloc_parsed_mount_data(NFS_DEFAULT_VERSION);
- mntfh = nfs_alloc_fhandle();
- if (data == NULL || mntfh == NULL)
- goto out;
-
- /* Validate the mount data */
- error = nfs_validate_mount_data(raw_data, data, mntfh, dev_name);
- if (error < 0) {
- mntroot = ERR_PTR(error);
- goto out;
- }
-
-#ifdef CONFIG_NFS_V4
- if (data->version == 4) {
- mntroot = nfs4_try_mount(flags, dev_name, data);
- goto out;
- }
-#endif /* CONFIG_NFS_V4 */
-
- /* Get a volume representation */
- server = nfs_create_server(data, mntfh);
- if (IS_ERR(server)) {
- mntroot = ERR_CAST(server);
- goto out;
- }
- sb_mntdata.server = server;
-
if (server->flags & NFS_MOUNT_UNSHARED)
compare_super = NULL;
@@ -2351,23 +2437,21 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
if (!s->s_root) {
/* initial superblock/root creation */
- nfs_fill_super(s, data);
- nfs_fscache_get_super_cookie(s, data->fscache_uniq, NULL);
+ mount_info->fill_super(s, mount_info);
+ nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned);
}
- mntroot = nfs_get_root(s, mntfh, dev_name);
+ mntroot = nfs_get_root(s, mount_info->mntfh, dev_name);
if (IS_ERR(mntroot))
goto error_splat_super;
- error = security_sb_set_mnt_opts(s, &data->lsm_opts);
+ error = mount_info->set_security(s, mntroot, mount_info);
if (error)
goto error_splat_root;
s->s_flags |= MS_ACTIVE;
out:
- nfs_free_parsed_mount_data(data);
- nfs_free_fhandle(mntfh);
return mntroot;
out_err_nosb:
@@ -2385,6 +2469,43 @@ error_splat_bdi:
goto out;
}
+static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data)
+{
+ struct nfs_mount_info mount_info = {
+ .fill_super = nfs_fill_super,
+ .set_security = nfs_set_sb_security,
+ };
+ struct dentry *mntroot = ERR_PTR(-ENOMEM);
+ int error;
+
+ mount_info.parsed = nfs_alloc_parsed_mount_data();
+ mount_info.mntfh = nfs_alloc_fhandle();
+ if (mount_info.parsed == NULL || mount_info.mntfh == NULL)
+ goto out;
+
+ /* Validate the mount data */
+ error = nfs_validate_mount_data(fs_type, raw_data, mount_info.parsed, mount_info.mntfh, dev_name);
+ if (error == NFS_TEXT_DATA)
+ error = nfs_validate_text_mount_data(raw_data, mount_info.parsed, dev_name);
+ if (error < 0) {
+ mntroot = ERR_PTR(error);
+ goto out;
+ }
+
+#ifdef CONFIG_NFS_V4
+ if (mount_info.parsed->version == 4)
+ mntroot = nfs4_try_mount(flags, dev_name, &mount_info);
+ else
+#endif /* CONFIG_NFS_V4 */
+ mntroot = nfs_try_mount(flags, dev_name, &mount_info);
+
+out:
+ nfs_free_parsed_mount_data(mount_info.parsed);
+ nfs_free_fhandle(mount_info.mntfh);
+ return mntroot;
+}
+
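
nfs_fs_mount() now tries the binary mount-data parser first and falls back to the text parser when it gets the NFS_TEXT_DATA sentinel back. A minimal sketch of that two-stage validation pattern (names invented for illustration):

#include <stdio.h>

#define TEXT_DATA 1	/* positive sentinel: "not binary, try the text parser" */

static int validate_binary(const char *raw)
{
	if (raw[0] != 'B')	/* pretend a leading 'B' marks a binary blob */
		return TEXT_DATA;
	printf("parsed binary mount data\n");
	return 0;
}

static int validate_text(const char *raw)
{
	printf("parsed text options: %s\n", raw);
	return 0;
}

static int validate(const char *raw)
{
	int error = validate_binary(raw);

	if (error == TEXT_DATA)		/* fall back, as nfs_fs_mount() does */
		error = validate_text(raw);
	return error;			/* a negative value would be a hard failure */
}

int main(void)
{
	return validate("vers=3,proto=tcp");
}
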
/*
* Ensure that we unregister the bdi before kill_anon_super
* releases the device name
@@ -2409,93 +2530,51 @@ static void nfs_kill_super(struct super_block *s)
}
/*
- * Clone an NFS2/3 server record on xdev traversal (FSID-change)
+ * Clone an NFS2/3/4 server record on xdev traversal (FSID-change)
*/
static struct dentry *
-nfs_xdev_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *raw_data)
+nfs_xdev_mount_common(struct file_system_type *fs_type, int flags,
+ const char *dev_name, struct nfs_mount_info *mount_info)
{
- struct nfs_clone_mount *data = raw_data;
- struct super_block *s;
+ struct nfs_clone_mount *data = mount_info->cloned;
struct nfs_server *server;
- struct dentry *mntroot;
- int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
- struct nfs_sb_mountdata sb_mntdata = {
- .mntflags = flags,
- };
+ struct dentry *mntroot = ERR_PTR(-ENOMEM);
int error;
- dprintk("--> nfs_xdev_mount()\n");
+ dprintk("--> nfs_xdev_mount_common()\n");
+
+ mount_info->mntfh = data->fh;
/* create a new volume representation */
server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor);
if (IS_ERR(server)) {
error = PTR_ERR(server);
- goto out_err_noserver;
- }
- sb_mntdata.server = server;
-
- if (server->flags & NFS_MOUNT_UNSHARED)
- compare_super = NULL;
-
- /* -o noac implies -o sync */
- if (server->flags & NFS_MOUNT_NOAC)
- sb_mntdata.mntflags |= MS_SYNCHRONOUS;
-
- /* Get a superblock - note that we may end up sharing one that already exists */
- s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata);
- if (IS_ERR(s)) {
- error = PTR_ERR(s);
- goto out_err_nosb;
- }
-
- if (s->s_fs_info != server) {
- nfs_free_server(server);
- server = NULL;
- } else {
- error = nfs_bdi_register(server);
- if (error)
- goto error_splat_bdi;
- }
-
- if (!s->s_root) {
- /* initial superblock/root creation */
- nfs_clone_super(s, data->sb);
- nfs_fscache_get_super_cookie(s, NULL, data);
- }
-
- mntroot = nfs_get_root(s, data->fh, dev_name);
- if (IS_ERR(mntroot)) {
- error = PTR_ERR(mntroot);
- goto error_splat_super;
- }
- if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops) {
- dput(mntroot);
- error = -ESTALE;
- goto error_splat_super;
+ goto out_err;
}
- s->s_flags |= MS_ACTIVE;
-
- /* clone any lsm security options from the parent to the new sb */
- security_sb_clone_mnt_opts(data->sb, s);
-
- dprintk("<-- nfs_xdev_mount() = 0\n");
+ mntroot = nfs_fs_mount_common(fs_type, server, flags, dev_name, mount_info);
+ dprintk("<-- nfs_xdev_mount_common() = 0\n");
+out:
return mntroot;
-out_err_nosb:
- nfs_free_server(server);
-out_err_noserver:
- dprintk("<-- nfs_xdev_mount() = %d [error]\n", error);
- return ERR_PTR(error);
+out_err:
+ dprintk("<-- nfs_xdev_mount_common() = %d [error]\n", error);
+ goto out;
+}
-error_splat_super:
- if (server && !s->s_root)
- bdi_unregister(&server->backing_dev_info);
-error_splat_bdi:
- deactivate_locked_super(s);
- dprintk("<-- nfs_xdev_mount() = %d [splat]\n", error);
- return ERR_PTR(error);
+/*
+ * Clone an NFS2/3 server record on xdev traversal (FSID-change)
+ */
+static struct dentry *
+nfs_xdev_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *raw_data)
+{
+ struct nfs_mount_info mount_info = {
+ .fill_super = nfs_clone_super,
+ .set_security = nfs_clone_sb_security,
+ .cloned = raw_data,
+ };
+ return nfs_xdev_mount_common(&nfs_fs_type, flags, dev_name, &mount_info);
}
#ifdef CONFIG_NFS_V4
@@ -2504,8 +2583,9 @@ error_splat_bdi:
* Finish setting up a cloned NFS4 superblock
*/
static void nfs4_clone_super(struct super_block *sb,
- const struct super_block *old_sb)
+ struct nfs_mount_info *mount_info)
{
+ const struct super_block *old_sb = mount_info->cloned->sb;
sb->s_blocksize_bits = old_sb->s_blocksize_bits;
sb->s_blocksize = old_sb->s_blocksize;
sb->s_maxbytes = old_sb->s_maxbytes;
@@ -2523,7 +2603,8 @@ static void nfs4_clone_super(struct super_block *sb,
/*
* Set up an NFS4 superblock
*/
-static void nfs4_fill_super(struct super_block *sb)
+static void nfs4_fill_super(struct super_block *sb,
+ struct nfs_mount_info *mount_info)
{
sb->s_time_gran = 1;
sb->s_op = &nfs4_sops;
@@ -2542,37 +2623,6 @@ static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *args)
NFS_MOUNT_LOCAL_FLOCK|NFS_MOUNT_LOCAL_FCNTL);
}
-static int nfs4_validate_text_mount_data(void *options,
- struct nfs_parsed_mount_data *args,
- const char *dev_name)
-{
- struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
-
- nfs_set_port(sap, &args->nfs_server.port, NFS_PORT);
-
- nfs_validate_transport_protocol(args);
-
- nfs4_validate_mount_flags(args);
-
- if (args->version != 4) {
- dfprintk(MOUNT,
- "NFS4: Illegal mount version\n");
- return -EINVAL;
- }
-
- if (args->auth_flavor_len > 1) {
- dfprintk(MOUNT,
- "NFS4: Too many RPC auth flavours specified\n");
- return -EINVAL;
- }
-
- return nfs_parse_devname(dev_name,
- &args->nfs_server.hostname,
- NFS4_MAXNAMLEN,
- &args->nfs_server.export_path,
- NFS4_MAXPATHLEN);
-}
-
/*
* Validate NFSv4 mount options
*/
@@ -2643,13 +2693,7 @@ static int nfs4_validate_mount_data(void *options,
break;
default:
- if (nfs_parse_mount_options((char *)options, args) == 0)
- return -EINVAL;
-
- if (!nfs_verify_server_address(sap))
- return -EINVAL;
-
- return nfs4_validate_text_mount_data(options, args, dev_name);
+ return NFS_TEXT_DATA;
}
return 0;
@@ -2673,91 +2717,26 @@ out_no_address:
*/
static struct dentry *
nfs4_remote_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *raw_data)
+ const char *dev_name, void *info)
{
- struct nfs_parsed_mount_data *data = raw_data;
- struct super_block *s;
+ struct nfs_mount_info *mount_info = info;
struct nfs_server *server;
- struct nfs_fh *mntfh;
- struct dentry *mntroot;
- int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
- struct nfs_sb_mountdata sb_mntdata = {
- .mntflags = flags,
- };
- int error = -ENOMEM;
+ struct dentry *mntroot = ERR_PTR(-ENOMEM);
- mntfh = nfs_alloc_fhandle();
- if (data == NULL || mntfh == NULL)
- goto out;
+ mount_info->fill_super = nfs4_fill_super;
+ mount_info->set_security = nfs_set_sb_security;
/* Get a volume representation */
- server = nfs4_create_server(data, mntfh);
+ server = nfs4_create_server(mount_info->parsed, mount_info->mntfh);
if (IS_ERR(server)) {
- error = PTR_ERR(server);
+ mntroot = ERR_CAST(server);
goto out;
}
- sb_mntdata.server = server;
- if (server->flags & NFS4_MOUNT_UNSHARED)
- compare_super = NULL;
-
- /* -o noac implies -o sync */
- if (server->flags & NFS_MOUNT_NOAC)
- sb_mntdata.mntflags |= MS_SYNCHRONOUS;
-
- /* Get a superblock - note that we may end up sharing one that already exists */
- s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
- if (IS_ERR(s)) {
- error = PTR_ERR(s);
- goto out_free;
- }
-
- if (s->s_fs_info != server) {
- nfs_free_server(server);
- server = NULL;
- } else {
- error = nfs_bdi_register(server);
- if (error)
- goto error_splat_bdi;
- }
-
- if (!s->s_root) {
- /* initial superblock/root creation */
- nfs4_fill_super(s);
- nfs_fscache_get_super_cookie(s, data->fscache_uniq, NULL);
- }
-
- mntroot = nfs4_get_root(s, mntfh, dev_name);
- if (IS_ERR(mntroot)) {
- error = PTR_ERR(mntroot);
- goto error_splat_super;
- }
-
- error = security_sb_set_mnt_opts(s, &data->lsm_opts);
- if (error)
- goto error_splat_root;
-
- s->s_flags |= MS_ACTIVE;
-
- nfs_free_fhandle(mntfh);
- return mntroot;
+ mntroot = nfs_fs_mount_common(fs_type, server, flags, dev_name, mount_info);
out:
- nfs_free_fhandle(mntfh);
- return ERR_PTR(error);
-
-out_free:
- nfs_free_server(server);
- goto out;
-
-error_splat_root:
- dput(mntroot);
-error_splat_super:
- if (server && !s->s_root)
- bdi_unregister(&server->backing_dev_info);
-error_splat_bdi:
- deactivate_locked_super(s);
- goto out;
+ return mntroot;
}
static struct vfsmount *nfs_do_root_mount(struct file_system_type *fs_type,
@@ -2869,17 +2848,18 @@ static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt,
}
static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
- struct nfs_parsed_mount_data *data)
+ struct nfs_mount_info *mount_info)
{
char *export_path;
struct vfsmount *root_mnt;
struct dentry *res;
+ struct nfs_parsed_mount_data *data = mount_info->parsed;
dfprintk(MOUNT, "--> nfs4_try_mount()\n");
export_path = data->nfs_server.export_path;
data->nfs_server.export_path = "/";
- root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, data,
+ root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, mount_info,
data->nfs_server.hostname);
data->nfs_server.export_path = export_path;
@@ -2891,38 +2871,6 @@ static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
return res;
}
-/*
- * Get the superblock for an NFS4 mountpoint
- */
-static struct dentry *nfs4_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *raw_data)
-{
- struct nfs_parsed_mount_data *data;
- int error = -ENOMEM;
- struct dentry *res = ERR_PTR(-ENOMEM);
-
- data = nfs_alloc_parsed_mount_data(4);
- if (data == NULL)
- goto out;
-
- /* Validate the mount data */
- error = nfs4_validate_mount_data(raw_data, data, dev_name);
- if (error < 0) {
- res = ERR_PTR(error);
- goto out;
- }
-
- res = nfs4_try_mount(flags, dev_name, data);
- if (IS_ERR(res))
- error = PTR_ERR(res);
-
-out:
- nfs_free_parsed_mount_data(data);
- dprintk("<-- nfs4_mount() = %d%s\n", error,
- error != 0 ? " [error]" : "");
- return res;
-}
-
static void nfs4_kill_super(struct super_block *sb)
{
struct nfs_server *server = NFS_SB(sb);
@@ -2942,181 +2890,43 @@ static struct dentry *
nfs4_xdev_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *raw_data)
{
- struct nfs_clone_mount *data = raw_data;
- struct super_block *s;
- struct nfs_server *server;
- struct dentry *mntroot;
- int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
- struct nfs_sb_mountdata sb_mntdata = {
- .mntflags = flags,
+ struct nfs_mount_info mount_info = {
+ .fill_super = nfs4_clone_super,
+ .set_security = nfs_clone_sb_security,
+ .cloned = raw_data,
};
- int error;
-
- dprintk("--> nfs4_xdev_mount()\n");
-
- /* create a new volume representation */
- server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor);
- if (IS_ERR(server)) {
- error = PTR_ERR(server);
- goto out_err_noserver;
- }
- sb_mntdata.server = server;
-
- if (server->flags & NFS4_MOUNT_UNSHARED)
- compare_super = NULL;
-
- /* -o noac implies -o sync */
- if (server->flags & NFS_MOUNT_NOAC)
- sb_mntdata.mntflags |= MS_SYNCHRONOUS;
-
- /* Get a superblock - note that we may end up sharing one that already exists */
- s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
- if (IS_ERR(s)) {
- error = PTR_ERR(s);
- goto out_err_nosb;
- }
-
- if (s->s_fs_info != server) {
- nfs_free_server(server);
- server = NULL;
- } else {
- error = nfs_bdi_register(server);
- if (error)
- goto error_splat_bdi;
- }
-
- if (!s->s_root) {
- /* initial superblock/root creation */
- nfs4_clone_super(s, data->sb);
- nfs_fscache_get_super_cookie(s, NULL, data);
- }
-
- mntroot = nfs4_get_root(s, data->fh, dev_name);
- if (IS_ERR(mntroot)) {
- error = PTR_ERR(mntroot);
- goto error_splat_super;
- }
- if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops) {
- dput(mntroot);
- error = -ESTALE;
- goto error_splat_super;
- }
-
- s->s_flags |= MS_ACTIVE;
-
- security_sb_clone_mnt_opts(data->sb, s);
-
- dprintk("<-- nfs4_xdev_mount() = 0\n");
- return mntroot;
-
-out_err_nosb:
- nfs_free_server(server);
-out_err_noserver:
- dprintk("<-- nfs4_xdev_mount() = %d [error]\n", error);
- return ERR_PTR(error);
-
-error_splat_super:
- if (server && !s->s_root)
- bdi_unregister(&server->backing_dev_info);
-error_splat_bdi:
- deactivate_locked_super(s);
- dprintk("<-- nfs4_xdev_mount() = %d [splat]\n", error);
- return ERR_PTR(error);
+ return nfs_xdev_mount_common(&nfs4_fs_type, flags, dev_name, &mount_info);
}
static struct dentry *
nfs4_remote_referral_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *raw_data)
{
- struct nfs_clone_mount *data = raw_data;
- struct super_block *s;
- struct nfs_server *server;
- struct dentry *mntroot;
- struct nfs_fh *mntfh;
- int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
- struct nfs_sb_mountdata sb_mntdata = {
- .mntflags = flags,
+ struct nfs_mount_info mount_info = {
+ .fill_super = nfs4_fill_super,
+ .set_security = nfs_clone_sb_security,
+ .cloned = raw_data,
};
- int error = -ENOMEM;
+ struct nfs_server *server;
+ struct dentry *mntroot = ERR_PTR(-ENOMEM);
dprintk("--> nfs4_referral_get_sb()\n");
- mntfh = nfs_alloc_fhandle();
- if (mntfh == NULL)
- goto out_err_nofh;
+ mount_info.mntfh = nfs_alloc_fhandle();
+ if (mount_info.cloned == NULL || mount_info.mntfh == NULL)
+ goto out;
/* create a new volume representation */
- server = nfs4_create_referral_server(data, mntfh);
+ server = nfs4_create_referral_server(mount_info.cloned, mount_info.mntfh);
if (IS_ERR(server)) {
- error = PTR_ERR(server);
- goto out_err_noserver;
- }
- sb_mntdata.server = server;
-
- if (server->flags & NFS4_MOUNT_UNSHARED)
- compare_super = NULL;
-
- /* -o noac implies -o sync */
- if (server->flags & NFS_MOUNT_NOAC)
- sb_mntdata.mntflags |= MS_SYNCHRONOUS;
-
- /* Get a superblock - note that we may end up sharing one that already exists */
- s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
- if (IS_ERR(s)) {
- error = PTR_ERR(s);
- goto out_err_nosb;
- }
-
- if (s->s_fs_info != server) {
- nfs_free_server(server);
- server = NULL;
- } else {
- error = nfs_bdi_register(server);
- if (error)
- goto error_splat_bdi;
- }
-
- if (!s->s_root) {
- /* initial superblock/root creation */
- nfs4_fill_super(s);
- nfs_fscache_get_super_cookie(s, NULL, data);
- }
-
- mntroot = nfs4_get_root(s, mntfh, dev_name);
- if (IS_ERR(mntroot)) {
- error = PTR_ERR(mntroot);
- goto error_splat_super;
- }
- if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops) {
- dput(mntroot);
- error = -ESTALE;
- goto error_splat_super;
+ mntroot = ERR_CAST(server);
+ goto out;
}
- s->s_flags |= MS_ACTIVE;
-
- security_sb_clone_mnt_opts(data->sb, s);
-
- nfs_free_fhandle(mntfh);
- dprintk("<-- nfs4_referral_get_sb() = 0\n");
+ mntroot = nfs_fs_mount_common(&nfs4_fs_type, server, flags, dev_name, &mount_info);
+out:
+ nfs_free_fhandle(mount_info.mntfh);
return mntroot;
-
-out_err_nosb:
- nfs_free_server(server);
-out_err_noserver:
- nfs_free_fhandle(mntfh);
-out_err_nofh:
- dprintk("<-- nfs4_referral_get_sb() = %d [error]\n", error);
- return ERR_PTR(error);
-
-error_splat_super:
- if (server && !s->s_root)
- bdi_unregister(&server->backing_dev_info);
-error_splat_bdi:
- deactivate_locked_super(s);
- nfs_free_fhandle(mntfh);
- dprintk("<-- nfs4_referral_get_sb() = %d [splat]\n", error);
- return ERR_PTR(error);
}
/*
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index c07462320f6..e6fe3d69d14 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -39,20 +39,20 @@
/*
* Local function declarations
*/
-static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
- struct inode *inode, int ioflags);
static void nfs_redirty_request(struct nfs_page *req);
-static const struct rpc_call_ops nfs_write_partial_ops;
-static const struct rpc_call_ops nfs_write_full_ops;
+static const struct rpc_call_ops nfs_write_common_ops;
static const struct rpc_call_ops nfs_commit_ops;
+static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
+static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
+static struct kmem_cache *nfs_cdata_cachep;
static mempool_t *nfs_commit_mempool;
-struct nfs_write_data *nfs_commitdata_alloc(void)
+struct nfs_commit_data *nfs_commitdata_alloc(void)
{
- struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
+ struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
if (p) {
memset(p, 0, sizeof(*p));
@@ -62,46 +62,73 @@ struct nfs_write_data *nfs_commitdata_alloc(void)
}
EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);
-void nfs_commit_free(struct nfs_write_data *p)
+void nfs_commit_free(struct nfs_commit_data *p)
{
- if (p && (p->pagevec != &p->page_array[0]))
- kfree(p->pagevec);
mempool_free(p, nfs_commit_mempool);
}
EXPORT_SYMBOL_GPL(nfs_commit_free);
-struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
+struct nfs_write_header *nfs_writehdr_alloc(void)
{
- struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
+ struct nfs_write_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
if (p) {
+ struct nfs_pgio_header *hdr = &p->header;
+
memset(p, 0, sizeof(*p));
- INIT_LIST_HEAD(&p->pages);
- p->npages = pagecount;
- if (pagecount <= ARRAY_SIZE(p->page_array))
- p->pagevec = p->page_array;
- else {
- p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
- if (!p->pagevec) {
- mempool_free(p, nfs_wdata_mempool);
- p = NULL;
- }
- }
+ INIT_LIST_HEAD(&hdr->pages);
+ INIT_LIST_HEAD(&hdr->rpc_list);
+ spin_lock_init(&hdr->lock);
+ atomic_set(&hdr->refcnt, 0);
}
return p;
}
-void nfs_writedata_free(struct nfs_write_data *p)
+static struct nfs_write_data *nfs_writedata_alloc(struct nfs_pgio_header *hdr,
+ unsigned int pagecount)
+{
+ struct nfs_write_data *data, *prealloc;
+
+ prealloc = &container_of(hdr, struct nfs_write_header, header)->rpc_data;
+ if (prealloc->header == NULL)
+ data = prealloc;
+ else
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ goto out;
+
+ if (nfs_pgarray_set(&data->pages, pagecount)) {
+ data->header = hdr;
+ atomic_inc(&hdr->refcnt);
+ } else {
+ if (data != prealloc)
+ kfree(data);
+ data = NULL;
+ }
+out:
+ return data;
+}
+
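+
+nfs_writedata_alloc() hands out the nfs_write_data slot embedded in the write header for the first RPC and only falls back to the heap for additional ones; nfs_writedata_release() below frees heap copies and simply marks the embedded slot free again. A hedged user-space sketch of that preallocation pattern:
+
+#include <stdio.h>
+#include <stdlib.h>
+
+struct header;
+
+struct data {
+	struct header *owner;	/* NULL while the embedded slot is free */
+};
+
+struct header {
+	struct data prealloc;	/* the first allocation costs nothing extra */
+};
+
+static struct data *data_alloc(struct header *hdr)
+{
+	struct data *d;
+
+	if (hdr->prealloc.owner == NULL)
+		d = &hdr->prealloc;		/* reuse the embedded slot */
+	else
+		d = calloc(1, sizeof(*d));	/* later RPCs come from the heap */
+	if (d)
+		d->owner = hdr;
+	return d;
+}
+
+static void data_free(struct header *hdr, struct data *d)
+{
+	if (d == &hdr->prealloc)
+		d->owner = NULL;		/* mark the slot free again */
+	else
+		free(d);
+}
+
+int main(void)
+{
+	struct header hdr = { { NULL } };
+	struct data *a = data_alloc(&hdr);	/* embedded */
+	struct data *b = data_alloc(&hdr);	/* heap */
+
+	printf("a embedded=%d, b embedded=%d\n",
+	       a == &hdr.prealloc, b == &hdr.prealloc);
+	data_free(&hdr, b);
+	data_free(&hdr, a);
+	return 0;
+}
+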
+void nfs_writehdr_free(struct nfs_pgio_header *hdr)
{
- if (p && (p->pagevec != &p->page_array[0]))
- kfree(p->pagevec);
- mempool_free(p, nfs_wdata_mempool);
+ struct nfs_write_header *whdr = container_of(hdr, struct nfs_write_header, header);
+ mempool_free(whdr, nfs_wdata_mempool);
}
void nfs_writedata_release(struct nfs_write_data *wdata)
{
+ struct nfs_pgio_header *hdr = wdata->header;
+ struct nfs_write_header *write_header = container_of(hdr, struct nfs_write_header, header);
+
put_nfs_open_context(wdata->args.context);
- nfs_writedata_free(wdata);
+ if (wdata->pages.pagevec != wdata->pages.page_array)
+ kfree(wdata->pages.pagevec);
+ if (wdata != &write_header->rpc_data)
+ kfree(wdata);
+ else
+ wdata->header = NULL;
+ if (atomic_dec_and_test(&hdr->refcnt))
+ hdr->completion_ops->completion(hdr);
}
static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
@@ -203,7 +230,6 @@ static int nfs_set_page_writeback(struct page *page)
struct inode *inode = page->mapping->host;
struct nfs_server *nfss = NFS_SERVER(inode);
- page_cache_get(page);
if (atomic_long_inc_return(&nfss->writeback) >
NFS_CONGESTION_ON_THRESH) {
set_bdi_congested(&nfss->backing_dev_info,
@@ -219,7 +245,6 @@ static void nfs_end_page_writeback(struct page *page)
struct nfs_server *nfss = NFS_SERVER(inode);
end_page_writeback(page);
- page_cache_release(page);
if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
}
@@ -235,10 +260,10 @@ static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblo
req = nfs_page_find_request_locked(page);
if (req == NULL)
break;
- if (nfs_lock_request_dontget(req))
+ if (nfs_lock_request(req))
break;
/* Note: If we hold the page lock, as is the case in nfs_writepage,
- * then the call to nfs_lock_request_dontget() will always
+ * then the call to nfs_lock_request() will always
* succeed provided that someone hasn't already marked the
* request as dirty (in which case we don't care).
*/
@@ -310,7 +335,8 @@ static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc
struct nfs_pageio_descriptor pgio;
int err;
- nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
+ nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc),
+ &nfs_async_write_completion_ops);
err = nfs_do_writepage(page, wbc, &pgio);
nfs_pageio_complete(&pgio);
if (err < 0)
@@ -353,7 +379,8 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
- nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
+ nfs_pageio_init_write(&pgio, inode, wb_priority(wbc),
+ &nfs_async_write_completion_ops);
err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
nfs_pageio_complete(&pgio);
@@ -379,7 +406,7 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
struct nfs_inode *nfsi = NFS_I(inode);
/* Lock the request! */
- nfs_lock_request_dontget(req);
+ nfs_lock_request(req);
spin_lock(&inode->i_lock);
if (!nfsi->npages && nfs_have_delegation(inode, FMODE_WRITE))
@@ -421,65 +448,88 @@ nfs_mark_request_dirty(struct nfs_page *req)
/**
* nfs_request_add_commit_list - add request to a commit list
* @req: pointer to a struct nfs_page
- * @head: commit list head
+ * @dst: commit list head
+ * @cinfo: holds list lock and accounting info
*
- * This sets the PG_CLEAN bit, updates the inode global count of
+ * This sets the PG_CLEAN bit, updates the cinfo count of
* number of outstanding requests requiring a commit as well as
* the MM page stats.
*
- * The caller must _not_ hold the inode->i_lock, but must be
+ * The caller must _not_ hold the cinfo->lock, but must be
* holding the nfs_page lock.
*/
void
-nfs_request_add_commit_list(struct nfs_page *req, struct list_head *head)
+nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
+ struct nfs_commit_info *cinfo)
{
- struct inode *inode = req->wb_context->dentry->d_inode;
-
set_bit(PG_CLEAN, &(req)->wb_flags);
- spin_lock(&inode->i_lock);
- nfs_list_add_request(req, head);
- NFS_I(inode)->ncommit++;
- spin_unlock(&inode->i_lock);
- inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
- inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
- __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+ spin_lock(cinfo->lock);
+ nfs_list_add_request(req, dst);
+ cinfo->mds->ncommit++;
+ spin_unlock(cinfo->lock);
+ if (!cinfo->dreq) {
+ inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
+ inc_bdi_stat(req->wb_page->mapping->backing_dev_info,
+ BDI_RECLAIMABLE);
+ __mark_inode_dirty(req->wb_context->dentry->d_inode,
+ I_DIRTY_DATASYNC);
+ }
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
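
nfs_request_add_commit_list() now takes the lock and the ncommit counter from the nfs_commit_info it is handed, and skips the VM unstable-page accounting when the request belongs to direct I/O (cinfo->dreq set). A small sketch of that shape, with a pthread mutex standing in for cinfo->lock:

#include <pthread.h>
#include <stdio.h>

struct commit_info {
	pthread_mutex_t lock;	/* plays the role of cinfo->lock */
	unsigned long ncommit;	/* requests still waiting for a COMMIT */
	int is_direct_io;	/* plays the role of cinfo->dreq != NULL */
};

static void add_to_commit_list(struct commit_info *cinfo)
{
	pthread_mutex_lock(&cinfo->lock);
	cinfo->ncommit++;		/* the list insertion itself is elided */
	pthread_mutex_unlock(&cinfo->lock);

	if (!cinfo->is_direct_io)
		printf("account page as unstable/reclaimable\n");
}

int main(void)
{
	struct commit_info cinfo = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

	add_to_commit_list(&cinfo);
	printf("ncommit=%lu\n", cinfo.ncommit);
	return 0;
}
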
/**
* nfs_request_remove_commit_list - Remove request from a commit list
* @req: pointer to a nfs_page
+ * @cinfo: holds list lock and accounting info
*
- * This clears the PG_CLEAN bit, and updates the inode global count of
+ * This clears the PG_CLEAN bit, and updates the cinfo's count of
* number of outstanding requests requiring a commit
* It does not update the MM page stats.
*
- * The caller _must_ hold the inode->i_lock and the nfs_page lock.
+ * The caller _must_ hold the cinfo->lock and the nfs_page lock.
*/
void
-nfs_request_remove_commit_list(struct nfs_page *req)
+nfs_request_remove_commit_list(struct nfs_page *req,
+ struct nfs_commit_info *cinfo)
{
- struct inode *inode = req->wb_context->dentry->d_inode;
-
if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
return;
nfs_list_remove_request(req);
- NFS_I(inode)->ncommit--;
+ cinfo->mds->ncommit--;
}
EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);
+static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
+ struct inode *inode)
+{
+ cinfo->lock = &inode->i_lock;
+ cinfo->mds = &NFS_I(inode)->commit_info;
+ cinfo->ds = pnfs_get_ds_info(inode);
+ cinfo->dreq = NULL;
+ cinfo->completion_ops = &nfs_commit_completion_ops;
+}
+
+void nfs_init_cinfo(struct nfs_commit_info *cinfo,
+ struct inode *inode,
+ struct nfs_direct_req *dreq)
+{
+ if (dreq)
+ nfs_init_cinfo_from_dreq(cinfo, dreq);
+ else
+ nfs_init_cinfo_from_inode(cinfo, inode);
+}
+EXPORT_SYMBOL_GPL(nfs_init_cinfo);
/*
* Add a request to the inode's commit list.
*/
-static void
-nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+void
+nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo)
{
- struct inode *inode = req->wb_context->dentry->d_inode;
-
- if (pnfs_mark_request_commit(req, lseg))
+ if (pnfs_mark_request_commit(req, lseg, cinfo))
return;
- nfs_request_add_commit_list(req, &NFS_I(inode)->commit_list);
+ nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo);
}
static void
@@ -494,11 +544,13 @@ nfs_clear_request_commit(struct nfs_page *req)
{
if (test_bit(PG_CLEAN, &req->wb_flags)) {
struct inode *inode = req->wb_context->dentry->d_inode;
+ struct nfs_commit_info cinfo;
- if (!pnfs_clear_request_commit(req)) {
- spin_lock(&inode->i_lock);
- nfs_request_remove_commit_list(req);
- spin_unlock(&inode->i_lock);
+ nfs_init_cinfo_from_inode(&cinfo, inode);
+ if (!pnfs_clear_request_commit(req, &cinfo)) {
+ spin_lock(cinfo.lock);
+ nfs_request_remove_commit_list(req, &cinfo);
+ spin_unlock(cinfo.lock);
}
nfs_clear_page_commit(req->wb_page);
}
@@ -508,28 +560,25 @@ static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
if (data->verf.committed == NFS_DATA_SYNC)
- return data->lseg == NULL;
- else
- return data->verf.committed != NFS_FILE_SYNC;
+ return data->header->lseg == NULL;
+ return data->verf.committed != NFS_FILE_SYNC;
}
-static inline
-int nfs_reschedule_unstable_write(struct nfs_page *req,
- struct nfs_write_data *data)
+#else
+static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
+ struct inode *inode)
{
- if (test_and_clear_bit(PG_NEED_COMMIT, &req->wb_flags)) {
- nfs_mark_request_commit(req, data->lseg);
- return 1;
- }
- if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
- nfs_mark_request_dirty(req);
- return 1;
- }
- return 0;
}
-#else
-static void
-nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+
+void nfs_init_cinfo(struct nfs_commit_info *cinfo,
+ struct inode *inode,
+ struct nfs_direct_req *dreq)
+{
+}
+
+void
+nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo)
{
}
@@ -544,25 +593,57 @@ int nfs_write_need_commit(struct nfs_write_data *data)
return 0;
}
-static inline
-int nfs_reschedule_unstable_write(struct nfs_page *req,
- struct nfs_write_data *data)
+#endif
+
+static void nfs_write_completion(struct nfs_pgio_header *hdr)
{
- return 0;
+ struct nfs_commit_info cinfo;
+ unsigned long bytes = 0;
+
+ if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
+ goto out;
+ nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
+ while (!list_empty(&hdr->pages)) {
+ struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+
+ bytes += req->wb_bytes;
+ nfs_list_remove_request(req);
+ if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
+ (hdr->good_bytes < bytes)) {
+ nfs_set_pageerror(req->wb_page);
+ nfs_context_set_write_error(req->wb_context, hdr->error);
+ goto remove_req;
+ }
+ if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
+ nfs_mark_request_dirty(req);
+ goto next;
+ }
+ if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
+ nfs_mark_request_commit(req, hdr->lseg, &cinfo);
+ goto next;
+ }
+remove_req:
+ nfs_inode_remove_request(req);
+next:
+ nfs_unlock_request(req);
+ nfs_end_page_writeback(req->wb_page);
+ nfs_release_request(req);
+ }
+out:
+ hdr->release(hdr);
}
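
nfs_write_completion() decides the fate of each request on the header: ranges past good_bytes get the error, rescheduled requests are redirtied, stable-pending ones go to the commit list, and everything else is removed. A compact sketch of that dispatch order, with invented flag names:

#include <stdio.h>

enum fate { WRITE_ERROR, REDIRTY, COMMIT, REMOVE };

/*
 * Mirrors the ladder in nfs_write_completion(): an error on the bytes
 * written so far wins, then reschedule, then commit, and removal is the
 * default.  The flag names are invented for this sketch.
 */
static enum fate classify(int hdr_error, unsigned long good_bytes,
			  unsigned long bytes_so_far,
			  int need_resched, int need_commit)
{
	if (hdr_error && good_bytes < bytes_so_far)
		return WRITE_ERROR;
	if (need_resched)
		return REDIRTY;
	if (need_commit)
		return COMMIT;
	return REMOVE;
}

int main(void)
{
	printf("fate=%d\n", classify(0, 0, 0, 0, 1));	/* prints COMMIT (2) */
	return 0;
}
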
-#endif
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-static int
-nfs_need_commit(struct nfs_inode *nfsi)
+static unsigned long
+nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
{
- return nfsi->ncommit > 0;
+ return cinfo->mds->ncommit;
}
-/* i_lock held by caller */
-static int
-nfs_scan_commit_list(struct list_head *src, struct list_head *dst, int max,
- spinlock_t *lock)
+/* cinfo->lock held by caller */
+int
+nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
+ struct nfs_commit_info *cinfo, int max)
{
struct nfs_page *req, *tmp;
int ret = 0;
@@ -570,12 +651,13 @@ nfs_scan_commit_list(struct list_head *src, struct list_head *dst, int max,
list_for_each_entry_safe(req, tmp, src, wb_list) {
if (!nfs_lock_request(req))
continue;
- if (cond_resched_lock(lock))
+ kref_get(&req->wb_kref);
+ if (cond_resched_lock(cinfo->lock))
list_safe_reset_next(req, tmp, wb_list);
- nfs_request_remove_commit_list(req);
+ nfs_request_remove_commit_list(req, cinfo);
nfs_list_add_request(req, dst);
ret++;
- if (ret == max)
+ if ((ret == max) && !cinfo->dreq)
break;
}
return ret;
@@ -584,37 +666,38 @@ nfs_scan_commit_list(struct list_head *src, struct list_head *dst, int max,
/*
* nfs_scan_commit - Scan an inode for commit requests
* @inode: NFS inode to scan
- * @dst: destination list
+ * @dst: mds destination list
+ * @cinfo: mds and ds lists of reqs ready to commit
*
* Moves requests from the inode's 'commit' request list.
* The requests are *not* checked to ensure that they form a contiguous set.
*/
-static int
-nfs_scan_commit(struct inode *inode, struct list_head *dst)
+int
+nfs_scan_commit(struct inode *inode, struct list_head *dst,
+ struct nfs_commit_info *cinfo)
{
- struct nfs_inode *nfsi = NFS_I(inode);
int ret = 0;
- spin_lock(&inode->i_lock);
- if (nfsi->ncommit > 0) {
+ spin_lock(cinfo->lock);
+ if (cinfo->mds->ncommit > 0) {
const int max = INT_MAX;
- ret = nfs_scan_commit_list(&nfsi->commit_list, dst, max,
- &inode->i_lock);
- ret += pnfs_scan_commit_lists(inode, max - ret,
- &inode->i_lock);
+ ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
+ cinfo, max);
+ ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(cinfo->lock);
return ret;
}
#else
-static inline int nfs_need_commit(struct nfs_inode *nfsi)
+static unsigned long nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
{
return 0;
}
-static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst)
+int nfs_scan_commit(struct inode *inode, struct list_head *dst,
+ struct nfs_commit_info *cinfo)
{
return 0;
}
@@ -659,7 +742,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
|| end < req->wb_offset)
goto out_flushme;
- if (nfs_lock_request_dontget(req))
+ if (nfs_lock_request(req))
break;
/* The request is locked, so wait and then retry */
@@ -729,7 +812,7 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
nfs_grow_file(page, offset, count);
nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
nfs_mark_request_dirty(req);
- nfs_unlock_request(req);
+ nfs_unlock_and_release_request(req);
return 0;
}
@@ -766,10 +849,14 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
* the PageUptodate() flag. In this case, we will need to turn off
* write optimisations that depend on the page contents being correct.
*/
-static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
+static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
{
- return PageUptodate(page) &&
- !(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
+ if (nfs_have_delegated_attributes(inode))
+ goto out;
+ if (NFS_I(inode)->cache_validity & NFS_INO_REVAL_PAGECACHE)
+ return false;
+out:
+ return PageUptodate(page) != 0;
}
/*
@@ -815,17 +902,6 @@ int nfs_updatepage(struct file *file, struct page *page,
return status;
}
-static void nfs_writepage_release(struct nfs_page *req,
- struct nfs_write_data *data)
-{
- struct page *page = req->wb_page;
-
- if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req, data))
- nfs_inode_remove_request(req);
- nfs_unlock_request(req);
- nfs_end_page_writeback(page);
-}
-
static int flush_task_priority(int how)
{
switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
@@ -837,18 +913,18 @@ static int flush_task_priority(int how)
return RPC_PRIORITY_NORMAL;
}
-int nfs_initiate_write(struct nfs_write_data *data,
- struct rpc_clnt *clnt,
+int nfs_initiate_write(struct rpc_clnt *clnt,
+ struct nfs_write_data *data,
const struct rpc_call_ops *call_ops,
- int how)
+ int how, int flags)
{
- struct inode *inode = data->inode;
+ struct inode *inode = data->header->inode;
int priority = flush_task_priority(how);
struct rpc_task *task;
struct rpc_message msg = {
.rpc_argp = &data->args,
.rpc_resp = &data->res,
- .rpc_cred = data->cred,
+ .rpc_cred = data->header->cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = clnt,
@@ -857,7 +933,7 @@ int nfs_initiate_write(struct nfs_write_data *data,
.callback_ops = call_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | flags,
.priority = priority,
};
int ret = 0;
@@ -892,26 +968,21 @@ EXPORT_SYMBOL_GPL(nfs_initiate_write);
/*
* Set up the argument/result storage required for the RPC call.
*/
-static void nfs_write_rpcsetup(struct nfs_page *req,
- struct nfs_write_data *data,
+static void nfs_write_rpcsetup(struct nfs_write_data *data,
unsigned int count, unsigned int offset,
- int how)
+ int how, struct nfs_commit_info *cinfo)
{
- struct inode *inode = req->wb_context->dentry->d_inode;
+ struct nfs_page *req = data->header->req;
/* Set up the RPC argument and reply structs
* NB: take care not to mess about with data->commit et al. */
- data->req = req;
- data->inode = inode = req->wb_context->dentry->d_inode;
- data->cred = req->wb_context->cred;
-
- data->args.fh = NFS_FH(inode);
+ data->args.fh = NFS_FH(data->header->inode);
data->args.offset = req_offset(req) + offset;
/* pnfs_set_layoutcommit needs this */
data->mds_offset = data->args.offset;
data->args.pgbase = req->wb_pgbase + offset;
- data->args.pages = data->pagevec;
+ data->args.pages = data->pages.pagevec;
data->args.count = count;
data->args.context = get_nfs_open_context(req->wb_context);
data->args.lock_context = req->wb_lock_context;
@@ -920,7 +991,7 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
case 0:
break;
case FLUSH_COND_STABLE:
- if (nfs_need_commit(NFS_I(inode)))
+ if (nfs_reqs_to_commit(cinfo))
break;
default:
data->args.stable = NFS_FILE_SYNC;
@@ -936,9 +1007,9 @@ static int nfs_do_write(struct nfs_write_data *data,
const struct rpc_call_ops *call_ops,
int how)
{
- struct inode *inode = data->args.context->dentry->d_inode;
+ struct inode *inode = data->header->inode;
- return nfs_initiate_write(data, NFS_CLIENT(inode), call_ops, how);
+ return nfs_initiate_write(NFS_CLIENT(inode), data, call_ops, how, 0);
}
static int nfs_do_multiple_writes(struct list_head *head,
@@ -951,7 +1022,7 @@ static int nfs_do_multiple_writes(struct list_head *head,
while (!list_empty(head)) {
int ret2;
- data = list_entry(head->next, struct nfs_write_data, list);
+ data = list_first_entry(head, struct nfs_write_data, list);
list_del_init(&data->list);
ret2 = nfs_do_write(data, call_ops, how);
@@ -967,31 +1038,60 @@ static int nfs_do_multiple_writes(struct list_head *head,
*/
static void nfs_redirty_request(struct nfs_page *req)
{
- struct page *page = req->wb_page;
-
nfs_mark_request_dirty(req);
nfs_unlock_request(req);
- nfs_end_page_writeback(page);
+ nfs_end_page_writeback(req->wb_page);
+ nfs_release_request(req);
+}
+
+static void nfs_async_write_error(struct list_head *head)
+{
+ struct nfs_page *req;
+
+ while (!list_empty(head)) {
+ req = nfs_list_entry(head->next);
+ nfs_list_remove_request(req);
+ nfs_redirty_request(req);
+ }
+}
+
+static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
+ .error_cleanup = nfs_async_write_error,
+ .completion = nfs_write_completion,
+};
+
+static void nfs_flush_error(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr)
+{
+ set_bit(NFS_IOHDR_REDO, &hdr->flags);
+ while (!list_empty(&hdr->rpc_list)) {
+ struct nfs_write_data *data = list_first_entry(&hdr->rpc_list,
+ struct nfs_write_data, list);
+ list_del(&data->list);
+ nfs_writedata_release(data);
+ }
+ desc->pg_completion_ops->error_cleanup(&desc->pg_list);
}
/*
* Generate multiple small requests to write out a single
* contiguous dirty area on one page.
*/
-static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head *res)
+static int nfs_flush_multi(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr)
{
- struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
+ struct nfs_page *req = hdr->req;
struct page *page = req->wb_page;
struct nfs_write_data *data;
size_t wsize = desc->pg_bsize, nbytes;
unsigned int offset;
int requests = 0;
- int ret = 0;
+ struct nfs_commit_info cinfo;
- nfs_list_remove_request(req);
+ nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
- (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit ||
+ (desc->pg_moreio || nfs_reqs_to_commit(&cinfo) ||
desc->pg_count > wsize))
desc->pg_ioflags &= ~FLUSH_COND_STABLE;
@@ -1001,28 +1101,22 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head
do {
size_t len = min(nbytes, wsize);
- data = nfs_writedata_alloc(1);
- if (!data)
- goto out_bad;
- data->pagevec[0] = page;
- nfs_write_rpcsetup(req, data, len, offset, desc->pg_ioflags);
- list_add(&data->list, res);
+ data = nfs_writedata_alloc(hdr, 1);
+ if (!data) {
+ nfs_flush_error(desc, hdr);
+ return -ENOMEM;
+ }
+ data->pages.pagevec[0] = page;
+ nfs_write_rpcsetup(data, len, offset, desc->pg_ioflags, &cinfo);
+ list_add(&data->list, &hdr->rpc_list);
requests++;
nbytes -= len;
offset += len;
} while (nbytes != 0);
- atomic_set(&req->wb_complete, requests);
- desc->pg_rpc_callops = &nfs_write_partial_ops;
- return ret;
-
-out_bad:
- while (!list_empty(res)) {
- data = list_entry(res->next, struct nfs_write_data, list);
- list_del(&data->list);
- nfs_writedata_release(data);
- }
- nfs_redirty_request(req);
- return -ENOMEM;
+ nfs_list_remove_request(req);
+ nfs_list_add_request(req, &hdr->pages);
+ desc->pg_rpc_callops = &nfs_write_common_ops;
+ return 0;
}
/*
@@ -1033,62 +1127,71 @@ out_bad:
* This is the case if nfs_updatepage detects a conflicting request
* that has been written but not committed.
*/
-static int nfs_flush_one(struct nfs_pageio_descriptor *desc, struct list_head *res)
+static int nfs_flush_one(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr)
{
struct nfs_page *req;
struct page **pages;
struct nfs_write_data *data;
struct list_head *head = &desc->pg_list;
- int ret = 0;
+ struct nfs_commit_info cinfo;
- data = nfs_writedata_alloc(nfs_page_array_len(desc->pg_base,
- desc->pg_count));
+ data = nfs_writedata_alloc(hdr, nfs_page_array_len(desc->pg_base,
+ desc->pg_count));
if (!data) {
- while (!list_empty(head)) {
- req = nfs_list_entry(head->next);
- nfs_list_remove_request(req);
- nfs_redirty_request(req);
- }
- ret = -ENOMEM;
- goto out;
+ nfs_flush_error(desc, hdr);
+ return -ENOMEM;
}
- pages = data->pagevec;
+
+ nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
+ pages = data->pages.pagevec;
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
- nfs_list_add_request(req, &data->pages);
+ nfs_list_add_request(req, &hdr->pages);
*pages++ = req->wb_page;
}
- req = nfs_list_entry(data->pages.next);
if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
- (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit))
+ (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
desc->pg_ioflags &= ~FLUSH_COND_STABLE;
/* Set up the argument struct */
- nfs_write_rpcsetup(req, data, desc->pg_count, 0, desc->pg_ioflags);
- list_add(&data->list, res);
- desc->pg_rpc_callops = &nfs_write_full_ops;
-out:
- return ret;
+ nfs_write_rpcsetup(data, desc->pg_count, 0, desc->pg_ioflags, &cinfo);
+ list_add(&data->list, &hdr->rpc_list);
+ desc->pg_rpc_callops = &nfs_write_common_ops;
+ return 0;
}
-int nfs_generic_flush(struct nfs_pageio_descriptor *desc, struct list_head *head)
+int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
+ struct nfs_pgio_header *hdr)
{
if (desc->pg_bsize < PAGE_CACHE_SIZE)
- return nfs_flush_multi(desc, head);
- return nfs_flush_one(desc, head);
+ return nfs_flush_multi(desc, hdr);
+ return nfs_flush_one(desc, hdr);
}
static int nfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
- LIST_HEAD(head);
+ struct nfs_write_header *whdr;
+ struct nfs_pgio_header *hdr;
int ret;
- ret = nfs_generic_flush(desc, &head);
+ whdr = nfs_writehdr_alloc();
+ if (!whdr) {
+ desc->pg_completion_ops->error_cleanup(&desc->pg_list);
+ return -ENOMEM;
+ }
+ hdr = &whdr->header;
+ nfs_pgheader_init(desc, hdr, nfs_writehdr_free);
+ atomic_inc(&hdr->refcnt);
+ ret = nfs_generic_flush(desc, hdr);
if (ret == 0)
- ret = nfs_do_multiple_writes(&head, desc->pg_rpc_callops,
- desc->pg_ioflags);
+ ret = nfs_do_multiple_writes(&hdr->rpc_list,
+ desc->pg_rpc_callops,
+ desc->pg_ioflags);
+ if (atomic_dec_and_test(&hdr->refcnt))
+ hdr->completion_ops->completion(hdr);
return ret;
}
@@ -1098,9 +1201,10 @@ static const struct nfs_pageio_ops nfs_pageio_write_ops = {
};
void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
- struct inode *inode, int ioflags)
+ struct inode *inode, int ioflags,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
- nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops,
+ nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops, compl_ops,
NFS_SERVER(inode)->wsize, ioflags);
}
@@ -1111,80 +1215,27 @@ void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
-static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
- struct inode *inode, int ioflags)
+void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
+ struct inode *inode, int ioflags,
+ const struct nfs_pgio_completion_ops *compl_ops)
{
- if (!pnfs_pageio_init_write(pgio, inode, ioflags))
- nfs_pageio_init_write_mds(pgio, inode, ioflags);
+ if (!pnfs_pageio_init_write(pgio, inode, ioflags, compl_ops))
+ nfs_pageio_init_write_mds(pgio, inode, ioflags, compl_ops);
}
-/*
- * Handle a write reply that flushed part of a page.
- */
-static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
+void nfs_write_prepare(struct rpc_task *task, void *calldata)
{
- struct nfs_write_data *data = calldata;
-
- dprintk("NFS: %5u write(%s/%lld %d@%lld)",
- task->tk_pid,
- data->req->wb_context->dentry->d_inode->i_sb->s_id,
- (long long)
- NFS_FILEID(data->req->wb_context->dentry->d_inode),
- data->req->wb_bytes, (long long)req_offset(data->req));
-
- nfs_writeback_done(task, data);
+ struct nfs_write_data *data = calldata;
+ NFS_PROTO(data->header->inode)->write_rpc_prepare(task, data);
}
-static void nfs_writeback_release_partial(void *calldata)
+void nfs_commit_prepare(struct rpc_task *task, void *calldata)
{
- struct nfs_write_data *data = calldata;
- struct nfs_page *req = data->req;
- struct page *page = req->wb_page;
- int status = data->task.tk_status;
+ struct nfs_commit_data *data = calldata;
- if (status < 0) {
- nfs_set_pageerror(page);
- nfs_context_set_write_error(req->wb_context, status);
- dprintk(", error = %d\n", status);
- goto out;
- }
-
- if (nfs_write_need_commit(data)) {
- struct inode *inode = page->mapping->host;
-
- spin_lock(&inode->i_lock);
- if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
- /* Do nothing we need to resend the writes */
- } else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
- memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
- dprintk(" defer commit\n");
- } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
- set_bit(PG_NEED_RESCHED, &req->wb_flags);
- clear_bit(PG_NEED_COMMIT, &req->wb_flags);
- dprintk(" server reboot detected\n");
- }
- spin_unlock(&inode->i_lock);
- } else
- dprintk(" OK\n");
-
-out:
- if (atomic_dec_and_test(&req->wb_complete))
- nfs_writepage_release(req, data);
- nfs_writedata_release(calldata);
+ NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
}
-void nfs_write_prepare(struct rpc_task *task, void *calldata)
-{
- struct nfs_write_data *data = calldata;
- NFS_PROTO(data->inode)->write_rpc_prepare(task, data);
-}
-
-static const struct rpc_call_ops nfs_write_partial_ops = {
- .rpc_call_prepare = nfs_write_prepare,
- .rpc_call_done = nfs_writeback_done_partial,
- .rpc_release = nfs_writeback_release_partial,
-};
-
/*
* Handle a write reply that flushes a whole page.
*
@@ -1192,59 +1243,37 @@ static const struct rpc_call_ops nfs_write_partial_ops = {
* writebacks since the page->count is kept > 1 for as long
* as the page has a write request pending.
*/
-static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
+static void nfs_writeback_done_common(struct rpc_task *task, void *calldata)
{
struct nfs_write_data *data = calldata;
nfs_writeback_done(task, data);
}
-static void nfs_writeback_release_full(void *calldata)
+static void nfs_writeback_release_common(void *calldata)
{
struct nfs_write_data *data = calldata;
+ struct nfs_pgio_header *hdr = data->header;
int status = data->task.tk_status;
+ struct nfs_page *req = hdr->req;
- /* Update attributes as result of writeback. */
- while (!list_empty(&data->pages)) {
- struct nfs_page *req = nfs_list_entry(data->pages.next);
- struct page *page = req->wb_page;
-
- nfs_list_remove_request(req);
-
- dprintk("NFS: %5u write (%s/%lld %d@%lld)",
- data->task.tk_pid,
- req->wb_context->dentry->d_inode->i_sb->s_id,
- (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
- req->wb_bytes,
- (long long)req_offset(req));
-
- if (status < 0) {
- nfs_set_pageerror(page);
- nfs_context_set_write_error(req->wb_context, status);
- dprintk(", error = %d\n", status);
- goto remove_request;
- }
-
- if (nfs_write_need_commit(data)) {
+ if ((status >= 0) && nfs_write_need_commit(data)) {
+ spin_lock(&hdr->lock);
+ if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags))
+ ; /* Do nothing */
+ else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags))
memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
- nfs_mark_request_commit(req, data->lseg);
- dprintk(" marked for commit\n");
- goto next;
- }
- dprintk(" OK\n");
-remove_request:
- nfs_inode_remove_request(req);
- next:
- nfs_unlock_request(req);
- nfs_end_page_writeback(page);
+ else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf)))
+ set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags);
+ spin_unlock(&hdr->lock);
}
- nfs_writedata_release(calldata);
+ nfs_writedata_release(data);
}
-static const struct rpc_call_ops nfs_write_full_ops = {
+static const struct rpc_call_ops nfs_write_common_ops = {
.rpc_call_prepare = nfs_write_prepare,
- .rpc_call_done = nfs_writeback_done_full,
- .rpc_release = nfs_writeback_release_full,
+ .rpc_call_done = nfs_writeback_done_common,
+ .rpc_release = nfs_writeback_release_common,
};
@@ -1255,6 +1284,7 @@ void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
struct nfs_writeargs *argp = &data->args;
struct nfs_writeres *resp = &data->res;
+ struct inode *inode = data->header->inode;
int status;
dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
@@ -1267,10 +1297,10 @@ void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
* another writer had changed the file, but some applications
* depend on tighter cache coherency when writing.
*/
- status = NFS_PROTO(data->inode)->write_done(task, data);
+ status = NFS_PROTO(inode)->write_done(task, data);
if (status != 0)
return;
- nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
+ nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
@@ -1288,46 +1318,47 @@ void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
if (time_before(complain, jiffies)) {
dprintk("NFS: faulty NFS server %s:"
" (committed = %d) != (stable = %d)\n",
- NFS_SERVER(data->inode)->nfs_client->cl_hostname,
+ NFS_SERVER(inode)->nfs_client->cl_hostname,
resp->verf->committed, argp->stable);
complain = jiffies + 300 * HZ;
}
}
#endif
- /* Is this a short write? */
- if (task->tk_status >= 0 && resp->count < argp->count) {
+ if (task->tk_status < 0)
+ nfs_set_pgio_error(data->header, task->tk_status, argp->offset);
+ else if (resp->count < argp->count) {
static unsigned long complain;
- nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);
+ /* This was a short write! */
+ nfs_inc_stats(inode, NFSIOS_SHORTWRITE);
/* Has the server at least made some progress? */
- if (resp->count != 0) {
- /* Was this an NFSv2 write or an NFSv3 stable write? */
- if (resp->verf->committed != NFS_UNSTABLE) {
- /* Resend from where the server left off */
- data->mds_offset += resp->count;
- argp->offset += resp->count;
- argp->pgbase += resp->count;
- argp->count -= resp->count;
- } else {
- /* Resend as a stable write in order to avoid
- * headaches in the case of a server crash.
- */
- argp->stable = NFS_FILE_SYNC;
+ if (resp->count == 0) {
+ if (time_before(complain, jiffies)) {
+ printk(KERN_WARNING
+ "NFS: Server wrote zero bytes, expected %u.\n",
+ argp->count);
+ complain = jiffies + 300 * HZ;
}
- rpc_restart_call_prepare(task);
+ nfs_set_pgio_error(data->header, -EIO, argp->offset);
+ task->tk_status = -EIO;
return;
}
- if (time_before(complain, jiffies)) {
- printk(KERN_WARNING
- "NFS: Server wrote zero bytes, expected %u.\n",
- argp->count);
- complain = jiffies + 300 * HZ;
+ /* Was this an NFSv2 write or an NFSv3 stable write? */
+ if (resp->verf->committed != NFS_UNSTABLE) {
+ /* Resend from where the server left off */
+ data->mds_offset += resp->count;
+ argp->offset += resp->count;
+ argp->pgbase += resp->count;
+ argp->count -= resp->count;
+ } else {
+ /* Resend as a stable write in order to avoid
+ * headaches in the case of a server crash.
+ */
+ argp->stable = NFS_FILE_SYNC;
}
- /* Can't do anything about it except throw an error. */
- task->tk_status = -EIO;
+ rpc_restart_call_prepare(task);
}
- return;
}
@@ -1347,26 +1378,23 @@ static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
return (ret < 0) ? ret : 1;
}
-void nfs_commit_clear_lock(struct nfs_inode *nfsi)
+static void nfs_commit_clear_lock(struct nfs_inode *nfsi)
{
clear_bit(NFS_INO_COMMIT, &nfsi->flags);
smp_mb__after_clear_bit();
wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
}
-EXPORT_SYMBOL_GPL(nfs_commit_clear_lock);
-void nfs_commitdata_release(void *data)
+void nfs_commitdata_release(struct nfs_commit_data *data)
{
- struct nfs_write_data *wdata = data;
-
- put_nfs_open_context(wdata->args.context);
- nfs_commit_free(wdata);
+ put_nfs_open_context(data->context);
+ nfs_commit_free(data);
}
EXPORT_SYMBOL_GPL(nfs_commitdata_release);
-int nfs_initiate_commit(struct nfs_write_data *data, struct rpc_clnt *clnt,
+int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
const struct rpc_call_ops *call_ops,
- int how)
+ int how, int flags)
{
struct rpc_task *task;
int priority = flush_task_priority(how);
@@ -1382,7 +1410,7 @@ int nfs_initiate_commit(struct nfs_write_data *data, struct rpc_clnt *clnt,
.callback_ops = call_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
- .flags = RPC_TASK_ASYNC,
+ .flags = RPC_TASK_ASYNC | flags,
.priority = priority,
};
/* Set up the initial task struct. */
@@ -1403,9 +1431,10 @@ EXPORT_SYMBOL_GPL(nfs_initiate_commit);
/*
* Set up the argument/result storage required for the RPC call.
*/
-void nfs_init_commit(struct nfs_write_data *data,
- struct list_head *head,
- struct pnfs_layout_segment *lseg)
+void nfs_init_commit(struct nfs_commit_data *data,
+ struct list_head *head,
+ struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo)
{
struct nfs_page *first = nfs_list_entry(head->next);
struct inode *inode = first->wb_context->dentry->d_inode;
@@ -1419,13 +1448,14 @@ void nfs_init_commit(struct nfs_write_data *data,
data->cred = first->wb_context->cred;
data->lseg = lseg; /* reference transferred */
data->mds_ops = &nfs_commit_ops;
+ data->completion_ops = cinfo->completion_ops;
+ data->dreq = cinfo->dreq;
data->args.fh = NFS_FH(data->inode);
/* Note: we always request a commit of the entire inode */
data->args.offset = 0;
data->args.count = 0;
- data->args.context = get_nfs_open_context(first->wb_context);
- data->res.count = 0;
+ data->context = get_nfs_open_context(first->wb_context);
data->res.fattr = &data->fattr;
data->res.verf = &data->verf;
nfs_fattr_init(&data->fattr);
@@ -1433,18 +1463,21 @@ void nfs_init_commit(struct nfs_write_data *data,
EXPORT_SYMBOL_GPL(nfs_init_commit);
void nfs_retry_commit(struct list_head *page_list,
- struct pnfs_layout_segment *lseg)
+ struct pnfs_layout_segment *lseg,
+ struct nfs_commit_info *cinfo)
{
struct nfs_page *req;
while (!list_empty(page_list)) {
req = nfs_list_entry(page_list->next);
nfs_list_remove_request(req);
- nfs_mark_request_commit(req, lseg);
- dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
- dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
- BDI_RECLAIMABLE);
- nfs_unlock_request(req);
+ nfs_mark_request_commit(req, lseg, cinfo);
+ if (!cinfo->dreq) {
+ dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
+ dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
+ BDI_RECLAIMABLE);
+ }
+ nfs_unlock_and_release_request(req);
}
}
EXPORT_SYMBOL_GPL(nfs_retry_commit);
@@ -1453,9 +1486,10 @@ EXPORT_SYMBOL_GPL(nfs_retry_commit);
* Commit dirty pages
*/
static int
-nfs_commit_list(struct inode *inode, struct list_head *head, int how)
+nfs_commit_list(struct inode *inode, struct list_head *head, int how,
+ struct nfs_commit_info *cinfo)
{
- struct nfs_write_data *data;
+ struct nfs_commit_data *data;
data = nfs_commitdata_alloc();
@@ -1463,11 +1497,13 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
goto out_bad;
/* Set up the argument struct */
- nfs_init_commit(data, head, NULL);
- return nfs_initiate_commit(data, NFS_CLIENT(inode), data->mds_ops, how);
+ nfs_init_commit(data, head, NULL, cinfo);
+ atomic_inc(&cinfo->mds->rpcs_out);
+ return nfs_initiate_commit(NFS_CLIENT(inode), data, data->mds_ops,
+ how, 0);
out_bad:
- nfs_retry_commit(head, NULL);
- nfs_commit_clear_lock(NFS_I(inode));
+ nfs_retry_commit(head, NULL, cinfo);
+ cinfo->completion_ops->error_cleanup(NFS_I(inode));
return -ENOMEM;
}
@@ -1476,7 +1512,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
*/
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
- struct nfs_write_data *data = calldata;
+ struct nfs_commit_data *data = calldata;
dprintk("NFS: %5u nfs_commit_done (status %d)\n",
task->tk_pid, task->tk_status);
@@ -1485,10 +1521,11 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
NFS_PROTO(data->inode)->commit_done(task, data);
}
-void nfs_commit_release_pages(struct nfs_write_data *data)
+static void nfs_commit_release_pages(struct nfs_commit_data *data)
{
struct nfs_page *req;
int status = data->task.tk_status;
+ struct nfs_commit_info cinfo;
while (!list_empty(&data->pages)) {
req = nfs_list_entry(data->pages.next);
@@ -1519,42 +1556,59 @@ void nfs_commit_release_pages(struct nfs_write_data *data)
dprintk(" mismatch\n");
nfs_mark_request_dirty(req);
next:
- nfs_unlock_request(req);
+ nfs_unlock_and_release_request(req);
}
+ nfs_init_cinfo(&cinfo, data->inode, data->dreq);
+ if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
+ nfs_commit_clear_lock(NFS_I(data->inode));
}
-EXPORT_SYMBOL_GPL(nfs_commit_release_pages);
static void nfs_commit_release(void *calldata)
{
- struct nfs_write_data *data = calldata;
+ struct nfs_commit_data *data = calldata;
- nfs_commit_release_pages(data);
- nfs_commit_clear_lock(NFS_I(data->inode));
+ data->completion_ops->completion(data);
nfs_commitdata_release(calldata);
}
static const struct rpc_call_ops nfs_commit_ops = {
- .rpc_call_prepare = nfs_write_prepare,
+ .rpc_call_prepare = nfs_commit_prepare,
.rpc_call_done = nfs_commit_done,
.rpc_release = nfs_commit_release,
};
+static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
+ .completion = nfs_commit_release_pages,
+ .error_cleanup = nfs_commit_clear_lock,
+};
+
+int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
+ int how, struct nfs_commit_info *cinfo)
+{
+ int status;
+
+ status = pnfs_commit_list(inode, head, how, cinfo);
+ if (status == PNFS_NOT_ATTEMPTED)
+ status = nfs_commit_list(inode, head, how, cinfo);
+ return status;
+}
+
int nfs_commit_inode(struct inode *inode, int how)
{
LIST_HEAD(head);
+ struct nfs_commit_info cinfo;
int may_wait = how & FLUSH_SYNC;
int res;
res = nfs_commit_set_lock(NFS_I(inode), may_wait);
if (res <= 0)
goto out_mark_dirty;
- res = nfs_scan_commit(inode, &head);
+ nfs_init_cinfo_from_inode(&cinfo, inode);
+ res = nfs_scan_commit(inode, &head, &cinfo);
if (res) {
int error;
- error = pnfs_commit_list(inode, &head, how);
- if (error == PNFS_NOT_ATTEMPTED)
- error = nfs_commit_list(inode, &head, how);
+ error = nfs_generic_commit_list(inode, &head, how, &cinfo);
if (error < 0)
return error;
if (!may_wait)
@@ -1585,14 +1639,14 @@ static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_contr
int ret = 0;
/* no commits means nothing needs to be done */
- if (!nfsi->ncommit)
+ if (!nfsi->commit_info.ncommit)
return ret;
if (wbc->sync_mode == WB_SYNC_NONE) {
/* Don't commit yet if this is a non-blocking flush and there
* are a lot of outstanding writes for this mapping.
*/
- if (nfsi->ncommit <= (nfsi->npages >> 1))
+ if (nfsi->commit_info.ncommit <= (nfsi->npages >> 1))
goto out_mark_dirty;
/* don't wait for the COMMIT response */
@@ -1665,7 +1719,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
req = nfs_page_find_request(page);
if (req == NULL)
break;
- if (nfs_lock_request_dontget(req)) {
+ if (nfs_lock_request(req)) {
nfs_clear_request_commit(req);
nfs_inode_remove_request(req);
/*
@@ -1673,7 +1727,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
* page as being dirty
*/
cancel_dirty_page(page, PAGE_CACHE_SIZE);
- nfs_unlock_request(req);
+ nfs_unlock_and_release_request(req);
break;
}
ret = nfs_wait_on_request(req);
@@ -1742,7 +1796,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
int __init nfs_init_writepagecache(void)
{
nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
- sizeof(struct nfs_write_data),
+ sizeof(struct nfs_write_header),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (nfs_wdata_cachep == NULL)
@@ -1753,6 +1807,13 @@ int __init nfs_init_writepagecache(void)
if (nfs_wdata_mempool == NULL)
return -ENOMEM;
+ nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
+ sizeof(struct nfs_commit_data),
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (nfs_cdata_cachep == NULL)
+ return -ENOMEM;
+
nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
nfs_wdata_cachep);
if (nfs_commit_mempool == NULL)
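
A minimal, user-space sketch of the dispatch pattern the rewritten nfs_generic_pg_writepages() relies on, assuming nothing beyond standard C11: one header object is allocated, an extra reference is held across the flush, and the completion callback runs only when the last reference is dropped. The names hdr_t, flush() and complete() are invented for the illustration.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
	atomic_int refcnt;
	void (*completion)(void *hdr);
} hdr_t;

static void hdr_put(hdr_t *hdr)
{
	/* the completion runs exactly once, after the final reference drops */
	if (atomic_fetch_sub(&hdr->refcnt, 1) == 1)
		hdr->completion(hdr);
}

static void complete(void *hdr)
{
	printf("all writes for header %p finished\n", hdr);
	free(hdr);
}

static void flush(hdr_t *hdr, int nreqs)
{
	/* each in-flight request pins the header until its reply is processed */
	for (int i = 0; i < nreqs; i++)
		atomic_fetch_add(&hdr->refcnt, 1);
	/* ... RPCs would be dispatched here; each release path calls hdr_put() ... */
	for (int i = 0; i < nreqs; i++)
		hdr_put(hdr);
}

int main(void)
{
	hdr_t *hdr = malloc(sizeof(*hdr));

	if (!hdr)
		return 1;
	atomic_init(&hdr->refcnt, 1);	/* the caller's own reference */
	hdr->completion = complete;
	flush(hdr, 3);
	hdr_put(hdr);			/* dropping it triggers the completion */
	return 0;
}
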
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 8f7b95ac1f7..7cc64465ec2 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -734,7 +734,7 @@ void nilfs_evict_inode(struct inode *inode)
if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
if (inode->i_data.nrpages)
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
nilfs_clear_inode(inode);
return;
}
@@ -746,7 +746,7 @@ void nilfs_evict_inode(struct inode *inode)
/* TODO: some of the following operations may fail. */
nilfs_truncate_bmap(ii, 0);
nilfs_mark_inode_dirty(inode);
- end_writeback(inode);
+ clear_inode(inode);
ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
if (!ret)
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 2eaa6665294..c6dbd3db6ca 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -2258,7 +2258,7 @@ void ntfs_evict_big_inode(struct inode *vi)
ntfs_inode *ni = NTFS_I(vi);
truncate_inode_pages(&vi->i_data, 0);
- end_writeback(vi);
+ clear_inode(vi);
#ifdef NTFS_RW
if (NInoDirty(ni)) {
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 3b5825ef319..e31d6ae013a 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -367,7 +367,7 @@ static void dlmfs_evict_inode(struct inode *inode)
int status;
struct dlmfs_inode_private *ip;
- end_writeback(inode);
+ clear_inode(inode);
mlog(0, "inode %lu\n", inode->i_ino);
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 17454a904d7..735514ca400 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -1069,7 +1069,7 @@ static void ocfs2_clear_inode(struct inode *inode)
int status;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
- end_writeback(inode);
+ clear_inode(inode);
trace_ocfs2_clear_inode((unsigned long long)oi->ip_blkno,
inode->i_nlink);
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
index dbc84222258..e6213b3725d 100644
--- a/fs/omfs/inode.c
+++ b/fs/omfs/inode.c
@@ -184,7 +184,7 @@ int omfs_sync_inode(struct inode *inode)
static void omfs_evict_inode(struct inode *inode)
{
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
if (inode->i_nlink)
return;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index d2d3108a611..d7d711876b6 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -411,12 +411,13 @@ static const struct file_operations proc_lstats_operations = {
static int proc_oom_score(struct task_struct *task, char *buffer)
{
+ unsigned long totalpages = totalram_pages + total_swap_pages;
unsigned long points = 0;
read_lock(&tasklist_lock);
if (pid_alive(task))
- points = oom_badness(task, NULL, NULL,
- totalram_pages + total_swap_pages);
+ points = oom_badness(task, NULL, NULL, totalpages) *
+ 1000 / totalpages;
read_unlock(&tasklist_lock);
return sprintf(buffer, "%lu\n", points);
}
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 554ecc54799..7ac817b64a7 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -33,7 +33,7 @@ static void proc_evict_inode(struct inode *inode)
const struct proc_ns_operations *ns_ops;
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
/* Stop tracking associated processes */
put_pid(PROC_I(inode)->pid);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 1030a716d15..7faaf2acc57 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -784,7 +784,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
- if (pmd_trans_huge_lock(pmd, vma) == 1) {
+ if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
for (; addr != end; addr += PAGE_SIZE) {
unsigned long offset;
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 19507889bb7..aeb19e68e08 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -85,7 +85,7 @@ static void pstore_evict_inode(struct inode *inode)
struct pstore_private *p = inode->i_private;
unsigned long flags;
- end_writeback(inode);
+ clear_inode(inode);
if (p) {
spin_lock_irqsave(&allpstore_lock, flags);
list_del(&p->list);
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index d69a1d1d7e1..10cbe841cb7 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -116,15 +116,15 @@
* spinlock to internal buffers before writing.
*
* Lock ordering (including related VFS locks) is the following:
- * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
+ * dqonoff_mutex > i_mutex > journal_lock > dqptr_sem > dquot->dq_lock >
* dqio_mutex
+ * dqonoff_mutex > i_mutex comes from dquot_quota_sync, dquot_enable, etc.
* The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
* dqptr_sem. But filesystem has to count with the fact that functions such as
* dquot_alloc_space() acquire dqptr_sem and they usually have to be called
* from inside a transaction to keep filesystem consistency after a crash. Also
* filesystems usually want to do some IO on dquot from ->mark_dirty which is
* called with dqptr_sem held.
- * i_mutex on quota files is special (it's below dqio_mutex)
*/
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
@@ -638,7 +638,7 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait)
dqstats_inc(DQST_SYNCS);
mutex_unlock(&dqopt->dqonoff_mutex);
- if (!wait || (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE))
+ if (!wait || (dqopt->flags & DQUOT_QUOTA_SYS_FILE))
return 0;
/* This is not very clever (and fast) but currently I don't know about
@@ -652,18 +652,17 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait)
* Now when everything is written we can discard the pagecache so
* that userspace sees the changes.
*/
- mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
+ mutex_lock(&dqopt->dqonoff_mutex);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (type != -1 && cnt != type)
continue;
if (!sb_has_quota_active(sb, cnt))
continue;
- mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex,
- I_MUTEX_QUOTA);
- truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
- mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
+ mutex_lock(&dqopt->files[cnt]->i_mutex);
+ truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
+ mutex_unlock(&dqopt->files[cnt]->i_mutex);
}
- mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+ mutex_unlock(&dqopt->dqonoff_mutex);
return 0;
}
@@ -907,14 +906,14 @@ static void add_dquot_ref(struct super_block *sb, int type)
spin_unlock(&inode->i_lock);
continue;
}
-#ifdef CONFIG_QUOTA_DEBUG
- if (unlikely(inode_get_rsv_space(inode) > 0))
- reserved = 1;
-#endif
__iget(inode);
spin_unlock(&inode->i_lock);
spin_unlock(&inode_sb_list_lock);
+#ifdef CONFIG_QUOTA_DEBUG
+ if (unlikely(inode_get_rsv_space(inode) > 0))
+ reserved = 1;
+#endif
iput(old_inode);
__dquot_initialize(inode, type);
@@ -2037,8 +2036,7 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
/* If quota was reenabled in the meantime, we have
* nothing to do */
if (!sb_has_quota_loaded(sb, cnt)) {
- mutex_lock_nested(&toputinode[cnt]->i_mutex,
- I_MUTEX_QUOTA);
+ mutex_lock(&toputinode[cnt]->i_mutex);
toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
S_NOATIME | S_NOQUOTA);
truncate_inode_pages(&toputinode[cnt]->i_data,
@@ -2133,7 +2131,7 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
/* We don't want quota and atime on quota files (deadlocks
* possible) Also nobody should write to the file - we use
* special IO operations which ignore the immutable bit. */
- mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
+ mutex_lock(&inode->i_mutex);
oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
S_NOQUOTA);
inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
@@ -2180,7 +2178,7 @@ out_file_init:
iput(inode);
out_lock:
if (oldflags != -1) {
- mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
+ mutex_lock(&inode->i_mutex);
/* Set the flags back (in the case of accidental quotaon()
* on a wrong file we don't want to mess up the flags) */
inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 494c315c741..59d06871a85 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -76,14 +76,14 @@ void reiserfs_evict_inode(struct inode *inode)
;
}
out:
- end_writeback(inode); /* note this must go after the journal_end to prevent deadlock */
+ clear_inode(inode); /* note this must go after the journal_end to prevent deadlock */
dquot_drop(inode);
inode->i_blocks = 0;
reiserfs_write_unlock_once(inode->i_sb, depth);
return;
no_delete:
- end_writeback(inode);
+ clear_inode(inode);
dquot_drop(inode);
}
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 8b7616ef06d..c07b7d70944 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -2270,7 +2270,6 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
(unsigned long long)off, (unsigned long long)len);
return -EIO;
}
- mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
while (towrite > 0) {
tocopy = sb->s_blocksize - offset < towrite ?
sb->s_blocksize - offset : towrite;
@@ -2302,16 +2301,13 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
blk++;
}
out:
- if (len == towrite) {
- mutex_unlock(&inode->i_mutex);
+ if (len == towrite)
return err;
- }
if (inode->i_size < off + len - towrite)
i_size_write(inode, off + len - towrite);
inode->i_version++;
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
mark_inode_dirty(inode);
- mutex_unlock(&inode->i_mutex);
return len - towrite;
}
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 907c2b3af75..0ce3ccf7f40 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -310,7 +310,7 @@ void sysfs_evict_inode(struct inode *inode)
struct sysfs_dirent *sd = inode->i_private;
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
sysfs_put(sd);
}
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 3da5ce25faf..08d0b2568cd 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -316,7 +316,7 @@ static void sysv_evict_inode(struct inode *inode)
sysv_truncate(inode);
}
invalidate_inode_buffers(inode);
- end_writeback(inode);
+ clear_inode(inode);
if (!inode->i_nlink)
sysv_free_inode(inode);
}
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 001acccac0d..5862dd9d278 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -378,7 +378,7 @@ out:
smp_wmb();
}
done:
- end_writeback(inode);
+ clear_inode(inode);
}
static void ubifs_dirty_inode(struct inode *inode, int flags)
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 7d752800835..873e1bab9c4 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -80,7 +80,7 @@ void udf_evict_inode(struct inode *inode)
} else
truncate_inode_pages(&inode->i_data, 0);
invalidate_inode_buffers(inode);
- end_writeback(inode);
+ clear_inode(inode);
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
inode->i_size != iinfo->i_lenExtents) {
udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 7cdd3953d67..dd7c89d8a1c 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -895,7 +895,7 @@ void ufs_evict_inode(struct inode * inode)
}
invalidate_inode_buffers(inode);
- end_writeback(inode);
+ clear_inode(inode);
if (want_delete) {
lock_ufs(inode->i_sb);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 2fcfd5b0b04..0d9de41a715 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -932,7 +932,7 @@ xfs_fs_evict_inode(
trace_xfs_evict_inode(ip);
truncate_inode_pages(&inode->i_data, 0);
- end_writeback(inode);
+ clear_inode(inode);
XFS_STATS_INC(vn_rele);
XFS_STATS_INC(vn_remove);
XFS_STATS_DEC(vn_active);
diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h
index 85a3ffaa024..abfb2682de7 100644
--- a/include/asm-generic/dma-coherent.h
+++ b/include/asm-generic/dma-coherent.h
@@ -3,13 +3,15 @@
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
- * These two functions are only for dma allocator.
+ * These three functions are only for the dma allocator.
* Don't use them in device drivers.
*/
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, size_t size, int *ret);
/*
* Standard interface
*/
diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h
new file mode 100644
index 00000000000..c544356b374
--- /dev/null
+++ b/include/asm-generic/dma-contiguous.h
@@ -0,0 +1,28 @@
+#ifndef ASM_DMA_CONTIGUOUS_H
+#define ASM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_CMA
+
+#include <linux/device.h>
+#include <linux/dma-contiguous.h>
+
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+ if (dev && dev->cma_area)
+ return dev->cma_area;
+ return dma_contiguous_default_area;
+}
+
+static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
+{
+ if (dev)
+ dev->cma_area = cma;
+ if (!dev || !dma_contiguous_default_area)
+ dma_contiguous_default_area = cma;
+}
+
+#endif
+#endif
+
+#endif
diff --git a/include/asm-generic/kvm_para.h b/include/asm-generic/kvm_para.h
new file mode 100644
index 00000000000..5cba37f9eae
--- /dev/null
+++ b/include/asm-generic/kvm_para.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_GENERIC_KVM_PARA_H
+#define _ASM_GENERIC_KVM_PARA_H
+
+#ifdef __KERNEL__
+
+/*
+ * This function is used by architectures that support kvm to avoid issuing
+ * false soft lockup messages.
+ */
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+ return false;
+}
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+ return 0;
+}
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 125c54e9851..6f2b45a9b6b 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -158,9 +158,8 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
#endif
#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
-extern pmd_t pmdp_splitting_flush(struct vm_area_struct *vma,
- unsigned long address,
- pmd_t *pmdp);
+extern void pmdp_splitting_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
#endif
#ifndef __HAVE_ARCH_PTE_SAME
@@ -446,6 +445,18 @@ static inline int pmd_write(pmd_t pmd)
#endif /* __HAVE_ARCH_PMD_WRITE */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#ifndef pmd_read_atomic
+static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+{
+ /*
+ * Depend on compiler for an atomic pmd read. NOTE: this is
+ * only going to work if the pmdval_t isn't larger than
+ * an unsigned long.
+ */
+ return *pmdp;
+}
+#endif
+
/*
* This function is meant to be used by sites walking pagetables with
* the mmap_sem held in read mode to protect against MADV_DONTNEED and
@@ -459,11 +470,17 @@ static inline int pmd_write(pmd_t pmd)
* undefined so behaving like if the pmd was none is safe (because it
* can return none anyway). The compiler level barrier() is critically
* important to compute the two checks atomically on the same pmdval.
+ *
+ * For 32bit kernels with a 64bit large pmd_t this automatically takes
+ * care of reading the pmd atomically to avoid SMP race conditions
+ * against pmd_populate() when the mmap_sem is held for reading by the
+ * caller (a special atomic read not done by "gcc" as in the generic
+ * version above, is also needed when THP is disabled because the page
+ * fault can populate the pmd from under us).
*/
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
- /* depend on compiler for an atomic pmd read */
- pmd_t pmdval = *pmd;
+ pmd_t pmdval = pmd_read_atomic(pmd);
/*
* The barrier will stabilize the pmdval in a register or on
* the stack so that it will stop changing under the code.
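
A generic, user-space illustration of the rule the comment above spells out, assuming only GCC/Clang-style inline assembly for barrier(); it is not the kernel implementation. The value is read once into a local, the compiler barrier pins that local, and both checks then operate on the same snapshot rather than two independent re-reads of the shared word.

#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

static int none_or_bad(unsigned long *p)
{
	unsigned long val = *p;		/* single read of the shared word */

	barrier();	/* keep both tests below on 'val', not on '*p' */
	if (val == 0)
		return 1;		/* "none" */
	if (val & 1UL)
		return 1;		/* "bad": an arbitrary flag for the demo */
	return 0;
}

int main(void)
{
	unsigned long word = 2;

	printf("%d\n", none_or_bad(&word));	/* prints 0 */
	return 0;
}
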
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
new file mode 100644
index 00000000000..3f21f1b72e4
--- /dev/null
+++ b/include/asm-generic/word-at-a-time.h
@@ -0,0 +1,52 @@
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+/*
+ * This says "generic", but it's actually big-endian only.
+ * Little-endian can use more efficient versions of these
+ * interfaces, see for example
+ * arch/x86/include/asm/word-at-a-time.h
+ * for those.
+ */
+
+#include <linux/kernel.h>
+
+struct word_at_a_time {
+ const unsigned long high_bits, low_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) }
+
+/* Bit set in the bytes that have a zero */
+static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c)
+{
+ unsigned long mask = (val & c->low_bits) + c->low_bits;
+ return ~(mask | rhs);
+}
+
+#define create_zero_mask(mask) (mask)
+
+static inline long find_zero(unsigned long mask)
+{
+ long byte = 0;
+#ifdef CONFIG_64BIT
+ if (mask >> 32)
+ mask >>= 32;
+ else
+ byte = 4;
+#endif
+ if (mask >> 16)
+ mask >>= 16;
+ else
+ byte += 2;
+ return (mask >> 8) ? byte : byte + 1;
+}
+
+static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+{
+ unsigned long rhs = val | c->low_bits;
+ *data = rhs;
+ return (val + c->high_bits) & ~rhs;
+}
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
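
A standalone demonstration of the word-at-a-time idea, using the textbook little-endian formulation rather than the big-endian helpers defined above; the constants and helper name are the classic bit-twiddling ones, not kernel identifiers.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ONES	0x0101010101010101ULL
#define HIGHS	0x8080808080808080ULL

/* nonzero iff some byte of x is zero (classic bit-twiddling identity) */
static uint64_t has_zero_byte(uint64_t x)
{
	return (x - ONES) & ~x & HIGHS;
}

int main(void)
{
	const char s[16] = "word-at-a-time";	/* trailing bytes are zero-filled */
	size_t len = 0;

	for (;;) {
		uint64_t w, mask;

		memcpy(&w, s + len, sizeof(w));	/* well-defined for any alignment */
		mask = has_zero_byte(w);
		if (mask) {
			/* on little-endian the lowest set 0x80 bit marks the NUL */
			len += (size_t)__builtin_ctzll(mask) / 8;
			break;
		}
		len += sizeof(w);
	}
	printf("strlen = %zu (expected %zu)\n", len, strlen(s));
	return 0;
}
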
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 4cd59b95858..7185b8f15ce 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -225,6 +225,7 @@ header-y += kd.h
header-y += kdev_t.h
header-y += kernel.h
header-y += kernelcapi.h
+header-y += kernel-page-flags.h
header-y += keyboard.h
header-y += keyctl.h
header-y += l2tp.h
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index e64ce2cfee9..02549017212 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -92,6 +92,8 @@ struct pl08x_bus_data {
* right now
* @serving: the virtual channel currently being served by this physical
* channel
+ * @locked: channel unavailable for the system, e.g. dedicated to secure
+ * world
*/
struct pl08x_phy_chan {
unsigned int id;
@@ -99,6 +101,7 @@ struct pl08x_phy_chan {
spinlock_t lock;
int signal;
struct pl08x_dma_chan *serving;
+ bool locked;
};
/**
diff --git a/include/linux/apple_bl.h b/include/linux/apple_bl.h
index 47bedc0eee6..0a95e730fce 100644
--- a/include/linux/apple_bl.h
+++ b/include/linux/apple_bl.h
@@ -5,7 +5,7 @@
#ifndef _LINUX_APPLE_BL_H
#define _LINUX_APPLE_BL_H
-#ifdef CONFIG_BACKLIGHT_APPLE
+#if defined(CONFIG_BACKLIGHT_APPLE) || defined(CONFIG_BACKLIGHT_APPLE_MODULE)
extern int apple_bl_register(void);
extern void apple_bl_unregister(void);
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 1a0cd270bb7..324fe08ea3b 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -135,9 +135,6 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
extern int reserve_bootmem_generic(unsigned long addr, unsigned long size,
int flags);
-extern void *alloc_bootmem_section(unsigned long size,
- unsigned long section_nr);
-
#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
extern void *alloc_remap(int nid, unsigned long size);
#else
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 72961c39576..aaac4bba6f5 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -30,6 +30,13 @@ struct pt_regs;
#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); }))
+/*
+ * BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the
+ * expression but avoids the generation of any code, even if that expression
+ * has side-effects.
+ */
+#define BUILD_BUG_ON_INVALID(e) ((void)(sizeof((__force long)(e))))
+
/**
* BUILD_BUG_ON - break compile if a condition is true.
* @condition: the condition which the compiler should know is false.
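
A sketch of the intended use of BUILD_BUG_ON_INVALID(): keeping a debug macro's argument visible to the compiler even when the debug path is compiled out, so that bit-rot in the expression is still caught. The local copy of the macro drops the sparse-only __force annotation, and MY_DEBUG, my_dbg() and do_debug() are invented names.

/* same definition as above, minus the sparse-only __force cast */
#define BUILD_BUG_ON_INVALID(e) ((void)(sizeof((long)(e))))

#ifdef MY_DEBUG
#define my_dbg(cond)	do_debug(cond)
#else
/* no object code is emitted, but a typo in 'cond' still breaks the build */
#define my_dbg(cond)	BUILD_BUG_ON_INVALID(cond)
#endif

int main(void)
{
	int ready = 1;

	my_dbg(ready != 0);	/* evaluates nothing at run time when MY_DEBUG is unset */
	return 0;
}
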
diff --git a/include/linux/clk-private.h b/include/linux/clk-private.h
index 5e4312b6f5c..eb3f84bc532 100644
--- a/include/linux/clk-private.h
+++ b/include/linux/clk-private.h
@@ -30,7 +30,7 @@ struct clk {
const struct clk_ops *ops;
struct clk_hw *hw;
struct clk *parent;
- char **parent_names;
+ const char **parent_names;
struct clk **parents;
u8 num_parents;
unsigned long rate;
@@ -55,12 +55,22 @@ struct clk {
* alternative macro for static initialization
*/
-extern struct clk_ops clk_fixed_rate_ops;
+#define DEFINE_CLK(_name, _ops, _flags, _parent_names, \
+ _parents) \
+ static struct clk _name = { \
+ .name = #_name, \
+ .ops = &_ops, \
+ .hw = &_name##_hw.hw, \
+ .parent_names = _parent_names, \
+ .num_parents = ARRAY_SIZE(_parent_names), \
+ .parents = _parents, \
+ .flags = _flags, \
+ }
#define DEFINE_CLK_FIXED_RATE(_name, _flags, _rate, \
_fixed_rate_flags) \
static struct clk _name; \
- static char *_name##_parent_names[] = {}; \
+ static const char *_name##_parent_names[] = {}; \
static struct clk_fixed_rate _name##_hw = { \
.hw = { \
.clk = &_name, \
@@ -68,23 +78,14 @@ extern struct clk_ops clk_fixed_rate_ops;
.fixed_rate = _rate, \
.flags = _fixed_rate_flags, \
}; \
- static struct clk _name = { \
- .name = #_name, \
- .ops = &clk_fixed_rate_ops, \
- .hw = &_name##_hw.hw, \
- .parent_names = _name##_parent_names, \
- .num_parents = \
- ARRAY_SIZE(_name##_parent_names), \
- .flags = _flags, \
- };
-
-extern struct clk_ops clk_gate_ops;
+ DEFINE_CLK(_name, clk_fixed_rate_ops, _flags, \
+ _name##_parent_names, NULL);
#define DEFINE_CLK_GATE(_name, _parent_name, _parent_ptr, \
_flags, _reg, _bit_idx, \
_gate_flags, _lock) \
static struct clk _name; \
- static char *_name##_parent_names[] = { \
+ static const char *_name##_parent_names[] = { \
_parent_name, \
}; \
static struct clk *_name##_parents[] = { \
@@ -99,24 +100,14 @@ extern struct clk_ops clk_gate_ops;
.flags = _gate_flags, \
.lock = _lock, \
}; \
- static struct clk _name = { \
- .name = #_name, \
- .ops = &clk_gate_ops, \
- .hw = &_name##_hw.hw, \
- .parent_names = _name##_parent_names, \
- .num_parents = \
- ARRAY_SIZE(_name##_parent_names), \
- .parents = _name##_parents, \
- .flags = _flags, \
- };
-
-extern struct clk_ops clk_divider_ops;
+ DEFINE_CLK(_name, clk_gate_ops, _flags, \
+ _name##_parent_names, _name##_parents);
#define DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr, \
_flags, _reg, _shift, _width, \
_divider_flags, _lock) \
static struct clk _name; \
- static char *_name##_parent_names[] = { \
+ static const char *_name##_parent_names[] = { \
_parent_name, \
}; \
static struct clk *_name##_parents[] = { \
@@ -132,18 +123,8 @@ extern struct clk_ops clk_divider_ops;
.flags = _divider_flags, \
.lock = _lock, \
}; \
- static struct clk _name = { \
- .name = #_name, \
- .ops = &clk_divider_ops, \
- .hw = &_name##_hw.hw, \
- .parent_names = _name##_parent_names, \
- .num_parents = \
- ARRAY_SIZE(_name##_parent_names), \
- .parents = _name##_parents, \
- .flags = _flags, \
- };
-
-extern struct clk_ops clk_mux_ops;
+ DEFINE_CLK(_name, clk_divider_ops, _flags, \
+ _name##_parent_names, _name##_parents);
#define DEFINE_CLK_MUX(_name, _parent_names, _parents, _flags, \
_reg, _shift, _width, \
@@ -159,16 +140,28 @@ extern struct clk_ops clk_mux_ops;
.flags = _mux_flags, \
.lock = _lock, \
}; \
- static struct clk _name = { \
- .name = #_name, \
- .ops = &clk_mux_ops, \
- .hw = &_name##_hw.hw, \
- .parent_names = _parent_names, \
- .num_parents = \
- ARRAY_SIZE(_parent_names), \
- .parents = _parents, \
- .flags = _flags, \
- };
+ DEFINE_CLK(_name, clk_mux_ops, _flags, _parent_names, \
+ _parents);
+
+#define DEFINE_CLK_FIXED_FACTOR(_name, _parent_name, \
+ _parent_ptr, _flags, \
+ _mult, _div) \
+ static struct clk _name; \
+ static const char *_name##_parent_names[] = { \
+ _parent_name, \
+ }; \
+ static struct clk *_name##_parents[] = { \
+ _parent_ptr, \
+ }; \
+ static struct clk_fixed_factor _name##_hw = { \
+ .hw = { \
+ .clk = &_name, \
+ }, \
+ .mult = _mult, \
+ .div = _div, \
+ }; \
+ DEFINE_CLK(_name, clk_fixed_factor_ops, _flags, \
+ _name##_parent_names, _name##_parents);
/**
* __clk_init - initialize the data structures in a struct clk
@@ -189,8 +182,12 @@ extern struct clk_ops clk_mux_ops;
*
* It is not necessary to call clk_register if __clk_init is used directly with
* statically initialized clock data.
+ *
+ * Returns 0 on success, otherwise an error code.
*/
-void __clk_init(struct device *dev, struct clk *clk);
+int __clk_init(struct device *dev, struct clk *clk);
+
+struct clk *__clk_register(struct device *dev, struct clk_hw *hw);
#endif /* CONFIG_COMMON_CLK */
#endif /* CLK_PRIVATE_H */
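
A usage sketch for the new DEFINE_CLK_FIXED_FACTOR helper, as static platform clock data only; the oscillator, its 24 MHz rate and both clock names are hypothetical.

#include <linux/clk-private.h>

/* hypothetical 24 MHz root oscillator, statically initialized */
DEFINE_CLK_FIXED_RATE(osc_clk, CLK_IS_ROOT, 24000000, 0);

/* a child clock fixed at half the oscillator rate: mult = 1, div = 2 */
DEFINE_CLK_FIXED_FACTOR(osc_div2, "osc_clk", &osc_clk, 0, 1, 2);
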
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 5508897ad37..4a0b483986c 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -15,19 +15,6 @@
#ifdef CONFIG_COMMON_CLK
-/**
- * struct clk_hw - handle for traversing from a struct clk to its corresponding
- * hardware-specific structure. struct clk_hw should be declared within struct
- * clk_foo and then referenced by the struct clk instance that uses struct
- * clk_foo's clk_ops
- *
- * clk: pointer to the struct clk instance that points back to this struct
- * clk_hw instance
- */
-struct clk_hw {
- struct clk *clk;
-};
-
/*
* flags used across common struct clk. these flags should only affect the
* top-level framework. custom flags for dealing with hardware specifics
@@ -39,6 +26,8 @@ struct clk_hw {
#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */
#define CLK_IS_ROOT BIT(4) /* root clk, has no parent */
+struct clk_hw;
+
/**
* struct clk_ops - Callback operations for hardware clocks; these are to
* be provided by the clock implementation, and will be called by drivers
@@ -88,19 +77,11 @@ struct clk_hw {
* array index into the value programmed into the hardware.
* Returns 0 on success, -EERROR otherwise.
*
- * @set_rate: Change the rate of this clock. If this callback returns
- * CLK_SET_RATE_PARENT, the rate change will be propagated to the
- * parent clock (which may propagate again if the parent clock
- * also sets this flag). The requested rate of the parent is
- * passed back from the callback in the second 'unsigned long *'
- * argument. Note that it is up to the hardware clock's set_rate
- * implementation to insure that clocks do not run out of spec
- * when propgating the call to set_rate up to the parent. One way
- * to do this is to gate the clock (via clk_disable and/or
- * clk_unprepare) before calling clk_set_rate, then ungating it
- * afterward. If your clock also has the CLK_GATE_SET_RATE flag
- * set then this will insure safety. Returns 0 on success,
- * -EERROR otherwise.
+ * @set_rate: Change the rate of this clock. The requested rate is specified
+ * by the second argument, which should typically be the return
+ * of a .round_rate call. The third argument gives the parent rate
+ * which is likely helpful for most .set_rate implementations.
+ * Returns 0 on success, -EERROR otherwise.
*
* The clk_enable/clk_disable and clk_prepare/clk_unprepare pairs allow
* implementations to split any work between atomic (enable) and sleepable
@@ -125,10 +106,46 @@ struct clk_ops {
unsigned long *);
int (*set_parent)(struct clk_hw *hw, u8 index);
u8 (*get_parent)(struct clk_hw *hw);
- int (*set_rate)(struct clk_hw *hw, unsigned long);
+ int (*set_rate)(struct clk_hw *hw, unsigned long,
+ unsigned long);
void (*init)(struct clk_hw *hw);
};
+/**
+ * struct clk_init_data - holds init data that's common to all clocks and is
+ * shared between the clock provider and the common clock framework.
+ *
+ * @name: clock name
+ * @ops: operations this clock supports
+ * @parent_names: array of string names for all possible parents
+ * @num_parents: number of possible parents
+ * @flags: framework-level hints and quirks
+ */
+struct clk_init_data {
+ const char *name;
+ const struct clk_ops *ops;
+ const char **parent_names;
+ u8 num_parents;
+ unsigned long flags;
+};
+
+/**
+ * struct clk_hw - handle for traversing from a struct clk to its corresponding
+ * hardware-specific structure. struct clk_hw should be declared within struct
+ * clk_foo and then referenced by the struct clk instance that uses struct
+ * clk_foo's clk_ops
+ *
+ * @clk: pointer to the struct clk instance that points back to this struct
+ * clk_hw instance
+ *
+ * @init: pointer to struct clk_init_data that contains the init data shared
+ * with the common clock framework.
+ */
+struct clk_hw {
+ struct clk *clk;
+ struct clk_init_data *init;
+};
+
/*
* DOC: Basic clock implementations common to many platforms
*
@@ -149,6 +166,7 @@ struct clk_fixed_rate {
u8 flags;
};
+extern const struct clk_ops clk_fixed_rate_ops;
struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned long fixed_rate);
@@ -165,7 +183,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
* Clock which can gate its output. Implements .enable & .disable
*
* Flags:
- * CLK_GATE_SET_DISABLE - by default this clock sets the bit at bit_idx to
+ * CLK_GATE_SET_TO_DISABLE - by default this clock sets the bit at bit_idx to
* enable the clock. Setting this flag does the opposite: setting the bit
* disable the clock and clearing it enables the clock
*/
@@ -175,11 +193,11 @@ struct clk_gate {
u8 bit_idx;
u8 flags;
spinlock_t *lock;
- char *parent[1];
};
#define CLK_GATE_SET_TO_DISABLE BIT(0)
+extern const struct clk_ops clk_gate_ops;
struct clk *clk_register_gate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 bit_idx,
@@ -212,12 +230,12 @@ struct clk_divider {
u8 width;
u8 flags;
spinlock_t *lock;
- char *parent[1];
};
#define CLK_DIVIDER_ONE_BASED BIT(0)
#define CLK_DIVIDER_POWER_OF_TWO BIT(1)
+extern const struct clk_ops clk_divider_ops;
struct clk *clk_register_divider(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
@@ -238,7 +256,7 @@ struct clk *clk_register_divider(struct device *dev, const char *name,
*
* Flags:
* CLK_MUX_INDEX_ONE - register index starts at 1, not 0
- * CLK_MUX_INDEX_BITWISE - register index is a single bit (power of two)
+ * CLK_MUX_INDEX_BIT - register index is a single bit (power of two)
*/
struct clk_mux {
struct clk_hw hw;
@@ -252,29 +270,49 @@ struct clk_mux {
#define CLK_MUX_INDEX_ONE BIT(0)
#define CLK_MUX_INDEX_BIT BIT(1)
+extern const struct clk_ops clk_mux_ops;
struct clk *clk_register_mux(struct device *dev, const char *name,
- char **parent_names, u8 num_parents, unsigned long flags,
+ const char **parent_names, u8 num_parents, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
u8 clk_mux_flags, spinlock_t *lock);
/**
+ * struct clk_fixed_factor - fixed multiplier and divider clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @mult: multiplier
+ * @div: divider
+ *
+ * Clock with a fixed multiplier and divider. The output frequency is the
+ * parent clock rate divided by div and multiplied by mult.
+ * Implements .recalc_rate, .set_rate and .round_rate
+ */
+
+struct clk_fixed_factor {
+ struct clk_hw hw;
+ unsigned int mult;
+ unsigned int div;
+};
+
+extern struct clk_ops clk_fixed_factor_ops;
+struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ unsigned int mult, unsigned int div);
+
+/**
* clk_register - allocate a new clock, register it and return an opaque cookie
* @dev: device that is registering this clock
- * @name: clock name
- * @ops: operations this clock supports
* @hw: link to hardware-specific clock data
- * @parent_names: array of string names for all possible parents
- * @num_parents: number of possible parents
- * @flags: framework-level hints and quirks
*
* clk_register is the primary interface for populating the clock tree with new
* clock nodes. It returns a pointer to the newly allocated struct clk which
* cannot be dereferenced by driver code but may be used in conjunction with the
- * rest of the clock API.
+ * rest of the clock API. In the event of an error clk_register will return an
+ * error code; drivers must test for an error code after calling clk_register.
*/
-struct clk *clk_register(struct device *dev, const char *name,
- const struct clk_ops *ops, struct clk_hw *hw,
- char **parent_names, u8 num_parents, unsigned long flags);
+struct clk *clk_register(struct device *dev, struct clk_hw *hw);
+
+void clk_unregister(struct clk *clk);
/* helper functions */
const char *__clk_get_name(struct clk *clk);
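
A sketch of the reworked registration flow under the new clk_init_data/clk_hw arrangement, not a complete driver; struct clk_foo, foo_ops, foo_register() and the "osc" parent are hypothetical, and the IS_ERR() check follows the updated clk_register() kerneldoc above.

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/kernel.h>

struct clk_foo {
	struct clk_hw hw;
	void __iomem *reg;
};

static const char *foo_parents[] = { "osc" };

static const struct clk_ops foo_ops = {
	/* .enable, .disable, .recalc_rate, ... as the hardware requires */
};

static struct clk_init_data foo_init = {
	.name		= "foo",
	.ops		= &foo_ops,
	.parent_names	= foo_parents,
	.num_parents	= ARRAY_SIZE(foo_parents),
};

static int foo_register(struct device *dev, struct clk_foo *foo)
{
	struct clk *clk;

	foo->hw.init = &foo_init;	/* init data now travels via clk_hw */
	clk = clk_register(dev, &foo->hw);
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	return 0;
}
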
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 70cf722ac3a..ad5c43e8ae8 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -81,7 +81,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb);
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
-#endif /* !CONFIG_COMMON_CLK */
+#endif
/**
* clk_get - lookup and obtain a reference to a clock producer.
@@ -252,7 +252,7 @@ void devm_clk_put(struct device *dev, struct clk *clk);
* Returns rounded clock rate in Hz, or negative errno.
*/
long clk_round_rate(struct clk *clk, unsigned long rate);
-
+
/**
* clk_set_rate - set the clock rate for a clock source
* @clk: clock source
@@ -261,7 +261,7 @@ long clk_round_rate(struct clk *clk, unsigned long rate);
* Returns success (0) or negative errno.
*/
int clk_set_rate(struct clk *clk, unsigned long rate);
-
+
/**
* clk_set_parent - set the parent clock source for this clock
* @clk: clock source
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 51a90b7f2d6..e988037abd2 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -1,6 +1,8 @@
#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H
+#include <linux/node.h>
+
/* Return values for compact_zone() and try_to_compact_pages() */
/* compaction didn't start as it was not possible or direct reclaim was more suitable */
#define COMPACT_SKIPPED 0
@@ -11,6 +13,23 @@
/* The full zone was compacted */
#define COMPACT_COMPLETE 3
+/*
+ * compaction supports three modes
+ *
+ * COMPACT_ASYNC_MOVABLE uses asynchronous migration and only scans
+ * MIGRATE_MOVABLE pageblocks as migration sources and targets.
+ * COMPACT_ASYNC_UNMOVABLE uses asynchronous migration and only scans
+ * MIGRATE_MOVABLE pageblocks as migration sources.
+ * MIGRATE_UNMOVABLE pageblocks are scanned as potential migration
+ * targets and converted to MIGRATE_MOVABLE if possible.
+ * COMPACT_SYNC uses synchronous migration and scans all pageblocks
+ */
+enum compact_mode {
+ COMPACT_ASYNC_MOVABLE,
+ COMPACT_ASYNC_UNMOVABLE,
+ COMPACT_SYNC,
+};
+
#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
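
As a hedged illustration of how the three modes above relate to one another (the helper below is hypothetical, not part of this patch):

/* Hypothetical helper; only shows how the new modes are meant to be chosen. */
static enum compact_mode example_pick_mode(bool sync, bool scan_unmovable)
{
	if (sync)
		return COMPACT_SYNC;
	return scan_unmovable ? COMPACT_ASYNC_UNMOVABLE : COMPACT_ASYNC_MOVABLE;
}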
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index ae36b72c22f..66c434f5dd1 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -93,6 +93,10 @@ struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
int nregs, void __iomem *base, char *prefix);
+struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
+ struct dentry *parent,
+ u32 *array, u32 elements);
+
bool debugfs_initialized(void);
#else
@@ -219,6 +223,13 @@ static inline bool debugfs_initialized(void)
return false;
}
+static inline struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
+ struct dentry *parent,
+ u32 *array, u32 elements)
+{
+ return ERR_PTR(-ENODEV);
+}
+
#endif
#endif
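
A minimal usage sketch for the new debugfs_create_u32_array() helper, with made-up file and variable names:

#include <linux/debugfs.h>
#include <linux/kernel.h>	/* ARRAY_SIZE() */

static u32 example_counters[4];

static void example_debugfs_init(struct dentry *parent)
{
	/* expose the four counters read-only as <parent>/counters */
	debugfs_create_u32_array("counters", 0444, parent,
				 example_counters, ARRAY_SIZE(example_counters));
}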
diff --git a/include/linux/device.h b/include/linux/device.h
index e04f5776f6d..161d96241b1 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -667,6 +667,10 @@ struct device {
struct dma_coherent_mem *dma_mem; /* internal for coherent mem
override */
+#ifdef CONFIG_CMA
+ struct cma *cma_area; /* contiguous memory area for dma
+ allocations */
+#endif
/* arch specific additions */
struct dev_archdata archdata;
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 3efbfc2145c..eb48f3816df 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -61,6 +61,13 @@ struct dma_buf_attachment;
* This callback must not sleep.
* @kmap: maps a page from the buffer into kernel address space.
* @kunmap: [optional] unmaps a page from the buffer.
+ * @mmap: used to expose the backing storage to userspace. Note that the
+ * mapping needs to be coherent - if the exporter doesn't directly
+ * support this, it needs to fake coherency by shooting down any ptes
+ * when transitioning away from the cpu domain.
+ * @vmap: [optional] creates a virtual mapping for the buffer into kernel
+ * address space. Same restrictions as for vmap and friends apply.
+ * @vunmap: [optional] unmaps a vmap from the buffer
*/
struct dma_buf_ops {
int (*attach)(struct dma_buf *, struct device *,
@@ -92,6 +99,11 @@ struct dma_buf_ops {
void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
void *(*kmap)(struct dma_buf *, unsigned long);
void (*kunmap)(struct dma_buf *, unsigned long, void *);
+
+ int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
+
+ void *(*vmap)(struct dma_buf *);
+ void (*vunmap)(struct dma_buf *, void *vaddr);
};
/**
@@ -167,6 +179,11 @@ void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
void *dma_buf_kmap(struct dma_buf *, unsigned long);
void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
+
+int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
+ unsigned long);
+void *dma_buf_vmap(struct dma_buf *);
+void dma_buf_vunmap(struct dma_buf *, void *vaddr);
#else
static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
@@ -248,6 +265,22 @@ static inline void dma_buf_kunmap(struct dma_buf *dmabuf,
unsigned long pnum, void *vaddr)
{
}
+
+static inline int dma_buf_mmap(struct dma_buf *dmabuf,
+ struct vm_area_struct *vma,
+ unsigned long pgoff)
+{
+ return -ENODEV;
+}
+
+static inline void *dma_buf_vmap(struct dma_buf *dmabuf)
+{
+ return NULL;
+}
+
+static inline void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+}
#endif /* CONFIG_DMA_SHARED_BUFFER */
#endif /* __DMA_BUF_H__ */
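
A hedged importer-side sketch of the new vmap interface (the exporter side and error handling beyond the NULL check are omitted):

#include <linux/dma-buf.h>
#include <linux/errno.h>

static int example_cpu_touch(struct dma_buf *buf)
{
	void *vaddr = dma_buf_vmap(buf);

	if (!vaddr)
		return -ENOMEM;

	/* ... access the buffer contents through vaddr ... */

	dma_buf_vunmap(buf, vaddr);
	return 0;
}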
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
new file mode 100644
index 00000000000..2f303e4b7ed
--- /dev/null
+++ b/include/linux/dma-contiguous.h
@@ -0,0 +1,110 @@
+#ifndef __LINUX_CMA_H
+#define __LINUX_CMA_H
+
+/*
+ * Contiguous Memory Allocator for DMA mapping framework
+ * Copyright (c) 2010-2011 by Samsung Electronics.
+ * Written by:
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ * Michal Nazarewicz <mina86@mina86.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ */
+
+/*
+ * Contiguous Memory Allocator
+ *
+ * The Contiguous Memory Allocator (CMA) makes it possible to
+ * allocate big contiguous chunks of memory after the system has
+ * booted.
+ *
+ * Why is it needed?
+ *
+ * Various devices on embedded systems have no scatter-gather and/or
+ * IO map support and require contiguous blocks of memory to
+ * operate. They include devices such as cameras, hardware video
+ * coders, etc.
+ *
+ * Such devices often require big memory buffers (a full HD frame
+ * is, for instance, more than 2 megapixels, i.e. more than 6
+ * MB of memory), which makes mechanisms such as kmalloc() or
+ * alloc_page() ineffective.
+ *
+ * At the same time, a solution where a big memory region is
+ * reserved for a device is suboptimal since often more memory is
+ * reserved than is strictly required and, moreover, the memory is
+ * inaccessible to the page allocator even if device drivers don't use it.
+ *
+ * CMA tries to solve this issue by operating on memory regions
+ * where only movable pages can be allocated from. This way, the
+ * kernel can use the memory for pagecache and, when a device driver
+ * requests it, the allocated pages can be migrated.
+ *
+ * Driver usage
+ *
+ * CMA should not be used by device drivers directly. It is
+ * only a helper framework for the dma-mapping subsystem.
+ *
+ * For more information, see kernel-docs in drivers/base/dma-contiguous.c
+ */
+
+#ifdef __KERNEL__
+
+struct cma;
+struct page;
+struct device;
+
+#ifdef CONFIG_CMA
+
+/*
+ * There is always at least the global CMA area and a few optional
+ * device-private areas configured in the kernel .config.
+ */
+#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)
+
+extern struct cma *dma_contiguous_default_area;
+
+void dma_contiguous_reserve(phys_addr_t addr_limit);
+int dma_declare_contiguous(struct device *dev, unsigned long size,
+ phys_addr_t base, phys_addr_t limit);
+
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+ unsigned int order);
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+ int count);
+
+#else
+
+#define MAX_CMA_AREAS (0)
+
+static inline void dma_contiguous_reserve(phys_addr_t limit) { }
+
+static inline
+int dma_declare_contiguous(struct device *dev, unsigned long size,
+ phys_addr_t base, phys_addr_t limit)
+{
+ return -ENOSYS;
+}
+
+static inline
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+ unsigned int order)
+{
+ return NULL;
+}
+
+static inline
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+ int count)
+{
+ return false;
+}
+
+#endif
+
+#endif
+
+#endif
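
Since the comment above says ordinary drivers must not call these helpers, here is a hedged sketch of how the dma-mapping layer itself might back a coherent allocation with CMA (the function name and fallback policy are invented):

#include <linux/dma-contiguous.h>
#include <linux/mm.h>		/* PAGE_ALIGN(), get_order() */

static struct page *example_alloc_coherent_pages(struct device *dev, size_t size)
{
	int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page;

	page = dma_alloc_from_contiguous(dev, count, get_order(size));
	if (!page)
		return NULL;	/* caller falls back to the normal allocator */
	return page;
}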
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index f9a2e5e67a5..d3fec584e8c 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -615,11 +615,13 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
}
static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
- struct dma_chan *chan, void *buf, size_t len,
+ struct dma_chan *chan, dma_addr_t buf, size_t len,
enum dma_transfer_direction dir, unsigned long flags)
{
struct scatterlist sg;
- sg_init_one(&sg, buf, len);
+ sg_init_table(&sg, 1);
+ sg_dma_address(&sg) = buf;
+ sg_dma_len(&sg) = len;
return chan->device->device_prep_slave_sg(chan, &sg, 1,
dir, flags, NULL);
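
On the caller side, the prototype change means the buffer must already be DMA-mapped before it is handed to the helper. A hedged sketch (driver context and unmap paths simplified, names invented):

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static struct dma_async_tx_descriptor *
example_prep_tx(struct dma_chan *chan, struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return NULL;

	/* the buffer is now passed as a dma_addr_t, not a kernel virtual address */
	return dmaengine_prep_slave_single(chan, dma, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
}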
diff --git a/include/linux/edac.h b/include/linux/edac.h
index c621d762bb2..91ba3bae42e 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -71,6 +71,25 @@ enum dev_type {
#define DEV_FLAG_X64 BIT(DEV_X64)
/**
+ * enum hw_event_mc_err_type - type of the detected error
+ *
+ * @HW_EVENT_ERR_CORRECTED: Corrected Error - Indicates that an ECC
+ * corrected error was detected
+ * @HW_EVENT_ERR_UNCORRECTED: Uncorrected Error - Indicates an error that
+ * can't be corrected by ECC, but it is not
+ * fatal (maybe it is on an unused memory area,
+ * or the memory controller could recover from
+ * it for example, by re-trying the operation).
+ * @HW_EVENT_ERR_FATAL: Fatal Error - Uncorrected error that could not
+ * be recovered.
+ */
+enum hw_event_mc_err_type {
+ HW_EVENT_ERR_CORRECTED,
+ HW_EVENT_ERR_UNCORRECTED,
+ HW_EVENT_ERR_FATAL,
+};
+
+/**
* enum mem_type - memory types. For a more detailed reference, please see
* http://en.wikipedia.org/wiki/DRAM
*
@@ -313,38 +332,141 @@ enum scrub_type {
*/
/**
+ * enum edac_mc_layer_type - memory controller hierarchy layer
+ *
+ * @EDAC_MC_LAYER_BRANCH: memory layer is named "branch"
+ * @EDAC_MC_LAYER_CHANNEL: memory layer is named "channel"
+ * @EDAC_MC_LAYER_SLOT: memory layer is named "slot"
+ * @EDAC_MC_LAYER_CHIP_SELECT: memory layer is named "chip select"
+ *
+ * This enum is used by the drivers to tell edac_mc_sysfs what name should
+ * be used when describing a memory stick location.
+ */
+enum edac_mc_layer_type {
+ EDAC_MC_LAYER_BRANCH,
+ EDAC_MC_LAYER_CHANNEL,
+ EDAC_MC_LAYER_SLOT,
+ EDAC_MC_LAYER_CHIP_SELECT,
+};
+
+/**
+ * struct edac_mc_layer - describes the memory controller hierarchy
+ * @type: layer type
+ * @size: number of components per layer. For example,
+ * if the channel layer has two channels, size = 2
+ * @is_virt_csrow: This layer is part of the "csrow" when old API
+ * compatibility mode is enabled. Otherwise, it is
+ * a channel
+ */
+struct edac_mc_layer {
+ enum edac_mc_layer_type type;
+ unsigned size;
+ bool is_virt_csrow;
+};
+
+/*
+ * Maximum number of layers used by the memory controller to uniquely
+ * identify a single memory stick.
+ * NOTE: Changing this constant requires changes not only to the macro
+ * below, but also to existing code in the core, as some of that code
+ * is optimized for 3 layers.
+ */
+#define EDAC_MAX_LAYERS 3
+
+/**
+ * EDAC_DIMM_PTR - Macro responsible for finding a pointer inside a pointer array
+ * for the element given by [layer0,layer1,layer2] position
+ *
+ * @layers: a struct edac_mc_layer array, describing how many elements
+ * were allocated for each layer
+ * @var: name of the var where we want to get the pointer
+ * (like mci->dimms)
+ * @n_layers: Number of layers at the @layers array
+ * @layer0: layer0 position
+ * @layer1: layer1 position. Unused if n_layers < 2
+ * @layer2: layer2 position. Unused if n_layers < 3
+ *
+ * For 1 layer, this macro returns &var[layer0]
+ * For 2 layers, this macro behaves as if a two-dimensional array had
+ * been allocated and returns "&var[layer0][layer1]"
+ * For 3 layers, it behaves as if a three-dimensional array had been
+ * allocated and returns "&var[layer0][layer1][layer2]"
+ *
+ * A loop could be used here to make it more generic, but, as we only have
+ * 3 layers, this is a little faster.
+ * By design, layers can never be 0 or more than 3. If that ever happens,
+ * a NULL is returned, causing an OOPS during the memory allocation routine,
+ * which would point the developer to the mistake.
+ */
+#define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \
+ typeof(var) __p; \
+ if ((nlayers) == 1) \
+ __p = &var[layer0]; \
+ else if ((nlayers) == 2) \
+ __p = &var[(layer1) + ((layers[1]).size * (layer0))]; \
+ else if ((nlayers) == 3) \
+ __p = &var[(layer2) + ((layers[2]).size * ((layer1) + \
+ ((layers[1]).size * (layer0))))]; \
+ else \
+ __p = NULL; \
+ __p; \
+})
+
+
+/* FIXME: add the proper per-location error counts */
+struct dimm_info {
+ char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */
+
+ /* Memory location data */
+ unsigned location[EDAC_MAX_LAYERS];
+
+ struct mem_ctl_info *mci; /* the parent */
+
+ u32 grain; /* granularity of reported error in bytes */
+ enum dev_type dtype; /* memory device type */
+ enum mem_type mtype; /* memory dimm type */
+ enum edac_type edac_mode; /* EDAC mode for this dimm */
+
+ u32 nr_pages; /* number of pages on this dimm */
+
+ unsigned csrow, cschannel; /* Points to the old API data */
+};
+
+/**
* struct rank_info - contains the information for one DIMM rank
*
* @chan_idx: channel number where the rank is (typically, 0 or 1)
* @ce_count: number of correctable errors for this rank
- * @label: DIMM label. Different ranks for the same DIMM should be
- * filled, on userspace, with the same label.
- * FIXME: The core currently won't enforce it.
* @csrow: A pointer to the chip select row structure (the parent
* structure). The location of the rank is given by
* the (csrow->csrow_idx, chan_idx) vector.
+ * @dimm: A pointer to the DIMM structure, where the DIMM label
+ * information is stored.
+ *
+ * FIXME: Currently, the EDAC core model will assume one DIMM per rank.
+ * This is a bad assumption, but it makes this patch easier. Later
+ * patches in this series will fix this issue.
*/
struct rank_info {
int chan_idx;
- u32 ce_count;
- char label[EDAC_MC_LABEL_LEN + 1];
- struct csrow_info *csrow; /* the parent */
+ struct csrow_info *csrow;
+ struct dimm_info *dimm;
+
+ u32 ce_count; /* Correctable Errors for this csrow */
};
struct csrow_info {
- unsigned long first_page; /* first page number in dimm */
- unsigned long last_page; /* last page number in dimm */
+ /* Used only by edac_mc_find_csrow_by_page() */
+ unsigned long first_page; /* first page number in csrow */
+ unsigned long last_page; /* last page number in csrow */
unsigned long page_mask; /* used for interleaving -
- * 0UL for non intlv
- */
- u32 nr_pages; /* number of pages in csrow */
- u32 grain; /* granularity of reported error in bytes */
- int csrow_idx; /* the chip-select row */
- enum dev_type dtype; /* memory device type */
+ * 0UL for non intlv */
+
+ int csrow_idx; /* the chip-select row */
+
u32 ue_count; /* Uncorrectable Errors for this csrow */
u32 ce_count; /* Correctable Errors for this csrow */
- enum mem_type mtype; /* memory csrow type */
- enum edac_type edac_mode; /* EDAC mode for this csrow */
+
struct mem_ctl_info *mci; /* the parent */
struct kobject kobj; /* sysfs kobject for this csrow */
@@ -426,8 +548,20 @@ struct mem_ctl_info {
unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
unsigned long page);
int mc_idx;
- int nr_csrows;
struct csrow_info *csrows;
+ unsigned nr_csrows, num_cschannel;
+
+ /* Memory Controller hierarchy */
+ unsigned n_layers;
+ struct edac_mc_layer *layers;
+ bool mem_is_per_rank;
+
+ /*
* DIMM info. The entire csrow_info array will eventually be removed
+ */
+ unsigned tot_dimms;
+ struct dimm_info *dimms;
+
/*
* FIXME - what about controllers on other busses? - IDs must be
* unique. dev pointer should be sufficiently unique, but
@@ -440,12 +574,16 @@ struct mem_ctl_info {
const char *dev_name;
char proc_name[MC_PROC_NAME_MAX_LEN + 1];
void *pvt_info;
- u32 ue_noinfo_count; /* Uncorrectable Errors w/o info */
- u32 ce_noinfo_count; /* Correctable Errors w/o info */
- u32 ue_count; /* Total Uncorrectable Errors for this MC */
- u32 ce_count; /* Total Correctable Errors for this MC */
unsigned long start_time; /* mci load start time (in jiffies) */
+ /*
+ * drivers shouldn't access those fields directly, as the core
+ * already handles that.
+ */
+ u32 ce_noinfo_count, ue_noinfo_count;
+ u32 ue_mc, ce_mc;
+ u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
+
struct completion complete;
/* edac sysfs device control */
@@ -458,7 +596,7 @@ struct mem_ctl_info {
* by the low level driver.
*
* Set by the low level driver to provide attributes at the
- * controller level, same level as 'ue_count' and 'ce_count' above.
+ * controller level.
* An array of structures, NULL terminated
*
* If attributes are desired, then set to array of attributes
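
As a hedged worked example of the EDAC_DIMM_PTR() indexing described above (the layer sizes are invented, not from any driver in this patch): with two layers of 2 channels by 4 slots, the element for channel 1, slot 2 resolves to &mci->dimms[2 + 4 * 1], i.e. &mci->dimms[6].

#include <linux/edac.h>

static struct dimm_info *example_pick_dimm(struct mem_ctl_info *mci)
{
	struct edac_mc_layer layers[2] = {
		{ .type = EDAC_MC_LAYER_CHANNEL, .size = 2, .is_virt_csrow = false },
		{ .type = EDAC_MC_LAYER_SLOT,    .size = 4, .is_virt_csrow = true  },
	};

	/* expands to &mci->dimms[(2) + 4 * (1)], i.e. &mci->dimms[6] */
	return EDAC_DIMM_PTR(layers, mci->dimms, 2, 1, 2, 0);
}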
diff --git a/include/linux/fb.h b/include/linux/fb.h
index d31cb682e17..a3229d7ab9f 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -554,6 +554,10 @@ struct fb_cursor_user {
#define FB_EVENT_FB_UNBIND 0x0E
/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga switcheroo */
#define FB_EVENT_REMAP_ALL_CONSOLE 0x0F
+/* A hardware display blank early change occurred */
+#define FB_EARLY_EVENT_BLANK 0x10
+/* A hardware display blank revert early change occurred */
+#define FB_R_EARLY_EVENT_BLANK 0x11
struct fb_event {
struct fb_info *info;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c0e53372b08..038076b27ea 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1681,7 +1681,6 @@ struct inode_operations {
ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*removexattr) (struct dentry *, const char *);
- void (*truncate_range)(struct inode *, loff_t, loff_t);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
} ____cacheline_aligned;
@@ -1764,8 +1763,8 @@ struct super_operations {
* I_FREEING Set when inode is about to be freed but still has dirty
* pages or buffers attached or the inode itself is still
* dirty.
- * I_CLEAR Added by end_writeback(). In this state the inode is clean
- * and can be destroyed. Inode keeps I_FREEING.
+ * I_CLEAR Added by clear_inode(). In this state the inode is
+ * clean and can be destroyed. Inode keeps I_FREEING.
*
* Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are
* prohibited for many purposes. iget() must wait for
@@ -1773,9 +1772,10 @@ struct super_operations {
* anew. Other functions will just ignore such inodes,
* if appropriate. I_NEW is used for waiting.
*
- * I_SYNC Synchonized write of dirty inode data. The bits is
- * set during data writeback, and cleared with a wakeup
- * on the bit address once it is done.
+ * I_SYNC Writeback of inode is running. The bit is set during
+ * data writeback, and cleared with a wakeup on the bit
+ * address once it is done. The bit is also used to pin
+ * the inode in memory for the flusher thread.
*
* I_REFERENCED Marks the inode as recently referenced on the LRU list.
*
@@ -2349,7 +2349,7 @@ extern unsigned int get_next_ino(void);
extern void __iget(struct inode * inode);
extern void iget_failed(struct inode *);
-extern void end_writeback(struct inode *);
+extern void clear_inode(struct inode *);
extern void __destroy_inode(struct inode *);
extern struct inode *new_inode_pseudo(struct super_block *sb);
extern struct inode *new_inode(struct super_block *sb);
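
For filesystems the rename is mechanical; a hedged sketch of a converted ->evict_inode() (illustrative only, not taken from any filesystem in this patch):

#include <linux/fs.h>
#include <linux/mm.h>

static void example_evict_inode(struct inode *inode)
{
	truncate_inode_pages(&inode->i_data, 0);
	clear_inode(inode);	/* was end_writeback(inode) before this change */
}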
diff --git a/include/linux/fsl/mxs-dma.h b/include/linux/fsl/mxs-dma.h
index 203d7c4a3e1..55d87023839 100644
--- a/include/linux/fsl/mxs-dma.h
+++ b/include/linux/fsl/mxs-dma.h
@@ -15,14 +15,6 @@ struct mxs_dma_data {
int chan_irq;
};
-static inline int mxs_dma_is_apbh(struct dma_chan *chan)
-{
- return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbh");
-}
-
-static inline int mxs_dma_is_apbx(struct dma_chan *chan)
-{
- return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbx");
-}
-
+extern int mxs_dma_is_apbh(struct dma_chan *chan);
+extern int mxs_dma_is_apbx(struct dma_chan *chan);
#endif /* __MACH_MXS_DMA_H__ */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 581e74b7df9..1e49be49d32 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -391,4 +391,16 @@ static inline bool pm_suspended_storage(void)
}
#endif /* CONFIG_PM_SLEEP */
+#ifdef CONFIG_CMA
+
+/* The below functions must be run on a range from a single zone. */
+extern int alloc_contig_range(unsigned long start, unsigned long end,
+ unsigned migratetype);
+extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
+
+/* CMA stuff */
+extern void init_cma_reserved_pageblock(struct page *page);
+
+#endif
+
#endif /* __LINUX_GFP_H */
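
A hedged sketch of the intended CMA-internal use of the new range helpers, assuming the MIGRATE_CMA migratetype introduced by the same series; the pfn values and function name are placeholders.

#include <linux/gfp.h>
#include <linux/mmzone.h>

static int example_grab_range(unsigned long start_pfn, unsigned long nr_pages)
{
	int ret;

	ret = alloc_contig_range(start_pfn, start_pfn + nr_pages, MIGRATE_CMA);
	if (ret)
		return ret;

	/* ... hand the pages to the device ... */

	free_contig_range(start_pfn, nr_pages);
	return 0;
}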
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index c8af7a2efb5..4c59b113118 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -59,6 +59,8 @@ extern pmd_t *page_check_address_pmd(struct page *page,
#define HPAGE_PMD_MASK HPAGE_MASK
#define HPAGE_PMD_SIZE HPAGE_SIZE
+extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
+
#define transparent_hugepage_enabled(__vma) \
((transparent_hugepage_flags & \
(1<<TRANSPARENT_HUGEPAGE_FLAG) || \
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 000837e126e..d5d6bbe2259 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -284,6 +284,14 @@ static inline unsigned int blocks_per_huge_page(struct hstate *h)
#include <asm/hugetlb.h>
+#ifndef arch_make_huge_pte
+static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+ struct page *page, int writable)
+{
+ return entry;
+}
+#endif
+
static inline struct hstate *page_hstate(struct page *page)
{
return size_to_hstate(PAGE_SIZE << compound_order(page));
diff --git a/include/linux/gpio-i2cmux.h b/include/linux/i2c-mux-gpio.h
index 4a333bb0bd0..a36343a37eb 100644
--- a/include/linux/gpio-i2cmux.h
+++ b/include/linux/i2c-mux-gpio.h
@@ -1,5 +1,5 @@
/*
- * gpio-i2cmux interface to platform code
+ * i2c-mux-gpio interface to platform code
*
* Peter Korsgaard <peter.korsgaard@barco.com>
*
@@ -8,14 +8,14 @@
* published by the Free Software Foundation.
*/
-#ifndef _LINUX_GPIO_I2CMUX_H
-#define _LINUX_GPIO_I2CMUX_H
+#ifndef _LINUX_I2C_MUX_GPIO_H
+#define _LINUX_I2C_MUX_GPIO_H
/* MUX has no specific idle mode */
-#define GPIO_I2CMUX_NO_IDLE ((unsigned)-1)
+#define I2C_MUX_GPIO_NO_IDLE ((unsigned)-1)
/**
- * struct gpio_i2cmux_platform_data - Platform-dependent data for gpio-i2cmux
+ * struct i2c_mux_gpio_platform_data - Platform-dependent data for i2c-mux-gpio
* @parent: Parent I2C bus adapter number
* @base_nr: Base I2C bus number to number adapters from or zero for dynamic
* @values: Array of bitmasks of GPIO settings (low/high) for each
@@ -25,7 +25,7 @@
* @n_gpios: Number of GPIOs used to control MUX
* @idle: Bitmask to write to MUX when idle or I2C_MUX_GPIO_NO_IDLE if not used
*/
-struct gpio_i2cmux_platform_data {
+struct i2c_mux_gpio_platform_data {
int parent;
int base_nr;
const unsigned *values;
@@ -35,4 +35,4 @@ struct gpio_i2cmux_platform_data {
unsigned idle;
};
-#endif /* _LINUX_GPIO_I2CMUX_H */
+#endif /* _LINUX_I2C_MUX_GPIO_H */
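
A hedged board-file sketch using the renamed platform data; the GPIO and channel values are invented, and the n_values/gpios/n_gpios members are assumed from the kernel-doc above.

#include <linux/i2c-mux-gpio.h>
#include <linux/kernel.h>	/* ARRAY_SIZE() */

static const unsigned example_mux_values[] = { 0, 1 };
static const unsigned example_mux_gpios[] = { 42 };	/* hypothetical GPIO */

static struct i2c_mux_gpio_platform_data example_mux_pdata = {
	.parent   = 1,
	.base_nr  = 0,				/* dynamic adapter numbers */
	.values   = example_mux_values,
	.n_values = ARRAY_SIZE(example_mux_values),
	.gpios    = example_mux_gpios,
	.n_gpios  = ARRAY_SIZE(example_mux_gpios),
	.idle     = I2C_MUX_GPIO_NO_IDLE,
};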
diff --git a/include/linux/i2c-mux.h b/include/linux/i2c-mux.h
index 747f0cde416..c7908383001 100644
--- a/include/linux/i2c-mux.h
+++ b/include/linux/i2c-mux.h
@@ -34,7 +34,8 @@
* mux control.
*/
struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
- void *mux_dev, u32 force_nr, u32 chan_id,
+ struct device *mux_dev,
+ void *mux_priv, u32 force_nr, u32 chan_id,
int (*select) (struct i2c_adapter *,
void *mux_dev, u32 chan_id),
int (*deselect) (struct i2c_adapter *,
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 195d8b3d9cf..b66cb601435 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -232,6 +232,7 @@ struct i2c_client {
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
extern struct i2c_client *i2c_verify_client(struct device *dev);
+extern struct i2c_adapter *i2c_verify_adapter(struct device *dev);
static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
{
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index d211732b9e9..c8f32975f0e 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -479,12 +479,6 @@ struct transaction_s
* How many handles used this transaction? [t_handle_lock]
*/
int t_handle_count;
-
- /*
- * This transaction is being forced and some process is
- * waiting for it to finish.
- */
- unsigned int t_synchronous_commit:1;
};
/**
@@ -531,6 +525,8 @@ struct transaction_s
* transaction
* @j_commit_request: Sequence number of the most recent transaction wanting
* commit
+ * @j_commit_waited: Sequence number of the most recent transaction that
+ * someone is waiting to see committed.
* @j_uuid: Uuid of client object.
* @j_task: Pointer to the current commit thread for this journal
* @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a
@@ -696,6 +692,13 @@ struct journal_s
tid_t j_commit_request;
/*
+ * Sequence number of the most recent transaction that someone is
+ * waiting to see committed.
+ * [j_state_lock]
+ */
+ tid_t j_commit_waited;
+
+ /*
* Journal uuid: identifies the object (filesystem, LVM volume etc)
* backed by this journal. This will eventually be replaced by an array
* of uuids, allowing us to index multiple devices within a single
@@ -861,7 +864,8 @@ extern int journal_destroy (journal_t *);
extern int journal_recover (journal_t *journal);
extern int journal_wipe (journal_t *, int);
extern int journal_skip_recovery (journal_t *);
-extern void journal_update_superblock (journal_t *, int);
+extern void journal_update_sb_log_tail (journal_t *, tid_t, unsigned int,
+ int);
extern void journal_abort (journal_t *, int);
extern int journal_errno (journal_t *);
extern void journal_ack_err (journal_t *);
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 387571959dd..6883e197acb 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -36,6 +36,7 @@ const char *kallsyms_lookup(unsigned long addr,
/* Look up a kernel symbol and return it in a text buffer. */
extern int sprint_symbol(char *buffer, unsigned long address);
+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
extern int sprint_backtrace(char *buffer, unsigned long address);
/* Look up a kernel symbol and print it to the kernel messages. */
@@ -80,6 +81,12 @@ static inline int sprint_symbol(char *buffer, unsigned long addr)
return 0;
}
+static inline int sprint_symbol_no_offset(char *buffer, unsigned long addr)
+{
+ *buffer = '\0';
+ return 0;
+}
+
static inline int sprint_backtrace(char *buffer, unsigned long addr)
{
*buffer = '\0';
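
A hedged usage sketch of the new variant, which prints the symbol name without the +offset/size suffix that sprint_symbol() appends (the caller and message are invented):

#include <linux/kallsyms.h>
#include <linux/printk.h>

static void example_report_caller(unsigned long addr)
{
	char sym[KSYM_SYMBOL_LEN];

	sprint_symbol_no_offset(sym, addr);
	pr_info("called from %s\n", sym);
}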
diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h
index 26a65711676..a1bdf696635 100644
--- a/include/linux/kernel-page-flags.h
+++ b/include/linux/kernel-page-flags.h
@@ -32,6 +32,8 @@
#define KPF_KSM 21
#define KPF_THP 22
+#ifdef __KERNEL__
+
/* kernel hacking assistances
* WARNING: subject to change, never rely on them!
*/
@@ -44,4 +46,6 @@
#define KPF_ARCH 38
#define KPF_UNCACHED 39
+#endif /* __KERNEL__ */
+
#endif /* LINUX_KERNEL_PAGE_FLAGS_H */
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 6c322a90b92..09f2b3aa2da 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -449,6 +449,30 @@ struct kvm_ppc_pvinfo {
__u8 pad[108];
};
+/* for KVM_PPC_GET_SMMU_INFO */
+#define KVM_PPC_PAGE_SIZES_MAX_SZ 8
+
+struct kvm_ppc_one_page_size {
+ __u32 page_shift; /* Page shift (or 0) */
+ __u32 pte_enc; /* Encoding in the HPTE (>>12) */
+};
+
+struct kvm_ppc_one_seg_page_size {
+ __u32 page_shift; /* Base page shift of segment (or 0) */
+ __u32 slb_enc; /* SLB encoding for BookS */
+ struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ];
+};
+
+#define KVM_PPC_PAGE_SIZES_REAL 0x00000001
+#define KVM_PPC_1T_SEGMENTS 0x00000002
+
+struct kvm_ppc_smmu_info {
+ __u64 flags;
+ __u32 slb_size;
+ __u32 pad;
+ struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
+};
+
#define KVMIO 0xAE
/* machine type bits, to be used as argument to KVM_CREATE_VM */
@@ -589,6 +613,10 @@ struct kvm_ppc_pvinfo {
#define KVM_CAP_S390_UCONTROL 73
#define KVM_CAP_SYNC_REGS 74
#define KVM_CAP_PCI_2_3 75
+#define KVM_CAP_KVMCLOCK_CTRL 76
+#define KVM_CAP_SIGNAL_MSI 77
+#define KVM_CAP_PPC_GET_SMMU_INFO 78
+#define KVM_CAP_S390_COW 79
#ifdef KVM_CAP_IRQ_ROUTING
@@ -714,6 +742,14 @@ struct kvm_one_reg {
__u64 addr;
};
+struct kvm_msi {
+ __u32 address_lo;
+ __u32 address_hi;
+ __u32 data;
+ __u32 flags;
+ __u8 pad[16];
+};
+
/*
* ioctls for VM fds
*/
@@ -788,6 +824,10 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_PCI_2_3 */
#define KVM_ASSIGN_SET_INTX_MASK _IOW(KVMIO, 0xa4, \
struct kvm_assigned_pci_dev)
+/* Available with KVM_CAP_SIGNAL_MSI */
+#define KVM_SIGNAL_MSI _IOW(KVMIO, 0xa5, struct kvm_msi)
+/* Available with KVM_CAP_PPC_GET_SMMU_INFO */
+#define KVM_PPC_GET_SMMU_INFO _IOR(KVMIO, 0xa6, struct kvm_ppc_smmu_info)
/*
* ioctls for vcpu fds
@@ -859,6 +899,8 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_ONE_REG */
#define KVM_GET_ONE_REG _IOW(KVMIO, 0xab, struct kvm_one_reg)
#define KVM_SET_ONE_REG _IOW(KVMIO, 0xac, struct kvm_one_reg)
+/* VM is being stopped by host */
+#define KVM_KVMCLOCK_CTRL _IO(KVMIO, 0xad)
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
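
A hedged userspace sketch of the new MSI injection ioctl; the VM file descriptor and the MSI address/data values are placeholders, and flags are left at zero.

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_inject_msi(int vm_fd)
{
	struct kvm_msi msi = {
		.address_lo = 0xfee00000u,	/* placeholder MSI address */
		.data       = 0x4041,		/* placeholder vector/data */
	};

	/* negative return value means the injection failed */
	return ioctl(vm_fd, KVM_SIGNAL_MSI, &msi);
}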
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 72cbf08d45f..c4464356b35 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -35,6 +35,20 @@
#endif
/*
+ * If we support unaligned MMIO, at most one fragment will be split into two:
+ */
+#ifdef KVM_UNALIGNED_MMIO
+# define KVM_EXTRA_MMIO_FRAGMENTS 1
+#else
+# define KVM_EXTRA_MMIO_FRAGMENTS 0
+#endif
+
+#define KVM_USER_MMIO_SIZE 8
+
+#define KVM_MAX_MMIO_FRAGMENTS \
+ (KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS)
+
+/*
* vcpu->requests bit members
*/
#define KVM_REQ_TLB_FLUSH 0
@@ -68,10 +82,11 @@ struct kvm_io_range {
struct kvm_io_device *dev;
};
+#define NR_IOBUS_DEVS 1000
+
struct kvm_io_bus {
int dev_count;
-#define NR_IOBUS_DEVS 300
- struct kvm_io_range range[NR_IOBUS_DEVS];
+ struct kvm_io_range range[];
};
enum kvm_bus {
@@ -113,7 +128,18 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
enum {
OUTSIDE_GUEST_MODE,
IN_GUEST_MODE,
- EXITING_GUEST_MODE
+ EXITING_GUEST_MODE,
+ READING_SHADOW_PAGE_TABLES,
+};
+
+/*
+ * Sometimes a large or cross-page mmio needs to be broken up into separate
+ * exits for userspace servicing.
+ */
+struct kvm_mmio_fragment {
+ gpa_t gpa;
+ void *data;
+ unsigned len;
};
struct kvm_vcpu {
@@ -143,10 +169,9 @@ struct kvm_vcpu {
int mmio_needed;
int mmio_read_completed;
int mmio_is_write;
- int mmio_size;
- int mmio_index;
- unsigned char mmio_data[KVM_MMIO_SIZE];
- gpa_t mmio_phys_addr;
+ int mmio_cur_fragment;
+ int mmio_nr_fragments;
+ struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif
#ifdef CONFIG_KVM_ASYNC_PF
@@ -178,8 +203,6 @@ struct kvm_memory_slot {
unsigned long flags;
unsigned long *rmap;
unsigned long *dirty_bitmap;
- unsigned long *dirty_bitmap_head;
- unsigned long nr_dirty_pages;
struct kvm_arch_memory_slot arch;
unsigned long userspace_addr;
int user_alloc;
@@ -438,6 +461,8 @@ void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
gfn_t gfn);
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
+void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
+bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
@@ -506,6 +531,7 @@ int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
void kvm_free_physmem(struct kvm *kvm);
@@ -521,6 +547,15 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
}
#endif
+static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
+{
+#ifdef __KVM_HAVE_ARCH_WQP
+ return vcpu->arch.wqp;
+#else
+ return &vcpu->wq;
+#endif
+}
+
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
@@ -769,6 +804,8 @@ int kvm_set_irq_routing(struct kvm *kvm,
unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);
+int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
+
#else
static inline void kvm_free_irq_routing(struct kvm *kvm) {}
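
A hedged sketch (not from the patch) of how the new per-vcpu fragment array replaces the old single mmio_data buffer when MMIO is completed in pieces; it assumes the config option guarding these members is enabled.

static void example_walk_mmio_fragments(struct kvm_vcpu *vcpu)
{
	while (vcpu->mmio_cur_fragment < vcpu->mmio_nr_fragments) {
		struct kvm_mmio_fragment *frag =
			&vcpu->mmio_fragments[vcpu->mmio_cur_fragment++];

		/* each fragment describes one gpa/data/len chunk to service */
		(void)frag;
	}
}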
diff --git a/include/linux/lcd.h b/include/linux/lcd.h
index 8877123f2d6..e00c3b0ebc6 100644
--- a/include/linux/lcd.h
+++ b/include/linux/lcd.h
@@ -40,6 +40,16 @@ struct lcd_ops {
/* Get the LCD panel power status (0: full on, 1..3: controller
power on, flat panel power off, 4: full off), see FB_BLANK_XXX */
int (*get_power)(struct lcd_device *);
+ /*
+ * Enable or disable power to the LCD (0: on; 4: off, see FB_BLANK_XXX).
+ * This callback is called prior to the fb driver's callback.
+ *
+ * Note that if early_set_power is not NULL, an early fb notifier
+ * will be registered.
+ */
+ int (*early_set_power)(struct lcd_device *, int power);
+ /* revert the effects of the early blank event. */
+ int (*r_early_set_power)(struct lcd_device *, int power);
/* Enable or disable power to the LCD (0: on; 4: off, see FB_BLANK_XXX) */
int (*set_power)(struct lcd_device *, int power);
/* Get the current contrast setting (0-max_contrast) */
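
A hedged sketch of an lcd_ops wired up to the new early-blank hook; both callbacks are hypothetical stubs.

#include <linux/lcd.h>

static int example_early_set_power(struct lcd_device *ld, int power)
{
	/* runs on FB_EARLY_EVENT_BLANK, before the fb driver blanks */
	return 0;
}

static int example_set_power(struct lcd_device *ld, int power)
{
	return 0;
}

static struct lcd_ops example_lcd_ops = {
	.early_set_power = example_early_set_power,
	.set_power	 = example_set_power,
};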
diff --git a/include/linux/led-lm3530.h b/include/linux/led-lm3530.h
index eeae6e74247..4b133479d6e 100644
--- a/include/linux/led-lm3530.h
+++ b/include/linux/led-lm3530.h
@@ -92,7 +92,7 @@ struct lm3530_pwm_data {
* @als2_resistor_sel: internal resistance from ALS2 input to ground
* @als_vmin: als input voltage calibrated for max brightness in mV
* @als_vmax: als input voltage calibrated for min brightness in mV
- * @brt_val: brightness value (0-255)
+ * @brt_val: brightness value (0-127)
* @pwm_data: PWM control functions (only valid when the mode is PWM)
*/
struct lm3530_platform_data {
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 5884def15a2..39eee41d8c6 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -73,6 +73,8 @@ struct led_classdev {
struct led_trigger *trigger;
struct list_head trig_list;
void *trigger_data;
+ /* true if activated - deactivate routine uses it to do cleanup */
+ bool activated;
#endif
};
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index f94efd2f6c2..83e7ba90d6e 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -63,12 +63,7 @@ extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask);
struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
-struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
- enum lru_list);
-void mem_cgroup_lru_del_list(struct page *, enum lru_list);
-void mem_cgroup_lru_del(struct page *);
-struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
- enum lru_list, enum lru_list);
+struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
/* For coalescing uncharge for reducing memcg' overhead*/
extern void mem_cgroup_uncharge_start(void);
@@ -79,6 +74,8 @@ extern void mem_cgroup_uncharge_cache_page(struct page *page);
extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
int order);
+bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
+ struct mem_cgroup *memcg);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);
extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
@@ -92,10 +89,13 @@ static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
struct mem_cgroup *memcg;
+ int match;
+
rcu_read_lock();
memcg = mem_cgroup_from_task(rcu_dereference((mm)->owner));
+ match = __mem_cgroup_same_or_subtree(cgroup, memcg);
rcu_read_unlock();
- return cgroup == memcg;
+ return match;
}
extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
@@ -114,17 +114,11 @@ void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
/*
* For memory reclaim.
*/
-int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg,
- struct zone *zone);
-int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg,
- struct zone *zone);
+int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
+int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
-unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
- int nid, int zid, unsigned int lrumask);
-struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
- struct zone *zone);
-struct zone_reclaim_stat*
-mem_cgroup_get_reclaim_stat_from_page(struct page *page);
+unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
+void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
@@ -251,25 +245,8 @@ static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
return &zone->lruvec;
}
-static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
- struct page *page,
- enum lru_list lru)
-{
- return &zone->lruvec;
-}
-
-static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
-{
-}
-
-static inline void mem_cgroup_lru_del(struct page *page)
-{
-}
-
-static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
- struct page *page,
- enum lru_list from,
- enum lru_list to)
+static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
+ struct zone *zone)
{
return &zone->lruvec;
}
@@ -333,35 +310,27 @@ static inline bool mem_cgroup_disabled(void)
}
static inline int
-mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
+mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
return 1;
}
static inline int
-mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
+mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
{
return 1;
}
static inline unsigned long
-mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
- unsigned int lru_mask)
+mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
return 0;
}
-
-static inline struct zone_reclaim_stat*
-mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
-{
- return NULL;
-}
-
-static inline struct zone_reclaim_stat*
-mem_cgroup_get_reclaim_stat_from_page(struct page *page)
+static inline void
+mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
+ int increment)
{
- return NULL;
}
static inline void
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 7c727a90d70..4aa42732e47 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -225,8 +225,8 @@ static inline void check_highest_zone(enum zone_type k)
policy_zone = k;
}
-int do_migrate_pages(struct mm_struct *mm,
- const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);
+int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
+ const nodemask_t *to, int flags);
#ifdef CONFIG_TMPFS
@@ -354,9 +354,8 @@ static inline bool mempolicy_nodemask_intersects(struct task_struct *tsk,
return false;
}
-static inline int do_migrate_pages(struct mm_struct *mm,
- const nodemask_t *from_nodes,
- const nodemask_t *to_nodes, int flags)
+static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
+ const nodemask_t *to, int flags)
{
return 0;
}
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index fccc3002f27..91dd3ef63e9 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -7,6 +7,7 @@
#ifndef MFD_AB8500_H
#define MFD_AB8500_H
+#include <linux/atomic.h>
#include <linux/mutex.h>
struct device;
@@ -194,6 +195,14 @@ enum ab8500_version {
#define AB9540_INT_GPIO52F 123
#define AB9540_INT_GPIO53F 124
#define AB9540_INT_GPIO54F 125 /* not 8505 */
+/* ab8500_irq_regoffset[16] -> IT[Source|Latch|Mask]25 */
+#define AB8505_INT_KEYSTUCK 128
+#define AB8505_INT_IKR 129
+#define AB8505_INT_IKP 130
+#define AB8505_INT_KP 131
+#define AB8505_INT_KEYDEGLITCH 132
+#define AB8505_INT_MODPWRSTATUSF 134
+#define AB8505_INT_MODPWRSTATUSR 135
/*
* AB8500_AB9540_NR_IRQS is used when configuring the IRQ numbers for the
@@ -203,8 +212,8 @@ enum ab8500_version {
* which is larger.
*/
#define AB8500_NR_IRQS 112
-#define AB8505_NR_IRQS 128
-#define AB9540_NR_IRQS 128
+#define AB8505_NR_IRQS 136
+#define AB9540_NR_IRQS 136
/* This is set to the roof of any AB8500 chip variant IRQ counts */
#define AB8500_MAX_NR_IRQS AB9540_NR_IRQS
@@ -216,6 +225,7 @@ enum ab8500_version {
* @dev: parent device
* @lock: read/write operations lock
* @irq_lock: genirq bus lock
+ * @transfer_ongoing: 0 if no transfer ongoing
* @irq: irq line
* @version: chip version id (e.g. ab8500 or ab9540)
* @chip_id: chip revision id
@@ -234,7 +244,7 @@ struct ab8500 {
struct device *dev;
struct mutex lock;
struct mutex irq_lock;
-
+ atomic_t transfer_ongoing;
int irq_base;
int irq;
enum ab8500_version version;
@@ -280,6 +290,8 @@ extern int __devinit ab8500_init(struct ab8500 *ab8500,
enum ab8500_version version);
extern int __devexit ab8500_exit(struct ab8500 *ab8500);
+extern int ab8500_suspend(struct ab8500 *ab8500);
+
static inline int is_ab8500(struct ab8500 *ab)
{
return ab->version == AB8500_VERSION_AB8500;
diff --git a/include/linux/mfd/anatop.h b/include/linux/mfd/anatop.h
index 22c1007d3ec..7f92acf03d9 100644
--- a/include/linux/mfd/anatop.h
+++ b/include/linux/mfd/anatop.h
@@ -34,7 +34,7 @@ struct anatop {
spinlock_t reglock;
};
-extern u32 anatop_get_bits(struct anatop *, u32, int, int);
-extern void anatop_set_bits(struct anatop *, u32, int, int, u32);
+extern u32 anatop_read_reg(struct anatop *, u32);
+extern void anatop_write_reg(struct anatop *, u32, u32, u32);
#endif /* __LINUX_MFD_ANATOP_H */
diff --git a/include/linux/mfd/asic3.h b/include/linux/mfd/asic3.h
index ef6faa5cee4..e1148d037e7 100644
--- a/include/linux/mfd/asic3.h
+++ b/include/linux/mfd/asic3.h
@@ -31,6 +31,8 @@ struct asic3_platform_data {
unsigned int gpio_base;
+ unsigned int clock_rate;
+
struct asic3_led *leds;
};
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
index 8313cd9658e..0507c4c21a7 100644
--- a/include/linux/mfd/da9052/da9052.h
+++ b/include/linux/mfd/da9052/da9052.h
@@ -33,6 +33,18 @@
#include <linux/mfd/da9052/reg.h>
+/* Common - HWMON Channel Definitions */
+#define DA9052_ADC_VDDOUT 0
+#define DA9052_ADC_ICH 1
+#define DA9052_ADC_TBAT 2
+#define DA9052_ADC_VBAT 3
+#define DA9052_ADC_IN4 4
+#define DA9052_ADC_IN5 5
+#define DA9052_ADC_IN6 6
+#define DA9052_ADC_TSI 7
+#define DA9052_ADC_TJUNC 8
+#define DA9052_ADC_VBBAT 9
+
#define DA9052_IRQ_DCIN 0
#define DA9052_IRQ_VBUS 1
#define DA9052_IRQ_DCINREM 2
@@ -79,6 +91,9 @@ struct da9052 {
struct device *dev;
struct regmap *regmap;
+ struct mutex auxadc_lock;
+ struct completion done;
+
int irq_base;
struct regmap_irq_chip_data *irq_data;
u8 chip_id;
@@ -86,6 +101,10 @@ struct da9052 {
int chip_irq;
};
+/* ADC API */
+int da9052_adc_manual_read(struct da9052 *da9052, unsigned char channel);
+int da9052_adc_read_temp(struct da9052 *da9052);
+
/* Device I/O API */
static inline int da9052_reg_read(struct da9052 *da9052, unsigned char reg)
{
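
A hedged sketch using the new manual ADC read; the raw-to-millivolt scaling is device specific and left to the caller.

#include <linux/mfd/da9052/da9052.h>

static int example_read_vbat_raw(struct da9052 *da9052)
{
	int val = da9052_adc_manual_read(da9052, DA9052_ADC_VBAT);

	if (val < 0)
		return val;	/* negative errno on failure */

	return val;		/* raw ADC code, scaling left to the caller */
}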
diff --git a/include/linux/mfd/lm3533.h b/include/linux/mfd/lm3533.h
new file mode 100644
index 00000000000..594bc591f25
--- /dev/null
+++ b/include/linux/mfd/lm3533.h
@@ -0,0 +1,104 @@
+/*
+ * lm3533.h -- LM3533 interface
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_MFD_LM3533_H
+#define __LINUX_MFD_LM3533_H
+
+#define LM3533_ATTR_RO(_name) \
+ DEVICE_ATTR(_name, S_IRUGO, show_##_name, NULL)
+#define LM3533_ATTR_RW(_name) \
+ DEVICE_ATTR(_name, S_IRUGO | S_IWUSR , show_##_name, store_##_name)
+
+struct device;
+struct regmap;
+
+struct lm3533 {
+ struct device *dev;
+
+ struct regmap *regmap;
+
+ int gpio_hwen;
+ int irq;
+
+ unsigned have_als:1;
+ unsigned have_backlights:1;
+ unsigned have_leds:1;
+};
+
+struct lm3533_ctrlbank {
+ struct lm3533 *lm3533;
+ struct device *dev;
+ int id;
+};
+
+struct lm3533_als_platform_data {
+ unsigned pwm_mode:1; /* PWM input mode (default analog) */
+ u8 r_select; /* 1 - 127 (ignored in PWM-mode) */
+};
+
+struct lm3533_bl_platform_data {
+ char *name;
+ u16 max_current; /* 5000 - 29800 uA (800 uA step) */
+ u8 default_brightness; /* 0 - 255 */
+ u8 pwm; /* 0 - 0x3f */
+};
+
+struct lm3533_led_platform_data {
+ char *name;
+ const char *default_trigger;
+ u16 max_current; /* 5000 - 29800 uA (800 uA step) */
+ u8 pwm; /* 0 - 0x3f */
+};
+
+enum lm3533_boost_freq {
+ LM3533_BOOST_FREQ_500KHZ,
+ LM3533_BOOST_FREQ_1000KHZ,
+};
+
+enum lm3533_boost_ovp {
+ LM3533_BOOST_OVP_16V,
+ LM3533_BOOST_OVP_24V,
+ LM3533_BOOST_OVP_32V,
+ LM3533_BOOST_OVP_40V,
+};
+
+struct lm3533_platform_data {
+ int gpio_hwen;
+
+ enum lm3533_boost_ovp boost_ovp;
+ enum lm3533_boost_freq boost_freq;
+
+ struct lm3533_als_platform_data *als;
+
+ struct lm3533_bl_platform_data *backlights;
+ int num_backlights;
+
+ struct lm3533_led_platform_data *leds;
+ int num_leds;
+};
+
+extern int lm3533_ctrlbank_enable(struct lm3533_ctrlbank *cb);
+extern int lm3533_ctrlbank_disable(struct lm3533_ctrlbank *cb);
+
+extern int lm3533_ctrlbank_set_brightness(struct lm3533_ctrlbank *cb, u8 val);
+extern int lm3533_ctrlbank_get_brightness(struct lm3533_ctrlbank *cb, u8 *val);
+extern int lm3533_ctrlbank_set_max_current(struct lm3533_ctrlbank *cb,
+ u16 imax);
+extern int lm3533_ctrlbank_set_pwm(struct lm3533_ctrlbank *cb, u8 val);
+extern int lm3533_ctrlbank_get_pwm(struct lm3533_ctrlbank *cb, u8 *val);
+
+extern int lm3533_read(struct lm3533 *lm3533, u8 reg, u8 *val);
+extern int lm3533_write(struct lm3533 *lm3533, u8 reg, u8 val);
+extern int lm3533_update(struct lm3533 *lm3533, u8 reg, u8 val, u8 mask);
+
+#endif /* __LINUX_MFD_LM3533_H */
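
A hedged board-file sketch of the new platform data; all values are placeholders, not recommendations for real hardware.

#include <linux/mfd/lm3533.h>

static struct lm3533_bl_platform_data example_lm3533_backlight = {
	.name			= "lcd-backlight",
	.max_current		= 5000,		/* uA */
	.default_brightness	= 200,
	.pwm			= 0,
};

static struct lm3533_platform_data example_lm3533_pdata = {
	.gpio_hwen	= 12,			/* hypothetical HWEN GPIO */
	.boost_ovp	= LM3533_BOOST_OVP_24V,
	.boost_freq	= LM3533_BOOST_FREQ_500KHZ,
	.backlights	= &example_lm3533_backlight,
	.num_backlights	= 1,
};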
diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h
new file mode 100644
index 00000000000..fec5256c3f5
--- /dev/null
+++ b/include/linux/mfd/lpc_ich.h
@@ -0,0 +1,48 @@
+/*
+ * linux/drivers/mfd/lpc_ich.h
+ *
+ * Copyright (c) 2012 Extreme Engineering Solution, Inc.
+ * Author: Aaron Sierra <asierra@xes-inc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef LPC_ICH_H
+#define LPC_ICH_H
+
+/* Watchdog resources */
+#define ICH_RES_IO_TCO 0
+#define ICH_RES_IO_SMI 1
+#define ICH_RES_MEM_OFF 2
+#define ICH_RES_MEM_GCS 0
+
+/* GPIO resources */
+#define ICH_RES_GPIO 0
+#define ICH_RES_GPE0 1
+
+/* GPIO compatibility */
+#define ICH_I3100_GPIO 0x401
+#define ICH_V5_GPIO 0x501
+#define ICH_V6_GPIO 0x601
+#define ICH_V7_GPIO 0x701
+#define ICH_V9_GPIO 0x801
+#define ICH_V10CORP_GPIO 0xa01
+#define ICH_V10CONS_GPIO 0xa11
+
+struct lpc_ich_info {
+ char name[32];
+ unsigned int iTCO_version;
+ unsigned int gpio_version;
+};
+
+#endif
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
new file mode 100644
index 00000000000..68263c5fa53
--- /dev/null
+++ b/include/linux/mfd/max77693-private.h
@@ -0,0 +1,227 @@
+/*
+ * max77693-private.h - Voltage regulator driver for the Maxim 77693
+ *
+ * Copyright (C) 2012 Samsung Electronics
+ * SangYoung Son <hello.son@samsung.com>
+ *
+ * This program is not provided / owned by Maxim Integrated Products.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_MFD_MAX77693_PRIV_H
+#define __LINUX_MFD_MAX77693_PRIV_H
+
+#include <linux/i2c.h>
+
+#define MAX77693_NUM_IRQ_MUIC_REGS 3
+#define MAX77693_REG_INVALID (0xff)
+
+/* Slave addr = 0xCC: PMIC, Charger, Flash LED */
+enum max77693_pmic_reg {
+ MAX77693_LED_REG_IFLASH1 = 0x00,
+ MAX77693_LED_REG_IFLASH2 = 0x01,
+ MAX77693_LED_REG_ITORCH = 0x02,
+ MAX77693_LED_REG_ITORCHTIMER = 0x03,
+ MAX77693_LED_REG_FLASH_TIMER = 0x04,
+ MAX77693_LED_REG_FLASH_EN = 0x05,
+ MAX77693_LED_REG_MAX_FLASH1 = 0x06,
+ MAX77693_LED_REG_MAX_FLASH2 = 0x07,
+ MAX77693_LED_REG_MAX_FLASH3 = 0x08,
+ MAX77693_LED_REG_MAX_FLASH4 = 0x09,
+ MAX77693_LED_REG_VOUT_CNTL = 0x0A,
+ MAX77693_LED_REG_VOUT_FLASH1 = 0x0B,
+ MAX77693_LED_REG_VOUT_FLASH2 = 0x0C,
+ MAX77693_LED_REG_FLASH_INT = 0x0E,
+ MAX77693_LED_REG_FLASH_INT_MASK = 0x0F,
+ MAX77693_LED_REG_FLASH_INT_STATUS = 0x10,
+
+ MAX77693_PMIC_REG_PMIC_ID1 = 0x20,
+ MAX77693_PMIC_REG_PMIC_ID2 = 0x21,
+ MAX77693_PMIC_REG_INTSRC = 0x22,
+ MAX77693_PMIC_REG_INTSRC_MASK = 0x23,
+ MAX77693_PMIC_REG_TOPSYS_INT = 0x24,
+ MAX77693_PMIC_REG_TOPSYS_INT_MASK = 0x26,
+ MAX77693_PMIC_REG_TOPSYS_STAT = 0x28,
+ MAX77693_PMIC_REG_MAINCTRL1 = 0x2A,
+ MAX77693_PMIC_REG_LSCNFG = 0x2B,
+
+ MAX77693_CHG_REG_CHG_INT = 0xB0,
+ MAX77693_CHG_REG_CHG_INT_MASK = 0xB1,
+ MAX77693_CHG_REG_CHG_INT_OK = 0xB2,
+ MAX77693_CHG_REG_CHG_DETAILS_00 = 0xB3,
+ MAX77693_CHG_REG_CHG_DETAILS_01 = 0xB4,
+ MAX77693_CHG_REG_CHG_DETAILS_02 = 0xB5,
+ MAX77693_CHG_REG_CHG_DETAILS_03 = 0xB6,
+ MAX77693_CHG_REG_CHG_CNFG_00 = 0xB7,
+ MAX77693_CHG_REG_CHG_CNFG_01 = 0xB8,
+ MAX77693_CHG_REG_CHG_CNFG_02 = 0xB9,
+ MAX77693_CHG_REG_CHG_CNFG_03 = 0xBA,
+ MAX77693_CHG_REG_CHG_CNFG_04 = 0xBB,
+ MAX77693_CHG_REG_CHG_CNFG_05 = 0xBC,
+ MAX77693_CHG_REG_CHG_CNFG_06 = 0xBD,
+ MAX77693_CHG_REG_CHG_CNFG_07 = 0xBE,
+ MAX77693_CHG_REG_CHG_CNFG_08 = 0xBF,
+ MAX77693_CHG_REG_CHG_CNFG_09 = 0xC0,
+ MAX77693_CHG_REG_CHG_CNFG_10 = 0xC1,
+ MAX77693_CHG_REG_CHG_CNFG_11 = 0xC2,
+ MAX77693_CHG_REG_CHG_CNFG_12 = 0xC3,
+ MAX77693_CHG_REG_CHG_CNFG_13 = 0xC4,
+ MAX77693_CHG_REG_CHG_CNFG_14 = 0xC5,
+ MAX77693_CHG_REG_SAFEOUT_CTRL = 0xC6,
+
+ MAX77693_PMIC_REG_END,
+};
+
+/* Slave addr = 0x4A: MUIC */
+enum max77693_muic_reg {
+ MAX77693_MUIC_REG_ID = 0x00,
+ MAX77693_MUIC_REG_INT1 = 0x01,
+ MAX77693_MUIC_REG_INT2 = 0x02,
+ MAX77693_MUIC_REG_INT3 = 0x03,
+ MAX77693_MUIC_REG_STATUS1 = 0x04,
+ MAX77693_MUIC_REG_STATUS2 = 0x05,
+ MAX77693_MUIC_REG_STATUS3 = 0x06,
+ MAX77693_MUIC_REG_INTMASK1 = 0x07,
+ MAX77693_MUIC_REG_INTMASK2 = 0x08,
+ MAX77693_MUIC_REG_INTMASK3 = 0x09,
+ MAX77693_MUIC_REG_CDETCTRL1 = 0x0A,
+ MAX77693_MUIC_REG_CDETCTRL2 = 0x0B,
+ MAX77693_MUIC_REG_CTRL1 = 0x0C,
+ MAX77693_MUIC_REG_CTRL2 = 0x0D,
+ MAX77693_MUIC_REG_CTRL3 = 0x0E,
+
+ MAX77693_MUIC_REG_END,
+};
+
+/* Slave addr = 0x90: Haptic */
+enum max77693_haptic_reg {
+ MAX77693_HAPTIC_REG_STATUS = 0x00,
+ MAX77693_HAPTIC_REG_CONFIG1 = 0x01,
+ MAX77693_HAPTIC_REG_CONFIG2 = 0x02,
+ MAX77693_HAPTIC_REG_CONFIG_CHNL = 0x03,
+ MAX77693_HAPTIC_REG_CONFG_CYC1 = 0x04,
+ MAX77693_HAPTIC_REG_CONFG_CYC2 = 0x05,
+ MAX77693_HAPTIC_REG_CONFIG_PER1 = 0x06,
+ MAX77693_HAPTIC_REG_CONFIG_PER2 = 0x07,
+ MAX77693_HAPTIC_REG_CONFIG_PER3 = 0x08,
+ MAX77693_HAPTIC_REG_CONFIG_PER4 = 0x09,
+ MAX77693_HAPTIC_REG_CONFIG_DUTY1 = 0x0A,
+ MAX77693_HAPTIC_REG_CONFIG_DUTY2 = 0x0B,
+ MAX77693_HAPTIC_REG_CONFIG_PWM1 = 0x0C,
+ MAX77693_HAPTIC_REG_CONFIG_PWM2 = 0x0D,
+ MAX77693_HAPTIC_REG_CONFIG_PWM3 = 0x0E,
+ MAX77693_HAPTIC_REG_CONFIG_PWM4 = 0x0F,
+ MAX77693_HAPTIC_REG_REV = 0x10,
+
+ MAX77693_HAPTIC_REG_END,
+};
+
+enum max77693_irq_source {
+ LED_INT = 0,
+ TOPSYS_INT,
+ CHG_INT,
+ MUIC_INT1,
+ MUIC_INT2,
+ MUIC_INT3,
+
+ MAX77693_IRQ_GROUP_NR,
+};
+
+enum max77693_irq {
+ /* PMIC - FLASH */
+ MAX77693_LED_IRQ_FLED2_OPEN,
+ MAX77693_LED_IRQ_FLED2_SHORT,
+ MAX77693_LED_IRQ_FLED1_OPEN,
+ MAX77693_LED_IRQ_FLED1_SHORT,
+ MAX77693_LED_IRQ_MAX_FLASH,
+
+ /* PMIC - TOPSYS */
+ MAX77693_TOPSYS_IRQ_T120C_INT,
+ MAX77693_TOPSYS_IRQ_T140C_INT,
+ MAX77693_TOPSYS_IRQ_LOWSYS_INT,
+
+ /* PMIC - Charger */
+ MAX77693_CHG_IRQ_BYP_I,
+ MAX77693_CHG_IRQ_THM_I,
+ MAX77693_CHG_IRQ_BAT_I,
+ MAX77693_CHG_IRQ_CHG_I,
+ MAX77693_CHG_IRQ_CHGIN_I,
+
+ /* MUIC INT1 */
+ MAX77693_MUIC_IRQ_INT1_ADC,
+ MAX77693_MUIC_IRQ_INT1_ADC_LOW,
+ MAX77693_MUIC_IRQ_INT1_ADC_ERR,
+ MAX77693_MUIC_IRQ_INT1_ADC1K,
+
+ /* MUIC INT2 */
+ MAX77693_MUIC_IRQ_INT2_CHGTYP,
+ MAX77693_MUIC_IRQ_INT2_CHGDETREUN,
+ MAX77693_MUIC_IRQ_INT2_DCDTMR,
+ MAX77693_MUIC_IRQ_INT2_DXOVP,
+ MAX77693_MUIC_IRQ_INT2_VBVOLT,
+ MAX77693_MUIC_IRQ_INT2_VIDRM,
+
+ /* MUIC INT3 */
+ MAX77693_MUIC_IRQ_INT3_EOC,
+ MAX77693_MUIC_IRQ_INT3_CGMBC,
+ MAX77693_MUIC_IRQ_INT3_OVP,
+ MAX77693_MUIC_IRQ_INT3_MBCCHG_ERR,
+ MAX77693_MUIC_IRQ_INT3_CHG_ENABLED,
+ MAX77693_MUIC_IRQ_INT3_BAT_DET,
+
+ MAX77693_IRQ_NR,
+};
+
+struct max77693_dev {
+ struct device *dev;
+ struct i2c_client *i2c; /* 0xCC , PMIC, Charger, Flash LED */
+ struct i2c_client *muic; /* 0x4A , MUIC */
+ struct i2c_client *haptic; /* 0x90 , Haptic */
+ struct mutex iolock;
+
+ int type;
+
+ struct regmap *regmap;
+ struct regmap *regmap_muic;
+ struct regmap *regmap_haptic;
+
+ struct irq_domain *irq_domain;
+
+ int irq;
+ int irq_gpio;
+ bool wakeup;
+ struct mutex irqlock;
+ int irq_masks_cur[MAX77693_IRQ_GROUP_NR];
+ int irq_masks_cache[MAX77693_IRQ_GROUP_NR];
+};
+
+enum max77693_types {
+ TYPE_MAX77693,
+};
+
+extern int max77693_read_reg(struct regmap *map, u8 reg, u8 *dest);
+extern int max77693_bulk_read(struct regmap *map, u8 reg, int count,
+ u8 *buf);
+extern int max77693_write_reg(struct regmap *map, u8 reg, u8 value);
+extern int max77693_bulk_write(struct regmap *map, u8 reg, int count,
+ u8 *buf);
+extern int max77693_update_reg(struct regmap *map, u8 reg, u8 val, u8 mask);
+
+extern int max77693_irq_init(struct max77693_dev *max77686);
+extern void max77693_irq_exit(struct max77693_dev *max77686);
+extern int max77693_irq_resume(struct max77693_dev *max77686);
+
+#endif /* __LINUX_MFD_MAX77693_PRIV_H */
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
new file mode 100644
index 00000000000..1d28ae90384
--- /dev/null
+++ b/include/linux/mfd/max77693.h
@@ -0,0 +1,36 @@
+/*
+ * max77693.h - Driver for the Maxim 77693
+ *
+ * Copyright (C) 2012 Samsung Electronics
+ * SangYoung Son <hello.son@samsung.com>
+ *
+ * This program is not provided / owned by Maxim Integrated Products.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This driver is based on max8997.h
+ *
+ * MAX77693 has PMIC, Charger, Flash LED, Haptic, MUIC devices.
+ * The devices share the same I2C bus and are included in
+ * this MFD driver.
+ */
+
+#ifndef __LINUX_MFD_MAX77693_H
+#define __LINUX_MFD_MAX77693_H
+
+struct max77693_platform_data {
+ int wakeup;
+};
+#endif /* __LINUX_MFD_MAX77693_H */
diff --git a/include/linux/mfd/sta2x11-mfd.h b/include/linux/mfd/sta2x11-mfd.h
new file mode 100644
index 00000000000..d179227e866
--- /dev/null
+++ b/include/linux/mfd/sta2x11-mfd.h
@@ -0,0 +1,324 @@
+/*
+ * Copyright (c) 2009-2011 Wind River Systems, Inc.
+ * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * The STMicroelectronics ConneXt (STA2X11) chip has several unrelated
+ * functions in one PCI endpoint function. This driver simply
+ * registers the platform devices in this I/O memory region and exports
+ * a few functions to access common registers.
+ */
+
+#ifndef __STA2X11_MFD_H
+#define __STA2X11_MFD_H
+#include <linux/types.h>
+#include <linux/pci.h>
+
+/*
+ * The MFD PCI block includes the GPIO peripherals and other register blocks.
+ * For GPIO, we have 32*4 bits (I use "gsta" for "gpio sta2x11").
+ */
+#define GSTA_GPIO_PER_BLOCK 32
+#define GSTA_NR_BLOCKS 4
+#define GSTA_NR_GPIO (GSTA_GPIO_PER_BLOCK * GSTA_NR_BLOCKS)
+
+/* Pinconfig is set by the board definition: altfunc, pull-up, pull-down */
+struct sta2x11_gpio_pdata {
+ unsigned pinconfig[GSTA_NR_GPIO];
+};
+
+/* Macros below lifted from sh_pfc.h, with minor differences */
+#define PINMUX_TYPE_NONE 0
+#define PINMUX_TYPE_FUNCTION 1
+#define PINMUX_TYPE_OUTPUT_LOW 2
+#define PINMUX_TYPE_OUTPUT_HIGH 3
+#define PINMUX_TYPE_INPUT 4
+#define PINMUX_TYPE_INPUT_PULLUP 5
+#define PINMUX_TYPE_INPUT_PULLDOWN 6
+
+/* Give names to GPIO pins, like PXA does, taken from the manual */
+#define STA2X11_GPIO0 0
+#define STA2X11_GPIO1 1
+#define STA2X11_GPIO2 2
+#define STA2X11_GPIO3 3
+#define STA2X11_GPIO4 4
+#define STA2X11_GPIO5 5
+#define STA2X11_GPIO6 6
+#define STA2X11_GPIO7 7
+#define STA2X11_GPIO8_RGBOUT_RED7 8
+#define STA2X11_GPIO9_RGBOUT_RED6 9
+#define STA2X11_GPIO10_RGBOUT_RED5 10
+#define STA2X11_GPIO11_RGBOUT_RED4 11
+#define STA2X11_GPIO12_RGBOUT_RED3 12
+#define STA2X11_GPIO13_RGBOUT_RED2 13
+#define STA2X11_GPIO14_RGBOUT_RED1 14
+#define STA2X11_GPIO15_RGBOUT_RED0 15
+#define STA2X11_GPIO16_RGBOUT_GREEN7 16
+#define STA2X11_GPIO17_RGBOUT_GREEN6 17
+#define STA2X11_GPIO18_RGBOUT_GREEN5 18
+#define STA2X11_GPIO19_RGBOUT_GREEN4 19
+#define STA2X11_GPIO20_RGBOUT_GREEN3 20
+#define STA2X11_GPIO21_RGBOUT_GREEN2 21
+#define STA2X11_GPIO22_RGBOUT_GREEN1 22
+#define STA2X11_GPIO23_RGBOUT_GREEN0 23
+#define STA2X11_GPIO24_RGBOUT_BLUE7 24
+#define STA2X11_GPIO25_RGBOUT_BLUE6 25
+#define STA2X11_GPIO26_RGBOUT_BLUE5 26
+#define STA2X11_GPIO27_RGBOUT_BLUE4 27
+#define STA2X11_GPIO28_RGBOUT_BLUE3 28
+#define STA2X11_GPIO29_RGBOUT_BLUE2 29
+#define STA2X11_GPIO30_RGBOUT_BLUE1 30
+#define STA2X11_GPIO31_RGBOUT_BLUE0 31
+#define STA2X11_GPIO32_RGBOUT_VSYNCH 32
+#define STA2X11_GPIO33_RGBOUT_HSYNCH 33
+#define STA2X11_GPIO34_RGBOUT_DEN 34
+#define STA2X11_GPIO35_ETH_CRS_DV 35
+#define STA2X11_GPIO36_ETH_TXD1 36
+#define STA2X11_GPIO37_ETH_TXD0 37
+#define STA2X11_GPIO38_ETH_TX_EN 38
+#define STA2X11_GPIO39_MDIO 39
+#define STA2X11_GPIO40_ETH_REF_CLK 40
+#define STA2X11_GPIO41_ETH_RXD1 41
+#define STA2X11_GPIO42_ETH_RXD0 42
+#define STA2X11_GPIO43_MDC 43
+#define STA2X11_GPIO44_CAN_TX 44
+#define STA2X11_GPIO45_CAN_RX 45
+#define STA2X11_GPIO46_MLB_DAT 46
+#define STA2X11_GPIO47_MLB_SIG 47
+#define STA2X11_GPIO48_SPI0_CLK 48
+#define STA2X11_GPIO49_SPI0_TXD 49
+#define STA2X11_GPIO50_SPI0_RXD 50
+#define STA2X11_GPIO51_SPI0_FRM 51
+#define STA2X11_GPIO52_SPI1_CLK 52
+#define STA2X11_GPIO53_SPI1_TXD 53
+#define STA2X11_GPIO54_SPI1_RXD 54
+#define STA2X11_GPIO55_SPI1_FRM 55
+#define STA2X11_GPIO56_SPI2_CLK 56
+#define STA2X11_GPIO57_SPI2_TXD 57
+#define STA2X11_GPIO58_SPI2_RXD 58
+#define STA2X11_GPIO59_SPI2_FRM 59
+#define STA2X11_GPIO60_I2C0_SCL 60
+#define STA2X11_GPIO61_I2C0_SDA 61
+#define STA2X11_GPIO62_I2C1_SCL 62
+#define STA2X11_GPIO63_I2C1_SDA 63
+#define STA2X11_GPIO64_I2C2_SCL 64
+#define STA2X11_GPIO65_I2C2_SDA 65
+#define STA2X11_GPIO66_I2C3_SCL 66
+#define STA2X11_GPIO67_I2C3_SDA 67
+#define STA2X11_GPIO68_MSP0_RCK 68
+#define STA2X11_GPIO69_MSP0_RXD 69
+#define STA2X11_GPIO70_MSP0_RFS 70
+#define STA2X11_GPIO71_MSP0_TCK 71
+#define STA2X11_GPIO72_MSP0_TXD 72
+#define STA2X11_GPIO73_MSP0_TFS 73
+#define STA2X11_GPIO74_MSP0_SCK 74
+#define STA2X11_GPIO75_MSP1_CK 75
+#define STA2X11_GPIO76_MSP1_RXD 76
+#define STA2X11_GPIO77_MSP1_FS 77
+#define STA2X11_GPIO78_MSP1_TXD 78
+#define STA2X11_GPIO79_MSP2_CK 79
+#define STA2X11_GPIO80_MSP2_RXD 80
+#define STA2X11_GPIO81_MSP2_FS 81
+#define STA2X11_GPIO82_MSP2_TXD 82
+#define STA2X11_GPIO83_MSP3_CK 83
+#define STA2X11_GPIO84_MSP3_RXD 84
+#define STA2X11_GPIO85_MSP3_FS 85
+#define STA2X11_GPIO86_MSP3_TXD 86
+#define STA2X11_GPIO87_MSP4_CK 87
+#define STA2X11_GPIO88_MSP4_RXD 88
+#define STA2X11_GPIO89_MSP4_FS 89
+#define STA2X11_GPIO90_MSP4_TXD 90
+#define STA2X11_GPIO91_MSP5_CK 91
+#define STA2X11_GPIO92_MSP5_RXD 92
+#define STA2X11_GPIO93_MSP5_FS 93
+#define STA2X11_GPIO94_MSP5_TXD 94
+#define STA2X11_GPIO95_SDIO3_DAT3 95
+#define STA2X11_GPIO96_SDIO3_DAT2 96
+#define STA2X11_GPIO97_SDIO3_DAT1 97
+#define STA2X11_GPIO98_SDIO3_DAT0 98
+#define STA2X11_GPIO99_SDIO3_CLK 99
+#define STA2X11_GPIO100_SDIO3_CMD 100
+#define STA2X11_GPIO101 101
+#define STA2X11_GPIO102 102
+#define STA2X11_GPIO103 103
+#define STA2X11_GPIO104 104
+#define STA2X11_GPIO105_SDIO2_DAT3 105
+#define STA2X11_GPIO106_SDIO2_DAT2 106
+#define STA2X11_GPIO107_SDIO2_DAT1 107
+#define STA2X11_GPIO108_SDIO2_DAT0 108
+#define STA2X11_GPIO109_SDIO2_CLK 109
+#define STA2X11_GPIO110_SDIO2_CMD 110
+#define STA2X11_GPIO111 111
+#define STA2X11_GPIO112 112
+#define STA2X11_GPIO113 113
+#define STA2X11_GPIO114 114
+#define STA2X11_GPIO115_SDIO1_DAT3 115
+#define STA2X11_GPIO116_SDIO1_DAT2 116
+#define STA2X11_GPIO117_SDIO1_DAT1 117
+#define STA2X11_GPIO118_SDIO1_DAT0 118
+#define STA2X11_GPIO119_SDIO1_CLK 119
+#define STA2X11_GPIO120_SDIO1_CMD 120
+#define STA2X11_GPIO121 121
+#define STA2X11_GPIO122 122
+#define STA2X11_GPIO123 123
+#define STA2X11_GPIO124 124
+#define STA2X11_GPIO125_UART2_TXD 125
+#define STA2X11_GPIO126_UART2_RXD 126
+#define STA2X11_GPIO127_UART3_TXD 127
+
+/*
+ * The APB bridge has its own registers, needed by our users as well.
+ * They are accessed with the following read/mask/write function.
+ */
+u32 sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val);
+
+/* CAN and MLB */
+#define APBREG_BSR 0x00 /* Bridge Status Reg */
+#define APBREG_PAER 0x08 /* Peripherals Address Error Reg */
+#define APBREG_PWAC 0x20 /* Peripheral Write Access Control reg */
+#define APBREG_PRAC 0x40 /* Peripheral Read Access Control reg */
+#define APBREG_PCG 0x60 /* Peripheral Clock Gating Reg */
+#define APBREG_PUR 0x80 /* Peripheral Under Reset Reg */
+#define APBREG_EMU_PCG 0xA0 /* Emulator Peripheral Clock Gating Reg */
+
+#define APBREG_CAN (1 << 1)
+#define APBREG_MLB (1 << 3)
+
+/* SARAC */
+#define APBREG_BSR_SARAC 0x100 /* Bridge Status Reg */
+#define APBREG_PAER_SARAC 0x108 /* Peripherals Address Error Reg */
+#define APBREG_PWAC_SARAC 0x120 /* Peripheral Write Access Control reg */
+#define APBREG_PRAC_SARAC 0x140 /* Peripheral Read Access Control reg */
+#define APBREG_PCG_SARAC 0x160 /* Peripheral Clock Gating Reg */
+#define APBREG_PUR_SARAC 0x180 /* Peripheral Under Reset Reg */
+#define APBREG_EMU_PCG_SARAC 0x1A0 /* Emulator Peripheral Clock Gating Reg */
+
+#define APBREG_SARAC (1 << 2)
+
+/*
+ * The system controller has its own registers. Some of these are accessed
+ * by our users as well, using the following read/mask/write function.
+ */
+u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val);
+
+#define SCTL_SCCTL 0x00 /* System controller control register */
+#define SCTL_ARMCFG 0x04 /* ARM configuration register */
+#define SCTL_SCPLLCTL 0x08 /* PLL control status register */
+#define SCTL_SCPLLFCTRL 0x0c /* PLL frequency control register */
+#define SCTL_SCRESFRACT 0x10 /* PLL fractional input register */
+#define SCTL_SCRESCTRL1 0x14 /* Peripheral reset control 1 */
+#define SCTL_SCRESXTRL2 0x18 /* Peripheral reset control 2 */
+#define SCTL_SCPEREN0 0x1c /* Peripheral clock enable register 0 */
+#define SCTL_SCPEREN1 0x20 /* Peripheral clock enable register 1 */
+#define SCTL_SCPEREN2 0x24 /* Peripheral clock enable register 2 */
+#define SCTL_SCGRST 0x28 /* Peripheral global reset */
+#define SCTL_SCPCIPMCR1 0x30 /* PCI power management control 1 */
+#define SCTL_SCPCIPMCR2 0x34 /* PCI power management control 2 */
+#define SCTL_SCPCIPMSR1 0x38 /* PCI power management status 1 */
+#define SCTL_SCPCIPMSR2 0x3c /* PCI power management status 2 */
+#define SCTL_SCPCIPMSR3 0x40 /* PCI power management status 3 */
+#define SCTL_SCINTREN 0x44 /* Interrupt enable */
+#define SCTL_SCRISR 0x48 /* RAW interrupt status */
+#define SCTL_SCCLKSTAT0 0x4c /* Peripheral clocks status 0 */
+#define SCTL_SCCLKSTAT1 0x50 /* Peripheral clocks status 1 */
+#define SCTL_SCCLKSTAT2 0x54 /* Peripheral clocks status 2 */
+#define SCTL_SCRSTSTA 0x58 /* Reset status register */
+
+#define SCTL_SCRESCTRL1_USB_PHY_POR (1 << 0)
+#define SCTL_SCRESCTRL1_USB_OTG (1 << 1)
+#define SCTL_SCRESCTRL1_USB_HRST (1 << 2)
+#define SCTL_SCRESCTRL1_USB_PHY_HOST (1 << 3)
+#define SCTL_SCRESCTRL1_SATAII (1 << 4)
+#define SCTL_SCRESCTRL1_VIP (1 << 5)
+#define SCTL_SCRESCTRL1_PER_MMC0 (1 << 6)
+#define SCTL_SCRESCTRL1_PER_MMC1 (1 << 7)
+#define SCTL_SCRESCTRL1_PER_GPIO0 (1 << 8)
+#define SCTL_SCRESCTRL1_PER_GPIO1 (1 << 9)
+#define SCTL_SCRESCTRL1_PER_GPIO2 (1 << 10)
+#define SCTL_SCRESCTRL1_PER_GPIO3 (1 << 11)
+#define SCTL_SCRESCTRL1_PER_MTU0 (1 << 12)
+#define SCTL_SCRESCTRL1_KER_SPI0 (1 << 13)
+#define SCTL_SCRESCTRL1_KER_SPI1 (1 << 14)
+#define SCTL_SCRESCTRL1_KER_SPI2 (1 << 15)
+#define SCTL_SCRESCTRL1_KER_MCI0 (1 << 16)
+#define SCTL_SCRESCTRL1_KER_MCI1 (1 << 17)
+#define SCTL_SCRESCTRL1_PRE_HSI2C0 (1 << 18)
+#define SCTL_SCRESCTRL1_PER_HSI2C1 (1 << 19)
+#define SCTL_SCRESCTRL1_PER_HSI2C2 (1 << 20)
+#define SCTL_SCRESCTRL1_PER_HSI2C3 (1 << 21)
+#define SCTL_SCRESCTRL1_PER_MSP0 (1 << 22)
+#define SCTL_SCRESCTRL1_PER_MSP1 (1 << 23)
+#define SCTL_SCRESCTRL1_PER_MSP2 (1 << 24)
+#define SCTL_SCRESCTRL1_PER_MSP3 (1 << 25)
+#define SCTL_SCRESCTRL1_PER_MSP4 (1 << 26)
+#define SCTL_SCRESCTRL1_PER_MSP5 (1 << 27)
+#define SCTL_SCRESCTRL1_PER_MMC (1 << 28)
+#define SCTL_SCRESCTRL1_KER_MSP0 (1 << 29)
+#define SCTL_SCRESCTRL1_KER_MSP1 (1 << 30)
+#define SCTL_SCRESCTRL1_KER_MSP2 (1 << 31)
+
+#define SCTL_SCPEREN0_UART0 (1 << 0)
+#define SCTL_SCPEREN0_UART1 (1 << 1)
+#define SCTL_SCPEREN0_UART2 (1 << 2)
+#define SCTL_SCPEREN0_UART3 (1 << 3)
+#define SCTL_SCPEREN0_MSP0 (1 << 4)
+#define SCTL_SCPEREN0_MSP1 (1 << 5)
+#define SCTL_SCPEREN0_MSP2 (1 << 6)
+#define SCTL_SCPEREN0_MSP3 (1 << 7)
+#define SCTL_SCPEREN0_MSP4 (1 << 8)
+#define SCTL_SCPEREN0_MSP5 (1 << 9)
+#define SCTL_SCPEREN0_SPI0 (1 << 10)
+#define SCTL_SCPEREN0_SPI1 (1 << 11)
+#define SCTL_SCPEREN0_SPI2 (1 << 12)
+#define SCTL_SCPEREN0_I2C0 (1 << 13)
+#define SCTL_SCPEREN0_I2C1 (1 << 14)
+#define SCTL_SCPEREN0_I2C2 (1 << 15)
+#define SCTL_SCPEREN0_I2C3 (1 << 16)
+#define SCTL_SCPEREN0_SVDO_LVDS (1 << 17)
+#define SCTL_SCPEREN0_USB_HOST (1 << 18)
+#define SCTL_SCPEREN0_USB_OTG (1 << 19)
+#define SCTL_SCPEREN0_MCI0 (1 << 20)
+#define SCTL_SCPEREN0_MCI1 (1 << 21)
+#define SCTL_SCPEREN0_MCI2 (1 << 22)
+#define SCTL_SCPEREN0_MCI3 (1 << 23)
+#define SCTL_SCPEREN0_SATA (1 << 24)
+#define SCTL_SCPEREN0_ETHERNET (1 << 25)
+#define SCTL_SCPEREN0_VIC (1 << 26)
+#define SCTL_SCPEREN0_DMA_AUDIO (1 << 27)
+#define SCTL_SCPEREN0_DMA_SOC (1 << 28)
+#define SCTL_SCPEREN0_RAM (1 << 29)
+#define SCTL_SCPEREN0_VIP (1 << 30)
+#define SCTL_SCPEREN0_ARM (1 << 31)
+
+#define SCTL_SCPEREN1_UART0 (1 << 0)
+#define SCTL_SCPEREN1_UART1 (1 << 1)
+#define SCTL_SCPEREN1_UART2 (1 << 2)
+#define SCTL_SCPEREN1_UART3 (1 << 3)
+#define SCTL_SCPEREN1_MSP0 (1 << 4)
+#define SCTL_SCPEREN1_MSP1 (1 << 5)
+#define SCTL_SCPEREN1_MSP2 (1 << 6)
+#define SCTL_SCPEREN1_MSP3 (1 << 7)
+#define SCTL_SCPEREN1_MSP4 (1 << 8)
+#define SCTL_SCPEREN1_MSP5 (1 << 9)
+#define SCTL_SCPEREN1_SPI0 (1 << 10)
+#define SCTL_SCPEREN1_SPI1 (1 << 11)
+#define SCTL_SCPEREN1_SPI2 (1 << 12)
+#define SCTL_SCPEREN1_I2C0 (1 << 13)
+#define SCTL_SCPEREN1_I2C1 (1 << 14)
+#define SCTL_SCPEREN1_I2C2 (1 << 15)
+#define SCTL_SCPEREN1_I2C3 (1 << 16)
+#define SCTL_SCPEREN1_USB_PHY (1 << 17)
+
+#endif /* __STA2X11_MFD_H */
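A brief usage sketch (not part of the patch) of the two read/mask/write helpers exported above, using only registers and bits defined in this header. Treating the clock-gating bits as active-high enables is an assumption made for the example.

static void sta2x11_example_enable_clocks(struct pci_dev *pdev)
{
	/* set the CAN bit in the APB Peripheral Clock Gating register */
	sta2x11_apbreg_mask(pdev, APBREG_PCG, APBREG_CAN, APBREG_CAN);

	/* enable the I2C0 clock through the system controller */
	sta2x11_sctl_mask(pdev, SCTL_SCPEREN0, SCTL_SCPEREN0_I2C0,
			  SCTL_SCPEREN0_I2C0);
}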
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index 8516fd1eaab..f8d5b4d5843 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -117,7 +117,7 @@ struct matrix_keymap_data;
* @no_autorepeat: disable key autorepeat
*/
struct stmpe_keypad_platform_data {
- struct matrix_keymap_data *keymap_data;
+ const struct matrix_keymap_data *keymap_data;
unsigned int debounce_ms;
unsigned int scan_count;
bool no_autorepeat;
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
index 1c6c2860d1a..dd8dc0a6c46 100644
--- a/include/linux/mfd/tps65910.h
+++ b/include/linux/mfd/tps65910.h
@@ -18,6 +18,7 @@
#define __LINUX_MFD_TPS65910_H
#include <linux/gpio.h>
+#include <linux/regmap.h>
/* TPS chip id list */
#define TPS65910 0
@@ -783,6 +784,18 @@
#define TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3 0x4
#define TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP 0x8
+/*
+ * Sleep keepon data: maintains selected functions while in sleep mode
+ * @therm_keepon: keep thermal monitoring enabled in sleep state.
+ * @clkout32k_keepon: keep the 32 kHz clock output enabled in sleep state.
+ * @i2chs_keepon: keep the high-speed internal clock enabled in sleep state.
+ */
+struct tps65910_sleep_keepon_data {
+ unsigned therm_keepon:1;
+ unsigned clkout32k_keepon:1;
+ unsigned i2chs_keepon:1;
+};
+
/**
* struct tps65910_board
* Board platform data may be used to initialize regulators.
@@ -794,6 +807,8 @@ struct tps65910_board {
int irq_base;
int vmbch_threshold;
int vmbch2_threshold;
+ bool en_dev_slp;
+ struct tps65910_sleep_keepon_data *slp_keepon;
bool en_gpio_sleep[TPS6591X_MAX_NUM_GPIO];
unsigned long regulator_ext_sleep_control[TPS65910_NUM_REGS];
struct regulator_init_data *tps65910_pmic_init_data[TPS65910_NUM_REGS];
@@ -809,16 +824,14 @@ struct tps65910 {
struct regmap *regmap;
struct mutex io_mutex;
unsigned int id;
- int (*read)(struct tps65910 *tps65910, u8 reg, int size, void *dest);
- int (*write)(struct tps65910 *tps65910, u8 reg, int size, void *src);
/* Client devices */
struct tps65910_pmic *pmic;
struct tps65910_rtc *rtc;
struct tps65910_power *power;
- /* GPIO Handling */
- struct gpio_chip gpio;
+ /* Device node parsed board data */
+ struct tps65910_board *of_plat_data;
/* IRQ Handling */
struct mutex irq_lock;
@@ -826,6 +839,7 @@ struct tps65910 {
int irq_base;
int irq_num;
u32 irq_mask;
+ struct irq_domain *domain;
};
struct tps65910_platform_data {
@@ -833,9 +847,6 @@ struct tps65910_platform_data {
int irq_base;
};
-int tps65910_set_bits(struct tps65910 *tps65910, u8 reg, u8 mask);
-int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask);
-void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base);
int tps65910_irq_init(struct tps65910 *tps65910, int irq,
struct tps65910_platform_data *pdata);
int tps65910_irq_exit(struct tps65910 *tps65910);
@@ -845,4 +856,28 @@ static inline int tps65910_chip_id(struct tps65910 *tps65910)
return tps65910->id;
}
+static inline int tps65910_reg_read(struct tps65910 *tps65910, u8 reg,
+ unsigned int *val)
+{
+ return regmap_read(tps65910->regmap, reg, val);
+}
+
+static inline int tps65910_reg_write(struct tps65910 *tps65910, u8 reg,
+ unsigned int val)
+{
+ return regmap_write(tps65910->regmap, reg, val);
+}
+
+static inline int tps65910_reg_set_bits(struct tps65910 *tps65910, u8 reg,
+ u8 mask)
+{
+ return regmap_update_bits(tps65910->regmap, reg, mask, mask);
+}
+
+static inline int tps65910_reg_clear_bits(struct tps65910 *tps65910, u8 reg,
+ u8 mask)
+{
+ return regmap_update_bits(tps65910->regmap, reg, mask, 0);
+}
+
#endif /* __LINUX_MFD_TPS65910_H */
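A sketch (not part of the patch) of board data filling in the new sleep-control fields documented above; the chosen keep-on combination is purely illustrative. A driver would then reach the registers through the new inline wrappers, e.g. tps65910_reg_set_bits().

static struct tps65910_sleep_keepon_data example_sleep_keepon = {
	.therm_keepon		= 1,	/* keep thermal monitoring running */
	.clkout32k_keepon	= 1,	/* keep the 32 kHz output running */
	.i2chs_keepon		= 0,
};

static struct tps65910_board example_board_data = {
	.en_dev_slp	= true,
	.slp_keepon	= &example_sleep_keepon,
};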
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
index b15b5f03f5c..6659487c31e 100644
--- a/include/linux/mfd/twl6040.h
+++ b/include/linux/mfd/twl6040.h
@@ -27,6 +27,7 @@
#include <linux/interrupt.h>
#include <linux/mfd/core.h>
+#include <linux/regulator/consumer.h>
#define TWL6040_REG_ASICID 0x01
#define TWL6040_REG_ASICREV 0x02
@@ -203,6 +204,7 @@ struct regmap;
struct twl6040 {
struct device *dev;
struct regmap *regmap;
+ struct regulator_bulk_data supplies[2]; /* supplies for vio, v2v1 */
struct mutex mutex;
struct mutex io_mutex;
struct mutex irq_mutex;
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h
index 4b1211859f7..4a3b83a7761 100644
--- a/include/linux/mfd/wm831x/core.h
+++ b/include/linux/mfd/wm831x/core.h
@@ -17,6 +17,7 @@
#include <linux/completion.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/regmap.h>
@@ -338,6 +339,7 @@
#define WM831X_FLL_CLK_SRC_WIDTH 2 /* FLL_CLK_SRC - [1:0] */
struct regulator_dev;
+struct irq_domain;
#define WM831X_NUM_IRQ_REGS 5
#define WM831X_NUM_GPIO_REGS 16
@@ -367,7 +369,7 @@ struct wm831x {
int irq; /* Our chip IRQ */
struct mutex irq_lock;
- int irq_base;
+ struct irq_domain *irq_domain;
int irq_masks_cur[WM831X_NUM_IRQ_REGS]; /* Currently active value */
int irq_masks_cache[WM831X_NUM_IRQ_REGS]; /* Cached hardware value */
@@ -382,7 +384,8 @@ struct wm831x {
/* Used by the interrupt controller code to post writes */
int gpio_update[WM831X_NUM_GPIO_REGS];
- bool gpio_level[WM831X_NUM_GPIO_REGS];
+ bool gpio_level_high[WM831X_NUM_GPIO_REGS];
+ bool gpio_level_low[WM831X_NUM_GPIO_REGS];
struct mutex auxadc_lock;
struct list_head auxadc_pending;
@@ -417,6 +420,11 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq);
void wm831x_irq_exit(struct wm831x *wm831x);
void wm831x_auxadc_init(struct wm831x *wm831x);
+static inline int wm831x_irq(struct wm831x *wm831x, int irq)
+{
+ return irq_create_mapping(wm831x->irq_domain, irq);
+}
+
extern struct regmap_config wm831x_regmap_config;
#endif
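With irq_base replaced by an irq_domain, callers map a chip-local interrupt to a Linux virq through the new wm831x_irq() helper before requesting it. A hedged sketch follows; WM831X_IRQ_AUXADC_DATA and the IRQ flags are illustrative assumptions, not taken from this hunk.

static int example_request_wm831x_irq(struct wm831x *wm831x,
				      irq_handler_t thread_fn, void *data)
{
	int virq = wm831x_irq(wm831x, WM831X_IRQ_AUXADC_DATA);

	return request_threaded_irq(virq, NULL, thread_fn,
				    IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				    "wm831x example", data);
}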
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index 98fcc977e82..9192b6404a7 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -602,6 +602,7 @@ extern const u16 wm8352_mode2_defaults[];
extern const u16 wm8352_mode3_defaults[];
struct wm8350;
+struct regmap;
struct wm8350_hwmon {
struct platform_device *pdev;
@@ -612,13 +613,7 @@ struct wm8350 {
struct device *dev;
/* device IO */
- union {
- struct i2c_client *i2c_client;
- struct spi_device *spi_device;
- };
- int (*read_dev)(struct wm8350 *wm8350, char reg, int size, void *dest);
- int (*write_dev)(struct wm8350 *wm8350, char reg, int size,
- void *src);
+ struct regmap *regmap;
u16 *reg_cache;
struct mutex auxadc_mutex;
diff --git a/include/linux/mfd/wm8400-private.h b/include/linux/mfd/wm8400-private.h
index 0147b696851..2de565b94d0 100644
--- a/include/linux/mfd/wm8400-private.h
+++ b/include/linux/mfd/wm8400-private.h
@@ -24,19 +24,14 @@
#include <linux/mfd/wm8400.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
-
-struct regmap;
+#include <linux/regmap.h>
#define WM8400_REGISTER_COUNT 0x55
struct wm8400 {
struct device *dev;
-
- struct mutex io_lock;
struct regmap *regmap;
- u16 reg_cache[WM8400_REGISTER_COUNT];
-
struct platform_device regulators[6];
};
@@ -930,6 +925,11 @@ struct wm8400 {
u16 wm8400_reg_read(struct wm8400 *wm8400, u8 reg);
int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data);
-int wm8400_set_bits(struct wm8400 *wm8400, u8 reg, u16 mask, u16 val);
+
+static inline int wm8400_set_bits(struct wm8400 *wm8400, u8 reg,
+ u16 mask, u16 val)
+{
+ return regmap_update_bits(wm8400->regmap, reg, mask, val);
+}
#endif
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h
index 6695c3ec451..1f173306bf0 100644
--- a/include/linux/mfd/wm8994/core.h
+++ b/include/linux/mfd/wm8994/core.h
@@ -57,6 +57,7 @@ struct wm8994 {
enum wm8994_type type;
int revision;
+ int cust_id;
struct device *dev;
struct regmap *regmap;
diff --git a/include/linux/mfd/wm8994/registers.h b/include/linux/mfd/wm8994/registers.h
index 86e6a032a07..053548961c1 100644
--- a/include/linux/mfd/wm8994/registers.h
+++ b/include/linux/mfd/wm8994/registers.h
@@ -2212,6 +2212,9 @@
/*
* R256 (0x100) - Chip Revision
*/
+#define WM8994_CUST_ID_MASK 0xFF00 /* CUST_ID - [15:8] */
+#define WM8994_CUST_ID_SHIFT 8 /* CUST_ID - [15:8] */
+#define WM8994_CUST_ID_WIDTH 8 /* CUST_ID - [15:8] */
#define WM8994_CHIP_REV_MASK 0x000F /* CHIP_REV - [3:0] */
#define WM8994_CHIP_REV_SHIFT 0 /* CHIP_REV - [3:0] */
#define WM8994_CHIP_REV_WIDTH 4 /* CHIP_REV - [3:0] */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7d5c37f24c6..ce26716238c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -321,6 +321,7 @@ static inline int is_vmalloc_or_module_addr(const void *x)
static inline void compound_lock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ VM_BUG_ON(PageSlab(page));
bit_spin_lock(PG_compound_lock, &page->flags);
#endif
}
@@ -328,6 +329,7 @@ static inline void compound_lock(struct page *page)
static inline void compound_unlock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ VM_BUG_ON(PageSlab(page));
bit_spin_unlock(PG_compound_lock, &page->flags);
#endif
}
@@ -871,8 +873,6 @@ extern void pagefault_out_of_memory(void);
extern void show_free_areas(unsigned int flags);
extern bool skip_free_areas_node(unsigned int flags, int nid);
-int shmem_lock(struct file *file, int lock, struct user_struct *user);
-struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
int shmem_zero_setup(struct vm_area_struct *);
extern int can_do_mlock(void);
@@ -951,11 +951,9 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
extern int vmtruncate(struct inode *inode, loff_t offset);
-extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
-
int invalidate_inode_page(struct page *page);
#ifdef CONFIG_MMU
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 227fd3e9a9c..1397ccf81e9 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -21,22 +21,22 @@ static inline int page_is_file_cache(struct page *page)
return !PageSwapBacked(page);
}
-static inline void
-add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
+static __always_inline void add_page_to_lru_list(struct page *page,
+ struct lruvec *lruvec, enum lru_list lru)
{
- struct lruvec *lruvec;
-
- lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+ int nr_pages = hpage_nr_pages(page);
+ mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
list_add(&page->lru, &lruvec->lists[lru]);
- __mod_zone_page_state(zone, NR_LRU_BASE + lru, hpage_nr_pages(page));
+ __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages);
}
-static inline void
-del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
+static __always_inline void del_page_from_lru_list(struct page *page,
+ struct lruvec *lruvec, enum lru_list lru)
{
- mem_cgroup_lru_del_list(page, lru);
+ int nr_pages = hpage_nr_pages(page);
+ mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
list_del(&page->lru);
- __mod_zone_page_state(zone, NR_LRU_BASE + lru, -hpage_nr_pages(page));
+ __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, -nr_pages);
}
/**
@@ -61,7 +61,7 @@ static inline enum lru_list page_lru_base_type(struct page *page)
* Returns the LRU list a page was on, as an index into the array of LRU
* lists; and clears its Unevictable or Active flags, ready for freeing.
*/
-static inline enum lru_list page_off_lru(struct page *page)
+static __always_inline enum lru_list page_off_lru(struct page *page)
{
enum lru_list lru;
@@ -85,7 +85,7 @@ static inline enum lru_list page_off_lru(struct page *page)
* Returns the LRU list a page should be on, as an index
* into the array of LRU lists.
*/
-static inline enum lru_list page_lru(struct page *page)
+static __always_inline enum lru_list page_lru(struct page *page)
{
enum lru_list lru;
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 26574c72612..dad95bdd06d 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -345,17 +345,6 @@ struct mm_struct {
/* Architecture-specific MM context */
mm_context_t context;
- /* Swap token stuff */
- /*
- * Last value of global fault stamp as seen by this process.
- * In other words, this value gives an indication of how long
- * it has been since this task got the token.
- * Look at mm/thrash.c
- */
- unsigned int faultstamp;
- unsigned int token_priority;
- unsigned int last_interval;
-
unsigned long flags; /* Must use atomic bitops to access the bits */
struct core_state *core_state; /* coredumping support */
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 629b823f883..d76513b5b26 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -58,6 +58,10 @@ struct mmc_ext_csd {
unsigned int generic_cmd6_time; /* Units: 10ms */
unsigned int power_off_longtime; /* Units: ms */
unsigned int hs_max_dtr;
+#define MMC_HIGH_26_MAX_DTR 26000000
+#define MMC_HIGH_52_MAX_DTR 52000000
+#define MMC_HIGH_DDR_MAX_DTR 52000000
+#define MMC_HS200_MAX_DTR 200000000
unsigned int sectors;
unsigned int card_type;
unsigned int hc_erase_size; /* In sectors */
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 8f66e28f5a0..7a7ebd367cf 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -125,6 +125,7 @@ struct dw_mci {
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_data *data;
+ struct workqueue_struct *card_workqueue;
/* DMA interface members*/
int use_dma;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index cbde4b7e675..0707d228d7f 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -297,6 +297,7 @@ struct mmc_host {
unsigned int sdio_irqs;
struct task_struct *sdio_irq_thread;
+ bool sdio_irq_pending;
atomic_t sdio_irq_thread_abort;
mmc_pm_flag_t pm_flags; /* requested pm features */
@@ -352,6 +353,7 @@ extern int mmc_cache_ctrl(struct mmc_host *, u8);
static inline void mmc_signal_sdio_irq(struct mmc_host *host)
{
host->ops->enable_sdio_irq(host, 0);
+ host->sdio_irq_pending = true;
wake_up_process(host->sdio_irq_thread);
}
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index b822a2cb600..d425cab144d 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -354,66 +354,6 @@ struct _mmc_csd {
#define EXT_CSD_CARD_TYPE_SDR_1_2V (1<<5) /* Card can run at 200MHz */
/* SDR mode @1.2V I/O */
-#define EXT_CSD_CARD_TYPE_SDR_200 (EXT_CSD_CARD_TYPE_SDR_1_8V | \
- EXT_CSD_CARD_TYPE_SDR_1_2V)
-
-#define EXT_CSD_CARD_TYPE_SDR_ALL (EXT_CSD_CARD_TYPE_SDR_200 | \
- EXT_CSD_CARD_TYPE_52 | \
- EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_2V_ALL (EXT_CSD_CARD_TYPE_SDR_1_2V | \
- EXT_CSD_CARD_TYPE_52 | \
- EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_8V_ALL (EXT_CSD_CARD_TYPE_SDR_1_8V | \
- EXT_CSD_CARD_TYPE_52 | \
- EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_8V (EXT_CSD_CARD_TYPE_SDR_1_2V | \
- EXT_CSD_CARD_TYPE_DDR_1_8V | \
- EXT_CSD_CARD_TYPE_52 | \
- EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_8V (EXT_CSD_CARD_TYPE_SDR_1_8V | \
- EXT_CSD_CARD_TYPE_DDR_1_8V | \
- EXT_CSD_CARD_TYPE_52 | \
- EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_2V (EXT_CSD_CARD_TYPE_SDR_1_2V | \
- EXT_CSD_CARD_TYPE_DDR_1_2V | \
- EXT_CSD_CARD_TYPE_52 | \
- EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_2V (EXT_CSD_CARD_TYPE_SDR_1_8V | \
- EXT_CSD_CARD_TYPE_DDR_1_2V | \
- EXT_CSD_CARD_TYPE_52 | \
- EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_52 (EXT_CSD_CARD_TYPE_SDR_1_2V | \
- EXT_CSD_CARD_TYPE_DDR_52 | \
- EXT_CSD_CARD_TYPE_52 | \
- EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_52 (EXT_CSD_CARD_TYPE_SDR_1_8V | \
- EXT_CSD_CARD_TYPE_DDR_52 | \
- EXT_CSD_CARD_TYPE_52 | \
- EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_8V (EXT_CSD_CARD_TYPE_SDR_200 | \
- EXT_CSD_CARD_TYPE_DDR_1_8V | \
- EXT_CSD_CARD_TYPE_52 | \
- EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_2V (EXT_CSD_CARD_TYPE_SDR_200 | \
- EXT_CSD_CARD_TYPE_DDR_1_2V | \
- EXT_CSD_CARD_TYPE_52 | \
- EXT_CSD_CARD_TYPE_26)
-
-#define EXT_CSD_CARD_TYPE_SDR_ALL_DDR_52 (EXT_CSD_CARD_TYPE_SDR_200 | \
- EXT_CSD_CARD_TYPE_DDR_52 | \
- EXT_CSD_CARD_TYPE_52 | \
- EXT_CSD_CARD_TYPE_26)
-
#define EXT_CSD_BUS_WIDTH_1 0 /* Card is in 1 bit mode */
#define EXT_CSD_BUS_WIDTH_4 1 /* Card is in 4 bit mode */
#define EXT_CSD_BUS_WIDTH_8 2 /* Card is in 8 bit mode */
diff --git a/arch/arm/mach-mxs/include/mach/mmc.h b/include/linux/mmc/mxs-mmc.h
index 211547a0556..7c2ad3a7f2f 100644
--- a/arch/arm/mach-mxs/include/mach/mmc.h
+++ b/include/linux/mmc/mxs-mmc.h
@@ -6,8 +6,8 @@
* published by the Free Software Foundation.
*/
-#ifndef __MACH_MXS_MMC_H__
-#define __MACH_MXS_MMC_H__
+#ifndef __LINUX_MMC_MXS_MMC_H__
+#define __LINUX_MMC_MXS_MMC_H__
struct mxs_mmc_platform_data {
int wp_gpio; /* write protect pin */
@@ -15,4 +15,5 @@ struct mxs_mmc_platform_data {
#define SLOTF_4_BIT_CAPABLE (1 << 0)
#define SLOTF_8_BIT_CAPABLE (1 << 1)
};
-#endif /* __MACH_MXS_MMC_H__ */
+
+#endif /* __LINUX_MMC_MXS_MMC_H__ */
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index c04ecfe03f7..580bd587d91 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -4,7 +4,7 @@
#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond) BUG_ON(cond)
#else
-#define VM_BUG_ON(cond) do { (void)(cond); } while (0)
+#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
#endif
#ifdef CONFIG_DEBUG_VIRTUAL
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 41aa49b7482..2427706f78b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -35,13 +35,39 @@
*/
#define PAGE_ALLOC_COSTLY_ORDER 3
-#define MIGRATE_UNMOVABLE 0
-#define MIGRATE_RECLAIMABLE 1
-#define MIGRATE_MOVABLE 2
-#define MIGRATE_PCPTYPES 3 /* the number of types on the pcp lists */
-#define MIGRATE_RESERVE 3
-#define MIGRATE_ISOLATE 4 /* can't allocate from here */
-#define MIGRATE_TYPES 5
+enum {
+ MIGRATE_UNMOVABLE,
+ MIGRATE_RECLAIMABLE,
+ MIGRATE_MOVABLE,
+ MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
+ MIGRATE_RESERVE = MIGRATE_PCPTYPES,
+#ifdef CONFIG_CMA
+ /*
+ * MIGRATE_CMA migration type is designed to mimic the way
+ * ZONE_MOVABLE works. Only movable pages can be allocated
+	 * from MIGRATE_CMA pageblocks and the page allocator never
+	 * implicitly changes the migration type of a MIGRATE_CMA pageblock.
+ *
+ * The way to use it is to change migratetype of a range of
+ * pageblocks to MIGRATE_CMA which can be done by
+ * __free_pageblock_cma() function. What is important though
+	 * is that a range of pageblocks must be aligned to
+	 * MAX_ORDER_NR_PAGES, should the biggest page be bigger than
+	 * a single pageblock.
+ */
+ MIGRATE_CMA,
+#endif
+ MIGRATE_ISOLATE, /* can't allocate from here */
+ MIGRATE_TYPES
+};
+
+#ifdef CONFIG_CMA
+# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+# define cma_wmark_pages(zone) zone->min_cma_pages
+#else
+# define is_migrate_cma(migratetype) false
+# define cma_wmark_pages(zone) 0
+#endif
#define for_each_migratetype_order(order, type) \
for (order = 0; order < MAX_ORDER; order++) \
@@ -159,8 +185,25 @@ static inline int is_unevictable_lru(enum lru_list lru)
return (lru == LRU_UNEVICTABLE);
}
+struct zone_reclaim_stat {
+ /*
+ * The pageout code in vmscan.c keeps track of how many of the
+	 * mem/swap backed and file backed pages are referenced.
+ * The higher the rotated/scanned ratio, the more valuable
+ * that cache is.
+ *
+ * The anon LRU stats live in [0], file LRU stats in [1]
+ */
+ unsigned long recent_rotated[2];
+ unsigned long recent_scanned[2];
+};
+
struct lruvec {
struct list_head lists[NR_LRU_LISTS];
+ struct zone_reclaim_stat reclaim_stat;
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+ struct zone *zone;
+#endif
};
/* Mask used at gathering information at once (see memcontrol.c) */
@@ -169,16 +212,12 @@ struct lruvec {
#define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON)
#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
-/* Isolate inactive pages */
-#define ISOLATE_INACTIVE ((__force isolate_mode_t)0x1)
-/* Isolate active pages */
-#define ISOLATE_ACTIVE ((__force isolate_mode_t)0x2)
/* Isolate clean file */
-#define ISOLATE_CLEAN ((__force isolate_mode_t)0x4)
+#define ISOLATE_CLEAN ((__force isolate_mode_t)0x1)
/* Isolate unmapped file */
-#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x8)
+#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
-#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x10)
+#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4)
/* LRU Isolation modes. */
typedef unsigned __bitwise__ isolate_mode_t;
@@ -287,19 +326,6 @@ enum zone_type {
#error ZONES_SHIFT -- too many zones configured adjust calculation
#endif
-struct zone_reclaim_stat {
- /*
- * The pageout code in vmscan.c keeps track of how many of the
- * mem/swap backed and file backed pages are refeferenced.
- * The higher the rotated/scanned ratio, the more valuable
- * that cache is.
- *
- * The anon LRU stats live in [0], file LRU stats in [1]
- */
- unsigned long recent_rotated[2];
- unsigned long recent_scanned[2];
-};
-
struct zone {
/* Fields commonly accessed by the page allocator */
@@ -347,6 +373,13 @@ struct zone {
/* see spanned/present_pages for more description */
seqlock_t span_seqlock;
#endif
+#ifdef CONFIG_CMA
+ /*
+ * CMA needs to increase watermark levels during the allocation
+ * process to make sure that the system is not starved.
+ */
+ unsigned long min_cma_pages;
+#endif
struct free_area free_area[MAX_ORDER];
#ifndef CONFIG_SPARSEMEM
@@ -374,8 +407,6 @@ struct zone {
spinlock_t lru_lock;
struct lruvec lruvec;
- struct zone_reclaim_stat reclaim_stat;
-
unsigned long pages_scanned; /* since last reclaim */
unsigned long flags; /* zone flags, see below */
@@ -701,6 +732,17 @@ extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
unsigned long size,
enum memmap_context context);
+extern void lruvec_init(struct lruvec *lruvec, struct zone *zone);
+
+static inline struct zone *lruvec_zone(struct lruvec *lruvec)
+{
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+ return lruvec->zone;
+#else
+ return container_of(lruvec, struct zone, lruvec);
+#endif
+}
+
#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
index 30b0c4e78f9..51bf8ada6dc 100644
--- a/include/linux/mv643xx_eth.h
+++ b/include/linux/mv643xx_eth.h
@@ -18,7 +18,6 @@
struct mv643xx_eth_shared_platform_data {
struct mbus_dram_target_info *dram;
struct platform_device *shared_smi;
- unsigned int t_clk;
/*
* Max packet size for Tx IP/Layer 4 checksum, when set to 0, default
* limit of 9KiB will be used.
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 0987146b063..af2d2fa30ee 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -69,6 +69,10 @@
#define NFS4_CDFC4_FORE_OR_BOTH 0x3
#define NFS4_CDFC4_BACK_OR_BOTH 0x7
+#define NFS4_CDFS4_FORE 0x1
+#define NFS4_CDFS4_BACK 0x2
+#define NFS4_CDFS4_BOTH 0x3
+
#define NFS4_SET_TO_SERVER_TIME 0
#define NFS4_SET_TO_CLIENT_TIME 1
@@ -526,6 +530,13 @@ enum lock_type4 {
#define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23)
#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30)
#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1)
+#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4)
+
+/* MDS threshold bitmap bits */
+#define THRESHOLD_RD (1UL << 0)
+#define THRESHOLD_WR (1UL << 1)
+#define THRESHOLD_RD_IO (1UL << 2)
+#define THRESHOLD_WR_IO (1UL << 3)
#define NFSPROC4_NULL 0
#define NFSPROC4_COMPOUND 1
@@ -596,6 +607,8 @@ enum {
NFSPROC4_CLNT_TEST_STATEID,
NFSPROC4_CLNT_FREE_STATEID,
NFSPROC4_CLNT_GETDEVICELIST,
+ NFSPROC4_CLNT_BIND_CONN_TO_SESSION,
+ NFSPROC4_CLNT_DESTROY_CLIENTID,
};
/* nfs41 types */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 52a1bdb4ee2..b23cfc120ed 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -102,6 +102,7 @@ struct nfs_open_context {
int error;
struct list_head list;
+ struct nfs4_threshold *mdsthreshold;
};
struct nfs_open_dir_context {
@@ -179,8 +180,7 @@ struct nfs_inode {
__be32 cookieverf[2];
unsigned long npages;
- unsigned long ncommit;
- struct list_head commit_list;
+ struct nfs_mds_commit_info commit_info;
/* Open contexts for shared mmap writes */
struct list_head open_files;
@@ -201,8 +201,10 @@ struct nfs_inode {
/* pNFS layout information */
struct pnfs_layout_hdr *layout;
- atomic_t commits_outstanding;
#endif /* CONFIG_NFS_V4*/
+ /* how many bytes have been written/read and how many bytes queued up */
+ __u64 write_io;
+ __u64 read_io;
#ifdef CONFIG_NFS_FSCACHE
struct fscache_cookie *fscache;
#endif
@@ -230,7 +232,6 @@ struct nfs_inode {
#define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */
#define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */
#define NFS_INO_COMMIT (7) /* inode is committing unstable writes */
-#define NFS_INO_PNFS_COMMIT (8) /* use pnfs code for commit */
#define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */
#define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */
@@ -317,11 +318,6 @@ static inline int nfs_server_capable(struct inode *inode, int cap)
return NFS_SERVER(inode)->caps & cap;
}
-static inline int NFS_USE_READDIRPLUS(struct inode *inode)
-{
- return test_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
-}
-
static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf)
{
dentry->d_time = verf;
@@ -552,8 +548,8 @@ extern int nfs_wb_page(struct inode *inode, struct page* page);
extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
extern int nfs_commit_inode(struct inode *, int);
-extern struct nfs_write_data *nfs_commitdata_alloc(void);
-extern void nfs_commit_free(struct nfs_write_data *wdata);
+extern struct nfs_commit_data *nfs_commitdata_alloc(void);
+extern void nfs_commit_free(struct nfs_commit_data *data);
#else
static inline int
nfs_commit_inode(struct inode *inode, int how)
@@ -569,12 +565,6 @@ nfs_have_writebacks(struct inode *inode)
}
/*
- * Allocate nfs_write_data structures
- */
-extern struct nfs_write_data *nfs_writedata_alloc(unsigned int npages);
-extern void nfs_writedata_free(struct nfs_write_data *);
-
-/*
* linux/fs/nfs/read.c
*/
extern int nfs_readpage(struct file *, struct page *);
@@ -585,12 +575,6 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
struct page *);
/*
- * Allocate nfs_read_data structures
- */
-extern struct nfs_read_data *nfs_readdata_alloc(unsigned int npages);
-extern void nfs_readdata_free(struct nfs_read_data *);
-
-/*
* linux/fs/nfs3proc.c
*/
#ifdef CONFIG_NFS_V3_ACL
@@ -654,6 +638,7 @@ nfs_fileid_to_ino_t(u64 fileid)
#define NFSDBG_FSCACHE 0x0800
#define NFSDBG_PNFS 0x1000
#define NFSDBG_PNFS_LD 0x2000
+#define NFSDBG_STATE 0x4000
#define NFSDBG_ALL 0xFFFF
#ifdef __KERNEL__
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 7073fc74481..fbb78fb09bd 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -17,7 +17,7 @@ struct nfs4_sequence_args;
struct nfs4_sequence_res;
struct nfs_server;
struct nfs4_minor_version_ops;
-struct server_scope;
+struct nfs41_server_scope;
struct nfs41_impl_id;
/*
@@ -35,6 +35,9 @@ struct nfs_client {
#define NFS_CS_RENEWD 3 /* - renewd started */
#define NFS_CS_STOP_RENEW 4 /* no more state to renew */
#define NFS_CS_CHECK_LEASE_TIME 5 /* need to check lease time */
+ unsigned long cl_flags; /* behavior switches */
+#define NFS_CS_NORESVPORT 0 /* - use ephemeral src port */
+#define NFS_CS_DISCRTRY 1 /* - disconnect on RPC retry */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
@@ -61,9 +64,6 @@ struct nfs_client {
struct rpc_wait_queue cl_rpcwaitq;
- /* used for the setclientid verifier */
- struct timespec cl_boot_time;
-
/* idmapper */
struct idmap * cl_idmap;
@@ -79,16 +79,17 @@ struct nfs_client {
u32 cl_seqid;
/* The flags used for obtaining the clientid during EXCHANGE_ID */
u32 cl_exchange_flags;
- struct nfs4_session *cl_session; /* sharred session */
+ struct nfs4_session *cl_session; /* shared session */
+ struct nfs41_server_owner *cl_serverowner;
+ struct nfs41_server_scope *cl_serverscope;
+ struct nfs41_impl_id *cl_implid;
#endif /* CONFIG_NFS_V4 */
#ifdef CONFIG_NFS_FSCACHE
struct fscache_cookie *fscache; /* client index cache cookie */
#endif
- struct server_scope *server_scope; /* from exchange_id */
- struct nfs41_impl_id *impl_id; /* from exchange_id */
- struct net *net;
+ struct net *cl_net;
};
/*
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index eac30d6bec1..88d166b555e 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -27,7 +27,6 @@ enum {
PG_CLEAN,
PG_NEED_COMMIT,
PG_NEED_RESCHED,
- PG_PARTIAL_READ_FAILED,
PG_COMMIT_TO_DS,
};
@@ -37,7 +36,6 @@ struct nfs_page {
struct page *wb_page; /* page to read in/write out */
struct nfs_open_context *wb_context; /* File state context info */
struct nfs_lock_context *wb_lock_context; /* lock context info */
- atomic_t wb_complete; /* i/os we're waiting for */
pgoff_t wb_index; /* Offset >> PAGE_CACHE_SHIFT */
unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */
wb_pgbase, /* Start of page data */
@@ -68,7 +66,9 @@ struct nfs_pageio_descriptor {
int pg_ioflags;
int pg_error;
const struct rpc_call_ops *pg_rpc_callops;
+ const struct nfs_pgio_completion_ops *pg_completion_ops;
struct pnfs_layout_segment *pg_lseg;
+ struct nfs_direct_req *pg_dreq;
};
#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags))
@@ -84,6 +84,7 @@ extern void nfs_release_request(struct nfs_page *req);
extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
struct inode *inode,
const struct nfs_pageio_ops *pg_ops,
+ const struct nfs_pgio_completion_ops *compl_ops,
size_t bsize,
int how);
extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *,
@@ -95,26 +96,17 @@ extern bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
struct nfs_page *req);
extern int nfs_wait_on_request(struct nfs_page *);
extern void nfs_unlock_request(struct nfs_page *req);
+extern void nfs_unlock_and_release_request(struct nfs_page *req);
/*
- * Lock the page of an asynchronous request without getting a new reference
+ * Lock the page of an asynchronous request
*/
static inline int
-nfs_lock_request_dontget(struct nfs_page *req)
-{
- return !test_and_set_bit(PG_BUSY, &req->wb_flags);
-}
-
-static inline int
nfs_lock_request(struct nfs_page *req)
{
- if (test_and_set_bit(PG_BUSY, &req->wb_flags))
- return 0;
- kref_get(&req->wb_kref);
- return 1;
+ return !test_and_set_bit(PG_BUSY, &req->wb_flags);
}
-
/**
* nfs_list_add_request - Insert a request into a list
* @req: request
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 7ba3551a041..d1a7bf51c32 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -35,6 +35,15 @@ static inline int nfs_fsid_equal(const struct nfs_fsid *a, const struct nfs_fsid
return a->major == b->major && a->minor == b->minor;
}
+struct nfs4_threshold {
+ __u32 bm;
+ __u32 l_type;
+ __u64 rd_sz;
+ __u64 wr_sz;
+ __u64 rd_io_sz;
+ __u64 wr_io_sz;
+};
+
struct nfs_fattr {
unsigned int valid; /* which fields are valid */
umode_t mode;
@@ -67,6 +76,7 @@ struct nfs_fattr {
unsigned long gencount;
struct nfs4_string *owner_name;
struct nfs4_string *group_name;
+ struct nfs4_threshold *mdsthreshold; /* pNFS threshold hints */
};
#define NFS_ATTR_FATTR_TYPE (1U << 0)
@@ -106,14 +116,14 @@ struct nfs_fattr {
| NFS_ATTR_FATTR_FILEID \
| NFS_ATTR_FATTR_ATIME \
| NFS_ATTR_FATTR_MTIME \
- | NFS_ATTR_FATTR_CTIME)
+ | NFS_ATTR_FATTR_CTIME \
+ | NFS_ATTR_FATTR_CHANGE)
#define NFS_ATTR_FATTR_V2 (NFS_ATTR_FATTR \
| NFS_ATTR_FATTR_BLOCKS_USED)
#define NFS_ATTR_FATTR_V3 (NFS_ATTR_FATTR \
| NFS_ATTR_FATTR_SPACE_USED)
#define NFS_ATTR_FATTR_V4 (NFS_ATTR_FATTR \
- | NFS_ATTR_FATTR_SPACE_USED \
- | NFS_ATTR_FATTR_CHANGE)
+ | NFS_ATTR_FATTR_SPACE_USED)
/*
* Info on the file system
@@ -338,7 +348,6 @@ struct nfs_openargs {
const struct qstr * name;
const struct nfs_server *server; /* Needed for ID mapping */
const u32 * bitmask;
- const u32 * dir_bitmask;
__u32 claim;
struct nfs4_sequence_args seq_args;
};
@@ -349,7 +358,6 @@ struct nfs_openres {
struct nfs4_change_info cinfo;
__u32 rflags;
struct nfs_fattr * f_attr;
- struct nfs_fattr * dir_attr;
struct nfs_seqid * seqid;
const struct nfs_server *server;
fmode_t delegation_type;
@@ -519,12 +527,29 @@ struct nfs_writeres {
};
/*
+ * Arguments to the commit call.
+ */
+struct nfs_commitargs {
+ struct nfs_fh *fh;
+ __u64 offset;
+ __u32 count;
+ const u32 *bitmask;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs_commitres {
+ struct nfs_fattr *fattr;
+ struct nfs_writeverf *verf;
+ const struct nfs_server *server;
+ struct nfs4_sequence_res seq_res;
+};
+
+/*
* Common arguments to the unlink call
*/
struct nfs_removeargs {
const struct nfs_fh *fh;
struct qstr name;
- const u32 * bitmask;
struct nfs4_sequence_args seq_args;
};
@@ -543,7 +568,6 @@ struct nfs_renameargs {
const struct nfs_fh *new_dir;
const struct qstr *old_name;
const struct qstr *new_name;
- const u32 *bitmask;
struct nfs4_sequence_args seq_args;
};
@@ -839,7 +863,6 @@ struct nfs4_create_res {
struct nfs_fh * fh;
struct nfs_fattr * fattr;
struct nfs4_change_info dir_cinfo;
- struct nfs_fattr * dir_fattr;
struct nfs4_sequence_res seq_res;
};
@@ -1061,6 +1084,21 @@ struct nfstime4 {
};
#ifdef CONFIG_NFS_V4_1
+
+struct pnfs_commit_bucket {
+ struct list_head written;
+ struct list_head committing;
+ struct pnfs_layout_segment *wlseg;
+ struct pnfs_layout_segment *clseg;
+};
+
+struct pnfs_ds_commit_info {
+ int nwritten;
+ int ncommitting;
+ int nbuckets;
+ struct pnfs_commit_bucket *buckets;
+};
+
#define NFS4_EXCHANGE_ID_LEN (48)
struct nfs41_exchange_id_args {
struct nfs_client *client;
@@ -1070,13 +1108,13 @@ struct nfs41_exchange_id_args {
u32 flags;
};
-struct server_owner {
+struct nfs41_server_owner {
uint64_t minor_id;
uint32_t major_id_sz;
char major_id[NFS4_OPAQUE_LIMIT];
};
-struct server_scope {
+struct nfs41_server_scope {
uint32_t server_scope_sz;
char server_scope[NFS4_OPAQUE_LIMIT];
};
@@ -1087,10 +1125,18 @@ struct nfs41_impl_id {
struct nfstime4 date;
};
+struct nfs41_bind_conn_to_session_res {
+ struct nfs4_session *session;
+ u32 dir;
+ bool use_conn_in_rdma_mode;
+};
+
struct nfs41_exchange_id_res {
- struct nfs_client *client;
+ u64 clientid;
+ u32 seqid;
u32 flags;
- struct server_scope *server_scope;
+ struct nfs41_server_owner *server_owner;
+ struct nfs41_server_scope *server_scope;
struct nfs41_impl_id *impl_id;
};
@@ -1143,35 +1189,114 @@ struct nfs41_free_stateid_res {
struct nfs4_sequence_res seq_res;
};
+#else
+
+struct pnfs_ds_commit_info {
+};
+
#endif /* CONFIG_NFS_V4_1 */
struct nfs_page;
#define NFS_PAGEVEC_SIZE (8U)
+struct nfs_page_array {
+ struct page **pagevec;
+ unsigned int npages; /* Max length of pagevec */
+ struct page *page_array[NFS_PAGEVEC_SIZE];
+};
+
struct nfs_read_data {
+ struct nfs_pgio_header *header;
+ struct list_head list;
struct rpc_task task;
- struct inode *inode;
- struct rpc_cred *cred;
struct nfs_fattr fattr; /* fattr storage */
- struct list_head pages; /* Coalesced read requests */
- struct list_head list; /* lists of struct nfs_read_data */
- struct nfs_page *req; /* multi ops per nfs_page */
- struct page **pagevec;
- unsigned int npages; /* Max length of pagevec */
struct nfs_readargs args;
struct nfs_readres res;
unsigned long timestamp; /* For lease renewal */
- struct pnfs_layout_segment *lseg;
- struct nfs_client *ds_clp; /* pNFS data server */
- const struct rpc_call_ops *mds_ops;
int (*read_done_cb) (struct rpc_task *task, struct nfs_read_data *data);
__u64 mds_offset;
+ struct nfs_page_array pages;
+ struct nfs_client *ds_clp; /* pNFS data server */
+};
+
+/* used as flag bits in nfs_pgio_header */
+enum {
+ NFS_IOHDR_ERROR = 0,
+ NFS_IOHDR_EOF,
+ NFS_IOHDR_REDO,
+ NFS_IOHDR_NEED_COMMIT,
+ NFS_IOHDR_NEED_RESCHED,
+};
+
+struct nfs_pgio_header {
+ struct inode *inode;
+ struct rpc_cred *cred;
+ struct list_head pages;
+ struct list_head rpc_list;
+ atomic_t refcnt;
+ struct nfs_page *req;
+ struct pnfs_layout_segment *lseg;
+ loff_t io_start;
+ const struct rpc_call_ops *mds_ops;
+ void (*release) (struct nfs_pgio_header *hdr);
+ const struct nfs_pgio_completion_ops *completion_ops;
+ struct nfs_direct_req *dreq;
+ spinlock_t lock;
+ /* fields protected by lock */
int pnfs_error;
- struct page *page_array[NFS_PAGEVEC_SIZE];
+ int error; /* merge with pnfs_error */
+ unsigned long good_bytes; /* boundary of good data */
+ unsigned long flags;
+};
+
+struct nfs_read_header {
+ struct nfs_pgio_header header;
+ struct nfs_read_data rpc_data;
};
struct nfs_write_data {
+ struct nfs_pgio_header *header;
+ struct list_head list;
+ struct rpc_task task;
+ struct nfs_fattr fattr;
+ struct nfs_writeverf verf;
+ struct nfs_writeargs args; /* argument struct */
+ struct nfs_writeres res; /* result struct */
+ unsigned long timestamp; /* For lease renewal */
+ int (*write_done_cb) (struct rpc_task *task, struct nfs_write_data *data);
+ __u64 mds_offset; /* Filelayout dense stripe */
+ struct nfs_page_array pages;
+ struct nfs_client *ds_clp; /* pNFS data server */
+};
+
+struct nfs_write_header {
+ struct nfs_pgio_header header;
+ struct nfs_write_data rpc_data;
+};
+
+struct nfs_mds_commit_info {
+ atomic_t rpcs_out;
+ unsigned long ncommit;
+ struct list_head list;
+};
+
+struct nfs_commit_data;
+struct nfs_inode;
+struct nfs_commit_completion_ops {
+ void (*error_cleanup) (struct nfs_inode *nfsi);
+ void (*completion) (struct nfs_commit_data *data);
+};
+
+struct nfs_commit_info {
+ spinlock_t *lock;
+ struct nfs_mds_commit_info *mds;
+ struct pnfs_ds_commit_info *ds;
+ struct nfs_direct_req *dreq; /* O_DIRECT request */
+ const struct nfs_commit_completion_ops *completion_ops;
+};
+
+struct nfs_commit_data {
struct rpc_task task;
struct inode *inode;
struct rpc_cred *cred;
@@ -1179,22 +1304,22 @@ struct nfs_write_data {
struct nfs_writeverf verf;
struct list_head pages; /* Coalesced requests we wish to flush */
struct list_head list; /* lists of struct nfs_write_data */
- struct nfs_page *req; /* multi ops per nfs_page */
- struct page **pagevec;
- unsigned int npages; /* Max length of pagevec */
- struct nfs_writeargs args; /* argument struct */
- struct nfs_writeres res; /* result struct */
+ struct nfs_direct_req *dreq; /* O_DIRECT request */
+ struct nfs_commitargs args; /* argument struct */
+ struct nfs_commitres res; /* result struct */
+ struct nfs_open_context *context;
struct pnfs_layout_segment *lseg;
struct nfs_client *ds_clp; /* pNFS data server */
int ds_commit_index;
const struct rpc_call_ops *mds_ops;
- int (*write_done_cb) (struct rpc_task *task, struct nfs_write_data *data);
-#ifdef CONFIG_NFS_V4
- unsigned long timestamp; /* For lease renewal */
-#endif
- __u64 mds_offset; /* Filelayout dense stripe */
- int pnfs_error;
- struct page *page_array[NFS_PAGEVEC_SIZE];
+ const struct nfs_commit_completion_ops *completion_ops;
+ int (*commit_done_cb) (struct rpc_task *task, struct nfs_commit_data *data);
+};
+
+struct nfs_pgio_completion_ops {
+ void (*error_cleanup)(struct list_head *head);
+ void (*init_hdr)(struct nfs_pgio_header *hdr);
+ void (*completion)(struct nfs_pgio_header *hdr);
};
struct nfs_unlinkdata {
@@ -1234,11 +1359,13 @@ struct nfs_rpc_ops {
int (*getroot) (struct nfs_server *, struct nfs_fh *,
struct nfs_fsinfo *);
+ struct vfsmount *(*submount) (struct nfs_server *, struct dentry *,
+ struct nfs_fh *, struct nfs_fattr *);
int (*getattr) (struct nfs_server *, struct nfs_fh *,
struct nfs_fattr *);
int (*setattr) (struct dentry *, struct nfs_fattr *,
struct iattr *);
- int (*lookup) (struct rpc_clnt *clnt, struct inode *, struct qstr *,
+ int (*lookup) (struct inode *, struct qstr *,
struct nfs_fh *, struct nfs_fattr *);
int (*access) (struct inode *, struct nfs_access_entry *);
int (*readlink)(struct inode *, struct page *, unsigned int,
@@ -1277,8 +1404,9 @@ struct nfs_rpc_ops {
void (*write_setup) (struct nfs_write_data *, struct rpc_message *);
void (*write_rpc_prepare)(struct rpc_task *, struct nfs_write_data *);
int (*write_done) (struct rpc_task *, struct nfs_write_data *);
- void (*commit_setup) (struct nfs_write_data *, struct rpc_message *);
- int (*commit_done) (struct rpc_task *, struct nfs_write_data *);
+ void (*commit_setup) (struct nfs_commit_data *, struct rpc_message *);
+ void (*commit_rpc_prepare)(struct rpc_task *, struct nfs_commit_data *);
+ int (*commit_done) (struct rpc_task *, struct nfs_commit_data *);
int (*lock)(struct file *, int, struct file_lock *);
int (*lock_check_bounds)(const struct file_lock *);
void (*clear_acl_cache)(struct inode *);
@@ -1287,9 +1415,9 @@ struct nfs_rpc_ops {
struct nfs_open_context *ctx,
int open_flags,
struct iattr *iattr);
- int (*init_client) (struct nfs_client *, const struct rpc_timeout *,
- const char *, rpc_authflavor_t, int);
- int (*secinfo)(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
+ struct nfs_client *
+ (*init_client) (struct nfs_client *, const struct rpc_timeout *,
+ const char *, rpc_authflavor_t);
};
/*
diff --git a/include/linux/of_i2c.h b/include/linux/of_i2c.h
index 0efe8d465f5..1cb775f8e66 100644
--- a/include/linux/of_i2c.h
+++ b/include/linux/of_i2c.h
@@ -20,6 +20,10 @@ extern void of_i2c_register_devices(struct i2c_adapter *adap);
/* must call put_device() when done with returned i2c_client device */
extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
+/* must call put_device() when done with returned i2c_adapter device */
+extern struct i2c_adapter *of_find_i2c_adapter_by_node(
+ struct device_node *node);
+
#else
static inline void of_i2c_register_devices(struct i2c_adapter *adap)
{
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index f93e21700d3..bb115deb761 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -5,7 +5,7 @@
struct pci_dev;
struct of_irq;
-int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
+int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq);
struct device_node;
struct device_node *of_pci_find_child_device(struct device_node *parent,
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 3d7647536b0..e4c29bc72e7 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -43,8 +43,9 @@ enum oom_constraint {
extern void compare_swap_oom_score_adj(int old_val, int new_val);
extern int test_set_oom_score_adj(int new_val);
-extern unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
- const nodemask_t *nodemask, unsigned long totalpages);
+extern unsigned long oom_badness(struct task_struct *p,
+ struct mem_cgroup *memcg, const nodemask_t *nodemask,
+ unsigned long totalpages);
extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 051c1b1ede4..3bdcab30ca4 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -3,7 +3,7 @@
/*
* Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
- * If specified range includes migrate types other than MOVABLE,
+ * If specified range includes migrate types other than MOVABLE or CMA,
* this will fail with -EBUSY.
*
* For isolating all pages in the range finally, the caller has to
@@ -11,27 +11,27 @@
* test it.
*/
extern int
-start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
+start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+ unsigned migratetype);
/*
* Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
* target range is [start_pfn, end_pfn)
*/
extern int
-undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
+undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+ unsigned migratetype);
/*
- * test all pages in [start_pfn, end_pfn)are isolated or not.
+ * Test all pages in [start_pfn, end_pfn) are isolated or not.
*/
-extern int
-test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
+int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
/*
- * Internal funcs.Changes pageblock's migrate type.
- * Please use make_pagetype_isolated()/make_pagetype_movable().
+ * Internal functions. Changes pageblock's migrate type.
*/
extern int set_migratetype_isolate(struct page *page);
-extern void unset_migratetype_isolate(struct page *page);
+extern void unset_migratetype_isolate(struct page *page, unsigned migratetype);
#endif
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index efa26b4da8d..7cfad3bbb0c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -460,11 +460,11 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
*/
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
- int ret;
+ int ret = 0;
char __user *end = uaddr + size - 1;
if (unlikely(size == 0))
- return 0;
+ return ret;
/*
* Writing zeroes into userspace here is OK, because we know that if
@@ -489,11 +489,11 @@ static inline int fault_in_multipages_readable(const char __user *uaddr,
int size)
{
volatile char c;
- int ret;
+ int ret = 0;
const char __user *end = uaddr + size - 1;
if (unlikely(size == 0))
- return 0;
+ return ret;
while (uaddr <= end) {
ret = __get_user(c, uaddr);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 17b7b5b01b4..d8c379dba6a 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -687,7 +687,7 @@ int __must_check pci_bus_add_device(struct pci_dev *dev);
void pci_read_bridge_bases(struct pci_bus *child);
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
struct resource *res);
-u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin);
+u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
extern struct pci_dev *pci_dev_get(struct pci_dev *dev);
@@ -1692,7 +1692,8 @@ extern void pci_release_bus_of_node(struct pci_bus *bus);
/* Arch may override this (weak) */
extern struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus);
-static inline struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
+static inline struct device_node *
+pci_device_to_OF_node(const struct pci_dev *pdev)
{
return pdev ? pdev->dev.of_node : NULL;
}
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 3329965ed63..ab741b0d007 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2506,6 +2506,7 @@
#define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F
#define PCI_DEVICE_ID_INTEL_I960 0x0960
#define PCI_DEVICE_ID_INTEL_I960RM 0x0962
+#define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60
#define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062
#define PCI_DEVICE_ID_INTEL_82573E_SOL 0x1085
#define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108F
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index fb201896a8b..7d7fbe2ef78 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -119,7 +119,7 @@ int __must_check res_counter_charge_locked(struct res_counter *counter,
unsigned long val, bool force);
int __must_check res_counter_charge(struct res_counter *counter,
unsigned long val, struct res_counter **limit_fail_at);
-int __must_check res_counter_charge_nofail(struct res_counter *counter,
+int res_counter_charge_nofail(struct res_counter *counter,
unsigned long val, struct res_counter **limit_fail_at);
/*
@@ -135,6 +135,9 @@ int __must_check res_counter_charge_nofail(struct res_counter *counter,
void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
void res_counter_uncharge(struct res_counter *counter, unsigned long val);
+void res_counter_uncharge_until(struct res_counter *counter,
+ struct res_counter *top,
+ unsigned long val);
/**
* res_counter_margin - calculate chargeable space of a counter
* @cnt: the counter
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index fd07c4542ce..3fce545df39 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -173,8 +173,6 @@ enum ttu_flags {
};
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
-bool is_vma_temporary_stack(struct vm_area_struct *vma);
-
int try_to_unmap(struct page *, enum ttu_flags flags);
int try_to_unmap_one(struct page *, struct vm_area_struct *,
unsigned long address, enum ttu_flags flags);
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index fcabfb4873c..f071b3922c6 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -91,6 +91,9 @@ struct rtc_pll_info {
#define RTC_PLL_GET _IOR('p', 0x11, struct rtc_pll_info) /* Get PLL correction */
#define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */
+#define RTC_VL_READ _IOR('p', 0x13, int) /* Voltage low detector */
+#define RTC_VL_CLR _IO('p', 0x14) /* Clear voltage low information */
+
/* interrupt flags */
#define RTC_IRQF 0x80 /* Any of the following is active */
#define RTC_PF 0x40 /* Periodic interrupt */
diff --git a/include/linux/rtc/ds1307.h b/include/linux/rtc/ds1307.h
new file mode 100644
index 00000000000..291b1c49036
--- /dev/null
+++ b/include/linux/rtc/ds1307.h
@@ -0,0 +1,22 @@
+/*
+ * ds1307.h - platform_data for the ds1307 (and variants) rtc driver
+ * (C) Copyright 2012 by Wolfram Sang, Pengutronix e.K.
+ * same license as the driver
+ */
+
+#ifndef _LINUX_DS1307_H
+#define _LINUX_DS1307_H
+
+#include <linux/types.h>
+
+#define DS1307_TRICKLE_CHARGER_250_OHM 0x01
+#define DS1307_TRICKLE_CHARGER_2K_OHM 0x02
+#define DS1307_TRICKLE_CHARGER_4K_OHM 0x03
+#define DS1307_TRICKLE_CHARGER_NO_DIODE 0x04
+#define DS1307_TRICKLE_CHARGER_DIODE 0x08
+
+struct ds1307_platform_data {
+ u8 trickle_charger_setup;
+};
+
+#endif /* _LINUX_DS1307_H */
diff --git a/include/linux/spi/orion_spi.h b/include/linux/spi/orion_spi.h
deleted file mode 100644
index b4d9fa6f797..00000000000
--- a/include/linux/spi/orion_spi.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * orion_spi.h
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __LINUX_SPI_ORION_SPI_H
-#define __LINUX_SPI_ORION_SPI_H
-
-struct orion_spi_info {
- u32 tclk; /* no <linux/clk.h> support yet */
-};
-
-
-#endif
diff --git a/include/linux/stmp_device.h b/include/linux/stmp_device.h
new file mode 100644
index 00000000000..6cf7ec9547c
--- /dev/null
+++ b/include/linux/stmp_device.h
@@ -0,0 +1,20 @@
+/*
+ * basic functions for devices following the "stmp" style register layout
+ *
+ * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __STMP_DEVICE_H__
+#define __STMP_DEVICE_H__
+
+#define STMP_OFFSET_REG_SET 0x4
+#define STMP_OFFSET_REG_CLR 0x8
+#define STMP_OFFSET_REG_TOG 0xc
+
+extern int stmp_reset_block(void __iomem *);
+#endif /* __STMP_DEVICE_H__ */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index b1fd5c7925f..b6661933e25 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -221,8 +221,8 @@ extern unsigned int nr_free_pagecache_pages(void);
/* linux/mm/swap.c */
extern void __lru_cache_add(struct page *, enum lru_list lru);
extern void lru_cache_add_lru(struct page *, enum lru_list lru);
-extern void lru_add_page_tail(struct zone* zone,
- struct page *page, struct page *page_tail);
+extern void lru_add_page_tail(struct page *page, struct page *page_tail,
+ struct lruvec *lruvec);
extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
@@ -251,7 +251,7 @@ static inline void lru_cache_add_file(struct page *page)
/* linux/mm/vmscan.c */
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *mask);
-extern int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file);
+extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
gfp_t gfp_mask, bool noswap);
extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
@@ -351,31 +351,14 @@ extern int swap_type_of(dev_t, sector_t, struct block_device **);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
+extern int page_swapcount(struct page *);
extern int reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
-/* linux/mm/thrash.c */
-extern struct mm_struct *swap_token_mm;
-extern void grab_swap_token(struct mm_struct *);
-extern void __put_swap_token(struct mm_struct *);
-extern void disable_swap_token(struct mem_cgroup *memcg);
-
-static inline int has_swap_token(struct mm_struct *mm)
-{
- return (mm == swap_token_mm);
-}
-
-static inline void put_swap_token(struct mm_struct *mm)
-{
- if (has_swap_token(mm))
- __put_swap_token(mm);
-}
-
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
extern void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
-extern int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep);
#else
static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
@@ -462,6 +445,11 @@ static inline void delete_from_swap_cache(struct page *page)
{
}
+static inline int page_swapcount(struct page *page)
+{
+ return 0;
+}
+
#define reuse_swap_page(page) (page_mapcount(page) == 1)
static inline int try_to_free_swap(struct page *page)
@@ -476,37 +464,11 @@ static inline swp_entry_t get_swap_page(void)
return entry;
}
-/* linux/mm/thrash.c */
-static inline void put_swap_token(struct mm_struct *mm)
-{
-}
-
-static inline void grab_swap_token(struct mm_struct *mm)
-{
-}
-
-static inline int has_swap_token(struct mm_struct *mm)
-{
- return 0;
-}
-
-static inline void disable_swap_token(struct mem_cgroup *memcg)
-{
-}
-
static inline void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
{
}
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-static inline int
-mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
-{
- return 0;
-}
-#endif
-
#endif /* CONFIG_SWAP */
#endif /* __KERNEL__*/
#endif /* _LINUX_SWAP_H */
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index a2b84f598e2..6d0a0fcd80e 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -58,7 +58,6 @@ extern const char *wb_reason_name[];
* in a manner such that unspecified fields are set to zero.
*/
struct writeback_control {
- enum writeback_sync_modes sync_mode;
long nr_to_write; /* Write this many pages, and decrement
this for each page written */
long pages_skipped; /* Pages which were not written */
@@ -71,6 +70,8 @@ struct writeback_control {
loff_t range_start;
loff_t range_end;
+ enum writeback_sync_modes sync_mode;
+
unsigned for_kupdate:1; /* A kupdate writeback */
unsigned for_background:1; /* A background writeback */
unsigned tagged_writepages:1; /* tag-and-write to avoid livelock */
@@ -94,6 +95,7 @@ long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
enum wb_reason reason);
long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
+void inode_wait_for_writeback(struct inode *inode);
/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
@@ -101,12 +103,6 @@ static inline void wait_on_inode(struct inode *inode)
might_sleep();
wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);
}
-static inline void inode_sync_wait(struct inode *inode)
-{
- might_sleep();
- wait_on_bit(&inode->i_state, __I_SYNC, inode_wait,
- TASK_UNINTERRUPTIBLE);
-}
/*
diff --git a/include/net/sock.h b/include/net/sock.h
index d89f0582b6b..4a452169956 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -46,6 +46,7 @@
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
+#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h> /* struct sk_buff */
@@ -921,12 +922,23 @@ struct proto {
#endif
};
+/*
+ * Bits in struct cg_proto.flags
+ */
+enum cg_proto_flags {
+ /* Currently active and new sockets should be assigned to cgroups */
+ MEMCG_SOCK_ACTIVE,
+ /* It was ever activated; we must disarm static keys on destruction */
+ MEMCG_SOCK_ACTIVATED,
+};
+
struct cg_proto {
void (*enter_memory_pressure)(struct sock *sk);
struct res_counter *memory_allocated; /* Current allocated memory. */
struct percpu_counter *sockets_allocated; /* Current number of sockets. */
int *memory_pressure;
long *sysctl_mem;
+ unsigned long flags;
/*
* memcg field is used to find which memcg we belong directly
* Each memcg struct can hold more than one cg_proto, so container_of
@@ -942,6 +954,16 @@ struct cg_proto {
extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);
+static inline bool memcg_proto_active(struct cg_proto *cg_proto)
+{
+ return test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
+}
+
+static inline bool memcg_proto_activated(struct cg_proto *cg_proto)
+{
+ return test_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags);
+}
+
#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
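A minimal userspace sketch of the flag pattern introduced above: the two enum values are bit numbers in cg_proto->flags, and the kernel tests them with test_bit() as shown in the hunk. The stand-ins below use plain shifts instead of the kernel's atomic bitops, and every name is illustrative only.

#include <stdbool.h>
#include <stdio.h>

/* Bit numbers, mirroring enum cg_proto_flags above. */
enum { SOCK_ACTIVE, SOCK_ACTIVATED };

struct proto_state {
	unsigned long flags;
};

/* Userspace stand-ins for the kernel's set_bit()/test_bit() atomics. */
static void set_flag(struct proto_state *p, int bit)
{
	p->flags |= 1UL << bit;
}

static bool test_flag(const struct proto_state *p, int bit)
{
	return p->flags & (1UL << bit);
}

int main(void)
{
	struct proto_state p = { 0 };

	/* Activation sets both bits; deactivation clears only ACTIVE, so
	 * ACTIVATED still records that the static keys were ever armed. */
	set_flag(&p, SOCK_ACTIVE);
	set_flag(&p, SOCK_ACTIVATED);
	p.flags &= ~(1UL << SOCK_ACTIVE);

	printf("active=%d activated=%d\n",
	       test_flag(&p, SOCK_ACTIVE), test_flag(&p, SOCK_ACTIVATED));
	return 0;
}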
diff --git a/include/trace/events/jbd.h b/include/trace/events/jbd.h
index aff64d82d71..da6f2591c25 100644
--- a/include/trace/events/jbd.h
+++ b/include/trace/events/jbd.h
@@ -36,19 +36,17 @@ DECLARE_EVENT_CLASS(jbd_commit,
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( char, sync_commit )
__field( int, transaction )
),
TP_fast_assign(
__entry->dev = journal->j_fs_dev->bd_dev;
- __entry->sync_commit = commit_transaction->t_synchronous_commit;
__entry->transaction = commit_transaction->t_tid;
),
- TP_printk("dev %d,%d transaction %d sync %d",
+ TP_printk("dev %d,%d transaction %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->transaction, __entry->sync_commit)
+ __entry->transaction)
);
DEFINE_EVENT(jbd_commit, jbd_start_commit,
@@ -87,19 +85,17 @@ TRACE_EVENT(jbd_drop_transaction,
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( char, sync_commit )
__field( int, transaction )
),
TP_fast_assign(
__entry->dev = journal->j_fs_dev->bd_dev;
- __entry->sync_commit = commit_transaction->t_synchronous_commit;
__entry->transaction = commit_transaction->t_tid;
),
- TP_printk("dev %d,%d transaction %d sync %d",
+ TP_printk("dev %d,%d transaction %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->transaction, __entry->sync_commit)
+ __entry->transaction)
);
TRACE_EVENT(jbd_end_commit,
@@ -109,21 +105,19 @@ TRACE_EVENT(jbd_end_commit,
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( char, sync_commit )
__field( int, transaction )
__field( int, head )
),
TP_fast_assign(
__entry->dev = journal->j_fs_dev->bd_dev;
- __entry->sync_commit = commit_transaction->t_synchronous_commit;
__entry->transaction = commit_transaction->t_tid;
__entry->head = journal->j_tail_sequence;
),
- TP_printk("dev %d,%d transaction %d sync %d head %d",
+ TP_printk("dev %d,%d transaction %d head %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->transaction, __entry->sync_commit, __entry->head)
+ __entry->transaction, __entry->head)
);
TRACE_EVENT(jbd_do_submit_data,
@@ -133,19 +127,17 @@ TRACE_EVENT(jbd_do_submit_data,
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( char, sync_commit )
__field( int, transaction )
),
TP_fast_assign(
__entry->dev = journal->j_fs_dev->bd_dev;
- __entry->sync_commit = commit_transaction->t_synchronous_commit;
__entry->transaction = commit_transaction->t_tid;
),
- TP_printk("dev %d,%d transaction %d sync %d",
+ TP_printk("dev %d,%d transaction %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->transaction, __entry->sync_commit)
+ __entry->transaction)
);
TRACE_EVENT(jbd_cleanup_journal_tail,
@@ -177,24 +169,23 @@ TRACE_EVENT(jbd_cleanup_journal_tail,
__entry->block_nr, __entry->freed)
);
-TRACE_EVENT(jbd_update_superblock_end,
- TP_PROTO(journal_t *journal, int wait),
+TRACE_EVENT(journal_write_superblock,
+ TP_PROTO(journal_t *journal, int write_op),
- TP_ARGS(journal, wait),
+ TP_ARGS(journal, write_op),
TP_STRUCT__entry(
__field( dev_t, dev )
- __field( int, wait )
+ __field( int, write_op )
),
TP_fast_assign(
__entry->dev = journal->j_fs_dev->bd_dev;
- __entry->wait = wait;
+ __entry->write_op = write_op;
),
- TP_printk("dev %d,%d wait %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->wait)
+ TP_printk("dev %d,%d write_op %x", MAJOR(__entry->dev),
+ MINOR(__entry->dev), __entry->write_op)
);
#endif /* _TRACE_JBD_H */
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index f64560e204b..bab3b87e406 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -13,7 +13,7 @@
#define RECLAIM_WB_ANON 0x0001u
#define RECLAIM_WB_FILE 0x0002u
#define RECLAIM_WB_MIXED 0x0010u
-#define RECLAIM_WB_SYNC 0x0004u
+#define RECLAIM_WB_SYNC 0x0004u /* Unused, all reclaim async */
#define RECLAIM_WB_ASYNC 0x0008u
#define show_reclaim_flags(flags) \
@@ -25,15 +25,15 @@
{RECLAIM_WB_ASYNC, "RECLAIM_WB_ASYNC"} \
) : "RECLAIM_WB_NONE"
-#define trace_reclaim_flags(page, sync) ( \
+#define trace_reclaim_flags(page) ( \
(page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
- (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
+ (RECLAIM_WB_ASYNC) \
)
-#define trace_shrink_flags(file, sync) ( \
- (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_MIXED : \
- (file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON)) | \
- (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \
+#define trace_shrink_flags(file) \
+ ( \
+ (file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
+ (RECLAIM_WB_ASYNC) \
)
TRACE_EVENT(mm_vmscan_kswapd_sleep,
@@ -263,22 +263,16 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
unsigned long nr_requested,
unsigned long nr_scanned,
unsigned long nr_taken,
- unsigned long nr_lumpy_taken,
- unsigned long nr_lumpy_dirty,
- unsigned long nr_lumpy_failed,
isolate_mode_t isolate_mode,
int file),
- TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode, file),
+ TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file),
TP_STRUCT__entry(
__field(int, order)
__field(unsigned long, nr_requested)
__field(unsigned long, nr_scanned)
__field(unsigned long, nr_taken)
- __field(unsigned long, nr_lumpy_taken)
- __field(unsigned long, nr_lumpy_dirty)
- __field(unsigned long, nr_lumpy_failed)
__field(isolate_mode_t, isolate_mode)
__field(int, file)
),
@@ -288,22 +282,16 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
__entry->nr_requested = nr_requested;
__entry->nr_scanned = nr_scanned;
__entry->nr_taken = nr_taken;
- __entry->nr_lumpy_taken = nr_lumpy_taken;
- __entry->nr_lumpy_dirty = nr_lumpy_dirty;
- __entry->nr_lumpy_failed = nr_lumpy_failed;
__entry->isolate_mode = isolate_mode;
__entry->file = file;
),
- TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu contig_taken=%lu contig_dirty=%lu contig_failed=%lu file=%d",
+ TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu file=%d",
__entry->isolate_mode,
__entry->order,
__entry->nr_requested,
__entry->nr_scanned,
__entry->nr_taken,
- __entry->nr_lumpy_taken,
- __entry->nr_lumpy_dirty,
- __entry->nr_lumpy_failed,
__entry->file)
);
@@ -313,13 +301,10 @@ DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_lru_isolate,
unsigned long nr_requested,
unsigned long nr_scanned,
unsigned long nr_taken,
- unsigned long nr_lumpy_taken,
- unsigned long nr_lumpy_dirty,
- unsigned long nr_lumpy_failed,
isolate_mode_t isolate_mode,
int file),
- TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode, file)
+ TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file)
);
@@ -329,13 +314,10 @@ DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_memcg_isolate,
unsigned long nr_requested,
unsigned long nr_scanned,
unsigned long nr_taken,
- unsigned long nr_lumpy_taken,
- unsigned long nr_lumpy_dirty,
- unsigned long nr_lumpy_failed,
isolate_mode_t isolate_mode,
int file),
- TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode, file)
+ TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file)
);
@@ -395,88 +377,6 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
show_reclaim_flags(__entry->reclaim_flags))
);
-TRACE_EVENT(replace_swap_token,
- TP_PROTO(struct mm_struct *old_mm,
- struct mm_struct *new_mm),
-
- TP_ARGS(old_mm, new_mm),
-
- TP_STRUCT__entry(
- __field(struct mm_struct*, old_mm)
- __field(unsigned int, old_prio)
- __field(struct mm_struct*, new_mm)
- __field(unsigned int, new_prio)
- ),
-
- TP_fast_assign(
- __entry->old_mm = old_mm;
- __entry->old_prio = old_mm ? old_mm->token_priority : 0;
- __entry->new_mm = new_mm;
- __entry->new_prio = new_mm->token_priority;
- ),
-
- TP_printk("old_token_mm=%p old_prio=%u new_token_mm=%p new_prio=%u",
- __entry->old_mm, __entry->old_prio,
- __entry->new_mm, __entry->new_prio)
-);
-
-DECLARE_EVENT_CLASS(put_swap_token_template,
- TP_PROTO(struct mm_struct *swap_token_mm),
-
- TP_ARGS(swap_token_mm),
-
- TP_STRUCT__entry(
- __field(struct mm_struct*, swap_token_mm)
- ),
-
- TP_fast_assign(
- __entry->swap_token_mm = swap_token_mm;
- ),
-
- TP_printk("token_mm=%p", __entry->swap_token_mm)
-);
-
-DEFINE_EVENT(put_swap_token_template, put_swap_token,
- TP_PROTO(struct mm_struct *swap_token_mm),
- TP_ARGS(swap_token_mm)
-);
-
-DEFINE_EVENT_CONDITION(put_swap_token_template, disable_swap_token,
- TP_PROTO(struct mm_struct *swap_token_mm),
- TP_ARGS(swap_token_mm),
- TP_CONDITION(swap_token_mm != NULL)
-);
-
-TRACE_EVENT_CONDITION(update_swap_token_priority,
- TP_PROTO(struct mm_struct *mm,
- unsigned int old_prio,
- struct mm_struct *swap_token_mm),
-
- TP_ARGS(mm, old_prio, swap_token_mm),
-
- TP_CONDITION(mm->token_priority != old_prio),
-
- TP_STRUCT__entry(
- __field(struct mm_struct*, mm)
- __field(unsigned int, old_prio)
- __field(unsigned int, new_prio)
- __field(struct mm_struct*, swap_token_mm)
- __field(unsigned int, swap_token_prio)
- ),
-
- TP_fast_assign(
- __entry->mm = mm;
- __entry->old_prio = old_prio;
- __entry->new_prio = mm->token_priority;
- __entry->swap_token_mm = swap_token_mm;
- __entry->swap_token_prio = swap_token_mm ? swap_token_mm->token_priority : 0;
- ),
-
- TP_printk("mm=%p old_prio=%u new_prio=%u swap_token_mm=%p token_prio=%u",
- __entry->mm, __entry->old_prio, __entry->new_prio,
- __entry->swap_token_mm, __entry->swap_token_prio)
-);
-
#endif /* _TRACE_VMSCAN_H */
/* This part must be outside protection */
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 7b81887b023..b453d92c225 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -372,6 +372,35 @@ TRACE_EVENT(balance_dirty_pages,
)
);
+TRACE_EVENT(writeback_sb_inodes_requeue,
+
+ TP_PROTO(struct inode *inode),
+ TP_ARGS(inode),
+
+ TP_STRUCT__entry(
+ __array(char, name, 32)
+ __field(unsigned long, ino)
+ __field(unsigned long, state)
+ __field(unsigned long, dirtied_when)
+ ),
+
+ TP_fast_assign(
+ strncpy(__entry->name,
+ dev_name(inode_to_bdi(inode)->dev), 32);
+ __entry->ino = inode->i_ino;
+ __entry->state = inode->i_state;
+ __entry->dirtied_when = inode->dirtied_when;
+ ),
+
+ TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu",
+ __entry->name,
+ __entry->ino,
+ show_inode_state(__entry->state),
+ __entry->dirtied_when,
+ (jiffies - __entry->dirtied_when) / HZ
+ )
+);
+
DECLARE_EVENT_CLASS(writeback_congest_waited_template,
TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
@@ -450,13 +479,6 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
)
);
-DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_requeue,
- TP_PROTO(struct inode *inode,
- struct writeback_control *wbc,
- unsigned long nr_to_write),
- TP_ARGS(inode, wbc, nr_to_write)
-);
-
DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
TP_PROTO(struct inode *inode,
struct writeback_control *wbc,
diff --git a/include/xen/acpi.h b/include/xen/acpi.h
new file mode 100644
index 00000000000..48a9c0171b6
--- /dev/null
+++ b/include/xen/acpi.h
@@ -0,0 +1,58 @@
+/******************************************************************************
+ * acpi.h
+ * acpi file for domain 0 kernel
+ *
+ * Copyright (c) 2011 Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+ * Copyright (c) 2011 Yu Ke <ke.yu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _XEN_ACPI_H
+#define _XEN_ACPI_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_XEN_DOM0
+#include <asm/xen/hypervisor.h>
+#include <xen/xen.h>
+#include <linux/acpi.h>
+
+int xen_acpi_notify_hypervisor_state(u8 sleep_state,
+ u32 pm1a_cnt, u32 pm1b_cnd);
+
+static inline void xen_acpi_sleep_register(void)
+{
+ if (xen_initial_domain())
+ acpi_os_set_prepare_sleep(
+ &xen_acpi_notify_hypervisor_state);
+}
+#else
+static inline void xen_acpi_sleep_register(void)
+{
+}
+#endif
+
+#endif /* _XEN_ACPI_H */
diff --git a/include/xen/events.h b/include/xen/events.h
index 0f773708e02..04399b28e82 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -103,6 +103,9 @@ int xen_irq_from_pirq(unsigned pirq);
/* Return the pirq allocated to the irq. */
int xen_pirq_from_irq(unsigned irq);
+/* Return the irq allocated to the gsi */
+int xen_irq_from_gsi(unsigned gsi);
+
/* Determine whether to ignore this IRQ if it is passed to a guest. */
int xen_test_irq_shared(int irq);
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 15f8a00ff00..11e27c3af3c 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -46,6 +46,8 @@
#include <xen/features.h>
+#define GNTTAB_RESERVED_XENSTORE 1
+
/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
#define NR_GRANT_FRAMES 4
diff --git a/include/xen/xenbus_dev.h b/include/xen/xenbus_dev.h
index ac5f0fe47ed..bbee8c6a349 100644
--- a/include/xen/xenbus_dev.h
+++ b/include/xen/xenbus_dev.h
@@ -38,4 +38,7 @@
#define IOCTL_XENBUS_BACKEND_EVTCHN \
_IOC(_IOC_NONE, 'B', 0, 0)
+#define IOCTL_XENBUS_BACKEND_SETUP \
+ _IOC(_IOC_NONE, 'B', 1, 0)
+
#endif /* __LINUX_XEN_XENBUS_DEV_H__ */
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index b6a0d46fbad..a2757d4ab77 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -251,7 +251,7 @@ static void mqueue_evict_inode(struct inode *inode)
int i;
struct ipc_namespace *ipc_ns;
- end_writeback(inode);
+ clear_inode(inode);
if (S_ISDIR(inode->i_mode))
return;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index a0c6af34d50..0f3527d6184 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -5132,7 +5132,7 @@ EXPORT_SYMBOL_GPL(css_depth);
* @root: the css supposed to be an ancestor of the child.
*
* Returns true if "root" is an ancestor of "child" in its hierarchy. Because
- * this function reads css->id, this use rcu_dereference() and rcu_read_lock().
+ * this function reads css->id, the caller must hold rcu_read_lock().
* But, considering usual usage, the csses should be valid objects after test.
* Assuming that the caller will do some action to the child if this
* returns true, the caller must take "child"'s reference count.
@@ -5144,18 +5144,18 @@ bool css_is_ancestor(struct cgroup_subsys_state *child,
{
struct css_id *child_id;
struct css_id *root_id;
- bool ret = true;
- rcu_read_lock();
child_id = rcu_dereference(child->id);
+ if (!child_id)
+ return false;
root_id = rcu_dereference(root->id);
- if (!child_id
- || !root_id
- || (child_id->depth < root_id->depth)
- || (child_id->stack[root_id->depth] != root_id->id))
- ret = false;
- rcu_read_unlock();
- return ret;
+ if (!root_id)
+ return false;
+ if (child_id->depth < root_id->depth)
+ return false;
+ if (child_id->stack[root_id->depth] != root_id->id)
+ return false;
+ return true;
}
void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
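The rewritten css_is_ancestor() relies on each css_id recording the ids of all its ancestors, indexed by depth, so the ancestry test is a single array lookup. A self-contained sketch of that check (the struct below is an illustrative analogue of struct css_id, not the kernel definition; the kernel version also requires the caller to hold rcu_read_lock()):

#include <stdbool.h>
#include <stdio.h>

#define MAX_DEPTH 8

/* Illustrative analogue of struct css_id: stack[d] holds the id of the
 * ancestor at depth d, up to and including the node itself. */
struct node_id {
	int id;
	int depth;
	int stack[MAX_DEPTH];
};

static bool is_ancestor(const struct node_id *child, const struct node_id *root)
{
	if (child->depth < root->depth)
		return false;
	return child->stack[root->depth] == root->id;
}

int main(void)
{
	struct node_id root  = { .id = 1, .depth = 0, .stack = { 1 } };
	struct node_id child = { .id = 5, .depth = 2, .stack = { 1, 3, 5 } };
	struct node_id other = { .id = 7, .depth = 1, .stack = { 2, 7 } };

	printf("%d %d\n", is_ancestor(&child, &root), is_ancestor(&other, &root));
	return 0;	/* prints "1 0" */
}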
diff --git a/kernel/fork.c b/kernel/fork.c
index 47b4e4f379f..017fb23d598 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -386,7 +386,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
}
charge = 0;
if (mpnt->vm_flags & VM_ACCOUNT) {
- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
+ unsigned long len;
+ len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
goto fail_nomem;
charge = len;
@@ -614,7 +615,6 @@ void mmput(struct mm_struct *mm)
list_del(&mm->mmlist);
spin_unlock(&mmlist_lock);
}
- put_swap_token(mm);
if (mm->binfmt)
module_put(mm->binfmt->module);
mmdrop(mm);
@@ -831,10 +831,6 @@ struct mm_struct *dup_mm(struct task_struct *tsk)
memcpy(mm, oldmm, sizeof(*mm));
mm_init_cpumask(mm);
- /* Initializing for Swap token stuff */
- mm->token_priority = 0;
- mm->last_interval = 0;
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
mm->pmd_huge_pte = NULL;
#endif
@@ -913,10 +909,6 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
goto fail_nomem;
good_mm:
- /* Initializing for Swap token stuff */
- mm->token_priority = 0;
- mm->last_interval = 0;
-
tsk->mm = mm;
tsk->active_mm = mm;
return 0;
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 079f1d39a8b..2169feeba52 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -343,7 +343,7 @@ int lookup_symbol_attrs(unsigned long addr, unsigned long *size,
/* Look up a kernel symbol and return it in a text buffer. */
static int __sprint_symbol(char *buffer, unsigned long address,
- int symbol_offset)
+ int symbol_offset, int add_offset)
{
char *modname;
const char *name;
@@ -358,13 +358,13 @@ static int __sprint_symbol(char *buffer, unsigned long address,
if (name != buffer)
strcpy(buffer, name);
len = strlen(buffer);
- buffer += len;
offset -= symbol_offset;
+ if (add_offset)
+ len += sprintf(buffer + len, "+%#lx/%#lx", offset, size);
+
if (modname)
- len += sprintf(buffer, "+%#lx/%#lx [%s]", offset, size, modname);
- else
- len += sprintf(buffer, "+%#lx/%#lx", offset, size);
+ len += sprintf(buffer + len, " [%s]", modname);
return len;
}
@@ -382,12 +382,28 @@ static int __sprint_symbol(char *buffer, unsigned long address,
*/
int sprint_symbol(char *buffer, unsigned long address)
{
- return __sprint_symbol(buffer, address, 0);
+ return __sprint_symbol(buffer, address, 0, 1);
}
-
EXPORT_SYMBOL_GPL(sprint_symbol);
/**
+ * sprint_symbol_no_offset - Look up a kernel symbol and return it in a text buffer
+ * @buffer: buffer to be stored
+ * @address: address to lookup
+ *
+ * This function looks up a kernel symbol with @address and stores its name
+ * and module name to @buffer if possible. If no symbol was found, just saves
+ * its @address as is.
+ *
+ * This function returns the number of bytes stored in @buffer.
+ */
+int sprint_symbol_no_offset(char *buffer, unsigned long address)
+{
+ return __sprint_symbol(buffer, address, 0, 0);
+}
+EXPORT_SYMBOL_GPL(sprint_symbol_no_offset);
+
+/**
* sprint_backtrace - Look up a backtrace symbol and return it in a text buffer
* @buffer: buffer to be stored
* @address: address to lookup
@@ -403,7 +419,7 @@ EXPORT_SYMBOL_GPL(sprint_symbol);
*/
int sprint_backtrace(char *buffer, unsigned long address)
{
- return __sprint_symbol(buffer, address, -1);
+ return __sprint_symbol(buffer, address, -1, 1);
}
/* Look up a kernel symbol and print it to the kernel messages. */
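The kallsyms change above makes the "+offset/size" part optional and moves "[module]" to the end of the buffer. A sketch of the two output shapes (all symbol and module names below are made up for illustration):

#include <stdio.h>

/* Formats "name[+0xoff/0xsize][ [module]]" in the order the reworked
 * __sprint_symbol() emits its parts; purely illustrative. */
static int format_symbol(char *buf, const char *name, unsigned long offset,
			 unsigned long size, const char *modname, int add_offset)
{
	int len = sprintf(buf, "%s", name);

	if (add_offset)
		len += sprintf(buf + len, "+%#lx/%#lx", offset, size);
	if (modname)
		len += sprintf(buf + len, " [%s]", modname);
	return len;
}

int main(void)
{
	char buf[128];

	format_symbol(buf, "my_func", 0x1c, 0x220, "my_module", 1);
	puts(buf);	/* my_func+0x1c/0x220 [my_module] */
	format_symbol(buf, "my_func", 0x1c, 0x220, "my_module", 0);
	puts(buf);	/* my_func [my_module] -- the sprint_symbol_no_offset() shape */
	return 0;
}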
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index bebe2b170d4..ad581aa2369 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -94,13 +94,15 @@ void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
counter->usage -= val;
}
-void res_counter_uncharge(struct res_counter *counter, unsigned long val)
+void res_counter_uncharge_until(struct res_counter *counter,
+ struct res_counter *top,
+ unsigned long val)
{
unsigned long flags;
struct res_counter *c;
local_irq_save(flags);
- for (c = counter; c != NULL; c = c->parent) {
+ for (c = counter; c != top; c = c->parent) {
spin_lock(&c->lock);
res_counter_uncharge_locked(c, val);
spin_unlock(&c->lock);
@@ -108,6 +110,10 @@ void res_counter_uncharge(struct res_counter *counter, unsigned long val)
local_irq_restore(flags);
}
+void res_counter_uncharge(struct res_counter *counter, unsigned long val)
+{
+ res_counter_uncharge_until(counter, NULL, val);
+}
static inline unsigned long long *
res_counter_member(struct res_counter *counter, int member)
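res_counter_uncharge_until() walks the parent chain from the charged counter up to, but not including, top; passing NULL walks the whole chain, which is exactly how res_counter_uncharge() is re-expressed above. A simplified userspace sketch of that walk (the per-counter spinlocks and the irq-save dance are omitted, and the struct is an illustrative stand-in):

#include <stdio.h>

/* Simplified analogue of struct res_counter: a usage count plus parent. */
struct counter {
	unsigned long usage;
	struct counter *parent;
};

/* Uncharge every level from c up to, but not including, top.
 * top == NULL means the whole chain, i.e. plain uncharge. */
static void uncharge_until(struct counter *c, struct counter *top,
			   unsigned long val)
{
	for (; c != top; c = c->parent)
		c->usage -= val;
}

int main(void)
{
	struct counter root = { 300, NULL };
	struct counter mid  = { 200, &root };
	struct counter leaf = { 100, &mid };

	uncharge_until(&leaf, &root, 100);	/* stops before root */
	printf("%lu %lu %lu\n", leaf.usage, mid.usage, root.usage); /* 0 100 300 */

	uncharge_until(&mid, NULL, 100);	/* full chain, like uncharge() */
	printf("%lu %lu\n", mid.usage, root.usage);		    /* 0 200 */
	return 0;
}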
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index df30ee08bdd..e5e1d85b8c7 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -24,6 +24,7 @@
#include <linux/sysctl.h>
#include <asm/irq_regs.h>
+#include <linux/kvm_para.h>
#include <linux/perf_event.h>
int watchdog_enabled = 1;
@@ -280,6 +281,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
__this_cpu_write(softlockup_touch_sync, false);
sched_clock_tick();
}
+
+ /* Clear the guest paused flag on watchdog reset */
+ kvm_check_and_clear_guest_paused();
__touch_watchdog();
return HRTIMER_RESTART;
}
@@ -292,6 +296,14 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
*/
duration = is_softlockup(touch_ts);
if (unlikely(duration)) {
+ /*
+ * If a virtual machine is stopped by the host it can look to
+ * the watchdog like a soft lockup, check to see if the host
+ * stopped the vm before we issue the warning
+ */
+ if (kvm_check_and_clear_guest_paused())
+ return HRTIMER_RESTART;
+
/* only warn once */
if (__this_cpu_read(soft_watchdog_warn) == true)
return HRTIMER_RESTART;
diff --git a/lib/Kconfig b/lib/Kconfig
index 98230ac3db2..a9e15403434 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -19,6 +19,9 @@ config RATIONAL
config GENERIC_STRNCPY_FROM_USER
bool
+config GENERIC_STRNLEN_USER
+ bool
+
config GENERIC_FIND_FIRST_BIT
bool
@@ -36,6 +39,9 @@ config GENERIC_IO
boolean
default n
+config STMP_DEVICE
+ bool
+
config CRC_CCITT
tristate "CRC-CCITT functions"
help
diff --git a/lib/Makefile b/lib/Makefile
index b98df505f33..8c31a0cb75e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -126,6 +126,9 @@ obj-$(CONFIG_CLZ_TAB) += clz_tab.o
obj-$(CONFIG_DDR) += jedec_ddr_data.o
obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o
+obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
+
+obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
hostprogs-y := gen_crc32table
clean-files := crc32table.h
diff --git a/lib/bitmap.c b/lib/bitmap.c
index b5a8b6ad245..06fdfa1aeba 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -369,7 +369,8 @@ EXPORT_SYMBOL(bitmap_find_next_zero_area);
* @nmaskbits: size of bitmap, in bits
*
* Exactly @nmaskbits bits are displayed. Hex digits are grouped into
- * comma-separated sets of eight digits per set.
+ * comma-separated sets of eight digits per set. Returns the number of
+ * characters which were written to *buf, excluding the trailing \0.
*/
int bitmap_scnprintf(char *buf, unsigned int buflen,
const unsigned long *maskp, int nmaskbits)
@@ -517,8 +518,8 @@ EXPORT_SYMBOL(bitmap_parse_user);
*
* Helper routine for bitmap_scnlistprintf(). Write decimal number
* or range to buf, suppressing output past buf+buflen, with optional
- * comma-prefix. Return len of what would be written to buf, if it
- * all fit.
+ * comma-prefix. Return len of what was written to *buf, excluding the
+ * trailing \0.
*/
static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len)
{
@@ -544,9 +545,8 @@ static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len)
* the range. Output format is compatible with the format
* accepted as input by bitmap_parselist().
*
- * The return value is the number of characters which would be
- * generated for the given input, excluding the trailing '\0', as
- * per ISO C99.
+ * The return value is the number of characters which were written to *buf
+ * excluding the trailing '\0', as per ISO C99's scnprintf.
*/
int bitmap_scnlistprintf(char *buf, unsigned int buflen,
const unsigned long *maskp, int nmaskbits)
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 3810b481f94..23a5e031cd8 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -31,6 +31,9 @@ void __list_add(struct list_head *new,
"list_add corruption. prev->next should be "
"next (%p), but was %p. (prev=%p).\n",
next, prev->next, prev);
+ WARN(new == prev || new == next,
+ "list_add double add: new=%p, prev=%p, next=%p.\n",
+ new, prev, next);
next->prev = new;
new->next = next;
new->prev = prev;
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 86516f5588e..d7c878cc006 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -73,11 +73,24 @@ static unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1] __read_mostly;
static struct kmem_cache *radix_tree_node_cachep;
/*
+ * The radix tree is variable-height, so an insert operation not only has
+ * to build the branch to its corresponding item, it also has to build the
+ * branch to existing items if the size has to be increased (by
+ * radix_tree_extend).
+ *
+ * The worst case is a zero height tree with just a single item at index 0,
+ * and then inserting an item at index ULONG_MAX. This requires 2 new branches
+ * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
+ * Hence:
+ */
+#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
+
+/*
* Per-cpu pool of preloaded nodes
*/
struct radix_tree_preload {
int nr;
- struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
+ struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
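The new preload bound follows directly from the worst case described in the comment: two full root-to-leaf branches that share only the root node. The figures below are assumptions about the usual configuration (64-bit indexes, the default map shift of 6), not values taken from this diff:

#include <stdio.h>

/* Worst-case node preload for a radix tree insert: two RADIX_TREE_MAX_PATH
 * branches minus the shared root.  Assumes 64-bit indexes and the default
 * RADIX_TREE_MAP_SHIFT of 6. */
#define INDEX_BITS	64
#define MAP_SHIFT	6
#define MAX_PATH	((INDEX_BITS + MAP_SHIFT - 1) / MAP_SHIFT)
#define PRELOAD_SIZE	(MAX_PATH * 2 - 1)

int main(void)
{
	printf("max path %d, preload %d nodes\n", MAX_PATH, PRELOAD_SIZE);
	/* -> max path 11, preload 21 nodes */
	return 0;
}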
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 525d160d44f..d0ec4f3d159 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -58,7 +58,7 @@ static void spin_dump(raw_spinlock_t *lock, const char *msg)
printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
msg, raw_smp_processor_id(),
current->comm, task_pid_nr(current));
- printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, "
+ printk(KERN_EMERG " lock: %ps, .magic: %08x, .owner: %s/%d, "
".owner_cpu: %d\n",
lock, lock->magic,
owner ? owner->comm : "<none>",
diff --git a/lib/stmp_device.c b/lib/stmp_device.c
new file mode 100644
index 00000000000..8ac9bcc4289
--- /dev/null
+++ b/lib/stmp_device.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 1999 ARM Limited
+ * Copyright (C) 2000 Deep Blue Solutions Ltd
+ * Copyright 2006-2007,2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
+ * Copyright 2009 Ilya Yanok, Emcraft Systems Ltd, yanok@emcraft.com
+ * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/stmp_device.h>
+
+#define STMP_MODULE_CLKGATE (1 << 30)
+#define STMP_MODULE_SFTRST (1 << 31)
+
+/*
+ * Clear the bit and poll it cleared. This is usually called with
+ * a reset address and mask being either SFTRST(bit 31) or CLKGATE
+ * (bit 30).
+ */
+static int stmp_clear_poll_bit(void __iomem *addr, u32 mask)
+{
+ int timeout = 0x400;
+
+ writel(mask, addr + STMP_OFFSET_REG_CLR);
+ udelay(1);
+ while ((readl(addr) & mask) && --timeout)
+ /* nothing */;
+
+ return !timeout;
+}
+
+int stmp_reset_block(void __iomem *reset_addr)
+{
+ int ret;
+ int timeout = 0x400;
+
+ /* clear and poll SFTRST */
+ ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_SFTRST);
+ if (unlikely(ret))
+ goto error;
+
+ /* clear CLKGATE */
+ writel(STMP_MODULE_CLKGATE, reset_addr + STMP_OFFSET_REG_CLR);
+
+ /* set SFTRST to reset the block */
+ writel(STMP_MODULE_SFTRST, reset_addr + STMP_OFFSET_REG_SET);
+ udelay(1);
+
+ /* poll CLKGATE becoming set */
+ while ((!(readl(reset_addr) & STMP_MODULE_CLKGATE)) && --timeout)
+ /* nothing */;
+ if (unlikely(!timeout))
+ goto error;
+
+ /* clear and poll SFTRST */
+ ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_SFTRST);
+ if (unlikely(ret))
+ goto error;
+
+ /* clear and poll CLKGATE */
+ ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_CLKGATE);
+ if (unlikely(ret))
+ goto error;
+
+ return 0;
+
+error:
+ pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(stmp_reset_block);
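stmp_reset_block() leans on the SET/CLR/TOG shadow offsets declared in stmp_device.h: writing a mask at base+0x4 sets bits, base+0x8 clears them and base+0xc toggles them. A userspace model of that register convention (real hardware implements it in the bus fabric; the code below only simulates the effect):

#include <stdio.h>
#include <stdint.h>

struct stmp_reg {
	uint32_t val;
};

enum { REG_SET = 0x4, REG_CLR = 0x8, REG_TOG = 0xc };

/* Simulated register write: the offset selects how the mask is applied. */
static void stmp_write(struct stmp_reg *r, unsigned int off, uint32_t mask)
{
	switch (off) {
	case 0:       r->val = mask;   break;
	case REG_SET: r->val |= mask;  break;
	case REG_CLR: r->val &= ~mask; break;
	case REG_TOG: r->val ^= mask;  break;
	}
}

int main(void)
{
	struct stmp_reg r = { 0 };
	const uint32_t SFTRST = 1u << 31, CLKGATE = 1u << 30;

	stmp_write(&r, REG_SET, SFTRST | CLKGATE);
	stmp_write(&r, REG_CLR, SFTRST);	/* the clear-and-poll step */
	printf("val=%#x\n", (unsigned int)r.val);	/* val=0x40000000 */
	return 0;
}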
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index dd4ece37269..1cffc223bff 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -23,15 +23,15 @@
int string_get_size(u64 size, const enum string_size_units units,
char *buf, int len)
{
- const char *units_10[] = { "B", "kB", "MB", "GB", "TB", "PB",
+ static const char *units_10[] = { "B", "kB", "MB", "GB", "TB", "PB",
"EB", "ZB", "YB", NULL};
- const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB",
+ static const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB",
"EiB", "ZiB", "YiB", NULL };
- const char **units_str[] = {
+ static const char **units_str[] = {
[STRING_UNITS_10] = units_10,
[STRING_UNITS_2] = units_2,
};
- const unsigned int divisor[] = {
+ static const unsigned int divisor[] = {
[STRING_UNITS_10] = 1000,
[STRING_UNITS_2] = 1024,
};
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index c4c09b0e96b..bb2b201d6ad 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -4,37 +4,7 @@
#include <linux/errno.h>
#include <asm/byteorder.h>
-
-static inline long find_zero(unsigned long mask)
-{
- long byte = 0;
-
-#ifdef __BIG_ENDIAN
-#ifdef CONFIG_64BIT
- if (mask >> 32)
- mask >>= 32;
- else
- byte = 4;
-#endif
- if (mask >> 16)
- mask >>= 16;
- else
- byte += 2;
- return (mask >> 8) ? byte : byte + 1;
-#else
-#ifdef CONFIG_64BIT
- if (!((unsigned int) mask)) {
- mask >>= 32;
- byte = 4;
- }
-#endif
- if (!(mask & 0xffff)) {
- mask >>= 16;
- byte += 2;
- }
- return (mask & 0xff) ? byte : byte + 1;
-#endif
-}
+#include <asm/word-at-a-time.h>
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define IS_UNALIGNED(src, dst) 0
@@ -51,8 +21,7 @@ static inline long find_zero(unsigned long mask)
*/
static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
{
- const unsigned long high_bits = REPEAT_BYTE(0xfe) + 1;
- const unsigned long low_bits = REPEAT_BYTE(0x7f);
+ const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
long res = 0;
/*
@@ -66,18 +35,16 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, long
goto byte_at_a_time;
while (max >= sizeof(unsigned long)) {
- unsigned long c, v, rhs;
+ unsigned long c, data;
/* Fall back to byte-at-a-time if we get a page fault */
if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
break;
- rhs = c | low_bits;
- v = (c + high_bits) & ~rhs;
*(unsigned long *)(dst+res) = c;
- if (v) {
- v = (c & low_bits) + low_bits;
- v = ~(v | rhs);
- return res + find_zero(v);
+ if (has_zero(c, &data, &constants)) {
+ data = prep_zero_mask(c, data, &constants);
+ data = create_zero_mask(data);
+ return res + find_zero(data);
}
res += sizeof(unsigned long);
max -= sizeof(unsigned long);
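The strncpy_from_user() rewrite delegates zero-byte detection to <asm/word-at-a-time.h>. The classic portable form of that test is sketched below for a 64-bit little-endian build; ONES/HIGHS and the local has_zero()/find_zero() are stand-ins for the arch-provided constants and helpers, and __builtin_ctzl is a GCC builtin:

#include <stdio.h>
#include <string.h>

/* 64-bit little-endian sketch of the word-at-a-time zero-byte test.
 * (word - ONES) & ~word & HIGHS is non-zero iff word has a zero byte,
 * and its lowest set bit marks the first such byte. */
#define ONES  0x0101010101010101UL
#define HIGHS 0x8080808080808080UL

static unsigned long has_zero(unsigned long word)
{
	return (word - ONES) & ~word & HIGHS;
}

static long find_zero(unsigned long mask)
{
	return __builtin_ctzl(mask) / 8;	/* index of first zero byte */
}

int main(void)
{
	const char src[8] = "abc";		/* NUL at byte 3 */
	unsigned long c, mask;

	memcpy(&c, src, sizeof(c));
	mask = has_zero(c);
	if (mask)
		printf("string length %ld\n", find_zero(mask));	/* 3 */
	return 0;
}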
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
new file mode 100644
index 00000000000..a28df5206d9
--- /dev/null
+++ b/lib/strnlen_user.c
@@ -0,0 +1,138 @@
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/uaccess.h>
+
+#include <asm/word-at-a-time.h>
+
+/* Set bits in the first 'n' bytes when loaded from memory */
+#ifdef __LITTLE_ENDIAN
+# define aligned_byte_mask(n) ((1ul << 8*(n))-1)
+#else
+# define aligned_byte_mask(n) (~0xfful << (BITS_PER_LONG - 8 - 8*(n)))
+#endif
+
+/*
+ * Do a strnlen, return length of string *with* final '\0'.
+ * 'count' is the user-supplied count, while 'max' is the
+ * address space maximum.
+ *
+ * Return 0 for exceptions (which includes hitting the address
+ * space maximum), or 'count+1' if hitting the user-supplied
+ * maximum count.
+ *
+ * NOTE! We can sometimes overshoot the user-supplied maximum
+ * if it fits in an aligned 'long'. The caller needs to check
+ * the return value against "> max".
+ */
+static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
+{
+ const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+ long align, res = 0;
+ unsigned long c;
+
+ /*
+ * Truncate 'max' to the user-specified limit, so that
+ * we only have one limit we need to check in the loop
+ */
+ if (max > count)
+ max = count;
+
+ /*
+ * Do everything aligned. But that means that we
+ * need to also expand the maximum..
+ */
+ align = (sizeof(long) - 1) & (unsigned long)src;
+ src -= align;
+ max += align;
+
+ if (unlikely(__get_user(c,(unsigned long __user *)src)))
+ return 0;
+ c |= aligned_byte_mask(align);
+
+ for (;;) {
+ unsigned long data;
+ if (has_zero(c, &data, &constants)) {
+ data = prep_zero_mask(c, data, &constants);
+ data = create_zero_mask(data);
+ return res + find_zero(data) + 1 - align;
+ }
+ res += sizeof(unsigned long);
+ if (unlikely(max < sizeof(unsigned long)))
+ break;
+ max -= sizeof(unsigned long);
+ if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
+ return 0;
+ }
+ res -= align;
+
+ /*
+ * Uhhuh. We hit 'max'. But was that the user-specified maximum
+ * too? If so, return the marker for "too long".
+ */
+ if (res >= count)
+ return count+1;
+
+ /*
+ * Nope: we hit the address space limit, and we still had more
+ * characters the caller would have wanted. That's 0.
+ */
+ return 0;
+}
+
+/**
+ * strnlen_user: - Get the size of a user string INCLUDING final NUL.
+ * @str: The string to measure.
+ * @count: Maximum count (including NUL character)
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * Get the size of a NUL-terminated string in user space.
+ *
+ * Returns the size of the string INCLUDING the terminating NUL.
+ * If the string is too long, returns 'count+1'.
+ * On exception (or invalid count), returns 0.
+ */
+long strnlen_user(const char __user *str, long count)
+{
+ unsigned long max_addr, src_addr;
+
+ if (unlikely(count <= 0))
+ return 0;
+
+ max_addr = user_addr_max();
+ src_addr = (unsigned long)str;
+ if (likely(src_addr < max_addr)) {
+ unsigned long max = max_addr - src_addr;
+ return do_strnlen_user(str, count, max);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(strnlen_user);
+
+/**
+ * strlen_user: - Get the size of a user string INCLUDING final NUL.
+ * @str: The string to measure.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * Get the size of a NUL-terminated string in user space.
+ *
+ * Returns the size of the string INCLUDING the terminating NUL.
+ * On exception, returns 0.
+ *
+ * If there is a limit on the length of a valid string, you may wish to
+ * consider using strnlen_user() instead.
+ */
+long strlen_user(const char __user *str)
+{
+ unsigned long max_addr, src_addr;
+
+ max_addr = user_addr_max();
+ src_addr = (unsigned long)str;
+ if (likely(src_addr < max_addr)) {
+ unsigned long max = max_addr - src_addr;
+ return do_strnlen_user(str, ~0ul, max);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(strlen_user);
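do_strnlen_user() rounds the first read down to a word boundary and ORs aligned_byte_mask(align) into it, so the bytes that sit before the real string start can never be mistaken for the terminator. A little-endian sketch of that fix-up (the buffer layout below is invented for illustration):

#include <stdio.h>
#include <string.h>

/* Little-endian form of aligned_byte_mask(): set bits in the first n bytes. */
#define aligned_byte_mask(n) ((1ul << 8 * (n)) - 1)

int main(void)
{
	/* Pretend the string "hi" starts 3 bytes into an aligned 8-byte word. */
	unsigned char word[8] = { 0, 0, 0, 'h', 'i', 0, 0, 0 };
	unsigned long c, align = 3;

	memcpy(&c, word, sizeof(c));
	c |= aligned_byte_mask(align);	/* bytes 0..2 become 0xff */

	/* The first zero byte is now byte 5, so the length is 5 - align = 2. */
	printf("masked word = %#lx\n", c);
	return 0;
}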
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 414f46ed1dc..45bc1f83a5a 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -130,11 +130,9 @@ void swiotlb_print_info(void)
pstart = virt_to_phys(io_tlb_start);
pend = virt_to_phys(io_tlb_end);
- printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
- bytes >> 20, io_tlb_start, io_tlb_end);
- printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
- (unsigned long long)pstart,
- (unsigned long long)pend);
+ printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
+ (unsigned long long)pstart, (unsigned long long)pend - 1,
+ bytes >> 20, io_tlb_start, io_tlb_end - 1);
}
void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
diff --git a/lib/test-kstrtox.c b/lib/test-kstrtox.c
index d55769d63cb..bea3f3fa3f0 100644
--- a/lib/test-kstrtox.c
+++ b/lib/test-kstrtox.c
@@ -11,7 +11,7 @@ struct test_fail {
};
#define DEFINE_TEST_FAIL(test) \
- const struct test_fail test[] __initdata
+ const struct test_fail test[] __initconst
#define DECLARE_TEST_OK(type, test_type) \
test_type { \
@@ -21,7 +21,7 @@ struct test_fail {
}
#define DEFINE_TEST_OK(type, test) \
- const type test[] __initdata
+ const type test[] __initconst
#define TEST_FAIL(fn, type, fmt, test) \
{ \
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index abbabec9720..5391299c1e7 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -284,6 +284,7 @@ char *number(char *buf, char *end, unsigned long long num,
char locase;
int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10);
int i;
+ bool is_zero = num == 0LL;
/* locase = 0 or 0x20. ORing digits or letters with 'locase'
* produces same digits or (maybe lowercased) letters */
@@ -305,8 +306,9 @@ char *number(char *buf, char *end, unsigned long long num,
}
}
if (need_pfx) {
- spec.field_width--;
if (spec.base == 16)
+ spec.field_width -= 2;
+ else if (!is_zero)
spec.field_width--;
}
@@ -353,9 +355,11 @@ char *number(char *buf, char *end, unsigned long long num,
}
/* "0x" / "0" prefix */
if (need_pfx) {
- if (buf < end)
- *buf = '0';
- ++buf;
+ if (spec.base == 16 || !is_zero) {
+ if (buf < end)
+ *buf = '0';
+ ++buf;
+ }
if (spec.base == 16) {
if (buf < end)
*buf = ('X' | locase);
@@ -436,7 +440,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
else if (ext != 'f' && ext != 's')
sprint_symbol(sym, value);
else
- kallsyms_lookup(value, NULL, NULL, NULL, sym);
+ sprint_symbol_no_offset(sym, value);
return string(buf, end, sym, spec);
#else
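The number() change gives '#'-prefixed hex a fixed two-character "0x" prefix while octal reserves its extra '0' only for non-zero values. A sketch of just that width accounting (not the kernel function itself):

#include <stdio.h>
#include <stdbool.h>

/* How many characters the '#' prefix consumes after the patch:
 * base 16 always pays for "0x"; base 8 pays for its leading '0'
 * only when the value is non-zero (zero already prints as "0"). */
static int prefix_len(int base, bool special, unsigned long long num)
{
	if (!special || base == 10)
		return 0;
	if (base == 16)
		return 2;
	return num != 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       prefix_len(16, true, 0),	/* 2: "0x0" */
	       prefix_len(8, true, 0),	/* 0: "0"   */
	       prefix_len(8, true, 9));	/* 1: "011" */
	return 0;
}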
diff --git a/mm/Kconfig b/mm/Kconfig
index e338407f122..b2176374b98 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -198,7 +198,7 @@ config COMPACTION
config MIGRATION
bool "Page migration"
def_bool y
- depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION
+ depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA
help
Allows the migration of the physical location of pages of processes
while the virtual addresses are not changed. This is useful in
@@ -349,6 +349,16 @@ choice
benefit.
endchoice
+config CROSS_MEMORY_ATTACH
+ bool "Cross Memory Support"
+ depends on MMU
+ default y
+ help
+ Enabling this option adds the system calls process_vm_readv and
+ process_vm_writev which allow a process with the correct privileges
+ to directly read from or write to another process's address space.
+ See the man page for more details.
+
#
# UP and nommu archs use km based percpu allocator
#
diff --git a/mm/Makefile b/mm/Makefile
index 50ec00ef2a0..a156285ce88 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -5,15 +5,18 @@
mmu-y := nommu.o
mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \
mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
- vmalloc.o pagewalk.o pgtable-generic.o \
- process_vm_access.o
+ vmalloc.o pagewalk.o pgtable-generic.o
+
+ifdef CONFIG_CROSS_MEMORY_ATTACH
+mmu-$(CONFIG_MMU) += process_vm_access.o
+endif
obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
maccess.o page_alloc.o page-writeback.o \
readahead.o swap.o truncate.o vmscan.o shmem.o \
prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
page_isolation.o mm_init.o mmu_context.o percpu.o \
- $(mmu-y)
+ compaction.o $(mmu-y)
obj-y += init-mm.o
ifdef CONFIG_NO_BOOTMEM
@@ -25,14 +28,13 @@ endif
obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
obj-$(CONFIG_BOUNCE) += bounce.o
-obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o
+obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o
obj-$(CONFIG_HAS_DMA) += dmapool.o
obj-$(CONFIG_HUGETLBFS) += hugetlb.o
obj-$(CONFIG_NUMA) += mempolicy.o
obj-$(CONFIG_SPARSEMEM) += sparse.o
obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
obj-$(CONFIG_SLOB) += slob.o
-obj-$(CONFIG_COMPACTION) += compaction.o
obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
obj-$(CONFIG_KSM) += ksm.o
obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 0131170c9d5..ec4fcb7a56c 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -77,16 +77,16 @@ unsigned long __init bootmem_bootmap_pages(unsigned long pages)
*/
static void __init link_bootmem(bootmem_data_t *bdata)
{
- struct list_head *iter;
+ bootmem_data_t *ent;
- list_for_each(iter, &bdata_list) {
- bootmem_data_t *ent;
-
- ent = list_entry(iter, bootmem_data_t, list);
- if (bdata->node_min_pfn < ent->node_min_pfn)
- break;
+ list_for_each_entry(ent, &bdata_list, list) {
+ if (bdata->node_min_pfn < ent->node_min_pfn) {
+ list_add_tail(&bdata->list, &ent->list);
+ return;
+ }
}
- list_add_tail(&bdata->list, iter);
+
+ list_add_tail(&bdata->list, &bdata_list);
}
/*
@@ -203,7 +203,8 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
} else {
unsigned long off = 0;
- while (vec && off < BITS_PER_LONG) {
+ vec >>= start & (BITS_PER_LONG - 1);
+ while (vec) {
if (vec & 1) {
page = pfn_to_page(start + off);
__free_pages_bootmem(page, 0);
@@ -467,7 +468,7 @@ static unsigned long __init align_off(struct bootmem_data *bdata,
return ALIGN(base + off, align) - base;
}
-static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
+static void * __init alloc_bootmem_bdata(struct bootmem_data *bdata,
unsigned long size, unsigned long align,
unsigned long goal, unsigned long limit)
{
@@ -588,14 +589,14 @@ static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
p_bdata = bootmem_arch_preferred_node(bdata, size, align,
goal, limit);
if (p_bdata)
- return alloc_bootmem_core(p_bdata, size, align,
+ return alloc_bootmem_bdata(p_bdata, size, align,
goal, limit);
}
#endif
return NULL;
}
-static void * __init ___alloc_bootmem_nopanic(unsigned long size,
+static void * __init alloc_bootmem_core(unsigned long size,
unsigned long align,
unsigned long goal,
unsigned long limit)
@@ -603,7 +604,6 @@ static void * __init ___alloc_bootmem_nopanic(unsigned long size,
bootmem_data_t *bdata;
void *region;
-restart:
region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
if (region)
return region;
@@ -614,11 +614,25 @@ restart:
if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
break;
- region = alloc_bootmem_core(bdata, size, align, goal, limit);
+ region = alloc_bootmem_bdata(bdata, size, align, goal, limit);
if (region)
return region;
}
+ return NULL;
+}
+
+static void * __init ___alloc_bootmem_nopanic(unsigned long size,
+ unsigned long align,
+ unsigned long goal,
+ unsigned long limit)
+{
+ void *ptr;
+
+restart:
+ ptr = alloc_bootmem_core(size, align, goal, limit);
+ if (ptr)
+ return ptr;
if (goal) {
goal = 0;
goto restart;
@@ -684,21 +698,56 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
return ___alloc_bootmem(size, align, goal, limit);
}
-static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
+static void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
unsigned long size, unsigned long align,
unsigned long goal, unsigned long limit)
{
void *ptr;
- ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit);
+again:
+ ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size,
+ align, goal, limit);
if (ptr)
return ptr;
- ptr = alloc_bootmem_core(bdata, size, align, goal, limit);
+ ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
if (ptr)
return ptr;
- return ___alloc_bootmem(size, align, goal, limit);
+ ptr = alloc_bootmem_core(size, align, goal, limit);
+ if (ptr)
+ return ptr;
+
+ if (goal) {
+ goal = 0;
+ goto again;
+ }
+
+ return NULL;
+}
+
+void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
+ unsigned long align, unsigned long goal)
+{
+ if (WARN_ON_ONCE(slab_is_available()))
+ return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+ return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
+}
+
+void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
+ unsigned long align, unsigned long goal,
+ unsigned long limit)
+{
+ void *ptr;
+
+ ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
+ if (ptr)
+ return ptr;
+
+ printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
+ panic("Out of memory");
+ return NULL;
}
/**
@@ -722,7 +771,7 @@ void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
- return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0);
+ return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}
void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
@@ -743,7 +792,7 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
unsigned long new_goal;
new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
- ptr = alloc_bootmem_core(pgdat->bdata, size, align,
+ ptr = alloc_bootmem_bdata(pgdat->bdata, size, align,
new_goal, 0);
if (ptr)
return ptr;
@@ -754,47 +803,6 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
}
-#ifdef CONFIG_SPARSEMEM
-/**
- * alloc_bootmem_section - allocate boot memory from a specific section
- * @size: size of the request in bytes
- * @section_nr: sparse map section to allocate from
- *
- * Return NULL on failure.
- */
-void * __init alloc_bootmem_section(unsigned long size,
- unsigned long section_nr)
-{
- bootmem_data_t *bdata;
- unsigned long pfn, goal;
-
- pfn = section_nr_to_pfn(section_nr);
- goal = pfn << PAGE_SHIFT;
- bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];
-
- return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, 0);
-}
-#endif
-
-void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal)
-{
- void *ptr;
-
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-
- ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
- if (ptr)
- return ptr;
-
- ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
- if (ptr)
- return ptr;
-
- return __alloc_bootmem_nopanic(size, align, goal);
-}
-
#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
#endif
@@ -839,6 +847,6 @@ void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
- return ___alloc_bootmem_node(pgdat->bdata, size, align,
- goal, ARCH_LOW_ADDRESS_LIMIT);
+ return ___alloc_bootmem_node(pgdat, size, align,
+ goal, ARCH_LOW_ADDRESS_LIMIT);
}
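A self-contained model of the fallback order the reworked node allocator now follows; the three try_* helpers below are hypothetical stand-ins for the arch hook, this node's bootmem data and the all-nodes scan, and only the ordering plus the goal-relaxing retry mirror the patch:

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical stand-ins; they always fail so the retry path is visible. */
    static void *try_arch_preferred(size_t size, unsigned long goal) { return NULL; }
    static void *try_this_node(size_t size, unsigned long goal)      { return NULL; }
    static void *try_any_node(size_t size, unsigned long goal)       { return NULL; }

    /* Mirrors the new ___alloc_bootmem_node_nopanic() fallback order. */
    static void *alloc_node_nopanic(size_t size, unsigned long goal)
    {
            void *p;
    again:
            if ((p = try_arch_preferred(size, goal)))
                    return p;
            if ((p = try_this_node(size, goal)))
                    return p;
            if ((p = try_any_node(size, goal)))
                    return p;
            if (goal) {             /* relax the goal once and retry */
                    goal = 0;
                    goto again;
            }
            return NULL;            /* nopanic: the caller decides what to do */
    }

    int main(void)
    {
            printf("%p\n", alloc_node_nopanic(4096, 16 << 20));
            return 0;
    }

The panicking ___alloc_bootmem_node() variant added above simply layers a printk() and panic() on top of this nopanic path.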
diff --git a/mm/compaction.c b/mm/compaction.c
index 74a8c825ff2..4ac338af512 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -16,30 +16,11 @@
#include <linux/sysfs.h>
#include "internal.h"
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+
#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>
-/*
- * compact_control is used to track pages being migrated and the free pages
- * they are being migrated to during memory compaction. The free_pfn starts
- * at the end of a zone and migrate_pfn begins at the start. Movable pages
- * are moved to the end of a zone during a compaction run and the run
- * completes when free_pfn <= migrate_pfn
- */
-struct compact_control {
- struct list_head freepages; /* List of free pages to migrate to */
- struct list_head migratepages; /* List of pages being migrated */
- unsigned long nr_freepages; /* Number of isolated free pages */
- unsigned long nr_migratepages; /* Number of pages to migrate */
- unsigned long free_pfn; /* isolate_freepages search base */
- unsigned long migrate_pfn; /* isolate_migratepages search base */
- bool sync; /* Synchronous migration */
-
- int order; /* order a direct compactor needs */
- int migratetype; /* MOVABLE, RECLAIMABLE etc */
- struct zone *zone;
-};
-
static unsigned long release_freepages(struct list_head *freelist)
{
struct page *page, *next;
@@ -54,24 +35,35 @@ static unsigned long release_freepages(struct list_head *freelist)
return count;
}
-/* Isolate free pages onto a private freelist. Must hold zone->lock */
-static unsigned long isolate_freepages_block(struct zone *zone,
- unsigned long blockpfn,
- struct list_head *freelist)
+static void map_pages(struct list_head *list)
+{
+ struct page *page;
+
+ list_for_each_entry(page, list, lru) {
+ arch_alloc_page(page, 0);
+ kernel_map_pages(page, 1, 1);
+ }
+}
+
+static inline bool migrate_async_suitable(int migratetype)
+{
+ return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
+}
+
+/*
+ * Isolate free pages onto a private freelist. Caller must hold zone->lock.
+ * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
+ * pages inside the pageblock (even though it may still end up isolating
+ * some pages).
+ */
+static unsigned long isolate_freepages_block(unsigned long blockpfn,
+ unsigned long end_pfn,
+ struct list_head *freelist,
+ bool strict)
{
- unsigned long zone_end_pfn, end_pfn;
int nr_scanned = 0, total_isolated = 0;
struct page *cursor;
- /* Get the last PFN we should scan for free pages at */
- zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
- end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);
-
- /* Find the first usable PFN in the block to initialse page cursor */
- for (; blockpfn < end_pfn; blockpfn++) {
- if (pfn_valid_within(blockpfn))
- break;
- }
cursor = pfn_to_page(blockpfn);
/* Isolate free pages. This assumes the block is valid */
@@ -79,15 +71,23 @@ static unsigned long isolate_freepages_block(struct zone *zone,
int isolated, i;
struct page *page = cursor;
- if (!pfn_valid_within(blockpfn))
+ if (!pfn_valid_within(blockpfn)) {
+ if (strict)
+ return 0;
continue;
+ }
nr_scanned++;
- if (!PageBuddy(page))
+ if (!PageBuddy(page)) {
+ if (strict)
+ return 0;
continue;
+ }
/* Found a free page, break it into order-0 pages */
isolated = split_free_page(page);
+ if (!isolated && strict)
+ return 0;
total_isolated += isolated;
for (i = 0; i < isolated; i++) {
list_add(&page->lru, freelist);
@@ -105,114 +105,71 @@ static unsigned long isolate_freepages_block(struct zone *zone,
return total_isolated;
}
-/* Returns true if the page is within a block suitable for migration to */
-static bool suitable_migration_target(struct page *page)
-{
-
- int migratetype = get_pageblock_migratetype(page);
-
- /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
- if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
- return false;
-
- /* If the page is a large free page, then allow migration */
- if (PageBuddy(page) && page_order(page) >= pageblock_order)
- return true;
-
- /* If the block is MIGRATE_MOVABLE, allow migration */
- if (migratetype == MIGRATE_MOVABLE)
- return true;
-
- /* Otherwise skip the block */
- return false;
-}
-
-/*
- * Based on information in the current compact_control, find blocks
- * suitable for isolating free pages from and then isolate them.
+/**
+ * isolate_freepages_range() - isolate free pages.
+ * @start_pfn: The first PFN to start isolating.
+ * @end_pfn: The one-past-last PFN.
+ *
+ * Non-free pages, invalid PFNs, or zone boundaries within the
+ * [start_pfn, end_pfn) range are considered errors and cause the function
+ * to undo its actions and return zero.
+ *
+ * Otherwise, the function returns the one-past-the-last PFN of the isolated
+ * pages (which may be greater than end_pfn if the end fell in the middle of
+ * a free page).
*/
-static void isolate_freepages(struct zone *zone,
- struct compact_control *cc)
+unsigned long
+isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
{
- struct page *page;
- unsigned long high_pfn, low_pfn, pfn;
- unsigned long flags;
- int nr_freepages = cc->nr_freepages;
- struct list_head *freelist = &cc->freepages;
-
- /*
- * Initialise the free scanner. The starting point is where we last
- * scanned from (or the end of the zone if starting). The low point
- * is the end of the pageblock the migration scanner is using.
- */
- pfn = cc->free_pfn;
- low_pfn = cc->migrate_pfn + pageblock_nr_pages;
+ unsigned long isolated, pfn, block_end_pfn, flags;
+ struct zone *zone = NULL;
+ LIST_HEAD(freelist);
- /*
- * Take care that if the migration scanner is at the end of the zone
- * that the free scanner does not accidentally move to the next zone
- * in the next isolation cycle.
- */
- high_pfn = min(low_pfn, pfn);
+ if (pfn_valid(start_pfn))
+ zone = page_zone(pfn_to_page(start_pfn));
- /*
- * Isolate free pages until enough are available to migrate the
- * pages on cc->migratepages. We stop searching if the migrate
- * and free page scanners meet or enough free pages are isolated.
- */
- for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
- pfn -= pageblock_nr_pages) {
- unsigned long isolated;
-
- if (!pfn_valid(pfn))
- continue;
+ for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
+ if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
+ break;
/*
- * Check for overlapping nodes/zones. It's possible on some
- * configurations to have a setup like
- * node0 node1 node0
- * i.e. it's possible that all pages within a zones range of
- * pages do not belong to a single zone.
+ * On subsequent iterations ALIGN() is actually not needed,
+ * but we keep it so as not to complicate the code.
*/
- page = pfn_to_page(pfn);
- if (page_zone(page) != zone)
- continue;
+ block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+ block_end_pfn = min(block_end_pfn, end_pfn);
- /* Check the block is suitable for migration */
- if (!suitable_migration_target(page))
- continue;
+ spin_lock_irqsave(&zone->lock, flags);
+ isolated = isolate_freepages_block(pfn, block_end_pfn,
+ &freelist, true);
+ spin_unlock_irqrestore(&zone->lock, flags);
/*
- * Found a block suitable for isolating free pages from. Now
- * we disabled interrupts, double check things are ok and
- * isolate the pages. This is to minimise the time IRQs
- * are disabled
+ * In strict mode, isolate_freepages_block() returns 0 if
+ * there are any holes in the block (ie. invalid PFNs or
+ * non-free pages).
*/
- isolated = 0;
- spin_lock_irqsave(&zone->lock, flags);
- if (suitable_migration_target(page)) {
- isolated = isolate_freepages_block(zone, pfn, freelist);
- nr_freepages += isolated;
- }
- spin_unlock_irqrestore(&zone->lock, flags);
+ if (!isolated)
+ break;
/*
- * Record the highest PFN we isolated pages from. When next
- * looking for free pages, the search will restart here as
- * page migration may have returned some pages to the allocator
+ * If we managed to isolate pages, it is always (1 << n) *
+ * pageblock_nr_pages for some non-negative n. (Max order
+ * page may span two pageblocks).
*/
- if (isolated)
- high_pfn = max(high_pfn, pfn);
}
/* split_free_page does not map the pages */
- list_for_each_entry(page, freelist, lru) {
- arch_alloc_page(page, 0);
- kernel_map_pages(page, 1, 1);
+ map_pages(&freelist);
+
+ if (pfn < end_pfn) {
+ /* Loop terminated early, cleanup. */
+ release_freepages(&freelist);
+ return 0;
}
- cc->free_pfn = high_pfn;
- cc->nr_freepages = nr_freepages;
+ /* We don't use freelists for anything. */
+ return pfn;
}
/* Update the number of anon and file isolated pages in the zone */
@@ -243,37 +200,34 @@ static bool too_many_isolated(struct zone *zone)
return isolated > (inactive + active) / 2;
}
-/* possible outcome of isolate_migratepages */
-typedef enum {
- ISOLATE_ABORT, /* Abort compaction now */
- ISOLATE_NONE, /* No pages isolated, continue scanning */
- ISOLATE_SUCCESS, /* Pages isolated, migrate */
-} isolate_migrate_t;
-
-/*
- * Isolate all pages that can be migrated from the block pointed to by
- * the migrate scanner within compact_control.
+/**
+ * isolate_migratepages_range() - isolate all migrate-able pages in range.
+ * @zone: Zone pages are in.
+ * @cc: Compaction control structure.
+ * @low_pfn: The first PFN of the range.
+ * @end_pfn: The one-past-the-last PFN of the range.
+ *
+ * Isolate all pages that can be migrated from the range specified by
+ * [low_pfn, end_pfn). Returns zero if there is a fatal signal
+ * pending, otherwise the PFN of the first page that was not scanned
+ * (which may be less than, equal to or greater than end_pfn).
+ *
+ * Assumes that cc->migratepages is empty and cc->nr_migratepages is
+ * zero.
+ *
+ * Apart from cc->migratepages and cc->nr_migratepages this function
+ * does not modify any of cc's fields; in particular it does not modify
+ * (or, for that matter, read) cc->migrate_pfn.
*/
-static isolate_migrate_t isolate_migratepages(struct zone *zone,
- struct compact_control *cc)
+unsigned long
+isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+ unsigned long low_pfn, unsigned long end_pfn)
{
- unsigned long low_pfn, end_pfn;
unsigned long last_pageblock_nr = 0, pageblock_nr;
unsigned long nr_scanned = 0, nr_isolated = 0;
struct list_head *migratelist = &cc->migratepages;
- isolate_mode_t mode = ISOLATE_ACTIVE|ISOLATE_INACTIVE;
-
- /* Do not scan outside zone boundaries */
- low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
-
- /* Only scan within a pageblock boundary */
- end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
-
- /* Do not cross the free scanner or scan within a memory hole */
- if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
- cc->migrate_pfn = end_pfn;
- return ISOLATE_NONE;
- }
+ isolate_mode_t mode = 0;
+ struct lruvec *lruvec;
/*
* Ensure that there are not too many pages isolated from the LRU
@@ -282,13 +236,13 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
*/
while (unlikely(too_many_isolated(zone))) {
/* async migration should just abort */
- if (!cc->sync)
- return ISOLATE_ABORT;
+ if (cc->mode != COMPACT_SYNC)
+ return 0;
congestion_wait(BLK_RW_ASYNC, HZ/10);
if (fatal_signal_pending(current))
- return ISOLATE_ABORT;
+ return 0;
}
/* Time to isolate some pages for migration */
@@ -350,8 +304,9 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
* satisfies the allocation
*/
pageblock_nr = low_pfn >> pageblock_order;
- if (!cc->sync && last_pageblock_nr != pageblock_nr &&
- get_pageblock_migratetype(page) != MIGRATE_MOVABLE) {
+ if (cc->mode != COMPACT_SYNC &&
+ last_pageblock_nr != pageblock_nr &&
+ !migrate_async_suitable(get_pageblock_migratetype(page))) {
low_pfn += pageblock_nr_pages;
low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
last_pageblock_nr = pageblock_nr;
@@ -371,17 +326,19 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
continue;
}
- if (!cc->sync)
+ if (cc->mode != COMPACT_SYNC)
mode |= ISOLATE_ASYNC_MIGRATE;
+ lruvec = mem_cgroup_page_lruvec(page, zone);
+
/* Try isolate the page */
- if (__isolate_lru_page(page, mode, 0) != 0)
+ if (__isolate_lru_page(page, mode) != 0)
continue;
VM_BUG_ON(PageTransCompound(page));
/* Successfully isolated */
- del_page_from_lru_list(zone, page, page_lru(page));
+ del_page_from_lru_list(page, lruvec, page_lru(page));
list_add(&page->lru, migratelist);
cc->nr_migratepages++;
nr_isolated++;
@@ -396,11 +353,200 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
acct_isolated(zone, cc);
spin_unlock_irq(&zone->lru_lock);
- cc->migrate_pfn = low_pfn;
trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
- return ISOLATE_SUCCESS;
+ return low_pfn;
+}
+
+#endif /* CONFIG_COMPACTION || CONFIG_CMA */
+#ifdef CONFIG_COMPACTION
+/*
+ * Returns true if MIGRATE_UNMOVABLE pageblock was successfully
+ * converted to MIGRATE_MOVABLE type, false otherwise.
+ */
+static bool rescue_unmovable_pageblock(struct page *page)
+{
+ unsigned long pfn, start_pfn, end_pfn;
+ struct page *start_page, *end_page;
+
+ pfn = page_to_pfn(page);
+ start_pfn = pfn & ~(pageblock_nr_pages - 1);
+ end_pfn = start_pfn + pageblock_nr_pages;
+
+ start_page = pfn_to_page(start_pfn);
+ end_page = pfn_to_page(end_pfn);
+
+ /* Do not deal with pageblocks that overlap zones */
+ if (page_zone(start_page) != page_zone(end_page))
+ return false;
+
+ for (page = start_page, pfn = start_pfn; page < end_page; pfn++,
+ page++) {
+ if (!pfn_valid_within(pfn))
+ continue;
+
+ if (PageBuddy(page)) {
+ int order = page_order(page);
+
+ pfn += (1 << order) - 1;
+ page += (1 << order) - 1;
+
+ continue;
+ } else if (page_count(page) == 0 || PageLRU(page))
+ continue;
+
+ return false;
+ }
+
+ set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+ move_freepages_block(page_zone(page), page, MIGRATE_MOVABLE);
+ return true;
+}
+
+enum smt_result {
+ GOOD_AS_MIGRATION_TARGET,
+ FAIL_UNMOVABLE_TARGET,
+ FAIL_BAD_TARGET,
+};
+
+/*
+ * Returns GOOD_AS_MIGRATION_TARGET if the page is within a block
+ * suitable for migration to, FAIL_UNMOVABLE_TARGET if the page
+ * is within a MIGRATE_UNMOVABLE block, FAIL_BAD_TARGET otherwise.
+ */
+static enum smt_result suitable_migration_target(struct page *page,
+ struct compact_control *cc)
+{
+
+ int migratetype = get_pageblock_migratetype(page);
+
+ /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
+ if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
+ return FAIL_BAD_TARGET;
+
+ /* If the page is a large free page, then allow migration */
+ if (PageBuddy(page) && page_order(page) >= pageblock_order)
+ return GOOD_AS_MIGRATION_TARGET;
+
+ /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
+ if (cc->mode != COMPACT_ASYNC_UNMOVABLE &&
+ migrate_async_suitable(migratetype))
+ return GOOD_AS_MIGRATION_TARGET;
+
+ if (cc->mode == COMPACT_ASYNC_MOVABLE &&
+ migratetype == MIGRATE_UNMOVABLE)
+ return FAIL_UNMOVABLE_TARGET;
+
+ if (cc->mode != COMPACT_ASYNC_MOVABLE &&
+ migratetype == MIGRATE_UNMOVABLE &&
+ rescue_unmovable_pageblock(page))
+ return GOOD_AS_MIGRATION_TARGET;
+
+ /* Otherwise skip the block */
+ return FAIL_BAD_TARGET;
+}
+
+/*
+ * Based on information in the current compact_control, find blocks
+ * suitable for isolating free pages from and then isolate them.
+ */
+static void isolate_freepages(struct zone *zone,
+ struct compact_control *cc)
+{
+ struct page *page;
+ unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
+ unsigned long flags;
+ int nr_freepages = cc->nr_freepages;
+ struct list_head *freelist = &cc->freepages;
+
+ /*
+ * Initialise the free scanner. The starting point is where we last
+ * scanned from (or the end of the zone if starting). The low point
+ * is the end of the pageblock the migration scanner is using.
+ */
+ pfn = cc->free_pfn;
+ low_pfn = cc->migrate_pfn + pageblock_nr_pages;
+
+ /*
+ * Take care that if the migration scanner is at the end of the zone
+ * that the free scanner does not accidentally move to the next zone
+ * in the next isolation cycle.
+ */
+ high_pfn = min(low_pfn, pfn);
+
+ zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+
+ /*
+ * isolate_freepages() may be called more than once during
+ * compact_zone_order() run and we want only the most recent
+ * count.
+ */
+ cc->nr_pageblocks_skipped = 0;
+
+ /*
+ * Isolate free pages until enough are available to migrate the
+ * pages on cc->migratepages. We stop searching if the migrate
+ * and free page scanners meet or enough free pages are isolated.
+ */
+ for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
+ pfn -= pageblock_nr_pages) {
+ unsigned long isolated;
+ enum smt_result ret;
+
+ if (!pfn_valid(pfn))
+ continue;
+
+ /*
+ * Check for overlapping nodes/zones. It's possible on some
+ * configurations to have a setup like
+ * node0 node1 node0
+ * i.e. it's possible that all pages within a zones range of
+ * pages do not belong to a single zone.
+ */
+ page = pfn_to_page(pfn);
+ if (page_zone(page) != zone)
+ continue;
+
+ /* Check the block is suitable for migration */
+ ret = suitable_migration_target(page, cc);
+ if (ret != GOOD_AS_MIGRATION_TARGET) {
+ if (ret == FAIL_UNMOVABLE_TARGET)
+ cc->nr_pageblocks_skipped++;
+ continue;
+ }
+ /*
+ * Found a block suitable for isolating free pages from. Now
+ * we disabled interrupts, double check things are ok and
+ * isolate the pages. This is to minimise the time IRQs
+ * are disabled
+ */
+ isolated = 0;
+ spin_lock_irqsave(&zone->lock, flags);
+ ret = suitable_migration_target(page, cc);
+ if (ret == GOOD_AS_MIGRATION_TARGET) {
+ end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
+ isolated = isolate_freepages_block(pfn, end_pfn,
+ freelist, false);
+ nr_freepages += isolated;
+ } else if (ret == FAIL_UNMOVABLE_TARGET)
+ cc->nr_pageblocks_skipped++;
+ spin_unlock_irqrestore(&zone->lock, flags);
+
+ /*
+ * Record the highest PFN we isolated pages from. When next
+ * looking for free pages, the search will restart here as
+ * page migration may have returned some pages to the allocator
+ */
+ if (isolated)
+ high_pfn = max(high_pfn, pfn);
+ }
+
+ /* split_free_page does not map the pages */
+ map_pages(freelist);
+
+ cc->free_pfn = high_pfn;
+ cc->nr_freepages = nr_freepages;
}
/*
@@ -449,6 +595,44 @@ static void update_nr_listpages(struct compact_control *cc)
cc->nr_freepages = nr_freepages;
}
+/* possible outcome of isolate_migratepages */
+typedef enum {
+ ISOLATE_ABORT, /* Abort compaction now */
+ ISOLATE_NONE, /* No pages isolated, continue scanning */
+ ISOLATE_SUCCESS, /* Pages isolated, migrate */
+} isolate_migrate_t;
+
+/*
+ * Isolate all pages that can be migrated from the block pointed to by
+ * the migrate scanner within compact_control.
+ */
+static isolate_migrate_t isolate_migratepages(struct zone *zone,
+ struct compact_control *cc)
+{
+ unsigned long low_pfn, end_pfn;
+
+ /* Do not scan outside zone boundaries */
+ low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
+
+ /* Only scan within a pageblock boundary */
+ end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
+
+ /* Do not cross the free scanner or scan within a memory hole */
+ if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
+ cc->migrate_pfn = end_pfn;
+ return ISOLATE_NONE;
+ }
+
+ /* Perform the isolation */
+ low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
+ if (!low_pfn)
+ return ISOLATE_ABORT;
+
+ cc->migrate_pfn = low_pfn;
+
+ return ISOLATE_SUCCESS;
+}
+
static int compact_finished(struct zone *zone,
struct compact_control *cc)
{
@@ -578,8 +762,9 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
nr_migrate = cc->nr_migratepages;
err = migrate_pages(&cc->migratepages, compaction_alloc,
- (unsigned long)cc, false,
- cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
+ (unsigned long)&cc->freepages, false,
+ (cc->mode == COMPACT_SYNC) ? MIGRATE_SYNC_LIGHT
+ : MIGRATE_ASYNC);
update_nr_listpages(cc);
nr_remaining = cc->nr_migratepages;
@@ -608,7 +793,8 @@ out:
static unsigned long compact_zone_order(struct zone *zone,
int order, gfp_t gfp_mask,
- bool sync)
+ enum compact_mode mode,
+ unsigned long *nr_pageblocks_skipped)
{
struct compact_control cc = {
.nr_freepages = 0,
@@ -616,12 +802,17 @@ static unsigned long compact_zone_order(struct zone *zone,
.order = order,
.migratetype = allocflags_to_migratetype(gfp_mask),
.zone = zone,
- .sync = sync,
+ .mode = mode,
};
+ unsigned long rc;
+
INIT_LIST_HEAD(&cc.freepages);
INIT_LIST_HEAD(&cc.migratepages);
- return compact_zone(zone, &cc);
+ rc = compact_zone(zone, &cc);
+ *nr_pageblocks_skipped = cc.nr_pageblocks_skipped;
+
+ return rc;
}
int sysctl_extfrag_threshold = 500;
@@ -646,6 +837,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
struct zoneref *z;
struct zone *zone;
int rc = COMPACT_SKIPPED;
+ unsigned long nr_pageblocks_skipped;
+ enum compact_mode mode;
/*
* Check whether it is worth even starting compaction. The order check is
@@ -662,12 +855,22 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
nodemask) {
int status;
- status = compact_zone_order(zone, order, gfp_mask, sync);
+ mode = sync ? COMPACT_SYNC : COMPACT_ASYNC_MOVABLE;
+retry:
+ status = compact_zone_order(zone, order, gfp_mask, mode,
+ &nr_pageblocks_skipped);
rc = max(status, rc);
/* If a normal allocation would succeed, stop compacting */
if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
break;
+
+ if (rc == COMPACT_COMPLETE && mode == COMPACT_ASYNC_MOVABLE) {
+ if (nr_pageblocks_skipped) {
+ mode = COMPACT_ASYNC_UNMOVABLE;
+ goto retry;
+ }
+ }
}
return rc;
@@ -701,7 +904,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
if (ok && cc->order > zone->compact_order_failed)
zone->compact_order_failed = cc->order + 1;
/* Currently async compaction is never deferred. */
- else if (!ok && cc->sync)
+ else if (!ok && cc->mode == COMPACT_SYNC)
defer_compaction(zone, cc->order);
}
@@ -716,7 +919,7 @@ int compact_pgdat(pg_data_t *pgdat, int order)
{
struct compact_control cc = {
.order = order,
- .sync = false,
+ .mode = COMPACT_ASYNC_MOVABLE,
};
return __compact_pgdat(pgdat, &cc);
@@ -726,7 +929,7 @@ static int compact_node(int nid)
{
struct compact_control cc = {
.order = -1,
- .sync = true,
+ .mode = COMPACT_SYNC,
};
return __compact_pgdat(NODE_DATA(nid), &cc);
@@ -795,3 +998,5 @@ void compaction_unregister_node(struct node *node)
return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */
+
+#endif /* CONFIG_COMPACTION */
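A compact model of the new per-zone retry in try_to_compact_pages(); the helper and its numbers are made up, the watermark check that can end the loop earlier is omitted, and only the retry decision mirrors the patch:

    #include <stdio.h>

    enum mode { ASYNC_MOVABLE, ASYNC_UNMOVABLE, SYNC };
    enum { COMPACT_COMPLETE = 1 };

    /* Hypothetical stand-in for compact_zone_order(): pretends the zone was
     * fully scanned and, in movable-only mode, that 3 unmovable pageblocks
     * were skipped as free-page targets. */
    static int run_zone_compaction(enum mode mode, unsigned long *skipped)
    {
            *skipped = (mode == ASYNC_MOVABLE) ? 3 : 0;
            return COMPACT_COMPLETE;
    }

    int main(void)
    {
            unsigned long skipped;
            enum mode mode = ASYNC_MOVABLE;         /* async callers start here */
            int rc;
    retry:
            rc = run_zone_compaction(mode, &skipped);
            if (rc == COMPACT_COMPLETE && mode == ASYNC_MOVABLE && skipped) {
                    /* The run finished only by skipping unmovable pageblocks:
                     * retry once, now allowed to rescue them. */
                    mode = ASYNC_UNMOVABLE;
                    goto retry;
            }
            printf("done, final mode %d\n", mode);
            return 0;
    }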
diff --git a/mm/filemap.c b/mm/filemap.c
index 79c4b2b0b14..64b48f934b8 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -29,7 +29,6 @@
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
-#include <linux/syscalls.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
#include <linux/memcontrol.h>
@@ -1478,44 +1477,6 @@ out:
}
EXPORT_SYMBOL(generic_file_aio_read);
-static ssize_t
-do_readahead(struct address_space *mapping, struct file *filp,
- pgoff_t index, unsigned long nr)
-{
- if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
- return -EINVAL;
-
- force_page_cache_readahead(mapping, filp, index, nr);
- return 0;
-}
-
-SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
-{
- ssize_t ret;
- struct file *file;
-
- ret = -EBADF;
- file = fget(fd);
- if (file) {
- if (file->f_mode & FMODE_READ) {
- struct address_space *mapping = file->f_mapping;
- pgoff_t start = offset >> PAGE_CACHE_SHIFT;
- pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
- unsigned long len = end - start + 1;
- ret = do_readahead(mapping, file, start, len);
- }
- fput(file);
- }
- return ret;
-}
-#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
-asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
-{
- return SYSC_readahead((int) fd, offset, (size_t) count);
-}
-SYSCALL_ALIAS(sys_readahead, SyS_readahead);
-#endif
-
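The readahead() wrapper deleted above only leaves this file; the syscall itself remains available, and this hunk merely drops its definition from mm/filemap.c. For reference, a minimal userspace use of the syscall (the file name is illustrative):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>

    int main(void)
    {
            int fd = open("big-input.dat", O_RDONLY);   /* illustrative path */

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* Ask the kernel to populate the page cache for the first 1 MiB
             * so later sequential reads do not block on disk I/O. */
            if (readahead(fd, 0, 1 << 20) < 0)
                    perror("readahead");
            return 0;
    }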
#ifdef CONFIG_MMU
/**
* page_cache_read - adds requested page to the page cache if not already there
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f0e5306eeb5..57c4b930901 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -636,16 +636,12 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
unsigned long haddr, pmd_t *pmd,
struct page *page)
{
- int ret = 0;
pgtable_t pgtable;
VM_BUG_ON(!PageCompound(page));
pgtable = pte_alloc_one(mm, haddr);
- if (unlikely(!pgtable)) {
- mem_cgroup_uncharge_page(page);
- put_page(page);
+ if (unlikely(!pgtable))
return VM_FAULT_OOM;
- }
clear_huge_page(page, haddr, HPAGE_PMD_NR);
__SetPageUptodate(page);
@@ -675,7 +671,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
spin_unlock(&mm->page_table_lock);
}
- return ret;
+ return 0;
}
static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
@@ -724,8 +720,14 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
put_page(page);
goto out;
}
+ if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd,
+ page))) {
+ mem_cgroup_uncharge_page(page);
+ put_page(page);
+ goto out;
+ }
- return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
+ return 0;
}
out:
/*
@@ -950,6 +952,8 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
count_vm_event(THP_FAULT_FALLBACK);
ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
pmd, orig_pmd, page, haddr);
+ if (ret & VM_FAULT_OOM)
+ split_huge_page(page);
put_page(page);
goto out;
}
@@ -957,6 +961,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
put_page(new_page);
+ split_huge_page(page);
put_page(page);
ret |= VM_FAULT_OOM;
goto out;
@@ -968,8 +973,10 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
spin_lock(&mm->page_table_lock);
put_page(page);
if (unlikely(!pmd_same(*pmd, orig_pmd))) {
+ spin_unlock(&mm->page_table_lock);
mem_cgroup_uncharge_page(new_page);
put_page(new_page);
+ goto out;
} else {
pmd_t entry;
VM_BUG_ON(!PageHead(page));
@@ -1224,10 +1231,13 @@ static void __split_huge_page_refcount(struct page *page)
{
int i;
struct zone *zone = page_zone(page);
+ struct lruvec *lruvec;
int tail_count = 0;
/* prevent PageLRU to go away from under us, and freeze lru stats */
spin_lock_irq(&zone->lru_lock);
+ lruvec = mem_cgroup_page_lruvec(page, zone);
+
compound_lock(page);
/* complete memcg works before add pages to LRU */
mem_cgroup_split_huge_fixup(page);
@@ -1302,13 +1312,12 @@ static void __split_huge_page_refcount(struct page *page)
BUG_ON(!PageDirty(page_tail));
BUG_ON(!PageSwapBacked(page_tail));
-
- lru_add_page_tail(zone, page, page_tail);
+ lru_add_page_tail(page, page_tail, lruvec);
}
atomic_sub(tail_count, &page->_count);
BUG_ON(atomic_read(&page->_count) <= 0);
- __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+ __mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
ClearPageCompound(page);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ae8f708e3d7..285a81e87ec 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -273,8 +273,8 @@ static long region_count(struct list_head *head, long f, long t)
/* Locate each segment we overlap with, and count that overlap. */
list_for_each_entry(rg, head, link) {
- int seg_from;
- int seg_to;
+ long seg_from;
+ long seg_to;
if (rg->to <= f)
continue;
@@ -2157,6 +2157,15 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
kref_get(&reservations->refs);
}
+static void resv_map_put(struct vm_area_struct *vma)
+{
+ struct resv_map *reservations = vma_resv_map(vma);
+
+ if (!reservations)
+ return;
+ kref_put(&reservations->refs, resv_map_release);
+}
+
static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
struct hstate *h = hstate_vma(vma);
@@ -2173,7 +2182,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
reserve = (end - start) -
region_count(&reservations->regions, start, end);
- kref_put(&reservations->refs, resv_map_release);
+ resv_map_put(vma);
if (reserve) {
hugetlb_acct_memory(h, -reserve);
@@ -2213,6 +2222,7 @@ static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
}
entry = pte_mkyoung(entry);
entry = pte_mkhuge(entry);
+ entry = arch_make_huge_pte(entry, vma, page, writable);
return entry;
}
@@ -2990,12 +3000,16 @@ int hugetlb_reserve_pages(struct inode *inode,
set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
}
- if (chg < 0)
- return chg;
+ if (chg < 0) {
+ ret = chg;
+ goto out_err;
+ }
/* There must be enough pages in the subpool for the mapping */
- if (hugepage_subpool_get_pages(spool, chg))
- return -ENOSPC;
+ if (hugepage_subpool_get_pages(spool, chg)) {
+ ret = -ENOSPC;
+ goto out_err;
+ }
/*
* Check enough hugepages are available for the reservation.
@@ -3004,7 +3018,7 @@ int hugetlb_reserve_pages(struct inode *inode,
ret = hugetlb_acct_memory(h, chg);
if (ret < 0) {
hugepage_subpool_put_pages(spool, chg);
- return ret;
+ goto out_err;
}
/*
@@ -3021,6 +3035,9 @@ int hugetlb_reserve_pages(struct inode *inode,
if (!vma || vma->vm_flags & VM_MAYSHARE)
region_add(&inode->i_mapping->private_list, from, to);
return 0;
+out_err:
+ resv_map_put(vma);
+ return ret;
}
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
diff --git a/mm/internal.h b/mm/internal.h
index 2189af49178..4194ab9dc19 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -94,12 +94,52 @@ extern void putback_lru_page(struct page *page);
/*
* in mm/page_alloc.c
*/
+extern void set_pageblock_migratetype(struct page *page, int migratetype);
+extern int move_freepages_block(struct zone *zone, struct page *page,
+ int migratetype);
extern void __free_pages_bootmem(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned long order);
#ifdef CONFIG_MEMORY_FAILURE
extern bool is_free_buddy_page(struct page *page);
#endif
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+#include <linux/compaction.h>
+
+/*
+ * in mm/compaction.c
+ */
+/*
+ * compact_control is used to track pages being migrated and the free pages
+ * they are being migrated to during memory compaction. The free_pfn starts
+ * at the end of a zone and migrate_pfn begins at the start. Movable pages
+ * are moved to the end of a zone during a compaction run and the run
+ * completes when free_pfn <= migrate_pfn
+ */
+struct compact_control {
+ struct list_head freepages; /* List of free pages to migrate to */
+ struct list_head migratepages; /* List of pages being migrated */
+ unsigned long nr_freepages; /* Number of isolated free pages */
+ unsigned long nr_migratepages; /* Number of pages to migrate */
+ unsigned long free_pfn; /* isolate_freepages search base */
+ unsigned long migrate_pfn; /* isolate_migratepages search base */
+ enum compact_mode mode; /* Compaction mode */
+
+ int order; /* order a direct compactor needs */
+ int migratetype; /* MOVABLE, RECLAIMABLE etc */
+ struct zone *zone;
+
+ /* Number of UNMOVABLE destination pageblocks skipped during scan */
+ unsigned long nr_pageblocks_skipped;
+};
+
+unsigned long
+isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn);
+unsigned long
+isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+ unsigned long low_pfn, unsigned long end_pfn);
+
+#endif
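The compact_control comment above describes two scanners converging on each other; a toy model of that invariant, with a made-up zone size and scan step:

    #include <stdio.h>

    int main(void)
    {
            /* Made-up numbers: a 1024-page zone scanned in 64-page blocks. */
            unsigned long zone_start = 0, zone_end = 1024, block = 64;
            unsigned long migrate_pfn = zone_start;  /* walks up from the start */
            unsigned long free_pfn = zone_end;       /* walks down from the end */

            while (free_pfn > migrate_pfn) {         /* run ends when they meet */
                    migrate_pfn += block;            /* isolate movable pages    */
                    free_pfn -= block;               /* isolate free targets     */
            }
            printf("scanners met around pfn %lu\n", migrate_pfn);
            return 0;
    }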
/*
* function for dealing with page's order in buddy system.
@@ -131,7 +171,8 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
* to determine if it's being mapped into a LOCKED vma.
* If so, mark page as mlocked.
*/
-static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
+static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
+ struct page *page)
{
VM_BUG_ON(PageLRU(page));
@@ -189,7 +230,7 @@ extern unsigned long vma_address(struct page *page,
struct vm_area_struct *vma);
#endif
#else /* !CONFIG_MMU */
-static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
+static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
{
return 0;
}
diff --git a/mm/madvise.c b/mm/madvise.c
index 1ccbba5b667..deff1b64a08 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -11,8 +11,10 @@
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/hugetlb.h>
+#include <linux/falloc.h>
#include <linux/sched.h>
#include <linux/ksm.h>
+#include <linux/fs.h>
/*
* Any behaviour which results in changes to the vma->vm_flags needs to
@@ -200,8 +202,7 @@ static long madvise_remove(struct vm_area_struct *vma,
struct vm_area_struct **prev,
unsigned long start, unsigned long end)
{
- struct address_space *mapping;
- loff_t offset, endoff;
+ loff_t offset;
int error;
*prev = NULL; /* tell sys_madvise we drop mmap_sem */
@@ -217,16 +218,14 @@ static long madvise_remove(struct vm_area_struct *vma,
if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
return -EACCES;
- mapping = vma->vm_file->f_mapping;
-
offset = (loff_t)(start - vma->vm_start)
+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
- endoff = (loff_t)(end - vma->vm_start - 1)
- + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
- /* vmtruncate_range needs to take i_mutex */
+ /* filesystem's fallocate may need to take i_mutex */
up_read(&current->mm->mmap_sem);
- error = vmtruncate_range(mapping->host, offset, endoff);
+ error = do_fallocate(vma->vm_file,
+ FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+ offset, end - start);
down_read(&current->mm->mmap_sem);
return error;
}
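madvise_remove() above now punches a hole in the backing file via do_fallocate(); the equivalent operation from userspace, issued directly on a file rather than through a mapping (path and sizes are illustrative, and a glibc recent enough to expose the FALLOC_FL_* flags is assumed):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>

    int main(void)
    {
            int fd = open("backing-file.dat", O_RDWR);  /* illustrative path */

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* Drop 1 MiB of blocks at offset 4 MiB without changing the file
             * size -- the same FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE
             * request madvise(MADV_REMOVE) now issues on the mapped file. */
            if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                          4 << 20, 1 << 20) < 0) {
                    perror("fallocate");
                    return 1;
            }
            return 0;
    }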
diff --git a/mm/memblock.c b/mm/memblock.c
index a44eab3157f..952123eba43 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -37,6 +37,8 @@ struct memblock memblock __initdata_memblock = {
int memblock_debug __initdata_memblock;
static int memblock_can_resize __initdata_memblock;
+static int memblock_memory_in_slab __initdata_memblock = 0;
+static int memblock_reserved_in_slab __initdata_memblock = 0;
/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
@@ -187,6 +189,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
struct memblock_region *new_array, *old_array;
phys_addr_t old_size, new_size, addr;
int use_slab = slab_is_available();
+ int *in_slab;
/* We don't allow resizing until we know about the reserved regions
* of memory that aren't suitable for allocation
@@ -198,6 +201,12 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
old_size = type->max * sizeof(struct memblock_region);
new_size = old_size << 1;
+ /* Retrieve the slab flag */
+ if (type == &memblock.memory)
+ in_slab = &memblock_memory_in_slab;
+ else
+ in_slab = &memblock_reserved_in_slab;
+
/* Try to find some space for it.
*
* WARNING: We assume that either slab_is_available() and we use it or
@@ -212,14 +221,15 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
if (use_slab) {
new_array = kmalloc(new_size, GFP_KERNEL);
addr = new_array ? __pa(new_array) : 0;
- } else
+ } else {
addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
+ new_array = addr ? __va(addr) : 0;
+ }
if (!addr) {
pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
memblock_type_name(type), type->max, type->max * 2);
return -1;
}
- new_array = __va(addr);
memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);
@@ -234,22 +244,24 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
type->regions = new_array;
type->max <<= 1;
- /* If we use SLAB that's it, we are done */
- if (use_slab)
- return 0;
-
- /* Add the new reserved region now. Should not fail ! */
- BUG_ON(memblock_reserve(addr, new_size));
-
- /* If the array wasn't our static init one, then free it. We only do
- * that before SLAB is available as later on, we don't know whether
- * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
- * anyways
+ /* Free old array. We needn't free it if the array is the
+ * static one
*/
- if (old_array != memblock_memory_init_regions &&
- old_array != memblock_reserved_init_regions)
+ if (*in_slab)
+ kfree(old_array);
+ else if (old_array != memblock_memory_init_regions &&
+ old_array != memblock_reserved_init_regions)
memblock_free(__pa(old_array), old_size);
+ /* Reserve the new array if it came from memblock;
+ * otherwise, we needn't do it
+ */
+ if (!use_slab)
+ BUG_ON(memblock_reserve(addr, new_size));
+
+ /* Update slab flag */
+ *in_slab = use_slab;
+
return 0;
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f342778a0c0..ac35bccadb7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -59,7 +59,7 @@
struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES 5
-struct mem_cgroup *root_mem_cgroup __read_mostly;
+static struct mem_cgroup *root_mem_cgroup __read_mostly;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
@@ -73,7 +73,7 @@ static int really_do_swap_account __initdata = 0;
#endif
#else
-#define do_swap_account (0)
+#define do_swap_account 0
#endif
@@ -88,18 +88,31 @@ enum mem_cgroup_stat_index {
MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
- MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
MEM_CGROUP_STAT_NSTATS,
};
+static const char * const mem_cgroup_stat_names[] = {
+ "cache",
+ "rss",
+ "mapped_file",
+ "swap",
+};
+
enum mem_cgroup_events_index {
MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */
MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */
- MEM_CGROUP_EVENTS_COUNT, /* # of pages paged in/out */
MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */
MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */
MEM_CGROUP_EVENTS_NSTATS,
};
+
+static const char * const mem_cgroup_events_names[] = {
+ "pgpgin",
+ "pgpgout",
+ "pgfault",
+ "pgmajfault",
+};
+
/*
* Per memcg event counter is incremented at every pagein/pageout. With THP,
* it will be incremated by the number of pages. This counter is used for
@@ -112,13 +125,14 @@ enum mem_cgroup_events_target {
MEM_CGROUP_TARGET_NUMAINFO,
MEM_CGROUP_NTARGETS,
};
-#define THRESHOLDS_EVENTS_TARGET (128)
-#define SOFTLIMIT_EVENTS_TARGET (1024)
-#define NUMAINFO_EVENTS_TARGET (1024)
+#define THRESHOLDS_EVENTS_TARGET 128
+#define SOFTLIMIT_EVENTS_TARGET 1024
+#define NUMAINFO_EVENTS_TARGET 1024
struct mem_cgroup_stat_cpu {
long count[MEM_CGROUP_STAT_NSTATS];
unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
+ unsigned long nr_page_events;
unsigned long targets[MEM_CGROUP_NTARGETS];
};
@@ -138,7 +152,6 @@ struct mem_cgroup_per_zone {
struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
- struct zone_reclaim_stat reclaim_stat;
struct rb_node tree_node; /* RB tree node */
unsigned long long usage_in_excess;/* Set to the value by which */
/* the soft limit is exceeded*/
@@ -182,7 +195,7 @@ struct mem_cgroup_threshold {
/* For threshold */
struct mem_cgroup_threshold_ary {
- /* An array index points to threshold just below usage. */
+ /* An array index points to threshold just below or equal to usage. */
int current_threshold;
/* Size of entries[] */
unsigned int size;
@@ -245,8 +258,8 @@ struct mem_cgroup {
*/
struct rcu_head rcu_freeing;
/*
- * But when using vfree(), that cannot be done at
- * interrupt time, so we must then queue the work.
+ * We also need some space for a worker in deferred freeing.
+ * By the time we call it, rcu_freeing is no longer in use.
*/
struct work_struct work_freeing;
};
@@ -305,7 +318,7 @@ struct mem_cgroup {
/*
* percpu counter.
*/
- struct mem_cgroup_stat_cpu *stat;
+ struct mem_cgroup_stat_cpu __percpu *stat;
/*
* used when a cpu is offlined or other synchronizations
* See mem_cgroup_read_stat().
@@ -360,8 +373,8 @@ static bool move_file(void)
* Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
* limit reclaim to prevent infinite loops, if they ever occur.
*/
-#define MEM_CGROUP_MAX_RECLAIM_LOOPS (100)
-#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2)
+#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
+#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
enum charge_type {
MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
@@ -377,8 +390,8 @@ enum charge_type {
#define _MEM (0)
#define _MEMSWAP (1)
#define _OOM_TYPE (2)
-#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
-#define MEMFILE_TYPE(val) (((val) >> 16) & 0xffff)
+#define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
+#define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val) ((val) & 0xffff)
/* Used for OOM nofiier */
#define OOM_CONTROL (0)
@@ -404,6 +417,7 @@ void sock_update_memcg(struct sock *sk)
{
if (mem_cgroup_sockets_enabled) {
struct mem_cgroup *memcg;
+ struct cg_proto *cg_proto;
BUG_ON(!sk->sk_prot->proto_cgroup);
@@ -423,9 +437,10 @@ void sock_update_memcg(struct sock *sk)
rcu_read_lock();
memcg = mem_cgroup_from_task(current);
- if (!mem_cgroup_is_root(memcg)) {
+ cg_proto = sk->sk_prot->proto_cgroup(memcg);
+ if (!mem_cgroup_is_root(memcg) && memcg_proto_active(cg_proto)) {
mem_cgroup_get(memcg);
- sk->sk_cgrp = sk->sk_prot->proto_cgroup(memcg);
+ sk->sk_cgrp = cg_proto;
}
rcu_read_unlock();
}
@@ -454,6 +469,19 @@ EXPORT_SYMBOL(tcp_proto_cgroup);
#endif /* CONFIG_INET */
#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
+#if defined(CONFIG_INET) && defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM)
+static void disarm_sock_keys(struct mem_cgroup *memcg)
+{
+ if (!memcg_proto_activated(&memcg->tcp_mem.cg_proto))
+ return;
+ static_key_slow_dec(&memcg_socket_limit_enabled);
+}
+#else
+static void disarm_sock_keys(struct mem_cgroup *memcg)
+{
+}
+#endif
+
static void drain_all_stock_async(struct mem_cgroup *memcg);
static struct mem_cgroup_per_zone *
@@ -718,12 +746,21 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
nr_pages = -nr_pages; /* for event */
}
- __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);
+ __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
preempt_enable();
}
unsigned long
+mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
+{
+ struct mem_cgroup_per_zone *mz;
+
+ mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+ return mz->lru_size[lru];
+}
+
+static unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
unsigned int lru_mask)
{
@@ -770,7 +807,7 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
{
unsigned long val, next;
- val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
+ val = __this_cpu_read(memcg->stat->nr_page_events);
next = __this_cpu_read(memcg->stat->targets[target]);
/* from time_after() in jiffies.h */
if ((long)next - (long)val < 0) {
@@ -1013,7 +1050,7 @@ EXPORT_SYMBOL(mem_cgroup_count_vm_event);
/**
* mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
* @zone: zone of the wanted lruvec
- * @mem: memcg of the wanted lruvec
+ * @memcg: memcg of the wanted lruvec
*
* Returns the lru list vector holding pages for the given @zone and
* @mem. This can be the global zone lruvec, if the memory controller
@@ -1046,19 +1083,11 @@ struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
*/
/**
- * mem_cgroup_lru_add_list - account for adding an lru page and return lruvec
- * @zone: zone of the page
+ * mem_cgroup_page_lruvec - return lruvec for adding an lru page
* @page: the page
- * @lru: current lru
- *
- * This function accounts for @page being added to @lru, and returns
- * the lruvec for the given @zone and the memcg @page is charged to.
- *
- * The callsite is then responsible for physically linking the page to
- * the returned lruvec->lists[@lru].
+ * @zone: zone of the page
*/
-struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
- enum lru_list lru)
+struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
{
struct mem_cgroup_per_zone *mz;
struct mem_cgroup *memcg;
@@ -1071,7 +1100,7 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
memcg = pc->mem_cgroup;
/*
- * Surreptitiously switch any uncharged page to root:
+ * Surreptitiously switch any uncharged offlist page to root:
* an uncharged page off lru does nothing to secure
* its former mem_cgroup from sudden removal.
*
@@ -1079,85 +1108,60 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
* under page_cgroup lock: between them, they make all uses
* of pc->mem_cgroup safe.
*/
- if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
+ if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
pc->mem_cgroup = memcg = root_mem_cgroup;
mz = page_cgroup_zoneinfo(memcg, page);
- /* compound_order() is stabilized through lru_lock */
- mz->lru_size[lru] += 1 << compound_order(page);
return &mz->lruvec;
}
/**
- * mem_cgroup_lru_del_list - account for removing an lru page
- * @page: the page
- * @lru: target lru
- *
- * This function accounts for @page being removed from @lru.
+ * mem_cgroup_update_lru_size - account for adding or removing an lru page
+ * @lruvec: mem_cgroup per zone lru vector
+ * @lru: index of lru list the page is sitting on
+ * @nr_pages: positive when adding or negative when removing
*
- * The callsite is then responsible for physically unlinking
- * @page->lru.
+ * This function must be called when a page is added to or removed from an
+ * lru list.
*/
-void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
+void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
+ int nr_pages)
{
struct mem_cgroup_per_zone *mz;
- struct mem_cgroup *memcg;
- struct page_cgroup *pc;
+ unsigned long *lru_size;
if (mem_cgroup_disabled())
return;
- pc = lookup_page_cgroup(page);
- memcg = pc->mem_cgroup;
- VM_BUG_ON(!memcg);
- mz = page_cgroup_zoneinfo(memcg, page);
- /* huge page split is done under lru_lock. so, we have no races. */
- VM_BUG_ON(mz->lru_size[lru] < (1 << compound_order(page)));
- mz->lru_size[lru] -= 1 << compound_order(page);
-}
-
-void mem_cgroup_lru_del(struct page *page)
-{
- mem_cgroup_lru_del_list(page, page_lru(page));
-}
-
-/**
- * mem_cgroup_lru_move_lists - account for moving a page between lrus
- * @zone: zone of the page
- * @page: the page
- * @from: current lru
- * @to: target lru
- *
- * This function accounts for @page being moved between the lrus @from
- * and @to, and returns the lruvec for the given @zone and the memcg
- * @page is charged to.
- *
- * The callsite is then responsible for physically relinking
- * @page->lru to the returned lruvec->lists[@to].
- */
-struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
- struct page *page,
- enum lru_list from,
- enum lru_list to)
-{
- /* XXX: Optimize this, especially for @from == @to */
- mem_cgroup_lru_del_list(page, from);
- return mem_cgroup_lru_add_list(zone, page, to);
+ mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+ lru_size = mz->lru_size + lru;
+ *lru_size += nr_pages;
+ VM_BUG_ON((long)(*lru_size) < 0);
}
/*
* Checks whether given mem is same or in the root_mem_cgroup's
* hierarchy subtree
*/
+bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
+ struct mem_cgroup *memcg)
+{
+ if (root_memcg == memcg)
+ return true;
+ if (!root_memcg->use_hierarchy)
+ return false;
+ return css_is_ancestor(&memcg->css, &root_memcg->css);
+}
+
static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
- struct mem_cgroup *memcg)
+ struct mem_cgroup *memcg)
{
- if (root_memcg != memcg) {
- return (root_memcg->use_hierarchy &&
- css_is_ancestor(&memcg->css, &root_memcg->css));
- }
+ bool ret;
- return true;
+ rcu_read_lock();
+ ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
+ rcu_read_unlock();
+ return ret;
}
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
@@ -1195,19 +1199,15 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
return ret;
}
-int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
+int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
unsigned long inactive_ratio;
- int nid = zone_to_nid(zone);
- int zid = zone_idx(zone);
unsigned long inactive;
unsigned long active;
unsigned long gb;
- inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
- BIT(LRU_INACTIVE_ANON));
- active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
- BIT(LRU_ACTIVE_ANON));
+ inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
+ active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
gb = (inactive + active) >> (30 - PAGE_SHIFT);
if (gb)
@@ -1218,49 +1218,17 @@ int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
return inactive * inactive_ratio < active;
}
-int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
+int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
{
unsigned long active;
unsigned long inactive;
- int zid = zone_idx(zone);
- int nid = zone_to_nid(zone);
- inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
- BIT(LRU_INACTIVE_FILE));
- active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
- BIT(LRU_ACTIVE_FILE));
+ inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_FILE);
+ active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_FILE);
return (active > inactive);
}
-struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
- struct zone *zone)
-{
- int nid = zone_to_nid(zone);
- int zid = zone_idx(zone);
- struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
-
- return &mz->reclaim_stat;
-}
-
-struct zone_reclaim_stat *
-mem_cgroup_get_reclaim_stat_from_page(struct page *page)
-{
- struct page_cgroup *pc;
- struct mem_cgroup_per_zone *mz;
-
- if (mem_cgroup_disabled())
- return NULL;
-
- pc = lookup_page_cgroup(page);
- if (!PageCgroupUsed(pc))
- return NULL;
- /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
- smp_rmb();
- mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
- return &mz->reclaim_stat;
-}
-
#define mem_cgroup_from_res_counter(counter, member) \
container_of(counter, struct mem_cgroup, member)
@@ -1634,7 +1602,7 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
* unused nodes. But scan_nodes is lazily updated and may not cotain
* enough new information. We need to do double check.
*/
-bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
+static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
{
int nid;
@@ -1669,7 +1637,7 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
return 0;
}
-bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
+static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
{
return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
}
@@ -1843,7 +1811,8 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
/*
* try to call OOM killer. returns false if we should exit memory-reclaim loop.
*/
-bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
+static bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask,
+ int order)
{
struct oom_wait_info owait;
bool locked, need_to_kill;
@@ -1992,7 +1961,7 @@ struct memcg_stock_pcp {
unsigned int nr_pages;
struct work_struct work;
unsigned long flags;
-#define FLUSHING_CACHED_CHARGE (0)
+#define FLUSHING_CACHED_CHARGE 0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static DEFINE_MUTEX(percpu_charge_mutex);
@@ -2139,7 +2108,7 @@ static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
int i;
spin_lock(&memcg->pcp_counter_lock);
- for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
+ for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
long x = per_cpu(memcg->stat->count[i], cpu);
per_cpu(memcg->stat->count[i], cpu) = 0;
@@ -2427,6 +2396,24 @@ static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
}
/*
+ * Cancel charges in this cgroup; doesn't propagate to the parent cgroup.
+ * This is useful when moving usage to parent cgroup.
+ */
+static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
+ unsigned int nr_pages)
+{
+ unsigned long bytes = nr_pages * PAGE_SIZE;
+
+ if (mem_cgroup_is_root(memcg))
+ return;
+
+ res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
+ if (do_swap_account)
+ res_counter_uncharge_until(&memcg->memsw,
+ memcg->memsw.parent, bytes);
+}
+
+/*
* A helper function to get mem_cgroup from ID. Must be called under
* rcu_read_lock(). The caller must check css_is_removed() or similar if
* that is a concern. (dropping refcnt from swap can be called against removed
@@ -2481,6 +2468,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
{
struct page_cgroup *pc = lookup_page_cgroup(page);
struct zone *uninitialized_var(zone);
+ struct lruvec *lruvec;
bool was_on_lru = false;
bool anon;
@@ -2503,8 +2491,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
zone = page_zone(page);
spin_lock_irq(&zone->lru_lock);
if (PageLRU(page)) {
+ lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
ClearPageLRU(page);
- del_page_from_lru_list(zone, page, page_lru(page));
+ del_page_from_lru_list(page, lruvec, page_lru(page));
was_on_lru = true;
}
}
@@ -2522,9 +2511,10 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
if (lrucare) {
if (was_on_lru) {
+ lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
VM_BUG_ON(PageLRU(page));
SetPageLRU(page);
- add_page_to_lru_list(zone, page, page_lru(page));
+ add_page_to_lru_list(page, lruvec, page_lru(page));
}
spin_unlock_irq(&zone->lru_lock);
}
@@ -2547,7 +2537,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MIGRATION))
+#define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION)
/*
* Because tail pages are not marked as "used", set it. We're under
* zone->lru_lock, 'splitting on pmd' and compound_lock.
@@ -2578,23 +2568,19 @@ void mem_cgroup_split_huge_fixup(struct page *head)
* @pc: page_cgroup of the page.
* @from: mem_cgroup which the page is moved from.
* @to: mem_cgroup which the page is moved to. @from != @to.
- * @uncharge: whether we should call uncharge and css_put against @from.
*
* The caller must confirm following.
* - page is not on LRU (isolate_page() is useful.)
* - compound_lock is held when nr_pages > 1
*
- * This function doesn't do "charge" nor css_get to new cgroup. It should be
- * done by a caller(__mem_cgroup_try_charge would be useful). If @uncharge is
- * true, this function does "uncharge" from old cgroup, but it doesn't if
- * @uncharge is false, so a caller should do "uncharge".
+ * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
+ * from old cgroup.
*/
static int mem_cgroup_move_account(struct page *page,
unsigned int nr_pages,
struct page_cgroup *pc,
struct mem_cgroup *from,
- struct mem_cgroup *to,
- bool uncharge)
+ struct mem_cgroup *to)
{
unsigned long flags;
int ret;
@@ -2628,9 +2614,6 @@ static int mem_cgroup_move_account(struct page *page,
preempt_enable();
}
mem_cgroup_charge_statistics(from, anon, -nr_pages);
- if (uncharge)
- /* This is not "cancel", but cancel_charge does all we need. */
- __mem_cgroup_cancel_charge(from, nr_pages);
/* caller should have done css_get */
pc->mem_cgroup = to;
@@ -2664,15 +2647,13 @@ static int mem_cgroup_move_parent(struct page *page,
struct mem_cgroup *child,
gfp_t gfp_mask)
{
- struct cgroup *cg = child->css.cgroup;
- struct cgroup *pcg = cg->parent;
struct mem_cgroup *parent;
unsigned int nr_pages;
unsigned long uninitialized_var(flags);
int ret;
/* Is ROOT ? */
- if (!pcg)
+ if (mem_cgroup_is_root(child))
return -EINVAL;
ret = -EBUSY;
@@ -2683,21 +2664,23 @@ static int mem_cgroup_move_parent(struct page *page,
nr_pages = hpage_nr_pages(page);
- parent = mem_cgroup_from_cont(pcg);
- ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
- if (ret)
- goto put_back;
+ parent = parent_mem_cgroup(child);
+ /*
+ * If no parent, move charges to root cgroup.
+ */
+ if (!parent)
+ parent = root_mem_cgroup;
if (nr_pages > 1)
flags = compound_lock_irqsave(page);
- ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true);
- if (ret)
- __mem_cgroup_cancel_charge(parent, nr_pages);
+ ret = mem_cgroup_move_account(page, nr_pages,
+ pc, child, parent);
+ if (!ret)
+ __mem_cgroup_cancel_local_charge(child, nr_pages);
if (nr_pages > 1)
compound_unlock_irqrestore(page, flags);
-put_back:
putback_lru_page(page);
put:
put_page(page);
@@ -2845,24 +2828,7 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
*/
if (do_swap_account && PageSwapCache(page)) {
swp_entry_t ent = {.val = page_private(page)};
- struct mem_cgroup *swap_memcg;
- unsigned short id;
-
- id = swap_cgroup_record(ent, 0);
- rcu_read_lock();
- swap_memcg = mem_cgroup_lookup(id);
- if (swap_memcg) {
- /*
- * This recorded memcg can be obsolete one. So, avoid
- * calling css_tryget
- */
- if (!mem_cgroup_is_root(swap_memcg))
- res_counter_uncharge(&swap_memcg->memsw,
- PAGE_SIZE);
- mem_cgroup_swap_statistics(swap_memcg, false);
- mem_cgroup_put(swap_memcg);
- }
- rcu_read_unlock();
+ mem_cgroup_uncharge_swap(ent);
}
/*
* At swapin, we may charge account against cgroup which has no tasks.
@@ -3155,7 +3121,6 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
* @entry: swap entry to be moved
* @from: mem_cgroup which the entry is moved from
* @to: mem_cgroup which the entry is moved to
- * @need_fixup: whether we should fixup res_counters and refcounts.
*
* It succeeds only when the swap_cgroup's record for this entry is the same
* as the mem_cgroup's id of @from.
@@ -3166,7 +3131,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
* both res and memsw, and called css_get().
*/
static int mem_cgroup_move_swap_account(swp_entry_t entry,
- struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
+ struct mem_cgroup *from, struct mem_cgroup *to)
{
unsigned short old_id, new_id;
@@ -3185,24 +3150,13 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
* swap-in, the refcount of @to might be decreased to 0.
*/
mem_cgroup_get(to);
- if (need_fixup) {
- if (!mem_cgroup_is_root(from))
- res_counter_uncharge(&from->memsw, PAGE_SIZE);
- mem_cgroup_put(from);
- /*
- * we charged both to->res and to->memsw, so we should
- * uncharge to->res.
- */
- if (!mem_cgroup_is_root(to))
- res_counter_uncharge(&to->res, PAGE_SIZE);
- }
return 0;
}
return -EINVAL;
}
#else
static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
- struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
+ struct mem_cgroup *from, struct mem_cgroup *to)
{
return -EINVAL;
}
@@ -3363,7 +3317,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
void mem_cgroup_replace_page_cache(struct page *oldpage,
struct page *newpage)
{
- struct mem_cgroup *memcg;
+ struct mem_cgroup *memcg = NULL;
struct page_cgroup *pc;
enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
@@ -3373,11 +3327,20 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
pc = lookup_page_cgroup(oldpage);
/* fix accounting on old pages */
lock_page_cgroup(pc);
- memcg = pc->mem_cgroup;
- mem_cgroup_charge_statistics(memcg, false, -1);
- ClearPageCgroupUsed(pc);
+ if (PageCgroupUsed(pc)) {
+ memcg = pc->mem_cgroup;
+ mem_cgroup_charge_statistics(memcg, false, -1);
+ ClearPageCgroupUsed(pc);
+ }
unlock_page_cgroup(pc);
+ /*
+ * When called from shmem_replace_page(), in some cases the
+ * oldpage has already been charged, and in some cases not.
+ */
+ if (!memcg)
+ return;
+
if (PageSwapBacked(oldpage))
type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
@@ -3793,7 +3756,7 @@ try_to_free:
goto move_account;
}
-int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
+static int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
{
return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
}
@@ -4051,103 +4014,13 @@ static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
}
#endif
-
-/* For read statistics */
-enum {
- MCS_CACHE,
- MCS_RSS,
- MCS_FILE_MAPPED,
- MCS_PGPGIN,
- MCS_PGPGOUT,
- MCS_SWAP,
- MCS_PGFAULT,
- MCS_PGMAJFAULT,
- MCS_INACTIVE_ANON,
- MCS_ACTIVE_ANON,
- MCS_INACTIVE_FILE,
- MCS_ACTIVE_FILE,
- MCS_UNEVICTABLE,
- NR_MCS_STAT,
-};
-
-struct mcs_total_stat {
- s64 stat[NR_MCS_STAT];
-};
-
-struct {
- char *local_name;
- char *total_name;
-} memcg_stat_strings[NR_MCS_STAT] = {
- {"cache", "total_cache"},
- {"rss", "total_rss"},
- {"mapped_file", "total_mapped_file"},
- {"pgpgin", "total_pgpgin"},
- {"pgpgout", "total_pgpgout"},
- {"swap", "total_swap"},
- {"pgfault", "total_pgfault"},
- {"pgmajfault", "total_pgmajfault"},
- {"inactive_anon", "total_inactive_anon"},
- {"active_anon", "total_active_anon"},
- {"inactive_file", "total_inactive_file"},
- {"active_file", "total_active_file"},
- {"unevictable", "total_unevictable"}
-};
-
-
-static void
-mem_cgroup_get_local_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s)
-{
- s64 val;
-
- /* per cpu stat */
- val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_CACHE);
- s->stat[MCS_CACHE] += val * PAGE_SIZE;
- val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_RSS);
- s->stat[MCS_RSS] += val * PAGE_SIZE;
- val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
- s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
- val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGIN);
- s->stat[MCS_PGPGIN] += val;
- val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGOUT);
- s->stat[MCS_PGPGOUT] += val;
- if (do_swap_account) {
- val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_SWAPOUT);
- s->stat[MCS_SWAP] += val * PAGE_SIZE;
- }
- val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGFAULT);
- s->stat[MCS_PGFAULT] += val;
- val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGMAJFAULT);
- s->stat[MCS_PGMAJFAULT] += val;
-
- /* per zone stat */
- val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON));
- s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
- val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON));
- s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
- val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE));
- s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
- val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE));
- s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
- val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
- s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
-}
-
-static void
-mem_cgroup_get_total_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s)
-{
- struct mem_cgroup *iter;
-
- for_each_mem_cgroup_tree(iter, memcg)
- mem_cgroup_get_local_stat(iter, s);
-}
-
#ifdef CONFIG_NUMA
-static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
+static int mem_control_numa_stat_show(struct cgroup *cont, struct cftype *cft,
+ struct seq_file *m)
{
int nid;
unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
unsigned long node_nr;
- struct cgroup *cont = m->private;
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
@@ -4188,64 +4061,100 @@ static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
}
#endif /* CONFIG_NUMA */
+static const char * const mem_cgroup_lru_names[] = {
+ "inactive_anon",
+ "active_anon",
+ "inactive_file",
+ "active_file",
+ "unevictable",
+};
+
+static inline void mem_cgroup_lru_names_not_uptodate(void)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
+}
+
static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
- struct cgroup_map_cb *cb)
+ struct seq_file *m)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
- struct mcs_total_stat mystat;
- int i;
-
- memset(&mystat, 0, sizeof(mystat));
- mem_cgroup_get_local_stat(memcg, &mystat);
+ struct mem_cgroup *mi;
+ unsigned int i;
-
- for (i = 0; i < NR_MCS_STAT; i++) {
- if (i == MCS_SWAP && !do_swap_account)
+ for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
+ if (i == MEM_CGROUP_STAT_SWAPOUT && !do_swap_account)
continue;
- cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
+ seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
+ mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
}
+ for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
+ seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
+ mem_cgroup_read_events(memcg, i));
+
+ for (i = 0; i < NR_LRU_LISTS; i++)
+ seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
+ mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
+
/* Hierarchical information */
{
unsigned long long limit, memsw_limit;
memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
- cb->fill(cb, "hierarchical_memory_limit", limit);
+ seq_printf(m, "hierarchical_memory_limit %llu\n", limit);
if (do_swap_account)
- cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
+ seq_printf(m, "hierarchical_memsw_limit %llu\n",
+ memsw_limit);
}
- memset(&mystat, 0, sizeof(mystat));
- mem_cgroup_get_total_stat(memcg, &mystat);
- for (i = 0; i < NR_MCS_STAT; i++) {
- if (i == MCS_SWAP && !do_swap_account)
+ for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
+ long long val = 0;
+
+ if (i == MEM_CGROUP_STAT_SWAPOUT && !do_swap_account)
continue;
- cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
+ for_each_mem_cgroup_tree(mi, memcg)
+ val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
+ seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
+ }
+
+ for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
+ unsigned long long val = 0;
+
+ for_each_mem_cgroup_tree(mi, memcg)
+ val += mem_cgroup_read_events(mi, i);
+ seq_printf(m, "total_%s %llu\n",
+ mem_cgroup_events_names[i], val);
+ }
+
+ for (i = 0; i < NR_LRU_LISTS; i++) {
+ unsigned long long val = 0;
+
+ for_each_mem_cgroup_tree(mi, memcg)
+ val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
+ seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
}
#ifdef CONFIG_DEBUG_VM
{
int nid, zid;
struct mem_cgroup_per_zone *mz;
+ struct zone_reclaim_stat *rstat;
unsigned long recent_rotated[2] = {0, 0};
unsigned long recent_scanned[2] = {0, 0};
for_each_online_node(nid)
for (zid = 0; zid < MAX_NR_ZONES; zid++) {
mz = mem_cgroup_zoneinfo(memcg, nid, zid);
+ rstat = &mz->lruvec.reclaim_stat;
- recent_rotated[0] +=
- mz->reclaim_stat.recent_rotated[0];
- recent_rotated[1] +=
- mz->reclaim_stat.recent_rotated[1];
- recent_scanned[0] +=
- mz->reclaim_stat.recent_scanned[0];
- recent_scanned[1] +=
- mz->reclaim_stat.recent_scanned[1];
+ recent_rotated[0] += rstat->recent_rotated[0];
+ recent_rotated[1] += rstat->recent_rotated[1];
+ recent_scanned[0] += rstat->recent_scanned[0];
+ recent_scanned[1] += rstat->recent_scanned[1];
}
- cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
- cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
- cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
- cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
+ seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
+ seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
+ seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
+ seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
}
#endif
@@ -4307,7 +4216,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
usage = mem_cgroup_usage(memcg, swap);
/*
- * current_threshold points to threshold just below usage.
+ * current_threshold points to threshold just below or equal to usage.
* If it's not true, a threshold was crossed after last
* call of __mem_cgroup_threshold().
*/
@@ -4433,14 +4342,15 @@ static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
/* Find current threshold */
new->current_threshold = -1;
for (i = 0; i < size; i++) {
- if (new->entries[i].threshold < usage) {
+ if (new->entries[i].threshold <= usage) {
/*
* new->current_threshold will not be used until
* rcu_assign_pointer(), so it's safe to increment
* it here.
*/
++new->current_threshold;
- }
+ } else
+ break;
}
/* Free old spare buffer and save old primary buffer as spare */
@@ -4509,7 +4419,7 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
continue;
new->entries[j] = thresholds->primary->entries[i];
- if (new->entries[j].threshold < usage) {
+ if (new->entries[j].threshold <= usage) {
/*
* new->current_threshold will not be used
* until rcu_assign_pointer(), so it's safe to increment
@@ -4623,22 +4533,6 @@ static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
return 0;
}
-#ifdef CONFIG_NUMA
-static const struct file_operations mem_control_numa_stat_file_operations = {
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
-{
- struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
-
- file->f_op = &mem_control_numa_stat_file_operations;
- return single_open(file, mem_control_numa_stat_show, cont);
-}
-#endif /* CONFIG_NUMA */
-
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
@@ -4694,7 +4588,7 @@ static struct cftype mem_cgroup_files[] = {
},
{
.name = "stat",
- .read_map = mem_control_stat_show,
+ .read_seq_string = mem_control_stat_show,
},
{
.name = "force_empty",
@@ -4726,8 +4620,7 @@ static struct cftype mem_cgroup_files[] = {
#ifdef CONFIG_NUMA
{
.name = "numa_stat",
- .open = mem_control_numa_stat_open,
- .mode = S_IRUGO,
+ .read_seq_string = mem_control_numa_stat_show,
},
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
@@ -4764,7 +4657,6 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
{
struct mem_cgroup_per_node *pn;
struct mem_cgroup_per_zone *mz;
- enum lru_list lru;
int zone, tmp = node;
/*
* This routine is called against possible nodes.
@@ -4782,8 +4674,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
for (zone = 0; zone < MAX_NR_ZONES; zone++) {
mz = &pn->zoneinfo[zone];
- for_each_lru(lru)
- INIT_LIST_HEAD(&mz->lruvec.lists[lru]);
+ lruvec_init(&mz->lruvec, &NODE_DATA(node)->node_zones[zone]);
mz->usage_in_excess = 0;
mz->on_tree = false;
mz->memcg = memcg;
@@ -4826,23 +4717,40 @@ out_free:
}
/*
- * Helpers for freeing a vzalloc()ed mem_cgroup by RCU,
+ * Helpers for freeing a kmalloc()ed/vzalloc()ed mem_cgroup by RCU,
* but in process context. The work_freeing structure is overlaid
* on the rcu_freeing structure, which itself is overlaid on memsw.
*/
-static void vfree_work(struct work_struct *work)
+static void free_work(struct work_struct *work)
{
struct mem_cgroup *memcg;
+ int size = sizeof(struct mem_cgroup);
memcg = container_of(work, struct mem_cgroup, work_freeing);
- vfree(memcg);
+ /*
+ * We need to make sure that (at least for now), the jump label
+ * destruction code runs outside of the cgroup lock. This is because
+ * get_online_cpus(), which is called from the static_branch update,
+ * can't be called inside the cgroup_lock. cpusets are the ones
+ * enforcing this dependency, so if they ever change, we might as well.
+ *
+ * schedule_work() will guarantee this happens. Be careful if you need
+ * to move this code around, and make sure it is outside
+ * the cgroup_lock.
+ */
+ disarm_sock_keys(memcg);
+ if (size < PAGE_SIZE)
+ kfree(memcg);
+ else
+ vfree(memcg);
}
-static void vfree_rcu(struct rcu_head *rcu_head)
+
+static void free_rcu(struct rcu_head *rcu_head)
{
struct mem_cgroup *memcg;
memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
- INIT_WORK(&memcg->work_freeing, vfree_work);
+ INIT_WORK(&memcg->work_freeing, free_work);
schedule_work(&memcg->work_freeing);
}
@@ -4868,10 +4776,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
free_mem_cgroup_per_zone_info(memcg, node);
free_percpu(memcg->stat);
- if (sizeof(struct mem_cgroup) < PAGE_SIZE)
- kfree_rcu(memcg, rcu_freeing);
- else
- call_rcu(&memcg->rcu_freeing, vfree_rcu);
+ call_rcu(&memcg->rcu_freeing, free_rcu);
}
static void mem_cgroup_get(struct mem_cgroup *memcg)
@@ -5135,7 +5040,7 @@ static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
return NULL;
if (PageAnon(page)) {
/* we don't move shared anon */
- if (!move_anon() || page_mapcount(page) > 2)
+ if (!move_anon())
return NULL;
} else if (!move_file())
/* we ignore mapcount for file pages */
@@ -5146,32 +5051,37 @@ static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
return page;
}
+#ifdef CONFIG_SWAP
static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
- int usage_count;
struct page *page = NULL;
swp_entry_t ent = pte_to_swp_entry(ptent);
if (!move_anon() || non_swap_entry(ent))
return NULL;
- usage_count = mem_cgroup_count_swap_user(ent, &page);
- if (usage_count > 1) { /* we don't move shared anon */
- if (page)
- put_page(page);
- return NULL;
- }
+ /*
+ * Because lookup_swap_cache() updates some statistics counter,
+ * we call find_get_page() with swapper_space directly.
+ */
+ page = find_get_page(&swapper_space, ent.val);
if (do_swap_account)
entry->val = ent.val;
return page;
}
+#else
+static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
+ unsigned long addr, pte_t ptent, swp_entry_t *entry)
+{
+ return NULL;
+}
+#endif
static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
struct page *page = NULL;
- struct inode *inode;
struct address_space *mapping;
pgoff_t pgoff;
@@ -5180,7 +5090,6 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
if (!move_file())
return NULL;
- inode = vma->vm_file->f_path.dentry->d_inode;
mapping = vma->vm_file->f_mapping;
if (pte_none(ptent))
pgoff = linear_page_index(vma, addr);
@@ -5479,8 +5388,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
if (!isolate_lru_page(page)) {
pc = lookup_page_cgroup(page);
if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
- pc, mc.from, mc.to,
- false)) {
+ pc, mc.from, mc.to)) {
mc.precharge -= HPAGE_PMD_NR;
mc.moved_charge += HPAGE_PMD_NR;
}
@@ -5510,7 +5418,7 @@ retry:
goto put;
pc = lookup_page_cgroup(page);
if (!mem_cgroup_move_account(page, 1, pc,
- mc.from, mc.to, false)) {
+ mc.from, mc.to)) {
mc.precharge--;
/* we uncharge from mc.from later. */
mc.moved_charge++;
@@ -5521,8 +5429,7 @@ put: /* get_mctgt_type() gets the page */
break;
case MC_TARGET_SWAP:
ent = target.ent;
- if (!mem_cgroup_move_swap_account(ent,
- mc.from, mc.to, false)) {
+ if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
mc.precharge--;
/* we fixup refcnts and charges later. */
mc.moved_swap++;
@@ -5598,7 +5505,6 @@ static void mem_cgroup_move_task(struct cgroup *cont,
if (mm) {
if (mc.to)
mem_cgroup_move_charge(mm);
- put_swap_token(mm);
mmput(mm);
}
if (mc.to)
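
A note on the threshold hunks above: the comparison changes from '<' to '<=', so current_threshold now points at the last threshold that is below or equal to the current usage. A minimal userspace sketch of that selection rule, using made-up threshold values rather than the kernel's eventfd threshold structures:

#include <stdio.h>

/* Index of the last threshold <= usage, or -1 if none; the scan stops
 * at the first threshold strictly above usage, mirroring the new loop. */
static int current_threshold(const unsigned long *t, int size,
			     unsigned long usage)
{
	int i, cur = -1;

	for (i = 0; i < size; i++) {
		if (t[i] <= usage)
			cur++;
		else
			break;
	}
	return cur;
}

int main(void)
{
	unsigned long thresholds[] = { 100, 200, 300 };	/* sorted ascending */

	/* usage == 200 now selects index 1; with '<' it selected index 0 */
	printf("usage 150 -> %d\n", current_threshold(thresholds, 3, 150)); /* 0 */
	printf("usage 200 -> %d\n", current_threshold(thresholds, 3, 200)); /* 1 */
	printf("usage  50 -> %d\n", current_threshold(thresholds, 3, 50));  /* -1 */
	return 0;
}
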
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 97cc2733551..ab1e7145e29 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1388,23 +1388,23 @@ static int get_any_page(struct page *p, unsigned long pfn, int flags)
*/
if (!get_page_unless_zero(compound_head(p))) {
if (PageHuge(p)) {
- pr_info("get_any_page: %#lx free huge page\n", pfn);
+ pr_info("%s: %#lx free huge page\n", __func__, pfn);
ret = dequeue_hwpoisoned_huge_page(compound_head(p));
} else if (is_free_buddy_page(p)) {
- pr_info("get_any_page: %#lx free buddy page\n", pfn);
+ pr_info("%s: %#lx free buddy page\n", __func__, pfn);
/* Set hwpoison bit while page is still isolated */
SetPageHWPoison(p);
ret = 0;
} else {
- pr_info("get_any_page: %#lx: unknown zero refcount page type %lx\n",
- pfn, p->flags);
+ pr_info("%s: %#lx: unknown zero refcount page type %lx\n",
+ __func__, pfn, p->flags);
ret = -EIO;
}
} else {
/* Not a free page */
ret = 1;
}
- unset_migratetype_isolate(p);
+ unset_migratetype_isolate(p, MIGRATE_MOVABLE);
unlock_memory_hotplug();
return ret;
}
diff --git a/mm/memory.c b/mm/memory.c
index e40f6759ba9..1b7dc662bf9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2908,7 +2908,6 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
delayacct_set_flag(DELAYACCT_PF_SWAPIN);
page = lookup_swap_cache(entry);
if (!page) {
- grab_swap_token(mm); /* Contend for token _before_ read-in */
page = swapin_readahead(entry,
GFP_HIGHUSER_MOVABLE, vma, address);
if (!page) {
@@ -2938,6 +2937,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
}
locked = lock_page_or_retry(page, mm, flags);
+
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
if (!locked) {
ret |= VM_FAULT_RETRY;
@@ -3486,6 +3486,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(is_vm_hugetlb_page(vma)))
return hugetlb_fault(mm, vma, address, flags);
+retry:
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
if (!pud)
@@ -3499,13 +3500,24 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
pmd, flags);
} else {
pmd_t orig_pmd = *pmd;
+ int ret;
+
barrier();
if (pmd_trans_huge(orig_pmd)) {
if (flags & FAULT_FLAG_WRITE &&
!pmd_write(orig_pmd) &&
- !pmd_trans_splitting(orig_pmd))
- return do_huge_pmd_wp_page(mm, vma, address,
- pmd, orig_pmd);
+ !pmd_trans_splitting(orig_pmd)) {
+ ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
+ orig_pmd);
+ /*
+ * If COW results in an oom, the huge pmd will
+ * have been split, so retry the fault on the
+ * pte for a smaller charge.
+ */
+ if (unlikely(ret & VM_FAULT_OOM))
+ goto retry;
+ return ret;
+ }
return 0;
}
}
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 6629fafd6ce..0d7e3ec8e0f 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -74,8 +74,7 @@ static struct resource *register_memory_resource(u64 start, u64 size)
res->end = start + size - 1;
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
if (request_resource(&iomem_resource, res) < 0) {
- printk("System RAM resource %llx - %llx cannot be added\n",
- (unsigned long long)res->start, (unsigned long long)res->end);
+ printk("System RAM resource %pR cannot be added\n", res);
kfree(res);
res = NULL;
}
@@ -502,8 +501,10 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages)
online_pages_range);
if (ret) {
mutex_unlock(&zonelists_mutex);
- printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
- nr_pages, pfn);
+ printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n",
+ (unsigned long long) pfn << PAGE_SHIFT,
+ (((unsigned long long) pfn + nr_pages)
+ << PAGE_SHIFT) - 1);
memory_notify(MEM_CANCEL_ONLINE, &arg);
unlock_memory_hotplug();
return ret;
@@ -891,7 +892,7 @@ static int __ref offline_pages(unsigned long start_pfn,
nr_pages = end_pfn - start_pfn;
/* set above range as isolated */
- ret = start_isolate_page_range(start_pfn, end_pfn);
+ ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
if (ret)
goto out;
@@ -956,7 +957,7 @@ repeat:
We cannot do rollback at this point. */
offline_isolated_pages(start_pfn, end_pfn);
/* reset pagetype flags and makes migrate type to be MOVABLE */
- undo_isolate_page_range(start_pfn, end_pfn);
+ undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
/* removal success */
zone->present_pages -= offlined_pages;
zone->zone_pgdat->node_present_pages -= offlined_pages;
@@ -977,11 +978,12 @@ repeat:
return 0;
failed_removal:
- printk(KERN_INFO "memory offlining %lx to %lx failed\n",
- start_pfn, end_pfn);
+ printk(KERN_INFO "memory offlining [mem %#010llx-%#010llx] failed\n",
+ (unsigned long long) start_pfn << PAGE_SHIFT,
+ ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
memory_notify(MEM_CANCEL_OFFLINE, &arg);
/* pushback to free area */
- undo_isolate_page_range(start_pfn, end_pfn);
+ undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
out:
unlock_memory_hotplug();
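
The reworked hotplug messages above report physical byte ranges instead of raw pfn values. A small standalone sketch of that conversion, assuming 4 KiB pages for illustration rather than the kernel's PAGE_SHIFT:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages, illustration only */

static void print_mem_range(unsigned long long pfn, unsigned long long nr_pages)
{
	unsigned long long start = pfn << PAGE_SHIFT;
	unsigned long long end   = ((pfn + nr_pages) << PAGE_SHIFT) - 1;

	/* same "[mem %#010llx-%#010llx]" style as the new messages */
	printf("[mem %#010llx-%#010llx]\n", start, end);
}

int main(void)
{
	print_mem_range(0x10000, 0x8000);	/* pfns 0x10000..0x17fff */
	return 0;
}
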
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 88f9422b92e..f15c1b24ca1 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -390,7 +390,7 @@ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
{
if (!pol)
return;
- if (!mpol_store_user_nodemask(pol) && step == 0 &&
+ if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
return;
@@ -950,8 +950,8 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
*
* Returns the number of page that could not be moved.
*/
-int do_migrate_pages(struct mm_struct *mm,
- const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
+int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
+ const nodemask_t *to, int flags)
{
int busy = 0;
int err;
@@ -963,7 +963,7 @@ int do_migrate_pages(struct mm_struct *mm,
down_read(&mm->mmap_sem);
- err = migrate_vmas(mm, from_nodes, to_nodes, flags);
+ err = migrate_vmas(mm, from, to, flags);
if (err)
goto out;
@@ -998,14 +998,34 @@ int do_migrate_pages(struct mm_struct *mm,
* moved to an empty node, then there is nothing left worth migrating.
*/
- tmp = *from_nodes;
+ tmp = *from;
while (!nodes_empty(tmp)) {
int s,d;
int source = -1;
int dest = 0;
for_each_node_mask(s, tmp) {
- d = node_remap(s, *from_nodes, *to_nodes);
+
+ /*
+ * do_migrate_pages() tries to maintain the relative
+ * node relationship of the pages established between
+ * threads and memory areas.
+ *
+ * However, if the number of source nodes is not equal to
+ * the number of destination nodes, we cannot preserve
+ * this relative node relationship. In that case, skip
+ * copying memory from a node that is in the destination
+ * mask.
+ *
+ * Example: [2,3,4] -> [3,4,5] moves everything.
+ * [0-7] -> [3,4,5] moves only 0,1,2,6,7.
+ */
+
+ if ((nodes_weight(*from) != nodes_weight(*to)) &&
+ (node_isset(s, *to)))
+ continue;
+
+ d = node_remap(s, *from, *to);
if (s == d)
continue;
@@ -1065,8 +1085,8 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
{
}
-int do_migrate_pages(struct mm_struct *mm,
- const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
+int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
+ const nodemask_t *to, int flags)
{
return -ENOSYS;
}
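
The skip rule described in the new do_migrate_pages() comment can be checked with a small userspace sketch that models nodemasks as plain bitmasks (the real code uses nodemask_t and node_remap(), which are not reproduced here): a source node is skipped only when the two masks have different weights and the node is already in the destination mask.

#include <stdio.h>

/* Population count for a small illustrative "nodemask". */
static int weight(unsigned long mask)
{
	int w = 0;

	for (; mask; mask &= mask - 1)
		w++;
	return w;
}

int main(void)
{
	unsigned long from = 0xff;	/* nodes 0-7 */
	unsigned long to   = 0x38;	/* nodes 3-5 */
	int s;

	for (s = 0; s < 8; s++) {
		if (!(from & (1UL << s)))
			continue;
		/* weights differ (8 vs 3), so nodes 3, 4 and 5 are skipped */
		if (weight(from) != weight(to) && (to & (1UL << s)))
			continue;
		printf("would migrate from node %d\n", s);	/* 0,1,2,6,7 */
	}
	return 0;
}
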
diff --git a/mm/mmap.c b/mm/mmap.c
index e8dcfc7de86..4a9c2a391e2 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1639,33 +1639,34 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *vma = NULL;
- if (mm) {
- /* Check the cache first. */
- /* (Cache hit rate is typically around 35%.) */
- vma = mm->mmap_cache;
- if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
- struct rb_node * rb_node;
-
- rb_node = mm->mm_rb.rb_node;
- vma = NULL;
-
- while (rb_node) {
- struct vm_area_struct * vma_tmp;
-
- vma_tmp = rb_entry(rb_node,
- struct vm_area_struct, vm_rb);
-
- if (vma_tmp->vm_end > addr) {
- vma = vma_tmp;
- if (vma_tmp->vm_start <= addr)
- break;
- rb_node = rb_node->rb_left;
- } else
- rb_node = rb_node->rb_right;
- }
- if (vma)
- mm->mmap_cache = vma;
+ if (WARN_ON_ONCE(!mm)) /* Remove this in linux-3.6 */
+ return NULL;
+
+ /* Check the cache first. */
+ /* (Cache hit rate is typically around 35%.) */
+ vma = mm->mmap_cache;
+ if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
+ struct rb_node *rb_node;
+
+ rb_node = mm->mm_rb.rb_node;
+ vma = NULL;
+
+ while (rb_node) {
+ struct vm_area_struct *vma_tmp;
+
+ vma_tmp = rb_entry(rb_node,
+ struct vm_area_struct, vm_rb);
+
+ if (vma_tmp->vm_end > addr) {
+ vma = vma_tmp;
+ if (vma_tmp->vm_start <= addr)
+ break;
+ rb_node = rb_node->rb_left;
+ } else
+ rb_node = rb_node->rb_right;
}
+ if (vma)
+ mm->mmap_cache = vma;
}
return vma;
}
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 7cf7b7ddc7c..6830eab5bf0 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -86,3 +86,17 @@ int memmap_valid_within(unsigned long pfn,
return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
+
+void lruvec_init(struct lruvec *lruvec, struct zone *zone)
+{
+ enum lru_list lru;
+
+ memset(lruvec, 0, sizeof(struct lruvec));
+
+ for_each_lru(lru)
+ INIT_LIST_HEAD(&lruvec->lists[lru]);
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+ lruvec->zone = zone;
+#endif
+}
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 1983fb1c702..d23415c001b 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -274,86 +274,85 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
return ___alloc_bootmem(size, align, goal, limit);
}
-/**
- * __alloc_bootmem_node - allocate boot memory from a specific node
- * @pgdat: node to allocate from
- * @size: size of the request in bytes
- * @align: alignment of the region
- * @goal: preferred starting address of the region
- *
- * The goal is dropped if it can not be satisfied and the allocation will
- * fall back to memory below @goal.
- *
- * Allocation may fall back to any node in the system if the specified node
- * can not hold the requested memory.
- *
- * The function panics if the request can not be satisfied.
- */
-void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
- unsigned long align, unsigned long goal)
+static void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+ unsigned long size,
+ unsigned long align,
+ unsigned long goal,
+ unsigned long limit)
{
void *ptr;
- if (WARN_ON_ONCE(slab_is_available()))
- return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
-
again:
ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
- goal, -1ULL);
+ goal, limit);
if (ptr)
return ptr;
ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
- goal, -1ULL);
- if (!ptr && goal) {
+ goal, limit);
+ if (ptr)
+ return ptr;
+
+ if (goal) {
goal = 0;
goto again;
}
- return ptr;
+
+ return NULL;
}
-void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
+void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
{
- return __alloc_bootmem_node(pgdat, size, align, goal);
+ if (WARN_ON_ONCE(slab_is_available()))
+ return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
+
+ return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}
-#ifdef CONFIG_SPARSEMEM
-/**
- * alloc_bootmem_section - allocate boot memory from a specific section
- * @size: size of the request in bytes
- * @section_nr: sparse map section to allocate from
- *
- * Return NULL on failure.
- */
-void * __init alloc_bootmem_section(unsigned long size,
- unsigned long section_nr)
+void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
+ unsigned long align, unsigned long goal,
+ unsigned long limit)
{
- unsigned long pfn, goal, limit;
+ void *ptr;
- pfn = section_nr_to_pfn(section_nr);
- goal = pfn << PAGE_SHIFT;
- limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;
+ ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
+ if (ptr)
+ return ptr;
- return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
- SMP_CACHE_BYTES, goal, limit);
+ printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
+ panic("Out of memory");
+ return NULL;
}
-#endif
-void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
+/**
+ * __alloc_bootmem_node - allocate boot memory from a specific node
+ * @pgdat: node to allocate from
+ * @size: size of the request in bytes
+ * @align: alignment of the region
+ * @goal: preferred starting address of the region
+ *
+ * The goal is dropped if it can not be satisfied and the allocation will
+ * fall back to memory below @goal.
+ *
+ * Allocation may fall back to any node in the system if the specified node
+ * can not hold the requested memory.
+ *
+ * The function panics if the request can not be satisfied.
+ */
+void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
{
- void *ptr;
-
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
- ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
- goal, -1ULL);
- if (ptr)
- return ptr;
+ return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
+}
- return __alloc_bootmem_nopanic(size, align, goal);
+void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
+ unsigned long align, unsigned long goal)
+{
+ return __alloc_bootmem_node(pgdat, size, align, goal);
}
#ifndef ARCH_LOW_ADDRESS_LIMIT
@@ -397,16 +396,9 @@ void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
{
- void *ptr;
-
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
- ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
- goal, ARCH_LOW_ADDRESS_LIMIT);
- if (ptr)
- return ptr;
-
- return __alloc_memory_core_early(MAX_NUMNODES, size, align,
- goal, ARCH_LOW_ADDRESS_LIMIT);
+ return ___alloc_bootmem_node(pgdat, size, align, goal,
+ ARCH_LOW_ADDRESS_LIMIT);
}
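
The nobootmem refactor above centralises a try-then-fall-back pattern: allocate near the goal on the requested node, then on any node, and finally retry once with the goal dropped. A hedged sketch of that control flow with a stubbed-out allocator; the names alloc_on_node() and ANY_NODE are illustrative, not kernel APIs:

#include <stddef.h>

#define ANY_NODE (-1)	/* stand-in for MAX_NUMNODES in this sketch */

/* Stub: pretend nothing can be placed near a non-zero goal. */
static void *alloc_on_node(int nid, size_t size, unsigned long goal)
{
	static char pool[4096];

	(void)nid;
	(void)size;
	return goal ? NULL : pool;
}

/* Mirrors ___alloc_bootmem_node_nopanic(): node, any node, then goal=0. */
static void *alloc_nopanic(int nid, size_t size, unsigned long goal)
{
	void *p;

again:
	p = alloc_on_node(nid, size, goal);
	if (p)
		return p;
	p = alloc_on_node(ANY_NODE, size, goal);
	if (p)
		return p;
	if (goal) {
		goal = 0;	/* drop the goal and try one more time */
		goto again;
	}
	return NULL;
}

int main(void)
{
	return alloc_nopanic(0, 64, 0x100000) ? 0 : 1;
}
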
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 9f09a1fde9f..ed0e1967736 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -180,10 +180,10 @@ static bool oom_unkillable_task(struct task_struct *p,
* predictable as possible. The goal is to return the highest value for the
* task consuming the most memory to avoid subsequent oom failures.
*/
-unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
- const nodemask_t *nodemask, unsigned long totalpages)
+unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
+ const nodemask_t *nodemask, unsigned long totalpages)
{
- long points;
+ unsigned long points;
if (oom_unkillable_task(p, memcg, nodemask))
return 0;
@@ -198,21 +198,11 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
}
/*
- * The memory controller may have a limit of 0 bytes, so avoid a divide
- * by zero, if necessary.
- */
- if (!totalpages)
- totalpages = 1;
-
- /*
* The baseline for the badness score is the proportion of RAM that each
* task's rss, pagetable and swap space use.
*/
- points = get_mm_rss(p->mm) + p->mm->nr_ptes;
- points += get_mm_counter(p->mm, MM_SWAPENTS);
-
- points *= 1000;
- points /= totalpages;
+ points = get_mm_rss(p->mm) + p->mm->nr_ptes +
+ get_mm_counter(p->mm, MM_SWAPENTS);
task_unlock(p);
/*
@@ -220,23 +210,20 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
* implementation used by LSMs.
*/
if (has_capability_noaudit(p, CAP_SYS_ADMIN))
- points -= 30;
+ points -= 30 * totalpages / 1000;
/*
* /proc/pid/oom_score_adj ranges from -1000 to +1000 such that it may
* either completely disable oom killing or always prefer a certain
* task.
*/
- points += p->signal->oom_score_adj;
+ points += p->signal->oom_score_adj * totalpages / 1000;
/*
- * Never return 0 for an eligible task that may be killed since it's
- * possible that no single user task uses more than 0.1% of memory and
- * no single admin tasks uses more than 3.0%.
+ * Never return 0 for an eligible task regardless of the root bonus and
+ * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
*/
- if (points <= 0)
- return 1;
- return (points < 1000) ? points : 1000;
+ return points ? points : 1;
}
/*
@@ -314,7 +301,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
{
struct task_struct *g, *p;
struct task_struct *chosen = NULL;
- *ppoints = 0;
+ unsigned long chosen_points = 0;
do_each_thread(g, p) {
unsigned int points;
@@ -354,7 +341,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
*/
if (p == current) {
chosen = p;
- *ppoints = 1000;
+ chosen_points = ULONG_MAX;
} else if (!force_kill) {
/*
* If this task is not being ptraced on exit,
@@ -367,12 +354,13 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
}
points = oom_badness(p, memcg, nodemask, totalpages);
- if (points > *ppoints) {
+ if (points > chosen_points) {
chosen = p;
- *ppoints = points;
+ chosen_points = points;
}
} while_each_thread(g, p);
+ *ppoints = chosen_points * 1000 / totalpages;
return chosen;
}
@@ -572,7 +560,7 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
}
check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
- limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT;
+ limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
read_lock(&tasklist_lock);
p = select_bad_process(&points, limit, memcg, NULL, false);
if (p && PTR_ERR(p) != -1UL)
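
The oom_badness() rework above keeps the score in raw pages (rss + page tables + swap entries), scales the root bonus and oom_score_adj by totalpages/1000, and only normalises back to the 0..1000 range when reporting *ppoints. A sketch of the arithmetic with made-up numbers; it uses signed math and a clamp for simplicity, so it is an approximation rather than the kernel function:

#include <stdio.h>

/* Badness in pages, roughly following the reworked scale. */
static unsigned long badness(unsigned long rss, unsigned long nr_ptes,
			     unsigned long swapents, long oom_score_adj,
			     int is_root, unsigned long totalpages)
{
	long points = (long)(rss + nr_ptes + swapents);

	if (is_root)
		points -= 30 * (long)totalpages / 1000;	/* ~3% bonus for root */
	points += oom_score_adj * (long)totalpages / 1000;

	return points > 0 ? (unsigned long)points : 1;	/* eligible tasks never score 0 */
}

int main(void)
{
	unsigned long totalpages = 1000000;	/* ~4 GB with 4 KiB pages */
	unsigned long p = badness(250000, 600, 12000, 0, 0, totalpages);

	/* the reported score is normalised back to the old 0..1000 range */
	printf("points=%lu, reported score ~ %lu\n", p, p * 1000 / totalpages);
	return 0;
}
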
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 26adea8ca2e..93d8d2f7108 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -204,7 +204,7 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
* Returns the global number of pages potentially available for dirty
* page cache. This is the base value for the global dirty limits.
*/
-unsigned long global_dirtyable_memory(void)
+static unsigned long global_dirtyable_memory(void)
{
unsigned long x;
@@ -1568,6 +1568,7 @@ void writeback_set_ratelimit(void)
unsigned long background_thresh;
unsigned long dirty_thresh;
global_dirty_limits(&background_thresh, &dirty_thresh);
+ global_dirty_limit = dirty_thresh;
ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
if (ratelimit_pages < 16)
ratelimit_pages = 16;
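
For reference, the per-CPU ratelimit set above is simply the dirty threshold spread across the online CPUs with a floor of 16 pages; writeback_set_ratelimit() now also refreshes global_dirty_limit first. A tiny sketch of the calculation with illustrative numbers:

#include <stdio.h>

static unsigned long ratelimit_pages(unsigned long dirty_thresh,
				     unsigned int online_cpus)
{
	unsigned long rl = dirty_thresh / (online_cpus * 32);

	return rl < 16 ? 16 : rl;	/* floor used by writeback_set_ratelimit() */
}

int main(void)
{
	/* e.g. a ~400 MB dirty threshold (in 4 KiB pages) on 8 CPUs */
	printf("%lu pages\n", ratelimit_pages(100000, 8));	/* 390 */
	return 0;
}
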
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1851df60043..6092f331b32 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -57,6 +57,7 @@
#include <linux/ftrace_event.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
+#include <linux/migrate.h>
#include <linux/page-debug-flags.h>
#include <asm/tlbflush.h>
@@ -218,7 +219,7 @@ EXPORT_SYMBOL(nr_online_nodes);
int page_group_by_mobility_disabled __read_mostly;
-static void set_pageblock_migratetype(struct page *page, int migratetype)
+void set_pageblock_migratetype(struct page *page, int migratetype)
{
if (unlikely(page_group_by_mobility_disabled))
@@ -513,10 +514,10 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
* free pages of length of (1 << order) and marked with _mapcount -2. Page's
* order is recorded in page_private(page) field.
* So when we are allocating or freeing one, we can derive the state of the
- * other. That is, if we allocate a small block, and both were
- * free, the remainder of the region must be split into blocks.
+ * other. That is, if we allocate a small block, and both were
+ * free, the remainder of the region must be split into blocks.
* If a block is freed, and its buddy is also free, then this
- * triggers coalescing into a block of larger size.
+ * triggers coalescing into a block of larger size.
*
* -- wli
*/
@@ -749,6 +750,24 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
__free_pages(page, order);
}
+#ifdef CONFIG_CMA
+/* Free the whole pageblock and set its migration type to MIGRATE_CMA. */
+void __init init_cma_reserved_pageblock(struct page *page)
+{
+ unsigned i = pageblock_nr_pages;
+ struct page *p = page;
+
+ do {
+ __ClearPageReserved(p);
+ set_page_count(p, 0);
+ } while (++p, --i);
+
+ set_page_refcounted(page);
+ set_pageblock_migratetype(page, MIGRATE_CMA);
+ __free_pages(page, pageblock_order);
+ totalram_pages += pageblock_nr_pages;
+}
+#endif
/*
* The order of subdivision here is critical for the IO subsystem.
@@ -874,11 +893,17 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
* This array describes the order lists are fallen back to when
* the free lists for the desirable migrate type are depleted
*/
-static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
- [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
- [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
- [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
- [MIGRATE_RESERVE] = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, /* Never used */
+static int fallbacks[MIGRATE_TYPES][4] = {
+ [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
+ [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
+#ifdef CONFIG_CMA
+ [MIGRATE_MOVABLE] = { MIGRATE_CMA, MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
+ [MIGRATE_CMA] = { MIGRATE_RESERVE }, /* Never used */
+#else
+ [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
+#endif
+ [MIGRATE_RESERVE] = { MIGRATE_RESERVE }, /* Never used */
+ [MIGRATE_ISOLATE] = { MIGRATE_RESERVE }, /* Never used */
};
/*
@@ -929,8 +954,8 @@ static int move_freepages(struct zone *zone,
return pages_moved;
}
-static int move_freepages_block(struct zone *zone, struct page *page,
- int migratetype)
+int move_freepages_block(struct zone *zone, struct page *page,
+ int migratetype)
{
unsigned long start_pfn, end_pfn;
struct page *start_page, *end_page;
@@ -973,12 +998,12 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
/* Find the largest possible block of pages in the other list */
for (current_order = MAX_ORDER-1; current_order >= order;
--current_order) {
- for (i = 0; i < MIGRATE_TYPES - 1; i++) {
+ for (i = 0;; i++) {
migratetype = fallbacks[start_migratetype][i];
/* MIGRATE_RESERVE handled later if necessary */
if (migratetype == MIGRATE_RESERVE)
- continue;
+ break;
area = &(zone->free_area[current_order]);
if (list_empty(&area->free_list[migratetype]))
@@ -993,11 +1018,18 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
* pages to the preferred allocation list. If falling
* back for a reclaimable kernel allocation, be more
* aggressive about taking ownership of free pages
+ *
+ * On the other hand, never change migration
+ * type of MIGRATE_CMA pageblocks nor move CMA
+ * pages on different free lists. We don't
+ * want unmovable pages to be allocated from
+ * MIGRATE_CMA areas.
*/
- if (unlikely(current_order >= (pageblock_order >> 1)) ||
- start_migratetype == MIGRATE_RECLAIMABLE ||
- page_group_by_mobility_disabled) {
- unsigned long pages;
+ if (!is_migrate_cma(migratetype) &&
+ (unlikely(current_order >= pageblock_order / 2) ||
+ start_migratetype == MIGRATE_RECLAIMABLE ||
+ page_group_by_mobility_disabled)) {
+ int pages;
pages = move_freepages_block(zone, page,
start_migratetype);
@@ -1015,11 +1047,14 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
rmv_page_order(page);
/* Take ownership for orders >= pageblock_order */
- if (current_order >= pageblock_order)
+ if (current_order >= pageblock_order &&
+ !is_migrate_cma(migratetype))
change_pageblock_range(page, current_order,
start_migratetype);
- expand(zone, page, order, current_order, area, migratetype);
+ expand(zone, page, order, current_order, area,
+ is_migrate_cma(migratetype)
+ ? migratetype : start_migratetype);
trace_mm_page_alloc_extfrag(page, order, current_order,
start_migratetype, migratetype);
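
The fallbacks[] rework in the hunks above drops the fixed MIGRATE_TYPES-1 iteration count and instead terminates each per-type list with MIGRATE_RESERVE, which the loop treats as a sentinel. A compact userspace sketch of that table walk; the migratetype names and table contents here are illustrative, not the kernel's definitions:

#include <stdio.h>

enum { UNMOVABLE, RECLAIMABLE, MOVABLE, CMA, RESERVE, ISOLATE, NR_TYPES };

static const char * const names[NR_TYPES] = {
	"unmovable", "reclaimable", "movable", "cma", "reserve", "isolate",
};

/* Per-type fallback order, each row terminated by the RESERVE sentinel. */
static const int fallbacks[NR_TYPES][4] = {
	[UNMOVABLE]   = { RECLAIMABLE, MOVABLE, RESERVE },
	[RECLAIMABLE] = { UNMOVABLE, MOVABLE, RESERVE },
	[MOVABLE]     = { CMA, RECLAIMABLE, UNMOVABLE, RESERVE },
	[CMA]         = { RESERVE },
	[RESERVE]     = { RESERVE },
	[ISOLATE]     = { RESERVE },
};

int main(void)
{
	int i, mt;

	/* walk MOVABLE's fallback list up to, but not including, the sentinel */
	for (i = 0; ; i++) {
		mt = fallbacks[MOVABLE][i];
		if (mt == RESERVE)
			break;
		printf("try the %s free list\n", names[mt]);
	}
	return 0;
}
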
@@ -1061,17 +1096,17 @@ retry_reserve:
return page;
}
-/*
+/*
* Obtain a specified number of elements from the buddy allocator, all under
* a single hold of the lock, for efficiency. Add them to the supplied list.
* Returns the number of new pages which were placed at *list.
*/
-static int rmqueue_bulk(struct zone *zone, unsigned int order,
+static int rmqueue_bulk(struct zone *zone, unsigned int order,
unsigned long count, struct list_head *list,
int migratetype, int cold)
{
- int i;
-
+ int mt = migratetype, i;
+
spin_lock(&zone->lock);
for (i = 0; i < count; ++i) {
struct page *page = __rmqueue(zone, order, migratetype);
@@ -1091,7 +1126,12 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
list_add(&page->lru, list);
else
list_add_tail(&page->lru, list);
- set_page_private(page, migratetype);
+ if (IS_ENABLED(CONFIG_CMA)) {
+ mt = get_pageblock_migratetype(page);
+ if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE)
+ mt = migratetype;
+ }
+ set_page_private(page, mt);
list = &page->lru;
}
__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
@@ -1371,8 +1411,12 @@ int split_free_page(struct page *page)
if (order >= pageblock_order - 1) {
struct page *endpage = page + (1 << order) - 1;
- for (; page < endpage; page += pageblock_nr_pages)
- set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+ for (; page < endpage; page += pageblock_nr_pages) {
+ int mt = get_pageblock_migratetype(page);
+ if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
+ set_pageblock_migratetype(page,
+ MIGRATE_MOVABLE);
+ }
}
return 1 << order;
@@ -2086,16 +2130,13 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
}
#endif /* CONFIG_COMPACTION */
-/* The really slow allocator path where we enter direct reclaim */
-static inline struct page *
-__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
- struct zonelist *zonelist, enum zone_type high_zoneidx,
- nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
- int migratetype, unsigned long *did_some_progress)
+/* Perform direct synchronous page reclaim */
+static int
+__perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
+ nodemask_t *nodemask)
{
- struct page *page = NULL;
struct reclaim_state reclaim_state;
- bool drained = false;
+ int progress;
cond_resched();
@@ -2106,7 +2147,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
reclaim_state.reclaimed_slab = 0;
current->reclaim_state = &reclaim_state;
- *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
+ progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
current->reclaim_state = NULL;
lockdep_clear_current_reclaim_state();
@@ -2114,6 +2155,21 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
cond_resched();
+ return progress;
+}
+
+/* The really slow allocator path where we enter direct reclaim */
+static inline struct page *
+__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
+ struct zonelist *zonelist, enum zone_type high_zoneidx,
+ nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+ int migratetype, unsigned long *did_some_progress)
+{
+ struct page *page = NULL;
+ bool drained = false;
+
+ *did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
+ nodemask);
if (unlikely(!(*did_some_progress)))
return NULL;
@@ -4244,25 +4300,24 @@ static inline void setup_usemap(struct pglist_data *pgdat,
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
-/* Return a sensible default order for the pageblock size. */
-static inline int pageblock_default_order(void)
-{
- if (HPAGE_SHIFT > PAGE_SHIFT)
- return HUGETLB_PAGE_ORDER;
-
- return MAX_ORDER-1;
-}
-
/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
-static inline void __init set_pageblock_order(unsigned int order)
+static inline void __init set_pageblock_order(void)
{
+ unsigned int order;
+
/* Check that pageblock_nr_pages has not already been setup */
if (pageblock_order)
return;
+ if (HPAGE_SHIFT > PAGE_SHIFT)
+ order = HUGETLB_PAGE_ORDER;
+ else
+ order = MAX_ORDER - 1;
+
/*
* Assume the largest contiguous order of interest is a huge page.
- * This value may be variable depending on boot parameters on IA64
+ * This value may be variable depending on boot parameters on IA64 and
+ * powerpc.
*/
pageblock_order = order;
}
@@ -4270,15 +4325,13 @@ static inline void __init set_pageblock_order(unsigned int order)
/*
* When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
- * and pageblock_default_order() are unused as pageblock_order is set
- * at compile-time. See include/linux/pageblock-flags.h for the values of
- * pageblock_order based on the kernel config
+ * is unused as pageblock_order is set at compile-time. See
+ * include/linux/pageblock-flags.h for the values of pageblock_order based on
+ * the kernel config
*/
-static inline int pageblock_default_order(unsigned int order)
+static inline void set_pageblock_order(void)
{
- return MAX_ORDER-1;
}
-#define set_pageblock_order(x) do {} while (0)
#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
@@ -4301,11 +4354,10 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
init_waitqueue_head(&pgdat->kswapd_wait);
pgdat->kswapd_max_order = 0;
pgdat_page_cgroup_init(pgdat);
-
+
for (j = 0; j < MAX_NR_ZONES; j++) {
struct zone *zone = pgdat->node_zones + j;
unsigned long size, realsize, memmap_pages;
- enum lru_list lru;
size = zone_spanned_pages_in_node(nid, j, zones_size);
realsize = size - zone_absent_pages_in_node(nid, j,
@@ -4355,18 +4407,13 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
zone->zone_pgdat = pgdat;
zone_pcp_init(zone);
- for_each_lru(lru)
- INIT_LIST_HEAD(&zone->lruvec.lists[lru]);
- zone->reclaim_stat.recent_rotated[0] = 0;
- zone->reclaim_stat.recent_rotated[1] = 0;
- zone->reclaim_stat.recent_scanned[0] = 0;
- zone->reclaim_stat.recent_scanned[1] = 0;
+ lruvec_init(&zone->lruvec, zone);
zap_zone_vm_stats(zone);
zone->flags = 0;
if (!size)
continue;
- set_pageblock_order(pageblock_default_order());
+ set_pageblock_order();
setup_usemap(pgdat, zone, size);
ret = init_currently_empty_zone(zone, zone_start_pfn,
size, MEMMAP_EARLY);
@@ -4759,7 +4806,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
find_zone_movable_pfns_for_nodes();
/* Print out the zone ranges */
- printk("Zone PFN ranges:\n");
+ printk("Zone ranges:\n");
for (i = 0; i < MAX_NR_ZONES; i++) {
if (i == ZONE_MOVABLE)
continue;
@@ -4768,22 +4815,25 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
arch_zone_highest_possible_pfn[i])
printk(KERN_CONT "empty\n");
else
- printk(KERN_CONT "%0#10lx -> %0#10lx\n",
- arch_zone_lowest_possible_pfn[i],
- arch_zone_highest_possible_pfn[i]);
+ printk(KERN_CONT "[mem %0#10lx-%0#10lx]\n",
+ arch_zone_lowest_possible_pfn[i] << PAGE_SHIFT,
+ (arch_zone_highest_possible_pfn[i]
+ << PAGE_SHIFT) - 1);
}
/* Print out the PFNs ZONE_MOVABLE begins at in each node */
- printk("Movable zone start PFN for each node\n");
+ printk("Movable zone start for each node\n");
for (i = 0; i < MAX_NUMNODES; i++) {
if (zone_movable_pfn[i])
- printk(" Node %d: %lu\n", i, zone_movable_pfn[i]);
+ printk(" Node %d: %#010lx\n", i,
+ zone_movable_pfn[i] << PAGE_SHIFT);
}
/* Print out the early_node_map[] */
- printk("Early memory PFN ranges\n");
+ printk("Early memory node ranges\n");
for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
- printk(" %3d: %0#10lx -> %0#10lx\n", nid, start_pfn, end_pfn);
+ printk(" node %3d: [mem %#010lx-%#010lx]\n", nid,
+ start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
/* Initialise every node */
mminit_verify_pageflags_layout();
@@ -4976,14 +5026,7 @@ static void setup_per_zone_lowmem_reserve(void)
calculate_totalreserve_pages();
}
-/**
- * setup_per_zone_wmarks - called when min_free_kbytes changes
- * or when memory is hot-{added|removed}
- *
- * Ensures that the watermark[min,low,high] values for each zone are set
- * correctly with respect to min_free_kbytes.
- */
-void setup_per_zone_wmarks(void)
+static void __setup_per_zone_wmarks(void)
{
unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
unsigned long lowmem_pages = 0;
@@ -5030,6 +5073,11 @@ void setup_per_zone_wmarks(void)
zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
+
+ zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
+ zone->watermark[WMARK_LOW] += cma_wmark_pages(zone);
+ zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone);
+
setup_zone_migrate_reserve(zone);
spin_unlock_irqrestore(&zone->lock, flags);
}
@@ -5038,6 +5086,20 @@ void setup_per_zone_wmarks(void)
calculate_totalreserve_pages();
}
+/**
+ * setup_per_zone_wmarks - called when min_free_kbytes changes
+ * or when memory is hot-{added|removed}
+ *
+ * Ensures that the watermark[min,low,high] values for each zone are set
+ * correctly with respect to min_free_kbytes.
+ */
+void setup_per_zone_wmarks(void)
+{
+ mutex_lock(&zonelists_mutex);
+ __setup_per_zone_wmarks();
+ mutex_unlock(&zonelists_mutex);
+}
+
/*
* The inactive anon list should be small enough that the VM never has to
* do too much work, but large enough that each inactive page has a chance
@@ -5415,14 +5477,16 @@ static int
__count_immobile_pages(struct zone *zone, struct page *page, int count)
{
unsigned long pfn, iter, found;
+ int mt;
+
/*
* For avoiding noise data, lru_add_drain_all() should be called
* If ZONE_MOVABLE, the zone never contains immobile pages
*/
if (zone_idx(zone) == ZONE_MOVABLE)
return true;
-
- if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
+ mt = get_pageblock_migratetype(page);
+ if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
return true;
pfn = page_to_pfn(page);
@@ -5539,7 +5603,7 @@ out:
return ret;
}
-void unset_migratetype_isolate(struct page *page)
+void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
struct zone *zone;
unsigned long flags;
@@ -5547,12 +5611,259 @@ void unset_migratetype_isolate(struct page *page)
spin_lock_irqsave(&zone->lock, flags);
if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
goto out;
- set_pageblock_migratetype(page, MIGRATE_MOVABLE);
- move_freepages_block(zone, page, MIGRATE_MOVABLE);
+ set_pageblock_migratetype(page, migratetype);
+ move_freepages_block(zone, page, migratetype);
out:
spin_unlock_irqrestore(&zone->lock, flags);
}
+#ifdef CONFIG_CMA
+
+static unsigned long pfn_max_align_down(unsigned long pfn)
+{
+ return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
+ pageblock_nr_pages) - 1);
+}
+
+static unsigned long pfn_max_align_up(unsigned long pfn)
+{
+ return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
+ pageblock_nr_pages));
+}
+
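/*
 * Illustrative worked example (not from this patch), with hypothetical
 * values MAX_ORDER_NR_PAGES = 1024 (0x400) and pageblock_nr_pages = 512:
 * the alignment used is max(1024, 512) = 1024 pages, so for pfn = 0x12345
 *
 *	pfn_max_align_down(0x12345) = 0x12345 & ~0x3ff = 0x12000
 *	pfn_max_align_up(0x12345)   = ALIGN(0x12345, 0x400) = 0x12400
 *
 * i.e. the requested range is widened to the larger of the pageblock and
 * MAX_ORDER granularities before isolation.
 */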
+static struct page *
+__alloc_contig_migrate_alloc(struct page *page, unsigned long private,
+ int **resultp)
+{
+ return alloc_page(GFP_HIGHUSER_MOVABLE);
+}
+
+/* [start, end) must belong to a single zone. */
+static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
+{
+ /* This function is based on compact_zone() from compaction.c. */
+
+ unsigned long pfn = start;
+ unsigned int tries = 0;
+ int ret = 0;
+
+ struct compact_control cc = {
+ .nr_migratepages = 0,
+ .order = -1,
+ .zone = page_zone(pfn_to_page(start)),
+ .mode = COMPACT_SYNC,
+ };
+ INIT_LIST_HEAD(&cc.migratepages);
+
+ migrate_prep_local();
+
+ while (pfn < end || !list_empty(&cc.migratepages)) {
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
+ if (list_empty(&cc.migratepages)) {
+ cc.nr_migratepages = 0;
+ pfn = isolate_migratepages_range(cc.zone, &cc,
+ pfn, end);
+ if (!pfn) {
+ ret = -EINTR;
+ break;
+ }
+ tries = 0;
+ } else if (++tries == 5) {
+ ret = ret < 0 ? ret : -EBUSY;
+ break;
+ }
+
+ ret = migrate_pages(&cc.migratepages,
+ __alloc_contig_migrate_alloc,
+ 0, false, MIGRATE_SYNC);
+ }
+
+ putback_lru_pages(&cc.migratepages);
+ return ret > 0 ? 0 : ret;
+}
+
+/*
+ * Update zone's cma pages counter used for watermark level calculation.
+ */
+static inline void __update_cma_watermarks(struct zone *zone, int count)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&zone->lock, flags);
+ zone->min_cma_pages += count;
+ spin_unlock_irqrestore(&zone->lock, flags);
+ setup_per_zone_wmarks();
+}
+
+/*
+ * Trigger memory pressure bump to reclaim some pages in order to be able to
+ * allocate 'count' pages in single-page units. Does similar work to
+ * the __alloc_pages_slowpath() function.
+ */
+static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
+{
+ enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+ struct zonelist *zonelist = node_zonelist(0, gfp_mask);
+ int did_some_progress = 0;
+ int order = 1;
+
+ /*
+ * Increase the watermark levels to force kswapd to do its job and
+ * stabilise at the new watermark level.
+ */
+ __update_cma_watermarks(zone, count);
+
+ /* Obey watermarks as if the page was being allocated */
+ while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0)) {
+ wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));
+
+ did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
+ NULL);
+ if (!did_some_progress) {
+ /* Exhausted what can be done so it's blamo time */
+ out_of_memory(zonelist, gfp_mask, order, NULL, false);
+ }
+ }
+
+ /* Restore original watermark levels. */
+ __update_cma_watermarks(zone, -count);
+
+ return count;
+}
+
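/*
 * Note on the reclaim helper above (editorial summary, not from this patch):
 *  1. __update_cma_watermarks(zone, count) bumps zone->min_cma_pages and
 *     re-runs setup_per_zone_wmarks(), so every watermark rises by
 *     cma_wmark_pages(zone);
 *  2. the loop wakes kswapd and calls __perform_reclaim() until the zone
 *     passes zone_watermark_ok() at the raised low watermark, falling back
 *     to out_of_memory() if reclaim makes no progress;
 *  3. __update_cma_watermarks(zone, -count) restores the original levels.
 */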
+/**
+ * alloc_contig_range() -- tries to allocate given range of pages
+ * @start: start PFN to allocate
+ * @end: one-past-the-last PFN to allocate
+ * @migratetype: migratetype of the underlying pageblocks (either
+ * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
+ * in range must have the same migratetype and it must
+ * be either of the two.
+ *
+ * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
+ * aligned; however, it is the caller's responsibility to guarantee
+ * that we are the only thread changing the migrate type of the
+ * pageblocks the pages fall in.
+ *
+ * The PFN range must belong to a single zone.
+ *
+ * Returns zero on success or a negative error code. On success, all
+ * pages whose PFN is in [start, end) are allocated for the caller
+ * and need to be freed with free_contig_range().
+ */
+int alloc_contig_range(unsigned long start, unsigned long end,
+ unsigned migratetype)
+{
+ struct zone *zone = page_zone(pfn_to_page(start));
+ unsigned long outer_start, outer_end;
+ int ret = 0, order;
+
+ /*
+ * What we do here is mark all pageblocks in the range as
+ * MIGRATE_ISOLATE. Because pageblocks and max-order pages may
+ * have different sizes, and due to the way the page allocator
+ * works, we align the range to the bigger of the two sizes so
+ * that the page allocator won't try to merge buddies from
+ * different pageblocks and change MIGRATE_ISOLATE to some
+ * other migration type.
+ *
+ * Once the pageblocks are marked as MIGRATE_ISOLATE, we
+ * migrate the pages from the unaligned range (i.e. the pages
+ * we are interested in). This puts all the pages in the
+ * range back into the page allocator as MIGRATE_ISOLATE.
+ *
+ * When this is done, we take the pages in the range from the
+ * page allocator, removing them from the buddy system. This
+ * way the page allocator will never consider using them.
+ *
+ * This lets us mark the pageblocks back as
+ * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
+ * aligned range but not in the unaligned, original range are
+ * put back into the page allocator so that the buddy system
+ * can use them.
+ */
+
+ ret = start_isolate_page_range(pfn_max_align_down(start),
+ pfn_max_align_up(end), migratetype);
+ if (ret)
+ goto done;
+
+ ret = __alloc_contig_migrate_range(start, end);
+ if (ret)
+ goto done;
+
+ /*
+ * Pages from [start, end) are within MAX_ORDER_NR_PAGES-aligned
+ * blocks that are marked as MIGRATE_ISOLATE. What's more, all
+ * pages in [start, end) are free in the page allocator. What we
+ * are going to do is allocate all pages from [start, end), that
+ * is, remove them from the page allocator.
+ *
+ * The only problem is that pages at the beginning and at the
+ * end of the interesting range may not be aligned with the pages
+ * the page allocator holds, i.e. they can be part of higher-order
+ * pages. Because of this, we reserve the bigger range and, once
+ * this is done, free the pages we are not interested in.
+ *
+ * We don't have to hold zone->lock here because the pages are
+ * isolated and thus won't be removed from the buddy system.
+ */
+
+ lru_add_drain_all();
+ drain_all_pages();
+
+ order = 0;
+ outer_start = start;
+ while (!PageBuddy(pfn_to_page(outer_start))) {
+ if (++order >= MAX_ORDER) {
+ ret = -EBUSY;
+ goto done;
+ }
+ outer_start &= ~0UL << order;
+ }
+
+ /* Make sure the range is really isolated. */
+ if (test_pages_isolated(outer_start, end)) {
+ pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n",
+ outer_start, end);
+ ret = -EBUSY;
+ goto done;
+ }
+
+ /*
+ * Reclaim enough pages to make sure that contiguous allocation
+ * will not starve the system.
+ */
+ __reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start);
+
+ /* Grab isolated pages from freelists. */
+ outer_end = isolate_freepages_range(outer_start, end);
+ if (!outer_end) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ /* Free head and tail (if any) */
+ if (start != outer_start)
+ free_contig_range(outer_start, start - outer_start);
+ if (end != outer_end)
+ free_contig_range(end, outer_end - end);
+
+done:
+ undo_isolate_page_range(pfn_max_align_down(start),
+ pfn_max_align_up(end), migratetype);
+ return ret;
+}
+
+void free_contig_range(unsigned long pfn, unsigned nr_pages)
+{
+ for (; nr_pages--; ++pfn)
+ __free_page(pfn_to_page(pfn));
+}
+#endif
+
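/*
 * Illustrative sketch (not from this patch): how a CMA-style caller might
 * drive the two entry points above, assuming CONFIG_CMA and that the
 * pageblocks covering [pfn, pfn + count) were already marked MIGRATE_CMA
 * at boot. The function names cma_sketch_alloc/cma_sketch_release are
 * hypothetical.
 */
static struct page *cma_sketch_alloc(unsigned long pfn, unsigned long count)
{
	/* Migrate everything out of the range and claim it from the buddy. */
	if (alloc_contig_range(pfn, pfn + count, MIGRATE_CMA))
		return NULL;
	return pfn_to_page(pfn);
}

static void cma_sketch_release(struct page *page, unsigned long count)
{
	/* Give the pages back one by one; see free_contig_range() above. */
	free_contig_range(page_to_pfn(page), count);
}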
#ifdef CONFIG_MEMORY_HOTREMOVE
/*
* All pages in the range must be isolated before calling this.
@@ -5621,7 +5932,7 @@ bool is_free_buddy_page(struct page *page)
}
#endif
-static struct trace_print_flags pageflag_names[] = {
+static const struct trace_print_flags pageflag_names[] = {
{1UL << PG_locked, "locked" },
{1UL << PG_error, "error" },
{1UL << PG_referenced, "referenced" },
@@ -5656,7 +5967,9 @@ static struct trace_print_flags pageflag_names[] = {
#ifdef CONFIG_MEMORY_FAILURE
{1UL << PG_hwpoison, "hwpoison" },
#endif
- {-1UL, NULL },
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ {1UL << PG_compound_lock, "compound_lock" },
+#endif
};
static void dump_page_flags(unsigned long flags)
@@ -5665,12 +5978,14 @@ static void dump_page_flags(unsigned long flags)
unsigned long mask;
int i;
+ BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS);
+
printk(KERN_ALERT "page flags: %#lx(", flags);
/* remove zone id */
flags &= (1UL << NR_PAGEFLAGS) - 1;
- for (i = 0; pageflag_names[i].name && flags; i++) {
+ for (i = 0; i < ARRAY_SIZE(pageflag_names) && flags; i++) {
mask = pageflag_names[i].mask;
if ((flags & mask) != mask)
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 4ae42bb4089..c9f04774f2b 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -24,6 +24,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* to be MIGRATE_ISOLATE.
* @start_pfn: The lower PFN of the range to be isolated.
* @end_pfn: The upper PFN of the range to be isolated.
+ * @migratetype: migrate type to set in error recovery.
*
* Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
* the range will never be allocated. Any free pages and pages freed in the
@@ -32,8 +33,8 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* start_pfn/end_pfn must be aligned to pageblock_order.
* Returns 0 on success and -EBUSY if any part of range cannot be isolated.
*/
-int
-start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
+int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+ unsigned migratetype)
{
unsigned long pfn;
unsigned long undo_pfn;
@@ -56,7 +57,7 @@ undo:
for (pfn = start_pfn;
pfn < undo_pfn;
pfn += pageblock_nr_pages)
- unset_migratetype_isolate(pfn_to_page(pfn));
+ unset_migratetype_isolate(pfn_to_page(pfn), migratetype);
return -EBUSY;
}
@@ -64,8 +65,8 @@ undo:
/*
* Make isolated pages available again.
*/
-int
-undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
+int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+ unsigned migratetype)
{
unsigned long pfn;
struct page *page;
@@ -77,7 +78,7 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
page = __first_valid_page(pfn, pageblock_nr_pages);
if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
continue;
- unset_migratetype_isolate(page);
+ unset_migratetype_isolate(page, migratetype);
}
return 0;
}
@@ -86,7 +87,7 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
* all pages in [start_pfn...end_pfn) must be in the same zone.
* zone->lock must be held before call this.
*
- * Returns 1 if all pages in the range is isolated.
+ * Returns 1 if all pages in the range are isolated.
*/
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 5a74fea182f..74c0ddaa6fa 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -109,8 +109,8 @@ pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-pmd_t pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
- pmd_t *pmdp)
+void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+ pmd_t *pmdp)
{
pmd_t pmd = pmd_mksplitting(*pmdp);
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
diff --git a/mm/readahead.c b/mm/readahead.c
index cbcbb02f3e2..ea8f8fa2164 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -17,6 +17,8 @@
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
+#include <linux/syscalls.h>
+#include <linux/file.h>
/*
* Initialise a struct file's readahead state. Assumes that the caller has
@@ -562,3 +564,41 @@ page_cache_async_readahead(struct address_space *mapping,
ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
+
+static ssize_t
+do_readahead(struct address_space *mapping, struct file *filp,
+ pgoff_t index, unsigned long nr)
+{
+ if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
+ return -EINVAL;
+
+ force_page_cache_readahead(mapping, filp, index, nr);
+ return 0;
+}
+
+SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
+{
+ ssize_t ret;
+ struct file *file;
+
+ ret = -EBADF;
+ file = fget(fd);
+ if (file) {
+ if (file->f_mode & FMODE_READ) {
+ struct address_space *mapping = file->f_mapping;
+ pgoff_t start = offset >> PAGE_CACHE_SHIFT;
+ pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
+ unsigned long len = end - start + 1;
+ ret = do_readahead(mapping, file, start, len);
+ }
+ fput(file);
+ }
+ return ret;
+}
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
+{
+ return SYSC_readahead((int) fd, offset, (size_t) count);
+}
+SYSCALL_ALIAS(sys_readahead, SyS_readahead);
+#endif
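/*
 * Illustrative sketch (not from this patch): the syscall above is what the
 * userspace readahead(2) wrapper reaches. A minimal usage example, error
 * handling kept to a bare minimum:
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Ask the kernel to populate the page cache for the first 1 MiB. */
	if (readahead(fd, 0, 1 << 20) != 0)
		perror("readahead");
	close(fd);
	return 0;
}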
diff --git a/mm/rmap.c b/mm/rmap.c
index 5b5ad584ffb..0f3b7cda2a2 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -755,12 +755,6 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
pte_unmap_unlock(pte, ptl);
}
- /* Pretend the page is referenced if the task has the
- swap token and is in the middle of a page fault. */
- if (mm != current->mm && has_swap_token(mm) &&
- rwsem_is_locked(&mm->mmap_sem))
- referenced++;
-
(*mapcount)--;
if (referenced)
diff --git a/mm/shmem.c b/mm/shmem.c
index d7b433a1ef5..d576b84d913 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -53,6 +53,7 @@ static struct vfsmount *shm_mnt;
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
+#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
@@ -83,12 +84,25 @@ struct shmem_xattr {
char value[0];
};
+/*
+ * shmem_fallocate and shmem_writepage communicate via inode->i_private
+ * (with i_mutex making sure that it has only one user at a time):
+ * we would prefer not to enlarge the shmem inode just for that.
+ */
+struct shmem_falloc {
+ pgoff_t start; /* start of range currently being fallocated */
+ pgoff_t next; /* the next page offset to be fallocated */
+ pgoff_t nr_falloced; /* how many new pages have been fallocated */
+ pgoff_t nr_unswapped; /* how often writepage refused to swap out */
+};
+
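/*
 * Editorial note (not from this patch): during a shmem_fallocate() of, say,
 * pages 10..19, start stays at 10 while next advances one past each page as
 * it is obtained; nr_falloced counts only pages that were newly allocated
 * (still !PageUptodate), and nr_unswapped is bumped by shmem_writepage()
 * each time it refuses to swap one of those pages out, which
 * shmem_fallocate() treats as a signal to give up with -ENOMEM.
 */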
/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
SGP_READ, /* don't exceed i_size, don't allocate page */
SGP_CACHE, /* don't exceed i_size, may allocate page */
SGP_DIRTY, /* like SGP_CACHE, but set new page dirty */
- SGP_WRITE, /* may exceed i_size, may allocate page */
+ SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */
+ SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */
};
#ifdef CONFIG_TMPFS
@@ -103,6 +117,9 @@ static unsigned long shmem_default_max_inodes(void)
}
#endif
+static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
+static int shmem_replace_page(struct page **pagep, gfp_t gfp,
+ struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
@@ -423,27 +440,31 @@ void shmem_unlock_mapping(struct address_space *mapping)
/*
* Remove range of pages and swap entries from radix tree, and free them.
+ * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
*/
-void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
+ bool unfalloc)
{
struct address_space *mapping = inode->i_mapping;
struct shmem_inode_info *info = SHMEM_I(inode);
pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
- pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
+ pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
+ unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
+ unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
struct pagevec pvec;
pgoff_t indices[PAGEVEC_SIZE];
long nr_swaps_freed = 0;
pgoff_t index;
int i;
- BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
+ if (lend == -1)
+ end = -1; /* unsigned, so actually very big */
pagevec_init(&pvec, 0);
index = start;
- while (index <= end) {
+ while (index < end) {
pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE),
pvec.pages, indices);
if (!pvec.nr)
break;
@@ -452,10 +473,12 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
struct page *page = pvec.pages[i];
index = indices[i];
- if (index > end)
+ if (index >= end)
break;
if (radix_tree_exceptional_entry(page)) {
+ if (unfalloc)
+ continue;
nr_swaps_freed += !shmem_free_swap(mapping,
index, page);
continue;
@@ -463,9 +486,11 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
if (!trylock_page(page))
continue;
- if (page->mapping == mapping) {
- VM_BUG_ON(PageWriteback(page));
- truncate_inode_page(mapping, page);
+ if (!unfalloc || !PageUptodate(page)) {
+ if (page->mapping == mapping) {
+ VM_BUG_ON(PageWriteback(page));
+ truncate_inode_page(mapping, page);
+ }
}
unlock_page(page);
}
@@ -476,30 +501,47 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
index++;
}
- if (partial) {
+ if (partial_start) {
struct page *page = NULL;
shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
if (page) {
- zero_user_segment(page, partial, PAGE_CACHE_SIZE);
+ unsigned int top = PAGE_CACHE_SIZE;
+ if (start > end) {
+ top = partial_end;
+ partial_end = 0;
+ }
+ zero_user_segment(page, partial_start, top);
+ set_page_dirty(page);
+ unlock_page(page);
+ page_cache_release(page);
+ }
+ }
+ if (partial_end) {
+ struct page *page = NULL;
+ shmem_getpage(inode, end, &page, SGP_READ, NULL);
+ if (page) {
+ zero_user_segment(page, 0, partial_end);
set_page_dirty(page);
unlock_page(page);
page_cache_release(page);
}
}
+ if (start >= end)
+ return;
index = start;
for ( ; ; ) {
cond_resched();
pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE),
pvec.pages, indices);
if (!pvec.nr) {
- if (index == start)
+ if (index == start || unfalloc)
break;
index = start;
continue;
}
- if (index == start && indices[0] > end) {
+ if ((index == start || unfalloc) && indices[0] >= end) {
shmem_deswap_pagevec(&pvec);
pagevec_release(&pvec);
break;
@@ -509,19 +551,23 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
struct page *page = pvec.pages[i];
index = indices[i];
- if (index > end)
+ if (index >= end)
break;
if (radix_tree_exceptional_entry(page)) {
+ if (unfalloc)
+ continue;
nr_swaps_freed += !shmem_free_swap(mapping,
index, page);
continue;
}
lock_page(page);
- if (page->mapping == mapping) {
- VM_BUG_ON(PageWriteback(page));
- truncate_inode_page(mapping, page);
+ if (!unfalloc || !PageUptodate(page)) {
+ if (page->mapping == mapping) {
+ VM_BUG_ON(PageWriteback(page));
+ truncate_inode_page(mapping, page);
+ }
}
unlock_page(page);
}
@@ -535,7 +581,11 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
info->swapped -= nr_swaps_freed;
shmem_recalc_inode(inode);
spin_unlock(&info->lock);
+}
+void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+{
+ shmem_undo_range(inode, lstart, lend, false);
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
@@ -597,19 +647,20 @@ static void shmem_evict_inode(struct inode *inode)
}
BUG_ON(inode->i_blocks);
shmem_free_inode(inode->i_sb);
- end_writeback(inode);
+ clear_inode(inode);
}
/*
* If swap found in inode, free it and move page from swapcache to filecache.
*/
static int shmem_unuse_inode(struct shmem_inode_info *info,
- swp_entry_t swap, struct page *page)
+ swp_entry_t swap, struct page **pagep)
{
struct address_space *mapping = info->vfs_inode.i_mapping;
void *radswap;
pgoff_t index;
- int error;
+ gfp_t gfp;
+ int error = 0;
radswap = swp_to_radix_entry(swap);
index = radix_tree_locate_item(&mapping->page_tree, radswap);
@@ -625,22 +676,37 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
if (shmem_swaplist.next != &info->swaplist)
list_move_tail(&shmem_swaplist, &info->swaplist);
+ gfp = mapping_gfp_mask(mapping);
+ if (shmem_should_replace_page(*pagep, gfp)) {
+ mutex_unlock(&shmem_swaplist_mutex);
+ error = shmem_replace_page(pagep, gfp, info, index);
+ mutex_lock(&shmem_swaplist_mutex);
+ /*
+ * We needed to drop mutex to make that restrictive page
+ * allocation; but the inode might already be freed by now,
+ * and we cannot refer to inode or mapping or info to check.
+ * However, we do hold page lock on the PageSwapCache page,
+ * so can check if that still has our reference remaining.
+ */
+ if (!page_swapcount(*pagep))
+ error = -ENOENT;
+ }
+
/*
* We rely on shmem_swaplist_mutex, not only to protect the swaplist,
* but also to hold up shmem_evict_inode(): so inode cannot be freed
* beneath us (pagelock doesn't help until the page is in pagecache).
*/
- error = shmem_add_to_page_cache(page, mapping, index,
+ if (!error)
+ error = shmem_add_to_page_cache(*pagep, mapping, index,
GFP_NOWAIT, radswap);
- /* which does mem_cgroup_uncharge_cache_page on error */
-
if (error != -ENOMEM) {
/*
* Truncation and eviction use free_swap_and_cache(), which
* only does trylock page: if we raced, best clean up here.
*/
- delete_from_swap_cache(page);
- set_page_dirty(page);
+ delete_from_swap_cache(*pagep);
+ set_page_dirty(*pagep);
if (!error) {
spin_lock(&info->lock);
info->swapped--;
@@ -660,7 +726,14 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
struct list_head *this, *next;
struct shmem_inode_info *info;
int found = 0;
- int error;
+ int error = 0;
+
+ /*
+ * There's a faint possibility that the swap page was replaced before
+ * the caller locked it: it will come back later with the right page.
+ */
+ if (unlikely(!PageSwapCache(page)))
+ goto out;
/*
* Charge page using GFP_KERNEL while we can wait, before taking
@@ -676,7 +749,7 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
list_for_each_safe(this, next, &shmem_swaplist) {
info = list_entry(this, struct shmem_inode_info, swaplist);
if (info->swapped)
- found = shmem_unuse_inode(info, swap, page);
+ found = shmem_unuse_inode(info, swap, &page);
else
list_del_init(&info->swaplist);
cond_resched();
@@ -685,8 +758,6 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
}
mutex_unlock(&shmem_swaplist_mutex);
- if (!found)
- mem_cgroup_uncharge_cache_page(page);
if (found < 0)
error = found;
out:
@@ -727,6 +798,38 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
WARN_ON_ONCE(1); /* Still happens? Tell us about it! */
goto redirty;
}
+
+ /*
+ * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
+ * value into swapfile.c, the only way we can correctly account for a
+ * fallocated page arriving here is now to initialize it and write it.
+ *
+ * That's okay for a page already fallocated earlier, but if we have
+ * not yet completed the fallocation, then (a) we want to keep track
+ * of this page in case we have to undo it, and (b) it may not be a
+ * good idea to continue anyway, once we're pushing into swap. So
+ * reactivate the page, and let shmem_fallocate() quit when too many.
+ */
+ if (!PageUptodate(page)) {
+ if (inode->i_private) {
+ struct shmem_falloc *shmem_falloc;
+ spin_lock(&inode->i_lock);
+ shmem_falloc = inode->i_private;
+ if (shmem_falloc &&
+ index >= shmem_falloc->start &&
+ index < shmem_falloc->next)
+ shmem_falloc->nr_unswapped++;
+ else
+ shmem_falloc = NULL;
+ spin_unlock(&inode->i_lock);
+ if (shmem_falloc)
+ goto redirty;
+ }
+ clear_highpage(page);
+ flush_dcache_page(page);
+ SetPageUptodate(page);
+ }
+
swap = get_swap_page();
if (!swap.val)
goto redirty;
@@ -856,6 +959,84 @@ static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
#endif
/*
+ * When a page is moved from swapcache to shmem filecache (either by the
+ * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
+ * shmem_unuse_inode()), it may have been read in earlier from swap, in
+ * ignorance of the mapping it belongs to. If that mapping has special
+ * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
+ * we may need to copy to a suitable page before moving to filecache.
+ *
+ * In a future release, this may well be extended to respect cpuset and
+ * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
+ * but for now it is a simple matter of zone.
+ */
+static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
+{
+ return page_zonenum(page) > gfp_zone(gfp);
+}
+
+static int shmem_replace_page(struct page **pagep, gfp_t gfp,
+ struct shmem_inode_info *info, pgoff_t index)
+{
+ struct page *oldpage, *newpage;
+ struct address_space *swap_mapping;
+ pgoff_t swap_index;
+ int error;
+
+ oldpage = *pagep;
+ swap_index = page_private(oldpage);
+ swap_mapping = page_mapping(oldpage);
+
+ /*
+ * We have arrived here because our zones are constrained, so don't
+ * limit chance of success by further cpuset and node constraints.
+ */
+ gfp &= ~GFP_CONSTRAINT_MASK;
+ newpage = shmem_alloc_page(gfp, info, index);
+ if (!newpage)
+ return -ENOMEM;
+ VM_BUG_ON(shmem_should_replace_page(newpage, gfp));
+
+ *pagep = newpage;
+ page_cache_get(newpage);
+ copy_highpage(newpage, oldpage);
+
+ VM_BUG_ON(!PageLocked(oldpage));
+ __set_page_locked(newpage);
+ VM_BUG_ON(!PageUptodate(oldpage));
+ SetPageUptodate(newpage);
+ VM_BUG_ON(!PageSwapBacked(oldpage));
+ SetPageSwapBacked(newpage);
+ VM_BUG_ON(!swap_index);
+ set_page_private(newpage, swap_index);
+ VM_BUG_ON(!PageSwapCache(oldpage));
+ SetPageSwapCache(newpage);
+
+ /*
+ * Our caller will very soon move newpage out of swapcache, but it's
+ * a nice clean interface for us to replace oldpage by newpage there.
+ */
+ spin_lock_irq(&swap_mapping->tree_lock);
+ error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
+ newpage);
+ __inc_zone_page_state(newpage, NR_FILE_PAGES);
+ __dec_zone_page_state(oldpage, NR_FILE_PAGES);
+ spin_unlock_irq(&swap_mapping->tree_lock);
+ BUG_ON(error);
+
+ mem_cgroup_replace_page_cache(oldpage, newpage);
+ lru_cache_add_anon(newpage);
+
+ ClearPageSwapCache(oldpage);
+ set_page_private(oldpage, 0);
+
+ unlock_page(oldpage);
+ page_cache_release(oldpage);
+ page_cache_release(oldpage);
+ return 0;
+}
+
+/*
* shmem_getpage_gfp - find page in cache, or get from swap, or allocate
*
* If we allocate a new one we do not mark it dirty. That's up to the
@@ -872,6 +1053,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
swp_entry_t swap;
int error;
int once = 0;
+ int alloced = 0;
if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
return -EFBIG;
@@ -883,19 +1065,21 @@ repeat:
page = NULL;
}
- if (sgp != SGP_WRITE &&
+ if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
error = -EINVAL;
goto failed;
}
+ /* fallocated page? */
+ if (page && !PageUptodate(page)) {
+ if (sgp != SGP_READ)
+ goto clear;
+ unlock_page(page);
+ page_cache_release(page);
+ page = NULL;
+ }
if (page || (sgp == SGP_READ && !swap.val)) {
- /*
- * Once we can get the page lock, it must be uptodate:
- * if there were an error in reading back from swap,
- * the page would not be inserted into the filecache.
- */
- BUG_ON(page && !PageUptodate(page));
*pagep = page;
return 0;
}
@@ -923,19 +1107,20 @@ repeat:
/* We have to do this with page locked to prevent races */
lock_page(page);
+ if (!PageSwapCache(page) || page->mapping) {
+ error = -EEXIST; /* try again */
+ goto failed;
+ }
if (!PageUptodate(page)) {
error = -EIO;
goto failed;
}
wait_on_page_writeback(page);
- /* Someone may have already done it for us */
- if (page->mapping) {
- if (page->mapping == mapping &&
- page->index == index)
- goto done;
- error = -EEXIST;
- goto failed;
+ if (shmem_should_replace_page(page, gfp)) {
+ error = shmem_replace_page(&page, gfp, info, index);
+ if (error)
+ goto failed;
}
error = mem_cgroup_cache_charge(page, current->mm,
@@ -991,19 +1176,36 @@ repeat:
inode->i_blocks += BLOCKS_PER_PAGE;
shmem_recalc_inode(inode);
spin_unlock(&info->lock);
+ alloced = true;
- clear_highpage(page);
- flush_dcache_page(page);
- SetPageUptodate(page);
+ /*
+ * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
+ */
+ if (sgp == SGP_FALLOC)
+ sgp = SGP_WRITE;
+clear:
+ /*
+ * Let SGP_WRITE caller clear ends if write does not fill page;
+ * but SGP_FALLOC on a page fallocated earlier must initialize
+ * it now, lest undo on failure cancel our earlier guarantee.
+ */
+ if (sgp != SGP_WRITE) {
+ clear_highpage(page);
+ flush_dcache_page(page);
+ SetPageUptodate(page);
+ }
if (sgp == SGP_DIRTY)
set_page_dirty(page);
}
-done:
+
/* Perhaps the file has been truncated since we checked */
- if (sgp != SGP_WRITE &&
+ if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
error = -EINVAL;
- goto trunc;
+ if (alloced)
+ goto trunc;
+ else
+ goto failed;
}
*pagep = page;
return 0;
@@ -1012,6 +1214,7 @@ done:
* Error recovery.
*/
trunc:
+ info = SHMEM_I(inode);
ClearPageDirty(page);
delete_from_page_cache(page);
spin_lock(&info->lock);
@@ -1019,6 +1222,7 @@ trunc:
inode->i_blocks -= BLOCKS_PER_PAGE;
spin_unlock(&info->lock);
decused:
+ sbinfo = SHMEM_SB(inode->i_sb);
if (sbinfo->max_blocks)
percpu_counter_add(&sbinfo->used_blocks, -1);
unacct:
@@ -1204,6 +1408,14 @@ shmem_write_end(struct file *file, struct address_space *mapping,
if (pos + copied > inode->i_size)
i_size_write(inode, pos + copied);
+ if (!PageUptodate(page)) {
+ if (copied < PAGE_CACHE_SIZE) {
+ unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ zero_user_segments(page, 0, from,
+ from + copied, PAGE_CACHE_SIZE);
+ }
+ SetPageUptodate(page);
+ }
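/*
 * Worked example (not from this patch): with PAGE_CACHE_SIZE = 4096,
 * pos = 100 and copied = 50, from = 100 and zero_user_segments() clears
 * [0, 100) and [150, 4096), so only the bytes actually written remain
 * non-zero before the page is marked Uptodate.
 */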
set_page_dirty(page);
unlock_page(page);
page_cache_release(page);
@@ -1462,6 +1674,199 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
return error;
}
+/*
+ * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
+ */
+static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
+ pgoff_t index, pgoff_t end, int origin)
+{
+ struct page *page;
+ struct pagevec pvec;
+ pgoff_t indices[PAGEVEC_SIZE];
+ bool done = false;
+ int i;
+
+ pagevec_init(&pvec, 0);
+ pvec.nr = 1; /* start small: we may be there already */
+ while (!done) {
+ pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+ pvec.nr, pvec.pages, indices);
+ if (!pvec.nr) {
+ if (origin == SEEK_DATA)
+ index = end;
+ break;
+ }
+ for (i = 0; i < pvec.nr; i++, index++) {
+ if (index < indices[i]) {
+ if (origin == SEEK_HOLE) {
+ done = true;
+ break;
+ }
+ index = indices[i];
+ }
+ page = pvec.pages[i];
+ if (page && !radix_tree_exceptional_entry(page)) {
+ if (!PageUptodate(page))
+ page = NULL;
+ }
+ if (index >= end ||
+ (page && origin == SEEK_DATA) ||
+ (!page && origin == SEEK_HOLE)) {
+ done = true;
+ break;
+ }
+ }
+ shmem_deswap_pagevec(&pvec);
+ pagevec_release(&pvec);
+ pvec.nr = PAGEVEC_SIZE;
+ cond_resched();
+ }
+ return index;
+}
+
+static loff_t shmem_file_llseek(struct file *file, loff_t offset, int origin)
+{
+ struct address_space *mapping;
+ struct inode *inode;
+ pgoff_t start, end;
+ loff_t new_offset;
+
+ if (origin != SEEK_DATA && origin != SEEK_HOLE)
+ return generic_file_llseek_size(file, offset, origin,
+ MAX_LFS_FILESIZE);
+ mapping = file->f_mapping;
+ inode = mapping->host;
+ mutex_lock(&inode->i_mutex);
+ /* We're holding i_mutex so we can access i_size directly */
+
+ if (offset < 0)
+ offset = -EINVAL;
+ else if (offset >= inode->i_size)
+ offset = -ENXIO;
+ else {
+ start = offset >> PAGE_CACHE_SHIFT;
+ end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ new_offset = shmem_seek_hole_data(mapping, start, end, origin);
+ new_offset <<= PAGE_CACHE_SHIFT;
+ if (new_offset > offset) {
+ if (new_offset < inode->i_size)
+ offset = new_offset;
+ else if (origin == SEEK_DATA)
+ offset = -ENXIO;
+ else
+ offset = inode->i_size;
+ }
+ }
+
+ if (offset >= 0 && offset != file->f_pos) {
+ file->f_pos = offset;
+ file->f_version = 0;
+ }
+ mutex_unlock(&inode->i_mutex);
+ return offset;
+}
+
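/*
 * Illustrative sketch (not from this patch): with shmem_file_llseek() wired
 * up as tmpfs's .llseek, userspace can probe sparse tmpfs files with the
 * usual SEEK_DATA/SEEK_HOLE interface. A minimal example:
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	off_t data, hole;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file-on-tmpfs>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	data = lseek(fd, 0, SEEK_DATA);	/* first allocated extent, or -1/ENXIO */
	hole = lseek(fd, 0, SEEK_HOLE);	/* first hole, or i_size if none */
	printf("first data at %lld, first hole at %lld\n",
	       (long long)data, (long long)hole);
	close(fd);
	return 0;
}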
+static long shmem_fallocate(struct file *file, int mode, loff_t offset,
+ loff_t len)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+ struct shmem_falloc shmem_falloc;
+ pgoff_t start, index, end;
+ int error;
+
+ mutex_lock(&inode->i_mutex);
+
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
+ struct address_space *mapping = file->f_mapping;
+ loff_t unmap_start = round_up(offset, PAGE_SIZE);
+ loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+
+ if ((u64)unmap_end > (u64)unmap_start)
+ unmap_mapping_range(mapping, unmap_start,
+ 1 + unmap_end - unmap_start, 0);
+ shmem_truncate_range(inode, offset, offset + len - 1);
+ /* No need to unmap again: hole-punching leaves COWed pages */
+ error = 0;
+ goto out;
+ }
+
+ /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
+ error = inode_newsize_ok(inode, offset + len);
+ if (error)
+ goto out;
+
+ start = offset >> PAGE_CACHE_SHIFT;
+ end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ /* Try to avoid a swapstorm if len is impossible to satisfy */
+ if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
+ error = -ENOSPC;
+ goto out;
+ }
+
+ shmem_falloc.start = start;
+ shmem_falloc.next = start;
+ shmem_falloc.nr_falloced = 0;
+ shmem_falloc.nr_unswapped = 0;
+ spin_lock(&inode->i_lock);
+ inode->i_private = &shmem_falloc;
+ spin_unlock(&inode->i_lock);
+
+ for (index = start; index < end; index++) {
+ struct page *page;
+
+ /*
+ * Good, the fallocate(2) manpage permits EINTR: we may have
+ * been interrupted because we are using up too much memory.
+ */
+ if (signal_pending(current))
+ error = -EINTR;
+ else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
+ error = -ENOMEM;
+ else
+ error = shmem_getpage(inode, index, &page, SGP_FALLOC,
+ NULL);
+ if (error) {
+ /* Remove the !PageUptodate pages we added */
+ shmem_undo_range(inode,
+ (loff_t)start << PAGE_CACHE_SHIFT,
+ (loff_t)index << PAGE_CACHE_SHIFT, true);
+ goto undone;
+ }
+
+ /*
+ * Inform shmem_writepage() how far we have reached.
+ * No need for lock or barrier: we have the page lock.
+ */
+ shmem_falloc.next++;
+ if (!PageUptodate(page))
+ shmem_falloc.nr_falloced++;
+
+ /*
+ * If !PageUptodate, leave it that way so that freeable pages
+ * can be recognized if we need to rollback on error later.
+ * But set_page_dirty so that memory pressure will swap rather
+ * than free the pages we are allocating (and SGP_CACHE pages
+ * might still be clean: we now need to mark those dirty too).
+ */
+ set_page_dirty(page);
+ unlock_page(page);
+ page_cache_release(page);
+ cond_resched();
+ }
+
+ if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
+ i_size_write(inode, offset + len);
+ inode->i_ctime = CURRENT_TIME;
+undone:
+ spin_lock(&inode->i_lock);
+ inode->i_private = NULL;
+ spin_unlock(&inode->i_lock);
+out:
+ mutex_unlock(&inode->i_mutex);
+ return error;
+}
+
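/*
 * Illustrative sketch (not from this patch): shmem_fallocate() above gives
 * tmpfs both preallocation and FALLOC_FL_PUNCH_HOLE. A minimal userspace
 * example that preallocates 8 MiB and then punches a 1 MiB hole at 4 MiB:
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file-on-tmpfs>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (fallocate(fd, 0, 0, 8 << 20) != 0)
		perror("fallocate");
	/* PUNCH_HOLE must be paired with KEEP_SIZE; frees the backing pages. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4 << 20, 1 << 20) != 0)
		perror("fallocate(PUNCH_HOLE)");
	close(fd);
	return 0;
}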
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
@@ -1665,6 +2070,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
kaddr = kmap_atomic(page);
memcpy(kaddr, symname, len);
kunmap_atomic(kaddr);
+ SetPageUptodate(page);
set_page_dirty(page);
unlock_page(page);
page_cache_release(page);
@@ -2270,6 +2676,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
}
}
sb->s_export_op = &shmem_export_ops;
+ sb->s_flags |= MS_NOSEC;
#else
sb->s_flags |= MS_NOUSER;
#endif
@@ -2364,7 +2771,7 @@ static const struct address_space_operations shmem_aops = {
static const struct file_operations shmem_file_operations = {
.mmap = shmem_mmap,
#ifdef CONFIG_TMPFS
- .llseek = generic_file_llseek,
+ .llseek = shmem_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
.aio_read = shmem_file_aio_read,
@@ -2372,12 +2779,12 @@ static const struct file_operations shmem_file_operations = {
.fsync = noop_fsync,
.splice_read = shmem_file_splice_read,
.splice_write = generic_file_splice_write,
+ .fallocate = shmem_fallocate,
#endif
};
static const struct inode_operations shmem_inode_operations = {
.setattr = shmem_setattr,
- .truncate_range = shmem_truncate_range,
#ifdef CONFIG_TMPFS_XATTR
.setxattr = shmem_setxattr,
.getxattr = shmem_getxattr,
diff --git a/mm/sparse.c b/mm/sparse.c
index a8bc7d364de..6a4bf9160e8 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -273,10 +273,10 @@ static unsigned long *__kmalloc_section_usemap(void)
#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
- unsigned long count)
+ unsigned long size)
{
- unsigned long section_nr;
-
+ pg_data_t *host_pgdat;
+ unsigned long goal;
/*
* A page may contain usemaps for other sections preventing the
* page being freed and making a section unremovable while
@@ -287,8 +287,10 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
* from the same section as the pgdat where possible to avoid
* this problem.
*/
- section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
- return alloc_bootmem_section(usemap_size() * count, section_nr);
+ goal = __pa(pgdat) & PAGE_SECTION_MASK;
+ host_pgdat = NODE_DATA(early_pfn_to_nid(goal >> PAGE_SHIFT));
+ return __alloc_bootmem_node_nopanic(host_pgdat, size,
+ SMP_CACHE_BYTES, goal);
}
static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -332,9 +334,9 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
- unsigned long count)
+ unsigned long size)
{
- return NULL;
+ return alloc_bootmem_node_nopanic(pgdat, size);
}
static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -352,13 +354,10 @@ static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map,
int size = usemap_size();
usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
- usemap_count);
+ size * usemap_count);
if (!usemap) {
- usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count);
- if (!usemap) {
- printk(KERN_WARNING "%s: allocation failed\n", __func__);
- return;
- }
+ printk(KERN_WARNING "%s: allocation failed\n", __func__);
+ return;
}
for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
diff --git a/mm/swap.c b/mm/swap.c
index 5c13f133897..4e7e2ec6707 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -47,13 +47,15 @@ static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
static void __page_cache_release(struct page *page)
{
if (PageLRU(page)) {
- unsigned long flags;
struct zone *zone = page_zone(page);
+ struct lruvec *lruvec;
+ unsigned long flags;
spin_lock_irqsave(&zone->lru_lock, flags);
+ lruvec = mem_cgroup_page_lruvec(page, zone);
VM_BUG_ON(!PageLRU(page));
__ClearPageLRU(page);
- del_page_from_lru_list(zone, page, page_off_lru(page));
+ del_page_from_lru_list(page, lruvec, page_off_lru(page));
spin_unlock_irqrestore(&zone->lru_lock, flags);
}
}
@@ -82,6 +84,25 @@ static void put_compound_page(struct page *page)
if (likely(page != page_head &&
get_page_unless_zero(page_head))) {
unsigned long flags;
+
+ /*
+ * THP cannot break up slab pages, so avoid taking
+ * compound_lock(). Slab performs non-atomic bit ops
+ * on page->flags for better performance. In particular
+ * slab_unlock() in slub used to be a hot path. It is
+ * still hot on arches that do not support
+ * this_cpu_cmpxchg_double().
+ */
+ if (PageSlab(page_head)) {
+ if (PageTail(page)) {
+ if (put_page_testzero(page_head))
+ VM_BUG_ON(1);
+
+ atomic_dec(&page->_mapcount);
+ goto skip_lock_tail;
+ } else
+ goto skip_lock;
+ }
/*
* page_head wasn't a dangling pointer but it
* may not be a head page anymore by the time
@@ -92,10 +113,10 @@ static void put_compound_page(struct page *page)
if (unlikely(!PageTail(page))) {
/* __split_huge_page_refcount run before us */
compound_unlock_irqrestore(page_head, flags);
- VM_BUG_ON(PageHead(page_head));
+skip_lock:
if (put_page_testzero(page_head))
__put_single_page(page_head);
- out_put_single:
+out_put_single:
if (put_page_testzero(page))
__put_single_page(page);
return;
@@ -115,6 +136,8 @@ static void put_compound_page(struct page *page)
VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
VM_BUG_ON(atomic_read(&page->_count) != 0);
compound_unlock_irqrestore(page_head, flags);
+
+skip_lock_tail:
if (put_page_testzero(page_head)) {
if (PageHead(page_head))
__put_compound_page(page_head);
@@ -162,6 +185,18 @@ bool __get_page_tail(struct page *page)
struct page *page_head = compound_trans_head(page);
if (likely(page != page_head && get_page_unless_zero(page_head))) {
+
+ /* Refer to the comment in put_compound_page(). */
+ if (PageSlab(page_head)) {
+ if (likely(PageTail(page))) {
+ __get_page_tail_foll(page, false);
+ return true;
+ } else {
+ put_page(page_head);
+ return false;
+ }
+ }
+
/*
* page_head wasn't a dangling pointer but it
* may not be a head page anymore by the time
@@ -202,11 +237,12 @@ void put_pages_list(struct list_head *pages)
EXPORT_SYMBOL(put_pages_list);
static void pagevec_lru_move_fn(struct pagevec *pvec,
- void (*move_fn)(struct page *page, void *arg),
- void *arg)
+ void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
+ void *arg)
{
int i;
struct zone *zone = NULL;
+ struct lruvec *lruvec;
unsigned long flags = 0;
for (i = 0; i < pagevec_count(pvec); i++) {
@@ -220,7 +256,8 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
spin_lock_irqsave(&zone->lru_lock, flags);
}
- (*move_fn)(page, arg);
+ lruvec = mem_cgroup_page_lruvec(page, zone);
+ (*move_fn)(page, lruvec, arg);
}
if (zone)
spin_unlock_irqrestore(&zone->lru_lock, flags);
@@ -228,16 +265,13 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
pagevec_reinit(pvec);
}
-static void pagevec_move_tail_fn(struct page *page, void *arg)
+static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
+ void *arg)
{
int *pgmoved = arg;
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
enum lru_list lru = page_lru_base_type(page);
- struct lruvec *lruvec;
-
- lruvec = mem_cgroup_lru_move_lists(page_zone(page),
- page, lru, lru);
list_move_tail(&page->lru, &lruvec->lists[lru]);
(*pgmoved)++;
}
@@ -276,41 +310,30 @@ void rotate_reclaimable_page(struct page *page)
}
}
-static void update_page_reclaim_stat(struct zone *zone, struct page *page,
+static void update_page_reclaim_stat(struct lruvec *lruvec,
int file, int rotated)
{
- struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
- struct zone_reclaim_stat *memcg_reclaim_stat;
-
- memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
+ struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
reclaim_stat->recent_scanned[file]++;
if (rotated)
reclaim_stat->recent_rotated[file]++;
-
- if (!memcg_reclaim_stat)
- return;
-
- memcg_reclaim_stat->recent_scanned[file]++;
- if (rotated)
- memcg_reclaim_stat->recent_rotated[file]++;
}
-static void __activate_page(struct page *page, void *arg)
+static void __activate_page(struct page *page, struct lruvec *lruvec,
+ void *arg)
{
- struct zone *zone = page_zone(page);
-
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
int file = page_is_file_cache(page);
int lru = page_lru_base_type(page);
- del_page_from_lru_list(zone, page, lru);
+ del_page_from_lru_list(page, lruvec, lru);
SetPageActive(page);
lru += LRU_ACTIVE;
- add_page_to_lru_list(zone, page, lru);
- __count_vm_event(PGACTIVATE);
+ add_page_to_lru_list(page, lruvec, lru);
- update_page_reclaim_stat(zone, page, file, 1);
+ __count_vm_event(PGACTIVATE);
+ update_page_reclaim_stat(lruvec, file, 1);
}
}
@@ -347,7 +370,7 @@ void activate_page(struct page *page)
struct zone *zone = page_zone(page);
spin_lock_irq(&zone->lru_lock);
- __activate_page(page, NULL);
+ __activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
spin_unlock_irq(&zone->lru_lock);
}
#endif
@@ -414,11 +437,13 @@ void lru_cache_add_lru(struct page *page, enum lru_list lru)
void add_page_to_unevictable_list(struct page *page)
{
struct zone *zone = page_zone(page);
+ struct lruvec *lruvec;
spin_lock_irq(&zone->lru_lock);
+ lruvec = mem_cgroup_page_lruvec(page, zone);
SetPageUnevictable(page);
SetPageLRU(page);
- add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
+ add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
spin_unlock_irq(&zone->lru_lock);
}
@@ -443,11 +468,11 @@ void add_page_to_unevictable_list(struct page *page)
* be written out by flusher threads, as this is much more effective
* than the single-page writeout from reclaim.
*/
-static void lru_deactivate_fn(struct page *page, void *arg)
+static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
+ void *arg)
{
int lru, file;
bool active;
- struct zone *zone = page_zone(page);
if (!PageLRU(page))
return;
@@ -460,13 +485,13 @@ static void lru_deactivate_fn(struct page *page, void *arg)
return;
active = PageActive(page);
-
file = page_is_file_cache(page);
lru = page_lru_base_type(page);
- del_page_from_lru_list(zone, page, lru + active);
+
+ del_page_from_lru_list(page, lruvec, lru + active);
ClearPageActive(page);
ClearPageReferenced(page);
- add_page_to_lru_list(zone, page, lru);
+ add_page_to_lru_list(page, lruvec, lru);
if (PageWriteback(page) || PageDirty(page)) {
/*
@@ -476,19 +501,17 @@ static void lru_deactivate_fn(struct page *page, void *arg)
*/
SetPageReclaim(page);
} else {
- struct lruvec *lruvec;
/*
* The page's writeback completed while it sat in the pagevec:
* we move the page to the tail of the inactive list.
*/
- lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
list_move_tail(&page->lru, &lruvec->lists[lru]);
__count_vm_event(PGROTATED);
}
if (active)
__count_vm_event(PGDEACTIVATE);
- update_page_reclaim_stat(zone, page, file, 0);
+ update_page_reclaim_stat(lruvec, file, 0);
}
/*
@@ -588,6 +611,7 @@ void release_pages(struct page **pages, int nr, int cold)
int i;
LIST_HEAD(pages_to_free);
struct zone *zone = NULL;
+ struct lruvec *lruvec;
unsigned long uninitialized_var(flags);
for (i = 0; i < nr; i++) {
@@ -615,9 +639,11 @@ void release_pages(struct page **pages, int nr, int cold)
zone = pagezone;
spin_lock_irqsave(&zone->lru_lock, flags);
}
+
+ lruvec = mem_cgroup_page_lruvec(page, zone);
VM_BUG_ON(!PageLRU(page));
__ClearPageLRU(page);
- del_page_from_lru_list(zone, page, page_off_lru(page));
+ del_page_from_lru_list(page, lruvec, page_off_lru(page));
}
list_add(&page->lru, &pages_to_free);
@@ -649,8 +675,8 @@ EXPORT_SYMBOL(__pagevec_release);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
-void lru_add_page_tail(struct zone* zone,
- struct page *page, struct page *page_tail)
+void lru_add_page_tail(struct page *page, struct page *page_tail,
+ struct lruvec *lruvec)
{
int uninitialized_var(active);
enum lru_list lru;
@@ -659,7 +685,8 @@ void lru_add_page_tail(struct zone* zone,
VM_BUG_ON(!PageHead(page));
VM_BUG_ON(PageCompound(page_tail));
VM_BUG_ON(PageLRU(page_tail));
- VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));
+ VM_BUG_ON(NR_CPUS != 1 &&
+ !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
SetPageLRU(page_tail);
@@ -688,20 +715,20 @@ void lru_add_page_tail(struct zone* zone,
* Use the standard add function to put page_tail on the list,
* but then correct its position so they all end up in order.
*/
- add_page_to_lru_list(zone, page_tail, lru);
+ add_page_to_lru_list(page_tail, lruvec, lru);
list_head = page_tail->lru.prev;
list_move_tail(&page_tail->lru, list_head);
}
if (!PageUnevictable(page))
- update_page_reclaim_stat(zone, page_tail, file, active);
+ update_page_reclaim_stat(lruvec, file, active);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-static void __pagevec_lru_add_fn(struct page *page, void *arg)
+static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
+ void *arg)
{
enum lru_list lru = (enum lru_list)arg;
- struct zone *zone = page_zone(page);
int file = is_file_lru(lru);
int active = is_active_lru(lru);
@@ -712,8 +739,8 @@ static void __pagevec_lru_add_fn(struct page *page, void *arg)
SetPageLRU(page);
if (active)
SetPageActive(page);
- add_page_to_lru_list(zone, page, lru);
- update_page_reclaim_stat(zone, page, file, active);
+ add_page_to_lru_list(page, lruvec, lru);
+ update_page_reclaim_stat(lruvec, file, active);
}
/*
diff --git a/mm/swapfile.c b/mm/swapfile.c
index fafc26d1b1d..457b10baef5 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -601,7 +601,7 @@ void swapcache_free(swp_entry_t entry, struct page *page)
* This does not give an exact answer when swap count is continued,
* but does include the high COUNT_CONTINUED flag to allow for that.
*/
-static inline int page_swapcount(struct page *page)
+int page_swapcount(struct page *page)
{
int count = 0;
struct swap_info_struct *p;
@@ -717,37 +717,6 @@ int free_swap_and_cache(swp_entry_t entry)
return p != NULL;
}
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-/**
- * mem_cgroup_count_swap_user - count the user of a swap entry
- * @ent: the swap entry to be checked
- * @pagep: the pointer for the swap cache page of the entry to be stored
- *
- * Returns the number of the user of the swap entry. The number is valid only
- * for swaps of anonymous pages.
- * If the entry is found on swap cache, the page is stored to pagep with
- * refcount of it being incremented.
- */
-int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
-{
- struct page *page;
- struct swap_info_struct *p;
- int count = 0;
-
- page = find_get_page(&swapper_space, ent.val);
- if (page)
- count += page_mapcount(page);
- p = swap_info_get(ent);
- if (p) {
- count += swap_count(p->swap_map[swp_offset(ent)]);
- spin_unlock(&swap_lock);
- }
-
- *pagep = page;
- return count;
-}
-#endif
-
#ifdef CONFIG_HIBERNATION
/*
* Find the swap type that corresponds to given device (if any).
diff --git a/mm/thrash.c b/mm/thrash.c
deleted file mode 100644
index 57ad495dbd5..00000000000
--- a/mm/thrash.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * mm/thrash.c
- *
- * Copyright (C) 2004, Red Hat, Inc.
- * Copyright (C) 2004, Rik van Riel <riel@redhat.com>
- * Released under the GPL, see the file COPYING for details.
- *
- * Simple token based thrashing protection, using the algorithm
- * described in: http://www.cse.ohio-state.edu/hpcs/WWW/HTML/publications/abs05-1.html
- *
- * Sep 2006, Ashwin Chaugule <ashwin.chaugule@celunite.com>
- * Improved algorithm to pass token:
- * Each task has a priority which is incremented if it contended
- * for the token in an interval less than its previous attempt.
- * If the token is acquired, that task's priority is boosted to prevent
- * the token from bouncing around too often and to let the task make
- * some progress in its execution.
- */
-
-#include <linux/jiffies.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/swap.h>
-#include <linux/memcontrol.h>
-
-#include <trace/events/vmscan.h>
-
-#define TOKEN_AGING_INTERVAL (0xFF)
-
-static DEFINE_SPINLOCK(swap_token_lock);
-struct mm_struct *swap_token_mm;
-static struct mem_cgroup *swap_token_memcg;
-
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
-{
- struct mem_cgroup *memcg;
-
- memcg = try_get_mem_cgroup_from_mm(mm);
- if (memcg)
- css_put(mem_cgroup_css(memcg));
-
- return memcg;
-}
-#else
-static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
-{
- return NULL;
-}
-#endif
-
-void grab_swap_token(struct mm_struct *mm)
-{
- int current_interval;
- unsigned int old_prio = mm->token_priority;
- static unsigned int global_faults;
- static unsigned int last_aging;
-
- global_faults++;
-
- current_interval = global_faults - mm->faultstamp;
-
- if (!spin_trylock(&swap_token_lock))
- return;
-
- /* First come first served */
- if (!swap_token_mm)
- goto replace_token;
-
- /*
- * Usually, we don't need priority aging because long interval faults
- * makes priority decrease quickly. But there is one exception. If the
- * token owner task is sleeping, it never make long interval faults.
- * Thus, we need a priority aging mechanism instead. The requirements
- * of priority aging are
- * 1) An aging interval is reasonable enough long. Too short aging
- * interval makes quick swap token lost and decrease performance.
- * 2) The swap token owner task have to get priority aging even if
- * it's under sleep.
- */
- if ((global_faults - last_aging) > TOKEN_AGING_INTERVAL) {
- swap_token_mm->token_priority /= 2;
- last_aging = global_faults;
- }
-
- if (mm == swap_token_mm) {
- mm->token_priority += 2;
- goto update_priority;
- }
-
- if (current_interval < mm->last_interval)
- mm->token_priority++;
- else {
- if (likely(mm->token_priority > 0))
- mm->token_priority--;
- }
-
- /* Check if we deserve the token */
- if (mm->token_priority > swap_token_mm->token_priority)
- goto replace_token;
-
-update_priority:
- trace_update_swap_token_priority(mm, old_prio, swap_token_mm);
-
-out:
- mm->faultstamp = global_faults;
- mm->last_interval = current_interval;
- spin_unlock(&swap_token_lock);
- return;
-
-replace_token:
- mm->token_priority += 2;
- trace_replace_swap_token(swap_token_mm, mm);
- swap_token_mm = mm;
- swap_token_memcg = swap_token_memcg_from_mm(mm);
- last_aging = global_faults;
- goto out;
-}
-
-/* Called on process exit. */
-void __put_swap_token(struct mm_struct *mm)
-{
- spin_lock(&swap_token_lock);
- if (likely(mm == swap_token_mm)) {
- trace_put_swap_token(swap_token_mm);
- swap_token_mm = NULL;
- swap_token_memcg = NULL;
- }
- spin_unlock(&swap_token_lock);
-}
-
-static bool match_memcg(struct mem_cgroup *a, struct mem_cgroup *b)
-{
- if (!a)
- return true;
- if (!b)
- return true;
- if (a == b)
- return true;
- return false;
-}
-
-void disable_swap_token(struct mem_cgroup *memcg)
-{
- /* memcg reclaim don't disable unrelated mm token. */
- if (match_memcg(memcg, swap_token_memcg)) {
- spin_lock(&swap_token_lock);
- if (match_memcg(memcg, swap_token_memcg)) {
- trace_disable_swap_token(swap_token_mm);
- swap_token_mm = NULL;
- swap_token_memcg = NULL;
- }
- spin_unlock(&swap_token_lock);
- }
-}
diff --git a/mm/truncate.c b/mm/truncate.c
index 61a183b89df..75801acdaac 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -602,31 +602,6 @@ int vmtruncate(struct inode *inode, loff_t newsize)
}
EXPORT_SYMBOL(vmtruncate);
-int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
-{
- struct address_space *mapping = inode->i_mapping;
- loff_t holebegin = round_up(lstart, PAGE_SIZE);
- loff_t holelen = 1 + lend - holebegin;
-
- /*
- * If the underlying filesystem is not going to provide
- * a way to truncate a range of blocks (punch a hole) -
- * we should return failure right now.
- */
- if (!inode->i_op->truncate_range)
- return -ENOSYS;
-
- mutex_lock(&inode->i_mutex);
- inode_dio_wait(inode);
- unmap_mapping_range(mapping, holebegin, holelen, 1);
- inode->i_op->truncate_range(inode, lstart, lend);
- /* unmap again to remove racily COWed private pages */
- unmap_mapping_range(mapping, holebegin, holelen, 1);
- mutex_unlock(&inode->i_mutex);
-
- return 0;
-}
-
/**
* truncate_pagecache_range - unmap and remove pagecache that is hole-punched
* @inode: inode
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 94dff883b44..2aad49981b5 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1185,9 +1185,10 @@ void __init vmalloc_init(void)
/* Import existing vmlist entries. */
for (tmp = vmlist; tmp; tmp = tmp->next) {
va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
- va->flags = tmp->flags | VM_VM_AREA;
+ va->flags = VM_VM_AREA;
va->va_start = (unsigned long)tmp->addr;
va->va_end = va->va_start + tmp->size;
+ va->vm = tmp;
__insert_vmap_area(va);
}
@@ -2375,8 +2376,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
return NULL;
}
- vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
- vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
+ vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
+ vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
if (!vas || !vms)
goto err_free2;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 33dc256033b..eeb3bc9d1d3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -53,24 +53,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>
-/*
- * reclaim_mode determines how the inactive list is shrunk
- * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
- * RECLAIM_MODE_ASYNC: Do not block
- * RECLAIM_MODE_SYNC: Allow blocking e.g. call wait_on_page_writeback
- * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference
- * page from the LRU and reclaim all pages within a
- * naturally aligned range
- * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
- * order-0 pages and then compact the zone
- */
-typedef unsigned __bitwise__ reclaim_mode_t;
-#define RECLAIM_MODE_SINGLE ((__force reclaim_mode_t)0x01u)
-#define RECLAIM_MODE_ASYNC ((__force reclaim_mode_t)0x02u)
-#define RECLAIM_MODE_SYNC ((__force reclaim_mode_t)0x04u)
-#define RECLAIM_MODE_LUMPYRECLAIM ((__force reclaim_mode_t)0x08u)
-#define RECLAIM_MODE_COMPACTION ((__force reclaim_mode_t)0x10u)
-
struct scan_control {
/* Incremented by the number of inactive pages that were scanned */
unsigned long nr_scanned;
@@ -96,11 +78,8 @@ struct scan_control {
int order;
- /*
- * Intend to reclaim enough continuous memory rather than reclaim
- * enough amount of memory. i.e, mode for high order allocation.
- */
- reclaim_mode_t reclaim_mode;
+ /* Scan (total_size >> priority) pages at once */
+ int priority;
/*
* The memory cgroup that hit its limit and as a result is the
@@ -115,11 +94,6 @@ struct scan_control {
nodemask_t *nodemask;
};
-struct mem_cgroup_zone {
- struct mem_cgroup *mem_cgroup;
- struct zone *zone;
-};
-
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
#ifdef ARCH_HAS_PREFETCH
@@ -164,44 +138,21 @@ static bool global_reclaim(struct scan_control *sc)
{
return !sc->target_mem_cgroup;
}
-
-static bool scanning_global_lru(struct mem_cgroup_zone *mz)
-{
- return !mz->mem_cgroup;
-}
#else
static bool global_reclaim(struct scan_control *sc)
{
return true;
}
-
-static bool scanning_global_lru(struct mem_cgroup_zone *mz)
-{
- return true;
-}
#endif
-static struct zone_reclaim_stat *get_reclaim_stat(struct mem_cgroup_zone *mz)
-{
- if (!scanning_global_lru(mz))
- return mem_cgroup_get_reclaim_stat(mz->mem_cgroup, mz->zone);
-
- return &mz->zone->reclaim_stat;
-}
-
-static unsigned long zone_nr_lru_pages(struct mem_cgroup_zone *mz,
- enum lru_list lru)
+static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
- if (!scanning_global_lru(mz))
- return mem_cgroup_zone_nr_lru_pages(mz->mem_cgroup,
- zone_to_nid(mz->zone),
- zone_idx(mz->zone),
- BIT(lru));
+ if (!mem_cgroup_disabled())
+ return mem_cgroup_get_lru_size(lruvec, lru);
- return zone_page_state(mz->zone, NR_LRU_BASE + lru);
+ return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
}
-
/*
* Add a shrinker callback to be called from the vm
*/
@@ -364,39 +315,6 @@ out:
return ret;
}
-static void set_reclaim_mode(int priority, struct scan_control *sc,
- bool sync)
-{
- reclaim_mode_t syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC;
-
- /*
- * Initially assume we are entering either lumpy reclaim or
- * reclaim/compaction.Depending on the order, we will either set the
- * sync mode or just reclaim order-0 pages later.
- */
- if (COMPACTION_BUILD)
- sc->reclaim_mode = RECLAIM_MODE_COMPACTION;
- else
- sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM;
-
- /*
- * Avoid using lumpy reclaim or reclaim/compaction if possible by
- * restricting when its set to either costly allocations or when
- * under memory pressure
- */
- if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
- sc->reclaim_mode |= syncmode;
- else if (sc->order && priority < DEF_PRIORITY - 2)
- sc->reclaim_mode |= syncmode;
- else
- sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
-}
-
-static void reset_reclaim_mode(struct scan_control *sc)
-{
- sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
-}
-
static inline int is_page_cache_freeable(struct page *page)
{
/*
@@ -416,10 +334,6 @@ static int may_write_to_queue(struct backing_dev_info *bdi,
return 1;
if (bdi == current->backing_dev_info)
return 1;
-
- /* lumpy reclaim for hugepage often need a lot of write */
- if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
- return 1;
return 0;
}
@@ -523,8 +437,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
/* synchronous write or broken a_ops? */
ClearPageReclaim(page);
}
- trace_mm_vmscan_writepage(page,
- trace_reclaim_flags(page, sc->reclaim_mode));
+ trace_mm_vmscan_writepage(page, trace_reclaim_flags(page));
inc_zone_page_state(page, NR_VMSCAN_WRITE);
return PAGE_SUCCESS;
}
@@ -701,19 +614,15 @@ enum page_references {
};
static enum page_references page_check_references(struct page *page,
- struct mem_cgroup_zone *mz,
struct scan_control *sc)
{
int referenced_ptes, referenced_page;
unsigned long vm_flags;
- referenced_ptes = page_referenced(page, 1, mz->mem_cgroup, &vm_flags);
+ referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
+ &vm_flags);
referenced_page = TestClearPageReferenced(page);
- /* Lumpy reclaim - ignore references */
- if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
- return PAGEREF_RECLAIM;
-
/*
* Mlock lost the isolation race with us. Let try_to_unmap()
* move the page to the unevictable list.
@@ -722,7 +631,7 @@ static enum page_references page_check_references(struct page *page,
return PAGEREF_RECLAIM;
if (referenced_ptes) {
- if (PageAnon(page))
+ if (PageSwapBacked(page))
return PAGEREF_ACTIVATE;
/*
* All mapped pages start out with page table
@@ -763,9 +672,8 @@ static enum page_references page_check_references(struct page *page,
* shrink_page_list() returns the number of reclaimed pages
*/
static unsigned long shrink_page_list(struct list_head *page_list,
- struct mem_cgroup_zone *mz,
+ struct zone *zone,
struct scan_control *sc,
- int priority,
unsigned long *ret_nr_dirty,
unsigned long *ret_nr_writeback)
{
@@ -794,7 +702,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
goto keep;
VM_BUG_ON(PageActive(page));
- VM_BUG_ON(page_zone(page) != mz->zone);
+ VM_BUG_ON(page_zone(page) != zone);
sc->nr_scanned++;
@@ -813,22 +721,11 @@ static unsigned long shrink_page_list(struct list_head *page_list,
if (PageWriteback(page)) {
nr_writeback++;
- /*
- * Synchronous reclaim cannot queue pages for
- * writeback due to the possibility of stack overflow
- * but if it encounters a page under writeback, wait
- * for the IO to complete.
- */
- if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) &&
- may_enter_fs)
- wait_on_page_writeback(page);
- else {
- unlock_page(page);
- goto keep_lumpy;
- }
+ unlock_page(page);
+ goto keep;
}
- references = page_check_references(page, mz, sc);
+ references = page_check_references(page, sc);
switch (references) {
case PAGEREF_ACTIVATE:
goto activate_locked;
@@ -879,7 +776,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
* unless under significant pressure.
*/
if (page_is_file_cache(page) &&
- (!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) {
+ (!current_is_kswapd() ||
+ sc->priority >= DEF_PRIORITY - 2)) {
/*
* Immediately reclaim when written back.
* Similar in principal to deactivate_page()
@@ -908,7 +806,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
goto activate_locked;
case PAGE_SUCCESS:
if (PageWriteback(page))
- goto keep_lumpy;
+ goto keep;
if (PageDirty(page))
goto keep;
@@ -994,7 +892,6 @@ cull_mlocked:
try_to_free_swap(page);
unlock_page(page);
putback_lru_page(page);
- reset_reclaim_mode(sc);
continue;
activate_locked:
@@ -1007,8 +904,6 @@ activate_locked:
keep_locked:
unlock_page(page);
keep:
- reset_reclaim_mode(sc);
-keep_lumpy:
list_add(&page->lru, &ret_pages);
VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
}
@@ -1020,7 +915,7 @@ keep_lumpy:
* will encounter the same problem
*/
if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc))
- zone_set_flag(mz->zone, ZONE_CONGESTED);
+ zone_set_flag(zone, ZONE_CONGESTED);
free_hot_cold_page_list(&free_pages, 1);
@@ -1041,34 +936,15 @@ keep_lumpy:
*
* returns 0 on success, -ve errno on failure.
*/
-int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
+int __isolate_lru_page(struct page *page, isolate_mode_t mode)
{
- bool all_lru_mode;
int ret = -EINVAL;
/* Only take pages on the LRU. */
if (!PageLRU(page))
return ret;
- all_lru_mode = (mode & (ISOLATE_ACTIVE|ISOLATE_INACTIVE)) ==
- (ISOLATE_ACTIVE|ISOLATE_INACTIVE);
-
- /*
- * When checking the active state, we need to be sure we are
- * dealing with comparible boolean values. Take the logical not
- * of each.
- */
- if (!all_lru_mode && !PageActive(page) != !(mode & ISOLATE_ACTIVE))
- return ret;
-
- if (!all_lru_mode && !!page_is_file_cache(page) != file)
- return ret;
-
- /*
- * When this function is being called for lumpy reclaim, we
- * initially look into all LRU pages, active, inactive and
- * unevictable; only give shrink_page_list evictable pages.
- */
+ /* Do not give back unevictable pages for compaction */
if (PageUnevictable(page))
return ret;
@@ -1135,54 +1011,39 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
* Appropriate locks must be held before calling this function.
*
* @nr_to_scan: The number of pages to look through on the list.
- * @mz: The mem_cgroup_zone to pull pages from.
+ * @lruvec: The LRU vector to pull pages from.
* @dst: The temp list to put pages on to.
* @nr_scanned: The number of pages that were scanned.
* @sc: The scan_control struct for this reclaim session
* @mode: One of the LRU isolation modes
- * @active: True [1] if isolating active pages
- * @file: True [1] if isolating file [!anon] pages
+ * @lru: LRU list id for isolating
*
* returns how many pages were moved onto *@dst.
*/
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
- struct mem_cgroup_zone *mz, struct list_head *dst,
+ struct lruvec *lruvec, struct list_head *dst,
unsigned long *nr_scanned, struct scan_control *sc,
- isolate_mode_t mode, int active, int file)
+ isolate_mode_t mode, enum lru_list lru)
{
- struct lruvec *lruvec;
- struct list_head *src;
+ struct list_head *src = &lruvec->lists[lru];
unsigned long nr_taken = 0;
- unsigned long nr_lumpy_taken = 0;
- unsigned long nr_lumpy_dirty = 0;
- unsigned long nr_lumpy_failed = 0;
unsigned long scan;
- int lru = LRU_BASE;
-
- lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
- if (active)
- lru += LRU_ACTIVE;
- if (file)
- lru += LRU_FILE;
- src = &lruvec->lists[lru];
for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
struct page *page;
- unsigned long pfn;
- unsigned long end_pfn;
- unsigned long page_pfn;
- int zone_id;
+ int nr_pages;
page = lru_to_page(src);
prefetchw_prev_lru_page(page, src, flags);
VM_BUG_ON(!PageLRU(page));
- switch (__isolate_lru_page(page, mode, file)) {
+ switch (__isolate_lru_page(page, mode)) {
case 0:
- mem_cgroup_lru_del(page);
+ nr_pages = hpage_nr_pages(page);
+ mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
list_move(&page->lru, dst);
- nr_taken += hpage_nr_pages(page);
+ nr_taken += nr_pages;
break;
case -EBUSY:
@@ -1193,93 +1054,11 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
default:
BUG();
}
-
- if (!sc->order || !(sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM))
- continue;
-
- /*
- * Attempt to take all pages in the order aligned region
- * surrounding the tag page. Only take those pages of
- * the same active state as that tag page. We may safely
- * round the target page pfn down to the requested order
- * as the mem_map is guaranteed valid out to MAX_ORDER,
- * where that page is in a different zone we will detect
- * it from its zone id and abort this block scan.
- */
- zone_id = page_zone_id(page);
- page_pfn = page_to_pfn(page);
- pfn = page_pfn & ~((1 << sc->order) - 1);
- end_pfn = pfn + (1 << sc->order);
- for (; pfn < end_pfn; pfn++) {
- struct page *cursor_page;
-
- /* The target page is in the block, ignore it. */
- if (unlikely(pfn == page_pfn))
- continue;
-
- /* Avoid holes within the zone. */
- if (unlikely(!pfn_valid_within(pfn)))
- break;
-
- cursor_page = pfn_to_page(pfn);
-
- /* Check that we have not crossed a zone boundary. */
- if (unlikely(page_zone_id(cursor_page) != zone_id))
- break;
-
- /*
- * If we don't have enough swap space, reclaiming of
- * anon page which don't already have a swap slot is
- * pointless.
- */
- if (nr_swap_pages <= 0 && PageSwapBacked(cursor_page) &&
- !PageSwapCache(cursor_page))
- break;
-
- if (__isolate_lru_page(cursor_page, mode, file) == 0) {
- unsigned int isolated_pages;
-
- mem_cgroup_lru_del(cursor_page);
- list_move(&cursor_page->lru, dst);
- isolated_pages = hpage_nr_pages(cursor_page);
- nr_taken += isolated_pages;
- nr_lumpy_taken += isolated_pages;
- if (PageDirty(cursor_page))
- nr_lumpy_dirty += isolated_pages;
- scan++;
- pfn += isolated_pages - 1;
- } else {
- /*
- * Check if the page is freed already.
- *
- * We can't use page_count() as that
- * requires compound_head and we don't
- * have a pin on the page here. If a
- * page is tail, we may or may not
- * have isolated the head, so assume
- * it's not free, it'd be tricky to
- * track the head status without a
- * page pin.
- */
- if (!PageTail(cursor_page) &&
- !atomic_read(&cursor_page->_count))
- continue;
- break;
- }
- }
-
- /* If we break out of the loop above, lumpy reclaim failed */
- if (pfn < end_pfn)
- nr_lumpy_failed++;
}
*nr_scanned = scan;
-
- trace_mm_vmscan_lru_isolate(sc->order,
- nr_to_scan, scan,
- nr_taken,
- nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
- mode, file);
+ trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
+ nr_taken, mode, is_file_lru(lru));
return nr_taken;
}
@@ -1316,15 +1095,16 @@ int isolate_lru_page(struct page *page)
if (PageLRU(page)) {
struct zone *zone = page_zone(page);
+ struct lruvec *lruvec;
spin_lock_irq(&zone->lru_lock);
+ lruvec = mem_cgroup_page_lruvec(page, zone);
if (PageLRU(page)) {
int lru = page_lru(page);
- ret = 0;
get_page(page);
ClearPageLRU(page);
-
- del_page_from_lru_list(zone, page, lru);
+ del_page_from_lru_list(page, lruvec, lru);
+ ret = 0;
}
spin_unlock_irq(&zone->lru_lock);
}
@@ -1357,11 +1137,10 @@ static int too_many_isolated(struct zone *zone, int file,
}
static noinline_for_stack void
-putback_inactive_pages(struct mem_cgroup_zone *mz,
- struct list_head *page_list)
+putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
{
- struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
- struct zone *zone = mz->zone;
+ struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
+ struct zone *zone = lruvec_zone(lruvec);
LIST_HEAD(pages_to_free);
/*
@@ -1379,9 +1158,13 @@ putback_inactive_pages(struct mem_cgroup_zone *mz,
spin_lock_irq(&zone->lru_lock);
continue;
}
+
+ lruvec = mem_cgroup_page_lruvec(page, zone);
+
SetPageLRU(page);
lru = page_lru(page);
- add_page_to_lru_list(zone, page, lru);
+ add_page_to_lru_list(page, lruvec, lru);
+
if (is_active_lru(lru)) {
int file = is_file_lru(lru);
int numpages = hpage_nr_pages(page);
@@ -1390,7 +1173,7 @@ putback_inactive_pages(struct mem_cgroup_zone *mz,
if (put_page_testzero(page)) {
__ClearPageLRU(page);
__ClearPageActive(page);
- del_page_from_lru_list(zone, page, lru);
+ del_page_from_lru_list(page, lruvec, lru);
if (unlikely(PageCompound(page))) {
spin_unlock_irq(&zone->lru_lock);
@@ -1407,112 +1190,24 @@ putback_inactive_pages(struct mem_cgroup_zone *mz,
list_splice(&pages_to_free, page_list);
}
-static noinline_for_stack void
-update_isolated_counts(struct mem_cgroup_zone *mz,
- struct list_head *page_list,
- unsigned long *nr_anon,
- unsigned long *nr_file)
-{
- struct zone *zone = mz->zone;
- unsigned int count[NR_LRU_LISTS] = { 0, };
- unsigned long nr_active = 0;
- struct page *page;
- int lru;
-
- /*
- * Count pages and clear active flags
- */
- list_for_each_entry(page, page_list, lru) {
- int numpages = hpage_nr_pages(page);
- lru = page_lru_base_type(page);
- if (PageActive(page)) {
- lru += LRU_ACTIVE;
- ClearPageActive(page);
- nr_active += numpages;
- }
- count[lru] += numpages;
- }
-
- preempt_disable();
- __count_vm_events(PGDEACTIVATE, nr_active);
-
- __mod_zone_page_state(zone, NR_ACTIVE_FILE,
- -count[LRU_ACTIVE_FILE]);
- __mod_zone_page_state(zone, NR_INACTIVE_FILE,
- -count[LRU_INACTIVE_FILE]);
- __mod_zone_page_state(zone, NR_ACTIVE_ANON,
- -count[LRU_ACTIVE_ANON]);
- __mod_zone_page_state(zone, NR_INACTIVE_ANON,
- -count[LRU_INACTIVE_ANON]);
-
- *nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
- *nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
-
- __mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
- __mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
- preempt_enable();
-}
-
-/*
- * Returns true if a direct reclaim should wait on pages under writeback.
- *
- * If we are direct reclaiming for contiguous pages and we do not reclaim
- * everything in the list, try again and wait for writeback IO to complete.
- * This will stall high-order allocations noticeably. Only do that when really
- * need to free the pages under high memory pressure.
- */
-static inline bool should_reclaim_stall(unsigned long nr_taken,
- unsigned long nr_freed,
- int priority,
- struct scan_control *sc)
-{
- int lumpy_stall_priority;
-
- /* kswapd should not stall on sync IO */
- if (current_is_kswapd())
- return false;
-
- /* Only stall on lumpy reclaim */
- if (sc->reclaim_mode & RECLAIM_MODE_SINGLE)
- return false;
-
- /* If we have reclaimed everything on the isolated list, no stall */
- if (nr_freed == nr_taken)
- return false;
-
- /*
- * For high-order allocations, there are two stall thresholds.
- * High-cost allocations stall immediately where as lower
- * order allocations such as stacks require the scanning
- * priority to be much higher before stalling.
- */
- if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
- lumpy_stall_priority = DEF_PRIORITY;
- else
- lumpy_stall_priority = DEF_PRIORITY / 3;
-
- return priority <= lumpy_stall_priority;
-}
-
/*
* shrink_inactive_list() is a helper for shrink_zone(). It returns the number
* of reclaimed pages
*/
static noinline_for_stack unsigned long
-shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
- struct scan_control *sc, int priority, int file)
+shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
+ struct scan_control *sc, enum lru_list lru)
{
LIST_HEAD(page_list);
unsigned long nr_scanned;
unsigned long nr_reclaimed = 0;
unsigned long nr_taken;
- unsigned long nr_anon;
- unsigned long nr_file;
unsigned long nr_dirty = 0;
unsigned long nr_writeback = 0;
- isolate_mode_t isolate_mode = ISOLATE_INACTIVE;
- struct zone *zone = mz->zone;
- struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
+ isolate_mode_t isolate_mode = 0;
+ int file = is_file_lru(lru);
+ struct zone *zone = lruvec_zone(lruvec);
+ struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
while (unlikely(too_many_isolated(zone, file, sc))) {
congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1522,10 +1217,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
return SWAP_CLUSTER_MAX;
}
- set_reclaim_mode(priority, sc, false);
- if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
- isolate_mode |= ISOLATE_ACTIVE;
-
lru_add_drain();
if (!sc->may_unmap)
@@ -1535,38 +1226,30 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
spin_lock_irq(&zone->lru_lock);
- nr_taken = isolate_lru_pages(nr_to_scan, mz, &page_list, &nr_scanned,
- sc, isolate_mode, 0, file);
+ nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
+ &nr_scanned, sc, isolate_mode, lru);
+
+ __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
+ __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
+
if (global_reclaim(sc)) {
zone->pages_scanned += nr_scanned;
if (current_is_kswapd())
- __count_zone_vm_events(PGSCAN_KSWAPD, zone,
- nr_scanned);
+ __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
else
- __count_zone_vm_events(PGSCAN_DIRECT, zone,
- nr_scanned);
+ __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
}
spin_unlock_irq(&zone->lru_lock);
if (nr_taken == 0)
return 0;
- update_isolated_counts(mz, &page_list, &nr_anon, &nr_file);
-
- nr_reclaimed = shrink_page_list(&page_list, mz, sc, priority,
+ nr_reclaimed = shrink_page_list(&page_list, zone, sc,
&nr_dirty, &nr_writeback);
- /* Check if we should syncronously wait for writeback */
- if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
- set_reclaim_mode(priority, sc, true);
- nr_reclaimed += shrink_page_list(&page_list, mz, sc,
- priority, &nr_dirty, &nr_writeback);
- }
-
spin_lock_irq(&zone->lru_lock);
- reclaim_stat->recent_scanned[0] += nr_anon;
- reclaim_stat->recent_scanned[1] += nr_file;
+ reclaim_stat->recent_scanned[file] += nr_taken;
if (global_reclaim(sc)) {
if (current_is_kswapd())
@@ -1577,10 +1260,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
nr_reclaimed);
}
- putback_inactive_pages(mz, &page_list);
+ putback_inactive_pages(lruvec, &page_list);
- __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
- __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
+ __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
spin_unlock_irq(&zone->lru_lock);
@@ -1609,14 +1291,15 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
* DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
* isolated page is PageWriteback
*/
- if (nr_writeback && nr_writeback >= (nr_taken >> (DEF_PRIORITY-priority)))
+ if (nr_writeback && nr_writeback >=
+ (nr_taken >> (DEF_PRIORITY - sc->priority)))
wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
zone_idx(zone),
nr_scanned, nr_reclaimed,
- priority,
- trace_shrink_flags(file, sc->reclaim_mode));
+ sc->priority,
+ trace_shrink_flags(file));
return nr_reclaimed;
}
@@ -1638,30 +1321,32 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
* But we had to alter page->flags anyway.
*/
-static void move_active_pages_to_lru(struct zone *zone,
+static void move_active_pages_to_lru(struct lruvec *lruvec,
struct list_head *list,
struct list_head *pages_to_free,
enum lru_list lru)
{
+ struct zone *zone = lruvec_zone(lruvec);
unsigned long pgmoved = 0;
struct page *page;
+ int nr_pages;
while (!list_empty(list)) {
- struct lruvec *lruvec;
-
page = lru_to_page(list);
+ lruvec = mem_cgroup_page_lruvec(page, zone);
VM_BUG_ON(PageLRU(page));
SetPageLRU(page);
- lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+ nr_pages = hpage_nr_pages(page);
+ mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
list_move(&page->lru, &lruvec->lists[lru]);
- pgmoved += hpage_nr_pages(page);
+ pgmoved += nr_pages;
if (put_page_testzero(page)) {
__ClearPageLRU(page);
__ClearPageActive(page);
- del_page_from_lru_list(zone, page, lru);
+ del_page_from_lru_list(page, lruvec, lru);
if (unlikely(PageCompound(page))) {
spin_unlock_irq(&zone->lru_lock);
@@ -1677,9 +1362,9 @@ static void move_active_pages_to_lru(struct zone *zone,
}
static void shrink_active_list(unsigned long nr_to_scan,
- struct mem_cgroup_zone *mz,
+ struct lruvec *lruvec,
struct scan_control *sc,
- int priority, int file)
+ enum lru_list lru)
{
unsigned long nr_taken;
unsigned long nr_scanned;
@@ -1688,15 +1373,14 @@ static void shrink_active_list(unsigned long nr_to_scan,
LIST_HEAD(l_active);
LIST_HEAD(l_inactive);
struct page *page;
- struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
+ struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
unsigned long nr_rotated = 0;
- isolate_mode_t isolate_mode = ISOLATE_ACTIVE;
- struct zone *zone = mz->zone;
+ isolate_mode_t isolate_mode = 0;
+ int file = is_file_lru(lru);
+ struct zone *zone = lruvec_zone(lruvec);
lru_add_drain();
- reset_reclaim_mode(sc);
-
if (!sc->may_unmap)
isolate_mode |= ISOLATE_UNMAPPED;
if (!sc->may_writepage)
@@ -1704,18 +1388,15 @@ static void shrink_active_list(unsigned long nr_to_scan,
spin_lock_irq(&zone->lru_lock);
- nr_taken = isolate_lru_pages(nr_to_scan, mz, &l_hold, &nr_scanned, sc,
- isolate_mode, 1, file);
+ nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
+ &nr_scanned, sc, isolate_mode, lru);
if (global_reclaim(sc))
zone->pages_scanned += nr_scanned;
reclaim_stat->recent_scanned[file] += nr_taken;
__count_zone_vm_events(PGREFILL, zone, nr_scanned);
- if (file)
- __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
- else
- __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
+ __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
spin_unlock_irq(&zone->lru_lock);
@@ -1737,7 +1418,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
}
}
- if (page_referenced(page, 0, mz->mem_cgroup, &vm_flags)) {
+ if (page_referenced(page, 0, sc->target_mem_cgroup,
+ &vm_flags)) {
nr_rotated += hpage_nr_pages(page);
/*
* Identify referenced, file-backed active pages and
@@ -1770,10 +1452,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
*/
reclaim_stat->recent_rotated[file] += nr_rotated;
- move_active_pages_to_lru(zone, &l_active, &l_hold,
- LRU_ACTIVE + file * LRU_FILE);
- move_active_pages_to_lru(zone, &l_inactive, &l_hold,
- LRU_BASE + file * LRU_FILE);
+ move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
+ move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
spin_unlock_irq(&zone->lru_lock);
@@ -1796,13 +1476,12 @@ static int inactive_anon_is_low_global(struct zone *zone)
/**
* inactive_anon_is_low - check if anonymous pages need to be deactivated
- * @zone: zone to check
- * @sc: scan control of this context
+ * @lruvec: LRU vector to check
*
* Returns true if the zone does not have enough inactive anon pages,
* meaning some active anon pages need to be deactivated.
*/
-static int inactive_anon_is_low(struct mem_cgroup_zone *mz)
+static int inactive_anon_is_low(struct lruvec *lruvec)
{
/*
* If we don't have swap space, anonymous page deactivation
@@ -1811,14 +1490,13 @@ static int inactive_anon_is_low(struct mem_cgroup_zone *mz)
if (!total_swap_pages)
return 0;
- if (!scanning_global_lru(mz))
- return mem_cgroup_inactive_anon_is_low(mz->mem_cgroup,
- mz->zone);
+ if (!mem_cgroup_disabled())
+ return mem_cgroup_inactive_anon_is_low(lruvec);
- return inactive_anon_is_low_global(mz->zone);
+ return inactive_anon_is_low_global(lruvec_zone(lruvec));
}
#else
-static inline int inactive_anon_is_low(struct mem_cgroup_zone *mz)
+static inline int inactive_anon_is_low(struct lruvec *lruvec)
{
return 0;
}
@@ -1836,7 +1514,7 @@ static int inactive_file_is_low_global(struct zone *zone)
/**
* inactive_file_is_low - check if file pages need to be deactivated
- * @mz: memory cgroup and zone to check
+ * @lruvec: LRU vector to check
*
* When the system is doing streaming IO, memory pressure here
* ensures that active file pages get deactivated, until more
@@ -1848,44 +1526,39 @@ static int inactive_file_is_low_global(struct zone *zone)
* This uses a different ratio than the anonymous pages, because
* the page cache uses a use-once replacement algorithm.
*/
-static int inactive_file_is_low(struct mem_cgroup_zone *mz)
+static int inactive_file_is_low(struct lruvec *lruvec)
{
- if (!scanning_global_lru(mz))
- return mem_cgroup_inactive_file_is_low(mz->mem_cgroup,
- mz->zone);
+ if (!mem_cgroup_disabled())
+ return mem_cgroup_inactive_file_is_low(lruvec);
- return inactive_file_is_low_global(mz->zone);
+ return inactive_file_is_low_global(lruvec_zone(lruvec));
}
-static int inactive_list_is_low(struct mem_cgroup_zone *mz, int file)
+static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru)
{
- if (file)
- return inactive_file_is_low(mz);
+ if (is_file_lru(lru))
+ return inactive_file_is_low(lruvec);
else
- return inactive_anon_is_low(mz);
+ return inactive_anon_is_low(lruvec);
}
static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
- struct mem_cgroup_zone *mz,
- struct scan_control *sc, int priority)
+ struct lruvec *lruvec, struct scan_control *sc)
{
- int file = is_file_lru(lru);
-
if (is_active_lru(lru)) {
- if (inactive_list_is_low(mz, file))
- shrink_active_list(nr_to_scan, mz, sc, priority, file);
+ if (inactive_list_is_low(lruvec, lru))
+ shrink_active_list(nr_to_scan, lruvec, sc, lru);
return 0;
}
- return shrink_inactive_list(nr_to_scan, mz, sc, priority, file);
+ return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
}
-static int vmscan_swappiness(struct mem_cgroup_zone *mz,
- struct scan_control *sc)
+static int vmscan_swappiness(struct scan_control *sc)
{
if (global_reclaim(sc))
return vm_swappiness;
- return mem_cgroup_swappiness(mz->mem_cgroup);
+ return mem_cgroup_swappiness(sc->target_mem_cgroup);
}
/*
@@ -1896,17 +1569,18 @@ static int vmscan_swappiness(struct mem_cgroup_zone *mz,
*
* nr[0] = anon pages to scan; nr[1] = file pages to scan
*/
-static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
- unsigned long *nr, int priority)
+static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
+ unsigned long *nr)
{
unsigned long anon, file, free;
unsigned long anon_prio, file_prio;
unsigned long ap, fp;
- struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
+ struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
u64 fraction[2], denominator;
enum lru_list lru;
int noswap = 0;
bool force_scan = false;
+ struct zone *zone = lruvec_zone(lruvec);
/*
* If the zone or memcg is small, nr[l] can be 0. This
@@ -1918,7 +1592,7 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
* latencies, so it's better to scan a minimum amount there as
* well.
*/
- if (current_is_kswapd() && mz->zone->all_unreclaimable)
+ if (current_is_kswapd() && zone->all_unreclaimable)
force_scan = true;
if (!global_reclaim(sc))
force_scan = true;
@@ -1932,16 +1606,16 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
goto out;
}
- anon = zone_nr_lru_pages(mz, LRU_ACTIVE_ANON) +
- zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
- file = zone_nr_lru_pages(mz, LRU_ACTIVE_FILE) +
- zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
+ anon = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
+ get_lru_size(lruvec, LRU_INACTIVE_ANON);
+ file = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
+ get_lru_size(lruvec, LRU_INACTIVE_FILE);
if (global_reclaim(sc)) {
- free = zone_page_state(mz->zone, NR_FREE_PAGES);
+ free = zone_page_state(zone, NR_FREE_PAGES);
/* If we have very few page cache pages,
force-scan anon pages. */
- if (unlikely(file + free <= high_wmark_pages(mz->zone))) {
+ if (unlikely(file + free <= high_wmark_pages(zone))) {
fraction[0] = 1;
fraction[1] = 0;
denominator = 1;
@@ -1953,8 +1627,8 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
* With swappiness at 100, anonymous and file have the same priority.
* This scanning priority is essentially the inverse of IO cost.
*/
- anon_prio = vmscan_swappiness(mz, sc);
- file_prio = 200 - vmscan_swappiness(mz, sc);
+ anon_prio = vmscan_swappiness(sc);
+ file_prio = 200 - anon_prio;
/*
* OK, so we have swap space and a fair amount of page cache
@@ -1967,7 +1641,7 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
*
* anon in [0], file in [1]
*/
- spin_lock_irq(&mz->zone->lru_lock);
+ spin_lock_irq(&zone->lru_lock);
if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
reclaim_stat->recent_scanned[0] /= 2;
reclaim_stat->recent_rotated[0] /= 2;
@@ -1983,12 +1657,12 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
* proportional to the fraction of recently scanned pages on
* each list that were recently referenced and in active use.
*/
- ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
+ ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
ap /= reclaim_stat->recent_rotated[0] + 1;
- fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
+ fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
fp /= reclaim_stat->recent_rotated[1] + 1;
- spin_unlock_irq(&mz->zone->lru_lock);
+ spin_unlock_irq(&zone->lru_lock);
fraction[0] = ap;
fraction[1] = fp;
@@ -1998,9 +1672,9 @@ out:
int file = is_file_lru(lru);
unsigned long scan;
- scan = zone_nr_lru_pages(mz, lru);
- if (priority || noswap) {
- scan >>= priority;
+ scan = get_lru_size(lruvec, lru);
+ if (sc->priority || noswap || !vmscan_swappiness(sc)) {
+ scan >>= sc->priority;
if (!scan && force_scan)
scan = SWAP_CLUSTER_MAX;
scan = div64_u64(scan * fraction[file], denominator);
@@ -2009,14 +1683,25 @@ out:
}
}
+/* Use reclaim/compaction for costly allocs or under memory pressure */
+static bool in_reclaim_compaction(struct scan_control *sc)
+{
+ if (COMPACTION_BUILD && sc->order &&
+ (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
+ sc->priority < DEF_PRIORITY - 2))
+ return true;
+
+ return false;
+}
+
/*
- * Reclaim/compaction depends on a number of pages being freed. To avoid
- * disruption to the system, a small number of order-0 pages continue to be
- * rotated and reclaimed in the normal fashion. However, by the time we get
- * back to the allocator and call try_to_compact_zone(), we ensure that
- * there are enough free pages for it to be likely successful
+ * Reclaim/compaction is used for high-order allocation requests. It reclaims
+ * order-0 pages before compacting the zone. should_continue_reclaim() returns
+ * true if more pages should be reclaimed such that when the page allocator
+ * calls try_to_compact_zone() it will have enough free pages to succeed.
+ * It will give up earlier than that if there is difficulty reclaiming pages.
*/
-static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
+static inline bool should_continue_reclaim(struct lruvec *lruvec,
unsigned long nr_reclaimed,
unsigned long nr_scanned,
struct scan_control *sc)
@@ -2025,7 +1710,7 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
unsigned long inactive_lru_pages;
/* If not in reclaim/compaction mode, stop */
- if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
+ if (!in_reclaim_compaction(sc))
return false;
/* Consider stopping depending on scan and reclaim activity */
@@ -2056,15 +1741,15 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
* inactive lists are large enough, continue reclaiming
*/
pages_for_compaction = (2UL << sc->order);
- inactive_lru_pages = zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
+ inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE);
if (nr_swap_pages > 0)
- inactive_lru_pages += zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
+ inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON);
if (sc->nr_reclaimed < pages_for_compaction &&
inactive_lru_pages > pages_for_compaction)
return true;
/* If compaction would go ahead or the allocation would succeed, stop */
- switch (compaction_suitable(mz->zone, sc->order)) {
+ switch (compaction_suitable(lruvec_zone(lruvec), sc->order)) {
case COMPACT_PARTIAL:
case COMPACT_CONTINUE:
return false;
@@ -2076,8 +1761,7 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
/*
* This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
*/
-static void shrink_mem_cgroup_zone(int priority, struct mem_cgroup_zone *mz,
- struct scan_control *sc)
+static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
unsigned long nr[NR_LRU_LISTS];
unsigned long nr_to_scan;
@@ -2089,7 +1773,7 @@ static void shrink_mem_cgroup_zone(int priority, struct mem_cgroup_zone *mz,
restart:
nr_reclaimed = 0;
nr_scanned = sc->nr_scanned;
- get_scan_count(mz, sc, nr, priority);
+ get_scan_count(lruvec, sc, nr);
blk_start_plug(&plug);
while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
@@ -2101,7 +1785,7 @@ restart:
nr[lru] -= nr_to_scan;
nr_reclaimed += shrink_list(lru, nr_to_scan,
- mz, sc, priority);
+ lruvec, sc);
}
}
/*
@@ -2112,7 +1796,8 @@ restart:
* with multiple processes reclaiming pages, the total
* freeing target can get unreasonably large.
*/
- if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
+ if (nr_reclaimed >= nr_to_reclaim &&
+ sc->priority < DEF_PRIORITY)
break;
}
blk_finish_plug(&plug);
@@ -2122,35 +1807,33 @@ restart:
* Even if we did not try to evict anon pages at all, we want to
* rebalance the anon lru active/inactive ratio.
*/
- if (inactive_anon_is_low(mz))
- shrink_active_list(SWAP_CLUSTER_MAX, mz, sc, priority, 0);
+ if (inactive_anon_is_low(lruvec))
+ shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
+ sc, LRU_ACTIVE_ANON);
/* reclaim/compaction might need reclaim to continue */
- if (should_continue_reclaim(mz, nr_reclaimed,
- sc->nr_scanned - nr_scanned, sc))
+ if (should_continue_reclaim(lruvec, nr_reclaimed,
+ sc->nr_scanned - nr_scanned, sc))
goto restart;
throttle_vm_writeout(sc->gfp_mask);
}
-static void shrink_zone(int priority, struct zone *zone,
- struct scan_control *sc)
+static void shrink_zone(struct zone *zone, struct scan_control *sc)
{
struct mem_cgroup *root = sc->target_mem_cgroup;
struct mem_cgroup_reclaim_cookie reclaim = {
.zone = zone,
- .priority = priority,
+ .priority = sc->priority,
};
struct mem_cgroup *memcg;
memcg = mem_cgroup_iter(root, NULL, &reclaim);
do {
- struct mem_cgroup_zone mz = {
- .mem_cgroup = memcg,
- .zone = zone,
- };
+ struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
+
+ shrink_lruvec(lruvec, sc);
- shrink_mem_cgroup_zone(priority, &mz, sc);
/*
* Limit reclaim has historically picked one memcg and
* scanned it with decreasing priority levels until
@@ -2226,8 +1909,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
* the caller that it should consider retrying the allocation instead of
* further reclaim.
*/
-static bool shrink_zones(int priority, struct zonelist *zonelist,
- struct scan_control *sc)
+static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
{
struct zoneref *z;
struct zone *zone;
@@ -2254,7 +1936,8 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
if (global_reclaim(sc)) {
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
continue;
- if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+ if (zone->all_unreclaimable &&
+ sc->priority != DEF_PRIORITY)
continue; /* Let kswapd poll it */
if (COMPACTION_BUILD) {
/*
@@ -2286,7 +1969,7 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
/* need some check for avoid more shrink_zone() */
}
- shrink_zone(priority, zone, sc);
+ shrink_zone(zone, sc);
}
return aborted_reclaim;
@@ -2337,7 +2020,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
struct scan_control *sc,
struct shrink_control *shrink)
{
- int priority;
unsigned long total_scanned = 0;
struct reclaim_state *reclaim_state = current->reclaim_state;
struct zoneref *z;
@@ -2350,11 +2032,9 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
if (global_reclaim(sc))
count_vm_event(ALLOCSTALL);
- for (priority = DEF_PRIORITY; priority >= 0; priority--) {
+ do {
sc->nr_scanned = 0;
- if (!priority)
- disable_swap_token(sc->target_mem_cgroup);
- aborted_reclaim = shrink_zones(priority, zonelist, sc);
+ aborted_reclaim = shrink_zones(zonelist, sc);
/*
* Don't shrink slabs when reclaiming memory from
@@ -2396,7 +2076,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
/* Take a nap, wait for some writeback to complete */
if (!sc->hibernation_mode && sc->nr_scanned &&
- priority < DEF_PRIORITY - 2) {
+ sc->priority < DEF_PRIORITY - 2) {
struct zone *preferred_zone;
first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
@@ -2404,7 +2084,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
&preferred_zone);
wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
}
- }
+ } while (--sc->priority >= 0);
out:
delayacct_freepages_end();
@@ -2442,6 +2122,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
.may_unmap = 1,
.may_swap = 1,
.order = order,
+ .priority = DEF_PRIORITY,
.target_mem_cgroup = NULL,
.nodemask = nodemask,
};
@@ -2474,17 +2155,15 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
.may_unmap = 1,
.may_swap = !noswap,
.order = 0,
+ .priority = 0,
.target_mem_cgroup = memcg,
};
- struct mem_cgroup_zone mz = {
- .mem_cgroup = memcg,
- .zone = zone,
- };
+ struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
- trace_mm_vmscan_memcg_softlimit_reclaim_begin(0,
+ trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
sc.may_writepage,
sc.gfp_mask);
@@ -2495,7 +2174,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
* will pick up pages from other mem cgroup's as well. We hack
* the priority and make it zero.
*/
- shrink_mem_cgroup_zone(0, &mz, &sc);
+ shrink_lruvec(lruvec, &sc);
trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
@@ -2516,6 +2195,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
.may_swap = !noswap,
.nr_to_reclaim = SWAP_CLUSTER_MAX,
.order = 0,
+ .priority = DEF_PRIORITY,
.target_mem_cgroup = memcg,
.nodemask = NULL, /* we don't care the placement */
.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
@@ -2546,8 +2226,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
}
#endif
-static void age_active_anon(struct zone *zone, struct scan_control *sc,
- int priority)
+static void age_active_anon(struct zone *zone, struct scan_control *sc)
{
struct mem_cgroup *memcg;
@@ -2556,14 +2235,11 @@ static void age_active_anon(struct zone *zone, struct scan_control *sc,
memcg = mem_cgroup_iter(NULL, NULL, NULL);
do {
- struct mem_cgroup_zone mz = {
- .mem_cgroup = memcg,
- .zone = zone,
- };
+ struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
- if (inactive_anon_is_low(&mz))
- shrink_active_list(SWAP_CLUSTER_MAX, &mz,
- sc, priority, 0);
+ if (inactive_anon_is_low(lruvec))
+ shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
+ sc, LRU_ACTIVE_ANON);
memcg = mem_cgroup_iter(NULL, memcg, NULL);
} while (memcg);
@@ -2672,7 +2348,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
{
int all_zones_ok;
unsigned long balanced;
- int priority;
int i;
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
unsigned long total_scanned;
@@ -2696,18 +2371,15 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
};
loop_again:
total_scanned = 0;
+ sc.priority = DEF_PRIORITY;
sc.nr_reclaimed = 0;
sc.may_writepage = !laptop_mode;
count_vm_event(PAGEOUTRUN);
- for (priority = DEF_PRIORITY; priority >= 0; priority--) {
+ do {
unsigned long lru_pages = 0;
int has_under_min_watermark_zone = 0;
- /* The swap token gets in the way of swapout... */
- if (!priority)
- disable_swap_token(NULL);
-
all_zones_ok = 1;
balanced = 0;
@@ -2721,14 +2393,15 @@ loop_again:
if (!populated_zone(zone))
continue;
- if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+ if (zone->all_unreclaimable &&
+ sc.priority != DEF_PRIORITY)
continue;
/*
* Do some background aging of the anon list, to give
* pages a chance to be referenced before reclaiming.
*/
- age_active_anon(zone, &sc, priority);
+ age_active_anon(zone, &sc);
/*
* If the number of buffer_heads in the machine
@@ -2776,7 +2449,8 @@ loop_again:
if (!populated_zone(zone))
continue;
- if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+ if (zone->all_unreclaimable &&
+ sc.priority != DEF_PRIORITY)
continue;
sc.nr_scanned = 0;
@@ -2820,7 +2494,7 @@ loop_again:
!zone_watermark_ok_safe(zone, testorder,
high_wmark_pages(zone) + balance_gap,
end_zone, 0)) {
- shrink_zone(priority, zone, &sc);
+ shrink_zone(zone, &sc);
reclaim_state->reclaimed_slab = 0;
nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
@@ -2877,7 +2551,7 @@ loop_again:
* OK, kswapd is getting into trouble. Take a nap, then take
* another pass across the zones.
*/
- if (total_scanned && (priority < DEF_PRIORITY - 2)) {
+ if (total_scanned && (sc.priority < DEF_PRIORITY - 2)) {
if (has_under_min_watermark_zone)
count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
else
@@ -2892,7 +2566,7 @@ loop_again:
*/
if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
break;
- }
+ } while (--sc.priority >= 0);
out:
/*
@@ -2942,7 +2616,8 @@ out:
if (!populated_zone(zone))
continue;
- if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+ if (zone->all_unreclaimable &&
+ sc.priority != DEF_PRIORITY)
continue;
/* Would compaction fail due to lack of free memory? */
@@ -3209,6 +2884,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
.nr_to_reclaim = nr_to_reclaim,
.hibernation_mode = 1,
.order = 0,
+ .priority = DEF_PRIORITY,
};
struct shrink_control shrink = {
.gfp_mask = sc.gfp_mask,
@@ -3386,7 +3062,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
const unsigned long nr_pages = 1 << order;
struct task_struct *p = current;
struct reclaim_state reclaim_state;
- int priority;
struct scan_control sc = {
.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
@@ -3395,6 +3070,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
SWAP_CLUSTER_MAX),
.gfp_mask = gfp_mask,
.order = order,
+ .priority = ZONE_RECLAIM_PRIORITY,
};
struct shrink_control shrink = {
.gfp_mask = sc.gfp_mask,
@@ -3417,11 +3093,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
* Free memory by calling shrink zone with increasing
* priorities until we have enough memory freed.
*/
- priority = ZONE_RECLAIM_PRIORITY;
do {
- shrink_zone(priority, zone, &sc);
- priority--;
- } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
+ shrink_zone(zone, &sc);
+ } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
}
nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
@@ -3536,7 +3210,7 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
if (mapping_unevictable(page_mapping(page)))
return 0;
- if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
+ if (PageMlocked(page) || (vma && mlocked_vma_newpage(vma, page)))
return 0;
return 1;
@@ -3572,6 +3246,7 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
zone = pagezone;
spin_lock_irq(&zone->lru_lock);
}
+ lruvec = mem_cgroup_page_lruvec(page, zone);
if (!PageLRU(page) || !PageUnevictable(page))
continue;
@@ -3581,11 +3256,8 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
VM_BUG_ON(PageActive(page));
ClearPageUnevictable(page);
- __dec_zone_state(zone, NR_UNEVICTABLE);
- lruvec = mem_cgroup_lru_move_lists(zone, page,
- LRU_UNEVICTABLE, lru);
- list_move(&page->lru, &lruvec->lists[lru]);
- __inc_zone_state(zone, NR_INACTIVE_ANON + lru);
+ del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
+ add_page_to_lru_list(page, lruvec, lru);
pgrescued++;
}
}
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 7db1b9bab49..1bbbbd9776a 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -613,6 +613,9 @@ static char * const migratetype_names[MIGRATE_TYPES] = {
"Reclaimable",
"Movable",
"Reserve",
+#ifdef CONFIG_CMA
+ "CMA",
+#endif
"Isolate",
};
@@ -1220,7 +1223,6 @@ module_init(setup_vmstat)
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
#include <linux/debugfs.h>
-static struct dentry *extfrag_debug_root;
/*
* Return an index indicating how much of the available free memory is
@@ -1358,19 +1360,24 @@ static const struct file_operations extfrag_file_ops = {
static int __init extfrag_debug_init(void)
{
+ struct dentry *extfrag_debug_root;
+
extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
if (!extfrag_debug_root)
return -ENOMEM;
if (!debugfs_create_file("unusable_index", 0444,
extfrag_debug_root, NULL, &unusable_file_ops))
- return -ENOMEM;
+ goto fail;
if (!debugfs_create_file("extfrag_index", 0444,
extfrag_debug_root, NULL, &extfrag_file_ops))
- return -ENOMEM;
+ goto fail;
return 0;
+fail:
+ debugfs_remove_recursive(extfrag_debug_root);
+ return -ENOMEM;
}
module_init(extfrag_debug_init);
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index 151703791bb..b6f3583ddfe 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -74,9 +74,6 @@ void tcp_destroy_cgroup(struct mem_cgroup *memcg)
percpu_counter_destroy(&tcp->tcp_sockets_allocated);
val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
-
- if (val != RESOURCE_MAX)
- static_key_slow_dec(&memcg_socket_limit_enabled);
}
EXPORT_SYMBOL(tcp_destroy_cgroup);
@@ -107,10 +104,33 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
tcp->tcp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT,
net->ipv4.sysctl_tcp_mem[i]);
- if (val == RESOURCE_MAX && old_lim != RESOURCE_MAX)
- static_key_slow_dec(&memcg_socket_limit_enabled);
- else if (old_lim == RESOURCE_MAX && val != RESOURCE_MAX)
- static_key_slow_inc(&memcg_socket_limit_enabled);
+ if (val == RESOURCE_MAX)
+ clear_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
+ else if (val != RESOURCE_MAX) {
+ /*
+ * The active bit needs to be written after the static_key
+ * update. This is what guarantees that the socket activation
+ * function is the last one to run. See sock_update_memcg() for
+ * details, and note that we don't mark any socket as belonging
+ * to this memcg until that flag is up.
+ *
+ * We need to do this, because static_keys will span multiple
+ * sites, but we can't control their order. If we mark a socket
+ * as accounted, but the accounting functions are not patched in
+ * yet, we'll lose accounting.
+ *
+ * We never race with the readers in sock_update_memcg(),
+	 * because when this value changes, the code to process it is not
+ * patched in yet.
+ *
+ * The activated bit is used to guarantee that no two writers
+ * will do the update in the same memcg. Without that, we can't
+ * properly shutdown the static key.
+ */
+ if (!test_and_set_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags))
+ static_key_slow_inc(&memcg_socket_limit_enabled);
+ set_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
+ }
return 0;
}
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 7fee13b331d..f56f045778a 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1286,6 +1286,8 @@ call_reserveresult(struct rpc_task *task)
}
switch (status) {
+ case -ENOMEM:
+ rpc_delay(task, HZ >> 2);
case -EAGAIN: /* woken up; retry */
task->tk_action = call_reserve;
return;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index fd2423991c2..04040476082 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -120,7 +120,7 @@ EXPORT_SYMBOL_GPL(rpc_pipe_generic_upcall);
/**
* rpc_queue_upcall - queue an upcall message to userspace
- * @inode: inode of upcall pipe on which to queue given message
+ * @pipe: upcall pipe on which to queue given message
* @msg: message to queue
*
* Call with an @inode created by rpc_mkpipe() to queue an upcall.
@@ -819,9 +819,7 @@ static int rpc_rmdir_depopulate(struct dentry *dentry,
* @parent: dentry of directory to create new "pipe" in
* @name: name of pipe
* @private: private data to associate with the pipe, for the caller's use
- * @ops: operations defining the behavior of the pipe: upcall, downcall,
- * release_pipe, open_pipe, and destroy_msg.
- * @flags: rpc_pipe flags
+ * @pipe: &rpc_pipe containing input parameters
*
* Data is made available for userspace to read by calls to
* rpc_queue_upcall(). The actual reads will result in calls to
@@ -943,7 +941,7 @@ struct dentry *rpc_create_client_dir(struct dentry *dentry,
/**
* rpc_remove_client_dir - Remove a directory created with rpc_create_client_dir()
- * @clnt: rpc client
+ * @dentry: dentry for the pipe
*/
int rpc_remove_client_dir(struct dentry *dentry)
{
@@ -1115,7 +1113,7 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
sb->s_op = &s_ops;
sb->s_time_gran = 1;
- inode = rpc_get_inode(sb, S_IFDIR | 0755);
+ inode = rpc_get_inode(sb, S_IFDIR | S_IRUGO | S_IXUGO);
sb->s_root = root = d_make_root(inode);
if (!root)
return -ENOMEM;
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 78ac39fd9fe..3c0653439f3 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -394,6 +394,7 @@ static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg)
/**
* rpcb_register - set or unset a port registration with the local rpcbind svc
+ * @net: target network namespace
* @prog: RPC program number to bind
* @vers: RPC version number to bind
* @prot: transport protocol to register
@@ -521,6 +522,7 @@ static int rpcb_unregister_all_protofamilies(struct sunrpc_net *sn,
/**
* rpcb_v4_register - set or unset a port registration with the local rpcbind
+ * @net: target network namespace
* @program: RPC program number of service to (un)register
* @version: RPC version number of service to (un)register
* @address: address family, IP address, and port to (un)register
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 6fe2dcead15..3c83035cdaa 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -979,20 +979,21 @@ static void xprt_alloc_slot(struct rpc_task *task)
list_del(&req->rq_list);
goto out_init_req;
}
- req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT);
+ req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
if (!IS_ERR(req))
goto out_init_req;
switch (PTR_ERR(req)) {
case -ENOMEM:
- rpc_delay(task, HZ >> 2);
dprintk("RPC: dynamic allocation of request slot "
"failed! Retrying\n");
+ task->tk_status = -ENOMEM;
break;
case -EAGAIN:
rpc_sleep_on(&xprt->backlog, task, NULL);
dprintk("RPC: waiting for request slot\n");
+ default:
+ task->tk_status = -EAGAIN;
}
- task->tk_status = -EAGAIN;
return;
out_init_req:
task->tk_status = 0;
diff --git a/scripts/coccinelle/misc/ifaddr.cocci b/scripts/coccinelle/misc/ifaddr.cocci
new file mode 100644
index 00000000000..3e4089a7700
--- /dev/null
+++ b/scripts/coccinelle/misc/ifaddr.cocci
@@ -0,0 +1,35 @@
+/// the address of a variable or field is likely always to be
+/// non-zero
+///
+// Confidence: High
+// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6. GPLv2.
+// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Comments:
+// Options: -no_includes -include_headers
+
+virtual org
+virtual report
+virtual context
+
+@r@
+expression x;
+statement S1,S2;
+position p;
+@@
+
+*if@p (&x)
+ S1 else S2
+
+@script:python depends on org@
+p << r.p;
+@@
+
+cocci.print_main("test of a variable/field address",p)
+
+@script:python depends on report@
+p << r.p;
+@@
+
+msg = "ERROR: test of a variable/field address"
+coccilib.report.print_report(p[0],msg)
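
The rule added above reports conditionals that test the address of a variable or field, which can essentially never be zero. A minimal, hypothetical C sketch (struct and field names are invented for illustration, not taken from the kernel tree) of the kind of test it flags:

#include <stdio.h>

struct device {
	int id;
	struct resource *res;	/* the pointer value stored here may be NULL */
};

static void probe(struct device *dev)
{
	/*
	 * Always true: &dev->res is the address of the 'res' member itself,
	 * not the pointer stored in it, so this branch is always taken.
	 * The author almost certainly meant to test dev->res.
	 */
	if (&dev->res)
		printf("resource present?\n");

	/* Intended test of the pointer value. */
	if (dev->res)
		printf("resource present\n");
}

int main(void)
{
	struct device d = { .id = 1, .res = NULL };
	probe(&d);
	return 0;
}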
diff --git a/scripts/coccinelle/misc/noderef.cocci b/scripts/coccinelle/misc/noderef.cocci
new file mode 100644
index 00000000000..c1707214e60
--- /dev/null
+++ b/scripts/coccinelle/misc/noderef.cocci
@@ -0,0 +1,65 @@
+/// sizeof when applied to a pointer typed expression gives the size of
+/// the pointer
+///
+// Confidence: High
+// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6. GPLv2.
+// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Comments:
+// Options: -no_includes -include_headers
+
+virtual org
+virtual report
+virtual context
+virtual patch
+
+@depends on patch@
+expression *x;
+expression f;
+type T;
+@@
+
+(
+x = <+... sizeof(
+- x
++ *x
+ ) ...+>
+|
+f(...,(T)(x),...,sizeof(
+- x
++ *x
+ ),...)
+|
+f(...,sizeof(x),...,(T)(
+- x
++ *x
+ ),...)
+)
+
+@r depends on !patch@
+expression *x;
+expression f;
+position p;
+type T;
+@@
+
+(
+*x = <+... sizeof@p(x) ...+>
+|
+*f(...,(T)(x),...,sizeof@p(x),...)
+|
+*f(...,sizeof@p(x),...,(T)(x),...)
+)
+
+@script:python depends on org@
+p << r.p;
+@@
+
+cocci.print_main("application of sizeof to pointer",p)
+
+@script:python depends on report@
+p << r.p;
+@@
+
+msg = "ERROR: application of sizeof to pointer"
+coccilib.report.print_report(p[0],msg)
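
The rule added above targets sizeof applied to a pointer-typed expression, which yields the size of the pointer rather than of the pointed-to object. A small, hypothetical C sketch (names invented for illustration, not taken from the kernel tree) of the bug class and its intended form:

#include <stdlib.h>
#include <string.h>

struct stats {
	unsigned long hits;
	unsigned long misses;
};

int main(void)
{
	struct stats *s = malloc(sizeof(*s));	/* size of the object: correct */

	if (!s)
		return 1;

	/*
	 * The pattern the rule flags: sizeof(s) is the size of the pointer
	 * (typically 4 or 8 bytes), not sizeof(struct stats), so only part
	 * of the structure is cleared here.
	 */
	memset(s, 0, sizeof(s));

	/* Intended form: size of what the pointer points to. */
	memset(s, 0, sizeof(*s));

	free(s);
	return 0;
}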
diff --git a/scripts/config b/scripts/config
index a7c7c4b8e95..ed6653ef970 100755
--- a/scripts/config
+++ b/scripts/config
@@ -107,7 +107,8 @@ while [ "$1" != "" ] ; do
;;
--set-str)
- set_var "CONFIG_$ARG" "CONFIG_$ARG=\"$1\""
+ # sed swallows one level of escaping, so we need double-escaping
+ set_var "CONFIG_$ARG" "CONFIG_$ARG=\"${1//\"/\\\\\"}\""
shift
;;
@@ -124,9 +125,11 @@ while [ "$1" != "" ] ; do
if [ $? != 0 ] ; then
echo undef
else
- V="${V/CONFIG_$ARG=/}"
- V="${V/\"/}"
- echo "$V"
+ V="${V/#CONFIG_$ARG=/}"
+ V="${V/#\"/}"
+ V="${V/%\"/}"
+ V="${V/\\\"/\"}"
+ echo "${V}"
fi
fi
;;
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index f208f900ed3..0dc4a2c779b 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -574,8 +574,15 @@ int main(int ac, char **av)
case alldefconfig:
case randconfig:
name = getenv("KCONFIG_ALLCONFIG");
- if (name && !stat(name, &tmpstat)) {
- conf_read_simple(name, S_DEF_USER);
+ if (!name)
+ break;
+ if ((strcmp(name, "") != 0) && (strcmp(name, "1") != 0)) {
+ if (conf_read_simple(name, S_DEF_USER)) {
+ fprintf(stderr,
+ _("*** Can't read seed configuration \"%s\"!\n"),
+ name);
+ exit(1);
+ }
break;
}
switch (input_mode) {
@@ -586,10 +593,13 @@ int main(int ac, char **av)
case randconfig: name = "allrandom.config"; break;
default: break;
}
- if (!stat(name, &tmpstat))
- conf_read_simple(name, S_DEF_USER);
- else if (!stat("all.config", &tmpstat))
- conf_read_simple("all.config", S_DEF_USER);
+ if (conf_read_simple(name, S_DEF_USER) &&
+ conf_read_simple("all.config", S_DEF_USER)) {
+ fprintf(stderr,
+ _("*** KCONFIG_ALLCONFIG set, but no \"%s\" or \"all.config\" file found\n"),
+ name);
+ exit(1);
+ }
break;
default:
break;
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
new file mode 100644
index 00000000000..cd9c6c6bb4c
--- /dev/null
+++ b/scripts/link-vmlinux.sh
@@ -0,0 +1,221 @@
+#!/bin/sh
+#
+# link vmlinux
+#
+# vmlinux is linked from the objects selected by $(KBUILD_VMLINUX_INIT) and
+# $(KBUILD_VMLINUX_MAIN). Most are built-in.o files from top-level directories
+# in the kernel tree, others are specified in arch/$(ARCH)/Makefile.
+# Ordering when linking is important, and $(KBUILD_VMLINUX_INIT) must be first.
+#
+# vmlinux
+# ^
+# |
+# +-< $(KBUILD_VMLINUX_INIT)
+# | +--< init/version.o + more
+# |
+# +--< $(KBUILD_VMLINUX_MAIN)
+# | +--< drivers/built-in.o mm/built-in.o + more
+# |
+# +-< ${kallsymso} (see description in KALLSYMS section)
+#
+# vmlinux version (uname -v) cannot be updated during normal
+# descending-into-subdirs phase since we do not yet know if we need to
+# update vmlinux.
+# Therefore this step is delayed until just before final link of vmlinux.
+#
+# System.map is generated to document addresses of all kernel symbols
+
+# Exit immediately if any command fails
+set -e
+
+# Nice output in kbuild format
+# Will be suppressed by "make -s"
+info()
+{
+ if [ "${quiet}" != "silent_" ]; then
+ printf " %-7s %s\n" ${1} ${2}
+ fi
+}
+
+# Link of vmlinux.o used for section mismatch analysis
+# ${1} output file
+modpost_link()
+{
+ ${LD} ${LDFLAGS} -r -o ${1} ${KBUILD_VMLINUX_INIT} \
+ --start-group ${KBUILD_VMLINUX_MAIN} --end-group
+}
+
+# Link of vmlinux
+# ${1} - optional extra .o files
+# ${2} - output file
+vmlinux_link()
+{
+ local lds="${objtree}/${KBUILD_LDS}"
+
+ if [ "${SRCARCH}" != "um" ]; then
+ ${LD} ${LDFLAGS} ${LDFLAGS_vmlinux} -o ${2} \
+ -T ${lds} ${KBUILD_VMLINUX_INIT} \
+ --start-group ${KBUILD_VMLINUX_MAIN} --end-group ${1}
+ else
+ ${CC} ${CFLAGS_vmlinux} -o ${2} \
+ -Wl,-T,${lds} ${KBUILD_VMLINUX_INIT} \
+ -Wl,--start-group \
+ ${KBUILD_VMLINUX_MAIN} \
+ -Wl,--end-group \
+ -lutil ${1}
+ rm -f linux
+ fi
+}
+
+
+# Create ${2} .o file with all symbols from the ${1} object file
+kallsyms()
+{
+ info KSYM ${2}
+ local kallsymopt;
+
+ if [ -n "${CONFIG_KALLSYMS_ALL}" ]; then
+ kallsymopt=--all-symbols
+ fi
+
+ local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
+ ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
+
+ ${NM} -n ${1} | \
+ scripts/kallsyms ${kallsymopt} | \
+ ${CC} ${aflags} -c -o ${2} -x assembler-with-cpp -
+}
+
+# Create map file with all symbols from ${1}
+# See mksysmap for additional details
+mksysmap()
+{
+ ${CONFIG_SHELL} "${srctree}/scripts/mksysmap" ${1} ${2}
+}
+
+sortextable()
+{
+ ${objtree}/scripts/sortextable ${1}
+}
+
+# Delete output files in case of error
+trap cleanup SIGHUP SIGINT SIGQUIT SIGTERM ERR
+cleanup()
+{
+ rm -f .old_version
+ rm -f .tmp_System.map
+ rm -f .tmp_kallsyms*
+ rm -f .tmp_version
+ rm -f .tmp_vmlinux*
+ rm -f System.map
+ rm -f vmlinux
+ rm -f vmlinux.o
+}
+
+#
+#
+# Use "make V=1" to debug this script
+case "${KBUILD_VERBOSE}" in
+*1*)
+ set -x
+ ;;
+esac
+
+if [ "$1" = "clean" ]; then
+ cleanup
+ exit 0
+fi
+
+# We need access to CONFIG_ symbols
+. ./.config
+
+#link vmlinux.o
+info LD vmlinux.o
+modpost_link vmlinux.o
+
+# modpost vmlinux.o to check for section mismatches
+${MAKE} -f "${srctree}/scripts/Makefile.modpost" vmlinux.o
+
+# Update version
+info GEN .version
+if [ ! -r .version ]; then
+ rm -f .version;
+ echo 1 >.version;
+else
+ mv .version .old_version;
+ expr 0$(cat .old_version) + 1 >.version;
+fi;
+
+# final build of init/
+${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init
+
+kallsymso=""
+kallsyms_vmlinux=""
+if [ -n "${CONFIG_KALLSYMS}" ]; then
+
+ # kallsyms support
+ # Generate section listing all symbols and add it into vmlinux
+ # It's a three step process:
+ # 1) Link .tmp_vmlinux1 so it has all symbols and sections,
+ # but __kallsyms is empty.
+ # Running kallsyms on that gives us .tmp_kallsyms1.o with
+ # the right size
+ # 2) Link .tmp_vmlinux2 so it now has a __kallsyms section of
+ # the right size, but due to the added section, some
+ # addresses have shifted.
+ # From here, we generate a correct .tmp_kallsyms2.o
+ # 2a) We may use an extra pass as this has been necessary to
+	#         work around some alignment-related bugs.
+ # KALLSYMS_EXTRA_PASS=1 is used to trigger this.
+ # 3) The correct ${kallsymso} is linked into the final vmlinux.
+ #
+ # a) Verify that the System.map from vmlinux matches the map from
+ # ${kallsymso}.
+
+ kallsymso=.tmp_kallsyms2.o
+ kallsyms_vmlinux=.tmp_vmlinux2
+
+ # step 1
+ vmlinux_link "" .tmp_vmlinux1
+ kallsyms .tmp_vmlinux1 .tmp_kallsyms1.o
+
+ # step 2
+ vmlinux_link .tmp_kallsyms1.o .tmp_vmlinux2
+ kallsyms .tmp_vmlinux2 .tmp_kallsyms2.o
+
+ # step 2a
+ if [ -n "${KALLSYMS_EXTRA_PASS}" ]; then
+ kallsymso=.tmp_kallsyms3.o
+ kallsyms_vmlinux=.tmp_vmlinux3
+
+ vmlinux_link .tmp_kallsyms2.o .tmp_vmlinux3
+
+ kallsyms .tmp_vmlinux3 .tmp_kallsyms3.o
+ fi
+fi
+
+info LD vmlinux
+vmlinux_link "${kallsymso}" vmlinux
+
+if [ -n "${CONFIG_BUILDTIME_EXTABLE_SORT}" ]; then
+ info SORTEX vmlinux
+ sortextable vmlinux
+fi
+
+info SYSMAP System.map
+mksysmap vmlinux System.map
+
+# step a (see comment above)
+if [ -n "${CONFIG_KALLSYMS}" ]; then
+ mksysmap ${kallsyms_vmlinux} .tmp_System.map
+
+ if ! cmp -s System.map .tmp_System.map; then
+ echo Inconsistent kallsyms data
+		echo Try "make KALLSYMS_EXTRA_PASS=1" as a workaround
+ cleanup
+ exit 1
+ fi
+fi
+
+# We made a new kernel - delete old version file
+rm -f .old_version
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index eee5f8ed249..c95fdda5841 100644
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -245,7 +245,7 @@ fi
# Build header package
(cd $srctree; find . -name Makefile -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles")
(cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles")
-(cd $objtree; find .config Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
+(cd $objtree; find arch/$SRCARCH/include .config Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
destdir=$kernel_headers_dir/usr/src/linux-headers-$version
mkdir -p "$destdir"
(cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index faedb1481b2..8f312fa6c28 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -313,9 +313,22 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
snd_pcm_sframes_t hdelta, delta;
unsigned long jdelta;
+ unsigned long curr_jiffies;
+ struct timespec curr_tstamp;
old_hw_ptr = runtime->status->hw_ptr;
+
+ /*
+	 * Group the hw_ptr, time and jiffies reads to allow for more
+	 * accurate correlations/corrections.
+	 * The values are stored at the end of this routine after
+	 * corrections for the hw_ptr position.
+ */
pos = substream->ops->pointer(substream);
+ curr_jiffies = jiffies;
+ if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
+ snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
+
if (pos == SNDRV_PCM_POS_XRUN) {
xrun(substream);
return -EPIPE;
@@ -343,7 +356,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
delta = runtime->hw_ptr_interrupt + runtime->period_size;
if (delta > new_hw_ptr) {
/* check for double acknowledged interrupts */
- hdelta = jiffies - runtime->hw_ptr_jiffies;
+ hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
if (hdelta > runtime->hw_ptr_buffer_jiffies/2) {
hw_base += runtime->buffer_size;
if (hw_base >= runtime->boundary)
@@ -388,7 +401,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
* Without regular period interrupts, we have to check
* the elapsed time to detect xruns.
*/
- jdelta = jiffies - runtime->hw_ptr_jiffies;
+ jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
goto no_delta_check;
hdelta = jdelta - delta * HZ / runtime->rate;
@@ -430,7 +443,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
if (hdelta < runtime->delay)
goto no_jiffies_check;
hdelta -= runtime->delay;
- jdelta = jiffies - runtime->hw_ptr_jiffies;
+ jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
delta = jdelta /
(((runtime->period_size * HZ) / runtime->rate)
@@ -492,9 +505,9 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
}
runtime->hw_ptr_base = hw_base;
runtime->status->hw_ptr = new_hw_ptr;
- runtime->hw_ptr_jiffies = jiffies;
+ runtime->hw_ptr_jiffies = curr_jiffies;
if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
- snd_pcm_gettime(runtime, (struct timespec *)&runtime->status->tstamp);
+ runtime->status->tstamp = curr_tstamp;
return snd_pcm_update_state(substream, runtime);
}
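The idea behind the change above, shown as a small self-contained C sketch with hypothetical names: read the stream position and the clock back to back, keep the captured values, and compute every later delta from them instead of re-reading the clock:

#include <time.h>

struct pos_snapshot {
	unsigned long pos;	/* stream/hardware position */
	struct timespec ts;	/* system time taken together with it */
};

static struct pos_snapshot take_snapshot(unsigned long (*read_pos)(void *ctx),
					 void *ctx)
{
	struct pos_snapshot s;

	s.pos = read_pos(ctx);			/* position first */
	clock_gettime(CLOCK_MONOTONIC, &s.ts);	/* clock immediately after */
	return s;
}

This mirrors how the patch reuses curr_jiffies and curr_tstamp for all corrections rather than reading jiffies again at each step.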
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index eb09a334832..41ca803a1ff 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -2239,24 +2239,50 @@ void snd_hda_ctls_clear(struct hda_codec *codec)
/* pseudo device locking
* toggle card->shutdown to allow/disallow the device access (as a hack)
*/
-static int hda_lock_devices(struct snd_card *card)
+int snd_hda_lock_devices(struct hda_bus *bus)
{
+ struct snd_card *card = bus->card;
+ struct hda_codec *codec;
+
spin_lock(&card->files_lock);
- if (card->shutdown) {
- spin_unlock(&card->files_lock);
- return -EINVAL;
- }
+ if (card->shutdown)
+ goto err_unlock;
card->shutdown = 1;
+ if (!list_empty(&card->ctl_files))
+ goto err_clear;
+
+ list_for_each_entry(codec, &bus->codec_list, list) {
+ int pcm;
+ for (pcm = 0; pcm < codec->num_pcms; pcm++) {
+ struct hda_pcm *cpcm = &codec->pcm_info[pcm];
+ if (!cpcm->pcm)
+ continue;
+ if (cpcm->pcm->streams[0].substream_opened ||
+ cpcm->pcm->streams[1].substream_opened)
+ goto err_clear;
+ }
+ }
spin_unlock(&card->files_lock);
return 0;
+
+ err_clear:
+ card->shutdown = 0;
+ err_unlock:
+ spin_unlock(&card->files_lock);
+ return -EINVAL;
}
+EXPORT_SYMBOL_HDA(snd_hda_lock_devices);
-static void hda_unlock_devices(struct snd_card *card)
+void snd_hda_unlock_devices(struct hda_bus *bus)
{
+ struct snd_card *card = bus->card;
+
+ card = bus->card;
spin_lock(&card->files_lock);
card->shutdown = 0;
spin_unlock(&card->files_lock);
}
+EXPORT_SYMBOL_HDA(snd_hda_unlock_devices);
/**
* snd_hda_codec_reset - Clear all objects assigned to the codec
@@ -2270,26 +2296,12 @@ static void hda_unlock_devices(struct snd_card *card)
*/
int snd_hda_codec_reset(struct hda_codec *codec)
{
- struct snd_card *card = codec->bus->card;
- int i, pcm;
+ struct hda_bus *bus = codec->bus;
+ struct snd_card *card = bus->card;
+ int i;
- if (hda_lock_devices(card) < 0)
- return -EBUSY;
- /* check whether the codec isn't used by any mixer or PCM streams */
- if (!list_empty(&card->ctl_files)) {
- hda_unlock_devices(card);
+ if (snd_hda_lock_devices(bus) < 0)
return -EBUSY;
- }
- for (pcm = 0; pcm < codec->num_pcms; pcm++) {
- struct hda_pcm *cpcm = &codec->pcm_info[pcm];
- if (!cpcm->pcm)
- continue;
- if (cpcm->pcm->streams[0].substream_opened ||
- cpcm->pcm->streams[1].substream_opened) {
- hda_unlock_devices(card);
- return -EBUSY;
- }
- }
/* OK, let it free */
@@ -2298,7 +2310,7 @@ int snd_hda_codec_reset(struct hda_codec *codec)
codec->power_on = 0;
codec->power_transition = 0;
codec->power_jiffies = jiffies;
- flush_workqueue(codec->bus->workq);
+ flush_workqueue(bus->workq);
#endif
snd_hda_ctls_clear(codec);
	/* release PCMs */
@@ -2306,7 +2318,7 @@ int snd_hda_codec_reset(struct hda_codec *codec)
if (codec->pcm_info[i].pcm) {
snd_device_free(card, codec->pcm_info[i].pcm);
clear_bit(codec->pcm_info[i].device,
- codec->bus->pcm_dev_bits);
+ bus->pcm_dev_bits);
}
}
if (codec->patch_ops.free)
@@ -2331,7 +2343,7 @@ int snd_hda_codec_reset(struct hda_codec *codec)
codec->owner = NULL;
/* allow device access again */
- hda_unlock_devices(card);
+ snd_hda_unlock_devices(bus);
return 0;
}
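A hedged sketch of how a caller outside hda_codec.c might use the newly exported helpers; the wrapper function is hypothetical, while the helper signatures come from the patch. snd_hda_lock_devices() returns an error when a mixer or PCM stream is still open, and snd_hda_unlock_devices() re-enables device access:

#include "hda_codec.h"

static int example_reconfigure(struct hda_codec *codec)
{
	struct hda_bus *bus = codec->bus;

	if (snd_hda_lock_devices(bus) < 0)
		return -EBUSY;	/* still in use or already shut down */

	/* ... modify codec state while user-space access is blocked ... */

	snd_hda_unlock_devices(bus);
	return 0;
}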
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 54b52819fb4..4fc3960c859 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -1023,6 +1023,9 @@ void snd_hda_codec_set_power_to_all(struct hda_codec *codec, hda_nid_t fg,
unsigned int power_state,
bool eapd_workaround);
+int snd_hda_lock_devices(struct hda_bus *bus);
+void snd_hda_unlock_devices(struct hda_bus *bus);
+
/*
* power management
*/
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 4ab8102f87e..2b6392be451 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -53,6 +53,8 @@
#endif
#include <sound/core.h>
#include <sound/initval.h>
+#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
#include "hda_codec.h"
@@ -175,6 +177,13 @@ MODULE_DESCRIPTION("Intel HDA driver");
#define SFX "hda-intel: "
#endif
+#if defined(CONFIG_PM) && defined(CONFIG_VGA_SWITCHEROO)
+#ifdef CONFIG_SND_HDA_CODEC_HDMI
+#define SUPPORT_VGA_SWITCHEROO
+#endif
+#endif
+
+
/*
* registers
*/
@@ -472,6 +481,12 @@ struct azx {
unsigned int probing :1; /* codec probing phase */
unsigned int snoop:1;
unsigned int align_buffer_size:1;
+ unsigned int region_requested:1;
+
+ /* VGA-switcheroo setup */
+ unsigned int use_vga_switcheroo:1;
+ unsigned int init_failed:1; /* delayed init failed */
+ unsigned int disabled:1; /* disabled by VGA-switcher */
/* for debugging */
unsigned int last_cmd[AZX_MAX_CODECS];
@@ -538,7 +553,20 @@ enum {
#define AZX_DCAPS_PRESET_CTHDA \
(AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_4K_BDLE_BOUNDARY)
-static char *driver_short_names[] __devinitdata = {
+/*
+ * VGA-switcher support
+ */
+#ifdef SUPPORT_VGA_SWITCHEROO
+#define DELAYED_INIT_MARK
+#define DELAYED_INITDATA_MARK
+#define use_vga_switcheroo(chip) ((chip)->use_vga_switcheroo)
+#else
+#define DELAYED_INIT_MARK __devinit
+#define DELAYED_INITDATA_MARK __devinitdata
+#define use_vga_switcheroo(chip) 0
+#endif
+
+static char *driver_short_names[] DELAYED_INITDATA_MARK = {
[AZX_DRIVER_ICH] = "HDA Intel",
[AZX_DRIVER_PCH] = "HDA Intel PCH",
[AZX_DRIVER_SCH] = "HDA Intel MID",
@@ -959,6 +987,8 @@ static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
{
struct azx *chip = bus->private_data;
+ if (chip->disabled)
+ return 0;
chip->last_cmd[azx_command_addr(val)] = val;
if (chip->single_cmd)
return azx_single_send_cmd(bus, val);
@@ -971,6 +1001,8 @@ static unsigned int azx_get_response(struct hda_bus *bus,
unsigned int addr)
{
struct azx *chip = bus->private_data;
+ if (chip->disabled)
+ return 0;
if (chip->single_cmd)
return azx_single_get_response(bus, addr);
else
@@ -1236,6 +1268,11 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
spin_lock(&chip->reg_lock);
+ if (chip->disabled) {
+ spin_unlock(&chip->reg_lock);
+ return IRQ_NONE;
+ }
+
status = azx_readl(chip, INTSTS);
if (status == 0) {
spin_unlock(&chip->reg_lock);
@@ -1521,12 +1558,12 @@ static void azx_bus_reset(struct hda_bus *bus)
*/
/* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
-static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] __devinitdata = {
+static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] DELAYED_INITDATA_MARK = {
[AZX_DRIVER_NVIDIA] = 8,
[AZX_DRIVER_TERA] = 1,
};
-static int __devinit azx_codec_create(struct azx *chip, const char *model)
+static int DELAYED_INIT_MARK azx_codec_create(struct azx *chip, const char *model)
{
struct hda_bus_template bus_temp;
int c, codecs, err;
@@ -2444,6 +2481,105 @@ static void azx_notifier_unregister(struct azx *chip)
unregister_reboot_notifier(&chip->reboot_notifier);
}
+static int DELAYED_INIT_MARK azx_first_init(struct azx *chip);
+static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip);
+
+static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci);
+
+#ifdef SUPPORT_VGA_SWITCHEROO
+static void azx_vs_set_state(struct pci_dev *pci,
+ enum vga_switcheroo_state state)
+{
+ struct snd_card *card = pci_get_drvdata(pci);
+ struct azx *chip = card->private_data;
+ bool disabled;
+
+ if (chip->init_failed)
+ return;
+
+ disabled = (state == VGA_SWITCHEROO_OFF);
+ if (chip->disabled == disabled)
+ return;
+
+ if (!chip->bus) {
+ chip->disabled = disabled;
+ if (!disabled) {
+ snd_printk(KERN_INFO SFX
+ "%s: Start delayed initialization\n",
+ pci_name(chip->pci));
+ if (azx_first_init(chip) < 0 ||
+ azx_probe_continue(chip) < 0) {
+ snd_printk(KERN_ERR SFX
+ "%s: initialization error\n",
+ pci_name(chip->pci));
+ chip->init_failed = true;
+ }
+ }
+ } else {
+ snd_printk(KERN_INFO SFX
+ "%s %s via VGA-switcheroo\n",
+ disabled ? "Disabling" : "Enabling",
+ pci_name(chip->pci));
+ if (disabled) {
+ azx_suspend(pci, PMSG_FREEZE);
+ chip->disabled = true;
+ snd_hda_lock_devices(chip->bus);
+ } else {
+ snd_hda_unlock_devices(chip->bus);
+ chip->disabled = false;
+ azx_resume(pci);
+ }
+ }
+}
+
+static bool azx_vs_can_switch(struct pci_dev *pci)
+{
+ struct snd_card *card = pci_get_drvdata(pci);
+ struct azx *chip = card->private_data;
+
+ if (chip->init_failed)
+ return false;
+ if (chip->disabled || !chip->bus)
+ return true;
+ if (snd_hda_lock_devices(chip->bus))
+ return false;
+ snd_hda_unlock_devices(chip->bus);
+ return true;
+}
+
+static void __devinit init_vga_switcheroo(struct azx *chip)
+{
+ struct pci_dev *p = get_bound_vga(chip->pci);
+ if (p) {
+ snd_printk(KERN_INFO SFX
+ "%s: Handle VGA-switcheroo audio client\n",
+ pci_name(chip->pci));
+ chip->use_vga_switcheroo = 1;
+ pci_dev_put(p);
+ }
+}
+
+static const struct vga_switcheroo_client_ops azx_vs_ops = {
+ .set_gpu_state = azx_vs_set_state,
+ .can_switch = azx_vs_can_switch,
+};
+
+static int __devinit register_vga_switcheroo(struct azx *chip)
+{
+ if (!chip->use_vga_switcheroo)
+ return 0;
+ /* FIXME: currently only handling DIS controller
+ * is there any machine with two switchable HDMI audio controllers?
+ */
+ return vga_switcheroo_register_audio_client(chip->pci, &azx_vs_ops,
+ VGA_SWITCHEROO_DIS,
+ chip->bus != NULL);
+}
+#else
+#define init_vga_switcheroo(chip) /* NOP */
+#define register_vga_switcheroo(chip) 0
+#endif /* SUPPORT_VGA_SWITCHEROO */
+
/*
* destructor
*/
@@ -2453,6 +2589,12 @@ static int azx_free(struct azx *chip)
azx_notifier_unregister(chip);
+ if (use_vga_switcheroo(chip)) {
+ if (chip->disabled && chip->bus)
+ snd_hda_unlock_devices(chip->bus);
+ vga_switcheroo_unregister_client(chip->pci);
+ }
+
if (chip->initialized) {
azx_clear_irq_pending(chip);
for (i = 0; i < chip->num_streams; i++)
@@ -2482,7 +2624,8 @@ static int azx_free(struct azx *chip)
mark_pages_wc(chip, &chip->posbuf, false);
snd_dma_free_pages(&chip->posbuf);
}
- pci_release_regions(chip->pci);
+ if (chip->region_requested)
+ pci_release_regions(chip->pci);
pci_disable_device(chip->pci);
kfree(chip->azx_dev);
kfree(chip);
@@ -2496,6 +2639,45 @@ static int azx_dev_free(struct snd_device *device)
}
/*
+ * Check whether the HDMI controller is disabled by vga-switcheroo
+ */
+static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci)
+{
+ struct pci_dev *p;
+
+ /* check only discrete GPU */
+ switch (pci->vendor) {
+ case PCI_VENDOR_ID_ATI:
+ case PCI_VENDOR_ID_AMD:
+ case PCI_VENDOR_ID_NVIDIA:
+ if (pci->devfn == 1) {
+ p = pci_get_domain_bus_and_slot(pci_domain_nr(pci->bus),
+ pci->bus->number, 0);
+ if (p) {
+ if ((p->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+ return p;
+ pci_dev_put(p);
+ }
+ }
+ break;
+ }
+ return NULL;
+}
+
+static bool __devinit check_hdmi_disabled(struct pci_dev *pci)
+{
+ bool vga_inactive = false;
+ struct pci_dev *p = get_bound_vga(pci);
+
+ if (p) {
+ if (vga_default_device() && p != vga_default_device())
+ vga_inactive = true;
+ pci_dev_put(p);
+ }
+ return vga_inactive;
+}
+
+/*
* white/black-listing for position_fix
*/
static struct snd_pci_quirk position_fix_list[] __devinitdata = {
@@ -2672,12 +2854,11 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
int dev, unsigned int driver_caps,
struct azx **rchip)
{
- struct azx *chip;
- int i, err;
- unsigned short gcap;
static struct snd_device_ops ops = {
.dev_free = azx_dev_free,
};
+ struct azx *chip;
+ int err;
*rchip = NULL;
@@ -2703,6 +2884,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
chip->dev_index = dev;
INIT_WORK(&chip->irq_pending_work, azx_irq_pending_work);
INIT_LIST_HEAD(&chip->pcm_list);
+ init_vga_switcheroo(chip);
chip->position_fix[0] = chip->position_fix[1] =
check_position_fix(chip, position_fix[dev]);
@@ -2730,6 +2912,53 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
}
}
+ if (check_hdmi_disabled(pci)) {
+ snd_printk(KERN_INFO SFX "VGA controller for %s is disabled\n",
+ pci_name(pci));
+ if (use_vga_switcheroo(chip)) {
+ snd_printk(KERN_INFO SFX "Delaying initialization\n");
+ chip->disabled = true;
+ goto ok;
+ }
+ kfree(chip);
+ pci_disable_device(pci);
+ return -ENXIO;
+ }
+
+ err = azx_first_init(chip);
+ if (err < 0) {
+ azx_free(chip);
+ return err;
+ }
+
+ ok:
+ err = register_vga_switcheroo(chip);
+ if (err < 0) {
+ snd_printk(KERN_ERR SFX
+ "Error registering VGA-switcheroo client\n");
+ azx_free(chip);
+ return err;
+ }
+
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
+ snd_printk(KERN_ERR SFX "Error creating device [card]!\n");
+ azx_free(chip);
+ return err;
+ }
+
+ *rchip = chip;
+ return 0;
+}
+
+static int DELAYED_INIT_MARK azx_first_init(struct azx *chip)
+{
+ int dev = chip->dev_index;
+ struct pci_dev *pci = chip->pci;
+ struct snd_card *card = chip->card;
+ int i, err;
+ unsigned short gcap;
+
#if BITS_PER_LONG != 64
/* Fix up base address on ULI M5461 */
if (chip->driver_type == AZX_DRIVER_ULI) {
@@ -2741,28 +2970,23 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
#endif
err = pci_request_regions(pci, "ICH HD audio");
- if (err < 0) {
- kfree(chip);
- pci_disable_device(pci);
+ if (err < 0)
return err;
- }
+ chip->region_requested = 1;
chip->addr = pci_resource_start(pci, 0);
chip->remap_addr = pci_ioremap_bar(pci, 0);
if (chip->remap_addr == NULL) {
snd_printk(KERN_ERR SFX "ioremap error\n");
- err = -ENXIO;
- goto errout;
+ return -ENXIO;
}
if (chip->msi)
if (pci_enable_msi(pci) < 0)
chip->msi = 0;
- if (azx_acquire_irq(chip, 0) < 0) {
- err = -EBUSY;
- goto errout;
- }
+ if (azx_acquire_irq(chip, 0) < 0)
+ return -EBUSY;
pci_set_master(pci);
synchronize_irq(chip->irq);
@@ -2841,7 +3065,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
GFP_KERNEL);
if (!chip->azx_dev) {
snd_printk(KERN_ERR SFX "cannot malloc azx_dev\n");
- goto errout;
+ return -ENOMEM;
}
for (i = 0; i < chip->num_streams; i++) {
@@ -2851,7 +3075,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
BDL_SIZE, &chip->azx_dev[i].bdl);
if (err < 0) {
snd_printk(KERN_ERR SFX "cannot allocate BDL\n");
- goto errout;
+ return -ENOMEM;
}
mark_pages_wc(chip, &chip->azx_dev[i].bdl, true);
}
@@ -2861,13 +3085,13 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
chip->num_streams * 8, &chip->posbuf);
if (err < 0) {
snd_printk(KERN_ERR SFX "cannot allocate posbuf\n");
- goto errout;
+ return -ENOMEM;
}
mark_pages_wc(chip, &chip->posbuf, true);
/* allocate CORB/RIRB */
err = azx_alloc_cmd_io(chip);
if (err < 0)
- goto errout;
+ return err;
/* initialize streams */
azx_init_stream(chip);
@@ -2879,14 +3103,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
/* codec detection */
if (!chip->codec_mask) {
snd_printk(KERN_ERR SFX "no codecs found!\n");
- err = -ENODEV;
- goto errout;
- }
-
- err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
- if (err <0) {
- snd_printk(KERN_ERR SFX "Error creating device [card]!\n");
- goto errout;
+ return -ENODEV;
}
strcpy(card->driver, "HDA-Intel");
@@ -2896,12 +3113,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
"%s at 0x%lx irq %i",
card->shortname, chip->addr, chip->irq);
- *rchip = chip;
return 0;
-
- errout:
- azx_free(chip);
- return err;
}
static void power_down_all_codecs(struct azx *chip)
@@ -2946,6 +3158,27 @@ static int __devinit azx_probe(struct pci_dev *pci,
goto out_free;
card->private_data = chip;
+ if (!chip->disabled) {
+ err = azx_probe_continue(chip);
+ if (err < 0)
+ goto out_free;
+ }
+
+ pci_set_drvdata(pci, card);
+
+ dev++;
+ return 0;
+
+out_free:
+ snd_card_free(card);
+ return err;
+}
+
+static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip)
+{
+ int dev = chip->dev_index;
+ int err;
+
#ifdef CONFIG_SND_HDA_INPUT_BEEP
chip->beep_mode = beep_mode[dev];
#endif
@@ -2979,25 +3212,26 @@ static int __devinit azx_probe(struct pci_dev *pci,
if (err < 0)
goto out_free;
- err = snd_card_register(card);
+ err = snd_card_register(chip->card);
if (err < 0)
goto out_free;
- pci_set_drvdata(pci, card);
chip->running = 1;
power_down_all_codecs(chip);
azx_notifier_register(chip);
- dev++;
- return err;
+ return 0;
+
out_free:
- snd_card_free(card);
+ chip->init_failed = 1;
return err;
}
static void __devexit azx_remove(struct pci_dev *pci)
{
- snd_card_free(pci_get_drvdata(pci));
+ struct snd_card *card = pci_get_drvdata(pci);
+ if (card)
+ snd_card_free(card);
pci_set_drvdata(pci, NULL);
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index ff71dcef08e..224410e8e9e 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2368,6 +2368,7 @@ static struct alc_codec_rename_table rename_tbl[] = {
{ 0x10ec0269, 0xffff, 0xa023, "ALC259" },
{ 0x10ec0269, 0xffff, 0x6023, "ALC281X" },
{ 0x10ec0269, 0x00f0, 0x0020, "ALC269VC" },
+ { 0x10ec0269, 0x00f0, 0x0030, "ALC269VD" },
{ 0x10ec0887, 0x00f0, 0x0030, "ALC887-VD" },
{ 0x10ec0888, 0x00f0, 0x0030, "ALC888-VD" },
{ 0x10ec0888, 0xf0f0, 0x3020, "ALC886" },
@@ -5614,6 +5615,7 @@ enum {
ALC269_TYPE_ALC269VA,
ALC269_TYPE_ALC269VB,
ALC269_TYPE_ALC269VC,
+ ALC269_TYPE_ALC269VD,
};
/*
@@ -5625,8 +5627,21 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
static const hda_nid_t alc269_ssids[] = { 0, 0x1b, 0x14, 0x21 };
static const hda_nid_t alc269va_ssids[] = { 0x15, 0x1b, 0x14, 0 };
struct alc_spec *spec = codec->spec;
- const hda_nid_t *ssids = spec->codec_variant == ALC269_TYPE_ALC269VA ?
- alc269va_ssids : alc269_ssids;
+ const hda_nid_t *ssids;
+
+ switch (spec->codec_variant) {
+ case ALC269_TYPE_ALC269VA:
+ case ALC269_TYPE_ALC269VC:
+ ssids = alc269va_ssids;
+ break;
+ case ALC269_TYPE_ALC269VB:
+ case ALC269_TYPE_ALC269VD:
+ ssids = alc269_ssids;
+ break;
+ default:
+ ssids = alc269_ssids;
+ break;
+ }
return alc_parse_auto_config(codec, alc269_ignore, ssids);
}
@@ -5643,6 +5658,11 @@ static void alc269_toggle_power_output(struct hda_codec *codec, int power_up)
static void alc269_shutup(struct hda_codec *codec)
{
+ struct alc_spec *spec = codec->spec;
+
+ if (spec->codec_variant != ALC269_TYPE_ALC269VB)
+ return;
+
if ((alc_get_coef0(codec) & 0x00ff) == 0x017)
alc269_toggle_power_output(codec, 0);
if ((alc_get_coef0(codec) & 0x00ff) == 0x018) {
@@ -5654,19 +5674,24 @@ static void alc269_shutup(struct hda_codec *codec)
#ifdef CONFIG_PM
static int alc269_resume(struct hda_codec *codec)
{
- if ((alc_get_coef0(codec) & 0x00ff) == 0x018) {
+ struct alc_spec *spec = codec->spec;
+
+ if (spec->codec_variant == ALC269_TYPE_ALC269VB ||
+ (alc_get_coef0(codec) & 0x00ff) == 0x018) {
alc269_toggle_power_output(codec, 0);
msleep(150);
}
codec->patch_ops.init(codec);
- if ((alc_get_coef0(codec) & 0x00ff) == 0x017) {
+ if (spec->codec_variant == ALC269_TYPE_ALC269VB ||
+ (alc_get_coef0(codec) & 0x00ff) == 0x017) {
alc269_toggle_power_output(codec, 1);
msleep(200);
}
- if ((alc_get_coef0(codec) & 0x00ff) == 0x018)
+ if (spec->codec_variant == ALC269_TYPE_ALC269VB ||
+ (alc_get_coef0(codec) & 0x00ff) == 0x018)
alc269_toggle_power_output(codec, 1);
snd_hda_codec_resume_amp(codec);
@@ -6081,6 +6106,9 @@ static int patch_alc269(struct hda_codec *codec)
err = alc_codec_rename(codec, "ALC3202");
spec->codec_variant = ALC269_TYPE_ALC269VC;
break;
+ case 0x0030:
+ spec->codec_variant = ALC269_TYPE_ALC269VD;
+ break;
default:
alc_fix_pll_init(codec, 0x20, 0x04, 15);
}
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index 3cb9aa4299d..fa455675045 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/mbus.h>
#include <linux/delay.h>
+#include <linux/clk.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
@@ -449,6 +450,14 @@ static __devinit int kirkwood_i2s_dev_probe(struct platform_device *pdev)
priv->burst = data->burst;
+ priv->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "no clock\n");
+ err = PTR_ERR(priv->clk);
+ goto err_ioremap;
+ }
+ clk_prepare_enable(priv->clk);
+
return snd_soc_register_dai(&pdev->dev, &kirkwood_i2s_dai);
err_ioremap:
@@ -466,6 +475,10 @@ static __devexit int kirkwood_i2s_dev_remove(struct platform_device *pdev)
struct kirkwood_dma_data *priv = dev_get_drvdata(&pdev->dev);
snd_soc_unregister_dai(&pdev->dev);
+
+ clk_disable_unprepare(priv->clk);
+ clk_put(priv->clk);
+
iounmap(priv->io);
release_mem_region(priv->mem->start, SZ_16K);
kfree(priv);
diff --git a/sound/soc/kirkwood/kirkwood.h b/sound/soc/kirkwood/kirkwood.h
index 9047436b393..f9084d83e6b 100644
--- a/sound/soc/kirkwood/kirkwood.h
+++ b/sound/soc/kirkwood/kirkwood.h
@@ -123,6 +123,7 @@ struct kirkwood_dma_data {
void __iomem *io;
int irq;
int burst;
+ struct clk *clk;
};
#endif
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
index 9ccfa5e1c11..57a2fa75108 100644
--- a/sound/soc/omap/Kconfig
+++ b/sound/soc/omap/Kconfig
@@ -109,11 +109,12 @@ config SND_OMAP_SOC_OMAP_ABE_TWL6040
- PandaBoard (4430)
- PandaBoardES (4460)
-config SND_OMAP_SOC_OMAP4_HDMI
- tristate "SoC Audio support for Texas Instruments OMAP4 HDMI"
- depends on SND_OMAP_SOC && OMAP4_DSS_HDMI && OMAP2_DSS && ARCH_OMAP4
+config SND_OMAP_SOC_OMAP_HDMI
+ tristate "SoC Audio support for Texas Instruments OMAP HDMI"
+ depends on SND_OMAP_SOC && OMAP4_DSS_HDMI && OMAP2_DSS
select SND_OMAP_SOC_HDMI
select SND_SOC_OMAP_HDMI_CODEC
+ select OMAP4_DSS_HDMI_AUDIO
help
Say Y if you want to add support for SoC HDMI audio on Texas Instruments
OMAP4 chips
diff --git a/sound/soc/omap/Makefile b/sound/soc/omap/Makefile
index 1d656bce01d..0e14dd32256 100644
--- a/sound/soc/omap/Makefile
+++ b/sound/soc/omap/Makefile
@@ -25,7 +25,7 @@ snd-soc-omap3pandora-objs := omap3pandora.o
snd-soc-omap3beagle-objs := omap3beagle.o
snd-soc-zoom2-objs := zoom2.o
snd-soc-igep0020-objs := igep0020.o
-snd-soc-omap4-hdmi-objs := omap4-hdmi-card.o
+snd-soc-omap-hdmi-card-objs := omap-hdmi-card.o
obj-$(CONFIG_SND_OMAP_SOC_N810) += snd-soc-n810.o
obj-$(CONFIG_SND_OMAP_SOC_RX51) += snd-soc-rx51.o
@@ -41,4 +41,4 @@ obj-$(CONFIG_SND_OMAP_SOC_OMAP3_PANDORA) += snd-soc-omap3pandora.o
obj-$(CONFIG_SND_OMAP_SOC_OMAP3_BEAGLE) += snd-soc-omap3beagle.o
obj-$(CONFIG_SND_OMAP_SOC_ZOOM2) += snd-soc-zoom2.o
obj-$(CONFIG_SND_OMAP_SOC_IGEP0020) += snd-soc-igep0020.o
-obj-$(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) += snd-soc-omap4-hdmi.o
+obj-$(CONFIG_SND_OMAP_SOC_OMAP_HDMI) += snd-soc-omap-hdmi-card.o
diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c
index e5f44440d1b..34835e8a916 100644
--- a/sound/soc/omap/mcbsp.c
+++ b/sound/soc/omap/mcbsp.c
@@ -109,6 +109,47 @@ static void omap_mcbsp_dump_reg(struct omap_mcbsp *mcbsp)
dev_dbg(mcbsp->dev, "***********************\n");
}
+static irqreturn_t omap_mcbsp_irq_handler(int irq, void *dev_id)
+{
+ struct omap_mcbsp *mcbsp = dev_id;
+ u16 irqst;
+
+ irqst = MCBSP_READ(mcbsp, IRQST);
+ dev_dbg(mcbsp->dev, "IRQ callback : 0x%x\n", irqst);
+
+ if (irqst & RSYNCERREN)
+ dev_err(mcbsp->dev, "RX Frame Sync Error!\n");
+ if (irqst & RFSREN)
+ dev_dbg(mcbsp->dev, "RX Frame Sync\n");
+ if (irqst & REOFEN)
+ dev_dbg(mcbsp->dev, "RX End Of Frame\n");
+ if (irqst & RRDYEN)
+ dev_dbg(mcbsp->dev, "RX Buffer Threshold Reached\n");
+ if (irqst & RUNDFLEN)
+ dev_err(mcbsp->dev, "RX Buffer Underflow!\n");
+ if (irqst & ROVFLEN)
+ dev_err(mcbsp->dev, "RX Buffer Overflow!\n");
+
+ if (irqst & XSYNCERREN)
+ dev_err(mcbsp->dev, "TX Frame Sync Error!\n");
+ if (irqst & XFSXEN)
+ dev_dbg(mcbsp->dev, "TX Frame Sync\n");
+ if (irqst & XEOFEN)
+ dev_dbg(mcbsp->dev, "TX End Of Frame\n");
+ if (irqst & XRDYEN)
+ dev_dbg(mcbsp->dev, "TX Buffer threshold Reached\n");
+ if (irqst & XUNDFLEN)
+ dev_err(mcbsp->dev, "TX Buffer Underflow!\n");
+ if (irqst & XOVFLEN)
+ dev_err(mcbsp->dev, "TX Buffer Overflow!\n");
+ if (irqst & XEMPTYEOFEN)
+ dev_dbg(mcbsp->dev, "TX Buffer empty at end of frame\n");
+
+ MCBSP_WRITE(mcbsp, IRQST, irqst);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t omap_mcbsp_tx_irq_handler(int irq, void *dev_id)
{
struct omap_mcbsp *mcbsp_tx = dev_id;
@@ -176,6 +217,10 @@ void omap_mcbsp_config(struct omap_mcbsp *mcbsp,
/* Enable wakeup behavior */
if (mcbsp->pdata->has_wakeup)
MCBSP_WRITE(mcbsp, WAKEUPEN, XRDYEN | RRDYEN);
+
+ /* Enable TX/RX sync error interrupts by default */
+ if (mcbsp->irq)
+ MCBSP_WRITE(mcbsp, IRQEN, RSYNCERREN | XSYNCERREN);
}
/**
@@ -489,23 +534,25 @@ int omap_mcbsp_request(struct omap_mcbsp *mcbsp)
MCBSP_WRITE(mcbsp, SPCR1, 0);
MCBSP_WRITE(mcbsp, SPCR2, 0);
- err = request_irq(mcbsp->tx_irq, omap_mcbsp_tx_irq_handler,
- 0, "McBSP", (void *)mcbsp);
- if (err != 0) {
- dev_err(mcbsp->dev, "Unable to request TX IRQ %d "
- "for McBSP%d\n", mcbsp->tx_irq,
- mcbsp->id);
- goto err_clk_disable;
- }
+ if (mcbsp->irq) {
+ err = request_irq(mcbsp->irq, omap_mcbsp_irq_handler, 0,
+ "McBSP", (void *)mcbsp);
+ if (err != 0) {
+ dev_err(mcbsp->dev, "Unable to request IRQ\n");
+ goto err_clk_disable;
+ }
+ } else {
+ err = request_irq(mcbsp->tx_irq, omap_mcbsp_tx_irq_handler, 0,
+ "McBSP TX", (void *)mcbsp);
+ if (err != 0) {
+ dev_err(mcbsp->dev, "Unable to request TX IRQ\n");
+ goto err_clk_disable;
+ }
- if (mcbsp->rx_irq) {
- err = request_irq(mcbsp->rx_irq,
- omap_mcbsp_rx_irq_handler,
- 0, "McBSP", (void *)mcbsp);
+ err = request_irq(mcbsp->rx_irq, omap_mcbsp_rx_irq_handler, 0,
+ "McBSP RX", (void *)mcbsp);
if (err != 0) {
- dev_err(mcbsp->dev, "Unable to request RX IRQ %d "
- "for McBSP%d\n", mcbsp->rx_irq,
- mcbsp->id);
+ dev_err(mcbsp->dev, "Unable to request RX IRQ\n");
goto err_free_irq;
}
}
@@ -542,9 +589,16 @@ void omap_mcbsp_free(struct omap_mcbsp *mcbsp)
if (mcbsp->pdata->has_wakeup)
MCBSP_WRITE(mcbsp, WAKEUPEN, 0);
- if (mcbsp->rx_irq)
+ /* Disable interrupt requests */
+ if (mcbsp->irq)
+ MCBSP_WRITE(mcbsp, IRQEN, 0);
+
+ if (mcbsp->irq) {
+ free_irq(mcbsp->irq, (void *)mcbsp);
+ } else {
free_irq(mcbsp->rx_irq, (void *)mcbsp);
- free_irq(mcbsp->tx_irq, (void *)mcbsp);
+ free_irq(mcbsp->tx_irq, (void *)mcbsp);
+ }
reg_cache = mcbsp->reg_cache;
@@ -754,7 +808,7 @@ THRESHOLD_PROP_BUILDER(max_tx_thres);
THRESHOLD_PROP_BUILDER(max_rx_thres);
static const char *dma_op_modes[] = {
- "element", "threshold", "frame",
+ "element", "threshold",
};
static ssize_t dma_op_mode_show(struct device *dev,
@@ -949,13 +1003,24 @@ int __devinit omap_mcbsp_init(struct platform_device *pdev)
else
mcbsp->phys_dma_base = res->start;
- mcbsp->tx_irq = platform_get_irq_byname(pdev, "tx");
- mcbsp->rx_irq = platform_get_irq_byname(pdev, "rx");
-
- /* From OMAP4 there will be a single irq line */
- if (mcbsp->tx_irq == -ENXIO) {
- mcbsp->tx_irq = platform_get_irq(pdev, 0);
- mcbsp->rx_irq = 0;
+ /*
+	 * OMAP1 and OMAP2 use two interrupt lines: TX and RX.
+	 * OMAP2430 and OMAP3 SoCs have a combined IRQ line as well.
+	 * OMAP4 and newer SoCs only have the combined IRQ line.
+ * Use the combined IRQ if available since it gives better debugging
+ * possibilities.
+ */
+ mcbsp->irq = platform_get_irq_byname(pdev, "common");
+ if (mcbsp->irq == -ENXIO) {
+ mcbsp->tx_irq = platform_get_irq_byname(pdev, "tx");
+
+ if (mcbsp->tx_irq == -ENXIO) {
+ mcbsp->irq = platform_get_irq(pdev, 0);
+ mcbsp->tx_irq = 0;
+ } else {
+ mcbsp->rx_irq = platform_get_irq_byname(pdev, "rx");
+ mcbsp->irq = 0;
+ }
}
res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
diff --git a/sound/soc/omap/mcbsp.h b/sound/soc/omap/mcbsp.h
index a944fcc9073..262a6152111 100644
--- a/sound/soc/omap/mcbsp.h
+++ b/sound/soc/omap/mcbsp.h
@@ -217,17 +217,20 @@ enum {
/********************** McBSP DMA operating modes **************************/
#define MCBSP_DMA_MODE_ELEMENT 0
#define MCBSP_DMA_MODE_THRESHOLD 1
-#define MCBSP_DMA_MODE_FRAME 2
-/********************** McBSP WAKEUPEN bit definitions *********************/
+/********************** McBSP WAKEUPEN/IRQST/IRQEN bit definitions *********/
#define RSYNCERREN BIT(0)
#define RFSREN BIT(1)
#define REOFEN BIT(2)
#define RRDYEN BIT(3)
+#define RUNDFLEN BIT(4)
+#define ROVFLEN BIT(5)
#define XSYNCERREN BIT(7)
#define XFSXEN BIT(8)
#define XEOFEN BIT(9)
#define XRDYEN BIT(10)
+#define XUNDFLEN BIT(11)
+#define XOVFLEN BIT(12)
#define XEMPTYEOFEN BIT(14)
/* Clock signal muxing options */
@@ -295,6 +298,7 @@ struct omap_mcbsp {
int configured;
u8 free;
+ int irq;
int rx_irq;
int tx_irq;
diff --git a/sound/soc/omap/omap-abe-twl6040.c b/sound/soc/omap/omap-abe-twl6040.c
index 93bb8eee22b..9d93793d307 100644
--- a/sound/soc/omap/omap-abe-twl6040.c
+++ b/sound/soc/omap/omap-abe-twl6040.c
@@ -40,6 +40,11 @@
#include "omap-pcm.h"
#include "../codecs/twl6040.h"
+struct abe_twl6040 {
+ int jack_detection; /* board can detect jack events */
+ int mclk_freq; /* MCLK frequency speed for twl6040 */
+};
+
static int omap_abe_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
@@ -47,13 +52,13 @@ static int omap_abe_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_card *card = codec->card;
- struct omap_abe_twl6040_data *pdata = dev_get_platdata(card->dev);
+ struct abe_twl6040 *priv = snd_soc_card_get_drvdata(card);
int clk_id, freq;
int ret;
clk_id = twl6040_get_clk_id(rtd->codec);
if (clk_id == TWL6040_SYSCLK_SEL_HPPLL)
- freq = pdata->mclk_freq;
+ freq = priv->mclk_freq;
else if (clk_id == TWL6040_SYSCLK_SEL_LPPLL)
freq = 32768;
else
@@ -128,6 +133,9 @@ static const struct snd_soc_dapm_widget twl6040_dapm_widgets[] = {
SND_SOC_DAPM_MIC("Main Handset Mic", NULL),
SND_SOC_DAPM_MIC("Sub Handset Mic", NULL),
SND_SOC_DAPM_LINE("Line In", NULL),
+
+ /* Digital microphones */
+ SND_SOC_DAPM_MIC("Digital Mic", NULL),
};
static const struct snd_soc_dapm_route audio_map[] = {
@@ -173,6 +181,7 @@ static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd)
struct snd_soc_card *card = codec->card;
struct snd_soc_dapm_context *dapm = &codec->dapm;
struct omap_abe_twl6040_data *pdata = dev_get_platdata(card->dev);
+ struct abe_twl6040 *priv = snd_soc_card_get_drvdata(card);
int hs_trim;
int ret = 0;
@@ -196,7 +205,7 @@ static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd)
TWL6040_HSF_TRIM_RIGHT(hs_trim));
/* Headset jack detection only if it is supported */
- if (pdata->jack_detection) {
+ if (priv->jack_detection) {
ret = snd_soc_jack_new(codec, "Headset Jack",
SND_JACK_HEADSET, &hs_jack);
if (ret)
@@ -210,10 +219,6 @@ static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd)
return ret;
}
-static const struct snd_soc_dapm_widget dmic_dapm_widgets[] = {
- SND_SOC_DAPM_MIC("Digital Mic", NULL),
-};
-
static const struct snd_soc_dapm_route dmic_audio_map[] = {
{"DMic", NULL, "Digital Mic"},
{"Digital Mic", NULL, "Digital Mic1 Bias"},
@@ -223,19 +228,13 @@ static int omap_abe_dmic_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_dapm_context *dapm = &codec->dapm;
- int ret;
-
- ret = snd_soc_dapm_new_controls(dapm, dmic_dapm_widgets,
- ARRAY_SIZE(dmic_dapm_widgets));
- if (ret)
- return ret;
return snd_soc_dapm_add_routes(dapm, dmic_audio_map,
ARRAY_SIZE(dmic_audio_map));
}
/* Digital audio interface glue - connects codec <--> CPU */
-static struct snd_soc_dai_link twl6040_dmic_dai[] = {
+static struct snd_soc_dai_link abe_twl6040_dai_links[] = {
{
.name = "TWL6040",
.stream_name = "TWL6040",
@@ -258,19 +257,6 @@ static struct snd_soc_dai_link twl6040_dmic_dai[] = {
},
};
-static struct snd_soc_dai_link twl6040_only_dai[] = {
- {
- .name = "TWL6040",
- .stream_name = "TWL6040",
- .cpu_dai_name = "omap-mcpdm",
- .codec_dai_name = "twl6040-legacy",
- .platform_name = "omap-pcm-audio",
- .codec_name = "twl6040-codec",
- .init = omap_abe_twl6040_init,
- .ops = &omap_abe_ops,
- },
-};
-
/* Audio machine driver */
static struct snd_soc_card omap_abe_card = {
.owner = THIS_MODULE,
@@ -285,6 +271,8 @@ static __devinit int omap_abe_probe(struct platform_device *pdev)
{
struct omap_abe_twl6040_data *pdata = dev_get_platdata(&pdev->dev);
struct snd_soc_card *card = &omap_abe_card;
+ struct abe_twl6040 *priv;
+ int num_links = 0;
int ret;
card->dev = &pdev->dev;
@@ -294,6 +282,10 @@ static __devinit int omap_abe_probe(struct platform_device *pdev)
return -ENODEV;
}
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct abe_twl6040), GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
+
if (pdata->card_name) {
card->name = pdata->card_name;
} else {
@@ -301,18 +293,24 @@ static __devinit int omap_abe_probe(struct platform_device *pdev)
return -ENODEV;
}
- if (!pdata->mclk_freq) {
+ priv->jack_detection = pdata->jack_detection;
+ priv->mclk_freq = pdata->mclk_freq;
+
+
+ if (!priv->mclk_freq) {
dev_err(&pdev->dev, "MCLK frequency missing\n");
return -ENODEV;
}
- if (pdata->has_dmic) {
- card->dai_link = twl6040_dmic_dai;
- card->num_links = ARRAY_SIZE(twl6040_dmic_dai);
- } else {
- card->dai_link = twl6040_only_dai;
- card->num_links = ARRAY_SIZE(twl6040_only_dai);
- }
+ if (pdata->has_dmic)
+ num_links = 2;
+ else
+ num_links = 1;
+
+ card->dai_link = abe_twl6040_dai_links;
+ card->num_links = num_links;
+
+ snd_soc_card_set_drvdata(card, priv);
ret = snd_soc_register_card(card);
if (ret)
diff --git a/sound/soc/omap/omap-dmic.c b/sound/soc/omap/omap-dmic.c
index 4dcb5a7e40e..75f5dca0e8d 100644
--- a/sound/soc/omap/omap-dmic.c
+++ b/sound/soc/omap/omap-dmic.c
@@ -32,6 +32,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/of_device.h>
#include <plat/dma.h>
#include <sound/core.h>
@@ -528,10 +529,17 @@ static int __devexit asoc_dmic_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id omap_dmic_of_match[] = {
+ { .compatible = "ti,omap4-dmic", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, omap_dmic_of_match);
+
static struct platform_driver asoc_dmic_driver = {
.driver = {
.name = "omap-dmic",
.owner = THIS_MODULE,
+ .of_match_table = omap_dmic_of_match,
},
.probe = asoc_dmic_probe,
.remove = __devexit_p(asoc_dmic_remove),
diff --git a/sound/soc/omap/omap-hdmi-card.c b/sound/soc/omap/omap-hdmi-card.c
new file mode 100644
index 00000000000..eaa2ea0e3f8
--- /dev/null
+++ b/sound/soc/omap/omap-hdmi-card.c
@@ -0,0 +1,87 @@
+/*
+ * omap-hdmi-card.c
+ *
+ * OMAP ALSA SoC machine driver for TI OMAP HDMI
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Ricardo Neri <ricardo.neri@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <asm/mach-types.h>
+#include <video/omapdss.h>
+
+#define DRV_NAME "omap-hdmi-audio"
+
+static struct snd_soc_dai_link omap_hdmi_dai = {
+ .name = "HDMI",
+ .stream_name = "HDMI",
+ .cpu_dai_name = "omap-hdmi-audio-dai",
+ .platform_name = "omap-pcm-audio",
+ .codec_name = "hdmi-audio-codec",
+ .codec_dai_name = "omap-hdmi-hifi",
+};
+
+static struct snd_soc_card snd_soc_omap_hdmi = {
+ .name = "OMAPHDMI",
+ .owner = THIS_MODULE,
+ .dai_link = &omap_hdmi_dai,
+ .num_links = 1,
+};
+
+static __devinit int omap_hdmi_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &snd_soc_omap_hdmi;
+ int ret;
+
+ card->dev = &pdev->dev;
+
+ ret = snd_soc_register_card(card);
+ if (ret) {
+ dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
+ card->dev = NULL;
+ return ret;
+ }
+ return 0;
+}
+
+static int __devexit omap_hdmi_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+ snd_soc_unregister_card(card);
+ card->dev = NULL;
+ return 0;
+}
+
+static struct platform_driver omap_hdmi_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = omap_hdmi_probe,
+ .remove = __devexit_p(omap_hdmi_remove),
+};
+
+module_platform_driver(omap_hdmi_driver);
+
+MODULE_AUTHOR("Ricardo Neri <ricardo.neri@ti.com>");
+MODULE_DESCRIPTION("OMAP HDMI machine ASoC driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/omap/omap-hdmi.c b/sound/soc/omap/omap-hdmi.c
index 38e0defa707..a08245d9203 100644
--- a/sound/soc/omap/omap-hdmi.c
+++ b/sound/soc/omap/omap-hdmi.c
@@ -30,21 +30,28 @@
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
+#include <sound/asound.h>
+#include <sound/asoundef.h>
+#include <video/omapdss.h>
#include <plat/dma.h>
#include "omap-pcm.h"
#include "omap-hdmi.h"
-#define DRV_NAME "hdmi-audio-dai"
+#define DRV_NAME "omap-hdmi-audio-dai"
-static struct omap_pcm_dma_data omap_hdmi_dai_dma_params = {
- .name = "HDMI playback",
- .sync_mode = OMAP_DMA_SYNC_PACKET,
+struct hdmi_priv {
+ struct omap_pcm_dma_data dma_params;
+ struct omap_dss_audio dss_audio;
+ struct snd_aes_iec958 iec;
+ struct snd_cea_861_aud_if cea;
+ struct omap_dss_device *dssdev;
};
static int omap_hdmi_dai_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
+ struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
int err;
/*
* Make sure that the period bytes are multiple of the DMA packet size.
@@ -52,46 +59,201 @@ static int omap_hdmi_dai_startup(struct snd_pcm_substream *substream,
*/
err = snd_pcm_hw_constraint_step(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 128);
- if (err < 0)
+ if (err < 0) {
+ dev_err(dai->dev, "could not apply constraint\n");
return err;
+ }
+ if (!priv->dssdev->driver->audio_supported(priv->dssdev)) {
+ dev_err(dai->dev, "audio not supported\n");
+ return -ENODEV;
+ }
return 0;
}
+static int omap_hdmi_dai_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
+
+ return priv->dssdev->driver->audio_enable(priv->dssdev);
+}
+
static int omap_hdmi_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
+ struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
+ struct snd_aes_iec958 *iec = &priv->iec;
+ struct snd_cea_861_aud_if *cea = &priv->cea;
int err = 0;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
- omap_hdmi_dai_dma_params.packet_size = 16;
+ priv->dma_params.packet_size = 16;
break;
case SNDRV_PCM_FORMAT_S24_LE:
- omap_hdmi_dai_dma_params.packet_size = 32;
+ priv->dma_params.packet_size = 32;
break;
default:
- err = -EINVAL;
+ dev_err(dai->dev, "format not supported!\n");
+ return -EINVAL;
}
- omap_hdmi_dai_dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
+ priv->dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
snd_soc_dai_set_dma_data(dai, substream,
- &omap_hdmi_dai_dma_params);
+ &priv->dma_params);
+
+ /*
+ * fill the IEC-60958 channel status word
+ */
+
+ /* specify IEC-60958-3 (commercial use) */
+ iec->status[0] &= ~IEC958_AES0_PROFESSIONAL;
+
+ /* specify that the audio is LPCM*/
+	/* specify that the audio is LPCM */
+
+ iec->status[0] |= IEC958_AES0_CON_NOT_COPYRIGHT;
+
+ iec->status[0] |= IEC958_AES0_CON_EMPHASIS_NONE;
+
+ iec->status[0] |= IEC958_AES1_PRO_MODE_NOTID;
+
+ iec->status[1] = IEC958_AES1_CON_GENERAL;
+
+ iec->status[2] |= IEC958_AES2_CON_SOURCE_UNSPEC;
+
+ iec->status[2] |= IEC958_AES2_CON_CHANNEL_UNSPEC;
+
+ switch (params_rate(params)) {
+ case 32000:
+ iec->status[3] |= IEC958_AES3_CON_FS_32000;
+ break;
+ case 44100:
+ iec->status[3] |= IEC958_AES3_CON_FS_44100;
+ break;
+ case 48000:
+ iec->status[3] |= IEC958_AES3_CON_FS_48000;
+ break;
+ case 88200:
+ iec->status[3] |= IEC958_AES3_CON_FS_88200;
+ break;
+ case 96000:
+ iec->status[3] |= IEC958_AES3_CON_FS_96000;
+ break;
+ case 176400:
+ iec->status[3] |= IEC958_AES3_CON_FS_176400;
+ break;
+ case 192000:
+ iec->status[3] |= IEC958_AES3_CON_FS_192000;
+ break;
+ default:
+ dev_err(dai->dev, "rate not supported!\n");
+ return -EINVAL;
+ }
+
+ /* specify the clock accuracy */
+ iec->status[3] |= IEC958_AES3_CON_CLOCK_1000PPM;
+
+ /*
+ * specify the word length. The same word length value can mean
+ * two different lengths. Hence, we need to specify the maximum
+ * word length as well.
+ */
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ iec->status[4] |= IEC958_AES4_CON_WORDLEN_20_16;
+ iec->status[4] &= ~IEC958_AES4_CON_MAX_WORDLEN_24;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ iec->status[4] |= IEC958_AES4_CON_WORDLEN_24_20;
+ iec->status[4] |= IEC958_AES4_CON_MAX_WORDLEN_24;
+ break;
+ default:
+ dev_err(dai->dev, "format not supported!\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Fill the CEA-861 audio infoframe (see spec for details)
+ */
+
+ cea->db1_ct_cc = (params_channels(params) - 1)
+ & CEA861_AUDIO_INFOFRAME_DB1CC;
+ cea->db1_ct_cc |= CEA861_AUDIO_INFOFRAME_DB1CT_FROM_STREAM;
+
+ cea->db2_sf_ss = CEA861_AUDIO_INFOFRAME_DB2SF_FROM_STREAM;
+ cea->db2_sf_ss |= CEA861_AUDIO_INFOFRAME_DB2SS_FROM_STREAM;
+
+ cea->db3 = 0; /* not used, all zeros */
+
+ /*
+	 * The OMAP HDMI IP requires the 8-channel channel code to be used
+	 * when transmitting more than two channels.
+ */
+ if (params_channels(params) == 2)
+ cea->db4_ca = 0x0;
+ else
+ cea->db4_ca = 0x13;
+
+ cea->db5_dminh_lsv = CEA861_AUDIO_INFOFRAME_DB5_DM_INH_PROHIBITED;
+ /* the expression is trivial but makes clear what we are doing */
+ cea->db5_dminh_lsv |= (0 & CEA861_AUDIO_INFOFRAME_DB5_LSV);
+
+ priv->dss_audio.iec = iec;
+ priv->dss_audio.cea = cea;
+
+ err = priv->dssdev->driver->audio_config(priv->dssdev,
+ &priv->dss_audio);
return err;
}
+static int omap_hdmi_dai_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
+ int err = 0;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ err = priv->dssdev->driver->audio_start(priv->dssdev);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ priv->dssdev->driver->audio_stop(priv->dssdev);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static void omap_hdmi_dai_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
+
+ priv->dssdev->driver->audio_disable(priv->dssdev);
+}
+
static const struct snd_soc_dai_ops omap_hdmi_dai_ops = {
.startup = omap_hdmi_dai_startup,
.hw_params = omap_hdmi_dai_hw_params,
+ .prepare = omap_hdmi_dai_prepare,
+ .trigger = omap_hdmi_dai_trigger,
+ .shutdown = omap_hdmi_dai_shutdown,
};
static struct snd_soc_dai_driver omap_hdmi_dai = {
.playback = {
.channels_min = 2,
- .channels_max = 2,
+ .channels_max = 8,
.rates = OMAP_HDMI_RATES,
.formats = OMAP_HDMI_FORMATS,
},
@@ -102,31 +264,77 @@ static __devinit int omap_hdmi_probe(struct platform_device *pdev)
{
int ret;
struct resource *hdmi_rsrc;
+ struct hdmi_priv *hdmi_data;
+ bool hdmi_dev_found = false;
+
+ hdmi_data = devm_kzalloc(&pdev->dev, sizeof(*hdmi_data), GFP_KERNEL);
+ if (hdmi_data == NULL) {
+ dev_err(&pdev->dev, "Cannot allocate memory for HDMI data\n");
+ return -ENOMEM;
+ }
hdmi_rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!hdmi_rsrc) {
dev_err(&pdev->dev, "Cannot obtain IORESOURCE_MEM HDMI\n");
- return -EINVAL;
+ return -ENODEV;
}
- omap_hdmi_dai_dma_params.port_addr = hdmi_rsrc->start
+ hdmi_data->dma_params.port_addr = hdmi_rsrc->start
+ OMAP_HDMI_AUDIO_DMA_PORT;
hdmi_rsrc = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (!hdmi_rsrc) {
dev_err(&pdev->dev, "Cannot obtain IORESOURCE_DMA HDMI\n");
- return -EINVAL;
+ return -ENODEV;
}
- omap_hdmi_dai_dma_params.dma_req = hdmi_rsrc->start;
+ hdmi_data->dma_params.dma_req = hdmi_rsrc->start;
+ hdmi_data->dma_params.name = "HDMI playback";
+ hdmi_data->dma_params.sync_mode = OMAP_DMA_SYNC_PACKET;
+
+ /*
+ * TODO: We assume that there is only one DSS HDMI device. Future
+	 * OMAP implementations may support more than one HDMI device and
+	 * we should provide separate audio support for all of them.
+ */
+ /* Find an HDMI device. */
+ for_each_dss_dev(hdmi_data->dssdev) {
+ omap_dss_get_device(hdmi_data->dssdev);
+ if (!hdmi_data->dssdev->driver) {
+ omap_dss_put_device(hdmi_data->dssdev);
+ continue;
+ }
+
+ if (hdmi_data->dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
+ hdmi_dev_found = true;
+ break;
+ }
+ }
+
+ if (!hdmi_dev_found) {
+ dev_err(&pdev->dev, "no driver for HDMI display found\n");
+ return -ENODEV;
+ }
+
+ dev_set_drvdata(&pdev->dev, hdmi_data);
ret = snd_soc_register_dai(&pdev->dev, &omap_hdmi_dai);
+
return ret;
}
static int __devexit omap_hdmi_remove(struct platform_device *pdev)
{
+ struct hdmi_priv *hdmi_data = dev_get_drvdata(&pdev->dev);
+
snd_soc_unregister_dai(&pdev->dev);
+
+ if (hdmi_data == NULL) {
+		dev_err(&pdev->dev, "cannot obtain HDMI data\n");
+ return -ENODEV;
+ }
+
+ omap_dss_put_device(hdmi_data->dssdev);
return 0;
}
diff --git a/sound/soc/omap/omap-hdmi.h b/sound/soc/omap/omap-hdmi.h
index 34c298d5057..6ad2bf4f269 100644
--- a/sound/soc/omap/omap-hdmi.h
+++ b/sound/soc/omap/omap-hdmi.h
@@ -28,7 +28,9 @@
#define OMAP_HDMI_AUDIO_DMA_PORT 0x8c
#define OMAP_HDMI_RATES (SNDRV_PCM_RATE_32000 | \
- SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000)
+ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | \
+ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | \
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)
#define OMAP_HDMI_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_S24_LE)
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index 6912ac7cb62..1046083e90a 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -71,18 +71,17 @@ static void omap_mcbsp_set_threshold(struct snd_pcm_substream *substream)
dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
- /* TODO: Currently, MODE_ELEMENT == MODE_FRAME */
- if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD)
- /*
- * Configure McBSP threshold based on either:
- * packet_size, when the sDMA is in packet mode, or
- * based on the period size.
- */
- if (dma_data->packet_size)
- words = dma_data->packet_size;
- else
- words = snd_pcm_lib_period_bytes(substream) /
- (mcbsp->wlen / 8);
+ /*
+ * Configure McBSP threshold based on either:
+ * packet_size, when the sDMA is in packet mode, or based on the
+ * period size in THRESHOLD mode, otherwise use McBSP threshold = 1
+ * for mono streams.
+ */
+ if (dma_data->packet_size)
+ words = dma_data->packet_size;
+ else if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD)
+ words = snd_pcm_lib_period_bytes(substream) /
+ (mcbsp->wlen / 8);
else
words = 1;
@@ -139,13 +138,15 @@ static int omap_mcbsp_dai_startup(struct snd_pcm_substream *substream,
if (mcbsp->pdata->buffer_size) {
/*
* Rule for the buffer size. We should not allow
- * smaller buffer than the FIFO size to avoid underruns
+ * smaller buffer than the FIFO size to avoid underruns.
+ * This applies only for the playback stream.
*/
- snd_pcm_hw_rule_add(substream->runtime, 0,
- SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
- omap_mcbsp_hwrule_min_buffersize,
- mcbsp,
- SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ omap_mcbsp_hwrule_min_buffersize,
+ mcbsp,
+ SNDRV_PCM_HW_PARAM_CHANNELS, -1);
/* Make sure, that the period size is always even */
snd_pcm_hw_constraint_step(substream->runtime, 0,
@@ -230,6 +231,7 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
unsigned int format, div, framesize, master;
dma_data = &mcbsp->dma_data[substream->stream];
+ channels = params_channels(params);
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
@@ -245,7 +247,6 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
}
if (mcbsp->pdata->buffer_size) {
dma_data->set_threshold = omap_mcbsp_set_threshold;
- /* TODO: Currently, MODE_ELEMENT == MODE_FRAME */
if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD) {
int period_words, max_thrsh;
@@ -283,6 +284,10 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
} else {
sync_mode = OMAP_DMA_SYNC_FRAME;
}
+ } else if (channels > 1) {
+ /* Use packet mode for non-mono streams */
+ pkt_size = channels;
+ sync_mode = OMAP_DMA_SYNC_PACKET;
}
}
@@ -301,7 +306,7 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
regs->rcr1 &= ~(RFRLEN1(0x7f) | RWDLEN1(7));
regs->xcr1 &= ~(XFRLEN1(0x7f) | XWDLEN1(7));
format = mcbsp->fmt & SND_SOC_DAIFMT_FORMAT_MASK;
- wpf = channels = params_channels(params);
+ wpf = channels;
if (channels == 2 && (format == SND_SOC_DAIFMT_I2S ||
format == SND_SOC_DAIFMT_LEFT_J)) {
/* Use dual-phase frames */
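Taken together, the omap-mcbsp.c hunks above implement a single threshold policy: the sDMA packet size wins in packet mode, a full period (expressed in McBSP words) is used in THRESHOLD mode, and mono element-mode streams fall back to a threshold of one word. A standalone sketch of that decision, using made-up parameter names rather than the driver's structures:

/* Illustration only: mirrors the threshold policy above.
 * @wlen_bits is the McBSP word length in bits (mcbsp->wlen). */
static unsigned int mcbsp_threshold_words(unsigned int packet_size,
					  unsigned int period_bytes,
					  unsigned int wlen_bits,
					  int threshold_mode)
{
	if (packet_size)		/* sDMA packet mode */
		return packet_size;
	if (threshold_mode)		/* MCBSP_DMA_MODE_THRESHOLD */
		return period_bytes / (wlen_bits / 8);
	return 1;			/* element mode, e.g. mono stream */
}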
diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c
index 39705561131..59d47ab5b15 100644
--- a/sound/soc/omap/omap-mcpdm.c
+++ b/sound/soc/omap/omap-mcpdm.c
@@ -33,6 +33,7 @@
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/of_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -507,10 +508,17 @@ static int __devexit asoc_mcpdm_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id omap_mcpdm_of_match[] = {
+ { .compatible = "ti,omap4-mcpdm", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, omap_mcpdm_of_match);
+
static struct platform_driver asoc_mcpdm_driver = {
.driver = {
.name = "omap-mcpdm",
.owner = THIS_MODULE,
+ .of_match_table = omap_mcpdm_of_match,
},
.probe = asoc_mcpdm_probe,
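The of_match_table added above, together with MODULE_DEVICE_TABLE(of, ...), is what lets the omap-mcpdm driver bind to a "ti,omap4-mcpdm" device-tree node and be autoloaded when built as a module. The newly included <linux/of_device.h> also provides of_match_device(), which a probe path could use as in this sketch (the helper is hypothetical, not part of the patch):

#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Hypothetical check: did this platform device come from a DT node
 * matched against omap_mcpdm_of_match? */
static bool mcpdm_probed_from_dt(struct platform_device *pdev)
{
	return of_match_device(omap_mcpdm_of_match, &pdev->dev) != NULL;
}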
diff --git a/sound/soc/omap/omap4-hdmi-card.c b/sound/soc/omap/omap4-hdmi-card.c
deleted file mode 100644
index 28d689b2714..00000000000
--- a/sound/soc/omap/omap4-hdmi-card.c
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * omap4-hdmi-card.c
- *
- * OMAP ALSA SoC machine driver for TI OMAP4 HDMI
- * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Ricardo Neri <ricardo.neri@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/module.h>
-#include <sound/pcm.h>
-#include <sound/soc.h>
-#include <asm/mach-types.h>
-#include <video/omapdss.h>
-
-#define DRV_NAME "omap4-hdmi-audio"
-
-static int omap4_hdmi_dai_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- int i;
- struct omap_overlay_manager *mgr = NULL;
- struct device *dev = substream->pcm->card->dev;
-
- /* Find DSS HDMI device */
- for (i = 0; i < omap_dss_get_num_overlay_managers(); i++) {
- mgr = omap_dss_get_overlay_manager(i);
- if (mgr && mgr->device
- && mgr->device->type == OMAP_DISPLAY_TYPE_HDMI)
- break;
- }
-
- if (i == omap_dss_get_num_overlay_managers()) {
- dev_err(dev, "HDMI display device not found!\n");
- return -ENODEV;
- }
-
- /* Make sure HDMI is power-on to avoid L3 interconnect errors */
- if (mgr->device->state != OMAP_DSS_DISPLAY_ACTIVE) {
- dev_err(dev, "HDMI display is not active!\n");
- return -EIO;
- }
-
- return 0;
-}
-
-static struct snd_soc_ops omap4_hdmi_dai_ops = {
- .hw_params = omap4_hdmi_dai_hw_params,
-};
-
-static struct snd_soc_dai_link omap4_hdmi_dai = {
- .name = "HDMI",
- .stream_name = "HDMI",
- .cpu_dai_name = "hdmi-audio-dai",
- .platform_name = "omap-pcm-audio",
- .codec_name = "omapdss_hdmi",
- .codec_dai_name = "hdmi-audio-codec",
- .ops = &omap4_hdmi_dai_ops,
-};
-
-static struct snd_soc_card snd_soc_omap4_hdmi = {
- .name = "OMAP4HDMI",
- .owner = THIS_MODULE,
- .dai_link = &omap4_hdmi_dai,
- .num_links = 1,
-};
-
-static __devinit int omap4_hdmi_probe(struct platform_device *pdev)
-{
- struct snd_soc_card *card = &snd_soc_omap4_hdmi;
- int ret;
-
- card->dev = &pdev->dev;
-
- ret = snd_soc_register_card(card);
- if (ret) {
- dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
- card->dev = NULL;
- return ret;
- }
- return 0;
-}
-
-static int __devexit omap4_hdmi_remove(struct platform_device *pdev)
-{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
-
- snd_soc_unregister_card(card);
- card->dev = NULL;
- return 0;
-}
-
-static struct platform_driver omap4_hdmi_driver = {
- .driver = {
- .name = "omap4-hdmi-audio",
- .owner = THIS_MODULE,
- },
- .probe = omap4_hdmi_probe,
- .remove = __devexit_p(omap4_hdmi_remove),
-};
-
-module_platform_driver(omap4_hdmi_driver);
-
-MODULE_AUTHOR("Ricardo Neri <ricardo.neri@ti.com>");
-MODULE_DESCRIPTION("OMAP4 HDMI machine ASoC driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index 7dab7b25b5c..f576971f655 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -35,6 +35,7 @@
#include <sys/mount.h>
#include <sys/statfs.h>
#include "../../include/linux/magic.h"
+#include "../../include/linux/kernel-page-flags.h"
#ifndef MAX_PATH
@@ -73,33 +74,6 @@
#define KPF_BYTES 8
#define PROC_KPAGEFLAGS "/proc/kpageflags"
-/* copied from kpageflags_read() */
-#define KPF_LOCKED 0
-#define KPF_ERROR 1
-#define KPF_REFERENCED 2
-#define KPF_UPTODATE 3
-#define KPF_DIRTY 4
-#define KPF_LRU 5
-#define KPF_ACTIVE 6
-#define KPF_SLAB 7
-#define KPF_WRITEBACK 8
-#define KPF_RECLAIM 9
-#define KPF_BUDDY 10
-
-/* [11-20] new additions in 2.6.31 */
-#define KPF_MMAP 11
-#define KPF_ANON 12
-#define KPF_SWAPCACHE 13
-#define KPF_SWAPBACKED 14
-#define KPF_COMPOUND_HEAD 15
-#define KPF_COMPOUND_TAIL 16
-#define KPF_HUGE 17
-#define KPF_UNEVICTABLE 18
-#define KPF_HWPOISON 19
-#define KPF_NOPAGE 20
-#define KPF_KSM 21
-#define KPF_THP 22
-
/* [32-] kernel hacking assistances */
#define KPF_RESERVED 32
#define KPF_MLOCKED 33
@@ -326,7 +300,7 @@ static char *page_flag_name(uint64_t flags)
{
static char buf[65];
int present;
- int i, j;
+ size_t i, j;
for (i = 0, j = 0; i < ARRAY_SIZE(page_flag_names); i++) {
present = (flags >> i) & 1;
@@ -344,7 +318,7 @@ static char *page_flag_name(uint64_t flags)
static char *page_flag_longname(uint64_t flags)
{
static char buf[1024];
- int i, n;
+ size_t i, n;
for (i = 0, n = 0; i < ARRAY_SIZE(page_flag_names); i++) {
if (!page_flag_names[i])
@@ -402,7 +376,7 @@ static void show_page(unsigned long voffset,
static void show_summary(void)
{
- int i;
+ size_t i;
printf(" flags\tpage-count MB"
" symbolic-flags\t\t\tlong-symbolic-flags\n");
@@ -500,7 +474,7 @@ static int debugfs_valid_mountpoint(const char *debugfs)
/* find the path to the mounted debugfs */
static const char *debugfs_find_mountpoint(void)
{
- const char **ptr;
+ const char *const *ptr;
char type[100];
FILE *fp;
@@ -537,7 +511,7 @@ static const char *debugfs_find_mountpoint(void)
static void debugfs_mount(void)
{
- const char **ptr;
+ const char *const *ptr;
/* see if it's already mounted */
if (debugfs_find_mountpoint())
@@ -614,10 +588,10 @@ static int unpoison_page(unsigned long offset)
* page frame walker
*/
-static int hash_slot(uint64_t flags)
+static size_t hash_slot(uint64_t flags)
{
- int k = HASH_KEY(flags);
- int i;
+ size_t k = HASH_KEY(flags);
+ size_t i;
/* Explicitly reserve slot 0 for flags 0: the following logic
* cannot distinguish an unoccupied slot from slot (flags==0).
@@ -670,7 +644,7 @@ static void walk_pfn(unsigned long voffset,
{
uint64_t buf[KPAGEFLAGS_BATCH];
unsigned long batch;
- long pages;
+ unsigned long pages;
unsigned long i;
while (count) {
@@ -779,7 +753,7 @@ static const char *page_flag_type(uint64_t flag)
static void usage(void)
{
- int i, j;
+ size_t i, j;
printf(
"page-types [options]\n"
@@ -938,7 +912,7 @@ static void add_bits_filter(uint64_t mask, uint64_t bits)
static uint64_t parse_flag_name(const char *str, int len)
{
- int i;
+ size_t i;
if (!*str || !len)
return 0;
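Most of the page-types.c churn above is type hygiene: the indices are compared against ARRAY_SIZE(), which evaluates to a size_t, so leaving them as plain int provokes sign-compare warnings. A small self-contained userspace example of the pattern the tool now follows (array contents are made up):

#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

static const char * const demo_names[] = { "locked", "dirty", "lru" };

int main(void)
{
	size_t i;	/* size_t matches ARRAY_SIZE() and avoids -Wsign-compare */

	for (i = 0; i < ARRAY_SIZE(demo_names); i++)
		printf("%zu: %s\n", i, demo_names[i]);
	return 0;
}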
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index f63ccb0a598..28694f4a913 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -18,3 +18,6 @@ config KVM_MMIO
config KVM_ASYNC_PF
bool
+
+config HAVE_KVM_MSI
+ bool
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index dcaf272c26c..26fd54dc459 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -254,13 +254,17 @@ static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector,
}
}
+bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector)
+{
+ struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+ smp_rmb();
+ return test_bit(vector, ioapic->handled_vectors);
+}
+
void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode)
{
struct kvm_ioapic *ioapic = kvm->arch.vioapic;
- smp_rmb();
- if (!test_bit(vector, ioapic->handled_vectors))
- return;
spin_lock(&ioapic->lock);
__kvm_ioapic_update_eoi(ioapic, vector, trigger_mode);
spin_unlock(&ioapic->lock);
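With the handled_vectors test split out into kvm_ioapic_handles_vector(), kvm_ioapic_update_eoi() now always takes the ioapic lock, so the cheap bitmap check is presumably performed by the caller first. The expected caller shape would be roughly as follows (a sketch; the actual call site lives elsewhere in this series):

/* Sketch of the caller pattern assumed by the split above. */
if (kvm_ioapic_handles_vector(kvm, vector))
	kvm_ioapic_update_eoi(kvm, vector, trigger_mode);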
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 0b190c34ccc..32872a09b63 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -71,6 +71,7 @@ int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
int short_hand, int dest, int dest_mode);
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
+bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector);
int kvm_ioapic_init(struct kvm *kvm);
void kvm_ioapic_destroy(struct kvm *kvm);
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 9f614b4e365..a6a0365475e 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -138,6 +138,20 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
}
+int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
+{
+ struct kvm_kernel_irq_routing_entry route;
+
+ if (!irqchip_in_kernel(kvm) || msi->flags != 0)
+ return -EINVAL;
+
+ route.msi.address_lo = msi->address_lo;
+ route.msi.address_hi = msi->address_hi;
+ route.msi.data = msi->data;
+
+ return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
+}
+
/*
* Return value:
* < 0 Interrupt was ignored (masked or not delivered for other reasons)
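kvm_send_userspace_msi() above is the kernel half of the KVM_SIGNAL_MSI vm ioctl wired up later in kvm_main.c. From userspace the call is an ordinary ioctl on the VM file descriptor; a minimal sketch, assuming an in-kernel irqchip and a <linux/kvm.h> recent enough to define struct kvm_msi and KVM_SIGNAL_MSI:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Userspace sketch: inject one MSI without adding a routing entry. */
static int signal_msi(int vm_fd, unsigned long long addr, unsigned int data)
{
	struct kvm_msi msi;

	memset(&msi, 0, sizeof(msi));
	msi.address_lo = (unsigned int)addr;
	msi.address_hi = (unsigned int)(addr >> 32);
	msi.data = data;
	msi.flags = 0;		/* must be zero, as enforced above */

	return ioctl(vm_fd, KVM_SIGNAL_MSI, &msi);
}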
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9739b533ca2..7e140683ff1 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -522,12 +522,11 @@ static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
return;
if (2 * kvm_dirty_bitmap_bytes(memslot) > PAGE_SIZE)
- vfree(memslot->dirty_bitmap_head);
+ vfree(memslot->dirty_bitmap);
else
- kfree(memslot->dirty_bitmap_head);
+ kfree(memslot->dirty_bitmap);
memslot->dirty_bitmap = NULL;
- memslot->dirty_bitmap_head = NULL;
}
/*
@@ -611,8 +610,7 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
/*
* Allocation size is twice as large as the actual dirty bitmap size.
- * This makes it possible to do double buffering: see x86's
- * kvm_vm_ioctl_get_dirty_log().
+ * See x86's kvm_vm_ioctl_get_dirty_log() for why this is needed.
*/
static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
{
@@ -627,8 +625,6 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
if (!memslot->dirty_bitmap)
return -ENOMEM;
- memslot->dirty_bitmap_head = memslot->dirty_bitmap;
- memslot->nr_dirty_pages = 0;
#endif /* !CONFIG_S390 */
return 0;
}
@@ -1477,8 +1473,8 @@ void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
if (memslot && memslot->dirty_bitmap) {
unsigned long rel_gfn = gfn - memslot->base_gfn;
- if (!test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap))
- memslot->nr_dirty_pages++;
+ /* TODO: introduce set_bit_le() and use it */
+ test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap);
}
}
@@ -1515,6 +1511,30 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
finish_wait(&vcpu->wq, &wait);
}
+#ifndef CONFIG_S390
+/*
+ * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
+ */
+void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+ int me;
+ int cpu = vcpu->cpu;
+ wait_queue_head_t *wqp;
+
+ wqp = kvm_arch_vcpu_wq(vcpu);
+ if (waitqueue_active(wqp)) {
+ wake_up_interruptible(wqp);
+ ++vcpu->stat.halt_wakeup;
+ }
+
+ me = get_cpu();
+ if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
+ if (kvm_arch_vcpu_should_kick(vcpu))
+ smp_send_reschedule(cpu);
+ put_cpu();
+}
+#endif /* !CONFIG_S390 */
+
void kvm_resched(struct kvm_vcpu *vcpu)
{
if (!need_resched())
@@ -1523,6 +1543,31 @@ void kvm_resched(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_resched);
+bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
+{
+ struct pid *pid;
+ struct task_struct *task = NULL;
+
+ rcu_read_lock();
+ pid = rcu_dereference(target->pid);
+ if (pid)
+ task = get_pid_task(target->pid, PIDTYPE_PID);
+ rcu_read_unlock();
+ if (!task)
+ return false;
+ if (task->flags & PF_VCPU) {
+ put_task_struct(task);
+ return false;
+ }
+ if (yield_to(task, 1)) {
+ put_task_struct(task);
+ return true;
+ }
+ put_task_struct(task);
+ return false;
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
+
void kvm_vcpu_on_spin(struct kvm_vcpu *me)
{
struct kvm *kvm = me->kvm;
@@ -1541,8 +1586,6 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
*/
for (pass = 0; pass < 2 && !yielded; pass++) {
kvm_for_each_vcpu(i, vcpu, kvm) {
- struct task_struct *task = NULL;
- struct pid *pid;
if (!pass && i < last_boosted_vcpu) {
i = last_boosted_vcpu;
continue;
@@ -1552,24 +1595,11 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
continue;
if (waitqueue_active(&vcpu->wq))
continue;
- rcu_read_lock();
- pid = rcu_dereference(vcpu->pid);
- if (pid)
- task = get_pid_task(vcpu->pid, PIDTYPE_PID);
- rcu_read_unlock();
- if (!task)
- continue;
- if (task->flags & PF_VCPU) {
- put_task_struct(task);
- continue;
- }
- if (yield_to(task, 1)) {
- put_task_struct(task);
+ if (kvm_vcpu_yield_to(vcpu)) {
kvm->last_boosted_vcpu = i;
yielded = 1;
break;
}
- put_task_struct(task);
}
}
}
@@ -2040,6 +2070,17 @@ static long kvm_vm_ioctl(struct file *filp,
mutex_unlock(&kvm->lock);
break;
#endif
+#ifdef CONFIG_HAVE_KVM_MSI
+ case KVM_SIGNAL_MSI: {
+ struct kvm_msi msi;
+
+ r = -EFAULT;
+ if (copy_from_user(&msi, argp, sizeof msi))
+ goto out;
+ r = kvm_send_userspace_msi(kvm, &msi);
+ break;
+ }
+#endif
default:
r = kvm_arch_vm_ioctl(filp, ioctl, arg);
if (r == -ENOTTY)
@@ -2168,6 +2209,9 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
case KVM_CAP_SET_BOOT_CPU_ID:
#endif
case KVM_CAP_INTERNAL_ERROR_DATA:
+#ifdef CONFIG_HAVE_KVM_MSI
+ case KVM_CAP_SIGNAL_MSI:
+#endif
return 1;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
case KVM_CAP_IRQ_ROUTING:
@@ -2394,9 +2438,6 @@ int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
gpa_t addr, int len)
{
- if (bus->dev_count == NR_IOBUS_DEVS)
- return -ENOSPC;
-
bus->range[bus->dev_count++] = (struct kvm_io_range) {
.addr = addr,
.len = len,
@@ -2496,12 +2537,15 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
struct kvm_io_bus *new_bus, *bus;
bus = kvm->buses[bus_idx];
- if (bus->dev_count > NR_IOBUS_DEVS-1)
+ if (bus->dev_count > NR_IOBUS_DEVS - 1)
return -ENOSPC;
- new_bus = kmemdup(bus, sizeof(struct kvm_io_bus), GFP_KERNEL);
+ new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
+ sizeof(struct kvm_io_range)), GFP_KERNEL);
if (!new_bus)
return -ENOMEM;
+ memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
+ sizeof(struct kvm_io_range)));
kvm_io_bus_insert_dev(new_bus, dev, addr, len);
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu);
@@ -2518,27 +2562,25 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_bus *new_bus, *bus;
bus = kvm->buses[bus_idx];
-
- new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
- if (!new_bus)
- return -ENOMEM;
-
r = -ENOENT;
- for (i = 0; i < new_bus->dev_count; i++)
- if (new_bus->range[i].dev == dev) {
+ for (i = 0; i < bus->dev_count; i++)
+ if (bus->range[i].dev == dev) {
r = 0;
- new_bus->dev_count--;
- new_bus->range[i] = new_bus->range[new_bus->dev_count];
- sort(new_bus->range, new_bus->dev_count,
- sizeof(struct kvm_io_range),
- kvm_io_bus_sort_cmp, NULL);
break;
}
- if (r) {
- kfree(new_bus);
+ if (r)
return r;
- }
+
+ new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) *
+ sizeof(struct kvm_io_range)), GFP_KERNEL);
+ if (!new_bus)
+ return -ENOMEM;
+
+ memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
+ new_bus->dev_count--;
+ memcpy(new_bus->range + i, bus->range + i + 1,
+ (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu);