Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpi_apd.c  1
-rw-r--r--  drivers/amba/tegra-ahb.c  6
-rw-r--r--  drivers/ata/pata_ep93xx.c  2
-rw-r--r--  drivers/ata/sata_rcar.c  1
-rw-r--r--  drivers/block/brd.c  6
-rw-r--r--  drivers/block/rbd.c  24
-rw-r--r--  drivers/bus/tegra-aconnect.c  66
-rw-r--r--  drivers/bus/ti-sysc.c  661
-rw-r--r--  drivers/clk/axs10x/i2s_pll_clock.c  1
-rw-r--r--  drivers/clk/axs10x/pll_clock.c  1
-rw-r--r--  drivers/clk/bcm/clk-bcm2835-aux.c  1
-rw-r--r--  drivers/clk/bcm/clk-bcm2835.c  1
-rw-r--r--  drivers/clk/bcm/clk-kona.c  3
-rw-r--r--  drivers/clk/berlin/berlin2-div.c  1
-rw-r--r--  drivers/clk/berlin/bg2.c  1
-rw-r--r--  drivers/clk/berlin/bg2q.c  1
-rw-r--r--  drivers/clk/clk-fixed-mmio.c  3
-rw-r--r--  drivers/clk/clk-fractional-divider.c  1
-rw-r--r--  drivers/clk/clk-hsdk-pll.c  1
-rw-r--r--  drivers/clk/clk-multiplier.c  1
-rw-r--r--  drivers/clk/davinci/pll-da850.c  1
-rw-r--r--  drivers/clk/h8300/clk-div.c  1
-rw-r--r--  drivers/clk/h8300/clk-h8s2678.c  3
-rw-r--r--  drivers/clk/hisilicon/clk-hi3660-stub.c  1
-rw-r--r--  drivers/clk/imx/clk-composite-8m.c  3
-rw-r--r--  drivers/clk/imx/clk-frac-pll.c  1
-rw-r--r--  drivers/clk/imx/clk-imx21.c  1
-rw-r--r--  drivers/clk/imx/clk-imx27.c  1
-rw-r--r--  drivers/clk/imx/clk-pfdv2.c  1
-rw-r--r--  drivers/clk/imx/clk-pllv4.c  1
-rw-r--r--  drivers/clk/imx/clk-sccg-pll.c  1
-rw-r--r--  drivers/clk/ingenic/cgu.c  1
-rw-r--r--  drivers/clk/ingenic/jz4740-cgu.c  1
-rw-r--r--  drivers/clk/ingenic/jz4770-cgu.c  1
-rw-r--r--  drivers/clk/ingenic/jz4780-cgu.c  1
-rw-r--r--  drivers/clk/loongson1/clk-loongson1c.c  1
-rw-r--r--  drivers/clk/microchip/clk-core.c  1
-rw-r--r--  drivers/clk/microchip/clk-pic32mzda.c  1
-rw-r--r--  drivers/clk/mvebu/armada-37xx-periph.c  1
-rw-r--r--  drivers/clk/mvebu/armada-37xx-tbg.c  1
-rw-r--r--  drivers/clk/mvebu/clk-corediv.c  1
-rw-r--r--  drivers/clk/nxp/clk-lpc18xx-ccu.c  1
-rw-r--r--  drivers/clk/nxp/clk-lpc18xx-cgu.c  1
-rw-r--r--  drivers/clk/nxp/clk-lpc32xx.c  1
-rw-r--r--  drivers/clk/pxa/clk-pxa.c  1
-rw-r--r--  drivers/clk/renesas/clk-r8a73a4.c  1
-rw-r--r--  drivers/clk/renesas/clk-r8a7740.c  1
-rw-r--r--  drivers/clk/renesas/clk-rcar-gen2.c  1
-rw-r--r--  drivers/clk/renesas/clk-rz.c  1
-rw-r--r--  drivers/clk/renesas/clk-sh73a0.c  1
-rw-r--r--  drivers/clk/renesas/r9a06g032-clocks.c  1
-rw-r--r--  drivers/clk/renesas/rcar-usb2-clock-sel.c  1
-rw-r--r--  drivers/clk/renesas/renesas-cpg-mssr.c  1
-rw-r--r--  drivers/clk/rockchip/clk-half-divider.c  3
-rw-r--r--  drivers/clk/rockchip/clk-px30.c  1
-rw-r--r--  drivers/clk/rockchip/clk-rk3036.c  1
-rw-r--r--  drivers/clk/rockchip/clk-rk3128.c  1
-rw-r--r--  drivers/clk/rockchip/clk-rk3188.c  1
-rw-r--r--  drivers/clk/rockchip/clk-rk3228.c  1
-rw-r--r--  drivers/clk/rockchip/clk-rk3288.c  1
-rw-r--r--  drivers/clk/rockchip/clk-rk3328.c  1
-rw-r--r--  drivers/clk/rockchip/clk-rk3368.c  1
-rw-r--r--  drivers/clk/rockchip/clk-rk3399.c  1
-rw-r--r--  drivers/clk/rockchip/clk-rv1108.c  1
-rw-r--r--  drivers/clk/rockchip/clk.c  1
-rw-r--r--  drivers/clk/samsung/clk-cpu.c  1
-rw-r--r--  drivers/clk/samsung/clk-exynos-clkout.c  1
-rw-r--r--  drivers/clk/samsung/clk-exynos3250.c  1
-rw-r--r--  drivers/clk/samsung/clk-exynos4.c  1
-rw-r--r--  drivers/clk/samsung/clk-exynos5-subcmu.c  1
-rw-r--r--  drivers/clk/samsung/clk-exynos5250.c  1
-rw-r--r--  drivers/clk/samsung/clk-pll.c  3
-rw-r--r--  drivers/clk/samsung/clk-s3c2410-dclk.c  1
-rw-r--r--  drivers/clk/samsung/clk-s3c2412.c  1
-rw-r--r--  drivers/clk/samsung/clk-s3c2443.c  1
-rw-r--r--  drivers/clk/samsung/clk.c  1
-rw-r--r--  drivers/clk/sifive/fu540-prci.c  1
-rw-r--r--  drivers/clk/socfpga/clk-gate-s10.c  1
-rw-r--r--  drivers/clk/socfpga/clk-periph-s10.c  1
-rw-r--r--  drivers/clk/socfpga/clk-pll-s10.c  1
-rw-r--r--  drivers/clk/st/clkgen-mux.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun4i-a10.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-a64.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun50i-h6.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun5i.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun6i-a31.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-a23.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-a33.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-a83t.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-h3.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-r40.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun8i-v3s.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun9i-a80.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu_div.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu_frac.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu_gate.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu_mmc_timing.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu_mp.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu_mult.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu_mux.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu_nk.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu_nkm.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu_nkmp.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu_nm.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu_phase.c  1
-rw-r--r--  drivers/clk/sunxi-ng/ccu_sdm.c  1
-rw-r--r--  drivers/clk/sunxi/clk-a10-mod1.c  1
-rw-r--r--  drivers/clk/sunxi/clk-a10-pll2.c  1
-rw-r--r--  drivers/clk/sunxi/clk-a10-ve.c  1
-rw-r--r--  drivers/clk/sunxi/clk-a20-gmac.c  1
-rw-r--r--  drivers/clk/sunxi/clk-mod0.c  1
-rw-r--r--  drivers/clk/sunxi/clk-simple-gates.c  1
-rw-r--r--  drivers/clk/sunxi/clk-sun4i-display.c  1
-rw-r--r--  drivers/clk/sunxi/clk-sun4i-pll3.c  1
-rw-r--r--  drivers/clk/sunxi/clk-sun4i-tcon-ch1.c  1
-rw-r--r--  drivers/clk/sunxi/clk-sun8i-apb0.c  1
-rw-r--r--  drivers/clk/sunxi/clk-sun8i-bus-gates.c  1
-rw-r--r--  drivers/clk/sunxi/clk-sun8i-mbus.c  1
-rw-r--r--  drivers/clk/sunxi/clk-sun9i-cpus.c  1
-rw-r--r--  drivers/clk/sunxi/clk-sun9i-mmc.c  1
-rw-r--r--  drivers/clk/sunxi/clk-sunxi.c  1
-rw-r--r--  drivers/clk/sunxi/clk-usb.c  1
-rw-r--r--  drivers/clk/tegra/clk-emc.c  1
-rw-r--r--  drivers/clk/tegra/clk-periph-fixed.c  1
-rw-r--r--  drivers/clk/tegra/clk-sdmmc-mux.c  1
-rw-r--r--  drivers/clk/tegra/clk.c  1
-rw-r--r--  drivers/clk/ti/adpll.c  1
-rw-r--r--  drivers/clk/ti/clk.c  1
-rw-r--r--  drivers/clk/ti/fapll.c  1
-rw-r--r--  drivers/clk/versatile/clk-sp810.c  1
-rw-r--r--  drivers/clk/x86/clk-pmc-atom.c  1
-rw-r--r--  drivers/clk/zynqmp/clkc.c  4
-rw-r--r--  drivers/clocksource/Kconfig  21
-rw-r--r--  drivers/clocksource/Makefile  3
-rw-r--r--  drivers/clocksource/timer-atmel-tcb.c (renamed from drivers/clocksource/tcb_clksrc.c)  126
-rw-r--r--  drivers/clocksource/timer-ixp4xx.c  282
-rw-r--r--  drivers/clocksource/timer-milbeaut.c  66
-rw-r--r--  drivers/clocksource/timer-sun4i.c  5
-rw-r--r--  drivers/clocksource/timer-tegra20.c  63
-rw-r--r--  drivers/cpufreq/loongson1-cpufreq.c  1
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c  4
-rw-r--r--  drivers/dax/Kconfig  3
-rw-r--r--  drivers/dax/pmem/core.c  6
-rw-r--r--  drivers/dma-buf/dma-fence.c  1
-rw-r--r--  drivers/edac/Kconfig  4
-rw-r--r--  drivers/edac/edac_mc.c  12
-rw-r--r--  drivers/firmware/Kconfig  16
-rw-r--r--  drivers/firmware/Makefile  1
-rw-r--r--  drivers/firmware/arm_scmi/driver.c  8
-rw-r--r--  drivers/firmware/imx/Makefile  2
-rw-r--r--  drivers/firmware/imx/imx-scu-irq.c  168
-rw-r--r--  drivers/firmware/imx/imx-scu.c  6
-rw-r--r--  drivers/firmware/imx/scu-pd.c  121
-rw-r--r--  drivers/firmware/ti_sci.c  651
-rw-r--r--  drivers/firmware/ti_sci.h  102
-rw-r--r--  drivers/firmware/trusted_foundations.c  176
-rw-r--r--  drivers/firmware/xilinx/zynqmp-debug.c  18
-rw-r--r--  drivers/firmware/xilinx/zynqmp.c  56
-rw-r--r--  drivers/fpga/Kconfig  9
-rw-r--r--  drivers/fpga/Makefile  1
-rw-r--r--  drivers/fpga/zynqmp-fpga.c  159
-rw-r--r--  drivers/gpio/Kconfig  13
-rw-r--r--  drivers/gpio/Makefile  1
-rw-r--r--  drivers/gpio/gpio-ixp4xx.c  474
-rw-r--r--  drivers/gpio/gpio-thunderx.c  16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c  19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c  16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c  17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c  37
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  3
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_drv.c  6
-rw-r--r--  drivers/gpu/drm/i915/gvt/debugfs.c  4
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c  19
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c  15
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h  16
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c  4
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c  1
-rw-r--r--  drivers/gpu/drm/i915/gvt/reg.h  3
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c  2
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c  60
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c  78
-rw-r--r--  drivers/gpu/drm/i915/intel_context.c  1
-rw-r--r--  drivers/gpu/drm/i915/intel_context_types.h  3
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  9
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c  4
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_submission.c  1
-rw-r--r--  drivers/gpu/drm/i915/intel_pipe_crc.c  13
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c  6
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c  1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c  4
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c  4
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c  3
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h  4
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.h  1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/head.c  3
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c  1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndw.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c  3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c  60
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c  11
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_device.c  4
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_drv.c  3
-rw-r--r--  drivers/gpu/drm/pl111/pl111_display.c  5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c  4
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c  1
-rw-r--r--  drivers/gpu/drm/vc4/vc4_dsi.c  1
-rw-r--r--  drivers/hid/intel-ish-hid/Makefile  2
-rw-r--r--  drivers/hwmon/aspeed-pwm-tacho.c  6
-rw-r--r--  drivers/hwmon/gpio-fan.c  25
-rw-r--r--  drivers/hwmon/hwmon.c  4
-rw-r--r--  drivers/hwmon/mlxreg-fan.c  31
-rw-r--r--  drivers/hwmon/npcm750-pwm-fan.c  6
-rw-r--r--  drivers/hwmon/pwm-fan.c  97
-rw-r--r--  drivers/i2c/i2c-core-base.c  118
-rw-r--r--  drivers/iio/inkern.c  22
-rw-r--r--  drivers/input/keyboard/Kconfig  2
-rw-r--r--  drivers/input/keyboard/ep93xx_keypad.c  8
-rw-r--r--  drivers/input/misc/ixp4xx-beeper.c  20
-rw-r--r--  drivers/iommu/Kconfig  1
-rw-r--r--  drivers/iommu/dma-iommu.c  48
-rw-r--r--  drivers/irqchip/Kconfig  33
-rw-r--r--  drivers/irqchip/Makefile  3
-rw-r--r--  drivers/irqchip/irq-bcm7038-l1.c  3
-rw-r--r--  drivers/irqchip/irq-bcm7120-l2.c  3
-rw-r--r--  drivers/irqchip/irq-brcmstb-l2.c  2
-rw-r--r--  drivers/irqchip/irq-gic-pm.c  76
-rw-r--r--  drivers/irqchip/irq-gic-v2m.c  8
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c  84
-rw-r--r--  drivers/irqchip/irq-gic-v3-mbi.c  10
-rw-r--r--  drivers/irqchip/irq-imx-irqsteer.c  4
-rw-r--r--  drivers/irqchip/irq-ixp4xx.c  403
-rw-r--r--  drivers/irqchip/irq-ls-scfg-msi.c  7
-rw-r--r--  drivers/irqchip/irq-renesas-intc-irqpin.c  4
-rw-r--r--  drivers/irqchip/irq-stm32-exti.c  233
-rw-r--r--  drivers/irqchip/irq-ti-sci-inta.c  615
-rw-r--r--  drivers/irqchip/irq-ti-sci-intr.c  275
-rw-r--r--  drivers/lightnvm/core.c  82
-rw-r--r--  drivers/lightnvm/pblk-cache.c  8
-rw-r--r--  drivers/lightnvm/pblk-core.c  65
-rw-r--r--  drivers/lightnvm/pblk-gc.c  52
-rw-r--r--  drivers/lightnvm/pblk-init.c  65
-rw-r--r--  drivers/lightnvm/pblk-map.c  1
-rw-r--r--  drivers/lightnvm/pblk-rb.c  13
-rw-r--r--  drivers/lightnvm/pblk-read.c  394
-rw-r--r--  drivers/lightnvm/pblk-recovery.c  74
-rw-r--r--  drivers/lightnvm/pblk-write.c  1
-rw-r--r--  drivers/lightnvm/pblk.h  28
-rw-r--r--  drivers/mailbox/mtk-cmdq-mailbox.c  1
-rw-r--r--  drivers/md/Kconfig  9
-rw-r--r--  drivers/md/Makefile  1
-rw-r--r--  drivers/md/dm-cache-metadata.c  9
-rw-r--r--  drivers/md/dm-crypt.c  26
-rw-r--r--  drivers/md/dm-delay.c  3
-rw-r--r--  drivers/md/dm-dust.c  515
-rw-r--r--  drivers/md/dm-exception-store.h  3
-rw-r--r--  drivers/md/dm-init.c  8
-rw-r--r--  drivers/md/dm-integrity.c  717
-rw-r--r--  drivers/md/dm-ioctl.c  6
-rw-r--r--  drivers/md/dm-mpath.c  19
-rw-r--r--  drivers/md/dm-rq.c  8
-rw-r--r--  drivers/md/dm-snap.c  359
-rw-r--r--  drivers/md/dm-target.c  3
-rw-r--r--  drivers/md/dm-thin-metadata.c  139
-rw-r--r--  drivers/md/dm-writecache.c  29
-rw-r--r--  drivers/md/dm-zoned-metadata.c  5
-rw-r--r--  drivers/md/dm-zoned-target.c  3
-rw-r--r--  drivers/md/dm.c  12
-rw-r--r--  drivers/md/persistent-data/dm-space-map-common.c  2
-rw-r--r--  drivers/media/common/b2c2/Makefile  4
-rw-r--r--  drivers/media/dvb-frontends/cxd2880/Makefile  2
-rw-r--r--  drivers/media/i2c/smiapp/Makefile  2
-rw-r--r--  drivers/media/mmc/siano/Makefile  3
-rw-r--r--  drivers/media/pci/b2c2/Makefile  2
-rw-r--r--  drivers/media/pci/bt8xx/Makefile  5
-rw-r--r--  drivers/media/pci/cx18/Makefile  4
-rw-r--r--  drivers/media/pci/cx23885/Makefile  4
-rw-r--r--  drivers/media/pci/cx88/Makefile  4
-rw-r--r--  drivers/media/pci/ddbridge/Makefile  4
-rw-r--r--  drivers/media/pci/dm1105/Makefile  2
-rw-r--r--  drivers/media/pci/mantis/Makefile  2
-rw-r--r--  drivers/media/pci/netup_unidvb/Makefile  2
-rw-r--r--  drivers/media/pci/ngene/Makefile  4
-rw-r--r--  drivers/media/pci/pluto2/Makefile  2
-rw-r--r--  drivers/media/pci/pt1/Makefile  4
-rw-r--r--  drivers/media/pci/pt3/Makefile  4
-rw-r--r--  drivers/media/pci/smipcie/Makefile  5
-rw-r--r--  drivers/media/pci/ttpci/Makefile  4
-rw-r--r--  drivers/media/platform/atmel/atmel-isc-regs.h  19
-rw-r--r--  drivers/media/platform/atmel/atmel-isc.c  46
-rw-r--r--  drivers/media/platform/coda/coda-common.c  10
-rw-r--r--  drivers/media/platform/davinci/vpbe.c  2
-rw-r--r--  drivers/media/platform/omap/omap_vout.c  15
-rw-r--r--  drivers/media/platform/rcar-vin/rcar-csi2.c  68
-rw-r--r--  drivers/media/platform/sti/c8sectpfe/Makefile  5
-rw-r--r--  drivers/media/platform/tegra-cec/tegra_cec.c  4
-rw-r--r--  drivers/media/radio/Makefile  2
-rw-r--r--  drivers/media/spi/Makefile  4
-rw-r--r--  drivers/media/usb/as102/Makefile  2
-rw-r--r--  drivers/media/usb/au0828/Makefile  4
-rw-r--r--  drivers/media/usb/b2c2/Makefile  2
-rw-r--r--  drivers/media/usb/cx231xx/Makefile  5
-rw-r--r--  drivers/media/usb/em28xx/Makefile  4
-rw-r--r--  drivers/media/usb/go7007/Makefile  2
-rw-r--r--  drivers/media/usb/pvrusb2/Makefile  4
-rw-r--r--  drivers/media/usb/siano/Makefile  2
-rw-r--r--  drivers/media/usb/tm6000/Makefile  4
-rw-r--r--  drivers/media/usb/ttusb-budget/Makefile  2
-rw-r--r--  drivers/media/usb/usbvision/Makefile  2
-rw-r--r--  drivers/memory/emif.h  4
-rw-r--r--  drivers/memory/tegra/mc.c  34
-rw-r--r--  drivers/memory/tegra/mc.h  2
-rw-r--r--  drivers/memory/tegra/tegra114.c  4
-rw-r--r--  drivers/memory/tegra/tegra124-emc.c  1
-rw-r--r--  drivers/memory/tegra/tegra124.c  4
-rw-r--r--  drivers/memory/tegra/tegra20.c  28
-rw-r--r--  drivers/memory/tegra/tegra210.c  2
-rw-r--r--  drivers/memory/tegra/tegra30.c  4
-rw-r--r--  drivers/memory/ti-emif-pm.c  3
-rw-r--r--  drivers/memory/ti-emif-sram-pm.S  41
-rw-r--r--  drivers/mfd/intel-lpss.c  1
-rw-r--r--  drivers/misc/Kconfig  48
-rw-r--r--  drivers/misc/Makefile  3
-rw-r--r--  drivers/misc/atmel_tclib.c  5
-rw-r--r--  drivers/mmc/host/meson-mx-sdio.c  1
-rw-r--r--  drivers/net/ethernet/chelsio/libcxgb/Makefile  2
-rw-r--r--  drivers/net/ethernet/cirrus/Kconfig  2
-rw-r--r--  drivers/net/ethernet/cirrus/ep93xx_eth.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Kconfig  1
-rw-r--r--  drivers/net/ethernet/xscale/ixp4xx_eth.c  14
-rw-r--r--  drivers/net/ieee802154/ca8210.c  1
-rw-r--r--  drivers/net/wan/ixp4xx_hss.c  4
-rw-r--r--  drivers/nvdimm/label.c  29
-rw-r--r--  drivers/nvdimm/namespace_devs.c  15
-rw-r--r--  drivers/nvdimm/nd.h  4
-rw-r--r--  drivers/nvme/host/core.c  79
-rw-r--r--  drivers/nvme/host/fabrics.c  4
-rw-r--r--  drivers/nvme/host/fc.c  14
-rw-r--r--  drivers/nvme/host/lightnvm.c  1
-rw-r--r--  drivers/nvme/host/multipath.c  2
-rw-r--r--  drivers/nvme/host/pci.c  4
-rw-r--r--  drivers/nvme/host/rdma.c  34
-rw-r--r--  drivers/nvme/host/trace.h  1
-rw-r--r--  drivers/nvmem/zynqmp_nvmem.c  10
-rw-r--r--  drivers/power/reset/at91-sama5d2_shdwc.c  25
-rw-r--r--  drivers/power/reset/syscon-reboot.c  19
-rw-r--r--  drivers/power/supply/Kconfig  29
-rw-r--r--  drivers/power/supply/Makefile  4
-rw-r--r--  drivers/power/supply/ab8500_bmdata.c  1
-rw-r--r--  drivers/power/supply/axp20x_usb_power.c  179
-rw-r--r--  drivers/power/supply/axp288_charger.c  4
-rw-r--r--  drivers/power/supply/axp288_fuel_gauge.c  20
-rw-r--r--  drivers/power/supply/bq27xxx_battery.c  3
-rw-r--r--  drivers/power/supply/charger-manager.c  3
-rw-r--r--  drivers/power/supply/cpcap-battery.c  44
-rw-r--r--  drivers/power/supply/cpcap-charger.c  5
-rw-r--r--  drivers/power/supply/gpio-charger.c  57
-rw-r--r--  drivers/power/supply/ingenic-battery.c  184
-rw-r--r--  drivers/power/supply/lt3651-charger.c (renamed from drivers/power/supply/ltc3651-charger.c)  123
-rw-r--r--  drivers/power/supply/max14656_charger_detector.c  27
-rw-r--r--  drivers/power/supply/olpc_battery.c  171
-rw-r--r--  drivers/power/supply/power_supply_core.c  38
-rw-r--r--  drivers/power/supply/power_supply_sysfs.c  6
-rw-r--r--  drivers/power/supply/ucs1002_power.c  646
-rw-r--r--  drivers/pwm/pwm-atmel-tcb.c  2
-rw-r--r--  drivers/pwm/pwm-ep93xx.c  2
-rw-r--r--  drivers/reset/reset-zynqmp.c  8
-rw-r--r--  drivers/rtc/rtc-omap.c  49
-rw-r--r--  drivers/s390/block/dasd_eckd.c  2
-rw-r--r--  drivers/s390/cio/qdio_main.c  19
-rw-r--r--  drivers/s390/cio/trace.c  1
-rw-r--r--  drivers/s390/cio/trace.h  23
-rw-r--r--  drivers/soc/Kconfig  2
-rw-r--r--  drivers/soc/Makefile  2
-rw-r--r--  drivers/soc/amlogic/meson-gx-pwrc-vpu.c  160
-rw-r--r--  drivers/soc/amlogic/meson-gx-socinfo.c  43
-rw-r--r--  drivers/soc/aspeed/Kconfig  31
-rw-r--r--  drivers/soc/aspeed/Makefile  3
-rw-r--r--  drivers/soc/aspeed/aspeed-lpc-ctrl.c (renamed from drivers/misc/aspeed-lpc-ctrl.c)  0
-rw-r--r--  drivers/soc/aspeed/aspeed-lpc-snoop.c (renamed from drivers/misc/aspeed-lpc-snoop.c)  0
-rw-r--r--  drivers/soc/aspeed/aspeed-p2a-ctrl.c (renamed from drivers/misc/aspeed-p2a-ctrl.c)  0
-rw-r--r--  drivers/soc/fsl/qe/gpio.c  4
-rw-r--r--  drivers/soc/imx/Makefile  1
-rw-r--r--  drivers/soc/imx/gpc.c  4
-rw-r--r--  drivers/soc/imx/gpcv2.c  43
-rw-r--r--  drivers/soc/imx/soc-imx8.c  115
-rw-r--r--  drivers/soc/ixp4xx/Kconfig  16
-rw-r--r--  drivers/soc/ixp4xx/Makefile  2
-rw-r--r--  drivers/soc/ixp4xx/ixp4xx-npe.c  762
-rw-r--r--  drivers/soc/ixp4xx/ixp4xx-qmgr.c  488
-rw-r--r--  drivers/soc/mediatek/mtk-pmic-wrap.c  111
-rw-r--r--  drivers/soc/qcom/cmd-db.c  4
-rw-r--r--  drivers/soc/qcom/qmi_interface.c  7
-rw-r--r--  drivers/soc/qcom/rmtfs_mem.c  21
-rw-r--r--  drivers/soc/qcom/rpmh-rsc.c  2
-rw-r--r--  drivers/soc/renesas/renesas-soc.c  3
-rw-r--r--  drivers/soc/rockchip/grf.c  2
-rw-r--r--  drivers/soc/tegra/pmc.c  171
-rw-r--r--  drivers/soc/ti/Kconfig  11
-rw-r--r--  drivers/soc/ti/Makefile  1
-rw-r--r--  drivers/soc/ti/pm33xx.c  273
-rw-r--r--  drivers/soc/ti/ti_sci_inta_msi.c  146
-rw-r--r--  drivers/soc/xilinx/zynqmp_pm_domains.c  18
-rw-r--r--  drivers/soc/xilinx/zynqmp_power.c  10
-rw-r--r--  drivers/spi/spi-zynqmp-gqspi.c  6
-rw-r--r--  drivers/staging/media/imx/imx-ic-prpencvf.c  2
-rw-r--r--  drivers/staging/media/imx/imx-media-capture.c  6
-rw-r--r--  drivers/staging/media/imx/imx-media-csi.c  2
-rw-r--r--  drivers/staging/media/imx/imx-media.h  3
-rw-r--r--  drivers/staging/media/imx/imx7-media-csi.c  2
-rw-r--r--  drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c  14
-rw-r--r--  drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c  3
-rw-r--r--  drivers/target/iscsi/cxgbit/Makefile  6
-rw-r--r--  drivers/tee/optee/core.c  80
-rw-r--r--  drivers/thermal/Kconfig  19
-rw-r--r--  drivers/thermal/Makefile  1
-rw-r--r--  drivers/thermal/broadcom/sr-thermal.c  8
-rw-r--r--  drivers/thermal/cpu_cooling.c  30
-rw-r--r--  drivers/thermal/intel/Kconfig  1
-rw-r--r--  drivers/thermal/intel/int340x_thermal/int3403_thermal.c  16
-rw-r--r--  drivers/thermal/intel/int340x_thermal/processor_thermal_device.c  13
-rw-r--r--  drivers/thermal/of-thermal.c  3
-rw-r--r--  drivers/thermal/qcom/Kconfig  1
-rw-r--r--  drivers/thermal/qcom/Makefile  4
-rw-r--r--  drivers/thermal/qcom/tsens-8916.c  105
-rw-r--r--  drivers/thermal/qcom/tsens-8960.c  84
-rw-r--r--  drivers/thermal/qcom/tsens-common.c  159
-rw-r--r--  drivers/thermal/qcom/tsens-v0_1.c (renamed from drivers/thermal/qcom/tsens-8974.c)  166
-rw-r--r--  drivers/thermal/qcom/tsens-v1.c  193
-rw-r--r--  drivers/thermal/qcom/tsens-v2.c  111
-rw-r--r--  drivers/thermal/qcom/tsens.c  100
-rw-r--r--  drivers/thermal/qcom/tsens.h  291
-rw-r--r--  drivers/thermal/qoriq_thermal.c  5
-rw-r--r--  drivers/thermal/rcar_gen3_thermal.c  51
-rw-r--r--  drivers/thermal/rcar_thermal.c  11
-rw-r--r--  drivers/thermal/rockchip_thermal.c  74
-rw-r--r--  drivers/thermal/st/Kconfig  22
-rw-r--r--  drivers/thermal/st/stm_thermal.c  6
-rw-r--r--  drivers/thermal/tegra/Kconfig  4
-rw-r--r--  drivers/thermal/tegra/soctherm.c  961
-rw-r--r--  drivers/thermal/tegra/soctherm.h  16
-rw-r--r--  drivers/thermal/tegra/tegra124-soctherm.c  7
-rw-r--r--  drivers/thermal/tegra/tegra132-soctherm.c  7
-rw-r--r--  drivers/thermal/tegra/tegra210-soctherm.c  15
-rw-r--r--  drivers/thermal/thermal-generic-adc.c  9
-rw-r--r--  drivers/thermal/thermal_core.c  80
-rw-r--r--  drivers/thermal/thermal_mmio.c  129
-rw-r--r--  drivers/tty/hvc/hvc_riscv_sbi.c  1
-rw-r--r--  drivers/usb/host/ohci-da8xx.c  42
-rw-r--r--  drivers/usb/storage/Makefile  2
-rw-r--r--  drivers/video/fbdev/efifb.c  8
-rw-r--r--  drivers/watchdog/ixp4xx_wdt.c  9
-rw-r--r--  drivers/xen/xen-pciback/xenbus.c  2
-rw-r--r--  drivers/xen/xenbus/xenbus_dev_frontend.c  2
459 files changed, 13630 insertions, 2913 deletions
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index ddf598ae8b6b..c16f9460c4a2 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -17,6 +17,7 @@
#include <linux/clkdev.h>
#include <linux/acpi.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/pm.h>
#include "internal.h"
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
index b0b688c481e8..3751d811be39 100644
--- a/drivers/amba/tegra-ahb.c
+++ b/drivers/amba/tegra-ahb.c
@@ -170,8 +170,7 @@ int tegra_ahb_enable_smmu(struct device_node *dn)
EXPORT_SYMBOL(tegra_ahb_enable_smmu);
#endif
-#ifdef CONFIG_PM
-static int tegra_ahb_suspend(struct device *dev)
+static int __maybe_unused tegra_ahb_suspend(struct device *dev)
{
int i;
struct tegra_ahb *ahb = dev_get_drvdata(dev);
@@ -181,7 +180,7 @@ static int tegra_ahb_suspend(struct device *dev)
return 0;
}
-static int tegra_ahb_resume(struct device *dev)
+static int __maybe_unused tegra_ahb_resume(struct device *dev)
{
int i;
struct tegra_ahb *ahb = dev_get_drvdata(dev);
@@ -190,7 +189,6 @@ static int tegra_ahb_resume(struct device *dev)
gizmo_writel(ahb, ahb->ctx[i], tegra_ahb_gizmo[i]);
return 0;
}
-#endif
static UNIVERSAL_DEV_PM_OPS(tegra_ahb_pm,
tegra_ahb_suspend,
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index cc6d06c1b2c7..db271b705529 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -44,7 +44,7 @@
#include <linux/ktime.h>
#include <linux/platform_data/dma-ep93xx.h>
-#include <mach/platform.h>
+#include <linux/soc/cirrus/ep93xx.h>
#define DRV_NAME "ep93xx-ide"
#define DRV_VERSION "1.0"
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 59b2317acea9..3495e1733a8e 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -909,7 +909,6 @@ static int sata_rcar_probe(struct platform_device *pdev)
host = ata_host_alloc(dev, 1);
if (!host) {
- dev_err(dev, "ata_host_alloc failed\n");
ret = -ENOMEM;
goto err_pm_put;
}
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 17defbf4f332..2da615b45b31 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -153,6 +153,12 @@ static void brd_free_pages(struct brd_device *brd)
pos++;
/*
+ * It takes 3.4 seconds to remove 80GiB ramdisk.
+ * So, we need cond_resched to avoid stalling the CPU.
+ */
+ cond_resched();
+
+ /*
* This assumes radix_tree_gang_lookup always returns as
* many pages as possible. If the radix-tree code changes,
* so will this have to.
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 2210c1b9491b..e5009a34f9c2 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -934,7 +934,7 @@ static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
struct rbd_client *rbdc;
int ret;
- mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
+ mutex_lock(&client_mutex);
rbdc = rbd_client_find(ceph_opts);
if (rbdc) {
ceph_destroy_options(ceph_opts);
@@ -1326,7 +1326,7 @@ static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
zero_bvecs(&obj_req->bvec_pos, off, bytes);
break;
default:
- rbd_assert(0);
+ BUG();
}
}
@@ -1581,7 +1581,7 @@ static void rbd_obj_request_destroy(struct kref *kref)
kfree(obj_request->bvec_pos.bvecs);
break;
default:
- rbd_assert(0);
+ BUG();
}
kfree(obj_request->img_extents);
@@ -1781,7 +1781,7 @@ static void rbd_osd_req_setup_data(struct rbd_obj_request *obj_req, u32 which)
&obj_req->bvec_pos);
break;
default:
- rbd_assert(0);
+ BUG();
}
}
@@ -2036,7 +2036,7 @@ static int __rbd_img_fill_request(struct rbd_img_request *img_req)
ret = rbd_obj_setup_zeroout(obj_req);
break;
default:
- rbd_assert(0);
+ BUG();
}
if (ret < 0)
return ret;
@@ -2383,7 +2383,7 @@ static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
&obj_req->bvec_pos);
break;
default:
- rbd_assert(0);
+ BUG();
}
} else {
ret = rbd_img_fill_from_bvecs(child_img_req,
@@ -2515,7 +2515,7 @@ static int rbd_obj_issue_copyup_ops(struct rbd_obj_request *obj_req, u32 bytes)
num_osd_ops += count_zeroout_ops(obj_req);
break;
default:
- rbd_assert(0);
+ BUG();
}
obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
@@ -2542,7 +2542,7 @@ static int rbd_obj_issue_copyup_ops(struct rbd_obj_request *obj_req, u32 bytes)
__rbd_obj_setup_zeroout(obj_req, which);
break;
default:
- rbd_assert(0);
+ BUG();
}
ret = ceph_osdc_alloc_messages(obj_req->osd_req, GFP_NOIO);
@@ -3842,8 +3842,12 @@ static void rbd_queue_workfn(struct work_struct *work)
goto err_rq;
}
- rbd_assert(op_type == OBJ_OP_READ ||
- rbd_dev->spec->snap_id == CEPH_NOSNAP);
+ if (op_type != OBJ_OP_READ && rbd_dev->spec->snap_id != CEPH_NOSNAP) {
+ rbd_warn(rbd_dev, "%s on read-only snapshot",
+ obj_op_name(op_type));
+ result = -EIO;
+ goto err;
+ }
/*
* Quit early if the mapped snapshot no longer exists. It's
diff --git a/drivers/bus/tegra-aconnect.c b/drivers/bus/tegra-aconnect.c
index 084ae286fa23..ac58142301f4 100644
--- a/drivers/bus/tegra-aconnect.c
+++ b/drivers/bus/tegra-aconnect.c
@@ -12,28 +12,38 @@
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>
+struct tegra_aconnect {
+ struct clk *ape_clk;
+ struct clk *apb2ape_clk;
+};
+
static int tegra_aconnect_probe(struct platform_device *pdev)
{
- int ret;
+ struct tegra_aconnect *aconnect;
if (!pdev->dev.of_node)
return -EINVAL;
- ret = pm_clk_create(&pdev->dev);
- if (ret)
- return ret;
+ aconnect = devm_kzalloc(&pdev->dev, sizeof(struct tegra_aconnect),
+ GFP_KERNEL);
+ if (!aconnect)
+ return -ENOMEM;
- ret = of_pm_clk_add_clk(&pdev->dev, "ape");
- if (ret)
- goto clk_destroy;
+ aconnect->ape_clk = devm_clk_get(&pdev->dev, "ape");
+ if (IS_ERR(aconnect->ape_clk)) {
+ dev_err(&pdev->dev, "Can't retrieve ape clock\n");
+ return PTR_ERR(aconnect->ape_clk);
+ }
- ret = of_pm_clk_add_clk(&pdev->dev, "apb2ape");
- if (ret)
- goto clk_destroy;
+ aconnect->apb2ape_clk = devm_clk_get(&pdev->dev, "apb2ape");
+ if (IS_ERR(aconnect->apb2ape_clk)) {
+ dev_err(&pdev->dev, "Can't retrieve apb2ape clock\n");
+ return PTR_ERR(aconnect->apb2ape_clk);
+ }
+ dev_set_drvdata(&pdev->dev, aconnect);
pm_runtime_enable(&pdev->dev);
of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
@@ -41,35 +51,51 @@ static int tegra_aconnect_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Tegra ACONNECT bus registered\n");
return 0;
-
-clk_destroy:
- pm_clk_destroy(&pdev->dev);
-
- return ret;
}
static int tegra_aconnect_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
- pm_clk_destroy(&pdev->dev);
-
return 0;
}
static int tegra_aconnect_runtime_resume(struct device *dev)
{
- return pm_clk_resume(dev);
+ struct tegra_aconnect *aconnect = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(aconnect->ape_clk);
+ if (ret) {
+ dev_err(dev, "ape clk_enable failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(aconnect->apb2ape_clk);
+ if (ret) {
+ clk_disable_unprepare(aconnect->ape_clk);
+ dev_err(dev, "apb2ape clk_enable failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
}
static int tegra_aconnect_runtime_suspend(struct device *dev)
{
- return pm_clk_suspend(dev);
+ struct tegra_aconnect *aconnect = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(aconnect->ape_clk);
+ clk_disable_unprepare(aconnect->apb2ape_clk);
+
+ return 0;
}
static const struct dev_pm_ops tegra_aconnect_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_aconnect_runtime_suspend,
tegra_aconnect_runtime_resume, NULL)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static const struct of_device_id tegra_aconnect_of_match[] = {
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index d299ec79e4c3..308475ed4b32 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -47,7 +47,10 @@ enum sysc_clocks {
SYSC_MAX_CLOCKS,
};
-static const char * const clock_names[SYSC_ICK + 1] = { "fck", "ick", };
+static const char * const clock_names[SYSC_MAX_CLOCKS] = {
+ "fck", "ick", "opt0", "opt1", "opt2", "opt3", "opt4",
+ "opt5", "opt6", "opt7",
+};
#define SYSC_IDLEMODE_MASK 3
#define SYSC_CLOCKACTIVITY_MASK 3
@@ -75,6 +78,7 @@ struct sysc {
u32 module_size;
void __iomem *module_va;
int offsets[SYSC_MAX_REGS];
+ struct ti_sysc_module_data *mdata;
struct clk **clocks;
const char **clock_roles;
int nr_clocks;
@@ -94,7 +98,7 @@ struct sysc {
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
bool is_child);
-void sysc_write(struct sysc *ddata, int offset, u32 value)
+static void sysc_write(struct sysc *ddata, int offset, u32 value)
{
writel_relaxed(value, ddata->module_va + offset);
}
@@ -128,6 +132,81 @@ static u32 sysc_read_revision(struct sysc *ddata)
return sysc_read(ddata, offset);
}
+static int sysc_add_named_clock_from_child(struct sysc *ddata,
+ const char *name,
+ const char *optfck_name)
+{
+ struct device_node *np = ddata->dev->of_node;
+ struct device_node *child;
+ struct clk_lookup *cl;
+ struct clk *clock;
+ const char *n;
+
+ if (name)
+ n = name;
+ else
+ n = optfck_name;
+
+ /* Does the clock alias already exist? */
+ clock = of_clk_get_by_name(np, n);
+ if (!IS_ERR(clock)) {
+ clk_put(clock);
+
+ return 0;
+ }
+
+ child = of_get_next_available_child(np, NULL);
+ if (!child)
+ return -ENODEV;
+
+ clock = devm_get_clk_from_child(ddata->dev, child, name);
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ /*
+ * Use clkdev_add() instead of clkdev_alloc() to avoid the MAX_DEV_ID
+ * limit for clk_get(). If cl ever needs to be freed, it should be done
+ * with clkdev_drop().
+ */
+ cl = kcalloc(1, sizeof(*cl), GFP_KERNEL);
+ if (!cl)
+ return -ENOMEM;
+
+ cl->con_id = n;
+ cl->dev_id = dev_name(ddata->dev);
+ cl->clk = clock;
+ clkdev_add(cl);
+
+ clk_put(clock);
+
+ return 0;
+}
+
+static int sysc_init_ext_opt_clock(struct sysc *ddata, const char *name)
+{
+ const char *optfck_name;
+ int error, index;
+
+ if (ddata->nr_clocks < SYSC_OPTFCK0)
+ index = SYSC_OPTFCK0;
+ else
+ index = ddata->nr_clocks;
+
+ if (name)
+ optfck_name = name;
+ else
+ optfck_name = clock_names[index];
+
+ error = sysc_add_named_clock_from_child(ddata, name, optfck_name);
+ if (error)
+ return error;
+
+ ddata->clock_roles[index] = optfck_name;
+ ddata->nr_clocks++;
+
+ return 0;
+}
+
static int sysc_get_one_clock(struct sysc *ddata, const char *name)
{
int error, i, index = -ENODEV;
@@ -199,6 +278,12 @@ static int sysc_get_clocks(struct sysc *ddata)
if (ddata->nr_clocks < 1)
return 0;
+ if ((ddata->cfg.quirks & SYSC_QUIRK_EXT_OPT_CLOCK)) {
+ error = sysc_init_ext_opt_clock(ddata, NULL);
+ if (error)
+ return error;
+ }
+
if (ddata->nr_clocks > SYSC_MAX_CLOCKS) {
dev_err(ddata->dev, "too many clocks for %pOF\n", np);
@@ -231,39 +316,125 @@ static int sysc_get_clocks(struct sysc *ddata)
return 0;
}
+static int sysc_enable_main_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i, error;
+
+ if (!ddata->clocks)
+ return 0;
+
+ for (i = 0; i < SYSC_OPTFCK0; i++) {
+ clock = ddata->clocks[i];
+
+ /* Main clocks may not have ick */
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ error = clk_enable(clock);
+ if (error)
+ goto err_disable;
+ }
+
+ return 0;
+
+err_disable:
+ for (i--; i >= 0; i--) {
+ clock = ddata->clocks[i];
+
+ /* Main clocks may not have ick */
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ clk_disable(clock);
+ }
+
+ return error;
+}
+
+static void sysc_disable_main_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i;
+
+ if (!ddata->clocks)
+ return;
+
+ for (i = 0; i < SYSC_OPTFCK0; i++) {
+ clock = ddata->clocks[i];
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ clk_disable(clock);
+ }
+}
+
+static int sysc_enable_opt_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i, error;
+
+ if (!ddata->clocks)
+ return 0;
+
+ for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
+ clock = ddata->clocks[i];
+
+ /* Assume no holes for opt clocks */
+ if (IS_ERR_OR_NULL(clock))
+ return 0;
+
+ error = clk_enable(clock);
+ if (error)
+ goto err_disable;
+ }
+
+ return 0;
+
+err_disable:
+ for (i--; i >= 0; i--) {
+ clock = ddata->clocks[i];
+ if (IS_ERR_OR_NULL(clock))
+ continue;
+
+ clk_disable(clock);
+ }
+
+ return error;
+}
+
+static void sysc_disable_opt_clocks(struct sysc *ddata)
+{
+ struct clk *clock;
+ int i;
+
+ if (!ddata->clocks)
+ return;
+
+ for (i = SYSC_OPTFCK0; i < SYSC_MAX_CLOCKS; i++) {
+ clock = ddata->clocks[i];
+
+ /* Assume no holes for opt clocks */
+ if (IS_ERR_OR_NULL(clock))
+ return;
+
+ clk_disable(clock);
+ }
+}
+
/**
- * sysc_init_resets - reset module on init
+ * sysc_init_resets - init rstctrl reset line if configured
* @ddata: device driver data
*
- * A module can have both OCP softreset control and external rstctrl.
- * If more complicated rstctrl resets are needed, please handle these
- * directly from the child device driver and map only the module reset
- * for the parent interconnect target module device.
- *
- * Automatic reset of the module on init can be skipped with the
- * "ti,no-reset-on-init" device tree property.
+ * See sysc_rstctrl_reset_deassert().
*/
static int sysc_init_resets(struct sysc *ddata)
{
- int error;
-
ddata->rsts =
devm_reset_control_array_get_optional_exclusive(ddata->dev);
if (IS_ERR(ddata->rsts))
return PTR_ERR(ddata->rsts);
- if (ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
- goto deassert;
-
- error = reset_control_assert(ddata->rsts);
- if (error)
- return error;
-
-deassert:
- error = reset_control_deassert(ddata->rsts);
- if (error)
- return error;
-
return 0;
}
@@ -622,91 +793,239 @@ static void sysc_show_registers(struct sysc *ddata)
buf);
}
-static int __maybe_unused sysc_runtime_suspend(struct device *dev)
+#define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1)
+
+static int sysc_enable_module(struct device *dev)
{
- struct ti_sysc_platform_data *pdata;
struct sysc *ddata;
- int error = 0, i;
+ const struct sysc_regbits *regbits;
+ u32 reg, idlemodes, best_mode;
ddata = dev_get_drvdata(dev);
+ if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
+ return 0;
- if (!ddata->enabled)
+ /*
+ * TODO: Need to prevent clockdomain autoidle?
+ * See clkdm_deny_idle() in arch/mach-omap2/omap_hwmod.c
+ */
+
+ regbits = ddata->cap->regbits;
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ /* Set SIDLE mode */
+ idlemodes = ddata->cfg.sidlemodes;
+ if (!idlemodes || regbits->sidle_shift < 0)
+ goto set_midle;
+
+ best_mode = fls(ddata->cfg.sidlemodes) - 1;
+ if (best_mode > SYSC_IDLE_MASK) {
+ dev_err(dev, "%s: invalid sidlemode\n", __func__);
+ return -EINVAL;
+ }
+
+ reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
+ reg |= best_mode << regbits->sidle_shift;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
+
+set_midle:
+ /* Set MIDLE mode */
+ idlemodes = ddata->cfg.midlemodes;
+ if (!idlemodes || regbits->midle_shift < 0)
return 0;
- if (ddata->legacy_mode) {
- pdata = dev_get_platdata(ddata->dev);
- if (!pdata)
- return 0;
+ best_mode = fls(ddata->cfg.midlemodes) - 1;
+ if (best_mode > SYSC_IDLE_MASK) {
+ dev_err(dev, "%s: invalid midlemode\n", __func__);
+ return -EINVAL;
+ }
- if (!pdata->idle_module)
- return -ENODEV;
+ reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
+ reg |= best_mode << regbits->midle_shift;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
- error = pdata->idle_module(dev, &ddata->cookie);
- if (error)
- dev_err(dev, "%s: could not idle: %i\n",
- __func__, error);
+ return 0;
+}
+
+static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
+{
+ if (idlemodes & BIT(SYSC_IDLE_SMART_WKUP))
+ *best_mode = SYSC_IDLE_SMART_WKUP;
+ else if (idlemodes & BIT(SYSC_IDLE_SMART))
+ *best_mode = SYSC_IDLE_SMART;
+ else if (idlemodes & SYSC_IDLE_FORCE)
+ *best_mode = SYSC_IDLE_FORCE;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sysc_disable_module(struct device *dev)
+{
+ struct sysc *ddata;
+ const struct sysc_regbits *regbits;
+ u32 reg, idlemodes, best_mode;
+ int ret;
- goto idled;
+ ddata = dev_get_drvdata(dev);
+ if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
+ return 0;
+
+ /*
+ * TODO: Need to prevent clockdomain autoidle?
+ * See clkdm_deny_idle() in arch/mach-omap2/omap_hwmod.c
+ */
+
+ regbits = ddata->cap->regbits;
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ /* Set MIDLE mode */
+ idlemodes = ddata->cfg.midlemodes;
+ if (!idlemodes || regbits->midle_shift < 0)
+ goto set_sidle;
+
+ ret = sysc_best_idle_mode(idlemodes, &best_mode);
+ if (ret) {
+ dev_err(dev, "%s: invalid midlemode\n", __func__);
+ return ret;
}
- for (i = 0; i < ddata->nr_clocks; i++) {
- if (IS_ERR_OR_NULL(ddata->clocks[i]))
- continue;
+ reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
+ reg |= best_mode << regbits->midle_shift;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
- if (i >= SYSC_OPTFCK0 && !sysc_opt_clks_needed(ddata))
- break;
+set_sidle:
+ /* Set SIDLE mode */
+ idlemodes = ddata->cfg.sidlemodes;
+ if (!idlemodes || regbits->sidle_shift < 0)
+ return 0;
- clk_disable(ddata->clocks[i]);
+ ret = sysc_best_idle_mode(idlemodes, &best_mode);
+ if (ret) {
+ dev_err(dev, "%s: invalid sidlemode\n", __func__);
+ return ret;
}
-idled:
- ddata->enabled = false;
+ reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
+ reg |= best_mode << regbits->sidle_shift;
+ sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
- return error;
+ return 0;
}
-static int __maybe_unused sysc_runtime_resume(struct device *dev)
+static int __maybe_unused sysc_runtime_suspend_legacy(struct device *dev,
+ struct sysc *ddata)
{
struct ti_sysc_platform_data *pdata;
+ int error;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (!pdata)
+ return 0;
+
+ if (!pdata->idle_module)
+ return -ENODEV;
+
+ error = pdata->idle_module(dev, &ddata->cookie);
+ if (error)
+ dev_err(dev, "%s: could not idle: %i\n",
+ __func__, error);
+
+ return 0;
+}
+
+static int __maybe_unused sysc_runtime_resume_legacy(struct device *dev,
+ struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata;
+ int error;
+
+ pdata = dev_get_platdata(ddata->dev);
+ if (!pdata)
+ return 0;
+
+ if (!pdata->enable_module)
+ return -ENODEV;
+
+ error = pdata->enable_module(dev, &ddata->cookie);
+ if (error)
+ dev_err(dev, "%s: could not enable: %i\n",
+ __func__, error);
+
+ return 0;
+}
+
+static int __maybe_unused sysc_runtime_suspend(struct device *dev)
+{
struct sysc *ddata;
- int error = 0, i;
+ int error = 0;
ddata = dev_get_drvdata(dev);
- if (ddata->enabled)
+ if (!ddata->enabled)
return 0;
if (ddata->legacy_mode) {
- pdata = dev_get_platdata(ddata->dev);
- if (!pdata)
- return 0;
+ error = sysc_runtime_suspend_legacy(dev, ddata);
+ if (error)
+ return error;
+ } else {
+ error = sysc_disable_module(dev);
+ if (error)
+ return error;
+ }
- if (!pdata->enable_module)
- return -ENODEV;
+ sysc_disable_main_clocks(ddata);
- error = pdata->enable_module(dev, &ddata->cookie);
- if (error)
- dev_err(dev, "%s: could not enable: %i\n",
- __func__, error);
+ if (sysc_opt_clks_needed(ddata))
+ sysc_disable_opt_clocks(ddata);
- goto awake;
- }
+ ddata->enabled = false;
- for (i = 0; i < ddata->nr_clocks; i++) {
- if (IS_ERR_OR_NULL(ddata->clocks[i]))
- continue;
+ return error;
+}
- if (i >= SYSC_OPTFCK0 && !sysc_opt_clks_needed(ddata))
- break;
+static int __maybe_unused sysc_runtime_resume(struct device *dev)
+{
+ struct sysc *ddata;
+ int error = 0;
+
+ ddata = dev_get_drvdata(dev);
- error = clk_enable(ddata->clocks[i]);
+ if (ddata->enabled)
+ return 0;
+
+ if (sysc_opt_clks_needed(ddata)) {
+ error = sysc_enable_opt_clocks(ddata);
if (error)
return error;
}
-awake:
+ error = sysc_enable_main_clocks(ddata);
+ if (error)
+ goto err_opt_clocks;
+
+ if (ddata->legacy_mode) {
+ error = sysc_runtime_resume_legacy(dev, ddata);
+ if (error)
+ goto err_main_clocks;
+ } else {
+ error = sysc_enable_module(dev);
+ if (error)
+ goto err_main_clocks;
+ }
+
ddata->enabled = true;
+ return 0;
+
+err_main_clocks:
+ sysc_disable_main_clocks(ddata);
+err_opt_clocks:
+ if (sysc_opt_clks_needed(ddata))
+ sysc_disable_opt_clocks(ddata);
+
return error;
}
@@ -788,12 +1107,17 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff,
0),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
- SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
/* Uarts on omap4 and later */
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
- SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
- SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
+
+ /* Quirks that need to be set based on the module address */
+ SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -1, 0x50000800, 0xffffffff,
+ SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT |
+ SYSC_QUIRK_SWSUP_SIDLE),
#ifdef DEBUG
SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0),
@@ -805,6 +1129,7 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
0xffff00f0, 0),
SYSC_QUIRK("dcan", 0, 0, -1, -1, 0xffffffff, 0xffffffff, 0),
SYSC_QUIRK("dcan", 0, 0, -1, -1, 0x00001401, 0xffffffff, 0),
+ SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0),
SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0),
SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0),
SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -1, 0, 0, 0),
@@ -853,6 +1178,42 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
#endif
};
+/*
+ * Early quirks based on module base and register offsets only that are
+ * needed before the module revision can be read
+ */
+static void sysc_init_early_quirks(struct sysc *ddata)
+{
+ const struct sysc_revision_quirk *q;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) {
+ q = &sysc_revision_quirks[i];
+
+ if (!q->base)
+ continue;
+
+ if (q->base != ddata->module_pa)
+ continue;
+
+ if (q->rev_offset >= 0 &&
+ q->rev_offset != ddata->offsets[SYSC_REVISION])
+ continue;
+
+ if (q->sysc_offset >= 0 &&
+ q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
+ continue;
+
+ if (q->syss_offset >= 0 &&
+ q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
+ continue;
+
+ ddata->name = q->name;
+ ddata->cfg.quirks |= q->quirks;
+ }
+}
+
+/* Quirks that also consider the revision register value */
static void sysc_init_revision_quirks(struct sysc *ddata)
{
const struct sysc_revision_quirk *q;
@@ -885,6 +1246,55 @@ static void sysc_init_revision_quirks(struct sysc *ddata)
}
}
+/*
+ * Note that pdata->init_module() typically does a reset first. After
+ * pdata->init_module() is done, PM runtime can be used for the interconnect
+ * target module.
+ */
+static int sysc_legacy_init(struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
+ int error;
+
+ if (!ddata->legacy_mode || !pdata || !pdata->init_module)
+ return 0;
+
+ error = pdata->init_module(ddata->dev, ddata->mdata, &ddata->cookie);
+ if (error == -EEXIST)
+ error = 0;
+
+ return error;
+}
+
+/**
+ * sysc_rstctrl_reset_deassert - deassert rstctrl reset
+ * @ddata: device driver data
+ * @reset: reset before deassert
+ *
+ * A module can have both OCP softreset control and external rstctrl.
+ * If more complicated rstctrl resets are needed, please handle these
+ * directly from the child device driver and map only the module reset
+ * for the parent interconnect target module device.
+ *
+ * Automatic reset of the module on init can be skipped with the
+ * "ti,no-reset-on-init" device tree property.
+ */
+static int sysc_rstctrl_reset_deassert(struct sysc *ddata, bool reset)
+{
+ int error;
+
+ if (!ddata->rsts)
+ return 0;
+
+ if (reset) {
+ error = reset_control_assert(ddata->rsts);
+ if (error)
+ return error;
+ }
+
+ return reset_control_deassert(ddata->rsts);
+}
+
static int sysc_reset(struct sysc *ddata)
{
int offset = ddata->offsets[SYSC_SYSCONFIG];
@@ -915,38 +1325,58 @@ static int sysc_reset(struct sysc *ddata)
100, MAX_MODULE_SOFTRESET_WAIT);
}
-/* At this point the module is configured enough to read the revision */
+/*
+ * At this point the module is configured enough to read the revision but
+ * module may not be completely configured yet to use PM runtime. Enable
+ * all clocks directly during init to configure the quirks needed for PM
+ * runtime based on the revision register.
+ */
static int sysc_init_module(struct sysc *ddata)
{
- int error;
+ int error = 0;
+ bool manage_clocks = true;
+ bool reset = true;
- if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE_ON_INIT) {
- ddata->revision = sysc_read_revision(ddata);
- goto rev_quirks;
- }
+ if (ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
+ reset = false;
- error = pm_runtime_get_sync(ddata->dev);
- if (error < 0) {
- pm_runtime_put_noidle(ddata->dev);
+ error = sysc_rstctrl_reset_deassert(ddata, reset);
+ if (error)
+ return error;
- return 0;
- }
+ if (ddata->cfg.quirks &
+ (SYSC_QUIRK_NO_IDLE | SYSC_QUIRK_NO_IDLE_ON_INIT))
+ manage_clocks = false;
- error = sysc_reset(ddata);
- if (error) {
- dev_err(ddata->dev, "Reset failed with %d\n", error);
- pm_runtime_put_sync(ddata->dev);
+ if (manage_clocks) {
+ error = sysc_enable_opt_clocks(ddata);
+ if (error)
+ return error;
- return error;
+ error = sysc_enable_main_clocks(ddata);
+ if (error)
+ goto err_opt_clocks;
}
ddata->revision = sysc_read_revision(ddata);
- pm_runtime_put_sync(ddata->dev);
-
-rev_quirks:
sysc_init_revision_quirks(ddata);
- return 0;
+ error = sysc_legacy_init(ddata);
+ if (error)
+ goto err_main_clocks;
+
+ error = sysc_reset(ddata);
+ if (error)
+ dev_err(ddata->dev, "Reset failed with %d\n", error);
+
+err_main_clocks:
+ if (manage_clocks)
+ sysc_disable_main_clocks(ddata);
+err_opt_clocks:
+ if (manage_clocks)
+ sysc_disable_opt_clocks(ddata);
+
+ return error;
}
static int sysc_init_sysc_mask(struct sysc *ddata)
@@ -1208,7 +1638,7 @@ static int sysc_child_resume_noirq(struct device *dev)
}
#endif
-struct dev_pm_domain sysc_child_pm_domain = {
+static struct dev_pm_domain sysc_child_pm_domain = {
.ops = {
SET_RUNTIME_PM_OPS(sysc_child_runtime_suspend,
sysc_child_runtime_resume,
@@ -1281,6 +1711,8 @@ static const struct sysc_dts_quirk sysc_dts_quirks[] = {
.mask = SYSC_QUIRK_NO_IDLE_ON_INIT, },
{ .name = "ti,no-reset-on-init",
.mask = SYSC_QUIRK_NO_RESET_ON_INIT, },
+ { .name = "ti,no-idle",
+ .mask = SYSC_QUIRK_NO_IDLE, },
};
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
@@ -1331,6 +1763,9 @@ static void sysc_unprepare(struct sysc *ddata)
{
int i;
+ if (!ddata->clocks)
+ return;
+
for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
if (!IS_ERR_OR_NULL(ddata->clocks[i]))
clk_unprepare(ddata->clocks[i]);
@@ -1576,28 +2011,26 @@ static const struct sysc_capabilities sysc_dra7_mcan = {
static int sysc_init_pdata(struct sysc *ddata)
{
struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
- struct ti_sysc_module_data mdata;
- int error = 0;
+ struct ti_sysc_module_data *mdata;
if (!pdata || !ddata->legacy_mode)
return 0;
- mdata.name = ddata->legacy_mode;
- mdata.module_pa = ddata->module_pa;
- mdata.module_size = ddata->module_size;
- mdata.offsets = ddata->offsets;
- mdata.nr_offsets = SYSC_MAX_REGS;
- mdata.cap = ddata->cap;
- mdata.cfg = &ddata->cfg;
+ mdata = devm_kzalloc(ddata->dev, sizeof(*mdata), GFP_KERNEL);
+ if (!mdata)
+ return -ENOMEM;
- if (!pdata->init_module)
- return -ENODEV;
+ mdata->name = ddata->legacy_mode;
+ mdata->module_pa = ddata->module_pa;
+ mdata->module_size = ddata->module_size;
+ mdata->offsets = ddata->offsets;
+ mdata->nr_offsets = SYSC_MAX_REGS;
+ mdata->cap = ddata->cap;
+ mdata->cfg = &ddata->cfg;
- error = pdata->init_module(ddata->dev, &mdata, &ddata->cookie);
- if (error == -EEXIST)
- error = 0;
+ ddata->mdata = mdata;
- return error;
+ return 0;
}
static int sysc_init_match(struct sysc *ddata)
@@ -1651,10 +2084,6 @@ static int sysc_probe(struct platform_device *pdev)
if (error)
goto unprepare;
- error = sysc_get_clocks(ddata);
- if (error)
- return error;
-
error = sysc_map_and_check_registers(ddata);
if (error)
goto unprepare;
@@ -1675,15 +2104,21 @@ static int sysc_probe(struct platform_device *pdev)
if (error)
goto unprepare;
+ sysc_init_early_quirks(ddata);
+
+ error = sysc_get_clocks(ddata);
+ if (error)
+ return error;
+
error = sysc_init_resets(ddata);
if (error)
return error;
- pm_runtime_enable(ddata->dev);
error = sysc_init_module(ddata);
if (error)
goto unprepare;
+ pm_runtime_enable(ddata->dev);
error = pm_runtime_get_sync(ddata->dev);
if (error < 0) {
pm_runtime_put_noidle(ddata->dev);
diff --git a/drivers/clk/axs10x/i2s_pll_clock.c b/drivers/clk/axs10x/i2s_pll_clock.c
index 02d3bcd6216c..71c2e9519ca8 100644
--- a/drivers/clk/axs10x/i2s_pll_clock.c
+++ b/drivers/clk/axs10x/i2s_pll_clock.c
@@ -13,6 +13,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/of.h>
diff --git a/drivers/clk/axs10x/pll_clock.c b/drivers/clk/axs10x/pll_clock.c
index c68dada97316..aba787b2e771 100644
--- a/drivers/clk/axs10x/pll_clock.c
+++ b/drivers/clk/axs10x/pll_clock.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/slab.h>
diff --git a/drivers/clk/bcm/clk-bcm2835-aux.c b/drivers/clk/bcm/clk-bcm2835-aux.c
index 2a2c7569336a..b6d07ca0164f 100644
--- a/drivers/clk/bcm/clk-bcm2835-aux.c
+++ b/drivers/clk/bcm/clk-bcm2835-aux.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/bcm2835-aux.h>
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 9fcae932e082..770bb01f523e 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -29,6 +29,7 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index eee64b9e5d10..cc3b1e1bc087 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -15,8 +15,9 @@
#include "clk-kona.h"
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
/*
* "Policies" affect the frequencies of bus clocks provided by a
diff --git a/drivers/clk/berlin/berlin2-div.c b/drivers/clk/berlin/berlin2-div.c
index 4d0be66aa6a8..eb14a5bc0507 100644
--- a/drivers/clk/berlin/berlin2-div.c
+++ b/drivers/clk/berlin/berlin2-div.c
@@ -7,6 +7,7 @@
*/
#include <linux/bitops.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c
index 0b4b44a2579e..bccdfa00fd37 100644
--- a/drivers/clk/berlin/bg2.c
+++ b/drivers/clk/berlin/bg2.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
index 9b9db743df25..e9518d35f262 100644
--- a/drivers/clk/berlin/bg2q.c
+++ b/drivers/clk/berlin/bg2q.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/clk-fixed-mmio.c b/drivers/clk/clk-fixed-mmio.c
index d1a97d971183..51f26619b6a2 100644
--- a/drivers/clk/clk-fixed-mmio.c
+++ b/drivers/clk/clk-fixed-mmio.c
@@ -10,8 +10,9 @@
*/
#include <linux/clk-provider.h>
-#include <linux/of_address.h>
+#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
static struct clk_hw *fixed_mmio_clk_setup(struct device_node *node)
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index d81f1d2e9129..b1e556f20911 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -8,6 +8,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
diff --git a/drivers/clk/clk-hsdk-pll.c b/drivers/clk/clk-hsdk-pll.c
index a47c2b600f20..97d1e8c35b71 100644
--- a/drivers/clk/clk-hsdk-pll.c
+++ b/drivers/clk/clk-hsdk-pll.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
diff --git a/drivers/clk/clk-multiplier.c b/drivers/clk/clk-multiplier.c
index 94470b4eadf4..e507aa958da9 100644
--- a/drivers/clk/clk-multiplier.c
+++ b/drivers/clk/clk-multiplier.c
@@ -7,6 +7,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/export.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/slab.h>
diff --git a/drivers/clk/davinci/pll-da850.c b/drivers/clk/davinci/pll-da850.c
index 0f7198191ea2..bf120bec59ae 100644
--- a/drivers/clk/davinci/pll-da850.c
+++ b/drivers/clk/davinci/pll-da850.c
@@ -11,6 +11,7 @@
#include <linux/clkdev.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mfd/da8xx-cfgchip.h>
#include <linux/mfd/syscon.h>
diff --git a/drivers/clk/h8300/clk-div.c b/drivers/clk/h8300/clk-div.c
index d413ade95c99..376be03bb546 100644
--- a/drivers/clk/h8300/clk-div.c
+++ b/drivers/clk/h8300/clk-div.c
@@ -7,6 +7,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/h8300/clk-h8s2678.c b/drivers/clk/h8300/clk-h8s2678.c
index c7ae653c8a16..67c495b67c18 100644
--- a/drivers/clk/h8300/clk-h8s2678.c
+++ b/drivers/clk/h8300/clk-h8s2678.c
@@ -6,8 +6,9 @@
*/
#include <linux/clk-provider.h>
-#include <linux/err.h>
#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/err.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/hisilicon/clk-hi3660-stub.c b/drivers/clk/hisilicon/clk-hi3660-stub.c
index e8b2c43b1bb8..89934bee7c9e 100644
--- a/drivers/clk/hisilicon/clk-hi3660-stub.c
+++ b/drivers/clk/hisilicon/clk-hi3660-stub.c
@@ -24,6 +24,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index 574fac1a169f..388bdb94f841 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -3,9 +3,10 @@
* Copyright 2018 NXP
*/
+#include <linux/clk-provider.h>
#include <linux/errno.h>
+#include <linux/io.h>
#include <linux/slab.h>
-#include <linux/clk-provider.h>
#include "clk.h"
diff --git a/drivers/clk/imx/clk-frac-pll.c b/drivers/clk/imx/clk-frac-pll.c
index 76b9eb15604e..fece503e3610 100644
--- a/drivers/clk/imx/clk-frac-pll.c
+++ b/drivers/clk/imx/clk-frac-pll.c
@@ -10,6 +10,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
diff --git a/drivers/clk/imx/clk-imx21.c b/drivers/clk/imx/clk-imx21.c
index e63188eb08ac..6e93284c397b 100644
--- a/drivers/clk/imx/clk-imx21.c
+++ b/drivers/clk/imx/clk-imx21.c
@@ -11,6 +11,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <dt-bindings/clock/imx21-clock.h>
diff --git a/drivers/clk/imx/clk-imx27.c b/drivers/clk/imx/clk-imx27.c
index 0a0ab95d16fe..a3753067fc12 100644
--- a/drivers/clk/imx/clk-imx27.c
+++ b/drivers/clk/imx/clk-imx27.c
@@ -3,6 +3,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <dt-bindings/clock/imx27-clock.h>
diff --git a/drivers/clk/imx/clk-pfdv2.c b/drivers/clk/imx/clk-pfdv2.c
index fb567dcc2118..a03bbed662c6 100644
--- a/drivers/clk/imx/clk-pfdv2.c
+++ b/drivers/clk/imx/clk-pfdv2.c
@@ -9,6 +9,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
diff --git a/drivers/clk/imx/clk-pllv4.c b/drivers/clk/imx/clk-pllv4.c
index d7e62c3620d3..8155b12cf0e1 100644
--- a/drivers/clk/imx/clk-pllv4.c
+++ b/drivers/clk/imx/clk-pllv4.c
@@ -9,6 +9,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
diff --git a/drivers/clk/imx/clk-sccg-pll.c b/drivers/clk/imx/clk-sccg-pll.c
index 991bbe63f156..5d65f65c512e 100644
--- a/drivers/clk/imx/clk-sccg-pll.c
+++ b/drivers/clk/imx/clk-sccg-pll.c
@@ -10,6 +10,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index 510b685212d3..b80af61dc1f3 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -20,6 +20,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c
index b86edd328249..25f7df028e67 100644
--- a/drivers/clk/ingenic/jz4740-cgu.c
+++ b/drivers/clk/ingenic/jz4740-cgu.c
@@ -17,6 +17,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <dt-bindings/clock/jz4740-cgu.h>
#include <asm/mach-jz4740/clock.h>
diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c
index bf46a0df2004..dfce740c25a8 100644
--- a/drivers/clk/ingenic/jz4770-cgu.c
+++ b/drivers/clk/ingenic/jz4770-cgu.c
@@ -7,6 +7,7 @@
#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/syscore_ops.h>
#include <dt-bindings/clock/jz4770-cgu.h>
diff --git a/drivers/clk/ingenic/jz4780-cgu.c b/drivers/clk/ingenic/jz4780-cgu.c
index 6427be117ff1..d03b7fcfd82b 100644
--- a/drivers/clk/ingenic/jz4780-cgu.c
+++ b/drivers/clk/ingenic/jz4780-cgu.c
@@ -17,6 +17,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <dt-bindings/clock/jz4780-cgu.h>
#include "cgu.h"
diff --git a/drivers/clk/loongson1/clk-loongson1c.c b/drivers/clk/loongson1/clk-loongson1c.c
index 3466f7320b40..22a165ef65cf 100644
--- a/drivers/clk/loongson1/clk-loongson1c.c
+++ b/drivers/clk/loongson1/clk-loongson1c.c
@@ -9,6 +9,7 @@
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <loongson1.h>
#include "clk.h"
diff --git a/drivers/clk/microchip/clk-core.c b/drivers/clk/microchip/clk-core.c
index c3b301463425..4680064f1951 100644
--- a/drivers/clk/microchip/clk-core.c
+++ b/drivers/clk/microchip/clk-core.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <asm/mach-pic32/pic32.h>
#include <asm/traps.h>
diff --git a/drivers/clk/microchip/clk-pic32mzda.c b/drivers/clk/microchip/clk-pic32mzda.c
index 9f734779be92..e6c05df2d47f 100644
--- a/drivers/clk/microchip/clk-pic32mzda.c
+++ b/drivers/clk/microchip/clk-pic32mzda.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 1f1cff428d78..5fc6d486a381 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -18,6 +18,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_device.h>
diff --git a/drivers/clk/mvebu/armada-37xx-tbg.c b/drivers/clk/mvebu/armada-37xx-tbg.c
index ee272d4d8c24..585a02e0b330 100644
--- a/drivers/clk/mvebu/armada-37xx-tbg.c
+++ b/drivers/clk/mvebu/armada-37xx-tbg.c
@@ -9,6 +9,7 @@
#include <linux/clk-provider.h>
#include <linux/clk.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/mvebu/clk-corediv.c b/drivers/clk/mvebu/clk-corediv.c
index 1fc84b0e72ee..818b175391fa 100644
--- a/drivers/clk/mvebu/clk-corediv.c
+++ b/drivers/clk/mvebu/clk-corediv.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/delay.h>
diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c
index 5969f620607a..f2e171a01fb4 100644
--- a/drivers/clk/nxp/clk-lpc18xx-ccu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-ccu.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c
index f5bc8bd192b7..8b686da5577b 100644
--- a/drivers/clk/nxp/clk-lpc18xx-cgu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c
@@ -10,6 +10,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c
index 7524d19fe60b..7f67c1036ff9 100644
--- a/drivers/clk/nxp/clk-lpc32xx.c
+++ b/drivers/clk/nxp/clk-lpc32xx.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/pxa/clk-pxa.c b/drivers/clk/pxa/clk-pxa.c
index 42627bf8a09e..5326f77eb35a 100644
--- a/drivers/clk/pxa/clk-pxa.c
+++ b/drivers/clk/pxa/clk-pxa.c
@@ -13,6 +13,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <dt-bindings/clock/pxa-clock.h>
diff --git a/drivers/clk/renesas/clk-r8a73a4.c b/drivers/clk/renesas/clk-r8a73a4.c
index 2719c248c67b..cfed11c659d9 100644
--- a/drivers/clk/renesas/clk-r8a73a4.c
+++ b/drivers/clk/renesas/clk-r8a73a4.c
@@ -8,6 +8,7 @@
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/of.h>
diff --git a/drivers/clk/renesas/clk-r8a7740.c b/drivers/clk/renesas/clk-r8a7740.c
index 5967656c13cc..d8190f007a81 100644
--- a/drivers/clk/renesas/clk-r8a7740.c
+++ b/drivers/clk/renesas/clk-r8a7740.c
@@ -8,6 +8,7 @@
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/of.h>
diff --git a/drivers/clk/renesas/clk-rcar-gen2.c b/drivers/clk/renesas/clk-rcar-gen2.c
index 2913b4148157..da9fe3f032eb 100644
--- a/drivers/clk/renesas/clk-rcar-gen2.c
+++ b/drivers/clk/renesas/clk-rcar-gen2.c
@@ -10,6 +10,7 @@
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/of.h>
diff --git a/drivers/clk/renesas/clk-rz.c b/drivers/clk/renesas/clk-rz.c
index 3cda53a97f4e..fbc34beafc78 100644
--- a/drivers/clk/renesas/clk-rz.c
+++ b/drivers/clk/renesas/clk-rz.c
@@ -9,6 +9,7 @@
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/renesas/clk-sh73a0.c b/drivers/clk/renesas/clk-sh73a0.c
index dc8ffc7c727a..5f25a70bc61c 100644
--- a/drivers/clk/renesas/clk-sh73a0.c
+++ b/drivers/clk/renesas/clk-sh73a0.c
@@ -8,6 +8,7 @@
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index 97c72477cd54..7d042183aa37 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -11,6 +11,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/of.h>
diff --git a/drivers/clk/renesas/rcar-usb2-clock-sel.c b/drivers/clk/renesas/rcar-usb2-clock-sel.c
index b241f9ca3d71..cc90b11a9c25 100644
--- a/drivers/clk/renesas/rcar-usb2-clock-sel.c
+++ b/drivers/clk/renesas/rcar-usb2-clock-sel.c
@@ -13,6 +13,7 @@
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 30df0dc853f0..0201809bbd37 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
index 784b81e1ea7c..ba9f00dc9740 100644
--- a/drivers/clk/rockchip/clk-half-divider.c
+++ b/drivers/clk/rockchip/clk-half-divider.c
@@ -3,8 +3,9 @@
* Copyright (c) 2018 Fuzhou Rockchip Electronics Co., Ltd
*/
-#include <linux/slab.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/slab.h>
#include "clk.h"
#define div_mask(width) ((1 << (width)) - 1)
diff --git a/drivers/clk/rockchip/clk-px30.c b/drivers/clk/rockchip/clk-px30.c
index 601a77f1af78..68d23be18cbc 100644
--- a/drivers/clk/rockchip/clk-px30.c
+++ b/drivers/clk/rockchip/clk-px30.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index c3001980dbdc..3bf919b6c6e3 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -17,6 +17,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk-rk3128.c b/drivers/clk/rockchip/clk-rk3128.c
index 5970a50671b9..8278a54db343 100644
--- a/drivers/clk/rockchip/clk-rk3128.c
+++ b/drivers/clk/rockchip/clk-rk3128.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index 5ecf28854876..378420b8835a 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <dt-bindings/clock/rk3188-cru-common.h>
diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
index 7af48184b022..7176003b5c7c 100644
--- a/drivers/clk/rockchip/clk-rk3228.c
+++ b/drivers/clk/rockchip/clk-rk3228.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 24baeb56a1b3..85907f31c63f 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
index f12142d9cea2..9b03c1abf19c 100644
--- a/drivers/clk/rockchip/clk-rk3328.c
+++ b/drivers/clk/rockchip/clk-rk3328.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index 7c4d242f19c1..d239bbc2fc77 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -13,6 +13,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index 5a628148f3f0..2322d712ba88 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/rockchip/clk-rv1108.c b/drivers/clk/rockchip/clk-rv1108.c
index 089cb17925e5..6c051aa04e59 100644
--- a/drivers/clk/rockchip/clk-rv1108.c
+++ b/drivers/clk/rockchip/clk-rv1108.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 0ea8e8080d1a..d5fac5a8a3d7 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/reboot.h>
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index a5fddebbe530..3f80bcd46074 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -33,6 +33,7 @@
*/
#include <linux/errno.h>
+#include <linux/io.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 9c95390d2d77..ce41f36a0e29 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index 0e9a41a4cac8..facaad3c56a1 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 54066e6508d3..d2a68a792a21 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
index 8ae44b5db4c2..91db7894125d 100644
--- a/drivers/clk/samsung/clk-exynos5-subcmu.c
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
@@ -4,6 +4,7 @@
// Author: Marek Szyprowski <m.szyprowski@samsung.com>
// Common Clock Framework support for Exynos5 power-domain dependent clocks
+#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index f14139bcb0c1..c8265c4cbc4f 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -12,6 +12,7 @@
#include <dt-bindings/clock/exynos5250.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 1c4c7a3039f1..0c6782ceac48 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -13,7 +13,8 @@
#include <linux/hrtimer.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "clk.h"
#include "clk-pll.h"
diff --git a/drivers/clk/samsung/clk-s3c2410-dclk.c b/drivers/clk/samsung/clk-s3c2410-dclk.c
index 82f8ae22fd34..0117e40c1d0a 100644
--- a/drivers/clk/samsung/clk-s3c2410-dclk.c
+++ b/drivers/clk/samsung/clk-s3c2410-dclk.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include "clk.h"
diff --git a/drivers/clk/samsung/clk-s3c2412.c b/drivers/clk/samsung/clk-s3c2412.c
index dd1159050a5a..ce21b89d1eb1 100644
--- a/drivers/clk/samsung/clk-s3c2412.c
+++ b/drivers/clk/samsung/clk-s3c2412.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reboot.h>
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
index f38f0e24e3b6..b2ea4dfb5b8c 100644
--- a/drivers/clk/samsung/clk-s3c2443.c
+++ b/drivers/clk/samsung/clk-s3c2443.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reboot.h>
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index 1f6e47cd327d..9ad546a5f74c 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -15,6 +15,7 @@
#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
diff --git a/drivers/clk/sifive/fu540-prci.c b/drivers/clk/sifive/fu540-prci.c
index 0ec8bf7b4b28..6282ee2f361c 100644
--- a/drivers/clk/sifive/fu540-prci.c
+++ b/drivers/clk/sifive/fu540-prci.c
@@ -30,6 +30,7 @@
#include <linux/clk/analogbits-wrpll-cln28hpc.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_clk.h>
diff --git a/drivers/clk/socfpga/clk-gate-s10.c b/drivers/clk/socfpga/clk-gate-s10.c
index eee2d48ab656..54a464fa63e0 100644
--- a/drivers/clk/socfpga/clk-gate-s10.c
+++ b/drivers/clk/socfpga/clk-gate-s10.c
@@ -3,6 +3,7 @@
* Copyright (C) 2017, Intel Corporation
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/slab.h>
#include "stratix10-clk.h"
#include "clk.h"
diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
index 568f59b58ddf..5c50e723ecae 100644
--- a/drivers/clk/socfpga/clk-periph-s10.c
+++ b/drivers/clk/socfpga/clk-periph-s10.c
@@ -4,6 +4,7 @@
*/
#include <linux/slab.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "stratix10-clk.h"
#include "clk.h"
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index c4d0b6f6abf2..4705eb544f01 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -4,6 +4,7 @@
*/
#include <linux/slab.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "stratix10-clk.h"
#include "clk.h"
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
index c514d39760cb..23497f07ad89 100644
--- a/drivers/clk/st/clkgen-mux.c
+++ b/drivers/clk/st/clkgen-mux.c
@@ -14,6 +14,7 @@
*/
#include <linux/slab.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
index 129ebd2588fd..2bbfb3343311 100644
--- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
+++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index be0deee70182..d3fc1f5bf396 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index 3c32d7798f27..9d3f98962779 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c
index fa2c2dd77102..813e9bf73cbf 100644
--- a/drivers/clk/sunxi-ng/ccu-sun5i.c
+++ b/drivers/clk/sunxi-ng/ccu-sun5i.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 609970c0b666..b494c4fe0b2c 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -16,6 +16,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
index 4b5f8f4e4ab8..a9c0c5406b85 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
index c7bf814dfd2b..25bcf3fd2dfc 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
index 5f714b4d8ee4..be5920e8a9ca 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index e71e2451c2e3..0f3df565c6c1 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
index a22d11aa38ba..f9625f7b9ec2 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index eada0e291859..ec64eb692ecf 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
index 8936ef87652c..0e23583e4f58 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
index dc9f0a365664..e748b8a6f3c5 100644
--- a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
+++ b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu_div.c b/drivers/clk/sunxi-ng/ccu_div.c
index 302a18efd39f..6d407a8a61ee 100644
--- a/drivers/clk/sunxi-ng/ccu_div.c
+++ b/drivers/clk/sunxi-ng/ccu_div.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_div.h"
diff --git a/drivers/clk/sunxi-ng/ccu_frac.c b/drivers/clk/sunxi-ng/ccu_frac.c
index d1d168d4c4f0..1842603f8f11 100644
--- a/drivers/clk/sunxi-ng/ccu_frac.c
+++ b/drivers/clk/sunxi-ng/ccu_frac.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/spinlock.h>
#include "ccu_frac.h"
diff --git a/drivers/clk/sunxi-ng/ccu_gate.c b/drivers/clk/sunxi-ng/ccu_gate.c
index cd069d5da215..9c81644e9dfe 100644
--- a/drivers/clk/sunxi-ng/ccu_gate.c
+++ b/drivers/clk/sunxi-ng/ccu_gate.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
diff --git a/drivers/clk/sunxi-ng/ccu_mmc_timing.c b/drivers/clk/sunxi-ng/ccu_mmc_timing.c
index f9869f7353c0..b23410682088 100644
--- a/drivers/clk/sunxi-ng/ccu_mmc_timing.c
+++ b/drivers/clk/sunxi-ng/ccu_mmc_timing.c
@@ -13,6 +13,7 @@
#include <linux/clk-provider.h>
#include <linux/clk/sunxi-ng.h>
+#include <linux/io.h>
#include "ccu_common.h"
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index 0357349eb767..e17fb4c9fcfe 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_mp.h"
diff --git a/drivers/clk/sunxi-ng/ccu_mult.c b/drivers/clk/sunxi-ng/ccu_mult.c
index 12e0783caee6..c2a672797a74 100644
--- a/drivers/clk/sunxi-ng/ccu_mult.c
+++ b/drivers/clk/sunxi-ng/ccu_mult.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_mult.h"
diff --git a/drivers/clk/sunxi-ng/ccu_mux.c b/drivers/clk/sunxi-ng/ccu_mux.c
index 312664155a54..f9b409c3a89c 100644
--- a/drivers/clk/sunxi-ng/ccu_mux.c
+++ b/drivers/clk/sunxi-ng/ccu_mux.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_mux.h"
diff --git a/drivers/clk/sunxi-ng/ccu_nk.c b/drivers/clk/sunxi-ng/ccu_nk.c
index 2485bda87a9a..50c7e6b1ba13 100644
--- a/drivers/clk/sunxi-ng/ccu_nk.c
+++ b/drivers/clk/sunxi-ng/ccu_nk.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_nk.h"
diff --git a/drivers/clk/sunxi-ng/ccu_nkm.c b/drivers/clk/sunxi-ng/ccu_nkm.c
index 841840e35e61..aa5beaabc292 100644
--- a/drivers/clk/sunxi-ng/ccu_nkm.c
+++ b/drivers/clk/sunxi-ng/ccu_nkm.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_nkm.h"
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
index cbcdf664f336..53ec4fb59880 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_gate.h"
#include "ccu_nkmp.h"
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
index 424d8635b053..e15413174aa7 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.c
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "ccu_frac.h"
#include "ccu_gate.h"
diff --git a/drivers/clk/sunxi-ng/ccu_phase.c b/drivers/clk/sunxi-ng/ccu_phase.c
index 400c58ad72fd..0a4a6fd13f5b 100644
--- a/drivers/clk/sunxi-ng/ccu_phase.c
+++ b/drivers/clk/sunxi-ng/ccu_phase.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/spinlock.h>
#include "ccu_phase.h"
diff --git a/drivers/clk/sunxi-ng/ccu_sdm.c b/drivers/clk/sunxi-ng/ccu_sdm.c
index 3b3dc9bdf2b0..e510467ea24c 100644
--- a/drivers/clk/sunxi-ng/ccu_sdm.c
+++ b/drivers/clk/sunxi-ng/ccu_sdm.c
@@ -8,6 +8,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/spinlock.h>
#include "ccu_sdm.h"
diff --git a/drivers/clk/sunxi/clk-a10-mod1.c b/drivers/clk/sunxi/clk-a10-mod1.c
index e2819fa09637..9e6796a7b4c4 100644
--- a/drivers/clk/sunxi/clk-a10-mod1.c
+++ b/drivers/clk/sunxi/clk-a10-mod1.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-a10-pll2.c b/drivers/clk/sunxi/clk-a10-pll2.c
index d8eab90ae661..a709b6a551af 100644
--- a/drivers/clk/sunxi/clk-a10-pll2.c
+++ b/drivers/clk/sunxi/clk-a10-pll2.c
@@ -17,6 +17,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-a10-ve.c b/drivers/clk/sunxi/clk-a10-ve.c
index d9ea22ec4e25..d119b453dccd 100644
--- a/drivers/clk/sunxi/clk-a10-ve.c
+++ b/drivers/clk/sunxi/clk-a10-ve.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reset-controller.h>
diff --git a/drivers/clk/sunxi/clk-a20-gmac.c b/drivers/clk/sunxi/clk-a20-gmac.c
index 3437f734c9bf..e6d639d9ea70 100644
--- a/drivers/clk/sunxi/clk-a20-gmac.c
+++ b/drivers/clk/sunxi/clk-a20-gmac.c
@@ -17,6 +17,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c
index fc0278a1acc7..915954507d0a 100644
--- a/drivers/clk/sunxi/clk-mod0.c
+++ b/drivers/clk/sunxi/clk-mod0.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-simple-gates.c b/drivers/clk/sunxi/clk-simple-gates.c
index a085c3bc127c..8130467d647a 100644
--- a/drivers/clk/sunxi/clk-simple-gates.c
+++ b/drivers/clk/sunxi/clk-simple-gates.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-sun4i-display.c b/drivers/clk/sunxi/clk-sun4i-display.c
index 9780fac6d029..bb2dc83fc697 100644
--- a/drivers/clk/sunxi/clk-sun4i-display.c
+++ b/drivers/clk/sunxi/clk-sun4i-display.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/reset-controller.h>
diff --git a/drivers/clk/sunxi/clk-sun4i-pll3.c b/drivers/clk/sunxi/clk-sun4i-pll3.c
index f66267e77d9c..c879d7e25ca0 100644
--- a/drivers/clk/sunxi/clk-sun4i-pll3.c
+++ b/drivers/clk/sunxi/clk-sun4i-pll3.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c b/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
index b6d29d1bedca..af8ca5019639 100644
--- a/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
+++ b/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-sun8i-apb0.c b/drivers/clk/sunxi/clk-sun8i-apb0.c
index d5c31804ee54..5a7d4dd09e85 100644
--- a/drivers/clk/sunxi/clk-sun8i-apb0.c
+++ b/drivers/clk/sunxi/clk-sun8i-apb0.c
@@ -16,6 +16,7 @@
#include <linux/clk-provider.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/sunxi/clk-sun8i-bus-gates.c b/drivers/clk/sunxi/clk-sun8i-bus-gates.c
index bee305bdddbe..bfbcd71b225d 100644
--- a/drivers/clk/sunxi/clk-sun8i-bus-gates.c
+++ b/drivers/clk/sunxi/clk-sun8i-bus-gates.c
@@ -18,6 +18,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
diff --git a/drivers/clk/sunxi/clk-sun8i-mbus.c b/drivers/clk/sunxi/clk-sun8i-mbus.c
index 56db89b6979f..0e924c9cbd5c 100644
--- a/drivers/clk/sunxi/clk-sun8i-mbus.c
+++ b/drivers/clk/sunxi/clk-sun8i-mbus.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/sunxi/clk-sun9i-cpus.c b/drivers/clk/sunxi/clk-sun9i-cpus.c
index 4d5e14142e15..01255d827fc9 100644
--- a/drivers/clk/sunxi/clk-sun9i-cpus.c
+++ b/drivers/clk/sunxi/clk-sun9i-cpus.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index f00d8758ba24..da264d0f7f4b 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -18,6 +18,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/reset.h>
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 892c29030b7b..f5b1c0067365 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -17,6 +17,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reset-controller.h>
diff --git a/drivers/clk/sunxi/clk-usb.c b/drivers/clk/sunxi/clk-usb.c
index 917fc27a33dd..7d15e0432ed4 100644
--- a/drivers/clk/sunxi/clk-usb.c
+++ b/drivers/clk/sunxi/clk-usb.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/reset-controller.h>
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
index 93ecb538e59b..b7f763f0ecd8 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-emc.c
@@ -20,6 +20,7 @@
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
diff --git a/drivers/clk/tegra/clk-periph-fixed.c b/drivers/clk/tegra/clk-periph-fixed.c
index c57dfb037b10..956f2215c733 100644
--- a/drivers/clk/tegra/clk-periph-fixed.c
+++ b/drivers/clk/tegra/clk-periph-fixed.c
@@ -15,6 +15,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "clk.h"
diff --git a/drivers/clk/tegra/clk-sdmmc-mux.c b/drivers/clk/tegra/clk-sdmmc-mux.c
index 473d418533cb..a5cd3e31dbae 100644
--- a/drivers/clk/tegra/clk-sdmmc-mux.c
+++ b/drivers/clk/tegra/clk-sdmmc-mux.c
@@ -12,6 +12,7 @@
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/types.h>
#include "clk.h"
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index ffaf17f71860..6f2862eddad7 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -18,6 +18,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/clk/tegra.h>
#include <linux/reset-controller.h>
diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
index 0c210984765a..fdfb90058504 100644
--- a/drivers/clk/ti/adpll.c
+++ b/drivers/clk/ti/adpll.c
@@ -14,6 +14,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of_device.h>
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index ba17cc5bd04b..e0b8ed3a1e80 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -19,6 +19,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/ti.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/list.h>
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index ed24f20f63c7..95e36ba64acc 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -13,6 +13,7 @@
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
index c2b6bb814742..4fa0cd951d2e 100644
--- a/drivers/clk/versatile/clk-sp810.c
+++ b/drivers/clk/versatile/clk-sp810.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c
index 19174835693b..5970edb6d334 100644
--- a/drivers/clk/x86/clk-pmc-atom.c
+++ b/drivers/clk/x86/clk-pmc-atom.c
@@ -17,6 +17,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/platform_data/x86/clk-pmc-atom.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
index 8febd2431545..a11f93ecbf34 100644
--- a/drivers/clk/zynqmp/clkc.c
+++ b/drivers/clk/zynqmp/clkc.c
@@ -739,8 +739,8 @@ static int zynqmp_clock_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
eemi_ops = zynqmp_pm_get_eemi_ops();
- if (!eemi_ops)
- return -ENXIO;
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
ret = zynqmp_clk_setup(dev->of_node);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 4b3d143f0f8a..6bcaa4e2e72c 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -69,6 +69,13 @@ config FTTMR010_TIMER
Enables support for the Faraday Technology timer block
FTTMR010.
+config IXP4XX_TIMER
+ bool "Intel XScale IXP4xx timer driver" if COMPILE_TEST
+ depends on HAS_IOMEM
+ select CLKSRC_MMIO
+ help
+ Enables support for the Intel XScale IXP4xx SoC timer.
+
config ROCKCHIP_TIMER
bool "Rockchip timer driver" if COMPILE_TEST
depends on ARM || ARM64
@@ -380,7 +387,7 @@ config ARM_GLOBAL_TIMER
This option enables support for the ARM global timer unit
config ARM_TIMER_SP804
- bool "Support for Dual Timer SP804 module"
+ bool "Support for Dual Timer SP804 module" if COMPILE_TEST
depends on GENERIC_SCHED_CLOCK && CLKDEV_LOOKUP
select CLKSRC_MMIO
select TIMER_OF if OF
@@ -400,8 +407,11 @@ config ARMV7M_SYSTICK
This option enables support for the ARMv7M system timer unit
config ATMEL_PIT
+ bool "Atmel PIT support" if COMPILE_TEST
+ depends on HAS_IOMEM
select TIMER_OF if OF
- def_bool SOC_AT91SAM9 || SOC_SAMA5
+ help
+ Support for the Periodic Interval Timer found on Atmel SoCs.
config ATMEL_ST
bool "Atmel ST timer support" if COMPILE_TEST
@@ -411,6 +421,13 @@ config ATMEL_ST
help
Support for the Atmel ST timer.
+config ATMEL_TCB_CLKSRC
+ bool "Atmel TC Block timer driver" if COMPILE_TEST
+ depends on HAS_IOMEM
+ select TIMER_OF if OF
+ help
+ Support for Timer Counter Blocks on Atmel SoCs.
+
config CLKSRC_EXYNOS_MCT
bool "Exynos multi core timer driver" if COMPILE_TEST
depends on ARM || ARM64
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index be6e0fbc7489..236858fa7fbf 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_TIMER_OF) += timer-of.o
obj-$(CONFIG_TIMER_PROBE) += timer-probe.o
obj-$(CONFIG_ATMEL_PIT) += timer-atmel-pit.o
obj-$(CONFIG_ATMEL_ST) += timer-atmel-st.o
-obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
+obj-$(CONFIG_ATMEL_TCB_CLKSRC) += timer-atmel-tcb.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += timer-cs5535.o
@@ -20,6 +20,7 @@ obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o
obj-$(CONFIG_FTTMR010_TIMER) += timer-fttmr010.o
+obj-$(CONFIG_IXP4XX_TIMER) += timer-ixp4xx.o
obj-$(CONFIG_ROCKCHIP_TIMER) += timer-rockchip.o
obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o
obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/timer-atmel-tcb.c
index f987027ca566..6ed31f9def7e 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/timer-atmel-tcb.c
@@ -9,9 +9,11 @@
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/io.h>
-#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
#include <linux/syscore_ops.h>
-#include <linux/atmel_tc.h>
+#include <soc/at91/atmel_tcb.h>
/*
@@ -28,13 +30,6 @@
* source, used in either periodic or oneshot mode. This runs
* at 32 KiHZ, and can handle delays of up to two seconds.
*
- * A boot clocksource and clockevent source are also currently needed,
- * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
- * this code can be used when init_timers() is called, well before most
- * devices are set up. (Some low end AT91 parts, which can run uClinux,
- * have only the timers in one TC block... they currently don't support
- * the tclib code, because of that initialization issue.)
- *
* REVISIT behavior during system suspend states... we should disable
* all clocks and save the power. Easily done for clockevent devices,
* but clocksources won't necessarily get the needed notifications.
@@ -112,7 +107,6 @@ static void tc_clksrc_resume(struct clocksource *cs)
}
static struct clocksource clksrc = {
- .name = "tcb_clksrc",
.rating = 200,
.read = tc_get_cycles,
.mask = CLOCKSOURCE_MASK(32),
@@ -121,6 +115,16 @@ static struct clocksource clksrc = {
.resume = tc_clksrc_resume,
};
+static u64 notrace tc_sched_clock_read(void)
+{
+ return tc_get_cycles(&clksrc);
+}
+
+static u64 notrace tc_sched_clock_read32(void)
+{
+ return tc_get_cycles32(&clksrc);
+}
+
#ifdef CONFIG_GENERIC_CLOCKEVENTS
struct tc_clkevt_device {
@@ -214,7 +218,6 @@ static int tc_next_event(unsigned long delta, struct clock_event_device *d)
static struct tc_clkevt_device clkevt = {
.clkevt = {
- .name = "tc_clkevt",
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
/* Should be lower than at91rm9200's system timer */
@@ -330,39 +333,74 @@ static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_id
writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
}
-static int __init tcb_clksrc_init(void)
-{
- static char bootinfo[] __initdata
- = KERN_DEBUG "%s: tc%d at %d.%03d MHz\n";
+static const u8 atmel_tcb_divisors[5] = { 2, 8, 32, 128, 0, };
+
+static const struct of_device_id atmel_tcb_of_match[] = {
+ { .compatible = "atmel,at91rm9200-tcb", .data = (void *)16, },
+ { .compatible = "atmel,at91sam9x5-tcb", .data = (void *)32, },
+ { /* sentinel */ }
+};
- struct platform_device *pdev;
- struct atmel_tc *tc;
+static int __init tcb_clksrc_init(struct device_node *node)
+{
+ struct atmel_tc tc;
struct clk *t0_clk;
+ const struct of_device_id *match;
+ u64 (*tc_sched_clock)(void);
u32 rate, divided_rate = 0;
int best_divisor_idx = -1;
int clk32k_divisor_idx = -1;
+ int bits;
int i;
int ret;
- tc = atmel_tc_alloc(CONFIG_ATMEL_TCB_CLKSRC_BLOCK);
- if (!tc) {
- pr_debug("can't alloc TC for clocksource\n");
- return -ENODEV;
+ /* Protect against multiple calls */
+ if (tcaddr)
+ return 0;
+
+ tc.regs = of_iomap(node->parent, 0);
+ if (!tc.regs)
+ return -ENXIO;
+
+ t0_clk = of_clk_get_by_name(node->parent, "t0_clk");
+ if (IS_ERR(t0_clk))
+ return PTR_ERR(t0_clk);
+
+ tc.slow_clk = of_clk_get_by_name(node->parent, "slow_clk");
+ if (IS_ERR(tc.slow_clk))
+ return PTR_ERR(tc.slow_clk);
+
+ tc.clk[0] = t0_clk;
+ tc.clk[1] = of_clk_get_by_name(node->parent, "t1_clk");
+ if (IS_ERR(tc.clk[1]))
+ tc.clk[1] = t0_clk;
+ tc.clk[2] = of_clk_get_by_name(node->parent, "t2_clk");
+ if (IS_ERR(tc.clk[2]))
+ tc.clk[2] = t0_clk;
+
+ tc.irq[2] = of_irq_get(node->parent, 2);
+ if (tc.irq[2] <= 0) {
+ tc.irq[2] = of_irq_get(node->parent, 0);
+ if (tc.irq[2] <= 0)
+ return -EINVAL;
}
- tcaddr = tc->regs;
- pdev = tc->pdev;
- t0_clk = tc->clk[0];
+ match = of_match_node(atmel_tcb_of_match, node->parent);
+ bits = (uintptr_t)match->data;
+
+ for (i = 0; i < ARRAY_SIZE(tc.irq); i++)
+ writel(ATMEL_TC_ALL_IRQ, tc.regs + ATMEL_TC_REG(i, IDR));
+
ret = clk_prepare_enable(t0_clk);
if (ret) {
pr_debug("can't enable T0 clk\n");
- goto err_free_tc;
+ return ret;
}
/* How fast will we be counting? Pick something over 5 MHz. */
rate = (u32) clk_get_rate(t0_clk);
- for (i = 0; i < 5; i++) {
- unsigned divisor = atmel_tc_divisors[i];
+ for (i = 0; i < ARRAY_SIZE(atmel_tcb_divisors); i++) {
+ unsigned divisor = atmel_tcb_divisors[i];
unsigned tmp;
/* remember 32 KiHz clock for later */
@@ -381,27 +419,31 @@ static int __init tcb_clksrc_init(void)
best_divisor_idx = i;
}
-
- printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK,
- divided_rate / 1000000,
+ clksrc.name = kbasename(node->parent->full_name);
+ clkevt.clkevt.name = kbasename(node->parent->full_name);
+ pr_debug("%s at %d.%03d MHz\n", clksrc.name, divided_rate / 1000000,
((divided_rate % 1000000) + 500) / 1000);
- if (tc->tcb_config && tc->tcb_config->counter_width == 32) {
+ tcaddr = tc.regs;
+
+ if (bits == 32) {
/* use appropriate function to read 32 bit counter */
clksrc.read = tc_get_cycles32;
/* setup only channel 0 */
- tcb_setup_single_chan(tc, best_divisor_idx);
+ tcb_setup_single_chan(&tc, best_divisor_idx);
+ tc_sched_clock = tc_sched_clock_read32;
} else {
- /* tclib will give us three clocks no matter what the
+ /* we have three clocks no matter what the
* underlying platform supports.
*/
- ret = clk_prepare_enable(tc->clk[1]);
+ ret = clk_prepare_enable(tc.clk[1]);
if (ret) {
pr_debug("can't enable T1 clk\n");
goto err_disable_t0;
}
/* setup both channel 0 & 1 */
- tcb_setup_dual_chan(tc, best_divisor_idx);
+ tcb_setup_dual_chan(&tc, best_divisor_idx);
+ tc_sched_clock = tc_sched_clock_read;
}
/* and away we go! */
@@ -410,24 +452,26 @@ static int __init tcb_clksrc_init(void)
goto err_disable_t1;
/* channel 2: periodic and oneshot timer support */
- ret = setup_clkevents(tc, clk32k_divisor_idx);
+ ret = setup_clkevents(&tc, clk32k_divisor_idx);
if (ret)
goto err_unregister_clksrc;
+ sched_clock_register(tc_sched_clock, 32, divided_rate);
+
return 0;
err_unregister_clksrc:
clocksource_unregister(&clksrc);
err_disable_t1:
- if (!tc->tcb_config || tc->tcb_config->counter_width != 32)
- clk_disable_unprepare(tc->clk[1]);
+ if (bits != 32)
+ clk_disable_unprepare(tc.clk[1]);
err_disable_t0:
clk_disable_unprepare(t0_clk);
-err_free_tc:
- atmel_tc_free(tc);
+ tcaddr = NULL;
+
return ret;
}
-arch_initcall(tcb_clksrc_init);
+TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init);
diff --git a/drivers/clocksource/timer-ixp4xx.c b/drivers/clocksource/timer-ixp4xx.c
new file mode 100644
index 000000000000..5c2190b654cd
--- /dev/null
+++ b/drivers/clocksource/timer-ixp4xx.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IXP4 timer driver
+ * Copyright (C) 2019 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on arch/arm/mach-ixp4xx/common.c
+ * Copyright 2002 (C) Intel Corporation
+ * Copyright 2003-2004 (C) MontaVista, Software, Inc.
+ * Copyright (C) Deepak Saxena <dsaxena@plexity.net>
+ */
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+/* Goes away with OF conversion */
+#include <linux/platform_data/timer-ixp4xx.h>
+
+/*
+ * Constants to make it easy to access Timer Control/Status registers
+ */
+#define IXP4XX_OSTS_OFFSET 0x00 /* Continuous Timestamp */
+#define IXP4XX_OST1_OFFSET 0x04 /* Timer 1 Timestamp */
+#define IXP4XX_OSRT1_OFFSET 0x08 /* Timer 1 Reload */
+#define IXP4XX_OST2_OFFSET 0x0C /* Timer 2 Timestamp */
+#define IXP4XX_OSRT2_OFFSET 0x10 /* Timer 2 Reload */
+#define IXP4XX_OSWT_OFFSET 0x14 /* Watchdog Timer */
+#define IXP4XX_OSWE_OFFSET 0x18 /* Watchdog Enable */
+#define IXP4XX_OSWK_OFFSET 0x1C /* Watchdog Key */
+#define IXP4XX_OSST_OFFSET 0x20 /* Timer Status */
+
+/*
+ * Timer register values and bit definitions
+ */
+#define IXP4XX_OST_ENABLE 0x00000001
+#define IXP4XX_OST_ONE_SHOT 0x00000002
+/* Low order bits of reload value ignored */
+#define IXP4XX_OST_RELOAD_MASK 0x00000003
+#define IXP4XX_OST_DISABLED 0x00000000
+#define IXP4XX_OSST_TIMER_1_PEND 0x00000001
+#define IXP4XX_OSST_TIMER_2_PEND 0x00000002
+#define IXP4XX_OSST_TIMER_TS_PEND 0x00000004
+#define IXP4XX_OSST_TIMER_WDOG_PEND 0x00000008
+#define IXP4XX_OSST_TIMER_WARM_RESET 0x00000010
+
+#define IXP4XX_WDT_KEY 0x0000482E
+#define IXP4XX_WDT_RESET_ENABLE 0x00000001
+#define IXP4XX_WDT_IRQ_ENABLE 0x00000002
+#define IXP4XX_WDT_COUNT_ENABLE 0x00000004
+
+struct ixp4xx_timer {
+ void __iomem *base;
+ unsigned int tick_rate;
+ u32 latch;
+ struct clock_event_device clkevt;
+#ifdef CONFIG_ARM
+ struct delay_timer delay_timer;
+#endif
+};
+
+/*
+ * A local singleton used by sched_clock and delay timer reads, which are
+ * fast and stateless
+ */
+static struct ixp4xx_timer *local_ixp4xx_timer;
+
+static inline struct ixp4xx_timer *
+to_ixp4xx_timer(struct clock_event_device *evt)
+{
+ return container_of(evt, struct ixp4xx_timer, clkevt);
+}
+
+static u64 notrace ixp4xx_read_sched_clock(void)
+{
+ return __raw_readl(local_ixp4xx_timer->base + IXP4XX_OSTS_OFFSET);
+}
+
+static u64 ixp4xx_clocksource_read(struct clocksource *c)
+{
+ return __raw_readl(local_ixp4xx_timer->base + IXP4XX_OSTS_OFFSET);
+}
+
+static irqreturn_t ixp4xx_timer_interrupt(int irq, void *dev_id)
+{
+ struct ixp4xx_timer *tmr = dev_id;
+ struct clock_event_device *evt = &tmr->clkevt;
+
+ /* Clear Pending Interrupt */
+ __raw_writel(IXP4XX_OSST_TIMER_1_PEND,
+ tmr->base + IXP4XX_OSST_OFFSET);
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static int ixp4xx_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+ u32 val;
+
+ val = __raw_readl(tmr->base + IXP4XX_OSRT1_OFFSET);
+ /* Keep enable/oneshot bits */
+ val &= IXP4XX_OST_RELOAD_MASK;
+ __raw_writel((cycles & ~IXP4XX_OST_RELOAD_MASK) | val,
+ tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+static int ixp4xx_shutdown(struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+ u32 val;
+
+ val = __raw_readl(tmr->base + IXP4XX_OSRT1_OFFSET);
+ val &= ~IXP4XX_OST_ENABLE;
+ __raw_writel(val, tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+static int ixp4xx_set_oneshot(struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+
+ __raw_writel(IXP4XX_OST_ENABLE | IXP4XX_OST_ONE_SHOT,
+ tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+static int ixp4xx_set_periodic(struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+ u32 val;
+
+ val = tmr->latch & ~IXP4XX_OST_RELOAD_MASK;
+ val |= IXP4XX_OST_ENABLE;
+ __raw_writel(val, tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+static int ixp4xx_resume(struct clock_event_device *evt)
+{
+ struct ixp4xx_timer *tmr = to_ixp4xx_timer(evt);
+ u32 val;
+
+ val = __raw_readl(tmr->base + IXP4XX_OSRT1_OFFSET);
+ val |= IXP4XX_OST_ENABLE;
+ __raw_writel(val, tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ return 0;
+}
+
+/*
+ * IXP4xx timer tick
+ * We use OS timer1 on the CPU for the timer tick and the timestamp
+ * counter as a source of real clock ticks to account for missed jiffies.
+ */
+static __init int ixp4xx_timer_register(void __iomem *base,
+ int timer_irq,
+ unsigned int timer_freq)
+{
+ struct ixp4xx_timer *tmr;
+ int ret;
+
+ tmr = kzalloc(sizeof(*tmr), GFP_KERNEL);
+ if (!tmr)
+ return -ENOMEM;
+ tmr->base = base;
+ tmr->tick_rate = timer_freq;
+
+ /*
+ * The timer register doesn't allow specifying the two least
+ * significant bits of the timeout value and assumes they are zero.
+ * So make sure the latch is the best value with the two least
+ * significant bits unset.
+ */
+ tmr->latch = DIV_ROUND_CLOSEST(timer_freq,
+ (IXP4XX_OST_RELOAD_MASK + 1) * HZ)
+ * (IXP4XX_OST_RELOAD_MASK + 1);
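+ /*
+ * Worked example (illustrative only; values assumed): with the
+ * 66666000 Hz rate used by the OF probe path below and HZ = 100,
+ * DIV_ROUND_CLOSEST(66666000, 4 * 100) = 166665, so the latch becomes
+ * 166665 * 4 = 666660 cycles per tick, matching the ideal
+ * 66666000 / HZ exactly with the two low bits clear as the hardware
+ * requires.
+ */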
+
+ local_ixp4xx_timer = tmr;
+
+ /* Reset/disable counter */
+ __raw_writel(0, tmr->base + IXP4XX_OSRT1_OFFSET);
+
+ /* Clear any pending interrupt on timer 1 */
+ __raw_writel(IXP4XX_OSST_TIMER_1_PEND,
+ tmr->base + IXP4XX_OSST_OFFSET);
+
+ /* Reset time-stamp counter */
+ __raw_writel(0, tmr->base + IXP4XX_OSTS_OFFSET);
+
+ clocksource_mmio_init(NULL, "OSTS", timer_freq, 200, 32,
+ ixp4xx_clocksource_read);
+
+ tmr->clkevt.name = "ixp4xx timer1";
+ tmr->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ tmr->clkevt.rating = 200;
+ tmr->clkevt.set_state_shutdown = ixp4xx_shutdown;
+ tmr->clkevt.set_state_periodic = ixp4xx_set_periodic;
+ tmr->clkevt.set_state_oneshot = ixp4xx_set_oneshot;
+ tmr->clkevt.tick_resume = ixp4xx_resume;
+ tmr->clkevt.set_next_event = ixp4xx_set_next_event;
+ tmr->clkevt.cpumask = cpumask_of(0);
+ tmr->clkevt.irq = timer_irq;
+ ret = request_irq(timer_irq, ixp4xx_timer_interrupt,
+ IRQF_TIMER, "IXP4XX-TIMER1", tmr);
+ if (ret) {
+ pr_crit("no timer IRQ\n");
+ return -ENODEV;
+ }
+ clockevents_config_and_register(&tmr->clkevt, timer_freq,
+ 0xf, 0xfffffffe);
+
+ sched_clock_register(ixp4xx_read_sched_clock, 32, timer_freq);
+
+ return 0;
+}
+
+/**
+ * ixp4xx_timer_setup() - Timer setup function to be called from boardfiles
+ * @timerbase: physical base of timer block
+ * @timer_irq: Linux IRQ number for the timer
+ * @timer_freq: Fixed frequency of the timer
+ */
+void __init ixp4xx_timer_setup(resource_size_t timerbase,
+ int timer_irq,
+ unsigned int timer_freq)
+{
+ void __iomem *base;
+
+ base = ioremap(timerbase, 0x100);
+ if (!base) {
+ pr_crit("IXP4xx: can't remap timer\n");
+ return;
+ }
+ ixp4xx_timer_register(base, timer_irq, timer_freq);
+}
+EXPORT_SYMBOL_GPL(ixp4xx_timer_setup);
+
+#ifdef CONFIG_OF
+static __init int ixp4xx_of_timer_init(struct device_node *np)
+{
+ void __iomem *base;
+ int irq;
+ int ret;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_crit("IXP4xx: can't remap timer\n");
+ return -ENODEV;
+ }
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq <= 0) {
+ pr_err("Can't parse IRQ\n");
+ ret = -EINVAL;
+ goto out_unmap;
+ }
+
+ /* TODO: get some fixed clocks into the device tree */
+ ret = ixp4xx_timer_register(base, irq, 66666000);
+ if (ret)
+ goto out_unmap;
+ return 0;
+
+out_unmap:
+ iounmap(base);
+ return ret;
+}
+TIMER_OF_DECLARE(ixp4xx, "intel,ixp4xx-timer", ixp4xx_of_timer_init);
+#endif
diff --git a/drivers/clocksource/timer-milbeaut.c b/drivers/clocksource/timer-milbeaut.c
index f2019a88e3ee..fa9fb4eacade 100644
--- a/drivers/clocksource/timer-milbeaut.c
+++ b/drivers/clocksource/timer-milbeaut.c
@@ -26,8 +26,8 @@
#define MLB_TMR_TMCSR_CSL_DIV2 0
#define MLB_TMR_DIV_CNT 2
-#define MLB_TMR_SRC_CH (1)
-#define MLB_TMR_EVT_CH (0)
+#define MLB_TMR_SRC_CH 1
+#define MLB_TMR_EVT_CH 0
#define MLB_TMR_SRC_CH_OFS (MLB_TMR_REGSZPCH * MLB_TMR_SRC_CH)
#define MLB_TMR_EVT_CH_OFS (MLB_TMR_REGSZPCH * MLB_TMR_EVT_CH)
@@ -43,6 +43,8 @@
#define MLB_TMR_EVT_TMRLR2_OFS (MLB_TMR_EVT_CH_OFS + MLB_TMR_TMRLR2_OFS)
#define MLB_TIMER_RATING 500
+#define MLB_TIMER_ONESHOT 0
+#define MLB_TIMER_PERIODIC 1
static irqreturn_t mlb_timer_interrupt(int irq, void *dev_id)
{
@@ -59,27 +61,53 @@ static irqreturn_t mlb_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int mlb_set_state_periodic(struct clock_event_device *clk)
+static void mlb_evt_timer_start(struct timer_of *to, bool periodic)
{
- struct timer_of *to = to_timer_of(clk);
u32 val = MLB_TMR_TMCSR_CSL_DIV2;
+ val |= MLB_TMR_TMCSR_CNTE | MLB_TMR_TMCSR_TRG | MLB_TMR_TMCSR_INTE;
+ if (periodic)
+ val |= MLB_TMR_TMCSR_RELD;
writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+}
- writel_relaxed(to->of_clk.period, timer_of_base(to) +
- MLB_TMR_EVT_TMRLR1_OFS);
- val |= MLB_TMR_TMCSR_RELD | MLB_TMR_TMCSR_CNTE |
- MLB_TMR_TMCSR_TRG | MLB_TMR_TMCSR_INTE;
+static void mlb_evt_timer_stop(struct timer_of *to)
+{
+ u32 val = readl_relaxed(timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+
+ val &= ~MLB_TMR_TMCSR_CNTE;
writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+}
+
+static void mlb_evt_timer_register_count(struct timer_of *to, unsigned long cnt)
+{
+ writel_relaxed(cnt, timer_of_base(to) + MLB_TMR_EVT_TMRLR1_OFS);
+}
+
+static int mlb_set_state_periodic(struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+
+ mlb_evt_timer_stop(to);
+ mlb_evt_timer_register_count(to, to->of_clk.period);
+ mlb_evt_timer_start(to, MLB_TIMER_PERIODIC);
return 0;
}
static int mlb_set_state_oneshot(struct clock_event_device *clk)
{
struct timer_of *to = to_timer_of(clk);
- u32 val = MLB_TMR_TMCSR_CSL_DIV2;
- writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS);
+ mlb_evt_timer_stop(to);
+ mlb_evt_timer_start(to, MLB_TIMER_ONESHOT);
+ return 0;
+}
+
+static int mlb_set_state_shutdown(struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+
+ mlb_evt_timer_stop(to);
return 0;
}
@@ -88,22 +116,21 @@ static int mlb_clkevt_next_event(unsigned long event,
{
struct timer_of *to = to_timer_of(clk);
- writel_relaxed(event, timer_of_base(to) + MLB_TMR_EVT_TMRLR1_OFS);
- writel_relaxed(MLB_TMR_TMCSR_CSL_DIV2 |
- MLB_TMR_TMCSR_CNTE | MLB_TMR_TMCSR_INTE |
- MLB_TMR_TMCSR_TRG, timer_of_base(to) +
- MLB_TMR_EVT_TMCSR_OFS);
+ mlb_evt_timer_stop(to);
+ mlb_evt_timer_register_count(to, event);
+ mlb_evt_timer_start(to, MLB_TIMER_ONESHOT);
return 0;
}
static int mlb_config_clock_source(struct timer_of *to)
{
- writel_relaxed(0, timer_of_base(to) + MLB_TMR_SRC_TMCSR_OFS);
- writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMR_OFS);
+ u32 val = MLB_TMR_TMCSR_CSL_DIV2;
+
+ writel_relaxed(val, timer_of_base(to) + MLB_TMR_SRC_TMCSR_OFS);
writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMRLR1_OFS);
writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMRLR2_OFS);
- writel_relaxed(BIT(4) | BIT(1) | BIT(0), timer_of_base(to) +
- MLB_TMR_SRC_TMCSR_OFS);
+ val |= MLB_TMR_TMCSR_RELD | MLB_TMR_TMCSR_CNTE | MLB_TMR_TMCSR_TRG;
+ writel_relaxed(val, timer_of_base(to) + MLB_TMR_SRC_TMCSR_OFS);
return 0;
}
@@ -123,6 +150,7 @@ static struct timer_of to = {
.features = CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_ONESHOT,
.set_state_oneshot = mlb_set_state_oneshot,
.set_state_periodic = mlb_set_state_periodic,
+ .set_state_shutdown = mlb_set_state_shutdown,
.set_next_event = mlb_clkevt_next_event,
},
diff --git a/drivers/clocksource/timer-sun4i.c b/drivers/clocksource/timer-sun4i.c
index 6e0180aaf784..65f38f6ca714 100644
--- a/drivers/clocksource/timer-sun4i.c
+++ b/drivers/clocksource/timer-sun4i.c
@@ -186,7 +186,8 @@ static int __init sun4i_timer_init(struct device_node *node)
*/
if (of_machine_is_compatible("allwinner,sun4i-a10") ||
of_machine_is_compatible("allwinner,sun5i-a13") ||
- of_machine_is_compatible("allwinner,sun5i-a10s"))
+ of_machine_is_compatible("allwinner,sun5i-a10s") ||
+ of_machine_is_compatible("allwinner,suniv-f1c100s"))
sched_clock_register(sun4i_timer_sched_read, 32,
timer_of_rate(&to));
@@ -218,3 +219,5 @@ static int __init sun4i_timer_init(struct device_node *node)
}
TIMER_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer",
sun4i_timer_init);
+TIMER_OF_DECLARE(suniv, "allwinner,suniv-f1c100s-timer",
+ sun4i_timer_init);
diff --git a/drivers/clocksource/timer-tegra20.c b/drivers/clocksource/timer-tegra20.c
index fdb3d795a409..919b3568c495 100644
--- a/drivers/clocksource/timer-tegra20.c
+++ b/drivers/clocksource/timer-tegra20.c
@@ -60,9 +60,6 @@
static u32 usec_config;
static void __iomem *timer_reg_base;
#ifdef CONFIG_ARM
-static void __iomem *rtc_base;
-static struct timespec64 persistent_ts;
-static u64 persistent_ms, last_persistent_ms;
static struct delay_timer tegra_delay_timer;
#endif
@@ -199,40 +196,30 @@ static unsigned long tegra_delay_timer_read_counter_long(void)
return readl(timer_reg_base + TIMERUS_CNTR_1US);
}
+static struct timer_of suspend_rtc_to = {
+ .flags = TIMER_OF_BASE | TIMER_OF_CLOCK,
+};
+
/*
* tegra_rtc_read - Reads the Tegra RTC registers
 * Care must be taken that this function is not called while the
* tegra_rtc driver could be executing to avoid race conditions
* on the RTC shadow register
*/
-static u64 tegra_rtc_read_ms(void)
+static u64 tegra_rtc_read_ms(struct clocksource *cs)
{
- u32 ms = readl(rtc_base + RTC_MILLISECONDS);
- u32 s = readl(rtc_base + RTC_SHADOW_SECONDS);
+ u32 ms = readl(timer_of_base(&suspend_rtc_to) + RTC_MILLISECONDS);
+ u32 s = readl(timer_of_base(&suspend_rtc_to) + RTC_SHADOW_SECONDS);
return (u64)s * MSEC_PER_SEC + ms;
}
-/*
- * tegra_read_persistent_clock64 - Return time from a persistent clock.
- *
- * Reads the time from a source which isn't disabled during PM, the
- * 32k sync timer. Convert the cycles elapsed since last read into
- * nsecs and adds to a monotonically increasing timespec64.
- * Care must be taken that this funciton is not called while the
- * tegra_rtc driver could be executing to avoid race conditions
- * on the RTC shadow register
- */
-static void tegra_read_persistent_clock64(struct timespec64 *ts)
-{
- u64 delta;
-
- last_persistent_ms = persistent_ms;
- persistent_ms = tegra_rtc_read_ms();
- delta = persistent_ms - last_persistent_ms;
-
- timespec64_add_ns(&persistent_ts, delta * NSEC_PER_MSEC);
- *ts = persistent_ts;
-}
+static struct clocksource suspend_rtc_clocksource = {
+ .name = "tegra_suspend_timer",
+ .rating = 200,
+ .read = tegra_rtc_read_ms,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
+};
#endif
static int tegra_timer_common_init(struct device_node *np, struct timer_of *to)
@@ -385,25 +372,15 @@ out:
static int __init tegra20_init_rtc(struct device_node *np)
{
- struct clk *clk;
+ int ret;
- rtc_base = of_iomap(np, 0);
- if (!rtc_base) {
- pr_err("Can't map RTC registers\n");
- return -ENXIO;
- }
+ ret = timer_of_init(np, &suspend_rtc_to);
+ if (ret)
+ return ret;
- /*
- * rtc registers are used by read_persistent_clock, keep the rtc clock
- * enabled
- */
- clk = of_clk_get(np, 0);
- if (IS_ERR(clk))
- pr_warn("Unable to get rtc-tegra clock\n");
- else
- clk_prepare_enable(clk);
+ clocksource_register_hz(&suspend_rtc_clocksource, 1000);
- return register_persistent_clock(tegra_read_persistent_clock64);
+ return 0;
}
TIMER_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
#endif
diff --git a/drivers/cpufreq/loongson1-cpufreq.c b/drivers/cpufreq/loongson1-cpufreq.c
index be89416e2358..21c9ce8526c0 100644
--- a/drivers/cpufreq/loongson1-cpufreq.c
+++ b/drivers/cpufreq/loongson1-cpufreq.c
@@ -13,6 +13,7 @@
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 9bbde2f26cac..f5414b6dfb55 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -30,8 +30,8 @@
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
-#include <mach/npe.h>
-#include <mach/qmgr.h>
+#include <linux/soc/ixp4xx/npe.h>
+#include <linux/soc/ixp4xx/qmgr.h>
#define MAX_KEYLEN 32
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index 5ef624fe3934..a59f338f520f 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -23,7 +23,6 @@ config DEV_DAX
config DEV_DAX_PMEM
tristate "PMEM DAX: direct access to persistent memory"
depends on LIBNVDIMM && NVDIMM_DAX && DEV_DAX
- depends on m # until we can kill DEV_DAX_PMEM_COMPAT
default DEV_DAX
help
Support raw access to persistent memory. Note that this
@@ -50,7 +49,7 @@ config DEV_DAX_KMEM
config DEV_DAX_PMEM_COMPAT
tristate "PMEM DAX: support the deprecated /sys/class/dax interface"
- depends on DEV_DAX_PMEM
+ depends on m && DEV_DAX_PMEM=m
default DEV_DAX_PMEM
help
Older versions of the libdaxctl library expect to find all
diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
index f71019ce0647..f9f51786d556 100644
--- a/drivers/dax/pmem/core.c
+++ b/drivers/dax/pmem/core.c
@@ -37,13 +37,13 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
devm_nsio_disable(dev, nsio);
/* reserve the metadata area, device-dax will reserve the data */
- pfn_sb = nd_pfn->pfn_sb;
+ pfn_sb = nd_pfn->pfn_sb;
offset = le64_to_cpu(pfn_sb->dataoff);
if (!devm_request_mem_region(dev, nsio->res.start, offset,
dev_name(&ndns->dev))) {
- dev_warn(dev, "could not reserve metadata\n");
+ dev_warn(dev, "could not reserve metadata\n");
return ERR_PTR(-EBUSY);
- }
+ }
rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
if (rc != 2)
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 3aa8733f832a..9bf06042619a 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -29,6 +29,7 @@
EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);
static DEFINE_SPINLOCK(dma_fence_stub_lock);
static struct dma_fence dma_fence_stub;
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 47eb4d13ed5f..5e2e0348d460 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -263,8 +263,8 @@ config EDAC_PND2
micro-server but may appear on others in the future.
config EDAC_MPC85XX
- tristate "Freescale MPC83xx / MPC85xx"
- depends on FSL_SOC
+ bool "Freescale MPC83xx / MPC85xx"
+ depends on FSL_SOC && EDAC=y
help
Support for error detection and correction on the Freescale
MPC8349, MPC8560, MPC8540, MPC8548, T4240
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 13594ffadcb3..64922c8fa7e3 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -679,22 +679,18 @@ static int del_mc_from_global_list(struct mem_ctl_info *mci)
struct mem_ctl_info *edac_mc_find(int idx)
{
- struct mem_ctl_info *mci = NULL;
+ struct mem_ctl_info *mci;
struct list_head *item;
mutex_lock(&mem_ctls_mutex);
list_for_each(item, &mc_devices) {
mci = list_entry(item, struct mem_ctl_info, link);
-
- if (mci->mc_idx >= idx) {
- if (mci->mc_idx == idx) {
- goto unlock;
- }
- break;
- }
+ if (mci->mc_idx == idx)
+ goto unlock;
}
+ mci = NULL;
unlock:
mutex_unlock(&mem_ctls_mutex);
return mci;
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 7b655f6156fb..11fda9eb2466 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -253,6 +253,22 @@ config TI_SCI_PROTOCOL
This protocol library is used by client drivers to use the features
provided by the system controller.
+config TRUSTED_FOUNDATIONS
+ bool "Trusted Foundations secure monitor support"
+ depends on ARM
+ help
+ Some devices (including most early Tegra-based consumer devices on
+ the market) are booted with the Trusted Foundations secure monitor
+ active, requiring some core operations to be performed by the secure
+ monitor instead of the kernel.
+
+ This option allows the kernel to invoke the secure monitor whenever
+ required on devices using Trusted Foundations. See the functions and
+ comments in linux/firmware/trusted_foundations.h or the device tree
+ bindings for "tlm,trusted-foundations" for details on how to use it.
+
+ Choose N if you don't know what this is about.
+
config HAVE_ARM_SMCCC
bool
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 9a3909a22682..3fa0b34eb72f 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o
obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a
obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o
+obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
obj-$(CONFIG_ARM_SCMI_PROTOCOL) += arm_scmi/
obj-y += psci/
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 8f952f2f1a29..b5bc4c7a8fab 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -654,9 +654,7 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
static int scmi_mailbox_check(struct device_node *np)
{
- struct of_phandle_args arg;
-
- return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, &arg);
+ return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, NULL);
}
static int scmi_mbox_free_channel(int id, void *p, void *data)
@@ -798,7 +796,9 @@ static int scmi_probe(struct platform_device *pdev)
return -EINVAL;
}
- desc = of_match_device(scmi_of_match, dev)->data;
+ desc = of_device_get_match_data(dev);
+ if (!desc)
+ return -EINVAL;
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
diff --git a/drivers/firmware/imx/Makefile b/drivers/firmware/imx/Makefile
index 1b2e15b3c9ca..802c4ad8e8f9 100644
--- a/drivers/firmware/imx/Makefile
+++ b/drivers/firmware/imx/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o
+obj-$(CONFIG_IMX_SCU) += imx-scu.o misc.o imx-scu-irq.o
obj-$(CONFIG_IMX_SCU_PD) += scu-pd.o
diff --git a/drivers/firmware/imx/imx-scu-irq.c b/drivers/firmware/imx/imx-scu-irq.c
new file mode 100644
index 000000000000..043833ad3c1a
--- /dev/null
+++ b/drivers/firmware/imx/imx-scu-irq.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2019 NXP
+ *
+ * Implementation of the SCU IRQ functions using MU.
+ *
+ */
+
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/firmware/imx/ipc.h>
+#include <linux/mailbox_client.h>
+
+#define IMX_SC_IRQ_FUNC_ENABLE 1
+#define IMX_SC_IRQ_FUNC_STATUS 2
+#define IMX_SC_IRQ_NUM_GROUP 4
+
+static u32 mu_resource_id;
+
+struct imx_sc_msg_irq_get_status {
+ struct imx_sc_rpc_msg hdr;
+ union {
+ struct {
+ u16 resource;
+ u8 group;
+ u8 reserved;
+ } __packed req;
+ struct {
+ u32 status;
+ } resp;
+ } data;
+};
+
+struct imx_sc_msg_irq_enable {
+ struct imx_sc_rpc_msg hdr;
+ u32 mask;
+ u16 resource;
+ u8 group;
+ u8 enable;
+} __packed;
+
+static struct imx_sc_ipc *imx_sc_irq_ipc_handle;
+static struct work_struct imx_sc_irq_work;
+static ATOMIC_NOTIFIER_HEAD(imx_scu_irq_notifier_chain);
+
+int imx_scu_irq_register_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(
+ &imx_scu_irq_notifier_chain, nb);
+}
+EXPORT_SYMBOL(imx_scu_irq_register_notifier);
+
+int imx_scu_irq_unregister_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(
+ &imx_scu_irq_notifier_chain, nb);
+}
+EXPORT_SYMBOL(imx_scu_irq_unregister_notifier);
+
+static int imx_scu_irq_notifier_call_chain(unsigned long status, u8 *group)
+{
+ return atomic_notifier_call_chain(&imx_scu_irq_notifier_chain,
+ status, (void *)group);
+}
+
+static void imx_scu_irq_work_handler(struct work_struct *work)
+{
+ struct imx_sc_msg_irq_get_status msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ u32 irq_status;
+ int ret;
+ u8 i;
+
+ for (i = 0; i < IMX_SC_IRQ_NUM_GROUP; i++) {
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_IRQ;
+ hdr->func = IMX_SC_IRQ_FUNC_STATUS;
+ hdr->size = 2;
+
+ msg.data.req.resource = mu_resource_id;
+ msg.data.req.group = i;
+
+ ret = imx_scu_call_rpc(imx_sc_irq_ipc_handle, &msg, true);
+ if (ret) {
+ pr_err("get irq group %d status failed, ret %d\n",
+ i, ret);
+ return;
+ }
+
+ irq_status = msg.data.resp.status;
+ if (!irq_status)
+ continue;
+
+ imx_scu_irq_notifier_call_chain(irq_status, &i);
+ }
+}
+
+int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable)
+{
+ struct imx_sc_msg_irq_enable msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ int ret;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_IRQ;
+ hdr->func = IMX_SC_IRQ_FUNC_ENABLE;
+ hdr->size = 3;
+
+ msg.resource = mu_resource_id;
+ msg.group = group;
+ msg.mask = mask;
+ msg.enable = enable;
+
+ ret = imx_scu_call_rpc(imx_sc_irq_ipc_handle, &msg, true);
+ if (ret)
+ pr_err("enable irq failed, group %d, mask %d, ret %d\n",
+ group, mask, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(imx_scu_irq_group_enable);
+
+static void imx_scu_irq_callback(struct mbox_client *c, void *msg)
+{
+ schedule_work(&imx_sc_irq_work);
+}
+
+int imx_scu_enable_general_irq_channel(struct device *dev)
+{
+ struct of_phandle_args spec;
+ struct mbox_client *cl;
+ struct mbox_chan *ch;
+ int ret = 0, i = 0;
+
+ ret = imx_scu_get_handle(&imx_sc_irq_ipc_handle);
+ if (ret)
+ return ret;
+
+ cl = devm_kzalloc(dev, sizeof(*cl), GFP_KERNEL);
+ if (!cl)
+ return -ENOMEM;
+
+ cl->dev = dev;
+ cl->rx_callback = imx_scu_irq_callback;
+
+ /* SCU general IRQ uses general interrupt channel 3 */
+ ch = mbox_request_channel_byname(cl, "gip3");
+ if (IS_ERR(ch)) {
+ ret = PTR_ERR(ch);
+ dev_err(dev, "failed to request mbox chan gip3, ret %d\n", ret);
+ devm_kfree(dev, cl);
+ return ret;
+ }
+
+ INIT_WORK(&imx_sc_irq_work, imx_scu_irq_work_handler);
+
+ if (!of_parse_phandle_with_args(dev->of_node, "mboxes",
+ "#mbox-cells", 0, &spec))
+ i = of_alias_get_id(spec.np, "mu");
+
+ /* use mu1 as general mu irq channel if failed */
+ if (i < 0)
+ i = 1;
+
+ mu_resource_id = IMX_SC_R_MU_0A + i;
+
+ return ret;
+}
+EXPORT_SYMBOL(imx_scu_enable_general_irq_channel);
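
As a rough usage sketch (not part of this patch), a client of the new API would register a notifier and enable the group/mask it cares about. The group number and mask below are hypothetical, and the prototypes are assumed to be declared by linux/firmware/imx/sci.h.

#include <linux/bits.h>
#include <linux/firmware/imx/sci.h>	/* assumed to declare the imx_scu_irq_* helpers */
#include <linux/notifier.h>
#include <linux/printk.h>

static int board_scu_irq_notify(struct notifier_block *nb,
				unsigned long status, void *group)
{
	/* status is the per-group IRQ status word, group points at the group index */
	pr_info("SCU IRQ: group %u, status 0x%lx\n", *(u8 *)group, status);
	return NOTIFY_OK;
}

static struct notifier_block board_scu_irq_nb = {
	.notifier_call = board_scu_irq_notify,
};

static int board_scu_irq_init(void)
{
	int ret;

	ret = imx_scu_irq_register_notifier(&board_scu_irq_nb);
	if (ret)
		return ret;

	/* Enable bit 0 of a hypothetical IRQ group 1 at the SCU. */
	return imx_scu_irq_group_enable(1, BIT(0), true);
}
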
diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
index 2bb1a19c413f..04a24a863d6e 100644
--- a/drivers/firmware/imx/imx-scu.c
+++ b/drivers/firmware/imx/imx-scu.c
@@ -10,6 +10,7 @@
#include <linux/err.h>
#include <linux/firmware/imx/types.h>
#include <linux/firmware/imx/ipc.h>
+#include <linux/firmware/imx/sci.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
@@ -246,6 +247,11 @@ static int imx_scu_probe(struct platform_device *pdev)
imx_sc_ipc_handle = sc_ipc;
+ ret = imx_scu_enable_general_irq_channel(dev);
+ if (ret)
+ dev_warn(dev,
+ "failed to enable general irq channel: %d\n", ret);
+
dev_info(dev, "NXP i.MX SCU Initialized\n");
return devm_of_platform_populate(dev);
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
index 39a94c7177fc..480cec69e2c9 100644
--- a/drivers/firmware/imx/scu-pd.c
+++ b/drivers/firmware/imx/scu-pd.c
@@ -74,7 +74,10 @@ struct imx_sc_pd_range {
char *name;
u32 rsrc;
u8 num;
+
+	/* append a domain index to the name, starting from @start_from */
bool postfix;
+ u8 start_from;
};
struct imx_sc_pd_soc {
@@ -84,71 +87,75 @@ struct imx_sc_pd_soc {
static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
/* LSIO SS */
- { "lsio-pwm", IMX_SC_R_PWM_0, 8, 1 },
- { "lsio-gpio", IMX_SC_R_GPIO_0, 8, 1 },
- { "lsio-gpt", IMX_SC_R_GPT_0, 5, 1 },
- { "lsio-kpp", IMX_SC_R_KPP, 1, 0 },
- { "lsio-fspi", IMX_SC_R_FSPI_0, 2, 1 },
- { "lsio-mu", IMX_SC_R_MU_0A, 14, 1 },
+ { "pwm", IMX_SC_R_PWM_0, 8, true, 0 },
+ { "gpio", IMX_SC_R_GPIO_0, 8, true, 0 },
+ { "gpt", IMX_SC_R_GPT_0, 5, true, 0 },
+ { "kpp", IMX_SC_R_KPP, 1, false, 0 },
+ { "fspi", IMX_SC_R_FSPI_0, 2, true, 0 },
+ { "mu", IMX_SC_R_MU_0A, 14, true, 0 },
/* CONN SS */
- { "con-usb", IMX_SC_R_USB_0, 2, 1 },
- { "con-usb0phy", IMX_SC_R_USB_0_PHY, 1, 0 },
- { "con-usb2", IMX_SC_R_USB_2, 1, 0 },
- { "con-usb2phy", IMX_SC_R_USB_2_PHY, 1, 0 },
- { "con-sdhc", IMX_SC_R_SDHC_0, 3, 1 },
- { "con-enet", IMX_SC_R_ENET_0, 2, 1 },
- { "con-nand", IMX_SC_R_NAND, 1, 0 },
- { "con-mlb", IMX_SC_R_MLB_0, 1, 1 },
-
- /* Audio DMA SS */
- { "adma-audio-pll0", IMX_SC_R_AUDIO_PLL_0, 1, 0 },
- { "adma-audio-pll1", IMX_SC_R_AUDIO_PLL_1, 1, 0 },
- { "adma-audio-clk-0", IMX_SC_R_AUDIO_CLK_0, 1, 0 },
- { "adma-dma0-ch", IMX_SC_R_DMA_0_CH0, 16, 1 },
- { "adma-dma1-ch", IMX_SC_R_DMA_1_CH0, 16, 1 },
- { "adma-dma2-ch", IMX_SC_R_DMA_2_CH0, 5, 1 },
- { "adma-asrc0", IMX_SC_R_ASRC_0, 1, 0 },
- { "adma-asrc1", IMX_SC_R_ASRC_1, 1, 0 },
- { "adma-esai0", IMX_SC_R_ESAI_0, 1, 0 },
- { "adma-spdif0", IMX_SC_R_SPDIF_0, 1, 0 },
- { "adma-sai", IMX_SC_R_SAI_0, 3, 1 },
- { "adma-amix", IMX_SC_R_AMIX, 1, 0 },
- { "adma-mqs0", IMX_SC_R_MQS_0, 1, 0 },
- { "adma-dsp", IMX_SC_R_DSP, 1, 0 },
- { "adma-dsp-ram", IMX_SC_R_DSP_RAM, 1, 0 },
- { "adma-can", IMX_SC_R_CAN_0, 3, 1 },
- { "adma-ftm", IMX_SC_R_FTM_0, 2, 1 },
- { "adma-lpi2c", IMX_SC_R_I2C_0, 4, 1 },
- { "adma-adc", IMX_SC_R_ADC_0, 1, 1 },
- { "adma-lcd", IMX_SC_R_LCD_0, 1, 1 },
- { "adma-lcd0-pwm", IMX_SC_R_LCD_0_PWM_0, 1, 1 },
- { "adma-lpuart", IMX_SC_R_UART_0, 4, 1 },
- { "adma-lpspi", IMX_SC_R_SPI_0, 4, 1 },
-
- /* VPU SS */
- { "vpu", IMX_SC_R_VPU, 1, 0 },
- { "vpu-pid", IMX_SC_R_VPU_PID0, 8, 1 },
- { "vpu-dec0", IMX_SC_R_VPU_DEC_0, 1, 0 },
- { "vpu-enc0", IMX_SC_R_VPU_ENC_0, 1, 0 },
+ { "usb", IMX_SC_R_USB_0, 2, true, 0 },
+ { "usb0phy", IMX_SC_R_USB_0_PHY, 1, false, 0 },
+ { "usb2", IMX_SC_R_USB_2, 1, false, 0 },
+ { "usb2phy", IMX_SC_R_USB_2_PHY, 1, false, 0 },
+ { "sdhc", IMX_SC_R_SDHC_0, 3, true, 0 },
+ { "enet", IMX_SC_R_ENET_0, 2, true, 0 },
+ { "nand", IMX_SC_R_NAND, 1, false, 0 },
+ { "mlb", IMX_SC_R_MLB_0, 1, true, 0 },
+
+ /* AUDIO SS */
+ { "audio-pll0", IMX_SC_R_AUDIO_PLL_0, 1, false, 0 },
+ { "audio-pll1", IMX_SC_R_AUDIO_PLL_1, 1, false, 0 },
+ { "audio-clk-0", IMX_SC_R_AUDIO_CLK_0, 1, false, 0 },
+ { "dma0-ch", IMX_SC_R_DMA_0_CH0, 16, true, 0 },
+ { "dma1-ch", IMX_SC_R_DMA_1_CH0, 16, true, 0 },
+ { "dma2-ch", IMX_SC_R_DMA_2_CH0, 5, true, 0 },
+ { "asrc0", IMX_SC_R_ASRC_0, 1, false, 0 },
+ { "asrc1", IMX_SC_R_ASRC_1, 1, false, 0 },
+ { "esai0", IMX_SC_R_ESAI_0, 1, false, 0 },
+ { "spdif0", IMX_SC_R_SPDIF_0, 1, false, 0 },
+ { "sai", IMX_SC_R_SAI_0, 3, true, 0 },
+ { "amix", IMX_SC_R_AMIX, 1, false, 0 },
+ { "mqs0", IMX_SC_R_MQS_0, 1, false, 0 },
+ { "dsp", IMX_SC_R_DSP, 1, false, 0 },
+ { "dsp-ram", IMX_SC_R_DSP_RAM, 1, false, 0 },
+
+ /* DMA SS */
+ { "can", IMX_SC_R_CAN_0, 3, true, 0 },
+ { "ftm", IMX_SC_R_FTM_0, 2, true, 0 },
+ { "lpi2c", IMX_SC_R_I2C_0, 4, true, 0 },
+ { "adc", IMX_SC_R_ADC_0, 1, true, 0 },
+ { "lcd", IMX_SC_R_LCD_0, 1, true, 0 },
+ { "lcd0-pwm", IMX_SC_R_LCD_0_PWM_0, 1, true, 0 },
+ { "lpuart", IMX_SC_R_UART_0, 4, true, 0 },
+ { "lpspi", IMX_SC_R_SPI_0, 4, true, 0 },
+
+ /* VPU SS */
+ { "vpu", IMX_SC_R_VPU, 1, false, 0 },
+ { "vpu-pid", IMX_SC_R_VPU_PID0, 8, true, 0 },
+ { "vpu-dec0", IMX_SC_R_VPU_DEC_0, 1, false, 0 },
+ { "vpu-enc0", IMX_SC_R_VPU_ENC_0, 1, false, 0 },
/* GPU SS */
- { "gpu0-pid", IMX_SC_R_GPU_0_PID0, 4, 1 },
+ { "gpu0-pid", IMX_SC_R_GPU_0_PID0, 4, true, 0 },
/* HSIO SS */
- { "hsio-pcie-b", IMX_SC_R_PCIE_B, 1, 0 },
- { "hsio-serdes-1", IMX_SC_R_SERDES_1, 1, 0 },
- { "hsio-gpio", IMX_SC_R_HSIO_GPIO, 1, 0 },
+ { "pcie-b", IMX_SC_R_PCIE_B, 1, false, 0 },
+ { "serdes-1", IMX_SC_R_SERDES_1, 1, false, 0 },
+ { "hsio-gpio", IMX_SC_R_HSIO_GPIO, 1, false, 0 },
+
+ /* MIPI SS */
+ { "mipi0", IMX_SC_R_MIPI_0, 1, false, 0 },
+ { "mipi0-pwm0", IMX_SC_R_MIPI_0_PWM_0, 1, false, 0 },
+ { "mipi0-i2c", IMX_SC_R_MIPI_0_I2C_0, 2, true, 0 },
- /* MIPI/LVDS SS */
- { "mipi0", IMX_SC_R_MIPI_0, 1, 0 },
- { "mipi0-pwm0", IMX_SC_R_MIPI_0_PWM_0, 1, 0 },
- { "mipi0-i2c", IMX_SC_R_MIPI_0_I2C_0, 2, 1 },
- { "lvds0", IMX_SC_R_LVDS_0, 1, 0 },
+ /* LVDS SS */
+ { "lvds0", IMX_SC_R_LVDS_0, 1, false, 0 },
/* DC SS */
- { "dc0", IMX_SC_R_DC_0, 1, 0 },
- { "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, 1 },
+ { "dc0", IMX_SC_R_DC_0, 1, false, 0 },
+ { "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, true, 0 },
};
static const struct imx_sc_pd_soc imx8qxp_scu_pd = {
@@ -236,7 +243,7 @@ imx_scu_add_pm_domain(struct device *dev, int idx,
if (pd_ranges->postfix)
snprintf(sc_pd->name, sizeof(sc_pd->name),
- "%s%i", pd_ranges->name, idx);
+ "%s%i", pd_ranges->name, pd_ranges->start_from + idx);
else
snprintf(sc_pd->name, sizeof(sc_pd->name),
"%s", pd_ranges->name);
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 3fbbb61012c4..ef93406ace1b 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -65,18 +65,36 @@ struct ti_sci_xfers_info {
};
/**
+ * struct ti_sci_rm_type_map - Structure representing TISCI Resource
+ * management representation of dev_ids.
+ * @dev_id: TISCI device ID
+ * @type: Corresponding id as identified by TISCI RM.
+ *
+ * Note: This is used only as a work around for using RM range apis
+ * for AM654 SoC. For future SoCs dev_id will be used as type
+ * for RM range APIs. In order to maintain ABI backward compatibility
+ * type is not being changed for AM654 SoC.
+ */
+struct ti_sci_rm_type_map {
+ u32 dev_id;
+ u16 type;
+};
+
+/**
* struct ti_sci_desc - Description of SoC integration
* @default_host_id: Host identifier representing the compute entity
* @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
* @max_msgs: Maximum number of messages that can be pending
* simultaneously in the system
* @max_msg_size: Maximum size of data per message that can be handled.
+ * @rm_type_map: RM resource type mapping structure.
*/
struct ti_sci_desc {
u8 default_host_id;
int max_rx_timeout_ms;
int max_msgs;
int max_msg_size;
+ struct ti_sci_rm_type_map *rm_type_map;
};
/**
@@ -1600,6 +1618,392 @@ fail:
return ret;
}
+static int ti_sci_get_resource_type(struct ti_sci_info *info, u16 dev_id,
+ u16 *type)
+{
+ struct ti_sci_rm_type_map *rm_type_map = info->desc->rm_type_map;
+ bool found = false;
+ int i;
+
+ /* If map is not provided then assume dev_id is used as type */
+ if (!rm_type_map) {
+ *type = dev_id;
+ return 0;
+ }
+
+ for (i = 0; rm_type_map[i].dev_id; i++) {
+ if (rm_type_map[i].dev_id == dev_id) {
+ *type = rm_type_map[i].type;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * ti_sci_get_resource_range - Helper to get a range of resources assigned
+ * to a host. Resource is uniquely identified by
+ * type and subtype.
+ * @handle: Pointer to TISCI handle.
+ * @dev_id: TISCI device ID.
+ * @subtype: Resource assignment subtype that is being requested
+ * from the given device.
+ * @s_host: Host processor ID to which the resources are allocated
+ * @range_start: Start index of the resource range
+ * @range_num: Number of resources in the range
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 subtype, u8 s_host,
+ u16 *range_start, u16 *range_num)
+{
+ struct ti_sci_msg_resp_get_resource_range *resp;
+ struct ti_sci_msg_req_get_resource_range *req;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ u16 type;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+
+ ret = ti_sci_get_resource_type(info, dev_id, &type);
+ if (ret) {
+ dev_err(dev, "rm type lookup failed for %u\n", dev_id);
+ goto fail;
+ }
+
+ req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
+ req->secondary_host = s_host;
+ req->type = type & MSG_RM_RESOURCE_TYPE_MASK;
+ req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp)) {
+ ret = -ENODEV;
+ } else if (!resp->range_start && !resp->range_num) {
+ ret = -ENODEV;
+ } else {
+ *range_start = resp->range_start;
+ *range_num = resp->range_num;
+	}
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_cmd_get_resource_range - Get a range of resources assigned to host
+ * that is same as ti sci interface host.
+ * @handle: Pointer to TISCI handle.
+ * @dev_id: TISCI device ID.
+ * @subtype: Resource assignment subtype that is being requested
+ * from the given device.
+ * @range_start: Start index of the resource range
+ * @range_num: Number of resources in the range
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 subtype,
+ u16 *range_start, u16 *range_num)
+{
+ return ti_sci_get_resource_range(handle, dev_id, subtype,
+ TI_SCI_IRQ_SECONDARY_HOST_INVALID,
+ range_start, range_num);
+}
+
+/**
+ * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
+ * assigned to a specified host.
+ * @handle: Pointer to TISCI handle.
+ * @dev_id: TISCI device ID.
+ * @subtype: Resource assignment subtype that is being requested
+ * from the given device.
+ * @s_host: Host processor ID to which the resources are allocated
+ * @range_start: Start index of the resource range
+ * @range_num: Number of resources in the range
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static
+int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 subtype, u8 s_host,
+ u16 *range_start, u16 *range_num)
+{
+ return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
+ range_start, range_num);
+}
+
+/**
+ * ti_sci_manage_irq() - Helper api to configure/release the irq route between
+ * the requested source and destination
+ * @handle: Pointer to TISCI handle.
+ * @valid_params: Bit fields defining the validity of certain params
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @dst_id: Device ID of the IRQ destination
+ * @dst_host_irq: IRQ number of the destination device
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
+ * @vint: Virtual interrupt to be used within the IA
+ * @global_event: Global event number to be used for the requesting event
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
+ * @s_host:		Secondary host ID for which the irq/event is being
+ *			requested.
+ * @type: Request type irq set or release.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
+ u32 valid_params, u16 src_id, u16 src_index,
+ u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
+ u16 global_event, u8 vint_status_bit, u8 s_host,
+ u16 type)
+{
+ struct ti_sci_msg_req_manage_irq *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ struct ti_sci_info *info;
+ struct device *dev;
+ int ret = 0;
+
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (!handle)
+ return -EINVAL;
+
+ info = handle_to_ti_sci_info(handle);
+ dev = info->dev;
+
+ xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
+ req->valid_params = valid_params;
+ req->src_id = src_id;
+ req->src_index = src_index;
+ req->dst_id = dst_id;
+ req->dst_host_irq = dst_host_irq;
+ req->ia_id = ia_id;
+ req->vint = vint;
+ req->global_event = global_event;
+ req->vint_status_bit = vint_status_bit;
+ req->secondary_host = s_host;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
+/**
+ * ti_sci_set_irq() - Helper api to configure the irq route between the
+ * requested source and destination
+ * @handle: Pointer to TISCI handle.
+ * @valid_params: Bit fields defining the validity of certain params
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @dst_id: Device ID of the IRQ destination
+ * @dst_host_irq: IRQ number of the destination device
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
+ * @vint: Virtual interrupt to be used within the IA
+ * @global_event: Global event number to be used for the requesting event
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
+ * @s_host:		Secondary host ID for which the irq/event is being
+ *			requested.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
+ u16 src_id, u16 src_index, u16 dst_id,
+ u16 dst_host_irq, u16 ia_id, u16 vint,
+ u16 global_event, u8 vint_status_bit, u8 s_host)
+{
+ pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d,via ia_id = %d, vint = %d, global event = %d,status_bit = %d\n",
+ __func__, valid_params, src_id, src_index,
+ dst_id, dst_host_irq, ia_id, vint, global_event,
+ vint_status_bit);
+
+ return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
+ dst_id, dst_host_irq, ia_id, vint,
+ global_event, vint_status_bit, s_host,
+ TI_SCI_MSG_SET_IRQ);
+}
+
+/**
+ * ti_sci_free_irq() - Helper api to free the irq route between the
+ * requested source and destination
+ * @handle: Pointer to TISCI handle.
+ * @valid_params: Bit fields defining the validity of certain params
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @dst_id: Device ID of the IRQ destination
+ * @dst_host_irq: IRQ number of the destination device
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
+ * @vint: Virtual interrupt to be used within the IA
+ * @global_event: Global event number to be used for the requesting event
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
+ * @s_host:		Secondary host ID for which the irq/event is being
+ *			requested.
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
+ u16 src_id, u16 src_index, u16 dst_id,
+ u16 dst_host_irq, u16 ia_id, u16 vint,
+ u16 global_event, u8 vint_status_bit, u8 s_host)
+{
+ pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d,via ia_id = %d, vint = %d, global event = %d,status_bit = %d\n",
+ __func__, valid_params, src_id, src_index,
+ dst_id, dst_host_irq, ia_id, vint, global_event,
+ vint_status_bit);
+
+ return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
+ dst_id, dst_host_irq, ia_id, vint,
+ global_event, vint_status_bit, s_host,
+ TI_SCI_MSG_FREE_IRQ);
+}
+
+/**
+ * ti_sci_cmd_set_irq() - Configure a host irq route between the requested
+ * source and destination.
+ * @handle: Pointer to TISCI handle.
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @dst_id: Device ID of the IRQ destination
+ * @dst_host_irq: IRQ number of the destination device
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id,
+ u16 src_index, u16 dst_id, u16 dst_host_irq)
+{
+ u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
+
+ return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id,
+ dst_host_irq, 0, 0, 0, 0, 0);
+}
+
+/**
+ * ti_sci_cmd_set_event_map() - Configure an event based irq route between the
+ * requested source and Interrupt Aggregator.
+ * @handle: Pointer to TISCI handle.
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
+ * @vint: Virtual interrupt to be used within the IA
+ * @global_event: Global event number to be used for the requesting event
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle,
+ u16 src_id, u16 src_index, u16 ia_id,
+ u16 vint, u16 global_event,
+ u8 vint_status_bit)
+{
+ u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
+ MSG_FLAG_GLB_EVNT_VALID |
+ MSG_FLAG_VINT_STS_BIT_VALID;
+
+ return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0,
+ ia_id, vint, global_event, vint_status_bit, 0);
+}
+
+/**
+ * ti_sci_cmd_free_irq() - Free a host irq route between the
+ * requested source and destination.
+ * @handle: Pointer to TISCI handle.
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @dst_id: Device ID of the IRQ destination
+ * @dst_host_irq: IRQ number of the destination device
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id,
+ u16 src_index, u16 dst_id, u16 dst_host_irq)
+{
+ u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
+
+ return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id,
+ dst_host_irq, 0, 0, 0, 0, 0);
+}
+
+/**
+ * ti_sci_cmd_free_event_map() - Free an event map between the requested source
+ * and Interrupt Aggregator.
+ * @handle: Pointer to TISCI handle.
+ * @src_id: Device ID of the IRQ source
+ * @src_index: IRQ source index within the source device
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
+ * @vint: Virtual interrupt to be used within the IA
+ * @global_event: Global event number to be used for the requesting event
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
+ *
+ * Return: 0 if all went fine, else return appropriate error.
+ */
+static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
+ u16 src_id, u16 src_index, u16 ia_id,
+ u16 vint, u16 global_event,
+ u8 vint_status_bit)
+{
+ u32 valid_params = MSG_FLAG_IA_ID_VALID |
+ MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
+ MSG_FLAG_VINT_STS_BIT_VALID;
+
+ return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0,
+ ia_id, vint, global_event, vint_status_bit, 0);
+}
+
/*
* ti_sci_setup_ops() - Setup the operations structures
* @info: pointer to TISCI pointer
@@ -1610,6 +2014,8 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
struct ti_sci_core_ops *core_ops = &ops->core_ops;
struct ti_sci_dev_ops *dops = &ops->dev_ops;
struct ti_sci_clk_ops *cops = &ops->clk_ops;
+ struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
+ struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
core_ops->reboot_device = ti_sci_cmd_core_reboot;
@@ -1640,6 +2046,15 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
cops->set_freq = ti_sci_cmd_clk_set_freq;
cops->get_freq = ti_sci_cmd_clk_get_freq;
+
+ rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
+ rm_core_ops->get_range_from_shost =
+ ti_sci_cmd_get_resource_range_from_shost;
+
+ iops->set_irq = ti_sci_cmd_set_irq;
+ iops->set_event_map = ti_sci_cmd_set_event_map;
+ iops->free_irq = ti_sci_cmd_free_irq;
+ iops->free_event_map = ti_sci_cmd_free_event_map;
}
/**
@@ -1764,6 +2179,219 @@ const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev)
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
+/**
+ * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
+ * @np: device node
+ * @property: property name containing phandle on TISCI node
+ *
+ * NOTE: The function does not track individual clients of the framework
+ * and is expected to be maintained by caller of TI SCI protocol library.
+ * ti_sci_put_handle must be balanced with successful ti_sci_get_by_phandle.
+ * Return: pointer to handle if successful, else:
+ * -EPROBE_DEFER if the instance is not ready
+ * -ENODEV if the required node handler is missing
+ * -EINVAL if invalid conditions are encountered.
+ */
+const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
+ const char *property)
+{
+ struct ti_sci_handle *handle = NULL;
+ struct device_node *ti_sci_np;
+ struct ti_sci_info *info;
+ struct list_head *p;
+
+ if (!np) {
+ pr_err("I need a device pointer\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ti_sci_np = of_parse_phandle(np, property, 0);
+ if (!ti_sci_np)
+ return ERR_PTR(-ENODEV);
+
+ mutex_lock(&ti_sci_list_mutex);
+ list_for_each(p, &ti_sci_list) {
+ info = list_entry(p, struct ti_sci_info, node);
+ if (ti_sci_np == info->dev->of_node) {
+ handle = &info->handle;
+ info->users++;
+ break;
+ }
+ }
+ mutex_unlock(&ti_sci_list_mutex);
+ of_node_put(ti_sci_np);
+
+ if (!handle)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return handle;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle);
+
+/**
+ * devm_ti_sci_get_by_phandle() - Managed get handle using phandle
+ * @dev: Device pointer requesting TISCI handle
+ * @property: property name containing phandle on TISCI node
+ *
+ * NOTE: This releases the handle once the device resources are
+ * no longer needed. MUST NOT BE released with ti_sci_put_handle.
+ * The function does not track individual clients of the framework
+ * and is expected to be maintained by caller of TI SCI protocol library.
+ *
+ * Return: pointer to handle if successful, else corresponding error pointer.
+ */
+const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
+ const char *property)
+{
+ const struct ti_sci_handle *handle;
+ const struct ti_sci_handle **ptr;
+
+ ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+ handle = ti_sci_get_by_phandle(dev_of_node(dev), property);
+
+ if (!IS_ERR(handle)) {
+ *ptr = handle;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return handle;
+}
+EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle);
+
+/**
+ * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
+ * @res: Pointer to the TISCI resource
+ *
+ * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
+ */
+u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
+{
+ unsigned long flags;
+ u16 set, free_bit;
+
+ raw_spin_lock_irqsave(&res->lock, flags);
+ for (set = 0; set < res->sets; set++) {
+ free_bit = find_first_zero_bit(res->desc[set].res_map,
+ res->desc[set].num);
+ if (free_bit != res->desc[set].num) {
+ set_bit(free_bit, res->desc[set].res_map);
+ raw_spin_unlock_irqrestore(&res->lock, flags);
+ return res->desc[set].start + free_bit;
+ }
+ }
+ raw_spin_unlock_irqrestore(&res->lock, flags);
+
+ return TI_SCI_RESOURCE_NULL;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_free_resource);
+
+/**
+ * ti_sci_release_resource() - Release a resource from TISCI resource.
+ * @res: Pointer to the TISCI resource
+ * @id: Resource id to be released.
+ */
+void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
+{
+ unsigned long flags;
+ u16 set;
+
+ raw_spin_lock_irqsave(&res->lock, flags);
+ for (set = 0; set < res->sets; set++) {
+ if (res->desc[set].start <= id &&
+ (res->desc[set].num + res->desc[set].start) > id)
+ clear_bit(id - res->desc[set].start,
+ res->desc[set].res_map);
+ }
+ raw_spin_unlock_irqrestore(&res->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ti_sci_release_resource);
+
+/**
+ * ti_sci_get_num_resources() - Get the number of resources in TISCI resource
+ * @res: Pointer to the TISCI resource
+ *
+ * Return: Total number of available resources.
+ */
+u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
+{
+ u32 set, count = 0;
+
+ for (set = 0; set < res->sets; set++)
+ count += res->desc[set].num;
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);
+
+/**
+ * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
+ * @handle: TISCI handle
+ * @dev: Device pointer to which the resource is assigned
+ * @dev_id: TISCI device id to which the resource is assigned
+ * @of_prop: property name by which the resource are represented
+ *
+ * Return: Pointer to ti_sci_resource if all went well else appropriate
+ * error pointer.
+ */
+struct ti_sci_resource *
+devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
+ struct device *dev, u32 dev_id, char *of_prop)
+{
+ struct ti_sci_resource *res;
+ u32 resource_subtype;
+ int i, ret;
+
+ res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return ERR_PTR(-ENOMEM);
+
+ res->sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
+ sizeof(u32));
+ if (res->sets < 0) {
+ dev_err(dev, "%s resource type ids not available\n", of_prop);
+ return ERR_PTR(res->sets);
+ }
+
+ res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
+ GFP_KERNEL);
+ if (!res->desc)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < res->sets; i++) {
+ ret = of_property_read_u32_index(dev_of_node(dev), of_prop, i,
+ &resource_subtype);
+ if (ret)
+ return ERR_PTR(-EINVAL);
+
+ ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
+ resource_subtype,
+ &res->desc[i].start,
+ &res->desc[i].num);
+ if (ret) {
+ dev_err(dev, "dev = %d subtype %d not allocated for this host\n",
+ dev_id, resource_subtype);
+ return ERR_PTR(ret);
+ }
+
+ dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n",
+ dev_id, resource_subtype, res->desc[i].start,
+ res->desc[i].num);
+
+ res->desc[i].res_map =
+ devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
+ sizeof(*res->desc[i].res_map), GFP_KERNEL);
+ if (!res->desc[i].res_map)
+ return ERR_PTR(-ENOMEM);
+ }
+ raw_spin_lock_init(&res->lock);
+
+ return res;
+}
+
static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
void *cmd)
{
@@ -1784,10 +2412,33 @@ static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */
.max_msgs = 20,
.max_msg_size = 64,
+ .rm_type_map = NULL,
+};
+
+static struct ti_sci_rm_type_map ti_sci_am654_rm_type_map[] = {
+ {.dev_id = 56, .type = 0x00b}, /* GIC_IRQ */
+ {.dev_id = 179, .type = 0x000}, /* MAIN_NAV_UDMASS_IA0 */
+ {.dev_id = 187, .type = 0x009}, /* MAIN_NAV_RA */
+ {.dev_id = 188, .type = 0x006}, /* MAIN_NAV_UDMAP */
+ {.dev_id = 194, .type = 0x007}, /* MCU_NAV_UDMAP */
+ {.dev_id = 195, .type = 0x00a}, /* MCU_NAV_RA */
+ {.dev_id = 0, .type = 0x000}, /* end of table */
+};
+
+/* Description for AM654 */
+static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
+ .default_host_id = 12,
+ /* Conservative duration */
+ .max_rx_timeout_ms = 10000,
+	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
+ .max_msgs = 20,
+ .max_msg_size = 60,
+ .rm_type_map = ti_sci_am654_rm_type_map,
};
static const struct of_device_id ti_sci_of_match[] = {
{.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
+ {.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc},
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, ti_sci_of_match);
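
To show how the new resource-management pieces fit together, here is a minimal consumer sketch (not from this patch): it obtains the TISCI handle via the new phandle helper, maps an assigned resource range, grabs a free index, and programs a direct host IRQ route. The property name, device IDs, and IRQ numbers are placeholders, and the client header location is assumed.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/ti/ti_sci_protocol.h>	/* assumed location of the client API */

static int example_route_irq(struct device *dev)
{
	const struct ti_sci_handle *handle;
	struct ti_sci_resource *res;
	u16 irq_idx;

	/* "ti,sci" is an illustrative phandle property name. */
	handle = devm_ti_sci_get_by_phandle(dev, "ti,sci");
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* dev_id 56 and the range property below are placeholders. */
	res = devm_ti_sci_get_of_resource(handle, dev, 56,
					  "ti,sci-rm-range-girq");
	if (IS_ERR(res))
		return PTR_ERR(res);

	irq_idx = ti_sci_get_free_resource(res);
	if (irq_idx == TI_SCI_RESOURCE_NULL)
		return -ENOENT;

	/* Route source (56, irq_idx) to a placeholder destination host IRQ. */
	return handle->ops.rm_irq_ops.set_irq(handle, 56, irq_idx, 100, 32);
}
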
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
index 12bf316b68df..4983827151bf 100644
--- a/drivers/firmware/ti_sci.h
+++ b/drivers/firmware/ti_sci.h
@@ -35,6 +35,13 @@
#define TI_SCI_MSG_QUERY_CLOCK_FREQ 0x010d
#define TI_SCI_MSG_GET_CLOCK_FREQ 0x010e
+/* Resource Management Requests */
+#define TI_SCI_MSG_GET_RESOURCE_RANGE 0x1500
+
+/* IRQ requests */
+#define TI_SCI_MSG_SET_IRQ 0x1000
+#define TI_SCI_MSG_FREE_IRQ 0x1001
+
/**
* struct ti_sci_msg_hdr - Generic Message Header for All messages and responses
* @type: Type of messages: One of TI_SCI_MSG* values
@@ -461,4 +468,99 @@ struct ti_sci_msg_resp_get_clock_freq {
u64 freq_hz;
} __packed;
+#define TI_SCI_IRQ_SECONDARY_HOST_INVALID 0xff
+
+/**
+ * struct ti_sci_msg_req_get_resource_range - Request to get a host's assigned
+ * range of resources.
+ * @hdr: Generic Header
+ * @type: Unique resource assignment type
+ * @subtype: Resource assignment subtype within the resource type.
+ * @secondary_host: Host processing entity to which the resources are
+ * allocated. This is required only when the destination
+ *			host id is different from ti sci interface host id,
+ * else TI_SCI_IRQ_SECONDARY_HOST_INVALID can be passed.
+ *
+ * Request type is TI_SCI_MSG_GET_RESOURCE_RANGE. Responded with requested
+ * resource range which is of type TI_SCI_MSG_GET_RESOURCE_RANGE.
+ */
+struct ti_sci_msg_req_get_resource_range {
+ struct ti_sci_msg_hdr hdr;
+#define MSG_RM_RESOURCE_TYPE_MASK GENMASK(9, 0)
+#define MSG_RM_RESOURCE_SUBTYPE_MASK GENMASK(5, 0)
+ u16 type;
+ u8 subtype;
+ u8 secondary_host;
+} __packed;
+
+/**
+ * struct ti_sci_msg_resp_get_resource_range - Response to resource get range.
+ * @hdr: Generic Header
+ * @range_start: Start index of the resource range.
+ * @range_num: Number of resources in the range.
+ *
+ * Response to request TI_SCI_MSG_GET_RESOURCE_RANGE.
+ */
+struct ti_sci_msg_resp_get_resource_range {
+ struct ti_sci_msg_hdr hdr;
+ u16 range_start;
+ u16 range_num;
+} __packed;
+
+/**
+ * struct ti_sci_msg_req_manage_irq - Request to configure/release the route
+ * between the dev and the host.
+ * @hdr: Generic Header
+ * @valid_params: Bit fields defining the validity of interrupt source
+ * parameters. If a bit is not set, then corresponding
+ * field is not valid and will not be used for route set.
+ * Bit field definitions:
+ * 0 - Valid bit for @dst_id
+ * 1 - Valid bit for @dst_host_irq
+ * 2 - Valid bit for @ia_id
+ * 3 - Valid bit for @vint
+ * 4 - Valid bit for @global_event
+ * 5 - Valid bit for @vint_status_bit_index
+ * 31 - Valid bit for @secondary_host
+ * @src_id: IRQ source peripheral ID.
+ * @src_index: IRQ source index within the peripheral
+ * @dst_id: IRQ Destination ID. Based on the architecture it can be
+ * IRQ controller or host processor ID.
+ * @dst_host_irq: IRQ number of the destination host IRQ controller
+ * @ia_id: Device ID of the interrupt aggregator in which the
+ * vint resides.
+ * @vint: Virtual interrupt number if the interrupt route
+ * is through an interrupt aggregator.
+ * @global_event: Global event that is to be mapped to interrupt
+ * aggregator virtual interrupt status bit.
+ * @vint_status_bit: Virtual interrupt status bit if the interrupt route
+ * utilizes an interrupt aggregator status bit.
+ * @secondary_host: Host ID of the IRQ destination computing entity. This is
+ * required only when destination host id is different
+ * from ti sci interface host id.
+ *
+ * Request type is TI_SCI_MSG_SET_IRQ or TI_SCI_MSG_FREE_IRQ.
+ * Response is generic ACK / NACK message.
+ */
+struct ti_sci_msg_req_manage_irq {
+ struct ti_sci_msg_hdr hdr;
+#define MSG_FLAG_DST_ID_VALID TI_SCI_MSG_FLAG(0)
+#define MSG_FLAG_DST_HOST_IRQ_VALID TI_SCI_MSG_FLAG(1)
+#define MSG_FLAG_IA_ID_VALID TI_SCI_MSG_FLAG(2)
+#define MSG_FLAG_VINT_VALID TI_SCI_MSG_FLAG(3)
+#define MSG_FLAG_GLB_EVNT_VALID TI_SCI_MSG_FLAG(4)
+#define MSG_FLAG_VINT_STS_BIT_VALID TI_SCI_MSG_FLAG(5)
+#define MSG_FLAG_SHOST_VALID TI_SCI_MSG_FLAG(31)
+ u32 valid_params;
+ u16 src_id;
+ u16 src_index;
+ u16 dst_id;
+ u16 dst_host_irq;
+ u16 ia_id;
+ u16 vint;
+ u16 global_event;
+ u8 vint_status_bit;
+ u8 secondary_host;
+} __packed;
+
#endif /* __TI_SCI_H */
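
For illustration only: the valid_params bit fields documented above map onto the MSG_FLAG_* macros as below when requesting a plain host IRQ route (the same combination ti_sci_cmd_set_irq() builds). Every numeric ID is a placeholder, and the sketch assumes the definitions from this header are in scope.

static void example_fill_set_irq_req(struct ti_sci_msg_req_manage_irq *req)
{
	/* Only dst_id and dst_host_irq are marked valid for a direct route. */
	req->valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
	req->src_id = 56;	/* placeholder IRQ source peripheral ID */
	req->src_index = 0;	/* first IRQ of that peripheral */
	req->dst_id = 100;	/* placeholder destination (host IRQ controller) ID */
	req->dst_host_irq = 32;	/* placeholder destination IRQ line */
	/* ia_id, vint, global_event, vint_status_bit are ignored: their valid bits are clear. */
}
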
diff --git a/drivers/firmware/trusted_foundations.c b/drivers/firmware/trusted_foundations.c
new file mode 100644
index 000000000000..fd4999388ff1
--- /dev/null
+++ b/drivers/firmware/trusted_foundations.c
@@ -0,0 +1,176 @@
+/*
+ * Trusted Foundations support for ARM CPUs
+ *
+ * Copyright (c) 2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+
+#include <linux/firmware/trusted_foundations.h>
+
+#include <asm/firmware.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/outercache.h>
+
+#define TF_CACHE_MAINT 0xfffff100
+
+#define TF_CACHE_ENABLE 1
+#define TF_CACHE_DISABLE 2
+
+#define TF_SET_CPU_BOOT_ADDR_SMC 0xfffff200
+
+#define TF_CPU_PM 0xfffffffc
+#define TF_CPU_PM_S3 0xffffffe3
+#define TF_CPU_PM_S2 0xffffffe6
+#define TF_CPU_PM_S2_NO_MC_CLK 0xffffffe5
+#define TF_CPU_PM_S1 0xffffffe4
+#define TF_CPU_PM_S1_NOFLUSH_L2 0xffffffe7
+
+static unsigned long cpu_boot_addr;
+
+static void tf_generic_smc(u32 type, u32 arg1, u32 arg2)
+{
+ register u32 r0 asm("r0") = type;
+ register u32 r1 asm("r1") = arg1;
+ register u32 r2 asm("r2") = arg2;
+
+ asm volatile(
+ ".arch_extension sec\n\t"
+ "stmfd sp!, {r4 - r11}\n\t"
+ __asmeq("%0", "r0")
+ __asmeq("%1", "r1")
+ __asmeq("%2", "r2")
+ "mov r3, #0\n\t"
+ "mov r4, #0\n\t"
+ "smc #0\n\t"
+ "ldmfd sp!, {r4 - r11}\n\t"
+ :
+ : "r" (r0), "r" (r1), "r" (r2)
+ : "memory", "r3", "r12", "lr");
+}
+
+static int tf_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
+{
+ cpu_boot_addr = boot_addr;
+ tf_generic_smc(TF_SET_CPU_BOOT_ADDR_SMC, cpu_boot_addr, 0);
+
+ return 0;
+}
+
+static int tf_prepare_idle(unsigned long mode)
+{
+ switch (mode) {
+ case TF_PM_MODE_LP0:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S3, cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP1:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S2, cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP1_NO_MC_CLK:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S2_NO_MC_CLK,
+ cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP2:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S1, cpu_boot_addr);
+ break;
+
+ case TF_PM_MODE_LP2_NOFLUSH_L2:
+ tf_generic_smc(TF_CPU_PM, TF_CPU_PM_S1_NOFLUSH_L2,
+ cpu_boot_addr);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_CACHE_L2X0
+static void tf_cache_write_sec(unsigned long val, unsigned int reg)
+{
+ u32 l2x0_way_mask = 0xff;
+
+ switch (reg) {
+ case L2X0_CTRL:
+ if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_ASSOCIATIVITY_16)
+ l2x0_way_mask = 0xffff;
+
+ if (val == L2X0_CTRL_EN)
+ tf_generic_smc(TF_CACHE_MAINT, TF_CACHE_ENABLE,
+ l2x0_saved_regs.aux_ctrl);
+ else
+ tf_generic_smc(TF_CACHE_MAINT, TF_CACHE_DISABLE,
+ l2x0_way_mask);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int tf_init_cache(void)
+{
+ outer_cache.write_sec = tf_cache_write_sec;
+
+ return 0;
+}
+#endif /* CONFIG_CACHE_L2X0 */
+
+static const struct firmware_ops trusted_foundations_ops = {
+ .set_cpu_boot_addr = tf_set_cpu_boot_addr,
+ .prepare_idle = tf_prepare_idle,
+#ifdef CONFIG_CACHE_L2X0
+ .l2x0_init = tf_init_cache,
+#endif
+};
+
+void register_trusted_foundations(struct trusted_foundations_platform_data *pd)
+{
+ /*
+ * we are not using version information for now since currently
+ * supported SMCs are compatible with all TF releases
+ */
+ register_firmware_ops(&trusted_foundations_ops);
+}
+
+void of_register_trusted_foundations(void)
+{
+ struct device_node *node;
+ struct trusted_foundations_platform_data pdata;
+ int err;
+
+ node = of_find_compatible_node(NULL, NULL, "tlm,trusted-foundations");
+ if (!node)
+ return;
+
+ err = of_property_read_u32(node, "tlm,version-major",
+ &pdata.version_major);
+ if (err != 0)
+ panic("Trusted Foundation: missing version-major property\n");
+ err = of_property_read_u32(node, "tlm,version-minor",
+ &pdata.version_minor);
+ if (err != 0)
+ panic("Trusted Foundation: missing version-minor property\n");
+ register_trusted_foundations(&pdata);
+}
+
+bool trusted_foundations_registered(void)
+{
+ return firmware_ops == &trusted_foundations_ops;
+}
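
A board that knows it runs under Trusted Foundations but has no device tree node could register the firmware ops directly along these lines. This is a sketch only; the version numbers are illustrative (the code above currently ignores them anyway).

#include <linux/firmware/trusted_foundations.h>
#include <linux/init.h>

static void __init board_init_early(void)
{
	struct trusted_foundations_platform_data pdata = {
		.version_major = 2,	/* illustrative */
		.version_minor = 0,	/* illustrative */
	};

	register_trusted_foundations(&pdata);
}
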
diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c
index 2771df6df379..c6d0724da4db 100644
--- a/drivers/firmware/xilinx/zynqmp-debug.c
+++ b/drivers/firmware/xilinx/zynqmp-debug.c
@@ -90,9 +90,6 @@ static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
int ret;
struct zynqmp_pm_query_data qdata = {0};
- if (!eemi_ops)
- return -ENXIO;
-
switch (pm_id) {
case PM_GET_API_VERSION:
ret = eemi_ops->get_api_version(&pm_api_version);
@@ -163,21 +160,14 @@ static ssize_t zynqmp_pm_debugfs_api_write(struct file *file,
strcpy(debugfs_buf, "");
- if (*off != 0 || len == 0)
+ if (*off != 0 || len <= 1 || len > PAGE_SIZE - 1)
return -EINVAL;
- kern_buff = kzalloc(len, GFP_KERNEL);
- if (!kern_buff)
- return -ENOMEM;
-
+ kern_buff = memdup_user_nul(ptr, len);
+ if (IS_ERR(kern_buff))
+ return PTR_ERR(kern_buff);
tmp_buff = kern_buff;
- ret = strncpy_from_user(kern_buff, ptr, len);
- if (ret < 0) {
- ret = -EFAULT;
- goto err;
- }
-
/* Read the API name from a user request */
pm_api_req = strsep(&kern_buff, " ");
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 98f936125643..fd3d83745208 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -24,6 +24,8 @@
#include <linux/firmware/xlnx-zynqmp.h>
#include "zynqmp-debug.h"
+static const struct zynqmp_eemi_ops *eemi_ops_tbl;
+
static const struct mfd_cell firmware_devs[] = {
{
.name = "zynqmp_power_controller",
@@ -538,6 +540,49 @@ static int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
}
/**
+ * zynqmp_pm_fpga_load - Perform the fpga load
+ * @address: Address to write to
+ * @size: pl bitstream size
+ * @flags: Bitstream type
+ * -XILINX_ZYNQMP_PM_FPGA_FULL: FPGA full reconfiguration
+ * -XILINX_ZYNQMP_PM_FPGA_PARTIAL: FPGA partial reconfiguration
+ *
+ * This function provides access to the pmufw interface in order to
+ * transfer the required bitstream into the PL.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_fpga_load(const u64 address, const u32 size,
+ const u32 flags)
+{
+ return zynqmp_pm_invoke_fn(PM_FPGA_LOAD, lower_32_bits(address),
+ upper_32_bits(address), size, flags, NULL);
+}
+
+/**
+ * zynqmp_pm_fpga_get_status - Read value from PCAP status register
+ * @value: Value to read
+ *
+ * This function provides access to the pmufw to get the PCAP
+ * status.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+static int zynqmp_pm_fpga_get_status(u32 *value)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_FPGA_GET_STATUS, 0, 0, 0, 0, ret_payload);
+ *value = ret_payload[1];
+
+ return ret;
+}
+
+/**
* zynqmp_pm_init_finalize() - PM call to inform firmware that the caller
* master has initialized its own power management
*
@@ -640,6 +685,8 @@ static const struct zynqmp_eemi_ops eemi_ops = {
.request_node = zynqmp_pm_request_node,
.release_node = zynqmp_pm_release_node,
.set_requirement = zynqmp_pm_set_requirement,
+ .fpga_load = zynqmp_pm_fpga_load,
+ .fpga_get_status = zynqmp_pm_fpga_get_status,
};
/**
@@ -649,7 +696,11 @@ static const struct zynqmp_eemi_ops eemi_ops = {
*/
const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
{
- return &eemi_ops;
+ if (eemi_ops_tbl)
+ return eemi_ops_tbl;
+ else
+ return ERR_PTR(-EPROBE_DEFER);
+
}
EXPORT_SYMBOL_GPL(zynqmp_pm_get_eemi_ops);
@@ -694,6 +745,9 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
pr_info("%s Trustzone version v%d.%d\n", __func__,
pm_tz_version >> 16, pm_tz_version & 0xFFFF);
+ /* Assign eemi_ops_table */
+ eemi_ops_tbl = &eemi_ops;
+
zynqmp_pm_api_debugfs_init();
ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, firmware_devs,
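
A hedged consumer sketch (illustrative only; the wrapper name is invented): because eemi_ops_tbl is only assigned in zynqmp_firmware_probe(), callers of zynqmp_pm_get_eemi_ops() now have to handle an ERR_PTR(-EPROBE_DEFER) return until the firmware driver has probed:

#include <linux/err.h>
#include <linux/firmware/xlnx-zynqmp.h>

static int example_query_api_version(u32 *version)
{
	const struct zynqmp_eemi_ops *ops = zynqmp_pm_get_eemi_ops();

	if (IS_ERR(ops))
		return PTR_ERR(ops);	/* typically -EPROBE_DEFER before probe */

	return ops->get_api_version(version);
}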
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index c20445b867ae..d892f3efcd76 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -204,4 +204,13 @@ config FPGA_DFL_PCI
To compile this as a module, choose M here.
+config FPGA_MGR_ZYNQMP_FPGA
+ tristate "Xilinx ZynqMP FPGA"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ help
+ FPGA manager driver support for Xilinx ZynqMP FPGAs.
+ This driver uses the processor configuration port (PCAP)
+ to configure the programmable logic (PL) through the PS
+ on the ZynqMP SoC.
+
endif # FPGA
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index c0dd4c82fbdb..312b9371742f 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_FPGA_MGR_STRATIX10_SOC) += stratix10-soc.o
obj-$(CONFIG_FPGA_MGR_TS73XX) += ts73xx-fpga.o
obj-$(CONFIG_FPGA_MGR_XILINX_SPI) += xilinx-spi.o
obj-$(CONFIG_FPGA_MGR_ZYNQ_FPGA) += zynq-fpga.o
+obj-$(CONFIG_FPGA_MGR_ZYNQMP_FPGA) += zynqmp-fpga.o
obj-$(CONFIG_ALTERA_PR_IP_CORE) += altera-pr-ip-core.o
obj-$(CONFIG_ALTERA_PR_IP_CORE_PLAT) += altera-pr-ip-core-plat.o
diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c
new file mode 100644
index 000000000000..f7cbaadf49ab
--- /dev/null
+++ b/drivers/fpga/zynqmp-fpga.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/string.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+
+/* Constant Definitions */
+#define IXR_FPGA_DONE_MASK BIT(3)
+
+/**
+ * struct zynqmp_fpga_priv - Private data structure
+ * @dev: Device data structure
+ * @flags: flags used to identify the bitfile type
+ */
+struct zynqmp_fpga_priv {
+ struct device *dev;
+ u32 flags;
+};
+
+static int zynqmp_fpga_ops_write_init(struct fpga_manager *mgr,
+ struct fpga_image_info *info,
+ const char *buf, size_t size)
+{
+ struct zynqmp_fpga_priv *priv;
+
+ priv = mgr->priv;
+ priv->flags = info->flags;
+
+ return 0;
+}
+
+static int zynqmp_fpga_ops_write(struct fpga_manager *mgr,
+ const char *buf, size_t size)
+{
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+ struct zynqmp_fpga_priv *priv;
+ dma_addr_t dma_addr;
+ u32 eemi_flags = 0;
+ char *kbuf;
+ int ret;
+
+ if (!eemi_ops || !eemi_ops->fpga_load)
+ return -ENXIO;
+
+ priv = mgr->priv;
+
+ kbuf = dma_alloc_coherent(priv->dev, size, &dma_addr, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ memcpy(kbuf, buf, size);
+
+ wmb(); /* ensure all writes are done before initiating the FW call */
+
+ if (priv->flags & FPGA_MGR_PARTIAL_RECONFIG)
+ eemi_flags |= XILINX_ZYNQMP_PM_FPGA_PARTIAL;
+
+ ret = eemi_ops->fpga_load(dma_addr, size, eemi_flags);
+
+ dma_free_coherent(priv->dev, size, kbuf, dma_addr);
+
+ return ret;
+}
+
+static int zynqmp_fpga_ops_write_complete(struct fpga_manager *mgr,
+ struct fpga_image_info *info)
+{
+ return 0;
+}
+
+static enum fpga_mgr_states zynqmp_fpga_ops_state(struct fpga_manager *mgr)
+{
+ const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+ u32 status;
+
+ if (!eemi_ops || !eemi_ops->fpga_get_status)
+ return FPGA_MGR_STATE_UNKNOWN;
+
+ eemi_ops->fpga_get_status(&status);
+ if (status & IXR_FPGA_DONE_MASK)
+ return FPGA_MGR_STATE_OPERATING;
+
+ return FPGA_MGR_STATE_UNKNOWN;
+}
+
+static const struct fpga_manager_ops zynqmp_fpga_ops = {
+ .state = zynqmp_fpga_ops_state,
+ .write_init = zynqmp_fpga_ops_write_init,
+ .write = zynqmp_fpga_ops_write,
+ .write_complete = zynqmp_fpga_ops_write_complete,
+};
+
+static int zynqmp_fpga_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct zynqmp_fpga_priv *priv;
+ struct fpga_manager *mgr;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+
+ mgr = devm_fpga_mgr_create(dev, "Xilinx ZynqMP FPGA Manager",
+ &zynqmp_fpga_ops, priv);
+ if (!mgr)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mgr);
+
+ ret = fpga_mgr_register(mgr);
+ if (ret) {
+ dev_err(dev, "unable to register FPGA manager");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int zynqmp_fpga_remove(struct platform_device *pdev)
+{
+ struct fpga_manager *mgr = platform_get_drvdata(pdev);
+
+ fpga_mgr_unregister(mgr);
+
+ return 0;
+}
+
+static const struct of_device_id zynqmp_fpga_of_match[] = {
+ { .compatible = "xlnx,zynqmp-pcap-fpga", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, zynqmp_fpga_of_match);
+
+static struct platform_driver zynqmp_fpga_driver = {
+ .probe = zynqmp_fpga_probe,
+ .remove = zynqmp_fpga_remove,
+ .driver = {
+ .name = "zynqmp_fpga_manager",
+ .of_match_table = of_match_ptr(zynqmp_fpga_of_match),
+ },
+};
+
+module_platform_driver(zynqmp_fpga_driver);
+
+MODULE_AUTHOR("Nava kishore Manne <navam@xilinx.com>");
+MODULE_DESCRIPTION("Xilinx ZynqMp FPGA Manager");
+MODULE_LICENSE("GPL");
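
A rough usage sketch (the device pointer and the firmware file name are assumptions, not part of this patch): once the manager is registered, a consumer such as an FPGA region goes through the generic FPGA manager core, which ends up in zynqmp_fpga_ops_write() above:

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/fpga/fpga-mgr.h>

static int example_program_pl(struct device *dev)
{
	struct fpga_image_info *info;
	struct fpga_manager *mgr;
	int ret;

	mgr = fpga_mgr_get(dev);	/* dev: the registered manager's device */
	if (IS_ERR(mgr))
		return PTR_ERR(mgr);

	info = fpga_image_info_alloc(dev);
	if (!info) {
		fpga_mgr_put(mgr);
		return -ENOMEM;
	}
	/* Request a full reconfiguration from a firmware file */
	info->firmware_name = devm_kstrdup(dev, "design.bin", GFP_KERNEL);

	ret = fpga_mgr_load(mgr, info);

	fpga_image_info_free(info);
	fpga_mgr_put(mgr);
	return ret;
}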
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 41f08362dad3..8023d03ec362 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -286,6 +286,19 @@ config GPIO_IOP
If unsure, say N.
+config GPIO_IXP4XX
+ bool "Intel IXP4xx GPIO"
+ depends on ARM # For <asm/mach-types.h>
+ depends on ARCH_IXP4XX
+ select GPIO_GENERIC
+ select IRQ_DOMAIN
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Say yes here to support the GPIO functionality of the Intel
+ IXP4xx series of chips.
+
+ If unsure, say N.
+
config GPIO_LOONGSON
bool "Loongson-2/3 GPIO support"
depends on CPU_LOONGSON2 || CPU_LOONGSON3
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index e19be766f6a6..6700eee860b7 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_GPIO_HLWD) += gpio-hlwd.o
obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o
obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
obj-$(CONFIG_GPIO_IOP) += gpio-iop.o
+obj-$(CONFIG_GPIO_IXP4XX) += gpio-ixp4xx.o
obj-$(CONFIG_GPIO_IT87) += gpio-it87.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
obj-$(CONFIG_GPIO_KEMPLD) += gpio-kempld.o
diff --git a/drivers/gpio/gpio-ixp4xx.c b/drivers/gpio/gpio-ixp4xx.c
new file mode 100644
index 000000000000..4b1cf7ea858d
--- /dev/null
+++ b/drivers/gpio/gpio-ixp4xx.c
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// IXP4 GPIO driver
+// Copyright (C) 2019 Linus Walleij <linus.walleij@linaro.org>
+//
+// based on previous work and know-how from:
+// Deepak Saxena <dsaxena@plexity.net>
+
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+/* Include that will go away with the DT transition */
+#include <linux/irqchip/irq-ixp4xx.h>
+
+#include <asm/mach-types.h>
+
+#define IXP4XX_REG_GPOUT 0x00
+#define IXP4XX_REG_GPOE 0x04
+#define IXP4XX_REG_GPIN 0x08
+#define IXP4XX_REG_GPIS 0x0C
+#define IXP4XX_REG_GPIT1 0x10
+#define IXP4XX_REG_GPIT2 0x14
+#define IXP4XX_REG_GPCLK 0x18
+#define IXP4XX_REG_GPDBSEL 0x1C
+
+/*
+ * The hardware uses 3 bits to indicate interrupt "style".
+ * We clear and set these three bits accordingly. The lower 24
+ * bits in two registers (GPIT1 and GPIT2) are used to set up
+ * the style for 8 lines each, for a total of 16 GPIO lines.
+ */
+#define IXP4XX_GPIO_STYLE_ACTIVE_HIGH 0x0
+#define IXP4XX_GPIO_STYLE_ACTIVE_LOW 0x1
+#define IXP4XX_GPIO_STYLE_RISING_EDGE 0x2
+#define IXP4XX_GPIO_STYLE_FALLING_EDGE 0x3
+#define IXP4XX_GPIO_STYLE_TRANSITIONAL 0x4
+#define IXP4XX_GPIO_STYLE_MASK GENMASK(2, 0)
+#define IXP4XX_GPIO_STYLE_SIZE 3
+
+/**
+ * struct ixp4xx_gpio - IXP4 GPIO state container
+ * @dev: containing device for this instance
+ * @fwnode: the fwnode for this GPIO chip
+ * @gc: gpiochip for this instance
+ * @domain: irqdomain for this chip instance
+ * @base: remapped I/O-memory base
+ * @irq_edge: Each bit represents an IRQ: 1: edge-triggered,
+ * 0: level triggered
+ */
+struct ixp4xx_gpio {
+ struct device *dev;
+ struct fwnode_handle *fwnode;
+ struct gpio_chip gc;
+ struct irq_domain *domain;
+ void __iomem *base;
+ unsigned long long irq_edge;
+};
+
+/**
+ * struct ixp4xx_gpio_map - IXP4 GPIO to parent IRQ map
+ * @gpio_offset: offset of the IXP4 GPIO line
+ * @parent_hwirq: hwirq on the parent IRQ controller
+ */
+struct ixp4xx_gpio_map {
+ int gpio_offset;
+ int parent_hwirq;
+};
+
+/* GPIO lines 0..12 have corresponding IRQs, GPIOs 13..15 have no IRQs */
+const struct ixp4xx_gpio_map ixp4xx_gpiomap[] = {
+ { .gpio_offset = 0, .parent_hwirq = 6 },
+ { .gpio_offset = 1, .parent_hwirq = 7 },
+ { .gpio_offset = 2, .parent_hwirq = 19 },
+ { .gpio_offset = 3, .parent_hwirq = 20 },
+ { .gpio_offset = 4, .parent_hwirq = 21 },
+ { .gpio_offset = 5, .parent_hwirq = 22 },
+ { .gpio_offset = 6, .parent_hwirq = 23 },
+ { .gpio_offset = 7, .parent_hwirq = 24 },
+ { .gpio_offset = 8, .parent_hwirq = 25 },
+ { .gpio_offset = 9, .parent_hwirq = 26 },
+ { .gpio_offset = 10, .parent_hwirq = 27 },
+ { .gpio_offset = 11, .parent_hwirq = 28 },
+ { .gpio_offset = 12, .parent_hwirq = 29 },
+};
+
+static void ixp4xx_gpio_irq_ack(struct irq_data *d)
+{
+ struct ixp4xx_gpio *g = irq_data_get_irq_chip_data(d);
+
+ __raw_writel(BIT(d->hwirq), g->base + IXP4XX_REG_GPIS);
+}
+
+static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
+{
+ struct ixp4xx_gpio *g = irq_data_get_irq_chip_data(d);
+
+ /* ACK when unmasking if not edge-triggered */
+ if (!(g->irq_edge & BIT(d->hwirq)))
+ ixp4xx_gpio_irq_ack(d);
+
+ irq_chip_unmask_parent(d);
+}
+
+static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct ixp4xx_gpio *g = irq_data_get_irq_chip_data(d);
+ int line = d->hwirq;
+ unsigned long flags;
+ u32 int_style;
+ u32 int_reg;
+ u32 val;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ irq_set_handler_locked(d, handle_edge_irq);
+ int_style = IXP4XX_GPIO_STYLE_TRANSITIONAL;
+ g->irq_edge |= BIT(d->hwirq);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ irq_set_handler_locked(d, handle_edge_irq);
+ int_style = IXP4XX_GPIO_STYLE_RISING_EDGE;
+ g->irq_edge |= BIT(d->hwirq);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ irq_set_handler_locked(d, handle_edge_irq);
+ int_style = IXP4XX_GPIO_STYLE_FALLING_EDGE;
+ g->irq_edge |= BIT(d->hwirq);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ irq_set_handler_locked(d, handle_level_irq);
+ int_style = IXP4XX_GPIO_STYLE_ACTIVE_HIGH;
+ g->irq_edge &= ~BIT(d->hwirq);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_set_handler_locked(d, handle_level_irq);
+ int_style = IXP4XX_GPIO_STYLE_ACTIVE_LOW;
+ g->irq_edge &= ~BIT(d->hwirq);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (line >= 8) {
+ /* pins 8-15 */
+ line -= 8;
+ int_reg = IXP4XX_REG_GPIT2;
+ } else {
+ /* pins 0-7 */
+ int_reg = IXP4XX_REG_GPIT1;
+ }
+
+ spin_lock_irqsave(&g->gc.bgpio_lock, flags);
+
+ /* Clear the style for the appropriate pin */
+ val = __raw_readl(g->base + int_reg);
+ val &= ~(IXP4XX_GPIO_STYLE_MASK << (line * IXP4XX_GPIO_STYLE_SIZE));
+ __raw_writel(val, g->base + int_reg);
+
+ __raw_writel(BIT(line), g->base + IXP4XX_REG_GPIS);
+
+ /* Set the new style */
+ val = __raw_readl(g->base + int_reg);
+ val |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE));
+ __raw_writel(val, g->base + int_reg);
+
+ /* Force-configure this line as an input */
+ val = __raw_readl(g->base + IXP4XX_REG_GPOE);
+ val |= BIT(d->hwirq);
+ __raw_writel(val, g->base + IXP4XX_REG_GPOE);
+
+ spin_unlock_irqrestore(&g->gc.bgpio_lock, flags);
+
+ /* This parent only accepts level high (asserted) */
+ return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
+}
+
+static struct irq_chip ixp4xx_gpio_irqchip = {
+ .name = "IXP4GPIO",
+ .irq_ack = ixp4xx_gpio_irq_ack,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = ixp4xx_gpio_irq_unmask,
+ .irq_set_type = ixp4xx_gpio_irq_set_type,
+};
+
+static int ixp4xx_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
+{
+ struct ixp4xx_gpio *g = gpiochip_get_data(gc);
+ struct irq_fwspec fwspec;
+
+ fwspec.fwnode = g->fwnode;
+ fwspec.param_count = 2;
+ fwspec.param[0] = offset;
+ fwspec.param[1] = IRQ_TYPE_NONE;
+
+ return irq_create_fwspec_mapping(&fwspec);
+}
+
+static int ixp4xx_gpio_irq_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+
+ /* We support standard DT translation */
+ if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ return 0;
+ }
+
+ /* This goes away when we transition to DT */
+ if (is_fwnode_irqchip(fwspec->fwnode)) {
+ if (fwspec->param_count != 2)
+ return -EINVAL;
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ WARN_ON(*type == IRQ_TYPE_NONE);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int ixp4xx_gpio_irq_domain_alloc(struct irq_domain *d,
+ unsigned int irq, unsigned int nr_irqs,
+ void *data)
+{
+ struct ixp4xx_gpio *g = d->host_data;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ struct irq_fwspec *fwspec = data;
+ int ret;
+ int i;
+
+ ret = ixp4xx_gpio_irq_domain_translate(d, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ dev_dbg(g->dev, "allocate IRQ %d..%d, hwirq %lu..%lu\n",
+ irq, irq + nr_irqs - 1,
+ hwirq, hwirq + nr_irqs - 1);
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_fwspec parent_fwspec;
+ const struct ixp4xx_gpio_map *map;
+ int j;
+
+ /* Not all lines support IRQs */
+ for (j = 0; j < ARRAY_SIZE(ixp4xx_gpiomap); j++) {
+ map = &ixp4xx_gpiomap[j];
+ if (map->gpio_offset == hwirq)
+ break;
+ }
+ if (j == ARRAY_SIZE(ixp4xx_gpiomap)) {
+ dev_err(g->dev, "can't look up hwirq %lu\n", hwirq);
+ return -EINVAL;
+ }
+ dev_dbg(g->dev, "found parent hwirq %u\n", map->parent_hwirq);
+
+ /*
+ * We set handle_bad_irq because the .set_type() should
+ * always be invoked and set the right type of handler.
+ */
+ irq_domain_set_info(d,
+ irq + i,
+ hwirq + i,
+ &ixp4xx_gpio_irqchip,
+ g,
+ handle_bad_irq,
+ NULL, NULL);
+ irq_set_probe(irq + i);
+
+ /*
+ * Create an IRQ fwspec to send up to the parent irqdomain:
+ * specify the hwirq we address on the parent and tie it
+ * all together up the chain.
+ */
+ parent_fwspec.fwnode = d->parent->fwnode;
+ parent_fwspec.param_count = 2;
+ parent_fwspec.param[0] = map->parent_hwirq;
+ /* This parent only handles asserted level IRQs */
+ parent_fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
+ dev_dbg(g->dev, "alloc_irqs_parent for %d parent hwirq %d\n",
+ irq + i, map->parent_hwirq);
+ ret = irq_domain_alloc_irqs_parent(d, irq + i, 1,
+ &parent_fwspec);
+ if (ret)
+ dev_err(g->dev,
+ "failed to allocate parent hwirq %d for hwirq %lu\n",
+ map->parent_hwirq, hwirq);
+ }
+
+ return 0;
+}
+
+static const struct irq_domain_ops ixp4xx_gpio_irqdomain_ops = {
+ .translate = ixp4xx_gpio_irq_domain_translate,
+ .alloc = ixp4xx_gpio_irq_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int ixp4xx_gpio_probe(struct platform_device *pdev)
+{
+ unsigned long flags;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct irq_domain *parent;
+ struct resource *res;
+ struct ixp4xx_gpio *g;
+ int ret;
+ int i;
+
+ g = devm_kzalloc(dev, sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return -ENOMEM;
+ g->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ g->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(g->base)) {
+ dev_err(dev, "ioremap error\n");
+ return PTR_ERR(g->base);
+ }
+
+ /*
+ * Make sure GPIO 14 and 15 are NOT used as clocks but as GPIO
+ * on specific machines.
+ */
+ if (machine_is_dsmg600() || machine_is_nas100d())
+ __raw_writel(0x0, g->base + IXP4XX_REG_GPCLK);
+
+ /*
+ * This is a very special big-endian ARM issue: when the IXP4xx is
+ * run in big endian mode, all registers in the machine are switched
+ * around to the CPU-native endianness. As seen in most of this
+ * driver, we use __raw_readl()/__raw_writel() to access the registers
+ * in the appropriate order. With the GPIO library we need to specify
+ * byte order explicitly, so this flag needs to be set when compiling
+ * for big endian.
+ */
+#if defined(CONFIG_CPU_BIG_ENDIAN)
+ flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER;
+#else
+ flags = 0;
+#endif
+
+ /* Populate and register gpio chip */
+ ret = bgpio_init(&g->gc, dev, 4,
+ g->base + IXP4XX_REG_GPIN,
+ g->base + IXP4XX_REG_GPOUT,
+ NULL,
+ NULL,
+ g->base + IXP4XX_REG_GPOE,
+ flags);
+ if (ret) {
+ dev_err(dev, "unable to init generic GPIO\n");
+ return ret;
+ }
+ g->gc.to_irq = ixp4xx_gpio_to_irq;
+ g->gc.ngpio = 16;
+ g->gc.label = "IXP4XX_GPIO_CHIP";
+ /*
+ * TODO: when we have migrated to device tree and all GPIOs
+ * are fetched using phandles, set this to -1 to get rid of
+ * the fixed gpiochip base.
+ */
+ g->gc.base = 0;
+ g->gc.parent = &pdev->dev;
+ g->gc.owner = THIS_MODULE;
+
+ ret = devm_gpiochip_add_data(dev, &g->gc, g);
+ if (ret) {
+ dev_err(dev, "failed to add SoC gpiochip\n");
+ return ret;
+ }
+
+ /*
+ * When we convert to device tree we will simply look up the
+ * parent irqdomain using irq_find_host(parent) as parent comes
+ * from IRQCHIP_DECLARE(), then use of_node_to_fwnode() to get
+ * the fwnode. For now we need this boardfile style code.
+ */
+ if (np) {
+ struct device_node *irq_parent;
+
+ irq_parent = of_irq_find_parent(np);
+ if (!irq_parent) {
+ dev_err(dev, "no IRQ parent node\n");
+ return -ENODEV;
+ }
+ parent = irq_find_host(irq_parent);
+ if (!parent) {
+ dev_err(dev, "no IRQ parent domain\n");
+ return -ENODEV;
+ }
+ g->fwnode = of_node_to_fwnode(np);
+ } else {
+ parent = ixp4xx_get_irq_domain();
+ g->fwnode = irq_domain_alloc_fwnode(g->base);
+ if (!g->fwnode) {
+ dev_err(dev, "no domain base\n");
+ return -ENODEV;
+ }
+ }
+ g->domain = irq_domain_create_hierarchy(parent,
+ IRQ_DOMAIN_FLAG_HIERARCHY,
+ ARRAY_SIZE(ixp4xx_gpiomap),
+ g->fwnode,
+ &ixp4xx_gpio_irqdomain_ops,
+ g);
+ if (!g->domain) {
+ irq_domain_free_fwnode(g->fwnode);
+ dev_err(dev, "no hierarchical irq domain\n");
+ return ret;
+ }
+
+ /*
+ * After adding OF support, this is no longer needed: irqs
+ * will be allocated for the respective fwnodes.
+ */
+ if (!np) {
+ for (i = 0; i < ARRAY_SIZE(ixp4xx_gpiomap); i++) {
+ const struct ixp4xx_gpio_map *map = &ixp4xx_gpiomap[i];
+ struct irq_fwspec fwspec;
+
+ fwspec.fwnode = g->fwnode;
+ /* This is the hwirq for the GPIO line side of things */
+ fwspec.param[0] = map->gpio_offset;
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ fwspec.param_count = 2;
+ ret = __irq_domain_alloc_irqs(g->domain,
+ -1, /* just pick something */
+ 1,
+ NUMA_NO_NODE,
+ &fwspec,
+ false,
+ NULL);
+ if (ret < 0) {
+ irq_domain_free_fwnode(g->fwnode);
+ dev_err(dev,
+ "can not allocate irq for GPIO line %d parent hwirq %d in hierarchy domain: %d\n",
+ map->gpio_offset, map->parent_hwirq,
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ platform_set_drvdata(pdev, g);
+ dev_info(dev, "IXP4 GPIO @%p registered\n", g->base);
+
+ return 0;
+}
+
+static const struct of_device_id ixp4xx_gpio_of_match[] = {
+ {
+ .compatible = "intel,ixp4xx-gpio",
+ },
+ {},
+};
+
+static struct platform_driver ixp4xx_gpio_driver = {
+ .driver = {
+ .name = "ixp4xx-gpio",
+ .of_match_table = of_match_ptr(ixp4xx_gpio_of_match),
+ },
+ .probe = ixp4xx_gpio_probe,
+};
+builtin_platform_driver(ixp4xx_gpio_driver);
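
A tiny illustrative helper (an assumption added for clarity, not part of the patch): it restates how ixp4xx_gpio_irq_set_type() above locates the 3-bit style field for a line, GPIT1 for lines 0-7 and GPIT2 for lines 8-15:

static void example_style_location(unsigned int line,
				   unsigned int *reg, unsigned int *shift)
{
	if (line >= 8) {
		/* e.g. line 10 -> GPIT2, bits 6..8 */
		*reg = IXP4XX_REG_GPIT2;
		*shift = (line - 8) * IXP4XX_GPIO_STYLE_SIZE;
	} else {
		*reg = IXP4XX_REG_GPIT1;
		*shift = line * IXP4XX_GPIO_STYLE_SIZE;
	}
}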
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index 1306722faa5a..715371b5102a 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -363,22 +363,16 @@ static int thunderx_gpio_irq_request_resources(struct irq_data *data)
{
struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
struct thunderx_gpio *txgpio = txline->txgpio;
- struct irq_data *parent_data = data->parent_data;
int r;
r = gpiochip_lock_as_irq(&txgpio->chip, txline->line);
if (r)
return r;
- if (parent_data && parent_data->chip->irq_request_resources) {
- r = parent_data->chip->irq_request_resources(parent_data);
- if (r)
- goto error;
- }
+ r = irq_chip_request_resources_parent(data);
+ if (r)
+ gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
- return 0;
-error:
- gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
return r;
}
@@ -386,10 +380,8 @@ static void thunderx_gpio_irq_release_resources(struct irq_data *data)
{
struct thunderx_line *txline = irq_data_get_irq_chip_data(data);
struct thunderx_gpio *txgpio = txline->txgpio;
- struct irq_data *parent_data = data->parent_data;
- if (parent_data && parent_data->chip->irq_release_resources)
- parent_data->chip->irq_release_resources(parent_data);
+ irq_chip_release_resources_parent(data);
gpiochip_unlock_as_irq(&txgpio->chip, txline->line);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 4376b17ca594..56f8ca2a3bb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -464,8 +464,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
}
}
if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
- if ((adev->flags & AMD_IS_PX) &&
- amdgpu_atpx_dgpu_req_power_for_displays()) {
+ if (adev->flags & AMD_IS_PX) {
pm_runtime_get_sync(adev->ddev->dev);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(adev->ddev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 95144e49c7f9..34471dbaa872 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -342,6 +342,16 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
if (current_level == level)
return count;
+ /* profile_exit setting is valid only when current mode is in profile mode */
+ if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
+ (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
+ pr_err("Currently not in any profile mode!\n");
+ return -EINVAL;
+ }
+
if (is_support_sw_smu(adev)) {
mutex_lock(&adev->pm.mutex);
if (adev->pm.dpm.thermal_active) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 905cce1814f3..05897b05766b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -38,18 +38,10 @@ static void psp_set_funcs(struct amdgpu_device *adev);
static int psp_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct psp_context *psp = &adev->psp;
psp_set_funcs(adev);
- return 0;
-}
-
-static int psp_sw_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct psp_context *psp = &adev->psp;
- int ret;
-
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
@@ -67,6 +59,15 @@ static int psp_sw_init(void *handle)
psp->adev = adev;
+ return 0;
+}
+
+static int psp_sw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct psp_context *psp = &adev->psp;
+ int ret;
+
ret = psp_init_microcode(psp);
if (ret) {
DRM_ERROR("Failed to load psp firmware!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index a07c85815b7a..4f10f5aba00b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2757,6 +2757,37 @@ error_free_sched_entity:
}
/**
+ * amdgpu_vm_check_clean_reserved - check if a VM is clean
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: the VM to check
+ *
+ * Check all entries of the root PD. If any subsequent PDs are allocated,
+ * page tables are being created and filled, so this is not a clean VM.
+ *
+ * Returns:
+ * 0 if this VM is clean
+ */
+static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
+{
+ enum amdgpu_vm_level root = adev->vm_manager.root_level;
+ unsigned int entries = amdgpu_vm_num_entries(adev, root);
+ unsigned int i = 0;
+
+ if (!(vm->root.entries))
+ return 0;
+
+ for (i = 0; i < entries; i++) {
+ if (vm->root.entries[i].base.bo)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
* amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
*
* @adev: amdgpu_device pointer
@@ -2786,10 +2817,9 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
return r;
/* Sanity checks */
- if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
- r = -EINVAL;
+ r = amdgpu_vm_check_clean_reserved(adev, vm);
+ if (r)
goto unreserve_bo;
- }
if (pasid) {
unsigned long flags;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 8dbad496b29f..2471e7cf75ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -372,6 +372,9 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
if (amdgpu_sriov_runtime(adev))
schedule_work(&adev->virt.flr_work);
break;
+ case IDH_QUERY_ALIVE:
+ xgpu_ai_mailbox_send_ack(adev);
+ break;
/* READY_TO_ACCESS_GPU is fetched by kernel polling, IRQ can ignore
* it byfar since that polling thread will handle it,
* other msg like flr complete is not handled here.
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 39d151b79153..077e91a33d62 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -49,6 +49,7 @@ enum idh_event {
IDH_FLR_NOTIFICATION_CMPL,
IDH_SUCCESS,
IDH_FAIL,
+ IDH_QUERY_ALIVE,
IDH_EVENT_MAX
};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index dc461df48da0..2191d3d0a219 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -787,10 +787,13 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
0xFFFFFFFF, 0x00000004);
/* mc resume*/
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
- lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
- upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
offset = 0;
} else {
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
@@ -798,10 +801,11 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
upper_32_bits(adev->uvd.inst[i].gpu_addr));
offset = size;
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+
}
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
- AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index f3f5938430d4..c0ec27991c22 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -244,13 +244,18 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
+ offset = AMDGPU_VCE_FIRMWARE_OFFSET;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ uint32_t low = adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_lo;
+ uint32_t hi = adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_hi;
+ uint64_t tmr_mc_addr = (uint64_t)(hi) << 32 | low;
+
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
- mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
+ mmVCE_LMI_VCPU_CACHE_40BIT_BAR0), tmr_mc_addr >> 8);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
- (adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 40) & 0xff);
+ (tmr_mc_addr >> 40) & 0xff);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), 0);
} else {
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
@@ -258,6 +263,9 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
(adev->vce.gpu_addr >> 40) & 0xff);
+ MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
+ offset & ~0x0f000000);
+
}
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
@@ -272,10 +280,7 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
mmVCE_LMI_VCPU_CACHE_64BIT_BAR2),
(adev->vce.gpu_addr >> 40) & 0xff);
- offset = AMDGPU_VCE_FIRMWARE_OFFSET;
size = VCE_V4_0_FW_SIZE;
- MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
- offset & ~0x0f000000);
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
offset = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) ? offset + size : 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 1b2f69a9a24e..8d89ab7f0ae8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -31,7 +31,7 @@
#include "soc15_common.h"
#include "vega10_ih.h"
-
+#define MAX_REARM_RETRY 10
static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
@@ -382,6 +382,38 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev,
}
/**
+ * vega10_ih_irq_rearm - rearm IRQ if lost
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: IH ring buffer whose doorbell may need rearming
+ *
+ */
+static void vega10_ih_irq_rearm(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ uint32_t reg_rptr = 0;
+ uint32_t v = 0;
+ uint32_t i = 0;
+
+ if (ih == &adev->irq.ih)
+ reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+ else if (ih == &adev->irq.ih1)
+ reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+ else if (ih == &adev->irq.ih2)
+ reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+ else
+ return;
+
+ /* Rearm IRQ / re-write doorbell if doorbell write is lost */
+ for (i = 0; i < MAX_REARM_RETRY; i++) {
+ v = RREG32_NO_KIQ(reg_rptr);
+ if ((v < ih->ring_size) && (v != ih->rptr))
+ WDOORBELL32(ih->doorbell_index, ih->rptr);
+ else
+ break;
+ }
+}
+
+/**
* vega10_ih_set_rptr - set the IH ring buffer rptr
*
* @adev: amdgpu_device pointer
@@ -395,6 +427,9 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev,
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
WDOORBELL32(ih->doorbell_index, ih->rptr);
+
+ if (amdgpu_sriov_vf(adev))
+ vega10_ih_irq_rearm(adev, ih);
} else if (ih == &adev->irq.ih) {
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
} else if (ih == &adev->irq.ih1) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1854506e3e8f..995f9df66142 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5242,7 +5242,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct drm_crtc *pcrtc,
bool wait_for_vblank)
{
- uint32_t i, r;
+ uint32_t i;
uint64_t timestamp_ns;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
@@ -5253,6 +5253,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct dm_crtc_state *dm_old_crtc_state =
to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
int planes_count = 0, vpos, hpos;
+ long r;
unsigned long flags;
struct amdgpu_bo *abo;
uint64_t tiling_flags;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index ec2ca71e1323..c532e9c9e491 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -748,11 +748,11 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
vsync_polarity = 1;
}
- if (mode->vrefresh <= 24000)
+ if (drm_mode_vrefresh(mode) <= 24)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
- else if (mode->vrefresh <= 25000)
+ else if (drm_mode_vrefresh(mode) <= 25)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
- else if (mode->vrefresh <= 30000)
+ else if (drm_mode_vrefresh(mode) <= 30)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
else
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 2ec89bcb59f1..8a9606f91e68 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -196,9 +196,9 @@ DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
{
struct dentry *ent;
- char name[10] = "";
+ char name[16] = "";
- sprintf(name, "vgpu%d", vgpu->id);
+ snprintf(name, 16, "vgpu%d", vgpu->id);
vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root);
if (!vgpu->debugfs)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 4e1e425189ba..41c8ebc60c63 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -45,6 +45,7 @@ static int vgpu_gem_get_pages(
int i, ret;
gen8_pte_t __iomem *gtt_entries;
struct intel_vgpu_fb_info *fb_info;
+ u32 page_num;
fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
if (WARN_ON(!fb_info))
@@ -54,14 +55,15 @@ static int vgpu_gem_get_pages(
if (unlikely(!st))
return -ENOMEM;
- ret = sg_alloc_table(st, fb_info->size, GFP_KERNEL);
+ page_num = obj->base.size >> PAGE_SHIFT;
+ ret = sg_alloc_table(st, page_num, GFP_KERNEL);
if (ret) {
kfree(st);
return ret;
}
gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
(fb_info->start >> PAGE_SHIFT);
- for_each_sg(st->sgl, sg, fb_info->size, i) {
+ for_each_sg(st->sgl, sg, page_num, i) {
sg->offset = 0;
sg->length = PAGE_SIZE;
sg_dma_address(sg) =
@@ -158,7 +160,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
return NULL;
drm_gem_private_object_init(dev, &obj->base,
- info->size << PAGE_SHIFT);
+ roundup(info->size, PAGE_SIZE));
i915_gem_object_init(obj, &intel_vgpu_gem_ops);
obj->read_domains = I915_GEM_DOMAIN_GTT;
@@ -206,11 +208,12 @@ static int vgpu_get_plane_info(struct drm_device *dev,
struct intel_vgpu_fb_info *info,
int plane_id)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_vgpu_primary_plane_format p;
struct intel_vgpu_cursor_plane_format c;
int ret, tile_height = 1;
+ memset(info, 0, sizeof(*info));
+
if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
ret = intel_vgpu_decode_primary_plane(vgpu, &p);
if (ret)
@@ -267,8 +270,7 @@ static int vgpu_get_plane_info(struct drm_device *dev,
return -EINVAL;
}
- info->size = (info->stride * roundup(info->height, tile_height)
- + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ info->size = info->stride * roundup(info->height, tile_height);
if (info->size == 0) {
gvt_vgpu_err("fb size is zero\n");
return -EINVAL;
@@ -278,11 +280,6 @@ static int vgpu_get_plane_info(struct drm_device *dev,
gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
return -EFAULT;
}
- if (((info->start >> PAGE_SHIFT) + info->size) >
- ggtt_total_entries(&dev_priv->ggtt)) {
- gvt_vgpu_err("Invalid GTT offset or size\n");
- return -EFAULT;
- }
if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
gvt_vgpu_err("invalid gma addr\n");
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index c2f7d20f6346..08c74e65836b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -811,7 +811,7 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
/* Allocate shadow page table without guest page. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
- struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type)
+ struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
{
struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
struct intel_vgpu_ppgtt_spt *spt = NULL;
@@ -861,7 +861,7 @@ err_free_spt:
/* Allocate shadow page table associated with specific gfn. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
- struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type,
+ struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type,
unsigned long gfn, bool guest_pde_ips)
{
struct intel_vgpu_ppgtt_spt *spt;
@@ -936,7 +936,7 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
{
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s;
- intel_gvt_gtt_type_t cur_pt_type;
+ enum intel_gvt_gtt_type cur_pt_type;
GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));
@@ -1076,6 +1076,9 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
} else {
int type = get_next_pt_type(we->type);
+ if (!gtt_type_is_pt(type))
+ goto err;
+
spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
if (IS_ERR(spt)) {
ret = PTR_ERR(spt);
@@ -1855,7 +1858,7 @@ static void vgpu_free_mm(struct intel_vgpu_mm *mm)
* Zero on success, negative error code in pointer if failed.
*/
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
+ enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_vgpu_mm *mm;
@@ -2309,7 +2312,7 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
}
static int alloc_scratch_pages(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t type)
+ enum intel_gvt_gtt_type type)
{
struct intel_vgpu_gtt *gtt = &vgpu->gtt;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
@@ -2594,7 +2597,7 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
* Zero on success, negative error code if failed.
*/
struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
+ enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
{
struct intel_vgpu_mm *mm;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 32c573aea494..42d0394f0de2 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -95,8 +95,8 @@ struct intel_gvt_gtt {
unsigned long scratch_mfn;
};
-typedef enum {
- GTT_TYPE_INVALID = -1,
+enum intel_gvt_gtt_type {
+ GTT_TYPE_INVALID = 0,
GTT_TYPE_GGTT_PTE,
@@ -124,7 +124,7 @@ typedef enum {
GTT_TYPE_PPGTT_PML4_PT,
GTT_TYPE_MAX,
-} intel_gvt_gtt_type_t;
+};
enum intel_gvt_mm_type {
INTEL_GVT_MM_GGTT,
@@ -148,7 +148,7 @@ struct intel_vgpu_mm {
union {
struct {
- intel_gvt_gtt_type_t root_entry_type;
+ enum intel_gvt_gtt_type root_entry_type;
/*
* The 4 PDPs in ring context. For 48bit addressing,
* only PDP0 is valid and point to PML4. For 32it
@@ -169,7 +169,7 @@ struct intel_vgpu_mm {
};
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
+ enum intel_gvt_gtt_type root_entry_type, u64 pdps[]);
static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm)
{
@@ -233,7 +233,7 @@ struct intel_vgpu_ppgtt_spt {
struct intel_vgpu *vgpu;
struct {
- intel_gvt_gtt_type_t type;
+ enum intel_gvt_gtt_type type;
bool pde_ips; /* for 64KB PTEs */
void *vaddr;
struct page *page;
@@ -241,7 +241,7 @@ struct intel_vgpu_ppgtt_spt {
} shadow_page;
struct {
- intel_gvt_gtt_type_t type;
+ enum intel_gvt_gtt_type type;
bool pde_ips; /* for 64KB PTEs */
unsigned long gfn;
unsigned long write_cnt;
@@ -267,7 +267,7 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
u64 pdps[]);
struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
+ enum intel_gvt_gtt_type root_entry_type, u64 pdps[]);
int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 18f01eeb2510..90673fca792f 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1206,7 +1206,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
{
- intel_gvt_gtt_type_t root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+ enum intel_gvt_gtt_type root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
struct intel_vgpu_mm *mm;
u64 *pdps;
@@ -3303,7 +3303,7 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
/* Special MMIO blocks. */
static struct gvt_mmio_block mmio_blocks[] = {
{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
- {D_ALL, MCHBAR_MIRROR_REG_BASE, 0x4000, NULL, NULL},
+ {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
pvinfo_mmio_read, pvinfo_mmio_write},
{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index e7e14c842be4..edf6d646eb25 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -132,6 +132,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
{RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
{RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
+ {RCS0, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
{RCS0, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
{RCS0, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 3de5b643b266..33aaa14bfdde 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -126,7 +126,4 @@
#define RING_GFX_MODE(base) _MMIO((base) + 0x29c)
#define VF_GUARDBAND _MMIO(0x83a4)
-/* define the effective range of MCHBAR register on Sandybridge+ */
-#define MCHBAR_MIRROR_REG_BASE _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4000)
-
#endif
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 8998fa5ab198..7c99bbc3e2b8 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1343,7 +1343,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
struct intel_vgpu_mm *mm;
struct intel_vgpu *vgpu = workload->vgpu;
- intel_gvt_gtt_type_t root_entry_type;
+ enum intel_gvt_gtt_type root_entry_type;
u64 pdps[GVT_RING_CTX_NR_PDPS];
switch (desc->addressing_mode) {
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index b836721d3b13..f6c78c0fa74b 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -425,6 +425,26 @@ void __i915_request_submit(struct i915_request *request)
if (i915_gem_context_is_banned(request->gem_context))
i915_request_skip(request, -EIO);
+ /*
+ * Are we using semaphores when the gpu is already saturated?
+ *
+ * Using semaphores incurs a cost in having the GPU poll a
+ * memory location, busywaiting for it to change. The continual
+ * memory reads can have a noticeable impact on the rest of the
+ * system with the extra bus traffic, stalling the cpu as it too
+ * tries to access memory across the bus (perf stat -e bus-cycles).
+ *
+ * If we installed a semaphore on this request and we only submit
+ * the request after the signaler completed, that indicates the
+ * system is overloaded and using semaphores at this time only
+ * increases the amount of work we are doing. If so, we disable
+ * further use of semaphores until we are idle again, whence we
+ * optimistically try again.
+ */
+ if (request->sched.semaphores &&
+ i915_sw_fence_signaled(&request->semaphore))
+ request->hw_context->saturated |= request->sched.semaphores;
+
/* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
@@ -432,6 +452,7 @@ void __i915_request_submit(struct i915_request *request)
set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
!i915_request_enable_breadcrumb(request))
intel_engine_queue_breadcrumbs(engine);
@@ -799,6 +820,39 @@ err_unreserve:
}
static int
+i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
+{
+ if (list_is_first(&signal->ring_link, &signal->ring->request_list))
+ return 0;
+
+ signal = list_prev_entry(signal, ring_link);
+ if (i915_timeline_sync_is_later(rq->timeline, &signal->fence))
+ return 0;
+
+ return i915_sw_fence_await_dma_fence(&rq->submit,
+ &signal->fence, 0,
+ I915_FENCE_GFP);
+}
+
+static intel_engine_mask_t
+already_busywaiting(struct i915_request *rq)
+{
+ /*
+ * Polling a semaphore causes bus traffic, delaying other users of
+ * both the GPU and CPU. We want to limit the impact on others,
+ * while taking advantage of early submission to reduce GPU
+ * latency. Therefore we restrict ourselves to not using more
+ * than one semaphore from each source, and not using a semaphore
+ * if we have detected the engine is saturated (i.e. would not be
+ * submitted early and cause bus traffic reading an already passed
+ * semaphore).
+ *
+ * See the are-we-too-late? check in __i915_request_submit().
+ */
+ return rq->sched.semaphores | rq->hw_context->saturated;
+}
+
+static int
emit_semaphore_wait(struct i915_request *to,
struct i915_request *from,
gfp_t gfp)
@@ -811,11 +865,15 @@ emit_semaphore_wait(struct i915_request *to,
GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
/* Just emit the first semaphore we see as request space is limited. */
- if (to->sched.semaphores & from->engine->mask)
+ if (already_busywaiting(to) & from->engine->mask)
return i915_sw_fence_await_dma_fence(&to->submit,
&from->fence, 0,
I915_FENCE_GFP);
+ err = i915_request_await_start(to, from);
+ if (err < 0)
+ return err;
+
err = i915_sw_fence_await_dma_fence(&to->semaphore,
&from->fence, 0,
I915_FENCE_GFP);
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 3cbffd400b1b..832cb6b1e9bd 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -23,6 +23,7 @@
*/
#include <linux/kthread.h>
+#include <trace/events/dma_fence.h>
#include <uapi/linux/sched/types.h>
#include "i915_drv.h"
@@ -80,9 +81,39 @@ static inline bool __request_completed(const struct i915_request *rq)
return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}
+static bool
+__dma_fence_signal(struct dma_fence *fence)
+{
+ return !test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
+}
+
+static void
+__dma_fence_signal__timestamp(struct dma_fence *fence, ktime_t timestamp)
+{
+ fence->timestamp = timestamp;
+ set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
+ trace_dma_fence_signaled(fence);
+}
+
+static void
+__dma_fence_signal__notify(struct dma_fence *fence)
+{
+ struct dma_fence_cb *cur, *tmp;
+
+ lockdep_assert_held(fence->lock);
+ lockdep_assert_irqs_disabled();
+
+ list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
+ INIT_LIST_HEAD(&cur->node);
+ cur->func(fence, cur);
+ }
+ INIT_LIST_HEAD(&fence->cb_list);
+}
+
void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ const ktime_t timestamp = ktime_get();
struct intel_context *ce, *cn;
struct list_head *pos, *next;
LIST_HEAD(signal);
@@ -104,6 +135,10 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL,
&rq->fence.flags));
+ clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+
+ if (!__dma_fence_signal(&rq->fence))
+ continue;
/*
* Queue for execution after dropping the signaling
@@ -111,14 +146,6 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
* more signalers to the same context or engine.
*/
i915_request_get(rq);
-
- /*
- * We may race with direct invocation of
- * dma_fence_signal(), e.g. i915_request_retire(),
- * so we need to acquire our reference to the request
- * before we cancel the breadcrumb.
- */
- clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
list_add_tail(&rq->signal_link, &signal);
}
@@ -141,7 +168,12 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
struct i915_request *rq =
list_entry(pos, typeof(*rq), signal_link);
- dma_fence_signal(&rq->fence);
+ __dma_fence_signal__timestamp(&rq->fence, timestamp);
+
+ spin_lock(&rq->lock);
+ __dma_fence_signal__notify(&rq->fence);
+ spin_unlock(&rq->lock);
+
i915_request_put(rq);
}
}
@@ -243,19 +275,17 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
bool i915_request_enable_breadcrumb(struct i915_request *rq)
{
- struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
-
- GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
+ lockdep_assert_held(&rq->lock);
+ lockdep_assert_irqs_disabled();
- if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
- return true;
-
- spin_lock(&b->irq_lock);
- if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags) &&
- !__request_completed(rq)) {
+ if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
+ struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
struct intel_context *ce = rq->hw_context;
struct list_head *pos;
+ spin_lock(&b->irq_lock);
+ GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
+
__intel_breadcrumbs_arm_irq(b);
/*
@@ -284,8 +314,8 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
list_move_tail(&ce->signal_link, &b->signalers);
set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+ spin_unlock(&b->irq_lock);
}
- spin_unlock(&b->irq_lock);
return !__request_completed(rq);
}
@@ -294,9 +324,15 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq)
{
struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
- if (!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
- return;
+ lockdep_assert_held(&rq->lock);
+ lockdep_assert_irqs_disabled();
+ /*
+ * We must wait for b->irq_lock so that we know the interrupt handler
+ * has released its reference to the intel_context and has completed
+ * the DMA_FENCE_FLAG_SIGNALED_BIT/I915_FENCE_FLAG_SIGNAL dance (if
+ * required).
+ */
spin_lock(&b->irq_lock);
if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
struct intel_context *ce = rq->hw_context;
diff --git a/drivers/gpu/drm/i915/intel_context.c b/drivers/gpu/drm/i915/intel_context.c
index 8931e0fee873..924cc556223a 100644
--- a/drivers/gpu/drm/i915/intel_context.c
+++ b/drivers/gpu/drm/i915/intel_context.c
@@ -230,6 +230,7 @@ intel_context_init(struct intel_context *ce,
ce->gem_context = ctx;
ce->engine = engine;
ce->ops = engine->cops;
+ ce->saturated = 0;
INIT_LIST_HEAD(&ce->signal_link);
INIT_LIST_HEAD(&ce->signals);
diff --git a/drivers/gpu/drm/i915/intel_context_types.h b/drivers/gpu/drm/i915/intel_context_types.h
index 68b4ca1611e0..339c7437fe82 100644
--- a/drivers/gpu/drm/i915/intel_context_types.h
+++ b/drivers/gpu/drm/i915/intel_context_types.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include "i915_active_types.h"
+#include "intel_engine_types.h"
struct i915_gem_context;
struct i915_vma;
@@ -58,6 +59,8 @@ struct intel_context {
atomic_t pin_count;
struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
+ intel_engine_mask_t saturated; /* submitting semaphores too late? */
+
/**
* active_tracker: Active tracker for the external rq activity
* on this intel_context object.
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3bd40a4a6739..5098228f1302 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12082,6 +12082,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
struct intel_crtc_state *pipe_config,
bool adjust)
{
+ struct intel_crtc *crtc = to_intel_crtc(current_config->base.crtc);
bool ret = true;
bool fixup_inherited = adjust &&
(current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
@@ -12303,6 +12304,14 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
+ /*
+ * Changing the EDP transcoder input mux
+ * (A_ONOFF vs. A_ON) requires a full modeset.
+ */
+ if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A &&
+ current_config->cpu_transcoder == TRANSCODER_EDP)
+ PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
+
if (!adjust) {
PIPE_CONF_CHECK_I(pipe_src_w);
PIPE_CONF_CHECK_I(pipe_src_h);
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index c805a0966395..5679f2fffb7c 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -1280,6 +1280,10 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
if (!HAS_FBC(dev_priv))
return 0;
+ /* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
+ if (IS_GEMINILAKE(dev_priv))
+ return 0;
+
if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
return 1;
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 37f60cb8e9e1..46cd0e70aecb 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -23,7 +23,6 @@
*/
#include <linux/circ_buf.h>
-#include <trace/events/dma_fence.h>
#include "intel_guc_submission.h"
#include "intel_lrc_reg.h"
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index e94b5b1bc1b7..e7c7be4911c1 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -311,10 +311,17 @@ retry:
pipe_config->base.mode_changed = pipe_config->has_psr;
pipe_config->crc_enabled = enable;
- if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A) {
+ if (IS_HASWELL(dev_priv) &&
+ pipe_config->base.active && crtc->pipe == PIPE_A &&
+ pipe_config->cpu_transcoder == TRANSCODER_EDP) {
+ bool old_need_power_well = pipe_config->pch_pfit.enabled ||
+ pipe_config->pch_pfit.force_thru;
+ bool new_need_power_well = pipe_config->pch_pfit.enabled ||
+ enable;
+
pipe_config->pch_pfit.force_thru = enable;
- if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
- pipe_config->pch_pfit.enabled != enable)
+
+ if (old_need_power_well != new_need_power_well)
pipe_config->base.connectors_changed = true;
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 9155dafae2a9..38e2cfa9cec7 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -747,7 +747,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
* will make sure that the refcounting is correct in case we need to
* bring down the GX after a GMU failure
*/
- if (!IS_ERR(gmu->gxpd))
+ if (!IS_ERR_OR_NULL(gmu->gxpd))
pm_runtime_get(gmu->gxpd);
out:
@@ -863,7 +863,7 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
* domain. Usually the GMU does this but only if the shutdown sequence
* was successful
*/
- if (!IS_ERR(gmu->gxpd))
+ if (!IS_ERR_OR_NULL(gmu->gxpd))
pm_runtime_put_sync(gmu->gxpd);
clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
@@ -1234,7 +1234,7 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
pm_runtime_disable(gmu->dev);
- if (!IS_ERR(gmu->gxpd)) {
+ if (!IS_ERR_OR_NULL(gmu->gxpd)) {
pm_runtime_disable(gmu->gxpd);
dev_pm_domain_detach(gmu->gxpd, false);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index 018df2c3b7ed..45a5bc6ede5d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -15,7 +15,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_mdss.h"
-#include "dpu_kms.h"
#define LM_OP_MODE 0x00
#define LM_OUT_SIZE 0x04
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index da1f727d7495..ce1a555e1f31 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -780,7 +780,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
struct dpu_hw_fmt_layout layout;
struct drm_gem_object *obj;
- struct msm_gem_object *msm_obj;
struct dma_fence *fence;
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
int ret;
@@ -799,8 +798,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
* implicit fence and fb prepare by hand here.
*/
obj = msm_framebuffer_bo(new_state->fb, 0);
- msm_obj = to_msm_bo(obj);
- fence = reservation_object_get_excl_rcu(msm_obj->resv);
+ fence = reservation_object_get_excl_rcu(obj->resv);
if (fence)
drm_atomic_set_fence_for_plane(new_state, fence);
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index f5b1256e32b6..131c23a267ee 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -49,15 +49,13 @@ int msm_atomic_prepare_fb(struct drm_plane *plane,
struct msm_drm_private *priv = plane->dev->dev_private;
struct msm_kms *kms = priv->kms;
struct drm_gem_object *obj;
- struct msm_gem_object *msm_obj;
struct dma_fence *fence;
if (!new_state->fb)
return 0;
obj = msm_framebuffer_bo(new_state->fb, 0);
- msm_obj = to_msm_bo(obj);
- fence = reservation_object_get_excl_rcu(msm_obj->resv);
+ fence = reservation_object_get_excl_rcu(obj->resv);
drm_atomic_set_fence_for_plane(new_state, fence);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 31d5a744d84f..35f55dd25994 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -803,7 +803,8 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
seq_puts(m, " vmas:");
list_for_each_entry(vma, &msm_obj->vmas, list)
- seq_printf(m, " [%s: %08llx,%s,inuse=%d]", vma->aspace->name,
+ seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
+ vma->aspace != NULL ? vma->aspace->name : NULL,
vma->iova, vma->mapped ? "mapped" : "unmapped",
vma->inuse);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index c5ac781dffee..812d1b1369a5 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -86,10 +86,6 @@ struct msm_gem_object {
struct llist_node freed;
- /* normally (resv == &_resv) except for imported bo's */
- struct reservation_object *resv;
- struct reservation_object _resv;
-
/* For physically contiguous buffers. Used when we don't have
* an IOMMU. Also used for stolen/splashscreen buffer.
*/
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 2216c58620c2..7c41b0599d1a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -41,6 +41,7 @@ struct nv50_disp_interlock {
NV50_DISP_INTERLOCK__SIZE
} type;
u32 data;
+ u32 wimm;
};
void corec37d_ntfy_init(struct nouveau_bo *, u32);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 2e7a0c347ddb..06ee23823a68 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -306,7 +306,7 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
asyh->set.or = head->func->or != NULL;
}
- if (asyh->state.mode_changed)
+ if (asyh->state.mode_changed || asyh->state.connectors_changed)
nv50_head_atomic_check_mode(head, asyh);
if (asyh->state.color_mgmt_changed ||
@@ -413,6 +413,7 @@ nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
asyh->ovly = armh->ovly;
asyh->dither = armh->dither;
asyh->procamp = armh->procamp;
+ asyh->or = armh->or;
asyh->dp = armh->dp;
asyh->clr.mask = 0;
asyh->set.mask = 0;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
index 9103b8494279..f7dbd965e4e7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
@@ -75,6 +75,7 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
return ret;
}
+ wndw->interlock.wimm = wndw->interlock.data;
wndw->immd = func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index b95181027b31..283ff690350e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -127,7 +127,7 @@ void
nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
struct nv50_wndw_atom *asyw)
{
- if (interlock) {
+ if (interlock[NV50_DISP_INTERLOCK_CORE]) {
asyw->image.mode = 0;
asyw->image.interval = 1;
}
@@ -149,7 +149,7 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
if (asyw->set.point) {
if (asyw->set.point = false, asyw->set.mask)
interlock[wndw->interlock.type] |= wndw->interlock.data;
- interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.data;
+ interlock[NV50_DISP_INTERLOCK_WIMM] |= wndw->interlock.wimm;
wndw->immd->point(wndw, asyw);
wndw->immd->update(wndw, interlock);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 22cd45845e07..7c2fcaba42d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -631,7 +631,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
/* We need to check that the chipset is supported before booting
* fbdev off the hardware, as there's no way to put it back.
*/
- ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device);
+ ret = nvkm_device_pci_new(pdev, nouveau_config, "error",
+ true, false, 0, &device);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 7971096b6767..10d91e8bbb94 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2540,6 +2540,41 @@ nv166_chipset = {
.sec2 = tu102_sec2_new,
};
+static const struct nvkm_device_chip
+nv167_chipset = {
+ .name = "TU117",
+ .bar = tu102_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = tu102_devinit_new,
+ .fault = tu102_fault_new,
+ .fb = gv100_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .gsp = gv100_gsp_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp102_ltc_new,
+ .mc = tu102_mc_new,
+ .mmu = tu102_mmu_new,
+ .pci = gp100_pci_new,
+ .pmu = gp102_pmu_new,
+ .therm = gp100_therm_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = tu102_ce_new,
+ .ce[1] = tu102_ce_new,
+ .ce[2] = tu102_ce_new,
+ .ce[3] = tu102_ce_new,
+ .ce[4] = tu102_ce_new,
+ .disp = tu102_disp_new,
+ .dma = gv100_dma_new,
+ .fifo = tu102_fifo_new,
+ .nvdec[0] = gp102_nvdec_new,
+ .sec2 = tu102_sec2_new,
+};
+
static int
nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify)
@@ -2824,8 +2859,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
u64 mmio_base, mmio_size;
u32 boot0, strap;
void __iomem *map;
- int ret = -EEXIST;
- int i;
+ int ret = -EEXIST, i;
+ unsigned chipset;
mutex_lock(&nv_devices_mutex);
if (nvkm_device_find_locked(handle))
@@ -2870,6 +2905,26 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
strap = ioread32_native(map + 0x101000);
iounmap(map);
+ /* chipset can be overridden for devel/testing purposes */
+ chipset = nvkm_longopt(device->cfgopt, "NvChipset", 0);
+ if (chipset) {
+ u32 override_boot0;
+
+ if (chipset >= 0x10) {
+ override_boot0 = ((chipset & 0x1ff) << 20);
+ override_boot0 |= 0x000000a1;
+ } else {
+ if (chipset != 0x04)
+ override_boot0 = 0x20104000;
+ else
+ override_boot0 = 0x20004000;
+ }
+
+ nvdev_warn(device, "CHIPSET OVERRIDE: %08x -> %08x\n",
+ boot0, override_boot0);
+ boot0 = override_boot0;
+ }
+
/* determine chipset and derive architecture from it */
if ((boot0 & 0x1f000000) > 0) {
device->chipset = (boot0 & 0x1ff00000) >> 20;
@@ -2996,6 +3051,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x162: device->chip = &nv162_chipset; break;
case 0x164: device->chip = &nv164_chipset; break;
case 0x166: device->chip = &nv166_chipset; break;
+ case 0x167: device->chip = &nv167_chipset; break;
default:
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
goto done;
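The NvChipset override above builds a synthetic boot0 word whose bits 20..28 carry the requested chipset ID, so the existing decode in nvkm_device_ctor() recovers it unchanged. A standalone arithmetic sketch of that round trip, using the newly added TU117 ID 0x167 purely as sample input:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t chipset = 0x167;	/* TU117, as added above */
	uint32_t boot0;

	/* encode as in the override hunk: chipset in bits 20..28 */
	boot0 = (chipset & 0x1ff) << 20;
	boot0 |= 0x000000a1;

	/* decode as in nvkm_device_ctor() */
	assert((boot0 & 0x1f000000) > 0);
	printf("boot0=%08x -> chipset=%03x\n", boot0,
	       (boot0 & 0x1ff00000) >> 20);
	return 0;
}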
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 5f301e632599..818d21bd28d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -365,8 +365,15 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
* and it's better to have a failed modeset than that.
*/
for (cfg = nvkm_dp_rates; cfg->rate; cfg++) {
- if (cfg->nr <= outp_nr && cfg->nr <= outp_bw)
- failsafe = cfg;
+ if (cfg->nr <= outp_nr && cfg->nr <= outp_bw) {
+ /* Try to respect sink limits too when selecting
+ * lowest link configuration.
+ */
+ if (!failsafe ||
+ (cfg->nr <= sink_nr && cfg->bw <= sink_bw))
+ failsafe = cfg;
+ }
+
if (failsafe && cfg[1].rate < dataKBps)
break;
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 970f669c6d29..3b2bced1b015 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -165,6 +165,10 @@ err_out0:
void panfrost_device_fini(struct panfrost_device *pfdev)
{
+ panfrost_job_fini(pfdev);
+ panfrost_mmu_fini(pfdev);
+ panfrost_gpu_fini(pfdev);
+ panfrost_reset_fini(pfdev);
panfrost_regulator_fini(pfdev);
panfrost_clk_fini(pfdev);
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 94b0819ad50b..d11e2281dde6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -219,7 +219,8 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
fail_job:
panfrost_job_put(job);
fail_out_sync:
- drm_syncobj_put(sync_out);
+ if (sync_out)
+ drm_syncobj_put(sync_out);
return ret;
}
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 0c5d391f0a8f..4501597f30ab 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -531,14 +531,15 @@ pl111_init_clock_divider(struct drm_device *drm)
dev_err(drm->dev, "CLCD: unable to get clcdclk.\n");
return PTR_ERR(parent);
}
+
+ spin_lock_init(&priv->tim2_lock);
+
/* If the clock divider is broken, use the parent directly */
if (priv->variant->broken_clockdivider) {
priv->clk = parent;
return 0;
}
parent_name = __clk_get_name(parent);
-
- spin_lock_init(&priv->tim2_lock);
div->init = &init;
ret = devm_clk_hw_register(drm->dev, div);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index aa898c699101..433df7036f96 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -922,12 +922,12 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
/* get matching reference and feedback divider */
- *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
+ *ref_div = min(max(den/post_div, 1u), ref_div_max);
*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
/* limit fb divider to its maximum */
if (*fb_div > fb_div_max) {
- *ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
+ *ref_div = (*ref_div * fb_div_max)/(*fb_div);
*fb_div = fb_div_max;
}
}
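The avivo_get_fb_ref_div() change swaps DIV_ROUND_CLOSEST() for plain integer division, which biases the computed reference divider downwards whenever the quotient is not exact. A quick standalone comparison of the two roundings; the sample den/post_div values are illustrative only, and the macro below is a simplified unsigned-only stand-in for the kernel's:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int den = 100, post_div = 8;

	/* old: 12.5 rounds up to 13; new: truncates to the lower divider 12 */
	printf("round-closest: %u, floor: %u\n",
	       DIV_ROUND_CLOSEST(den, post_div), den / post_div);
	return 0;
}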
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
index fb985ba1a176..2598741a00a6 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
@@ -11,6 +11,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/io.h>
#include "sun4i_hdmi.h"
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 9412709067f5..2ea4e20b7b8a 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -41,6 +41,7 @@
#include <linux/component.h>
#include <linux/dmaengine.h>
#include <linux/i2c.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/hid/intel-ish-hid/Makefile b/drivers/hid/intel-ish-hid/Makefile
index 2de97e4b7740..f0a82b1c7cb9 100644
--- a/drivers/hid/intel-ish-hid/Makefile
+++ b/drivers/hid/intel-ish-hid/Makefile
@@ -23,4 +23,4 @@ intel-ishtp-hid-objs += ishtp-hid-client.o
obj-$(CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ishtp-loader.o
intel-ishtp-loader-objs += ishtp-fw-loader.o
-ccflags-y += -Idrivers/hid/intel-ish-hid/ishtp
+ccflags-y += -I $(srctree)/$(src)/ishtp
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index c4dd6301e7c8..0daf0b32aa4a 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -830,10 +830,8 @@ static int aspeed_create_pwm_cooling(struct device *dev,
}
snprintf(cdev->name, MAX_CDEV_NAME_LEN, "%pOFn%d", child, pwm_port);
- cdev->tcdev = thermal_of_cooling_device_register(child,
- cdev->name,
- cdev,
- &aspeed_pwm_cool_ops);
+ cdev->tcdev = devm_thermal_of_cooling_device_register(dev, child,
+ cdev->name, cdev, &aspeed_pwm_cool_ops);
if (IS_ERR(cdev->tcdev))
return PTR_ERR(cdev->tcdev);
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index f1bf67aca9e8..3f6e5b4e3997 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -498,6 +498,11 @@ static const struct of_device_id of_gpio_fan_match[] = {
};
MODULE_DEVICE_TABLE(of, of_gpio_fan_match);
+static void gpio_fan_stop(void *data)
+{
+ set_fan_speed(data, 0);
+}
+
static int gpio_fan_probe(struct platform_device *pdev)
{
int err;
@@ -532,6 +537,7 @@ static int gpio_fan_probe(struct platform_device *pdev)
err = fan_ctrl_init(fan_data);
if (err)
return err;
+ devm_add_action_or_reset(dev, gpio_fan_stop, fan_data);
}
/* Make this driver part of hwmon class. */
@@ -543,32 +549,20 @@ static int gpio_fan_probe(struct platform_device *pdev)
return PTR_ERR(fan_data->hwmon_dev);
/* Optional cooling device register for Device tree platforms */
- fan_data->cdev = thermal_of_cooling_device_register(np,
- "gpio-fan",
- fan_data,
- &gpio_fan_cool_ops);
+ fan_data->cdev = devm_thermal_of_cooling_device_register(dev, np,
+ "gpio-fan", fan_data, &gpio_fan_cool_ops);
dev_info(dev, "GPIO fan initialized\n");
return 0;
}
-static int gpio_fan_remove(struct platform_device *pdev)
+static void gpio_fan_shutdown(struct platform_device *pdev)
{
struct gpio_fan_data *fan_data = platform_get_drvdata(pdev);
- if (!IS_ERR(fan_data->cdev))
- thermal_cooling_device_unregister(fan_data->cdev);
-
if (fan_data->gpios)
set_fan_speed(fan_data, 0);
-
- return 0;
-}
-
-static void gpio_fan_shutdown(struct platform_device *pdev)
-{
- gpio_fan_remove(pdev);
}
#ifdef CONFIG_PM_SLEEP
@@ -602,7 +596,6 @@ static SIMPLE_DEV_PM_OPS(gpio_fan_pm, gpio_fan_suspend, gpio_fan_resume);
static struct platform_driver gpio_fan_driver = {
.probe = gpio_fan_probe,
- .remove = gpio_fan_remove,
.shutdown = gpio_fan_shutdown,
.driver = {
.name = "gpio-fan",
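The gpio-fan conversion above (and the aspeed-pwm-tacho one before it) follows one pattern: register the cooling device through the device-managed helper and queue a devm action that parks the fan, so the explicit .remove unregistration can go away. A minimal sketch of that shape for a hypothetical driver; the foo_* names are illustrative and the real thermal_cooling_device_ops callbacks are elided:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/thermal.h>

struct foo_fan { struct thermal_cooling_device *cdev; };

/* real get_max_state/get_cur_state/set_cur_state callbacks go here */
static const struct thermal_cooling_device_ops foo_cool_ops = { };

static void foo_fan_stop(void *data)
{
	/* runs automatically on unbind, replacing a .remove callback */
}

static int foo_fan_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct foo_fan *fan = devm_kzalloc(dev, sizeof(*fan), GFP_KERNEL);
	int err;

	if (!fan)
		return -ENOMEM;

	err = devm_add_action_or_reset(dev, foo_fan_stop, fan);
	if (err)
		return err;

	/* unregistered automatically together with the other devres items */
	fan->cdev = devm_thermal_of_cooling_device_register(dev,
			dev->of_node, "foo-fan", fan, &foo_cool_ops);
	return PTR_ERR_OR_ZERO(fan->cdev);
}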
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index cd91510a5387..e694c46ff039 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -118,9 +118,7 @@ static DEFINE_IDA(hwmon_ida);
* The complex conditional is necessary to avoid a cyclic dependency
* between hwmon and thermal_sys modules.
*/
-#if IS_REACHABLE(CONFIG_THERMAL) && defined(CONFIG_THERMAL_OF) && \
- (!defined(CONFIG_THERMAL_HWMON) || \
- !(defined(MODULE) && IS_MODULE(CONFIG_THERMAL)))
+#ifdef CONFIG_THERMAL_OF
static int hwmon_thermal_get_temp(void *data, int *temp)
{
struct hwmon_thermal_data *tdata = data;
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
index f816d2ae1e58..ed8d59d4eecb 100644
--- a/drivers/hwmon/mlxreg-fan.c
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -465,42 +465,42 @@ static int mlxreg_fan_config(struct mlxreg_fan *fan,
static int mlxreg_fan_probe(struct platform_device *pdev)
{
struct mlxreg_core_platform_data *pdata;
+ struct device *dev = &pdev->dev;
struct mlxreg_fan *fan;
struct device *hwm;
int err;
- pdata = dev_get_platdata(&pdev->dev);
+ pdata = dev_get_platdata(dev);
if (!pdata) {
- dev_err(&pdev->dev, "Failed to get platform data.\n");
+ dev_err(dev, "Failed to get platform data.\n");
return -EINVAL;
}
- fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
+ fan = devm_kzalloc(dev, sizeof(*fan), GFP_KERNEL);
if (!fan)
return -ENOMEM;
- fan->dev = &pdev->dev;
+ fan->dev = dev;
fan->regmap = pdata->regmap;
- platform_set_drvdata(pdev, fan);
err = mlxreg_fan_config(fan, pdata);
if (err)
return err;
- hwm = devm_hwmon_device_register_with_info(&pdev->dev, "mlxreg_fan",
+ hwm = devm_hwmon_device_register_with_info(dev, "mlxreg_fan",
fan,
&mlxreg_fan_hwmon_chip_info,
NULL);
if (IS_ERR(hwm)) {
- dev_err(&pdev->dev, "Failed to register hwmon device\n");
+ dev_err(dev, "Failed to register hwmon device\n");
return PTR_ERR(hwm);
}
if (IS_REACHABLE(CONFIG_THERMAL)) {
- fan->cdev = thermal_cooling_device_register("mlxreg_fan", fan,
- &mlxreg_fan_cooling_ops);
+ fan->cdev = devm_thermal_of_cooling_device_register(dev,
+ NULL, "mlxreg_fan", fan, &mlxreg_fan_cooling_ops);
if (IS_ERR(fan->cdev)) {
- dev_err(&pdev->dev, "Failed to register cooling device\n");
+ dev_err(dev, "Failed to register cooling device\n");
return PTR_ERR(fan->cdev);
}
}
@@ -508,22 +508,11 @@ static int mlxreg_fan_probe(struct platform_device *pdev)
return 0;
}
-static int mlxreg_fan_remove(struct platform_device *pdev)
-{
- struct mlxreg_fan *fan = platform_get_drvdata(pdev);
-
- if (IS_REACHABLE(CONFIG_THERMAL))
- thermal_cooling_device_unregister(fan->cdev);
-
- return 0;
-}
-
static struct platform_driver mlxreg_fan_driver = {
.driver = {
.name = "mlxreg-fan",
},
.probe = mlxreg_fan_probe,
- .remove = mlxreg_fan_remove,
};
module_platform_driver(mlxreg_fan_driver);
diff --git a/drivers/hwmon/npcm750-pwm-fan.c b/drivers/hwmon/npcm750-pwm-fan.c
index 1dc0cd452498..09aaefa6fdb8 100644
--- a/drivers/hwmon/npcm750-pwm-fan.c
+++ b/drivers/hwmon/npcm750-pwm-fan.c
@@ -846,10 +846,8 @@ static int npcm7xx_create_pwm_cooling(struct device *dev,
snprintf(cdev->name, THERMAL_NAME_LENGTH, "%pOFn%d", child,
pwm_port);
- cdev->tcdev = thermal_of_cooling_device_register(child,
- cdev->name,
- cdev,
- &npcm7xx_pwm_cool_ops);
+ cdev->tcdev = devm_thermal_of_cooling_device_register(dev, child,
+ cdev->name, cdev, &npcm7xx_pwm_cool_ops);
if (IS_ERR(cdev->tcdev))
return PTR_ERR(cdev->tcdev);
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index eead8afe6447..5fb2745f0226 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -273,27 +273,40 @@ static int pwm_fan_of_get_cooling_data(struct device *dev,
return 0;
}
+static void pwm_fan_regulator_disable(void *data)
+{
+ regulator_disable(data);
+}
+
+static void pwm_fan_pwm_disable(void *__ctx)
+{
+ struct pwm_fan_ctx *ctx = __ctx;
+ pwm_disable(ctx->pwm);
+ del_timer_sync(&ctx->rpm_timer);
+}
+
static int pwm_fan_probe(struct platform_device *pdev)
{
struct thermal_cooling_device *cdev;
+ struct device *dev = &pdev->dev;
struct pwm_fan_ctx *ctx;
struct device *hwmon;
int ret;
struct pwm_state state = { };
u32 ppr = 2;
- ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
mutex_init(&ctx->lock);
- ctx->pwm = devm_of_pwm_get(&pdev->dev, pdev->dev.of_node, NULL);
+ ctx->pwm = devm_of_pwm_get(dev, dev->of_node, NULL);
if (IS_ERR(ctx->pwm)) {
ret = PTR_ERR(ctx->pwm);
if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Could not get PWM: %d\n", ret);
+ dev_err(dev, "Could not get PWM: %d\n", ret);
return ret;
}
@@ -304,7 +317,7 @@ static int pwm_fan_probe(struct platform_device *pdev)
if (ctx->irq == -EPROBE_DEFER)
return ctx->irq;
- ctx->reg_en = devm_regulator_get_optional(&pdev->dev, "fan");
+ ctx->reg_en = devm_regulator_get_optional(dev, "fan");
if (IS_ERR(ctx->reg_en)) {
if (PTR_ERR(ctx->reg_en) != -ENODEV)
return PTR_ERR(ctx->reg_en);
@@ -313,10 +326,11 @@ static int pwm_fan_probe(struct platform_device *pdev)
} else {
ret = regulator_enable(ctx->reg_en);
if (ret) {
- dev_err(&pdev->dev,
- "Failed to enable fan supply: %d\n", ret);
+ dev_err(dev, "Failed to enable fan supply: %d\n", ret);
return ret;
}
+ devm_add_action_or_reset(dev, pwm_fan_regulator_disable,
+ ctx->reg_en);
}
ctx->pwm_value = MAX_PWM;
@@ -328,91 +342,57 @@ static int pwm_fan_probe(struct platform_device *pdev)
ret = pwm_apply_state(ctx->pwm, &state);
if (ret) {
- dev_err(&pdev->dev, "Failed to configure PWM: %d\n", ret);
- goto err_reg_disable;
+ dev_err(dev, "Failed to configure PWM: %d\n", ret);
+ return ret;
}
-
timer_setup(&ctx->rpm_timer, sample_timer, 0);
+ devm_add_action_or_reset(dev, pwm_fan_pwm_disable, ctx);
- of_property_read_u32(pdev->dev.of_node, "pulses-per-revolution", &ppr);
+ of_property_read_u32(dev->of_node, "pulses-per-revolution", &ppr);
ctx->pulses_per_revolution = ppr;
if (!ctx->pulses_per_revolution) {
- dev_err(&pdev->dev, "pulses-per-revolution can't be zero.\n");
- ret = -EINVAL;
- goto err_pwm_disable;
+ dev_err(dev, "pulses-per-revolution can't be zero.\n");
+ return -EINVAL;
}
if (ctx->irq > 0) {
- ret = devm_request_irq(&pdev->dev, ctx->irq, pulse_handler, 0,
+ ret = devm_request_irq(dev, ctx->irq, pulse_handler, 0,
pdev->name, ctx);
if (ret) {
- dev_err(&pdev->dev,
- "Failed to request interrupt: %d\n", ret);
- goto err_pwm_disable;
+ dev_err(dev, "Failed to request interrupt: %d\n", ret);
+ return ret;
}
ctx->sample_start = ktime_get();
mod_timer(&ctx->rpm_timer, jiffies + HZ);
}
- hwmon = devm_hwmon_device_register_with_groups(&pdev->dev, "pwmfan",
+ hwmon = devm_hwmon_device_register_with_groups(dev, "pwmfan",
ctx, pwm_fan_groups);
if (IS_ERR(hwmon)) {
- ret = PTR_ERR(hwmon);
- dev_err(&pdev->dev,
- "Failed to register hwmon device: %d\n", ret);
- goto err_del_timer;
+ dev_err(dev, "Failed to register hwmon device\n");
+ return PTR_ERR(hwmon);
}
- ret = pwm_fan_of_get_cooling_data(&pdev->dev, ctx);
+ ret = pwm_fan_of_get_cooling_data(dev, ctx);
if (ret)
- goto err_del_timer;
+ return ret;
ctx->pwm_fan_state = ctx->pwm_fan_max_state;
if (IS_ENABLED(CONFIG_THERMAL)) {
- cdev = thermal_of_cooling_device_register(pdev->dev.of_node,
- "pwm-fan", ctx,
- &pwm_fan_cooling_ops);
+ cdev = devm_thermal_of_cooling_device_register(dev,
+ dev->of_node, "pwm-fan", ctx, &pwm_fan_cooling_ops);
if (IS_ERR(cdev)) {
ret = PTR_ERR(cdev);
- dev_err(&pdev->dev,
+ dev_err(dev,
"Failed to register pwm-fan as cooling device: %d\n",
ret);
- goto err_del_timer;
+ return ret;
}
ctx->cdev = cdev;
thermal_cdev_update(cdev);
}
return 0;
-
-err_del_timer:
- del_timer_sync(&ctx->rpm_timer);
-
-err_pwm_disable:
- state.enabled = false;
- pwm_apply_state(ctx->pwm, &state);
-
-err_reg_disable:
- if (ctx->reg_en)
- regulator_disable(ctx->reg_en);
-
- return ret;
-}
-
-static int pwm_fan_remove(struct platform_device *pdev)
-{
- struct pwm_fan_ctx *ctx = platform_get_drvdata(pdev);
-
- thermal_cooling_device_unregister(ctx->cdev);
- del_timer_sync(&ctx->rpm_timer);
-
- if (ctx->pwm_value)
- pwm_disable(ctx->pwm);
-
- if (ctx->reg_en)
- regulator_disable(ctx->reg_en);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -480,7 +460,6 @@ MODULE_DEVICE_TABLE(of, of_pwm_fan_match);
static struct platform_driver pwm_fan_driver = {
.probe = pwm_fan_probe,
- .remove = pwm_fan_remove,
.driver = {
.name = "pwm-fan",
.pm = &pwm_fan_pm,
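In the pwm-fan conversion the regulator-disable action is registered before the PWM/timer-stop action; devres entries are released in reverse order of registration, so on unbind the PWM is stopped and the RPM timer deleted before the fan supply is cut, matching the removed err_del_timer/err_pwm_disable/err_reg_disable unwind. A compact sketch of relying on that LIFO guarantee, with purely illustrative action names:

#include <linux/device.h>

static void undo_outer(void *data) { /* released last on unbind */ }
static void undo_inner(void *data) { /* released first on unbind */ }

static int foo_order_example(struct device *dev)
{
	int err;

	/* devres is LIFO: register the outermost resource's cleanup first */
	err = devm_add_action_or_reset(dev, undo_outer, NULL);
	if (err)
		return err;
	return devm_add_action_or_reset(dev, undo_inner, NULL);
}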
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 9732a81bb7dd..d389d4fb0623 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -714,7 +714,7 @@ static int i2c_dev_irq_from_resources(const struct resource *resources,
}
/**
- * i2c_new_device - instantiate an i2c device
+ * i2c_new_client_device - instantiate an i2c device
* @adap: the adapter managing the device
* @info: describes one I2C device; bus_num is ignored
* Context: can sleep
@@ -727,17 +727,17 @@ static int i2c_dev_irq_from_resources(const struct resource *resources,
* before any i2c_adapter could exist.
*
* This returns the new i2c client, which may be saved for later use with
- * i2c_unregister_device(); or NULL to indicate an error.
+ * i2c_unregister_device(); or an ERR_PTR to describe the error.
*/
-struct i2c_client *
-i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
+static struct i2c_client *
+i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
{
struct i2c_client *client;
int status;
client = kzalloc(sizeof *client, GFP_KERNEL);
if (!client)
- return NULL;
+ return ERR_PTR(-ENOMEM);
client->adapter = adap;
@@ -803,7 +803,31 @@ out_err:
client->name, client->addr, status);
out_err_silent:
kfree(client);
- return NULL;
+ return ERR_PTR(status);
+}
+EXPORT_SYMBOL_GPL(i2c_new_client_device);
+
+/**
+ * i2c_new_device - instantiate an i2c device
+ * @adap: the adapter managing the device
+ * @info: describes one I2C device; bus_num is ignored
+ * Context: can sleep
+ *
+ * This deprecated function has the same functionality as
+ * @i2c_new_client_device, it just returns NULL instead of an ERR_PTR in case of
+ * an error for compatibility with current I2C API. It will be removed once all
+ * users are converted.
+ *
+ * This returns the new i2c client, which may be saved for later use with
+ * i2c_unregister_device(); or NULL to indicate an error.
+ */
+struct i2c_client *
+i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
+{
+ struct i2c_client *ret;
+
+ ret = i2c_new_client_device(adap, info);
+ return IS_ERR(ret) ? NULL : ret;
}
EXPORT_SYMBOL_GPL(i2c_new_device);
@@ -854,7 +878,7 @@ static struct i2c_driver dummy_driver = {
};
/**
- * i2c_new_dummy - return a new i2c device bound to a dummy driver
+ * i2c_new_dummy_device - return a new i2c device bound to a dummy driver
* @adapter: the adapter managing the device
* @address: seven bit address to be used
* Context: can sleep
@@ -869,18 +893,86 @@ static struct i2c_driver dummy_driver = {
* different driver.
*
* This returns the new i2c client, which should be saved for later use with
- * i2c_unregister_device(); or NULL to indicate an error.
+ * i2c_unregister_device(); or an ERR_PTR to describe the error.
*/
-struct i2c_client *i2c_new_dummy(struct i2c_adapter *adapter, u16 address)
+static struct i2c_client *
+i2c_new_dummy_device(struct i2c_adapter *adapter, u16 address)
{
struct i2c_board_info info = {
I2C_BOARD_INFO("dummy", address),
};
- return i2c_new_device(adapter, &info);
+ return i2c_new_client_device(adapter, &info);
+}
+EXPORT_SYMBOL_GPL(i2c_new_dummy_device);
+
+/**
+ * i2c_new_dummy - return a new i2c device bound to a dummy driver
+ * @adapter: the adapter managing the device
+ * @address: seven bit address to be used
+ * Context: can sleep
+ *
+ * This deprecated function has the same functionality as @i2c_new_dummy_device,
+ * it just returns NULL instead of an ERR_PTR in case of an error for
+ * compatibility with current I2C API. It will be removed once all users are
+ * converted.
+ *
+ * This returns the new i2c client, which should be saved for later use with
+ * i2c_unregister_device(); or NULL to indicate an error.
+ */
+struct i2c_client *i2c_new_dummy(struct i2c_adapter *adapter, u16 address)
+{
+ struct i2c_client *ret;
+
+ ret = i2c_new_dummy_device(adapter, address);
+ return IS_ERR(ret) ? NULL : ret;
}
EXPORT_SYMBOL_GPL(i2c_new_dummy);
+struct i2c_dummy_devres {
+ struct i2c_client *client;
+};
+
+static void devm_i2c_release_dummy(struct device *dev, void *res)
+{
+ struct i2c_dummy_devres *this = res;
+
+ i2c_unregister_device(this->client);
+}
+
+/**
+ * devm_i2c_new_dummy_device - return a new i2c device bound to a dummy driver
+ * @dev: device the managed resource is bound to
+ * @adapter: the adapter managing the device
+ * @address: seven bit address to be used
+ * Context: can sleep
+ *
+ * This is the device-managed version of @i2c_new_dummy_device. It returns the
+ * new i2c client or an ERR_PTR in case of an error.
+ */
+struct i2c_client *devm_i2c_new_dummy_device(struct device *dev,
+ struct i2c_adapter *adapter,
+ u16 address)
+{
+ struct i2c_dummy_devres *dr;
+ struct i2c_client *client;
+
+ dr = devres_alloc(devm_i2c_release_dummy, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ client = i2c_new_dummy_device(adapter, address);
+ if (IS_ERR(client)) {
+ devres_free(dr);
+ } else {
+ dr->client = client;
+ devres_add(dev, dr);
+ }
+
+ return client;
+}
+EXPORT_SYMBOL_GPL(devm_i2c_new_dummy_device);
+
/**
* i2c_new_secondary_device - Helper to get the instantiated secondary address
* and create the associated device
@@ -1000,9 +1092,9 @@ i2c_sysfs_new_device(struct device *dev, struct device_attribute *attr,
info.flags |= I2C_CLIENT_SLAVE;
}
- client = i2c_new_device(adap, &info);
- if (!client)
- return -EINVAL;
+ client = i2c_new_client_device(adap, &info);
+ if (IS_ERR(client))
+ return PTR_ERR(client);
/* Keep track of the added device */
mutex_lock(&adap->userspace_clients_lock);
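The new i2c_new_client_device()/i2c_new_dummy_device() variants report failures as ERR_PTR values rather than NULL, and devm_i2c_new_dummy_device() additionally ties the dummy client's lifetime to a consumer device. A hedged sketch of a caller moving to the devres variant; the probe function name and the 0x50 secondary address are illustrative only:

#include <linux/err.h>
#include <linux/i2c.h>

static int foo_probe(struct i2c_client *client)
{
	struct i2c_client *dummy;

	dummy = devm_i2c_new_dummy_device(&client->dev, client->adapter, 0x50);
	if (IS_ERR(dummy))
		return PTR_ERR(dummy);	/* a real errno, not a bare -EINVAL */

	/* no i2c_unregister_device() needed on remove */
	return 0;
}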
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 06ca3f7fcc44..4a5eff3f18bc 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -733,11 +733,11 @@ static int iio_channel_read_avail(struct iio_channel *chan,
vals, type, length, info);
}
-int iio_read_avail_channel_raw(struct iio_channel *chan,
- const int **vals, int *length)
+int iio_read_avail_channel_attribute(struct iio_channel *chan,
+ const int **vals, int *type, int *length,
+ enum iio_chan_info_enum attribute)
{
int ret;
- int type;
mutex_lock(&chan->indio_dev->info_exist_lock);
if (!chan->indio_dev->info) {
@@ -745,11 +745,23 @@ int iio_read_avail_channel_raw(struct iio_channel *chan,
goto err_unlock;
}
- ret = iio_channel_read_avail(chan,
- vals, &type, length, IIO_CHAN_INFO_RAW);
+ ret = iio_channel_read_avail(chan, vals, type, length, attribute);
err_unlock:
mutex_unlock(&chan->indio_dev->info_exist_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);
+
+int iio_read_avail_channel_raw(struct iio_channel *chan,
+ const int **vals, int *length)
+{
+ int ret;
+ int type;
+
+ ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
+ IIO_CHAN_INFO_RAW);
+
if (ret >= 0 && type != IIO_VAL_INT)
/* raw values are assumed to be IIO_VAL_INT */
ret = -EINVAL;
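iio_read_avail_channel_attribute() generalizes the old raw-only helper so in-kernel consumers can query the available values of any channel attribute, with the raw variant reduced to a wrapper that also insists on IIO_VAL_INT. A consumer-side sketch; the channel name and the choice of IIO_CHAN_INFO_SCALE are assumptions for illustration:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iio/consumer.h>
#include <linux/iio/types.h>

static int foo_list_scales(struct device *dev)
{
	struct iio_channel *chan;
	const int *vals;
	int type, length, ret;

	chan = iio_channel_get(dev, "foo-adc");	/* hypothetical channel name */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = iio_read_avail_channel_attribute(chan, &vals, &type, &length,
					       IIO_CHAN_INFO_SCALE);
	/* on success, 'vals' holds 'length' entries encoded per 'type' */
	iio_channel_release(chan);
	return ret < 0 ? ret : 0;
}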
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 1fe039d7326b..82398827b64f 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -205,7 +205,7 @@ config KEYBOARD_LKKBD
config KEYBOARD_EP93XX
tristate "EP93xx Matrix Keypad support"
- depends on ARCH_EP93XX
+ depends on ARCH_EP93XX || COMPILE_TEST
select INPUT_MATRIXKMAP
help
Say Y here to enable the matrix keypad on the Cirrus EP93XX.
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index f77b295e0123..575dac52f7b4 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -27,8 +27,7 @@
#include <linux/io.h>
#include <linux/input/matrix_keypad.h>
#include <linux/slab.h>
-
-#include <mach/hardware.h>
+#include <linux/soc/cirrus/ep93xx.h>
#include <linux/platform_data/keypad-ep93xx.h>
/*
@@ -137,10 +136,7 @@ static void ep93xx_keypad_config(struct ep93xx_keypad *keypad)
struct ep93xx_keypad_platform_data *pdata = keypad->pdata;
unsigned int val = 0;
- if (pdata->flags & EP93XX_KEYPAD_KDIV)
- clk_set_rate(keypad->clk, EP93XX_KEYTCHCLK_DIV4);
- else
- clk_set_rate(keypad->clk, EP93XX_KEYTCHCLK_DIV16);
+ clk_set_rate(keypad->clk, pdata->clk_rate);
if (pdata->flags & EP93XX_KEYPAD_DISABLE_3_KEY)
val |= KEY_INIT_DIS3KY;
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
index 1fe149f3def2..4776273fa10b 100644
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ b/drivers/input/misc/ixp4xx-beeper.c
@@ -30,6 +30,8 @@ MODULE_ALIAS("platform:ixp4xx-beeper");
static DEFINE_SPINLOCK(beep_lock);
+static int ixp4xx_timer2_irq;
+
static void ixp4xx_spkr_control(unsigned int pin, unsigned int count)
{
unsigned long flags;
@@ -90,6 +92,7 @@ static irqreturn_t ixp4xx_spkr_interrupt(int irq, void *dev_id)
static int ixp4xx_spkr_probe(struct platform_device *dev)
{
struct input_dev *input_dev;
+ int irq;
int err;
input_dev = input_allocate_device();
@@ -110,15 +113,22 @@ static int ixp4xx_spkr_probe(struct platform_device *dev)
input_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE);
input_dev->event = ixp4xx_spkr_event;
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0) {
+ err = irq;
+ goto err_free_device;
+ }
+
err = gpio_request(dev->id, "ixp4-beeper");
if (err)
goto err_free_device;
- err = request_irq(IRQ_IXP4XX_TIMER2, &ixp4xx_spkr_interrupt,
+ err = request_irq(irq, &ixp4xx_spkr_interrupt,
IRQF_NO_SUSPEND, "ixp4xx-beeper",
(void *) dev->id);
if (err)
goto err_free_gpio;
+ ixp4xx_timer2_irq = irq;
err = input_register_device(input_dev);
if (err)
@@ -129,7 +139,7 @@ static int ixp4xx_spkr_probe(struct platform_device *dev)
return 0;
err_free_irq:
- free_irq(IRQ_IXP4XX_TIMER2, (void *)dev->id);
+ free_irq(irq, (void *)dev->id);
err_free_gpio:
gpio_free(dev->id);
err_free_device:
@@ -146,10 +156,10 @@ static int ixp4xx_spkr_remove(struct platform_device *dev)
input_unregister_device(input_dev);
/* turn the speaker off */
- disable_irq(IRQ_IXP4XX_TIMER2);
+ disable_irq(ixp4xx_timer2_irq);
ixp4xx_spkr_control(pin, 0);
- free_irq(IRQ_IXP4XX_TIMER2, (void *)dev->id);
+ free_irq(ixp4xx_timer2_irq, (void *)dev->id);
gpio_free(dev->id);
return 0;
@@ -161,7 +171,7 @@ static void ixp4xx_spkr_shutdown(struct platform_device *dev)
unsigned int pin = (unsigned int) input_get_drvdata(input_dev);
/* turn off the speaker */
- disable_irq(IRQ_IXP4XX_TIMER2);
+ disable_irq(ixp4xx_timer2_irq);
ixp4xx_spkr_control(pin, 0);
}
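The beeper now asks the platform device for its interrupt instead of hard-coding IRQ_IXP4XX_TIMER2, which is what allows the fixed number to disappear once the interrupt controller becomes a regular irqchip (added further below). The usual shape of that lookup, sketched for a hypothetical driver:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t foo_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* propagates -EPROBE_DEFER, -ENXIO, ... */

	return devm_request_irq(&pdev->dev, irq, foo_isr, 0,
				dev_name(&pdev->dev), pdev);
}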
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 15b831113ded..e559e43c8ac2 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -94,6 +94,7 @@ config IOMMU_DMA
bool
select IOMMU_API
select IOMMU_IOVA
+ select IRQ_MSI_IOMMU
select NEED_SG_DMA_LENGTH
config FSL_PAMU
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 5e898047c390..129c4badf9ae 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -907,17 +907,18 @@ out_free_page:
return NULL;
}
-void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
+int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
- struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
+ struct device *dev = msi_desc_to_dev(desc);
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct iommu_dma_cookie *cookie;
struct iommu_dma_msi_page *msi_page;
- phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
unsigned long flags;
- if (!domain || !domain->iova_cookie)
- return;
+ if (!domain || !domain->iova_cookie) {
+ desc->iommu_cookie = NULL;
+ return 0;
+ }
cookie = domain->iova_cookie;
@@ -930,19 +931,26 @@ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
spin_unlock_irqrestore(&cookie->msi_lock, flags);
- if (WARN_ON(!msi_page)) {
- /*
- * We're called from a void callback, so the best we can do is
- * 'fail' by filling the message with obviously bogus values.
- * Since we got this far due to an IOMMU being present, it's
- * not like the existing address would have worked anyway...
- */
- msg->address_hi = ~0U;
- msg->address_lo = ~0U;
- msg->data = ~0U;
- } else {
- msg->address_hi = upper_32_bits(msi_page->iova);
- msg->address_lo &= cookie_msi_granule(cookie) - 1;
- msg->address_lo += lower_32_bits(msi_page->iova);
- }
+ msi_desc_set_iommu_cookie(desc, msi_page);
+
+ if (!msi_page)
+ return -ENOMEM;
+ return 0;
+}
+
+void iommu_dma_compose_msi_msg(struct msi_desc *desc,
+ struct msi_msg *msg)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ const struct iommu_dma_msi_page *msi_page;
+
+ msi_page = msi_desc_get_iommu_cookie(desc);
+
+ if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
+ return;
+
+ msg->address_hi = upper_32_bits(msi_page->iova);
+ msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
+ msg->address_lo += lower_32_bits(msi_page->iova);
}
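The rework splits the old iommu_dma_map_msi_msg() into two phases: iommu_dma_prepare_msi() runs at IRQ-domain allocation time, where sleeping and mapping the MSI doorbell are allowed, and stashes the result in the msi_desc cookie; iommu_dma_compose_msi_msg() later rewrites the message address from that cookie and can no longer fail. A condensed sketch of how an MSI irqchip uses the pair, mirroring the GICv2m/ITS/MBI hunks below; the doorbell constant and foo_* names are illustrative:

#include <linux/dma-iommu.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>

#define FOO_DOORBELL_PHYS	0x10000000UL	/* hypothetical doorbell address */

static int foo_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *args)
{
	msi_alloc_info_t *info = args;

	/* sleepable context: map (or look up) the doorbell in the IOMMU */
	return iommu_dma_prepare_msi(info->desc, FOO_DOORBELL_PHYS);
}

static void foo_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	msg->address_hi = upper_32_bits(FOO_DOORBELL_PHYS);
	msg->address_lo = lower_32_bits(FOO_DOORBELL_PHYS);
	msg->data = d->hwirq;

	/* atomic context: only rewrites the address from the stored cookie */
	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
}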
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 5438abb1baba..1c1f3f66dfd3 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -6,7 +6,6 @@ config IRQCHIP
config ARM_GIC
bool
- select IRQ_DOMAIN
select IRQ_DOMAIN_HIERARCHY
select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
@@ -33,7 +32,6 @@ config GIC_NON_BANKED
config ARM_GIC_V3
bool
- select IRQ_DOMAIN
select GENERIC_IRQ_MULTI_HANDLER
select IRQ_DOMAIN_HIERARCHY
select PARTITION_PERCPU
@@ -59,7 +57,6 @@ config ARM_GIC_V3_ITS_FSL_MC
config ARM_NVIC
bool
- select IRQ_DOMAIN
select IRQ_DOMAIN_HIERARCHY
select GENERIC_IRQ_CHIP
@@ -160,6 +157,12 @@ config IMGPDC_IRQ
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
+config IXP4XX_IRQ
+ bool
+ select IRQ_DOMAIN
+ select GENERIC_IRQ_MULTI_HANDLER
+ select SPARSE_IRQ
+
config MADERA_IRQ
tristate
@@ -352,7 +355,6 @@ config STM32_EXTI
config QCOM_IRQ_COMBINER
bool "QCOM IRQ combiner support"
depends on ARCH_QCOM && ACPI
- select IRQ_DOMAIN
select IRQ_DOMAIN_HIERARCHY
help
Say yes here to add support for the IRQ combiner devices embedded
@@ -369,7 +371,6 @@ config IRQ_UNIPHIER_AIDET
config MESON_IRQ_GPIO
bool "Meson GPIO Interrupt Multiplexer"
depends on ARCH_MESON
- select IRQ_DOMAIN
select IRQ_DOMAIN_HIERARCHY
help
Support Meson SoC Family GPIO Interrupt Multiplexer
@@ -385,7 +386,6 @@ config GOLDFISH_PIC
config QCOM_PDC
bool "QCOM PDC"
depends on ARCH_QCOM
- select IRQ_DOMAIN
select IRQ_DOMAIN_HIERARCHY
help
Power Domain Controller driver to manage and configure wakeup
@@ -425,6 +425,27 @@ config LS1X_IRQ
help
Support for the Loongson-1 platform Interrupt Controller.
+config TI_SCI_INTR_IRQCHIP
+ bool
+ depends on TI_SCI_PROTOCOL
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ This enables the irqchip driver support for K3 Interrupt router
+ over TI System Control Interface available on some new TI's SoCs.
+ If you wish to use interrupt router irq resources managed by the
+ TI System Controller, say Y here. Otherwise, say N.
+
+config TI_SCI_INTA_IRQCHIP
+ bool
+ depends on TI_SCI_PROTOCOL
+ select IRQ_DOMAIN_HIERARCHY
+ select TI_SCI_INTA_MSI_DOMAIN
+ help
+ This enables the irqchip driver support for K3 Interrupt aggregator
+ over TI System Control Interface available on some new TI's SoCs.
+ If you wish to use interrupt aggregator irq resources managed by the
+ TI System Controller, say Y here. Otherwise, say N.
+
endmenu
config SIFIVE_PLIC
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 85972ae1bd7f..606a003a0000 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_ATMEL_AIC5_IRQ) += irq-atmel-aic-common.o irq-atmel-aic5.o
obj-$(CONFIG_I8259) += irq-i8259.o
obj-$(CONFIG_IMGPDC_IRQ) += irq-imgpdc.o
obj-$(CONFIG_IRQ_MIPS_CPU) += irq-mips-cpu.o
+obj-$(CONFIG_IXP4XX_IRQ) += irq-ixp4xx.o
obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o
obj-$(CONFIG_JCORE_AIC) += irq-jcore-aic.o
obj-$(CONFIG_RDA_INTC) += irq-rda-intc.o
@@ -97,3 +98,5 @@ obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o
obj-$(CONFIG_MADERA_IRQ) += irq-madera.o
obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o
+obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o
+obj-$(CONFIG_TI_SCI_INTA_IRQCHIP) += irq-ti-sci-inta.o
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index 0f6e30e9009d..0acebac1920b 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -343,6 +343,9 @@ int __init bcm7038_l1_of_init(struct device_node *dn,
goto out_unmap;
}
+ pr_info("registered BCM7038 L1 intc (%pOF, IRQs: %d)\n",
+ dn, IRQS_PER_WORD * intc->n_words);
+
return 0;
out_unmap:
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index 8968e5e93fcb..541bdca9f4af 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -318,6 +318,9 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
}
}
+ pr_info("registered %s intc (%pOF, parent IRQ(s): %d)\n",
+ intc_name, dn, data->num_parent_irqs);
+
return 0;
out_free_domain:
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 5e4ca139e4ea..a0642b59befa 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -264,6 +264,8 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
ct->chip.irq_set_wake = irq_gc_set_wake;
}
+ pr_info("registered L2 intc (%pOF, parent irq: %d)\n", np, parent_irq);
+
return 0;
out_free_domain:
diff --git a/drivers/irqchip/irq-gic-pm.c b/drivers/irqchip/irq-gic-pm.c
index ecafd295c31c..c4aac0977d8a 100644
--- a/drivers/irqchip/irq-gic-pm.c
+++ b/drivers/irqchip/irq-gic-pm.c
@@ -19,7 +19,6 @@
#include <linux/of_irq.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/platform_device.h>
-#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -28,17 +27,27 @@ struct gic_clk_data {
const char *const *clocks;
};
+struct gic_chip_pm {
+ struct gic_chip_data *chip_data;
+ const struct gic_clk_data *clk_data;
+ struct clk_bulk_data *clks;
+};
+
static int gic_runtime_resume(struct device *dev)
{
- struct gic_chip_data *gic = dev_get_drvdata(dev);
+ struct gic_chip_pm *chip_pm = dev_get_drvdata(dev);
+ struct gic_chip_data *gic = chip_pm->chip_data;
+ const struct gic_clk_data *data = chip_pm->clk_data;
int ret;
- ret = pm_clk_resume(dev);
- if (ret)
+ ret = clk_bulk_prepare_enable(data->num_clocks, chip_pm->clks);
+ if (ret) {
+ dev_err(dev, "clk_enable failed: %d\n", ret);
return ret;
+ }
/*
- * On the very first resume, the pointer to the driver data
+ * On the very first resume, the pointer to chip_pm->chip_data
* will be NULL and this is intentional, because we do not
* want to restore the GIC on the very first resume. So if
* the pointer is not valid just return.
@@ -54,35 +63,14 @@ static int gic_runtime_resume(struct device *dev)
static int gic_runtime_suspend(struct device *dev)
{
- struct gic_chip_data *gic = dev_get_drvdata(dev);
+ struct gic_chip_pm *chip_pm = dev_get_drvdata(dev);
+ struct gic_chip_data *gic = chip_pm->chip_data;
+ const struct gic_clk_data *data = chip_pm->clk_data;
gic_dist_save(gic);
gic_cpu_save(gic);
- return pm_clk_suspend(dev);
-}
-
-static int gic_get_clocks(struct device *dev, const struct gic_clk_data *data)
-{
- unsigned int i;
- int ret;
-
- if (!dev || !data)
- return -EINVAL;
-
- ret = pm_clk_create(dev);
- if (ret)
- return ret;
-
- for (i = 0; i < data->num_clocks; i++) {
- ret = of_pm_clk_add_clk(dev, data->clocks[i]);
- if (ret) {
- dev_err(dev, "failed to add clock %s\n",
- data->clocks[i]);
- pm_clk_destroy(dev);
- return ret;
- }
- }
+ clk_bulk_disable_unprepare(data->num_clocks, chip_pm->clks);
return 0;
}
@@ -91,8 +79,8 @@ static int gic_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct gic_clk_data *data;
- struct gic_chip_data *gic;
- int ret, irq;
+ struct gic_chip_pm *chip_pm;
+ int ret, irq, i;
data = of_device_get_match_data(&pdev->dev);
if (!data) {
@@ -100,28 +88,41 @@ static int gic_probe(struct platform_device *pdev)
return -ENODEV;
}
+ chip_pm = devm_kzalloc(dev, sizeof(*chip_pm), GFP_KERNEL);
+ if (!chip_pm)
+ return -ENOMEM;
+
irq = irq_of_parse_and_map(dev->of_node, 0);
if (!irq) {
dev_err(dev, "no parent interrupt found!\n");
return -EINVAL;
}
- ret = gic_get_clocks(dev, data);
+ chip_pm->clks = devm_kcalloc(dev, data->num_clocks,
+ sizeof(*chip_pm->clks), GFP_KERNEL);
+ if (!chip_pm->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < data->num_clocks; i++)
+ chip_pm->clks[i].id = data->clocks[i];
+
+ ret = devm_clk_bulk_get(dev, data->num_clocks, chip_pm->clks);
if (ret)
goto irq_dispose;
+ chip_pm->clk_data = data;
+ dev_set_drvdata(dev, chip_pm);
+
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto rpm_disable;
- ret = gic_of_init_child(dev, &gic, irq);
+ ret = gic_of_init_child(dev, &chip_pm->chip_data, irq);
if (ret)
goto rpm_put;
- platform_set_drvdata(pdev, gic);
-
pm_runtime_put(dev);
dev_info(dev, "GIC IRQ controller registered\n");
@@ -132,7 +133,6 @@ rpm_put:
pm_runtime_put_sync(dev);
rpm_disable:
pm_runtime_disable(dev);
- pm_clk_destroy(dev);
irq_dispose:
irq_dispose_mapping(irq);
@@ -142,6 +142,8 @@ irq_dispose:
static const struct dev_pm_ops gic_pm_ops = {
SET_RUNTIME_PM_OPS(gic_runtime_suspend,
gic_runtime_resume, NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static const char * const gic400_clocks[] = {
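The gic-pm rework replaces the pm_clk_* infrastructure with the clk_bulk API: the clock names from the match data are copied into a clk_bulk_data array once, and runtime suspend/resume then toggle the whole group with a single call. The basic shape of that pattern, assuming a hypothetical device with two illustrative clock names:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static const char * const foo_clock_names[] = { "clk", "pclk" };

static int foo_enable_clocks(struct device *dev)
{
	struct clk_bulk_data *clks;
	int num = ARRAY_SIZE(foo_clock_names);
	int i, ret;

	clks = devm_kcalloc(dev, num, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	for (i = 0; i < num; i++)
		clks[i].id = foo_clock_names[i];

	ret = devm_clk_bulk_get(dev, num, clks);
	if (ret)
		return ret;

	/* one call enables the group; pair with clk_bulk_disable_unprepare() */
	return clk_bulk_prepare_enable(num, clks);
}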
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index de14e06fd9ec..3c77ab676e54 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -110,7 +110,7 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
msg->data -= v2m->spi_offset;
- iommu_dma_map_msi_msg(data->irq, msg);
+ iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
}
static struct irq_chip gicv2m_irq_chip = {
@@ -167,6 +167,7 @@ static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq,
static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *args)
{
+ msi_alloc_info_t *info = args;
struct v2m_data *v2m = NULL, *tmp;
int hwirq, offset, i, err = 0;
@@ -186,6 +187,11 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
hwirq = v2m->spi_start + offset;
+ err = iommu_dma_prepare_msi(info->desc,
+ v2m->res.start + V2M_MSI_SETSPI_NS);
+ if (err)
+ return err;
+
for (i = 0; i < nr_irqs; i++) {
err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
if (err)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 128ac893d7e4..cfb9b4e5f914 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -26,7 +26,6 @@
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
-#include <linux/list_sort.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/mm.h>
@@ -1179,7 +1178,7 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
msg->address_hi = upper_32_bits(addr);
msg->data = its_get_event_id(d);
- iommu_dma_map_msi_msg(d->irq, msg);
+ iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
}
static int its_irq_set_irqchip_state(struct irq_data *d,
@@ -1465,9 +1464,8 @@ static struct lpi_range *mk_lpi_range(u32 base, u32 span)
{
struct lpi_range *range;
- range = kzalloc(sizeof(*range), GFP_KERNEL);
+ range = kmalloc(sizeof(*range), GFP_KERNEL);
if (range) {
- INIT_LIST_HEAD(&range->entry);
range->base_id = base;
range->span = span;
}
@@ -1475,31 +1473,6 @@ static struct lpi_range *mk_lpi_range(u32 base, u32 span)
return range;
}
-static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
-{
- struct lpi_range *ra, *rb;
-
- ra = container_of(a, struct lpi_range, entry);
- rb = container_of(b, struct lpi_range, entry);
-
- return ra->base_id - rb->base_id;
-}
-
-static void merge_lpi_ranges(void)
-{
- struct lpi_range *range, *tmp;
-
- list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
- if (!list_is_last(&range->entry, &lpi_range_list) &&
- (tmp->base_id == (range->base_id + range->span))) {
- tmp->base_id = range->base_id;
- tmp->span += range->span;
- list_del(&range->entry);
- kfree(range);
- }
- }
-}
-
static int alloc_lpi_range(u32 nr_lpis, u32 *base)
{
struct lpi_range *range, *tmp;
@@ -1529,25 +1502,49 @@ static int alloc_lpi_range(u32 nr_lpis, u32 *base)
return err;
}
+static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
+{
+ if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
+ return;
+ if (a->base_id + a->span != b->base_id)
+ return;
+ b->base_id = a->base_id;
+ b->span += a->span;
+ list_del(&a->entry);
+ kfree(a);
+}
+
static int free_lpi_range(u32 base, u32 nr_lpis)
{
- struct lpi_range *new;
- int err = 0;
+ struct lpi_range *new, *old;
+
+ new = mk_lpi_range(base, nr_lpis);
+ if (!new)
+ return -ENOMEM;
mutex_lock(&lpi_range_lock);
- new = mk_lpi_range(base, nr_lpis);
- if (!new) {
- err = -ENOMEM;
- goto out;
+ list_for_each_entry_reverse(old, &lpi_range_list, entry) {
+ if (old->base_id < base)
+ break;
}
+ /*
+ * old is the last element with ->base_id smaller than base,
+ * so new goes right after it. If there are no elements with
+ * ->base_id smaller than base, &old->entry ends up pointing
+ * at the head of the list, and inserting new at the start of
+ * the list is the right thing to do in that case as well.
+ */
+ list_add(&new->entry, &old->entry);
+ /*
+ * Now check if we can merge with the preceding and/or
+ * following ranges.
+ */
+ merge_lpi_ranges(old, new);
+ merge_lpi_ranges(new, list_next_entry(new, entry));
- list_add(&new->entry, &lpi_range_list);
- list_sort(NULL, &lpi_range_list, lpi_range_cmp);
- merge_lpi_ranges();
-out:
mutex_unlock(&lpi_range_lock);
- return err;
+ return 0;
}
static int __init its_lpi_init(u32 id_bits)
@@ -2487,7 +2484,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
int err = 0;
/*
- * We ignore "dev" entierely, and rely on the dev_id that has
+ * We ignore "dev" entirely, and rely on the dev_id that has
* been passed via the scratchpad. This limits this domain's
* usefulness to upper layers that definitely know that they
* are built on top of the ITS.
@@ -2566,6 +2563,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
{
msi_alloc_info_t *info = args;
struct its_device *its_dev = info->scratchpad[0].ptr;
+ struct its_node *its = its_dev->its;
irq_hw_number_t hwirq;
int err;
int i;
@@ -2574,6 +2572,10 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
if (err)
return err;
+ err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
+ if (err)
+ return err;
+
for (i = 0; i < nr_irqs; i++) {
err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
if (err)
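free_lpi_range() now keeps lpi_range_list sorted by inserting the freed range after the last entry with a smaller base and merging it with its immediate neighbours, instead of appending, running list_sort() and rescanning the whole list; mk_lpi_range() can drop the INIT_LIST_HEAD() because the entry is linked right away. As a toy model (values illustrative, not allocator internals): freeing [8192, +8192) into a list holding [0, +8192) and [16384, +8192) triggers both neighbour merges and collapses everything into [0, +24576):

#include <stdio.h>

/* Toy model of the new free path: ranges already in sorted order,
 * merged with immediate neighbours only.
 */
struct range { unsigned int base, span; };

int main(void)
{
	struct range list[3] = { { 0, 8192 }, { 8192, 8192 }, { 16384, 8192 } };
	struct range out = list[0];
	int i;

	/* list[1] is the freshly freed block, already in sorted position */
	for (i = 1; i < 3; i++) {
		if (out.base + out.span == list[i].base)
			out.span += list[i].span;	/* neighbour merge */
	}
	printf("merged: [%u, +%u)\n", out.base, out.span);
	return 0;
}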
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index fbfa7ff6deb1..563a9b366294 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -84,6 +84,7 @@ static void mbi_free_msi(struct mbi_range *mbi, unsigned int hwirq,
static int mbi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *args)
{
+ msi_alloc_info_t *info = args;
struct mbi_range *mbi = NULL;
int hwirq, offset, i, err = 0;
@@ -104,6 +105,11 @@ static int mbi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
hwirq = mbi->spi_start + offset;
+ err = iommu_dma_prepare_msi(info->desc,
+ mbi_phys_base + GICD_SETSPI_NSR);
+ if (err)
+ return err;
+
for (i = 0; i < nr_irqs; i++) {
err = mbi_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
if (err)
@@ -142,7 +148,7 @@ static void mbi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg[0].address_lo = lower_32_bits(mbi_phys_base + GICD_SETSPI_NSR);
msg[0].data = data->parent_data->hwirq;
- iommu_dma_map_msi_msg(data->irq, msg);
+ iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
}
#ifdef CONFIG_PCI_MSI
@@ -202,7 +208,7 @@ static void mbi_compose_mbi_msg(struct irq_data *data, struct msi_msg *msg)
msg[1].address_lo = lower_32_bits(mbi_phys_base + GICD_CLRSPI_NSR);
msg[1].data = data->parent_data->hwirq;
- iommu_dma_map_msi_msg(data->irq, &msg[1]);
+ iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), &msg[1]);
}
/* Platform-MSI specific irqchip */
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index 88df3d00052c..290531ec3d61 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -144,7 +144,6 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct irqsteer_data *data;
- struct resource *res;
u32 irqs_num;
int i, ret;
@@ -152,8 +151,7 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
if (!data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->regs = devm_ioremap_resource(&pdev->dev, res);
+ data->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->regs)) {
dev_err(&pdev->dev, "failed to initialize reg\n");
return PTR_ERR(data->regs);
diff --git a/drivers/irqchip/irq-ixp4xx.c b/drivers/irqchip/irq-ixp4xx.c
new file mode 100644
index 000000000000..d576809429ac
--- /dev/null
+++ b/drivers/irqchip/irq-ixp4xx.c
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * irqchip for the IXP4xx interrupt controller
+ * Copyright (C) 2019 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * Based on arch/arm/mach-ixp4xx/common.c
+ * Copyright 2002 (C) Intel Corporation
+ * Copyright 2003-2004 (C) MontaVista, Software, Inc.
+ * Copyright (C) Deepak Saxena <dsaxena@plexity.net>
+ */
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/irq-ixp4xx.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/cpu.h>
+
+#include <asm/exception.h>
+#include <asm/mach/irq.h>
+
+#define IXP4XX_ICPR 0x00 /* Interrupt Status */
+#define IXP4XX_ICMR 0x04 /* Interrupt Enable */
+#define IXP4XX_ICLR 0x08 /* Interrupt IRQ/FIQ Select */
+#define IXP4XX_ICIP 0x0C /* IRQ Status */
+#define IXP4XX_ICFP 0x10 /* FIQ Status */
+#define IXP4XX_ICHR 0x14 /* Interrupt Priority */
+#define IXP4XX_ICIH 0x18 /* IRQ Highest Pri Int */
+#define IXP4XX_ICFH 0x1C /* FIQ Highest Pri Int */
+
+/* IXP43x and IXP46x-only */
+#define IXP4XX_ICPR2 0x20 /* Interrupt Status 2 */
+#define IXP4XX_ICMR2 0x24 /* Interrupt Enable 2 */
+#define IXP4XX_ICLR2 0x28 /* Interrupt IRQ/FIQ Select 2 */
+#define IXP4XX_ICIP2 0x2C /* IRQ Status */
+#define IXP4XX_ICFP2 0x30 /* FIQ Status */
+#define IXP4XX_ICEEN 0x34 /* Error High Pri Enable */
+
+/**
+ * struct ixp4xx_irq - state container for the IXP4xx IRQ controller
+ * @irqbase: IRQ controller memory base in virtual memory
+ * @is_356: if this is an IXP43x, IXP45x or IXP46x SoC (with 64 IRQs)
+ * @irqchip: irqchip for this instance
+ * @domain: IRQ domain for this instance
+ */
+struct ixp4xx_irq {
+ void __iomem *irqbase;
+ bool is_356;
+ struct irq_chip irqchip;
+ struct irq_domain *domain;
+};
+
+/* Local static state container */
+static struct ixp4xx_irq ixirq;
+
+/* GPIO Clocks */
+#define IXP4XX_GPIO_CLK_0 14
+#define IXP4XX_GPIO_CLK_1 15
+
+static int ixp4xx_set_irq_type(struct irq_data *d, unsigned int type)
+{
+ /* All are level active high (asserted) here */
+ if (type != IRQ_TYPE_LEVEL_HIGH)
+ return -EINVAL;
+ return 0;
+}
+
+static void ixp4xx_irq_mask(struct irq_data *d)
+{
+ struct ixp4xx_irq *ixi = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ if (ixi->is_356 && d->hwirq >= 32) {
+ val = __raw_readl(ixi->irqbase + IXP4XX_ICMR2);
+ val &= ~BIT(d->hwirq - 32);
+ __raw_writel(val, ixi->irqbase + IXP4XX_ICMR2);
+ } else {
+ val = __raw_readl(ixi->irqbase + IXP4XX_ICMR);
+ val &= ~BIT(d->hwirq);
+ __raw_writel(val, ixi->irqbase + IXP4XX_ICMR);
+ }
+}
+
+/*
+ * Level triggered interrupts on GPIO lines can only be cleared when the
+ * interrupt condition disappears.
+ */
+static void ixp4xx_irq_unmask(struct irq_data *d)
+{
+ struct ixp4xx_irq *ixi = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ if (ixi->is_356 && d->hwirq >= 32) {
+ val = __raw_readl(ixi->irqbase + IXP4XX_ICMR2);
+ val |= BIT(d->hwirq - 32);
+ __raw_writel(val, ixi->irqbase + IXP4XX_ICMR2);
+ } else {
+ val = __raw_readl(ixi->irqbase + IXP4XX_ICMR);
+ val |= BIT(d->hwirq);
+ __raw_writel(val, ixi->irqbase + IXP4XX_ICMR);
+ }
+}
+
+asmlinkage void __exception_irq_entry ixp4xx_handle_irq(struct pt_regs *regs)
+{
+ struct ixp4xx_irq *ixi = &ixirq;
+ unsigned long status;
+ int i;
+
+ status = __raw_readl(ixi->irqbase + IXP4XX_ICIP);
+ for_each_set_bit(i, &status, 32)
+ handle_domain_irq(ixi->domain, i, regs);
+
+ /*
+ * IXP465/IXP435 has an upper IRQ status register
+ */
+ if (ixi->is_356) {
+ status = __raw_readl(ixi->irqbase + IXP4XX_ICIP2);
+ for_each_set_bit(i, &status, 32)
+ handle_domain_irq(ixi->domain, i + 32, regs);
+ }
+}
+
+static int ixp4xx_irq_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ /* We support standard DT translation */
+ if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ return 0;
+ }
+
+ if (is_fwnode_irqchip(fwspec->fwnode)) {
+ if (fwspec->param_count != 2)
+ return -EINVAL;
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ WARN_ON(*type == IRQ_TYPE_NONE);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int ixp4xx_irq_domain_alloc(struct irq_domain *d,
+ unsigned int irq, unsigned int nr_irqs,
+ void *data)
+{
+ struct ixp4xx_irq *ixi = d->host_data;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ struct irq_fwspec *fwspec = data;
+ int ret;
+ int i;
+
+ ret = ixp4xx_irq_domain_translate(d, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ /*
+ * TODO: after converting IXP4xx to only device tree, set
+ * handle_bad_irq as default handler and assume all consumers
+ * call .set_type() as this is provided in the second cell in
+ * the device tree phandle.
+ */
+ irq_domain_set_info(d,
+ irq + i,
+ hwirq + i,
+ &ixi->irqchip,
+ ixi,
+ handle_level_irq,
+ NULL, NULL);
+ irq_set_probe(irq + i);
+ }
+
+ return 0;
+}
+
+/*
+ * This needs to be a hierarchical irqdomain to work well with the
+ * GPIO irqchip (which is lower in the hierarchy)
+ */
+static const struct irq_domain_ops ixp4xx_irqdomain_ops = {
+ .translate = ixp4xx_irq_domain_translate,
+ .alloc = ixp4xx_irq_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+/**
+ * ixp4xx_get_irq_domain() - retrieve the ixp4xx irq domain
+ *
+ * This function will go away when we transition to DT probing.
+ */
+struct irq_domain *ixp4xx_get_irq_domain(void)
+{
+ struct ixp4xx_irq *ixi = &ixirq;
+
+ return ixi->domain;
+}
+EXPORT_SYMBOL_GPL(ixp4xx_get_irq_domain);
+
+/*
+ * This is the Linux IRQ to hwirq mapping table. It goes away once we
+ * have DT support, as all IRQ resources are then defined in the device
+ * tree. It registers all the IRQs that are not used by the hierarchical
+ * GPIO IRQ chip. The "holes" in between these IRQs will be requested by
+ * the GPIO driver. This is a stop-gap solution.
+ */
+struct ixp4xx_irq_chunk {
+ int irq;
+ int hwirq;
+ int nr_irqs;
+};
+
+static const struct ixp4xx_irq_chunk ixp4xx_irq_chunks[] = {
+ {
+ .irq = 16,
+ .hwirq = 0,
+ .nr_irqs = 6,
+ },
+ {
+ .irq = 24,
+ .hwirq = 8,
+ .nr_irqs = 11,
+ },
+ {
+ .irq = 46,
+ .hwirq = 30,
+ .nr_irqs = 2,
+ },
+ /* Only on the 436 variants */
+ {
+ .irq = 48,
+ .hwirq = 32,
+ .nr_irqs = 10,
+ },
+};
+
+/**
+ * ixp4xx_irq_setup() - Common setup code for the IXP4xx interrupt controller
+ * @ixi: State container
+ * @irqbase: Virtual memory base for the interrupt controller
+ * @fwnode: Corresponding fwnode abstraction for this controller
+ * @is_356: if this is an IXP43x, IXP45x or IXP46x SoC variant
+ */
+static int ixp4xx_irq_setup(struct ixp4xx_irq *ixi,
+ void __iomem *irqbase,
+ struct fwnode_handle *fwnode,
+ bool is_356)
+{
+ int nr_irqs;
+
+ ixi->irqbase = irqbase;
+ ixi->is_356 = is_356;
+
+ /* Route all sources to IRQ instead of FIQ */
+ __raw_writel(0x0, ixi->irqbase + IXP4XX_ICLR);
+
+ /* Disable all interrupts */
+ __raw_writel(0x0, ixi->irqbase + IXP4XX_ICMR);
+
+ if (is_356) {
+ /* Route upper 32 sources to IRQ instead of FIQ */
+ __raw_writel(0x0, ixi->irqbase + IXP4XX_ICLR2);
+
+ /* Disable upper 32 interrupts */
+ __raw_writel(0x0, ixi->irqbase + IXP4XX_ICMR2);
+
+ nr_irqs = 64;
+ } else {
+ nr_irqs = 32;
+ }
+
+ ixi->irqchip.name = "IXP4xx";
+ ixi->irqchip.irq_mask = ixp4xx_irq_mask;
+ ixi->irqchip.irq_unmask = ixp4xx_irq_unmask;
+ ixi->irqchip.irq_set_type = ixp4xx_set_irq_type;
+
+ ixi->domain = irq_domain_create_linear(fwnode, nr_irqs,
+ &ixp4xx_irqdomain_ops,
+ ixi);
+ if (!ixi->domain) {
+ pr_crit("IXP4XX: can not add primary irqdomain\n");
+ return -ENODEV;
+ }
+
+ set_handle_irq(ixp4xx_handle_irq);
+
+ return 0;
+}
+
+/**
+ * ixp4xx_irq_init() - Function to initialize the irqchip from boardfiles
+ * @irqbase: physical base for the irq controller
+ * @is_356: if this is an IXP43x, IXP45x or IXP46x SoC variant
+ */
+void __init ixp4xx_irq_init(resource_size_t irqbase,
+ bool is_356)
+{
+ struct ixp4xx_irq *ixi = &ixirq;
+ void __iomem *base;
+ struct fwnode_handle *fwnode;
+ struct irq_fwspec fwspec;
+ int nr_chunks;
+ int ret;
+ int i;
+
+ base = ioremap(irqbase, 0x100);
+ if (!base) {
+ pr_crit("IXP4XX: could not ioremap interrupt controller\n");
+ return;
+ }
+ fwnode = irq_domain_alloc_fwnode(base);
+ if (!fwnode) {
+ pr_crit("IXP4XX: no domain handle\n");
+ return;
+ }
+ ret = ixp4xx_irq_setup(ixi, base, fwnode, is_356);
+ if (ret) {
+ pr_crit("IXP4XX: failed to set up irqchip\n");
+ irq_domain_free_fwnode(fwnode);
+ }
+
+ nr_chunks = ARRAY_SIZE(ixp4xx_irq_chunks);
+ if (!is_356)
+ nr_chunks--;
+
+ /*
+ * After adding OF support, this is no longer needed: irqs
+ * will be allocated for the respective fwnodes.
+ */
+ for (i = 0; i < nr_chunks; i++) {
+ const struct ixp4xx_irq_chunk *chunk = &ixp4xx_irq_chunks[i];
+
+ pr_info("Allocate Linux IRQs %d..%d HW IRQs %d..%d\n",
+ chunk->irq, chunk->irq + chunk->nr_irqs - 1,
+ chunk->hwirq, chunk->hwirq + chunk->nr_irqs - 1);
+ fwspec.fwnode = fwnode;
+ fwspec.param[0] = chunk->hwirq;
+ fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
+ fwspec.param_count = 2;
+ ret = __irq_domain_alloc_irqs(ixi->domain,
+ chunk->irq,
+ chunk->nr_irqs,
+ NUMA_NO_NODE,
+ &fwspec,
+ false,
+ NULL);
+ if (ret < 0) {
+ pr_crit("IXP4XX: can not allocate irqs in hierarchy %d\n",
+ ret);
+ return;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(ixp4xx_irq_init);
+
+#ifdef CONFIG_OF
+int __init ixp4xx_of_init_irq(struct device_node *np,
+ struct device_node *parent)
+{
+ struct ixp4xx_irq *ixi = &ixirq;
+ void __iomem *base;
+ struct fwnode_handle *fwnode;
+ bool is_356;
+ int ret;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_crit("IXP4XX: could not ioremap interrupt controller\n");
+ return -ENODEV;
+ }
+ fwnode = of_node_to_fwnode(np);
+
+ /* These chip variants have 64 interrupts */
+ is_356 = of_device_is_compatible(np, "intel,ixp43x-interrupt") ||
+ of_device_is_compatible(np, "intel,ixp45x-interrupt") ||
+ of_device_is_compatible(np, "intel,ixp46x-interrupt");
+
+ ret = ixp4xx_irq_setup(ixi, base, fwnode, is_356);
+ if (ret)
+ pr_crit("IXP4XX: failed to set up irqchip\n");
+
+ return ret;
+}
+IRQCHIP_DECLARE(ixp42x, "intel,ixp42x-interrupt",
+ ixp4xx_of_init_irq);
+IRQCHIP_DECLARE(ixp43x, "intel,ixp43x-interrupt",
+ ixp4xx_of_init_irq);
+IRQCHIP_DECLARE(ixp45x, "intel,ixp45x-interrupt",
+ ixp4xx_of_init_irq);
+IRQCHIP_DECLARE(ixp46x, "intel,ixp46x-interrupt",
+ ixp4xx_of_init_irq);
+#endif
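
A minimal board-file usage sketch for the exported ixp4xx_irq_init() added above (not part of the patch; the physical base address and the board hook name are illustrative assumptions, only the function signature comes from the driver):

#include <linux/irqchip/irq-ixp4xx.h>

/* Assumed example address; the real platform macro is not defined in this patch */
#define EXAMPLE_IXP4XX_INTC_PHYS	0xc8003000

static void __init example_board_init_irq(void)
{
	/* An IXP42x variant has only 32 IRQs, hence is_356 = false */
	ixp4xx_irq_init(EXAMPLE_IXP4XX_INTC_PHYS, false);
}

Passing is_356 = true instead selects the 64-IRQ register layout used on the IXP43x/45x/46x variants.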
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
index c671b3212010..669d29105772 100644
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -100,7 +100,7 @@ static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
msg->data |= cpumask_first(mask);
}
- iommu_dma_map_msi_msg(data->irq, msg);
+ iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
}
static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
@@ -141,6 +141,7 @@ static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
unsigned int nr_irqs,
void *args)
{
+ msi_alloc_info_t *info = args;
struct ls_scfg_msi *msi_data = domain->host_data;
int pos, err = 0;
@@ -157,6 +158,10 @@ static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
if (err)
return err;
+ err = iommu_dma_prepare_msi(info->desc, msi_data->msiir_addr);
+ if (err)
+ return err;
+
irq_domain_set_info(domain, virq, pos,
&ls_scfg_msi_parent_chip, msi_data,
handle_simple_irq, NULL, NULL);
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 8c039525703f..04c05a18600c 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -389,10 +389,8 @@ static int intc_irqpin_probe(struct platform_device *pdev)
int k;
p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
- if (!p) {
- dev_err(dev, "failed to allocate driver data\n");
+ if (!p)
return -ENOMEM;
- }
/* deal with driver instance configuration */
of_property_read_u32(dev->of_node, "sense-bitfield-width",
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 7bd1d4cb2e19..e00f2fa27f00 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -14,8 +14,10 @@
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/of_platform.h>
#include <linux/syscore_ops.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -37,12 +39,6 @@ struct stm32_exti_bank {
#define UNDEF_REG ~0
-enum stm32_exti_hwspinlock {
- HWSPINLOCK_UNKNOWN,
- HWSPINLOCK_NONE,
- HWSPINLOCK_READY,
-};
-
struct stm32_desc_irq {
u32 exti;
u32 irq_parent;
@@ -69,8 +65,6 @@ struct stm32_exti_host_data {
void __iomem *base;
struct stm32_exti_chip_data *chips_data;
const struct stm32_exti_drv_data *drv_data;
- struct device_node *node;
- enum stm32_exti_hwspinlock hwlock_state;
struct hwspinlock *hwlock;
};
@@ -285,49 +279,27 @@ static int stm32_exti_set_type(struct irq_data *d,
static int stm32_exti_hwspin_lock(struct stm32_exti_chip_data *chip_data)
{
- struct stm32_exti_host_data *host_data = chip_data->host_data;
- struct hwspinlock *hwlock;
- int id, ret = 0, timeout = 0;
-
- /* first time, check for hwspinlock availability */
- if (unlikely(host_data->hwlock_state == HWSPINLOCK_UNKNOWN)) {
- id = of_hwspin_lock_get_id(host_data->node, 0);
- if (id >= 0) {
- hwlock = hwspin_lock_request_specific(id);
- if (hwlock) {
- /* found valid hwspinlock */
- host_data->hwlock_state = HWSPINLOCK_READY;
- host_data->hwlock = hwlock;
- pr_debug("%s hwspinlock = %d\n", __func__, id);
- } else {
- host_data->hwlock_state = HWSPINLOCK_NONE;
- }
- } else if (id != -EPROBE_DEFER) {
- host_data->hwlock_state = HWSPINLOCK_NONE;
- } else {
- /* hwspinlock driver shall be ready at that stage */
- ret = -EPROBE_DEFER;
- }
- }
+ int ret, timeout = 0;
- if (likely(host_data->hwlock_state == HWSPINLOCK_READY)) {
- /*
- * Use the x_raw API since we are under spin_lock protection.
- * Do not use the x_timeout API because we are under irq_disable
- * mode (see __setup_irq())
- */
- do {
- ret = hwspin_trylock_raw(host_data->hwlock);
- if (!ret)
- return 0;
-
- udelay(HWSPNLCK_RETRY_DELAY);
- timeout += HWSPNLCK_RETRY_DELAY;
- } while (timeout < HWSPNLCK_TIMEOUT);
-
- if (ret == -EBUSY)
- ret = -ETIMEDOUT;
- }
+ if (!chip_data->host_data->hwlock)
+ return 0;
+
+ /*
+ * Use the x_raw API since we are under spin_lock protection.
+ * Do not use the x_timeout API because we are under irq_disable
+ * mode (see __setup_irq())
+ */
+ do {
+ ret = hwspin_trylock_raw(chip_data->host_data->hwlock);
+ if (!ret)
+ return 0;
+
+ udelay(HWSPNLCK_RETRY_DELAY);
+ timeout += HWSPNLCK_RETRY_DELAY;
+ } while (timeout < HWSPNLCK_TIMEOUT);
+
+ if (ret == -EBUSY)
+ ret = -ETIMEDOUT;
if (ret)
pr_err("%s can't get hwspinlock (%d)\n", __func__, ret);
@@ -337,7 +309,7 @@ static int stm32_exti_hwspin_lock(struct stm32_exti_chip_data *chip_data)
static void stm32_exti_hwspin_unlock(struct stm32_exti_chip_data *chip_data)
{
- if (likely(chip_data->host_data->hwlock_state == HWSPINLOCK_READY))
+ if (chip_data->host_data->hwlock)
hwspin_unlock_raw(chip_data->host_data->hwlock);
}
@@ -586,8 +558,7 @@ static int stm32_exti_h_set_affinity(struct irq_data *d,
return -EINVAL;
}
-#ifdef CONFIG_PM
-static int stm32_exti_h_suspend(void)
+static int __maybe_unused stm32_exti_h_suspend(void)
{
struct stm32_exti_chip_data *chip_data;
int i;
@@ -602,7 +573,7 @@ static int stm32_exti_h_suspend(void)
return 0;
}
-static void stm32_exti_h_resume(void)
+static void __maybe_unused stm32_exti_h_resume(void)
{
struct stm32_exti_chip_data *chip_data;
int i;
@@ -616,17 +587,22 @@ static void stm32_exti_h_resume(void)
}
static struct syscore_ops stm32_exti_h_syscore_ops = {
+#ifdef CONFIG_PM_SLEEP
.suspend = stm32_exti_h_suspend,
.resume = stm32_exti_h_resume,
+#endif
};
-static void stm32_exti_h_syscore_init(void)
+static void stm32_exti_h_syscore_init(struct stm32_exti_host_data *host_data)
{
+ stm32_host_data = host_data;
register_syscore_ops(&stm32_exti_h_syscore_ops);
}
-#else
-static inline void stm32_exti_h_syscore_init(void) {}
-#endif
+
+static void stm32_exti_h_syscore_deinit(void)
+{
+ unregister_syscore_ops(&stm32_exti_h_syscore_ops);
+}
static struct irq_chip stm32_exti_h_chip = {
.name = "stm32-exti-h",
@@ -683,8 +659,6 @@ stm32_exti_host_data *stm32_exti_host_init(const struct stm32_exti_drv_data *dd,
return NULL;
host_data->drv_data = dd;
- host_data->node = node;
- host_data->hwlock_state = HWSPINLOCK_UNKNOWN;
host_data->chips_data = kcalloc(dd->bank_nr,
sizeof(struct stm32_exti_chip_data),
GFP_KERNEL);
@@ -711,7 +685,8 @@ free_host_data:
static struct
stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
- u32 bank_idx)
+ u32 bank_idx,
+ struct device_node *node)
{
const struct stm32_exti_bank *stm32_bank;
struct stm32_exti_chip_data *chip_data;
@@ -731,7 +706,7 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
writel_relaxed(0, base + stm32_bank->imr_ofst);
writel_relaxed(0, base + stm32_bank->emr_ofst);
- pr_info("%pOF: bank%d\n", h_data->node, bank_idx);
+ pr_info("%pOF: bank%d\n", node, bank_idx);
return chip_data;
}
@@ -771,7 +746,7 @@ static int __init stm32_exti_init(const struct stm32_exti_drv_data *drv_data,
struct stm32_exti_chip_data *chip_data;
stm32_bank = drv_data->exti_banks[i];
- chip_data = stm32_exti_chip_init(host_data, i);
+ chip_data = stm32_exti_chip_init(host_data, i, node);
gc = irq_get_domain_generic_chip(domain, i * IRQS_PER_BANK);
@@ -815,50 +790,130 @@ static const struct irq_domain_ops stm32_exti_h_domain_ops = {
.xlate = irq_domain_xlate_twocell,
};
-static int
-__init stm32_exti_hierarchy_init(const struct stm32_exti_drv_data *drv_data,
- struct device_node *node,
- struct device_node *parent)
+static void stm32_exti_remove_irq(void *data)
+{
+ struct irq_domain *domain = data;
+
+ irq_domain_remove(domain);
+}
+
+static int stm32_exti_remove(struct platform_device *pdev)
+{
+ stm32_exti_h_syscore_deinit();
+ return 0;
+}
+
+static int stm32_exti_probe(struct platform_device *pdev)
{
+ int ret, i;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct irq_domain *parent_domain, *domain;
struct stm32_exti_host_data *host_data;
- int ret, i;
+ const struct stm32_exti_drv_data *drv_data;
+ struct resource *res;
- parent_domain = irq_find_host(parent);
- if (!parent_domain) {
- pr_err("interrupt-parent not found\n");
- return -EINVAL;
+ host_data = devm_kzalloc(dev, sizeof(*host_data), GFP_KERNEL);
+ if (!host_data)
+ return -ENOMEM;
+
+ /* check for optional hwspinlock which may be not available yet */
+ ret = of_hwspin_lock_get_id(np, 0);
+ if (ret == -EPROBE_DEFER)
+ /* hwspinlock framework not yet ready */
+ return ret;
+
+ if (ret >= 0) {
+ host_data->hwlock = devm_hwspin_lock_request_specific(dev, ret);
+ if (!host_data->hwlock) {
+ dev_err(dev, "Failed to request hwspinlock\n");
+ return -EINVAL;
+ }
+ } else if (ret != -ENOENT) {
+ /* note: ENOENT is a valid case (means 'no hwspinlock') */
+ dev_err(dev, "Failed to get hwspinlock\n");
+ return ret;
}
- host_data = stm32_exti_host_init(drv_data, node);
- if (!host_data)
+ /* initialize host_data */
+ drv_data = of_device_get_match_data(dev);
+ if (!drv_data) {
+ dev_err(dev, "no of match data\n");
+ return -ENODEV;
+ }
+ host_data->drv_data = drv_data;
+
+ host_data->chips_data = devm_kcalloc(dev, drv_data->bank_nr,
+ sizeof(*host_data->chips_data),
+ GFP_KERNEL);
+ if (!host_data->chips_data)
return -ENOMEM;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host_data->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(host_data->base)) {
+ dev_err(dev, "Unable to map registers\n");
+ return PTR_ERR(host_data->base);
+ }
+
for (i = 0; i < drv_data->bank_nr; i++)
- stm32_exti_chip_init(host_data, i);
+ stm32_exti_chip_init(host_data, i, np);
+
+ parent_domain = irq_find_host(of_irq_find_parent(np));
+ if (!parent_domain) {
+ dev_err(dev, "GIC interrupt-parent not found\n");
+ return -EINVAL;
+ }
domain = irq_domain_add_hierarchy(parent_domain, 0,
drv_data->bank_nr * IRQS_PER_BANK,
- node, &stm32_exti_h_domain_ops,
+ np, &stm32_exti_h_domain_ops,
host_data);
if (!domain) {
- pr_err("%pOFn: Could not register exti domain.\n", node);
- ret = -ENOMEM;
- goto out_unmap;
+ dev_err(dev, "Could not register exti domain\n");
+ return -ENOMEM;
}
- stm32_exti_h_syscore_init();
+ ret = devm_add_action_or_reset(dev, stm32_exti_remove_irq, domain);
+ if (ret)
+ return ret;
+
+ stm32_exti_h_syscore_init(host_data);
return 0;
+}
-out_unmap:
- iounmap(host_data->base);
- kfree(host_data->chips_data);
- kfree(host_data);
- return ret;
+/* platform driver only for MP1 */
+static const struct of_device_id stm32_exti_ids[] = {
+ { .compatible = "st,stm32mp1-exti", .data = &stm32mp1_drv_data},
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32_exti_ids);
+
+static struct platform_driver stm32_exti_driver = {
+ .probe = stm32_exti_probe,
+ .remove = stm32_exti_remove,
+ .driver = {
+ .name = "stm32_exti",
+ .of_match_table = stm32_exti_ids,
+ },
+};
+
+static int __init stm32_exti_arch_init(void)
+{
+ return platform_driver_register(&stm32_exti_driver);
}
+static void __exit stm32_exti_arch_exit(void)
+{
+ return platform_driver_unregister(&stm32_exti_driver);
+}
+
+arch_initcall(stm32_exti_arch_init);
+module_exit(stm32_exti_arch_exit);
+
+/* no platform driver for F4 and H7 */
static int __init stm32f4_exti_of_init(struct device_node *np,
struct device_node *parent)
{
@@ -874,11 +929,3 @@ static int __init stm32h7_exti_of_init(struct device_node *np,
}
IRQCHIP_DECLARE(stm32h7_exti, "st,stm32h7-exti", stm32h7_exti_of_init);
-
-static int __init stm32mp1_exti_of_init(struct device_node *np,
- struct device_node *parent)
-{
- return stm32_exti_hierarchy_init(&stm32mp1_drv_data, np, parent);
-}
-
-IRQCHIP_DECLARE(stm32mp1_exti, "st,stm32mp1-exti", stm32mp1_exti_of_init);
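
The reworked stm32_exti_hwspin_lock() above busy-waits on hwspin_trylock_raw() under a fixed retry budget because it runs with interrupts disabled. A standalone userspace sketch of that bounded-retry pattern (the delay and timeout values are assumed stand-ins for HWSPNLCK_RETRY_DELAY and HWSPNLCK_TIMEOUT, and try_lock() merely models the hwspinlock API):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define RETRY_DELAY_US	10	/* assumed stand-in for HWSPNLCK_RETRY_DELAY */
#define TIMEOUT_US	1000	/* assumed stand-in for HWSPNLCK_TIMEOUT */

/* Stand-in for hwspin_trylock_raw(): 0 on success, -EBUSY when already held */
static int try_lock(bool *held)
{
	if (*held)
		return -EBUSY;
	*held = true;
	return 0;
}

static int lock_with_budget(bool *held)
{
	int timeout = 0;
	int ret;

	do {
		ret = try_lock(held);
		if (!ret)
			return 0;
		/* the driver calls udelay(HWSPNLCK_RETRY_DELAY) here */
		timeout += RETRY_DELAY_US;
	} while (timeout < TIMEOUT_US);

	if (ret == -EBUSY)
		ret = -ETIMEDOUT;	/* same mapping as the driver */
	return ret;
}

int main(void)
{
	bool held = false;

	printf("first caller: %d\n", lock_with_budget(&held));	/* 0 */
	printf("second caller: %d\n", lock_with_budget(&held));	/* -ETIMEDOUT */
	return 0;
}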
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
new file mode 100644
index 000000000000..011b60a49e3f
--- /dev/null
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments' K3 Interrupt Aggregator irqchip driver
+ *
+ * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Lokesh Vutla <lokeshvutla@ti.com>
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/msi.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+#include <asm-generic/msi.h>
+
+#define TI_SCI_DEV_ID_MASK 0xffff
+#define TI_SCI_DEV_ID_SHIFT 16
+#define TI_SCI_IRQ_ID_MASK 0xffff
+#define TI_SCI_IRQ_ID_SHIFT 0
+#define HWIRQ_TO_DEVID(hwirq) (((hwirq) >> (TI_SCI_DEV_ID_SHIFT)) & \
+ (TI_SCI_DEV_ID_MASK))
+#define HWIRQ_TO_IRQID(hwirq) ((hwirq) & (TI_SCI_IRQ_ID_MASK))
+#define TO_HWIRQ(dev, index) ((((dev) & TI_SCI_DEV_ID_MASK) << \
+ TI_SCI_DEV_ID_SHIFT) | \
+ ((index) & TI_SCI_IRQ_ID_MASK))
+
+#define MAX_EVENTS_PER_VINT 64
+#define VINT_ENABLE_SET_OFFSET 0x0
+#define VINT_ENABLE_CLR_OFFSET 0x8
+#define VINT_STATUS_OFFSET 0x18
+
+/**
+ * struct ti_sci_inta_event_desc - Description of an event coming to
+ * Interrupt Aggregator. This serves
+ * as a mapping table for global event,
+ * hwirq and vint bit.
+ * @global_event: Global event number corresponding to this event
+ * @hwirq: Hwirq of the incoming interrupt
+ * @vint_bit: Corresponding vint bit to which this event is attached.
+ */
+struct ti_sci_inta_event_desc {
+ u16 global_event;
+ u32 hwirq;
+ u8 vint_bit;
+};
+
+/**
+ * struct ti_sci_inta_vint_desc - Description of a virtual interrupt coming out
+ * of Interrupt Aggregator.
+ * @domain: Pointer to IRQ domain to which this vint belongs.
+ * @list: List entry for the vint list
+ * @event_map: Bitmap to manage the allocation of events to vint.
+ * @events: Array of event descriptors assigned to this vint.
+ * @parent_virq: Linux IRQ number that gets attached to parent
+ * @vint_id: TISCI vint ID
+ */
+struct ti_sci_inta_vint_desc {
+ struct irq_domain *domain;
+ struct list_head list;
+ DECLARE_BITMAP(event_map, MAX_EVENTS_PER_VINT);
+ struct ti_sci_inta_event_desc events[MAX_EVENTS_PER_VINT];
+ unsigned int parent_virq;
+ u16 vint_id;
+};
+
+/**
+ * struct ti_sci_inta_irq_domain - Structure representing a TISCI based
+ * Interrupt Aggregator IRQ domain.
+ * @sci: Pointer to TISCI handle
+ * @vint: TISCI resource pointer representing IA interrupts.
+ * @global_event: TISCI resource pointer representing global events.
+ * @vint_list: List of the vints active in the system
+ * @vint_mutex: Mutex to protect vint_list
+ * @base: Base address of the memory mapped IO registers
+ * @pdev: Pointer to platform device.
+ */
+struct ti_sci_inta_irq_domain {
+ const struct ti_sci_handle *sci;
+ struct ti_sci_resource *vint;
+ struct ti_sci_resource *global_event;
+ struct list_head vint_list;
+ /* Mutex to protect vint list */
+ struct mutex vint_mutex;
+ void __iomem *base;
+ struct platform_device *pdev;
+};
+
+#define to_vint_desc(e, i) container_of(e, struct ti_sci_inta_vint_desc, \
+ events[i])
+
+/**
+ * ti_sci_inta_irq_handler() - Chained IRQ handler for the vint irqs
+ * @desc: Pointer to irq_desc corresponding to the irq
+ */
+static void ti_sci_inta_irq_handler(struct irq_desc *desc)
+{
+ struct ti_sci_inta_vint_desc *vint_desc;
+ struct ti_sci_inta_irq_domain *inta;
+ struct irq_domain *domain;
+ unsigned int virq, bit;
+ unsigned long val;
+
+ vint_desc = irq_desc_get_handler_data(desc);
+ domain = vint_desc->domain;
+ inta = domain->host_data;
+
+ chained_irq_enter(irq_desc_get_chip(desc), desc);
+
+ val = readq_relaxed(inta->base + vint_desc->vint_id * 0x1000 +
+ VINT_STATUS_OFFSET);
+
+ for_each_set_bit(bit, &val, MAX_EVENTS_PER_VINT) {
+ virq = irq_find_mapping(domain, vint_desc->events[bit].hwirq);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+
+ chained_irq_exit(irq_desc_get_chip(desc), desc);
+}
+
+/**
+ * ti_sci_inta_alloc_parent_irq() - Allocate a parent irq for the Interrupt Aggregator
+ * @domain: IRQ domain corresponding to Interrupt Aggregator
+ *
+ * Return a pointer to the allocated vint descriptor if all went well, else an error pointer.
+ */
+static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_domain *domain)
+{
+ struct ti_sci_inta_irq_domain *inta = domain->host_data;
+ struct ti_sci_inta_vint_desc *vint_desc;
+ struct irq_fwspec parent_fwspec;
+ unsigned int parent_virq;
+ u16 vint_id;
+
+ vint_id = ti_sci_get_free_resource(inta->vint);
+ if (vint_id == TI_SCI_RESOURCE_NULL)
+ return ERR_PTR(-EINVAL);
+
+ vint_desc = kzalloc(sizeof(*vint_desc), GFP_KERNEL);
+ if (!vint_desc)
+ return ERR_PTR(-ENOMEM);
+
+ vint_desc->domain = domain;
+ vint_desc->vint_id = vint_id;
+ INIT_LIST_HEAD(&vint_desc->list);
+
+ parent_fwspec.fwnode = of_node_to_fwnode(of_irq_find_parent(dev_of_node(&inta->pdev->dev)));
+ parent_fwspec.param_count = 2;
+ parent_fwspec.param[0] = inta->pdev->id;
+ parent_fwspec.param[1] = vint_desc->vint_id;
+
+ parent_virq = irq_create_fwspec_mapping(&parent_fwspec);
+ if (parent_virq <= 0) {
+ kfree(vint_desc);
+ return ERR_PTR(parent_virq);
+ }
+ vint_desc->parent_virq = parent_virq;
+
+ list_add_tail(&vint_desc->list, &inta->vint_list);
+ irq_set_chained_handler_and_data(vint_desc->parent_virq,
+ ti_sci_inta_irq_handler, vint_desc);
+
+ return vint_desc;
+}
+
+/**
+ * ti_sci_inta_alloc_event() - Attach an event to an IA vint.
+ * @vint_desc: Pointer to vint_desc to which the event gets attached
+ * @free_bit: Bit inside vint to which event gets attached
+ * @hwirq: hwirq of the input event
+ *
+ * Return the event_desc pointer if all went ok, else an appropriate error pointer.
+ */
+static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_event(struct ti_sci_inta_vint_desc *vint_desc,
+ u16 free_bit,
+ u32 hwirq)
+{
+ struct ti_sci_inta_irq_domain *inta = vint_desc->domain->host_data;
+ struct ti_sci_inta_event_desc *event_desc;
+ u16 dev_id, dev_index;
+ int err;
+
+ dev_id = HWIRQ_TO_DEVID(hwirq);
+ dev_index = HWIRQ_TO_IRQID(hwirq);
+
+ event_desc = &vint_desc->events[free_bit];
+ event_desc->hwirq = hwirq;
+ event_desc->vint_bit = free_bit;
+ event_desc->global_event = ti_sci_get_free_resource(inta->global_event);
+ if (event_desc->global_event == TI_SCI_RESOURCE_NULL)
+ return ERR_PTR(-EINVAL);
+
+ err = inta->sci->ops.rm_irq_ops.set_event_map(inta->sci,
+ dev_id, dev_index,
+ inta->pdev->id,
+ vint_desc->vint_id,
+ event_desc->global_event,
+ free_bit);
+ if (err)
+ goto free_global_event;
+
+ return event_desc;
+free_global_event:
+ ti_sci_release_resource(inta->global_event, event_desc->global_event);
+ return ERR_PTR(err);
+}
+
+/**
+ * ti_sci_inta_alloc_irq() - Allocate an irq within INTA domain
+ * @domain: irq_domain pointer corresponding to INTA
+ * @hwirq: hwirq of the input event
+ *
+ * Note: Allocation happens in the following manner:
+ * - Find a free bit available in any of the vints available in the list.
+ * - If not found, allocate a vint from the vint pool
+ * - Attach the free bit to input hwirq.
+ * Return the event_desc if all went ok, else an appropriate error pointer.
+ */
+static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_irq(struct irq_domain *domain,
+ u32 hwirq)
+{
+ struct ti_sci_inta_irq_domain *inta = domain->host_data;
+ struct ti_sci_inta_vint_desc *vint_desc = NULL;
+ struct ti_sci_inta_event_desc *event_desc;
+ u16 free_bit;
+
+ mutex_lock(&inta->vint_mutex);
+ list_for_each_entry(vint_desc, &inta->vint_list, list) {
+ free_bit = find_first_zero_bit(vint_desc->event_map,
+ MAX_EVENTS_PER_VINT);
+ if (free_bit != MAX_EVENTS_PER_VINT) {
+ set_bit(free_bit, vint_desc->event_map);
+ goto alloc_event;
+ }
+ }
+
+ /* No free bits available. Allocate a new vint */
+ vint_desc = ti_sci_inta_alloc_parent_irq(domain);
+ if (IS_ERR(vint_desc)) {
+ mutex_unlock(&inta->vint_mutex);
+ return ERR_PTR(PTR_ERR(vint_desc));
+ }
+
+ free_bit = find_first_zero_bit(vint_desc->event_map,
+ MAX_EVENTS_PER_VINT);
+ set_bit(free_bit, vint_desc->event_map);
+
+alloc_event:
+ event_desc = ti_sci_inta_alloc_event(vint_desc, free_bit, hwirq);
+ if (IS_ERR(event_desc))
+ clear_bit(free_bit, vint_desc->event_map);
+
+ mutex_unlock(&inta->vint_mutex);
+ return event_desc;
+}
+
+/**
+ * ti_sci_inta_free_parent_irq() - Free a parent irq to INTA
+ * @inta: Pointer to inta domain.
+ * @vint_desc: Pointer to vint_desc that needs to be freed.
+ */
+static void ti_sci_inta_free_parent_irq(struct ti_sci_inta_irq_domain *inta,
+ struct ti_sci_inta_vint_desc *vint_desc)
+{
+ if (find_first_bit(vint_desc->event_map, MAX_EVENTS_PER_VINT) == MAX_EVENTS_PER_VINT) {
+ list_del(&vint_desc->list);
+ ti_sci_release_resource(inta->vint, vint_desc->vint_id);
+ irq_dispose_mapping(vint_desc->parent_virq);
+ kfree(vint_desc);
+ }
+}
+
+/**
+ * ti_sci_inta_free_irq() - Free an IRQ within INTA domain
+ * @event_desc: Pointer to event_desc that needs to be freed.
+ * @hwirq: Hwirq number within INTA domain that needs to be freed
+ */
+static void ti_sci_inta_free_irq(struct ti_sci_inta_event_desc *event_desc,
+ u32 hwirq)
+{
+ struct ti_sci_inta_vint_desc *vint_desc;
+ struct ti_sci_inta_irq_domain *inta;
+
+ vint_desc = to_vint_desc(event_desc, event_desc->vint_bit);
+ inta = vint_desc->domain->host_data;
+ /* free event irq */
+ mutex_lock(&inta->vint_mutex);
+ inta->sci->ops.rm_irq_ops.free_event_map(inta->sci,
+ HWIRQ_TO_DEVID(hwirq),
+ HWIRQ_TO_IRQID(hwirq),
+ inta->pdev->id,
+ vint_desc->vint_id,
+ event_desc->global_event,
+ event_desc->vint_bit);
+
+ clear_bit(event_desc->vint_bit, vint_desc->event_map);
+ ti_sci_release_resource(inta->global_event, event_desc->global_event);
+ event_desc->global_event = TI_SCI_RESOURCE_NULL;
+ event_desc->hwirq = 0;
+
+ ti_sci_inta_free_parent_irq(inta, vint_desc);
+ mutex_unlock(&inta->vint_mutex);
+}
+
+/**
+ * ti_sci_inta_request_resources() - Allocate resources for input irq
+ * @data: Pointer to corresponding irq_data
+ *
+ * Note: This is the core API where the actual allocation happens for the
+ * input hwirq. The allocation involves creating a parent irq for the vint.
+ * Doing this in irq_domain_ops.alloc() would lead to a deadlock, so the
+ * allocation is done here in request_resources().
+ *
+ * Return: 0 if all went well else corresponding error.
+ */
+static int ti_sci_inta_request_resources(struct irq_data *data)
+{
+ struct ti_sci_inta_event_desc *event_desc;
+
+ event_desc = ti_sci_inta_alloc_irq(data->domain, data->hwirq);
+ if (IS_ERR(event_desc))
+ return PTR_ERR(event_desc);
+
+ data->chip_data = event_desc;
+
+ return 0;
+}
+
+/**
+ * ti_sci_inta_release_resources - Release resources for input irq
+ * @data: Pointer to corresponding irq_data
+ *
+ * Note: Corresponding to request_resources(), all the unmapping and deletion
+ * of parent vint irqs happens in this api.
+ */
+static void ti_sci_inta_release_resources(struct irq_data *data)
+{
+ struct ti_sci_inta_event_desc *event_desc;
+
+ event_desc = irq_data_get_irq_chip_data(data);
+ ti_sci_inta_free_irq(event_desc, data->hwirq);
+}
+
+/**
+ * ti_sci_inta_manage_event() - Control the event based on the offset
+ * @data: Pointer to corresponding irq_data
+ * @offset: register offset using which event is controlled.
+ */
+static void ti_sci_inta_manage_event(struct irq_data *data, u32 offset)
+{
+ struct ti_sci_inta_event_desc *event_desc;
+ struct ti_sci_inta_vint_desc *vint_desc;
+ struct ti_sci_inta_irq_domain *inta;
+
+ event_desc = irq_data_get_irq_chip_data(data);
+ vint_desc = to_vint_desc(event_desc, event_desc->vint_bit);
+ inta = data->domain->host_data;
+
+ writeq_relaxed(BIT(event_desc->vint_bit),
+ inta->base + vint_desc->vint_id * 0x1000 + offset);
+}
+
+/**
+ * ti_sci_inta_mask_irq() - Mask an event
+ * @data: Pointer to corresponding irq_data
+ */
+static void ti_sci_inta_mask_irq(struct irq_data *data)
+{
+ ti_sci_inta_manage_event(data, VINT_ENABLE_CLR_OFFSET);
+}
+
+/**
+ * ti_sci_inta_unmask_irq() - Unmask an event
+ * @data: Pointer to corresponding irq_data
+ */
+static void ti_sci_inta_unmask_irq(struct irq_data *data)
+{
+ ti_sci_inta_manage_event(data, VINT_ENABLE_SET_OFFSET);
+}
+
+/**
+ * ti_sci_inta_ack_irq() - Ack an event
+ * @data: Pointer to corresponding irq_data
+ */
+static void ti_sci_inta_ack_irq(struct irq_data *data)
+{
+ /*
+ * Do not clear the event if hardware is capable of sending
+ * a down event.
+ */
+ if (irqd_get_trigger_type(data) != IRQF_TRIGGER_HIGH)
+ ti_sci_inta_manage_event(data, VINT_STATUS_OFFSET);
+}
+
+static int ti_sci_inta_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val, bool force)
+{
+ return -EINVAL;
+}
+
+/**
+ * ti_sci_inta_set_type() - Update the trigger type of the irq.
+ * @data: Pointer to corresponding irq_data
+ * @type: Trigger type as specified by user
+ *
+ * Note: This updates the handle_irq callback for level msi.
+ *
+ * Return 0 if all went well else appropriate error.
+ */
+static int ti_sci_inta_set_type(struct irq_data *data, unsigned int type)
+{
+ /*
+ * .alloc default sets handle_edge_irq. But if the user specifies
+ * that IRQ is level MSI, then update the handle to handle_level_irq
+ */
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQF_TRIGGER_HIGH:
+ irq_set_handler_locked(data, handle_level_irq);
+ return 0;
+ case IRQF_TRIGGER_RISING:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
+static struct irq_chip ti_sci_inta_irq_chip = {
+ .name = "INTA",
+ .irq_ack = ti_sci_inta_ack_irq,
+ .irq_mask = ti_sci_inta_mask_irq,
+ .irq_set_type = ti_sci_inta_set_type,
+ .irq_unmask = ti_sci_inta_unmask_irq,
+ .irq_set_affinity = ti_sci_inta_set_affinity,
+ .irq_request_resources = ti_sci_inta_request_resources,
+ .irq_release_resources = ti_sci_inta_release_resources,
+};
+
+/**
+ * ti_sci_inta_irq_domain_free() - Free an IRQ from the IRQ domain
+ * @domain: Domain to which the irqs belong
+ * @virq: base linux virtual IRQ to be freed.
+ * @nr_irqs: Number of continuous irqs to be freed
+ */
+static void ti_sci_inta_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *data = irq_domain_get_irq_data(domain, virq);
+
+ irq_domain_reset_irq_data(data);
+}
+
+/**
+ * ti_sci_inta_irq_domain_alloc() - Allocate Interrupt aggregator IRQs
+ * @domain: Pointer to the interrupt aggregator IRQ domain
+ * @virq: Corresponding Linux virtual IRQ number
+ * @nr_irqs: Continuous irqs to be allocated
+ * @data: Pointer to firmware specifier
+ *
+ * No actual allocation happens here.
+ *
+ * Return 0 if all went well else appropriate error value.
+ */
+static int ti_sci_inta_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *data)
+{
+ msi_alloc_info_t *arg = data;
+
+ irq_domain_set_info(domain, virq, arg->hwirq, &ti_sci_inta_irq_chip,
+ NULL, handle_edge_irq, NULL, NULL);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ti_sci_inta_irq_domain_ops = {
+ .free = ti_sci_inta_irq_domain_free,
+ .alloc = ti_sci_inta_irq_domain_alloc,
+};
+
+static struct irq_chip ti_sci_inta_msi_irq_chip = {
+ .name = "MSI-INTA",
+ .flags = IRQCHIP_SUPPORTS_LEVEL_MSI,
+};
+
+static void ti_sci_inta_msi_set_desc(msi_alloc_info_t *arg,
+ struct msi_desc *desc)
+{
+ struct platform_device *pdev = to_platform_device(desc->dev);
+
+ arg->desc = desc;
+ arg->hwirq = TO_HWIRQ(pdev->id, desc->inta.dev_index);
+}
+
+static struct msi_domain_ops ti_sci_inta_msi_ops = {
+ .set_desc = ti_sci_inta_msi_set_desc,
+};
+
+static struct msi_domain_info ti_sci_inta_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_LEVEL_CAPABLE),
+ .ops = &ti_sci_inta_msi_ops,
+ .chip = &ti_sci_inta_msi_irq_chip,
+};
+
+static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
+{
+ struct irq_domain *parent_domain, *domain, *msi_domain;
+ struct device_node *parent_node, *node;
+ struct ti_sci_inta_irq_domain *inta;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int ret;
+
+ node = dev_of_node(dev);
+ parent_node = of_irq_find_parent(node);
+ if (!parent_node) {
+ dev_err(dev, "Failed to get IRQ parent node\n");
+ return -ENODEV;
+ }
+
+ parent_domain = irq_find_host(parent_node);
+ if (!parent_domain)
+ return -EPROBE_DEFER;
+
+ inta = devm_kzalloc(dev, sizeof(*inta), GFP_KERNEL);
+ if (!inta)
+ return -ENOMEM;
+
+ inta->pdev = pdev;
+ inta->sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
+ if (IS_ERR(inta->sci)) {
+ ret = PTR_ERR(inta->sci);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "ti,sci read fail %d\n", ret);
+ inta->sci = NULL;
+ return ret;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", &pdev->id);
+ if (ret) {
+ dev_err(dev, "missing 'ti,sci-dev-id' property\n");
+ return -EINVAL;
+ }
+
+ inta->vint = devm_ti_sci_get_of_resource(inta->sci, dev, pdev->id,
+ "ti,sci-rm-range-vint");
+ if (IS_ERR(inta->vint)) {
+ dev_err(dev, "VINT resource allocation failed\n");
+ return PTR_ERR(inta->vint);
+ }
+
+ inta->global_event = devm_ti_sci_get_of_resource(inta->sci, dev, pdev->id,
+ "ti,sci-rm-range-global-event");
+ if (IS_ERR(inta->global_event)) {
+ dev_err(dev, "Global event resource allocation failed\n");
+ return PTR_ERR(inta->global_event);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ inta->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(inta->base))
+ return -ENODEV;
+
+ domain = irq_domain_add_linear(dev_of_node(dev),
+ ti_sci_get_num_resources(inta->vint),
+ &ti_sci_inta_irq_domain_ops, inta);
+ if (!domain) {
+ dev_err(dev, "Failed to allocate IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ msi_domain = ti_sci_inta_msi_create_irq_domain(of_node_to_fwnode(node),
+ &ti_sci_inta_msi_domain_info,
+ domain);
+ if (!msi_domain) {
+ irq_domain_remove(domain);
+ dev_err(dev, "Failed to allocate msi domain\n");
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&inta->vint_list);
+ mutex_init(&inta->vint_mutex);
+
+ return 0;
+}
+
+static const struct of_device_id ti_sci_inta_irq_domain_of_match[] = {
+ { .compatible = "ti,sci-inta", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ti_sci_inta_irq_domain_of_match);
+
+static struct platform_driver ti_sci_inta_irq_domain_driver = {
+ .probe = ti_sci_inta_irq_domain_probe,
+ .driver = {
+ .name = "ti-sci-inta",
+ .of_match_table = ti_sci_inta_irq_domain_of_match,
+ },
+};
+module_platform_driver(ti_sci_inta_irq_domain_driver);
+
+MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ti.com>");
+MODULE_DESCRIPTION("K3 Interrupt Aggregator driver over TI SCI protocol");
+MODULE_LICENSE("GPL v2");
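
ti_sci_inta_alloc_irq() above first scans every active vint for a free bit in its 64-entry event_map before allocating a new vint. A standalone userspace sketch of that search step (illustrative only; the driver itself uses find_first_zero_bit()/set_bit() on the bitmap under vint_mutex):

#include <stdint.h>
#include <stdio.h>

#define MAX_EVENTS_PER_VINT 64

/* Return the first clear bit, or MAX_EVENTS_PER_VINT when the vint is full */
static int find_free_event_bit(uint64_t event_map)
{
	int bit;

	for (bit = 0; bit < MAX_EVENTS_PER_VINT; bit++)
		if (!(event_map & (1ULL << bit)))
			return bit;
	return MAX_EVENTS_PER_VINT;	/* full: the driver then allocates a new vint */
}

int main(void)
{
	uint64_t map = 0x7;	/* assumed: events 0..2 already attached */

	printf("next free event bit: %d\n", find_free_event_bit(map));	/* 3 */
	return 0;
}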
diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c
new file mode 100644
index 000000000000..59d51a20bbd8
--- /dev/null
+++ b/drivers/irqchip/irq-ti-sci-intr.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments' K3 Interrupt Router irqchip driver
+ *
+ * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Lokesh Vutla <lokeshvutla@ti.com>
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+#define TI_SCI_DEV_ID_MASK 0xffff
+#define TI_SCI_DEV_ID_SHIFT 16
+#define TI_SCI_IRQ_ID_MASK 0xffff
+#define TI_SCI_IRQ_ID_SHIFT 0
+#define HWIRQ_TO_DEVID(hwirq) (((hwirq) >> (TI_SCI_DEV_ID_SHIFT)) & \
+ (TI_SCI_DEV_ID_MASK))
+#define HWIRQ_TO_IRQID(hwirq) ((hwirq) & (TI_SCI_IRQ_ID_MASK))
+#define TO_HWIRQ(dev, index) ((((dev) & TI_SCI_DEV_ID_MASK) << \
+ TI_SCI_DEV_ID_SHIFT) | \
+ ((index) & TI_SCI_IRQ_ID_MASK))
+
+/**
+ * struct ti_sci_intr_irq_domain - Structure representing a TISCI based
+ * Interrupt Router IRQ domain.
+ * @sci: Pointer to TISCI handle
+ * @dst_irq: TISCI resource pointer representing GIC irq controller.
+ * @dst_id: TISCI device ID of the GIC irq controller.
+ * @type: Specifies the trigger type supported by this Interrupt Router
+ */
+struct ti_sci_intr_irq_domain {
+ const struct ti_sci_handle *sci;
+ struct ti_sci_resource *dst_irq;
+ u32 dst_id;
+ u32 type;
+};
+
+static struct irq_chip ti_sci_intr_irq_chip = {
+ .name = "INTR",
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+};
+
+/**
+ * ti_sci_intr_irq_domain_translate() - Retrieve hwirq and type from
+ * IRQ firmware specific handler.
+ * @domain: Pointer to IRQ domain
+ * @fwspec: Pointer to IRQ specific firmware structure
+ * @hwirq: IRQ number identified by hardware
+ * @type: IRQ type
+ *
+ * Return 0 if all went ok else appropriate error.
+ */
+static int ti_sci_intr_irq_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ struct ti_sci_intr_irq_domain *intr = domain->host_data;
+
+ if (fwspec->param_count != 2)
+ return -EINVAL;
+
+ *hwirq = TO_HWIRQ(fwspec->param[0], fwspec->param[1]);
+ *type = intr->type;
+
+ return 0;
+}
+
+/**
+ * ti_sci_intr_irq_domain_free() - Free the specified IRQs from the domain.
+ * @domain: Domain to which the irqs belong
+ * @virq: Linux virtual IRQ to be freed.
+ * @nr_irqs: Number of continuous irqs to be freed
+ */
+static void ti_sci_intr_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct ti_sci_intr_irq_domain *intr = domain->host_data;
+ struct irq_data *data, *parent_data;
+ u16 dev_id, irq_index;
+
+ parent_data = irq_domain_get_irq_data(domain->parent, virq);
+ data = irq_domain_get_irq_data(domain, virq);
+ irq_index = HWIRQ_TO_IRQID(data->hwirq);
+ dev_id = HWIRQ_TO_DEVID(data->hwirq);
+
+ intr->sci->ops.rm_irq_ops.free_irq(intr->sci, dev_id, irq_index,
+ intr->dst_id, parent_data->hwirq);
+ ti_sci_release_resource(intr->dst_irq, parent_data->hwirq);
+ irq_domain_free_irqs_parent(domain, virq, 1);
+ irq_domain_reset_irq_data(data);
+}
+
+/**
+ * ti_sci_intr_alloc_gic_irq() - Allocate GIC specific IRQ
+ * @domain: Pointer to the interrupt router IRQ domain
+ * @virq: Corresponding Linux virtual IRQ number
+ * @hwirq: Corresponding hwirq for the IRQ within this IRQ domain
+ *
+ * Returns 0 if all went well, else an appropriate error value.
+ */
+static int ti_sci_intr_alloc_gic_irq(struct irq_domain *domain,
+ unsigned int virq, u32 hwirq)
+{
+ struct ti_sci_intr_irq_domain *intr = domain->host_data;
+ struct irq_fwspec fwspec;
+ u16 dev_id, irq_index;
+ u16 dst_irq;
+ int err;
+
+ dev_id = HWIRQ_TO_DEVID(hwirq);
+ irq_index = HWIRQ_TO_IRQID(hwirq);
+
+ dst_irq = ti_sci_get_free_resource(intr->dst_irq);
+ if (dst_irq == TI_SCI_RESOURCE_NULL)
+ return -EINVAL;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 3;
+ fwspec.param[0] = 0; /* SPI */
+ fwspec.param[1] = dst_irq - 32; /* SPI offset */
+ fwspec.param[2] = intr->type;
+
+ err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (err)
+ goto err_irqs;
+
+ err = intr->sci->ops.rm_irq_ops.set_irq(intr->sci, dev_id, irq_index,
+ intr->dst_id, dst_irq);
+ if (err)
+ goto err_msg;
+
+ return 0;
+
+err_msg:
+ irq_domain_free_irqs_parent(domain, virq, 1);
+err_irqs:
+ ti_sci_release_resource(intr->dst_irq, dst_irq);
+ return err;
+}
+
+/**
+ * ti_sci_intr_irq_domain_alloc() - Allocate Interrupt router IRQs
+ * @domain: Pointer to the interrupt router IRQ domain
+ * @virq: Corresponding Linux virtual IRQ number
+ * @nr_irqs: Continuous irqs to be allocated
+ * @data: Pointer to firmware specifier
+ *
+ * Return 0 if all went well else appropriate error value.
+ */
+static int ti_sci_intr_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ unsigned long hwirq;
+ unsigned int flags;
+ int err;
+
+ err = ti_sci_intr_irq_domain_translate(domain, fwspec, &hwirq, &flags);
+ if (err)
+ return err;
+
+ err = ti_sci_intr_alloc_gic_irq(domain, virq, hwirq);
+ if (err)
+ return err;
+
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &ti_sci_intr_irq_chip, NULL);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ti_sci_intr_irq_domain_ops = {
+ .free = ti_sci_intr_irq_domain_free,
+ .alloc = ti_sci_intr_irq_domain_alloc,
+ .translate = ti_sci_intr_irq_domain_translate,
+};
+
+static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev)
+{
+ struct irq_domain *parent_domain, *domain;
+ struct ti_sci_intr_irq_domain *intr;
+ struct device_node *parent_node;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ parent_node = of_irq_find_parent(dev_of_node(dev));
+ if (!parent_node) {
+ dev_err(dev, "Failed to get IRQ parent node\n");
+ return -ENODEV;
+ }
+
+ parent_domain = irq_find_host(parent_node);
+ if (!parent_domain) {
+ dev_err(dev, "Failed to find IRQ parent domain\n");
+ return -ENODEV;
+ }
+
+ intr = devm_kzalloc(dev, sizeof(*intr), GFP_KERNEL);
+ if (!intr)
+ return -ENOMEM;
+
+ ret = of_property_read_u32(dev_of_node(dev), "ti,intr-trigger-type",
+ &intr->type);
+ if (ret) {
+ dev_err(dev, "missing ti,intr-trigger-type property\n");
+ return -EINVAL;
+ }
+
+ intr->sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
+ if (IS_ERR(intr->sci)) {
+ ret = PTR_ERR(intr->sci);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "ti,sci read fail %d\n", ret);
+ intr->sci = NULL;
+ return ret;
+ }
+
+ ret = of_property_read_u32(dev_of_node(dev), "ti,sci-dst-id",
+ &intr->dst_id);
+ if (ret) {
+ dev_err(dev, "missing 'ti,sci-dst-id' property\n");
+ return -EINVAL;
+ }
+
+ intr->dst_irq = devm_ti_sci_get_of_resource(intr->sci, dev,
+ intr->dst_id,
+ "ti,sci-rm-range-girq");
+ if (IS_ERR(intr->dst_irq)) {
+ dev_err(dev, "Destination irq resource allocation failed\n");
+ return PTR_ERR(intr->dst_irq);
+ }
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0, 0, dev_of_node(dev),
+ &ti_sci_intr_irq_domain_ops, intr);
+ if (!domain) {
+ dev_err(dev, "Failed to allocate IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id ti_sci_intr_irq_domain_of_match[] = {
+ { .compatible = "ti,sci-intr", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ti_sci_intr_irq_domain_of_match);
+
+static struct platform_driver ti_sci_intr_irq_domain_driver = {
+ .probe = ti_sci_intr_irq_domain_probe,
+ .driver = {
+ .name = "ti-sci-intr",
+ .of_match_table = ti_sci_intr_irq_domain_of_match,
+ },
+};
+module_platform_driver(ti_sci_intr_irq_domain_driver);
+
+MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ti.com>");
+MODULE_DESCRIPTION("K3 Interrupt Router driver over TI SCI protocol");
+MODULE_LICENSE("GPL v2");
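
Both TI SCI drivers above pack a 16-bit TISCI device ID and a 16-bit IRQ/event index into a single hwirq via TO_HWIRQ()/HWIRQ_TO_DEVID()/HWIRQ_TO_IRQID(). A small standalone check of that round trip (macros copied from the patch; the device ID and index values are arbitrary examples):

#include <stdio.h>

/* Macros copied from irq-ti-sci-inta.c / irq-ti-sci-intr.c above */
#define TI_SCI_DEV_ID_MASK	0xffff
#define TI_SCI_DEV_ID_SHIFT	16
#define TI_SCI_IRQ_ID_MASK	0xffff
#define TO_HWIRQ(dev, index)	((((dev) & TI_SCI_DEV_ID_MASK) << \
				  TI_SCI_DEV_ID_SHIFT) | \
				 ((index) & TI_SCI_IRQ_ID_MASK))
#define HWIRQ_TO_DEVID(hwirq)	(((hwirq) >> TI_SCI_DEV_ID_SHIFT) & TI_SCI_DEV_ID_MASK)
#define HWIRQ_TO_IRQID(hwirq)	((hwirq) & TI_SCI_IRQ_ID_MASK)

int main(void)
{
	/* Assumed example: TISCI device ID 0x4a, IRQ/event index 3 */
	unsigned int hwirq = TO_HWIRQ(0x4a, 3);

	printf("hwirq=%#x dev=%#x index=%u\n",
	       hwirq, HWIRQ_TO_DEVID(hwirq), HWIRQ_TO_IRQID(hwirq));
	/* prints: hwirq=0x4a0003 dev=0x4a index=3 */
	return 0;
}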
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 5f82036fe322..0df7454832ef 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -45,6 +45,8 @@ struct nvm_dev_map {
int num_ch;
};
+static void nvm_free(struct kref *ref);
+
static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
struct nvm_target *tgt;
@@ -325,6 +327,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
struct nvm_target *t;
struct nvm_tgt_dev *tgt_dev;
void *targetdata;
+ unsigned int mdts;
int ret;
switch (create->conf.type) {
@@ -412,8 +415,12 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
tdisk->private_data = targetdata;
tqueue->queuedata = targetdata;
- blk_queue_max_hw_sectors(tqueue,
- (dev->geo.csecs >> 9) * NVM_MAX_VLBA);
+ mdts = (dev->geo.csecs >> 9) * NVM_MAX_VLBA;
+ if (dev->geo.mdts) {
+ mdts = min_t(u32, dev->geo.mdts,
+ (dev->geo.csecs >> 9) * NVM_MAX_VLBA);
+ }
+ blk_queue_max_hw_sectors(tqueue, mdts);
set_capacity(tdisk, tt->capacity(targetdata));
add_disk(tdisk);
@@ -476,7 +483,6 @@ static void __nvm_remove_target(struct nvm_target *t, bool graceful)
/**
* nvm_remove_tgt - Removes a target from the media manager
- * @dev: device
* @remove: ioctl structure with target name to remove.
*
* Returns:
@@ -484,18 +490,28 @@ static void __nvm_remove_target(struct nvm_target *t, bool graceful)
* 1: on not found
* <0: on error
*/
-static int nvm_remove_tgt(struct nvm_dev *dev, struct nvm_ioctl_remove *remove)
+static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
{
struct nvm_target *t;
+ struct nvm_dev *dev;
- mutex_lock(&dev->mlock);
- t = nvm_find_target(dev, remove->tgtname);
- if (!t) {
+ down_read(&nvm_lock);
+ list_for_each_entry(dev, &nvm_devices, devices) {
+ mutex_lock(&dev->mlock);
+ t = nvm_find_target(dev, remove->tgtname);
+ if (t) {
+ mutex_unlock(&dev->mlock);
+ break;
+ }
mutex_unlock(&dev->mlock);
- return 1;
}
+ up_read(&nvm_lock);
+
+ if (!t)
+ return 1;
+
__nvm_remove_target(t, true);
- mutex_unlock(&dev->mlock);
+ kref_put(&dev->ref, nvm_free);
return 0;
}
@@ -1089,15 +1105,16 @@ err_fmtype:
return ret;
}
-static void nvm_free(struct nvm_dev *dev)
+static void nvm_free(struct kref *ref)
{
- if (!dev)
- return;
+ struct nvm_dev *dev = container_of(ref, struct nvm_dev, ref);
if (dev->dma_pool)
dev->ops->destroy_dma_pool(dev->dma_pool);
- nvm_unregister_map(dev);
+ if (dev->rmap)
+ nvm_unregister_map(dev);
+
kfree(dev->lun_map);
kfree(dev);
}
@@ -1134,7 +1151,13 @@ err:
struct nvm_dev *nvm_alloc_dev(int node)
{
- return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
+ struct nvm_dev *dev;
+
+ dev = kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
+ if (dev)
+ kref_init(&dev->ref);
+
+ return dev;
}
EXPORT_SYMBOL(nvm_alloc_dev);
@@ -1142,12 +1165,16 @@ int nvm_register(struct nvm_dev *dev)
{
int ret, exp_pool_size;
- if (!dev->q || !dev->ops)
+ if (!dev->q || !dev->ops) {
+ kref_put(&dev->ref, nvm_free);
return -EINVAL;
+ }
ret = nvm_init(dev);
- if (ret)
+ if (ret) {
+ kref_put(&dev->ref, nvm_free);
return ret;
+ }
exp_pool_size = max_t(int, PAGE_SIZE,
(NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
@@ -1157,7 +1184,7 @@ int nvm_register(struct nvm_dev *dev)
exp_pool_size);
if (!dev->dma_pool) {
pr_err("nvm: could not create dma pool\n");
- nvm_free(dev);
+ kref_put(&dev->ref, nvm_free);
return -ENOMEM;
}
@@ -1179,6 +1206,7 @@ void nvm_unregister(struct nvm_dev *dev)
if (t->dev->parent != dev)
continue;
__nvm_remove_target(t, false);
+ kref_put(&dev->ref, nvm_free);
}
mutex_unlock(&dev->mlock);
@@ -1186,13 +1214,14 @@ void nvm_unregister(struct nvm_dev *dev)
list_del(&dev->devices);
up_write(&nvm_lock);
- nvm_free(dev);
+ kref_put(&dev->ref, nvm_free);
}
EXPORT_SYMBOL(nvm_unregister);
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
struct nvm_dev *dev;
+ int ret;
down_write(&nvm_lock);
dev = nvm_find_nvm_dev(create->dev);
@@ -1203,7 +1232,12 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
return -EINVAL;
}
- return nvm_create_tgt(dev, create);
+ kref_get(&dev->ref);
+ ret = nvm_create_tgt(dev, create);
+ if (ret)
+ kref_put(&dev->ref, nvm_free);
+
+ return ret;
}
static long nvm_ioctl_info(struct file *file, void __user *arg)
@@ -1322,8 +1356,6 @@ static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
struct nvm_ioctl_remove remove;
- struct nvm_dev *dev;
- int ret = 0;
if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
return -EFAULT;
@@ -1335,13 +1367,7 @@ static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
return -EINVAL;
}
- list_for_each_entry(dev, &nvm_devices, devices) {
- ret = nvm_remove_tgt(dev, &remove);
- if (!ret)
- break;
- }
-
- return ret;
+ return nvm_remove_tgt(&remove);
}
/* kept for compatibility reasons */
diff --git a/drivers/lightnvm/pblk-cache.c b/drivers/lightnvm/pblk-cache.c
index c9fa26f95659..5c1034c22197 100644
--- a/drivers/lightnvm/pblk-cache.c
+++ b/drivers/lightnvm/pblk-cache.c
@@ -18,7 +18,8 @@
#include "pblk.h"
-int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
+void pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
+ unsigned long flags)
{
struct request_queue *q = pblk->dev->q;
struct pblk_w_ctx w_ctx;
@@ -43,6 +44,7 @@ retry:
goto retry;
case NVM_IO_ERR:
pblk_pipeline_stop(pblk);
+ bio_io_error(bio);
goto out;
}
@@ -79,7 +81,9 @@ retry:
out:
generic_end_io_acct(q, REQ_OP_WRITE, &pblk->disk->part0, start_time);
pblk_write_should_kick(pblk);
- return ret;
+
+ if (ret == NVM_IO_DONE)
+ bio_endio(bio);
}
/*
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 6ca868868fee..773537804319 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -562,11 +562,9 @@ int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd)
{
- struct ppa_addr *ppa_list;
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
int ret;
- ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
-
pblk_down_chunk(pblk, ppa_list[0]);
ret = pblk_submit_io_sync(pblk, rqd);
pblk_up_chunk(pblk, ppa_list[0]);
@@ -725,6 +723,7 @@ int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_line_meta *lm = &pblk->lm;
struct bio *bio;
+ struct ppa_addr *ppa_list;
struct nvm_rq rqd;
u64 paddr = pblk_line_smeta_start(pblk, line);
int i, ret;
@@ -748,9 +747,10 @@ int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
rqd.opcode = NVM_OP_PREAD;
rqd.nr_ppas = lm->smeta_sec;
rqd.is_seq = 1;
+ ppa_list = nvm_rq_to_ppa_list(&rqd);
for (i = 0; i < lm->smeta_sec; i++, paddr++)
- rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
+ ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
ret = pblk_submit_io_sync(pblk, &rqd);
if (ret) {
@@ -761,8 +761,10 @@ int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
atomic_dec(&pblk->inflight_io);
- if (rqd.error)
+ if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
pblk_log_read_err(pblk, &rqd);
+ ret = -EIO;
+ }
clear_rqd:
pblk_free_rqd_meta(pblk, &rqd);
@@ -775,6 +777,7 @@ static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_line_meta *lm = &pblk->lm;
struct bio *bio;
+ struct ppa_addr *ppa_list;
struct nvm_rq rqd;
__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
@@ -799,12 +802,13 @@ static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
rqd.opcode = NVM_OP_PWRITE;
rqd.nr_ppas = lm->smeta_sec;
rqd.is_seq = 1;
+ ppa_list = nvm_rq_to_ppa_list(&rqd);
for (i = 0; i < lm->smeta_sec; i++, paddr++) {
struct pblk_sec_meta *meta = pblk_get_meta(pblk,
rqd.meta_list, i);
- rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
+ ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
meta->lba = lba_list[paddr] = addr_empty;
}
@@ -834,8 +838,9 @@ int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
struct nvm_geo *geo = &dev->geo;
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
- void *ppa_list, *meta_list;
+ void *ppa_list_buf, *meta_list;
struct bio *bio;
+ struct ppa_addr *ppa_list;
struct nvm_rq rqd;
u64 paddr = line->emeta_ssec;
dma_addr_t dma_ppa_list, dma_meta_list;
@@ -851,7 +856,7 @@ int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
if (!meta_list)
return -ENOMEM;
- ppa_list = meta_list + pblk_dma_meta_size(pblk);
+ ppa_list_buf = meta_list + pblk_dma_meta_size(pblk);
dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);
next_rq:
@@ -872,11 +877,12 @@ next_rq:
rqd.bio = bio;
rqd.meta_list = meta_list;
- rqd.ppa_list = ppa_list;
+ rqd.ppa_list = ppa_list_buf;
rqd.dma_meta_list = dma_meta_list;
rqd.dma_ppa_list = dma_ppa_list;
rqd.opcode = NVM_OP_PREAD;
rqd.nr_ppas = rq_ppas;
+ ppa_list = nvm_rq_to_ppa_list(&rqd);
for (i = 0; i < rqd.nr_ppas; ) {
struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);
@@ -904,7 +910,7 @@ next_rq:
}
for (j = 0; j < min; j++, i++, paddr++)
- rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
+ ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
}
ret = pblk_submit_io_sync(pblk, &rqd);
@@ -916,8 +922,11 @@ next_rq:
atomic_dec(&pblk->inflight_io);
- if (rqd.error)
+ if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
pblk_log_read_err(pblk, &rqd);
+ ret = -EIO;
+ goto free_rqd_dma;
+ }
emeta_buf += rq_len;
left_ppas -= rq_ppas;
@@ -1162,7 +1171,6 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
off = bit * geo->ws_opt;
bitmap_set(line->map_bitmap, off, lm->smeta_sec);
line->sec_in_line -= lm->smeta_sec;
- line->smeta_ssec = off;
line->cur_sec = off + lm->smeta_sec;
if (init && pblk_line_smeta_write(pblk, line, off)) {
@@ -1521,11 +1529,9 @@ void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
{
- struct ppa_addr *ppa_list;
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
int i;
- ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
-
for (i = 0; i < rqd->nr_ppas; i++)
pblk_ppa_to_line_put(pblk, ppa_list[i]);
}
@@ -1699,6 +1705,14 @@ static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
spin_lock(&line->lock);
WARN_ON(line->state != PBLK_LINESTATE_GC);
+ if (line->w_err_gc->has_gc_err) {
+ spin_unlock(&line->lock);
+ pblk_err(pblk, "line %d had errors during GC\n", line->id);
+ pblk_put_line_back(pblk, line);
+ line->w_err_gc->has_gc_err = 0;
+ return;
+ }
+
line->state = PBLK_LINESTATE_FREE;
trace_pblk_line_state(pblk_disk_name(pblk), line->id,
line->state);
@@ -2023,7 +2037,7 @@ void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
struct ppa_addr ppa_l2p;
/* logic error: lba out-of-bounds. Ignore update */
- if (!(lba < pblk->rl.nr_secs)) {
+ if (!(lba < pblk->capacity)) {
WARN(1, "pblk: corrupted L2P map request\n");
return;
}
@@ -2063,7 +2077,7 @@ int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
#endif
/* logic error: lba out-of-bounds. Ignore update */
- if (!(lba < pblk->rl.nr_secs)) {
+ if (!(lba < pblk->capacity)) {
WARN(1, "pblk: corrupted L2P map request\n");
return 0;
}
@@ -2109,7 +2123,7 @@ void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
}
/* logic error: lba out-of-bounds. Ignore update */
- if (!(lba < pblk->rl.nr_secs)) {
+ if (!(lba < pblk->capacity)) {
WARN(1, "pblk: corrupted L2P map request\n");
return;
}
@@ -2135,8 +2149,8 @@ out:
spin_unlock(&pblk->trans_lock);
}
-void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
- sector_t blba, int nr_secs)
+int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
+ sector_t blba, int nr_secs, bool *from_cache)
{
int i;
@@ -2150,10 +2164,19 @@ void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);
+ if (i > 0 && *from_cache)
+ break;
+ *from_cache = false;
+
kref_get(&line->ref);
+ } else {
+ if (i > 0 && !*from_cache)
+ break;
+ *from_cache = true;
}
}
spin_unlock(&pblk->trans_lock);
+ return i;
}
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
@@ -2167,7 +2190,7 @@ void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
lba = lba_list[i];
if (lba != ADDR_EMPTY) {
/* logic error: lba out-of-bounds. Ignore update */
- if (!(lba < pblk->rl.nr_secs)) {
+ if (!(lba < pblk->capacity)) {
WARN(1, "pblk: corrupted L2P map request\n");
continue;
}
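The reworked pblk_lookup_l2p_seq() above now resolves at most one homogeneous run of sectors per call: it stops at the first transition between cache-resident and media-resident mappings, returns the number of sectors it resolved, and reports through *from_cache which kind the run is. A minimal caller sketch under those assumptions; walk_l2p_runs() and its locals are illustrative names only, the real consumer being pblk_read_ppalist_rq() in pblk-read.c below:

static void walk_l2p_runs(struct pblk *pblk, struct ppa_addr *ppas,
			  sector_t blba, int nr_secs)
{
	bool from_cache;
	int done = 0;

	while (done < nr_secs) {
		/* Each call covers one run that is entirely from the write
		 * buffer or entirely from the device, never a mix.
		 */
		int n = pblk_lookup_l2p_seq(pblk, &ppas[done], blba + done,
					    nr_secs - done, &from_cache);

		done += n;	/* the next call starts at the boundary */
	}
}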
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
index 26a52ea7ec45..63ee205b41c4 100644
--- a/drivers/lightnvm/pblk-gc.c
+++ b/drivers/lightnvm/pblk-gc.c
@@ -59,24 +59,28 @@ static void pblk_gc_writer_kick(struct pblk_gc *gc)
wake_up_process(gc->gc_writer_ts);
}
-static void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
+void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
{
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct list_head *move_list;
+ spin_lock(&l_mg->gc_lock);
spin_lock(&line->lock);
WARN_ON(line->state != PBLK_LINESTATE_GC);
line->state = PBLK_LINESTATE_CLOSED;
trace_pblk_line_state(pblk_disk_name(pblk), line->id,
line->state);
+
+ /* We need to reset gc_group in order to ensure that
+ * pblk_line_gc_list() returns the proper move_list,
+ * since the current line is not on any of the GC lists
+ * right now.
+ */
+ line->gc_group = PBLK_LINEGC_NONE;
move_list = pblk_line_gc_list(pblk, line);
spin_unlock(&line->lock);
-
- if (move_list) {
- spin_lock(&l_mg->gc_lock);
- list_add_tail(&line->list, move_list);
- spin_unlock(&l_mg->gc_lock);
- }
+ list_add_tail(&line->list, move_list);
+ spin_unlock(&l_mg->gc_lock);
}
static void pblk_gc_line_ws(struct work_struct *work)
@@ -84,8 +88,6 @@ static void pblk_gc_line_ws(struct work_struct *work)
struct pblk_line_ws *gc_rq_ws = container_of(work,
struct pblk_line_ws, ws);
struct pblk *pblk = gc_rq_ws->pblk;
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
struct pblk_gc *gc = &pblk->gc;
struct pblk_line *line = gc_rq_ws->line;
struct pblk_gc_rq *gc_rq = gc_rq_ws->priv;
@@ -93,18 +95,10 @@ static void pblk_gc_line_ws(struct work_struct *work)
up(&gc->gc_sem);
- gc_rq->data = vmalloc(array_size(gc_rq->nr_secs, geo->csecs));
- if (!gc_rq->data) {
- pblk_err(pblk, "could not GC line:%d (%d/%d)\n",
- line->id, *line->vsc, gc_rq->nr_secs);
- goto out;
- }
-
/* Read from GC victim block */
ret = pblk_submit_read_gc(pblk, gc_rq);
if (ret) {
- pblk_err(pblk, "failed GC read in line:%d (err:%d)\n",
- line->id, ret);
+ line->w_err_gc->has_gc_err = 1;
goto out;
}
@@ -189,6 +183,8 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work)
struct pblk_line *line = line_ws->line;
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct nvm_geo *geo = &dev->geo;
struct pblk_gc *gc = &pblk->gc;
struct pblk_line_ws *gc_rq_ws;
struct pblk_gc_rq *gc_rq;
@@ -247,9 +243,13 @@ next_rq:
gc_rq->nr_secs = nr_secs;
gc_rq->line = line;
+ gc_rq->data = vmalloc(array_size(gc_rq->nr_secs, geo->csecs));
+ if (!gc_rq->data)
+ goto fail_free_gc_rq;
+
gc_rq_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
if (!gc_rq_ws)
- goto fail_free_gc_rq;
+ goto fail_free_gc_data;
gc_rq_ws->pblk = pblk;
gc_rq_ws->line = line;
@@ -281,6 +281,8 @@ out:
return;
+fail_free_gc_data:
+ vfree(gc_rq->data);
fail_free_gc_rq:
kfree(gc_rq);
fail_free_lba_list:
@@ -290,8 +292,11 @@ fail_free_invalid_bitmap:
fail_free_ws:
kfree(line_ws);
+ /* The line goes back to the closed state, so we cannot release
+ * the additional line reference; that is done only on the
+ * GC-to-free line state transition.
+ */
pblk_put_line_back(pblk, line);
- kref_put(&line->ref, pblk_line_put);
atomic_dec(&gc->read_inflight_gc);
pblk_err(pblk, "failed to GC line %d\n", line->id);
@@ -355,8 +360,13 @@ static int pblk_gc_read(struct pblk *pblk)
pblk_gc_kick(pblk);
- if (pblk_gc_line(pblk, line))
+ if (pblk_gc_line(pblk, line)) {
pblk_err(pblk, "failed to GC line %d\n", line->id);
+ /* rollback */
+ spin_lock(&gc->r_lock);
+ list_add_tail(&line->list, &gc->r_list);
+ spin_unlock(&gc->r_lock);
+ }
return 0;
}
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 8b643d0bffae..b351c7f002de 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -47,33 +47,6 @@ static struct pblk_global_caches pblk_caches = {
struct bio_set pblk_bio_set;
-static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
- struct bio *bio)
-{
- int ret;
-
- /* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
- * constraint. Writes can be of arbitrary size.
- */
- if (bio_data_dir(bio) == READ) {
- blk_queue_split(q, &bio);
- ret = pblk_submit_read(pblk, bio);
- if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
- bio_put(bio);
-
- return ret;
- }
-
- /* Prevent deadlock in the case of a modest LUN configuration and large
- * user I/Os. Unless stalled, the rate limiter leaves at least 256KB
- * available for user I/O.
- */
- if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
- blk_queue_split(q, &bio);
-
- return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
-}
-
static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
struct pblk *pblk = q->queuedata;
@@ -86,13 +59,21 @@ static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
}
}
- switch (pblk_rw_io(q, pblk, bio)) {
- case NVM_IO_ERR:
- bio_io_error(bio);
- break;
- case NVM_IO_DONE:
- bio_endio(bio);
- break;
+ /* Read requests must be <= 256KB due to NVMe's 64-bit completion bitmap
+ * constraint. Writes can be of arbitrary size.
+ */
+ if (bio_data_dir(bio) == READ) {
+ blk_queue_split(q, &bio);
+ pblk_submit_read(pblk, bio);
+ } else {
+ /* Prevent deadlock in the case of a modest LUN configuration
+ * and large user I/Os. Unless stalled, the rate limiter
+ * leaves at least 256KB available for user I/O.
+ */
+ if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
+ blk_queue_split(q, &bio);
+
+ pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
}
return BLK_QC_T_NONE;
@@ -105,7 +86,7 @@ static size_t pblk_trans_map_size(struct pblk *pblk)
if (pblk->addrf_len < 32)
entry_size = 4;
- return entry_size * pblk->rl.nr_secs;
+ return entry_size * pblk->capacity;
}
#ifdef CONFIG_NVM_PBLK_DEBUG
@@ -164,13 +145,18 @@ static int pblk_l2p_init(struct pblk *pblk, bool factory_init)
int ret = 0;
map_size = pblk_trans_map_size(pblk);
- pblk->trans_map = vmalloc(map_size);
- if (!pblk->trans_map)
+ pblk->trans_map = __vmalloc(map_size, GFP_KERNEL | __GFP_NOWARN
+ | __GFP_RETRY_MAYFAIL | __GFP_HIGHMEM,
+ PAGE_KERNEL);
+ if (!pblk->trans_map) {
+ pblk_err(pblk, "failed to allocate L2P (need %zu bytes of memory)\n",
+ map_size);
return -ENOMEM;
+ }
pblk_ppa_set_empty(&ppa);
- for (i = 0; i < pblk->rl.nr_secs; i++)
+ for (i = 0; i < pblk->capacity; i++)
pblk_trans_map_set(pblk, i, ppa);
ret = pblk_l2p_recover(pblk, factory_init);
@@ -701,7 +687,6 @@ static int pblk_set_provision(struct pblk *pblk, int nr_free_chks)
* on user capacity consider only provisioned blocks
*/
pblk->rl.total_blocks = nr_free_chks;
- pblk->rl.nr_secs = nr_free_chks * geo->clba;
/* Consider sectors used for metadata */
sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
@@ -1284,7 +1269,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
pblk_info(pblk, "luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
geo->all_luns, pblk->l_mg.nr_lines,
- (unsigned long long)pblk->rl.nr_secs,
+ (unsigned long long)pblk->capacity,
pblk->rwb.nr_entries);
wake_up_process(pblk->writer_ts);
diff --git a/drivers/lightnvm/pblk-map.c b/drivers/lightnvm/pblk-map.c
index 7fbc99b60cac..5408e32b2f13 100644
--- a/drivers/lightnvm/pblk-map.c
+++ b/drivers/lightnvm/pblk-map.c
@@ -162,6 +162,7 @@ int pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
*erase_ppa = ppa_list[i];
erase_ppa->a.blk = e_line->id;
+ erase_ppa->a.reserved = 0;
spin_unlock(&e_line->lock);
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index 03c241b340ea..5abb1705b039 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -642,7 +642,7 @@ try:
* be directed to disk.
*/
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
- struct ppa_addr ppa, int bio_iter, bool advanced_bio)
+ struct ppa_addr ppa)
{
struct pblk *pblk = container_of(rb, struct pblk, rwb);
struct pblk_rb_entry *entry;
@@ -673,15 +673,6 @@ int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
ret = 0;
goto out;
}
-
- /* Only advance the bio if it hasn't been advanced already. If advanced,
- * this bio is at least a partial bio (i.e., it has partially been
- * filled with data from the cache). If part of the data resides on the
- * media, we will read later on
- */
- if (unlikely(!advanced_bio))
- bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE);
-
data = bio_data(bio);
memcpy(data, entry->data, rb->seg_size);
@@ -799,8 +790,8 @@ int pblk_rb_tear_down_check(struct pblk_rb *rb)
}
out:
- spin_unlock(&rb->w_lock);
spin_unlock_irq(&rb->s_lock);
+ spin_unlock(&rb->w_lock);
return ret;
}
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 0b7d5fb4548d..d98ea392fe33 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -26,8 +26,7 @@
* issued.
*/
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
- sector_t lba, struct ppa_addr ppa,
- int bio_iter, bool advanced_bio)
+ sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
/* Callers must ensure that the ppa points to a cache address */
@@ -35,73 +34,75 @@ static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
BUG_ON(!pblk_addr_in_cache(ppa));
#endif
- return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
- bio_iter, advanced_bio);
+ return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa);
}
-static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
+static int pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
struct bio *bio, sector_t blba,
- unsigned long *read_bitmap)
+ bool *from_cache)
{
void *meta_list = rqd->meta_list;
- struct ppa_addr ppas[NVM_MAX_VLBA];
- int nr_secs = rqd->nr_ppas;
- bool advanced_bio = false;
- int i, j = 0;
+ int nr_secs, i;
- pblk_lookup_l2p_seq(pblk, ppas, blba, nr_secs);
+retry:
+ nr_secs = pblk_lookup_l2p_seq(pblk, rqd->ppa_list, blba, rqd->nr_ppas,
+ from_cache);
+
+ if (!*from_cache)
+ goto end;
for (i = 0; i < nr_secs; i++) {
- struct ppa_addr p = ppas[i];
struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
sector_t lba = blba + i;
-retry:
- if (pblk_ppa_empty(p)) {
+ if (pblk_ppa_empty(rqd->ppa_list[i])) {
__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
- WARN_ON(test_and_set_bit(i, read_bitmap));
meta->lba = addr_empty;
-
- if (unlikely(!advanced_bio)) {
- bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
- advanced_bio = true;
+ } else if (pblk_addr_in_cache(rqd->ppa_list[i])) {
+ /*
+ * Try to read from write buffer. The address is later
+ * checked on the write buffer to prevent retrieving
+ * overwritten data.
+ */
+ if (!pblk_read_from_cache(pblk, bio, lba,
+ rqd->ppa_list[i])) {
+ if (i == 0) {
+ /*
+ * We have not called bio_advance()
+ * yet, so we can simply retry.
+ */
+ goto retry;
+ } else {
+ /*
+ * bio_advance() has already been
+ * called, so we cannot retry; quit
+ * this function so that the caller
+ * can handle splitting the bio at
+ * the current sector position.
+ */
+ nr_secs = i;
+ goto end;
+ }
}
-
- goto next;
- }
-
- /* Try to read from write buffer. The address is later checked
- * on the write buffer to prevent retrieving overwritten data.
- */
- if (pblk_addr_in_cache(p)) {
- if (!pblk_read_from_cache(pblk, bio, lba, p, i,
- advanced_bio)) {
- pblk_lookup_l2p_seq(pblk, &p, lba, 1);
- goto retry;
- }
- WARN_ON(test_and_set_bit(i, read_bitmap));
meta->lba = cpu_to_le64(lba);
- advanced_bio = true;
#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_inc(&pblk->cache_reads);
#endif
- } else {
- /* Read from media non-cached sectors */
- rqd->ppa_list[j++] = p;
}
-
-next:
- if (advanced_bio)
- bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
+ bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
}
+end:
if (pblk_io_aligned(pblk, nr_secs))
rqd->is_seq = 1;
#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(nr_secs, &pblk->inflight_reads);
#endif
+
+ return nr_secs;
}
@@ -175,12 +176,12 @@ static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
}
-static void pblk_end_user_read(struct bio *bio)
+static void pblk_end_user_read(struct bio *bio, int error)
{
-#ifdef CONFIG_NVM_PBLK_DEBUG
- WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n");
-#endif
- bio_endio(bio);
+ if (error && error != NVM_RSP_WARN_HIGHECC)
+ bio_io_error(bio);
+ else
+ bio_endio(bio);
}
static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
@@ -197,9 +198,7 @@ static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
pblk_log_read_err(pblk, rqd);
pblk_read_check_seq(pblk, rqd, r_ctx->lba);
-
- if (int_bio)
- bio_put(int_bio);
+ bio_put(int_bio);
if (put_line)
pblk_rq_to_line_put(pblk, rqd);
@@ -219,188 +218,17 @@ static void pblk_end_io_read(struct nvm_rq *rqd)
struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
struct bio *bio = (struct bio *)r_ctx->private;
- pblk_end_user_read(bio);
+ pblk_end_user_read(bio, rqd->error);
__pblk_end_io_read(pblk, rqd, true);
}
-static void pblk_end_partial_read(struct nvm_rq *rqd)
-{
- struct pblk *pblk = rqd->private;
- struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
- struct pblk_pr_ctx *pr_ctx = r_ctx->private;
- struct pblk_sec_meta *meta;
- struct bio *new_bio = rqd->bio;
- struct bio *bio = pr_ctx->orig_bio;
- void *meta_list = rqd->meta_list;
- unsigned long *read_bitmap = pr_ctx->bitmap;
- struct bvec_iter orig_iter = BVEC_ITER_ALL_INIT;
- struct bvec_iter new_iter = BVEC_ITER_ALL_INIT;
- int nr_secs = pr_ctx->orig_nr_secs;
- int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
- void *src_p, *dst_p;
- int bit, i;
-
- if (unlikely(nr_holes == 1)) {
- struct ppa_addr ppa;
-
- ppa = rqd->ppa_addr;
- rqd->ppa_list = pr_ctx->ppa_ptr;
- rqd->dma_ppa_list = pr_ctx->dma_ppa_list;
- rqd->ppa_list[0] = ppa;
- }
-
- for (i = 0; i < nr_secs; i++) {
- meta = pblk_get_meta(pblk, meta_list, i);
- pr_ctx->lba_list_media[i] = le64_to_cpu(meta->lba);
- meta->lba = cpu_to_le64(pr_ctx->lba_list_mem[i]);
- }
-
- /* Fill the holes in the original bio */
- i = 0;
- for (bit = 0; bit < nr_secs; bit++) {
- if (!test_bit(bit, read_bitmap)) {
- struct bio_vec dst_bv, src_bv;
- struct pblk_line *line;
-
- line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
- kref_put(&line->ref, pblk_line_put);
-
- meta = pblk_get_meta(pblk, meta_list, bit);
- meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]);
-
- dst_bv = bio_iter_iovec(bio, orig_iter);
- src_bv = bio_iter_iovec(new_bio, new_iter);
-
- src_p = kmap_atomic(src_bv.bv_page);
- dst_p = kmap_atomic(dst_bv.bv_page);
-
- memcpy(dst_p + dst_bv.bv_offset,
- src_p + src_bv.bv_offset,
- PBLK_EXPOSED_PAGE_SIZE);
-
- kunmap_atomic(src_p);
- kunmap_atomic(dst_p);
-
- flush_dcache_page(dst_bv.bv_page);
- mempool_free(src_bv.bv_page, &pblk->page_bio_pool);
-
- bio_advance_iter(new_bio, &new_iter,
- PBLK_EXPOSED_PAGE_SIZE);
- i++;
- }
- bio_advance_iter(bio, &orig_iter, PBLK_EXPOSED_PAGE_SIZE);
- }
-
- bio_put(new_bio);
- kfree(pr_ctx);
-
- /* restore original request */
- rqd->bio = NULL;
- rqd->nr_ppas = nr_secs;
-
- bio_endio(bio);
- __pblk_end_io_read(pblk, rqd, false);
-}
-
-static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
- unsigned int bio_init_idx,
- unsigned long *read_bitmap,
- int nr_holes)
-{
- void *meta_list = rqd->meta_list;
- struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
- struct pblk_pr_ctx *pr_ctx;
- struct bio *new_bio, *bio = r_ctx->private;
- int nr_secs = rqd->nr_ppas;
- int i;
-
- new_bio = bio_alloc(GFP_KERNEL, nr_holes);
-
- if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
- goto fail_bio_put;
-
- if (nr_holes != new_bio->bi_vcnt) {
- WARN_ONCE(1, "pblk: malformed bio\n");
- goto fail_free_pages;
- }
-
- pr_ctx = kzalloc(sizeof(struct pblk_pr_ctx), GFP_KERNEL);
- if (!pr_ctx)
- goto fail_free_pages;
-
- for (i = 0; i < nr_secs; i++) {
- struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
-
- pr_ctx->lba_list_mem[i] = le64_to_cpu(meta->lba);
- }
-
- new_bio->bi_iter.bi_sector = 0; /* internal bio */
- bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
-
- rqd->bio = new_bio;
- rqd->nr_ppas = nr_holes;
-
- pr_ctx->orig_bio = bio;
- bitmap_copy(pr_ctx->bitmap, read_bitmap, NVM_MAX_VLBA);
- pr_ctx->bio_init_idx = bio_init_idx;
- pr_ctx->orig_nr_secs = nr_secs;
- r_ctx->private = pr_ctx;
-
- if (unlikely(nr_holes == 1)) {
- pr_ctx->ppa_ptr = rqd->ppa_list;
- pr_ctx->dma_ppa_list = rqd->dma_ppa_list;
- rqd->ppa_addr = rqd->ppa_list[0];
- }
- return 0;
-
-fail_free_pages:
- pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt);
-fail_bio_put:
- bio_put(new_bio);
-
- return -ENOMEM;
-}
-
-static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
- unsigned int bio_init_idx,
- unsigned long *read_bitmap, int nr_secs)
-{
- int nr_holes;
- int ret;
-
- nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
-
- if (pblk_setup_partial_read(pblk, rqd, bio_init_idx, read_bitmap,
- nr_holes))
- return NVM_IO_ERR;
-
- rqd->end_io = pblk_end_partial_read;
-
- ret = pblk_submit_io(pblk, rqd);
- if (ret) {
- bio_put(rqd->bio);
- pblk_err(pblk, "partial read IO submission failed\n");
- goto err;
- }
-
- return NVM_IO_OK;
-
-err:
- pblk_err(pblk, "failed to perform partial read\n");
-
- /* Free allocated pages in new bio */
- pblk_bio_free_pages(pblk, rqd->bio, 0, rqd->bio->bi_vcnt);
- __pblk_end_io_read(pblk, rqd, false);
- return NVM_IO_ERR;
-}
-
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
- sector_t lba, unsigned long *read_bitmap)
+ sector_t lba, bool *from_cache)
{
struct pblk_sec_meta *meta = pblk_get_meta(pblk, rqd->meta_list, 0);
struct ppa_addr ppa;
- pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
+ pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache);
#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_inc(&pblk->inflight_reads);
@@ -410,7 +238,6 @@ retry:
if (pblk_ppa_empty(ppa)) {
__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
- WARN_ON(test_and_set_bit(0, read_bitmap));
meta->lba = addr_empty;
return;
}
@@ -419,12 +246,11 @@ retry:
* write buffer to prevent retrieving overwritten data.
*/
if (pblk_addr_in_cache(ppa)) {
- if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
- pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
+ if (!pblk_read_from_cache(pblk, bio, lba, ppa)) {
+ pblk_lookup_l2p_seq(pblk, &ppa, lba, 1, from_cache);
goto retry;
}
- WARN_ON(test_and_set_bit(0, read_bitmap));
meta->lba = cpu_to_le64(lba);
#ifdef CONFIG_NVM_PBLK_DEBUG
@@ -435,95 +261,92 @@ retry:
}
}
-int pblk_submit_read(struct pblk *pblk, struct bio *bio)
+void pblk_submit_read(struct pblk *pblk, struct bio *bio)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct request_queue *q = dev->q;
sector_t blba = pblk_get_lba(bio);
unsigned int nr_secs = pblk_get_secs(bio);
+ bool from_cache;
struct pblk_g_ctx *r_ctx;
struct nvm_rq *rqd;
- unsigned int bio_init_idx;
- DECLARE_BITMAP(read_bitmap, NVM_MAX_VLBA);
- int ret = NVM_IO_ERR;
+ struct bio *int_bio, *split_bio;
generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),
&pblk->disk->part0);
- bitmap_zero(read_bitmap, nr_secs);
-
rqd = pblk_alloc_rqd(pblk, PBLK_READ);
rqd->opcode = NVM_OP_PREAD;
rqd->nr_ppas = nr_secs;
- rqd->bio = NULL; /* cloned bio if needed */
rqd->private = pblk;
rqd->end_io = pblk_end_io_read;
r_ctx = nvm_rq_to_pdu(rqd);
r_ctx->start_time = jiffies;
r_ctx->lba = blba;
- r_ctx->private = bio; /* original bio */
- /* Save the index for this bio's start. This is needed in case
- * we need to fill a partial read.
- */
- bio_init_idx = pblk_get_bi_idx(bio);
+ if (pblk_alloc_rqd_meta(pblk, rqd)) {
+ bio_io_error(bio);
+ pblk_free_rqd(pblk, rqd, PBLK_READ);
+ return;
+ }
- if (pblk_alloc_rqd_meta(pblk, rqd))
- goto fail_rqd_free;
+ /* Clone read bio to deal internally with:
+ * - read errors when reading from drive
+ * - bio_advance() calls during cache reads
+ */
+ int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
if (nr_secs > 1)
- pblk_read_ppalist_rq(pblk, rqd, bio, blba, read_bitmap);
+ nr_secs = pblk_read_ppalist_rq(pblk, rqd, int_bio, blba,
+ &from_cache);
else
- pblk_read_rq(pblk, rqd, bio, blba, read_bitmap);
+ pblk_read_rq(pblk, rqd, int_bio, blba, &from_cache);
- if (bitmap_full(read_bitmap, nr_secs)) {
+split_retry:
+ r_ctx->private = bio; /* original bio */
+ rqd->bio = int_bio; /* internal bio */
+
+ if (from_cache && nr_secs == rqd->nr_ppas) {
+ /* All data was read from cache, we can complete the IO. */
+ pblk_end_user_read(bio, 0);
atomic_inc(&pblk->inflight_io);
__pblk_end_io_read(pblk, rqd, false);
- return NVM_IO_DONE;
- }
-
- /* All sectors are to be read from the device */
- if (bitmap_empty(read_bitmap, rqd->nr_ppas)) {
- struct bio *int_bio = NULL;
+ } else if (nr_secs != rqd->nr_ppas) {
+ /* The read bio request could be partially filled by the write
+ * buffer, but there are some holes that need to be read from
+ * the drive. To handle this, we use the block layer mechanism
+ * to split this request into smaller ones and chain them
+ * together.
+ */
+ split_bio = bio_split(bio, nr_secs * NR_PHY_IN_LOG, GFP_KERNEL,
+ &pblk_bio_set);
+ bio_chain(split_bio, bio);
+ generic_make_request(bio);
+
+ /* The new bio contains the first N sectors of the previous one,
+ * so we can continue to use the existing rqd, but we need to
+ * shrink the number of PPAs in it. The new bio is also guaranteed
+ * to contain data only from the cache or only from the drive,
+ * never a mix of the two.
+ */
+ bio = split_bio;
+ rqd->nr_ppas = nr_secs;
+ if (rqd->nr_ppas == 1)
+ rqd->ppa_addr = rqd->ppa_list[0];
- /* Clone read bio to deal with read errors internally */
+ /* Recreate int_bio - the existing one might already have had
+ * internal fields modified.
+ */
+ bio_put(int_bio);
int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
- if (!int_bio) {
- pblk_err(pblk, "could not clone read bio\n");
- goto fail_end_io;
- }
-
- rqd->bio = int_bio;
-
- if (pblk_submit_io(pblk, rqd)) {
- pblk_err(pblk, "read IO submission failed\n");
- ret = NVM_IO_ERR;
- goto fail_end_io;
- }
-
- return NVM_IO_OK;
+ goto split_retry;
+ } else if (pblk_submit_io(pblk, rqd)) {
+ /* Submitting IO to drive failed, let's report an error */
+ rqd->error = -ENODEV;
+ pblk_end_io_read(rqd);
}
-
- /* The read bio request could be partially filled by the write buffer,
- * but there are some holes that need to be read from the drive.
- */
- ret = pblk_partial_read_bio(pblk, rqd, bio_init_idx, read_bitmap,
- nr_secs);
- if (ret)
- goto fail_meta_free;
-
- return NVM_IO_OK;
-
-fail_meta_free:
- nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
-fail_rqd_free:
- pblk_free_rqd(pblk, rqd, PBLK_READ);
- return ret;
-fail_end_io:
- __pblk_end_io_read(pblk, rqd, false);
- return ret;
}
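The split_retry path above leans on the block layer's standard split-and-chain idiom: bio_split() carves off the leading sectors the current rqd can service, bio_chain() makes the original bio's completion wait on the remainder, and generic_make_request() re-queues that remainder. A minimal sketch of the pattern under those assumptions (sectors and bs are placeholders, not pblk symbols):

static struct bio *split_and_requeue(struct bio *bio, int sectors,
				     struct bio_set *bs)
{
	struct bio *split = bio_split(bio, sectors, GFP_KERNEL, bs);

	bio_chain(split, bio);		/* parent completes only after both parts */
	generic_make_request(bio);	/* hand the remainder back to the block layer */

	return split;			/* caller keeps servicing the front part */
}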
static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
@@ -568,7 +391,7 @@ static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
goto out;
/* logic error: lba out-of-bounds */
- if (lba >= pblk->rl.nr_secs) {
+ if (lba >= pblk->capacity) {
WARN(1, "pblk: read lba out of bounds\n");
goto out;
}
@@ -642,7 +465,6 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
if (pblk_submit_io_sync(pblk, &rqd)) {
ret = -EIO;
- pblk_err(pblk, "GC read request failed\n");
goto err_free_bio;
}
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index d86f580036d3..e6dda04de144 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -93,10 +93,24 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
static void pblk_update_line_wp(struct pblk *pblk, struct pblk_line *line,
u64 written_secs)
{
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
int i;
for (i = 0; i < written_secs; i += pblk->min_write_pgs)
- pblk_alloc_page(pblk, line, pblk->min_write_pgs);
+ __pblk_alloc_page(pblk, line, pblk->min_write_pgs);
+
+ spin_lock(&l_mg->free_lock);
+ if (written_secs > line->left_msecs) {
+ /*
+ * We have all data sectors written
+ * and some emeta sectors written too.
+ */
+ line->left_msecs = 0;
+ } else {
+ /* We have only some data sectors written. */
+ line->left_msecs -= written_secs;
+ }
+ spin_unlock(&l_mg->free_lock);
}
static u64 pblk_sec_in_open_line(struct pblk *pblk, struct pblk_line *line)
@@ -165,6 +179,7 @@ static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
struct pblk_pad_rq *pad_rq;
struct nvm_rq *rqd;
struct bio *bio;
+ struct ppa_addr *ppa_list;
void *data;
__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
u64 w_ptr = line->cur_sec;
@@ -194,7 +209,7 @@ next_pad_rq:
rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
if (rq_ppas < pblk->min_write_pgs) {
pblk_err(pblk, "corrupted pad line %d\n", line->id);
- goto fail_free_pad;
+ goto fail_complete;
}
rq_len = rq_ppas * geo->csecs;
@@ -203,7 +218,7 @@ next_pad_rq:
PBLK_VMALLOC_META, GFP_KERNEL);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
- goto fail_free_pad;
+ goto fail_complete;
}
bio->bi_iter.bi_sector = 0; /* internal bio */
@@ -212,8 +227,11 @@ next_pad_rq:
rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
ret = pblk_alloc_rqd_meta(pblk, rqd);
- if (ret)
- goto fail_free_rqd;
+ if (ret) {
+ pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
+ bio_put(bio);
+ goto fail_complete;
+ }
rqd->bio = bio;
rqd->opcode = NVM_OP_PWRITE;
@@ -222,6 +240,7 @@ next_pad_rq:
rqd->end_io = pblk_end_io_recov;
rqd->private = pad_rq;
+ ppa_list = nvm_rq_to_ppa_list(rqd);
meta_list = rqd->meta_list;
for (i = 0; i < rqd->nr_ppas; ) {
@@ -249,18 +268,21 @@ next_pad_rq:
lba_list[w_ptr] = addr_empty;
meta = pblk_get_meta(pblk, meta_list, i);
meta->lba = addr_empty;
- rqd->ppa_list[i] = dev_ppa;
+ ppa_list[i] = dev_ppa;
}
}
kref_get(&pad_rq->ref);
- pblk_down_chunk(pblk, rqd->ppa_list[0]);
+ pblk_down_chunk(pblk, ppa_list[0]);
ret = pblk_submit_io(pblk, rqd);
if (ret) {
pblk_err(pblk, "I/O submission failed: %d\n", ret);
- pblk_up_chunk(pblk, rqd->ppa_list[0]);
- goto fail_free_rqd;
+ pblk_up_chunk(pblk, ppa_list[0]);
+ kref_put(&pad_rq->ref, pblk_recov_complete);
+ pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
+ bio_put(bio);
+ goto fail_complete;
}
left_line_ppas -= rq_ppas;
@@ -268,13 +290,9 @@ next_pad_rq:
if (left_ppas && left_line_ppas)
goto next_pad_rq;
+fail_complete:
kref_put(&pad_rq->ref, pblk_recov_complete);
-
- if (!wait_for_completion_io_timeout(&pad_rq->wait,
- msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
- pblk_err(pblk, "pad write timed out\n");
- ret = -ETIME;
- }
+ wait_for_completion(&pad_rq->wait);
if (!pblk_line_is_full(line))
pblk_err(pblk, "corrupted padded line: %d\n", line->id);
@@ -283,14 +301,6 @@ next_pad_rq:
free_rq:
kfree(pad_rq);
return ret;
-
-fail_free_rqd:
- pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
- bio_put(bio);
-fail_free_pad:
- kfree(pad_rq);
- vfree(data);
- return ret;
}
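With the per-request error paths above folded into fail_complete, the pad path now relies on reference counting rather than a command timeout: the issuer holds one reference on pad_rq, each submitted pad write holds another that its end_io drops, and wait_for_completion() returns once the release callback has fired. A sketch of that pattern under illustrative names (pad_ctx and its helpers are not the pblk structures, and the context is assumed to have been set up with kref_init() and init_completion()):

struct pad_ctx {
	struct kref ref;
	struct completion wait;
};

static void pad_ctx_release(struct kref *ref)
{
	struct pad_ctx *ctx = container_of(ref, struct pad_ctx, ref);

	complete(&ctx->wait);	/* last reference gone: wake the waiter */
}

static void pad_ctx_wait(struct pad_ctx *ctx)
{
	kref_put(&ctx->ref, pad_ctx_release);	/* drop the issuer's reference */
	wait_for_completion(&ctx->wait);	/* returns after every pad write completes */
}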
static int pblk_pad_distance(struct pblk *pblk, struct pblk_line *line)
@@ -412,6 +422,7 @@ retry_rq:
rqd->ppa_list = ppa_list;
rqd->dma_ppa_list = dma_ppa_list;
rqd->dma_meta_list = dma_meta_list;
+ ppa_list = nvm_rq_to_ppa_list(rqd);
if (pblk_io_aligned(pblk, rq_ppas))
rqd->is_seq = 1;
@@ -430,7 +441,7 @@ retry_rq:
}
for (j = 0; j < pblk->min_write_pgs; j++, i++)
- rqd->ppa_list[i] =
+ ppa_list[i] =
addr_to_gen_ppa(pblk, paddr + j, line->id);
}
@@ -444,7 +455,7 @@ retry_rq:
atomic_dec(&pblk->inflight_io);
/* If a read fails, do a best effort by padding the line and retrying */
- if (rqd->error) {
+ if (rqd->error && rqd->error != NVM_RSP_WARN_HIGHECC) {
int pad_distance, ret;
if (padded) {
@@ -474,11 +485,11 @@ retry_rq:
lba_list[paddr++] = cpu_to_le64(lba);
- if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
+ if (lba == ADDR_EMPTY || lba >= pblk->capacity)
continue;
line->nr_valid_lbas++;
- pblk_update_map(pblk, lba, rqd->ppa_list[i]);
+ pblk_update_map(pblk, lba, ppa_list[i]);
}
left_ppas -= rq_ppas;
@@ -647,10 +658,12 @@ static int pblk_line_was_written(struct pblk_line *line,
bppa = pblk->luns[smeta_blk].bppa;
chunk = &line->chks[pblk_ppa_to_pos(geo, bppa)];
- if (chunk->state & NVM_CHK_ST_FREE)
- return 0;
+ if (chunk->state & NVM_CHK_ST_CLOSED ||
+ (chunk->state & NVM_CHK_ST_OPEN
+ && chunk->wp >= lm->smeta_sec))
+ return 1;
- return 1;
+ return 0;
}
static bool pblk_line_is_open(struct pblk *pblk, struct pblk_line *line)
@@ -844,6 +857,7 @@ next:
spin_unlock(&l_mg->free_lock);
} else {
spin_lock(&l_mg->free_lock);
+ l_mg->data_line = data_line;
/* Allocate next line for preparation */
l_mg->data_next = pblk_line_get(pblk);
if (l_mg->data_next) {
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index 6593deab52da..4e63f9b5954c 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -228,6 +228,7 @@ static void pblk_submit_rec(struct work_struct *work)
mempool_free(recovery, &pblk->rec_pool);
atomic_dec(&pblk->inflight_io);
+ pblk_write_kick(pblk);
}
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index ac3ab778e976..a67855387f53 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -43,8 +43,6 @@
#define PBLK_CACHE_NAME_LEN (DISK_NAME_LEN + 16)
-#define PBLK_COMMAND_TIMEOUT_MS 30000
-
/* Max 512 LUNs per device */
#define PBLK_MAX_LUNS_BITMAP (4)
@@ -123,18 +121,6 @@ struct pblk_g_ctx {
u64 lba;
};
-/* partial read context */
-struct pblk_pr_ctx {
- struct bio *orig_bio;
- DECLARE_BITMAP(bitmap, NVM_MAX_VLBA);
- unsigned int orig_nr_secs;
- unsigned int bio_init_idx;
- void *ppa_ptr;
- dma_addr_t dma_ppa_list;
- u64 lba_list_mem[NVM_MAX_VLBA];
- u64 lba_list_media[NVM_MAX_VLBA];
-};
-
/* Pad context */
struct pblk_pad_rq {
struct pblk *pblk;
@@ -305,7 +291,6 @@ struct pblk_rl {
struct timer_list u_timer;
- unsigned long long nr_secs;
unsigned long total_blocks;
atomic_t free_blocks; /* Total number of free blocks (+ OP) */
@@ -440,6 +425,7 @@ struct pblk_smeta {
struct pblk_w_err_gc {
int has_write_err;
+ int has_gc_err;
__le64 *lba_list;
};
@@ -465,7 +451,6 @@ struct pblk_line {
int meta_line; /* Metadata line id */
int meta_distance; /* Distance between data and metadata */
- u64 smeta_ssec; /* Sector where smeta starts */
u64 emeta_ssec; /* Sector where emeta starts */
unsigned int sec_in_line; /* Number of usable secs in line */
@@ -762,7 +747,7 @@ unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
unsigned int pos, unsigned int nr_entries,
unsigned int count);
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
- struct ppa_addr ppa, int bio_iter, bool advanced_bio);
+ struct ppa_addr ppa);
unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
@@ -862,15 +847,15 @@ int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
struct pblk_line *gc_line, u64 paddr);
void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
u64 *lba_list, int nr_secs);
-void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
- sector_t blba, int nr_secs);
+int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
+ sector_t blba, int nr_secs, bool *from_cache);
void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd);
/*
* pblk user I/O write path
*/
-int pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
+void pblk_write_to_cache(struct pblk *pblk, struct bio *bio,
unsigned long flags);
int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
@@ -896,7 +881,7 @@ void pblk_write_kick(struct pblk *pblk);
* pblk read path
*/
extern struct bio_set pblk_bio_set;
-int pblk_submit_read(struct pblk *pblk, struct bio *bio);
+void pblk_submit_read(struct pblk *pblk, struct bio *bio);
int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
/*
* pblk recovery
@@ -921,6 +906,7 @@ void pblk_gc_free_full_lines(struct pblk *pblk);
void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
int *gc_active);
int pblk_gc_sysfs_force(struct pblk *pblk, int force);
+void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line);
/*
* pblk rate limiter
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 22811784dc7d..00d5219094e5 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -8,6 +8,7 @@
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 2557f198e175..db269a348b20 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -436,6 +436,15 @@ config DM_DELAY
If unsure, say N.
+config DM_DUST
+ tristate "Bad sector simulation target"
+ depends on BLK_DEV_DM
+ ---help---
+ A target that simulates bad sector behavior.
+ Useful for testing.
+
+ If unsure, say N.
+
config DM_INIT
bool "DM \"dm-mod.create=\" parameter support"
depends on BLK_DEV_DM=y
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index a52b703e588e..be7a6eb92abc 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_DM_BUFIO) += dm-bufio.o
obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
obj-$(CONFIG_DM_DELAY) += dm-delay.o
+obj-$(CONFIG_DM_DUST) += dm-dust.o
obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o
obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
obj-$(CONFIG_DM_MULTIPATH_QL) += dm-queue-length.o
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 6fc93834da44..151aa95775be 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1167,11 +1167,18 @@ static int __load_discards(struct dm_cache_metadata *cmd,
if (r)
return r;
- for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
+ for (b = 0; ; b++) {
r = fn(context, cmd->discard_block_size, to_dblock(b),
dm_bitset_cursor_get_value(&c));
if (r)
break;
+
+ if (b >= (from_dblock(cmd->discard_nr_blocks) - 1))
+ break;
+
+ r = dm_bitset_cursor_next(&c);
+ if (r)
+ break;
}
dm_bitset_cursor_end(&c);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 7f6462f74ac8..1b16d34bb785 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -946,6 +946,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
{
#ifdef CONFIG_BLK_DEV_INTEGRITY
struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
+ struct mapped_device *md = dm_table_get_md(ti->table);
/* From now we require underlying device with our integrity profile */
if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
@@ -965,7 +966,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
if (crypt_integrity_aead(cc)) {
cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
- DMINFO("Integrity AEAD, tag size %u, IV size %u.",
+ DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
cc->integrity_tag_size, cc->integrity_iv_size);
if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
@@ -973,7 +974,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
return -EINVAL;
}
} else if (cc->integrity_iv_size)
- DMINFO("Additional per-sector space %u bytes for IV.",
+ DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
cc->integrity_iv_size);
if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
@@ -1031,11 +1032,11 @@ static u8 *org_iv_of_dmreq(struct crypt_config *cc,
return iv_of_dmreq(cc, dmreq) + cc->iv_size;
}
-static uint64_t *org_sector_of_dmreq(struct crypt_config *cc,
+static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
struct dm_crypt_request *dmreq)
{
u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
- return (uint64_t*) ptr;
+ return (__le64 *) ptr;
}
static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
@@ -1071,7 +1072,7 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
struct dm_crypt_request *dmreq;
u8 *iv, *org_iv, *tag_iv, *tag;
- uint64_t *sector;
+ __le64 *sector;
int r = 0;
BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
@@ -1143,9 +1144,11 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
r = crypto_aead_decrypt(req);
}
- if (r == -EBADMSG)
- DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
+ if (r == -EBADMSG) {
+ char b[BDEVNAME_SIZE];
+ DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
(unsigned long long)le64_to_cpu(*sector));
+ }
if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
@@ -1166,7 +1169,7 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
struct scatterlist *sg_in, *sg_out;
struct dm_crypt_request *dmreq;
u8 *iv, *org_iv, *tag_iv;
- uint64_t *sector;
+ __le64 *sector;
int r = 0;
/* Reject unexpected unaligned bio. */
@@ -1788,7 +1791,8 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);
if (error == -EBADMSG) {
- DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
+ char b[BDEVNAME_SIZE];
+ DMERR_LIMIT("%s: INTEGRITY AEAD ERROR, sector %llu", bio_devname(ctx->bio_in, b),
(unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
io->error = BLK_STS_PROTECTION;
} else if (error < 0)
@@ -1887,7 +1891,7 @@ static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
* algorithm implementation is used. Help people debug performance
* problems by logging the ->cra_driver_name.
*/
- DMINFO("%s using implementation \"%s\"", ciphermode,
+ DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
return 0;
}
@@ -1907,7 +1911,7 @@ static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
return err;
}
- DMINFO("%s using implementation \"%s\"", ciphermode,
+ DMDEBUG_LIMIT("%s using implementation \"%s\"", ciphermode,
crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
return 0;
}
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index fddffe251bf6..f496213f8b67 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -121,7 +121,8 @@ static void delay_dtr(struct dm_target *ti)
{
struct delay_c *dc = ti->private;
- destroy_workqueue(dc->kdelayd_wq);
+ if (dc->kdelayd_wq)
+ destroy_workqueue(dc->kdelayd_wq);
if (dc->read.dev)
dm_put_device(ti, dc->read.dev);
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
new file mode 100644
index 000000000000..845f376a72d9
--- /dev/null
+++ b/drivers/md/dm-dust.c
@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Red Hat, Inc.
+ *
+ * This is a test "dust" device, which fails reads on specified
+ * sectors, emulating the behavior of a hard disk drive sending
+ * a "Read Medium Error" sense.
+ *
+ */
+
+#include <linux/device-mapper.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+
+#define DM_MSG_PREFIX "dust"
+
+struct badblock {
+ struct rb_node node;
+ sector_t bb;
+};
+
+struct dust_device {
+ struct dm_dev *dev;
+ struct rb_root badblocklist;
+ unsigned long long badblock_count;
+ spinlock_t dust_lock;
+ unsigned int blksz;
+ unsigned int sect_per_block;
+ sector_t start;
+ bool fail_read_on_bb:1;
+ bool quiet_mode:1;
+};
+
+static struct badblock *dust_rb_search(struct rb_root *root, sector_t blk)
+{
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct badblock *bblk = rb_entry(node, struct badblock, node);
+
+ if (bblk->bb > blk)
+ node = node->rb_left;
+ else if (bblk->bb < blk)
+ node = node->rb_right;
+ else
+ return bblk;
+ }
+
+ return NULL;
+}
+
+static bool dust_rb_insert(struct rb_root *root, struct badblock *new)
+{
+ struct badblock *bblk;
+ struct rb_node **link = &root->rb_node, *parent = NULL;
+ sector_t value = new->bb;
+
+ while (*link) {
+ parent = *link;
+ bblk = rb_entry(parent, struct badblock, node);
+
+ if (bblk->bb > value)
+ link = &(*link)->rb_left;
+ else if (bblk->bb < value)
+ link = &(*link)->rb_right;
+ else
+ return false;
+ }
+
+ rb_link_node(&new->node, parent, link);
+ rb_insert_color(&new->node, root);
+
+ return true;
+}
+
+static int dust_remove_block(struct dust_device *dd, unsigned long long block)
+{
+ struct badblock *bblock;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->dust_lock, flags);
+ bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block);
+
+ if (bblock == NULL) {
+ if (!dd->quiet_mode) {
+ DMERR("%s: block %llu not found in badblocklist",
+ __func__, block);
+ }
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+ return -EINVAL;
+ }
+
+ rb_erase(&bblock->node, &dd->badblocklist);
+ dd->badblock_count--;
+ if (!dd->quiet_mode)
+ DMINFO("%s: badblock removed at block %llu", __func__, block);
+ kfree(bblock);
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+
+ return 0;
+}
+
+static int dust_add_block(struct dust_device *dd, unsigned long long block)
+{
+ struct badblock *bblock;
+ unsigned long flags;
+
+ bblock = kmalloc(sizeof(*bblock), GFP_KERNEL);
+ if (bblock == NULL) {
+ if (!dd->quiet_mode)
+ DMERR("%s: badblock allocation failed", __func__);
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&dd->dust_lock, flags);
+ bblock->bb = block * dd->sect_per_block;
+ if (!dust_rb_insert(&dd->badblocklist, bblock)) {
+ if (!dd->quiet_mode) {
+ DMERR("%s: block %llu already in badblocklist",
+ __func__, block);
+ }
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+ kfree(bblock);
+ return -EINVAL;
+ }
+
+ dd->badblock_count++;
+ if (!dd->quiet_mode)
+ DMINFO("%s: badblock added at block %llu", __func__, block);
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+
+ return 0;
+}
+
+static int dust_query_block(struct dust_device *dd, unsigned long long block)
+{
+ struct badblock *bblock;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dd->dust_lock, flags);
+ bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block);
+ if (bblock != NULL)
+ DMINFO("%s: block %llu found in badblocklist", __func__, block);
+ else
+ DMINFO("%s: block %llu not found in badblocklist", __func__, block);
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+
+ return 0;
+}
+
+static int __dust_map_read(struct dust_device *dd, sector_t thisblock)
+{
+ struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);
+
+ if (bblk)
+ return DM_MAPIO_KILL;
+
+ return DM_MAPIO_REMAPPED;
+}
+
+static int dust_map_read(struct dust_device *dd, sector_t thisblock,
+ bool fail_read_on_bb)
+{
+ unsigned long flags;
+ int ret = DM_MAPIO_REMAPPED;
+
+ if (fail_read_on_bb) {
+ spin_lock_irqsave(&dd->dust_lock, flags);
+ ret = __dust_map_read(dd, thisblock);
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+ }
+
+ return ret;
+}
+
+static void __dust_map_write(struct dust_device *dd, sector_t thisblock)
+{
+ struct badblock *bblk = dust_rb_search(&dd->badblocklist, thisblock);
+
+ if (bblk) {
+ rb_erase(&bblk->node, &dd->badblocklist);
+ dd->badblock_count--;
+ kfree(bblk);
+ if (!dd->quiet_mode) {
+ sector_div(thisblock, dd->sect_per_block);
+ DMINFO("block %llu removed from badblocklist by write",
+ (unsigned long long)thisblock);
+ }
+ }
+}
+
+static int dust_map_write(struct dust_device *dd, sector_t thisblock,
+ bool fail_read_on_bb)
+{
+ unsigned long flags;
+
+ if (fail_read_on_bb) {
+ spin_lock_irqsave(&dd->dust_lock, flags);
+ __dust_map_write(dd, thisblock);
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+ }
+
+ return DM_MAPIO_REMAPPED;
+}
+
+static int dust_map(struct dm_target *ti, struct bio *bio)
+{
+ struct dust_device *dd = ti->private;
+ int ret;
+
+ bio_set_dev(bio, dd->dev->bdev);
+ bio->bi_iter.bi_sector = dd->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
+
+ if (bio_data_dir(bio) == READ)
+ ret = dust_map_read(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
+ else
+ ret = dust_map_write(dd, bio->bi_iter.bi_sector, dd->fail_read_on_bb);
+
+ return ret;
+}
+
+static bool __dust_clear_badblocks(struct rb_root *tree,
+ unsigned long long count)
+{
+ struct rb_node *node = NULL, *nnode = NULL;
+
+ nnode = rb_first(tree);
+ if (nnode == NULL) {
+ BUG_ON(count != 0);
+ return false;
+ }
+
+ while (nnode) {
+ node = nnode;
+ nnode = rb_next(node);
+ rb_erase(node, tree);
+ count--;
+ kfree(node);
+ }
+ BUG_ON(count != 0);
+ BUG_ON(tree->rb_node != NULL);
+
+ return true;
+}
+
+static int dust_clear_badblocks(struct dust_device *dd)
+{
+ unsigned long flags;
+ struct rb_root badblocklist;
+ unsigned long long badblock_count;
+
+ spin_lock_irqsave(&dd->dust_lock, flags);
+ badblocklist = dd->badblocklist;
+ badblock_count = dd->badblock_count;
+ dd->badblocklist = RB_ROOT;
+ dd->badblock_count = 0;
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+
+ if (!__dust_clear_badblocks(&badblocklist, badblock_count))
+ DMINFO("%s: no badblocks found", __func__);
+ else
+ DMINFO("%s: badblocks cleared", __func__);
+
+ return 0;
+}
+
+/*
+ * Target parameters:
+ *
+ * <device_path> <offset> <blksz>
+ *
+ * device_path: path to the block device
+ * offset: offset to data area from start of device_path
+ * blksz: block size (minimum 512, maximum 1073741824, must be a power of 2)
+ */
+static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ struct dust_device *dd;
+ unsigned long long tmp;
+ char dummy;
+ unsigned int blksz;
+ unsigned int sect_per_block;
+ sector_t DUST_MAX_BLKSZ_SECTORS = 2097152;
+ sector_t max_block_sectors = min(ti->len, DUST_MAX_BLKSZ_SECTORS);
+
+ if (argc != 3) {
+ ti->error = "Invalid argument count";
+ return -EINVAL;
+ }
+
+ if (kstrtouint(argv[2], 10, &blksz) || !blksz) {
+ ti->error = "Invalid block size parameter";
+ return -EINVAL;
+ }
+
+ if (blksz < 512) {
+ ti->error = "Block size must be at least 512";
+ return -EINVAL;
+ }
+
+ if (!is_power_of_2(blksz)) {
+ ti->error = "Block size must be a power of 2";
+ return -EINVAL;
+ }
+
+ if (to_sector(blksz) > max_block_sectors) {
+ ti->error = "Block size is too large";
+ return -EINVAL;
+ }
+
+ sect_per_block = (blksz >> SECTOR_SHIFT);
+
+ if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1 || tmp != (sector_t)tmp) {
+ ti->error = "Invalid device offset sector";
+ return -EINVAL;
+ }
+
+ dd = kzalloc(sizeof(struct dust_device), GFP_KERNEL);
+ if (dd == NULL) {
+ ti->error = "Cannot allocate context";
+ return -ENOMEM;
+ }
+
+ if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dd->dev)) {
+ ti->error = "Device lookup failed";
+ kfree(dd);
+ return -EINVAL;
+ }
+
+ dd->sect_per_block = sect_per_block;
+ dd->blksz = blksz;
+ dd->start = tmp;
+
+ /*
+ * Whether to fail a read on a "bad" block.
+ * Defaults to false; enabled later by message.
+ */
+ dd->fail_read_on_bb = false;
+
+ /*
+ * Initialize bad block list rbtree.
+ */
+ dd->badblocklist = RB_ROOT;
+ dd->badblock_count = 0;
+ spin_lock_init(&dd->dust_lock);
+
+ dd->quiet_mode = false;
+
+ BUG_ON(dm_set_target_max_io_len(ti, dd->sect_per_block) != 0);
+
+ ti->num_discard_bios = 1;
+ ti->num_flush_bios = 1;
+ ti->private = dd;
+
+ return 0;
+}
+
+static void dust_dtr(struct dm_target *ti)
+{
+ struct dust_device *dd = ti->private;
+
+ __dust_clear_badblocks(&dd->badblocklist, dd->badblock_count);
+ dm_put_device(ti, dd->dev);
+ kfree(dd);
+}
+
+static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result_buf, unsigned int maxlen)
+{
+ struct dust_device *dd = ti->private;
+ sector_t size = i_size_read(dd->dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ bool invalid_msg = false;
+ int result = -EINVAL;
+ unsigned long long tmp, block;
+ unsigned long flags;
+ char dummy;
+
+ if (argc == 1) {
+ if (!strcasecmp(argv[0], "addbadblock") ||
+ !strcasecmp(argv[0], "removebadblock") ||
+ !strcasecmp(argv[0], "queryblock")) {
+ DMERR("%s requires an additional argument", argv[0]);
+ } else if (!strcasecmp(argv[0], "disable")) {
+ DMINFO("disabling read failures on bad sectors");
+ dd->fail_read_on_bb = false;
+ result = 0;
+ } else if (!strcasecmp(argv[0], "enable")) {
+ DMINFO("enabling read failures on bad sectors");
+ dd->fail_read_on_bb = true;
+ result = 0;
+ } else if (!strcasecmp(argv[0], "countbadblocks")) {
+ spin_lock_irqsave(&dd->dust_lock, flags);
+ DMINFO("countbadblocks: %llu badblock(s) found",
+ dd->badblock_count);
+ spin_unlock_irqrestore(&dd->dust_lock, flags);
+ result = 0;
+ } else if (!strcasecmp(argv[0], "clearbadblocks")) {
+ result = dust_clear_badblocks(dd);
+ } else if (!strcasecmp(argv[0], "quiet")) {
+ if (!dd->quiet_mode)
+ dd->quiet_mode = true;
+ else
+ dd->quiet_mode = false;
+ result = 0;
+ } else {
+ invalid_msg = true;
+ }
+ } else if (argc == 2) {
+ if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1)
+ return result;
+
+ block = tmp;
+ sector_div(size, dd->sect_per_block);
+ if (block > size) {
+ DMERR("selected block value out of range");
+ return result;
+ }
+
+ if (!strcasecmp(argv[0], "addbadblock"))
+ result = dust_add_block(dd, block);
+ else if (!strcasecmp(argv[0], "removebadblock"))
+ result = dust_remove_block(dd, block);
+ else if (!strcasecmp(argv[0], "queryblock"))
+ result = dust_query_block(dd, block);
+ else
+ invalid_msg = true;
+
+ } else
+ DMERR("invalid number of arguments '%d'", argc);
+
+ if (invalid_msg)
+ DMERR("unrecognized message '%s' received", argv[0]);
+
+ return result;
+}
+
+static void dust_status(struct dm_target *ti, status_type_t type,
+ unsigned int status_flags, char *result, unsigned int maxlen)
+{
+ struct dust_device *dd = ti->private;
+ unsigned int sz = 0;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ DMEMIT("%s %s %s", dd->dev->name,
+ dd->fail_read_on_bb ? "fail_read_on_bad_block" : "bypass",
+ dd->quiet_mode ? "quiet" : "verbose");
+ break;
+
+ case STATUSTYPE_TABLE:
+ DMEMIT("%s %llu %u", dd->dev->name,
+ (unsigned long long)dd->start, dd->blksz);
+ break;
+ }
+}
+
+static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+{
+ struct dust_device *dd = ti->private;
+ struct dm_dev *dev = dd->dev;
+
+ *bdev = dev->bdev;
+
+ /*
+ * Only pass ioctls through if the device sizes match exactly.
+ */
+ if (dd->start ||
+ ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+ return 1;
+
+ return 0;
+}
+
+static int dust_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn,
+ void *data)
+{
+ struct dust_device *dd = ti->private;
+
+ return fn(ti, dd->dev, dd->start, ti->len, data);
+}
+
+static struct target_type dust_target = {
+ .name = "dust",
+ .version = {1, 0, 0},
+ .module = THIS_MODULE,
+ .ctr = dust_ctr,
+ .dtr = dust_dtr,
+ .iterate_devices = dust_iterate_devices,
+ .map = dust_map,
+ .message = dust_message,
+ .status = dust_status,
+ .prepare_ioctl = dust_prepare_ioctl,
+};
+
+static int __init dm_dust_init(void)
+{
+ int result = dm_register_target(&dust_target);
+
+ if (result < 0)
+ DMERR("dm_register_target failed %d", result);
+
+ return result;
+}
+
+static void __exit dm_dust_exit(void)
+{
+ dm_unregister_target(&dust_target);
+}
+
+module_init(dm_dust_init);
+module_exit(dm_dust_exit);
+
+MODULE_DESCRIPTION(DM_NAME " dust test target");
+MODULE_AUTHOR("Bryan Gurney <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 721efc493942..3f4139ac1f60 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -11,6 +11,7 @@
#define _LINUX_DM_EXCEPTION_STORE
#include <linux/blkdev.h>
+#include <linux/list_bl.h>
#include <linux/device-mapper.h>
/*
@@ -27,7 +28,7 @@ typedef sector_t chunk_t;
* chunk within the device.
*/
struct dm_exception {
- struct list_head hash_list;
+ struct hlist_bl_node hash_list;
chunk_t old_chunk;
chunk_t new_chunk;
diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c
index 4b76f84424c3..352e803f566e 100644
--- a/drivers/md/dm-init.c
+++ b/drivers/md/dm-init.c
@@ -160,7 +160,7 @@ static int __init dm_parse_table(struct dm_device *dev, char *str)
while (table_entry) {
DMDEBUG("parsing table \"%s\"", str);
- if (++dev->dmi.target_count >= DM_MAX_TARGETS) {
+ if (++dev->dmi.target_count > DM_MAX_TARGETS) {
DMERR("too many targets %u > %d",
dev->dmi.target_count, DM_MAX_TARGETS);
return -EINVAL;
@@ -242,9 +242,9 @@ static int __init dm_parse_devices(struct list_head *devices, char *str)
return -ENOMEM;
list_add_tail(&dev->list, devices);
- if (++ndev >= DM_MAX_DEVICES) {
- DMERR("too many targets %u > %d",
- dev->dmi.target_count, DM_MAX_TARGETS);
+ if (++ndev > DM_MAX_DEVICES) {
+ DMERR("too many devices %lu > %d",
+ ndev, DM_MAX_DEVICES);
return -EINVAL;
}
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index c27c32cf4a30..44e76cda087a 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -15,6 +15,7 @@
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
+#include <linux/reboot.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
@@ -24,6 +25,7 @@
#define DEFAULT_INTERLEAVE_SECTORS 32768
#define DEFAULT_JOURNAL_SIZE_FACTOR 7
+#define DEFAULT_SECTORS_PER_BITMAP_BIT 32768
#define DEFAULT_BUFFER_SECTORS 128
#define DEFAULT_JOURNAL_WATERMARK 50
#define DEFAULT_SYNC_MSEC 10000
@@ -33,6 +35,8 @@
#define METADATA_WORKQUEUE_MAX_ACTIVE 16
#define RECALC_SECTORS 8192
#define RECALC_WRITE_SUPER 16
+#define BITMAP_BLOCK_SIZE 4096 /* don't change it */
+#define BITMAP_FLUSH_INTERVAL (10 * HZ)
/*
* Warning - DEBUG_PRINT prints security-sensitive data to the log,
@@ -48,6 +52,7 @@
#define SB_MAGIC "integrt"
#define SB_VERSION_1 1
#define SB_VERSION_2 2
+#define SB_VERSION_3 3
#define SB_SECTORS 8
#define MAX_SECTORS_PER_BLOCK 8
@@ -60,12 +65,14 @@ struct superblock {
__u64 provided_data_sectors; /* userspace uses this value */
__u32 flags;
__u8 log2_sectors_per_block;
- __u8 pad[3];
+ __u8 log2_blocks_per_bitmap_bit;
+ __u8 pad[2];
__u64 recalc_sector;
};
#define SB_FLAG_HAVE_JOURNAL_MAC 0x1
#define SB_FLAG_RECALCULATING 0x2
+#define SB_FLAG_DIRTY_BITMAP 0x4
#define JOURNAL_ENTRY_ROUNDUP 8
@@ -151,9 +158,18 @@ struct dm_integrity_c {
struct workqueue_struct *metadata_wq;
struct superblock *sb;
unsigned journal_pages;
+ unsigned n_bitmap_blocks;
+
struct page_list *journal;
struct page_list *journal_io;
struct page_list *journal_xor;
+ struct page_list *recalc_bitmap;
+ struct page_list *may_write_bitmap;
+ struct bitmap_block_status *bbs;
+ unsigned bitmap_flush_interval;
+ int synchronous_mode;
+ struct bio_list synchronous_bios;
+ struct delayed_work bitmap_flush_work;
struct crypto_skcipher *journal_crypt;
struct scatterlist **journal_scatterlist;
@@ -180,6 +196,7 @@ struct dm_integrity_c {
__s8 log2_metadata_run;
__u8 log2_buffer_sectors;
__u8 sectors_per_block;
+ __u8 log2_blocks_per_bitmap_bit;
unsigned char mode;
int suspending;
@@ -232,17 +249,20 @@ struct dm_integrity_c {
bool journal_uptodate;
bool just_formatted;
+ bool recalculate_flag;
struct alg_spec internal_hash_alg;
struct alg_spec journal_crypt_alg;
struct alg_spec journal_mac_alg;
atomic64_t number_of_mismatches;
+
+ struct notifier_block reboot_notifier;
};
struct dm_integrity_range {
sector_t logical_sector;
- unsigned n_sectors;
+ sector_t n_sectors;
bool waiting;
union {
struct rb_node node;
@@ -288,6 +308,16 @@ struct journal_io {
struct journal_completion *comp;
};
+struct bitmap_block_status {
+ struct work_struct work;
+ struct dm_integrity_c *ic;
+ unsigned idx;
+ unsigned long *bitmap;
+ struct bio_list bio_queue;
+ spinlock_t bio_queue_lock;
+};
+
static struct kmem_cache *journal_io_cache;
#define JOURNAL_IO_MEMPOOL 32
@@ -423,7 +453,9 @@ static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
static void sb_set_version(struct dm_integrity_c *ic)
{
- if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
+ if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
+ ic->sb->version = SB_VERSION_3;
+ else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
ic->sb->version = SB_VERSION_2;
else
ic->sb->version = SB_VERSION_1;
@@ -447,6 +479,137 @@ static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
return dm_io(&io_req, 1, &io_loc, NULL);
}
+#define BITMAP_OP_TEST_ALL_SET 0
+#define BITMAP_OP_TEST_ALL_CLEAR 1
+#define BITMAP_OP_SET 2
+#define BITMAP_OP_CLEAR 3
+
+static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
+ sector_t sector, sector_t n_sectors, int mode)
+{
+ unsigned long bit, end_bit, this_end_bit, page, end_page;
+ unsigned long *data;
+
+ if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
+ DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
+ (unsigned long long)sector,
+ (unsigned long long)n_sectors,
+ ic->sb->log2_sectors_per_block,
+ ic->log2_blocks_per_bitmap_bit,
+ mode);
+ BUG();
+ }
+
+ if (unlikely(!n_sectors))
+ return true;
+
+ bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+ end_bit = (sector + n_sectors - 1) >>
+ (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+
+ page = bit / (PAGE_SIZE * 8);
+ bit %= PAGE_SIZE * 8;
+
+ end_page = end_bit / (PAGE_SIZE * 8);
+ end_bit %= PAGE_SIZE * 8;
+
+repeat:
+ if (page < end_page) {
+ this_end_bit = PAGE_SIZE * 8 - 1;
+ } else {
+ this_end_bit = end_bit;
+ }
+
+ data = lowmem_page_address(bitmap[page].page);
+
+ if (mode == BITMAP_OP_TEST_ALL_SET) {
+ while (bit <= this_end_bit) {
+ if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
+ do {
+ if (data[bit / BITS_PER_LONG] != -1)
+ return false;
+ bit += BITS_PER_LONG;
+ } while (this_end_bit >= bit + BITS_PER_LONG - 1);
+ continue;
+ }
+ if (!test_bit(bit, data))
+ return false;
+ bit++;
+ }
+ } else if (mode == BITMAP_OP_TEST_ALL_CLEAR) {
+ while (bit <= this_end_bit) {
+ if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
+ do {
+ if (data[bit / BITS_PER_LONG] != 0)
+ return false;
+ bit += BITS_PER_LONG;
+ } while (this_end_bit >= bit + BITS_PER_LONG - 1);
+ continue;
+ }
+ if (test_bit(bit, data))
+ return false;
+ bit++;
+ }
+ } else if (mode == BITMAP_OP_SET) {
+ while (bit <= this_end_bit) {
+ if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
+ do {
+ data[bit / BITS_PER_LONG] = -1;
+ bit += BITS_PER_LONG;
+ } while (this_end_bit >= bit + BITS_PER_LONG - 1);
+ continue;
+ }
+ __set_bit(bit, data);
+ bit++;
+ }
+ } else if (mode == BITMAP_OP_CLEAR) {
+ if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
+ clear_page(data);
+ else while (bit <= this_end_bit) {
+ if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
+ do {
+ data[bit / BITS_PER_LONG] = 0;
+ bit += BITS_PER_LONG;
+ } while (this_end_bit >= bit + BITS_PER_LONG - 1);
+ continue;
+ }
+ __clear_bit(bit, data);
+ bit++;
+ }
+ } else {
+ BUG();
+ }
+
+ if (unlikely(page < end_page)) {
+ bit = 0;
+ page++;
+ goto repeat;
+ }
+
+ return true;
+}
+
+static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
+{
+ unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
+ unsigned i;
+
+ for (i = 0; i < n_bitmap_pages; i++) {
+ unsigned long *dst_data = lowmem_page_address(dst[i].page);
+ unsigned long *src_data = lowmem_page_address(src[i].page);
+ copy_page(dst_data, src_data);
+ }
+}
+
+static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
+{
+ unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+ unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);
+
+ BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
+ return &ic->bbs[bitmap_block];
+}
+
static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
bool e, const char *function)
{
@@ -455,8 +618,8 @@ static void access_journal_check(struct dm_integrity_c *ic, unsigned section, un
if (unlikely(section >= ic->journal_sections) ||
unlikely(offset >= limit)) {
- printk(KERN_CRIT "%s: invalid access at (%u,%u), limit (%u,%u)\n",
- function, section, offset, ic->journal_sections, limit);
+ DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)",
+ function, section, offset, ic->journal_sections, limit);
BUG();
}
#endif
@@ -756,12 +919,12 @@ static void complete_journal_io(unsigned long error, void *context)
complete_journal_op(comp);
}
-static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
- unsigned n_sections, struct journal_completion *comp)
+static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
+ unsigned sector, unsigned n_sectors, struct journal_completion *comp)
{
struct dm_io_request io_req;
struct dm_io_region io_loc;
- unsigned sector, n_sectors, pl_index, pl_offset;
+ unsigned pl_index, pl_offset;
int r;
if (unlikely(dm_integrity_failed(ic))) {
@@ -770,9 +933,6 @@ static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned
return;
}
- sector = section * ic->journal_section_sectors;
- n_sectors = n_sections * ic->journal_section_sectors;
-
pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
@@ -805,6 +965,17 @@ static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned
}
}
+static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
+ unsigned n_sections, struct journal_completion *comp)
+{
+ unsigned sector, n_sectors;
+
+ sector = section * ic->journal_section_sectors;
+ n_sectors = n_sections * ic->journal_section_sectors;
+
+ rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
+}
+
static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
{
struct journal_completion io_comp;
@@ -988,6 +1159,12 @@ static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrit
} while (unlikely(new_range->waiting));
}
+static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
+{
+ if (unlikely(!add_new_range(ic, new_range, true)))
+ wait_and_add_new_range(ic, new_range);
+}
+
static void init_journal_node(struct journal_node *node)
{
RB_CLEAR_NODE(&node->node);
@@ -1204,6 +1381,14 @@ static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
int r = dm_integrity_failed(ic);
if (unlikely(r) && !bio->bi_status)
bio->bi_status = errno_to_blk_status(r);
+ if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
+ unsigned long flags;
+ spin_lock_irqsave(&ic->endio_wait.lock, flags);
+ bio_list_add(&ic->synchronous_bios, bio);
+ queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
+ spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
+ return;
+ }
bio_endio(bio);
}
@@ -1477,7 +1662,8 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
else
wanted_tag_size *= ic->tag_size;
if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
- DMERR("Invalid integrity data size %u, expected %u", bip->bip_iter.bi_size, wanted_tag_size);
+ DMERR("Invalid integrity data size %u, expected %u",
+ bip->bip_iter.bi_size, wanted_tag_size);
return DM_MAPIO_KILL;
}
}
@@ -1681,7 +1867,7 @@ retry:
unsigned ws, we, range_sectors;
dio->range.n_sectors = min(dio->range.n_sectors,
- ic->free_sectors << ic->sb->log2_sectors_per_block);
+ (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
if (unlikely(!dio->range.n_sectors)) {
if (from_map)
goto offload_to_thread;
@@ -1764,6 +1950,20 @@ offload_to_thread:
goto journal_read_write;
}
+ if (ic->mode == 'B' && dio->write) {
+ if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
+ dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
+ struct bitmap_block_status *bbs;
+
+ bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
+ spin_lock(&bbs->bio_queue_lock);
+ bio_list_add(&bbs->bio_queue, bio);
+ spin_unlock(&bbs->bio_queue_lock);
+ queue_work(ic->writer_wq, &bbs->work);
+ return;
+ }
+ }
+
dio->in_flight = (atomic_t)ATOMIC_INIT(2);
if (need_sync_io) {
@@ -1790,10 +1990,15 @@ offload_to_thread:
if (need_sync_io) {
wait_for_completion_io(&read_comp);
- if (unlikely(ic->recalc_wq != NULL) &&
- ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
goto skip_check;
+ if (ic->mode == 'B') {
+ if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
+ dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
+ goto skip_check;
+ }
+
if (likely(!bio->bi_status))
integrity_metadata(&dio->work);
else
@@ -1831,8 +2036,16 @@ static void pad_uncommitted(struct dm_integrity_c *ic)
wraparound_section(ic, &ic->free_section);
ic->n_uncommitted_sections++;
}
- WARN_ON(ic->journal_sections * ic->journal_section_entries !=
- (ic->n_uncommitted_sections + ic->n_committed_sections) * ic->journal_section_entries + ic->free_sectors);
+ if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
+ (ic->n_uncommitted_sections + ic->n_committed_sections) *
+ ic->journal_section_entries + ic->free_sectors)) {
+ DMCRIT("journal_sections %u, journal_section_entries %u, "
+ "n_uncommitted_sections %u, n_committed_sections %u, "
+ "journal_section_entries %u, free_sectors %u",
+ ic->journal_sections, ic->journal_section_entries,
+ ic->n_uncommitted_sections, ic->n_committed_sections,
+ ic->journal_section_entries, ic->free_sectors);
+ }
}
static void integrity_commit(struct work_struct *w)
@@ -1981,8 +2194,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
spin_lock_irq(&ic->endio_wait.lock);
- if (unlikely(!add_new_range(ic, &io->range, true)))
- wait_and_add_new_range(ic, &io->range);
+ add_new_range_and_wait(ic, &io->range);
if (likely(!from_replay)) {
struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
@@ -2120,11 +2332,14 @@ static void integrity_recalc(struct work_struct *w)
sector_t area, offset;
sector_t metadata_block;
unsigned metadata_offset;
+ sector_t logical_sector, n_sectors;
__u8 *t;
unsigned i;
int r;
unsigned super_counter = 0;
+ DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
+
spin_lock_irq(&ic->endio_wait.lock);
next_chunk:
@@ -2133,21 +2348,49 @@ next_chunk:
goto unlock_ret;
range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
- if (unlikely(range.logical_sector >= ic->provided_data_sectors))
+ if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
+ if (ic->mode == 'B') {
+ DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
+ queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
+ }
goto unlock_ret;
+ }
get_area_and_offset(ic, range.logical_sector, &area, &offset);
range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
if (!ic->meta_dev)
- range.n_sectors = min(range.n_sectors, (1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
-
- if (unlikely(!add_new_range(ic, &range, true)))
- wait_and_add_new_range(ic, &range);
+ range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
+ add_new_range_and_wait(ic, &range);
spin_unlock_irq(&ic->endio_wait.lock);
+ logical_sector = range.logical_sector;
+ n_sectors = range.n_sectors;
+
+ if (ic->mode == 'B') {
+ if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) {
+ goto advance_and_next;
+ }
+ while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
+ ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
+ logical_sector += ic->sectors_per_block;
+ n_sectors -= ic->sectors_per_block;
+ cond_resched();
+ }
+ while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
+ ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
+ n_sectors -= ic->sectors_per_block;
+ cond_resched();
+ }
+ get_area_and_offset(ic, logical_sector, &area, &offset);
+ }
+
+ DEBUG_print("recalculating: %lx, %lx\n", logical_sector, n_sectors);
if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
recalc_write_super(ic);
+ if (ic->mode == 'B') {
+ queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
+ }
super_counter = 0;
}
@@ -2162,7 +2405,7 @@ next_chunk:
io_req.client = ic->io;
io_loc.bdev = ic->dev->bdev;
io_loc.sector = get_data_sector(ic, area, offset);
- io_loc.count = range.n_sectors;
+ io_loc.count = n_sectors;
r = dm_io(&io_req, 1, &io_loc, NULL);
if (unlikely(r)) {
@@ -2171,8 +2414,8 @@ next_chunk:
}
t = ic->recalc_tags;
- for (i = 0; i < range.n_sectors; i += ic->sectors_per_block) {
- integrity_sector_checksum(ic, range.logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
+ for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
+ integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
t += ic->tag_size;
}
@@ -2184,6 +2427,9 @@ next_chunk:
goto err;
}
+advance_and_next:
+ cond_resched();
+
spin_lock_irq(&ic->endio_wait.lock);
remove_range_unlocked(ic, &range);
ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
@@ -2199,6 +2445,103 @@ unlock_ret:
recalc_write_super(ic);
}
+static void bitmap_block_work(struct work_struct *w)
+{
+ struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
+ struct dm_integrity_c *ic = bbs->ic;
+ struct bio *bio;
+ struct bio_list bio_queue;
+ struct bio_list waiting;
+
+ bio_list_init(&waiting);
+
+ spin_lock(&bbs->bio_queue_lock);
+ bio_queue = bbs->bio_queue;
+ bio_list_init(&bbs->bio_queue);
+ spin_unlock(&bbs->bio_queue_lock);
+
+ while ((bio = bio_list_pop(&bio_queue))) {
+ struct dm_integrity_io *dio;
+
+ dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
+
+ if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
+ dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
+ remove_range(ic, &dio->range);
+ INIT_WORK(&dio->work, integrity_bio_wait);
+ queue_work(ic->wait_wq, &dio->work);
+ } else {
+ block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
+ dio->range.n_sectors, BITMAP_OP_SET);
+ bio_list_add(&waiting, bio);
+ }
+ }
+
+ if (bio_list_empty(&waiting))
+ return;
+
+ rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC,
+ bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
+ BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);
+
+ while ((bio = bio_list_pop(&waiting))) {
+ struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
+
+ block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
+ dio->range.n_sectors, BITMAP_OP_SET);
+
+ remove_range(ic, &dio->range);
+ INIT_WORK(&dio->work, integrity_bio_wait);
+ queue_work(ic->wait_wq, &dio->work);
+ }
+
+ queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
+}
+
+static void bitmap_flush_work(struct work_struct *work)
+{
+ struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
+ struct dm_integrity_range range;
+ unsigned long limit;
+ struct bio *bio;
+
+ dm_integrity_flush_buffers(ic);
+
+ range.logical_sector = 0;
+ range.n_sectors = ic->provided_data_sectors;
+
+ spin_lock_irq(&ic->endio_wait.lock);
+ add_new_range_and_wait(ic, &range);
+ spin_unlock_irq(&ic->endio_wait.lock);
+
+ dm_integrity_flush_buffers(ic);
+ if (ic->meta_dev)
+ blkdev_issue_flush(ic->dev->bdev, GFP_NOIO, NULL);
+
+ limit = ic->provided_data_sectors;
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
+ limit = le64_to_cpu(ic->sb->recalc_sector)
+ >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
+ << (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+ }
+ /*DEBUG_print("zeroing journal\n");*/
+ block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
+ block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
+
+ rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
+ ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
+
+ spin_lock_irq(&ic->endio_wait.lock);
+ remove_range_unlocked(ic, &range);
+ while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
+ bio_endio(bio);
+ spin_unlock_irq(&ic->endio_wait.lock);
+ spin_lock_irq(&ic->endio_wait.lock);
+ }
+ spin_unlock_irq(&ic->endio_wait.lock);
+}
+
static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
unsigned n_sections, unsigned char commit_seq)
{
@@ -2395,9 +2738,37 @@ clear_journal:
init_journal_node(&ic->journal_tree[i]);
}
+static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
+{
+ DEBUG_print("dm_integrity_enter_synchronous_mode\n");
+
+ if (ic->mode == 'B') {
+ ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
+ ic->synchronous_mode = 1;
+
+ cancel_delayed_work_sync(&ic->bitmap_flush_work);
+ queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
+ flush_workqueue(ic->commit_wq);
+ }
+}
+
+static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
+{
+ struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
+
+ DEBUG_print("dm_integrity_reboot\n");
+
+ dm_integrity_enter_synchronous_mode(ic);
+
+ return NOTIFY_DONE;
+}
+
static void dm_integrity_postsuspend(struct dm_target *ti)
{
struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
+ int r;
+
+ WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));
del_timer_sync(&ic->autocommit_timer);
@@ -2406,6 +2777,9 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
if (ic->recalc_wq)
drain_workqueue(ic->recalc_wq);
+ if (ic->mode == 'B')
+ cancel_delayed_work_sync(&ic->bitmap_flush_work);
+
queue_work(ic->commit_wq, &ic->commit_work);
drain_workqueue(ic->commit_wq);
@@ -2416,6 +2790,18 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
dm_integrity_flush_buffers(ic);
}
+ if (ic->mode == 'B') {
+ dm_integrity_flush_buffers(ic);
+#if 1
+ /* set to 0 to test bitmap replay code */
+ init_journal(ic, 0, ic->journal_sections, 0);
+ ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
+ r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+ if (unlikely(r))
+ dm_integrity_io_error(ic, "writing superblock", r);
+#endif
+ }
+
WRITE_ONCE(ic->suspending, 0);
BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
@@ -2426,11 +2812,70 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
static void dm_integrity_resume(struct dm_target *ti)
{
struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
+ int r;
+ DEBUG_print("resume\n");
+
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
+ DEBUG_print("resume dirty_bitmap\n");
+ rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
+ ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
+ if (ic->mode == 'B') {
+ if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
+ block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
+ block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
+ if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
+ BITMAP_OP_TEST_ALL_CLEAR)) {
+ ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
+ ic->sb->recalc_sector = cpu_to_le64(0);
+ }
+ } else {
+ DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
+ ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
+ ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
+ block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
+ block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
+ block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
+ rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
+ ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
+ ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
+ ic->sb->recalc_sector = cpu_to_le64(0);
+ }
+ } else {
+ if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
+ block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR))) {
+ ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
+ ic->sb->recalc_sector = cpu_to_le64(0);
+ }
+ init_journal(ic, 0, ic->journal_sections, 0);
+ replay_journal(ic);
+ ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
+ }
+ r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+ if (unlikely(r))
+ dm_integrity_io_error(ic, "writing superblock", r);
+ } else {
+ replay_journal(ic);
+ if (ic->mode == 'B') {
+ int mode;
+ ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
+ ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
+ r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
+ if (unlikely(r))
+ dm_integrity_io_error(ic, "writing superblock", r);
+
+ mode = ic->recalculate_flag ? BITMAP_OP_SET : BITMAP_OP_CLEAR;
+ block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, mode);
+ block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, mode);
+ block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, mode);
+ rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
+ ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
+ }
+ }
- replay_journal(ic);
-
- if (ic->recalc_wq && ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
+ DEBUG_print("testing recalc: %x\n", ic->sb->flags);
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
__u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
+ DEBUG_print("recalc pos: %lx / %lx\n", (long)recalc_pos, ic->provided_data_sectors);
if (recalc_pos < ic->provided_data_sectors) {
queue_work(ic->recalc_wq, &ic->recalc_work);
} else if (recalc_pos > ic->provided_data_sectors) {
@@ -2438,6 +2883,16 @@ static void dm_integrity_resume(struct dm_target *ti)
recalc_write_super(ic);
}
}
+
+ ic->reboot_notifier.notifier_call = dm_integrity_reboot;
+ ic->reboot_notifier.next = NULL;
+ ic->reboot_notifier.priority = INT_MAX - 1; /* be notified after md and before hardware drivers */
+ WARN_ON(register_reboot_notifier(&ic->reboot_notifier));
+
+#if 0
+ /* set to 1 to stress test synchronous mode */
+ dm_integrity_enter_synchronous_mode(ic);
+#endif
}
static void dm_integrity_status(struct dm_target *ti, status_type_t type,
@@ -2462,10 +2917,14 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
__u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
watermark_percentage += ic->journal_entries / 2;
do_div(watermark_percentage, ic->journal_entries);
- arg_count = 5;
+ arg_count = 3;
arg_count += !!ic->meta_dev;
arg_count += ic->sectors_per_block != 1;
arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
+ arg_count += ic->mode == 'J';
+ arg_count += ic->mode == 'J';
+ arg_count += ic->mode == 'B';
+ arg_count += ic->mode == 'B';
arg_count += !!ic->internal_hash_alg.alg_string;
arg_count += !!ic->journal_crypt_alg.alg_string;
arg_count += !!ic->journal_mac_alg.alg_string;
@@ -2475,13 +2934,19 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" meta_device:%s", ic->meta_dev->name);
if (ic->sectors_per_block != 1)
DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
- if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
+ if (ic->recalculate_flag)
DMEMIT(" recalculate");
DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
- DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
- DMEMIT(" commit_time:%u", ic->autocommit_msec);
+ if (ic->mode == 'J') {
+ DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
+ DMEMIT(" commit_time:%u", ic->autocommit_msec);
+ }
+ if (ic->mode == 'B') {
+ DMEMIT(" sectors_per_bit:%llu", (unsigned long long)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
+ DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
+ }
#define EMIT_ALG(a, n) \
do { \
@@ -2562,7 +3027,7 @@ static int calculate_device_limits(struct dm_integrity_c *ic)
if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
return -EINVAL;
} else {
- __u64 meta_size = ic->provided_data_sectors * ic->tag_size;
+ __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
>> (ic->log2_buffer_sectors + SECTOR_SHIFT);
meta_size <<= ic->log2_buffer_sectors;
@@ -2659,37 +3124,37 @@ static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
}
-static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl)
+static void dm_integrity_free_page_list(struct page_list *pl)
{
unsigned i;
if (!pl)
return;
- for (i = 0; i < ic->journal_pages; i++)
- if (pl[i].page)
- __free_page(pl[i].page);
+ for (i = 0; pl[i].page; i++)
+ __free_page(pl[i].page);
kvfree(pl);
}
-static struct page_list *dm_integrity_alloc_page_list(struct dm_integrity_c *ic)
+static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
{
- size_t page_list_desc_size = ic->journal_pages * sizeof(struct page_list);
struct page_list *pl;
unsigned i;
- pl = kvmalloc(page_list_desc_size, GFP_KERNEL | __GFP_ZERO);
+ pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
if (!pl)
return NULL;
- for (i = 0; i < ic->journal_pages; i++) {
+ for (i = 0; i < n_pages; i++) {
pl[i].page = alloc_page(GFP_KERNEL);
if (!pl[i].page) {
- dm_integrity_free_page_list(ic, pl);
+ dm_integrity_free_page_list(pl);
return NULL;
}
if (i)
pl[i - 1].next = &pl[i];
}
+ pl[i].page = NULL;
+ pl[i].next = NULL;
return pl;
}
@@ -2702,7 +3167,8 @@ static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, str
kvfree(sl);
}
-static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl)
+static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
+ struct page_list *pl)
{
struct scatterlist **sl;
unsigned i;
@@ -2721,7 +3187,8 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
unsigned idx;
page_list_location(ic, i, 0, &start_index, &start_offset);
- page_list_location(ic, i, ic->journal_section_sectors - 1, &end_index, &end_offset);
+ page_list_location(ic, i, ic->journal_section_sectors - 1,
+ &end_index, &end_offset);
n_pages = (end_index - start_index + 1);
@@ -2842,7 +3309,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
}
ic->journal_pages = journal_pages;
- ic->journal = dm_integrity_alloc_page_list(ic);
+ ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
if (!ic->journal) {
*error = "Could not allocate memory for journal";
r = -ENOMEM;
@@ -2874,7 +3341,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
DEBUG_print("cipher %s, block size %u iv size %u\n",
ic->journal_crypt_alg.alg_string, blocksize, ivsize);
- ic->journal_io = dm_integrity_alloc_page_list(ic);
+ ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
if (!ic->journal_io) {
*error = "Could not allocate memory for journal io";
r = -ENOMEM;
@@ -2898,7 +3365,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
goto bad;
}
- ic->journal_xor = dm_integrity_alloc_page_list(ic);
+ ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
if (!ic->journal_xor) {
*error = "Could not allocate memory for journal xor";
r = -ENOMEM;
@@ -2922,7 +3389,8 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
memset(crypt_iv, 0x00, ivsize);
- skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
+ skcipher_request_set_crypt(req, sg, sg,
+ PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
init_completion(&comp.comp);
comp.in_flight = (atomic_t)ATOMIC_INIT(1);
if (do_crypt(true, req, &comp))
@@ -3063,7 +3531,7 @@ bad:
* device
* offset from the start of the device
* tag size
- * D - direct writes, J - journal writes, R - recovery mode
+ * D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
* number of optional arguments
* optional arguments:
* journal_sectors
@@ -3071,10 +3539,14 @@ bad:
* buffer_sectors
* journal_watermark
* commit_time
+ * meta_device
+ * block_size
+ * sectors_per_bit
+ * bitmap_flush_interval
* internal_hash
* journal_crypt
* journal_mac
- * block_size
+ * recalculate
*/
static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
@@ -3087,10 +3559,13 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{0, 9, "Invalid number of feature args"},
};
unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
- bool recalculate;
bool should_write_sb;
__u64 threshold;
unsigned long long start;
+ __s8 log2_sectors_per_bitmap_bit = -1;
+ __s8 log2_blocks_per_bitmap_bit;
+ __u64 bits_in_journal;
+ __u64 n_bitmap_bits;
#define DIRECT_ARGUMENTS 4
@@ -3114,6 +3589,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
init_waitqueue_head(&ic->copy_to_journal_wait);
init_completion(&ic->crypto_backoff);
atomic64_set(&ic->number_of_mismatches, 0);
+ ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;
r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
if (r) {
@@ -3136,10 +3612,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
}
- if (!strcmp(argv[3], "J") || !strcmp(argv[3], "D") || !strcmp(argv[3], "R"))
+ if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
+ !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
ic->mode = argv[3][0];
- else {
- ti->error = "Invalid mode (expecting J, D, R)";
+ } else {
+ ti->error = "Invalid mode (expecting J, B, D, R)";
r = -EINVAL;
goto bad;
}
@@ -3149,7 +3626,6 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
buffer_sectors = DEFAULT_BUFFER_SECTORS;
journal_watermark = DEFAULT_JOURNAL_WATERMARK;
sync_msec = DEFAULT_SYNC_MSEC;
- recalculate = false;
ic->sectors_per_block = 1;
as.argc = argc - DIRECT_ARGUMENTS;
@@ -3161,6 +3637,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
while (extra_args--) {
const char *opt_string;
unsigned val;
+ unsigned long long llval;
opt_string = dm_shift_arg(&as);
if (!opt_string) {
r = -EINVAL;
@@ -3182,7 +3659,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
dm_put_device(ti, ic->meta_dev);
ic->meta_dev = NULL;
}
- r = dm_get_device(ti, strchr(opt_string, ':') + 1, dm_table_get_mode(ti->table), &ic->meta_dev);
+ r = dm_get_device(ti, strchr(opt_string, ':') + 1,
+ dm_table_get_mode(ti->table), &ic->meta_dev);
if (r) {
ti->error = "Device lookup failed";
goto bad;
@@ -3196,6 +3674,14 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
ic->sectors_per_block = val >> SECTOR_SHIFT;
+ } else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
+ log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
+ } else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
+ if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
+ r = -EINVAL;
+ ti->error = "Invalid bitmap_flush_interval argument";
+ }
+ ic->bitmap_flush_interval = msecs_to_jiffies(val);
} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
"Invalid internal_hash argument");
@@ -3212,7 +3698,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (r)
goto bad;
} else if (!strcmp(opt_string, "recalculate")) {
- recalculate = true;
+ ic->recalculate_flag = true;
} else {
r = -EINVAL;
ti->error = "Invalid argument";
@@ -3228,7 +3714,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (!journal_sectors) {
journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
- ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
+ ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
}
if (!buffer_sectors)
@@ -3263,6 +3749,12 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
else
ic->log2_tag_size = -1;
+ if (ic->mode == 'B' && !ic->internal_hash) {
+ r = -EINVAL;
+ ti->error = "Bitmap mode can be only used with internal hash";
+ goto bad;
+ }
+
ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
ic->autocommit_msec = sync_msec;
timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
@@ -3308,7 +3800,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
INIT_WORK(&ic->commit_work, integrity_commit);
- if (ic->mode == 'J') {
+ if (ic->mode == 'J' || ic->mode == 'B') {
ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
if (!ic->writer_wq) {
ti->error = "Cannot allocate workqueue";
@@ -3349,7 +3841,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
should_write_sb = true;
}
- if (!ic->sb->version || ic->sb->version > SB_VERSION_2) {
+ if (!ic->sb->version || ic->sb->version > SB_VERSION_3) {
r = -EINVAL;
ti->error = "Unknown version";
goto bad;
@@ -3409,6 +3901,27 @@ try_smaller_buffer:
ti->error = "The device is too small";
goto bad;
}
+
+ if (log2_sectors_per_bitmap_bit < 0)
+ log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT);
+ if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
+ log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;
+
+ bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
+ if (bits_in_journal > UINT_MAX)
+ bits_in_journal = UINT_MAX;
+ while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
+ log2_sectors_per_bitmap_bit++;
+
+ log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
+ ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
+ if (should_write_sb) {
+ ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
+ }
+ n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
+ + (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
+ ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);
+
if (!ic->meta_dev)
ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));
@@ -3433,25 +3946,21 @@ try_smaller_buffer:
DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
DEBUG_print(" journal_entries %u\n", ic->journal_entries);
DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
- DEBUG_print(" device_sectors 0x%llx\n", (unsigned long long)ic->device_sectors);
+ DEBUG_print(" data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", (unsigned long long)ic->provided_data_sectors,
(unsigned long long)ic->provided_data_sectors);
DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
+ DEBUG_print(" bits_in_journal %llu\n", (unsigned long long)bits_in_journal);
- if (recalculate && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
+ if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
ic->sb->recalc_sector = cpu_to_le64(0);
}
- if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
- if (!ic->internal_hash) {
- r = -EINVAL;
- ti->error = "Recalculate is only valid with internal hash";
- goto bad;
- }
+ if (ic->internal_hash) {
ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
if (!ic->recalc_wq ) {
ti->error = "Cannot allocate workqueue";
@@ -3488,6 +3997,45 @@ try_smaller_buffer:
r = create_journal(ic, &ti->error);
if (r)
goto bad;
+
+ }
+
+ if (ic->mode == 'B') {
+ unsigned i;
+ unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
+
+ ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
+ if (!ic->recalc_bitmap) {
+ r = -ENOMEM;
+ goto bad;
+ }
+ ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
+ if (!ic->may_write_bitmap) {
+ r = -ENOMEM;
+ goto bad;
+ }
+ ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
+ if (!ic->bbs) {
+ r = -ENOMEM;
+ goto bad;
+ }
+ INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
+ for (i = 0; i < ic->n_bitmap_blocks; i++) {
+ struct bitmap_block_status *bbs = &ic->bbs[i];
+ unsigned sector, pl_index, pl_offset;
+
+ INIT_WORK(&bbs->work, bitmap_block_work);
+ bbs->ic = ic;
+ bbs->idx = i;
+ bio_list_init(&bbs->bio_queue);
+ spin_lock_init(&bbs->bio_queue_lock);
+
+ sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
+ pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
+ pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
+
+ bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
+ }
}
if (should_write_sb) {
@@ -3512,6 +4060,17 @@ try_smaller_buffer:
if (r)
goto bad;
}
+ if (ic->mode == 'B') {
+ unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
+ if (!max_io_len)
+ max_io_len = 1U << 31;
+ DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
+ if (!ti->max_io_len || ti->max_io_len > max_io_len) {
+ r = dm_set_target_max_io_len(ti, max_io_len);
+ if (r)
+ goto bad;
+ }
+ }
if (!ic->internal_hash)
dm_integrity_set(ti, ic);
@@ -3520,6 +4079,7 @@ try_smaller_buffer:
ti->flush_supported = true;
return 0;
+
bad:
dm_integrity_dtr(ti);
return r;
@@ -3542,10 +4102,9 @@ static void dm_integrity_dtr(struct dm_target *ti)
destroy_workqueue(ic->writer_wq);
if (ic->recalc_wq)
destroy_workqueue(ic->recalc_wq);
- if (ic->recalc_buffer)
- vfree(ic->recalc_buffer);
- if (ic->recalc_tags)
- kvfree(ic->recalc_tags);
+ vfree(ic->recalc_buffer);
+ kvfree(ic->recalc_tags);
+ kvfree(ic->bbs);
if (ic->bufio)
dm_bufio_client_destroy(ic->bufio);
mempool_exit(&ic->journal_io_mempool);
@@ -3555,9 +4114,11 @@ static void dm_integrity_dtr(struct dm_target *ti)
dm_put_device(ti, ic->dev);
if (ic->meta_dev)
dm_put_device(ti, ic->meta_dev);
- dm_integrity_free_page_list(ic, ic->journal);
- dm_integrity_free_page_list(ic, ic->journal_io);
- dm_integrity_free_page_list(ic, ic->journal_xor);
+ dm_integrity_free_page_list(ic->journal);
+ dm_integrity_free_page_list(ic->journal_io);
+ dm_integrity_free_page_list(ic->journal_xor);
+ dm_integrity_free_page_list(ic->recalc_bitmap);
+ dm_integrity_free_page_list(ic->may_write_bitmap);
if (ic->journal_scatterlist)
dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
if (ic->journal_io_scatterlist)
@@ -3595,7 +4156,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 2, 0},
+ .version = {1, 3, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
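For a sense of scale of the bitmap introduced above: one bitmap block is fixed at 4096 bytes (32768 bits), and with the default DEFAULT_SECTORS_PER_BITMAP_BIT of 32768 sectors (16 MiB at 512-byte sectors) per bit, a single bitmap block covers 512 GiB of data, so n_bitmap_blocks stays small even for very large devices. The stand-alone sketch below just repeats the constructor's sizing arithmetic for a made-up 1 TiB data device; the device size is an assumption for illustration, not a value from the patch.

#include <stdint.h>
#include <stdio.h>

#define BITMAP_BLOCK_SIZE	4096	/* bytes per on-disk bitmap block */
#define SECTORS_PER_BIT		32768	/* default: 16 MiB of data per bit */

int main(void)
{
	uint64_t data_sectors = 2ULL * 1024 * 1024 * 1024;	/* 1 TiB at 512-byte sectors */
	uint64_t bits   = (data_sectors + SECTORS_PER_BIT - 1) / SECTORS_PER_BIT;
	uint64_t blocks = (bits + BITMAP_BLOCK_SIZE * 8 - 1) / (BITMAP_BLOCK_SIZE * 8);

	printf("%llu bits, %llu bitmap block(s)\n",
	       (unsigned long long)bits, (unsigned long long)blocks);
	return 0;
}

With these defaults the example works out to 65536 bits in 2 bitmap blocks, i.e. 8 KiB of bitmap for 1 TiB of data, which is why the bitmap can reuse part of the journal area and is cheap enough to rewrite with REQ_FUA from bitmap_flush_work.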
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index c740153b4e52..1e03bc89e20f 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -2069,7 +2069,7 @@ int __init dm_early_create(struct dm_ioctl *dmi,
/* alloc table */
r = dm_table_create(&t, get_mode(dmi), dmi->target_count, md);
if (r)
- goto err_destroy_dm;
+ goto err_hash_remove;
/* add targets */
for (i = 0; i < dmi->target_count; i++) {
@@ -2116,6 +2116,10 @@ int __init dm_early_create(struct dm_ioctl *dmi,
err_destroy_table:
dm_table_destroy(t);
+err_hash_remove:
+ (void) __hash_remove(__get_name_cell(dmi->name));
+ /* release reference from __get_name_cell */
+ dm_put(md);
err_destroy_dm:
dm_put(md);
dm_destroy(md);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 2ee5e357a0a7..dbcc1e41cd57 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -544,8 +544,23 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
return DM_MAPIO_REMAPPED;
}
-static void multipath_release_clone(struct request *clone)
+static void multipath_release_clone(struct request *clone,
+ union map_info *map_context)
{
+ if (unlikely(map_context)) {
+ /*
+ * non-NULL map_context means the caller is still in the map
+ * method; must undo multipath_clone_and_map()
+ */
+ struct dm_mpath_io *mpio = get_mpio(map_context);
+ struct pgpath *pgpath = mpio->pgpath;
+
+ if (pgpath && pgpath->pg->ps.type->end_io)
+ pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
+ &pgpath->path,
+ mpio->nr_bytes);
+ }
+
blk_put_request(clone);
}
@@ -882,6 +897,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
if (attached_handler_name || m->hw_handler_name) {
INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
+ kfree(attached_handler_name);
if (r) {
dm_put_device(ti, p->path.dev);
goto bad;
@@ -896,7 +912,6 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
return p;
bad:
- kfree(attached_handler_name);
free_pgpath(p);
return ERR_PTR(r);
}
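Taken together with the dm-rq hunks below, which now pass &tio->info on the map-path error legs and NULL on ordinary completion or requeue, the extra map_context argument lets a request-based target tell the two cases apart and undo whatever ->clone_and_map_rq() already did. The fragment below is only a sketch of that contract for a hypothetical target; demo_io, demo_undo_accounting and the use of map_context->ptr are illustrative assumptions, not the multipath implementation.

#include <linux/blkdev.h>
#include <linux/device-mapper.h>

struct demo_io {			/* hypothetical per-request state */
	unsigned nr_bytes;
};

static void demo_undo_accounting(struct demo_io *io)
{
	/* e.g. tell the path selector these bytes were never issued */
}

static void demo_release_clone_rq(struct request *clone,
				  union map_info *map_context)
{
	/*
	 * NULL map_context: the clone already left ->clone_and_map_rq()
	 * and is being released on completion or requeue.
	 * Non-NULL: the caller is still inside the map method and is
	 * unwinding it, so roll back per-map state first.
	 */
	if (map_context) {
		struct demo_io *io = map_context->ptr;

		demo_undo_accounting(io);
	}

	blk_put_request(clone);
}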
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index b66745bd08bb..5f7063f05ae0 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -168,7 +168,7 @@ static void dm_end_request(struct request *clone, blk_status_t error)
struct request *rq = tio->orig;
blk_rq_unprep_clone(clone);
- tio->ti->type->release_clone_rq(clone);
+ tio->ti->type->release_clone_rq(clone, NULL);
rq_end_stats(md, rq);
blk_mq_end_request(rq, error);
@@ -201,7 +201,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
rq_end_stats(md, rq);
if (tio->clone) {
blk_rq_unprep_clone(tio->clone);
- tio->ti->type->release_clone_rq(tio->clone);
+ tio->ti->type->release_clone_rq(tio->clone, NULL);
}
dm_mq_delay_requeue_request(rq, delay_ms);
@@ -398,7 +398,7 @@ static int map_request(struct dm_rq_target_io *tio)
case DM_MAPIO_REMAPPED:
if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
/* -ENOMEM */
- ti->type->release_clone_rq(clone);
+ ti->type->release_clone_rq(clone, &tio->info);
return DM_MAPIO_REQUEUE;
}
@@ -408,7 +408,7 @@ static int map_request(struct dm_rq_target_io *tio)
ret = dm_dispatch_clone_request(clone, rq);
if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
blk_rq_unprep_clone(clone);
- tio->ti->type->release_clone_rq(clone);
+ tio->ti->type->release_clone_rq(clone, &tio->info);
tio->clone = NULL;
return DM_MAPIO_REQUEUE;
}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index a168963b757d..3107f2b1988b 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
+#include <linux/list_bl.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -44,11 +45,11 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
struct dm_exception_table {
uint32_t hash_mask;
unsigned hash_shift;
- struct list_head *table;
+ struct hlist_bl_head *table;
};
struct dm_snapshot {
- struct mutex lock;
+ struct rw_semaphore lock;
struct dm_dev *origin;
struct dm_dev *cow;
@@ -76,7 +77,9 @@ struct dm_snapshot {
atomic_t pending_exceptions_count;
- /* Protected by "lock" */
+ spinlock_t pe_allocation_lock;
+
+ /* Protected by "pe_allocation_lock" */
sector_t exception_start_sequence;
/* Protected by kcopyd single-threaded callback */
@@ -457,9 +460,9 @@ static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
continue;
- mutex_lock(&s->lock);
+ down_read(&s->lock);
active = s->active;
- mutex_unlock(&s->lock);
+ up_read(&s->lock);
if (active) {
if (snap_src)
@@ -618,6 +621,36 @@ static void unregister_snapshot(struct dm_snapshot *s)
* The lowest hash_shift bits of the chunk number are ignored, allowing
* some consecutive chunks to be grouped together.
*/
+static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk);
+
+/* Lock to protect access to the completed and pending exception hash tables. */
+struct dm_exception_table_lock {
+ struct hlist_bl_head *complete_slot;
+ struct hlist_bl_head *pending_slot;
+};
+
+static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk,
+ struct dm_exception_table_lock *lock)
+{
+ struct dm_exception_table *complete = &s->complete;
+ struct dm_exception_table *pending = &s->pending;
+
+ lock->complete_slot = &complete->table[exception_hash(complete, chunk)];
+ lock->pending_slot = &pending->table[exception_hash(pending, chunk)];
+}
+
+static void dm_exception_table_lock(struct dm_exception_table_lock *lock)
+{
+ hlist_bl_lock(lock->complete_slot);
+ hlist_bl_lock(lock->pending_slot);
+}
+
+static void dm_exception_table_unlock(struct dm_exception_table_lock *lock)
+{
+ hlist_bl_unlock(lock->pending_slot);
+ hlist_bl_unlock(lock->complete_slot);
+}
+
static int dm_exception_table_init(struct dm_exception_table *et,
uint32_t size, unsigned hash_shift)
{
@@ -625,12 +658,12 @@ static int dm_exception_table_init(struct dm_exception_table *et,
et->hash_shift = hash_shift;
et->hash_mask = size - 1;
- et->table = dm_vcalloc(size, sizeof(struct list_head));
+ et->table = dm_vcalloc(size, sizeof(struct hlist_bl_head));
if (!et->table)
return -ENOMEM;
for (i = 0; i < size; i++)
- INIT_LIST_HEAD(et->table + i);
+ INIT_HLIST_BL_HEAD(et->table + i);
return 0;
}
@@ -638,15 +671,16 @@ static int dm_exception_table_init(struct dm_exception_table *et,
static void dm_exception_table_exit(struct dm_exception_table *et,
struct kmem_cache *mem)
{
- struct list_head *slot;
- struct dm_exception *ex, *next;
+ struct hlist_bl_head *slot;
+ struct dm_exception *ex;
+ struct hlist_bl_node *pos, *n;
int i, size;
size = et->hash_mask + 1;
for (i = 0; i < size; i++) {
slot = et->table + i;
- list_for_each_entry_safe (ex, next, slot, hash_list)
+ hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list)
kmem_cache_free(mem, ex);
}
@@ -660,7 +694,7 @@ static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
static void dm_remove_exception(struct dm_exception *e)
{
- list_del(&e->hash_list);
+ hlist_bl_del(&e->hash_list);
}
/*
@@ -670,11 +704,12 @@ static void dm_remove_exception(struct dm_exception *e)
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
chunk_t chunk)
{
- struct list_head *slot;
+ struct hlist_bl_head *slot;
+ struct hlist_bl_node *pos;
struct dm_exception *e;
slot = &et->table[exception_hash(et, chunk)];
- list_for_each_entry (e, slot, hash_list)
+ hlist_bl_for_each_entry(e, pos, slot, hash_list)
if (chunk >= e->old_chunk &&
chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
return e;
@@ -721,7 +756,8 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe)
static void dm_insert_exception(struct dm_exception_table *eh,
struct dm_exception *new_e)
{
- struct list_head *l;
+ struct hlist_bl_head *l;
+ struct hlist_bl_node *pos;
struct dm_exception *e = NULL;
l = &eh->table[exception_hash(eh, new_e->old_chunk)];
@@ -731,7 +767,7 @@ static void dm_insert_exception(struct dm_exception_table *eh,
goto out;
/* List is ordered by old_chunk */
- list_for_each_entry_reverse(e, l, hash_list) {
+ hlist_bl_for_each_entry(e, pos, l, hash_list) {
/* Insert after an existing chunk? */
if (new_e->old_chunk == (e->old_chunk +
dm_consecutive_chunk_count(e) + 1) &&
@@ -752,12 +788,24 @@ static void dm_insert_exception(struct dm_exception_table *eh,
return;
}
- if (new_e->old_chunk > e->old_chunk)
+ if (new_e->old_chunk < e->old_chunk)
break;
}
out:
- list_add(&new_e->hash_list, e ? &e->hash_list : l);
+ if (!e) {
+ /*
+ * Either the table doesn't support consecutive chunks or slot
+ * l is empty.
+ */
+ hlist_bl_add_head(&new_e->hash_list, l);
+ } else if (new_e->old_chunk < e->old_chunk) {
+ /* Add before an existing exception */
+ hlist_bl_add_before(&new_e->hash_list, &e->hash_list);
+ } else {
+ /* Add to l's tail: e is the last exception in this slot */
+ hlist_bl_add_behind(&new_e->hash_list, &e->hash_list);
+ }
}
/*
@@ -766,6 +814,7 @@ out:
*/
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
+ struct dm_exception_table_lock lock;
struct dm_snapshot *s = context;
struct dm_exception *e;
@@ -778,7 +827,17 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
/* Consecutive_count is implicitly initialised to zero */
e->new_chunk = new;
+ /*
+ * Although there is no need to lock access to the exception tables
+ * here, if we don't then hlist_bl_add_head(), called by
+ * dm_insert_exception(), will complain about accessing the
+ * corresponding list without locking it first.
+ */
+ dm_exception_table_lock_init(s, old, &lock);
+
+ dm_exception_table_lock(&lock);
dm_insert_exception(&s->complete, e);
+ dm_exception_table_unlock(&lock);
return 0;
}
@@ -807,7 +866,7 @@ static int calc_max_buckets(void)
{
/* use a fixed size of 2MB */
unsigned long mem = 2 * 1024 * 1024;
- mem /= sizeof(struct list_head);
+ mem /= sizeof(struct hlist_bl_head);
return mem;
}
@@ -927,7 +986,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
int r;
chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
- mutex_lock(&s->lock);
+ down_write(&s->lock);
/*
* Process chunks (and associated exceptions) in reverse order
@@ -942,7 +1001,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
b = __release_queued_bios_after_merge(s);
out:
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
if (b)
flush_bios(b);
@@ -1001,9 +1060,9 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
if (linear_chunks < 0) {
DMERR("Read error in exception store: "
"shutting down merge");
- mutex_lock(&s->lock);
+ down_write(&s->lock);
s->merge_failed = 1;
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
}
goto shut;
}
@@ -1044,10 +1103,10 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
previous_count = read_pending_exceptions_done_count();
}
- mutex_lock(&s->lock);
+ down_write(&s->lock);
s->first_merging_chunk = old_chunk;
s->num_merging_chunks = linear_chunks;
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
/* Wait until writes to all 'linear_chunks' drain */
for (i = 0; i < linear_chunks; i++)
@@ -1089,10 +1148,10 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
return;
shut:
- mutex_lock(&s->lock);
+ down_write(&s->lock);
s->merge_failed = 1;
b = __release_queued_bios_after_merge(s);
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
error_bios(b);
merge_shutdown(s);
@@ -1188,10 +1247,11 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->snapshot_overflowed = 0;
s->active = 0;
atomic_set(&s->pending_exceptions_count, 0);
+ spin_lock_init(&s->pe_allocation_lock);
s->exception_start_sequence = 0;
s->exception_complete_sequence = 0;
s->out_of_order_tree = RB_ROOT;
- mutex_init(&s->lock);
+ init_rwsem(&s->lock);
INIT_LIST_HEAD(&s->list);
spin_lock_init(&s->pe_lock);
s->state_bits = 0;
@@ -1357,9 +1417,9 @@ static void snapshot_dtr(struct dm_target *ti)
/* Check whether exception handover must be cancelled */
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest && (s == snap_src)) {
- mutex_lock(&snap_dest->lock);
+ down_write(&snap_dest->lock);
snap_dest->valid = 0;
- mutex_unlock(&snap_dest->lock);
+ up_write(&snap_dest->lock);
DMERR("Cancelling snapshot handover.");
}
up_read(&_origins_lock);
@@ -1390,8 +1450,6 @@ static void snapshot_dtr(struct dm_target *ti)
dm_exception_store_destroy(s->store);
- mutex_destroy(&s->lock);
-
dm_put_device(ti, s->cow);
dm_put_device(ti, s->origin);
@@ -1467,6 +1525,13 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
dm_table_event(s->ti->table);
}
+static void invalidate_snapshot(struct dm_snapshot *s, int err)
+{
+ down_write(&s->lock);
+ __invalidate_snapshot(s, err);
+ up_write(&s->lock);
+}
+
static void pending_complete(void *context, int success)
{
struct dm_snap_pending_exception *pe = context;
@@ -1475,43 +1540,63 @@ static void pending_complete(void *context, int success)
struct bio *origin_bios = NULL;
struct bio *snapshot_bios = NULL;
struct bio *full_bio = NULL;
+ struct dm_exception_table_lock lock;
int error = 0;
+ dm_exception_table_lock_init(s, pe->e.old_chunk, &lock);
+
if (!success) {
/* Read/write error - snapshot is unusable */
- mutex_lock(&s->lock);
- __invalidate_snapshot(s, -EIO);
+ invalidate_snapshot(s, -EIO);
error = 1;
+
+ dm_exception_table_lock(&lock);
goto out;
}
e = alloc_completed_exception(GFP_NOIO);
if (!e) {
- mutex_lock(&s->lock);
- __invalidate_snapshot(s, -ENOMEM);
+ invalidate_snapshot(s, -ENOMEM);
error = 1;
+
+ dm_exception_table_lock(&lock);
goto out;
}
*e = pe->e;
- mutex_lock(&s->lock);
+ down_read(&s->lock);
+ dm_exception_table_lock(&lock);
if (!s->valid) {
+ up_read(&s->lock);
free_completed_exception(e);
error = 1;
+
goto out;
}
- /* Check for conflicting reads */
- __check_for_conflicting_io(s, pe->e.old_chunk);
-
/*
- * Add a proper exception, and remove the
- * in-flight exception from the list.
+ * Add a proper exception. After inserting the completed exception all
+ * subsequent snapshot reads to this chunk will be redirected to the
+ * COW device. This ensures that we do not starve. Moreover, as long
+ * as the pending exception exists, neither origin writes nor snapshot
+ * merging can overwrite the chunk in origin.
*/
dm_insert_exception(&s->complete, e);
+ up_read(&s->lock);
+
+ /* Wait for conflicting reads to drain */
+ if (__chunk_is_tracked(s, pe->e.old_chunk)) {
+ dm_exception_table_unlock(&lock);
+ __check_for_conflicting_io(s, pe->e.old_chunk);
+ dm_exception_table_lock(&lock);
+ }
out:
+ /* Remove the in-flight exception from the list */
dm_remove_exception(&pe->e);
+
+ dm_exception_table_unlock(&lock);
+
snapshot_bios = bio_list_get(&pe->snapshot_bios);
origin_bios = bio_list_get(&pe->origin_bios);
full_bio = pe->full_bio;
@@ -1519,8 +1604,6 @@ out:
full_bio->bi_end_io = pe->full_bio_end_io;
increment_pending_exceptions_done_count();
- mutex_unlock(&s->lock);
-
/* Submit any pending write bios */
if (error) {
if (full_bio)
@@ -1660,43 +1743,59 @@ __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
}
/*
- * Looks to see if this snapshot already has a pending exception
- * for this chunk, otherwise it allocates a new one and inserts
- * it into the pending table.
+ * Inserts a pending exception into the pending table.
*
- * NOTE: a write lock must be held on snap->lock before calling
- * this.
+ * NOTE: a write lock must be held on the chunk's pending exception table slot
+ * before calling this.
*/
static struct dm_snap_pending_exception *
-__find_pending_exception(struct dm_snapshot *s,
- struct dm_snap_pending_exception *pe, chunk_t chunk)
+__insert_pending_exception(struct dm_snapshot *s,
+ struct dm_snap_pending_exception *pe, chunk_t chunk)
{
- struct dm_snap_pending_exception *pe2;
-
- pe2 = __lookup_pending_exception(s, chunk);
- if (pe2) {
- free_pending_exception(pe);
- return pe2;
- }
-
pe->e.old_chunk = chunk;
bio_list_init(&pe->origin_bios);
bio_list_init(&pe->snapshot_bios);
pe->started = 0;
pe->full_bio = NULL;
+ spin_lock(&s->pe_allocation_lock);
if (s->store->type->prepare_exception(s->store, &pe->e)) {
+ spin_unlock(&s->pe_allocation_lock);
free_pending_exception(pe);
return NULL;
}
pe->exception_sequence = s->exception_start_sequence++;
+ spin_unlock(&s->pe_allocation_lock);
dm_insert_exception(&s->pending, &pe->e);
return pe;
}
+/*
+ * Looks to see if this snapshot already has a pending exception
+ * for this chunk, otherwise it allocates a new one and inserts
+ * it into the pending table.
+ *
+ * NOTE: a write lock must be held on the chunk's pending exception table slot
+ * before calling this.
+ */
+static struct dm_snap_pending_exception *
+__find_pending_exception(struct dm_snapshot *s,
+ struct dm_snap_pending_exception *pe, chunk_t chunk)
+{
+ struct dm_snap_pending_exception *pe2;
+
+ pe2 = __lookup_pending_exception(s, chunk);
+ if (pe2) {
+ free_pending_exception(pe);
+ return pe2;
+ }
+
+ return __insert_pending_exception(s, pe, chunk);
+}
+
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
struct bio *bio, chunk_t chunk)
{
@@ -1714,6 +1813,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
int r = DM_MAPIO_REMAPPED;
chunk_t chunk;
struct dm_snap_pending_exception *pe = NULL;
+ struct dm_exception_table_lock lock;
init_tracked_chunk(bio);
@@ -1723,13 +1823,15 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
}
chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
+ dm_exception_table_lock_init(s, chunk, &lock);
/* Full snapshots are not usable */
/* To get here the table must be live so s->active is always set. */
if (!s->valid)
return DM_MAPIO_KILL;
- mutex_lock(&s->lock);
+ down_read(&s->lock);
+ dm_exception_table_lock(&lock);
if (!s->valid || (unlikely(s->snapshot_overflowed) &&
bio_data_dir(bio) == WRITE)) {
@@ -1752,15 +1854,9 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (bio_data_dir(bio) == WRITE) {
pe = __lookup_pending_exception(s, chunk);
if (!pe) {
- mutex_unlock(&s->lock);
+ dm_exception_table_unlock(&lock);
pe = alloc_pending_exception(s);
- mutex_lock(&s->lock);
-
- if (!s->valid || s->snapshot_overflowed) {
- free_pending_exception(pe);
- r = DM_MAPIO_KILL;
- goto out_unlock;
- }
+ dm_exception_table_lock(&lock);
e = dm_lookup_exception(&s->complete, chunk);
if (e) {
@@ -1771,13 +1867,22 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
pe = __find_pending_exception(s, pe, chunk);
if (!pe) {
+ dm_exception_table_unlock(&lock);
+ up_read(&s->lock);
+
+ down_write(&s->lock);
+
if (s->store->userspace_supports_overflow) {
- s->snapshot_overflowed = 1;
- DMERR("Snapshot overflowed: Unable to allocate exception.");
+ if (s->valid && !s->snapshot_overflowed) {
+ s->snapshot_overflowed = 1;
+ DMERR("Snapshot overflowed: Unable to allocate exception.");
+ }
} else
__invalidate_snapshot(s, -ENOMEM);
+ up_write(&s->lock);
+
r = DM_MAPIO_KILL;
- goto out_unlock;
+ goto out;
}
}
@@ -1789,7 +1894,10 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
bio->bi_iter.bi_size ==
(s->store->chunk_size << SECTOR_SHIFT)) {
pe->started = 1;
- mutex_unlock(&s->lock);
+
+ dm_exception_table_unlock(&lock);
+ up_read(&s->lock);
+
start_full_bio(pe, bio);
goto out;
}
@@ -1797,9 +1905,12 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
bio_list_add(&pe->snapshot_bios, bio);
if (!pe->started) {
- /* this is protected by snap->lock */
+ /* this is protected by the exception table lock */
pe->started = 1;
- mutex_unlock(&s->lock);
+
+ dm_exception_table_unlock(&lock);
+ up_read(&s->lock);
+
start_copy(pe);
goto out;
}
@@ -1809,7 +1920,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
}
out_unlock:
- mutex_unlock(&s->lock);
+ dm_exception_table_unlock(&lock);
+ up_read(&s->lock);
out:
return r;
}
@@ -1845,7 +1957,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
- mutex_lock(&s->lock);
+ down_write(&s->lock);
/* Full merging snapshots are redirected to the origin */
if (!s->valid)
@@ -1876,12 +1988,12 @@ redirect_to_origin:
bio_set_dev(bio, s->origin->bdev);
if (bio_data_dir(bio) == WRITE) {
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
return do_origin(s->origin, bio);
}
out_unlock:
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
return r;
}
@@ -1913,7 +2025,7 @@ static int snapshot_preresume(struct dm_target *ti)
down_read(&_origins_lock);
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
- mutex_lock(&snap_src->lock);
+ down_read(&snap_src->lock);
if (s == snap_src) {
DMERR("Unable to resume snapshot source until "
"handover completes.");
@@ -1923,7 +2035,7 @@ static int snapshot_preresume(struct dm_target *ti)
"source is suspended.");
r = -EINVAL;
}
- mutex_unlock(&snap_src->lock);
+ up_read(&snap_src->lock);
}
up_read(&_origins_lock);
@@ -1969,11 +2081,11 @@ static void snapshot_resume(struct dm_target *ti)
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
- mutex_lock(&snap_src->lock);
- mutex_lock_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
+ down_write(&snap_src->lock);
+ down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
__handover_exceptions(snap_src, snap_dest);
- mutex_unlock(&snap_dest->lock);
- mutex_unlock(&snap_src->lock);
+ up_write(&snap_dest->lock);
+ up_write(&snap_src->lock);
}
up_read(&_origins_lock);
@@ -1988,9 +2100,9 @@ static void snapshot_resume(struct dm_target *ti)
/* Now we have correct chunk size, reregister */
reregister_snapshot(s);
- mutex_lock(&s->lock);
+ down_write(&s->lock);
s->active = 1;
- mutex_unlock(&s->lock);
+ up_write(&s->lock);
}
static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
@@ -2030,7 +2142,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
switch (type) {
case STATUSTYPE_INFO:
- mutex_lock(&snap->lock);
+ down_write(&snap->lock);
if (!snap->valid)
DMEMIT("Invalid");
@@ -2055,7 +2167,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
DMEMIT("Unknown");
}
- mutex_unlock(&snap->lock);
+ up_write(&snap->lock);
break;
@@ -2107,9 +2219,10 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
int r = DM_MAPIO_REMAPPED;
struct dm_snapshot *snap;
struct dm_exception *e;
- struct dm_snap_pending_exception *pe;
+ struct dm_snap_pending_exception *pe, *pe2;
struct dm_snap_pending_exception *pe_to_start_now = NULL;
struct dm_snap_pending_exception *pe_to_start_last = NULL;
+ struct dm_exception_table_lock lock;
chunk_t chunk;
/* Do all the snapshots on this origin */
@@ -2121,52 +2234,59 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
if (dm_target_is_snapshot_merge(snap->ti))
continue;
- mutex_lock(&snap->lock);
-
- /* Only deal with valid and active snapshots */
- if (!snap->valid || !snap->active)
- goto next_snapshot;
-
/* Nothing to do if writing beyond end of snapshot */
if (sector >= dm_table_get_size(snap->ti->table))
- goto next_snapshot;
+ continue;
/*
* Remember, different snapshots can have
* different chunk sizes.
*/
chunk = sector_to_chunk(snap->store, sector);
+ dm_exception_table_lock_init(snap, chunk, &lock);
- /*
- * Check exception table to see if block
- * is already remapped in this snapshot
- * and trigger an exception if not.
- */
- e = dm_lookup_exception(&snap->complete, chunk);
- if (e)
+ down_read(&snap->lock);
+ dm_exception_table_lock(&lock);
+
+ /* Only deal with valid and active snapshots */
+ if (!snap->valid || !snap->active)
goto next_snapshot;
pe = __lookup_pending_exception(snap, chunk);
if (!pe) {
- mutex_unlock(&snap->lock);
- pe = alloc_pending_exception(snap);
- mutex_lock(&snap->lock);
-
- if (!snap->valid) {
- free_pending_exception(pe);
- goto next_snapshot;
- }
-
+ /*
+ * Check exception table to see if block is already
+ * remapped in this snapshot and trigger an exception
+ * if not.
+ */
e = dm_lookup_exception(&snap->complete, chunk);
- if (e) {
- free_pending_exception(pe);
+ if (e)
goto next_snapshot;
- }
- pe = __find_pending_exception(snap, pe, chunk);
- if (!pe) {
- __invalidate_snapshot(snap, -ENOMEM);
- goto next_snapshot;
+ dm_exception_table_unlock(&lock);
+ pe = alloc_pending_exception(snap);
+ dm_exception_table_lock(&lock);
+
+ pe2 = __lookup_pending_exception(snap, chunk);
+
+ if (!pe2) {
+ e = dm_lookup_exception(&snap->complete, chunk);
+ if (e) {
+ free_pending_exception(pe);
+ goto next_snapshot;
+ }
+
+ pe = __insert_pending_exception(snap, pe, chunk);
+ if (!pe) {
+ dm_exception_table_unlock(&lock);
+ up_read(&snap->lock);
+
+ invalidate_snapshot(snap, -ENOMEM);
+ continue;
+ }
+ } else {
+ free_pending_exception(pe);
+ pe = pe2;
}
}
@@ -2193,7 +2313,8 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
}
next_snapshot:
- mutex_unlock(&snap->lock);
+ dm_exception_table_unlock(&lock);
+ up_read(&snap->lock);
if (pe_to_start_now) {
start_copy(pe_to_start_now);
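
The snapshot map path above now takes s->lock only for read and serialises per-chunk work through the exception table lock, dropping that lock around the sleeping allocation and re-checking for a racing insertion once it is retaken. A rough userspace model of the pattern, assuming a simple bucket hash and placeholder table lookups (none of which are the driver's real API):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define NR_EXCEPTION_LOCKS 64

	static pthread_rwlock_t snap_lock = PTHREAD_RWLOCK_INITIALIZER;
	static pthread_mutex_t table_lock[NR_EXCEPTION_LOCKS];

	static pthread_mutex_t *chunk_lock(unsigned long chunk)
	{
		/* stand-in for dm_exception_table_lock_init()'s bucket hashing */
		return &table_lock[chunk % NR_EXCEPTION_LOCKS];
	}

	/* placeholders for the pending-table lookup and mempool allocation */
	static void *lookup_pending(unsigned long chunk) { (void)chunk; return NULL; }
	static void *alloc_pending(void) { return malloc(1); }

	static void map_write(unsigned long chunk)
	{
		pthread_mutex_t *lock = chunk_lock(chunk);
		void *pe;

		pthread_rwlock_rdlock(&snap_lock);	/* shared: chunks proceed in parallel */
		pthread_mutex_lock(lock);		/* exclusive for this chunk's slot only */

		pe = lookup_pending(chunk);
		if (!pe) {
			/* allocation may sleep: drop the slot lock, then re-check */
			pthread_mutex_unlock(lock);
			pe = alloc_pending();
			pthread_mutex_lock(lock);

			if (lookup_pending(chunk))
				free(pe);	/* lost the race; use the winner's exception */
			/* else: insert pe into the pending table and start the copy */
		}

		pthread_mutex_unlock(lock);
		pthread_rwlock_unlock(&snap_lock);
	}

	int main(void)
	{
		for (int i = 0; i < NR_EXCEPTION_LOCKS; i++)
			pthread_mutex_init(&table_lock[i], NULL);
		map_write(42);
		puts("chunk 42 mapped");
		return 0;
	}
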
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 314d17ca6466..64dd0b34fcf4 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -136,7 +136,8 @@ static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
return DM_MAPIO_KILL;
}
-static void io_err_release_clone_rq(struct request *clone)
+static void io_err_release_clone_rq(struct request *clone,
+ union map_info *map_context)
{
}
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index ed3caceaed07..7f0840601737 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -202,6 +202,13 @@ struct dm_pool_metadata {
bool fail_io:1;
/*
+ * Set once a thin-pool has been accessed through one of the interfaces
+ * that imply the pool is in-service (e.g. thin devices created/deleted,
+ * thin-pool message, metadata snapshots, etc).
+ */
+ bool in_service:1;
+
+ /*
* Reading the space map roots can fail, so we read it into these
* buffers before the superblock is locked and updated.
*/
@@ -367,6 +374,32 @@ static int subtree_equal(void *context, const void *value1_le, const void *value
/*----------------------------------------------------------------*/
+/*
+ * Variant that is used for in-core only changes or code that
+ * shouldn't put the pool in service on its own (e.g. commit).
+ */
+static inline void __pmd_write_lock(struct dm_pool_metadata *pmd)
+ __acquires(pmd->root_lock)
+{
+ down_write(&pmd->root_lock);
+}
+#define pmd_write_lock_in_core(pmd) __pmd_write_lock((pmd))
+
+static inline void pmd_write_lock(struct dm_pool_metadata *pmd)
+{
+ __pmd_write_lock(pmd);
+ if (unlikely(!pmd->in_service))
+ pmd->in_service = true;
+}
+
+static inline void pmd_write_unlock(struct dm_pool_metadata *pmd)
+ __releases(pmd->root_lock)
+{
+ up_write(&pmd->root_lock);
+}
+
+/*----------------------------------------------------------------*/
+
static int superblock_lock_zero(struct dm_pool_metadata *pmd,
struct dm_block **sblock)
{
@@ -790,6 +823,9 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
*/
BUILD_BUG_ON(sizeof(struct thin_disk_superblock) > 512);
+ if (unlikely(!pmd->in_service))
+ return 0;
+
r = __write_changed_details(pmd);
if (r < 0)
return r;
@@ -853,6 +889,7 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
pmd->time = 0;
INIT_LIST_HEAD(&pmd->thin_devices);
pmd->fail_io = false;
+ pmd->in_service = false;
pmd->bdev = bdev;
pmd->data_block_size = data_block_size;
@@ -903,7 +940,6 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
DMWARN("%s: __commit_transaction() failed, error = %d",
__func__, r);
}
-
if (!pmd->fail_io)
__destroy_persistent_data_objects(pmd);
@@ -1032,10 +1068,10 @@ int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev)
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (!pmd->fail_io)
r = __create_thin(pmd, dev);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1123,10 +1159,10 @@ int dm_pool_create_snap(struct dm_pool_metadata *pmd,
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (!pmd->fail_io)
r = __create_snap(pmd, dev, origin);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1166,10 +1202,10 @@ int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd,
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (!pmd->fail_io)
r = __delete_device(pmd, dev);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1180,7 +1216,7 @@ int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (pmd->fail_io)
goto out;
@@ -1194,7 +1230,7 @@ int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
r = 0;
out:
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1225,7 +1261,12 @@ static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
* We commit to ensure the btree roots which we increment in a
* moment are up to date.
*/
- __commit_transaction(pmd);
+ r = __commit_transaction(pmd);
+ if (r < 0) {
+ DMWARN("%s: __commit_transaction() failed, error = %d",
+ __func__, r);
+ return r;
+ }
/*
* Copy the superblock.
@@ -1283,10 +1324,10 @@ int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (!pmd->fail_io)
r = __reserve_metadata_snap(pmd);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1331,10 +1372,10 @@ int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (!pmd->fail_io)
r = __release_metadata_snap(pmd);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1377,19 +1418,19 @@ int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev,
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock_in_core(pmd);
if (!pmd->fail_io)
r = __open_device(pmd, dev, 0, td);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
int dm_pool_close_thin_device(struct dm_thin_device *td)
{
- down_write(&td->pmd->root_lock);
+ pmd_write_lock_in_core(td->pmd);
__close_device(td);
- up_write(&td->pmd->root_lock);
+ pmd_write_unlock(td->pmd);
return 0;
}
@@ -1570,10 +1611,10 @@ int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
{
int r = -EINVAL;
- down_write(&td->pmd->root_lock);
+ pmd_write_lock(td->pmd);
if (!td->pmd->fail_io)
r = __insert(td, block, data_block);
- up_write(&td->pmd->root_lock);
+ pmd_write_unlock(td->pmd);
return r;
}
@@ -1657,10 +1698,10 @@ int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
{
int r = -EINVAL;
- down_write(&td->pmd->root_lock);
+ pmd_write_lock(td->pmd);
if (!td->pmd->fail_io)
r = __remove(td, block);
- up_write(&td->pmd->root_lock);
+ pmd_write_unlock(td->pmd);
return r;
}
@@ -1670,10 +1711,10 @@ int dm_thin_remove_range(struct dm_thin_device *td,
{
int r = -EINVAL;
- down_write(&td->pmd->root_lock);
+ pmd_write_lock(td->pmd);
if (!td->pmd->fail_io)
r = __remove_range(td, begin, end);
- up_write(&td->pmd->root_lock);
+ pmd_write_unlock(td->pmd);
return r;
}
@@ -1696,13 +1737,13 @@ int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_
{
int r = 0;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
for (; b != e; b++) {
r = dm_sm_inc_block(pmd->data_sm, b);
if (r)
break;
}
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1711,13 +1752,13 @@ int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_
{
int r = 0;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
for (; b != e; b++) {
r = dm_sm_dec_block(pmd->data_sm, b);
if (r)
break;
}
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1765,10 +1806,10 @@ int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result)
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (!pmd->fail_io)
r = dm_sm_new_block(pmd->data_sm, result);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1777,12 +1818,16 @@ int dm_pool_commit_metadata(struct dm_pool_metadata *pmd)
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ /*
+ * Care is taken so that a commit does not, on its own, put
+ * the thin-pool in service.
+ */
+ __pmd_write_lock(pmd);
if (pmd->fail_io)
goto out;
r = __commit_transaction(pmd);
- if (r <= 0)
+ if (r < 0)
goto out;
/*
@@ -1790,7 +1835,7 @@ int dm_pool_commit_metadata(struct dm_pool_metadata *pmd)
*/
r = __begin_transaction(pmd);
out:
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1806,7 +1851,7 @@ int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (pmd->fail_io)
goto out;
@@ -1817,7 +1862,7 @@ int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
pmd->fail_io = true;
out:
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1948,10 +1993,10 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (!pmd->fail_io)
r = __resize_space_map(pmd->data_sm, new_count);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -1960,29 +2005,29 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_cou
{
int r = -EINVAL;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
if (!pmd->fail_io) {
r = __resize_space_map(pmd->metadata_sm, new_count);
if (!r)
__set_metadata_reserve(pmd);
}
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
{
- down_write(&pmd->root_lock);
+ pmd_write_lock_in_core(pmd);
dm_bm_set_read_only(pmd->bm);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
}
void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd)
{
- down_write(&pmd->root_lock);
+ pmd_write_lock_in_core(pmd);
dm_bm_set_read_write(pmd->bm);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
}
int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
@@ -1992,9 +2037,9 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
{
int r;
- down_write(&pmd->root_lock);
+ pmd_write_lock_in_core(pmd);
r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context);
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
@@ -2005,7 +2050,7 @@ int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
struct dm_block *sblock;
struct thin_disk_superblock *disk_super;
- down_write(&pmd->root_lock);
+ pmd_write_lock(pmd);
pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;
r = superblock_lock(pmd, &sblock);
@@ -2019,7 +2064,7 @@ int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
dm_bm_unlock(sblock);
out:
- up_write(&pmd->root_lock);
+ pmd_write_unlock(pmd);
return r;
}
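
With the pmd_write_lock()/pmd_write_lock_in_core() split, only operations that imply real pool activity mark the pool in-service, and __commit_transaction() exits early until that happens. A minimal userspace sketch of the gating, using a pthread rwlock in place of pmd->root_lock; the function names mirror the patch, everything else is illustrative:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct pool_metadata {
		pthread_rwlock_t root_lock;
		bool in_service;
	};

	/* in-core variant: takes the lock but never marks the pool in-service */
	static void pmd_write_lock_in_core(struct pool_metadata *pmd)
	{
		pthread_rwlock_wrlock(&pmd->root_lock);
	}

	/* normal variant: the pool is now considered active */
	static void pmd_write_lock(struct pool_metadata *pmd)
	{
		pthread_rwlock_wrlock(&pmd->root_lock);
		if (!pmd->in_service)
			pmd->in_service = true;
	}

	static void pmd_write_unlock(struct pool_metadata *pmd)
	{
		pthread_rwlock_unlock(&pmd->root_lock);
	}

	/* commit is skipped entirely for a pool that was never put in service */
	static int commit_transaction(struct pool_metadata *pmd)
	{
		if (!pmd->in_service)
			return 0;
		printf("writing changed details and superblock\n");
		return 0;
	}

	int main(void)
	{
		struct pool_metadata pmd = { .in_service = false };
		pthread_rwlock_init(&pmd.root_lock, NULL);

		pmd_write_lock_in_core(&pmd);	/* e.g. open/close of a thin device */
		commit_transaction(&pmd);	/* no-op: nothing has touched the pool */
		pmd_write_unlock(&pmd);

		pmd_write_lock(&pmd);		/* e.g. dm_pool_create_thin() */
		pmd_write_unlock(&pmd);

		pmd_write_lock_in_core(&pmd);	/* dm_pool_commit_metadata() path */
		commit_transaction(&pmd);	/* now performs the real commit */
		pmd_write_unlock(&pmd);
		return 0;
	}
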
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index f7822875589e..1cb137f0ef9d 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -190,7 +190,6 @@ struct writeback_struct {
struct dm_writecache *wc;
struct wc_entry **wc_list;
unsigned wc_list_n;
- unsigned page_offset;
struct page *page;
struct wc_entry *wc_list_inline[WB_LIST_INLINE];
struct bio bio;
@@ -546,21 +545,20 @@ static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
e = container_of(node, struct wc_entry, rb_node);
if (read_original_sector(wc, e) == block)
break;
+
node = (read_original_sector(wc, e) >= block ?
e->rb_node.rb_left : e->rb_node.rb_right);
if (unlikely(!node)) {
- if (!(flags & WFE_RETURN_FOLLOWING)) {
+ if (!(flags & WFE_RETURN_FOLLOWING))
return NULL;
- }
if (read_original_sector(wc, e) >= block) {
- break;
+ return e;
} else {
node = rb_next(&e->rb_node);
- if (unlikely(!node)) {
+ if (unlikely(!node))
return NULL;
- }
e = container_of(node, struct wc_entry, rb_node);
- break;
+ return e;
}
}
}
@@ -571,7 +569,7 @@ static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
node = rb_prev(&e->rb_node);
else
node = rb_next(&e->rb_node);
- if (!node)
+ if (unlikely(!node))
return e;
e2 = container_of(node, struct wc_entry, rb_node);
if (read_original_sector(wc, e2) != block)
@@ -804,7 +802,7 @@ static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_
writecache_free_entry(wc, e);
}
- if (!node)
+ if (unlikely(!node))
break;
e = container_of(node, struct wc_entry, rb_node);
@@ -1478,10 +1476,9 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
wb = container_of(bio, struct writeback_struct, bio);
wb->wc = wc;
- wb->bio.bi_end_io = writecache_writeback_endio;
- bio_set_dev(&wb->bio, wc->dev->bdev);
- wb->bio.bi_iter.bi_sector = read_original_sector(wc, e);
- wb->page_offset = PAGE_SIZE;
+ bio->bi_end_io = writecache_writeback_endio;
+ bio_set_dev(bio, wc->dev->bdev);
+ bio->bi_iter.bi_sector = read_original_sector(wc, e);
if (max_pages <= WB_LIST_INLINE ||
unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
GFP_NOIO | __GFP_NORETRY |
@@ -1507,12 +1504,12 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
wb->wc_list[wb->wc_list_n++] = f;
e = f;
}
- bio_set_op_attrs(&wb->bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
+ bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
if (writecache_has_error(wc)) {
bio->bi_status = BLK_STS_IOERR;
- bio_endio(&wb->bio);
+ bio_endio(bio);
} else {
- submit_bio(&wb->bio);
+ submit_bio(bio);
}
__writeback_throttle(wc, wbl);
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index fa68336560c3..d8334cd45d7c 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -1169,6 +1169,9 @@ static int dmz_init_zones(struct dmz_metadata *zmd)
goto out;
}
+ if (!nr_blkz)
+ break;
+
/* Process report */
for (i = 0; i < nr_blkz; i++) {
ret = dmz_init_zone(zmd, zone, &blkz[i]);
@@ -1204,6 +1207,8 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
/* Get zone information from disk */
ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
&blkz, &nr_blkz, GFP_NOIO);
+ if (!nr_blkz)
+ ret = -EIO;
if (ret) {
dmz_dev_err(zmd->dev, "Get zone %u report failed",
dmz_id(zmd, zone));
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 8865c1709e16..51d029bbb740 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -643,7 +643,8 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
q = bdev_get_queue(dev->bdev);
dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
- aligned_capacity = dev->capacity & ~(blk_queue_zone_sectors(q) - 1);
+ aligned_capacity = dev->capacity &
+ ~((sector_t)blk_queue_zone_sectors(q) - 1);
if (ti->begin ||
((ti->len != dev->capacity) && (ti->len != aligned_capacity))) {
ti->error = "Partial mapping not supported";
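
The (sector_t) cast matters because blk_queue_zone_sectors() yields a 32-bit value: negating it before widening produces a mask with a zero upper half, so capacities beyond 2^32 sectors were silently truncated. A standalone demonstration with assumed example values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t capacity = (6ULL << 32) | 2048;	/* > 2^32 sectors, i.e. > 2 TiB */
		uint32_t zone_sectors = 1u << 19;		/* assumed 256 MiB zones */

		/* old expression: ~ applies to a 32-bit value, so the mask
		 * zero-extends and clears the upper half of the capacity */
		uint64_t broken = capacity & ~(zone_sectors - 1);

		/* patched expression: widen first so the mask keeps the high bits */
		uint64_t fixed = capacity & ~((uint64_t)zone_sectors - 1);

		printf("broken: %llu\nfixed:  %llu\n",
		       (unsigned long long)broken, (unsigned long long)fixed);
		return 0;
	}
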
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 043f0761e4a0..1fb1333fefec 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -781,7 +781,8 @@ static void close_table_device(struct table_device *td, struct mapped_device *md
}
static struct table_device *find_table_device(struct list_head *l, dev_t dev,
- fmode_t mode) {
+ fmode_t mode)
+{
struct table_device *td;
list_for_each_entry(td, l, list)
@@ -792,7 +793,8 @@ static struct table_device *find_table_device(struct list_head *l, dev_t dev,
}
int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
- struct dm_dev **result) {
+ struct dm_dev **result)
+{
int r;
struct table_device *td;
@@ -1906,7 +1908,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
static struct mapped_device *alloc_dev(int minor)
{
int r, numa_node_id = dm_get_numa_node();
- struct dax_device *dax_dev = NULL;
struct mapped_device *md;
void *old_md;
@@ -1969,11 +1970,10 @@ static struct mapped_device *alloc_dev(int minor)
sprintf(md->disk->disk_name, "dm-%d", minor);
if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
- dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops);
- if (!dax_dev)
+ md->dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops);
+ if (!md->dax_dev)
goto bad;
}
- md->dax_dev = dax_dev;
add_disk_no_queue_reg(md->disk);
format_dev_t(md->name, MKDEV(_major, minor));
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 0a3b8ae4a29c..b8a62188f6be 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -190,6 +190,8 @@ static int sm_find_free(void *addr, unsigned begin, unsigned end,
static int sm_ll_init(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
+ memset(ll, 0, sizeof(struct ll_disk));
+
ll->tm = tm;
ll->bitmap_info.tm = tm;
diff --git a/drivers/media/common/b2c2/Makefile b/drivers/media/common/b2c2/Makefile
index aa2dc2434ee5..0e32b77f349b 100644
--- a/drivers/media/common/b2c2/Makefile
+++ b/drivers/media/common/b2c2/Makefile
@@ -4,5 +4,5 @@ b2c2-flexcop-objs += flexcop-sram.o flexcop-eeprom.o flexcop-misc.o
b2c2-flexcop-objs += flexcop-hw-filter.o
obj-$(CONFIG_DVB_B2C2_FLEXCOP) += b2c2-flexcop.o
-ccflags-y += -Idrivers/media/dvb-frontends/
-ccflags-y += -Idrivers/media/tuners/
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends/
+ccflags-y += -I $(srctree)/drivers/media/tuners/
diff --git a/drivers/media/dvb-frontends/cxd2880/Makefile b/drivers/media/dvb-frontends/cxd2880/Makefile
index c6baa4caba19..646598b556d5 100644
--- a/drivers/media/dvb-frontends/cxd2880/Makefile
+++ b/drivers/media/dvb-frontends/cxd2880/Makefile
@@ -14,5 +14,3 @@ cxd2880-objs := cxd2880_common.o \
cxd2880_top.o
obj-$(CONFIG_DVB_CXD2880) += cxd2880.o
-
-ccflags-y += -Idrivers/media/dvb-frontends
diff --git a/drivers/media/i2c/smiapp/Makefile b/drivers/media/i2c/smiapp/Makefile
index f45a003cbe7e..9f03aefd4fd7 100644
--- a/drivers/media/i2c/smiapp/Makefile
+++ b/drivers/media/i2c/smiapp/Makefile
@@ -2,4 +2,4 @@ smiapp-objs += smiapp-core.o smiapp-regs.o \
smiapp-quirk.o smiapp-limits.o
obj-$(CONFIG_VIDEO_SMIAPP) += smiapp.o
-ccflags-y += -Idrivers/media/i2c
+ccflags-y += -I $(srctree)/drivers/media/i2c
diff --git a/drivers/media/mmc/siano/Makefile b/drivers/media/mmc/siano/Makefile
index 5fc345645a80..848548feeb19 100644
--- a/drivers/media/mmc/siano/Makefile
+++ b/drivers/media/mmc/siano/Makefile
@@ -1,4 +1,3 @@
obj-$(CONFIG_SMS_SDIO_DRV) += smssdio.o
-ccflags-y += -Idrivers/media/common/siano
-
+ccflags-y += -I $(srctree)/drivers/media/common/siano
diff --git a/drivers/media/pci/b2c2/Makefile b/drivers/media/pci/b2c2/Makefile
index b43b9167db5a..14ed6e441738 100644
--- a/drivers/media/pci/b2c2/Makefile
+++ b/drivers/media/pci/b2c2/Makefile
@@ -6,4 +6,4 @@ endif
b2c2-flexcop-pci-objs += flexcop-pci.o
obj-$(CONFIG_DVB_B2C2_FLEXCOP_PCI) += b2c2-flexcop-pci.o
-ccflags-y += -Idrivers/media/common/b2c2/
+ccflags-y += -I $(srctree)/drivers/media/common/b2c2/
diff --git a/drivers/media/pci/bt8xx/Makefile b/drivers/media/pci/bt8xx/Makefile
index 7f1c3beb1bbc..69bc0d9c478e 100644
--- a/drivers/media/pci/bt8xx/Makefile
+++ b/drivers/media/pci/bt8xx/Makefile
@@ -6,6 +6,5 @@ bttv-objs := bttv-driver.o bttv-cards.o bttv-if.o \
obj-$(CONFIG_VIDEO_BT848) += bttv.o
obj-$(CONFIG_DVB_BT8XX) += bt878.o dvb-bt8xx.o dst.o dst_ca.o
-ccflags-y += -Idrivers/media/dvb-frontends
-ccflags-y += -Idrivers/media/common
-ccflags-y += -Idrivers/media/tuners
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends
+ccflags-y += -I $(srctree)/drivers/media/tuners
diff --git a/drivers/media/pci/cx18/Makefile b/drivers/media/pci/cx18/Makefile
index 9c82c2df05e1..df00ef8b4521 100644
--- a/drivers/media/pci/cx18/Makefile
+++ b/drivers/media/pci/cx18/Makefile
@@ -9,5 +9,5 @@ cx18-alsa-objs := cx18-alsa-main.o cx18-alsa-pcm.o
obj-$(CONFIG_VIDEO_CX18) += cx18.o
obj-$(CONFIG_VIDEO_CX18_ALSA) += cx18-alsa.o
-ccflags-y += -Idrivers/media/dvb-frontends
-ccflags-y += -Idrivers/media/tuners
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends
+ccflags-y += -I $(srctree)/drivers/media/tuners
diff --git a/drivers/media/pci/cx23885/Makefile b/drivers/media/pci/cx23885/Makefile
index 130f0aa29ac6..a785169ec368 100644
--- a/drivers/media/pci/cx23885/Makefile
+++ b/drivers/media/pci/cx23885/Makefile
@@ -8,7 +8,7 @@ cx23885-objs := cx23885-cards.o cx23885-video.o cx23885-vbi.o \
obj-$(CONFIG_VIDEO_CX23885) += cx23885.o
obj-$(CONFIG_MEDIA_ALTERA_CI) += altera-ci.o
-ccflags-y += -Idrivers/media/tuners
-ccflags-y += -Idrivers/media/dvb-frontends
+ccflags-y += -I $(srctree)/drivers/media/tuners
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends
ccflags-y += $(extra-cflags-y) $(extra-cflags-m)
diff --git a/drivers/media/pci/cx88/Makefile b/drivers/media/pci/cx88/Makefile
index d0f45d652d6e..c2a015869760 100644
--- a/drivers/media/pci/cx88/Makefile
+++ b/drivers/media/pci/cx88/Makefile
@@ -10,5 +10,5 @@ obj-$(CONFIG_VIDEO_CX88_ALSA) += cx88-alsa.o
obj-$(CONFIG_VIDEO_CX88_BLACKBIRD) += cx88-blackbird.o
obj-$(CONFIG_VIDEO_CX88_DVB) += cx88-dvb.o
obj-$(CONFIG_VIDEO_CX88_VP3054) += cx88-vp3054-i2c.o
-ccflags-y += -Idrivers/media/tuners
-ccflags-y += -Idrivers/media/dvb-frontends
+ccflags-y += -I $(srctree)/drivers/media/tuners
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends
diff --git a/drivers/media/pci/ddbridge/Makefile b/drivers/media/pci/ddbridge/Makefile
index 5b6d5bbc38af..2b77c8d0eb2e 100644
--- a/drivers/media/pci/ddbridge/Makefile
+++ b/drivers/media/pci/ddbridge/Makefile
@@ -9,5 +9,5 @@ ddbridge-objs := ddbridge-main.o ddbridge-core.o ddbridge-ci.o \
obj-$(CONFIG_DVB_DDBRIDGE) += ddbridge.o
-ccflags-y += -Idrivers/media/dvb-frontends/
-ccflags-y += -Idrivers/media/tuners/
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends/
+ccflags-y += -I $(srctree)/drivers/media/tuners/
diff --git a/drivers/media/pci/dm1105/Makefile b/drivers/media/pci/dm1105/Makefile
index d22c2547ee86..87e8e8052cdd 100644
--- a/drivers/media/pci/dm1105/Makefile
+++ b/drivers/media/pci/dm1105/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_DVB_DM1105) += dm1105.o
-ccflags-y += -Idrivers/media/dvb-frontends
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends
diff --git a/drivers/media/pci/mantis/Makefile b/drivers/media/pci/mantis/Makefile
index b5ef39692cb0..49e82247b618 100644
--- a/drivers/media/pci/mantis/Makefile
+++ b/drivers/media/pci/mantis/Makefile
@@ -26,4 +26,4 @@ obj-$(CONFIG_MANTIS_CORE) += mantis_core.o
obj-$(CONFIG_DVB_MANTIS) += mantis.o
obj-$(CONFIG_DVB_HOPPER) += hopper.o
-ccflags-y += -Idrivers/media/dvb-frontends/
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends/
diff --git a/drivers/media/pci/netup_unidvb/Makefile b/drivers/media/pci/netup_unidvb/Makefile
index 944c3e164157..215bdafcc279 100644
--- a/drivers/media/pci/netup_unidvb/Makefile
+++ b/drivers/media/pci/netup_unidvb/Makefile
@@ -6,4 +6,4 @@ netup-unidvb-objs += netup_unidvb_spi.o
obj-$(CONFIG_DVB_NETUP_UNIDVB) += netup-unidvb.o
-ccflags-y += -Idrivers/media/dvb-frontends
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends
diff --git a/drivers/media/pci/ngene/Makefile b/drivers/media/pci/ngene/Makefile
index ec450ad19281..5d16090e0677 100644
--- a/drivers/media/pci/ngene/Makefile
+++ b/drivers/media/pci/ngene/Makefile
@@ -7,5 +7,5 @@ ngene-objs := ngene-core.o ngene-i2c.o ngene-cards.o ngene-dvb.o
obj-$(CONFIG_DVB_NGENE) += ngene.o
-ccflags-y += -Idrivers/media/dvb-frontends/
-ccflags-y += -Idrivers/media/tuners/
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends/
+ccflags-y += -I $(srctree)/drivers/media/tuners/
diff --git a/drivers/media/pci/pluto2/Makefile b/drivers/media/pci/pluto2/Makefile
index 3c2aea1ac752..4d21a2c1544d 100644
--- a/drivers/media/pci/pluto2/Makefile
+++ b/drivers/media/pci/pluto2/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_DVB_PLUTO2) += pluto2.o
-ccflags-y += -Idrivers/media/dvb-frontends/
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends/
diff --git a/drivers/media/pci/pt1/Makefile b/drivers/media/pci/pt1/Makefile
index bc491e08dd63..f80a1cd4c0f5 100644
--- a/drivers/media/pci/pt1/Makefile
+++ b/drivers/media/pci/pt1/Makefile
@@ -2,5 +2,5 @@ earth-pt1-objs := pt1.o
obj-$(CONFIG_DVB_PT1) += earth-pt1.o
-ccflags-y += -Idrivers/media/dvb-frontends
-ccflags-y += -Idrivers/media/tuners
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends
+ccflags-y += -I $(srctree)/drivers/media/tuners
diff --git a/drivers/media/pci/pt3/Makefile b/drivers/media/pci/pt3/Makefile
index 8698d5dfaf52..da6b265f4b39 100644
--- a/drivers/media/pci/pt3/Makefile
+++ b/drivers/media/pci/pt3/Makefile
@@ -4,5 +4,5 @@ earth-pt3-objs += pt3.o pt3_i2c.o pt3_dma.o
obj-$(CONFIG_DVB_PT3) += earth-pt3.o
-ccflags-y += -Idrivers/media/dvb-frontends
-ccflags-y += -Idrivers/media/tuners
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends
+ccflags-y += -I $(srctree)/drivers/media/tuners
diff --git a/drivers/media/pci/smipcie/Makefile b/drivers/media/pci/smipcie/Makefile
index 214ebfe12cf7..2426b7566553 100644
--- a/drivers/media/pci/smipcie/Makefile
+++ b/drivers/media/pci/smipcie/Makefile
@@ -4,6 +4,5 @@ smipcie-objs := smipcie-main.o smipcie-ir.o
obj-$(CONFIG_DVB_SMIPCIE) += smipcie.o
-ccflags-y += -Idrivers/media/tuners
-ccflags-y += -Idrivers/media/dvb-frontends
-
+ccflags-y += -I $(srctree)/drivers/media/tuners
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends
diff --git a/drivers/media/pci/ttpci/Makefile b/drivers/media/pci/ttpci/Makefile
index 58ca12732aad..9b44c479fcdd 100644
--- a/drivers/media/pci/ttpci/Makefile
+++ b/drivers/media/pci/ttpci/Makefile
@@ -18,5 +18,5 @@ obj-$(CONFIG_DVB_BUDGET_CI) += budget-ci.o
obj-$(CONFIG_DVB_BUDGET_PATCH) += budget-patch.o
obj-$(CONFIG_DVB_AV7110) += dvb-ttpci.o
-ccflags-y += -Idrivers/media/dvb-frontends/
-ccflags-y += -Idrivers/media/tuners
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends/
+ccflags-y += -I $(srctree)/drivers/media/tuners
diff --git a/drivers/media/platform/atmel/atmel-isc-regs.h b/drivers/media/platform/atmel/atmel-isc-regs.h
index d730693f299c..8f7f8efc71a7 100644
--- a/drivers/media/platform/atmel/atmel-isc-regs.h
+++ b/drivers/media/platform/atmel/atmel-isc-regs.h
@@ -37,6 +37,25 @@
#define ISC_PFG_CFG0_BPS_TWELVE (0x0 << 28)
#define ISC_PFE_CFG0_BPS_MASK GENMASK(30, 28)
+#define ISC_PFE_CFG0_COLEN BIT(12)
+#define ISC_PFE_CFG0_ROWEN BIT(13)
+
+/* ISC Parallel Front End Configuration 1 Register */
+#define ISC_PFE_CFG1 0x00000010
+
+#define ISC_PFE_CFG1_COLMIN(v) ((v))
+#define ISC_PFE_CFG1_COLMIN_MASK GENMASK(15, 0)
+#define ISC_PFE_CFG1_COLMAX(v) ((v) << 16)
+#define ISC_PFE_CFG1_COLMAX_MASK GENMASK(31, 16)
+
+/* ISC Parallel Front End Configuration 2 Register */
+#define ISC_PFE_CFG2 0x00000014
+
+#define ISC_PFE_CFG2_ROWMIN(v) ((v))
+#define ISC_PFE_CFG2_ROWMIN_MASK GENMASK(15, 0)
+#define ISC_PFE_CFG2_ROWMAX(v) ((v) << 16)
+#define ISC_PFE_CFG2_ROWMAX_MASK GENMASK(31, 16)
+
/* ISC Clock Enable Register */
#define ISC_CLKEN 0x00000018
diff --git a/drivers/media/platform/atmel/atmel-isc.c b/drivers/media/platform/atmel/atmel-isc.c
index 4bba9da206e4..94cb309fdb52 100644
--- a/drivers/media/platform/atmel/atmel-isc.c
+++ b/drivers/media/platform/atmel/atmel-isc.c
@@ -721,6 +721,40 @@ static void isc_start_dma(struct isc_device *isc)
u32 sizeimage = isc->fmt.fmt.pix.sizeimage;
u32 dctrl_dview;
dma_addr_t addr0;
+ u32 h, w;
+
+ h = isc->fmt.fmt.pix.height;
+ w = isc->fmt.fmt.pix.width;
+
+ /*
+ * If the sensor is not RAW, it outputs each pixel (12-16 bits) as two
+ * samples on the 8-12 bit ISC data bus. The ISC counts samples, so these
+ * values must be doubled to get the real number of samples for the
+ * required pixels.
+ */
+ if (!ISC_IS_FORMAT_RAW(isc->config.sd_format->mbus_code)) {
+ h <<= 1;
+ w <<= 1;
+ }
+
+ /*
+ * Limit the column/row count that the ISC will accept to the configured
+ * resolution. Otherwise a misconfigured sensor sending more data than
+ * expected would simply be DMA'd to memory, corrupting it.
+ */
+ regmap_write(regmap, ISC_PFE_CFG1,
+ (ISC_PFE_CFG1_COLMIN(0) & ISC_PFE_CFG1_COLMIN_MASK) |
+ (ISC_PFE_CFG1_COLMAX(w - 1) & ISC_PFE_CFG1_COLMAX_MASK));
+
+ regmap_write(regmap, ISC_PFE_CFG2,
+ (ISC_PFE_CFG2_ROWMIN(0) & ISC_PFE_CFG2_ROWMIN_MASK) |
+ (ISC_PFE_CFG2_ROWMAX(h - 1) & ISC_PFE_CFG2_ROWMAX_MASK));
+
+ regmap_update_bits(regmap, ISC_PFE_CFG0,
+ ISC_PFE_CFG0_COLEN | ISC_PFE_CFG0_ROWEN,
+ ISC_PFE_CFG0_COLEN | ISC_PFE_CFG0_ROWEN);
addr0 = vb2_dma_contig_plane_dma_addr(&isc->cur_frm->vb.vb2_buf, 0);
regmap_write(regmap, ISC_DAD0, addr0);
@@ -1965,6 +1999,8 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
struct vb2_queue *q = &isc->vb2_vidq;
int ret;
+ INIT_WORK(&isc->awb_work, isc_awb_work);
+
ret = v4l2_device_register_subdev_nodes(&isc->v4l2_dev);
if (ret < 0) {
v4l2_err(&isc->v4l2_dev, "Failed to register subdev nodes\n");
@@ -2018,8 +2054,6 @@ static int isc_async_complete(struct v4l2_async_notifier *notifier)
return ret;
}
- INIT_WORK(&isc->awb_work, isc_awb_work);
-
/* Register video device */
strscpy(vdev->name, ATMEL_ISC_NAME, sizeof(vdev->name));
vdev->release = video_device_release_empty;
@@ -2135,8 +2169,11 @@ static int isc_parse_dt(struct device *dev, struct isc_device *isc)
break;
}
- subdev_entity->asd = devm_kzalloc(dev,
- sizeof(*subdev_entity->asd), GFP_KERNEL);
+ /* asd will be freed by the subsystem once it's added to the
+ * notifier list
+ */
+ subdev_entity->asd = kzalloc(sizeof(*subdev_entity->asd),
+ GFP_KERNEL);
if (!subdev_entity->asd) {
of_node_put(rem);
ret = -ENOMEM;
@@ -2284,6 +2321,7 @@ static int atmel_isc_probe(struct platform_device *pdev)
subdev_entity->asd);
if (ret) {
fwnode_handle_put(subdev_entity->asd->match.fwnode);
+ kfree(subdev_entity->asd);
goto cleanup_subdev;
}
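
The PFE window written in isc_start_dma() is expressed in bus samples, so the configured width and height are doubled for non-RAW formats before COLMAX/ROWMAX are derived. A standalone example with an assumed 640x480 non-RAW (e.g. YUYV) configuration:

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int w = 640, h = 480;	/* assumed configured resolution */
		bool raw = false;		/* non-RAW sensor format */

		/* non-RAW pixels arrive as two samples each on the 8-12 bit bus,
		 * so the window limits must be programmed in samples */
		if (!raw) {
			w <<= 1;
			h <<= 1;
		}

		printf("ISC_PFE_CFG1: COLMIN=0 COLMAX=%u\n", w - 1);	/* 1279 */
		printf("ISC_PFE_CFG2: ROWMIN=0 ROWMAX=%u\n", h - 1);	/* 959 */
		return 0;
	}
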
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 3ce58dee4422..1d96cca61547 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -1515,10 +1515,20 @@ static int coda_queue_setup(struct vb2_queue *vq,
static int coda_buf_prepare(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct coda_q_data *q_data;
q_data = get_q_data(ctx, vb->vb2_queue->type);
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE) {
+ v4l2_warn(&ctx->dev->v4l2_dev,
+ "%s field isn't supported\n", __func__);
+ return -EINVAL;
+ }
+ }
if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
v4l2_warn(&ctx->dev->v4l2_dev,
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
index 8339163a5231..4e24f5d781f4 100644
--- a/drivers/media/platform/davinci/vpbe.c
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -104,7 +104,7 @@ static int vpbe_enum_outputs(struct vpbe_device *vpbe_dev,
struct v4l2_output *output)
{
struct vpbe_config *cfg = vpbe_dev->cfg;
- int temp_index = output->index;
+ unsigned int temp_index = output->index;
if (temp_index >= cfg->num_outputs)
return -EINVAL;
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 37f0d7146dfa..cb6a9e3946b6 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -1527,23 +1527,20 @@ static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
unsigned long size;
struct videobuf_buffer *vb;
- vb = q->bufs[b->index];
-
if (!vout->streaming)
return -EINVAL;
- if (file->f_flags & O_NONBLOCK)
- /* Call videobuf_dqbuf for non blocking mode */
- ret = videobuf_dqbuf(q, (struct v4l2_buffer *)b, 1);
- else
- /* Call videobuf_dqbuf for blocking mode */
- ret = videobuf_dqbuf(q, (struct v4l2_buffer *)b, 0);
+ ret = videobuf_dqbuf(q, b, !!(file->f_flags & O_NONBLOCK));
+ if (ret)
+ return ret;
+
+ vb = q->bufs[b->index];
addr = (unsigned long) vout->buf_phy_addr[vb->i];
size = (unsigned long) vb->size;
dma_unmap_single(vout->vid_dev->v4l2_dev.dev, addr,
size, DMA_TO_DEVICE);
- return ret;
+ return 0;
}
static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
index 799e526fd3df..8f097e514900 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -68,6 +68,7 @@ struct rcar_csi2;
/* Field Detection Control */
#define FLD_REG 0x1c
#define FLD_FLD_NUM(n) (((n) & 0xff) << 16)
+#define FLD_DET_SEL(n) (((n) & 0x3) << 4)
#define FLD_FLD_EN4 BIT(3)
#define FLD_FLD_EN3 BIT(2)
#define FLD_FLD_EN2 BIT(1)
@@ -84,6 +85,9 @@ struct rcar_csi2;
/* Interrupt Enable */
#define INTEN_REG 0x30
+#define INTEN_INT_AFIFO_OF BIT(27)
+#define INTEN_INT_ERRSOTHS BIT(4)
+#define INTEN_INT_ERRSOTSYNCHS BIT(3)
/* Interrupt Source Mask */
#define INTCLOSE_REG 0x34
@@ -475,7 +479,7 @@ static int rcsi2_calc_mbps(struct rcar_csi2 *priv, unsigned int bpp)
static int rcsi2_start_receiver(struct rcar_csi2 *priv)
{
const struct rcar_csi2_format *format;
- u32 phycnt, vcdt = 0, vcdt2 = 0;
+ u32 phycnt, vcdt = 0, vcdt2 = 0, fld = 0;
unsigned int i;
int mbps, ret;
@@ -507,6 +511,16 @@ static int rcsi2_start_receiver(struct rcar_csi2 *priv)
vcdt2 |= vcdt_part << ((i % 2) * 16);
}
+ if (priv->mf.field == V4L2_FIELD_ALTERNATE) {
+ fld = FLD_DET_SEL(1) | FLD_FLD_EN4 | FLD_FLD_EN3 | FLD_FLD_EN2
+ | FLD_FLD_EN;
+
+ if (priv->mf.height == 240)
+ fld |= FLD_FLD_NUM(0);
+ else
+ fld |= FLD_FLD_NUM(1);
+ }
+
phycnt = PHYCNT_ENABLECLK;
phycnt |= (1 << priv->lanes) - 1;
@@ -514,6 +528,10 @@ static int rcsi2_start_receiver(struct rcar_csi2 *priv)
if (mbps < 0)
return mbps;
+ /* Enable interrupts. */
+ rcsi2_write(priv, INTEN_REG, INTEN_INT_AFIFO_OF | INTEN_INT_ERRSOTHS
+ | INTEN_INT_ERRSOTSYNCHS);
+
/* Init */
rcsi2_write(priv, TREF_REG, TREF_TREF);
rcsi2_write(priv, PHTC_REG, 0);
@@ -549,8 +567,7 @@ static int rcsi2_start_receiver(struct rcar_csi2 *priv)
rcsi2_write(priv, PHYCNT_REG, phycnt);
rcsi2_write(priv, LINKCNT_REG, LINKCNT_MONITOR_EN |
LINKCNT_REG_MONI_PACT_EN | LINKCNT_ICLK_NONSTOP);
- rcsi2_write(priv, FLD_REG, FLD_FLD_NUM(2) | FLD_FLD_EN4 |
- FLD_FLD_EN3 | FLD_FLD_EN2 | FLD_FLD_EN);
+ rcsi2_write(priv, FLD_REG, fld);
rcsi2_write(priv, PHYCNT_REG, phycnt | PHYCNT_SHUTDOWNZ);
rcsi2_write(priv, PHYCNT_REG, phycnt | PHYCNT_SHUTDOWNZ | PHYCNT_RSTZ);
@@ -675,6 +692,43 @@ static const struct v4l2_subdev_ops rcar_csi2_subdev_ops = {
.pad = &rcar_csi2_pad_ops,
};
+static irqreturn_t rcsi2_irq(int irq, void *data)
+{
+ struct rcar_csi2 *priv = data;
+ u32 status, err_status;
+
+ status = rcsi2_read(priv, INTSTATE_REG);
+ err_status = rcsi2_read(priv, INTERRSTATE_REG);
+
+ if (!status)
+ return IRQ_HANDLED;
+
+ rcsi2_write(priv, INTSTATE_REG, status);
+
+ if (!err_status)
+ return IRQ_HANDLED;
+
+ rcsi2_write(priv, INTERRSTATE_REG, err_status);
+
+ dev_info(priv->dev, "Transfer error, restarting CSI-2 receiver\n");
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t rcsi2_irq_thread(int irq, void *data)
+{
+ struct rcar_csi2 *priv = data;
+
+ mutex_lock(&priv->lock);
+ rcsi2_stop(priv);
+ usleep_range(1000, 2000);
+ if (rcsi2_start(priv))
+ dev_warn(priv->dev, "Failed to restart CSI-2 receiver\n");
+ mutex_unlock(&priv->lock);
+
+ return IRQ_HANDLED;
+}
+
/* -----------------------------------------------------------------------------
* Async handling and registration of subdevices and links.
*/
@@ -947,7 +1001,7 @@ static int rcsi2_probe_resources(struct rcar_csi2 *priv,
struct platform_device *pdev)
{
struct resource *res;
- int irq;
+ int irq, ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(&pdev->dev, res);
@@ -958,6 +1012,12 @@ static int rcsi2_probe_resources(struct rcar_csi2 *priv,
if (irq < 0)
return irq;
+ ret = devm_request_threaded_irq(&pdev->dev, irq, rcsi2_irq,
+ rcsi2_irq_thread, IRQF_SHARED,
+ KBUILD_MODNAME, priv);
+ if (ret)
+ return ret;
+
priv->rstc = devm_reset_control_get(&pdev->dev, NULL);
if (IS_ERR(priv->rstc))
return PTR_ERR(priv->rstc);
diff --git a/drivers/media/platform/sti/c8sectpfe/Makefile b/drivers/media/platform/sti/c8sectpfe/Makefile
index 34d69472b6f0..aedfc725cc19 100644
--- a/drivers/media/platform/sti/c8sectpfe/Makefile
+++ b/drivers/media/platform/sti/c8sectpfe/Makefile
@@ -4,6 +4,5 @@ c8sectpfe-y += c8sectpfe-core.o c8sectpfe-common.o c8sectpfe-dvb.o \
obj-$(CONFIG_DVB_C8SECTPFE) += c8sectpfe.o
-ccflags-y += -Idrivers/media/common
-ccflags-y += -Idrivers/media/dvb-frontends/
-ccflags-y += -Idrivers/media/tuners/
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends/
+ccflags-y += -I $(srctree)/drivers/media/tuners/
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c
index 7fb3a4fa07c1..447bdfbe5afe 100644
--- a/drivers/media/platform/tegra-cec/tegra_cec.c
+++ b/drivers/media/platform/tegra-cec/tegra_cec.c
@@ -334,8 +334,8 @@ static int tegra_cec_probe(struct platform_device *pdev)
hdmi_dev = cec_notifier_parse_hdmi_phandle(&pdev->dev);
- if (!hdmi_dev)
- return -ENODEV;
+ if (IS_ERR(hdmi_dev))
+ return PTR_ERR(hdmi_dev);
cec = devm_kzalloc(&pdev->dev, sizeof(struct tegra_cec), GFP_KERNEL);
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index 37e6e8255b57..53c7ae135460 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -36,5 +36,3 @@ obj-$(CONFIG_RADIO_TEA575X) += tea575x.o
obj-$(CONFIG_USB_RAREMONO) += radio-raremono.o
shark2-objs := radio-shark2.o radio-tea5777.o
-
-ccflags-y += -Isound
diff --git a/drivers/media/spi/Makefile b/drivers/media/spi/Makefile
index 9e536777a330..446e6c567e94 100644
--- a/drivers/media/spi/Makefile
+++ b/drivers/media/spi/Makefile
@@ -1,6 +1,4 @@
obj-$(CONFIG_VIDEO_GS1662) += gs1662.o
obj-$(CONFIG_CXD2880_SPI_DRV) += cxd2880-spi.o
-ccflags-y += -Idrivers/media/dvb-core
-ccflags-y += -Idrivers/media/dvb-frontends
-ccflags-y += -Idrivers/media/dvb-frontends/cxd2880
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends/cxd2880
diff --git a/drivers/media/usb/as102/Makefile b/drivers/media/usb/as102/Makefile
index b0b319622edb..de671aed5dfc 100644
--- a/drivers/media/usb/as102/Makefile
+++ b/drivers/media/usb/as102/Makefile
@@ -4,4 +4,4 @@ dvb-as102-objs := as102_drv.o as102_fw.o as10x_cmd.o as10x_cmd_stream.o \
obj-$(CONFIG_DVB_AS102) += dvb-as102.o
-ccflags-y += -Idrivers/media/dvb-frontends
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends
diff --git a/drivers/media/usb/au0828/Makefile b/drivers/media/usb/au0828/Makefile
index 5691881c56c0..4347812d101a 100644
--- a/drivers/media/usb/au0828/Makefile
+++ b/drivers/media/usb/au0828/Makefile
@@ -11,7 +11,7 @@ endif
obj-$(CONFIG_VIDEO_AU0828) += au0828.o
-ccflags-y += -Idrivers/media/tuners
-ccflags-y += -Idrivers/media/dvb-frontends
+ccflags-y += -I $(srctree)/drivers/media/tuners
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends
ccflags-y += $(extra-cflags-y) $(extra-cflags-m)
diff --git a/drivers/media/usb/b2c2/Makefile b/drivers/media/usb/b2c2/Makefile
index f3cef05f37b6..e7f949d18fbf 100644
--- a/drivers/media/usb/b2c2/Makefile
+++ b/drivers/media/usb/b2c2/Makefile
@@ -1,4 +1,4 @@
b2c2-flexcop-usb-objs := flexcop-usb.o
obj-$(CONFIG_DVB_B2C2_FLEXCOP_USB) += b2c2-flexcop-usb.o
-ccflags-y += -Idrivers/media/common/b2c2/
+ccflags-y += -I $(srctree)/drivers/media/common/b2c2/
diff --git a/drivers/media/usb/cx231xx/Makefile b/drivers/media/usb/cx231xx/Makefile
index c023d97407de..8acbbcba7d0c 100644
--- a/drivers/media/usb/cx231xx/Makefile
+++ b/drivers/media/usb/cx231xx/Makefile
@@ -9,6 +9,5 @@ obj-$(CONFIG_VIDEO_CX231XX) += cx231xx.o
obj-$(CONFIG_VIDEO_CX231XX_ALSA) += cx231xx-alsa.o
obj-$(CONFIG_VIDEO_CX231XX_DVB) += cx231xx-dvb.o
-ccflags-y += -Idrivers/media/tuners
-ccflags-y += -Idrivers/media/dvb-frontends
-ccflags-y += -Idrivers/media/usb/dvb-usb
+ccflags-y += -I $(srctree)/drivers/media/tuners
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends
diff --git a/drivers/media/usb/em28xx/Makefile b/drivers/media/usb/em28xx/Makefile
index 8a224007d755..8c2fc3104561 100644
--- a/drivers/media/usb/em28xx/Makefile
+++ b/drivers/media/usb/em28xx/Makefile
@@ -11,5 +11,5 @@ obj-$(CONFIG_VIDEO_EM28XX_ALSA) += em28xx-alsa.o
obj-$(CONFIG_VIDEO_EM28XX_DVB) += em28xx-dvb.o
obj-$(CONFIG_VIDEO_EM28XX_RC) += em28xx-rc.o
-ccflags-y += -Idrivers/media/tuners
-ccflags-y += -Idrivers/media/dvb-frontends
+ccflags-y += -I $(srctree)/drivers/media/tuners
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends
diff --git a/drivers/media/usb/go7007/Makefile b/drivers/media/usb/go7007/Makefile
index 3d95bbc4192c..712a3507f195 100644
--- a/drivers/media/usb/go7007/Makefile
+++ b/drivers/media/usb/go7007/Makefile
@@ -9,4 +9,4 @@ go7007-y := go7007-v4l2.o go7007-driver.o go7007-i2c.o go7007-fw.o \
s2250-y := s2250-board.o
-ccflags-$(CONFIG_VIDEO_GO7007_LOADER:m=y) += -Idrivers/media/common
+ccflags-$(CONFIG_VIDEO_GO7007_LOADER:m=y) += -I $(srctree)/drivers/media/common
diff --git a/drivers/media/usb/pvrusb2/Makefile b/drivers/media/usb/pvrusb2/Makefile
index 9facf6873404..2e71afc4f6de 100644
--- a/drivers/media/usb/pvrusb2/Makefile
+++ b/drivers/media/usb/pvrusb2/Makefile
@@ -17,5 +17,5 @@ pvrusb2-objs := pvrusb2-i2c-core.o \
obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2.o
-ccflags-y += -Idrivers/media/tuners
-ccflags-y += -Idrivers/media/dvb-frontends
+ccflags-y += -I $(srctree)/drivers/media/tuners
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends
diff --git a/drivers/media/usb/siano/Makefile b/drivers/media/usb/siano/Makefile
index 7d48864e2782..ba56e9818489 100644
--- a/drivers/media/usb/siano/Makefile
+++ b/drivers/media/usb/siano/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_SMS_USB_DRV) += smsusb.o
-ccflags-y += -Idrivers/media/common/siano
+ccflags-y += -I $(srctree)/drivers/media/common/siano
ccflags-y += $(extra-cflags-y) $(extra-cflags-m)
diff --git a/drivers/media/usb/tm6000/Makefile b/drivers/media/usb/tm6000/Makefile
index 744c039e621a..75247a02a485 100644
--- a/drivers/media/usb/tm6000/Makefile
+++ b/drivers/media/usb/tm6000/Makefile
@@ -10,5 +10,5 @@ obj-$(CONFIG_VIDEO_TM6000) += tm6000.o
obj-$(CONFIG_VIDEO_TM6000_ALSA) += tm6000-alsa.o
obj-$(CONFIG_VIDEO_TM6000_DVB) += tm6000-dvb.o
-ccflags-y += -Idrivers/media/tuners
-ccflags-y += -Idrivers/media/dvb-frontends
+ccflags-y += -I $(srctree)/drivers/media/tuners
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends
diff --git a/drivers/media/usb/ttusb-budget/Makefile b/drivers/media/usb/ttusb-budget/Makefile
index fe4372dddd0e..37847d773921 100644
--- a/drivers/media/usb/ttusb-budget/Makefile
+++ b/drivers/media/usb/ttusb-budget/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_DVB_TTUSB_BUDGET) += dvb-ttusb-budget.o
-ccflags-y += -Idrivers/media/dvb-frontends
+ccflags-y += -I $(srctree)/drivers/media/dvb-frontends
diff --git a/drivers/media/usb/usbvision/Makefile b/drivers/media/usb/usbvision/Makefile
index 494d030b4979..e8e5eda08b6f 100644
--- a/drivers/media/usb/usbvision/Makefile
+++ b/drivers/media/usb/usbvision/Makefile
@@ -1,5 +1,3 @@
usbvision-objs := usbvision-core.o usbvision-video.o usbvision-i2c.o usbvision-cards.o
obj-$(CONFIG_VIDEO_USBVISION) += usbvision.o
-
-ccflags-y += -Idrivers/media/tuners
diff --git a/drivers/memory/emif.h b/drivers/memory/emif.h
index 9e9f8037955d..6b71fadb3cfa 100644
--- a/drivers/memory/emif.h
+++ b/drivers/memory/emif.h
@@ -537,6 +537,9 @@
#define MCONNID_SHIFT 0
#define MCONNID_MASK (0xff << 0)
+/* READ_WRITE_LEVELING_CONTROL */
+#define RDWRLVLFULL_START 0x80000000
+
/* DDR_PHY_CTRL_1 - EMIF4D */
#define DLL_SLAVE_DLY_CTRL_SHIFT_4D 4
#define DLL_SLAVE_DLY_CTRL_MASK_4D (0xFF << 4)
@@ -598,6 +601,7 @@ extern struct emif_regs_amx3 ti_emif_regs_amx3;
void ti_emif_save_context(void);
void ti_emif_restore_context(void);
+void ti_emif_run_hw_leveling(void);
void ti_emif_enter_sr(void);
void ti_emif_exit_sr(void);
void ti_emif_abort_sr(void);
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index 0a53598d982f..163b6c69e651 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -51,6 +51,9 @@
#define MC_EMEM_ADR_CFG 0x54
#define MC_EMEM_ADR_CFG_EMEM_NUMDEV BIT(0)
+#define MC_TIMING_CONTROL 0xfc
+#define MC_TIMING_UPDATE BIT(0)
+
static const struct of_device_id tegra_mc_of_match[] = {
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
{ .compatible = "nvidia,tegra20-mc-gart", .data = &tegra20_mc_soc },
@@ -74,7 +77,7 @@ static const struct of_device_id tegra_mc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tegra_mc_of_match);
-static int terga_mc_block_dma_common(struct tegra_mc *mc,
+static int tegra_mc_block_dma_common(struct tegra_mc *mc,
const struct tegra_mc_reset *rst)
{
unsigned long flags;
@@ -90,13 +93,13 @@ static int terga_mc_block_dma_common(struct tegra_mc *mc,
return 0;
}
-static bool terga_mc_dma_idling_common(struct tegra_mc *mc,
+static bool tegra_mc_dma_idling_common(struct tegra_mc *mc,
const struct tegra_mc_reset *rst)
{
return (mc_readl(mc, rst->status) & BIT(rst->bit)) != 0;
}
-static int terga_mc_unblock_dma_common(struct tegra_mc *mc,
+static int tegra_mc_unblock_dma_common(struct tegra_mc *mc,
const struct tegra_mc_reset *rst)
{
unsigned long flags;
@@ -112,17 +115,17 @@ static int terga_mc_unblock_dma_common(struct tegra_mc *mc,
return 0;
}
-static int terga_mc_reset_status_common(struct tegra_mc *mc,
+static int tegra_mc_reset_status_common(struct tegra_mc *mc,
const struct tegra_mc_reset *rst)
{
return (mc_readl(mc, rst->control) & BIT(rst->bit)) != 0;
}
-const struct tegra_mc_reset_ops terga_mc_reset_ops_common = {
- .block_dma = terga_mc_block_dma_common,
- .dma_idling = terga_mc_dma_idling_common,
- .unblock_dma = terga_mc_unblock_dma_common,
- .reset_status = terga_mc_reset_status_common,
+const struct tegra_mc_reset_ops tegra_mc_reset_ops_common = {
+ .block_dma = tegra_mc_block_dma_common,
+ .dma_idling = tegra_mc_dma_idling_common,
+ .unblock_dma = tegra_mc_unblock_dma_common,
+ .reset_status = tegra_mc_reset_status_common,
};
static inline struct tegra_mc *reset_to_mc(struct reset_controller_dev *rcdev)
@@ -282,25 +285,28 @@ static int tegra_mc_setup_latency_allowance(struct tegra_mc *mc)
u32 value;
/* compute the number of MC clock cycles per tick */
- tick = mc->tick * clk_get_rate(mc->clk);
+ tick = (unsigned long long)mc->tick * clk_get_rate(mc->clk);
do_div(tick, NSEC_PER_SEC);
- value = readl(mc->regs + MC_EMEM_ARB_CFG);
+ value = mc_readl(mc, MC_EMEM_ARB_CFG);
value &= ~MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE_MASK;
value |= MC_EMEM_ARB_CFG_CYCLES_PER_UPDATE(tick);
- writel(value, mc->regs + MC_EMEM_ARB_CFG);
+ mc_writel(mc, value, MC_EMEM_ARB_CFG);
/* write latency allowance defaults */
for (i = 0; i < mc->soc->num_clients; i++) {
const struct tegra_mc_la *la = &mc->soc->clients[i].la;
u32 value;
- value = readl(mc->regs + la->reg);
+ value = mc_readl(mc, la->reg);
value &= ~(la->mask << la->shift);
value |= (la->def & la->mask) << la->shift;
- writel(value, mc->regs + la->reg);
+ mc_writel(mc, value, la->reg);
}
+ /* latch new values */
+ mc_writel(mc, MC_TIMING_UPDATE, MC_TIMING_CONTROL);
+
return 0;
}
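
The (unsigned long long) cast in tegra_mc_setup_latency_allowance() keeps the tick-to-cycles product in 64 bits; on a 32-bit kernel the plain multiplication of two unsigned long values wraps. A standalone illustration with assumed numbers:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* model a 32-bit kernel: both factors are 32-bit unsigned values */
		uint32_t tick_ns = 30;		/* assumed MC tick length in ns */
		uint32_t rate_hz = 800000000;	/* assumed 800 MHz MC clock */

		uint32_t wrapped = tick_ns * rate_hz;		/* 32-bit product wraps */
		uint64_t full = (uint64_t)tick_ns * rate_hz;	/* widened first, as in the fix */

		printf("wrapped: %u\n", wrapped);
		printf("full:    %llu (then divided by NSEC_PER_SEC)\n",
		       (unsigned long long)full);
		return 0;
	}
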
diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h
index 887a3b07334f..392993955c93 100644
--- a/drivers/memory/tegra/mc.h
+++ b/drivers/memory/tegra/mc.h
@@ -35,7 +35,7 @@ static inline void mc_writel(struct tegra_mc *mc, u32 value,
writel_relaxed(value, mc->regs + offset);
}
-extern const struct tegra_mc_reset_ops terga_mc_reset_ops_common;
+extern const struct tegra_mc_reset_ops tegra_mc_reset_ops_common;
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
extern const struct tegra_mc_soc tegra20_mc_soc;
diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c
index 6560a5101322..62305fafd641 100644
--- a/drivers/memory/tegra/tegra114.c
+++ b/drivers/memory/tegra/tegra114.c
@@ -572,7 +572,7 @@ static const struct tegra_mc_client tegra114_mc_clients[] = {
},
}, {
.id = 0x34,
- .name = "fdcwr2",
+ .name = "fdcdwr2",
.swgroup = TEGRA_SWGROUP_NV,
.smmu = {
.reg = 0x22c,
@@ -975,7 +975,7 @@ const struct tegra_mc_soc tegra114_mc_soc = {
.smmu = &tegra114_smmu_soc,
.intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION |
MC_INT_DECERR_EMEM,
- .reset_ops = &terga_mc_reset_ops_common,
+ .reset_ops = &tegra_mc_reset_ops_common,
.resets = tegra114_mc_resets,
.num_resets = ARRAY_SIZE(tegra114_mc_resets),
};
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index eedb7d48e2ea..772716ab6b23 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -20,6 +20,7 @@
#include <linux/clkdev.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
index b561a1fe7f46..8f8487bda642 100644
--- a/drivers/memory/tegra/tegra124.c
+++ b/drivers/memory/tegra/tegra124.c
@@ -1074,7 +1074,7 @@ const struct tegra_mc_soc tegra124_mc_soc = {
.intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
- .reset_ops = &terga_mc_reset_ops_common,
+ .reset_ops = &tegra_mc_reset_ops_common,
.resets = tegra124_mc_resets,
.num_resets = ARRAY_SIZE(tegra124_mc_resets),
};
@@ -1104,7 +1104,7 @@ const struct tegra_mc_soc tegra132_mc_soc = {
.intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
- .reset_ops = &terga_mc_reset_ops_common,
+ .reset_ops = &tegra_mc_reset_ops_common,
.resets = tegra124_mc_resets,
.num_resets = ARRAY_SIZE(tegra124_mc_resets),
};
diff --git a/drivers/memory/tegra/tegra20.c b/drivers/memory/tegra/tegra20.c
index 7119e532471c..121237b16add 100644
--- a/drivers/memory/tegra/tegra20.c
+++ b/drivers/memory/tegra/tegra20.c
@@ -198,7 +198,7 @@ static const struct tegra_mc_reset tegra20_mc_resets[] = {
TEGRA20_MC_RESET(VI, 0x100, 0x178, 0x104, 14),
};
-static int terga20_mc_hotreset_assert(struct tegra_mc *mc,
+static int tegra20_mc_hotreset_assert(struct tegra_mc *mc,
const struct tegra_mc_reset *rst)
{
unsigned long flags;
@@ -214,7 +214,7 @@ static int terga20_mc_hotreset_assert(struct tegra_mc *mc,
return 0;
}
-static int terga20_mc_hotreset_deassert(struct tegra_mc *mc,
+static int tegra20_mc_hotreset_deassert(struct tegra_mc *mc,
const struct tegra_mc_reset *rst)
{
unsigned long flags;
@@ -230,7 +230,7 @@ static int terga20_mc_hotreset_deassert(struct tegra_mc *mc,
return 0;
}
-static int terga20_mc_block_dma(struct tegra_mc *mc,
+static int tegra20_mc_block_dma(struct tegra_mc *mc,
const struct tegra_mc_reset *rst)
{
unsigned long flags;
@@ -246,19 +246,19 @@ static int terga20_mc_block_dma(struct tegra_mc *mc,
return 0;
}
-static bool terga20_mc_dma_idling(struct tegra_mc *mc,
+static bool tegra20_mc_dma_idling(struct tegra_mc *mc,
const struct tegra_mc_reset *rst)
{
return mc_readl(mc, rst->status) == 0;
}
-static int terga20_mc_reset_status(struct tegra_mc *mc,
+static int tegra20_mc_reset_status(struct tegra_mc *mc,
const struct tegra_mc_reset *rst)
{
return (mc_readl(mc, rst->reset) & BIT(rst->bit)) == 0;
}
-static int terga20_mc_unblock_dma(struct tegra_mc *mc,
+static int tegra20_mc_unblock_dma(struct tegra_mc *mc,
const struct tegra_mc_reset *rst)
{
unsigned long flags;
@@ -274,13 +274,13 @@ static int terga20_mc_unblock_dma(struct tegra_mc *mc,
return 0;
}
-const struct tegra_mc_reset_ops terga20_mc_reset_ops = {
- .hotreset_assert = terga20_mc_hotreset_assert,
- .hotreset_deassert = terga20_mc_hotreset_deassert,
- .block_dma = terga20_mc_block_dma,
- .dma_idling = terga20_mc_dma_idling,
- .unblock_dma = terga20_mc_unblock_dma,
- .reset_status = terga20_mc_reset_status,
+static const struct tegra_mc_reset_ops tegra20_mc_reset_ops = {
+ .hotreset_assert = tegra20_mc_hotreset_assert,
+ .hotreset_deassert = tegra20_mc_hotreset_deassert,
+ .block_dma = tegra20_mc_block_dma,
+ .dma_idling = tegra20_mc_dma_idling,
+ .unblock_dma = tegra20_mc_unblock_dma,
+ .reset_status = tegra20_mc_reset_status,
};
const struct tegra_mc_soc tegra20_mc_soc = {
@@ -290,7 +290,7 @@ const struct tegra_mc_soc tegra20_mc_soc = {
.client_id_mask = 0x3f,
.intmask = MC_INT_SECURITY_VIOLATION | MC_INT_INVALID_GART_PAGE |
MC_INT_DECERR_EMEM,
- .reset_ops = &terga20_mc_reset_ops,
+ .reset_ops = &tegra20_mc_reset_ops,
.resets = tegra20_mc_resets,
.num_resets = ARRAY_SIZE(tegra20_mc_resets),
};
diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c
index d00a77160407..aa22cda637eb 100644
--- a/drivers/memory/tegra/tegra210.c
+++ b/drivers/memory/tegra/tegra210.c
@@ -1132,7 +1132,7 @@ const struct tegra_mc_soc tegra210_mc_soc = {
.intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
- .reset_ops = &terga_mc_reset_ops_common,
+ .reset_ops = &tegra_mc_reset_ops_common,
.resets = tegra210_mc_resets,
.num_resets = ARRAY_SIZE(tegra210_mc_resets),
};
diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
index bee5314ed404..c9af0f682ead 100644
--- a/drivers/memory/tegra/tegra30.c
+++ b/drivers/memory/tegra/tegra30.c
@@ -726,7 +726,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
},
}, {
.id = 0x34,
- .name = "fdcwr2",
+ .name = "fdcdwr2",
.swgroup = TEGRA_SWGROUP_NV2,
.smmu = {
.reg = 0x22c,
@@ -999,7 +999,7 @@ const struct tegra_mc_soc tegra30_mc_soc = {
.smmu = &tegra30_smmu_soc,
.intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION |
MC_INT_DECERR_EMEM,
- .reset_ops = &terga_mc_reset_ops_common,
+ .reset_ops = &tegra_mc_reset_ops_common,
.resets = tegra30_mc_resets,
.num_resets = ARRAY_SIZE(tegra30_mc_resets),
};
diff --git a/drivers/memory/ti-emif-pm.c b/drivers/memory/ti-emif-pm.c
index 2250d03ea17f..ab07aa163138 100644
--- a/drivers/memory/ti-emif-pm.c
+++ b/drivers/memory/ti-emif-pm.c
@@ -138,6 +138,9 @@ static int ti_emif_alloc_sram(struct device *dev,
emif_data->pm_functions.exit_sr =
sram_resume_address(emif_data,
(unsigned long)ti_emif_exit_sr);
+ emif_data->pm_functions.run_hw_leveling =
+ sram_resume_address(emif_data,
+ (unsigned long)ti_emif_run_hw_leveling);
emif_data->pm_data.regs_virt =
(struct emif_regs_amx3 *)emif_data->ti_emif_sram_data_virt;
diff --git a/drivers/memory/ti-emif-sram-pm.S b/drivers/memory/ti-emif-sram-pm.S
index a5369181e5c2..d75ae18efa7d 100644
--- a/drivers/memory/ti-emif-sram-pm.S
+++ b/drivers/memory/ti-emif-sram-pm.S
@@ -27,6 +27,7 @@
#define EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK 0x0700
#define EMIF_SDCFG_TYPE_DDR2 0x2 << SDRAM_TYPE_SHIFT
+#define EMIF_SDCFG_TYPE_DDR3 0x3 << SDRAM_TYPE_SHIFT
#define EMIF_STATUS_READY 0x4
#define AM43XX_EMIF_PHY_CTRL_REG_COUNT 0x120
@@ -245,6 +246,46 @@ emif_skip_restore_extra_regs:
ENDPROC(ti_emif_restore_context)
/*
+ * void ti_emif_run_hw_leveling(void)
+ *
+ * Used during resume to run hardware leveling again and restore the
+ * configuration of the EMIF PHY, only for DDR3.
+ */
+ENTRY(ti_emif_run_hw_leveling)
+ adr r4, ti_emif_pm_sram_data
+ ldr r0, [r4, #EMIF_PM_BASE_ADDR_PHYS_OFFSET]
+
+ ldr r3, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL]
+ orr r3, r3, #RDWRLVLFULL_START
+ ldr r2, [r0, #EMIF_SDRAM_CONFIG]
+ and r2, r2, #SDRAM_TYPE_MASK
+ cmp r2, #EMIF_SDCFG_TYPE_DDR3
+ bne skip_hwlvl
+
+ str r3, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL]
+
+ /*
+ * If EMIF registers are touched during initial stage of HW
+ * leveling sequence there will be an L3 NOC timeout error issued
+ * as the EMIF will not respond, which is not fatal, but it is
+ * avoidable. This small wait loop is enough time for this condition
+ * to clear, even at worst case of CPU running at max speed of 1 GHz.
+ */
+ mov r2, #0x2000
+1:
+ subs r2, r2, #0x1
+ bne 1b
+
+ /* Bit clears when operation is complete */
+2: ldr r1, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL]
+ tst r1, #RDWRLVLFULL_START
+ bne 2b
+
+skip_hwlvl:
+ mov pc, lr
+ENDPROC(ti_emif_run_hw_leveling)
+
+/*
* void ti_emif_enter_sr(void)
*
* Programs the EMIF to tell the SDRAM to enter into self-refresh
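
For readers less familiar with ARM assembly, the new ti_emif_run_hw_leveling routine above corresponds roughly to the following C sketch. The register offsets and the RDWRLVLFULL_START bit come from the driver's own headers; readl()/writel() and the function name are only stand-ins here, since the real routine runs position-independent from SRAM:

/* Approximate C equivalent of ti_emif_run_hw_leveling (illustrative only). */
static void emif_run_hw_leveling(void __iomem *base)
{
	u32 cfg = readl(base + EMIF_SDRAM_CONFIG);
	u32 lvl = readl(base + EMIF_READ_WRITE_LEVELING_CONTROL);
	unsigned int i;

	/* Hardware leveling is only run for DDR3. */
	if ((cfg & SDRAM_TYPE_MASK) != EMIF_SDCFG_TYPE_DDR3)
		return;

	writel(lvl | RDWRLVLFULL_START,
	       base + EMIF_READ_WRITE_LEVELING_CONTROL);

	/*
	 * Leave the EMIF alone while leveling starts, so the read-back
	 * below does not trigger an (avoidable) L3 NOC timeout.
	 */
	for (i = 0; i < 0x2000; i++)
		cpu_relax();

	/* RDWRLVLFULL_START clears once full leveling has completed. */
	while (readl(base + EMIF_READ_WRITE_LEVELING_CONTROL) &
	       RDWRLVLFULL_START)
		cpu_relax();
}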
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 7e425ff53491..fc6aa4c50144 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -18,6 +18,7 @@
#include <linux/clk-provider.h>
#include <linux/debugfs.h>
#include <linux/idr.h>
+#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 3209ee020b15..6a0365b2332c 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -59,30 +59,6 @@ config ATMEL_TCLIB
blocks found on many Atmel processors. This facilitates using
these blocks by different drivers despite processor differences.
-config ATMEL_TCB_CLKSRC
- bool "TC Block Clocksource"
- depends on ATMEL_TCLIB
- default y
- help
- Select this to get a high precision clocksource based on a
- TC block with a 5+ MHz base clock rate. Two timer channels
- are combined to make a single 32-bit timer.
-
- When GENERIC_CLOCKEVENTS is defined, the third timer channel
- may be used as a clock event device supporting oneshot mode
- (delays of up to two seconds) based on the 32 KiHz clock.
-
-config ATMEL_TCB_CLKSRC_BLOCK
- int
- depends on ATMEL_TCB_CLKSRC
- default 0
- range 0 1
- help
- Some chips provide more than one TC block, so you have the
- choice of which one to use for the clock framework. The other
- TC can be used for other purposes, such as PWM generation and
- interval timing.
-
config DUMMY_IRQ
tristate "Dummy IRQ handler"
default n
@@ -496,30 +472,6 @@ config VEXPRESS_SYSCFG
bus. System Configuration interface is one of the possible means
of generating transactions on this bus.
-config ASPEED_P2A_CTRL
- depends on (ARCH_ASPEED || COMPILE_TEST) && REGMAP && MFD_SYSCON
- tristate "Aspeed ast2400/2500 HOST P2A VGA MMIO to BMC bridge control"
- help
- Control Aspeed ast2400/2500 HOST P2A VGA MMIO to BMC mappings through
- ioctl()s, the driver also provides an interface for userspace mappings to
- a pre-defined region.
-
-config ASPEED_LPC_CTRL
- depends on (ARCH_ASPEED || COMPILE_TEST) && REGMAP && MFD_SYSCON
- tristate "Aspeed ast2400/2500 HOST LPC to BMC bridge control"
- ---help---
- Control Aspeed ast2400/2500 HOST LPC to BMC mappings through
- ioctl()s, the driver also provides a read/write interface to a BMC ram
- region where the host LPC read/write region can be buffered.
-
-config ASPEED_LPC_SNOOP
- tristate "Aspeed ast2500 HOST LPC snoop support"
- depends on (ARCH_ASPEED || COMPILE_TEST) && REGMAP && MFD_SYSCON
- help
- Provides a driver to control the LPC snoop interface which
- allows the BMC to listen on and save the data written by
- the host to an arbitrary LPC I/O port.
-
config PCI_ENDPOINT_TEST
depends on PCI
select CRC32
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index c36239573a5c..b9affcdaa3d6 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -54,9 +54,6 @@ obj-$(CONFIG_GENWQE) += genwqe/
obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o
obj-$(CONFIG_CXL_BASE) += cxl/
-obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
-obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
-obj-$(CONFIG_ASPEED_P2A_CTRL) += aspeed-p2a-ctrl.o
obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
obj-$(CONFIG_OCXL) += ocxl/
obj-y += cardreader/
diff --git a/drivers/misc/atmel_tclib.c b/drivers/misc/atmel_tclib.c
index ac24a4bd63f7..2c6850ef0e9c 100644
--- a/drivers/misc/atmel_tclib.c
+++ b/drivers/misc/atmel_tclib.c
@@ -1,4 +1,3 @@
-#include <linux/atmel_tc.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -10,6 +9,7 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/of.h>
+#include <soc/at91/atmel_tcb.h>
/*
* This is a thin library to solve the problem of how to portably allocate
@@ -111,6 +111,9 @@ static int __init tc_probe(struct platform_device *pdev)
struct resource *r;
unsigned int i;
+ if (of_get_child_count(pdev->dev.of_node))
+ return -EBUSY;
+
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -EINVAL;
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index ec980bda071c..b61de360f26f 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -19,6 +19,7 @@
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
diff --git a/drivers/net/ethernet/chelsio/libcxgb/Makefile b/drivers/net/ethernet/chelsio/libcxgb/Makefile
index 2534e30a1560..441913b5221e 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/Makefile
+++ b/drivers/net/ethernet/chelsio/libcxgb/Makefile
@@ -1,4 +1,4 @@
-ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
+ccflags-y := -I $(srctree)/$(src)/../cxgb4
obj-$(CONFIG_CHELSIO_LIB) += libcxgb.o
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index e9a0213b08c4..6238e6951336 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -41,7 +41,7 @@ config CS89x0_PLATFORM
config EP93XX_ETH
tristate "EP93xx Ethernet support"
- depends on ARM && ARCH_EP93XX
+ depends on (ARM && ARCH_EP93XX) || COMPILE_TEST
select MII
help
This is a driver for the ethernet hardware included in EP93xx CPUs.
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 13dfdfca49fc..a6da9873570b 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -25,7 +25,7 @@
#include <linux/io.h>
#include <linux/slab.h>
-#include <mach/hardware.h>
+#include <linux/platform_data/eth-ep93xx.h>
#define DRV_MODULE_NAME "ep93xx-eth"
#define DRV_MODULE_VERSION "0.1"
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index b6b3ff0fe17f..7ccb950aa7d4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -22,7 +22,6 @@ config MLXSW_CORE_HWMON
config MLXSW_CORE_THERMAL
bool "Thermal zone support for Mellanox Technologies Switch ASICs"
depends on MLXSW_CORE && THERMAL
- depends on !(MLXSW_CORE=y && THERMAL=m)
default y
---help---
Say Y here if you want to automatically control fans speed according
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index ed6623a9801e..319db3ece263 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -31,14 +31,15 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/net_tstamp.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <mach/ixp46x_ts.h>
-#include <mach/npe.h>
-#include <mach/qmgr.h>
+#include <linux/soc/ixp4xx/npe.h>
+#include <linux/soc/ixp4xx/qmgr.h>
#define DEBUG_DESC 0
#define DEBUG_RX 0
@@ -1497,6 +1498,15 @@ static struct platform_driver ixp4xx_eth_driver = {
static int __init eth_init_module(void)
{
int err;
+
+ /*
+ * FIXME: we bail out on device tree boot but this really needs
+ * to be fixed in a nicer way: this registers the MDIO bus before
+ * even matching the driver infrastructure, we should only probe
+ * detected hardware.
+ */
+ if (of_have_populated_dt())
+ return -ENODEV;
if ((err = ixp4xx_mdio_register()))
return err;
return platform_driver_register(&ixp4xx_eth_driver);
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index b2ff903a9cb6..b188fce3f641 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -53,6 +53,7 @@
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/ieee802154.h>
+#include <linux/io.h>
#include <linux/kfifo.h>
#include <linux/of.h>
#include <linux/of_device.h>
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 5c60dc60a8e6..46a05b6540b8 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -22,8 +22,8 @@
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
-#include <mach/npe.h>
-#include <mach/qmgr.h>
+#include <linux/soc/ixp4xx/npe.h>
+#include <linux/soc/ixp4xx/qmgr.h>
#define DEBUG_DESC 0
#define DEBUG_RX 0
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index f3d753d3169c..2030805aa216 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -756,6 +756,17 @@ static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
return &guid_null;
}
+static void reap_victim(struct nd_mapping *nd_mapping,
+ struct nd_label_ent *victim)
+{
+ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+ u32 slot = to_slot(ndd, victim->label);
+
+ dev_dbg(ndd->dev, "free: %d\n", slot);
+ nd_label_free_slot(ndd, slot);
+ victim->label = NULL;
+}
+
static int __pmem_label_update(struct nd_region *nd_region,
struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
int pos, unsigned long flags)
@@ -763,9 +774,9 @@ static int __pmem_label_update(struct nd_region *nd_region,
struct nd_namespace_common *ndns = &nspm->nsio.common;
struct nd_interleave_set *nd_set = nd_region->nd_set;
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
- struct nd_label_ent *label_ent, *victim = NULL;
struct nd_namespace_label *nd_label;
struct nd_namespace_index *nsindex;
+ struct nd_label_ent *label_ent;
struct nd_label_id label_id;
struct resource *res;
unsigned long *free;
@@ -834,18 +845,10 @@ static int __pmem_label_update(struct nd_region *nd_region,
list_for_each_entry(label_ent, &nd_mapping->labels, list) {
if (!label_ent->label)
continue;
- if (memcmp(nspm->uuid, label_ent->label->uuid,
- NSLABEL_UUID_LEN) != 0)
- continue;
- victim = label_ent;
- list_move_tail(&victim->list, &nd_mapping->labels);
- break;
- }
- if (victim) {
- dev_dbg(ndd->dev, "free: %d\n", slot);
- slot = to_slot(ndd, victim->label);
- nd_label_free_slot(ndd, slot);
- victim->label = NULL;
+ if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
+ || memcmp(nspm->uuid, label_ent->label->uuid,
+ NSLABEL_UUID_LEN) == 0)
+ reap_victim(nd_mapping, label_ent);
}
/* update index */
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index f293556cbbf6..d0214644e334 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1247,12 +1247,27 @@ static int namespace_update_uuid(struct nd_region *nd_region,
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+ struct nd_label_ent *label_ent;
struct resource *res;
for_each_dpa_resource(ndd, res)
if (strcmp(res->name, old_label_id.id) == 0)
sprintf((void *) res->name, "%s",
new_label_id.id);
+
+ mutex_lock(&nd_mapping->lock);
+ list_for_each_entry(label_ent, &nd_mapping->labels, list) {
+ struct nd_namespace_label *nd_label = label_ent->label;
+ struct nd_label_id label_id;
+
+ if (!nd_label)
+ continue;
+ nd_label_gen_id(&label_id, nd_label->uuid,
+ __le32_to_cpu(nd_label->flags));
+ if (strcmp(old_label_id.id, label_id.id) == 0)
+ set_bit(ND_LABEL_REAP, &label_ent->flags);
+ }
+ mutex_unlock(&nd_mapping->lock);
}
kfree(*old_uuid);
out:
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index a5ac3b240293..191d62af0e51 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -113,8 +113,12 @@ struct nd_percpu_lane {
spinlock_t lock;
};
+enum nd_label_flags {
+ ND_LABEL_REAP,
+};
struct nd_label_ent {
struct list_head list;
+ unsigned long flags;
struct nd_namespace_label *label;
};
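
The new ND_LABEL_REAP bit pairs a producer in namespace_update_uuid() (set_bit() on every label entry that still carries the old UUID) with a consumer in __pmem_label_update() (test_and_clear_bit() before reap_victim()). A small sketch of that mark-then-consume idiom, with illustrative names rather than the driver's own:

#include <linux/bitops.h>

#define MY_ENT_REAP	0	/* plays the role of ND_LABEL_REAP */

struct my_ent {
	unsigned long flags;
};

static void mark_stale(struct my_ent *ent)
{
	set_bit(MY_ENT_REAP, &ent->flags);	/* atomic, safe under the list lock */
}

static bool consume_stale(struct my_ent *ent)
{
	/* Atomically test and clear, so the entry is reaped exactly once. */
	return test_and_clear_bit(MY_ENT_REAP, &ent->flags);
}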
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index a6644a2c3ef7..7da80f375315 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1257,10 +1257,9 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return 0;
}
+ effects |= nvme_known_admin_effects(opcode);
if (ctrl->effects)
effects = le32_to_cpu(ctrl->effects->acs[opcode]);
- else
- effects = nvme_known_admin_effects(opcode);
/*
* For simplicity, IO to all namespaces is quiesced even if the command
@@ -2342,20 +2341,35 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = {
NULL,
};
-static int nvme_active_ctrls(struct nvme_subsystem *subsys)
+static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
+ struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
- int count = 0;
- struct nvme_ctrl *ctrl;
+ struct nvme_ctrl *tmp;
+
+ lockdep_assert_held(&nvme_subsystems_lock);
+
+ list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
+ if (ctrl->state == NVME_CTRL_DELETING ||
+ ctrl->state == NVME_CTRL_DEAD)
+ continue;
+
+ if (tmp->cntlid == ctrl->cntlid) {
+ dev_err(ctrl->device,
+ "Duplicate cntlid %u with %s, rejecting\n",
+ ctrl->cntlid, dev_name(tmp->device));
+ return false;
+ }
- mutex_lock(&subsys->lock);
- list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
- if (ctrl->state != NVME_CTRL_DELETING &&
- ctrl->state != NVME_CTRL_DEAD)
- count++;
+ if ((id->cmic & (1 << 1)) ||
+ (ctrl->opts && ctrl->opts->discovery_nqn))
+ continue;
+
+ dev_err(ctrl->device,
+ "Subsystem does not support multiple controllers\n");
+ return false;
}
- mutex_unlock(&subsys->lock);
- return count;
+ return true;
}
static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
@@ -2395,22 +2409,13 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
mutex_lock(&nvme_subsystems_lock);
found = __nvme_find_get_subsystem(subsys->subnqn);
if (found) {
- /*
- * Verify that the subsystem actually supports multiple
- * controllers, else bail out.
- */
- if (!(ctrl->opts && ctrl->opts->discovery_nqn) &&
- nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
- dev_err(ctrl->device,
- "ignoring ctrl due to duplicate subnqn (%s).\n",
- found->subnqn);
- nvme_put_subsystem(found);
- ret = -EINVAL;
- goto out_unlock;
- }
-
__nvme_release_subsystem(subsys);
subsys = found;
+
+ if (!nvme_validate_cntlid(subsys, ctrl, id)) {
+ ret = -EINVAL;
+ goto out_put_subsystem;
+ }
} else {
ret = device_add(&subsys->dev);
if (ret) {
@@ -2422,23 +2427,20 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
list_add_tail(&subsys->entry, &nvme_subsystems);
}
- ctrl->subsys = subsys;
- mutex_unlock(&nvme_subsystems_lock);
-
if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
dev_name(ctrl->device))) {
dev_err(ctrl->device,
"failed to create sysfs link from subsystem.\n");
- /* the transport driver will eventually put the subsystem */
- return -EINVAL;
+ goto out_put_subsystem;
}
- mutex_lock(&subsys->lock);
+ ctrl->subsys = subsys;
list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
- mutex_unlock(&subsys->lock);
-
+ mutex_unlock(&nvme_subsystems_lock);
return 0;
+out_put_subsystem:
+ nvme_put_subsystem(subsys);
out_unlock:
mutex_unlock(&nvme_subsystems_lock);
put_device(&subsys->dev);
@@ -3605,19 +3607,18 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
u32 aer_notice_type = (result & 0xff00) >> 8;
+ trace_nvme_async_event(ctrl, aer_notice_type);
+
switch (aer_notice_type) {
case NVME_AER_NOTICE_NS_CHANGED:
- trace_nvme_async_event(ctrl, aer_notice_type);
set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
nvme_queue_scan(ctrl);
break;
case NVME_AER_NOTICE_FW_ACT_STARTING:
- trace_nvme_async_event(ctrl, aer_notice_type);
queue_work(nvme_wq, &ctrl->fw_act_work);
break;
#ifdef CONFIG_NVME_MULTIPATH
case NVME_AER_NOTICE_ANA:
- trace_nvme_async_event(ctrl, aer_notice_type);
if (!ctrl->ana_log_buf)
break;
queue_work(nvme_wq, &ctrl->ana_work);
@@ -3696,10 +3697,10 @@ static void nvme_free_ctrl(struct device *dev)
__free_page(ctrl->discard_page);
if (subsys) {
- mutex_lock(&subsys->lock);
+ mutex_lock(&nvme_subsystems_lock);
list_del(&ctrl->subsys_entry);
- mutex_unlock(&subsys->lock);
sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
+ mutex_unlock(&nvme_subsystems_lock);
}
ctrl->ops->free_ctrl(ctrl);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 592d1e61ef7e..5838f7cd53ac 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -978,7 +978,7 @@ EXPORT_SYMBOL_GPL(nvmf_free_options);
NVMF_OPT_DISABLE_SQFLOW)
static struct nvme_ctrl *
-nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
+nvmf_create_ctrl(struct device *dev, const char *buf)
{
struct nvmf_ctrl_options *opts;
struct nvmf_transport_ops *ops;
@@ -1073,7 +1073,7 @@ static ssize_t nvmf_dev_write(struct file *file, const char __user *ubuf,
goto out_unlock;
}
- ctrl = nvmf_create_ctrl(nvmf_device, buf, count);
+ ctrl = nvmf_create_ctrl(nvmf_device, buf);
if (IS_ERR(ctrl)) {
ret = PTR_ERR(ctrl);
goto out_unlock;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 9544eb60f725..dd8169bbf0d2 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -202,7 +202,7 @@ static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);
-
+static struct workqueue_struct *nvme_fc_wq;
/*
* These items are short-term. They will eventually be moved into
@@ -2054,7 +2054,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
*/
if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
active = atomic_xchg(&ctrl->err_work_active, 1);
- if (!active && !schedule_work(&ctrl->err_work)) {
+ if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
atomic_set(&ctrl->err_work_active, 0);
WARN_ON(1);
}
@@ -3399,6 +3399,10 @@ static int __init nvme_fc_init_module(void)
{
int ret;
+ nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
+ if (!nvme_fc_wq)
+ return -ENOMEM;
+
/*
* NOTE:
* It is expected that in the future the kernel will combine
@@ -3416,7 +3420,7 @@ static int __init nvme_fc_init_module(void)
ret = class_register(&fc_class);
if (ret) {
pr_err("couldn't register class fc\n");
- return ret;
+ goto out_destroy_wq;
}
/*
@@ -3440,6 +3444,9 @@ out_destroy_device:
device_destroy(&fc_class, MKDEV(0, 0));
out_destroy_class:
class_unregister(&fc_class);
+out_destroy_wq:
+ destroy_workqueue(nvme_fc_wq);
+
return ret;
}
@@ -3456,6 +3463,7 @@ static void __exit nvme_fc_exit_module(void)
device_destroy(&fc_class, MKDEV(0, 0));
class_unregister(&fc_class);
+ destroy_workqueue(nvme_fc_wq);
}
module_init(nvme_fc_init_module);
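
The switch from schedule_work() to a driver-owned workqueue above follows the usual rule that work items in the I/O error-recovery path should run on a WQ_MEM_RECLAIM queue, so they can make forward progress even when the system is reclaiming memory through the very device being recovered. A bare-bones sketch of the allocate/queue/teardown lifecycle, with illustrative names:

#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static struct work_struct my_err_work;

static void my_err_work_fn(struct work_struct *work)
{
	/* error recovery that must make progress under memory pressure */
}

static int __init my_init(void)
{
	my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 0);
	if (!my_wq)
		return -ENOMEM;

	INIT_WORK(&my_err_work, my_err_work_fn);
	queue_work(my_wq, &my_err_work);	/* instead of schedule_work() */
	return 0;
}

static void __exit my_exit(void)
{
	/* destroy_workqueue() drains remaining work before freeing the queue */
	destroy_workqueue(my_wq);
}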
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 949e29e1d782..4f20a10b39d3 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -977,6 +977,7 @@ int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
geo->csecs = 1 << ns->lba_shift;
geo->sos = ns->ms;
geo->ext = ns->ext;
+ geo->mdts = ns->ctrl->max_hw_sectors;
dev->q = q;
memcpy(dev->name, disk_name, DISK_NAME_LEN);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 5c9429d41120..499acf07d61a 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -31,7 +31,7 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
} else if (ns->head->disk) {
sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
- ctrl->cntlid, ns->head->instance);
+ ctrl->instance, ns->head->instance);
*flags = GENHD_FL_HIDDEN;
} else {
sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 3e4fb891a95a..2a8708c9ac18 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1296,6 +1296,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
switch (dev->ctrl.state) {
case NVME_CTRL_DELETING:
shutdown = true;
+ /* fall through */
case NVME_CTRL_CONNECTING:
case NVME_CTRL_RESETTING:
dev_warn_ratelimited(dev->ctrl.device,
@@ -2280,8 +2281,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
return ret;
}
dev->ctrl.tagset = &dev->tagset;
-
- nvme_dbbuf_set(dev);
} else {
blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
@@ -2289,6 +2288,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
nvme_free_queues(dev, dev->online_queues);
}
+ nvme_dbbuf_set(dev);
return 0;
}
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index e1824c2e0a1c..f383146e7d0f 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -697,15 +697,6 @@ out_free_queues:
return ret;
}
-static void nvme_rdma_free_tagset(struct nvme_ctrl *nctrl,
- struct blk_mq_tag_set *set)
-{
- struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
-
- blk_mq_free_tag_set(set);
- nvme_rdma_dev_put(ctrl->device);
-}
-
static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
bool admin)
{
@@ -744,24 +735,9 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
ret = blk_mq_alloc_tag_set(set);
if (ret)
- goto out;
-
- /*
- * We need a reference on the device as long as the tag_set is alive,
- * as the MRs in the request structures need a valid ib_device.
- */
- ret = nvme_rdma_dev_get(ctrl->device);
- if (!ret) {
- ret = -EINVAL;
- goto out_free_tagset;
- }
+ return ERR_PTR(ret);
return set;
-
-out_free_tagset:
- blk_mq_free_tag_set(set);
-out:
- return ERR_PTR(ret);
}
static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
@@ -769,7 +745,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
{
if (remove) {
blk_cleanup_queue(ctrl->ctrl.admin_q);
- nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
+ blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
}
if (ctrl->async_event_sqe.data) {
nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
@@ -847,7 +823,7 @@ out_cleanup_queue:
blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
if (new)
- nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
+ blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
out_free_async_qe:
nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
sizeof(struct nvme_command), DMA_TO_DEVICE);
@@ -862,7 +838,7 @@ static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
{
if (remove) {
blk_cleanup_queue(ctrl->ctrl.connect_q);
- nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
+ blk_mq_free_tag_set(ctrl->ctrl.tagset);
}
nvme_rdma_free_io_queues(ctrl);
}
@@ -903,7 +879,7 @@ out_cleanup_connect_q:
blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
if (new)
- nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
+ blk_mq_free_tag_set(ctrl->ctrl.tagset);
out_free_io_queues:
nvme_rdma_free_io_queues(ctrl);
return ret;
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index 97d3c77365b8..e71502d141ed 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -167,6 +167,7 @@ TRACE_EVENT(nvme_async_event,
aer_name(NVME_AER_NOTICE_NS_CHANGED),
aer_name(NVME_AER_NOTICE_ANA),
aer_name(NVME_AER_NOTICE_FW_ACT_STARTING),
+ aer_name(NVME_AER_NOTICE_DISC_CHANGED),
aer_name(NVME_AER_ERROR),
aer_name(NVME_AER_SMART),
aer_name(NVME_AER_CSS),
diff --git a/drivers/nvmem/zynqmp_nvmem.c b/drivers/nvmem/zynqmp_nvmem.c
index 490c8fcaec80..5893543918c8 100644
--- a/drivers/nvmem/zynqmp_nvmem.c
+++ b/drivers/nvmem/zynqmp_nvmem.c
@@ -16,6 +16,8 @@ struct zynqmp_nvmem_data {
struct nvmem_device *nvmem;
};
+static const struct zynqmp_eemi_ops *eemi_ops;
+
static int zynqmp_nvmem_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
@@ -23,9 +25,7 @@ static int zynqmp_nvmem_read(void *context, unsigned int offset,
int idcode, version;
struct zynqmp_nvmem_data *priv = context;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
-
- if (!eemi_ops || !eemi_ops->get_chipid)
+ if (!eemi_ops->get_chipid)
return -ENXIO;
ret = eemi_ops->get_chipid(&idcode, &version);
@@ -61,6 +61,10 @@ static int zynqmp_nvmem_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
priv->dev = dev;
econfig.dev = dev;
econfig.reg_read = zynqmp_nvmem_read;
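
zynqmp_pm_get_eemi_ops() now returns an ERR_PTR-encoded error rather than NULL when the firmware interface is not available yet, so the probe above caches the ops once and propagates the error with PTR_ERR(). A hedged sketch of that probe-time pattern; the provider name and ops type are made up for illustration:

#include <linux/err.h>
#include <linux/platform_device.h>

/* Illustrative provider lookup that may return ERR_PTR(-EPROBE_DEFER). */
extern const struct my_ops *my_get_ops(void);

static int my_probe(struct platform_device *pdev)
{
	const struct my_ops *ops = my_get_ops();

	if (IS_ERR(ops))
		return PTR_ERR(ops);	/* e.g. -EPROBE_DEFER: probe is retried later */

	/* ops can now be cached and used without NULL checks at read time */
	return 0;
}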
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index 2b686c55b717..e341cc5c0ea6 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -57,15 +57,21 @@
#define SHDW_WK_PIN(reg, cfg) ((reg) & AT91_SHDW_WKUPIS((cfg)->wkup_pin_input))
#define SHDW_RTCWK(reg, cfg) (((reg) >> ((cfg)->sr_rtcwk_shift)) & 0x1)
+#define SHDW_RTTWK(reg, cfg) (((reg) >> ((cfg)->sr_rttwk_shift)) & 0x1)
#define SHDW_RTCWKEN(cfg) (1 << ((cfg)->mr_rtcwk_shift))
+#define SHDW_RTTWKEN(cfg) (1 << ((cfg)->mr_rttwk_shift))
#define DBC_PERIOD_US(x) DIV_ROUND_UP_ULL((1000000 * (x)), \
SLOW_CLOCK_FREQ)
+#define SHDW_CFG_NOT_USED (32)
+
struct shdwc_config {
u8 wkup_pin_input;
u8 mr_rtcwk_shift;
+ u8 mr_rttwk_shift;
u8 sr_rtcwk_shift;
+ u8 sr_rttwk_shift;
};
struct shdwc {
@@ -104,6 +110,8 @@ static void __init at91_wakeup_status(struct platform_device *pdev)
reason = "WKUP pin";
else if (SHDW_RTCWK(reg, shdw->cfg))
reason = "RTC";
+ else if (SHDW_RTTWK(reg, shdw->cfg))
+ reason = "RTT";
pr_info("AT91: Wake-Up source: %s\n", reason);
}
@@ -221,6 +229,9 @@ static void at91_shdwc_dt_configure(struct platform_device *pdev)
if (of_property_read_bool(np, "atmel,wakeup-rtc-timer"))
mode |= SHDW_RTCWKEN(shdw->cfg);
+ if (of_property_read_bool(np, "atmel,wakeup-rtt-timer"))
+ mode |= SHDW_RTTWKEN(shdw->cfg);
+
dev_dbg(&pdev->dev, "%s: mode = %#x\n", __func__, mode);
writel(mode, shdw->shdwc_base + AT91_SHDW_MR);
@@ -231,13 +242,27 @@ static void at91_shdwc_dt_configure(struct platform_device *pdev)
static const struct shdwc_config sama5d2_shdwc_config = {
.wkup_pin_input = 0,
.mr_rtcwk_shift = 17,
+ .mr_rttwk_shift = SHDW_CFG_NOT_USED,
.sr_rtcwk_shift = 5,
+ .sr_rttwk_shift = SHDW_CFG_NOT_USED,
+};
+
+static const struct shdwc_config sam9x60_shdwc_config = {
+ .wkup_pin_input = 0,
+ .mr_rtcwk_shift = 17,
+ .mr_rttwk_shift = 16,
+ .sr_rtcwk_shift = 5,
+ .sr_rttwk_shift = 4,
};
static const struct of_device_id at91_shdwc_of_match[] = {
{
.compatible = "atmel,sama5d2-shdwc",
.data = &sama5d2_shdwc_config,
+ },
+ {
+ .compatible = "microchip,sam9x60-shdwc",
+ .data = &sam9x60_shdwc_config,
}, {
/*sentinel*/
}
diff --git a/drivers/power/reset/syscon-reboot.c b/drivers/power/reset/syscon-reboot.c
index 7d0d269a0837..5a6bb638c331 100644
--- a/drivers/power/reset/syscon-reboot.c
+++ b/drivers/power/reset/syscon-reboot.c
@@ -27,6 +27,7 @@
struct syscon_reboot_context {
struct regmap *map;
u32 offset;
+ u32 value;
u32 mask;
struct notifier_block restart_handler;
};
@@ -39,7 +40,7 @@ static int syscon_restart_handle(struct notifier_block *this,
restart_handler);
/* Issue the reboot */
- regmap_write(ctx->map, ctx->offset, ctx->mask);
+ regmap_update_bits(ctx->map, ctx->offset, ctx->mask, ctx->value);
mdelay(1000);
@@ -51,6 +52,7 @@ static int syscon_reboot_probe(struct platform_device *pdev)
{
struct syscon_reboot_context *ctx;
struct device *dev = &pdev->dev;
+ int mask_err, value_err;
int err;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
@@ -64,8 +66,21 @@ static int syscon_reboot_probe(struct platform_device *pdev)
if (of_property_read_u32(pdev->dev.of_node, "offset", &ctx->offset))
return -EINVAL;
- if (of_property_read_u32(pdev->dev.of_node, "mask", &ctx->mask))
+ value_err = of_property_read_u32(pdev->dev.of_node, "value", &ctx->value);
+ mask_err = of_property_read_u32(pdev->dev.of_node, "mask", &ctx->mask);
+ if (value_err && mask_err) {
+ dev_err(dev, "unable to read 'value' and 'mask'");
return -EINVAL;
+ }
+
+ if (value_err) {
+ /* support old binding */
+ ctx->value = ctx->mask;
+ ctx->mask = 0xFFFFFFFF;
+ } else if (mask_err) {
+ /* support value without mask */
+ ctx->mask = 0xFFFFFFFF;
+ }
ctx->restart_handler.notifier_call = syscon_restart_handle;
ctx->restart_handler.priority = 192;
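
The reboot write above changes from regmap_write() to regmap_update_bits(), which performs a masked read-modify-write: only the bits in ctx->mask are touched, and they take the bit values from ctx->value. Together with the compatibility fallbacks in the probe, the old 'mask'-only binding degenerates to a full-register write. A small sketch of the two calls side by side; register offset and bit are made up:

#include <linux/regmap.h>
#include <linux/bits.h>

static void reboot_write_examples(struct regmap *map)
{
	/* Old behaviour: overwrite the whole register with the mask value. */
	regmap_write(map, 0x04, 0x1);

	/*
	 * New behaviour: read-modify-write; only bit 0 is changed, every
	 * other bit of the register keeps its current value.
	 */
	regmap_update_bits(map, 0x04, BIT(0), 0x1);
}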
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 0230c96fa94d..26dacdab03cc 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -169,6 +169,17 @@ config BATTERY_COLLIE
Say Y to enable support for the battery on the Sharp Zaurus
SL-5500 (collie) models.
+config BATTERY_INGENIC
+ tristate "Ingenic JZ47xx SoCs battery driver"
+ depends on MIPS || COMPILE_TEST
+ depends on INGENIC_ADC
+ help
+ Choose this option if you want to monitor battery status on
+ Ingenic JZ47xx SoC based devices.
+
+ This driver can also be built as a module. If so, the module will be
+ called ingenic-battery.
+
config BATTERY_IPAQ_MICRO
tristate "iPAQ Atmel Micro ASIC battery driver"
depends on MFD_IPAQ_MICRO
@@ -475,12 +486,12 @@ config CHARGER_MANAGER
runtime and in suspend-to-RAM by waking up the system periodically
with help of suspend_again support.
-config CHARGER_LTC3651
- tristate "LTC3651 charger"
+config CHARGER_LT3651
+ tristate "Analog Devices LT3651 charger"
depends on GPIOLIB
help
- Say Y to include support for the LTC3651 battery charger which reports
- its status via GPIO lines.
+ Say Y to include support for the Analog Devices (Linear Technology)
+ LT3651 battery charger which reports its status via GPIO lines.
config CHARGER_MAX14577
tristate "Maxim MAX14577/77836 battery charger driver"
@@ -667,4 +678,14 @@ config FUEL_GAUGE_SC27XX
Say Y here to enable support for fuel gauge with SC27XX
PMIC chips.
+config CHARGER_UCS1002
+ tristate "Microchip UCS1002 USB Port Power Controller"
+ depends on I2C
+ depends on OF
+ depends on REGULATOR
+ select REGMAP_I2C
+ help
+ Say Y to enable support for Microchip UCS1002 Programmable
+ USB Port Power Controller with Charger Emulation.
+
endif # POWER_SUPPLY
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index b73eb8c5c1a9..f208273f9686 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o
obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o
obj-$(CONFIG_BATTERY_COLLIE) += collie_battery.o
+obj-$(CONFIG_BATTERY_INGENIC) += ingenic-battery.o
obj-$(CONFIG_BATTERY_IPAQ_MICRO) += ipaq_micro_battery.o
obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o
obj-$(CONFIG_BATTERY_SBS) += sbs-battery.o
@@ -67,7 +68,7 @@ obj-$(CONFIG_CHARGER_LP8727) += lp8727_charger.o
obj-$(CONFIG_CHARGER_LP8788) += lp8788-charger.o
obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o
-obj-$(CONFIG_CHARGER_LTC3651) += ltc3651-charger.o
+obj-$(CONFIG_CHARGER_LT3651) += lt3651-charger.o
obj-$(CONFIG_CHARGER_MAX14577) += max14577_charger.o
obj-$(CONFIG_CHARGER_DETECTOR_MAX14656) += max14656_charger_detector.o
obj-$(CONFIG_CHARGER_MAX77650) += max77650-charger.o
@@ -88,3 +89,4 @@ obj-$(CONFIG_AXP288_CHARGER) += axp288_charger.o
obj-$(CONFIG_CHARGER_CROS_USBPD) += cros_usbpd-charger.o
obj-$(CONFIG_CHARGER_SC2731) += sc2731_charger.o
obj-$(CONFIG_FUEL_GAUGE_SC27XX) += sc27xx_fuel_gauge.o
+obj-$(CONFIG_CHARGER_UCS1002) += ucs1002_power.o
diff --git a/drivers/power/supply/ab8500_bmdata.c b/drivers/power/supply/ab8500_bmdata.c
index 7b2b69916f48..f6a66979cbb5 100644
--- a/drivers/power/supply/ab8500_bmdata.c
+++ b/drivers/power/supply/ab8500_bmdata.c
@@ -508,6 +508,7 @@ int ab8500_bm_of_probe(struct device *dev,
btech = of_get_property(battery_node, "stericsson,battery-type", NULL);
if (!btech) {
dev_warn(dev, "missing property battery-name/type\n");
+ of_node_put(battery_node);
return -EINVAL;
}
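
The added of_node_put() above balances the reference taken when the battery node was looked up: device-tree node getters (of_parse_phandle(), of_get_child_by_name(), ...) return the node with an elevated refcount, and every early-exit path has to drop it. A minimal sketch of the pattern; node and property names are illustrative:

#include <linux/of.h>

static int read_battery_type(struct device_node *parent, const char **btech)
{
	struct device_node *np;

	np = of_get_child_by_name(parent, "battery");	/* takes a reference */
	if (!np)
		return -ENODEV;

	*btech = of_get_property(np, "stericsson,battery-type", NULL);
	if (!*btech) {
		of_node_put(np);	/* drop the reference on the error path too */
		return -EINVAL;
	}

	of_node_put(np);
	return 0;
}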
diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index f52fe77edb6f..d2b1255ee1cc 100644
--- a/drivers/power/supply/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
@@ -24,6 +24,7 @@
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/iio/consumer.h>
+#include <linux/workqueue.h>
#define DRVNAME "axp20x-usb-power-supply"
@@ -36,16 +37,27 @@
#define AXP20X_VBUS_VHOLD_MASK GENMASK(5, 3)
#define AXP20X_VBUS_VHOLD_OFFSET 3
#define AXP20X_VBUS_CLIMIT_MASK 3
-#define AXP20X_VBUC_CLIMIT_900mA 0
-#define AXP20X_VBUC_CLIMIT_500mA 1
-#define AXP20X_VBUC_CLIMIT_100mA 2
-#define AXP20X_VBUC_CLIMIT_NONE 3
+#define AXP20X_VBUS_CLIMIT_900mA 0
+#define AXP20X_VBUS_CLIMIT_500mA 1
+#define AXP20X_VBUS_CLIMIT_100mA 2
+#define AXP20X_VBUS_CLIMIT_NONE 3
+
+#define AXP813_VBUS_CLIMIT_900mA 0
+#define AXP813_VBUS_CLIMIT_1500mA 1
+#define AXP813_VBUS_CLIMIT_2000mA 2
+#define AXP813_VBUS_CLIMIT_2500mA 3
#define AXP20X_ADC_EN1_VBUS_CURR BIT(2)
#define AXP20X_ADC_EN1_VBUS_VOLT BIT(3)
#define AXP20X_VBUS_MON_VBUS_VALID BIT(3)
+/*
+ * Note do not raise the debounce time, we must report Vusb high within
+ * 100ms otherwise we get Vbus errors in musb.
+ */
+#define DEBOUNCE_TIME msecs_to_jiffies(50)
+
struct axp20x_usb_power {
struct device_node *np;
struct regmap *regmap;
@@ -53,6 +65,8 @@ struct axp20x_usb_power {
enum axp20x_variants axp20x_id;
struct iio_channel *vbus_v;
struct iio_channel *vbus_i;
+ struct delayed_work vbus_detect;
+ unsigned int old_status;
};
static irqreturn_t axp20x_usb_power_irq(int irq, void *devid)
@@ -64,6 +78,89 @@ static irqreturn_t axp20x_usb_power_irq(int irq, void *devid)
return IRQ_HANDLED;
}
+static void axp20x_usb_power_poll_vbus(struct work_struct *work)
+{
+ struct axp20x_usb_power *power =
+ container_of(work, struct axp20x_usb_power, vbus_detect.work);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(power->regmap, AXP20X_PWR_INPUT_STATUS, &val);
+ if (ret)
+ goto out;
+
+ val &= (AXP20X_PWR_STATUS_VBUS_PRESENT | AXP20X_PWR_STATUS_VBUS_USED);
+ if (val != power->old_status)
+ power_supply_changed(power->supply);
+
+ power->old_status = val;
+
+out:
+ mod_delayed_work(system_wq, &power->vbus_detect, DEBOUNCE_TIME);
+}
+
+static bool axp20x_usb_vbus_needs_polling(struct axp20x_usb_power *power)
+{
+ if (power->axp20x_id >= AXP221_ID)
+ return true;
+
+ return false;
+}
+
+static int axp20x_get_current_max(struct axp20x_usb_power *power, int *val)
+{
+ unsigned int v;
+ int ret = regmap_read(power->regmap, AXP20X_VBUS_IPSOUT_MGMT, &v);
+
+ if (ret)
+ return ret;
+
+ switch (v & AXP20X_VBUS_CLIMIT_MASK) {
+ case AXP20X_VBUS_CLIMIT_100mA:
+ if (power->axp20x_id == AXP221_ID)
+ *val = -1; /* No 100mA limit */
+ else
+ *val = 100000;
+ break;
+ case AXP20X_VBUS_CLIMIT_500mA:
+ *val = 500000;
+ break;
+ case AXP20X_VBUS_CLIMIT_900mA:
+ *val = 900000;
+ break;
+ case AXP20X_VBUS_CLIMIT_NONE:
+ *val = -1;
+ break;
+ }
+
+ return 0;
+}
+
+static int axp813_get_current_max(struct axp20x_usb_power *power, int *val)
+{
+ unsigned int v;
+ int ret = regmap_read(power->regmap, AXP20X_VBUS_IPSOUT_MGMT, &v);
+
+ if (ret)
+ return ret;
+
+ switch (v & AXP20X_VBUS_CLIMIT_MASK) {
+ case AXP813_VBUS_CLIMIT_900mA:
+ *val = 900000;
+ break;
+ case AXP813_VBUS_CLIMIT_1500mA:
+ *val = 1500000;
+ break;
+ case AXP813_VBUS_CLIMIT_2000mA:
+ *val = 2000000;
+ break;
+ case AXP813_VBUS_CLIMIT_2500mA:
+ *val = 2500000;
+ break;
+ }
+ return 0;
+}
+
static int axp20x_usb_power_get_property(struct power_supply *psy,
enum power_supply_property psp, union power_supply_propval *val)
{
@@ -102,28 +199,9 @@ static int axp20x_usb_power_get_property(struct power_supply *psy,
val->intval = ret * 1700; /* 1 step = 1.7 mV */
return 0;
case POWER_SUPPLY_PROP_CURRENT_MAX:
- ret = regmap_read(power->regmap, AXP20X_VBUS_IPSOUT_MGMT, &v);
- if (ret)
- return ret;
-
- switch (v & AXP20X_VBUS_CLIMIT_MASK) {
- case AXP20X_VBUC_CLIMIT_100mA:
- if (power->axp20x_id == AXP221_ID)
- val->intval = -1; /* No 100mA limit */
- else
- val->intval = 100000;
- break;
- case AXP20X_VBUC_CLIMIT_500mA:
- val->intval = 500000;
- break;
- case AXP20X_VBUC_CLIMIT_900mA:
- val->intval = 900000;
- break;
- case AXP20X_VBUC_CLIMIT_NONE:
- val->intval = -1;
- break;
- }
- return 0;
+ if (power->axp20x_id == AXP813_ID)
+ return axp813_get_current_max(power, &val->intval);
+ return axp20x_get_current_max(power, &val->intval);
case POWER_SUPPLY_PROP_CURRENT_NOW:
if (IS_ENABLED(CONFIG_AXP20X_ADC)) {
ret = iio_read_channel_processed(power->vbus_i,
@@ -214,6 +292,31 @@ static int axp20x_usb_power_set_voltage_min(struct axp20x_usb_power *power,
return -EINVAL;
}
+static int axp813_usb_power_set_current_max(struct axp20x_usb_power *power,
+ int intval)
+{
+ int val;
+
+ switch (intval) {
+ case 900000:
+ return regmap_update_bits(power->regmap,
+ AXP20X_VBUS_IPSOUT_MGMT,
+ AXP20X_VBUS_CLIMIT_MASK,
+ AXP813_VBUS_CLIMIT_900mA);
+ case 1500000:
+ case 2000000:
+ case 2500000:
+ val = (intval - 1000000) / 500000;
+ return regmap_update_bits(power->regmap,
+ AXP20X_VBUS_IPSOUT_MGMT,
+ AXP20X_VBUS_CLIMIT_MASK, val);
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
static int axp20x_usb_power_set_current_max(struct axp20x_usb_power *power,
int intval)
{
@@ -248,6 +351,9 @@ static int axp20x_usb_power_set_property(struct power_supply *psy,
return axp20x_usb_power_set_voltage_min(power, val->intval);
case POWER_SUPPLY_PROP_CURRENT_MAX:
+ if (power->axp20x_id == AXP813_ID)
+ return axp813_usb_power_set_current_max(power,
+ val->intval);
return axp20x_usb_power_set_current_max(power, val->intval);
default:
@@ -357,6 +463,7 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
if (!power)
return -ENOMEM;
+ platform_set_drvdata(pdev, power);
power->axp20x_id = (enum axp20x_variants)of_device_get_match_data(
&pdev->dev);
@@ -382,7 +489,8 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
usb_power_desc = &axp20x_usb_power_desc;
irq_names = axp20x_irq_names;
} else if (power->axp20x_id == AXP221_ID ||
- power->axp20x_id == AXP223_ID) {
+ power->axp20x_id == AXP223_ID ||
+ power->axp20x_id == AXP813_ID) {
usb_power_desc = &axp22x_usb_power_desc;
irq_names = axp22x_irq_names;
} else {
@@ -415,6 +523,19 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
irq_names[i], ret);
}
+ INIT_DELAYED_WORK(&power->vbus_detect, axp20x_usb_power_poll_vbus);
+ if (axp20x_usb_vbus_needs_polling(power))
+ queue_delayed_work(system_wq, &power->vbus_detect, 0);
+
+ return 0;
+}
+
+static int axp20x_usb_power_remove(struct platform_device *pdev)
+{
+ struct axp20x_usb_power *power = platform_get_drvdata(pdev);
+
+ cancel_delayed_work_sync(&power->vbus_detect);
+
return 0;
}
@@ -428,12 +549,16 @@ static const struct of_device_id axp20x_usb_power_match[] = {
}, {
.compatible = "x-powers,axp223-usb-power-supply",
.data = (void *)AXP223_ID,
+ }, {
+ .compatible = "x-powers,axp813-usb-power-supply",
+ .data = (void *)AXP813_ID,
}, { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, axp20x_usb_power_match);
static struct platform_driver axp20x_usb_power_driver = {
.probe = axp20x_usb_power_probe,
+ .remove = axp20x_usb_power_remove,
.driver = {
.name = DRVNAME,
.of_match_table = axp20x_usb_power_match,
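
The VBUS polling added in this file is the standard self-rearming delayed-work loop: the work function reads the status, reports a change, and re-queues itself with mod_delayed_work(); probe kicks it off and remove stops it with cancel_delayed_work_sync(). A condensed sketch of just that skeleton, with illustrative struct and function names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct poller {
	struct delayed_work work;
	unsigned int old_status;
};

static void poll_fn(struct work_struct *work)
{
	struct poller *p = container_of(work, struct poller, work.work);

	/* ... read hardware, compare against p->old_status, notify ... */

	/* Re-arm: replaces any pending instance with a fresh 50 ms timeout. */
	mod_delayed_work(system_wq, &p->work, msecs_to_jiffies(50));
}

static void poller_start(struct poller *p)
{
	INIT_DELAYED_WORK(&p->work, poll_fn);
	queue_delayed_work(system_wq, &p->work, 0);
}

static void poller_stop(struct poller *p)
{
	cancel_delayed_work_sync(&p->work);	/* waits for a running instance */
}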
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index f8c6da9277b3..00b961890a38 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -833,6 +833,10 @@ static int axp288_charger_probe(struct platform_device *pdev)
/* Register charger interrupts */
for (i = 0; i < CHRG_INTR_END; i++) {
pirq = platform_get_irq(info->pdev, i);
+ if (pirq < 0) {
+ dev_err(&pdev->dev, "Failed to get IRQ: %d\n", pirq);
+ return pirq;
+ }
info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq);
if (info->irq[i] < 0) {
dev_warn(&info->pdev->dev,
diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
index 9ff2461820d8..368281bc0d2b 100644
--- a/drivers/power/supply/axp288_fuel_gauge.c
+++ b/drivers/power/supply/axp288_fuel_gauge.c
@@ -686,6 +686,26 @@ intr_failed:
*/
static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = {
{
+ /* ACEPC T8 Cherry Trail Z8350 mini PC */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T8"),
+ /* also match on somewhat unique bios-version */
+ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
+ },
+ },
+ {
+ /* ACEPC T11 Cherry Trail Z8350 mini PC */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T11"),
+ /* also match on somewhat unique bios-version */
+ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
+ },
+ },
+ {
/* Intel Cherry Trail Compute Stick, Windows version */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 29b3a4056865..195c18c2f426 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -1612,7 +1612,8 @@ void bq27xxx_battery_update(struct bq27xxx_device_info *di)
di->charge_design_full = bq27xxx_battery_read_dcap(di);
}
- if (di->cache.capacity != cache.capacity)
+ if ((di->cache.capacity != cache.capacity) ||
+ (di->cache.flags != cache.flags))
power_supply_changed(di->bat);
if (memcmp(&di->cache, &cache, sizeof(cache)) != 0)
diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c
index 2e8db5e6de0b..a6900aa0d2ed 100644
--- a/drivers/power/supply/charger-manager.c
+++ b/drivers/power/supply/charger-manager.c
@@ -1987,6 +1987,9 @@ static struct platform_driver charger_manager_driver = {
static int __init charger_manager_init(void)
{
cm_wq = create_freezable_workqueue("charger_manager");
+ if (unlikely(!cm_wq))
+ return -ENOMEM;
+
INIT_DELAYED_WORK(&cm_monitor_work, cm_monitor_poller);
return platform_driver_register(&charger_manager_driver);
diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c
index 6887870ba32c..61d6447d1966 100644
--- a/drivers/power/supply/cpcap-battery.c
+++ b/drivers/power/supply/cpcap-battery.c
@@ -82,9 +82,9 @@ struct cpcap_battery_config {
};
struct cpcap_coulomb_counter_data {
- s32 sample; /* 24-bits */
+ s32 sample; /* 24 or 32 bits */
s32 accumulator;
- s16 offset; /* 10-bits */
+ s16 offset; /* 9 bits */
};
enum cpcap_battery_state {
@@ -213,7 +213,7 @@ static int cpcap_battery_get_current(struct cpcap_battery_ddata *ddata)
* TI or ST coulomb counter in the PMIC.
*/
static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata,
- u32 sample, s32 accumulator,
+ s32 sample, s32 accumulator,
s16 offset, u32 divider)
{
s64 acc;
@@ -224,9 +224,6 @@ static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata,
if (!divider)
return 0;
- sample &= 0xffffff; /* 24-bits, unsigned */
- offset &= 0x7ff; /* 10-bits, signed */
-
switch (ddata->vendor) {
case CPCAP_VENDOR_ST:
cc_lsb = 95374; /* μAms per LSB */
@@ -259,7 +256,7 @@ static int cpcap_battery_cc_raw_div(struct cpcap_battery_ddata *ddata,
/* 3600000μAms = 1μAh */
static int cpcap_battery_cc_to_uah(struct cpcap_battery_ddata *ddata,
- u32 sample, s32 accumulator,
+ s32 sample, s32 accumulator,
s16 offset)
{
return cpcap_battery_cc_raw_div(ddata, sample,
@@ -268,7 +265,7 @@ static int cpcap_battery_cc_to_uah(struct cpcap_battery_ddata *ddata,
}
static int cpcap_battery_cc_to_ua(struct cpcap_battery_ddata *ddata,
- u32 sample, s32 accumulator,
+ s32 sample, s32 accumulator,
s16 offset)
{
return cpcap_battery_cc_raw_div(ddata, sample,
@@ -312,17 +309,19 @@ cpcap_battery_read_accumulated(struct cpcap_battery_ddata *ddata,
/* Sample value CPCAP_REG_CCS1 & 2 */
ccd->sample = (buf[1] & 0x0fff) << 16;
ccd->sample |= buf[0];
+ if (ddata->vendor == CPCAP_VENDOR_TI)
+ ccd->sample = sign_extend32(24, ccd->sample);
/* Accumulator value CPCAP_REG_CCA1 & 2 */
ccd->accumulator = ((s16)buf[3]) << 16;
ccd->accumulator |= buf[2];
- /* Offset value CPCAP_REG_CCO */
- ccd->offset = buf[5];
-
- /* Adjust offset based on mode value CPCAP_REG_CCM? */
- if (buf[4] >= 0x200)
- ccd->offset |= 0xfc00;
+ /*
+ * Coulomb counter calibration offset is CPCAP_REG_CCM,
+ * REG_CCO seems unused
+ */
+ ccd->offset = buf[4];
+ ccd->offset = sign_extend32(ccd->offset, 9);
return cpcap_battery_cc_to_uah(ddata,
ccd->sample,
@@ -477,11 +476,11 @@ static int cpcap_battery_get_property(struct power_supply *psy,
val->intval = ddata->config.info.voltage_min_design;
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
- if (cached) {
+ sample = latest->cc.sample - previous->cc.sample;
+ if (!sample) {
val->intval = cpcap_battery_cc_get_avg_current(ddata);
break;
}
- sample = latest->cc.sample - previous->cc.sample;
accumulator = latest->cc.accumulator - previous->cc.accumulator;
val->intval = cpcap_battery_cc_to_ua(ddata, sample,
accumulator,
@@ -498,13 +497,13 @@ static int cpcap_battery_get_property(struct power_supply *psy,
val->intval = div64_s64(tmp, 100);
break;
case POWER_SUPPLY_PROP_POWER_AVG:
- if (cached) {
+ sample = latest->cc.sample - previous->cc.sample;
+ if (!sample) {
tmp = cpcap_battery_cc_get_avg_current(ddata);
tmp *= (latest->voltage / 10000);
val->intval = div64_s64(tmp, 100);
break;
}
- sample = latest->cc.sample - previous->cc.sample;
accumulator = latest->cc.accumulator - previous->cc.accumulator;
tmp = cpcap_battery_cc_to_ua(ddata, sample, accumulator,
latest->cc.offset);
@@ -562,11 +561,11 @@ static irqreturn_t cpcap_battery_irq_thread(int irq, void *data)
switch (d->action) {
case CPCAP_BATTERY_IRQ_ACTION_BATTERY_LOW:
- if (latest->counter_uah >= 0)
+ if (latest->current_ua >= 0)
dev_warn(ddata->dev, "Battery low at 3.3V!\n");
break;
case CPCAP_BATTERY_IRQ_ACTION_POWEROFF:
- if (latest->counter_uah >= 0) {
+ if (latest->current_ua >= 0) {
dev_emerg(ddata->dev,
"Battery empty at 3.1V, powering off\n");
orderly_poweroff(true);
@@ -670,8 +669,9 @@ static int cpcap_battery_init_iio(struct cpcap_battery_ddata *ddata)
return 0;
out_err:
- dev_err(ddata->dev, "could not initialize VBUS or ID IIO: %i\n",
- error);
+ if (error != -EPROBE_DEFER)
+ dev_err(ddata->dev, "could not initialize VBUS or ID IIO: %i\n",
+ error);
return error;
}
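
The coulomb-counter changes in this file rely on sign_extend32() from <linux/bitops.h>, which takes a raw value plus the bit position of its sign bit and propagates that bit upwards, so an N-bit two's-complement field read out of a wider register becomes a proper negative s32. A few worked examples with made-up raw values:

#include <linux/types.h>
#include <linux/bitops.h>

/*
 * sign_extend32(value, index): 'index' is the position of the sign bit,
 * so an N-bit two's-complement field uses index N - 1.
 */
static void sign_extend_examples(void)
{
	s32 a = sign_extend32(0x800000, 23);	/* 24-bit field -> -8388608 */
	s32 b = sign_extend32(0x1ff, 8);	/*  9-bit field -> -1       */
	s32 c = sign_extend32(0x0ff, 8);	/*  9-bit field -> 255      */

	(void)a; (void)b; (void)c;
}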
diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c
index c3ed7b476676..b4781b5d1e10 100644
--- a/drivers/power/supply/cpcap-charger.c
+++ b/drivers/power/supply/cpcap-charger.c
@@ -574,8 +574,9 @@ static int cpcap_charger_init_iio(struct cpcap_charger_ddata *ddata)
return 0;
out_err:
- dev_err(ddata->dev, "could not initialize VBUS or ID IIO: %i\n",
- error);
+ if (error != -EPROBE_DEFER)
+ dev_err(ddata->dev, "could not initialize VBUS or ID IIO: %i\n",
+ error);
return error;
}
diff --git a/drivers/power/supply/gpio-charger.c b/drivers/power/supply/gpio-charger.c
index 7e4f11d5a230..f99e8f1eef23 100644
--- a/drivers/power/supply/gpio-charger.c
+++ b/drivers/power/supply/gpio-charger.c
@@ -29,11 +29,13 @@
struct gpio_charger {
unsigned int irq;
+ unsigned int charge_status_irq;
bool wakeup_enabled;
struct power_supply *charger;
struct power_supply_desc charger_desc;
struct gpio_desc *gpiod;
+ struct gpio_desc *charge_status;
};
static irqreturn_t gpio_charger_irq(int irq, void *devid)
@@ -59,6 +61,12 @@ static int gpio_charger_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_ONLINE:
val->intval = gpiod_get_value_cansleep(gpio_charger->gpiod);
break;
+ case POWER_SUPPLY_PROP_STATUS:
+ if (gpiod_get_value_cansleep(gpio_charger->charge_status))
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
default:
return -EINVAL;
}
@@ -93,8 +101,29 @@ static enum power_supply_type gpio_charger_get_type(struct device *dev)
return POWER_SUPPLY_TYPE_UNKNOWN;
}
+static int gpio_charger_get_irq(struct device *dev, void *dev_id,
+ struct gpio_desc *gpio)
+{
+ int ret, irq = gpiod_to_irq(gpio);
+
+ if (irq > 0) {
+ ret = devm_request_any_context_irq(dev, irq, gpio_charger_irq,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ dev_name(dev),
+ dev_id);
+ if (ret < 0) {
+ dev_warn(dev, "Failed to request irq: %d\n", ret);
+ irq = 0;
+ }
+ }
+
+ return irq;
+}
+
static enum power_supply_property gpio_charger_properties[] = {
POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_STATUS /* Must always be last in the array. */
};
static int gpio_charger_probe(struct platform_device *pdev)
@@ -104,8 +133,10 @@ static int gpio_charger_probe(struct platform_device *pdev)
struct power_supply_config psy_cfg = {};
struct gpio_charger *gpio_charger;
struct power_supply_desc *charger_desc;
+ struct gpio_desc *charge_status;
+ int charge_status_irq;
unsigned long flags;
- int irq, ret;
+ int ret;
if (!pdata && !dev->of_node) {
dev_err(dev, "No platform data\n");
@@ -151,9 +182,17 @@ static int gpio_charger_probe(struct platform_device *pdev)
return PTR_ERR(gpio_charger->gpiod);
}
+ charge_status = devm_gpiod_get_optional(dev, "charge-status", GPIOD_IN);
+ gpio_charger->charge_status = charge_status;
+ if (IS_ERR(gpio_charger->charge_status))
+ return PTR_ERR(gpio_charger->charge_status);
+
charger_desc = &gpio_charger->charger_desc;
charger_desc->properties = gpio_charger_properties;
charger_desc->num_properties = ARRAY_SIZE(gpio_charger_properties);
+ /* Remove POWER_SUPPLY_PROP_STATUS from the supported properties. */
+ if (!gpio_charger->charge_status)
+ charger_desc->num_properties -= 1;
charger_desc->get_property = gpio_charger_get_property;
psy_cfg.of_node = dev->of_node;
@@ -180,16 +219,12 @@ static int gpio_charger_probe(struct platform_device *pdev)
return ret;
}
- irq = gpiod_to_irq(gpio_charger->gpiod);
- if (irq > 0) {
- ret = devm_request_any_context_irq(dev, irq, gpio_charger_irq,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- dev_name(dev), gpio_charger->charger);
- if (ret < 0)
- dev_warn(dev, "Failed to request irq: %d\n", ret);
- else
- gpio_charger->irq = irq;
- }
+ gpio_charger->irq = gpio_charger_get_irq(dev, gpio_charger->charger,
+ gpio_charger->gpiod);
+
+ charge_status_irq = gpio_charger_get_irq(dev, gpio_charger->charger,
+ gpio_charger->charge_status);
+ gpio_charger->charge_status_irq = charge_status_irq;
platform_set_drvdata(pdev, gpio_charger);
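
The gpio-charger change above leans on two conventions worth spelling out: devm_gpiod_get_optional() returns NULL rather than an error when the "charge-status" property is absent, and the optional POWER_SUPPLY_PROP_STATUS entry is kept last so it can simply be trimmed off num_properties. A hedged sketch with placeholder names:

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/power_supply.h>

static enum power_supply_property example_props[] = {
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_STATUS,	/* optional, must stay last */
};

static int example_setup(struct device *dev, struct power_supply_desc *desc,
			 struct gpio_desc **status_gpio)
{
	*status_gpio = devm_gpiod_get_optional(dev, "charge-status", GPIOD_IN);
	if (IS_ERR(*status_gpio))
		return PTR_ERR(*status_gpio);

	desc->properties = example_props;
	desc->num_properties = ARRAY_SIZE(example_props);
	/* No status GPIO wired up: drop the trailing STATUS property. */
	if (!*status_gpio)
		desc->num_properties--;

	return 0;
}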
diff --git a/drivers/power/supply/ingenic-battery.c b/drivers/power/supply/ingenic-battery.c
new file mode 100644
index 000000000000..35816d4b3012
--- /dev/null
+++ b/drivers/power/supply/ingenic-battery.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Battery driver for the Ingenic JZ47xx SoCs
+ * Copyright (c) 2019 Artur Rojek <contact@artur-rojek.eu>
+ *
+ * based on drivers/power/supply/jz4740-battery.c
+ */
+
+#include <linux/iio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/property.h>
+
+struct ingenic_battery {
+ struct device *dev;
+ struct iio_channel *channel;
+ struct power_supply_desc desc;
+ struct power_supply *battery;
+ struct power_supply_battery_info info;
+};
+
+static int ingenic_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ingenic_battery *bat = power_supply_get_drvdata(psy);
+ struct power_supply_battery_info *info = &bat->info;
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = iio_read_channel_processed(bat->channel, &val->intval);
+ val->intval *= 1000;
+ if (val->intval < info->voltage_min_design_uv)
+ val->intval = POWER_SUPPLY_HEALTH_DEAD;
+ else if (val->intval > info->voltage_max_design_uv)
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ return ret;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = iio_read_channel_processed(bat->channel, &val->intval);
+ val->intval *= 1000;
+ return ret;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = info->voltage_min_design_uv;
+ return 0;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = info->voltage_max_design_uv;
+ return 0;
+ default:
+ return -EINVAL;
+ };
+}
+
+/* Set the most appropriate IIO channel voltage reference scale
+ * based on the battery's max voltage.
+ */
+static int ingenic_battery_set_scale(struct ingenic_battery *bat)
+{
+ const int *scale_raw;
+ int scale_len, scale_type, best_idx = -1, best_mV, max_raw, i, ret;
+ u64 max_mV;
+
+ ret = iio_read_max_channel_raw(bat->channel, &max_raw);
+ if (ret) {
+ dev_err(bat->dev, "Unable to read max raw channel value\n");
+ return ret;
+ }
+
+ ret = iio_read_avail_channel_attribute(bat->channel, &scale_raw,
+ &scale_type, &scale_len,
+ IIO_CHAN_INFO_SCALE);
+ if (ret < 0) {
+ dev_err(bat->dev, "Unable to read channel avail scale\n");
+ return ret;
+ }
+ if (ret != IIO_AVAIL_LIST || scale_type != IIO_VAL_FRACTIONAL_LOG2)
+ return -EINVAL;
+
+ max_mV = bat->info.voltage_max_design_uv / 1000;
+
+ for (i = 0; i < scale_len; i += 2) {
+ u64 scale_mV = (max_raw * scale_raw[i]) >> scale_raw[i + 1];
+
+ if (scale_mV < max_mV)
+ continue;
+
+ if (best_idx >= 0 && scale_mV > best_mV)
+ continue;
+
+ best_mV = scale_mV;
+ best_idx = i;
+ }
+
+ if (best_idx < 0) {
+ dev_err(bat->dev, "Unable to find matching voltage scale\n");
+ return -EINVAL;
+ }
+
+ return iio_write_channel_attribute(bat->channel,
+ scale_raw[best_idx],
+ scale_raw[best_idx + 1],
+ IIO_CHAN_INFO_SCALE);
+}
+
+static enum power_supply_property ingenic_battery_properties[] = {
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+};
+
+static int ingenic_battery_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ingenic_battery *bat;
+ struct power_supply_config psy_cfg = {};
+ struct power_supply_desc *desc;
+ int ret;
+
+ bat = devm_kzalloc(dev, sizeof(*bat), GFP_KERNEL);
+ if (!bat)
+ return -ENOMEM;
+
+ bat->dev = dev;
+ bat->channel = devm_iio_channel_get(dev, "battery");
+ if (IS_ERR(bat->channel))
+ return PTR_ERR(bat->channel);
+
+ desc = &bat->desc;
+ desc->name = "jz-battery";
+ desc->type = POWER_SUPPLY_TYPE_BATTERY;
+ desc->properties = ingenic_battery_properties;
+ desc->num_properties = ARRAY_SIZE(ingenic_battery_properties);
+ desc->get_property = ingenic_battery_get_property;
+ psy_cfg.drv_data = bat;
+ psy_cfg.of_node = dev->of_node;
+
+ bat->battery = devm_power_supply_register(dev, desc, &psy_cfg);
+ if (IS_ERR(bat->battery)) {
+ dev_err(dev, "Unable to register battery\n");
+ return PTR_ERR(bat->battery);
+ }
+
+ ret = power_supply_get_battery_info(bat->battery, &bat->info);
+ if (ret) {
+ dev_err(dev, "Unable to get battery info: %d\n", ret);
+ return ret;
+ }
+ if (bat->info.voltage_min_design_uv < 0) {
+ dev_err(dev, "Unable to get voltage min design\n");
+ return bat->info.voltage_min_design_uv;
+ }
+ if (bat->info.voltage_max_design_uv < 0) {
+ dev_err(dev, "Unable to get voltage max design\n");
+ return bat->info.voltage_max_design_uv;
+ }
+
+ return ingenic_battery_set_scale(bat);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id ingenic_battery_of_match[] = {
+ { .compatible = "ingenic,jz4740-battery", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ingenic_battery_of_match);
+#endif
+
+static struct platform_driver ingenic_battery_driver = {
+ .driver = {
+ .name = "ingenic-battery",
+ .of_match_table = of_match_ptr(ingenic_battery_of_match),
+ },
+ .probe = ingenic_battery_probe,
+};
+module_platform_driver(ingenic_battery_driver);
+
+MODULE_DESCRIPTION("Battery driver for Ingenic JZ47xx SoCs");
+MODULE_AUTHOR("Artur Rojek <contact@artur-rojek.eu>");
+MODULE_LICENSE("GPL");
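
In the new Ingenic driver, ingenic_battery_set_scale() picks the smallest IIO reference scale that still covers the battery's maximum design voltage; with an IIO_VAL_FRACTIONAL_LOG2 scale the full-scale value works out to max_raw * numerator >> log2_denominator. A tiny sketch of that arithmetic, with invented numbers (12-bit ADC, so max_raw = 4095):

#include <linux/types.h>

static u64 example_full_scale_mV(u32 max_raw, u32 numerator, u32 log2_den)
{
	/*
	 * e.g. 4095 * 2500 >> 12 is roughly 2500 mV full scale, and
	 * 4095 * 3300 >> 12 is roughly 3300 mV. For a 4200 mV battery
	 * neither covers max_mV, so a larger reference would be chosen.
	 */
	return ((u64)max_raw * numerator) >> log2_den;
}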
diff --git a/drivers/power/supply/ltc3651-charger.c b/drivers/power/supply/lt3651-charger.c
index eea63ff211c4..8de500ffad95 100644
--- a/drivers/power/supply/ltc3651-charger.c
+++ b/drivers/power/supply/lt3651-charger.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
+ * Driver for Analog Devices (Linear Technology) LT3651 charger IC.
* Copyright (C) 2017, Topic Embedded Products
- * Driver for LTC3651 charger IC.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/device.h>
@@ -19,7 +15,7 @@
#include <linux/slab.h>
#include <linux/of.h>
-struct ltc3651_charger {
+struct lt3651_charger {
struct power_supply *charger;
struct power_supply_desc charger_desc;
struct gpio_desc *acpr_gpio;
@@ -27,7 +23,7 @@ struct ltc3651_charger {
struct gpio_desc *chrg_gpio;
};
-static irqreturn_t ltc3651_charger_irq(int irq, void *devid)
+static irqreturn_t lt3651_charger_irq(int irq, void *devid)
{
struct power_supply *charger = devid;
@@ -36,37 +32,37 @@ static irqreturn_t ltc3651_charger_irq(int irq, void *devid)
return IRQ_HANDLED;
}
-static inline struct ltc3651_charger *psy_to_ltc3651_charger(
+static inline struct lt3651_charger *psy_to_lt3651_charger(
struct power_supply *psy)
{
return power_supply_get_drvdata(psy);
}
-static int ltc3651_charger_get_property(struct power_supply *psy,
+static int lt3651_charger_get_property(struct power_supply *psy,
enum power_supply_property psp, union power_supply_propval *val)
{
- struct ltc3651_charger *ltc3651_charger = psy_to_ltc3651_charger(psy);
+ struct lt3651_charger *lt3651_charger = psy_to_lt3651_charger(psy);
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
- if (!ltc3651_charger->chrg_gpio) {
+ if (!lt3651_charger->chrg_gpio) {
val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
break;
}
- if (gpiod_get_value(ltc3651_charger->chrg_gpio))
+ if (gpiod_get_value(lt3651_charger->chrg_gpio))
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else
val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
break;
case POWER_SUPPLY_PROP_ONLINE:
- val->intval = gpiod_get_value(ltc3651_charger->acpr_gpio);
+ val->intval = gpiod_get_value(lt3651_charger->acpr_gpio);
break;
case POWER_SUPPLY_PROP_HEALTH:
- if (!ltc3651_charger->fault_gpio) {
+ if (!lt3651_charger->fault_gpio) {
val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
break;
}
- if (!gpiod_get_value(ltc3651_charger->fault_gpio)) {
+ if (!gpiod_get_value(lt3651_charger->fault_gpio)) {
val->intval = POWER_SUPPLY_HEALTH_GOOD;
break;
}
@@ -74,11 +70,11 @@ static int ltc3651_charger_get_property(struct power_supply *psy,
* If the fault pin is active, the chrg pin explains the type
* of failure.
*/
- if (!ltc3651_charger->chrg_gpio) {
+ if (!lt3651_charger->chrg_gpio) {
val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
break;
}
- val->intval = gpiod_get_value(ltc3651_charger->chrg_gpio) ?
+ val->intval = gpiod_get_value(lt3651_charger->chrg_gpio) ?
POWER_SUPPLY_HEALTH_OVERHEAT :
POWER_SUPPLY_HEALTH_DEAD;
break;
@@ -89,59 +85,59 @@ static int ltc3651_charger_get_property(struct power_supply *psy,
return 0;
}
-static enum power_supply_property ltc3651_charger_properties[] = {
+static enum power_supply_property lt3651_charger_properties[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_HEALTH,
};
-static int ltc3651_charger_probe(struct platform_device *pdev)
+static int lt3651_charger_probe(struct platform_device *pdev)
{
struct power_supply_config psy_cfg = {};
- struct ltc3651_charger *ltc3651_charger;
+ struct lt3651_charger *lt3651_charger;
struct power_supply_desc *charger_desc;
int ret;
- ltc3651_charger = devm_kzalloc(&pdev->dev, sizeof(*ltc3651_charger),
+ lt3651_charger = devm_kzalloc(&pdev->dev, sizeof(*lt3651_charger),
GFP_KERNEL);
- if (!ltc3651_charger)
+ if (!lt3651_charger)
return -ENOMEM;
- ltc3651_charger->acpr_gpio = devm_gpiod_get(&pdev->dev,
+ lt3651_charger->acpr_gpio = devm_gpiod_get(&pdev->dev,
"lltc,acpr", GPIOD_IN);
- if (IS_ERR(ltc3651_charger->acpr_gpio)) {
- ret = PTR_ERR(ltc3651_charger->acpr_gpio);
+ if (IS_ERR(lt3651_charger->acpr_gpio)) {
+ ret = PTR_ERR(lt3651_charger->acpr_gpio);
dev_err(&pdev->dev, "Failed to acquire acpr GPIO: %d\n", ret);
return ret;
}
- ltc3651_charger->fault_gpio = devm_gpiod_get_optional(&pdev->dev,
+ lt3651_charger->fault_gpio = devm_gpiod_get_optional(&pdev->dev,
"lltc,fault", GPIOD_IN);
- if (IS_ERR(ltc3651_charger->fault_gpio)) {
- ret = PTR_ERR(ltc3651_charger->fault_gpio);
+ if (IS_ERR(lt3651_charger->fault_gpio)) {
+ ret = PTR_ERR(lt3651_charger->fault_gpio);
dev_err(&pdev->dev, "Failed to acquire fault GPIO: %d\n", ret);
return ret;
}
- ltc3651_charger->chrg_gpio = devm_gpiod_get_optional(&pdev->dev,
+ lt3651_charger->chrg_gpio = devm_gpiod_get_optional(&pdev->dev,
"lltc,chrg", GPIOD_IN);
- if (IS_ERR(ltc3651_charger->chrg_gpio)) {
- ret = PTR_ERR(ltc3651_charger->chrg_gpio);
+ if (IS_ERR(lt3651_charger->chrg_gpio)) {
+ ret = PTR_ERR(lt3651_charger->chrg_gpio);
dev_err(&pdev->dev, "Failed to acquire chrg GPIO: %d\n", ret);
return ret;
}
- charger_desc = &ltc3651_charger->charger_desc;
+ charger_desc = &lt3651_charger->charger_desc;
charger_desc->name = pdev->dev.of_node->name;
charger_desc->type = POWER_SUPPLY_TYPE_MAINS;
- charger_desc->properties = ltc3651_charger_properties;
- charger_desc->num_properties = ARRAY_SIZE(ltc3651_charger_properties);
- charger_desc->get_property = ltc3651_charger_get_property;
+ charger_desc->properties = lt3651_charger_properties;
+ charger_desc->num_properties = ARRAY_SIZE(lt3651_charger_properties);
+ charger_desc->get_property = lt3651_charger_get_property;
psy_cfg.of_node = pdev->dev.of_node;
- psy_cfg.drv_data = ltc3651_charger;
+ psy_cfg.drv_data = lt3651_charger;
- ltc3651_charger->charger = devm_power_supply_register(&pdev->dev,
+ lt3651_charger->charger = devm_power_supply_register(&pdev->dev,
charger_desc, &psy_cfg);
- if (IS_ERR(ltc3651_charger->charger)) {
- ret = PTR_ERR(ltc3651_charger->charger);
+ if (IS_ERR(lt3651_charger->charger)) {
+ ret = PTR_ERR(lt3651_charger->charger);
dev_err(&pdev->dev, "Failed to register power supply: %d\n",
ret);
return ret;
@@ -152,59 +148,60 @@ static int ltc3651_charger_probe(struct platform_device *pdev)
* support IRQs on these pins, userspace will have to poll the sysfs
* files manually.
*/
- if (ltc3651_charger->acpr_gpio) {
- ret = gpiod_to_irq(ltc3651_charger->acpr_gpio);
+ if (lt3651_charger->acpr_gpio) {
+ ret = gpiod_to_irq(lt3651_charger->acpr_gpio);
if (ret >= 0)
ret = devm_request_any_context_irq(&pdev->dev, ret,
- ltc3651_charger_irq,
+ lt3651_charger_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- dev_name(&pdev->dev), ltc3651_charger->charger);
+ dev_name(&pdev->dev), lt3651_charger->charger);
if (ret < 0)
dev_warn(&pdev->dev, "Failed to request acpr irq\n");
}
- if (ltc3651_charger->fault_gpio) {
- ret = gpiod_to_irq(ltc3651_charger->fault_gpio);
+ if (lt3651_charger->fault_gpio) {
+ ret = gpiod_to_irq(lt3651_charger->fault_gpio);
if (ret >= 0)
ret = devm_request_any_context_irq(&pdev->dev, ret,
- ltc3651_charger_irq,
+ lt3651_charger_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- dev_name(&pdev->dev), ltc3651_charger->charger);
+ dev_name(&pdev->dev), lt3651_charger->charger);
if (ret < 0)
dev_warn(&pdev->dev, "Failed to request fault irq\n");
}
- if (ltc3651_charger->chrg_gpio) {
- ret = gpiod_to_irq(ltc3651_charger->chrg_gpio);
+ if (lt3651_charger->chrg_gpio) {
+ ret = gpiod_to_irq(lt3651_charger->chrg_gpio);
if (ret >= 0)
ret = devm_request_any_context_irq(&pdev->dev, ret,
- ltc3651_charger_irq,
+ lt3651_charger_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- dev_name(&pdev->dev), ltc3651_charger->charger);
+ dev_name(&pdev->dev), lt3651_charger->charger);
if (ret < 0)
dev_warn(&pdev->dev, "Failed to request chrg irq\n");
}
- platform_set_drvdata(pdev, ltc3651_charger);
+ platform_set_drvdata(pdev, lt3651_charger);
return 0;
}
-static const struct of_device_id ltc3651_charger_match[] = {
- { .compatible = "lltc,ltc3651-charger" },
+static const struct of_device_id lt3651_charger_match[] = {
+ { .compatible = "lltc,ltc3651-charger" }, /* DEPRECATED */
+ { .compatible = "lltc,lt3651-charger" },
{ }
};
-MODULE_DEVICE_TABLE(of, ltc3651_charger_match);
+MODULE_DEVICE_TABLE(of, lt3651_charger_match);
-static struct platform_driver ltc3651_charger_driver = {
- .probe = ltc3651_charger_probe,
+static struct platform_driver lt3651_charger_driver = {
+ .probe = lt3651_charger_probe,
.driver = {
- .name = "ltc3651-charger",
- .of_match_table = ltc3651_charger_match,
+ .name = "lt3651-charger",
+ .of_match_table = lt3651_charger_match,
},
};
-module_platform_driver(ltc3651_charger_driver);
+module_platform_driver(lt3651_charger_driver);
MODULE_AUTHOR("Mike Looijmans <mike.looijmans@topic.nl>");
-MODULE_DESCRIPTION("Driver for LTC3651 charger");
+MODULE_DESCRIPTION("Driver for LT3651 charger");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:ltc3651-charger");
+MODULE_ALIAS("platform:lt3651-charger");
diff --git a/drivers/power/supply/max14656_charger_detector.c b/drivers/power/supply/max14656_charger_detector.c
index b91b1d2999dc..9e6472834e37 100644
--- a/drivers/power/supply/max14656_charger_detector.c
+++ b/drivers/power/supply/max14656_charger_detector.c
@@ -240,6 +240,14 @@ static enum power_supply_property max14656_battery_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
};
+static void stop_irq_work(void *data)
+{
+ struct max14656_chip *chip = data;
+
+ cancel_delayed_work_sync(&chip->irq_work);
+}
+
+
static int max14656_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -278,7 +286,19 @@ static int max14656_probe(struct i2c_client *client,
if (ret)
return -ENODEV;
+ chip->detect_psy = devm_power_supply_register(dev,
+ &chip->psy_desc, &psy_cfg);
+ if (IS_ERR(chip->detect_psy)) {
+ dev_err(dev, "power_supply_register failed\n");
+ return -EINVAL;
+ }
+
INIT_DELAYED_WORK(&chip->irq_work, max14656_irq_worker);
+ ret = devm_add_action(dev, stop_irq_work, chip);
+ if (ret) {
+ dev_err(dev, "devm_add_action %d failed\n", ret);
+ return ret;
+ }
ret = devm_request_irq(dev, chip->irq, max14656_irq,
IRQF_TRIGGER_FALLING,
@@ -289,13 +309,6 @@ static int max14656_probe(struct i2c_client *client,
}
enable_irq_wake(chip->irq);
- chip->detect_psy = devm_power_supply_register(dev,
- &chip->psy_desc, &psy_cfg);
- if (IS_ERR(chip->detect_psy)) {
- dev_err(dev, "power_supply_register failed\n");
- return -EINVAL;
- }
-
schedule_delayed_work(&chip->irq_work, msecs_to_jiffies(2000));
return 0;
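
Reordering max14656_probe() so the power supply is registered first and then adding a devm action matters because devm teardown runs in reverse order: on unbind the IRQ is released first, then the action cancels the delayed work, and only after that is the power supply the worker uses unregistered. Roughly, the idiom looks like this (hypothetical names, worker body elided):

#include <linux/device.h>
#include <linux/workqueue.h>

struct example_chip {
	struct delayed_work irq_work;
};

static void example_worker(struct work_struct *work)
{
	/* periodic status poll would live here */
}

static void example_stop_work(void *data)
{
	struct example_chip *chip = data;

	cancel_delayed_work_sync(&chip->irq_work);
}

static int example_probe(struct device *dev, struct example_chip *chip)
{
	INIT_DELAYED_WORK(&chip->irq_work, example_worker);

	/* Unwinds before any devm resource registered earlier in probe. */
	return devm_add_action(dev, example_stop_work, chip);
}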
diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c
index 5a97e42a3547..7720e4c2ac0b 100644
--- a/drivers/power/supply/olpc_battery.c
+++ b/drivers/power/supply/olpc_battery.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/err.h>
#include <linux/device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/jiffies.h>
@@ -52,6 +53,14 @@
#define BAT_ADDR_MFR_TYPE 0x5F
+struct olpc_battery_data {
+ struct power_supply *olpc_ac;
+ struct power_supply *olpc_bat;
+ char bat_serial[17];
+ bool new_proto;
+ bool little_endian;
+};
+
/*********************************************************************
* Power
*********************************************************************/
@@ -90,13 +99,10 @@ static const struct power_supply_desc olpc_ac_desc = {
.get_property = olpc_ac_get_prop,
};
-static struct power_supply *olpc_ac;
-
-static char bat_serial[17]; /* Ick */
-
-static int olpc_bat_get_status(union power_supply_propval *val, uint8_t ec_byte)
+static int olpc_bat_get_status(struct olpc_battery_data *data,
+ union power_supply_propval *val, uint8_t ec_byte)
{
- if (olpc_platform_info.ecver > 0x44) {
+ if (data->new_proto) {
if (ec_byte & (BAT_STAT_CHARGING | BAT_STAT_TRICKLE))
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else if (ec_byte & BAT_STAT_DISCHARGING)
@@ -318,6 +324,14 @@ static int olpc_bat_get_voltage_max_design(union power_supply_propval *val)
return ret;
}
+static u16 ecword_to_cpu(struct olpc_battery_data *data, u16 ec_word)
+{
+ if (data->little_endian)
+ return le16_to_cpu((__force __le16)ec_word);
+ else
+ return be16_to_cpu((__force __be16)ec_word);
+}
+
/*********************************************************************
* Battery properties
*********************************************************************/
@@ -325,8 +339,9 @@ static int olpc_bat_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
+ struct olpc_battery_data *data = power_supply_get_drvdata(psy);
int ret = 0;
- __be16 ec_word;
+ u16 ec_word;
uint8_t ec_byte;
__be64 ser_buf;
@@ -346,7 +361,7 @@ static int olpc_bat_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
- ret = olpc_bat_get_status(val, ec_byte);
+ ret = olpc_bat_get_status(data, val, ec_byte);
if (ret)
return ret;
break;
@@ -389,7 +404,7 @@ static int olpc_bat_get_property(struct power_supply *psy,
if (ret)
return ret;
- val->intval = (s16)be16_to_cpu(ec_word) * 9760L / 32;
+ val->intval = ecword_to_cpu(data, ec_word) * 9760L / 32;
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
case POWER_SUPPLY_PROP_CURRENT_NOW:
@@ -397,7 +412,7 @@ static int olpc_bat_get_property(struct power_supply *psy,
if (ret)
return ret;
- val->intval = (s16)be16_to_cpu(ec_word) * 15625L / 120;
+ val->intval = ecword_to_cpu(data, ec_word) * 15625L / 120;
break;
case POWER_SUPPLY_PROP_CAPACITY:
ret = olpc_ec_cmd(EC_BAT_SOC, NULL, 0, &ec_byte, 1);
@@ -428,29 +443,29 @@ static int olpc_bat_get_property(struct power_supply *psy,
if (ret)
return ret;
- val->intval = (s16)be16_to_cpu(ec_word) * 10 / 256;
+ val->intval = ecword_to_cpu(data, ec_word) * 10 / 256;
break;
case POWER_SUPPLY_PROP_TEMP_AMBIENT:
ret = olpc_ec_cmd(EC_AMB_TEMP, NULL, 0, (void *)&ec_word, 2);
if (ret)
return ret;
- val->intval = (int)be16_to_cpu(ec_word) * 10 / 256;
+ val->intval = (int)ecword_to_cpu(data, ec_word) * 10 / 256;
break;
case POWER_SUPPLY_PROP_CHARGE_COUNTER:
ret = olpc_ec_cmd(EC_BAT_ACR, NULL, 0, (void *)&ec_word, 2);
if (ret)
return ret;
- val->intval = (s16)be16_to_cpu(ec_word) * 6250 / 15;
+ val->intval = ecword_to_cpu(data, ec_word) * 6250 / 15;
break;
case POWER_SUPPLY_PROP_SERIAL_NUMBER:
ret = olpc_ec_cmd(EC_BAT_SERIAL, NULL, 0, (void *)&ser_buf, 8);
if (ret)
return ret;
- sprintf(bat_serial, "%016llx", (long long)be64_to_cpu(ser_buf));
- val->strval = bat_serial;
+ sprintf(data->bat_serial, "%016llx", (long long)be64_to_cpu(ser_buf));
+ val->strval = data->bat_serial;
break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
ret = olpc_bat_get_voltage_max_design(val);
@@ -536,7 +551,7 @@ static ssize_t olpc_bat_eeprom_read(struct file *filp, struct kobject *kobj,
return count;
}
-static const struct bin_attribute olpc_bat_eeprom = {
+static struct bin_attribute olpc_bat_eeprom = {
.attr = {
.name = "eeprom",
.mode = S_IRUGO,
@@ -560,7 +575,7 @@ static ssize_t olpc_bat_error_read(struct device *dev,
return sprintf(buf, "%d\n", ec_byte);
}
-static const struct device_attribute olpc_bat_error = {
+static struct device_attribute olpc_bat_error = {
.attr = {
.name = "error",
.mode = S_IRUGO,
@@ -568,6 +583,27 @@ static const struct device_attribute olpc_bat_error = {
.show = olpc_bat_error_read,
};
+static struct attribute *olpc_bat_sysfs_attrs[] = {
+ &olpc_bat_error.attr,
+ NULL
+};
+
+static struct bin_attribute *olpc_bat_sysfs_bin_attrs[] = {
+ &olpc_bat_eeprom,
+ NULL
+};
+
+static const struct attribute_group olpc_bat_sysfs_group = {
+ .attrs = olpc_bat_sysfs_attrs,
+ .bin_attrs = olpc_bat_sysfs_bin_attrs,
+
+};
+
+static const struct attribute_group *olpc_bat_sysfs_groups[] = {
+ &olpc_bat_sysfs_group,
+ NULL
+};
+
/*********************************************************************
* Initialisation
*********************************************************************/
@@ -578,17 +614,17 @@ static struct power_supply_desc olpc_bat_desc = {
.use_for_apm = 1,
};
-static struct power_supply *olpc_bat;
-
static int olpc_battery_suspend(struct platform_device *pdev,
pm_message_t state)
{
- if (device_may_wakeup(&olpc_ac->dev))
+ struct olpc_battery_data *data = platform_get_drvdata(pdev);
+
+ if (device_may_wakeup(&data->olpc_ac->dev))
olpc_ec_wakeup_set(EC_SCI_SRC_ACPWR);
else
olpc_ec_wakeup_clear(EC_SCI_SRC_ACPWR);
- if (device_may_wakeup(&olpc_bat->dev))
+ if (device_may_wakeup(&data->olpc_bat->dev))
olpc_ec_wakeup_set(EC_SCI_SRC_BATTERY | EC_SCI_SRC_BATSOC
| EC_SCI_SRC_BATERR);
else
@@ -600,16 +636,37 @@ static int olpc_battery_suspend(struct platform_device *pdev,
static int olpc_battery_probe(struct platform_device *pdev)
{
- int ret;
+ struct power_supply_config bat_psy_cfg = {};
+ struct power_supply_config ac_psy_cfg = {};
+ struct olpc_battery_data *data;
uint8_t status;
+ uint8_t ecver;
+ int ret;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, data);
- /*
- * We've seen a number of EC protocol changes; this driver requires
- * the latest EC protocol, supported by 0x44 and above.
- */
- if (olpc_platform_info.ecver < 0x44) {
+ /* See if the EC is already there and get the EC revision */
+ ret = olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, &ecver, 1);
+ if (ret)
+ return ret;
+
+ if (of_find_compatible_node(NULL, NULL, "olpc,xo1.75-ec")) {
+ /* XO 1.75 */
+ data->new_proto = true;
+ data->little_endian = true;
+ } else if (ecver > 0x44) {
+ /* XO 1 or 1.5 with a new EC firmware. */
+ data->new_proto = true;
+ } else if (ecver < 0x44) {
+ /*
+ * We've seen a number of EC protocol changes; this driver
+ * requires the latest EC protocol, supported by 0x44 and above.
+ */
printk(KERN_NOTICE "OLPC EC version 0x%02x too old for "
- "battery driver.\n", olpc_platform_info.ecver);
+ "battery driver.\n", ecver);
return -ENXIO;
}
@@ -619,59 +676,44 @@ static int olpc_battery_probe(struct platform_device *pdev)
/* Ignore the status. It doesn't actually matter */
- olpc_ac = power_supply_register(&pdev->dev, &olpc_ac_desc, NULL);
- if (IS_ERR(olpc_ac))
- return PTR_ERR(olpc_ac);
+ ac_psy_cfg.of_node = pdev->dev.of_node;
+ ac_psy_cfg.drv_data = data;
+
+ data->olpc_ac = devm_power_supply_register(&pdev->dev, &olpc_ac_desc,
+ &ac_psy_cfg);
+ if (IS_ERR(data->olpc_ac))
+ return PTR_ERR(data->olpc_ac);
- if (olpc_board_at_least(olpc_board_pre(0xd0))) { /* XO-1.5 */
+ if (of_device_is_compatible(pdev->dev.of_node, "olpc,xo1.5-battery")) {
+ /* XO-1.5 */
olpc_bat_desc.properties = olpc_xo15_bat_props;
olpc_bat_desc.num_properties = ARRAY_SIZE(olpc_xo15_bat_props);
- } else { /* XO-1 */
+ } else {
+ /* XO-1 */
olpc_bat_desc.properties = olpc_xo1_bat_props;
olpc_bat_desc.num_properties = ARRAY_SIZE(olpc_xo1_bat_props);
}
- olpc_bat = power_supply_register(&pdev->dev, &olpc_bat_desc, NULL);
- if (IS_ERR(olpc_bat)) {
- ret = PTR_ERR(olpc_bat);
- goto battery_failed;
- }
-
- ret = device_create_bin_file(&olpc_bat->dev, &olpc_bat_eeprom);
- if (ret)
- goto eeprom_failed;
+ bat_psy_cfg.of_node = pdev->dev.of_node;
+ bat_psy_cfg.drv_data = data;
+ bat_psy_cfg.attr_grp = olpc_bat_sysfs_groups;
- ret = device_create_file(&olpc_bat->dev, &olpc_bat_error);
- if (ret)
- goto error_failed;
+ data->olpc_bat = devm_power_supply_register(&pdev->dev, &olpc_bat_desc,
+ &bat_psy_cfg);
+ if (IS_ERR(data->olpc_bat))
+ return PTR_ERR(data->olpc_bat);
if (olpc_ec_wakeup_available()) {
- device_set_wakeup_capable(&olpc_ac->dev, true);
- device_set_wakeup_capable(&olpc_bat->dev, true);
+ device_set_wakeup_capable(&data->olpc_ac->dev, true);
+ device_set_wakeup_capable(&data->olpc_bat->dev, true);
}
return 0;
-
-error_failed:
- device_remove_bin_file(&olpc_bat->dev, &olpc_bat_eeprom);
-eeprom_failed:
- power_supply_unregister(olpc_bat);
-battery_failed:
- power_supply_unregister(olpc_ac);
- return ret;
-}
-
-static int olpc_battery_remove(struct platform_device *pdev)
-{
- device_remove_file(&olpc_bat->dev, &olpc_bat_error);
- device_remove_bin_file(&olpc_bat->dev, &olpc_bat_eeprom);
- power_supply_unregister(olpc_bat);
- power_supply_unregister(olpc_ac);
- return 0;
}
static const struct of_device_id olpc_battery_ids[] = {
{ .compatible = "olpc,xo1-battery" },
+ { .compatible = "olpc,xo1.5-battery" },
{}
};
MODULE_DEVICE_TABLE(of, olpc_battery_ids);
@@ -682,7 +724,6 @@ static struct platform_driver olpc_battery_driver = {
.of_match_table = olpc_battery_ids,
},
.probe = olpc_battery_probe,
- .remove = olpc_battery_remove,
.suspend = olpc_battery_suspend,
};
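
Instead of creating sysfs files by hand and removing them in a remove() callback, the rewritten OLPC probe hands its attribute groups to the power-supply core via power_supply_config; the core attaches them when the supply is registered and drops them with it. A compressed sketch, assuming the group array is defined elsewhere:

#include <linux/err.h>
#include <linux/power_supply.h>
#include <linux/sysfs.h>

static const struct attribute_group *example_groups[] = {
	/* &example_group, */
	NULL
};

static int example_register(struct device *dev,
			    const struct power_supply_desc *desc, void *drv)
{
	struct power_supply_config cfg = {};
	struct power_supply *psy;

	cfg.drv_data = drv;
	cfg.attr_grp = example_groups;	/* lifetime tied to the supply */

	psy = devm_power_supply_register(dev, desc, &cfg);
	return PTR_ERR_OR_ZERO(psy);
}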
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index c917a8b43b2b..f7033ecf6d0b 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -598,10 +598,12 @@ int power_supply_get_battery_info(struct power_supply *psy,
err = of_property_read_string(battery_np, "compatible", &value);
if (err)
- return err;
+ goto out_put_node;
- if (strcmp("simple-battery", value))
- return -ENODEV;
+ if (strcmp("simple-battery", value)) {
+ err = -ENODEV;
+ goto out_put_node;
+ }
/* The property and field names below must correspond to elements
* in enum power_supply_property. For reasoning, see
@@ -620,19 +622,21 @@ int power_supply_get_battery_info(struct power_supply *psy,
&info->precharge_current_ua);
of_property_read_u32(battery_np, "charge-term-current-microamp",
&info->charge_term_current_ua);
- of_property_read_u32(battery_np, "constant_charge_current_max_microamp",
+ of_property_read_u32(battery_np, "constant-charge-current-max-microamp",
&info->constant_charge_current_max_ua);
- of_property_read_u32(battery_np, "constant_charge_voltage_max_microvolt",
+ of_property_read_u32(battery_np, "constant-charge-voltage-max-microvolt",
&info->constant_charge_voltage_max_uv);
of_property_read_u32(battery_np, "factory-internal-resistance-micro-ohms",
&info->factory_internal_resistance_uohm);
len = of_property_count_u32_elems(battery_np, "ocv-capacity-celsius");
if (len < 0 && len != -EINVAL) {
- return len;
+ err = len;
+ goto out_put_node;
} else if (len > POWER_SUPPLY_OCV_TEMP_MAX) {
dev_err(&psy->dev, "Too many temperature values\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto out_put_node;
} else if (len > 0) {
of_property_read_u32_array(battery_np, "ocv-capacity-celsius",
info->ocv_temp, len);
@@ -650,7 +654,8 @@ int power_supply_get_battery_info(struct power_supply *psy,
dev_err(&psy->dev, "failed to get %s\n", propname);
kfree(propname);
power_supply_put_battery_info(psy, info);
- return -EINVAL;
+ err = -EINVAL;
+ goto out_put_node;
}
kfree(propname);
@@ -661,16 +666,21 @@ int power_supply_get_battery_info(struct power_supply *psy,
devm_kcalloc(&psy->dev, tab_len, sizeof(*table), GFP_KERNEL);
if (!info->ocv_table[index]) {
power_supply_put_battery_info(psy, info);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto out_put_node;
}
for (i = 0; i < tab_len; i++) {
- table[i].ocv = be32_to_cpu(*list++);
- table[i].capacity = be32_to_cpu(*list++);
+ table[i].ocv = be32_to_cpu(*list);
+ list++;
+ table[i].capacity = be32_to_cpu(*list);
+ list++;
}
}
- return 0;
+out_put_node:
+ of_node_put(battery_np);
+ return err;
}
EXPORT_SYMBOL_GPL(power_supply_get_battery_info);
@@ -899,7 +909,7 @@ static int ps_get_max_charge_cntl_limit(struct thermal_cooling_device *tcd,
return ret;
}
-static int ps_get_cur_chrage_cntl_limit(struct thermal_cooling_device *tcd,
+static int ps_get_cur_charge_cntl_limit(struct thermal_cooling_device *tcd,
unsigned long *state)
{
struct power_supply *psy;
@@ -934,7 +944,7 @@ static int ps_set_cur_charge_cntl_limit(struct thermal_cooling_device *tcd,
static const struct thermal_cooling_device_ops psy_tcd_ops = {
.get_max_state = ps_get_max_charge_cntl_limit,
- .get_cur_state = ps_get_cur_chrage_cntl_limit,
+ .get_cur_state = ps_get_cur_charge_cntl_limit,
.set_cur_state = ps_set_cur_charge_cntl_limit,
};
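
The power_supply_get_battery_info() hunks above route every early return through a single out_put_node label so the device_node obtained from the monitored-battery phandle is released on all paths. The same of_node_put() discipline in miniature, with a made-up property name:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/power_supply.h>

static int example_read_battery_node(struct power_supply *psy, u32 *out)
{
	struct device_node *np;
	int err;

	np = of_parse_phandle(psy->of_node, "monitored-battery", 0);
	if (!np)
		return -ENODEV;

	err = of_property_read_u32(np, "example-microvolt", out);	/* invented property */

	/* One exit point: the reference is dropped on success and failure alike. */
	of_node_put(np);
	return err;
}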
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 5358a80d854f..a704a76d7529 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -56,13 +56,13 @@ static const char * const power_supply_status_text[] = {
};
static const char * const power_supply_charge_type_text[] = {
- "Unknown", "N/A", "Trickle", "Fast"
+ "Unknown", "N/A", "Trickle", "Fast", "Standard", "Adaptive", "Custom"
};
static const char * const power_supply_health_text[] = {
"Unknown", "Good", "Overheat", "Dead", "Over voltage",
"Unspecified failure", "Cold", "Watchdog timer expire",
- "Safety timer expire"
+ "Safety timer expire", "Over current"
};
static const char * const power_supply_technology_text[] = {
@@ -274,6 +274,8 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(constant_charge_voltage_max),
POWER_SUPPLY_ATTR(charge_control_limit),
POWER_SUPPLY_ATTR(charge_control_limit_max),
+ POWER_SUPPLY_ATTR(charge_control_start_threshold),
+ POWER_SUPPLY_ATTR(charge_control_end_threshold),
POWER_SUPPLY_ATTR(input_current_limit),
POWER_SUPPLY_ATTR(energy_full_design),
POWER_SUPPLY_ATTR(energy_empty_design),
diff --git a/drivers/power/supply/ucs1002_power.c b/drivers/power/supply/ucs1002_power.c
new file mode 100644
index 000000000000..1c89d030c045
--- /dev/null
+++ b/drivers/power/supply/ucs1002_power.c
@@ -0,0 +1,646 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for UCS1002 Programmable USB Port Power Controller
+ *
+ * Copyright (C) 2019 Zodiac Inflight Innovations
+ */
+#include <linux/bits.h>
+#include <linux/freezer.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+/* UCS1002 Registers */
+#define UCS1002_REG_CURRENT_MEASUREMENT 0x00
+
+/*
+ * The Total Accumulated Charge registers store the total accumulated
+ * charge delivered from the VS source to a portable device. The total
+ * value is calculated using four registers, from 01h to 04h. The bit
+ * weighting of the registers is given in mA/hrs.
+ */
+#define UCS1002_REG_TOTAL_ACC_CHARGE 0x01
+
+/* Other Status Register */
+#define UCS1002_REG_OTHER_STATUS 0x0f
+# define F_ADET_PIN BIT(4)
+# define F_CHG_ACT BIT(3)
+
+/* Interrupt Status */
+#define UCS1002_REG_INTERRUPT_STATUS 0x10
+# define F_DISCHARGE_ERR BIT(6)
+# define F_RESET BIT(5)
+# define F_MIN_KEEP_OUT BIT(4)
+# define F_TSD BIT(3)
+# define F_OVER_VOLT BIT(2)
+# define F_BACK_VOLT BIT(1)
+# define F_OVER_ILIM BIT(0)
+
+/* Pin Status Register */
+#define UCS1002_REG_PIN_STATUS 0x14
+# define UCS1002_PWR_STATE_MASK 0x03
+# define F_PWR_EN_PIN BIT(6)
+# define F_M2_PIN BIT(5)
+# define F_M1_PIN BIT(4)
+# define F_EM_EN_PIN BIT(3)
+# define F_SEL_PIN BIT(2)
+# define F_ACTIVE_MODE_MASK GENMASK(5, 3)
+# define F_ACTIVE_MODE_PASSTHROUGH F_M2_PIN
+# define F_ACTIVE_MODE_DEDICATED F_EM_EN_PIN
+# define F_ACTIVE_MODE_BC12_DCP (F_M2_PIN | F_EM_EN_PIN)
+# define F_ACTIVE_MODE_BC12_SDP F_M1_PIN
+# define F_ACTIVE_MODE_BC12_CDP (F_M1_PIN | F_M2_PIN | F_EM_EN_PIN)
+
+/* General Configuration Register */
+#define UCS1002_REG_GENERAL_CFG 0x15
+# define F_RATION_EN BIT(3)
+
+/* Emulation Configuration Register */
+#define UCS1002_REG_EMU_CFG 0x16
+
+/* Switch Configuration Register */
+#define UCS1002_REG_SWITCH_CFG 0x17
+# define F_PIN_IGNORE BIT(7)
+# define F_EM_EN_SET BIT(5)
+# define F_M2_SET BIT(4)
+# define F_M1_SET BIT(3)
+# define F_S0_SET BIT(2)
+# define F_PWR_EN_SET BIT(1)
+# define F_LATCH_SET BIT(0)
+# define V_SET_ACTIVE_MODE_MASK GENMASK(5, 3)
+# define V_SET_ACTIVE_MODE_PASSTHROUGH F_M2_SET
+# define V_SET_ACTIVE_MODE_DEDICATED F_EM_EN_SET
+# define V_SET_ACTIVE_MODE_BC12_DCP (F_M2_SET | F_EM_EN_SET)
+# define V_SET_ACTIVE_MODE_BC12_SDP F_M1_SET
+# define V_SET_ACTIVE_MODE_BC12_CDP (F_M1_SET | F_M2_SET | F_EM_EN_SET)
+
+/* Current Limit Register */
+#define UCS1002_REG_ILIMIT 0x19
+# define UCS1002_ILIM_SW_MASK GENMASK(3, 0)
+
+/* Product ID */
+#define UCS1002_REG_PRODUCT_ID 0xfd
+# define UCS1002_PRODUCT_ID 0x4e
+
+/* Manufacture name */
+#define UCS1002_MANUFACTURER "SMSC"
+
+struct ucs1002_info {
+ struct power_supply *charger;
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct regulator_desc *regulator_descriptor;
+ bool present;
+};
+
+static enum power_supply_property ucs1002_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_PRESENT, /* the presence of PED */
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_USB_TYPE,
+ POWER_SUPPLY_PROP_HEALTH,
+};
+
+static int ucs1002_get_online(struct ucs1002_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(info->regmap, UCS1002_REG_OTHER_STATUS, &reg);
+ if (ret)
+ return ret;
+
+ val->intval = !!(reg & F_CHG_ACT);
+
+ return 0;
+}
+
+static int ucs1002_get_charge(struct ucs1002_info *info,
+ union power_supply_propval *val)
+{
+ /*
+ * To fit within 32 bits some values are rounded (uA/h)
+ *
+ * For Total Accumulated Charge Middle Low Byte register, addr
+ * 03h, byte 2
+ *
+ * B0: 0.01084 mA/h rounded to 11 uA/h
+ * B1: 0.02169 mA/h rounded to 22 uA/h
+ * B2: 0.04340 mA/h rounded to 43 uA/h
+ * B3: 0.08676 mA/h rounded to 87 uA/h
+ * B4: 0.17350 mA/h rounded to 173 uA/h
+ *
+ * For Total Accumulated Charge Low Byte register, addr 04h,
+ * byte 3
+ *
+ * B6: 0.00271 mA/h rounded to 3 uA/h
+ * B7: 0.005422 mA/h rounded to 5 uA/h
+ */
+ static const int bit_weights_uAh[BITS_PER_TYPE(u32)] = {
+ /*
+ * Bit corresponding to low byte (offset 0x04)
+ * B0 B1 B2 B3 B4 B5 B6 B7
+ */
+ 0, 0, 0, 0, 0, 0, 3, 5,
+ /*
+ * Bit corresponding to middle low byte (offset 0x03)
+ * B0 B1 B2 B3 B4 B5 B6 B7
+ */
+ 11, 22, 43, 87, 173, 347, 694, 1388,
+ /*
+ * Bit corresponding to middle high byte (offset 0x02)
+ * B0 B1 B2 B3 B4 B5 B6 B7
+ */
+ 2776, 5552, 11105, 22210, 44420, 88840, 177700, 355400,
+ /*
+ * Bit corresponding to high byte (offset 0x01)
+ * B0 B1 B2 B3 B4 B5 B6 B7
+ */
+ 710700, 1421000, 2843000, 5685000, 11371000, 22742000,
+ 45484000, 90968000,
+ };
+ unsigned long total_acc_charger;
+ unsigned int reg;
+ int i, ret;
+
+ ret = regmap_bulk_read(info->regmap, UCS1002_REG_TOTAL_ACC_CHARGE,
+ &reg, sizeof(u32));
+ if (ret)
+ return ret;
+
+ total_acc_charger = be32_to_cpu(reg); /* BE as per offsets above */
+ val->intval = 0;
+
+ for_each_set_bit(i, &total_acc_charger, ARRAY_SIZE(bit_weights_uAh))
+ val->intval += bit_weights_uAh[i];
+
+ return 0;
+}
+
+static int ucs1002_get_current(struct ucs1002_info *info,
+ union power_supply_propval *val)
+{
+ /*
+ * The Current Measurement register stores the measured
+ * current value delivered to the portable device. The range
+ * is from 9.76 mA to 2.5 A.
+ */
+ static const int bit_weights_uA[BITS_PER_TYPE(u8)] = {
+ 9760, 19500, 39000, 78100, 156200, 312300, 624600, 1249300,
+ };
+ unsigned long current_measurement;
+ unsigned int reg;
+ int i, ret;
+
+ ret = regmap_read(info->regmap, UCS1002_REG_CURRENT_MEASUREMENT, &reg);
+ if (ret)
+ return ret;
+
+ current_measurement = reg;
+ val->intval = 0;
+
+ for_each_set_bit(i, &current_measurement, ARRAY_SIZE(bit_weights_uA))
+ val->intval += bit_weights_uA[i];
+
+ return 0;
+}
+
+/*
+ * The Current Limit register stores the maximum current used by the
+ * port switch. The range is from 500mA to 2.5 A.
+ */
+static const u32 ucs1002_current_limit_uA[] = {
+ 500000, 900000, 1000000, 1200000, 1500000, 1800000, 2000000, 2500000,
+};
+
+static int ucs1002_get_max_current(struct ucs1002_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(info->regmap, UCS1002_REG_ILIMIT, &reg);
+ if (ret)
+ return ret;
+
+ val->intval = ucs1002_current_limit_uA[reg & UCS1002_ILIM_SW_MASK];
+
+ return 0;
+}
+
+static int ucs1002_set_max_current(struct ucs1002_info *info, u32 val)
+{
+ unsigned int reg;
+ int ret, idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(ucs1002_current_limit_uA); idx++) {
+ if (val == ucs1002_current_limit_uA[idx])
+ break;
+ }
+
+ if (idx == ARRAY_SIZE(ucs1002_current_limit_uA))
+ return -EINVAL;
+
+ ret = regmap_write(info->regmap, UCS1002_REG_ILIMIT, idx);
+ if (ret)
+ return ret;
+ /*
+ * Any current limit setting exceeding the one set via ILIM
+ * pin will be rejected, so we read out freshly changed limit
+ * to make sure that it took effect.
+ */
+ ret = regmap_read(info->regmap, UCS1002_REG_ILIMIT, &reg);
+ if (ret)
+ return ret;
+
+ if (reg != idx)
+ return -EINVAL;
+
+ return 0;
+}
+
+static enum power_supply_usb_type ucs1002_usb_types[] = {
+ POWER_SUPPLY_USB_TYPE_PD,
+ POWER_SUPPLY_USB_TYPE_SDP,
+ POWER_SUPPLY_USB_TYPE_DCP,
+ POWER_SUPPLY_USB_TYPE_CDP,
+ POWER_SUPPLY_USB_TYPE_UNKNOWN,
+};
+
+static int ucs1002_set_usb_type(struct ucs1002_info *info, int val)
+{
+ unsigned int mode;
+
+ if (val < 0 || val >= ARRAY_SIZE(ucs1002_usb_types))
+ return -EINVAL;
+
+ switch (ucs1002_usb_types[val]) {
+ case POWER_SUPPLY_USB_TYPE_PD:
+ mode = V_SET_ACTIVE_MODE_DEDICATED;
+ break;
+ case POWER_SUPPLY_USB_TYPE_SDP:
+ mode = V_SET_ACTIVE_MODE_BC12_SDP;
+ break;
+ case POWER_SUPPLY_USB_TYPE_DCP:
+ mode = V_SET_ACTIVE_MODE_BC12_DCP;
+ break;
+ case POWER_SUPPLY_USB_TYPE_CDP:
+ mode = V_SET_ACTIVE_MODE_BC12_CDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(info->regmap, UCS1002_REG_SWITCH_CFG,
+ V_SET_ACTIVE_MODE_MASK, mode);
+}
+
+static int ucs1002_get_usb_type(struct ucs1002_info *info,
+ union power_supply_propval *val)
+{
+ enum power_supply_usb_type type;
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(info->regmap, UCS1002_REG_PIN_STATUS, &reg);
+ if (ret)
+ return ret;
+
+ switch (reg & F_ACTIVE_MODE_MASK) {
+ default:
+ type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ break;
+ case F_ACTIVE_MODE_DEDICATED:
+ type = POWER_SUPPLY_USB_TYPE_PD;
+ break;
+ case F_ACTIVE_MODE_BC12_SDP:
+ type = POWER_SUPPLY_USB_TYPE_SDP;
+ break;
+ case F_ACTIVE_MODE_BC12_DCP:
+ type = POWER_SUPPLY_USB_TYPE_DCP;
+ break;
+ case F_ACTIVE_MODE_BC12_CDP:
+ type = POWER_SUPPLY_USB_TYPE_CDP;
+ break;
+ };
+
+ val->intval = type;
+
+ return 0;
+}
+
+static int ucs1002_get_health(struct ucs1002_info *info,
+ union power_supply_propval *val)
+{
+ unsigned int reg;
+ int ret, health;
+
+ ret = regmap_read(info->regmap, UCS1002_REG_INTERRUPT_STATUS, &reg);
+ if (ret)
+ return ret;
+
+ if (reg & F_TSD)
+ health = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (reg & (F_OVER_VOLT | F_BACK_VOLT))
+ health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ else if (reg & F_OVER_ILIM)
+ health = POWER_SUPPLY_HEALTH_OVERCURRENT;
+ else if (reg & (F_DISCHARGE_ERR | F_MIN_KEEP_OUT))
+ health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ else
+ health = POWER_SUPPLY_HEALTH_GOOD;
+
+ val->intval = health;
+
+ return 0;
+}
+
+static int ucs1002_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct ucs1002_info *info = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ return ucs1002_get_online(info, val);
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ return ucs1002_get_charge(info, val);
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ return ucs1002_get_current(info, val);
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ return ucs1002_get_max_current(info, val);
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ return ucs1002_get_usb_type(info, val);
+ case POWER_SUPPLY_PROP_HEALTH:
+ return ucs1002_get_health(info, val);
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = info->present;
+ return 0;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = UCS1002_MANUFACTURER;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ucs1002_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct ucs1002_info *info = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ return ucs1002_set_max_current(info, val->intval);
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ return ucs1002_set_usb_type(info, val->intval);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ucs1002_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct power_supply_desc ucs1002_charger_desc = {
+ .name = "ucs1002",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .usb_types = ucs1002_usb_types,
+ .num_usb_types = ARRAY_SIZE(ucs1002_usb_types),
+ .get_property = ucs1002_get_property,
+ .set_property = ucs1002_set_property,
+ .property_is_writeable = ucs1002_property_is_writeable,
+ .properties = ucs1002_props,
+ .num_properties = ARRAY_SIZE(ucs1002_props),
+};
+
+static irqreturn_t ucs1002_charger_irq(int irq, void *data)
+{
+ int ret, regval;
+ bool present;
+ struct ucs1002_info *info = data;
+
+ present = info->present;
+
+ ret = regmap_read(info->regmap, UCS1002_REG_OTHER_STATUS, &regval);
+ if (ret)
+ return IRQ_HANDLED;
+
+ /* update attached status */
+ info->present = regval & F_ADET_PIN;
+
+ /* notify the change */
+ if (present != info->present)
+ power_supply_changed(info->charger);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ucs1002_alert_irq(int irq, void *data)
+{
+ struct ucs1002_info *info = data;
+
+ power_supply_changed(info->charger);
+
+ return IRQ_HANDLED;
+}
+
+static const struct regulator_ops ucs1002_regulator_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+};
+
+static const struct regulator_desc ucs1002_regulator_descriptor = {
+ .name = "ucs1002-vbus",
+ .ops = &ucs1002_regulator_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = UCS1002_REG_SWITCH_CFG,
+ .enable_mask = F_PWR_EN_SET,
+ .enable_val = F_PWR_EN_SET,
+ .fixed_uV = 5000000,
+ .n_voltages = 1,
+};
+
+static int ucs1002_probe(struct i2c_client *client,
+ const struct i2c_device_id *dev_id)
+{
+ struct device *dev = &client->dev;
+ struct power_supply_config charger_config = {};
+ const struct regmap_config regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ };
+ struct regulator_config regulator_config = {};
+ int irq_a_det, irq_alert, ret;
+ struct regulator_dev *rdev;
+ struct ucs1002_info *info;
+ unsigned int regval;
+
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->regmap = devm_regmap_init_i2c(client, &regmap_config);
+ ret = PTR_ERR_OR_ZERO(info->regmap);
+ if (ret) {
+ dev_err(dev, "Regmap initialization failed: %d\n", ret);
+ return ret;
+ }
+
+ info->client = client;
+
+ irq_a_det = of_irq_get_byname(dev->of_node, "a_det");
+ irq_alert = of_irq_get_byname(dev->of_node, "alert");
+
+ charger_config.of_node = dev->of_node;
+ charger_config.drv_data = info;
+
+ ret = regmap_read(info->regmap, UCS1002_REG_PRODUCT_ID, &regval);
+ if (ret) {
+ dev_err(dev, "Failed to read product ID: %d\n", ret);
+ return ret;
+ }
+
+ if (regval != UCS1002_PRODUCT_ID) {
+ dev_err(dev,
+ "Product ID does not match (0x%02x != 0x%02x)\n",
+ regval, UCS1002_PRODUCT_ID);
+ return -ENODEV;
+ }
+
+ /* Enable charge rationing by default */
+ ret = regmap_update_bits(info->regmap, UCS1002_REG_GENERAL_CFG,
+ F_RATION_EN, F_RATION_EN);
+ if (ret) {
+ dev_err(dev, "Failed to read general config: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Ignore the M1, M2, PWR_EN, and EM_EN pin states. Set active
+ * mode selection to BC1.2 CDP.
+ */
+ ret = regmap_update_bits(info->regmap, UCS1002_REG_SWITCH_CFG,
+ V_SET_ACTIVE_MODE_MASK | F_PIN_IGNORE,
+ V_SET_ACTIVE_MODE_BC12_CDP | F_PIN_IGNORE);
+ if (ret) {
+ dev_err(dev, "Failed to configure default mode: %d\n", ret);
+ return ret;
+ }
+ /*
+ * Be safe and set initial current limit to 500mA
+ */
+ ret = ucs1002_set_max_current(info, 500000);
+ if (ret) {
+ dev_err(dev, "Failed to set max current default: %d\n", ret);
+ return ret;
+ }
+
+ info->charger = devm_power_supply_register(dev, &ucs1002_charger_desc,
+ &charger_config);
+ ret = PTR_ERR_OR_ZERO(info->charger);
+ if (ret) {
+ dev_err(dev, "Failed to register power supply: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(info->regmap, UCS1002_REG_PIN_STATUS, &regval);
+ if (ret) {
+ dev_err(dev, "Failed to read pin status: %d\n", ret);
+ return ret;
+ }
+
+ info->regulator_descriptor =
+ devm_kmemdup(dev, &ucs1002_regulator_descriptor,
+ sizeof(ucs1002_regulator_descriptor),
+ GFP_KERNEL);
+ if (!info->regulator_descriptor)
+ return -ENOMEM;
+
+ info->regulator_descriptor->enable_is_inverted = !(regval & F_SEL_PIN);
+
+ regulator_config.dev = dev;
+ regulator_config.of_node = dev->of_node;
+ regulator_config.regmap = info->regmap;
+
+ rdev = devm_regulator_register(dev, info->regulator_descriptor,
+ &regulator_config);
+ ret = PTR_ERR_OR_ZERO(rdev);
+ if (ret) {
+ dev_err(dev, "Failed to register VBUS regulator: %d\n", ret);
+ return ret;
+ }
+
+ if (irq_a_det > 0) {
+ ret = devm_request_threaded_irq(dev, irq_a_det, NULL,
+ ucs1002_charger_irq,
+ IRQF_ONESHOT,
+ "ucs1002-a_det", info);
+ if (ret) {
+ dev_err(dev, "Failed to request A_DET threaded irq: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (irq_alert > 0) {
+ ret = devm_request_threaded_irq(dev, irq_alert, NULL,
+ ucs1002_alert_irq,
+ IRQF_ONESHOT,
+ "ucs1002-alert", info);
+ if (ret) {
+ dev_err(dev, "Failed to request ALERT threaded irq: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id ucs1002_of_match[] = {
+ { .compatible = "microchip,ucs1002", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ucs1002_of_match);
+
+static struct i2c_driver ucs1002_driver = {
+ .driver = {
+ .name = "ucs1002",
+ .of_match_table = ucs1002_of_match,
+ },
+ .probe = ucs1002_probe,
+};
+module_i2c_driver(ucs1002_driver);
+
+MODULE_DESCRIPTION("Microchip UCS1002 Programmable USB Port Power Controller");
+MODULE_AUTHOR("Enric Balletbo Serra <enric.balletbo@collabora.com>");
+MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>");
+MODULE_LICENSE("GPL");
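
ucs1002_get_charge() and ucs1002_get_current() above decode their registers by summing a per-bit weight for every set bit, because the hardware's bit weights are rounded values rather than one clean LSB step. A stripped-down sketch of the same decoding, reusing the current-measurement weights from the patch:

#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Per-bit weights in uA, least significant bit first (values from the driver above). */
static const int example_weights_uA[BITS_PER_TYPE(u8)] = {
	9760, 19500, 39000, 78100, 156200, 312300, 624600, 1249300,
};

static int example_decode_current_uA(u8 reg)
{
	unsigned long bits = reg;
	int i, total_uA = 0;

	/* Every set bit contributes its own weight to the total. */
	for_each_set_bit(i, &bits, BITS_PER_TYPE(u8))
		total_uA += example_weights_uA[i];

	return total_uA;
}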
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index 0d0f8376bc35..7da1fdb4d269 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -17,10 +17,10 @@
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/platform_device.h>
-#include <linux/atmel_tc.h>
#include <linux/pwm.h>
#include <linux/of_device.h>
#include <linux/slab.h>
+#include <soc/at91/atmel_tcb.h>
#define NPWM 6
diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c
index bbf10ae02f0e..fa168581e6b8 100644
--- a/drivers/pwm/pwm-ep93xx.c
+++ b/drivers/pwm/pwm-ep93xx.c
@@ -35,7 +35,7 @@
#include <asm/div64.h>
-#include <mach/platform.h> /* for ep93xx_pwm_{acquire,release}_gpio() */
+#include <linux/soc/cirrus/ep93xx.h> /* for ep93xx_pwm_{acquire,release}_gpio() */
#define EP93XX_PWMx_TERM_COUNT 0x00
#define EP93XX_PWMx_DUTY_CYCLE 0x04
diff --git a/drivers/reset/reset-zynqmp.c b/drivers/reset/reset-zynqmp.c
index 2ef1f13aa47b..99e75d92dada 100644
--- a/drivers/reset/reset-zynqmp.c
+++ b/drivers/reset/reset-zynqmp.c
@@ -79,11 +79,11 @@ static int zynqmp_reset_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- platform_set_drvdata(pdev, priv);
-
priv->eemi_ops = zynqmp_pm_get_eemi_ops();
- if (!priv->eemi_ops)
- return -ENXIO;
+ if (IS_ERR(priv->eemi_ops))
+ return PTR_ERR(priv->eemi_ops);
+
+ platform_set_drvdata(pdev, priv);
priv->rcdev.ops = &zynqmp_reset_ops;
priv->rcdev.owner = THIS_MODULE;
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 32994b0dd139..a2941c875a06 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -403,15 +403,12 @@ static int omap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
static struct omap_rtc *omap_rtc_power_off_rtc;
-/*
- * omap_rtc_poweroff: RTC-controlled power off
- *
- * The RTC can be used to control an external PMIC via the pmic_power_en pin,
- * which can be configured to transition to OFF on ALARM2 events.
- *
- * Called with local interrupts disabled.
+/**
+ * omap_rtc_power_off_program: Set the pmic power off sequence. The RTC
+ * generates pmic_pwr_enable control, which can be used to control an external
+ * PMIC.
*/
-static void omap_rtc_power_off(void)
+int omap_rtc_power_off_program(struct device *dev)
{
struct omap_rtc *rtc = omap_rtc_power_off_rtc;
struct rtc_time tm;
@@ -425,6 +422,9 @@ static void omap_rtc_power_off(void)
rtc_writel(rtc, OMAP_RTC_PMIC_REG, val | OMAP_RTC_PMIC_POWER_EN_EN);
again:
+ /* Clear any existing ALARM2 event */
+ rtc_writel(rtc, OMAP_RTC_STATUS_REG, OMAP_RTC_STATUS_ALARM2);
+
/* set alarm one second from now */
omap_rtc_read_time_raw(rtc, &tm);
seconds = tm.tm_sec;
@@ -461,6 +461,39 @@ again:
rtc->type->lock(rtc);
+ return 0;
+}
+EXPORT_SYMBOL(omap_rtc_power_off_program);
+
+/*
+ * omap_rtc_poweroff: RTC-controlled power off
+ *
+ * The RTC can be used to control an external PMIC via the pmic_power_en pin,
+ * which can be configured to transition to OFF on ALARM2 events.
+ *
+ * Notes:
+ * The one-second alarm offset is the shortest offset possible as the alarm
+ * registers must be set before the next timer update and the offset
+ * calculation is too heavy for everything to be done within a single access
+ * period (~15 us).
+ *
+ * Called with local interrupts disabled.
+ */
+static void omap_rtc_power_off(void)
+{
+ struct rtc_device *rtc = omap_rtc_power_off_rtc->rtc;
+ u32 val;
+
+ omap_rtc_power_off_program(rtc->dev.parent);
+
+ /* Set PMIC power enable and EXT_WAKEUP in case PB power on is used */
+ omap_rtc_power_off_rtc->type->unlock(omap_rtc_power_off_rtc);
+ val = rtc_readl(omap_rtc_power_off_rtc, OMAP_RTC_PMIC_REG);
+ val |= OMAP_RTC_PMIC_POWER_EN_EN | OMAP_RTC_PMIC_EXT_WKUP_POL(0) |
+ OMAP_RTC_PMIC_EXT_WKUP_EN(0);
+ rtc_writel(omap_rtc_power_off_rtc, OMAP_RTC_PMIC_REG, val);
+ omap_rtc_power_off_rtc->type->lock(omap_rtc_power_off_rtc);
+
/*
* Wait for alarm to trigger (within one second) and external PMIC to
* power off the system. Add a 500 ms margin for external latencies
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index f89f9d02e788..c09039eea707 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -3827,7 +3827,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
if ((start_padding_sectors || end_padding_sectors) &&
(rq_data_dir(req) == WRITE)) {
DBF_DEV_EVENT(DBF_ERR, basedev,
- "raw write not track aligned (%lu,%lu) req %p",
+ "raw write not track aligned (%llu,%llu) req %p",
start_padding_sectors, end_padding_sectors, req);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index cfce255521ac..7b7620de2acd 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -205,17 +205,22 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
int auto_ack, int merge_pending)
{
unsigned char __state = 0;
- int i;
+ int i = 1;
if (is_qebsm(q))
return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
/* get initial state: */
__state = q->slsb.val[bufnr];
+
+ /* Bail out early if there is no work on the queue: */
+ if (__state & SLSB_OWNER_CU)
+ goto out;
+
if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
__state = SLSB_P_OUTPUT_EMPTY;
- for (i = 1; i < count; i++) {
+ for (; i < count; i++) {
bufnr = next_buf(bufnr);
/* merge PENDING into EMPTY: */
@@ -228,6 +233,8 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
if (q->slsb.val[bufnr] != __state)
break;
}
+
+out:
*state = __state;
return i;
}
@@ -382,7 +389,7 @@ int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
{
if (need_siga_sync(q))
qdio_siga_sync_q(q);
- return get_buf_states(q, bufnr, state, 1, 0, 0);
+ return get_buf_state(q, bufnr, state, 0);
}
static inline void qdio_stop_polling(struct qdio_q *q)
@@ -719,11 +726,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
multicast_outbound(q)))
qdio_siga_sync_q(q);
- /*
- * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
- * would return 0.
- */
- count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
+ count = atomic_read(&q->nr_buf_used);
if (!count)
return 0;
diff --git a/drivers/s390/cio/trace.c b/drivers/s390/cio/trace.c
index e331cd97e83b..882ee538ca30 100644
--- a/drivers/s390/cio/trace.c
+++ b/drivers/s390/cio/trace.c
@@ -21,5 +21,4 @@ EXPORT_TRACEPOINT_SYMBOL(s390_cio_csch);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_hsch);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_xsch);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_rsch);
-EXPORT_TRACEPOINT_SYMBOL(s390_cio_rchp);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_chsc);
diff --git a/drivers/s390/cio/trace.h b/drivers/s390/cio/trace.h
index 0ebb29b6fd6d..4803139bce14 100644
--- a/drivers/s390/cio/trace.h
+++ b/drivers/s390/cio/trace.h
@@ -274,29 +274,6 @@ DEFINE_EVENT(s390_class_schid, s390_cio_rsch,
TP_ARGS(schid, cc)
);
-/**
- * s390_cio_rchp - Reset Channel Path (RCHP) instruction was performed
- * @chpid: Channel-Path Identifier
- * @cc: Condition code
- */
-TRACE_EVENT(s390_cio_rchp,
- TP_PROTO(struct chp_id chpid, int cc),
- TP_ARGS(chpid, cc),
- TP_STRUCT__entry(
- __field(u8, cssid)
- __field(u8, id)
- __field(int, cc)
- ),
- TP_fast_assign(
- __entry->cssid = chpid.cssid;
- __entry->id = chpid.id;
- __entry->cc = cc;
- ),
- TP_printk("chpid=%x.%02x cc=%d", __entry->cssid, __entry->id,
- __entry->cc
- )
-);
-
#define CHSC_MAX_REQUEST_LEN 64
#define CHSC_MAX_RESPONSE_LEN 64
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index c07b4a85253f..75bdbb2c5140 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -2,10 +2,12 @@ menu "SOC (System On Chip) specific Drivers"
source "drivers/soc/actions/Kconfig"
source "drivers/soc/amlogic/Kconfig"
+source "drivers/soc/aspeed/Kconfig"
source "drivers/soc/atmel/Kconfig"
source "drivers/soc/bcm/Kconfig"
source "drivers/soc/fsl/Kconfig"
source "drivers/soc/imx/Kconfig"
+source "drivers/soc/ixp4xx/Kconfig"
source "drivers/soc/mediatek/Kconfig"
source "drivers/soc/qcom/Kconfig"
source "drivers/soc/renesas/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 90b686e586c6..524ecdc2a9bb 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -4,6 +4,7 @@
#
obj-$(CONFIG_ARCH_ACTIONS) += actions/
+obj-$(CONFIG_SOC_ASPEED) += aspeed/
obj-$(CONFIG_ARCH_AT91) += atmel/
obj-y += bcm/
obj-$(CONFIG_ARCH_DOVE) += dove/
@@ -11,6 +12,7 @@ obj-$(CONFIG_MACH_DOVE) += dove/
obj-y += fsl/
obj-$(CONFIG_ARCH_GEMINI) += gemini/
obj-$(CONFIG_ARCH_MXC) += imx/
+obj-$(CONFIG_ARCH_IXP4XX) += ixp4xx/
obj-$(CONFIG_SOC_XWAY) += lantiq/
obj-y += mediatek/
obj-y += amlogic/
diff --git a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
index 6289965c42e9..511b6856225d 100644
--- a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
+++ b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
@@ -11,6 +11,7 @@
#include <linux/bitfield.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/clk.h>
@@ -26,6 +27,7 @@
#define HHI_MEM_PD_REG0 (0x40 << 2)
#define HHI_VPU_MEM_PD_REG0 (0x41 << 2)
#define HHI_VPU_MEM_PD_REG1 (0x42 << 2)
+#define HHI_VPU_MEM_PD_REG2 (0x4d << 2)
struct meson_gx_pwrc_vpu {
struct generic_pm_domain genpd;
@@ -54,12 +56,55 @@ static int meson_gx_pwrc_vpu_power_off(struct generic_pm_domain *genpd)
/* Power Down Memories */
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
- 0x2 << i, 0x3 << i);
+ 0x3 << i, 0x3 << i);
udelay(5);
}
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
- 0x2 << i, 0x3 << i);
+ 0x3 << i, 0x3 << i);
+ udelay(5);
+ }
+ for (i = 8; i < 16; i++) {
+ regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0,
+ BIT(i), BIT(i));
+ udelay(5);
+ }
+ udelay(20);
+
+ regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VPU_HDMI, GEN_PWR_VPU_HDMI);
+
+ msleep(20);
+
+ clk_disable_unprepare(pd->vpu_clk);
+ clk_disable_unprepare(pd->vapb_clk);
+
+ return 0;
+}
+
+static int meson_g12a_pwrc_vpu_power_off(struct generic_pm_domain *genpd)
+{
+ struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd);
+ int i;
+
+ regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VPU_HDMI_ISO, GEN_PWR_VPU_HDMI_ISO);
+ udelay(20);
+
+ /* Power Down Memories */
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
+ 0x3 << i, 0x3 << i);
+ udelay(5);
+ }
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
+ 0x3 << i, 0x3 << i);
+ udelay(5);
+ }
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG2,
+ 0x3 << i, 0x3 << i);
udelay(5);
}
for (i = 8; i < 16; i++) {
@@ -108,13 +153,67 @@ static int meson_gx_pwrc_vpu_power_on(struct generic_pm_domain *genpd)
/* Power Up Memories */
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
- 0x2 << i, 0);
+ 0x3 << i, 0);
+ udelay(5);
+ }
+
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
+ 0x3 << i, 0);
+ udelay(5);
+ }
+
+ for (i = 8; i < 16; i++) {
+ regmap_update_bits(pd->regmap_hhi, HHI_MEM_PD_REG0,
+ BIT(i), 0);
+ udelay(5);
+ }
+ udelay(20);
+
+ ret = reset_control_assert(pd->rstc);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VPU_HDMI_ISO, 0);
+
+ ret = reset_control_deassert(pd->rstc);
+ if (ret)
+ return ret;
+
+ ret = meson_gx_pwrc_vpu_setup_clk(pd);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int meson_g12a_pwrc_vpu_power_on(struct generic_pm_domain *genpd)
+{
+ struct meson_gx_pwrc_vpu *pd = genpd_to_pd(genpd);
+ int ret;
+ int i;
+
+ regmap_update_bits(pd->regmap_ao, AO_RTI_GEN_PWR_SLEEP0,
+ GEN_PWR_VPU_HDMI, 0);
+ udelay(20);
+
+ /* Power Up Memories */
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
+ 0x3 << i, 0);
udelay(5);
}
for (i = 0; i < 32; i += 2) {
regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
- 0x2 << i, 0);
+ 0x3 << i, 0);
+ udelay(5);
+ }
+
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG2,
+ 0x3 << i, 0);
udelay(5);
}
@@ -160,15 +259,37 @@ static struct meson_gx_pwrc_vpu vpu_hdmi_pd = {
},
};
+static struct meson_gx_pwrc_vpu vpu_hdmi_pd_g12a = {
+ .genpd = {
+ .name = "vpu_hdmi",
+ .power_off = meson_g12a_pwrc_vpu_power_off,
+ .power_on = meson_g12a_pwrc_vpu_power_on,
+ },
+};
+
static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev)
{
+ const struct meson_gx_pwrc_vpu *vpu_pd_match;
struct regmap *regmap_ao, *regmap_hhi;
+ struct meson_gx_pwrc_vpu *vpu_pd;
struct reset_control *rstc;
struct clk *vpu_clk;
struct clk *vapb_clk;
bool powered_off;
int ret;
+ vpu_pd_match = of_device_get_match_data(&pdev->dev);
+ if (!vpu_pd_match) {
+ dev_err(&pdev->dev, "failed to get match data\n");
+ return -ENODEV;
+ }
+
+ vpu_pd = devm_kzalloc(&pdev->dev, sizeof(*vpu_pd), GFP_KERNEL);
+ if (!vpu_pd)
+ return -ENOMEM;
+
+ memcpy(vpu_pd, vpu_pd_match, sizeof(*vpu_pd));
+
regmap_ao = syscon_node_to_regmap(of_get_parent(pdev->dev.of_node));
if (IS_ERR(regmap_ao)) {
dev_err(&pdev->dev, "failed to get regmap\n");
@@ -201,39 +322,46 @@ static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev)
return PTR_ERR(vapb_clk);
}
- vpu_hdmi_pd.regmap_ao = regmap_ao;
- vpu_hdmi_pd.regmap_hhi = regmap_hhi;
- vpu_hdmi_pd.rstc = rstc;
- vpu_hdmi_pd.vpu_clk = vpu_clk;
- vpu_hdmi_pd.vapb_clk = vapb_clk;
+ vpu_pd->regmap_ao = regmap_ao;
+ vpu_pd->regmap_hhi = regmap_hhi;
+ vpu_pd->rstc = rstc;
+ vpu_pd->vpu_clk = vpu_clk;
+ vpu_pd->vapb_clk = vapb_clk;
+
+ platform_set_drvdata(pdev, vpu_pd);
- powered_off = meson_gx_pwrc_vpu_get_power(&vpu_hdmi_pd);
+ powered_off = meson_gx_pwrc_vpu_get_power(vpu_pd);
/* If already powered, sync the clock states */
if (!powered_off) {
- ret = meson_gx_pwrc_vpu_setup_clk(&vpu_hdmi_pd);
+ ret = meson_gx_pwrc_vpu_setup_clk(vpu_pd);
if (ret)
return ret;
}
- pm_genpd_init(&vpu_hdmi_pd.genpd, &pm_domain_always_on_gov,
+ pm_genpd_init(&vpu_pd->genpd, &pm_domain_always_on_gov,
powered_off);
return of_genpd_add_provider_simple(pdev->dev.of_node,
- &vpu_hdmi_pd.genpd);
+ &vpu_pd->genpd);
}
static void meson_gx_pwrc_vpu_shutdown(struct platform_device *pdev)
{
+ struct meson_gx_pwrc_vpu *vpu_pd = platform_get_drvdata(pdev);
bool powered_off;
- powered_off = meson_gx_pwrc_vpu_get_power(&vpu_hdmi_pd);
+ powered_off = meson_gx_pwrc_vpu_get_power(vpu_pd);
if (!powered_off)
- meson_gx_pwrc_vpu_power_off(&vpu_hdmi_pd.genpd);
+ vpu_pd->genpd.power_off(&vpu_pd->genpd);
}
static const struct of_device_id meson_gx_pwrc_vpu_match_table[] = {
- { .compatible = "amlogic,meson-gx-pwrc-vpu" },
+ { .compatible = "amlogic,meson-gx-pwrc-vpu", .data = &vpu_hdmi_pd },
+ {
+ .compatible = "amlogic,meson-g12a-pwrc-vpu",
+ .data = &vpu_hdmi_pd_g12a
+ },
{ /* sentinel */ }
};
diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c
index 37ea0a1c24c8..bca34954518e 100644
--- a/drivers/soc/amlogic/meson-gx-socinfo.c
+++ b/drivers/soc/amlogic/meson-gx-socinfo.c
@@ -37,26 +37,34 @@ static const struct meson_gx_soc_id {
{ "AXG", 0x25 },
{ "GXLX", 0x26 },
{ "TXHD", 0x27 },
+ { "G12A", 0x28 },
+ { "G12B", 0x29 },
};
static const struct meson_gx_package_id {
const char *name;
unsigned int major_id;
unsigned int pack_id;
+ unsigned int pack_mask;
} soc_packages[] = {
- { "S905", 0x1f, 0 },
- { "S905H", 0x1f, 0x13 },
- { "S905M", 0x1f, 0x20 },
- { "S905D", 0x21, 0 },
- { "S905X", 0x21, 0x80 },
- { "S905W", 0x21, 0xa0 },
- { "S905L", 0x21, 0xc0 },
- { "S905M2", 0x21, 0xe0 },
- { "S912", 0x22, 0 },
- { "962X", 0x24, 0x10 },
- { "962E", 0x24, 0x20 },
- { "A113X", 0x25, 0x37 },
- { "A113D", 0x25, 0x22 },
+ { "S905", 0x1f, 0, 0x20 }, /* pack_id != 0x20 */
+ { "S905H", 0x1f, 0x3, 0xf }, /* pack_id & 0xf == 0x3 */
+ { "S905M", 0x1f, 0x20, 0xf0 }, /* pack_id == 0x20 */
+ { "S905D", 0x21, 0, 0xf0 },
+ { "S905X", 0x21, 0x80, 0xf0 },
+ { "S905W", 0x21, 0xa0, 0xf0 },
+ { "S905L", 0x21, 0xc0, 0xf0 },
+ { "S905M2", 0x21, 0xe0, 0xf0 },
+ { "S805X", 0x21, 0x30, 0xf0 },
+ { "S805Y", 0x21, 0xb0, 0xf0 },
+ { "S912", 0x22, 0, 0x0 }, /* Only S912 is known for GXM */
+ { "962X", 0x24, 0x10, 0xf0 },
+ { "962E", 0x24, 0x20, 0xf0 },
+ { "A113X", 0x25, 0x37, 0xff },
+ { "A113D", 0x25, 0x22, 0xff },
+ { "S905D2", 0x28, 0x10, 0xf0 },
+ { "S905X2", 0x28, 0x40, 0xf0 },
+ { "S922X", 0x29, 0x40, 0xf0 },
};
static inline unsigned int socinfo_to_major(u32 socinfo)
@@ -81,13 +89,14 @@ static inline unsigned int socinfo_to_misc(u32 socinfo)
static const char *socinfo_to_package_id(u32 socinfo)
{
- unsigned int pack = socinfo_to_pack(socinfo) & 0xf0;
+ unsigned int pack = socinfo_to_pack(socinfo);
unsigned int major = socinfo_to_major(socinfo);
int i;
for (i = 0 ; i < ARRAY_SIZE(soc_packages) ; ++i) {
if (soc_packages[i].major_id == major &&
- soc_packages[i].pack_id == pack)
+ soc_packages[i].pack_id ==
+ (pack & soc_packages[i].pack_mask))
return soc_packages[i].name;
}
@@ -123,8 +132,10 @@ static int __init meson_gx_socinfo_init(void)
return -ENODEV;
/* check if interface is enabled */
- if (!of_device_is_available(np))
+ if (!of_device_is_available(np)) {
+ of_node_put(np);
return -ENODEV;
+ }
/* check if chip-id is available */
if (!of_property_read_bool(np, "amlogic,has-chip-id"))
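The package lookup above now carries a per-entry mask, so a single table can express exact matches (mask 0xff), high-nibble matches (0xf0) and a wildcard entry (0x0). A small stand-alone sketch of that comparison, using a few entries copied from the table, makes the rule (pack & mask) == pack_id easy to check:

#include <stdio.h>

struct pkg { const char *name; unsigned major, id, mask; };

/* Subset of soc_packages[]; matching rule: (pack & mask) == id */
static const struct pkg pkgs[] = {
	{ "S905H", 0x1f, 0x03, 0x0f },
	{ "S905X", 0x21, 0x80, 0xf0 },
	{ "S912",  0x22, 0x00, 0x00 },	/* wildcard: any pack on GXM */
};

static const char *lookup(unsigned major, unsigned pack)
{
	for (unsigned i = 0; i < sizeof(pkgs) / sizeof(pkgs[0]); i++)
		if (pkgs[i].major == major &&
		    pkgs[i].id == (pack & pkgs[i].mask))
			return pkgs[i].name;
	return "unknown";
}

int main(void)
{
	printf("%s\n", lookup(0x1f, 0x13));	/* 0x13 & 0x0f == 0x03 -> S905H */
	printf("%s\n", lookup(0x22, 0xa5));	/* wildcard mask -> S912 */
	return 0;
}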
diff --git a/drivers/soc/aspeed/Kconfig b/drivers/soc/aspeed/Kconfig
new file mode 100644
index 000000000000..765d10191387
--- /dev/null
+++ b/drivers/soc/aspeed/Kconfig
@@ -0,0 +1,31 @@
+menu "Aspeed SoC drivers"
+
+config SOC_ASPEED
+ def_bool y
+ depends on ARCH_ASPEED || COMPILE_TEST
+
+config ASPEED_LPC_CTRL
+ depends on SOC_ASPEED && REGMAP && MFD_SYSCON
+ tristate "Aspeed ast2400/2500 HOST LPC to BMC bridge control"
+ ---help---
+ Control Aspeed ast2400/2500 HOST LPC to BMC mappings through
+ ioctl()s. The driver also provides a read/write interface to a BMC
+ RAM region where the host LPC read/write region can be buffered.
+
+config ASPEED_LPC_SNOOP
+ tristate "Aspeed ast2500 HOST LPC snoop support"
+ depends on SOC_ASPEED && REGMAP && MFD_SYSCON
+ help
+ Provides a driver to control the LPC snoop interface which
+ allows the BMC to listen on and save the data written by
+ the host to an arbitrary LPC I/O port.
+
+config ASPEED_P2A_CTRL
+ depends on SOC_ASPEED && REGMAP && MFD_SYSCON
+ tristate "Aspeed ast2400/2500 HOST P2A VGA MMIO to BMC bridge control"
+ help
+ Control Aspeed ast2400/2500 HOST P2A VGA MMIO to BMC mappings through
+ ioctl()s. The driver also provides an interface for userspace mappings to
+ a pre-defined region.
+
+endmenu
diff --git a/drivers/soc/aspeed/Makefile b/drivers/soc/aspeed/Makefile
new file mode 100644
index 000000000000..2f7b6da7be79
--- /dev/null
+++ b/drivers/soc/aspeed/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
+obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
+obj-$(CONFIG_ASPEED_P2A_CTRL) += aspeed-p2a-ctrl.o
diff --git a/drivers/misc/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
index a024f8042259..a024f8042259 100644
--- a/drivers/misc/aspeed-lpc-ctrl.c
+++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
diff --git a/drivers/misc/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
index 2feb4347d67f..2feb4347d67f 100644
--- a/drivers/misc/aspeed-lpc-snoop.c
+++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
diff --git a/drivers/misc/aspeed-p2a-ctrl.c b/drivers/soc/aspeed/aspeed-p2a-ctrl.c
index b60fbeaffcbd..b60fbeaffcbd 100644
--- a/drivers/misc/aspeed-p2a-ctrl.c
+++ b/drivers/soc/aspeed/aspeed-p2a-ctrl.c
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
index 819bed0f5667..51b3a47b5a55 100644
--- a/drivers/soc/fsl/qe/gpio.c
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -179,8 +179,10 @@ struct qe_pin *qe_pin_request(struct device_node *np, int index)
if (err < 0)
goto err0;
gc = gpio_to_chip(err);
- if (WARN_ON(!gc))
+ if (WARN_ON(!gc)) {
+ err = -ENODEV;
goto err0;
+ }
if (!of_device_is_compatible(gc->of_node, "fsl,mpc8323-qe-pario-bank")) {
pr_debug("%s: tried to get a non-qe pin\n", __func__);
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index 506a6f3c2b9b..d6b529e06d9a 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o
+obj-$(CONFIG_ARCH_MXC) += soc-imx8.o
diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
index 29b43651c261..d9231bd3c691 100644
--- a/drivers/soc/imx/gpc.c
+++ b/drivers/soc/imx/gpc.c
@@ -406,7 +406,6 @@ static int imx_gpc_probe(struct platform_device *pdev)
const struct imx_gpc_dt_data *of_id_data = of_id->data;
struct device_node *pgc_node;
struct regmap *regmap;
- struct resource *res;
void __iomem *base;
int ret;
@@ -417,8 +416,7 @@ static int imx_gpc_probe(struct platform_device *pdev)
!pgc_node)
return 0;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 176f473127b6..31b8d002d855 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -136,8 +136,8 @@ static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd,
GPC_PU_PGC_SW_PUP_REQ : GPC_PU_PGC_SW_PDN_REQ;
const bool enable_power_control = !on;
const bool has_regulator = !IS_ERR(domain->regulator);
- unsigned long deadline;
int i, ret = 0;
+ u32 pxx_req;
regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
domain->bits.map, domain->bits.map);
@@ -169,30 +169,19 @@ static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd,
* As per "5.5.9.4 Example Code 4" in IMX7DRM.pdf wait
* for PUP_REQ/PDN_REQ bit to be cleared
*/
- deadline = jiffies + msecs_to_jiffies(1);
- while (true) {
- u32 pxx_req;
-
- regmap_read(domain->regmap, offset, &pxx_req);
-
- if (!(pxx_req & domain->bits.pxx))
- break;
-
- if (time_after(jiffies, deadline)) {
- dev_err(domain->dev, "falied to command PGC\n");
- ret = -ETIMEDOUT;
- /*
- * If we were in a process of enabling a
- * domain and failed we might as well disable
- * the regulator we just enabled. And if it
- * was the opposite situation and we failed to
- * power down -- keep the regulator on
- */
- on = !on;
- break;
- }
-
- cpu_relax();
+ ret = regmap_read_poll_timeout(domain->regmap, offset, pxx_req,
+ !(pxx_req & domain->bits.pxx),
+ 0, USEC_PER_MSEC);
+ if (ret) {
+ dev_err(domain->dev, "failed to command PGC\n");
+ /*
+ * If we were in a process of enabling a
+ * domain and failed we might as well disable
+ * the regulator we just enabled. And if it
+ * was the opposite situation and we failed to
+ * power down -- keep the regulator on
+ */
+ on = !on;
}
if (enable_power_control)
@@ -574,7 +563,6 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *pgc_np, *np;
struct regmap *regmap;
- struct resource *res;
void __iomem *base;
int ret;
@@ -584,8 +572,7 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/soc/imx/soc-imx8.c b/drivers/soc/imx/soc-imx8.c
new file mode 100644
index 000000000000..fc6429f9170a
--- /dev/null
+++ b/drivers/soc/imx/soc-imx8.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#define REV_B1 0x21
+
+#define IMX8MQ_SW_INFO_B1 0x40
+#define IMX8MQ_SW_MAGIC_B1 0xff0055aa
+
+struct imx8_soc_data {
+ char *name;
+ u32 (*soc_revision)(void);
+};
+
+static u32 __init imx8mq_soc_revision(void)
+{
+ struct device_node *np;
+ void __iomem *ocotp_base;
+ u32 magic;
+ u32 rev = 0;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-ocotp");
+ if (!np)
+ goto out;
+
+ ocotp_base = of_iomap(np, 0);
+ WARN_ON(!ocotp_base);
+
+ magic = readl_relaxed(ocotp_base + IMX8MQ_SW_INFO_B1);
+ if (magic == IMX8MQ_SW_MAGIC_B1)
+ rev = REV_B1;
+
+ iounmap(ocotp_base);
+
+out:
+ of_node_put(np);
+ return rev;
+}
+
+static const struct imx8_soc_data imx8mq_soc_data = {
+ .name = "i.MX8MQ",
+ .soc_revision = imx8mq_soc_revision,
+};
+
+static const struct of_device_id imx8_soc_match[] = {
+ { .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, },
+ { }
+};
+
+#define imx8_revision(soc_rev) \
+ soc_rev ? \
+ kasprintf(GFP_KERNEL, "%d.%d", (soc_rev >> 4) & 0xf, soc_rev & 0xf) : \
+ "unknown"
+
+static int __init imx8_soc_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ struct soc_device *soc_dev;
+ struct device_node *root;
+ const struct of_device_id *id;
+ u32 soc_rev = 0;
+ const struct imx8_soc_data *data;
+ int ret;
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr)
+ return -ENODEV;
+
+ soc_dev_attr->family = "Freescale i.MX";
+
+ root = of_find_node_by_path("/");
+ ret = of_property_read_string(root, "model", &soc_dev_attr->machine);
+ if (ret)
+ goto free_soc;
+
+ id = of_match_node(imx8_soc_match, root);
+ if (!id)
+ goto free_soc;
+
+ of_node_put(root);
+
+ data = id->data;
+ if (data) {
+ soc_dev_attr->soc_id = data->name;
+ if (data->soc_revision)
+ soc_rev = data->soc_revision();
+ }
+
+ soc_dev_attr->revision = imx8_revision(soc_rev);
+ if (!soc_dev_attr->revision)
+ goto free_soc;
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev))
+ goto free_rev;
+
+ return 0;
+
+free_rev:
+ kfree(soc_dev_attr->revision);
+free_soc:
+ kfree(soc_dev_attr);
+ of_node_put(root);
+ return -ENODEV;
+}
+device_initcall(imx8_soc_init);
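The imx8_revision() macro splits the OCOTP revision byte into a major and a minor nibble, so REV_B1 (0x21) is reported as "2.1". A trivial userspace check of the same arithmetic:

#include <stdio.h>

#define REV_B1 0x21

int main(void)
{
	unsigned rev = REV_B1;

	/* same nibble split as imx8_revision(): 0x21 -> "2.1" */
	printf("%d.%d\n", (rev >> 4) & 0xf, rev & 0xf);
	return 0;
}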
diff --git a/drivers/soc/ixp4xx/Kconfig b/drivers/soc/ixp4xx/Kconfig
new file mode 100644
index 000000000000..de6becdc78a2
--- /dev/null
+++ b/drivers/soc/ixp4xx/Kconfig
@@ -0,0 +1,16 @@
+menu "IXP4xx SoC drivers"
+
+config IXP4XX_QMGR
+ tristate "IXP4xx Queue Manager support"
+ help
+ This driver supports the IXP4xx built-in hardware queue manager
+ and is automatically selected by Ethernet and HSS drivers.
+
+config IXP4XX_NPE
+ tristate "IXP4xx Network Processor Engine support"
+ select FW_LOADER
+ help
+ This driver supports the IXP4xx built-in network coprocessors
+ and is automatically selected by Ethernet and HSS drivers.
+
+endmenu
diff --git a/drivers/soc/ixp4xx/Makefile b/drivers/soc/ixp4xx/Makefile
new file mode 100644
index 000000000000..d20d99e6df65
--- /dev/null
+++ b/drivers/soc/ixp4xx/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_IXP4XX_QMGR) += ixp4xx-qmgr.o
+obj-$(CONFIG_IXP4XX_NPE) += ixp4xx-npe.o
diff --git a/drivers/soc/ixp4xx/ixp4xx-npe.c b/drivers/soc/ixp4xx/ixp4xx-npe.c
new file mode 100644
index 000000000000..15979d4376ab
--- /dev/null
+++ b/drivers/soc/ixp4xx/ixp4xx-npe.c
@@ -0,0 +1,762 @@
+/*
+ * Intel IXP4xx Network Processor Engine driver for Linux
+ *
+ * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * The code is based on publicly available information:
+ * - Intel IXP4xx Developer's Manual and other e-papers
+ * - Intel IXP400 Access Library Software (BSD license)
+ * - previous works by Christian Hohnstaedt <chohnstaedt@innominate.com>
+ * Thanks, Christian.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ixp4xx/npe.h>
+
+#define DEBUG_MSG 0
+#define DEBUG_FW 0
+
+#define NPE_COUNT 3
+#define MAX_RETRIES 1000 /* microseconds */
+#define NPE_42X_DATA_SIZE 0x800 /* in dwords */
+#define NPE_46X_DATA_SIZE 0x1000
+#define NPE_A_42X_INSTR_SIZE 0x1000
+#define NPE_B_AND_C_42X_INSTR_SIZE 0x800
+#define NPE_46X_INSTR_SIZE 0x1000
+#define REGS_SIZE 0x1000
+
+#define NPE_PHYS_REG 32
+
+#define FW_MAGIC 0xFEEDF00D
+#define FW_BLOCK_TYPE_INSTR 0x0
+#define FW_BLOCK_TYPE_DATA 0x1
+#define FW_BLOCK_TYPE_EOF 0xF
+
+/* NPE exec status (read) and command (write) */
+#define CMD_NPE_STEP 0x01
+#define CMD_NPE_START 0x02
+#define CMD_NPE_STOP 0x03
+#define CMD_NPE_CLR_PIPE 0x04
+#define CMD_CLR_PROFILE_CNT 0x0C
+#define CMD_RD_INS_MEM 0x10 /* instruction memory */
+#define CMD_WR_INS_MEM 0x11
+#define CMD_RD_DATA_MEM 0x12 /* data memory */
+#define CMD_WR_DATA_MEM 0x13
+#define CMD_RD_ECS_REG 0x14 /* exec access register */
+#define CMD_WR_ECS_REG 0x15
+
+#define STAT_RUN 0x80000000
+#define STAT_STOP 0x40000000
+#define STAT_CLEAR 0x20000000
+#define STAT_ECS_K 0x00800000 /* pipeline clean */
+
+#define NPE_STEVT 0x1B
+#define NPE_STARTPC 0x1C
+#define NPE_REGMAP 0x1E
+#define NPE_CINDEX 0x1F
+
+#define INSTR_WR_REG_SHORT 0x0000C000
+#define INSTR_WR_REG_BYTE 0x00004000
+#define INSTR_RD_FIFO 0x0F888220
+#define INSTR_RESET_MBOX 0x0FAC8210
+
+#define ECS_BG_CTXT_REG_0 0x00 /* Background Executing Context */
+#define ECS_BG_CTXT_REG_1 0x01 /* Stack level */
+#define ECS_BG_CTXT_REG_2 0x02
+#define ECS_PRI_1_CTXT_REG_0 0x04 /* Priority 1 Executing Context */
+#define ECS_PRI_1_CTXT_REG_1 0x05 /* Stack level */
+#define ECS_PRI_1_CTXT_REG_2 0x06
+#define ECS_PRI_2_CTXT_REG_0 0x08 /* Priority 2 Executing Context */
+#define ECS_PRI_2_CTXT_REG_1 0x09 /* Stack level */
+#define ECS_PRI_2_CTXT_REG_2 0x0A
+#define ECS_DBG_CTXT_REG_0 0x0C /* Debug Executing Context */
+#define ECS_DBG_CTXT_REG_1 0x0D /* Stack level */
+#define ECS_DBG_CTXT_REG_2 0x0E
+#define ECS_INSTRUCT_REG 0x11 /* NPE Instruction Register */
+
+#define ECS_REG_0_ACTIVE 0x80000000 /* all levels */
+#define ECS_REG_0_NEXTPC_MASK 0x1FFF0000 /* BG/PRI1/PRI2 levels */
+#define ECS_REG_0_LDUR_BITS 8
+#define ECS_REG_0_LDUR_MASK 0x00000700 /* all levels */
+#define ECS_REG_1_CCTXT_BITS 16
+#define ECS_REG_1_CCTXT_MASK 0x000F0000 /* all levels */
+#define ECS_REG_1_SELCTXT_BITS 0
+#define ECS_REG_1_SELCTXT_MASK 0x0000000F /* all levels */
+#define ECS_DBG_REG_2_IF 0x00100000 /* debug level */
+#define ECS_DBG_REG_2_IE 0x00080000 /* debug level */
+
+/* NPE watchpoint_fifo register bit */
+#define WFIFO_VALID 0x80000000
+
+/* NPE messaging_status register bit definitions */
+#define MSGSTAT_OFNE 0x00010000 /* OutFifoNotEmpty */
+#define MSGSTAT_IFNF 0x00020000 /* InFifoNotFull */
+#define MSGSTAT_OFNF 0x00040000 /* OutFifoNotFull */
+#define MSGSTAT_IFNE 0x00080000 /* InFifoNotEmpty */
+#define MSGSTAT_MBINT 0x00100000 /* Mailbox interrupt */
+#define MSGSTAT_IFINT 0x00200000 /* InFifo interrupt */
+#define MSGSTAT_OFINT 0x00400000 /* OutFifo interrupt */
+#define MSGSTAT_WFINT 0x00800000 /* WatchFifo interrupt */
+
+/* NPE messaging_control register bit definitions */
+#define MSGCTL_OUT_FIFO 0x00010000 /* enable output FIFO */
+#define MSGCTL_IN_FIFO 0x00020000 /* enable input FIFO */
+#define MSGCTL_OUT_FIFO_WRITE 0x01000000 /* enable FIFO + WRITE */
+#define MSGCTL_IN_FIFO_WRITE 0x02000000
+
+/* NPE mailbox_status value for reset */
+#define RESET_MBOX_STAT 0x0000F0F0
+
+#define NPE_A_FIRMWARE "NPE-A"
+#define NPE_B_FIRMWARE "NPE-B"
+#define NPE_C_FIRMWARE "NPE-C"
+
+const char *npe_names[] = { NPE_A_FIRMWARE, NPE_B_FIRMWARE, NPE_C_FIRMWARE };
+
+#define print_npe(pri, npe, fmt, ...) \
+ printk(pri "%s: " fmt, npe_name(npe), ## __VA_ARGS__)
+
+#if DEBUG_MSG
+#define debug_msg(npe, fmt, ...) \
+ print_npe(KERN_DEBUG, npe, fmt, ## __VA_ARGS__)
+#else
+#define debug_msg(npe, fmt, ...)
+#endif
+
+static struct {
+ u32 reg, val;
+} ecs_reset[] = {
+ { ECS_BG_CTXT_REG_0, 0xA0000000 },
+ { ECS_BG_CTXT_REG_1, 0x01000000 },
+ { ECS_BG_CTXT_REG_2, 0x00008000 },
+ { ECS_PRI_1_CTXT_REG_0, 0x20000080 },
+ { ECS_PRI_1_CTXT_REG_1, 0x01000000 },
+ { ECS_PRI_1_CTXT_REG_2, 0x00008000 },
+ { ECS_PRI_2_CTXT_REG_0, 0x20000080 },
+ { ECS_PRI_2_CTXT_REG_1, 0x01000000 },
+ { ECS_PRI_2_CTXT_REG_2, 0x00008000 },
+ { ECS_DBG_CTXT_REG_0, 0x20000000 },
+ { ECS_DBG_CTXT_REG_1, 0x00000000 },
+ { ECS_DBG_CTXT_REG_2, 0x001E0000 },
+ { ECS_INSTRUCT_REG, 0x1003C00F },
+};
+
+static struct npe npe_tab[NPE_COUNT] = {
+ {
+ .id = 0,
+ }, {
+ .id = 1,
+ }, {
+ .id = 2,
+ }
+};
+
+int npe_running(struct npe *npe)
+{
+ return (__raw_readl(&npe->regs->exec_status_cmd) & STAT_RUN) != 0;
+}
+
+static void npe_cmd_write(struct npe *npe, u32 addr, int cmd, u32 data)
+{
+ __raw_writel(data, &npe->regs->exec_data);
+ __raw_writel(addr, &npe->regs->exec_addr);
+ __raw_writel(cmd, &npe->regs->exec_status_cmd);
+}
+
+static u32 npe_cmd_read(struct npe *npe, u32 addr, int cmd)
+{
+ __raw_writel(addr, &npe->regs->exec_addr);
+ __raw_writel(cmd, &npe->regs->exec_status_cmd);
+ /* Introduce extra read cycles after issuing read command to NPE
+ so that we read the register after the NPE has updated it.
+ This is to overcome a race condition between the XScale and the NPE */
+ __raw_readl(&npe->regs->exec_data);
+ __raw_readl(&npe->regs->exec_data);
+ return __raw_readl(&npe->regs->exec_data);
+}
+
+static void npe_clear_active(struct npe *npe, u32 reg)
+{
+ u32 val = npe_cmd_read(npe, reg, CMD_RD_ECS_REG);
+ npe_cmd_write(npe, reg, CMD_WR_ECS_REG, val & ~ECS_REG_0_ACTIVE);
+}
+
+static void npe_start(struct npe *npe)
+{
+ /* ensure only Background Context Stack Level is active */
+ npe_clear_active(npe, ECS_PRI_1_CTXT_REG_0);
+ npe_clear_active(npe, ECS_PRI_2_CTXT_REG_0);
+ npe_clear_active(npe, ECS_DBG_CTXT_REG_0);
+
+ __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd);
+ __raw_writel(CMD_NPE_START, &npe->regs->exec_status_cmd);
+}
+
+static void npe_stop(struct npe *npe)
+{
+ __raw_writel(CMD_NPE_STOP, &npe->regs->exec_status_cmd);
+ __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd); /*FIXME?*/
+}
+
+static int __must_check npe_debug_instr(struct npe *npe, u32 instr, u32 ctx,
+ u32 ldur)
+{
+ u32 wc;
+ int i;
+
+ /* set the Active bit, and the LDUR, in the debug level */
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_0, CMD_WR_ECS_REG,
+ ECS_REG_0_ACTIVE | (ldur << ECS_REG_0_LDUR_BITS));
+
+ /* set CCTXT at ECS DEBUG L3 to specify in which context to execute
+ the instruction, and set SELCTXT at ECS DEBUG Level to specify
+ which context store to access.
+ Debug ECS Level Reg 1 has form 0x000n000n, where n = context number
+ */
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_1, CMD_WR_ECS_REG,
+ (ctx << ECS_REG_1_CCTXT_BITS) |
+ (ctx << ECS_REG_1_SELCTXT_BITS));
+
+ /* clear the pipeline */
+ __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd);
+
+ /* load NPE instruction into the instruction register */
+ npe_cmd_write(npe, ECS_INSTRUCT_REG, CMD_WR_ECS_REG, instr);
+
+ /* we need this value later to wait for completion of NPE execution
+ step */
+ wc = __raw_readl(&npe->regs->watch_count);
+
+ /* issue a Step One command via the Execution Control register */
+ __raw_writel(CMD_NPE_STEP, &npe->regs->exec_status_cmd);
+
+ /* Watch Count register increments when NPE completes an instruction */
+ for (i = 0; i < MAX_RETRIES; i++) {
+ if (wc != __raw_readl(&npe->regs->watch_count))
+ return 0;
+ udelay(1);
+ }
+
+ print_npe(KERN_ERR, npe, "reset: npe_debug_instr(): timeout\n");
+ return -ETIMEDOUT;
+}
+
+static int __must_check npe_logical_reg_write8(struct npe *npe, u32 addr,
+ u8 val, u32 ctx)
+{
+ /* here we build the NPE assembler instruction: mov8 d0, #0 */
+ u32 instr = INSTR_WR_REG_BYTE | /* OpCode */
+ addr << 9 | /* base Operand */
+ (val & 0x1F) << 4 | /* lower 5 bits to immediate data */
+ (val & ~0x1F) << (18 - 5);/* higher 3 bits to CoProc instr. */
+ return npe_debug_instr(npe, instr, ctx, 1); /* execute it */
+}
+
+static int __must_check npe_logical_reg_write16(struct npe *npe, u32 addr,
+ u16 val, u32 ctx)
+{
+ /* here we build the NPE assembler instruction: mov16 d0, #0 */
+ u32 instr = INSTR_WR_REG_SHORT | /* OpCode */
+ addr << 9 | /* base Operand */
+ (val & 0x1F) << 4 | /* lower 5 bits to immediate data */
+ (val & ~0x1F) << (18 - 5);/* higher 11 bits to CoProc instr. */
+ return npe_debug_instr(npe, instr, ctx, 1); /* execute it */
+}
+
+static int __must_check npe_logical_reg_write32(struct npe *npe, u32 addr,
+ u32 val, u32 ctx)
+{
+ /* write in 16 bit steps first the high and then the low value */
+ if (npe_logical_reg_write16(npe, addr, val >> 16, ctx))
+ return -ETIMEDOUT;
+ return npe_logical_reg_write16(npe, addr + 2, val & 0xFFFF, ctx);
+}
+
+static int npe_reset(struct npe *npe)
+{
+ u32 val, ctl, exec_count, ctx_reg2;
+ int i;
+
+ ctl = (__raw_readl(&npe->regs->messaging_control) | 0x3F000000) &
+ 0x3F3FFFFF;
+
+ /* disable parity interrupt */
+ __raw_writel(ctl & 0x3F00FFFF, &npe->regs->messaging_control);
+
+ /* pre exec - debug instruction */
+ /* turn off the halt bit by clearing Execution Count register. */
+ exec_count = __raw_readl(&npe->regs->exec_count);
+ __raw_writel(0, &npe->regs->exec_count);
+ /* ensure that IF and IE are on (temporarily), so that we don't end up
+ stepping forever */
+ ctx_reg2 = npe_cmd_read(npe, ECS_DBG_CTXT_REG_2, CMD_RD_ECS_REG);
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_2, CMD_WR_ECS_REG, ctx_reg2 |
+ ECS_DBG_REG_2_IF | ECS_DBG_REG_2_IE);
+
+ /* clear the FIFOs */
+ while (__raw_readl(&npe->regs->watchpoint_fifo) & WFIFO_VALID)
+ ;
+ while (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_OFNE)
+ /* read from the outFIFO until empty */
+ print_npe(KERN_DEBUG, npe, "npe_reset: read FIFO = 0x%X\n",
+ __raw_readl(&npe->regs->in_out_fifo));
+
+ while (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE)
+ /* step execution of the NPE instruction to read inFIFO using
+ the Debug Executing Context stack */
+ if (npe_debug_instr(npe, INSTR_RD_FIFO, 0, 0))
+ return -ETIMEDOUT;
+
+ /* reset the mailbox reg from the XScale side */
+ __raw_writel(RESET_MBOX_STAT, &npe->regs->mailbox_status);
+ /* from NPE side */
+ if (npe_debug_instr(npe, INSTR_RESET_MBOX, 0, 0))
+ return -ETIMEDOUT;
+
+ /* Reset the physical registers in the NPE register file */
+ for (val = 0; val < NPE_PHYS_REG; val++) {
+ if (npe_logical_reg_write16(npe, NPE_REGMAP, val >> 1, 0))
+ return -ETIMEDOUT;
+ /* address is either 0 or 4 */
+ if (npe_logical_reg_write32(npe, (val & 1) * 4, 0, 0))
+ return -ETIMEDOUT;
+ }
+
+ /* Reset the context store = each context's Context Store registers */
+
+ /* Context 0 has no STARTPC. Instead, this value is used to set NextPC
+ for Background ECS, to set where NPE starts executing code */
+ val = npe_cmd_read(npe, ECS_BG_CTXT_REG_0, CMD_RD_ECS_REG);
+ val &= ~ECS_REG_0_NEXTPC_MASK;
+ val |= (0 /* NextPC */ << 16) & ECS_REG_0_NEXTPC_MASK;
+ npe_cmd_write(npe, ECS_BG_CTXT_REG_0, CMD_WR_ECS_REG, val);
+
+ for (i = 0; i < 16; i++) {
+ if (i) { /* Context 0 has no STEVT nor STARTPC */
+ /* STEVT = off, 0x80 */
+ if (npe_logical_reg_write8(npe, NPE_STEVT, 0x80, i))
+ return -ETIMEDOUT;
+ if (npe_logical_reg_write16(npe, NPE_STARTPC, 0, i))
+ return -ETIMEDOUT;
+ }
+ /* REGMAP = d0->p0, d8->p2, d16->p4 */
+ if (npe_logical_reg_write16(npe, NPE_REGMAP, 0x820, i))
+ return -ETIMEDOUT;
+ if (npe_logical_reg_write8(npe, NPE_CINDEX, 0, i))
+ return -ETIMEDOUT;
+ }
+
+ /* post exec */
+ /* clear active bit in debug level */
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_0, CMD_WR_ECS_REG, 0);
+ /* clear the pipeline */
+ __raw_writel(CMD_NPE_CLR_PIPE, &npe->regs->exec_status_cmd);
+ /* restore previous values */
+ __raw_writel(exec_count, &npe->regs->exec_count);
+ npe_cmd_write(npe, ECS_DBG_CTXT_REG_2, CMD_WR_ECS_REG, ctx_reg2);
+
+ /* write reset values to Execution Context Stack registers */
+ for (val = 0; val < ARRAY_SIZE(ecs_reset); val++)
+ npe_cmd_write(npe, ecs_reset[val].reg, CMD_WR_ECS_REG,
+ ecs_reset[val].val);
+
+ /* clear the profile counter */
+ __raw_writel(CMD_CLR_PROFILE_CNT, &npe->regs->exec_status_cmd);
+
+ __raw_writel(0, &npe->regs->exec_count);
+ __raw_writel(0, &npe->regs->action_points[0]);
+ __raw_writel(0, &npe->regs->action_points[1]);
+ __raw_writel(0, &npe->regs->action_points[2]);
+ __raw_writel(0, &npe->regs->action_points[3]);
+ __raw_writel(0, &npe->regs->watch_count);
+
+ val = ixp4xx_read_feature_bits();
+ /* reset the NPE */
+ ixp4xx_write_feature_bits(val &
+ ~(IXP4XX_FEATURE_RESET_NPEA << npe->id));
+ /* deassert reset */
+ ixp4xx_write_feature_bits(val |
+ (IXP4XX_FEATURE_RESET_NPEA << npe->id));
+ for (i = 0; i < MAX_RETRIES; i++) {
+ if (ixp4xx_read_feature_bits() &
+ (IXP4XX_FEATURE_RESET_NPEA << npe->id))
+ break; /* NPE is back alive */
+ udelay(1);
+ }
+ if (i == MAX_RETRIES)
+ return -ETIMEDOUT;
+
+ npe_stop(npe);
+
+ /* restore NPE configuration bus Control Register - parity settings */
+ __raw_writel(ctl, &npe->regs->messaging_control);
+ return 0;
+}
+
+
+int npe_send_message(struct npe *npe, const void *msg, const char *what)
+{
+ const u32 *send = msg;
+ int cycles = 0;
+
+ debug_msg(npe, "Trying to send message %s [%08X:%08X]\n",
+ what, send[0], send[1]);
+
+ if (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE) {
+ debug_msg(npe, "NPE input FIFO not empty\n");
+ return -EIO;
+ }
+
+ __raw_writel(send[0], &npe->regs->in_out_fifo);
+
+ if (!(__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNF)) {
+ debug_msg(npe, "NPE input FIFO full\n");
+ return -EIO;
+ }
+
+ __raw_writel(send[1], &npe->regs->in_out_fifo);
+
+ while ((cycles < MAX_RETRIES) &&
+ (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_IFNE)) {
+ udelay(1);
+ cycles++;
+ }
+
+ if (cycles == MAX_RETRIES) {
+ debug_msg(npe, "Timeout sending message\n");
+ return -ETIMEDOUT;
+ }
+
+#if DEBUG_MSG > 1
+ debug_msg(npe, "Sending a message took %i cycles\n", cycles);
+#endif
+ return 0;
+}
+
+int npe_recv_message(struct npe *npe, void *msg, const char *what)
+{
+ u32 *recv = msg;
+ int cycles = 0, cnt = 0;
+
+ debug_msg(npe, "Trying to receive message %s\n", what);
+
+ while (cycles < MAX_RETRIES) {
+ if (__raw_readl(&npe->regs->messaging_status) & MSGSTAT_OFNE) {
+ recv[cnt++] = __raw_readl(&npe->regs->in_out_fifo);
+ if (cnt == 2)
+ break;
+ } else {
+ udelay(1);
+ cycles++;
+ }
+ }
+
+ switch(cnt) {
+ case 1:
+ debug_msg(npe, "Received [%08X]\n", recv[0]);
+ break;
+ case 2:
+ debug_msg(npe, "Received [%08X:%08X]\n", recv[0], recv[1]);
+ break;
+ }
+
+ if (cycles == MAX_RETRIES) {
+ debug_msg(npe, "Timeout waiting for message\n");
+ return -ETIMEDOUT;
+ }
+
+#if DEBUG_MSG > 1
+ debug_msg(npe, "Receiving a message took %i cycles\n", cycles);
+#endif
+ return 0;
+}
+
+int npe_send_recv_message(struct npe *npe, void *msg, const char *what)
+{
+ int result;
+ u32 *send = msg, recv[2];
+
+ if ((result = npe_send_message(npe, msg, what)) != 0)
+ return result;
+ if ((result = npe_recv_message(npe, recv, what)) != 0)
+ return result;
+
+ if ((recv[0] != send[0]) || (recv[1] != send[1])) {
+ debug_msg(npe, "Message %s: unexpected message received\n",
+ what);
+ return -EIO;
+ }
+ return 0;
+}
+
+
+int npe_load_firmware(struct npe *npe, const char *name, struct device *dev)
+{
+ const struct firmware *fw_entry;
+
+ struct dl_block {
+ u32 type;
+ u32 offset;
+ } *blk;
+
+ struct dl_image {
+ u32 magic;
+ u32 id;
+ u32 size;
+ union {
+ u32 data[0];
+ struct dl_block blocks[0];
+ };
+ } *image;
+
+ struct dl_codeblock {
+ u32 npe_addr;
+ u32 size;
+ u32 data[0];
+ } *cb;
+
+ int i, j, err, data_size, instr_size, blocks, table_end;
+ u32 cmd;
+
+ if ((err = request_firmware(&fw_entry, name, dev)) != 0)
+ return err;
+
+ err = -EINVAL;
+ if (fw_entry->size < sizeof(struct dl_image)) {
+ print_npe(KERN_ERR, npe, "incomplete firmware file\n");
+ goto err;
+ }
+ image = (struct dl_image*)fw_entry->data;
+
+#if DEBUG_FW
+ print_npe(KERN_DEBUG, npe, "firmware: %08X %08X %08X (0x%X bytes)\n",
+ image->magic, image->id, image->size, image->size * 4);
+#endif
+
+ if (image->magic == swab32(FW_MAGIC)) { /* swapped file */
+ image->id = swab32(image->id);
+ image->size = swab32(image->size);
+ } else if (image->magic != FW_MAGIC) {
+ print_npe(KERN_ERR, npe, "bad firmware file magic: 0x%X\n",
+ image->magic);
+ goto err;
+ }
+ if ((image->size * 4 + sizeof(struct dl_image)) != fw_entry->size) {
+ print_npe(KERN_ERR, npe,
+ "inconsistent size of firmware file\n");
+ goto err;
+ }
+ if (((image->id >> 24) & 0xF /* NPE ID */) != npe->id) {
+ print_npe(KERN_ERR, npe, "firmware file NPE ID mismatch\n");
+ goto err;
+ }
+ if (image->magic == swab32(FW_MAGIC))
+ for (i = 0; i < image->size; i++)
+ image->data[i] = swab32(image->data[i]);
+
+ if (cpu_is_ixp42x() && ((image->id >> 28) & 0xF /* device ID */)) {
+ print_npe(KERN_INFO, npe, "IXP43x/IXP46x firmware ignored on "
+ "IXP42x\n");
+ goto err;
+ }
+
+ if (npe_running(npe)) {
+ print_npe(KERN_INFO, npe, "unable to load firmware, NPE is "
+ "already running\n");
+ err = -EBUSY;
+ goto err;
+ }
+#if 0
+ npe_stop(npe);
+ npe_reset(npe);
+#endif
+
+ print_npe(KERN_INFO, npe, "firmware functionality 0x%X, "
+ "revision 0x%X:%X\n", (image->id >> 16) & 0xFF,
+ (image->id >> 8) & 0xFF, image->id & 0xFF);
+
+ if (cpu_is_ixp42x()) {
+ if (!npe->id)
+ instr_size = NPE_A_42X_INSTR_SIZE;
+ else
+ instr_size = NPE_B_AND_C_42X_INSTR_SIZE;
+ data_size = NPE_42X_DATA_SIZE;
+ } else {
+ instr_size = NPE_46X_INSTR_SIZE;
+ data_size = NPE_46X_DATA_SIZE;
+ }
+
+ for (blocks = 0; blocks * sizeof(struct dl_block) / 4 < image->size;
+ blocks++)
+ if (image->blocks[blocks].type == FW_BLOCK_TYPE_EOF)
+ break;
+ if (blocks * sizeof(struct dl_block) / 4 >= image->size) {
+ print_npe(KERN_INFO, npe, "firmware EOF block marker not "
+ "found\n");
+ goto err;
+ }
+
+#if DEBUG_FW
+ print_npe(KERN_DEBUG, npe, "%i firmware blocks found\n", blocks);
+#endif
+
+ table_end = blocks * sizeof(struct dl_block) / 4 + 1 /* EOF marker */;
+ for (i = 0, blk = image->blocks; i < blocks; i++, blk++) {
+ if (blk->offset > image->size - sizeof(struct dl_codeblock) / 4
+ || blk->offset < table_end) {
+ print_npe(KERN_INFO, npe, "invalid offset 0x%X of "
+ "firmware block #%i\n", blk->offset, i);
+ goto err;
+ }
+
+ cb = (struct dl_codeblock*)&image->data[blk->offset];
+ if (blk->type == FW_BLOCK_TYPE_INSTR) {
+ if (cb->npe_addr + cb->size > instr_size)
+ goto too_big;
+ cmd = CMD_WR_INS_MEM;
+ } else if (blk->type == FW_BLOCK_TYPE_DATA) {
+ if (cb->npe_addr + cb->size > data_size)
+ goto too_big;
+ cmd = CMD_WR_DATA_MEM;
+ } else {
+ print_npe(KERN_INFO, npe, "invalid firmware block #%i "
+ "type 0x%X\n", i, blk->type);
+ goto err;
+ }
+ if (blk->offset + sizeof(*cb) / 4 + cb->size > image->size) {
+ print_npe(KERN_INFO, npe, "firmware block #%i doesn't "
+ "fit in firmware image: type %c, start 0x%X,"
+ " length 0x%X\n", i,
+ blk->type == FW_BLOCK_TYPE_INSTR ? 'I' : 'D',
+ cb->npe_addr, cb->size);
+ goto err;
+ }
+
+ for (j = 0; j < cb->size; j++)
+ npe_cmd_write(npe, cb->npe_addr + j, cmd, cb->data[j]);
+ }
+
+ npe_start(npe);
+ if (!npe_running(npe))
+ print_npe(KERN_ERR, npe, "unable to start\n");
+ release_firmware(fw_entry);
+ return 0;
+
+too_big:
+ print_npe(KERN_INFO, npe, "firmware block #%i doesn't fit in NPE "
+ "memory: type %c, start 0x%X, length 0x%X\n", i,
+ blk->type == FW_BLOCK_TYPE_INSTR ? 'I' : 'D',
+ cb->npe_addr, cb->size);
+err:
+ release_firmware(fw_entry);
+ return err;
+}
+
+
+struct npe *npe_request(unsigned id)
+{
+ if (id < NPE_COUNT)
+ if (npe_tab[id].valid)
+ if (try_module_get(THIS_MODULE))
+ return &npe_tab[id];
+ return NULL;
+}
+
+void npe_release(struct npe *npe)
+{
+ module_put(THIS_MODULE);
+}
+
+static int ixp4xx_npe_probe(struct platform_device *pdev)
+{
+ int i, found = 0;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+
+ for (i = 0; i < NPE_COUNT; i++) {
+ struct npe *npe = &npe_tab[i];
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!res)
+ return -ENODEV;
+
+ if (!(ixp4xx_read_feature_bits() &
+ (IXP4XX_FEATURE_RESET_NPEA << i))) {
+ dev_info(dev, "NPE%d at 0x%08x-0x%08x not available\n",
+ i, res->start, res->end);
+ continue; /* NPE already disabled or not present */
+ }
+ npe->regs = devm_ioremap_resource(dev, res);
+ if (!npe->regs)
+ return -ENOMEM;
+
+ if (npe_reset(npe)) {
+ dev_info(dev, "NPE%d at 0x%08x-0x%08x does not reset\n",
+ i, res->start, res->end);
+ continue;
+ }
+ npe->valid = 1;
+ dev_info(dev, "NPE%d at 0x%08x-0x%08x registered\n",
+ i, res->start, res->end);
+ found++;
+ }
+
+ if (!found)
+ return -ENODEV;
+ return 0;
+}
+
+static int ixp4xx_npe_remove(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < NPE_COUNT; i++)
+ if (npe_tab[i].regs) {
+ npe_reset(&npe_tab[i]);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id ixp4xx_npe_of_match[] = {
+ {
+ .compatible = "intel,ixp4xx-network-processing-engine",
+ },
+ {},
+};
+
+static struct platform_driver ixp4xx_npe_driver = {
+ .driver = {
+ .name = "ixp4xx-npe",
+ .of_match_table = of_match_ptr(ixp4xx_npe_of_match),
+ },
+ .probe = ixp4xx_npe_probe,
+ .remove = ixp4xx_npe_remove,
+};
+module_platform_driver(ixp4xx_npe_driver);
+
+MODULE_AUTHOR("Krzysztof Halasa");
+MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE(NPE_A_FIRMWARE);
+MODULE_FIRMWARE(NPE_B_FIRMWARE);
+MODULE_FIRMWARE(NPE_C_FIRMWARE);
+
+EXPORT_SYMBOL(npe_names);
+EXPORT_SYMBOL(npe_running);
+EXPORT_SYMBOL(npe_request);
+EXPORT_SYMBOL(npe_release);
+EXPORT_SYMBOL(npe_load_firmware);
+EXPORT_SYMBOL(npe_send_message);
+EXPORT_SYMBOL(npe_recv_message);
+EXPORT_SYMBOL(npe_send_recv_message);
diff --git a/drivers/soc/ixp4xx/ixp4xx-qmgr.c b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
new file mode 100644
index 000000000000..bb90670ec160
--- /dev/null
+++ b/drivers/soc/ixp4xx/ixp4xx-qmgr.c
@@ -0,0 +1,488 @@
+/*
+ * Intel IXP4xx Queue Manager driver for Linux
+ *
+ * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ixp4xx/qmgr.h>
+
+static struct qmgr_regs __iomem *qmgr_regs;
+static int qmgr_irq_1;
+static int qmgr_irq_2;
+static spinlock_t qmgr_lock;
+static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
+static void (*irq_handlers[QUEUES])(void *pdev);
+static void *irq_pdevs[QUEUES];
+
+#if DEBUG_QMGR
+char qmgr_queue_descs[QUEUES][32];
+#endif
+
+void qmgr_put_entry(unsigned int queue, u32 val)
+{
+#if DEBUG_QMGR
+ BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */
+
+ printk(KERN_DEBUG "Queue %s(%i) put %X\n",
+ qmgr_queue_descs[queue], queue, val);
+#endif
+ __raw_writel(val, &qmgr_regs->acc[queue][0]);
+}
+
+u32 qmgr_get_entry(unsigned int queue)
+{
+ u32 val;
+ val = __raw_readl(&qmgr_regs->acc[queue][0]);
+#if DEBUG_QMGR
+ BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */
+
+ printk(KERN_DEBUG "Queue %s(%i) get %X\n",
+ qmgr_queue_descs[queue], queue, val);
+#endif
+ return val;
+}
+
+static int __qmgr_get_stat1(unsigned int queue)
+{
+ return (__raw_readl(&qmgr_regs->stat1[queue >> 3])
+ >> ((queue & 7) << 2)) & 0xF;
+}
+
+static int __qmgr_get_stat2(unsigned int queue)
+{
+ BUG_ON(queue >= HALF_QUEUES);
+ return (__raw_readl(&qmgr_regs->stat2[queue >> 4])
+ >> ((queue & 0xF) << 1)) & 0x3;
+}
+
+/**
+ * qmgr_stat_empty() - checks if a hardware queue is empty
+ * @queue: queue number
+ *
+ * Returns non-zero value if the queue is empty.
+ */
+int qmgr_stat_empty(unsigned int queue)
+{
+ BUG_ON(queue >= HALF_QUEUES);
+ return __qmgr_get_stat1(queue) & QUEUE_STAT1_EMPTY;
+}
+
+/**
+ * qmgr_stat_below_low_watermark() - checks if a queue is below low watermark
+ * @queue: queue number
+ *
+ * Returns non-zero value if the queue is below low watermark.
+ */
+int qmgr_stat_below_low_watermark(unsigned int queue)
+{
+ if (queue >= HALF_QUEUES)
+ return (__raw_readl(&qmgr_regs->statne_h) >>
+ (queue - HALF_QUEUES)) & 0x01;
+ return __qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_EMPTY;
+}
+
+/**
+ * qmgr_stat_full() - checks if a hardware queue is full
+ * @queue: queue number
+ *
+ * Returns non-zero value if the queue is full.
+ */
+int qmgr_stat_full(unsigned int queue)
+{
+ if (queue >= HALF_QUEUES)
+ return (__raw_readl(&qmgr_regs->statf_h) >>
+ (queue - HALF_QUEUES)) & 0x01;
+ return __qmgr_get_stat1(queue) & QUEUE_STAT1_FULL;
+}
+
+/**
+ * qmgr_stat_overflow() - checks if a hardware queue experienced overflow
+ * @queue: queue number
+ *
+ * Returns non-zero value if the queue experienced overflow.
+ */
+int qmgr_stat_overflow(unsigned int queue)
+{
+ return __qmgr_get_stat2(queue) & QUEUE_STAT2_OVERFLOW;
+}
+
+void qmgr_set_irq(unsigned int queue, int src,
+ void (*handler)(void *pdev), void *pdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qmgr_lock, flags);
+ if (queue < HALF_QUEUES) {
+ u32 __iomem *reg;
+ int bit;
+ BUG_ON(src > QUEUE_IRQ_SRC_NOT_FULL);
+ reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */
+ bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
+ __raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit),
+ reg);
+ } else
+ /* IRQ source for queues 32-63 is fixed */
+ BUG_ON(src != QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY);
+
+ irq_handlers[queue] = handler;
+ irq_pdevs[queue] = pdev;
+ spin_unlock_irqrestore(&qmgr_lock, flags);
+}
+
+
+static irqreturn_t qmgr_irq1_a0(int irq, void *pdev)
+{
+ int i, ret = 0;
+ u32 en_bitmap, src, stat;
+
+ /* ACK - it may clear any bits so don't rely on it */
+ __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]);
+
+ en_bitmap = qmgr_regs->irqen[0];
+ while (en_bitmap) {
+ i = __fls(en_bitmap); /* number of the last "low" queue */
+ en_bitmap &= ~BIT(i);
+ src = qmgr_regs->irqsrc[i >> 3];
+ stat = qmgr_regs->stat1[i >> 3];
+ if (src & 4) /* the IRQ condition is inverted */
+ stat = ~stat;
+ if (stat & BIT(src & 3)) {
+ irq_handlers[i](irq_pdevs[i]);
+ ret = IRQ_HANDLED;
+ }
+ }
+ return ret;
+}
+
+
+static irqreturn_t qmgr_irq2_a0(int irq, void *pdev)
+{
+ int i, ret = 0;
+ u32 req_bitmap;
+
+ /* ACK - it may clear any bits so don't rely on it */
+ __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]);
+
+ req_bitmap = qmgr_regs->irqen[1] & qmgr_regs->statne_h;
+ while (req_bitmap) {
+ i = __fls(req_bitmap); /* number of the last "high" queue */
+ req_bitmap &= ~BIT(i);
+ irq_handlers[HALF_QUEUES + i](irq_pdevs[HALF_QUEUES + i]);
+ ret = IRQ_HANDLED;
+ }
+ return ret;
+}
+
+
+static irqreturn_t qmgr_irq(int irq, void *pdev)
+{
+ int i, half = (irq == qmgr_irq_1 ? 0 : 1);
+ u32 req_bitmap = __raw_readl(&qmgr_regs->irqstat[half]);
+
+ if (!req_bitmap)
+ return 0;
+ __raw_writel(req_bitmap, &qmgr_regs->irqstat[half]); /* ACK */
+
+ while (req_bitmap) {
+ i = __fls(req_bitmap); /* number of the last queue */
+ req_bitmap &= ~BIT(i);
+ i += half * HALF_QUEUES;
+ irq_handlers[i](irq_pdevs[i]);
+ }
+ return IRQ_HANDLED;
+}
+
+
+void qmgr_enable_irq(unsigned int queue)
+{
+ unsigned long flags;
+ int half = queue / 32;
+ u32 mask = 1 << (queue & (HALF_QUEUES - 1));
+
+ spin_lock_irqsave(&qmgr_lock, flags);
+ __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) | mask,
+ &qmgr_regs->irqen[half]);
+ spin_unlock_irqrestore(&qmgr_lock, flags);
+}
+
+void qmgr_disable_irq(unsigned int queue)
+{
+ unsigned long flags;
+ int half = queue / 32;
+ u32 mask = 1 << (queue & (HALF_QUEUES - 1));
+
+ spin_lock_irqsave(&qmgr_lock, flags);
+ __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) & ~mask,
+ &qmgr_regs->irqen[half]);
+ __raw_writel(mask, &qmgr_regs->irqstat[half]); /* clear */
+ spin_unlock_irqrestore(&qmgr_lock, flags);
+}
+
+static inline void shift_mask(u32 *mask)
+{
+ mask[3] = mask[3] << 1 | mask[2] >> 31;
+ mask[2] = mask[2] << 1 | mask[1] >> 31;
+ mask[1] = mask[1] << 1 | mask[0] >> 31;
+ mask[0] <<= 1;
+}
+
+#if DEBUG_QMGR
+int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
+ unsigned int nearly_empty_watermark,
+ unsigned int nearly_full_watermark,
+ const char *desc_format, const char* name)
+#else
+int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
+ unsigned int nearly_empty_watermark,
+ unsigned int nearly_full_watermark)
+#endif
+{
+ u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
+ int err;
+
+ BUG_ON(queue >= QUEUES);
+
+ if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
+ return -EINVAL;
+
+ switch (len) {
+ case 16:
+ cfg = 0 << 24;
+ mask[0] = 0x1;
+ break;
+ case 32:
+ cfg = 1 << 24;
+ mask[0] = 0x3;
+ break;
+ case 64:
+ cfg = 2 << 24;
+ mask[0] = 0xF;
+ break;
+ case 128:
+ cfg = 3 << 24;
+ mask[0] = 0xFF;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cfg |= nearly_empty_watermark << 26;
+ cfg |= nearly_full_watermark << 29;
+ len /= 16; /* in 16-dwords: 1, 2, 4 or 8 */
+ mask[1] = mask[2] = mask[3] = 0;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ spin_lock_irq(&qmgr_lock);
+ if (__raw_readl(&qmgr_regs->sram[queue])) {
+ err = -EBUSY;
+ goto err;
+ }
+
+ while (1) {
+ if (!(used_sram_bitmap[0] & mask[0]) &&
+ !(used_sram_bitmap[1] & mask[1]) &&
+ !(used_sram_bitmap[2] & mask[2]) &&
+ !(used_sram_bitmap[3] & mask[3]))
+ break; /* found free space */
+
+ addr++;
+ shift_mask(mask);
+ if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
+ printk(KERN_ERR "qmgr: no free SRAM space for"
+ " queue %i\n", queue);
+ err = -ENOMEM;
+ goto err;
+ }
+ }
+
+ used_sram_bitmap[0] |= mask[0];
+ used_sram_bitmap[1] |= mask[1];
+ used_sram_bitmap[2] |= mask[2];
+ used_sram_bitmap[3] |= mask[3];
+ __raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
+#if DEBUG_QMGR
+ snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]),
+ desc_format, name);
+ printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n",
+ qmgr_queue_descs[queue], queue, addr);
+#endif
+ spin_unlock_irq(&qmgr_lock);
+ return 0;
+
+err:
+ spin_unlock_irq(&qmgr_lock);
+ module_put(THIS_MODULE);
+ return err;
+}
+
+void qmgr_release_queue(unsigned int queue)
+{
+ u32 cfg, addr, mask[4];
+
+ BUG_ON(queue >= QUEUES); /* not in valid range */
+
+ spin_lock_irq(&qmgr_lock);
+ cfg = __raw_readl(&qmgr_regs->sram[queue]);
+ addr = (cfg >> 14) & 0xFF;
+
+ BUG_ON(!addr); /* not requested */
+
+ switch ((cfg >> 24) & 3) {
+ case 0: mask[0] = 0x1; break;
+ case 1: mask[0] = 0x3; break;
+ case 2: mask[0] = 0xF; break;
+ case 3: mask[0] = 0xFF; break;
+ }
+
+ mask[1] = mask[2] = mask[3] = 0;
+
+ while (addr--)
+ shift_mask(mask);
+
+#if DEBUG_QMGR
+ printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n",
+ qmgr_queue_descs[queue], queue);
+ qmgr_queue_descs[queue][0] = '\x0';
+#endif
+
+ while ((addr = qmgr_get_entry(queue)))
+ printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
+ queue, addr);
+
+ __raw_writel(0, &qmgr_regs->sram[queue]);
+
+ used_sram_bitmap[0] &= ~mask[0];
+ used_sram_bitmap[1] &= ~mask[1];
+ used_sram_bitmap[2] &= ~mask[2];
+ used_sram_bitmap[3] &= ~mask[3];
+ irq_handlers[queue] = NULL; /* catch IRQ bugs */
+ spin_unlock_irq(&qmgr_lock);
+
+ module_put(THIS_MODULE);
+}
+
+static int ixp4xx_qmgr_probe(struct platform_device *pdev)
+{
+ int i, err;
+ irq_handler_t handler1, handler2;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int irq1, irq2;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ qmgr_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qmgr_regs))
+ return PTR_ERR(qmgr_regs);
+
+ irq1 = platform_get_irq(pdev, 0);
+ if (irq1 <= 0)
+ return irq1 ? irq1 : -EINVAL;
+ qmgr_irq_1 = irq1;
+ irq2 = platform_get_irq(pdev, 1);
+ if (irq2 <= 0)
+ return irq2 ? irq2 : -EINVAL;
+ qmgr_irq_2 = irq2;
+
+ /* reset qmgr registers */
+ for (i = 0; i < 4; i++) {
+ __raw_writel(0x33333333, &qmgr_regs->stat1[i]);
+ __raw_writel(0, &qmgr_regs->irqsrc[i]);
+ }
+ for (i = 0; i < 2; i++) {
+ __raw_writel(0, &qmgr_regs->stat2[i]);
+ __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */
+ __raw_writel(0, &qmgr_regs->irqen[i]);
+ }
+
+ __raw_writel(0xFFFFFFFF, &qmgr_regs->statne_h);
+ __raw_writel(0, &qmgr_regs->statf_h);
+
+ for (i = 0; i < QUEUES; i++)
+ __raw_writel(0, &qmgr_regs->sram[i]);
+
+ if (cpu_is_ixp42x_rev_a0()) {
+ handler1 = qmgr_irq1_a0;
+ handler2 = qmgr_irq2_a0;
+ } else
+ handler1 = handler2 = qmgr_irq;
+
+ err = devm_request_irq(dev, irq1, handler1, 0, "IXP4xx Queue Manager",
+ NULL);
+ if (err) {
+ dev_err(dev, "failed to request IRQ%i (%i)\n",
+ irq1, err);
+ return err;
+ }
+
+ err = devm_request_irq(dev, irq2, handler2, 0, "IXP4xx Queue Manager",
+ NULL);
+ if (err) {
+ dev_err(dev, "failed to request IRQ%i (%i)\n",
+ irq2, err);
+ return err;
+ }
+
+ used_sram_bitmap[0] = 0xF; /* first 4 pages reserved for config */
+ spin_lock_init(&qmgr_lock);
+
+ dev_info(dev, "IXP4xx Queue Manager initialized.\n");
+ return 0;
+}
+
+static int ixp4xx_qmgr_remove(struct platform_device *pdev)
+{
+ synchronize_irq(qmgr_irq_1);
+ synchronize_irq(qmgr_irq_2);
+ return 0;
+}
+
+static const struct of_device_id ixp4xx_qmgr_of_match[] = {
+ {
+ .compatible = "intel,ixp4xx-ahb-queue-manager",
+ },
+ {},
+};
+
+static struct platform_driver ixp4xx_qmgr_driver = {
+ .driver = {
+ .name = "ixp4xx-qmgr",
+ .of_match_table = of_match_ptr(ixp4xx_qmgr_of_match),
+ },
+ .probe = ixp4xx_qmgr_probe,
+ .remove = ixp4xx_qmgr_remove,
+};
+module_platform_driver(ixp4xx_qmgr_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Krzysztof Halasa");
+
+EXPORT_SYMBOL(qmgr_put_entry);
+EXPORT_SYMBOL(qmgr_get_entry);
+EXPORT_SYMBOL(qmgr_stat_empty);
+EXPORT_SYMBOL(qmgr_stat_below_low_watermark);
+EXPORT_SYMBOL(qmgr_stat_full);
+EXPORT_SYMBOL(qmgr_stat_overflow);
+EXPORT_SYMBOL(qmgr_set_irq);
+EXPORT_SYMBOL(qmgr_enable_irq);
+EXPORT_SYMBOL(qmgr_disable_irq);
+#if DEBUG_QMGR
+EXPORT_SYMBOL(qmgr_queue_descs);
+EXPORT_SYMBOL(qmgr_request_queue);
+#else
+EXPORT_SYMBOL(__qmgr_request_queue);
+#endif
+EXPORT_SYMBOL(qmgr_release_queue);
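Queue SRAM is handed out from a 128-page map kept in four u32 words; shift_mask() slides the candidate allocation one page at a time until it no longer collides with used_sram_bitmap. A small stand-alone version of that first-fit search follows (the driver additionally holds qmgr_lock and writes the winning address into the hardware SRAM register):

#include <stdio.h>
#include <stdint.h>

#define PAGES 128	/* 16-dword pages of queue SRAM */

static void shift_mask(uint32_t *mask)
{
	mask[3] = mask[3] << 1 | mask[2] >> 31;
	mask[2] = mask[2] << 1 | mask[1] >> 31;
	mask[1] = mask[1] << 1 | mask[0] >> 31;
	mask[0] <<= 1;
}

/* First-fit search: return the first page at which "pages" consecutive
 * pages are free in the bitmap, or -1 if the SRAM is exhausted. */
static int find_free(const uint32_t *used, int pages)
{
	uint32_t mask[4] = { (1u << pages) - 1, 0, 0, 0 };
	int addr = 0;

	while (addr + pages <= PAGES) {
		if (!(used[0] & mask[0]) && !(used[1] & mask[1]) &&
		    !(used[2] & mask[2]) && !(used[3] & mask[3]))
			return addr;
		addr++;
		shift_mask(mask);
	}
	return -1;
}

int main(void)
{
	uint32_t used[4] = { 0xF, 0, 0, 0 };	/* first 4 pages reserved */

	/* a 64-dword queue needs 4 pages (mask 0xF), as in the cfg switch */
	printf("64-dword queue lands at page %d\n", find_free(used, 4));
	return 0;
}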
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index 8236a6c87e19..c4449a163991 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -381,6 +381,10 @@ enum pwrap_regs {
PWRAP_EXT_GPS_AUXADC_RDATA_ADDR,
PWRAP_GPSINF_0_STA,
PWRAP_GPSINF_1_STA,
+
+ /* MT8516 only regs */
+ PWRAP_OP_TYPE,
+ PWRAP_MSB_FIRST,
};
static int mt2701_regs[] = {
@@ -852,6 +856,91 @@ static int mt8183_regs[] = {
[PWRAP_WACS2_VLDCLR] = 0xC28,
};
+static int mt8516_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_SIDLY] = 0xc,
+ [PWRAP_RDDMY] = 0x10,
+ [PWRAP_SI_CK_CON] = 0x14,
+ [PWRAP_CSHEXT_WRITE] = 0x18,
+ [PWRAP_CSHEXT_READ] = 0x1c,
+ [PWRAP_CSLEXT_START] = 0x20,
+ [PWRAP_CSLEXT_END] = 0x24,
+ [PWRAP_STAUPD_PRD] = 0x28,
+ [PWRAP_STAUPD_GRPEN] = 0x2c,
+ [PWRAP_STAUPD_MAN_TRIG] = 0x40,
+ [PWRAP_STAUPD_STA] = 0x44,
+ [PWRAP_WRAP_STA] = 0x48,
+ [PWRAP_HARB_INIT] = 0x4c,
+ [PWRAP_HARB_HPRIO] = 0x50,
+ [PWRAP_HIPRIO_ARB_EN] = 0x54,
+ [PWRAP_HARB_STA0] = 0x58,
+ [PWRAP_HARB_STA1] = 0x5c,
+ [PWRAP_MAN_EN] = 0x60,
+ [PWRAP_MAN_CMD] = 0x64,
+ [PWRAP_MAN_RDATA] = 0x68,
+ [PWRAP_MAN_VLDCLR] = 0x6c,
+ [PWRAP_WACS0_EN] = 0x70,
+ [PWRAP_INIT_DONE0] = 0x74,
+ [PWRAP_WACS0_CMD] = 0x78,
+ [PWRAP_WACS0_RDATA] = 0x7c,
+ [PWRAP_WACS0_VLDCLR] = 0x80,
+ [PWRAP_WACS1_EN] = 0x84,
+ [PWRAP_INIT_DONE1] = 0x88,
+ [PWRAP_WACS1_CMD] = 0x8c,
+ [PWRAP_WACS1_RDATA] = 0x90,
+ [PWRAP_WACS1_VLDCLR] = 0x94,
+ [PWRAP_WACS2_EN] = 0x98,
+ [PWRAP_INIT_DONE2] = 0x9c,
+ [PWRAP_WACS2_CMD] = 0xa0,
+ [PWRAP_WACS2_RDATA] = 0xa4,
+ [PWRAP_WACS2_VLDCLR] = 0xa8,
+ [PWRAP_INT_EN] = 0xac,
+ [PWRAP_INT_FLG_RAW] = 0xb0,
+ [PWRAP_INT_FLG] = 0xb4,
+ [PWRAP_INT_CLR] = 0xb8,
+ [PWRAP_SIG_ADR] = 0xbc,
+ [PWRAP_SIG_MODE] = 0xc0,
+ [PWRAP_SIG_VALUE] = 0xc4,
+ [PWRAP_SIG_ERRVAL] = 0xc8,
+ [PWRAP_CRC_EN] = 0xcc,
+ [PWRAP_TIMER_EN] = 0xd0,
+ [PWRAP_TIMER_STA] = 0xd4,
+ [PWRAP_WDT_UNIT] = 0xd8,
+ [PWRAP_WDT_SRC_EN] = 0xdc,
+ [PWRAP_WDT_FLG] = 0xe0,
+ [PWRAP_DEBUG_INT_SEL] = 0xe4,
+ [PWRAP_DVFS_ADR0] = 0xe8,
+ [PWRAP_DVFS_WDATA0] = 0xec,
+ [PWRAP_DVFS_ADR1] = 0xf0,
+ [PWRAP_DVFS_WDATA1] = 0xf4,
+ [PWRAP_DVFS_ADR2] = 0xf8,
+ [PWRAP_DVFS_WDATA2] = 0xfc,
+ [PWRAP_DVFS_ADR3] = 0x100,
+ [PWRAP_DVFS_WDATA3] = 0x104,
+ [PWRAP_DVFS_ADR4] = 0x108,
+ [PWRAP_DVFS_WDATA4] = 0x10c,
+ [PWRAP_DVFS_ADR5] = 0x110,
+ [PWRAP_DVFS_WDATA5] = 0x114,
+ [PWRAP_DVFS_ADR6] = 0x118,
+ [PWRAP_DVFS_WDATA6] = 0x11c,
+ [PWRAP_DVFS_ADR7] = 0x120,
+ [PWRAP_DVFS_WDATA7] = 0x124,
+ [PWRAP_SPMINF_STA] = 0x128,
+ [PWRAP_CIPHER_KEY_SEL] = 0x12c,
+ [PWRAP_CIPHER_IV_SEL] = 0x130,
+ [PWRAP_CIPHER_EN] = 0x134,
+ [PWRAP_CIPHER_RDY] = 0x138,
+ [PWRAP_CIPHER_MODE] = 0x13c,
+ [PWRAP_CIPHER_SWRST] = 0x140,
+ [PWRAP_DCM_EN] = 0x144,
+ [PWRAP_DCM_DBC_PRD] = 0x148,
+ [PWRAP_SW_RST] = 0x168,
+ [PWRAP_OP_TYPE] = 0x16c,
+ [PWRAP_MSB_FIRST] = 0x170,
+};
+
enum pmic_type {
PMIC_MT6323,
PMIC_MT6351,
@@ -869,6 +958,7 @@ enum pwrap_type {
PWRAP_MT8135,
PWRAP_MT8173,
PWRAP_MT8183,
+ PWRAP_MT8516,
};
struct pmic_wrapper;
@@ -1281,7 +1371,7 @@ static bool pwrap_is_pmic_cipher_ready(struct pmic_wrapper *wrp)
static int pwrap_init_cipher(struct pmic_wrapper *wrp)
{
int ret;
- u32 rdata;
+ u32 rdata = 0;
pwrap_writel(wrp, 0x1, PWRAP_CIPHER_SWRST);
pwrap_writel(wrp, 0x0, PWRAP_CIPHER_SWRST);
@@ -1297,6 +1387,7 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
case PWRAP_MT6765:
case PWRAP_MT6797:
case PWRAP_MT8173:
+ case PWRAP_MT8516:
pwrap_writel(wrp, 1, PWRAP_CIPHER_EN);
break;
case PWRAP_MT7622:
@@ -1478,7 +1569,8 @@ static int pwrap_init(struct pmic_wrapper *wrp)
{
int ret;
- reset_control_reset(wrp->rstc);
+ if (wrp->rstc)
+ reset_control_reset(wrp->rstc);
if (wrp->rstc_bridge)
reset_control_reset(wrp->rstc_bridge);
@@ -1764,6 +1856,18 @@ static const struct pmic_wrapper_type pwrap_mt8183 = {
.init_soc_specific = pwrap_mt8183_init_soc_specific,
};
+static struct pmic_wrapper_type pwrap_mt8516 = {
+ .regs = mt8516_regs,
+ .type = PWRAP_MT8516,
+ .arb_en_all = 0xff,
+ .int_en_all = ~(u32)(BIT(31) | BIT(2)),
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .caps = PWRAP_CAP_DCM,
+ .init_reg_clock = pwrap_mt2701_init_reg_clock,
+ .init_soc_specific = NULL,
+};
+
static const struct of_device_id of_pwrap_match_tbl[] = {
{
.compatible = "mediatek,mt2701-pwrap",
@@ -1787,6 +1891,9 @@ static const struct of_device_id of_pwrap_match_tbl[] = {
.compatible = "mediatek,mt8183-pwrap",
.data = &pwrap_mt8183,
}, {
+ .compatible = "mediatek,mt8516-pwrap",
+ .data = &pwrap_mt8516,
+ }, {
/* sentinel */
}
};
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index c701b3b010f1..f6c3d17b05c7 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -248,8 +248,8 @@ static int cmd_db_dev_probe(struct platform_device *pdev)
}
cmd_db_header = memremap(rmem->base, rmem->size, MEMREMAP_WB);
- if (IS_ERR_OR_NULL(cmd_db_header)) {
- ret = PTR_ERR(cmd_db_header);
+ if (!cmd_db_header) {
+ ret = -ENOMEM;
cmd_db_header = NULL;
return ret;
}
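
A minimal sketch (illustrative only, not part of the patch) of the memremap() convention this fix relies on: memremap() signals failure by returning NULL rather than an ERR_PTR(), so the caller has to pick its own error code instead of calling PTR_ERR().

#include <linux/errno.h>
#include <linux/io.h>

/* Illustrative helper; assumes a writeback mapping is wanted. */
static int example_map_region(phys_addr_t base, size_t size, void **out)
{
	void *va = memremap(base, size, MEMREMAP_WB);

	if (!va)		/* NULL, never ERR_PTR(), indicates failure */
		return -ENOMEM;	/* PTR_ERR(NULL) would wrongly yield 0 */

	*out = va;
	return 0;
}
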
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
index c239a28e503f..f9e309f0acd3 100644
--- a/drivers/soc/qcom/qmi_interface.c
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -345,8 +345,7 @@ int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout)
struct qmi_handle *qmi = txn->qmi;
int ret;
- ret = wait_for_completion_interruptible_timeout(&txn->completion,
- timeout);
+ ret = wait_for_completion_timeout(&txn->completion, timeout);
mutex_lock(&qmi->txn_lock);
mutex_lock(&txn->lock);
@@ -354,9 +353,7 @@ int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout)
mutex_unlock(&txn->lock);
mutex_unlock(&qmi->txn_lock);
- if (ret < 0)
- return ret;
- else if (ret == 0)
+ if (ret == 0)
return -ETIMEDOUT;
else
return txn->result;
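
For context, a hedged sketch of the return-value convention behind this change: wait_for_completion_timeout() never returns a negative value (unlike the _interruptible variant), so only the zero, i.e. timed-out, case needs special handling.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Illustrative wait helper; the 100 ms timeout is arbitrary. */
static int example_wait(struct completion *done)
{
	unsigned long left;

	left = wait_for_completion_timeout(done, msecs_to_jiffies(100));
	if (!left)
		return -ETIMEDOUT;	/* 0 means the timeout elapsed */

	return 0;			/* > 0: jiffies left at completion */
}
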
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
index 7200d762a951..6f5e8be9689c 100644
--- a/drivers/soc/qcom/rmtfs_mem.c
+++ b/drivers/soc/qcom/rmtfs_mem.c
@@ -137,6 +137,26 @@ static struct class rmtfs_class = {
.name = "rmtfs",
};
+static int qcom_rmtfs_mem_mmap(struct file *filep, struct vm_area_struct *vma)
+{
+ struct qcom_rmtfs_mem *rmtfs_mem = filep->private_data;
+
+ if (vma->vm_end - vma->vm_start > rmtfs_mem->size) {
+ dev_dbg(&rmtfs_mem->dev,
+ "vm_end[%lu] - vm_start[%lu] [%lu] > mem->size[%pa]\n",
+ vma->vm_end, vma->vm_start,
+ (vma->vm_end - vma->vm_start), &rmtfs_mem->size);
+ return -EINVAL;
+ }
+
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ return remap_pfn_range(vma,
+ vma->vm_start,
+ rmtfs_mem->addr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+
static const struct file_operations qcom_rmtfs_mem_fops = {
.owner = THIS_MODULE,
.open = qcom_rmtfs_mem_open,
@@ -144,6 +164,7 @@ static const struct file_operations qcom_rmtfs_mem_fops = {
.write = qcom_rmtfs_mem_write,
.release = qcom_rmtfs_mem_release,
.llseek = default_llseek,
+ .mmap = qcom_rmtfs_mem_mmap,
};
static void qcom_rmtfs_mem_release_device(struct device *dev)
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index 75bd9a83aef0..e278fc11fe5c 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -459,7 +459,7 @@ static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
do {
slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
i, msg->num_cmds, 0);
- if (slot == tcs->num_tcs * tcs->ncpt)
+ if (slot >= tcs->num_tcs * tcs->ncpt)
return -ENOMEM;
i += tcs->ncpt;
} while (slot + msg->num_cmds - 1 >= i);
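
A hedged sketch of why the comparison above must be ">=": bitmap_find_next_zero_area() reports failure by returning an index at or beyond the bitmap size, which is not guaranteed to land exactly on the limit.

#include <linux/bitmap.h>
#include <linux/errno.h>

/* Illustrative allocator over a small bitmap; names are hypothetical. */
static int example_grab_slots(unsigned long *map, unsigned int size,
			      unsigned int len)
{
	unsigned long slot;

	slot = bitmap_find_next_zero_area(map, size, 0, len, 0);
	if (slot >= size)	/* may overshoot size, so "==" can miss it */
		return -ENOMEM;

	bitmap_set(map, slot, len);
	return slot;
}
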
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 4af96e668a2f..3299cf5365f3 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -335,6 +335,9 @@ static int __init renesas_soc_init(void)
/* R-Car M3-W ES1.1 incorrectly identifies as ES2.0 */
if ((product & 0x7fff) == 0x5210)
product ^= 0x11;
+ /* R-Car M3-W ES1.3 incorrectly identifies as ES2.1 */
+ if ((product & 0x7fff) == 0x5211)
+ product ^= 0x12;
if (soc->id && ((product >> 8) & 0xff) != soc->id) {
pr_warn("SoC mismatch (product = 0x%x)\n", product);
return -ENODEV;
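
An illustrative worked example of the new fixup, assuming the same ES decoding as the existing ES1.1 quirk (major revision = high nibble of the cut value plus one, minor = low nibble):

/*
 *   cut 0x11       -> reported as ES2.1 (wrong for the affected parts)
 *   0x5211 ^ 0x12  -> 0x5203
 *   cut 0x03       -> major (0x0 + 1) = 1, minor 0x3  =>  ES1.3
 */
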
diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c
index 96882ffde67e..3b81e1d75a97 100644
--- a/drivers/soc/rockchip/grf.c
+++ b/drivers/soc/rockchip/grf.c
@@ -66,9 +66,11 @@ static const struct rockchip_grf_info rk3228_grf __initconst = {
};
#define RK3288_GRF_SOC_CON0 0x244
+#define RK3288_GRF_SOC_CON2 0x24c
static const struct rockchip_grf_value rk3288_defaults[] __initconst = {
{ "jtag switching", RK3288_GRF_SOC_CON0, HIWORD_UPDATE(0, 1, 12) },
+ { "pwm select", RK3288_GRF_SOC_CON2, HIWORD_UPDATE(1, 1, 0) },
};
static const struct rockchip_grf_info rk3288_grf __initconst = {
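
A brief sketch of the hi-word mask convention behind the new "pwm select" default (the example macro mirrors the HIWORD_UPDATE helper used by this file; treat it as illustrative): Rockchip GRF registers carry a 16-bit write-enable mask in bits [31:16], so the value written for bit 0 updates only that bit and leaves the rest of RK3288_GRF_SOC_CON2 untouched.

/* Illustrative expansion of the hi-word update pattern. */
#define EXAMPLE_HIWORD_UPDATE(val, mask, shift) \
	(((val) << (shift)) | ((mask) << ((shift) + 16)))

/* EXAMPLE_HIWORD_UPDATE(1, 1, 0) == 0x00010001: set bit 0, enable bit 16 */
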
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 0df258518693..5648e5c09ef5 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -272,6 +272,14 @@ static const char * const tegra30_reset_sources[] = {
"WATCHDOG",
"SENSOR",
"SW_MAIN",
+ "LP0"
+};
+
+static const char * const tegra210_reset_sources[] = {
+ "POWER_ON_RESET",
+ "WATCHDOG",
+ "SENSOR",
+ "SW_MAIN",
"LP0",
"AOTAG"
};
@@ -656,10 +664,15 @@ static int tegra_genpd_power_on(struct generic_pm_domain *domain)
int err;
err = tegra_powergate_power_up(pg, true);
- if (err)
+ if (err) {
dev_err(dev, "failed to turn on PM domain %s: %d\n",
pg->genpd.name, err);
+ goto out;
+ }
+
+ reset_control_release(pg->reset);
+out:
return err;
}
@@ -669,10 +682,18 @@ static int tegra_genpd_power_off(struct generic_pm_domain *domain)
struct device *dev = pg->pmc->dev;
int err;
+ err = reset_control_acquire(pg->reset);
+ if (err < 0) {
+ pr_err("failed to acquire resets: %d\n", err);
+ return err;
+ }
+
err = tegra_powergate_power_down(pg);
- if (err)
+ if (err) {
dev_err(dev, "failed to turn off PM domain %s: %d\n",
pg->genpd.name, err);
+ reset_control_release(pg->reset);
+ }
return err;
}
@@ -937,38 +958,53 @@ static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
struct device *dev = pg->pmc->dev;
int err;
- pg->reset = of_reset_control_array_get_exclusive(np);
+ pg->reset = of_reset_control_array_get_exclusive_released(np);
if (IS_ERR(pg->reset)) {
err = PTR_ERR(pg->reset);
dev_err(dev, "failed to get device resets: %d\n", err);
return err;
}
- if (off)
+ err = reset_control_acquire(pg->reset);
+ if (err < 0) {
+ pr_err("failed to acquire resets: %d\n", err);
+ goto out;
+ }
+
+ if (off) {
err = reset_control_assert(pg->reset);
- else
+ } else {
err = reset_control_deassert(pg->reset);
+ if (err < 0)
+ goto out;
- if (err)
+ reset_control_release(pg->reset);
+ }
+
+out:
+ if (err) {
+ reset_control_release(pg->reset);
reset_control_put(pg->reset);
+ }
return err;
}
-static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
+static int tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
{
struct device *dev = pmc->dev;
struct tegra_powergate *pg;
- int id, err;
+ int id, err = 0;
bool off;
pg = kzalloc(sizeof(*pg), GFP_KERNEL);
if (!pg)
- return;
+ return -ENOMEM;
id = tegra_powergate_lookup(pmc, np->name);
if (id < 0) {
dev_err(dev, "powergate lookup failed for %pOFn: %d\n", np, id);
+ err = -ENODEV;
goto free_mem;
}
@@ -1021,7 +1057,7 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
dev_dbg(dev, "added PM domain %s\n", pg->genpd.name);
- return;
+ return 0;
remove_genpd:
pm_genpd_remove(&pg->genpd);
@@ -1040,25 +1076,67 @@ set_available:
free_mem:
kfree(pg);
+
+ return err;
}
-static void tegra_powergate_init(struct tegra_pmc *pmc,
- struct device_node *parent)
+static int tegra_powergate_init(struct tegra_pmc *pmc,
+ struct device_node *parent)
{
struct device_node *np, *child;
- unsigned int i;
+ int err = 0;
+
+ np = of_get_child_by_name(parent, "powergates");
+ if (!np)
+ return 0;
+
+ for_each_child_of_node(np, child) {
+ err = tegra_powergate_add(pmc, child);
+ if (err < 0) {
+ of_node_put(child);
+ break;
+ }
+ }
+
+ of_node_put(np);
+
+ return err;
+}
+
+static void tegra_powergate_remove(struct generic_pm_domain *genpd)
+{
+ struct tegra_powergate *pg = to_powergate(genpd);
+
+ reset_control_put(pg->reset);
+
+ while (pg->num_clks--)
+ clk_put(pg->clks[pg->num_clks]);
+
+ kfree(pg->clks);
- /* Create a bitmap of the available and valid partitions */
- for (i = 0; i < pmc->soc->num_powergates; i++)
- if (pmc->soc->powergates[i])
- set_bit(i, pmc->powergates_available);
+ set_bit(pg->id, pmc->powergates_available);
+
+ kfree(pg);
+}
+
+static void tegra_powergate_remove_all(struct device_node *parent)
+{
+ struct generic_pm_domain *genpd;
+ struct device_node *np, *child;
np = of_get_child_by_name(parent, "powergates");
if (!np)
return;
- for_each_child_of_node(np, child)
- tegra_powergate_add(pmc, child);
+ for_each_child_of_node(np, child) {
+ of_genpd_del_provider(child);
+
+ genpd = of_genpd_remove_last(child);
+ if (IS_ERR(genpd))
+ continue;
+
+ tegra_powergate_remove(genpd);
+ }
of_node_put(np);
}
@@ -1709,13 +1787,16 @@ static int tegra_pmc_pinctrl_init(struct tegra_pmc *pmc)
static ssize_t reset_reason_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- u32 value, rst_src;
+ u32 value;
value = tegra_pmc_readl(pmc, pmc->soc->regs->rst_status);
- rst_src = (value & pmc->soc->regs->rst_source_mask) >>
- pmc->soc->regs->rst_source_shift;
+ value &= pmc->soc->regs->rst_source_mask;
+ value >>= pmc->soc->regs->rst_source_shift;
+
+ if (WARN_ON(value >= pmc->soc->num_reset_sources))
+ return sprintf(buf, "%s\n", "UNKNOWN");
- return sprintf(buf, "%s\n", pmc->soc->reset_sources[rst_src]);
+ return sprintf(buf, "%s\n", pmc->soc->reset_sources[value]);
}
static DEVICE_ATTR_RO(reset_reason);
@@ -1723,13 +1804,16 @@ static DEVICE_ATTR_RO(reset_reason);
static ssize_t reset_level_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- u32 value, rst_lvl;
+ u32 value;
value = tegra_pmc_readl(pmc, pmc->soc->regs->rst_status);
- rst_lvl = (value & pmc->soc->regs->rst_level_mask) >>
- pmc->soc->regs->rst_level_shift;
+ value &= pmc->soc->regs->rst_level_mask;
+ value >>= pmc->soc->regs->rst_level_shift;
- return sprintf(buf, "%s\n", pmc->soc->reset_levels[rst_lvl]);
+ if (WARN_ON(value >= pmc->soc->num_reset_levels))
+ return sprintf(buf, "%s\n", "UNKNOWN");
+
+ return sprintf(buf, "%s\n", pmc->soc->reset_levels[value]);
}
static DEVICE_ATTR_RO(reset_level);
@@ -1999,7 +2083,7 @@ static int tegra_pmc_probe(struct platform_device *pdev)
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
err = tegra_powergate_debugfs_init();
if (err < 0)
- return err;
+ goto cleanup_sysfs;
}
err = register_restart_handler(&tegra_pmc_restart_handler);
@@ -2013,9 +2097,13 @@ static int tegra_pmc_probe(struct platform_device *pdev)
if (err)
goto cleanup_restart_handler;
+ err = tegra_powergate_init(pmc, pdev->dev.of_node);
+ if (err < 0)
+ goto cleanup_powergates;
+
err = tegra_pmc_irq_init(pmc);
if (err < 0)
- goto cleanup_restart_handler;
+ goto cleanup_powergates;
mutex_lock(&pmc->powergates_lock);
iounmap(pmc->base);
@@ -2026,10 +2114,15 @@ static int tegra_pmc_probe(struct platform_device *pdev)
return 0;
+cleanup_powergates:
+ tegra_powergate_remove_all(pdev->dev.of_node);
cleanup_restart_handler:
unregister_restart_handler(&tegra_pmc_restart_handler);
cleanup_debugfs:
debugfs_remove(pmc->debugfs);
+cleanup_sysfs:
+ device_remove_file(&pdev->dev, &dev_attr_reset_reason);
+ device_remove_file(&pdev->dev, &dev_attr_reset_level);
return err;
}
@@ -2185,7 +2278,7 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
.init = tegra20_pmc_init,
.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
.reset_sources = tegra30_reset_sources,
- .num_reset_sources = 5,
+ .num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
.reset_levels = NULL,
.num_reset_levels = 0,
};
@@ -2236,7 +2329,7 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = {
.init = tegra20_pmc_init,
.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
.reset_sources = tegra30_reset_sources,
- .num_reset_sources = 5,
+ .num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
.reset_levels = NULL,
.num_reset_levels = 0,
};
@@ -2347,7 +2440,7 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = {
.init = tegra20_pmc_init,
.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
.reset_sources = tegra30_reset_sources,
- .num_reset_sources = 5,
+ .num_reset_sources = ARRAY_SIZE(tegra30_reset_sources),
.reset_levels = NULL,
.num_reset_levels = 0,
};
@@ -2452,8 +2545,8 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = {
.regs = &tegra20_pmc_regs,
.init = tegra20_pmc_init,
.setup_irq_polarity = tegra20_pmc_setup_irq_polarity,
- .reset_sources = tegra30_reset_sources,
- .num_reset_sources = 5,
+ .reset_sources = tegra210_reset_sources,
+ .num_reset_sources = ARRAY_SIZE(tegra210_reset_sources),
.reset_levels = NULL,
.num_reset_levels = 0,
};
@@ -2578,9 +2671,9 @@ static const struct tegra_pmc_soc tegra186_pmc_soc = {
.init = NULL,
.setup_irq_polarity = tegra186_pmc_setup_irq_polarity,
.reset_sources = tegra186_reset_sources,
- .num_reset_sources = 14,
+ .num_reset_sources = ARRAY_SIZE(tegra186_reset_sources),
.reset_levels = tegra186_reset_levels,
- .num_reset_levels = 3,
+ .num_reset_levels = ARRAY_SIZE(tegra186_reset_levels),
.num_wake_events = ARRAY_SIZE(tegra186_wake_events),
.wake_events = tegra186_wake_events,
};
@@ -2719,6 +2812,7 @@ static int __init tegra_pmc_early_init(void)
const struct of_device_id *match;
struct device_node *np;
struct resource regs;
+ unsigned int i;
bool invert;
mutex_init(&pmc->powergates_lock);
@@ -2775,7 +2869,10 @@ static int __init tegra_pmc_early_init(void)
if (pmc->soc->maybe_tz_only)
pmc->tz_only = tegra_pmc_detect_tz_only(pmc);
- tegra_powergate_init(pmc, np);
+ /* Create a bitmap of the available and valid partitions */
+ for (i = 0; i < pmc->soc->num_powergates; i++)
+ if (pmc->soc->powergates[i])
+ set_bit(i, pmc->powergates_available);
/*
* Invert the interrupt polarity if a PMC device tree node
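
A short sketch of the pattern applied to the reset-source and reset-level tables above (names illustrative): sizing the count with ARRAY_SIZE() keeps it consistent with the table when entries such as "AOTAG" are added.

#include <linux/kernel.h>	/* ARRAY_SIZE() */

static const char * const example_reset_sources[] = {
	"POWER_ON_RESET", "WATCHDOG", "SENSOR", "SW_MAIN", "LP0",
};

/* Stays correct if another entry is appended to the table. */
#define EXAMPLE_NUM_RESET_SOURCES	ARRAY_SIZE(example_reset_sources)
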
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index be4570baad96..dbd6c60b81db 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -45,11 +45,12 @@ config KEYSTONE_NAVIGATOR_DMA
config AMX3_PM
tristate "AMx3 Power Management"
depends on SOC_AM33XX || SOC_AM43XX
- depends on WKUP_M3_IPC && TI_EMIF_SRAM && SRAM
+ depends on WKUP_M3_IPC && TI_EMIF_SRAM && SRAM && RTC_DRV_OMAP
help
Enable power management on AM335x and AM437x. Required for suspend to mem
and standby states on both AM335x and AM437x platforms and for deeper cpuidle
- c-states on AM335x.
+ c-states on AM335x. Also required for RTC-only suspend with DDR in
+ self-refresh low-power mode on AM437x platforms.
config WKUP_M3_IPC
tristate "TI AMx3 Wkup-M3 IPC Driver"
@@ -73,4 +74,10 @@ config TI_SCI_PM_DOMAINS
called ti_sci_pm_domains. Note this is needed early in boot before
rootfs may be available.
+config TI_SCI_INTA_MSI_DOMAIN
+ bool
+ select GENERIC_MSI_IRQ_DOMAIN
+ help
+ Driver to enable Interrupt Aggregator specific MSI Domain.
+
endif # SOC_TI
diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile
index a22edc0b258a..b3868d392d4f 100644
--- a/drivers/soc/ti/Makefile
+++ b/drivers/soc/ti/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_KEYSTONE_NAVIGATOR_DMA) += knav_dma.o
obj-$(CONFIG_AMX3_PM) += pm33xx.o
obj-$(CONFIG_WKUP_M3_IPC) += wkup_m3_ipc.o
obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o
+obj-$(CONFIG_TI_SCI_INTA_MSI_DOMAIN) += ti_sci_inta_msi.o
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index d0dab323651f..fc5802ccb1c0 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -6,6 +6,7 @@
* Vaibhav Bedia, Dave Gerlach
*/
+#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/genalloc.h>
@@ -13,9 +14,12 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/platform_data/pm33xx.h>
#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/rtc/rtc-omap.h>
#include <linux/sizes.h>
#include <linux/sram.h>
#include <linux/suspend.h>
@@ -29,33 +33,162 @@
#define AMX3_PM_SRAM_SYMBOL_OFFSET(sym) ((unsigned long)(sym) - \
(unsigned long)pm_sram->do_wfi)
+#define RTC_SCRATCH_RESUME_REG 0
+#define RTC_SCRATCH_MAGIC_REG 1
+#define RTC_REG_BOOT_MAGIC 0x8cd0 /* RTC */
+#define GIC_INT_SET_PENDING_BASE 0x200
+#define AM43XX_GIC_DIST_BASE 0x48241000
+
+static u32 rtc_magic_val;
+
static int (*am33xx_do_wfi_sram)(unsigned long unused);
static phys_addr_t am33xx_do_wfi_sram_phys;
static struct gen_pool *sram_pool, *sram_pool_data;
static unsigned long ocmcram_location, ocmcram_location_data;
+static struct rtc_device *omap_rtc;
+static void __iomem *gic_dist_base;
+
static struct am33xx_pm_platform_data *pm_ops;
static struct am33xx_pm_sram_addr *pm_sram;
static struct device *pm33xx_dev;
static struct wkup_m3_ipc *m3_ipc;
+#ifdef CONFIG_SUSPEND
+static int rtc_only_idle;
+static int retrigger_irq;
static unsigned long suspend_wfi_flags;
+static struct wkup_m3_wakeup_src wakeup_src = {.irq_nr = 0,
+ .src = "Unknown",
+};
+
+static struct wkup_m3_wakeup_src rtc_alarm_wakeup = {
+ .irq_nr = 108, .src = "RTC Alarm",
+};
+
+static struct wkup_m3_wakeup_src rtc_ext_wakeup = {
+ .irq_nr = 0, .src = "Ext wakeup",
+};
+#endif
+
static u32 sram_suspend_address(unsigned long addr)
{
return ((unsigned long)am33xx_do_wfi_sram +
AMX3_PM_SRAM_SYMBOL_OFFSET(addr));
}
+static int am33xx_push_sram_idle(void)
+{
+ struct am33xx_pm_ro_sram_data ro_sram_data;
+ int ret;
+ u32 table_addr, ro_data_addr;
+ void *copy_addr;
+
+ ro_sram_data.amx3_pm_sram_data_virt = ocmcram_location_data;
+ ro_sram_data.amx3_pm_sram_data_phys =
+ gen_pool_virt_to_phys(sram_pool_data, ocmcram_location_data);
+ ro_sram_data.rtc_base_virt = pm_ops->get_rtc_base_addr();
+
+ /* Save physical address to calculate resume offset during pm init */
+ am33xx_do_wfi_sram_phys = gen_pool_virt_to_phys(sram_pool,
+ ocmcram_location);
+
+ am33xx_do_wfi_sram = sram_exec_copy(sram_pool, (void *)ocmcram_location,
+ pm_sram->do_wfi,
+ *pm_sram->do_wfi_sz);
+ if (!am33xx_do_wfi_sram) {
+ dev_err(pm33xx_dev,
+ "PM: %s: am33xx_do_wfi copy to sram failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ table_addr =
+ sram_suspend_address((unsigned long)pm_sram->emif_sram_table);
+ ret = ti_emif_copy_pm_function_table(sram_pool, (void *)table_addr);
+ if (ret) {
+ dev_dbg(pm33xx_dev,
+ "PM: %s: EMIF function copy failed\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
+ ro_data_addr =
+ sram_suspend_address((unsigned long)pm_sram->ro_sram_data);
+ copy_addr = sram_exec_copy(sram_pool, (void *)ro_data_addr,
+ &ro_sram_data,
+ sizeof(ro_sram_data));
+ if (!copy_addr) {
+ dev_err(pm33xx_dev,
+ "PM: %s: ro_sram_data copy to sram failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int __init am43xx_map_gic(void)
+{
+ gic_dist_base = ioremap(AM43XX_GIC_DIST_BASE, SZ_4K);
+
+ if (!gic_dist_base)
+ return -ENOMEM;
+
+ return 0;
+}
+
#ifdef CONFIG_SUSPEND
+struct wkup_m3_wakeup_src rtc_wake_src(void)
+{
+ u32 i;
+
+ i = __raw_readl(pm_ops->get_rtc_base_addr() + 0x44) & 0x40;
+
+ if (i) {
+ retrigger_irq = rtc_alarm_wakeup.irq_nr;
+ return rtc_alarm_wakeup;
+ }
+
+ retrigger_irq = rtc_ext_wakeup.irq_nr;
+
+ return rtc_ext_wakeup;
+}
+
+int am33xx_rtc_only_idle(unsigned long wfi_flags)
+{
+ omap_rtc_power_off_program(&omap_rtc->dev);
+ am33xx_do_wfi_sram(wfi_flags);
+ return 0;
+}
+
static int am33xx_pm_suspend(suspend_state_t suspend_state)
{
int i, ret = 0;
- ret = pm_ops->soc_suspend((unsigned long)suspend_state,
- am33xx_do_wfi_sram, suspend_wfi_flags);
+ if (suspend_state == PM_SUSPEND_MEM &&
+ pm_ops->check_off_mode_enable()) {
+ pm_ops->prepare_rtc_suspend();
+ pm_ops->save_context();
+ suspend_wfi_flags |= WFI_FLAG_RTC_ONLY;
+ clk_save_context();
+ ret = pm_ops->soc_suspend(suspend_state, am33xx_rtc_only_idle,
+ suspend_wfi_flags);
+
+ suspend_wfi_flags &= ~WFI_FLAG_RTC_ONLY;
+
+ if (!ret) {
+ clk_restore_context();
+ pm_ops->restore_context();
+ m3_ipc->ops->set_rtc_only(m3_ipc);
+ am33xx_push_sram_idle();
+ }
+ } else {
+ ret = pm_ops->soc_suspend(suspend_state, am33xx_do_wfi_sram,
+ suspend_wfi_flags);
+ }
if (ret) {
dev_err(pm33xx_dev, "PM: Kernel suspend failure\n");
@@ -77,8 +210,20 @@ static int am33xx_pm_suspend(suspend_state_t suspend_state)
"PM: CM3 returned unknown result = %d\n", i);
ret = -1;
}
+
+ /* print the wakeup reason */
+ if (rtc_only_idle) {
+ wakeup_src = rtc_wake_src();
+ pr_info("PM: Wakeup source %s\n", wakeup_src.src);
+ } else {
+ pr_info("PM: Wakeup source %s\n",
+ m3_ipc->ops->request_wake_src(m3_ipc));
+ }
}
+ if (suspend_state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable())
+ pm_ops->prepare_rtc_resume();
+
return ret;
}
@@ -101,6 +246,18 @@ static int am33xx_pm_enter(suspend_state_t suspend_state)
static int am33xx_pm_begin(suspend_state_t state)
{
int ret = -EINVAL;
+ struct nvmem_device *nvmem;
+
+ if (state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable()) {
+ nvmem = devm_nvmem_device_get(&omap_rtc->dev,
+ "omap_rtc_scratch0");
+ if (nvmem)
+ nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
+ (void *)&rtc_magic_val);
+ rtc_only_idle = 1;
+ } else {
+ rtc_only_idle = 0;
+ }
switch (state) {
case PM_SUSPEND_MEM:
@@ -116,7 +273,28 @@ static int am33xx_pm_begin(suspend_state_t state)
static void am33xx_pm_end(void)
{
+ u32 val = 0;
+ struct nvmem_device *nvmem;
+
+ nvmem = devm_nvmem_device_get(&omap_rtc->dev, "omap_rtc_scratch0");
m3_ipc->ops->finish_low_power(m3_ipc);
+ if (rtc_only_idle) {
+ if (retrigger_irq)
+ /*
+ * 32 bits of an Interrupt Set-Pending register
+ * correspond to 32 interrupts. Compute the bit
+ * offset of the interrupt and set that particular
+ * bit. Compute the register offset by dividing the
+ * interrupt number by 32 and multiplying by 4.
+ */
+ writel_relaxed(1 << (retrigger_irq & 31),
+ gic_dist_base + GIC_INT_SET_PENDING_BASE
+ + retrigger_irq / 32 * 4);
+ nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
+ (void *)&val);
+ }
+
+ rtc_only_idle = 0;
}
static int am33xx_pm_valid(suspend_state_t state)
@@ -219,51 +397,37 @@ mpu_put_node:
return ret;
}
-static int am33xx_push_sram_idle(void)
+static int am33xx_pm_rtc_setup(void)
{
- struct am33xx_pm_ro_sram_data ro_sram_data;
- int ret;
- u32 table_addr, ro_data_addr;
- void *copy_addr;
-
- ro_sram_data.amx3_pm_sram_data_virt = ocmcram_location_data;
- ro_sram_data.amx3_pm_sram_data_phys =
- gen_pool_virt_to_phys(sram_pool_data, ocmcram_location_data);
- ro_sram_data.rtc_base_virt = pm_ops->get_rtc_base_addr();
+ struct device_node *np;
+ unsigned long val = 0;
+ struct nvmem_device *nvmem;
- /* Save physical address to calculate resume offset during pm init */
- am33xx_do_wfi_sram_phys = gen_pool_virt_to_phys(sram_pool,
- ocmcram_location);
+ np = of_find_node_by_name(NULL, "rtc");
- am33xx_do_wfi_sram = sram_exec_copy(sram_pool, (void *)ocmcram_location,
- pm_sram->do_wfi,
- *pm_sram->do_wfi_sz);
- if (!am33xx_do_wfi_sram) {
- dev_err(pm33xx_dev,
- "PM: %s: am33xx_do_wfi copy to sram failed\n",
- __func__);
- return -ENODEV;
- }
-
- table_addr =
- sram_suspend_address((unsigned long)pm_sram->emif_sram_table);
- ret = ti_emif_copy_pm_function_table(sram_pool, (void *)table_addr);
- if (ret) {
- dev_dbg(pm33xx_dev,
- "PM: %s: EMIF function copy failed\n", __func__);
- return -EPROBE_DEFER;
- }
+ if (of_device_is_available(np)) {
+ omap_rtc = rtc_class_open("rtc0");
+ if (!omap_rtc) {
+ pr_warn("PM: rtc0 not available");
+ return -EPROBE_DEFER;
+ }
- ro_data_addr =
- sram_suspend_address((unsigned long)pm_sram->ro_sram_data);
- copy_addr = sram_exec_copy(sram_pool, (void *)ro_data_addr,
- &ro_sram_data,
- sizeof(ro_sram_data));
- if (!copy_addr) {
- dev_err(pm33xx_dev,
- "PM: %s: ro_sram_data copy to sram failed\n",
- __func__);
- return -ENODEV;
+ nvmem = devm_nvmem_device_get(&omap_rtc->dev,
+ "omap_rtc_scratch0");
+ if (nvmem) {
+ nvmem_device_read(nvmem, RTC_SCRATCH_MAGIC_REG * 4,
+ 4, (void *)&rtc_magic_val);
+ if ((rtc_magic_val & 0xffff) != RTC_REG_BOOT_MAGIC)
+ pr_warn("PM: bootloader does not support rtc-only!\n");
+
+ nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4,
+ 4, (void *)&val);
+ val = pm_sram->resume_address;
+ nvmem_device_write(nvmem, RTC_SCRATCH_RESUME_REG * 4,
+ 4, (void *)&val);
+ }
+ } else {
+ pr_warn("PM: no-rtc available, rtc-only mode disabled.\n");
}
return 0;
@@ -284,34 +448,42 @@ static int am33xx_pm_probe(struct platform_device *pdev)
return -ENODEV;
}
+ ret = am43xx_map_gic();
+ if (ret) {
+ pr_err("PM: Could not ioremap GIC base\n");
+ return ret;
+ }
+
pm_sram = pm_ops->get_sram_addrs();
if (!pm_sram) {
dev_err(dev, "PM: Cannot get PM asm function addresses!!\n");
return -ENODEV;
}
+ m3_ipc = wkup_m3_ipc_get();
+ if (!m3_ipc) {
+ pr_err("PM: Cannot get wkup_m3_ipc handle\n");
+ return -EPROBE_DEFER;
+ }
+
pm33xx_dev = dev;
ret = am33xx_pm_alloc_sram();
if (ret)
return ret;
- ret = am33xx_push_sram_idle();
+ ret = am33xx_pm_rtc_setup();
if (ret)
goto err_free_sram;
- m3_ipc = wkup_m3_ipc_get();
- if (!m3_ipc) {
- dev_dbg(dev, "PM: Cannot get wkup_m3_ipc handle\n");
- ret = -EPROBE_DEFER;
+ ret = am33xx_push_sram_idle();
+ if (ret)
goto err_free_sram;
- }
am33xx_pm_set_ipc_ops();
#ifdef CONFIG_SUSPEND
suspend_set_ops(&am33xx_pm_ops);
-#endif /* CONFIG_SUSPEND */
/*
* For a system suspend we must flush the caches, we want
@@ -323,6 +495,7 @@ static int am33xx_pm_probe(struct platform_device *pdev)
suspend_wfi_flags |= WFI_FLAG_SELF_REFRESH;
suspend_wfi_flags |= WFI_FLAG_SAVE_EMIF;
suspend_wfi_flags |= WFI_FLAG_WAKE_M3;
+#endif /* CONFIG_SUSPEND */
ret = pm_ops->init();
if (ret) {
diff --git a/drivers/soc/ti/ti_sci_inta_msi.c b/drivers/soc/ti/ti_sci_inta_msi.c
new file mode 100644
index 000000000000..0eb9462f609e
--- /dev/null
+++ b/drivers/soc/ti/ti_sci_inta_msi.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments' K3 Interrupt Aggregator MSI bus
+ *
+ * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Lokesh Vutla <lokeshvutla@ti.com>
+ */
+
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+static void ti_sci_inta_msi_write_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ /* Nothing to do */
+}
+
+static void ti_sci_inta_msi_compose_msi_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ /* Nothing to do */
+}
+
+static void ti_sci_inta_msi_update_chip_ops(struct msi_domain_info *info)
+{
+ struct irq_chip *chip = info->chip;
+
+ if (WARN_ON(!chip))
+ return;
+
+ chip->irq_request_resources = irq_chip_request_resources_parent;
+ chip->irq_release_resources = irq_chip_release_resources_parent;
+ chip->irq_compose_msi_msg = ti_sci_inta_msi_compose_msi_msg;
+ chip->irq_write_msi_msg = ti_sci_inta_msi_write_msg;
+ chip->irq_set_type = irq_chip_set_type_parent;
+ chip->irq_unmask = irq_chip_unmask_parent;
+ chip->irq_mask = irq_chip_mask_parent;
+ chip->irq_ack = irq_chip_ack_parent;
+}
+
+struct irq_domain *ti_sci_inta_msi_create_irq_domain(struct fwnode_handle *fwnode,
+ struct msi_domain_info *info,
+ struct irq_domain *parent)
+{
+ struct irq_domain *domain;
+
+ ti_sci_inta_msi_update_chip_ops(info);
+
+ domain = msi_create_irq_domain(fwnode, info, parent);
+ if (domain)
+ irq_domain_update_bus_token(domain, DOMAIN_BUS_TI_SCI_INTA_MSI);
+
+ return domain;
+}
+EXPORT_SYMBOL_GPL(ti_sci_inta_msi_create_irq_domain);
+
+static void ti_sci_inta_msi_free_descs(struct device *dev)
+{
+ struct msi_desc *desc, *tmp;
+
+ list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
+ list_del(&desc->list);
+ free_msi_entry(desc);
+ }
+}
+
+static int ti_sci_inta_msi_alloc_descs(struct device *dev,
+ struct ti_sci_resource *res)
+{
+ struct msi_desc *msi_desc;
+ int set, i, count = 0;
+
+ for (set = 0; set < res->sets; set++) {
+ for (i = 0; i < res->desc[set].num; i++) {
+ msi_desc = alloc_msi_entry(dev, 1, NULL);
+ if (!msi_desc) {
+ ti_sci_inta_msi_free_descs(dev);
+ return -ENOMEM;
+ }
+
+ msi_desc->inta.dev_index = res->desc[set].start + i;
+ INIT_LIST_HEAD(&msi_desc->list);
+ list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
+ count++;
+ }
+ }
+
+ return count;
+}
+
+int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev,
+ struct ti_sci_resource *res)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct irq_domain *msi_domain;
+ int ret, nvec;
+
+ msi_domain = dev_get_msi_domain(dev);
+ if (!msi_domain)
+ return -EINVAL;
+
+ if (pdev->id < 0)
+ return -ENODEV;
+
+ nvec = ti_sci_inta_msi_alloc_descs(dev, res);
+ if (nvec <= 0)
+ return nvec;
+
+ ret = msi_domain_alloc_irqs(msi_domain, dev, nvec);
+ if (ret) {
+ dev_err(dev, "Failed to allocate IRQs %d\n", ret);
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ ti_sci_inta_msi_free_descs(&pdev->dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_alloc_irqs);
+
+void ti_sci_inta_msi_domain_free_irqs(struct device *dev)
+{
+ msi_domain_free_irqs(dev->msi_domain, dev);
+ ti_sci_inta_msi_free_descs(dev);
+}
+EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_free_irqs);
+
+unsigned int ti_sci_inta_msi_get_virq(struct device *dev, u32 dev_index)
+{
+ struct msi_desc *desc;
+
+ for_each_msi_entry(desc, dev)
+ if (desc->inta.dev_index == dev_index)
+ return desc->irq;
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(ti_sci_inta_msi_get_virq);
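
A hedged sketch of how a consumer might use the two exported helpers above (the device, resource and index names are hypothetical): allocate interrupts for a ti_sci_resource range, then translate a device index into a Linux virq.

/* Illustrative consumer-side usage; error handling kept minimal. */
static int example_request_inta_irq(struct device *dev,
				    struct ti_sci_resource *vint_res,
				    u32 dev_index)
{
	int virq, ret;

	ret = ti_sci_inta_msi_domain_alloc_irqs(dev, vint_res);
	if (ret)
		return ret;

	virq = ti_sci_inta_msi_get_virq(dev, dev_index);
	if (virq <= 0)
		return -ENODEV;

	return virq;		/* Linux virq for the given device index */
}
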
diff --git a/drivers/soc/xilinx/zynqmp_pm_domains.c b/drivers/soc/xilinx/zynqmp_pm_domains.c
index 354d256e6e00..600f57cf0c2e 100644
--- a/drivers/soc/xilinx/zynqmp_pm_domains.c
+++ b/drivers/soc/xilinx/zynqmp_pm_domains.c
@@ -23,6 +23,8 @@
/* Flag stating if PM nodes mapped to the PM domain has been requested */
#define ZYNQMP_PM_DOMAIN_REQUESTED BIT(0)
+static const struct zynqmp_eemi_ops *eemi_ops;
+
/**
* struct zynqmp_pm_domain - Wrapper around struct generic_pm_domain
* @gpd: Generic power domain
@@ -71,9 +73,8 @@ static int zynqmp_gpd_power_on(struct generic_pm_domain *domain)
{
int ret;
struct zynqmp_pm_domain *pd;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- if (!eemi_ops || !eemi_ops->set_requirement)
+ if (!eemi_ops->set_requirement)
return -ENXIO;
pd = container_of(domain, struct zynqmp_pm_domain, gpd);
@@ -107,9 +108,8 @@ static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
struct zynqmp_pm_domain *pd;
u32 capabilities = 0;
bool may_wakeup;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- if (!eemi_ops || !eemi_ops->set_requirement)
+ if (!eemi_ops->set_requirement)
return -ENXIO;
pd = container_of(domain, struct zynqmp_pm_domain, gpd);
@@ -160,9 +160,8 @@ static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain,
{
int ret;
struct zynqmp_pm_domain *pd;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- if (!eemi_ops || !eemi_ops->request_node)
+ if (!eemi_ops->request_node)
return -ENXIO;
pd = container_of(domain, struct zynqmp_pm_domain, gpd);
@@ -197,9 +196,8 @@ static void zynqmp_gpd_detach_dev(struct generic_pm_domain *domain,
{
int ret;
struct zynqmp_pm_domain *pd;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- if (!eemi_ops || !eemi_ops->release_node)
+ if (!eemi_ops->release_node)
return;
pd = container_of(domain, struct zynqmp_pm_domain, gpd);
@@ -266,6 +264,10 @@ static int zynqmp_gpd_probe(struct platform_device *pdev)
struct zynqmp_pm_domain *pd;
struct device *dev = &pdev->dev;
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
pd = devm_kcalloc(dev, ZYNQMP_NUM_DOMAINS, sizeof(*pd), GFP_KERNEL);
if (!pd)
return -ENOMEM;
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
index 771cb59b9d22..1b9d14411a15 100644
--- a/drivers/soc/xilinx/zynqmp_power.c
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -31,6 +31,7 @@ static const char *const suspend_modes[] = {
};
static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD;
+static const struct zynqmp_eemi_ops *eemi_ops;
enum pm_api_cb_id {
PM_INIT_SUSPEND_CB = 30,
@@ -92,9 +93,8 @@ static ssize_t suspend_mode_store(struct device *dev,
const char *buf, size_t count)
{
int md, ret = -EINVAL;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
- if (!eemi_ops || !eemi_ops->set_suspend_mode)
+ if (!eemi_ops->set_suspend_mode)
return ret;
for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++)
@@ -120,9 +120,11 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
int ret, irq;
u32 pm_api_version;
- const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
- if (!eemi_ops || !eemi_ops->get_api_version || !eemi_ops->init_finalize)
+ if (!eemi_ops->get_api_version || !eemi_ops->init_finalize)
return -ENXIO;
eemi_ops->init_finalize();
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 9f83e1b17aa1..9850a0efe85a 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
+#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -138,6 +139,7 @@
#define SPI_AUTOSUSPEND_TIMEOUT 3000
enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};
+static const struct zynqmp_eemi_ops *eemi_ops;
/**
* struct zynqmp_qspi - Defines qspi driver instance
@@ -1021,6 +1023,10 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
struct resource *res;
struct device *dev = &pdev->dev;
+ eemi_ops = zynqmp_pm_get_eemi_ops();
+ if (IS_ERR(eemi_ops))
+ return PTR_ERR(eemi_ops);
+
master = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
if (!master)
return -ENOMEM;
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
index 1ba4a5154fb5..64037b0a8387 100644
--- a/drivers/staging/media/imx/imx-ic-prpencvf.c
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
@@ -1266,7 +1266,7 @@ static int prp_registered(struct v4l2_subdev *sd)
if (ret)
return ret;
- ret = imx_media_capture_device_register(priv->vdev);
+ ret = imx_media_capture_device_register(priv->md, priv->vdev);
if (ret)
return ret;
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
index b7ce9d439279..9430c835c434 100644
--- a/drivers/staging/media/imx/imx-media-capture.c
+++ b/drivers/staging/media/imx/imx-media-capture.c
@@ -701,7 +701,8 @@ void imx_media_capture_device_error(struct imx_media_video_dev *vdev)
}
EXPORT_SYMBOL_GPL(imx_media_capture_device_error);
-int imx_media_capture_device_register(struct imx_media_video_dev *vdev)
+int imx_media_capture_device_register(struct imx_media_dev *md,
+ struct imx_media_video_dev *vdev)
{
struct capture_priv *priv = to_capture_priv(vdev);
struct v4l2_subdev *sd = priv->src_sd;
@@ -710,8 +711,7 @@ int imx_media_capture_device_register(struct imx_media_video_dev *vdev)
struct v4l2_subdev_format fmt_src;
int ret;
- /* get media device */
- priv->md = dev_get_drvdata(sd->v4l2_dev->dev);
+ priv->md = md;
vfd->v4l2_dev = sd->v4l2_dev;
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index 28fe66052cc7..1d248aca40a9 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -1812,7 +1812,7 @@ static int csi_registered(struct v4l2_subdev *sd)
if (ret)
goto free_fim;
- ret = imx_media_capture_device_register(priv->vdev);
+ ret = imx_media_capture_device_register(priv->md, priv->vdev);
if (ret)
goto free_fim;
diff --git a/drivers/staging/media/imx/imx-media.h b/drivers/staging/media/imx/imx-media.h
index eb59ba0c3b62..6587aa49e005 100644
--- a/drivers/staging/media/imx/imx-media.h
+++ b/drivers/staging/media/imx/imx-media.h
@@ -268,7 +268,8 @@ int imx_media_of_add_csi(struct imx_media_dev *imxmd,
struct imx_media_video_dev *
imx_media_capture_device_init(struct v4l2_subdev *src_sd, int pad);
void imx_media_capture_device_remove(struct imx_media_video_dev *vdev);
-int imx_media_capture_device_register(struct imx_media_video_dev *vdev);
+int imx_media_capture_device_register(struct imx_media_dev *md,
+ struct imx_media_video_dev *vdev);
void imx_media_capture_device_unregister(struct imx_media_video_dev *vdev);
struct imx_media_buffer *
imx_media_capture_device_next_buf(struct imx_media_video_dev *vdev);
diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
index 18eb5d3ecf10..a708a0340eb1 100644
--- a/drivers/staging/media/imx/imx7-media-csi.c
+++ b/drivers/staging/media/imx/imx7-media-csi.c
@@ -1126,7 +1126,7 @@ static int imx7_csi_registered(struct v4l2_subdev *sd)
if (ret < 0)
return ret;
- ret = imx_media_capture_device_register(csi->vdev);
+ ret = imx_media_capture_device_register(csi->imxmd, csi->vdev);
if (ret < 0)
return ret;
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c b/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c
index 58721c46fba4..8bbc905b26c8 100644
--- a/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c
+++ b/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c
@@ -352,7 +352,7 @@ static int rockchip_vpu_video_device_register(struct rockchip_vpu_dev *vpu)
vpu->vfd_enc = vfd;
video_set_drvdata(vfd, vpu);
- ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
if (ret) {
v4l2_err(&vpu->v4l2_dev, "Failed to register video device\n");
goto err_free_dev;
@@ -463,6 +463,8 @@ static int rockchip_vpu_probe(struct platform_device *pdev)
vpu->mdev.dev = vpu->dev;
strscpy(vpu->mdev.model, DRIVER_NAME, sizeof(vpu->mdev.model));
+ strscpy(vpu->mdev.bus_info, "platform: " DRIVER_NAME,
+ sizeof(vpu->mdev.bus_info));
media_device_init(&vpu->mdev);
vpu->v4l2_dev.mdev = &vpu->mdev;
@@ -480,15 +482,18 @@ static int rockchip_vpu_probe(struct platform_device *pdev)
return 0;
err_video_dev_unreg:
if (vpu->vfd_enc) {
+ v4l2_m2m_unregister_media_controller(vpu->m2m_dev);
video_unregister_device(vpu->vfd_enc);
video_device_release(vpu->vfd_enc);
}
err_m2m_rel:
+ media_device_cleanup(&vpu->mdev);
v4l2_m2m_release(vpu->m2m_dev);
err_v4l2_unreg:
v4l2_device_unregister(&vpu->v4l2_dev);
err_clk_unprepare:
clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
+ pm_runtime_dont_use_autosuspend(vpu->dev);
pm_runtime_disable(vpu->dev);
return ret;
}
@@ -500,15 +505,16 @@ static int rockchip_vpu_remove(struct platform_device *pdev)
v4l2_info(&vpu->v4l2_dev, "Removing %s\n", pdev->name);
media_device_unregister(&vpu->mdev);
- v4l2_m2m_unregister_media_controller(vpu->m2m_dev);
- v4l2_m2m_release(vpu->m2m_dev);
- media_device_cleanup(&vpu->mdev);
if (vpu->vfd_enc) {
+ v4l2_m2m_unregister_media_controller(vpu->m2m_dev);
video_unregister_device(vpu->vfd_enc);
video_device_release(vpu->vfd_enc);
}
+ media_device_cleanup(&vpu->mdev);
+ v4l2_m2m_release(vpu->m2m_dev);
v4l2_device_unregister(&vpu->v4l2_dev);
clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
+ pm_runtime_dont_use_autosuspend(vpu->dev);
pm_runtime_disable(vpu->dev);
return 0;
}
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c b/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c
index fb5e36aedd8c..dcbfc3cbc9f3 100644
--- a/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c
+++ b/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c
@@ -152,9 +152,10 @@ static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct rockchip_vpu_dev *vpu = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
strscpy(cap->driver, vpu->dev->driver->name, sizeof(cap->driver));
- strscpy(cap->card, vpu->vfd_enc->name, sizeof(cap->card));
+ strscpy(cap->card, vdev->name, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info), "platform: %s",
vpu->dev->driver->name);
return 0;
diff --git a/drivers/target/iscsi/cxgbit/Makefile b/drivers/target/iscsi/cxgbit/Makefile
index d16aaae7ba2a..0dcaf2006f78 100644
--- a/drivers/target/iscsi/cxgbit/Makefile
+++ b/drivers/target/iscsi/cxgbit/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
-ccflags-y += -Idrivers/net/ethernet/chelsio/libcxgb
-ccflags-y += -Idrivers/target/iscsi
+ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4
+ccflags-y += -I $(srctree)/drivers/net/ethernet/chelsio/libcxgb
+ccflags-y += -I $(srctree)/drivers/target/iscsi
obj-$(CONFIG_ISCSI_TARGET_CXGB4) += cxgbit.o
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 0842b6e6af82..48963eab32f5 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -419,9 +419,35 @@ static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
return true;
}
+static struct tee_shm_pool *optee_config_dyn_shm(void)
+{
+ struct tee_shm_pool_mgr *priv_mgr;
+ struct tee_shm_pool_mgr *dmabuf_mgr;
+ void *rc;
+
+ rc = optee_shm_pool_alloc_pages();
+ if (IS_ERR(rc))
+ return rc;
+ priv_mgr = rc;
+
+ rc = optee_shm_pool_alloc_pages();
+ if (IS_ERR(rc)) {
+ tee_shm_pool_mgr_destroy(priv_mgr);
+ return rc;
+ }
+ dmabuf_mgr = rc;
+
+ rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+ if (IS_ERR(rc)) {
+ tee_shm_pool_mgr_destroy(priv_mgr);
+ tee_shm_pool_mgr_destroy(dmabuf_mgr);
+ }
+
+ return rc;
+}
+
static struct tee_shm_pool *
-optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm,
- u32 sec_caps)
+optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
{
union {
struct arm_smccc_res smccc;
@@ -436,10 +462,11 @@ optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm,
struct tee_shm_pool_mgr *priv_mgr;
struct tee_shm_pool_mgr *dmabuf_mgr;
void *rc;
+ const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
if (res.result.status != OPTEE_SMC_RETURN_OK) {
- pr_info("shm service not available\n");
+ pr_err("static shm service not available\n");
return ERR_PTR(-ENOENT);
}
@@ -465,28 +492,15 @@ optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm,
}
vaddr = (unsigned long)va;
- /*
- * If OP-TEE can work with unregistered SHM, we will use own pool
- * for private shm
- */
- if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) {
- rc = optee_shm_pool_alloc_pages();
- if (IS_ERR(rc))
- goto err_memunmap;
- priv_mgr = rc;
- } else {
- const size_t sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
-
- rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
- 3 /* 8 bytes aligned */);
- if (IS_ERR(rc))
- goto err_memunmap;
- priv_mgr = rc;
-
- vaddr += sz;
- paddr += sz;
- size -= sz;
- }
+ rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
+ 3 /* 8 bytes aligned */);
+ if (IS_ERR(rc))
+ goto err_memunmap;
+ priv_mgr = rc;
+
+ vaddr += sz;
+ paddr += sz;
+ size -= sz;
rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
if (IS_ERR(rc))
@@ -552,7 +566,7 @@ static optee_invoke_fn *get_invoke_func(struct device_node *np)
static struct optee *optee_probe(struct device_node *np)
{
optee_invoke_fn *invoke_fn;
- struct tee_shm_pool *pool;
+ struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
struct optee *optee = NULL;
void *memremaped_shm = NULL;
struct tee_device *teedev;
@@ -581,13 +595,17 @@ static struct optee *optee_probe(struct device_node *np)
}
/*
- * We have no other option for shared memory, if secure world
- * doesn't have any reserved memory we can use we can't continue.
+ * Try to use dynamic shared memory if possible
*/
- if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
- return ERR_PTR(-EINVAL);
+ if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+ pool = optee_config_dyn_shm();
+
+ /*
+ * If dynamic shared memory is not available or fails, try the static one
+ */
+ if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
+ pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
- pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm, sec_caps);
if (IS_ERR(pool))
return (void *)pool;
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 653aa27a25a4..15bdd25780be 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -3,7 +3,7 @@
#
menuconfig THERMAL
- tristate "Generic Thermal sysfs driver"
+ bool "Generic Thermal sysfs driver"
help
Generic Thermal Sysfs driver offers a generic mechanism for
thermal management. Usually it's made up of one or more thermal
@@ -11,7 +11,7 @@ menuconfig THERMAL
Each thermal zone contains its own temperature, trip points,
cooling devices.
All platforms with ACPI thermal support can use this driver.
- If you want this support, you should say Y or M here.
+ If you want this support, you should say Y here.
if THERMAL
@@ -24,7 +24,6 @@ config THERMAL_STATISTICS
config THERMAL_EMERGENCY_POWEROFF_DELAY_MS
int "Emergency poweroff delay in milli-seconds"
- depends on THERMAL
default 0
help
Thermal subsystem will issue a graceful shutdown when
@@ -149,10 +148,9 @@ config THERMAL_GOV_POWER_ALLOCATOR
allocating and limiting power to devices.
config CPU_THERMAL
- bool "generic cpu cooling support"
+ bool "Generic cpu cooling support"
depends on CPU_FREQ
depends on THERMAL_OF
- depends on THERMAL=y
help
This implements the generic cpu cooling mechanism through frequency
reduction. An ACPI version of this already exists
@@ -200,6 +198,17 @@ config THERMAL_EMULATION
because userland can easily disable the thermal policy by simply
flooding this sysfs node with low temperature values.
+config THERMAL_MMIO
+ tristate "Generic Thermal MMIO driver"
+ depends on OF || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This option enables the generic thermal MMIO driver that will use
+ memory-mapped reads to get the temperature. Any HW/System that
+ allows temperature reading via a single memory-mapped read, be it a
+ register or shared memory, is a potential candidate to work with this
+ driver.
+
config HISI_THERMAL
tristate "Hisilicon thermal driver"
depends on ARCH_HISI || COMPILE_TEST
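
A minimal, purely illustrative sketch of what the THERMAL_MMIO help text above describes: a sensor whose temperature comes from a single memory-mapped read (the assumption that the register already reports millidegrees Celsius is mine, not the driver's actual conversion).

#include <linux/io.h>
#include <linux/thermal.h>

/* Illustrative .get_temp callback for thermal_zone_of_device_ops. */
static int example_mmio_get_temp(void *data, int *temp)
{
	void __iomem *reg = data;

	/* One MMIO read; assume the register reports millidegrees C. */
	*temp = readl(reg);

	return 0;
}
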
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 486d682be047..74a37c7f847a 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -29,6 +29,7 @@ thermal_sys-$(CONFIG_DEVFREQ_THERMAL) += devfreq_cooling.o
# platform thermal drivers
obj-y += broadcom/
+obj-$(CONFIG_THERMAL_MMIO) += thermal_mmio.o
obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o
obj-$(CONFIG_ROCKCHIP_THERMAL) += rockchip_thermal.o
obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o
diff --git a/drivers/thermal/broadcom/sr-thermal.c b/drivers/thermal/broadcom/sr-thermal.c
index 2284cbecedf3..475ce2900771 100644
--- a/drivers/thermal/broadcom/sr-thermal.c
+++ b/drivers/thermal/broadcom/sr-thermal.c
@@ -3,7 +3,6 @@
* Copyright (C) 2018 Broadcom
*/
-#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -100,18 +99,11 @@ static const struct of_device_id sr_thermal_of_match[] = {
};
MODULE_DEVICE_TABLE(of, sr_thermal_of_match);
-static const struct acpi_device_id sr_thermal_acpi_ids[] = {
- { .id = "BRCM0500" },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(acpi, sr_thermal_acpi_ids);
-
static struct platform_driver sr_thermal_driver = {
.probe = sr_thermal_probe,
.driver = {
.name = "sr-thermal",
.of_match_table = sr_thermal_of_match,
- .acpi_match_table = ACPI_PTR(sr_thermal_acpi_ids),
},
};
module_platform_driver(sr_thermal_driver);
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index f7c1f49ec87f..4c5db59a619b 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -1,26 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/drivers/thermal/cpu_cooling.c
*
* Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
- * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
*
- * Copyright (C) 2014 Viresh Kumar <viresh.kumar@linaro.org>
+ * Copyright (C) 2012-2018 Linaro Limited.
*
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
+ * Authors: Amit Daniel <amit.kachhap@linaro.org>
+ * Viresh Kumar <viresh.kumar@linaro.org>
*
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/module.h>
#include <linux/thermal.h>
@@ -99,7 +87,6 @@ struct cpufreq_cooling_device {
unsigned int clipped_freq;
unsigned int max_level;
struct freq_table *freq_table; /* In descending order */
- struct thermal_cooling_device *cdev;
struct cpufreq_policy *policy;
struct list_head node;
struct time_in_idle *idle_time;
@@ -207,8 +194,7 @@ static int update_freq_table(struct cpufreq_cooling_device *cpufreq_cdev,
dev = get_cpu_device(cpu);
if (unlikely(!dev)) {
- dev_warn(&cpufreq_cdev->cdev->device,
- "No cpu device for cpu %d\n", cpu);
+ pr_warn("No cpu device for cpu %d\n", cpu);
return -ENODEV;
}
@@ -458,7 +444,7 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
load = 0;
total_load += load;
- if (trace_thermal_power_cpu_limit_enabled() && load_cpu)
+ if (load_cpu)
load_cpu[i] = load;
i++;
@@ -541,7 +527,6 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
struct cpufreq_policy *policy = cpufreq_cdev->policy;
- power = power > 0 ? power : 0;
last_load = cpufreq_cdev->last_load ?: 1;
normalised_power = (power * 100) / last_load;
target_freq = cpu_power_to_freq(cpufreq_cdev, normalised_power);
@@ -692,7 +677,6 @@ __cpufreq_cooling_register(struct device_node *np,
goto remove_ida;
cpufreq_cdev->clipped_freq = cpufreq_cdev->freq_table[0].frequency;
- cpufreq_cdev->cdev = cdev;
mutex_lock(&cooling_list_lock);
/* Register the notifier for first cpufreq cooling device */
@@ -810,7 +794,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
- thermal_cooling_device_unregister(cpufreq_cdev->cdev);
+ thermal_cooling_device_unregister(cdev);
ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
kfree(cpufreq_cdev->idle_time);
kfree(cpufreq_cdev->freq_table);
diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig
index 2e013eeb4a1d..2c727a820759 100644
--- a/drivers/thermal/intel/Kconfig
+++ b/drivers/thermal/intel/Kconfig
@@ -1,6 +1,5 @@
config INTEL_POWERCLAMP
tristate "Intel PowerClamp idle injection driver"
- depends on THERMAL
depends on X86
depends on CPU_SUP_INTEL
help
diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
index 0c19fcd56a0d..79a7df2baa92 100644
--- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
@@ -220,6 +220,7 @@ static int int3403_add(struct platform_device *pdev)
{
struct int3403_priv *priv;
int result = 0;
+ unsigned long long tmp;
acpi_status status;
priv = devm_kzalloc(&pdev->dev, sizeof(struct int3403_priv),
@@ -234,19 +235,18 @@ static int int3403_add(struct platform_device *pdev)
goto err;
}
- status = acpi_evaluate_integer(priv->adev->handle, "PTYP",
- NULL, &priv->type);
- if (ACPI_FAILURE(status)) {
- unsigned long long tmp;
- status = acpi_evaluate_integer(priv->adev->handle, "_TMP",
- NULL, &tmp);
+ status = acpi_evaluate_integer(priv->adev->handle, "_TMP",
+ NULL, &tmp);
+ if (ACPI_FAILURE(status)) {
+ status = acpi_evaluate_integer(priv->adev->handle, "PTYP",
+ NULL, &priv->type);
if (ACPI_FAILURE(status)) {
result = -EINVAL;
goto err;
- } else {
- priv->type = INT3403_TYPE_SENSOR;
}
+ } else {
+ priv->type = INT3403_TYPE_SENSOR;
}
platform_set_drvdata(pdev, priv);
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index 8e1cf4d789be..2e6071a82da2 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -81,22 +81,13 @@ static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
- struct pci_dev *pci_dev; \
- struct platform_device *pdev; \
- struct proc_thermal_device *proc_dev; \
+ struct proc_thermal_device *proc_dev = dev_get_drvdata(dev); \
\
if (proc_thermal_emum_mode == PROC_THERMAL_NONE) { \
dev_warn(dev, "Attempted to get power limit before device was initialized!\n"); \
return 0; \
} \
\
- if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \
- pdev = to_platform_device(dev); \
- proc_dev = platform_get_drvdata(pdev); \
- } else { \
- pci_dev = to_pci_dev(dev); \
- proc_dev = pci_get_drvdata(pci_dev); \
- } \
return sprintf(buf, "%lu\n",\
(unsigned long)proc_dev->power_limits[index].suffix * 1000); \
}
@@ -274,7 +265,7 @@ static void proc_thermal_notify(acpi_handle handle, u32 event, void *data)
THERMAL_DEVICE_POWER_CAPABILITY_CHANGED);
break;
default:
- dev_err(proc_priv->dev, "Unsupported event [0x%x]\n", event);
+ dev_dbg(proc_priv->dev, "Unsupported event [0x%x]\n", event);
break;
}
}
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 2df059cc07e2..dc5093be553e 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -5,6 +5,9 @@
* Copyright (C) 2013 Texas Instruments
* Copyright (C) 2013 Eduardo Valentin <eduardo.valentin@ti.com>
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/thermal.h>
#include <linux/slab.h>
#include <linux/types.h>
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
index cdb455ffd575..3ce20fec86a2 100644
--- a/drivers/thermal/qcom/Kconfig
+++ b/drivers/thermal/qcom/Kconfig
@@ -1,6 +1,5 @@
config QCOM_TSENS
tristate "Qualcomm TSENS Temperature Alarm"
- depends on THERMAL
depends on QCOM_QFPROM
depends on ARCH_QCOM || COMPILE_TEST
help
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
index 717a08600bb5..fc6fe50cdde4 100644
--- a/drivers/thermal/qcom/Makefile
+++ b/drivers/thermal/qcom/Makefile
@@ -1,3 +1,5 @@
obj-$(CONFIG_QCOM_TSENS) += qcom_tsens.o
-qcom_tsens-y += tsens.o tsens-common.o tsens-8916.o tsens-8974.o tsens-8960.o tsens-v2.o
+
+qcom_tsens-y += tsens.o tsens-common.o tsens-v0_1.o \
+ tsens-8960.o tsens-v2.o tsens-v1.o
obj-$(CONFIG_QCOM_SPMI_TEMP_ALARM) += qcom-spmi-temp-alarm.o
diff --git a/drivers/thermal/qcom/tsens-8916.c b/drivers/thermal/qcom/tsens-8916.c
deleted file mode 100644
index c6dd620ac029..000000000000
--- a/drivers/thermal/qcom/tsens-8916.c
+++ /dev/null
@@ -1,105 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/platform_device.h>
-#include "tsens.h"
-
-/* eeprom layout data for 8916 */
-#define BASE0_MASK 0x0000007f
-#define BASE1_MASK 0xfe000000
-#define BASE0_SHIFT 0
-#define BASE1_SHIFT 25
-
-#define S0_P1_MASK 0x00000f80
-#define S1_P1_MASK 0x003e0000
-#define S2_P1_MASK 0xf8000000
-#define S3_P1_MASK 0x000003e0
-#define S4_P1_MASK 0x000f8000
-
-#define S0_P2_MASK 0x0001f000
-#define S1_P2_MASK 0x07c00000
-#define S2_P2_MASK 0x0000001f
-#define S3_P2_MASK 0x00007c00
-#define S4_P2_MASK 0x01f00000
-
-#define S0_P1_SHIFT 7
-#define S1_P1_SHIFT 17
-#define S2_P1_SHIFT 27
-#define S3_P1_SHIFT 5
-#define S4_P1_SHIFT 15
-
-#define S0_P2_SHIFT 12
-#define S1_P2_SHIFT 22
-#define S2_P2_SHIFT 0
-#define S3_P2_SHIFT 10
-#define S4_P2_SHIFT 20
-
-#define CAL_SEL_MASK 0xe0000000
-#define CAL_SEL_SHIFT 29
-
-static int calibrate_8916(struct tsens_device *tmdev)
-{
- int base0 = 0, base1 = 0, i;
- u32 p1[5], p2[5];
- int mode = 0;
- u32 *qfprom_cdata, *qfprom_csel;
-
- qfprom_cdata = (u32 *)qfprom_read(tmdev->dev, "calib");
- if (IS_ERR(qfprom_cdata))
- return PTR_ERR(qfprom_cdata);
-
- qfprom_csel = (u32 *)qfprom_read(tmdev->dev, "calib_sel");
- if (IS_ERR(qfprom_csel))
- return PTR_ERR(qfprom_csel);
-
- mode = (qfprom_csel[0] & CAL_SEL_MASK) >> CAL_SEL_SHIFT;
- dev_dbg(tmdev->dev, "calibration mode is %d\n", mode);
-
- switch (mode) {
- case TWO_PT_CALIB:
- base1 = (qfprom_cdata[1] & BASE1_MASK) >> BASE1_SHIFT;
- p2[0] = (qfprom_cdata[0] & S0_P2_MASK) >> S0_P2_SHIFT;
- p2[1] = (qfprom_cdata[0] & S1_P2_MASK) >> S1_P2_SHIFT;
- p2[2] = (qfprom_cdata[1] & S2_P2_MASK) >> S2_P2_SHIFT;
- p2[3] = (qfprom_cdata[1] & S3_P2_MASK) >> S3_P2_SHIFT;
- p2[4] = (qfprom_cdata[1] & S4_P2_MASK) >> S4_P2_SHIFT;
- for (i = 0; i < tmdev->num_sensors; i++)
- p2[i] = ((base1 + p2[i]) << 3);
- /* Fall through */
- case ONE_PT_CALIB2:
- base0 = (qfprom_cdata[0] & BASE0_MASK);
- p1[0] = (qfprom_cdata[0] & S0_P1_MASK) >> S0_P1_SHIFT;
- p1[1] = (qfprom_cdata[0] & S1_P1_MASK) >> S1_P1_SHIFT;
- p1[2] = (qfprom_cdata[0] & S2_P1_MASK) >> S2_P1_SHIFT;
- p1[3] = (qfprom_cdata[1] & S3_P1_MASK) >> S3_P1_SHIFT;
- p1[4] = (qfprom_cdata[1] & S4_P1_MASK) >> S4_P1_SHIFT;
- for (i = 0; i < tmdev->num_sensors; i++)
- p1[i] = (((base0) + p1[i]) << 3);
- break;
- default:
- for (i = 0; i < tmdev->num_sensors; i++) {
- p1[i] = 500;
- p2[i] = 780;
- }
- break;
- }
-
- compute_intercept_slope(tmdev, p1, p2, mode);
-
- return 0;
-}
-
-static const struct tsens_ops ops_8916 = {
- .init = init_common,
- .calibrate = calibrate_8916,
- .get_temp = get_temp_common,
-};
-
-const struct tsens_data data_8916 = {
- .num_sensors = 5,
- .ops = &ops_8916,
- .reg_offsets = { [SROT_CTRL_OFFSET] = 0x0 },
- .hw_ids = (unsigned int []){0, 1, 2, 4, 5 },
-};
diff --git a/drivers/thermal/qcom/tsens-8960.c b/drivers/thermal/qcom/tsens-8960.c
index 0f0adb302a7b..8d9b721dadb6 100644
--- a/drivers/thermal/qcom/tsens-8960.c
+++ b/drivers/thermal/qcom/tsens-8960.c
@@ -56,21 +56,21 @@
#define TRDY_MASK BIT(7)
#define TIMEOUT_US 100
-static int suspend_8960(struct tsens_device *tmdev)
+static int suspend_8960(struct tsens_priv *priv)
{
int ret;
unsigned int mask;
- struct regmap *map = tmdev->tm_map;
+ struct regmap *map = priv->tm_map;
- ret = regmap_read(map, THRESHOLD_ADDR, &tmdev->ctx.threshold);
+ ret = regmap_read(map, THRESHOLD_ADDR, &priv->ctx.threshold);
if (ret)
return ret;
- ret = regmap_read(map, CNTL_ADDR, &tmdev->ctx.control);
+ ret = regmap_read(map, CNTL_ADDR, &priv->ctx.control);
if (ret)
return ret;
- if (tmdev->num_sensors > 1)
+ if (priv->num_sensors > 1)
mask = SLP_CLK_ENA | EN;
else
mask = SLP_CLK_ENA_8660 | EN;
@@ -82,10 +82,10 @@ static int suspend_8960(struct tsens_device *tmdev)
return 0;
}
-static int resume_8960(struct tsens_device *tmdev)
+static int resume_8960(struct tsens_priv *priv)
{
int ret;
- struct regmap *map = tmdev->tm_map;
+ struct regmap *map = priv->tm_map;
ret = regmap_update_bits(map, CNTL_ADDR, SW_RST, SW_RST);
if (ret)
@@ -95,80 +95,80 @@ static int resume_8960(struct tsens_device *tmdev)
* Separate CONFIG restore is not needed only for 8660 as
* config is part of CTRL Addr and its restored as such
*/
- if (tmdev->num_sensors > 1) {
+ if (priv->num_sensors > 1) {
ret = regmap_update_bits(map, CONFIG_ADDR, CONFIG_MASK, CONFIG);
if (ret)
return ret;
}
- ret = regmap_write(map, THRESHOLD_ADDR, tmdev->ctx.threshold);
+ ret = regmap_write(map, THRESHOLD_ADDR, priv->ctx.threshold);
if (ret)
return ret;
- ret = regmap_write(map, CNTL_ADDR, tmdev->ctx.control);
+ ret = regmap_write(map, CNTL_ADDR, priv->ctx.control);
if (ret)
return ret;
return 0;
}
-static int enable_8960(struct tsens_device *tmdev, int id)
+static int enable_8960(struct tsens_priv *priv, int id)
{
int ret;
u32 reg, mask;
- ret = regmap_read(tmdev->tm_map, CNTL_ADDR, &reg);
+ ret = regmap_read(priv->tm_map, CNTL_ADDR, &reg);
if (ret)
return ret;
mask = BIT(id + SENSOR0_SHIFT);
- ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg | SW_RST);
+ ret = regmap_write(priv->tm_map, CNTL_ADDR, reg | SW_RST);
if (ret)
return ret;
- if (tmdev->num_sensors > 1)
+ if (priv->num_sensors > 1)
reg |= mask | SLP_CLK_ENA | EN;
else
reg |= mask | SLP_CLK_ENA_8660 | EN;
- ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg);
+ ret = regmap_write(priv->tm_map, CNTL_ADDR, reg);
if (ret)
return ret;
return 0;
}
-static void disable_8960(struct tsens_device *tmdev)
+static void disable_8960(struct tsens_priv *priv)
{
int ret;
u32 reg_cntl;
u32 mask;
- mask = GENMASK(tmdev->num_sensors - 1, 0);
+ mask = GENMASK(priv->num_sensors - 1, 0);
mask <<= SENSOR0_SHIFT;
mask |= EN;
- ret = regmap_read(tmdev->tm_map, CNTL_ADDR, &reg_cntl);
+ ret = regmap_read(priv->tm_map, CNTL_ADDR, &reg_cntl);
if (ret)
return;
reg_cntl &= ~mask;
- if (tmdev->num_sensors > 1)
+ if (priv->num_sensors > 1)
reg_cntl &= ~SLP_CLK_ENA;
else
reg_cntl &= ~SLP_CLK_ENA_8660;
- regmap_write(tmdev->tm_map, CNTL_ADDR, reg_cntl);
+ regmap_write(priv->tm_map, CNTL_ADDR, reg_cntl);
}
-static int init_8960(struct tsens_device *tmdev)
+static int init_8960(struct tsens_priv *priv)
{
int ret, i;
u32 reg_cntl;
- tmdev->tm_map = dev_get_regmap(tmdev->dev, NULL);
- if (!tmdev->tm_map)
+ priv->tm_map = dev_get_regmap(priv->dev, NULL);
+ if (!priv->tm_map)
return -ENODEV;
/*
@@ -177,21 +177,21 @@ static int init_8960(struct tsens_device *tmdev)
* but the control registers stay in the same place, i.e
* directly after the first 5 status registers.
*/
- for (i = 0; i < tmdev->num_sensors; i++) {
+ for (i = 0; i < priv->num_sensors; i++) {
if (i >= 5)
- tmdev->sensor[i].status = S0_STATUS_ADDR + 40;
- tmdev->sensor[i].status += i * 4;
+ priv->sensor[i].status = S0_STATUS_ADDR + 40;
+ priv->sensor[i].status += i * 4;
}
reg_cntl = SW_RST;
- ret = regmap_update_bits(tmdev->tm_map, CNTL_ADDR, SW_RST, reg_cntl);
+ ret = regmap_update_bits(priv->tm_map, CNTL_ADDR, SW_RST, reg_cntl);
if (ret)
return ret;
- if (tmdev->num_sensors > 1) {
+ if (priv->num_sensors > 1) {
reg_cntl |= SLP_CLK_ENA | (MEASURE_PERIOD << 18);
reg_cntl &= ~SW_RST;
- ret = regmap_update_bits(tmdev->tm_map, CONFIG_ADDR,
+ ret = regmap_update_bits(priv->tm_map, CONFIG_ADDR,
CONFIG_MASK, CONFIG);
} else {
reg_cntl |= SLP_CLK_ENA_8660 | (MEASURE_PERIOD << 16);
@@ -199,30 +199,30 @@ static int init_8960(struct tsens_device *tmdev)
reg_cntl |= CONFIG_8660 << CONFIG_SHIFT_8660;
}
- reg_cntl |= GENMASK(tmdev->num_sensors - 1, 0) << SENSOR0_SHIFT;
- ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg_cntl);
+ reg_cntl |= GENMASK(priv->num_sensors - 1, 0) << SENSOR0_SHIFT;
+ ret = regmap_write(priv->tm_map, CNTL_ADDR, reg_cntl);
if (ret)
return ret;
reg_cntl |= EN;
- ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg_cntl);
+ ret = regmap_write(priv->tm_map, CNTL_ADDR, reg_cntl);
if (ret)
return ret;
return 0;
}
-static int calibrate_8960(struct tsens_device *tmdev)
+static int calibrate_8960(struct tsens_priv *priv)
{
int i;
char *data;
- ssize_t num_read = tmdev->num_sensors;
- struct tsens_sensor *s = tmdev->sensor;
+ ssize_t num_read = priv->num_sensors;
+ struct tsens_sensor *s = priv->sensor;
- data = qfprom_read(tmdev->dev, "calib");
+ data = qfprom_read(priv->dev, "calib");
if (IS_ERR(data))
- data = qfprom_read(tmdev->dev, "calib_backup");
+ data = qfprom_read(priv->dev, "calib_backup");
if (IS_ERR(data))
return PTR_ERR(data);
@@ -243,21 +243,21 @@ static inline int code_to_mdegC(u32 adc_code, const struct tsens_sensor *s)
return adc_code * slope + offset;
}
-static int get_temp_8960(struct tsens_device *tmdev, int id, int *temp)
+static int get_temp_8960(struct tsens_priv *priv, int id, int *temp)
{
int ret;
u32 code, trdy;
- const struct tsens_sensor *s = &tmdev->sensor[id];
+ const struct tsens_sensor *s = &priv->sensor[id];
unsigned long timeout;
timeout = jiffies + usecs_to_jiffies(TIMEOUT_US);
do {
- ret = regmap_read(tmdev->tm_map, INT_STATUS_ADDR, &trdy);
+ ret = regmap_read(priv->tm_map, INT_STATUS_ADDR, &trdy);
if (ret)
return ret;
if (!(trdy & TRDY_MASK))
continue;
- ret = regmap_read(tmdev->tm_map, s->status, &code);
+ ret = regmap_read(priv->tm_map, s->status, &code);
if (ret)
return ret;
*temp = code_to_mdegC(code, s);
@@ -277,7 +277,7 @@ static const struct tsens_ops ops_8960 = {
.resume = resume_8960,
};
-const struct tsens_data data_8960 = {
+const struct tsens_plat_data data_8960 = {
.num_sensors = 11,
.ops = &ops_8960,
};
diff --git a/drivers/thermal/qcom/tsens-common.c b/drivers/thermal/qcom/tsens-common.c
index f80c73f11740..928e8e81ba69 100644
--- a/drivers/thermal/qcom/tsens-common.c
+++ b/drivers/thermal/qcom/tsens-common.c
@@ -12,18 +12,6 @@
#include <linux/regmap.h>
#include "tsens.h"
-/* SROT */
-#define TSENS_EN BIT(0)
-
-/* TM */
-#define STATUS_OFFSET 0x30
-#define SN_ADDR_OFFSET 0x4
-#define SN_ST_TEMP_MASK 0x3ff
-#define CAL_DEGC_PT1 30
-#define CAL_DEGC_PT2 120
-#define SLOPE_FACTOR 1000
-#define SLOPE_DEFAULT 3200
-
char *qfprom_read(struct device *dev, const char *cname)
{
struct nvmem_cell *cell;
@@ -46,18 +34,18 @@ char *qfprom_read(struct device *dev, const char *cname)
* and offset values are derived from tz->tzp->slope and tz->tzp->offset
* resp.
*/
-void compute_intercept_slope(struct tsens_device *tmdev, u32 *p1,
+void compute_intercept_slope(struct tsens_priv *priv, u32 *p1,
u32 *p2, u32 mode)
{
int i;
int num, den;
- for (i = 0; i < tmdev->num_sensors; i++) {
- dev_dbg(tmdev->dev,
+ for (i = 0; i < priv->num_sensors; i++) {
+ dev_dbg(priv->dev,
"sensor%d - data_point1:%#x data_point2:%#x\n",
i, p1[i], p2[i]);
- tmdev->sensor[i].slope = SLOPE_DEFAULT;
+ priv->sensor[i].slope = SLOPE_DEFAULT;
if (mode == TWO_PT_CALIB) {
/*
* slope (m) = adc_code2 - adc_code1 (y2 - y1)/
@@ -66,16 +54,30 @@ void compute_intercept_slope(struct tsens_device *tmdev, u32 *p1,
num = p2[i] - p1[i];
num *= SLOPE_FACTOR;
den = CAL_DEGC_PT2 - CAL_DEGC_PT1;
- tmdev->sensor[i].slope = num / den;
+ priv->sensor[i].slope = num / den;
}
- tmdev->sensor[i].offset = (p1[i] * SLOPE_FACTOR) -
+ priv->sensor[i].offset = (p1[i] * SLOPE_FACTOR) -
(CAL_DEGC_PT1 *
- tmdev->sensor[i].slope);
- dev_dbg(tmdev->dev, "offset:%d\n", tmdev->sensor[i].offset);
+ priv->sensor[i].slope);
+ dev_dbg(priv->dev, "offset:%d\n", priv->sensor[i].offset);
}
}
+bool is_sensor_enabled(struct tsens_priv *priv, u32 hw_id)
+{
+ u32 val;
+ int ret;
+
+ if ((hw_id > (priv->num_sensors - 1)) || (hw_id < 0))
+ return -EINVAL;
+ ret = regmap_field_read(priv->rf[SENSOR_EN], &val);
+ if (ret)
+ return ret;
+
+ return val & (1 << hw_id);
+}
+
static inline int code_to_degc(u32 adc_code, const struct tsens_sensor *s)
{
int degc, num, den;
@@ -95,18 +97,54 @@ static inline int code_to_degc(u32 adc_code, const struct tsens_sensor *s)
return degc;
}
-int get_temp_common(struct tsens_device *tmdev, int id, int *temp)
+int get_temp_tsens_valid(struct tsens_priv *priv, int i, int *temp)
{
- struct tsens_sensor *s = &tmdev->sensor[id];
- u32 code;
- unsigned int status_reg;
+ struct tsens_sensor *s = &priv->sensor[i];
+ u32 temp_idx = LAST_TEMP_0 + s->hw_id;
+ u32 valid_idx = VALID_0 + s->hw_id;
+ u32 last_temp = 0, valid, mask;
+ int ret;
+
+ ret = regmap_field_read(priv->rf[valid_idx], &valid);
+ if (ret)
+ return ret;
+ while (!valid) {
+ /* Valid bit is 0 for 6 AHB clock cycles.
+ * At 19.2MHz, 1 AHB clock is ~60ns.
+ * We should enter this loop very, very rarely.
+ */
+ ndelay(400);
+ ret = regmap_field_read(priv->rf[valid_idx], &valid);
+ if (ret)
+ return ret;
+ }
+
+ /* Valid bit is set, OK to read the temperature */
+ ret = regmap_field_read(priv->rf[temp_idx], &last_temp);
+ if (ret)
+ return ret;
+
+ if (priv->feat->adc) {
+ /* Convert temperature from ADC code to milliCelsius */
+ *temp = code_to_degc(last_temp, s) * 1000;
+ } else {
+ mask = GENMASK(priv->fields[LAST_TEMP_0].msb,
+ priv->fields[LAST_TEMP_0].lsb);
+ /* Convert temperature from deciCelsius to milliCelsius */
+ *temp = sign_extend32(last_temp, fls(mask) - 1) * 100;
+ }
+
+ return 0;
+}
+
+int get_temp_common(struct tsens_priv *priv, int i, int *temp)
+{
+ struct tsens_sensor *s = &priv->sensor[i];
int last_temp = 0, ret;
- status_reg = tmdev->tm_offset + STATUS_OFFSET + s->hw_id * SN_ADDR_OFFSET;
- ret = regmap_read(tmdev->tm_map, status_reg, &code);
+ ret = regmap_field_read(priv->rf[LAST_TEMP_0 + s->hw_id], &last_temp);
if (ret)
return ret;
- last_temp = code & SN_ST_TEMP_MASK;
*temp = code_to_degc(last_temp, s) * 1000;
@@ -127,21 +165,21 @@ static const struct regmap_config tsens_srot_config = {
.reg_stride = 4,
};
-int __init init_common(struct tsens_device *tmdev)
+int __init init_common(struct tsens_priv *priv)
{
void __iomem *tm_base, *srot_base;
+ struct device *dev = priv->dev;
struct resource *res;
- u32 code;
- int ret;
- struct platform_device *op = of_find_device_by_node(tmdev->dev->of_node);
- u16 ctrl_offset = tmdev->reg_offsets[SROT_CTRL_OFFSET];
+ u32 enabled;
+ int ret, i, j;
+ struct platform_device *op = of_find_device_by_node(priv->dev->of_node);
if (!op)
return -EINVAL;
if (op->num_resources > 1) {
/* DT with separate SROT and TM address space */
- tmdev->tm_offset = 0;
+ priv->tm_offset = 0;
res = platform_get_resource(op, IORESOURCE_MEM, 1);
srot_base = devm_ioremap_resource(&op->dev, res);
if (IS_ERR(srot_base)) {
@@ -149,16 +187,15 @@ int __init init_common(struct tsens_device *tmdev)
goto err_put_device;
}
- tmdev->srot_map = devm_regmap_init_mmio(tmdev->dev, srot_base,
+ priv->srot_map = devm_regmap_init_mmio(dev, srot_base,
&tsens_srot_config);
- if (IS_ERR(tmdev->srot_map)) {
- ret = PTR_ERR(tmdev->srot_map);
+ if (IS_ERR(priv->srot_map)) {
+ ret = PTR_ERR(priv->srot_map);
goto err_put_device;
}
-
} else {
/* old DTs where SROT and TM were in a contiguous 2K block */
- tmdev->tm_offset = 0x1000;
+ priv->tm_offset = 0x1000;
}
res = platform_get_resource(op, IORESOURCE_MEM, 0);
@@ -168,19 +205,47 @@ int __init init_common(struct tsens_device *tmdev)
goto err_put_device;
}
- tmdev->tm_map = devm_regmap_init_mmio(tmdev->dev, tm_base, &tsens_config);
- if (IS_ERR(tmdev->tm_map)) {
- ret = PTR_ERR(tmdev->tm_map);
+ priv->tm_map = devm_regmap_init_mmio(dev, tm_base, &tsens_config);
+ if (IS_ERR(priv->tm_map)) {
+ ret = PTR_ERR(priv->tm_map);
goto err_put_device;
}
- if (tmdev->srot_map) {
- ret = regmap_read(tmdev->srot_map, ctrl_offset, &code);
- if (ret)
+ priv->rf[TSENS_EN] = devm_regmap_field_alloc(dev, priv->srot_map,
+ priv->fields[TSENS_EN]);
+ if (IS_ERR(priv->rf[TSENS_EN])) {
+ ret = PTR_ERR(priv->rf[TSENS_EN]);
+ goto err_put_device;
+ }
+ ret = regmap_field_read(priv->rf[TSENS_EN], &enabled);
+ if (ret)
+ goto err_put_device;
+ if (!enabled) {
+ dev_err(dev, "tsens device is not enabled\n");
+ ret = -ENODEV;
+ goto err_put_device;
+ }
+
+ priv->rf[SENSOR_EN] = devm_regmap_field_alloc(dev, priv->srot_map,
+ priv->fields[SENSOR_EN]);
+ if (IS_ERR(priv->rf[SENSOR_EN])) {
+ ret = PTR_ERR(priv->rf[SENSOR_EN]);
+ goto err_put_device;
+ }
+ /* now alloc regmap_fields in tm_map */
+ for (i = 0, j = LAST_TEMP_0; i < priv->feat->max_sensors; i++, j++) {
+ priv->rf[j] = devm_regmap_field_alloc(dev, priv->tm_map,
+ priv->fields[j]);
+ if (IS_ERR(priv->rf[j])) {
+ ret = PTR_ERR(priv->rf[j]);
goto err_put_device;
- if (!(code & TSENS_EN)) {
- dev_err(tmdev->dev, "tsens device is not enabled\n");
- ret = -ENODEV;
+ }
+ }
+ for (i = 0, j = VALID_0; i < priv->feat->max_sensors; i++, j++) {
+ priv->rf[j] = devm_regmap_field_alloc(dev, priv->tm_map,
+ priv->fields[j]);
+ if (IS_ERR(priv->rf[j])) {
+ ret = PTR_ERR(priv->rf[j]);
goto err_put_device;
}
}
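
As a side note on the calibration math used by compute_intercept_slope() and code_to_degc() in tsens-common.c above: the conversion is a plain linear fit through the two calibration points. The following stand-alone sketch is illustrative only (hypothetical ADC codes, same CAL_DEGC_PT1/PT2 and SLOPE_FACTOR constants as tsens.h) and is not part of the patch:

/*
 * Illustrative sketch only, not part of the patch: the linear fit behind
 * compute_intercept_slope()/code_to_degc(), using the CAL_DEGC_PT1/2 and
 * SLOPE_FACTOR constants from tsens.h and hypothetical calibration codes.
 */
#include <stdio.h>

#define CAL_DEGC_PT1	30
#define CAL_DEGC_PT2	120
#define SLOPE_FACTOR	1000

int main(void)
{
	int p1 = 500;		/* hypothetical ADC code at 30 degC */
	int p2 = 780;		/* hypothetical ADC code at 120 degC */
	int slope, offset, code, num, degc;

	/* slope = (code2 - code1) / (T2 - T1), scaled by SLOPE_FACTOR */
	slope = (p2 - p1) * SLOPE_FACTOR / (CAL_DEGC_PT2 - CAL_DEGC_PT1);
	/* offset chosen so that code p1 maps back to CAL_DEGC_PT1 */
	offset = p1 * SLOPE_FACTOR - CAL_DEGC_PT1 * slope;

	code = 640;		/* a made-up raw reading */
	num = code * SLOPE_FACTOR - offset;
	degc = (num + slope / 2) / slope;	/* round like code_to_degc() */

	printf("slope=%d offset=%d code=%d -> %d degC (%d mC)\n",
	       slope, offset, code, degc, degc * 1000);
	return 0;
}
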
diff --git a/drivers/thermal/qcom/tsens-8974.c b/drivers/thermal/qcom/tsens-v0_1.c
index 3d3fda3d731b..a319283c223f 100644
--- a/drivers/thermal/qcom/tsens-8974.c
+++ b/drivers/thermal/qcom/tsens-v0_1.c
@@ -6,6 +6,48 @@
#include <linux/platform_device.h>
#include "tsens.h"
+/* ----- SROT ------ */
+#define SROT_CTRL_OFF 0x0000
+
+/* ----- TM ------ */
+#define TM_INT_EN_OFF 0x0000
+#define TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF 0x0004
+#define TM_Sn_STATUS_OFF 0x0030
+#define TM_TRDY_OFF 0x005c
+
+/* eeprom layout data for 8916 */
+#define MSM8916_BASE0_MASK 0x0000007f
+#define MSM8916_BASE1_MASK 0xfe000000
+#define MSM8916_BASE0_SHIFT 0
+#define MSM8916_BASE1_SHIFT 25
+
+#define MSM8916_S0_P1_MASK 0x00000f80
+#define MSM8916_S1_P1_MASK 0x003e0000
+#define MSM8916_S2_P1_MASK 0xf8000000
+#define MSM8916_S3_P1_MASK 0x000003e0
+#define MSM8916_S4_P1_MASK 0x000f8000
+
+#define MSM8916_S0_P2_MASK 0x0001f000
+#define MSM8916_S1_P2_MASK 0x07c00000
+#define MSM8916_S2_P2_MASK 0x0000001f
+#define MSM8916_S3_P2_MASK 0x00007c00
+#define MSM8916_S4_P2_MASK 0x01f00000
+
+#define MSM8916_S0_P1_SHIFT 7
+#define MSM8916_S1_P1_SHIFT 17
+#define MSM8916_S2_P1_SHIFT 27
+#define MSM8916_S3_P1_SHIFT 5
+#define MSM8916_S4_P1_SHIFT 15
+
+#define MSM8916_S0_P2_SHIFT 12
+#define MSM8916_S1_P2_SHIFT 22
+#define MSM8916_S2_P2_SHIFT 0
+#define MSM8916_S3_P2_SHIFT 10
+#define MSM8916_S4_P2_SHIFT 20
+
+#define MSM8916_CAL_SEL_MASK 0xe0000000
+#define MSM8916_CAL_SEL_SHIFT 29
+
/* eeprom layout data for 8974 */
#define BASE1_MASK 0xff
#define S0_P1_MASK 0x3f00
@@ -91,7 +133,59 @@
#define BIT_APPEND 0x3
-static int calibrate_8974(struct tsens_device *tmdev)
+static int calibrate_8916(struct tsens_priv *priv)
+{
+ int base0 = 0, base1 = 0, i;
+ u32 p1[5], p2[5];
+ int mode = 0;
+ u32 *qfprom_cdata, *qfprom_csel;
+
+ qfprom_cdata = (u32 *)qfprom_read(priv->dev, "calib");
+ if (IS_ERR(qfprom_cdata))
+ return PTR_ERR(qfprom_cdata);
+
+ qfprom_csel = (u32 *)qfprom_read(priv->dev, "calib_sel");
+ if (IS_ERR(qfprom_csel))
+ return PTR_ERR(qfprom_csel);
+
+ mode = (qfprom_csel[0] & MSM8916_CAL_SEL_MASK) >> MSM8916_CAL_SEL_SHIFT;
+ dev_dbg(priv->dev, "calibration mode is %d\n", mode);
+
+ switch (mode) {
+ case TWO_PT_CALIB:
+ base1 = (qfprom_cdata[1] & MSM8916_BASE1_MASK) >> MSM8916_BASE1_SHIFT;
+ p2[0] = (qfprom_cdata[0] & MSM8916_S0_P2_MASK) >> MSM8916_S0_P2_SHIFT;
+ p2[1] = (qfprom_cdata[0] & MSM8916_S1_P2_MASK) >> MSM8916_S1_P2_SHIFT;
+ p2[2] = (qfprom_cdata[1] & MSM8916_S2_P2_MASK) >> MSM8916_S2_P2_SHIFT;
+ p2[3] = (qfprom_cdata[1] & MSM8916_S3_P2_MASK) >> MSM8916_S3_P2_SHIFT;
+ p2[4] = (qfprom_cdata[1] & MSM8916_S4_P2_MASK) >> MSM8916_S4_P2_SHIFT;
+ for (i = 0; i < priv->num_sensors; i++)
+ p2[i] = ((base1 + p2[i]) << 3);
+ /* Fall through */
+ case ONE_PT_CALIB2:
+ base0 = (qfprom_cdata[0] & MSM8916_BASE0_MASK);
+ p1[0] = (qfprom_cdata[0] & MSM8916_S0_P1_MASK) >> MSM8916_S0_P1_SHIFT;
+ p1[1] = (qfprom_cdata[0] & MSM8916_S1_P1_MASK) >> MSM8916_S1_P1_SHIFT;
+ p1[2] = (qfprom_cdata[0] & MSM8916_S2_P1_MASK) >> MSM8916_S2_P1_SHIFT;
+ p1[3] = (qfprom_cdata[1] & MSM8916_S3_P1_MASK) >> MSM8916_S3_P1_SHIFT;
+ p1[4] = (qfprom_cdata[1] & MSM8916_S4_P1_MASK) >> MSM8916_S4_P1_SHIFT;
+ for (i = 0; i < priv->num_sensors; i++)
+ p1[i] = (((base0) + p1[i]) << 3);
+ break;
+ default:
+ for (i = 0; i < priv->num_sensors; i++) {
+ p1[i] = 500;
+ p2[i] = 780;
+ }
+ break;
+ }
+
+ compute_intercept_slope(priv, p1, p2, mode);
+
+ return 0;
+}
+
+static int calibrate_8974(struct tsens_priv *priv)
{
int base1 = 0, base2 = 0, i;
u32 p1[11], p2[11];
@@ -99,11 +193,11 @@ static int calibrate_8974(struct tsens_device *tmdev)
u32 *calib, *bkp;
u32 calib_redun_sel;
- calib = (u32 *)qfprom_read(tmdev->dev, "calib");
+ calib = (u32 *)qfprom_read(priv->dev, "calib");
if (IS_ERR(calib))
return PTR_ERR(calib);
- bkp = (u32 *)qfprom_read(tmdev->dev, "calib_backup");
+ bkp = (u32 *)qfprom_read(priv->dev, "calib_backup");
if (IS_ERR(bkp))
return PTR_ERR(bkp);
@@ -184,25 +278,25 @@ static int calibrate_8974(struct tsens_device *tmdev)
switch (mode) {
case ONE_PT_CALIB:
- for (i = 0; i < tmdev->num_sensors; i++)
+ for (i = 0; i < priv->num_sensors; i++)
p1[i] += (base1 << 2) | BIT_APPEND;
break;
case TWO_PT_CALIB:
- for (i = 0; i < tmdev->num_sensors; i++) {
+ for (i = 0; i < priv->num_sensors; i++) {
p2[i] += base2;
p2[i] <<= 2;
p2[i] |= BIT_APPEND;
}
/* Fall through */
case ONE_PT_CALIB2:
- for (i = 0; i < tmdev->num_sensors; i++) {
+ for (i = 0; i < priv->num_sensors; i++) {
p1[i] += base1;
p1[i] <<= 2;
p1[i] |= BIT_APPEND;
}
break;
default:
- for (i = 0; i < tmdev->num_sensors; i++)
+ for (i = 0; i < priv->num_sensors; i++)
p2[i] = 780;
p1[0] = 502;
p1[1] = 509;
@@ -218,19 +312,71 @@ static int calibrate_8974(struct tsens_device *tmdev)
break;
}
- compute_intercept_slope(tmdev, p1, p2, mode);
+ compute_intercept_slope(priv, p1, p2, mode);
return 0;
}
+/* v0.1: 8916, 8974 */
+
+static const struct tsens_features tsens_v0_1_feat = {
+ .ver_major = VER_0_1,
+ .crit_int = 0,
+ .adc = 1,
+ .srot_split = 1,
+ .max_sensors = 11,
+};
+
+static const struct reg_field tsens_v0_1_regfields[MAX_REGFIELDS] = {
+ /* ----- SROT ------ */
+ /* No VERSION information */
+
+ /* CTRL_OFFSET */
+ [TSENS_EN] = REG_FIELD(SROT_CTRL_OFF, 0, 0),
+ [TSENS_SW_RST] = REG_FIELD(SROT_CTRL_OFF, 1, 1),
+ [SENSOR_EN] = REG_FIELD(SROT_CTRL_OFF, 3, 13),
+
+ /* ----- TM ------ */
+ /* INTERRUPT ENABLE */
+ [INT_EN] = REG_FIELD(TM_INT_EN_OFF, 0, 0),
+
+ /* Sn_STATUS */
+ REG_FIELD_FOR_EACH_SENSOR11(LAST_TEMP, TM_Sn_STATUS_OFF, 0, 9),
+ /* No VALID field on v0.1 */
+ REG_FIELD_FOR_EACH_SENSOR11(MIN_STATUS, TM_Sn_STATUS_OFF, 10, 10),
+ REG_FIELD_FOR_EACH_SENSOR11(LOWER_STATUS, TM_Sn_STATUS_OFF, 11, 11),
+ REG_FIELD_FOR_EACH_SENSOR11(UPPER_STATUS, TM_Sn_STATUS_OFF, 12, 12),
+ /* No CRITICAL field on v0.1 */
+ REG_FIELD_FOR_EACH_SENSOR11(MAX_STATUS, TM_Sn_STATUS_OFF, 13, 13),
+
+ /* TRDY: 1=ready, 0=in progress */
+ [TRDY] = REG_FIELD(TM_TRDY_OFF, 0, 0),
+};
+
+static const struct tsens_ops ops_8916 = {
+ .init = init_common,
+ .calibrate = calibrate_8916,
+ .get_temp = get_temp_common,
+};
+
+const struct tsens_plat_data data_8916 = {
+ .num_sensors = 5,
+ .ops = &ops_8916,
+ .hw_ids = (unsigned int []){0, 1, 2, 4, 5 },
+
+ .feat = &tsens_v0_1_feat,
+ .fields = tsens_v0_1_regfields,
+};
+
static const struct tsens_ops ops_8974 = {
.init = init_common,
.calibrate = calibrate_8974,
.get_temp = get_temp_common,
};
-const struct tsens_data data_8974 = {
+const struct tsens_plat_data data_8974 = {
.num_sensors = 11,
.ops = &ops_8974,
- .reg_offsets = { [SROT_CTRL_OFFSET] = 0x0 },
+ .feat = &tsens_v0_1_feat,
+ .fields = tsens_v0_1_regfields,
};
diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c
new file mode 100644
index 000000000000..10b595d4f619
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-v1.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, Linaro Limited
+ */
+
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include "tsens.h"
+
+/* ----- SROT ------ */
+#define SROT_HW_VER_OFF 0x0000
+#define SROT_CTRL_OFF 0x0004
+
+/* ----- TM ------ */
+#define TM_INT_EN_OFF 0x0000
+#define TM_Sn_UPPER_LOWER_STATUS_CTRL_OFF 0x0004
+#define TM_Sn_STATUS_OFF 0x0044
+#define TM_TRDY_OFF 0x0084
+
+/* eeprom layout data for qcs404/405 (v1) */
+#define BASE0_MASK 0x000007f8
+#define BASE1_MASK 0x0007f800
+#define BASE0_SHIFT 3
+#define BASE1_SHIFT 11
+
+#define S0_P1_MASK 0x0000003f
+#define S1_P1_MASK 0x0003f000
+#define S2_P1_MASK 0x3f000000
+#define S3_P1_MASK 0x000003f0
+#define S4_P1_MASK 0x003f0000
+#define S5_P1_MASK 0x0000003f
+#define S6_P1_MASK 0x0003f000
+#define S7_P1_MASK 0x3f000000
+#define S8_P1_MASK 0x000003f0
+#define S9_P1_MASK 0x003f0000
+
+#define S0_P2_MASK 0x00000fc0
+#define S1_P2_MASK 0x00fc0000
+#define S2_P2_MASK_1_0 0xc0000000
+#define S2_P2_MASK_5_2 0x0000000f
+#define S3_P2_MASK 0x0000fc00
+#define S4_P2_MASK 0x0fc00000
+#define S5_P2_MASK 0x00000fc0
+#define S6_P2_MASK 0x00fc0000
+#define S7_P2_MASK_1_0 0xc0000000
+#define S7_P2_MASK_5_2 0x0000000f
+#define S8_P2_MASK 0x0000fc00
+#define S9_P2_MASK 0x0fc00000
+
+#define S0_P1_SHIFT 0
+#define S0_P2_SHIFT 6
+#define S1_P1_SHIFT 12
+#define S1_P2_SHIFT 18
+#define S2_P1_SHIFT 24
+#define S2_P2_SHIFT_1_0 30
+
+#define S2_P2_SHIFT_5_2 0
+#define S3_P1_SHIFT 4
+#define S3_P2_SHIFT 10
+#define S4_P1_SHIFT 16
+#define S4_P2_SHIFT 22
+
+#define S5_P1_SHIFT 0
+#define S5_P2_SHIFT 6
+#define S6_P1_SHIFT 12
+#define S6_P2_SHIFT 18
+#define S7_P1_SHIFT 24
+#define S7_P2_SHIFT_1_0 30
+
+#define S7_P2_SHIFT_5_2 0
+#define S8_P1_SHIFT 4
+#define S8_P2_SHIFT 10
+#define S9_P1_SHIFT 16
+#define S9_P2_SHIFT 22
+
+#define CAL_SEL_MASK 7
+#define CAL_SEL_SHIFT 0
+
+static int calibrate_v1(struct tsens_priv *priv)
+{
+ u32 base0 = 0, base1 = 0;
+ u32 p1[10], p2[10];
+ u32 mode = 0, lsb = 0, msb = 0;
+ u32 *qfprom_cdata;
+ int i;
+
+ qfprom_cdata = (u32 *)qfprom_read(priv->dev, "calib");
+ if (IS_ERR(qfprom_cdata))
+ return PTR_ERR(qfprom_cdata);
+
+ mode = (qfprom_cdata[4] & CAL_SEL_MASK) >> CAL_SEL_SHIFT;
+ dev_dbg(priv->dev, "calibration mode is %d\n", mode);
+
+ switch (mode) {
+ case TWO_PT_CALIB:
+ base1 = (qfprom_cdata[4] & BASE1_MASK) >> BASE1_SHIFT;
+ p2[0] = (qfprom_cdata[0] & S0_P2_MASK) >> S0_P2_SHIFT;
+ p2[1] = (qfprom_cdata[0] & S1_P2_MASK) >> S1_P2_SHIFT;
+ /* This value is split over two registers, 2 bits and 4 bits */
+ lsb = (qfprom_cdata[0] & S2_P2_MASK_1_0) >> S2_P2_SHIFT_1_0;
+ msb = (qfprom_cdata[1] & S2_P2_MASK_5_2) >> S2_P2_SHIFT_5_2;
+ p2[2] = msb << 2 | lsb;
+ p2[3] = (qfprom_cdata[1] & S3_P2_MASK) >> S3_P2_SHIFT;
+ p2[4] = (qfprom_cdata[1] & S4_P2_MASK) >> S4_P2_SHIFT;
+ p2[5] = (qfprom_cdata[2] & S5_P2_MASK) >> S5_P2_SHIFT;
+ p2[6] = (qfprom_cdata[2] & S6_P2_MASK) >> S6_P2_SHIFT;
+ /* This value is split over two registers, 2 bits and 4 bits */
+ lsb = (qfprom_cdata[2] & S7_P2_MASK_1_0) >> S7_P2_SHIFT_1_0;
+ msb = (qfprom_cdata[3] & S7_P2_MASK_5_2) >> S7_P2_SHIFT_5_2;
+ p2[7] = msb << 2 | lsb;
+ p2[8] = (qfprom_cdata[3] & S8_P2_MASK) >> S8_P2_SHIFT;
+ p2[9] = (qfprom_cdata[3] & S9_P2_MASK) >> S9_P2_SHIFT;
+ for (i = 0; i < priv->num_sensors; i++)
+ p2[i] = ((base1 + p2[i]) << 2);
+ /* Fall through */
+ case ONE_PT_CALIB2:
+ base0 = (qfprom_cdata[4] & BASE0_MASK) >> BASE0_SHIFT;
+ p1[0] = (qfprom_cdata[0] & S0_P1_MASK) >> S0_P1_SHIFT;
+ p1[1] = (qfprom_cdata[0] & S1_P1_MASK) >> S1_P1_SHIFT;
+ p1[2] = (qfprom_cdata[0] & S2_P1_MASK) >> S2_P1_SHIFT;
+ p1[3] = (qfprom_cdata[1] & S3_P1_MASK) >> S3_P1_SHIFT;
+ p1[4] = (qfprom_cdata[1] & S4_P1_MASK) >> S4_P1_SHIFT;
+ p1[5] = (qfprom_cdata[2] & S5_P1_MASK) >> S5_P1_SHIFT;
+ p1[6] = (qfprom_cdata[2] & S6_P1_MASK) >> S6_P1_SHIFT;
+ p1[7] = (qfprom_cdata[2] & S7_P1_MASK) >> S7_P1_SHIFT;
+ p1[8] = (qfprom_cdata[3] & S8_P1_MASK) >> S8_P1_SHIFT;
+ p1[9] = (qfprom_cdata[3] & S9_P1_MASK) >> S9_P1_SHIFT;
+ for (i = 0; i < priv->num_sensors; i++)
+ p1[i] = (((base0) + p1[i]) << 2);
+ break;
+ default:
+ for (i = 0; i < priv->num_sensors; i++) {
+ p1[i] = 500;
+ p2[i] = 780;
+ }
+ break;
+ }
+
+ compute_intercept_slope(priv, p1, p2, mode);
+
+ return 0;
+}
+
+/* v1.x: qcs404,405 */
+
+static const struct tsens_features tsens_v1_feat = {
+ .ver_major = VER_1_X,
+ .crit_int = 0,
+ .adc = 1,
+ .srot_split = 1,
+ .max_sensors = 11,
+};
+
+static const struct reg_field tsens_v1_regfields[MAX_REGFIELDS] = {
+ /* ----- SROT ------ */
+ /* VERSION */
+ [VER_MAJOR] = REG_FIELD(SROT_HW_VER_OFF, 28, 31),
+ [VER_MINOR] = REG_FIELD(SROT_HW_VER_OFF, 16, 27),
+ [VER_STEP] = REG_FIELD(SROT_HW_VER_OFF, 0, 15),
+ /* CTRL_OFFSET */
+ [TSENS_EN] = REG_FIELD(SROT_CTRL_OFF, 0, 0),
+ [TSENS_SW_RST] = REG_FIELD(SROT_CTRL_OFF, 1, 1),
+ [SENSOR_EN] = REG_FIELD(SROT_CTRL_OFF, 3, 13),
+
+ /* ----- TM ------ */
+ /* INTERRUPT ENABLE */
+ [INT_EN] = REG_FIELD(TM_INT_EN_OFF, 0, 0),
+
+ /* Sn_STATUS */
+ REG_FIELD_FOR_EACH_SENSOR11(LAST_TEMP, TM_Sn_STATUS_OFF, 0, 9),
+ REG_FIELD_FOR_EACH_SENSOR11(VALID, TM_Sn_STATUS_OFF, 14, 14),
+ REG_FIELD_FOR_EACH_SENSOR11(MIN_STATUS, TM_Sn_STATUS_OFF, 10, 10),
+ REG_FIELD_FOR_EACH_SENSOR11(LOWER_STATUS, TM_Sn_STATUS_OFF, 11, 11),
+ REG_FIELD_FOR_EACH_SENSOR11(UPPER_STATUS, TM_Sn_STATUS_OFF, 12, 12),
+ /* No CRITICAL field on v1.x */
+ REG_FIELD_FOR_EACH_SENSOR11(MAX_STATUS, TM_Sn_STATUS_OFF, 13, 13),
+
+ /* TRDY: 1=ready, 0=in progress */
+ [TRDY] = REG_FIELD(TM_TRDY_OFF, 0, 0),
+};
+
+static const struct tsens_ops ops_generic_v1 = {
+ .init = init_common,
+ .calibrate = calibrate_v1,
+ .get_temp = get_temp_tsens_valid,
+};
+
+const struct tsens_plat_data data_tsens_v1 = {
+ .ops = &ops_generic_v1,
+ .feat = &tsens_v1_feat,
+ .fields = tsens_v1_regfields,
+};
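
For illustration only (the fuse words below are invented), this is how calibrate_v1() reassembles the point-2 codes that straddle a fuse-word boundary for sensors 2 and 7, using the S2_P2_MASK_1_0/S2_P2_MASK_5_2 split defined above:

/*
 * Illustrative sketch only, not part of the patch: values are invented.
 * Shows the msb/lsb reassembly calibrate_v1() uses for S2_P2 and S7_P2,
 * whose 6-bit codes straddle a fuse-word boundary (2 bits + 4 bits).
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t word0 = 0x80000000;	/* code bits [1:0] sit at bits 31:30 */
	uint32_t word1 = 0x0000000d;	/* code bits [5:2] sit at bits 3:0   */
	uint32_t lsb = (word0 & 0xc0000000) >> 30;
	uint32_t msb = word1 & 0x0000000f;
	uint32_t p2 = msb << 2 | lsb;	/* reassembled 6-bit point-2 code */

	printf("lsb=%u msb=%u -> p2=%u\n",
	       (unsigned)lsb, (unsigned)msb, (unsigned)p2);
	return 0;
}
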
diff --git a/drivers/thermal/qcom/tsens-v2.c b/drivers/thermal/qcom/tsens-v2.c
index 381a212872bf..1099069f2aa3 100644
--- a/drivers/thermal/qcom/tsens-v2.c
+++ b/drivers/thermal/qcom/tsens-v2.c
@@ -4,76 +4,81 @@
* Copyright (c) 2018, Linaro Limited
*/
-#include <linux/regmap.h>
#include <linux/bitops.h>
+#include <linux/regmap.h>
#include "tsens.h"
-#define STATUS_OFFSET 0xa0
-#define LAST_TEMP_MASK 0xfff
-#define STATUS_VALID_BIT BIT(21)
+/* ----- SROT ------ */
+#define SROT_HW_VER_OFF 0x0000
+#define SROT_CTRL_OFF 0x0004
+
+/* ----- TM ------ */
+#define TM_INT_EN_OFF 0x0004
+#define TM_UPPER_LOWER_INT_STATUS_OFF 0x0008
+#define TM_UPPER_LOWER_INT_CLEAR_OFF 0x000c
+#define TM_UPPER_LOWER_INT_MASK_OFF 0x0010
+#define TM_CRITICAL_INT_STATUS_OFF 0x0014
+#define TM_CRITICAL_INT_CLEAR_OFF 0x0018
+#define TM_CRITICAL_INT_MASK_OFF 0x001c
+#define TM_Sn_UPPER_LOWER_THRESHOLD_OFF 0x0020
+#define TM_Sn_CRITICAL_THRESHOLD_OFF 0x0060
+#define TM_Sn_STATUS_OFF 0x00a0
+#define TM_TRDY_OFF 0x00e4
-static int get_temp_tsens_v2(struct tsens_device *tmdev, int id, int *temp)
-{
- struct tsens_sensor *s = &tmdev->sensor[id];
- u32 code;
- unsigned int status_reg;
- u32 last_temp = 0, last_temp2 = 0, last_temp3 = 0;
- int ret;
+/* v2.x: 8996, 8998, sdm845 */
- status_reg = tmdev->tm_offset + STATUS_OFFSET + s->hw_id * 4;
- ret = regmap_read(tmdev->tm_map, status_reg, &code);
- if (ret)
- return ret;
- last_temp = code & LAST_TEMP_MASK;
- if (code & STATUS_VALID_BIT)
- goto done;
+static const struct tsens_features tsens_v2_feat = {
+ .ver_major = VER_2_X,
+ .crit_int = 1,
+ .adc = 0,
+ .srot_split = 1,
+ .max_sensors = 16,
+};
- /* Try a second time */
- ret = regmap_read(tmdev->tm_map, status_reg, &code);
- if (ret)
- return ret;
- if (code & STATUS_VALID_BIT) {
- last_temp = code & LAST_TEMP_MASK;
- goto done;
- } else {
- last_temp2 = code & LAST_TEMP_MASK;
- }
+static const struct reg_field tsens_v2_regfields[MAX_REGFIELDS] = {
+ /* ----- SROT ------ */
+ /* VERSION */
+ [VER_MAJOR] = REG_FIELD(SROT_HW_VER_OFF, 28, 31),
+ [VER_MINOR] = REG_FIELD(SROT_HW_VER_OFF, 16, 27),
+ [VER_STEP] = REG_FIELD(SROT_HW_VER_OFF, 0, 15),
+ /* CTRL_OFF */
+ [TSENS_EN] = REG_FIELD(SROT_CTRL_OFF, 0, 0),
+ [TSENS_SW_RST] = REG_FIELD(SROT_CTRL_OFF, 1, 1),
+ [SENSOR_EN] = REG_FIELD(SROT_CTRL_OFF, 3, 18),
- /* Try a third/last time */
- ret = regmap_read(tmdev->tm_map, status_reg, &code);
- if (ret)
- return ret;
- if (code & STATUS_VALID_BIT) {
- last_temp = code & LAST_TEMP_MASK;
- goto done;
- } else {
- last_temp3 = code & LAST_TEMP_MASK;
- }
+ /* ----- TM ------ */
+ /* INTERRUPT ENABLE */
+ /* v2 has separate enables for UPPER/LOWER/CRITICAL interrupts */
+ [INT_EN] = REG_FIELD(TM_INT_EN_OFF, 0, 2),
- if (last_temp == last_temp2)
- last_temp = last_temp2;
- else if (last_temp2 == last_temp3)
- last_temp = last_temp3;
-done:
- /* Convert temperature from deciCelsius to milliCelsius */
- *temp = sign_extend32(last_temp, fls(LAST_TEMP_MASK) - 1) * 100;
+ /* Sn_STATUS */
+ REG_FIELD_FOR_EACH_SENSOR16(LAST_TEMP, TM_Sn_STATUS_OFF, 0, 11),
+ REG_FIELD_FOR_EACH_SENSOR16(VALID, TM_Sn_STATUS_OFF, 21, 21),
+ REG_FIELD_FOR_EACH_SENSOR16(MIN_STATUS, TM_Sn_STATUS_OFF, 16, 16),
+ REG_FIELD_FOR_EACH_SENSOR16(LOWER_STATUS, TM_Sn_STATUS_OFF, 17, 17),
+ REG_FIELD_FOR_EACH_SENSOR16(UPPER_STATUS, TM_Sn_STATUS_OFF, 18, 18),
+ REG_FIELD_FOR_EACH_SENSOR16(CRITICAL_STATUS, TM_Sn_STATUS_OFF, 19, 19),
+ REG_FIELD_FOR_EACH_SENSOR16(MAX_STATUS, TM_Sn_STATUS_OFF, 20, 20),
- return 0;
-}
+ /* TRDY: 1=ready, 0=in progress */
+ [TRDY] = REG_FIELD(TM_TRDY_OFF, 0, 0),
+};
static const struct tsens_ops ops_generic_v2 = {
.init = init_common,
- .get_temp = get_temp_tsens_v2,
+ .get_temp = get_temp_tsens_valid,
};
-const struct tsens_data data_tsens_v2 = {
- .ops = &ops_generic_v2,
- .reg_offsets = { [SROT_CTRL_OFFSET] = 0x4 },
+const struct tsens_plat_data data_tsens_v2 = {
+ .ops = &ops_generic_v2,
+ .feat = &tsens_v2_feat,
+ .fields = tsens_v2_regfields,
};
/* Kept around for backward compatibility with old msm8996.dtsi */
-const struct tsens_data data_8996 = {
+const struct tsens_plat_data data_8996 = {
.num_sensors = 13,
.ops = &ops_generic_v2,
- .reg_offsets = { [SROT_CTRL_OFFSET] = 0x4 },
+ .feat = &tsens_v2_feat,
+ .fields = tsens_v2_regfields,
};
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index f1ec9bbe4717..36b0b52db524 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -15,38 +15,38 @@
static int tsens_get_temp(void *data, int *temp)
{
const struct tsens_sensor *s = data;
- struct tsens_device *tmdev = s->tmdev;
+ struct tsens_priv *priv = s->priv;
- return tmdev->ops->get_temp(tmdev, s->id, temp);
+ return priv->ops->get_temp(priv, s->id, temp);
}
-static int tsens_get_trend(void *p, int trip, enum thermal_trend *trend)
+static int tsens_get_trend(void *data, int trip, enum thermal_trend *trend)
{
- const struct tsens_sensor *s = p;
- struct tsens_device *tmdev = s->tmdev;
+ const struct tsens_sensor *s = data;
+ struct tsens_priv *priv = s->priv;
- if (tmdev->ops->get_trend)
- return tmdev->ops->get_trend(tmdev, s->id, trend);
+ if (priv->ops->get_trend)
+ return priv->ops->get_trend(priv, s->id, trend);
return -ENOTSUPP;
}
static int __maybe_unused tsens_suspend(struct device *dev)
{
- struct tsens_device *tmdev = dev_get_drvdata(dev);
+ struct tsens_priv *priv = dev_get_drvdata(dev);
- if (tmdev->ops && tmdev->ops->suspend)
- return tmdev->ops->suspend(tmdev);
+ if (priv->ops && priv->ops->suspend)
+ return priv->ops->suspend(priv);
return 0;
}
static int __maybe_unused tsens_resume(struct device *dev)
{
- struct tsens_device *tmdev = dev_get_drvdata(dev);
+ struct tsens_priv *priv = dev_get_drvdata(dev);
- if (tmdev->ops && tmdev->ops->resume)
- return tmdev->ops->resume(tmdev);
+ if (priv->ops && priv->ops->resume)
+ return priv->ops->resume(priv);
return 0;
}
@@ -64,6 +64,9 @@ static const struct of_device_id tsens_table[] = {
.compatible = "qcom,msm8996-tsens",
.data = &data_8996,
}, {
+ .compatible = "qcom,tsens-v1",
+ .data = &data_tsens_v1,
+ }, {
.compatible = "qcom,tsens-v2",
.data = &data_tsens_v2,
},
@@ -76,22 +79,27 @@ static const struct thermal_zone_of_device_ops tsens_of_ops = {
.get_trend = tsens_get_trend,
};
-static int tsens_register(struct tsens_device *tmdev)
+static int tsens_register(struct tsens_priv *priv)
{
int i;
struct thermal_zone_device *tzd;
- for (i = 0; i < tmdev->num_sensors; i++) {
- tmdev->sensor[i].tmdev = tmdev;
- tmdev->sensor[i].id = i;
- tzd = devm_thermal_zone_of_sensor_register(tmdev->dev, i,
- &tmdev->sensor[i],
+ for (i = 0; i < priv->num_sensors; i++) {
+ if (!is_sensor_enabled(priv, priv->sensor[i].hw_id)) {
+ dev_err(priv->dev, "sensor %d: disabled\n",
+ priv->sensor[i].hw_id);
+ continue;
+ }
+ priv->sensor[i].priv = priv;
+ priv->sensor[i].id = i;
+ tzd = devm_thermal_zone_of_sensor_register(priv->dev, i,
+ &priv->sensor[i],
&tsens_of_ops);
if (IS_ERR(tzd))
continue;
- tmdev->sensor[i].tzd = tzd;
- if (tmdev->ops->enable)
- tmdev->ops->enable(tmdev, i);
+ priv->sensor[i].tzd = tzd;
+ if (priv->ops->enable)
+ priv->ops->enable(priv, i);
}
return 0;
}
@@ -101,8 +109,8 @@ static int tsens_probe(struct platform_device *pdev)
int ret, i;
struct device *dev;
struct device_node *np;
- struct tsens_device *tmdev;
- const struct tsens_data *data;
+ struct tsens_priv *priv;
+ const struct tsens_plat_data *data;
const struct of_device_id *id;
u32 num_sensors;
@@ -129,55 +137,55 @@ static int tsens_probe(struct platform_device *pdev)
return -EINVAL;
}
- tmdev = devm_kzalloc(dev,
- struct_size(tmdev, sensor, num_sensors),
+ priv = devm_kzalloc(dev,
+ struct_size(priv, sensor, num_sensors),
GFP_KERNEL);
- if (!tmdev)
+ if (!priv)
return -ENOMEM;
- tmdev->dev = dev;
- tmdev->num_sensors = num_sensors;
- tmdev->ops = data->ops;
- for (i = 0; i < tmdev->num_sensors; i++) {
+ priv->dev = dev;
+ priv->num_sensors = num_sensors;
+ priv->ops = data->ops;
+ for (i = 0; i < priv->num_sensors; i++) {
if (data->hw_ids)
- tmdev->sensor[i].hw_id = data->hw_ids[i];
+ priv->sensor[i].hw_id = data->hw_ids[i];
else
- tmdev->sensor[i].hw_id = i;
- }
- for (i = 0; i < REG_ARRAY_SIZE; i++) {
- tmdev->reg_offsets[i] = data->reg_offsets[i];
+ priv->sensor[i].hw_id = i;
}
+ priv->feat = data->feat;
+ priv->fields = data->fields;
- if (!tmdev->ops || !tmdev->ops->init || !tmdev->ops->get_temp)
+ if (!priv->ops || !priv->ops->init || !priv->ops->get_temp)
return -EINVAL;
- ret = tmdev->ops->init(tmdev);
+ ret = priv->ops->init(priv);
if (ret < 0) {
dev_err(dev, "tsens init failed\n");
return ret;
}
- if (tmdev->ops->calibrate) {
- ret = tmdev->ops->calibrate(tmdev);
+ if (priv->ops->calibrate) {
+ ret = priv->ops->calibrate(priv);
if (ret < 0) {
- dev_err(dev, "tsens calibration failed\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "tsens calibration failed\n");
return ret;
}
}
- ret = tsens_register(tmdev);
+ ret = tsens_register(priv);
- platform_set_drvdata(pdev, tmdev);
+ platform_set_drvdata(pdev, priv);
return ret;
}
static int tsens_remove(struct platform_device *pdev)
{
- struct tsens_device *tmdev = platform_get_drvdata(pdev);
+ struct tsens_priv *priv = platform_get_drvdata(pdev);
- if (tmdev->ops->disable)
- tmdev->ops->disable(tmdev);
+ if (priv->ops->disable)
+ priv->ops->disable(priv);
return 0;
}
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
index 7b7feee5dc46..eefe3844fb4e 100644
--- a/drivers/thermal/qcom/tsens.h
+++ b/drivers/thermal/qcom/tsens.h
@@ -9,17 +9,39 @@
#define ONE_PT_CALIB 0x1
#define ONE_PT_CALIB2 0x2
#define TWO_PT_CALIB 0x3
+#define CAL_DEGC_PT1 30
+#define CAL_DEGC_PT2 120
+#define SLOPE_FACTOR 1000
+#define SLOPE_DEFAULT 3200
+
#include <linux/thermal.h>
+#include <linux/regmap.h>
+
+struct tsens_priv;
-struct tsens_device;
+enum tsens_ver {
+ VER_0_1 = 0,
+ VER_1_X,
+ VER_2_X,
+};
+/**
+ * struct tsens_sensor - data for each sensor connected to the tsens device
+ * @priv: tsens device instance that this sensor is connected to
+ * @tzd: pointer to the thermal zone that this sensor is in
+ * @offset: offset of temperature adjustment curve
+ * @id: Sensor ID
+ * @hw_id: HW ID can be used in case of platform-specific IDs
+ * @slope: slope of temperature adjustment curve
+ * @status: 8960-specific variable to track 8960 and 8660 status register offset
+ */
struct tsens_sensor {
- struct tsens_device *tmdev;
+ struct tsens_priv *priv;
struct thermal_zone_device *tzd;
int offset;
- int id;
- int hw_id;
+ unsigned int id;
+ unsigned int hw_id;
int slope;
u32 status;
};
@@ -37,63 +59,274 @@ struct tsens_sensor {
*/
struct tsens_ops {
/* mandatory callbacks */
- int (*init)(struct tsens_device *);
- int (*calibrate)(struct tsens_device *);
- int (*get_temp)(struct tsens_device *, int, int *);
+ int (*init)(struct tsens_priv *priv);
+ int (*calibrate)(struct tsens_priv *priv);
+ int (*get_temp)(struct tsens_priv *priv, int i, int *temp);
/* optional callbacks */
- int (*enable)(struct tsens_device *, int);
- void (*disable)(struct tsens_device *);
- int (*suspend)(struct tsens_device *);
- int (*resume)(struct tsens_device *);
- int (*get_trend)(struct tsens_device *, int, enum thermal_trend *);
+ int (*enable)(struct tsens_priv *priv, int i);
+ void (*disable)(struct tsens_priv *priv);
+ int (*suspend)(struct tsens_priv *priv);
+ int (*resume)(struct tsens_priv *priv);
+ int (*get_trend)(struct tsens_priv *priv, int i, enum thermal_trend *trend);
};
-enum reg_list {
- SROT_CTRL_OFFSET,
+#define REG_FIELD_FOR_EACH_SENSOR11(_name, _offset, _startbit, _stopbit) \
+ [_name##_##0] = REG_FIELD(_offset, _startbit, _stopbit), \
+ [_name##_##1] = REG_FIELD(_offset + 4, _startbit, _stopbit), \
+ [_name##_##2] = REG_FIELD(_offset + 8, _startbit, _stopbit), \
+ [_name##_##3] = REG_FIELD(_offset + 12, _startbit, _stopbit), \
+ [_name##_##4] = REG_FIELD(_offset + 16, _startbit, _stopbit), \
+ [_name##_##5] = REG_FIELD(_offset + 20, _startbit, _stopbit), \
+ [_name##_##6] = REG_FIELD(_offset + 24, _startbit, _stopbit), \
+ [_name##_##7] = REG_FIELD(_offset + 28, _startbit, _stopbit), \
+ [_name##_##8] = REG_FIELD(_offset + 32, _startbit, _stopbit), \
+ [_name##_##9] = REG_FIELD(_offset + 36, _startbit, _stopbit), \
+ [_name##_##10] = REG_FIELD(_offset + 40, _startbit, _stopbit)
- REG_ARRAY_SIZE,
+#define REG_FIELD_FOR_EACH_SENSOR16(_name, _offset, _startbit, _stopbit) \
+ [_name##_##0] = REG_FIELD(_offset, _startbit, _stopbit), \
+ [_name##_##1] = REG_FIELD(_offset + 4, _startbit, _stopbit), \
+ [_name##_##2] = REG_FIELD(_offset + 8, _startbit, _stopbit), \
+ [_name##_##3] = REG_FIELD(_offset + 12, _startbit, _stopbit), \
+ [_name##_##4] = REG_FIELD(_offset + 16, _startbit, _stopbit), \
+ [_name##_##5] = REG_FIELD(_offset + 20, _startbit, _stopbit), \
+ [_name##_##6] = REG_FIELD(_offset + 24, _startbit, _stopbit), \
+ [_name##_##7] = REG_FIELD(_offset + 28, _startbit, _stopbit), \
+ [_name##_##8] = REG_FIELD(_offset + 32, _startbit, _stopbit), \
+ [_name##_##9] = REG_FIELD(_offset + 36, _startbit, _stopbit), \
+ [_name##_##10] = REG_FIELD(_offset + 40, _startbit, _stopbit), \
+ [_name##_##11] = REG_FIELD(_offset + 44, _startbit, _stopbit), \
+ [_name##_##12] = REG_FIELD(_offset + 48, _startbit, _stopbit), \
+ [_name##_##13] = REG_FIELD(_offset + 52, _startbit, _stopbit), \
+ [_name##_##14] = REG_FIELD(_offset + 56, _startbit, _stopbit), \
+ [_name##_##15] = REG_FIELD(_offset + 60, _startbit, _stopbit)
+
+/* reg_field IDs to use as an index into an array */
+enum regfield_ids {
+ /* ----- SROT ------ */
+ /* HW_VER */
+ VER_MAJOR = 0,
+ VER_MINOR,
+ VER_STEP,
+ /* CTRL_OFFSET */
+ TSENS_EN = 3,
+ TSENS_SW_RST,
+ SENSOR_EN,
+ CODE_OR_TEMP,
+
+ /* ----- TM ------ */
+ /* STATUS */
+ LAST_TEMP_0 = 7, /* Last temperature reading */
+ LAST_TEMP_1,
+ LAST_TEMP_2,
+ LAST_TEMP_3,
+ LAST_TEMP_4,
+ LAST_TEMP_5,
+ LAST_TEMP_6,
+ LAST_TEMP_7,
+ LAST_TEMP_8,
+ LAST_TEMP_9,
+ LAST_TEMP_10,
+ LAST_TEMP_11,
+ LAST_TEMP_12,
+ LAST_TEMP_13,
+ LAST_TEMP_14,
+ LAST_TEMP_15,
+ VALID_0 = 23, /* VALID reading or not */
+ VALID_1,
+ VALID_2,
+ VALID_3,
+ VALID_4,
+ VALID_5,
+ VALID_6,
+ VALID_7,
+ VALID_8,
+ VALID_9,
+ VALID_10,
+ VALID_11,
+ VALID_12,
+ VALID_13,
+ VALID_14,
+ VALID_15,
+ MIN_STATUS_0, /* MIN threshold violated */
+ MIN_STATUS_1,
+ MIN_STATUS_2,
+ MIN_STATUS_3,
+ MIN_STATUS_4,
+ MIN_STATUS_5,
+ MIN_STATUS_6,
+ MIN_STATUS_7,
+ MIN_STATUS_8,
+ MIN_STATUS_9,
+ MIN_STATUS_10,
+ MIN_STATUS_11,
+ MIN_STATUS_12,
+ MIN_STATUS_13,
+ MIN_STATUS_14,
+ MIN_STATUS_15,
+ MAX_STATUS_0, /* MAX threshold violated */
+ MAX_STATUS_1,
+ MAX_STATUS_2,
+ MAX_STATUS_3,
+ MAX_STATUS_4,
+ MAX_STATUS_5,
+ MAX_STATUS_6,
+ MAX_STATUS_7,
+ MAX_STATUS_8,
+ MAX_STATUS_9,
+ MAX_STATUS_10,
+ MAX_STATUS_11,
+ MAX_STATUS_12,
+ MAX_STATUS_13,
+ MAX_STATUS_14,
+ MAX_STATUS_15,
+ LOWER_STATUS_0, /* LOWER threshold violated */
+ LOWER_STATUS_1,
+ LOWER_STATUS_2,
+ LOWER_STATUS_3,
+ LOWER_STATUS_4,
+ LOWER_STATUS_5,
+ LOWER_STATUS_6,
+ LOWER_STATUS_7,
+ LOWER_STATUS_8,
+ LOWER_STATUS_9,
+ LOWER_STATUS_10,
+ LOWER_STATUS_11,
+ LOWER_STATUS_12,
+ LOWER_STATUS_13,
+ LOWER_STATUS_14,
+ LOWER_STATUS_15,
+ UPPER_STATUS_0, /* UPPER threshold violated */
+ UPPER_STATUS_1,
+ UPPER_STATUS_2,
+ UPPER_STATUS_3,
+ UPPER_STATUS_4,
+ UPPER_STATUS_5,
+ UPPER_STATUS_6,
+ UPPER_STATUS_7,
+ UPPER_STATUS_8,
+ UPPER_STATUS_9,
+ UPPER_STATUS_10,
+ UPPER_STATUS_11,
+ UPPER_STATUS_12,
+ UPPER_STATUS_13,
+ UPPER_STATUS_14,
+ UPPER_STATUS_15,
+ CRITICAL_STATUS_0, /* CRITICAL threshold violated */
+ CRITICAL_STATUS_1,
+ CRITICAL_STATUS_2,
+ CRITICAL_STATUS_3,
+ CRITICAL_STATUS_4,
+ CRITICAL_STATUS_5,
+ CRITICAL_STATUS_6,
+ CRITICAL_STATUS_7,
+ CRITICAL_STATUS_8,
+ CRITICAL_STATUS_9,
+ CRITICAL_STATUS_10,
+ CRITICAL_STATUS_11,
+ CRITICAL_STATUS_12,
+ CRITICAL_STATUS_13,
+ CRITICAL_STATUS_14,
+ CRITICAL_STATUS_15,
+ /* TRDY */
+ TRDY,
+ /* INTERRUPT ENABLE */
+ INT_EN, /* Pre-V1, V1.x */
+ LOW_INT_EN, /* V2.x */
+ UP_INT_EN, /* V2.x */
+ CRIT_INT_EN, /* V2.x */
+
+ /* Keep last */
+ MAX_REGFIELDS
};
/**
- * struct tsens_data - tsens instance specific data
- * @num_sensors: Max number of sensors supported by platform
+ * struct tsens_features - Features supported by the IP
+ * @ver_major: Major number of IP version
+ * @crit_int: does the IP support critical interrupts?
+ * @adc: do the sensors only output adc code (instead of temperature)?
+ * @srot_split: does the IP neatly split the register space into SROT and TM,
+ * with SROT only being available to secure boot firmware?
+ * @max_sensors: maximum sensors supported by this version of the IP
+ */
+struct tsens_features {
+ unsigned int ver_major;
+ unsigned int crit_int:1;
+ unsigned int adc:1;
+ unsigned int srot_split:1;
+ unsigned int max_sensors;
+};
+
+/**
+ * struct tsens_plat_data - tsens compile-time platform data
+ * @num_sensors: Number of sensors supported by platform
* @ops: operations the tsens instance supports
* @hw_ids: Subset of sensors ids supported by platform, if not the first n
- * @reg_offsets: Register offsets for commonly used registers
+ * @feat: features of the IP
+ * @fields: bitfield locations
*/
-struct tsens_data {
+struct tsens_plat_data {
const u32 num_sensors;
const struct tsens_ops *ops;
- const u16 reg_offsets[REG_ARRAY_SIZE];
unsigned int *hw_ids;
+ const struct tsens_features *feat;
+ const struct reg_field *fields;
};
-/* Registers to be saved/restored across a context loss */
+/**
+ * struct tsens_context - Registers to be saved/restored across a context loss
+ */
struct tsens_context {
int threshold;
int control;
};
-struct tsens_device {
+/**
+ * struct tsens_priv - private data for each instance of the tsens IP
+ * @dev: pointer to struct device
+ * @num_sensors: number of sensors enabled on this device
+ * @tm_map: pointer to TM register address space
+ * @srot_map: pointer to SROT register address space
+ * @tm_offset: deal with old device trees that don't address TM and SROT
+ *		address spaces separately
+ * @rf: array of regmap_fields used to store value of the field
+ * @ctx: registers to be saved and restored during suspend/resume
+ * @feat: features of the IP
+ * @fields: bitfield locations
+ * @ops: pointer to list of callbacks supported by this device
+ * @sensor: list of sensors attached to this device
+ */
+struct tsens_priv {
struct device *dev;
u32 num_sensors;
struct regmap *tm_map;
struct regmap *srot_map;
u32 tm_offset;
- u16 reg_offsets[REG_ARRAY_SIZE];
+ struct regmap_field *rf[MAX_REGFIELDS];
struct tsens_context ctx;
+ const struct tsens_features *feat;
+ const struct reg_field *fields;
const struct tsens_ops *ops;
struct tsens_sensor sensor[0];
};
-char *qfprom_read(struct device *, const char *);
-void compute_intercept_slope(struct tsens_device *, u32 *, u32 *, u32);
-int init_common(struct tsens_device *);
-int get_temp_common(struct tsens_device *, int, int *);
+char *qfprom_read(struct device *dev, const char *cname);
+void compute_intercept_slope(struct tsens_priv *priv, u32 *pt1, u32 *pt2, u32 mode);
+int init_common(struct tsens_priv *priv);
+int get_temp_tsens_valid(struct tsens_priv *priv, int i, int *temp);
+int get_temp_common(struct tsens_priv *priv, int i, int *temp);
+bool is_sensor_enabled(struct tsens_priv *priv, u32 hw_id);
+
+/* TSENS target */
+extern const struct tsens_plat_data data_8960;
+
+/* TSENS v0.1 targets */
+extern const struct tsens_plat_data data_8916, data_8974;
/* TSENS v1 targets */
-extern const struct tsens_data data_8916, data_8974, data_8960;
+extern const struct tsens_plat_data data_tsens_v1;
+
/* TSENS v2 targets */
-extern const struct tsens_data data_8996, data_tsens_v2;
+extern const struct tsens_plat_data data_8996, data_tsens_v2;
#endif /* __QCOM_TSENS_H__ */
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index 3b5f5b3fb1bc..7b364933bfb1 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -193,11 +193,6 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
struct qoriq_tmu_data *data;
struct device_node *np = pdev->dev.of_node;
- if (!np) {
- dev_err(&pdev->dev, "Device OF-Node is NULL");
- return -ENODEV;
- }
-
data = devm_kzalloc(&pdev->dev, sizeof(struct qoriq_tmu_data),
GFP_KERNEL);
if (!data)
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 88fa41cf16e8..83f306265ee1 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -14,7 +14,6 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/thermal.h>
@@ -82,7 +81,6 @@ struct rcar_gen3_thermal_tsc {
struct rcar_gen3_thermal_priv {
struct rcar_gen3_thermal_tsc *tscs[TSC_MAX_NUM];
unsigned int num_tscs;
- spinlock_t lock; /* Protect interrupts on and off */
void (*thermal_init)(struct rcar_gen3_thermal_tsc *tsc);
};
@@ -232,38 +230,16 @@ static irqreturn_t rcar_gen3_thermal_irq(int irq, void *data)
{
struct rcar_gen3_thermal_priv *priv = data;
u32 status;
- int i, ret = IRQ_HANDLED;
+ int i;
- spin_lock(&priv->lock);
for (i = 0; i < priv->num_tscs; i++) {
status = rcar_gen3_thermal_read(priv->tscs[i], REG_GEN3_IRQSTR);
rcar_gen3_thermal_write(priv->tscs[i], REG_GEN3_IRQSTR, 0);
if (status)
- ret = IRQ_WAKE_THREAD;
+ thermal_zone_device_update(priv->tscs[i]->zone,
+ THERMAL_EVENT_UNSPECIFIED);
}
- if (ret == IRQ_WAKE_THREAD)
- rcar_thermal_irq_set(priv, false);
-
- spin_unlock(&priv->lock);
-
- return ret;
-}
-
-static irqreturn_t rcar_gen3_thermal_irq_thread(int irq, void *data)
-{
- struct rcar_gen3_thermal_priv *priv = data;
- unsigned long flags;
- int i;
-
- for (i = 0; i < priv->num_tscs; i++)
- thermal_zone_device_update(priv->tscs[i]->zone,
- THERMAL_EVENT_UNSPECIFIED);
-
- spin_lock_irqsave(&priv->lock, flags);
- rcar_thermal_irq_set(priv, true);
- spin_unlock_irqrestore(&priv->lock, flags);
-
return IRQ_HANDLED;
}
@@ -307,7 +283,7 @@ static void rcar_gen3_thermal_init(struct rcar_gen3_thermal_tsc *tsc)
usleep_range(1000, 2000);
- rcar_gen3_thermal_write(tsc, REG_GEN3_IRQCTL, 0x3F);
+ rcar_gen3_thermal_write(tsc, REG_GEN3_IRQCTL, 0);
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQMSK, 0);
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQEN, IRQ_TEMPD1 | IRQ_TEMP2);
@@ -331,6 +307,9 @@ MODULE_DEVICE_TABLE(of, rcar_gen3_thermal_dt_ids);
static int rcar_gen3_thermal_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct rcar_gen3_thermal_priv *priv = dev_get_drvdata(dev);
+
+ rcar_thermal_irq_set(priv, false);
pm_runtime_put(dev);
pm_runtime_disable(dev);
@@ -371,8 +350,6 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
if (soc_device_match(r8a7795es1))
priv->thermal_init = rcar_gen3_thermal_init_r8a7795es1;
- spin_lock_init(&priv->lock);
-
platform_set_drvdata(pdev, priv);
/*
@@ -390,9 +367,9 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
if (!irqname)
return -ENOMEM;
- ret = devm_request_threaded_irq(dev, irq, rcar_gen3_thermal_irq,
- rcar_gen3_thermal_irq_thread,
- IRQF_SHARED, irqname, priv);
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ rcar_gen3_thermal_irq,
+ IRQF_ONESHOT, irqname, priv);
if (ret)
return ret;
}
@@ -433,10 +410,6 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
}
tsc->zone = zone;
- ret = of_thermal_get_ntrips(tsc->zone);
- if (ret < 0)
- goto error_unregister;
-
tsc->zone->tzp->no_hwmon = false;
ret = thermal_add_hwmon_sysfs(tsc->zone);
if (ret)
@@ -448,6 +421,10 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
goto error_unregister;
}
+ ret = of_thermal_get_ntrips(tsc->zone);
+ if (ret < 0)
+ goto error_unregister;
+
dev_info(dev, "TSC%d: Loaded %d trip points\n", i, ret);
}
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 97462e9b40d8..d0873de718da 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -52,6 +52,7 @@ struct rcar_thermal_chip {
unsigned int irq_per_ch : 1;
unsigned int needs_suspend_resume : 1;
unsigned int nirqs;
+ unsigned int ctemp_bands;
};
static const struct rcar_thermal_chip rcar_thermal = {
@@ -60,6 +61,7 @@ static const struct rcar_thermal_chip rcar_thermal = {
.irq_per_ch = 0,
.needs_suspend_resume = 0,
.nirqs = 1,
+ .ctemp_bands = 1,
};
static const struct rcar_thermal_chip rcar_gen2_thermal = {
@@ -68,6 +70,7 @@ static const struct rcar_thermal_chip rcar_gen2_thermal = {
.irq_per_ch = 0,
.needs_suspend_resume = 0,
.nirqs = 1,
+ .ctemp_bands = 1,
};
static const struct rcar_thermal_chip rcar_gen3_thermal = {
@@ -80,6 +83,7 @@ static const struct rcar_thermal_chip rcar_gen3_thermal = {
* interrupts to detect a temperature change, rise or fall.
*/
.nirqs = 2,
+ .ctemp_bands = 2,
};
struct rcar_thermal_priv {
@@ -263,7 +267,12 @@ static int rcar_thermal_get_current_temp(struct rcar_thermal_priv *priv,
return ret;
mutex_lock(&priv->lock);
- tmp = MCELSIUS((priv->ctemp * 5) - 65);
+ if (priv->chip->ctemp_bands == 1)
+ tmp = MCELSIUS((priv->ctemp * 5) - 65);
+ else if (priv->ctemp < 24)
+ tmp = MCELSIUS(((priv->ctemp * 55) - 720) / 10);
+ else
+ tmp = MCELSIUS((priv->ctemp * 5) - 60);
mutex_unlock(&priv->lock);
if ((tmp < MCELSIUS(-45)) || (tmp > MCELSIUS(125))) {
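
For a quick sanity check of the two Gen3 conversion bands introduced above (sample CTEMP values are arbitrary; this sketch is illustrative and not part of the driver):

/*
 * Illustrative sketch only, not part of the patch: the two CTEMP bands used
 * by rcar_thermal_get_current_temp() when ctemp_bands == 2 (Gen3).
 */
#include <stdio.h>

#define MCELSIUS(t)	((t) * 1000)

static int ctemp_to_mcelsius(int ctemp)
{
	if (ctemp < 24)
		return MCELSIUS(((ctemp * 55) - 720) / 10);
	return MCELSIUS((ctemp * 5) - 60);
}

int main(void)
{
	int samples[] = { 10, 23, 24, 40 };
	int i;

	for (i = 0; i < 4; i++)
		printf("ctemp=%d -> %d mC\n", samples[i],
		       ctemp_to_mcelsius(samples[i]));
	return 0;
}
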
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 9c7643d62ed7..bda1ca199abd 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -172,6 +172,9 @@ struct rockchip_thermal_data {
int tshut_temp;
enum tshut_mode tshut_mode;
enum tshut_polarity tshut_polarity;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *gpio_state;
+ struct pinctrl_state *otp_state;
};
/**
@@ -222,11 +225,15 @@ struct rockchip_thermal_data {
#define GRF_TSADC_TESTBIT_L 0x0e648
#define GRF_TSADC_TESTBIT_H 0x0e64c
+#define PX30_GRF_SOC_CON2 0x0408
+
#define GRF_SARADC_TESTBIT_ON (0x10001 << 2)
#define GRF_TSADC_TESTBIT_H_ON (0x10001 << 2)
#define GRF_TSADC_VCM_EN_L (0x10001 << 7)
#define GRF_TSADC_VCM_EN_H (0x10001 << 7)
+#define GRF_CON_TSADC_CH_INV (0x10001 << 1)
+
/**
* struct tsadc_table - code to temperature conversion table
* @code: the value of adc channel
@@ -689,6 +696,13 @@ static void rk_tsadcv3_initialize(struct regmap *grf, void __iomem *regs,
regs + TSADCV2_AUTO_CON);
}
+static void rk_tsadcv4_initialize(struct regmap *grf, void __iomem *regs,
+ enum tshut_polarity tshut_polarity)
+{
+ rk_tsadcv2_initialize(grf, regs, tshut_polarity);
+ regmap_write(grf, PX30_GRF_SOC_CON2, GRF_CON_TSADC_CH_INV);
+}
+
static void rk_tsadcv2_irq_ack(void __iomem *regs)
{
u32 val;
@@ -818,6 +832,30 @@ static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
writel_relaxed(val, regs + TSADCV2_INT_EN);
}
+static const struct rockchip_tsadc_chip px30_tsadc_data = {
+ .chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
+ .chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */
+ .chn_num = 2, /* 2 channels for tsadc */
+
+ .tshut_mode = TSHUT_MODE_CRU, /* default TSHUT via CRU */
+ .tshut_temp = 95000,
+
+ .initialize = rk_tsadcv4_initialize,
+ .irq_ack = rk_tsadcv3_irq_ack,
+ .control = rk_tsadcv3_control,
+ .get_temp = rk_tsadcv2_get_temp,
+ .set_alarm_temp = rk_tsadcv2_alarm_temp,
+ .set_tshut_temp = rk_tsadcv2_tshut_temp,
+ .set_tshut_mode = rk_tsadcv2_tshut_mode,
+
+ .table = {
+ .id = rk3328_code_table,
+ .length = ARRAY_SIZE(rk3328_code_table),
+ .data_mask = TSADCV2_DATA_MASK,
+ .mode = ADC_INCREMENT,
+ },
+};
+
static const struct rockchip_tsadc_chip rv1108_tsadc_data = {
.chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
.chn_num = 1, /* one channel for tsadc */
@@ -990,6 +1028,9 @@ static const struct rockchip_tsadc_chip rk3399_tsadc_data = {
};
static const struct of_device_id of_rockchip_thermal_match[] = {
+ { .compatible = "rockchip,px30-tsadc",
+ .data = (void *)&px30_tsadc_data,
+ },
{
.compatible = "rockchip,rv1108-tsadc",
.data = (void *)&rv1108_tsadc_data,
@@ -1242,6 +1283,8 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
return error;
}
+ thermal->chip->control(thermal->regs, false);
+
error = clk_prepare_enable(thermal->clk);
if (error) {
dev_err(&pdev->dev, "failed to enable converter clock: %d\n",
@@ -1267,6 +1310,30 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
thermal->chip->initialize(thermal->grf, thermal->regs,
thermal->tshut_polarity);
+ if (thermal->tshut_mode == TSHUT_MODE_GPIO) {
+ thermal->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR(thermal->pinctrl)) {
+ dev_err(&pdev->dev, "failed to find thermal pinctrl\n");
+ return PTR_ERR(thermal->pinctrl);
+ }
+
+ thermal->gpio_state = pinctrl_lookup_state(thermal->pinctrl,
+ "gpio");
+ if (IS_ERR_OR_NULL(thermal->gpio_state)) {
+ dev_err(&pdev->dev, "failed to find thermal gpio state\n");
+ return -EINVAL;
+ }
+
+ thermal->otp_state = pinctrl_lookup_state(thermal->pinctrl,
+ "otpout");
+ if (IS_ERR_OR_NULL(thermal->otp_state)) {
+ dev_err(&pdev->dev, "failed to find thermal otpout state\n");
+ return -EINVAL;
+ }
+
+ pinctrl_select_state(thermal->pinctrl, thermal->otp_state);
+ }
+
for (i = 0; i < thermal->chip->chn_num; i++) {
error = rockchip_thermal_register_sensor(pdev, thermal,
&thermal->sensors[i],
@@ -1337,8 +1404,8 @@ static int __maybe_unused rockchip_thermal_suspend(struct device *dev)
clk_disable(thermal->pclk);
clk_disable(thermal->clk);
-
- pinctrl_pm_select_sleep_state(dev);
+ if (thermal->tshut_mode == TSHUT_MODE_GPIO)
+ pinctrl_select_state(thermal->pinctrl, thermal->gpio_state);
return 0;
}
@@ -1383,7 +1450,8 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev)
for (i = 0; i < thermal->chip->chn_num; i++)
rockchip_thermal_toggle_sensor(&thermal->sensors[i], true);
- pinctrl_pm_select_default_state(dev);
+ if (thermal->tshut_mode == TSHUT_MODE_GPIO)
+ pinctrl_select_state(thermal->pinctrl, thermal->otp_state);
return 0;
}
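The probe/suspend/resume handling of the new pinctrl states can be summarized in one helper; this is a minimal sketch only, the helper itself is hypothetical and only devm_pinctrl_get(), pinctrl_lookup_state(), pinctrl_select_state() and the "gpio"/"otpout" state names come from the patch:

/* Illustrative only: park the TSHUT pin as a GPIO while suspended and
 * restore its "otpout" (thermal-shutdown output) function on resume. */
static int rockchip_thermal_pin_select(struct rockchip_thermal_data *thermal,
				       bool suspended)
{
	struct pinctrl_state *state;

	if (thermal->tshut_mode != TSHUT_MODE_GPIO)
		return 0;	/* CRU-based shutdown needs no pin muxing */

	state = suspended ? thermal->gpio_state : thermal->otp_state;
	return pinctrl_select_state(thermal->pinctrl, state);
}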
diff --git a/drivers/thermal/st/Kconfig b/drivers/thermal/st/Kconfig
index b80f9a9e4f8f..d8b1a4586d0b 100644
--- a/drivers/thermal/st/Kconfig
+++ b/drivers/thermal/st/Kconfig
@@ -3,9 +3,9 @@
#
config ST_THERMAL
- tristate "Thermal sensors on STMicroelectronics STi series of SoCs"
- help
- Support for thermal sensors on STMicroelectronics STi series of SoCs.
+ tristate "Thermal sensors on STMicroelectronics STi series of SoCs"
+ help
+ Support for thermal sensors on STMicroelectronics STi series of SoCs.
config ST_THERMAL_SYSCFG
select ST_THERMAL
@@ -16,11 +16,11 @@ config ST_THERMAL_MEMMAP
tristate "STi series memory mapped access based thermal sensors"
config STM32_THERMAL
- tristate "Thermal framework support on STMicroelectronics STM32 series of SoCs"
- depends on MACH_STM32MP157
- default y
- help
- Support for thermal framework on STMicroelectronics STM32 series of
- SoCs. This thermal driver allows to access to general thermal framework
- functionalities and to acces to SoC sensor functionalities. This
- configuration is fully dependent of MACH_STM32MP157.
+ tristate "Thermal framework support on STMicroelectronics STM32 series of SoCs"
+ depends on MACH_STM32MP157
+ default y
+ help
+ Support for thermal framework on STMicroelectronics STM32 series of
+	  SoCs. This thermal driver provides access to the generic thermal
+	  framework and to the SoC temperature sensor. This configuration is
+	  fully dependent on MACH_STM32MP157.
diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c
index bbd73c5a4a4e..cf9ddc52f30e 100644
--- a/drivers/thermal/st/stm_thermal.c
+++ b/drivers/thermal/st/stm_thermal.c
@@ -570,8 +570,7 @@ thermal_unprepare:
static int stm_thermal_suspend(struct device *dev)
{
int ret;
- struct platform_device *pdev = to_platform_device(dev);
- struct stm_thermal_sensor *sensor = platform_get_drvdata(pdev);
+ struct stm_thermal_sensor *sensor = dev_get_drvdata(dev);
ret = stm_thermal_sensor_off(sensor);
if (ret)
@@ -585,8 +584,7 @@ static int stm_thermal_suspend(struct device *dev)
static int stm_thermal_resume(struct device *dev)
{
int ret;
- struct platform_device *pdev = to_platform_device(dev);
- struct stm_thermal_sensor *sensor = platform_get_drvdata(pdev);
+ struct stm_thermal_sensor *sensor = dev_get_drvdata(dev);
ret = stm_thermal_prepare(sensor);
if (ret)
diff --git a/drivers/thermal/tegra/Kconfig b/drivers/thermal/tegra/Kconfig
index f8740f7852e3..fc0b33b3f26b 100644
--- a/drivers/thermal/tegra/Kconfig
+++ b/drivers/thermal/tegra/Kconfig
@@ -14,7 +14,7 @@ config TEGRA_BPMP_THERMAL
tristate "Tegra BPMP thermal sensing"
depends on TEGRA_BPMP || COMPILE_TEST
help
- Enable this option for support for sensing system temperature of NVIDIA
- Tegra systems-on-chip with the BPMP coprocessor (Tegra186).
+	  Enable this option to support sensing the system temperature of NVIDIA
+	  Tegra systems-on-chip with the BPMP coprocessor (Tegra186).
endmenu
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index 70043a28eb7a..fcf70a3728b6 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014 - 2018, NVIDIA CORPORATION. All rights reserved.
*
* Author:
* Mikko Perttunen <mperttunen@nvidia.com>
@@ -22,6 +23,8 @@
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -85,12 +88,51 @@
#define THERMCTL_LVL0_UP_STATS 0x10
#define THERMCTL_LVL0_DN_STATS 0x14
+#define THERMCTL_INTR_STATUS 0x84
+
+#define TH_INTR_MD0_MASK BIT(25)
+#define TH_INTR_MU0_MASK BIT(24)
+#define TH_INTR_GD0_MASK BIT(17)
+#define TH_INTR_GU0_MASK BIT(16)
+#define TH_INTR_CD0_MASK BIT(9)
+#define TH_INTR_CU0_MASK BIT(8)
+#define TH_INTR_PD0_MASK BIT(1)
+#define TH_INTR_PU0_MASK BIT(0)
+#define TH_INTR_IGNORE_MASK 0xFCFCFCFC
+
#define THERMCTL_STATS_CTL 0x94
#define STATS_CTL_CLR_DN 0x8
#define STATS_CTL_EN_DN 0x4
#define STATS_CTL_CLR_UP 0x2
#define STATS_CTL_EN_UP 0x1
+#define OC1_CFG 0x310
+#define OC1_CFG_LONG_LATENCY_MASK BIT(6)
+#define OC1_CFG_HW_RESTORE_MASK BIT(5)
+#define OC1_CFG_PWR_GOOD_MASK_MASK BIT(4)
+#define OC1_CFG_THROTTLE_MODE_MASK (0x3 << 2)
+#define OC1_CFG_ALARM_POLARITY_MASK BIT(1)
+#define OC1_CFG_EN_THROTTLE_MASK BIT(0)
+
+#define OC1_CNT_THRESHOLD 0x314
+#define OC1_THROTTLE_PERIOD 0x318
+#define OC1_ALARM_COUNT 0x31c
+#define OC1_FILTER 0x320
+#define OC1_STATS 0x3a8
+
+#define OC_INTR_STATUS 0x39c
+#define OC_INTR_ENABLE 0x3a0
+#define OC_INTR_DISABLE 0x3a4
+#define OC_STATS_CTL 0x3c4
+#define OC_STATS_CTL_CLR_ALL 0x2
+#define OC_STATS_CTL_EN_ALL 0x1
+
+#define OC_INTR_OC1_MASK BIT(0)
+#define OC_INTR_OC2_MASK BIT(1)
+#define OC_INTR_OC3_MASK BIT(2)
+#define OC_INTR_OC4_MASK BIT(3)
+#define OC_INTR_OC5_MASK BIT(4)
+
#define THROT_GLOBAL_CFG 0x400
#define THROT_GLOBAL_ENB_MASK BIT(0)
@@ -160,6 +202,15 @@
/* get dividend from the depth */
#define THROT_DEPTH_DIVIDEND(depth) ((256 * (100 - (depth)) / 100) - 1)
+/* gk20a nv_therm interface N:3 Mapping. Levels defined in tegra124-soctherm.h
+ * level vector
+ * NONE 3'b000
+ * LOW 3'b001
+ * MED 3'b011
+ * HIGH 3'b111
+ */
+#define THROT_LEVEL_TO_DEPTH(level) ((0x1 << (level)) - 1)
+
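Worked out, the macro reproduces the vectors in the comment: THROT_LEVEL_TO_DEPTH(0) = 0x0 (NONE), (1) = 0x1 (LOW), (2) = 0x3 (MED) and (3) = 0x7 (HIGH), i.e. (1 << level) - 1 simply sets the lowest 'level' bits of the 3-bit vector.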
/* get THROT_PSKIP_xxx offset per LIGHT/HEAVY throt and CPU/GPU dev */
#define THROT_OFFSET 0x30
#define THROT_PSKIP_CTRL(throt, dev) (THROT_PSKIP_CTRL_LITE_CPU + \
@@ -173,6 +224,25 @@
#define THROT_DELAY_CTRL(throt) (THROT_DELAY_LITE + \
(THROT_OFFSET * throt))
+#define ALARM_OFFSET 0x14
+#define ALARM_CFG(throt) (OC1_CFG + \
+ (ALARM_OFFSET * (throt - THROTTLE_OC1)))
+
+#define ALARM_CNT_THRESHOLD(throt) (OC1_CNT_THRESHOLD + \
+ (ALARM_OFFSET * (throt - THROTTLE_OC1)))
+
+#define ALARM_THROTTLE_PERIOD(throt) (OC1_THROTTLE_PERIOD + \
+ (ALARM_OFFSET * (throt - THROTTLE_OC1)))
+
+#define ALARM_ALARM_COUNT(throt) (OC1_ALARM_COUNT + \
+ (ALARM_OFFSET * (throt - THROTTLE_OC1)))
+
+#define ALARM_FILTER(throt) (OC1_FILTER + \
+ (ALARM_OFFSET * (throt - THROTTLE_OC1)))
+
+#define ALARM_STATS(throt) (OC1_STATS + \
+ (4 * (throt - THROTTLE_OC1)))
+
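As a quick check of the offsets: with ALARM_OFFSET = 0x14, ALARM_CFG(THROTTLE_OC2) = 0x310 + 0x14 = 0x324 and ALARM_CFG(THROTTLE_OC3) = 0x310 + 2 * 0x14 = 0x338, so each OC alarm gets a 0x14-byte register block; only the statistics registers use a 4-byte stride, e.g. ALARM_STATS(THROTTLE_OC2) = 0x3a8 + 4 = 0x3ac.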
/* get CCROC_THROT_PSKIP_xxx offset per HIGH/MED/LOW vect*/
#define CCROC_THROT_OFFSET 0x0c
#define CCROC_THROT_PSKIP_CTRL_CPU_REG(vect) (CCROC_THROT_PSKIP_CTRL_CPU + \
@@ -184,15 +254,32 @@
#define THERMCTL_LVL_REGS_SIZE 0x20
#define THERMCTL_LVL_REG(rg, lv) ((rg) + ((lv) * THERMCTL_LVL_REGS_SIZE))
+#define OC_THROTTLE_MODE_DISABLED 0
+#define OC_THROTTLE_MODE_BRIEF 2
+
static const int min_low_temp = -127000;
static const int max_high_temp = 127000;
enum soctherm_throttle_id {
THROTTLE_LIGHT = 0,
THROTTLE_HEAVY,
+ THROTTLE_OC1,
+ THROTTLE_OC2,
+ THROTTLE_OC3,
+ THROTTLE_OC4,
+ THROTTLE_OC5, /* OC5 is reserved */
THROTTLE_SIZE,
};
+enum soctherm_oc_irq_id {
+ TEGRA_SOC_OC_IRQ_1,
+ TEGRA_SOC_OC_IRQ_2,
+ TEGRA_SOC_OC_IRQ_3,
+ TEGRA_SOC_OC_IRQ_4,
+ TEGRA_SOC_OC_IRQ_5,
+ TEGRA_SOC_OC_IRQ_MAX,
+};
+
enum soctherm_throttle_dev_id {
THROTTLE_DEV_CPU = 0,
THROTTLE_DEV_GPU,
@@ -202,6 +289,11 @@ enum soctherm_throttle_dev_id {
static const char *const throt_names[] = {
[THROTTLE_LIGHT] = "light",
[THROTTLE_HEAVY] = "heavy",
+ [THROTTLE_OC1] = "oc1",
+ [THROTTLE_OC2] = "oc2",
+ [THROTTLE_OC3] = "oc3",
+ [THROTTLE_OC4] = "oc4",
+ [THROTTLE_OC5] = "oc5",
};
struct tegra_soctherm;
@@ -213,12 +305,23 @@ struct tegra_thermctl_zone {
const struct tegra_tsensor_group *sg;
};
+struct soctherm_oc_cfg {
+ u32 active_low;
+ u32 throt_period;
+ u32 alarm_cnt_thresh;
+ u32 alarm_filter;
+ u32 mode;
+ bool intr_en;
+};
+
struct soctherm_throt_cfg {
const char *name;
unsigned int id;
u8 priority;
u8 cpu_throt_level;
u32 cpu_throt_depth;
+ u32 gpu_throt_level;
+ struct soctherm_oc_cfg oc_cfg;
struct thermal_cooling_device *cdev;
bool init;
};
@@ -231,6 +334,9 @@ struct tegra_soctherm {
void __iomem *clk_regs;
void __iomem *ccroc_regs;
+ int thermal_irq;
+ int edp_irq;
+
u32 *calib;
struct thermal_zone_device **thermctl_tzs;
struct tegra_soctherm_soc *soc;
@@ -238,8 +344,19 @@ struct tegra_soctherm {
struct soctherm_throt_cfg throt_cfgs[THROTTLE_SIZE];
struct dentry *debugfs_dir;
+
+ struct mutex thermctl_lock;
};
+struct soctherm_oc_irq_chip_data {
+ struct mutex irq_lock; /* serialize OC IRQs */
+ struct irq_chip irq_chip;
+ struct irq_domain *domain;
+ int irq_enable;
+};
+
+static struct soctherm_oc_irq_chip_data soc_irq_cdata;
+
/**
* ccroc_writel() - writes a value to a CCROC register
* @ts: pointer to a struct tegra_soctherm
@@ -446,6 +563,24 @@ find_throttle_cfg_by_name(struct tegra_soctherm *ts, const char *name)
return NULL;
}
+static int tsensor_group_thermtrip_get(struct tegra_soctherm *ts, int id)
+{
+ int i, temp = min_low_temp;
+ struct tsensor_group_thermtrips *tt = ts->soc->thermtrips;
+
+ if (id >= TEGRA124_SOCTHERM_SENSOR_NUM)
+ return temp;
+
+ if (tt) {
+ for (i = 0; i < ts->soc->num_ttgs; i++) {
+ if (tt[i].id == id)
+ return tt[i].temp;
+ }
+ }
+
+ return temp;
+}
+
static int tegra_thermctl_set_trip_temp(void *data, int trip, int temp)
{
struct tegra_thermctl_zone *zone = data;
@@ -464,7 +599,16 @@ static int tegra_thermctl_set_trip_temp(void *data, int trip, int temp)
return ret;
if (type == THERMAL_TRIP_CRITICAL) {
- return thermtrip_program(dev, sg, temp);
+		/*
+		 * If a thermtrip is configured for this sensor group via the
+		 * "nvidia,thermtrips" DT property, the critical trip does not
+		 * need to be programmed into HW; otherwise, program it.
+		 */
+ if (min_low_temp == tsensor_group_thermtrip_get(ts, sg->id))
+ return thermtrip_program(dev, sg, temp);
+ else
+ return 0;
+
} else if (type == THERMAL_TRIP_HOT) {
int i;
@@ -519,10 +663,60 @@ static int tegra_thermctl_get_trend(void *data, int trip,
return 0;
}
+static void thermal_irq_enable(struct tegra_thermctl_zone *zn)
+{
+ u32 r;
+
+ /* multiple zones could be handling and setting trips at once */
+ mutex_lock(&zn->ts->thermctl_lock);
+ r = readl(zn->ts->regs + THERMCTL_INTR_ENABLE);
+ r = REG_SET_MASK(r, zn->sg->thermctl_isr_mask, TH_INTR_UP_DN_EN);
+ writel(r, zn->ts->regs + THERMCTL_INTR_ENABLE);
+ mutex_unlock(&zn->ts->thermctl_lock);
+}
+
+static void thermal_irq_disable(struct tegra_thermctl_zone *zn)
+{
+ u32 r;
+
+ /* multiple zones could be handling and setting trips at once */
+ mutex_lock(&zn->ts->thermctl_lock);
+ r = readl(zn->ts->regs + THERMCTL_INTR_DISABLE);
+ r = REG_SET_MASK(r, zn->sg->thermctl_isr_mask, 0);
+ writel(r, zn->ts->regs + THERMCTL_INTR_DISABLE);
+ mutex_unlock(&zn->ts->thermctl_lock);
+}
+
+static int tegra_thermctl_set_trips(void *data, int lo, int hi)
+{
+ struct tegra_thermctl_zone *zone = data;
+ u32 r;
+
+ thermal_irq_disable(zone);
+
+ r = readl(zone->ts->regs + zone->sg->thermctl_lvl0_offset);
+ r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_EN_MASK, 0);
+ writel(r, zone->ts->regs + zone->sg->thermctl_lvl0_offset);
+
+ lo = enforce_temp_range(zone->dev, lo) / zone->ts->soc->thresh_grain;
+ hi = enforce_temp_range(zone->dev, hi) / zone->ts->soc->thresh_grain;
+ dev_dbg(zone->dev, "%s hi:%d, lo:%d\n", __func__, hi, lo);
+
+ r = REG_SET_MASK(r, zone->sg->thermctl_lvl0_up_thresh_mask, hi);
+ r = REG_SET_MASK(r, zone->sg->thermctl_lvl0_dn_thresh_mask, lo);
+ r = REG_SET_MASK(r, THERMCTL_LVL0_CPU0_EN_MASK, 1);
+ writel(r, zone->ts->regs + zone->sg->thermctl_lvl0_offset);
+
+ thermal_irq_enable(zone);
+
+ return 0;
+}
+
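To make the scaling in set_trips concrete: assuming a thresh_grain of 1000 millidegrees (the actual grain comes from the per-SoC data, so this is only an example), a requested window of lo = 49000 / hi = 85000 is clamped to the supported range and written as raw level-0 thresholds 49 and 85, after which the CPU0 enable bit is set so the sensor group raises its level-0 up/down interrupts when the temperature leaves that window.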
static const struct thermal_zone_of_device_ops tegra_of_thermal_ops = {
.get_temp = tegra_thermctl_get_temp,
.set_trip_temp = tegra_thermctl_set_trip_temp,
.get_trend = tegra_thermctl_get_trend,
+ .set_trips = tegra_thermctl_set_trips,
};
static int get_hot_temp(struct thermal_zone_device *tz, int *trip, int *temp)
@@ -555,7 +749,8 @@ static int get_hot_temp(struct thermal_zone_device *tz, int *trip, int *temp)
* @dev: struct device * of the SOC_THERM instance
*
* Configure the SOC_THERM HW trip points, setting "THERMTRIP"
- * "THROTTLE" trip points , using "critical" or "hot" type trip_temp
+ * "THROTTLE" trip points , using "thermtrips", "critical" or "hot"
+ * type trip_temp
* from thermal zone.
* After they have been configured, THERMTRIP or THROTTLE will take
* action when the configured SoC thermal sensor group reaches a
@@ -577,28 +772,23 @@ static int tegra_soctherm_set_hwtrips(struct device *dev,
{
struct tegra_soctherm *ts = dev_get_drvdata(dev);
struct soctherm_throt_cfg *stc;
- int i, trip, temperature;
- int ret;
+ int i, trip, temperature, ret;
- ret = tz->ops->get_crit_temp(tz, &temperature);
- if (ret) {
- dev_warn(dev, "thermtrip: %s: missing critical temperature\n",
- sg->name);
- goto set_throttle;
- }
+ /* Get thermtrips. If missing, try to get critical trips. */
+ temperature = tsensor_group_thermtrip_get(ts, sg->id);
+ if (min_low_temp == temperature)
+ if (tz->ops->get_crit_temp(tz, &temperature))
+ temperature = max_high_temp;
ret = thermtrip_program(dev, sg, temperature);
if (ret) {
- dev_err(dev, "thermtrip: %s: error during enable\n",
- sg->name);
+ dev_err(dev, "thermtrip: %s: error during enable\n", sg->name);
return ret;
}
- dev_info(dev,
- "thermtrip: will shut down when %s reaches %d mC\n",
+ dev_info(dev, "thermtrip: will shut down when %s reaches %d mC\n",
sg->name, temperature);
-set_throttle:
ret = get_hot_temp(tz, &trip, &temperature);
if (ret) {
dev_info(dev, "throttrip: %s: missing hot temperature\n",
@@ -606,7 +796,7 @@ set_throttle:
return 0;
}
- for (i = 0; i < THROTTLE_SIZE; i++) {
+ for (i = 0; i < THROTTLE_OC1; i++) {
struct thermal_cooling_device *cdev;
if (!ts->throt_cfgs[i].init)
@@ -638,6 +828,461 @@ set_throttle:
return 0;
}
+static irqreturn_t soctherm_thermal_isr(int irq, void *dev_id)
+{
+ struct tegra_soctherm *ts = dev_id;
+ u32 r;
+
+ /* Case for no lock:
+ * Although interrupts are enabled in set_trips, there is still no need
+ * to lock here because the interrupts are disabled before programming
+	 * new trip points. Hence there can't be an interrupt on the same sensor.
+	 * An interrupt can however occur on a sensor while trips are being
+	 * programmed on a different one. This being a LEVEL interrupt won't
+ * cause a new interrupt but this is taken care of by the re-reading of
+ * the STATUS register in the thread function.
+ */
+ r = readl(ts->regs + THERMCTL_INTR_STATUS);
+ writel(r, ts->regs + THERMCTL_INTR_DISABLE);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * soctherm_thermal_isr_thread() - Handles a thermal interrupt request
+ * @irq: The interrupt number being requested; not used
+ * @dev_id: Opaque pointer to tegra_soctherm;
+ *
+ * Clears the interrupt status register if there are expected
+ * interrupt bits set.
+ * The interrupt(s) are then handled by updating the corresponding
+ * thermal zones.
+ *
+ * An error is logged if any unexpected interrupt bits are set.
+ *
+ * Disabled interrupts are re-enabled.
+ *
+ * Return: %IRQ_HANDLED. Interrupt was handled and no further processing
+ * is needed.
+ */
+static irqreturn_t soctherm_thermal_isr_thread(int irq, void *dev_id)
+{
+ struct tegra_soctherm *ts = dev_id;
+ struct thermal_zone_device *tz;
+ u32 st, ex = 0, cp = 0, gp = 0, pl = 0, me = 0;
+
+ st = readl(ts->regs + THERMCTL_INTR_STATUS);
+
+ /* deliberately clear expected interrupts handled in SW */
+ cp |= st & TH_INTR_CD0_MASK;
+ cp |= st & TH_INTR_CU0_MASK;
+
+ gp |= st & TH_INTR_GD0_MASK;
+ gp |= st & TH_INTR_GU0_MASK;
+
+ pl |= st & TH_INTR_PD0_MASK;
+ pl |= st & TH_INTR_PU0_MASK;
+
+ me |= st & TH_INTR_MD0_MASK;
+ me |= st & TH_INTR_MU0_MASK;
+
+ ex |= cp | gp | pl | me;
+ if (ex) {
+ writel(ex, ts->regs + THERMCTL_INTR_STATUS);
+ st &= ~ex;
+
+ if (cp) {
+ tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_CPU];
+ thermal_zone_device_update(tz,
+ THERMAL_EVENT_UNSPECIFIED);
+ }
+
+ if (gp) {
+ tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_GPU];
+ thermal_zone_device_update(tz,
+ THERMAL_EVENT_UNSPECIFIED);
+ }
+
+ if (pl) {
+ tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_PLLX];
+ thermal_zone_device_update(tz,
+ THERMAL_EVENT_UNSPECIFIED);
+ }
+
+ if (me) {
+ tz = ts->thermctl_tzs[TEGRA124_SOCTHERM_SENSOR_MEM];
+ thermal_zone_device_update(tz,
+ THERMAL_EVENT_UNSPECIFIED);
+ }
+ }
+
+ /* deliberately ignore expected interrupts NOT handled in SW */
+ ex |= TH_INTR_IGNORE_MASK;
+ st &= ~ex;
+
+ if (st) {
+ /* Whine about any other unexpected INTR bits still set */
+ pr_err("soctherm: Ignored unexpected INTRs 0x%08x\n", st);
+ writel(st, ts->regs + THERMCTL_INTR_STATUS);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * soctherm_oc_intr_enable() - Enables the soctherm over-current interrupt
+ * @alarm: The soctherm throttle id
+ * @enable: Whether the over-current interrupt for @alarm should be
+ *          enabled; if false, nothing is programmed
+ *
+ * Enables the over-current pin for @alarm to raise an interrupt, provided
+ * @enable is set and the alarm corresponds to OC1, OC2, OC3, or OC4.
+ */
+static void soctherm_oc_intr_enable(struct tegra_soctherm *ts,
+ enum soctherm_throttle_id alarm,
+ bool enable)
+{
+ u32 r;
+
+ if (!enable)
+ return;
+
+ r = readl(ts->regs + OC_INTR_ENABLE);
+ switch (alarm) {
+ case THROTTLE_OC1:
+ r = REG_SET_MASK(r, OC_INTR_OC1_MASK, 1);
+ break;
+ case THROTTLE_OC2:
+ r = REG_SET_MASK(r, OC_INTR_OC2_MASK, 1);
+ break;
+ case THROTTLE_OC3:
+ r = REG_SET_MASK(r, OC_INTR_OC3_MASK, 1);
+ break;
+ case THROTTLE_OC4:
+ r = REG_SET_MASK(r, OC_INTR_OC4_MASK, 1);
+ break;
+ default:
+ r = 0;
+ break;
+ }
+ writel(r, ts->regs + OC_INTR_ENABLE);
+}
+
+/**
+ * soctherm_handle_alarm() - Handles soctherm alarms
+ * @alarm: The soctherm throttle id
+ *
+ * "Handles" over-current alarms (OC1, OC2, OC3, and OC4) by printing
+ * a warning or informative message.
+ *
+ * Return: 0 on success, or -EINVAL if @alarm is not a supported OC alarm.
+ */
+static int soctherm_handle_alarm(enum soctherm_throttle_id alarm)
+{
+ int rv = -EINVAL;
+
+ switch (alarm) {
+ case THROTTLE_OC1:
+ pr_debug("soctherm: Successfully handled OC1 alarm\n");
+ rv = 0;
+ break;
+
+ case THROTTLE_OC2:
+ pr_debug("soctherm: Successfully handled OC2 alarm\n");
+ rv = 0;
+ break;
+
+ case THROTTLE_OC3:
+ pr_debug("soctherm: Successfully handled OC3 alarm\n");
+ rv = 0;
+ break;
+
+ case THROTTLE_OC4:
+ pr_debug("soctherm: Successfully handled OC4 alarm\n");
+ rv = 0;
+ break;
+
+ default:
+ break;
+ }
+
+ if (rv)
+ pr_err("soctherm: ERROR in handling %s alarm\n",
+ throt_names[alarm]);
+
+ return rv;
+}
+
+/**
+ * soctherm_edp_isr_thread() - log an over-current interrupt request
+ * @irq: OC irq number. Currently not being used. See description
+ * @arg: a void pointer for callback, currently not being used
+ *
+ * Over-current events are handled in hardware. This function is called to log
+ * and handle any OC events that happened. Additionally, it checks the
+ * over-current interrupt status register for bits that are set but were
+ * not expected (i.e. any discrepancy in interrupt status); any such
+ * discrepancy is logged.
+ *
+ * Return: %IRQ_HANDLED
+ */
+static irqreturn_t soctherm_edp_isr_thread(int irq, void *arg)
+{
+ struct tegra_soctherm *ts = arg;
+ u32 st, ex, oc1, oc2, oc3, oc4;
+
+ st = readl(ts->regs + OC_INTR_STATUS);
+
+ /* deliberately clear expected interrupts handled in SW */
+ oc1 = st & OC_INTR_OC1_MASK;
+ oc2 = st & OC_INTR_OC2_MASK;
+ oc3 = st & OC_INTR_OC3_MASK;
+ oc4 = st & OC_INTR_OC4_MASK;
+ ex = oc1 | oc2 | oc3 | oc4;
+
+ pr_err("soctherm: OC ALARM 0x%08x\n", ex);
+ if (ex) {
+ writel(st, ts->regs + OC_INTR_STATUS);
+ st &= ~ex;
+
+ if (oc1 && !soctherm_handle_alarm(THROTTLE_OC1))
+ soctherm_oc_intr_enable(ts, THROTTLE_OC1, true);
+
+ if (oc2 && !soctherm_handle_alarm(THROTTLE_OC2))
+ soctherm_oc_intr_enable(ts, THROTTLE_OC2, true);
+
+ if (oc3 && !soctherm_handle_alarm(THROTTLE_OC3))
+ soctherm_oc_intr_enable(ts, THROTTLE_OC3, true);
+
+ if (oc4 && !soctherm_handle_alarm(THROTTLE_OC4))
+ soctherm_oc_intr_enable(ts, THROTTLE_OC4, true);
+
+ if (oc1 && soc_irq_cdata.irq_enable & BIT(0))
+ handle_nested_irq(
+ irq_find_mapping(soc_irq_cdata.domain, 0));
+
+ if (oc2 && soc_irq_cdata.irq_enable & BIT(1))
+ handle_nested_irq(
+ irq_find_mapping(soc_irq_cdata.domain, 1));
+
+ if (oc3 && soc_irq_cdata.irq_enable & BIT(2))
+ handle_nested_irq(
+ irq_find_mapping(soc_irq_cdata.domain, 2));
+
+ if (oc4 && soc_irq_cdata.irq_enable & BIT(3))
+ handle_nested_irq(
+ irq_find_mapping(soc_irq_cdata.domain, 3));
+ }
+
+ if (st) {
+ pr_err("soctherm: Ignored unexpected OC ALARM 0x%08x\n", st);
+ writel(st, ts->regs + OC_INTR_STATUS);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * soctherm_edp_isr() - Disables any active interrupts
+ * @irq: The interrupt request number
+ * @arg: Opaque pointer to an argument
+ *
+ * Writes to the OC_INTR_DISABLE register the over current interrupt status,
+ * masking any asserted interrupts. Doing this prevents the same interrupts
+ * from triggering this isr repeatedly. The thread woken by this isr will
+ * handle asserted interrupts and subsequently unmask/re-enable them.
+ *
+ * The OC_INTR_DISABLE register indicates which OC interrupts
+ * have been disabled.
+ *
+ * Return: %IRQ_WAKE_THREAD, handler requests to wake the handler thread
+ */
+static irqreturn_t soctherm_edp_isr(int irq, void *arg)
+{
+ struct tegra_soctherm *ts = arg;
+ u32 r;
+
+ if (!ts)
+ return IRQ_NONE;
+
+ r = readl(ts->regs + OC_INTR_STATUS);
+ writel(r, ts->regs + OC_INTR_DISABLE);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * soctherm_oc_irq_lock() - locks the over-current interrupt request
+ * @data: Interrupt request data
+ *
+ * Looks up the chip data from @data and locks the mutex associated with
+ * a particular over-current interrupt request.
+ */
+static void soctherm_oc_irq_lock(struct irq_data *data)
+{
+ struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&d->irq_lock);
+}
+
+/**
+ * soctherm_oc_irq_sync_unlock() - Unlocks the OC interrupt request
+ * @data: Interrupt request data
+ *
+ * Looks up the interrupt request data @data and unlocks the mutex associated
+ * with a particular over-current interrupt request.
+ */
+static void soctherm_oc_irq_sync_unlock(struct irq_data *data)
+{
+ struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+
+ mutex_unlock(&d->irq_lock);
+}
+
+/**
+ * soctherm_oc_irq_enable() - Enables the SOC_THERM over-current interrupt queue
+ * @data: irq_data structure of the chip
+ *
+ * Sets the irq_enable bit of SOC_THERM allowing SOC_THERM
+ * to respond to over-current interrupts.
+ *
+ */
+static void soctherm_oc_irq_enable(struct irq_data *data)
+{
+ struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+
+ d->irq_enable |= BIT(data->hwirq);
+}
+
+/**
+ * soctherm_oc_irq_disable() - Disables overcurrent interrupt requests
+ * @irq_data: The interrupt request information
+ *
+ * Clears the interrupt request enable bit of the overcurrent
+ * interrupt request chip data.
+ *
+ * Return: Nothing is returned (void)
+ */
+static void soctherm_oc_irq_disable(struct irq_data *data)
+{
+ struct soctherm_oc_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+
+ d->irq_enable &= ~BIT(data->hwirq);
+}
+
+static int soctherm_oc_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ return 0;
+}
+
+/**
+ * soctherm_oc_irq_map() - SOC_THERM interrupt request domain mapper
+ * @h: Interrupt request domain
+ * @virq: Virtual interrupt request number
+ * @hw: Hardware interrupt request number
+ *
+ * Mapping callback function for SOC_THERM's irq_domain. When a SOC_THERM
+ * interrupt request is called, the irq_domain takes the request's virtual
+ * request number (much like a virtual memory address) and maps it to a
+ * physical hardware request number.
+ *
+ * When a mapping doesn't already exist for a virtual request number, the
+ * irq_domain calls this function to associate the virtual request number with
+ * a hardware request number.
+ *
+ * Return: 0
+ */
+static int soctherm_oc_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct soctherm_oc_irq_chip_data *data = h->host_data;
+
+ irq_set_chip_data(virq, data);
+ irq_set_chip(virq, &data->irq_chip);
+ irq_set_nested_thread(virq, 1);
+ return 0;
+}
+
+/**
+ * soctherm_irq_domain_xlate_twocell() - xlate for soctherm interrupts
+ * @d: Interrupt request domain
+ * @intspec: Array of u32s from DTs "interrupt" property
+ * @intsize: Number of values inside the intspec array
+ * @out_hwirq: HW IRQ value associated with this interrupt
+ * @out_type: The IRQ SENSE type for this interrupt.
+ *
+ * This Device Tree IRQ specifier translation function will translate a
+ * specific "interrupt" as defined by 2 DT values where the cell values map
+ * the hwirq number + 1 and linux irq flags. Since the output is the hwirq
+ * number, this function will subtract 1 from the value listed in DT.
+ *
+ * Return: 0
+ */
+static int soctherm_irq_domain_xlate_twocell(struct irq_domain *d,
+ struct device_node *ctrlr, const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type)
+{
+ if (WARN_ON(intsize < 2))
+ return -EINVAL;
+
+ /*
+ * The HW value is 1 index less than the DT IRQ values.
+ * i.e. OC4 goes to HW index 3.
+ */
+ *out_hwirq = intspec[0] - 1;
+ *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+}
+
+static const struct irq_domain_ops soctherm_oc_domain_ops = {
+ .map = soctherm_oc_irq_map,
+ .xlate = soctherm_irq_domain_xlate_twocell,
+};
+
+/**
+ * soctherm_oc_int_init() - Initial enabling of the over-current
+ *			    interrupts
+ * @np: The devicetree node for soctherm
+ * @num_irqs: The number of over-current interrupt requests
+ *
+ * Sets up the over-current interrupt request chip data and registers
+ * the IRQ domain used to deliver nested OC interrupts.
+ *
+ * Return: 0 on success or if over-current interrupts are not enabled,
+ * -ENOMEM if the IRQ domain could not be created.
+ */
+static int soctherm_oc_int_init(struct device_node *np, int num_irqs)
+{
+ if (!num_irqs) {
+ pr_info("%s(): OC interrupts are not enabled\n", __func__);
+ return 0;
+ }
+
+ mutex_init(&soc_irq_cdata.irq_lock);
+ soc_irq_cdata.irq_enable = 0;
+
+ soc_irq_cdata.irq_chip.name = "soc_therm_oc";
+ soc_irq_cdata.irq_chip.irq_bus_lock = soctherm_oc_irq_lock;
+ soc_irq_cdata.irq_chip.irq_bus_sync_unlock =
+ soctherm_oc_irq_sync_unlock;
+ soc_irq_cdata.irq_chip.irq_disable = soctherm_oc_irq_disable;
+ soc_irq_cdata.irq_chip.irq_enable = soctherm_oc_irq_enable;
+ soc_irq_cdata.irq_chip.irq_set_type = soctherm_oc_irq_set_type;
+ soc_irq_cdata.irq_chip.irq_set_wake = NULL;
+
+ soc_irq_cdata.domain = irq_domain_add_linear(np, num_irqs,
+ &soctherm_oc_domain_ops,
+ &soc_irq_cdata);
+
+ if (!soc_irq_cdata.domain) {
+ pr_err("%s: Failed to create IRQ domain\n", __func__);
+ return -ENOMEM;
+ }
+
+	pr_debug("%s(): OC interrupts enabled successfully\n", __func__);
+ return 0;
+}
+
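To show how the nested OC interrupts set up above would be consumed, here is a rough, hypothetical client-side sketch; the client names and the choice of hwirq 0 (OC1) are assumptions, while the domain itself and the handle_nested_irq() dispatch in soctherm_edp_isr_thread() come from the patch:

/* Hypothetical consumer of the soctherm OC1 interrupt (hwirq 0). */
static irqreturn_t oc_client_isr(int irq, void *data)
{
	/* react to the over-current alarm, e.g. lower a current limit */
	return IRQ_HANDLED;
}

static int oc_client_request(struct irq_domain *domain, void *data)
{
	int virq = irq_create_mapping(domain, 0);	/* OC1 -> hwirq 0 */

	if (!virq)
		return -EINVAL;

	/* nested threaded handler: the parent ISR calls handle_nested_irq() */
	return request_threaded_irq(virq, NULL, oc_client_isr,
				    IRQF_ONESHOT, "oc1-client", data);
}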
#ifdef CONFIG_DEBUG_FS
static int regs_show(struct seq_file *s, void *data)
{
@@ -929,6 +1574,120 @@ static const struct thermal_cooling_device_ops throt_cooling_ops = {
.set_cur_state = throt_set_cdev_state,
};
+static int soctherm_thermtrips_parse(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tegra_soctherm *ts = dev_get_drvdata(dev);
+ struct tsensor_group_thermtrips *tt = ts->soc->thermtrips;
+ const int max_num_prop = ts->soc->num_ttgs * 2;
+ u32 *tlb;
+ int i, j, n, ret;
+
+ if (!tt)
+ return -ENOMEM;
+
+ n = of_property_count_u32_elems(dev->of_node, "nvidia,thermtrips");
+ if (n <= 0) {
+ dev_info(dev,
+ "missing thermtrips, will use critical trips as shut down temp\n");
+ return n;
+ }
+
+ n = min(max_num_prop, n);
+
+ tlb = devm_kcalloc(&pdev->dev, max_num_prop, sizeof(u32), GFP_KERNEL);
+ if (!tlb)
+ return -ENOMEM;
+ ret = of_property_read_u32_array(dev->of_node, "nvidia,thermtrips",
+ tlb, n);
+ if (ret) {
+ dev_err(dev, "invalid num ele: thermtrips:%d\n", ret);
+ return ret;
+ }
+
+ i = 0;
+ for (j = 0; j < n; j = j + 2) {
+ if (tlb[j] >= TEGRA124_SOCTHERM_SENSOR_NUM)
+ continue;
+
+ tt[i].id = tlb[j];
+ tt[i].temp = tlb[j + 1];
+ i++;
+ }
+
+ return 0;
+}
+
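As an example of the pairs parsed above, a property such as nvidia,thermtrips = <0 101000 2 99000> would request a hardware thermtrip at 101 degrees C for sensor group 0 and 99 degrees C for group 2 (assuming the usual CPU = 0 / GPU = 2 IDs from the DT binding header); pairs whose ID is out of range are simply skipped, and at most num_ttgs * 2 cells are read.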
+static void soctherm_oc_cfg_parse(struct device *dev,
+ struct device_node *np_oc,
+ struct soctherm_throt_cfg *stc)
+{
+ u32 val;
+
+ if (of_property_read_bool(np_oc, "nvidia,polarity-active-low"))
+ stc->oc_cfg.active_low = 1;
+ else
+ stc->oc_cfg.active_low = 0;
+
+ if (!of_property_read_u32(np_oc, "nvidia,count-threshold", &val)) {
+ stc->oc_cfg.intr_en = 1;
+ stc->oc_cfg.alarm_cnt_thresh = val;
+ }
+
+ if (!of_property_read_u32(np_oc, "nvidia,throttle-period-us", &val))
+ stc->oc_cfg.throt_period = val;
+
+ if (!of_property_read_u32(np_oc, "nvidia,alarm-filter", &val))
+ stc->oc_cfg.alarm_filter = val;
+
+ /* BRIEF throttling by default, do not support STICKY */
+ stc->oc_cfg.mode = OC_THROTTLE_MODE_BRIEF;
+}
+
+static int soctherm_throt_cfg_parse(struct device *dev,
+ struct device_node *np,
+ struct soctherm_throt_cfg *stc)
+{
+ struct tegra_soctherm *ts = dev_get_drvdata(dev);
+ int ret;
+ u32 val;
+
+ ret = of_property_read_u32(np, "nvidia,priority", &val);
+ if (ret) {
+ dev_err(dev, "throttle-cfg: %s: invalid priority\n", stc->name);
+ return -EINVAL;
+ }
+ stc->priority = val;
+
+ ret = of_property_read_u32(np, ts->soc->use_ccroc ?
+ "nvidia,cpu-throt-level" :
+ "nvidia,cpu-throt-percent", &val);
+ if (!ret) {
+ if (ts->soc->use_ccroc &&
+ val <= TEGRA_SOCTHERM_THROT_LEVEL_HIGH)
+ stc->cpu_throt_level = val;
+ else if (!ts->soc->use_ccroc && val <= 100)
+ stc->cpu_throt_depth = val;
+ else
+ goto err;
+ } else {
+ goto err;
+ }
+
+ ret = of_property_read_u32(np, "nvidia,gpu-throt-level", &val);
+ if (!ret && val <= TEGRA_SOCTHERM_THROT_LEVEL_HIGH)
+ stc->gpu_throt_level = val;
+ else
+ goto err;
+
+ return 0;
+
+err:
+ dev_err(dev, "throttle-cfg: %s: no throt prop or invalid prop\n",
+ stc->name);
+ return -EINVAL;
+}
+
/**
* soctherm_init_hw_throt_cdev() - Parse the HW throttle configurations
* and register them as cooling devices.
@@ -939,8 +1698,7 @@ static void soctherm_init_hw_throt_cdev(struct platform_device *pdev)
struct tegra_soctherm *ts = dev_get_drvdata(dev);
struct device_node *np_stc, *np_stcc;
const char *name;
- u32 val;
- int i, r;
+ int i;
for (i = 0; i < THROTTLE_SIZE; i++) {
ts->throt_cfgs[i].name = throt_names[i];
@@ -958,6 +1716,7 @@ static void soctherm_init_hw_throt_cdev(struct platform_device *pdev)
for_each_child_of_node(np_stc, np_stcc) {
struct soctherm_throt_cfg *stc;
struct thermal_cooling_device *tcd;
+ int err;
name = np_stcc->name;
stc = find_throttle_cfg_by_name(ts, name);
@@ -967,51 +1726,34 @@ static void soctherm_init_hw_throt_cdev(struct platform_device *pdev)
continue;
}
- r = of_property_read_u32(np_stcc, "nvidia,priority", &val);
- if (r) {
- dev_info(dev,
- "throttle-cfg: %s: missing priority\n", name);
- continue;
+ if (stc->init) {
+ dev_err(dev, "throttle-cfg: %s: redefined!\n", name);
+ of_node_put(np_stcc);
+ break;
}
- stc->priority = val;
-
- if (ts->soc->use_ccroc) {
- r = of_property_read_u32(np_stcc,
- "nvidia,cpu-throt-level",
- &val);
- if (r) {
- dev_info(dev,
- "throttle-cfg: %s: missing cpu-throt-level\n",
- name);
- continue;
- }
- stc->cpu_throt_level = val;
+
+ err = soctherm_throt_cfg_parse(dev, np_stcc, stc);
+ if (err)
+ continue;
+
+ if (stc->id >= THROTTLE_OC1) {
+ soctherm_oc_cfg_parse(dev, np_stcc, stc);
+ stc->init = true;
} else {
- r = of_property_read_u32(np_stcc,
- "nvidia,cpu-throt-percent",
- &val);
- if (r) {
- dev_info(dev,
- "throttle-cfg: %s: missing cpu-throt-percent\n",
- name);
- continue;
- }
- stc->cpu_throt_depth = val;
- }
- tcd = thermal_of_cooling_device_register(np_stcc,
+ tcd = thermal_of_cooling_device_register(np_stcc,
(char *)name, ts,
&throt_cooling_ops);
- of_node_put(np_stcc);
- if (IS_ERR_OR_NULL(tcd)) {
- dev_err(dev,
- "throttle-cfg: %s: failed to register cooling device\n",
- name);
- continue;
+ if (IS_ERR_OR_NULL(tcd)) {
+ dev_err(dev,
+ "throttle-cfg: %s: failed to register cooling device\n",
+ name);
+ continue;
+ }
+ stc->cdev = tcd;
+ stc->init = true;
}
- stc->cdev = tcd;
- stc->init = true;
}
of_node_put(np_stc);
@@ -1141,6 +1883,50 @@ static void throttlectl_cpu_mn(struct tegra_soctherm *ts,
}
/**
+ * throttlectl_gpu_level_select() - selects throttling level for GPU
+ * @throt: the LIGHT/HEAVY of throttle event id
+ *
+ * This function programs soctherm's interface to GK20a NV_THERM to select
+ * pre-configured "Low", "Medium" or "Heavy" throttle levels.
+ *
+ * The level to program is taken from the gpu_throt_level of @throt's config.
+ */
+static void throttlectl_gpu_level_select(struct tegra_soctherm *ts,
+ enum soctherm_throttle_id throt)
+{
+ u32 r, level, throt_vect;
+
+ level = ts->throt_cfgs[throt].gpu_throt_level;
+ throt_vect = THROT_LEVEL_TO_DEPTH(level);
+ r = readl(ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_GPU));
+ r = REG_SET_MASK(r, THROT_PSKIP_CTRL_ENABLE_MASK, 1);
+ r = REG_SET_MASK(r, THROT_PSKIP_CTRL_VECT_GPU_MASK, throt_vect);
+ writel(r, ts->regs + THROT_PSKIP_CTRL(throt, THROTTLE_DEV_GPU));
+}
+
+static int soctherm_oc_cfg_program(struct tegra_soctherm *ts,
+ enum soctherm_throttle_id throt)
+{
+ u32 r;
+ struct soctherm_oc_cfg *oc = &ts->throt_cfgs[throt].oc_cfg;
+
+ if (oc->mode == OC_THROTTLE_MODE_DISABLED)
+ return -EINVAL;
+
+ r = REG_SET_MASK(0, OC1_CFG_HW_RESTORE_MASK, 1);
+ r = REG_SET_MASK(r, OC1_CFG_THROTTLE_MODE_MASK, oc->mode);
+ r = REG_SET_MASK(r, OC1_CFG_ALARM_POLARITY_MASK, oc->active_low);
+ r = REG_SET_MASK(r, OC1_CFG_EN_THROTTLE_MASK, 1);
+ writel(r, ts->regs + ALARM_CFG(throt));
+ writel(oc->throt_period, ts->regs + ALARM_THROTTLE_PERIOD(throt));
+ writel(oc->alarm_cnt_thresh, ts->regs + ALARM_CNT_THRESHOLD(throt));
+ writel(oc->alarm_filter, ts->regs + ALARM_FILTER(throt));
+ soctherm_oc_intr_enable(ts, throt, oc->intr_en);
+
+ return 0;
+}
+
+/**
* soctherm_throttle_program() - programs pulse skippers' configuration
* @throt: the LIGHT/HEAVY of the throttle event id.
*
@@ -1156,12 +1942,17 @@ static void soctherm_throttle_program(struct tegra_soctherm *ts,
if (!stc.init)
return;
+ if ((throt >= THROTTLE_OC1) && (soctherm_oc_cfg_program(ts, throt)))
+ return;
+
/* Setup PSKIP parameters */
if (ts->soc->use_ccroc)
throttlectl_cpu_level_select(ts, throt);
else
throttlectl_cpu_mn(ts, throt);
+ throttlectl_gpu_level_select(ts, throt);
+
r = REG_SET_MASK(0, THROT_PRIORITY_LITE_PRIO_MASK, stc.priority);
writel(r, ts->regs + THROT_PRIORITY_CTRL(throt));
@@ -1215,6 +2006,57 @@ static void tegra_soctherm_throttle(struct device *dev)
writel(v, ts->regs + THERMCTL_STATS_CTL);
}
+static int soctherm_interrupts_init(struct platform_device *pdev,
+ struct tegra_soctherm *tegra)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ ret = soctherm_oc_int_init(np, TEGRA_SOC_OC_IRQ_MAX);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "soctherm_oc_int_init failed\n");
+ return ret;
+ }
+
+ tegra->thermal_irq = platform_get_irq(pdev, 0);
+ if (tegra->thermal_irq < 0) {
+ dev_dbg(&pdev->dev, "get 'thermal_irq' failed.\n");
+ return 0;
+ }
+
+ tegra->edp_irq = platform_get_irq(pdev, 1);
+ if (tegra->edp_irq < 0) {
+ dev_dbg(&pdev->dev, "get 'edp_irq' failed.\n");
+ return 0;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev,
+ tegra->thermal_irq,
+ soctherm_thermal_isr,
+ soctherm_thermal_isr_thread,
+ IRQF_ONESHOT,
+ dev_name(&pdev->dev),
+ tegra);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request_irq 'thermal_irq' failed.\n");
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev,
+ tegra->edp_irq,
+ soctherm_edp_isr,
+ soctherm_edp_isr_thread,
+ IRQF_ONESHOT,
+ "soctherm_edp",
+ tegra);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request_irq 'edp_irq' failed.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static void soctherm_init(struct platform_device *pdev)
{
struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
@@ -1292,6 +2134,7 @@ static int tegra_soctherm_probe(struct platform_device *pdev)
if (!tegra)
return -ENOMEM;
+ mutex_init(&tegra->thermctl_lock);
dev_set_drvdata(&pdev->dev, tegra);
tegra->soc = soc;
@@ -1370,6 +2213,8 @@ static int tegra_soctherm_probe(struct platform_device *pdev)
if (err)
return err;
+ soctherm_thermtrips_parse(pdev);
+
soctherm_init_hw_throt_cdev(pdev);
soctherm_init(pdev);
@@ -1406,6 +2251,8 @@ static int tegra_soctherm_probe(struct platform_device *pdev)
goto disable_clocks;
}
+ err = soctherm_interrupts_init(pdev, tegra);
+
soctherm_debug_init(pdev);
return 0;
diff --git a/drivers/thermal/tegra/soctherm.h b/drivers/thermal/tegra/soctherm.h
index e96ca73fd780..70501e73d586 100644
--- a/drivers/thermal/tegra/soctherm.h
+++ b/drivers/thermal/tegra/soctherm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
*
@@ -29,6 +30,14 @@
#define THERMCTL_THERMTRIP_CTL 0x80
/* BITs are defined in device file */
+#define THERMCTL_INTR_ENABLE 0x88
+#define THERMCTL_INTR_DISABLE 0x8c
+#define TH_INTR_UP_DN_EN 0x3
+#define THERM_IRQ_MEM_MASK (TH_INTR_UP_DN_EN << 24)
+#define THERM_IRQ_GPU_MASK (TH_INTR_UP_DN_EN << 16)
+#define THERM_IRQ_CPU_MASK (TH_INTR_UP_DN_EN << 8)
+#define THERM_IRQ_TSENSE_MASK (TH_INTR_UP_DN_EN << 0)
+
#define SENSOR_PDIV 0x1c0
#define SENSOR_PDIV_CPU_MASK (0xf << 12)
#define SENSOR_PDIV_GPU_MASK (0xf << 8)
@@ -70,6 +79,7 @@ struct tegra_tsensor_group {
u32 thermtrip_enable_mask;
u32 thermtrip_any_en_mask;
u32 thermtrip_threshold_mask;
+ u32 thermctl_isr_mask;
u16 thermctl_lvl0_offset;
u32 thermctl_lvl0_up_thresh_mask;
u32 thermctl_lvl0_dn_thresh_mask;
@@ -92,6 +102,11 @@ struct tegra_tsensor {
const struct tegra_tsensor_group *group;
};
+struct tsensor_group_thermtrips {
+ u8 id;
+ u32 temp;
+};
+
struct tegra_soctherm_fuse {
u32 fuse_base_cp_mask, fuse_base_cp_shift;
u32 fuse_base_ft_mask, fuse_base_ft_shift;
@@ -113,6 +128,7 @@ struct tegra_soctherm_soc {
const int thresh_grain;
const unsigned int bptt;
const bool use_ccroc;
+ struct tsensor_group_thermtrips *thermtrips;
};
int tegra_calc_shared_calib(const struct tegra_soctherm_fuse *tfuse,
diff --git a/drivers/thermal/tegra/tegra124-soctherm.c b/drivers/thermal/tegra/tegra124-soctherm.c
index 36768630f78c..20ad27f4d1a1 100644
--- a/drivers/thermal/tegra/tegra124-soctherm.c
+++ b/drivers/thermal/tegra/tegra124-soctherm.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -55,6 +56,7 @@ static const struct tegra_tsensor_group tegra124_tsensor_group_cpu = {
.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA124_THERMTRIP_CPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA124_THERMTRIP_CPU_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_CPU_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_CPU,
.thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -73,6 +75,7 @@ static const struct tegra_tsensor_group tegra124_tsensor_group_gpu = {
.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA124_THERMTRIP_GPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_GPU_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_GPU,
.thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -89,6 +92,7 @@ static const struct tegra_tsensor_group tegra124_tsensor_group_pll = {
.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA124_THERMTRIP_TSENSE_EN_MASK,
.thermtrip_threshold_mask = TEGRA124_THERMTRIP_TSENSE_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_TSENSE_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_TSENSE,
.thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -107,6 +111,7 @@ static const struct tegra_tsensor_group tegra124_tsensor_group_mem = {
.thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA124_THERMTRIP_MEM_EN_MASK,
.thermtrip_threshold_mask = TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_MEM_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_MEM,
.thermctl_lvl0_up_thresh_mask = TEGRA124_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA124_THERMCTL_LVL0_DN_THRESH_MASK,
diff --git a/drivers/thermal/tegra/tegra132-soctherm.c b/drivers/thermal/tegra/tegra132-soctherm.c
index 97fa30501eb1..b76308fdad9e 100644
--- a/drivers/thermal/tegra/tegra132-soctherm.c
+++ b/drivers/thermal/tegra/tegra132-soctherm.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -55,6 +56,7 @@ static const struct tegra_tsensor_group tegra132_tsensor_group_cpu = {
.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA132_THERMTRIP_CPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA132_THERMTRIP_CPU_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_CPU_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_CPU,
.thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -73,6 +75,7 @@ static const struct tegra_tsensor_group tegra132_tsensor_group_gpu = {
.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA132_THERMTRIP_GPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_GPU_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_GPU,
.thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -89,6 +92,7 @@ static const struct tegra_tsensor_group tegra132_tsensor_group_pll = {
.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA132_THERMTRIP_TSENSE_EN_MASK,
.thermtrip_threshold_mask = TEGRA132_THERMTRIP_TSENSE_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_TSENSE_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_TSENSE,
.thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -107,6 +111,7 @@ static const struct tegra_tsensor_group tegra132_tsensor_group_mem = {
.thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA132_THERMTRIP_MEM_EN_MASK,
.thermtrip_threshold_mask = TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_MEM_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_MEM,
.thermctl_lvl0_up_thresh_mask = TEGRA132_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA132_THERMCTL_LVL0_DN_THRESH_MASK,
diff --git a/drivers/thermal/tegra/tegra210-soctherm.c b/drivers/thermal/tegra/tegra210-soctherm.c
index ad53169a8e95..d31b50050faa 100644
--- a/drivers/thermal/tegra/tegra210-soctherm.c
+++ b/drivers/thermal/tegra/tegra210-soctherm.c
@@ -1,5 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -56,6 +57,7 @@ static const struct tegra_tsensor_group tegra210_tsensor_group_cpu = {
.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA210_THERMTRIP_CPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA210_THERMTRIP_CPU_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_CPU_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_CPU,
.thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -74,6 +76,7 @@ static const struct tegra_tsensor_group tegra210_tsensor_group_gpu = {
.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA210_THERMTRIP_GPU_EN_MASK,
.thermtrip_threshold_mask = TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_GPU_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_GPU,
.thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -90,6 +93,7 @@ static const struct tegra_tsensor_group tegra210_tsensor_group_pll = {
.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA210_THERMTRIP_TSENSE_EN_MASK,
.thermtrip_threshold_mask = TEGRA210_THERMTRIP_TSENSE_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_TSENSE_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_TSENSE,
.thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -108,6 +112,7 @@ static const struct tegra_tsensor_group tegra210_tsensor_group_mem = {
.thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
.thermtrip_enable_mask = TEGRA210_THERMTRIP_MEM_EN_MASK,
.thermtrip_threshold_mask = TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK,
+ .thermctl_isr_mask = THERM_IRQ_MEM_MASK,
.thermctl_lvl0_offset = THERMCTL_LEVEL0_GROUP_MEM,
.thermctl_lvl0_up_thresh_mask = TEGRA210_THERMCTL_LVL0_UP_THRESH_MASK,
.thermctl_lvl0_dn_thresh_mask = TEGRA210_THERMCTL_LVL0_DN_THRESH_MASK,
@@ -203,6 +208,13 @@ static const struct tegra_soctherm_fuse tegra210_soctherm_fuse = {
.fuse_spare_realignment = 0,
};
+struct tsensor_group_thermtrips tegra210_tsensor_thermtrips[] = {
+ {.id = TEGRA124_SOCTHERM_SENSOR_NUM},
+ {.id = TEGRA124_SOCTHERM_SENSOR_NUM},
+ {.id = TEGRA124_SOCTHERM_SENSOR_NUM},
+ {.id = TEGRA124_SOCTHERM_SENSOR_NUM},
+};
+
const struct tegra_soctherm_soc tegra210_soctherm = {
.tsensors = tegra210_tsensors,
.num_tsensors = ARRAY_SIZE(tegra210_tsensors),
@@ -212,4 +224,5 @@ const struct tegra_soctherm_soc tegra210_soctherm = {
.thresh_grain = TEGRA210_THRESH_GRAIN,
.bptt = TEGRA210_BPTT,
.use_ccroc = false,
+ .thermtrips = tegra210_tsensor_thermtrips,
};
diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c
index e22fc60ad36d..deb244f12de4 100644
--- a/drivers/thermal/thermal-generic-adc.c
+++ b/drivers/thermal/thermal-generic-adc.c
@@ -29,6 +29,9 @@ static int gadc_thermal_adc_to_temp(struct gadc_thermal_info *gti, int val)
int temp, temp_hi, temp_lo, adc_hi, adc_lo;
int i;
+ if (!gti->lookup_table)
+ return val;
+
for (i = 0; i < gti->nlookup_table; i++) {
if (val >= gti->lookup_table[2 * i + 1])
break;
@@ -81,9 +84,9 @@ static int gadc_thermal_read_linear_lookup_table(struct device *dev,
ntable = of_property_count_elems_of_size(np, "temperature-lookup-table",
sizeof(u32));
- if (ntable < 0) {
- dev_err(dev, "Lookup table is not provided\n");
- return ntable;
+ if (ntable <= 0) {
+		dev_notice(dev, "no lookup table, assuming ADC channel returns milliCelsius\n");
+ return 0;
}
if (ntable % 2) {
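In the pass-through case introduced above (no "temperature-lookup-table" property), the value read from the IIO channel is used directly as the temperature: a processed channel reading of 42000, for instance, is reported as 42000 mC (42 degrees C), so the channel itself must already deliver millidegrees Celsius.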
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 6590bb5cb688..46cfb7de4eb2 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -266,7 +266,7 @@ static int __init thermal_register_governors(void)
return thermal_gov_power_allocator_register();
}
-static void thermal_unregister_governors(void)
+static void __init thermal_unregister_governors(void)
{
thermal_gov_step_wise_unregister();
thermal_gov_fair_share_unregister();
@@ -941,7 +941,7 @@ static void bind_cdev(struct thermal_cooling_device *cdev)
*/
static struct thermal_cooling_device *
__thermal_cooling_device_register(struct device_node *np,
- char *type, void *devdata,
+ const char *type, void *devdata,
const struct thermal_cooling_device_ops *ops)
{
struct thermal_cooling_device *cdev;
@@ -1015,7 +1015,7 @@ __thermal_cooling_device_register(struct device_node *np,
* ERR_PTR. Caller must check return value with IS_ERR*() helpers.
*/
struct thermal_cooling_device *
-thermal_cooling_device_register(char *type, void *devdata,
+thermal_cooling_device_register(const char *type, void *devdata,
const struct thermal_cooling_device_ops *ops)
{
return __thermal_cooling_device_register(NULL, type, devdata, ops);
@@ -1039,13 +1039,62 @@ EXPORT_SYMBOL_GPL(thermal_cooling_device_register);
*/
struct thermal_cooling_device *
thermal_of_cooling_device_register(struct device_node *np,
- char *type, void *devdata,
+ const char *type, void *devdata,
const struct thermal_cooling_device_ops *ops)
{
return __thermal_cooling_device_register(np, type, devdata, ops);
}
EXPORT_SYMBOL_GPL(thermal_of_cooling_device_register);
+static void thermal_cooling_device_release(struct device *dev, void *res)
+{
+ thermal_cooling_device_unregister(
+ *(struct thermal_cooling_device **)res);
+}
+
+/**
+ * devm_thermal_of_cooling_device_register() - register an OF thermal cooling
+ * device
+ * @dev: a valid struct device pointer of a sensor device.
+ * @np: a pointer to a device tree node.
+ * @type: the thermal cooling device type.
+ * @devdata: device private data.
+ * @ops: standard thermal cooling devices callbacks.
+ *
+ * This function will register a cooling device with device tree node reference.
+ * This interface function adds a new thermal cooling device (fan/processor/...)
+ * to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself
+ * to all the thermal zone devices registered at the same time.
+ *
+ * Return: a pointer to the created struct thermal_cooling_device or an
+ * ERR_PTR. Caller must check return value with IS_ERR*() helpers.
+ */
+struct thermal_cooling_device *
+devm_thermal_of_cooling_device_register(struct device *dev,
+ struct device_node *np,
+ char *type, void *devdata,
+ const struct thermal_cooling_device_ops *ops)
+{
+ struct thermal_cooling_device **ptr, *tcd;
+
+ ptr = devres_alloc(thermal_cooling_device_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ tcd = __thermal_cooling_device_register(np, type, devdata, ops);
+ if (IS_ERR(tcd)) {
+ devres_free(ptr);
+ return tcd;
+ }
+
+ *ptr = tcd;
+ devres_add(dev, ptr);
+
+ return tcd;
+}
+EXPORT_SYMBOL_GPL(devm_thermal_of_cooling_device_register);
+
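A minimal sketch of how a driver would use the new devres helper from its probe path; the my_fan_* names and the empty ops are placeholders, only devm_thermal_of_cooling_device_register() and its argument order come from the patch:

static const struct thermal_cooling_device_ops my_fan_cooling_ops = {
	/* .get_max_state / .get_cur_state / .set_cur_state elided */
};

static int my_fan_probe(struct platform_device *pdev)
{
	static char type[] = "my-fan";	/* the helper takes a non-const type */
	struct thermal_cooling_device *cdev;

	cdev = devm_thermal_of_cooling_device_register(&pdev->dev,
						       pdev->dev.of_node,
						       type, NULL,
						       &my_fan_cooling_ops);
	if (IS_ERR(cdev))
		return PTR_ERR(cdev);

	/* no matching unregister call is needed in the remove path */
	return 0;
}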
static void __unbind(struct thermal_zone_device *tz, int mask,
struct thermal_cooling_device *cdev)
{
@@ -1494,6 +1543,7 @@ static int thermal_pm_notify(struct notifier_block *nb,
unsigned long mode, void *_unused)
{
struct thermal_zone_device *tz;
+ enum thermal_device_mode tz_mode;
switch (mode) {
case PM_HIBERNATION_PREPARE:
@@ -1506,6 +1556,13 @@ static int thermal_pm_notify(struct notifier_block *nb,
case PM_POST_SUSPEND:
atomic_set(&in_suspend, 0);
list_for_each_entry(tz, &thermal_tz_list, node) {
+ tz_mode = THERMAL_DEVICE_ENABLED;
+ if (tz->ops->get_mode)
+ tz->ops->get_mode(tz, &tz_mode);
+
+ if (tz_mode == THERMAL_DEVICE_DISABLED)
+ continue;
+
thermal_zone_device_init(tz);
thermal_zone_device_update(tz,
THERMAL_EVENT_UNSPECIFIED);
@@ -1563,19 +1620,4 @@ error:
mutex_destroy(&poweroff_lock);
return result;
}
-
-static void __exit thermal_exit(void)
-{
- unregister_pm_notifier(&thermal_pm_nb);
- of_thermal_destroy_zones();
- genetlink_exit();
- class_unregister(&thermal_class);
- thermal_unregister_governors();
- ida_destroy(&thermal_tz_ida);
- ida_destroy(&thermal_cdev_ida);
- mutex_destroy(&thermal_list_lock);
- mutex_destroy(&thermal_governor_lock);
-}
-
fs_initcall(thermal_init);
-module_exit(thermal_exit);
diff --git a/drivers/thermal/thermal_mmio.c b/drivers/thermal/thermal_mmio.c
new file mode 100644
index 000000000000..de3cceea23bc
--- /dev/null
+++ b/drivers/thermal/thermal_mmio.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+
+struct thermal_mmio {
+ void __iomem *mmio_base;
+ u32 (*read_mmio)(void __iomem *mmio_base);
+ u32 mask;
+ int factor;
+};
+
+static u32 thermal_mmio_readb(void __iomem *mmio_base)
+{
+ return readb(mmio_base);
+}
+
+static int thermal_mmio_get_temperature(void *private, int *temp)
+{
+ int t;
+ struct thermal_mmio *sensor =
+ (struct thermal_mmio *)private;
+
+ t = sensor->read_mmio(sensor->mmio_base) & sensor->mask;
+ t *= sensor->factor;
+
+ *temp = t;
+
+ return 0;
+}
+
+static struct thermal_zone_of_device_ops thermal_mmio_ops = {
+ .get_temp = thermal_mmio_get_temperature,
+};
+
+static int thermal_mmio_probe(struct platform_device *pdev)
+{
+ struct resource *resource;
+ struct thermal_mmio *sensor;
+ int (*sensor_init_func)(struct platform_device *pdev,
+ struct thermal_mmio *sensor);
+ struct thermal_zone_device *thermal_zone;
+ int ret;
+ int temperature;
+
+ sensor = devm_kzalloc(&pdev->dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return -ENOMEM;
+
+	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!resource) {
+		/* platform_get_resource() returns NULL (not ERR_PTR) on failure */
+		dev_err(&pdev->dev,
+			"failed to get platform memory resource\n");
+		return -ENODEV;
+	}
+
+ sensor->mmio_base = devm_ioremap_resource(&pdev->dev, resource);
+ if (IS_ERR(sensor->mmio_base)) {
+ dev_err(&pdev->dev, "failed to ioremap memory (%ld)\n",
+ PTR_ERR(sensor->mmio_base));
+ return PTR_ERR(sensor->mmio_base);
+ }
+
+ sensor_init_func = device_get_match_data(&pdev->dev);
+ if (sensor_init_func) {
+ ret = sensor_init_func(pdev, sensor);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to initialize sensor (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+
+ thermal_zone = devm_thermal_zone_of_sensor_register(&pdev->dev,
+ 0,
+ sensor,
+ &thermal_mmio_ops);
+ if (IS_ERR(thermal_zone)) {
+ dev_err(&pdev->dev,
+ "failed to register sensor (%ld)\n",
+ PTR_ERR(thermal_zone));
+ return PTR_ERR(thermal_zone);
+ }
+
+ thermal_mmio_get_temperature(sensor, &temperature);
+ dev_info(&pdev->dev,
+ "thermal mmio sensor %s registered, current temperature: %d\n",
+ pdev->name, temperature);
+
+ return 0;
+}
+
+static int al_thermal_init(struct platform_device *pdev,
+ struct thermal_mmio *sensor)
+{
+ sensor->read_mmio = thermal_mmio_readb;
+ sensor->mask = 0xff;
+ sensor->factor = 1000;
+
+ return 0;
+}
+
+static const struct of_device_id thermal_mmio_id_table[] = {
+ { .compatible = "amazon,al-thermal", .data = al_thermal_init},
+ {}
+};
+MODULE_DEVICE_TABLE(of, thermal_mmio_id_table);
+
+static struct platform_driver thermal_mmio_driver = {
+ .probe = thermal_mmio_probe,
+ .driver = {
+ .name = "thermal-mmio",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(thermal_mmio_id_table),
+ },
+};
+
+module_platform_driver(thermal_mmio_driver);
+
+MODULE_AUTHOR("Talel Shenhar <talel@amazon.com>");
+MODULE_DESCRIPTION("Thermal MMIO Driver");
+MODULE_LICENSE("GPL v2");
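All SoC-specific behaviour in this new driver lives in the init callback returned by device_get_match_data(), so another memory-mapped sensor can in principle be supported by adding a match entry and an init function, with no change to the probe path. A minimal sketch of such an extension, assuming a purely hypothetical sensor ("vendor,foo-thermal", thermal_mmio_readw and foo_thermal_init are illustrative names, not part of this patch):

/* Hypothetical extension, for illustration only -- not part of this patch. */
static u32 thermal_mmio_readw(void __iomem *mmio_base)
{
	return readw(mmio_base);
}

static int foo_thermal_init(struct platform_device *pdev,
			    struct thermal_mmio *sensor)
{
	sensor->read_mmio = thermal_mmio_readw;	/* 16-bit register */
	sensor->mask = 0xfff;			/* 12 valid bits */
	sensor->factor = 250;			/* 0.25 degC steps -> millicelsius */

	return 0;
}

static const struct of_device_id thermal_mmio_id_table[] = {
	{ .compatible = "amazon,al-thermal", .data = al_thermal_init },
	{ .compatible = "vendor,foo-thermal", .data = foo_thermal_init },
	{}
};

Because thermal_mmio_probe() only consults the match data, it would stay unchanged no matter how many such entries are added.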
diff --git a/drivers/tty/hvc/hvc_riscv_sbi.c b/drivers/tty/hvc/hvc_riscv_sbi.c
index 75155bde2b88..31f53fa77e4a 100644
--- a/drivers/tty/hvc/hvc_riscv_sbi.c
+++ b/drivers/tty/hvc/hvc_riscv_sbi.c
@@ -53,7 +53,6 @@ device_initcall(hvc_sbi_init);
static int __init hvc_sbi_console_init(void)
{
hvc_instantiate(0, 0, &hvc_sbi_ops);
- add_preferred_console("hvc", 0, NULL);
return 0;
}
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index ca8a94f15ac0..38183ac438c6 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -40,8 +40,6 @@ struct da8xx_ohci_hcd {
struct phy *usb11_phy;
struct regulator *vbus_reg;
struct notifier_block nb;
- unsigned int reg_enabled;
- struct gpio_desc *vbus_gpio;
struct gpio_desc *oc_gpio;
};
@@ -92,29 +90,21 @@ static int ohci_da8xx_set_power(struct usb_hcd *hcd, int on)
struct device *dev = hcd->self.controller;
int ret;
- if (da8xx_ohci->vbus_gpio) {
- gpiod_set_value_cansleep(da8xx_ohci->vbus_gpio, on);
- return 0;
- }
-
if (!da8xx_ohci->vbus_reg)
return 0;
- if (on && !da8xx_ohci->reg_enabled) {
+ if (on) {
ret = regulator_enable(da8xx_ohci->vbus_reg);
if (ret) {
dev_err(dev, "Failed to enable regulator: %d\n", ret);
return ret;
}
- da8xx_ohci->reg_enabled = 1;
-
- } else if (!on && da8xx_ohci->reg_enabled) {
+ } else {
ret = regulator_disable(da8xx_ohci->vbus_reg);
if (ret) {
dev_err(dev, "Failed to disable regulator: %d\n", ret);
return ret;
}
- da8xx_ohci->reg_enabled = 0;
}
return 0;
@@ -124,9 +114,6 @@ static int ohci_da8xx_get_power(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
- if (da8xx_ohci->vbus_gpio)
- return gpiod_get_value_cansleep(da8xx_ohci->vbus_gpio);
-
if (da8xx_ohci->vbus_reg)
return regulator_is_enabled(da8xx_ohci->vbus_reg);
@@ -159,9 +146,6 @@ static int ohci_da8xx_has_set_power(struct usb_hcd *hcd)
{
struct da8xx_ohci_hcd *da8xx_ohci = to_da8xx_ohci(hcd);
- if (da8xx_ohci->vbus_gpio)
- return 1;
-
if (da8xx_ohci->vbus_reg)
return 1;
@@ -206,12 +190,18 @@ static int ohci_da8xx_regulator_event(struct notifier_block *nb,
return 0;
}
-static irqreturn_t ohci_da8xx_oc_handler(int irq, void *data)
+static irqreturn_t ohci_da8xx_oc_thread(int irq, void *data)
{
struct da8xx_ohci_hcd *da8xx_ohci = data;
+ struct device *dev = da8xx_ohci->hcd->self.controller;
+ int ret;
- if (gpiod_get_value(da8xx_ohci->oc_gpio))
- gpiod_set_value(da8xx_ohci->vbus_gpio, 0);
+ if (gpiod_get_value_cansleep(da8xx_ohci->oc_gpio) &&
+ da8xx_ohci->vbus_reg) {
+ ret = regulator_disable(da8xx_ohci->vbus_reg);
+ if (ret)
+ dev_err(dev, "Failed to disable regulator: %d\n", ret);
+ }
return IRQ_HANDLED;
}
@@ -424,11 +414,6 @@ static int ohci_da8xx_probe(struct platform_device *pdev)
}
}
- da8xx_ohci->vbus_gpio = devm_gpiod_get_optional(dev, "vbus",
- GPIOD_OUT_HIGH);
- if (IS_ERR(da8xx_ohci->vbus_gpio))
- goto err;
-
da8xx_ohci->oc_gpio = devm_gpiod_get_optional(dev, "oc", GPIOD_IN);
if (IS_ERR(da8xx_ohci->oc_gpio))
goto err;
@@ -438,8 +423,9 @@ static int ohci_da8xx_probe(struct platform_device *pdev)
if (oc_irq < 0)
goto err;
- error = devm_request_irq(dev, oc_irq, ohci_da8xx_oc_handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ error = devm_request_threaded_irq(dev, oc_irq, NULL,
+ ohci_da8xx_oc_thread, IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"OHCI over-current indicator", da8xx_ohci);
if (error)
goto err;
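The over-current handler now ends up calling regulator_disable(), which can sleep, so it can no longer run as a hard-IRQ handler; hence the switch to devm_request_threaded_irq() with a NULL primary handler and IRQF_ONESHOT, which keeps the line masked until the thread function returns. A minimal sketch of that general pattern, using hypothetical foo_* names rather than the driver's own:

/* Sketch of the threaded-IRQ pattern used above; foo_* names are hypothetical. */
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/regulator/consumer.h>

struct foo_priv {
	struct gpio_desc *oc_gpio;
	struct regulator *vbus_reg;
};

static irqreturn_t foo_oc_thread(int irq, void *data)
{
	struct foo_priv *priv = data;
	int ret;

	/* Runs in the IRQ thread, so sleeping calls are allowed here. */
	if (gpiod_get_value_cansleep(priv->oc_gpio) && priv->vbus_reg) {
		ret = regulator_disable(priv->vbus_reg);
		if (ret)
			pr_err("failed to disable regulator: %d\n", ret);
	}

	return IRQ_HANDLED;
}

static int foo_request_oc_irq(struct device *dev, int irq, struct foo_priv *priv)
{
	/*
	 * A NULL hard-IRQ handler requires IRQF_ONESHOT: the interrupt
	 * line stays masked until foo_oc_thread() has completed.
	 */
	return devm_request_threaded_irq(dev, irq, NULL, foo_oc_thread,
					 IRQF_TRIGGER_RISING |
					 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					 "foo over-current", priv);
}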
diff --git a/drivers/usb/storage/Makefile b/drivers/usb/storage/Makefile
index c5126a4cd954..a67ddcbb4e24 100644
--- a/drivers/usb/storage/Makefile
+++ b/drivers/usb/storage/Makefile
@@ -6,7 +6,7 @@
# Rewritten to use lists instead of if-statements.
#
-ccflags-y := -Idrivers/scsi
+ccflags-y := -I $(srctree)/drivers/scsi
obj-$(CONFIG_USB_UAS) += uas.o
obj-$(CONFIG_USB_STORAGE) += usb-storage.o
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 9e529cc2b4ff..9f39f0c360e0 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -477,8 +477,12 @@ static int efifb_probe(struct platform_device *dev)
* If the UEFI memory map covers the efifb region, we may only
* remap it using the attributes the memory map prescribes.
*/
- mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
- mem_flags &= md.attribute;
+ md.attribute &= EFI_MEMORY_UC | EFI_MEMORY_WC |
+ EFI_MEMORY_WT | EFI_MEMORY_WB;
+ if (md.attribute) {
+ mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
+ mem_flags &= md.attribute;
+ }
}
if (mem_flags & EFI_MEMORY_WC)
info->screen_base = ioremap_wc(efifb_fix.smem_start,
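Previously mem_flags was masked against md.attribute unconditionally; if the firmware entry covering the framebuffer carried no cacheability bits at all (only runtime or permission attributes), that mask cleared every mapping type and the framebuffer could not be remapped. The new code first reduces md.attribute to the UC/WC/WT/WB bits and only applies the restriction when at least one of them is actually set. A hypothetical helper expressing the same decision (a sketch, not code from the patch):

/* Hypothetical helper illustrating the attribute handling above. */
static u64 efifb_filter_mem_flags(u64 mem_flags, u64 md_attribute)
{
	/* Keep only the cacheability attributes reported by firmware. */
	md_attribute &= EFI_MEMORY_UC | EFI_MEMORY_WC |
			EFI_MEMORY_WT | EFI_MEMORY_WB;

	/*
	 * If firmware reported no cacheability information at all,
	 * leave mem_flags untouched instead of masking it to zero.
	 */
	if (!md_attribute)
		return mem_flags;

	mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
	return mem_flags & md_attribute;
}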
diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
index dd139cda936c..9067998759e3 100644
--- a/drivers/watchdog/ixp4xx_wdt.c
+++ b/drivers/watchdog/ixp4xx_wdt.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
+#include <linux/of.h>
#include <linux/watchdog.h>
#include <linux/init.h>
#include <linux/bitops.h>
@@ -176,6 +177,14 @@ static int __init ixp4xx_wdt_init(void)
{
int ret;
+ /*
+ * FIXME: we bail out on device tree boot but this really needs
+	 * to be fixed in a nicer way: this registers the watchdog before
+	 * the driver infrastructure has even matched it; we should only probe
+ * detected hardware.
+ */
+ if (of_have_populated_dt())
+ return -ENODEV;
if (!(read_cpuid_id() & 0xf) && !cpu_is_ixp46x()) {
pr_err("Rev. A0 IXP42x CPU detected - watchdog disabled\n");
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 23f7f6ec7d1f..833b2d2c4318 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -697,7 +697,7 @@ static int xen_pcibk_xenbus_probe(struct xenbus_device *dev,
/* We need to force a call to our callback here in case
* xend already configured us!
*/
- xen_pcibk_be_watch(&pdev->be_watch, NULL, 0);
+ xen_pcibk_be_watch(&pdev->be_watch, NULL, NULL);
out:
return err;
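Since the xenbus watch callback takes pointer arguments for the changed path and the watch token, the forced invocation should pass NULL rather than the integer 0; this is a sparse-style cleanup with no behavioural change. For reference, a sketch of the callback shape the call matches (illustrative, not copied from the tree):

/* Sketch of the xenbus watch callback shape assumed by the call above. */
static void example_be_watch(struct xenbus_watch *watch,
			     const char *path, const char *token)
{
	/* path and token may be NULL when the callback is forced manually. */
}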
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 0782ff3c2273..faf452d0edf0 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -465,7 +465,6 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
struct watch_adapter *watch;
char *path, *token;
int err, rc;
- LIST_HEAD(staging_q);
path = u->u.buffer + sizeof(u->u.msg);
token = memchr(path, 0, u->u.msg.len);
@@ -523,7 +522,6 @@ static ssize_t xenbus_file_write(struct file *filp,
uint32_t msg_type;
int rc = len;
int ret;
- LIST_HEAD(staging_q);
/*
* We're expecting usermode to be writing properly formed