summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAndy Green <andy.green@linaro.org>2014-12-07 13:41:50 +0800
committerAndy Green <andy.green@linaro.org>2014-12-09 11:57:57 +0800
commitb907c6aff5bce63395b0bd7fc29a3326a6633077 (patch)
tree06e816324d86cbe8316f21abaad756ce36907e9a
parent95c98144e74d23b2ae6a74ac9b39348af44ff400 (diff)
Signed-off-by: Andy Green <andy.green@linaro.org>
-rw-r--r--arch/arm/Kconfig133
-rw-r--r--arch/arm/Kconfig.debug23
-rw-r--r--arch/arm/Makefile17
-rw-r--r--arch/arm/boot/.gitignore1
-rw-r--r--arch/arm/boot/Makefile14
-rw-r--r--arch/arm/boot/compressed/head.S4
-rw-r--r--arch/arm/boot/dts/Makefile14
-rw-r--r--arch/arm/boot/dts/msm-gdsc-8916.dtsi63
-rw-r--r--arch/arm/boot/dts/msm-gdsc.dtsi168
-rw-r--r--arch/arm/boot/dts/msm-pm8916-rpm-regulator.dtsi365
-rw-r--r--arch/arm/boot/dts/msm-pm8916.dtsi632
-rw-r--r--arch/arm/boot/dts/msm-rdbg.dtsi75
-rw-r--r--arch/arm/boot/dts/msm8916-bus.dtsi877
-rw-r--r--arch/arm/boot/dts/msm8916-ipcrouter.dtsi37
-rw-r--r--arch/arm/boot/dts/msm8916-memory.dtsi34
-rw-r--r--arch/arm/boot/dts/msm8916-pinctrl.dtsi1374
-rw-r--r--arch/arm/boot/dts/msm8916-pm.dtsi345
-rw-r--r--arch/arm/boot/dts/msm8916-qrd-skui.dts36
-rw-r--r--arch/arm/boot/dts/msm8916-qrd-skui.dtsi67
-rw-r--r--arch/arm/boot/dts/msm8916-qrd.dtsi137
-rw-r--r--arch/arm/boot/dts/msm8916-regulator.dtsi374
-rw-r--r--arch/arm/boot/dts/msm8916.dtsi809
-rw-r--r--arch/arm/boot/dts/skeleton.dtsi7
-rw-r--r--arch/arm/configs/msm8916-qrd_defconfig140
-rw-r--r--arch/arm/include/asm/cacheflush.h29
-rw-r--r--arch/arm/include/asm/cputype.h1
-rw-r--r--arch/arm/include/asm/device.h1
-rw-r--r--arch/arm/include/asm/dma-mapping.h147
-rw-r--r--arch/arm/include/asm/domain.h7
-rw-r--r--arch/arm/include/asm/glue-cache.h2
-rw-r--r--arch/arm/include/asm/io.h25
-rw-r--r--arch/arm/include/asm/mach/arch.h1
-rw-r--r--arch/arm/include/asm/mach/flash.h9
-rw-r--r--arch/arm/include/asm/mach/map.h4
-rw-r--r--arch/arm/include/asm/memory.h6
-rw-r--r--arch/arm/include/asm/mmu_writeable.h31
-rw-r--r--arch/arm/include/asm/page.h5
-rw-r--r--arch/arm/include/asm/pgtable-3level-hwdef.h1
-rw-r--r--arch/arm/include/asm/pgtable-3level.h2
-rw-r--r--arch/arm/include/asm/pgtable.h16
-rw-r--r--arch/arm/include/asm/processor.h3
-rw-r--r--arch/arm/include/asm/setup.h31
-rw-r--r--arch/arm/include/asm/smcmod.h165
-rw-r--r--arch/arm/include/asm/smp.h1
-rw-r--r--arch/arm/include/asm/spinlock.h93
-rw-r--r--arch/arm/include/asm/system_misc.h1
-rw-r--r--arch/arm/include/asm/tlbflush.h4
-rw-r--r--arch/arm/include/uapi/asm/posix_types.h3
-rw-r--r--arch/arm/kernel/Makefile2
-rw-r--r--arch/arm/kernel/devtree.c28
-rw-r--r--arch/arm/kernel/entry-armv.S116
-rw-r--r--arch/arm/kernel/fiq.c7
-rw-r--r--arch/arm/kernel/head.S11
-rw-r--r--arch/arm/kernel/hw_breakpoint.c22
-rw-r--r--arch/arm/kernel/patch.c8
-rw-r--r--arch/arm/kernel/setup.c45
-rw-r--r--arch/arm/kernel/smp.c1
-rw-r--r--arch/arm/kernel/vmlinux.lds.S20
-rw-r--r--arch/arm/mach-qcom/Kconfig34
-rw-r--r--arch/arm/mach-qcom/Makefile2
-rw-r--r--arch/arm/mach-qcom/board.c73
-rw-r--r--arch/arm/mach-qcom/headsmp.S48
-rw-r--r--arch/arm/mach-qcom/hotplug.c160
-rw-r--r--arch/arm/mach-qcom/platsmp.c427
-rw-r--r--arch/arm/mach-qcom/platsmp.h26
-rw-r--r--arch/arm/mach-qcom/scm-boot.c55
-rw-r--r--arch/arm/mach-qcom/scm.c81
-rw-r--r--arch/arm/mach-qcom/scm.h28
-rw-r--r--arch/arm/mm/Kconfig15
-rw-r--r--arch/arm/mm/alignment.c35
-rw-r--r--arch/arm/mm/cache-v7.S1
-rw-r--r--arch/arm/mm/init.c3
-rw-r--r--arch/arm/tools/mach-types37
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile7
-rw-r--r--drivers/base/power/opp.c90
-rw-r--r--drivers/char/Kconfig55
-rw-r--r--drivers/char/Makefile4
-rw-r--r--drivers/char/mem.c17
-rw-r--r--drivers/char/misc.c2
-rw-r--r--drivers/char/msm_smd_pkt.c1555
-rw-r--r--drivers/clk/Kconfig2
-rw-r--r--drivers/clk/Makefile3
-rw-r--r--drivers/clk/clk.c69
-rw-r--r--drivers/clk/clkdev.c9
-rw-r--r--drivers/clk/qcom/Makefile9
-rw-r--r--drivers/clk/qcom/clock-alpha-pll.c322
-rw-r--r--drivers/clk/qcom/clock-dummy.c100
-rw-r--r--drivers/clk/qcom/clock-gcc-8916.c2979
-rw-r--r--drivers/clk/qcom/clock-generic.c793
-rw-r--r--drivers/clk/qcom/clock-krait.c523
-rw-r--r--drivers/clk/qcom/clock-local2.c1505
-rw-r--r--drivers/clk/qcom/clock-pll.c629
-rw-r--r--drivers/clk/qcom/clock-rpm-8916.c231
-rw-r--r--drivers/clk/qcom/clock-rpm.c318
-rw-r--r--drivers/clk/qcom/clock-voter.c175
-rw-r--r--drivers/clk/qcom/clock.c906
-rw-r--r--drivers/clk/qcom/clock.h51
-rw-r--r--drivers/clk/qcom/gdsc.c419
-rw-r--r--drivers/clocksource/qcom-timer.c1126
-rw-r--r--drivers/cpufreq/Kconfig.arm10
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/qcom-cpufreq.c627
-rw-r--r--drivers/cpuidle/Makefile3
-rw-r--r--drivers/cpuidle/lpm_levels.c1210
-rw-r--r--drivers/devfreq/Kconfig27
-rw-r--r--drivers/devfreq/Makefile3
-rw-r--r--drivers/devfreq/devfreq.c61
-rw-r--r--drivers/devfreq/governor.h1
-rw-r--r--drivers/devfreq/governor_cpubw_hwmon.c470
-rw-r--r--drivers/devfreq/governor_msm_adreno_tz.c358
-rw-r--r--drivers/devfreq/governor_msm_cpufreq.c85
-rw-r--r--drivers/devfreq/governor_performance.c24
-rw-r--r--drivers/devfreq/governor_powersave.c5
-rw-r--r--drivers/devfreq/governor_simpleondemand.c28
-rw-r--r--drivers/devfreq/governor_userspace.c3
-rw-r--r--drivers/hwspinlock/Kconfig11
-rw-r--r--drivers/hwspinlock/Makefile1
-rw-r--r--drivers/hwspinlock/msm_remote_spinlock.c484
-rw-r--r--drivers/irqchip/Kconfig19
-rw-r--r--drivers/irqchip/Makefile2
-rw-r--r--drivers/irqchip/irq-msm.c49
-rw-r--r--drivers/irqchip/msm_show_resume_irq.c22
-rw-r--r--drivers/misc/Kconfig92
-rw-r--r--drivers/misc/Makefile9
-rw-r--r--drivers/misc/qfp_fuse.c498
-rw-r--r--drivers/misc/qpnp-misc.c189
-rw-r--r--drivers/misc/smsc_hub.c851
-rw-r--r--drivers/misc/uid_stat.c152
-rw-r--r--drivers/of/Kconfig6
-rw-r--r--drivers/of/fdt.c2
-rw-r--r--drivers/pinctrl/Kconfig22
-rw-r--r--drivers/pinctrl/Makefile9
-rw-r--r--drivers/pinctrl/core.c10
-rw-r--r--drivers/pinctrl/core.h2
-rw-r--r--drivers/pinctrl/devicetree.c9
-rw-r--r--drivers/pinctrl/pinconf-generic.c3
-rw-r--r--drivers/pinctrl/pinctrl-msm-tlmm-v3.c892
-rw-r--r--drivers/pinctrl/pinctrl-msm-tlmm-v4.c982
-rw-r--r--drivers/pinctrl/pinctrl-msm.c863
-rw-r--r--drivers/pinctrl/pinctrl-msm.h183
-rw-r--r--drivers/power/Kconfig157
-rw-r--r--drivers/power/Makefile14
-rw-r--r--drivers/power/power_supply_sysfs.c24
-rw-r--r--drivers/power/qcom/Kconfig50
-rw-r--r--drivers/power/qcom/Makefile4
-rw-r--r--drivers/power/qcom/idle.h23
-rw-r--r--drivers/power/qcom/msm-pm.c922
-rw-r--r--drivers/power/qcom/no-pm.c26
-rw-r--r--drivers/power/qcom/pm-boot.c77
-rw-r--r--drivers/power/qcom/pm-boot.h19
-rw-r--r--drivers/power/qcom/pm-data.c437
-rw-r--r--drivers/power/qcom/pm-stats.c672
-rw-r--r--drivers/power/reset/Kconfig15
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/msm-poweroff.c302
-rw-r--r--drivers/regulator/Kconfig89
-rw-r--r--drivers/regulator/Makefile9
-rw-r--r--drivers/regulator/of_regulator.c73
-rw-r--r--drivers/regulator/qpnp-regulator.c2054
-rw-r--r--drivers/regulator/rpm-smd-regulator.c1722
-rw-r--r--drivers/regulator/spm-regulator.c641
-rw-r--r--drivers/regulator/stub-regulator.c304
-rw-r--r--drivers/soc/Kconfig2
-rw-r--r--drivers/soc/qcom/Kconfig407
-rw-r--r--drivers/soc/qcom/Makefile53
-rw-r--r--drivers/soc/qcom/common_log.c83
-rw-r--r--drivers/soc/qcom/cpu_ops.c266
-rw-r--r--drivers/soc/qcom/event_timer.c322
-rw-r--r--drivers/soc/qcom/idle-v7.S64
-rw-r--r--drivers/soc/qcom/idle-v8.S59
-rw-r--r--drivers/soc/qcom/ipc_router_hsic_xprt.c720
-rw-r--r--drivers/soc/qcom/ipc_router_smd_xprt.c923
-rw-r--r--drivers/soc/qcom/mct.c926
-rw-r--r--drivers/soc/qcom/memory_dump.c95
-rw-r--r--drivers/soc/qcom/memory_dump_v2.c173
-rw-r--r--drivers/soc/qcom/msm_rq_stats.c390
-rw-r--r--drivers/soc/qcom/nohlt.c49
-rw-r--r--drivers/soc/qcom/rpm-smd-debug.c141
-rw-r--r--drivers/soc/qcom/rpm-smd.c1433
-rw-r--r--drivers/soc/qcom/rpm_log.c527
-rw-r--r--drivers/soc/qcom/rpm_log.h35
-rw-r--r--drivers/soc/qcom/rpm_master_stat.c436
-rw-r--r--drivers/soc/qcom/rpm_rbcpr_stats_v2.c420
-rw-r--r--drivers/soc/qcom/rpm_stats.c405
-rw-r--r--drivers/soc/qcom/rpm_stats.h41
-rw-r--r--drivers/soc/qcom/smd.c3335
-rw-r--r--drivers/soc/qcom/smd_debug.c404
-rw-r--r--drivers/soc/qcom/smd_init_dt.c346
-rw-r--r--drivers/soc/qcom/smd_private.c333
-rw-r--r--drivers/soc/qcom/smd_private.h247
-rw-r--r--drivers/soc/qcom/smem.c1493
-rw-r--r--drivers/soc/qcom/smem_debug.c139
-rw-r--r--drivers/soc/qcom/smem_log.c1043
-rw-r--r--drivers/soc/qcom/smem_private.h104
-rw-r--r--drivers/soc/qcom/smp2p.c1934
-rw-r--r--drivers/soc/qcom/smp2p_loopback.c449
-rw-r--r--drivers/soc/qcom/smp2p_private.h252
-rw-r--r--drivers/soc/qcom/smp2p_private_api.h79
-rw-r--r--drivers/soc/qcom/smp2p_spinlock_test.c499
-rw-r--r--drivers/soc/qcom/smp2p_test.c1248
-rw-r--r--drivers/soc/qcom/smp2p_test_common.h213
-rw-r--r--drivers/soc/qcom/smsm_debug.c330
-rw-r--r--drivers/soc/qcom/socinfo.c1355
-rw-r--r--drivers/soc/qcom/spm-v2.c541
-rw-r--r--drivers/soc/qcom/spm_devices.c562
-rw-r--r--drivers/soc/qcom/spm_driver.h54
-rw-r--r--drivers/soc/qcom/subsys-pil-tz.c964
-rw-r--r--drivers/soc/qcom/subsystem_notif.c222
-rw-r--r--drivers/soc/qcom/subsystem_restart.c1491
-rw-r--r--drivers/soc/qcom/sysmon.c376
-rw-r--r--drivers/spmi/Makefile2
-rw-r--r--drivers/spmi/spmi-resources.c151
-rw-r--r--drivers/spmi/spmi.c2
-rw-r--r--drivers/tty/Kconfig19
-rw-r--r--drivers/tty/Makefile3
-rw-r--r--drivers/tty/serial/msm_serial.c11
-rw-r--r--include/asm-generic/dma-contiguous.h16
-rw-r--r--include/asm-generic/gpio.h2
-rw-r--r--include/asm-generic/percpu-defs.h0
-rw-r--r--include/asm-generic/vmlinux.lds.h17
-rw-r--r--include/dt-bindings/clock/msm-clocks-8916.h222
-rw-r--r--include/dt-bindings/msm/msm-bus-ids.h628
-rw-r--r--include/linux/clk-provider.h2
-rw-r--r--include/linux/clk.h2
-rw-r--r--include/linux/clk/msm-clk-provider.h242
-rw-r--r--include/linux/clk/msm-clk.h48
-rw-r--r--include/linux/clk/msm-clock-generic.h266
-rw-r--r--include/linux/clkdev.h1
-rw-r--r--include/linux/cpu.h7
-rw-r--r--include/linux/cpufreq.h1
-rw-r--r--include/linux/devfreq.h32
-rw-r--r--include/linux/dma-attrs.h2
-rw-r--r--include/linux/esoc_client.h52
-rw-r--r--include/linux/gfp.h15
-rw-r--r--include/linux/hardirq.h68
-rw-r--r--include/linux/highmem.h22
-rw-r--r--include/linux/iopoll.h114
-rw-r--r--include/linux/ipc_logging.h265
-rw-r--r--include/linux/ipc_router.h298
-rw-r--r--include/linux/irq.h5
-rw-r--r--include/linux/irqchip/arm-gic.h10
-rw-r--r--include/linux/irqchip/msm-gpio-irq.h38
-rw-r--r--include/linux/irqchip/msm-mpm-irq.h167
-rw-r--r--include/linux/irqchip/qpnp-int.h131
-rw-r--r--include/linux/kernel.h5
-rw-r--r--include/linux/kernel_stat.h11
-rw-r--r--include/linux/keyreset.h28
-rw-r--r--include/linux/memblock.h1
-rw-r--r--include/linux/mod_devicetable.h12
-rw-r--r--include/linux/msm-bus-board.h196
-rw-r--r--include/linux/msm-bus.h139
-rw-r--r--include/linux/msm_audio_ion.h68
-rw-r--r--include/linux/msm_kgsl.h113
-rw-r--r--include/linux/msm_mdp.h83
-rw-r--r--include/linux/msm_remote_spinlock.h74
-rw-r--r--include/linux/msm_rtb.h83
-rw-r--r--include/linux/msm_smd_pkt.h23
-rw-r--r--include/linux/percpu-defs.h13
-rw-r--r--include/linux/perf_event.h2
-rw-r--r--include/linux/pm_opp.h12
-rw-r--r--include/linux/power_supply.h91
-rw-r--r--include/linux/qcomwlan_secif.h41
-rw-r--r--include/linux/qfp_fuse.h41
-rw-r--r--include/linux/qpnp-misc.h38
-rw-r--r--include/linux/qpnp-revid.h139
-rw-r--r--include/linux/qpnp/clkdiv.h35
-rw-r--r--include/linux/qpnp/pin.h190
-rw-r--r--include/linux/qpnp/power-on.h73
-rw-r--r--include/linux/qpnp/pwm.h184
-rw-r--r--include/linux/reboot.h1
-rw-r--r--include/linux/regulator/consumer.h10
-rw-r--r--include/linux/regulator/cpr-regulator.h69
-rw-r--r--include/linux/regulator/krait-regulator.h39
-rw-r--r--include/linux/regulator/machine.h10
-rw-r--r--include/linux/regulator/qpnp-regulator.h197
-rw-r--r--include/linux/regulator/rpm-smd-regulator.h108
-rw-r--r--include/linux/regulator/spm-regulator.h25
-rw-r--r--include/linux/regulator/stub-regulator.h54
-rw-r--r--include/linux/remote_spinlock.h98
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/sched_clock.h5
-rw-r--r--include/linux/smp.h21
-rw-r--r--include/linux/spmi.h74
-rw-r--r--include/linux/topology.h109
-rw-r--r--include/soc/qcom/avs.h38
-rw-r--r--include/soc/qcom/boot_stats.h17
-rw-r--r--include/soc/qcom/clock-alpha-pll.h59
-rw-r--r--include/soc/qcom/clock-krait.h108
-rw-r--r--include/soc/qcom/clock-local2.h239
-rw-r--r--include/soc/qcom/clock-pll.h178
-rw-r--r--include/soc/qcom/clock-rpm.h151
-rw-r--r--include/soc/qcom/clock-voter.h51
-rw-r--r--include/soc/qcom/cpufreq.h39
-rw-r--r--include/soc/qcom/event_timer.h77
-rw-r--r--include/soc/qcom/jtag.h54
-rw-r--r--include/soc/qcom/pm.h155
-rw-r--r--include/soc/qcom/ramdump.h55
-rw-r--r--include/soc/qcom/restart.h24
-rw-r--r--include/soc/qcom/rpm-notifier.h63
-rw-r--r--include/soc/qcom/rpm-smd.h268
-rw-r--r--include/soc/qcom/scm-boot.h65
-rw-r--r--include/soc/qcom/scm.h124
-rw-r--r--include/soc/qcom/smd.h401
-rw-r--r--include/soc/qcom/smem.h241
-rw-r--r--include/soc/qcom/smem_log.h72
-rw-r--r--include/soc/qcom/smsm.h147
-rw-r--r--include/soc/qcom/socinfo.h604
-rw-r--r--include/soc/qcom/spm.h185
-rw-r--r--include/soc/qcom/subsystem_notif.h87
-rw-r--r--include/soc/qcom/subsystem_restart.h143
-rw-r--r--include/soc/qcom/sysmon.h60
-rw-r--r--init/Kconfig6
-rw-r--r--init/main.c12
-rw-r--r--kernel/cpu.c22
-rw-r--r--kernel/fork.c3
-rw-r--r--kernel/irq/manage.c45
-rw-r--r--scripts/Makefile.lib6
318 files changed, 69797 insertions, 796 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 89c4b5ccc68d..f5837ab2ed7c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -217,6 +217,9 @@ config NEED_RET_TO_USER
config ARCH_MTD_XIP
bool
+config ARCH_WANT_KMAP_ATOMIC_FLUSH
+ bool
+
config VECTORS_BASE
hex
default 0xffff0000 if MMU || CPU_HIGH_VECTOR
@@ -283,6 +286,24 @@ config GENERIC_BUG
def_bool y
depends on BUG
+config ARM_USER_ACCESSIBLE_TIMER_BASE
+ hex "Base address of user-accessible timer counter page"
+ default 0xfffef000
+ depends on ARM_USE_USER_ACCESSIBLE_TIMERS
+ help
+ Specify the base user-space virtual address where the user-accessible
+ timer counter page should be mapped by the kernel. User-space apps
+ will read directly from the page at this address.
+
+config ARCH_RANDOM
+ bool "SOC specific random number generation"
+ help
+ Allow the kernel to use an architecture specific implementation for
+ random number generation
+
+	  If unsure, say N.
+
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -313,7 +334,7 @@ config ARCH_MULTIPLATFORM
select ARM_PATCH_PHYS_VIRT
select AUTO_ZRELADDR
select CLKSRC_OF
- select COMMON_CLK
+# select COMMON_CLK
select GENERIC_CLOCKEVENTS
select MIGHT_HAVE_PCI
select MULTI_IRQ_HANDLER
@@ -639,9 +660,22 @@ config ARCH_PXA
config ARCH_MSM
bool "Qualcomm MSM (non-multiplatform)"
+ select ARCH_HAS_CPUFREQ
select ARCH_REQUIRE_GPIOLIB
select COMMON_CLK
select GENERIC_CLOCKEVENTS
+ select GENERIC_GPIO
+ select GENERIC_TIME
+ select GENERIC_ALLOCATOR
+ select HAVE_CLK
+ select HAVE_CLK_PREPARE
+ select NEED_MACH_MEMORY_H
+ select NEED_MACH_IO_H
+ select NEED_MACH_GPIO_H
+ select SOC_BUS
+ select MULTI_IRQ_HANDLER
+ select SPARSE_IRQ
+ select USE_OF
help
Support for Qualcomm MSM/QSD based systems. This runs on the
apps processor of the MSM/QSD and depends on a shared memory
@@ -1009,6 +1043,15 @@ source "arch/arm/firmware/Kconfig"
source arch/arm/mm/Kconfig
+config RESERVE_FIRST_PAGE
+ bool
+ default n
+ help
+ Reserve the first page at PHYS_OFFSET. The first
+ physical page is used by many platforms for warm
+ boot operations. Reserve this page so that it is
+ not allocated by the kernel.
+
config IWMMXT
bool "Enable iWMMXt support"
depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 || CPU_PJ4B
@@ -1253,6 +1296,12 @@ config ARM_ERRATA_773022
loop buffer may deliver incorrect instructions. This
workaround disables the loop buffer to avoid the erratum.
+config PERFMAP
+ tristate "Perfmap support (EXPERIMENTAL)"
+ depends on ARCH_MSM_KRAIT
+ help
+ Perfmap: memory mapped interface for performance monitors.
+
endmenu
source "arch/arm/common/Kconfig"
@@ -1707,6 +1756,42 @@ config ARCH_WANT_GENERAL_HUGETLB
source "mm/Kconfig"
+config ARCH_MEMORY_PROBE
+ def_bool n
+
+config ARCH_MEMORY_REMOVE
+ def_bool n
+
+config ENABLE_DMM
+ def_bool n
+
+choice
+ prompt "Virtual Memory Reclaim"
+ default NO_VM_RECLAIM
+ help
+ Select the method of reclaiming virtual memory
+
+config ENABLE_VMALLOC_SAVING
+ bool "Reclaim memory for each subsystem"
+ help
+ Enable this config to reclaim the virtual space belonging
+ to any subsystem which is expected to have a lifetime of
+ the entire system. This feature allows lowmem to be non-
+ contiguous.
+
+config NO_VM_RECLAIM
+ bool "Do not reclaim memory"
+ help
+ Do not reclaim any memory. This might result in less lowmem
+ and wasting virtual memory space which could otherwise be
+ reclaimed by using any of the other two config options.
+
+endchoice
+
+config HOLES_IN_ZONE
+ def_bool n
+ depends on SPARSEMEM
+
config FORCE_MAX_ZONEORDER
int "Maximum zone order" if ARCH_SHMOBILE_LEGACY
range 11 64 if ARCH_SHMOBILE_LEGACY
@@ -1790,6 +1875,26 @@ config XEN
help
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
+config CP_ACCESS
+ tristate "CP register access tool"
+ default m
+ help
+ Provide support for Coprocessor register access using /sys
+ interface. Read and write to CP registers from userspace
+ through sysfs interface. A sys file (cp_rw) will be created under
+ /sys/devices/cpaccess/cpaccess0.
+
+ If unsure, say N.
+
+config ARM_FLUSH_CONSOLE_ON_RESTART
+ bool "Force flush the console on restart"
+ help
+ If the console is locked while the system is rebooted, the messages
+	  in the temporary logbuffer would not have propagated to all the
+ console drivers. This option forces the console lock to be
+ released if it failed to be acquired, which will cause all the
+ pending messages to be flushed.
+
endmenu
menu "Boot options"
@@ -1820,6 +1925,21 @@ config DEPRECATED_PARAM_STRUCT
This was deprecated in 2001 and announced to live on for 5 years.
Some old boot loaders still use this way.
+config BUILD_ARM_APPENDED_DTB_IMAGE
+ bool "Build a concatenated zImage/dtb by default"
+ depends on OF
+ help
+ Enabling this option will cause a concatenated zImage and list of
+ DTBs to be built by default (instead of a standalone zImage.)
+	  The image will be built in arch/arm/boot/zImage-dtb
+
+config BUILD_ARM_APPENDED_DTB_IMAGE_NAMES
+ string "Default dtb names"
+ depends on BUILD_ARM_APPENDED_DTB_IMAGE
+ help
+ Space separated list of names of dtbs to append when
+ building a concatenated zImage-dtb.
+
# Compressed boot loader in ROM. Yes, we really want to ask about
# TEXT and BSS so we preserve their values in the config files.
config ZBOOT_ROM_TEXT
@@ -2043,6 +2163,17 @@ config AUTO_ZRELADDR
0xf8000000. This assumes the zImage being placed in the first 128MB
from start of memory.
+config ARM_DECOMPRESSOR_LIMIT
+ hex "Limit the decompressor memory area"
+ default 0x10000000
+ help
+ Allows overriding of the memory size that decompressor maps with
+ read, write and execute permissions to avoid speculative prefetch.
+
+	  By default ARM_DECOMPRESSOR_LIMIT maps the first 1GB of memory
+	  with read, write and execute permissions, and the rest of the
+	  memory as strongly ordered.
+
endmenu
menu "CPU Power Management"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index d8f6a2ec3d4e..797730343ae6 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -75,6 +75,29 @@ config DEBUG_USER
8 - SIGSEGV faults
16 - SIGBUS faults
+config FORCE_PAGES
+ bool "Force lowmem to be mapped with 4K pages"
+ help
+ There are some advanced debug features that can only be done when
+ memory is mapped with pages instead of sections. Enable this option
+ to always map lowmem pages with pages. This may have a performance
+ cost due to increased TLB pressure.
+
+ If unsure say N.
+
+config FREE_PAGES_RDONLY
+ bool "Set pages as read only while on the buddy list"
+ select FORCE_PAGES
+ select PAGE_POISONING
+ help
+ Pages are always mapped in the kernel. This means that anyone
+ can write to the page if they have the address. Enable this option
+ to mark pages as read only to trigger a fault if any code attempts
+ to write to a page on the buddy list. This may have a performance
+ impact.
+
+ If unsure, say N.
+
# These options are only for real kernel hackers who want to get their hands dirty.
config DEBUG_LL
bool "Kernel low-level debugging functions (read help!)"
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 034a94904d69..4d21d84ba867 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -70,6 +70,18 @@ arch-$(CONFIG_CPU_32v3) =-D__LINUX_ARM_ARCH__=3 -march=armv3
# Evaluate arch cc-option calls now
arch-y := $(arch-y)
+# Since 'cortex-a15' is a superset of the 'armv7-a' arch spec, we need to
+# explicitly redefine the arch options to not include '-march=armv7-a' when
+# generating code for Krait, which is compatible with the instruction set of the
+# Cortex-A15, because GCC will warn us about ambiguous ISA restrictions caused
+# by seemingly conflicting -march and -mcpu options.
+# If $(CC) does not support the -mcpu=cortex-a15 option, fall back on passing
+# -march=armv7-a to specify the ISA restriction, though this is suboptimal. To
+# keep things simpler, we don't bother with a fallback option if the compiler
+# doesn't even support -march=armv7-a, since in that situation we would have
+# bigger problems.
+arch-$(CONFIG_ARCH_MSM_KRAIT) :=-D__LINUX_ARM_ARCH__=7 $(call cc-option,-mcpu=cortex-a15,-march=armv7-a)
+
# This selects how we optimise for the processor.
tune-$(CONFIG_CPU_ARM7TDMI) =-mtune=arm7tdmi
tune-$(CONFIG_CPU_ARM720T) =-mtune=arm7tdmi
@@ -278,6 +290,8 @@ libs-y := arch/arm/lib/ $(libs-y)
# Default target when executing plain make
ifeq ($(CONFIG_XIP_KERNEL),y)
KBUILD_IMAGE := xipImage
+else ifeq ($(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE),y)
+KBUILD_IMAGE := zImage-dtb
else
KBUILD_IMAGE := zImage
endif
@@ -315,6 +329,9 @@ PHONY += dtbs dtbs_install
dtbs dtbs_install: prepare scripts
$(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) $@
+zImage-dtb: vmlinux scripts dtbs
+ $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
+
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/arm/boot/.gitignore b/arch/arm/boot/.gitignore
index 3c79f85975aa..ad7a0253ea96 100644
--- a/arch/arm/boot/.gitignore
+++ b/arch/arm/boot/.gitignore
@@ -4,3 +4,4 @@ xipImage
bootpImage
uImage
*.dtb
+zImage-dtb \ No newline at end of file
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index ec2f8065f955..13ccec54b094 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -27,6 +27,14 @@ export ZRELADDR INITRD_PHYS PARAMS_PHYS
targets := Image zImage xipImage bootpImage uImage
+DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE_NAMES))
+ifneq ($(DTB_NAMES),)
+DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
+else
+DTB_LIST := $(dtb-y)
+endif
+DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST))
+
ifeq ($(CONFIG_XIP_KERNEL),y)
$(obj)/xipImage: vmlinux FORCE
@@ -55,6 +63,12 @@ $(obj)/zImage: $(obj)/compressed/vmlinux FORCE
$(call if_changed,objcopy)
@$(kecho) ' Kernel: $@ is ready'
+$(obj)/zImage-dtb: $(obj)/zImage $(DTB_OBJS) FORCE
+ @echo 1
+ $(call if_changed,cat)
+ @echo 2
+ @echo ' Kernel: $@ is ready'
+
endif
ifneq ($(LOADADDR),)
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 68be9017593d..e9e0284fc8f2 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -639,7 +639,7 @@ __setup_mmu: sub r3, r4, #16384 @ Page directory size
mov r0, r3
mov r9, r0, lsr #18
mov r9, r9, lsl #18 @ start of RAM
- add r10, r9, #0x10000000 @ a reasonable RAM size
+ add r10, r9, #CONFIG_ARM_DECOMPRESSOR_LIMIT
mov r1, #0x12 @ XN|U + section mapping
orr r1, r1, #3 << 10 @ AP=11
add r2, r3, #16384
@@ -730,6 +730,8 @@ __armv7_mmu_cache_on:
bic r6, r6, #1 << 31 @ 32-bit translation system
bic r6, r6, #3 << 0 @ use only ttbr0
mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer
+ mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
+ mcr p15, 0, r0, c7, c5, 4 @ ISB
mcrne p15, 0, r1, c3, c0, 0 @ load domain access control
mcrne p15, 0, r6, c2, c0, 2 @ load ttb control
#endif
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 38c89cafa1ab..c0ae2c8f1ab5 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -519,11 +519,23 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += mt6589-aquaris5.dtb
targets += dtbs dtbs_install
targets += $(dtb-y)
+DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE_NAMES))
+ifneq ($(DTB_NAMES),)
+DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
+else
+DTB_LIST := $(dtb-y)
+endif
+
+targets += dtbs
+targets += $(DTB_LIST)
+
+#used for cleaning - not building
+subdir- := qcom
endif
# *.dtb used to be generated in the directory above. Clean out the
# old build results so people don't accidentally use them.
-dtbs: $(addprefix $(obj)/, $(dtb-y))
+dtbs: $(addprefix $(obj)/, $(DTB_LIST))
$(Q)rm -f $(obj)/../*.dtb
clean-files := *.dtb
diff --git a/arch/arm/boot/dts/msm-gdsc-8916.dtsi b/arch/arm/boot/dts/msm-gdsc-8916.dtsi
new file mode 100644
index 000000000000..891e59867d8f
--- /dev/null
+++ b/arch/arm/boot/dts/msm-gdsc-8916.dtsi
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ gdsc_venus: qcom,gdsc@184c018 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_venus";
+ reg = <0x184c018 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_mdss: qcom,gdsc@184d078 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_mdss";
+ reg = <0x184d078 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_jpeg: qcom,gdsc@185701c {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_jpeg";
+ reg = <0x185701c 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_vfe: qcom,gdsc@1858034 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_vfe";
+ reg = <0x1858034 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_oxili_gx: qcom,gdsc@185901c {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_oxili_gx";
+ reg = <0x185901c 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_venus_core0: qcom,gdsc@184c028 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_venus_core0";
+ reg = <0x184c028 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_venus_core1: qcom,gdsc@184c030 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_venus_core1";
+ reg = <0x184c030 0x4>;
+ status = "disabled";
+ };
+};
diff --git a/arch/arm/boot/dts/msm-gdsc.dtsi b/arch/arm/boot/dts/msm-gdsc.dtsi
new file mode 100644
index 000000000000..9a1f32eb0c7a
--- /dev/null
+++ b/arch/arm/boot/dts/msm-gdsc.dtsi
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ gdsc_venus: qcom,gdsc@fd8c1024 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_venus";
+ reg = <0xfd8c1024 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_venus_core0: qcom,gdsc@fd8c1040 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_venus_core0";
+ reg = <0xfd8c1040 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_venus_core1: qcom,gdsc@fd8c1044 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_venus_core1";
+ reg = <0xfd8c1044 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_venus_core2: qcom,gdsc@fd8c1050 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_venus_core2";
+ reg = <0xfd8c1050 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_vpu: qcom,gdsc@fd8c1404 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_vpu";
+ reg = <0xfd8c1404 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_camss_top: qcom,gdsc@fd8c34a0 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_camss_top";
+ reg = <0xfd8c34a0 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_mdss: qcom,gdsc@fd8c2304 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_mdss";
+ reg = <0xfd8c2304 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_jpeg: qcom,gdsc@fd8c35a4 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_jpeg";
+ reg = <0xfd8c35a4 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_vfe: qcom,gdsc@fd8c36a4 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_vfe";
+ reg = <0xfd8c36a4 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_cpp: qcom,gdsc@fd8c36d4 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_cpp";
+ reg = <0xfd8c36d4 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_oxili_gx: qcom,gdsc@fd8c4024 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_oxili_gx";
+ reg = <0xfd8c4024 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_oxili_cx: qcom,gdsc@fd8c4034 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_oxili_cx";
+ reg = <0xfd8c4034 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_usb_hsic: qcom,gdsc@fc400404 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_usb_hsic";
+ reg = <0xfc400404 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_pcie: qcom,gdsc@0xfc401e18 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_pcie";
+ reg = <0xfc401e18 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_pcie_0: qcom,gdsc@fc401ac4 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_pcie_0";
+ reg = <0xfc401ac4 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_pcie_1: qcom,gdsc@fc401b44 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_pcie_1";
+ reg = <0xfc401b44 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_usb30: qcom,gdsc@fc401e84 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_usb30";
+ reg = <0xfc401e84 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_usb30_sec: qcom,gdsc@fc401ec0 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_usb30_sec";
+ reg = <0xfc401ec0 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_vcap: qcom,gdsc@fd8c1804 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_vcap";
+ reg = <0xfd8c1804 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_bcss: qcom,gdsc@fc744128 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_bcss";
+ reg = <0xfc744128 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_ufs: qcom,gdsc@fc401d44 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_ufs";
+ reg = <0xfc401d44 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_fd: qcom,gdsc@fd8c3b64 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_fd";
+ reg = <0xfd8c3b64 0x4>;
+ status = "disabled";
+ };
+};
diff --git a/arch/arm/boot/dts/msm-pm8916-rpm-regulator.dtsi b/arch/arm/boot/dts/msm-pm8916-rpm-regulator.dtsi
new file mode 100644
index 000000000000..d5b7ea55c72b
--- /dev/null
+++ b/arch/arm/boot/dts/msm-pm8916-rpm-regulator.dtsi
@@ -0,0 +1,365 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&rpm_bus {
+ rpm-regulator-smpa1 {
+ compatible = "qcom,rpm-smd-regulator-resource";
+ qcom,resource-name = "smpa";
+ qcom,resource-id = <1>;
+ qcom,regulator-type = <1>;
+ qcom,hpm-min-load = <100000>;
+ status = "disabled";
+
+ regulator-s1 {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_s1";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-smpa2 {
+ compatible = "qcom,rpm-smd-regulator-resource";
+ qcom,resource-name = "smpa";
+ qcom,resource-id = <2>;
+ qcom,regulator-type = <1>;
+ qcom,hpm-min-load = <100000>;
+ status = "disabled";
+
+ regulator-s2 {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_s2";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-smpa3 {
+ compatible = "qcom,rpm-smd-regulator-resource";
+ qcom,resource-name = "smpa";
+ qcom,resource-id = <3>;
+ qcom,regulator-type = <1>;
+ qcom,hpm-min-load = <100000>;
+ status = "disabled";
+
+ regulator-s3 {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_s3";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-smpa4 {
+ compatible = "qcom,rpm-smd-regulator-resource";
+ qcom,resource-name = "smpa";
+ qcom,resource-id = <4>;
+ qcom,regulator-type = <1>;
+ qcom,hpm-min-load = <100000>;
+ status = "disabled";
+
+ regulator-s4 {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_s4";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa1 {
+ compatible = "qcom,rpm-smd-regulator-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <1>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l1 {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_l1";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa2 {
+ compatible = "qcom,rpm-smd-regulator-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <2>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l2 {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_l2";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa3 {
+ compatible = "qcom,rpm-smd-regulator-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <3>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l3 {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_l3";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa4 {
+ compatible = "qcom,rpm-smd-regulator-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <4>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l4 {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_l4";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa5 {
+ compatible = "qcom,rpm-smd-regulator-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <5>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l5 {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_l5";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa6 {
+ compatible = "qcom,rpm-smd-regulator-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <6>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l6 {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_l6";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa7 {
+ compatible = "qcom,rpm-smd-regulator-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <7>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l7 {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_l7";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa8 {
+ compatible = "qcom,rpm-smd-regulator-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <8>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l8 {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_l8";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa9 {
+ compatible = "qcom,rpm-smd-regulator-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <9>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l9 {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_l9";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa10 {
+ compatible = "qcom,rpm-smd-regulator-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <10>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l10 {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_l10";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa11 {
+ compatible = "qcom,rpm-smd-regulator-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <11>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l11 {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_l11";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa12 {
+ compatible = "qcom,rpm-smd-regulator-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <12>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l12 {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_l12";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa13 {
+ compatible = "qcom,rpm-smd-regulator-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <13>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <5000>;
+ status = "disabled";
+
+ regulator-l13 {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_l13";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa14 {
+ compatible = "qcom,rpm-smd-regulator-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <14>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <5000>;
+ status = "disabled";
+
+ regulator-l14 {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_l14";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa15 {
+ compatible = "qcom,rpm-smd-regulator-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <15>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <5000>;
+ status = "disabled";
+
+ regulator-l15 {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_l15";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa16 {
+ compatible = "qcom,rpm-smd-regulator-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <16>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <5000>;
+ status = "disabled";
+
+ regulator-l16 {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_l16";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa17 {
+ compatible = "qcom,rpm-smd-regulator-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <17>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l17 {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_l17";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+
+ rpm-regulator-ldoa18 {
+ compatible = "qcom,rpm-smd-regulator-resource";
+ qcom,resource-name = "ldoa";
+ qcom,resource-id = <18>;
+ qcom,regulator-type = <0>;
+ qcom,hpm-min-load = <10000>;
+ status = "disabled";
+
+ regulator-l18 {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_l18";
+ qcom,set = <3>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/msm-pm8916.dtsi b/arch/arm/boot/dts/msm-pm8916.dtsi
new file mode 100644
index 000000000000..1a738de55bb4
--- /dev/null
+++ b/arch/arm/boot/dts/msm-pm8916.dtsi
@@ -0,0 +1,632 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&spmi_bus {
+
+ qcom,pm8916@0 {
+ spmi-slave-container;
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ pm8916_revid: qcom,revid@100 {
+ compatible = "qcom,qpnp-revid";
+ reg = <0x100 0x100>;
+ };
+
+ pm8916_pon: qcom,power-on@800 {
+ compatible = "qcom,qpnp-power-on";
+ reg = <0x800 0x100>;
+ interrupts = <0x0 0x8 0x0>,
+ <0x0 0x8 0x1>;
+ interrupt-names = "kpdpwr", "resin";
+ qcom,pon-dbc-delay = <15625>;
+ qcom,system-reset;
+
+ qcom,pon_1 {
+ qcom,pon-type = <0>;
+ qcom,support-reset = <1>;
+ qcom,pull-up = <1>;
+ qcom,s1-timer = <10256>;
+ qcom,s2-timer = <2000>;
+ qcom,s2-type = <1>;
+ linux,code = <116>;
+ };
+
+ qcom,pon_2 {
+ qcom,pon-type = <1>;
+ qcom,pull-up = <1>;
+ linux,code = <114>;
+ };
+ };
+
+ pm8916_mpps: mpps {
+ compatible = "qcom,qpnp-pin";
+ spmi-dev-container;
+ gpio-controller;
+ #gpio-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ label = "pm8916-mpp";
+
+ mpp@a000 {
+ reg = <0xa000 0x100>;
+ qcom,pin-num = <1>;
+ };
+
+ mpp@a100 {
+ reg = <0xa100 0x100>;
+ qcom,pin-num = <2>;
+ };
+
+ mpp@a200 {
+ reg = <0xa200 0x100>;
+ qcom,pin-num = <3>;
+ };
+
+ mpp@a300 {
+ reg = <0xa300 0x100>;
+ qcom,pin-num = <4>;
+ };
+ };
+
+ pm8916_gpios: gpios {
+ compatible = "qcom,qpnp-pin";
+ spmi-dev-container;
+ gpio-controller;
+ #gpio-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ label = "pm8916-gpio";
+
+ gpio@c000 {
+ reg = <0xc000 0x100>;
+ qcom,pin-num = <1>;
+ };
+
+ gpio@c100 {
+ reg = <0xc100 0x100>;
+ qcom,pin-num = <2>;
+ };
+
+ gpio@c200 {
+ reg = <0xc200 0x100>;
+ qcom,pin-num = <3>;
+ };
+
+ gpio@c300 {
+ reg = <0xc300 0x100>;
+ qcom,pin-num = <4>;
+ };
+ };
+
+ pm8916_rtc: qcom,pm8916_rtc {
+ spmi-dev-container;
+ compatible = "qcom,qpnp-rtc";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ qcom,qpnp-rtc-write = <0>;
+ qcom,qpnp-rtc-alarm-pwrup = <0>;
+
+ qcom,pm8916_rtc_rw@6000 {
+ reg = <0x6000 0x100>;
+ };
+ qcom,pm8916_rtc_alarm@6100 {
+ reg = <0x6100 0x100>;
+ interrupts = <0x0 0x61 0x1>;
+ };
+ };
+
+ pm8916_vadc: vadc@3100 {
+ compatible = "qcom,qpnp-vadc";
+ reg = <0x3100 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <0x0 0x31 0x0>;
+ interrupt-names = "eoc-int-en-set";
+ qcom,adc-bit-resolution = <15>;
+ qcom,adc-vdd-reference = <1800>;
+ qcom,vadc-poll-eoc;
+ qcom,pmic-revid = <&pm8916_revid>;
+
+ chan@8 {
+ label = "die_temp";
+ reg = <8>;
+ qcom,decimation = <0>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <3>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+ chan@9 {
+ label = "ref_625mv";
+ reg = <9>;
+ qcom,decimation = <0>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
+
+ chan@a {
+ label = "ref_1250v";
+ reg = <0xa>;
+ qcom,decimation = <0>;
+ qcom,pre-div-channel-scaling = <0>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
+ };
+
+ pm8916_tz: qcom,temp-alarm@2400 {
+ compatible = "qcom,qpnp-temp-alarm";
+ reg = <0x2400 0x100>;
+ interrupts = <0x0 0x24 0x0>;
+ label = "pm8916_tz";
+ qcom,channel-num = <8>;
+ qcom,threshold-set = <0>;
+ qcom,temp_alarm-vadc = <&pm8916_vadc>;
+ };
+
+ pm8916_adc_tm: vadc@3400 {
+ compatible = "qcom,qpnp-adc-tm";
+ reg = <0x3400 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <0x0 0x34 0x0>,
+ <0x0 0x34 0x3>,
+ <0x0 0x34 0x4>;
+ interrupt-names = "eoc-int-en-set",
+ "high-thr-en-set",
+ "low-thr-en-set";
+ qcom,adc-bit-resolution = <15>;
+ qcom,adc-vdd-reference = <1800>;
+ qcom,adc_tm-vadc = <&pm8916_vadc>;
+ };
+
+ pm8916_chg: qcom,charger {
+ spmi-dev-container;
+ compatible = "qcom,qpnp-linear-charger";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ qcom,vddmax-mv = <4200>;
+ qcom,vddsafe-mv = <4200>;
+ qcom,vinmin-mv = <4308>;
+ qcom,ibatsafe-ma = <1440>;
+ qcom,thermal-mitigation = <1440 720 630 0>;
+ qcom,cool-bat-decidegc = <100>;
+ qcom,warm-bat-decidegc = <450>;
+ qcom,cool-bat-mv = <4100>;
+ qcom,warm-bat-mv = <4100>;
+ qcom,ibatmax-warm-ma = <360>;
+ qcom,ibatmax-cool-ma = <360>;
+ qcom,batt-hot-percentage = <25>;
+ qcom,batt-cold-percentage = <80>;
+ qcom,tchg-mins = <232>;
+ qcom,resume-soc = <99>;
+ qcom,chg-vadc = <&pm8916_vadc>;
+ qcom,chg-adc_tm = <&pm8916_adc_tm>;
+
+ status = "disabled";
+
+ qcom,chgr@1000 {
+ reg = <0x1000 0x100>;
+ interrupts = <0x0 0x10 0x7>,
+ <0x0 0x10 0x6>,
+ <0x0 0x10 0x5>,
+ <0x0 0x10 0x0>;
+ interrupt-names = "chg-done",
+ "chg-failed",
+ "fast-chg-on",
+ "vbat-det-lo";
+ };
+
+ qcom,bat-if@1200 {
+ reg = <0x1200 0x100>;
+ interrupts = <0x0 0x12 0x1>,
+ <0x0 0x12 0x0>;
+ interrupt-names = "bat-temp-ok",
+ "batt-pres";
+ };
+
+ qcom,usb-chgpth@1300 {
+ reg = <0x1300 0x100>;
+ interrupts = <0 0x13 0x4>,
+ <0 0x13 0x2>,
+ <0 0x13 0x1>;
+ interrupt-names = "usb-over-temp",
+ "chg-gone",
+ "usbin-valid";
+ };
+
+ qcom,chg-misc@1600 {
+ reg = <0x1600 0x100>;
+ };
+ };
+
+ pm8916_bms: qcom,vmbms {
+ spmi-dev-container;
+ compatible = "qcom,qpnp-vm-bms";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+
+ qcom,v-cutoff-uv = <3400000>;
+ qcom,max-voltage-uv = <4200000>;
+ qcom,r-conn-mohm = <0>;
+ qcom,shutdown-soc-valid-limit = <100>;
+ qcom,low-soc-calculate-soc-threshold = <15>;
+ qcom,low-voltage-calculate-soc-ms = <1000>;
+ qcom,low-soc-calculate-soc-ms = <5000>;
+ qcom,calculate-soc-ms = <20000>;
+ qcom,volatge-soc-timeout-ms = <60000>;
+ qcom,low-voltage-threshold = <3450000>;
+ qcom,s3-ocv-tolerence-uv = <1200>;
+ qcom,low-soc-fifo-length = <2>;
+ qcom,bms-vadc = <&pm8916_vadc>;
+ qcom,bms-adc_tm = <&pm8916_adc_tm>;
+ qcom,pmic-revid = <&pm8916_revid>;
+
+ qcom,force-s3-on-suspend;
+ qcom,force-s2-in-charging;
+ qcom,report-charger-eoc;
+
+ qcom,batt-pres-status@1208 {
+ reg = <0x1208 0x1>;
+ };
+
+ qcom,qpnp-chg-pres@1008 {
+ reg = <0x1008 0x1>;
+ };
+
+ qcom,vm-bms@4000 {
+ reg = <0x4000 0x100>;
+ interrupts = <0x0 0x40 0x0>,
+ <0x0 0x40 0x1>,
+ <0x0 0x40 0x2>,
+ <0x0 0x40 0x3>,
+ <0x0 0x40 0x4>,
+ <0x0 0x40 0x5>;
+
+ interrupt-names = "leave_cv",
+ "enter_cv",
+ "good_ocv",
+ "ocv_thr",
+ "fifo_update_done",
+ "fsm_state_change";
+ };
+ };
+
+ pm8916_leds: qcom,leds@a100 {
+ compatible = "qcom,leds-qpnp";
+ reg = <0xa100 0x100>;
+ label = "mpp";
+ };
+ };
+
+ qcom,pm8916@1 {
+ spmi-slave-container;
+ reg = <0x1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ regulator@1400 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8916_s1";
+ spmi-dev-container;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x1400 0x300>;
+ status = "disabled";
+
+ qcom,ctl@1400 {
+ reg = <0x1400 0x100>;
+ };
+ qcom,ps@1500 {
+ reg = <0x1500 0x100>;
+ };
+ qcom,freq@1600 {
+ reg = <0x1600 0x100>;
+ };
+ };
+
+ regulator@1700 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8916_s2";
+ spmi-dev-container;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x1700 0x300>;
+ status = "disabled";
+
+ qcom,ctl@1700 {
+ reg = <0x1700 0x100>;
+ };
+ qcom,ps@1800 {
+ reg = <0x1800 0x100>;
+ };
+ qcom,freq@1900 {
+ reg = <0x1900 0x100>;
+ };
+ };
+
+ regulator@1a00 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8916_s3";
+ spmi-dev-container;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x1a00 0x300>;
+ status = "disabled";
+
+ qcom,ctl@1a00 {
+ reg = <0x1a00 0x100>;
+ };
+ qcom,ps@1b00 {
+ reg = <0x1b00 0x100>;
+ };
+ qcom,freq@1c00 {
+ reg = <0x1c00 0x100>;
+ };
+ };
+
+ regulator@1d00 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8916_s4";
+ spmi-dev-container;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x1d00 0x300>;
+ status = "disabled";
+
+ qcom,ctl@1d00 {
+ reg = <0x1d00 0x100>;
+ };
+ qcom,ps@1e00 {
+ reg = <0x1e00 0x100>;
+ };
+ qcom,freq@1f00 {
+ reg = <0x1f00 0x100>;
+ };
+ };
+
+ regulator@4000 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8916_l1";
+ reg = <0x4000 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4100 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8916_l2";
+ reg = <0x4100 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4200 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8916_l3";
+ reg = <0x4200 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4300 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8916_l4";
+ reg = <0x4300 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4400 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8916_l5";
+ reg = <0x4400 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4500 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8916_l6";
+ reg = <0x4500 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4600 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8916_l7";
+ reg = <0x4600 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4700 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8916_l8";
+ reg = <0x4700 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4800 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8916_l9";
+ reg = <0x4800 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4900 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8916_l10";
+ reg = <0x4900 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4a00 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8916_l11";
+ reg = <0x4a00 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4b00 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8916_l12";
+ reg = <0x4b00 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4c00 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8916_l13";
+ reg = <0x4c00 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4d00 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8916_l14";
+ reg = <0x4d00 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4e00 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8916_l15";
+ reg = <0x4e00 0x100>;
+ status = "disabled";
+ };
+
+ regulator@4f00 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8916_l16";
+ reg = <0x4f00 0x100>;
+ status = "disabled";
+ };
+
+ regulator@5000 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8916_l17";
+ reg = <0x5000 0x100>;
+ status = "disabled";
+ };
+
+ regulator@5100 {
+ compatible = "qcom,qpnp-regulator";
+ regulator-name = "8916_l18";
+ reg = <0x5100 0x100>;
+ status = "disabled";
+ };
+
+ pm8916_pwm: pwm@bc00 {
+ compatible = "qcom,qpnp-pwm";
+ reg = <0xbc00 0x100>;
+ reg-names = "qpnp-lpg-channel-base";
+ qcom,channel-id = <0>;
+ qcom,supported-sizes = <6>, <9>;
+ };
+
+ pm8916_vib: qcom,vibrator@c000 {
+ compatible = "qcom,qpnp-vibrator";
+ reg = <0xc000 0x100>;
+ label = "vibrator";
+ status = "disabled";
+ };
+
+ pm8916_tombak_dig: msm8x16_wcd_codec@f000{
+ compatible = "qcom,wcd-spmi";
+ reg = <0xf000 0x100>;
+ interrupt-parent = <&spmi_bus>;
+ interrupts = <0x1 0xf0 0x0>,
+ <0x1 0xf0 0x1>,
+ <0x1 0xf0 0x2>,
+ <0x1 0xf0 0x3>,
+ <0x1 0xf0 0x4>,
+ <0x1 0xf0 0x5>,
+ <0x1 0xf0 0x6>,
+ <0x1 0xf0 0x7>;
+ interrupt-names = "spk_cnp_int",
+ "spk_clip_int",
+ "spk_ocp_int",
+ "ins_rem_det1",
+ "but_rel_det",
+ "but_press_det",
+ "ins_rem_det",
+ "mbhc_int";
+
+ cdc-vdda-cp-supply = <&pm8916_s4>;
+ qcom,cdc-vdda-cp-voltage = <1800000 2200000>;
+ qcom,cdc-vdda-cp-current = <770000>;
+
+ cdc-vdda-h-supply = <&pm8916_l5>;
+ qcom,cdc-vdda-h-voltage = <1800000 1800000>;
+ qcom,cdc-vdda-h-current = <20000>;
+
+ cdc-vdd-px-supply = <&pm8916_s4>;
+ qcom,cdc-vdd-px-voltage = <1800000 2200000>;
+ qcom,cdc-vdd-px-current = <770000>;
+
+ cdc-vdd-pa-supply = <&pm8916_l5>;
+ qcom,cdc-vdd-pa-voltage = <1800000 1800000>;
+ qcom,cdc-vdd-pa-current = <5000>;
+
+ cdc-vdd-mic-bias-supply = <&pm8916_l13>;
+ qcom,cdc-vdd-mic-bias-voltage = <3075000 3075000>;
+ qcom,cdc-vdd-mic-bias-current = <25000>;
+
+ qcom,cdc-mclk-clk-rate = <9600000>;
+
+ qcom,cdc-static-supplies = "cdc-vdda-h",
+ "cdc-vdd-px",
+ "cdc-vdd-pa",
+ "cdc-vdda-cp";
+
+ qcom,cdc-on-demand-supplies = "cdc-vdd-mic-bias";
+ };
+
+ pm8916_tombak_analog: msm8x16_wcd_codec@f100{
+ compatible = "qcom,wcd-spmi";
+ reg = <0xf100 0x100>;
+ interrupt-parent = <&spmi_bus>;
+ interrupts = <0x1 0xf1 0x0>,
+ <0x1 0xf1 0x1>,
+ <0x1 0xf1 0x2>,
+ <0x1 0xf1 0x3>,
+ <0x1 0xf1 0x4>,
+ <0x1 0xf1 0x5>;
+ interrupt-names = "ear_ocp_int",
+ "hphr_ocp_int",
+ "hphl_ocp_det",
+ "ear_cnp_int",
+ "hphr_cnp_int",
+ "hphl_cnp_int";
+ };
+
+ pm8916_bcm: qpnp-buck-current-monitor@1800 {
+ compatible = "qcom,qpnp-buck-current-monitor";
+ reg = <0x1800 0x100>;
+ interrupts = <1 0x18 0>, <1 0x18 1>;
+ interrupt-names = "iwarning", "icritical";
+ qcom,enable-current-monitor;
+ qcom,icrit-init-threshold-pc = <90>;
+ qcom,iwarn-init-threshold-pc = <70>;
+ qcom,icrit-polling-delay-msec = <1000>;
+ qcom,iwarn-polling-delay-msec = <2000>;
+
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/msm-rdbg.dtsi b/arch/arm/boot/dts/msm-rdbg.dtsi
new file mode 100644
index 000000000000..f7f52bed111c
--- /dev/null
+++ b/arch/arm/boot/dts/msm-rdbg.dtsi
@@ -0,0 +1,75 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ smp2pgpio_rdbg_2_in: qcom,smp2pgpio-rdbg-2-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "rdbg";
+ qcom,remote-pid = <2>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_client_rdbg_2_in {
+ compatible = "qcom,smp2pgpio_client_rdbg_2_in";
+ gpios = <&smp2pgpio_rdbg_2_in 0 0>;
+ };
+
+ smp2pgpio_rdbg_2_out: qcom,smp2pgpio-rdbg-2-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "rdbg";
+ qcom,remote-pid = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_client_rdbg_2_out {
+ compatible = "qcom,smp2pgpio_client_rdbg_2_out";
+ gpios = <&smp2pgpio_rdbg_2_out 0 0>;
+ };
+
+ smp2pgpio_rdbg_1_in: qcom,smp2pgpio-rdbg-1-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "rdbg";
+ qcom,remote-pid = <1>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_client_rdbg_1_in {
+ compatible = "qcom,smp2pgpio_client_rdbg_1_in";
+ gpios = <&smp2pgpio_rdbg_1_in 0 0>;
+ };
+
+ smp2pgpio_rdbg_1_out: qcom,smp2pgpio-rdbg-1-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "rdbg";
+ qcom,remote-pid = <1>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ qcom,smp2pgpio_client_rdbg_1_out {
+ compatible = "qcom,smp2pgpio_client_rdbg_1_out";
+ gpios = <&smp2pgpio_rdbg_1_out 0 0>;
+ };
+};
diff --git a/arch/arm/boot/dts/msm8916-bus.dtsi b/arch/arm/boot/dts/msm8916-bus.dtsi
new file mode 100644
index 000000000000..21ae7a929d1d
--- /dev/null
+++ b/arch/arm/boot/dts/msm8916-bus.dtsi
@@ -0,0 +1,877 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/msm/msm-bus-rule-ops.h>
+
+&soc {
+ ad_hoc_bus: ad-hoc-bus { };
+
+ static-rules {
+ compatible = "qcom,msm-bus-static-bw-rules";
+
+ rule0 {
+ qcom,src-nodes = <&mas_apss>;
+ qcom,src-field = <FLD_IB>;
+ qcom,src-op = <OP_LE>;
+ qcom,thresh = <1600000>;
+ qcom,mode = <THROTTLE_ON>;
+ qcom,dest-node = <&mas_apss>;
+ qcom,dest-bw = <600000>;
+ };
+
+
+ rule1 {
+ qcom,src-nodes = <&mas_apss>;
+ qcom,src-field = <FLD_IB>;
+ qcom,src-op = <OP_LE>;
+ qcom,thresh = <3200000>;
+ qcom,mode = <THROTTLE_ON>;
+ qcom,dest-node = <&mas_apss>;
+ qcom,dest-bw = <1200000>;
+ };
+
+ rule2 {
+ qcom,src-nodes = <&mas_apss>;
+ qcom,src-field = <FLD_IB>;
+ qcom,src-op = <OP_GT>;
+ qcom,thresh = <3200000>;
+ qcom,mode = <THROTTLE_OFF>;
+ qcom,dest-node = <&mas_apss>;
+ };
+
+ rule3 {
+ qcom,src-nodes = <&mas_gfx>;
+ qcom,src-field = <FLD_IB>;
+ qcom,src-op = <OP_LE>;
+ qcom,thresh = <1600000>;
+ qcom,mode = <THROTTLE_ON>;
+ qcom,dest-node = <&mas_gfx>;
+ qcom,dest-bw = <600000>;
+ };
+
+ rule4 {
+ qcom,src-nodes = <&mas_gfx>;
+ qcom,src-field = <FLD_IB>;
+ qcom,src-op = <OP_LE>;
+ qcom,thresh = <3200000>;
+ qcom,mode = <THROTTLE_ON>;
+ qcom,dest-node = <&mas_gfx>;
+ qcom,dest-bw = <1200000>;
+ };
+
+ rule5 {
+ qcom,src-nodes = <&mas_gfx>;
+ qcom,src-field = <FLD_IB>;
+ qcom,src-op = <OP_GT>;
+ qcom,thresh = <3200000>;
+ qcom,mode = <THROTTLE_OFF>;
+ qcom,dest-node = <&mas_gfx>;
+ };
+ };
+};
+
+&ad_hoc_bus {
+ compatible = "qcom,msm-bus-device";
+ reg = <0x580000 0x14000>,
+ <0x400000 0x62000>,
+ <0x500000 0x11000>;
+ reg-names = "snoc-base", "bimc-base", "pnoc-base";
+
+ fab_snoc: fab-snoc {
+ cell-id = <1024>;
+ label = "fab-snoc";
+ qcom,fab-dev;
+ qcom,base-name = "snoc-base";
+ qcom,base-offset = <0x7000>;
+ qcom,qos-off = <0x1000>;
+ qcom,bus-type = <1>;
+ clock-names = "bus_clk", "bus_a_clk";
+ clocks = <&clock_rpm clk_snoc_msmbus_clk>,
+ <&clock_rpm clk_snoc_msmbus_a_clk>;
+
+ coresight-id = <50>;
+ coresight-name = "coresight-snoc";
+ coresight-nr-inports = <0>;
+ coresight-outports = <0>;
+ coresight-child-list = <&funnel_in2>;
+ coresight-child-ports = <5>;
+ };
+
+ fab_bimc: fab-bimc {
+ cell-id = <0>;
+ label = "fab-bimc";
+ qcom,fab-dev;
+ qcom,base-name = "bimc-base";
+ qcom,bus-type = <2>;
+ clock-names = "bus_clk", "bus_a_clk";
+ clocks = <&clock_rpm clk_bimc_msmbus_clk>,
+ <&clock_rpm clk_bimc_msmbus_a_clk>;
+
+ coresight-id = <55>;
+ coresight-name = "coresight-bimc";
+ coresight-nr-inports = <0>;
+ coresight-outports = <0>;
+ coresight-child-list = <&funnel_in2>;
+ coresight-child-ports = <3>;
+ };
+
+ fab_pnoc: fab-pnoc {
+ cell-id = <4096>;
+ label = "fab-pnoc";
+ qcom,fab-dev;
+ qcom,base-name = "pnoc-base";
+ qcom,base-offset = <0x7000>;
+ qcom,qos-delta = <0x1000>;
+ qcom,bus-type = <1>;
+ clock-names = "bus_clk", "bus_a_clk";
+ clocks = <&clock_rpm clk_pcnoc_msmbus_clk>,
+ <&clock_rpm clk_pcnoc_msmbus_a_clk>;
+
+ coresight-id = <54>;
+ coresight-name = "coresight-pnoc";
+ coresight-nr-inports = <0>;
+ coresight-outports = <0>;
+ coresight-child-list = <&funnel_in2>;
+ coresight-child-ports = <6>;
+ };
+
+ /* SNOC Devices */
+ mas_video: mas-video {
+ cell-id = <63>;
+ label = "mas-video";
+ qcom,qport = <8>;
+ qcom,ap-owned;
+ qcom,connections = <&mm_int_0 &mm_int_2>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,qos-mode = "bypass";
+ qcom,buswidth = <16>;
+ };
+
+ mas_jpeg: mas-jpeg {
+ cell-id = <62>;
+ label = "mas-jpeg";
+ qcom,ap-owned;
+ qcom,qport = <6>;
+ qcom,connections = <&mm_int_0 &mm_int_2>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,qos-mode = "bypass";
+ qcom,buswidth = <16>;
+ };
+
+ mas_vfe: mas-vfe {
+ cell-id = <29>;
+ label = "mas-vfe";
+ qcom,ap-owned;
+ qcom,qport = <9>;
+ qcom,connections = <&mm_int_1 &mm_int_2>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,qos-mode = "bypass";
+ qcom,buswidth = <16>;
+ };
+
+ mas_mdp: mas-mdp {
+ cell-id = <22>;
+ label = "mas-mdp";
+ qcom,ap-owned;
+ qcom,connections = <&mm_int_0 &mm_int_2>;
+ qcom,qport = <7>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,qos-mode = "bypass";
+ qcom,buswidth = <16>;
+ };
+
+ mas_qdss_bam: mas-qdss-bam {
+ cell-id = <53>;
+ label = "mas-qdss-bam";
+ qcom,connections = <&qdss_int>;
+ qcom,qport = <11>;
+ qcom,bus-dev = <&fab_snoc>;
+		qcom,buswidth = <4>;
+ qcom,ap-owned;
+ qcom,qos-mode = "fixed";
+ qcom,prio1 = <1>;
+ qcom,prio0 = <1>;
+ };
+
+ mas_snoc_cfg: mas-snoc-cfg {
+ cell-id = <54>;
+ label = "mas-snoc-cfg";
+ qcom,connections = <&qdss_int>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,qos-mode = "bypass";
+		qcom,buswidth = <4>;
+ qcom,mas-rpm-id = <20>;
+ };
+
+ mas_qdss_etr: mas-qdss-etr {
+ cell-id = <60>;
+ label = "mas-qdss-etr";
+ qcom,connections = <&qdss_int>;
+ qcom,qport = <10>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,qos-mode = "fixed";
+ qcom,prio1 = <1>;
+ qcom,prio0 = <1>;
+		qcom,buswidth = <8>;
+ qcom,ap-owned;
+ };
+
+ mm_int_0: mm-int-0 {
+ cell-id = <10000>;
+ label = "mm-int-0";
+ qcom,ap-owned;
+ qcom,connections = <&mm_int_bimc>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,buswidth = <16>;
+ };
+
+ mm_int_1: mm-int-1 {
+ cell-id = <10001>;
+ label = "mm-int1";
+ qcom,ap-owned;
+ qcom,connections = <&mm_int_bimc>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,buswidth = <16>;
+ };
+
+ mm_int_2: mm-int-2 {
+ cell-id = <10002>;
+ label = "mm-int2";
+ qcom,ap-owned;
+ qcom,connections = <&snoc_int_0>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,buswidth = <16>;
+ };
+
+ mm_int_bimc: mm-int-bimc {
+ cell-id = <10003>;
+ label = "mm-int-bimc";
+ qcom,ap-owned;
+ qcom,connections = <&snoc_bimc_1_mas>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,buswidth = <16>;
+ };
+
+ snoc_int_0: snoc-int-0 {
+ cell-id = <10004>;
+ label = "snoc-int-0";
+ qcom,connections = <&slv_qdss_stm &slv_imem &snoc_pnoc_mas>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,mas-rpm-id = <99>;
+ qcom,slv-rpm-id = <130>;
+ qcom,buswidth = <8>;
+ };
+
+ snoc_int_1: snoc-int-1 {
+ cell-id = <10005>;
+ label = "snoc-int-1";
+ qcom,connections = <&slv_apss &slv_cats_0 &slv_cats_1>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,mas-rpm-id = <100>;
+ qcom,slv-rpm-id = <131>;
+ qcom,buswidth = <8>;
+ };
+
+	snoc_int_bimc: snoc-int-bimc {
+ cell-id = <10006>;
+ label = "snoc-bimc";
+ qcom,connections = <&snoc_bimc_0_mas>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,mas-rpm-id = <101>;
+ qcom,slv-rpm-id = <132>;
+ qcom,buswidth = <8>;
+ };
+
+ snoc_bimc_0_mas: snoc-bimc-0-mas {
+ cell-id = <10007>;
+ label = "snoc-bimc-0-mas";
+ qcom,connections = <&snoc_bimc_0_slv>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,mas-rpm-id = <3>;
+ qcom,buswidth = <8>;
+ };
+
+ snoc_bimc_1_mas: snoc-bimc-1-mas {
+ cell-id = <10008>;
+ label = "snoc-bimc-1-mas";
+ qcom,connections = <&snoc_bimc_1_slv>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,ap-owned;
+ qcom,buswidth = <16>;
+ };
+
+ qdss_int: qdss-int {
+ cell-id = <10009>;
+ label = "qdss-int";
+ qcom,ap-owned;
+ qcom,connections = <&snoc_int_0 &snoc_int_bimc>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,buswidth = <8>;
+ };
+
+ bimc_snoc_slv: bimc-snoc-slv {
+ cell-id = <10017>;
+ label = "bimc_snoc_slv";
+ qcom,ap-owned;
+ qcom,connections = <&snoc_int_0 &snoc_int_1>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,buswidth = <8>;
+ };
+
+ snoc_pnoc_mas: snoc-pnoc-mas {
+ cell-id = <10027>;
+ label = "snoc-pnoc-mas";
+ qcom,connections = <&snoc_pnoc_slv>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,buswidth = <8>;
+ };
+
+ pnoc_snoc_slv: pnoc-snoc-slv {
+ cell-id = <10011>;
+ label = "snoc-pnoc";
+ qcom,connections = <&snoc_int_0 &snoc_int_bimc &snoc_int_1>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,slv-rpm-id = <45>;
+ qcom,buswidth = <8>;
+ };
+
+ slv_srvc_snoc: slv-srvc-snoc {
+ cell-id = <587>;
+ label = "snoc-srvc-snoc";
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,slv-rpm-id = <29>;
+ qcom,buswidth = <8>;
+ };
+
+ slv_qdss_stm: slv-qdss-stm {
+ cell-id = <588>;
+ label = "snoc-qdss-stm";
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,buswidth = <4>;
+ qcom,slv-rpm-id = <30>;
+ };
+
+ slv_imem: slv-imem {
+ cell-id = <519>;
+ label = "slv_imem";
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,buswidth = <8>;
+ qcom,slv-rpm-id = <26>;
+ };
+
+ slv_apss: slv-apss {
+ cell-id = <517>;
+ label = "slv_apss";
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,slv-rpm-id = <20>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_cats_0: slv-cats-0 {
+ cell-id = <663>;
+ label = "slv-cats-0";
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,slv-rpm-id = <106>;
+ qcom,buswidth = <16>;
+ };
+
+ slv_cats_1: slv-cats-1 {
+ cell-id = <664>;
+ label = "slv-cats-1";
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,slv-rpm-id = <107>;
+ qcom,buswidth = <8>;
+ };
+
+ /* BIMC nodes */
+ mas_apss: mas-apss {
+ cell-id = <1>;
+ label = "mas-apss";
+ qcom,ap-owned;
+ qcom,connections = <&slv_ebi_ch0 &bimc_snoc_mas &slv_apps_l2>;
+ qcom,qport = <0>;
+ qcom,bus-dev = <&fab_bimc>;
+ qcom,qos-mode = "fixed";
+ qcom,prio-lvl = <0>;
+ qcom,prio-rd = <0>;
+ qcom,prio-wr = <0>;
+ qcom,ws = <10000>;
+ qcom,gp = <5000>;
+ qcom,thmp = <50>;
+ qcom,buswidth = <8>;
+ };
+
+ mas_tcu0: mas-tcu0 {
+ cell-id = <104>;
+ label = "mas-tcu0";
+ qcom,ap-owned;
+ qcom,connections = <&slv_ebi_ch0 &bimc_snoc_mas &slv_apps_l2>;
+ qcom,qport = <5>;
+ qcom,bus-dev = <&fab_bimc>;
+ qcom,qos-mode = "fixed";
+ qcom,prio-lvl = <2>;
+ qcom,prio-rd = <2>;
+ qcom,prio-wr = <2>;
+ qcom,buswidth = <8>;
+ };
+
+ mas_tcu1: mas-tcu1 {
+ cell-id = <105>;
+ label = "mas-tcu1";
+ qcom,ap-owned;
+ qcom,connections = <&slv_ebi_ch0 &bimc_snoc_mas &slv_apps_l2>;
+ qcom,qport = <6>;
+ qcom,bus-dev = <&fab_bimc>;
+ qcom,qos-mode = "fixed";
+ qcom,prio-lvl = <2>;
+ qcom,prio-rd = <2>;
+ qcom,prio-wr = <2>;
+ qcom,buswidth = <8>;
+ };
+
+ mas_gfx: mas-gfx {
+ cell-id = <26>;
+ label = "mas-gfx";
+ qcom,ap-owned;
+ qcom,connections = <&slv_ebi_ch0 &bimc_snoc_mas &slv_apps_l2>;
+ qcom,qport = <2>;
+ qcom,bus-dev = <&fab_bimc>;
+ qcom,qos-mode = "fixed";
+ qcom,prio-lvl = <0>;
+ qcom,prio-rd = <0>;
+ qcom,prio-wr = <0>;
+ qcom,buswidth = <8>;
+ qcom,ws = <10000>;
+ qcom,gp = <5000>;
+ qcom,thmp = <50>;
+ };
+
+ bimc_snoc_mas: bimc-snoc-mas {
+ cell-id = <10016>;
+ label = "bimc_snoc_mas";
+ qcom,ap-owned;
+ qcom,bus-dev = <&fab_bimc>;
+ qcom,connections = <&bimc_snoc_slv>;
+ qcom,buswidth = <8>;
+ };
+
+ snoc_bimc_0_slv: snoc-bimc-0-slv {
+ cell-id = <10025>;
+ label = "snoc_bimc_0_slv";
+ qcom,connections = <&slv_ebi_ch0>;
+ qcom,bus-dev = <&fab_bimc>;
+ qcom,slv-rpm-id = <24>;
+ qcom,buswidth = <8>;
+ };
+
+ /* node name uses hyphens like all sibling nodes (was snoc_bimc_1_slv) */
+ snoc_bimc_1_slv: snoc-bimc-1-slv {
+ cell-id = <10026>;
+ label = "snoc_bimc_1_slv";
+ qcom,connections = <&slv_ebi_ch0>;
+ qcom,ap-owned;
+ qcom,bus-dev = <&fab_bimc>;
+ qcom,buswidth = <8>;
+ };
+
+ slv_ebi_ch0: slv-ebi-ch0 {
+ cell-id = <512>;
+ label = "slv-ebi-ch0";
+ qcom,bus-dev = <&fab_bimc>;
+ qcom,slv-rpm-id = <0>;
+ qcom,buswidth = <8>;
+ };
+
+ slv_apps_l2: slv-apps-l2 {
+ cell-id = <514>;
+ label = "slv-apps-l2";
+ qcom,bus-dev = <&fab_bimc>;
+ qcom,buswidth = <8>;
+ };
+
+ /* PNOC nodes */
+ snoc_pnoc_slv: snoc-pnoc-slv {
+ cell-id = <10028>;
+ label = "snoc-pnoc-slv";
+ qcom,connections = <&pnoc_int_0>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <8>;
+ };
+
+ pnoc_int_0: pnoc-int-0 {
+ cell-id = <10012>;
+ label = "pnoc-int-0";
+ qcom,connections = <&pnoc_snoc_mas &pnoc_s_0 &pnoc_s_1 &pnoc_s_2
+ &pnoc_s_3 &pnoc_s_4 &pnoc_s_8 &pnoc_s_9>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <8>;
+ };
+
+ pnoc_int_1: pnoc-int-1 {
+ cell-id = <10013>;
+ label = "pnoc-int-1";
+ qcom,connections = <&pnoc_snoc_mas>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <8>;
+ };
+
+ pnoc_m_0: pnoc-m-0 {
+ cell-id = <10014>;
+ label = "pnoc-m-0";
+ qcom,connections = <&pnoc_int_0>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <8>;
+ };
+
+ pnoc_m_1: pnoc-m-1 {
+ cell-id = <10015>;
+ label = "pnoc-m-1";
+ qcom,connections = <&pnoc_snoc_mas>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <8>;
+ };
+
+ pnoc_s_0: pnoc-s-0 {
+ cell-id = <10018>;
+ label = "pnoc-s-0";
+ qcom,connections = <&slv_clk_ctl &slv_tlmm &slv_tcsr
+ &slv_security &slv_mss>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ pnoc_s_1: pnoc-s-1 {
+ cell-id = <10019>;
+ label = "pnoc-s-1";
+ qcom,connections = <&slv_imem_cfg &slv_crypto_0_cfg
+ &slv_msg_ram &slv_pdm &slv_prng>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ pnoc_s_2: pnoc-s-2 {
+ cell-id = <10020>;
+ label = "pnoc-s-2";
+ qcom,connections = <&slv_spdm &slv_boot_rom &slv_bimc_cfg
+ &slv_pnoc_cfg &slv_pmic_arb>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ pnoc_s_3: pnoc-s-3 {
+ cell-id = <10021>;
+ label = "pnoc-s-3";
+ qcom,connections = <&slv_mpm &slv_snoc_cfg &slv_rbcpr_cfg
+ &slv_qdss_cfg &slv_dehr_cfg>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ pnoc_s_4: pnoc-s-4 {
+ cell-id = <10022>;
+ label = "pnoc-s-4";
+ qcom,connections = <&slv_venus_cfg &slv_camera_cfg
+ &slv_display_cfg>;
+ qcom,bus-dev = <&fab_pnoc>;
+ /* buswidth was missing; <4> matches every other pnoc_s_* slave port */
+ qcom,buswidth = <4>;
+ };
+
+ pnoc_s_8: pnoc-s-8 {
+ cell-id = <10023>;
+ label = "pnoc-s-8";
+ qcom,connections = <&slv_usb_hs &slv_sdcc_1 &slv_blsp_1>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ pnoc_s_9: pnoc-s-9 {
+ cell-id = <10024>;
+ label = "pnoc-s-9";
+ qcom,connections = <&slv_sdcc_2 &slv_audio &slv_gfx_cfg>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_imem_cfg: slv-imem-cfg {
+ cell-id = <627>;
+ label = "slv-imem-cfg";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_crypto_0_cfg: slv-crypto-0-cfg {
+ cell-id = <625>;
+ label = "slv-crypto-0-cfg";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_msg_ram: slv-msg-ram {
+ cell-id = <535>;
+ label = "slv-msg-ram";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_pdm: slv-pdm {
+ cell-id = <577>;
+ label = "slv-pdm";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_prng: slv-prng {
+ cell-id = <618>;
+ label = "slv-prng";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_clk_ctl: slv-clk-ctl {
+ cell-id = <620>;
+ label = "slv-clk-ctl";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_mss: slv-mss {
+ cell-id = <521>;
+ label = "slv-mss";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_tlmm: slv-tlmm {
+ cell-id = <624>;
+ label = "slv-tlmm";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_tcsr: slv-tcsr {
+ cell-id = <579>;
+ label = "slv-tcsr";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_security: slv-security {
+ cell-id = <622>;
+ label = "slv-security";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_spdm: slv-spdm {
+ cell-id = <533>;
+ label = "slv-spdm";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_pnoc_cfg: slv-pnoc-cfg {
+ cell-id = <641>;
+ label = "slv-pnoc-cfg";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_pmic_arb: slv-pmic-arb {
+ cell-id = <632>;
+ label = "slv-pmic-arb";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_bimc_cfg: slv-bimc-cfg {
+ cell-id = <629>;
+ label = "slv-bimc-cfg";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_boot_rom: slv-boot-rom {
+ cell-id = <630>;
+ label = "slv-boot-rom";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_mpm: slv-mpm {
+ cell-id = <536>;
+ label = "slv-mpm";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_qdss_cfg: slv-qdss-cfg {
+ cell-id = <635>;
+ label = "slv-qdss-cfg";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_rbcpr_cfg: slv-rbcpr-cfg {
+ cell-id = <636>;
+ label = "slv-rbcpr-cfg";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_snoc_cfg: slv-snoc-cfg {
+ cell-id = <647>;
+ label = "slv-snoc-cfg";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_dehr_cfg: slv-dehr-cfg {
+ cell-id = <634>;
+ label = "slv-dehr-cfg";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_venus_cfg: slv-venus-cfg {
+ cell-id = <596>;
+ label = "slv-venus-cfg";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_display_cfg: slv-display-cfg {
+ cell-id = <590>;
+ label = "slv-display-cfg";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_camera_cfg: slv-camera-cfg {
+ cell-id = <589>;
+ /* label fixed: was "slv-camer-cfg" (typo) */
+ label = "slv-camera-cfg";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_usb_hs: slv-usb-hs {
+ cell-id = <614>;
+ label = "slv-usb-hs";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_sdcc_1: slv-sdcc-1 {
+ cell-id = <606>;
+ label = "slv-sdcc-1";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_blsp_1: slv-blsp-1 {
+ cell-id = <613>;
+ label = "slv-blsp-1";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_sdcc_2: slv-sdcc-2 {
+ cell-id = <609>;
+ label = "slv-sdcc-2";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_gfx_cfg: slv-gfx-cfg {
+ cell-id = <598>;
+ label = "slv-gfx-cfg";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_audio: slv-audio {
+ cell-id = <522>;
+ label = "slv-audio";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ mas_blsp_1: mas-blsp_1 {
+ cell-id = <86>;
+ label = "mas-blsp-1";
+ qcom,connections = <&pnoc_m_1>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ mas_spdm: mas-spdm {
+ cell-id = <36>;
+ label = "mas-spdm";
+ qcom,connections = <&pnoc_m_0>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ mas_dehr: mas-dehr {
+ cell-id = <75>;
+ label = "mas-dehr";
+ qcom,connections = <&pnoc_m_0>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ mas_audio: mas-audio {
+ cell-id = <15>;
+ label = "mas-audio";
+ qcom,connections = <&pnoc_m_0>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ mas_usb_hs: mas-usb-hs {
+ cell-id = <87>;
+ label = "mas-usb-hs";
+ qcom,connections = <&pnoc_m_1>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ mas_pnoc_crypto_0: mas-pnoc-crypto-0 {
+ cell-id = <55>;
+ label = "mas-pnoc-crypto-0";
+ qcom,connections = <&pnoc_int_1>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <8>;
+ };
+
+ mas_pnoc_sdcc_1: mas-pnoc-sdcc-1 {
+ cell-id = <78>;
+ label = "mas-pnoc-sdcc-1";
+ qcom,connections = <&pnoc_int_1>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <8>;
+ };
+
+ mas_pnoc_sdcc_2: mas-pnoc-sdcc-2 {
+ cell-id = <81>;
+ label = "mas-pnoc-sdcc-2";
+ qcom,connections = <&pnoc_int_1>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <8>;
+ };
+
+ pnoc_snoc_mas: pnoc-snoc-mas {
+ cell-id = <10010>;
+ label = "pnoc-snoc-mas";
+ qcom,connections = <&pnoc_snoc_slv>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,mas-rpm-id = <29>;
+ qcom,buswidth = <8>;
+ };
+};
diff --git a/arch/arm/boot/dts/msm8916-ipcrouter.dtsi b/arch/arm/boot/dts/msm8916-ipcrouter.dtsi
new file mode 100644
index 000000000000..3b59d159ea8d
--- /dev/null
+++ b/arch/arm/boot/dts/msm8916-ipcrouter.dtsi
@@ -0,0 +1,37 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ qcom,ipc_router {
+ compatible = "qcom,ipc_router";
+ qcom,node-id = <1>;
+ };
+
+ qcom,ipc_router_modem_xprt {
+ compatible = "qcom,ipc_router_smd_xprt";
+ qcom,ch-name = "IPCRTR";
+ qcom,xprt-remote = "modem";
+ qcom,xprt-linkid = <1>;
+ qcom,xprt-version = <1>;
+ qcom,fragmented-data;
+ };
+
+ qcom,ipc_router_wcnss_xprt {
+ compatible = "qcom,ipc_router_smd_xprt";
+ qcom,ch-name = "IPCRTR";
+ qcom,xprt-remote = "wcnss";
+ qcom,xprt-linkid = <1>;
+ qcom,xprt-version = <1>;
+ qcom,fragmented-data;
+ };
+
+};
diff --git a/arch/arm/boot/dts/msm8916-memory.dtsi b/arch/arm/boot/dts/msm8916-memory.dtsi
new file mode 100644
index 000000000000..385b660287cc
--- /dev/null
+++ b/arch/arm/boot/dts/msm8916-memory.dtsi
@@ -0,0 +1,34 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+//#include "msm8916-ion.dtsi"
+
+/ {
+ memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ };
+
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /* unit-address fixed: DT convention is hex without 0x, matching reg */
+ smem@86000000 {
+ reg = <0x0 0x86000000 0x0 0x0800000>;
+ no-map;
+ };
+ };
+
+
+};
diff --git a/arch/arm/boot/dts/msm8916-pinctrl.dtsi b/arch/arm/boot/dts/msm8916-pinctrl.dtsi
new file mode 100644
index 000000000000..dbcaa9824e4f
--- /dev/null
+++ b/arch/arm/boot/dts/msm8916-pinctrl.dtsi
@@ -0,0 +1,1374 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ tlmm_pinmux: pinctrl@1000000 {
+ compatible = "qcom,msm-tlmm-v4";
+ reg = <0x1000000 0x300000>;
+ interrupts = <0 208 0>;
+
+ /*General purpose pins*/
+ gp: gp {
+ qcom,pin-type-gp;
+ qcom,num-pins = <122>;
+ #qcom,pin-cells = <1>;
+ msm_gpio: msm_gpio {
+ compatible = "qcom,msm-tlmmv4-gp-intc";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ num_irqs = <122>;
+ };
+ };
+
+ ext-cdc-tlmm-lines {
+ qcom,pins = <&gp 116>, <&gp 112>, <&gp 117>,
+ <&gp 118>, <&gp 119>;
+ qcom,num-grp-pins = <5>;
+ qcom,pin-func = <1>;
+ label = "ext-cdc-tlmm-lines";
+ ext_cdc_tlmm_lines_act: tlmm_lines_on {
+ drive-strength = <8>;
+ bias-pull-none;
+ };
+ ext_cdc_tlmm_lines_sus: tlmm_lines_off {
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ ext-codec-lines {
+ qcom,pins = <&gp 67>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <0>;
+ label = "ext-codec-lines";
+ ext_codec_lines_act: lines_on {
+ drive-strength = <8>;
+ bias-disable;
+ output-high;
+ };
+ ext_codec_lines_sus: lines_off {
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ cdc-pdm-lines {
+ qcom,pins = <&gp 63>, <&gp 64>, <&gp 65>,
+ <&gp 66>, <&gp 67>, <&gp 68>;
+ qcom,num-grp-pins = <6>;
+ qcom,pin-func = <1>;
+ label = "cdc-pdm-lines";
+ cdc_pdm_lines_act: pdm_lines_on {
+ drive-strength = <8>;
+ bias-pull-none;
+ };
+ cdc_pdm_lines_sus: pdm_lines_off {
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ cdc-ext-pa-lines {
+ qcom,pins = <&gp 113>, <&gp 114>,
+ <&gp 115>, <&gp 116>;
+ qcom,num-grp-pins = <4>;
+ qcom,pin-func = <1>;
+ label = "cdc-ext-pa-lines";
+ cdc_ext_pa_act: ext_pa_on {
+ drive-strength = <8>;
+ bias-pull-none;
+ };
+ cdc_ext_pa_sus: ext_pa_off {
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ cdc-ext-pa-ws-line {
+ qcom,pins = <&gp 110>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <2>;
+ label = "cdc-ext-pa-ws-line";
+ cdc_ext_pa_ws_act: ext_pa_on {
+ drive-strength = <8>;
+ bias-pull-none;
+ };
+ cdc_ext_pa_ws_sus: ext_pa_off {
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ cross-conn-det {
+ qcom,pins = <&gp 120>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <0>;
+ label = "cross-conn-det-sw";
+ cross_conn_det_act: lines_on {
+ drive-strength = <8>;
+ bias-pull-none;
+ };
+ cross_conn_det_sus: lines_off {
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ pmx-uartconsole {
+ qcom,pins = <&gp 4>, <&gp 5>;
+ qcom,num-grp-pins = <2>;
+ qcom,pin-func = <2>;
+ label = "uart-console";
+
+ uart_console_sleep: uart-console {
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ blsp1_uart1_active {
+ qcom,pins = <&gp 0>, <&gp 1>, <&gp 2>, <&gp 3>;
+ qcom,num-grp-pins = <4>;
+ qcom,pin-func = <2>;
+ label = "blsp1_uart1_active";
+
+ hsuart_active: default {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ blsp1_uart1_sleep {
+ qcom,pins = <&gp 0>, <&gp 1>, <&gp 2>, <&gp 3>;
+ qcom,num-grp-pins = <4>;
+ qcom,pin-func = <0>;
+ label = "blsp1_uart1_sleep";
+
+ hsuart_sleep: sleep {
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ sdhc2_cd_pin {
+ qcom,pins = <&gp 38>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <0>;
+ label = "cd-gpio";
+ sdc2_cd_on: cd_on {
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ sdc2_cd_off: cd_off {
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ /* SDC pin type */
+ sdc: sdc {
+ qcom,pin-type-sdc;
+ /* 0-2 for sdc1 4-6 for sdc2 */
+ qcom,num-pins = <7>;
+ /* Order of pins */
+ /* SDC1: CLK -> 0, CMD -> 1, DATA -> 2 */
+ /* SDC2: CLK -> 4, CMD -> 5, DATA -> 6 */
+ #qcom,pin-cells = <1>;
+ };
+
+ pmx_sdc1_clk {
+ qcom,pins = <&sdc 0>;
+ qcom,num-grp-pins = <1>;
+ label = "sdc1-clk";
+ sdc1_clk_on: clk_on {
+ bias-disable; /* NO pull */
+ drive-strength = <16>; /* 16 MA */
+ };
+ sdc1_clk_off: clk_off {
+ bias-disable; /* NO pull */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ pmx_sdc1_cmd {
+ qcom,pins = <&sdc 1>;
+ qcom,num-grp-pins = <1>;
+ label = "sdc1-cmd";
+ sdc1_cmd_on: cmd_on {
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ sdc1_cmd_off: cmd_off {
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ pmx_sdc1_data {
+ qcom,pins = <&sdc 2>;
+ qcom,num-grp-pins = <1>;
+ label = "sdc1-data";
+ sdc1_data_on: data_on {
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ sdc1_data_off: data_off {
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ pmx_sdc2_clk {
+ qcom,pins = <&sdc 4>;
+ qcom,num-grp-pins = <1>;
+ label = "sdc2-clk";
+ sdc2_clk_on: clk_on {
+ bias-disable; /* NO pull */
+ drive-strength = <16>; /* 16 MA */
+ };
+ sdc2_clk_off: clk_off {
+ bias-disable; /* NO pull */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ pmx_sdc2_cmd {
+ qcom,pins = <&sdc 5>;
+ qcom,num-grp-pins = <1>;
+ label = "sdc2-cmd";
+ sdc2_cmd_on: cmd_on {
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ sdc2_cmd_off: cmd_off {
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ pmx_sdc2_data {
+ qcom,pins = <&sdc 6>;
+ qcom,num-grp-pins = <1>;
+ label = "sdc2-data";
+ sdc2_data_on: data_on {
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ sdc2_data_off: data_off {
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ usb-id-pin {
+ qcom,pins = <&gp 110>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <0>;
+ label = "usb-id-pin";
+ usbid_default: default {
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+ };
+
+ spi0_active {
+ /* MOSI, MISO, CLK */
+ qcom,pins = <&gp 8>, <&gp 9>, <&gp 11>;
+ qcom,num-grp-pins = <3>;
+ qcom,pin-func = <1>;
+ label = "spi0-active";
+ /* active state */
+ spi0_default: default {
+ drive-strength = <12>; /* 12 MA */
+ bias-disable = <0>; /* No PULL */
+ };
+ };
+
+ spi0_suspend {
+ /* MOSI, MISO, CLK */
+ qcom,pins = <&gp 8>, <&gp 9>, <&gp 11>;
+ qcom,num-grp-pins = <3>;
+ qcom,pin-func = <0>;
+ label = "spi0-suspend";
+ /* suspended state */
+ spi0_sleep: sleep {
+ drive-strength = <2>; /* 2 MA */
+ bias-pull-down; /* pull down */
+ };
+ };
+ spi0_cs0_active {
+ /* CS */
+ qcom,pins = <&gp 10>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <1>;
+ label = "spi0-cs0-active";
+ spi0_cs0_active: cs0_active {
+ drive-strength = <2>;
+ bias-disable = <0>;
+ };
+ };
+
+
+ spi0_cs0_suspend {
+ /* CS */
+ qcom,pins = <&gp 10>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <0>;
+ label = "spi0-cs0-suspend";
+ spi0_cs0_sleep: cs0_sleep {
+ drive-strength = <2>;
+ bias-disable = <0>;
+ };
+ };
+
+ ice40-spi-usb-pins {
+ qcom,pins = <&gp 0>, <&gp 1>, <&gp 3>, <&gp 114>;
+ qcom,num-grp-pins = <4>;
+ qcom,pin-func = <0>;
+ label = "ice40-spi-usb-pins";
+
+ /* active state */
+ ice40_default: default {
+ drive-strength = <8>;
+ bias-disable;
+ };
+
+ /* sleep state */
+ ice40_sleep: sleep {
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+
+ pmx_i2c_0 {
+ /* CLK, DATA */
+ qcom,pins = <&gp 7>, <&gp 6>;
+ qcom,num-grp-pins = <2>;
+ qcom,pin-func = <3>;
+ label = "pmx_i2c_0";
+
+ i2c_0_active: i2c_0_active {
+ drive-strength = <2>; /* 2 MA */
+ bias-disable; /* No PULL */
+ };
+
+ i2c_0_sleep: i2c_0_sleep {
+ drive-strength = <2>; /* 2 MA */
+ bias-disable; /* No PULL */
+ };
+ };
+
+ pmx_i2c_5 {
+ /* CLK, DATA */
+ qcom,pins = <&gp 19>, <&gp 18>;
+ qcom,num-grp-pins = <2>;
+ qcom,pin-func = <2>;
+ label = "pmx_i2c_5";
+
+ i2c_5_active: i2c_5_active {
+ drive-strength = <2>; /* 2 MA */
+ bias-disable = <0>; /* No PULL */
+ };
+
+ i2c_5_sleep: i2c_5_sleep {
+ drive-strength = <2>; /* 2 MA */
+ bias-disable = <0>; /* No PULL */
+ };
+ };
+
+ /* QDSD pin type */
+ qdsd: qdsd {
+ qcom,pin-type-qdsd;
+ /* 0-> clk, 1 -> cmd, 2->data0, 3->data1, 4->data2, 5->data3 */
+ qcom,num-pins = <6>;
+
+ #qcom,pin-cells = <1>;
+ };
+
+ pmx_qdsd_clk {
+ qcom,pins = <&qdsd 0>;
+ qcom,num-grp-pins = <1>;
+ label = "qdsd-clk";
+ qdsd_clk_sdcard: clk_sdcard {
+ bias-disable; /* NO pull */
+ drive-strength = <7>; /* 7 MA */
+ };
+ qdsd_clk_trace: clk_trace {
+ bias-pull-down; /* pull down */
+ drive-strength = <0>; /* 0 MA */
+ };
+ qdsd_clk_swdtrc: clk_swdtrc {
+ bias-pull-down; /* pull down */
+ drive-strength = <0>; /* 0 MA */
+ };
+ qdsd_clk_spmi: clk_spmi {
+ bias-pull-down; /* pull down */
+ drive-strength = <0>; /* 0 MA */
+ };
+ };
+
+ pmx_qdsd_cmd {
+ qcom,pins = <&qdsd 1>;
+ qcom,num-grp-pins = <1>;
+ label = "qdsd-cmd";
+ qdsd_cmd_sdcard: cmd_sdcard {
+ bias-pull-down; /* pull down */
+ drive-strength = <3>; /* 3 MA */
+ };
+ qdsd_cmd_trace: cmd_trace {
+ bias-pull-down; /* pull down */
+ drive-strength = <0>; /* 0 MA */
+ };
+ qdsd_cmd_swduart: cmd_uart {
+ bias-pull-up; /* pull up */
+ drive-strength = <0>; /* 0 MA */
+ };
+ qdsd_cmd_swdtrc: cmd_swdtrc {
+ bias-pull-up; /* pull up */
+ drive-strength = <0>; /* 0 MA */
+ };
+ qdsd_cmd_jtag: cmd_jtag {
+ bias-disable; /* NO pull */
+ drive-strength = <3>; /* 3 MA */
+ };
+ qdsd_cmd_spmi: cmd_spmi {
+ bias-pull-down; /* pull down */
+ drive-strength = <4>; /* 4 MA */
+ };
+ };
+
+ pmx_qdsd_data0 {
+ qcom,pins = <&qdsd 2>;
+ qcom,num-grp-pins = <1>;
+ label = "qdsd-data0";
+ qdsd_data0_sdcard: data0_sdcard {
+ bias-pull-down; /* pull down */
+ drive-strength = <3>; /* 3 MA */
+ };
+ qdsd_data0_trace: data0_trace {
+ bias-pull-down; /* pull down */
+ drive-strength = <0>; /* 0 MA */
+ };
+ qdsd_data0_swduart: data0_uart {
+ bias-pull-down; /* pull down */
+ drive-strength = <0>; /* 0 MA */
+ };
+ qdsd_data0_swdtrc: data0_swdtrc {
+ bias-pull-down; /* pull down */
+ drive-strength = <0>; /* 0 MA */
+ };
+ qdsd_data0_jtag: data0_jtag {
+ bias-pull-up; /* pull up */
+ drive-strength = <0>; /* 0 MA */
+ };
+ qdsd_data0_spmi: data0_spmi {
+ bias-pull-down; /* pull down */
+ drive-strength = <0>; /* 0 MA */
+ };
+ };
+
+ pmx_qdsd_data1 {
+ qcom,pins = <&qdsd 3>;
+ qcom,num-grp-pins = <1>;
+ label = "qdsd-data1";
+ qdsd_data1_sdcard: data1_sdcard {
+ bias-pull-down; /* pull down */
+ drive-strength = <3>; /* 3 MA */
+ };
+ qdsd_data1_trace: data1_trace {
+ bias-pull-down; /* pull down */
+ drive-strength = <0>; /* 0 MA */
+ };
+ qdsd_data1_swduart: data1_uart {
+ bias-pull-down; /* pull down */
+ drive-strength = <0>; /* 0 MA */
+ };
+ qdsd_data1_swdtrc: data1_swdtrc {
+ bias-pull-down; /* pull down */
+ drive-strength = <0>; /* 0 MA */
+ };
+ qdsd_data1_jtag: data1_jtag {
+ bias-pull-down; /* pull down */
+ drive-strength = <0>; /* 0 MA */
+ };
+ };
+
+ pmx_qdsd_data2 {
+ qcom,pins = <&qdsd 4>;
+ qcom,num-grp-pins = <1>;
+ label = "qdsd-data2";
+ qdsd_data2_sdcard: data2_sdcard {
+ bias-pull-down; /* pull down */
+ drive-strength = <3>; /* 3 MA */
+ };
+ qdsd_data2_trace: data2_trace {
+ bias-pull-down; /* pull down */
+ drive-strength = <0>; /* 0 MA */
+ };
+ qdsd_data2_swduart: data2_uart {
+ bias-pull-down; /* pull down */
+ drive-strength = <0>; /* 0 MA */
+ };
+ qdsd_data2_swdtrc: data2_swdtrc {
+ bias-pull-down; /* pull down */
+ drive-strength = <0>; /* 0 MA */
+ };
+ qdsd_data2_jtag: data2_jtag {
+ bias-pull-up; /* pull up */
+ drive-strength = <3>; /* 3 MA */
+ };
+ };
+
+ pmx_qdsd_data3 {
+ qcom,pins = <&qdsd 5>;
+ qcom,num-grp-pins = <1>;
+ label = "qdsd-data3";
+ qdsd_data3_sdcard: data3_sdcard {
+ bias-pull-down; /* pull down */
+ drive-strength = <3>; /* 3 MA */
+ };
+ qdsd_data3_trace: data3_trace {
+ bias-pull-down; /* pull down */
+ drive-strength = <0>; /* 0 MA */
+ };
+ qdsd_data3_swduart: data3_uart {
+ bias-pull-up; /* pull up */
+ drive-strength = <0>; /* 0 MA */
+ };
+ qdsd_data3_swdtrc: data3_swdtrc {
+ bias-pull-up; /* pull up */
+ drive-strength = <0>; /* 0 MA */
+ };
+ qdsd_data3_jtag: data3_jtag {
+ bias-pull-up; /* pull up */
+ drive-strength = <0>; /* 0 MA */
+ };
+ qdsd_data3_spmi: data3_spmi {
+ bias-pull-down; /* pull down */
+ drive-strength = <3>; /* 3 MA */
+ };
+ };
+
+ pmx_mdss: pmx_mdss {
+ label = "mdss-pins";
+ qcom,pin-func = <0>;
+ mdss_dsi_active: active {
+ drive-strength = <8>; /* 8 mA */
+ bias-disable = <0>; /* no pull */
+ output-high;
+ };
+ mdss_dsi_suspend: suspend {
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* pull down */
+ output-low;
+ };
+ };
+
+ wcnss_pmux_5wire: wcnss_pmux_5wire {
+ /* Uses general purpose pins */
+ qcom,pins = <&gp 40>, <&gp 41>,
+ <&gp 42>, <&gp 43>,
+ <&gp 44>;
+ qcom,num-grp-pins = <5>;
+ qcom,pin-func = <1>;
+ label = "wcnss_5wire_pins";
+ /* Active configuration of bus pins */
+ wcnss_default: wcnss_default {
+ drive-strength = <6>; /* 6 MA */
+ bias-pull-up; /* PULL UP */
+ };
+ wcnss_sleep: wcnss_sleep {
+ drive-strength = <2>; /* 2 MA */
+ bias-pull-down; /* PULL Down */
+ };
+ };
+
+ wcnss_pmux_gpio: wcnss_pmux_gpio {
+ /* Uses general purpose pins */
+ qcom,pins = <&gp 40>, <&gp 41>,
+ <&gp 42>, <&gp 43>,
+ <&gp 44>;
+ qcom,num-grp-pins = <5>;
+ qcom,pin-func = <0>;
+ label = "wcnss_5gpio_pins";
+ /* Active configuration of bus pins */
+ wcnss_gpio_default: wcnss_gpio_default {
+ drive-strength = <6>; /* 6 MA */
+ bias-pull-up; /* PULL UP */
+ };
+ };
+
+ pmx_i2c_6 {
+ /* CLK, DATA */
+ qcom,pins = <&gp 22>, <&gp 23>;
+ qcom,num-grp-pins = <2>;
+ qcom,pin-func = <2>;
+ label = "pmx_i2c_6";
+ /* active state */
+ i2c_6_active: i2c_6_active{
+ drive-strength = <2>; /* 2 MA */
+ bias-disable; /* No PULL */
+ };
+ /*suspended state */
+ i2c_6_sleep: i2c_6_sleep {
+ drive-strength = <2>; /* 2 MA */
+ bias-disable; /* No PULL */
+ };
+ };
+
+ pmx_rd_nfc_int{
+ qcom,pins = <&gp 21>;
+ qcom,pin-func = <0>;
+ qcom,num-grp-pins = <1>;
+ label = "pmx_nfc_int";
+
+ nfc_int_active: active {
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+
+ nfc_int_suspend: suspend {
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+ };
+
+ pmx_nfc_reset{
+ qcom,pins = <&gp 20>;
+ qcom,pin-func = <0>;
+ qcom,num-grp-pins = <1>;
+ label = "pmx_nfc_disable";
+
+ nfc_disable_active: active {
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+
+ nfc_disable_suspend: suspend {
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ pmx_mdss_te: pmx_mdss_te {
+ label = "mdss-te-pin";
+ qcom,pin-func = <1>;
+ mdss_te_active: active {
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* pull down */
+ input-debounce = <0>;
+ };
+ mdss_te_suspend: suspend {
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* pull down */
+ input-debounce = <0>;
+ };
+ };
+
+ /* CoreSight */
+ tpiu_seta_1 {
+ qcom,pins = <&gp 8>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <3>;
+ label = "tpiu-seta-1";
+ seta_1: seta {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_seta_2 {
+ qcom,pins = <&gp 9>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <2>;
+ label = "tpiu-seta-2";
+ seta_2: seta {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_seta_3 {
+ qcom,pins = <&gp 10>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <3>;
+ label = "tpiu-seta-3";
+ seta_3: seta {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_seta_4 {
+ qcom,pins = <&gp 39>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <2>;
+ label = "tpiu-seta-4";
+ seta_4: seta {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_seta_5 {
+ qcom,pins = <&gp 40>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <2>;
+ label = "tpiu-seta-5";
+ seta_5: seta {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_seta_6 {
+ qcom,pins = <&gp 41>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <2>;
+ label = "tpiu-seta-6";
+ seta_6: seta {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_seta_7 {
+ qcom,pins = <&gp 42>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <2>;
+ label = "tpiu-seta-7";
+ seta_7: seta {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_seta_8 {
+ qcom,pins = <&gp 43>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <3>;
+ label = "tpiu-seta-8";
+ seta_8: seta {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_seta_9 {
+ qcom,pins = <&gp 45>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <3>;
+ label = "tpiu-seta-9";
+ seta_9: seta {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_seta_10 {
+ qcom,pins = <&gp 46>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <2>;
+ label = "tpiu-seta-10";
+ seta_10: seta {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_seta_11 {
+ qcom,pins = <&gp 47>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <3>;
+ label = "tpiu-seta-11";
+ seta_11: seta {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_seta_12 {
+ qcom,pins = <&gp 48>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <2>;
+ label = "tpiu-seta-12";
+ seta_12: seta {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_seta_13 {
+ qcom,pins = <&gp 62>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <2>;
+ label = "tpiu-seta-13";
+ seta_13: seta {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_seta_14 {
+ qcom,pins = <&gp 69>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <2>;
+ label = "tpiu-seta-14";
+ seta_14: seta {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_seta_15 {
+ qcom,pins = <&gp 112>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <5>;
+ label = "tpiu-seta-15";
+ seta_15: seta {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_seta_16 {
+ qcom,pins = <&gp 113>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <9>;
+ label = "tpiu-seta-16";
+ seta_16: seta {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_seta_17 {
+ qcom,pins = <&gp 114>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <8>;
+ label = "tpiu-seta-17";
+ seta_17: seta {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_seta_18 {
+ qcom,pins = <&gp 115>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <8>;
+ label = "tpiu-seta-18";
+ seta_18: seta {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_setb_1 {
+ qcom,pins = <&gp 4>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <5>;
+ label = "tpiu-setb-1";
+ setb_1: setb {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_setb_2 {
+ qcom,pins = <&gp 5>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <5>;
+ label = "tpiu-setb-2";
+ setb_2: setb {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_setb_3 {
+ qcom,pins = <&gp 26>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <7>;
+ label = "tpiu-setb-3";
+ setb_3: setb {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_setb_4 {
+ qcom,pins = <&gp 27>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <9>;
+ label = "tpiu-setb-4";
+ setb_4: setb {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_setb_5 {
+ qcom,pins = <&gp 28>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <7>;
+ label = "tpiu-setb-5";
+ setb_5: setb {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_setb_6 {
+ qcom,pins = <&gp 29>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <7>;
+ label = "tpiu-setb-6";
+ setb_6: setb {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_setb_7 {
+ qcom,pins = <&gp 30>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <9>;
+ label = "tpiu-setb-7";
+ setb_7: setb {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_setb_8 {
+ qcom,pins = <&gp 31>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <10>;
+ label = "tpiu-setb-8";
+ setb_8: setb {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_setb_9 {
+ qcom,pins = <&gp 32>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <10>;
+ label = "tpiu-setb-9";
+ setb_9: setb {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_setb_10 {
+ qcom,pins = <&gp 33>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <9>;
+ label = "tpiu-setb-10";
+ setb_10: setb {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_setb_11 {
+ qcom,pins = <&gp 34>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <9>;
+ label = "tpiu-setb-11";
+ setb_11: setb {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_setb_12 {
+ qcom,pins = <&gp 35>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <9>;
+ label = "tpiu-setb-12";
+ setb_12: setb {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_setb_13 {
+ qcom,pins = <&gp 36>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <8>;
+ label = "tpiu-setb-13";
+ setb_13: setb {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_setb_14 {
+ qcom,pins = <&gp 37>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <7>;
+ label = "tpiu-setb-14";
+ setb_14: setb {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_setb_15 {
+ qcom,pins = <&gp 110>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <4>;
+ label = "tpiu-setb-15";
+ setb_15: setb {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_setb_16 {
+ qcom,pins = <&gp 111>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <1>;
+ label = "tpiu-setb-16";
+ setb_16: setb {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_setb_17 {
+ qcom,pins = <&gp 120>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <10>;
+ label = "tpiu-setb-17";
+ setb_17: setb {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tpiu_setb_18 {
+ qcom,pins = <&gp 121>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <11>;
+ label = "tpiu-setb-18";
+ setb_18: setb {
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ tlmm_gpio_key {
+ qcom,pins = <&gp 107>, <&gp 108>, <&gp 109>;
+ qcom,pin-func = <0>;
+ qcom,num-grp-pins = <3>;
+ label = "tlmm_gpio_key";
+ gpio_key_active: gpio_key_active {
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ gpio_key_suspend: gpio_key_suspend {
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ gpio_led_pins {
+ qcom,pins = <&gp 8>, <&gp 9>, <&gp 10>;
+ qcom,num-grp-pins = <3>;
+ label = "gpio-led-pins";
+ gpio_led_off: led_off {
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
+ };
+
+ /* add pingrp for touchscreen */
+ pmx_ts_int_active {
+ qcom,pins = <&gp 13>;
+ qcom,pin-func = <0>;
+ qcom,num-grp-pins = <1>;
+ label = "pmx_ts_int_active";
+
+ ts_int_active: ts_int_active {
+ drive-strength = <16>;
+ bias-pull-up;
+ };
+ };
+
+ pmx_ts_int_suspend {
+ qcom,pins = <&gp 13>;
+ qcom,pin-func = <0>;
+ qcom,num-grp-pins = <1>;
+ label = "pmx_ts_int_suspend";
+
+ ts_int_suspend: ts_int_suspend {
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ pmx_ts_reset_active {
+ qcom,pins = <&gp 12>;
+ qcom,pin-func = <0>;
+ qcom,num-grp-pins = <1>;
+ label = "pmx_ts_reset_active";
+
+ ts_reset_active: ts_reset_active {
+ drive-strength = <16>;
+ bias-pull-up;
+ };
+ };
+
+ pmx_ts_reset_suspend {
+ qcom,pins = <&gp 12>;
+ qcom,pin-func = <0>;
+ qcom,num-grp-pins = <1>;
+ label = "pmx_ts_reset_suspend";
+
+ ts_reset_suspend: ts_reset_suspend {
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+	cci0_pinmux {
+		/* CLK, DATA */
+		qcom,pins = <&gp 29>, <&gp 30>;
+		qcom,num-grp-pins = <2>;
+		qcom,pin-func = <1>;
+		label = "cci0";
+		/* active state */
+		cci0_default: default {
+			drive-strength = <2>; /* 2 MA */
+			bias-disable = <0>; /* No PULL */
+		};
+		/* suspended state */
+		cci0_sleep: sleep {
+			drive-strength = <2>; /* 2 MA */
+			bias-disable = <0>; /* No PULL */
+		};
+	};
+
+ cam_sensor_mclk0 {
+ /* MCLK */
+ qcom,pins = <&gp 26>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <1>;
+ label = "cam-sensor-mclk0";
+ /* active state */
+ cam_sensor_mclk0_default: default {
+ drive-strength = <2>; /* 2 MA */
+ bias-disable = <0>; /* No PULL */
+ };
+ };
+
+ cam_sensor_mclk0_sleep {
+ /* MCLK */
+ qcom,pins = <&gp 26>;
+ qcom,num-grp-pins = <1>;
+ label = "cam-sensor-mclk0-sleep";
+ /* suspend state */
+ cam_sensor_mclk0_sleep: sleep {
+ drive-strength = <2>; /* 2 MA */
+ bias-pull-down; /* PULL DOWN */
+ };
+ };
+
+ cam_sensor_mclk1 {
+ /* MCLK */
+ qcom,pins = <&gp 27>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <1>;
+ label = "cam-sensor-mclk1";
+ /* active state */
+ cam_sensor_mclk1_default: default {
+ drive-strength = <2>; /* 2 MA */
+ bias-disable = <0>; /* No PULL */
+ };
+ };
+
+ cam_sensor_mclk1_sleep {
+ /* MCLK */
+ qcom,pins = <&gp 27>;
+ qcom,num-grp-pins = <1>;
+ label = "cam-sensor-mclk1-sleep";
+ /* suspend state */
+ cam_sensor_mclk1_sleep: sleep {
+ drive-strength = <2>; /* 2 MA */
+ bias-pull-down; /* PULL DOWN */
+ };
+ };
+
+ cam_sensor_rear {
+ /* RESET, STANDBY */
+ qcom,pins = <&gp 35>, <&gp 34>;
+ qcom,num-grp-pins = <2>;
+ qcom,pin-func = <0>;
+ label = "cam-sensor-rear";
+ /* active state */
+ cam_sensor_rear_default: default {
+ drive-strength = <2>; /* 2 MA */
+ bias-disable = <0>; /* No PULL */
+ };
+ };
+
+	cam_sensor_rear_sleep {
+		/* RESET, STANDBY */
+		qcom,pins = <&gp 35>, <&gp 34>;
+		qcom,num-grp-pins = <2>;
+		qcom,pin-func = <0>;
+		label = "cam-sensor-rear-sleep";
+		/* suspended state */
+		cam_sensor_rear_sleep: sleep {
+			drive-strength = <2>; /* 2 MA */
+			bias-pull-down; /* PULL DOWN */
+		};
+	};
+
+ cam_sensor_front {
+ /* RESET, STANDBY */
+ qcom,pins = <&gp 28>, <&gp 33>;
+ qcom,num-grp-pins = <2>;
+ qcom,pin-func = <0>;
+ label = "cam_sensor_front";
+ /* active state */
+ cam_sensor_front_default: default {
+ drive-strength = <2>; /* 2 MA */
+ bias-disable = <0>; /* No PULL */
+ };
+ };
+
+	cam_sensor_front_sleep {
+		/* RESET, STANDBY */
+		qcom,pins = <&gp 28>, <&gp 33>;
+		qcom,num-grp-pins = <2>;
+		qcom,pin-func = <0>;
+		label = "cam_sensor_front_sleep"; /* was duplicate of active group's label */
+		/* suspended state */
+		cam_sensor_front_sleep: sleep {
+			drive-strength = <2>; /* 2 MA */
+			bias-pull-down; /* PULL DOWN (bare flag, as in sibling sleep groups) */
+		};
+	};
+
+	cam_sensor_flash {
+		/* FLSH_RESET, FLASH_EN, FLASH_NOW */
+		qcom,pins = <&gp 36>, <&gp 31>, <&gp 32>;
+		qcom,num-grp-pins = <3>;
+		qcom,pin-func = <0>;
+		label = "cam_sensor_flash";
+		/* active state */
+		cam_sensor_flash_default: default {
+			drive-strength = <2>; /* 2 MA */
+			bias-disable = <0>; /* No PULL */
+		};
+		/* suspended state */
+		cam_sensor_flash_sleep: sleep {
+			drive-strength = <2>; /* 2 MA */
+			bias-pull-down; /* PULL DOWN (bare flag, as in sibling sleep groups) */
+		};
+	};
+
+ pmx_i2c_4 {
+ /* CLK, DATA */
+ qcom,pins = <&gp 14>, <&gp 15>;
+ qcom,num-grp-pins = <2>;
+ qcom,pin-func = <2>;
+ label = "pmx_i2c_4";
+
+ i2c_4_active: i2c_4_active {
+ drive-strength = <2>; /* 2 MA */
+ bias-disable = <0>; /* No PULL */
+ };
+
+ i2c_4_sleep: i2c_4_sleep {
+ drive-strength = <2>; /* 2 MA */
+ bias-disable = <0>; /* No PULL */
+ };
+
+ };
+
+ smb_int_pin {
+ qcom,pins = <&gp 62>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <0>;
+ label = "smb1360_int_gpio";
+ smb_int_default: smb_int_default {
+ drive-strength = <2>; /* 2 MA */
+ bias-pull-up; /* PULL UP*/
+ };
+ };
+
+ button_backlight_pin {
+ qcom,pins = <&gp 119>;
+ qcom,num-grp-pins = <1>;
+ qcom,pin-func = <0>;
+ label = "button-backlight-pin";
+ button_backlight_off: button_backlight_off {
+ drive-strength = <2>;
+ bias-disable;
+ output-low;
+ };
+ };
+
+ mpu6050_int_pin {
+ qcom,pins = <&gp 115>;
+ qcom,pin-func = <0>;
+ qcom,num-grp-pins = <1>;
+ label = "mpu6050-irq";
+ mpu6050_default: mpu6050_default {
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+ mpu6050_sleep: mpu6050_sleep {
+ drive-strength = <2>;
+ };
+ };
+
+ apds99xx_int_pin {
+ qcom,pins = <&gp 113>;
+ qcom,pin-func = <0>;
+ qcom,num-grp-pins = <1>;
+ label = "apds99xx-irq";
+ apds99xx_default: apds99xx_default {
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+ apds99xx_sleep: apds99xx_sleep {
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+ ak8963_int_pin {
+ qcom,pins = <&gp 69>;
+ qcom,pin-func = <0>;
+ qcom,num-grp-pins = <1>;
+ label = "ak8963-irq";
+ ak8963_default: ak8963_default {
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+ ak8963_sleep: ak8963_sleep {
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/msm8916-pm.dtsi b/arch/arm/boot/dts/msm8916-pm.dtsi
new file mode 100644
index 000000000000..b060b77be254
--- /dev/null
+++ b/arch/arm/boot/dts/msm8916-pm.dtsi
@@ -0,0 +1,345 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	qcom,spm@b089000 {
+		compatible = "qcom,spm-v2";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0xb089000 0x1000>;
+		qcom,core-id = <0>;
+		qcom,saw2-ver-reg = <0xfd0>;
+		qcom,saw2-cfg = <0x14>;
+		qcom,saw2-spm-dly = <0x3c102800>; /* lower-case hex, matching cores 1-3 */
+		qcom,saw2-spm-ctl = <0xe>;
+		qcom,saw2-spm-cmd-wfi = [60 03 60 0B 0f];
+		qcom,saw2-spm-cmd-spc = [20 10 80 30 90 5b 60 03 60 3B 76 76 0B
+				94 5B 80 10 26 30 0f];
+		qcom,saw2-spm-cmd-pc = [20 10 80 30 90 5b 60 03 60 3B 76 76 0B
+				94 5B 80 10 26 30 0f];
+	};
+
+ qcom,spm@b099000 {
+ compatible = "qcom,spm-v2";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb099000 0x1000>;
+ qcom,core-id = <1>;
+ qcom,saw2-ver-reg = <0xfd0>;
+ qcom,saw2-cfg = <0x14>;
+ qcom,saw2-spm-dly= <0x3c102800>;
+ qcom,saw2-spm-ctl = <0xe>;
+ qcom,saw2-spm-cmd-wfi = [60 03 60 0B 0f];
+ qcom,saw2-spm-cmd-spc = [20 10 80 30 90 5b 60 03 60 3B 76 76 0B
+ 94 5B 80 10 26 30 0f];
+ qcom,saw2-spm-cmd-pc = [20 10 80 30 90 5b 60 03 60 3B 76 76 0B
+ 94 5B 80 10 26 30 0f];
+ };
+
+ qcom,spm@b0a9000 {
+ compatible = "qcom,spm-v2";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb0a9000 0x1000>;
+ qcom,core-id = <2>;
+ qcom,saw2-ver-reg = <0xfd0>;
+ qcom,saw2-cfg = <0x14>;
+ qcom,saw2-spm-dly= <0x3c102800>;
+ qcom,saw2-spm-ctl = <0xe>;
+ qcom,saw2-spm-cmd-wfi = [60 03 60 0B 0f];
+ qcom,saw2-spm-cmd-spc = [20 10 80 30 90 5b 60 03 60 3B 76 76 0B
+ 94 5B 80 10 26 30 0f];
+ qcom,saw2-spm-cmd-pc = [20 10 80 30 90 5b 60 03 60 3B 76 76 0B
+ 94 5B 80 10 26 30 0f];
+ };
+
+ qcom,spm@b0b9000 {
+ compatible = "qcom,spm-v2";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xb0b9000 0x1000>;
+ qcom,core-id = <3>;
+ qcom,saw2-ver-reg = <0xfd0>;
+ qcom,saw2-cfg = <0x14>;
+ qcom,saw2-spm-dly= <0x3c102800>;
+ qcom,saw2-spm-ctl = <0xe>;
+ qcom,saw2-spm-cmd-wfi = [60 03 60 0B 0f];
+ qcom,saw2-spm-cmd-spc = [20 10 80 30 90 5b 60 03 60 3B 76 76 0B
+ 94 5B 80 10 26 30 0f];
+ qcom,saw2-spm-cmd-pc = [20 10 80 30 90 5b 60 03 60 3B 76 76 0B
+ 94 5B 80 10 26 30 0f];
+ };
+
+	qcom,spm@b012000 {
+		compatible = "qcom,spm-v2";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		reg = <0xb012000 0x1000>;
+		qcom,core-id = <0xffff>; /* L2/APCS SAW */
+		qcom,saw2-ver-reg = <0xfd0>;
+		qcom,saw2-cfg = <0x1F>;
+		qcom,saw2-spm-dly = <0x3c102800>;
+		qcom,saw2-spm-ctl = <0xe>;
+		qcom,saw2-pmic-data0 = <0x04030080>;
+		qcom,saw2-pmic-data1 = <0x01030000>;
+		qcom,vctl-timeout-us = <50>;
+		qcom,vctl-port = <0x0>;
+		qcom,phase-port = <0x1>;
+		qcom,pfm-port = <0x2>;
+		qcom,saw2-spm-cmd-ret = [00 03 00 0f];
+		qcom,saw2-spm-cmd-gdhs = [00 20 32 6B C0 E0 D0 42 F0 03 50 4E
+				02 02 D0 E0 C0 22 6B 02 32 52 F0 0F];
+		qcom,saw2-spm-cmd-pc = [00 32 B0 10 E0 D0 6B C0 42 11 07 01
+				B0 50 4E 02 02 C0 D0 12 E0 6B 02 32 50 0F];
+		qcom,L2-spm-is-apcs-master;
+	};
+
+
+ qcom,lpm-levels {
+ compatible = "qcom,lpm-levels";
+ qcom,default-l2-state = "l2_cache_active";
+ qcom,allow-synced-levels;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,cpu-modes {
+ compatible = "qcom,cpu-modes";
+
+ qcom,cpu-mode@0 {
+ qcom,mode = "wfi";
+ qcom,latency-us = <1>;
+ qcom,ss-power = <560>;
+ qcom,energy-overhead = <12000>;
+ qcom,time-overhead = <20>;
+ };
+
+ qcom,cpu-mode@1 {
+ qcom,mode = "standalone_pc";
+ qcom,latency-us = <180>;
+ qcom,ss-power = <550>;
+ qcom,energy-overhead = <160000>;
+ qcom,time-overhead = <280>;
+ qcom,use-broadcast-timer;
+ };
+
+ qcom,cpu-mode@2 {
+ qcom,mode = "pc";
+ qcom,latency-us = <230>;
+ qcom,ss-power = <535>;
+ qcom,energy-overhead = <200000>;
+ qcom,time-overhead = <330>;
+ qcom,use-broadcast-timer;
+ };
+ };
+
+		qcom,system-modes {
+			compatible = "qcom,system-modes";
+
+			qcom,system-mode@0 {
+				qcom,l2 = "l2_cache_gdhs";
+				qcom,latency-us = <240>;
+				qcom,ss-power = <530>;
+				qcom,energy-overhead = <210000>;
+				qcom,time-overhead = <350>;
+				qcom,min-cpu-mode = "standalone_pc";
+				qcom,sync-mode;
+			};
+
+			qcom,system-mode@1 {
+				qcom,l2 = "l2_cache_pc";
+				qcom,latency-us = <10900>;
+				qcom,ss-power = <490>;
+				qcom,energy-overhead = <1030563>;
+				qcom,time-overhead = <1656>;
+				qcom,min-cpu-mode = "pc";
+				qcom,send-rpm-sleep-set;
+				qcom,sync-mode;
+			};
+		};
+ };
+ qcom,pm-boot {
+ compatible = "qcom,pm-boot";
+ qcom,mode = "tz";
+ };
+
+ qcom,mpm@601d0 {
+
+ status = "disabled";
+
+ compatible = "qcom,mpm-v2";
+ reg = <0x601d0 0x1000>, /* MSM_RPM_MPM_BASE 4K */
+ <0xb011008 0x4>;
+ reg-names = "vmpm", "ipc";
+ interrupts = <0 171 1>;
+ clocks = <&clock_rpm clk_xo_lpm_clk>;
+ clock-names = "xo";
+
+ qcom,ipc-bit-offset = <1>;
+
+ qcom,gic-parent = <&intc>;
+ qcom,gic-map = <2 216>, /* tsens_upper_lower_int */
+ <50 172>, /* usb1_hs_async_wakeup_irq */
+ <53 104>, /* mdss_irq */
+ <62 222>, /* ee0_krait_hlos_spmi_periph_irq */
+ <0xff 18>, /* APC_qgicQTmrSecPhysIrptReq */
+ <0xff 19>, /* APC_qgicQTmrNonSecPhysIrptReq */
+ <0xff 20>, /* qgicQTmrVirtIrptReq */
+ <0xff 35>, /* WDT_barkInt */
+ <0xff 39>, /* arch_mem_timer */
+ <0xff 40>, /* qtmr_phy_irq[0] */
+ <0xff 47>, /* rbif_irq[0] */
+ <0xff 56>, /* q6_wdog_expired_irq */
+ <0xff 57>, /* mss_to_apps_irq(0) */
+ <0xff 58>, /* mss_to_apps_irq(1) */
+ <0xff 59>, /* mss_to_apps_irq(2) */
+ <0xff 60>, /* mss_to_apps_irq(3) */
+ <0xff 61>, /* mss_a2_bam_irq */
+ <0xff 65>, /* o_gc_sys_irq[0] */
+ <0xff 74>, /* venus0_mmu_cirpt[1] */
+ <0xff 75>, /* venus0_mmu_cirpt[0] */
+ <0xff 78>, /* mdss_mmu_cirpt[0] */
+ <0xff 79>, /* mdss_mmu_cirpt[1] */
+ <0xff 97>, /* camss_vfe_mmu_cirpt[1] */
+ <0xff 102>, /* camss_jpeg_mmu_cirpt[1] */
+ <0xff 109>, /* ocmem_dm_nonsec_irq */
+ <0xff 131>, /* blsp1_qup_5_irq */
+ <0xff 140>, /* blsp1_uart_3_irq */
+ <0xff 166>, /* usb_hs_irq */
+ <0xff 155>, /* sdc1_irq(0) */
+ <0xff 157>, /* sdc2_irq(0) */
+ <0xff 170>, /* sdc1_pwr_cmd_irq */
+ <0xff 173>, /* o_wcss_apss_smd_hi */
+ <0xff 174>, /* o_wcss_apss_smd_med */
+ <0xff 175>, /* o_wcss_apss_smd_low */
+ <0xff 176>, /* o_wcss_apss_smsm_irq */
+ <0xff 177>, /* o_wcss_apss_wlan_data_xfer_done */
+ <0xff 178>, /* o_wcss_apss_wlan_rx_data_avail */
+ <0xff 179>, /* o_wcss_apss_asic_intr */
+ <0xff 181>, /* o_wcss_apss_wdog_bite_and_reset_rdy */
+
+ <0xff 188>, /* lpass_irq_out_apcs(0) */
+ <0xff 189>, /* lpass_irq_out_apcs(1) */
+ <0xff 190>, /* lpass_irq_out_apcs(2) */
+ <0xff 191>, /* lpass_irq_out_apcs(3) */
+ <0xff 192>, /* lpass_irq_out_apcs(4) */
+ <0xff 193>, /* lpass_irq_out_apcs(5) */
+ <0xff 194>, /* lpass_irq_out_apcs(6) */
+ <0xff 195>, /* lpass_irq_out_apcs(7) */
+ <0xff 196>, /* lpass_irq_out_apcs(8) */
+ <0xff 197>, /* lpass_irq_out_apcs(9) */
+ <0xff 198>, /* coresight-tmc-etr interrupt */
+ <0xff 200>, /* rpm_ipc(4) */
+ <0xff 201>, /* rpm_ipc(5) */
+ <0xff 202>, /* rpm_ipc(6) */
+ <0xff 203>, /* rpm_ipc(7) */
+ <0xff 204>, /* rpm_ipc(24) */
+ <0xff 205>, /* rpm_ipc(25) */
+ <0xff 206>, /* rpm_ipc(26) */
+ <0xff 207>, /* rpm_ipc(27) */
+ <0xff 239>, /* crypto_bam_irq[1]*/
+ <0xff 240>, /* summary_irq_kpss */
+ <0xff 253>, /* sdc2_pwr_cmd_irq */
+ <0xff 263>, /* msm_iommu_global_client_irq */
+ <0xff 269>, /* rpm_wdog_expired_irq */
+ <0xff 270>, /* blsp1_bam_irq[0] */
+ <0xff 275>, /* rpm_ipc(30) */
+ <0xff 276>; /* rpm_ipc(31) */
+
+ qcom,gpio-parent = <&msm_gpio>;
+ qcom,gpio-map = <3 108 >,
+ <4 1 >,
+ <5 5 >,
+ <6 9 >,
+ <7 107>,
+ <8 98>,
+ <9 97>,
+ <10 11>,
+ <11 69>,
+ <12 12>,
+ <13 13>,
+ <14 20>,
+ <15 62>,
+ <16 54>,
+ <17 21>,
+ <18 52>,
+ <19 25>,
+ <20 51>,
+ <21 50>,
+ <22 28>,
+ <23 31>,
+ <24 34>,
+ <25 35>,
+ <26 36>,
+ <27 37>,
+ <28 38>,
+ <29 49>,
+ <30 109>,
+ <31 110>,
+ <32 111>,
+ <33 112>,
+ <34 113>,
+ <35 114>,
+ <36 115>,
+ <37 117>,
+ <38 118>,
+ <39 120>,
+ <40 121>,
+ <50 66>,
+ <51 68>;
+ };
+
+ qcom,pm@8600664 {
+ compatible = "qcom,pm";
+ reg = <0x8600664 0x40>;
+ qcom,pc-mode = "tz_l2_int";
+ qcom,use-sync-timer;
+ qcom,synced-clocks;
+ };
+
+	qcom,cpu-sleep-status@b088008 {
+		compatible = "qcom,cpu-sleep-status";
+		reg = <0xb088008 0x100>;
+		qcom,cpu-alias-addr = <0x10000>;
+		qcom,sleep-status-mask = <0x40000>;
+	};
+
+ qcom,rpm-log@29dc00 {
+ compatible = "qcom,rpm-log";
+ reg = <0x29dc00 0x4000>;
+ qcom,rpm-addr-phys = <0xfc000000>;
+ qcom,offset-version = <4>;
+ qcom,offset-page-buffer-addr = <36>;
+ qcom,offset-log-len = <40>;
+ qcom,offset-log-len-mask = <44>;
+ qcom,offset-page-indices = <56>;
+ };
+
+ qcom,rpm-stats@29dba0 {
+ compatible = "qcom,rpm-stats";
+ reg = <0x29dba0 0x1000>;
+ reg-names = "phys_addr_base";
+ qcom,sleep-stats-version = <2>;
+ };
+
+ qcom,rpm-master-stats@60150 {
+ compatible = "qcom,rpm-master-stats";
+ reg = <0x60150 0x2030>;
+ qcom,masters = "APSS", "MPSS", "PRONTO";
+ qcom,master-stats-version = <2>;
+ qcom,master-offset = <4096>;
+ };
+	qcom,rpm-rbcpr-stats@29daa0 {
+		compatible = "qcom,rpmrbcpr-stats";
+		reg = <0x29daa0 0x1a0000>;
+		qcom,start-offset = <0x190010>;
+	};
+};
diff --git a/arch/arm/boot/dts/msm8916-qrd-skui.dts b/arch/arm/boot/dts/msm8916-qrd-skui.dts
new file mode 100644
index 000000000000..6f409f7f8dc9
--- /dev/null
+++ b/arch/arm/boot/dts/msm8916-qrd-skui.dts
@@ -0,0 +1,36 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8916-qrd-skui.dtsi"
+#include "msm8916-memory.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM 8916 QRD SKUI";
+	compatible = "qcom,msm8916-qrd-skui", "qcom,msm8916-qrd", "qcom,msm8916", "qcom,qrd";
+	qcom,board-id = <0x1000b 5>, <0x1010b 5>, <0x3010b 5>;
+};
+
+&soc {
+ gen-vkeys {
+ compatible = "qcom,gen-vkeys";
+ label = "ft5x06_ts";
+ qcom,disp-maxx = <480>;
+ qcom,disp-maxy = <854>;
+ qcom,panel-maxx = <480>;
+ qcom,panel-maxy = <946>;
+ qcom,key-codes = <139 172 158>;
+ qcom,y-offset = <0>;
+ };
+};
+
diff --git a/arch/arm/boot/dts/msm8916-qrd-skui.dtsi b/arch/arm/boot/dts/msm8916-qrd-skui.dtsi
new file mode 100644
index 000000000000..e3c1b467af3b
--- /dev/null
+++ b/arch/arm/boot/dts/msm8916-qrd-skui.dtsi
@@ -0,0 +1,67 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8916-qrd.dtsi"
+
+&tlmm_pinmux {
+ ocp8110_pins {
+ qcom,pins = <&gp 31>, <&gp 32>;
+ qcom,num-grp-pins = <2>;
+ qcom,pin-func = <0>;
+ label = "ocp8110_pins";
+ ocp8110_default: en_default {
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ bma2x2_int1_pin {
+ qcom,pins = <&gp 112>;
+ qcom,num-grp-pins = <1>;
+ label = "bma2x2_int1_pin";
+ bma2x2_int1_default: int1_default {
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+ };
+
+ bma2x2_int2_pin {
+ qcom,pins = <&gp 114>;
+ qcom,num-grp-pins = <1>;
+ label = "bma2x2_int2_pin";
+ bma2x2_int2_default: int2_default {
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+ };
+};
+
+&sdc2_cd_on {
+ /delete-property/ bias-pull-up;
+ bias-pull-down;
+};
+
+&sdc2_cd_off {
+ /delete-property/ bias-disable;
+ bias-pull-down;
+};
+
+&sdhc_2 {
+ qcom,nonremovable;
+
+ interrupts = <0 1>;
+ interrupt-map = <0 &intc 0 125 0
+ 1 &intc 0 221 0>;
+ interrupt-names = "hc_irq", "pwr_irq";
+ /delete-property/ cd-gpios;
+};
+
diff --git a/arch/arm/boot/dts/msm8916-qrd.dtsi b/arch/arm/boot/dts/msm8916-qrd.dtsi
new file mode 100644
index 000000000000..445dca98a888
--- /dev/null
+++ b/arch/arm/boot/dts/msm8916-qrd.dtsi
@@ -0,0 +1,137 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8916.dtsi"
+#include "msm8916-pinctrl.dtsi"
+
+/ {
+ aliases {
+ serial0 = &blsp1_uart2;
+ };
+};
+
+
+&blsp1_uart2 {
+ status = "ok";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart_console_sleep>;
+};
+
+&soc {
+ gpio_keys {
+ compatible = "gpio-keys";
+ input-name = "gpio-keys";
+ pinctrl-names = "tlmm_gpio_key_active","tlmm_gpio_key_suspend";
+ pinctrl-0 = <&gpio_key_active>;
+ pinctrl-1 = <&gpio_key_suspend>;
+
+ vol_up {
+ label = "volume_up";
+ gpios = <&msm_gpio 107 0x1>;
+ linux,input-type = <1>;
+ linux,code = <115>;
+ gpio-key,wakeup;
+ debounce-interval = <15>;
+ };
+ };
+};
+
+&pm8916_gpios {
+ gpio@c000 { /* GPIO 1 */
+ /* Battery UICC Alarm */
+ status = "disabled";
+ };
+
+ gpio@c100 { /* GPIO 2 */
+ /* NFC_CLK_REQ */
+ qcom,mode = <0>; /* QPNP_PIN_MODE_DIG_IN */
+ qcom,pull = <5>; /* QPNP_PIN_PULL_NO */
+ qcom,vin-sel = <2>; /* QPNP_PIN_VIN2 */
+ qcom,src-sel = <2>; /* QPNP_PIN_SEL_FUNC_1 */
+ qcom,master-en = <1>;
+ };
+
+ gpio@c200 { /* GPIO 3 */
+ /* External regulator control for WTR */
+ status = "disabled";
+ };
+
+ gpio@c300 { /* GPIO 4 */
+ /* External regulator control for APC */
+ status = "disabled";
+ };
+};
+
+&sdhc_1 {
+ vdd-supply = <&pm8916_l8>;
+ qcom,vdd-voltage-level = <2900000 2900000>;
+ qcom,vdd-current-level = <200 400000>;
+
+ vdd-io-supply = <&pm8916_l5>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-lpm-sup;
+ qcom,vdd-io-voltage-level = <1800000 1800000>;
+ qcom,vdd-io-current-level = <200 60000>;
+
+ pinctrl-names = "active", "sleep";
+ pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on>;
+ pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off>;
+
+ qcom,nonremovable;
+
+ status = "ok";
+};
+
+&sdhc_2 {
+ vdd-supply = <&pm8916_l11>;
+ qcom,vdd-voltage-level = <2800000 2950000>;
+ qcom,vdd-current-level = <15000 400000>;
+
+ vdd-io-supply = <&pm8916_l12>;
+ qcom,vdd-io-voltage-level = <1800000 2950000>;
+ qcom,vdd-io-current-level = <200 50000>;
+
+ pinctrl-names = "active", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+
+ #address-cells = <0>;
+ interrupt-parent = <&sdhc_2>;
+ interrupts = <0 1 2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xffffffff>;
+ interrupt-map = <0 &intc 0 125 0
+ 1 &intc 0 221 0
+ 2 &msm_gpio 38 0>;
+ interrupt-names = "hc_irq", "pwr_irq", "status_irq";
+ cd-gpios = <&msm_gpio 38 0x0>;
+
+ status = "ok";
+};
+
+&spmi_bus {
+ qcom,pm8916@1 {
+ qcom,vibrator@c000 {
+ status = "okay";
+ qcom,vib-timeout-ms = <15000>;
+ qcom,vib-vtg-level-mV = <3100>;
+ };
+ };
+};
+
+&qcom_tzlog {
+ status = "okay";
+};
+
+
+
+
diff --git a/arch/arm/boot/dts/msm8916-regulator.dtsi b/arch/arm/boot/dts/msm8916-regulator.dtsi
new file mode 100644
index 000000000000..749c55261a3b
--- /dev/null
+++ b/arch/arm/boot/dts/msm8916-regulator.dtsi
@@ -0,0 +1,374 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* SPM controlled regulators */
+&spmi_bus {
+ qcom,pm8916@1 {
+ status = "okay";
+ pm8916_s2: spm-regulator@1700 {
+ compatible = "qcom,spm-regulator";
+ regulator-name = "8916_s2";
+ reg = <0x1700 0x100>;
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1350000>;
+ };
+ };
+};
+
+/* CPR controlled regulator */
+
+&soc {
+ mem_acc_vreg_corner: regulator@1946000 {
+ compatible = "qcom,mem-acc-regulator";
+ reg = <0x1946000 0x4>, <0x1946000 0x4>, <0x58000 0x1000>;
+ reg-names = "acc-sel-l1", "acc-sel-l2", "efuse_addr";
+ regulator-name = "mem_acc_corner";
+ regulator-min-microvolt = <1>;
+ regulator-max-microvolt = <3>;
+
+ qcom,acc-sel-l1-bit-pos = <0>;
+ qcom,acc-sel-l2-bit-pos = <8>;
+ qcom,corner-acc-map = <0 1 1>;
+ qcom,l1-config-skip-fuse-sel = <0 52 1 1 0>;
+ };
+
+ apc_vreg_corner: regulator@b018000 {
+ compatible = "qcom,cpr-regulator";
+ reg = <0xb018000 0x1000>, <0xb011064 4>, <0x58000 0x1000>;
+ reg-names = "rbcpr", "rbcpr_clk", "efuse_addr";
+ interrupts = <0 15 0>;
+ regulator-name = "apc_corner";
+ regulator-min-microvolt = <1>;
+ regulator-max-microvolt = <7>;
+
+ qcom,cpr-voltage-ceiling = <1050000 1150000 1350000>;
+ qcom,cpr-voltage-floor = <1050000 1050000 1162500>;
+ vdd-apc-supply = <&pm8916_s2>;
+
+ qcom,vdd-mx-corner-map = <4 5 7>;
+ qcom,vdd-mx-vmin-method = <4>;
+ vdd-mx-supply = <&pm8916_l3_corner_ao>;
+ qcom,vdd-mx-vmax = <7>;
+
+ mem-acc-supply = <&mem_acc_vreg_corner>;
+
+ qcom,cpr-ref-clk = <19200>;
+ qcom,cpr-timer-delay = <5000>;
+ qcom,cpr-timer-cons-up = <0>;
+ qcom,cpr-timer-cons-down = <2>;
+ qcom,cpr-irq-line = <0>;
+ qcom,cpr-step-quotient = <26>;
+ qcom,cpr-up-threshold = <0>;
+ qcom,cpr-down-threshold = <2>;
+ qcom,cpr-idle-clocks = <15>;
+ qcom,cpr-gcnt-time = <1>;
+ qcom,vdd-apc-step-up-limit = <1>;
+ qcom,vdd-apc-step-down-limit = <1>;
+ qcom,cpr-apc-volt-step = <12500>;
+
+ qcom,cpr-fuse-row = <27 0>;
+ qcom,cpr-fuse-target-quot = <42 24 6>;
+ qcom,cpr-fuse-ro-sel = <54 54 54>;
+ qcom,cpr-fuse-bp-cpr-disable = <57>;
+ qcom,cpr-fuse-init-voltage =
+ <27 36 6 0>,
+ <27 18 6 0>,
+ <27 0 6 0>;
+ qcom,cpr-init-voltage-step = <10000>;
+ qcom,cpr-corner-map = <1 1 2 2 3 3 3>;
+ qcom,cpr-corner-frequency-map =
+ <1 200000000>,
+ <2 400000000>,
+ <3 533330000>,
+ <4 800000000>,
+ <5 998400000>,
+ <6 1094400000>,
+ <7 1190400000>;
+ qcom,speed-bin-fuse-sel = <1 34 3 0>;
+ qcom,cpr-speed-bin-max-corners =
+ <0 0 2 4 7>;
+ qcom,cpr-quot-adjust-scaling-factor-max = <650>;
+ qcom,cpr-enable;
+ };
+};
+
+
+/* RPM controlled regulators */
+&rpm_bus {
+
+ /* PM8916 S1 VDD_CX supply */
+ rpm-regulator-smpa1 {
+ status = "okay";
+ pm8916_s1_corner: regulator-s1-corner {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_s1_corner";
+ qcom,set = <3>;
+ regulator-min-microvolt = <1>;
+ regulator-max-microvolt = <7>;
+ qcom,use-voltage-corner;
+ };
+ pm8916_s1_corner_ao: regulator-s1-corner-ao {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_s1_corner_ao";
+ qcom,set = <1>;
+ regulator-min-microvolt = <1>;
+ regulator-max-microvolt = <7>;
+ qcom,use-voltage-corner;
+ };
+ pm8916_s1_floor_corner: regulator-s1-floor-corner {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_s1_floor_corner";
+ qcom,set = <3>;
+ regulator-min-microvolt = <1>;
+ regulator-max-microvolt = <7>;
+ qcom,use-voltage-floor-corner;
+ qcom,always-send-voltage;
+ };
+ };
+
+ rpm-regulator-smpa3 {
+ status = "okay";
+ pm8916_s3: regulator-s3 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1300000>;
+ qcom,init-voltage = <1200000>;
+ status = "okay";
+ };
+ };
+
+ rpm-regulator-smpa4 {
+ status = "okay";
+ pm8916_s4: regulator-s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2100000>;
+ qcom,init-voltage = <1800000>;
+ status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa1 {
+ status = "okay";
+ pm8916_l1: regulator-l1 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ qcom,init-voltage = <1225000>;
+ status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa2 {
+ status = "okay";
+ pm8916_l2: regulator-l2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ qcom,init-voltage = <1200000>;
+ status = "okay";
+ };
+ };
+
+ /* PM8916 L3 VDD_MX supply */
+ rpm-regulator-ldoa3 {
+ status = "okay";
+ pm8916_l3: regulator-l3 {
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1287500>;
+ status = "okay";
+ };
+
+ pm8916_l3_corner_ao: regulator-l3-corner-ao {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_l3_corner_ao";
+ qcom,set = <1>;
+ regulator-min-microvolt = <1>;
+ regulator-max-microvolt = <7>;
+ qcom,use-voltage-corner;
+ };
+
+ pm8916_l3_corner_so: regulator-l3-corner-so {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_l3_corner_so";
+ qcom,set = <2>;
+ regulator-min-microvolt = <1>;
+ regulator-max-microvolt = <7>;
+ qcom,use-voltage-corner;
+ qcom,init-voltage = <1>;
+ };
+ };
+
+ rpm-regulator-ldoa4 {
+ status = "okay";
+ pm8916_l4: regulator-l4 {
+ regulator-min-microvolt = <2050000>;
+ regulator-max-microvolt = <2050000>;
+ qcom,init-voltage = <2050000>;
+ status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa5 {
+ status = "okay";
+ pm8916_l5: regulator-l5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,init-voltage = <1800000>;
+ status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa6 {
+ status = "okay";
+ pm8916_l6: regulator-l6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,init-voltage = <1800000>;
+ status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa7 {
+ status = "okay";
+ pm8916_l7: regulator-l7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,init-voltage = <1800000>;
+ status = "okay";
+ };
+
+ pm8916_l7_ao: regulator-l7-ao {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_l7_ao";
+ qcom,set = <1>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,init-voltage = <1800000>;
+ };
+
+ pm8916_l7_so: regulator-l7-so {
+ compatible = "qcom,rpm-smd-regulator";
+ regulator-name = "8916_l7_so";
+ qcom,set = <2>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,init-enable = <0>;
+ };
+ };
+
+ rpm-regulator-ldoa8 {
+ status = "okay";
+ pm8916_l8: regulator-l8 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2900000>;
+ qcom,init-voltage = <2850000>;
+ status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa9 {
+ status = "okay";
+ pm8916_l9: regulator-l9 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ qcom,init-voltage = <3300000>;
+ status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa10 {
+ status = "okay";
+ pm8916_l10: regulator-l10 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2800000>;
+ qcom,init-voltage = <2700000>;
+ status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa11 {
+ status = "okay";
+ pm8916_l11: regulator-l11 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ qcom,init-voltage = <1800000>;
+ status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa12 {
+ status = "okay";
+ pm8916_l12: regulator-l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ qcom,init-voltage = <1800000>;
+ status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa13 {
+ status = "okay";
+ pm8916_l13: regulator-l13 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ qcom,init-voltage = <3075000>;
+ status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa14 {
+ status = "okay";
+ pm8916_l14: regulator-l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ qcom,init-voltage = <1800000>;
+ status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa15 {
+ status = "okay";
+ pm8916_l15: regulator-l15 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ qcom,init-voltage = <1800000>;
+ status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa16 {
+ status = "okay";
+ pm8916_l16: regulator-l16 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ qcom,init-voltage = <1800000>;
+ status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa17 {
+ status = "okay";
+ pm8916_l17: regulator-l17 {
+ regulator-min-microvolt = <2850000>;
+ regulator-max-microvolt = <2850000>;
+ qcom,init-voltage = <2850000>;
+ status = "okay";
+ };
+ };
+
+ rpm-regulator-ldoa18 {
+ status = "okay";
+ pm8916_l18: regulator-l18 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ qcom,init-voltage = <2700000>;
+ status = "okay";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/msm8916.dtsi b/arch/arm/boot/dts/msm8916.dtsi
new file mode 100644
index 000000000000..5b4d16d76360
--- /dev/null
+++ b/arch/arm/boot/dts/msm8916.dtsi
@@ -0,0 +1,809 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "skeleton64.dtsi"
+#include <dt-bindings/clock/msm-clocks-8916.h>
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM8916";
+ compatible = "qcom,msm8916";
+ qcom,msm-id = <206 0>,
+ <248 0>,
+ <249 0>,
+ <250 0>;
+
+ interrupt-parent = <&intc>;
+ aliases {
+ sdhc1 = &sdhc_1; /* SDC1 eMMC slot */
+ sdhc2 = &sdhc_2; /* SDC2 SD card slot */
+
+ /* smdtty devices */
+ smd1 = &smdtty_apps_fm;
+ smd2 = &smdtty_apps_riva_bt_acl;
+ smd3 = &smdtty_apps_riva_bt_cmd;
+ smd4 = &smdtty_mbalbridge;
+ smd5 = &smdtty_apps_riva_ant_cmd;
+ smd6 = &smdtty_apps_riva_ant_data;
+ smd7 = &smdtty_data1;
+ smd8 = &smdtty_data4;
+ smd11 = &smdtty_data11;
+ smd21 = &smdtty_data21;
+ smd36 = &smdtty_loopback;
+
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,armv8";
+ reg = <0x0>;
+ enable-method = "qcom,arm-cortex-acc";
+ qcom,acc = <&acc0>;
+ };
+
+ CPU1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,armv8";
+ reg = <0x1>;
+ enable-method = "qcom,arm-cortex-acc";
+ qcom,acc = <&acc1>;
+ };
+
+ CPU2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,armv8";
+ reg = <0x2>;
+ enable-method = "qcom,arm-cortex-acc";
+ qcom,acc = <&acc2>;
+ };
+
+ CPU3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,armv8";
+ reg = <0x3>;
+ enable-method = "qcom,arm-cortex-acc";
+ qcom,acc = <&acc3>;
+ };
+ };
+
+ soc: soc { };
+};
+
+#include "msm8916-ipcrouter.dtsi"
+#include "msm-gdsc-8916.dtsi"
+
+&soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0xffffffff>;
+ compatible = "simple-bus";
+
+ acc0:clock-controller@b088000 {
+ compatible = "qcom,arm-cortex-acc";
+ reg = <0x0b088000 0x1000>,
+ <0x0b008000 0x1000>;
+ };
+
+ acc1:clock-controller@b098000 {
+ compatible = "qcom,arm-cortex-acc";
+ reg = <0x0b098000 0x1000>,
+ <0x0b008000 0x1000>;
+ };
+
+ acc2:clock-controller@b0a8000 {
+ compatible = "qcom,arm-cortex-acc";
+ reg = <0x0b0a8000 0x1000>,
+ <0x0b008000 0x1000>;
+ };
+
+ acc3:clock-controller@b0b8000 {
+ compatible = "qcom,arm-cortex-acc";
+ reg = <0x0b0b8000 0x1000>,
+ <0x0b008000 0x1000>;
+ };
+
+ intc: interrupt-controller@b000000 {
+ compatible = "qcom,msm-qgic2";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ reg = <0x0b000000 0x1000>,
+ <0x0b002000 0x1000>;
+ };
+
+ restart@4ab000 {
+ compatible = "qcom,pshold";
+ reg = <0x4ab000 0x4>;
+ };
+
+ qcom,mpm2-sleep-counter@4a3000 {
+ compatible = "qcom,mpm2-sleep-counter";
+ reg = <0x4a3000 0x1000>;
+ clock-frequency = <32768>;
+ };
+
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <1 2 0xf08>,
+ <1 3 0xf08>,
+ <1 4 0xf08>,
+ <1 1 0xf08>;
+ clock-frequency = <19200000>;
+ };
+
+ timer@b020000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0xb020000 0x1000>;
+ clock-frequency = <19200000>;
+
+ frame@b021000 {
+ frame-number = <0>;
+ interrupts = <0 8 0x4>,
+ <0 7 0x4>;
+ reg = <0xb021000 0x1000>,
+ <0xb022000 0x1000>;
+ };
+
+ frame@b023000 {
+ frame-number = <1>;
+ interrupts = <0 9 0x4>;
+ reg = <0xb023000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@b024000 {
+ frame-number = <2>;
+ interrupts = <0 10 0x4>;
+ reg = <0xb024000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@b025000 {
+ frame-number = <3>;
+ interrupts = <0 11 0x4>;
+ reg = <0xb025000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@b026000 {
+ frame-number = <4>;
+ interrupts = <0 12 0x4>;
+ reg = <0xb026000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@b027000 {
+ frame-number = <5>;
+ interrupts = <0 13 0x4>;
+ reg = <0xb027000 0x1000>;
+ status = "disabled";
+ };
+
+ frame@b028000 {
+ frame-number = <6>;
+ interrupts = <0 14 0x4>;
+ reg = <0xb028000 0x1000>;
+ status = "disabled";
+ };
+ };
+
+ clock_rpm: qcom,rpmcc@1800000 {
+ compatible = "qcom,rpmcc-8916";
+ reg = <0x1800000 0x80000>;
+ reg-names = "cc_base";
+ #clock-cells = <1>;
+
+ };
+
+ clock_gcc: qcom,gcc@1800000 {
+ compatible = "qcom,gcc-8916";
+ reg = <0x1800000 0x80000>,
+ <0xb016000 0x00040>;
+ reg-names = "cc_base", "apcs_base";
+ vdd_dig-supply = <&pm8916_s1_corner>;
+ vdd_sr2_dig-supply = <&pm8916_s1_corner_ao>;
+ vdd_sr2_pll-supply = <&pm8916_l7_ao>;
+ clocks = <&clock_rpm clk_xo_clk_src>,
+ <&clock_rpm clk_xo_a_clk_src>;
+ clock-names = "xo", "xo_a";
+ #clock-cells = <1>;
+ };
+
+ clock_debug: qcom,cc-debug@1874000 {
+ compatible = "qcom,cc-debug-8916";
+ reg = <0x1874000 0x4>,
+ <0xb01101c 0x8>;
+ reg-names = "cc_base", "meas";
+ clocks = <&clock_rpm clk_rpm_debug_mux>;
+ clock-names = "rpm_debug_mux";
+ #clock-cells = <1>;
+ };
+
+ tsens: tsens@4a8000 {
+ compatible = "qcom,msm8916-tsens";
+ reg = <0x4a8000 0x2000>,
+ <0x5c000 0x1000>;
+ reg-names = "tsens_physical", "tsens_eeprom_physical";
+ interrupts = <0 184 0>;
+ qcom,sensors = <5>;
+ qcom,slope = <3200 3200 3200 3200 3200>;
+ qcom,sensor-id = <0 1 2 4 5>;
+ };
+
+ qcom,clock-a7@0b011050 {
+ compatible = "qcom,clock-a53-8916";
+ reg = <0x0b011050 0x8>,
+ <0x0005c004 0x8>;
+ reg-names = "rcg-base", "efuse1";
+ qcom,safe-freq = < 400000000 >;
+ cpu-vdd-supply = <&apc_vreg_corner>;
+ clocks = <&clock_gcc clk_gpll0_ao_clk_src>,
+ <&clock_gcc clk_a53sspll>;
+ clock-names = "clk-4", "clk-5";
+ qcom,speed0-bin-v0 =
+ < 0 0>,
+ < 200000000 1>,
+ < 400000000 2>,
+ < 533333000 3>,
+ < 800000000 4>,
+ < 998400000 5>,
+ < 1094400000 6>,
+ < 1152000000 7>,
+ < 1209600000 8>;
+
+ qcom,speed1-bin-v0 =
+ < 0 0>,
+ < 200000000 1>,
+ < 400000000 2>,
+ < 533333000 3>,
+ < 800000000 4>,
+ < 998400000 5>,
+ < 1094400000 6>,
+ < 1152000000 7>;
+ };
+
+ qcom,cpubw {
+ compatible = "qcom,cpubw";
+ qcom,cpu-mem-ports = <1 512>;
+ qcom,bw-tbl =
+ /* 73 9.60 MHz */
+ /* 381 50MHz */
+ < 762 /* 100 MHz */>,
+ < 1525 /* 200 MHz */>,
+ < 3051 /* 400 MHz */>,
+ < 4066 /* 533 MHz */>;
+ qcom,ab-tbl =
+ < 229 >,
+ < 458 >,
+ < 915 >,
+ < 1220 >;
+ };
+
+ qcom,msm-cpufreq@0 {
+ reg = <0 4>;
+ compatible = "qcom,msm-cpufreq";
+ qcom,cpufreq-table =
+ < 200000 762>,
+ < 400000 762>,
+ < 533330 1525>,
+ < 800000 1525>,
+ < 998400 3051>,
+ < 1094400 3051>,
+ < 1152000 4066>,
+ < 1209600 4066>;
+ };
+
+ qcom,sps {
+ compatible = "qcom,msm_sps_4k";
+ qcom,device-type = <3>;
+ qcom,pipe-attr-ee;
+ };
+
+ blsp1_uart1: uart@78af000 {
+ compatible = "qcom,msm-hsuart-v14";
+ reg = <0x78af000 0x200>,
+ <0x7884000 0x23000>;
+ reg-names = "core_mem", "bam_mem";
+ interrupt-names = "core_irq", "bam_irq", "wakeup_irq";
+ #address-cells = <0>;
+ interrupt-parent = <&blsp1_uart1>;
+ interrupts = <0 1 2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xffffffff>;
+ interrupt-map = <0 &intc 0 107 0
+ 1 &intc 0 238 0
+ 2 &msm_gpio 1 0>;
+
+ qcom,inject-rx-on-wakeup = <1>;
+ qcom,rx-char-to-inject = <0xFD>;
+
+ qcom,bam-tx-ep-pipe-index = <0>;
+ qcom,bam-rx-ep-pipe-index = <1>;
+ qcom,master-id = <86>;
+
+ clocks = <&clock_gcc clk_gcc_blsp1_uart1_apps_clk>,
+ <&clock_gcc clk_gcc_blsp1_ahb_clk>;
+ clock-names = "core_clk", "iface_clk";
+
+ qcom,msm-bus,name = "blsp1_uart1";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <86 512 0 0>,
+ <86 512 500 800>;
+ pinctrl-names = "sleep", "default";
+ pinctrl-0 = <&hsuart_sleep>;
+ pinctrl-1 = <&hsuart_active>;
+ status = "disabled";
+ };
+
+ blsp1_uart2: serial@78b0000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x78b0000 0x200>;
+ interrupts = <0 108 0>;
+ clocks = <&clock_gcc clk_gcc_blsp1_uart2_apps_clk>,
+ <&clock_gcc clk_gcc_blsp1_ahb_clk>;
+ clock-names = "core", "iface";
+ };
+
+ rmtfs_sharedmem {
+ compatible = "qcom,sharedmem-uio";
+ reg = <0x86700000 0xe0000>;
+ reg-names = "rmtfs";
+ };
+
+ dsp_sharedmem {
+ compatible = "qcom,sharedmem-uio";
+ reg = <0x867e0000 0x20000>;
+ reg-names = "rfsa_dsp";
+ };
+
+ mdm_sharedmem {
+ compatible = "qcom,sharedmem-uio";
+ reg = <0x867e0000 0x20000>;
+ reg-names = "rfsa_mdm";
+ };
+
+ jtag_fuse: jtagfuse@5e01c {
+ compatible = "qcom,jtag-fuse";
+ reg = <0x5e01c 0x8>;
+ reg-names = "fuse-base";
+ };
+
+ sdhc_1: sdhci@07824000 {
+ compatible = "qcom,sdhci-msm-v4";
+ reg = <0x07824900 0x11c>, <0x07824000 0x800>;
+ reg-names = "hc_mem", "core_mem";
+
+ interrupts = <0 123 0>, <0 138 0>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ qcom,bus-width = <8>;
+
+ qcom,cpu-dma-latency-us = <701>;
+ qcom,msm-bus,name = "sdhc1";
+ qcom,msm-bus,num-cases = <8>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps = <78 512 0 0>, /* No vote */
+ <78 512 1600 3200>, /* 400 KB/s*/
+ <78 512 80000 160000>, /* 20 MB/s */
+ <78 512 100000 200000>, /* 25 MB/s */
+ <78 512 200000 400000>, /* 50 MB/s */
+ <78 512 400000 800000>, /* 100 MB/s */
+ <78 512 400000 800000>, /* 200 MB/s */
+ <78 512 2048000 4096000>; /* Max. bandwidth */
+ qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
+ 100000000 200000000 4294967295>;
+ clocks = <&clock_gcc clk_gcc_sdcc1_ahb_clk>,
+ <&clock_gcc clk_gcc_sdcc1_apps_clk>;
+ clock-names = "iface", "core";
+
+ qcom,clk-rates = <400000 25000000 50000000 100000000 177770000>;
+ qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+
+ status = "disabled";
+ };
+
+ sdhc_2: sdhci@07864000 {
+ compatible = "qcom,sdhci-msm-v4";
+ reg = <0x07864900 0x11c>, <0x07864000 0x800>;
+ reg-names = "hc_mem", "core_mem";
+
+ interrupts = <0 125 0>, <0 221 0>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ qcom,bus-width = <4>;
+
+ qcom,cpu-dma-latency-us = <701>;
+ qcom,msm-bus,name = "sdhc2";
+ qcom,msm-bus,num-cases = <8>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps = <81 512 0 0>, /* No vote */
+ <81 512 1600 3200>, /* 400 KB/s*/
+ <81 512 80000 160000>, /* 20 MB/s */
+ <81 512 100000 200000>, /* 25 MB/s */
+ <81 512 200000 400000>, /* 50 MB/s */
+ <81 512 400000 800000>, /* 100 MB/s */
+ <81 512 400000 800000>, /* 200 MB/s */
+ <81 512 2048000 4096000>; /* Max. bandwidth */
+ qcom,bus-bw-vectors-bps = <0 400000 20000000 25000000 50000000
+ 100000000 200000000 4294967295>;
+ clocks = <&clock_gcc clk_gcc_sdcc2_ahb_clk>,
+ <&clock_gcc clk_gcc_sdcc2_apps_clk>;
+ clock-names = "iface", "core";
+
+ qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+
+ status = "disabled";
+ };
+
+ qcom,ipc-spinlock@1905000 {
+ compatible = "qcom,ipc-spinlock-sfpb";
+ reg = <0x1905000 0x8000>;
+ qcom,num-locks = <8>;
+ };
+
+ qcom,smem@86300000 {
+ compatible = "qcom,smem";
+ reg = <0x86300000 0x100000>,
+ <0x0b011008 0x4>,
+ <0x60000 0x8000>,
+ <0x193D000 0x8>;
+ reg-names = "smem", "irq-reg-base", "aux-mem1", "smem_targ_info_reg";
+ qcom,mpu-enabled;
+
+ qcom,smd-modem {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <0>;
+ qcom,smd-irq-offset = <0x0>;
+ qcom,smd-irq-bitmask = <0x1000>;
+ qcom,pil-string = "modem";
+ interrupts = <0 25 1>;
+ };
+
+ qcom,smsm-modem {
+ compatible = "qcom,smsm";
+ qcom,smsm-edge = <0>;
+ qcom,smsm-irq-offset = <0x0>;
+ qcom,smsm-irq-bitmask = <0x2000>;
+ interrupts = <0 26 1>;
+ };
+
+ qcom,smd-wcnss {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <6>;
+ qcom,smd-irq-offset = <0x0>;
+ qcom,smd-irq-bitmask = <0x20000>;
+ qcom,pil-string = "wcnss";
+ interrupts = <0 142 1>;
+ };
+
+ qcom,smsm-wcnss {
+ compatible = "qcom,smsm";
+ qcom,smsm-edge = <6>;
+ qcom,smsm-irq-offset = <0x0>;
+ qcom,smsm-irq-bitmask = <0x80000>;
+ interrupts = <0 144 1>;
+ };
+
+ qcom,smd-rpm {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <15>;
+ qcom,smd-irq-offset = <0x0>;
+ qcom,smd-irq-bitmask = <0x1>;
+ interrupts = <0 168 1>;
+ qcom,irq-no-suspend;
+ };
+ };
+
+ rpm_bus: qcom,rpm-smd {
+ compatible = "qcom,rpm-smd";
+ rpm-channel-name = "rpm_requests";
+ rpm-channel-type = <15>; /* SMD_APPS_RPM */
+ };
+
+ qcom,bam_dmux@4044000 {
+ compatible = "qcom,bam_dmux";
+ reg = <0x4044000 0x19000>;
+ interrupts = <0 29 1>;
+ qcom,rx-ring-size = <32>;
+ };
+
+ qcom_tzlog: tz-log@8600720 {
+ compatible = "qcom,tz-log";
+ reg = <0x08600720 0x1000>;
+ status = "disabled";
+ };
+
+ qcom,smdtty {
+ compatible = "qcom,smdtty";
+
+ smdtty_apps_fm: qcom,smdtty-apps-fm {
+ qcom,smdtty-remote = "wcnss";
+ qcom,smdtty-port-name = "APPS_FM";
+ };
+
+ smdtty_apps_riva_bt_acl: smdtty-apps-riva-bt-acl {
+ qcom,smdtty-remote = "wcnss";
+ qcom,smdtty-port-name = "APPS_RIVA_BT_ACL";
+ };
+
+ smdtty_apps_riva_bt_cmd: qcom,smdtty-apps-riva-bt-cmd {
+ qcom,smdtty-remote = "wcnss";
+ qcom,smdtty-port-name = "APPS_RIVA_BT_CMD";
+ };
+
+ smdtty_mbalbridge: qcom,smdtty-mbalbridge {
+ qcom,smdtty-remote = "modem";
+ qcom,smdtty-port-name = "MBALBRIDGE";
+ };
+
+ smdtty_apps_riva_ant_cmd: smdtty-apps-riva-ant-cmd {
+ qcom,smdtty-remote = "wcnss";
+ qcom,smdtty-port-name = "APPS_RIVA_ANT_CMD";
+ };
+
+ smdtty_apps_riva_ant_data: smdtty-apps-riva-ant-data {
+ qcom,smdtty-remote = "wcnss";
+ qcom,smdtty-port-name = "APPS_RIVA_ANT_DATA";
+ };
+
+ smdtty_data1: qcom,smdtty-data1 {
+ qcom,smdtty-remote = "modem";
+ qcom,smdtty-port-name = "DATA1";
+ };
+
+ smdtty_data4: qcom,smdtty-data4 {
+ qcom,smdtty-remote = "modem";
+ qcom,smdtty-port-name = "DATA4";
+ };
+
+ smdtty_data11: qcom,smdtty-data11 {
+ qcom,smdtty-remote = "modem";
+ qcom,smdtty-port-name = "DATA11";
+ };
+
+ smdtty_data21: qcom,smdtty-data21 {
+ qcom,smdtty-remote = "modem";
+ qcom,smdtty-port-name = "DATA21";
+ };
+
+ smdtty_loopback: smdtty-loopback {
+ qcom,smdtty-remote = "modem";
+ qcom,smdtty-port-name = "LOOPBACK";
+ qcom,smdtty-dev-name = "LOOPBACK_TTY";
+ };
+ };
+
+ qcom,smdpkt {
+ compatible = "qcom,smdpkt";
+
+ qcom,smdpkt-data5-cntl {
+ qcom,smdpkt-remote = "modem";
+ qcom,smdpkt-port-name = "DATA5_CNTL";
+ qcom,smdpkt-dev-name = "smdcntl0";
+ };
+
+ qcom,smdpkt-data6-cntl {
+ qcom,smdpkt-remote = "modem";
+ qcom,smdpkt-port-name = "DATA6_CNTL";
+ qcom,smdpkt-dev-name = "smdcntl1";
+ };
+
+ qcom,smdpkt-data7-cntl {
+ qcom,smdpkt-remote = "modem";
+ qcom,smdpkt-port-name = "DATA7_CNTL";
+ qcom,smdpkt-dev-name = "smdcntl2";
+ };
+
+ qcom,smdpkt-data8-cntl {
+ qcom,smdpkt-remote = "modem";
+ qcom,smdpkt-port-name = "DATA8_CNTL";
+ qcom,smdpkt-dev-name = "smdcntl3";
+ };
+
+ qcom,smdpkt-data9-cntl {
+ qcom,smdpkt-remote = "modem";
+ qcom,smdpkt-port-name = "DATA9_CNTL";
+ qcom,smdpkt-dev-name = "smdcntl4";
+ };
+
+ qcom,smdpkt-data12-cntl {
+ qcom,smdpkt-remote = "modem";
+ qcom,smdpkt-port-name = "DATA12_CNTL";
+ qcom,smdpkt-dev-name = "smdcntl5";
+ };
+
+ qcom,smdpkt-data13-cntl {
+ qcom,smdpkt-remote = "modem";
+ qcom,smdpkt-port-name = "DATA13_CNTL";
+ qcom,smdpkt-dev-name = "smdcntl6";
+ };
+
+ qcom,smdpkt-data14-cntl {
+ qcom,smdpkt-remote = "modem";
+ qcom,smdpkt-port-name = "DATA14_CNTL";
+ qcom,smdpkt-dev-name = "smdcntl7";
+ };
+
+ qcom,smdpkt-data15-cntl {
+ qcom,smdpkt-remote = "modem";
+ qcom,smdpkt-port-name = "DATA15_CNTL";
+ qcom,smdpkt-dev-name = "smdcntl9";
+ };
+
+ qcom,smdpkt-data16-cntl {
+ qcom,smdpkt-remote = "modem";
+ qcom,smdpkt-port-name = "DATA16_CNTL";
+ qcom,smdpkt-dev-name = "smdcntl10";
+ };
+
+ qcom,smdpkt-data17-cntl {
+ qcom,smdpkt-remote = "modem";
+ qcom,smdpkt-port-name = "DATA17_CNTL";
+ qcom,smdpkt-dev-name = "smdcntl11";
+ };
+
+ qcom,smdpkt-data22 {
+ qcom,smdpkt-remote = "modem";
+ qcom,smdpkt-port-name = "DATA22";
+ qcom,smdpkt-dev-name = "smd22";
+ };
+
+ qcom,smdpkt-data23-cntl {
+ qcom,smdpkt-remote = "modem";
+ qcom,smdpkt-port-name = "DATA23_CNTL";
+ qcom,smdpkt-dev-name = "smdcnt_rev0";
+ };
+
+ qcom,smdpkt-data24-cntl {
+ qcom,smdpkt-remote = "modem";
+ qcom,smdpkt-port-name = "DATA24_CNTL";
+ qcom,smdpkt-dev-name = "smdcnt_rev1";
+ };
+
+ qcom,smdpkt-data25-cntl {
+ qcom,smdpkt-remote = "modem";
+ qcom,smdpkt-port-name = "DATA25_CNTL";
+ qcom,smdpkt-dev-name = "smdcnt_rev2";
+ };
+
+ qcom,smdpkt-data26-cntl {
+ qcom,smdpkt-remote = "modem";
+ qcom,smdpkt-port-name = "DATA26_CNTL";
+ qcom,smdpkt-dev-name = "smdcnt_rev3";
+ };
+
+ qcom,smdpkt-data27-cntl {
+ qcom,smdpkt-remote = "modem";
+ qcom,smdpkt-port-name = "DATA27_CNTL";
+ qcom,smdpkt-dev-name = "smdcnt_rev4";
+ };
+
+ qcom,smdpkt-data28-cntl {
+ qcom,smdpkt-remote = "modem";
+ qcom,smdpkt-port-name = "DATA28_CNTL";
+ qcom,smdpkt-dev-name = "smdcnt_rev5";
+ };
+
+ qcom,smdpkt-data29-cntl {
+ qcom,smdpkt-remote = "modem";
+ qcom,smdpkt-port-name = "DATA29_CNTL";
+ qcom,smdpkt-dev-name = "smdcnt_rev6";
+ };
+
+ qcom,smdpkt-data30-cntl {
+ qcom,smdpkt-remote = "modem";
+ qcom,smdpkt-port-name = "DATA30_CNTL";
+ qcom,smdpkt-dev-name = "smdcnt_rev7";
+ };
+
+ qcom,smdpkt-data31-cntl {
+ qcom,smdpkt-remote = "modem";
+ qcom,smdpkt-port-name = "DATA31_CNTL";
+ qcom,smdpkt-dev-name = "smdcnt_rev8";
+ };
+
+ qcom,smdpkt-data40-cntl {
+ qcom,smdpkt-remote = "modem";
+ qcom,smdpkt-port-name = "DATA40_CNTL";
+ qcom,smdpkt-dev-name = "smdcntl8";
+ };
+
+ qcom,smdpkt-apr-apps2 {
+ qcom,smdpkt-remote = "modem";
+ qcom,smdpkt-port-name = "apr_apps2";
+ qcom,smdpkt-dev-name = "apr_apps2";
+ };
+
+ qcom,smdpkt-loopback {
+ qcom,smdpkt-remote = "modem";
+ qcom,smdpkt-port-name = "LOOPBACK";
+ qcom,smdpkt-dev-name = "smd_pkt_loopback";
+ };
+ };
+
+
+ spmi_bus: qcom,spmi@200f000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0x200f000 0x1000>,
+ <0x2400000 0x400000>,
+ <0x2c00000 0x400000>,
+ <0x3800000 0x200000>,
+ <0x200a000 0x2100>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupts = <0 190 0>;
+ qcom,pmic-arb-channel = <0>;
+ qcom,pmic-arb-ee = <0>;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ };
+
+ qcom,msm-imem@8600000 {
+ compatible = "qcom,msm-imem";
+ reg = <0x08600000 0x1000>; /* Address and size of IMEM */
+ ranges = <0x0 0x08600000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ mem_dump_table@10 {
+ compatible = "qcom,msm-imem-mem_dump_table";
+ reg = <0x10 8>;
+ };
+
+ restart_reason@65c {
+ compatible = "qcom,msm-imem-restart_reason";
+ reg = <0x65c 4>;
+ };
+
+ boot_stats@6b0 {
+ compatible = "qcom,msm-imem-boot_stats";
+ reg = <0x6b0 32>;
+ };
+
+ pil@94c {
+ compatible = "qcom,msm-imem-pil";
+ reg = <0x94c 200>;
+ };
+ };
+
+ qcom,memshare {
+ compatible = "qcom,memshare";
+ };
+
+ cpu-pmu {
+ compatible = "arm,armv8-pmuv3";
+ qcom,irq-is-percpu;
+ interrupts = <1 7 0xf00>;
+ };
+
+};
+
+// AG 2014-11-29 without this, +40mA idle
+
+&gdsc_mdss {
+ status = "okay";
+};
+
+
+#include "msm-pm8916-rpm-regulator.dtsi"
+#include "msm-pm8916.dtsi"
+#include "msm8916-regulator.dtsi"
+#include "msm8916-pm.dtsi"
diff --git a/arch/arm/boot/dts/skeleton.dtsi b/arch/arm/boot/dts/skeleton.dtsi
index b41d241de2cd..f9988cd78c6a 100644
--- a/arch/arm/boot/dts/skeleton.dtsi
+++ b/arch/arm/boot/dts/skeleton.dtsi
@@ -9,5 +9,10 @@
#size-cells = <1>;
chosen { };
aliases { };
- memory { device_type = "memory"; reg = <0 0>; };
+ memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "memory";
+ reg = <0 0>;
+ };
};
diff --git a/arch/arm/configs/msm8916-qrd_defconfig b/arch/arm/configs/msm8916-qrd_defconfig
new file mode 100644
index 000000000000..f87c0d18ccc2
--- /dev/null
+++ b/arch/arm/configs/msm8916-qrd_defconfig
@@ -0,0 +1,140 @@
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MSM_SMD=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_MSM8916=y
+# CONFIG_CACHE_L2X0 is not set
+CONFIG_SMP=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_HIGHMEM=y
+CONFIG_CMA=y
+CONFIG_CP_ACCESS=y
+CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
+CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE_NAMES="msm8916-qrd-skui"
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
+CONFIG_CMDLINE="console=ttyHSL0,115200,n8"
+CONFIG_ARM_DECOMPRESSOR_LIMIT=0x3200000
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_IDLE=y
+CONFIG_VFP=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_PM_RUNTIME=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_INET=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6=y
+CONFIG_DMA_CMA=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=2
+CONFIG_BLK_DEV_RAM_SIZE=1024
+CONFIG_QPNP_MISC=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_MSM_SMD_PKT=y
+CONFIG_SPMI=y
+CONFIG_DEBUG_GPIO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_QPNP=y
+CONFIG_REGULATOR_SPM=y
+CONFIG_MMC=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MSM_SMEM=y
+CONFIG_MSM_SMEM_LOGGING=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_L2_SPM=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_RESET_CONTROLLER=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FUSE_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_CRAMFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_LL=y
+CONFIG_DEBUG_UART_PHYS=0x078B0000
+CONFIG_DEBUG_UART_VIRT=0xFA0B0000
+CONFIG_EARLY_PRINTK=y
+CONFIG_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 10e78d00a0bb..5e4f2278242e 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -94,6 +94,21 @@
* DMA Cache Coherency
* ===================
*
+ * dma_inv_range(start, end)
+ *
+ * Invalidate (discard) the specified virtual address range.
+ * May not write back any entries. If 'start' or 'end'
+ * are not cache line aligned, those lines must be written
+ * back.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
+ * dma_clean_range(start, end)
+ *
+ * Clean (write back) the specified virtual address range.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
@@ -115,6 +130,8 @@ struct cpu_cache_fns {
void (*dma_map_area)(const void *, size_t, int);
void (*dma_unmap_area)(const void *, size_t, int);
+ void (*dma_inv_range)(const void *, const void *);
+ void (*dma_clean_range)(const void *, const void *);
void (*dma_flush_range)(const void *, const void *);
};
@@ -142,6 +159,8 @@ extern struct cpu_cache_fns cpu_cache;
*/
#define dmac_map_area cpu_cache.dma_map_area
#define dmac_unmap_area cpu_cache.dma_unmap_area
+#define dmac_inv_range cpu_cache.dma_inv_range
+#define dmac_clean_range cpu_cache.dma_clean_range
#define dmac_flush_range cpu_cache.dma_flush_range
#else
@@ -163,6 +182,8 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
*/
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
+extern void dmac_inv_range(const void *, const void *);
+extern void dmac_clean_range(const void *, const void *);
extern void dmac_flush_range(const void *, const void *);
#endif
@@ -489,4 +510,12 @@ int set_memory_nx(unsigned long addr, int numpages);
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
void *kaddr, unsigned long len);
+#ifdef CONFIG_FREE_PAGES_RDONLY
+#define mark_addr_rdonly(a) set_memory_ro((unsigned long)a, 1);
+#define mark_addr_rdwrite(a) set_memory_rw((unsigned long)a, 1);
+#else
+#define mark_addr_rdonly(a)
+#define mark_addr_rdwrite(a)
+#endif
+
#endif
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 819777d0e91f..867fa5bbc64d 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -61,6 +61,7 @@
#define ARM_CPU_IMP_ARM 0x41
#define ARM_CPU_IMP_INTEL 0x69
+#define ARM_CPU_IMP_QUALCOMM 0x51
/* ARM implemented processors */
#define ARM_CPU_PART_ARM1136 0x4100b360
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index dc662fca9230..866f027e5e7c 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -25,6 +25,7 @@ struct pdev_archdata {
#ifdef CONFIG_ARCH_OMAP
struct omap_device *od;
#endif
+ u64 dma_mask;
};
#ifdef CONFIG_ARM_DMA_USE_IOMMU
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 85738b200023..b85ecbf3b719 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -189,6 +189,39 @@ extern int dma_supported(struct device *dev, u64 mask);
extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);
+/*
+ * dma_coherent_pre_ops - barrier functions for coherent memory before DMA.
+ * A barrier is required to ensure memory operations are complete before the
+ * initiation of a DMA xfer.
+ * If the coherent memory is Strongly Ordered
+ * - pre ARMv7 and 8x50 guarantees ordering wrt other mem accesses
+ * - ARMv7 guarantees ordering only within a 1KB block, so we need a barrier
+ * If coherent memory is normal then we need a barrier to prevent
+ * reordering
+ */
+static inline void dma_coherent_pre_ops(void)
+{
+#if COHERENT_IS_NORMAL == 1
+ dmb();
+#else
+ barrier();
+#endif
+}
+/*
+ * dma_coherent_post_ops - barrier functions for coherent memory after DMA.
+ * If the coherent memory is Strongly Ordered we don't need a barrier since
+ * there are no speculative fetches to Strongly Ordered memory.
+ * If coherent memory is normal then we need a barrier to prevent reordering
+ */
+static inline void dma_coherent_post_ops(void)
+{
+#if COHERENT_IS_NORMAL == 1
+ dmb();
+#else
+ barrier();
+#endif
+}
+
/**
* arm_dma_alloc - allocate consistent memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -264,6 +297,72 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
struct dma_attrs *attrs);
+#if 0
+static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
+}
+
+static inline void dma_free_writecombine(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
+#endif
+static inline void *dma_alloc_stronglyordered(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
+ return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
+}
+
+static inline void dma_free_stronglyordered(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
+ return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
+
+static inline int dma_mmap_stronglyordered(struct device *dev,
+ struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
+ return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
+
+static inline void *dma_alloc_nonconsistent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+ return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
+}
+
+static inline void dma_free_nonconsistent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+ return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
+
+static inline int dma_mmap_nonconsistent(struct device *dev,
+ struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+ return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
/*
* This can be called during early boot to increase the size of the atomic
@@ -311,7 +410,55 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
*/
extern void dmabounce_unregister_dev(struct device *);
+/**
+ * dma_cache_pre_ops - clean or invalidate cache before dma transfer is
+ * initiated and perform a barrier operation.
+ * @virtual_addr: A kernel logical or kernel virtual address
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ */
+static inline void dma_cache_pre_ops(void *virtual_addr,
+ size_t size, enum dma_data_direction dir)
+{
+ extern void ___dma_single_cpu_to_dev(const void *, size_t,
+ enum dma_data_direction);
+
+ BUG_ON(!valid_dma_direction(dir));
+
+ ___dma_single_cpu_to_dev(virtual_addr, size, dir);
+}
+/**
+ * dma_cache_post_ops - clean or invalidate cache after a dma transfer has
+ * completed and perform a barrier operation.
+ * @virtual_addr: A kernel logical or kernel virtual address
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ */
+static inline void dma_cache_post_ops(void *virtual_addr,
+ size_t size, enum dma_data_direction dir)
+{
+ extern void ___dma_single_cpu_to_dev(const void *, size_t,
+ enum dma_data_direction);
+
+ BUG_ON(!valid_dma_direction(dir));
+
+ if (arch_has_speculative_dfetch() && dir != DMA_TO_DEVICE)
+ /*
+ * Treat DMA_BIDIRECTIONAL and DMA_FROM_DEVICE
+ * identically: invalidate
+ */
+ ___dma_single_cpu_to_dev(virtual_addr,
+ size, DMA_FROM_DEVICE);
+}
/*
* The scatter list versions of the above methods.
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe446425e..d5924f65637d 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -31,8 +31,13 @@
*
* 36-bit addressing and supersections are only available on
* CPUs based on ARMv6+ or the Intel XSC3 core.
+ *
+ * We cannot use domain 0 for the kernel on QSD8x50 since the kernel domain
+ * is set to manager mode when set_fs(KERNEL_DS) is called. Setting domain 0
+ * to manager mode will disable the workaround for a cpu bug that can cause an
+ * invalid fault status and/or tlb corruption (CONFIG_VERIFY_PERMISSION_FAULT).
*/
-#ifndef CONFIG_IO_36
+#if !defined(CONFIG_IO_36) && !defined(CONFIG_VERIFY_PERMISSION_FAULT)
#define DOMAIN_KERNEL 0
#define DOMAIN_TABLE 0
#define DOMAIN_USER 1
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index a3c24cd5b7c8..2681262e5f9b 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -161,6 +161,8 @@ static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
#define dmac_map_area __glue(_CACHE,_dma_map_area)
#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
+#define dmac_inv_range __glue(_CACHE,_dma_inv_range)
+#define dmac_clean_range __glue(_CACHE,_dma_clean_range)
#endif
#endif
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 180567408ee8..2ea596dda249 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -100,6 +100,31 @@ static inline void __raw_writel(u32 val, volatile void __iomem *addr)
: "r" (val));
}
+static inline u64 __raw_readq(const volatile void __iomem *addr)
+{
+ register u64 val asm ("r2");
+
+ asm volatile("ldrd %1, %0"
+ : "+Qo" (*(volatile u64 __force *)addr),
+ "=r" (val));
+ return val;
+}
+#define readq_relaxed(c) ({ u64 __r = le64_to_cpu((__force __le64) \
+ __raw_readq(c)); __r; })
+
+static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
+ {
+ register u64 v asm ("r2");
+
+ v = val;
+
+ asm volatile("strd %1, %0"
+ : "+Qo" (*(volatile u64 __force *)addr)
+ : "r" (v));
+ }
+
+
+
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
u8 val;
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 0406cb3f1af7..5b2619460fc1 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -54,6 +54,7 @@ struct machine_desc {
void (*init_meminfo)(void);
void (*reserve)(void);/* reserve mem blocks */
void (*map_io)(void);/* IO mapping function */
+ void (*init_very_early)(void);
void (*init_early)(void);
void (*init_irq)(void);
void (*init_time)(void);
diff --git a/arch/arm/include/asm/mach/flash.h b/arch/arm/include/asm/mach/flash.h
index 4ca69fe2c850..7ed96303419b 100644
--- a/arch/arm/include/asm/mach/flash.h
+++ b/arch/arm/include/asm/mach/flash.h
@@ -13,27 +13,36 @@
struct mtd_partition;
struct mtd_info;
+enum sw_version {
+ VERSION_1 = 0,
+ VERSION_2,
+};
+
/*
* map_name: the map probe function name
* name: flash device name (eg, as used with mtdparts=)
* width: width of mapped device
+ * interleave: interleave mode feature support
* init: method called at driver/device initialisation
* exit: method called at driver/device removal
* set_vpp: method called to enable or disable VPP
* mmcontrol: method called to enable or disable Sync. Burst Read in OneNAND
* parts: optional array of mtd_partitions for static partitioning
* nr_parts: number of mtd_partitions for static partitoning
+ * version: software register interface version
*/
struct flash_platform_data {
const char *map_name;
const char *name;
unsigned int width;
+ unsigned int interleave;
int (*init)(void);
void (*exit)(void);
void (*set_vpp)(int on);
void (*mmcontrol)(struct mtd_info *mtd, int sync_read);
struct mtd_partition *parts;
unsigned int nr_parts;
+ enum sw_version version;
};
#endif
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index f98c7f32c9c8..7c079ca16c02 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -36,6 +36,7 @@ enum {
MT_MEMORY_RWX_ITCM,
MT_MEMORY_RW_SO,
MT_MEMORY_DMA_READY,
+ MT_DEVICE_USER_ACCESSIBLE,
};
#ifdef CONFIG_MMU
@@ -57,6 +58,9 @@ extern const struct mem_type *get_mem_type(unsigned int type);
*/
extern int ioremap_page(unsigned long virt, unsigned long phys,
const struct mem_type *mtype);
+
+extern int ioremap_pages(unsigned long virt, unsigned long phys,
+ unsigned long size, const struct mem_type *mtype);
#else
#define iotable_init(map,num) do { } while (0)
#define vm_reserve_area_early(a,s,c) do { } while (0)
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index e731018869a7..6c8d731c2e5f 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -348,6 +348,12 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
&& pfn_valid(virt_to_pfn(kaddr)))
+/*
+ * Set if the architecture speculatively fetches data into cache.
+ */
+#ifndef arch_has_speculative_dfetch
+#define arch_has_speculative_dfetch() 0
+#endif
#endif
#include <asm-generic/memory_model.h>
diff --git a/arch/arm/include/asm/mmu_writeable.h b/arch/arm/include/asm/mmu_writeable.h
new file mode 100644
index 000000000000..8c64b7fac60d
--- /dev/null
+++ b/arch/arm/include/asm/mmu_writeable.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _MMU_WRITEABLE_H
+#define _MMU_WRITEABLE_H
+
+#ifdef CONFIG_STRICT_MEMORY_RWX
+void mem_text_writeable_spinlock(unsigned long *flags);
+void mem_text_address_writeable(unsigned long);
+void mem_text_address_restore(void);
+void mem_text_writeable_spinunlock(unsigned long *flags);
+#else
+static inline void mem_text_writeable_spinlock(unsigned long *flags) {};
+static inline void mem_text_address_writeable(unsigned long addr) {};
+static inline void mem_text_address_restore(void) {};
+static inline void mem_text_writeable_spinunlock(unsigned long *flags) {};
+#endif
+
+void mem_text_write_kernel_word(unsigned long *addr, unsigned long word);
+
+#endif
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 4355f0ec44d6..06fa6d07b599 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -160,6 +160,11 @@ typedef struct page *pgtable_t;
extern int pfn_valid(unsigned long);
#endif
+#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+extern int _early_pfn_valid(unsigned long);
+#define early_pfn_valid(pfn) (_early_pfn_valid(pfn))
+#endif
+
#include <asm/memory.h>
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index 9fd61c72a33a..96ddaebaad56 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -52,6 +52,7 @@
#define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0))
#define PMD_SECT_AP_READ (_AT(pmdval_t, 0))
#define PMD_SECT_AP1 (_AT(pmdval_t, 1) << 6)
+#define PMD_SECT_AP2 (_AT(pmdval_t, 1) << 7)
#define PMD_SECT_TEX(x) (_AT(pmdval_t, 0))
/*
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index a31ecdad4b59..a1977728cb1b 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -132,6 +132,8 @@
#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
+#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
+
/*
* Hyp-mode PL2 PTE definitions for LPAE.
*/
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 3b30062975b2..c0ee50d7f876 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -120,18 +120,32 @@ extern pgprot_t pgprot_s2_device;
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
#define pgprot_stronglyordered(prot) \
- __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
+
+#define pgprot_device(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_NONSHARED)
+
+#define pgprot_writethroughcache(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_WRITETHROUGH)
+
+#define pgprot_writebackcache(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_WRITEBACK)
+
+#define pgprot_writebackwacache(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_WRITEALLOC)
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
+#define COHERENT_IS_NORMAL 1
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
+#define COHERENT_IS_NORMAL 0
#endif
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 8a1e8e995dae..b2c5d790840a 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -30,6 +30,9 @@
#define STACK_TOP_MAX TASK_SIZE
#endif
+extern unsigned int boot_reason;
+extern unsigned int cold_boot;
+
struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
struct perf_event *hbp[ARM_MAX_HBP_SLOTS];
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index e0adb9f1bf94..5eb31adc2833 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -21,6 +21,37 @@
#define __tagtable(tag, fn) \
static const struct tagtable __tagtable_##fn __tag = { tag, fn }
+/*
+ * Memory map description
+ */
+#if !defined(NR_BANKS)
+#define NR_BANKS 16
+#endif
+
+struct membank {
+ phys_addr_t start;
+ phys_addr_t size;
+ unsigned int highmem;
+};
+
+struct meminfo {
+ int nr_banks;
+ struct membank bank[NR_BANKS];
+};
+
+extern struct meminfo meminfo;
+
+#define for_each_bank(iter,mi) \
+ for (iter = 0; iter < (mi)->nr_banks; iter++)
+
+#define bank_pfn_start(bank) __phys_to_pfn((bank)->start)
+#define bank_pfn_end(bank) (__phys_to_pfn((bank)->start) + \
+ __phys_to_pfn((bank)->size))
+#define bank_pfn_size(bank) ((bank)->size >> PAGE_SHIFT)
+#define bank_phys_start(bank) (bank)->start
+#define bank_phys_end(bank) ((bank)->start + (bank)->size)
+#define bank_phys_size(bank) (bank)->size
+
extern int arm_add_memory(u64 start, u64 size);
extern void early_print(const char *str, ...);
extern void dump_machine_table(void);
diff --git a/arch/arm/include/asm/smcmod.h b/arch/arm/include/asm/smcmod.h
new file mode 100644
index 000000000000..6225c1e78421
--- /dev/null
+++ b/arch/arm/include/asm/smcmod.h
@@ -0,0 +1,165 @@
+/* Qualcomm SMC Module API */
+
+#ifndef __SMCMOD_H_
+#define __SMCMOD_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define SMCMOD_DEV "smcmod"
+
+#define SMCMOD_REG_REQ_MAX_ARGS 2
+
+/**
+ * struct smcmod_reg_req - for SMC register ioctl request
+ *
+ * @service_id - requested service.
+ * @command_id - requested command.
+ * @num_args - number of arguments.
+ * @args - argument(s) to be passed to the secure world.
+ * @return_val - return value from secure world operation.
+ */
+struct smcmod_reg_req {
+ uint32_t service_id; /* in */
+ uint32_t command_id; /* in */
+ uint8_t num_args; /* in */
+ uint32_t args[SMCMOD_REG_REQ_MAX_ARGS]; /* in */
+ uint32_t return_val; /* out */
+};
+
+/**
+ * struct smcmod_buf_req - for SMC buffer ioctl request
+ *
+ * @service_id - requested service.
+ * @command_id - requested command.
+ * @ion_cmd_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @cmd_len - length of command data buffer in bytes.
+ * @ion_resp_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @resp_len - length of response data buffer in bytes.
+ * @return_val - return value from secure world operation.
+ */
+struct smcmod_buf_req {
+ uint32_t service_id;/* in */
+ uint32_t command_id; /* in */
+ int32_t ion_cmd_fd; /* in */
+ uint32_t cmd_len; /* in */
+ int32_t ion_resp_fd; /* in */
+ uint32_t resp_len; /* in */
+ uint32_t return_val; /* out */
+};
+
+/**
+ * struct smcmod_cipher_req - for SMC cipher command ioctl
+ *
+ * @algorithm - specifies the cipher algorithm.
+ * @operation - specifies encryption or decryption.
+ * @mode - specifies cipher mode.
+ * @ion_key_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @key_size - key size in bytes.
+ * @ion_plain_text_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @plain_text_size - size of plain text in bytes.
+ * @ion_cipher_text_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @cipher_text_size - cipher text size in bytes.
+ * @ion_init_vector_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @init_vector_size - size of initialization vector in bytes.
+ * @key_is_null - indicates that the key is null.
+ * @return_val - return value from secure world operation.
+ */
+struct smcmod_cipher_req {
+ uint32_t algorithm; /* in */
+ uint32_t operation; /* in */
+ uint32_t mode; /* in */
+ int32_t ion_key_fd; /* in */
+ uint32_t key_size; /* in */
+ int32_t ion_plain_text_fd; /* in (encrypt)/out (decrypt) */
+ uint32_t plain_text_size; /* in */
+ int32_t ion_cipher_text_fd; /* out (encrypt)/in (decrypt) */
+ uint32_t cipher_text_size; /* in */
+ int32_t ion_init_vector_fd; /* in */
+ uint32_t init_vector_size; /* in */
+ uint32_t key_is_null; /* in */
+ uint32_t return_val; /* out */
+};
+
+/**
+ * struct smcmod_msg_digest_req - for message digest command ioctl
+ *
+ * @algorithm - specifies the cipher algorithm.
+ * @ion_key_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @key_size - hash key size in bytes.
+ * @ion_input_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @input_size - input data size in bytes.
+ * @ion_output_fd - fd obtained from ION_IOC_MAP or ION_IOC_SHARE.
+ * @output_size - size of output buffer in bytes.
+ * @fixed_block - indicates whether this is a fixed block digest.
+ * @key_is_null - indicates that the key is null.
+ * @return_val - return value from secure world operation.
+ */
+struct smcmod_msg_digest_req {
+ uint32_t algorithm; /* in */
+ int32_t ion_key_fd; /* in */
+ uint32_t key_size; /* in */
+ int32_t ion_input_fd; /* in */
+ uint32_t input_size; /* in */
+ int32_t ion_output_fd; /* in/out */
+ uint32_t output_size; /* in */
+ uint32_t fixed_block; /* in */
+ uint32_t key_is_null; /* in */
+ uint32_t return_val; /* out */
+} __packed;
+
+/**
+ * struct smcmod_decrypt_req - used to decrypt image fragments.
+ * @service_id - requested service.
+ * @command_id - requested command.
+ * @operation - specifies metadata parsing or image fragment decrypting.
+ * @request - describes request parameters depending on operation.
+ * @response - this is the response of the request.
+ */
+struct smcmod_decrypt_req {
+ uint32_t service_id;
+ uint32_t command_id;
+#define SMCMOD_DECRYPT_REQ_OP_METADATA 1
+#define SMCMOD_DECRYPT_REQ_OP_IMG_FRAG 2
+ uint32_t operation;
+ union {
+ struct {
+ uint32_t len;
+ uint32_t ion_fd;
+ } metadata;
+ struct {
+ uint32_t ctx_id;
+ uint32_t last_frag;
+ uint32_t frag_len;
+ uint32_t ion_fd;
+ uint32_t offset;
+ } img_frag;
+ } request;
+ union {
+ struct {
+ uint32_t status;
+ uint32_t ctx_id;
+ uint32_t end_offset;
+ } metadata;
+ struct {
+ uint32_t status;
+ } img_frag;
+ } response;
+};
+
+#define SMCMOD_IOC_MAGIC 0x97
+
+/* Number chosen to avoid any conflicts */
+#define SMCMOD_IOCTL_SEND_REG_CMD \
+ _IOWR(SMCMOD_IOC_MAGIC, 32, struct smcmod_reg_req)
+#define SMCMOD_IOCTL_SEND_BUF_CMD \
+ _IOWR(SMCMOD_IOC_MAGIC, 33, struct smcmod_buf_req)
+#define SMCMOD_IOCTL_SEND_CIPHER_CMD \
+ _IOWR(SMCMOD_IOC_MAGIC, 34, struct smcmod_cipher_req)
+#define SMCMOD_IOCTL_SEND_MSG_DIGEST_CMD \
+ _IOWR(SMCMOD_IOC_MAGIC, 35, struct smcmod_msg_digest_req)
+#define SMCMOD_IOCTL_GET_VERSION _IOWR(SMCMOD_IOC_MAGIC, 36, uint32_t)
+#define SMCMOD_IOCTL_SEND_DECRYPT_CMD \
+ _IOWR(SMCMOD_IOC_MAGIC, 37, struct smcmod_decrypt_req)
+
+#endif /* __SMCMOD_H_ */
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 18f5a554134f..d9e2bacde208 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -80,6 +80,7 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
extern int register_ipi_completion(struct completion *completion, int cpu);
+extern void smp_send_all_cpu_backtrace(void);
struct smp_operations {
#ifdef CONFIG_SMP
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index ac4bfae26702..6a0826328286 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -7,21 +7,17 @@
#include <linux/prefetch.h>
+extern int msm_krait_need_wfe_fixup;
+
/*
* sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
* extensions, so when running on UP, we have to patch these instructions away.
*/
#ifdef CONFIG_THUMB2_KERNEL
/*
- * For Thumb-2, special care is needed to ensure that the conditional WFE
- * instruction really does assemble to exactly 4 bytes (as required by
- * the SMP_ON_UP fixup code). By itself "wfene" might cause the
- * assembler to insert a extra (16-bit) IT instruction, depending on the
- * presence or absence of neighbouring conditional instructions.
- *
- * To avoid this unpredictableness, an approprite IT is inserted explicitly:
- * the assembler won't change IT instructions which are explicitly present
- * in the input.
+ * Both instructions given to the ALT_SMP macro need to be the same size, to
+ * allow the SMP_ON_UP fixups to function correctly. Hence the explicit encoding
+ * specifications.
*/
#define WFE(cond) __ALT_SMP_ASM( \
"it " cond "\n\t" \
@@ -33,6 +29,33 @@
#define WFE(cond) __ALT_SMP_ASM("wfe" cond, "nop")
#endif
+/*
+ * The fixup involves disabling FIQs during execution of the WFE instruction.
+ * This could potentially lead to deadlock if a thread is trying to acquire a
+ * spinlock which is being released from an FIQ. This should not be a problem
+ * because FIQs are handled by the secure environment and do not directly
+ * manipulate spinlocks.
+ */
+#ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
+#define WFE_SAFE(fixup, tmp) \
+" mrs " tmp ", cpsr\n" \
+" cmp " fixup ", #0\n" \
+" wfeeq\n" \
+" beq 10f\n" \
+" cpsid f\n" \
+" mrc p15, 7, " fixup ", c15, c0, 5\n" \
+" bic " fixup ", " fixup ", #0x10000\n" \
+" mcr p15, 7, " fixup ", c15, c0, 5\n" \
+" isb\n" \
+" wfe\n" \
+" orr " fixup ", " fixup ", #0x10000\n" \
+" mcr p15, 7, " fixup ", c15, c0, 5\n" \
+" isb\n" \
+"10: msr cpsr_cf, " tmp "\n"
+#else
+#define WFE_SAFE(fixup, tmp) " wfe\n"
+#endif
+
#define SEV __ALT_SMP_ASM(WASM(sev), WASM(nop))
static inline void dsb_sev(void)
@@ -57,7 +80,7 @@ static inline void dsb_sev(void)
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
- unsigned long tmp;
+ unsigned long tmp, flags = 0;
u32 newval;
arch_spinlock_t lockval;
@@ -73,7 +96,33 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
: "cc");
while (lockval.tickets.next != lockval.tickets.owner) {
+ if (msm_krait_need_wfe_fixup) {
+ local_save_flags(flags);
+ local_fiq_disable();
+ __asm__ __volatile__(
+ "mrc p15, 7, %0, c15, c0, 5\n"
+ : "=r" (tmp)
+ :
+ : "cc");
+ tmp &= ~(0x10000);
+ __asm__ __volatile__(
+ "mcr p15, 7, %0, c15, c0, 5\n"
+ :
+ : "r" (tmp)
+ : "cc");
+ isb();
+ }
wfe();
+ if (msm_krait_need_wfe_fixup) {
+ tmp |= 0x10000;
+ __asm__ __volatile__(
+ "mcr p15, 7, %0, c15, c0, 5\n"
+ :
+ : "r" (tmp)
+ : "cc");
+ isb();
+ local_irq_restore(flags);
+ }
lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
}
@@ -140,17 +189,19 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
static inline void arch_write_lock(arch_rwlock_t *rw)
{
- unsigned long tmp;
+ unsigned long tmp, fixup = msm_krait_need_wfe_fixup;
prefetchw(&rw->lock);
__asm__ __volatile__(
-"1: ldrex %0, [%1]\n"
+"1: ldrex %0, [%2]\n"
" teq %0, #0\n"
- WFE("ne")
-" strexeq %0, %2, [%1]\n"
+" beq 2f\n"
+ WFE_SAFE("%1", "%0")
+"2:\n"
+" strexeq %0, %3, [%2]\n"
" teq %0, #0\n"
" bne 1b"
- : "=&r" (tmp)
+ : "=&r" (tmp), "+r" (fixup)
: "r" (&rw->lock), "r" (0x80000000)
: "cc");
@@ -211,17 +262,19 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
*/
static inline void arch_read_lock(arch_rwlock_t *rw)
{
- unsigned long tmp, tmp2;
+ unsigned long tmp, tmp2, fixup = msm_krait_need_wfe_fixup;
prefetchw(&rw->lock);
__asm__ __volatile__(
-"1: ldrex %0, [%2]\n"
+"1: ldrex %0, [%3]\n"
" adds %0, %0, #1\n"
-" strexpl %1, %0, [%2]\n"
- WFE("mi")
+" strexpl %1, %0, [%3]\n"
+" bpl 2f\n"
+ WFE_SAFE("%2", "%0")
+"2:\n"
" rsbpls %0, %1, #0\n"
" bmi 1b"
- : "=&r" (tmp), "=&r" (tmp2)
+ : "=&r" (tmp), "=&r" (tmp2), "+r" (fixup)
: "r" (&rw->lock)
: "cc");
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
index a3d61ad984af..062c48452148 100644
--- a/arch/arm/include/asm/system_misc.h
+++ b/arch/arm/include/asm/system_misc.h
@@ -21,6 +21,7 @@ extern void (*arm_pm_idle)(void);
#define UDBG_BUS (1 << 4)
extern unsigned int user_debug;
+extern char* (*arch_read_hardware_id)(void);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index def9e570199f..7c51bf6a2cba 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -519,7 +519,11 @@ static inline void __flush_tlb_kernel_page(unsigned long kaddr)
dsb(ishst);
__local_flush_tlb_kernel_page(kaddr);
+#ifdef CONFIG_ARCH_MSM8X60
+ tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", kaddr);
+#else
tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr);
+#endif
if (tlb_flag(TLB_BARRIER)) {
dsb(ish);
diff --git a/arch/arm/include/uapi/asm/posix_types.h b/arch/arm/include/uapi/asm/posix_types.h
index d2de9cbbcd9b..1db85c99abdd 100644
--- a/arch/arm/include/uapi/asm/posix_types.h
+++ b/arch/arm/include/uapi/asm/posix_types.h
@@ -22,6 +22,9 @@
typedef unsigned short __kernel_mode_t;
#define __kernel_mode_t __kernel_mode_t
+typedef unsigned short __kernel_nlink_t;
+#define __kernel_nlink_t __kernel_nlink_t
+
typedef unsigned short __kernel_ipc_pid_t;
#define __kernel_ipc_pid_t __kernel_ipc_pid_t
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 38ddd9f83d0e..3e55f2ca818d 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -15,7 +15,7 @@ CFLAGS_REMOVE_return_address.o = -pg
# Object file lists.
-obj-y := elf.o entry-common.o irq.o opcodes.o \
+obj-y := elf.o entry-armv.o entry-common.o irq.o opcodes.o \
process.o ptrace.o return_address.o \
setup.o signal.o sigreturn_codes.o \
stacktrace.o sys_arm.o time.o traps.o
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 11c54de9f8cf..1e93787a4b43 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -27,6 +27,22 @@
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+#ifndef CONFIG_ARM_LPAE
+ if (base > ((phys_addr_t)~0)) {
+ pr_crit("Ignoring memory at 0x%08llx due to lack of LPAE support\n",
+ base);
+ return;
+ }
+
+ if (size > ((phys_addr_t)~0))
+ size = ((phys_addr_t)~0);
+
+ /* arm_add_memory() already checks for the case of base + size > 4GB */
+#endif
+ arm_add_memory(base, size);
+}
#ifdef CONFIG_SMP
extern struct of_cpu_method __cpu_method_of_table[];
@@ -169,10 +185,14 @@ void __init arm_dt_init_cpu_maps(void)
* a reg property, the DT CPU list can be considered valid and the
* logical map created in smp_setup_processor_id() can be overridden
*/
- for (i = 0; i < cpuidx; i++) {
- set_cpu_possible(i, true);
- cpu_logical_map(i) = tmp_map[i];
- pr_debug("cpu logical map 0x%x\n", cpu_logical_map(i));
+ for (i = 0; i < nr_cpu_ids; i++) {
+ if (i < cpuidx) {
+ set_cpu_possible(i, true);
+ cpu_logical_map(i) = tmp_map[i];
+ pr_debug("cpu logical map 0x%x\n", cpu_logical_map(i));
+ } else {
+ set_cpu_possible(i, false);
+ }
}
}
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 2f5555d307b3..1135c24babac 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -832,6 +832,97 @@ ENDPROC(__switch_to)
.align 5
.globl __kuser_helper_start
__kuser_helper_start:
+#ifdef GENERIC_TIME_VSYSCALL
+/*
+ * Reference declaration:
+ *
+ * extern struct timezone __kernel_helper_gtod_timezone
+ * extern unsigned int __kernel_helper_gtod_seqnum
+ *
+ * Definition and user space usage example:
+ *
+ * #define __kernel_helper_gtod_timezone (*(unsigned int*)0xffff0f20)
+ * #define __kernel_helper_gtod_seqnum (*(unsigned int*)0xffff0f28)
+ *
+ * unsigned int prelock, postlock ;
+ * do {
+ * prelock = __kernel_helper_gtod_seqnum;
+ * memcpy(&tz, (void*)&(__kernel_helper_gtod_timezone),
+ * sizeof(struct timezone)) ;
+ * postlock = __kernel_helper_gtod_seqnum;
+ * } while (prelock != postlock);
+ *
+ * 0xffff0f20-3: tz_minuteswest
+ * 0xffff0f24-7: tz_dsttime
+ * 0xffff0f28-b: sequence #.
+ * 0xffff0f30-3: offset into CONFIG_USER_ACCESSIBLE_TIMER_BASE to get the timer.
+ * 0xffff0f34-7: Feature flag
+ * 0xffff0f38-b: wall-to-monotonic: tv_sec
+ * 0xffff0f3c-f: wall-to-monotonic: tv_nsec
+ */
+ .globl __kuser_gtod_timezone
+__kuser_gtod_timezone: @0xffff0f20
+ .word 0
+ .word 0
+ .word 0
+ .word 0
+ .word 0
+ /* This offset is where the flag to enable the
+ * user accessible timers is located.
+ */
+ .word 0
+ .word 0
+ .word 0
+ .align 5
+
+/*
+ * Reference declaration:
+ *
+ * extern struct timeval __kernel_helper_gtod_timeval
+ * extern unsigned int __kernel_helper_gtod_seqnum
+ *
+ * Definition and user space usage example:
+ *
+ * #define __kernel_helper_gtod_timeval (*(unsigned int*)0xffff0f40)
+ * #define __kernel_helper_gtod_seqnum (*(unsigned int*)0xffff0f48)
+ *
+ * unsigned int prelock, postlock ;
+ * struct gtod {
+ * uint64_t cycle_last;
+ * uint64_t mask;
+ * uint32_t mult;
+ * uint32_t shift;
+ * uint32_t tv_sec;
+ * uint32_t tv_nsec;
+ * };
+ * struct gtod gdtod;
+ *
+ * do {
+ * prelock = __kernel_helper_gtod_seqnum;
+ * memcpy(&gdtod, (void*)&(__kernel_helper_gtod_timeval),
+ * sizeof(struct gtod)) ;
+ * postlock = __kernel_helper_gtod_seqnum;
+ * } while (prelock != postlock);
+ *
+ * 0xffff0f40-7: cycle_last
+ * 0xffff0f48-f: mask
+ * 0xffff0f50-3: mult
+ * 0xffff0f54-7: shift
+ * 0xffff0f58-b: tv_sec
+ * 0xffff0f5c-f: tv_nsec
+ */
+ .globl __kuser_gtod_timeval
+__kuser_gtod_timeval: @0xffff0f40
+ .word 0
+ .word 0
+ .word 0
+ .word 0
+ .word 0
+ .word 0
+ .word 0
+ .word 0
+ .align 5
+#endif
/*
* Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
@@ -1032,7 +1123,7 @@ __kuser_helper_end:
* SP points to a minimal amount of processor-private memory, the address
* of which is copied into r0 for the mode specific abort handler.
*/
- .macro vector_stub, name, mode, correction=0
+ .macro vector_stub, name, mode, fixup, correction=0
.align 5
vector_\name:
@@ -1061,6 +1152,18 @@ vector_\name:
and lr, lr, #0x0f
THUMB( adr r0, 1f )
THUMB( ldr lr, [r0, lr, lsl #2] )
+ .if \fixup
+#ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
+ ldr r0, .krait_fixup
+ ldr r0, [r0]
+ cmp r0, #0
+ beq 10f
+ mrc p15, 7, r0, c15, c0, 5
+ orr r0, r0, #0x10000
+ mcr p15, 7, r0, c15, c0, 5
+10: isb
+#endif
+ .endif
mov r0, sp
ARM( ldr lr, [pc, lr, lsl #2] )
movs pc, lr @ branch to handler in SVC mode
@@ -1085,7 +1188,7 @@ vector_rst:
/*
* Interrupt dispatcher
*/
- vector_stub irq, IRQ_MODE, 4
+ vector_stub irq, IRQ_MODE, 1, 4
.long __irq_usr @ 0 (USR_26 / USR_32)
.long __irq_invalid @ 1 (FIQ_26 / FIQ_32)
@@ -1108,7 +1211,7 @@ vector_rst:
* Data abort dispatcher
* Enter in ABT mode, spsr = USR CPSR, lr = USR PC
*/
- vector_stub dabt, ABT_MODE, 8
+ vector_stub dabt, ABT_MODE, 0, 8
.long __dabt_usr @ 0 (USR_26 / USR_32)
.long __dabt_invalid @ 1 (FIQ_26 / FIQ_32)
@@ -1131,7 +1234,7 @@ vector_rst:
* Prefetch abort dispatcher
* Enter in ABT mode, spsr = USR CPSR, lr = USR PC
*/
- vector_stub pabt, ABT_MODE, 4
+ vector_stub pabt, ABT_MODE, 0, 4
.long __pabt_usr @ 0 (USR_26 / USR_32)
.long __pabt_invalid @ 1 (FIQ_26 / FIQ_32)
@@ -1154,7 +1257,7 @@ vector_rst:
* Undef instr entry dispatcher
* Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
*/
- vector_stub und, UND_MODE
+ vector_stub und, UND_MODE, 0
.long __und_usr @ 0 (USR_26 / USR_32)
.long __und_invalid @ 1 (FIQ_26 / FIQ_32)
@@ -1210,6 +1313,9 @@ vector_addrexcptn:
.long __fiq_svc @ e
.long __fiq_svc @ f
+.krait_fixup:
+ .word msm_krait_need_wfe_fixup
+
.globl vector_fiq_offset
.equ vector_fiq_offset, vector_fiq
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index b37752a96652..00b0d8e8949b 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -39,6 +39,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/seq_file.h>
#include <asm/cacheflush.h>
@@ -147,6 +148,11 @@ void disable_fiq(int fiq)
disable_irq(fiq + fiq_start);
}
+void fiq_set_type(int fiq, unsigned int type)
+{
+ irq_set_irq_type(fiq + FIQ_START, type);
+}
+
EXPORT_SYMBOL(set_fiq_handler);
EXPORT_SYMBOL(__set_fiq_regs); /* defined in fiqasm.S */
EXPORT_SYMBOL(__get_fiq_regs); /* defined in fiqasm.S */
@@ -154,6 +160,7 @@ EXPORT_SYMBOL(claim_fiq);
EXPORT_SYMBOL(release_fiq);
EXPORT_SYMBOL(enable_fiq);
EXPORT_SYMBOL(disable_fiq);
+EXPORT_SYMBOL(fiq_set_type);
void __init init_FIQ(int start)
{
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 664eee8c4a26..1a9d952636ee 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -467,6 +467,17 @@ ENTRY(__turn_mmu_on)
mrc p15, 0, r3, c0, c0, 0 @ read id reg
instr_sync
mov r3, r3
+#ifdef CONFIG_ARCH_MSM_KRAIT
+ movw r3, 0xff0d
+ movt r3, 0xffff
+ and r3, r9, r3
+ movw r4, 0x0400
+ movt r4, 0x511f
+ cmp r3, r4
+ mrceq p15, 7, r3, c15, c0, 2
+ biceq r3, r3, #0x400
+ mcreq p15, 7, r3, c15, c0, 2
+#endif
mov r3, r13
ret r3
__turn_mmu_on_end:
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index b5b452f90f76..6b35b54363e9 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -227,6 +227,17 @@ static int get_num_brps(void)
return core_has_mismatch_brps() ? brps - 1 : brps;
}
+/* Determine if halting mode is enabled */
+static int halting_mode_enabled(void)
+{
+ u32 dscr;
+ ARM_DBG_READ(c0, c1, 0, dscr);
+ WARN_ONCE(dscr & ARM_DSCR_HDBGEN,
+ "halting debug mode enabled. "
+ "Unable to access hardware resources.\n");
+ return !!(dscr & ARM_DSCR_HDBGEN);
+}
+
/*
* In order to access the breakpoint/watchpoint control registers,
* we must be running in debug monitor mode. Unfortunately, we can
@@ -932,6 +943,17 @@ static void reset_ctrl_regs(void *unused)
u32 val;
/*
+ * Bail out without clearing the breakpoint registers if halting
+ * debug mode or monitor debug mode is enabled. Checking for monitor
+ * debug mode here ensures we don't clear the breakpoint registers
+ * across power collapse if save and restore code has already
+ * preserved the debug register values or they weren't lost and
+ * monitor mode was already enabled earlier.
+ */
+ if (halting_mode_enabled() || monitor_mode_enabled())
+ return;
+
+ /*
* v7 debug contains save and restore registers so that debug state
* can be maintained across low-power modes without leaving the debug
* logic powered up. It is IMPLEMENTATION DEFINED whether we can access
diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
index 07314af47733..7a27c47dad5b 100644
--- a/arch/arm/kernel/patch.c
+++ b/arch/arm/kernel/patch.c
@@ -5,6 +5,7 @@
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
#include <asm/opcodes.h>
+#include <asm/mmu_writeable.h>
#include "patch.h"
@@ -17,6 +18,10 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
{
bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
int size;
+ unsigned long flags;
+
+ mem_text_writeable_spinlock(&flags);
+ mem_text_address_writeable((unsigned long)addr);
if (thumb2 && __opcode_is_thumb16(insn)) {
*(u16 *)addr = __opcode_to_mem_thumb16(insn);
@@ -42,6 +47,9 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
flush_icache_range((uintptr_t)(addr),
(uintptr_t)(addr) + size);
+
+ mem_text_address_restore();
+ mem_text_writeable_spinunlock(&flags);
}
static int __kprobes patch_text_stop_machine(void *data)
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index c03106378b49..9ff5825b4e03 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -30,6 +30,7 @@
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
+#include <linux/dma-mapping.h>
#include <asm/unified.h>
#include <asm/cp15.h>
@@ -77,7 +78,7 @@ extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *,
struct proc_info_list *);
extern void sanity_check_meminfo(void);
-extern enum reboot_mode reboot_mode;
+//extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);
unsigned int processor_id;
@@ -104,6 +105,14 @@ EXPORT_SYMBOL(elf_hwcap);
unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);
+unsigned int boot_reason;
+EXPORT_SYMBOL(boot_reason);
+
+unsigned int cold_boot;
+EXPORT_SYMBOL(cold_boot);
+
+char* (*arch_read_hardware_id)(void);
+EXPORT_SYMBOL(arch_read_hardware_id);
#ifdef MULTI_CPU
struct processor processor __read_mostly;
@@ -890,6 +899,15 @@ void __init hyp_mode_check(void)
#endif
}
+#if 0
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+ const struct membank *a = _a, *b = _b;
+ long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+ return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
+#endif
+
void __init setup_arch(char **cmdline_p)
{
const struct machine_desc *mdesc;
@@ -901,8 +919,9 @@ void __init setup_arch(char **cmdline_p)
machine_desc = mdesc;
machine_name = mdesc->name;
- if (mdesc->reboot_mode != REBOOT_HARD)
- reboot_mode = mdesc->reboot_mode;
+// if (mdesc->reboot_mode != REBOOT_HARD)
+// reboot_mode = mdesc->reboot_mode;
+ setup_dma_zone(mdesc);
init_mm.start_code = (unsigned long) _text;
init_mm.end_code = (unsigned long) _etext;
@@ -917,6 +936,10 @@ void __init setup_arch(char **cmdline_p)
early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
setup_dma_zone(mdesc);
+ if (mdesc->init_very_early)
+ mdesc->init_very_early();
+
+// sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
sanity_check_meminfo();
arm_memblock_init(mdesc);
@@ -942,6 +965,7 @@ void __init setup_arch(char **cmdline_p)
smp_build_mpidr_hash();
}
#endif
+ arm_dt_init_cpu_maps();
if (!is_smp())
hyp_mode_check();
@@ -1032,7 +1056,7 @@ static int c_show(struct seq_file *m, void *v)
int i, j;
u32 cpuid;
- for_each_online_cpu(i) {
+ for_each_present_cpu(i) {
/*
* glibc reads /proc/cpuinfo to determine the number of
* online processors, looking for lines beginning with
@@ -1077,10 +1101,15 @@ static int c_show(struct seq_file *m, void *v)
seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
}
- seq_printf(m, "Hardware\t: %s\n", machine_name);
+ if (!arch_read_hardware_id)
+ seq_printf(m, "Hardware\t: %s\n", machine_name);
+ else
+ seq_printf(m, "Hardware\t: %s\n", arch_read_hardware_id());
seq_printf(m, "Revision\t: %04x\n", system_rev);
seq_printf(m, "Serial\t\t: %08x%08x\n",
system_serial_high, system_serial_low);
+ seq_printf(m, "Processor\t: %s rev %d (%s)\n",
+ cpu_name, read_cpuid_id() & 15, elf_platform);
return 0;
}
@@ -1106,3 +1135,9 @@ const struct seq_operations cpuinfo_op = {
.stop = c_stop,
.show = c_show
};
+
+void arch_setup_pdev_archdata(struct platform_device *pdev)
+{
+ pdev->archdata.dma_mask = DMA_BIT_MASK(32);
+ pdev->dev.dma_mask = &pdev->archdata.dma_mask;
+}
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 13396d3d600e..45f39d0a859f 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -26,6 +26,7 @@
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>
+#include <linux/smp.h>
#include <linux/atomic.h>
#include <asm/smp.h>
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 8e95aa47457a..2d2c9ad182c9 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -8,7 +8,10 @@
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
-
+#ifdef CONFIG_STRICT_MEMORY_RWX
+#include <asm/pgtable.h>
+#endif
+
#define PROC_INFO \
. = ALIGN(4); \
VMLINUX_SYMBOL(__proc_info_begin) = .; \
@@ -90,6 +93,10 @@ SECTIONS
_text = .;
HEAD_TEXT
}
+#ifdef CONFIG_STRICT_MEMORY_RWX
+ . = ALIGN(1<<SECTION_SHIFT);
+#endif
+
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
__exception_text_start = .;
@@ -112,6 +119,9 @@ SECTIONS
ARM_CPU_KEEP(PROC_INFO)
}
+#ifdef CONFIG_STRICT_MEMORY_RWX
+ . = ALIGN(1<<SECTION_SHIFT);
+#endif
RO_DATA(PAGE_SIZE)
. = ALIGN(4);
@@ -145,7 +155,11 @@ SECTIONS
_etext = .; /* End of text and rodata section */
#ifndef CONFIG_XIP_KERNEL
+#ifdef CONFIG_STRICT_MEMORY_RWX
+ . = ALIGN(1<<SECTION_SHIFT);
+#else
. = ALIGN(PAGE_SIZE);
+#endif
__init_begin = .;
#endif
/*
@@ -173,6 +187,9 @@ SECTIONS
.init.proc.info : {
ARM_CPU_DISCARD(PROC_INFO)
}
+#ifdef CONFIG_STRICT_MEMORY_RWX
+ . = ALIGN(1<<SECTION_SHIFT);
+#endif
.init.arch.info : {
__arch_info_begin = .;
*(.arch.info.init)
@@ -202,6 +219,7 @@ SECTIONS
INIT_SETUP(16)
INIT_CALLS
CON_INITCALL
+ COMPAT_EXPORTS
SECURITY_INITCALL
INIT_RAM_FS
}
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index ee5697ba05bc..dbe148b0e625 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -23,6 +23,40 @@ config ARCH_MSM8974
bool "Enable support for MSM8974"
select HAVE_ARM_ARCH_TIMER
+config ARCH_MSM8916
+ bool "MSM8916"
+ select ARCH_MSM_CORTEXMP
+ select HAVE_GENERIC_HARDIRQS
+ select USE_GENERIC_CPU_HELPERS
+ select ARM_GIC
+ select MULTI_IRQ_HANDLER
+ select CPU_V7
+ select HAVE_ARM_ARCH_TIMER
+ select MAY_HAVE_SPARSE_IRQ
+ select SPARSE_IRQ
+ select HAVE_CLK
+ select HAVE_CLK_PREPARE
+ select CLKDEV_LOOKUP
+ select PINCTRL
+ select PINCTRL_MSM_TLMM_V4
+ select USE_PINCTRL_IRQ
+ select MSM_PM if PM
+ select MSM_RPM_SMD
+ select MEMORY_HOLE_CARVEOUT
+ select DONT_MAP_HOLE_AFTER_MEMBANK0
+ select QMI_ENCDEC
+ select MSM_IRQ
+ select MSM_CORTEX_A53
+ #select CPU_FREQ_MSM
+ #select CPU_FREQ
+ select PM_DEVFREQ
+ select MSM_DEVFREQ_CPUBW
+ select ARM_HAS_SG_CHAIN
+ select ARCH_WANT_KMAP_ATOMIC_FLUSH
+ select SOC_BUS
+ select MSM_SCM
+ select MSM_SPM_V2
+
config QCOM_SCM
bool
diff --git a/arch/arm/mach-qcom/Makefile b/arch/arm/mach-qcom/Makefile
index 8f756ae1ae31..55dba28ca36a 100644
--- a/arch/arm/mach-qcom/Makefile
+++ b/arch/arm/mach-qcom/Makefile
@@ -1,5 +1,5 @@
obj-y := board.o
-obj-$(CONFIG_SMP) += platsmp.o
+obj-$(CONFIG_SMP) += platsmp.o headsmp.o hotplug.o
obj-$(CONFIG_QCOM_SCM) += scm.o scm-boot.o
CFLAGS_scm.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1)
diff --git a/arch/arm/mach-qcom/board.c b/arch/arm/mach-qcom/board.c
index 6d8bbf7d39d8..5a5b9eb48f90 100644
--- a/arch/arm/mach-qcom/board.c
+++ b/arch/arm/mach-qcom/board.c
@@ -11,9 +11,42 @@
*/
#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_fdt.h>
+#include <linux/of_irq.h>
+#include <linux/cma.h>
+#include <soc/qcom/socinfo.h>
+#include <soc/qcom/rpm-smd.h>
+#include <soc/qcom/smd.h>
+#include <soc/qcom/smem.h>
+#include <soc/qcom/spm.h>
+#include <soc/qcom/pm.h>
+
+#include <linux/regulator/spm-regulator.h>
+#include <linux/regulator/qpnp-regulator.h>
+
+#include <asm/mach/map.h>
#include <asm/mach/arch.h>
+#include "platsmp.h"
+
+#define MSM_CHIP_DEVICE_TYPE(name, chip, mem_type) { \
+ .virtual = (unsigned long) MSM_##name##_BASE, \
+ .pfn = __phys_to_pfn(chip##_##name##_PHYS), \
+ .length = chip##_##name##_SIZE, \
+ .type = mem_type, \
+ }
+#define MSM_CHIP_DEVICE(name, chip) \
+ MSM_CHIP_DEVICE_TYPE(name, chip, MT_DEVICE)
+
+#define MSM_APCS_GCC_BASE IOMEM(0xFA006000)
+#define MSM8916_APCS_GCC_PHYS 0xB011000
+#define MSM8916_APCS_GCC_SIZE SZ_4K
+
+
static const char * const qcom_dt_match[] __initconst = {
"qcom,apq8064",
"qcom,apq8074-dragonboard",
@@ -22,9 +55,49 @@ static const char * const qcom_dt_match[] __initconst = {
"qcom,ipq8064",
"qcom,msm8660-surf",
"qcom,msm8960-cdp",
+ "qcom,msm8916",
+
NULL
};
+/*
+ * msm8916_init() - machine init hook for MSM8916 boards.
+ *
+ * Brings up core SoC services.  The call order below is load-bearing
+ * (see the smem/socinfo comment); NOTE(review): the later calls are
+ * presumably also order-dependent on SMD/RPM being up - confirm before
+ * reordering any of them.
+ */
+static void __init msm8916_init(void)
+{
+ /*
+  * populate devices from DT first so smem probe will get called as part
+  * of msm_smem_init. socinfo_init needs smem support so call
+  * msm_smem_init before it.
+  */
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ msm_smem_init();
+
+ if (socinfo_init() < 0)
+  pr_err("%s: socinfo_init() failed\n", __func__);
+
+ msm_smd_init();
+ msm_rpm_driver_init();
+ spm_regulator_init();
+ msm_spm_device_init();
+ qpnp_regulator_init();
+ msm_pm_sleep_status_init();
+
+}
+#ifdef CONFIG_ARCH_MSM8916
+/* Fixed VA->PA mapping for the APCS GCC block, built from the
+ * MSM_CHIP_DEVICE() macro above (MSM_APCS_GCC_BASE / MSM8916_APCS_GCC_PHYS).
+ */
+static struct map_desc msm8916_io_desc[] __initdata = {
+ MSM_CHIP_DEVICE(APCS_GCC, MSM8916),
+};
+
+/*
+ * Install the static I/O mappings above; debug_ll_io_init() additionally
+ * maps the low-level debug UART so early printk keeps working.
+ */
+void __init msm_map_msm8916_io(void)
+{
+ iotable_init(msm8916_io_desc, ARRAY_SIZE(msm8916_io_desc));
+ debug_ll_io_init();
+}
+#endif /* CONFIG_ARCH_MSM8916 */
+
+
+
DT_MACHINE_START(QCOM_DT, "Qualcomm (Flattened Device Tree)")
+ .map_io = msm_map_msm8916_io,
+ .init_machine = msm8916_init,
.dt_compat = qcom_dt_match,
+ .smp = &msm8916_smp_ops,
MACHINE_END
diff --git a/arch/arm/mach-qcom/headsmp.S b/arch/arm/mach-qcom/headsmp.S
new file mode 100644
index 000000000000..5d50606d3c3f
--- /dev/null
+++ b/arch/arm/mach-qcom/headsmp.S
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2003 ARM Limited
+ * All Rights Reserved
+ * Copyright (c) 2010, 2012, 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+ .arm
+
+__CPUINIT
+
+/*
+ * MSM specific entry point for secondary CPUs. This provides
+ * a "holding pen" into which all secondary cores are held until we're
+ * ready for them to initialise.
+ *
+ * This is executing in physical space with cache's off.
+ */
+ENTRY(msm_secondary_startup)
+THUMB( adr r9, BSYM(2f) ) @ Kernel is always entered in ARM.
+THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
+THUMB( .thumb ) @ switch to Thumb now.
+THUMB(2: )
+ mrc p15, 0, r0, c0, c0, 5 @ MPIDR
+ bic r0, #0xff000000 @ What CPU am I
+ adr r4, 1f @ address of
+ ldmia r4, {r5, r6} @ load curr addr and pen_rel addr
+ sub r4, r4, r5 @ determine virtual/phys offsets
+ add r6, r6, r4 @ apply
+pen:
+ @ Busy-poll pen_release (caches off); the boot CPU publishes our
+ @ hardware ID via write_pen_release(), which cleans it to memory.
+ ldr r7, [r6] @ pen_rel has cpu to remove from reset
+ cmp r7, r0 @ are we lucky?
+ bne pen
+
+ /*
+  * we've been released from the holding pen: secondary_stack
+  * should now contain the SVC stack for this core
+  */
+ b secondary_startup
+ENDPROC(msm_secondary_startup)
+
+ @ Literal pool: link-time address of itself and &pen_release, used
+ @ above to compute the phys/virt offset before the MMU is enabled.
+1: .long .
+ .long pen_release
diff --git a/arch/arm/mach-qcom/hotplug.c b/arch/arm/mach-qcom/hotplug.c
new file mode 100644
index 000000000000..4f28f4fc92f5
--- /dev/null
+++ b/arch/arm/mach-qcom/hotplug.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/notifier.h>
+#include <linux/msm_rtb.h>
+#include <soc/qcom/spm.h>
+#include <soc/qcom/pm.h>
+
+#include <asm/smp_plat.h>
+#include <asm/vfp.h>
+
+#include <soc/qcom/jtag.h>
+
+static cpumask_t cpu_dying_mask;
+
+static DEFINE_PER_CPU(unsigned int, warm_boot_flag);
+
+/*
+ * Intentionally empty hooks kept for symmetry with other ARM platforms;
+ * the actual low-power entry is expected to happen inside
+ * lpm_cpu_hotplug_enter() (see platform_do_lowpower() below).
+ */
+static inline void cpu_enter_lowpower(void)
+{
+}
+
+static inline void cpu_leave_lowpower(void)
+{
+}
+
+static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
+{
+ /* Just enter wfi for now. TODO: Properly shut off the cpu. */
+ for (;;) {
+
+ lpm_cpu_hotplug_enter(cpu);
+ if (pen_release == cpu_logical_map(cpu)) {
+ /*
+ * OK, proper wakeup, we're done
+ */
+ break;
+ }
+
+ /*
+ * getting here, means that we have come out of WFI without
+ * having been woken up - this shouldn't happen
+ *
+ * The trouble is, letting people know about this is not really
+ * possible, since we are currently running incoherently, and
+ * therefore cannot safely call printk() or anything else
+ */
+ (*spurious)++;
+ }
+}
+
+int msm_cpu_kill(unsigned int cpu)
+{
+ int ret = 0;
+
+ if (cpumask_test_and_clear_cpu(cpu, &cpu_dying_mask))
+ ret = msm_pm_wait_cpu_shutdown(cpu);
+
+ return ret ? 0 : 1;
+}
+
+/*
+ * platform-specific code to shutdown a CPU
+ *
+ * Called with IRQs disabled
+ */
+void __ref msm_cpu_die(unsigned int cpu)
+{
+ int spurious = 0;
+
+ if (unlikely(cpu != smp_processor_id())) {
+ pr_crit("%s: running on %u, should be %u\n",
+ __func__, smp_processor_id(), cpu);
+ BUG();
+ }
+ /*
+ * we're ready for shutdown now, so do it
+ */
+ cpu_enter_lowpower();
+ platform_do_lowpower(cpu, &spurious);
+
+ pr_debug("CPU%u: %s: normal wakeup\n", cpu, __func__);
+ cpu_leave_lowpower();
+
+ if (spurious)
+ pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
+}
+
+#define CPU_SHIFT 0
+#define CPU_MASK 0xF
+#define CPU_OF(n) (((n) & CPU_MASK) << CPU_SHIFT)
+#define CPUSET_SHIFT 4
+#define CPUSET_MASK 0xFFFF
+#define CPUSET_OF(n) (((n) & CPUSET_MASK) << CPUSET_SHIFT)
+
+static int hotplug_rtb_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ /*
+ * Bits [19:4] of the data are the online mask, lower 4 bits are the
+ * cpu number that is being changed. Additionally, changes to the
+ * online_mask that will be done by the current hotplug will be made
+ * even though they aren't necessarily in the online mask yet.
+ *
+ * XXX: This design is limited to supporting at most 16 cpus
+ */
+ int this_cpumask = CPUSET_OF(1 << (int)hcpu);
+ int cpumask = CPUSET_OF(cpumask_bits(cpu_online_mask)[0]);
+ int cpudata = CPU_OF((int)hcpu) | cpumask;
+
+ switch (action & (~CPU_TASKS_FROZEN)) {
+ case CPU_STARTING:
+ uncached_logk(LOGK_HOTPLUG, (void *)(cpudata | this_cpumask));
+ break;
+ case CPU_DYING:
+ cpumask_set_cpu((unsigned long)hcpu, &cpu_dying_mask);
+ uncached_logk(LOGK_HOTPLUG, (void *)(cpudata & ~this_cpumask));
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+static struct notifier_block hotplug_rtb_notifier = {
+ .notifier_call = hotplug_rtb_callback,
+};
+
+/*
+ * Per-CPU bring-up hook run on a secondary CPU as it comes online.
+ *
+ * The first (cold) boot of each CPU only sets its warm-boot flag and
+ * returns; later (warm) boots restore JTAG state and drop the SPM back
+ * to clock gating.  Returns 0 on success or the SPM call's error code.
+ */
+int msm_platform_secondary_init(unsigned int cpu)
+{
+ int ret;
+ /* Per-cpu flag: 0 until this CPU has completed its first boot. */
+ unsigned int *warm_boot = &__get_cpu_var(warm_boot_flag);
+
+ if (!(*warm_boot)) {
+  *warm_boot = 1;
+  return 0;
+ }
+ msm_jtag_restore_state();
+ /* NOTE(review): VFP resume is disabled below - confirm VFP state is
+  * saved/restored elsewhere (e.g. CPU_PM notifiers) before relying on it.
+  */
+#if defined(CONFIG_VFP) && defined (CONFIG_CPU_PM)
+ //vfp_pm_resume();
+#endif
+ ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
+
+ return ret;
+}
+
+/* Register the RTB hotplug logger at early_initcall time so transitions
+ * of the secondary CPUs are captured from the very first bring-up.
+ */
+static int __init init_hotplug(void)
+{
+ return register_hotcpu_notifier(&hotplug_rtb_notifier);
+}
+early_initcall(init_hotplug);
diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c
index d6908569ecaf..cb38cf76df3e 100644
--- a/arch/arm/mach-qcom/platsmp.c
+++ b/arch/arm/mach-qcom/platsmp.c
@@ -1,8 +1,7 @@
/*
* Copyright (C) 2002 ARM Ltd.
* All Rights Reserved
- * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -10,53 +9,60 @@
*/
#include <linux/init.h>
-#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/cpumask.h>
#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/smp.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
-
+#include <linux/regulator/krait-regulator.h>
+#include <soc/qcom/spm.h>
+#include <soc/qcom/pm.h>
+#include <soc/qcom/scm-boot.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/mach-types.h>
#include <asm/smp_plat.h>
-#include "scm-boot.h"
-
-#define VDD_SC1_ARRAY_CLAMP_GFS_CTL 0x35a0
-#define SCSS_CPU1CORE_RESET 0x2d80
-#define SCSS_DBG_STATUS_CORE_PWRDUP 0x2e64
-
-#define APCS_CPU_PWR_CTL 0x04
-#define PLL_CLAMP BIT(8)
-#define CORE_PWRD_UP BIT(7)
-#define COREPOR_RST BIT(5)
-#define CORE_RST BIT(4)
-#define L2DT_SLP BIT(3)
-#define CLAMP BIT(0)
-
-#define APC_PWR_GATE_CTL 0x14
-#define BHS_CNT_SHIFT 24
-#define LDO_PWR_DWN_SHIFT 16
-#define LDO_BYP_SHIFT 8
-#define BHS_SEG_SHIFT 1
-#define BHS_EN BIT(0)
+#include <soc/qcom/socinfo.h>
+//#include <mach/hardware.h>
+#include "../mach-msm/include/mach/msm_iomap.h"
-#define APCS_SAW2_VCTL 0x14
-#define APCS_SAW2_2_VCTL 0x1c
+#include "platsmp.h"
-extern void secondary_startup(void);
+#define VDD_SC1_ARRAY_CLAMP_GFS_CTL 0x15A0
+#define SCSS_CPU1CORE_RESET 0xD80
+#define SCSS_DBG_STATUS_CORE_PWRDUP 0xE64
+#define MSM8960_SAW2_BASE_ADDR 0x02089000
+#define MSM8962_SAW2_BASE_ADDR 0xF9089000
+#define APCS_ALIAS0_BASE_ADDR 0xF9088000
+#define MSM_APCS_GCC_BASE IOMEM(0xFA006000)
+/*
+ * Write pen_release in a way that is guaranteed to be visible to all
+ * observers, irrespective of whether they're taking part in coherency
+ * or not. This is necessary for the hotplug code to work reliably.
+ */
+void __cpuinit write_pen_release(int val)
+{
+ pen_release = val;
+ /* Order the store before the maintenance operations below. */
+ smp_wmb();
+ /* Clean the line out of L1 and the outer cache so a secondary CPU
+  * still running with caches off (see headsmp.S) can observe it.
+  */
+ __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
+ outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
+}
static DEFINE_SPINLOCK(boot_lock);
-#ifdef CONFIG_HOTPLUG_CPU
-static void __ref qcom_cpu_die(unsigned int cpu)
+void __cpuinit msm_secondary_init(unsigned int cpu)
{
- wfi();
-}
-#endif
+ WARN_ON(msm_platform_secondary_init(cpu));
+
+ /*
+ * let the primary processor know we're out of the
+ * pen, then head off into the C entry point
+ */
+ write_pen_release(-1);
-static void qcom_secondary_init(unsigned int cpu)
-{
/*
* Synchronise with the boot thread.
*/
@@ -64,221 +70,47 @@ static void qcom_secondary_init(unsigned int cpu)
spin_unlock(&boot_lock);
}
-static int scss_release_secondary(unsigned int cpu)
-{
- struct device_node *node;
- void __iomem *base;
-
- node = of_find_compatible_node(NULL, NULL, "qcom,gcc-msm8660");
- if (!node) {
- pr_err("%s: can't find node\n", __func__);
- return -ENXIO;
- }
-
- base = of_iomap(node, 0);
- of_node_put(node);
- if (!base)
- return -ENOMEM;
-
- writel_relaxed(0, base + VDD_SC1_ARRAY_CLAMP_GFS_CTL);
- writel_relaxed(0, base + SCSS_CPU1CORE_RESET);
- writel_relaxed(3, base + SCSS_DBG_STATUS_CORE_PWRDUP);
- mb();
- iounmap(base);
-
- return 0;
-}
-
-static int kpssv1_release_secondary(unsigned int cpu)
+static int __cpuinit arm_release_secondary(unsigned long base, unsigned int cpu)
{
- int ret = 0;
- void __iomem *reg, *saw_reg;
- struct device_node *cpu_node, *acc_node, *saw_node;
- u32 val;
-
- cpu_node = of_get_cpu_node(cpu, NULL);
- if (!cpu_node)
+ void *base_ptr = ioremap_nocache(base + (cpu * 0x10000), SZ_4K);
+ if (!base_ptr)
return -ENODEV;
- acc_node = of_parse_phandle(cpu_node, "qcom,acc", 0);
- if (!acc_node) {
- ret = -ENODEV;
- goto out_acc;
- }
-
- saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
- if (!saw_node) {
- ret = -ENODEV;
- goto out_saw;
- }
-
- reg = of_iomap(acc_node, 0);
- if (!reg) {
- ret = -ENOMEM;
- goto out_acc_map;
- }
-
- saw_reg = of_iomap(saw_node, 0);
- if (!saw_reg) {
- ret = -ENOMEM;
- goto out_saw_map;
- }
-
- /* Turn on CPU rail */
- writel_relaxed(0xA4, saw_reg + APCS_SAW2_VCTL);
- mb();
- udelay(512);
-
- /* Krait bring-up sequence */
- val = PLL_CLAMP | L2DT_SLP | CLAMP;
- writel_relaxed(val, reg + APCS_CPU_PWR_CTL);
- val &= ~L2DT_SLP;
- writel_relaxed(val, reg + APCS_CPU_PWR_CTL);
- mb();
- ndelay(300);
-
- val |= COREPOR_RST;
- writel_relaxed(val, reg + APCS_CPU_PWR_CTL);
+ writel_relaxed(0x00000033, base_ptr+0x04);
mb();
- udelay(2);
- val &= ~CLAMP;
- writel_relaxed(val, reg + APCS_CPU_PWR_CTL);
+ writel_relaxed(0x10000001, base_ptr+0x14);
mb();
udelay(2);
- val &= ~COREPOR_RST;
- writel_relaxed(val, reg + APCS_CPU_PWR_CTL);
- mb();
- udelay(100);
-
- val |= CORE_PWRD_UP;
- writel_relaxed(val, reg + APCS_CPU_PWR_CTL);
- mb();
-
- iounmap(saw_reg);
-out_saw_map:
- iounmap(reg);
-out_acc_map:
- of_node_put(saw_node);
-out_saw:
- of_node_put(acc_node);
-out_acc:
- of_node_put(cpu_node);
- return ret;
-}
-
-static int kpssv2_release_secondary(unsigned int cpu)
-{
- void __iomem *reg;
- struct device_node *cpu_node, *l2_node, *acc_node, *saw_node;
- void __iomem *l2_saw_base;
- unsigned reg_val;
- int ret;
-
- cpu_node = of_get_cpu_node(cpu, NULL);
- if (!cpu_node)
- return -ENODEV;
-
- acc_node = of_parse_phandle(cpu_node, "qcom,acc", 0);
- if (!acc_node) {
- ret = -ENODEV;
- goto out_acc;
- }
-
- l2_node = of_parse_phandle(cpu_node, "next-level-cache", 0);
- if (!l2_node) {
- ret = -ENODEV;
- goto out_l2;
- }
-
- saw_node = of_parse_phandle(l2_node, "qcom,saw", 0);
- if (!saw_node) {
- ret = -ENODEV;
- goto out_saw;
- }
-
- reg = of_iomap(acc_node, 0);
- if (!reg) {
- ret = -ENOMEM;
- goto out_map;
- }
-
- l2_saw_base = of_iomap(saw_node, 0);
- if (!l2_saw_base) {
- ret = -ENOMEM;
- goto out_saw_map;
- }
-
- /* Turn on the BHS, turn off LDO Bypass and power down LDO */
- reg_val = (64 << BHS_CNT_SHIFT) | (0x3f << LDO_PWR_DWN_SHIFT) | BHS_EN;
- writel_relaxed(reg_val, reg + APC_PWR_GATE_CTL);
- mb();
- /* wait for the BHS to settle */
- udelay(1);
-
- /* Turn on BHS segments */
- reg_val |= 0x3f << BHS_SEG_SHIFT;
- writel_relaxed(reg_val, reg + APC_PWR_GATE_CTL);
- mb();
- /* wait for the BHS to settle */
- udelay(1);
-
- /* Finally turn on the bypass so that BHS supplies power */
- reg_val |= 0x3f << LDO_BYP_SHIFT;
- writel_relaxed(reg_val, reg + APC_PWR_GATE_CTL);
-
- /* enable max phases */
- writel_relaxed(0x10003, l2_saw_base + APCS_SAW2_2_VCTL);
+ writel_relaxed(0x00000031, base_ptr+0x04);
mb();
- udelay(50);
- reg_val = COREPOR_RST | CLAMP;
- writel_relaxed(reg_val, reg + APCS_CPU_PWR_CTL);
+ writel_relaxed(0x00000039, base_ptr+0x04);
mb();
udelay(2);
- reg_val &= ~CLAMP;
- writel_relaxed(reg_val, reg + APCS_CPU_PWR_CTL);
+ writel_relaxed(0x00020038, base_ptr+0x04);
mb();
udelay(2);
- reg_val &= ~COREPOR_RST;
- writel_relaxed(reg_val, reg + APCS_CPU_PWR_CTL);
+
+ writel_relaxed(0x00020008, base_ptr+0x04);
mb();
- reg_val |= CORE_PWRD_UP;
- writel_relaxed(reg_val, reg + APCS_CPU_PWR_CTL);
+ writel_relaxed(0x00020088, base_ptr+0x04);
mb();
- ret = 0;
-
- iounmap(l2_saw_base);
-out_saw_map:
- iounmap(reg);
-out_map:
- of_node_put(saw_node);
-out_saw:
- of_node_put(l2_node);
-out_l2:
- of_node_put(acc_node);
-out_acc:
- of_node_put(cpu_node);
-
- return ret;
+ iounmap(base_ptr);
+ return 0;
}
-static DEFINE_PER_CPU(int, cold_boot_done);
-
-static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
+static int __cpuinit release_from_pen(unsigned int cpu)
{
- int ret = 0;
+ unsigned long timeout;
- if (!per_cpu(cold_boot_done, cpu)) {
- ret = func(cpu);
- if (!ret)
- per_cpu(cold_boot_done, cpu) = true;
- }
+ /* Set preset_lpj to avoid subsequent lpj recalculations */
+ preset_lpj = loops_per_jiffy;
/*
* set synchronisation state between this boot processor
@@ -287,92 +119,129 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int))
spin_lock(&boot_lock);
/*
+ * The secondary processor is waiting to be released from
+ * the holding pen - release it, then wait for it to flag
+ * that it has been released by resetting pen_release.
+ *
+ * Note that "pen_release" is the hardware CPU ID, whereas
+ * "cpu" is Linux's internal ID.
+ */
+ write_pen_release(cpu_logical_map(cpu));
+
+ /*
* Send the secondary CPU a soft interrupt, thereby causing
* the boot monitor to read the system wide flags register,
* and branch to the address found there.
*/
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+ timeout = jiffies + (1 * HZ);
+ while (time_before(jiffies, timeout)) {
+ smp_rmb();
+ if (pen_release == -1)
+ break;
+
+ udelay(10);
+ }
+
/*
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
spin_unlock(&boot_lock);
- return ret;
+ return pen_release != -1 ? -ENOSYS : 0;
}
-static int msm8660_boot_secondary(unsigned int cpu, struct task_struct *idle)
+DEFINE_PER_CPU(int, cold_boot_done);
+
+static int __cpuinit msm8916_boot_secondary(unsigned int cpu,
+ struct task_struct *idle)
{
- return qcom_boot_secondary(cpu, scss_release_secondary);
+ pr_info("Starting secondary CPU %d\n", cpu);
+
+ if (per_cpu(cold_boot_done, cpu) == false) {
+ arm_release_secondary(0xb088000, cpu);
+
+ per_cpu(cold_boot_done, cpu) = true;
+ }
+ return release_from_pen(cpu);
}
-static int kpssv1_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static void __init arm_smp_init_cpus(void)
{
- return qcom_boot_secondary(cpu, kpssv1_release_secondary);
+ unsigned int i, ncores;
+
+ pr_info("%s\n", __func__);
+
+ ncores = (__raw_readl(MSM_APCS_GCC_BASE + 0x30)) & 0xF;
+
+ if (ncores > nr_cpu_ids) {
+ pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+ ncores, nr_cpu_ids);
+ ncores = nr_cpu_ids;
+ }
+
+ for (i = 0; i < ncores; i++)
+ set_cpu_possible(i, true);
}
-static int kpssv2_boot_secondary(unsigned int cpu, struct task_struct *idle)
+static int cold_boot_flags[] __initdata = {
+ 0,
+ SCM_FLAG_COLDBOOT_CPU1,
+ SCM_FLAG_COLDBOOT_CPU2,
+ SCM_FLAG_COLDBOOT_CPU3,
+};
+
+static void __init msm_platform_smp_prepare_cpus_mc(unsigned int max_cpus)
{
- return qcom_boot_secondary(cpu, kpssv2_release_secondary);
+ int cpu, map;
+ u32 aff0_mask = 0;
+ u32 aff1_mask = 0;
+ u32 aff2_mask = 0;
+
+ for_each_present_cpu(cpu) {
+ map = cpu_logical_map(cpu);
+ aff0_mask |= BIT(MPIDR_AFFINITY_LEVEL(map, 0));
+ aff1_mask |= BIT(MPIDR_AFFINITY_LEVEL(map, 1));
+ aff2_mask |= BIT(MPIDR_AFFINITY_LEVEL(map, 2));
+ }
+
+ if (scm_set_boot_addr_mc(virt_to_phys(msm_secondary_startup),
+ aff0_mask, aff1_mask, aff2_mask, SCM_FLAG_COLDBOOT_MC))
+ pr_warn("Failed to set CPU boot address\n");
}
-static void __init qcom_smp_prepare_cpus(unsigned int max_cpus)
+static void __init msm_platform_smp_prepare_cpus(unsigned int max_cpus)
{
int cpu, map;
unsigned int flags = 0;
- static const int cold_boot_flags[] = {
- 0,
- SCM_FLAG_COLDBOOT_CPU1,
- SCM_FLAG_COLDBOOT_CPU2,
- SCM_FLAG_COLDBOOT_CPU3,
- };
+
+ if (scm_is_mc_boot_available())
+ return msm_platform_smp_prepare_cpus_mc(max_cpus);
for_each_present_cpu(cpu) {
map = cpu_logical_map(cpu);
- if (WARN_ON(map >= ARRAY_SIZE(cold_boot_flags))) {
+ if (map > ARRAY_SIZE(cold_boot_flags)) {
set_cpu_present(cpu, false);
+ __WARN();
continue;
}
flags |= cold_boot_flags[map];
}
- if (scm_set_boot_addr(virt_to_phys(secondary_startup), flags)) {
- for_each_present_cpu(cpu) {
- if (cpu == smp_processor_id())
- continue;
- set_cpu_present(cpu, false);
- }
- pr_warn("Failed to set CPU boot address, disabling SMP\n");
- }
+ if (scm_set_boot_addr(virt_to_phys(msm_secondary_startup), flags))
+ pr_warn("Failed to set CPU boot address\n");
}
-static struct smp_operations smp_msm8660_ops __initdata = {
- .smp_prepare_cpus = qcom_smp_prepare_cpus,
- .smp_secondary_init = qcom_secondary_init,
- .smp_boot_secondary = msm8660_boot_secondary,
-#ifdef CONFIG_HOTPLUG_CPU
- .cpu_die = qcom_cpu_die,
-#endif
-};
-CPU_METHOD_OF_DECLARE(qcom_smp, "qcom,gcc-msm8660", &smp_msm8660_ops);
-
-static struct smp_operations qcom_smp_kpssv1_ops __initdata = {
- .smp_prepare_cpus = qcom_smp_prepare_cpus,
- .smp_secondary_init = qcom_secondary_init,
- .smp_boot_secondary = kpssv1_boot_secondary,
-#ifdef CONFIG_HOTPLUG_CPU
- .cpu_die = qcom_cpu_die,
+struct smp_operations msm8916_smp_ops __initdata = {
+ .smp_init_cpus = arm_smp_init_cpus,
+ .smp_prepare_cpus = msm_platform_smp_prepare_cpus,
+ .smp_secondary_init = msm_secondary_init,
+ .smp_boot_secondary = msm8916_boot_secondary,
+#ifdef CONFIG_HOTPLUG
+ .cpu_die = msm_cpu_die,
+ .cpu_kill = msm_cpu_kill,
#endif
};
-CPU_METHOD_OF_DECLARE(qcom_smp_kpssv1, "qcom,kpss-acc-v1", &qcom_smp_kpssv1_ops);
-
-static struct smp_operations qcom_smp_kpssv2_ops __initdata = {
- .smp_prepare_cpus = qcom_smp_prepare_cpus,
- .smp_secondary_init = qcom_secondary_init,
- .smp_boot_secondary = kpssv2_boot_secondary,
-#ifdef CONFIG_HOTPLUG_CPU
- .cpu_die = qcom_cpu_die,
-#endif
-};
-CPU_METHOD_OF_DECLARE(qcom_smp_kpssv2, "qcom,kpss-acc-v2", &qcom_smp_kpssv2_ops);
+
diff --git a/arch/arm/mach-qcom/platsmp.h b/arch/arm/mach-qcom/platsmp.h
new file mode 100644
index 000000000000..c321069a5c9c
--- /dev/null
+++ b/arch/arm/mach-qcom/platsmp.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+void msm_secondary_startup(void);
+void write_pen_release(int val);
+
+void msm_cpu_die(unsigned int cpu);
+int msm_cpu_kill(unsigned int cpu);
+
+extern struct smp_operations arm_smp_ops;
+extern struct smp_operations msm8960_smp_ops;
+extern struct smp_operations msm8974_smp_ops;
+extern struct smp_operations msm8962_smp_ops;
+extern struct smp_operations msm8625_smp_ops;
+extern struct smp_operations scorpion_smp_ops;
+extern struct smp_operations msm8916_smp_ops;
+extern struct smp_operations msm8936_smp_ops;
diff --git a/arch/arm/mach-qcom/scm-boot.c b/arch/arm/mach-qcom/scm-boot.c
index 45cee3e469a5..2110979b9e62 100644
--- a/arch/arm/mach-qcom/scm-boot.c
+++ b/arch/arm/mach-qcom/scm-boot.c
@@ -21,6 +21,15 @@
#include "scm.h"
#include "scm-boot.h"
+#define SCM_BOOT_ADDR_MC 0x11
+
+#ifdef CONFIG_ARM64
+#define SCM_FLAG_HLOS 0x01
+#else
+#define SCM_FLAG_HLOS 0x0
+#endif
+
+
/*
* Set the cold/warm boot address for one of the CPU cores.
*/
@@ -37,3 +46,49 @@ int scm_set_boot_addr(phys_addr_t addr, int flags)
&cmd, sizeof(cmd), NULL, 0);
}
EXPORT_SYMBOL(scm_set_boot_addr);
+
+/**
+ * scm_set_boot_addr_mc - Set entry physical address for cpus
+ * @addr: 32bit physical address
+ * @aff0: Collective bitmask of the affinity-level-0 of the mpidr
+ * 1<<aff0_CPU0| 1<<aff0_CPU1....... | 1<<aff0_CPU32
+ * Supports maximum 32 cpus under any affinity level.
+ * @aff1: Collective bitmask of the affinity-level-1 of the mpidr
+ * @aff2: Collective bitmask of the affinity-level-2 of the mpidr
+ * @flags: Flag to differentiate between coldboot vs warmboot
+ */
+int scm_set_boot_addr_mc(phys_addr_t addr, u32 aff0,
+ u32 aff1, u32 aff2, u32 flags)
+{
+ struct {
+ u32 addr;
+ u32 aff0;
+ u32 aff1;
+ u32 aff2;
+ u32 reserved;
+ u32 flags;
+ } cmd;
+
+ cmd.addr = addr;
+ cmd.aff0 = aff0;
+ cmd.aff1 = aff1;
+ cmd.aff2 = aff2;
+ /* Reserved for future chips with affinity level 3 effectively 1 << 0 */
+ cmd.reserved = ~0U;
+ cmd.flags = flags | SCM_FLAG_HLOS;
+ return scm_call(SCM_SVC_BOOT, SCM_BOOT_ADDR_MC,
+ &cmd, sizeof(cmd), NULL, 0);
+}
+EXPORT_SYMBOL(scm_set_boot_addr_mc);
+
+/**
+ * scm_is_mc_boot_available -
+ * Checks if TZ supports the boot API for multi-cluster configuration
+ * Returns true if available and false otherwise
+ */
+int scm_is_mc_boot_available(void)
+{
+ return scm_is_call_available(SCM_SVC_BOOT, SCM_BOOT_ADDR_MC);
+}
+EXPORT_SYMBOL(scm_is_mc_boot_available);
+
diff --git a/arch/arm/mach-qcom/scm.c b/arch/arm/mach-qcom/scm.c
index c536fd6bf827..eae84a9b1ed5 100644
--- a/arch/arm/mach-qcom/scm.c
+++ b/arch/arm/mach-qcom/scm.c
@@ -82,6 +82,32 @@ struct scm_response {
u32 is_complete;
};
+#ifdef CONFIG_ARM64
+
+#define R0_STR "x0"
+#define R1_STR "x1"
+#define R2_STR "x2"
+#define R3_STR "x3"
+#define R4_STR "x4"
+
+/* Outer caches unsupported on ARM64 platforms */
+#define outer_inv_range(x, y)
+#define outer_flush_range(x, y)
+
+#define __cpuc_flush_dcache_area __flush_dcache_area
+
+#else
+
+#define R0_STR "r0"
+#define R1_STR "r1"
+#define R2_STR "r2"
+#define R3_STR "r3"
+#define R4_STR "r4"
+
+#endif
+
+
+
/**
* alloc_scm_command() - Allocate an SCM command
* @cmd_size: size of the command buffer
@@ -297,3 +323,58 @@ u32 scm_get_version(void)
return version;
}
EXPORT_SYMBOL(scm_get_version);
+
+#define SCM_CLASS_REGISTER (0x2 << 8)
+#define SCM_MASK_IRQS BIT(5)
+#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \
+ SCM_CLASS_REGISTER | \
+ SCM_MASK_IRQS | \
+ (n & 0xf))
+
+/**
+ * scm_call_atomic1() - Send an atomic SCM command with one argument
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @arg1: first argument
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptable, atomic and SMP safe.
+ */
+s32 scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
+{
+ int context_id;
+ register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
+ register u32 r1 asm("r1") = (uintptr_t)&context_id;
+ register u32 r2 asm("r2") = arg1;
+
+ asm volatile(
+ __asmeq("%0", R0_STR)
+ __asmeq("%1", R0_STR)
+ __asmeq("%2", R1_STR)
+ __asmeq("%3", R2_STR)
+#ifdef REQUIRES_SEC
+ ".arch_extension sec\n"
+#endif
+ "smc #0\n"
+ : "=r" (r0)
+ : "r" (r0), "r" (r1), "r" (r2)
+ : "r3");
+ return r0;
+}
+EXPORT_SYMBOL(scm_call_atomic1);
+
+#define IS_CALL_AVAIL_CMD 1
+int scm_is_call_available(u32 svc_id, u32 cmd_id)
+{
+ int ret;
+ u32 svc_cmd = (svc_id << 10) | cmd_id;
+ u32 ret_val = 0;
+
+ ret = scm_call(SCM_SVC_INFO, IS_CALL_AVAIL_CMD, &svc_cmd,
+ sizeof(svc_cmd), &ret_val, sizeof(ret_val));
+ if (ret)
+ return ret;
+
+ return ret_val;
+}
+EXPORT_SYMBOL(scm_is_call_available);
diff --git a/arch/arm/mach-qcom/scm.h b/arch/arm/mach-qcom/scm.h
index 00b31ea58f29..2661e598dc69 100644
--- a/arch/arm/mach-qcom/scm.h
+++ b/arch/arm/mach-qcom/scm.h
@@ -14,12 +14,38 @@
#define SCM_SVC_BOOT 0x1
#define SCM_SVC_PIL 0x2
+#define SCM_SVC_UTIL 0x3
+#define SCM_SVC_TZ 0x4
+#define SCM_SVC_IO 0x5
+#define SCM_SVC_INFO 0x6
+#define SCM_SVC_SSD 0x7
+#define SCM_SVC_FUSE 0x8
+#define SCM_SVC_PWR 0x9
+#define SCM_SVC_MP 0xC
+#define SCM_SVC_DCVS 0xD
+#define SCM_SVC_ES 0x10
+#define SCM_SVC_HDCP 0x11
+#define SCM_SVC_TZSCHEDULER 0xFC
+#define SCM_FUSE_READ 0x7
+#define SCM_CMD_HDCP 0x01
+
+/* SCM Features */
+#define SCM_SVC_SEC_CAMERA 0xD
+
+#define DEFINE_SCM_BUFFER(__n) \
+static char __n[PAGE_SIZE] __aligned(PAGE_SIZE);
+
+#define SCM_BUFFER_SIZE(__buf) sizeof(__buf)
+
+#define SCM_BUFFER_PHYS(__buf) virt_to_phys(__buf)
+
+#
extern int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
void *resp_buf, size_t resp_len);
#define SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
extern u32 scm_get_version(void);
-
+extern int scm_is_call_available(u32 svc_id, u32 cmd_id);
#endif
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 7eb94e6fc376..c59ae579a30f 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -695,6 +695,21 @@ config SWP_EMULATE
If unsure, say Y.
+config FORCE_INSTRUCTION_ALIGNMENT
+ bool "Force instructions address alignment"
+ depends on CPU_V7 && ALIGNMENT_TRAP
+ help
+ Branching to an address in ARM state which is not word aligned,
+ where this is defined to be UNPREDICTABLE, can cause one of the
+ following two behaviours: 1. The unaligned location is forced to
+ be aligned. 2. Using the unaligned address generates a Prefetch
+ Abort on the first instruction using the unaligned PC value.
+
+ To be consistent for the user space binaries, the unaligned location
+ is forced aligned with this config.
+
+ If unsure, say Y.
+
config CPU_BIG_ENDIAN
bool "Build big-endian kernel"
depends on ARCH_SUPPORTS_BIG_ENDIAN
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 83792f4324ea..ab8da50a417e 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -747,6 +747,36 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
return NULL;
}
+#ifdef CONFIG_FORCE_INSTRUCTION_ALIGNMENT
+static int
+do_ialignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+ /*
+ * Branching to an address in ARM state which is not word aligned,
+ * where this is defined to be UNPREDICTABLE,
+ * can cause one of the following two behaviours:
+ * 1. The unaligned location is forced to be aligned.
+ * 2. Using the unaligned address generates a Prefetch Abort on
+ * the first instruction using the unaligned PC value.
+ */
+ int isize = 4;
+
+ if (user_mode(regs) && !thumb_mode(regs)) {
+ ai_sys += 1;
+
+ /*
+ * Force align the instruction in software to be following
+ * a single behaviour for the unpredictable cases.
+ */
+ instruction_pointer(regs) &= ~(isize + (-1UL));
+ return 0;
+ }
+
+ ai_skipped += 1;
+ return 1;
+}
+#endif
+
static int
do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
@@ -990,6 +1020,11 @@ static int __init alignment_init(void)
hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN,
"alignment exception");
+#ifdef CONFIG_FORCE_INSTRUCTION_ALIGNMENT
+ hook_ifault_code(FAULT_CODE_ALIGNMENT, do_ialignment, SIGBUS,
+ BUS_ADRALN, "alignment exception");
+#endif
+
/*
* ARMv6K and ARMv7 use fault status 3 (0b00011) as Access Flag section
* fault, not as alignment error.
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index b966656d2c2d..5154280c1ad7 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -371,7 +371,6 @@ v7_dma_inv_range:
dsb st
ret lr
ENDPROC(v7_dma_inv_range)
-
/*
* v7_dma_clean_range(start,end)
* - start - virtual start address of region
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 9481f85c56e6..b68992f71f1d 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -46,6 +46,9 @@ unsigned long __init __clear_cr(unsigned long mask)
#endif
static phys_addr_t phys_initrd_start __initdata = 0;
+int msm_krait_need_wfe_fixup;
+EXPORT_SYMBOL(msm_krait_need_wfe_fixup);
+
static unsigned long phys_initrd_size __initdata = 0;
static int __init early_initrd(char *p)
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index a10297da122b..8c7bf7936ef5 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -467,6 +467,7 @@ msm7x27_surf MACH_MSM7X27_SURF MSM7X27_SURF 2705
msm7x27_ffa MACH_MSM7X27_FFA MSM7X27_FFA 2706
msm7x30_ffa MACH_MSM7X30_FFA MSM7X30_FFA 2707
qsd8x50_surf MACH_QSD8X50_SURF QSD8X50_SURF 2708
+qsd8x50_ffa MACH_QSD8X50_FFA QSD8X50_FFA 2710
mx53_evk MACH_MX53_EVK MX53_EVK 2716
igep0030 MACH_IGEP0030 IGEP0030 2717
sbc3530 MACH_SBC3530 SBC3530 2722
@@ -479,6 +480,8 @@ wbd222 MACH_WBD222 WBD222 2753
msm8x60_surf MACH_MSM8X60_SURF MSM8X60_SURF 2755
msm8x60_sim MACH_MSM8X60_SIM MSM8X60_SIM 2756
tcc8000_sdk MACH_TCC8000_SDK TCC8000_SDK 2758
+msm8x55_surf MACH_MSM8X55_SURF MSM8X55_SURF 2768
+msm8x55_ffa MACH_MSM8X55_FFA MSM8X55_FFA 2769
cns3420vb MACH_CNS3420VB CNS3420VB 2776
omap4_panda MACH_OMAP4_PANDA OMAP4_PANDA 2791
ti8168evm MACH_TI8168EVM TI8168EVM 2800
@@ -491,6 +494,8 @@ smdkc210 MACH_SMDKC210 SMDKC210 2838
t5325 MACH_T5325 T5325 2846
income MACH_INCOME INCOME 2849
goni MACH_GONI GONI 2862
+msm8x55_svlte_ffa MACH_MSM8X55_SVLTE_FFA MSM8X55_SVLTE_FFA 2863
+msm8x55_svlte_surf MACH_MSM8X55_SVLTE_SURF MSM8X55_SVLTE_SURF 2864
bv07 MACH_BV07 BV07 2882
openrd_ultimate MACH_OPENRD_ULTIMATE OPENRD_ULTIMATE 2884
devixp MACH_DEVIXP DEVIXP 2885
@@ -515,14 +520,19 @@ mx53_smd MACH_MX53_SMD MX53_SMD 3011
msm8x60_rumi3 MACH_MSM8X60_RUMI3 MSM8X60_RUMI3 3016
msm8x60_ffa MACH_MSM8X60_FFA MSM8X60_FFA 3017
cm_a510 MACH_CM_A510 CM_A510 3020
+fsm9xxx_surf MACH_FSM9XXX_SURF FSM9XXX_SURF 3028
+fsm9xxx_ffa MACH_FSM9XXX_FFA FSM9XXX_FFA 3029
tx28 MACH_TX28 TX28 3043
pcontrol_g20 MACH_PCONTROL_G20 PCONTROL_G20 3062
vpr200 MACH_VPR200 VPR200 3087
torbreck MACH_TORBRECK TORBRECK 3090
prima2_evb MACH_PRIMA2_EVB PRIMA2_EVB 3103
+msm8x60_fluid MACH_MSM8X60_FLUID MSM8X60_FLUID 3124
paz00 MACH_PAZ00 PAZ00 3128
acmenetusfoxg20 MACH_ACMENETUSFOXG20 ACMENETUSFOXG20 3129
+msm8x60_fusion MACH_MSM8X60_FUSION MSM8X60_FUSION 3181
ag5evm MACH_AG5EVM AG5EVM 3189
+msm8x60_fusn_ffa MACH_MSM8X60_FUSN_FFA MSM8X60_FUSN_FFA 3199
ics_if_voip MACH_ICS_IF_VOIP ICS_IF_VOIP 3206
wlf_cragg_6410 MACH_WLF_CRAGG_6410 WLF_CRAGG_6410 3207
trimslice MACH_TRIMSLICE TRIMSLICE 3209
@@ -540,17 +550,28 @@ armlex4210 MACH_ARMLEX4210 ARMLEX4210 3361
snowball MACH_SNOWBALL SNOWBALL 3363
xilinx_ep107 MACH_XILINX_EP107 XILINX_EP107 3378
nuri MACH_NURI NURI 3379
+msm8960_cdp MACH_MSM8960_CDP MSM8960_CDP 3396
+msm8960_mtp MACH_MSM8960_MTP MSM8960_MTP 3397
+msm8960_fluid MACH_MSM8960_FLUID MSM8960_FLUID 3398
+msm8960_apq MACH_MSM8960_APQ MSM8960_APQ 3399
origen MACH_ORIGEN ORIGEN 3455
nspire MACH_NSPIRE NSPIRE 3503
nokia_rm696 MACH_NOKIA_RM696 NOKIA_RM696 3522
mikrap_x168 MACH_MIKRAP_X168 MIKRAP_X168 3543
deto_macarm9 MACH_DETO_MACARM9 DETO_MACARM9 3568
+msm8x60_dragon MACH_MSM8X60_DRAGON MSM8X60_DRAGON 3586
m28evk MACH_M28EVK M28EVK 3613
kota2 MACH_KOTA2 KOTA2 3616
bonito MACH_BONITO BONITO 3623
omap3_egf MACH_OMAP3_EGF OMAP3_EGF 3637
smdk4212 MACH_SMDK4212 SMDK4212 3638
apx4devkit MACH_APX4DEVKIT APX4DEVKIT 3712
+msm9615_cdp MACH_MSM9615_CDP MSM9615_CDP 3675
+msm9615_mtp MACH_MSM9615_MTP MSM9615_MTP 3681
+msm8930_cdp MACH_MSM8930_CDP MSM8930_CDP 3727
+msm8930_mtp MACH_MSM8930_MTP MSM8930_MTP 3728
+msm8930_fluid MACH_MSM8930_FLUID MSM8930_FLUID 3729
+msm7627a_qrd1 MACH_MSM7627A_QRD1 MSM7627A_QRD1 3756
smdk4412 MACH_SMDK4412 SMDK4412 3765
marzen MACH_MARZEN MARZEN 3790
krome MACH_KROME KROME 3797
@@ -1007,3 +1028,19 @@ eco5_bx2 MACH_ECO5_BX2 ECO5_BX2 4572
eukrea_cpuimx28sd MACH_EUKREA_CPUIMX28SD EUKREA_CPUIMX28SD 4573
domotab MACH_DOMOTAB DOMOTAB 4574
pfla03 MACH_PFLA03 PFLA03 4575
+msm8625_rumi3 MACH_MSM8625_RUMI3 MSM8625_RUMI3 3871
+msm7627a_evb MACH_MSM7627A_EVB MSM7627A_EVB 3934
+apq8064_cdp MACH_APQ8064_CDP APQ8064_CDP 3948
+apq8064_mtp MACH_APQ8064_MTP APQ8064_MTP 3949
+apq8064_liquid MACH_APQ8064_LIQUID APQ8064_LIQUID 3951
+mpq8064_cdp MACH_MPQ8064_CDP MPQ8064_CDP 3993
+mpq8064_hrd MACH_MPQ8064_HRD MPQ8064_HRD 3994
+mpq8064_dtv MACH_MPQ8064_DTV MPQ8064_DTV 3995
+fsm8064_ep MACH_FSM8064_EP FSM8064_EP 3996
+msm7627a_qrd3 MACH_MSM7627A_QRD3 MSM7627A_QRD3 4005
+msm8625_surf MACH_MSM8625_SURF MSM8625_SURF 4037
+msm8625_evb MACH_MSM8625_EVB MSM8625_EVB 4042
+msm8625_qrd7 MACH_MSM8625_QRD7 MSM8625_QRD7 4095
+msm8625_ffa MACH_MSM8625_FFA MSM8625_FFA 4166
+msm8625_evt MACH_MSM8625_EVT MSM8625_EVT 4193
+qrd_skud_prime MACH_QRD_SKUD_PRIME QRD_SKUD_PRIME 4393
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 1a693d3f9d51..c05b9c131ba4 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -182,4 +182,6 @@ source "drivers/ras/Kconfig"
source "drivers/thunderbolt/Kconfig"
+source "drivers/soc/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index ebee55537a05..5d0bd1c02cb3 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -7,6 +7,9 @@
obj-y += irqchip/
obj-y += bus/
+obj-y += soc/
+
+
obj-$(CONFIG_GENERIC_PHY) += phy/
@@ -19,6 +22,7 @@ obj-$(CONFIG_PARISC) += parisc/
obj-$(CONFIG_RAPIDIO) += rapidio/
obj-y += video/
obj-y += idle/
+obj-y += soc/
# IPMI must come before ACPI in order to provide IPMI opregion support
obj-$(CONFIG_IPMI_HANDLER) += char/ipmi/
@@ -60,8 +64,8 @@ obj-$(CONFIG_FB_I810) += video/fbdev/i810/
obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/
obj-$(CONFIG_PARPORT) += parport/
-obj-y += base/ block/ misc/ mfd/ nfc/
obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
+obj-y += base/ block/ misc/ mfd/ nfc/ soc/
obj-$(CONFIG_NUBUS) += nubus/
obj-y += macintosh/
obj-$(CONFIG_IDE) += ide/
@@ -161,3 +165,4 @@ obj-$(CONFIG_POWERCAP) += powercap/
obj-$(CONFIG_MCB) += mcb/
obj-$(CONFIG_RAS) += ras/
obj-$(CONFIG_THUNDERBOLT) += thunderbolt/
+
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 89ced955fafa..f5640a333c78 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -618,6 +618,96 @@ int dev_pm_opp_disable(struct device *dev, unsigned long freq)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
+#ifdef CONFIG_CPU_FREQ
+/**
+ * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
+ * @dev: device for which we do this operation
+ * @table: Cpufreq table returned back to caller
+ *
+ * Generate a cpufreq table for a provided device- this assumes that the
+ * opp list is already initialized and ready for usage.
+ *
+ * This function allocates required memory for the cpufreq table. It is
+ * expected that the caller does the required maintenance such as freeing
+ * the table as required.
+ *
+ * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
+ * if no memory available for the operation (table is not populated), returns 0
+ * if successful and table is populated.
+ *
+ * WARNING: It is important for the callers to ensure refreshing their copy of
+ * the table if any of the mentioned functions have been invoked in the interim.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * To simplify the logic, we pretend we are updater and hold relevant mutex here
+ * Callers should ensure that this function is *NOT* called under RCU protection
+ * or in contexts where mutex locking cannot be used.
+ */
+int dev_pm_opp_init_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+ struct device_opp *dev_opp;
+ struct opp *opp;
+ struct cpufreq_frequency_table *freq_table;
+ int i = 0;
+
+ /* Pretend as if I am an updater */
+ mutex_lock(&dev_opp_list_lock);
+
+ dev_opp = find_device_opp(dev);
+ if (IS_ERR(dev_opp)) {
+ int r = PTR_ERR(dev_opp);
+ mutex_unlock(&dev_opp_list_lock);
+ dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
+ return r;
+ }
+
+ freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
+ (dev_pm_opp_get_opp_count(dev) + 1), GFP_KERNEL);
+ if (!freq_table) {
+ mutex_unlock(&dev_opp_list_lock);
+ dev_warn(dev, "%s: Unable to allocate frequency table\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ list_for_each_entry(opp, &dev_opp->opp_list, node) {
+ if (opp->available) {
+ freq_table[i].driver_data = i;
+ freq_table[i].frequency = opp->rate / 1000;
+ i++;
+ }
+ }
+ mutex_unlock(&dev_opp_list_lock);
+
+ freq_table[i].driver_data = i;
+ freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+ *table = &freq_table[0];
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
+
+/**
+ * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
+ * @dev: device for which we do this operation
+ * @table: table to free
+ *
+ * Free up the table allocated by dev_pm_opp_init_cpufreq_table
+ */
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+ if (!table)
+ return;
+
+ kfree(*table);
+ *table = NULL;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
+#endif /* CONFIG_CPU_FREQ */
+
/**
* dev_pm_opp_get_notifier() - find notifier_head of the device with opp
* @dev: device pointer used to lookup device OPPs.
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index efefd12a0f7b..ee82fa4e8d4f 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -6,6 +6,19 @@ menu "Character devices"
source "drivers/tty/Kconfig"
+config DEVMEM
+ bool "Memory device driver"
+ default y
+ help
+ The memory driver provides two character devices, mem and kmem, which
+ provide access to the system's memory. The mem device is a view of
+ physical memory, and each byte in the device corresponds to the
+ matching physical address. The kmem device is the same as mem, but
+ the addresses correspond to the kernel's virtual address space rather
+ than physical memory. These devices are standard parts of a Linux
+ system and most users should say Y here. You might say N if very
+ security conscious or memory is tight.
+
config DEVKMEM
bool "/dev/kmem virtual device support"
default y
@@ -602,5 +615,47 @@ config TILE_SROM
source "drivers/char/xillybus/Kconfig"
+config MSM_ROTATOR_USE_IMEM
+ bool "Enable rotator driver to use iMem"
+ depends on ARCH_MSM7X30 && MSM_ROTATOR
+ default y
+ help
+ This option enables the msm_rotator driver to use the more efficient
+ iMem. Some MSM platforms may not have iMem available for the rotator
+ block. Or some systems may want the iMem to be dedicated to a
+ different function.
+
+config MSM_ADSPRPC
+ tristate "Qualcomm ADSP RPC driver"
+ depends on MSM_SMD
+ help
+ Provides a communication mechanism that allows for clients to
+ make remote method invocations across processor boundary to
+ applications DSP processor. Say M if you want to enable this
+ module.
+
+config CSDIO_VENDOR_ID
+ hex "Card VendorId"
+ depends on MMC_GENERIC_CSDIO
+ default "0"
+ help
+ Enter vendor id for targeted sdio device, this may be overwritten by
+ module parameters.
+
+config CSDIO_DEVICE_ID
+ hex "CardDeviceId"
+ depends on MMC_GENERIC_CSDIO
+ default "0"
+ help
+ Enter device id for targeted sdio device, this may be overwritten by
+ module parameters.
+
+config MSM_RDBG
+ tristate "Qualcomm Remote debug driver"
+ depends on MSM_AUDIO_QDSP6
+ help
+ Implements a shared memory based transport mechanism that allows
+ for a debugger running on a host PC to communicate with a remote
+ stub running on peripheral subsystems such as the ADSP, MODEM etc.
endmenu
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index d06cde26031b..455d03f7eb56 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -9,7 +9,6 @@ obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
obj-$(CONFIG_RAW_DRIVER) += raw.o
obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
-obj-$(CONFIG_MSM_SMD_PKT) += msm_smd_pkt.o
obj-$(CONFIG_MSPEC) += mspec.o
obj-$(CONFIG_MMTIMER) += mmtimer.o
obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
@@ -62,3 +61,6 @@ js-rtc-y = rtc.o
obj-$(CONFIG_TILE_SROM) += tile-srom.o
obj-$(CONFIG_XILLYBUS) += xillybus/
+obj-$(CONFIG_MSM_ADSPRPC) += adsprpc.o
+obj-$(CONFIG_MSM_RDBG) += rdbg.o
+obj-$(CONFIG_MSM_SMD_PKT) += msm_smd_pkt.o
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 524b707894ef..37e17cac4bf3 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -58,6 +58,7 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
}
#endif
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
@@ -83,7 +84,9 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
return 1;
}
#endif
+#endif
+#ifdef CONFIG_DEVMEM
void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}
@@ -216,6 +219,9 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
*ppos += written;
return written;
}
+#endif /* CONFIG_DEVMEM */
+
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
int __weak phys_mem_access_prot_allowed(struct file *file,
unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
@@ -337,6 +343,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
}
return 0;
}
+#endif /* CONFIG_DEVMEM */
#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
@@ -667,6 +674,8 @@ static loff_t null_lseek(struct file *file, loff_t offset, int orig)
return file->f_pos = 0;
}
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
+
/*
* The memory devices use the full 32/64 bits of the offset, and so we cannot
* check against negative addresses: they are ok. The return value is weird,
@@ -700,10 +709,14 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
return ret;
}
+#endif
+
+#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
static int open_port(struct inode *inode, struct file *filp)
{
return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
+#endif
#define zero_lseek null_lseek
#define full_lseek null_lseek
@@ -712,6 +725,7 @@ static int open_port(struct inode *inode, struct file *filp)
#define open_mem open_port
#define open_kmem open_mem
+#ifdef CONFIG_DEVMEM
static const struct file_operations mem_fops = {
.llseek = memory_lseek,
.read = read_mem,
@@ -720,6 +734,7 @@ static const struct file_operations mem_fops = {
.open = open_mem,
.get_unmapped_area = get_unmapped_area_mem,
};
+#endif
#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
@@ -782,7 +797,9 @@ static const struct memdev {
const struct file_operations *fops;
struct backing_dev_info *dev_info;
} devlist[] = {
+#ifdef CONFIG_DEVMEM
[1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
+#endif
#ifdef CONFIG_DEVKMEM
[2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
#endif
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index ffa97d261cf3..6bbf2c1559bb 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -59,7 +59,7 @@ static DEFINE_MUTEX(misc_mtx);
/*
* Assigned numbers, used for dynamic minors
*/
-#define DYNAMIC_MINORS 64 /* like dynamic majors */
+#define DYNAMIC_MINORS 96 /* like dynamic majors */
static DECLARE_BITMAP(misc_minors, DYNAMIC_MINORS);
#ifdef CONFIG_PROC_FS
diff --git a/drivers/char/msm_smd_pkt.c b/drivers/char/msm_smd_pkt.c
index ba82a06d9684..d2a733677c59 100644
--- a/drivers/char/msm_smd_pkt.c
+++ b/drivers/char/msm_smd_pkt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2010, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -9,14 +9,10 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
*/
/*
- * SMD Packet Driver -- Provides userspace interface to SMD packet ports.
+ * SMD Packet Driver -- Provides a binary SMD non-muxed packet port
+ * interface.
*/
#include <linux/slab.h>
@@ -25,193 +21,634 @@
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/sched.h>
+#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/completion.h>
+#include <linux/msm_smd_pkt.h>
#include <linux/poll.h>
+#include <asm/ioctls.h>
+#include <linux/pm.h>
+#include <linux/of.h>
+#include <linux/ipc_logging.h>
+
+#include <soc/qcom/smd.h>
+#include <soc/qcom/smsm.h>
+#include <soc/qcom/subsystem_restart.h>
-#include <mach/msm_smd.h>
+#ifdef CONFIG_ARCH_FSM9XXX
+#define DEFAULT_NUM_SMD_PKT_PORTS 4
+#else
+#define DEFAULT_NUM_SMD_PKT_PORTS 31
+#endif
-#define NUM_SMD_PKT_PORTS 9
+#define MODULE_NAME "msm_smdpkt"
#define DEVICE_NAME "smdpkt"
-#define MAX_BUF_SIZE 2048
+#define WAKEUPSOURCE_TIMEOUT (2000) /* two seconds */
struct smd_pkt_dev {
+ struct list_head dev_list;
+ char dev_name[SMD_MAX_CH_NAME_LEN];
+ char ch_name[SMD_MAX_CH_NAME_LEN];
+ uint32_t edge;
+
struct cdev cdev;
struct device *devicep;
+ void *pil;
struct smd_channel *ch;
- int open_count;
struct mutex ch_lock;
struct mutex rx_lock;
struct mutex tx_lock;
wait_queue_head_t ch_read_wait_queue;
+ wait_queue_head_t ch_write_wait_queue;
wait_queue_head_t ch_opened_wait_queue;
int i;
+ int ref_cnt;
+
+ int blocking_write;
+ int is_open;
+ int poll_mode;
+ unsigned ch_size;
+ uint open_modem_wait;
+
+ int has_reset;
+ int do_reset_notification;
+ struct completion ch_allocated;
+ struct wakeup_source pa_ws; /* Packet Arrival Wakeup Source */
+ struct work_struct packet_arrival_work;
+ spinlock_t pa_spinlock;
+ int ws_locked;
+};
- unsigned char tx_buf[MAX_BUF_SIZE];
- unsigned char rx_buf[MAX_BUF_SIZE];
- int remote_open;
-} *smd_pkt_devp[NUM_SMD_PKT_PORTS];
+struct smd_pkt_driver {
+ struct list_head list;
+ int ref_cnt;
+ char pdriver_name[SMD_MAX_CH_NAME_LEN];
+ struct platform_driver driver;
+};
+
+static DEFINE_MUTEX(smd_pkt_driver_lock_lha1);
+static LIST_HEAD(smd_pkt_driver_list);
struct class *smd_pkt_classp;
static dev_t smd_pkt_number;
+static struct delayed_work loopback_work;
+static void check_and_wakeup_reader(struct smd_pkt_dev *smd_pkt_devp);
+static void check_and_wakeup_writer(struct smd_pkt_dev *smd_pkt_devp);
+static uint32_t is_modem_smsm_inited(void);
+
+#define SMD_PKT_PROBE_WAIT_TIMEOUT 3000
+static struct delayed_work smdpkt_probe_work;
+static int smdpkt_probe_done;
+static DEFINE_MUTEX(smd_pkt_dev_lock_lha1);
+static LIST_HEAD(smd_pkt_dev_list);
+static int num_smd_pkt_ports = DEFAULT_NUM_SMD_PKT_PORTS;
+
+#define SMD_PKT_IPC_LOG_PAGE_CNT 2
+static void *smd_pkt_ilctxt;
+
+static int msm_smd_pkt_debug_mask;
+module_param_named(debug_mask, msm_smd_pkt_debug_mask,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+enum {
+ SMD_PKT_STATUS = 1U << 0,
+ SMD_PKT_READ = 1U << 1,
+ SMD_PKT_WRITE = 1U << 2,
+ SMD_PKT_READ_DUMP_BUFFER = 1U << 3,
+ SMD_PKT_WRITE_DUMP_BUFFER = 1U << 4,
+ SMD_PKT_POLL = 1U << 5,
+};
-static int msm_smd_pkt_debug_enable;
-module_param_named(debug_enable, msm_smd_pkt_debug_enable,
- int, S_IRUGO | S_IWUSR | S_IWGRP);
+#define DEBUG
#ifdef DEBUG
-#define D_DUMP_BUFFER(prestr, cnt, buf) do { \
- int i; \
- if (msm_smd_pkt_debug_enable) { \
- pr_debug("%s", prestr); \
- for (i = 0; i < cnt; i++) \
- pr_debug("%.2x", buf[i]); \
- pr_debug("\n"); \
- } \
- } while (0)
-#else
-#define D_DUMP_BUFFER(prestr, cnt, buf) do {} while (0)
-#endif
-#ifdef DEBUG
-#define DBG(x...) do { \
- if (msm_smd_pkt_debug_enable) \
- pr_debug(x); \
- } while (0)
+#define SMD_PKT_LOG_STRING(x...) \
+do { \
+ if (smd_pkt_ilctxt) \
+ ipc_log_string(smd_pkt_ilctxt, "<SMD_PKT>: "x); \
+} while (0)
+
+#define SMD_PKT_LOG_BUF(buf, cnt) \
+do { \
+ char log_buf[128]; \
+ int i; \
+ if (smd_pkt_ilctxt) { \
+ i = cnt < 16 ? cnt : 16; \
+ hex_dump_to_buffer(buf, i, 16, 1, log_buf, \
+ sizeof(log_buf), false); \
+ ipc_log_string(smd_pkt_ilctxt, "<SMD_PKT>: %s", log_buf); \
+ } \
+} while (0)
+
+#define D_STATUS(x...) \
+do { \
+ if (msm_smd_pkt_debug_mask & SMD_PKT_STATUS) \
+ pr_info("Status: "x); \
+ SMD_PKT_LOG_STRING(x); \
+} while (0)
+
+#define D_READ(x...) \
+do { \
+ if (msm_smd_pkt_debug_mask & SMD_PKT_READ) \
+ pr_info("Read: "x); \
+ SMD_PKT_LOG_STRING(x); \
+} while (0)
+
+#define D_WRITE(x...) \
+do { \
+ if (msm_smd_pkt_debug_mask & SMD_PKT_WRITE) \
+ pr_info("Write: "x); \
+ SMD_PKT_LOG_STRING(x); \
+} while (0)
+
+#define D_READ_DUMP_BUFFER(prestr, cnt, buf) \
+do { \
+ if (msm_smd_pkt_debug_mask & SMD_PKT_READ_DUMP_BUFFER) \
+ print_hex_dump(KERN_INFO, prestr, \
+ DUMP_PREFIX_NONE, 16, 1, \
+ buf, cnt, 1); \
+ SMD_PKT_LOG_BUF(buf, cnt); \
+} while (0)
+
+#define D_WRITE_DUMP_BUFFER(prestr, cnt, buf) \
+do { \
+ if (msm_smd_pkt_debug_mask & SMD_PKT_WRITE_DUMP_BUFFER) \
+ print_hex_dump(KERN_INFO, prestr, \
+ DUMP_PREFIX_NONE, 16, 1, \
+ buf, cnt, 1); \
+ SMD_PKT_LOG_BUF(buf, cnt); \
+} while (0)
+
+#define D_POLL(x...) \
+do { \
+ if (msm_smd_pkt_debug_mask & SMD_PKT_POLL) \
+ pr_info("Poll: "x); \
+ SMD_PKT_LOG_STRING(x); \
+} while (0)
+
+#define E_SMD_PKT_SSR(x) \
+do { \
+ if (x->do_reset_notification) \
+ pr_err("%s notifying reset for smd_pkt_dev id:%d\n", \
+ __func__, x->i); \
+} while (0)
#else
-#define DBG(x...) do {} while (0)
+#define D_STATUS(x...) do {} while (0)
+#define D_READ(x...) do {} while (0)
+#define D_WRITE(x...) do {} while (0)
+#define D_READ_DUMP_BUFFER(prestr, cnt, buf) do {} while (0)
+#define D_WRITE_DUMP_BUFFER(prestr, cnt, buf) do {} while (0)
+#define D_POLL(x...) do {} while (0)
+#define E_SMD_PKT_SSR(x) do {} while (0)
#endif
-static void check_and_wakeup_reader(struct smd_pkt_dev *smd_pkt_devp)
+static ssize_t open_timeout_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t n)
{
- int sz;
+ struct smd_pkt_dev *smd_pkt_devp;
+ unsigned long tmp;
+
+ mutex_lock(&smd_pkt_dev_lock_lha1);
+ list_for_each_entry(smd_pkt_devp, &smd_pkt_dev_list, dev_list) {
+ if (smd_pkt_devp->devicep == d) {
+ if (!kstrtoul(buf, 10, &tmp)) {
+ smd_pkt_devp->open_modem_wait = tmp;
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ return n;
+ } else {
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ pr_err("%s: unable to convert: %s to an int\n",
+ __func__, buf);
+ return -EINVAL;
+ }
+ }
+ }
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
- if (!smd_pkt_devp || !smd_pkt_devp->ch)
- return;
+ pr_err("%s: unable to match device to valid smd_pkt port\n", __func__);
+ return -EINVAL;
+}
- sz = smd_cur_packet_size(smd_pkt_devp->ch);
- if (sz == 0) {
- DBG("no packet\n");
- return;
+static ssize_t open_timeout_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct smd_pkt_dev *smd_pkt_devp;
+
+ mutex_lock(&smd_pkt_dev_lock_lha1);
+ list_for_each_entry(smd_pkt_devp, &smd_pkt_dev_list, dev_list) {
+ if (smd_pkt_devp->devicep == d) {
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ smd_pkt_devp->open_modem_wait);
+ }
}
- if (sz > smd_read_avail(smd_pkt_devp->ch)) {
- DBG("incomplete packet\n");
- return;
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ pr_err("%s: unable to match device to valid smd_pkt port\n", __func__);
+ return -EINVAL;
+
+}
+
+static DEVICE_ATTR(open_timeout, 0664, open_timeout_show, open_timeout_store);
+
+/**
+ * loopback_edge_store() - Set the edge type for loopback device
+ * @d: Linux device structure
+ * @attr: Device attribute structure
+ * @buf: Input string
+ * @n: Length of the input string
+ *
+ * This function is used to set the loopback device edge runtime
+ * by writing to the loopback_edge node.
+ */
+static ssize_t loopback_edge_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t n)
+{
+ struct smd_pkt_dev *smd_pkt_devp;
+ unsigned long tmp;
+
+ mutex_lock(&smd_pkt_dev_lock_lha1);
+ list_for_each_entry(smd_pkt_devp, &smd_pkt_dev_list, dev_list) {
+ if (smd_pkt_devp->devicep == d) {
+ if (!kstrtoul(buf, 10, &tmp)) {
+ smd_pkt_devp->edge = tmp;
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ return n;
+ } else {
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ pr_err("%s: unable to convert: %s to an int\n",
+ __func__, buf);
+ return -EINVAL;
+ }
+ }
}
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ pr_err("%s: unable to match device to valid smd_pkt port\n", __func__);
+ return -EINVAL;
+}
+
+/**
+ * loopback_edge_show() - Get the edge type for loopback device
+ * @d: Linux device structure
+ * @attr: Device attribute structure
+ * @buf: Output buffer
+ *
+ * This function is used to get the loopback device edge runtime
+ * by reading the loopback_edge node.
+ */
+static ssize_t loopback_edge_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct smd_pkt_dev *smd_pkt_devp;
+
+ mutex_lock(&smd_pkt_dev_lock_lha1);
+ list_for_each_entry(smd_pkt_devp, &smd_pkt_dev_list, dev_list) {
+ if (smd_pkt_devp->devicep == d) {
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ smd_pkt_devp->edge);
+ }
+ }
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ pr_err("%s: unable to match device to valid smd_pkt port\n", __func__);
+ return -EINVAL;
+
+}
- DBG("waking up reader\n");
- wake_up_interruptible(&smd_pkt_devp->ch_read_wait_queue);
+static DEVICE_ATTR(loopback_edge, 0664, loopback_edge_show,
+ loopback_edge_store);
+
+static int notify_reset(struct smd_pkt_dev *smd_pkt_devp)
+{
+ smd_pkt_devp->do_reset_notification = 0;
+
+ return -ENETRESET;
+}
+
+static void clean_and_signal(struct smd_pkt_dev *smd_pkt_devp)
+{
+ smd_pkt_devp->do_reset_notification = 1;
+ smd_pkt_devp->has_reset = 1;
+
+ smd_pkt_devp->is_open = 0;
+
+ wake_up(&smd_pkt_devp->ch_read_wait_queue);
+ wake_up(&smd_pkt_devp->ch_write_wait_queue);
+ wake_up_interruptible(&smd_pkt_devp->ch_opened_wait_queue);
+ D_STATUS("%s smd_pkt_dev id:%d\n", __func__, smd_pkt_devp->i);
+}
+
+static void loopback_probe_worker(struct work_struct *work)
+{
+
+ /* Wait for the modem SMSM to be inited for the SMD
+ ** Loopback channel to be allocated at the modem. Since
+ ** the wait need to be done atmost once, using msleep
+ ** doesn't degrade the performance. */
+ if (!is_modem_smsm_inited())
+ schedule_delayed_work(&loopback_work, msecs_to_jiffies(1000));
+ else
+ smsm_change_state(SMSM_APPS_STATE,
+ 0, SMSM_SMD_LOOPBACK);
+
+}
+
+static void packet_arrival_worker(struct work_struct *work)
+{
+ struct smd_pkt_dev *smd_pkt_devp;
+ unsigned long flags;
+
+ smd_pkt_devp = container_of(work, struct smd_pkt_dev,
+ packet_arrival_work);
+ mutex_lock(&smd_pkt_devp->ch_lock);
+ spin_lock_irqsave(&smd_pkt_devp->pa_spinlock, flags);
+ if (smd_pkt_devp->ch && smd_pkt_devp->ws_locked) {
+ D_READ("%s locking smd_pkt_dev id:%d wakeup source\n",
+ __func__, smd_pkt_devp->i);
+ /*
+ * Keep system awake long enough to allow userspace client
+ * to process the packet.
+ */
+ __pm_wakeup_event(&smd_pkt_devp->pa_ws, WAKEUPSOURCE_TIMEOUT);
+ }
+ spin_unlock_irqrestore(&smd_pkt_devp->pa_spinlock, flags);
+ mutex_unlock(&smd_pkt_devp->ch_lock);
}
-static int smd_pkt_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+static long smd_pkt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
- int r, bytes_read;
+ int ret;
struct smd_pkt_dev *smd_pkt_devp;
- struct smd_channel *chl;
- DBG("read %d bytes\n", count);
- if (count > MAX_BUF_SIZE)
+ smd_pkt_devp = file->private_data;
+ if (!smd_pkt_devp)
return -EINVAL;
+ mutex_lock(&smd_pkt_devp->ch_lock);
+ switch (cmd) {
+ case TIOCMGET:
+ D_STATUS("%s TIOCMGET command on smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ ret = smd_tiocmget(smd_pkt_devp->ch);
+ break;
+ case TIOCMSET:
+ D_STATUS("%s TIOCSET command on smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ ret = smd_tiocmset(smd_pkt_devp->ch, arg, ~arg);
+ break;
+ case SMD_PKT_IOCTL_BLOCKING_WRITE:
+ ret = get_user(smd_pkt_devp->blocking_write, (int *)arg);
+ break;
+ default:
+ pr_err("%s: Unrecognized ioctl command %d\n", __func__, cmd);
+ ret = -ENOIOCTLCMD;
+ }
+ mutex_unlock(&smd_pkt_devp->ch_lock);
+
+ return ret;
+}
+
+ssize_t smd_pkt_read(struct file *file,
+ char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int r;
+ int bytes_read;
+ int pkt_size;
+ struct smd_pkt_dev *smd_pkt_devp;
+ unsigned long flags;
+
smd_pkt_devp = file->private_data;
- if (!smd_pkt_devp || !smd_pkt_devp->ch)
+
+ if (!smd_pkt_devp) {
+ pr_err("%s on NULL smd_pkt_dev\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!smd_pkt_devp->ch) {
+ pr_err("%s on a closed smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
return -EINVAL;
+ }
+
+ if (smd_pkt_devp->do_reset_notification) {
+ /* notify client that a reset occurred */
+ E_SMD_PKT_SSR(smd_pkt_devp);
+ return notify_reset(smd_pkt_devp);
+ }
+ D_READ("Begin %s on smd_pkt_dev id:%d buffer_size %zu\n",
+ __func__, smd_pkt_devp->i, count);
- chl = smd_pkt_devp->ch;
wait_for_packet:
r = wait_event_interruptible(smd_pkt_devp->ch_read_wait_queue,
- (smd_cur_packet_size(chl) > 0 &&
- smd_read_avail(chl) >=
- smd_cur_packet_size(chl)));
+ !smd_pkt_devp->ch ||
+ (smd_cur_packet_size(smd_pkt_devp->ch) > 0
+ && smd_read_avail(smd_pkt_devp->ch)) ||
+ smd_pkt_devp->has_reset);
+
+ mutex_lock(&smd_pkt_devp->rx_lock);
+ if (smd_pkt_devp->has_reset) {
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ E_SMD_PKT_SSR(smd_pkt_devp);
+ return notify_reset(smd_pkt_devp);
+ }
+
+ if (!smd_pkt_devp->ch) {
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ pr_err("%s on a closed smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ return -EINVAL;
+ }
if (r < 0) {
- if (r != -ERESTARTSYS)
- pr_err("wait returned %d\n", r);
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ /* qualify error message */
+ if (r != -ERESTARTSYS) {
+ /* we get this anytime a signal comes in */
+ pr_err("%s: wait_event_interruptible on smd_pkt_dev id:%d ret %i\n",
+ __func__, smd_pkt_devp->i, r);
+ }
return r;
}
- mutex_lock(&smd_pkt_devp->rx_lock);
+ /* Here we have a whole packet waiting for us */
+ pkt_size = smd_cur_packet_size(smd_pkt_devp->ch);
- bytes_read = smd_cur_packet_size(smd_pkt_devp->ch);
- if (bytes_read == 0 ||
- bytes_read < smd_read_avail(smd_pkt_devp->ch)) {
+ if (!pkt_size) {
+ pr_err("%s: No data on smd_pkt_dev id:%d, False wakeup\n",
+ __func__, smd_pkt_devp->i);
mutex_unlock(&smd_pkt_devp->rx_lock);
- DBG("Nothing to read\n");
goto wait_for_packet;
}
- if (bytes_read > count) {
- mutex_unlock(&smd_pkt_devp->rx_lock);
- pr_info("packet size %d > buffer size %d", bytes_read, count);
- return -EINVAL;
+ if (pkt_size < 0) {
+ pr_err("%s: Error %d obtaining packet size for Channel %s",
+ __func__, pkt_size, smd_pkt_devp->ch_name);
+ return pkt_size;
}
- r = smd_read(smd_pkt_devp->ch, smd_pkt_devp->rx_buf, bytes_read);
- if (r != bytes_read) {
+ if ((uint32_t)pkt_size > count) {
+ pr_err("%s: failure on smd_pkt_dev id: %d - packet size %d > buffer size %zu,",
+ __func__, smd_pkt_devp->i,
+ pkt_size, count);
mutex_unlock(&smd_pkt_devp->rx_lock);
- pr_err("smd_read failed to read %d bytes: %d\n", bytes_read, r);
- return -EIO;
+ return -ETOOSMALL;
}
- D_DUMP_BUFFER("read: ", bytes_read, smd_pkt_devp->rx_buf);
- r = copy_to_user(buf, smd_pkt_devp->rx_buf, bytes_read);
+ bytes_read = 0;
+ do {
+ r = smd_read_user_buffer(smd_pkt_devp->ch,
+ (buf + bytes_read),
+ (pkt_size - bytes_read));
+ if (r < 0) {
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ if (smd_pkt_devp->has_reset) {
+ E_SMD_PKT_SSR(smd_pkt_devp);
+ return notify_reset(smd_pkt_devp);
+ }
+ pr_err("%s Error while reading %d\n", __func__, r);
+ return r;
+ }
+ bytes_read += r;
+ if (pkt_size != bytes_read)
+ wait_event(smd_pkt_devp->ch_read_wait_queue,
+ smd_read_avail(smd_pkt_devp->ch) ||
+ smd_pkt_devp->has_reset);
+ if (smd_pkt_devp->has_reset) {
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ E_SMD_PKT_SSR(smd_pkt_devp);
+ return notify_reset(smd_pkt_devp);
+ }
+ } while (pkt_size != bytes_read);
+ D_READ_DUMP_BUFFER("Read: ", (bytes_read > 16 ? 16 : bytes_read), buf);
mutex_unlock(&smd_pkt_devp->rx_lock);
- if (r) {
- pr_err("copy_to_user failed %d\n", r);
- return -EFAULT;
+
+ mutex_lock(&smd_pkt_devp->ch_lock);
+ spin_lock_irqsave(&smd_pkt_devp->pa_spinlock, flags);
+ if (smd_pkt_devp->poll_mode &&
+ !smd_cur_packet_size(smd_pkt_devp->ch)) {
+ __pm_relax(&smd_pkt_devp->pa_ws);
+ smd_pkt_devp->ws_locked = 0;
+ smd_pkt_devp->poll_mode = 0;
+ D_READ("%s unlocked smd_pkt_dev id:%d wakeup_source\n",
+ __func__, smd_pkt_devp->i);
}
+ spin_unlock_irqrestore(&smd_pkt_devp->pa_spinlock, flags);
+ mutex_unlock(&smd_pkt_devp->ch_lock);
+
+ D_READ("Finished %s on smd_pkt_dev id:%d %d bytes\n",
+ __func__, smd_pkt_devp->i, bytes_read);
- DBG("read complete %d bytes\n", bytes_read);
+ /* check and wakeup read threads waiting on this device */
check_and_wakeup_reader(smd_pkt_devp);
return bytes_read;
}
-static int smd_pkt_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+ssize_t smd_pkt_write(struct file *file,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
{
- int r;
+ int r = 0, bytes_written;
struct smd_pkt_dev *smd_pkt_devp;
+ DEFINE_WAIT(write_wait);
- if (count > MAX_BUF_SIZE)
- return -EINVAL;
+ smd_pkt_devp = file->private_data;
- DBG("writing %d bytes\n", count);
+ pr_debug("writing %d bytes\n", count);
+ if (!smd_pkt_devp) {
+ pr_err("%s on NULL smd_pkt_dev\n", __func__);
+ return -EINVAL;
+ }
- smd_pkt_devp = file->private_data;
- if (!smd_pkt_devp || !smd_pkt_devp->ch)
+ if (!smd_pkt_devp->ch) {
+ pr_err("%s on a closed smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
return -EINVAL;
+ }
- mutex_lock(&smd_pkt_devp->tx_lock);
- if (smd_write_avail(smd_pkt_devp->ch) < count) {
- mutex_unlock(&smd_pkt_devp->tx_lock);
- DBG("Not enough space to write\n");
- return -ENOMEM;
+ if (smd_pkt_devp->do_reset_notification || smd_pkt_devp->has_reset) {
+ E_SMD_PKT_SSR(smd_pkt_devp);
+ /* notify client that a reset occurred */
+ return notify_reset(smd_pkt_devp);
}
+ D_WRITE("Begin %s on smd_pkt_dev id:%d data_size %zu\n",
+ __func__, smd_pkt_devp->i, count);
- D_DUMP_BUFFER("write: ", count, buf);
- r = copy_from_user(smd_pkt_devp->tx_buf, buf, count);
- if (r) {
- mutex_unlock(&smd_pkt_devp->tx_lock);
- pr_err("copy_from_user failed %d\n", r);
- return -EFAULT;
+ mutex_lock(&smd_pkt_devp->tx_lock);
+ if (!smd_pkt_devp->blocking_write) {
+ if (smd_write_avail(smd_pkt_devp->ch) < count) {
+ pr_err("%s: Not enough space in smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ mutex_unlock(&smd_pkt_devp->tx_lock);
+ return -ENOMEM;
+ }
}
- r = smd_write(smd_pkt_devp->ch, smd_pkt_devp->tx_buf, count);
- if (r != count) {
+ r = smd_write_start(smd_pkt_devp->ch, count);
+ if (r < 0) {
mutex_unlock(&smd_pkt_devp->tx_lock);
- pr_err("smd_write failed to write %d bytes: %d.\n", count, r);
- return -EIO;
+ pr_err("%s: Error:%d in smd_pkt_dev id:%d @ smd_write_start\n",
+ __func__, r, smd_pkt_devp->i);
+ return r;
}
+
+ bytes_written = 0;
+ do {
+ prepare_to_wait(&smd_pkt_devp->ch_write_wait_queue,
+ &write_wait, TASK_UNINTERRUPTIBLE);
+ if (!smd_write_segment_avail(smd_pkt_devp->ch) &&
+ !smd_pkt_devp->has_reset) {
+ smd_enable_read_intr(smd_pkt_devp->ch);
+ schedule();
+ }
+ finish_wait(&smd_pkt_devp->ch_write_wait_queue, &write_wait);
+ smd_disable_read_intr(smd_pkt_devp->ch);
+
+ if (smd_pkt_devp->has_reset) {
+ mutex_unlock(&smd_pkt_devp->tx_lock);
+ E_SMD_PKT_SSR(smd_pkt_devp);
+ return notify_reset(smd_pkt_devp);
+ } else {
+ r = smd_write_segment(smd_pkt_devp->ch,
+ (void *)(buf + bytes_written),
+ (count - bytes_written), 1);
+ if (r < 0) {
+ mutex_unlock(&smd_pkt_devp->tx_lock);
+ if (smd_pkt_devp->has_reset) {
+ E_SMD_PKT_SSR(smd_pkt_devp);
+ return notify_reset(smd_pkt_devp);
+ }
+ pr_err("%s on smd_pkt_dev id:%d failed r:%d\n",
+ __func__, smd_pkt_devp->i, r);
+ return r;
+ }
+ bytes_written += r;
+ }
+ } while (bytes_written != count);
+ smd_write_end(smd_pkt_devp->ch);
mutex_unlock(&smd_pkt_devp->tx_lock);
+ D_WRITE_DUMP_BUFFER("Write: ",
+ (bytes_written > 16 ? 16 : bytes_written), buf);
+ D_WRITE("Finished %s on smd_pkt_dev id:%d %zu bytes\n",
+ __func__, smd_pkt_devp->i, count);
- DBG("wrote %d bytes\n", count);
return count;
}
@@ -221,48 +658,158 @@ static unsigned int smd_pkt_poll(struct file *file, poll_table *wait)
unsigned int mask = 0;
smd_pkt_devp = file->private_data;
- if (!smd_pkt_devp)
+ if (!smd_pkt_devp) {
+ pr_err("%s on a NULL device\n", __func__);
return POLLERR;
+ }
- DBG("poll waiting\n");
+ smd_pkt_devp->poll_mode = 1;
poll_wait(file, &smd_pkt_devp->ch_read_wait_queue, wait);
- if (smd_read_avail(smd_pkt_devp->ch))
+ mutex_lock(&smd_pkt_devp->ch_lock);
+ if (smd_pkt_devp->has_reset || !smd_pkt_devp->ch) {
+ mutex_unlock(&smd_pkt_devp->ch_lock);
+ return POLLERR;
+ }
+
+ if (smd_read_avail(smd_pkt_devp->ch)) {
mask |= POLLIN | POLLRDNORM;
+ D_POLL("%s sets POLLIN for smd_pkt_dev id: %d\n",
+ __func__, smd_pkt_devp->i);
+ }
+ mutex_unlock(&smd_pkt_devp->ch_lock);
- DBG("poll return\n");
return mask;
}
-static void smd_pkt_ch_notify(void *priv, unsigned event)
+static void check_and_wakeup_reader(struct smd_pkt_dev *smd_pkt_devp)
+{
+ int sz;
+ unsigned long flags;
+
+ if (!smd_pkt_devp) {
+ pr_err("%s on a NULL device\n", __func__);
+ return;
+ }
+
+ if (!smd_pkt_devp->ch) {
+ pr_err("%s on a closed smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ return;
+ }
+
+ sz = smd_cur_packet_size(smd_pkt_devp->ch);
+ if (sz == 0) {
+ D_READ("%s: No packet in smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ return;
+ }
+ if (!smd_read_avail(smd_pkt_devp->ch)) {
+ D_READ(
+ "%s: packet size is %d in smd_pkt_dev id:%d - but the data isn't here\n",
+ __func__, sz, smd_pkt_devp->i);
+ return;
+ }
+
+ /* here we have a packet of size sz ready */
+ spin_lock_irqsave(&smd_pkt_devp->pa_spinlock, flags);
+ __pm_stay_awake(&smd_pkt_devp->pa_ws);
+ smd_pkt_devp->ws_locked = 1;
+ spin_unlock_irqrestore(&smd_pkt_devp->pa_spinlock, flags);
+ wake_up(&smd_pkt_devp->ch_read_wait_queue);
+ schedule_work(&smd_pkt_devp->packet_arrival_work);
+ D_READ("%s: wake_up smd_pkt_dev id:%d\n", __func__, smd_pkt_devp->i);
+}
+
+static void check_and_wakeup_writer(struct smd_pkt_dev *smd_pkt_devp)
+{
+ int sz;
+
+ if (!smd_pkt_devp) {
+ pr_err("%s on a NULL device\n", __func__);
+ return;
+ }
+
+ if (!smd_pkt_devp->ch) {
+ pr_err("%s on a closed smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ return;
+ }
+
+ sz = smd_write_segment_avail(smd_pkt_devp->ch);
+ if (sz) {
+ D_WRITE("%s: %d bytes write space in smd_pkt_dev id:%d\n",
+ __func__, sz, smd_pkt_devp->i);
+ smd_disable_read_intr(smd_pkt_devp->ch);
+ wake_up(&smd_pkt_devp->ch_write_wait_queue);
+ }
+}
+
+static void ch_notify(void *priv, unsigned event)
{
struct smd_pkt_dev *smd_pkt_devp = priv;
- if (smd_pkt_devp->ch == 0)
+ if (smd_pkt_devp->ch == 0) {
+ if (event != SMD_EVENT_CLOSE)
+ pr_err("%s on a closed smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
return;
+ }
switch (event) {
- case SMD_EVENT_DATA:
- DBG("data\n");
+ case SMD_EVENT_DATA: {
+ D_STATUS("%s: DATA event in smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
check_and_wakeup_reader(smd_pkt_devp);
+ if (smd_pkt_devp->blocking_write)
+ check_and_wakeup_writer(smd_pkt_devp);
break;
-
+ }
case SMD_EVENT_OPEN:
- DBG("remote open\n");
- smd_pkt_devp->remote_open = 1;
+ D_STATUS("%s: OPEN event in smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ smd_pkt_devp->has_reset = 0;
+ smd_pkt_devp->is_open = 1;
wake_up_interruptible(&smd_pkt_devp->ch_opened_wait_queue);
break;
-
case SMD_EVENT_CLOSE:
- smd_pkt_devp->remote_open = 0;
- pr_info("remote closed\n");
- break;
-
- default:
- pr_err("unknown event %d\n", event);
+ D_STATUS("%s: CLOSE event in smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ smd_pkt_devp->is_open = 0;
+ /* put port into reset state */
+ clean_and_signal(smd_pkt_devp);
+ if (!strcmp(smd_pkt_devp->ch_name, "LOOPBACK"))
+ schedule_delayed_work(&loopback_work,
+ msecs_to_jiffies(1000));
break;
}
}
+/*
+ * Legacy configuration : smd_ch_name[], smd_ch_edge[] and smd_pkt_dev_name[].
+ * Future targets use either platform device or device tree configuration.
+ */
+#ifdef CONFIG_ARCH_FSM9XXX
+static char *smd_pkt_dev_name[] = {
+ "smdcntl1",
+ "smdcntl2",
+ "smd22",
+ "smd_pkt_loopback",
+};
+
+static char *smd_ch_name[] = {
+ "DATA6_CNTL",
+ "DATA7_CNTL",
+ "DATA22",
+ "LOOPBACK",
+};
+
+static uint32_t smd_ch_edge[] = {
+ SMD_APPS_QDSP,
+ SMD_APPS_QDSP,
+ SMD_APPS_QDSP,
+ SMD_APPS_QDSP
+};
+#else
static char *smd_pkt_dev_name[] = {
"smdcntl0",
"smdcntl1",
@@ -272,7 +819,29 @@ static char *smd_pkt_dev_name[] = {
"smdcntl5",
"smdcntl6",
"smdcntl7",
+ "smdcntl9",
+ "smdcntl10",
+ "smdcntl11",
"smd22",
+ "smdcnt_rev0",
+ "smdcnt_rev1",
+ "smdcnt_rev2",
+ "smdcnt_rev3",
+ "smdcnt_rev4",
+ "smdcnt_rev5",
+ "smdcnt_rev6",
+ "smdcnt_rev7",
+ "smdcnt_rev8",
+ "smd_sns_dsps",
+ "apr_apps2",
+ "smdcntl8",
+ "smd_sns_adsp",
+ "smd_cxm_qmi",
+ "smd_test_framework",
+ "smd_logging_0",
+ "smd_data_0",
+ "apr",
+ "smd_pkt_loopback",
};
static char *smd_ch_name[] = {
@@ -284,67 +853,381 @@ static char *smd_ch_name[] = {
"DATA12_CNTL",
"DATA13_CNTL",
"DATA14_CNTL",
+ "DATA15_CNTL",
+ "DATA16_CNTL",
+ "DATA17_CNTL",
"DATA22",
+ "DATA23_CNTL",
+ "DATA24_CNTL",
+ "DATA25_CNTL",
+ "DATA26_CNTL",
+ "DATA27_CNTL",
+ "DATA28_CNTL",
+ "DATA29_CNTL",
+ "DATA30_CNTL",
+ "DATA31_CNTL",
+ "SENSOR",
+ "apr_apps2",
+ "DATA40_CNTL",
+ "SENSOR",
+ "CXM_QMI_PORT_8064",
+ "TESTFRAMEWORK",
+ "LOGGING",
+ "DATA",
+ "apr",
+ "LOOPBACK",
+};
+
+static uint32_t smd_ch_edge[] = {
+ SMD_APPS_MODEM,
+ SMD_APPS_MODEM,
+ SMD_APPS_MODEM,
+ SMD_APPS_MODEM,
+ SMD_APPS_MODEM,
+ SMD_APPS_MODEM,
+ SMD_APPS_MODEM,
+ SMD_APPS_MODEM,
+ SMD_APPS_MODEM,
+ SMD_APPS_MODEM,
+ SMD_APPS_MODEM,
+ SMD_APPS_MODEM,
+ SMD_APPS_MODEM,
+ SMD_APPS_MODEM,
+ SMD_APPS_MODEM,
+ SMD_APPS_MODEM,
+ SMD_APPS_MODEM,
+ SMD_APPS_MODEM,
+ SMD_APPS_MODEM,
+ SMD_APPS_MODEM,
+ SMD_APPS_MODEM,
+ SMD_APPS_DSPS,
+ SMD_APPS_QDSP,
+ SMD_APPS_MODEM,
+ SMD_APPS_QDSP,
+ SMD_APPS_WCNSS,
+ SMD_APPS_QDSP,
+ SMD_APPS_QDSP,
+ SMD_APPS_QDSP,
+ SMD_APPS_QDSP,
+ SMD_APPS_MODEM,
};
+#endif
+
+static int smd_pkt_dummy_probe(struct platform_device *pdev)
+{
+ struct smd_pkt_dev *smd_pkt_devp;
+
+ mutex_lock(&smd_pkt_dev_lock_lha1);
+ list_for_each_entry(smd_pkt_devp, &smd_pkt_dev_list, dev_list) {
+ if (smd_pkt_devp->edge == pdev->id
+ && !strcmp(pdev->name, smd_pkt_devp->ch_name)) {
+ complete_all(&smd_pkt_devp->ch_allocated);
+ D_STATUS("%s allocated SMD ch for smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ break;
+ }
+ }
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ return 0;
+}
-static int smd_pkt_open(struct inode *inode, struct file *file)
+static uint32_t is_modem_smsm_inited(void)
+{
+ uint32_t modem_state;
+ uint32_t ready_state = (SMSM_INIT | SMSM_SMDINIT);
+
+ modem_state = smsm_get_state(SMSM_MODEM_STATE);
+ return (modem_state & ready_state) == ready_state;
+}
+
+/**
+ * smd_pkt_add_driver() - Add platform drivers for smd pkt device
+ *
+ * @smd_pkt_devp: pointer to the smd pkt device structure
+ *
+ * @returns: 0 for success, standard Linux error code otherwise
+ *
+ * This function is used to register platform driver once for all
+ * smd pkt devices which have same names and increment the reference
+ * count for 2nd to nth devices.
+ */
+static int smd_pkt_add_driver(struct smd_pkt_dev *smd_pkt_devp)
+{
+ int r = 0;
+ struct smd_pkt_driver *smd_pkt_driverp;
+ struct smd_pkt_driver *item;
+
+ if (!smd_pkt_devp) {
+ pr_err("%s on a NULL device\n", __func__);
+ return -EINVAL;
+ }
+ D_STATUS("Begin %s on smd_pkt_ch[%s]\n", __func__,
+ smd_pkt_devp->ch_name);
+
+ mutex_lock(&smd_pkt_driver_lock_lha1);
+ list_for_each_entry(item, &smd_pkt_driver_list, list) {
+ if (!strcmp(item->pdriver_name, smd_pkt_devp->ch_name)) {
+ D_STATUS("%s:%s Already Platform driver reg. cnt:%d\n",
+ __func__, smd_pkt_devp->ch_name, item->ref_cnt);
+ ++item->ref_cnt;
+ goto exit;
+ }
+ }
+
+ smd_pkt_driverp = kzalloc(sizeof(*smd_pkt_driverp), GFP_KERNEL);
+ if (IS_ERR_OR_NULL(smd_pkt_driverp)) {
+ pr_err("%s: kzalloc() failed for smd_pkt_driver[%s]\n",
+ __func__, smd_pkt_devp->ch_name);
+ r = -ENOMEM;
+ goto exit;
+ }
+
+ smd_pkt_driverp->driver.probe = smd_pkt_dummy_probe;
+ scnprintf(smd_pkt_driverp->pdriver_name, SMD_MAX_CH_NAME_LEN,
+ "%s", smd_pkt_devp->ch_name);
+ smd_pkt_driverp->driver.driver.name = smd_pkt_driverp->pdriver_name;
+ smd_pkt_driverp->driver.driver.owner = THIS_MODULE;
+ r = platform_driver_register(&smd_pkt_driverp->driver);
+ if (r) {
+ pr_err("%s: %s Platform driver reg. failed\n",
+ __func__, smd_pkt_devp->ch_name);
+ kfree(smd_pkt_driverp);
+ goto exit;
+ }
+ ++smd_pkt_driverp->ref_cnt;
+ list_add(&smd_pkt_driverp->list, &smd_pkt_driver_list);
+
+exit:
+ D_STATUS("End %s on smd_pkt_ch[%s]\n", __func__, smd_pkt_devp->ch_name);
+ mutex_unlock(&smd_pkt_driver_lock_lha1);
+ return r;
+}
+
+/**
+ * smd_pkt_remove_driver() - Remove the platform drivers for smd pkt device
+ *
+ * @smd_pkt_devp: pointer to the smd pkt device structure
+ *
+ * This function is used to decrement the reference count on
+ * platform drivers for smd pkt devices and removes the drivers
+ * when the reference count becomes zero.
+ */
+static void smd_pkt_remove_driver(struct smd_pkt_dev *smd_pkt_devp)
+{
+ struct smd_pkt_driver *smd_pkt_driverp;
+ bool found_item = false;
+
+ if (!smd_pkt_devp) {
+ pr_err("%s on a NULL device\n", __func__);
+ return;
+ }
+
+ D_STATUS("Begin %s on smd_pkt_ch[%s]\n", __func__,
+ smd_pkt_devp->ch_name);
+ mutex_lock(&smd_pkt_driver_lock_lha1);
+ list_for_each_entry(smd_pkt_driverp, &smd_pkt_driver_list, list) {
+ if (!strcmp(smd_pkt_driverp->pdriver_name,
+ smd_pkt_devp->ch_name)) {
+ found_item = true;
+ D_STATUS("%s:%s Platform driver cnt:%d\n",
+ __func__, smd_pkt_devp->ch_name,
+ smd_pkt_driverp->ref_cnt);
+ if (smd_pkt_driverp->ref_cnt > 0)
+ --smd_pkt_driverp->ref_cnt;
+ else
+ pr_warn("%s reference count <= 0\n", __func__);
+ break;
+ }
+ }
+ if (!found_item)
+ pr_err("%s:%s No item found in list.\n",
+ __func__, smd_pkt_devp->ch_name);
+
+ if (found_item && smd_pkt_driverp->ref_cnt == 0) {
+ platform_driver_unregister(&smd_pkt_driverp->driver);
+ smd_pkt_driverp->driver.probe = NULL;
+ list_del(&smd_pkt_driverp->list);
+ kfree(smd_pkt_driverp);
+ }
+ mutex_unlock(&smd_pkt_driver_lock_lha1);
+ D_STATUS("End %s on smd_pkt_ch[%s]\n", __func__, smd_pkt_devp->ch_name);
+}
+
+int smd_pkt_open(struct inode *inode, struct file *file)
{
int r = 0;
struct smd_pkt_dev *smd_pkt_devp;
+ const char *peripheral = NULL;
smd_pkt_devp = container_of(inode->i_cdev, struct smd_pkt_dev, cdev);
- if (!smd_pkt_devp)
+
+ if (!smd_pkt_devp) {
+ pr_err("%s on a NULL device\n", __func__);
return -EINVAL;
+ }
+ D_STATUS("Begin %s on smd_pkt_dev id:%d\n", __func__, smd_pkt_devp->i);
file->private_data = smd_pkt_devp;
mutex_lock(&smd_pkt_devp->ch_lock);
- if (smd_pkt_devp->open_count == 0) {
- r = smd_open(smd_ch_name[smd_pkt_devp->i],
- &smd_pkt_devp->ch, smd_pkt_devp,
- smd_pkt_ch_notify);
- if (r < 0) {
- pr_err("smd_open failed for %s, %d\n",
- smd_ch_name[smd_pkt_devp->i], r);
+ if (smd_pkt_devp->ch == 0) {
+ wakeup_source_init(&smd_pkt_devp->pa_ws,
+ smd_pkt_devp->dev_name);
+ INIT_WORK(&smd_pkt_devp->packet_arrival_work,
+ packet_arrival_worker);
+ init_completion(&smd_pkt_devp->ch_allocated);
+
+ r = smd_pkt_add_driver(smd_pkt_devp);
+ if (r) {
+ pr_err("%s: %s Platform driver reg. failed\n",
+ __func__, smd_pkt_devp->ch_name);
goto out;
}
+ peripheral = smd_edge_to_pil_str(smd_pkt_devp->edge);
+ if (!IS_ERR_OR_NULL(peripheral)) {
+ smd_pkt_devp->pil = subsystem_get(peripheral);
+ if (IS_ERR(smd_pkt_devp->pil)) {
+ r = PTR_ERR(smd_pkt_devp->pil);
+ pr_err("%s failed on smd_pkt_dev id:%d - subsystem_get failed for %s\n",
+ __func__, smd_pkt_devp->i, peripheral);
+ /*
+ * Sleep inorder to reduce the frequency of
+ * retry by user-space modules and to avoid
+ * possible watchdog bite.
+ */
+ msleep((smd_pkt_devp->open_modem_wait * 1000));
+ goto release_pd;
+ }
+ }
+
+ /* Wait for the modem SMSM to be inited for the SMD
+ ** Loopback channel to be allocated at the modem. Since
+ ** the wait need to be done atmost once, using msleep
+ ** doesn't degrade the performance. */
+ if (!strcmp(smd_pkt_devp->ch_name, "LOOPBACK")) {
+ if (!is_modem_smsm_inited())
+ msleep(5000);
+ smsm_change_state(SMSM_APPS_STATE,
+ 0, SMSM_SMD_LOOPBACK);
+ msleep(100);
+ }
+
+ /*
+ * Wait for a packet channel to be allocated so we know
+ * the modem is ready enough.
+ */
+ if (smd_pkt_devp->open_modem_wait) {
+ r = wait_for_completion_interruptible_timeout(
+ &smd_pkt_devp->ch_allocated,
+ msecs_to_jiffies(
+ smd_pkt_devp->open_modem_wait
+ * 1000));
+ if (r == 0)
+ r = -ETIMEDOUT;
+ if (r < 0) {
+ pr_err("%s: wait on smd_pkt_dev id:%d allocation failed rc:%d\n",
+ __func__, smd_pkt_devp->i, r);
+ goto release_pil;
+ }
+ }
+
+ r = smd_named_open_on_edge(smd_pkt_devp->ch_name,
+ smd_pkt_devp->edge,
+ &smd_pkt_devp->ch,
+ smd_pkt_devp,
+ ch_notify);
+ if (r < 0) {
+ pr_err("%s: %s open failed %d\n", __func__,
+ smd_pkt_devp->ch_name, r);
+ goto release_pil;
+ }
+
r = wait_event_interruptible_timeout(
smd_pkt_devp->ch_opened_wait_queue,
- smd_pkt_devp->remote_open,
- msecs_to_jiffies(2 * HZ));
- if (r == 0)
+ smd_pkt_devp->is_open, (2 * HZ));
+ if (r == 0) {
r = -ETIMEDOUT;
+ /* close the ch to sync smd's state with smd_pkt */
+ smd_close(smd_pkt_devp->ch);
+ smd_pkt_devp->ch = NULL;
+ }
if (r < 0) {
- pr_err("wait returned %d\n", r);
- smd_close(smd_pkt_devp->ch);
- smd_pkt_devp->ch = 0;
+ pr_err("%s: wait on smd_pkt_dev id:%d OPEN event failed rc:%d\n",
+ __func__, smd_pkt_devp->i, r);
+ } else if (!smd_pkt_devp->is_open) {
+ pr_err("%s: Invalid OPEN event on smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ r = -ENODEV;
} else {
- smd_pkt_devp->open_count++;
+ smd_disable_read_intr(smd_pkt_devp->ch);
+ smd_pkt_devp->ch_size =
+ smd_write_avail(smd_pkt_devp->ch);
r = 0;
+ smd_pkt_devp->ref_cnt++;
+ D_STATUS("Finished %s on smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
}
+ } else {
+ smd_pkt_devp->ref_cnt++;
}
+release_pil:
+ if (peripheral && (r < 0))
+ subsystem_put(smd_pkt_devp->pil);
+
+release_pd:
+ if (r < 0)
+ smd_pkt_remove_driver(smd_pkt_devp);
out:
+ if (!smd_pkt_devp->ch)
+ wakeup_source_trash(&smd_pkt_devp->pa_ws);
+
mutex_unlock(&smd_pkt_devp->ch_lock);
+
+
return r;
}
-static int smd_pkt_release(struct inode *inode, struct file *file)
+int smd_pkt_release(struct inode *inode, struct file *file)
{
int r = 0;
struct smd_pkt_dev *smd_pkt_devp = file->private_data;
- if (!smd_pkt_devp)
+ if (!smd_pkt_devp) {
+ pr_err("%s on a NULL device\n", __func__);
return -EINVAL;
+ }
+ D_STATUS("Begin %s on smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
mutex_lock(&smd_pkt_devp->ch_lock);
- if (--smd_pkt_devp->open_count == 0) {
+ mutex_lock(&smd_pkt_devp->rx_lock);
+ mutex_lock(&smd_pkt_devp->tx_lock);
+ if (smd_pkt_devp->ref_cnt > 0)
+ smd_pkt_devp->ref_cnt--;
+
+ if (smd_pkt_devp->ch != 0 && smd_pkt_devp->ref_cnt == 0) {
+ clean_and_signal(smd_pkt_devp);
r = smd_close(smd_pkt_devp->ch);
smd_pkt_devp->ch = 0;
+ smd_pkt_devp->blocking_write = 0;
+ smd_pkt_devp->poll_mode = 0;
+ smd_pkt_remove_driver(smd_pkt_devp);
+ if (smd_pkt_devp->pil)
+ subsystem_put(smd_pkt_devp->pil);
+ smd_pkt_devp->has_reset = 0;
+ smd_pkt_devp->do_reset_notification = 0;
+ smd_pkt_devp->ws_locked = 0;
+ wakeup_source_trash(&smd_pkt_devp->pa_ws);
}
+ mutex_unlock(&smd_pkt_devp->tx_lock);
+ mutex_unlock(&smd_pkt_devp->rx_lock);
mutex_unlock(&smd_pkt_devp->ch_lock);
+ D_STATUS("Finished %s on smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+
return r;
}
@@ -355,110 +1238,362 @@ static const struct file_operations smd_pkt_fops = {
.read = smd_pkt_read,
.write = smd_pkt_write,
.poll = smd_pkt_poll,
+ .unlocked_ioctl = smd_pkt_ioctl,
+ .compat_ioctl = smd_pkt_ioctl,
};
-static int __init smd_pkt_init(void)
+static int smd_pkt_init_add_device(struct smd_pkt_dev *smd_pkt_devp, int i)
+{
+ int r = 0;
+
+ smd_pkt_devp->i = i;
+
+ init_waitqueue_head(&smd_pkt_devp->ch_read_wait_queue);
+ init_waitqueue_head(&smd_pkt_devp->ch_write_wait_queue);
+ smd_pkt_devp->is_open = 0;
+ smd_pkt_devp->poll_mode = 0;
+ smd_pkt_devp->ws_locked = 0;
+ init_waitqueue_head(&smd_pkt_devp->ch_opened_wait_queue);
+
+ spin_lock_init(&smd_pkt_devp->pa_spinlock);
+ mutex_init(&smd_pkt_devp->ch_lock);
+ mutex_init(&smd_pkt_devp->rx_lock);
+ mutex_init(&smd_pkt_devp->tx_lock);
+
+ cdev_init(&smd_pkt_devp->cdev, &smd_pkt_fops);
+ smd_pkt_devp->cdev.owner = THIS_MODULE;
+
+ r = cdev_add(&smd_pkt_devp->cdev, (smd_pkt_number + i), 1);
+ if (IS_ERR_VALUE(r)) {
+ pr_err("%s: cdev_add() failed for smd_pkt_dev id:%d ret:%i\n",
+ __func__, i, r);
+ return r;
+ }
+
+ smd_pkt_devp->devicep =
+ device_create(smd_pkt_classp,
+ NULL,
+ (smd_pkt_number + i),
+ NULL,
+ smd_pkt_devp->dev_name);
+
+ if (IS_ERR_OR_NULL(smd_pkt_devp->devicep)) {
+ pr_err("%s: device_create() failed for smd_pkt_dev id:%d\n",
+ __func__, i);
+ r = -ENOMEM;
+ cdev_del(&smd_pkt_devp->cdev);
+ return r;
+ }
+ if (device_create_file(smd_pkt_devp->devicep,
+ &dev_attr_open_timeout))
+ pr_err("%s: unable to create device attr for smd_pkt_dev id:%d\n",
+ __func__, i);
+
+ if (!strcmp(smd_pkt_devp->ch_name, "LOOPBACK")) {
+ if (device_create_file(smd_pkt_devp->devicep,
+ &dev_attr_loopback_edge))
+ pr_err("%s: unable to create device attr for smd_pkt_dev id:%d\n",
+ __func__, i);
+ }
+ mutex_lock(&smd_pkt_dev_lock_lha1);
+ list_add(&smd_pkt_devp->dev_list, &smd_pkt_dev_list);
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+
+ return r;
+}
+
+static void smd_pkt_core_deinit(void)
+{
+ struct smd_pkt_dev *smd_pkt_devp;
+ struct smd_pkt_dev *index;
+
+ mutex_lock(&smd_pkt_dev_lock_lha1);
+ list_for_each_entry_safe(smd_pkt_devp, index, &smd_pkt_dev_list,
+ dev_list) {
+ cdev_del(&smd_pkt_devp->cdev);
+ list_del(&smd_pkt_devp->dev_list);
+ device_destroy(smd_pkt_classp,
+ MKDEV(MAJOR(smd_pkt_number), smd_pkt_devp->i));
+ kfree(smd_pkt_devp);
+ }
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+
+ if (!IS_ERR_OR_NULL(smd_pkt_classp))
+ class_destroy(smd_pkt_classp);
+
+ unregister_chrdev_region(MAJOR(smd_pkt_number), num_smd_pkt_ports);
+}
+
+static int smd_pkt_alloc_chrdev_region(void)
{
- int i;
int r;
- r = alloc_chrdev_region(&smd_pkt_number, 0,
- NUM_SMD_PKT_PORTS, DEVICE_NAME);
- if (r) {
- pr_err("alloc_chrdev_region() failed %d\n", r);
+ if (ARRAY_SIZE(smd_ch_name) != DEFAULT_NUM_SMD_PKT_PORTS ||
+ ARRAY_SIZE(smd_ch_edge) != DEFAULT_NUM_SMD_PKT_PORTS ||
+ ARRAY_SIZE(smd_pkt_dev_name)
+ != DEFAULT_NUM_SMD_PKT_PORTS) {
+ pr_err("%s: mismatch in number of ports\n", __func__);
+ BUG();
+ }
+
+ r = alloc_chrdev_region(&smd_pkt_number,
+ 0,
+ num_smd_pkt_ports,
+ DEVICE_NAME);
+ if (IS_ERR_VALUE(r)) {
+ pr_err("%s: alloc_chrdev_region() failed ret:%i\n",
+ __func__, r);
return r;
}
smd_pkt_classp = class_create(THIS_MODULE, DEVICE_NAME);
if (IS_ERR(smd_pkt_classp)) {
- r = PTR_ERR(smd_pkt_classp);
- pr_err("class_create() failed %d\n", r);
- goto unreg_chardev;
+ pr_err("%s: class_create() failed ENOMEM\n", __func__);
+ r = -ENOMEM;
+ unregister_chrdev_region(MAJOR(smd_pkt_number),
+ num_smd_pkt_ports);
+ return r;
}
- for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) {
- smd_pkt_devp[i] = kzalloc(sizeof(struct smd_pkt_dev),
- GFP_KERNEL);
- if (!smd_pkt_devp[i]) {
- pr_err("kmalloc() failed\n");
- goto clean_cdevs;
+ return 0;
+}
+
+static int smd_pkt_core_init(void)
+{
+ int i;
+ int r;
+ struct smd_pkt_dev *smd_pkt_devp;
+
+ r = smd_pkt_alloc_chrdev_region();
+ if (r) {
+ pr_err("%s: smd_pkt_alloc_chrdev_region() failed ret:%i\n",
+ __func__, r);
+ return r;
+ }
+
+ for (i = 0; i < num_smd_pkt_ports; ++i) {
+ smd_pkt_devp = kzalloc(sizeof(struct smd_pkt_dev),
+ GFP_KERNEL);
+ if (IS_ERR_OR_NULL(smd_pkt_devp)) {
+ pr_err("%s: kzalloc() failed for smd_pkt_dev id:%d\n",
+ __func__, i);
+ r = -ENOMEM;
+ goto error_destroy;
}
- smd_pkt_devp[i]->i = i;
+ smd_pkt_devp->edge = smd_ch_edge[i];
+ strlcpy(smd_pkt_devp->ch_name, smd_ch_name[i],
+ SMD_MAX_CH_NAME_LEN);
+ strlcpy(smd_pkt_devp->dev_name, smd_pkt_dev_name[i],
+ SMD_MAX_CH_NAME_LEN);
- init_waitqueue_head(&smd_pkt_devp[i]->ch_read_wait_queue);
- smd_pkt_devp[i]->remote_open = 0;
- init_waitqueue_head(&smd_pkt_devp[i]->ch_opened_wait_queue);
+ r = smd_pkt_init_add_device(smd_pkt_devp, i);
+ if (r < 0) {
+ pr_err("add device failed for idx:%d ret=%d\n", i, r);
+ kfree(smd_pkt_devp);
+ goto error_destroy;
+ }
+ }
- mutex_init(&smd_pkt_devp[i]->ch_lock);
- mutex_init(&smd_pkt_devp[i]->rx_lock);
- mutex_init(&smd_pkt_devp[i]->tx_lock);
+ INIT_DELAYED_WORK(&loopback_work, loopback_probe_worker);
- cdev_init(&smd_pkt_devp[i]->cdev, &smd_pkt_fops);
- smd_pkt_devp[i]->cdev.owner = THIS_MODULE;
+ D_STATUS("SMD Packet Port Driver Initialized.\n");
+ return 0;
- r = cdev_add(&smd_pkt_devp[i]->cdev,
- (smd_pkt_number + i), 1);
- if (r) {
- pr_err("cdev_add() failed %d\n", r);
- kfree(smd_pkt_devp[i]);
- goto clean_cdevs;
+error_destroy:
+ smd_pkt_core_deinit();
+ return r;
+}
+
+static int parse_smdpkt_devicetree(struct device_node *node,
+ struct smd_pkt_dev *smd_pkt_devp)
+{
+ int edge;
+ char *key;
+ const char *ch_name;
+ const char *dev_name;
+ const char *remote_ss;
+
+ key = "qcom,smdpkt-remote";
+ remote_ss = of_get_property(node, key, NULL);
+ if (!remote_ss)
+ goto error;
+
+ edge = smd_remote_ss_to_edge(remote_ss);
+ if (edge < 0)
+ goto error;
+
+ smd_pkt_devp->edge = edge;
+ D_STATUS("%s: %s = %d", __func__, key, edge);
+
+ key = "qcom,smdpkt-port-name";
+ ch_name = of_get_property(node, key, NULL);
+ if (!ch_name)
+ goto error;
+
+ strlcpy(smd_pkt_devp->ch_name, ch_name, SMD_MAX_CH_NAME_LEN);
+ D_STATUS("%s ch_name = %s\n", __func__, ch_name);
+
+ key = "qcom,smdpkt-dev-name";
+ dev_name = of_get_property(node, key, NULL);
+ if (!dev_name)
+ goto error;
+
+ strlcpy(smd_pkt_devp->dev_name, dev_name, SMD_MAX_CH_NAME_LEN);
+ D_STATUS("%s dev_name = %s\n", __func__, dev_name);
+
+ return 0;
+
+error:
+ pr_err("%s: missing key: %s\n", __func__, key);
+ return -ENODEV;
+
+}
+
+static int smd_pkt_devicetree_init(struct platform_device *pdev)
+{
+ int ret;
+ int i = 0;
+ struct device_node *node;
+ struct smd_pkt_dev *smd_pkt_devp;
+ int subnode_num = 0;
+
+ for_each_child_of_node(pdev->dev.of_node, node)
+ ++subnode_num;
+
+ num_smd_pkt_ports = subnode_num;
+
+ ret = smd_pkt_alloc_chrdev_region();
+ if (ret) {
+ pr_err("%s: smd_pkt_alloc_chrdev_region() failed ret:%i\n",
+ __func__, ret);
+ return ret;
+ }
+
+ for_each_child_of_node(pdev->dev.of_node, node) {
+ smd_pkt_devp = kzalloc(sizeof(struct smd_pkt_dev), GFP_KERNEL);
+ if (IS_ERR_OR_NULL(smd_pkt_devp)) {
+ pr_err("%s: kzalloc() failed for smd_pkt_dev id:%d\n",
+ __func__, i);
+ ret = -ENOMEM;
+ goto error_destroy;
}
- smd_pkt_devp[i]->devicep =
- device_create(smd_pkt_classp, NULL,
- (smd_pkt_number + i), NULL,
- smd_pkt_dev_name[i]);
- if (IS_ERR(smd_pkt_devp[i]->devicep)) {
- r = PTR_ERR(smd_pkt_devp[i]->devicep);
- pr_err("device_create() failed %d\n", r);
- cdev_del(&smd_pkt_devp[i]->cdev);
- kfree(smd_pkt_devp[i]);
- goto clean_cdevs;
+ ret = parse_smdpkt_devicetree(node, smd_pkt_devp);
+ if (ret) {
+ pr_err(" failed to parse_smdpkt_devicetree %d\n", i);
+ kfree(smd_pkt_devp);
+ goto error_destroy;
}
+ ret = smd_pkt_init_add_device(smd_pkt_devp, i);
+ if (ret < 0) {
+ pr_err("add device failed for idx:%d ret=%d\n", i, ret);
+ kfree(smd_pkt_devp);
+ goto error_destroy;
+ }
+ i++;
}
- pr_info("SMD Packet Port Driver Initialized.\n");
+ INIT_DELAYED_WORK(&loopback_work, loopback_probe_worker);
+
+ D_STATUS("SMD Packet Port Driver Initialized.\n");
return 0;
-clean_cdevs:
- if (i > 0) {
- while (--i >= 0) {
- mutex_destroy(&smd_pkt_devp[i]->ch_lock);
- mutex_destroy(&smd_pkt_devp[i]->rx_lock);
- mutex_destroy(&smd_pkt_devp[i]->tx_lock);
- cdev_del(&smd_pkt_devp[i]->cdev);
- kfree(smd_pkt_devp[i]);
- device_destroy(smd_pkt_classp,
- MKDEV(MAJOR(smd_pkt_number), i));
+error_destroy:
+ smd_pkt_core_deinit();
+ return ret;
+}
+
+static int msm_smd_pkt_probe(struct platform_device *pdev)
+{
+ int ret;
+ /*
+ * If smd_probe_worker called before msm_smd_pkt_probe,
+ * then remove legacy device and proceed with new configuration.
+ */
+ mutex_lock(&smd_pkt_dev_lock_lha1);
+ if (smdpkt_probe_done == 1) {
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ smd_pkt_core_deinit();
+ } else {
+ smdpkt_probe_done = 1;
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ }
+ D_STATUS("%s smdpkt_probe_done = %d\n", __func__, smdpkt_probe_done);
+
+ if (pdev) {
+ if (pdev->dev.of_node) {
+ D_STATUS("%s device tree implementation\n", __func__);
+ ret = smd_pkt_devicetree_init(pdev);
+ if (ret)
+ pr_err("%s: device tree init failed\n",
+ __func__);
}
}
- class_destroy(smd_pkt_classp);
-unreg_chardev:
- unregister_chrdev_region(MAJOR(smd_pkt_number), NUM_SMD_PKT_PORTS);
- return r;
+ return 0;
}
-module_init(smd_pkt_init);
-static void __exit smd_pkt_cleanup(void)
+static void smdpkt_probe_worker(struct work_struct *work)
{
- int i;
+ int ret;
+ D_STATUS("%s smdpkt_probe_done =%d\n", __func__, smdpkt_probe_done);
+
+ mutex_lock(&smd_pkt_dev_lock_lha1);
+ if (!smdpkt_probe_done) {
+ smdpkt_probe_done = 1;
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ ret = smd_pkt_core_init();
+ if (ret < 0)
+ pr_err("smd_pkt_core_init failed ret = %d\n", ret);
+ return;
+ }
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+}
- for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) {
- mutex_destroy(&smd_pkt_devp[i]->ch_lock);
- mutex_destroy(&smd_pkt_devp[i]->rx_lock);
- mutex_destroy(&smd_pkt_devp[i]->tx_lock);
- cdev_del(&smd_pkt_devp[i]->cdev);
- kfree(smd_pkt_devp[i]);
- device_destroy(smd_pkt_classp,
- MKDEV(MAJOR(smd_pkt_number), i));
+static struct of_device_id msm_smd_pkt_match_table[] = {
+ { .compatible = "qcom,smdpkt" },
+ {},
+};
+
+static struct platform_driver msm_smd_pkt_driver = {
+ .probe = msm_smd_pkt_probe,
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_smd_pkt_match_table,
+ },
+};
+
+static int __init smd_pkt_init(void)
+{
+ int rc;
+
+ INIT_LIST_HEAD(&smd_pkt_dev_list);
+ INIT_LIST_HEAD(&smd_pkt_driver_list);
+ rc = platform_driver_register(&msm_smd_pkt_driver);
+ if (rc) {
+ pr_err("%s: msm_smd_driver register failed %d\n",
+ __func__, rc);
+ return rc;
}
- class_destroy(smd_pkt_classp);
- unregister_chrdev_region(MAJOR(smd_pkt_number), NUM_SMD_PKT_PORTS);
+ INIT_DELAYED_WORK(&smdpkt_probe_work, smdpkt_probe_worker);
+ schedule_delayed_work(&smdpkt_probe_work,
+ msecs_to_jiffies(SMD_PKT_PROBE_WAIT_TIMEOUT));
+
+ smd_pkt_ilctxt = ipc_log_context_create(SMD_PKT_IPC_LOG_PAGE_CNT,
+ "smd_pkt");
+ return 0;
}
+
+static void __exit smd_pkt_cleanup(void)
+{
+ smd_pkt_core_deinit();
+}
+
+module_init(smd_pkt_init);
module_exit(smd_pkt_cleanup);
MODULE_DESCRIPTION("MSM Shared Memory Packet Port");
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 455fd17d938e..9ea54d8c817f 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -140,5 +140,5 @@ endmenu
source "drivers/clk/bcm/Kconfig"
source "drivers/clk/mvebu/Kconfig"
-
source "drivers/clk/samsung/Kconfig"
+source "drivers/clk/qcom/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index d5fba5bc6e1b..5d475d95d0e2 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -1,7 +1,7 @@
# common clock types
obj-$(CONFIG_HAVE_CLK) += clk-devres.o
obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o
-obj-$(CONFIG_COMMON_CLK) += clk.o
+obj-$(CONFIG_OF) += clk.o
obj-$(CONFIG_COMMON_CLK) += clk-divider.o
obj-$(CONFIG_COMMON_CLK) += clk-fixed-factor.o
obj-$(CONFIG_COMMON_CLK) += clk-fixed-rate.o
@@ -68,3 +68,4 @@ obj-$(CONFIG_ARCH_U8500) += ux500/
obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/
obj-$(CONFIG_X86) += x86/
obj-$(CONFIG_ARCH_ZYNQ) += zynq/
+obj-$(CONFIG_ARCH_QCOM) += qcom/
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 4896ae9e23da..f9f248596187 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -21,9 +21,12 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/sched.h>
+#include <linux/clk-provider.h>
#include "clk.h"
+#if defined(CONFIG_COMMON_CLK)
+
static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);
@@ -2391,6 +2394,22 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);
+struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct clk_onecell_data *clk_data = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= clk_data->clk_num) {
+ pr_err("%s: invalid clock index %d\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return clk_data->clks[idx];
+}
+EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
+
+#endif
+
#ifdef CONFIG_OF
/**
* struct of_clk_provider - Clock provider registration structure
@@ -2431,20 +2450,28 @@ struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
-
-struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
+/**
+ * of_clk_del_provider() - Remove a previously registered clock provider
+ * @np: Device node pointer associated with clock provider
+ */
+void of_clk_del_provider(struct device_node *np)
{
- struct clk_onecell_data *clk_data = data;
- unsigned int idx = clkspec->args[0];
+ struct of_clk_provider *cp;
- if (idx >= clk_data->clk_num) {
- pr_err("%s: invalid clock index %d\n", __func__, idx);
- return ERR_PTR(-EINVAL);
+ mutex_lock(&of_clk_mutex);
+ list_for_each_entry(cp, &of_clk_providers, link) {
+ if (cp->node == np) {
+ list_del(&cp->link);
+ of_node_put(cp->node);
+ kfree(cp);
+ break;
+ }
}
-
- return clk_data->clks[idx];
+ mutex_unlock(&of_clk_mutex);
}
-EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
+EXPORT_SYMBOL_GPL(of_clk_del_provider);
+
+
/**
* of_clk_add_provider() - Register a clock provider for a node
@@ -2481,26 +2508,6 @@ int of_clk_add_provider(struct device_node *np,
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);
-/**
- * of_clk_del_provider() - Remove a previously registered clock provider
- * @np: Device node pointer associated with clock provider
- */
-void of_clk_del_provider(struct device_node *np)
-{
- struct of_clk_provider *cp;
-
- mutex_lock(&of_clk_mutex);
- list_for_each_entry(cp, &of_clk_providers, link) {
- if (cp->node == np) {
- list_del(&cp->link);
- of_node_put(cp->node);
- kfree(cp);
- break;
- }
- }
- mutex_unlock(&of_clk_mutex);
-}
-EXPORT_SYMBOL_GPL(of_clk_del_provider);
struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
@@ -2584,6 +2591,7 @@ struct clock_provider {
};
static LIST_HEAD(clk_provider_list);
+#if defined(CONFIG_COMMON_CLK)
/*
* This function looks for a parent clock. If there is one, then it
@@ -2675,3 +2683,4 @@ void __init of_clk_init(const struct of_device_id *matches)
}
}
#endif
+#endif
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index da4bda8b7fc7..0a46c2e6885a 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -26,8 +26,9 @@
static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
-#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec);
+#if defined(CONFIG_OF) //&& defined(CONFIG_COMMON_CLK)
/**
* of_clk_get_by_clkspec() - Lookup a clock form a clock provider
* @clkspec: pointer to a clock specifier data structure
@@ -43,13 +44,13 @@ struct clk *of_clk_get_by_clkspec(struct of_phandle_args *clkspec)
if (!clkspec)
return ERR_PTR(-EINVAL);
- of_clk_lock();
- clk = __of_clk_get_from_provider(clkspec);
+// of_clk_lock();
+ clk = of_clk_get_from_provider(clkspec);
if (!IS_ERR(clk) && !__clk_get(clk))
clk = ERR_PTR(-ENOENT);
- of_clk_unlock();
+// of_clk_unlock();
return clk;
}
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 783cfb24faa4..8232ccbe2be2 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -16,3 +16,12 @@ obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
+
+obj-y += clock.o clock-dummy.o clock-generic.o clock-pll.o clock-local2.o clock-alpha-pll.o clock-rpm.o clock-voter.o
+
+#obj-$(CONFIG_DEBUG_FS) += clock-debug.o
+
+# MSM8916
+obj-$(CONFIG_ARCH_MSM8916) += clock-rpm-8916.o clock-gcc-8916.o
+
+clk-qcom-y += gdsc.o
diff --git a/drivers/clk/qcom/clock-alpha-pll.c b/drivers/clk/qcom/clock-alpha-pll.c
new file mode 100644
index 000000000000..0e1a54132bf0
--- /dev/null
+++ b/drivers/clk/qcom/clock-alpha-pll.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <soc/qcom/clock-alpha-pll.h>
+
+#include "clock.h"
+
+#define WAIT_MAX_LOOPS 100
+
+#define MODE_REG(pll) (*pll->base + pll->offset + 0x0)
+#define LOCK_REG(pll) (*pll->base + pll->offset + 0x0)
+#define UPDATE_REG(pll) (*pll->base + pll->offset + 0x0)
+#define L_REG(pll) (*pll->base + pll->offset + 0x4)
+#define A_REG(pll) (*pll->base + pll->offset + 0x8)
+#define VCO_REG(pll) (*pll->base + pll->offset + 0x10)
+#define ALPHA_EN_REG(pll) (*pll->base + pll->offset + 0x10)
+
+#define PLL_BYPASSNL 0x2
+#define PLL_RESET_N 0x4
+#define PLL_OUTCTRL 0x1
+
+/*
+ * Even though 40 bits are present, only the upper 16 bits are
+ * significant due to the natural randomness in the XO clock
+ */
+#define ALPHA_REG_BITWIDTH 40
+#define ALPHA_BITWIDTH 16
+
+static unsigned long compute_rate(u64 parent_rate,
+ u32 l_val, u64 a_val)
+{
+ unsigned long rate;
+
+ /*
+ * assuming parent_rate < 2^25, we need a_val < 2^39 to avoid
+	 * overflow when multiplying below.
+ */
+ a_val = a_val >> 1;
+ rate = parent_rate * l_val;
+ rate += (unsigned long)((parent_rate * a_val) >>
+ (ALPHA_REG_BITWIDTH - 1));
+ return rate;
+}
+
+static bool is_locked(struct alpha_pll_clk *pll)
+{
+ u32 reg = readl_relaxed(LOCK_REG(pll));
+ u32 mask = pll->masks->lock_mask;
+ return (reg & mask) == mask;
+}
+
+static int alpha_pll_enable(struct clk *c)
+{
+ struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+ int count;
+ u32 mode;
+
+ mode = readl_relaxed(MODE_REG(pll));
+ mode |= PLL_BYPASSNL;
+ writel_relaxed(mode, MODE_REG(pll));
+
+ /*
+ * H/W requires a 5us delay between disabling the bypass and
+ * de-asserting the reset.
+ */
+ mb();
+ udelay(5);
+
+ mode |= PLL_RESET_N;
+ writel_relaxed(mode, MODE_REG(pll));
+
+ /* Wait for pll to lock. */
+ for (count = WAIT_MAX_LOOPS; count > 0; count--) {
+ if (is_locked(pll))
+ break;
+ udelay(1);
+ }
+
+ if (!count) {
+ pr_err("%s didn't lock after enabling it!\n", c->dbg_name);
+ return -EINVAL;
+ }
+
+ /* Enable PLL output. */
+ mode |= PLL_OUTCTRL;
+ writel_relaxed(mode, MODE_REG(pll));
+
+ /* Ensure that the write above goes through before returning. */
+ mb();
+ return 0;
+}
+
+static void alpha_pll_disable(struct clk *c)
+{
+ struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+ u32 mode;
+
+ mode = readl_relaxed(MODE_REG(pll));
+ mode &= ~PLL_OUTCTRL;
+ writel_relaxed(mode, MODE_REG(pll));
+
+ /* Delay of 2 output clock ticks required until output is disabled */
+ mb();
+ udelay(1);
+
+ mode &= ~(PLL_BYPASSNL | PLL_RESET_N);
+ writel_relaxed(mode, MODE_REG(pll));
+}
+
+static u32 find_vco(struct alpha_pll_clk *pll, unsigned long rate)
+{
+ unsigned long i;
+ struct alpha_pll_vco_tbl *v = pll->vco_tbl;
+
+ for (i = 0; i < pll->num_vco; i++) {
+ if (rate >= v[i].min_freq && rate <= v[i].max_freq)
+ return v[i].vco_val;
+ }
+
+ return -EINVAL;
+}
+
+static unsigned long __calc_values(struct alpha_pll_clk *pll,
+ unsigned long rate, int *l_val, u64 *a_val, bool round_up)
+{
+ u64 parent_rate;
+ u64 remainder;
+ u64 quotient;
+ unsigned long freq_hz;
+
+ parent_rate = clk_get_rate(pll->c.parent);
+ quotient = rate;
+ remainder = do_div(quotient, parent_rate);
+ *l_val = quotient;
+
+ if (!remainder) {
+ *a_val = 0;
+ return rate;
+ }
+
+ /* Upper 16 bits of Alpha */
+ quotient = remainder << ALPHA_BITWIDTH;
+ remainder = do_div(quotient, parent_rate);
+
+ if (remainder && round_up)
+ quotient++;
+
+ /* Convert to 40 bit format */
+ *a_val = quotient << (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
+
+ freq_hz = compute_rate(parent_rate, *l_val, *a_val);
+ return freq_hz;
+}
+
+static unsigned long round_rate_down(struct alpha_pll_clk *pll,
+ unsigned long rate, int *l_val, u64 *a_val)
+{
+ return __calc_values(pll, rate, l_val, a_val, false);
+}
+
+static unsigned long round_rate_up(struct alpha_pll_clk *pll,
+ unsigned long rate, int *l_val, u64 *a_val)
+{
+ return __calc_values(pll, rate, l_val, a_val, true);
+}
+
+static int alpha_pll_set_rate(struct clk *c, unsigned long rate)
+{
+ struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+ struct alpha_pll_masks *masks = pll->masks;
+ unsigned long flags, freq_hz;
+ u32 a_upper, a_lower, regval, l_val, vco_val;
+ u64 a_val;
+
+ freq_hz = round_rate_up(pll, rate, &l_val, &a_val);
+ if (freq_hz != rate) {
+ pr_err("alpha_pll: Call clk_set_rate with rounded rates!\n");
+ return -EINVAL;
+ }
+
+ vco_val = find_vco(pll, freq_hz);
+ if (IS_ERR_VALUE(vco_val)) {
+ pr_err("alpha pll: not in a valid vco range\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Ensure PLL is off before changing rate. For optimization reasons,
+ * assume no downstream clock is actively using it. No support
+ * for dynamic update at the moment.
+ */
+ spin_lock_irqsave(&c->lock, flags);
+ if (c->count)
+ alpha_pll_disable(c);
+
+ a_upper = (a_val >> 32) & 0xFF;
+ a_lower = (a_val & 0xFFFFFFFF);
+
+ writel_relaxed(l_val, L_REG(pll));
+ writel_relaxed(a_lower, A_REG(pll));
+ writel_relaxed(a_upper, A_REG(pll) + 0x4);
+
+ if (masks->vco_mask) {
+ regval = readl_relaxed(VCO_REG(pll));
+ regval &= ~(masks->vco_mask << masks->vco_shift);
+ regval |= vco_val << masks->vco_shift;
+ writel_relaxed(regval, VCO_REG(pll));
+ }
+
+ regval = readl_relaxed(ALPHA_EN_REG(pll));
+ regval |= masks->alpha_en_mask;
+ writel_relaxed(regval, ALPHA_EN_REG(pll));
+
+ if (c->count)
+ alpha_pll_enable(c);
+
+ spin_unlock_irqrestore(&c->lock, flags);
+ return 0;
+}
+
+static long alpha_pll_round_rate(struct clk *c, unsigned long rate)
+{
+ struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+ struct alpha_pll_vco_tbl *v = pll->vco_tbl;
+ u32 ret, l_val;
+ unsigned long freq_hz;
+ u64 a_val;
+ int i;
+
+ freq_hz = round_rate_up(pll, rate, &l_val, &a_val);
+ ret = find_vco(pll, freq_hz);
+ if (!IS_ERR_VALUE(ret))
+ return freq_hz;
+
+ freq_hz = 0;
+ for (i = 0; i < pll->num_vco; i++) {
+ if (is_better_rate(rate, freq_hz, v[i].min_freq))
+ freq_hz = v[i].min_freq;
+ if (is_better_rate(rate, freq_hz, v[i].max_freq))
+ freq_hz = v[i].max_freq;
+ }
+ if (!freq_hz)
+ return -EINVAL;
+ return freq_hz;
+}
+
+static void update_vco_tbl(struct alpha_pll_clk *pll)
+{
+ int i, l_val;
+ u64 a_val;
+ unsigned long hz;
+
+ /* Round vco limits to valid rates */
+ for (i = 0; i < pll->num_vco; i++) {
+ hz = round_rate_up(pll, pll->vco_tbl[i].min_freq, &l_val,
+ &a_val);
+ pll->vco_tbl[i].min_freq = hz;
+
+ hz = round_rate_down(pll, pll->vco_tbl[i].max_freq, &l_val,
+ &a_val);
+ pll->vco_tbl[i].max_freq = hz;
+ }
+}
+
+static enum handoff alpha_pll_handoff(struct clk *c)
+{
+ struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+ struct alpha_pll_masks *masks = pll->masks;
+ u64 parent_rate, a_val;
+ u32 alpha_en, l_val;
+
+ update_vco_tbl(pll);
+
+ if (!is_locked(pll))
+ return HANDOFF_DISABLED_CLK;
+
+ alpha_en = readl_relaxed(ALPHA_EN_REG(pll));
+ alpha_en &= masks->alpha_en_mask;
+
+ l_val = readl_relaxed(L_REG(pll));
+ a_val = readl_relaxed(A_REG(pll));
+ a_val |= ((u64)readl_relaxed(A_REG(pll) + 0x4)) << 32;
+
+ if (!alpha_en)
+ a_val = 0;
+
+ parent_rate = clk_get_rate(c->parent);
+ c->rate = compute_rate(parent_rate, l_val, a_val);
+
+ return HANDOFF_ENABLED_CLK;
+}
+
+struct clk_ops clk_ops_alpha_pll = {
+ .enable = alpha_pll_enable,
+ .disable = alpha_pll_disable,
+ .round_rate = alpha_pll_round_rate,
+ .set_rate = alpha_pll_set_rate,
+ .handoff = alpha_pll_handoff,
+};
+
+struct clk_ops clk_ops_fixed_alpha_pll = {
+ .enable = alpha_pll_enable,
+ .disable = alpha_pll_disable,
+ .handoff = alpha_pll_handoff,
+};
+
diff --git a/drivers/clk/qcom/clock-dummy.c b/drivers/clk/qcom/clock-dummy.c
new file mode 100644
index 000000000000..e86aaae2f0de
--- /dev/null
+++ b/drivers/clk/qcom/clock-dummy.c
@@ -0,0 +1,100 @@
+/* Copyright (c) 2011,2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+static int dummy_clk_reset(struct clk *clk, enum clk_reset_action action)
+{
+ return 0;
+}
+
+static int dummy_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ clk->rate = rate;
+ return 0;
+}
+
+static int dummy_clk_set_max_rate(struct clk *clk, unsigned long rate)
+{
+ return 0;
+}
+
+static int dummy_clk_set_flags(struct clk *clk, unsigned flags)
+{
+ return 0;
+}
+
+static unsigned long dummy_clk_get_rate(struct clk *clk)
+{
+ return clk->rate;
+}
+
+static long dummy_clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ return rate;
+}
+
+struct clk_ops clk_ops_dummy = {
+ .reset = dummy_clk_reset,
+ .set_rate = dummy_clk_set_rate,
+ .set_max_rate = dummy_clk_set_max_rate,
+ .set_flags = dummy_clk_set_flags,
+ .get_rate = dummy_clk_get_rate,
+ .round_rate = dummy_clk_round_rate,
+};
+
+struct clk dummy_clk = {
+ .dbg_name = "dummy_clk",
+ .ops = &clk_ops_dummy,
+ CLK_INIT(dummy_clk),
+};
+
+static struct clk *of_dummy_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ return &dummy_clk;
+}
+
+static struct of_device_id msm_clock_dummy_match_table[] = {
+ { .compatible = "qcom,dummycc" },
+ {}
+};
+
+static int msm_clock_dummy_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = of_clk_add_provider(pdev->dev.of_node, of_dummy_get, NULL);
+ if (ret)
+ return -ENOMEM;
+
+ dev_info(&pdev->dev, "Registered DUMMY provider.\n");
+ return ret;
+}
+
+static struct platform_driver msm_clock_dummy_driver = {
+ .probe = msm_clock_dummy_probe,
+ .driver = {
+ .name = "clock-dummy",
+ .of_match_table = msm_clock_dummy_match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+int __init msm_dummy_clk_init(void)
+{
+ return platform_driver_register(&msm_clock_dummy_driver);
+}
+arch_initcall(msm_dummy_clk_init);
+
diff --git a/drivers/clk/qcom/clock-gcc-8916.c b/drivers/clk/qcom/clock-gcc-8916.c
new file mode 100644
index 000000000000..ed6c0e8fbc74
--- /dev/null
+++ b/drivers/clk/qcom/clock-gcc-8916.c
@@ -0,0 +1,2979 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <soc/qcom/clock-local2.h>
+#include <soc/qcom/clock-pll.h>
+#include <soc/qcom/clock-voter.h>
+
+#include <linux/clk/msm-clock-generic.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+
+#include <dt-bindings/clock/msm-clocks-8916.h>
+
+#include "clock.h"
+
+enum {
+ GCC_BASE,
+ APCS_PLL_BASE,
+ N_BASES,
+};
+
+static void __iomem *virt_bases[N_BASES];
+
+#define GCC_REG_BASE(x) (void __iomem *)(virt_bases[GCC_BASE] + (x))
+
+#define GPLL0_MODE 0x21000
+#define GPLL0_L_VAL 0x21004
+#define GPLL0_M_VAL 0x21008
+#define GPLL0_N_VAL 0x2100C
+#define GPLL0_USER_CTL 0x21010
+#define GPLL0_CONFIG_CTL 0x21014
+#define GPLL0_STATUS 0x2101C
+#define GPLL1_MODE 0x20000
+#define GPLL1_L_VAL 0x20004
+#define GPLL1_M_VAL 0x20008
+#define GPLL1_N_VAL 0x2000C
+#define GPLL1_USER_CTL 0x20010
+#define GPLL1_CONFIG_CTL 0x20014
+#define GPLL1_STATUS 0x2001C
+#define GPLL2_MODE 0x4A000
+#define GPLL2_L_VAL 0x4A004
+#define GPLL2_M_VAL 0x4A008
+#define GPLL2_N_VAL 0x4A00C
+#define GPLL2_USER_CTL 0x4A010
+#define GPLL2_CONFIG_CTL 0x4A014
+#define GPLL2_STATUS 0x4A01C
+#define MSS_CFG_AHB_CBCR 0x49000
+#define MSS_Q6_BIMC_AXI_CBCR 0x49004
+#define USB_HS_BCR 0x41000
+#define USB_HS_SYSTEM_CBCR 0x41004
+#define USB_HS_AHB_CBCR 0x41008
+#define USB_HS_SYSTEM_CMD_RCGR 0x41010
+#define USB2A_PHY_SLEEP_CBCR 0x4102C
+#define SDCC1_APPS_CMD_RCGR 0x42004
+#define SDCC1_APPS_CBCR 0x42018
+#define SDCC1_AHB_CBCR 0x4201C
+#define SDCC2_APPS_CMD_RCGR 0x43004
+#define SDCC2_APPS_CBCR 0x43018
+#define SDCC2_AHB_CBCR 0x4301C
+#define BLSP1_AHB_CBCR 0x01008
+#define BLSP1_QUP1_SPI_APPS_CBCR 0x02004
+#define BLSP1_QUP1_I2C_APPS_CBCR 0x02008
+#define BLSP1_QUP1_I2C_APPS_CMD_RCGR 0x0200C
+#define BLSP1_QUP2_I2C_APPS_CMD_RCGR 0x03000
+#define BLSP1_QUP3_I2C_APPS_CMD_RCGR 0x04000
+#define BLSP1_QUP4_I2C_APPS_CMD_RCGR 0x05000
+#define BLSP1_QUP5_I2C_APPS_CMD_RCGR 0x06000
+#define BLSP1_QUP6_I2C_APPS_CMD_RCGR 0x07000
+#define BLSP1_QUP1_SPI_APPS_CMD_RCGR 0x02024
+#define BLSP1_UART1_APPS_CBCR 0x0203C
+#define BLSP1_UART1_APPS_CMD_RCGR 0x02044
+#define BLSP1_QUP2_SPI_APPS_CBCR 0x0300C
+#define BLSP1_QUP2_I2C_APPS_CBCR 0x03010
+#define BLSP1_QUP2_SPI_APPS_CMD_RCGR 0x03014
+#define BLSP1_UART2_APPS_CBCR 0x0302C
+#define BLSP1_UART2_APPS_CMD_RCGR 0x03034
+#define BLSP1_QUP3_SPI_APPS_CBCR 0x0401C
+#define BLSP1_QUP3_I2C_APPS_CBCR 0x04020
+#define BLSP1_QUP3_SPI_APPS_CMD_RCGR 0x04024
+#define BLSP1_QUP4_SPI_APPS_CBCR 0x0501C
+#define BLSP1_QUP4_I2C_APPS_CBCR 0x05020
+#define BLSP1_QUP4_SPI_APPS_CMD_RCGR 0x05024
+#define BLSP1_QUP5_SPI_APPS_CBCR 0x0601C
+#define BLSP1_QUP5_I2C_APPS_CBCR 0x06020
+#define BLSP1_QUP5_SPI_APPS_CMD_RCGR 0x06024
+#define BLSP1_QUP6_SPI_APPS_CBCR 0x0701C
+#define BLSP1_QUP6_I2C_APPS_CBCR 0x07020
+#define BLSP1_QUP6_SPI_APPS_CMD_RCGR 0x07024
+#define PDM_AHB_CBCR 0x44004
+#define PDM2_CBCR 0x4400C
+#define PDM2_CMD_RCGR 0x44010
+#define PRNG_AHB_CBCR 0x13004
+#define BOOT_ROM_AHB_CBCR 0x1300C
+#define CRYPTO_CMD_RCGR 0x16004
+#define CRYPTO_CBCR 0x1601C
+#define CRYPTO_AXI_CBCR 0x16020
+#define CRYPTO_AHB_CBCR 0x16024
+#define GCC_XO_DIV4_CBCR 0x30034
+#define GFX_TBU_CBCR 0x12010
+#define VENUS_TBU_CBCR 0x12014
+#define MDP_TBU_CBCR 0x1201C
+#define APSS_TCU_CBCR 0x12018
+#define GFX_TCU_CBCR 0x12020
+#define MSS_TBU_AXI_CBCR 0x12024
+#define MSS_TBU_GSS_AXI_CBCR 0x12028
+#define MSS_TBU_Q6_AXI_CBCR 0x1202C
+#define JPEG_TBU_CBCR 0x12034
+#define SMMU_CFG_CBCR 0x12038
+#define VFE_TBU_CBCR 0x1203C
+#define GTCU_AHB_CBCR 0x12044
+#define GTCU_AHB_BRIDGE_CBCR 0x12094
+#define APCS_GPLL_ENA_VOTE 0x45000
+#define APCS_CLOCK_BRANCH_ENA_VOTE 0x45004
+#define APCS_CLOCK_SLEEP_ENA_VOTE 0x45008
+#define APCS_SMMU_CLOCK_BRANCH_ENA_VOTE 0x4500C
+#define APSS_AHB_CMD_RCGR 0x46000
+#define GCC_DEBUG_CLK_CTL 0x74000
+#define CLOCK_FRQ_MEASURE_CTL 0x74004
+#define CLOCK_FRQ_MEASURE_STATUS 0x74008
+#define GCC_PLLTEST_PAD_CFG 0x7400C
+#define GP1_CBCR 0x08000
+#define GP1_CMD_RCGR 0x08004
+#define GP2_CBCR 0x09000
+#define GP2_CMD_RCGR 0x09004
+#define GP3_CBCR 0x0A000
+#define GP3_CMD_RCGR 0x0A004
+#define SPDM_JPEG0_CBCR 0x2F028
+#define SPDM_MDP_CBCR 0x2F02C
+#define SPDM_VCODEC0_CBCR 0x2F034
+#define SPDM_VFE0_CBCR 0x2F038
+#define SPDM_GFX3D_CBCR 0x2F03C
+#define SPDM_PCLK0_CBCR 0x2F044
+#define SPDM_CSI0_CBCR 0x2F048
+#define VCODEC0_CMD_RCGR 0x4C000
+#define VENUS0_BCR 0x4C014
+#define VENUS0_VCODEC0_CBCR 0x4C01C
+#define VENUS0_AHB_CBCR 0x4C020
+#define VENUS0_AXI_CBCR 0x4C024
+#define PCLK0_CMD_RCGR 0x4D000
+#define MDP_CMD_RCGR 0x4D014
+#define VSYNC_CMD_RCGR 0x4D02C
+#define BYTE0_CMD_RCGR 0x4D044
+#define ESC0_CMD_RCGR 0x4D05C
+#define MDSS_BCR 0x4D074
+#define MDSS_AHB_CBCR 0x4D07C
+#define MDSS_AXI_CBCR 0x4D080
+#define MDSS_PCLK0_CBCR 0x4D084
+#define MDSS_MDP_CBCR 0x4D088
+#define MDSS_VSYNC_CBCR 0x4D090
+#define MDSS_BYTE0_CBCR 0x4D094
+#define MDSS_ESC0_CBCR 0x4D098
+#define CSI0PHYTIMER_CMD_RCGR 0x4E000
+#define CAMSS_CSI0PHYTIMER_CBCR 0x4E01C
+#define CSI1PHYTIMER_CMD_RCGR 0x4F000
+#define CAMSS_CSI1PHYTIMER_CBCR 0x4F01C
+#define CSI0_CMD_RCGR 0x4E020
+#define CAMSS_CSI0_CBCR 0x4E03C
+#define CAMSS_CSI0_AHB_CBCR 0x4E040
+#define CAMSS_CSI0PHY_CBCR 0x4E048
+#define CAMSS_CSI0RDI_CBCR 0x4E050
+#define CAMSS_CSI0PIX_CBCR 0x4E058
+#define CSI1_CMD_RCGR 0x4F020
+#define CAMSS_CSI1_CBCR 0x4F03C
+#define CAMSS_CSI1_AHB_CBCR 0x4F040
+#define CAMSS_CSI1PHY_CBCR 0x4F048
+#define CAMSS_CSI1RDI_CBCR 0x4F050
+#define CAMSS_CSI1PIX_CBCR 0x4F058
+#define CAMSS_ISPIF_AHB_CBCR 0x50004
+#define CCI_CMD_RCGR 0x51000
+#define CAMSS_CCI_CBCR 0x51018
+#define CAMSS_CCI_AHB_CBCR 0x5101C
+#define MCLK0_CMD_RCGR 0x52000
+#define CAMSS_MCLK0_CBCR 0x52018
+#define MCLK1_CMD_RCGR 0x53000
+#define CAMSS_MCLK1_CBCR 0x53018
+#define CAMSS_GP0_CMD_RCGR 0x54000
+#define CAMSS_GP0_CBCR 0x54018
+#define CAMSS_GP1_CMD_RCGR 0x55000
+#define CAMSS_GP1_CBCR 0x55018
+#define CAMSS_AHB_CBCR 0x5A014
+#define CAMSS_TOP_AHB_CBCR 0x56004
+#define CAMSS_MICRO_AHB_CBCR 0x5600C
+#define CAMSS_MICRO_BCR 0x56008
+#define JPEG0_CMD_RCGR 0x57000
+#define CAMSS_JPEG0_BCR 0x57018
+#define CAMSS_JPEG0_CBCR 0x57020
+#define CAMSS_JPEG_AHB_CBCR 0x57024
+#define CAMSS_JPEG_AXI_CBCR 0x57028
+#define VFE0_CMD_RCGR 0x58000
+#define CPP_CMD_RCGR 0x58018
+#define CAMSS_VFE_BCR 0x58030
+#define CAMSS_VFE0_CBCR 0x58038
+#define CAMSS_CPP_CBCR 0x5803C
+#define CAMSS_CPP_AHB_CBCR 0x58040
+#define CAMSS_VFE_AHB_CBCR 0x58044
+#define CAMSS_VFE_AXI_CBCR 0x58048
+#define CAMSS_CSI_VFE0_BCR 0x5804C
+#define CAMSS_CSI_VFE0_CBCR 0x58050
+#define GFX3D_CMD_RCGR 0x59000
+#define OXILI_GFX3D_CBCR 0x59020
+#define OXILI_GMEM_CBCR 0x59024
+#define OXILI_AHB_CBCR 0x59028
+#define CAMSS_AHB_CMD_RCGR 0x5A000
+#define BIMC_GFX_CBCR 0x31024
+#define BIMC_GPU_CBCR 0x31040
+
+#define APCS_SH_PLL_MODE 0x00000
+#define APCS_SH_PLL_L_VAL 0x00004
+#define APCS_SH_PLL_M_VAL 0x00008
+#define APCS_SH_PLL_N_VAL 0x0000C
+#define APCS_SH_PLL_USER_CTL 0x00010
+#define APCS_SH_PLL_CONFIG_CTL 0x00014
+#define APCS_SH_PLL_STATUS 0x0001C
+
+/* Mux source select values */
+#define xo_source_val 0
+#define xo_a_source_val 0
+#define gpll0_source_val 1
+#define gpll0_aux_source_val 3
+#define gpll1_source_val 1
+#define gpll2_source_val 2
+#define dsi0_phypll_mm_source_val 1
+
+#define F(f, s, div, m, n) \
+ { \
+ .freq_hz = (f), \
+ .src_clk = &s##_clk_src.c, \
+ .m_val = (m), \
+ .n_val = ~((n)-(m)) * !!(n), \
+ .d_val = ~(n),\
+ .div_src_val = BVAL(4, 0, (int)(2*(div) - 1)) \
+ | BVAL(10, 8, s##_source_val), \
+ }
+
+#define F_MDSS(f, s, div, m, n) \
+ { \
+ .freq_hz = (f), \
+ .m_val = (m), \
+ .n_val = ~((n)-(m)) * !!(n), \
+ .d_val = ~(n),\
+ .div_src_val = BVAL(4, 0, (int)(2*(div) - 1)) \
+ | BVAL(10, 8, s##_mm_source_val), \
+ }
+
+#define F_APCS_PLL(f, l, m, n, pre_div, post_div, vco) \
+ { \
+ .freq_hz = (f), \
+ .l_val = (l), \
+ .m_val = (m), \
+ .n_val = (n), \
+ .pre_div_val = BVAL(12, 12, (pre_div)), \
+ .post_div_val = BVAL(9, 8, (post_div)), \
+ .vco_val = BVAL(29, 28, (vco)), \
+ }
+
+#define VDD_DIG_FMAX_MAP1(l1, f1) \
+ .vdd_class = &vdd_dig, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+
+#define VDD_DIG_FMAX_MAP2(l1, f1, l2, f2) \
+ .vdd_class = &vdd_dig, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ [VDD_DIG_##l2] = (f2), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+
+#define VDD_DIG_FMAX_MAP3(l1, f1, l2, f2, l3, f3) \
+ .vdd_class = &vdd_dig, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ [VDD_DIG_##l2] = (f2), \
+ [VDD_DIG_##l3] = (f3), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+
+enum vdd_dig_levels {
+ VDD_DIG_NONE,
+ VDD_DIG_LOW,
+ VDD_DIG_NOMINAL,
+ VDD_DIG_HIGH,
+ VDD_DIG_NUM
+};
+
+static int vdd_corner[] = {
+ RPM_REGULATOR_CORNER_NONE, /* VDD_DIG_NONE */
+ RPM_REGULATOR_CORNER_SVS_SOC, /* VDD_DIG_LOW */
+ RPM_REGULATOR_CORNER_NORMAL, /* VDD_DIG_NOMINAL */
+ RPM_REGULATOR_CORNER_SUPER_TURBO, /* VDD_DIG_HIGH */
+};
+
+static DEFINE_VDD_REGULATORS(vdd_dig, VDD_DIG_NUM, 1, vdd_corner, NULL);
+
+DEFINE_EXT_CLK(xo_clk_src, NULL);
+DEFINE_EXT_CLK(xo_a_clk_src, NULL);
+DEFINE_EXT_CLK(rpm_debug_clk, NULL);
+DEFINE_EXT_CLK(apss_debug_clk, NULL);
+
+DEFINE_CLK_DUMMY(wcnss_m_clk, 0);
+
+enum vdd_sr2_pll_levels {
+ VDD_SR2_PLL_OFF,
+ VDD_SR2_PLL_SVS,
+ VDD_SR2_PLL_NOM,
+ VDD_SR2_PLL_TUR,
+ VDD_SR2_PLL_NUM,
+};
+
+static int vdd_sr2_levels[] = {
+ 0, RPM_REGULATOR_CORNER_NONE, /* VDD_SR2_PLL_OFF */
+ 1800000, RPM_REGULATOR_CORNER_SVS_SOC, /* VDD_SR2_PLL_SVS */
+ 1800000, RPM_REGULATOR_CORNER_NORMAL, /* VDD_SR2_PLL_NOM */
+ 1800000, RPM_REGULATOR_CORNER_SUPER_TURBO, /* VDD_SR2_PLL_TUR */
+};
+
+static DEFINE_VDD_REGULATORS(vdd_sr2_pll, VDD_SR2_PLL_NUM, 2,
+ vdd_sr2_levels, NULL);
+
+static struct pll_freq_tbl apcs_pll_freq[] = {
+ F_APCS_PLL( 998400000, 52, 0x0, 0x1, 0x0, 0x0, 0x0),
+ F_APCS_PLL(1094400000, 57, 0x0, 0x1, 0x0, 0x0, 0x0),
+ F_APCS_PLL(1152000000, 60, 0x0, 0x1, 0x0, 0x0, 0x0),
+ F_APCS_PLL(1190400000, 62, 0x0, 0x1, 0x0, 0x0, 0x0),
+ F_APCS_PLL(1209600000, 63, 0x0, 0x1, 0x0, 0x0, 0x0),
+ F_APCS_PLL(1248000000, 65, 0x0, 0x1, 0x0, 0x0, 0x0),
+ F_APCS_PLL(1401600000, 73, 0x0, 0x1, 0x0, 0x0, 0x0),
+ PLL_F_END
+};
+
+static struct pll_clk a53sspll = {
+ .mode_reg = (void __iomem *)APCS_SH_PLL_MODE,
+ .l_reg = (void __iomem *)APCS_SH_PLL_L_VAL,
+ .m_reg = (void __iomem *)APCS_SH_PLL_M_VAL,
+ .n_reg = (void __iomem *)APCS_SH_PLL_N_VAL,
+ .config_reg = (void __iomem *)APCS_SH_PLL_USER_CTL,
+ .status_reg = (void __iomem *)APCS_SH_PLL_STATUS,
+ .freq_tbl = apcs_pll_freq,
+ .masks = {
+ .vco_mask = BM(29, 28),
+ .pre_div_mask = BIT(12),
+ .post_div_mask = BM(9, 8),
+ .mn_en_mask = BIT(24),
+ .main_output_mask = BIT(0),
+ },
+ .base = &virt_bases[APCS_PLL_BASE],
+ .c = {
+ .parent = &xo_a_clk_src.c,
+ .dbg_name = "a53sspll",
+ .ops = &clk_ops_sr2_pll,
+ .vdd_class = &vdd_sr2_pll,
+ .fmax = (unsigned long [VDD_SR2_PLL_NUM]) {
+ [VDD_SR2_PLL_SVS] = 1000000000,
+ [VDD_SR2_PLL_NOM] = 1900000000,
+ },
+ .num_fmax = VDD_SR2_PLL_NUM,
+ CLK_INIT(a53sspll.c),
+ },
+};
+
+static unsigned int soft_vote_gpll0;
+
+static struct pll_vote_clk gpll0_clk_src = {
+ .en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE,
+ .en_mask = BIT(0),
+ .status_reg = (void __iomem *)GPLL0_STATUS,
+ .status_mask = BIT(17),
+ .soft_vote = &soft_vote_gpll0,
+ .soft_vote_mask = PLL_SOFT_VOTE_PRIMARY,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .parent = &xo_clk_src.c,
+ .rate = 800000000,
+ .dbg_name = "gpll0_clk_src",
+ .ops = &clk_ops_pll_acpu_vote,
+ CLK_INIT(gpll0_clk_src.c),
+ },
+};
+
+/* GPLL0 Aux is needed by GFX3D */
+static struct pll_vote_clk gpll0_aux_clk_src = {
+ .en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE,
+ .en_mask = BIT(0),
+ .status_reg = (void __iomem *)GPLL0_STATUS,
+ .status_mask = BIT(17),
+ .soft_vote = &soft_vote_gpll0,
+ .soft_vote_mask = PLL_SOFT_VOTE_AUX,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .parent = &xo_clk_src.c,
+ .rate = 800000000,
+ .dbg_name = "gpll0_aux_clk_src",
+ .ops = &clk_ops_pll_acpu_vote,
+ CLK_INIT(gpll0_aux_clk_src.c),
+ },
+};
+
+/* Don't vote for xo if using this clock to allow xo shutdown */
+static struct pll_vote_clk gpll0_ao_clk_src = {
+ .en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE,
+ .en_mask = BIT(0),
+ .status_reg = (void __iomem *)GPLL0_STATUS,
+ .status_mask = BIT(17),
+ .soft_vote = &soft_vote_gpll0,
+ .soft_vote_mask = PLL_SOFT_VOTE_ACPU,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .parent = &xo_a_clk_src.c,
+ .rate = 800000000,
+ .dbg_name = "gpll0_ao_clk_src",
+ .ops = &clk_ops_pll_acpu_vote,
+ CLK_INIT(gpll0_ao_clk_src.c),
+ },
+};
+
+static struct pll_vote_clk gpll1_clk_src = {
+ .en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE,
+ .en_mask = BIT(1),
+ .status_reg = (void __iomem *)GPLL1_STATUS,
+ .status_mask = BIT(17),
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .parent = &xo_clk_src.c,
+ .rate = 884736000,
+ .dbg_name = "gpll1_clk_src",
+ .ops = &clk_ops_pll_vote,
+ CLK_INIT(gpll1_clk_src.c),
+ },
+};
+
+static struct pll_vote_clk gpll2_clk_src = {
+ .en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE,
+ .en_mask = BIT(2),
+ .status_reg = (void __iomem *)GPLL2_STATUS,
+ .status_mask = BIT(17),
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .parent = &xo_clk_src.c,
+ .rate = 930000000,
+ .dbg_name = "gpll2_clk_src",
+ .ops = &clk_ops_pll_vote,
+ CLK_INIT(gpll2_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_apss_ahb_clk[] = {
+ F( 19200000, xo_a, 1, 0, 0),
+ F( 50000000, gpll0, 16, 0, 0),
+ F( 100000000, gpll0, 8, 0, 0),
+ F( 133330000, gpll0, 6, 0, 0),
+ F_END
+};
+
+static struct rcg_clk apss_ahb_clk_src = {
+ .cmd_rcgr_reg = APSS_AHB_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_apss_ahb_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "apss_ahb_clk_src",
+ .ops = &clk_ops_rcg,
+ CLK_INIT(apss_ahb_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_gcc_camss_ahb_clk[] = {
+ F( 40000000, gpll0, 10, 1, 2),
+ F( 80000000, gpll0, 10, 0, 0),
+ F_END
+};
+
+static struct rcg_clk camss_ahb_clk_src = {
+ .cmd_rcgr_reg = CAMSS_AHB_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gcc_camss_ahb_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "camss_ahb_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP2(LOW, 40000000, NOMINAL, 80000000),
+ CLK_INIT(camss_ahb_clk_src.c),
+ },
+};
+
+/* CSI0/CSI1 pixel-interface clock roots; both share one frequency table. */
+static struct clk_freq_tbl ftbl_gcc_camss_csi0_1_clk[] = {
+ F( 100000000, gpll0, 8, 0, 0),
+ F( 200000000, gpll0, 4, 0, 0),
+ F_END
+};
+
+static struct rcg_clk csi0_clk_src = {
+ .cmd_rcgr_reg = CSI0_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_gcc_camss_csi0_1_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "csi0_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOW, 100000000, NOMINAL, 200000000),
+ CLK_INIT(csi0_clk_src.c),
+ },
+};
+
+static struct rcg_clk csi1_clk_src = {
+ .cmd_rcgr_reg = CSI1_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_gcc_camss_csi0_1_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "csi1_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOW, 100000000, NOMINAL, 200000000),
+ CLK_INIT(csi1_clk_src.c),
+ },
+};
+
+/*
+ * VFE0 (camera front end) clock root. Non-integer dividers (4.5, 2.5)
+ * are encoded by the F() macro; the top 465 MHz row sources GPLL2.
+ */
+static struct clk_freq_tbl ftbl_gcc_camss_vfe0_clk[] = {
+ F( 50000000, gpll0, 16, 0, 0),
+ F( 80000000, gpll0, 10, 0, 0),
+ F( 100000000, gpll0, 8, 0, 0),
+ F( 160000000, gpll0, 5, 0, 0),
+ F( 177780000, gpll0, 4.5, 0, 0),
+ F( 200000000, gpll0, 4, 0, 0),
+ F( 266670000, gpll0, 3, 0, 0),
+ F( 320000000, gpll0, 2.5, 0, 0),
+ F( 400000000, gpll0, 2, 0, 0),
+ F( 465000000, gpll2, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk vfe0_clk_src = {
+ .cmd_rcgr_reg = VFE0_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_gcc_camss_vfe0_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "vfe0_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP3(LOW, 160000000, NOMINAL, 320000000, HIGH,
+ 465000000),
+ CLK_INIT(vfe0_clk_src.c),
+ },
+};
+
+/*
+ * Oxili GFX3D (GPU core) clock root. Sources span xo, the gpll0_aux
+ * branch, GPLL1 (294.912 MHz) and GPLL2 (310 MHz).
+ */
+static struct clk_freq_tbl ftbl_gcc_oxili_gfx3d_clk[] = {
+ F( 19200000, xo, 1, 0, 0),
+ F( 50000000, gpll0_aux, 16, 0, 0),
+ F( 80000000, gpll0_aux, 10, 0, 0),
+ F( 100000000, gpll0_aux, 8, 0, 0),
+ F( 160000000, gpll0_aux, 5, 0, 0),
+ F( 177780000, gpll0_aux, 4.5, 0, 0),
+ F( 200000000, gpll0_aux, 4, 0, 0),
+ F( 266670000, gpll0_aux, 3, 0, 0),
+ F( 294912000, gpll1, 3, 0, 0),
+ F( 310000000, gpll2, 3, 0, 0),
+ F( 400000000, gpll0_aux, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk gfx3d_clk_src = {
+ .cmd_rcgr_reg = GFX3D_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_gcc_oxili_gfx3d_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gfx3d_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP3(LOW, 200000000, NOMINAL, 310000000, HIGH,
+ 400000000),
+ CLK_INIT(gfx3d_clk_src.c),
+ },
+};
+
+/* Shared frequency table for all six BLSP1 QUP I2C clock roots. */
+static struct clk_freq_tbl ftbl_gcc_blsp1_qup1_6_i2c_apps_clk[] = {
+ F( 19200000, xo, 1, 0, 0),
+ F( 50000000, gpll0, 16, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = BLSP1_QUP1_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "blsp1_qup1_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP1(LOW, 50000000),
+ CLK_INIT(blsp1_qup1_i2c_apps_clk_src.c),
+ },
+};
+
+/* Shared frequency table for all six BLSP1 QUP SPI clock roots (MND). */
+static struct clk_freq_tbl ftbl_gcc_blsp1_qup1_6_spi_apps_clk[] = {
+ F( 960000, xo, 10, 1, 2),
+ F( 4800000, xo, 4, 0, 0),
+ F( 9600000, xo, 2, 0, 0),
+ F( 16000000, gpll0, 10, 1, 5),
+ F( 19200000, xo, 1, 0, 0),
+ F( 25000000, gpll0, 16, 1, 2),
+ F( 50000000, gpll0, 16, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr_reg = BLSP1_QUP1_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "blsp1_qup1_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP2(LOW, 25000000, NOMINAL, 50000000),
+ CLK_INIT(blsp1_qup1_spi_apps_clk_src.c),
+ },
+};
+
+/*
+ * BLSP1 QUP2..QUP6 I2C and SPI clock roots. Each pair mirrors the
+ * QUP1 definitions above: I2C uses the HID divider and a 50 MHz LOW
+ * fmax; SPI uses the MND counter with 25/50 MHz LOW/NOMINAL fmax.
+ * Only the CMD_RCGR register offset and debug name differ per QUP.
+ */
+static struct rcg_clk blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = BLSP1_QUP2_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "blsp1_qup2_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP1(LOW, 50000000),
+ CLK_INIT(blsp1_qup2_i2c_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr_reg = BLSP1_QUP2_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "blsp1_qup2_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP2(LOW, 25000000, NOMINAL, 50000000),
+ CLK_INIT(blsp1_qup2_spi_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = BLSP1_QUP3_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "blsp1_qup3_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP1(LOW, 50000000),
+ CLK_INIT(blsp1_qup3_i2c_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr_reg = BLSP1_QUP3_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "blsp1_qup3_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP2(LOW, 25000000, NOMINAL, 50000000),
+ CLK_INIT(blsp1_qup3_spi_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = BLSP1_QUP4_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "blsp1_qup4_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP1(LOW, 50000000),
+ CLK_INIT(blsp1_qup4_i2c_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr_reg = BLSP1_QUP4_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "blsp1_qup4_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP2(LOW, 25000000, NOMINAL, 50000000),
+ CLK_INIT(blsp1_qup4_spi_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp1_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = BLSP1_QUP5_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "blsp1_qup5_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP1(LOW, 50000000),
+ CLK_INIT(blsp1_qup5_i2c_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp1_qup5_spi_apps_clk_src = {
+ .cmd_rcgr_reg = BLSP1_QUP5_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "blsp1_qup5_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP2(LOW, 25000000, NOMINAL, 50000000),
+ CLK_INIT(blsp1_qup5_spi_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp1_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = BLSP1_QUP6_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_i2c_apps_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "blsp1_qup6_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP1(LOW, 50000000),
+ CLK_INIT(blsp1_qup6_i2c_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp1_qup6_spi_apps_clk_src = {
+ .cmd_rcgr_reg = BLSP1_QUP6_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_6_spi_apps_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "blsp1_qup6_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP2(LOW, 25000000, NOMINAL, 50000000),
+ CLK_INIT(blsp1_qup6_spi_apps_clk_src.c),
+ },
+};
+
+/*
+ * BLSP1 UART clock roots. The M/N fractions yield standard baud-rate
+ * multiples (e.g. 3.6864/7.3728/14.7456 MHz) from the 800 MHz GPLL0.
+ * NOTE(review): fmax NOMINAL is 64 MHz while the table tops out at
+ * 60 MHz — presumably headroom from the voltage plan; confirm.
+ */
+static struct clk_freq_tbl ftbl_gcc_blsp1_uart1_6_apps_clk[] = {
+ F( 3686400, gpll0, 1, 72, 15625),
+ F( 7372800, gpll0, 1, 144, 15625),
+ F( 14745600, gpll0, 1, 288, 15625),
+ F( 16000000, gpll0, 10, 1, 5),
+ F( 19200000, xo, 1, 0, 0),
+ F( 24000000, gpll0, 1, 3, 100),
+ F( 25000000, gpll0, 16, 1, 2),
+ F( 32000000, gpll0, 1, 1, 25),
+ F( 40000000, gpll0, 1, 1, 20),
+ F( 46400000, gpll0, 1, 29, 500),
+ F( 48000000, gpll0, 1, 3, 50),
+ F( 51200000, gpll0, 1, 8, 125),
+ F( 56000000, gpll0, 1, 7, 100),
+ F( 58982400, gpll0, 1, 1152, 15625),
+ F( 60000000, gpll0, 1, 3, 40),
+ F_END
+};
+
+static struct rcg_clk blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr_reg = BLSP1_UART1_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_6_apps_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "blsp1_uart1_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP2(LOW, 32000000, NOMINAL, 64000000),
+ CLK_INIT(blsp1_uart1_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr_reg = BLSP1_UART2_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_6_apps_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "blsp1_uart2_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP2(LOW, 32000000, NOMINAL, 64000000),
+ CLK_INIT(blsp1_uart2_apps_clk_src.c),
+ },
+};
+
+/* CCI (camera control interface) clock root — XO rate only. */
+static struct clk_freq_tbl ftbl_gcc_camss_cci_clk[] = {
+ F( 19200000, xo, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk cci_clk_src = {
+ .cmd_rcgr_reg = CCI_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gcc_camss_cci_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "cci_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP1(LOW, 19200000),
+ CLK_INIT(cci_clk_src.c),
+ },
+};
+
+/* Camera general-purpose clock roots GP0/GP1; shared table. */
+static struct clk_freq_tbl ftbl_gcc_camss_gp0_1_clk[] = {
+ F( 100000000, gpll0, 8, 0, 0),
+ F( 200000000, gpll0, 4, 0, 0),
+ F_END
+};
+
+static struct rcg_clk camss_gp0_clk_src = {
+ .cmd_rcgr_reg = CAMSS_GP0_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gcc_camss_gp0_1_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "camss_gp0_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP2(LOW, 100000000, NOMINAL, 200000000),
+ CLK_INIT(camss_gp0_clk_src.c),
+ },
+};
+
+static struct rcg_clk camss_gp1_clk_src = {
+ .cmd_rcgr_reg = CAMSS_GP1_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gcc_camss_gp0_1_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "camss_gp1_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP2(LOW, 100000000, NOMINAL, 200000000),
+ CLK_INIT(camss_gp1_clk_src.c),
+ },
+};
+
+/* JPEG0 encoder/decoder core clock root. */
+static struct clk_freq_tbl ftbl_gcc_camss_jpeg0_clk[] = {
+ F( 133330000, gpll0, 6, 0, 0),
+ F( 266670000, gpll0, 3, 0, 0),
+ F( 320000000, gpll0, 2.5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk jpeg0_clk_src = {
+ .cmd_rcgr_reg = JPEG0_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_gcc_camss_jpeg0_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "jpeg0_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP3(LOW, 133330000, NOMINAL, 266670000, HIGH,
+ 320000000),
+ CLK_INIT(jpeg0_clk_src.c),
+ },
+};
+
+/*
+ * Camera sensor master clocks MCLK0/MCLK1; shared table.
+ * NOTE(review): LOW fmax is 24 MHz while the nearest table rate is
+ * 23.88 MHz — appears intentional (fmax is a ceiling, not a rate).
+ */
+static struct clk_freq_tbl ftbl_gcc_camss_mclk0_1_clk[] = {
+ F( 9600000, xo, 2, 0, 0),
+ F( 23880000, gpll0, 1, 2, 67),
+ F( 66670000, gpll0, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk mclk0_clk_src = {
+ .cmd_rcgr_reg = MCLK0_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gcc_camss_mclk0_1_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "mclk0_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP2(LOW, 24000000, NOMINAL, 66670000),
+ CLK_INIT(mclk0_clk_src.c),
+ },
+};
+
+static struct rcg_clk mclk1_clk_src = {
+ .cmd_rcgr_reg = MCLK1_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gcc_camss_mclk0_1_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "mclk1_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP2(LOW, 24000000, NOMINAL, 66670000),
+ CLK_INIT(mclk1_clk_src.c),
+ },
+};
+
+/* CSI PHY timer clock roots for CSI0/CSI1; shared table. */
+static struct clk_freq_tbl ftbl_gcc_camss_csi0_1phytimer_clk[] = {
+ F( 100000000, gpll0, 8, 0, 0),
+ F( 200000000, gpll0, 4, 0, 0),
+ F_END
+};
+
+static struct rcg_clk csi0phytimer_clk_src = {
+ .cmd_rcgr_reg = CSI0PHYTIMER_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_gcc_camss_csi0_1phytimer_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "csi0phytimer_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOW, 100000000, NOMINAL, 200000000),
+ CLK_INIT(csi0phytimer_clk_src.c),
+ },
+};
+
+static struct rcg_clk csi1phytimer_clk_src = {
+ .cmd_rcgr_reg = CSI1PHYTIMER_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_gcc_camss_csi0_1phytimer_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "csi1phytimer_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOW, 100000000, NOMINAL, 200000000),
+ CLK_INIT(csi1phytimer_clk_src.c),
+ },
+};
+
+/* CPP (camera post-processor) clock root; 465 MHz row uses GPLL2. */
+static struct clk_freq_tbl ftbl_gcc_camss_cpp_clk[] = {
+ F( 160000000, gpll0, 5, 0, 0),
+ F( 320000000, gpll0, 2.5, 0, 0),
+ F( 465000000, gpll2, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk cpp_clk_src = {
+ .cmd_rcgr_reg = CPP_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_gcc_camss_cpp_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "cpp_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP3(LOW, 160000000, NOMINAL, 320000000, HIGH,
+ 465000000),
+ CLK_INIT(cpp_clk_src.c),
+ },
+};
+
+/*
+ * General-purpose output clocks GP1..GP3.
+ * NOTE(review): fmax maps allow 100/200 MHz but the table only lists
+ * 19.2 MHz XO — presumably the table is the supported subset; confirm.
+ */
+static struct clk_freq_tbl ftbl_gcc_gp1_3_clk[] = {
+ F( 19200000, xo, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk gp1_clk_src = {
+ .cmd_rcgr_reg = GP1_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gcc_gp1_3_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gp1_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP2(LOW, 100000000, NOMINAL, 200000000),
+ CLK_INIT(gp1_clk_src.c),
+ },
+};
+
+static struct rcg_clk gp2_clk_src = {
+ .cmd_rcgr_reg = GP2_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gcc_gp1_3_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gp2_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP2(LOW, 100000000, NOMINAL, 200000000),
+ CLK_INIT(gp2_clk_src.c),
+ },
+};
+
+static struct rcg_clk gp3_clk_src = {
+ .cmd_rcgr_reg = GP3_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gcc_gp1_3_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gp3_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP2(LOW, 100000000, NOMINAL, 200000000),
+ CLK_INIT(gp3_clk_src.c),
+ },
+};
+
+/*
+ * MDSS DSI byte clock root. Single-entry table selecting the DSI0
+ * PHY PLL as source; used directly as current_freq (no freq_tbl,
+ * no set_rate, no F_END) since clk_ops_byte drives the rate from
+ * the PHY PLL rather than from a lookup.
+ */
+static struct clk_freq_tbl ftbl_gcc_mdss_byte0_clk[] = {
+ {
+ .div_src_val = BVAL(10, 8, dsi0_phypll_mm_source_val),
+ },
+};
+
+static struct rcg_clk byte0_clk_src = {
+ .cmd_rcgr_reg = BYTE0_CMD_RCGR,
+ .current_freq = ftbl_gcc_mdss_byte0_clk,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "byte0_clk_src",
+ .ops = &clk_ops_byte,
+ VDD_DIG_FMAX_MAP2(LOW, 112500000, NOMINAL, 187500000),
+ CLK_INIT(byte0_clk_src.c),
+ },
+};
+
+/* MDSS DSI escape clock root — XO rate only. */
+static struct clk_freq_tbl ftbl_gcc_mdss_esc0_clk[] = {
+ F( 19200000, xo, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk esc0_clk_src = {
+ .cmd_rcgr_reg = ESC0_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_gcc_mdss_esc0_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "esc0_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP1(LOW, 19200000),
+ CLK_INIT(esc0_clk_src.c),
+ },
+};
+
+/* MDSS MDP (display processor) core clock root. */
+static struct clk_freq_tbl ftbl_gcc_mdss_mdp_clk[] = {
+ F( 50000000, gpll0, 16, 0, 0),
+ F( 80000000, gpll0, 10, 0, 0),
+ F( 100000000, gpll0, 8, 0, 0),
+ F( 160000000, gpll0, 5, 0, 0),
+ F( 177780000, gpll0, 4.5, 0, 0),
+ F( 200000000, gpll0, 4, 0, 0),
+ F( 266670000, gpll0, 3, 0, 0),
+ F( 320000000, gpll0, 2.5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk mdp_clk_src = {
+ .cmd_rcgr_reg = MDP_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_gcc_mdss_mdp_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "mdp_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP3(LOW, 160000000, NOMINAL, 266670000, HIGH,
+ 320000000),
+ CLK_INIT(mdp_clk_src.c),
+ },
+};
+
+/*
+ * MDSS DSI pixel clock root. Like byte0 above: a single entry
+ * selecting the DSI0 PHY PLL with divider field 0, used directly
+ * as current_freq; clk_ops_pixel tracks the PHY PLL rate.
+ */
+static struct clk_freq_tbl ftbl_gcc_mdss_pclk0_clk[] = {
+ {
+ .div_src_val = BVAL(10, 8, dsi0_phypll_mm_source_val)
+ | BVAL(4, 0, 0),
+ },
+};
+
+static struct rcg_clk pclk0_clk_src = {
+ .cmd_rcgr_reg = PCLK0_CMD_RCGR,
+ .current_freq = ftbl_gcc_mdss_pclk0_clk,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "pclk0_clk_src",
+ .ops = &clk_ops_pixel,
+ VDD_DIG_FMAX_MAP2(LOW, 150000000, NOMINAL, 250000000),
+ CLK_INIT(pclk0_clk_src.c),
+ },
+};
+
+/* MDSS vsync clock root — XO rate only. */
+static struct clk_freq_tbl ftbl_gcc_mdss_vsync_clk[] = {
+ F( 19200000, xo, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk vsync_clk_src = {
+ .cmd_rcgr_reg = VSYNC_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_gcc_mdss_vsync_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "vsync_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP1(LOW, 19200000),
+ CLK_INIT(vsync_clk_src.c),
+ },
+};
+
+/* PDM2 clock root — fixed 64 MHz (gpll0 / 12.5). */
+static struct clk_freq_tbl ftbl_gcc_pdm2_clk[] = {
+ F( 64000000, gpll0, 12.5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk pdm2_clk_src = {
+ .cmd_rcgr_reg = PDM2_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_gcc_pdm2_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "pdm2_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP1(LOW, 64000000),
+ CLK_INIT(pdm2_clk_src.c),
+ },
+};
+
+/*
+ * SDCC1 (eMMC) card clock root. Low rates (144 k / 400 k) come from
+ * XO with M/N for card identification mode.
+ * NOTE(review): fmax NOMINAL is 200 MHz but the table tops out at
+ * 177.77 MHz — confirm against the clock plan (sdcc2 below does
+ * reach 200 MHz).
+ */
+static struct clk_freq_tbl ftbl_gcc_sdcc1_apps_clk[] = {
+ F( 144000, xo, 16, 3, 25),
+ F( 400000, xo, 12, 1, 4),
+ F( 20000000, gpll0, 10, 1, 4),
+ F( 25000000, gpll0, 16, 1, 2),
+ F( 50000000, gpll0, 16, 0, 0),
+ F( 100000000, gpll0, 8, 0, 0),
+ F( 177770000, gpll0, 4.5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk sdcc1_apps_clk_src = {
+ .cmd_rcgr_reg = SDCC1_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "sdcc1_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP2(LOW, 50000000, NOMINAL, 200000000),
+ CLK_INIT(sdcc1_apps_clk_src.c),
+ },
+};
+
+/* SDCC2 (SD card) card clock root; same shape as sdcc1, max 200 MHz. */
+static struct clk_freq_tbl ftbl_gcc_sdcc2_apps_clk[] = {
+ F( 144000, xo, 16, 3, 25),
+ F( 400000, xo, 12, 1, 4),
+ F( 20000000, gpll0, 10, 1, 4),
+ F( 25000000, gpll0, 16, 1, 2),
+ F( 50000000, gpll0, 16, 0, 0),
+ F( 100000000, gpll0, 8, 0, 0),
+ F( 200000000, gpll0, 4, 0, 0),
+ F_END
+};
+
+static struct rcg_clk sdcc2_apps_clk_src = {
+ .cmd_rcgr_reg = SDCC2_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "sdcc2_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP2(LOW, 50000000, NOMINAL, 200000000),
+ CLK_INIT(sdcc2_apps_clk_src.c),
+ },
+};
+
+/* USB HS controller system clock root — fixed 80 MHz. */
+static struct clk_freq_tbl ftbl_gcc_usb_hs_system_clk[] = {
+ F( 80000000, gpll0, 10, 0, 0),
+ F_END
+};
+
+static struct rcg_clk usb_hs_system_clk_src = {
+ .cmd_rcgr_reg = USB_HS_SYSTEM_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_gcc_usb_hs_system_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "usb_hs_system_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOW, 57140000, NOMINAL, 80000000),
+ CLK_INIT(usb_hs_system_clk_src.c),
+ },
+};
+
+/* Venus video codec core clock root. */
+static struct clk_freq_tbl ftbl_gcc_venus0_vcodec0_clk[] = {
+ F( 100000000, gpll0, 8, 0, 0),
+ F( 160000000, gpll0, 5, 0, 0),
+ F( 228570000, gpll0, 3.5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk vcodec0_clk_src = {
+ .cmd_rcgr_reg = VCODEC0_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gcc_venus0_vcodec0_clk,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "vcodec0_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOW, 100000000, NOMINAL, 160000000, HIGH,
+ 228570000),
+ CLK_INIT(vcodec0_clk_src.c),
+ },
+};
+
+/*
+ * BLSP1 AHB bus clock — voted branch (shared with other masters),
+ * enabled via bit 10 of APCS_CLOCK_BRANCH_ENA_VOTE.
+ */
+static struct local_vote_clk gcc_blsp1_ahb_clk = {
+ .cbcr_reg = BLSP1_AHB_CBCR,
+ .vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
+ .en_mask = BIT(10),
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_blsp1_ahb_clk",
+ .ops = &clk_ops_vote,
+ CLK_INIT(gcc_blsp1_ahb_clk.c),
+ },
+};
+
+/*
+ * BLSP1 leaf branch clocks: one CBCR-gated branch per QUP I2C/SPI
+ * and UART interface, each parented to the matching *_clk_src RCG
+ * defined above. has_sibling = 0 means the branch is the sole
+ * consumer of its root, so rate requests propagate to the parent.
+ */
+static struct branch_clk gcc_blsp1_qup1_i2c_apps_clk = {
+ .cbcr_reg = BLSP1_QUP1_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent = &blsp1_qup1_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup1_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup1_spi_apps_clk = {
+ .cbcr_reg = BLSP1_QUP1_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent = &blsp1_qup1_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup1_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup2_i2c_apps_clk = {
+ .cbcr_reg = BLSP1_QUP2_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent = &blsp1_qup2_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup2_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup2_spi_apps_clk = {
+ .cbcr_reg = BLSP1_QUP2_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent = &blsp1_qup2_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup2_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup3_i2c_apps_clk = {
+ .cbcr_reg = BLSP1_QUP3_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent = &blsp1_qup3_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup3_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup3_spi_apps_clk = {
+ .cbcr_reg = BLSP1_QUP3_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent = &blsp1_qup3_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup3_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup4_i2c_apps_clk = {
+ .cbcr_reg = BLSP1_QUP4_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent = &blsp1_qup4_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup4_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup4_spi_apps_clk = {
+ .cbcr_reg = BLSP1_QUP4_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent = &blsp1_qup4_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup4_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup5_i2c_apps_clk = {
+ .cbcr_reg = BLSP1_QUP5_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_blsp1_qup5_i2c_apps_clk",
+ .parent = &blsp1_qup5_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup5_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup5_spi_apps_clk = {
+ .cbcr_reg = BLSP1_QUP5_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_blsp1_qup5_spi_apps_clk",
+ .parent = &blsp1_qup5_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup5_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup6_i2c_apps_clk = {
+ .cbcr_reg = BLSP1_QUP6_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_blsp1_qup6_i2c_apps_clk",
+ .parent = &blsp1_qup6_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup6_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup6_spi_apps_clk = {
+ .cbcr_reg = BLSP1_QUP6_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_blsp1_qup6_spi_apps_clk",
+ .parent = &blsp1_qup6_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup6_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_uart1_apps_clk = {
+ .cbcr_reg = BLSP1_UART1_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_blsp1_uart1_apps_clk",
+ .parent = &blsp1_uart1_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_uart1_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_uart2_apps_clk = {
+ .cbcr_reg = BLSP1_UART2_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_blsp1_uart2_apps_clk",
+ .parent = &blsp1_uart2_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_uart2_apps_clk.c),
+ },
+};
+
+/* Boot-ROM AHB clock — voted branch (APCS vote bit 7). */
+static struct local_vote_clk gcc_boot_rom_ahb_clk = {
+ .cbcr_reg = BOOT_ROM_AHB_CBCR,
+ .vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
+ .en_mask = BIT(7),
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_ops_vote,
+ CLK_INIT(gcc_boot_rom_ahb_clk.c),
+ },
+};
+
+/*
+ * CAMSS leaf branch clocks. Most parent one of the camera RCGs above;
+ * has_sibling = 1 marks branches that share their root with other
+ * branches, so disabling one does not turn the root off. A few carry
+ * a bcr_reg (block reset) in addition to the CBCR gate. Branches with
+ * no .parent (jpeg_axi, top_ahb) are fed from a source not modeled
+ * here — presumably a bus/system clock; confirm against the clock plan.
+ */
+static struct branch_clk gcc_camss_cci_ahb_clk = {
+ .cbcr_reg = CAMSS_CCI_AHB_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_cci_ahb_clk",
+ .parent = &camss_ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_cci_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_cci_clk = {
+ .cbcr_reg = CAMSS_CCI_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_cci_clk",
+ .parent = &cci_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_cci_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_csi0_ahb_clk = {
+ .cbcr_reg = CAMSS_CSI0_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_csi0_ahb_clk",
+ .parent = &camss_ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_csi0_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_csi0_clk = {
+ .cbcr_reg = CAMSS_CSI0_CBCR,
+ .has_sibling = 1,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_csi0_clk",
+ .parent = &csi0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_csi0_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_csi0phy_clk = {
+ .cbcr_reg = CAMSS_CSI0PHY_CBCR,
+ .has_sibling = 1,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_csi0phy_clk",
+ .parent = &csi0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_csi0phy_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_csi0pix_clk = {
+ .cbcr_reg = CAMSS_CSI0PIX_CBCR,
+ .has_sibling = 1,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_csi0pix_clk",
+ .parent = &csi0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_csi0pix_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_csi0rdi_clk = {
+ .cbcr_reg = CAMSS_CSI0RDI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_csi0rdi_clk",
+ .parent = &csi0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_csi0rdi_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_csi1_ahb_clk = {
+ .cbcr_reg = CAMSS_CSI1_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_csi1_ahb_clk",
+ .parent = &camss_ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_csi1_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_csi1_clk = {
+ .cbcr_reg = CAMSS_CSI1_CBCR,
+ .has_sibling = 1,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_csi1_clk",
+ .parent = &csi1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_csi1_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_csi1phy_clk = {
+ .cbcr_reg = CAMSS_CSI1PHY_CBCR,
+ .has_sibling = 1,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_csi1phy_clk",
+ .parent = &csi1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_csi1phy_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_csi1pix_clk = {
+ .cbcr_reg = CAMSS_CSI1PIX_CBCR,
+ .has_sibling = 1,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_csi1pix_clk",
+ .parent = &csi1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_csi1pix_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_csi1rdi_clk = {
+ .cbcr_reg = CAMSS_CSI1RDI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_csi1rdi_clk",
+ .parent = &csi1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_csi1rdi_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_csi_vfe0_clk = {
+ .cbcr_reg = CAMSS_CSI_VFE0_CBCR,
+ .bcr_reg = CAMSS_CSI_VFE0_BCR,
+ .has_sibling = 1,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_csi_vfe0_clk",
+ .parent = &vfe0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_csi_vfe0_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_gp0_clk = {
+ .cbcr_reg = CAMSS_GP0_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_gp0_clk",
+ .parent = &camss_gp0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_gp0_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_gp1_clk = {
+ .cbcr_reg = CAMSS_GP1_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_gp1_clk",
+ .parent = &camss_gp1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_gp1_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_ispif_ahb_clk = {
+ .cbcr_reg = CAMSS_ISPIF_AHB_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_ispif_ahb_clk",
+ .parent = &camss_ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_ispif_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_jpeg0_clk = {
+ .cbcr_reg = CAMSS_JPEG0_CBCR,
+ .bcr_reg = CAMSS_JPEG0_BCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_jpeg0_clk",
+ .parent = &jpeg0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_jpeg0_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_jpeg_ahb_clk = {
+ .cbcr_reg = CAMSS_JPEG_AHB_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_jpeg_ahb_clk",
+ .parent = &camss_ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_jpeg_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_jpeg_axi_clk = {
+ .cbcr_reg = CAMSS_JPEG_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_jpeg_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_jpeg_axi_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_mclk0_clk = {
+ .cbcr_reg = CAMSS_MCLK0_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_mclk0_clk",
+ .parent = &mclk0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_mclk0_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_mclk1_clk = {
+ .cbcr_reg = CAMSS_MCLK1_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_mclk1_clk",
+ .parent = &mclk1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_mclk1_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_micro_ahb_clk = {
+ .cbcr_reg = CAMSS_MICRO_AHB_CBCR,
+ .bcr_reg = CAMSS_MICRO_BCR,
+ .has_sibling = 1,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_micro_ahb_clk",
+ .parent = &camss_ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_micro_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_csi0phytimer_clk = {
+ .cbcr_reg = CAMSS_CSI0PHYTIMER_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_csi0phytimer_clk",
+ .parent = &csi0phytimer_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_csi0phytimer_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_csi1phytimer_clk = {
+ .cbcr_reg = CAMSS_CSI1PHYTIMER_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_csi1phytimer_clk",
+ .parent = &csi1phytimer_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_csi1phytimer_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_ahb_clk = {
+ .cbcr_reg = CAMSS_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_ahb_clk",
+ .parent = &camss_ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_top_ahb_clk = {
+ .cbcr_reg = CAMSS_TOP_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_top_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_top_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_cpp_ahb_clk = {
+ .cbcr_reg = CAMSS_CPP_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_cpp_ahb_clk",
+ .parent = &camss_ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_cpp_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_cpp_clk = {
+ .cbcr_reg = CAMSS_CPP_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_cpp_clk",
+ .parent = &cpp_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_cpp_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_vfe0_clk = {
+ .cbcr_reg = CAMSS_VFE0_CBCR,
+ .bcr_reg = CAMSS_VFE_BCR,
+ .has_sibling = 1,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_vfe0_clk",
+ .parent = &vfe0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_vfe0_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_vfe_ahb_clk = {
+ .cbcr_reg = CAMSS_VFE_AHB_CBCR,
+ .has_sibling = 0,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_vfe_ahb_clk",
+ .parent = &camss_ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_vfe_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_camss_vfe_axi_clk = {
+ .cbcr_reg = CAMSS_VFE_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_bases[GCC_BASE],
+ .c = {
+ .dbg_name = "gcc_camss_vfe_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_camss_vfe_axi_clk.c),
+ },
+};
+
+/*
+ * Crypto engine clocks: frequency table for the RCG, the RCG itself, and
+ * the three vote-controlled branches.  F(rate, src, div, m, n) rows give
+ * the output rate, parent source and divider settings (per the F() macro
+ * defined earlier in this file).
+ */
+static struct clk_freq_tbl ftbl_gcc_crypto_clk[] = {
+	F(  50000000,	       gpll0,	16,    0,     0),
+	F(  80000000,	       gpll0,	10,    0,     0),
+	F( 100000000,	       gpll0,	 8,    0,     0),
+	F( 160000000,	       gpll0,	 5,    0,     0),
+	F_END
+};
+
+static struct rcg_clk crypto_clk_src = {
+	.cmd_rcgr_reg = CRYPTO_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_gcc_crypto_clk,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "crypto_clk_src",
+		.ops = &clk_ops_rcg,
+		/* Voltage corners: up to 80 MHz at LOW, 160 MHz at NOMINAL. */
+		VDD_DIG_FMAX_MAP2(LOW, 80000000, NOMINAL, 160000000),
+		CLK_INIT(crypto_clk_src.c),
+	},
+};
+
+/*
+ * The crypto branches are enabled by setting vote bits in
+ * APCS_CLOCK_BRANCH_ENA_VOTE rather than writing the CBCR directly.
+ */
+static struct local_vote_clk gcc_crypto_ahb_clk = {
+	.cbcr_reg = CRYPTO_AHB_CBCR,
+	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(0),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_crypto_ahb_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_crypto_ahb_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_crypto_axi_clk = {
+	.cbcr_reg = CRYPTO_AXI_CBCR,
+	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(1),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_crypto_axi_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_crypto_axi_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_crypto_clk = {
+	.cbcr_reg = CRYPTO_CBCR,
+	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(2),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_crypto_clk",
+		.parent = &crypto_clk_src.c,
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_crypto_clk.c),
+	},
+};
+
+/*
+ * Graphics (Oxili), BIMC, general-purpose, MDSS display, modem interface
+ * and PDM branch clocks.  Same descriptor conventions as the CAMSS
+ * branches above: .cbcr_reg is the branch control register, .parent the
+ * locally-programmed RCG (absent when the source is managed elsewhere).
+ */
+static struct branch_clk gcc_oxili_gmem_clk = {
+	.cbcr_reg = OXILI_GMEM_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_oxili_gmem_clk",
+		.parent = &gfx3d_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_oxili_gmem_clk.c),
+	},
+};
+
+static struct branch_clk gcc_bimc_gfx_clk = {
+	.cbcr_reg = BIMC_GFX_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_bimc_gfx_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_bimc_gfx_clk.c),
+	},
+};
+
+static struct branch_clk gcc_bimc_gpu_clk = {
+	.cbcr_reg = BIMC_GPU_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_bimc_gpu_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_bimc_gpu_clk.c),
+	},
+};
+
+static struct branch_clk gcc_gp1_clk = {
+	.cbcr_reg = GP1_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_gp1_clk",
+		.parent = &gp1_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_gp1_clk.c),
+	},
+};
+
+static struct branch_clk gcc_gp2_clk = {
+	.cbcr_reg = GP2_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_gp2_clk",
+		.parent = &gp2_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_gp2_clk.c),
+	},
+};
+
+static struct branch_clk gcc_gp3_clk = {
+	.cbcr_reg = GP3_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_gp3_clk",
+		.parent = &gp3_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_gp3_clk.c),
+	},
+};
+
+static struct branch_clk gcc_mdss_ahb_clk = {
+	.cbcr_reg = MDSS_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_mdss_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_mdss_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_mdss_axi_clk = {
+	.cbcr_reg = MDSS_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_mdss_axi_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_mdss_axi_clk.c),
+	},
+};
+
+/* byte0/pclk0 parents are the DSI PHY PLL, wired up in msm_gcc_mdss_probe(). */
+static struct branch_clk gcc_mdss_byte0_clk = {
+	.cbcr_reg = MDSS_BYTE0_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_mdss_byte0_clk",
+		.parent = &byte0_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_mdss_byte0_clk.c),
+	},
+};
+
+static struct branch_clk gcc_mdss_esc0_clk = {
+	.cbcr_reg = MDSS_ESC0_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_mdss_esc0_clk",
+		.parent = &esc0_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_mdss_esc0_clk.c),
+	},
+};
+
+static struct branch_clk gcc_mdss_mdp_clk = {
+	.cbcr_reg = MDSS_MDP_CBCR,
+	/* Reset register covers the whole MDSS block. */
+	.bcr_reg = MDSS_BCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_mdss_mdp_clk",
+		.parent = &mdp_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_mdss_mdp_clk.c),
+	},
+};
+
+static struct branch_clk gcc_mdss_pclk0_clk = {
+	.cbcr_reg = MDSS_PCLK0_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_mdss_pclk0_clk",
+		.parent = &pclk0_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_mdss_pclk0_clk.c),
+	},
+};
+
+static struct branch_clk gcc_mdss_vsync_clk = {
+	.cbcr_reg = MDSS_VSYNC_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_mdss_vsync_clk",
+		.parent = &vsync_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_mdss_vsync_clk.c),
+	},
+};
+
+static struct branch_clk gcc_mss_cfg_ahb_clk = {
+	.cbcr_reg = MSS_CFG_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_mss_cfg_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_mss_cfg_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_mss_q6_bimc_axi_clk = {
+	.cbcr_reg = MSS_Q6_BIMC_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_mss_q6_bimc_axi_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_mss_q6_bimc_axi_clk.c),
+	},
+};
+
+static struct branch_clk gcc_oxili_ahb_clk = {
+	.cbcr_reg = OXILI_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_oxili_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_oxili_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_oxili_gfx3d_clk = {
+	.cbcr_reg = OXILI_GFX3D_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_oxili_gfx3d_clk",
+		.parent = &gfx3d_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_oxili_gfx3d_clk.c),
+	},
+};
+
+static struct branch_clk gcc_pdm2_clk = {
+	.cbcr_reg = PDM2_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_pdm2_clk",
+		.parent = &pdm2_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_pdm2_clk.c),
+	},
+};
+
+static struct branch_clk gcc_pdm_ahb_clk = {
+	.cbcr_reg = PDM_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_pdm_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_pdm_ahb_clk.c),
+	},
+};
+
+/*
+ * PRNG, SDCC and SMMU-related clocks.  local_vote_clk entries are enabled
+ * by setting a bit in the named *_ENA_VOTE register (the CBCR is only
+ * polled for status); branch_clk entries are controlled directly via
+ * their CBCR.
+ */
+static struct local_vote_clk gcc_prng_ahb_clk = {
+	.cbcr_reg = PRNG_AHB_CBCR,
+	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(8),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_prng_ahb_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_prng_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_sdcc1_ahb_clk = {
+	.cbcr_reg = SDCC1_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_sdcc1_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_sdcc1_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_sdcc1_apps_clk = {
+	.cbcr_reg = SDCC1_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_sdcc1_apps_clk",
+		.parent = &sdcc1_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_sdcc1_apps_clk.c),
+	},
+};
+
+static struct branch_clk gcc_sdcc2_ahb_clk = {
+	.cbcr_reg = SDCC2_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_sdcc2_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_sdcc2_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_sdcc2_apps_clk = {
+	.cbcr_reg = SDCC2_APPS_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_sdcc2_apps_clk",
+		.parent = &sdcc2_apps_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_sdcc2_apps_clk.c),
+	},
+};
+
+/* SMMU TCU/TBU clocks, all voted through APCS_SMMU_CLOCK_BRANCH_ENA_VOTE. */
+static struct local_vote_clk gcc_apss_tcu_clk = {
+	.cbcr_reg = APSS_TCU_CBCR,
+	.vote_reg = APCS_SMMU_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(1),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_apss_tcu_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_apss_tcu_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_gfx_tcu_clk = {
+	.cbcr_reg = GFX_TCU_CBCR,
+	.vote_reg = APCS_SMMU_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(2),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_gfx_tcu_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_gfx_tcu_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_gfx_tbu_clk = {
+	.cbcr_reg = GFX_TBU_CBCR,
+	.vote_reg = APCS_SMMU_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(3),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_gfx_tbu_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_gfx_tbu_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_mdp_tbu_clk = {
+	.cbcr_reg = MDP_TBU_CBCR,
+	.vote_reg = APCS_SMMU_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(4),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_mdp_tbu_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_mdp_tbu_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_venus_tbu_clk = {
+	.cbcr_reg = VENUS_TBU_CBCR,
+	.vote_reg = APCS_SMMU_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(5),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_venus_tbu_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_venus_tbu_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_vfe_tbu_clk = {
+	.cbcr_reg = VFE_TBU_CBCR,
+	.vote_reg = APCS_SMMU_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(9),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_vfe_tbu_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_vfe_tbu_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_jpeg_tbu_clk = {
+	.cbcr_reg = JPEG_TBU_CBCR,
+	.vote_reg = APCS_SMMU_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(10),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_jpeg_tbu_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_jpeg_tbu_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_smmu_cfg_clk = {
+	.cbcr_reg = SMMU_CFG_CBCR,
+	.vote_reg = APCS_SMMU_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(12),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_smmu_cfg_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_smmu_cfg_clk.c),
+	},
+};
+
+static struct local_vote_clk gcc_gtcu_ahb_clk = {
+	.cbcr_reg = GTCU_AHB_CBCR,
+	.vote_reg = APCS_SMMU_CLOCK_BRANCH_ENA_VOTE,
+	.en_mask = BIT(13),
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_gtcu_ahb_clk",
+		.ops = &clk_ops_vote,
+		CLK_INIT(gcc_gtcu_ahb_clk.c),
+	},
+};
+
+/* USB high-speed and Venus video-codec branch clocks. */
+static struct branch_clk gcc_usb2a_phy_sleep_clk = {
+	.cbcr_reg = USB2A_PHY_SLEEP_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_usb2a_phy_sleep_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_usb2a_phy_sleep_clk.c),
+	},
+};
+
+static struct branch_clk gcc_usb_hs_ahb_clk = {
+	.cbcr_reg = USB_HS_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_usb_hs_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_usb_hs_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_usb_hs_system_clk = {
+	.cbcr_reg = USB_HS_SYSTEM_CBCR,
+	.bcr_reg = USB_HS_BCR,
+	/* .has_sibling is implicitly 0: sole consumer of its RCG. */
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_usb_hs_system_clk",
+		.parent = &usb_hs_system_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_usb_hs_system_clk.c),
+	},
+};
+
+static struct branch_clk gcc_venus0_ahb_clk = {
+	.cbcr_reg = VENUS0_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_venus0_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_venus0_ahb_clk.c),
+	},
+};
+
+static struct branch_clk gcc_venus0_axi_clk = {
+	.cbcr_reg = VENUS0_AXI_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_venus0_axi_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_venus0_axi_clk.c),
+	},
+};
+
+static struct branch_clk gcc_venus0_vcodec0_clk = {
+	.cbcr_reg = VENUS0_VCODEC0_CBCR,
+	.has_sibling = 0,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_venus0_vcodec0_clk",
+		.parent = &vcodec0_clk_src.c,
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_venus0_vcodec0_clk.c),
+	},
+};
+
+/*
+ * Clock-measurement scaffolding for the debug controller.
+ *
+ * gcc_debug_mux / clk_ops_debug_mux are forward-declared here and filled
+ * in below and in msm_clock_debug_probe().  The apc*/l2 "measure" clocks
+ * are placeholders (clk_ops_empty) selected through a three-level mux
+ * hierarchy (ter -> sec -> pri) living in the APSS GLB diag register
+ * block mapped at meas_base.
+ */
+static struct mux_clk gcc_debug_mux;
+static struct clk_ops clk_ops_debug_mux;
+
+/* APSS GLB clock-diag registers; mapped in msm_clock_debug_probe(). */
+static void __iomem *meas_base;
+
+static struct measure_clk apc0_m_clk = {
+	.c = {
+		.ops = &clk_ops_empty,
+		.dbg_name = "apc0_m_clk",
+		CLK_INIT(apc0_m_clk.c),
+	},
+};
+
+static struct measure_clk apc1_m_clk = {
+	.c = {
+		.ops = &clk_ops_empty,
+		.dbg_name = "apc1_m_clk",
+		CLK_INIT(apc1_m_clk.c),
+	},
+};
+
+static struct measure_clk apc2_m_clk = {
+	.c = {
+		.ops = &clk_ops_empty,
+		.dbg_name = "apc2_m_clk",
+		CLK_INIT(apc2_m_clk.c),
+	},
+};
+
+static struct measure_clk apc3_m_clk = {
+	.c = {
+		.ops = &clk_ops_empty,
+		.dbg_name = "apc3_m_clk",
+		CLK_INIT(apc3_m_clk.c),
+	},
+};
+
+static struct measure_clk l2_m_clk = {
+	.c = {
+		.ops = &clk_ops_empty,
+		.dbg_name = "l2_m_clk",
+		CLK_INIT(l2_m_clk.c),
+	},
+};
+
+/* Tertiary mux: picks one of the four per-CPU measure clocks. */
+static struct mux_clk apss_debug_ter_mux = {
+	.ops = &mux_reg_ops,
+	.mask = 0x3,
+	.shift = 8,
+	MUX_SRC_LIST(
+		{&apc0_m_clk.c, 0},
+		{&apc1_m_clk.c, 1},
+		{&apc2_m_clk.c, 2},
+		{&apc3_m_clk.c, 3},
+	),
+	.base = &meas_base,
+	.c = {
+		.dbg_name = "apss_debug_ter_mux",
+		.ops = &clk_ops_gen_mux,
+		CLK_INIT(apss_debug_ter_mux.c),
+	},
+};
+
+/* Secondary mux: tertiary mux output or the L2 measure clock. */
+static struct mux_clk apss_debug_sec_mux = {
+	.ops = &mux_reg_ops,
+	.mask = 0x7,
+	.shift = 12,
+	MUX_SRC_LIST(
+		{&apss_debug_ter_mux.c, 0},
+		{&l2_m_clk.c, 1},
+	),
+	MUX_REC_SRC_LIST(
+		&apss_debug_ter_mux.c,
+	),
+	.base = &meas_base,
+	.c = {
+		.dbg_name = "apss_debug_sec_mux",
+		.ops = &clk_ops_gen_mux,
+		CLK_INIT(apss_debug_sec_mux.c),
+	},
+};
+
+/* Primary mux: sole input is the secondary mux. */
+static struct mux_clk apss_debug_pri_mux = {
+	.ops = &mux_reg_ops,
+	.mask = 0x3,
+	.shift = 16,
+	MUX_SRC_LIST(
+		{&apss_debug_sec_mux.c, 0},
+	),
+	MUX_REC_SRC_LIST(
+		&apss_debug_sec_mux.c,
+	),
+	.base = &meas_base,
+	.c = {
+		.dbg_name = "apss_debug_pri_mux",
+		.ops = &clk_ops_gen_mux,
+		CLK_INIT(apss_debug_pri_mux.c),
+	},
+};
+
+/* Register set the frequency-measurement counter logic operates on. */
+static struct measure_clk_data debug_mux_priv = {
+	.cxo = &xo_clk_src.c,
+	.plltest_reg = GCC_PLLTEST_PAD_CFG,
+	.plltest_val = 0x51A00,
+	.xo_div4_cbcr = GCC_XO_DIV4_CBCR,
+	.ctl_reg = CLOCK_FRQ_MEASURE_CTL,
+	.status_reg = CLOCK_FRQ_MEASURE_STATUS,
+	.base = &virt_bases[GCC_BASE],
+};
+
+/*
+ * gcc_set_mux_sel() - program the GCC debug-mux select field.
+ *
+ * Masks GCC_DEBUG_CLK_CTL down to its low 9 mux-select bits (clearing all
+ * other control bits), then programs @sel through the generic mux register
+ * ops.  A @sel of 0xFFFF is the sentinel used for sources reached through
+ * the RPM debug mux (see gcc_debug_mux's MUX_SRC_LIST), so GCC itself is
+ * left untouched in that case.  Always returns 0.
+ */
+static int gcc_set_mux_sel(struct mux_clk *clk, int sel)
+{
+	u32 regval;
+
+	/* Keep only the 9-bit select field; clear the remaining bits. */
+	regval = readl_relaxed(GCC_REG_BASE(GCC_DEBUG_CLK_CTL));
+	regval &= 0x1FF;
+	writel_relaxed(regval, GCC_REG_BASE(GCC_DEBUG_CLK_CTL));
+
+	/* Clock is routed via the RPM debug mux; nothing to program here. */
+	if (sel == 0xFFFF)
+		return 0;
+
+	mux_reg_ops.set_mux_sel(clk, sel);
+
+	return 0;
+}
+
+/* Filled from mux_reg_ops in msm_clock_debug_probe(), with set_mux_sel
+ * redirected to gcc_set_mux_sel(). */
+static struct clk_mux_ops gcc_debug_mux_ops;
+
+/*
+ * Top-level GCC debug mux.  The MUX_SRC_LIST maps each measurable clock
+ * to its hardware debug-select code; 0xFFFF routes through the RPM debug
+ * mux (handled specially in gcc_set_mux_sel()).
+ */
+static struct mux_clk gcc_debug_mux = {
+	.priv = &debug_mux_priv,
+	.ops = &gcc_debug_mux_ops,
+	.offset = GCC_DEBUG_CLK_CTL,
+	.en_mask = BIT(16),
+	.mask = 0x1FF,
+	.base = &virt_bases[GCC_BASE],
+	MUX_REC_SRC_LIST(
+		&rpm_debug_clk.c,
+		&apss_debug_pri_mux.c,
+	),
+	MUX_SRC_LIST(
+		{&rpm_debug_clk.c, 0xFFFF},
+		{&apss_debug_pri_mux.c, 0x016A},
+		{&gcc_gp1_clk.c, 0x0010},
+		{&gcc_gp2_clk.c, 0x0011},
+		{&gcc_gp3_clk.c, 0x0012},
+		{&gcc_bimc_gfx_clk.c, 0x002d},
+		{&gcc_mss_cfg_ahb_clk.c, 0x0030},
+		{&gcc_mss_q6_bimc_axi_clk.c, 0x0031},
+		{&gcc_apss_tcu_clk.c, 0x0050},
+		{&gcc_mdp_tbu_clk.c, 0x0051},
+		{&gcc_gfx_tbu_clk.c, 0x0052},
+		{&gcc_gfx_tcu_clk.c, 0x0053},
+		{&gcc_venus_tbu_clk.c, 0x0054},
+		{&gcc_gtcu_ahb_clk.c, 0x0058},
+		{&gcc_vfe_tbu_clk.c, 0x005a},
+		{&gcc_smmu_cfg_clk.c, 0x005b},
+		{&gcc_jpeg_tbu_clk.c, 0x005c},
+		{&gcc_usb_hs_system_clk.c, 0x0060},
+		{&gcc_usb_hs_ahb_clk.c, 0x0061},
+		{&gcc_usb2a_phy_sleep_clk.c, 0x0063},
+		{&gcc_sdcc1_apps_clk.c, 0x0068},
+		{&gcc_sdcc1_ahb_clk.c, 0x0069},
+		{&gcc_sdcc2_apps_clk.c, 0x0070},
+		{&gcc_sdcc2_ahb_clk.c, 0x0071},
+		{&gcc_blsp1_ahb_clk.c, 0x0088},
+		{&gcc_blsp1_qup1_spi_apps_clk.c, 0x008a},
+		{&gcc_blsp1_qup1_i2c_apps_clk.c, 0x008b},
+		{&gcc_blsp1_uart1_apps_clk.c, 0x008c},
+		{&gcc_blsp1_qup2_spi_apps_clk.c, 0x008e},
+		{&gcc_blsp1_qup2_i2c_apps_clk.c, 0x0090},
+		{&gcc_blsp1_uart2_apps_clk.c, 0x0091},
+		{&gcc_blsp1_qup3_spi_apps_clk.c, 0x0093},
+		{&gcc_blsp1_qup3_i2c_apps_clk.c, 0x0094},
+		{&gcc_blsp1_qup4_spi_apps_clk.c, 0x0098},
+		{&gcc_blsp1_qup4_i2c_apps_clk.c, 0x0099},
+		{&gcc_blsp1_qup5_spi_apps_clk.c, 0x009c},
+		{&gcc_blsp1_qup5_i2c_apps_clk.c, 0x009d},
+		{&gcc_blsp1_qup6_spi_apps_clk.c, 0x00a1},
+		{&gcc_blsp1_qup6_i2c_apps_clk.c, 0x00a2},
+		{&gcc_camss_ahb_clk.c, 0x00a8},
+		{&gcc_camss_top_ahb_clk.c, 0x00a9},
+		{&gcc_camss_micro_ahb_clk.c, 0x00aa},
+		{&gcc_camss_gp0_clk.c, 0x00ab},
+		{&gcc_camss_gp1_clk.c, 0x00ac},
+		{&gcc_camss_mclk0_clk.c, 0x00ad},
+		{&gcc_camss_mclk1_clk.c, 0x00ae},
+		{&gcc_camss_cci_clk.c, 0x00af},
+		{&gcc_camss_cci_ahb_clk.c, 0x00b0},
+		{&gcc_camss_csi0phytimer_clk.c, 0x00b1},
+		{&gcc_camss_csi1phytimer_clk.c, 0x00b2},
+		{&gcc_camss_jpeg0_clk.c, 0x00b3},
+		{&gcc_camss_jpeg_ahb_clk.c, 0x00b4},
+		{&gcc_camss_jpeg_axi_clk.c, 0x00b5},
+		{&gcc_camss_vfe0_clk.c, 0x00b8},
+		{&gcc_camss_cpp_clk.c, 0x00b9},
+		{&gcc_camss_cpp_ahb_clk.c, 0x00ba},
+		{&gcc_camss_vfe_ahb_clk.c, 0x00bb},
+		{&gcc_camss_vfe_axi_clk.c, 0x00bc},
+		{&gcc_camss_csi_vfe0_clk.c, 0x00bf},
+		{&gcc_camss_csi0_clk.c, 0x00c0},
+		{&gcc_camss_csi0_ahb_clk.c, 0x00c1},
+		{&gcc_camss_csi0phy_clk.c, 0x00c2},
+		{&gcc_camss_csi0rdi_clk.c, 0x00c3},
+		{&gcc_camss_csi0pix_clk.c, 0x00c4},
+		{&gcc_camss_csi1_clk.c, 0x00c5},
+		{&gcc_camss_csi1_ahb_clk.c, 0x00c6},
+		{&gcc_camss_csi1phy_clk.c, 0x00c7},
+		{&gcc_pdm_ahb_clk.c, 0x00d0},
+		{&gcc_pdm2_clk.c, 0x00d2},
+		{&gcc_prng_ahb_clk.c, 0x00d8},
+		{&gcc_camss_csi1rdi_clk.c, 0x00e0},
+		{&gcc_camss_csi1pix_clk.c, 0x00e1},
+		{&gcc_camss_ispif_ahb_clk.c, 0x00e2},
+		{&gcc_boot_rom_ahb_clk.c, 0x00f8},
+		{&gcc_crypto_clk.c, 0x0138},
+		{&gcc_crypto_axi_clk.c, 0x0139},
+		{&gcc_crypto_ahb_clk.c, 0x013a},
+		{&gcc_oxili_gfx3d_clk.c, 0x01ea},
+		{&gcc_oxili_ahb_clk.c, 0x01eb},
+		{&gcc_oxili_gmem_clk.c, 0x01f0},
+		{&gcc_venus0_vcodec0_clk.c, 0x01f1},
+		{&gcc_venus0_axi_clk.c, 0x01f2},
+		{&gcc_venus0_ahb_clk.c, 0x01f3},
+		{&gcc_mdss_ahb_clk.c, 0x01f6},
+		{&gcc_mdss_axi_clk.c, 0x01f7},
+		{&gcc_mdss_pclk0_clk.c, 0x01f8},
+		{&gcc_mdss_mdp_clk.c, 0x01f9},
+		{&gcc_mdss_vsync_clk.c, 0x01fb},
+		{&gcc_mdss_byte0_clk.c, 0x01fc},
+		{&gcc_mdss_esc0_clk.c, 0x01fd},
+		{&gcc_bimc_gpu_clk.c, 0x0157},
+		{&wcnss_m_clk.c, 0x0198},
+	),
+	.c = {
+		.dbg_name = "gcc_debug_mux",
+		.ops = &clk_ops_debug_mux,
+		.flags = CLKFLAG_NO_RATE_CACHE | CLKFLAG_MEASURE,
+		CLK_INIT(gcc_debug_mux.c),
+	},
+};
+
+/*
+ * Main clock lookup table registered with the MSM clock framework in
+ * msm_gcc_probe().  Grouped by clock type: PLL sources, RCGs, remotely
+ * voted clocks, then directly controlled branches.
+ */
+static struct clk_lookup msm_clocks_lookup[] = {
+	/* PLLs */
+	CLK_LIST(gpll0_clk_src),
+	CLK_LIST(gpll0_ao_clk_src),
+	CLK_LIST(a53sspll),
+	CLK_LIST(gpll1_clk_src),
+	CLK_LIST(gpll2_clk_src),
+
+	/* RCGs */
+	CLK_LIST(apss_ahb_clk_src),
+	CLK_LIST(camss_ahb_clk_src),
+	CLK_LIST(crypto_clk_src),
+	CLK_LIST(csi0_clk_src),
+	CLK_LIST(csi1_clk_src),
+	CLK_LIST(vfe0_clk_src),
+	CLK_LIST(mdp_clk_src),
+	CLK_LIST(gfx3d_clk_src),
+	CLK_LIST(blsp1_qup1_i2c_apps_clk_src),
+	CLK_LIST(blsp1_qup1_spi_apps_clk_src),
+	CLK_LIST(blsp1_qup2_i2c_apps_clk_src),
+	CLK_LIST(blsp1_qup2_spi_apps_clk_src),
+	CLK_LIST(blsp1_qup3_i2c_apps_clk_src),
+	CLK_LIST(blsp1_qup3_spi_apps_clk_src),
+	CLK_LIST(blsp1_qup4_i2c_apps_clk_src),
+	CLK_LIST(blsp1_qup4_spi_apps_clk_src),
+	CLK_LIST(blsp1_qup5_i2c_apps_clk_src),
+	CLK_LIST(blsp1_qup5_spi_apps_clk_src),
+	CLK_LIST(blsp1_qup6_i2c_apps_clk_src),
+	CLK_LIST(blsp1_qup6_spi_apps_clk_src),
+	CLK_LIST(blsp1_uart1_apps_clk_src),
+	CLK_LIST(blsp1_uart2_apps_clk_src),
+	CLK_LIST(cci_clk_src),
+	CLK_LIST(camss_gp0_clk_src),
+	CLK_LIST(camss_gp1_clk_src),
+	CLK_LIST(jpeg0_clk_src),
+	CLK_LIST(mclk0_clk_src),
+	CLK_LIST(mclk1_clk_src),
+	CLK_LIST(csi0phytimer_clk_src),
+	CLK_LIST(csi1phytimer_clk_src),
+	CLK_LIST(cpp_clk_src),
+	CLK_LIST(gp1_clk_src),
+	CLK_LIST(gp2_clk_src),
+	CLK_LIST(gp3_clk_src),
+	CLK_LIST(esc0_clk_src),
+	CLK_LIST(vsync_clk_src),
+	CLK_LIST(pdm2_clk_src),
+	CLK_LIST(sdcc1_apps_clk_src),
+	CLK_LIST(sdcc2_apps_clk_src),
+	CLK_LIST(usb_hs_system_clk_src),
+	CLK_LIST(vcodec0_clk_src),
+
+	/* Voteable Clocks */
+	CLK_LIST(gcc_blsp1_ahb_clk),
+	CLK_LIST(gcc_boot_rom_ahb_clk),
+	CLK_LIST(gcc_prng_ahb_clk),
+	CLK_LIST(gcc_apss_tcu_clk),
+	CLK_LIST(gcc_gfx_tbu_clk),
+	CLK_LIST(gcc_gfx_tcu_clk),
+	CLK_LIST(gcc_gtcu_ahb_clk),
+	CLK_LIST(gcc_jpeg_tbu_clk),
+	CLK_LIST(gcc_mdp_tbu_clk),
+	CLK_LIST(gcc_smmu_cfg_clk),
+	CLK_LIST(gcc_venus_tbu_clk),
+	CLK_LIST(gcc_vfe_tbu_clk),
+
+	/* Branches */
+	CLK_LIST(gcc_blsp1_qup1_i2c_apps_clk),
+	CLK_LIST(gcc_blsp1_qup1_spi_apps_clk),
+	CLK_LIST(gcc_blsp1_qup2_i2c_apps_clk),
+	CLK_LIST(gcc_blsp1_qup2_spi_apps_clk),
+	CLK_LIST(gcc_blsp1_qup3_i2c_apps_clk),
+	CLK_LIST(gcc_blsp1_qup3_spi_apps_clk),
+	CLK_LIST(gcc_blsp1_qup4_i2c_apps_clk),
+	CLK_LIST(gcc_blsp1_qup4_spi_apps_clk),
+	CLK_LIST(gcc_blsp1_qup5_i2c_apps_clk),
+	CLK_LIST(gcc_blsp1_qup5_spi_apps_clk),
+	CLK_LIST(gcc_blsp1_qup6_i2c_apps_clk),
+	CLK_LIST(gcc_blsp1_qup6_spi_apps_clk),
+	CLK_LIST(gcc_blsp1_uart1_apps_clk),
+	CLK_LIST(gcc_blsp1_uart2_apps_clk),
+	CLK_LIST(gcc_camss_cci_ahb_clk),
+	CLK_LIST(gcc_camss_cci_clk),
+	CLK_LIST(gcc_camss_csi0_ahb_clk),
+	CLK_LIST(gcc_camss_csi0_clk),
+	CLK_LIST(gcc_camss_csi0phy_clk),
+	CLK_LIST(gcc_camss_csi0pix_clk),
+	CLK_LIST(gcc_camss_csi0rdi_clk),
+	CLK_LIST(gcc_camss_csi1_ahb_clk),
+	CLK_LIST(gcc_camss_csi1_clk),
+	CLK_LIST(gcc_camss_csi1phy_clk),
+	CLK_LIST(gcc_camss_csi1pix_clk),
+	CLK_LIST(gcc_camss_csi1rdi_clk),
+	CLK_LIST(gcc_camss_csi_vfe0_clk),
+	CLK_LIST(gcc_camss_gp0_clk),
+	CLK_LIST(gcc_camss_gp1_clk),
+	CLK_LIST(gcc_camss_ispif_ahb_clk),
+	CLK_LIST(gcc_camss_jpeg0_clk),
+	CLK_LIST(gcc_camss_jpeg_ahb_clk),
+	CLK_LIST(gcc_camss_jpeg_axi_clk),
+	CLK_LIST(gcc_camss_mclk0_clk),
+	CLK_LIST(gcc_camss_mclk1_clk),
+	CLK_LIST(gcc_camss_micro_ahb_clk),
+	CLK_LIST(gcc_camss_csi0phytimer_clk),
+	CLK_LIST(gcc_camss_csi1phytimer_clk),
+	CLK_LIST(gcc_camss_ahb_clk),
+	CLK_LIST(gcc_camss_top_ahb_clk),
+	CLK_LIST(gcc_camss_cpp_ahb_clk),
+	CLK_LIST(gcc_camss_cpp_clk),
+	CLK_LIST(gcc_camss_vfe0_clk),
+	CLK_LIST(gcc_camss_vfe_ahb_clk),
+	CLK_LIST(gcc_camss_vfe_axi_clk),
+	CLK_LIST(gcc_oxili_gmem_clk),
+	CLK_LIST(gcc_gp1_clk),
+	CLK_LIST(gcc_gp2_clk),
+	CLK_LIST(gcc_gp3_clk),
+	CLK_LIST(gcc_mdss_ahb_clk),
+	CLK_LIST(gcc_mdss_axi_clk),
+	CLK_LIST(gcc_mdss_esc0_clk),
+	CLK_LIST(gcc_mdss_mdp_clk),
+	CLK_LIST(gcc_mdss_vsync_clk),
+	CLK_LIST(gcc_mss_cfg_ahb_clk),
+	CLK_LIST(gcc_mss_q6_bimc_axi_clk),
+	CLK_LIST(gcc_oxili_ahb_clk),
+	CLK_LIST(gcc_oxili_gfx3d_clk),
+	CLK_LIST(gcc_pdm2_clk),
+	CLK_LIST(gcc_pdm_ahb_clk),
+	CLK_LIST(gcc_sdcc1_ahb_clk),
+	CLK_LIST(gcc_sdcc1_apps_clk),
+	CLK_LIST(gcc_sdcc2_ahb_clk),
+	CLK_LIST(gcc_sdcc2_apps_clk),
+	CLK_LIST(gcc_usb2a_phy_sleep_clk),
+	CLK_LIST(gcc_usb_hs_ahb_clk),
+	CLK_LIST(gcc_usb_hs_system_clk),
+	CLK_LIST(gcc_venus0_ahb_clk),
+	CLK_LIST(gcc_venus0_axi_clk),
+	CLK_LIST(gcc_venus0_vcodec0_clk),
+	CLK_LIST(gcc_bimc_gfx_clk),
+	CLK_LIST(gcc_bimc_gpu_clk),
+	CLK_LIST(wcnss_m_clk),
+};
+
+/* Crypto clock aliases exposed to the "scm" (secure channel) device. */
+static struct clk_lookup msm_clocks_gcc_8916_crypto[] = {
+	/* Crypto clocks */
+	CLK_LOOKUP_OF("core_clk",     gcc_crypto_clk,      "scm"),
+	CLK_LOOKUP_OF("iface_clk",    gcc_crypto_ahb_clk,  "scm"),
+	CLK_LOOKUP_OF("bus_clk",      gcc_crypto_axi_clk,  "scm"),
+	CLK_LOOKUP_OF("core_clk_src", crypto_clk_src,      "scm"),
+};
+
+/*
+ * msm_gcc_probe() - probe the MSM8916 global clock controller.
+ *
+ * Maps the "cc_base" (GCC) and "apcs_base" (APCS PLL) register regions,
+ * acquires the vdd_dig / vdd_sr2_pll / vdd_sr2_dig regulators and the XO
+ * parent clocks, votes GPLL0 on for acpuclock, then registers the GCC and
+ * crypto clock lookup tables with the MSM clock framework.
+ *
+ * Returns 0 on success or a negative errno; -EPROBE_DEFER propagates from
+ * the regulator/clock lookups.  Deliberately NOT marked __init: a deferred
+ * probe may re-run after init memory has been discarded.
+ */
+static int msm_gcc_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct clk *xo_gcc;
+	int ret;
+	u32 regval;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base");
+	if (!res) {
+		dev_err(&pdev->dev, "Register base not defined\n");
+		/* Missing DT resource is a configuration error, not OOM. */
+		return -EINVAL;
+	}
+
+	virt_bases[GCC_BASE] = devm_ioremap(&pdev->dev, res->start,
+							resource_size(res));
+	if (!virt_bases[GCC_BASE]) {
+		dev_err(&pdev->dev, "Failed to ioremap CC registers\n");
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apcs_base");
+	if (!res) {
+		dev_err(&pdev->dev, "APCS PLL Register base not defined\n");
+		return -EINVAL;
+	}
+
+	virt_bases[APCS_PLL_BASE] = devm_ioremap(&pdev->dev, res->start,
+							resource_size(res));
+	if (!virt_bases[APCS_PLL_BASE]) {
+		dev_err(&pdev->dev, "Failed to ioremap APCS PLL registers\n");
+		return -ENOMEM;
+	}
+
+	vdd_dig.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig");
+	if (IS_ERR(vdd_dig.regulator[0])) {
+		if (PTR_ERR(vdd_dig.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+					"Unable to get vdd_dig regulator!!!\n");
+		return PTR_ERR(vdd_dig.regulator[0]);
+	}
+
+	vdd_sr2_pll.regulator[0] = devm_regulator_get(&pdev->dev,
+							"vdd_sr2_pll");
+	if (IS_ERR(vdd_sr2_pll.regulator[0])) {
+		if (PTR_ERR(vdd_sr2_pll.regulator[0]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_sr2_pll regulator!!!\n");
+		return PTR_ERR(vdd_sr2_pll.regulator[0]);
+	}
+
+	vdd_sr2_pll.regulator[1] = devm_regulator_get(&pdev->dev,
+							"vdd_sr2_dig");
+	if (IS_ERR(vdd_sr2_pll.regulator[1])) {
+		if (PTR_ERR(vdd_sr2_pll.regulator[1]) != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"Unable to get vdd_sr2_dig regulator!!!\n");
+		return PTR_ERR(vdd_sr2_pll.regulator[1]);
+	}
+
+	xo_gcc = xo_clk_src.c.parent = devm_clk_get(&pdev->dev, "xo");
+	if (IS_ERR(xo_gcc)) {
+		if (PTR_ERR(xo_gcc) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get XO clock!!!\n");
+		return PTR_ERR(xo_gcc);
+	}
+
+	/* Vote for GPLL0 to turn on. Needed by acpuclock. */
+	regval = readl_relaxed(GCC_REG_BASE(APCS_GPLL_ENA_VOTE));
+	regval |= BIT(0);
+	writel_relaxed(regval, GCC_REG_BASE(APCS_GPLL_ENA_VOTE));
+
+	/* devm-managed (unlike plain clk_get()) so the reference is not
+	 * leaked if clock registration below fails. */
+	xo_a_clk_src.c.parent = devm_clk_get(&pdev->dev, "xo_a");
+	if (IS_ERR(xo_a_clk_src.c.parent)) {
+		if (PTR_ERR(xo_a_clk_src.c.parent) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get xo_a clock!!!\n");
+		return PTR_ERR(xo_a_clk_src.c.parent);
+	}
+
+	ret = of_msm_clock_register(pdev->dev.of_node,
+				msm_clocks_lookup,
+				ARRAY_SIZE(msm_clocks_lookup));
+	if (ret)
+		return ret;
+
+	ret = of_msm_clock_register(pdev->dev.of_node,
+				msm_clocks_gcc_8916_crypto,
+				ARRAY_SIZE(msm_clocks_gcc_8916_crypto));
+	if (ret)
+		return ret;
+
+	/* Keep the APSS AHB RCG running at XO rate from here on. */
+	clk_set_rate(&apss_ahb_clk_src.c, 19200000);
+	clk_prepare_enable(&apss_ahb_clk_src.c);
+
+	dev_info(&pdev->dev, "Registered GCC clocks\n");
+
+	return 0;
+}
+
+/* DT match table for the main GCC node. */
+/* NOTE(review): could be "static const struct of_device_id" -- verify
+ * against the of_match_table field's constness in this kernel version. */
+static struct of_device_id msm_clock_gcc_match_table[] = {
+	{ .compatible = "qcom,gcc-8916" },
+	{},
+};
+
+static struct platform_driver msm_clock_gcc_ops = {
+	.probe = msm_gcc_probe,
+	.driver = {
+		.name = "qcom,gcc-8916",
+		.of_match_table = msm_clock_gcc_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+/* Registered at arch_initcall so clocks exist before most consumers. */
+static int __init msm_gcc_init(void)
+{
+	return platform_driver_register(&msm_clock_gcc_ops);
+}
+arch_initcall(msm_gcc_init);
+
+/* Debug/measurement clocks registered by msm_clock_debug_probe(). */
+static struct clk_lookup msm_clocks_measure[] = {
+	CLK_LOOKUP_OF("measure", gcc_debug_mux, "debug"),
+	CLK_LIST(apss_debug_pri_mux),
+	CLK_LIST(apc0_m_clk),
+	CLK_LIST(apc1_m_clk),
+	CLK_LIST(apc2_m_clk),
+	CLK_LIST(apc3_m_clk),
+	CLK_LIST(l2_m_clk),
+};
+
+/*
+ * msm_clock_debug_probe() - probe the 8916 clock-measurement debug block.
+ *
+ * Maps the "meas" (GLB clock diag) register region, wires the measurement
+ * get_rate hook and the GCC mux-select hook into copies of the generic
+ * ops, looks up the RPM debug mux parent, and registers the measurement
+ * clock table.
+ *
+ * Deliberately NOT marked __init: the clock lookup can return
+ * -EPROBE_DEFER, and a deferred probe may re-run after init memory has
+ * been discarded.
+ */
+static int msm_clock_debug_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct resource *res;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "meas");
+	if (!res) {
+		dev_err(&pdev->dev, "GLB clock diag base not defined.\n");
+		return -EINVAL;
+	}
+
+	meas_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!meas_base) {
+		dev_err(&pdev->dev, "Unable to map GLB clock diag base.\n");
+		return -ENOMEM;
+	}
+
+	/* Generic mux ops, with rate queries routed to the HW counter. */
+	clk_ops_debug_mux = clk_ops_gen_mux;
+	clk_ops_debug_mux.get_rate = measure_get_rate;
+
+	gcc_debug_mux_ops = mux_reg_ops;
+	gcc_debug_mux_ops.set_mux_sel = gcc_set_mux_sel;
+
+	/* devm-managed so the reference is not leaked if registration
+	 * fails; stay quiet on probe deferral. */
+	rpm_debug_clk.c.parent = devm_clk_get(&pdev->dev, "rpm_debug_mux");
+	if (IS_ERR(rpm_debug_clk.c.parent)) {
+		if (PTR_ERR(rpm_debug_clk.c.parent) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Failed to get RPM debug Mux\n");
+		return PTR_ERR(rpm_debug_clk.c.parent);
+	}
+
+	ret = of_msm_clock_register(pdev->dev.of_node, msm_clocks_measure,
+					ARRAY_SIZE(msm_clocks_measure));
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to register debug Mux\n");
+		return ret;
+	}
+
+	dev_info(&pdev->dev, "Registered Debug Mux successfully\n");
+	return 0;
+}
+
+/* DT match table for the clock-debug node. */
+static struct of_device_id msm_clock_debug_match_table[] = {
+	{ .compatible = "qcom,cc-debug-8916" },
+	{}
+};
+
+static struct platform_driver msm_clock_debug_ops = {
+	.probe = msm_clock_debug_probe,
+	.driver = {
+		.name = "qcom,cc-debug-8916",
+		.of_match_table = msm_clock_debug_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+/* late_initcall: debug mux is only useful once the real clocks exist. */
+static int __init msm_clock_debug_init(void)
+{
+	return platform_driver_register(&msm_clock_debug_ops);
+}
+late_initcall(msm_clock_debug_init);
+
+/* MDSS DSI_PHY_PLL */
+/* Clocks whose rates come from the external DSI PHY PLL; registered by
+ * msm_gcc_mdss_probe() once the PLL parents have been resolved. */
+static struct clk_lookup msm_clocks_gcc_mdss[] = {
+	CLK_LIST(byte0_clk_src),
+	CLK_LIST(pclk0_clk_src),
+	CLK_LIST(gcc_mdss_pclk0_clk),
+	CLK_LIST(gcc_mdss_byte0_clk),
+};
+
+/*
+ * msm_gcc_mdss_probe() - probe the GCC MDSS (DSI PHY PLL sourced) clocks.
+ *
+ * The byte0/pclk0 RCGs are fed by the external DSI PHY PLL, so the
+ * "pixel_src" and "byte_src" clocks are looked up here and patched into
+ * every frequency-table entry before the clocks are registered.
+ *
+ * Deliberately NOT marked __init: devm_clk_get() can return
+ * -EPROBE_DEFER, and a deferred probe may re-run after init memory has
+ * been discarded.
+ */
+static int msm_gcc_mdss_probe(struct platform_device *pdev)
+{
+	int counter, ret;
+
+	pclk0_clk_src.c.parent = devm_clk_get(&pdev->dev, "pixel_src");
+	if (IS_ERR(pclk0_clk_src.c.parent)) {
+		if (PTR_ERR(pclk0_clk_src.c.parent) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Failed to get pixel source.\n");
+		return PTR_ERR(pclk0_clk_src.c.parent);
+	}
+
+	/* Every pclk0 frequency entry is sourced from the pixel PLL. */
+	for (counter = 0; counter < ARRAY_SIZE(ftbl_gcc_mdss_pclk0_clk);
+								counter++)
+		ftbl_gcc_mdss_pclk0_clk[counter].src_clk =
+						pclk0_clk_src.c.parent;
+
+	byte0_clk_src.c.parent = devm_clk_get(&pdev->dev, "byte_src");
+	if (IS_ERR(byte0_clk_src.c.parent)) {
+		if (PTR_ERR(byte0_clk_src.c.parent) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Failed to get byte0 source.\n");
+		devm_clk_put(&pdev->dev, pclk0_clk_src.c.parent);
+		return PTR_ERR(byte0_clk_src.c.parent);
+	}
+
+	/* Every byte0 frequency entry is sourced from the byte PLL. */
+	for (counter = 0; counter < ARRAY_SIZE(ftbl_gcc_mdss_byte0_clk);
+								counter++)
+		ftbl_gcc_mdss_byte0_clk[counter].src_clk =
+						byte0_clk_src.c.parent;
+
+	ret = of_msm_clock_register(pdev->dev.of_node, msm_clocks_gcc_mdss,
+					ARRAY_SIZE(msm_clocks_gcc_mdss));
+	if (ret)
+		return ret;
+
+	dev_info(&pdev->dev, "Registered GCC MDSS clocks.\n");
+
+	return 0;
+}
+
+/* DT match table for the GCC-MDSS node. */
+static struct of_device_id msm_clock_mdss_match_table[] = {
+	{ .compatible = "qcom,gcc-mdss-8916" },
+	{}
+};
+
+static struct platform_driver msm_clock_gcc_mdss_ops = {
+	.probe = msm_gcc_mdss_probe,
+	.driver = {
+		.name = "gcc-mdss-8916",
+		.of_match_table = msm_clock_mdss_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+/* fs_initcall_sync: after GCC (arch_initcall) but before display init. */
+static int __init msm_gcc_mdss_init(void)
+{
+	return platform_driver_register(&msm_clock_gcc_mdss_ops);
+}
+fs_initcall_sync(msm_gcc_mdss_init);
diff --git a/drivers/clk/qcom/clock-generic.c b/drivers/clk/qcom/clock-generic.c
new file mode 100644
index 000000000000..8292dc9db920
--- /dev/null
+++ b/drivers/clk/qcom/clock-generic.c
@@ -0,0 +1,793 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clock-generic.h>
+
+/* ==================== Mux clock ==================== */
+
+/*
+ * Map a parent clock pointer to its hardware mux select value.
+ * Returns -EINVAL if @p is not in @parents.
+ */
+int parent_to_src_sel(struct clk_src *parents, int num_parents, struct clk *p)
+{
+ int i;
+
+ for (i = 0; i < num_parents; i++) {
+ if (parents[i].src == p)
+ return parents[i].sel;
+ }
+
+ return -EINVAL;
+}
+
+/* Convenience wrapper: look @p up in @mux's own parent list. */
+static int mux_parent_to_src_sel(struct mux_clk *mux, struct clk *p)
+{
+ return parent_to_src_sel(mux->parents, mux->num_parents, p);
+}
+
+/*
+ * Switch the mux to parent @p.  If @p is not a direct parent, try to
+ * reach it recursively through one of mux->rec_parents.  On success the
+ * framework parent pointer and cached rate are updated under the
+ * pre/post-reparent handshake so enable/prepare counts stay balanced.
+ */
+static int mux_set_parent(struct clk *c, struct clk *p)
+{
+ struct mux_clk *mux = to_mux_clk(c);
+ int sel = mux_parent_to_src_sel(mux, p);
+ struct clk *old_parent;
+ int rc = 0, i;
+ unsigned long flags;
+
+ if (sel < 0 && mux->rec_parents) {
+ for (i = 0; i < mux->num_rec_parents; i++) {
+ rc = clk_set_parent(mux->rec_parents[i], p);
+ if (!rc) {
+ /*
+ * This is necessary to ensure prepare/enable
+ * counts get propagated correctly.
+ */
+ p = mux->rec_parents[i];
+ sel = mux_parent_to_src_sel(mux, p);
+ break;
+ }
+ }
+ }
+
+ if (sel < 0)
+ return sel;
+
+ rc = __clk_pre_reparent(c, p, &flags);
+ if (rc)
+ goto out;
+
+ rc = mux->ops->set_mux_sel(mux, sel);
+ if (rc)
+ goto set_fail;
+
+ old_parent = c->parent;
+ c->parent = p;
+ c->rate = clk_get_rate(p);
+ __clk_post_reparent(c, old_parent, &flags);
+
+ return 0;
+
+set_fail:
+ /* HW switch failed: release the ref taken on the new parent. */
+ __clk_post_reparent(c, p, &flags);
+out:
+ return rc;
+}
+
+/*
+ * Round @rate to the best rate achievable through any parent.
+ * Returns -EINVAL if no parent can produce a usable rate.
+ */
+static long mux_round_rate(struct clk *c, unsigned long rate)
+{
+ struct mux_clk *mux = to_mux_clk(c);
+ int i;
+ unsigned long prate, rrate = 0;
+
+ for (i = 0; i < mux->num_parents; i++) {
+ prate = clk_round_rate(mux->parents[i].src, rate);
+ if (is_better_rate(rate, rrate, prate))
+ rrate = prate;
+ }
+ if (!rrate)
+ return -EINVAL;
+
+ return rrate;
+}
+
+/*
+ * Set @rate by finding a parent that can produce it exactly, parking the
+ * mux on its safe parent while the chosen parent changes rate, then
+ * reparenting.  On failure, both the parent rate and the mux selection
+ * are rolled back.
+ */
+static int mux_set_rate(struct clk *c, unsigned long rate)
+{
+ struct mux_clk *mux = to_mux_clk(c);
+ struct clk *new_parent = NULL;
+ int rc = 0, i;
+ unsigned long new_par_curr_rate;
+ unsigned long flags;
+
+ /* Only parents that hit @rate exactly are acceptable. */
+ for (i = 0; i < mux->num_parents; i++) {
+ if (clk_round_rate(mux->parents[i].src, rate) == rate) {
+ new_parent = mux->parents[i].src;
+ break;
+ }
+ }
+ if (new_parent == NULL)
+ return -EINVAL;
+
+ /*
+ * Switch to safe parent since the old and new parent might be the
+ * same and the parent might temporarily turn off while switching
+ * rates.
+ */
+ if (mux->safe_sel >= 0) {
+ /*
+ * Some mux implementations might switch to/from a low power
+ * parent as part of their disable/enable ops. Grab the
+ * enable lock to avoid racing with these implementations.
+ */
+ spin_lock_irqsave(&c->lock, flags);
+ rc = mux->ops->set_mux_sel(mux, mux->safe_sel);
+ spin_unlock_irqrestore(&c->lock, flags);
+ }
+ if (rc)
+ return rc;
+
+ /* Remember the old rate so we can roll back on failure. */
+ new_par_curr_rate = clk_get_rate(new_parent);
+ rc = clk_set_rate(new_parent, rate);
+ if (rc)
+ goto set_rate_fail;
+
+ rc = mux_set_parent(c, new_parent);
+ if (rc)
+ goto set_par_fail;
+
+ return 0;
+
+set_par_fail:
+ clk_set_rate(new_parent, new_par_curr_rate);
+set_rate_fail:
+ WARN(mux->ops->set_mux_sel(mux,
+ mux_parent_to_src_sel(mux, c->parent)),
+ "Set rate failed for %s. Also in bad state!\n", c->dbg_name);
+ return rc;
+}
+
+/* Enable hook: delegate to the mux implementation if it has one. */
+static int mux_enable(struct clk *c)
+{
+ struct mux_clk *mux = to_mux_clk(c);
+ if (mux->ops->enable)
+ return mux->ops->enable(mux);
+ return 0;
+}
+
+/* Disable hook: delegate to the mux implementation if it has one. */
+static void mux_disable(struct clk *c)
+{
+ struct mux_clk *mux = to_mux_clk(c);
+ if (mux->ops->disable)
+ return mux->ops->disable(mux);
+}
+
+/*
+ * Read back the hardware mux selection and translate it to a parent
+ * clock pointer; NULL if the selection matches no known parent.
+ */
+static struct clk *mux_get_parent(struct clk *c)
+{
+ struct mux_clk *mux = to_mux_clk(c);
+ int sel = mux->ops->get_mux_sel(mux);
+ int i;
+
+ for (i = 0; i < mux->num_parents; i++) {
+ if (mux->parents[i].sel == sel)
+ return mux->parents[i].src;
+ }
+
+ /* Unfamiliar parent. */
+ return NULL;
+}
+
+/*
+ * Handoff: cache the boot-time rate and safe-parent selection, then
+ * report the enable state (or "disabled" for gateless muxes).
+ */
+static enum handoff mux_handoff(struct clk *c)
+{
+ struct mux_clk *mux = to_mux_clk(c);
+
+ c->rate = clk_get_rate(c->parent);
+ mux->safe_sel = mux_parent_to_src_sel(mux, mux->safe_parent);
+
+ if (mux->en_mask && mux->ops && mux->ops->is_enabled)
+ return mux->ops->is_enabled(mux)
+ ? HANDOFF_ENABLED_CLK
+ : HANDOFF_DISABLED_CLK;
+
+ /*
+ * If this function returns 'enabled' even when the clock downstream
+ * of this clock is disabled, then handoff code will unnecessarily
+ * enable the current parent of this clock. If this function always
+ * returns 'disabled' and a clock downstream is on, the clock handoff
+ * code will bump up the ref count for this clock and its current
+ * parent as necessary. So, clocks without an actual HW gate can
+ * always return disabled.
+ */
+ return HANDOFF_DISABLED_CLK;
+}
+
+/* Debugfs helper: expose the mux's register set if the ops support it. */
+static void __iomem *mux_clk_list_registers(struct clk *c, int n,
+ struct clk_register_data **regs, u32 *size)
+{
+ struct mux_clk *mux = to_mux_clk(c);
+
+ if (mux->ops && mux->ops->list_registers)
+ return mux->ops->list_registers(mux, n, regs, size);
+
+ return ERR_PTR(-EINVAL);
+}
+
+/* clk_ops for generic hardware muxes. */
+struct clk_ops clk_ops_gen_mux = {
+ .enable = mux_enable,
+ .disable = mux_disable,
+ .set_parent = mux_set_parent,
+ .round_rate = mux_round_rate,
+ .set_rate = mux_set_rate,
+ .handoff = mux_handoff,
+ .get_parent = mux_get_parent,
+ .list_registers = mux_clk_list_registers,
+};
+
+/* ==================== Divider clock ==================== */
+
+/*
+ * Core divider rounding: scan divider values in [min_div, max_div],
+ * ask @parent what it can produce for each, and pick the best
+ * achievable output rate.  Optionally returns the winning divider and
+ * parent rate through @best_div / @best_prate.
+ * Returns the rounded rate, or -EINVAL if nothing is achievable.
+ */
+static long __div_round_rate(struct div_data *data, unsigned long rate,
+ struct clk *parent, unsigned int *best_div, unsigned long *best_prate)
+{
+ unsigned int div, min_div, max_div, _best_div = 1;
+ unsigned long prate, _best_prate = 0, rrate = 0, req_prate, actual_rate;
+ unsigned int numer;
+
+ /* Guard against rate == 0 (would divide by zero below). */
+ rate = max(rate, 1UL);
+
+ min_div = max(data->min_div, 1U);
+ max_div = min(data->max_div, (unsigned int) (ULONG_MAX / rate));
+
+ /*
+ * div values are doubled for half dividers.
+ * Adjust for that by picking a numer of 2.
+ */
+ numer = data->is_half_divider ? 2 : 1;
+
+ for (div = min_div; div <= max_div; div++) {
+ req_prate = mult_frac(rate, div, numer);
+ prate = clk_round_rate(parent, req_prate);
+ if (IS_ERR_VALUE(prate))
+ break;
+
+ actual_rate = mult_frac(prate, numer, div);
+ if (is_better_rate(rate, rrate, actual_rate)) {
+ rrate = actual_rate;
+ _best_div = div;
+ _best_prate = prate;
+ }
+
+ /*
+ * Trying higher dividers is only going to ask the parent for
+ * a higher rate. If it can't even output a rate higher than
+ * the one we request for this divider, the parent is not
+ * going to be able to output an even higher rate required
+ * for a higher divider. So, stop trying higher dividers.
+ */
+ if (actual_rate < rate)
+ break;
+
+ if (rrate <= rate + data->rate_margin)
+ break;
+ }
+
+ if (!rrate)
+ return -EINVAL;
+ if (best_div)
+ *best_div = _best_div;
+ if (best_prate)
+ *best_prate = _best_prate;
+
+ return rrate;
+}
+
+/* clk_ops round_rate hook: round against the current parent. */
+static long div_round_rate(struct clk *c, unsigned long rate)
+{
+ struct div_clk *d = to_div_clk(c);
+
+ return __div_round_rate(&d->data, rate, c->parent, NULL, NULL);
+}
+
+/*
+ * Set the divider output rate.  Ordering matters to avoid an
+ * intermediate overclock: the divider is raised before the parent
+ * speeds up, and lowered only after the parent slows down.  Full
+ * rollback on failure.
+ *
+ * Fix: @div must be unsigned int to match __div_round_rate()'s
+ * best_div out-parameter (passing &int where unsigned int * is
+ * expected is an incompatible-pointer-type violation).
+ */
+static int div_set_rate(struct clk *c, unsigned long rate)
+{
+ struct div_clk *d = to_div_clk(c);
+ unsigned int div;
+ int rc = 0;
+ long rrate, old_prate, new_prate;
+ struct div_data *data = &d->data;
+
+ rrate = __div_round_rate(data, rate, c->parent, &div, &new_prate);
+ if (rrate != rate)
+ return -EINVAL;
+
+ /*
+ * For fixed divider clock we don't want to return an error if the
+ * requested rate matches the achievable rate. So, don't check for
+ * !d->ops and return an error. __div_round_rate() ensures div ==
+ * d->div if !d->ops.
+ */
+ if (div > data->div)
+ rc = d->ops->set_div(d, div);
+ if (rc)
+ return rc;
+
+ old_prate = clk_get_rate(c->parent);
+ rc = clk_set_rate(c->parent, new_prate);
+ if (rc)
+ goto set_rate_fail;
+
+ if (div < data->div)
+ rc = d->ops->set_div(d, div);
+ if (rc)
+ goto div_dec_fail;
+
+ data->div = div;
+
+ return 0;
+
+div_dec_fail:
+ WARN(clk_set_rate(c->parent, old_prate),
+ "Set rate failed for %s. Also in bad state!\n", c->dbg_name);
+set_rate_fail:
+ if (div > data->div)
+ WARN(d->ops->set_div(d, data->div),
+ "Set rate failed for %s. Also in bad state!\n",
+ c->dbg_name);
+ return rc;
+}
+
+/* Enable hook: delegate to divider implementation ops when present. */
+static int div_enable(struct clk *c)
+{
+ struct div_clk *d = to_div_clk(c);
+ if (d->ops && d->ops->enable)
+ return d->ops->enable(d);
+ return 0;
+}
+
+/* Disable hook: delegate to divider implementation ops when present. */
+static void div_disable(struct clk *c)
+{
+ struct div_clk *d = to_div_clk(c);
+ if (d->ops && d->ops->disable)
+ return d->ops->disable(d);
+}
+
+/*
+ * Handoff: read the hardware divider (clamped to >= 1), cache the
+ * resulting rate, and pin min/max for dividers that cannot be changed.
+ */
+static enum handoff div_handoff(struct clk *c)
+{
+ struct div_clk *d = to_div_clk(c);
+ unsigned int div = d->data.div;
+
+ if (d->ops && d->ops->get_div)
+ div = max(d->ops->get_div(d), 1);
+ div = max(div, 1U);
+ c->rate = clk_get_rate(c->parent) / div;
+
+ /* No set_div op => treat as a fixed divider at the current value. */
+ if (!d->ops || !d->ops->set_div)
+ d->data.min_div = d->data.max_div = div;
+ d->data.div = div;
+
+ if (d->en_mask && d->ops && d->ops->is_enabled)
+ return d->ops->is_enabled(d)
+ ? HANDOFF_ENABLED_CLK
+ : HANDOFF_DISABLED_CLK;
+
+ /*
+ * If this function returns 'enabled' even when the clock downstream
+ * of this clock is disabled, then handoff code will unnecessarily
+ * enable the current parent of this clock. If this function always
+ * returns 'disabled' and a clock downstream is on, the clock handoff
+ * code will bump up the ref count for this clock and its current
+ * parent as necessary. So, clocks without an actual HW gate can
+ * always return disabled.
+ */
+ return HANDOFF_DISABLED_CLK;
+}
+
+/* Debugfs helper: expose the divider's register set if supported. */
+static void __iomem *div_clk_list_registers(struct clk *c, int n,
+ struct clk_register_data **regs, u32 *size)
+{
+ struct div_clk *d = to_div_clk(c);
+
+ if (d->ops && d->ops->list_registers)
+ return d->ops->list_registers(d, n, regs, size);
+
+ return ERR_PTR(-EINVAL);
+}
+
+/* clk_ops for generic dividers that may drive their parent's rate. */
+struct clk_ops clk_ops_div = {
+ .enable = div_enable,
+ .disable = div_disable,
+ .round_rate = div_round_rate,
+ .set_rate = div_set_rate,
+ .handoff = div_handoff,
+ .list_registers = div_clk_list_registers,
+};
+
+/*
+ * Round @rate for a "slave" divider: the parent rate is taken as fixed
+ * and only the divider is chosen (closest match, clamped to
+ * [min_div, max_div]).
+ * NOTE(review): if data->min_div is 0 and the closest divider rounds to
+ * 0, the final division would be by zero -- presumably min_div >= 1 is
+ * guaranteed by the clock data; verify against users.
+ */
+static long __slave_div_round_rate(struct clk *c, unsigned long rate,
+ int *best_div)
+{
+ struct div_clk *d = to_div_clk(c);
+ unsigned int div, min_div, max_div;
+ long p_rate;
+
+ rate = max(rate, 1UL);
+
+ min_div = d->data.min_div;
+ max_div = d->data.max_div;
+
+ p_rate = clk_get_rate(c->parent);
+ div = DIV_ROUND_CLOSEST(p_rate, rate);
+ div = max(div, min_div);
+ div = min(div, max_div);
+ if (best_div)
+ *best_div = div;
+
+ return p_rate / div;
+}
+
+/* clk_ops round_rate hook for slave dividers. */
+static long slave_div_round_rate(struct clk *c, unsigned long rate)
+{
+ return __slave_div_round_rate(c, rate, NULL);
+}
+
+/*
+ * Set a slave divider's rate without touching the parent: only an
+ * exactly-achievable rate is accepted, and the hardware is written
+ * only when the divider actually changes.
+ */
+static int slave_div_set_rate(struct clk *c, unsigned long rate)
+{
+ struct div_clk *d = to_div_clk(c);
+ int div, rc = 0;
+ long rrate;
+
+ rrate = __slave_div_round_rate(c, rate, &div);
+ if (rrate != rate)
+ return -EINVAL;
+
+ if (div == d->data.div)
+ return 0;
+
+ /*
+ * For fixed divider clock we don't want to return an error if the
+ * requested rate matches the achievable rate. So, don't check for
+ * !d->ops and return an error. __slave_div_round_rate() ensures
+ * div == d->data.div if !d->ops.
+ */
+ rc = d->ops->set_div(d, div);
+ if (rc)
+ return rc;
+
+ d->data.div = div;
+
+ return 0;
+}
+
+/* Report parent rate divided by the cached divider (0 if unset). */
+static unsigned long slave_div_get_rate(struct clk *c)
+{
+ struct div_clk *d = to_div_clk(c);
+ if (!d->data.div)
+ return 0;
+ return clk_get_rate(c->parent) / d->data.div;
+}
+
+/* clk_ops for dividers that never change their parent's rate. */
+struct clk_ops clk_ops_slave_div = {
+ .enable = div_enable,
+ .disable = div_disable,
+ .round_rate = slave_div_round_rate,
+ .set_rate = slave_div_set_rate,
+ .get_rate = slave_div_get_rate,
+ .handoff = div_handoff,
+ .list_registers = div_clk_list_registers,
+};
+
+
+/**
+ * External clock
+ * Some clock controllers have input clock signals that come from outside the
+ * clock controller. Such an input clock signal might then be used as a source
+ * for several clocks inside the clock controller. This external clock
+ * implementation models the input clock signal by simply passing requests on
+ * to the clock's parent, the original external clock source. The driver for
+ * the clock controller should clk_get() the original external clock in its
+ * probe function and set it as a parent of this external clock.
+ */
+
+/* Pass-through round_rate: defer entirely to the parent. */
+long parent_round_rate(struct clk *c, unsigned long rate)
+{
+ return clk_round_rate(c->parent, rate);
+}
+
+/* Pass-through set_rate: defer entirely to the parent. */
+static int ext_set_rate(struct clk *c, unsigned long rate)
+{
+ return clk_set_rate(c->parent, rate);
+}
+
+/* Pass-through get_rate: report the parent's rate. */
+unsigned long parent_get_rate(struct clk *c)
+{
+ return clk_get_rate(c->parent);
+}
+
+/* Pass-through set_parent: reparent the underlying external source. */
+static int ext_set_parent(struct clk *c, struct clk *p)
+{
+ return clk_set_parent(c->parent, p);
+}
+
+/* Handoff: cache parent rate; no HW gate of our own to report. */
+static enum handoff ext_handoff(struct clk *c)
+{
+ c->rate = clk_get_rate(c->parent);
+ /* Similar reasoning applied in div_handoff, see comment there. */
+ return HANDOFF_DISABLED_CLK;
+}
+
+/* clk_ops for external input clocks (see comment block above). */
+struct clk_ops clk_ops_ext = {
+ .handoff = ext_handoff,
+ .round_rate = parent_round_rate,
+ .set_rate = ext_set_rate,
+ .get_rate = parent_get_rate,
+ .set_parent = ext_set_parent,
+};
+
+
+/* ==================== Mux_div clock ==================== */
+
+/* Enable hook: delegate to the mux-div implementation if present. */
+static int mux_div_clk_enable(struct clk *c)
+{
+ struct mux_div_clk *md = to_mux_div_clk(c);
+
+ if (md->ops->enable)
+ return md->ops->enable(md);
+ return 0;
+}
+
+/* Disable hook: delegate to the mux-div implementation if present. */
+static void mux_div_clk_disable(struct clk *c)
+{
+ struct mux_div_clk *md = to_mux_div_clk(c);
+
+ if (md->ops->disable)
+ return md->ops->disable(md);
+}
+
+/*
+ * Round @rate across every (parent, divider) combination, reusing
+ * __div_round_rate() per parent.  Optionally returns the winning
+ * parent, divider and parent rate.  Stops early once the result is
+ * within rate_margin of the request.
+ */
+static long __mux_div_round_rate(struct clk *c, unsigned long rate,
+ struct clk **best_parent, int *best_div, unsigned long *best_prate)
+{
+ struct mux_div_clk *md = to_mux_div_clk(c);
+ unsigned int i;
+ unsigned long rrate, best = 0, _best_div = 0, _best_prate = 0;
+ struct clk *_best_parent = 0;
+
+ for (i = 0; i < md->num_parents; i++) {
+ int div;
+ unsigned long prate;
+
+ rrate = __div_round_rate(&md->data, rate, md->parents[i].src,
+ &div, &prate);
+
+ if (is_better_rate(rate, best, rrate)) {
+ best = rrate;
+ _best_div = div;
+ _best_prate = prate;
+ _best_parent = md->parents[i].src;
+ }
+
+ if (rate <= rrate && rrate <= rate + md->data.rate_margin)
+ break;
+ }
+
+ if (best_div)
+ *best_div = _best_div;
+ if (best_prate)
+ *best_prate = _best_prate;
+ if (best_parent)
+ *best_parent = _best_parent;
+
+ if (best)
+ return best;
+ return -EINVAL;
+}
+
+/* clk_ops round_rate hook for mux-div clocks. */
+static long mux_div_clk_round_rate(struct clk *c, unsigned long rate)
+{
+ return __mux_div_round_rate(c, rate, NULL, NULL, NULL);
+}
+
+/* requires enable lock to be held */
+static int __set_src_div(struct mux_div_clk *md, struct clk *parent, u32 div)
+{
+ u32 rc = 0, src_sel;
+
+ src_sel = parent_to_src_sel(md->parents, md->num_parents, parent);
+ /*
+ * If the clock is disabled, don't change to the new settings until
+ * the clock is reenabled
+ */
+ if (md->c.count)
+ rc = md->ops->set_src_div(md, src_sel, div);
+ if (!rc) {
+ md->data.div = div;
+ md->src_sel = src_sel;
+ }
+
+ return rc;
+}
+
+static int set_src_div(struct mux_div_clk *md, struct clk *parent, u32 div)
+{
+ unsigned long flags;
+ u32 rc;
+
+ spin_lock_irqsave(&md->c.lock, flags);
+ rc = __set_src_div(md, parent, div);
+ spin_unlock_irqrestore(&md->c.lock, flags);
+
+ return rc;
+}
+
+/* Must be called after handoff to ensure parent clock rates are initialized */
+static int safe_parent_init_once(struct clk *c)
+{
+ unsigned long rrate;
+ u32 best_div;
+ struct clk *best_parent;
+ struct mux_div_clk *md = to_mux_div_clk(c);
+
+ if (IS_ERR(md->safe_parent))
+ return -EINVAL;
+ if (!md->safe_freq || md->safe_parent)
+ return 0;
+
+ rrate = __mux_div_round_rate(c, md->safe_freq, &best_parent,
+ &best_div, NULL);
+
+ if (rrate == md->safe_freq) {
+ md->safe_div = best_div;
+ md->safe_parent = best_parent;
+ } else {
+ md->safe_parent = ERR_PTR(-EINVAL);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Set rate on a mux-div: choose the best (parent, div) pair, park on
+ * the safe source (or pre-raise the divider) so no intermediate
+ * overclock escapes, set the new parent rate, then atomically switch
+ * source and divider.  All steps are rolled back on failure.
+ */
+static int mux_div_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ struct mux_div_clk *md = to_mux_div_clk(c);
+ unsigned long flags, rrate;
+ unsigned long new_prate, old_prate;
+ struct clk *old_parent, *new_parent;
+ u32 new_div, old_div;
+ int rc;
+
+ rc = safe_parent_init_once(c);
+ if (rc)
+ return rc;
+
+ rrate = __mux_div_round_rate(c, rate, &new_parent, &new_div,
+ &new_prate);
+ if (rrate != rate)
+ return -EINVAL;
+
+ /* Snapshot current state for rollback. */
+ old_parent = c->parent;
+ old_div = md->data.div;
+ old_prate = clk_get_rate(c->parent);
+
+ /* Refer to the description of safe_freq in clock-generic.h */
+ if (md->safe_freq)
+ rc = set_src_div(md, md->safe_parent, md->safe_div);
+
+ else if (new_parent == old_parent && new_div >= old_div) {
+ /*
+ * If both the parent_rate and divider changes, there may be an
+ * intermediate frequency generated. Ensure this intermediate
+ * frequency is less than both the new rate and previous rate.
+ */
+ rc = set_src_div(md, old_parent, new_div);
+ }
+ if (rc)
+ return rc;
+
+ rc = clk_set_rate(new_parent, new_prate);
+ if (rc) {
+ pr_err("failed to set %s to %ld\n",
+ new_parent->dbg_name, new_prate);
+ goto err_set_rate;
+ }
+
+ rc = __clk_pre_reparent(c, new_parent, &flags);
+ if (rc)
+ goto err_pre_reparent;
+
+ /* Set divider and mux src atomically */
+ rc = __set_src_div(md, new_parent, new_div);
+ if (rc)
+ goto err_set_src_div;
+
+ c->parent = new_parent;
+
+ __clk_post_reparent(c, old_parent, &flags);
+ return 0;
+
+err_set_src_div:
+ /* Not switching to new_parent, so disable it */
+ __clk_post_reparent(c, new_parent, &flags);
+err_pre_reparent:
+ rc = clk_set_rate(old_parent, old_prate);
+ WARN(rc, "%s: error changing parent (%s) rate to %ld\n",
+ c->dbg_name, old_parent->dbg_name, old_prate);
+err_set_rate:
+ rc = set_src_div(md, old_parent, old_div);
+ WARN(rc, "%s: error changing back to original div (%d) and parent (%s)\n",
+ c->dbg_name, old_div, old_parent->dbg_name);
+
+ return rc;
+}
+
+/*
+ * Read back the hardware source/divider, cache them, and translate the
+ * source select to a parent pointer (NULL if unknown).
+ */
+static struct clk *mux_div_clk_get_parent(struct clk *c)
+{
+ struct mux_div_clk *md = to_mux_div_clk(c);
+ u32 i, div, src_sel;
+
+ md->ops->get_src_div(md, &src_sel, &div);
+
+ md->data.div = div;
+ md->src_sel = src_sel;
+
+ for (i = 0; i < md->num_parents; i++) {
+ if (md->parents[i].sel == src_sel)
+ return md->parents[i].src;
+ }
+
+ return NULL;
+}
+
+/*
+ * Handoff: derive the boot-time rate from parent rate and divider
+ * (accounting for half dividers), then report the HW enable state.
+ */
+static enum handoff mux_div_clk_handoff(struct clk *c)
+{
+ struct mux_div_clk *md = to_mux_div_clk(c);
+ unsigned long parent_rate;
+ unsigned int numer;
+
+ parent_rate = clk_get_rate(c->parent);
+ if (!parent_rate)
+ return HANDOFF_DISABLED_CLK;
+ /*
+ * div values are doubled for half dividers.
+ * Adjust for that by picking a numer of 2.
+ */
+ numer = md->data.is_half_divider ? 2 : 1;
+
+ if (md->data.div) {
+ c->rate = mult_frac(parent_rate, numer, md->data.div);
+ } else {
+ c->rate = 0;
+ return HANDOFF_DISABLED_CLK;
+ }
+
+ if (!md->ops->is_enabled)
+ return HANDOFF_DISABLED_CLK;
+ if (md->ops->is_enabled(md))
+ return HANDOFF_ENABLED_CLK;
+ return HANDOFF_DISABLED_CLK;
+}
+
+/* Debugfs helper: expose the mux-div register set if supported. */
+static void __iomem *mux_div_clk_list_registers(struct clk *c, int n,
+ struct clk_register_data **regs, u32 *size)
+{
+ struct mux_div_clk *md = to_mux_div_clk(c);
+
+ if (md->ops && md->ops->list_registers)
+ return md->ops->list_registers(md, n , regs, size);
+
+ return ERR_PTR(-EINVAL);
+}
+
+/* clk_ops for combined mux + divider clocks. */
+struct clk_ops clk_ops_mux_div_clk = {
+ .enable = mux_div_clk_enable,
+ .disable = mux_div_clk_disable,
+ .set_rate = mux_div_clk_set_rate,
+ .round_rate = mux_div_clk_round_rate,
+ .get_parent = mux_div_clk_get_parent,
+ .handoff = mux_div_clk_handoff,
+ .list_registers = mux_div_clk_list_registers,
+};
diff --git a/drivers/clk/qcom/clock-krait.c b/drivers/clk/qcom/clock-krait.c
new file mode 100644
index 000000000000..de69c5a1e0c8
--- /dev/null
+++ b/drivers/clk/qcom/clock-krait.c
@@ -0,0 +1,523 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <soc/qcom/avs.h>
+#include <soc/qcom/clock-krait.h>
+
+#include <linux/clk.h>
+#include <mach/msm-krait-l2-accessors.h>
+
+static DEFINE_SPINLOCK(kpss_clock_reg_lock);
+
+#define LPL_SHIFT 8
+/*
+ * Write a mux selection into the KPSS L2 indirect register; mux->priv
+ * non-NULL means the low-power-leakage field (shifted by LPL_SHIFT)
+ * mirrors the same selection.
+ */
+static void __kpss_mux_set_sel(struct mux_clk *mux, int sel)
+{
+ unsigned long flags;
+ u32 regval;
+
+ spin_lock_irqsave(&kpss_clock_reg_lock, flags);
+ regval = get_l2_indirect_reg(mux->offset);
+ regval &= ~(mux->mask << mux->shift);
+ regval |= (sel & mux->mask) << mux->shift;
+ if (mux->priv) {
+ regval &= ~(mux->mask << (mux->shift + LPL_SHIFT));
+ regval |= (sel & mux->mask) << (mux->shift + LPL_SHIFT);
+ }
+ set_l2_indirect_reg(mux->offset, regval);
+ spin_unlock_irqrestore(&kpss_clock_reg_lock, flags);
+
+ /* Wait for switch to complete. */
+ mb();
+ udelay(1);
+}
+/*
+ * Record the requested selection; only touch hardware while the clock
+ * is enabled (the cached value is applied again in kpss_mux_enable()).
+ */
+static int kpss_mux_set_sel(struct mux_clk *mux, int sel)
+{
+ mux->en_mask = sel;
+ if (mux->c.count)
+ __kpss_mux_set_sel(mux, sel);
+ return 0;
+}
+
+/* Read the current selection from hardware and cache it in en_mask. */
+static int kpss_mux_get_sel(struct mux_clk *mux)
+{
+ u32 sel;
+
+ sel = get_l2_indirect_reg(mux->offset);
+ sel >>= mux->shift;
+ sel &= mux->mask;
+ mux->en_mask = sel;
+
+ return sel;
+}
+
+/* Enable: apply the last requested selection. */
+static int kpss_mux_enable(struct mux_clk *mux)
+{
+ __kpss_mux_set_sel(mux, mux->en_mask);
+ return 0;
+}
+
+/* Disable: park the mux on its safe (low power) source. */
+static void kpss_mux_disable(struct mux_clk *mux)
+{
+ __kpss_mux_set_sel(mux, mux->safe_sel);
+}
+
+/* Mux ops backing the KPSS CPU/L2 source muxes. */
+struct clk_mux_ops clk_mux_ops_kpss = {
+ .enable = kpss_mux_enable,
+ .disable = kpss_mux_disable,
+ .set_mux_sel = kpss_mux_set_sel,
+ .get_mux_sel = kpss_mux_get_sel,
+};
+
+/*
+ * The divider can divide by 2, 4, 6 and 8. But we only really need div-2. So
+ * force it to div-2 during handoff and treat it like a fixed div-2 clock.
+ */
+static int kpss_div2_get_div(struct div_clk *div)
+{
+ unsigned long flags;
+ u32 regval;
+ int val;
+
+ spin_lock_irqsave(&kpss_clock_reg_lock, flags);
+ regval = get_l2_indirect_reg(div->offset);
+ val = (regval >> div->shift) & div->mask;
+ /* Clearing the field forces the hardware to div-2 (encoded as 0). */
+ regval &= ~(div->mask << div->shift);
+ if (div->priv)
+ regval &= ~(div->mask << (div->shift + LPL_SHIFT));
+ set_l2_indirect_reg(div->offset, regval);
+ spin_unlock_irqrestore(&kpss_clock_reg_lock, flags);
+
+ /* Hardware encodes div-N as N/2 - 1. */
+ val = (val + 1) * 2;
+ WARN(val != 2, "Divider %s was configured to div-%d instead of 2!\n",
+ div->c.dbg_name, val);
+
+ return 2;
+}
+
+/* Fixed div-2 ops: only get_div is needed (see comment above). */
+struct clk_div_ops clk_div_ops_kpss_div2 = {
+ .get_div = kpss_div2_get_div,
+};
+
+#define LOCK_BIT BIT(16)
+
+/* Initialize a HFPLL at a given rate and enable it. */
+/* One-time register setup (integer mode, M/N, VCO, droop); idempotent. */
+static void __hfpll_clk_init_once(struct clk *c)
+{
+ struct hfpll_clk *h = to_hfpll_clk(c);
+ struct hfpll_data const *hd = h->d;
+
+ if (likely(h->init_done))
+ return;
+
+ /* Configure PLL parameters for integer mode. */
+ if (hd->config_val)
+ writel_relaxed(hd->config_val, h->base + hd->config_offset);
+ writel_relaxed(0, h->base + hd->m_offset);
+ writel_relaxed(1, h->base + hd->n_offset);
+
+ if (hd->user_offset) {
+ u32 regval = hd->user_val;
+ unsigned long rate;
+
+ rate = readl_relaxed(h->base + hd->l_offset) * h->src_rate;
+
+ /* Pick the right VCO. */
+ if (hd->user_vco_mask && rate > hd->low_vco_max_rate)
+ regval |= hd->user_vco_mask;
+ writel_relaxed(regval, h->base + hd->user_offset);
+ }
+
+ if (hd->droop_offset)
+ writel_relaxed(hd->droop_val, h->base + hd->droop_offset);
+
+ h->init_done = true;
+}
+
+/* Enable an already-configured HFPLL. */
+/*
+ * Bring-up sequence: leave bypass, de-assert reset, wait for lock,
+ * then enable the output.
+ * NOTE(review): the lock poll has no timeout; a PLL that never locks
+ * hangs here -- confirm acceptable for this platform.
+ */
+static int hfpll_clk_enable(struct clk *c)
+{
+ struct hfpll_clk *h = to_hfpll_clk(c);
+ struct hfpll_data const *hd = h->d;
+
+ if (!h->base)
+ return -ENODEV;
+
+ __hfpll_clk_init_once(c);
+
+ /* Disable PLL bypass mode. */
+ writel_relaxed(0x2, h->base + hd->mode_offset);
+
+ /*
+ * H/W requires a 5us delay between disabling the bypass and
+ * de-asserting the reset. Delay 10us just to be safe.
+ */
+ mb();
+ udelay(10);
+
+ /* De-assert active-low PLL reset. */
+ writel_relaxed(0x6, h->base + hd->mode_offset);
+
+ /* Wait for PLL to lock. */
+ if (hd->status_offset) {
+ while (!(readl_relaxed(h->base + hd->status_offset) & LOCK_BIT))
+ ;
+ } else {
+ mb();
+ udelay(60);
+ }
+
+ /* Enable PLL output. */
+ writel_relaxed(0x7, h->base + hd->mode_offset);
+
+ /* Make sure the enable is done before returning. */
+ mb();
+
+ return 0;
+}
+
+/* Gate the PLL: output off, bypass on, reset asserted. */
+static void hfpll_clk_disable(struct clk *c)
+{
+ struct hfpll_clk *h = to_hfpll_clk(c);
+ struct hfpll_data const *hd = h->d;
+
+ /*
+ * Disable the PLL output, disable test mode, enable the bypass mode,
+ * and assert the reset.
+ */
+ writel_relaxed(0, h->base + hd->mode_offset);
+}
+
+/*
+ * Round to the nearest integer multiple of the source rate within
+ * [min_rate, max_rate]; returns 0 before handoff caches src_rate.
+ */
+static long hfpll_clk_round_rate(struct clk *c, unsigned long rate)
+{
+ struct hfpll_clk *h = to_hfpll_clk(c);
+ struct hfpll_data const *hd = h->d;
+ unsigned long rrate;
+
+ if (!h->src_rate)
+ return 0;
+
+ rate = max(rate, hd->min_rate);
+ rate = min(rate, hd->max_rate);
+
+ rrate = DIV_ROUND_UP(rate, h->src_rate) * h->src_rate;
+ if (rrate > hd->max_rate)
+ rrate -= h->src_rate;
+
+ return rrate;
+}
+
+/*
+ * For optimization reasons, assumes no downstream clocks are actively using
+ * it.
+ */
+static int hfpll_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ struct hfpll_clk *h = to_hfpll_clk(c);
+ struct hfpll_data const *hd = h->d;
+ unsigned long flags;
+ u32 l_val;
+
+ if (!h->base)
+ return -ENODEV;
+
+ if (rate != hfpll_clk_round_rate(c, rate))
+ return -EINVAL;
+
+ /* L value = output rate as a multiple of the reference. */
+ l_val = rate / h->src_rate;
+
+ spin_lock_irqsave(&c->lock, flags);
+
+ /* PLL must be off while L and VCO are reprogrammed. */
+ if (c->count)
+ hfpll_clk_disable(c);
+
+ /* Pick the right VCO. */
+ if (hd->user_offset && hd->user_vco_mask) {
+ u32 regval;
+ regval = readl_relaxed(h->base + hd->user_offset);
+ if (rate <= hd->low_vco_max_rate)
+ regval &= ~hd->user_vco_mask;
+ else
+ regval |= hd->user_vco_mask;
+ writel_relaxed(regval, h->base + hd->user_offset);
+ }
+
+ writel_relaxed(l_val, h->base + hd->l_offset);
+
+ if (c->count)
+ hfpll_clk_enable(c);
+
+ spin_unlock_irqrestore(&c->lock, flags);
+
+ return 0;
+}
+
+/*
+ * Handoff: cache the reference rate, compute the running rate from the
+ * L register, and sanity-check mode/lock state.  A PLL found in a bad
+ * state is reinitialized and reported disabled.
+ */
+static enum handoff hfpll_clk_handoff(struct clk *c)
+{
+ struct hfpll_clk *h = to_hfpll_clk(c);
+ struct hfpll_data const *hd = h->d;
+ u32 l_val, mode;
+
+ if (!hd)
+ return HANDOFF_DISABLED_CLK;
+
+ if (!h->base)
+ return HANDOFF_DISABLED_CLK;
+
+ /* Assume parent rate doesn't change and cache it. */
+ h->src_rate = clk_get_rate(c->parent);
+ l_val = readl_relaxed(h->base + hd->l_offset);
+ c->rate = l_val * h->src_rate;
+
+ /* 0x7 = output enabled, reset de-asserted, bypass off. */
+ mode = readl_relaxed(h->base + hd->mode_offset) & 0x7;
+ if (mode != 0x7) {
+ __hfpll_clk_init_once(c);
+ return HANDOFF_DISABLED_CLK;
+ }
+
+ if (hd->status_offset &&
+ !(readl_relaxed(h->base + hd->status_offset) & LOCK_BIT)) {
+ WARN(1, "HFPLL %s is ON, but not locked!\n", c->dbg_name);
+ hfpll_clk_disable(c);
+ __hfpll_clk_init_once(c);
+ return HANDOFF_DISABLED_CLK;
+ }
+
+ WARN(c->rate < hd->min_rate || c->rate > hd->max_rate,
+ "HFPLL %s rate %lu outside spec!\n", c->dbg_name, c->rate);
+
+ return HANDOFF_ENABLED_CLK;
+}
+
+/* clk_ops for high-frequency PLLs feeding the Krait CPU/L2 muxes. */
+struct clk_ops clk_ops_hfpll = {
+ .enable = hfpll_clk_enable,
+ .disable = hfpll_clk_disable,
+ .round_rate = hfpll_clk_round_rate,
+ .set_rate = hfpll_clk_set_rate,
+ .handoff = hfpll_clk_handoff,
+};
+
+/* Request passed to cpu_hwcg_rw() via smp_call_function_single(). */
+struct cpu_hwcg_action {
+ bool read; /* true: query state into .enable; false: apply .enable */
+ bool enable; /* HW clock gating desired (write) / observed (read) */
+};
+
+/*
+ * Runs on the target CPU: read or modify CPMR0 bit 0, which disables
+ * hardware clock gating when set.
+ */
+static void cpu_hwcg_rw(void *info)
+{
+ struct cpu_hwcg_action *action = info;
+
+ u32 val;
+ asm volatile ("mrc p15, 7, %[cpmr0], c15, c0, 5\n\t"
+ : [cpmr0]"=r" (val));
+
+ if (action->read) {
+ /* Bit 0 set means gating is disabled. */
+ action->enable = !(val & BIT(0));
+ return;
+ }
+
+ if (action->enable)
+ val &= ~BIT(0);
+ else
+ val |= BIT(0);
+
+ asm volatile ("mcr p15, 7, %[cpmr0], c15, c0, 5\n\t"
+ : : [cpmr0]"r" (val));
+}
+
+/* Enable HW clock gating on the clock's CPU (cross-call to that core). */
+static void kpss_cpu_enable_hwcg(struct clk *c)
+{
+ struct kpss_core_clk *cpu = to_kpss_core_clk(c);
+ struct cpu_hwcg_action action = { .enable = true };
+
+ smp_call_function_single(cpu->id, cpu_hwcg_rw, &action, 1);
+}
+
+/* Disable HW clock gating on the clock's CPU. */
+static void kpss_cpu_disable_hwcg(struct clk *c)
+{
+ struct kpss_core_clk *cpu = to_kpss_core_clk(c);
+ struct cpu_hwcg_action action = { .enable = false };
+
+ smp_call_function_single(cpu->id, cpu_hwcg_rw, &action, 1);
+}
+
+/* Query HW clock gating state on the clock's CPU. */
+static int kpss_cpu_in_hwcg_mode(struct clk *c)
+{
+ struct kpss_core_clk *cpu = to_kpss_core_clk(c);
+ struct cpu_hwcg_action action = { .read = true };
+
+ smp_call_function_single(cpu->id, cpu_hwcg_rw, &action, 1);
+ return action.enable;
+}
+
+/* Handoff: cache the rate; report enabled only for online CPUs. */
+static enum handoff kpss_cpu_handoff(struct clk *c)
+{
+ struct kpss_core_clk *cpu = to_kpss_core_clk(c);
+
+ c->rate = clk_get_rate(c->parent);
+
+ /*
+ * Don't unnecessarily turn on the parents for an offline CPU and
+ * then have them turned off at late init.
+ */
+ return (cpu_online(cpu->id) ?
+ HANDOFF_ENABLED_CLK : HANDOFF_DISABLED_CLK);
+}
+
+/*
+ * Look up the AVS DSCR value for @rate in table @t.
+ * Returns 0 when @t is NULL or the rate has no entry.
+ */
+u32 find_dscr(struct avs_data *t, unsigned long rate)
+{
+ int i;
+
+ if (!t)
+ return 0;
+
+ for (i = 0; i < t->num; i++) {
+ if (t->rate[i] == rate)
+ return t->dscr[i];
+ }
+
+ return 0;
+}
+
+/* Before a rate change: suspend AVS if the current rate uses it. */
+static int kpss_cpu_pre_set_rate(struct clk *c, unsigned long new_rate)
+{
+ struct kpss_core_clk *cpu = to_kpss_core_clk(c);
+ u32 dscr = find_dscr(cpu->avs_tbl, c->rate);
+
+ if (dscr)
+ AVS_DISABLE(cpu->id);
+ return 0;
+}
+
+/* Round via the parent, capped at the core's highest fmax entry. */
+static long kpss_core_round_rate(struct clk *c, unsigned long rate)
+{
+ if (c->fmax && c->num_fmax)
+ rate = min(rate, c->fmax[c->num_fmax-1]);
+
+ return clk_round_rate(c->parent, rate);
+}
+
+/* The core clock's rate is wholly determined by its parent mux/PLL. */
+static int kpss_core_set_rate(struct clk *c, unsigned long rate)
+{
+ return clk_set_rate(c->parent, rate);
+}
+
+/* After a rate change: re-enable AVS with the new rate's DSCR, if any. */
+static void kpss_cpu_post_set_rate(struct clk *c, unsigned long old_rate)
+{
+ struct kpss_core_clk *cpu = to_kpss_core_clk(c);
+ u32 dscr = find_dscr(cpu->avs_tbl, c->rate);
+
+ /*
+ * FIXME: If AVS enable/disable needs to be done in the
+ * enable/disable op to correctly handle power collapse, then might
+ * need to grab the spinlock here.
+ */
+ if (dscr)
+ AVS_ENABLE(cpu->id, dscr);
+}
+
+/* Pass-through get_rate. */
+static unsigned long kpss_core_get_rate(struct clk *c)
+{
+ return clk_get_rate(c->parent);
+}
+
+/* Enumerate supported rates from the fmax table; -ENXIO past the end. */
+static long kpss_core_list_rate(struct clk *c, unsigned n)
+{
+ if (!c->fmax || c->num_fmax <= n)
+ return -ENXIO;
+
+ return c->fmax[n];
+}
+
+/* clk_ops for per-CPU Krait core clocks (with AVS coordination). */
+struct clk_ops clk_ops_kpss_cpu = {
+ .enable_hwcg = kpss_cpu_enable_hwcg,
+ .disable_hwcg = kpss_cpu_disable_hwcg,
+ .in_hwcg_mode = kpss_cpu_in_hwcg_mode,
+ .pre_set_rate = kpss_cpu_pre_set_rate,
+ .round_rate = kpss_core_round_rate,
+ .set_rate = kpss_core_set_rate,
+ .post_set_rate = kpss_cpu_post_set_rate,
+ .get_rate = kpss_core_get_rate,
+ .list_rate = kpss_core_list_rate,
+ .handoff = kpss_cpu_handoff,
+};
+
+#define SLPDLY_SHIFT 10
+#define SLPDLY_MASK 0x3
+/*
+ * Enable L2 HW clock gating by restoring the saved sleep-delay field
+ * (kpss_l2_disable_hwcg() stashed it in l2->l2_slp_delay).
+ */
+static void kpss_l2_enable_hwcg(struct clk *c)
+{
+ struct kpss_core_clk *l2 = to_kpss_core_clk(c);
+ u32 regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kpss_clock_reg_lock, flags);
+ regval = get_l2_indirect_reg(l2->cp15_iaddr);
+ regval &= ~(SLPDLY_MASK << SLPDLY_SHIFT);
+ regval |= l2->l2_slp_delay;
+ set_l2_indirect_reg(l2->cp15_iaddr, regval);
+ spin_unlock_irqrestore(&kpss_clock_reg_lock, flags);
+}
+
+/*
+ * Disable L2 HW clock gating: save the sleep-delay field, then set it
+ * to all-ones (the "gating off" encoding).
+ */
+static void kpss_l2_disable_hwcg(struct clk *c)
+{
+ struct kpss_core_clk *l2 = to_kpss_core_clk(c);
+ u32 regval;
+ unsigned long flags;
+
+ /*
+ * NOTE: Should not be called when HW clock gating is already
+ * disabled.
+ */
+ spin_lock_irqsave(&kpss_clock_reg_lock, flags);
+ regval = get_l2_indirect_reg(l2->cp15_iaddr);
+ l2->l2_slp_delay = regval & (SLPDLY_MASK << SLPDLY_SHIFT);
+ regval |= (SLPDLY_MASK << SLPDLY_SHIFT);
+ set_l2_indirect_reg(l2->cp15_iaddr, regval);
+ spin_unlock_irqrestore(&kpss_clock_reg_lock, flags);
+}
+
+/* Gating is active whenever the sleep-delay field is not all-ones. */
+static int kpss_l2_in_hwcg_mode(struct clk *c)
+{
+ struct kpss_core_clk *l2 = to_kpss_core_clk(c);
+ u32 regval;
+
+ regval = get_l2_indirect_reg(l2->cp15_iaddr);
+ regval >>= SLPDLY_SHIFT;
+ regval &= SLPDLY_MASK;
+ return (regval != SLPDLY_MASK);
+}
+
+/* The L2 clock is always running; just cache its rate. */
+static enum handoff kpss_l2_handoff(struct clk *c)
+{
+ c->rate = clk_get_rate(c->parent);
+ return HANDOFF_ENABLED_CLK;
+}
+
+/* clk_ops for the shared Krait L2 clock. */
+struct clk_ops clk_ops_kpss_l2 = {
+ .enable_hwcg = kpss_l2_enable_hwcg,
+ .disable_hwcg = kpss_l2_disable_hwcg,
+ .in_hwcg_mode = kpss_l2_in_hwcg_mode,
+ .round_rate = kpss_core_round_rate,
+ .set_rate = kpss_core_set_rate,
+ .get_rate = kpss_core_get_rate,
+ .list_rate = kpss_core_list_rate,
+ .handoff = kpss_l2_handoff,
+};
diff --git a/drivers/clk/qcom/clock-local2.c b/drivers/clk/qcom/clock-local2.c
new file mode 100644
index 000000000000..9839053c4798
--- /dev/null
+++ b/drivers/clk/qcom/clock-local2.c
@@ -0,0 +1,1505 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <soc/qcom/clock-local2.h>
+
+/*
+ * When enabling/disabling a clock, check the halt bit up to this number
+ * number of times (with a 1 us delay in between) before continuing.
+ */
+#define HALT_CHECK_MAX_LOOPS 500
+/* For clock without halt checking, wait this long after enables/disables. */
+#define HALT_CHECK_DELAY_US 500
+
+/*
+ * When updating an RCG configuration, check the update bit up to this number
+ * number of times (with a 1 us delay in between) before continuing.
+ */
+#define UPDATE_CHECK_MAX_LOOPS 500
+
+DEFINE_SPINLOCK(local_clock_reg_lock);
+struct clk_freq_tbl rcg_dummy_freq = F_END;
+
+#define CMD_RCGR_REG(x) (*(x)->base + (x)->cmd_rcgr_reg)
+#define CFG_RCGR_REG(x) (*(x)->base + (x)->cmd_rcgr_reg + 0x4)
+#define M_REG(x) (*(x)->base + (x)->cmd_rcgr_reg + 0x8)
+#define N_REG(x) (*(x)->base + (x)->cmd_rcgr_reg + 0xC)
+#define D_REG(x) (*(x)->base + (x)->cmd_rcgr_reg + 0x10)
+#define CBCR_REG(x) (*(x)->base + (x)->cbcr_reg)
+#define BCR_REG(x) (*(x)->base + (x)->bcr_reg)
+#define RST_REG(x) (*(x)->base + (x)->reset_reg)
+#define VOTE_REG(x) (*(x)->base + (x)->vote_reg)
+#define GATE_EN_REG(x) (*(x)->base + (x)->en_reg)
+
+/*
+ * Important clock bit positions and masks
+ */
+#define CMD_RCGR_ROOT_ENABLE_BIT BIT(1)
+#define CBCR_BRANCH_ENABLE_BIT BIT(0)
+#define CBCR_BRANCH_OFF_BIT BIT(31)
+#define CMD_RCGR_CONFIG_UPDATE_BIT BIT(0)
+#define CMD_RCGR_ROOT_STATUS_BIT BIT(31)
+#define BCR_BLK_ARES_BIT BIT(0)
+#define CBCR_HW_CTL_BIT BIT(1)
+#define CFG_RCGR_DIV_MASK BM(4, 0)
+#define CFG_RCGR_SRC_SEL_MASK BM(10, 8)
+#define MND_MODE_MASK BM(13, 12)
+#define MND_DUAL_EDGE_MODE_BVAL BVAL(13, 12, 0x2)
+#define CMD_RCGR_CONFIG_DIRTY_MASK BM(7, 4)
+#define CBCR_CDIV_LSB 16
+#define CBCR_CDIV_MSB 19
+
+enum branch_state {
+ BRANCH_ON,
+ BRANCH_OFF,
+};
+
+/*
+ * RCG functions
+ */
+
+/*
+ * Update an RCG with a new configuration. This may include a new M, N, or D
+ * value, source selection or pre-divider value.
+ *
+ */
+/*
+ * Latch a new RCG configuration by setting the UPDATE bit and polling
+ * (up to UPDATE_CHECK_MAX_LOOPS us) until hardware clears it. Caller must
+ * hold local_clock_reg_lock. A timeout falls through silently (the
+ * warning below is compiled out).
+ */
+static void rcg_update_config(struct rcg_clk *rcg)
+{
+ u32 cmd_rcgr_regval, count;
+
+ cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+ cmd_rcgr_regval |= CMD_RCGR_CONFIG_UPDATE_BIT;
+ writel_relaxed(cmd_rcgr_regval, CMD_RCGR_REG(rcg));
+
+ /* Wait for update to take effect */
+ for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
+ if (!(readl_relaxed(CMD_RCGR_REG(rcg)) &
+ CMD_RCGR_CONFIG_UPDATE_BIT))
+ return;
+ udelay(1);
+ }
+
+ //CLK_WARN(&rcg->c, count == 0, "rcg didn't update its configuration.");
+}
+
+/* RCG set rate function for clocks with Half Integer Dividers. */
+/*
+ * Program source-select and half-integer divider fields from @nf into the
+ * CFG register, then latch the change via rcg_update_config(). Takes
+ * local_clock_reg_lock for the whole read-modify-write-latch sequence.
+ */
+void set_rate_hid(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
+{
+ u32 cfg_regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+ cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+ cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
+ cfg_regval |= nf->div_src_val;
+ writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));
+
+ rcg_update_config(rcg);
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+/* RCG set rate function for clocks with MND & Half Integer Dividers. */
+/*
+ * RCG set rate function for clocks with MND & Half Integer Dividers.
+ * Programs M/N/D counter values first, then the CFG source/divider fields,
+ * enabling dual-edge M/N:D mode only when a non-zero N is in use, and
+ * finally latches the configuration via rcg_update_config(). The whole
+ * sequence runs under local_clock_reg_lock.
+ *
+ * Note: the previous version performed an initial readl_relaxed() of
+ * CFG_RCGR whose result was unconditionally overwritten before use; that
+ * dead MMIO read has been removed.
+ */
+void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
+{
+ u32 cfg_regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+ writel_relaxed(nf->m_val, M_REG(rcg));
+ writel_relaxed(nf->n_val, N_REG(rcg));
+ writel_relaxed(nf->d_val, D_REG(rcg));
+
+ cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+ cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
+ cfg_regval |= nf->div_src_val;
+
+ /* Activate or disable the M/N:D divider as necessary */
+ cfg_regval &= ~MND_MODE_MASK;
+ if (nf->n_val != 0)
+ cfg_regval |= MND_DUAL_EDGE_MODE_BVAL;
+ writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));
+
+ rcg_update_config(rcg);
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+/*
+ * Prepare hook: nothing to program, but warn if the clock is still on the
+ * dummy placeholder frequency, i.e. prepare was called before set_rate.
+ */
+static int rcg_clk_prepare(struct clk *c)
+{
+ struct rcg_clk *rcg = to_rcg_clk(c);
+
+ WARN(rcg->current_freq == &rcg_dummy_freq,
+ "Attempting to prepare %s before setting its rate. "
+ "Set the rate first!\n", rcg->c.dbg_name);
+
+ return 0;
+}
+
+/*
+ * Set an RCG to an exact rate from its frequency table. Performs a safe
+ * reparent: __clk_pre_reparent() enables/votes the new source before the
+ * switch, and __clk_post_reparent() releases the old one afterwards.
+ * Returns -EINVAL if @rate is not in the table.
+ */
+static int rcg_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ struct clk_freq_tbl *cf, *nf;
+ struct rcg_clk *rcg = to_rcg_clk(c);
+ int rc;
+ unsigned long flags;
+
+ /* Exact-match lookup; the table is terminated by FREQ_END. */
+ for (nf = rcg->freq_tbl; nf->freq_hz != FREQ_END
+ && nf->freq_hz != rate; nf++)
+ ;
+
+ if (nf->freq_hz == FREQ_END)
+ return -EINVAL;
+
+ cf = rcg->current_freq;
+
+ rc = __clk_pre_reparent(c, nf->src_clk, &flags);
+ if (rc)
+ return rc;
+
+ BUG_ON(!rcg->set_rate);
+
+ /* Perform clock-specific frequency switch operations. */
+ rcg->set_rate(rcg, nf);
+ rcg->current_freq = nf;
+ c->parent = nf->src_clk;
+
+ __clk_post_reparent(c, cf->src_clk, &flags);
+
+ return 0;
+}
+
+/*
+ * Return a supported rate that's at least the specified rate or
+ * the max supported rate if the specified rate is larger than the
+ * max supported rate.
+ */
+/*
+ * Return a supported rate that's at least the specified rate or
+ * the max supported rate if the specified rate is larger than the
+ * max supported rate. Relies on the frequency table being sorted in
+ * ascending order and non-empty.
+ */
+static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
+{
+ struct rcg_clk *rcg = to_rcg_clk(c);
+ struct clk_freq_tbl *f;
+
+ for (f = rcg->freq_tbl; f->freq_hz != FREQ_END; f++)
+ if (f->freq_hz >= rate)
+ return f->freq_hz;
+
+ /* Ran off the end: step back to the last (highest) real entry. */
+ f--;
+ return f->freq_hz;
+}
+
+/* Return the nth supported frequency for a given clock. */
+/*
+ * Return the nth supported frequency for a given clock, or -ENXIO if the
+ * table is absent/empty. Callers iterate n upward; indexing past the end
+ * is expected to yield the FREQ_END sentinel — no bounds check here.
+ */
+static long rcg_clk_list_rate(struct clk *c, unsigned n)
+{
+ struct rcg_clk *rcg = to_rcg_clk(c);
+
+ if (!rcg->freq_tbl || rcg->freq_tbl->freq_hz == FREQ_END)
+ return -ENXIO;
+
+ return (rcg->freq_tbl + n)->freq_hz;
+}
+
+/*
+ * Determine which source an RCG is currently running from by matching the
+ * live register state (src_sel, divider and — when @has_mnd — the M/N/D
+ * counters) against the clock's frequency table. On a match, caches the
+ * entry in rcg->current_freq and returns its source clock. Returns NULL
+ * if a configuration update is still pending or no table entry matches.
+ */
+static struct clk *_rcg_clk_get_parent(struct rcg_clk *rcg, int has_mnd)
+{
+ u32 n_regval = 0, m_regval = 0, d_regval = 0;
+ u32 cfg_regval, div, div_regval;
+ struct clk_freq_tbl *freq;
+ u32 cmd_rcgr_regval;
+
+ /* Is there a pending configuration? */
+ cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+ if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
+ return NULL;
+
+ /* Get values of m, n, d, div and src_sel registers. */
+ if (has_mnd) {
+ m_regval = readl_relaxed(M_REG(rcg));
+ n_regval = readl_relaxed(N_REG(rcg));
+ d_regval = readl_relaxed(D_REG(rcg));
+
+ /*
+ * The n and d values stored in the frequency tables are sign
+ * extended to 32 bits. The n and d values in the registers are
+ * sign extended to 8 or 16 bits. Sign extend the values read
+ * from the registers so that they can be compared to the
+ * values in the frequency tables.
+ */
+ n_regval |= (n_regval >> 8) ? BM(31, 16) : BM(31, 8);
+ d_regval |= (d_regval >> 8) ? BM(31, 16) : BM(31, 8);
+ }
+
+ cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+ cfg_regval &= CFG_RCGR_SRC_SEL_MASK | CFG_RCGR_DIV_MASK
+ | MND_MODE_MASK;
+
+ /* If mnd counter is present, check if it's in use. */
+ has_mnd = (has_mnd) &&
+ ((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL);
+
+ /*
+ * Clear out the mn counter mode bits since we now want to compare only
+ * the source mux selection and pre-divider values in the registers.
+ */
+ cfg_regval &= ~MND_MODE_MASK;
+
+ /* Figure out what rate the rcg is running at */
+ for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
+ /* source select does not match */
+ if ((freq->div_src_val & CFG_RCGR_SRC_SEL_MASK)
+ != (cfg_regval & CFG_RCGR_SRC_SEL_MASK))
+ continue;
+ /* divider does not match */
+ div = freq->div_src_val & CFG_RCGR_DIV_MASK;
+ div_regval = cfg_regval & CFG_RCGR_DIV_MASK;
+ /* div values 0 and 1 both mean "bypass", so treat them equal */
+ if (div != div_regval && (div > 1 || div_regval > 1))
+ continue;
+
+ if (has_mnd) {
+ if (freq->m_val != m_regval)
+ continue;
+ if (freq->n_val != n_regval)
+ continue;
+ if (freq->d_val != d_regval)
+ continue;
+ } else if (freq->n_val) {
+ /* Table entry needs MND but the counter isn't in use. */
+ continue;
+ }
+ break;
+ }
+
+ /* No known frequency found */
+ if (freq->freq_hz == FREQ_END)
+ return NULL;
+
+ rcg->current_freq = freq;
+ return freq->src_clk;
+}
+
+/*
+ * Common RCG handoff: publish the cached frequency (if any) as the clock
+ * rate and report enabled/disabled from the root status bit. Expects a
+ * prior get_parent call to have populated rcg->current_freq.
+ */
+static enum handoff _rcg_clk_handoff(struct rcg_clk *rcg)
+{
+ u32 cmd_rcgr_regval;
+
+ if (rcg->current_freq && rcg->current_freq->freq_hz != FREQ_END)
+ rcg->c.rate = rcg->current_freq->freq_hz;
+
+ /* Is the root enabled? */
+ cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+ if ((cmd_rcgr_regval & CMD_RCGR_ROOT_STATUS_BIT))
+ return HANDOFF_DISABLED_CLK;
+
+ return HANDOFF_ENABLED_CLK;
+}
+
+/* get_parent for RCGs with an MND counter (has_mnd = 1). */
+static struct clk *rcg_mnd_clk_get_parent(struct clk *c)
+{
+ return _rcg_clk_get_parent(to_rcg_clk(c), 1);
+}
+
+/* get_parent for plain half-integer-divider RCGs (has_mnd = 0). */
+static struct clk *rcg_clk_get_parent(struct clk *c)
+{
+ return _rcg_clk_get_parent(to_rcg_clk(c), 0);
+}
+
+/* Handoff wrapper for MND RCGs; logic is shared in _rcg_clk_handoff(). */
+static enum handoff rcg_mnd_clk_handoff(struct clk *c)
+{
+ return _rcg_clk_handoff(to_rcg_clk(c));
+}
+
+/* Handoff wrapper for HID-only RCGs; logic is shared in _rcg_clk_handoff(). */
+static enum handoff rcg_clk_handoff(struct clk *c)
+{
+ return _rcg_clk_handoff(to_rcg_clk(c));
+}
+
+/*
+ * Debugfs helper: describe the register window of an HID RCG (CMD/CFG
+ * only). Only bank n == 0 exists; returns the CMD_RCGR base address.
+ */
+static void __iomem *rcg_hid_clk_list_registers(struct clk *c, int n,
+ struct clk_register_data **regs, u32 *size)
+{
+ struct rcg_clk *rcg = to_rcg_clk(c);
+ static struct clk_register_data data[] = {
+ {"CMD_RCGR", 0x0},
+ {"CFG_RCGR", 0x4},
+ };
+ if (n)
+ return ERR_PTR(-EINVAL);
+
+ *regs = data;
+ *size = ARRAY_SIZE(data);
+ return CMD_RCGR_REG(rcg);
+}
+
+/*
+ * Debugfs helper: describe the register window of an MND RCG (CMD/CFG
+ * plus M/N/D counters). Only bank n == 0 exists.
+ */
+static void __iomem *rcg_mnd_clk_list_registers(struct clk *c, int n,
+ struct clk_register_data **regs, u32 *size)
+{
+ struct rcg_clk *rcg = to_rcg_clk(c);
+ static struct clk_register_data data[] = {
+ {"CMD_RCGR", 0x0},
+ {"CFG_RCGR", 0x4},
+ {"M_VAL", 0x8},
+ {"N_VAL", 0xC},
+ {"D_VAL", 0x10},
+ };
+ if (n)
+ return ERR_PTR(-EINVAL);
+
+ *regs = data;
+ *size = ARRAY_SIZE(data);
+ return CMD_RCGR_REG(rcg);
+}
+
+#define BRANCH_CHECK_MASK BM(31, 28)
+#define BRANCH_ON_VAL BVAL(31, 28, 0x0)
+#define BRANCH_OFF_VAL BVAL(31, 28, 0x8)
+#define BRANCH_NOC_FSM_ON_VAL BVAL(31, 28, 0x2)
+
+/*
+ * Branch clock functions
+ */
+/*
+ * Wait for a branch clock to reach @br_status after an enable/disable.
+ * DELAY/HALT_VOTED clocks get a fixed udelay; HALT clocks poll the CBCR
+ * status field (up to HALT_CHECK_MAX_LOOPS us). A poll timeout falls
+ * through silently (the warning below is compiled out).
+ */
+static void branch_clk_halt_check(struct clk *c, u32 halt_check,
+ void __iomem *cbcr_reg, enum branch_state br_status)
+{
+// char *status_str = (br_status == BRANCH_ON) ? "off" : "on";
+
+ /*
+ * Use a memory barrier since some halt status registers are
+ * not within the same 1K segment as the branch/root enable
+ * registers. It's also needed in the udelay() case to ensure
+ * the delay starts after the branch disable.
+ */
+ mb();
+
+ if (halt_check == DELAY || halt_check == HALT_VOTED) {
+ udelay(HALT_CHECK_DELAY_US);
+ } else if (halt_check == HALT) {
+ int count;
+ u32 val;
+ for (count = HALT_CHECK_MAX_LOOPS; count > 0; count--) {
+ val = readl_relaxed(cbcr_reg);
+ val &= BRANCH_CHECK_MASK;
+ switch (br_status) {
+ case BRANCH_ON:
+ /* Plain ON or NOC-FSM-managed ON both count. */
+ if (val == BRANCH_ON_VAL
+ || val == BRANCH_NOC_FSM_ON_VAL)
+ return;
+ break;
+
+ case BRANCH_OFF:
+ if (val == BRANCH_OFF_VAL)
+ return;
+ break;
+ };
+ udelay(1);
+ }
+ //CLK_WARN(c, count == 0, "status stuck %s", status_str);
+ }
+}
+
+/*
+ * Enable a branch clock: set the CBCR enable bit under the register lock,
+ * then wait (outside the lock) for the branch to report ON.
+ */
+static int branch_clk_enable(struct clk *c)
+{
+ unsigned long flags;
+ u32 cbcr_val;
+ struct branch_clk *branch = to_branch_clk(c);
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+ cbcr_val = readl_relaxed(CBCR_REG(branch));
+ cbcr_val |= CBCR_BRANCH_ENABLE_BIT;
+ writel_relaxed(cbcr_val, CBCR_REG(branch));
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+ /* Wait for clock to enable before continuing. */
+ branch_clk_halt_check(c, branch->halt_check, CBCR_REG(branch),
+ BRANCH_ON);
+
+ return 0;
+}
+
+/*
+ * Disable a branch clock: clear the CBCR enable bit under the register
+ * lock, then wait (outside the lock) for the branch to report OFF.
+ */
+static void branch_clk_disable(struct clk *c)
+{
+ unsigned long flags;
+ struct branch_clk *branch = to_branch_clk(c);
+ u32 reg_val;
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+ reg_val = readl_relaxed(CBCR_REG(branch));
+ reg_val &= ~CBCR_BRANCH_ENABLE_BIT;
+ writel_relaxed(reg_val, CBCR_REG(branch));
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+ /* Wait for clock to disable before continuing. */
+ branch_clk_halt_check(c, branch->halt_check, CBCR_REG(branch),
+ BRANCH_OFF);
+}
+
+/*
+ * Program the CBCR CDIV field for a branch with a local divider. Here
+ * @rate is the divider value itself (bounded by branch->max_div), not a
+ * frequency — callers encode the divider as the "rate".
+ */
+static int branch_cdiv_set_rate(struct branch_clk *branch, unsigned long rate)
+{
+ unsigned long flags;
+ u32 regval;
+
+ if (rate > branch->max_div)
+ return -EINVAL;
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+ regval = readl_relaxed(CBCR_REG(branch));
+ regval &= ~BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
+ regval |= BVAL(CBCR_CDIV_MSB, CBCR_CDIV_LSB, rate);
+ writel_relaxed(regval, CBCR_REG(branch));
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+ return 0;
+}
+
+/*
+ * Set a branch clock's rate: use the local CDIV if one exists; otherwise
+ * forward to the parent — but only when this branch has no siblings
+ * sharing that parent (changing a shared parent would disturb them).
+ */
+static int branch_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ struct branch_clk *branch = to_branch_clk(c);
+
+ if (branch->max_div)
+ return branch_cdiv_set_rate(branch, rate);
+
+ if (!branch->has_sibling)
+ return clk_set_rate(c->parent, rate);
+
+ return -EPERM;
+}
+
+/*
+ * Round a branch clock's rate: CDIV branches accept any divider value up
+ * to max_div as-is; sibling-free branches delegate to the parent; shared
+ * branches cannot be adjusted (-EPERM).
+ */
+static long branch_clk_round_rate(struct clk *c, unsigned long rate)
+{
+ struct branch_clk *branch = to_branch_clk(c);
+
+ if (branch->max_div)
+ return rate <= (branch->max_div) ? rate : -EPERM;
+
+ if (!branch->has_sibling)
+ return clk_round_rate(c->parent, rate);
+
+ return -EPERM;
+}
+
+/*
+ * Report a branch clock's rate: CDIV branches return the cached value
+ * (maintained by set_rate/handoff); others mirror the parent's rate.
+ */
+static unsigned long branch_clk_get_rate(struct clk *c)
+{
+ struct branch_clk *branch = to_branch_clk(c);
+
+ if (branch->max_div)
+ return branch->c.rate;
+
+ return clk_get_rate(c->parent);
+}
+
+/*
+ * List the nth supported rate of a branch by delegating to the parent's
+ * list_rate, filtered against the parent's voltage-constrained maximum
+ * frequency. Shared (sibling) branches and parents without list_rate
+ * report -ENXIO.
+ */
+static long branch_clk_list_rate(struct clk *c, unsigned n)
+{
+ int level;
+ unsigned long fmax = 0, rate;
+ struct branch_clk *branch = to_branch_clk(c);
+ struct clk *parent = c->parent;
+
+ if (branch->has_sibling == 1)
+ return -ENXIO;
+
+ if (!parent || !parent->ops->list_rate)
+ return -ENXIO;
+
+ /* Find max frequency supported within voltage constraints. */
+ if (!parent->vdd_class) {
+ fmax = ULONG_MAX;
+ } else {
+ /* Highest non-zero fmax entry wins (loop keeps the last one). */
+ for (level = 0; level < parent->num_fmax; level++)
+ if (parent->fmax[level])
+ fmax = parent->fmax[level];
+ }
+
+ rate = parent->ops->list_rate(parent, n);
+ if (rate <= fmax)
+ return rate;
+ else
+ return -ENXIO;
+}
+
+/*
+ * Boot-time handoff for a branch clock: optionally push a pre-set fixed
+ * divider (c->rate) into the CDIV field, report on/off from the CBCR OFF
+ * bit, and recover the effective rate (CDIV value for divider branches,
+ * parent rate for sibling-free branches).
+ */
+static enum handoff branch_clk_handoff(struct clk *c)
+{
+ struct branch_clk *branch = to_branch_clk(c);
+ u32 cbcr_regval;
+
+ cbcr_regval = readl_relaxed(CBCR_REG(branch));
+
+ /* Set the cdiv to c->rate for fixed divider branch clock */
+ if (c->rate && (c->rate < branch->max_div)) {
+ cbcr_regval &= ~BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
+ cbcr_regval |= BVAL(CBCR_CDIV_MSB, CBCR_CDIV_LSB, c->rate);
+ writel_relaxed(cbcr_regval, CBCR_REG(branch));
+ }
+
+ if ((cbcr_regval & CBCR_BRANCH_OFF_BIT))
+ return HANDOFF_DISABLED_CLK;
+
+ if (branch->max_div) {
+ /* Read back the live divider as this clock's "rate". */
+ cbcr_regval &= BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
+ cbcr_regval >>= CBCR_CDIV_LSB;
+ c->rate = cbcr_regval;
+ } else if (!branch->has_sibling) {
+ c->rate = clk_get_rate(c->parent);
+ }
+
+ return HANDOFF_ENABLED_CLK;
+}
+
+/*
+ * Assert or deassert a block reset via its BCR register. Shared by branch,
+ * voteable and reset clocks. Returns -EINVAL for an unknown action (the
+ * unmodified register value is harmlessly written back in that case).
+ */
+static int __branch_clk_reset(void __iomem *bcr_reg,
+ enum clk_reset_action action)
+{
+ int ret = 0;
+ unsigned long flags;
+ u32 reg_val;
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+ reg_val = readl_relaxed(bcr_reg);
+ switch (action) {
+ case CLK_RESET_ASSERT:
+ reg_val |= BCR_BLK_ARES_BIT;
+ break;
+ case CLK_RESET_DEASSERT:
+ reg_val &= ~BCR_BLK_ARES_BIT;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ writel_relaxed(reg_val, bcr_reg);
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+ /* Make sure write is issued before returning. */
+ mb();
+
+ return ret;
+}
+
+/* Reset a branch clock's block, if it has a BCR register (-EPERM if not). */
+static int branch_clk_reset(struct clk *c, enum clk_reset_action action)
+{
+ struct branch_clk *branch = to_branch_clk(c);
+
+ if (!branch->bcr_reg)
+ return -EPERM;
+ return __branch_clk_reset(BCR_REG(branch), action);
+}
+
+/*
+ * Toggle memory/periphery retention bits in the CBCR. Retention enables
+ * (bits 13/14) are followed by a 1 us settle delay; disables need none.
+ * Unknown flags return -EINVAL (the unmodified value is written back).
+ */
+static int branch_clk_set_flags(struct clk *c, unsigned flags)
+{
+ u32 cbcr_val;
+ unsigned long irq_flags;
+ struct branch_clk *branch = to_branch_clk(c);
+ int delay_us = 0, ret = 0;
+
+ spin_lock_irqsave(&local_clock_reg_lock, irq_flags);
+ cbcr_val = readl_relaxed(CBCR_REG(branch));
+ switch (flags) {
+ case CLKFLAG_RETAIN_PERIPH:
+ cbcr_val |= BIT(13);
+ delay_us = 1;
+ break;
+ case CLKFLAG_NORETAIN_PERIPH:
+ cbcr_val &= ~BIT(13);
+ break;
+ case CLKFLAG_RETAIN_MEM:
+ cbcr_val |= BIT(14);
+ delay_us = 1;
+ break;
+ case CLKFLAG_NORETAIN_MEM:
+ cbcr_val &= ~BIT(14);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ writel_relaxed(cbcr_val, CBCR_REG(branch));
+ /* Make sure power is enabled before returning. */
+ mb();
+ udelay(delay_us);
+
+ spin_unlock_irqrestore(&local_clock_reg_lock, irq_flags);
+
+ return ret;
+}
+
+/* Debugfs helper: a branch clock exposes just its CBCR (bank n == 0). */
+static void __iomem *branch_clk_list_registers(struct clk *c, int n,
+ struct clk_register_data **regs, u32 *size)
+{
+ struct branch_clk *branch = to_branch_clk(c);
+ static struct clk_register_data data[] = {
+ {"CBCR", 0x0},
+ };
+ if (n)
+ return ERR_PTR(-EINVAL);
+
+ *regs = data;
+ *size = ARRAY_SIZE(data);
+ return CBCR_REG(branch);
+}
+
+/*
+ * Voteable clock functions
+ */
+/*
+ * Reset a voteable clock's block via its BCR, warning loudly if the clock
+ * has no BCR (unlike branch_clk_reset, which fails silently).
+ */
+static int local_vote_clk_reset(struct clk *c, enum clk_reset_action action)
+{
+ struct local_vote_clk *vclk = to_local_vote_clk(c);
+
+ if (!vclk->bcr_reg) {
+ WARN("clk_reset called on an unsupported clock (%s)\n",
+ c->dbg_name);
+ return -EPERM;
+ }
+ return __branch_clk_reset(BCR_REG(vclk), action);
+}
+
+/*
+ * Cast the apps vote for a shared clock by setting its bit in the vote
+ * register, then wait for the branch to actually turn on. Other masters
+ * may keep the clock running even after our vote is removed.
+ */
+static int local_vote_clk_enable(struct clk *c)
+{
+ unsigned long flags;
+ u32 ena;
+ struct local_vote_clk *vclk = to_local_vote_clk(c);
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+ ena = readl_relaxed(VOTE_REG(vclk));
+ ena |= vclk->en_mask;
+ writel_relaxed(ena, VOTE_REG(vclk));
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+ branch_clk_halt_check(c, vclk->halt_check, CBCR_REG(vclk), BRANCH_ON);
+
+ return 0;
+}
+
+/*
+ * Remove the apps vote for a shared clock. No halt check: the clock may
+ * legitimately stay on due to other masters' votes.
+ */
+static void local_vote_clk_disable(struct clk *c)
+{
+ unsigned long flags;
+ u32 ena;
+ struct local_vote_clk *vclk = to_local_vote_clk(c);
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+ ena = readl_relaxed(VOTE_REG(vclk));
+ ena &= ~vclk->en_mask;
+ writel_relaxed(ena, VOTE_REG(vclk));
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+/*
+ * Handoff for voteable clocks: enabled iff the apps vote bit is already
+ * set (the actual branch state is irrelevant — we only own our vote).
+ */
+static enum handoff local_vote_clk_handoff(struct clk *c)
+{
+ struct local_vote_clk *vclk = to_local_vote_clk(c);
+ u32 vote_regval;
+
+ /* Is the branch voted on by apps? */
+ vote_regval = readl_relaxed(VOTE_REG(vclk));
+ if (!(vote_regval & vclk->en_mask))
+ return HANDOFF_DISABLED_CLK;
+
+ return HANDOFF_ENABLED_CLK;
+}
+
+/* Sample clock for 'ticks' reference clock ticks. */
+/* Sample clock for 'ticks' reference clock ticks. */
+/*
+ * Busy-polls the status register's BIT(25) ready/busy flag around a
+ * BIT(20)-triggered counting run; returns the 25-bit tick count.
+ * Caller holds local_clock_reg_lock; this blocks for the whole run.
+ */
+static u32 run_measurement(unsigned ticks, void __iomem *ctl_reg,
+ void __iomem *status_reg)
+{
+ /* Stop counters and set the XO4 counter start value. */
+ writel_relaxed(ticks, ctl_reg);
+
+ /* Wait for timer to become ready. */
+ while ((readl_relaxed(status_reg) & BIT(25)) != 0)
+ cpu_relax();
+
+ /* Run measurement and wait for completion. */
+ writel_relaxed(BIT(20)|ticks, ctl_reg);
+ while ((readl_relaxed(status_reg) & BIT(25)) == 0)
+ cpu_relax();
+
+ /* Return measured ticks. */
+ return readl_relaxed(status_reg) & BM(24, 0);
+}
+
+/*
+ * Perform a hardware rate measurement for a given clock.
+ * FOR DEBUG USE ONLY: Measurements take ~15 ms!
+ */
+/*
+ * Debug-only hardware rate measurement (~15 ms, spins with interrupts
+ * off). Runs the ring-oscillator counter against CXO/4: a short run and a
+ * full run are compared to detect a dead clock (identical raw counts =>
+ * rate 0), otherwise the rate in Hz is derived from the full count.
+ */
+unsigned long measure_get_rate(struct clk *c)
+{
+ unsigned long flags;
+ u32 gcc_xo4_reg_backup;
+ u64 raw_count_short, raw_count_full;
+ unsigned ret;
+ u32 sample_ticks = 0x10000;
+ u32 multiplier = 0x1;
+ struct measure_clk_data *data = to_mux_clk(c)->priv;
+
+ ret = clk_prepare_enable(data->cxo);
+ if (ret) {
+ pr_warn("CXO clock failed to enable. Can't measure\n");
+ return 0;
+ }
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+
+ /* Enable CXO/4 and RINGOSC branch. */
+ gcc_xo4_reg_backup = readl_relaxed(*data->base + data->xo_div4_cbcr);
+ writel_relaxed(0x1, *data->base + data->xo_div4_cbcr);
+
+ /*
+ * The ring oscillator counter will not reset if the measured clock
+ * is not running. To detect this, run a short measurement before
+ * the full measurement. If the raw results of the two are the same
+ * then the clock must be off.
+ */
+
+ /* Run a short measurement. (~1 ms) */
+ raw_count_short = run_measurement(0x1000, *data->base + data->ctl_reg,
+ *data->base + data->status_reg);
+ /* Run a full measurement. (~14 ms) */
+ raw_count_full = run_measurement(sample_ticks,
+ *data->base + data->ctl_reg,
+ *data->base + data->status_reg);
+ writel_relaxed(gcc_xo4_reg_backup, *data->base + data->xo_div4_cbcr);
+
+ /* Return 0 if the clock is off. */
+ if (raw_count_full == raw_count_short) {
+ ret = 0;
+ } else {
+ /* Compute rate in Hz. */
+ raw_count_full = ((raw_count_full * 10) + 15) * 4800000;
+ do_div(raw_count_full, ((sample_ticks * 10) + 35));
+ ret = (raw_count_full * multiplier);
+ }
+ /* Restore the PLL test mux before releasing the lock. */
+ writel_relaxed(data->plltest_val, *data->base + data->plltest_reg);
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+ clk_disable_unprepare(data->cxo);
+
+ return ret;
+}
+
+/* One M/N fraction (num/den) used by the eDP pixel-clock rate tables. */
+struct frac_entry {
+ int num;
+ int den;
+};
+
+/*
+ * Debugfs helper for voteable clocks: bank 0 is the CBCR, bank 1 is the
+ * apps/sleep vote register pair; any other bank is invalid.
+ */
+static void __iomem *local_vote_clk_list_registers(struct clk *c, int n,
+ struct clk_register_data **regs, u32 *size)
+{
+ struct local_vote_clk *vclk = to_local_vote_clk(c);
+ static struct clk_register_data data1[] = {
+ {"CBCR", 0x0},
+ };
+ static struct clk_register_data data2[] = {
+ {"APPS_VOTE", 0x0},
+ {"APPS_SLEEP_VOTE", 0x4},
+ };
+ switch (n) {
+ case 0:
+ *regs = data1;
+ *size = ARRAY_SIZE(data1);
+ return CBCR_REG(vclk);
+ case 1:
+ *regs = data2;
+ *size = ARRAY_SIZE(data2);
+ return VOTE_REG(vclk);
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+/* M/N fractions for a 675 MHz source; zero-terminated. */
+static struct frac_entry frac_table_675m[] = { /* link rate of 270M */
+ {52, 295}, /* 119 M */
+ {11, 57}, /* 130.25 M */
+ {63, 307}, /* 138.50 M */
+ {11, 50}, /* 148.50 M */
+ {47, 206}, /* 154 M */
+ {31, 100}, /* 205.25 M */
+ {107, 269}, /* 268.50 M */
+ {0, 0},
+};
+
+/* M/N fractions for an 810 MHz source; zero-terminated. */
+static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */
+ {31, 211}, /* 119 M */
+ {32, 199}, /* 130.25 M */
+ {63, 307}, /* 138.50 M */
+ {11, 60}, /* 148.50 M */
+ {50, 263}, /* 154 M */
+ {31, 120}, /* 205.25 M */
+ {119, 359}, /* 268.50 M */
+ {0, 0},
+};
+
+/*
+ * Set an eDP pixel clock rate: pick the frac table for the current source
+ * rate (810 MHz or 675 MHz), find the num/den entry whose implied source
+ * rate matches the live parent rate within +/- 100 kHz, then program the
+ * MND counters accordingly. n_val/d_val use the hardware's one's-
+ * complement encoding. Returns -EINVAL if no entry matches.
+ */
+static int set_rate_edp_pixel(struct clk *clk, unsigned long rate)
+{
+ struct rcg_clk *rcg = to_rcg_clk(clk);
+ struct clk_freq_tbl *pixel_freq = rcg->current_freq;
+ struct frac_entry *frac;
+ int delta = 100000;
+ s64 request;
+ s64 src_rate;
+
+ src_rate = clk_get_rate(clk->parent);
+
+ if (src_rate == 810000000)
+ frac = frac_table_810m;
+ else
+ frac = frac_table_675m;
+
+ while (frac->num) {
+ /* Source rate this entry would need: rate * den / num. */
+ request = rate;
+ request *= frac->den;
+ request = div_s64(request, frac->num);
+ if ((src_rate < (request - delta)) ||
+ (src_rate > (request + delta))) {
+ frac++;
+ continue;
+ }
+
+ pixel_freq->div_src_val &= ~BM(4, 0);
+ if (frac->den == frac->num) {
+ /* 1:1 — bypass the M/N counter entirely. */
+ pixel_freq->m_val = 0;
+ pixel_freq->n_val = 0;
+ } else {
+ pixel_freq->m_val = frac->num;
+ pixel_freq->n_val = ~(frac->den - frac->num);
+ pixel_freq->d_val = ~frac->den;
+ }
+ set_rate_mnd(rcg, pixel_freq);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/*
+ * Handoff for DSI byte clocks: derive the current rate from the parent
+ * rate and the live half-integer pre-divider (register value v encodes
+ * divider (v+1)/2), then report on/off from the root status bit.
+ */
+enum handoff byte_rcg_handoff(struct clk *clk)
+{
+ struct rcg_clk *rcg = to_rcg_clk(clk);
+ u32 div_val;
+ unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);
+
+ /* If the pre-divider is used, find the rate after the division */
+ div_val = readl_relaxed(CFG_RCGR_REG(rcg)) & CFG_RCGR_DIV_MASK;
+ if (div_val > 1)
+ pre_div_rate = parent_rate / ((div_val + 1) >> 1);
+ else
+ pre_div_rate = parent_rate;
+
+ clk->rate = pre_div_rate;
+
+ if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
+ return HANDOFF_DISABLED_CLK;
+
+ return HANDOFF_ENABLED_CLK;
+}
+
+/*
+ * Set a DSI byte clock rate: push the rate into the source PLL, then
+ * derive the half-integer divider from the PLL's rounded rate. The HID
+ * encoding is div_reg = 2*ratio - 1, so the PLL rate must be an exact
+ * half-integer multiple of @rate and the result must fit the 5-bit field.
+ */
+static int set_rate_byte(struct clk *clk, unsigned long rate)
+{
+ struct rcg_clk *rcg = to_rcg_clk(clk);
+ struct clk *pll = clk->parent;
+ unsigned long source_rate, div;
+ struct clk_freq_tbl *byte_freq = rcg->current_freq;
+ int rc;
+
+ if (rate == 0)
+ return -EINVAL;
+
+ rc = clk_set_rate(pll, rate);
+ if (rc)
+ return rc;
+
+ source_rate = clk_round_rate(pll, rate);
+ /* Require an exact half-integer ratio between PLL and byte clock. */
+ if ((2 * source_rate) % rate)
+ return -EINVAL;
+
+ div = ((2 * source_rate)/rate) - 1;
+ if (div > CFG_RCGR_DIV_MASK)
+ return -EINVAL;
+
+ byte_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
+ byte_freq->div_src_val |= BVAL(4, 0, div);
+ set_rate_hid(rcg, byte_freq);
+
+ return 0;
+}
+
+/*
+ * Handoff for pixel clocks: recover the effective rate from the live
+ * pre-divider and (if dual-edge mode is active) the M/N counters, and
+ * sync the single frequency-table entry with the hardware state. N and D
+ * registers use one's-complement encoding (n_reg = ~(n - m), d = ~n).
+ */
+enum handoff pixel_rcg_handoff(struct clk *clk)
+{
+ struct rcg_clk *rcg = to_rcg_clk(clk);
+ u32 div_val = 0, mval = 0, nval = 0, cfg_regval;
+ unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);
+
+ cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+
+ /* If the pre-divider is used, find the rate after the division */
+ div_val = cfg_regval & CFG_RCGR_DIV_MASK;
+ if (div_val > 1)
+ pre_div_rate = parent_rate / ((div_val + 1) >> 1);
+ else
+ pre_div_rate = parent_rate;
+
+ clk->rate = pre_div_rate;
+
+ /*
+ * Pixel clocks have one frequency entry in their frequency table.
+ * Update that entry.
+ */
+ if (rcg->current_freq) {
+ rcg->current_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
+ rcg->current_freq->div_src_val |= div_val;
+ }
+
+ /* If MND is used, find the rate after the MND division */
+ if ((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL) {
+ mval = readl_relaxed(M_REG(rcg));
+ nval = readl_relaxed(N_REG(rcg));
+ if (!nval)
+ return HANDOFF_DISABLED_CLK;
+ /* Decode one's-complement N register: n = ~reg + m. */
+ nval = (~nval) + mval;
+ if (rcg->current_freq) {
+ rcg->current_freq->n_val = ~(nval - mval);
+ rcg->current_freq->m_val = mval;
+ rcg->current_freq->d_val = ~nval;
+ }
+ clk->rate = (pre_div_rate * mval) / nval;
+ }
+
+ if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
+ return HANDOFF_DISABLED_CLK;
+
+ return HANDOFF_ENABLED_CLK;
+}
+
+/*
+ * Round a pixel clock rate: try each supported M/N fraction, ask the
+ * parent to round the implied source rate, and accept the first fraction
+ * whose rounded source rate lands within +/- 100 kHz of the request.
+ * Returns the achievable pixel rate, or -EINVAL if no fraction fits.
+ */
+static long round_rate_pixel(struct clk *clk, unsigned long rate)
+{
+ int frac_num[] = {3, 2, 4, 1};
+ int frac_den[] = {8, 9, 9, 1};
+ int delta = 100000;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(frac_num); i++) {
+ unsigned long request = (rate * frac_den[i]) / frac_num[i];
+ unsigned long src_rate;
+
+ src_rate = clk_round_rate(clk->parent, request);
+ if ((src_rate < (request - delta)) ||
+ (src_rate > (request + delta)))
+ continue;
+
+ return (src_rate * frac_num[i]) / frac_den[i];
+ }
+
+ return -EINVAL;
+}
+
+
+/*
+ * Set a pixel clock rate using the same fraction search as
+ * round_rate_pixel(): on a match, set the parent to the rounded source
+ * rate and program the MND counters (one's-complement N/D encoding;
+ * num == den means bypass with m = n = 0). -EINVAL if no fraction fits.
+ */
+static int set_rate_pixel(struct clk *clk, unsigned long rate)
+{
+ struct rcg_clk *rcg = to_rcg_clk(clk);
+ struct clk_freq_tbl *pixel_freq = rcg->current_freq;
+ int frac_num[] = {3, 2, 4, 1};
+ int frac_den[] = {8, 9, 9, 1};
+ int delta = 100000;
+ int i, rc;
+
+ for (i = 0; i < ARRAY_SIZE(frac_num); i++) {
+ unsigned long request = (rate * frac_den[i]) / frac_num[i];
+ unsigned long src_rate;
+
+ src_rate = clk_round_rate(clk->parent, request);
+ if ((src_rate < (request - delta)) ||
+ (src_rate > (request + delta)))
+ continue;
+
+ rc = clk_set_rate(clk->parent, src_rate);
+ if (rc)
+ return rc;
+
+ pixel_freq->div_src_val &= ~BM(4, 0);
+ if (frac_den[i] == frac_num[i]) {
+ pixel_freq->m_val = 0;
+ pixel_freq->n_val = 0;
+ } else {
+ pixel_freq->m_val = frac_num[i];
+ pixel_freq->n_val = ~(frac_den[i] - frac_num[i]);
+ pixel_freq->d_val = ~frac_den[i];
+ }
+ set_rate_mnd(rcg, pixel_freq);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/*
+ * Unlike other clocks, the HDMI rate is adjusted through PLL
+ * re-programming. It is also routed through an HID divider.
+ */
+/*
+ * Unlike other clocks, the HDMI rate is adjusted through PLL
+ * re-programming. It is also routed through an HID divider.
+ * Uses the single (first) frequency-table entry: reprogram its source
+ * PLL to @rate, then apply that entry's HID configuration.
+ */
+static int rcg_clk_set_rate_hdmi(struct clk *c, unsigned long rate)
+{
+ struct rcg_clk *rcg = to_rcg_clk(c);
+ struct clk_freq_tbl *nf = rcg->freq_tbl;
+ int rc;
+
+ rc = clk_set_rate(nf->src_clk, rate);
+ if (rc < 0)
+ goto out;
+ set_rate_hid(rcg, nf);
+
+ rcg->current_freq = nf;
+out:
+ return rc;
+}
+
+/*
+ * get_parent for the HDMI RCG: there is only one source (the first table
+ * entry), so just refresh the cached frequency from the live parent rate
+ * and return that source. NULL while a configuration update is pending.
+ */
+static struct clk *rcg_hdmi_clk_get_parent(struct clk *c)
+{
+ struct rcg_clk *rcg = to_rcg_clk(c);
+ struct clk_freq_tbl *freq = rcg->freq_tbl;
+ u32 cmd_rcgr_regval;
+
+ /* Is there a pending configuration? */
+ cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+ if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
+ return NULL;
+
+ rcg->current_freq->freq_hz = clk_get_rate(c->parent);
+
+ return freq->src_clk;
+}
+
+/*
+ * Set an eDP link clock rate: find the exact table entry for @rate,
+ * reprogram that entry's source clock, apply its HID configuration and
+ * reparent. -EINVAL if @rate is not in the table.
+ */
+static int rcg_clk_set_rate_edp(struct clk *c, unsigned long rate)
+{
+ struct clk_freq_tbl *nf;
+ struct rcg_clk *rcg = to_rcg_clk(c);
+ int rc;
+
+ for (nf = rcg->freq_tbl; nf->freq_hz != rate; nf++)
+ if (nf->freq_hz == FREQ_END) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = clk_set_rate(nf->src_clk, rate);
+ if (rc < 0)
+ goto out;
+ set_rate_hid(rcg, nf);
+
+ rcg->current_freq = nf;
+ c->parent = nf->src_clk;
+out:
+ return rc;
+}
+
+/*
+ * get_parent for eDP clocks: identify the active source by comparing each
+ * table entry's source clock rate (queried via its get_rate op) against
+ * the table frequency. NULL while a configuration update is pending or
+ * when no entry's source rate matches.
+ */
+static struct clk *edp_clk_get_parent(struct clk *c)
+{
+ struct rcg_clk *rcg = to_rcg_clk(c);
+ struct clk *clk;
+ struct clk_freq_tbl *freq;
+ unsigned long rate;
+ u32 cmd_rcgr_regval;
+
+ /* Is there a pending configuration? */
+ cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+ if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
+ return NULL;
+
+ /* Figure out what rate the rcg is running at */
+ for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
+ clk = freq->src_clk;
+ if (clk && clk->ops->get_rate) {
+ rate = clk->ops->get_rate(clk);
+ if (rate == freq->freq_hz)
+ break;
+ }
+ }
+
+ /* No known frequency found */
+ if (freq->freq_hz == FREQ_END)
+ return NULL;
+
+ rcg->current_freq = freq;
+ return freq->src_clk;
+}
+
+/*
+ * Enable a simple gate clock by setting its enable mask, then honor the
+ * optional per-clock settle delay. Always succeeds.
+ */
+static int gate_clk_enable(struct clk *c)
+{
+ unsigned long flags;
+ u32 regval;
+ struct gate_clk *g = to_gate_clk(c);
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+ regval = readl_relaxed(GATE_EN_REG(g));
+ regval |= g->en_mask;
+ writel_relaxed(regval, GATE_EN_REG(g));
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+ if (g->delay_us)
+ udelay(g->delay_us);
+
+ return 0;
+}
+
+/*
+ * Disable a simple gate clock by clearing its enable mask, then honor the
+ * optional per-clock settle delay.
+ */
+static void gate_clk_disable(struct clk *c)
+{
+ unsigned long flags;
+ u32 regval;
+ struct gate_clk *g = to_gate_clk(c);
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+ regval = readl_relaxed(GATE_EN_REG(g));
+ regval &= ~(g->en_mask);
+ writel_relaxed(regval, GATE_EN_REG(g));
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+ if (g->delay_us)
+ udelay(g->delay_us);
+}
+
+/* Debugfs helper: a gate clock exposes just its enable register (bank 0). */
+static void __iomem *gate_clk_list_registers(struct clk *c, int n,
+ struct clk_register_data **regs, u32 *size)
+{
+ struct gate_clk *g = to_gate_clk(c);
+ static struct clk_register_data data[] = {
+ {"EN_REG", 0x0},
+ };
+ if (n)
+ return ERR_PTR(-EINVAL);
+
+ *regs = data;
+ *size = ARRAY_SIZE(data);
+ return GATE_EN_REG(g);
+}
+
+/* Handoff for gate clocks: enabled iff the enable mask is already set. */
+static enum handoff gate_clk_handoff(struct clk *c)
+{
+ struct gate_clk *g = to_gate_clk(c);
+ u32 regval;
+
+ regval = readl_relaxed(GATE_EN_REG(g));
+ if (regval & g->en_mask)
+ return HANDOFF_ENABLED_CLK;
+
+ return HANDOFF_DISABLED_CLK;
+}
+
+/* Reset-only clock: forward the action to its reset register, if any. */
+static int reset_clk_rst(struct clk *c, enum clk_reset_action action)
+{
+ struct reset_clk *rst = to_reset_clk(c);
+
+ if (!rst->reset_reg)
+ return -EPERM;
+
+ return __branch_clk_reset(RST_REG(rst), action);
+}
+
+static DEFINE_SPINLOCK(mux_reg_lock);
+
+/*
+ * Enable a register-backed mux. The enable bit may live in a dedicated
+ * enable register (en_reg/en_offset) or share the mux select register
+ * (offset). The mb() makes the write visible before we return.
+ */
+static int mux_reg_enable(struct mux_clk *clk)
+{
+ u32 regval;
+ unsigned long flags;
+ u32 offset = clk->en_reg ? clk->en_offset : clk->offset;
+
+ spin_lock_irqsave(&mux_reg_lock, flags);
+ regval = readl_relaxed(*clk->base + offset);
+ regval |= clk->en_mask;
+ writel_relaxed(regval, *clk->base + offset);
+ /* Ensure enable request goes through before returning */
+ mb();
+ spin_unlock_irqrestore(&mux_reg_lock, flags);
+
+ return 0;
+}
+
+/*
+ * Disable a register-backed mux by clearing its enable mask (same
+ * register selection rule as mux_reg_enable; no barrier on disable).
+ */
+static void mux_reg_disable(struct mux_clk *clk)
+{
+ u32 regval;
+ unsigned long flags;
+ u32 offset = clk->en_reg ? clk->en_offset : clk->offset;
+
+ spin_lock_irqsave(&mux_reg_lock, flags);
+ regval = readl_relaxed(*clk->base + offset);
+ regval &= ~clk->en_mask;
+ writel_relaxed(regval, *clk->base + offset);
+ spin_unlock_irqrestore(&mux_reg_lock, flags);
+}
+
+/*
+ * Select mux input @sel by writing (sel & mask) << shift into the select
+ * field. The mb() ensures the switch is posted before returning.
+ */
+static int mux_reg_set_mux_sel(struct mux_clk *clk, int sel)
+{
+ u32 regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mux_reg_lock, flags);
+ regval = readl_relaxed(*clk->base + clk->offset);
+ regval &= ~(clk->mask << clk->shift);
+ regval |= (sel & clk->mask) << clk->shift;
+ writel_relaxed(regval, *clk->base + clk->offset);
+ /* Ensure switch request goes through before returning */
+ mb();
+ spin_unlock_irqrestore(&mux_reg_lock, flags);
+
+ return 0;
+}
+
+/*
+ * Read back the currently selected mux input.
+ *
+ * Fix: the previous implementation returned !!(...), which collapsed
+ * every non-zero selector to 1 and misreported any mux whose select
+ * field is wider than one bit (e.g. a 0x1FF debug-mux mask).  Return
+ * the raw field value instead; set_mux_sel writes the same encoding.
+ */
+static int mux_reg_get_mux_sel(struct mux_clk *clk)
+{
+	u32 regval = readl_relaxed(*clk->base + clk->offset);
+	return (regval >> clk->shift) & clk->mask;
+}
+
+/* True when the mux's enable bit reads back set. */
+static bool mux_reg_is_enabled(struct mux_clk *clk)
+{
+	u32 ctl = readl_relaxed(*clk->base + clk->offset);
+
+	return (ctl & clk->en_mask) != 0;
+}
+
+/* =================Half-integer RCG without MN counter================= */
+/*
+ * CMD register sits at div_offset; SRC and DIV are fields of the same
+ * CFG word at div_offset + 4, hence the two identical macros.
+ */
+#define RCGR_CMD_REG(x) ((x)->base + (x)->div_offset)
+#define RCGR_DIV_REG(x) ((x)->base + (x)->div_offset + 4)
+#define RCGR_SRC_REG(x) ((x)->base + (x)->div_offset + 4)
+
+/*
+ * Latch a new SRC/DIV configuration: hit the CMD update bit, then poll
+ * until the hardware clears it.  Returns 0 on success, -EBUSY if the
+ * update never completed within UPDATE_CHECK_MAX_LOOPS microseconds.
+ *
+ * Fix: the timeout diagnostic was left as commented-out code, making
+ * -EBUSY returns silent; restore it as a pr_warn.
+ */
+static int rcg_mux_div_update_config(struct mux_div_clk *md)
+{
+	u32 regval, count;
+
+	regval = readl_relaxed(RCGR_CMD_REG(md));
+	regval |= CMD_RCGR_CONFIG_UPDATE_BIT;
+	writel_relaxed(regval, RCGR_CMD_REG(md));
+
+	/* Wait for update to take effect */
+	for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
+		if (!(readl_relaxed(RCGR_CMD_REG(md)) &
+				CMD_RCGR_CONFIG_UPDATE_BIT))
+			return 0;
+		udelay(1);
+	}
+
+	pr_warn("%s didn't update its configuration\n", md->c.dbg_name);
+
+	return -EBUSY;
+}
+
+/*
+ * Read back the currently programmed source select and (doubled)
+ * divider of a half-integer RCG.
+ *
+ * If a configuration update is still pending, the CFG contents are not
+ * authoritative, so bail out without writing the output parameters
+ * (callers keep whatever values they passed in).  Fix: the warning for
+ * that case was left as commented-out code; restore it as a pr_warn so
+ * the early return is no longer silent.
+ */
+static void rcg_get_src_div(struct mux_div_clk *md, u32 *src_sel, u32 *div)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	/* Is there a pending configuration? */
+	regval = readl_relaxed(RCGR_CMD_REG(md));
+	if (regval & CMD_RCGR_CONFIG_DIRTY_MASK) {
+		pr_warn("%s has a pending configuration\n", md->c.dbg_name);
+		spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+		return;
+	}
+
+	regval = readl_relaxed(RCGR_DIV_REG(md));
+	regval &= (md->div_mask << md->div_shift);
+	*div = regval >> md->div_shift;
+
+	/* bypass (field == 0) behaves like the minimum divider */
+	if (*div == 0)
+		*div = 1;
+	/* the div is doubled here: rcg_set_src_div applies the -1 */
+	*div += 1;
+
+	regval = readl_relaxed(RCGR_SRC_REG(md));
+	regval &= (md->src_mask << md->src_shift);
+	*src_sel = regval >> md->src_shift;
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+/*
+ * Program source select and divider, then latch them with an update
+ * cycle.  @div is the doubled half-integer divider; the hardware field
+ * stores (2*div - 1), hence the decrement below (inverse of the +1 in
+ * rcg_get_src_div).  Returns the update-cycle status.
+ */
+static int rcg_set_src_div(struct mux_div_clk *md, u32 src_sel, u32 div)
+{
+	u32 regval;
+	unsigned long flags;
+	int ret;
+
+	/* for half-integer divider, div here is doubled */
+	if (div)
+		div -= 1;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	regval = readl_relaxed(RCGR_DIV_REG(md));
+	regval &= ~(md->div_mask << md->div_shift);
+	regval |= div << md->div_shift;
+	writel_relaxed(regval, RCGR_DIV_REG(md));
+
+	/* DIV and SRC share the CFG word; two RMWs, one update cycle */
+	regval = readl_relaxed(RCGR_SRC_REG(md));
+	regval &= ~(md->src_mask << md->src_shift);
+	regval |= src_sel << md->src_shift;
+	writel_relaxed(regval, RCGR_SRC_REG(md));
+
+	ret = rcg_mux_div_update_config(md);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+	return ret;
+}
+
+/* Enable = reprogram the cached source/divider into the hardware. */
+static int rcg_enable(struct mux_div_clk *md)
+{
+	return rcg_set_src_div(md, md->src_sel, md->data.div);
+}
+
+/*
+ * "Disable" by parking the RCG on its safe source/divider, if one is
+ * configured; otherwise leave the hardware untouched.
+ * NOTE(review): gated on safe_freq but programs safe_parent/safe_div —
+ * assumes all three are set together; confirm against clock data.
+ */
+static void rcg_disable(struct mux_div_clk *md)
+{
+	u32 src_sel;
+
+	if (!md->safe_freq)
+		return;
+
+	src_sel = parent_to_src_sel(md->parents, md->num_parents,
+			md->safe_parent);
+
+	rcg_set_src_div(md, src_sel, md->safe_div);
+}
+
+/* The RCG root is running when the ROOT_STATUS bit reads back clear. */
+static bool rcg_is_enabled(struct mux_div_clk *md)
+{
+	u32 cmd = readl_relaxed(RCGR_CMD_REG(md));
+
+	return !(cmd & CMD_RCGR_ROOT_STATUS_BIT);
+}
+
+/*
+ * Debug helper: expose the RCG's CMD/CFG registers for dumping.
+ * Only bank n == 0 exists.
+ */
+static void __iomem *rcg_list_registers(struct mux_div_clk *md, int n,
+			struct clk_register_data **regs, u32 *size)
+{
+	/* static: the caller keeps referring to this table after we return */
+	static struct clk_register_data data[] = {
+		{"CMD_RCGR", 0x0},
+		{"CFG_RCGR", 0x4},
+	};
+
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return RCGR_CMD_REG(md);
+}
+
+/* Ops table for clocks that need no local control at all. */
+struct clk_ops clk_ops_empty;
+
+/* Reset-only "clock": exposes just the block-reset hook. */
+struct clk_ops clk_ops_rst = {
+	.reset = reset_clk_rst,
+};
+
+/* Root clock generator without MN counter (HID divider only). */
+struct clk_ops clk_ops_rcg = {
+	.enable = rcg_clk_prepare,
+	.set_rate = rcg_clk_set_rate,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = rcg_clk_handoff,
+	.get_parent = rcg_clk_get_parent,
+	.list_registers = rcg_hid_clk_list_registers,
+};
+
+/* Root clock generator with MN counter for fractional rates. */
+struct clk_ops clk_ops_rcg_mnd = {
+	.enable = rcg_clk_prepare,
+	.set_rate = rcg_clk_set_rate,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = rcg_mnd_clk_handoff,
+	.get_parent = rcg_mnd_clk_get_parent,
+	.list_registers = rcg_mnd_clk_list_registers,
+};
+
+/* Display pixel clock RCG (rate set via pixel-specific helpers). */
+struct clk_ops clk_ops_pixel = {
+	.enable = rcg_clk_prepare,
+	.set_rate = set_rate_pixel,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = round_rate_pixel,
+	.handoff = pixel_rcg_handoff,
+	.list_registers = rcg_mnd_clk_list_registers,
+};
+
+/* eDP pixel clock RCG. */
+struct clk_ops clk_ops_edppixel = {
+	.enable = rcg_clk_prepare,
+	.set_rate = set_rate_edp_pixel,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = pixel_rcg_handoff,
+	.list_registers = rcg_mnd_clk_list_registers,
+};
+
+/* DSI byte clock RCG. */
+struct clk_ops clk_ops_byte = {
+	.enable = rcg_clk_prepare,
+	.set_rate = set_rate_byte,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = byte_rcg_handoff,
+	.list_registers = rcg_hid_clk_list_registers,
+};
+
+/* HDMI RCG whose rate follows the HDMI PLL. */
+struct clk_ops clk_ops_rcg_hdmi = {
+	.enable = rcg_clk_prepare,
+	.set_rate = rcg_clk_set_rate_hdmi,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = rcg_clk_handoff,
+	.get_parent = rcg_hdmi_clk_get_parent,
+	.list_registers = rcg_hid_clk_list_registers,
+};
+
+/* eDP RCG whose rate follows the eDP link. */
+struct clk_ops clk_ops_rcg_edp = {
+	.enable = rcg_clk_prepare,
+	.set_rate = rcg_clk_set_rate_edp,
+	.list_rate = rcg_clk_list_rate,
+	.round_rate = rcg_clk_round_rate,
+	.handoff = rcg_clk_handoff,
+	.get_parent = edp_clk_get_parent,
+	.list_registers = rcg_hid_clk_list_registers,
+};
+
+/* Simple branch (leaf gate) clock. */
+struct clk_ops clk_ops_branch = {
+	.enable = branch_clk_enable,
+	.disable = branch_clk_disable,
+	.set_rate = branch_clk_set_rate,
+	.get_rate = branch_clk_get_rate,
+	.list_rate = branch_clk_list_rate,
+	.round_rate = branch_clk_round_rate,
+	.reset = branch_clk_reset,
+	.set_flags = branch_clk_set_flags,
+	.handoff = branch_clk_handoff,
+	.list_registers = branch_clk_list_registers,
+};
+
+/* Clock controlled through a shared voting register. */
+struct clk_ops clk_ops_vote = {
+	.enable = local_vote_clk_enable,
+	.disable = local_vote_clk_disable,
+	.reset = local_vote_clk_reset,
+	.handoff = local_vote_clk_handoff,
+	.list_registers = local_vote_clk_list_registers,
+};
+
+/* Plain gate clock; rate queries are forwarded to the parent. */
+struct clk_ops clk_ops_gate = {
+	.enable = gate_clk_enable,
+	.disable = gate_clk_disable,
+	.get_rate = parent_get_rate,
+	.round_rate = parent_round_rate,
+	.handoff = gate_clk_handoff,
+	.list_registers = gate_clk_list_registers,
+};
+
+/* Register-bit mux callbacks used by the generic mux framework. */
+struct clk_mux_ops mux_reg_ops = {
+	.enable = mux_reg_enable,
+	.disable = mux_reg_disable,
+	.set_mux_sel = mux_reg_set_mux_sel,
+	.get_mux_sel = mux_reg_get_mux_sel,
+	.is_enabled = mux_reg_is_enabled,
+};
+
+/* mux_div (RCG) callbacks used by the generic mux-div framework. */
+struct mux_div_ops rcg_mux_div_ops = {
+	.enable = rcg_enable,
+	.disable = rcg_disable,
+	.set_src_div = rcg_set_src_div,
+	.get_src_div = rcg_get_src_div,
+	.is_enabled = rcg_is_enabled,
+	.list_registers = rcg_list_registers,
+};
diff --git a/drivers/clk/qcom/clock-pll.c b/drivers/clk/qcom/clock-pll.c
new file mode 100644
index 000000000000..7059b5fb6a79
--- /dev/null
+++ b/drivers/clk/qcom/clock-pll.c
@@ -0,0 +1,629 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <soc/qcom/clock-pll.h>
+
+#include "clock.h"
+
+/* PLL MODE register control bits */
+#define PLL_OUTCTRL BIT(0)
+#define PLL_BYPASSNL BIT(1)
+#define PLL_RESET_N BIT(2)
+#define PLL_MODE_MASK BM(3, 0)
+
+/*
+ * Register accessors: *(x)->base is the ioremapped controller base;
+ * the per-register members are added to it after a cast (they appear
+ * to hold byte offsets — see how the clock tables populate them).
+ */
+#define PLL_EN_REG(x) (*(x)->base + (unsigned long) (x)->en_reg)
+#define PLL_STATUS_REG(x) (*(x)->base + (unsigned long) (x)->status_reg)
+#define PLL_MODE_REG(x) (*(x)->base + (unsigned long) (x)->mode_reg)
+#define PLL_L_REG(x) (*(x)->base + (unsigned long) (x)->l_reg)
+#define PLL_M_REG(x) (*(x)->base + (unsigned long) (x)->m_reg)
+#define PLL_N_REG(x) (*(x)->base + (unsigned long) (x)->n_reg)
+#define PLL_CONFIG_REG(x) (*(x)->base + (unsigned long) (x)->config_reg)
+#define PLL_CFG_ALT_REG(x) (*(x)->base + (unsigned long) \
+			(x)->config_alt_reg)
+#define PLL_CFG_CTL_REG(x) (*(x)->base + (unsigned long) \
+			(x)->config_ctl_reg)
+
+/* Serializes all PLL register read-modify-writes in this file */
+static DEFINE_SPINLOCK(pll_reg_lock);
+
+/* Poll budget, in 1us steps, when waiting for enable/lock */
+#define ENABLE_WAIT_MAX_LOOPS 200
+#define PLL_LOCKED_BIT BIT(16)
+
+/* Fixed-rate PLLs always "round" to their configured rate. */
+static long fixed_pll_clk_round_rate(struct clk *c, unsigned long rate)
+{
+	return c->rate;
+}
+
+/*
+ * Cast our vote for a shared (FSM-managed) PLL, then poll its status
+ * bit until the hardware reports it running.  Returns 0 on success or
+ * -ETIMEDOUT (the vote is intentionally left in place on timeout).
+ */
+static int pll_vote_clk_enable(struct clk *c)
+{
+	u32 ena, count;
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	ena = readl_relaxed(PLL_EN_REG(pllv));
+	ena |= pllv->en_mask;
+	writel_relaxed(ena, PLL_EN_REG(pllv));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+	/*
+	 * Use a memory barrier since some PLL status registers are
+	 * not within the same 1K segment as the voting registers.
+	 */
+	mb();
+
+	/* Wait for pll to enable. */
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask)
+			return 0;
+		udelay(1);
+	}
+
+	WARN("PLL %s didn't enable after voting for it!\n", c->dbg_name);
+
+	return -ETIMEDOUT;
+}
+
+/*
+ * Withdraw our vote; the PLL itself powers down only once every other
+ * voter has also released it (handled by the hardware FSM).
+ */
+static void pll_vote_clk_disable(struct clk *c)
+{
+	u32 ena;
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	ena = readl_relaxed(PLL_EN_REG(pllv));
+	ena &= ~(pllv->en_mask);
+	writel_relaxed(ena, PLL_EN_REG(pllv));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+}
+
+/* Enabled iff the PLL's status register reports it active. */
+static int pll_vote_clk_is_enabled(struct clk *c)
+{
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+	u32 status = readl_relaxed(PLL_STATUS_REG(pllv));
+
+	return (status & pllv->status_mask) ? 1 : 0;
+}
+
+/* Handoff: the PLL is considered in use if our vote bit is set. */
+static enum handoff pll_vote_clk_handoff(struct clk *c)
+{
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+	u32 ena = readl_relaxed(PLL_EN_REG(pllv));
+
+	return (ena & pllv->en_mask) ? HANDOFF_ENABLED_CLK :
+				       HANDOFF_DISABLED_CLK;
+}
+
+/*
+ * Debug helper: expose the voting register for dumping.  Only bank
+ * n == 0 exists.
+ */
+static void __iomem *pll_vote_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+	/* static: the caller keeps referring to this table after we return */
+	static struct clk_register_data data1[] = {
+		{"APPS_VOTE", 0x0},
+	};
+
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data1;
+	*size = ARRAY_SIZE(data1);
+	return PLL_EN_REG(pllv);
+}
+
+/* Ops for PLLs shared via the hardware voting FSM. */
+struct clk_ops clk_ops_pll_vote = {
+	.enable = pll_vote_clk_enable,
+	.disable = pll_vote_clk_disable,
+	.is_enabled = pll_vote_clk_is_enabled,
+	.round_rate = fixed_pll_clk_round_rate,
+	.handoff = pll_vote_clk_handoff,
+	.list_registers = pll_vote_clk_list_registers,
+};
+
+/*
+ * Program the PLL USER/CONFIG register for frequency entry @f:
+ * MN-counter enable, pre/post dividers, VCO selection, and main
+ * output enable.  Caller holds pll_reg_lock (or the clock's lock).
+ */
+static void __pll_config_reg(void __iomem *pll_config, struct pll_freq_tbl *f,
+			struct pll_config_masks *masks)
+{
+	u32 regval;
+
+	regval = readl_relaxed(pll_config);
+
+	/* Enable the MN counter if used */
+	if (f->m_val)
+		regval |= masks->mn_en_mask;
+
+	/* Set pre-divider and post-divider values */
+	regval &= ~masks->pre_div_mask;
+	regval |= f->pre_div_val;
+	regval &= ~masks->post_div_mask;
+	regval |= f->post_div_val;
+
+	/* Select VCO setting */
+	regval &= ~masks->vco_mask;
+	regval |= f->vco_val;
+
+	/* Enable main output if it has not been enabled */
+	if (masks->main_output_mask && !(regval & masks->main_output_mask))
+		regval |= masks->main_output_mask;
+
+	writel_relaxed(regval, pll_config);
+}
+
+/*
+ * Enable an SR2 PLL: leave bypass, release reset, poll the lock bit,
+ * then enable the output.  A failed lock is logged but the enable
+ * sequence still completes and 0 is returned (original behaviour).
+ *
+ * Fix: MODE was previously read before pll_reg_lock was taken, so the
+ * read-modify-write sequence below could be based on a stale value if
+ * another CPU touched the register in between.  Read it under the lock.
+ */
+static int sr2_pll_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+	int ret = 0, count;
+	u32 mode;
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	mode = readl_relaxed(PLL_MODE_REG(pll));
+
+	/* Disable PLL bypass mode. */
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset. Delay 10us just to be safe.
+	 */
+	mb();
+	udelay(10);
+
+	/* De-assert active-low PLL reset. */
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Wait for pll to lock. */
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)
+			break;
+		udelay(1);
+	}
+
+	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT))
+		pr_err("PLL %s didn't lock after enabling it!\n", c->dbg_name);
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+	return ret;
+}
+
+/*
+ * Generic PLL power-up sequence on a MODE register: leave bypass,
+ * release reset, wait a fixed 50us for lock (no lock bit is polled
+ * here), then enable the output.  Caller must hold pll_reg_lock.
+ */
+static void __pll_clk_enable_reg(void __iomem *mode_reg)
+{
+	u32 mode = readl_relaxed(mode_reg);
+	/* Disable PLL bypass mode. */
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, mode_reg);
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset. Delay 10us just to be safe.
+	 */
+	mb();
+	udelay(10);
+
+	/* De-assert active-low PLL reset. */
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, mode_reg);
+
+	/* Wait until PLL is locked. */
+	mb();
+	udelay(50);
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, mode_reg);
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+}
+
+/* Enable a locally-owned PLL via the generic MODE sequence. */
+static int local_pll_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	__pll_clk_enable_reg(PLL_MODE_REG(pll));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+	return 0;
+}
+
+/* Clear all MODE control bits: output off, bypass on, reset asserted. */
+static void __pll_clk_disable_reg(void __iomem *mode_reg)
+{
+	u32 mode = readl_relaxed(mode_reg);
+	mode &= ~PLL_MODE_MASK;
+	writel_relaxed(mode, mode_reg);
+}
+
+/* Disable a locally-owned PLL. */
+static void local_pll_clk_disable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+
+	/*
+	 * Disable the PLL output, disable test mode, enable
+	 * the bypass mode, and assert the reset.
+	 */
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	__pll_clk_disable_reg(PLL_MODE_REG(pll));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+}
+
+/*
+ * Detect a PLL the bootloader left running.  If it is enabled and no
+ * rate was declared in the clock tables, reconstruct the rate from the
+ * L/M/N registers: rate = parent * L (+ parent * M / N when the MN
+ * counter is in use per the USER register).
+ */
+static enum handoff local_pll_clk_handoff(struct clk *c)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
+	u32 mask = PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL;
+	unsigned long parent_rate;
+	u32 lval, mval, nval, userval;
+
+	/* Enabled only when bypass is off, reset released, output on */
+	if ((mode & mask) != mask)
+		return HANDOFF_DISABLED_CLK;
+
+	/* Assume bootloaders configure PLL to c->rate */
+	if (c->rate)
+		return HANDOFF_ENABLED_CLK;
+
+	parent_rate = clk_get_rate(c->parent);
+	lval = readl_relaxed(PLL_L_REG(pll));
+	mval = readl_relaxed(PLL_M_REG(pll));
+	nval = readl_relaxed(PLL_N_REG(pll));
+	userval = readl_relaxed(PLL_CONFIG_REG(pll));
+
+	c->rate = parent_rate * lval;
+
+	/*
+	 * NOTE(review): the MN contribution is gated on any non-zero
+	 * USER value, not specifically on (userval & mn_en_mask);
+	 * confirm that is intentional.
+	 */
+	if (pll->masks.mn_en_mask && userval) {
+		if (!nval)
+			nval = 1;
+		c->rate += (parent_rate * mval) / nval;
+	}
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+/*
+ * Round up to the first table frequency >= @rate; requests above the
+ * table's maximum are clamped to the last (highest) entry.  The table
+ * is assumed to be sorted ascending and PLL_FREQ_END-terminated.
+ */
+static long local_pll_clk_round_rate(struct clk *c, unsigned long rate)
+{
+	struct pll_freq_tbl *nf;
+	struct pll_clk *pll = to_pll_clk(c);
+
+	if (!pll->freq_tbl)
+		return -EINVAL;
+
+	for (nf = pll->freq_tbl; nf->freq_hz != PLL_FREQ_END; nf++)
+		if (nf->freq_hz >= rate)
+			return nf->freq_hz;
+
+	/* Ran off the end: return the highest supported frequency */
+	nf--;
+	return nf->freq_hz;
+}
+
+/*
+ * Reprogram the PLL to an exact frequency-table entry (-EINVAL if
+ * @rate is not in the table).  If the clock is in use it is bounced
+ * (disable, reprogram L/M/N + config, re-enable) under its lock.
+ */
+static int local_pll_clk_set_rate(struct clk *c, unsigned long rate)
+{
+	struct pll_freq_tbl *nf;
+	struct pll_clk *pll = to_pll_clk(c);
+	unsigned long flags;
+
+	for (nf = pll->freq_tbl; nf->freq_hz != PLL_FREQ_END
+			&& nf->freq_hz != rate; nf++)
+		;
+
+	if (nf->freq_hz == PLL_FREQ_END)
+		return -EINVAL;
+
+	/*
+	 * Ensure PLL is off before changing rate. For optimization reasons,
+	 * assume no downstream clock is actively using it.
+	 */
+	spin_lock_irqsave(&c->lock, flags);
+	if (c->count)
+		c->ops->disable(c);
+
+	writel_relaxed(nf->l_val, PLL_L_REG(pll));
+	writel_relaxed(nf->m_val, PLL_M_REG(pll));
+	writel_relaxed(nf->n_val, PLL_N_REG(pll));
+
+	__pll_config_reg(PLL_CONFIG_REG(pll), nf, &pll->masks);
+
+	if (c->count)
+		c->ops->enable(c);
+
+	spin_unlock_irqrestore(&c->lock, flags);
+	return 0;
+}
+
+/*
+ * Enable an SR PLL.  Note the ordering differs from the generic
+ * sequence: reset is released before bypass is disabled, and lock is
+ * awaited with a fixed 60us delay instead of polling a status bit.
+ */
+int sr_pll_clk_enable(struct clk *c)
+{
+	u32 mode;
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	mode = readl_relaxed(PLL_MODE_REG(pll));
+	/* De-assert active-low PLL reset. */
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset. Delay 10us just to be safe.
+	 */
+	mb();
+	udelay(10);
+
+	/* Disable PLL bypass mode. */
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Wait until PLL is locked. */
+	mb();
+	udelay(60);
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+	return 0;
+}
+
+/*
+ * Enable an SR HPM/LP PLL: bypass and reset are released in a single
+ * MODE write (overwriting, not read-modify-write), then the lock bit
+ * is polled.  On timeout the output is left disabled and -ETIMEDOUT
+ * is returned.
+ */
+int sr_hpm_lp_pll_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 count, mode;
+	int ret = 0;
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+
+	/* Disable PLL bypass mode and de-assert reset. */
+	mode = PLL_BYPASSNL | PLL_RESET_N;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Wait for pll to lock. */
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)
+			break;
+		udelay(1);
+	}
+
+	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)) {
+		WARN("PLL %s didn't lock after enabling it!\n", c->dbg_name);
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Ensure the write above goes through before returning. */
+	mb();
+
+out:
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+	return ret;
+}
+
+/*
+ * Debug helper: expose the PLL register file for dumping, assuming the
+ * standard register layout starting at MODE.  Only bank n == 0 exists.
+ */
+static void __iomem *local_pll_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	/* Not compatible with 8960 & friends */
+	struct pll_clk *pll = to_pll_clk(c);
+	/* static: the caller keeps referring to this table after we return */
+	static struct clk_register_data data[] = {
+		{"MODE", 0x0},
+		{"L", 0x4},
+		{"M", 0x8},
+		{"N", 0xC},
+		{"USER", 0x10},
+		{"CONFIG", 0x14},
+		{"STATUS", 0x1C},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return PLL_MODE_REG(pll);
+}
+
+
+/* Ops for PLLs fully owned by this processor. */
+struct clk_ops clk_ops_local_pll = {
+	.enable = local_pll_clk_enable,
+	.disable = local_pll_clk_disable,
+	.set_rate = local_pll_clk_set_rate,
+	.handoff = local_pll_clk_handoff,
+	.list_registers = local_pll_clk_list_registers,
+};
+
+/* Ops for SR2 PLLs (lock-bit polled enable sequence). */
+struct clk_ops clk_ops_sr2_pll = {
+	.enable = sr2_pll_clk_enable,
+	.disable = local_pll_clk_disable,
+	.set_rate = local_pll_clk_set_rate,
+	.round_rate = local_pll_clk_round_rate,
+	.handoff = local_pll_clk_handoff,
+	.list_registers = local_pll_clk_list_registers,
+};
+
+/* Protects the shared *soft_vote bitmask across voter clocks */
+static DEFINE_SPINLOCK(soft_vote_lock);
+
+/*
+ * Software-refcounted voting: only the first software voter casts the
+ * real hardware vote; later voters just set their bit in *soft_vote.
+ */
+static int pll_acpu_vote_clk_enable(struct clk *c)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&soft_vote_lock, flags);
+
+	if (!*pllv->soft_vote)
+		ret = pll_vote_clk_enable(c);
+	if (ret == 0)
+		*pllv->soft_vote |= (pllv->soft_vote_mask);
+
+	spin_unlock_irqrestore(&soft_vote_lock, flags);
+	return ret;
+}
+
+/*
+ * Clear our software vote; withdraw the hardware vote only once no
+ * software voter remains.
+ */
+static void pll_acpu_vote_clk_disable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&soft_vote_lock, flags);
+
+	*pllv->soft_vote &= ~(pllv->soft_vote_mask);
+	if (!*pllv->soft_vote)
+		pll_vote_clk_disable(c);
+
+	spin_unlock_irqrestore(&soft_vote_lock, flags);
+}
+
+/*
+ * Handoff for software-voted PLLs: if the hardware vote shows the PLL
+ * enabled, also take our software vote so the refcounts stay in sync.
+ */
+static enum handoff pll_acpu_vote_clk_handoff(struct clk *c)
+{
+	if (pll_vote_clk_handoff(c) == HANDOFF_DISABLED_CLK ||
+	    pll_acpu_vote_clk_enable(c))
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+/* Ops for voted PLLs with an additional software vote refcount. */
+struct clk_ops clk_ops_pll_acpu_vote = {
+	.enable = pll_acpu_vote_clk_enable,
+	.disable = pll_acpu_vote_clk_disable,
+	.round_rate = fixed_pll_clk_round_rate,
+	.is_enabled = pll_vote_clk_is_enabled,
+	.handoff = pll_acpu_vote_clk_handoff,
+	.list_registers = pll_vote_clk_list_registers,
+};
+
+/*
+ * Put a PLL under control of the hardware voting FSM: release the FSM
+ * reset, program its bias/lock settle counts, then enable FSM voting.
+ * Bit positions (21, 19:14, 13:8, 20) follow the MODE register layout
+ * of FSM-capable PLLs on these targets.
+ */
+static void __set_fsm_mode(void __iomem *mode_reg,
+					u32 bias_count, u32 lock_count)
+{
+	u32 regval = readl_relaxed(mode_reg);
+
+	/* De-assert reset to FSM */
+	regval &= ~BIT(21);
+	writel_relaxed(regval, mode_reg);
+
+	/* Program bias count */
+	regval &= ~BM(19, 14);
+	regval |= BVAL(19, 14, bias_count);
+	writel_relaxed(regval, mode_reg);
+
+	/* Program lock count */
+	regval &= ~BM(13, 8);
+	regval |= BVAL(13, 8, lock_count);
+	writel_relaxed(regval, mode_reg);
+
+	/* Enable PLL FSM voting */
+	regval |= BIT(20);
+	writel_relaxed(regval, mode_reg);
+}
+
+/*
+ * Apply the optional alternate-config field (mask/value pair) to the
+ * PLL's CONFIG_ALT register; a zero mask leaves the register as-is.
+ */
+static void __configure_alt_config(struct pll_alt_config config,
+		struct pll_config_regs *regs)
+{
+	u32 regval;
+
+	regval = readl_relaxed(PLL_CFG_ALT_REG(regs));
+
+	if (config.mask) {
+		regval &= ~config.mask;
+		regval |= config.val;
+	}
+
+	writel_relaxed(regval, PLL_CFG_ALT_REG(regs));
+}
+
+/*
+ * One-time PLL configuration: write L/M/N, then build the CONFIG word
+ * field by field (MN enable, main/aux outputs, dividers, VCO, add
+ * factor), and finally apply the optional ALT/CTL registers.
+ * @ena_fsm_mode is unused here; the configure_sr_* wrappers act on it.
+ */
+void __configure_pll(struct pll_config *config,
+		struct pll_config_regs *regs, u32 ena_fsm_mode)
+{
+	u32 regval;
+
+	writel_relaxed(config->l, PLL_L_REG(regs));
+	writel_relaxed(config->m, PLL_M_REG(regs));
+	writel_relaxed(config->n, PLL_N_REG(regs));
+
+	regval = readl_relaxed(PLL_CONFIG_REG(regs));
+
+	/* Enable the MN accumulator */
+	if (config->mn_ena_mask) {
+		regval &= ~config->mn_ena_mask;
+		regval |= config->mn_ena_val;
+	}
+
+	/* Enable the main output */
+	if (config->main_output_mask) {
+		regval &= ~config->main_output_mask;
+		regval |= config->main_output_val;
+	}
+
+	/* Enable the aux output */
+	if (config->aux_output_mask) {
+		regval &= ~config->aux_output_mask;
+		regval |= config->aux_output_val;
+	}
+
+	/* Set pre-divider and post-divider values */
+	regval &= ~config->pre_div_mask;
+	regval |= config->pre_div_val;
+	regval &= ~config->post_div_mask;
+	regval |= config->post_div_val;
+
+	/* Select VCO setting */
+	regval &= ~config->vco_mask;
+	regval |= config->vco_val;
+
+	if (config->add_factor_mask) {
+		regval &= ~config->add_factor_mask;
+		regval |= config->add_factor_val;
+	}
+
+	writel_relaxed(regval, PLL_CONFIG_REG(regs));
+
+	if (regs->config_alt_reg)
+		__configure_alt_config(config->alt_cfg, regs);
+
+	if (regs->config_ctl_reg)
+		writel_relaxed(config->cfg_ctl_val, PLL_CFG_CTL_REG(regs));
+}
+
+/* Configure an SR PLL; optionally hand it to the FSM (lock count 0x8). */
+void configure_sr_pll(struct pll_config *config,
+		struct pll_config_regs *regs, u32 ena_fsm_mode)
+{
+	__configure_pll(config, regs, ena_fsm_mode);
+	if (ena_fsm_mode)
+		__set_fsm_mode(PLL_MODE_REG(regs), 0x1, 0x8);
+}
+
+/* Configure an SR HPM/LP PLL; FSM variant uses lock count 0x0. */
+void configure_sr_hpm_lp_pll(struct pll_config *config,
+		struct pll_config_regs *regs, u32 ena_fsm_mode)
+{
+	__configure_pll(config, regs, ena_fsm_mode);
+	if (ena_fsm_mode)
+		__set_fsm_mode(PLL_MODE_REG(regs), 0x1, 0x0);
+}
+
diff --git a/drivers/clk/qcom/clock-rpm-8916.c b/drivers/clk/qcom/clock-rpm-8916.c
new file mode 100644
index 000000000000..ccc27db0e40e
--- /dev/null
+++ b/drivers/clk/qcom/clock-rpm-8916.c
@@ -0,0 +1,231 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <soc/qcom/clock-local2.h>
+#include <soc/qcom/clock-rpm.h>
+#include <soc/qcom/clock-voter.h>
+#include <soc/qcom/rpm-smd.h>
+
+#include <linux/clk/msm-clock-generic.h>
+
+#include <dt-bindings/clock/msm-clocks-8916.h>
+
+#include "clock.h"
+
+/* GCC register offset of the debug clock mux control */
+#define GCC_DEBUG_CLK_CTL 0x74000
+/*
+ * RPM resource-type identifiers (appear to be little-endian ASCII
+ * fourccs, e.g. 0x306b6c63 == "clk0" — matches the RPM SMD protocol).
+ */
+#define RPM_MISC_CLK_TYPE 0x306b6c63
+#define RPM_BUS_CLK_TYPE 0x316b6c63
+#define RPM_MEM_CLK_TYPE 0x326b6c63
+#define RPM_SMD_KEY_ENABLE 0x62616E45
+
+/* Resource IDs within RPM_MISC_CLK_TYPE */
+#define CXO_ID 0x0
+#define QDSS_ID 0x1
+#define BUS_SCALING 0x2
+
+/* Resource IDs within the bus/memory clock types */
+#define PCNOC_ID 0x0
+#define SNOC_ID 0x1
+#define BIMC_ID 0x0
+
+/* XO clock buffer IDs */
+#define BB_CLK1_ID 1
+#define BB_CLK2_ID 2
+#define RF_CLK1_ID 4
+#define RF_CLK2_ID 5
+
+/* Ioremapped GCC base, filled in at probe time; used by the debug mux */
+static void __iomem *virt_base;
+
+/* SMD clocks: active/sleep-set pairs voted through the RPM */
+DEFINE_CLK_RPM_SMD(pcnoc_clk, pcnoc_a_clk, RPM_BUS_CLK_TYPE, PCNOC_ID, NULL);
+DEFINE_CLK_RPM_SMD(snoc_clk, snoc_a_clk, RPM_BUS_CLK_TYPE, SNOC_ID, NULL);
+DEFINE_CLK_RPM_SMD(bimc_clk, bimc_a_clk, RPM_MEM_CLK_TYPE, BIMC_ID, NULL);
+
+/* 19.2 MHz crystal oscillator, modelled as a fixed-rate RPM branch */
+DEFINE_CLK_RPM_SMD_BRANCH(xo_clk_src, xo_a_clk_src,
+				RPM_MISC_CLK_TYPE, CXO_ID, 19200000);
+
+DEFINE_CLK_RPM_SMD_QDSS(qdss_clk, qdss_a_clk, RPM_MISC_CLK_TYPE, QDSS_ID);
+
+/* SMD_XO_BUFFER */
+DEFINE_CLK_RPM_SMD_XO_BUFFER(bb_clk1, bb_clk1_a, BB_CLK1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(bb_clk2, bb_clk2_a, BB_CLK2_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(rf_clk1, rf_clk1_a, RF_CLK1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(rf_clk2, rf_clk2_a, RF_CLK2_ID);
+
+/* Pin-controlled variants of the XO buffers */
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(bb_clk1_pin, bb_clk1_a_pin, BB_CLK1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(bb_clk2_pin, bb_clk2_a_pin, BB_CLK2_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(rf_clk1_pin, rf_clk1_a_pin, RF_CLK1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(rf_clk2_pin, rf_clk2_a_pin, RF_CLK2_ID);
+
+/* Voter clocks: per-client handles that aggregate onto the real clock */
+static DEFINE_CLK_VOTER(pcnoc_msmbus_clk, &pcnoc_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(snoc_msmbus_clk, &snoc_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(bimc_msmbus_clk, &bimc_clk.c, LONG_MAX);
+
+static DEFINE_CLK_VOTER(pcnoc_msmbus_a_clk, &pcnoc_a_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(snoc_msmbus_a_clk, &snoc_a_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(bimc_msmbus_a_clk, &bimc_a_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(bimc_acpu_a_clk, &bimc_a_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(pcnoc_keepalive_a_clk, &pcnoc_a_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(pcnoc_sps_clk, &pcnoc_a_clk.c, LONG_MAX);
+
+static DEFINE_CLK_VOTER(pcnoc_usb_a_clk, &pcnoc_a_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(snoc_usb_a_clk, &snoc_a_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(bimc_usb_a_clk, &bimc_a_clk.c, LONG_MAX);
+
+/* Branch Voter clocks: on/off-only votes on the XO source */
+static DEFINE_CLK_BRANCH_VOTER(xo_gcc, &xo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(xo_otg_clk, &xo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(xo_lpm_clk, &xo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(xo_pil_pronto_clk, &xo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(xo_pil_mss_clk, &xo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(xo_wlan_clk, &xo_clk_src.c);
+
+/*
+ * Debug mux routed through GCC_DEBUG_CLK_CTL, used to measure the RPM
+ * bus clocks.  Selector field is 9 bits wide; BIT(16) gates the mux.
+ */
+static struct mux_clk rpm_debug_mux = {
+	.ops = &mux_reg_ops,
+	.offset = GCC_DEBUG_CLK_CTL,
+	.en_mask = BIT(16),
+	.mask = 0x1FF,
+	.base = &virt_base,
+	MUX_SRC_LIST(
+		{&snoc_clk.c, 0x0000},
+		{&pcnoc_clk.c, 0x0008},
+	/* BIMC_CLK is 2x clock to the BIMC Core as well as DDR, while the
+	 * axi clock is for the BIMC AXI interface. The AXI clock is 1/2 of
+	 * the BIMC Clock. measure the gcc_bimc_apss_axi_clk.
+	 */
+		{&bimc_clk.c, 0x0155},
+	),
+	.c = {
+		.dbg_name = "rpm_debug_mux",
+		.ops = &clk_ops_gen_mux,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(rpm_debug_mux.c),
+	},
+};
+
+/* Lookup Table */
+/* Lookup table registered with the clock framework at probe time */
+static struct clk_lookup msm_clocks_rpm[] = {
+	CLK_LIST(xo_clk_src),
+	CLK_LIST(xo_a_clk_src),
+	CLK_LIST(xo_otg_clk),
+	CLK_LIST(xo_lpm_clk),
+	CLK_LIST(xo_pil_mss_clk),
+	CLK_LIST(xo_pil_pronto_clk),
+	CLK_LIST(xo_wlan_clk),
+
+	/* Bus and memory voter handles */
+	CLK_LIST(snoc_msmbus_clk),
+	CLK_LIST(snoc_msmbus_a_clk),
+	CLK_LIST(pcnoc_msmbus_clk),
+	CLK_LIST(pcnoc_msmbus_a_clk),
+	CLK_LIST(bimc_msmbus_clk),
+	CLK_LIST(bimc_msmbus_a_clk),
+	CLK_LIST(bimc_acpu_a_clk),
+	CLK_LIST(pcnoc_keepalive_a_clk),
+
+	CLK_LIST(pcnoc_usb_a_clk),
+	CLK_LIST(snoc_usb_a_clk),
+	CLK_LIST(bimc_usb_a_clk),
+
+	/* CoreSight clocks */
+	CLK_LIST(qdss_clk),
+	CLK_LIST(qdss_a_clk),
+
+	/* Underlying RPM SMD clocks */
+	CLK_LIST(snoc_clk),
+	CLK_LIST(pcnoc_clk),
+	CLK_LIST(bimc_clk),
+	CLK_LIST(snoc_a_clk),
+	CLK_LIST(pcnoc_a_clk),
+	CLK_LIST(bimc_a_clk),
+
+	/* XO buffers */
+	CLK_LIST(bb_clk1),
+	CLK_LIST(bb_clk2),
+	CLK_LIST(rf_clk1),
+	CLK_LIST(rf_clk2),
+
+	CLK_LIST(bb_clk1_pin),
+	CLK_LIST(bb_clk2_pin),
+	CLK_LIST(rf_clk1_pin),
+	CLK_LIST(rf_clk2_pin),
+
+	/* RPM debug Mux*/
+	CLK_LIST(rpm_debug_mux),
+};
+
+/*
+ * Probe: enable RPM rate scaling, map the GCC registers for the debug
+ * mux, register all RPM clocks, and take the always-on keepalive and
+ * XO votes.
+ *
+ * Fixes: the probe callback was marked __init while being referenced
+ * from a platform_driver (section mismatch; a deferred probe would run
+ * freed .init.text), and a missing DT resource returned -ENOMEM where
+ * -EINVAL is the conventional "bad platform data" error.
+ */
+static int msm_rpmcc_8916_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	int ret;
+
+	ret = enable_rpm_scaling();
+	if (ret)
+		return ret;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base");
+	if (!res) {
+		dev_err(&pdev->dev, "Unable to get register base\n");
+		return -EINVAL;
+	}
+
+	virt_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!virt_base) {
+		dev_err(&pdev->dev, "Failed to map CC registers\n");
+		return -ENOMEM;
+	}
+
+	ret = of_msm_clock_register(pdev->dev.of_node, msm_clocks_rpm,
+				ARRAY_SIZE(msm_clocks_rpm));
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to register RPM clocks\n");
+		return ret;
+	}
+
+	/*
+	 * Hold an active set vote for PCNOC AHB source. Sleep set vote is 0.
+	 */
+	clk_set_rate(&pcnoc_keepalive_a_clk.c, 19200000);
+	clk_prepare_enable(&pcnoc_keepalive_a_clk.c);
+
+	clk_prepare_enable(&xo_a_clk_src.c);
+
+	dev_info(&pdev->dev, "Registered RPM clocks.\n");
+
+	return 0;
+}
+
+/* DT match table; const — the OF core never writes to it. */
+static const struct of_device_id msm_clk_rpm_match_table[] = {
+	{ .compatible = "qcom,rpmcc-8916" },
+	{}
+};
+
+static struct platform_driver msm_clock_rpm_ops = {
+	.probe = msm_rpmcc_8916_probe,
+	.driver = {
+		.name = "qcom,rpmcc-8916",
+		.of_match_table = msm_clk_rpm_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+/*
+ * Registered at arch_initcall time: RPM clocks must exist before the
+ * many early consumers that probe during device_initcall.
+ */
+int __init msm_rpmcc_8916_init(void)
+{
+	return platform_driver_register(&msm_clock_rpm_ops);
+}
+arch_initcall(msm_rpmcc_8916_init);
diff --git a/drivers/clk/qcom/clock-rpm.c b/drivers/clk/qcom/clock-rpm.c
new file mode 100644
index 000000000000..abcab94177dd
--- /dev/null
+++ b/drivers/clk/qcom/clock-rpm.c
@@ -0,0 +1,318 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <soc/qcom/clock-rpm.h>
+
+/* Dispatch a rate vote to the RPM backend for the given RPM context id. */
+#define __clk_rpmrs_set_rate(r, value, ctx) \
+	((r)->rpmrs_data->set_rate_fn((r), (value), (ctx)))
+
+/* Vote on the RPM sleep set (request honored while the subsystem sleeps). */
+#define clk_rpmrs_set_rate_sleep(r, value) \
+	__clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_sleep_id)
+
+/* Vote on the RPM active set (request honored while the subsystem is awake). */
+#define clk_rpmrs_set_rate_active(r, value) \
+	__clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_active_id)
+
+/* Send a single key/value rate vote to the RPM over SMD. */
+static int clk_rpmrs_set_rate_smd(struct rpm_clk *r, uint32_t value,
+				uint32_t context)
+{
+	struct msm_rpm_kvp kvp = {
+		.key = r->rpm_key,
+		.data = (void *)&value,
+		.length = sizeof(value),
+	};
+
+	return msm_rpm_send_message(context, r->rpm_res_type, r->rpm_clk_id,
+			&kvp, 1);
+}
+
+/*
+ * Boot handoff: the true RPM rate cannot be queried over SMD, so assume
+ * non-branch clocks run at the maximum rate (INT_MAX sentinel).
+ */
+static int clk_rpmrs_handoff_smd(struct rpm_clk *r)
+{
+	if (!r->branch)
+		r->c.rate = INT_MAX;
+
+	return 0;
+}
+
+/* Enabled state is tracked locally via the prepare refcount. */
+static int clk_rpmrs_is_enabled_smd(struct rpm_clk *r)
+{
+	return !!r->c.prepare_count;
+}
+
+/* Backend vtable abstracting how votes reach the RPM. */
+struct clk_rpmrs_data {
+	int (*set_rate_fn)(struct rpm_clk *r, uint32_t value, uint32_t context);
+	int (*get_rate_fn)(struct rpm_clk *r);
+	int (*handoff_fn)(struct rpm_clk *r);
+	int (*is_enabled)(struct rpm_clk *r);
+	int ctx_active_id;
+	int ctx_sleep_id;
+};
+
+/* SMD transport backend (no get_rate_fn: rate cannot be read back). */
+struct clk_rpmrs_data clk_rpmrs_data_smd = {
+	.set_rate_fn = clk_rpmrs_set_rate_smd,
+	.handoff_fn = clk_rpmrs_handoff_smd,
+	.is_enabled = clk_rpmrs_is_enabled_smd,
+	.ctx_active_id = MSM_RPM_CTX_ACTIVE_SET,
+	.ctx_sleep_id = MSM_RPM_CTX_SLEEP_SET,
+};
+
+static DEFINE_MUTEX(rpm_clock_lock);
+
+/*
+ * Split a rate (Hz) into the active-set and sleep-set votes (kHz) that
+ * should be sent to the RPM for clock @r.
+ */
+static void to_active_sleep_khz(struct rpm_clk *r, unsigned long rate,
+			unsigned long *active_khz, unsigned long *sleep_khz)
+{
+	/* Convert the rate (hz) to khz */
+	*active_khz = DIV_ROUND_UP(rate, 1000);
+
+	/*
+	 * Active-only clocks don't care what the rate is during sleep. So,
+	 * they vote for zero.
+	 */
+	if (r->active_only)
+		*sleep_khz = 0;
+	else
+		*sleep_khz = *active_khz;
+}
+
+/*
+ * Send this clock's rate request to the RPM. Active and sleep set votes are
+ * each aggregated (max) with the peer clock's request before being sent, and
+ * r->enabled is recorded so the peer can do the same aggregation later.
+ */
+static int rpm_clk_prepare(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+	uint32_t value;
+	int rc = 0;
+	unsigned long this_khz, this_sleep_khz;
+	unsigned long peer_khz = 0, peer_sleep_khz = 0;
+	struct rpm_clk *peer = r->peer;
+
+	mutex_lock(&rpm_clock_lock);
+
+	to_active_sleep_khz(r, r->c.rate, &this_khz, &this_sleep_khz);
+
+	/* Don't send requests to the RPM if the rate has not been set. */
+	if (this_khz == 0)
+		goto out;
+
+	/* Take peer clock's rate into account only if it's enabled. */
+	if (peer->enabled)
+		to_active_sleep_khz(peer, peer->c.rate,
+				&peer_khz, &peer_sleep_khz);
+
+	value = max(this_khz, peer_khz);
+	/* Branch (on/off only) clocks vote 0 or 1 rather than a rate. */
+	if (r->branch)
+		value = !!value;
+
+	rc = clk_rpmrs_set_rate_active(r, value);
+	if (rc)
+		goto out;
+
+	value = max(this_sleep_khz, peer_sleep_khz);
+	if (r->branch)
+		value = !!value;
+
+	rc = clk_rpmrs_set_rate_sleep(r, value);
+	if (rc) {
+		/* Undo the active set vote and restore it to peer_khz */
+		value = peer_khz;
+		rc = clk_rpmrs_set_rate_active(r, value);
+	}
+
+out:
+	if (!rc)
+		r->enabled = true;
+
+	mutex_unlock(&rpm_clock_lock);
+
+	return rc;
+}
+
+/*
+ * Drop this clock's RPM vote: re-aggregate using only the peer clock's
+ * request and send the reduced active-set and sleep-set votes to the RPM.
+ * r->enabled is cleared only once both votes were removed successfully,
+ * matching how rpm_clk_prepare() treats partial failure.
+ */
+static void rpm_clk_unprepare(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+
+	mutex_lock(&rpm_clock_lock);
+
+	if (r->c.rate) {
+		uint32_t value;
+		struct rpm_clk *peer = r->peer;
+		unsigned long peer_khz = 0, peer_sleep_khz = 0;
+		int rc;
+
+		/* Take peer clock's rate into account only if it's enabled. */
+		if (peer->enabled)
+			to_active_sleep_khz(peer, peer->c.rate,
+				&peer_khz, &peer_sleep_khz);
+
+		value = r->branch ? !!peer_khz : peer_khz;
+		rc = clk_rpmrs_set_rate_active(r, value);
+		if (rc)
+			goto out;
+
+		value = r->branch ? !!peer_sleep_khz : peer_sleep_khz;
+		rc = clk_rpmrs_set_rate_sleep(r, value);
+		/* Don't mark disabled if the sleep-set vote wasn't removed */
+		if (rc)
+			goto out;
+	}
+	r->enabled = false;
+out:
+	mutex_unlock(&rpm_clock_lock);
+}
+
+/*
+ * Update this clock's RPM vote to @rate. Votes are only sent while the
+ * clock is enabled; otherwise the new rate is just cached by the caller
+ * (generic clk_set_rate()) and sent on the next prepare.
+ */
+static int rpm_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+	unsigned long this_khz, this_sleep_khz;
+	int rc = 0;
+
+	mutex_lock(&rpm_clock_lock);
+
+	if (r->enabled) {
+		uint32_t value;
+		struct rpm_clk *peer = r->peer;
+		unsigned long peer_khz = 0, peer_sleep_khz = 0;
+
+		to_active_sleep_khz(r, rate, &this_khz, &this_sleep_khz);
+
+		/* Take peer clock's rate into account only if it's enabled. */
+		if (peer->enabled)
+			to_active_sleep_khz(peer, peer->c.rate,
+					&peer_khz, &peer_sleep_khz);
+
+		value = max(this_khz, peer_khz);
+		rc = clk_rpmrs_set_rate_active(r, value);
+		if (rc)
+			goto out;
+
+		value = max(this_sleep_khz, peer_sleep_khz);
+		rc = clk_rpmrs_set_rate_sleep(r, value);
+	}
+
+out:
+	mutex_unlock(&rpm_clock_lock);
+
+	return rc;
+}
+
+/* Branch (on/off) clocks are fixed-rate: only the current rate is allowed. */
+static int rpm_branch_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	if (rate == clk->rate)
+		return 0;
+
+	return -EPERM;
+}
+
+/* Report the cached rate unless the backend can query the RPM directly. */
+static unsigned long rpm_clk_get_rate(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+	if (r->rpmrs_data->get_rate_fn)
+		return r->rpmrs_data->get_rate_fn(r);
+	else
+		return clk->rate;
+}
+
+static int rpm_clk_is_enabled(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+	return r->rpmrs_data->is_enabled(r);
+}
+
+/* The RPM performs no rounding; accept any requested rate verbatim. */
+static long rpm_clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	/* Not supported. */
+	return rate;
+}
+
+/* RPM clocks are owned by a remote processor, never controlled locally. */
+static bool rpm_clk_is_local(struct clk *clk)
+{
+	return false;
+}
+
+static enum handoff rpm_clk_handoff(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+	int rc;
+
+	/*
+	 * Querying an RPM clock's status will return 0 unless the clock's
+	 * rate has previously been set through the RPM. When handing off,
+	 * assume these clocks are enabled (unless the RPM call fails) so
+	 * child clocks of these RPM clocks can still be handed off.
+	 */
+	rc = r->rpmrs_data->handoff_fn(r);
+	if (rc < 0)
+		return HANDOFF_DISABLED_CLK;
+
+	/*
+	 * Since RPM handoff code may update the software rate of the clock by
+	 * querying the RPM, we need to make sure our request to RPM now
+	 * matches the software rate of the clock. When we send the request
+	 * to RPM, we also need to update any other state info we would
+	 * normally update. So, call the appropriate clock function instead
+	 * of directly using the RPM driver APIs.
+	 */
+	rc = rpm_clk_prepare(clk);
+	if (rc < 0)
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+/* 'clkm' resource type and the id of its scaling-enable key */
+#define RPM_MISC_CLK_TYPE	0x306b6c63
+#define RPM_SCALING_ENABLE_ID	0x2
+
+/*
+ * Tell the RPM to honor rate (scaling) requests on both the sleep and
+ * active sets. Must succeed before any clock votes are sent; may return
+ * -EPROBE_DEFER if the RPM SMD driver is not ready yet.
+ */
+int enable_rpm_scaling(void)
+{
+	int rc, value = 0x1;
+	struct msm_rpm_kvp kvp = {
+		.key = RPM_SMD_KEY_ENABLE,
+		.data = (void *)&value,
+		.length = sizeof(value),
+	};
+
+	rc = msm_rpm_send_message_noirq(MSM_RPM_CTX_SLEEP_SET,
+			RPM_MISC_CLK_TYPE, RPM_SCALING_ENABLE_ID, &kvp, 1);
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			WARN(1, "RPM clock scaling (sleep set) did not enable!\n");
+		return rc;
+	}
+
+	rc = msm_rpm_send_message_noirq(MSM_RPM_CTX_ACTIVE_SET,
+			RPM_MISC_CLK_TYPE, RPM_SCALING_ENABLE_ID, &kvp, 1);
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			WARN(1, "RPM clock scaling (active set) did not enable!\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+/* Ops for rate-voting RPM clocks */
+struct clk_ops clk_ops_rpm = {
+	.prepare = rpm_clk_prepare,
+	.unprepare = rpm_clk_unprepare,
+	.set_rate = rpm_clk_set_rate,
+	.get_rate = rpm_clk_get_rate,
+	.is_enabled = rpm_clk_is_enabled,
+	.round_rate = rpm_clk_round_rate,
+	.is_local = rpm_clk_is_local,
+	.handoff = rpm_clk_handoff,
+};
+
+/* Ops for on/off-only RPM branch clocks (rate is fixed) */
+struct clk_ops clk_ops_rpm_branch = {
+	.prepare = rpm_clk_prepare,
+	.unprepare = rpm_clk_unprepare,
+	.set_rate = rpm_branch_clk_set_rate,
+	.is_local = rpm_clk_is_local,
+	.handoff = rpm_clk_handoff,
+};
diff --git a/drivers/clk/qcom/clock-voter.c b/drivers/clk/qcom/clock-voter.c
new file mode 100644
index 000000000000..c60660912921
--- /dev/null
+++ b/drivers/clk/qcom/clock-voter.c
@@ -0,0 +1,175 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <soc/qcom/clock-voter.h>
+
+/* Serializes all voter-clock state changes and parent rate updates. */
+static DEFINE_MUTEX(voter_clk_lock);
+
+/* Aggregate the rate of clocks that are currently on. */
+static unsigned long voter_clk_aggregate_rate(const struct clk *parent)
+{
+	struct clk *clk;
+	unsigned long rate = 0;
+
+	/* The result is the max over all enabled voter children. */
+	list_for_each_entry(clk, &parent->children, siblings) {
+		struct clk_voter *v = to_clk_voter(clk);
+		if (v->enabled)
+			rate = max(clk->rate, rate);
+	}
+	return rate;
+}
+
+/*
+ * Record this voter's rate request and, if the voter is enabled and its new
+ * vote changes the aggregate, push the new maximum down to the parent.
+ */
+static int voter_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	int ret = 0;
+	struct clk *clkp;
+	struct clk_voter *clkh, *v = to_clk_voter(clk);
+	unsigned long cur_rate, new_rate, other_rate = 0;
+
+	/* Branch voters are on/off only; rate requests are ignored. */
+	if (v->is_branch)
+		return 0;
+
+	mutex_lock(&voter_clk_lock);
+
+	if (v->enabled) {
+		struct clk *parent = clk->parent;
+
+		/*
+		 * Get the aggregate rate without this clock's vote and update
+		 * if the new rate is different than the current rate
+		 */
+		list_for_each_entry(clkp, &parent->children, siblings) {
+			clkh = to_clk_voter(clkp);
+			if (clkh->enabled && clkh != v)
+				other_rate = max(clkp->rate, other_rate);
+		}
+
+		cur_rate = max(other_rate, clk->rate);
+		new_rate = max(other_rate, rate);
+
+		if (new_rate != cur_rate) {
+			ret = clk_set_rate(parent, new_rate);
+			if (ret)
+				goto unlock;
+		}
+	}
+	/* Cache the vote even when disabled; it's applied on prepare. */
+	clk->rate = rate;
+unlock:
+	mutex_unlock(&voter_clk_lock);
+
+	return ret;
+}
+
+/*
+ * Activate this voter: raise the parent's rate if our cached vote exceeds
+ * the current aggregate, then mark the voter enabled.
+ */
+static int voter_clk_prepare(struct clk *clk)
+{
+	int ret = 0;
+	unsigned long cur_rate;
+	struct clk *parent;
+	struct clk_voter *v = to_clk_voter(clk);
+
+	mutex_lock(&voter_clk_lock);
+	parent = clk->parent;
+
+	/* Branch voters only track on/off state; no rate to propagate. */
+	if (v->is_branch) {
+		v->enabled = true;
+		goto out;
+	}
+
+	/*
+	 * Increase the rate if this clock is voting for a higher rate
+	 * than the current rate.
+	 */
+	cur_rate = voter_clk_aggregate_rate(parent);
+	if (clk->rate > cur_rate) {
+		ret = clk_set_rate(parent, clk->rate);
+		if (ret)
+			goto out;
+	}
+	v->enabled = true;
+out:
+	mutex_unlock(&voter_clk_lock);
+
+	return ret;
+}
+
+/*
+ * Deactivate this voter and lower the parent's rate if this voter alone
+ * was holding it at the current maximum.
+ */
+static void voter_clk_unprepare(struct clk *clk)
+{
+	unsigned long cur_rate, new_rate;
+	struct clk *parent;
+	struct clk_voter *v = to_clk_voter(clk);
+
+
+	mutex_lock(&voter_clk_lock);
+	parent = clk->parent;
+
+	/*
+	 * Decrease the rate if this clock was the only one voting for
+	 * the highest rate.
+	 */
+	v->enabled = false;
+	if (v->is_branch)
+		goto out;
+
+	/* Aggregate excludes us now that v->enabled is false. */
+	new_rate = voter_clk_aggregate_rate(parent);
+	cur_rate = max(new_rate, clk->rate);
+
+	if (new_rate < cur_rate)
+		clk_set_rate(parent, new_rate);
+
+out:
+	mutex_unlock(&voter_clk_lock);
+}
+
+static int voter_clk_is_enabled(struct clk *clk)
+{
+	struct clk_voter *v = to_clk_voter(clk);
+	return v->enabled;
+}
+
+/* Delegate rounding to the real (parent) clock. */
+static long voter_clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	return clk_round_rate(clk->parent, rate);
+}
+
+/* Voter clocks are pure software bookkeeping on this SoC; always local. */
+static bool voter_clk_is_local(struct clk *clk)
+{
+	return true;
+}
+
+/* A voter with a non-zero cached rate is treated as enabled at boot. */
+static enum handoff voter_clk_handoff(struct clk *clk)
+{
+	if (!clk->rate)
+		return HANDOFF_DISABLED_CLK;
+
+	/*
+	 * Send the default rate to the parent if necessary and update the
+	 * software state of the voter clock.
+	 */
+	if (voter_clk_prepare(clk) < 0)
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+struct clk_ops clk_ops_voter = {
+	.prepare = voter_clk_prepare,
+	.unprepare = voter_clk_unprepare,
+	.set_rate = voter_clk_set_rate,
+	.is_enabled = voter_clk_is_enabled,
+	.round_rate = voter_clk_round_rate,
+	.is_local = voter_clk_is_local,
+	.handoff = voter_clk_handoff,
+};
diff --git a/drivers/clk/qcom/clock.c b/drivers/clk/qcom/clock.c
new file mode 100644
index 000000000000..7c987d460584
--- /dev/null
+++ b/drivers/clk/qcom/clock.c
@@ -0,0 +1,906 @@
+/* arch/arm/mach-msm/clock.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/list.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <trace/events/power.h>
+#include "clock.h"
+
+/* Clock whose boot-time enable vote is held until clock_late_init(). */
+struct handoff_clk {
+	struct list_head list;
+	struct clk *clk;
+};
+static LIST_HEAD(handoff_list);
+
+/* vdd_class held at its maximum level until clock_late_init(). */
+struct handoff_vdd {
+	struct list_head list;
+	struct clk_vdd_class *vdd_class;
+};
+static LIST_HEAD(handoff_vdd_list);
+
+/* Serializes clock registration/init against clock_late_init(). */
+static DEFINE_MUTEX(msm_clock_init_lock);
+
+/* Find the voltage level required for a given rate. */
+int find_vdd_level(struct clk *clk, unsigned long rate)
+{
+ int level;
+
+ for (level = 0; level < clk->num_fmax; level++)
+ if (rate <= clk->fmax[level])
+ break;
+
+ if (level == clk->num_fmax) {
+ pr_err("Rate %lu for %s is greater than highest Fmax\n", rate,
+ clk->dbg_name);
+ return -EINVAL;
+ }
+
+ return level;
+}
+
+/* Update voltage level given the current votes. */
+static int update_vdd(struct clk_vdd_class *vdd_class)
+{
+ int level, rc = 0, i, ignore;
+ struct regulator **r = vdd_class->regulator;
+ int *uv = vdd_class->vdd_uv;
+ int *ua = vdd_class->vdd_ua;
+ int n_reg = vdd_class->num_regulators;
+ int cur_lvl = vdd_class->cur_level;
+ int max_lvl = vdd_class->num_levels - 1;
+ int cur_base = cur_lvl * n_reg;
+ int new_base;
+
+ /* aggregate votes */
+ for (level = max_lvl; level > 0; level--)
+ if (vdd_class->level_votes[level])
+ break;
+
+ if (level == cur_lvl)
+ return 0;
+
+ max_lvl = max_lvl * n_reg;
+ new_base = level * n_reg;
+ for (i = 0; i < vdd_class->num_regulators; i++) {
+ rc = regulator_set_voltage(r[i], uv[new_base + i],
+ uv[max_lvl + i]);
+ if (rc)
+ goto set_voltage_fail;
+
+ if (ua) {
+ rc = regulator_set_optimum_mode(r[i], ua[new_base + i]);
+ rc = rc > 0 ? 0 : rc;
+ if (rc)
+ goto set_mode_fail;
+ }
+ if (cur_lvl == 0 || cur_lvl == vdd_class->num_levels)
+ rc = regulator_enable(r[i]);
+ else if (level == 0)
+ rc = regulator_disable(r[i]);
+ if (rc)
+ goto enable_disable_fail;
+ }
+ if (vdd_class->set_vdd && !vdd_class->num_regulators)
+ rc = vdd_class->set_vdd(vdd_class, level);
+
+ if (!rc)
+ vdd_class->cur_level = level;
+
+ return rc;
+
+enable_disable_fail:
+ /*
+ * set_optimum_mode could use voltage to derive mode. Restore
+ * previous voltage setting for r[i] first.
+ */
+ if (ua) {
+ regulator_set_voltage(r[i], uv[cur_base + i], uv[max_lvl + i]);
+ regulator_set_optimum_mode(r[i], ua[cur_base + i]);
+ }
+
+set_mode_fail:
+ regulator_set_voltage(r[i], uv[cur_base + i], uv[max_lvl + i]);
+
+set_voltage_fail:
+ for (i--; i >= 0; i--) {
+ regulator_set_voltage(r[i], uv[cur_base + i], uv[max_lvl + i]);
+ if (ua)
+ regulator_set_optimum_mode(r[i], ua[cur_base + i]);
+ if (cur_lvl == 0 || cur_lvl == vdd_class->num_levels)
+ regulator_disable(r[i]);
+ else if (level == 0)
+ ignore = regulator_enable(r[i]);
+ }
+ return rc;
+}
+
+/* Vote for a voltage level. */
+int vote_vdd_level(struct clk_vdd_class *vdd_class, int level)
+{
+ int rc;
+
+ if (level >= vdd_class->num_levels)
+ return -EINVAL;
+
+ mutex_lock(&vdd_class->lock);
+ vdd_class->level_votes[level]++;
+ rc = update_vdd(vdd_class);
+ if (rc)
+ vdd_class->level_votes[level]--;
+ mutex_unlock(&vdd_class->lock);
+
+ return rc;
+}
+
+/* Remove vote for a voltage level. */
+int unvote_vdd_level(struct clk_vdd_class *vdd_class, int level)
+{
+ int rc = 0;
+
+ if (level >= vdd_class->num_levels)
+ return -EINVAL;
+
+ mutex_lock(&vdd_class->lock);
+ if (WARN(!vdd_class->level_votes[level],
+ "Reference counts are incorrect for %s level %d\n",
+ vdd_class->class_name, level))
+ goto out;
+ vdd_class->level_votes[level]--;
+ rc = update_vdd(vdd_class);
+ if (rc)
+ vdd_class->level_votes[level]++;
+out:
+ mutex_unlock(&vdd_class->lock);
+ return rc;
+}
+
+/* Vote for a voltage level corresponding to a clock's rate. */
+static int vote_rate_vdd(struct clk *clk, unsigned long rate)
+{
+ int level;
+
+ if (!clk->vdd_class)
+ return 0;
+
+ level = find_vdd_level(clk, rate);
+ if (level < 0)
+ return level;
+
+ return vote_vdd_level(clk->vdd_class, level);
+}
+
+/* Remove vote for a voltage level corresponding to a clock's rate. */
+static void unvote_rate_vdd(struct clk *clk, unsigned long rate)
+{
+ int level;
+
+ if (!clk->vdd_class)
+ return;
+
+ level = find_vdd_level(clk, rate);
+ if (level < 0)
+ return;
+
+ unvote_vdd_level(clk->vdd_class, level);
+}
+/* Check if the rate is within the voltage limits of the clock. */
+static bool is_rate_valid(struct clk *clk, unsigned long rate)
+{
+ int level;
+
+ if (!clk->vdd_class)
+ return true;
+
+ level = find_vdd_level(clk, rate);
+ return level >= 0;
+}
+
+/**
+ * __clk_pre_reparent() - Set up the new parent before switching to it and
+ * prevent the enable state of the child clock from changing.
+ * @c: The child clock that's going to switch parents
+ * @new: The new parent that the child clock is going to switch to
+ * @flags: Pointer to scratch space to save spinlock flags
+ *
+ * Cannot be called from atomic context.
+ *
+ * Use this API to set up the @new parent clock to be able to support the
+ * current prepare and enable state of the child clock @c. Once the parent is
+ * set up, the child clock can safely switch to it.
+ *
+ * The caller shall grab the prepare_lock of clock @c before calling this API
+ * and only release it after calling __clk_post_reparent() for clock @c (or
+ * if this API fails). This is necessary to prevent the prepare state of the
+ * child clock @c from changing while the reparenting is in progress. Since
+ * this API takes care of grabbing the enable lock of @c, only atomic
+ * operation are allowed between calls to __clk_pre_reparent and
+ * __clk_post_reparent()
+ *
+ * The scratch space pointed to by @flags should not be altered before
+ * calling __clk_post_reparent() for clock @c.
+ *
+ * See also: __clk_post_reparent()
+ */
+int __clk_pre_reparent(struct clk *c, struct clk *new, unsigned long *flags)
+{
+	int rc;
+
+	/* Mirror the child's prepare state onto the new parent first. */
+	if (c->prepare_count) {
+		rc = clk_prepare(new);
+		if (rc)
+			return rc;
+	}
+
+	/* Enable lock stays held until __clk_post_reparent(). */
+	spin_lock_irqsave(&c->lock, *flags);
+	if (c->count) {
+		rc = clk_enable(new);
+		if (rc) {
+			spin_unlock_irqrestore(&c->lock, *flags);
+			clk_unprepare(new);
+			return rc;
+		}
+	}
+	return 0;
+}
+
+/**
+ * __clk_post_reparent() - Release requirements on old parent after switching
+ *			away from it and allow changes to the child clock's enable state.
+ * @c: The child clock that switched parents
+ * @old: The old parent that the child clock switched away from or the new
+ *	parent of a failed reparent attempt.
+ * @flags: Pointer to scratch space where spinlock flags were saved
+ *
+ * Cannot be called from atomic context.
+ *
+ * This API works in tandem with __clk_pre_reparent. Use this API to
+ * - Remove prepare and enable requirements from the @old parent after
+ *   switching away from it
+ * - Or, undo the effects of __clk_pre_reparent() after a failed attempt to
+ *   change parents
+ *
+ * The caller shall release the prepare_lock of @c that was grabbed before
+ * calling __clk_pre_reparent() only after this API is called (or if
+ * __clk_pre_reparent() fails). This is necessary to prevent the prepare
+ * state of the child clock @c from changing while the reparenting is in
+ * progress. Since this API releases the enable lock of @c, the limit to
+ * atomic operations set by __clk_pre_reparent() is no longer present.
+ *
+ * The scratch space pointed to by @flags shall not be altered since the call
+ * to __clk_pre_reparent() for clock @c.
+ *
+ * See also: __clk_pre_reparent()
+ */
+void __clk_post_reparent(struct clk *c, struct clk *old, unsigned long *flags)
+{
+	/* Drop the votes placed on @old while the child still needed it. */
+	if (c->count)
+		clk_disable(old);
+	spin_unlock_irqrestore(&c->lock, *flags);
+
+	if (c->prepare_count)
+		clk_unprepare(old);
+}
+/*
+ * Prepare a clock: on the 0->1 transition, recursively prepare the parent
+ * and 'depends' clocks, vote for the voltage needed at the cached rate,
+ * then call the hardware prepare op. Unwinds fully on any failure.
+ */
+int clk_prepare(struct clk *clk)
+{
+	int ret = 0;
+	struct clk *parent;
+
+	if (!clk)
+		return 0;
+	if (IS_ERR(clk))
+		return -EINVAL;
+
+	mutex_lock(&clk->prepare_lock);
+	if (clk->prepare_count == 0) {
+		parent = clk->parent;
+
+		ret = clk_prepare(parent);
+		if (ret)
+			goto out;
+		ret = clk_prepare(clk->depends);
+		if (ret)
+			goto err_prepare_depends;
+
+		ret = vote_rate_vdd(clk, clk->rate);
+		if (ret)
+			goto err_vote_vdd;
+		if (clk->ops->prepare)
+			ret = clk->ops->prepare(clk);
+		if (ret)
+			goto err_prepare_clock;
+	}
+	clk->prepare_count++;
+out:
+	mutex_unlock(&clk->prepare_lock);
+	return ret;
+err_prepare_clock:
+	unvote_rate_vdd(clk, clk->rate);
+err_vote_vdd:
+	clk_unprepare(clk->depends);
+err_prepare_depends:
+	clk_unprepare(parent);
+	goto out;
+}
+EXPORT_SYMBOL(clk_prepare);
+/*
+ * Standard clock functions defined in include/linux/clk.h
+ */
+int clk_enable(struct clk *clk)
+{
+ int ret = 0;
+ unsigned long flags;
+ struct clk *parent;
+ const char *name;
+
+ if (!clk)
+ return 0;
+ if (IS_ERR(clk))
+ return -EINVAL;
+ name = clk->dbg_name;
+
+ spin_lock_irqsave(&clk->lock, flags);
+ WARN(!clk->prepare_count,
+ "%s: Don't call enable on unprepared clocks\n", name);
+ if (clk->count == 0) {
+ parent = clk->parent;
+
+ ret = clk_enable(parent);
+ if (ret)
+ goto err_enable_parent;
+ ret = clk_enable(clk->depends);
+ if (ret)
+ goto err_enable_depends;
+
+ trace_clock_enable(name, 1, smp_processor_id());
+ if (clk->ops->enable)
+ ret = clk->ops->enable(clk);
+ if (ret)
+ goto err_enable_clock;
+ }
+ clk->count++;
+ spin_unlock_irqrestore(&clk->lock, flags);
+
+ return 0;
+
+err_enable_clock:
+ clk_disable(clk->depends);
+err_enable_depends:
+ clk_disable(parent);
+err_enable_parent:
+ spin_unlock_irqrestore(&clk->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(clk_enable);
+/*
+ * Disable a clock: on the 1->0 transition, call the hardware disable op and
+ * release the 'depends' and parent enables taken by clk_enable().
+ */
+void clk_disable(struct clk *clk)
+{
+	const char *name;
+	unsigned long flags;
+
+	if (IS_ERR_OR_NULL(clk))
+		return;
+	name = clk->dbg_name;
+
+	spin_lock_irqsave(&clk->lock, flags);
+	WARN(!clk->prepare_count,
+			"%s: Never called prepare or calling disable after unprepare\n",
+			name);
+	if (WARN(clk->count == 0, "%s is unbalanced", name))
+		goto out;
+	if (clk->count == 1) {
+		struct clk *parent = clk->parent;
+
+		trace_clock_disable(name, 0, smp_processor_id());
+		if (clk->ops->disable)
+			clk->ops->disable(clk);
+		clk_disable(clk->depends);
+		clk_disable(parent);
+	}
+	clk->count--;
+out:
+	spin_unlock_irqrestore(&clk->lock, flags);
+}
+EXPORT_SYMBOL(clk_disable);
+
+/*
+ * Unprepare a clock: on the 1->0 transition, call the hardware unprepare
+ * op, drop the voltage vote for the cached rate, and release the 'depends'
+ * and parent prepares taken by clk_prepare().
+ */
+void clk_unprepare(struct clk *clk)
+{
+	const char *name;
+
+	if (IS_ERR_OR_NULL(clk))
+		return;
+	name = clk->dbg_name;
+
+	mutex_lock(&clk->prepare_lock);
+	if (WARN(!clk->prepare_count, "%s is unbalanced (prepare)", name))
+		goto out;
+	if (clk->prepare_count == 1) {
+		struct clk *parent = clk->parent;
+
+		WARN(clk->count,
+			"%s: Don't call unprepare when the clock is enabled\n",
+			name);
+
+		if (clk->ops->unprepare)
+			clk->ops->unprepare(clk);
+		unvote_rate_vdd(clk, clk->rate);
+		clk_unprepare(clk->depends);
+		clk_unprepare(parent);
+	}
+	clk->prepare_count--;
+out:
+	mutex_unlock(&clk->prepare_lock);
+}
+EXPORT_SYMBOL(clk_unprepare);
+
+/* Assert/deassert the clock's reset, if the hardware supports it. */
+int clk_reset(struct clk *clk, enum clk_reset_action action)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+
+	if (!clk->ops->reset)
+		return -ENOSYS;
+
+	return clk->ops->reset(clk, action);
+}
+EXPORT_SYMBOL(clk_reset);
+
+/* Return the hardware-reported rate, or the cached rate if unsupported. */
+unsigned long clk_get_rate(struct clk *clk)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return 0;
+
+	if (!clk->ops->get_rate)
+		return clk->rate;
+
+	return clk->ops->get_rate(clk);
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+/*
+ * Set a clock's rate. If the clock is prepared, a voltage vote for the new
+ * rate is taken before the hardware change and the old rate's vote is
+ * released after, so the voltage never drops below either rate's needs.
+ */
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	unsigned long start_rate;
+	int rc = 0;
+	const char *name;
+
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+	name = clk->dbg_name;
+
+	if (!is_rate_valid(clk, rate))
+		return -EINVAL;
+
+	mutex_lock(&clk->prepare_lock);
+
+	/* Return early if the rate isn't going to change */
+	if (clk->rate == rate && !(clk->flags & CLKFLAG_NO_RATE_CACHE))
+		goto out;
+
+	if (!clk->ops->set_rate) {
+		rc = -ENOSYS;
+		goto out;
+	}
+
+	trace_clock_set_rate(name, rate, raw_smp_processor_id());
+
+	start_rate = clk->rate;
+
+	if (clk->ops->pre_set_rate)
+		rc = clk->ops->pre_set_rate(clk, rate);
+	if (rc)
+		goto out;
+
+	/* Enforce vdd requirements for target frequency. */
+	if (clk->prepare_count) {
+		rc = vote_rate_vdd(clk, rate);
+		if (rc)
+			goto err_vote_vdd;
+	}
+
+	rc = clk->ops->set_rate(clk, rate);
+	if (rc)
+		goto err_set_rate;
+	clk->rate = rate;
+
+	/* Release vdd requirements for starting frequency. */
+	if (clk->prepare_count)
+		unvote_rate_vdd(clk, start_rate);
+
+	if (clk->ops->post_set_rate)
+		clk->ops->post_set_rate(clk, start_rate);
+
+out:
+	mutex_unlock(&clk->prepare_lock);
+	return rc;
+
+err_set_rate:
+	if (clk->prepare_count)
+		unvote_rate_vdd(clk, rate);
+err_vote_vdd:
+	/* clk->rate is still the old rate. So, pass the new rate instead. */
+	if (clk->ops->post_set_rate)
+		clk->ops->post_set_rate(clk, rate);
+	goto out;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+/*
+ * Round a requested rate, clamped to the clock's highest fmax (or
+ * unrestricted if the clock declares no fmax table).
+ */
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	long rrate;
+	unsigned long fmax = 0, i;
+
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+
+	if (!clk->ops->round_rate)
+		return -ENOSYS;
+
+	for (i = 0; i < clk->num_fmax; i++)
+		fmax = max(fmax, clk->fmax[i]);
+
+	if (!fmax)
+		fmax = ULONG_MAX;
+
+	rate = min(rate, fmax);
+	rrate = clk->ops->round_rate(clk, rate);
+	/* The op may round up past fmax; reject that. */
+	if (rrate > fmax)
+		return -EINVAL;
+	return rrate;
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+/* Set the maximum rate limit, if the hardware supports it. */
+int clk_set_max_rate(struct clk *clk, unsigned long rate)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+
+	if (!clk->ops->set_max_rate)
+		return -ENOSYS;
+
+	return clk->ops->set_max_rate(clk, rate);
+}
+EXPORT_SYMBOL(clk_set_max_rate);
+
+/*
+ * Switch a clock's parent via its set_parent op. A no-op if the clock
+ * cannot reparent and is already on the requested parent.
+ */
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+	int rc = 0;
+
+	/* Guard against NULL/ERR clocks like every other clk_* entry point. */
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+
+	if (!clk->ops->set_parent && clk->parent == parent)
+		return 0;
+
+	if (!clk->ops->set_parent)
+		return -ENOSYS;
+
+	mutex_lock(&clk->prepare_lock);
+	if (clk->parent == parent && !(clk->flags & CLKFLAG_NO_RATE_CACHE))
+		goto out;
+	rc = clk->ops->set_parent(clk, parent);
+out:
+	mutex_unlock(&clk->prepare_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
+/* Return the cached parent pointer (may be NULL). */
+struct clk *clk_get_parent(struct clk *clk)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return NULL;
+
+	return clk->parent;
+}
+EXPORT_SYMBOL(clk_get_parent);
+
+/* Pass driver-specific flags to the clock, if the op is implemented. */
+int clk_set_flags(struct clk *clk, unsigned long flags)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+	if (!clk->ops->set_flags)
+		return -ENOSYS;
+
+	return clk->ops->set_flags(clk, flags);
+}
+EXPORT_SYMBOL(clk_set_flags);
+/* Registered clock_init_data entries whose late_init is still pending. */
+static LIST_HEAD(initdata_list);
+
+/* Link each clock that has a parent into that parent's children list. */
+static void init_sibling_lists(struct clk_lookup *clock_tbl, size_t num_clocks)
+{
+	struct clk *clk, *parent;
+	unsigned n;
+
+	for (n = 0; n < num_clocks; n++) {
+		clk = clock_tbl[n].clk;
+		parent = clk->parent;
+		/* list_empty() check keeps re-registration from double-adding */
+		if (parent && list_empty(&clk->siblings))
+			list_add(&clk->siblings, &parent->children);
+	}
+}
+
+/*
+ * Vote a vdd_class up to its maximum level for the duration of boot (the
+ * vote is dropped in clock_late_init()). Idempotent per vdd_class.
+ */
+static void vdd_class_init(struct clk_vdd_class *vdd)
+{
+	struct handoff_vdd *v;
+
+	if (!vdd)
+		return;
+
+	/* Already voted for this class? */
+	list_for_each_entry(v, &handoff_vdd_list, list) {
+		if (v->vdd_class == vdd)
+			return;
+	}
+
+	pr_debug("voting for vdd_class %s\n", vdd->class_name);
+	if (vote_vdd_level(vdd, vdd->num_levels - 1))
+		pr_err("failed to vote for %s\n", vdd->class_name);
+
+	v = kmalloc(sizeof(*v), GFP_KERNEL);
+	if (!v) {
+		/* Without the list entry the vote is never removed. */
+		pr_err("Unable to kmalloc. %s will be stuck at max.\n",
+			vdd->class_name);
+		return;
+	}
+
+	v->vdd_class = vdd;
+	list_add_tail(&v->list, &handoff_vdd_list);
+}
+
+/*
+ * Detect a clock's boot-time state and, if it was left enabled by the
+ * bootloader, take prepare/enable/vdd votes to preserve that state until
+ * clock_late_init(). Recurses into 'depends' and parent clocks first.
+ * Idempotent via CLKFLAG_INIT_DONE / CLKFLAG_INIT_ERR.
+ */
+static int __handoff_clk(struct clk *clk)
+{
+	enum handoff state = HANDOFF_DISABLED_CLK;
+	struct handoff_clk *h = NULL;
+	int rc;
+
+	if (clk == NULL || clk->flags & CLKFLAG_INIT_DONE ||
+	    clk->flags & CLKFLAG_SKIP_HANDOFF)
+		return 0;
+
+	if (clk->flags & CLKFLAG_INIT_ERR)
+		return -ENXIO;
+
+	/* Handoff any 'depends' clock first. */
+	rc = __handoff_clk(clk->depends);
+	if (rc)
+		goto err;
+
+	/*
+	 * Handoff functions for the parent must be called before the
+	 * children can be handed off. Without handing off the parents and
+	 * knowing their rate and state (on/off), it's impossible to figure
+	 * out the rate and state of the children.
+	 */
+	if (clk->ops->get_parent)
+		clk->parent = clk->ops->get_parent(clk);
+
+	if (IS_ERR(clk->parent)) {
+		rc = PTR_ERR(clk->parent);
+		goto err;
+	}
+
+	rc = __handoff_clk(clk->parent);
+	if (rc)
+		goto err;
+
+	if (clk->ops->handoff)
+		state = clk->ops->handoff(clk);
+
+	if (state == HANDOFF_ENABLED_CLK) {
+
+		h = kmalloc(sizeof(*h), GFP_KERNEL);
+		if (!h) {
+			rc = -ENOMEM;
+			goto err;
+		}
+
+		rc = clk_prepare_enable(clk->parent);
+		if (rc)
+			goto err;
+
+		rc = clk_prepare_enable(clk->depends);
+		if (rc)
+			goto err_depends;
+
+		rc = vote_rate_vdd(clk, clk->rate);
+		WARN(rc, "%s unable to vote for voltage!\n", clk->dbg_name);
+
+		/* Record the held references directly; clk is not live yet. */
+		clk->count = 1;
+		clk->prepare_count = 1;
+		h->clk = clk;
+		list_add_tail(&h->list, &handoff_list);
+
+		pr_debug("Handed off %s rate=%lu\n", clk->dbg_name, clk->rate);
+	}
+
+	clk->flags |= CLKFLAG_INIT_DONE;
+
+	return 0;
+
+err_depends:
+	clk_disable_unprepare(clk->parent);
+err:
+	kfree(h);
+	clk->flags |= CLKFLAG_INIT_ERR;
+	pr_err("%s handoff failed (%d)\n", clk->dbg_name, rc);
+	return rc;
+}
+
+/**
+ * msm_clock_register() - Register additional clock tables
+ * @table: Table of clocks
+ * @size: Size of @table
+ *
+ * Upon return, clock APIs may be used to control clocks registered using this
+ * function.
+ */
+int __init msm_clock_register(struct clk_lookup *table, size_t size)
+{
+	int n = 0;
+
+	mutex_lock(&msm_clock_init_lock);
+
+	init_sibling_lists(table, size);
+
+	/*
+	 * Enable regulators and temporarily set them up at maximum voltage.
+	 * Once all the clocks have made their respective vote, remove this
+	 * temporary vote. The removing of the temporary vote is done at
+	 * late_init, by which time we assume all the clocks would have been
+	 * handed off.
+	 */
+	for (n = 0; n < size; n++)
+		vdd_class_init(table[n].clk->vdd_class);
+
+	/*
+	 * Detect and preserve initial clock state until clock_late_init() or
+	 * a driver explicitly changes it, whichever is first.
+	 */
+	for (n = 0; n < size; n++)
+		__handoff_clk(table[n].clk);
+
+	clkdev_add_table(table, size);
+
+	/* Debugfs registration disabled until the debug driver is wired up. */
+//	clock_debug_register(table, size);
+
+	mutex_unlock(&msm_clock_init_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_clock_register);
+
+/* Per-provider context handed to the OF clock framework. */
+struct of_msm_provider_data {
+	struct clk_lookup *table;
+	size_t size;
+};
+
+/* OF xlate callback: map a phandle argument (of_idx) to a struct clk. */
+static struct clk *of_clk_src_get(struct of_phandle_args *clkspec,
+				  void *data)
+{
+	struct of_msm_provider_data *ofdata = data;
+	int n;
+
+	for (n = 0; n < ofdata->size; n++) {
+		if (clkspec->args[0] == ofdata->table[n].of_idx)
+			return ofdata->table[n].clk;
+	}
+	return ERR_PTR(-ENOENT);
+}
+
+/**
+ * of_msm_clock_register() - Register clock tables with clkdev and with the
+ *			     clock DT framework
+ * @np: Device node of the clock-provider device
+ * @table: Table of clocks
+ * @size: Size of @table
+ *
+ * Upon return, clock APIs may be used to control clocks registered using this
+ * function.
+ */
+int __init of_msm_clock_register(struct device_node *np, struct clk_lookup *table,
+				 size_t size)
+{
+	int ret = 0;
+	struct of_msm_provider_data *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->table = table;
+	data->size = size;
+
+	ret = of_clk_add_provider(np, of_clk_src_get, data);
+	if (ret) {
+		kfree(data);
+		/* Propagate the real error instead of masking it as -ENOMEM */
+		return ret;
+	}
+
+	return msm_clock_register(table, size);
+}
+EXPORT_SYMBOL(of_msm_clock_register);
+
+/**
+ * msm_clock_init() - Register and initialize a clock driver
+ * @data: Driver-specific clock initialization data
+ *
+ * Upon return from this call, clock APIs may be used to control
+ * clocks registered with this API.
+ */
+/**
+ * msm_clock_init() - Register and initialize a clock driver
+ * @data: Driver-specific clock initialization data
+ *
+ * Upon return from this call, clock APIs may be used to control
+ * clocks registered with this API.
+ */
+int __init msm_clock_init(struct clock_init_data *data)
+{
+	if (!data)
+		return -EINVAL;
+
+	if (data->pre_init)
+		data->pre_init();
+
+	/* Queue late_init so clock_late_init() can run it later. */
+	mutex_lock(&msm_clock_init_lock);
+	if (data->late_init)
+		list_add(&data->list, &initdata_list);
+	mutex_unlock(&msm_clock_init_lock);
+
+	msm_clock_register(data->table, data->size);
+
+	if (data->post_init)
+		data->post_init();
+
+	return 0;
+}
+
+/*
+ * Run queued late_init callbacks, then drop all boot-time handoff enables
+ * and the temporary max-voltage vdd_class votes.
+ */
+static int __init clock_late_init(void)
+{
+	struct handoff_clk *h, *h_temp;
+	struct handoff_vdd *v, *v_temp;
+	struct clock_init_data *initdata, *initdata_temp;
+	int ret = 0;
+
+	pr_info("%s: Removing enables held for handed-off clocks\n", __func__);
+
+	mutex_lock(&msm_clock_init_lock);
+
+	list_for_each_entry_safe(initdata, initdata_temp,
+					&initdata_list, list) {
+		ret = initdata->late_init();
+		if (ret)
+			pr_err("%s: %pS failed late_init.\n", __func__,
+				initdata);
+	}
+
+	list_for_each_entry_safe(h, h_temp, &handoff_list, list) {
+		clk_disable_unprepare(h->clk);
+		list_del(&h->list);
+		kfree(h);
+	}
+
+	list_for_each_entry_safe(v, v_temp, &handoff_vdd_list, list) {
+		unvote_vdd_level(v->vdd_class, v->vdd_class->num_levels - 1);
+		list_del(&v->list);
+		kfree(v);
+	}
+
+	mutex_unlock(&msm_clock_init_lock);
+
+	return ret;
+}
+/*
+ * clock_late_init should run only after all deferred probing
+ * (excluding DLKM probes) has completed.
+ */
+late_initcall_sync(clock_late_init);
diff --git a/drivers/clk/qcom/clock.h b/drivers/clk/qcom/clock.h
new file mode 100644
index 000000000000..b0152d8c1fe1
--- /dev/null
+++ b/drivers/clk/qcom/clock.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_CLK_MSM_CLOCK_H
+#define __DRIVERS_CLK_MSM_CLOCK_H
+
+#include <linux/clkdev.h>
+
+/**
+ * struct clock_init_data - SoC specific clock initialization data
+ * @list: node used internally to queue this data for clock_late_init()
+ * @table: table of lookups to add
+ * @size: size of @table
+ * @pre_init: called before initializing the clock driver.
+ * @post_init: called after registering @table. clock APIs can be called inside.
+ * @late_init: called during late init
+ */
+struct clock_init_data {
+	struct list_head list;
+	struct clk_lookup *table;
+	size_t size;
+	void (*pre_init)(void);
+	void (*post_init)(void);
+	int (*late_init)(void);
+};
+
+/* Register @data->table and queue the optional late_init hook. */
+int msm_clock_init(struct clock_init_data *data);
+/* Presumably maps @rate to a vdd level index for @clk — see clock.c. */
+int find_vdd_level(struct clk *clk, unsigned long rate);
+
+#ifdef CONFIG_DEBUG_FS
+int clock_debug_register(struct clk_lookup *t, size_t s);
+void clock_debug_print_enabled(void);
+#else
+/* Stubs keep callers unconditional when CONFIG_DEBUG_FS is disabled. */
+static inline int clock_debug_register(struct clk_lookup *t, size_t s)
+{
+	return 0;
+}
+static inline void clock_debug_print_enabled(void) { return; }
+#endif
+
+#endif
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
new file mode 100644
index 000000000000..1ab4f355f3b6
--- /dev/null
+++ b/drivers/clk/qcom/gdsc.c
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk.h>
+
+#define PWR_ON_MASK BIT(31)
+#define EN_REST_WAIT_MASK (0xF << 20)
+#define EN_FEW_WAIT_MASK (0xF << 16)
+#define CLK_DIS_WAIT_MASK (0xF << 12)
+#define SW_OVERRIDE_MASK BIT(2)
+#define HW_CONTROL_MASK BIT(1)
+#define SW_COLLAPSE_MASK BIT(0)
+
+/* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
+#define EN_REST_WAIT_VAL (0x2 << 20)
+#define EN_FEW_WAIT_VAL (0x8 << 16)
+#define CLK_DIS_WAIT_VAL (0x2 << 12)
+
+#define TIMEOUT_US 100
+
+/* Per-instance state for one GDSC power rail exposed as a regulator. */
+struct gdsc {
+	struct regulator_dev *rdev;	/* regulator framework handle */
+	struct regulator_desc rdesc;	/* name/ops/type description */
+	void __iomem *gdscr;		/* mapped GDSC control register */
+	struct clk **clocks;		/* clocks associated with this rail */
+	int clock_count;		/* number of entries in @clocks */
+	bool toggle_mem;		/* toggle RETAIN_MEM across on/off */
+	bool toggle_periph;		/* toggle RETAIN_PERIPH across on/off */
+	bool toggle_logic;		/* false: use clk resets, not collapse */
+	bool resets_asserted;		/* SW on/off state when !toggle_logic */
+	bool root_en;			/* enable root clocks around on/off */
+};
+
+/* Report rail state: HW PWR_ON bit, or SW bookkeeping when logic
+ * collapse is skipped and state is tracked via the block resets. */
+static int gdsc_is_enabled(struct regulator_dev *rdev)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval;
+
+	if (!sc->toggle_logic)
+		return sc->resets_asserted ? 0 : 1;
+
+	regval = readl_relaxed(sc->gdscr);
+	return (regval & PWR_ON_MASK) ? 1 : 0;
+}
+
+/*
+ * Power the rail up: optionally enable root clocks, clear SW_COLLAPSE and
+ * wait for PWR_ON (or deassert block resets when logic collapse is skipped),
+ * then restore mem/periph retention. Root clocks are unwound on failure so
+ * an error return does not leak prepare/enable counts.
+ */
+static int gdsc_enable(struct regulator_dev *rdev)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval;
+	int i, ret;
+
+	if (sc->root_en) {
+		for (i = 0; i < sc->clock_count; i++)
+			clk_prepare_enable(sc->clocks[i]);
+	}
+
+	if (sc->toggle_logic) {
+		regval = readl_relaxed(sc->gdscr);
+		if (regval & HW_CONTROL_MASK) {
+			dev_warn(&rdev->dev, "Invalid enable while %s is under HW control\n",
+				sc->rdesc.name);
+			ret = -EBUSY;
+			goto err_root_clk;
+		}
+
+		regval &= ~SW_COLLAPSE_MASK;
+		writel_relaxed(regval, sc->gdscr);
+
+		ret = readl_tight_poll_timeout(sc->gdscr, regval,
+					regval & PWR_ON_MASK, TIMEOUT_US);
+		if (ret) {
+			dev_err(&rdev->dev, "%s enable timed out: 0x%x\n",
+				sc->rdesc.name, regval);
+			goto err_root_clk;
+		}
+	} else {
+		for (i = 0; i < sc->clock_count; i++)
+			clk_reset(sc->clocks[i], CLK_RESET_DEASSERT);
+		sc->resets_asserted = false;
+	}
+
+	for (i = 0; i < sc->clock_count; i++) {
+		if (sc->toggle_mem)
+			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
+		if (sc->toggle_periph)
+			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
+	}
+
+	/*
+	 * If clocks to this power domain were already on, they will take an
+	 * additional 4 clock cycles to re-enable after the rail is enabled.
+	 * Delay to account for this. A delay is also needed to ensure clocks
+	 * are not enabled within 400ns of enabling power to the memories.
+	 */
+	udelay(1);
+
+	return 0;
+
+err_root_clk:
+	/* Don't leave root clocks prepared/enabled on a failed enable. */
+	if (sc->root_en) {
+		for (i = sc->clock_count - 1; i >= 0; i--)
+			clk_disable_unprepare(sc->clocks[i]);
+	}
+	return ret;
+}
+
+/*
+ * Power the rail down: drop mem/periph retention first, then either set
+ * SW_COLLAPSE and wait for PWR_ON to clear, or (when logic collapse is
+ * skipped) assert the block resets. Root clocks are released last, in
+ * reverse of the order gdsc_enable() took them.
+ */
+static int gdsc_disable(struct regulator_dev *rdev)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval;
+	int i, ret = 0;
+
+	for (i = sc->clock_count-1; i >= 0; i--) {
+		if (sc->toggle_mem)
+			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
+		if (sc->toggle_periph)
+			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
+	}
+
+	if (sc->toggle_logic) {
+		/* Disabling is invalid while the HW trigger owns the GDSC. */
+		regval = readl_relaxed(sc->gdscr);
+		if (regval & HW_CONTROL_MASK) {
+			dev_warn(&rdev->dev, "Invalid disable while %s is under HW control\n",
+				sc->rdesc.name);
+			return -EBUSY;
+		}
+
+		regval |= SW_COLLAPSE_MASK;
+		writel_relaxed(regval, sc->gdscr);
+
+		/* Timeout is reported but not treated as fatal here. */
+		ret = readl_tight_poll_timeout(sc->gdscr, regval,
+					       !(regval & PWR_ON_MASK),
+						TIMEOUT_US);
+		if (ret)
+			dev_err(&rdev->dev, "%s disable timed out: 0x%x\n",
+				sc->rdesc.name, regval);
+	} else {
+		for (i = sc->clock_count-1; i >= 0; i--)
+			clk_reset(sc->clocks[i], CLK_RESET_ASSERT);
+		sc->resets_asserted = true;
+	}
+
+	if (sc->root_en) {
+		for (i = sc->clock_count-1; i >= 0; i--)
+			clk_disable_unprepare(sc->clocks[i]);
+	}
+
+	return ret;
+}
+
+/* FAST means the GDSC is under HW trigger control; NORMAL means SW. */
+static unsigned int gdsc_get_mode(struct regulator_dev *rdev)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+
+	if (readl_relaxed(sc->gdscr) & HW_CONTROL_MASK)
+		return REGULATOR_MODE_FAST;
+
+	return REGULATOR_MODE_NORMAL;
+}
+
+/*
+ * Switch between SW control (REGULATOR_MODE_NORMAL) and HW trigger control
+ * (REGULATOR_MODE_FAST). Only legal while the GDSC is enabled, i.e. while
+ * SW_COLLAPSE is clear.
+ */
+static int gdsc_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval;
+	int ret;
+
+	regval = readl_relaxed(sc->gdscr);
+
+	/*
+	 * HW control can only be enable/disabled when SW_COLLAPSE
+	 * indicates on.
+	 */
+	if (regval & SW_COLLAPSE_MASK) {
+		dev_err(&rdev->dev, "can't enable hw collapse now\n");
+		return -EBUSY;
+	}
+
+	switch (mode) {
+	case REGULATOR_MODE_FAST:
+		/* Turn on HW trigger mode */
+		regval |= HW_CONTROL_MASK;
+		writel_relaxed(regval, sc->gdscr);
+		/*
+		 * There may be a race with internal HW trigger signal,
+		 * that will result in GDSC going through a power down and
+		 * up cycle. In case HW trigger signal is controlled by
+		 * firmware that also poll same status bits as we do, FW
+		 * might read an 'on' status before the GDSC can finish
+		 * power cycle. We wait 1us before returning to ensure
+		 * FW can't immediately poll the status bit.
+		 */
+		mb();
+		udelay(1);
+		break;
+
+	case REGULATOR_MODE_NORMAL:
+		/* Turn off HW trigger mode */
+		regval &= ~HW_CONTROL_MASK;
+		writel_relaxed(regval, sc->gdscr);
+		/*
+		 * There may be a race with internal HW trigger signal,
+		 * that will result in GDSC going through a power down and
+		 * up cycle. If we poll too early, status bit will
+		 * indicate 'on' before the GDSC can finish the power cycle.
+		 * Account for this case by waiting 1us before polling.
+		 */
+		mb();
+		udelay(1);
+		/* Wait for the rail to come back up under SW control. */
+		ret = readl_tight_poll_timeout(sc->gdscr, regval,
+					regval & PWR_ON_MASK, TIMEOUT_US);
+		if (ret) {
+			dev_err(&rdev->dev, "%s set_mode timed out: 0x%x\n",
+				sc->rdesc.name, regval);
+			return ret;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Regulator callbacks backing each GDSC power rail. */
+static struct regulator_ops gdsc_ops = {
+	.is_enabled = gdsc_is_enabled,
+	.enable = gdsc_enable,
+	.disable = gdsc_disable,
+	.set_mode = gdsc_set_mode,
+	.get_mode = gdsc_get_mode,
+};
+
+/*
+ * Parse the GDSC's DT node (register, clocks, retention and HW-trigger
+ * properties), program the wait-state configuration, and register the
+ * rail as a regulator. All resources are devm-managed.
+ */
+static int gdsc_probe(struct platform_device *pdev)
+{
+	/* Monotonic id across all GDSC instances; first id is 0. */
+	static atomic_t gdsc_count = ATOMIC_INIT(-1);
+	struct regulator_config reg_config = {};
+	struct regulator_init_data *init_data;
+	struct resource *res;
+	struct gdsc *sc;
+	uint32_t regval;
+	bool retain_mem, retain_periph, support_hw_trigger;
+	int i, ret;
+
+	sc = devm_kzalloc(&pdev->dev, sizeof(struct gdsc), GFP_KERNEL);
+	if (sc == NULL)
+		return -ENOMEM;
+
+	init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
+	if (init_data == NULL)
+		return -ENOMEM;
+
+	if (of_get_property(pdev->dev.of_node, "parent-supply", NULL))
+		init_data->supply_regulator = "parent";
+
+	ret = of_property_read_string(pdev->dev.of_node, "regulator-name",
+				      &sc->rdesc.name);
+	if (ret)
+		return ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL)
+		return -EINVAL;
+	sc->gdscr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (sc->gdscr == NULL)
+		return -ENOMEM;
+
+	/* A missing "clock-names" property means zero clocks, not an error. */
+	sc->clock_count = of_property_count_strings(pdev->dev.of_node,
+					    "clock-names");
+	if (sc->clock_count == -EINVAL) {
+		sc->clock_count = 0;
+	} else if (IS_ERR_VALUE(sc->clock_count)) {
+		dev_err(&pdev->dev, "Failed to get clock names\n");
+		return -EINVAL;
+	}
+
+	sc->clocks = devm_kzalloc(&pdev->dev,
+			sizeof(struct clk *) * sc->clock_count, GFP_KERNEL);
+	if (!sc->clocks)
+		return -ENOMEM;
+
+	sc->root_en = of_property_read_bool(pdev->dev.of_node,
+						"qcom,enable-root-clk");
+
+	for (i = 0; i < sc->clock_count; i++) {
+		const char *clock_name;
+		of_property_read_string_index(pdev->dev.of_node, "clock-names",
+					      i, &clock_name);
+		sc->clocks[i] = devm_clk_get(&pdev->dev, clock_name);
+		if (IS_ERR(sc->clocks[i])) {
+			int rc = PTR_ERR(sc->clocks[i]);
+			/* Probe deferral is expected; don't spam the log. */
+			if (rc != -EPROBE_DEFER)
+				dev_err(&pdev->dev, "Failed to get %s\n",
+					clock_name);
+			return rc;
+		}
+	}
+
+	sc->rdesc.id = atomic_inc_return(&gdsc_count);
+	sc->rdesc.ops = &gdsc_ops;
+	sc->rdesc.type = REGULATOR_VOLTAGE;
+	sc->rdesc.owner = THIS_MODULE;
+	platform_set_drvdata(pdev, sc);
+
+	/*
+	 * Disable HW trigger: collapse/restore occur based on registers writes.
+	 * Disable SW override: Use hardware state-machine for sequencing.
+	 */
+	regval = readl_relaxed(sc->gdscr);
+	regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);
+
+	/* Configure wait time between states. */
+	regval &= ~(EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK);
+	regval |= EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
+	writel_relaxed(regval, sc->gdscr);
+
+	retain_mem = of_property_read_bool(pdev->dev.of_node,
+					    "qcom,retain-mem");
+	sc->toggle_mem = !retain_mem;
+	retain_periph = of_property_read_bool(pdev->dev.of_node,
+					    "qcom,retain-periph");
+	sc->toggle_periph = !retain_periph;
+	sc->toggle_logic = !of_property_read_bool(pdev->dev.of_node,
+						"qcom,skip-logic-collapse");
+	support_hw_trigger = of_property_read_bool(pdev->dev.of_node,
+						    "qcom,support-hw-trigger");
+	if (support_hw_trigger) {
+		init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_MODE;
+		init_data->constraints.valid_modes_mask |=
+				REGULATOR_MODE_NORMAL | REGULATOR_MODE_FAST;
+	}
+
+	/* Rails that never collapse logic are brought up once, here. */
+	if (!sc->toggle_logic) {
+		regval &= ~SW_COLLAPSE_MASK;
+		writel_relaxed(regval, sc->gdscr);
+
+		ret = readl_tight_poll_timeout(sc->gdscr, regval,
+					regval & PWR_ON_MASK, TIMEOUT_US);
+		if (ret) {
+			dev_err(&pdev->dev, "%s enable timed out: 0x%x\n",
+				sc->rdesc.name, regval);
+			return ret;
+		}
+	}
+
+	/* Seed retention flags; an already-on rail keeps retention set. */
+	for (i = 0; i < sc->clock_count; i++) {
+		if (retain_mem || (regval & PWR_ON_MASK))
+			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
+		else
+			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
+
+		if (retain_periph || (regval & PWR_ON_MASK))
+			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
+		else
+			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
+	}
+
+	reg_config.dev = &pdev->dev;
+	reg_config.init_data = init_data;
+	reg_config.driver_data = sc;
+	reg_config.of_node = pdev->dev.of_node;
+	sc->rdev = regulator_register(&sc->rdesc, &reg_config);
+	if (IS_ERR(sc->rdev)) {
+		dev_err(&pdev->dev, "regulator_register(\"%s\") failed.\n",
+			sc->rdesc.name);
+		return PTR_ERR(sc->rdev);
+	}
+
+	return 0;
+}
+
+/* Undo gdsc_probe(): drop the regulator; devm releases everything else. */
+static int gdsc_remove(struct platform_device *pdev)
+{
+	struct gdsc *sc = platform_get_drvdata(pdev);
+	regulator_unregister(sc->rdev);
+	return 0;
+}
+
+static struct of_device_id gdsc_match_table[] = {
+	{ .compatible = "qcom,gdsc" },
+	{}
+};
+
+static struct platform_driver gdsc_driver = {
+	.probe = gdsc_probe,
+	.remove = gdsc_remove,
+	.driver = {
+		.name = "gdsc",
+		.of_match_table = gdsc_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+/* Register at subsys_initcall so rails exist before consumer drivers probe. */
+static int __init gdsc_init(void)
+{
+	return platform_driver_register(&gdsc_driver);
+}
+subsys_initcall(gdsc_init);
+
+static void __exit gdsc_exit(void)
+{
+	platform_driver_unregister(&gdsc_driver);
+}
+module_exit(gdsc_exit);
+
+MODULE_LICENSE("GPL v2");
+/* NOTE(review): description says MSM8974 but this patch targets MSM8916 —
+ * confirm the intended wording. */
+MODULE_DESCRIPTION("MSM8974 GDSC power rail regulator driver");
diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c
index 098c542e5c53..86e70efc9bb7 100644
--- a/drivers/clocksource/qcom-timer.c
+++ b/drivers/clocksource/qcom-timer.c
@@ -1,5 +1,4 @@
/*
- *
* Copyright (C) 2007 Google, Inc.
* Copyright (c) 2009-2012,2014, The Linux Foundation. All rights reserved.
*
@@ -14,36 +13,196 @@
*
*/
+#include <linux/module.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/init.h>
+#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/sched_clock.h>
#include <asm/delay.h>
+#include <linux/percpu.h>
+#include <linux/mm.h>
+#include <linux/sched_clock.h>
+
+#include <asm/localtimer.h>
+#include <asm/mach/time.h>
+#include <asm/smp_plat.h>
+#include <asm/user_accessible_timer.h>
+#include <mach/msm_iomap.h>
+#include <mach/irqs.h>
+#include <soc/qcom/socinfo.h>
+
+#include <soc/qcom/smem.h>
+#if defined(CONFIG_MSM_SMD)
+#include <soc/qcom/smsm.h>
+#endif
+#include "timer.h"
+
+enum {
+ MSM_TIMER_DEBUG_SYNC = 1U << 0,
+};
+static int msm_timer_debug_mask;
+module_param_named(debug_mask, msm_timer_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#ifdef CONFIG_MSM7X00A_USE_GP_TIMER
+ #define DG_TIMER_RATING 100
+#else
+ #define DG_TIMER_RATING 300
+#endif
+
+#ifndef MSM_TMR0_BASE
+#define MSM_TMR0_BASE MSM_TMR_BASE
+#endif
+
+#define MSM_DGT_SHIFT (5)
+
+#define TIMER_MATCH_VAL 0x0000
+#define TIMER_COUNT_VAL 0x0004
+#define TIMER_ENABLE 0x0008
+#define TIMER_CLEAR 0x000C
+#define DGT_CLK_CTL 0x0034
+enum {
+ DGT_CLK_CTL_DIV_1 = 0,
+ DGT_CLK_CTL_DIV_2 = 1,
+ DGT_CLK_CTL_DIV_3 = 2,
+ DGT_CLK_CTL_DIV_4 = 3,
+};
+#define TIMER_STATUS 0x0088
+#define TIMER_ENABLE_EN 1
+#define TIMER_ENABLE_CLR_ON_MATCH_EN 2
+
+#define LOCAL_TIMER 0
+#define GLOBAL_TIMER 1
-#define TIMER_MATCH_VAL 0x0000
-#define TIMER_COUNT_VAL 0x0004
-#define TIMER_ENABLE 0x0008
-#define TIMER_ENABLE_CLR_ON_MATCH_EN BIT(1)
-#define TIMER_ENABLE_EN BIT(0)
-#define TIMER_CLEAR 0x000C
-#define DGT_CLK_CTL 0x10
-#define DGT_CLK_CTL_DIV_4 0x3
-#define TIMER_STS_GPT0_CLR_PEND BIT(10)
+/*
+ * global_timer_offset is added to the regbase of a timer to force the memory
+ * access to come from the CPU0 region.
+ */
+static int global_timer_offset;
+static int msm_global_timer;
+
+#define NR_TIMERS ARRAY_SIZE(msm_clocks)
-#define GPT_HZ 32768
+unsigned int gpt_hz = 32768;
+unsigned int sclk_hz = 32768;
-#define MSM_DGT_SHIFT 5
+static struct msm_clock *clockevent_to_clock(struct clock_event_device *evt);
+static irqreturn_t msm_timer_interrupt(int irq, void *dev_id);
+static cycle_t msm_gpt_read(struct clocksource *cs);
+static cycle_t msm_dgt_read(struct clocksource *cs);
+static void msm_timer_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt);
+static int msm_timer_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt);
+
+enum {
+ MSM_CLOCK_FLAGS_UNSTABLE_COUNT = 1U << 0,
+ MSM_CLOCK_FLAGS_ODD_MATCH_WRITE = 1U << 1,
+ MSM_CLOCK_FLAGS_DELAYED_WRITE_POST = 1U << 2,
+};
+
+struct msm_clock {
+ struct clock_event_device clockevent;
+ struct clocksource clocksource;
+ unsigned int irq;
+ void __iomem *regbase;
+ uint32_t freq;
+ uint32_t shift;
+ uint32_t flags;
+ uint32_t write_delay;
+ uint32_t rollover_offset;
+ uint32_t index;
+ void __iomem *global_counter;
+ void __iomem *local_counter;
+ uint32_t status_mask;
+ union {
+ struct clock_event_device *evt;
+ struct clock_event_device __percpu **percpu_evt;
+ };
+};
-static void __iomem *event_base;
-static void __iomem *sts_base;
+enum {
+ MSM_CLOCK_GPT,
+ MSM_CLOCK_DGT,
+};
+
+struct msm_clock_percpu_data {
+ uint32_t last_set;
+ uint32_t sleep_offset;
+ uint32_t alarm_vtime;
+ uint32_t alarm;
+ uint32_t non_sleep_offset;
+ uint32_t in_sync;
+ cycle_t stopped_tick;
+ int stopped;
+ uint32_t last_sync_gpt;
+ u64 last_sync_jiffies;
+};
+
+struct msm_timer_sync_data_t {
+ struct msm_clock *clock;
+ uint32_t timeout;
+ int exit_sleep;
+};
+
+static struct msm_clock msm_clocks[] = {
+ [MSM_CLOCK_GPT] = {
+ .clockevent = {
+ .name = "gp_timer",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .shift = 32,
+ .rating = 200,
+ .set_next_event = msm_timer_set_next_event,
+ .set_mode = msm_timer_set_mode,
+ },
+ .clocksource = {
+ .name = "gp_timer",
+ .rating = 200,
+ .read = msm_gpt_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ },
+ .irq = INT_GP_TIMER_EXP,
+ .regbase = MSM_TMR_BASE + 0x4,
+ .freq = 32768,
+ .index = MSM_CLOCK_GPT,
+ .write_delay = 9,
+ },
+ [MSM_CLOCK_DGT] = {
+ .clockevent = {
+ .name = "dg_timer",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .shift = 32,
+ .rating = DG_TIMER_RATING,
+ .set_next_event = msm_timer_set_next_event,
+ .set_mode = msm_timer_set_mode,
+ },
+ .clocksource = {
+ .name = "dg_timer",
+ .rating = DG_TIMER_RATING,
+ .read = msm_dgt_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ },
+ .irq = INT_DEBUG_TIMER_EXP,
+ .regbase = MSM_TMR_BASE + 0x24,
+ .index = MSM_CLOCK_DGT,
+ .write_delay = 9,
+ }
+};
+
+static DEFINE_PER_CPU(struct msm_clock_percpu_data[NR_TIMERS],
+ msm_clocks_percpu);
+
+static DEFINE_PER_CPU(struct msm_clock *, msm_active_clock);
static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
{
@@ -58,45 +217,182 @@ static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+/*
+ * Read a timer's COUNT register, from the CPU0 (global) alias when
+ * @global is set. Counters flagged UNSTABLE_COUNT can return transient
+ * garbage, so re-read until two consecutive samples differ by at most 1
+ * (or look monotonic), giving up after 5 rounds.
+ *
+ * NOTE(review): the retained clocksource callback of the same name later
+ * in this file appears to collide with this definition — verify the patch
+ * resolves that (see the leftover conflict markers below).
+ */
+static uint32_t msm_read_timer_count(struct msm_clock *clock, int global)
+{
+	uint32_t t1, t2, t3;
+	int loop_count = 0;
+	void __iomem *addr = clock->regbase + TIMER_COUNT_VAL +
+		global*global_timer_offset;
+
+	if (!(clock->flags & MSM_CLOCK_FLAGS_UNSTABLE_COUNT))
+		return __raw_readl_no_log(addr);
+
+	t1 = __raw_readl_no_log(addr);
+	t2 = __raw_readl_no_log(addr);
+	if ((t2-t1) <= 1)
+		return t2;
+	while (1) {
+		t1 = __raw_readl_no_log(addr);
+		t2 = __raw_readl_no_log(addr);
+		t3 = __raw_readl_no_log(addr);
+		cpu_relax();
+		if ((t3-t2) <= 1)
+			return t3;
+		if ((t2-t1) <= 1)
+			return t2;
+		if ((t2 >= t1) && (t3 >= t2))
+			return t2;
+		if (++loop_count == 5) {
+			pr_err("msm_read_timer_count timer %s did not "
+				"stabilize: %u -> %u -> %u\n",
+				clock->clockevent.name, t1, t2, t3);
+			return t3;
+		}
+	}
+}
+
+/* GPT clocksource read: global counter plus CPU0's sleep offset. */
+static cycle_t msm_gpt_read(struct clocksource *cs)
+{
+	struct msm_clock *gpt = &msm_clocks[MSM_CLOCK_GPT];
+	struct msm_clock_percpu_data *state =
+		&per_cpu(msm_clocks_percpu, 0)[MSM_CLOCK_GPT];
+	uint32_t raw;
+
+	/* While stopped, report the tick value frozen at shutdown. */
+	if (state->stopped)
+		return state->stopped_tick;
+
+	raw = msm_read_timer_count(gpt, GLOBAL_TIMER) + state->sleep_offset;
+	return raw;
+}
+
+/* DGT clocksource read, scaled down by the clock's shift value. */
+static cycle_t msm_dgt_read(struct clocksource *cs)
+{
+	struct msm_clock *dgt = &msm_clocks[MSM_CLOCK_DGT];
+	struct msm_clock_percpu_data *state =
+		&per_cpu(msm_clocks_percpu, 0)[MSM_CLOCK_DGT];
+	uint32_t raw;
+
+	/* While stopped, report the tick value frozen at shutdown. */
+	if (state->stopped)
+		return state->stopped_tick >> dgt->shift;
+
+	raw = msm_read_timer_count(dgt, GLOBAL_TIMER) + state->sleep_offset;
+	return raw >> dgt->shift;
+}
+
+/* Map a clock_event_device back to its owning msm_clock. */
+static struct msm_clock *clockevent_to_clock(struct clock_event_device *evt)
+{
+	unsigned int i;
+
+	/* On UP the event device is embedded directly in the msm_clock. */
+	if (!is_smp())
+		return container_of(evt, struct msm_clock, clockevent);
+
+	for (i = 0; i < NR_TIMERS; i++) {
+		if (evt == &msm_clocks[i].clockevent)
+			return &msm_clocks[i];
+	}
+
+	/* Unknown event device: fall back to the global timer. */
+	return &msm_clocks[msm_global_timer];
+}
+
static int msm_timer_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
- u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
+ int i;
+ struct msm_clock *clock;
+ struct msm_clock_percpu_data *clock_state;
+ uint32_t now;
+ uint32_t alarm;
+ int late;
- ctrl &= ~TIMER_ENABLE_EN;
- writel_relaxed(ctrl, event_base + TIMER_ENABLE);
+ clock = clockevent_to_clock(evt);
+ clock_state = &__get_cpu_var(msm_clocks_percpu)[clock->index];
+ now = msm_read_timer_count(clock, LOCAL_TIMER);
+ alarm = now + (cycles << clock->shift);
+ if (clock->flags & MSM_CLOCK_FLAGS_ODD_MATCH_WRITE)
+ while (now == clock_state->last_set)
+ now = msm_read_timer_count(clock, LOCAL_TIMER);
- writel_relaxed(ctrl, event_base + TIMER_CLEAR);
- writel_relaxed(cycles, event_base + TIMER_MATCH_VAL);
+ clock_state->alarm = alarm;
+ __raw_writel(alarm, clock->regbase + TIMER_MATCH_VAL);
- if (sts_base)
- while (readl_relaxed(sts_base) & TIMER_STS_GPT0_CLR_PEND)
- cpu_relax();
+ if (clock->flags & MSM_CLOCK_FLAGS_DELAYED_WRITE_POST) {
+ /* read the counter four extra times to make sure write posts
+ before reading the time */
+ for (i = 0; i < 4; i++)
+ __raw_readl_no_log(clock->regbase + TIMER_COUNT_VAL);
+ }
+ now = msm_read_timer_count(clock, LOCAL_TIMER);
+ clock_state->last_set = now;
+ clock_state->alarm_vtime = alarm + clock_state->sleep_offset;
+ late = now - alarm;
+ if (late >= (int)(-clock->write_delay << clock->shift) &&
+ late < clock->freq*5)
+ return -ETIME;
- writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE);
return 0;
}
static void msm_timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+ struct clock_event_device *evt)
{
- u32 ctrl;
+ struct msm_clock *clock;
+ struct msm_clock **cur_clock;
+ struct msm_clock_percpu_data *clock_state, *gpt_state;
+ unsigned long irq_flags;
+ struct irq_chip *chip;
- ctrl = readl_relaxed(event_base + TIMER_ENABLE);
- ctrl &= ~(TIMER_ENABLE_EN | TIMER_ENABLE_CLR_ON_MATCH_EN);
+ clock = clockevent_to_clock(evt);
+ clock_state = &__get_cpu_var(msm_clocks_percpu)[clock->index];
+ gpt_state = &__get_cpu_var(msm_clocks_percpu)[MSM_CLOCK_GPT];
+
+ local_irq_save(irq_flags);
switch (mode) {
case CLOCK_EVT_MODE_RESUME:
case CLOCK_EVT_MODE_PERIODIC:
break;
case CLOCK_EVT_MODE_ONESHOT:
- /* Timer is enabled in set_next_event */
+ clock_state->stopped = 0;
+ clock_state->sleep_offset =
+ -msm_read_timer_count(clock, LOCAL_TIMER) +
+ clock_state->stopped_tick;
+ get_cpu_var(msm_active_clock) = clock;
+ put_cpu_var(msm_active_clock);
+ __raw_writel(TIMER_ENABLE_EN, clock->regbase + TIMER_ENABLE);
+ chip = irq_get_chip(clock->irq);
+ if (chip && chip->irq_unmask)
+ chip->irq_unmask(irq_get_irq_data(clock->irq));
+ if (clock != &msm_clocks[MSM_CLOCK_GPT])
+ __raw_writel(TIMER_ENABLE_EN,
+ msm_clocks[MSM_CLOCK_GPT].regbase +
+ TIMER_ENABLE);
break;
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
+ cur_clock = &get_cpu_var(msm_active_clock);
+ if (*cur_clock == clock)
+ *cur_clock = NULL;
+ put_cpu_var(msm_active_clock);
+ clock_state->in_sync = 0;
+ clock_state->stopped = 1;
+ clock_state->stopped_tick =
+ msm_read_timer_count(clock, LOCAL_TIMER) +
+ clock_state->sleep_offset;
+ __raw_writel(0, clock->regbase + TIMER_MATCH_VAL);
+ chip = irq_get_chip(clock->irq);
+ if (chip && chip->irq_mask)
+ chip->irq_mask(irq_get_irq_data(clock->irq));
+
+ if (!is_smp() || clock != &msm_clocks[MSM_CLOCK_DGT]
+ || smp_processor_id())
+ __raw_writel(0, clock->regbase + TIMER_ENABLE);
+
+ if (msm_global_timer == MSM_CLOCK_DGT &&
+ clock != &msm_clocks[MSM_CLOCK_GPT]) {
+ gpt_state->in_sync = 0;
+ __raw_writel(0, msm_clocks[MSM_CLOCK_GPT].regbase +
+ TIMER_ENABLE);
+ }
break;
}
- writel_relaxed(ctrl, event_base + TIMER_ENABLE);
+ wmb();
+ local_irq_restore(irq_flags);
}
static struct clock_event_device __percpu *msm_evt;
@@ -104,8 +400,10 @@ static struct clock_event_device __percpu *msm_evt;
static void __iomem *source_base;
static notrace cycle_t msm_read_timer_count(struct clocksource *cs)
+
+void __iomem *msm_timer_get_timer0_base(void)
{
- return readl_relaxed(source_base + TIMER_COUNT_VAL);
+ return MSM_TMR_BASE + global_timer_offset;
}
static struct clocksource msm_clocksource = {
@@ -147,12 +445,77 @@ static int msm_local_timer_setup(struct clock_event_device *evt)
return 0;
}
-static void msm_local_timer_stop(struct clock_event_device *evt)
+#define MPM_SCLK_COUNT_VAL 0x0024
+
+#ifdef CONFIG_PM
+/*
+ * Retrieve the cycle count from sclk and optionally synchronize local clock
+ * with the sclk value.
+ *
+ * time_start and time_expired are callbacks that must be specified. The
+ * protocol uses them to detect timeout. The update callback is optional.
+ * If not NULL, update will be called so that it can update local clock.
+ *
+ * The function does not use the argument data directly; it passes data to
+ * the callbacks.
+ *
+ * Return value:
+ * 0: the operation failed
+ * >0: the slow clock value after time-sync
+ */
+static void (*msm_timer_sync_timeout)(void);
+#if defined(CONFIG_MSM_DIRECT_SCLK_ACCESS)
+/*
+ * Read the 32 kHz sleep-clock counter from the MPM, re-reading until two
+ * consecutive samples agree. A zero reading is retried up to three times
+ * with a short delay in between. Returns the tick count, or 0 on failure.
+ */
+uint32_t msm_timer_get_sclk_ticks(void)
+{
+	uint32_t t1, t2;
+	int loop_count = 10;
+	int loop_zero_count = 3;
+	int tmp = USEC_PER_SEC;
+
+	/*
+	 * do_div() requires a 64-bit dividend; tmp is a plain int, so use
+	 * ordinary integer division for the retry delay (in microseconds).
+	 */
+	tmp /= sclk_hz;
+	tmp /= (loop_zero_count - 1);
+
+	while (loop_zero_count--) {
+		t1 = __raw_readl_no_log(MSM_RPM_MPM_BASE + MPM_SCLK_COUNT_VAL);
+		do {
+			udelay(1);
+			t2 = t1;
+			t1 = __raw_readl_no_log(
+				MSM_RPM_MPM_BASE + MPM_SCLK_COUNT_VAL);
+		} while ((t2 != t1) && --loop_count);
+
+		/* NOTE: loop_count is a shared budget across all retries. */
+		if (!loop_count) {
+			printk(KERN_EMERG "SCLK did not stabilize\n");
+			return 0;
+		}
+
+		if (t1)
+			break;
+
+		udelay(tmp);
+	}
+
+	/*
+	 * Test the reading itself rather than the loop counter: the old
+	 * !loop_zero_count check misreported a valid last-attempt reading
+	 * and never fired when the loop ran to completion (counter == -1).
+	 */
+	if (!t1) {
+		printk(KERN_EMERG "SCLK reads zero\n");
+		return 0;
+	}
+
+	return t1;
+}
+/*
+ * Direct-SCLK-access variant of the sync protocol: read the counter
+ * straight from the MPM and feed it to @update. @time_start and
+ * @time_expired are unused here; they exist to match the smem-based
+ * variant's signature. Returns the sclk tick count, or 0 on failure.
+ */
+static uint32_t msm_timer_do_sync_to_sclk(
+	void (*time_start)(struct msm_timer_sync_data_t *data),
+	bool (*time_expired)(struct msm_timer_sync_data_t *data),
+	void (*update)(struct msm_timer_sync_data_t *, uint32_t, uint32_t),
+	struct msm_timer_sync_data_t *data)
+{
+	unsigned t1 = msm_timer_get_sclk_ticks();
+
+	if (t1 && update != NULL)
+		update(data, t1, sclk_hz);
+	return t1;
+}
+#else
+
static int msm_timer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
@@ -230,8 +593,97 @@ err:
sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz);
msm_delay_timer.freq = dgt_hz;
register_current_timer_delay(&msm_delay_timer);
+
+}
+
+/* Time Master State Bits */
+#define MASTER_BITS_PER_CPU 1
+#define MASTER_TIME_PENDING \
+ (0x01UL << (MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
+
+/* Time Slave State Bits */
+#define SLAVE_TIME_REQUEST 0x0400
+#define SLAVE_TIME_POLL 0x0800
+#define SLAVE_TIME_INIT 0x1000
+
+static uint32_t msm_timer_do_sync_to_sclk(
+ void (*time_start)(struct msm_timer_sync_data_t *data),
+ bool (*time_expired)(struct msm_timer_sync_data_t *data),
+ void (*update)(struct msm_timer_sync_data_t *, uint32_t, uint32_t),
+ struct msm_timer_sync_data_t *data)
+{
+ uint32_t *smem_clock;
+ uint32_t smem_clock_val;
+ uint32_t state;
+
+ smem_clock = smem_find(SMEM_SMEM_SLOW_CLOCK_VALUE,
+ sizeof(uint32_t), 0, SMEM_ANY_HOST_FLAG);
+ if (smem_clock == NULL) {
+ printk(KERN_ERR "no smem clock\n");
+ return 0;
+ }
+
+ state = smsm_get_state(SMSM_MODEM_STATE);
+ if ((state & SMSM_INIT) == 0) {
+ printk(KERN_ERR "smsm not initialized\n");
+ return 0;
+ }
+
+ time_start(data);
+ while ((state = smsm_get_state(SMSM_TIME_MASTER_DEM)) &
+ MASTER_TIME_PENDING) {
+ if (time_expired(data)) {
+ printk(KERN_EMERG "get_smem_clock: timeout 1 still "
+ "invalid state %x\n", state);
+ msm_timer_sync_timeout();
+ }
+ }
+
+ smsm_change_state(SMSM_APPS_DEM, SLAVE_TIME_POLL | SLAVE_TIME_INIT,
+ SLAVE_TIME_REQUEST);
+
+ time_start(data);
+ while (!((state = smsm_get_state(SMSM_TIME_MASTER_DEM)) &
+ MASTER_TIME_PENDING)) {
+ if (time_expired(data)) {
+ printk(KERN_EMERG "get_smem_clock: timeout 2 still "
+ "invalid state %x\n", state);
+ msm_timer_sync_timeout();
+ }
+ }
+
+ smsm_change_state(SMSM_APPS_DEM, SLAVE_TIME_REQUEST, SLAVE_TIME_POLL);
+
+ time_start(data);
+ do {
+ smem_clock_val = *smem_clock;
+ } while (smem_clock_val == 0 && !time_expired(data));
+
+ state = smsm_get_state(SMSM_TIME_MASTER_DEM);
+
+ if (smem_clock_val) {
+ if (update != NULL)
+ update(data, smem_clock_val, sclk_hz);
+
+ if (msm_timer_debug_mask & MSM_TIMER_DEBUG_SYNC)
+ printk(KERN_INFO
+ "get_smem_clock: state %x clock %u\n",
+ state, smem_clock_val);
+ } else {
+ printk(KERN_EMERG
+ "get_smem_clock: timeout state %x clock %u\n",
+ state, smem_clock_val);
+ msm_timer_sync_timeout();
+ }
+
+ smsm_change_state(SMSM_APPS_DEM, SLAVE_TIME_REQUEST | SLAVE_TIME_POLL,
+ SLAVE_TIME_INIT);
+ return smem_clock_val;
+>>>>>>> patched:arch/arm/mach-msm/timer.c
}
+#endif /* CONFIG_MSM_DIRECT_SCLK_ACCESS */
+<<<<<<< current:drivers/clocksource/qcom-timer.c
#ifdef CONFIG_ARCH_QCOM
static void __init msm_dt_timer_init(struct device_node *np)
{
@@ -246,65 +698,394 @@ static void __init msm_dt_timer_init(struct device_node *np)
if (!base) {
pr_err("Failed to map event base\n");
return;
+=======
+/*
+ * Callback function that initializes the timeout value.
+ */
+static void msm_timer_sync_to_sclk_time_start(
+	struct msm_timer_sync_data_t *data)
+{
+	/* approx 2 seconds */
+	uint32_t delta = data->clock->freq << data->clock->shift << 1;
+	/*
+	 * timeout is an absolute local-timer count; the paired
+	 * _time_expired() callback compares against it with wraparound-safe
+	 * signed arithmetic.
+	 */
+	data->timeout = msm_read_timer_count(data->clock, LOCAL_TIMER) + delta;
+}
+
+/*
+ * Callback function that checks the timeout.
+ */
+static bool msm_timer_sync_to_sclk_time_expired(
+	struct msm_timer_sync_data_t *data)
+{
+	uint32_t delta = msm_read_timer_count(data->clock, LOCAL_TIMER) -
+		data->timeout;
+	/* signed interpretation of the difference handles 32-bit rollover */
+	return ((int32_t) delta) > 0;
+}
+
+/*
+ * Callback function that updates local clock from the specified source clock
+ * value and frequency.
+ */
+static void msm_timer_sync_update(struct msm_timer_sync_data_t *data,
+ uint32_t src_clk_val, uint32_t src_clk_freq)
+{
+ struct msm_clock *dst_clk = data->clock;
+ struct msm_clock_percpu_data *dst_clk_state =
+ &__get_cpu_var(msm_clocks_percpu)[dst_clk->index];
+ uint32_t dst_clk_val = msm_read_timer_count(dst_clk, LOCAL_TIMER);
+ uint32_t new_offset;
+
+ if ((dst_clk->freq << dst_clk->shift) == src_clk_freq) {
+ new_offset = src_clk_val - dst_clk_val;
+ } else {
+ uint64_t temp;
+
+ /* separate multiplication and division steps to reduce
+ rounding error */
+ temp = src_clk_val;
+ temp *= dst_clk->freq << dst_clk->shift;
+ do_div(temp, src_clk_freq);
+
+ new_offset = (uint32_t)(temp) - dst_clk_val;
}
- /* We use GPT0 for the clockevent */
- irq = irq_of_parse_and_map(np, 1);
- if (irq <= 0) {
- pr_err("Can't get irq\n");
- return;
+ if (dst_clk_state->sleep_offset + dst_clk_state->non_sleep_offset !=
+ new_offset) {
+ if (data->exit_sleep)
+ dst_clk_state->sleep_offset =
+ new_offset - dst_clk_state->non_sleep_offset;
+ else
+ dst_clk_state->non_sleep_offset =
+ new_offset - dst_clk_state->sleep_offset;
+
+ if (msm_timer_debug_mask & MSM_TIMER_DEBUG_SYNC)
+ printk(KERN_INFO "sync clock %s: "
+ "src %u, new offset %u + %u\n",
+ dst_clk->clocksource.name, src_clk_val,
+ dst_clk_state->sleep_offset,
+ dst_clk_state->non_sleep_offset);
+>>>>>>> patched:arch/arm/mach-msm/timer.c
}
+}
- /* We use CPU0's DGT for the clocksource */
- if (of_property_read_u32(np, "cpu-offset", &percpu_offset))
- percpu_offset = 0;
+/*
+ * Synchronize GPT clock with sclk.
+ */
+static void msm_timer_sync_gpt_to_sclk(int exit_sleep)
+{
+ struct msm_clock *gpt_clk = &msm_clocks[MSM_CLOCK_GPT];
+ struct msm_clock_percpu_data *gpt_clk_state =
+ &__get_cpu_var(msm_clocks_percpu)[MSM_CLOCK_GPT];
+ struct msm_timer_sync_data_t data;
+ uint32_t ret;
- if (of_address_to_resource(np, 0, &res)) {
- pr_err("Failed to parse DGT resource\n");
+ if (gpt_clk_state->in_sync)
return;
- }
- cpu0_base = ioremap(res.start + percpu_offset, resource_size(&res));
- if (!cpu0_base) {
- pr_err("Failed to map source base\n");
+ data.clock = gpt_clk;
+ data.timeout = 0;
+ data.exit_sleep = exit_sleep;
+
+ ret = msm_timer_do_sync_to_sclk(
+ msm_timer_sync_to_sclk_time_start,
+ msm_timer_sync_to_sclk_time_expired,
+ msm_timer_sync_update,
+ &data);
+
+ if (ret)
+ gpt_clk_state->in_sync = 1;
+}
+
+/*
+ * Synchronize clock with GPT clock.
+ */
+static void msm_timer_sync_to_gpt(struct msm_clock *clock, int exit_sleep)
+{
+ struct msm_clock *gpt_clk = &msm_clocks[MSM_CLOCK_GPT];
+ struct msm_clock_percpu_data *gpt_clk_state =
+ &__get_cpu_var(msm_clocks_percpu)[MSM_CLOCK_GPT];
+ struct msm_clock_percpu_data *clock_state =
+ &__get_cpu_var(msm_clocks_percpu)[clock->index];
+ struct msm_timer_sync_data_t data;
+ uint32_t gpt_clk_val;
+ u64 gpt_period = (1ULL << 32) * HZ;
+ u64 now = get_jiffies_64();
+
+ do_div(gpt_period, gpt_hz);
+
+ BUG_ON(clock == gpt_clk);
+
+ if (clock_state->in_sync &&
+ (now - clock_state->last_sync_jiffies < (gpt_period >> 1)))
return;
+
+ gpt_clk_val = msm_read_timer_count(gpt_clk, LOCAL_TIMER)
+ + gpt_clk_state->sleep_offset + gpt_clk_state->non_sleep_offset;
+
+ if (exit_sleep && gpt_clk_val < clock_state->last_sync_gpt)
+ clock_state->non_sleep_offset -= clock->rollover_offset;
+
+ data.clock = clock;
+ data.timeout = 0;
+ data.exit_sleep = exit_sleep;
+
+ msm_timer_sync_update(&data, gpt_clk_val, gpt_hz);
+
+ clock_state->in_sync = 1;
+ clock_state->last_sync_gpt = gpt_clk_val;
+ clock_state->last_sync_jiffies = now;
+}
+
+/*
+ * Reprogram the pending clockevent alarm (e.g. after idle/sleep shifted the
+ * counter), clamping the delta to the minimum programmable value and retrying
+ * until msm_timer_set_next_event() accepts it.
+ */
+static void msm_timer_reactivate_alarm(struct msm_clock *clock)
+{
+	struct msm_clock_percpu_data *clock_state =
+		&__get_cpu_var(msm_clocks_percpu)[clock->index];
+	long alarm_delta = clock_state->alarm_vtime -
+		clock_state->sleep_offset -
+		msm_read_timer_count(clock, LOCAL_TIMER);
+	alarm_delta >>= clock->shift;
+	/* never program below the hardware write latency margin */
+	if (alarm_delta < (long)clock->write_delay + 4)
+		alarm_delta = clock->write_delay + 4;
+	while (msm_timer_set_next_event(alarm_delta, &clock->clockevent))
+		;
+}
+
+int64_t msm_timer_enter_idle(void)
+{
+ struct msm_clock *gpt_clk = &msm_clocks[MSM_CLOCK_GPT];
+ struct msm_clock *clock = __get_cpu_var(msm_active_clock);
+ struct msm_clock_percpu_data *clock_state =
+ &__get_cpu_var(msm_clocks_percpu)[clock->index];
+ uint32_t alarm;
+ uint32_t count;
+ int32_t delta;
+
+ BUG_ON(clock != &msm_clocks[MSM_CLOCK_GPT] &&
+ clock != &msm_clocks[MSM_CLOCK_DGT]);
+
+ msm_timer_sync_gpt_to_sclk(0);
+ if (clock != gpt_clk)
+ msm_timer_sync_to_gpt(clock, 0);
+
+ count = msm_read_timer_count(clock, LOCAL_TIMER);
+ if (clock_state->stopped++ == 0)
+ clock_state->stopped_tick = count + clock_state->sleep_offset;
+ alarm = clock_state->alarm;
+ delta = alarm - count;
+ if (delta <= -(int32_t)((clock->freq << clock->shift) >> 10)) {
+ /* timer should have triggered 1ms ago */
+ printk(KERN_ERR "msm_timer_enter_idle: timer late %d, "
+ "reprogram it\n", delta);
+ msm_timer_reactivate_alarm(clock);
}
+ if (delta <= 0)
+ return 0;
+ return clocksource_cyc2ns((alarm - count) >> clock->shift,
+ clock->clocksource.mult,
+ clock->clocksource.shift);
+}
- if (of_property_read_u32(np, "clock-frequency", &freq)) {
- pr_err("Unknown frequency\n");
- return;
+/*
+ * Undo msm_timer_enter_idle(): after a low-power exit, re-enable the GPT
+ * (and, if different, the active clock), re-synchronize them to the slow
+ * clock / GPT, and reprogram the pending alarm.  For a non-low-power exit
+ * only the stopped count is decremented.
+ */
+void msm_timer_exit_idle(int low_power)
+{
+	struct msm_clock *gpt_clk = &msm_clocks[MSM_CLOCK_GPT];
+	struct msm_clock *clock = __get_cpu_var(msm_active_clock);
+	struct msm_clock_percpu_data *gpt_clk_state =
+		&__get_cpu_var(msm_clocks_percpu)[MSM_CLOCK_GPT];
+	struct msm_clock_percpu_data *clock_state =
+		&__get_cpu_var(msm_clocks_percpu)[clock->index];
+	uint32_t enabled;
+
+	BUG_ON(clock != &msm_clocks[MSM_CLOCK_GPT] &&
+	       clock != &msm_clocks[MSM_CLOCK_DGT]);
+
+	if (!low_power)
+		goto exit_idle_exit;
+
+	enabled = __raw_readl(gpt_clk->regbase + TIMER_ENABLE) &
+		TIMER_ENABLE_EN;
+	if (!enabled)
+		__raw_writel(TIMER_ENABLE_EN, gpt_clk->regbase + TIMER_ENABLE);
+
+#if defined(CONFIG_ARCH_MSM_SCORPION) || defined(CONFIG_ARCH_MSM_KRAIT)
+	/* Scorpion/Krait: always resync after low-power exit */
+	gpt_clk_state->in_sync = 0;
+#else
+	/* if the timer was found disabled, its count is stale: force resync */
+	gpt_clk_state->in_sync = gpt_clk_state->in_sync && enabled;
+#endif
+	/* Make sure timer is actually enabled before we sync it */
+	wmb();
+	msm_timer_sync_gpt_to_sclk(1);
+
+	if (clock == gpt_clk)
+		goto exit_idle_alarm;
+
+	enabled = __raw_readl(clock->regbase + TIMER_ENABLE) & TIMER_ENABLE_EN;
+	if (!enabled)
+		__raw_writel(TIMER_ENABLE_EN, clock->regbase + TIMER_ENABLE);
+
+#if defined(CONFIG_ARCH_MSM_SCORPION) || defined(CONFIG_ARCH_MSM_KRAIT)
+	clock_state->in_sync = 0;
+#else
+	clock_state->in_sync = clock_state->in_sync && enabled;
+#endif
+	/* Make sure timer is actually enabled before we sync it */
+	wmb();
+	msm_timer_sync_to_gpt(clock, 1);
+
+exit_idle_alarm:
+	msm_timer_reactivate_alarm(clock);
+
+exit_idle_exit:
+	clock_state->stopped--;
+}
+
+/*
+ * Callback function that initializes the timeout value.
+ */
+static void msm_timer_get_sclk_time_start(
+	struct msm_timer_sync_data_t *data)
+{
+	/*
+	 * Here timeout is a poll-iteration budget, not a counter value:
+	 * 200000 iterations at 10us each (see _time_expired) is ~2 seconds.
+	 */
+	data->timeout = 200000;
+}
+
+/*
+ * Callback function that checks the timeout.
+ */
+static bool msm_timer_get_sclk_time_expired(
+	struct msm_timer_sync_data_t *data)
+{
+	/* burn 10us per poll, then consume one unit of the iteration budget */
+	udelay(10);
+	return --data->timeout <= 0;
+}
+
+/*
+ * Retrieve the cycle count from the sclk and convert it into
+ * nanoseconds.
+ *
+ * On exit, if period is not NULL, it contains the period of the
+ * sclk in nanoseconds, i.e. how long the cycle count wraps around.
+ *
+ * Return value:
+ * 0: the operation failed; period is not set either
+ * >0: time in nanoseconds
+ */
+int64_t msm_timer_get_sclk_time(int64_t *period)
+{
+	struct msm_timer_sync_data_t data;
+	uint32_t clock_value;
+	int64_t tmp;
+
+	memset(&data, 0, sizeof(data));
+	/* read the raw sclk count; no local-clock update callback needed */
+	clock_value = msm_timer_do_sync_to_sclk(
+		msm_timer_get_sclk_time_start,
+		msm_timer_get_sclk_time_expired,
+		NULL,
+		&data);
+
+	if (!clock_value)
+		return 0;
+
+	if (period) {
+		/* wrap period of the 32-bit sclk counter, in ns:
+		 * 2^32 * NSEC_PER_SEC / sclk_hz */
+		tmp = 1LL << 32;
+		tmp *= NSEC_PER_SEC;
+		do_div(tmp, sclk_hz);
+		*period = tmp;
+	}
+
+	/* convert the cycle count itself to ns */
+	tmp = (int64_t)clock_value;
+	tmp *= NSEC_PER_SEC;
+	do_div(tmp, sclk_hz);
+	return tmp;
+}
+
+int __init msm_timer_init_time_sync(void (*timeout)(void))
+{
+#if !defined(CONFIG_MSM_DIRECT_SCLK_ACCESS)
+ int ret = smsm_change_intr_mask(SMSM_TIME_MASTER_DEM, 0xFFFFFFFF, 0);
+
+ if (ret) {
+ printk(KERN_ERR "%s: failed to clear interrupt mask, %d\n",
+ __func__, ret);
+ return ret;
}
- event_base = base + 0x4;
- sts_base = base + 0x88;
- source_base = cpu0_base + 0x24;
- freq /= 4;
- writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL);
+ smsm_change_state(SMSM_APPS_DEM,
+ SLAVE_TIME_REQUEST | SLAVE_TIME_POLL, SLAVE_TIME_INIT);
+#endif
- msm_timer_init(freq, 32, irq, !!percpu_offset);
+ BUG_ON(timeout == NULL);
+ msm_timer_sync_timeout = timeout;
+
+ return 0;
}
+<<<<<<< current:drivers/clocksource/qcom-timer.c
CLOCKSOURCE_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init);
CLOCKSOURCE_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init);
#else
+=======
-static int __init msm_timer_map(phys_addr_t addr, u32 event, u32 source,
- u32 sts)
+#endif
+>>>>>>> patched:arch/arm/mach-msm/timer.c
+
+static u32 notrace msm_read_sched_clock(void)
{
- void __iomem *base;
+ struct msm_clock *clock = &msm_clocks[msm_global_timer];
+ struct clocksource *cs = &clock->clocksource;
+ return cs->read(NULL);
+}
- base = ioremap(addr, SZ_256);
- if (!base) {
- pr_err("Failed to map timer base\n");
- return -ENOMEM;
+static struct delay_timer msm_delay_timer;
+
+/* delay_timer read hook: always samples the global DGT count */
+static unsigned long msm_read_current_timer(void)
+{
+	struct msm_clock *dgt = &msm_clocks[MSM_CLOCK_DGT];
+	return msm_read_timer_count(dgt, GLOBAL_TIMER);
+}
+
+/* Register the global timer as the kernel's sched_clock source. */
+static void __init msm_sched_clock_init(void)
+{
+	struct msm_clock *clock = &msm_clocks[msm_global_timer];
+
+	/* usable width shrinks by the clock's shift (see clocksource mask) */
+	setup_sched_clock(msm_read_sched_clock, 32 - clock->shift, clock->freq);
+}
+
+#ifdef CONFIG_LOCAL_TIMERS
+int __cpuinit local_timer_setup(struct clock_event_device *evt)
+{
+ static DEFINE_PER_CPU(bool, first_boot) = true;
+ struct msm_clock *clock = &msm_clocks[msm_global_timer];
+
+ /* Use existing clock_event for cpu 0 */
+ if (!smp_processor_id())
+ return 0;
+
+ if (cpu_is_msm8x60() || soc_class_is_msm8960() ||
+ soc_class_is_apq8064() || soc_class_is_msm8930())
+ __raw_writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
+
+ if (__get_cpu_var(first_boot)) {
+ __raw_writel(0, clock->regbase + TIMER_ENABLE);
+ __raw_writel(0, clock->regbase + TIMER_CLEAR);
+ __raw_writel(~0, clock->regbase + TIMER_MATCH_VAL);
+ __get_cpu_var(first_boot) = false;
+ if (clock->status_mask)
+ while (__raw_readl(MSM_TMR_BASE + TIMER_STATUS) &
+ clock->status_mask)
+ ;
}
- event_base = base + event;
- source_base = base + source;
- if (sts)
- sts_base = base + sts;
+ evt->irq = clock->irq;
+ evt->name = "local_timer";
+ evt->features = CLOCK_EVT_FEAT_ONESHOT;
+ evt->rating = clock->clockevent.rating;
+ evt->set_mode = msm_timer_set_mode;
+ evt->set_next_event = msm_timer_set_next_event;
+
+ *__this_cpu_ptr(clock->percpu_evt) = evt;
+ clockevents_config_and_register(evt, gpt_hz, 4, 0xf0000000);
+ enable_percpu_irq(evt->irq, IRQ_TYPE_EDGE_RISING);
return 0;
}
+<<<<<<< current:drivers/clocksource/qcom-timer.c
static notrace cycle_t msm_read_timer_count_shift(struct clocksource *cs)
{
/*
@@ -315,29 +1096,204 @@ static notrace cycle_t msm_read_timer_count_shift(struct clocksource *cs)
}
void __init msm7x01_timer_init(void)
+=======
+void local_timer_stop(struct clock_event_device *evt)
+>>>>>>> patched:arch/arm/mach-msm/timer.c
{
- struct clocksource *cs = &msm_clocksource;
-
- if (msm_timer_map(0xc0100000, 0x0, 0x10, 0x0))
- return;
- cs->read = msm_read_timer_count_shift;
- cs->mask = CLOCKSOURCE_MASK((32 - MSM_DGT_SHIFT));
- /* 600 KHz */
- msm_timer_init(19200000 >> MSM_DGT_SHIFT, 32 - MSM_DGT_SHIFT, 7,
- false);
+ evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+ disable_percpu_irq(evt->irq);
}
-void __init msm7x30_timer_init(void)
+static struct local_timer_ops msm_lt_ops = {
+ local_timer_setup,
+ local_timer_stop,
+};
+#endif /* CONFIG_LOCAL_TIMERS */
+
+#ifdef CONFIG_ARCH_MSM8625
+static void fixup_msm8625_timer(void)
{
- if (msm_timer_map(0xc0100000, 0x4, 0x24, 0x80))
- return;
- msm_timer_init(24576000 / 4, 32, 1, false);
+ struct msm_clock *dgt = &msm_clocks[MSM_CLOCK_DGT];
+ struct msm_clock *gpt = &msm_clocks[MSM_CLOCK_GPT];
+ dgt->irq = MSM8625_INT_DEBUG_TIMER_EXP;
+ gpt->irq = MSM8625_INT_GP_TIMER_EXP;
+ global_timer_offset = MSM_TMR0_BASE - MSM_TMR_BASE;
}
+#else
+static inline void fixup_msm8625_timer(void) { };
+#endif
-void __init qsd8x50_timer_init(void)
+void __init msm_timer_init(void)
{
- if (msm_timer_map(0xAC100000, 0x0, 0x10, 0x34))
- return;
- msm_timer_init(19200000 / 4, 32, 7, false);
+ int i;
+ int res;
+ struct irq_chip *chip;
+ struct msm_clock *dgt = &msm_clocks[MSM_CLOCK_DGT];
+ struct msm_clock *gpt = &msm_clocks[MSM_CLOCK_GPT];
+
+ if (cpu_is_msm7x01() || cpu_is_msm7x25() || cpu_is_msm7x27() ||
+ cpu_is_msm7x25a() || cpu_is_msm7x27a() || cpu_is_msm7x25aa() ||
+ cpu_is_msm7x27aa() || cpu_is_msm8625() || cpu_is_msm7x25ab() ||
+ cpu_is_msm8625q()) {
+ dgt->shift = MSM_DGT_SHIFT;
+ dgt->freq = 19200000 >> MSM_DGT_SHIFT;
+ dgt->clockevent.shift = 32 + MSM_DGT_SHIFT;
+ dgt->clocksource.mask = CLOCKSOURCE_MASK(32 - MSM_DGT_SHIFT);
+ gpt->regbase = MSM_TMR_BASE;
+ dgt->regbase = MSM_TMR_BASE + 0x10;
+ gpt->flags |= MSM_CLOCK_FLAGS_UNSTABLE_COUNT
+ | MSM_CLOCK_FLAGS_ODD_MATCH_WRITE
+ | MSM_CLOCK_FLAGS_DELAYED_WRITE_POST;
+ if (cpu_is_msm8625() || cpu_is_msm8625q())
+ fixup_msm8625_timer();
+ } else if (cpu_is_qsd8x50()) {
+ dgt->freq = 4800000;
+ gpt->regbase = MSM_TMR_BASE;
+ dgt->regbase = MSM_TMR_BASE + 0x10;
+ } else if (cpu_is_fsm9xxx())
+ dgt->freq = 4800000;
+ else if (cpu_is_msm7x30() || cpu_is_msm8x55()) {
+ gpt->status_mask = BIT(10);
+ dgt->status_mask = BIT(2);
+ dgt->freq = 6144000;
+ } else if (cpu_is_msm8x60()) {
+ global_timer_offset = MSM_TMR0_BASE - MSM_TMR_BASE;
+ gpt->status_mask = BIT(10);
+ dgt->status_mask = BIT(2);
+ dgt->freq = 6750000;
+ __raw_writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
+ } else if (cpu_is_msm9615()) {
+ dgt->freq = 6750000;
+ __raw_writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
+ gpt->status_mask = BIT(10);
+ dgt->status_mask = BIT(2);
+ gpt->freq = 32765;
+ gpt_hz = 32765;
+ sclk_hz = 32765;
+ gpt->flags |= MSM_CLOCK_FLAGS_UNSTABLE_COUNT;
+ dgt->flags |= MSM_CLOCK_FLAGS_UNSTABLE_COUNT;
+ } else if (soc_class_is_msm8960() || soc_class_is_apq8064() ||
+ soc_class_is_msm8930()) {
+ global_timer_offset = MSM_TMR0_BASE - MSM_TMR_BASE;
+ dgt->freq = 6750000;
+ __raw_writel(DGT_CLK_CTL_DIV_4, MSM_TMR_BASE + DGT_CLK_CTL);
+ gpt->status_mask = BIT(10);
+ dgt->status_mask = BIT(2);
+ if (!soc_class_is_apq8064()) {
+ gpt->freq = 32765;
+ gpt_hz = 32765;
+ sclk_hz = 32765;
+ }
+ if (!soc_class_is_msm8930() && !cpu_is_msm8960ab()) {
+ gpt->flags |= MSM_CLOCK_FLAGS_UNSTABLE_COUNT;
+ dgt->flags |= MSM_CLOCK_FLAGS_UNSTABLE_COUNT;
+ }
+ } else {
+ WARN(1, "Timer running on unknown hardware. Configure this! "
+ "Assuming default configuration.\n");
+ dgt->freq = 6750000;
+ }
+
+ if (msm_clocks[MSM_CLOCK_GPT].clocksource.rating > DG_TIMER_RATING)
+ msm_global_timer = MSM_CLOCK_GPT;
+ else
+ msm_global_timer = MSM_CLOCK_DGT;
+
+ for (i = 0; i < ARRAY_SIZE(msm_clocks); i++) {
+ struct msm_clock *clock = &msm_clocks[i];
+ struct clock_event_device *ce = &clock->clockevent;
+ struct clocksource *cs = &clock->clocksource;
+ __raw_writel(0, clock->regbase + TIMER_ENABLE);
+ __raw_writel(0, clock->regbase + TIMER_CLEAR);
+ __raw_writel(~0, clock->regbase + TIMER_MATCH_VAL);
+
+ if ((clock->freq << clock->shift) == gpt_hz) {
+ clock->rollover_offset = 0;
+ } else {
+ uint64_t temp;
+
+ temp = clock->freq << clock->shift;
+ temp <<= 32;
+ do_div(temp, gpt_hz);
+
+ clock->rollover_offset = (uint32_t) temp;
+ }
+
+ ce->mult = div_sc(clock->freq, NSEC_PER_SEC, ce->shift);
+ /* allow at least 10 seconds to notice that the timer wrapped */
+ ce->max_delta_ns =
+ clockevent_delta2ns(0xf0000000 >> clock->shift, ce);
+ /* ticks gets rounded down by one */
+ ce->min_delta_ns =
+ clockevent_delta2ns(clock->write_delay + 4, ce);
+ ce->cpumask = cpumask_of(0);
+
+ res = clocksource_register_hz(cs, clock->freq);
+ if (res)
+ printk(KERN_ERR "msm_timer_init: clocksource_register "
+ "failed for %s\n", cs->name);
+
+ ce->irq = clock->irq;
+ if (cpu_is_msm8x60() || cpu_is_msm9615() || cpu_is_msm8625() ||
+ cpu_is_msm8625q() || soc_class_is_msm8960() ||
+ soc_class_is_apq8064() || soc_class_is_msm8930()) {
+ clock->percpu_evt = alloc_percpu(struct clock_event_device *);
+ if (!clock->percpu_evt) {
+ pr_err("msm_timer_init: memory allocation "
+ "failed for %s\n", ce->name);
+ continue;
+ }
+
+ *__this_cpu_ptr(clock->percpu_evt) = ce;
+ res = request_percpu_irq(ce->irq, msm_timer_interrupt,
+ ce->name, clock->percpu_evt);
+ if (!res)
+ enable_percpu_irq(ce->irq,
+ IRQ_TYPE_EDGE_RISING);
+ } else {
+ clock->evt = ce;
+ res = request_irq(ce->irq, msm_timer_interrupt,
+ IRQF_TIMER | IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
+ ce->name, &clock->evt);
+ }
+
+ if (res)
+ pr_err("msm_timer_init: request_irq failed for %s\n",
+ ce->name);
+
+ chip = irq_get_chip(clock->irq);
+ if (chip && chip->irq_mask)
+ chip->irq_mask(irq_get_irq_data(clock->irq));
+
+ if (clock->status_mask)
+ while (__raw_readl(MSM_TMR_BASE + TIMER_STATUS) &
+ clock->status_mask)
+ ;
+
+ clockevents_register_device(ce);
+ }
+ msm_sched_clock_init();
+
+ if (use_user_accessible_timers()) {
+ if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_apq8064()) {
+ struct msm_clock *gtclock = &msm_clocks[MSM_CLOCK_GPT];
+ void __iomem *addr = gtclock->regbase +
+ TIMER_COUNT_VAL + global_timer_offset;
+ setup_user_timer_offset(virt_to_phys(addr)&0xfff);
+ set_user_accessible_timer_flag(true);
+ }
+ }
+
+ if (is_smp()) {
+ __raw_writel(1,
+ msm_clocks[MSM_CLOCK_DGT].regbase + TIMER_ENABLE);
+ msm_delay_timer.freq = dgt->freq;
+ msm_delay_timer.read_current_timer = &msm_read_current_timer;
+ register_current_timer_delay(&msm_delay_timer);
+ }
+
+#ifdef CONFIG_LOCAL_TIMERS
+ local_timer_register(&msm_lt_ops);
+#endif
}
#endif
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 83a75dc84761..0518d71d2b71 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -93,6 +93,9 @@ config ARM_EXYNOS_CPU_FREQ_BOOST_SW
config ARM_HIGHBANK_CPUFREQ
tristate "Calxeda Highbank-based"
depends on ARCH_HIGHBANK && CPUFREQ_DT && REGULATOR
+ select GENERIC_CPUFREQ_CPU0
+ select PM_OPP
+
default m
help
This adds the CPUFreq driver for Calxeda Highbank SoC
@@ -247,3 +250,10 @@ config ARM_TEGRA_CPUFREQ
default y
help
This adds the CPUFreq driver support for TEGRA SOCs.
+
+config CPU_FREQ_MSM
+ bool
+ depends on CPU_FREQ && (ARCH_MSM || ARCH_QCOM)
+ default y
+ help
+ This enables the CPUFreq driver for Qualcomm CPUs.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 40c53dc1937e..357ae7a36f9b 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -77,6 +77,7 @@ obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o
obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
obj-$(CONFIG_ARM_TEGRA_CPUFREQ) += tegra-cpufreq.o
obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
+obj-$(CONFIG_CPU_FREQ_MSM) += qcom-cpufreq.o
##################################################################################
# PowerPC platform drivers
diff --git a/drivers/cpufreq/qcom-cpufreq.c b/drivers/cpufreq/qcom-cpufreq.c
new file mode 100644
index 000000000000..9da596257520
--- /dev/null
+++ b/drivers/cpufreq/qcom-cpufreq.c
@@ -0,0 +1,627 @@
+/* arch/arm/mach-msm/cpufreq.c
+ *
+ * MSM architecture cpufreq driver
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2014, The Linux Foundation. All rights reserved.
+ * Author: Mike A. Chan <mikechan@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cpufreq.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+#include <linux/suspend.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <soc/qcom/cpufreq.h>
+#include <trace/events/power.h>
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/div64.h>
+#endif
+
+static DEFINE_MUTEX(l2bw_lock);
+
+static struct clk *cpu_clk[NR_CPUS];
+static struct clk *l2_clk;
+static unsigned int freq_index[NR_CPUS];
+static unsigned int max_freq_index;
+static struct cpufreq_frequency_table *freq_table;
+static unsigned int *l2_khz;
+static unsigned long *mem_bw;
+static bool hotplug_ready;
+
+struct cpufreq_work_struct {
+ struct work_struct work;
+ struct cpufreq_policy *policy;
+ struct completion complete;
+ int frequency;
+ unsigned int index;
+ int status;
+};
+
+static DEFINE_PER_CPU(struct cpufreq_work_struct, cpufreq_work);
+static struct workqueue_struct *msm_cpufreq_wq;
+
+struct cpufreq_suspend_t {
+ struct mutex suspend_mutex;
+ int device_suspended;
+};
+
+static DEFINE_PER_CPU(struct cpufreq_suspend_t, cpufreq_suspend_info);
+
+/* Memory-bandwidth vote corresponding to the current max CPU freq index. */
+unsigned long msm_cpufreq_get_bw(void)
+{
+	return mem_bw[max_freq_index];
+}
+
+/*
+ * Scale the L2 clock (and memory-bandwidth vote) to match the highest
+ * frequency index across all online CPUs.  @also_cpu, when non-NULL,
+ * names an extra CPU (e.g. one still coming online) whose index must
+ * also be counted.  Serialized by l2bw_lock.
+ */
+static void update_l2_bw(int *also_cpu)
+{
+	int rc = 0, cpu;
+	unsigned int index = 0;
+
+	mutex_lock(&l2bw_lock);
+
+	if (also_cpu)
+		index = freq_index[*also_cpu];
+
+	for_each_online_cpu(cpu) {
+		index = max(index, freq_index[cpu]);
+	}
+
+	if (l2_clk)
+		rc = clk_set_rate(l2_clk, l2_khz[index] * 1000);
+	if (rc) {
+		pr_err("Error setting L2 clock rate!\n");
+		goto out;
+	}
+
+	/* publish the new index before re-voting bandwidth */
+	max_freq_index = index;
+	rc = devfreq_msm_cpufreq_update_bw();
+	if (rc)
+		pr_err("Unable to update BW (%d)\n", rc);
+
+out:
+	mutex_unlock(&l2bw_lock);
+}
+
+/*
+ * Switch @policy's CPU clock to @new_freq (kHz) and, on success, record
+ * the new frequency @index so L2/memory bandwidth can be scaled to match.
+ * Returns 0 on success or the clk_set_rate() error code.
+ *
+ * Fix: the merged code ended the cpufreq transition twice on success -
+ * once with failed=1 inside the if (!ret) branch and again with 0
+ * unconditionally - and reported a real clk failure as success.  The
+ * transition must be ended exactly once, with the actual outcome.
+ */
+static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq,
+			unsigned int index)
+{
+	int ret = 0;
+	int saved_sched_policy = -EINVAL;
+	int saved_sched_rt_prio = -EINVAL;
+	struct cpufreq_freqs freqs;
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+	unsigned long rate;
+
+	freqs.old = policy->cur;
+	freqs.new = new_freq;
+	freqs.cpu = policy->cpu;
+
+	/*
+	 * Put the caller into SCHED_FIFO priority to avoid cpu starvation
+	 * while increasing frequencies
+	 */
+
+	if (freqs.new > freqs.old && current->policy != SCHED_FIFO) {
+		saved_sched_policy = current->policy;
+		saved_sched_rt_prio = current->rt_priority;
+		sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+	}
+
+	cpufreq_freq_transition_begin(policy, &freqs);
+
+	rate = new_freq * 1000;
+	rate = clk_round_rate(cpu_clk[policy->cpu], rate);
+	ret = clk_set_rate(cpu_clk[policy->cpu], rate);
+
+	/* End the transition exactly once, reporting the real outcome. */
+	cpufreq_freq_transition_end(policy, &freqs, ret != 0);
+
+	if (!ret) {
+		freq_index[policy->cpu] = index;
+		update_l2_bw(NULL);
+	}
+
+	/* Restore priority after clock ramp-up */
+	if (freqs.new > freqs.old && saved_sched_policy >= 0) {
+		param.sched_priority = saved_sched_rt_prio;
+		sched_setscheduler_nocheck(current, saved_sched_policy, &param);
+	}
+
+	return ret;
+}
+
+/*
+ * Workqueue handler for a frequency change request; queued on the target
+ * CPU by msm_cpufreq_target() so the clk operations run locally.  Stores
+ * the result in cpu_work->status and signals the waiting requester.
+ */
+static void set_cpu_work(struct work_struct *work)
+{
+	struct cpufreq_work_struct *cpu_work =
+		container_of(work, struct cpufreq_work_struct, work);
+
+	cpu_work->status = set_cpu_freq(cpu_work->policy, cpu_work->frequency,
+		cpu_work->index);
+	complete(&cpu_work->complete);
+}
+
+/*
+ * cpufreq .target hook: resolve target_freq against the frequency table,
+ * then hand the actual clk change to a per-CPU worker (queued on the
+ * target CPU) and wait for its result.  Rejected with -EFAULT while the
+ * device is suspended.
+ */
+static int msm_cpufreq_target(struct cpufreq_policy *policy,
+				unsigned int target_freq,
+				unsigned int relation)
+{
+	int ret = -EFAULT;
+	int index;
+	struct cpufreq_frequency_table *table;
+
+	struct cpufreq_work_struct *cpu_work = NULL;
+
+	mutex_lock(&per_cpu(cpufreq_suspend_info, policy->cpu).suspend_mutex);
+
+	if (per_cpu(cpufreq_suspend_info, policy->cpu).device_suspended) {
+		pr_debug("cpufreq: cpu%d scheduling frequency change "
+			"in suspend.\n", policy->cpu);
+		ret = -EFAULT;
+		goto done;
+	}
+
+	table = cpufreq_frequency_get_table(policy->cpu);
+	if (cpufreq_frequency_table_target(policy, table, target_freq, relation,
+			&index)) {
+		pr_err("cpufreq: invalid target_freq: %d\n", target_freq);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	pr_debug("CPU[%d] target %d relation %d (%d-%d) selected %d\n",
+		policy->cpu, target_freq, relation,
+		policy->min, policy->max, table[index].frequency);
+
+	cpu_work = &per_cpu(cpufreq_work, policy->cpu);
+	cpu_work->policy = policy;
+	cpu_work->frequency = table[index].frequency;
+	cpu_work->index = table[index].driver_data;
+	cpu_work->status = -ENODEV;
+
+	/* flush any stale request before reusing the per-CPU work item */
+	cancel_work_sync(&cpu_work->work);
+	reinit_completion(&cpu_work->complete);
+	queue_work_on(policy->cpu, msm_cpufreq_wq, &cpu_work->work);
+	wait_for_completion(&cpu_work->complete);
+
+	ret = cpu_work->status;
+
+done:
+	mutex_unlock(&per_cpu(cpufreq_suspend_info, policy->cpu).suspend_mutex);
+	return ret;
+}
+
+/* cpufreq .verify hook: clamp the policy to the hardware cpuinfo limits. */
+static int msm_cpufreq_verify(struct cpufreq_policy *policy)
+{
+	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+			policy->cpuinfo.max_freq);
+	return 0;
+}
+
+/* cpufreq .get hook: current CPU clock rate in kHz, read from the clk. */
+static unsigned int msm_cpufreq_get_freq(unsigned int cpu)
+{
+	return clk_get_rate(cpu_clk[cpu]) / 1000;
+}
+
+/*
+ * cpufreq .init hook: register the frequency table, group CPUs that share
+ * a clock under one policy, set up the per-CPU work item, and snap the
+ * CPU to a valid table frequency so the reported policy->cur is accurate.
+ */
+static int msm_cpufreq_init(struct cpufreq_policy *policy)
+{
+	int cur_freq;
+	int index;
+	int ret = 0;
+	struct cpufreq_frequency_table *table;
+	struct cpufreq_work_struct *cpu_work = NULL;
+	int cpu;
+
+	table = cpufreq_frequency_get_table(policy->cpu);
+	if (table == NULL)
+		return -ENODEV;
+
+	/* FIXME: what's the actual transition time? */
+	ret = cpufreq_generic_init(policy, freq_table, 300 * 1000);
+	if (ret) {
+		pr_err("cpufreq_generic_init failed\n");
+		return ret;
+	}
+
+	/*
+	 * In some SoC, some cores are clocked by same source, and their
+	 * frequencies can not be changed independently. Find all other
+	 * CPUs that share same clock, and mark them as controlled by
+	 * same policy.
+	 */
+	for_each_possible_cpu(cpu)
+		if (cpu_clk[cpu] == cpu_clk[policy->cpu])
+			cpumask_set_cpu(cpu, policy->cpus);
+
+	cpu_work = &per_cpu(cpufreq_work, policy->cpu);
+	INIT_WORK(&cpu_work->work, set_cpu_work);
+	init_completion(&cpu_work->complete);
+
+	if (cpufreq_frequency_table_cpuinfo(policy, table)) {
+#ifdef CONFIG_MSM_CPU_FREQ_SET_MIN_MAX
+		/* table had no valid entries: fall back to configured limits */
+		policy->cpuinfo.min_freq = CONFIG_MSM_CPU_FREQ_MIN;
+		policy->cpuinfo.max_freq = CONFIG_MSM_CPU_FREQ_MAX;
+#endif
+	}
+#ifdef CONFIG_MSM_CPU_FREQ_SET_MIN_MAX
+	policy->min = CONFIG_MSM_CPU_FREQ_MIN;
+	policy->max = CONFIG_MSM_CPU_FREQ_MAX;
+#endif
+
+	cur_freq = clk_get_rate(cpu_clk[policy->cpu])/1000;
+
+	/* find the nearest table entry (first rounding down, then up) */
+	if (cpufreq_frequency_table_target(policy, table, cur_freq,
+		CPUFREQ_RELATION_H, &index) &&
+	    cpufreq_frequency_table_target(policy, table, cur_freq,
+		CPUFREQ_RELATION_L, &index)) {
+		pr_info("cpufreq: cpu%d at invalid freq: %d\n",
+				policy->cpu, cur_freq);
+		return -EINVAL;
+	}
+	/*
+	 * Call set_cpu_freq unconditionally so that when cpu is set to
+	 * online, frequency limit will always be updated.
+	 */
+	ret = set_cpu_freq(policy, table[index].frequency,
+			   table[index].driver_data);
+	if (ret)
+		return ret;
+	pr_debug("cpufreq: cpu%d init at %d switching to %d\n",
+			policy->cpu, cur_freq, table[index].frequency);
+	policy->cur = table[index].frequency;
+
+	return 0;
+}
+
+/*
+ * CPU hotplug notifier: keep each CPU's clock (and the shared L2 clock)
+ * prepared/enabled across online transitions, and drop them again when a
+ * CPU dies or its bring-up is cancelled, re-evaluating the L2/BW vote
+ * each time the online set changes.
+ */
+static int msm_cpufreq_cpu_callback(struct notifier_block *nfb,
+		unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+	int rc;
+
+	/* Fail hotplug until this driver can get CPU clocks */
+	if (!hotplug_ready)
+		return NOTIFY_BAD;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	/*
+	 * Scale down clock/power of CPU that is dead and scale it back up
+	 * before the CPU is brought up.
+	 */
+	case CPU_DEAD:
+		clk_disable_unprepare(cpu_clk[cpu]);
+		clk_disable_unprepare(l2_clk);
+		update_l2_bw(NULL);
+		break;
+	case CPU_UP_CANCELED:
+		/* CPU_STARTING never ran, so only the prepare is undone */
+		clk_unprepare(cpu_clk[cpu]);
+		clk_unprepare(l2_clk);
+		update_l2_bw(NULL);
+		break;
+	case CPU_UP_PREPARE:
+		rc = clk_prepare(l2_clk);
+		if (rc < 0)
+			return NOTIFY_BAD;
+		rc = clk_prepare(cpu_clk[cpu]);
+		if (rc < 0) {
+			clk_unprepare(l2_clk);
+			return NOTIFY_BAD;
+		}
+		/* count this CPU's index even though it is not online yet */
+		update_l2_bw(&cpu);
+		break;
+
+	case CPU_STARTING:
+		/* runs on the new CPU: atomic context, enable only */
+		rc = clk_enable(l2_clk);
+		if (rc < 0)
+			return NOTIFY_BAD;
+		rc = clk_enable(cpu_clk[cpu]);
+		if (rc) {
+			clk_disable(l2_clk);
+			return NOTIFY_BAD;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __refdata msm_cpufreq_cpu_notifier = {
+ .notifier_call = msm_cpufreq_cpu_callback,
+};
+
+/*
+ * Mark every CPU suspended so msm_cpufreq_target() refuses new frequency
+ * changes; taking each suspend_mutex also waits out any change in flight.
+ */
+static int msm_cpufreq_suspend(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		mutex_lock(&per_cpu(cpufreq_suspend_info, cpu).suspend_mutex);
+		per_cpu(cpufreq_suspend_info, cpu).device_suspended = 1;
+		mutex_unlock(&per_cpu(cpufreq_suspend_info, cpu).suspend_mutex);
+	}
+
+	return NOTIFY_DONE;
+}
+
+/*
+ * Clear the per-CPU suspended flags so frequency changes are allowed again.
+ * NOTE(review): unlike suspend, the flag is written without suspend_mutex -
+ * presumably safe because resume runs before any new target request, but
+ * confirm against the PM notifier ordering.
+ */
+static int msm_cpufreq_resume(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		per_cpu(cpufreq_suspend_info, cpu).device_suspended = 0;
+	}
+
+	return NOTIFY_DONE;
+}
+
+/* PM notifier: block frequency changes across suspend/hibernation. */
+static int msm_cpufreq_pm_event(struct notifier_block *this,
+				unsigned long event, void *ptr)
+{
+	switch (event) {
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+		return msm_cpufreq_resume();
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		return msm_cpufreq_suspend();
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
+static struct notifier_block msm_cpufreq_pm_notifier = {
+ .notifier_call = msm_cpufreq_pm_event,
+};
+
+static struct freq_attr *msm_freq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver msm_cpufreq_driver = {
+ /* lps calculations are handled here. */
+ .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
+ .init = msm_cpufreq_init,
+ .verify = msm_cpufreq_verify,
+ .target = msm_cpufreq_target,
+ .get = msm_cpufreq_get_freq,
+ .name = "msm",
+ .attr = msm_freq_attr,
+};
+
+#define PROP_TBL "qcom,cpufreq-table"
+/*
+ * Parse the qcom,cpufreq-table DT property into freq_table, l2_khz and
+ * mem_bw.  Each row is (CPU kHz[, L2 kHz], mem MBps) - the L2 column
+ * exists only when an L2 clock was found.  Rows are rounded against the
+ * real clock rates and the table is cut short at the first rate the
+ * hardware cannot reach.  Returns 0 on success or a negative errno.
+ */
+static int cpufreq_parse_dt(struct device *dev)
+{
+	int ret, len, nf, num_cols = 2, i, j;
+	u32 *data;
+
+	if (l2_clk)
+		num_cols++;
+
+	/* Parse CPU freq -> L2/Mem BW map table. */
+	if (!of_find_property(dev->of_node, PROP_TBL, &len))
+		return -EINVAL;
+	len /= sizeof(*data);
+
+	/* property must hold a whole number of rows */
+	if (len % num_cols || len == 0)
+		return -EINVAL;
+	nf = len / num_cols;
+
+	data = devm_kzalloc(dev, len * sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	ret = of_property_read_u32_array(dev->of_node, PROP_TBL, data, len);
+	if (ret)
+		return ret;
+
+	/* Allocate all data structures. */
+	freq_table = devm_kzalloc(dev, (nf + 1) * sizeof(*freq_table),
+			GFP_KERNEL);
+	mem_bw = devm_kzalloc(dev, nf * sizeof(*mem_bw), GFP_KERNEL);
+
+	if (!freq_table || !mem_bw)
+		return -ENOMEM;
+
+	if (l2_clk) {
+		l2_khz = devm_kzalloc(dev, nf * sizeof(*l2_khz), GFP_KERNEL);
+		if (!l2_khz)
+			return -ENOMEM;
+	}
+
+	j = 0;
+	for (i = 0; i < nf; i++) {
+		unsigned long f;
+
+		f = clk_round_rate(cpu_clk[0], data[j++] * 1000);
+		if (IS_ERR_VALUE(f))
+			break;
+		f /= 1000;
+
+		/*
+		 * Check if this is the last feasible frequency in the table.
+		 *
+		 * The table listing frequencies higher than what the HW can
+		 * support is not an error since the table might be shared
+		 * across CPUs in different speed bins. It's also not
+		 * sufficient to check if the rounded rate is lower than the
+		 * requested rate as it doesn't cover the following example:
+		 *
+		 * Table lists: 2.2 GHz and 2.5 GHz.
+		 * Rounded rate returns: 2.2 GHz and 2.3 GHz.
+		 *
+		 * In this case, we want CPUfreq to use 2.2 GHz and 2.3 GHz
+		 * instead of rejecting the 2.5 GHz table entry.
+		 */
+		if (i > 0 && f <= freq_table[i-1].frequency)
+			break;
+
+		freq_table[i].driver_data = i;
+		freq_table[i].frequency = f;
+
+		if (l2_clk) {
+			f = clk_round_rate(l2_clk, data[j++] * 1000);
+			if (IS_ERR_VALUE(f)) {
+				pr_err("Error finding L2 rate for CPU %d KHz\n",
+					freq_table[i].frequency);
+				freq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+			} else {
+				f /= 1000;
+				l2_khz[i] = f;
+			}
+		}
+
+		mem_bw[i] = data[j++];
+	}
+
+	/* terminate at the last row actually filled (loop may break early) */
+	freq_table[i].driver_data = i;
+	freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+	/* raw DT array no longer needed; the parsed tables stay devm-owned */
+	devm_kfree(dev, data);
+
+	return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int msm_cpufreq_show(struct seq_file *m, void *unused)
+{
+ unsigned int i, cpu_freq;
+
+ if (!freq_table)
+ return 0;
+
+ seq_printf(m, "%10s%10s", "CPU (KHz)", "L2 (KHz)");
+ seq_printf(m, "%12s\n", "Mem (MBps)");
+
+ for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+ cpu_freq = freq_table[i].frequency;
+ if (cpu_freq == CPUFREQ_ENTRY_INVALID)
+ continue;
+ seq_printf(m, "%10d", cpu_freq);
+ seq_printf(m, "%10d", l2_khz ? l2_khz[i] : cpu_freq);
+ seq_printf(m, "%12lu", mem_bw[i]);
+ seq_printf(m, "\n");
+ }
+ return 0;
+}
+
+static int msm_cpufreq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, msm_cpufreq_show, inode->i_private);
+}
+
+const struct file_operations msm_cpufreq_fops = {
+ .open = msm_cpufreq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+#endif
+
/*
 * Probe: resolve the per-CPU (and optional L2) clocks, parse the DT
 * frequency table, register the devfreq governor and the debugfs node.
 */
static int __init msm_cpufreq_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	char clk_name[] = "cpu??_clk";	/* template; "cpuNN_clk" fits */
	struct clk *c;
	int cpu, ret;

	/* The L2 clock is optional: scaling proceeds without it. */
	l2_clk = devm_clk_get(dev, "l2_clk");
	if (IS_ERR(l2_clk))
		l2_clk = NULL;

	for_each_possible_cpu(cpu) {
		snprintf(clk_name, sizeof(clk_name), "cpu%d_clk", cpu);
		c = devm_clk_get(dev, clk_name);
		if (IS_ERR(c))
			return PTR_ERR(c);
		cpu_clk[cpu] = c;
	}
	/* All CPU clocks resolved: hotplug paths may use them now. */
	hotplug_ready = true;

	ret = cpufreq_parse_dt(dev);
	if (ret)
		return ret;

	ret = register_devfreq_msm_cpufreq();
	if (ret) {
		pr_err("devfreq governor registration failed\n");
		return ret;
	}

#ifdef CONFIG_DEBUG_FS
	if (!debugfs_create_file("msm_cpufreq", S_IRUGO, NULL, NULL,
			&msm_cpufreq_fops))
		return -ENOMEM;
#endif

	return 0;
}
+
/* Matches the "qcom,msm-cpufreq" DT node carrying the frequency table. */
static struct of_device_id match_table[] = {
	{ .compatible = "qcom,msm-cpufreq" },
	{}
};

/* No .probe member: probing is done once via platform_driver_probe(). */
static struct platform_driver msm_cpufreq_plat_driver = {
	.driver = {
		.name = "msm-cpufreq",
		.of_match_table = match_table,
		.owner = THIS_MODULE,
	},
};
+
+static int __init msm_cpufreq_register(void)
+{
+ int cpu, rc;
+
+ for_each_possible_cpu(cpu) {
+ mutex_init(&(per_cpu(cpufreq_suspend_info, cpu).suspend_mutex));
+ per_cpu(cpufreq_suspend_info, cpu).device_suspended = 0;
+ }
+
+ rc = platform_driver_probe(&msm_cpufreq_plat_driver,
+ msm_cpufreq_probe);
+ if (rc < 0) {
+ /* Unblock hotplug if msm-cpufreq probe fails */
+ unregister_hotcpu_notifier(&msm_cpufreq_cpu_notifier);
+ for_each_possible_cpu(cpu)
+ mutex_destroy(&(per_cpu(cpufreq_suspend_info, cpu).
+ suspend_mutex));
+ return rc;
+ }
+
+ msm_cpufreq_wq = alloc_workqueue("msm-cpufreq", WQ_HIGHPRI, 0);
+ register_pm_notifier(&msm_cpufreq_pm_notifier);
+ return cpufreq_register_driver(&msm_cpufreq_driver);
+}
+
+subsys_initcall(msm_cpufreq_register);
+
/*
 * Register the CPU hotplug notifier at core_initcall time, well before
 * the driver itself probes at subsys_initcall.
 */
static int __init msm_cpufreq_early_register(void)
{
	return register_hotcpu_notifier(&msm_cpufreq_cpu_notifier);
}
core_initcall(msm_cpufreq_early_register);
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 4d177b916f75..8ca776169b1a 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -30,3 +30,6 @@ obj-$(CONFIG_ARM64_CPUIDLE) += cpuidle-arm64.o
# POWERPC drivers
obj-$(CONFIG_PSERIES_CPUIDLE) += cpuidle-pseries.o
obj-$(CONFIG_POWERNV_CPUIDLE) += cpuidle-powernv.o
+
+obj-$(CONFIG_CPU_IDLE_CALXEDA) += cpuidle-calxeda.o
+obj-$(CONFIG_MSM_PM) += lpm_levels.o
diff --git a/drivers/cpuidle/lpm_levels.c b/drivers/cpuidle/lpm_levels.c
new file mode 100644
index 000000000000..458eb133d9ea
--- /dev/null
+++ b/drivers/cpuidle/lpm_levels.c
@@ -0,0 +1,1210 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/irqchip/msm-mpm-irq.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/tick.h>
+#include <linux/suspend.h>
+#include <linux/pm_qos.h>
+#include <linux/of_platform.h>
+#include <soc/qcom/spm.h>
+#include <soc/qcom/pm.h>
+#include <soc/qcom/rpm-notifier.h>
+#include <soc/qcom/event_timer.h>
+
/* 32 kHz sleep clock; used to convert sleep time to sclk ticks for MPM. */
#define SCLK_HZ (32768)

/* debug_mask bits: log RPM limits on suspend and/or idle entry. */
enum {
	MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0),
	MSM_LPM_LVL_DBG_IDLE_LIMITS = BIT(1),
};

/* Power/latency figures used to score a level against the sleep length. */
struct power_params {
	uint32_t latency_us;		/* exit latency */
	uint32_t ss_power;
	uint32_t energy_overhead;	/* entry/exit energy cost */
	uint32_t time_overhead_us;	/* entry/exit time cost */
};

/* Per-CPU low power mode, parsed from a qcom,cpu-modes DT child node. */
struct lpm_cpu_level {
	const char *name;
	enum msm_pm_sleep_mode mode;
	struct power_params pwr;
	bool use_bc_timer;	/* switch to the broadcast timer on entry */
};

/* Cluster/system low power mode, parsed from qcom,system-modes. */
struct lpm_system_level {
	const char *name;
	uint32_t l2_mode;	/* MSM_SPM_L2_MODE_* for this level */
	struct power_params pwr;
	enum msm_pm_sleep_mode min_cpu_mode; /* shallowest CPU mode that votes */
	struct cpumask num_cpu_votes;	/* CPUs voting for this level */
	bool notify_rpm;	/* send RPM sleep set on entry */
	bool available;		/* allowed by current L2 mode cap */
	bool sync_level;	/* requires all powered cores in agreement */
};

/* Aggregate state: level tables plus the cross-CPU synchronisation data. */
struct lpm_system_state {
	struct lpm_cpu_level *cpu_level;
	int num_cpu_levels;
	struct lpm_system_level *system_level;
	int num_system_levels;
	enum msm_pm_sleep_mode sync_cpu_mode; /* min CPU mode that syncs */
	int last_entered_cluster_index;	/* -1: no cluster level active */
	bool allow_synched_levels;
	bool no_l2_saw;
	spinlock_t sync_lock;	/* guards the vote/sync cpumasks */
	struct cpumask num_cores_in_sync;
};

static struct lpm_system_state sys_state;
static bool suspend_in_progress;
static int64_t suspend_time;	/* ns timestamp, then duration, of suspend */

/* Maps a DT string to its numeric mode value. */
struct lpm_lookup_table {
	uint32_t modes;
	const char *mode_name;
};

static void lpm_system_level_update(void);
static void setup_broadcast_timer(void *arg);
static int lpm_cpu_callback(struct notifier_block *cpu_nb,
		unsigned long action, void *hcpu);

static struct notifier_block __refdata lpm_cpu_nblk = {
	.notifier_call = lpm_cpu_callback,
};

static uint32_t allowed_l2_mode;
/* sysfs override; defaults to the deepest (power collapse) L2 mode. */
static uint32_t sysfs_dbg_l2_mode = MSM_SPM_L2_MODE_POWER_COLLAPSE;
static uint32_t default_l2_mode;
+
+
static ssize_t lpm_levels_attr_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf);
static ssize_t lpm_levels_attr_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count);

/* Bitmask of MSM_LPM_LVL_DBG_* flags, writable as a module parameter. */
static int lpm_lvl_dbg_msk;

module_param_named(
	debug_mask, lpm_lvl_dbg_msk, int, S_IRUGO | S_IWUSR | S_IWGRP
);

/* When set, honour the governor-chosen index instead of the power score. */
static bool menu_select;
module_param_named(
	menu_select, menu_select, bool, S_IRUGO | S_IWUSR | S_IWGRP
);

/* Non-zero: force the suspend sleep length to this many seconds (debug). */
static int msm_pm_sleep_time_override;
module_param_named(sleep_time_override,
	msm_pm_sleep_time_override, int, S_IRUGO | S_IWUSR | S_IWGRP);

/* CPUs considered powered; updated from the hotplug notifier. */
static struct cpumask num_powered_cores;
static struct hrtimer lpm_hrtimer;

/* /sys/module/.../enable_low_power/l2 — L2 mode debug override. */
static struct kobj_attribute lpm_l2_kattr = __ATTR(l2, S_IRUGO|S_IWUSR,\
		lpm_levels_attr_show, lpm_levels_attr_store);

static struct attribute *lpm_levels_attr[] = {
	&lpm_l2_kattr.attr,
	NULL,
};

static struct attribute_group lpm_levels_attr_grp = {
	.attrs = lpm_levels_attr,
};
+
/* SYSFS */
/*
 * Show sysfs_dbg_l2_mode.  A stack kernel_param is built so the standard
 * param_get_uint() formatter can be reused (it only reads kp.arg).
 */
static ssize_t lpm_levels_attr_show(
	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct kernel_param kp;
	int rc;

	kp.arg = &sysfs_dbg_l2_mode;

	rc = param_get_uint(buf, &kp);

	if (rc > 0) {
		/* param_get_uint() does not newline-terminate */
		strlcat(buf, "\n", PAGE_SIZE);
		rc++;
	}

	return rc;
}
+
+static ssize_t lpm_levels_attr_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct kernel_param kp;
+ unsigned int temp;
+ int rc;
+
+ kp.arg = &temp;
+ rc = param_set_uint(buf, &kp);
+ if (rc)
+ return rc;
+
+ sysfs_dbg_l2_mode = temp;
+ lpm_system_level_update();
+
+ return count;
+}
+
+static int msm_pm_get_sleep_mode_value(const char *mode_name)
+{
+ struct lpm_lookup_table pm_sm_lookup[] = {
+ {MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT,
+ "wfi"},
+ {MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE,
+ "standalone_pc"},
+ {MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
+ "pc"},
+ {MSM_PM_SLEEP_MODE_RETENTION,
+ "retention"},
+ };
+ int i;
+ int ret = -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(pm_sm_lookup); i++) {
+ if (!strcmp(mode_name, pm_sm_lookup[i].mode_name)) {
+ ret = pm_sm_lookup[i].modes;
+ break;
+ }
+ }
+ return ret;
+}
+
/*
 * Program the L2 low power mode for the chosen system level and set the
 * matching secure-world cache flush flag.  Returns the SPM driver status
 * (0 on success); on targets without an L2 SAW only the flag is set.
 */
static int lpm_set_l2_mode(struct lpm_system_state *system_state,
		int sleep_mode)
{
	int lpm = sleep_mode;
	int rc = 0;

	/* Default to "L2 stays on"; deeper modes override below. */
	msm_pm_set_l2_flush_flag(MSM_SCM_L2_ON);

	switch (sleep_mode) {
	case MSM_SPM_L2_MODE_POWER_COLLAPSE:
		msm_pm_set_l2_flush_flag(MSM_SCM_L2_OFF);
		break;
	case MSM_SPM_L2_MODE_GDHS:
		msm_pm_set_l2_flush_flag(MSM_SCM_L2_GDHS);
		break;
	case MSM_SPM_L2_MODE_PC_NO_RPM:
		msm_pm_set_l2_flush_flag(MSM_SCM_L2_OFF);
		break;
	case MSM_SPM_L2_MODE_RETENTION:
	case MSM_SPM_L2_MODE_DISABLED:
		break;
	default:
		/* Unknown mode: fall back to leaving the L2 active. */
		lpm = MSM_SPM_L2_MODE_DISABLED;
		break;
	}

	if (!system_state->no_l2_saw)
		rc = msm_spm_l2_set_low_power_mode(lpm, true);

	if (rc)
		pr_err("%s: Failed to set L2 low power mode %d, ERR %d",
				__func__, lpm, rc);

	return rc;
}
+
/*
 * Recompute which system levels are available: L2 power collapse is only
 * allowed with a single powered core (or when synched levels are enabled),
 * further capped by the sysfs debug override.
 */
static void lpm_system_level_update(void)
{
	int i;
	struct lpm_system_level *l = NULL;
	uint32_t max_l2_mode;
	static DEFINE_MUTEX(lpm_lock);

	mutex_lock(&lpm_lock);

	if ((cpumask_weight(&num_powered_cores) == 1)
			|| (sys_state.allow_synched_levels))
		allowed_l2_mode = MSM_SPM_L2_MODE_POWER_COLLAPSE;
	else
		allowed_l2_mode = default_l2_mode;

	max_l2_mode = min(allowed_l2_mode, sysfs_dbg_l2_mode);

	for (i = 0; i < sys_state.num_system_levels; i++) {
		l = &sys_state.system_level[i];
		l->available = !(l->l2_mode > max_l2_mode);
	}
	mutex_unlock(&lpm_lock);
}
+
/*
 * Score each available system level against the expected sleep time and
 * the PM QoS latency bound; return the index of the lowest-power feasible
 * level, or -1 when none qualifies.
 */
static int lpm_system_mode_select(struct lpm_system_state *system_state,
		uint32_t sleep_us, bool from_idle)
{
	int best_level = -1;
	int i;
	uint32_t best_level_pwr = ~0U;
	uint32_t pwr;
	uint32_t latency_us = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);

	if (!system_state->system_level)
		return -EINVAL;

	for (i = 0; i < system_state->num_system_levels; i++) {
		struct lpm_system_level *system_level =
			&system_state->system_level[i];
		struct power_params *pwr_params = &system_level->pwr;

		if (!system_level->available)
			continue;

		/* The following check is to support legacy behavior where
		 * only the last online core enters a system low power mode.
		 * This should eventually be removed once all targets support
		 * system low power modes with multiple cores online.
		 */
		if (system_level->sync_level
				&& (num_online_cpus() > 1)
				&& !sys_state.allow_synched_levels)
			continue;

		/* Sync levels need every powered core to have voted. */
		if (system_level->sync_level &&
				!cpumask_equal(&system_level->num_cpu_votes,
						&num_powered_cores))
			continue;

		if (from_idle && latency_us < pwr_params->latency_us)
			continue;

		/*
		 * NOTE(review): this also keeps sleep_us away from the
		 * divisions below only when time_overhead_us > 0 — confirm
		 * DT always provides a non-zero overhead.
		 */
		if (sleep_us < pwr_params->time_overhead_us)
			continue;

		/* Never pick an RPM-notifying level from idle mid-suspend. */
		if (suspend_in_progress && from_idle
				&& system_level->notify_rpm)
			continue;

		if ((sleep_us >> 10) > pwr_params->time_overhead_us) {
			pwr = pwr_params->ss_power;
		} else {
			pwr = pwr_params->ss_power;
			pwr -= (pwr_params->time_overhead_us *
					pwr_params->ss_power) / sleep_us;
			pwr += pwr_params->energy_overhead / sleep_us;
		}

		/* >= so later (deeper) levels win ties. */
		if (best_level_pwr >= pwr) {
			best_level = i;
			best_level_pwr = pwr;
		}
	}
	return best_level;
}
+
/*
 * Return the expected sleep length in microseconds and, via @mask, the
 * CPUs whose next wakeup bounds it.  For suspend (!from_idle) the sleep
 * is unbounded unless the sleep_time_override debug knob is set.
 */
static uint64_t lpm_get_system_sleep(bool from_idle, struct cpumask *mask)
{
	struct clock_event_device *ed = NULL;
	uint64_t us = (~0ULL);

	if (tick_get_broadcast_device())
		ed = tick_get_broadcast_device()->evtdev;

	if (!from_idle) {
		if (mask)
			cpumask_copy(mask, cpumask_of(smp_processor_id()));
		if (msm_pm_sleep_time_override)
			us = USEC_PER_SEC * msm_pm_sleep_time_override;
		return us;
	}

	/* Prefer the broadcast device: it tracks all sleeping CPUs. */
	if (ed && !cpumask_empty(ed->cpumask)) {
		us = ktime_to_us(ktime_sub(ed->next_event, ktime_get()));
		if (mask)
			cpumask_copy(mask, ed->cpumask);
	} else {
		/* Without a broadcast device only UP is supportable. */
		BUG_ON(num_possible_cpus() > 1);
		us = ktime_to_us(tick_nohz_get_sleep_length());
		if (mask)
			cpumask_copy(mask, cpumask_of(smp_processor_id()));
	}

	return us;
}
+
/*
 * Last core in: commit the chosen system level.  Sets the L2 mode and,
 * for RPM-notifying levels, sends the RPM sleep set and programs the MPM
 * wakeup.  Any failure restores the default L2 mode and aborts.
 */
static void lpm_system_prepare(struct lpm_system_state *system_state,
		int index, bool from_idle)
{
	struct lpm_system_level *lvl;
	uint32_t sclk;
	int ret;
	int64_t us = (~0ULL);
	int dbg_mask;
	struct cpumask nextcpu;

	spin_lock(&system_state->sync_lock);
	/* Abort if no level was chosen or a core raced out of sync. */
	if (index < 0 || !cpumask_equal(&num_powered_cores,
				&system_state->num_cores_in_sync)) {
		spin_unlock(&system_state->sync_lock);
		return;
	}

	us = lpm_get_system_sleep(from_idle, &nextcpu);

	if (from_idle)
		dbg_mask = lpm_lvl_dbg_msk & MSM_LPM_LVL_DBG_IDLE_LIMITS;
	else
		dbg_mask = lpm_lvl_dbg_msk & MSM_LPM_LVL_DBG_SUSPEND_LIMITS;

	lvl = &system_state->system_level[index];

	ret = lpm_set_l2_mode(system_state, lvl->l2_mode);

	if (ret) {
		pr_warn("%s(): Cannot set L2 Mode %d, ret:%d\n",
				__func__, lvl->l2_mode, ret);
		goto bail_system_sleep;
	}

	if (!lvl->notify_rpm)
		goto skip_rpm;

	ret = msm_rpm_enter_sleep(dbg_mask, &nextcpu);
	if (ret) {
		pr_info("msm_rpm_enter_sleep() failed with rc = %d\n", ret);
		goto bail_system_sleep;
	}

	/* Convert the sleep length from us to 32 kHz sleep-clock ticks. */
	do_div(us, USEC_PER_SEC/SCLK_HZ);
	sclk = (uint32_t)us;
	msm_mpm_enter_sleep(sclk, from_idle, &nextcpu);
skip_rpm:
	system_state->last_entered_cluster_index = index;
	spin_unlock(&system_state->sync_lock);
	return;

bail_system_sleep:
	if (default_l2_mode != system_state->system_level[index].l2_mode)
		lpm_set_l2_mode(system_state, default_l2_mode);
	spin_unlock(&system_state->sync_lock);
}
+
/*
 * Undo lpm_system_prepare() on wakeup.  Every core clears its votes; only
 * the first core out (seen while all powered cores were still in sync)
 * restores the default L2 mode and ends the RPM/MPM sleep.
 */
static void lpm_system_unprepare(struct lpm_system_state *system_state,
		int cpu_index, bool from_idle)
{
	int index, i;
	int cpu = smp_processor_id();
	struct lpm_cpu_level *cpu_level = &system_state->cpu_level[cpu_index];
	bool first_cpu;

	/* Shallow CPU modes never joined the system-level sync. */
	if (cpu_level->mode < system_state->sync_cpu_mode)
		return;

	if (!system_state->system_level)
		return;

	spin_lock(&system_state->sync_lock);

	first_cpu = cpumask_equal(&system_state->num_cores_in_sync,
			&num_powered_cores);
	cpumask_clear_cpu(cpu, &system_state->num_cores_in_sync);

	for (i = 0; i < system_state->num_system_levels; i++) {
		struct lpm_system_level *system_lvl
			= &system_state->system_level[i];
		if (cpu_level->mode >= system_lvl->min_cpu_mode)
			cpumask_clear_cpu(cpu, &system_lvl->num_cpu_votes);
	}

	if (!first_cpu) {
		spin_unlock(&system_state->sync_lock);
		return;
	}

	index = system_state->last_entered_cluster_index;

	if (index < 0)
		goto unlock_and_return;

	if (default_l2_mode != system_state->system_level[index].l2_mode)
		lpm_set_l2_mode(system_state, default_l2_mode);

	if (system_state->system_level[index].notify_rpm) {
		msm_rpm_exit_sleep();
		msm_mpm_exit_sleep(from_idle);
	}
unlock_and_return:
	system_state->last_entered_cluster_index = -1;
	spin_unlock(&system_state->sync_lock);
}
+
+s32 msm_cpuidle_get_deep_idle_latency(void)
+{
+ int i;
+ struct lpm_cpu_level *level = sys_state.cpu_level;
+
+ if (!level)
+ return 0;
+
+ for (i = 0; i < sys_state.num_cpu_levels; i++, level++) {
+ if (level->mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE)
+ break;
+ }
+
+ if (i == sys_state.num_cpu_levels)
+ return 0;
+ else
+ return level->pwr.latency_us;
+}
+
/*
 * Hotplug notifier: keep num_powered_cores current (which gates the
 * allowed L2 mode) and re-enable the broadcast timer on onlined CPUs.
 */
static int lpm_cpu_callback(struct notifier_block *cpu_nb,
	unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		/* Count the core as powered before it actually boots. */
		cpumask_set_cpu(cpu, &num_powered_cores);
		lpm_system_level_update();
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		/* Resync with the online mask rather than clearing one bit. */
		cpumask_copy(&num_powered_cores, cpu_online_mask);
		lpm_system_level_update();
		break;
	case CPU_ONLINE:
		smp_call_function_single((unsigned long)hcpu,
				setup_broadcast_timer, (void *)true, 1);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
+
+static enum hrtimer_restart lpm_hrtimer_cb(struct hrtimer *h)
+{
+ return HRTIMER_NORESTART;
+}
+
+static void msm_pm_set_timer(uint32_t modified_time_us)
+{
+ u64 modified_time_ns = modified_time_us * NSEC_PER_USEC;
+ ktime_t modified_ktime = ns_to_ktime(modified_time_ns);
+ lpm_hrtimer.function = lpm_hrtimer_cb;
+ hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED);
+}
+
/*
 * Pick the lowest-power CPU idle level that fits the PM QoS latency bound
 * and the expected sleep time.  On CPU0 it may also arm a wakeup timer so
 * the next global event does not land inside the level's exit latency.
 * Returns the chosen cpu_level index or -EINVAL.
 */
static int lpm_cpu_power_select(struct cpuidle_device *dev, int *index)
{
	int best_level = -1;
	uint32_t best_level_pwr = ~0U;
	uint32_t latency_us = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	uint32_t sleep_us =
		(uint32_t)(ktime_to_us(tick_nohz_get_sleep_length()));
	uint32_t modified_time_us = 0;
	uint32_t next_event_us = 0;
	uint32_t pwr;
	int i;
	uint32_t lvl_latency_us = 0;
	uint32_t lvl_overhead_us = 0;
	uint32_t lvl_overhead_energy = 0;

	if (!sys_state.cpu_level)
		return -EINVAL;

	/* Only CPU0 considers (and may re-arm) the global event timer. */
	if (!dev->cpu)
		next_event_us = (uint32_t)(ktime_to_us(get_next_event_time()));

	for (i = 0; i < sys_state.num_cpu_levels; i++) {
		struct lpm_cpu_level *level = &sys_state.cpu_level[i];
		struct power_params *pwr_params = &level->pwr;
		uint32_t next_wakeup_us = sleep_us;
		enum msm_pm_sleep_mode mode = level->mode;
		bool allow;

		allow = msm_pm_sleep_mode_allow(dev->cpu, mode, true);

		if (!allow)
			continue;

		/* CPU0 must not power collapse while an RPM ack is pending. */
		if ((MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE == mode)
			|| (MSM_PM_SLEEP_MODE_POWER_COLLAPSE == mode))
			if (!dev->cpu && msm_rpm_waiting_for_ack())
					break;
		if ((MSM_PM_SLEEP_MODE_POWER_COLLAPSE == mode)
			&& (num_online_cpus() > 1)
			&& !sys_state.allow_synched_levels)
			continue;

		lvl_latency_us = pwr_params->latency_us;

		lvl_overhead_us = pwr_params->time_overhead_us;

		lvl_overhead_energy = pwr_params->energy_overhead;

		if (latency_us < lvl_latency_us)
			continue;

		if (next_event_us) {
			if (next_event_us < lvl_latency_us)
				continue;

			/* Leave room to exit before the next event fires. */
			if (((next_event_us - lvl_latency_us) < sleep_us) ||
					(next_event_us < sleep_us))
				next_wakeup_us = next_event_us - lvl_latency_us;
		}

		/* Also guarantees next_wakeup_us > 0 for the divisions. */
		if (next_wakeup_us <= pwr_params->time_overhead_us)
			continue;

		if ((next_wakeup_us >> 10) > lvl_overhead_us) {
			pwr = pwr_params->ss_power;
		} else {
			pwr = pwr_params->ss_power;
			pwr -= (lvl_overhead_us * pwr_params->ss_power) /
						next_wakeup_us;
			pwr += pwr_params->energy_overhead / next_wakeup_us;
		}

		/* >= so later (deeper) levels win ties. */
		if (best_level_pwr >= pwr) {
			best_level = i;
			best_level_pwr = pwr;
			if (next_event_us < sleep_us &&
				(mode != MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT))
				modified_time_us
					= next_event_us - lvl_latency_us;
			else
				modified_time_us = 0;
		}
	}

	if (modified_time_us && !dev->cpu)
		msm_pm_set_timer(modified_time_us);

	return best_level;
}
+
+static int lpm_get_l2_cache_value(const char *l2_str)
+{
+ int i;
+ struct lpm_lookup_table l2_mode_lookup[] = {
+ {MSM_SPM_L2_MODE_POWER_COLLAPSE, "l2_cache_pc"},
+ {MSM_SPM_L2_MODE_PC_NO_RPM, "l2_cache_pc_no_rpm"},
+ {MSM_SPM_L2_MODE_GDHS, "l2_cache_gdhs"},
+ {MSM_SPM_L2_MODE_RETENTION, "l2_cache_retention"},
+ {MSM_SPM_L2_MODE_DISABLED, "l2_cache_active"}
+ };
+
+ for (i = 0; i < ARRAY_SIZE(l2_mode_lookup); i++)
+ if (!strcmp(l2_str, l2_mode_lookup[i].mode_name))
+ return l2_mode_lookup[i].modes;
+ return -EINVAL;
+}
+
/*
 * Create /sys/module/lpm_levels/enable_low_power/ and attach the "l2"
 * attribute group.  Returns 0 on success or a negative errno.
 */
static int lpm_levels_sysfs_add(void)
{
	struct kobject *module_kobj = NULL;
	struct kobject *low_power_kobj = NULL;
	int rc = 0;

	module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
	if (!module_kobj) {
		pr_err("%s: cannot find kobject for module %s\n",
			__func__, KBUILD_MODNAME);
		rc = -ENOENT;
		goto resource_sysfs_add_exit;
	}

	low_power_kobj = kobject_create_and_add(
			"enable_low_power", module_kobj);
	if (!low_power_kobj) {
		pr_err("%s: cannot create kobject\n", __func__);
		rc = -ENOMEM;
		goto resource_sysfs_add_exit;
	}

	rc = sysfs_create_group(low_power_kobj, &lpm_levels_attr_grp);
resource_sysfs_add_exit:
	/* On any failure tear down whatever was created. */
	if (rc) {
		if (low_power_kobj) {
			sysfs_remove_group(low_power_kobj,
				&lpm_levels_attr_grp);
			kobject_del(low_power_kobj);
		}
	}

	return rc;
}
+static int lpm_cpu_menu_select(struct cpuidle_device *dev, int *index)
+{
+ int j;
+
+ for (; *index >= 0; (*index)--) {
+ int mode = 0;
+ bool allow = false;
+
+ allow = msm_pm_sleep_mode_allow(dev->cpu, mode, true);
+
+ if (!allow)
+ continue;
+
+ for (j = sys_state.num_cpu_levels; j >= 0; j--) {
+ struct lpm_cpu_level *l = &sys_state.cpu_level[j];
+ if (mode == l->mode)
+ return j;
+ }
+ }
+ return -EPERM;
+}
+
/*
 * Per-CPU entry hook: switch this CPU to the broadcast timer when the
 * chosen mode stops the local timer or can trigger a cluster-level sleep.
 */
static inline void lpm_cpu_prepare(struct lpm_system_state *system_state,
		int cpu_index, bool from_idle)
{
	struct lpm_cpu_level *cpu_level = &system_state->cpu_level[cpu_index];
	unsigned int cpu = smp_processor_id();

	/* Use broadcast timer for aggregating sleep mode within a cluster.
	 * A broadcast timer could be used because of a hardware restriction
	 * or to ensure that the BC timer is used in case a cpu mode could
	 * trigger a cluster level sleep
	 */
	if (from_idle && (cpu_level->use_bc_timer ||
			(cpu_level->mode >= system_state->sync_cpu_mode)))
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
}
+
/* Per-CPU exit hook: leave broadcast mode if lpm_cpu_prepare() entered it. */
static inline void lpm_cpu_unprepare(struct lpm_system_state *system_state,
		int cpu_index, bool from_idle)
{
	struct lpm_cpu_level *cpu_level = &system_state->cpu_level[cpu_index];
	unsigned int cpu = smp_processor_id();

	if (from_idle && (cpu_level->use_bc_timer ||
			(cpu_level->mode >= system_state->sync_cpu_mode)))
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
}
+
/*
 * Register this CPU's votes for system levels.  If it is the last powered
 * core to vote, choose a system level for the cluster; otherwise return
 * -EBUSY so only CPU-level sleep happens.
 */
static int lpm_system_select(struct lpm_system_state *system_state,
		int cpu_index, bool from_idle)
{
	struct lpm_cpu_level *cpu_level = &system_state->cpu_level[cpu_index];
	int cpu = smp_processor_id();
	int i;

	/* Too shallow a CPU mode to take part in cluster sleep. */
	if (cpu_level->mode < system_state->sync_cpu_mode)
		return -EINVAL;

	if (!system_state->system_level)
		return -EINVAL;

	spin_lock(&system_state->sync_lock);
	cpumask_set_cpu(cpu, &system_state->num_cores_in_sync);

	for (i = 0; i < system_state->num_system_levels; i++) {
		struct lpm_system_level *system_lvl =
			&system_state->system_level[i];
		if (cpu_level->mode >= system_lvl->min_cpu_mode)
			cpumask_set_cpu(cpu, &system_lvl->num_cpu_votes);
	}

	if (!cpumask_equal(&system_state->num_cores_in_sync,
				&num_powered_cores)) {
		spin_unlock(&system_state->sync_lock);
		return -EBUSY;
	}

	spin_unlock(&system_state->sync_lock);

	return lpm_system_mode_select(system_state,
			(uint32_t)lpm_get_system_sleep(from_idle, NULL),
			from_idle);
}
+
/*
 * Full low power entry for one CPU: CPU-level prepare, optional cluster
 * prepare (when this is the last core in), sleep, then the matching
 * unprepare steps in reverse.
 */
static void lpm_enter_low_power(struct lpm_system_state *system_state,
		int cpu_index, bool from_idle)
{
	int idx;
	struct lpm_cpu_level *cpu_level = &system_state->cpu_level[cpu_index];

	lpm_cpu_prepare(system_state, cpu_index, from_idle);

	/* idx < 0 means no cluster level; lpm_system_prepare ignores it. */
	idx = lpm_system_select(system_state, cpu_index, from_idle);

	lpm_system_prepare(system_state, idx, from_idle);

	msm_cpu_pm_enter_sleep(cpu_level->mode, from_idle);

	lpm_system_unprepare(system_state, cpu_index, from_idle);

	lpm_cpu_unprepare(system_state, cpu_index, from_idle);
}
+
/*
 * cpuidle .enter callback: map the governor's choice onto an LPM level,
 * enter it and report the measured residency in microseconds.
 */
static int lpm_cpuidle_enter(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	int64_t time = ktime_to_ns(ktime_get());
	int idx;

	idx = menu_select ? lpm_cpu_menu_select(dev, &index) :
			lpm_cpu_power_select(dev, &index);
	if (idx < 0) {
		/* cpuidle enters with IRQs off; restore before bailing. */
		local_irq_enable();
		return -EPERM;
	}

	lpm_enter_low_power(&sys_state, idx, true);

	time = ktime_to_ns(ktime_get()) - time;
	do_div(time, 1000);	/* ns -> us */
	dev->last_residency = (int)time;

	local_irq_enable();
	return idx;
}
+
+/**
+ * lpm_cpu_hotplug_enter(): Called by dying CPU to terminate in low power mode
+ *
+ * @cpu: cpuid of the dying CPU
+ *
+ * Called from platform_cpu_kill() to terminate hotplug in a low power mode
+ */
+
+void lpm_cpu_hotplug_enter(unsigned int cpu)
+{
+ enum msm_pm_sleep_mode mode = MSM_PM_SLEEP_MODE_NR;
+
+ if (msm_pm_sleep_mode_allow(cpu, MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
+ false))
+ mode = MSM_PM_SLEEP_MODE_POWER_COLLAPSE;
+ else if (msm_pm_sleep_mode_allow(cpu,
+ MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE, false))
+ mode = MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE;
+ else
+ __WARN_printf("Power collapse modes not enabled for hotpug\n");
+
+ if (mode < MSM_PM_SLEEP_MODE_NR)
+ msm_cpu_pm_enter_sleep(mode, false);
+}
+
+static int lpm_suspend_enter(suspend_state_t state)
+{
+ int i;
+
+ for (i = sys_state.num_cpu_levels - 1; i >= 0; i--) {
+ bool allow = msm_pm_sleep_mode_allow(smp_processor_id(),
+ sys_state.cpu_level[i].mode, false);
+ if (allow)
+ break;
+ }
+
+ if (i < 0)
+ return -EINVAL;
+
+ lpm_enter_low_power(&sys_state, i, false);
+
+ return 0;
+}
+
+static int lpm_suspend_prepare(void)
+{
+ struct timespec ts;
+
+ getnstimeofday(&ts);
+ suspend_time = timespec_to_ns(&ts);
+
+ suspend_in_progress = true;
+ msm_mpm_suspend_prepare();
+
+ return 0;
+}
+
+static void lpm_suspend_wake(void)
+{
+ struct timespec ts;
+
+ getnstimeofday(&ts);
+ suspend_time = timespec_to_ns(&ts) - suspend_time;
+ msm_pm_add_stat(MSM_PM_STAT_SUSPEND, suspend_time);
+
+ msm_mpm_suspend_wake();
+ suspend_in_progress = false;
+}
+
/* NOTE(review): registered in lpm_probe(); presumably lets the "msm_pm"
 * stats driver bind — confirm against that driver. */
static struct platform_device lpm_dev = {
	.name = "msm_pm",
	.id = -1,
};

/* Suspend hooks installed via suspend_set_ops() in lpm_probe(). */
static const struct platform_suspend_ops lpm_suspend_ops = {
	.enter = lpm_suspend_enter,
	.valid = suspend_valid_only_mem,
	.prepare_late = lpm_suspend_prepare,
	.wake = lpm_suspend_wake,
};
+
+static void setup_broadcast_timer(void *arg)
+{
+ unsigned long reason = (unsigned long)arg;
+ int cpu = smp_processor_id();
+
+ reason = reason ?
+ CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;
+
+ clockevents_notify(reason, &cpu);
+}
+
/* states[]/state_count are filled in at runtime by lpm_cpuidle_init(). */
static struct cpuidle_driver msm_cpuidle_driver = {
	.name = "msm_idle",
	.owner = THIS_MODULE,
};
+
+static void lpm_cpuidle_init(void)
+{
+ int i = 0;
+ int state_count = 0;
+
+ if (!sys_state.cpu_level)
+ return;
+ BUG_ON(sys_state.num_cpu_levels > CPUIDLE_STATE_MAX);
+
+ for (i = 0; i < sys_state.num_cpu_levels; i++) {
+ struct cpuidle_state *st = &msm_cpuidle_driver.states[i];
+ struct lpm_cpu_level *cpu_level = &sys_state.cpu_level[i];
+ snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i);
+ snprintf(st->desc, CPUIDLE_DESC_LEN, cpu_level->name);
+ st->flags = 0;
+ st->exit_latency = cpu_level->pwr.latency_us;
+ st->power_usage = cpu_level->pwr.ss_power;
+ st->target_residency = 0;
+ st->enter = lpm_cpuidle_enter;
+ state_count++;
+ }
+ msm_cpuidle_driver.state_count = state_count;
+ msm_cpuidle_driver.safe_state_index = 0;
+
+ if (cpuidle_register(&msm_cpuidle_driver, NULL))
+ pr_err("%s(): Failed to register CPUIDLE device\n", __func__);
+}
+
+static int lpm_parse_power_params(struct device_node *node,
+ struct power_params *pwr)
+{
+ char *key;
+ int ret;
+
+ key = "qcom,latency-us";
+ ret = of_property_read_u32(node, key, &pwr->latency_us);
+ if (ret)
+ goto fail;
+
+ key = "qcom,ss-power";
+ ret = of_property_read_u32(node, key, &pwr->ss_power);
+ if (ret)
+ goto fail;
+
+ key = "qcom,energy-overhead";
+ ret = of_property_read_u32(node, key, &pwr->energy_overhead);
+ if (ret)
+ goto fail;
+
+ key = "qcom,time-overhead";
+ ret = of_property_read_u32(node, key, &pwr->time_overhead_us);
+ if (ret)
+ goto fail;
+fail:
+ if (ret)
+ pr_err("%s(): Error reading %s\n", __func__, key);
+ return ret;
+}
+
/*
 * Probe for "qcom,cpu-modes": parse each child node into a lpm_cpu_level
 * and publish the table through sys_state.
 */
static int lpm_cpu_probe(struct platform_device *pdev)
{
	struct lpm_cpu_level *level = NULL, *l;
	struct device_node *node = NULL;
	int num_levels = 0;
	char *key;
	int ret = -ENODEV;

	for_each_child_of_node(pdev->dev.of_node, node)
		num_levels++;

	level = kzalloc(num_levels * sizeof(struct lpm_cpu_level),
			GFP_KERNEL);

	if (!level)
		return -ENOMEM;

	l = &level[0];
	for_each_child_of_node(pdev->dev.of_node, node) {

		key = "qcom,mode";
		ret = of_property_read_string(node, key, &l->name);

		if (ret) {
			pr_err("%s(): Cannot read cpu mode%s\n", __func__, key);
			goto fail;
		}

		l->mode = msm_pm_get_sleep_mode_value(l->name);

		/*
		 * NOTE(review): mode is an enum; whether "< 0" can detect the
		 * -EINVAL lookup result depends on the enum's signedness —
		 * consider a signed temporary as done elsewhere.  Also note
		 * this fail path runs with ret still 0.
		 */
		if (l->mode < 0) {
			pr_err("%s():Cannot parse cpu mode:%s\n", __func__,
					l->name);
			goto fail;
		}

		key = "qcom,use-broadcast-timer";
		l->use_bc_timer = of_property_read_bool(node, key);

		ret = lpm_parse_power_params(node, &l->pwr);
		if (ret) {
			pr_err("%s(): cannot Parse power params\n", __func__);
			goto fail;
		}
		l++;
	}
	sys_state.cpu_level = level;
	sys_state.num_cpu_levels = num_levels;
	return ret;
fail:
	kfree(level);
	return ret;
}
+
+static int lpm_system_probe(struct platform_device *pdev)
+{
+ struct lpm_system_level *level = NULL, *l;
+ int num_levels = 0;
+ struct device_node *node;
+ char *key;
+ int ret = -ENODEV;
+
+ for_each_child_of_node(pdev->dev.of_node, node)
+ num_levels++;
+
+ level = kzalloc(num_levels * sizeof(struct lpm_system_level),
+ GFP_KERNEL);
+
+ if (!level)
+ return -ENOMEM;
+
+ l = &level[0];
+ for_each_child_of_node(pdev->dev.of_node, node) {
+
+ key = "qcom,l2";
+ ret = of_property_read_string(node, key, &l->name);
+ if (ret) {
+ pr_err("%s(): Failed to read L2 mode\n", __func__);
+ goto fail;
+ }
+
+ l->l2_mode = lpm_get_l2_cache_value(l->name);
+
+ if (l->l2_mode < 0) {
+ pr_err("%s(): Failed to read l2 cache mode\n",
+ __func__);
+ goto fail;
+ }
+
+ key = "qcom,send-rpm-sleep-set";
+ l->notify_rpm = of_property_read_bool(node, key);
+
+ ret = lpm_parse_power_params(node, &l->pwr);
+ if (ret) {
+ pr_err("%s(): Failed to parse power params\n",
+ __func__);
+ goto fail;
+ }
+
+ key = "qcom,sync-mode";
+ l->sync_level = of_property_read_bool(node, key);
+
+ if (l->sync_level) {
+ const char *name;
+
+ key = "qcom,min-cpu-mode";
+ ret = of_property_read_string(node, key, &name);
+ if (ret) {
+ pr_err("%s(): Required key %s not found\n",
+ __func__, name);
+ goto fail;
+ }
+
+ l->min_cpu_mode = msm_pm_get_sleep_mode_value(name);
+
+ if (l->min_cpu_mode < 0) {
+ pr_err("%s(): Cannot parse cpu mode:%s\n",
+ __func__, name);
+ goto fail;
+ }
+
+ if (l->min_cpu_mode < sys_state.sync_cpu_mode)
+ sys_state.sync_cpu_mode = l->min_cpu_mode;
+ }
+
+ l++;
+ }
+ sys_state.system_level = level;
+ sys_state.num_system_levels = num_levels;
+ sys_state.last_entered_cluster_index = -1;
+ return ret;
+fail:
+ kfree(level);
+ return ret;
+}
+
+static int lpm_probe(struct platform_device *pdev)
+{
+ struct device_node *node = NULL;
+ char *key = NULL;
+ int ret;
+
+ node = pdev->dev.of_node;
+
+ key = "qcom,allow-synced-levels";
+ sys_state.allow_synched_levels = of_property_read_bool(node, key);
+
+ key = "qcom,no-l2-saw";
+ sys_state.no_l2_saw = of_property_read_bool(node, key);
+
+ sys_state.sync_cpu_mode = MSM_PM_SLEEP_MODE_NR;
+ spin_lock_init(&sys_state.sync_lock);
+
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+
+ if (ret)
+ goto fail;
+
+ cpumask_copy(&num_powered_cores, cpu_online_mask);
+
+ if (!sys_state.no_l2_saw) {
+ int ret;
+ const char *l2;
+
+ key = "qcom,default-l2-state";
+ ret = of_property_read_string(node, key, &l2);
+ if (ret) {
+ pr_err("%s(): Failed to read default L2 mode\n",
+ __func__);
+ goto fail;
+ }
+
+ default_l2_mode = lpm_get_l2_cache_value(l2);
+ if (default_l2_mode < 0) {
+ pr_err("%s(): Unable to parse default L2 mode\n",
+ __func__);
+ goto fail;
+ }
+
+ if (lpm_levels_sysfs_add())
+ goto fail;
+ msm_pm_set_l2_flush_flag(MSM_SCM_L2_ON);
+ } else {
+ msm_pm_set_l2_flush_flag(MSM_SCM_L2_OFF);
+ default_l2_mode = MSM_SPM_L2_MODE_POWER_COLLAPSE;
+ }
+
+ get_cpu();
+ on_each_cpu(setup_broadcast_timer, (void *)true, 1);
+ put_cpu();
+
+ register_hotcpu_notifier(&lpm_cpu_nblk);
+
+ lpm_system_level_update();
+ platform_device_register(&lpm_dev);
+ suspend_set_ops(&lpm_suspend_ops);
+ hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ lpm_cpuidle_init();
+ return 0;
+fail:
+ pr_err("%s: Error in name %s key %s\n", __func__, node->full_name, key);
+ return -EFAULT;
+}
+
/* DT match and driver for per-CPU low power modes ("qcom,cpu-modes"). */
static struct of_device_id cpu_modes_mtch_tbl[] = {
	{.compatible = "qcom,cpu-modes"},
	{},
};

static struct platform_driver cpu_modes_driver = {
	.probe = lpm_cpu_probe,
	.driver = {
		.name = "cpu-modes",
		.owner = THIS_MODULE,
		.of_match_table = cpu_modes_mtch_tbl,
	},
};

/* DT match and driver for cluster levels ("qcom,system-modes"). */
static struct of_device_id system_modes_mtch_tbl[] = {
	{.compatible = "qcom,system-modes"},
	{},
};

static struct platform_driver system_modes_driver = {
	.probe = lpm_system_probe,
	.driver = {
		.name = "system-modes",
		.owner = THIS_MODULE,
		.of_match_table = system_modes_mtch_tbl,
	},
};

/* DT match and driver for the top-level "qcom,lpm-levels" node. */
static struct of_device_id lpm_levels_match_table[] = {
	{.compatible = "qcom,lpm-levels"},
	{},
};

static struct platform_driver lpm_levels_driver = {
	.probe = lpm_probe,
	.driver = {
		.name = "lpm-levels",
		.owner = THIS_MODULE,
		.of_match_table = lpm_levels_match_table,
	},
};
+
/*
 * Register the three LPM platform drivers; earlier registrations are
 * rolled back when a later one fails.
 */
static int __init lpm_levels_module_init(void)
{
	int rc;
	rc = platform_driver_register(&cpu_modes_driver);
	if (rc) {
		pr_info("Error registering %s\n", cpu_modes_driver.driver.name);
		goto fail;
	}

	rc = platform_driver_register(&system_modes_driver);
	if (rc) {
		platform_driver_unregister(&cpu_modes_driver);
		pr_info("Error registering %s\n",
				system_modes_driver.driver.name);
		goto fail;
	}

	rc = platform_driver_register(&lpm_levels_driver);
	if (rc) {
		platform_driver_unregister(&cpu_modes_driver);
		platform_driver_unregister(&system_modes_driver);
		pr_info("Error registering %s\n",
				lpm_levels_driver.driver.name);
	}
fail:
	return rc;
}
late_initcall(lpm_levels_module_init);
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index faf4e70c42e0..93e7bd76fb45 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -63,6 +63,33 @@ config DEVFREQ_GOV_USERSPACE
Otherwise, the governor does not change the frequnecy
given at the initialization.
+config DEVFREQ_GOV_MSM_ADRENO_TZ
+ tristate "MSM Adreno Trustzone"
+ depends on MSM_KGSL && MSM_SCM
+ help
+ Trustzone based governor for the Adreno GPU.
+	  Sets the frequency using an "on-demand" algorithm.
+ This governor is unlikely to be useful for other devices.
+
+config DEVFREQ_GOV_MSM_CPUFREQ
+ bool "MSM CPUfreq"
+ depends on CPU_FREQ_MSM
+ help
+ MSM CPUfreq based governor for CPU bandwidth voting. Sets the CPU
+ to DDR BW vote based on the current CPU frequency. This governor
+ is unlikely to be useful for non-MSM devices.
+
+config DEVFREQ_GOV_MSM_CPUBW_HWMON
+ tristate "HW monitor based governor for CPUBW"
+ depends on ARCH_MSM_KRAIT
+ help
+ HW monitor based governor for CPU to DDR bandwidth voting. This
+ governor currently supports only Krait L2 PM counters. Sets the CPU
+ BW vote by using L2 PM counters to monitor the Krait's use of DDR.
+ Since this governor uses some of the PM counters it can conflict
+ with existing profiling tools. This governor is unlikely to be
+ useful for other devices.
+
comment "DEVFREQ Drivers"
config ARM_EXYNOS4_BUS_DEVFREQ
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 16138c9e0d58..b93df3c88769 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -3,6 +3,9 @@ obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) += governor_simpleondemand.o
obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o
obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o
obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o
+obj-$(CONFIG_DEVFREQ_GOV_MSM_ADRENO_TZ) += governor_msm_adreno_tz.o
+obj-$(CONFIG_DEVFREQ_GOV_MSM_CPUFREQ) += governor_msm_cpufreq.o
+obj-$(CONFIG_DEVFREQ_GOV_MSM_CPUBW_HWMON) += governor_cpubw_hwmon.o
# DEVFREQ Drivers
obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ) += exynos/
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 30b538d8cc90..a66c151d234a 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -69,11 +69,34 @@ static struct devfreq *find_device_devfreq(struct device *dev)
}
/**
+ * devfreq_set_freq_limits() - Set min and max frequency from freq_table
+ * @devfreq: the devfreq instance
+ */
+static void devfreq_set_freq_limits(struct devfreq *devfreq)
+{
+ int idx;
+ unsigned long min = ~0, max = 0;
+
+ if (!devfreq->profile->freq_table)
+ return;
+
+ for (idx = 0; idx < devfreq->profile->max_state; idx++) {
+ if (min > devfreq->profile->freq_table[idx])
+ min = devfreq->profile->freq_table[idx];
+ if (max < devfreq->profile->freq_table[idx])
+ max = devfreq->profile->freq_table[idx];
+ }
+
+ devfreq->min_freq = min;
+ devfreq->max_freq = max;
+}
+
+/**
* devfreq_get_freq_level() - Lookup freq_table for the frequency
* @devfreq: the devfreq instance
* @freq: the target frequency
*/
-static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
+int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
{
int lev;
@@ -83,6 +106,7 @@ static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
return -EINVAL;
}
+EXPORT_SYMBOL(devfreq_get_freq_level);
/**
* devfreq_update_status() - Update statistics of devfreq behavior
@@ -172,7 +196,7 @@ int update_devfreq(struct devfreq *devfreq)
return -EINVAL;
/* Reevaluate the proper frequency */
- err = devfreq->governor->get_target_freq(devfreq, &freq);
+ err = devfreq->governor->get_target_freq(devfreq, &freq, &flags);
if (err)
return err;
@@ -430,6 +454,32 @@ static void devfreq_dev_release(struct device *dev)
}
/**
+ * find_governor_data - Find device specific private data for a governor.
+ * @profile: The profile to search.
+ * @governor_name: The governor to search for.
+ *
+ * Look up the device specific data for a governor.
+ */
+static void *find_governor_data(struct devfreq_dev_profile *profile,
+ const char *governor_name)
+{
+ void *data = NULL;
+ int i;
+
+ if (profile->governor_data == NULL)
+ return NULL;
+
+ for (i = 0; i < profile->num_governor_data; i++) {
+ if (strncmp(governor_name, profile->governor_data[i].name,
+ DEVFREQ_NAME_LEN) == 0) {
+ data = profile->governor_data[i].data;
+ break;
+ }
+ }
+ return data;
+}
+
+/**
* devfreq_add_device() - Add devfreq feature to the device
* @dev: the device to add devfreq feature.
* @profile: device-specific profile to run devfreq.
@@ -476,7 +526,10 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq->profile = profile;
strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
devfreq->previous_freq = profile->initial_freq;
- devfreq->data = data;
+
+ devfreq->data = data ? data : find_governor_data(devfreq->profile,
+ governor_name);
+
devfreq->nb.notifier_call = devfreq_notifier_call;
devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) *
@@ -487,6 +540,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq->profile->max_state,
GFP_KERNEL);
devfreq->last_stat_updated = jiffies;
+ devfreq_set_freq_limits(devfreq);
dev_set_name(&devfreq->dev, "%s", dev_name(dev));
err = device_register(&devfreq->dev);
@@ -807,6 +861,7 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
goto out;
}
}
+ df->data = find_governor_data(df->profile, str_governor);
df->governor = governor;
strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
index fad7d6321978..ebde695e99c7 100644
--- a/drivers/devfreq/governor.h
+++ b/drivers/devfreq/governor.h
@@ -38,4 +38,5 @@ extern void devfreq_interval_update(struct devfreq *devfreq,
extern int devfreq_add_governor(struct devfreq_governor *governor);
extern int devfreq_remove_governor(struct devfreq_governor *governor);
+extern int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq);
#endif /* _GOVERNOR_H */
diff --git a/drivers/devfreq/governor_cpubw_hwmon.c b/drivers/devfreq/governor_cpubw_hwmon.c
new file mode 100644
index 000000000000..3472490d2354
--- /dev/null
+++ b/drivers/devfreq/governor_cpubw_hwmon.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "cpubw-hwmon: " fmt
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/devfreq.h>
+#include "governor.h"
+
+#include <mach/msm-krait-l2-accessors.h>
+
+#define L2PMRESR2 0x412
+#define L2PMCR 0x400
+#define L2PMCNTENCLR 0x402
+#define L2PMCNTENSET 0x403
+#define L2PMINTENCLR 0x404
+#define L2PMINTENSET 0x405
+#define L2PMOVSR 0x406
+#define L2PMOVSSET 0x407
+#define L2PMnEVCNTCR(n) (0x420 + n * 0x10)
+#define L2PMnEVCNTR(n) (0x421 + n * 0x10)
+#define L2PMnEVCNTSR(n) (0x422 + n * 0x10)
+#define L2PMnEVFILTER(n) (0x423 + n * 0x10)
+#define L2PMnEVTYPER(n) (0x424 + n * 0x10)
+
+#define show_attr(name) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ return sprintf(buf, "%u\n", name); \
+}
+
+#define store_attr(name, _min, _max) \
+static ssize_t store_##name(struct device *dev, \
+			struct device_attribute *attr, const char *buf, \
+			size_t count) \
+{ \
+	int ret; \
+	unsigned int val; \
+	ret = kstrtouint(buf, 10, &val); \
+	if (ret) \
+		return -EINVAL; \
+	val = max(val, _min); \
+	val = min(val, _max); \
+	name = val; \
+	return count; \
+}
+
+#define gov_attr(__attr, min, max) \
+show_attr(__attr) \
+store_attr(__attr, min, max) \
+static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
+
+
+static int l2pm_irq;
+static unsigned int bytes_per_beat;
+static unsigned int tolerance_percent = 10;
+static unsigned int guard_band_mbps = 100;
+static unsigned int decay_rate = 90;
+static unsigned int io_percent = 16;
+static unsigned int bw_step = 190;
+
+#define MIN_MS 10U
+#define MAX_MS 500U
+static unsigned int sample_ms = 50;
+static u32 prev_r_start_val;
+static u32 prev_w_start_val;
+static unsigned long prev_ab;
+static ktime_t prev_ts;
+
+#define RD_MON 0
+#define WR_MON 1
+static void mon_init(void)
+{
+ /* Set up counters 0/1 to count write/read beats */
+ set_l2_indirect_reg(L2PMRESR2, 0x8B0B0000);
+ set_l2_indirect_reg(L2PMnEVCNTCR(RD_MON), 0x0);
+ set_l2_indirect_reg(L2PMnEVCNTCR(WR_MON), 0x0);
+ set_l2_indirect_reg(L2PMnEVCNTR(RD_MON), 0xFFFFFFFF);
+ set_l2_indirect_reg(L2PMnEVCNTR(WR_MON), 0xFFFFFFFF);
+ set_l2_indirect_reg(L2PMnEVFILTER(RD_MON), 0xF003F);
+ set_l2_indirect_reg(L2PMnEVFILTER(WR_MON), 0xF003F);
+ set_l2_indirect_reg(L2PMnEVTYPER(RD_MON), 0xA);
+ set_l2_indirect_reg(L2PMnEVTYPER(WR_MON), 0xB);
+}
+
+static void global_mon_enable(bool en)
+{
+ u32 regval;
+
+ /* Global counter enable */
+ regval = get_l2_indirect_reg(L2PMCR);
+ if (en)
+ regval |= BIT(0);
+ else
+ regval &= ~BIT(0);
+ set_l2_indirect_reg(L2PMCR, regval);
+}
+
+static void mon_enable(int n)
+{
+ /* Clear previous overflow state for event counter n */
+ set_l2_indirect_reg(L2PMOVSR, BIT(n));
+
+ /* Enable event counter n */
+ set_l2_indirect_reg(L2PMCNTENSET, BIT(n));
+}
+
+static void mon_disable(int n)
+{
+ /* Disable event counter n */
+ set_l2_indirect_reg(L2PMCNTENCLR, BIT(n));
+}
+
+static void mon_irq_enable(int n, bool en)
+{
+ if (en)
+ set_l2_indirect_reg(L2PMINTENSET, BIT(n));
+ else
+ set_l2_indirect_reg(L2PMINTENCLR, BIT(n));
+}
+
+/* Returns start counter value to be used with mon_get_mbps() */
+static u32 mon_set_limit_mbyte(int n, unsigned int mbytes)
+{
+ u32 regval, beats;
+
+ beats = mult_frac(mbytes, SZ_1M, bytes_per_beat);
+ regval = 0xFFFFFFFF - beats;
+ set_l2_indirect_reg(L2PMnEVCNTR(n), regval);
+ pr_debug("EV%d MB: %d, start val: %x\n", n, mbytes, regval);
+
+ return regval;
+}
+
+static long mon_get_count(int n, u32 start_val)
+{
+ u32 overflow, count;
+
+ count = get_l2_indirect_reg(L2PMnEVCNTR(n));
+ overflow = get_l2_indirect_reg(L2PMOVSR);
+
+ pr_debug("EV%d ov: %x, cnt: %x\n", n, overflow, count);
+
+ if (overflow & BIT(n))
+ return 0xFFFFFFFF - start_val + count;
+ else
+ return count - start_val;
+}
+
+/* Returns MBps of read/writes for the sampling window. */
+static unsigned int beats_to_mbps(long long beats, unsigned int us)
+{
+ beats *= USEC_PER_SEC;
+ beats *= bytes_per_beat;
+ do_div(beats, us);
+ beats = DIV_ROUND_UP_ULL(beats, SZ_1M);
+
+ return beats;
+}
+
+static int to_limit(int mbps)
+{
+ mbps *= (100 + tolerance_percent) * sample_ms;
+ mbps /= 100;
+ mbps = DIV_ROUND_UP(mbps, MSEC_PER_SEC);
+ return mbps;
+}
+
+static unsigned long measure_bw_and_set_irq(void)
+{
+ long r_mbps, w_mbps, mbps;
+ ktime_t ts;
+ unsigned int us;
+
+ /*
+ * Since we are stopping the counters, we don't want this short work
+ * to be interrupted by other tasks and cause the measurements to be
+ * wrong. Not blocking interrupts to avoid affecting interrupt
+ * latency and since they should be short anyway because they run in
+ * atomic context.
+ */
+ preempt_disable();
+
+ ts = ktime_get();
+ us = ktime_to_us(ktime_sub(ts, prev_ts));
+ if (!us)
+ us = 1;
+
+ mon_disable(RD_MON);
+ mon_disable(WR_MON);
+
+ r_mbps = mon_get_count(RD_MON, prev_r_start_val);
+ r_mbps = beats_to_mbps(r_mbps, us);
+ w_mbps = mon_get_count(WR_MON, prev_w_start_val);
+ w_mbps = beats_to_mbps(w_mbps, us);
+
+ prev_r_start_val = mon_set_limit_mbyte(RD_MON, to_limit(r_mbps));
+ prev_w_start_val = mon_set_limit_mbyte(WR_MON, to_limit(w_mbps));
+ prev_ts = ts;
+
+ mon_enable(RD_MON);
+ mon_enable(WR_MON);
+
+ preempt_enable();
+
+ mbps = r_mbps + w_mbps;
+ pr_debug("R/W/BW/us = %ld/%ld/%ld/%d\n", r_mbps, w_mbps, mbps, us);
+
+ return mbps;
+}
+
+static void compute_bw(int mbps, unsigned long *freq, unsigned long *ab)
+{
+ int new_bw;
+
+ mbps += guard_band_mbps;
+
+ if (mbps > prev_ab) {
+ new_bw = mbps;
+ } else {
+ new_bw = mbps * decay_rate + prev_ab * (100 - decay_rate);
+ new_bw /= 100;
+ }
+
+ prev_ab = new_bw;
+ *ab = roundup(new_bw, bw_step);
+ *freq = (new_bw * 100) / io_percent;
+}
+
+#define TOO_SOON_US (1 * USEC_PER_MSEC)
+static irqreturn_t mon_intr_handler(int irq, void *dev)
+{
+ struct devfreq *df = dev;
+ ktime_t ts;
+ unsigned int us;
+ u32 regval;
+ int ret;
+
+ regval = get_l2_indirect_reg(L2PMOVSR);
+ pr_debug("Got interrupt: %x\n", regval);
+
+ devfreq_monitor_stop(df);
+
+ /*
+ * Don't recalc bandwidth if the interrupt comes right after a
+ * previous bandwidth calculation. This is done for two reasons:
+ *
+ * 1. Sampling the BW during a very short duration can result in a
+ * very inaccurate measurement due to very short bursts.
+ * 2. This can only happen if the limit was hit very close to the end
+ * of the previous sample period. Which means the current BW
+ * estimate is not very off and doesn't need to be readjusted.
+ */
+ ts = ktime_get();
+ us = ktime_to_us(ktime_sub(ts, prev_ts));
+ if (us > TOO_SOON_US) {
+ mutex_lock(&df->lock);
+ ret = update_devfreq(df);
+ if (ret)
+ pr_err("Unable to update freq on IRQ!\n");
+ mutex_unlock(&df->lock);
+ }
+
+ devfreq_monitor_start(df);
+
+ return IRQ_HANDLED;
+}
+
+static int start_monitoring(struct devfreq *df)
+{
+ int ret, mbyte;
+
+ ret = request_threaded_irq(l2pm_irq, NULL, mon_intr_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "cpubw_hwmon", df);
+ if (ret) {
+ pr_err("Unable to register interrupt handler\n");
+ return ret;
+ }
+
+ mon_init();
+ mon_disable(RD_MON);
+ mon_disable(WR_MON);
+
+ mbyte = (df->previous_freq * io_percent) / (2 * 100);
+ prev_r_start_val = mon_set_limit_mbyte(RD_MON, mbyte);
+ prev_w_start_val = mon_set_limit_mbyte(WR_MON, mbyte);
+ prev_ts = ktime_get();
+ prev_ab = 0;
+
+ mon_irq_enable(RD_MON, true);
+ mon_irq_enable(WR_MON, true);
+ mon_enable(RD_MON);
+ mon_enable(WR_MON);
+ global_mon_enable(true);
+
+ return 0;
+}
+
+static void stop_monitoring(struct devfreq *df)
+{
+ global_mon_enable(false);
+ mon_disable(RD_MON);
+ mon_disable(WR_MON);
+ mon_irq_enable(RD_MON, false);
+ mon_irq_enable(WR_MON, false);
+
+ disable_irq(l2pm_irq);
+ free_irq(l2pm_irq, df);
+}
+
+static int devfreq_cpubw_hwmon_get_freq(struct devfreq *df,
+ unsigned long *freq,
+ u32 *flag)
+{
+ unsigned long mbps;
+
+ mbps = measure_bw_and_set_irq();
+ compute_bw(mbps, freq, df->data);
+
+ return 0;
+}
+
+gov_attr(tolerance_percent, 0U, 30U);
+gov_attr(guard_band_mbps, 0U, 2000U);
+gov_attr(decay_rate, 0U, 100U);
+gov_attr(io_percent, 1U, 100U);
+gov_attr(bw_step, 50U, 1000U);
+
+static struct attribute *dev_attr[] = {
+ &dev_attr_tolerance_percent.attr,
+ &dev_attr_guard_band_mbps.attr,
+ &dev_attr_decay_rate.attr,
+ &dev_attr_io_percent.attr,
+ &dev_attr_bw_step.attr,
+ NULL,
+};
+
+static struct attribute_group dev_attr_group = {
+ .name = "cpubw_hwmon",
+ .attrs = dev_attr,
+};
+
+static int devfreq_cpubw_hwmon_ev_handler(struct devfreq *df,
+ unsigned int event, void *data)
+{
+ int ret;
+
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ ret = start_monitoring(df);
+ if (ret)
+ return ret;
+ ret = sysfs_create_group(&df->dev.kobj, &dev_attr_group);
+ if (ret)
+ return ret;
+
+ sample_ms = df->profile->polling_ms;
+ sample_ms = max(MIN_MS, sample_ms);
+ sample_ms = min(MAX_MS, sample_ms);
+ df->profile->polling_ms = sample_ms;
+ devfreq_monitor_start(df);
+
+ pr_debug("Enabled CPU BW HW monitor governor\n");
+ break;
+
+ case DEVFREQ_GOV_STOP:
+ sysfs_remove_group(&df->dev.kobj, &dev_attr_group);
+ devfreq_monitor_stop(df);
+ *(unsigned long *)df->data = 0;
+ stop_monitoring(df);
+ pr_debug("Disabled CPU BW HW monitor governor\n");
+ break;
+
+ case DEVFREQ_GOV_INTERVAL:
+ sample_ms = *(unsigned int *)data;
+ sample_ms = max(MIN_MS, sample_ms);
+ sample_ms = min(MAX_MS, sample_ms);
+ devfreq_interval_update(df, &sample_ms);
+ break;
+ }
+
+ return 0;
+}
+
+static struct devfreq_governor devfreq_cpubw_hwmon = {
+ .name = "cpubw_hwmon",
+ .get_target_freq = devfreq_cpubw_hwmon_get_freq,
+ .event_handler = devfreq_cpubw_hwmon_ev_handler,
+};
+
+static int cpubw_hwmon_driver_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ l2pm_irq = platform_get_irq(pdev, 0);
+ if (l2pm_irq < 0) {
+ pr_err("Unable to get IRQ number\n");
+ return l2pm_irq;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "qcom,bytes-per-beat",
+ &bytes_per_beat);
+ if (ret) {
+ pr_err("Unable to read bytes per beat\n");
+ return ret;
+ }
+
+ ret = devfreq_add_governor(&devfreq_cpubw_hwmon);
+ if (ret) {
+ pr_err("devfreq governor registration failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct of_device_id match_table[] = {
+ { .compatible = "qcom,kraitbw-l2pm" },
+ {}
+};
+
+static struct platform_driver cpubw_hwmon_driver = {
+ .probe = cpubw_hwmon_driver_probe,
+ .driver = {
+ .name = "kraitbw-l2pm",
+ .of_match_table = match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init cpubw_hwmon_init(void)
+{
+ return platform_driver_register(&cpubw_hwmon_driver);
+}
+module_init(cpubw_hwmon_init);
+
+static void __exit cpubw_hwmon_exit(void)
+{
+ platform_driver_unregister(&cpubw_hwmon_driver);
+}
+module_exit(cpubw_hwmon_exit);
+
+MODULE_DESCRIPTION("HW monitor based CPU DDR bandwidth voting driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_msm_adreno_tz.c b/drivers/devfreq/governor_msm_adreno_tz.c
new file mode 100644
index 000000000000..738056d4f7db
--- /dev/null
+++ b/drivers/devfreq/governor_msm_adreno_tz.c
@@ -0,0 +1,358 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/devfreq.h>
+#include <linux/math64.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/ftrace.h>
+#include <linux/msm_adreno_devfreq.h>
+#include <soc/qcom/scm.h>
+#include "governor.h"
+
+static DEFINE_SPINLOCK(tz_lock);
+
+/*
+ * FLOOR is 5msec to capture up to 3 re-draws
+ * per frame for 60fps content.
+ */
+#define FLOOR 5000
+#define LONG_FLOOR 50000
+#define HIST 5
+#define TARGET 80
+#define CAP 75
+
+/*
+ * CEILING is 50msec, larger than any standard
+ * frame length, but less than the idle timer.
+ */
+#define CEILING 50000
+#define TZ_RESET_ID 0x3
+#define TZ_UPDATE_ID 0x4
+#define TZ_INIT_ID 0x6
+
+#define TAG "msm_adreno_tz: "
+
+/* Trap into the TrustZone, and call funcs there. */
+static int __secure_tz_entry2(u32 cmd, u32 val1, u32 val2)
+{
+ int ret;
+ spin_lock(&tz_lock);
+ /* sync memory before sending the commands to tz*/
+ __iowmb();
+ ret = scm_call_atomic2(SCM_SVC_IO, cmd, val1, val2);
+ spin_unlock(&tz_lock);
+ return ret;
+}
+
+static int __secure_tz_entry3(u32 cmd, u32 val1, u32 val2, u32 val3)
+{
+ int ret;
+ spin_lock(&tz_lock);
+ /* sync memory before sending the commands to tz*/
+ __iowmb();
+ ret = scm_call_atomic3(SCM_SVC_IO, cmd, val1, val2, val3);
+ spin_unlock(&tz_lock);
+ return ret;
+}
+
+static void _update_cutoff(struct devfreq_msm_adreno_tz_data *priv,
+ unsigned int norm_max)
+{
+ int i;
+
+ priv->bus.max = norm_max;
+ for (i = 0; i < priv->bus.num; i++) {
+ priv->bus.up[i] = priv->bus.p_up[i] * norm_max / 100;
+ priv->bus.down[i] = priv->bus.p_down[i] * norm_max / 100;
+ }
+}
+
+static int tz_get_target_freq(struct devfreq *devfreq, unsigned long *freq,
+ u32 *flag)
+{
+ int result = 0;
+ struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
+ struct devfreq_dev_status stats;
+ struct xstats b;
+ int val, level = 0;
+ int act_level;
+ int norm_cycles;
+ int gpu_percent;
+
+ if (priv->bus.num)
+ stats.private_data = &b;
+ else
+ stats.private_data = NULL;
+ result = devfreq->profile->get_dev_status(devfreq->dev.parent, &stats);
+ if (result) {
+ pr_err(TAG "get_status failed %d\n", result);
+ return result;
+ }
+
+ *freq = stats.current_frequency;
+ *flag = 0;
+ priv->bin.total_time += stats.total_time;
+ priv->bin.busy_time += stats.busy_time;
+ if (priv->bus.num) {
+ priv->bus.total_time += stats.total_time;
+ priv->bus.gpu_time += stats.busy_time;
+ priv->bus.ram_time += b.ram_time;
+ priv->bus.ram_time += b.ram_wait;
+ }
+
+ /*
+ * Do not waste CPU cycles running this algorithm if
+ * the GPU just started, or if less than FLOOR time
+ * has passed since the last run.
+ */
+ if ((stats.total_time == 0) ||
+ (priv->bin.total_time < FLOOR)) {
+ return 1;
+ }
+
+ level = devfreq_get_freq_level(devfreq, stats.current_frequency);
+ if (level < 0) {
+ pr_err(TAG "bad freq %ld\n", stats.current_frequency);
+ return level;
+ }
+
+ /*
+ * If there is an extended block of busy processing,
+ * increase frequency. Otherwise run the normal algorithm.
+ */
+ if (priv->bin.busy_time > CEILING) {
+ val = -1 * level;
+ } else {
+ val = __secure_tz_entry3(TZ_UPDATE_ID,
+ level,
+ priv->bin.total_time,
+ priv->bin.busy_time);
+ }
+ priv->bin.total_time = 0;
+ priv->bin.busy_time = 0;
+
+ /*
+ * If the decision is to move to a different level, make sure the GPU
+ * frequency changes.
+ */
+ if (val) {
+ level += val;
+ level = max(level, 0);
+ level = min_t(int, level, devfreq->profile->max_state);
+ goto clear;
+ }
+
+ if (priv->bus.total_time < LONG_FLOOR)
+ goto end;
+ norm_cycles = (unsigned int)priv->bus.ram_time /
+ (unsigned int) priv->bus.total_time;
+ gpu_percent = (100 * (unsigned int)priv->bus.gpu_time) /
+ (unsigned int) priv->bus.total_time;
+
+ /*
+ * If there's a new high watermark, update the cutoffs and send the
+ * FAST hint. Otherwise check the current value against the current
+ * cutoffs.
+ */
+ if (norm_cycles > priv->bus.max) {
+ _update_cutoff(priv, norm_cycles);
+ *flag = DEVFREQ_FLAG_FAST_HINT;
+ } else {
+ /* GPU votes for IB not AB so don't under vote the system */
+ norm_cycles = (100 * norm_cycles) / TARGET;
+ act_level = priv->bus.index[level] + b.mod;
+ act_level = (act_level < 0) ? 0 : act_level;
+ act_level = (act_level >= priv->bus.num) ?
+ (priv->bus.num - 1) : act_level;
+ if (norm_cycles > priv->bus.up[act_level] &&
+ gpu_percent > CAP)
+ *flag = DEVFREQ_FLAG_FAST_HINT;
+ else if (norm_cycles < priv->bus.down[act_level] && level)
+ *flag = DEVFREQ_FLAG_SLOW_HINT;
+ }
+
+clear:
+ priv->bus.total_time = 0;
+ priv->bus.gpu_time = 0;
+ priv->bus.ram_time = 0;
+
+end:
+ *freq = devfreq->profile->freq_table[level];
+ return 0;
+}
+
+static int tz_notify(struct notifier_block *nb, unsigned long type, void *devp)
+{
+ int result = 0;
+ struct devfreq *devfreq = devp;
+
+ switch (type) {
+ case ADRENO_DEVFREQ_NOTIFY_IDLE:
+ case ADRENO_DEVFREQ_NOTIFY_RETIRE:
+ mutex_lock(&devfreq->lock);
+ result = update_devfreq(devfreq);
+ mutex_unlock(&devfreq->lock);
+ break;
+ /* ignored by this governor */
+ case ADRENO_DEVFREQ_NOTIFY_SUBMIT:
+ default:
+ break;
+ }
+ return notifier_from_errno(result);
+}
+
+static int tz_start(struct devfreq *devfreq)
+{
+ struct devfreq_msm_adreno_tz_data *priv;
+ unsigned int tz_pwrlevels[MSM_ADRENO_MAX_PWRLEVELS + 1];
+ unsigned int t1, t2 = 2 * HIST;
+ int i, out, ret;
+
+ if (devfreq->data == NULL) {
+ pr_err(TAG "data is required for this governor\n");
+ return -EINVAL;
+ }
+
+ priv = devfreq->data;
+ priv->nb.notifier_call = tz_notify;
+
+ out = 1;
+ if (devfreq->profile->max_state < MSM_ADRENO_MAX_PWRLEVELS) {
+ for (i = 0; i < devfreq->profile->max_state; i++)
+ tz_pwrlevels[out++] = devfreq->profile->freq_table[i];
+ tz_pwrlevels[0] = i;
+ } else {
+ pr_err(TAG "tz_pwrlevels[] is too short\n");
+ return -EINVAL;
+ }
+
+ ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID, tz_pwrlevels,
+ sizeof(tz_pwrlevels), NULL, 0);
+
+ if (ret != 0)
+ pr_err(TAG "tz_init failed\n");
+
+ /* Set up the cut-over percentages for the bus calculation. */
+ if (priv->bus.num) {
+ for (i = 0; i < priv->bus.num; i++) {
+ t1 = (u32)(100 * priv->bus.ib[i]) /
+ (u32)priv->bus.ib[priv->bus.num - 1];
+ priv->bus.p_up[i] = t1 - HIST;
+ priv->bus.p_down[i] = t2 - 2 * HIST;
+ t2 = t1;
+ }
+ /* Set the upper-most and lower-most bounds correctly. */
+ priv->bus.p_down[0] = 0;
+ priv->bus.p_down[1] = (priv->bus.p_down[1] > (2 * HIST)) ?
+ priv->bus.p_down[1] : (2 * HIST);
+ if (priv->bus.num - 1 >= 0)
+ priv->bus.p_up[priv->bus.num - 1] = 100;
+ _update_cutoff(priv, priv->bus.max);
+ }
+
+ return kgsl_devfreq_add_notifier(devfreq->dev.parent, &priv->nb);
+}
+
+static int tz_stop(struct devfreq *devfreq)
+{
+ struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
+
+ kgsl_devfreq_del_notifier(devfreq->dev.parent, &priv->nb);
+ return 0;
+}
+
+
+static int tz_resume(struct devfreq *devfreq)
+{
+ struct devfreq_dev_profile *profile = devfreq->profile;
+ unsigned long freq;
+
+ freq = profile->initial_freq;
+
+ return profile->target(devfreq->dev.parent, &freq, 0);
+}
+
+static int tz_suspend(struct devfreq *devfreq)
+{
+ struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
+
+ __secure_tz_entry2(TZ_RESET_ID, 0, 0);
+
+ priv->bin.total_time = 0;
+ priv->bin.busy_time = 0;
+ priv->bus.total_time = 0;
+ priv->bus.gpu_time = 0;
+ priv->bus.ram_time = 0;
+ return 0;
+}
+
+static int tz_handler(struct devfreq *devfreq, unsigned int event, void *data)
+{
+ int result;
+ BUG_ON(devfreq == NULL);
+
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ result = tz_start(devfreq);
+ break;
+
+ case DEVFREQ_GOV_STOP:
+ result = tz_stop(devfreq);
+ break;
+
+ case DEVFREQ_GOV_SUSPEND:
+ result = tz_suspend(devfreq);
+ break;
+
+ case DEVFREQ_GOV_RESUME:
+ result = tz_resume(devfreq);
+ break;
+
+ case DEVFREQ_GOV_INTERVAL:
+ /* ignored, this governor doesn't use polling */
+ default:
+ result = 0;
+ break;
+ }
+
+ return result;
+}
+
+static struct devfreq_governor msm_adreno_tz = {
+ .name = "msm-adreno-tz",
+ .get_target_freq = tz_get_target_freq,
+ .event_handler = tz_handler,
+};
+
+static int __init msm_adreno_tz_init(void)
+{
+ return devfreq_add_governor(&msm_adreno_tz);
+}
+subsys_initcall(msm_adreno_tz_init);
+
+static void __exit msm_adreno_tz_exit(void)
+{
+	int ret;
+	ret = devfreq_remove_governor(&msm_adreno_tz);
+	if (ret)
+		pr_err(TAG "failed to remove governor %d\n", ret);
+
+	return;
+}
+
+module_exit(msm_adreno_tz_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_msm_cpufreq.c b/drivers/devfreq/governor_msm_cpufreq.c
new file mode 100644
index 000000000000..890cb856f8ab
--- /dev/null
+++ b/drivers/devfreq/governor_msm_cpufreq.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/devfreq.h>
+#include <soc/qcom/cpufreq.h>
+#include "governor.h"
+
+static DEFINE_MUTEX(df_lock);
+static struct devfreq *df;
+
+static int devfreq_msm_cpufreq_get_freq(struct devfreq *devfreq,
+					unsigned long *freq,
+					u32 *flag)
+{
+	*freq = msm_cpufreq_get_bw();
+	return 0;
+}
+
+int devfreq_msm_cpufreq_update_bw(void)
+{
+ int ret = 0;
+
+ mutex_lock(&df_lock);
+ if (df) {
+ mutex_lock(&df->lock);
+ ret = update_devfreq(df);
+ mutex_unlock(&df->lock);
+ }
+ mutex_unlock(&df_lock);
+ return ret;
+}
+
+static int devfreq_msm_cpufreq_ev_handler(struct devfreq *devfreq,
+ unsigned int event, void *data)
+{
+ int ret;
+
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ mutex_lock(&df_lock);
+ df = devfreq;
+ mutex_unlock(&df_lock);
+
+ ret = devfreq_msm_cpufreq_update_bw();
+ if (ret) {
+ pr_err("Unable to update BW! Gov start failed!\n");
+ return ret;
+ }
+
+ devfreq_monitor_stop(df);
+ pr_debug("Enabled MSM CPUfreq governor\n");
+ break;
+
+ case DEVFREQ_GOV_STOP:
+ mutex_lock(&df_lock);
+ df = NULL;
+ mutex_unlock(&df_lock);
+
+ pr_debug("Disabled MSM CPUfreq governor\n");
+ break;
+ }
+
+ return 0;
+}
+
+static struct devfreq_governor devfreq_msm_cpufreq = {
+ .name = "msm_cpufreq",
+ .get_target_freq = devfreq_msm_cpufreq_get_freq,
+ .event_handler = devfreq_msm_cpufreq_ev_handler,
+};
+
+int register_devfreq_msm_cpufreq(void)
+{
+ return devfreq_add_governor(&devfreq_msm_cpufreq);
+}
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c
index c72f942f30a8..74ae3bb277c7 100644
--- a/drivers/devfreq/governor_performance.c
+++ b/drivers/devfreq/governor_performance.c
@@ -14,7 +14,8 @@
#include "governor.h"
static int devfreq_performance_func(struct devfreq *df,
- unsigned long *freq)
+ unsigned long *freq,
+ u32 *flag)
{
/*
* target callback should be able to get floor value as
@@ -31,13 +32,26 @@ static int devfreq_performance_handler(struct devfreq *devfreq,
unsigned int event, void *data)
{
int ret = 0;
+ unsigned long freq;
- if (event == DEVFREQ_GOV_START) {
- mutex_lock(&devfreq->lock);
+ mutex_lock(&devfreq->lock);
+ freq = devfreq->previous_freq;
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ devfreq->profile->target(devfreq->dev.parent,
+ &freq,
+ DEVFREQ_FLAG_WAKEUP_MAXFREQ);
+ /* fall through */
+ case DEVFREQ_GOV_RESUME:
ret = update_devfreq(devfreq);
- mutex_unlock(&devfreq->lock);
+ break;
+ case DEVFREQ_GOV_SUSPEND:
+ devfreq->profile->target(devfreq->dev.parent,
+ &freq,
+ DEVFREQ_FLAG_WAKEUP_MAXFREQ);
+ break;
}
-
+ mutex_unlock(&devfreq->lock);
return ret;
}
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c
index 0c6bed567e6d..57f3738a0b9d 100644
--- a/drivers/devfreq/governor_powersave.c
+++ b/drivers/devfreq/governor_powersave.c
@@ -14,7 +14,8 @@
#include "governor.h"
static int devfreq_powersave_func(struct devfreq *df,
- unsigned long *freq)
+ unsigned long *freq,
+ u32 *flag)
{
/*
* target callback should be able to get ceiling value as
@@ -29,7 +30,7 @@ static int devfreq_powersave_handler(struct devfreq *devfreq,
{
int ret = 0;
- if (event == DEVFREQ_GOV_START) {
+ if (event == DEVFREQ_GOV_START || event == DEVFREQ_GOV_RESUME) {
mutex_lock(&devfreq->lock);
ret = update_devfreq(devfreq);
mutex_unlock(&devfreq->lock);
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
index 0720ba84ca92..d37997da89b1 100644
--- a/drivers/devfreq/governor_simpleondemand.c
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -19,7 +19,8 @@
#define DFSO_UPTHRESHOLD (90)
#define DFSO_DOWNDIFFERENCTIAL (5)
static int devfreq_simple_ondemand_func(struct devfreq *df,
- unsigned long *freq)
+ unsigned long *freq,
+ u32 *flag)
{
struct devfreq_dev_status stat;
int err = df->profile->get_dev_status(df->dev.parent, &stat);
@@ -28,6 +29,7 @@ static int devfreq_simple_ondemand_func(struct devfreq *df,
unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL;
struct devfreq_simple_ondemand_data *data = df->data;
unsigned long max = (df->max_freq) ? df->max_freq : UINT_MAX;
+ unsigned long min = (df->min_freq) ? df->min_freq : 0;
if (err)
return err;
@@ -42,18 +44,30 @@ static int devfreq_simple_ondemand_func(struct devfreq *df,
dfso_upthreshold < dfso_downdifferential)
return -EINVAL;
- /* Assume MAX if it is going to be divided by zero */
- if (stat.total_time == 0) {
- *freq = max;
- return 0;
- }
-
/* Prevent overflow */
if (stat.busy_time >= (1 << 24) || stat.total_time >= (1 << 24)) {
stat.busy_time >>= 7;
stat.total_time >>= 7;
}
+ if (data && data->simple_scaling) {
+ if (stat.busy_time * 100 >
+ stat.total_time * dfso_upthreshold)
+ *freq = max;
+ else if (stat.busy_time * 100 <
+ stat.total_time * dfso_downdifferential)
+ *freq = min;
+ else
+ *freq = df->previous_freq;
+ return 0;
+ }
+
+ /* Assume MAX if it is going to be divided by zero */
+ if (stat.total_time == 0) {
+ *freq = max;
+ return 0;
+ }
+
/* Set MAX if it's busy enough */
if (stat.busy_time * 100 >
stat.total_time * dfso_upthreshold) {
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index 35de6e83c1fe..4fbde042e9dd 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -22,7 +22,8 @@ struct userspace_data {
bool valid;
};
-static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq)
+static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq,
+ u32 *flag)
{
struct userspace_data *data = df->data;
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
index 3612cb5b30b2..72629c158e7f 100644
--- a/drivers/hwspinlock/Kconfig
+++ b/drivers/hwspinlock/Kconfig
@@ -29,4 +29,15 @@ config HSEM_U8500
If unsure, say N.
+config REMOTE_SPINLOCK_MSM
+ bool "MSM Remote Spinlock Functionality"
+ depends on ARCH_MSM || ARCH_QCOM
+ select HWSPINLOCK
+ help
+ Say y here to support the MSM Remote Spinlock functionality, which
+ provides a synchronisation mechanism for the various processors on the
+ SoC.
+
+ If unsure, say N.
+
endmenu
diff --git a/drivers/hwspinlock/Makefile b/drivers/hwspinlock/Makefile
index 93eb64b66486..cb3ce4c08f97 100644
--- a/drivers/hwspinlock/Makefile
+++ b/drivers/hwspinlock/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_HWSPINLOCK) += hwspinlock_core.o
obj-$(CONFIG_HWSPINLOCK_OMAP) += omap_hwspinlock.o
obj-$(CONFIG_HSEM_U8500) += u8500_hsem.o
+obj-$(CONFIG_REMOTE_SPINLOCK_MSM) += msm_remote_spinlock.o
diff --git a/drivers/hwspinlock/msm_remote_spinlock.c b/drivers/hwspinlock/msm_remote_spinlock.c
new file mode 100644
index 000000000000..179b4f8bdea1
--- /dev/null
+++ b/drivers/hwspinlock/msm_remote_spinlock.c
@@ -0,0 +1,484 @@
+/* Copyright (c) 2008-2009, 2011-2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/msm_remote_spinlock.h>
+
+#include <soc/qcom/smem.h>
+
+
+#define SPINLOCK_PID_APPS 1
+
+static int is_hw_lock_type;
+static DEFINE_MUTEX(ops_init_lock);
+
+struct spinlock_ops {
+ void (*lock)(raw_remote_spinlock_t *lock);
+ void (*unlock)(raw_remote_spinlock_t *lock);
+ int (*trylock)(raw_remote_spinlock_t *lock);
+ int (*release)(raw_remote_spinlock_t *lock, uint32_t pid);
+ int (*owner)(raw_remote_spinlock_t *lock);
+ void (*lock_rlock_id)(raw_remote_spinlock_t *lock, uint32_t tid);
+ void (*unlock_rlock)(raw_remote_spinlock_t *lock);
+};
+
+static struct spinlock_ops current_ops;
+
+static int remote_spinlock_init_address(int id, _remote_spinlock_t *lock);
+
+/* ldrex implementation ----------------------------------------------------- */
+static char *ldrex_compatible_string = "qcom,ipc-spinlock-ldrex";
+
+#ifdef CONFIG_ARM
+static void __raw_remote_ex_spin_lock(raw_remote_spinlock_t *lock)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+"1: ldrex %0, [%1]\n"
+" teq %0, #0\n"
+" strexeq %0, %2, [%1]\n"
+" teqeq %0, #0\n"
+" bne 1b"
+ : "=&r" (tmp)
+ : "r" (&lock->lock), "r" (SPINLOCK_PID_APPS)
+ : "cc");
+
+ smp_mb();
+}
+
+static int __raw_remote_ex_spin_trylock(raw_remote_spinlock_t *lock)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+" ldrex %0, [%1]\n"
+" teq %0, #0\n"
+" strexeq %0, %2, [%1]\n"
+ : "=&r" (tmp)
+ : "r" (&lock->lock), "r" (SPINLOCK_PID_APPS)
+ : "cc");
+
+ if (tmp == 0) {
+ smp_mb();
+ return 1;
+ }
+ return 0;
+}
+
+static void __raw_remote_ex_spin_unlock(raw_remote_spinlock_t *lock)
+{
+ int lock_owner;
+
+ smp_mb();
+ lock_owner = readl_relaxed(&lock->lock);
+ if (lock_owner != SPINLOCK_PID_APPS) {
+ pr_err("%s: spinlock not owned by Apps (actual owner is %d)\n",
+ __func__, lock_owner);
+ }
+
+ __asm__ __volatile__(
+" str %1, [%0]\n"
+ :
+ : "r" (&lock->lock), "r" (0)
+ : "cc");
+}
+#else
+static void __raw_remote_ex_spin_lock(raw_remote_spinlock_t *lock)
+{
+}
+
+static int __raw_remote_ex_spin_trylock(raw_remote_spinlock_t *lock)
+{
+ return 0;
+}
+
+static void __raw_remote_ex_spin_unlock(raw_remote_spinlock_t *lock)
+{
+}
+#endif /* CONFIG_ARM */
+/* end ldrex implementation ------------------------------------------------- */
+
+/* sfpb implementation ------------------------------------------------------ */
+static uint32_t lock_count;
+static phys_addr_t reg_base;
+static uint32_t reg_size;
+static uint32_t lock_offset; /* offset into the hardware block before lock 0 */
+static uint32_t lock_size;
+
+static void *hw_mutex_reg_base;
+static DEFINE_MUTEX(hw_map_init_lock);
+
+static char *sfpb_compatible_string = "qcom,ipc-spinlock-sfpb";
+
+static int init_hw_mutex(struct device_node *node)
+{
+ struct resource r;
+ int rc;
+
+ rc = of_address_to_resource(node, 0, &r);
+ if (rc)
+ BUG();
+
+ rc = of_property_read_u32(node, "qcom,num-locks", &lock_count);
+ if (rc)
+ BUG();
+
+ reg_base = r.start;
+ reg_size = (uint32_t)(resource_size(&r));
+ lock_offset = 0;
+ lock_size = reg_size / lock_count;
+
+ return 0;
+}
+
+static void find_and_init_hw_mutex(void)
+{
+ struct device_node *node;
+
+ node = of_find_compatible_node(NULL, NULL, sfpb_compatible_string);
+ BUG_ON(node == NULL);
+ init_hw_mutex(node);
+ hw_mutex_reg_base = ioremap(reg_base, reg_size);
+ BUG_ON(hw_mutex_reg_base == NULL);
+}
+
+static int remote_spinlock_init_address_hw(int id, _remote_spinlock_t *lock)
+{
+ /*
+ * Optimistic locking. Init only needs to be done once by the first
+ * caller. After that, serializing inits between different callers
+ * is unnecessary. The second check after the lock ensures init
+ * wasn't previously completed by someone else before the lock could
+ * be grabbed.
+ */
+ if (!hw_mutex_reg_base) {
+ mutex_lock(&hw_map_init_lock);
+ if (!hw_mutex_reg_base)
+ find_and_init_hw_mutex();
+ mutex_unlock(&hw_map_init_lock);
+ }
+
+ if (id >= lock_count)
+ return -EINVAL;
+
+ *lock = hw_mutex_reg_base + lock_offset + id * lock_size;
+ return 0;
+}
+
+static void __raw_remote_sfpb_spin_lock(raw_remote_spinlock_t *lock)
+{
+ do {
+ writel_relaxed(SPINLOCK_PID_APPS, lock);
+ smp_mb();
+ } while (readl_relaxed(lock) != SPINLOCK_PID_APPS);
+}
+
+static int __raw_remote_sfpb_spin_trylock(raw_remote_spinlock_t *lock)
+{
+ writel_relaxed(SPINLOCK_PID_APPS, lock);
+ smp_mb();
+ return readl_relaxed(lock) == SPINLOCK_PID_APPS;
+}
+
+static void __raw_remote_sfpb_spin_unlock(raw_remote_spinlock_t *lock)
+{
+ int lock_owner;
+
+ lock_owner = readl_relaxed(lock);
+ if (lock_owner != SPINLOCK_PID_APPS) {
+ pr_err("%s: spinlock not owned by Apps (actual owner is %d)\n",
+ __func__, lock_owner);
+ }
+
+ writel_relaxed(0, lock);
+ smp_mb();
+}
+
+static void __raw_remote_sfpb_spin_lock_rlock_id(raw_remote_spinlock_t *lock,
+ uint32_t tid)
+{
+ if (unlikely(!tid)) {
+ pr_err("%s: unsupported rlock tid=0\n", __func__);
+ BUG();
+ }
+
+ do {
+ writel_relaxed(tid, lock);
+ smp_mb();
+ } while (readl_relaxed(lock) != tid);
+}
+
+static void __raw_remote_sfpb_spin_unlock_rlock(raw_remote_spinlock_t *lock)
+{
+ writel_relaxed(0, lock);
+ smp_mb();
+}
+
+/* end sfpb implementation -------------------------------------------------- */
+
+/* common spinlock API ------------------------------------------------------ */
+/**
+ * Release spinlock if it is owned by @pid.
+ *
+ * This is only to be used for situations where the processor owning
+ * the spinlock has crashed and the spinlock must be released.
+ *
+ * @lock: lock structure
+ * @pid: processor ID of processor to release
+ */
+static int __raw_remote_gen_spin_release(raw_remote_spinlock_t *lock,
+ uint32_t pid)
+{
+ int ret = 1;
+
+ if (readl_relaxed(&lock->lock) == pid) {
+ writel_relaxed(0, &lock->lock);
+ wmb();
+ ret = 0;
+ }
+ return ret;
+}
+
+/**
+ * Return owner of the spinlock.
+ *
+ * @lock: pointer to lock structure
+ * @returns: >= 0 owned PID; < 0 for error case
+ *
+ * Used for testing. PID's are assumed to be 31 bits or less.
+ */
+static int __raw_remote_gen_spin_owner(raw_remote_spinlock_t *lock)
+{
+ rmb();
+ return readl_relaxed(&lock->lock);
+}
+
+
+static int dt_node_is_valid(const struct device_node *node)
+{
+ const char *status;
+ int statlen;
+
+ status = of_get_property(node, "status", &statlen);
+ if (status == NULL)
+ return 1;
+
+ if (statlen > 0) {
+ if (!strcmp(status, "okay") || !strcmp(status, "ok"))
+ return 1;
+ }
+
+ return 0;
+}
+
+static void initialize_ops(void)
+{
+ struct device_node *node;
+
+ /*
+ * of_find_compatible_node() returns a valid pointer even if
+ * the status property is "disabled", so the validity needs
+ * to be checked
+ */
+ node = of_find_compatible_node(NULL, NULL, sfpb_compatible_string);
+ if (node && dt_node_is_valid(node)) {
+ current_ops.lock = __raw_remote_sfpb_spin_lock;
+ current_ops.unlock = __raw_remote_sfpb_spin_unlock;
+ current_ops.trylock = __raw_remote_sfpb_spin_trylock;
+ current_ops.release = __raw_remote_gen_spin_release;
+ current_ops.owner = __raw_remote_gen_spin_owner;
+ current_ops.lock_rlock_id =
+ __raw_remote_sfpb_spin_lock_rlock_id;
+ current_ops.unlock_rlock = __raw_remote_sfpb_spin_unlock_rlock;
+ is_hw_lock_type = 1;
+ return;
+ }
+
+ node = of_find_compatible_node(NULL, NULL, ldrex_compatible_string);
+ if (node && dt_node_is_valid(node)) {
+ current_ops.lock = __raw_remote_ex_spin_lock;
+ current_ops.unlock = __raw_remote_ex_spin_unlock;
+ current_ops.trylock = __raw_remote_ex_spin_trylock;
+ current_ops.release = __raw_remote_gen_spin_release;
+ current_ops.owner = __raw_remote_gen_spin_owner;
+ is_hw_lock_type = 0;
+ return;
+ }
+
+ current_ops.lock = __raw_remote_ex_spin_lock;
+ current_ops.unlock = __raw_remote_ex_spin_unlock;
+ current_ops.trylock = __raw_remote_ex_spin_trylock;
+ current_ops.release = __raw_remote_gen_spin_release;
+ current_ops.owner = __raw_remote_gen_spin_owner;
+ is_hw_lock_type = 0;
+ pr_warn("Falling back to LDREX remote spinlock implementation");
+}
+
+/**
+ * Release all spinlocks owned by @pid.
+ *
+ * This is only to be used for situations where the processor owning
+ * spinlocks has crashed and the spinlocks must be released.
+ *
+ * @pid - processor ID of processor to release
+ */
+static void remote_spin_release_all_locks(uint32_t pid, int count)
+{
+ int n;
+ _remote_spinlock_t lock;
+
+ if (pid >= REMOTE_SPINLOCK_NUM_PID) {
+ pr_err("%s: Unsupported PID %d\n", __func__, pid);
+ return;
+ }
+
+ for (n = 0; n < count; ++n) {
+ if (remote_spinlock_init_address(n, &lock) == 0)
+ _remote_spin_release(&lock, pid);
+ }
+}
+
+void _remote_spin_release_all(uint32_t pid)
+{
+ remote_spin_release_all_locks(pid, lock_count);
+}
+
+#define SMEM_SPINLOCK_COUNT 8
+#define SMEM_SPINLOCK_ARRAY_SIZE (SMEM_SPINLOCK_COUNT * sizeof(uint32_t))
+
+static int remote_spinlock_init_address_smem(int id, _remote_spinlock_t *lock)
+{
+ _remote_spinlock_t spinlock_start;
+
+ if (id >= SMEM_SPINLOCK_COUNT)
+ return -EINVAL;
+
+ spinlock_start = smem_find(SMEM_SPINLOCK_ARRAY,
+ SMEM_SPINLOCK_ARRAY_SIZE,
+ 0,
+ SMEM_ANY_HOST_FLAG);
+ if (spinlock_start == NULL)
+ return -ENXIO;
+
+ *lock = spinlock_start + id;
+
+ lock_count = SMEM_SPINLOCK_COUNT;
+
+ return 0;
+}
+
+static int remote_spinlock_init_address(int id, _remote_spinlock_t *lock)
+{
+ if (is_hw_lock_type)
+ return remote_spinlock_init_address_hw(id, lock);
+ else
+ return remote_spinlock_init_address_smem(id, lock);
+}
+
+int _remote_spin_lock_init(remote_spinlock_id_t id, _remote_spinlock_t *lock)
+{
+ BUG_ON(id == NULL);
+
+ /*
+ * Optimistic locking. Init only needs to be done once by the first
+ * caller. After that, serializing inits between different callers
+ * is unnecessary. The second check after the lock ensures init
+ * wasn't previously completed by someone else before the lock could
+ * be grabbed.
+ */
+ if (!current_ops.lock) {
+ mutex_lock(&ops_init_lock);
+ if (!current_ops.lock)
+ initialize_ops();
+ mutex_unlock(&ops_init_lock);
+ }
+
+ if (id[0] == 'S' && id[1] == ':') {
+ /* Single-digit lock ID follows "S:" */
+ BUG_ON(id[3] != '\0');
+
+ return remote_spinlock_init_address((((uint8_t)id[2])-'0'),
+ lock);
+ } else {
+ return -EINVAL;
+ }
+}
+
+/*
+ * lock comes in as a pointer to a pointer to the lock location, so it must
+ * be dereferenced and casted to the right type for the actual lock
+ * implementation functions
+ */
+void _remote_spin_lock(_remote_spinlock_t *lock)
+{
+ if (unlikely(!current_ops.lock))
+ BUG();
+ current_ops.lock((raw_remote_spinlock_t *)(*lock));
+}
+EXPORT_SYMBOL(_remote_spin_lock);
+
+void _remote_spin_unlock(_remote_spinlock_t *lock)
+{
+ if (unlikely(!current_ops.unlock))
+ BUG();
+ current_ops.unlock((raw_remote_spinlock_t *)(*lock));
+}
+EXPORT_SYMBOL(_remote_spin_unlock);
+
+int _remote_spin_trylock(_remote_spinlock_t *lock)
+{
+ if (unlikely(!current_ops.trylock))
+ BUG();
+ return current_ops.trylock((raw_remote_spinlock_t *)(*lock));
+}
+EXPORT_SYMBOL(_remote_spin_trylock);
+
+int _remote_spin_release(_remote_spinlock_t *lock, uint32_t pid)
+{
+ if (unlikely(!current_ops.release))
+ BUG();
+ return current_ops.release((raw_remote_spinlock_t *)(*lock), pid);
+}
+EXPORT_SYMBOL(_remote_spin_release);
+
+int _remote_spin_owner(_remote_spinlock_t *lock)
+{
+ if (unlikely(!current_ops.owner))
+ BUG();
+ return current_ops.owner((raw_remote_spinlock_t *)(*lock));
+}
+EXPORT_SYMBOL(_remote_spin_owner);
+
+void _remote_spin_lock_rlock_id(_remote_spinlock_t *lock, uint32_t tid)
+{
+ if (unlikely(!current_ops.lock_rlock_id))
+ BUG();
+ current_ops.lock_rlock_id((raw_remote_spinlock_t *)(*lock), tid);
+}
+EXPORT_SYMBOL(_remote_spin_lock_rlock_id);
+
+void _remote_spin_unlock_rlock(_remote_spinlock_t *lock)
+{
+ if (unlikely(!current_ops.unlock_rlock))
+ BUG();
+ current_ops.unlock_rlock((raw_remote_spinlock_t *)(*lock));
+}
+EXPORT_SYMBOL(_remote_spin_unlock_rlock);
+
+/* end common spinlock API -------------------------------------------------- */
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index b21f12f1766d..c7396778c5a9 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -6,6 +6,7 @@ config ARM_GIC
bool
select IRQ_DOMAIN
select MULTI_IRQ_HANDLER
+ select MSM_SHOW_RESUME_IRQ
config GIC_NON_BANKED
bool
@@ -20,6 +21,10 @@ config ARM_NVIC
select IRQ_DOMAIN
select GENERIC_IRQ_CHIP
+config GIC_SECURE
+ bool
+ depends on ARM_GIC
+
config ARM_VIC
bool
select IRQ_DOMAIN
@@ -85,6 +90,16 @@ config ORION_IRQCHIP
select IRQ_DOMAIN
select MULTI_IRQ_HANDLER
+config MSM_SHOW_RESUME_IRQ
+ bool "Enable logging of interrupts that could have caused resume"
+ depends on ARM_GIC
+ default n
+ help
+ This option logs wake up interrupts that have triggered just before
+ the resume loop unrolls. It helps to debug to know any unnecessary
+ wake up interrupts that cause the system to come out of low power modes.
+ Say Y if you want to debug why the system resumed.
+
config RENESAS_INTC_IRQPIN
bool
select IRQ_DOMAIN
@@ -125,3 +140,7 @@ config KEYSTONE_IRQ
help
Support for Texas Instruments Keystone 2 IRQ controller IP which
is part of the Keystone 2 IPC mechanism
+
+config MSM_IRQ
+ bool
+ select IRQ_DOMAIN
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 173bb5fa2cc9..dc3e9bbb77be 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -38,3 +38,5 @@ obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o
obj-$(CONFIG_BRCMSTB_L2_IRQ) += irq-brcmstb-l2.o \
irq-bcm7120-l2.o
obj-$(CONFIG_KEYSTONE_IRQ) += irq-keystone.o
+obj-$(CONFIG_MSM_IRQ) += irq-msm.o
+obj-$(CONFIG_MSM_SHOW_RESUME_IRQ) += msm_show_resume_irq.o
diff --git a/drivers/irqchip/irq-msm.c b/drivers/irqchip/irq-msm.c
new file mode 100644
index 000000000000..403b4b5f9caa
--- /dev/null
+++ b/drivers/irqchip/irq-msm.c
@@ -0,0 +1,49 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/irqchip/qpnp-int.h>
+#include <linux/irqchip/msm-gpio-irq.h>
+#include <linux/irqchip/msm-mpm-irq.h>
+#include "irqchip.h"
+
+static int __init irq_msm_gpio_init(struct device_node *node,
+ struct device_node *parent)
+{
+ int rc;
+
+#ifdef CONFIG_USE_PINCTRL_IRQ
+ rc = msm_tlmm_of_irq_init(node, parent);
+#else
+ rc = msm_gpio_of_init(node, parent);
+#endif
+ if (rc) {
+ pr_err("Couldn't initlialize gpio irq rc = %d\n", rc);
+ return rc;
+ }
+
+ /*
+ * Initialize the mpm after gpio (and gic) are initialized. Note that
+ * gpio irq controller is the child of gic irq controller, hence gic's
+ * init function will be called prior to gpio.
+ */
+ of_mpm_init();
+
+ return 0;
+}
+
+#ifdef CONFIG_USE_PINCTRL_IRQ
+IRQCHIP_DECLARE(tlmmv3_irq, "qcom,msm-tlmmv3-gp-intc", irq_msm_gpio_init);
+IRQCHIP_DECLARE(tlmmv4_irq, "qcom,msm-tlmmv4-gp-intc", irq_msm_gpio_init);
+#else
+IRQCHIP_DECLARE(tlmm_irq, "qcom,msm-gpio", irq_msm_gpio_init);
+#endif
+IRQCHIP_DECLARE(qpnp_irq, "qcom,spmi-pmic-arb", qpnpint_of_init);
diff --git a/drivers/irqchip/msm_show_resume_irq.c b/drivers/irqchip/msm_show_resume_irq.c
new file mode 100644
index 000000000000..4a796207738f
--- /dev/null
+++ b/drivers/irqchip/msm_show_resume_irq.c
@@ -0,0 +1,22 @@
+/* Copyright (c) 2011, 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+int msm_show_resume_irq_mask;
+
+module_param_named(
+ debug_mask, msm_show_resume_irq_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
+);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index bbeb4516facf..e4c63ff4cd13 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -304,6 +304,16 @@ config APDS9802ALS
This driver can also be built as a module. If so, the module
will be called apds9802als.
+config APDS9930
+ tristate "Avago APDS9930 combined als and proximity sensors"
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for Avago APDS9930
+ combined ambient light and proximity sensor chip.
+
+ To compile this driver as a module, choose M here: the
+ module will be called apds993x. If unsure, say N here.
+
config ISL29003
tristate "Intersil ISL29003 ambient light sensor"
depends on I2C && SYSFS
@@ -402,6 +412,10 @@ config TI_DAC7512
This driver can also be built as a module. If so, the module
will be called ti_dac7512.
+config UID_STAT
+ bool "UID based statistics tracking exported to /proc/uid_stat"
+ default n
+
config VMWARE_BALLOON
tristate "VMware Balloon Driver"
depends on X86 && HYPERVISOR_GUEST
@@ -515,6 +529,84 @@ config VEXPRESS_SYSCFG
bus. System Configuration interface is one of the possible means
of generating transactions on this bus.
+config TSIF
+ depends on ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_APQ8064
+ tristate "TSIF (Transport Stream InterFace) support"
+ default n
+ ---help---
+ This driver supports low level TSIF interface. It provides API
+ for upper layer drivers. If you have a TSIF hardware, say
+ Y here and read <file:Documentation/arm/msm/tsif.txt>.
+
+ To compile this driver as module, choose M here: the
+ module will be called msm_tsif.
+
+config TSIF_DEBUG
+ bool "Turn on debugging information for tsif driver"
+ depends on TSIF
+ default n
+ ---help---
+ This turns on debugging information for the tsif driver
+
+config HAPTIC_ISA1200
+ tristate "ISA1200 haptic support"
+ depends on I2C
+ default n
+ help
+ The ISA1200 is a high performance enhanced haptic driver.
+
+config QSEECOM
+ tristate "Qualcomm Secure Execution Communicator driver"
+ help
+ Provides a communication interface between userspace and
+ Qualcomm Secure Execution Environment (QSEE) using Secure Channel
+ Manager (SCM) interface.
+
+config QFP_FUSE
+ tristate "QFPROM Fuse Read/Write support"
+ help
+ This option enables device driver to read/write QFPROM
+ fuses. The ioctls provides the necessary interface
+ to the fuse block. Currently this is supported only
+ on FSM targets.
+
+config QPNP_MISC
+ tristate "QPNP Misc Peripheral"
+ depends on SPMI
+ help
+ Say 'y' here to include support for the Qualcomm QPNP MISC
+ peripheral. The MISC peripheral holds the USB ID interrupt
+ and the driver provides an API to check if this interrupt
+ is available on the current PMIC chip.
+
+config USB_HSIC_SMSC_HUB
+ tristate "Support for HSIC based MSM on-chip SMSC3503 HUB"
+ depends on USB_EHCI_MSM_HSIC
+ help
+ Enables support for the HSIC (High Speed Inter-Chip) based
+ SMSC3503 hub controller present on the Qualcomm chipsets.
+
+ This adds support for connecting devices like mouse in HSIC
+ Host mode.
+
+config TI_DRV2667
+ tristate "TI's DRV2667 haptic controller support"
+ depends on I2C
+ help
+ The DRV2667 is a piezo haptic controller chip. It can drive
+ piezo haptics either in digital mode or analog mode. This chip
+ can be used in variety of devices to provide haptic support.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ti_drv2667.
+
+config APQ8084_DOCKING_STATION
+ tristate "QTI APQ8084 Docking Station USB/Ethernet support"
+ depends on OF_GPIO
+ help
+ This option enables support for the USB and Ethernet ports found on
+ the QTI APQ8084 Docking Station.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 7d5c4cd118c4..20842e89d136 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -29,11 +29,14 @@ obj-$(CONFIG_SGI_GRU) += sgi-gru/
obj-$(CONFIG_CS5535_MFGPT) += cs5535-mfgpt.o
obj-$(CONFIG_HP_ILO) += hpilo.o
obj-$(CONFIG_APDS9802ALS) += apds9802als.o
+obj-$(CONFIG_APDS9930) += apds993x.o
obj-$(CONFIG_ISL29003) += isl29003.o
obj-$(CONFIG_ISL29020) += isl29020.o
+obj-$(CONFIG_USB_HSIC_SMSC_HUB) += smsc_hub.o
obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o
obj-$(CONFIG_DS1682) += ds1682.o
obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
+obj-$(CONFIG_UID_STAT) += uid_stat.o
obj-$(CONFIG_C2PORT) += c2port/
obj-$(CONFIG_HMC6352) += hmc6352.o
obj-y += eeprom/
@@ -56,3 +59,9 @@ obj-$(CONFIG_GENWQE) += genwqe/
obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o
obj-$(CONFIG_CXL_BASE) += cxl/
+obj-$(CONFIG_TSIF) += msm_tsif.o
+obj-$(CONFIG_HAPTIC_ISA1200) += isa1200.o
+obj-$(CONFIG_QSEECOM) += qseecom.o
+obj-$(CONFIG_QFP_FUSE) += qfp_fuse.o
+obj-$(CONFIG_TI_DRV2667) += ti_drv2667.o
+obj-$(CONFIG_APQ8084_DOCKING_STATION) += apq8084_dock.o
diff --git a/drivers/misc/qfp_fuse.c b/drivers/misc/qfp_fuse.c
new file mode 100644
index 000000000000..7090f966fe12
--- /dev/null
+++ b/drivers/misc/qfp_fuse.c
@@ -0,0 +1,498 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/delay.h>
+#include <linux/qfp_fuse.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+
+/*
+ * Time QFPROM requires to reliably burn a fuse.
+ */
+#define QFPROM_BLOW_TIMEOUT_US 20
+#define QFPROM_BLOW_TIMER_OFFSET 0x2038
+/*
+ * Denotes number of cycles required to blow the fuse.
+ */
+#define QFPROM_BLOW_TIMER_VALUE 0xF0
+
+#define QFPROM_BLOW_STATUS_OFFSET 0x204C
+#define QFPROM_BLOW_STATUS_BUSY 0x01
+#define QFPROM_BLOW_STATUS_ERROR 0x02
+
+#define QFP_FUSE_READY 0x01
+#define QFP_FUSE_OFF 0x00
+
+#define QFP_FUSE_BUF_SIZE 64
+#define UINT32_MAX (0xFFFFFFFFU)
+
+static const char *blow_supply = "vdd-blow";
+
+struct qfp_priv_t {
+ void __iomem *base;
+ uint32_t size;
+ uint32_t blow_status_offset;
+ uint32_t blow_timer;
+ struct mutex lock;
+ struct regulator *fuse_vdd;
+ u8 state;
+};
+
+struct qfp_resource {
+ resource_size_t start;
+ resource_size_t size;
+ uint32_t blow_status_offset;
+ uint32_t blow_timer;
+ const char *regulator_name;
+};
+
+/* We need only one instance of this for the driver */
+static struct qfp_priv_t *qfp_priv;
+
+static inline bool is_usr_req_valid(const struct qfp_fuse_req *req)
+{
+ uint32_t size = qfp_priv->size;
+ uint32_t req_size;
+
+ if (req->size >= (UINT32_MAX / sizeof(uint32_t)))
+ return false;
+ req_size = req->size * sizeof(uint32_t);
+ if ((req_size == 0) || (req_size > size))
+ return false;
+ if (req->offset >= size)
+ return false;
+ if ((req->offset + req_size) > size)
+ return false;
+
+ return true;
+}
+
+static int qfp_fuse_open(struct inode *inode, struct file *filp)
+{
+ if (qfp_priv == NULL)
+ return -ENODEV;
+
+ filp->private_data = qfp_priv;
+
+ return 0;
+}
+
+static int qfp_fuse_release(struct inode *inode, struct file *filp)
+{
+
+ filp->private_data = NULL;
+
+ return 0;
+}
+
+static inline int qfp_fuse_wait_for_fuse_blow(u32 *status)
+{
+ u32 timeout = QFPROM_BLOW_TIMEOUT_US;
+ /* wait for 400us before checking for the first time */
+ udelay(400);
+ do {
+ *status = readl_relaxed(
+ qfp_priv->base + qfp_priv->blow_status_offset);
+
+ if (!(*status & QFPROM_BLOW_STATUS_BUSY))
+ return 0;
+
+ timeout--;
+ udelay(1);
+ } while (timeout);
+ pr_err("Timeout waiting for FUSE blow, status = %x\n", *status);
+ return -ETIMEDOUT;
+}
+
+static inline int qfp_fuse_enable_regulator(void)
+{
+ int err;
+ err = regulator_enable(qfp_priv->fuse_vdd);
+ if (err != 0)
+ pr_err("Error (%d) enabling regulator\n", err);
+ return err;
+}
+
+static inline int qfp_fuse_disable_regulator(void)
+{
+ int err;
+ err = regulator_disable(qfp_priv->fuse_vdd);
+ if (err != 0)
+ pr_err("Error (%d) disabling regulator\n", err);
+ return err;
+}
+
+static int qfp_fuse_write_word(u32 *addr, u32 data)
+{
+ u32 blow_status = 0;
+ u32 read_data;
+ int err;
+
+ /* Set QFPROM blow timer register */
+ writel_relaxed(qfp_priv->blow_timer,
+ qfp_priv->base + QFPROM_BLOW_TIMER_OFFSET);
+ mb();
+
+ /* Enable LVS0 regulator */
+ err = qfp_fuse_enable_regulator();
+ if (err != 0)
+ return err;
+
+ /*
+ * Wait for about 1ms. However msleep(1) can sleep for
+ * up to 20ms as per Documentation/timers/timers-howto.txt.
+ * Time is not a constraint here.
+ */
+
+ msleep(20);
+
+ /* Write data */
+ __raw_writel(data, addr);
+ mb();
+
+ /* blow_status = QFPROM_BLOW_STATUS_BUSY; */
+ err = qfp_fuse_wait_for_fuse_blow(&blow_status);
+ if (err) {
+ qfp_fuse_disable_regulator();
+ return err;
+ }
+
+ /* Check error status */
+ if (blow_status & QFPROM_BLOW_STATUS_ERROR) {
+ pr_err("Fuse blow status error: %d\n", blow_status);
+ qfp_fuse_disable_regulator();
+ return -EFAULT;
+ }
+
+ /* Disable regulator */
+ qfp_fuse_disable_regulator();
+ /*
+ * Wait for about 1ms. However msleep(1) can sleep for
+ * up to 20ms as per Documentation/timers/timers-howto.txt.
+ * Time is not a constraint here.
+ */
+ msleep(20);
+
+ /* Verify written data */
+ read_data = readl_relaxed(addr);
+ if (read_data != data) {
+ pr_err("Error: read/write data mismatch\n");
+ pr_err("Address = %p written data = %x read data = %x\n",
+ addr, data, read_data);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static long
+qfp_fuse_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int err = 0;
+ struct qfp_fuse_req req;
+ u32 fuse_buf[QFP_FUSE_BUF_SIZE];
+ u32 *buf = fuse_buf;
+ u32 *ptr = NULL;
+ int i;
+
+ /* Verify user arguments. */
+ if (_IOC_TYPE(cmd) != QFP_FUSE_IOC_MAGIC)
+ return -ENOTTY;
+
+ switch (cmd) {
+ case QFP_FUSE_IOC_READ:
+ if (arg == 0) {
+ pr_err("user space arg not supplied\n");
+ err = -EFAULT;
+ break;
+ }
+
+ if (copy_from_user(&req, (void __user *)arg, sizeof(req))) {
+ pr_err("Error copying req from user space\n");
+ err = -EFAULT;
+ break;
+ }
+
+ /* Check for limits */
+ if (is_usr_req_valid(&req) == false) {
+ pr_err("Invalid request\n");
+ err = -EINVAL;
+ break;
+ }
+
+ if (req.size > QFP_FUSE_BUF_SIZE) {
+ /* Allocate memory for buffer */
+ ptr = kzalloc(req.size * 4, GFP_KERNEL);
+ if (ptr == NULL) {
+ pr_alert("No memory for data\n");
+ err = -ENOMEM;
+ break;
+ }
+ buf = ptr;
+ }
+
+ if (mutex_lock_interruptible(&qfp_priv->lock)) {
+ err = -ERESTARTSYS;
+ break;
+ }
+
+ /* Read data */
+ for (i = 0; i < req.size; i++)
+ buf[i] = readl_relaxed(
+ ((u32 *) (qfp_priv->base + req.offset)) + i);
+
+ if (copy_to_user((void __user *)req.data, buf, 4*(req.size))) {
+ pr_err("Error copying to user space\n");
+ err = -EFAULT;
+ }
+
+ mutex_unlock(&qfp_priv->lock);
+ break;
+
+ case QFP_FUSE_IOC_WRITE:
+ if (arg == 0) {
+ pr_err("user space arg not supplied\n");
+ err = -EFAULT;
+ break;
+ }
+
+ if (copy_from_user(&req, (void __user *)arg, sizeof(req))) {
+ pr_err("Error copying req from user space\n");
+ err = -EFAULT;
+ break;
+ }
+
+ /* Check for limits */
+ if (is_usr_req_valid(&req) == false) {
+ pr_err("Invalid request\n");
+ err = -EINVAL;
+ break;
+ }
+
+ if (req.size > QFP_FUSE_BUF_SIZE) {
+ /* Allocate memory for buffer */
+ ptr = kzalloc(req.size * 4, GFP_KERNEL);
+ if (ptr == NULL) {
+ pr_alert("No memory for data\n");
+ err = -ENOMEM;
+ break;
+ }
+ buf = ptr;
+ }
+
+ /* Copy user data to local buffer */
+ if (copy_from_user(buf, (void __user *)req.data,
+ 4 * (req.size))) {
+ pr_err("Error copying data from user space\n");
+ err = -EFAULT;
+ break;
+ }
+
+ if (mutex_lock_interruptible(&qfp_priv->lock)) {
+ err = -ERESTARTSYS;
+ break;
+ }
+
+ /* Write data word at a time */
+ for (i = 0; i < req.size && !err; i++) {
+ err = qfp_fuse_write_word(((u32 *) (
+ qfp_priv->base + req.offset) + i), buf[i]);
+ }
+
+ mutex_unlock(&qfp_priv->lock);
+ break;
+ default:
+ pr_err("Invalid ioctl command.\n");
+ return -ENOTTY;
+ }
+
+ kfree(ptr);
+
+ return err;
+}
+
+/* Character-device callbacks reached through the misc device below. */
+static const struct file_operations qfp_fuse_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = qfp_fuse_ioctl,
+	.open = qfp_fuse_open,
+	.release = qfp_fuse_release
+};
+
+/* /dev/qfpfuse; minor number is assigned dynamically at registration. */
+static struct miscdevice qfp_fuse_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "qfpfuse",
+	.fops = &qfp_fuse_fops
+};
+
+/*
+ * qfp_get_resource() - gather QFPROM configuration for probe.
+ * @pdev:    platform device being probed
+ * @qfp_res: filled with the MMIO window, blow-status offset, blow timer
+ *           and the name of the blow-supply regulator
+ *
+ * Configuration comes from the device-tree node when present (optional
+ * properties keep their compiled-in defaults), otherwise from legacy
+ * platform_data, which is expected to be the regulator name string.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int qfp_get_resource(struct platform_device *pdev,
+			struct qfp_resource *qfp_res)
+{
+	struct resource *res;
+	const char *regulator_name = NULL;
+	uint32_t blow_status_offset = QFPROM_BLOW_STATUS_OFFSET;
+	uint32_t blow_timer = QFPROM_BLOW_TIMER_VALUE;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	if (pdev->dev.of_node) {
+		struct device_node *np = pdev->dev.of_node;
+
+		/* Optional override; must still fall inside the window. */
+		if (of_property_read_u32(np, "qcom,blow-status-offset",
+				&blow_status_offset) == 0) {
+			if ((res->start + blow_status_offset) > res->end) {
+				pr_err("Invalid blow-status-offset\n");
+				return -EINVAL;
+			}
+		}
+
+		if (of_property_read_bool(np, "vdd-blow-supply")) {
+			/* For backward compatibility, use the name
+			 * from blow_supply */
+			regulator_name = blow_supply;
+		} else {
+			pr_err("Failed to find regulator-name property\n");
+			return -EINVAL;
+		}
+
+		/* Optional; default blow_timer kept when absent. */
+		of_property_read_u32(np, "qcom,blow-timer", &blow_timer);
+
+	} else {
+		regulator_name = pdev->dev.platform_data;
+	}
+
+	if (!regulator_name)
+		return -EINVAL;
+
+	qfp_res->start = res->start;
+	qfp_res->size = resource_size(res);
+	qfp_res->blow_status_offset = blow_status_offset;
+	qfp_res->blow_timer = blow_timer;
+	qfp_res->regulator_name = regulator_name;
+
+	return 0;
+}
+
+/*
+ * qfp_fuse_probe() - map the QFPROM window, grab the blow regulator and
+ * expose the /dev/qfpfuse misc device.  Single-instance: state lives in
+ * the file-scope qfp_priv pointer.
+ *
+ * Returns 0 on success or a negative errno; all partially-acquired
+ * resources are released on failure.
+ */
+static int qfp_fuse_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct qfp_resource res;
+
+	ret = qfp_get_resource(pdev, &res);
+	if (ret)
+		return ret;
+
+	/* Initialize */
+	qfp_priv = kzalloc(sizeof(struct qfp_priv_t), GFP_KERNEL);
+
+	if (qfp_priv == NULL) {
+		pr_alert("Not enough memory to initialize device\n");
+		return -ENOMEM;
+	}
+
+	qfp_priv->base = ioremap(res.start, res.size);
+	if (!qfp_priv->base) {
+		pr_warn("ioremap failed\n");
+		/* Fix: the original fell through with ret == 0 here, so a
+		 * failed ioremap() reported probe success with a NULL
+		 * register base.
+		 */
+		ret = -ENOMEM;
+		goto err;
+	}
+	qfp_priv->size = res.size;
+	qfp_priv->blow_status_offset = res.blow_status_offset;
+	qfp_priv->blow_timer = res.blow_timer;
+
+	/* Get regulator for QFPROM writes */
+	qfp_priv->fuse_vdd = regulator_get(&pdev->dev, res.regulator_name);
+	if (IS_ERR(qfp_priv->fuse_vdd)) {
+		ret = PTR_ERR(qfp_priv->fuse_vdd);
+		pr_err("Err (%d) getting %s\n", ret, res.regulator_name);
+		qfp_priv->fuse_vdd = NULL;
+		goto err;
+	}
+
+	mutex_init(&qfp_priv->lock);
+
+	ret = misc_register(&qfp_fuse_dev);
+	if (ret < 0)
+		goto err;
+
+	pr_info("Fuse driver base:%p end:%p\n", qfp_priv->base,
+			qfp_priv->base + qfp_priv->size);
+	return 0;
+
+err:
+	if (qfp_priv->fuse_vdd)
+		regulator_put(qfp_priv->fuse_vdd);
+
+	/* Fix: unmap the register window on failure; the original leaked
+	 * the ioremap() mapping on the regulator/misc_register paths.
+	 */
+	if (qfp_priv->base)
+		iounmap((void __iomem *)qfp_priv->base);
+
+	kfree(qfp_priv);
+	qfp_priv = NULL;
+
+	return ret;
+}
+
+/*
+ * qfp_fuse_remove() - tear down the misc device and release driver state.
+ *
+ * Deregisters first so no new ioctl can race with the teardown, then
+ * releases the regulator, the MMIO mapping and the private structure.
+ */
+static int qfp_fuse_remove(struct platform_device *plat)
+{
+	misc_deregister(&qfp_fuse_dev);
+
+	/* Fix: the original guarded only the regulator_put() with a NULL
+	 * check and then dereferenced qfp_priv unconditionally for
+	 * iounmap()/kfree(), crashing if qfp_priv were ever NULL.
+	 */
+	if (qfp_priv) {
+		if (qfp_priv->fuse_vdd)
+			regulator_put(qfp_priv->fuse_vdd);
+
+		iounmap((void __iomem *)qfp_priv->base);
+		kfree(qfp_priv);
+		qfp_priv = NULL;
+	}
+
+	pr_info("Removing Fuse driver\n");
+	return 0;
+}
+
+/* Matched against the "qcom,qfp-fuse" DT compatible; marked unused for
+ * non-DT builds where of_match_ptr() compiles the table reference away.
+ */
+static struct of_device_id __attribute__ ((unused)) qfp_fuse_of_match[] = {
+	{ .compatible = "qcom,qfp-fuse", },
+	{}
+};
+
+static struct platform_driver qfp_fuse_driver = {
+	.probe = qfp_fuse_probe,
+	.remove = qfp_fuse_remove,
+	.driver = {
+		.name = "qfp_fuse_driver",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(qfp_fuse_of_match),
+	},
+};
+
+/* Module entry/exit: plain platform driver (de)registration. */
+static int __init qfp_fuse_init(void)
+{
+	return platform_driver_register(&qfp_fuse_driver);
+}
+
+static void __exit qfp_fuse_exit(void)
+{
+	platform_driver_unregister(&qfp_fuse_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Rohit Vaswani <rvaswani@codeaurora.org>");
+MODULE_DESCRIPTION("Driver to read/write to QFPROM fuses.");
+MODULE_VERSION("1.01");
+
+module_init(qfp_fuse_init);
+module_exit(qfp_fuse_exit);
diff --git a/drivers/misc/qpnp-misc.c b/drivers/misc/qpnp-misc.c
new file mode 100644
index 000000000000..4b5d7a03aa18
--- /dev/null
+++ b/drivers/misc/qpnp-misc.c
@@ -0,0 +1,189 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/qpnp-misc.h>
+
+#define QPNP_MISC_DEV_NAME "qcom,qpnp-misc"
+
+#define REG_DIG_MAJOR_REV 0x01
+#define REG_SUBTYPE 0x05
+
+static DEFINE_MUTEX(qpnp_misc_dev_list_mutex);
+static LIST_HEAD(qpnp_misc_dev_list);
+
+/**
+ * struct qpnp_misc_dev - holds controller device specific information
+ * @list: Doubly-linked list parameter linking to other
+ * qpnp_misc devices.
+ * @mutex: Mutex lock that is used to ensure mutual
+ * exclusion between probing and accessing misc
+ * driver information
+ * @dev: Device pointer to the misc device
+ * @resource: Resource pointer that holds base address
+ * @spmi: Spmi pointer which holds spmi information
+ */
+struct qpnp_misc_dev {
+ struct list_head list;
+ struct mutex mutex;
+ struct device *dev;
+ struct resource *resource;
+ struct spmi_device *spmi;
+};
+
+struct qpnp_misc_version {
+ u8 subtype;
+ u8 dig_major_rev;
+};
+
+static struct of_device_id qpnp_misc_match_table[] = {
+ { .compatible = QPNP_MISC_DEV_NAME },
+ {}
+};
+
+/*
+ * Read one byte from the MISC peripheral at @addr over SPMI.
+ *
+ * NOTE(review): a failed read logs an error and returns 0, which is
+ * indistinguishable from a register that genuinely reads back 0; the
+ * only caller treats 0 as "no revision match", so failures degrade to
+ * "IRQs unavailable".
+ */
+static u8 qpnp_read_byte(struct spmi_device *spmi, u16 addr)
+{
+	int rc;
+	u8 val;
+
+	rc = spmi_ext_register_readl(spmi->ctrl, spmi->sid, addr, &val, 1);
+	if (rc) {
+		pr_err("SPMI read failed rc=%d\n", rc);
+		return 0;
+	}
+	return val;
+}
+
+static struct qpnp_misc_version irq_support_version[] = {
+ {0x01, 0x02}, /* PM8941 */
+ {0x07, 0x00}, /* PM8226 */
+ {0x09, 0x00}, /* PMA8084 */
+};
+
+/*
+ * Decide whether this MISC peripheral revision supports interrupts.
+ *
+ * Reads the subtype and digital-major-revision registers and compares
+ * them against the irq_support_version[] whitelist: a hit needs an exact
+ * subtype match and at least the listed digital major revision.
+ */
+static bool __misc_irqs_available(struct qpnp_misc_dev *dev)
+{
+	int idx;
+	u8 type, rev;
+
+	type = qpnp_read_byte(dev->spmi, dev->resource->start + REG_SUBTYPE);
+	pr_debug("subtype = 0x%02X\n", type);
+
+	rev = qpnp_read_byte(dev->spmi,
+			dev->resource->start + REG_DIG_MAJOR_REV);
+	pr_debug("dig_major rev = 0x%02X\n", rev);
+
+	for (idx = 0; idx < ARRAY_SIZE(irq_support_version); idx++) {
+		if (type != irq_support_version[idx].subtype)
+			continue;
+		if (rev >= irq_support_version[idx].dig_major_rev)
+			return true;
+	}
+
+	return false;
+}
+
+/**
+ * qpnp_misc_irqs_available - check whether a consumer's PMIC MISC block
+ * supports interrupts.
+ * @consumer_dev: device whose DT node may carry a "qcom,misc-ref" phandle
+ *
+ * Returns 1 when the referenced, already-probed MISC peripheral supports
+ * IRQs, 0 when the consumer has no phandle (or the revision lacks IRQs),
+ * -EINVAL on a bad argument, or -EPROBE_DEFER when the MISC device has
+ * not probed yet.
+ */
+int qpnp_misc_irqs_available(struct device *consumer_dev)
+{
+	struct device_node *misc_node = NULL;
+	struct qpnp_misc_dev *mdev = NULL;
+	struct qpnp_misc_dev *mdev_found = NULL;
+
+	if (IS_ERR_OR_NULL(consumer_dev)) {
+		pr_err("Invalid consumer device pointer\n");
+		return -EINVAL;
+	}
+
+	misc_node = of_parse_phandle(consumer_dev->of_node, "qcom,misc-ref", 0);
+	if (!misc_node) {
+		pr_debug("Could not find qcom,misc-ref property in %s\n",
+			consumer_dev->of_node->full_name);
+		return 0;
+	}
+
+	/* Match the phandle target against the probed-device list. */
+	mutex_lock(&qpnp_misc_dev_list_mutex);
+	list_for_each_entry(mdev, &qpnp_misc_dev_list, list) {
+		if (mdev->dev->of_node == misc_node) {
+			mdev_found = mdev;
+			break;
+		}
+	}
+	mutex_unlock(&qpnp_misc_dev_list_mutex);
+
+	if (!mdev_found) {
+		/* No MISC device was found. This API should only
+		 * be called by drivers which have specified the
+		 * misc phandle in their device tree node */
+		pr_err("no probed misc device found\n");
+		return -EPROBE_DEFER;
+	}
+
+	return __misc_irqs_available(mdev_found);
+}
+
+/*
+ * qpnp_misc_probe() - register one MISC peripheral instance.
+ *
+ * Captures the SPMI resource and device handles and appends the instance
+ * to qpnp_misc_dev_list so qpnp_misc_irqs_available() can find it later.
+ */
+static int qpnp_misc_probe(struct spmi_device *spmi)
+{
+	struct resource *resource;
+	struct qpnp_misc_dev *mdev;
+
+	resource = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
+	if (!resource) {
+		pr_err("Unable to get spmi resource for MISC\n");
+		return -EINVAL;
+	}
+
+	/* Fix: dropped the misleading dead initializer
+	 * "mdev = ERR_PTR(-EINVAL)"; mdev is always assigned before use.
+	 */
+	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+	if (!mdev) {
+		pr_err("allocation failed\n");
+		return -ENOMEM;
+	}
+
+	mdev->spmi = spmi;
+	mdev->dev = &(spmi->dev);
+	mdev->resource = resource;
+
+	mutex_lock(&qpnp_misc_dev_list_mutex);
+	list_add_tail(&mdev->list, &qpnp_misc_dev_list);
+	mutex_unlock(&qpnp_misc_dev_list_mutex);
+
+	pr_debug("probed successfully\n");
+	return 0;
+}
+
+static struct spmi_driver qpnp_misc_driver = {
+ .probe = qpnp_misc_probe,
+ .driver = {
+ .name = QPNP_MISC_DEV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = qpnp_misc_match_table,
+ },
+};
+
+/* Module entry/exit: plain SPMI driver (de)registration. */
+static int __init qpnp_misc_init(void)
+{
+	return spmi_driver_register(&qpnp_misc_driver);
+}
+
+static void __exit qpnp_misc_exit(void)
+{
+	/* Fix: no "return <expression>;" in a void function. */
+	spmi_driver_unregister(&qpnp_misc_driver);
+}
+
+module_init(qpnp_misc_init);
+module_exit(qpnp_misc_exit);
+
+MODULE_DESCRIPTION(QPNP_MISC_DEV_NAME);
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" QPNP_MISC_DEV_NAME);
diff --git a/drivers/misc/smsc_hub.c b/drivers/misc/smsc_hub.c
new file mode 100644
index 000000000000..fc3cca354c96
--- /dev/null
+++ b/drivers/misc/smsc_hub.c
@@ -0,0 +1,851 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/smsc_hub.h>
+#include <linux/module.h>
+#include <mach/msm_xo.h>
+
+static unsigned short normal_i2c[] = {
+0, I2C_CLIENT_END };
+
+struct hsic_hub {
+ struct device *dev;
+ struct smsc_hub_platform_data *pdata;
+ struct i2c_client *client;
+ struct msm_xo_voter *xo_handle;
+ struct clk *ref_clk;
+ struct regulator *hsic_hub_reg;
+ struct regulator *int_pad_reg, *hub_vbus_reg;
+ bool enabled;
+ struct pinctrl *smsc_pinctrl;
+};
+static struct hsic_hub *smsc_hub;
+static struct platform_driver smsc_hub_driver;
+
+/* APIs for setting/clearing bits and for reading/writing values */
+static inline int hsic_hub_get_u8(struct i2c_client *client, u8 reg)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ if (ret < 0)
+ pr_err("%s:i2c_read8 failed\n", __func__);
+ return ret;
+}
+
+static inline int hsic_hub_get_u16(struct i2c_client *client, u8 reg)
+{
+ int ret;
+
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ pr_err("%s:i2c_read16 failed\n", __func__);
+ return ret;
+}
+
+static inline int hsic_hub_write_word_data(struct i2c_client *client, u8 reg,
+ u16 value)
+{
+ int ret;
+
+ ret = i2c_smbus_write_word_data(client, reg, value);
+ if (ret)
+ pr_err("%s:i2c_write16 failed\n", __func__);
+ return ret;
+}
+
+static inline int hsic_hub_write_byte_data(struct i2c_client *client, u8 reg,
+ u8 value)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, reg, value);
+ if (ret)
+ pr_err("%s:i2c_write_byte_data failed\n", __func__);
+ return ret;
+}
+
+static inline int hsic_hub_set_bits(struct i2c_client *client, u8 reg,
+ u8 value)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ if (ret < 0) {
+ pr_err("%s:i2c_read_byte_data failed\n", __func__);
+ return ret;
+ }
+ return i2c_smbus_write_byte_data(client, reg, (ret | value));
+}
+
+static inline int hsic_hub_clear_bits(struct i2c_client *client, u8 reg,
+ u8 value)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ if (ret < 0) {
+ pr_err("%s:i2c_read_byte_data failed\n", __func__);
+ return ret;
+ }
+ return i2c_smbus_write_byte_data(client, reg, (ret & ~value));
+}
+
+/*
+ * Issue the SMSC4604 "attach" command (0xAA 0x55 0x00) over I2C.  The
+ * part expects this handshake when its I2C bus is wired up.
+ */
+static int smsc4604_send_connect_cmd(struct i2c_client *client)
+{
+	u8 cmd[3] = {0xAA, 0x55, 0x00};
+
+	if (i2c_master_send(client, cmd, 3) != 3) {
+		dev_err(&client->dev, "%s: i2c send failed\n", __func__);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int i2c_hsic_hub_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_WORD_DATA))
+ return -EIO;
+
+ switch (smsc_hub->pdata->model_id) {
+ case SMSC3503_ID:
+ /*
+ * CONFIG_N bit in SP_ILOCK register has to be set before
+ * changing other registers to change default configuration
+ * of hsic hub.
+ */
+ hsic_hub_set_bits(client, SMSC3503_SP_ILOCK, CONFIG_N);
+
+ /*
+ * Can change default configuartion like VID,PID,
+ * strings etc by writing new values to hsic hub registers
+ */
+ hsic_hub_write_word_data(client, SMSC3503_VENDORID, 0x05C6);
+
+ /*
+ * CONFIG_N bit in SP_ILOCK register has to be cleared
+ * for new values in registers to be effective after
+ * writing to other registers.
+ */
+ hsic_hub_clear_bits(client, SMSC3503_SP_ILOCK, CONFIG_N);
+ break;
+ case SMSC4604_ID:
+ /*
+ * SMSC4604 requires an I2C attach command to be issued
+ * if I2C bus is connected
+ */
+ return smsc4604_send_connect_cmd(client);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Nothing to undo: probe only wrote hub registers over I2C. */
+static int i2c_hsic_hub_remove(struct i2c_client *client)
+{
+	return 0;
+}
+
+static const struct i2c_device_id hsic_hub_id[] = {
+	{"i2c_hsic_hub", 0},
+	{}
+};
+/* Fix: the original registered a nonexistent symbol ("hsichub_id");
+ * the table is named hsic_hub_id.
+ */
+MODULE_DEVICE_TABLE(i2c, hsic_hub_id);
+
+static struct i2c_driver hsic_hub_driver = {
+ .driver = {
+ .name = "i2c_hsic_hub",
+ },
+ .probe = i2c_hsic_hub_probe,
+ .remove = i2c_hsic_hub_remove,
+ .id_table = hsic_hub_id,
+};
+
+/*
+ * msm_hsic_hub_init_clock() - acquire (@init != 0) or release (@init == 0)
+ * the hub's reference clock.
+ *
+ * Prefers a dedicated "ref_clk"; when none exists, falls back to voting
+ * the TCXO D1 buffer.  A populated xo_clk_gpio means an external
+ * oscillator feeds the hub, in which case no clock handling is needed.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int msm_hsic_hub_init_clock(struct hsic_hub *hub, int init)
+{
+	int ret;
+
+	/*
+	 * xo_clk_gpio controls an external xo clock which feeds
+	 * the hub reference clock. When this gpio is present,
+	 * assume that no other clocks are required.
+	 */
+	if (hub->pdata->xo_clk_gpio)
+		return 0;
+
+	if (!init) {
+		/* Release whichever source init-time setup acquired. */
+		if (!IS_ERR(hub->ref_clk))
+			clk_disable_unprepare(hub->ref_clk);
+		else
+			msm_xo_put(smsc_hub->xo_handle);
+
+		return 0;
+	}
+
+	hub->ref_clk = devm_clk_get(hub->dev, "ref_clk");
+	if (IS_ERR(hub->ref_clk)) {
+		dev_dbg(hub->dev, "failed to get ref_clk\n");
+
+		/* In the absence of dedicated ref_clk, xo clocks the HUB */
+		smsc_hub->xo_handle = msm_xo_get(MSM_XO_TCXO_D1, "hsic_hub");
+		if (IS_ERR(smsc_hub->xo_handle)) {
+			dev_err(hub->dev, "not able to get the handle\n"
+				"for TCXO D1 buffer\n");
+			return PTR_ERR(smsc_hub->xo_handle);
+		}
+
+		ret = msm_xo_mode_vote(smsc_hub->xo_handle, MSM_XO_MODE_ON);
+		if (ret) {
+			dev_err(hub->dev, "failed to vote for TCXO\n"
+				"D1 buffer\n");
+			msm_xo_put(smsc_hub->xo_handle);
+			return ret;
+		}
+	} else {
+		ret = clk_prepare_enable(hub->ref_clk);
+		if (ret)
+			dev_err(hub->dev, "clk_enable failed for ref_clk\n");
+	}
+
+	return ret;
+}
+#define HSIC_HUB_INT_VOL_MIN 1800000 /* uV */
+#define HSIC_HUB_INT_VOL_MAX 2950000 /* uV */
+static int msm_hsic_hub_init_gpio(struct hsic_hub *hub, int init)
+{
+ int ret = 0;
+ struct pinctrl_state *set_state;
+ struct smsc_hub_platform_data *pdata = hub->pdata;
+
+ if (!init) {
+ if (!IS_ERR(smsc_hub->int_pad_reg)) {
+ regulator_disable(smsc_hub->int_pad_reg);
+ regulator_set_voltage(smsc_hub->int_pad_reg, 0,
+ HSIC_HUB_INT_VOL_MAX);
+ }
+ if (smsc_hub->smsc_pinctrl) {
+ set_state = pinctrl_lookup_state(smsc_hub->smsc_pinctrl,
+ "smsc_sleep");
+ if (IS_ERR(set_state)) {
+ pr_err("cannot get smsc pinctrl sleep state\n");
+ ret = PTR_ERR(set_state);
+ goto out;
+ }
+ ret = pinctrl_select_state(smsc_hub->smsc_pinctrl,
+ set_state);
+ }
+ goto out;
+ }
+
+ /* Get pinctrl if target uses pinctrl */
+ smsc_hub->smsc_pinctrl = devm_pinctrl_get(smsc_hub->dev);
+ if (IS_ERR(smsc_hub->smsc_pinctrl)) {
+ if (of_property_read_bool(smsc_hub->dev->of_node,
+ "pinctrl-names")) {
+ dev_err(smsc_hub->dev, "Error encountered while getting pinctrl");
+ ret = PTR_ERR(smsc_hub->smsc_pinctrl);
+ goto out;
+ }
+ dev_dbg(smsc_hub->dev, "Target does not use pinctrl\n");
+ smsc_hub->smsc_pinctrl = NULL;
+ }
+
+ if (smsc_hub->smsc_pinctrl) {
+ set_state = pinctrl_lookup_state(smsc_hub->smsc_pinctrl,
+ "smsc_active");
+ if (IS_ERR(set_state)) {
+ pr_err("cannot get smsc pinctrl active state\n");
+ ret = PTR_ERR(set_state);
+ goto out;
+ }
+ ret = pinctrl_select_state(smsc_hub->smsc_pinctrl, set_state);
+ if (ret) {
+ pr_err("cannot set smsc pinctrl active state\n");
+ goto out;
+ }
+ }
+
+ ret = devm_gpio_request(hub->dev, pdata->hub_reset, "HSIC_HUB_RESET");
+ if (ret < 0) {
+ dev_err(hub->dev, "gpio request failed for GPIO%d\n",
+ pdata->hub_reset);
+ goto out;
+ }
+
+ if (IS_ERR_OR_NULL(smsc_hub->smsc_pinctrl)) {
+ if (pdata->refclk_gpio) {
+ ret = devm_gpio_request(hub->dev, pdata->refclk_gpio,
+ "HSIC_HUB_CLK");
+ if (ret < 0)
+ dev_err(hub->dev, "gpio request failed (CLK GPIO)\n");
+ }
+
+ if (pdata->xo_clk_gpio) {
+ ret = devm_gpio_request(hub->dev, pdata->xo_clk_gpio,
+ "HSIC_HUB_XO_CLK");
+ if (ret < 0) {
+ dev_err(hub->dev, "gpio request failed(XO CLK GPIO)\n");
+ goto out;
+ }
+ }
+
+ if (pdata->int_gpio) {
+ ret = devm_gpio_request(hub->dev, pdata->int_gpio,
+ "HSIC_HUB_INT");
+ if (ret < 0) {
+ dev_err(hub->dev, "gpio request failed (INT GPIO)\n");
+ goto out;
+ }
+ }
+ }
+ if (of_get_property(smsc_hub->dev->of_node, "hub-int-supply", NULL)) {
+ /* Enable LDO if required for external pull-up */
+ smsc_hub->int_pad_reg = devm_regulator_get(hub->dev, "hub-int");
+ if (IS_ERR(smsc_hub->int_pad_reg)) {
+ dev_dbg(hub->dev, "unable to get ext hub_int reg\n");
+ } else {
+ ret = regulator_set_voltage(smsc_hub->int_pad_reg,
+ HSIC_HUB_INT_VOL_MIN,
+ HSIC_HUB_INT_VOL_MAX);
+ if (ret) {
+ dev_err(hub->dev, "unable to set the voltage\n"
+ " for hsic hub int reg\n");
+ goto out;
+ }
+ ret = regulator_enable(smsc_hub->int_pad_reg);
+ if (ret) {
+ dev_err(hub->dev, "unable to enable int reg\n");
+ regulator_set_voltage(smsc_hub->int_pad_reg, 0,
+ HSIC_HUB_INT_VOL_MAX);
+ goto out;
+ }
+ }
+ }
+out:
+ return ret;
+}
+
+#define HSIC_HUB_VDD_VOL_MIN 1650000 /* uV */
+#define HSIC_HUB_VDD_VOL_MAX 1950000 /* uV */
+#define HSIC_HUB_VDD_LOAD 36000 /* uA */
+static int msm_hsic_hub_init_vdd(struct hsic_hub *hub, int init)
+{
+ int ret;
+
+ if (!of_get_property(hub->dev->of_node, "ext-hub-vddio-supply", NULL))
+ return 0;
+
+ if (!init) {
+ if (!IS_ERR(smsc_hub->hsic_hub_reg)) {
+ regulator_disable(smsc_hub->hsic_hub_reg);
+ regulator_set_optimum_mode(smsc_hub->hsic_hub_reg, 0);
+ regulator_set_voltage(smsc_hub->hsic_hub_reg, 0,
+ HSIC_HUB_VDD_VOL_MAX);
+ }
+ return 0;
+ }
+
+ smsc_hub->hsic_hub_reg = devm_regulator_get(hub->dev, "ext-hub-vddio");
+ if (IS_ERR(smsc_hub->hsic_hub_reg)) {
+ dev_dbg(hub->dev, "unable to get ext hub vddcx\n");
+ } else {
+ ret = regulator_set_voltage(smsc_hub->hsic_hub_reg,
+ HSIC_HUB_VDD_VOL_MIN,
+ HSIC_HUB_VDD_VOL_MAX);
+ if (ret) {
+ dev_err(hub->dev, "unable to set the voltage\n"
+ "for hsic hub reg\n");
+ return ret;
+ }
+
+ ret = regulator_set_optimum_mode(smsc_hub->hsic_hub_reg,
+ HSIC_HUB_VDD_LOAD);
+ if (ret < 0) {
+ dev_err(hub->dev, "Unable to set mode of VDDCX\n");
+ goto reg_optimum_mode_fail;
+ }
+
+ ret = regulator_enable(smsc_hub->hsic_hub_reg);
+ if (ret) {
+ dev_err(hub->dev, "unable to enable ext hub vddcx\n");
+ goto reg_enable_fail;
+ }
+ }
+
+ return 0;
+
+reg_enable_fail:
+ regulator_set_optimum_mode(smsc_hub->hsic_hub_reg, 0);
+reg_optimum_mode_fail:
+ regulator_set_voltage(smsc_hub->hsic_hub_reg, 0,
+ HSIC_HUB_VDD_VOL_MAX);
+
+ return ret;
+}
+
+static int smsc_hub_enable(struct hsic_hub *hub)
+{
+ struct smsc_hub_platform_data *pdata = hub->pdata;
+ struct of_dev_auxdata *hsic_host_auxdata = dev_get_platdata(hub->dev);
+ struct device_node *node = hub->dev->of_node;
+ int ret;
+
+ ret = gpio_direction_output(pdata->xo_clk_gpio, 1);
+ if (ret < 0) {
+ dev_err(hub->dev, "fail to enable xo clk\n");
+ return ret;
+ }
+
+ ret = gpio_direction_output(pdata->hub_reset, 0);
+ if (ret < 0) {
+ dev_err(hub->dev, "fail to assert reset\n");
+ goto disable_xo;
+ }
+ udelay(5);
+ ret = gpio_direction_output(pdata->hub_reset, 1);
+ if (ret < 0) {
+ dev_err(hub->dev, "fail to de-assert reset\n");
+ goto disable_xo;
+ }
+
+ ret = of_platform_populate(node, NULL, hsic_host_auxdata,
+ hub->dev);
+ if (ret < 0) {
+ dev_err(smsc_hub->dev, "fail to add child with %d\n",
+ ret);
+ goto reset;
+ }
+
+ pm_runtime_allow(hub->dev);
+
+ return 0;
+
+reset:
+ gpio_direction_output(pdata->hub_reset, 0);
+disable_xo:
+ gpio_direction_output(pdata->xo_clk_gpio, 0);
+
+ return ret;
+}
+
+static int sms_hub_remove_child(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ /*
+ * Runtime PM is disabled before the driver's remove method
+ * is called. So resume the device before unregistering
+ * the device. Don't worry about the PM usage counter as
+ * the device will be freed.
+ */
+ pm_runtime_get_sync(dev);
+ of_device_unregister(pdev);
+
+ return 0;
+}
+
+static int smsc_hub_disable(struct hsic_hub *hub)
+{
+ struct smsc_hub_platform_data *pdata = hub->pdata;
+
+ pm_runtime_forbid(hub->dev);
+ device_for_each_child(hub->dev, NULL, sms_hub_remove_child);
+ gpio_direction_output(pdata->hub_reset, 0);
+ gpio_direction_output(pdata->xo_clk_gpio, 0);
+
+ return 0;
+}
+
+static ssize_t smsc_hub_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", smsc_hub->enabled ?
+ "enabled" : "disabled");
+}
+
+static ssize_t smsc_hub_enable_store(struct device *dev,
+ struct device_attribute *attr, const char
+ *buf, size_t size)
+{
+
+ bool enable;
+ int val;
+ int ret = size;
+
+ if (sscanf(buf, "%d", &val) == 1) {
+ enable = !!val;
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (smsc_hub->enabled == enable)
+ goto out;
+
+ if (enable)
+ ret = smsc_hub_enable(smsc_hub);
+ else
+ ret = smsc_hub_disable(smsc_hub);
+
+ pr_debug("smsc hub %s status %d\n", enable ?
+ "Enable" : "Disable", ret);
+ if (!ret) {
+ ret = size;
+ smsc_hub->enabled = enable;
+ }
+out:
+ return ret;
+}
+
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, smsc_hub_enable_show,
+ smsc_hub_enable_store);
+
+/*
+ * msm_hub_dt_to_pdata() - build platform data from the device-tree node.
+ *
+ * "smsc,model-id" is mandatory; a value of 0 means a standalone HSIC
+ * configuration with no hub, so the GPIO properties are skipped.  The
+ * reset GPIO is required for real hub models; refclk/int/xo-clk GPIOs
+ * are optional and recorded as 0 when absent.
+ *
+ * Returns the devm-allocated pdata or an ERR_PTR on failure.
+ */
+struct smsc_hub_platform_data *msm_hub_dt_to_pdata(
+				struct platform_device *pdev)
+{
+	int rc;
+	u32 temp_val;
+	struct device_node *node = pdev->dev.of_node;
+	struct smsc_hub_platform_data *pdata;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata) {
+		dev_err(&pdev->dev, "unable to allocate platform data\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	rc = of_property_read_u32(node, "smsc,model-id", &temp_val);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to read smsc,model-id\n");
+		return ERR_PTR(rc);
+	} else {
+		pdata->model_id = temp_val;
+		/* model 0 == standalone HSIC: no hub GPIOs to parse */
+		if (pdata->model_id == 0)
+			return pdata;
+	}
+
+	pdata->hub_reset = of_get_named_gpio(node, "smsc,reset-gpio", 0);
+	if (pdata->hub_reset < 0)
+		return ERR_PTR(pdata->hub_reset);
+
+	/* Remaining GPIOs are optional; 0 means "not wired". */
+	pdata->refclk_gpio = of_get_named_gpio(node, "smsc,refclk-gpio", 0);
+	if (pdata->refclk_gpio < 0)
+		pdata->refclk_gpio = 0;
+
+	pdata->int_gpio = of_get_named_gpio(node, "smsc,int-gpio", 0);
+	if (pdata->int_gpio < 0)
+		pdata->int_gpio = 0;
+
+	pdata->xo_clk_gpio = of_get_named_gpio(node, "smsc,xo-clk-gpio", 0);
+	if (pdata->xo_clk_gpio < 0)
+		pdata->xo_clk_gpio = 0;
+
+	return pdata;
+}
+
+static int smsc_hub_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct smsc_hub_platform_data *pdata;
+ struct device_node *node = pdev->dev.of_node;
+ struct i2c_adapter *i2c_adap;
+ struct i2c_board_info i2c_info;
+ struct of_dev_auxdata *hsic_host_auxdata = NULL;
+
+ if (pdev->dev.of_node) {
+ dev_dbg(&pdev->dev, "device tree enabled\n");
+ hsic_host_auxdata = dev_get_platdata(&pdev->dev);
+ pdata = msm_hub_dt_to_pdata(pdev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ } else {
+ pdata = pdev->dev.platform_data;
+ }
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data\n");
+ return -ENODEV;
+ }
+
+ if (pdata->model_id == 0) {
+ dev_dbg(&pdev->dev, "standalone HSIC config enabled\n");
+ return of_platform_populate(node, NULL,
+ hsic_host_auxdata, &pdev->dev);
+ }
+
+ if (!pdata->hub_reset)
+ return -EINVAL;
+
+ smsc_hub = devm_kzalloc(&pdev->dev, sizeof(*smsc_hub), GFP_KERNEL);
+ if (!smsc_hub)
+ return -ENOMEM;
+
+ smsc_hub->dev = &pdev->dev;
+ smsc_hub->pdata = pdata;
+
+ if (of_get_property(pdev->dev.of_node, "hub-vbus-supply", NULL)) {
+ smsc_hub->hub_vbus_reg = devm_regulator_get(&pdev->dev,
+ "hub-vbus");
+ ret = PTR_ERR(smsc_hub->hub_vbus_reg);
+ if (ret == -EPROBE_DEFER) {
+ dev_dbg(&pdev->dev, "failed to get hub_vbus\n");
+ return ret;
+ }
+ }
+
+ ret = msm_hsic_hub_init_vdd(smsc_hub, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init hub VDD\n");
+ return ret;
+ }
+ ret = msm_hsic_hub_init_clock(smsc_hub, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init hub clock\n");
+ goto uninit_vdd;
+ }
+ ret = msm_hsic_hub_init_gpio(smsc_hub, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init hub gpios\n");
+ goto uninit_clock;
+ }
+
+ if (pdata->model_id == SMSC3502_ID) {
+ ret = device_create_file(&pdev->dev, &dev_attr_enable);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "fail to create sysfs file\n");
+ goto uninit_gpio;
+ }
+ pm_runtime_forbid(&pdev->dev);
+ goto done;
+ }
+
+ gpio_direction_output(pdata->hub_reset, 0);
+ /*
+ * Hub reset should be asserted for minimum 2microsec
+ * before deasserting.
+ */
+ udelay(5);
+ gpio_direction_output(pdata->hub_reset, 1);
+
+ if (!IS_ERR_OR_NULL(smsc_hub->hub_vbus_reg)) {
+ ret = regulator_enable(smsc_hub->hub_vbus_reg);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable hub_vbus\n");
+ goto uninit_gpio;
+ }
+ }
+
+ ret = i2c_add_driver(&hsic_hub_driver);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to add I2C hsic_hub_driver\n");
+ goto i2c_add_fail;
+ }
+ usleep_range(10000, 12000);
+ i2c_adap = i2c_get_adapter(SMSC_GSBI_I2C_BUS_ID);
+
+ if (!i2c_adap) {
+ dev_err(&pdev->dev, "failed to get i2c adapter\n");
+ i2c_del_driver(&hsic_hub_driver);
+ goto i2c_add_fail;
+ }
+
+ memset(&i2c_info, 0, sizeof(struct i2c_board_info));
+ strlcpy(i2c_info.type, "i2c_hsic_hub", I2C_NAME_SIZE);
+
+ /* 250ms delay is required for SMSC4604 HUB to get I2C up */
+ msleep(250);
+
+ /* Assign I2C slave address per SMSC model */
+ switch (pdata->model_id) {
+ case SMSC3503_ID:
+ normal_i2c[0] = SMSC3503_I2C_ADDR;
+ break;
+ case SMSC4604_ID:
+ normal_i2c[0] = SMSC4604_I2C_ADDR;
+ break;
+ default:
+ dev_err(&pdev->dev, "unsupported SMSC model-id\n");
+ i2c_put_adapter(i2c_adap);
+ i2c_del_driver(&hsic_hub_driver);
+ goto uninit_gpio;
+ }
+
+ smsc_hub->client = i2c_new_probed_device(i2c_adap, &i2c_info,
+ normal_i2c, NULL);
+ i2c_put_adapter(i2c_adap);
+
+i2c_add_fail:
+ ret = of_platform_populate(node, NULL, hsic_host_auxdata, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add child node, ret=%d\n", ret);
+ goto uninit_gpio;
+ }
+
+ smsc_hub->enabled = true;
+
+ if (!smsc_hub->client)
+ dev_err(&pdev->dev,
+ "failed to connect to smsc_hub through I2C\n");
+
+done:
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ return 0;
+
+uninit_gpio:
+ msm_hsic_hub_init_gpio(smsc_hub, 0);
+uninit_clock:
+ msm_hsic_hub_init_clock(smsc_hub, 0);
+uninit_vdd:
+ msm_hsic_hub_init_vdd(smsc_hub, 0);
+
+ return ret;
+}
+
+/*
+ * smsc_hub_remove() - undo probe in reverse order: sysfs node (SMSC3502
+ * only), I2C client and driver, runtime PM, vbus regulator, then the
+ * gpio/clock/vdd resources via the shared init(…, 0) helpers.
+ */
+static int smsc_hub_remove(struct platform_device *pdev)
+{
+	const struct smsc_hub_platform_data *pdata;
+
+	/* Probe may have bailed before allocating smsc_hub. */
+	if (!smsc_hub)
+		return 0;
+
+	pdata = smsc_hub->pdata;
+	if (pdata->model_id == SMSC3502_ID)
+		device_remove_file(&pdev->dev, &dev_attr_enable);
+	if (smsc_hub->client) {
+		i2c_unregister_device(smsc_hub->client);
+		smsc_hub->client = NULL;
+		i2c_del_driver(&hsic_hub_driver);
+	}
+	pm_runtime_disable(&pdev->dev);
+
+	if (!IS_ERR_OR_NULL(smsc_hub->hub_vbus_reg))
+		regulator_disable(smsc_hub->hub_vbus_reg);
+	msm_hsic_hub_init_gpio(smsc_hub, 0);
+	msm_hsic_hub_init_clock(smsc_hub, 0);
+	msm_hsic_hub_init_vdd(smsc_hub, 0);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int msm_smsc_runtime_idle(struct device *dev)
+{
+ dev_dbg(dev, "SMSC HUB runtime idle\n");
+
+ return 0;
+}
+
+static int smsc_hub_lpm_enter(struct device *dev)
+{
+ int ret = 0;
+
+ if (!smsc_hub || !smsc_hub->enabled)
+ return 0;
+
+ if (smsc_hub->xo_handle) {
+ ret = msm_xo_mode_vote(smsc_hub->xo_handle, MSM_XO_MODE_OFF);
+ if (ret) {
+ pr_err("%s: failed to devote for TCXO\n"
+ "D1 buffer%d\n", __func__, ret);
+ }
+ } else if (smsc_hub->pdata->xo_clk_gpio) {
+ gpio_direction_output(smsc_hub->pdata->xo_clk_gpio, 0);
+ }
+
+ return ret;
+}
+
+static int smsc_hub_lpm_exit(struct device *dev)
+{
+ int ret = 0;
+
+ if (!smsc_hub || !smsc_hub->enabled)
+ return 0;
+
+ if (smsc_hub->xo_handle) {
+ ret = msm_xo_mode_vote(smsc_hub->xo_handle, MSM_XO_MODE_ON);
+ if (ret) {
+ pr_err("%s: failed to vote for TCXO\n"
+ "D1 buffer%d\n", __func__, ret);
+ }
+ } else if (smsc_hub->pdata->xo_clk_gpio) {
+ gpio_direction_output(smsc_hub->pdata->xo_clk_gpio, 1);
+ }
+
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops smsc_hub_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(smsc_hub_lpm_enter, smsc_hub_lpm_exit)
+ SET_RUNTIME_PM_OPS(smsc_hub_lpm_enter, smsc_hub_lpm_exit,
+ msm_smsc_runtime_idle)
+};
+#endif
+
+static const struct of_device_id hsic_hub_dt_match[] = {
+ { .compatible = "qcom,hsic-smsc-hub",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, hsic_hub_dt_match);
+
+static struct platform_driver smsc_hub_driver = {
+ .driver = {
+ .name = "msm_smsc_hub",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &smsc_hub_dev_pm_ops,
+#endif
+ .of_match_table = hsic_hub_dt_match,
+ },
+ .probe = smsc_hub_probe,
+ .remove = smsc_hub_remove,
+};
+
+static int __init smsc_hub_init(void)
+{
+	return platform_driver_register(&smsc_hub_driver);
+}
+
+static void __exit smsc_hub_exit(void)
+{
+	platform_driver_unregister(&smsc_hub_driver);
+}
+/* NOTE(review): registered at subsys_initcall rather than module_init —
+ * presumably so the hub is up before the HSIC host controller probes;
+ * confirm against the USB host driver's init level.
+ */
+subsys_initcall(smsc_hub_init);
+module_exit(smsc_hub_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SMSC HSIC HUB driver");
diff --git a/drivers/misc/uid_stat.c b/drivers/misc/uid_stat.c
new file mode 100644
index 000000000000..4766c1f83b94
--- /dev/null
+++ b/drivers/misc/uid_stat.c
@@ -0,0 +1,152 @@
+/* drivers/misc/uid_stat.c
+ *
+ * Copyright (C) 2008 - 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/atomic.h>
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/stat.h>
+#include <linux/uid_stat.h>
+#include <net/activity_stats.h>
+
+static DEFINE_SPINLOCK(uid_lock);
+static LIST_HEAD(uid_list);
+static struct proc_dir_entry *parent;
+
+struct uid_stat {
+ struct list_head link;
+ uid_t uid;
+ atomic_t tcp_rcv;
+ atomic_t tcp_snd;
+};
+
+/*
+ * Look up the stat entry for @uid; returns NULL if none exists yet.
+ * Caller must hold uid_lock: the list is traversed unlocked here and
+ * entries are appended under that same lock in create_stat().
+ */
+static struct uid_stat *find_uid_stat(uid_t uid) {
+	struct uid_stat *entry;
+
+	list_for_each_entry(entry, &uid_list, link) {
+		if (entry->uid == uid) {
+			return entry;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * seq_file show callback for a single per-uid counter.
+ *
+ * Counters are stored biased by INT_MIN (see create_stat()) so the full
+ * 32-bit range (~4GB of traffic) fits in an atomic_t; adding INT_MIN
+ * back here recovers the unsigned byte count reported to userspace.
+ */
+static int uid_stat_atomic_int_show(struct seq_file *m, void *v)
+{
+	unsigned int bytes;
+	atomic_t *counter = m->private;
+
+	bytes = (unsigned int) (atomic_read(counter) + INT_MIN);
+	return seq_printf(m, "%u\n", bytes);
+}
+
+static int uid_stat_read_atomic_int_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, uid_stat_atomic_int_show, PDE_DATA(inode));
+}
+
+static const struct file_operations uid_stat_read_atomic_int_fops = {
+ .open = uid_stat_read_atomic_int_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/* Create a new entry for tracking the specified uid. */
+static struct uid_stat *create_stat(uid_t uid) {
+ struct uid_stat *new_uid;
+ /* Create the uid stat struct and append it to the list. */
+ new_uid = kmalloc(sizeof(struct uid_stat), GFP_ATOMIC);
+ if (!new_uid)
+ return NULL;
+
+ new_uid->uid = uid;
+ /* Counters start at INT_MIN, so we can track 4GB of network traffic. */
+ atomic_set(&new_uid->tcp_rcv, INT_MIN);
+ atomic_set(&new_uid->tcp_snd, INT_MIN);
+
+ list_add_tail(&new_uid->link, &uid_list);
+ return new_uid;
+}
+
+/*
+ * Create /proc/uid_stat/<uid>/{tcp_snd,tcp_rcv} for a freshly created
+ * uid_stat entry.  Called without uid_lock held (proc_mkdir may sleep);
+ * the entry is already unique on uid_list so no duplicate dirs race.
+ */
+static void create_stat_proc(struct uid_stat *new_uid)
+{
+	char uid_s[32];
+	struct proc_dir_entry *entry;
+
+	/* Bounded format; a uid always fits, but never trust sprintf. */
+	snprintf(uid_s, sizeof(uid_s), "%d", new_uid->uid);
+	entry = proc_mkdir(uid_s, parent);
+	/*
+	 * Bail out if the per-uid dir could not be created: passing a NULL
+	 * parent below would silently create tcp_snd/tcp_rcv at /proc root.
+	 */
+	if (!entry) {
+		pr_err("uid_stat: failed to create proc dir for uid %d\n",
+			new_uid->uid);
+		return;
+	}
+
+	/* Keep reference to uid_stat so we know what uid to read stats from. */
+	proc_create_data("tcp_snd", S_IRUGO, entry,
+			&uid_stat_read_atomic_int_fops, &new_uid->tcp_snd);
+
+	proc_create_data("tcp_rcv", S_IRUGO, entry,
+			&uid_stat_read_atomic_int_fops, &new_uid->tcp_rcv);
+}
+
+/*
+ * Return the stat entry for @uid, creating it (and its proc entries) on
+ * first sight.  Lookup and list insertion happen under uid_lock so two
+ * concurrent callers cannot create duplicate entries; the proc files
+ * are created after the lock is dropped since proc_mkdir may sleep.
+ * Returns NULL on allocation failure.
+ */
+static struct uid_stat *find_or_create_uid_stat(uid_t uid)
+{
+	struct uid_stat *entry;
+	unsigned long flags;
+	spin_lock_irqsave(&uid_lock, flags);
+	entry = find_uid_stat(uid);
+	if (entry) {
+		spin_unlock_irqrestore(&uid_lock, flags);
+		return entry;
+	}
+	entry = create_stat(uid);
+	spin_unlock_irqrestore(&uid_lock, flags);
+	if (entry)
+		create_stat_proc(entry);
+	return entry;
+}
+
+/*
+ * Account @size bytes of TCP transmit traffic to @uid, also poking the
+ * global activity tracker.  Returns 0 on success, -1 if the stat entry
+ * could not be allocated.
+ */
+int uid_stat_tcp_snd(uid_t uid, int size) {
+	struct uid_stat *entry;
+	activity_stats_update();
+	entry = find_or_create_uid_stat(uid);
+	if (!entry)
+		return -1;
+	atomic_add(size, &entry->tcp_snd);
+	return 0;
+}
+
+/*
+ * Account @size bytes of TCP receive traffic to @uid.  Mirror of
+ * uid_stat_tcp_snd(); returns 0 on success, -1 on allocation failure.
+ */
+int uid_stat_tcp_rcv(uid_t uid, int size) {
+	struct uid_stat *entry;
+	activity_stats_update();
+	entry = find_or_create_uid_stat(uid);
+	if (!entry)
+		return -1;
+	atomic_add(size, &entry->tcp_rcv);
+	return 0;
+}
+
+/*
+ * Create the /proc/uid_stat parent directory under which per-uid
+ * subdirectories are added lazily as traffic is first accounted.
+ */
+static int __init uid_stat_init(void)
+{
+	parent = proc_mkdir("uid_stat", NULL);
+	if (!parent) {
+		pr_err("uid_stat: failed to create proc entry\n");
+		return -1;
+	}
+	return 0;
+}
+
+__initcall(uid_stat_init);
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 1a13f5b722c5..0ab75f2f054c 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -70,6 +70,12 @@ config OF_PCI_IRQ
help
OpenFirmware PCI IRQ routing helpers
+config OF_SPMI
+ def_tristate SPMI
+ depends on SPMI
+ help
+ OpenFirmware SPMI bus accessors
+
config OF_MTD
depends on MTD
def_bool y
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index d134710de96d..32aa2c972047 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -455,7 +455,7 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
if (size &&
early_init_dt_reserve_memory_arch(base, size, nomap) == 0)
- pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %ld MiB\n",
+ pr_info("Reserved memory: reserved region for node '%s': base %pa, size %ld MiB\n",
uname, &base, (unsigned long)size / SZ_1M);
else
pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %ld MiB\n",
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index c6a66de6ed72..713890ada7fd 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -124,6 +124,28 @@ config PINCTRL_SINGLE
help
This selects the device tree based generic pinctrl driver.
+config USE_PINCTRL_IRQ
+ default n
+ bool "Use Pinctrl IRQ chip"
+ help
+ Use Irq chip with Pinctrl subsystem instead of the irq chip
+ associated with gpio lib. The pinctrl irq chip allows the pin
+ attributes to be configured, prior to configuring them as
+ interrupt triggers.
+
+config PINCTRL_MSM_TLMM_V3
+ bool "MSM TLMMv3 pinctrl driver"
+ depends on OF && (ARCH_MSM || ARCH_QCOM)
+ select PINMUX
+ select GENERIC_PINCONF
+
+config PINCTRL_MSM_TLMM_V4
+	bool "MSM TLMMv4 pinctrl driver"
+	depends on OF && (ARCH_MSM || ARCH_QCOM)
+	select PINMUX
+	select GENERIC_PINCONF
+	help
+	  This selects the TLMM (Top Level Mode Multiplexer) v4 pinctrl
+	  driver used on Qualcomm MSM SoCs such as MSM8916.
+
config PINCTRL_SIRF
bool "CSR SiRFprimaII/SiRFmarco pin controller driver"
depends on ARCH_SIRF
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 51f52d32859e..22f9f7066cee 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -20,6 +20,15 @@ obj-$(CONFIG_PINCTRL_BCM281XX) += pinctrl-bcm281xx.o
obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
obj-$(CONFIG_PINCTRL_PALMAS) += pinctrl-palmas.o
obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
+obj-$(CONFIG_PINCTRL_MSM_TLMM_V3) += pinctrl-msm.o pinctrl-msm-tlmm-v3.o
+obj-$(CONFIG_PINCTRL_MSM_TLMM_V4) += pinctrl-msm.o pinctrl-msm-tlmm-v4.o
+obj-$(CONFIG_PINCTRL_MXS) += pinctrl-mxs.o
+obj-$(CONFIG_PINCTRL_IMX23) += pinctrl-imx23.o
+obj-$(CONFIG_PINCTRL_IMX28) += pinctrl-imx28.o
+obj-$(CONFIG_PINCTRL_NOMADIK) += pinctrl-nomadik.o
+obj-$(CONFIG_PINCTRL_STN8815) += pinctrl-nomadik-stn8815.o
+obj-$(CONFIG_PINCTRL_DB8500) += pinctrl-nomadik-db8500.o
+obj-$(CONFIG_PINCTRL_DB8540) += pinctrl-nomadik-db8540.o
obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
obj-$(CONFIG_PINCTRL_SIRF) += sirf/
obj-$(CONFIG_PINCTRL_TEGRA) += pinctrl-tegra.o
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index e4f65510c87e..43c272491e5f 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1110,7 +1110,7 @@ void devm_pinctrl_put(struct pinctrl *p)
EXPORT_SYMBOL_GPL(devm_pinctrl_put);
int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
- bool dup, bool locked)
+ bool dup)
{
int i, ret;
struct pinctrl_maps *maps_node;
@@ -1178,11 +1178,9 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
maps_node->maps = maps;
}
- if (!locked)
- mutex_lock(&pinctrl_maps_mutex);
+ mutex_lock(&pinctrl_maps_mutex);
list_add_tail(&maps_node->node, &pinctrl_maps);
- if (!locked)
- mutex_unlock(&pinctrl_maps_mutex);
+ mutex_unlock(&pinctrl_maps_mutex);
return 0;
}
@@ -1197,7 +1195,7 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
int pinctrl_register_mappings(struct pinctrl_map const *maps,
unsigned num_maps)
{
- return pinctrl_register_map(maps, num_maps, true, false);
+ return pinctrl_register_map(maps, num_maps, true);
}
void pinctrl_unregister_map(struct pinctrl_map const *map)
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index 75476b3d87da..b24ea846c867 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -183,7 +183,7 @@ static inline struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev,
}
int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
- bool dup, bool locked);
+ bool dup);
void pinctrl_unregister_map(struct pinctrl_map const *map);
extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev);
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index eda13de2e7c0..347c4d47507c 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -92,7 +92,7 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
dt_map->num_maps = num_maps;
list_add_tail(&dt_map->node, &p->dt_maps);
- return pinctrl_register_map(map, num_maps, false, true);
+ return pinctrl_register_map(map, num_maps, false);
}
struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
@@ -201,8 +201,13 @@ int pinctrl_dt_to_map(struct pinctrl *p)
propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state);
prop = of_find_property(np, propname, &size);
kfree(propname);
- if (!prop)
+ if (!prop) {
+ if (!state) {
+ ret = -EINVAL;
+ goto err;
+ }
break;
+ }
list = prop->value;
size /= sizeof(*list);
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 29ff77f90fcb..375022d12588 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -167,11 +167,12 @@ static struct pinconf_generic_dt_params dt_params[] = {
{ "input-schmitt-disable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 0 },
{ "input-debounce", PIN_CONFIG_INPUT_DEBOUNCE, 0 },
{ "power-source", PIN_CONFIG_POWER_SOURCE, 0 },
+ { "input-schmitt", PIN_CONFIG_INPUT_SCHMITT, 0 },
+ { "slew-rate", PIN_CONFIG_SLEW_RATE, 0 },
{ "low-power-enable", PIN_CONFIG_LOW_POWER_MODE, 1 },
{ "low-power-disable", PIN_CONFIG_LOW_POWER_MODE, 0 },
{ "output-low", PIN_CONFIG_OUTPUT, 0, },
{ "output-high", PIN_CONFIG_OUTPUT, 1, },
- { "slew-rate", PIN_CONFIG_SLEW_RATE, 0},
};
/**
diff --git a/drivers/pinctrl/pinctrl-msm-tlmm-v3.c b/drivers/pinctrl/pinctrl-msm-tlmm-v3.c
new file mode 100644
index 000000000000..f3c094df5bc6
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-msm-tlmm-v3.c
@@ -0,0 +1,892 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
+#include "pinctrl-msm.h"
+
+/* config translations */
+#define drv_str_to_rval(drv) ((drv >> 1) - 1)
+#define rval_to_drv_str(val) ((val + 1) << 1)
+#define dir_to_inout_val(dir) (dir << 1)
+#define inout_val_to_dir(val) (val >> 1)
+#define rval_to_pull(val) ((val > 2) ? 1 : val)
+#define TLMMV3_NO_PULL 0
+#define TLMMV3_PULL_DOWN 1
+#define TLMMV3_PULL_UP 3
+/* GP PIN TYPE REG MASKS */
+#define TLMMV3_GP_DRV_SHFT 6
+#define TLMMV3_GP_DRV_MASK 0x7
+#define TLMMV3_GP_PULL_SHFT 0
+#define TLMMV3_GP_PULL_MASK 0x3
+#define TLMMV3_GP_DIR_SHFT 9
+#define TLMMV3_GP_DIR_MASK 1
+#define TLMMV3_GP_FUNC_SHFT 2
+#define TLMMV3_GP_FUNC_MASK 0xF
+#define GPIO_OUT_BIT 1
+#define GPIO_IN_BIT 0
+#define GPIO_OE_BIT 9
+/* SDC1 PIN TYPE REG MASKS */
+#define TLMMV3_SDC1_CLK_DRV_SHFT 6
+#define TLMMV3_SDC1_CLK_DRV_MASK 0x7
+#define TLMMV3_SDC1_DATA_DRV_SHFT 0
+#define TLMMV3_SDC1_DATA_DRV_MASK 0x7
+#define TLMMV3_SDC1_CMD_DRV_SHFT 3
+#define TLMMV3_SDC1_CMD_DRV_MASK 0x7
+#define TLMMV3_SDC1_CLK_PULL_SHFT 13
+#define TLMMV3_SDC1_CLK_PULL_MASK 0x3
+#define TLMMV3_SDC1_DATA_PULL_SHFT 9
+#define TLMMV3_SDC1_DATA_PULL_MASK 0x3
+#define TLMMV3_SDC1_CMD_PULL_SHFT 11
+#define TLMMV3_SDC1_CMD_PULL_MASK 0x3
+#define TLMMV3_SDC1_RCLK_PULL_SHFT 15
+#define TLMMV3_SDC1_RCLK_PULL_MASK 0x3
+/* SDC2 PIN TYPE REG MASKS */
+#define TLMMV3_SDC2_CLK_DRV_SHFT 6
+#define TLMMV3_SDC2_CLK_DRV_MASK 0x7
+#define TLMMV3_SDC2_DATA_DRV_SHFT 0
+#define TLMMV3_SDC2_DATA_DRV_MASK 0x7
+#define TLMMV3_SDC2_CMD_DRV_SHFT 3
+#define TLMMV3_SDC2_CMD_DRV_MASK 0x7
+#define TLMMV3_SDC2_CLK_PULL_SHFT 14
+#define TLMMV3_SDC2_CLK_PULL_MASK 0x3
+#define TLMMV3_SDC2_DATA_PULL_SHFT 9
+#define TLMMV3_SDC2_DATA_PULL_MASK 0x3
+#define TLMMV3_SDC2_CMD_PULL_SHFT 11
+#define TLMMV3_SDC2_CMD_PULL_MASK 0x3
+/* TLMM V3 IRQ REG fields */
+#define INTR_ENABLE_BIT 0
+#define INTR_POL_CTL_BIT 1
+#define INTR_DECT_CTL_BIT 2
+#define INTR_RAW_STATUS_EN_BIT 4
+#define INTR_TARGET_PROC_BIT 5
+#define INTR_DIR_CONN_EN_BIT 8
+#define INTR_STATUS_BIT 0
+#define DC_POLARITY_BIT 8
+
+/* Target processors for TLMM pin based interrupts */
+#define INTR_TARGET_PROC_APPS(core_id) ((core_id) << INTR_TARGET_PROC_BIT)
+#define TLMMV3_APPS_ID_DEFAULT 4
+#define INTR_TARGET_PROC_NONE (7 << INTR_TARGET_PROC_BIT)
+/* Interrupt flag bits */
+#define DC_POLARITY_HI BIT(DC_POLARITY_BIT)
+#define INTR_POL_CTL_HI BIT(INTR_POL_CTL_BIT)
+#define INTR_DECT_CTL_LEVEL (0 << INTR_DECT_CTL_BIT)
+#define INTR_DECT_CTL_POS_EDGE (1 << INTR_DECT_CTL_BIT)
+#define INTR_DECT_CTL_NEG_EDGE (2 << INTR_DECT_CTL_BIT)
+#define INTR_DECT_CTL_DUAL_EDGE (3 << INTR_DECT_CTL_BIT)
+#define INTR_DECT_CTL_MASK (3 << INTR_DECT_CTL_BIT)
+
+#define TLMMV3_GP_INOUT_BIT 1
+#define TLMMV3_GP_OUT BIT(TLMMV3_GP_INOUT_BIT)
+#define TLMMV3_GP_IN 0
+
+#define gc_to_pintype(gc) \
+ container_of(gc, struct msm_pintype_info, gc)
+#define ic_to_pintype(ic) \
+ ((struct msm_pintype_info *)ic->pinfo)
+#define pintype_get_gc(pinfo) (&pinfo->gc)
+#define pintype_get_ic(pinfo) (pinfo->irq_chip)
+
+/* SDC Pin type register offsets */
+#define TLMMV3_SDC_OFFSET 0x2044
+#define TLMMV3_SDC1_CFG(base) (base)
+#define TLMMV3_SDC2_CFG(base) (TLMMV3_SDC1_CFG(base) + 0x4)
+
+/* GP pin type register offsets */
+#define TLMMV3_GP_CFG(base, pin) (base + 0x1000 + 0x10 * (pin))
+#define TLMMV3_GP_INOUT(base, pin) (base + 0x1004 + 0x10 * (pin))
+#define TLMMV3_GP_INTR_CFG(base, pin) (base + 0x1008 + 0x10 * (pin))
+#define TLMMV3_GP_INTR_STATUS(base, pin) (base + 0x100c + 0x10 * (pin))
+
+struct msm_sdc_regs {
+ unsigned int offset;
+ unsigned long pull_mask;
+ unsigned long pull_shft;
+ unsigned long drv_mask;
+ unsigned long drv_shft;
+};
+
+static struct msm_sdc_regs sdc_regs[] = {
+ /* SDC1 CLK */
+ {
+ .offset = 0,
+ .pull_mask = TLMMV3_SDC1_CLK_PULL_MASK,
+ .pull_shft = TLMMV3_SDC1_CLK_PULL_SHFT,
+ .drv_mask = TLMMV3_SDC1_CLK_DRV_MASK,
+ .drv_shft = TLMMV3_SDC1_CLK_DRV_SHFT,
+ },
+ /* SDC1 CMD */
+ {
+ .offset = 0,
+ .pull_mask = TLMMV3_SDC1_CMD_PULL_MASK,
+ .pull_shft = TLMMV3_SDC1_CMD_PULL_SHFT,
+ .drv_mask = TLMMV3_SDC1_CMD_DRV_MASK,
+ .drv_shft = TLMMV3_SDC1_CMD_DRV_SHFT,
+ },
+ /* SDC1 DATA */
+ {
+ .offset = 0,
+ .pull_mask = TLMMV3_SDC1_DATA_PULL_MASK,
+ .pull_shft = TLMMV3_SDC1_DATA_PULL_SHFT,
+ .drv_mask = TLMMV3_SDC1_DATA_DRV_MASK,
+ .drv_shft = TLMMV3_SDC1_DATA_DRV_SHFT,
+ },
+ /* SDC1 RCLK */
+ {
+ .offset = 0,
+ .pull_mask = TLMMV3_SDC1_RCLK_PULL_MASK,
+ .pull_shft = TLMMV3_SDC1_RCLK_PULL_SHFT,
+ },
+ /* SDC2 CLK */
+ {
+ .offset = 0x4,
+ .pull_mask = TLMMV3_SDC2_CLK_PULL_MASK,
+ .pull_shft = TLMMV3_SDC2_CLK_PULL_SHFT,
+ .drv_mask = TLMMV3_SDC2_CLK_DRV_MASK,
+ .drv_shft = TLMMV3_SDC2_CLK_DRV_SHFT,
+ },
+ /* SDC2 CMD */
+ {
+ .offset = 0x4,
+ .pull_mask = TLMMV3_SDC2_CMD_PULL_MASK,
+ .pull_shft = TLMMV3_SDC2_CMD_PULL_SHFT,
+ .drv_mask = TLMMV3_SDC2_CMD_DRV_MASK,
+ .drv_shft = TLMMV3_SDC2_CMD_DRV_SHFT,
+ },
+ /* SDC2 DATA */
+ {
+ .offset = 0x4,
+ .pull_mask = TLMMV3_SDC2_DATA_PULL_MASK,
+ .pull_shft = TLMMV3_SDC2_DATA_PULL_SHFT,
+ .drv_mask = TLMMV3_SDC2_DATA_DRV_MASK,
+ .drv_shft = TLMMV3_SDC2_DATA_DRV_SHFT,
+ },
+};
+
+/*
+ * Read or program one pinconf parameter for an SDC pad.
+ *
+ * @pin_no:   index into sdc_regs[] (SDC1/SDC2 clk, cmd, data, rclk)
+ * @config:   packed pinconf param+arg; on a read it is rewritten with
+ *            the value currently in hardware
+ * @reg_base: base of the SDC config register block
+ * @write:    true to program the register, false to query it
+ *
+ * Returns 0 on success, -EINVAL for an unknown pin or parameter.
+ */
+static int msm_tlmm_v3_sdc_cfg(uint pin_no, unsigned long *config,
+						void __iomem *reg_base,
+						bool write)
+{
+	unsigned int val, id, data;
+	u32 mask, shft;
+	void __iomem *cfg_reg;
+
+	if (pin_no >= ARRAY_SIZE(sdc_regs))
+		return -EINVAL;
+
+	/* Both SDC controllers share one register pair; pick by offset. */
+	cfg_reg = reg_base + sdc_regs[pin_no].offset;
+	id = pinconf_to_config_param(*config);
+	val = readl_relaxed(cfg_reg);
+	/* Get mask and shft values for this config type */
+	switch (id) {
+	case PIN_CONFIG_BIAS_DISABLE:
+		mask = sdc_regs[pin_no].pull_mask;
+		shft = sdc_regs[pin_no].pull_shft;
+		data = TLMMV3_NO_PULL;
+		if (!write) {
+			val >>= shft;
+			val &= mask;
+			data = rval_to_pull(val);
+		}
+		break;
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		mask = sdc_regs[pin_no].pull_mask;
+		shft = sdc_regs[pin_no].pull_shft;
+		data = TLMMV3_PULL_DOWN;
+		if (!write) {
+			val >>= shft;
+			val &= mask;
+			data = rval_to_pull(val);
+		}
+		break;
+	case PIN_CONFIG_BIAS_PULL_UP:
+		mask = sdc_regs[pin_no].pull_mask;
+		shft = sdc_regs[pin_no].pull_shft;
+		data = TLMMV3_PULL_UP;
+		if (!write) {
+			val >>= shft;
+			val &= mask;
+			data = rval_to_pull(val);
+		}
+		break;
+	case PIN_CONFIG_DRIVE_STRENGTH:
+		mask = sdc_regs[pin_no].drv_mask;
+		shft = sdc_regs[pin_no].drv_shft;
+		if (write) {
+			/* mA value from DT -> encoded register value */
+			data = pinconf_to_config_argument(*config);
+			data = drv_str_to_rval(data);
+		} else {
+			val >>= shft;
+			val &= mask;
+			data = rval_to_drv_str(val);
+		}
+		break;
+	default:
+		return -EINVAL;
+	};
+
+	if (write) {
+		/* NOTE: val was consumed by the read path only; on a write
+		 * it still holds the original register image here. */
+		val &= ~(mask << shft);
+		val |= (data << shft);
+		writel_relaxed(val, cfg_reg);
+	} else
+		*config = pinconf_to_config_packed(id, data);
+	return 0;
+}
+
+static void msm_tlmm_v3_sdc_set_reg_base(void __iomem **ptype_base,
+ void __iomem *tlmm_base)
+{
+ *ptype_base = tlmm_base + TLMMV3_SDC_OFFSET;
+}
+
+/*
+ * Read or program one pinconf parameter for a general-purpose TLMM pin.
+ *
+ * @pin_no:   gpio number within the TLMM
+ * @config:   packed pinconf param+arg; on a read it is rewritten with
+ *            the value currently in hardware
+ * @reg_base: TLMM base for the GP pin type
+ * @write:    true to program the register, false to query it
+ *
+ * Pull/drive live in the per-pin CFG register; output level lives in
+ * the separate INOUT register.  Returns 0 or -EINVAL.
+ */
+static int msm_tlmm_v3_gp_cfg(uint pin_no, unsigned long *config,
+						void *reg_base, bool write)
+{
+	unsigned int val, id, data, inout_val;
+	u32 mask = 0, shft = 0;
+	void __iomem *inout_reg = NULL;
+	void __iomem *cfg_reg = TLMMV3_GP_CFG(reg_base, pin_no);
+
+	id = pinconf_to_config_param(*config);
+	val = readl_relaxed(cfg_reg);
+	/* Get mask and shft values for this config type */
+	switch (id) {
+	case PIN_CONFIG_BIAS_DISABLE:
+		mask = TLMMV3_GP_PULL_MASK;
+		shft = TLMMV3_GP_PULL_SHFT;
+		data = TLMMV3_NO_PULL;
+		if (!write) {
+			val >>= shft;
+			val &= mask;
+			data = rval_to_pull(val);
+		}
+		break;
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		mask = TLMMV3_GP_PULL_MASK;
+		shft = TLMMV3_GP_PULL_SHFT;
+		data = TLMMV3_PULL_DOWN;
+		if (!write) {
+			val >>= shft;
+			val &= mask;
+			data = rval_to_pull(val);
+		}
+		break;
+	case PIN_CONFIG_BIAS_PULL_UP:
+		mask = TLMMV3_GP_PULL_MASK;
+		shft = TLMMV3_GP_PULL_SHFT;
+		data = TLMMV3_PULL_UP;
+		if (!write) {
+			val >>= shft;
+			val &= mask;
+			data = rval_to_pull(val);
+		}
+		break;
+	case PIN_CONFIG_DRIVE_STRENGTH:
+		mask = TLMMV3_GP_DRV_MASK;
+		shft = TLMMV3_GP_DRV_SHFT;
+		if (write) {
+			/* mA value from DT -> encoded register value */
+			data = pinconf_to_config_argument(*config);
+			data = drv_str_to_rval(data);
+		} else {
+			val >>= shft;
+			val &= mask;
+			data = rval_to_drv_str(val);
+		}
+		break;
+	case PIN_CONFIG_OUTPUT:
+		mask = TLMMV3_GP_DIR_MASK;
+		shft = TLMMV3_GP_DIR_SHFT;
+		inout_reg = TLMMV3_GP_INOUT(reg_base, pin_no);
+		if (write) {
+			/* Set the output level first, then OE below. */
+			data = pinconf_to_config_argument(*config);
+			inout_val = dir_to_inout_val(data);
+			writel_relaxed(inout_val, inout_reg);
+			/* data = mask so the OE bit gets set in cfg_reg */
+			data = mask;
+		} else {
+			inout_val = readl_relaxed(inout_reg);
+			data = inout_val_to_dir(inout_val);
+		}
+		break;
+	default:
+		return -EINVAL;
+	};
+
+	if (write) {
+		val &= ~(mask << shft);
+		val |= (data << shft);
+		writel_relaxed(val, cfg_reg);
+	} else
+		*config = pinconf_to_config_packed(id, data);
+	return 0;
+}
+
+static void msm_tlmm_v3_gp_fn(uint pin_no, u32 func, void *reg_base,
+ bool enable)
+{
+ unsigned int val;
+ void __iomem *cfg_reg = TLMMV3_GP_CFG(reg_base, pin_no);
+ val = readl_relaxed(cfg_reg);
+ val &= ~(TLMMV3_GP_FUNC_MASK << TLMMV3_GP_FUNC_SHFT);
+ if (enable)
+ val |= (func << TLMMV3_GP_FUNC_SHFT);
+ writel_relaxed(val, cfg_reg);
+}
+
+static void msm_tlmm_v3_gp_set_reg_base(void __iomem **ptype_base,
+ void __iomem *tlmm_base)
+{
+ *ptype_base = tlmm_base;
+}
+
+/* GPIO CHIP */
+static int msm_tlmm_v3_gp_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct msm_pintype_info *pinfo = gc_to_pintype(gc);
+ void __iomem *inout_reg = TLMMV3_GP_INOUT(pinfo->reg_base, offset);
+
+ return readl_relaxed(inout_reg) & BIT(GPIO_IN_BIT);
+}
+
+static void msm_tlmm_v3_gp_set(struct gpio_chip *gc, unsigned offset, int val)
+{
+ struct msm_pintype_info *pinfo = gc_to_pintype(gc);
+ void __iomem *inout_reg = TLMMV3_GP_INOUT(pinfo->reg_base, offset);
+
+ writel_relaxed(val ? BIT(GPIO_OUT_BIT) : 0, inout_reg);
+}
+
+static int msm_tlmm_v3_gp_dir_in(struct gpio_chip *gc, unsigned offset)
+{
+ unsigned int val;
+ struct msm_pintype_info *pinfo = gc_to_pintype(gc);
+ void __iomem *cfg_reg = TLMMV3_GP_CFG(pinfo->reg_base, offset);
+
+ val = readl_relaxed(cfg_reg);
+ val &= ~BIT(GPIO_OE_BIT);
+ writel_relaxed(val, cfg_reg);
+ return 0;
+}
+
+static int msm_tlmm_v3_gp_dir_out(struct gpio_chip *gc, unsigned offset,
+ int val)
+{
+ struct msm_pintype_info *pinfo = gc_to_pintype(gc);
+ void __iomem *cfg_reg = TLMMV3_GP_CFG(pinfo->reg_base, offset);
+
+ msm_tlmm_v3_gp_set(gc, offset, val);
+ val = readl_relaxed(cfg_reg);
+ val |= BIT(GPIO_OE_BIT);
+ writel_relaxed(val, cfg_reg);
+ return 0;
+}
+
+static int msm_tlmm_v3_gp_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct msm_pintype_info *pinfo = gc_to_pintype(gc);
+ struct msm_tlmm_irq_chip *ic = pintype_get_ic(pinfo);
+ return irq_create_mapping(ic->domain, offset);
+}
+
+/* Irq reg ops */
+static void msm_tlmm_v3_set_intr_status(struct msm_tlmm_irq_chip *ic,
+ unsigned pin)
+{
+ void __iomem *status_reg = TLMMV3_GP_INTR_STATUS(ic->chip_base, pin);
+ writel_relaxed(0, status_reg);
+}
+
+static int msm_tlmm_v3_get_intr_status(struct msm_tlmm_irq_chip *ic,
+ unsigned pin)
+{
+ void __iomem *status_reg = TLMMV3_GP_INTR_STATUS(ic->chip_base, pin);
+ return readl_relaxed(status_reg) & BIT(INTR_STATUS_BIT);
+}
+
+static void msm_tlmm_v3_set_intr_cfg_enable(struct msm_tlmm_irq_chip *ic,
+ unsigned pin,
+ int enable)
+{
+ unsigned int val;
+ void __iomem *cfg_reg = TLMMV3_GP_INTR_CFG(ic->chip_base, pin);
+
+ val = readl_relaxed(cfg_reg);
+ if (enable) {
+ val &= ~BIT(INTR_DIR_CONN_EN_BIT);
+ val |= BIT(INTR_ENABLE_BIT);
+ } else
+ val &= ~BIT(INTR_ENABLE_BIT);
+ writel_relaxed(val, cfg_reg);
+}
+
+static int msm_tlmm_v3_get_intr_cfg_enable(struct msm_tlmm_irq_chip *ic,
+ unsigned pin)
+{
+ void __iomem *cfg_reg = TLMMV3_GP_INTR_CFG(ic->chip_base, pin);
+ return readl_relaxed(cfg_reg) & BIT(INTR_ENABLE_BIT);
+}
+
+static void msm_tlmm_v3_set_intr_cfg_type(struct msm_tlmm_irq_chip *ic,
+ struct irq_data *d,
+ unsigned int type)
+{
+ unsigned cfg;
+ void __iomem *cfg_reg = TLMMV3_GP_INTR_CFG(ic->chip_base,
+ (irqd_to_hwirq(d)));
+ /*
+ * RAW_STATUS_EN is left on for all gpio irqs. Due to the
+ * internal circuitry of TLMM, toggling the RAW_STATUS
+ * could cause the INTR_STATUS to be set for EDGE interrupts.
+ */
+ cfg = BIT(INTR_RAW_STATUS_EN_BIT) | INTR_TARGET_PROC_APPS(ic->apps_id);
+ writel_relaxed(cfg, cfg_reg);
+ cfg &= ~INTR_DECT_CTL_MASK;
+ if (type == IRQ_TYPE_EDGE_RISING)
+ cfg |= INTR_DECT_CTL_POS_EDGE;
+ else if (type == IRQ_TYPE_EDGE_FALLING)
+ cfg |= INTR_DECT_CTL_NEG_EDGE;
+ else if (type == IRQ_TYPE_EDGE_BOTH)
+ cfg |= INTR_DECT_CTL_DUAL_EDGE;
+ else
+ cfg |= INTR_DECT_CTL_LEVEL;
+
+ if (type & IRQ_TYPE_LEVEL_LOW)
+ cfg &= ~INTR_POL_CTL_HI;
+ else
+ cfg |= INTR_POL_CTL_HI;
+
+ writel_relaxed(cfg, cfg_reg);
+ /*
+ * Sometimes it might take a little while to update
+ * the interrupt status after the RAW_STATUS is enabled
+ * We clear the interrupt status before enabling the
+ * interrupt in the unmask call-back.
+ */
+ udelay(5);
+}
+
+/*
+ * Summary-irq demux for GP pins: scan all enabled hw irqs, and for each
+ * pin whose status bit is set, dispatch its mapped Linux virq.
+ *
+ * Fix: the original returned IRQ_NONE from inside the loop on a missing
+ * virq mapping, skipping chained_irq_exit() and leaving the parent irq
+ * chip "entered" (EOI/unmask never issued).  Break out instead so the
+ * enter/exit pair is always balanced.
+ */
+static irqreturn_t msm_tlmm_v3_gp_handle_irq(int irq,
+						struct msm_tlmm_irq_chip *ic)
+{
+	unsigned long i;
+	unsigned int virq = 0;
+	irqreturn_t ret = IRQ_HANDLED;
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct msm_pintype_info *pinfo = ic_to_pintype(ic);
+	struct gpio_chip *gc = pintype_get_gc(pinfo);
+
+	chained_irq_enter(chip, desc);
+	for_each_set_bit(i, ic->enabled_irqs, ic->num_irqs) {
+		dev_dbg(ic->dev, "hwirq in bit mask %d\n", (unsigned int)i);
+		if (msm_tlmm_v3_get_intr_status(ic, i)) {
+			dev_dbg(ic->dev, "hwirw %d fired\n", (unsigned int)i);
+			virq = msm_tlmm_v3_gp_to_irq(gc, i);
+			if (!virq) {
+				dev_dbg(ic->dev, "invalid virq\n");
+				ret = IRQ_NONE;
+				break;
+			}
+			generic_handle_irq(virq);
+		}
+	}
+	chained_irq_exit(chip, desc);
+	return ret;
+}
+
+static void msm_tlmm_v3_irq_ack(struct irq_data *d)
+{
+ struct msm_tlmm_irq_chip *ic = irq_data_get_irq_chip_data(d);
+
+ msm_tlmm_v3_set_intr_status(ic, irqd_to_hwirq(d));
+ mb();
+}
+
+static void msm_tlmm_v3_irq_mask(struct irq_data *d)
+{
+ unsigned long irq_flags;
+ struct msm_tlmm_irq_chip *ic = irq_data_get_irq_chip_data(d);
+
+ spin_lock_irqsave(&ic->irq_lock, irq_flags);
+ msm_tlmm_v3_set_intr_cfg_enable(ic, irqd_to_hwirq(d), 0);
+ __clear_bit(irqd_to_hwirq(d), ic->enabled_irqs);
+ mb();
+ spin_unlock_irqrestore(&ic->irq_lock, irq_flags);
+ if (ic->irq_chip_extn->irq_mask)
+ ic->irq_chip_extn->irq_mask(d);
+}
+
+static void msm_tlmm_v3_irq_unmask(struct irq_data *d)
+{
+ unsigned long irq_flags;
+ struct msm_tlmm_irq_chip *ic = irq_data_get_irq_chip_data(d);
+
+ spin_lock_irqsave(&ic->irq_lock, irq_flags);
+ __set_bit(irqd_to_hwirq(d), ic->enabled_irqs);
+ if (!msm_tlmm_v3_get_intr_cfg_enable(ic, irqd_to_hwirq(d))) {
+ msm_tlmm_v3_set_intr_status(ic, irqd_to_hwirq(d));
+ msm_tlmm_v3_set_intr_cfg_enable(ic, irqd_to_hwirq(d), 1);
+ mb();
+ }
+ spin_unlock_irqrestore(&ic->irq_lock, irq_flags);
+ if (ic->irq_chip_extn->irq_unmask)
+ ic->irq_chip_extn->irq_unmask(d);
+}
+
+static void msm_tlmm_v3_irq_disable(struct irq_data *d)
+{
+ struct msm_tlmm_irq_chip *ic = irq_data_get_irq_chip_data(d);
+ if (ic->irq_chip_extn->irq_disable)
+ ic->irq_chip_extn->irq_disable(d);
+}
+
+static int msm_tlmm_v3_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ unsigned long irq_flags;
+ unsigned int pin = irqd_to_hwirq(d);
+ struct msm_tlmm_irq_chip *ic = irq_data_get_irq_chip_data(d);
+
+
+ spin_lock_irqsave(&ic->irq_lock, irq_flags);
+
+ if (flow_type & IRQ_TYPE_EDGE_BOTH) {
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
+ if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
+ __set_bit(pin, ic->dual_edge_irqs);
+ else
+ __clear_bit(pin, ic->dual_edge_irqs);
+ } else {
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+ __clear_bit(pin, ic->dual_edge_irqs);
+ }
+
+ msm_tlmm_v3_set_intr_cfg_type(ic, d, flow_type);
+
+ mb();
+ spin_unlock_irqrestore(&ic->irq_lock, irq_flags);
+
+ if (ic->irq_chip_extn->irq_set_type)
+ ic->irq_chip_extn->irq_set_type(d, flow_type);
+
+ return 0;
+}
+
+static int msm_tlmm_v3_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ unsigned int pin = irqd_to_hwirq(d);
+ struct msm_tlmm_irq_chip *ic = irq_data_get_irq_chip_data(d);
+
+ if (on) {
+ if (bitmap_empty(ic->wake_irqs, ic->num_irqs))
+ irq_set_irq_wake(ic->irq, 1);
+ set_bit(pin, ic->wake_irqs);
+ } else {
+ clear_bit(pin, ic->wake_irqs);
+ if (bitmap_empty(ic->wake_irqs, ic->num_irqs))
+ irq_set_irq_wake(ic->irq, 0);
+ }
+
+ if (ic->irq_chip_extn->irq_set_wake)
+ ic->irq_chip_extn->irq_set_wake(d, on);
+
+ return 0;
+}
+
+static struct lock_class_key msm_tlmm_irq_lock_class;
+
+static int msm_tlmm_v3_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct msm_tlmm_irq_chip *ic = h->host_data;
+
+ irq_set_lockdep_class(virq, &msm_tlmm_irq_lock_class);
+ irq_set_chip_data(virq, ic);
+ irq_set_chip_and_handler(virq, &ic->chip,
+ handle_level_irq);
+ set_irq_flags(virq, IRQF_VALID);
+ return 0;
+}
+
+/*
+ * irq domain callbacks for interrupt controller.
+ */
+static const struct irq_domain_ops msm_tlmm_v3_gp_irqd_ops = {
+ .map = msm_tlmm_v3_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+
+static struct msm_tlmm_irq_chip msm_tlmm_v3_gp_irq = {
+ .chip = {
+ .name = "msm_tlmm_v3_irq",
+ .irq_mask = msm_tlmm_v3_irq_mask,
+ .irq_unmask = msm_tlmm_v3_irq_unmask,
+ .irq_ack = msm_tlmm_v3_irq_ack,
+ .irq_set_type = msm_tlmm_v3_irq_set_type,
+ .irq_set_wake = msm_tlmm_v3_irq_set_wake,
+ .irq_disable = msm_tlmm_v3_irq_disable,
+ },
+ .apps_id = TLMMV3_APPS_ID_DEFAULT,
+ .domain_ops = &msm_tlmm_v3_gp_irqd_ops,
+ .handler = msm_tlmm_v3_gp_handle_irq,
+};
+
+/* Power management core operations */
+
+static int msm_tlmm_v3_gp_irq_suspend(void)
+{
+ unsigned long irq_flags;
+ unsigned long i;
+ struct msm_tlmm_irq_chip *ic = &msm_tlmm_v3_gp_irq;
+ int num_irqs = ic->num_irqs;
+
+ spin_lock_irqsave(&ic->irq_lock, irq_flags);
+ for_each_set_bit(i, ic->enabled_irqs, num_irqs)
+ msm_tlmm_v3_set_intr_cfg_enable(ic, i, 0);
+
+ for_each_set_bit(i, ic->wake_irqs, num_irqs)
+ msm_tlmm_v3_set_intr_cfg_enable(ic, i, 1);
+ mb();
+ spin_unlock_irqrestore(&ic->irq_lock, irq_flags);
+ return 0;
+}
+
+static void msm_tlmm_v3_gp_irq_resume(void)
+{
+ unsigned long irq_flags;
+ unsigned long i;
+ struct msm_tlmm_irq_chip *ic = &msm_tlmm_v3_gp_irq;
+ int num_irqs = ic->num_irqs;
+
+ spin_lock_irqsave(&ic->irq_lock, irq_flags);
+ for_each_set_bit(i, ic->wake_irqs, num_irqs)
+ msm_tlmm_v3_set_intr_cfg_enable(ic, i, 0);
+
+ for_each_set_bit(i, ic->enabled_irqs, num_irqs)
+ msm_tlmm_v3_set_intr_cfg_enable(ic, i, 1);
+ mb();
+ spin_unlock_irqrestore(&ic->irq_lock, irq_flags);
+}
+
+static struct syscore_ops msm_tlmm_v3_irq_syscore_ops = {
+ .suspend = msm_tlmm_v3_gp_irq_suspend,
+ .resume = msm_tlmm_v3_gp_irq_resume,
+};
+
+#ifdef CONFIG_USE_PINCTRL_IRQ
+/*
+ * Set up the TLMM GP irq domain from the device-tree controller node.
+ * "num_irqs" is mandatory; "apps_id" (target processor id) is optional
+ * and defaults to TLMMV3_APPS_ID_DEFAULT.  @chip_extn is an extension
+ * irq_chip whose callbacks are chained from ours.
+ *
+ * Fix: irq_domain_add_linear() returns NULL on failure, never an
+ * ERR_PTR() value, so the original IS_ERR() check could never fire.
+ */
+int msm_tlmm_v3_of_irq_init(struct device_node *controller,
+				struct irq_chip *chip_extn)
+{
+	int ret, num_irqs, apps_id;
+	struct msm_tlmm_irq_chip *ic = &msm_tlmm_v3_gp_irq;
+
+	ret = of_property_read_u32(controller, "num_irqs", &num_irqs);
+	if (ret) {
+		WARN(1, "Cannot get numirqs from device tree\n");
+		return ret;
+	}
+	ret = of_property_read_u32(controller, "apps_id", &apps_id);
+	if (!ret) {
+		pr_info("processor id specified, in device tree %d\n", apps_id);
+		ic->apps_id = apps_id;
+	}
+	ic->num_irqs = num_irqs;
+	ic->domain = irq_domain_add_linear(controller, ic->num_irqs,
+						ic->domain_ops, ic);
+	if (!ic->domain)
+		return -ENOMEM;
+	ic->irq_chip_extn = chip_extn;
+	return 0;
+}
+#endif
+
+/*
+ * Allocate the per-chip irq bookkeeping bitmaps and hook the syscore
+ * suspend/resume ops.  A no-op when no irq domain was registered.
+ *
+ * Fixes: devm_kzalloc() returns NULL on failure, never ERR_PTR(), so
+ * the original IS_ERR()/PTR_ERR() checks were dead and would have
+ * returned 0 (success) on OOM; the wake-irqs failure message was also
+ * a copy-paste of the dual-edge one.
+ */
+static int msm_tlmm_v3_gp_irq_init(int irq, struct msm_pintype_info *pinfo,
+						struct device *tlmm_dev)
+{
+	int num_irqs;
+	struct msm_tlmm_irq_chip *ic = pinfo->irq_chip;
+
+	if (!ic->domain)
+		return 0;
+
+	num_irqs = ic->num_irqs;
+	ic->enabled_irqs = devm_kzalloc(tlmm_dev, sizeof(unsigned long)
+				* BITS_TO_LONGS(num_irqs), GFP_KERNEL);
+	if (!ic->enabled_irqs) {
+		dev_err(tlmm_dev, "Unable to allocate enabled irqs bitmap\n");
+		return -ENOMEM;
+	}
+	ic->dual_edge_irqs = devm_kzalloc(tlmm_dev, sizeof(unsigned long)
+				* BITS_TO_LONGS(num_irqs), GFP_KERNEL);
+	if (!ic->dual_edge_irqs) {
+		dev_err(tlmm_dev, "Unable to allocate dual edge irqs bitmap\n");
+		return -ENOMEM;
+	}
+	ic->wake_irqs = devm_kzalloc(tlmm_dev, sizeof(unsigned long)
+				* BITS_TO_LONGS(num_irqs), GFP_KERNEL);
+	if (!ic->wake_irqs) {
+		dev_err(tlmm_dev, "Unable to allocate wake irqs bitmap\n");
+		return -ENOMEM;
+	}
+	spin_lock_init(&ic->irq_lock);
+	ic->chip_base = pinfo->reg_base;
+	ic->irq = irq;
+	ic->dev = tlmm_dev;
+	/*
+	 * NOTE(review): the bitmaps above were sized with the DT-provided
+	 * num_irqs, but num_irqs is overwritten with pinfo->num_pins here;
+	 * confirm the two always agree, else the bitmaps may be undersized.
+	 */
+	ic->num_irqs = pinfo->num_pins;
+	ic->pinfo = pinfo;
+	register_syscore_ops(&msm_tlmm_v3_irq_syscore_ops);
+	return 0;
+}
+
+static irqreturn_t msm_tlmm_v3_handle_irq(int irq, void *data)
+{
+ int i, num_pintypes;
+ struct msm_pintype_info *pintypes, *pintype;
+ struct msm_tlmm_irq_chip *ic;
+ struct msm_tlmm_desc *tlmm_desc = (struct msm_tlmm_desc *)data;
+ irqreturn_t ret = IRQ_NONE;
+
+ pintypes = tlmm_desc->pintypes;
+ num_pintypes = tlmm_desc->num_pintypes;
+ for (i = 0; i < num_pintypes; i++) {
+ pintype = &pintypes[i];
+ if (!pintype->irq_chip)
+ continue;
+ ic = pintype->irq_chip;
+ if (!ic->node)
+ continue;
+ ret = ic->handler(irq, ic);
+ if (ret != IRQ_HANDLED)
+ break;
+ }
+ return ret;
+}
+
+static struct msm_pintype_info tlmm_v3_pininfo[] = {
+ {
+ .prg_cfg = msm_tlmm_v3_gp_cfg,
+ .prg_func = msm_tlmm_v3_gp_fn,
+ .set_reg_base = msm_tlmm_v3_gp_set_reg_base,
+ .reg_base = NULL,
+ .prop_name = "qcom,pin-type-gp",
+ .name = "gp",
+ .gc = {
+ .label = "msm_tlmm_v3_gpio",
+ .direction_input = msm_tlmm_v3_gp_dir_in,
+ .direction_output = msm_tlmm_v3_gp_dir_out,
+ .get = msm_tlmm_v3_gp_get,
+ .set = msm_tlmm_v3_gp_set,
+ .to_irq = msm_tlmm_v3_gp_to_irq,
+ },
+ .init_irq = msm_tlmm_v3_gp_irq_init,
+ .irq_chip = &msm_tlmm_v3_gp_irq,
+ },
+ {
+ .prg_cfg = msm_tlmm_v3_sdc_cfg,
+ .set_reg_base = msm_tlmm_v3_sdc_set_reg_base,
+ .reg_base = NULL,
+ .prop_name = "qcom,pin-type-sdc",
+ .name = "sdc",
+ }
+};
+
+struct msm_tlmm_pintype tlmm_v3_pintypes = {
+ .num_entries = ARRAY_SIZE(tlmm_v3_pininfo),
+ .pintype_info = tlmm_v3_pininfo,
+};
+
+static const struct of_device_id msm_tlmm_v3_dt_match[] = {
+ { .compatible = "qcom,msm-tlmm-v3",
+ .data = &tlmm_v3_pintypes, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, msm_tlmm_v3_dt_match);
+
+/*
+ * Platform probe: map the TLMM register block, optionally claim the
+ * summary irq, and hand the descriptor to the common msm_pinctrl core.
+ *
+ * Fixes: of_match_node() and devm_ioremap() both return NULL on
+ * failure, never ERR_PTR() values; the original IS_ERR() checks were
+ * dead and would have led to NULL dereferences on the failure paths.
+ */
+static int msm_tlmm_v3_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	const struct msm_tlmm_pintype *pinfo;
+	struct msm_tlmm_desc *tlmm_desc;
+	int irq, ret;
+	struct resource *res;
+	struct device_node *node = pdev->dev.of_node;
+
+	match = of_match_node(msm_tlmm_v3_dt_match, node);
+	if (!match)
+		return -ENODEV;
+	pinfo = match->data;
+	tlmm_desc = devm_kzalloc(&pdev->dev, sizeof(*tlmm_desc), GFP_KERNEL);
+	if (!tlmm_desc) {
+		dev_err(&pdev->dev, "Alloction failed for tlmm desc\n");
+		return -ENOMEM;
+	}
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "cannot find IO resource\n");
+		return -ENOENT;
+	}
+	tlmm_desc->base = devm_ioremap(&pdev->dev, res->start,
+						resource_size(res));
+	if (!tlmm_desc->base)
+		return -ENOMEM;
+	tlmm_desc->irq = -EINVAL;
+	/* The summary irq is optional; pin irqs are disabled without it. */
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (res) {
+		irq = res->start;
+		ret = devm_request_irq(&pdev->dev, irq, msm_tlmm_v3_handle_irq,
+					IRQF_TRIGGER_HIGH,
+					dev_name(&pdev->dev),
+					tlmm_desc);
+		if (ret) {
+			dev_err(&pdev->dev, "register for irq failed\n");
+			return ret;
+		}
+		tlmm_desc->irq = irq;
+	}
+	tlmm_desc->pintypes = pinfo->pintype_info;
+	tlmm_desc->num_pintypes = pinfo->num_entries;
+	return msm_pinctrl_probe(pdev, tlmm_desc);
+}
+
+static struct platform_driver msm_tlmm_v3_drv = {
+ .probe = msm_tlmm_v3_probe,
+ .driver = {
+ .name = "msm-tlmmv3-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(msm_tlmm_v3_dt_match),
+ },
+};
+
+static int __init msm_tlmm_v3_drv_register(void)
+{
+	return platform_driver_register(&msm_tlmm_v3_drv);
+}
+/* postcore: pinctrl must be up before drivers that request pins probe. */
+postcore_initcall(msm_tlmm_v3_drv_register);
+
+static void __exit msm_tlmm_v3_drv_unregister(void)
+{
+	platform_driver_unregister(&msm_tlmm_v3_drv);
+}
+module_exit(msm_tlmm_v3_drv_unregister);
+
+/*
+ * Fix: "GPLv2" is not a recognized license ident and taints the kernel
+ * on module load; the canonical string is "GPL v2" (include/linux/module.h).
+ */
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-msm-tlmm-v4.c b/drivers/pinctrl/pinctrl-msm-tlmm-v4.c
new file mode 100644
index 000000000000..df5720e88afb
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-msm-tlmm-v4.c
@@ -0,0 +1,982 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
+#include "pinctrl-msm.h"
+
+/* config translations */
+#define drv_str_to_rval(drv) ((drv >> 1) - 1)
+#define rval_to_drv_str(val) ((val + 1) << 1)
+#define dir_to_inout_val(dir) (dir << 1)
+#define inout_val_to_dir(val) (val >> 1)
+#define rval_to_pull(val) ((val > 2) ? 1 : val)
+#define TLMMV4_NO_PULL 0
+#define TLMMV4_PULL_DOWN 1
+#define TLMMV4_PULL_UP 3
+/* GP PIN TYPE REG MASKS */
+#define TLMMV4_GP_DRV_SHFT 6
+#define TLMMV4_GP_DRV_MASK 0x7
+#define TLMMV4_GP_PULL_SHFT 0
+#define TLMMV4_GP_PULL_MASK 0x3
+#define TLMMV4_GP_DIR_SHFT 9
+#define TLMMV4_GP_DIR_MASK 1
+#define TLMMV4_GP_FUNC_SHFT 2
+#define TLMMV4_GP_FUNC_MASK 0xF
+#define GPIO_OUT_BIT 1
+#define GPIO_IN_BIT 0
+#define GPIO_OE_BIT 9
+/* SDC1 PIN TYPE REG MASKS */
+#define TLMMV4_SDC1_CLK_DRV_SHFT 6
+#define TLMMV4_SDC1_CLK_DRV_MASK 0x7
+#define TLMMV4_SDC1_DATA_DRV_SHFT 0
+#define TLMMV4_SDC1_DATA_DRV_MASK 0x7
+#define TLMMV4_SDC1_CMD_DRV_SHFT 3
+#define TLMMV4_SDC1_CMD_DRV_MASK 0x7
+#define TLMMV4_SDC1_CLK_PULL_SHFT 13
+#define TLMMV4_SDC1_CLK_PULL_MASK 0x3
+#define TLMMV4_SDC1_DATA_PULL_SHFT 9
+#define TLMMV4_SDC1_DATA_PULL_MASK 0x3
+#define TLMMV4_SDC1_CMD_PULL_SHFT 11
+#define TLMMV4_SDC1_CMD_PULL_MASK 0x3
+#define TLMMV3_SDC1_RCLK_PULL_SHFT 15
+#define TLMMV3_SDC1_RCLK_PULL_MASK 0x3
+/* SDC2 PIN TYPE REG MASKS */
+#define TLMMV4_SDC2_CLK_DRV_SHFT 6
+#define TLMMV4_SDC2_CLK_DRV_MASK 0x7
+#define TLMMV4_SDC2_DATA_DRV_SHFT 0
+#define TLMMV4_SDC2_DATA_DRV_MASK 0x7
+#define TLMMV4_SDC2_CMD_DRV_SHFT 3
+#define TLMMV4_SDC2_CMD_DRV_MASK 0x7
+#define TLMMV4_SDC2_CLK_PULL_SHFT 14
+#define TLMMV4_SDC2_CLK_PULL_MASK 0x3
+#define TLMMV4_SDC2_DATA_PULL_SHFT 9
+#define TLMMV4_SDC2_DATA_PULL_MASK 0x3
+#define TLMMV4_SDC2_CMD_PULL_SHFT 11
+#define TLMMV4_SDC2_CMD_PULL_MASK 0x3
+/* TLMM V4 IRQ REG fields */
+#define INTR_ENABLE_BIT 0
+#define INTR_POL_CTL_BIT 1
+#define INTR_DECT_CTL_BIT 2
+#define INTR_RAW_STATUS_EN_BIT 4
+#define INTR_TARGET_PROC_BIT 5
+#define INTR_DIR_CONN_EN_BIT 8
+#define INTR_STATUS_BIT 0
+#define DC_POLARITY_BIT 8
+
+/* Target processors for TLMM pin based interrupts */
+#define INTR_TARGET_PROC_APPS(core_id) ((core_id) << INTR_TARGET_PROC_BIT)
+#define TLMMV4_APPS_ID_DEFAULT 4
+#define INTR_TARGET_PROC_NONE (7 << INTR_TARGET_PROC_BIT)
+/* Interrupt flag bits */
+#define DC_POLARITY_HI BIT(DC_POLARITY_BIT)
+#define INTR_POL_CTL_HI BIT(INTR_POL_CTL_BIT)
+#define INTR_DECT_CTL_LEVEL (0 << INTR_DECT_CTL_BIT)
+#define INTR_DECT_CTL_POS_EDGE (1 << INTR_DECT_CTL_BIT)
+#define INTR_DECT_CTL_NEG_EDGE (2 << INTR_DECT_CTL_BIT)
+#define INTR_DECT_CTL_DUAL_EDGE (3 << INTR_DECT_CTL_BIT)
+#define INTR_DECT_CTL_MASK (3 << INTR_DECT_CTL_BIT)
+
+#define TLMMV4_GP_INOUT_BIT 1
+#define TLMMV4_GP_OUT BIT(TLMMV4_GP_INOUT_BIT)
+#define TLMMV4_GP_IN 0
+
+#define gc_to_pintype(gc) \
+ container_of(gc, struct msm_pintype_info, gc)
+#define ic_to_pintype(ic) \
+ ((struct msm_pintype_info *)ic->pinfo)
+#define pintype_get_gc(pinfo) (&pinfo->gc)
+#define pintype_get_ic(pinfo) (pinfo->irq_chip)
+
+/* SDC Pin type register offsets */
+#define TLMMV4_SDC_OFFSET 0x0010A000
+#define TLMMV4_SDC1_CFG(base) (base)
+#define TLMMV4_SDC2_CFG(base) (TLMMV4_SDC1_CFG(base) - 0x1000)
+
+/* GP pin type register offsets */
+#define TLMMV4_GP_CFG(base, pin) (base + 0x0 + 0x1000 * (pin))
+#define TLMMV4_GP_INOUT(base, pin) (base + 0x4 + 0x1000 * (pin))
+#define TLMMV4_GP_INTR_CFG(base, pin) (base + 0x8 + 0x1000 * (pin))
+#define TLMMV4_GP_INTR_STATUS(base, pin) (base + 0xc + 0x1000 * (pin))
+
+/* QDSD Pin type register offsets */
+#define TLMMV4_QDSD_OFFSET 0x0019C000
+#define TLMMV4_QDSD_PULL_MASK 0x3
+#define TLMMV4_QDSD_PULL_OFFSET 0x3
+#define TLMMV4_QDSD_CONFIG_WIDTH 0x5
+#define TLMMV4_QDSD_DRV_MASK 0x7
+
+struct msm_sdc_regs {
+ unsigned int offset;
+ unsigned long pull_mask;
+ unsigned long pull_shft;
+ unsigned long drv_mask;
+ unsigned long drv_shft;
+};
+
+static struct msm_sdc_regs sdc_regs[] = {
+ /* SDC1 CLK */
+ {
+ .offset = 0,
+ .pull_mask = TLMMV4_SDC1_CLK_PULL_MASK,
+ .pull_shft = TLMMV4_SDC1_CLK_PULL_SHFT,
+ .drv_mask = TLMMV4_SDC1_CLK_DRV_MASK,
+ .drv_shft = TLMMV4_SDC1_CLK_DRV_SHFT,
+ },
+ /* SDC1 CMD */
+ {
+ .offset = 0,
+ .pull_mask = TLMMV4_SDC1_CMD_PULL_MASK,
+ .pull_shft = TLMMV4_SDC1_CMD_PULL_SHFT,
+ .drv_mask = TLMMV4_SDC1_CMD_DRV_MASK,
+ .drv_shft = TLMMV4_SDC1_CMD_DRV_SHFT,
+ },
+ /* SDC1 DATA */
+ {
+ .offset = 0,
+ .pull_mask = TLMMV4_SDC1_DATA_PULL_MASK,
+ .pull_shft = TLMMV4_SDC1_DATA_PULL_SHFT,
+ .drv_mask = TLMMV4_SDC1_DATA_DRV_MASK,
+ .drv_shft = TLMMV4_SDC1_DATA_DRV_SHFT,
+ },
+ /* SDC1 RCLK */
+ {
+ .offset = 0,
+ .pull_mask = TLMMV3_SDC1_RCLK_PULL_MASK,
+ .pull_shft = TLMMV3_SDC1_RCLK_PULL_SHFT,
+ },
+ /* SDC2 CLK */
+ {
+ .offset = 0x1000,
+ .pull_mask = TLMMV4_SDC2_CLK_PULL_MASK,
+ .pull_shft = TLMMV4_SDC2_CLK_PULL_SHFT,
+ .drv_mask = TLMMV4_SDC2_CLK_DRV_MASK,
+ .drv_shft = TLMMV4_SDC2_CLK_DRV_SHFT,
+ },
+ /* SDC2 CMD */
+ {
+ .offset = 0x1000,
+ .pull_mask = TLMMV4_SDC2_CMD_PULL_MASK,
+ .pull_shft = TLMMV4_SDC2_CMD_PULL_SHFT,
+ .drv_mask = TLMMV4_SDC2_CMD_DRV_MASK,
+ .drv_shft = TLMMV4_SDC2_CMD_DRV_SHFT,
+ },
+ /* SDC2 DATA */
+ {
+ .offset = 0x1000,
+ .pull_mask = TLMMV4_SDC2_DATA_PULL_MASK,
+ .pull_shft = TLMMV4_SDC2_DATA_PULL_SHFT,
+ .drv_mask = TLMMV4_SDC2_DATA_DRV_MASK,
+ .drv_shft = TLMMV4_SDC2_DATA_DRV_SHFT,
+ },
+};
+
+/*
+ * Read or write one pinconf parameter (bias/pull or drive strength)
+ * for an SDC1/SDC2 special-function pin.
+ * @pin_no:   index into sdc_regs[] selecting clk/cmd/data/rclk
+ * @config:   packed pinconf param+argument; rewritten in place on reads
+ * @reg_base: SDC1 config register (SDC2's register sits 0x1000 below it)
+ * @write:    true to program the register, false to query it
+ * Returns 0 on success, -EINVAL for an unknown pin or config param.
+ */
+static int msm_tlmm_v4_sdc_cfg(uint pin_no, unsigned long *config,
+						void __iomem *reg_base,
+						bool write)
+{
+	unsigned int val, id, data;
+	u32 mask, shft;
+	void __iomem *cfg_reg;
+
+	if (pin_no >= ARRAY_SIZE(sdc_regs))
+		return -EINVAL;
+
+	/* per-pin offset is subtracted: SDC2_CFG(base) = SDC1_CFG(base) - 0x1000 */
+	cfg_reg = reg_base - sdc_regs[pin_no].offset;
+	id = pinconf_to_config_param(*config);
+	val = readl_relaxed(cfg_reg);
+	/* Get mask and shft values for this config type */
+	switch (id) {
+	case PIN_CONFIG_BIAS_DISABLE:
+		mask = sdc_regs[pin_no].pull_mask;
+		shft = sdc_regs[pin_no].pull_shft;
+		data = TLMMV4_NO_PULL;
+		if (!write) {
+			val >>= shft;
+			val &= mask;
+			data = rval_to_pull(val);
+		}
+		break;
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		mask = sdc_regs[pin_no].pull_mask;
+		shft = sdc_regs[pin_no].pull_shft;
+		data = TLMMV4_PULL_DOWN;
+		if (!write) {
+			val >>= shft;
+			val &= mask;
+			data = rval_to_pull(val);
+		}
+		break;
+	case PIN_CONFIG_BIAS_PULL_UP:
+		mask = sdc_regs[pin_no].pull_mask;
+		shft = sdc_regs[pin_no].pull_shft;
+		data = TLMMV4_PULL_UP;
+		if (!write) {
+			val >>= shft;
+			val &= mask;
+			data = rval_to_pull(val);
+		}
+		break;
+	case PIN_CONFIG_DRIVE_STRENGTH:
+		mask = sdc_regs[pin_no].drv_mask;
+		shft = sdc_regs[pin_no].drv_shft;
+		if (write) {
+			data = pinconf_to_config_argument(*config);
+			/* translate mA drive strength to register encoding */
+			data = drv_str_to_rval(data);
+		} else {
+			val >>= shft;
+			val &= mask;
+			data = rval_to_drv_str(val);
+		}
+		break;
+	default:
+		return -EINVAL;
+	};
+
+	if (write) {
+		/* read-modify-write only the field selected above */
+		val &= ~(mask << shft);
+		val |= (data << shft);
+		writel_relaxed(val, cfg_reg);
+	} else
+		*config = pinconf_to_config_packed(id, data);
+	return 0;
+}
+
+static void msm_tlmm_v4_sdc_set_reg_base(void __iomem **ptype_base,
+ void __iomem *tlmm_base)
+{
+ *ptype_base = tlmm_base + TLMMV4_SDC_OFFSET;
+}
+
+static int msm_tlmm_v4_qdsd_cfg(uint pin_no, unsigned long *config,
+ void __iomem *reg_base,
+ bool write)
+{
+ unsigned int val, id, data;
+ u32 mask, shft;
+ void __iomem *cfg_reg;
+
+ cfg_reg = reg_base;
+ id = pinconf_to_config_param(*config);
+ val = readl_relaxed(cfg_reg);
+ /* Get mask and shft values for this config type */
+ switch (id) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ mask = TLMMV4_QDSD_PULL_MASK;
+ shft = pin_no * TLMMV4_QDSD_CONFIG_WIDTH
+ + TLMMV4_QDSD_PULL_OFFSET;
+ data = TLMMV4_NO_PULL;
+ if (!write) {
+ val >>= shft;
+ data = val & mask;
+ }
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ mask = TLMMV4_QDSD_PULL_MASK;
+ shft = pin_no * TLMMV4_QDSD_CONFIG_WIDTH
+ + TLMMV4_QDSD_PULL_OFFSET;
+ data = TLMMV4_PULL_DOWN;
+ if (!write) {
+ val >>= shft;
+ data = val & mask;
+ }
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ mask = TLMMV4_QDSD_PULL_MASK;
+ shft = pin_no * TLMMV4_QDSD_CONFIG_WIDTH
+ + TLMMV4_QDSD_PULL_OFFSET;
+ data = TLMMV4_PULL_UP;
+ if (!write) {
+ val >>= shft;
+ data = val & mask;
+ }
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ mask = TLMMV4_QDSD_DRV_MASK;
+ shft = pin_no * TLMMV4_QDSD_CONFIG_WIDTH;
+ if (write) {
+ data = pinconf_to_config_argument(*config);
+ } else {
+ val >>= shft;
+ data = val & mask;
+ }
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ if (write) {
+ val &= ~(mask << shft);
+ /* QDSD software override bit */
+ val |= ((data << shft) | BIT(31));
+ writel_relaxed(val, cfg_reg);
+ } else {
+ *config = pinconf_to_config_packed(id, data);
+ }
+ return 0;
+}
+
+static void msm_tlmm_v4_qdsd_set_reg_base(void __iomem **ptype_base,
+ void __iomem *tlmm_base)
+{
+ *ptype_base = tlmm_base + TLMMV4_QDSD_OFFSET;
+}
+
+
+/*
+ * Read or write one pinconf parameter for a general-purpose GPIO pin.
+ * Pull, drive strength and direction live in the per-pin CFG register;
+ * the logical output level is programmed via the separate INOUT register.
+ * @pin_no:   gpio number within the GP pintype
+ * @config:   packed pinconf param+argument; rewritten in place on reads
+ * @reg_base: GP pintype register base
+ * @write:    true to program the register(s), false to query
+ * Returns 0 on success, -EINVAL for unsupported config params.
+ */
+static int msm_tlmm_v4_gp_cfg(uint pin_no, unsigned long *config,
+						void *reg_base, bool write)
+{
+	unsigned int val, id, data, inout_val;
+	u32 mask = 0, shft = 0;
+	void __iomem *inout_reg = NULL;
+	void __iomem *cfg_reg = TLMMV4_GP_CFG(reg_base, pin_no);
+
+	id = pinconf_to_config_param(*config);
+	val = readl_relaxed(cfg_reg);
+	/* Get mask and shft values for this config type */
+	switch (id) {
+	case PIN_CONFIG_BIAS_DISABLE:
+		mask = TLMMV4_GP_PULL_MASK;
+		shft = TLMMV4_GP_PULL_SHFT;
+		data = TLMMV4_NO_PULL;
+		if (!write) {
+			val >>= shft;
+			val &= mask;
+			data = rval_to_pull(val);
+		}
+		break;
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		mask = TLMMV4_GP_PULL_MASK;
+		shft = TLMMV4_GP_PULL_SHFT;
+		data = TLMMV4_PULL_DOWN;
+		if (!write) {
+			val >>= shft;
+			val &= mask;
+			data = rval_to_pull(val);
+		}
+		break;
+	case PIN_CONFIG_BIAS_PULL_UP:
+		mask = TLMMV4_GP_PULL_MASK;
+		shft = TLMMV4_GP_PULL_SHFT;
+		data = TLMMV4_PULL_UP;
+		if (!write) {
+			val >>= shft;
+			val &= mask;
+			data = rval_to_pull(val);
+		}
+		break;
+	case PIN_CONFIG_DRIVE_STRENGTH:
+		mask = TLMMV4_GP_DRV_MASK;
+		shft = TLMMV4_GP_DRV_SHFT;
+		if (write) {
+			data = pinconf_to_config_argument(*config);
+			/* translate mA drive strength to register encoding */
+			data = drv_str_to_rval(data);
+		} else {
+			val >>= shft;
+			val &= mask;
+			data = rval_to_drv_str(val);
+		}
+		break;
+	case PIN_CONFIG_OUTPUT:
+		mask = TLMMV4_GP_DIR_MASK;
+		shft = TLMMV4_GP_DIR_SHFT;
+		inout_reg = TLMMV4_GP_INOUT(reg_base, pin_no);
+		if (write) {
+			data = pinconf_to_config_argument(*config);
+			/* program the output level first ... */
+			inout_val = dir_to_inout_val(data);
+			writel_relaxed(inout_val, inout_reg);
+			/* ... then set OE below (mask is the single dir bit) */
+			data = mask;
+		} else {
+			inout_val = readl_relaxed(inout_reg);
+			data = inout_val_to_dir(inout_val);
+		}
+		break;
+	default:
+		return -EINVAL;
+	};
+
+	if (write) {
+		/* read-modify-write only the field selected above */
+		val &= ~(mask << shft);
+		val |= (data << shft);
+		writel_relaxed(val, cfg_reg);
+	} else
+		*config = pinconf_to_config_packed(id, data);
+	return 0;
+}
+
+static void msm_tlmm_v4_gp_fn(uint pin_no, u32 func, void *reg_base,
+ bool enable)
+{
+ unsigned int val;
+ void __iomem *cfg_reg = TLMMV4_GP_CFG(reg_base, pin_no);
+ val = readl_relaxed(cfg_reg);
+ val &= ~(TLMMV4_GP_FUNC_MASK << TLMMV4_GP_FUNC_SHFT);
+ if (enable)
+ val |= (func << TLMMV4_GP_FUNC_SHFT);
+ writel_relaxed(val, cfg_reg);
+}
+
+static void msm_tlmm_v4_gp_set_reg_base(void __iomem **ptype_base,
+ void __iomem *tlmm_base)
+{
+ *ptype_base = tlmm_base;
+}
+
+/* GPIO CHIP */
+static int msm_tlmm_v4_gp_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct msm_pintype_info *pinfo = gc_to_pintype(gc);
+ void __iomem *inout_reg = TLMMV4_GP_INOUT(pinfo->reg_base, offset);
+
+ return readl_relaxed(inout_reg) & BIT(GPIO_IN_BIT);
+}
+
+static void msm_tlmm_v4_gp_set(struct gpio_chip *gc, unsigned offset, int val)
+{
+ struct msm_pintype_info *pinfo = gc_to_pintype(gc);
+ void __iomem *inout_reg = TLMMV4_GP_INOUT(pinfo->reg_base, offset);
+
+ writel_relaxed(val ? BIT(GPIO_OUT_BIT) : 0, inout_reg);
+}
+
+static int msm_tlmm_v4_gp_dir_in(struct gpio_chip *gc, unsigned offset)
+{
+ unsigned int val;
+ struct msm_pintype_info *pinfo = gc_to_pintype(gc);
+ void __iomem *cfg_reg = TLMMV4_GP_CFG(pinfo->reg_base, offset);
+
+ val = readl_relaxed(cfg_reg);
+ val &= ~BIT(GPIO_OE_BIT);
+ writel_relaxed(val, cfg_reg);
+ return 0;
+}
+
+static int msm_tlmm_v4_gp_dir_out(struct gpio_chip *gc, unsigned offset,
+ int val)
+{
+ struct msm_pintype_info *pinfo = gc_to_pintype(gc);
+ void __iomem *cfg_reg = TLMMV4_GP_CFG(pinfo->reg_base, offset);
+
+ msm_tlmm_v4_gp_set(gc, offset, val);
+ val = readl_relaxed(cfg_reg);
+ val |= BIT(GPIO_OE_BIT);
+ writel_relaxed(val, cfg_reg);
+ return 0;
+}
+
+static int msm_tlmm_v4_gp_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct msm_pintype_info *pinfo = gc_to_pintype(gc);
+ struct msm_tlmm_irq_chip *ic = pintype_get_ic(pinfo);
+ return irq_create_mapping(ic->domain, offset);
+}
+
+/* Irq reg ops */
+static void msm_tlmm_v4_set_intr_status(struct msm_tlmm_irq_chip *ic,
+ unsigned pin)
+{
+ void __iomem *status_reg = TLMMV4_GP_INTR_STATUS(ic->chip_base, pin);
+ writel_relaxed(0, status_reg);
+}
+
+static int msm_tlmm_v4_get_intr_status(struct msm_tlmm_irq_chip *ic,
+ unsigned pin)
+{
+ void __iomem *status_reg = TLMMV4_GP_INTR_STATUS(ic->chip_base, pin);
+ return readl_relaxed(status_reg) & BIT(INTR_STATUS_BIT);
+}
+
+static void msm_tlmm_v4_set_intr_cfg_enable(struct msm_tlmm_irq_chip *ic,
+ unsigned pin,
+ int enable)
+{
+ unsigned int val;
+ void __iomem *cfg_reg = TLMMV4_GP_INTR_CFG(ic->chip_base, pin);
+
+ val = readl_relaxed(cfg_reg);
+ if (enable) {
+ val &= ~BIT(INTR_DIR_CONN_EN_BIT);
+ val |= BIT(INTR_ENABLE_BIT);
+ } else
+ val &= ~BIT(INTR_ENABLE_BIT);
+ writel_relaxed(val, cfg_reg);
+}
+
+static int msm_tlmm_v4_get_intr_cfg_enable(struct msm_tlmm_irq_chip *ic,
+ unsigned pin)
+{
+ void __iomem *cfg_reg = TLMMV4_GP_INTR_CFG(ic->chip_base, pin);
+ return readl_relaxed(cfg_reg) & BIT(INTR_ENABLE_BIT);
+}
+
+static void msm_tlmm_v4_set_intr_cfg_type(struct msm_tlmm_irq_chip *ic,
+ struct irq_data *d,
+ unsigned int type)
+{
+ unsigned cfg;
+ void __iomem *cfg_reg = TLMMV4_GP_INTR_CFG(ic->chip_base,
+ (irqd_to_hwirq(d)));
+ /*
+ * RAW_STATUS_EN is left on for all gpio irqs. Due to the
+ * internal circuitry of TLMM, toggling the RAW_STATUS
+ * could cause the INTR_STATUS to be set for EDGE interrupts.
+ */
+ cfg = BIT(INTR_RAW_STATUS_EN_BIT) | INTR_TARGET_PROC_APPS(ic->apps_id);
+ writel_relaxed(cfg, cfg_reg);
+ cfg &= ~INTR_DECT_CTL_MASK;
+ if (type == IRQ_TYPE_EDGE_RISING)
+ cfg |= INTR_DECT_CTL_POS_EDGE;
+ else if (type == IRQ_TYPE_EDGE_FALLING)
+ cfg |= INTR_DECT_CTL_NEG_EDGE;
+ else if (type == IRQ_TYPE_EDGE_BOTH)
+ cfg |= INTR_DECT_CTL_DUAL_EDGE;
+ else
+ cfg |= INTR_DECT_CTL_LEVEL;
+
+ if (type & IRQ_TYPE_LEVEL_LOW)
+ cfg &= ~INTR_POL_CTL_HI;
+ else
+ cfg |= INTR_POL_CTL_HI;
+
+ writel_relaxed(cfg, cfg_reg);
+ /*
+ * Sometimes it might take a little while to update
+ * the interrupt status after the RAW_STATUS is enabled
+ * We clear the interrupt status before enabling the
+ * interrupt in the unmask call-back.
+ */
+ udelay(5);
+}
+
+/*
+ * Per-pintype handler for the GP summary interrupt: walks the
+ * enabled-irq bitmap and, for each pin whose status bit is set,
+ * maps the hw irq to its Linux virq and dispatches it.
+ * Returns IRQ_HANDLED when all fired pins were dispatched,
+ * IRQ_NONE if a pin had no valid virq mapping.
+ */
+static irqreturn_t msm_tlmm_v4_gp_handle_irq(int irq,
+						struct msm_tlmm_irq_chip *ic)
+{
+	unsigned long i;
+	unsigned int virq = 0;
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct msm_pintype_info *pinfo = ic_to_pintype(ic);
+	struct gpio_chip *gc = pintype_get_gc(pinfo);
+
+	chained_irq_enter(chip, desc);
+	for_each_set_bit(i, ic->enabled_irqs, ic->num_irqs) {
+		dev_dbg(ic->dev, "hwirq in bit mask %d\n", (unsigned int)i);
+		if (msm_tlmm_v4_get_intr_status(ic, i)) {
+			dev_dbg(ic->dev, "hwirw %d fired\n", (unsigned int)i);
+			virq = msm_tlmm_v4_gp_to_irq(gc, i);
+			if (!virq) {
+				/* NOTE(review): early return skips
+				 * chained_irq_exit() — confirm intended */
+				dev_dbg(ic->dev, "invalid virq\n");
+				return IRQ_NONE;
+			}
+			generic_handle_irq(virq);
+		}
+
+	}
+	chained_irq_exit(chip, desc);
+	return IRQ_HANDLED;
+}
+
+static void msm_tlmm_v4_irq_ack(struct irq_data *d)
+{
+ struct msm_tlmm_irq_chip *ic = irq_data_get_irq_chip_data(d);
+
+ msm_tlmm_v4_set_intr_status(ic, irqd_to_hwirq(d));
+ mb();
+}
+
+static void msm_tlmm_v4_irq_mask(struct irq_data *d)
+{
+ unsigned long irq_flags;
+ struct msm_tlmm_irq_chip *ic = irq_data_get_irq_chip_data(d);
+
+ spin_lock_irqsave(&ic->irq_lock, irq_flags);
+ msm_tlmm_v4_set_intr_cfg_enable(ic, irqd_to_hwirq(d), 0);
+ __clear_bit(irqd_to_hwirq(d), ic->enabled_irqs);
+ mb();
+ spin_unlock_irqrestore(&ic->irq_lock, irq_flags);
+ if (ic->irq_chip_extn->irq_mask)
+ ic->irq_chip_extn->irq_mask(d);
+}
+
+static void msm_tlmm_v4_irq_unmask(struct irq_data *d)
+{
+ unsigned long irq_flags;
+ struct msm_tlmm_irq_chip *ic = irq_data_get_irq_chip_data(d);
+
+ spin_lock_irqsave(&ic->irq_lock, irq_flags);
+ __set_bit(irqd_to_hwirq(d), ic->enabled_irqs);
+ if (!msm_tlmm_v4_get_intr_cfg_enable(ic, irqd_to_hwirq(d))) {
+ msm_tlmm_v4_set_intr_status(ic, irqd_to_hwirq(d));
+ msm_tlmm_v4_set_intr_cfg_enable(ic, irqd_to_hwirq(d), 1);
+ mb();
+ }
+ spin_unlock_irqrestore(&ic->irq_lock, irq_flags);
+ if (ic->irq_chip_extn->irq_unmask)
+ ic->irq_chip_extn->irq_unmask(d);
+}
+
+static void msm_tlmm_v4_irq_disable(struct irq_data *d)
+{
+ struct msm_tlmm_irq_chip *ic = irq_data_get_irq_chip_data(d);
+ if (ic->irq_chip_extn->irq_disable)
+ ic->irq_chip_extn->irq_disable(d);
+}
+
+static int msm_tlmm_v4_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ unsigned long irq_flags;
+ unsigned int pin = irqd_to_hwirq(d);
+ struct msm_tlmm_irq_chip *ic = irq_data_get_irq_chip_data(d);
+
+
+ spin_lock_irqsave(&ic->irq_lock, irq_flags);
+
+ if (flow_type & IRQ_TYPE_EDGE_BOTH) {
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
+ if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
+ __set_bit(pin, ic->dual_edge_irqs);
+ else
+ __clear_bit(pin, ic->dual_edge_irqs);
+ } else {
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+ __clear_bit(pin, ic->dual_edge_irqs);
+ }
+
+ msm_tlmm_v4_set_intr_cfg_type(ic, d, flow_type);
+
+ mb();
+ spin_unlock_irqrestore(&ic->irq_lock, irq_flags);
+
+ if (ic->irq_chip_extn->irq_set_type)
+ ic->irq_chip_extn->irq_set_type(d, flow_type);
+
+ return 0;
+}
+
+static int msm_tlmm_v4_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ unsigned int pin = irqd_to_hwirq(d);
+ struct msm_tlmm_irq_chip *ic = irq_data_get_irq_chip_data(d);
+
+ if (on) {
+ if (bitmap_empty(ic->wake_irqs, ic->num_irqs))
+ irq_set_irq_wake(ic->irq, 1);
+ set_bit(pin, ic->wake_irqs);
+ } else {
+ clear_bit(pin, ic->wake_irqs);
+ if (bitmap_empty(ic->wake_irqs, ic->num_irqs))
+ irq_set_irq_wake(ic->irq, 0);
+ }
+
+ if (ic->irq_chip_extn->irq_set_wake)
+ ic->irq_chip_extn->irq_set_wake(d, on);
+
+ return 0;
+}
+
+static struct lock_class_key msm_tlmm_irq_lock_class;
+
+static int msm_tlmm_v4_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct msm_tlmm_irq_chip *ic = h->host_data;
+
+ irq_set_lockdep_class(virq, &msm_tlmm_irq_lock_class);
+ irq_set_chip_data(virq, ic);
+ irq_set_chip_and_handler(virq, &ic->chip,
+ handle_level_irq);
+ set_irq_flags(virq, IRQF_VALID);
+ return 0;
+}
+
+/*
+ * irq domain callbacks for interrupt controller.
+ */
+static const struct irq_domain_ops msm_tlmm_v4_gp_irqd_ops = {
+ .map = msm_tlmm_v4_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static struct irq_chip mpm_tlmm_irq_extn;
+
+static struct msm_tlmm_irq_chip msm_tlmm_v4_gp_irq = {
+ .irq_chip_extn = &mpm_tlmm_irq_extn,
+ .chip = {
+ .name = "msm_tlmm_v4_irq",
+ .irq_mask = msm_tlmm_v4_irq_mask,
+ .irq_unmask = msm_tlmm_v4_irq_unmask,
+ .irq_ack = msm_tlmm_v4_irq_ack,
+ .irq_set_type = msm_tlmm_v4_irq_set_type,
+ .irq_set_wake = msm_tlmm_v4_irq_set_wake,
+ .irq_disable = msm_tlmm_v4_irq_disable,
+ },
+ .apps_id = TLMMV4_APPS_ID_DEFAULT,
+ .domain_ops = &msm_tlmm_v4_gp_irqd_ops,
+ .handler = msm_tlmm_v4_gp_handle_irq,
+};
+
+/* Power management core operations */
+
+static int msm_tlmm_v4_gp_irq_suspend(void)
+{
+ unsigned long irq_flags;
+ unsigned long i;
+ struct msm_tlmm_irq_chip *ic = &msm_tlmm_v4_gp_irq;
+ int num_irqs = ic->num_irqs;
+
+ spin_lock_irqsave(&ic->irq_lock, irq_flags);
+ for_each_set_bit(i, ic->enabled_irqs, num_irqs)
+ msm_tlmm_v4_set_intr_cfg_enable(ic, i, 0);
+
+ for_each_set_bit(i, ic->wake_irqs, num_irqs)
+ msm_tlmm_v4_set_intr_cfg_enable(ic, i, 1);
+ mb();
+ spin_unlock_irqrestore(&ic->irq_lock, irq_flags);
+ return 0;
+}
+
+static void msm_tlmm_v4_gp_irq_resume(void)
+{
+ unsigned long irq_flags;
+ unsigned long i;
+ struct msm_tlmm_irq_chip *ic = &msm_tlmm_v4_gp_irq;
+ int num_irqs = ic->num_irqs;
+
+ spin_lock_irqsave(&ic->irq_lock, irq_flags);
+ for_each_set_bit(i, ic->wake_irqs, num_irqs)
+ msm_tlmm_v4_set_intr_cfg_enable(ic, i, 0);
+
+ for_each_set_bit(i, ic->enabled_irqs, num_irqs)
+ msm_tlmm_v4_set_intr_cfg_enable(ic, i, 1);
+ mb();
+ spin_unlock_irqrestore(&ic->irq_lock, irq_flags);
+}
+
+static struct syscore_ops msm_tlmm_v4_irq_syscore_ops = {
+ .suspend = msm_tlmm_v4_gp_irq_suspend,
+ .resume = msm_tlmm_v4_gp_irq_resume,
+};
+
+#ifdef CONFIG_USE_PINCTRL_IRQ
+/*
+ * Set up the linear irq domain for TLMM gpio interrupts from DT.
+ * Reads the required "num_irqs" property and the optional "apps_id"
+ * (target processor id) property from @controller.
+ * Returns 0 on success or a negative errno.
+ */
+int msm_tlmm_v4_of_irq_init(struct device_node *controller,
+						struct irq_chip *chip_extn)
+{
+	int ret, num_irqs, apps_id;
+	struct msm_tlmm_irq_chip *ic = &msm_tlmm_v4_gp_irq;
+
+	ret = of_property_read_u32(controller, "num_irqs", &num_irqs);
+	if (ret) {
+		WARN(1, "Cannot get numirqs from device tree\n");
+		return ret;
+	}
+	/* apps_id is optional; keep the compiled-in default when absent */
+	ret = of_property_read_u32(controller, "apps_id", &apps_id);
+	if (!ret) {
+		pr_info("processor id specified, in device tree %d\n", apps_id);
+		ic->apps_id = apps_id;
+	}
+	ic->num_irqs = num_irqs;
+	ic->domain = irq_domain_add_linear(controller, ic->num_irqs,
+						ic->domain_ops,
+						ic);
+	/*
+	 * irq_domain_add_linear() returns NULL on failure, not an
+	 * ERR_PTR, so the original IS_ERR() check could never fire.
+	 */
+	if (!ic->domain)
+		return -ENOMEM;
+	ic->irq_chip_extn = chip_extn;
+	return 0;
+}
+#endif
+
+/*
+ * Allocate the per-chip irq bookkeeping bitmaps (enabled / dual-edge /
+ * wake) and register the syscore suspend/resume hooks.  A missing irq
+ * domain means TLMM pin interrupts are unused: return success.
+ * Returns 0 on success or -ENOMEM on allocation failure.
+ */
+static int msm_tlmm_v4_gp_irq_init(int irq, struct msm_pintype_info *pinfo,
+						struct device *tlmm_dev)
+{
+	int num_irqs;
+	struct msm_tlmm_irq_chip *ic = pinfo->irq_chip;
+
+	if (!ic->domain)
+		return 0;
+
+	num_irqs = ic->num_irqs;
+	/*
+	 * devm_kzalloc() returns NULL on failure, never an ERR_PTR.
+	 * The original IS_ERR()/PTR_ERR() checks were dead code and
+	 * would have returned 0 (false success) on OOM.
+	 */
+	ic->enabled_irqs = devm_kzalloc(tlmm_dev, sizeof(unsigned long)
+					* BITS_TO_LONGS(num_irqs), GFP_KERNEL);
+	if (!ic->enabled_irqs) {
+		dev_err(tlmm_dev, "Unable to allocate enabled irqs bitmap\n");
+		return -ENOMEM;
+	}
+	ic->dual_edge_irqs = devm_kzalloc(tlmm_dev, sizeof(unsigned long)
+					* BITS_TO_LONGS(num_irqs), GFP_KERNEL);
+	if (!ic->dual_edge_irqs) {
+		dev_err(tlmm_dev, "Unable to allocate dual edge irqs bitmap\n");
+		return -ENOMEM;
+	}
+	ic->wake_irqs = devm_kzalloc(tlmm_dev, sizeof(unsigned long)
+					* BITS_TO_LONGS(num_irqs), GFP_KERNEL);
+	if (!ic->wake_irqs) {
+		dev_err(tlmm_dev, "Unable to allocate wake irqs bitmap\n");
+		return -ENOMEM;
+	}
+	spin_lock_init(&ic->irq_lock);
+	ic->chip_base = pinfo->reg_base;
+	ic->irq = irq;
+	ic->dev = tlmm_dev;
+	/*
+	 * NOTE(review): the bitmaps above were sized from the DT-provided
+	 * num_irqs; overwriting with num_pins assumes num_pins <= num_irqs
+	 * or later bitmap walks could run past the allocation — confirm.
+	 */
+	ic->num_irqs = pinfo->num_pins;
+	ic->pinfo = pinfo;
+	register_syscore_ops(&msm_tlmm_v4_irq_syscore_ops);
+	return 0;
+}
+
+/*
+ * Top-level TLMM summary interrupt handler registered in probe:
+ * forwards the summary irq to each pintype that has an irq chip
+ * with an initialized device node, stopping at the first handler
+ * that does not report IRQ_HANDLED.
+ */
+static irqreturn_t msm_tlmm_v4_handle_irq(int irq, void *data)
+{
+	int i, num_pintypes;
+	struct msm_pintype_info *pintypes, *pintype;
+	struct msm_tlmm_irq_chip *ic;
+	struct msm_tlmm_desc *tlmm_desc = (struct msm_tlmm_desc *)data;
+	irqreturn_t ret = IRQ_NONE;
+
+	pintypes = tlmm_desc->pintypes;
+	num_pintypes = tlmm_desc->num_pintypes;
+	for (i = 0; i < num_pintypes; i++) {
+		pintype = &pintypes[i];
+		/* skip pintypes without interrupt support */
+		if (!pintype->irq_chip)
+			continue;
+		ic = pintype->irq_chip;
+		if (!ic->node)
+			continue;
+		ret = ic->handler(irq, ic);
+		if (ret != IRQ_HANDLED)
+			break;
+	}
+	return ret;
+}
+
+static struct msm_pintype_info tlmm_v4_pininfo[] = {
+ {
+ .prg_cfg = msm_tlmm_v4_gp_cfg,
+ .prg_func = msm_tlmm_v4_gp_fn,
+ .set_reg_base = msm_tlmm_v4_gp_set_reg_base,
+ .reg_base = NULL,
+ .prop_name = "qcom,pin-type-gp",
+ .name = "gp",
+ .gc = {
+ .label = "msm_tlmm_v4_gpio",
+ .direction_input = msm_tlmm_v4_gp_dir_in,
+ .direction_output = msm_tlmm_v4_gp_dir_out,
+ .get = msm_tlmm_v4_gp_get,
+ .set = msm_tlmm_v4_gp_set,
+ .to_irq = msm_tlmm_v4_gp_to_irq,
+ },
+ .init_irq = msm_tlmm_v4_gp_irq_init,
+ .irq_chip = &msm_tlmm_v4_gp_irq,
+ },
+ {
+ .prg_cfg = msm_tlmm_v4_sdc_cfg,
+ .set_reg_base = msm_tlmm_v4_sdc_set_reg_base,
+ .reg_base = NULL,
+ .prop_name = "qcom,pin-type-sdc",
+ .name = "sdc",
+ },
+ {
+ .prg_cfg = msm_tlmm_v4_qdsd_cfg,
+ .set_reg_base = msm_tlmm_v4_qdsd_set_reg_base,
+ .reg_base = NULL,
+ .prop_name = "qcom,pin-type-qdsd",
+ .name = "qdsd",
+ }
+};
+
+struct msm_tlmm_pintype tlmm_v4_pintypes = {
+ .num_entries = ARRAY_SIZE(tlmm_v4_pininfo),
+ .pintype_info = tlmm_v4_pininfo,
+};
+
+static const struct of_device_id msm_tlmm_v4_dt_match[] = {
+ { .compatible = "qcom,msm-tlmm-v4",
+ .data = &tlmm_v4_pintypes, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, msm_tlmm_v4_dt_match);
+
+/*
+ * Probe the TLMM v4 pin controller: map the TLMM register block,
+ * optionally hook the summary interrupt, and hand the pintype table
+ * to the common msm_pinctrl core.
+ * Returns 0 on success or a negative errno.
+ */
+static int msm_tlmm_v4_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	const struct msm_tlmm_pintype *pinfo;
+	struct msm_tlmm_desc *tlmm_desc;
+	int irq, ret;
+	struct resource *res;
+	struct device_node *node = pdev->dev.of_node;
+
+	/* of_match_node() returns NULL when nothing matches, not an ERR_PTR */
+	match = of_match_node(msm_tlmm_v4_dt_match, node);
+	if (!match)
+		return -ENODEV;
+	pinfo = match->data;
+	tlmm_desc = devm_kzalloc(&pdev->dev, sizeof(*tlmm_desc), GFP_KERNEL);
+	if (!tlmm_desc) {
+		dev_err(&pdev->dev, "Allocation failed for tlmm desc\n");
+		return -ENOMEM;
+	}
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "cannot find IO resource\n");
+		return -ENOENT;
+	}
+	tlmm_desc->base = devm_ioremap(&pdev->dev, res->start,
+						resource_size(res));
+	/*
+	 * devm_ioremap() returns NULL on failure, not an ERR_PTR; the
+	 * original IS_ERR() check could never fire and a failed mapping
+	 * would have been used as a NULL register base.
+	 */
+	if (!tlmm_desc->base)
+		return -ENOMEM;
+	tlmm_desc->irq = -EINVAL;
+	/* the summary IRQ is optional; pin irqs stay disabled without it */
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (res) {
+		irq = res->start;
+		ret = devm_request_irq(&pdev->dev, irq, msm_tlmm_v4_handle_irq,
+							IRQF_TRIGGER_HIGH,
+							dev_name(&pdev->dev),
+							tlmm_desc);
+		if (ret) {
+			dev_err(&pdev->dev, "register for irq failed\n");
+			return ret;
+		}
+		tlmm_desc->irq = irq;
+	}
+	tlmm_desc->pintypes = pinfo->pintype_info;
+	tlmm_desc->num_pintypes = pinfo->num_entries;
+	return msm_pinctrl_probe(pdev, tlmm_desc);
+}
+
+static struct platform_driver msm_tlmm_v4_drv = {
+ .probe = msm_tlmm_v4_probe,
+ .driver = {
+ .name = "msm-tlmmv4-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(msm_tlmm_v4_dt_match),
+ },
+};
+
+static int __init msm_tlmm_v4_drv_register(void)
+{
+ return platform_driver_register(&msm_tlmm_v4_drv);
+}
+postcore_initcall(msm_tlmm_v4_drv_register);
+
+static void __exit msm_tlmm_v4_drv_unregister(void)
+{
+ platform_driver_unregister(&msm_tlmm_v4_drv);
+}
+module_exit(msm_tlmm_v4_drv_unregister);
+
+/* "GPLv2" is not in the kernel's license_is_gpl_compatible() list;
+ * the recognized spelling is "GPL v2", otherwise the module is
+ * treated as proprietary and taints the kernel.
+ */
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-msm.c b/drivers/pinctrl/pinctrl-msm.c
new file mode 100644
index 000000000000..c8a7f4435683
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-msm.c
@@ -0,0 +1,863 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "core.h"
+#include "pinconf.h"
+#include "pinctrl-msm.h"
+
+/**
+ * struct msm_pinctrl_dd: represents the pinctrl driver data.
+ * @base: virtual base of TLMM.
+ * @irq: interrupt number for TLMM summary interrupt.
+ * @num_pins: Number of total pins present on TLMM.
+ * @msm_pindesc: list of descriptors for each pin.
+ * @num_pintypes: number of pintypes on TLMM.
+ * @msm_pintype: points to the representation of all pin types supported.
+ * @pctl: pin controller instance managed by the driver.
+ * @pctl_dev: pin controller descriptor registered with the pinctrl subsystem.
+ * @pin_grps: list of pin groups available to the driver.
+ * @num_grps: number of groups.
+ * @pmx_funcs: list of pin functions available to the driver.
+ * @num_funcs: number of functions.
+ * @dev: pin control device.
+ */
+struct msm_pinctrl_dd {
+	void __iomem *base;
+	int	irq;
+	unsigned int num_pins;
+	struct msm_pindesc *msm_pindesc;
+	unsigned int num_pintypes;
+	struct msm_pintype_info *msm_pintype;
+	struct pinctrl_desc pctl;
+	struct pinctrl_dev *pctl_dev;
+	struct msm_pin_grps *pin_grps;
+	unsigned int num_grps;
+	struct msm_pmx_funcs *pmx_funcs;
+	unsigned int num_funcs;
+	struct device *dev;
+};
+
+/**
+ * struct msm_irq_of_info: represents OF init data for TLMM interrupt
+ * controllers.
+ * @compat: compatible string for a TLMM interrupt controller instance.
+ * @irq_init: irq chip initialization callback.
+ */
+struct msm_irq_of_info {
+	const char *compat;
+	int (*irq_init)(struct device_node *np, struct irq_chip *ic);
+};
+
+/* pinmux_ops: report how many pinmux functions this controller exposes. */
+static int msm_pmx_functions_count(struct pinctrl_dev *pctldev)
+{
+	struct msm_pinctrl_dd *dd = pinctrl_dev_get_drvdata(pctldev);
+
+	return dd->num_funcs;
+}
+
+/* pinmux_ops: name of the function at @selector. */
+static const char *msm_pmx_get_fname(struct pinctrl_dev *pctldev,
+						unsigned selector)
+{
+	struct msm_pinctrl_dd *dd = pinctrl_dev_get_drvdata(pctldev);
+
+	return dd->pmx_funcs[selector].name;
+}
+
+/* pinmux_ops: list the pin groups that can provide function @selector. */
+static int msm_pmx_get_groups(struct pinctrl_dev *pctldev,
+		unsigned selector, const char * const **groups,
+		unsigned * const num_groups)
+{
+	struct msm_pinctrl_dd *dd = pinctrl_dev_get_drvdata(pctldev);
+	struct msm_pmx_funcs *fn = &dd->pmx_funcs[selector];
+
+	*groups = fn->gps;
+	*num_groups = fn->num_grps;
+	return 0;
+}
+/*
+ * NOTE(review): the block below is compiled out with #if 0 — dead code.
+ * Either wire msm_pmx_enable/msm_pmx_disable into msm_pmxops or delete
+ * this region; kernel style discourages keeping #if 0 code.
+ */
+#if 0
+static void msm_pmx_prg_fn(struct pinctrl_dev *pctldev, unsigned selector,
+					unsigned group, bool enable)
+{
+	struct msm_pinctrl_dd *dd;
+	const unsigned int *pins;
+	struct msm_pindesc *pindesc;
+	struct msm_pintype_info *pinfo;
+	unsigned int pin, cnt, func;
+
+	dd = pinctrl_dev_get_drvdata(pctldev);
+	pins = dd->pin_grps[group].pins;
+	pindesc = dd->msm_pindesc;
+
+	/*
+	 * for each pin in the pin group selected, program the correspoding
+	 * pin function number in the config register.
+	 */
+	for (cnt = 0; cnt < dd->pin_grps[group].num_pins; cnt++) {
+		pin = pins[cnt];
+		pinfo = pindesc[pin].pin_info;
+		pin = pin - pinfo->pin_start;
+		func = dd->pin_grps[group].func;
+		pinfo->prg_func(pin, func, pinfo->reg_base, enable);
+	}
+}
+
+static int msm_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
+					unsigned group)
+{
+	msm_pmx_prg_fn(pctldev, selector, group, true);
+	return 0;
+}
+
+static void msm_pmx_disable(struct pinctrl_dev *pctldev,
+					unsigned selector, unsigned group)
+{
+	msm_pmx_prg_fn(pctldev, selector, group, false);
+}
+#endif
+/* Enable gpio function for a pin */
+static int msm_pmx_gpio_request(struct pinctrl_dev *pctldev,
+				struct pinctrl_gpio_range *grange,
+				unsigned pin)
+{
+	struct msm_pinctrl_dd *dd;
+	struct msm_pindesc *pindesc;
+	struct msm_pintype_info *pinfo;
+
+	dd = pinctrl_dev_get_drvdata(pctldev);
+	pindesc = dd->msm_pindesc;
+	pinfo = pindesc[pin].pin_info;
+	/* All TLMM versions use function 0 for gpio function */
+	/*
+	 * Rebase the controller-wide pin number to the pintype's own index
+	 * space, as msm_pconf_prg() and msm_pmx_prg_fn() do before calling
+	 * the per-pintype helpers; the original passed the absolute number.
+	 */
+	pinfo->prg_func(pin - pinfo->pin_start, 0, pinfo->reg_base, true);
+	return 0;
+}
+
+/* pinmux_ops: stub — muxing is not applied here in this driver. */
+int msm_pmx_set_mux(struct pinctrl_dev *pctldev, unsigned func_selector,
+		    unsigned group_selector)
+{
+	return 0;
+}
+
+
+/* pinmux operations handed to the pinctrl core. */
+static struct pinmux_ops msm_pmxops = {
+	.get_functions_count	= msm_pmx_functions_count,
+	.get_function_name	= msm_pmx_get_fname,
+	.get_function_groups	= msm_pmx_get_groups,
+	.set_mux		= msm_pmx_set_mux,
+	.gpio_request_enable	= msm_pmx_gpio_request,
+};
+
+/*
+ * Program (rw == true) or read back (rw == false) one config value for
+ * @pin, dispatching to the owning pintype's prg_cfg helper with a pin
+ * number rebased to that pintype's index space.
+ */
+static int msm_pconf_prg(struct pinctrl_dev *pctldev, unsigned int pin,
+				unsigned long *config, bool rw)
+{
+	struct msm_pinctrl_dd *dd;
+	struct msm_pindesc *pindesc;
+	struct msm_pintype_info *pinfo;
+
+	dd = pinctrl_dev_get_drvdata(pctldev);
+	pindesc = dd->msm_pindesc;
+	pinfo = pindesc[pin].pin_info;
+	pin = pin - pinfo->pin_start;
+	return pinfo->prg_cfg(pin, config, pinfo->reg_base, rw);
+}
+
+/*
+ * pinconf_ops: apply all @num_configs entries in @configs to @pin.
+ * The original applied only configs[0], silently dropping the rest.
+ */
+static int msm_pconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+				unsigned long *configs, unsigned num_configs)
+{
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < num_configs; i++) {
+		ret = msm_pconf_prg(pctldev, pin, &configs[i], true);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+/* pinconf_ops: read back a single config value for @pin. */
+static int msm_pconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+				unsigned long *config)
+{
+	return msm_pconf_prg(pctldev, pin, config, false);
+}
+
+/*
+ * pinconf_ops: apply the config list to every pin in @group.
+ * Forwards the caller's @count (the original hard-coded 1) and propagates
+ * the first programming error instead of discarding it.
+ */
+static int msm_pconf_group_set(struct pinctrl_dev *pctldev,
+			unsigned group, unsigned long *config, unsigned count)
+{
+	struct msm_pinctrl_dd *dd;
+	const unsigned int *pins;
+	unsigned int cnt;
+	int ret;
+
+	dd = pinctrl_dev_get_drvdata(pctldev);
+	pins = dd->pin_grps[group].pins;
+
+	for (cnt = 0; cnt < dd->pin_grps[group].num_pins; cnt++) {
+		ret = msm_pconf_set(pctldev, pins[cnt], config, count);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * pinconf_ops: group config read-back. Reports the config of the group's
+ * first pin only — all pins in a group are assumed to share one config.
+ */
+static int msm_pconf_group_get(struct pinctrl_dev *pctldev,
+				unsigned int group, unsigned long *config)
+{
+	struct msm_pinctrl_dd *dd;
+	const unsigned int *pins;
+
+	dd = pinctrl_dev_get_drvdata(pctldev);
+	pins = dd->pin_grps[group].pins;
+	msm_pconf_get(pctldev, pins[0], config);
+	return 0;
+}
+
+/* pinconf operations handed to the pinctrl core. */
+static struct pinconf_ops msm_pconfops = {
+	.pin_config_get		= msm_pconf_get,
+	.pin_config_set		= msm_pconf_set,
+	.pin_config_group_get	= msm_pconf_group_get,
+	.pin_config_group_set	= msm_pconf_group_set,
+};
+
+/* pinctrl_ops: number of pin groups parsed from the device tree. */
+static int msm_get_grps_count(struct pinctrl_dev *pctldev)
+{
+	struct msm_pinctrl_dd *dd = pinctrl_dev_get_drvdata(pctldev);
+
+	return dd->num_grps;
+}
+
+/* pinctrl_ops: name of the group at @selector. */
+static const char *msm_get_grps_name(struct pinctrl_dev *pctldev,
+						unsigned selector)
+{
+	struct msm_pinctrl_dd *dd = pinctrl_dev_get_drvdata(pctldev);
+
+	return dd->pin_grps[selector].name;
+}
+
+/* pinctrl_ops: pins belonging to the group at @selector. */
+static int msm_get_grps_pins(struct pinctrl_dev *pctldev,
+		unsigned selector, const unsigned **pins, unsigned *num_pins)
+{
+	struct msm_pinctrl_dd *dd = pinctrl_dev_get_drvdata(pctldev);
+	struct msm_pin_grps *grp = &dd->pin_grps[selector];
+
+	*pins = grp->pins;
+	*num_pins = grp->num_pins;
+	return 0;
+}
+
+/*
+ * Map a pin-group node to its pintype via the "qcom,pins" phandle.
+ * Returns the matching pintype, or — as in the original — the last
+ * pintype entry when no node matches (callers do not check for NULL).
+ * Fix: the phandle's of_node reference is now dropped on every path;
+ * the original leaked it when the loop found no match.
+ */
+static struct msm_pintype_info *msm_pgrp_to_pintype(struct device_node *nd,
+						struct msm_pinctrl_dd *dd)
+{
+	struct device_node *ptype_nd;
+	struct msm_pintype_info *pinfo = NULL;
+	int idx;
+
+	/* Extract pin type node from parent node */
+	ptype_nd = of_parse_phandle(nd, "qcom,pins", 0);
+	/* find the pin type info for this pin type node */
+	for (idx = 0; idx < dd->num_pintypes; idx++) {
+		pinfo = &dd->msm_pintype[idx];
+		if (ptype_nd == pinfo->node)
+			break;
+	}
+	/* of_node_put(NULL) is a no-op, so this is safe unconditionally */
+	of_node_put(ptype_nd);
+	return pinfo;
+}
+
+/* create pinctrl_map entries by parsing device tree nodes */
+static int msm_dt_node_to_map(struct pinctrl_dev *pctldev,
+			struct device_node *cfg_np, struct pinctrl_map **maps,
+			unsigned *nmaps)
+{
+	struct msm_pinctrl_dd *dd;
+	struct device_node *parent;
+	struct msm_pindesc *pindesc;
+	struct msm_pintype_info *pinfo;
+	struct pinctrl_map *map;
+	const char *grp_name;
+	char *fn_name;
+	u32 val;
+	unsigned long *cfg;
+	int cfg_cnt = 0, map_cnt = 0, func_cnt = 0, ret = 0;
+
+	dd = pinctrl_dev_get_drvdata(pctldev);
+	pindesc = dd->msm_pindesc;
+	/* get parent node of config node */
+	parent = of_get_parent(cfg_np);
+	/*
+	 * parent node contains pin grouping
+	 * get pin type from pin grouping
+	 */
+	pinfo = msm_pgrp_to_pintype(parent, dd);
+	/* check if there is a function associated with the parent pin group */
+	if (of_find_property(parent, "qcom,pin-func", NULL))
+		func_cnt++;
+	/* get pin configs */
+	ret = pinconf_generic_parse_dt_config(cfg_np, &cfg, &cfg_cnt);
+	if (ret) {
+		dev_err(dd->dev, "properties incorrect\n");
+		return ret;
+	}
+
+	map_cnt = cfg_cnt + func_cnt;
+
+	/* Allocate memory for pin-map entries */
+	map = kzalloc(sizeof(*map) * map_cnt, GFP_KERNEL);
+	if (!map)
+		return -ENOMEM;
+	*nmaps = 0;
+
+	/* Get group name from node */
+	of_property_read_string(parent, "label", &grp_name);
+	/* create the config map entry */
+	map[*nmaps].data.configs.group_or_pin = grp_name;
+	map[*nmaps].data.configs.configs = cfg;
+	map[*nmaps].data.configs.num_configs = cfg_cnt;
+	map[*nmaps].type = PIN_MAP_TYPE_CONFIGS_GROUP;
+	*nmaps += 1;
+
+	/* If there is no function specified in device tree return */
+	if (func_cnt == 0) {
+		*maps = map;
+		goto no_func;
+	}
+	/* Get function mapping */
+	of_property_read_u32(parent, "qcom,pin-func", &val);
+	/*
+	 * +1 for the NUL terminator: the original allocation was one byte
+	 * short, so the snprintf below overflowed the buffer by one byte.
+	 */
+	fn_name = kzalloc(strlen(grp_name) + strlen("-func") + 1,
+				GFP_KERNEL);
+	if (!fn_name) {
+		ret = -ENOMEM;
+		goto func_err;
+	}
+	snprintf(fn_name, strlen(grp_name) + strlen("-func") + 1, "%s%s",
+					grp_name, "-func");
+	map[*nmaps].data.mux.group = grp_name;
+	map[*nmaps].data.mux.function = fn_name;
+	map[*nmaps].type = PIN_MAP_TYPE_MUX_GROUP;
+	*nmaps += 1;
+	*maps = map;
+	of_node_put(parent);
+	return 0;
+
+func_err:
+	kfree(cfg);
+	kfree(map);
+no_func:
+	of_node_put(parent);
+	return ret;
+}
+
+/* free the memory allocated to hold the pin-map table */
+static void msm_dt_free_map(struct pinctrl_dev *pctldev,
+			     struct pinctrl_map *map, unsigned num_maps)
+{
+	int idx;
+
+	/*
+	 * The original tested map->type (entry 0) for the MUX case, so
+	 * function-name strings of later entries were never freed; index
+	 * every entry. The stray ';' after the loop is also dropped.
+	 */
+	for (idx = 0; idx < num_maps; idx++) {
+		if (map[idx].type == PIN_MAP_TYPE_CONFIGS_GROUP)
+			kfree(map[idx].data.configs.configs);
+		else if (map[idx].type == PIN_MAP_TYPE_MUX_GROUP)
+			kfree(map[idx].data.mux.function);
+	}
+
+	kfree(map);
+}
+
+/* Top-level pinctrl operations handed to the pinctrl core. */
+static struct pinctrl_ops msm_pctrlops = {
+	.get_groups_count	= msm_get_grps_count,
+	.get_group_name		= msm_get_grps_name,
+	.get_group_pins		= msm_get_grps_pins,
+	.dt_node_to_map		= msm_dt_node_to_map,
+	.dt_free_map		= msm_dt_free_map,
+};
+
+/* gpio_chip.request: let the pinctrl core arbitrate ownership of the pin. */
+static int msm_pinctrl_request_gpio(struct gpio_chip *gc, unsigned offset)
+{
+	return pinctrl_request_gpio(gc->base + offset);
+}
+
+/* gpio_chip.free: release the pin back to the pinctrl core. */
+static void msm_pinctrl_free_gpio(struct gpio_chip *gc, unsigned offset)
+{
+	pinctrl_free_gpio(gc->base + offset);
+}
+
+/*
+ * Resolve entry @index of a group's "qcom,pins" phandle list into a
+ * controller-wide pin number in *@pin. Returns 0 on success or a negative
+ * errno. Note: if no pintype node matches, *@pin is left untouched and 0
+ * is still returned — callers rely on pintype nodes always being present.
+ */
+static int msm_of_get_pin(struct device_node *np, int index,
+				struct msm_pinctrl_dd *dd, uint *pin)
+{
+	struct of_phandle_args pargs;
+	struct msm_pintype_info *pinfo, *pintype;
+	int num_pintypes;
+	int ret, i;
+
+	ret = of_parse_phandle_with_args(np, "qcom,pins", "#qcom,pin-cells",
+								index, &pargs);
+	if (ret)
+		return ret;
+	pintype = dd->msm_pintype;
+	num_pintypes = dd->num_pintypes;
+	for (i = 0; i < num_pintypes; i++) {
+		pinfo = &pintype[i];
+		/* Find the matching pin type node */
+		if (pargs.np != pinfo->node)
+			continue;
+		/*
+		 * args[0] is a zero-based index within the pin type, so
+		 * num_pins itself is out of range: use >=, not > (the
+		 * original accepted an index one past the end).
+		 */
+		if (pargs.args[0] >= pinfo->num_pins) {
+			ret = -EINVAL;
+			dev_err(dd->dev, "Invalid pin number for type %s\n",
+								pinfo->name);
+			goto out;
+		}
+		/*
+		 * Pin number = index within pin type + start of pin numbers
+		 * for this pin type
+		 */
+		*pin = pargs.args[0] + pinfo->pin_start;
+		break;
+	}
+out:
+	of_node_put(pargs.np);
+	return ret;
+}
+
+/*
+ * Parse all child nodes carrying a "qcom,pins" list into pin groups and,
+ * where "qcom,pin-func" is present, matching one-group pinmux functions
+ * named "<group>-func". Fills dd->pin_grps/num_grps/pmx_funcs/num_funcs.
+ * All allocations are devm-managed, so early error returns do not leak.
+ */
+static int msm_pinctrl_dt_parse_pins(struct device_node *dev_node,
+						struct msm_pinctrl_dd *dd)
+{
+	struct device *dev;
+	struct device_node *pgrp_np;
+	struct msm_pin_grps *pin_grps, *curr_grp;
+	struct msm_pmx_funcs *pmx_funcs, *curr_func;
+	char *func_name;
+	const char *grp_name;
+	int ret, i, grp_index = 0, func_index = 0;
+	uint pin = 0, *pins, num_grps = 0, num_pins = 0, len = 0;
+	uint num_funcs = 0;
+	u32 func = 0;
+
+	dev = dd->dev;
+	/* First pass: count groups and functions so we can size the arrays */
+	for_each_child_of_node(dev_node, pgrp_np) {
+		if (!of_find_property(pgrp_np, "qcom,pins", NULL))
+			continue;
+		if (of_find_property(pgrp_np, "qcom,pin-func", NULL))
+			num_funcs++;
+		num_grps++;
+	}
+
+	pin_grps = (struct msm_pin_grps *)devm_kzalloc(dd->dev,
+						sizeof(*pin_grps) * num_grps,
+						GFP_KERNEL);
+	if (!pin_grps) {
+		dev_err(dev, "Failed to allocate grp desc\n");
+		return -ENOMEM;
+	}
+	pmx_funcs = (struct msm_pmx_funcs *)devm_kzalloc(dd->dev,
+						sizeof(*pmx_funcs) * num_funcs,
+						GFP_KERNEL);
+	if (!pmx_funcs) {
+		dev_err(dev, "Failed to allocate grp desc\n");
+		return -ENOMEM;
+	}
+	/*
+	 * Iterate over all child nodes, and for nodes containing pin lists
+	 * populate corresponding pin group, and if provided, corresponding
+	 * function
+	 */
+	for_each_child_of_node(dev_node, pgrp_np) {
+		if (!of_find_property(pgrp_np, "qcom,pins", NULL))
+			continue;
+		curr_grp = pin_grps + grp_index;
+		/* Get group name from label*/
+		ret = of_property_read_string(pgrp_np, "label", &grp_name);
+		if (ret) {
+			dev_err(dev, "Unable to allocate group name\n");
+			return ret;
+		}
+		ret = of_property_read_u32(pgrp_np, "qcom,num-grp-pins",
+								&num_pins);
+		if (ret) {
+			dev_err(dev, "pin count not specified for groups %s\n",
+								grp_name);
+			return ret;
+		}
+		pins = devm_kzalloc(dd->dev, sizeof(unsigned int) * num_pins,
+						GFP_KERNEL);
+		if (!pins) {
+			dev_err(dev, "Unable to allocte pins for %s\n",
+								grp_name);
+			return -ENOMEM;
+		}
+		/* Resolve each phandle entry into a global pin number */
+		for (i = 0; i < num_pins; i++) {
+			ret = msm_of_get_pin(pgrp_np, i, dd, &pin);
+			if (ret) {
+				dev_err(dev, "Pin grp %s does not have pins\n",
+								grp_name);
+				return ret;
+			}
+			pins[i] = pin;
+		}
+		curr_grp->pins = pins;
+		curr_grp->num_pins = num_pins;
+		curr_grp->name = grp_name;
+		grp_index++;
+		/* Check if func specified */
+		if (!of_find_property(pgrp_np, "qcom,pin-func", NULL))
+			continue;
+		curr_func = pmx_funcs + func_index;
+		len = strlen(grp_name) + strlen("-func") + 1;
+		func_name = devm_kzalloc(dev, len, GFP_KERNEL);
+		if (!func_name) {
+			dev_err(dev, "Cannot allocate func name for grp %s",
+								grp_name);
+			return -ENOMEM;
+		}
+		snprintf(func_name, len, "%s%s", grp_name, "-func");
+		curr_func->name = func_name;
+		/* Each function maps exactly one group: a single-slot list */
+		curr_func->gps = devm_kzalloc(dev, sizeof(char *), GFP_KERNEL);
+		if (!curr_func->gps) {
+			dev_err(dev, "failed to alloc memory for group list ");
+			return -ENOMEM;
+		}
+		of_property_read_u32(pgrp_np, "qcom,pin-func", &func);
+		curr_grp->func = func;
+		curr_func->gps[0] = grp_name;
+		curr_func->num_grps = 1;
+		func_index++;
+	}
+	dd->pin_grps = pin_grps;
+	dd->num_grps = num_grps;
+	dd->pmx_funcs = pmx_funcs;
+	dd->num_funcs = num_funcs;
+	return 0;
+}
+
+/*
+ * Fill the controller-wide pin descriptor table for one pintype: give
+ * every pin of the type a back-pointer to its pintype and a "<type>-<n>"
+ * name.
+ */
+static void msm_populate_pindesc(struct msm_pintype_info *pinfo,
+					struct msm_pindesc *msm_pindesc)
+{
+	int idx;
+
+	for (idx = 0; idx < pinfo->num_pins; idx++) {
+		struct msm_pindesc *desc =
+				&msm_pindesc[pinfo->pin_start + idx];
+
+		desc->pin_info = pinfo;
+		snprintf(desc->name, sizeof(desc->name), "%s-%d",
+			 pinfo->name, idx);
+	}
+}
+
+/*
+ * Check whether this pintype's DT node has a "gpio-controller" child; if
+ * so, record that child as the gpio_chip's of_node and mark the pintype.
+ * The child node reference taken by the iterator is intentionally kept
+ * while it is stored in pinfo->gc.of_node.
+ */
+static bool msm_pintype_supports_gpio(struct msm_pintype_info *pinfo)
+{
+	struct device_node *pt_node;
+
+	if (!pinfo->node)
+		return false;
+
+	for_each_child_of_node(pinfo->node, pt_node) {
+		if (of_find_property(pt_node, "gpio-controller", NULL)) {
+			pinfo->gc.of_node = pt_node;
+			pinfo->supports_gpio = true;
+			return true;
+		}
+	}
+	return false;
+}
+
+/*
+ * Check whether this pintype can act as an interrupt controller: it must
+ * provide an init_irq callback and have an "interrupt-controller" child
+ * node, which is then recorded on the pintype's irq_chip.
+ */
+static bool msm_pintype_supports_irq(struct msm_pintype_info *pinfo)
+{
+	struct device_node *pt_node;
+
+	if (!pinfo->init_irq)
+		return false;
+	for_each_child_of_node(pinfo->node, pt_node) {
+		if (of_find_property(pt_node, "interrupt-controller", NULL)) {
+			pinfo->irq_chip->node = pt_node;
+			return true;
+		}
+	}
+	return false;
+}
+
+/*
+ * Walk the controller's child nodes, match each against the supported
+ * pintype table by property name, and assign every matched pintype a
+ * contiguous [pin_start, pin_end) range in the controller-wide pin number
+ * space. Then allocate and populate the flat per-pin descriptor table.
+ *
+ * NOTE(review): the "num pins not specified" error path returns without
+ * dropping the of_node references taken so far — confirm whether callers
+ * clean up via msm_pinctrl_cleanup_dd() in that case.
+ */
+static int msm_pinctrl_dt_parse_pintype(struct device_node *dev_node,
+						struct msm_pinctrl_dd *dd)
+{
+	struct device_node *pt_node;
+	struct msm_pindesc *msm_pindesc;
+	struct msm_pintype_info *pintype, *pinfo;
+	void __iomem **ptype_base;
+	u32 num_pins, pinfo_entries, curr_pins;
+	int i, ret;
+	uint total_pins = 0;
+
+	pinfo = dd->msm_pintype;
+	pinfo_entries = dd->num_pintypes;
+	curr_pins = 0;
+
+	for_each_child_of_node(dev_node, pt_node) {
+		for (i = 0; i < pinfo_entries; i++) {
+			pintype = &pinfo[i];
+			/* Check if node is pintype node */
+			if (!of_find_property(pt_node, pintype->prop_name,
+									NULL))
+				continue;
+			/* hold a reference for the lifetime of the driver */
+			of_node_get(pt_node);
+			pintype->node = pt_node;
+			/* determine number of pins of given pin type */
+			ret = of_property_read_u32(pt_node, "qcom,num-pins",
+								&num_pins);
+			if (ret) {
+				dev_err(dd->dev, "num pins not specified\n");
+				return ret;
+			}
+			/* determine pin number range for given pin type */
+			pintype->num_pins = num_pins;
+			pintype->pin_start = curr_pins;
+			pintype->pin_end = curr_pins + num_pins;
+			ptype_base = &pintype->reg_base;
+			pintype->set_reg_base(ptype_base, dd->base);
+			total_pins += num_pins;
+			curr_pins += num_pins;
+		}
+	}
+	dd->msm_pindesc = devm_kzalloc(dd->dev,
+						sizeof(struct msm_pindesc) *
+						total_pins, GFP_KERNEL);
+	if (!dd->msm_pindesc) {
+		dev_err(dd->dev, "Unable to allocate msm pindesc");
+		goto alloc_fail;
+	}
+
+	dd->num_pins = total_pins;
+	msm_pindesc = dd->msm_pindesc;
+	/*
+	 * Populate pin descriptor based on each pin type present in Device
+	 * tree and supported by the driver
+	 */
+	for (i = 0; i < pinfo_entries; i++) {
+		pintype = &pinfo[i];
+		/* If entry not in device tree, skip */
+		if (!pintype->node)
+			continue;
+		msm_populate_pindesc(pintype, msm_pindesc);
+	}
+	return 0;
+alloc_fail:
+	/* drop the node references taken above before failing */
+	for (i = 0; i < pinfo_entries; i++) {
+		pintype = &pinfo[i];
+		if (pintype->node)
+			of_node_put(pintype->node);
+	}
+	return -ENOMEM;
+}
+
+
+/*
+ * Drop the of_node references taken while parsing pintypes.
+ * Fix: the original kept testing pintype (element 0) in every iteration,
+ * so references of pintypes after a node-less first entry were leaked,
+ * or puts were issued for entries that never took a reference.
+ */
+static void msm_pinctrl_cleanup_dd(struct msm_pinctrl_dd *dd)
+{
+	int i;
+
+	for (i = 0; i < dd->num_pintypes; i++) {
+		if (dd->msm_pintype[i].node)
+			of_node_put(dd->msm_pintype[i].node);
+	}
+}
+
+/*
+ * Parse the controller's DT node: first the pintype layout, then the pin
+ * groups/functions. On group-parse failure the pintype node references
+ * are released again.
+ */
+static int msm_pinctrl_get_drvdata(struct msm_pinctrl_dd *dd,
+					struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	int ret;
+
+	ret = msm_pinctrl_dt_parse_pintype(node, dd);
+	if (ret)
+		return ret;
+
+	ret = msm_pinctrl_dt_parse_pins(node, dd);
+	if (ret)
+		msm_pinctrl_cleanup_dd(dd);
+
+	return ret;
+}
+
+/*
+ * Build the pinctrl_desc (ops + per-pin descriptor table) and register
+ * with the pinctrl core, then add a GPIO range for every pintype that
+ * registered a gpio_chip. Returns 0 or a negative errno.
+ */
+static int msm_register_pinctrl(struct msm_pinctrl_dd *dd)
+{
+	int i;
+	struct pinctrl_pin_desc *pindesc;
+	struct msm_pintype_info *pinfo, *pintype;
+	struct pinctrl_desc *ctrl_desc = &dd->pctl;
+
+	ctrl_desc->name = "msm-pinctrl";
+	ctrl_desc->owner = THIS_MODULE;
+	ctrl_desc->pmxops = &msm_pmxops;
+	ctrl_desc->confops = &msm_pconfops;
+	ctrl_desc->pctlops = &msm_pctrlops;
+
+	pindesc = devm_kzalloc(dd->dev, sizeof(*pindesc) * dd->num_pins,
+							GFP_KERNEL);
+	if (!pindesc) {
+		dev_err(dd->dev, "Failed to allocate pinctrl pin desc\n");
+		return -ENOMEM;
+	}
+
+	/* pin numbers are a flat 0..num_pins-1 space across all pintypes */
+	for (i = 0; i < dd->num_pins; i++) {
+		pindesc[i].number = i;
+		pindesc[i].name = dd->msm_pindesc[i].name;
+	}
+	ctrl_desc->pins = pindesc;
+	ctrl_desc->npins = dd->num_pins;
+	dd->pctl_dev = pinctrl_register(ctrl_desc, dd->dev, dd);
+	if (!dd->pctl_dev) {
+		dev_err(dd->dev, "could not register pinctrl driver\n");
+		return -EINVAL;
+	}
+
+	/* expose each gpio-capable pintype's pins to gpiolib via a range */
+	pinfo = dd->msm_pintype;
+	for (i = 0; i < dd->num_pintypes; i++) {
+		pintype = &pinfo[i];
+		if (!pintype->supports_gpio)
+			continue;
+		pintype->grange.name = pintype->name;
+		pintype->grange.id = i;
+		pintype->grange.pin_base = pintype->pin_start;
+		pintype->grange.base = pintype->gc.base;
+		pintype->grange.npins = pintype->gc.ngpio;
+		pintype->grange.gc = &pintype->gc;
+		pinctrl_add_gpio_range(dd->pctl_dev, &pintype->grange);
+	}
+	return 0;
+}
+
+/*
+ * Register a gpio_chip for every pintype whose DT node has a
+ * gpio-controller child. Registration failure is non-fatal: the pintype
+ * is simply demoted to non-gpio.
+ */
+static void msm_register_gpiochip(struct msm_pinctrl_dd *dd)
+{
+	struct gpio_chip *gc;
+	struct msm_pintype_info *pintype, *pinfo;
+	int i, ret = 0;
+
+	pinfo = dd->msm_pintype;
+	for (i = 0; i < dd->num_pintypes; i++) {
+		pintype = &pinfo[i];
+		if (!msm_pintype_supports_gpio(pintype))
+			continue;
+		gc = &pintype->gc;
+		gc->request = msm_pinctrl_request_gpio;
+		gc->free = msm_pinctrl_free_gpio;
+		gc->dev = dd->dev;
+		gc->ngpio = pintype->num_pins;
+		gc->base = -1;
+		ret = gpiochip_add(gc);
+		if (ret) {
+			dev_err(dd->dev, "failed to register gpio chip\n");
+			/*
+			 * Clear the flag on *this* pintype; the original
+			 * wrote pinfo->supports_gpio, i.e. always entry 0.
+			 */
+			pintype->supports_gpio = false;
+		}
+	}
+}
+
+/*
+ * Initialize interrupt support for the first pintype that declares an
+ * interrupt-controller child node.
+ *
+ * NOTE(review): the unconditional "return ret" inside the loop means at
+ * most one pintype's irq support is initialized — confirm that only a
+ * single irq-capable pintype is ever expected per TLMM.
+ */
+static int msm_register_irqchip(struct msm_pinctrl_dd *dd)
+{
+	struct msm_pintype_info *pintype, *pinfo;
+	int i, ret = 0;
+
+	pinfo = dd->msm_pintype;
+	for (i = 0; i < dd->num_pintypes; i++) {
+		pintype = &pinfo[i];
+		if (!msm_pintype_supports_irq(pintype))
+			continue;
+		ret = pintype->init_irq(dd->irq, pintype, dd->dev);
+		return ret;
+	}
+	return 0;
+}
+
+/*
+ * Common probe for all TLMM versions. The version-specific front-end
+ * driver fills @tlmm_info (iomapped base, summary irq, pintype table);
+ * this parses the DT, registers gpio chips, the pinctrl device and irq
+ * support, and stashes the driver data on @pdev. Returns 0 or -errno.
+ *
+ * NOTE(review): error paths after msm_register_gpiochip() do not remove
+ * the gpio chips already added — confirm intended teardown ordering.
+ */
+int msm_pinctrl_probe(struct platform_device *pdev,
+					struct msm_tlmm_desc *tlmm_info)
+{
+	struct msm_pinctrl_dd *dd;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	dd = devm_kzalloc(dev, sizeof(*dd), GFP_KERNEL);
+	if (!dd) {
+		dev_err(dev, "Alloction failed for driver data\n");
+		return -ENOMEM;
+	}
+	dd->dev = dev;
+	dd->msm_pintype = tlmm_info->pintypes;
+	dd->base = tlmm_info->base;
+	dd->irq = tlmm_info->irq;
+	dd->num_pintypes = tlmm_info->num_pintypes;
+	ret = msm_pinctrl_get_drvdata(dd, pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "driver data not available\n");
+		return ret;
+	}
+	msm_register_gpiochip(dd);
+	ret = msm_register_pinctrl(dd);
+	if (ret) {
+		msm_pinctrl_cleanup_dd(dd);
+		return ret;
+	}
+	msm_register_irqchip(dd);
+	platform_set_drvdata(pdev, dd);
+	return 0;
+}
+EXPORT_SYMBOL(msm_pinctrl_probe);
+
+#ifdef CONFIG_USE_PINCTRL_IRQ
+/*
+ * Extension irq_chip hooks; all callbacks start out NULL and are
+ * presumably filled in by an external (MPM) component — TODO confirm
+ * who populates these.
+ */
+struct irq_chip mpm_tlmm_irq_extn = {
+	.irq_eoi	= NULL,
+	.irq_mask	= NULL,
+	.irq_unmask	= NULL,
+	.irq_retrigger	= NULL,
+	.irq_set_type	= NULL,
+	.irq_set_wake	= NULL,
+	.irq_disable	= NULL,
+};
+
+/* Table mapping TLMM intc compatibles to their irq-init callbacks. */
+struct msm_irq_of_info msm_tlmm_irq[] = {
+#ifdef CONFIG_PINCTRL_MSM_TLMM_V3
+	{
+		.compat = "qcom,msm-tlmmv3-gp-intc",
+		.irq_init = msm_tlmm_v3_of_irq_init,
+	},
+#endif
+#ifdef CONFIG_PINCTRL_MSM_TLMM_V4
+	{
+		.compat = "qcom,msm-tlmmv4-gp-intc",
+		.irq_init = msm_tlmm_v4_of_irq_init,
+	},
+#endif
+};
+
+/*
+ * OF irqchip entry point: look up @controller's compatible string in
+ * msm_tlmm_irq[] and run the matching version's irq init with the shared
+ * extension chip. Returns the init callback's result, or -EIO when no
+ * table entry matches.
+ */
+int __init msm_tlmm_of_irq_init(struct device_node *controller,
+						struct device_node *parent)
+{
+	int rc, i;
+	const char *compat;
+
+	rc = of_property_read_string(controller, "compatible", &compat);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < ARRAY_SIZE(msm_tlmm_irq); i++) {
+		struct msm_irq_of_info *tlmm_info = &msm_tlmm_irq[i];
+
+		if (!of_compat_cmp(tlmm_info->compat, compat, strlen(compat)))
+			return tlmm_info->irq_init(controller,
+							&mpm_tlmm_irq_extn);
+
+	}
+	return -EIO;
+}
+#endif
diff --git a/drivers/pinctrl/pinctrl-msm.h b/drivers/pinctrl/pinctrl-msm.h
new file mode 100644
index 000000000000..9267a0e19507
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-msm.h
@@ -0,0 +1,183 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __PINCTRL_MSM_H__
+#define __PINCTRL_MSM_H__
+
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/platform_device.h>
+
+/**
+ * struct msm_pin_grps: group of pins having the same pinmux function.
+ * @name: name of the pin group.
+ * @pins: the pins included in this group.
+ * @num_pins: number of pins included in this group.
+ * @func: the function number to be programmed when selected.
+ */
+struct msm_pin_grps {
+	const char *name;
+	unsigned int *pins;
+	unsigned num_pins;
+	u32 func;
+};
+
+/**
+ * struct msm_pmx_funcs: represent a pin function.
+ * @name: name of the pin function.
+ * @gps: one or more names of pin groups that provide this function.
+ * @num_grps: number of groups included in @gps.
+ */
+struct msm_pmx_funcs {
+	const char *name;
+	const char **gps;
+	unsigned num_grps;
+};
+
+/**
+ * struct msm_tlmm_irq_chip: represents interrupt controller descriptor
+ * @irq: irq number for tlmm summary interrupt.
+ * @chip_base: base register for TLMM.
+ * @num_irqs: number of pins that can be used as irq lines.
+ * @apps_id: id assigned to the apps processor.
+ * @enabled_irqs: bitmask of pins enabled as interrupts.
+ * @dual_edge_irqs: bitmask of pins configured for dual-edge detection.
+ * @wake_irqs: bitmask of pins enabled as wake up interrupts.
+ * @irq_lock: protect against concurrent access.
+ * @domain: irq domain of given interrupt controller.
+ * @domain_ops: irq domain callbacks for @domain.
+ * @chip: irq chip operations.
+ * @irq_chip_extn: extension of irq chip operations.
+ * @dev: TLMM device.
+ * @node: device tree node of interrupt controller.
+ * @pinfo: pintype information.
+ * @handler: irq handler for given pintype interrupt controller.
+ */
+struct msm_tlmm_irq_chip {
+	int irq;
+	/*
+	 * __iomem must annotate the pointer itself: "void __iomem *", not
+	 * "void *__iomem" as originally written (sparse address-space
+	 * checking is silently lost with the wrong placement).
+	 */
+	void __iomem *chip_base;
+	unsigned int num_irqs;
+	unsigned int apps_id;
+	unsigned long *enabled_irqs;
+	unsigned long *dual_edge_irqs;
+	unsigned long *wake_irqs;
+	spinlock_t irq_lock;
+	struct irq_domain *domain;
+	const struct irq_domain_ops *domain_ops;
+	struct irq_chip chip;
+	struct irq_chip *irq_chip_extn;
+	struct device *dev;
+	struct device_node *node;
+	void *pinfo;
+	irqreturn_t (*handler)(int irq, struct msm_tlmm_irq_chip *ic);
+};
+
+/**
+ * struct msm_pintype_info: represent a pin type supported by the TLMM.
+ * @prg_cfg: helper to program or read a given config for a pintype.
+ * @prg_func: helper to program a given func for a pintype.
+ * @set_reg_base: helper to set the register base address for a pintype.
+ * @init_irq: helper to initialize any irq functionality.
+ * @reg_base: register base for a pintype.
+ * @prop_name: DT property name for a pintype.
+ * @name: name of pintype.
+ * @num_pins: number of pins of given pintype.
+ * @pin_start: starting pin number for the given pintype within pinctroller.
+ * @pin_end: ending pin number for the given pintype within pinctroller.
+ * @gc: gpio chip implementation for pin type.
+ * @irq_chip: interrupt controller support for given pintype.
+ * @supports_gpio: pintype supports gpio function.
+ * @grange: pins that map to gpios.
+ * @node: device node for the pintype.
+ */
+struct msm_pintype_info {
+	int (*prg_cfg)(uint pin_no, unsigned long *config, void *reg_data,
+								bool rw);
+	void (*prg_func)(uint pin_no, u32 func, void *reg_data, bool enable);
+	void (*set_reg_base)(void __iomem **ptype_base,
+						void __iomem *tlmm_base);
+	int (*init_irq)(int irq, struct msm_pintype_info *pinfo,
+						struct device *tlmm_dev);
+	void __iomem *reg_base;
+	const char *prop_name;
+	const char *name;
+	u32 num_pins;
+	int pin_start;
+	int pin_end;
+	struct gpio_chip gc;
+	struct msm_tlmm_irq_chip *irq_chip;
+	bool supports_gpio;
+	struct pinctrl_gpio_range grange;
+	struct device_node *node;
+};
+
+/**
+ * struct msm_tlmm_pintype: represents all the TLMM pintypes for a given TLMM
+ * version.
+ * @num_entries: number of pintypes.
+ * @pintype_info: descriptor for the pintypes. One for each present.
+ */
+struct msm_tlmm_pintype {
+	const uint num_entries;
+	struct msm_pintype_info *pintype_info;
+};
+
+/**
+ * struct msm_pindesc: descriptor for all pins maintained by pinctrl driver
+ * @pin_info: pintype for a given pin.
+ * @name: name of the pin, formatted as "<pintype>-<index>".
+ */
+struct msm_pindesc {
+	struct msm_pintype_info *pin_info;
+	char name[20];
+};
+
+/**
+ * struct msm_tlmm_desc: descriptor for the TLMM hardware block
+ * @base: ioremapped register base of the TLMM block.
+ * @irq: summary irq number for tlmm block. Must be > 0 if present.
+ * @num_pintypes: Number of pintypes on the tlmm block for a given SOC.
+ * @pintypes: pintypes supported on a given TLMM block for a given SOC.
+ */
+struct msm_tlmm_desc {
+	void __iomem *base;
+	int irq;
+	unsigned int num_pintypes;
+	struct msm_pintype_info *pintypes;
+};
+
+/* Common probe for all TLMM */
+int msm_pinctrl_probe(struct platform_device *pdev,
+ struct msm_tlmm_desc *tlmm_info);
+#ifdef CONFIG_USE_PINCTRL_IRQ
+#ifdef CONFIG_PINCTRL_MSM_TLMM_V3
+extern int msm_tlmm_v3_of_irq_init(struct device_node *np, struct irq_chip *ic);
+#else
+static inline int msm_tlmm_v3_of_irq_init(struct device_node *np,
+ struct irq_chip *ic)
+{
+ return -EIO;
+}
+#endif
+#ifdef CONFIG_PINCTRL_MSM_TLMM_V4
+extern int msm_tlmm_v4_of_irq_init(struct device_node *np, struct irq_chip *ic);
+#else
+static inline int msm_tlmm_v4_of_irq_init(struct device_node *np,
+ struct irq_chip *ic)
+{
+ return -EIO;
+}
+#endif
+#endif
+#endif
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 0108c2af005b..1ba8538544b1 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -212,6 +212,16 @@ config BATTERY_MAX17042
with MAX17042. This driver also supports max17047/50 chips which are
improved version of max17042.
+config BATTERY_ANDROID
+ tristate "Battery driver for Android"
+ help
+ Say Y to enable generic support for battery charging according
+ to common Android policies.
+ This driver adds periodic battery level and health monitoring,
+ kernel log reporting and other debugging features, common board
+ battery file glue logic for battery/case temperature sensors,
+ etc.
+
config BATTERY_Z2
tristate "Z2 battery driver"
depends on I2C && MACH_ZIPIT2
@@ -368,6 +378,122 @@ config CHARGER_BQ24735
help
Say Y to enable support for the TI BQ24735 battery charger.
+config BATTERY_MSM_FAKE
+ tristate "Fake MSM battery"
+ depends on (ARCH_MSM || ARCH_QCOM) && BATTERY_MSM
+ default n
+ help
+ Say Y to bypass actual battery queries.
+
+config SMB137C_CHARGER
+ tristate "Summit SMB137C Battery Charger"
+ depends on I2C
+ depends on OF
+ help
+ The SMB137C charger chip from Summit is a switching mode based
+ charging solution. This driver supports enabling and disabling
+ charging, setting the input current limit, and enabling USB OTG mode
+ in order to supply 5 V on the VBUS line.
+
+config SMB349_USB_CHARGER
+ tristate "smb349 usb charger (with VBUS detection)"
+ depends on I2C
+ help
+ Say Y to enable support for the SMB349 switching mode based charger.
+ The driver supports charging control (enable/disable) and
+ charge-current limiting. It also provides USB VBUS detection and
+ notification support. The driver controls SMB349 via I2C and
+ supports device-tree interface.
+
+config SMB350_CHARGER
+ tristate "smb350 charger"
+ depends on I2C
+ help
+ Say Y to enable battery charging by SMB350 switching mode based
+ external charger. The device supports stack-cell battery charging.
+ The driver configures the device volatile parameters
+ and the charger device works autonomously.
+ The driver supports charger-enable and charger-suspend/resume.
+ The driver reports the charger status via the power supply framework.
+ A charger status change triggers an IRQ via the device STAT pin.
+
+config SMB135X_CHARGER
+ tristate "SMB135X Battery Charger"
+ depends on I2C
+ help
+ Say Y to include support for SMB135X Battery Charger.
+ SMB135X is a dual path switching mode charger capable of charging
+ the battery with 3Amps of current.
+ The driver supports charger enable/disable.
+ The driver reports the charger status via the power supply framework.
+ A charger status change triggers an IRQ via the device STAT pin.
+
+config SMB1360_CHARGER_FG
+ tristate "SMB1360 Charger and Fuel Gauge"
+ depends on I2C
+ help
+ Say Y to include support for SMB1360 Charger and Fuel Gauge.
+ SMB1360 is a single path switching mode charger capable of charging
+ the battery with 1.5Amps of current. It supports a fuel gauge which
+ uses voltage and coulomb counting for state of charge reporting.
+ The driver reports the status via the power supply framework.
+ A status change triggers an IRQ via the device STAT pin.
+
+config SMB358_CHARGER
+ tristate "SMB358 Battery Charger"
+ depends on I2C
+ help
+ Say Y to include support for SMB358 Battery Charger.
+ SMB358 is a single path switching mode charger capable of charging
+ the battery with 2Amps of current.
+ The driver supports charger enable/disable.
+ The driver reports the charger status via the power supply framework.
+ A charger status change triggers an IRQ via the device STAT pin.
+
+config BQ27520_TEST_ENABLE
+ bool "Enable BQ27520 Fuel Gauge Chip Test"
+ depends on BATTERY_BQ27520
+ default n
+ help
+ Say Y here to enable Test sysfs Interface for BQ27520 Drivers.
+
+config BATTERY_BQ28400
+ tristate "BQ28400 battery driver"
+ depends on I2C
+ default n
+ help
+ Say Y here to enable support for batteries with BQ28400 (I2C) chips.
+ The bq28400 Texas Instruments Inc device monitors the battery
+ charging/discharging status via Rsens resistor, typically 10 mohm.
+ It monitors the battery temperature via Thermistor.
+ The device monitors the battery level (Relative-State-Of-Charge).
+ The device is SBS compliant, providing battery info over I2C.
+
+config QPNP_CHARGER
+ tristate "QPNP Charger driver"
+ depends on SPMI
+ depends on OF_SPMI
+ depends on THERMAL_QPNP_ADC_TM
+ help
+ Say Y here to enable the switch mode battery charger
+ and boost device which supports USB detection and charging. The driver
+ also offers relevant information to userspace via the power supply
+ framework.
+
+config PM8921_BMS
+ tristate "PM8921 Battery Monitoring System driver"
+ depends on MFD_PM8921_CORE
+ help
+ Say Y here to enable support for pm8921 chip bms subdevice
+
+config BATTERY_BCL
+ tristate "Battery Current Limit driver"
+ help
+ Say Y here to enable support for battery current limit
+ device. The BCL driver will poll BMS if
+ thermal daemon enables BCL.
+ It will notify thermal daemon if IBat crosses Imax threshold.
+
config CHARGER_SMB347
tristate "Summit Microelectronics SMB347 Battery Charger"
depends on I2C
@@ -397,8 +523,39 @@ config BATTERY_GOLDFISH
Say Y to enable support for the battery and AC power in the
Goldfish emulator.
+config QPNP_VM_BMS
+ tristate "QPNP Voltage-Mode Battery Monitoring System driver"
+ depends on SPMI
+ depends on MSM_QPNP_INT
+ help
+ Say Y here to enable support for QPNP chip vm-bms device.
+ The voltage-mode (vm) BMS driver uses periodic VBATT
+ readings from the battery to calculate the State of
+ Charge.
+
+config QPNP_BMS
+ tristate "QPNP Battery Monitoring System driver"
+ depends on SPMI
+ depends on MSM_QPNP_INT
+ help
+ Say Y here to enable support for QPNP chip bms device.
+ It registers a fuelgauge bms power supply to report
+ State of Charge.
+
+config QPNP_LINEAR_CHARGER
+ tristate "QPNP Linear Charger driver"
+ depends on SPMI
+ depends on OF_SPMI
+ depends on MSM_QPNP_INT
+ depends on THERMAL_QPNP_ADC_TM
+ help
+ Say Y here to enable the Linear battery charger which supports USB
+ detection and charging. The driver also offers relevant information
+ to userspace via the power supply framework.
+
source "drivers/power/reset/Kconfig"
endif # POWER_SUPPLY
source "drivers/power/avs/Kconfig"
+source "drivers/power/qcom/Kconfig"
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index dfa894273926..19d8236b5471 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_WM8350_POWER) += wm8350_power.o
obj-$(CONFIG_TEST_POWER) += test_power.o
obj-$(CONFIG_BATTERY_88PM860X) += 88pm860x_battery.o
+obj-$(CONFIG_BATTERY_ANDROID) += android_battery.o
obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o
obj-$(CONFIG_BATTERY_DS2780) += ds2780_battery.o
obj-$(CONFIG_BATTERY_DS2781) += ds2781_battery.o
@@ -56,6 +57,19 @@ obj-$(CONFIG_CHARGER_BQ2415X) += bq2415x_charger.o
obj-$(CONFIG_CHARGER_BQ24190) += bq24190_charger.o
obj-$(CONFIG_CHARGER_BQ24735) += bq24735-charger.o
obj-$(CONFIG_POWER_AVS) += avs/
+obj-$(CONFIG_SMB349_USB_CHARGER) += smb349-charger.o
+obj-$(CONFIG_SMB350_CHARGER) += smb350_charger.o
+obj-$(CONFIG_SMB135X_CHARGER) += smb135x-charger.o
+obj-$(CONFIG_SMB1360_CHARGER_FG) += smb1360-charger-fg.o
+obj-$(CONFIG_SMB358_CHARGER) += smb358-charger.o
+obj-$(CONFIG_BATTERY_BQ28400) += bq28400_battery.o
+obj-$(CONFIG_SMB137C_CHARGER) += smb137c-charger.o
+obj-$(CONFIG_QPNP_BMS) += qpnp-bms.o batterydata-lib.o
+obj-$(CONFIG_QPNP_VM_BMS) += qpnp-vm-bms.o batterydata-lib.o batterydata-interface.o
+obj-$(CONFIG_QPNP_CHARGER) += qpnp-charger.o
+obj-$(CONFIG_QPNP_LINEAR_CHARGER) += qpnp-linear-charger.o
obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
obj-$(CONFIG_CHARGER_TPS65090) += tps65090-charger.o
+obj-$(CONFIG_BATTERY_BCL) += battery_current_limit.o
obj-$(CONFIG_POWER_RESET) += reset/
+obj-y += qcom/
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 62653f50a524..29d1799894a2 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -45,7 +45,7 @@ static ssize_t power_supply_show_property(struct device *dev,
char *buf) {
static char *type_text[] = {
"Unknown", "Battery", "UPS", "Mains", "USB",
- "USB_DCP", "USB_CDP", "USB_ACA"
+ "USB_DCP", "USB_CDP", "USB_ACA", "Wireless", "BMS"
};
static char *status_text[] = {
"Unknown", "Charging", "Discharging", "Not charging", "Full"
@@ -54,8 +54,8 @@ static ssize_t power_supply_show_property(struct device *dev,
"Unknown", "N/A", "Trickle", "Fast"
};
static char *health_text[] = {
- "Unknown", "Good", "Overheat", "Dead", "Over voltage",
- "Unspecified failure", "Cold", "Watchdog timer expire",
+ "Unknown", "Good", "Overheat", "Warm", "Dead", "Over voltage",
+ "Unspecified failure", "Cold", "Cool", "Watchdog timer expire",
"Safety timer expire"
};
static char *technology_text[] = {
@@ -141,6 +141,7 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(present),
POWER_SUPPLY_ATTR(online),
POWER_SUPPLY_ATTR(authentic),
+ POWER_SUPPLY_ATTR(charging_enabled),
POWER_SUPPLY_ATTR(technology),
POWER_SUPPLY_ATTR(cycle_count),
POWER_SUPPLY_ATTR(voltage_max),
@@ -151,7 +152,11 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(voltage_avg),
POWER_SUPPLY_ATTR(voltage_ocv),
POWER_SUPPLY_ATTR(voltage_boot),
+ POWER_SUPPLY_ATTR(input_voltage_regulation),
POWER_SUPPLY_ATTR(current_max),
+ POWER_SUPPLY_ATTR(input_current_max),
+ POWER_SUPPLY_ATTR(input_current_trim),
+ POWER_SUPPLY_ATTR(input_current_settled),
POWER_SUPPLY_ATTR(current_now),
POWER_SUPPLY_ATTR(current_avg),
POWER_SUPPLY_ATTR(current_boot),
@@ -164,6 +169,7 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(charge_now),
POWER_SUPPLY_ATTR(charge_avg),
POWER_SUPPLY_ATTR(charge_counter),
+ POWER_SUPPLY_ATTR(charge_counter_shadow),
POWER_SUPPLY_ATTR(constant_charge_current),
POWER_SUPPLY_ATTR(constant_charge_current_max),
POWER_SUPPLY_ATTR(constant_charge_voltage),
@@ -177,6 +183,8 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(energy_empty),
POWER_SUPPLY_ATTR(energy_now),
POWER_SUPPLY_ATTR(energy_avg),
+ POWER_SUPPLY_ATTR(hi_power),
+ POWER_SUPPLY_ATTR(low_power),
POWER_SUPPLY_ATTR(capacity),
POWER_SUPPLY_ATTR(capacity_alert_min),
POWER_SUPPLY_ATTR(capacity_alert_max),
@@ -186,6 +194,8 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(temp_min),
POWER_SUPPLY_ATTR(temp_alert_min),
POWER_SUPPLY_ATTR(temp_alert_max),
+ POWER_SUPPLY_ATTR(temp_cool),
+ POWER_SUPPLY_ATTR(temp_warm),
POWER_SUPPLY_ATTR(temp_ambient),
POWER_SUPPLY_ATTR(temp_ambient_alert_min),
POWER_SUPPLY_ATTR(temp_ambient_alert_max),
@@ -197,10 +207,18 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(scope),
POWER_SUPPLY_ATTR(charge_term_current),
POWER_SUPPLY_ATTR(calibrate),
+ POWER_SUPPLY_ATTR(system_temp_level),
+ POWER_SUPPLY_ATTR(resistance),
+ POWER_SUPPLY_ATTR(resistance_capacitive),
+ /* Local extensions */
+ POWER_SUPPLY_ATTR(usb_hc),
+ POWER_SUPPLY_ATTR(usb_otg),
+ POWER_SUPPLY_ATTR(charge_enabled),
/* Properties of type `const char *' */
POWER_SUPPLY_ATTR(model_name),
POWER_SUPPLY_ATTR(manufacturer),
POWER_SUPPLY_ATTR(serial_number),
+ POWER_SUPPLY_ATTR(battery_type),
};
static struct attribute *
diff --git a/drivers/power/qcom/Kconfig b/drivers/power/qcom/Kconfig
new file mode 100644
index 000000000000..0676652647d7
--- /dev/null
+++ b/drivers/power/qcom/Kconfig
@@ -0,0 +1,50 @@
+config MSM_PM
+ depends on PM
+# select MSM_IDLE_STATS
+ bool "Qualcomm platform specific PM driver"
+ help
+ Platform specific power driver to manage cores and l2
+ low power modes. It interfaces with various system
+ drivers and puts the cores into low power modes.
+
+config MSM_NOPM
+ default y if !PM
+ bool
+ help
+ This enables bare minimum support of power management at platform level,
+ i.e. WFI.
+
+if MSM_PM
+menuconfig MSM_IDLE_STATS
+ bool "Collect idle statistics"
+ help
+ Collect various low power mode idle statistics for the
+ cores and export them in /proc/msm_pm_stats. Users can
+ read this data to determine which low power modes the
+ cores entered and how many times.
+
+if MSM_IDLE_STATS
+
+config MSM_IDLE_STATS_FIRST_BUCKET
+ int "First bucket time"
+ default 62500
+ help
+ Upper time limit in nanoseconds of first bucket.
+
+config MSM_IDLE_STATS_BUCKET_SHIFT
+ int "Bucket shift"
+ default 2
+
+config MSM_IDLE_STATS_BUCKET_COUNT
+ int "Bucket count"
+ default 10
+
+config MSM_SUSPEND_STATS_FIRST_BUCKET
+ int "First bucket time for suspend"
+ default 1000000000
+ help
+ Upper time limit in nanoseconds of first bucket of the
+ histogram. This is for collecting statistics on suspend.
+
+endif # MSM_IDLE_STATS
+endif # MSM_PM
diff --git a/drivers/power/qcom/Makefile b/drivers/power/qcom/Makefile
new file mode 100644
index 000000000000..846a08df4f03
--- /dev/null
+++ b/drivers/power/qcom/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_MSM_PM) += msm-pm.o pm-data.o
+obj-$(CONFIG_MSM_IDLE_STATS) += pm-stats.o
+obj-$(CONFIG_MSM_NOPM) += no-pm.o
+obj-$(CONFIG_PM) += pm-boot.o
diff --git a/drivers/power/qcom/idle.h b/drivers/power/qcom/idle.h
new file mode 100644
index 000000000000..860490318819
--- /dev/null
+++ b/drivers/power/qcom/idle.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2007-2009,2012-2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_IDLE_H_
+#define _ARCH_ARM_MACH_MSM_IDLE_H_
+
+#if defined(CONFIG_CPU_V7) || defined(CONFIG_ARM64)
+extern unsigned long msm_pm_boot_vector[NR_CPUS];
+void msm_pm_boot_entry(void);
+#else
+static inline void msm_pm_boot_entry(void) {}
+#endif
+#endif
diff --git a/drivers/power/qcom/msm-pm.c b/drivers/power/qcom/msm-pm.c
new file mode 100644
index 000000000000..6abbfdbaab2c
--- /dev/null
+++ b/drivers/power/qcom/msm-pm.c
@@ -0,0 +1,922 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/ktime.h>
+#include <linux/smp.h>
+#include <linux/tick.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/cpu_pm.h>
+#include <linux/remote_spinlock.h>
+#include <linux/msm_remote_spinlock.h>
+#include <linux/msm-bus.h>
+#include <linux/sched.h>
+#include <soc/qcom/avs.h>
+#include <soc/qcom/spm.h>
+#include <soc/qcom/pm.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/scm-boot.h>
+#include <asm/suspend.h>
+#include <asm/cacheflush.h>
+#ifdef CONFIG_VFP
+#include <asm/vfp.h>
+#endif
+#include <soc/qcom/jtag.h>
+#include "idle.h"
+#include "pm-boot.h"
+
+#define CREATE_TRACE_POINTS
+
+#define SCM_CMD_TERMINATE_PC (0x2)
+#define SCM_CMD_CORE_HOTPLUGGED (0x10)
+
+#define SCLK_HZ (32768)
+
+#define MAX_BUF_SIZE 512
+
+static int msm_pm_debug_mask = 1;
+module_param_named(
+ debug_mask, msm_pm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
+);
+
+enum {
+ MSM_PM_DEBUG_SUSPEND = BIT(0),
+ MSM_PM_DEBUG_POWER_COLLAPSE = BIT(1),
+ MSM_PM_DEBUG_SUSPEND_LIMITS = BIT(2),
+ MSM_PM_DEBUG_CLOCK = BIT(3),
+ MSM_PM_DEBUG_RESET_VECTOR = BIT(4),
+ MSM_PM_DEBUG_IDLE_CLK = BIT(5),
+ MSM_PM_DEBUG_IDLE = BIT(6),
+ MSM_PM_DEBUG_IDLE_LIMITS = BIT(7),
+ MSM_PM_DEBUG_HOTPLUG = BIT(8),
+};
+
+enum msm_pc_count_offsets {
+ MSM_PC_ENTRY_COUNTER,
+ MSM_PC_EXIT_COUNTER,
+ MSM_PC_FALLTHRU_COUNTER,
+ MSM_PC_UNUSED,
+ MSM_PC_NUM_COUNTERS,
+};
+
+static bool msm_pm_ldo_retention_enabled = true;
+static bool msm_no_ramp_down_pc;
+static struct msm_pm_sleep_status_data *msm_pm_slp_sts;
+DEFINE_PER_CPU(struct clk *, cpu_clks);
+static struct clk *l2_clk;
+
+static int cpu_count;
+static DEFINE_SPINLOCK(cpu_cnt_lock);
+#define SCM_HANDOFF_LOCK_ID "S:7"
+static remote_spinlock_t scm_handoff_lock;
+
+static void __iomem *msm_pc_debug_counters;
+
+/*
+ * Default the l2 flush flag to OFF so the caches are flushed during power
+ * collapse unless explicitly voted otherwise by the lpm driver.
+ */
+static enum msm_pm_l2_scm_flag msm_pm_flush_l2_flag = MSM_SCM_L2_OFF;
+
+void msm_pm_set_l2_flush_flag(enum msm_pm_l2_scm_flag flag)
+{
+ msm_pm_flush_l2_flag = flag;
+}
+EXPORT_SYMBOL(msm_pm_set_l2_flush_flag);
+
+static enum msm_pm_l2_scm_flag msm_pm_get_l2_flush_flag(void)
+{
+ return msm_pm_flush_l2_flag;
+}
+
+static cpumask_t retention_cpus;
+static DEFINE_SPINLOCK(retention_lock);
+
+static inline void msm_arch_idle(void)
+{
+ mb();
+ wfi();
+}
+
+static bool msm_pm_is_L1_writeback(void)
+{
+ u32 cache_id;
+
+#if defined(CONFIG_CPU_V7)
+ u32 sel = 0;
+ asm volatile ("mcr p15, 2, %[ccselr], c0, c0, 0\n\t"
+ "isb\n\t"
+ "mrc p15, 1, %[ccsidr], c0, c0, 0\n\t"
+ :[ccsidr]"=r" (cache_id)
+ :[ccselr]"r" (sel)
+ );
+ return cache_id & BIT(31);
+#elif defined(CONFIG_ARM64)
+ u32 sel = 0;
+ asm volatile("msr csselr_el1, %[ccselr]\n\t"
+ "isb\n\t"
+ "mrs %[ccsidr],ccsidr_el1\n\t"
+ :[ccsidr]"=r" (cache_id)
+ :[ccselr]"r" (sel)
+ );
+ return cache_id & BIT(30);
+#else
+#error No valid CPU arch selected
+#endif
+}
+
+static enum msm_pm_time_stats_id msm_pm_swfi(bool from_idle)
+{
+ msm_arch_idle();
+ return MSM_PM_STAT_IDLE_WFI;
+}
+
+static enum msm_pm_time_stats_id msm_pm_retention(bool from_idle)
+{
+ int ret = 0;
+ unsigned int cpu = smp_processor_id();
+ struct clk *cpu_clk = per_cpu(cpu_clks, cpu);
+
+ spin_lock(&retention_lock);
+
+ if (!msm_pm_ldo_retention_enabled)
+ goto bailout;
+
+ cpumask_set_cpu(cpu, &retention_cpus);
+ spin_unlock(&retention_lock);
+
+ clk_disable(cpu_clk);
+
+ ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_POWER_RETENTION, false);
+ WARN_ON(ret);
+
+ msm_arch_idle();
+
+ ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
+ WARN_ON(ret);
+
+ if (clk_enable(cpu_clk))
+ pr_err("%s(): Error restore cpu clk\n", __func__);
+
+ spin_lock(&retention_lock);
+ cpumask_clear_cpu(cpu, &retention_cpus);
+bailout:
+ spin_unlock(&retention_lock);
+ return MSM_PM_STAT_RETENTION;
+}
+
+static inline void msm_pc_inc_debug_count(uint32_t cpu,
+ enum msm_pc_count_offsets offset)
+{
+ uint32_t cnt;
+ int cntr_offset = cpu * 4 * MSM_PC_NUM_COUNTERS + offset * 4;
+
+ if (!msm_pc_debug_counters)
+ return;
+
+ cnt = readl_relaxed(msm_pc_debug_counters + cntr_offset);
+ writel_relaxed(++cnt, msm_pc_debug_counters + cntr_offset);
+ mb();
+}
+
+static bool msm_pm_pc_hotplug(void)
+{
+ uint32_t cpu = smp_processor_id();
+
+ if (msm_pm_is_L1_writeback())
+ flush_cache_louis();
+
+ msm_pc_inc_debug_count(cpu, MSM_PC_ENTRY_COUNTER);
+
+ scm_call_atomic1(SCM_SVC_BOOT, SCM_CMD_TERMINATE_PC,
+ SCM_CMD_CORE_HOTPLUGGED);
+
+ /* Should not return here */
+ msm_pc_inc_debug_count(cpu, MSM_PC_FALLTHRU_COUNTER);
+ return 0;
+}
+
+int msm_pm_collapse(unsigned long unused)
+{
+ uint32_t cpu = smp_processor_id();
+ enum msm_pm_l2_scm_flag flag = MSM_SCM_L2_ON;
+
+ spin_lock(&cpu_cnt_lock);
+ cpu_count++;
+ if (cpu_count == num_online_cpus())
+ flag = msm_pm_get_l2_flush_flag();
+
+ pr_debug("cpu:%d cores_in_pc:%d L2 flag: %d\n",
+ cpu, cpu_count, flag);
+
+ /*
+ * The scm_handoff_lock will be released by the secure monitor.
+ * It is used to serialize power-collapses from this point on,
+ * so that both Linux and the secure context have a consistent
+ * view regarding the number of running cpus (cpu_count).
+ *
+ * It must be acquired before releasing cpu_cnt_lock.
+ */
+ remote_spin_lock_rlock_id(&scm_handoff_lock,
+ REMOTE_SPINLOCK_TID_START + cpu);
+ spin_unlock(&cpu_cnt_lock);
+
+ if (flag == MSM_SCM_L2_OFF)
+ flush_cache_all();
+ else if (msm_pm_is_L1_writeback())
+ flush_cache_louis();
+
+ msm_pc_inc_debug_count(cpu, MSM_PC_ENTRY_COUNTER);
+
+ scm_call_atomic1(SCM_SVC_BOOT, SCM_CMD_TERMINATE_PC, flag);
+
+ msm_pc_inc_debug_count(cpu, MSM_PC_FALLTHRU_COUNTER);
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_pm_collapse);
+
+static bool __ref msm_pm_spm_power_collapse(
+ unsigned int cpu, bool from_idle, bool notify_rpm)
+{
+ void *entry;
+ bool collapsed = 0;
+ int ret;
+ bool save_cpu_regs = !cpu || from_idle;
+
+ if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: notify_rpm %d\n",
+ cpu, __func__, (int) notify_rpm);
+
+ if (from_idle)
+ cpu_pm_enter();
+
+ ret = msm_spm_set_low_power_mode(
+ MSM_SPM_MODE_POWER_COLLAPSE, notify_rpm);
+ WARN_ON(ret);
+
+ entry = save_cpu_regs ? cpu_resume : msm_secondary_startup;
+
+ msm_pm_boot_config_before_pc(cpu, virt_to_phys(entry));
+
+ if (MSM_PM_DEBUG_RESET_VECTOR & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: program vector to %p\n",
+ cpu, __func__, entry);
+
+ msm_jtag_save_state();
+
+#ifdef CONFIG_CPU_V7
+ collapsed = save_cpu_regs ?
+ !cpu_suspend(0, msm_pm_collapse) : msm_pm_pc_hotplug();
+#else
+ collapsed = save_cpu_regs ?
+ !cpu_suspend(0) : msm_pm_pc_hotplug();
+#endif
+
+ if (save_cpu_regs) {
+ spin_lock(&cpu_cnt_lock);
+ cpu_count--;
+ BUG_ON(cpu_count > num_online_cpus());
+ spin_unlock(&cpu_cnt_lock);
+ }
+ msm_jtag_restore_state();
+
+ if (collapsed)
+ local_fiq_enable();
+
+ msm_pm_boot_config_after_pc(cpu);
+
+ if (from_idle)
+ cpu_pm_exit();
+
+ if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: msm_pm_collapse returned, collapsed %d\n",
+ cpu, __func__, collapsed);
+
+ ret = msm_spm_set_low_power_mode(MSM_SPM_MODE_CLOCK_GATING, false);
+ WARN_ON(ret);
+ return collapsed;
+}
+
+static enum msm_pm_time_stats_id msm_pm_power_collapse_standalone(
+ bool from_idle)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned int avsdscr;
+ unsigned int avscsr;
+ bool collapsed;
+
+ avsdscr = avs_get_avsdscr();
+ avscsr = avs_get_avscsr();
+ avs_set_avscsr(0); /* Disable AVS */
+
+ collapsed = msm_pm_spm_power_collapse(cpu, from_idle, false);
+
+ avs_set_avsdscr(avsdscr);
+ avs_set_avscsr(avscsr);
+ return collapsed ? MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE :
+ MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE;
+}
+
+static int ramp_down_last_cpu(int cpu)
+{
+ struct clk *cpu_clk = per_cpu(cpu_clks, cpu);
+ int ret = 0;
+
+ clk_disable(cpu_clk);
+ clk_disable(l2_clk);
+
+ return ret;
+}
+
+static int ramp_up_first_cpu(int cpu, int saved_rate)
+{
+ struct clk *cpu_clk = per_cpu(cpu_clks, cpu);
+ int rc = 0;
+
+ if (MSM_PM_DEBUG_CLOCK & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: restore clock rate\n",
+ cpu, __func__);
+
+ if (l2_clk) {
+ rc = clk_enable(l2_clk);
+ if (rc)
+ pr_err("%s(): Error restoring l2 clk\n",
+ __func__);
+ }
+
+ if (cpu_clk) {
+ int ret = clk_enable(cpu_clk);
+
+ if (ret) {
+ pr_err("%s(): Error restoring cpu clk\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ return rc;
+}
+
+static enum msm_pm_time_stats_id msm_pm_power_collapse(bool from_idle)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long saved_acpuclk_rate = 0;
+ unsigned int avsdscr;
+ unsigned int avscsr;
+ bool collapsed;
+
+ if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: idle %d\n",
+ cpu, __func__, (int)from_idle);
+
+ if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: pre power down\n", cpu, __func__);
+
+ avsdscr = avs_get_avsdscr();
+ avscsr = avs_get_avscsr();
+ avs_set_avscsr(0); /* Disable AVS */
+
+ if (cpu_online(cpu) && !msm_no_ramp_down_pc)
+ saved_acpuclk_rate = ramp_down_last_cpu(cpu);
+
+ collapsed = msm_pm_spm_power_collapse(cpu, from_idle, true);
+
+ if (cpu_online(cpu) && !msm_no_ramp_down_pc)
+ ramp_up_first_cpu(cpu, saved_acpuclk_rate);
+
+ avs_set_avsdscr(avsdscr);
+ avs_set_avscsr(avscsr);
+
+ if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: post power up\n", cpu, __func__);
+
+ if (MSM_PM_DEBUG_POWER_COLLAPSE & msm_pm_debug_mask)
+ pr_info("CPU%u: %s: return\n", cpu, __func__);
+ return collapsed ? MSM_PM_STAT_IDLE_POWER_COLLAPSE :
+ MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE;
+}
+/******************************************************************************
+ * External Idle/Suspend Functions
+ *****************************************************************************/
+
+void arch_idle(void)
+{
+ return;
+}
+
+static enum msm_pm_time_stats_id (*execute[MSM_PM_SLEEP_MODE_NR])(bool idle) = {
+ [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT] = msm_pm_swfi,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE] =
+ msm_pm_power_collapse_standalone,
+ [MSM_PM_SLEEP_MODE_RETENTION] = msm_pm_retention,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = msm_pm_power_collapse,
+};
+
+/**
+ * msm_cpu_pm_enter_sleep(): Enter a low power mode on current cpu
+ *
+ * @mode - sleep mode to enter
+ * @from_idle - bool to indicate that the mode is exercised during idle/suspend
+ *
+ * The code should be with interrupts disabled and on the core on which the
+ * low power is to be executed.
+ */
+void msm_cpu_pm_enter_sleep(enum msm_pm_sleep_mode mode, bool from_idle)
+{
+ int64_t time = 0;
+ enum msm_pm_time_stats_id exit_stat = -1;
+ unsigned int cpu = smp_processor_id();
+
+ if ((!from_idle && cpu_online(cpu))
+ || (MSM_PM_DEBUG_IDLE & msm_pm_debug_mask))
+ pr_info("CPU%u:%s mode:%d during %s\n", cpu, __func__,
+ mode, from_idle ? "idle" : "suspend");
+
+ if (from_idle)
+ time = sched_clock();
+
+ if (execute[mode])
+ exit_stat = execute[mode](from_idle);
+
+ if (from_idle) {
+ time = sched_clock() - time;
+ if (exit_stat >= 0)
+ msm_pm_add_stat(exit_stat, time);
+ }
+
+}
+
+/**
+ * msm_pm_wait_cpu_shutdown() - Wait for a core to be power collapsed during
+ * hotplug
+ *
+ * @ cpu - cpu to wait on.
+ *
+ * Blocking function call that waits on the core to be power collapsed. This
+ * function is called from platform_cpu_die to ensure that a core is power
+ * collapsed before sending the CPU_DEAD notification so the drivers could
+ * remove the resource votes for this CPU (regulator and clock).
+ */
+int msm_pm_wait_cpu_shutdown(unsigned int cpu)
+{
+ int timeout = 10;
+
+ if (!msm_pm_slp_sts)
+ return 0;
+ if (!msm_pm_slp_sts[cpu].base_addr)
+ return 0;
+ while (1) {
+ /*
+ * Check for the SPM of the core being hotplugged to set
+ * its sleep state. The SPM sleep state indicates that the
+ * core has been power collapsed.
+ */
+ int acc_sts = __raw_readl(msm_pm_slp_sts[cpu].base_addr);
+
+ if (acc_sts & msm_pm_slp_sts[cpu].mask)
+ return 0;
+
+ udelay(100);
+ WARN(++timeout == 20, "CPU%u didn't collapse in 2ms\n", cpu);
+ }
+
+ return -EBUSY;
+}
+
+static void msm_pm_ack_retention_disable(void *data)
+{
+ /*
+ * This is a NULL function to ensure that the core has woken up
+ * and is safe to disable retention.
+ */
+}
+/**
+ * msm_pm_enable_retention() - Disable/Enable retention on all cores
+ * @enable: Enable/Disable retention
+ *
+ */
+void msm_pm_enable_retention(bool enable)
+{
+ if (enable == msm_pm_ldo_retention_enabled)
+ return;
+
+ msm_pm_ldo_retention_enabled = enable;
+
+ /*
+ * If retention is being disabled, wake up all online cores to ensure
+ * that they aren't executing retention. Offlined cores need not be
+ * woken up as they enter the deepest sleep mode, namely RPM assisted
+ * power collapse
+ */
+ if (!enable) {
+ preempt_disable();
+ smp_call_function_many(&retention_cpus,
+ msm_pm_ack_retention_disable,
+ NULL, true);
+ preempt_enable();
+ }
+}
+EXPORT_SYMBOL(msm_pm_enable_retention);
+
+/**
+ * msm_pm_retention_enabled() - Check if retention is enabled
+ *
+ * returns true if retention is enabled
+ */
+bool msm_pm_retention_enabled(void)
+{
+ return msm_pm_ldo_retention_enabled;
+}
+EXPORT_SYMBOL(msm_pm_retention_enabled);
+
+static int msm_pm_snoc_client_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ static struct msm_bus_scale_pdata *msm_pm_bus_pdata;
+ static uint32_t msm_pm_bus_client;
+
+ msm_pm_bus_pdata = msm_bus_cl_get_pdata(pdev);
+
+ if (msm_pm_bus_pdata) {
+ msm_pm_bus_client =
+ msm_bus_scale_register_client(msm_pm_bus_pdata);
+
+ if (!msm_pm_bus_client) {
+ pr_err("%s: Failed to register SNOC client", __func__);
+ rc = -ENXIO;
+ goto snoc_cl_probe_done;
+ }
+
+ rc = msm_bus_scale_client_update_request(msm_pm_bus_client, 1);
+
+ if (rc)
+ pr_err("%s: Error setting bus rate", __func__);
+ }
+
+snoc_cl_probe_done:
+ return rc;
+}
+
+static int msm_cpu_status_probe(struct platform_device *pdev)
+{
+ struct msm_pm_sleep_status_data *pdata;
+ char *key;
+ u32 cpu;
+
+ if (!pdev)
+ return -EFAULT;
+
+ msm_pm_slp_sts = devm_kzalloc(&pdev->dev,
+ sizeof(*msm_pm_slp_sts) * num_possible_cpus(),
+ GFP_KERNEL);
+
+ if (!msm_pm_slp_sts)
+ return -ENOMEM;
+
+ if (pdev->dev.of_node) {
+ struct resource *res;
+ u32 offset;
+ int rc;
+ u32 mask;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ key = "qcom,cpu-alias-addr";
+ rc = of_property_read_u32(pdev->dev.of_node, key, &offset);
+
+ if (rc)
+ return -ENODEV;
+
+ key = "qcom,sleep-status-mask";
+ rc = of_property_read_u32(pdev->dev.of_node, key, &mask);
+
+ if (rc)
+ return -ENODEV;
+
+ for_each_possible_cpu(cpu) {
+ phys_addr_t base_c = res->start + cpu * offset;
+ msm_pm_slp_sts[cpu].base_addr =
+ devm_ioremap(&pdev->dev, base_c,
+ resource_size(res));
+ msm_pm_slp_sts[cpu].mask = mask;
+
+ if (!msm_pm_slp_sts[cpu].base_addr)
+ return -ENOMEM;
+ }
+ } else {
+ pdata = pdev->dev.platform_data;
+ if (!pdev->dev.platform_data)
+ return -EINVAL;
+
+ for_each_possible_cpu(cpu) {
+ msm_pm_slp_sts[cpu].base_addr =
+ pdata->base_addr + cpu * pdata->cpu_offset;
+ msm_pm_slp_sts[cpu].mask = pdata->mask;
+ }
+ }
+
+ return 0;
+};
+
+static struct of_device_id msm_slp_sts_match_tbl[] = {
+ {.compatible = "qcom,cpu-sleep-status"},
+ {},
+};
+
+static struct platform_driver msm_cpu_status_driver = {
+ .probe = msm_cpu_status_probe,
+ .driver = {
+ .name = "cpu_slp_status",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_slp_sts_match_tbl,
+ },
+};
+
+static struct of_device_id msm_snoc_clnt_match_tbl[] = {
+ {.compatible = "qcom,pm-snoc-client"},
+ {},
+};
+
+static struct platform_driver msm_cpu_pm_snoc_client_driver = {
+ .probe = msm_pm_snoc_client_probe,
+ .driver = {
+ .name = "pm_snoc_client",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_snoc_clnt_match_tbl,
+ },
+};
+
+static int msm_pm_init(void)
+{
+ enum msm_pm_time_stats_id enable_stats[] = {
+ MSM_PM_STAT_IDLE_WFI,
+ MSM_PM_STAT_RETENTION,
+ MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE,
+ MSM_PM_STAT_SUSPEND,
+ };
+ msm_pm_mode_sysfs_add(KBUILD_MODNAME);
+ msm_pm_add_stats(enable_stats, ARRAY_SIZE(enable_stats));
+
+ return 0;
+}
+
+struct msm_pc_debug_counters_buffer {
+ void __iomem *reg;
+ u32 len;
+ char buf[MAX_BUF_SIZE];
+};
+
+static inline u32 msm_pc_debug_counters_read_register(
+ void __iomem *reg, int index , int offset)
+{
+ return readl_relaxed(reg + (index * 4 + offset) * 4);
+}
+#if 0
+static char *counter_name[] = {
+ "PC Entry Counter",
+ "Warmboot Entry Counter",
+ "PC Bailout Counter"
+};
+#endif
+static int msm_pc_debug_counters_copy(
+ struct msm_pc_debug_counters_buffer *data)
+{
+#if 0
+ int j;
+ u32 stat;
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu) {
+ data->len += scnprintf(data->buf + data->len,
+ sizeof(data->buf)-data->len,
+ "CPU%d\n", cpu);
+
+ for (j = 0; j < MSM_PC_NUM_COUNTERS; j++) {
+ stat = msm_pc_debug_counters_read_register(
+ data->reg, cpu, j);
+ data->len += scnprintf(data->buf + data->len,
+ sizeof(data->buf)-data->len,
+ "\t%s : %d\n", counter_name[j],
+ stat);
+ }
+
+ }
+#endif
+ return data->len;
+}
+
+/*
+ * debugfs read for "pc_debug_counter": format the counters into the
+ * per-open buffer on the first read, then serve it with
+ * simple_read_from_buffer(), which performs the user-access checks
+ * itself (so no explicit access_ok() is needed — the commented-out
+ * check has been removed).
+ */
+static ssize_t msm_pc_debug_counters_file_read(struct file *file,
+		char __user *bufu, size_t count, loff_t *ppos)
+{
+	struct msm_pc_debug_counters_buffer *data;
+
+	data = file->private_data;
+
+	if (!data)
+		return -EINVAL;
+
+	if (!bufu)
+		return -EINVAL;
+
+	/* lazily fill the buffer exactly once per open (len starts 0) */
+	if (*ppos >= data->len && data->len == 0)
+		data->len = msm_pc_debug_counters_copy(data);
+
+	return simple_read_from_buffer(bufu, count, ppos,
+			data->buf, data->len);
+}
+
+/*
+ * Open handler: allocate a zeroed per-open snapshot buffer and attach
+ * the counter MMIO base (stashed in inode->i_private at file creation).
+ * NOTE(review): the error message says "kmalloc" but kzalloc is used.
+ */
+static int msm_pc_debug_counters_file_open(struct inode *inode,
+		struct file *file)
+{
+	struct msm_pc_debug_counters_buffer *buf;
+	void __iomem *msm_pc_debug_counters_reg;
+
+	msm_pc_debug_counters_reg = inode->i_private;
+
+	if (!msm_pc_debug_counters_reg)
+		return -EINVAL;
+
+	file->private_data = kzalloc(
+		sizeof(struct msm_pc_debug_counters_buffer), GFP_KERNEL);
+
+	if (!file->private_data) {
+		pr_err("%s: ERROR kmalloc failed to allocate %zu bytes\n",
+			__func__, sizeof(struct msm_pc_debug_counters_buffer));
+
+		return -ENOMEM;
+	}
+
+	buf = file->private_data;
+	buf->reg = msm_pc_debug_counters_reg;
+
+	return 0;
+}
+
+/* Release handler: free the per-open snapshot buffer. */
+static int msm_pc_debug_counters_file_close(struct inode *inode,
+		struct file *file)
+{
+	kfree(file->private_data);
+	return 0;
+}
+
+/* File operations for the "pc_debug_counter" debugfs node. */
+static const struct file_operations msm_pc_debug_counters_fops = {
+	.open = msm_pc_debug_counters_file_open,
+	.read = msm_pc_debug_counters_file_read,
+	.release = msm_pc_debug_counters_file_close,
+	.llseek = no_llseek,
+};
+
+/*
+ * Look up the per-CPU clocks ("cpuN_clk") and the L2 clock from the
+ * device node.  With "qcom,synced-clocks" all CPUs share one clock:
+ * only cpu0_clk is required and the L2 clock is not needed.
+ *
+ * Returns 0 on success or the clk lookup error.  The l2_clk trace is
+ * now pr_debug (it was pr_err, logging a non-error pointer at error
+ * severity).
+ */
+static int msm_pm_clk_init(struct platform_device *pdev)
+{
+	bool synced_clocks;
+	u32 cpu;
+	char clk_name[] = "cpu??_clk";
+	char *key;
+
+	key = "qcom,synced-clocks";
+	synced_clocks = of_property_read_bool(pdev->dev.of_node, key);
+
+	for_each_possible_cpu(cpu) {
+		struct clk *clk;
+		snprintf(clk_name, sizeof(clk_name), "cpu%d_clk", cpu);
+		clk = devm_clk_get(&pdev->dev, clk_name);
+		if (IS_ERR(clk)) {
+			/* secondary CPUs legitimately lack their own
+			 * clock when clocks are synced */
+			if (cpu && synced_clocks)
+				return 0;
+			pr_err("failed on %s\n", clk_name);
+			return PTR_ERR(clk);
+		}
+		per_cpu(cpu_clks, cpu) = clk;
+	}
+
+	if (synced_clocks)
+		return 0;
+
+	l2_clk = devm_clk_get(&pdev->dev, "l2_clk");
+	pr_debug("l2_clk %p\n", l2_clk);
+	return PTR_RET(l2_clk);
+}
+
+/*
+ * Probe for the "qcom,pm" node.  In order:
+ *  - optionally map and zero the power-collapse debug counter block
+ *    (first MEM resource) and expose it via debugfs;
+ *  - initialise the SCM handoff remote spinlock (fatal on failure);
+ *  - look up CPU/L2 clocks from DT (fatal on failure);
+ *  - run the common msm_pm_init() and populate child OF devices.
+ */
+static int msm_cpu_pm_probe(struct platform_device *pdev)
+{
+	struct dentry *dent = NULL;
+	struct resource *res = NULL;
+	int i;
+	int ret = 0;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return 0;
+	msm_pc_debug_counters_phys = res->start;
+	WARN_ON(resource_size(res) < SZ_64);
+	msm_pc_debug_counters = devm_ioremap(&pdev->dev, res->start,
+					resource_size(res));
+	if (msm_pc_debug_counters) {
+		/* clear the counters before exposing them */
+		for (i = 0; i < resource_size(res)/4; i++)
+			__raw_writel(0, msm_pc_debug_counters + i * 4);
+
+		dent = debugfs_create_file("pc_debug_counter", S_IRUGO, NULL,
+				msm_pc_debug_counters,
+				&msm_pc_debug_counters_fops);
+		if (!dent)
+			pr_err("%s: ERROR debugfs_create_file failed\n",
+					__func__);
+	} else {
+		/* ioremap failed: leave both globals cleared (0 == NULL) */
+		msm_pc_debug_counters = 0;
+		msm_pc_debug_counters_phys = 0;
+	}
+
+	ret = remote_spin_lock_init(&scm_handoff_lock, SCM_HANDOFF_LOCK_ID);
+	if (ret) {
+		pr_err("%s: Failed initializing scm_handoff_lock (%d)\n",
+			__func__, ret);
+		return ret;
+	}
+
+	if (pdev->dev.of_node) {
+		ret = msm_pm_clk_init(pdev);
+		if (ret) {
+			pr_info("msm_pm_clk_init returned error\n");
+			return ret;
+		}
+	}
+
+	msm_pm_init();
+	if (pdev->dev.of_node)
+		of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+
+	return ret;
+}
+
+/* DT match table and platform driver for the main "qcom,pm" node. */
+static struct of_device_id msm_cpu_pm_table[] = {
+	{.compatible = "qcom,pm"},
+	{},
+};
+
+static struct platform_driver msm_cpu_pm_driver = {
+	.probe = msm_cpu_pm_probe,
+	.driver = {
+		.name = "msm-pm",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_cpu_pm_table,
+	},
+};
+
+/*
+ * Register the SNoC-client and main PM platform drivers.  If the
+ * second registration fails, the first is now unregistered so a failed
+ * init does not leave a stale driver behind (the original leaked the
+ * snoc client registration).
+ */
+static int __init msm_pm_drv_init(void)
+{
+	int rc;
+
+	cpumask_clear(&retention_cpus);
+
+	rc = platform_driver_register(&msm_cpu_pm_snoc_client_driver);
+	if (rc) {
+		pr_err("%s(): failed to register driver %s\n", __func__,
+			msm_cpu_pm_snoc_client_driver.driver.name);
+		return rc;
+	}
+
+	rc = platform_driver_register(&msm_cpu_pm_driver);
+	if (rc) {
+		pr_err("%s(): failed to register driver %s\n", __func__,
+			msm_cpu_pm_driver.driver.name);
+		platform_driver_unregister(&msm_cpu_pm_snoc_client_driver);
+	}
+
+	return rc;
+}
+late_initcall(msm_pm_drv_init);
+
+/*
+ * Register the CPU sleep-status driver (defined elsewhere in this
+ * file).  Guarded by a static flag so it is safe to call both from the
+ * arch_initcall below and from other early callers.
+ */
+int __init msm_pm_sleep_status_init(void)
+{
+	static bool registered;
+
+	if (registered)
+		return 0;
+	registered = true;
+
+	return platform_driver_register(&msm_cpu_status_driver);
+}
+arch_initcall(msm_pm_sleep_status_init);
diff --git a/drivers/power/qcom/no-pm.c b/drivers/power/qcom/no-pm.c
new file mode 100644
index 000000000000..f71d96752c83
--- /dev/null
+++ b/drivers/power/qcom/no-pm.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2010-2011, 2013-2014, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <asm/proc-fns.h>
+#include <soc/qcom/pm.h>
+
+/* Minimal no-PM backend: idle is a plain WFI via cpu_do_idle(). */
+void arch_idle(void)
+{
+	cpu_do_idle();
+}
+
+/* No low-power modes in this build: both hooks are intentional no-ops. */
+void msm_cpu_pm_enter_sleep(enum msm_pm_sleep_mode mode, bool from_idle) {}
+
+void msm_pm_enable_retention(bool enable) {}
diff --git a/drivers/power/qcom/pm-boot.c b/drivers/power/qcom/pm-boot.c
new file mode 100644
index 000000000000..5fa3f82b4050
--- /dev/null
+++ b/drivers/power/qcom/pm-boot.c
@@ -0,0 +1,77 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <soc/qcom/scm-boot.h>
+#include <asm/cacheflush.h>
+#include "idle.h"
+#include "pm-boot.h"
+
+/* Hooks installed by msm_pm_boot_init(); invoked around power collapse. */
+static void (*msm_pm_boot_before_pc)(unsigned int cpu, unsigned long entry);
+static void (*msm_pm_boot_after_pc)(unsigned int cpu);
+
+/*
+ * Register msm_pm_boot_entry with TZ as the warm-boot address.  SCM
+ * only defines warmboot flags for 1-, 2- and 4-CPU configurations;
+ * any other CPU count warns and registers with no flags.
+ */
+static int msm_pm_tz_boot_init(void)
+{
+	unsigned int flag = 0;
+	if (num_possible_cpus() == 1)
+		flag = SCM_FLAG_WARMBOOT_CPU0;
+	else if (num_possible_cpus() == 2)
+		flag = SCM_FLAG_WARMBOOT_CPU0 | SCM_FLAG_WARMBOOT_CPU1;
+	else if (num_possible_cpus() == 4)
+		flag = SCM_FLAG_WARMBOOT_CPU0 | SCM_FLAG_WARMBOOT_CPU1 |
+				SCM_FLAG_WARMBOOT_CPU2 | SCM_FLAG_WARMBOOT_CPU3;
+	else
+		__WARN();
+
+	return scm_set_boot_addr(virt_to_phys(msm_pm_boot_entry), flag);
+}
+
+/*
+ * Store the warm-boot entry address for @cpu and make sure it reaches
+ * memory before the CPU is power-collapsed.  flush_cache_all() is the
+ * (heavyweight) chosen barrier; the dead commented-out ranged
+ * dmac_clean_range() variant has been removed.
+ */
+static void msm_pm_write_boot_vector(unsigned int cpu, unsigned long address)
+{
+	msm_pm_boot_vector[cpu] = address;
+	flush_cache_all();
+}
+
+/* TZ flavour of the before-power-collapse hook: publish the entry. */
+static void msm_pm_config_tz_before_pc(unsigned int cpu,
+		unsigned long entry)
+{
+	msm_pm_write_boot_vector(cpu, entry);
+}
+
+/* Dispatch to the installed before-PC hook, if any. */
+void msm_pm_boot_config_before_pc(unsigned int cpu, unsigned long entry)
+{
+	if (msm_pm_boot_before_pc)
+		msm_pm_boot_before_pc(cpu, entry);
+}
+
+/* Dispatch to the installed after-PC hook, if any. */
+void msm_pm_boot_config_after_pc(unsigned int cpu)
+{
+	if (msm_pm_boot_after_pc)
+		msm_pm_boot_after_pc(cpu);
+}
+
+/*
+ * Register the warm-boot address with TZ and install the TZ hooks.
+ * NOTE(review): the hooks are installed even when msm_pm_tz_boot_init()
+ * fails; the error is still propagated — confirm this is intentional.
+ */
+static int __init msm_pm_boot_init(void)
+{
+	int ret = 0;
+
+	ret = msm_pm_tz_boot_init();
+	msm_pm_boot_before_pc = msm_pm_config_tz_before_pc;
+	msm_pm_boot_after_pc = NULL;
+
+	return ret;
+}
+postcore_initcall(msm_pm_boot_init);
diff --git a/drivers/power/qcom/pm-boot.h b/drivers/power/qcom/pm-boot.h
new file mode 100644
index 000000000000..50e083f4e4a2
--- /dev/null
+++ b/drivers/power/qcom/pm-boot.h
@@ -0,0 +1,19 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _ARCH_ARM_MACH_MSM_PM_BOOT_H
+#define _ARCH_ARM_MACH_MSM_PM_BOOT_H
+
+/* Hooks invoked around CPU power collapse; implemented in pm-boot.c. */
+void msm_pm_boot_config_before_pc(unsigned int cpu, unsigned long entry);
+void msm_pm_boot_config_after_pc(unsigned int cpu);
+
+#endif
diff --git a/drivers/power/qcom/pm-data.c b/drivers/power/qcom/pm-data.c
new file mode 100644
index 000000000000..41517ed911d8
--- /dev/null
+++ b/drivers/power/qcom/pm-data.c
@@ -0,0 +1,437 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <soc/qcom/pm.h>
+
+/* Recover the CPU number from a sysfs kobj_attribute embedded in a
+ * struct msm_pm_kobj_attribute. */
+#define GET_CPU_OF_ATTR(attr) \
+	(container_of(attr, struct msm_pm_kobj_attribute, ka)->cpu)
+
+/* Per-(cpu, sleep-mode) capability and enablement flags. */
+struct msm_pm_platform_data {
+	u8 idle_supported;   /* Allow device to enter mode during idle */
+	u8 suspend_supported; /* Allow device to enter mode during suspend */
+	u8 suspend_enabled;  /* enabled for suspend */
+	u8 idle_enabled;     /* enabled for idle low power */
+	u32 latency;         /* interrupt latency in microseconds when entering
+				and exiting the low power mode */
+	u32 residency;       /* time threshold in microseconds beyond which
+				staying in the low power mode saves power */
+};
+
+/*
+ * Static (cpu, mode) capability table, indexed with MSM_PM_MODE().
+ * Rough policy encoded here: WFI is supported/enabled everywhere for
+ * idle; retention and standalone PC are supported but start disabled;
+ * full power collapse is suspend-enabled on secondary CPUs.  Entries
+ * not listed default to all-zero (unsupported/disabled).
+ */
+static struct msm_pm_platform_data msm_pm_sleep_modes[] = {
+	[MSM_PM_MODE(0, MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND)] = {
+		.idle_supported = 0,
+		.suspend_supported = 1,
+		.idle_enabled = 0,
+		.suspend_enabled = 1,
+	},
+
+	[MSM_PM_MODE(0, MSM_PM_SLEEP_MODE_POWER_COLLAPSE)] = {
+		.idle_supported = 1,
+		.suspend_supported = 1,
+		.idle_enabled = 0,
+		.suspend_enabled = 0,
+	},
+
+	[MSM_PM_MODE(0, MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)] = {
+		.idle_supported = 1,
+		.suspend_supported = 1,
+		.idle_enabled = 0,
+		.suspend_enabled = 0,
+	},
+
+	[MSM_PM_MODE(0, MSM_PM_SLEEP_MODE_RETENTION)] = {
+		.idle_supported = 1,
+		.suspend_supported = 0,
+		.idle_enabled = 0,
+		.suspend_enabled = 0,
+	},
+
+	[MSM_PM_MODE(0, MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT)] = {
+		.idle_supported = 1,
+		.suspend_supported = 1,
+		.idle_enabled = 1,
+		.suspend_enabled = 1,
+	},
+
+	[MSM_PM_MODE(1, MSM_PM_SLEEP_MODE_POWER_COLLAPSE)] = {
+		.idle_supported = 1,
+		.suspend_supported = 1,
+		.idle_enabled = 0,
+		.suspend_enabled = 1,
+	},
+
+	[MSM_PM_MODE(1, MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND)] = {
+		.idle_supported = 0,
+		.suspend_supported = 0,
+		.idle_enabled = 0,
+		.suspend_enabled = 0,
+	},
+
+	[MSM_PM_MODE(1, MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)] = {
+		.idle_supported = 1,
+		.suspend_supported = 1,
+		.idle_enabled = 0,
+		.suspend_enabled = 0,
+	},
+
+	[MSM_PM_MODE(1, MSM_PM_SLEEP_MODE_RETENTION)] = {
+		.idle_supported = 1,
+		.suspend_supported = 1,
+		.idle_enabled = 0,
+		.suspend_enabled = 0,
+	},
+
+	[MSM_PM_MODE(1, MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT)] = {
+		.idle_supported = 1,
+		.suspend_supported = 0,
+		.idle_enabled = 1,
+		.suspend_enabled = 0,
+	},
+
+	[MSM_PM_MODE(2, MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND)] = {
+		.idle_supported = 0,
+		.suspend_supported = 0,
+		.idle_enabled = 0,
+		.suspend_enabled = 0,
+	},
+
+	[MSM_PM_MODE(2, MSM_PM_SLEEP_MODE_POWER_COLLAPSE)] = {
+		.idle_supported = 1,
+		.suspend_supported = 1,
+		.idle_enabled = 0,
+		.suspend_enabled = 1,
+	},
+
+	[MSM_PM_MODE(2, MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)] = {
+		.idle_supported = 1,
+		.suspend_supported = 1,
+		.idle_enabled = 0,
+		.suspend_enabled = 0,
+	},
+
+	[MSM_PM_MODE(2, MSM_PM_SLEEP_MODE_RETENTION)] = {
+		.idle_supported = 1,
+		.suspend_supported = 1,
+		.idle_enabled = 0,
+		.suspend_enabled = 0,
+	},
+
+	[MSM_PM_MODE(2, MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT)] = {
+		.idle_supported = 1,
+		.suspend_supported = 0,
+		.idle_enabled = 1,
+		.suspend_enabled = 0,
+	},
+
+	[MSM_PM_MODE(3, MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND)] = {
+		.idle_supported = 0,
+		.suspend_supported = 0,
+		.idle_enabled = 0,
+		.suspend_enabled = 0,
+	},
+
+	[MSM_PM_MODE(3, MSM_PM_SLEEP_MODE_POWER_COLLAPSE)] = {
+		.idle_supported = 1,
+		.suspend_supported = 1,
+		.idle_enabled = 0,
+		.suspend_enabled = 1,
+	},
+
+	[MSM_PM_MODE(3, MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE)] = {
+		.idle_supported = 1,
+		.suspend_supported = 1,
+		.idle_enabled = 0,
+		.suspend_enabled = 0,
+	},
+
+	[MSM_PM_MODE(3, MSM_PM_SLEEP_MODE_RETENTION)] = {
+		.idle_supported = 1,
+		.suspend_supported = 1,
+		.idle_enabled = 0,
+		.suspend_enabled = 0,
+	},
+
+	[MSM_PM_MODE(3, MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT)] = {
+		.idle_supported = 1,
+		.suspend_supported = 0,
+		.idle_enabled = 1,
+		.suspend_enabled = 0,
+	},
+
+	[MSM_PM_MODE(3, MSM_PM_SLEEP_MODE_NR)] = {
+		.idle_supported = 0,
+		.suspend_supported = 0,
+		.idle_enabled = 0,
+		.suspend_enabled = 0,
+	},
+};
+
+/* Per-mode sysfs attribute indices: one file per enablement flag. */
+enum {
+	MSM_PM_MODE_ATTR_SUSPEND,
+	MSM_PM_MODE_ATTR_IDLE,
+	MSM_PM_MODE_ATTR_NR,
+};
+
+/* sysfs file names for the attributes above. */
+static char *msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_NR] = {
+	[MSM_PM_MODE_ATTR_SUSPEND] = "suspend_enabled",
+	[MSM_PM_MODE_ATTR_IDLE] = "idle_enabled",
+};
+
+/* kobj_attribute plus the CPU it belongs to (see GET_CPU_OF_ATTR). */
+struct msm_pm_kobj_attribute {
+	unsigned int cpu;
+	struct kobj_attribute ka;
+};
+
+/* One sysfs directory per (cpu, sleep-mode): kobject + its attributes. */
+struct msm_pm_sysfs_sleep_mode {
+	struct kobject *kobj;
+	struct attribute_group attr_group;
+	struct attribute *attrs[MSM_PM_MODE_ATTR_NR + 1];
+	struct msm_pm_kobj_attribute kas[MSM_PM_MODE_ATTR_NR];
+};
+
+/* sysfs directory names per sleep mode; NULL entries get no directory. */
+static char *msm_pm_sleep_mode_labels[MSM_PM_SLEEP_MODE_NR] = {
+	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = "power_collapse",
+	[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT] = "wfi",
+	[MSM_PM_SLEEP_MODE_RETENTION] = "retention",
+	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE] =
+		"standalone_power_collapse",
+};
+
+/*
+ * Write out the attribute.
+ */
+/*
+ * sysfs show for "suspend_enabled"/"idle_enabled".  The sleep mode is
+ * found by matching the kobject name against msm_pm_sleep_mode_labels
+ * and the CPU comes from the containing msm_pm_kobj_attribute.  The
+ * value is rendered through param_get_int(); only kp.arg of the
+ * on-stack kernel_param is used, so the rest stays uninitialised.
+ */
+static ssize_t msm_pm_mode_attr_show(
+	struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	int ret = -EINVAL;
+	int i;
+
+	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
+		struct kernel_param kp;
+		unsigned int cpu;
+		struct msm_pm_platform_data *mode;
+
+		if (msm_pm_sleep_mode_labels[i] == NULL)
+			continue;
+
+		if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
+			continue;
+
+		cpu = GET_CPU_OF_ATTR(attr);
+		mode = &msm_pm_sleep_modes[MSM_PM_MODE(cpu, i)];
+
+		if (!strcmp(attr->attr.name,
+			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_SUSPEND])) {
+			u32 arg = mode->suspend_enabled;
+			kp.arg = &arg;
+			ret = param_get_int(buf, &kp);
+		} else if (!strcmp(attr->attr.name,
+			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_IDLE])) {
+			u32 arg = mode->idle_enabled;
+			kp.arg = &arg;
+			ret = param_get_int(buf, &kp);
+		}
+
+		break;
+	}
+
+	/* append a newline for readability of the sysfs file */
+	if (ret > 0) {
+		strlcat(buf, "\n", PAGE_SIZE);
+		ret++;
+	}
+
+	return ret;
+}
+
+/*
+ * sysfs store for "suspend_enabled"/"idle_enabled": parse the user
+ * string with param_set_byte() directly into the u8 flag of the
+ * matching (cpu, mode) entry.  Returns the byte count on success or
+ * -EINVAL when the kobject/attribute names match nothing.
+ */
+static ssize_t msm_pm_mode_attr_store(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	int ret = -EINVAL;
+	int i;
+
+	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
+		struct kernel_param kp;
+		unsigned int cpu;
+		struct msm_pm_platform_data *mode;
+
+		if (msm_pm_sleep_mode_labels[i] == NULL)
+			continue;
+
+		if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
+			continue;
+
+		cpu = GET_CPU_OF_ATTR(attr);
+		mode = &msm_pm_sleep_modes[MSM_PM_MODE(cpu, i)];
+
+		if (!strcmp(attr->attr.name,
+			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_SUSPEND])) {
+			kp.arg = &mode->suspend_enabled;
+			ret = param_set_byte(buf, &kp);
+		} else if (!strcmp(attr->attr.name,
+			msm_pm_mode_attr_labels[MSM_PM_MODE_ATTR_IDLE])) {
+			kp.arg = &mode->idle_enabled;
+			ret = param_set_byte(buf, &kp);
+		}
+
+		break;
+	}
+
+	return ret ? ret : count;
+}
+
+/*
+ * Create /sys/.../modes/cpuN/<mode>/{suspend,idle}_enabled for one CPU.
+ * Each supported mode gets its own kobject and attribute group; the
+ * msm_pm_sysfs_sleep_mode allocations intentionally live for the
+ * lifetime of the kernel (sysfs keeps referencing them).
+ *
+ * NOTE(review): on error only the most recently allocated mode is
+ * cleaned up; kobjects created in earlier iterations remain registered.
+ */
+static int msm_pm_mode_sysfs_add_cpu(
+	unsigned int cpu, struct kobject *modes_kobj)
+{
+	char cpu_name[8];
+	struct kobject *cpu_kobj;
+	struct msm_pm_sysfs_sleep_mode *mode = NULL;
+	int i, j, k;
+	int ret;
+
+	snprintf(cpu_name, sizeof(cpu_name), "cpu%u", cpu);
+	cpu_kobj = kobject_create_and_add(cpu_name, modes_kobj);
+	if (!cpu_kobj) {
+		pr_err("%s: cannot create %s kobject\n", __func__, cpu_name);
+		ret = -ENOMEM;
+		goto mode_sysfs_add_cpu_exit;
+	}
+
+	for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
+		int idx = MSM_PM_MODE(cpu, i);
+
+		/* skip modes this CPU cannot use at all */
+		if ((!msm_pm_sleep_modes[idx].suspend_supported)
+			&& (!msm_pm_sleep_modes[idx].idle_supported))
+			continue;
+
+		/* skip modes with no sysfs name */
+		if (!msm_pm_sleep_mode_labels[i] ||
+				!msm_pm_sleep_mode_labels[i][0])
+			continue;
+
+		mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+		if (!mode) {
+			pr_err("%s: cannot allocate memory for attributes\n",
+				__func__);
+			ret = -ENOMEM;
+			goto mode_sysfs_add_cpu_exit;
+		}
+
+		mode->kobj = kobject_create_and_add(
+				msm_pm_sleep_mode_labels[i], cpu_kobj);
+		if (!mode->kobj) {
+			pr_err("%s: cannot create kobject\n", __func__);
+			ret = -ENOMEM;
+			goto mode_sysfs_add_cpu_exit;
+		}
+
+		/* only expose the knobs this (cpu, mode) supports */
+		for (k = 0, j = 0; k < MSM_PM_MODE_ATTR_NR; k++) {
+			if ((k == MSM_PM_MODE_ATTR_IDLE) &&
+				!msm_pm_sleep_modes[idx].idle_supported)
+				continue;
+			if ((k == MSM_PM_MODE_ATTR_SUSPEND) &&
+				!msm_pm_sleep_modes[idx].suspend_supported)
+				continue;
+			sysfs_attr_init(&mode->kas[j].ka.attr);
+			mode->kas[j].cpu = cpu;
+			mode->kas[j].ka.attr.mode = 0644;
+			mode->kas[j].ka.show = msm_pm_mode_attr_show;
+			mode->kas[j].ka.store = msm_pm_mode_attr_store;
+			mode->kas[j].ka.attr.name = msm_pm_mode_attr_labels[k];
+			mode->attrs[j] = &mode->kas[j].ka.attr;
+			j++;
+		}
+		mode->attrs[j] = NULL;
+
+		mode->attr_group.attrs = mode->attrs;
+		ret = sysfs_create_group(mode->kobj, &mode->attr_group);
+		if (ret) {
+			pr_err("%s: cannot create kobject attribute group\n",
+				__func__);
+			goto mode_sysfs_add_cpu_exit;
+		}
+	}
+
+	ret = 0;
+
+mode_sysfs_add_cpu_exit:
+	if (ret) {
+		if (mode && mode->kobj)
+			kobject_del(mode->kobj);
+		kfree(mode);
+	}
+
+	return ret;
+}
+
+/*
+ * Create the "modes" sysfs hierarchy under the kobject of the module
+ * named @pm_modname and populate it for every possible CPU.  Returns 0
+ * or the first error encountered.
+ */
+int msm_pm_mode_sysfs_add(const char *pm_modname)
+{
+	struct kobject *module_kobj;
+	struct kobject *modes_kobj;
+	unsigned int cpu;
+	int ret;
+
+	module_kobj = kset_find_obj(module_kset, pm_modname);
+	if (!module_kobj) {
+		pr_err("%s: cannot find kobject for module %s\n",
+			__func__, pm_modname);
+		ret = -ENOENT;
+		goto mode_sysfs_add_exit;
+	}
+
+	modes_kobj = kobject_create_and_add("modes", module_kobj);
+	if (!modes_kobj) {
+		pr_err("%s: cannot create modes kobject\n", __func__);
+		ret = -ENOMEM;
+		goto mode_sysfs_add_exit;
+	}
+
+	for_each_possible_cpu(cpu) {
+		ret = msm_pm_mode_sysfs_add_cpu(cpu, modes_kobj);
+		if (ret)
+			goto mode_sysfs_add_exit;
+	}
+
+	ret = 0;
+
+mode_sysfs_add_exit:
+	return ret;
+}
+
+/*
+ * Report whether @cpu supports sleep @mode — for idle when @idle is
+ * true, otherwise for suspend.  Pure lookup in msm_pm_sleep_modes.
+ */
+int msm_pm_sleep_mode_supported(unsigned int cpu,
+	unsigned int mode, bool idle)
+{
+	const struct msm_pm_platform_data *entry =
+			&msm_pm_sleep_modes[MSM_PM_MODE(cpu, mode)];
+
+	return idle ? entry->idle_supported : entry->suspend_supported;
+}
+EXPORT_SYMBOL(msm_pm_sleep_mode_supported);
+
+/*
+ * Report whether @cpu is currently allowed to use sleep @mode: the mode
+ * must be both supported and enabled for the given context (idle or
+ * suspend).  Retention is additionally gated by the runtime
+ * msm_pm_retention_enabled() switch.
+ */
+int msm_pm_sleep_mode_allow(unsigned int cpu,
+	unsigned int mode, bool idle)
+{
+	int idx = MSM_PM_MODE(cpu, mode);
+
+	if ((mode == MSM_PM_SLEEP_MODE_RETENTION)
+			&& !msm_pm_retention_enabled())
+		return false;
+
+	if (idle)
+		return msm_pm_sleep_modes[idx].idle_enabled &&
+			msm_pm_sleep_modes[idx].idle_supported;
+	else
+		return msm_pm_sleep_modes[idx].suspend_enabled &&
+			msm_pm_sleep_modes[idx].suspend_supported;
+}
+EXPORT_SYMBOL(msm_pm_sleep_mode_allow);
diff --git a/drivers/power/qcom/pm-stats.c b/drivers/power/qcom/pm-stats.c
new file mode 100644
index 000000000000..b3b28388bab0
--- /dev/null
+++ b/drivers/power/qcom/pm-stats.c
@@ -0,0 +1,672 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <soc/qcom/spm.h>
+#include <soc/qcom/pm.h>
+
+/* debugfs roots: msm_pm_stats/ and one per-CPU directory each. */
+static struct dentry *msm_pm_dbg_root;
+static struct dentry *msm_pm_dentry[NR_CPUS];
+
+/* Cookie handed to the per-CPU/per-state debugfs stat files. */
+struct pm_debugfs_private_data {
+	struct msm_pm_time_stats *stats;
+	unsigned int cpu;
+	unsigned int stats_id;
+};
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(
+	struct pm_debugfs_private_data, msm_pm_debugfs_private_data);
+
+static DEFINE_PER_CPU(
+	struct pm_debugfs_private_data, msm_pm_cpu_states[MSM_PM_STAT_COUNT]);
+
+/* cookie for the aggregate "stats" file (cpu == num_possible_cpus()) */
+static struct pm_debugfs_private_data all_stats_private_data;
+
+struct pm_debugfs_private_data msm_pm_suspend_states_data;
+
+/* One histogram of residency times, bucketed geometrically. */
+struct msm_pm_time_stats {
+	const char *name;
+	int64_t first_bucket_time;
+	int bucket[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
+	int64_t min_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
+	int64_t max_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
+	int count;
+	int64_t total_time;
+	bool enabled;
+	int sleep_mode;
+};
+
+/* system-wide suspend histogram (not per-CPU) */
+static struct msm_pm_time_stats suspend_stats;
+struct msm_pm_cpu_time_stats {
+	struct msm_pm_time_stats stats[MSM_PM_STAT_COUNT];
+};
+/* Cookie for the L2 debugfs stat files. */
+struct pm_l2_debugfs_private_data {
+	char *buf;
+	unsigned int stats_id;
+};
+
+struct _msm_pm_l2_time_stats {
+	struct msm_pm_time_stats stats[MSM_SPM_L2_MODE_LAST];
+};
+/* How stats_show() labels an entry. */
+enum stats_type {
+	MSM_PM_STATS_TYPE_CPU,
+	MSM_PM_STATS_TYPE_SUSPEND,
+	MSM_PM_STATS_TYPE_L2,
+};
+#define BUF_LEN 64
+static DEFINE_MUTEX(msm_pm_stats_mutex);
+static DEFINE_SPINLOCK(msm_pm_stats_lock);
+static DEFINE_PER_CPU_SHARED_ALIGNED(
+	struct msm_pm_cpu_time_stats, msm_pm_stats);
+static DEFINE_SPINLOCK(msm_pm_l2_stats_lock);
+static struct _msm_pm_l2_time_stats msm_pm_l2_time_stats;
+/* one cookie per L2 mode; MSM_SPM_L2_MODE_LAST means "all modes" */
+static struct pm_l2_debugfs_private_data l2_stats_private_data[] = {
+	{NULL, MSM_SPM_L2_MODE_DISABLED},
+	{NULL, MSM_SPM_L2_MODE_RETENTION},
+	{NULL, MSM_SPM_L2_MODE_GDHS},
+	{NULL, MSM_SPM_L2_MODE_PC_NO_RPM},
+	{NULL, MSM_SPM_L2_MODE_POWER_COLLAPSE},
+	{NULL, MSM_SPM_L2_MODE_LAST},
+};
+
+/*
+ * Function to update stats
+ */
+static void update_stats(struct msm_pm_time_stats *stats, int64_t t)
+{
+ int64_t bt;
+ int i;
+
+ if (!stats)
+ return;
+
+ stats->total_time += t;
+ stats->count++;
+
+ bt = t;
+ do_div(bt, stats->first_bucket_time);
+
+ if (bt < 1ULL << (CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT *
+ (CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1)))
+ i = DIV_ROUND_UP(fls((uint32_t)bt),
+ CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT);
+ else
+ i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
+
+ if (i >= CONFIG_MSM_IDLE_STATS_BUCKET_COUNT)
+ i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
+
+ stats->bucket[i]++;
+
+ if (t < stats->min_time[i] || !stats->max_time[i])
+ stats->min_time[i] = t;
+ if (t > stats->max_time[i])
+ stats->max_time[i] = t;
+}
+
+/*
+ * Add the given time data to the statistics collection.
+ */
+/*
+ * Record residency @t for stat @id on the current CPU.  SUSPEND goes
+ * to the single system-wide histogram; other ids go to this CPU's
+ * entry and are dropped when not enabled.
+ */
+void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t)
+{
+	struct msm_pm_time_stats *stats;
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_pm_stats_lock, flags);
+	if (id == MSM_PM_STAT_SUSPEND) {
+		stats = &suspend_stats;
+	} else {
+		stats = __get_cpu_var(msm_pm_stats).stats;
+		if (!stats[id].enabled)
+			goto add_bail;
+		stats = &stats[id];
+	}
+	update_stats(stats, t);
+add_bail:
+	spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
+}
+/* Record residency @t for L2 low-power mode @id (DISABLED is skipped). */
+void msm_pm_l2_add_stat(uint32_t id, int64_t t)
+{
+	unsigned long flags;
+	struct msm_pm_time_stats *stats;
+
+	if (id == MSM_SPM_L2_MODE_DISABLED || id >= MSM_SPM_L2_MODE_LAST)
+		return;
+
+	spin_lock_irqsave(&msm_pm_l2_stats_lock, flags);
+
+	stats = msm_pm_l2_time_stats.stats;
+	stats = &stats[id];
+	update_stats(stats, t);
+
+	spin_unlock_irqrestore(&msm_pm_l2_stats_lock, flags);
+}
+
+/*
+ * Print one histogram to @m: a header line ([cpu N]/[L2]/plain name
+ * depending on @type), total count/time, then one "<threshold" line
+ * per bucket.  The trailing ">=" line reuses s/ns from the last loop
+ * iteration, so its threshold equals the last "<" line's — i.e. the
+ * final bucket is everything at or above the previous bound.
+ */
+static void stats_show(struct seq_file *m,
+		struct msm_pm_time_stats *stats,
+		int cpu, enum stats_type type)
+{
+	int64_t bucket_time;
+	int64_t s;
+	uint32_t ns;
+	int i;
+	char str[BUF_LEN];
+	int bucket_count = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
+	int bucket_shift = CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT;
+
+	if (!stats || !m)
+		return;
+
+	s = stats->total_time;
+	ns = do_div(s, NSEC_PER_SEC);
+	switch (type) {
+
+	case MSM_PM_STATS_TYPE_CPU:
+		snprintf(str , BUF_LEN, "[cpu %u] %s", cpu, stats->name);
+		break;
+
+	case MSM_PM_STATS_TYPE_SUSPEND:
+		snprintf(str , BUF_LEN, "%s", stats->name);
+		break;
+	case MSM_PM_STATS_TYPE_L2:
+		snprintf(str , BUF_LEN, "[L2] %s", stats->name);
+		break;
+	default:
+		pr_err(" stats type error\n");
+		return;
+	}
+	seq_printf(m, "%s:\n"
+		"  count: %7d\n"
+		"  total_time: %lld.%09u\n",
+		str,
+		stats->count,
+		s, ns);
+
+	bucket_time = stats->first_bucket_time;
+	for (i = 0; i < bucket_count; i++) {
+		s = bucket_time;
+		ns = do_div(s, NSEC_PER_SEC);
+		seq_printf(m, "   <%6lld.%09u: %7d (%lld-%lld)\n",
+			s, ns, stats->bucket[i],
+			stats->min_time[i],
+			stats->max_time[i]);
+		bucket_time <<= bucket_shift;
+	}
+
+	seq_printf(m, "  >=%6lld.%09u: %7d (%lld-%lld)\n",
+		s, ns, stats->bucket[i],
+		stats->min_time[i],
+		stats->max_time[i]);
+}
+
+/*
+ * Write out the power management statistics.
+ */
+
+/*
+ * seq_file show: dump every enabled per-CPU statistic (suspend is
+ * system-wide and printed once at the end).
+ *
+ * Fix: the suspend line was emitted with the bare boolean `true` as
+ * the `enum stats_type` argument and the stale post-loop value of
+ * `cpu`; pass the proper enumerator and CPU 0 (the CPU is unused for
+ * the suspend type).
+ */
+static int msm_pm_stats_show(struct seq_file *m, void *v)
+{
+	int cpu;
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_pm_stats_lock, flags);
+	for_each_possible_cpu(cpu) {
+		struct msm_pm_time_stats *stats;
+		int id;
+
+		stats = per_cpu(msm_pm_stats, cpu).stats;
+
+		for (id = 0; id < MSM_PM_STAT_COUNT; id++) {
+			/* Skip the disabled ones */
+			if (!stats[id].enabled)
+				continue;
+
+			/* suspend is system-wide; printed below */
+			if (id == MSM_PM_STAT_SUSPEND)
+				continue;
+
+			stats_show(m, &stats[id], cpu, MSM_PM_STATS_TYPE_CPU);
+		}
+	}
+	stats_show(m, &suspend_stats, 0, MSM_PM_STATS_TYPE_SUSPEND);
+	spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
+	return 0;
+}
+
+#define MSM_PM_STATS_RESET "reset"
+/*
+ * Write handler: accept exactly the string "reset" (with terminator)
+ * and zero every per-CPU histogram.  Note the suspend_stats histogram
+ * is NOT reset here.  Returns @count on success.
+ */
+static ssize_t msm_pm_write_proc(struct file *file, const char __user *buffer,
+	size_t count, loff_t *off)
+{
+	char buf[sizeof(MSM_PM_STATS_RESET)];
+	int ret;
+	unsigned long flags;
+	unsigned int cpu;
+	size_t len = strnlen(MSM_PM_STATS_RESET, sizeof(MSM_PM_STATS_RESET));
+
+	if (count < sizeof(MSM_PM_STATS_RESET)) {
+		ret = -EINVAL;
+		goto write_proc_failed;
+	}
+
+	if (copy_from_user(buf, buffer, len)) {
+		ret = -EFAULT;
+		goto write_proc_failed;
+	}
+
+	if (strncmp(buf, MSM_PM_STATS_RESET, len)) {
+		ret = -EINVAL;
+		goto write_proc_failed;
+	}
+
+	spin_lock_irqsave(&msm_pm_stats_lock, flags);
+	for_each_possible_cpu(cpu) {
+		struct msm_pm_time_stats *stats;
+		int i;
+
+		stats = per_cpu(msm_pm_stats, cpu).stats;
+		for (i = 0; i < MSM_PM_STAT_COUNT; i++) {
+			memset(stats[i].bucket,
+				0, sizeof(stats[i].bucket));
+			memset(stats[i].min_time,
+				0, sizeof(stats[i].min_time));
+			memset(stats[i].max_time,
+				0, sizeof(stats[i].max_time));
+			stats[i].count = 0;
+			stats[i].total_time = 0;
+		}
+	}
+
+	spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
+	return count;
+
+write_proc_failed:
+	return ret;
+}
+#undef MSM_PM_STATS_RESET
+/*
+ * Print one specific statistic from the cookie: the system suspend
+ * histogram for MSM_PM_STAT_SUSPEND, otherwise the per-CPU histogram
+ * selected by stats_id.  Always returns 0.
+ */
+static size_t read_cpu_state_stats(struct seq_file *m,
+		struct pm_debugfs_private_data *private_data)
+{
+	struct msm_pm_time_stats *stats = NULL;
+	unsigned int id;
+	unsigned int cpu = 0;
+	unsigned long flags;
+
+	if (private_data == NULL || !private_data->stats)
+		return 0;
+
+	stats = private_data->stats;
+	cpu = private_data->cpu;
+	id = private_data->stats_id;
+
+	spin_lock_irqsave(&msm_pm_stats_lock, flags);
+
+	if (id == MSM_PM_STAT_SUSPEND)
+		stats_show(m, &suspend_stats, cpu, MSM_PM_STATS_TYPE_SUSPEND);
+	else
+		stats_show(m, &stats[id], cpu, MSM_PM_STATS_TYPE_CPU);
+
+	spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
+	return 0;
+}
+
+/*
+ * Print every enabled, supported statistic of @cpu (suspend excluded —
+ * it is system-wide).  Always returns 0.
+ *
+ * Fix: removed the unused local `idx` (MSM_PM_MODE(cpu, mode) was
+ * computed but never read).
+ */
+static size_t read_cpu_stats(struct seq_file *m,
+		struct pm_debugfs_private_data *private_data,
+		unsigned int cpu)
+{
+	struct msm_pm_time_stats *stats = NULL;
+	unsigned int id;
+	unsigned long flags;
+
+	if (private_data == NULL || !private_data->stats)
+		return 0;
+
+	stats = private_data->stats;
+
+	spin_lock_irqsave(&msm_pm_stats_lock, flags);
+
+	for (id = 0; id < MSM_PM_STAT_COUNT; id++) {
+		int mode;
+
+		if (!stats[id].enabled || id == MSM_PM_STAT_SUSPEND)
+			continue;
+
+		mode = stats[id].sleep_mode;
+
+		/* skip modes this CPU supports neither for idle nor suspend */
+		if (!msm_pm_sleep_mode_supported(cpu, mode, true) &&
+			!msm_pm_sleep_mode_supported(cpu, mode, false))
+			continue;
+		stats_show(m, &stats[id], cpu, MSM_PM_STATS_TYPE_CPU);
+	}
+	spin_unlock_irqrestore(&msm_pm_stats_lock, flags);
+
+	return 0;
+}
+
+/*
+ * seq_file show for the per-CPU and aggregate debugfs stat files.  A
+ * cookie cpu equal to num_possible_cpus() means "all CPUs"; otherwise
+ * either all states of one CPU (stats_id == MSM_PM_STAT_COUNT) or one
+ * specific state is printed.
+ *
+ * Fixes: `private_data` was a `static` local, so concurrent reads of
+ * different stat files raced on it — it is now an ordinary local.  The
+ * unreachable late NULL re-check (after the pointer was already
+ * dereferenced) is removed.
+ */
+static int msm_pm_stat_file_show(struct seq_file *m, void *v)
+{
+	unsigned int cpu = 0;
+	struct pm_debugfs_private_data *private_data;
+
+	if (!m->private)
+		return 0;
+
+	private_data = m->private;
+
+	if (num_possible_cpus() == private_data->cpu) {
+		/* statistics of all the cpus to be printed */
+		unsigned int i;
+		for (i = 0; i < num_possible_cpus(); i++) {
+			private_data = &per_cpu(msm_pm_debugfs_private_data, i);
+			read_cpu_stats(m, private_data, i);
+		}
+		stats_show(m, &suspend_stats, cpu, MSM_PM_STATS_TYPE_SUSPEND);
+	} else {
+		/* only current cpu statistics has to be printed */
+		cpu = private_data->cpu;
+		if (private_data->stats_id == MSM_PM_STAT_COUNT) {
+			/* Read all the states for the CPU */
+			read_cpu_stats(m, private_data, cpu);
+		} else {
+			read_cpu_state_stats(m, private_data);
+		}
+	}
+	return 0;
+}
+
+
+/* Open: bind the stat cookie stashed in i_private to a seq_file. */
+static int msm_pm_stat_file_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, msm_pm_stat_file_show, inode->i_private);
+}
+
+
+/* File operations for the per-CPU/aggregate debugfs stat files. */
+static const struct file_operations msm_pm_stat_fops = {
+	.owner = THIS_MODULE,
+	.open = msm_pm_stat_file_open,
+	.read = seq_read,
+	.release = single_release,
+	.llseek = no_llseek,
+};
+/*
+ * seq_file show for the L2 debugfs stat files: one mode, or all modes
+ * (excluding DISABLED at index 0) when stats_id == MSM_SPM_L2_MODE_LAST.
+ *
+ * Fix: `private_data` was a needless `static` local, racing concurrent
+ * readers; it is now an ordinary local.
+ */
+static int msm_pm_l2_stat_file_show(struct seq_file *m, void *v)
+{
+	struct msm_pm_time_stats *stats = NULL;
+	unsigned int id;
+	struct pm_l2_debugfs_private_data *private_data;
+
+	if (!m->private)
+		return 0;
+
+	private_data = m->private;
+	stats = msm_pm_l2_time_stats.stats;
+
+	if (private_data->stats_id == MSM_SPM_L2_MODE_LAST) {
+		/* All stats print */
+		for (id = 1; id < MSM_SPM_L2_MODE_LAST; id++) {
+			stats_show(m, &stats[id], 0, MSM_PM_STATS_TYPE_L2);
+		}
+	} else {
+		/* individual status print */
+		id = private_data->stats_id;
+		stats_show(m, &stats[id], 0, MSM_PM_STATS_TYPE_L2);
+	}
+	return 0;
+}
+
+/* Open: bind the L2 stat cookie stashed in i_private to a seq_file. */
+static int msm_pm_l2_stat_file_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, msm_pm_l2_stat_file_show, inode->i_private);
+}
+
+/* File operations for the L2 debugfs stat files. */
+static const struct file_operations msm_pm_l2_stat_fops = {
+	.owner = THIS_MODULE,
+	.open = msm_pm_l2_stat_file_open,
+	.read = seq_read,
+	.release = single_release,
+	.llseek = no_llseek,
+};
+
+/*
+ * Create the "l2" debugfs directory: name the four L2 histograms, add
+ * one file per mode (index 0, DISABLED, has no file) plus an aggregate
+ * "stats" file.  Returns false — removing the whole directory — if any
+ * file creation fails.
+ */
+static bool msm_pm_debugfs_create_l2(void)
+{
+	struct msm_pm_time_stats *stats = msm_pm_l2_time_stats.stats;
+	struct dentry *msm_pm_l2_root;
+	uint32_t stat_id;
+
+	msm_pm_l2_root = debugfs_create_dir("l2", msm_pm_dbg_root);
+	if (!msm_pm_l2_root)
+		return false;
+
+	stats[MSM_SPM_L2_MODE_GDHS].name = "GDHS";
+	stats[MSM_SPM_L2_MODE_GDHS].first_bucket_time =
+		CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+
+	stats[MSM_SPM_L2_MODE_RETENTION].name = "Retention";
+	stats[MSM_SPM_L2_MODE_RETENTION].first_bucket_time =
+		CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+
+	stats[MSM_SPM_L2_MODE_PC_NO_RPM].name = "No RPM";
+	stats[MSM_SPM_L2_MODE_PC_NO_RPM].first_bucket_time =
+		CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+
+	stats[MSM_SPM_L2_MODE_POWER_COLLAPSE].name = "PC";
+	stats[MSM_SPM_L2_MODE_POWER_COLLAPSE].first_bucket_time =
+		CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET;
+
+	for (stat_id = 1;
+		stat_id < MSM_SPM_L2_MODE_LAST;
+		stat_id++) {
+		if (!debugfs_create_file(
+			stats[stat_id].name,
+			S_IRUGO, msm_pm_l2_root,
+			(void *)&l2_stats_private_data[stat_id],
+			&msm_pm_l2_stat_fops)) {
+			goto l2_err;
+		}
+	}
+	stat_id = MSM_SPM_L2_MODE_LAST;
+	if (!debugfs_create_file("stats",
+		S_IRUGO, msm_pm_l2_root,
+		(void *)&l2_stats_private_data[stat_id],
+		&msm_pm_l2_stat_fops)) {
+		goto l2_err;
+	}
+
+	return true;
+l2_err:
+	debugfs_remove(msm_pm_l2_root);
+	return false;
+}
+
+
+static bool msm_pm_debugfs_create_root(void)
+{
+ bool ret = false;
+
+ msm_pm_dbg_root = debugfs_create_dir("msm_pm_stats", NULL);
+ if (!msm_pm_dbg_root)
+ goto root_error;
+
+ /* create over all stats file */
+ all_stats_private_data.cpu = num_possible_cpus();
+ all_stats_private_data.stats_id = MSM_PM_STAT_COUNT;
+ if (!debugfs_create_file("stats",
+ S_IRUGO, msm_pm_dbg_root, &all_stats_private_data,
+ &msm_pm_stat_fops)) {
+ debugfs_remove(msm_pm_dbg_root);
+ goto root_error;
+ }
+ ret = true;
+
+root_error:
+ return ret;
+}
+static int msm_pm_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, msm_pm_stats_show, NULL);
+}
+
+static const struct file_operations msm_pm_stats_fops = {
+ .open = msm_pm_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = msm_pm_write_proc,
+};
+
+void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats, int size)
+{
+ unsigned int cpu;
+ struct proc_dir_entry *d_entry;
+ int i = 0;
+ char cpu_name[8];
+ bool root_avl = false;
+
+ root_avl = msm_pm_debugfs_create_root();
+ if (root_avl) {
+ if (!msm_pm_debugfs_create_l2())
+ pr_err(" L2 debugfs create error\n");
+ }
+ suspend_stats.name = "system_suspend";
+ suspend_stats.first_bucket_time =
+ CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET;
+
+ msm_pm_suspend_states_data.stats = &suspend_stats;
+ msm_pm_suspend_states_data.stats_id = MSM_PM_STAT_SUSPEND;
+ if (!debugfs_create_file(suspend_stats.name,
+ S_IRUGO, msm_pm_dbg_root,
+ &msm_pm_suspend_states_data,
+ &msm_pm_stat_fops))
+ pr_err(" system_suspend debugfs create error\n");
+
+ for_each_possible_cpu(cpu) {
+ struct msm_pm_time_stats *stats =
+ per_cpu(msm_pm_stats, cpu).stats;
+
+ struct pm_debugfs_private_data *private_data =
+ &per_cpu(msm_pm_debugfs_private_data, cpu);
+ private_data->stats = stats;
+
+ stats[MSM_PM_STAT_REQUESTED_IDLE].name = "idle-request";
+ stats[MSM_PM_STAT_REQUESTED_IDLE].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+
+ stats[MSM_PM_STAT_IDLE_SPIN].name = "idle-spin";
+ stats[MSM_PM_STAT_IDLE_SPIN].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+
+ stats[MSM_PM_STAT_IDLE_WFI].name = "idle-wfi";
+ stats[MSM_PM_STAT_IDLE_WFI].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+
+ stats[MSM_PM_STAT_IDLE_WFI].sleep_mode =
+ MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT;
+
+ stats[MSM_PM_STAT_RETENTION].name = "retention";
+ stats[MSM_PM_STAT_RETENTION].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+ stats[MSM_PM_STAT_RETENTION].sleep_mode =
+ MSM_PM_SLEEP_MODE_RETENTION;
+
+ stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].name =
+ "idle-standalone-power-collapse";
+ stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].
+ first_bucket_time = CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+ stats[MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE].sleep_mode =
+ MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE;
+
+ stats[MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE].name =
+ "idle-failed-standalone-power-collapse";
+ stats[MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE].
+ first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+
+ stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].name =
+ "idle-power-collapse";
+ stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+ stats[MSM_PM_STAT_IDLE_POWER_COLLAPSE].sleep_mode =
+ MSM_PM_SLEEP_MODE_POWER_COLLAPSE;
+
+ stats[MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE].name =
+ "idle-failed-power-collapse";
+ stats[MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE].
+ first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+ stats[MSM_PM_STAT_NOT_IDLE].name = "not-idle";
+ stats[MSM_PM_STAT_NOT_IDLE].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+
+ for (i = 0; i < size; i++)
+ stats[enable_stats[i]].enabled = true;
+
+ if (root_avl) {
+ struct dentry *msm_pm_dbg_core;
+ /* create cpu directory */
+ snprintf(cpu_name, sizeof(cpu_name), "cpu%u", cpu);
+ msm_pm_dbg_core = debugfs_create_dir(cpu_name,
+ msm_pm_dbg_root);
+ if (!msm_pm_dbg_core)
+ continue;
+
+ /* create per cpu stats file */
+ private_data->cpu = cpu;
+ private_data->stats_id = MSM_PM_STAT_COUNT;
+ msm_pm_dentry[cpu] = debugfs_create_file("stats",
+ S_IRUGO, msm_pm_dbg_core, private_data,
+ &msm_pm_stat_fops);
+
+ if (msm_pm_dentry[cpu]) {
+ /* Create files related to individual states */
+ int id = 0;
+ struct dentry *handle;
+ struct pm_debugfs_private_data
+ *msm_pm_states_data;
+ for (id = 0; id < MSM_PM_STAT_COUNT; id++) {
+
+ if (stats[id].enabled != true ||
+ id == MSM_PM_STAT_SUSPEND)
+ continue;
+
+ msm_pm_states_data =
+ &per_cpu(msm_pm_cpu_states[id], cpu);
+ msm_pm_states_data->cpu = cpu;
+ msm_pm_states_data->stats_id = id;
+ msm_pm_states_data->stats = stats;
+ handle = debugfs_create_file(
+ stats[id].name,
+ S_IRUGO, msm_pm_dbg_core,
+ msm_pm_states_data,
+ &msm_pm_stat_fops);
+ }
+ }
+ }
+
+ }
+ d_entry = proc_create_data("msm_pm_stats", S_IRUGO | S_IWUSR | S_IWGRP,
+ NULL, &msm_pm_stats_fops, NULL);
+}
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index f65ff49bb275..63ce017f4387 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -84,6 +84,21 @@ config POWER_RESET_LTC2952
This driver supports an external powerdown trigger and board power
down via the LTC2952. Bindings are made in the device tree.
+config POWER_RESET_MSM
+ bool "Qualcomm MSM power-off driver"
+ depends on POWER_RESET && ARCH_MSM
+ help
+ Power off and restart support for Qualcomm boards.
+
+config MSM_DLOAD_MODE
+ bool "Qualcomm download mode"
+ depends on POWER_RESET_MSM
+ help
+ This makes the SoC enter download mode when it resets
+ due to a kernel panic. Note that this doesn't by itself
+ make the kernel reboot on a kernel panic - that must be
+ enabled via another mechanism.
+
config POWER_RESET_QNAP
bool "QNAP power-off driver"
depends on OF_GPIO && PLAT_ORION
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 76ce1c59469b..2f74f01c901e 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -8,6 +8,6 @@ obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o
obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o
-obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
obj-$(CONFIG_POWER_RESET_LTC2952) += ltc2952-poweroff.o
+obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
obj-$(CONFIG_POWER_RESET_SUN6I) += sun6i-reboot.o
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index 4702efdfe466..1393aeea147b 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -21,7 +21,45 @@
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/pm.h>
+#include <linux/qpnp/power-on.h>
+#include <linux/of_address.h>
+#include <asm/cacheflush.h>
+#include <asm/system_misc.h>
+
+#include <soc/qcom/scm.h>
+#include <soc/qcom/restart.h>
+
+#define EMERGENCY_DLOAD_MAGIC1 0x322A4F99
+#define EMERGENCY_DLOAD_MAGIC2 0xC67E4350
+#define EMERGENCY_DLOAD_MAGIC3 0x77777777
+
+#define SCM_IO_DISABLE_PMIC_ARBITER 1
+#define SCM_WDOG_DEBUG_BOOT_PART 0x9
+#define SCM_DLOAD_MODE 0X10
+#define SCM_EDLOAD_MODE 0X01
+#define SCM_DLOAD_CMD 0x10
+
+
+static int restart_mode;
+void *restart_reason;
+static bool scm_pmic_arbiter_disable_supported;
+/* Download mode master kill-switch */
+static void __iomem *msm_ps_hold;
+
+#ifdef CONFIG_MSM_DLOAD_MODE
+#define EDL_MODE_PROP "qcom,msm-imem-emergency_download_mode"
+#define DL_MODE_PROP "qcom,msm-imem-download_mode"
+
+static int in_panic;
+static void *dload_mode_addr;
+static bool dload_mode_enabled;
+static void *emergency_dload_mode_addr;
+static bool scm_dload_supported;
+
+static int dload_set(const char *val, struct kernel_param *kp);
+static int download_mode = 1;
+module_param_call(download_mode, dload_set, param_get_int,
+ &download_mode, 0644);
-static void __iomem *msm_ps_hold;
static int do_msm_restart(struct notifier_block *nb, unsigned long action,
void *data)
@@ -42,11 +81,261 @@ static void do_msm_poweroff(void)
/* TODO: Add poweroff capability */
do_msm_restart(&restart_nb, 0, NULL);
}
+static int panic_prep_restart(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ in_panic = 1;
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block panic_blk = {
+ .notifier_call = panic_prep_restart,
+};
+
+static void set_dload_mode(int on)
+{
+ int ret;
+
+ if (dload_mode_addr) {
+ __raw_writel(on ? 0xE47B337D : 0, dload_mode_addr);
+ __raw_writel(on ? 0xCE14091A : 0,
+ dload_mode_addr + sizeof(unsigned int));
+ mb();
+ }
+
+ if (scm_dload_supported) {
+ ret = scm_call_atomic2(SCM_SVC_BOOT,
+ SCM_DLOAD_CMD, on ? SCM_DLOAD_MODE : 0, 0);
+ if (ret)
+ pr_err("Failed to set DLOAD mode: %d\n", ret);
+ }
+
+ dload_mode_enabled = on;
+}
+
+static bool get_dload_mode(void)
+{
+ return dload_mode_enabled;
+}
+
+static void enable_emergency_dload_mode(void)
+{
+ int ret;
+
+ if (emergency_dload_mode_addr) {
+ __raw_writel(EMERGENCY_DLOAD_MAGIC1,
+ emergency_dload_mode_addr);
+ __raw_writel(EMERGENCY_DLOAD_MAGIC2,
+ emergency_dload_mode_addr +
+ sizeof(unsigned int));
+ __raw_writel(EMERGENCY_DLOAD_MAGIC3,
+ emergency_dload_mode_addr +
+ (2 * sizeof(unsigned int)));
+
+ /* Need disable the pmic wdt, then the emergency dload mode
+ * will not auto reset. */
+ qpnp_pon_wd_config(0);
+ mb();
+ }
+
+ if (scm_dload_supported) {
+ ret = scm_call_atomic2(SCM_SVC_BOOT,
+ SCM_DLOAD_CMD, SCM_EDLOAD_MODE, 0);
+ if (ret)
+ pr_err("Failed to set EDLOAD mode: %d\n", ret);
+ }
+}
+
+static int dload_set(const char *val, struct kernel_param *kp)
+{
+ int ret;
+ int old_val = download_mode;
+
+ ret = param_set_int(val, kp);
+
+ if (ret)
+ return ret;
+
+ /* If download_mode is not zero or one, ignore. */
+ if (download_mode >> 1) {
+ download_mode = old_val;
+ return -EINVAL;
+ }
+
+ set_dload_mode(download_mode);
+
+ return 0;
+}
+#else
+#define set_dload_mode(x) do {} while (0)
+
+static void enable_emergency_dload_mode(void)
+{
+ pr_err("dload mode is not enabled on target\n");
+}
+
+static bool get_dload_mode(void)
+{
+ return false;
+}
+#endif
+
+void msm_set_restart_mode(int mode)
+{
+ restart_mode = mode;
+}
+EXPORT_SYMBOL(msm_set_restart_mode);
+
+/*
+ * Force the SPMI PMIC arbiter to shutdown so that no more SPMI transactions
+ * are sent from the MSM to the PMIC. This is required in order to avoid an
+ * SPMI lockup on certain PMIC chips if PS_HOLD is lowered in the middle of
+ * an SPMI transaction.
+ */
+static void halt_spmi_pmic_arbiter(void)
+{
+ if (scm_pmic_arbiter_disable_supported) {
+ pr_crit("Calling SCM to disable SPMI PMIC arbiter\n");
+ scm_call_atomic1(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER, 0);
+ }
+}
+
+static void msm_restart_prepare(const char *cmd)
+{
+#ifdef CONFIG_MSM_DLOAD_MODE
+
+ /* Write download mode flags if we're panic'ing
+ * Write download mode flags if restart_mode says so
+ * Kill download mode if master-kill switch is set
+ */
+
+ set_dload_mode(download_mode &&
+ (in_panic || restart_mode == RESTART_DLOAD));
+#endif
+
+ /* Hard reset the PMIC unless memory contents must be maintained. */
+ if (get_dload_mode() || (cmd != NULL && cmd[0] != '\0'))
+ qpnp_pon_system_pwr_off(PON_POWER_OFF_WARM_RESET);
+ else
+ qpnp_pon_system_pwr_off(PON_POWER_OFF_HARD_RESET);
+
+ if (cmd != NULL) {
+ if (!strncmp(cmd, "bootloader", 10)) {
+ __raw_writel(0x77665500, restart_reason);
+ } else if (!strncmp(cmd, "recovery", 8)) {
+ __raw_writel(0x77665502, restart_reason);
+ } else if (!strcmp(cmd, "rtc")) {
+ __raw_writel(0x77665503, restart_reason);
+ } else if (!strncmp(cmd, "oem-", 4)) {
+ unsigned long code;
+ int ret;
+ ret = kstrtoul(cmd + 4, 16, &code);
+ if (!ret)
+ __raw_writel(0x6f656d00 | (code & 0xff),
+ restart_reason);
+ } else if (!strncmp(cmd, "edl", 3)) {
+ enable_emergency_dload_mode();
+ } else {
+ __raw_writel(0x77665501, restart_reason);
+ }
+ }
+
+ flush_cache_all();
+
+ /*outer_flush_all is not supported by 64bit kernel*/
+#ifndef CONFIG_ARM64
+ outer_flush_all();
+#endif
+
+}
+
+static void do_msm_restart(enum reboot_mode reboot_mode, const char *cmd)
+{
+ int ret;
+
+ pr_notice("Going down for restart now\n");
+
+ msm_restart_prepare(cmd);
+
+ /* Needed to bypass debug image on some chips */
+ ret = scm_call_atomic2(SCM_SVC_BOOT,
+ SCM_WDOG_DEBUG_BOOT_PART, 1, 0);
+ if (ret)
+ pr_err("Failed to disable wdog debug: %d\n", ret);
+
+ halt_spmi_pmic_arbiter();
+ __raw_writel(0, msm_ps_hold);
+
+ mdelay(10000);
+}
+
+static void do_msm_poweroff(void)
+{
+ int ret;
+
+ pr_notice("Powering off the SoC\n");
+#ifdef CONFIG_MSM_DLOAD_MODE
+ set_dload_mode(0);
+#endif
+ qpnp_pon_system_pwr_off(PON_POWER_OFF_SHUTDOWN);
+ /* Needed to bypass debug image on some chips */
+ ret = scm_call_atomic2(SCM_SVC_BOOT,
+ SCM_WDOG_DEBUG_BOOT_PART, 1, 0);
+ if (ret)
+ pr_err("Failed to disable wdog debug: %d\n", ret);
+
+ halt_spmi_pmic_arbiter();
+ /* MSM initiated power off, lower ps_hold */
+ __raw_writel(0, msm_ps_hold);
+
+ mdelay(10000);
+ pr_err("Powering off has failed\n");
+}
static int msm_restart_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *mem;
+ struct device_node *np;
+ int ret = 0;
+
+#ifdef CONFIG_MSM_DLOAD_MODE
+ if (scm_is_call_available(SCM_SVC_BOOT, SCM_DLOAD_CMD) > 0)
+ scm_dload_supported = true;
+
+ atomic_notifier_chain_register(&panic_notifier_list, &panic_blk);
+ np = of_find_compatible_node(NULL, NULL, DL_MODE_PROP);
+ if (!np) {
+ pr_err("unable to find DT imem DLOAD mode node\n");
+ } else {
+ dload_mode_addr = of_iomap(np, 0);
+ if (!dload_mode_addr)
+ pr_err("unable to map imem DLOAD offset\n");
+ }
+
+ np = of_find_compatible_node(NULL, NULL, EDL_MODE_PROP);
+ if (!np) {
+ pr_err("unable to find DT imem EDLOAD mode node\n");
+ } else {
+ emergency_dload_mode_addr = of_iomap(np, 0);
+ if (!emergency_dload_mode_addr)
+ pr_err("unable to map imem EDLOAD mode offset\n");
+ }
+
+ set_dload_mode(download_mode);
+#endif
+ np = of_find_compatible_node(NULL, NULL,
+ "qcom,msm-imem-restart_reason");
+ if (!np) {
+ pr_err("unable to find DT imem restart reason node\n");
+ } else {
+ restart_reason = of_iomap(np, 0);
+ if (!restart_reason) {
+ pr_err("unable to map imem restart reason offset\n");
+ ret = -ENOMEM;
+ goto err_restart_reason;
+ }
+ }
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
msm_ps_hold = devm_ioremap_resource(dev, mem);
@@ -56,8 +345,19 @@ static int msm_restart_probe(struct platform_device *pdev)
register_restart_handler(&restart_nb);
pm_power_off = do_msm_poweroff;
+ arm_pm_restart = do_msm_restart;
+
+ if (scm_is_call_available(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER) > 0)
+ scm_pmic_arbiter_disable_supported = true;
return 0;
+
+err_restart_reason:
+#ifdef CONFIG_MSM_DLOAD_MODE
+ iounmap(emergency_dload_mode_addr);
+ iounmap(dload_mode_addr);
+#endif
+ return ret;
}
static const struct of_device_id of_msm_restart_match[] = {
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 55d7b7b0f2e0..eee3f31bd558 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -57,6 +57,24 @@ config REGULATOR_USERSPACE_CONSUMER
+
+config REGULATOR_PROXY_CONSUMER
+ bool "Boot time regulator proxy consumer support"
+ help
+ This driver provides support for boot time regulator proxy requests.
+ It can enforce a specified voltage range, set a minimum current,
+ and/or keep a regulator enabled. It is needed in circumstances where
+ reducing one or more of these three quantities will cause hardware to
+ stop working if performed before the driver managing the hardware has
+ probed.
+
+config REGULATOR_GPIO
+ tristate "GPIO regulator support"
+ depends on GPIOLIB
+ help
+ This driver provides support for regulators that can be controlled
+ via GPIOs.
+
config REGULATOR_88PM800
tristate "Marvell 88PM800 Power regulators"
depends on MFD_88PM800
help
This driver supports Marvell 88PM800 voltage regulator chips.
It delivers digitally programmable output,
@@ -258,6 +272,15 @@ config REGULATOR_ISL9305
help
This driver supports ISL9305 voltage regulator chip.
+config REGULATOR_MEM_ACC
+ tristate "QTI Memory accelerator regulator driver"
+ help
+ Say y here to enable the memory accelerator driver for Qualcomm
+ Technologies (QTI) chips. The accelerator controls delays applied
+ for memory accesses.
+ This driver configures the power-mode (corner) for the memory
+ accelerator.
+
config REGULATOR_ISL6271A
tristate "Intersil ISL6271A Power regulator"
depends on I2C
@@ -690,6 +713,20 @@ config REGULATOR_TWL4030
This driver supports the voltage regulators provided by
this family of companion chips.
+config REGULATOR_TPS65132
+ tristate "TI TPS65132 Power regulator"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This driver supports TPS65132 voltage regulator chips. The chip has
+ a synchronous boost converter that generates a positive voltage which
+ is regulated down by an integrated LDO, providing a positive supply
+ rail. It also has a negative supply rail which is generated by an
+ integrated negative charge pump driven from the boost converter
+ output. Both output regulators support 4V~6V voltage range and
+ can be configured separately. Each regulator can be enabled and
+ disabled independently through an external pin.
+
config REGULATOR_VEXPRESS
tristate "Versatile Express regulators"
depends on VEXPRESS_CONFIG
@@ -725,5 +762,57 @@ config REGULATOR_WM8994
This driver provides support for the voltage regulators on the
WM8994 CODEC.
+
+config REGULATOR_STUB
+ tristate "Stub Regulator"
+ help
+ This driver adds stub regulator support. The driver is absent of any
+ real hardware based implementation. It allows for clients to register
+ their regulator device constraints and use all of the standard
+ regulator interfaces. This is useful for bringing up new platforms
+ when the real hardware based implementation may not be yet available.
+ Clients can use the real regulator device names with proper
+ constraint checking while the real driver is being developed.
+
+config REGULATOR_RPM_SMD
+ bool "RPM SMD regulator driver"
+ depends on OF
+ depends on MSM_RPM_SMD
+ help
+ Compile in support for the RPM SMD regulator driver which is used for
+ setting voltages and other parameters of the various power rails
+ supplied by some Qualcomm PMICs. The RPM SMD regulator driver should
+ be used on systems which contain an RPM which communicates with the
+ application processor over SMD.
+
+config REGULATOR_QPNP
+ depends on SPMI
+ depends on OF_SPMI
+ tristate "Qualcomm QPNP regulator support"
+ help
+ This driver supports voltage regulators in Qualcomm PMIC chips which
+ comply with QPNP. QPNP is a SPMI based PMIC implementation. These
+ chips provide several different varieties of LDO and switching
+ regulators. They also provide voltage switches and boost regulators.
+
+config REGULATOR_SPM
+ bool "SPM regulator driver"
+ depends on SPMI && OF_SPMI
+ help
+ Enable support for the SPM regulator driver which is used for
+ setting voltages of processor supply regulators via the SPM module
+ found inside of Qualcomm Technologies (QTI) chips. The SPM regulator
+ driver can be used on QTI SoCs where the APSS processor cores are
+ supplied by their own PMIC regulator.
+
+config REGULATOR_CPR
+ bool "RBCPR regulator driver for APC"
+ depends on OF
+ help
+ Compile in RBCPR (RapidBridge Core Power Reduction) driver to support
+ corner vote for APC power rail. The driver takes PTE process voltage
+ suggestions in efuse as initial settings. It converts corner vote
+ to voltage value before writing to a voltage regulator API, such as
+ that provided by spm-regulator driver.
endif
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 1029ed39c512..639ecd649b59 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_OF) += of_regulator.o
obj-$(CONFIG_REGULATOR_FIXED_VOLTAGE) += fixed.o
obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
+#obj-$(CONFIG_REGULATOR_PROXY_CONSUMER) += proxy-consumer.o
obj-$(CONFIG_REGULATOR_88PM800) += 88pm800.o
obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
@@ -59,6 +60,7 @@ obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
+obj-$(CONFIG_REGULATOR_MEM_ACC) += mem-acc-regulator.o
obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
obj-$(CONFIG_REGULATOR_PWM) += pwm-regulator.o
@@ -85,9 +87,11 @@ obj-$(CONFIG_REGULATOR_TPS65218) += tps65218-regulator.o
obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
obj-$(CONFIG_REGULATOR_TPS6586X) += tps6586x-regulator.o
obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
+obj-$(CONFIG_REGULATOR_STUB) += stub-regulator.o
obj-$(CONFIG_REGULATOR_TPS65912) += tps65912-regulator.o
obj-$(CONFIG_REGULATOR_TPS80031) += tps80031-regulator.o
obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o
+obj-$(CONFIG_REGULATOR_TPS65132) += tps65132-regulator.o
obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
@@ -95,6 +99,9 @@ obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o
obj-$(CONFIG_REGULATOR_WM8350) += wm8350-regulator.o
obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o
-
+obj-$(CONFIG_REGULATOR_RPM_SMD) += rpm-smd-regulator.o
+obj-$(CONFIG_REGULATOR_QPNP) += qpnp-regulator.o
+obj-$(CONFIG_REGULATOR_SPM) += spm-regulator.o
+obj-$(CONFIG_REGULATOR_CPR) += cpr-regulator.o
ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 5a1d4afa4776..ddfd7bf6948e 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/string.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
@@ -75,6 +76,71 @@ static void of_get_regulation_constraints(struct device_node *np,
constraints->enable_time = pval;
}
+static const char *consumer_supply_prop_name = "qcom,consumer-supplies";
+#define MAX_DEV_NAME_LEN 256
+/*
+ * Fill in regulator init_data based on qcom legacy requirements.
+ */
+static int of_get_qcom_regulator_init_data(struct device *dev,
+ struct regulator_init_data **init_data)
+{
+ struct device_node *node = dev->of_node;
+ struct regulator_consumer_supply *consumer_supplies;
+ int i, rc, num_consumer_supplies, array_len;
+
+ array_len = of_property_count_strings(node, consumer_supply_prop_name);
+ if (array_len > 0) {
+ /* Array length must be divisible by 2. */
+ if (array_len & 1) {
+ dev_err(dev, "error: %s device node property value "
+ "contains an odd number of elements: %d\n",
+ consumer_supply_prop_name, array_len);
+ return -EINVAL;
+ }
+ num_consumer_supplies = array_len / 2;
+
+ consumer_supplies = devm_kzalloc(dev,
+ sizeof(struct regulator_consumer_supply)
+ * num_consumer_supplies, GFP_KERNEL);
+ if (consumer_supplies == NULL) {
+ dev_err(dev, "devm_kzalloc failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_consumer_supplies; i++) {
+ rc = of_property_read_string_index(node,
+ consumer_supply_prop_name, i * 2,
+ &consumer_supplies[i].supply);
+ if (rc) {
+ dev_err(dev, "of_property_read_string_index "
+ "failed, rc=%d\n", rc);
+ devm_kfree(dev, consumer_supplies);
+ return rc;
+ }
+
+ rc = of_property_read_string_index(node,
+ consumer_supply_prop_name, (i * 2) + 1,
+ &consumer_supplies[i].dev_name);
+ if (rc) {
+ dev_err(dev, "of_property_read_string_index "
+ "failed, rc=%d\n", rc);
+ devm_kfree(dev, consumer_supplies);
+ return rc;
+ }
+
+ /* Treat dev_name = "" as a wildcard. */
+ if (strnlen(consumer_supplies[i].dev_name,
+ MAX_DEV_NAME_LEN) == 0)
+ consumer_supplies[i].dev_name = NULL;
+ }
+
+ (*init_data)->consumer_supplies = consumer_supplies;
+ (*init_data)->num_consumer_supplies = num_consumer_supplies;
+ }
+
+ return 0;
+}
+
/**
* of_get_regulator_init_data - extract regulator_init_data structure info
* @dev: device requesting for regulator_init_data
@@ -87,6 +153,7 @@ struct regulator_init_data *of_get_regulator_init_data(struct device *dev,
struct device_node *node)
{
struct regulator_init_data *init_data;
+ int rc;
if (!node)
return NULL;
@@ -96,6 +163,12 @@ struct regulator_init_data *of_get_regulator_init_data(struct device *dev,
return NULL; /* Out of memory? */
of_get_regulation_constraints(node, &init_data);
+ rc = of_get_qcom_regulator_init_data(dev, &init_data);
+ if (rc) {
+ devm_kfree(dev, init_data);
+ return NULL;
+ }
+
return init_data;
}
EXPORT_SYMBOL_GPL(of_get_regulator_init_data);
diff --git a/drivers/regulator/qpnp-regulator.c b/drivers/regulator/qpnp-regulator.c
new file mode 100644
index 000000000000..1de715ea8077
--- /dev/null
+++ b/drivers/regulator/qpnp-regulator.c
@@ -0,0 +1,2054 @@
+/*
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/ktime.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/qpnp-regulator.h>
+
+/* Debug Flag Definitions */
+enum {
+ QPNP_VREG_DEBUG_REQUEST = BIT(0), /* Show requests */
+ QPNP_VREG_DEBUG_DUPLICATE = BIT(1), /* Show duplicate requests */
+ QPNP_VREG_DEBUG_INIT = BIT(2), /* Show state after probe */
+ QPNP_VREG_DEBUG_WRITES = BIT(3), /* Show SPMI writes */
+ QPNP_VREG_DEBUG_READS = BIT(4), /* Show SPMI reads */
+ QPNP_VREG_DEBUG_OCP = BIT(5), /* Show VS OCP IRQ events */
+};
+
+static int qpnp_vreg_debug_mask;
+module_param_named(
+ debug_mask, qpnp_vreg_debug_mask, int, S_IRUSR | S_IWUSR
+);
+
+#define vreg_err(vreg, fmt, ...) \
+ pr_err("%s: " fmt, vreg->rdesc.name, ##__VA_ARGS__)
+
+/* These types correspond to unique register layouts. */
+enum qpnp_regulator_logical_type {
+ QPNP_REGULATOR_LOGICAL_TYPE_SMPS,
+ QPNP_REGULATOR_LOGICAL_TYPE_LDO,
+ QPNP_REGULATOR_LOGICAL_TYPE_VS,
+ QPNP_REGULATOR_LOGICAL_TYPE_BOOST,
+ QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS,
+ QPNP_REGULATOR_LOGICAL_TYPE_BOOST_BYP,
+ QPNP_REGULATOR_LOGICAL_TYPE_LN_LDO,
+ QPNP_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS,
+ QPNP_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS,
+ QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO,
+};
+
+enum qpnp_regulator_type {
+ QPNP_REGULATOR_TYPE_BUCK = 0x03,
+ QPNP_REGULATOR_TYPE_LDO = 0x04,
+ QPNP_REGULATOR_TYPE_VS = 0x05,
+ QPNP_REGULATOR_TYPE_BOOST = 0x1B,
+ QPNP_REGULATOR_TYPE_FTS = 0x1C,
+ QPNP_REGULATOR_TYPE_BOOST_BYP = 0x1F,
+ QPNP_REGULATOR_TYPE_ULT_LDO = 0x21,
+ QPNP_REGULATOR_TYPE_ULT_BUCK = 0x22,
+};
+
+enum qpnp_regulator_subtype {
+ QPNP_REGULATOR_SUBTYPE_GP_CTL = 0x08,
+ QPNP_REGULATOR_SUBTYPE_RF_CTL = 0x09,
+ QPNP_REGULATOR_SUBTYPE_N50 = 0x01,
+ QPNP_REGULATOR_SUBTYPE_N150 = 0x02,
+ QPNP_REGULATOR_SUBTYPE_N300 = 0x03,
+ QPNP_REGULATOR_SUBTYPE_N600 = 0x04,
+ QPNP_REGULATOR_SUBTYPE_N1200 = 0x05,
+ QPNP_REGULATOR_SUBTYPE_N600_ST = 0x06,
+ QPNP_REGULATOR_SUBTYPE_N1200_ST = 0x07,
+ QPNP_REGULATOR_SUBTYPE_N300_ST = 0x15,
+ QPNP_REGULATOR_SUBTYPE_P50 = 0x08,
+ QPNP_REGULATOR_SUBTYPE_P150 = 0x09,
+ QPNP_REGULATOR_SUBTYPE_P300 = 0x0A,
+ QPNP_REGULATOR_SUBTYPE_P600 = 0x0B,
+ QPNP_REGULATOR_SUBTYPE_P1200 = 0x0C,
+ QPNP_REGULATOR_SUBTYPE_LN = 0x10,
+ QPNP_REGULATOR_SUBTYPE_LV_P50 = 0x28,
+ QPNP_REGULATOR_SUBTYPE_LV_P150 = 0x29,
+ QPNP_REGULATOR_SUBTYPE_LV_P300 = 0x2A,
+ QPNP_REGULATOR_SUBTYPE_LV_P600 = 0x2B,
+ QPNP_REGULATOR_SUBTYPE_LV_P1200 = 0x2C,
+ QPNP_REGULATOR_SUBTYPE_LV100 = 0x01,
+ QPNP_REGULATOR_SUBTYPE_LV300 = 0x02,
+ QPNP_REGULATOR_SUBTYPE_MV300 = 0x08,
+ QPNP_REGULATOR_SUBTYPE_MV500 = 0x09,
+ QPNP_REGULATOR_SUBTYPE_HDMI = 0x10,
+ QPNP_REGULATOR_SUBTYPE_OTG = 0x11,
+ QPNP_REGULATOR_SUBTYPE_5V_BOOST = 0x01,
+ QPNP_REGULATOR_SUBTYPE_FTS_CTL = 0x08,
+ QPNP_REGULATOR_SUBTYPE_BB_2A = 0x01,
+ QPNP_REGULATOR_SUBTYPE_ULT_HF_CTL1 = 0x0D,
+ QPNP_REGULATOR_SUBTYPE_ULT_HF_CTL2 = 0x0E,
+ QPNP_REGULATOR_SUBTYPE_ULT_HF_CTL3 = 0x0F,
+ QPNP_REGULATOR_SUBTYPE_ULT_HF_CTL4 = 0x10,
+};
+
+enum qpnp_common_regulator_registers {
+ QPNP_COMMON_REG_DIG_MAJOR_REV = 0x01,
+ QPNP_COMMON_REG_TYPE = 0x04,
+ QPNP_COMMON_REG_SUBTYPE = 0x05,
+ QPNP_COMMON_REG_VOLTAGE_RANGE = 0x40,
+ QPNP_COMMON_REG_VOLTAGE_SET = 0x41,
+ QPNP_COMMON_REG_MODE = 0x45,
+ QPNP_COMMON_REG_ENABLE = 0x46,
+ QPNP_COMMON_REG_PULL_DOWN = 0x48,
+};
+
+enum qpnp_ldo_registers {
+ QPNP_LDO_REG_SOFT_START = 0x4C,
+};
+
+enum qpnp_vs_registers {
+ QPNP_VS_REG_OCP = 0x4A,
+ QPNP_VS_REG_SOFT_START = 0x4C,
+};
+
+enum qpnp_boost_registers {
+ QPNP_BOOST_REG_CURRENT_LIMIT = 0x4A,
+};
+
+enum qpnp_boost_byp_registers {
+ QPNP_BOOST_BYP_REG_CURRENT_LIMIT = 0x4B,
+};
+
+/* Used for indexing into ctrl_reg. These are offsets from 0x40 */
+enum qpnp_common_control_register_index {
+ QPNP_COMMON_IDX_VOLTAGE_RANGE = 0,
+ QPNP_COMMON_IDX_VOLTAGE_SET = 1,
+ QPNP_COMMON_IDX_MODE = 5,
+ QPNP_COMMON_IDX_ENABLE = 6,
+};
+
+/* Common regulator control register layout */
+#define QPNP_COMMON_ENABLE_MASK 0x80
+#define QPNP_COMMON_ENABLE 0x80
+#define QPNP_COMMON_DISABLE 0x00
+#define QPNP_COMMON_ENABLE_FOLLOW_HW_EN3_MASK 0x08
+#define QPNP_COMMON_ENABLE_FOLLOW_HW_EN2_MASK 0x04
+#define QPNP_COMMON_ENABLE_FOLLOW_HW_EN1_MASK 0x02
+#define QPNP_COMMON_ENABLE_FOLLOW_HW_EN0_MASK 0x01
+#define QPNP_COMMON_ENABLE_FOLLOW_ALL_MASK 0x0F
+
+/* Common regulator mode register layout */
+#define QPNP_COMMON_MODE_HPM_MASK 0x80
+#define QPNP_COMMON_MODE_AUTO_MASK 0x40
+#define QPNP_COMMON_MODE_BYPASS_MASK 0x20
+#define QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK 0x10
+#define QPNP_COMMON_MODE_FOLLOW_HW_EN3_MASK 0x08
+#define QPNP_COMMON_MODE_FOLLOW_HW_EN2_MASK 0x04
+#define QPNP_COMMON_MODE_FOLLOW_HW_EN1_MASK 0x02
+#define QPNP_COMMON_MODE_FOLLOW_HW_EN0_MASK 0x01
+#define QPNP_COMMON_MODE_FOLLOW_ALL_MASK 0x1F
+
+/* Common regulator pull down control register layout */
+#define QPNP_COMMON_PULL_DOWN_ENABLE_MASK 0x80
+
+/* LDO regulator current limit control register layout */
+#define QPNP_LDO_CURRENT_LIMIT_ENABLE_MASK 0x80
+
+/* LDO regulator soft start control register layout */
+#define QPNP_LDO_SOFT_START_ENABLE_MASK 0x80
+
+/* VS regulator over current protection control register layout */
+#define QPNP_VS_OCP_OVERRIDE 0x01
+#define QPNP_VS_OCP_NO_OVERRIDE 0x00
+
+/* VS regulator soft start control register layout */
+#define QPNP_VS_SOFT_START_ENABLE_MASK 0x80
+#define QPNP_VS_SOFT_START_SEL_MASK 0x03
+
+/* Boost regulator current limit control register layout */
+#define QPNP_BOOST_CURRENT_LIMIT_ENABLE_MASK 0x80
+#define QPNP_BOOST_CURRENT_LIMIT_MASK 0x07
+
+#define QPNP_VS_OCP_DEFAULT_MAX_RETRIES 10
+#define QPNP_VS_OCP_DEFAULT_RETRY_DELAY_MS 30
+#define QPNP_VS_OCP_FALL_DELAY_US 90
+#define QPNP_VS_OCP_FAULT_DELAY_US 20000
+
+/*
+ * This voltage in uV is returned by get_voltage functions when there is no way
+ * to determine the current voltage level. It is needed because the regulator
+ * framework treats a 0 uV voltage as an error.
+ */
+#define VOLTAGE_UNKNOWN 1
+
+/* VSET value to decide the range of ULT SMPS */
+#define ULT_SMPS_RANGE_SPLIT 0x60
+
+/**
+ * struct qpnp_voltage_range - regulator set point voltage mapping description
+ * @min_uV: Minimum programmable output voltage resulting from
+ * set point register value 0x00
+ * @max_uV: Maximum programmable output voltage
+ * @step_uV: Output voltage increase resulting from the set point
+ * register value increasing by 1
+ * @set_point_min_uV: Minimum allowed voltage
+ * @set_point_max_uV: Maximum allowed voltage. This may be tweaked in order
+ * to pick which range should be used in the case of
+ * overlapping set points.
+ * @n_voltages: Number of preferred voltage set points present in this
+ * range
+ * @range_sel: Voltage range register value corresponding to this range
+ *
+ * The following relationships must be true for the values used in this struct:
+ * (max_uV - min_uV) % step_uV == 0
+ * (set_point_min_uV - min_uV) % step_uV == 0*
+ * (set_point_max_uV - min_uV) % step_uV == 0*
+ * n_voltages = (set_point_max_uV - set_point_min_uV) / step_uV + 1
+ *
+ * *Note, set_point_min_uV == set_point_max_uV == 0 is allowed in order to
+ * specify that the voltage range has meaning, but is not preferred.
+ */
+struct qpnp_voltage_range {
+ int min_uV;
+ int max_uV;
+ int step_uV;
+ int set_point_min_uV;
+ int set_point_max_uV;
+ unsigned n_voltages;
+ u8 range_sel;
+};
+
+/*
+ * The ranges specified in the qpnp_voltage_set_points struct must be listed
+ * so that range[i].set_point_max_uV < range[i+1].set_point_min_uV.
+ */
+struct qpnp_voltage_set_points {
+ struct qpnp_voltage_range *range;
+ int count;
+ unsigned n_voltages;
+};
+
+struct qpnp_regulator_mapping {
+ enum qpnp_regulator_type type;
+ enum qpnp_regulator_subtype subtype;
+ enum qpnp_regulator_logical_type logical_type;
+ u32 revision_min;
+ u32 revision_max;
+ struct regulator_ops *ops;
+ struct qpnp_voltage_set_points *set_points;
+ int hpm_min_load;
+};
+
+struct qpnp_regulator {
+ struct regulator_desc rdesc;
+ struct delayed_work ocp_work;
+ struct spmi_device *spmi_dev;
+ struct regulator_dev *rdev;
+ struct qpnp_voltage_set_points *set_points;
+ enum qpnp_regulator_logical_type logical_type;
+ int enable_time;
+ int ocp_enable;
+ int ocp_irq;
+ int ocp_count;
+ int ocp_max_retries;
+ int ocp_retry_delay_ms;
+ int system_load;
+ int hpm_min_load;
+ u32 write_count;
+ u32 prev_write_count;
+ ktime_t vs_enable_time;
+ u16 base_addr;
+ /* ctrl_reg provides a shadow copy of register values 0x40 to 0x47. */
+ u8 ctrl_reg[8];
+};
+
+#define QPNP_VREG_MAP(_type, _subtype, _dig_major_min, _dig_major_max, \
+ _logical_type, _ops_val, _set_points_val, _hpm_min_load) \
+ { \
+ .type = QPNP_REGULATOR_TYPE_##_type, \
+ .subtype = QPNP_REGULATOR_SUBTYPE_##_subtype, \
+ .revision_min = _dig_major_min, \
+ .revision_max = _dig_major_max, \
+ .logical_type = QPNP_REGULATOR_LOGICAL_TYPE_##_logical_type, \
+ .ops = &qpnp_##_ops_val##_ops, \
+ .set_points = &_set_points_val##_set_points, \
+ .hpm_min_load = _hpm_min_load, \
+ }
+
+#define VOLTAGE_RANGE(_range_sel, _min_uV, _set_point_min_uV, \
+ _set_point_max_uV, _max_uV, _step_uV) \
+ { \
+ .min_uV = _min_uV, \
+ .max_uV = _max_uV, \
+ .set_point_min_uV = _set_point_min_uV, \
+ .set_point_max_uV = _set_point_max_uV, \
+ .step_uV = _step_uV, \
+ .range_sel = _range_sel, \
+ }
+
+/*
+ * Build a struct qpnp_voltage_set_points initializer from a voltage range
+ * array.  Note: no trailing semicolon here -- each use site already supplies
+ * its own; the previous trailing ';' expanded to a stray ";;" (an empty
+ * file-scope declaration) at every use, which -pedantic rejects.
+ */
+#define SET_POINTS(_ranges) \
+{ \
+	.range = _ranges, \
+	.count = ARRAY_SIZE(_ranges), \
+}
+
+/*
+ * These tables contain the physically available PMIC regulator voltage setpoint
+ * ranges. Where two ranges overlap in hardware, one of the ranges is trimmed
+ * to ensure that the setpoints available to software are monotonically
+ * increasing and unique. The set_voltage callback functions expect these
+ * properties to hold.
+ */
+static struct qpnp_voltage_range pldo_ranges[] = {
+ VOLTAGE_RANGE(2, 750000, 750000, 1537500, 1537500, 12500),
+ VOLTAGE_RANGE(3, 1500000, 1550000, 3075000, 3075000, 25000),
+ VOLTAGE_RANGE(4, 1750000, 3100000, 4900000, 4900000, 50000),
+};
+
+static struct qpnp_voltage_range nldo1_ranges[] = {
+ VOLTAGE_RANGE(2, 750000, 750000, 1537500, 1537500, 12500),
+};
+
+static struct qpnp_voltage_range nldo2_ranges[] = {
+ VOLTAGE_RANGE(0, 375000, 0, 0, 1537500, 12500),
+ VOLTAGE_RANGE(1, 375000, 375000, 768750, 768750, 6250),
+ VOLTAGE_RANGE(2, 750000, 775000, 1537500, 1537500, 12500),
+};
+
+static struct qpnp_voltage_range nldo3_ranges[] = {
+ VOLTAGE_RANGE(0, 375000, 375000, 1537500, 1537500, 12500),
+ VOLTAGE_RANGE(1, 375000, 0, 0, 1537500, 12500),
+ VOLTAGE_RANGE(2, 750000, 0, 0, 1537500, 12500),
+};
+
+static struct qpnp_voltage_range ln_ldo_ranges[] = {
+ VOLTAGE_RANGE(1, 690000, 690000, 1110000, 1110000, 60000),
+ VOLTAGE_RANGE(0, 1380000, 1380000, 2220000, 2220000, 120000),
+};
+
+static struct qpnp_voltage_range smps_ranges[] = {
+ VOLTAGE_RANGE(0, 375000, 375000, 1562500, 1562500, 12500),
+ VOLTAGE_RANGE(1, 1550000, 1575000, 3125000, 3125000, 25000),
+};
+
+static struct qpnp_voltage_range ftsmps_ranges[] = {
+ VOLTAGE_RANGE(0, 0, 350000, 1275000, 1275000, 5000),
+ VOLTAGE_RANGE(1, 0, 1280000, 2040000, 2040000, 10000),
+};
+
+static struct qpnp_voltage_range boost_ranges[] = {
+ VOLTAGE_RANGE(0, 4000000, 4000000, 5550000, 5550000, 50000),
+};
+
+static struct qpnp_voltage_range boost_byp_ranges[] = {
+ VOLTAGE_RANGE(0, 2500000, 2500000, 5200000, 5650000, 50000),
+};
+
+static struct qpnp_voltage_range ult_lo_smps_ranges[] = {
+ VOLTAGE_RANGE(0, 375000, 375000, 1562500, 1562500, 12500),
+ VOLTAGE_RANGE(1, 750000, 0, 0, 1525000, 25000),
+};
+
+static struct qpnp_voltage_range ult_ho_smps_ranges[] = {
+ VOLTAGE_RANGE(0, 1550000, 1550000, 2325000, 2325000, 25000),
+};
+
+static struct qpnp_voltage_range ult_nldo_ranges[] = {
+ VOLTAGE_RANGE(0, 375000, 375000, 1537500, 1537500, 12500),
+};
+
+static struct qpnp_voltage_range ult_pldo_ranges[] = {
+ VOLTAGE_RANGE(0, 1750000, 1750000, 3337500, 3337500, 12500),
+};
+
+static struct qpnp_voltage_set_points pldo_set_points = SET_POINTS(pldo_ranges);
+static struct qpnp_voltage_set_points nldo1_set_points
+ = SET_POINTS(nldo1_ranges);
+static struct qpnp_voltage_set_points nldo2_set_points
+ = SET_POINTS(nldo2_ranges);
+static struct qpnp_voltage_set_points nldo3_set_points
+ = SET_POINTS(nldo3_ranges);
+static struct qpnp_voltage_set_points ln_ldo_set_points
+ = SET_POINTS(ln_ldo_ranges);
+static struct qpnp_voltage_set_points smps_set_points = SET_POINTS(smps_ranges);
+static struct qpnp_voltage_set_points ftsmps_set_points
+ = SET_POINTS(ftsmps_ranges);
+static struct qpnp_voltage_set_points boost_set_points
+ = SET_POINTS(boost_ranges);
+static struct qpnp_voltage_set_points boost_byp_set_points
+ = SET_POINTS(boost_byp_ranges);
+static struct qpnp_voltage_set_points ult_lo_smps_set_points
+ = SET_POINTS(ult_lo_smps_ranges);
+static struct qpnp_voltage_set_points ult_ho_smps_set_points
+ = SET_POINTS(ult_ho_smps_ranges);
+static struct qpnp_voltage_set_points ult_nldo_set_points
+ = SET_POINTS(ult_nldo_ranges);
+static struct qpnp_voltage_set_points ult_pldo_set_points
+ = SET_POINTS(ult_pldo_ranges);
+static struct qpnp_voltage_set_points none_set_points;
+
+static struct qpnp_voltage_set_points *all_set_points[] = {
+ &pldo_set_points,
+ &nldo1_set_points,
+ &nldo2_set_points,
+ &nldo3_set_points,
+ &ln_ldo_set_points,
+ &smps_set_points,
+ &ftsmps_set_points,
+ &boost_set_points,
+ &boost_byp_set_points,
+ &ult_lo_smps_set_points,
+ &ult_ho_smps_set_points,
+ &ult_nldo_set_points,
+ &ult_pldo_set_points,
+};
+
+/* Determines which label to add to a debug print statement. */
+enum qpnp_regulator_action {
+ QPNP_REGULATOR_ACTION_INIT,
+ QPNP_REGULATOR_ACTION_ENABLE,
+ QPNP_REGULATOR_ACTION_DISABLE,
+ QPNP_REGULATOR_ACTION_VOLTAGE,
+ QPNP_REGULATOR_ACTION_MODE,
+};
+
+static void qpnp_vreg_show_state(struct regulator_dev *rdev,
+ enum qpnp_regulator_action action);
+
+#define DEBUG_PRINT_BUFFER_SIZE 64
+/*
+ * Render buf as a comma separated list of "0xXX" hex bytes into str.
+ * scnprintf() bounds every write to str_len, so str cannot overflow.
+ */
+static void fill_string(char *str, size_t str_len, u8 *buf, int buf_len)
+{
+	int pos = 0;
+	int i;
+
+	for (i = 0; i < buf_len; i++) {
+		pos += scnprintf(str + pos, str_len - pos, "0x%02X", buf[i]);
+		if (i < buf_len - 1)
+			pos += scnprintf(str + pos, str_len - pos, ", ");
+	}
+}
+
+/*
+ * Read len bytes starting at peripheral offset addr into buf over SPMI.
+ * On success, logs the transaction when QPNP_VREG_DEBUG_READS is set in
+ * the debug_mask module parameter.  Returns the SPMI read result code.
+ */
+static inline int qpnp_vreg_read(struct qpnp_regulator *vreg, u16 addr, u8 *buf,
+				int len)
+{
+	char str[DEBUG_PRINT_BUFFER_SIZE];
+	int rc = 0;
+
+	rc = spmi_ext_register_readl(vreg->spmi_dev, vreg->base_addr + addr, buf, len);
+
+	if (!rc && (qpnp_vreg_debug_mask & QPNP_VREG_DEBUG_READS)) {
+		str[0] = '\0';
+		fill_string(str, DEBUG_PRINT_BUFFER_SIZE, buf, len);
+		pr_info(" %-11s: read(0x%04X), usid=%d, len=%d; %s\n",
+			vreg->rdesc.name, vreg->base_addr + addr,
+			vreg->spmi_dev->usid, len, str);
+	}
+
+	return rc;
+}
+
+/*
+ * Write len bytes from buf to peripheral offset addr over SPMI.  Logs the
+ * transaction first when QPNP_VREG_DEBUG_WRITES is set.  write_count is
+ * bumped on success so callers can detect register changes since a
+ * previous snapshot.  Returns the SPMI write result code.
+ */
+static inline int qpnp_vreg_write(struct qpnp_regulator *vreg, u16 addr,
+				u8 *buf, int len)
+{
+	char str[DEBUG_PRINT_BUFFER_SIZE];
+	int rc = 0;
+
+	if (qpnp_vreg_debug_mask & QPNP_VREG_DEBUG_WRITES) {
+		str[0] = '\0';
+		fill_string(str, DEBUG_PRINT_BUFFER_SIZE, buf, len);
+		pr_info("%-11s: write(0x%04X), usid=%d, len=%d; %s\n",
+			vreg->rdesc.name, vreg->base_addr + addr,
+			vreg->spmi_dev->usid, len, str);
+	}
+
+	rc = spmi_ext_register_writel(vreg->spmi_dev, vreg->base_addr + addr, buf, len);
+	if (!rc)
+		vreg->write_count += len;
+
+	return rc;
+}
+
+/*
+ * qpnp_vreg_write_optimized - write the minimum sized contiguous subset of buf
+ * @vreg: qpnp_regulator pointer for this regulator
+ * @addr: local SPMI address offset from this peripheral's base address
+ * @buf: new data to write into the SPMI registers
+ * @buf_save: old data in the registers
+ * @len: number of bytes to write
+ *
+ * This function checks for unchanged register values between buf and buf_save
+ * starting at both ends of buf. Only the contiguous subset in the middle of
+ * buf starting and ending with new values is sent.
+ *
+ * Consider the following example:
+ * buf offset: 0 1 2 3 4 5 6 7
+ * reg state: U U C C U C U U
+ * (U = unchanged, C = changed)
+ * In this example registers 2 through 5 will be written with a single
+ * transaction.
+ */
+static inline int qpnp_vreg_write_optimized(struct qpnp_regulator *vreg,
+		u16 addr, u8 *buf, u8 *buf_save, int len)
+{
+	int i, rc, start, end;
+
+	/* Find the first byte that differs from the shadow copy. */
+	for (i = 0; i < len; i++)
+		if (buf[i] != buf_save[i])
+			break;
+	start = i;
+
+	/* Find the last byte that differs from the shadow copy. */
+	for (i = len - 1; i >= 0; i--)
+		if (buf[i] != buf_save[i])
+			break;
+	end = i;
+
+	if (start > end) {
+		/* No modified register values present. */
+		return 0;
+	}
+
+	/* Write only the contiguous changed span [start, end]. */
+	rc = qpnp_vreg_write(vreg, addr + start, &buf[start], end - start + 1);
+	if (!rc)
+		/* Refresh the shadow copy for the bytes actually written. */
+		for (i = start; i <= end; i++)
+			buf_save[i] = buf[i];
+
+	return rc;
+}
+
+/*
+ * Perform a masked write to a PMIC register only if the new value differs
+ * from the last value written to the register. This removes redundant
+ * register writing.
+ */
+static int qpnp_vreg_masked_write(struct qpnp_regulator *vreg, u16 addr, u8 val,
+		u8 mask, u8 *reg_save)
+{
+	int rc = 0;
+	u8 reg;
+
+	/* Merge the masked bits of val into the last-written value. */
+	reg = (*reg_save & ~mask) | (val & mask);
+	if (reg != *reg_save) {
+		rc = qpnp_vreg_write(vreg, addr, &reg, 1);
+
+		if (rc) {
+			vreg_err(vreg, "write failed; addr=0x%03X, rc=%d\n",
+				addr, rc);
+		} else {
+			/* Keep the shadow value in sync with the hardware. */
+			*reg_save = reg;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * Perform a masked read-modify-write to a PMIC register only if the new value
+ * differs from the value currently in the register. This removes redundant
+ * register writing.
+ */
+static int qpnp_vreg_masked_read_write(struct qpnp_regulator *vreg, u16 addr,
+		u8 val, u8 mask)
+{
+	int rc;
+	u8 reg;
+
+	/* Fetch the current hardware value to use as the shadow baseline. */
+	rc = qpnp_vreg_read(vreg, addr, &reg, 1);
+	if (rc) {
+		vreg_err(vreg, "read failed; addr=0x%03X, rc=%d\n", addr, rc);
+		return rc;
+	}
+
+	return qpnp_vreg_masked_write(vreg, addr, val, mask, &reg);
+}
+
+/*
+ * Report enable state from the cached copy of the ENABLE register;
+ * no SPMI transaction is performed.
+ */
+static int qpnp_regulator_common_is_enabled(struct regulator_dev *rdev)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+
+	return (vreg->ctrl_reg[QPNP_COMMON_IDX_ENABLE]
+			& QPNP_COMMON_ENABLE_MASK)
+		== QPNP_COMMON_ENABLE;
+}
+
+/* Set the software-enable bit in the common ENABLE register. */
+static int qpnp_regulator_common_enable(struct regulator_dev *rdev)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	rc = qpnp_vreg_masked_write(vreg, QPNP_COMMON_REG_ENABLE,
+		QPNP_COMMON_ENABLE, QPNP_COMMON_ENABLE_MASK,
+		&vreg->ctrl_reg[QPNP_COMMON_IDX_ENABLE]);
+
+	if (rc)
+		vreg_err(vreg, "qpnp_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		qpnp_vreg_show_state(rdev, QPNP_REGULATOR_ACTION_ENABLE);
+
+	return rc;
+}
+
+/*
+ * Voltage-switch enable: additionally resets the OCP retry counter and
+ * timestamps the enable so the OCP ISR can distinguish hotplug from fault.
+ */
+static int qpnp_regulator_vs_enable(struct regulator_dev *rdev)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+
+	if (vreg->ocp_irq) {
+		vreg->ocp_count = 0;
+		vreg->vs_enable_time = ktime_get();
+	}
+
+	return qpnp_regulator_common_enable(rdev);
+}
+
+/* Clear the software-enable bit in the common ENABLE register. */
+static int qpnp_regulator_common_disable(struct regulator_dev *rdev)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	int rc;
+
+	rc = qpnp_vreg_masked_write(vreg, QPNP_COMMON_REG_ENABLE,
+		QPNP_COMMON_DISABLE, QPNP_COMMON_ENABLE_MASK,
+		&vreg->ctrl_reg[QPNP_COMMON_IDX_ENABLE]);
+
+	if (rc)
+		vreg_err(vreg, "qpnp_vreg_masked_write failed, rc=%d\n", rc);
+	else
+		qpnp_vreg_show_state(rdev, QPNP_REGULATOR_ACTION_DISABLE);
+
+	return rc;
+}
+
+/*
+ * Returns 1 if the voltage can be set in the current range, 0 if the voltage
+ * cannot be set in the current range, or errno if an error occurred.
+ */
+static int qpnp_regulator_select_voltage_same_range(struct qpnp_regulator *vreg,
+		int min_uV, int max_uV, int *range_sel, int *voltage_sel,
+		unsigned *selector)
+{
+	struct qpnp_voltage_range *range = NULL;
+	int uV = min_uV;
+	int i;
+
+	/* The cached VOLTAGE_RANGE register identifies the active range. */
+	*range_sel = vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE];
+
+	for (i = 0; i < vreg->set_points->count; i++) {
+		if (vreg->set_points->range[i].range_sel == *range_sel) {
+			range = &vreg->set_points->range[i];
+			break;
+		}
+	}
+
+	if (!range) {
+		/* Unknown range */
+		return 0;
+	}
+
+	/* Raise uV to the range floor if the request window permits it. */
+	if (uV < range->min_uV && max_uV >= range->min_uV)
+		uV = range->min_uV;
+
+	if (uV < range->min_uV || uV > range->max_uV) {
+		/* Current range doesn't support the requested voltage. */
+		return 0;
+	}
+
+	/*
+	 * Force uV to be an allowed set point by applying a ceiling function to
+	 * the uV value.
+	 */
+	*voltage_sel = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
+	uV = *voltage_sel * range->step_uV + range->min_uV;
+
+	if (uV > max_uV) {
+		/*
+		 * No set point in the current voltage range is within the
+		 * requested min_uV to max_uV range.
+		 */
+		return 0;
+	}
+
+	/* Translate uV into a logical selector spanning all ranges. */
+	*selector = 0;
+	for (i = 0; i < vreg->set_points->count; i++) {
+		if (uV >= vreg->set_points->range[i].set_point_min_uV
+		    && uV <= vreg->set_points->range[i].set_point_max_uV) {
+			*selector +=
+			     (uV - vreg->set_points->range[i].set_point_min_uV)
+				/ vreg->set_points->range[i].step_uV;
+			break;
+		} else {
+			*selector += vreg->set_points->range[i].n_voltages;
+		}
+	}
+
+	if (*selector >= vreg->set_points->n_voltages)
+		return 0;
+
+	return 1;
+}
+
+/*
+ * Pick the voltage range and set-point register values satisfying
+ * [min_uV, max_uV], searching across all ranges.  Outputs range_sel,
+ * voltage_sel and the logical selector; returns 0 on success or -EINVAL
+ * when no set point can satisfy the request.
+ */
+static int qpnp_regulator_select_voltage(struct qpnp_regulator *vreg,
+		int min_uV, int max_uV, int *range_sel, int *voltage_sel,
+		unsigned *selector)
+{
+	struct qpnp_voltage_range *range;
+	int uV = min_uV;
+	int lim_min_uV, lim_max_uV, i, range_id, range_max_uV;
+
+	/* Check if request voltage is outside of physically settable range. */
+	lim_min_uV = vreg->set_points->range[0].set_point_min_uV;
+	lim_max_uV =
+	  vreg->set_points->range[vreg->set_points->count - 1].set_point_max_uV;
+
+	if (uV < lim_min_uV && max_uV >= lim_min_uV)
+		uV = lim_min_uV;
+
+	if (uV < lim_min_uV || uV > lim_max_uV) {
+		vreg_err(vreg,
+			"request v=[%d, %d] is outside possible v=[%d, %d]\n",
+			 min_uV, max_uV, lim_min_uV, lim_max_uV);
+		return -EINVAL;
+	}
+
+	/* Find the range which uV is inside of. */
+	for (i = vreg->set_points->count - 1; i > 0; i--) {
+		range_max_uV = vreg->set_points->range[i - 1].set_point_max_uV;
+		if (uV > range_max_uV && range_max_uV > 0)
+			break;
+	}
+
+	range_id = i;
+	range = &vreg->set_points->range[range_id];
+	*range_sel = range->range_sel;
+
+	/*
+	 * Force uV to be an allowed set point by applying a ceiling function to
+	 * the uV value.
+	 */
+	*voltage_sel = (uV - range->min_uV + range->step_uV - 1)
+			/ range->step_uV;
+	uV = *voltage_sel * range->step_uV + range->min_uV;
+
+	if (uV > max_uV) {
+		vreg_err(vreg,
+			"request v=[%d, %d] cannot be met by any set point; "
+			"next set point: %d\n",
+			min_uV, max_uV, uV);
+		return -EINVAL;
+	}
+
+	/* Selector counts set points of all lower ranges, then this one. */
+	*selector = 0;
+	for (i = 0; i < range_id; i++)
+		*selector += vreg->set_points->range[i].n_voltages;
+	*selector += (uV - range->set_point_min_uV) / range->step_uV;
+
+	return 0;
+}
+
+/*
+ * set_voltage callback for regulators with both VOLTAGE_RANGE and
+ * VOLTAGE_SET registers.  Prefers the currently programmed range; writes
+ * both registers (in one transaction when the range changes but the set
+ * point byte does not, so the new range actually latches).
+ */
+static int qpnp_regulator_common_set_voltage(struct regulator_dev *rdev,
+		int min_uV, int max_uV, unsigned *selector)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	int rc, range_sel, voltage_sel;
+	u8 buf[2];
+
+	/*
+	 * Favor staying in the current voltage range if possible. This avoids
+	 * voltage spikes that occur when changing the voltage range.
+	 */
+	rc = qpnp_regulator_select_voltage_same_range(vreg, min_uV, max_uV,
+		&range_sel, &voltage_sel, selector);
+	if (rc == 0)
+		rc = qpnp_regulator_select_voltage(vreg, min_uV, max_uV,
+			&range_sel, &voltage_sel, selector);
+	if (rc < 0) {
+		vreg_err(vreg, "could not set voltage, rc=%d\n", rc);
+		return rc;
+	}
+
+	buf[0] = range_sel;
+	buf[1] = voltage_sel;
+	if ((vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE] != range_sel)
+	    && (vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_SET] == voltage_sel)) {
+		/* Handle latched range change. */
+		rc = qpnp_vreg_write(vreg, QPNP_COMMON_REG_VOLTAGE_RANGE,
+				     buf, 2);
+		if (!rc) {
+			vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE] = buf[0];
+			vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_SET] = buf[1];
+		}
+	} else {
+		/* Either write can be optimized away safely. */
+		rc = qpnp_vreg_write_optimized(vreg,
+			QPNP_COMMON_REG_VOLTAGE_RANGE, buf,
+			&vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE], 2);
+	}
+
+	if (rc)
+		vreg_err(vreg, "SPMI write failed, rc=%d\n", rc);
+	else
+		qpnp_vreg_show_state(rdev, QPNP_REGULATOR_ACTION_VOLTAGE);
+
+	return rc;
+}
+
+/*
+ * Compute the output voltage from the cached range and set-point register
+ * values.  Returns VOLTAGE_UNKNOWN if the range register value does not
+ * match any known range (0 would be treated as an error by the framework).
+ */
+static int qpnp_regulator_common_get_voltage(struct regulator_dev *rdev)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	struct qpnp_voltage_range *range = NULL;
+	int range_sel, voltage_sel, i;
+
+	range_sel = vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE];
+	voltage_sel = vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_SET];
+
+	for (i = 0; i < vreg->set_points->count; i++) {
+		if (vreg->set_points->range[i].range_sel == range_sel) {
+			range = &vreg->set_points->range[i];
+			break;
+		}
+	}
+
+	if (!range) {
+		vreg_err(vreg, "voltage unknown, range %d is invalid\n",
+			range_sel);
+		return VOLTAGE_UNKNOWN;
+	}
+
+	return range->step_uV * voltage_sel + range->min_uV;
+}
+
+/*
+ * set_voltage callback for regulator types with a single voltage range
+ * and therefore no range select register.
+ */
+static int qpnp_regulator_single_range_set_voltage(struct regulator_dev *rdev,
+		int min_uV, int max_uV, unsigned *selector)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	int rc, range_sel, voltage_sel;
+
+	rc = qpnp_regulator_select_voltage(vreg, min_uV, max_uV, &range_sel,
+		&voltage_sel, selector);
+	if (rc) {
+		vreg_err(vreg, "could not set voltage, rc=%d\n", rc);
+		return rc;
+	}
+
+	/*
+	 * Certain types of regulators do not have a range select register so
+	 * only voltage set register needs to be written.
+	 */
+	rc = qpnp_vreg_masked_write(vreg, QPNP_COMMON_REG_VOLTAGE_SET,
+	       voltage_sel, 0xFF, &vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_SET]);
+
+	if (rc)
+		vreg_err(vreg, "SPMI write failed, rc=%d\n", rc);
+	else
+		qpnp_vreg_show_state(rdev, QPNP_REGULATOR_ACTION_VOLTAGE);
+
+	return rc;
+}
+
+/* get_voltage counterpart for single-range regulators: range[0] is fixed. */
+static int qpnp_regulator_single_range_get_voltage(struct regulator_dev *rdev)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	struct qpnp_voltage_range *range = &vreg->set_points->range[0];
+	int voltage_sel = vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_SET];
+
+	return range->step_uV * voltage_sel + range->min_uV;
+}
+
+/*
+ * set_voltage callback for ULT low-range SMPS regulators.  The range is
+ * encoded in the VSET value itself (bit pattern via ULT_SMPS_RANGE_SPLIT)
+ * rather than in a separate range register write.
+ */
+static int qpnp_regulator_ult_lo_smps_set_voltage(struct regulator_dev *rdev,
+		int min_uV, int max_uV, unsigned *selector)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	int rc, range_sel, voltage_sel;
+
+	/*
+	 * Favor staying in the current voltage range if possible. This avoids
+	 * voltage spikes that occur when changing the voltage range.
+	 */
+	rc = qpnp_regulator_select_voltage_same_range(vreg, min_uV, max_uV,
+		&range_sel, &voltage_sel, selector);
+	if (rc == 0)
+		rc = qpnp_regulator_select_voltage(vreg, min_uV, max_uV,
+			&range_sel, &voltage_sel, selector);
+	if (rc < 0) {
+		vreg_err(vreg, "could not set voltage, rc=%d\n", rc);
+		return rc;
+	}
+
+	/*
+	 * Calculate VSET based on range
+	 * In case of range 0: voltage_sel is a 7 bit value, can be written
+	 * without any modification.
+	 * In case of range 1: voltage_sel is a 5 bit value, bits[7-5] set to
+	 * [011].
+	 */
+	if (range_sel == 1)
+		voltage_sel |= ULT_SMPS_RANGE_SPLIT;
+
+	rc = qpnp_vreg_masked_write(vreg, QPNP_COMMON_REG_VOLTAGE_SET,
+	       voltage_sel, 0xFF, &vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_SET]);
+	if (rc) {
+		vreg_err(vreg, "SPMI write failed, rc=%d\n", rc);
+	} else {
+		/* Track the selected range in the shadow register copy. */
+		vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE] = range_sel;
+		qpnp_vreg_show_state(rdev, QPNP_REGULATOR_ACTION_VOLTAGE);
+	}
+
+	return rc;
+}
+
+/*
+ * get_voltage counterpart for ULT low-range SMPS: strips the range-split
+ * bits from VSET for range 1 before converting to microvolts.
+ */
+static int qpnp_regulator_ult_lo_smps_get_voltage(struct regulator_dev *rdev)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	struct qpnp_voltage_range *range = NULL;
+	int range_sel, voltage_sel, i;
+
+	range_sel = vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE];
+	voltage_sel = vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_SET];
+
+	for (i = 0; i < vreg->set_points->count; i++) {
+		if (vreg->set_points->range[i].range_sel == range_sel) {
+			range = &vreg->set_points->range[i];
+			break;
+		}
+	}
+
+	if (!range) {
+		vreg_err(vreg, "voltage unknown, range %d is invalid\n",
+			range_sel);
+		return VOLTAGE_UNKNOWN;
+	}
+
+	if (range_sel == 1)
+		voltage_sel &= ~ULT_SMPS_RANGE_SPLIT;
+
+	return range->step_uV * voltage_sel + range->min_uV;
+}
+
+/*
+ * list_voltage callback: map a logical selector to microvolts by walking
+ * the set-point ranges in order.  Returns 0 for out-of-range selectors.
+ */
+static int qpnp_regulator_common_list_voltage(struct regulator_dev *rdev,
+			unsigned selector)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	int uV = 0;
+	int i;
+
+	if (selector >= vreg->set_points->n_voltages)
+		return 0;
+
+	for (i = 0; i < vreg->set_points->count; i++) {
+		if (selector < vreg->set_points->range[i].n_voltages) {
+			uV = selector * vreg->set_points->range[i].step_uV
+				+ vreg->set_points->range[i].set_point_min_uV;
+			break;
+		} else {
+			selector -= vreg->set_points->range[i].n_voltages;
+		}
+	}
+
+	return uV;
+}
+
+/* Map the cached HPM mode bit to the framework NORMAL/IDLE mode values. */
+static unsigned int qpnp_regulator_common_get_mode(struct regulator_dev *rdev)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+
+	return (vreg->ctrl_reg[QPNP_COMMON_IDX_MODE]
+			& QPNP_COMMON_MODE_HPM_MASK)
+		? REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;
+}
+
+/*
+ * Program the HPM bit of the MODE register; only NORMAL (HPM) and
+ * IDLE (LPM) modes are supported.
+ */
+static int qpnp_regulator_common_set_mode(struct regulator_dev *rdev,
+		unsigned int mode)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	int rc = 0;
+	u8 val;
+
+	if (mode != REGULATOR_MODE_NORMAL && mode != REGULATOR_MODE_IDLE) {
+		vreg_err(vreg, "invalid mode: %u\n", mode);
+		return -EINVAL;
+	}
+
+	val = (mode == REGULATOR_MODE_NORMAL ? QPNP_COMMON_MODE_HPM_MASK : 0);
+
+	rc = qpnp_vreg_masked_write(vreg, QPNP_COMMON_REG_MODE, val,
+		QPNP_COMMON_MODE_HPM_MASK,
+		&vreg->ctrl_reg[QPNP_COMMON_IDX_MODE]);
+
+	if (rc)
+		vreg_err(vreg, "SPMI write failed, rc=%d\n", rc);
+	else
+		qpnp_vreg_show_state(rdev, QPNP_REGULATOR_ACTION_MODE);
+
+	return rc;
+}
+
+/*
+ * Choose NORMAL (HPM) when the total load, including the fixed system
+ * load, reaches hpm_min_load; IDLE (LPM) otherwise.
+ */
+static unsigned int qpnp_regulator_common_get_optimum_mode(
+		struct regulator_dev *rdev, int input_uV, int output_uV,
+		int load_uA)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+	unsigned int mode;
+
+	if (load_uA + vreg->system_load >= vreg->hpm_min_load)
+		mode = REGULATOR_MODE_NORMAL;
+	else
+		mode = REGULATOR_MODE_IDLE;
+
+	return mode;
+}
+
+/* Report the fixed per-regulator enable ramp time. */
+static int qpnp_regulator_common_enable_time(struct regulator_dev *rdev)
+{
+	struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->enable_time;
+}
+
+/*
+ * Clear a voltage-switch over-current latch by toggling the switch off
+ * and back on, refreshing vs_enable_time so the ISR's hotplug-vs-fault
+ * heuristic restarts from the re-enable.
+ */
+static int qpnp_regulator_vs_clear_ocp(struct qpnp_regulator *vreg)
+{
+	int rc;
+
+	rc = qpnp_vreg_masked_write(vreg, QPNP_COMMON_REG_ENABLE,
+		QPNP_COMMON_DISABLE, QPNP_COMMON_ENABLE_MASK,
+		&vreg->ctrl_reg[QPNP_COMMON_IDX_ENABLE]);
+	if (rc)
+		vreg_err(vreg, "qpnp_vreg_masked_write failed, rc=%d\n", rc);
+
+	vreg->vs_enable_time = ktime_get();
+
+	rc = qpnp_vreg_masked_write(vreg, QPNP_COMMON_REG_ENABLE,
+		QPNP_COMMON_ENABLE, QPNP_COMMON_ENABLE_MASK,
+		&vreg->ctrl_reg[QPNP_COMMON_IDX_ENABLE]);
+	if (rc)
+		vreg_err(vreg, "qpnp_vreg_masked_write failed, rc=%d\n", rc);
+
+	if (qpnp_vreg_debug_mask & QPNP_VREG_DEBUG_OCP) {
+		pr_info("%s: switch state toggled after OCP event\n",
+			vreg->rdesc.name);
+	}
+
+	return rc;
+}
+
+/* Deferred-work wrapper: retry clearing the OCP latch outside IRQ context. */
+static void qpnp_regulator_vs_ocp_work(struct work_struct *work)
+{
+	struct delayed_work *dwork
+		= container_of(work, struct delayed_work, work);
+	struct qpnp_regulator *vreg
+		= container_of(dwork, struct qpnp_regulator, ocp_work);
+
+	qpnp_regulator_vs_clear_ocp(vreg);
+
+	return;
+}
+
+/*
+ * VS over-current IRQ handler.  Clears the OCP latch immediately on the
+ * first event, retries via delayed work up to ocp_max_retries, and gives
+ * up (leaving the switch off) after that.
+ */
+static irqreturn_t qpnp_regulator_vs_ocp_isr(int irq, void *data)
+{
+	struct qpnp_regulator *vreg = data;
+	ktime_t ocp_irq_time;
+	s64 ocp_trigger_delay_us;
+
+	ocp_irq_time = ktime_get();
+	ocp_trigger_delay_us = ktime_us_delta(ocp_irq_time,
+						vreg->vs_enable_time);
+
+	/*
+	 * Reset the OCP count if there is a large delay between switch enable
+	 * and when OCP triggers.  This is indicative of a hotplug event as
+	 * opposed to a fault.
+	 */
+	if (ocp_trigger_delay_us > QPNP_VS_OCP_FAULT_DELAY_US)
+		vreg->ocp_count = 0;
+
+	/* Wait for switch output to settle back to 0 V after OCP triggered. */
+	udelay(QPNP_VS_OCP_FALL_DELAY_US);
+
+	vreg->ocp_count++;
+
+	if (qpnp_vreg_debug_mask & QPNP_VREG_DEBUG_OCP) {
+		pr_info("%s: VS OCP triggered, count = %d, delay = %lld us\n",
+			vreg->rdesc.name, vreg->ocp_count,
+			ocp_trigger_delay_us);
+	}
+
+	if (vreg->ocp_count == 1) {
+		/* Immediately clear the over current condition. */
+		qpnp_regulator_vs_clear_ocp(vreg);
+	} else if (vreg->ocp_count <= vreg->ocp_max_retries) {
+		/* Schedule the over current clear task to run later. */
+		schedule_delayed_work(&vreg->ocp_work,
+			msecs_to_jiffies(vreg->ocp_retry_delay_ms) + 1);
+	} else {
+		vreg_err(vreg, "OCP triggered %d times; no further retries\n",
+			vreg->ocp_count);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Fixed-width action labels used by qpnp_vreg_show_state(); the trailing
+ * padding keeps the debug log columns aligned.
+ */
+static const char * const qpnp_print_actions[] = {
+ [QPNP_REGULATOR_ACTION_INIT] = "initial ",
+ [QPNP_REGULATOR_ACTION_ENABLE] = "enable ",
+ [QPNP_REGULATOR_ACTION_DISABLE] = "disable ",
+ [QPNP_REGULATOR_ACTION_VOLTAGE] = "set voltage",
+ [QPNP_REGULATOR_ACTION_MODE] = "set mode ",
+};
+
+/*
+ * Emit a one-line pr_info() snapshot of a regulator's state (enable state,
+ * output voltage, mode, and pin-control configuration decoded from the
+ * cached control registers) following the given action.  Output is gated by
+ * qpnp_vreg_debug_mask: INIT prints need QPNP_VREG_DEBUG_INIT; runtime
+ * prints need QPNP_VREG_DEBUG_REQUEST and, unless QPNP_VREG_DEBUG_DUPLICATE
+ * is also set, only requests that actually wrote to hardware are shown.
+ */
+static void qpnp_vreg_show_state(struct regulator_dev *rdev,
+ enum qpnp_regulator_action action)
+{
+ struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+ const char *action_label = qpnp_print_actions[action];
+ unsigned int mode = 0;
+ int uV = 0;
+ const char *mode_label = "";
+ enum qpnp_regulator_logical_type type;
+ const char *enable_label;
+ char pc_enable_label[5] = {'\0'};
+ char pc_mode_label[8] = {'\0'};
+ bool show_req, show_dupe, show_init, has_changed;
+ u8 en_reg, mode_reg;
+
+ /* Do not print unless appropriate flags are set. */
+ show_req = qpnp_vreg_debug_mask & QPNP_VREG_DEBUG_REQUEST;
+ show_dupe = qpnp_vreg_debug_mask & QPNP_VREG_DEBUG_DUPLICATE;
+ show_init = qpnp_vreg_debug_mask & QPNP_VREG_DEBUG_INIT;
+ has_changed = vreg->write_count != vreg->prev_write_count;
+ if (!((show_init && action == QPNP_REGULATOR_ACTION_INIT)
+ || (show_req && (has_changed || show_dupe)))) {
+ return;
+ }
+
+ vreg->prev_write_count = vreg->write_count;
+
+ type = vreg->logical_type;
+
+ enable_label = qpnp_regulator_common_is_enabled(rdev) ? "on " : "off";
+
+ /* Read the voltage via the getter appropriate to the logical type. */
+ if (type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_LN_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS)
+ uV = qpnp_regulator_common_get_voltage(rdev);
+
+ if (type == QPNP_REGULATOR_LOGICAL_TYPE_BOOST
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_BOOST_BYP
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO)
+ uV = qpnp_regulator_single_range_get_voltage(rdev);
+
+ if (type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS)
+ uV = qpnp_regulator_ult_lo_smps_get_voltage(rdev);
+
+ if (type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_VS) {
+ mode = qpnp_regulator_common_get_mode(rdev);
+ mode_label = mode == REGULATOR_MODE_NORMAL ? "HPM" : "LPM";
+ }
+
+ /*
+ * Decode enable pin-control bits into "3210"-style flags; '_' marks a
+ * pin that the enable state does not follow.
+ */
+ if (type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_VS) {
+ en_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_ENABLE];
+ pc_enable_label[0] =
+ en_reg & QPNP_COMMON_ENABLE_FOLLOW_HW_EN3_MASK ? '3' : '_';
+ pc_enable_label[1] =
+ en_reg & QPNP_COMMON_ENABLE_FOLLOW_HW_EN2_MASK ? '2' : '_';
+ pc_enable_label[2] =
+ en_reg & QPNP_COMMON_ENABLE_FOLLOW_HW_EN1_MASK ? '1' : '_';
+ pc_enable_label[3] =
+ en_reg & QPNP_COMMON_ENABLE_FOLLOW_HW_EN0_MASK ? '0' : '_';
+ }
+
+ /* Per-type decode of the mode register's alternate-mode flags. */
+ switch (type) {
+ case QPNP_REGULATOR_LOGICAL_TYPE_SMPS:
+ mode_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_MODE];
+ pc_mode_label[0] =
+ mode_reg & QPNP_COMMON_MODE_AUTO_MASK ? 'A' : '_';
+ pc_mode_label[1] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK ? 'W' : '_';
+ pc_mode_label[2] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_HW_EN3_MASK ? '3' : '_';
+ pc_mode_label[3] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_HW_EN2_MASK ? '2' : '_';
+ pc_mode_label[4] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_HW_EN1_MASK ? '1' : '_';
+ pc_mode_label[5] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_HW_EN0_MASK ? '0' : '_';
+
+ pr_info("%s %-11s: %s, v=%7d uV, mode=%s, pc_en=%s, "
+ "alt_mode=%s\n",
+ action_label, vreg->rdesc.name, enable_label, uV,
+ mode_label, pc_enable_label, pc_mode_label);
+ break;
+ case QPNP_REGULATOR_LOGICAL_TYPE_LDO:
+ mode_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_MODE];
+ pc_mode_label[0] =
+ mode_reg & QPNP_COMMON_MODE_AUTO_MASK ? 'A' : '_';
+ pc_mode_label[1] =
+ mode_reg & QPNP_COMMON_MODE_BYPASS_MASK ? 'B' : '_';
+ pc_mode_label[2] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK ? 'W' : '_';
+ pc_mode_label[3] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_HW_EN3_MASK ? '3' : '_';
+ pc_mode_label[4] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_HW_EN2_MASK ? '2' : '_';
+ pc_mode_label[5] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_HW_EN1_MASK ? '1' : '_';
+ pc_mode_label[6] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_HW_EN0_MASK ? '0' : '_';
+
+ pr_info("%s %-11s: %s, v=%7d uV, mode=%s, pc_en=%s, "
+ "alt_mode=%s\n",
+ action_label, vreg->rdesc.name, enable_label, uV,
+ mode_label, pc_enable_label, pc_mode_label);
+ break;
+ case QPNP_REGULATOR_LOGICAL_TYPE_LN_LDO:
+ mode_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_MODE];
+ pc_mode_label[0] =
+ mode_reg & QPNP_COMMON_MODE_BYPASS_MASK ? 'B' : '_';
+
+ pr_info("%s %-11s: %s, v=%7d uV, alt_mode=%s\n",
+ action_label, vreg->rdesc.name, enable_label, uV,
+ pc_mode_label);
+ break;
+ case QPNP_REGULATOR_LOGICAL_TYPE_VS:
+ mode_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_MODE];
+ pc_mode_label[0] =
+ mode_reg & QPNP_COMMON_MODE_AUTO_MASK ? 'A' : '_';
+ pc_mode_label[1] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK ? 'W' : '_';
+
+ pr_info("%s %-11s: %s, mode=%s, pc_en=%s, alt_mode=%s\n",
+ action_label, vreg->rdesc.name, enable_label,
+ mode_label, pc_enable_label, pc_mode_label);
+ break;
+ case QPNP_REGULATOR_LOGICAL_TYPE_BOOST:
+ pr_info("%s %-11s: %s, v=%7d uV\n",
+ action_label, vreg->rdesc.name, enable_label, uV);
+ break;
+ case QPNP_REGULATOR_LOGICAL_TYPE_BOOST_BYP:
+ pr_info("%s %-11s: %s, v=%7d uV\n",
+ action_label, vreg->rdesc.name, enable_label, uV);
+ break;
+ case QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS:
+ mode_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_MODE];
+ pc_mode_label[0] =
+ mode_reg & QPNP_COMMON_MODE_AUTO_MASK ? 'A' : '_';
+
+ pr_info("%s %-11s: %s, v=%7d uV, mode=%s, alt_mode=%s\n",
+ action_label, vreg->rdesc.name, enable_label, uV,
+ mode_label, pc_mode_label);
+ break;
+ case QPNP_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS:
+ case QPNP_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS:
+ mode_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_MODE];
+ pc_mode_label[0] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK ? 'W' : '_';
+ pr_info("%s %-11s: %s, v=%7d uV, mode=%s, alt_mode=%s\n",
+ action_label, vreg->rdesc.name, enable_label, uV,
+ mode_label, pc_mode_label);
+ break;
+ case QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO:
+ mode_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_MODE];
+ pc_mode_label[0] =
+ mode_reg & QPNP_COMMON_MODE_BYPASS_MASK ? 'B' : '_';
+ pc_mode_label[1] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK ? 'W' : '_';
+ pr_info("%s %-11s: %s, v=%7d uV, mode=%s, alt_mode=%s\n",
+ action_label, vreg->rdesc.name, enable_label, uV,
+ mode_label, pc_mode_label);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * regulator_ops tables, one per logical regulator type.  The matching table
+ * in supported_regulators[] selects which ops structure each physical
+ * regulator gets in qpnp_regulator_match().
+ */
+static struct regulator_ops qpnp_smps_ops = {
+ .enable = qpnp_regulator_common_enable,
+ .disable = qpnp_regulator_common_disable,
+ .is_enabled = qpnp_regulator_common_is_enabled,
+ .set_voltage = qpnp_regulator_common_set_voltage,
+ .get_voltage = qpnp_regulator_common_get_voltage,
+ .list_voltage = qpnp_regulator_common_list_voltage,
+ .set_mode = qpnp_regulator_common_set_mode,
+ .get_mode = qpnp_regulator_common_get_mode,
+ .get_optimum_mode = qpnp_regulator_common_get_optimum_mode,
+ .enable_time = qpnp_regulator_common_enable_time,
+};
+
+static struct regulator_ops qpnp_ldo_ops = {
+ .enable = qpnp_regulator_common_enable,
+ .disable = qpnp_regulator_common_disable,
+ .is_enabled = qpnp_regulator_common_is_enabled,
+ .set_voltage = qpnp_regulator_common_set_voltage,
+ .get_voltage = qpnp_regulator_common_get_voltage,
+ .list_voltage = qpnp_regulator_common_list_voltage,
+ .set_mode = qpnp_regulator_common_set_mode,
+ .get_mode = qpnp_regulator_common_get_mode,
+ .get_optimum_mode = qpnp_regulator_common_get_optimum_mode,
+ .enable_time = qpnp_regulator_common_enable_time,
+};
+
+/* Low-noise LDO: no mode control, voltage control only. */
+static struct regulator_ops qpnp_ln_ldo_ops = {
+ .enable = qpnp_regulator_common_enable,
+ .disable = qpnp_regulator_common_disable,
+ .is_enabled = qpnp_regulator_common_is_enabled,
+ .set_voltage = qpnp_regulator_common_set_voltage,
+ .get_voltage = qpnp_regulator_common_get_voltage,
+ .list_voltage = qpnp_regulator_common_list_voltage,
+ .enable_time = qpnp_regulator_common_enable_time,
+};
+
+/* Voltage switch: on/off only, with a VS-specific enable for OCP handling. */
+static struct regulator_ops qpnp_vs_ops = {
+ .enable = qpnp_regulator_vs_enable,
+ .disable = qpnp_regulator_common_disable,
+ .is_enabled = qpnp_regulator_common_is_enabled,
+ .enable_time = qpnp_regulator_common_enable_time,
+};
+
+static struct regulator_ops qpnp_boost_ops = {
+ .enable = qpnp_regulator_common_enable,
+ .disable = qpnp_regulator_common_disable,
+ .is_enabled = qpnp_regulator_common_is_enabled,
+ .set_voltage = qpnp_regulator_single_range_set_voltage,
+ .get_voltage = qpnp_regulator_single_range_get_voltage,
+ .list_voltage = qpnp_regulator_common_list_voltage,
+ .enable_time = qpnp_regulator_common_enable_time,
+};
+
+static struct regulator_ops qpnp_ftsmps_ops = {
+ .enable = qpnp_regulator_common_enable,
+ .disable = qpnp_regulator_common_disable,
+ .is_enabled = qpnp_regulator_common_is_enabled,
+ .set_voltage = qpnp_regulator_common_set_voltage,
+ .get_voltage = qpnp_regulator_common_get_voltage,
+ .list_voltage = qpnp_regulator_common_list_voltage,
+ .set_mode = qpnp_regulator_common_set_mode,
+ .get_mode = qpnp_regulator_common_get_mode,
+ .get_optimum_mode = qpnp_regulator_common_get_optimum_mode,
+ .enable_time = qpnp_regulator_common_enable_time,
+};
+
+/* ULT low-range SMPS uses dedicated voltage getters/setters. */
+static struct regulator_ops qpnp_ult_lo_smps_ops = {
+ .enable = qpnp_regulator_common_enable,
+ .disable = qpnp_regulator_common_disable,
+ .is_enabled = qpnp_regulator_common_is_enabled,
+ .set_voltage = qpnp_regulator_ult_lo_smps_set_voltage,
+ .get_voltage = qpnp_regulator_ult_lo_smps_get_voltage,
+ .list_voltage = qpnp_regulator_common_list_voltage,
+ .set_mode = qpnp_regulator_common_set_mode,
+ .get_mode = qpnp_regulator_common_get_mode,
+ .get_optimum_mode = qpnp_regulator_common_get_optimum_mode,
+ .enable_time = qpnp_regulator_common_enable_time,
+};
+
+static struct regulator_ops qpnp_ult_ho_smps_ops = {
+ .enable = qpnp_regulator_common_enable,
+ .disable = qpnp_regulator_common_disable,
+ .is_enabled = qpnp_regulator_common_is_enabled,
+ .set_voltage = qpnp_regulator_single_range_set_voltage,
+ .get_voltage = qpnp_regulator_single_range_get_voltage,
+ .list_voltage = qpnp_regulator_common_list_voltage,
+ .set_mode = qpnp_regulator_common_set_mode,
+ .get_mode = qpnp_regulator_common_get_mode,
+ .get_optimum_mode = qpnp_regulator_common_get_optimum_mode,
+ .enable_time = qpnp_regulator_common_enable_time,
+};
+
+static struct regulator_ops qpnp_ult_ldo_ops = {
+ .enable = qpnp_regulator_common_enable,
+ .disable = qpnp_regulator_common_disable,
+ .is_enabled = qpnp_regulator_common_is_enabled,
+ .set_voltage = qpnp_regulator_single_range_set_voltage,
+ .get_voltage = qpnp_regulator_single_range_get_voltage,
+ .list_voltage = qpnp_regulator_common_list_voltage,
+ .set_mode = qpnp_regulator_common_set_mode,
+ .get_mode = qpnp_regulator_common_get_mode,
+ .get_optimum_mode = qpnp_regulator_common_get_optimum_mode,
+ .enable_time = qpnp_regulator_common_enable_time,
+};
+
+/* Maximum possible digital major revision value */
+#define INF 0xFF
+
+/*
+ * Table matching hardware (type, subtype, digital major revision range) to
+ * a logical regulator type, ops table, voltage set points, and HPM minimum
+ * load (uA).  Note some subtypes (e.g. N600, N1200) map to different set
+ * points depending on the revision: rev 0 uses nldo2, rev >= 1 uses nldo3.
+ */
+static const struct qpnp_regulator_mapping supported_regulators[] = {
+ /* type subtype dig_min dig_max ltype ops setpoints hpm_min */
+ QPNP_VREG_MAP(BUCK, GP_CTL, 0, INF, SMPS, smps, smps, 100000),
+ QPNP_VREG_MAP(LDO, N300, 0, INF, LDO, ldo, nldo1, 10000),
+ QPNP_VREG_MAP(LDO, N600, 0, 0, LDO, ldo, nldo2, 10000),
+ QPNP_VREG_MAP(LDO, N1200, 0, 0, LDO, ldo, nldo2, 10000),
+ QPNP_VREG_MAP(LDO, N600, 1, INF, LDO, ldo, nldo3, 10000),
+ QPNP_VREG_MAP(LDO, N1200, 1, INF, LDO, ldo, nldo3, 10000),
+ QPNP_VREG_MAP(LDO, N600_ST, 0, 0, LDO, ldo, nldo2, 10000),
+ QPNP_VREG_MAP(LDO, N1200_ST, 0, 0, LDO, ldo, nldo2, 10000),
+ QPNP_VREG_MAP(LDO, N600_ST, 1, INF, LDO, ldo, nldo3, 10000),
+ QPNP_VREG_MAP(LDO, N1200_ST, 1, INF, LDO, ldo, nldo3, 10000),
+ QPNP_VREG_MAP(LDO, P50, 0, INF, LDO, ldo, pldo, 5000),
+ QPNP_VREG_MAP(LDO, P150, 0, INF, LDO, ldo, pldo, 10000),
+ QPNP_VREG_MAP(LDO, P300, 0, INF, LDO, ldo, pldo, 10000),
+ QPNP_VREG_MAP(LDO, P600, 0, INF, LDO, ldo, pldo, 10000),
+ QPNP_VREG_MAP(LDO, P1200, 0, INF, LDO, ldo, pldo, 10000),
+ QPNP_VREG_MAP(LDO, LN, 0, INF, LN_LDO, ln_ldo, ln_ldo, 0),
+ QPNP_VREG_MAP(LDO, LV_P50, 0, INF, LDO, ldo, pldo, 5000),
+ QPNP_VREG_MAP(LDO, LV_P150, 0, INF, LDO, ldo, pldo, 10000),
+ QPNP_VREG_MAP(LDO, LV_P300, 0, INF, LDO, ldo, pldo, 10000),
+ QPNP_VREG_MAP(LDO, LV_P600, 0, INF, LDO, ldo, pldo, 10000),
+ QPNP_VREG_MAP(LDO, LV_P1200, 0, INF, LDO, ldo, pldo, 10000),
+ QPNP_VREG_MAP(VS, LV100, 0, INF, VS, vs, none, 0),
+ QPNP_VREG_MAP(VS, LV300, 0, INF, VS, vs, none, 0),
+ QPNP_VREG_MAP(VS, MV300, 0, INF, VS, vs, none, 0),
+ QPNP_VREG_MAP(VS, MV500, 0, INF, VS, vs, none, 0),
+ QPNP_VREG_MAP(VS, HDMI, 0, INF, VS, vs, none, 0),
+ QPNP_VREG_MAP(VS, OTG, 0, INF, VS, vs, none, 0),
+ QPNP_VREG_MAP(BOOST, 5V_BOOST, 0, INF, BOOST, boost, boost, 0),
+ QPNP_VREG_MAP(FTS, FTS_CTL, 0, INF, FTSMPS, ftsmps, ftsmps, 100000),
+ QPNP_VREG_MAP(BOOST_BYP, BB_2A, 0, INF, BOOST_BYP, boost, boost_byp, 0),
+ QPNP_VREG_MAP(ULT_BUCK, ULT_HF_CTL1, 0, INF, ULT_LO_SMPS, ult_lo_smps,
+ ult_lo_smps, 100000),
+ QPNP_VREG_MAP(ULT_BUCK, ULT_HF_CTL2, 0, INF, ULT_LO_SMPS, ult_lo_smps,
+ ult_lo_smps, 100000),
+ QPNP_VREG_MAP(ULT_BUCK, ULT_HF_CTL3, 0, INF, ULT_LO_SMPS, ult_lo_smps,
+ ult_lo_smps, 100000),
+ QPNP_VREG_MAP(ULT_BUCK, ULT_HF_CTL4, 0, INF, ULT_HO_SMPS, ult_ho_smps,
+ ult_ho_smps, 100000),
+ QPNP_VREG_MAP(ULT_LDO, N300_ST, 0, INF, ULT_LDO, ult_ldo, ult_nldo,
+ 10000),
+ QPNP_VREG_MAP(ULT_LDO, N600_ST, 0, INF, ULT_LDO, ult_ldo, ult_nldo,
+ 10000),
+ QPNP_VREG_MAP(ULT_LDO, N1200_ST, 0, INF, ULT_LDO, ult_ldo, ult_nldo,
+ 10000),
+ QPNP_VREG_MAP(ULT_LDO, LV_P150, 0, INF, ULT_LDO, ult_ldo, ult_pldo,
+ 10000),
+ QPNP_VREG_MAP(ULT_LDO, LV_P300, 0, INF, ULT_LDO, ult_ldo, ult_pldo,
+ 10000),
+ QPNP_VREG_MAP(ULT_LDO, P600, 0, INF, ULT_LDO, ult_ldo, ult_pldo,
+ 10000),
+ QPNP_VREG_MAP(ULT_LDO, P150, 0, INF, ULT_LDO, ult_ldo, ult_pldo,
+ 10000),
+ QPNP_VREG_MAP(ULT_LDO, P50, 0, INF, ULT_LDO, ult_ldo, ult_pldo,
+ 5000),
+};
+
+/*
+ * Identify the physical regulator by reading its digital major revision,
+ * type, and subtype registers, then look it up in supported_regulators[]
+ * to fill in the logical type, ops, set points, and HPM minimum load.
+ *
+ * Returns 0 on a successful match, -ENODEV when the hardware is not in the
+ * table, or an SPMI read errno.
+ */
+static int qpnp_regulator_match(struct qpnp_regulator *vreg)
+{
+ const struct qpnp_regulator_mapping *mapping;
+ struct device_node *node = vreg->spmi_dev->dev.of_node;
+ int rc, i;
+ u32 type_reg[2], dig_major_rev;
+ u8 version[QPNP_COMMON_REG_SUBTYPE - QPNP_COMMON_REG_DIG_MAJOR_REV + 1];
+ u8 type, subtype;
+
+ /* Bulk-read DIG_MAJOR_REV..SUBTYPE into version[] in one transaction. */
+ rc = qpnp_vreg_read(vreg, QPNP_COMMON_REG_DIG_MAJOR_REV, version,
+ ARRAY_SIZE(version));
+ if (rc) {
+ vreg_err(vreg, "could not read version registers, rc=%d\n", rc);
+ return rc;
+ }
+ /* Index each field by its offset from the first register read. */
+ dig_major_rev = version[QPNP_COMMON_REG_DIG_MAJOR_REV
+ - QPNP_COMMON_REG_DIG_MAJOR_REV];
+ type = version[QPNP_COMMON_REG_TYPE
+ - QPNP_COMMON_REG_DIG_MAJOR_REV];
+ subtype = version[QPNP_COMMON_REG_SUBTYPE
+ - QPNP_COMMON_REG_DIG_MAJOR_REV];
+
+ /*
+ * Override type and subtype register values if qcom,force-type is
+ * present in the device tree node.
+ */
+ rc = of_property_read_u32_array(node, "qcom,force-type", type_reg, 2);
+ if (!rc) {
+ type = type_reg[0];
+ subtype = type_reg[1];
+ }
+
+ rc = -ENODEV;
+ for (i = 0; i < ARRAY_SIZE(supported_regulators); i++) {
+ mapping = &supported_regulators[i];
+ if (mapping->type == type && mapping->subtype == subtype
+ && mapping->revision_min <= dig_major_rev
+ && mapping->revision_max >= dig_major_rev) {
+ vreg->logical_type = mapping->logical_type;
+ vreg->set_points = mapping->set_points;
+ vreg->hpm_min_load = mapping->hpm_min_load;
+ vreg->rdesc.ops = mapping->ops;
+ vreg->rdesc.n_voltages
+ = mapping->set_points->n_voltages;
+ rc = 0;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+/*
+ * Apply platform-data / device-tree configuration to the regulator's
+ * control registers.  The 8 control registers starting at VOLTAGE_RANGE are
+ * read into the vreg->ctrl_reg cache, modified in a local copy according to
+ * pdata, and written back only where they differ.  Fields left at the
+ * QPNP_REGULATOR_USE_HW_DEFAULT / *_HW_DEFAULT sentinels are not touched.
+ *
+ * Returns 0 on success or an SPMI access errno.
+ */
+static int qpnp_regulator_init_registers(struct qpnp_regulator *vreg,
+ struct qpnp_regulator_platform_data *pdata)
+{
+ int rc, i;
+ enum qpnp_regulator_logical_type type;
+ u8 ctrl_reg[8], reg, mask;
+
+ type = vreg->logical_type;
+
+ /* Populate the cached copy of the control registers. */
+ rc = qpnp_vreg_read(vreg, QPNP_COMMON_REG_VOLTAGE_RANGE,
+ vreg->ctrl_reg, 8);
+ if (rc) {
+ vreg_err(vreg, "spmi read failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Work on a scratch copy; the optimized write diffs it vs the cache. */
+ for (i = 0; i < ARRAY_SIZE(ctrl_reg); i++)
+ ctrl_reg[i] = vreg->ctrl_reg[i];
+
+ /* Set up enable pin control. */
+ if ((type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_VS)
+ && !(pdata->pin_ctrl_enable
+ & QPNP_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT)) {
+ ctrl_reg[QPNP_COMMON_IDX_ENABLE] &=
+ ~QPNP_COMMON_ENABLE_FOLLOW_ALL_MASK;
+ ctrl_reg[QPNP_COMMON_IDX_ENABLE] |=
+ pdata->pin_ctrl_enable & QPNP_COMMON_ENABLE_FOLLOW_ALL_MASK;
+ }
+
+ /* Set up HPM control. */
+ if ((type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_VS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS)
+ && (pdata->hpm_enable != QPNP_REGULATOR_USE_HW_DEFAULT)) {
+ ctrl_reg[QPNP_COMMON_IDX_MODE] &= ~QPNP_COMMON_MODE_HPM_MASK;
+ ctrl_reg[QPNP_COMMON_IDX_MODE] |=
+ (pdata->hpm_enable ? QPNP_COMMON_MODE_HPM_MASK : 0);
+ }
+
+ /* Set up auto mode control. */
+ if ((type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_VS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS)
+ && (pdata->auto_mode_enable != QPNP_REGULATOR_USE_HW_DEFAULT)) {
+ ctrl_reg[QPNP_COMMON_IDX_MODE] &=
+ ~QPNP_COMMON_MODE_AUTO_MASK;
+ ctrl_reg[QPNP_COMMON_IDX_MODE] |=
+ (pdata->auto_mode_enable ? QPNP_COMMON_MODE_AUTO_MASK : 0);
+ }
+
+ /* Set up mode pin control. */
+ if ((type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_LDO)
+ && !(pdata->pin_ctrl_hpm
+ & QPNP_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT)) {
+ ctrl_reg[QPNP_COMMON_IDX_MODE] &=
+ ~QPNP_COMMON_MODE_FOLLOW_ALL_MASK;
+ ctrl_reg[QPNP_COMMON_IDX_MODE] |=
+ pdata->pin_ctrl_hpm & QPNP_COMMON_MODE_FOLLOW_ALL_MASK;
+ }
+
+ /* VS and ULT types only support following the "awake" signal. */
+ if (type == QPNP_REGULATOR_LOGICAL_TYPE_VS
+ && !(pdata->pin_ctrl_hpm & QPNP_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT)) {
+ ctrl_reg[QPNP_COMMON_IDX_MODE] &=
+ ~QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK;
+ ctrl_reg[QPNP_COMMON_IDX_MODE] |=
+ pdata->pin_ctrl_hpm & QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK;
+ }
+
+ if ((type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO)
+ && !(pdata->pin_ctrl_hpm
+ & QPNP_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT)) {
+ ctrl_reg[QPNP_COMMON_IDX_MODE] &=
+ ~QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK;
+ ctrl_reg[QPNP_COMMON_IDX_MODE] |=
+ pdata->pin_ctrl_hpm & QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK;
+ }
+
+ if ((type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_LN_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO)
+ && pdata->bypass_mode_enable != QPNP_REGULATOR_USE_HW_DEFAULT) {
+ ctrl_reg[QPNP_COMMON_IDX_MODE] &=
+ ~QPNP_COMMON_MODE_BYPASS_MASK;
+ ctrl_reg[QPNP_COMMON_IDX_MODE] |=
+ (pdata->bypass_mode_enable
+ ? QPNP_COMMON_MODE_BYPASS_MASK : 0);
+ }
+
+ /* Set boost current limit. */
+ if ((type == QPNP_REGULATOR_LOGICAL_TYPE_BOOST
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_BOOST_BYP)
+ && pdata->boost_current_limit
+ != QPNP_BOOST_CURRENT_LIMIT_HW_DEFAULT) {
+ reg = pdata->boost_current_limit;
+ mask = QPNP_BOOST_CURRENT_LIMIT_MASK;
+ rc = qpnp_vreg_masked_read_write(vreg,
+ (type == QPNP_REGULATOR_LOGICAL_TYPE_BOOST
+ ? QPNP_BOOST_REG_CURRENT_LIMIT
+ : QPNP_BOOST_BYP_REG_CURRENT_LIMIT),
+ reg, mask);
+ if (rc) {
+ vreg_err(vreg, "spmi write failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ /* Write back any control register values that were modified. */
+ rc = qpnp_vreg_write_optimized(vreg, QPNP_COMMON_REG_VOLTAGE_RANGE,
+ ctrl_reg, vreg->ctrl_reg, 8);
+ if (rc) {
+ vreg_err(vreg, "spmi write failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * Setup initial range for ULT_LO_SMPS.  NOTE(review): this updates the
+ * local copy after the write-back above, presumably only to seed the
+ * cached range index rather than the hardware — confirm against the
+ * ULT_LO_SMPS voltage setters.
+ */
+ if (type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS) {
+ ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE] =
+ (ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_SET]
+ < ULT_SMPS_RANGE_SPLIT) ? 0 : 1;
+ }
+
+ /* Set pull down. */
+ if ((type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_VS)
+ && pdata->pull_down_enable != QPNP_REGULATOR_USE_HW_DEFAULT) {
+ reg = pdata->pull_down_enable
+ ? QPNP_COMMON_PULL_DOWN_ENABLE_MASK : 0;
+ rc = qpnp_vreg_write(vreg, QPNP_COMMON_REG_PULL_DOWN, &reg, 1);
+ if (rc) {
+ vreg_err(vreg, "spmi write failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ if (type == QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS
+ && pdata->pull_down_enable != QPNP_REGULATOR_USE_HW_DEFAULT) {
+ /* FTSMPS has other bits in the pull down control register. */
+ reg = pdata->pull_down_enable
+ ? QPNP_COMMON_PULL_DOWN_ENABLE_MASK : 0;
+ rc = qpnp_vreg_masked_read_write(vreg,
+ QPNP_COMMON_REG_PULL_DOWN, reg,
+ QPNP_COMMON_PULL_DOWN_ENABLE_MASK);
+ if (rc) {
+ vreg_err(vreg, "spmi write failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ /* Set soft start for LDO. */
+ if ((type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO)
+ && pdata->soft_start_enable != QPNP_REGULATOR_USE_HW_DEFAULT) {
+ reg = pdata->soft_start_enable
+ ? QPNP_LDO_SOFT_START_ENABLE_MASK : 0;
+ rc = qpnp_vreg_write(vreg, QPNP_LDO_REG_SOFT_START, &reg, 1);
+ if (rc) {
+ vreg_err(vreg, "spmi write failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ /* Set soft start strength and over current protection for VS. */
+ if (type == QPNP_REGULATOR_LOGICAL_TYPE_VS) {
+ reg = 0;
+ mask = 0;
+ if (pdata->soft_start_enable != QPNP_REGULATOR_USE_HW_DEFAULT) {
+ reg |= pdata->soft_start_enable
+ ? QPNP_VS_SOFT_START_ENABLE_MASK : 0;
+ mask |= QPNP_VS_SOFT_START_ENABLE_MASK;
+ }
+ if (pdata->vs_soft_start_strength
+ != QPNP_VS_SOFT_START_STR_HW_DEFAULT) {
+ reg |= pdata->vs_soft_start_strength
+ & QPNP_VS_SOFT_START_SEL_MASK;
+ mask |= QPNP_VS_SOFT_START_SEL_MASK;
+ }
+ rc = qpnp_vreg_masked_read_write(vreg, QPNP_VS_REG_SOFT_START,
+ reg, mask);
+ if (rc) {
+ vreg_err(vreg, "spmi write failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (pdata->ocp_enable != QPNP_REGULATOR_USE_HW_DEFAULT) {
+ reg = pdata->ocp_enable ? QPNP_VS_OCP_NO_OVERRIDE
+ : QPNP_VS_OCP_OVERRIDE;
+ rc = qpnp_vreg_write(vreg, QPNP_VS_REG_OCP, &reg, 1);
+ if (rc) {
+ vreg_err(vreg, "spmi write failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+ }
+
+ return rc;
+}
+
+/* Fill in pdata elements based on values found in device tree. */
+static int qpnp_regulator_get_dt_config(struct spmi_device *spmi,
+ struct qpnp_regulator_platform_data *pdata)
+{
+ struct resource *res;
+ struct device_node *node = spmi->dev.of_node;
+ int rc = 0;
+
+ /* Constrain the input supply to the maximum requested output. */
+ pdata->init_data.constraints.input_uV
+ = pdata->init_data.constraints.max_uV;
+
+ /* The regulator's SPMI base address is mandatory. */
+ res = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&spmi->dev, "%s: node is missing base address\n",
+ __func__);
+ return -EINVAL;
+ }
+ pdata->base_addr = res->start;
+
+ /* OCP IRQ is optional so ignore get errors. */
+ pdata->ocp_irq = spmi_get_irq_byname(spmi, NULL, "ocp");
+ if (pdata->ocp_irq < 0)
+ pdata->ocp_irq = 0;
+
+ /*
+ * Initialize configuration parameters to use hardware default in case
+ * no value is specified via device tree.
+ */
+ pdata->auto_mode_enable = QPNP_REGULATOR_USE_HW_DEFAULT;
+ pdata->bypass_mode_enable = QPNP_REGULATOR_USE_HW_DEFAULT;
+ pdata->ocp_enable = QPNP_REGULATOR_USE_HW_DEFAULT;
+ pdata->pull_down_enable = QPNP_REGULATOR_USE_HW_DEFAULT;
+ pdata->soft_start_enable = QPNP_REGULATOR_USE_HW_DEFAULT;
+ pdata->boost_current_limit = QPNP_BOOST_CURRENT_LIMIT_HW_DEFAULT;
+ pdata->pin_ctrl_enable = QPNP_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT;
+ pdata->pin_ctrl_hpm = QPNP_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT;
+ pdata->vs_soft_start_strength = QPNP_VS_SOFT_START_STR_HW_DEFAULT;
+ pdata->hpm_enable = QPNP_REGULATOR_USE_HW_DEFAULT;
+
+ /* These bindings are optional, so it is okay if they are not found. */
+ of_property_read_u32(node, "qcom,auto-mode-enable",
+ &pdata->auto_mode_enable);
+ of_property_read_u32(node, "qcom,bypass-mode-enable",
+ &pdata->bypass_mode_enable);
+ of_property_read_u32(node, "qcom,ocp-enable", &pdata->ocp_enable);
+ of_property_read_u32(node, "qcom,ocp-max-retries",
+ &pdata->ocp_max_retries);
+ of_property_read_u32(node, "qcom,ocp-retry-delay",
+ &pdata->ocp_retry_delay_ms);
+ of_property_read_u32(node, "qcom,pull-down-enable",
+ &pdata->pull_down_enable);
+ of_property_read_u32(node, "qcom,soft-start-enable",
+ &pdata->soft_start_enable);
+ of_property_read_u32(node, "qcom,boost-current-limit",
+ &pdata->boost_current_limit);
+ of_property_read_u32(node, "qcom,pin-ctrl-enable",
+ &pdata->pin_ctrl_enable);
+ of_property_read_u32(node, "qcom,pin-ctrl-hpm", &pdata->pin_ctrl_hpm);
+ of_property_read_u32(node, "qcom,hpm-enable", &pdata->hpm_enable);
+ of_property_read_u32(node, "qcom,vs-soft-start-strength",
+ &pdata->vs_soft_start_strength);
+ of_property_read_u32(node, "qcom,system-load", &pdata->system_load);
+ of_property_read_u32(node, "qcom,enable-time", &pdata->enable_time);
+
+ return rc;
+}
+
+static struct of_device_id spmi_match_table[];
+
+#define MAX_NAME_LEN 127
+
+/*
+ * Probe one QPNP regulator: gather configuration (device tree or legacy
+ * platform data), identify the hardware, program its control registers,
+ * hook up the optional VS OCP interrupt, and register with the regulator
+ * framework.
+ *
+ * Returns 0 on success or a negative errno; -EPROBE_DEFER is propagated
+ * silently so the core can retry later.
+ */
+static int qpnp_regulator_probe(struct spmi_device *spmi)
+{
+ struct regulator_config reg_config = {};
+ struct qpnp_regulator_platform_data *pdata;
+ struct qpnp_regulator *vreg;
+ struct regulator_desc *rdesc;
+ struct qpnp_regulator_platform_data of_pdata;
+ struct regulator_init_data *init_data;
+ char *reg_name;
+ int rc;
+ bool is_dt;
+
+ /* Probe entry trace; debug level only (was mistakenly dev_err). */
+ dev_dbg(&spmi->dev, "%s\n", __func__);
+
+ vreg = kzalloc(sizeof(struct qpnp_regulator), GFP_KERNEL);
+ if (!vreg) {
+ dev_err(&spmi->dev, "%s: Can't allocate qpnp_regulator\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ is_dt = of_match_device(spmi_match_table, &spmi->dev);
+
+ /* Check if device tree is in use. */
+ if (is_dt) {
+ init_data = of_get_regulator_init_data(&spmi->dev,
+ spmi->dev.of_node);
+ if (!init_data) {
+ dev_err(&spmi->dev, "%s: unable to allocate memory\n",
+ __func__);
+ kfree(vreg);
+ return -ENOMEM;
+ }
+ memset(&of_pdata, 0,
+ sizeof(struct qpnp_regulator_platform_data));
+ memcpy(&of_pdata.init_data, init_data,
+ sizeof(struct regulator_init_data));
+
+ if (of_get_property(spmi->dev.of_node, "parent-supply", NULL))
+ of_pdata.init_data.supply_regulator = "parent";
+
+ rc = qpnp_regulator_get_dt_config(spmi, &of_pdata);
+ if (rc) {
+ dev_err(&spmi->dev, "%s: DT parsing failed, rc=%d\n",
+ __func__, rc);
+ kfree(vreg);
+ /* Propagate the real error (was hard-coded -ENOMEM). */
+ return rc;
+ }
+
+ pdata = &of_pdata;
+ } else {
+ pdata = spmi->dev.platform_data;
+ }
+
+ if (pdata == NULL) {
+ dev_err(&spmi->dev, "%s: no platform data specified\n",
+ __func__);
+ kfree(vreg);
+ return -EINVAL;
+ }
+
+ /* Cache configuration so later callbacks do not need pdata. */
+ vreg->spmi_dev = spmi;
+ vreg->prev_write_count = -1;
+ vreg->write_count = 0;
+ vreg->base_addr = pdata->base_addr;
+ vreg->enable_time = pdata->enable_time;
+ vreg->system_load = pdata->system_load;
+ vreg->ocp_enable = pdata->ocp_enable;
+ vreg->ocp_irq = pdata->ocp_irq;
+ vreg->ocp_max_retries = pdata->ocp_max_retries;
+ vreg->ocp_retry_delay_ms = pdata->ocp_retry_delay_ms;
+
+ if (vreg->ocp_max_retries == 0)
+ vreg->ocp_max_retries = QPNP_VS_OCP_DEFAULT_MAX_RETRIES;
+ if (vreg->ocp_retry_delay_ms == 0)
+ vreg->ocp_retry_delay_ms = QPNP_VS_OCP_DEFAULT_RETRY_DELAY_MS;
+
+ rdesc = &vreg->rdesc;
+ rdesc->id = spmi->ctrl->nr;
+ rdesc->owner = THIS_MODULE;
+ rdesc->type = REGULATOR_VOLTAGE;
+
+ /* A name is mandatory; strnlen() below must not see NULL. */
+ if (pdata->init_data.constraints.name == NULL) {
+ dev_err(&spmi->dev, "%s: regulator name not specified\n",
+ __func__);
+ kfree(vreg);
+ return -EINVAL;
+ }
+
+ reg_name = kzalloc(strnlen(pdata->init_data.constraints.name,
+ MAX_NAME_LEN) + 1, GFP_KERNEL);
+ if (!reg_name) {
+ dev_err(&spmi->dev, "%s: Can't allocate regulator name\n",
+ __func__);
+ kfree(vreg);
+ return -ENOMEM;
+ }
+ strlcpy(reg_name, pdata->init_data.constraints.name,
+ strnlen(pdata->init_data.constraints.name, MAX_NAME_LEN) + 1);
+ rdesc->name = reg_name;
+
+ dev_set_drvdata(&spmi->dev, vreg);
+
+ rc = qpnp_regulator_match(vreg);
+ if (rc) {
+ vreg_err(vreg, "regulator type unknown, rc=%d\n", rc);
+ goto bail;
+ }
+
+ if (is_dt && rdesc->ops) {
+ /* Fill in ops and mode masks when using device tree. */
+ if (rdesc->ops->enable)
+ pdata->init_data.constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_STATUS;
+ if (rdesc->ops->get_voltage)
+ pdata->init_data.constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_VOLTAGE;
+ if (rdesc->ops->get_mode) {
+ pdata->init_data.constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_DRMS;
+ pdata->init_data.constraints.valid_modes_mask
+ = REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE;
+ }
+ }
+
+ rc = qpnp_regulator_init_registers(vreg, pdata);
+ if (rc) {
+ vreg_err(vreg, "common initialization failed, rc=%d\n", rc);
+ goto bail;
+ }
+
+ /* Only voltage switches support OCP handling. */
+ if (vreg->logical_type != QPNP_REGULATOR_LOGICAL_TYPE_VS)
+ vreg->ocp_irq = 0;
+
+ if (vreg->ocp_irq) {
+ /*
+ * Initialize the work item before requesting the IRQ so an
+ * OCP interrupt firing immediately cannot schedule an
+ * uninitialized delayed work.
+ */
+ INIT_DELAYED_WORK(&vreg->ocp_work, qpnp_regulator_vs_ocp_work);
+
+ rc = devm_request_irq(&spmi->dev, vreg->ocp_irq,
+ qpnp_regulator_vs_ocp_isr, IRQF_TRIGGER_RISING, "ocp",
+ vreg);
+ if (rc < 0) {
+ vreg_err(vreg, "failed to request irq %d, rc=%d\n",
+ vreg->ocp_irq, rc);
+ goto bail;
+ }
+ }
+
+ reg_config.dev = &spmi->dev;
+ reg_config.init_data = &pdata->init_data;
+ reg_config.driver_data = vreg;
+ reg_config.of_node = spmi->dev.of_node;
+ vreg->rdev = regulator_register(rdesc, &reg_config);
+ if (IS_ERR(vreg->rdev)) {
+ rc = PTR_ERR(vreg->rdev);
+ if (rc != -EPROBE_DEFER)
+ vreg_err(vreg, "regulator_register failed, rc=%d\n",
+ rc);
+ goto cancel_ocp_work;
+ }
+
+ qpnp_vreg_show_state(vreg->rdev, QPNP_REGULATOR_ACTION_INIT);
+
+ return 0;
+
+cancel_ocp_work:
+ if (vreg->ocp_irq)
+ cancel_delayed_work_sync(&vreg->ocp_work);
+bail:
+ if (rc && rc != -EPROBE_DEFER)
+ vreg_err(vreg, "probe failed, rc=%d\n", rc);
+
+ kfree(vreg->rdesc.name);
+ kfree(vreg);
+
+ return rc;
+}
+
+/*
+ * Tear down a regulator registered by qpnp_regulator_probe(): unregister
+ * from the regulator framework, flush any pending OCP clear work, and free
+ * the allocations made at probe time (the IRQ itself is devm-managed).
+ */
+static void qpnp_regulator_remove(struct spmi_device *spmi)
+{
+ struct qpnp_regulator *vreg;
+
+ vreg = dev_get_drvdata(&spmi->dev);
+ dev_set_drvdata(&spmi->dev, NULL);
+
+ if (vreg) {
+ regulator_unregister(vreg->rdev);
+ if (vreg->ocp_irq)
+ cancel_delayed_work_sync(&vreg->ocp_work);
+ kfree(vreg->rdesc.name);
+ kfree(vreg);
+ }
+}
+
+/* Device tree match table (forward-declared above for use in probe). */
+static struct of_device_id spmi_match_table[] = {
+ { .compatible = QPNP_REGULATOR_DRIVER_NAME, },
+ {}
+};
+
+/* SPMI id table; exported for module autoloading. */
+static const struct spmi_device_id qpnp_regulator_id[] = {
+ { QPNP_REGULATOR_DRIVER_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spmi, qpnp_regulator_id);
+
+static struct spmi_driver qpnp_regulator_driver = {
+ .driver = {
+ .name = QPNP_REGULATOR_DRIVER_NAME,
+ .of_match_table = spmi_match_table,
+ .owner = THIS_MODULE,
+ },
+ .probe = qpnp_regulator_probe,
+ .remove = qpnp_regulator_remove,
+ /*
+ * Restored: this was commented out, which left qpnp_regulator_id and
+ * its MODULE_DEVICE_TABLE() above dead and disabled non-DT matching.
+ */
+ .id_table = qpnp_regulator_id,
+};
+
+/*
+ * Pre-compute the number of set points available for each regulator type to
+ * avoid unnecessary calculations later in runtime.  For each voltage range
+ * n_voltages is derived from (max - min) / step + 1, with a zero
+ * set_point_max_uV marking a range that contributes no set points; the
+ * per-type total is the sum over its ranges.
+ */
+static void qpnp_regulator_set_point_init(void)
+{
+ struct qpnp_voltage_set_points *points;
+ int i, j, temp;
+
+ for (i = 0; i < ARRAY_SIZE(all_set_points); i++) {
+ /* Local pointer (the old unused 'set_points' is removed). */
+ points = all_set_points[i];
+ temp = 0;
+ for (j = 0; j < points->count; j++) {
+ points->range[j].n_voltages
+ = (points->range[j].set_point_max_uV
+ - points->range[j].set_point_min_uV)
+ / points->range[j].step_uV + 1;
+ if (points->range[j].set_point_max_uV == 0)
+ points->range[j].n_voltages = 0;
+ temp += points->range[j].n_voltages;
+ }
+ points->n_voltages = temp;
+ }
+}
+
+/**
+ * qpnp_regulator_init() - register spmi driver for qpnp-regulator
+ *
+ * This initialization function should be called in systems in which driver
+ * registration ordering must be controlled precisely.  It is idempotent:
+ * only the first call registers the driver; later calls return 0.
+ */
+int __init qpnp_regulator_init(void)
+{
+ static bool has_registered;
+
+ /* Guard against double registration from multiple call sites. */
+ if (has_registered)
+ return 0;
+ has_registered = true;
+
+ qpnp_regulator_set_point_init();
+
+ return spmi_driver_register(&qpnp_regulator_driver);
+}
+EXPORT_SYMBOL(qpnp_regulator_init);
+
+/* Module exit: unregister the SPMI driver registered in init. */
+static void __exit qpnp_regulator_exit(void)
+{
+ spmi_driver_unregister(&qpnp_regulator_driver);
+}
+
+MODULE_DESCRIPTION("QPNP PMIC regulator driver");
+MODULE_LICENSE("GPL v2");
+
+module_exit(qpnp_regulator_exit);
diff --git a/drivers/regulator/rpm-smd-regulator.c b/drivers/regulator/rpm-smd-regulator.c
new file mode 100644
index 000000000000..3067a0b1f91f
--- /dev/null
+++ b/drivers/regulator/rpm-smd-regulator.c
@@ -0,0 +1,1722 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <soc/qcom/rpm-smd.h>
+
+/* Debug Definitions */
+
+enum {
+ RPM_VREG_DEBUG_REQUEST = BIT(0),
+ RPM_VREG_DEBUG_FULL_REQUEST = BIT(1),
+ RPM_VREG_DEBUG_DUPLICATE = BIT(2),
+};
+
+static int rpm_vreg_debug_mask;
+module_param_named(
+ debug_mask, rpm_vreg_debug_mask, int, S_IRUSR | S_IWUSR
+);
+
+#define vreg_err(req, fmt, ...) \
+ pr_err("%s: " fmt, req->rdesc.name, ##__VA_ARGS__)
+
+/* RPM regulator request types */
+enum rpm_regulator_type {
+ RPM_REGULATOR_TYPE_LDO,
+ RPM_REGULATOR_TYPE_SMPS,
+ RPM_REGULATOR_TYPE_VS,
+ RPM_REGULATOR_TYPE_NCP,
+ RPM_REGULATOR_TYPE_MAX,
+};
+
+/* RPM resource parameters */
+enum rpm_regulator_param_index {
+ RPM_REGULATOR_PARAM_ENABLE,
+ RPM_REGULATOR_PARAM_VOLTAGE,
+ RPM_REGULATOR_PARAM_CURRENT,
+ RPM_REGULATOR_PARAM_MODE_LDO,
+ RPM_REGULATOR_PARAM_MODE_SMPS,
+ RPM_REGULATOR_PARAM_PIN_CTRL_ENABLE,
+ RPM_REGULATOR_PARAM_PIN_CTRL_MODE,
+ RPM_REGULATOR_PARAM_FREQUENCY,
+ RPM_REGULATOR_PARAM_HEAD_ROOM,
+ RPM_REGULATOR_PARAM_QUIET_MODE,
+ RPM_REGULATOR_PARAM_FREQ_REASON,
+ RPM_REGULATOR_PARAM_CORNER,
+ RPM_REGULATOR_PARAM_BYPASS,
+ RPM_REGULATOR_PARAM_FLOOR_CORNER,
+ RPM_REGULATOR_PARAM_MAX,
+};
+
+enum rpm_regulator_smps_mode {
+ RPM_REGULATOR_SMPS_MODE_AUTO = 0,
+ RPM_REGULATOR_SMPS_MODE_IPEAK = 1,
+ RPM_REGULATOR_SMPS_MODE_PWM = 2,
+};
+
+enum rpm_regulator_ldo_mode {
+ RPM_REGULATOR_LDO_MODE_IPEAK = 0,
+ RPM_REGULATOR_LDO_MODE_HPM = 1,
+};
+
+#define RPM_SET_CONFIG_ACTIVE BIT(0)
+#define RPM_SET_CONFIG_SLEEP BIT(1)
+#define RPM_SET_CONFIG_BOTH (RPM_SET_CONFIG_ACTIVE \
+ | RPM_SET_CONFIG_SLEEP)
+struct rpm_regulator_param {
+ char *name;
+ char *property_name;
+ u32 key;
+ u32 min;
+ u32 max;
+ u32 supported_regulator_types;
+};
+
+#define PARAM(_idx, _support_ldo, _support_smps, _support_vs, _support_ncp, \
+ _name, _min, _max, _property_name) \
+ [RPM_REGULATOR_PARAM_##_idx] = { \
+ .name = _name, \
+ .property_name = _property_name, \
+ .min = _min, \
+ .max = _max, \
+ .supported_regulator_types = \
+ _support_ldo << RPM_REGULATOR_TYPE_LDO | \
+ _support_smps << RPM_REGULATOR_TYPE_SMPS | \
+ _support_vs << RPM_REGULATOR_TYPE_VS | \
+ _support_ncp << RPM_REGULATOR_TYPE_NCP, \
+ }
+
+static struct rpm_regulator_param params[RPM_REGULATOR_PARAM_MAX] = {
+ /* ID LDO SMPS VS NCP name min max property-name */
+ PARAM(ENABLE, 1, 1, 1, 1, "swen", 0, 1, "qcom,init-enable"),
+ PARAM(VOLTAGE, 1, 1, 0, 1, "uv", 0, 0x7FFFFFF, "qcom,init-voltage"),
+ PARAM(CURRENT, 1, 1, 0, 0, "ma", 0, 0x1FFF, "qcom,init-current"),
+ PARAM(MODE_LDO, 1, 0, 0, 0, "lsmd", 0, 1, "qcom,init-ldo-mode"),
+ PARAM(MODE_SMPS, 0, 1, 0, 0, "ssmd", 0, 2, "qcom,init-smps-mode"),
+ PARAM(PIN_CTRL_ENABLE, 1, 1, 1, 0, "pcen", 0, 0xF, "qcom,init-pin-ctrl-enable"),
+ PARAM(PIN_CTRL_MODE, 1, 1, 1, 0, "pcmd", 0, 0x1F, "qcom,init-pin-ctrl-mode"),
+ PARAM(FREQUENCY, 0, 1, 0, 1, "freq", 0, 31, "qcom,init-frequency"),
+ PARAM(HEAD_ROOM, 1, 0, 0, 1, "hr", 0, 0x7FFFFFFF, "qcom,init-head-room"),
+ PARAM(QUIET_MODE, 0, 1, 0, 0, "qm", 0, 2, "qcom,init-quiet-mode"),
+ PARAM(FREQ_REASON, 0, 1, 0, 1, "resn", 0, 8, "qcom,init-freq-reason"),
+ PARAM(CORNER, 1, 1, 0, 0, "corn", 0, 6, "qcom,init-voltage-corner"),
+ PARAM(BYPASS, 1, 0, 0, 0, "bypa", 0, 1, "qcom,init-disallow-bypass"),
+ PARAM(FLOOR_CORNER, 1, 1, 0, 0, "vfc", 0, 6, "qcom,init-voltage-floor-corner"),
+};
+
+struct rpm_regulator_mode_map {
+ int ldo_mode;
+ int smps_mode;
+};
+
+static struct rpm_regulator_mode_map mode_mapping[] = {
+ [RPM_REGULATOR_MODE_AUTO]
+ = {-1, RPM_REGULATOR_SMPS_MODE_AUTO},
+ [RPM_REGULATOR_MODE_IPEAK]
+ = {RPM_REGULATOR_LDO_MODE_IPEAK, RPM_REGULATOR_SMPS_MODE_IPEAK},
+ [RPM_REGULATOR_MODE_HPM]
+ = {RPM_REGULATOR_LDO_MODE_HPM, RPM_REGULATOR_SMPS_MODE_PWM},
+};
+
+struct rpm_vreg_request {
+ u32 param[RPM_REGULATOR_PARAM_MAX];
+ u32 valid;
+ u32 modified;
+};
+
+struct rpm_vreg {
+ struct rpm_vreg_request aggr_req_active;
+ struct rpm_vreg_request aggr_req_sleep;
+ struct list_head reg_list;
+ const char *resource_name;
+ u32 resource_id;
+ bool allow_atomic;
+ int regulator_type;
+ int hpm_min_load;
+ int enable_time;
+ spinlock_t slock;
+ struct mutex mlock;
+ unsigned long flags;
+ bool sleep_request_sent;
+ bool apps_only;
+ struct msm_rpm_request *handle_active;
+ struct msm_rpm_request *handle_sleep;
+};
+
+struct rpm_regulator {
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+ struct rpm_vreg *rpm_vreg;
+ struct list_head list;
+ bool set_active;
+ bool set_sleep;
+ bool always_send_voltage;
+ bool always_send_current;
+ struct rpm_vreg_request req;
+ int system_load;
+ int min_uV;
+ int max_uV;
+};
+
+/*
+ * This voltage in uV is returned by get_voltage functions when there is no way
+ * to determine the current voltage level. It is needed because the regulator
+ * framework treats a 0 uV voltage as an error.
+ */
+#define VOLTAGE_UNKNOWN 1
+
+/*
+ * Regulator requests sent in the active set take effect immediately. Requests
+ * sent in the sleep set take effect when the Apps processor transitions into
+ * RPM assisted power collapse. For any given regulator, if an active set
+ * request is present, but not a sleep set request, then the active set request
+ * is used at all times, even when the Apps processor is power collapsed.
+ *
+ * The rpm-regulator-smd takes advantage of this default usage of the active set
+ * request by only sending a sleep set request if it differs from the
+ * corresponding active set request.
+ */
+#define RPM_SET_ACTIVE MSM_RPM_CTX_ACTIVE_SET
+#define RPM_SET_SLEEP MSM_RPM_CTX_SLEEP_SET
+
+/*
+ * Pack up to the first four bytes of @str into a u32 with the first
+ * character in the lowest byte.  Used to build the RPM KVP keys from the
+ * short parameter name strings (e.g. "uv", "swen") in the params table.
+ */
+static u32 rpm_vreg_string_to_int(const u8 *str)
+{
+ int i, len;
+ u32 output = 0;
+
+ len = strnlen(str, sizeof(u32));
+ for (i = 0; i < len; i++)
+ output |= str[i] << (i * 8);
+
+ return output;
+}
+
+/*
+ * Take the per-resource lock: a spinlock (irqs saved into rpm_vreg->flags)
+ * when the resource is configured for atomic-context use, else a mutex.
+ */
+static inline void rpm_vreg_lock(struct rpm_vreg *rpm_vreg)
+{
+ if (rpm_vreg->allow_atomic)
+ spin_lock_irqsave(&rpm_vreg->slock, rpm_vreg->flags);
+ else
+ mutex_lock(&rpm_vreg->mlock);
+}
+
+/* Release the lock taken by rpm_vreg_lock(). */
+static inline void rpm_vreg_unlock(struct rpm_vreg *rpm_vreg)
+{
+ if (rpm_vreg->allow_atomic)
+ spin_unlock_irqrestore(&rpm_vreg->slock, rpm_vreg->flags);
+ else
+ mutex_unlock(&rpm_vreg->mlock);
+}
+
+/*
+ * True when the aggregated active or sleep request carries a valid ENABLE
+ * parameter whose value is nonzero (i.e. the resource is requested on).
+ */
+static inline bool rpm_vreg_active_or_sleep_enabled(struct rpm_vreg *rpm_vreg)
+{
+ return (rpm_vreg->aggr_req_active.param[RPM_REGULATOR_PARAM_ENABLE]
+ && (rpm_vreg->aggr_req_active.valid
+ & BIT(RPM_REGULATOR_PARAM_ENABLE)))
+ || ((rpm_vreg->aggr_req_sleep.param[RPM_REGULATOR_PARAM_ENABLE])
+ && (rpm_vreg->aggr_req_sleep.valid
+ & BIT(RPM_REGULATOR_PARAM_ENABLE)));
+}
+
+/*
+ * True when the resource is shared with other masters (not apps_only) and
+ * an ENABLE parameter has ever been sent in either the active or sleep set,
+ * regardless of its value.
+ */
+static inline bool rpm_vreg_shared_active_or_sleep_enabled_valid
+ (struct rpm_vreg *rpm_vreg)
+{
+ return !rpm_vreg->apps_only &&
+ ((rpm_vreg->aggr_req_active.valid
+ & BIT(RPM_REGULATOR_PARAM_ENABLE))
+ || (rpm_vreg->aggr_req_sleep.valid
+ & BIT(RPM_REGULATOR_PARAM_ENABLE)));
+}
+
+/*
+ * This is used when voting for LPM or HPM by subtracting or adding to the
+ * hpm_min_load of a regulator. It has units of uA.
+ */
+#define LOAD_THRESHOLD_STEP 1000
+
+/* Minimum load (uA) that votes the regulator into high power mode. */
+static inline int rpm_vreg_hpm_min_uA(struct rpm_vreg *rpm_vreg)
+{
+ return rpm_vreg->hpm_min_load;
+}
+
+/* Maximum load (uA) that still keeps the regulator in low power mode. */
+static inline int rpm_vreg_lpm_max_uA(struct rpm_vreg *rpm_vreg)
+{
+ return rpm_vreg->hpm_min_load - LOAD_THRESHOLD_STEP;
+}
+
+#define MICRO_TO_MILLI(uV) ((uV) / 1000)
+#define MILLI_TO_MICRO(uV) ((uV) * 1000)
+
+#define DEBUG_PRINT_BUFFER_SIZE 512
+#define REQ_SENT 0
+#define REQ_PREV 1
+#define REQ_CACHED 2
+#define REQ_TYPES 3
+
+/*
+ * Log the state of an aggregated RPM regulator request according to the
+ * debug_mask module parameter.  "sent" KVPs were transmitted this time,
+ * "prev" were already valid, and "cached" are sleep-set values that have
+ * not yet been sent to the RPM.
+ */
+static void rpm_regulator_req(struct rpm_regulator *regulator, int set,
+ bool sent)
+{
+ char buf[DEBUG_PRINT_BUFFER_SIZE];
+ size_t buflen = DEBUG_PRINT_BUFFER_SIZE;
+ struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+ struct rpm_vreg_request *aggr;
+ bool first;
+ u32 mask[REQ_TYPES] = {0, 0, 0};
+ const char *req_names[REQ_TYPES] = {"sent", "prev", "cached"};
+ int pos = 0;
+ int i, j;
+
+ aggr = (set == RPM_SET_ACTIVE)
+ ? &rpm_vreg->aggr_req_active : &rpm_vreg->aggr_req_sleep;
+
+ if (rpm_vreg_debug_mask & RPM_VREG_DEBUG_DUPLICATE) {
+ mask[REQ_SENT] = aggr->modified;
+ mask[REQ_PREV] = aggr->valid & ~aggr->modified;
+ } else if (sent
+ && (rpm_vreg_debug_mask & RPM_VREG_DEBUG_FULL_REQUEST)) {
+ mask[REQ_SENT] = aggr->modified;
+ mask[REQ_PREV] = aggr->valid & ~aggr->modified;
+ } else if (sent && (rpm_vreg_debug_mask & RPM_VREG_DEBUG_REQUEST)) {
+ mask[REQ_SENT] = aggr->modified;
+ }
+
+ if (!(mask[REQ_SENT] | mask[REQ_PREV]))
+ return;
+
+ /* Sleep-set values are only cached until the first sleep request. */
+ if (set == RPM_SET_SLEEP && !rpm_vreg->sleep_request_sent) {
+ mask[REQ_CACHED] = mask[REQ_SENT] | mask[REQ_PREV];
+ mask[REQ_SENT] = 0;
+ mask[REQ_PREV] = 0;
+ }
+
+ pos += scnprintf(buf + pos, buflen - pos, "%s %u (%s): s=%s",
+ rpm_vreg->resource_name, rpm_vreg->resource_id,
+ regulator->rdesc.name,
+ (set == RPM_SET_ACTIVE ? "act" : "slp"));
+
+ for (i = 0; i < REQ_TYPES; i++) {
+ if (mask[i])
+ pos += scnprintf(buf + pos, buflen - pos, "; %s: ",
+ req_names[i]);
+
+ first = true;
+ for (j = 0; j < RPM_REGULATOR_PARAM_MAX; j++) {
+ if (mask[i] & BIT(j)) {
+ pos += scnprintf(buf + pos, buflen - pos,
+ "%s%s=%u", (first ? "" : ", "),
+ params[j].name, aggr->param[j]);
+ first = false;
+ }
+ }
+ }
+
+ pos += scnprintf(buf + pos, buflen - pos, "\n");
+ /*
+ * Never pass the assembled buffer as the printk format string: the
+ * resource and regulator names come from the device tree and could
+ * contain '%' sequences (format-string hazard).  The log level and
+ * function prefix are supplied by the literal format instead of
+ * being pre-baked into buf, producing identical output.
+ */
+ printk(KERN_INFO "%s: %s", __func__, buf);
+}
+
+#define RPM_VREG_SET_PARAM(_regulator, _param, _val) \
+{ \
+ (_regulator)->req.param[RPM_REGULATOR_PARAM_##_param] = _val; \
+ (_regulator)->req.modified |= BIT(RPM_REGULATOR_PARAM_##_param); \
+} \
+
+/*
+ * Add one key/value pair (4-byte value for parameter @idx) to the RPM
+ * request handle for @set, using the noirq variant when the resource is
+ * configured for atomic-context use.  Returns the msm_rpm_add_kvp_data*
+ * result.
+ */
+static int rpm_vreg_add_kvp_to_request(struct rpm_vreg *rpm_vreg,
+ const u32 *param, int idx, u32 set)
+{
+ struct msm_rpm_request *handle;
+
+ handle = (set == RPM_SET_ACTIVE ? rpm_vreg->handle_active
+ : rpm_vreg->handle_sleep);
+
+ if (rpm_vreg->allow_atomic)
+ return msm_rpm_add_kvp_data_noirq(handle, params[idx].key,
+ (u8 *)&param[idx], 4);
+ else
+ return msm_rpm_add_kvp_data(handle, params[idx].key,
+ (u8 *)&param[idx], 4);
+}
+
+/*
+ * Filter @modified down to parameters that actually need (re)sending:
+ * those whose value changed versus @prev_param or that were never
+ * previously valid.  Avoids duplicate RPM traffic for unchanged values.
+ */
+static void rpm_vreg_check_modified_requests(const u32 *prev_param,
+ const u32 *param, u32 prev_valid, u32 *modified)
+{
+ u32 value_changed = 0;
+ int i;
+
+ for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+ if (param[i] != prev_param[i])
+ value_changed |= BIT(i);
+ }
+
+ /*
+ * Only keep bits that are for changed parameters or previously
+ * invalid parameters.
+ */
+ *modified &= value_changed | ~prev_valid;
+}
+
+/*
+ * Add a KVP to the @set RPM request for every parameter flagged in
+ * @modified.  Stops and returns the error on the first failed add; returns
+ * 0 when all KVPs were queued.
+ */
+static int rpm_vreg_add_modified_requests(struct rpm_regulator *regulator,
+ u32 set, const u32 *param, u32 modified)
+{
+ struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+ int rc = 0;
+ int i;
+
+ for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+ /* Only send requests for modified parameters. */
+ if (modified & BIT(i)) {
+ rc = rpm_vreg_add_kvp_to_request(rpm_vreg, param, i,
+ set);
+ if (rc) {
+ vreg_err(regulator,
+ "add KVP failed: %s %u; %s, rc=%d\n",
+ rpm_vreg->resource_name,
+ rpm_vreg->resource_id, params[i].name,
+ rc);
+ return rc;
+ }
+ }
+ }
+
+ return rc;
+}
+
+/*
+ * Transmit the previously built RPM request for @set and block until the
+ * RPM acknowledges it (noirq variants in atomic mode).  Returns 0 on
+ * success or the msm_rpm error code.
+ */
+static int rpm_vreg_send_request(struct rpm_regulator *regulator, u32 set)
+{
+ struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+ struct msm_rpm_request *handle
+ = (set == RPM_SET_ACTIVE ? rpm_vreg->handle_active
+ : rpm_vreg->handle_sleep);
+ int rc;
+
+ if (rpm_vreg->allow_atomic)
+ rc = msm_rpm_wait_for_ack_noirq(msm_rpm_send_request_noirq(
+ handle));
+ else
+ rc = msm_rpm_wait_for_ack(msm_rpm_send_request(handle));
+
+ if (rc)
+ vreg_err(regulator,
+ "msm rpm send failed: %s %u; set=%s, rc=%d\n",
+ rpm_vreg->resource_name,
+ rpm_vreg->resource_id,
+ (set == RPM_SET_ACTIVE ? "act" : "slp"), rc);
+
+ return rc;
+}
+
+#define RPM_VREG_AGGR_MIN(_idx, _param_aggr, _param_reg) \
+{ \
+ _param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+ = min(_param_aggr[RPM_REGULATOR_PARAM_##_idx], \
+ _param_reg[RPM_REGULATOR_PARAM_##_idx]); \
+}
+
+#define RPM_VREG_AGGR_MAX(_idx, _param_aggr, _param_reg) \
+{ \
+ _param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+ = max(_param_aggr[RPM_REGULATOR_PARAM_##_idx], \
+ _param_reg[RPM_REGULATOR_PARAM_##_idx]); \
+}
+
+#define RPM_VREG_AGGR_SUM(_idx, _param_aggr, _param_reg) \
+{ \
+ _param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+ += _param_reg[RPM_REGULATOR_PARAM_##_idx]; \
+}
+
+#define RPM_VREG_AGGR_OR(_idx, _param_aggr, _param_reg) \
+{ \
+ _param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+ |= _param_reg[RPM_REGULATOR_PARAM_##_idx]; \
+}
+
+/*
+ * Aggregation is performed on each parameter based on the way that the RPM
+ * aggregates that type internally between RPM masters: max for levels and
+ * modes, sum for load current, bitwise OR for pin-control masks, and min
+ * for FREQUENCY.  NOTE(review): FREQUENCY using min presumably mirrors the
+ * RPM's own cross-master aggregation of the raw frequency code -- confirm
+ * against the RPM interface spec.
+ */
+static void rpm_vreg_aggregate_params(u32 *param_aggr, const u32 *param_reg)
+{
+ RPM_VREG_AGGR_MAX(ENABLE, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(VOLTAGE, param_aggr, param_reg);
+ RPM_VREG_AGGR_SUM(CURRENT, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(MODE_LDO, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(MODE_SMPS, param_aggr, param_reg);
+ RPM_VREG_AGGR_OR(PIN_CTRL_ENABLE, param_aggr, param_reg);
+ RPM_VREG_AGGR_OR(PIN_CTRL_MODE, param_aggr, param_reg);
+ RPM_VREG_AGGR_MIN(FREQUENCY, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(HEAD_ROOM, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(QUIET_MODE, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(FREQ_REASON, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(CORNER, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(BYPASS, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(FLOOR_CORNER, param_aggr, param_reg);
+}
+
+/*
+ * Re-aggregate the requests of every consumer attached to this RPM
+ * resource and send the resulting active (and, when needed, sleep) set
+ * requests to the RPM.  Caller must hold the rpm_vreg lock.  Returns 0 on
+ * success or the first send/add error; on early error return the cached
+ * aggregate state is intentionally left unmodified.
+ */
+static int rpm_vreg_aggregate_requests(struct rpm_regulator *regulator)
+{
+ struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+ u32 param_active[RPM_REGULATOR_PARAM_MAX];
+ u32 param_sleep[RPM_REGULATOR_PARAM_MAX];
+ u32 modified_active, modified_sleep;
+ struct rpm_regulator *reg;
+ bool sleep_set_differs = false;
+ bool send_active = false;
+ bool send_sleep = false;
+ int rc = 0;
+ int i;
+
+ memset(param_active, 0, sizeof(param_active));
+ memset(param_sleep, 0, sizeof(param_sleep));
+ modified_active = rpm_vreg->aggr_req_active.modified;
+ modified_sleep = rpm_vreg->aggr_req_sleep.modified;
+
+ /*
+ * Aggregate all of the requests for this regulator in both active
+ * and sleep sets.
+ */
+ list_for_each_entry(reg, &rpm_vreg->reg_list, list) {
+ if (reg->set_active) {
+ rpm_vreg_aggregate_params(param_active, reg->req.param);
+ modified_active |= reg->req.modified;
+ }
+ if (reg->set_sleep) {
+ rpm_vreg_aggregate_params(param_sleep, reg->req.param);
+ modified_sleep |= reg->req.modified;
+ }
+ }
+
+ /*
+ * Check if the aggregated sleep set parameter values differ from the
+ * aggregated active set parameter values.
+ */
+ if (!rpm_vreg->sleep_request_sent) {
+ for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+ if ((param_active[i] != param_sleep[i])
+ && (modified_sleep & BIT(i))) {
+ sleep_set_differs = true;
+ break;
+ }
+ }
+ }
+
+ /* Add KVPs to the active set RPM request if they have new values. */
+ rpm_vreg_check_modified_requests(rpm_vreg->aggr_req_active.param,
+ param_active, rpm_vreg->aggr_req_active.valid,
+ &modified_active);
+ rc = rpm_vreg_add_modified_requests(regulator, RPM_SET_ACTIVE,
+ param_active, modified_active);
+ if (rc)
+ return rc;
+ /* Nonzero modified mask -> at least one KVP to transmit. */
+ send_active = modified_active;
+
+ /*
+ * Sleep set configurations are only sent if they differ from the
+ * active set values. This is because the active set values will take
+ * effect during rpm assisted power collapse in the absence of sleep set
+ * values.
+ *
+ * However, once a sleep set request is sent for a given regulator,
+ * additional sleep set requests must be sent in the future even if they
+ * match the corresponding active set requests.
+ */
+ if (rpm_vreg->sleep_request_sent || sleep_set_differs) {
+ /* Add KVPs to the sleep set RPM request if they are new. */
+ rpm_vreg_check_modified_requests(rpm_vreg->aggr_req_sleep.param,
+ param_sleep, rpm_vreg->aggr_req_sleep.valid,
+ &modified_sleep);
+ rc = rpm_vreg_add_modified_requests(regulator, RPM_SET_SLEEP,
+ param_sleep, modified_sleep);
+ if (rc)
+ return rc;
+ send_sleep = modified_sleep;
+ }
+
+ /* Send active set request to the RPM if it contains new KVPs. */
+ if (send_active) {
+ rc = rpm_vreg_send_request(regulator, RPM_SET_ACTIVE);
+ if (rc)
+ return rc;
+ rpm_vreg->aggr_req_active.valid |= modified_active;
+ }
+ /* Store the results of the aggregation. */
+ rpm_vreg->aggr_req_active.modified = modified_active;
+ memcpy(rpm_vreg->aggr_req_active.param, param_active,
+ sizeof(param_active));
+
+ /* Handle debug printing of the active set request. */
+ rpm_regulator_req(regulator, RPM_SET_ACTIVE, send_active);
+ if (send_active)
+ rpm_vreg->aggr_req_active.modified = 0;
+
+ /* Send sleep set request to the RPM if it contains new KVPs. */
+ if (send_sleep) {
+ rc = rpm_vreg_send_request(regulator, RPM_SET_SLEEP);
+ if (rc)
+ return rc;
+ else
+ rpm_vreg->sleep_request_sent = true;
+ rpm_vreg->aggr_req_sleep.valid |= modified_sleep;
+ }
+ /* Store the results of the aggregation. */
+ rpm_vreg->aggr_req_sleep.modified = modified_sleep;
+ memcpy(rpm_vreg->aggr_req_sleep.param, param_sleep,
+ sizeof(param_sleep));
+
+ /* Handle debug printing of the sleep set request. */
+ rpm_regulator_req(regulator, RPM_SET_SLEEP, send_sleep);
+ if (send_sleep)
+ rpm_vreg->aggr_req_sleep.modified = 0;
+
+ /*
+ * Loop over all requests for this regulator to update the valid and
+ * modified values for use in future aggregation.
+ */
+ list_for_each_entry(reg, &rpm_vreg->reg_list, list) {
+ reg->req.valid |= reg->req.modified;
+ reg->req.modified = 0;
+ }
+
+ return rc;
+}
+
+/*
+ * regulator_ops is_enabled callback.  Reports this consumer's requested
+ * enable state, not the aggregated hardware state.
+ */
+static int rpm_vreg_is_enabled(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+ return reg->req.param[RPM_REGULATOR_PARAM_ENABLE];
+}
+
+/*
+ * regulator_ops enable callback.  Sets this consumer's ENABLE parameter
+ * and re-aggregates/sends the request; the previous value is restored on
+ * failure so local state stays consistent with the RPM.
+ */
+static int rpm_vreg_enable(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int rc;
+ u32 prev_enable;
+
+ rpm_vreg_lock(reg->rpm_vreg);
+
+ prev_enable = reg->req.param[RPM_REGULATOR_PARAM_ENABLE];
+ RPM_VREG_SET_PARAM(reg, ENABLE, 1);
+ rc = rpm_vreg_aggregate_requests(reg);
+ if (rc) {
+ vreg_err(reg, "enable failed, rc=%d", rc);
+ RPM_VREG_SET_PARAM(reg, ENABLE, prev_enable);
+ }
+
+ rpm_vreg_unlock(reg->rpm_vreg);
+
+ return rc;
+}
+
+/*
+ * regulator_ops disable callback.  Clears this consumer's ENABLE parameter
+ * and re-aggregates/sends the request; the previous value is restored on
+ * failure.  The resource may remain on if other consumers still vote for
+ * it.
+ */
+static int rpm_vreg_disable(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int rc;
+ u32 prev_enable;
+
+ rpm_vreg_lock(reg->rpm_vreg);
+
+ prev_enable = reg->req.param[RPM_REGULATOR_PARAM_ENABLE];
+ RPM_VREG_SET_PARAM(reg, ENABLE, 0);
+ rc = rpm_vreg_aggregate_requests(reg);
+ if (rc) {
+ /* Fixed copy-paste: this is the disable path, not enable. */
+ vreg_err(reg, "disable failed, rc=%d", rc);
+ RPM_VREG_SET_PARAM(reg, ENABLE, prev_enable);
+ }
+
+ rpm_vreg_unlock(reg->rpm_vreg);
+
+ return rc;
+}
+
+/*
+ * regulator_ops set_voltage callback.  Records @min_uV as this consumer's
+ * requested voltage; the aggregate is only sent while the regulator is
+ * enabled (or configured to always send voltage).  @selector is unused.
+ * On failure the previous voltage is restored.
+ */
+static int rpm_vreg_set_voltage(struct regulator_dev *rdev, int min_uV,
+ int max_uV, unsigned *selector)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int rc = 0;
+ u32 prev_voltage;
+
+ rpm_vreg_lock(reg->rpm_vreg);
+
+ prev_voltage = reg->req.param[RPM_REGULATOR_PARAM_VOLTAGE];
+ RPM_VREG_SET_PARAM(reg, VOLTAGE, min_uV);
+
+ /*
+ * Only send a new voltage if the regulator is currently enabled or
+ * if the regulator has been configured to always send voltage updates.
+ */
+ if (reg->always_send_voltage
+ || rpm_vreg_active_or_sleep_enabled(reg->rpm_vreg)
+ || rpm_vreg_shared_active_or_sleep_enabled_valid(reg->rpm_vreg))
+ rc = rpm_vreg_aggregate_requests(reg);
+
+ if (rc) {
+ vreg_err(reg, "set voltage failed, rc=%d", rc);
+ RPM_VREG_SET_PARAM(reg, VOLTAGE, prev_voltage);
+ }
+
+ rpm_vreg_unlock(reg->rpm_vreg);
+
+ return rc;
+}
+
+/*
+ * regulator_ops get_voltage callback.  Returns the last requested voltage,
+ * or VOLTAGE_UNKNOWN (1 uV) when none has been requested, since the
+ * framework treats 0 uV as an error.
+ */
+static int rpm_vreg_get_voltage(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int uV;
+
+ uV = reg->req.param[RPM_REGULATOR_PARAM_VOLTAGE];
+ if (uV == 0)
+ uV = VOLTAGE_UNKNOWN;
+
+ return uV;
+}
+
+/*
+ * set_voltage-style callback for corner-controlled regulators.  The uV
+ * inputs encode corner values offset by RPM_REGULATOR_CORNER_NONE so they
+ * pass through the regulator framework's voltage APIs.  @selector is
+ * unused; the previous corner is restored on failure.
+ */
+static int rpm_vreg_set_voltage_corner(struct regulator_dev *rdev, int min_uV,
+ int max_uV, unsigned *selector)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int rc = 0;
+ int corner;
+ u32 prev_corner;
+
+ /*
+ * Translate from values which work as inputs in the
+ * regulator_set_voltage function to the actual corner values
+ * sent to the RPM.
+ */
+ corner = min_uV - RPM_REGULATOR_CORNER_NONE;
+
+ if (corner < params[RPM_REGULATOR_PARAM_CORNER].min
+ || corner > params[RPM_REGULATOR_PARAM_CORNER].max) {
+ vreg_err(reg, "corner=%d is not within allowed range: [%u, %u]\n",
+ corner, params[RPM_REGULATOR_PARAM_CORNER].min,
+ params[RPM_REGULATOR_PARAM_CORNER].max);
+ return -EINVAL;
+ }
+
+ rpm_vreg_lock(reg->rpm_vreg);
+
+ prev_corner = reg->req.param[RPM_REGULATOR_PARAM_CORNER];
+ RPM_VREG_SET_PARAM(reg, CORNER, corner);
+
+ /*
+ * Only send a new voltage corner if the regulator is currently enabled
+ * or if the regulator has been configured to always send voltage
+ * updates.
+ */
+ if (reg->always_send_voltage
+ || rpm_vreg_active_or_sleep_enabled(reg->rpm_vreg)
+ || rpm_vreg_shared_active_or_sleep_enabled_valid(reg->rpm_vreg))
+ rc = rpm_vreg_aggregate_requests(reg);
+
+ if (rc) {
+ vreg_err(reg, "set voltage corner failed, rc=%d", rc);
+ RPM_VREG_SET_PARAM(reg, CORNER, prev_corner);
+ }
+
+ rpm_vreg_unlock(reg->rpm_vreg);
+
+ return rc;
+}
+
+/*
+ * get_voltage counterpart for corner control: returns the requested corner
+ * re-encoded into the framework's uV space (offset by
+ * RPM_REGULATOR_CORNER_NONE).
+ */
+static int rpm_vreg_get_voltage_corner(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+ return reg->req.param[RPM_REGULATOR_PARAM_CORNER]
+ + RPM_REGULATOR_CORNER_NONE;
+}
+
+/*
+ * set_voltage-style callback for floor-corner control.  Like
+ * rpm_vreg_set_voltage_corner(), but operates on the FLOOR_CORNER
+ * parameter.  @selector is unused; the previous floor corner is restored
+ * on failure.
+ */
+static int rpm_vreg_set_voltage_floor_corner(struct regulator_dev *rdev,
+ int min_uV, int max_uV, unsigned *selector)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int rc = 0;
+ int corner;
+ u32 prev_corner;
+
+ /*
+ * Translate from values which work as inputs in the
+ * regulator_set_voltage function to the actual corner values
+ * sent to the RPM.
+ */
+ corner = min_uV - RPM_REGULATOR_CORNER_NONE;
+
+ if (corner < params[RPM_REGULATOR_PARAM_FLOOR_CORNER].min
+ || corner > params[RPM_REGULATOR_PARAM_FLOOR_CORNER].max) {
+ vreg_err(reg, "corner=%d is not within allowed range: [%u, %u]\n",
+ corner, params[RPM_REGULATOR_PARAM_FLOOR_CORNER].min,
+ params[RPM_REGULATOR_PARAM_FLOOR_CORNER].max);
+ return -EINVAL;
+ }
+
+ rpm_vreg_lock(reg->rpm_vreg);
+
+ prev_corner = reg->req.param[RPM_REGULATOR_PARAM_FLOOR_CORNER];
+ RPM_VREG_SET_PARAM(reg, FLOOR_CORNER, corner);
+
+ /*
+ * Only send a new voltage floor corner if the regulator is currently
+ * enabled or if the regulator has been configured to always send
+ * voltage updates.
+ */
+ if (reg->always_send_voltage
+ || rpm_vreg_active_or_sleep_enabled(reg->rpm_vreg)
+ || rpm_vreg_shared_active_or_sleep_enabled_valid(reg->rpm_vreg))
+ rc = rpm_vreg_aggregate_requests(reg);
+
+ if (rc) {
+ /* Fixed copy-paste: identify the floor-corner path in the log. */
+ vreg_err(reg, "set voltage floor corner failed, rc=%d", rc);
+ RPM_VREG_SET_PARAM(reg, FLOOR_CORNER, prev_corner);
+ }
+
+ rpm_vreg_unlock(reg->rpm_vreg);
+
+ return rc;
+}
+
+/*
+ * get_voltage counterpart for floor-corner control: returns the requested
+ * floor corner re-encoded into the framework's uV space.
+ */
+static int rpm_vreg_get_voltage_floor_corner(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+ return reg->req.param[RPM_REGULATOR_PARAM_FLOOR_CORNER]
+ + RPM_REGULATOR_CORNER_NONE;
+}
+
+/*
+ * regulator_ops set_mode callback.  Nudges the requested load current into
+ * the HPM range for REGULATOR_MODE_NORMAL or the LPM range for
+ * REGULATOR_MODE_IDLE, then re-aggregates if the regulator is enabled (or
+ * always sends current).  Restores the previous current on failure.
+ */
+static int rpm_vreg_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int rc = 0;
+ u32 prev_current;
+ int prev_uA;
+
+ rpm_vreg_lock(reg->rpm_vreg);
+
+ prev_current = reg->req.param[RPM_REGULATOR_PARAM_CURRENT];
+ prev_uA = MILLI_TO_MICRO(prev_current);
+
+ if (mode == REGULATOR_MODE_NORMAL) {
+ /* Make sure that request current is in HPM range. */
+ if (prev_uA < rpm_vreg_hpm_min_uA(reg->rpm_vreg))
+ RPM_VREG_SET_PARAM(reg, CURRENT,
+ MICRO_TO_MILLI(rpm_vreg_hpm_min_uA(reg->rpm_vreg)));
+ } else if (mode == REGULATOR_MODE_IDLE) {
+ /*
+ * Bug fix: the condition previously read "else if
+ * (REGULATOR_MODE_IDLE)", a nonzero constant that is always
+ * true, so any unsupported mode silently took the LPM branch
+ * and the -EINVAL path below was unreachable.
+ */
+ /* Make sure that request current is in LPM range. */
+ if (prev_uA > rpm_vreg_lpm_max_uA(reg->rpm_vreg))
+ RPM_VREG_SET_PARAM(reg, CURRENT,
+ MICRO_TO_MILLI(rpm_vreg_lpm_max_uA(reg->rpm_vreg)));
+ } else {
+ vreg_err(reg, "invalid mode: %u\n", mode);
+ rpm_vreg_unlock(reg->rpm_vreg);
+ return -EINVAL;
+ }
+
+ /*
+ * Only send a new load current value if the regulator is currently
+ * enabled or if the regulator has been configured to always send
+ * current updates.
+ */
+ if (reg->always_send_current
+ || rpm_vreg_active_or_sleep_enabled(reg->rpm_vreg)
+ || rpm_vreg_shared_active_or_sleep_enabled_valid(reg->rpm_vreg))
+ rc = rpm_vreg_aggregate_requests(reg);
+
+ if (rc) {
+ vreg_err(reg, "set mode failed, rc=%d", rc);
+ RPM_VREG_SET_PARAM(reg, CURRENT, prev_current);
+ }
+
+ rpm_vreg_unlock(reg->rpm_vreg);
+
+ return rc;
+}
+
+/*
+ * regulator_ops get_mode callback.  Derives the mode from this consumer's
+ * requested load current: at or above hpm_min_load means NORMAL, otherwise
+ * IDLE.
+ */
+static unsigned int rpm_vreg_get_mode(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+ return (reg->req.param[RPM_REGULATOR_PARAM_CURRENT]
+ >= MICRO_TO_MILLI(reg->rpm_vreg->hpm_min_load))
+ ? REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;
+}
+
+/*
+ * regulator_ops get_optimum_mode callback.  Caches the clamped total load
+ * (consumer load plus system_load, in mA) into the local request -- the
+ * value is transmitted later, when set_mode triggers aggregation -- and
+ * reports NORMAL or IDLE based on the hpm_min_load threshold.
+ */
+static unsigned int rpm_vreg_get_optimum_mode(struct regulator_dev *rdev,
+ int input_uV, int output_uV, int load_uA)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ u32 load_mA;
+
+ load_uA += reg->system_load;
+
+ load_mA = MICRO_TO_MILLI(load_uA);
+ if (load_mA > params[RPM_REGULATOR_PARAM_CURRENT].max)
+ load_mA = params[RPM_REGULATOR_PARAM_CURRENT].max;
+
+ rpm_vreg_lock(reg->rpm_vreg);
+ RPM_VREG_SET_PARAM(reg, CURRENT, load_mA);
+ rpm_vreg_unlock(reg->rpm_vreg);
+
+ return (load_uA >= reg->rpm_vreg->hpm_min_load)
+ ? REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;
+}
+
+/* regulator_ops enable_time callback: fixed per-resource value in us. */
+static int rpm_vreg_enable_time(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+ return reg->rpm_vreg->enable_time;
+}
+
+/**
+ * rpm_regulator_get() - lookup and obtain a handle to an RPM regulator
+ * @dev: device for regulator consumer
+ * @supply: supply name
+ *
+ * Returns a struct rpm_regulator corresponding to the regulator producer,
+ * or ERR_PTR() containing errno.
+ *
+ * This function may only be called from nonatomic context.
+ */
+struct rpm_regulator *rpm_regulator_get(struct device *dev, const char *supply)
+{
+ struct rpm_regulator *framework_reg;
+ struct rpm_regulator *priv_reg = NULL;
+ struct regulator *regulator;
+ struct rpm_vreg *rpm_vreg;
+
+ /* Resolve the supply through the regulator framework first. */
+ regulator = regulator_get(dev, supply);
+ if (IS_ERR(regulator)) {
+ pr_err("could not find regulator for: dev=%s, supply=%s, rc=%ld\n",
+ (dev ? dev_name(dev) : ""), (supply ? supply : ""),
+ PTR_ERR(regulator));
+ return ERR_CAST(regulator);
+ }
+
+ framework_reg = regulator_get_drvdata(regulator);
+ if (framework_reg == NULL) {
+ pr_err("regulator structure not found.\n");
+ regulator_put(regulator);
+ return ERR_PTR(-ENODEV);
+ }
+ regulator_put(regulator);
+
+ /*
+ * NOTE(review): framework_reg is still dereferenced below after
+ * regulator_put(); this relies on the underlying regulator staying
+ * registered for the lifetime of this call -- confirm.
+ */
+ rpm_vreg = framework_reg->rpm_vreg;
+
+ priv_reg = kzalloc(sizeof(struct rpm_regulator), GFP_KERNEL);
+ if (priv_reg == NULL) {
+ vreg_err(framework_reg,
+ "could not allocate memory for regulator\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * Allocate a regulator_dev struct so that framework callback functions
+ * can be called from the private API functions.
+ */
+ priv_reg->rdev = kzalloc(sizeof(struct regulator_dev), GFP_KERNEL);
+ if (priv_reg->rdev == NULL) {
+ vreg_err(framework_reg,
+ "could not allocate memory for regulator_dev\n");
+ kfree(priv_reg);
+ return ERR_PTR(-ENOMEM);
+ }
+ /* Clone the framework regulator's configuration into the handle. */
+ priv_reg->rdev->reg_data = priv_reg;
+ priv_reg->rpm_vreg = rpm_vreg;
+ priv_reg->rdesc.name = framework_reg->rdesc.name;
+ priv_reg->rdesc.ops = framework_reg->rdesc.ops;
+ priv_reg->set_active = framework_reg->set_active;
+ priv_reg->set_sleep = framework_reg->set_sleep;
+ priv_reg->min_uV = framework_reg->min_uV;
+ priv_reg->max_uV = framework_reg->max_uV;
+ priv_reg->system_load = framework_reg->system_load;
+
+ might_sleep_if(!rpm_vreg->allow_atomic);
+ rpm_vreg_lock(rpm_vreg);
+ list_add(&priv_reg->list, &rpm_vreg->reg_list);
+ rpm_vreg_unlock(rpm_vreg);
+
+ return priv_reg;
+}
+EXPORT_SYMBOL(rpm_regulator_get);
+
+/*
+ * Validate a private-API regulator handle.  Returns 0 when usable, -EINVAL
+ * otherwise, and warns if called from atomic context for a resource not
+ * configured for atomic use.
+ */
+static int rpm_regulator_check_input(struct rpm_regulator *regulator)
+{
+ if (IS_ERR_OR_NULL(regulator) || regulator->rpm_vreg == NULL) {
+ pr_err("invalid rpm_regulator pointer\n");
+ return -EINVAL;
+ }
+
+ might_sleep_if(!regulator->rpm_vreg->allow_atomic);
+
+ return 0;
+}
+
+/**
+ * rpm_regulator_put() - free the RPM regulator handle
+ * @regulator: RPM regulator handle
+ *
+ * Parameter reaggregation does not take place when rpm_regulator_put is called.
+ * Therefore, regulator enable state and voltage must be configured
+ * appropriately before calling rpm_regulator_put.
+ *
+ * This function may be called from either atomic or nonatomic context. If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+void rpm_regulator_put(struct rpm_regulator *regulator)
+{
+ struct rpm_vreg *rpm_vreg;
+ int rc = rpm_regulator_check_input(regulator);
+
+ if (rc)
+ return;
+
+ rpm_vreg = regulator->rpm_vreg;
+
+ /* Unlink from the resource's consumer list before freeing. */
+ might_sleep_if(!rpm_vreg->allow_atomic);
+ rpm_vreg_lock(rpm_vreg);
+ list_del(&regulator->list);
+ rpm_vreg_unlock(rpm_vreg);
+
+ kfree(regulator->rdev);
+ kfree(regulator);
+}
+EXPORT_SYMBOL(rpm_regulator_put);
+
+/**
+ * rpm_regulator_enable() - enable regulator output
+ * @regulator: RPM regulator handle
+ *
+ * Returns 0 on success or errno on failure.
+ *
+ * This function may be called from either atomic or nonatomic context. If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+int rpm_regulator_enable(struct rpm_regulator *regulator)
+{
+ int rc = rpm_regulator_check_input(regulator);
+
+ if (rc)
+ return rc;
+
+ /* Delegate to the regulator_ops enable path via the private rdev. */
+ return rpm_vreg_enable(regulator->rdev);
+}
+EXPORT_SYMBOL(rpm_regulator_enable);
+
+/**
+ * rpm_regulator_disable() - disable regulator output
+ * @regulator: RPM regulator handle
+ *
+ * Returns 0 on success or errno on failure.
+ *
+ * The enable state of the regulator is determined by aggregating the requests
+ * of all consumers. Therefore, it is possible that the regulator will remain
+ * enabled even after rpm_regulator_disable is called.
+ *
+ * This function may be called from either atomic or nonatomic context. If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+int rpm_regulator_disable(struct rpm_regulator *regulator)
+{
+ int rc = rpm_regulator_check_input(regulator);
+
+ if (rc)
+ return rc;
+
+ /* Delegate to the regulator_ops disable path via the private rdev. */
+ return rpm_vreg_disable(regulator->rdev);
+}
+EXPORT_SYMBOL(rpm_regulator_disable);
+
+/**
+ * rpm_regulator_set_voltage() - set regulator output voltage
+ * @regulator: RPM regulator handle
+ * @min_uV: minimum required voltage in uV
+ * @max_uV: maximum acceptable voltage in uV
+ *
+ * Sets a voltage regulator to the desired output voltage. This can be set
+ * while the regulator is disabled or enabled. If the regulator is enabled then
+ * the voltage will change to the new value immediately; otherwise, if the
+ * regulator is disabled, then the regulator will output at the new voltage when
+ * enabled.
+ *
+ * The min_uV to max_uV voltage range requested must intersect with the
+ * voltage constraint range configured for the regulator.
+ *
+ * Returns 0 on success or errno on failure.
+ *
+ * The final voltage value that is sent to the RPM is aggregated based upon the
+ * values requested by all consumers of the regulator. This corresponds to the
+ * maximum min_uV value.
+ *
+ * This function may be called from either atomic or nonatomic context. If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+int rpm_regulator_set_voltage(struct rpm_regulator *regulator, int min_uV,
+ int max_uV)
+{
+ int rc = rpm_regulator_check_input(regulator);
+ int uV = min_uV;
+
+ if (rc)
+ return rc;
+
+ /* Voltage switches (VS) have no voltage control. */
+ if (regulator->rpm_vreg->regulator_type == RPM_REGULATOR_TYPE_VS) {
+ vreg_err(regulator, "unsupported regulator type: %d\n",
+ regulator->rpm_vreg->regulator_type);
+ return -EINVAL;
+ }
+
+ if (min_uV > max_uV) {
+ vreg_err(regulator, "min_uV=%d must be less than max_uV=%d\n",
+ min_uV, max_uV);
+ return -EINVAL;
+ }
+
+ /*
+ * Bump the request up to this consumer's constraint floor when the
+ * caller's [min_uV, max_uV] window allows it.
+ */
+ if (uV < regulator->min_uV && max_uV >= regulator->min_uV)
+ uV = regulator->min_uV;
+
+ if (uV < regulator->min_uV || uV > regulator->max_uV) {
+ vreg_err(regulator,
+ "request v=[%d, %d] is outside allowed v=[%d, %d]\n",
+ min_uV, max_uV, regulator->min_uV, regulator->max_uV);
+ return -EINVAL;
+ }
+
+ /* Single set point: pass uV as both min and max to the ops callback. */
+ return regulator->rdesc.ops->set_voltage(regulator->rdev, uV, uV, NULL);
+}
+EXPORT_SYMBOL(rpm_regulator_set_voltage);
+
+/**
+ * rpm_regulator_set_mode() - set regulator operating mode
+ * @regulator: RPM regulator handle
+ * @mode: operating mode requested for the regulator
+ *
+ * Requests that the mode of the regulator be set to the mode specified. This
+ * parameter is aggregated using a max function such that AUTO < IPEAK < HPM.
+ *
+ * Returns 0 on success or errno on failure.
+ */
+int rpm_regulator_set_mode(struct rpm_regulator *regulator,
+				enum rpm_regulator_mode mode)
+{
+	int index = 0;
+	u32 new_mode, prev_mode;
+	int rc;
+
+	rc = rpm_regulator_check_input(regulator);
+	if (rc)
+		return rc;
+
+	if (mode < 0 || mode >= ARRAY_SIZE(mode_mapping)) {
+		vreg_err(regulator, "invalid mode requested: %d\n", mode);
+		return -EINVAL;
+	}
+
+	/* Translate the generic mode into the type-specific RPM parameter. */
+	switch (regulator->rpm_vreg->regulator_type) {
+	case RPM_REGULATOR_TYPE_SMPS:
+		index = RPM_REGULATOR_PARAM_MODE_SMPS;
+		new_mode = mode_mapping[mode].smps_mode;
+		break;
+	case RPM_REGULATOR_TYPE_LDO:
+		index = RPM_REGULATOR_PARAM_MODE_LDO;
+		new_mode = mode_mapping[mode].ldo_mode;
+		break;
+	default:
+		vreg_err(regulator, "unsupported regulator type: %d\n",
+			regulator->rpm_vreg->regulator_type);
+		return -EINVAL;
+	}
+
+	if (new_mode < params[index].min || new_mode > params[index].max) {
+		vreg_err(regulator, "invalid mode requested: %d for type: %d\n",
+			mode, regulator->rpm_vreg->regulator_type);
+		return -EINVAL;
+	}
+
+	rpm_vreg_lock(regulator->rpm_vreg);
+
+	prev_mode = regulator->req.param[index];
+	regulator->req.param[index] = new_mode;
+	regulator->req.modified |= BIT(index);
+
+	rc = rpm_vreg_aggregate_requests(regulator);
+	if (rc) {
+		vreg_err(regulator, "set mode failed, rc=%d\n", rc);
+		/* Roll back cached state so it matches what the RPM holds. */
+		regulator->req.param[index] = prev_mode;
+	}
+
+	rpm_vreg_unlock(regulator->rpm_vreg);
+
+	return rc;
+}
+EXPORT_SYMBOL(rpm_regulator_set_mode);
+
+/* Ops for LDO regulators controlled by absolute voltage (uV). */
+static struct regulator_ops ldo_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .set_voltage = rpm_vreg_set_voltage,
+ .get_voltage = rpm_vreg_get_voltage,
+ .set_mode = rpm_vreg_set_mode,
+ .get_mode = rpm_vreg_get_mode,
+ .get_optimum_mode = rpm_vreg_get_optimum_mode,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+/* Ops for LDO regulators controlled via voltage corner values. */
+static struct regulator_ops ldo_corner_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .set_voltage = rpm_vreg_set_voltage_corner,
+ .get_voltage = rpm_vreg_get_voltage_corner,
+ .set_mode = rpm_vreg_set_mode,
+ .get_mode = rpm_vreg_get_mode,
+ .get_optimum_mode = rpm_vreg_get_optimum_mode,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+/* Ops for LDO regulators controlled via voltage floor corner values. */
+static struct regulator_ops ldo_floor_corner_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .set_voltage = rpm_vreg_set_voltage_floor_corner,
+ .get_voltage = rpm_vreg_get_voltage_floor_corner,
+ .set_mode = rpm_vreg_set_mode,
+ .get_mode = rpm_vreg_get_mode,
+ .get_optimum_mode = rpm_vreg_get_optimum_mode,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+/* Ops for SMPS regulators controlled by absolute voltage (uV). */
+static struct regulator_ops smps_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .set_voltage = rpm_vreg_set_voltage,
+ .get_voltage = rpm_vreg_get_voltage,
+ .set_mode = rpm_vreg_set_mode,
+ .get_mode = rpm_vreg_get_mode,
+ .get_optimum_mode = rpm_vreg_get_optimum_mode,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+/* Ops for SMPS regulators controlled via voltage corner values. */
+static struct regulator_ops smps_corner_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .set_voltage = rpm_vreg_set_voltage_corner,
+ .get_voltage = rpm_vreg_get_voltage_corner,
+ .set_mode = rpm_vreg_set_mode,
+ .get_mode = rpm_vreg_get_mode,
+ .get_optimum_mode = rpm_vreg_get_optimum_mode,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+/* Ops for SMPS regulators controlled via voltage floor corner values. */
+static struct regulator_ops smps_floor_corner_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .set_voltage = rpm_vreg_set_voltage_floor_corner,
+ .get_voltage = rpm_vreg_get_voltage_floor_corner,
+ .set_mode = rpm_vreg_set_mode,
+ .get_mode = rpm_vreg_get_mode,
+ .get_optimum_mode = rpm_vreg_get_optimum_mode,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+/* Ops for voltage switches (VS): on/off only, no voltage control. */
+static struct regulator_ops switch_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+/* Ops for negative charge pumps (NCP): voltage control, no mode control. */
+static struct regulator_ops ncp_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .set_voltage = rpm_vreg_set_voltage,
+ .get_voltage = rpm_vreg_get_voltage,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+/* Default ops table indexed by RPM_REGULATOR_TYPE_*. */
+static struct regulator_ops *vreg_ops[] = {
+ [RPM_REGULATOR_TYPE_LDO] = &ldo_ops,
+ [RPM_REGULATOR_TYPE_SMPS] = &smps_ops,
+ [RPM_REGULATOR_TYPE_VS] = &switch_ops,
+ [RPM_REGULATOR_TYPE_NCP] = &ncp_ops,
+};
+
+/* Remove a child rpm-regulator device and free its consumer state. */
+static int rpm_vreg_device_remove(struct platform_device *pdev)
+{
+	struct rpm_regulator *reg = platform_get_drvdata(pdev);
+	struct rpm_vreg *rpm_vreg;
+
+	if (!reg) {
+		dev_err(&pdev->dev, "%s: drvdata missing\n", __func__);
+		return -EINVAL;
+	}
+
+	rpm_vreg = reg->rpm_vreg;
+	rpm_vreg_lock(rpm_vreg);
+	regulator_unregister(reg->rdev);
+	list_del(&reg->list);
+	kfree(reg);
+	rpm_vreg_unlock(rpm_vreg);
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+/*
+ * Remove a parent rpm-regulator resource device: free private consumer
+ * entries, release the RPM request handles, and free the shared state.
+ * Child devices must already have been removed.
+ */
+static int rpm_vreg_resource_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rpm_regulator *reg, *reg_temp;
+ struct rpm_vreg *rpm_vreg;
+
+ rpm_vreg = platform_get_drvdata(pdev);
+ if (rpm_vreg) {
+ rpm_vreg_lock(rpm_vreg);
+ list_for_each_entry_safe(reg, reg_temp, &rpm_vreg->reg_list,
+ list) {
+ /* Only touch data for private consumers. */
+ if (reg->rdev->desc == NULL) {
+ list_del(&reg->list);
+ kfree(reg->rdev);
+ kfree(reg);
+ } else {
+ /* Framework-registered regulator still alive. */
+ dev_err(dev, "%s: not all child devices have been removed\n",
+ __func__);
+ }
+ }
+ rpm_vreg_unlock(rpm_vreg);
+
+ /* Release both the active-set and sleep-set RPM handles. */
+ msm_rpm_free_request(rpm_vreg->handle_active);
+ msm_rpm_free_request(rpm_vreg->handle_sleep);
+
+ kfree(rpm_vreg);
+ } else {
+ dev_err(dev, "%s: drvdata missing\n", __func__);
+ return -EINVAL;
+ }
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+/*
+ * This probe is called for child rpm-regulator devices which have
+ * properties which are required to configure individual regulator
+ * framework regulators for a given RPM regulator resource.
+ */
+static int rpm_vreg_device_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct regulator_init_data *init_data;
+	struct rpm_vreg *rpm_vreg;
+	struct rpm_regulator *reg;
+	struct regulator_config reg_config = {};
+	int rc = 0;
+	int i, regulator_type;
+	u32 val;
+
+	if (!dev->of_node) {
+		dev_err(dev, "%s: device tree information missing\n", __func__);
+		return -ENODEV;
+	}
+
+	if (pdev->dev.parent == NULL) {
+		dev_err(dev, "%s: parent device missing\n", __func__);
+		return -ENODEV;
+	}
+
+	/* The parent resource device owns the shared rpm_vreg state. */
+	rpm_vreg = dev_get_drvdata(pdev->dev.parent);
+	if (rpm_vreg == NULL) {
+		dev_err(dev, "%s: rpm_vreg not found in parent device\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	reg = kzalloc(sizeof(struct rpm_regulator), GFP_KERNEL);
+	if (reg == NULL) {
+		dev_err(dev, "%s: could not allocate memory for reg\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	regulator_type = rpm_vreg->regulator_type;
+	reg->rpm_vreg = rpm_vreg;
+	reg->rdesc.ops = vreg_ops[regulator_type];
+	reg->rdesc.owner = THIS_MODULE;
+	reg->rdesc.type = REGULATOR_VOLTAGE;
+
+	/*
+	 * Switch to voltage corner regulator ops if qcom,use-voltage-corner
+	 * is specified in the device node (SMPS and LDO only).
+	 */
+	if (of_property_read_bool(node, "qcom,use-voltage-corner")) {
+		if (of_property_read_bool(node,
+				"qcom,use-voltage-floor-corner")) {
+			dev_err(dev, "%s: invalid properties: both qcom,use-voltage-corner and qcom,use-voltage-floor-corner specified\n",
+				__func__);
+			/*
+			 * Fix: rc was still 0 here, so probe reported success
+			 * after freeing reg and leaving drvdata unset.
+			 */
+			rc = -EINVAL;
+			goto fail_free_reg;
+		}
+
+		if (regulator_type == RPM_REGULATOR_TYPE_SMPS)
+			reg->rdesc.ops = &smps_corner_ops;
+		else if (regulator_type == RPM_REGULATOR_TYPE_LDO)
+			reg->rdesc.ops = &ldo_corner_ops;
+	} else if (of_property_read_bool(node,
+			"qcom,use-voltage-floor-corner")) {
+		if (regulator_type == RPM_REGULATOR_TYPE_SMPS)
+			reg->rdesc.ops = &smps_floor_corner_ops;
+		else if (regulator_type == RPM_REGULATOR_TYPE_LDO)
+			reg->rdesc.ops = &ldo_floor_corner_ops;
+	}
+
+	reg->always_send_voltage
+		= of_property_read_bool(node, "qcom,always-send-voltage");
+	reg->always_send_current
+		= of_property_read_bool(node, "qcom,always-send-current");
+
+	if (regulator_type == RPM_REGULATOR_TYPE_VS)
+		reg->rdesc.n_voltages = 0;
+	else
+		reg->rdesc.n_voltages = 2;
+
+	rc = of_property_read_u32(node, "qcom,set", &val);
+	if (rc) {
+		dev_err(dev, "%s: sleep set and/or active set must be configured via qcom,set property, rc=%d\n",
+			__func__, rc);
+		goto fail_free_reg;
+	} else if (!(val & RPM_SET_CONFIG_BOTH)) {
+		dev_err(dev, "%s: qcom,set=%u property is invalid\n", __func__,
+			val);
+		rc = -EINVAL;
+		goto fail_free_reg;
+	}
+
+	reg->set_active = !!(val & RPM_SET_CONFIG_ACTIVE);
+	reg->set_sleep = !!(val & RPM_SET_CONFIG_SLEEP);
+
+	init_data = of_get_regulator_init_data(dev, node);
+	if (init_data == NULL) {
+		dev_err(dev, "%s: unable to allocate memory\n", __func__);
+		rc = -ENOMEM;
+		goto fail_free_reg;
+	}
+	if (init_data->constraints.name == NULL) {
+		dev_err(dev, "%s: regulator name not specified\n", __func__);
+		rc = -EINVAL;
+		goto fail_free_reg;
+	}
+
+	init_data->constraints.input_uV = init_data->constraints.max_uV;
+
+	if (of_get_property(node, "parent-supply", NULL))
+		init_data->supply_regulator = "parent";
+
+	/*
+	 * Fill in ops and mode masks based on callbacks specified for
+	 * this type of regulator.
+	 */
+	if (reg->rdesc.ops->enable)
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_STATUS;
+	if (reg->rdesc.ops->get_voltage)
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_VOLTAGE;
+	if (reg->rdesc.ops->get_mode) {
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_DRMS;
+		init_data->constraints.valid_modes_mask
+			|= REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE;
+	}
+
+	reg->rdesc.name = init_data->constraints.name;
+	reg->min_uV = init_data->constraints.min_uV;
+	reg->max_uV = init_data->constraints.max_uV;
+
+	/* Initialize the param array based on optional properties. */
+	for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+		rc = of_property_read_u32(node, params[i].property_name, &val);
+		if (rc == 0) {
+			if (params[i].supported_regulator_types
+					& BIT(regulator_type)) {
+				if (val < params[i].min
+						|| val > params[i].max) {
+					pr_warn("%s: device tree property: %s=%u is outside allowed range [%u, %u]\n",
+						reg->rdesc.name,
+						params[i].property_name, val,
+						params[i].min, params[i].max);
+					continue;
+				}
+				reg->req.param[i] = val;
+				reg->req.modified |= BIT(i);
+			} else {
+				pr_warn("%s: regulator type=%d does not support device tree property: %s\n",
+					reg->rdesc.name, regulator_type,
+					params[i].property_name);
+			}
+		}
+	}
+
+	of_property_read_u32(node, "qcom,system-load", &reg->system_load);
+
+	rpm_vreg_lock(rpm_vreg);
+	list_add(&reg->list, &rpm_vreg->reg_list);
+	rpm_vreg_unlock(rpm_vreg);
+
+	reg_config.dev = dev;
+	reg_config.init_data = init_data;
+	reg_config.of_node = node;
+	reg_config.driver_data = reg;
+	reg->rdev = regulator_register(&reg->rdesc, &reg_config);
+	if (IS_ERR(reg->rdev)) {
+		rc = PTR_ERR(reg->rdev);
+		reg->rdev = NULL;
+		pr_err("regulator_register failed: %s, rc=%d\n",
+			reg->rdesc.name, rc);
+		goto fail_remove_from_list;
+	}
+
+	platform_set_drvdata(pdev, reg);
+
+	pr_debug("successfully probed: %s\n", reg->rdesc.name);
+
+	return 0;
+
+fail_remove_from_list:
+	rpm_vreg_lock(rpm_vreg);
+	list_del(&reg->list);
+	rpm_vreg_unlock(rpm_vreg);
+
+fail_free_reg:
+	kfree(reg);
+	return rc;
+}
+
+/*
+ * This probe is called for parent rpm-regulator devices which have
+ * properties which are required to identify a given RPM resource.
+ */
+static int rpm_vreg_resource_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = dev->of_node;
+	struct rpm_vreg *rpm_vreg;
+	u32 val = 0;
+	u32 resource_type;
+	int rc;
+
+	if (!dev->of_node) {
+		dev_err(dev, "%s: device tree information missing\n", __func__);
+		return -ENODEV;
+	}
+
+	/* Create new rpm_vreg entry. */
+	rpm_vreg = kzalloc(sizeof(struct rpm_vreg), GFP_KERNEL);
+	if (rpm_vreg == NULL) {
+		dev_err(dev, "%s: could not allocate memory for vreg\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	/* Required device tree properties: */
+	rc = of_property_read_string(node, "qcom,resource-name",
+			&rpm_vreg->resource_name);
+	if (rc) {
+		dev_err(dev, "%s: qcom,resource-name missing in DT node\n",
+			__func__);
+		goto fail_free_vreg;
+	}
+	resource_type = rpm_vreg_string_to_int(rpm_vreg->resource_name);
+
+	rc = of_property_read_u32(node, "qcom,resource-id",
+			&rpm_vreg->resource_id);
+	if (rc) {
+		dev_err(dev, "%s: qcom,resource-id missing in DT node\n",
+			__func__);
+		goto fail_free_vreg;
+	}
+
+	rc = of_property_read_u32(node, "qcom,regulator-type",
+			&rpm_vreg->regulator_type);
+	if (rc) {
+		dev_err(dev, "%s: qcom,regulator-type missing in DT node\n",
+			__func__);
+		goto fail_free_vreg;
+	}
+
+	if ((rpm_vreg->regulator_type < 0)
+	    || (rpm_vreg->regulator_type >= RPM_REGULATOR_TYPE_MAX)) {
+		dev_err(dev, "%s: invalid regulator type: %d\n", __func__,
+			rpm_vreg->regulator_type);
+		rc = -EINVAL;
+		goto fail_free_vreg;
+	}
+
+	/* Optional device tree properties: */
+	of_property_read_u32(node, "qcom,allow-atomic", &val);
+	rpm_vreg->allow_atomic = !!val;
+	of_property_read_u32(node, "qcom,enable-time", &rpm_vreg->enable_time);
+	of_property_read_u32(node, "qcom,hpm-min-load",
+		&rpm_vreg->hpm_min_load);
+	rpm_vreg->apps_only = of_property_read_bool(node, "qcom,apps-only");
+
+	/*
+	 * Fix: msm_rpm_create_request() may return NULL, and PTR_ERR(NULL)
+	 * is 0, which made probe return success on failure. Map NULL to a
+	 * real error code before bailing out.
+	 */
+	rpm_vreg->handle_active = msm_rpm_create_request(RPM_SET_ACTIVE,
+		resource_type, rpm_vreg->resource_id, RPM_REGULATOR_PARAM_MAX);
+	if (IS_ERR_OR_NULL(rpm_vreg->handle_active)) {
+		rc = rpm_vreg->handle_active ?
+			PTR_ERR(rpm_vreg->handle_active) : -ENOMEM;
+		dev_err(dev, "%s: failed to create active RPM handle, rc=%d\n",
+			__func__, rc);
+		goto fail_free_vreg;
+	}
+
+	rpm_vreg->handle_sleep = msm_rpm_create_request(RPM_SET_SLEEP,
+		resource_type, rpm_vreg->resource_id, RPM_REGULATOR_PARAM_MAX);
+	if (IS_ERR_OR_NULL(rpm_vreg->handle_sleep)) {
+		rc = rpm_vreg->handle_sleep ?
+			PTR_ERR(rpm_vreg->handle_sleep) : -ENOMEM;
+		dev_err(dev, "%s: failed to create sleep RPM handle, rc=%d\n",
+			__func__, rc);
+		goto fail_free_handle_active;
+	}
+
+	INIT_LIST_HEAD(&rpm_vreg->reg_list);
+
+	/* Atomic consumers require a spinlock instead of a mutex. */
+	if (rpm_vreg->allow_atomic)
+		spin_lock_init(&rpm_vreg->slock);
+	else
+		mutex_init(&rpm_vreg->mlock);
+
+	platform_set_drvdata(pdev, rpm_vreg);
+
+	rc = of_platform_populate(node, NULL, NULL, dev);
+	if (rc) {
+		dev_err(dev, "%s: failed to add child nodes, rc=%d\n", __func__,
+			rc);
+		goto fail_unset_drvdata;
+	}
+
+	pr_debug("successfully probed: %s (%08X) %u\n", rpm_vreg->resource_name,
+		resource_type, rpm_vreg->resource_id);
+
+	return rc;
+
+fail_unset_drvdata:
+	platform_set_drvdata(pdev, NULL);
+	msm_rpm_free_request(rpm_vreg->handle_sleep);
+
+fail_free_handle_active:
+	msm_rpm_free_request(rpm_vreg->handle_active);
+
+fail_free_vreg:
+	kfree(rpm_vreg);
+
+	return rc;
+}
+
+/* OF match table for child (per-regulator) devices. */
+static struct of_device_id rpm_vreg_match_table_device[] = {
+ { .compatible = "qcom,rpm-smd-regulator", },
+ {}
+};
+
+/* OF match table for parent (per-RPM-resource) devices. */
+static struct of_device_id rpm_vreg_match_table_resource[] = {
+ { .compatible = "qcom,rpm-smd-regulator-resource", },
+ {}
+};
+
+/* Driver for child devices; binds one framework regulator each. */
+static struct platform_driver rpm_vreg_device_driver = {
+ .probe = rpm_vreg_device_probe,
+ .remove = rpm_vreg_device_remove,
+ .driver = {
+ .name = "qcom,rpm-smd-regulator",
+ .owner = THIS_MODULE,
+ .of_match_table = rpm_vreg_match_table_device,
+ },
+};
+
+/* Driver for parent devices; owns the shared RPM resource state. */
+static struct platform_driver rpm_vreg_resource_driver = {
+ .probe = rpm_vreg_resource_probe,
+ .remove = rpm_vreg_resource_remove,
+ .driver = {
+ .name = "qcom,rpm-smd-regulator-resource",
+ .owner = THIS_MODULE,
+ .of_match_table = rpm_vreg_match_table_resource,
+ },
+};
+
+/**
+ * rpm_smd_regulator_driver_init() - initialize the RPM SMD regulator drivers
+ *
+ * This function registers the RPM SMD regulator platform drivers. It is safe
+ * to call more than once; registration happens only on the first call.
+ *
+ * Returns 0 on success or errno on failure.
+ */
+int __init rpm_smd_regulator_driver_init(void)
+{
+	static bool initialized;
+	int i, rc;
+
+	if (initialized)
+		return 0;
+	initialized = true;
+
+	/* Store parameter string names as integers */
+	for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++)
+		params[i].key = rpm_vreg_string_to_int(params[i].name);
+
+	rc = platform_driver_register(&rpm_vreg_device_driver);
+	if (rc)
+		return rc;
+
+	rc = platform_driver_register(&rpm_vreg_resource_driver);
+	if (rc)
+		/* Don't leave the first driver registered on failure. */
+		platform_driver_unregister(&rpm_vreg_device_driver);
+
+	return rc;
+}
+EXPORT_SYMBOL(rpm_smd_regulator_driver_init);
+
+/* Module teardown: unregister both platform drivers. */
+static void __exit rpm_vreg_exit(void)
+{
+ platform_driver_unregister(&rpm_vreg_device_driver);
+ platform_driver_unregister(&rpm_vreg_resource_driver);
+}
+
+/* Register early so regulators are available to other arch_initcall users. */
+arch_initcall(rpm_smd_regulator_driver_init);
+module_exit(rpm_vreg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM RPM SMD regulator driver");
diff --git a/drivers/regulator/spm-regulator.c b/drivers/regulator/spm-regulator.c
new file mode 100644
index 000000000000..559a56e6737b
--- /dev/null
+++ b/drivers/regulator/spm-regulator.c
@@ -0,0 +1,641 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/string.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/spm-regulator.h>
+#include <soc/qcom/spm.h>
+
+#define SPM_REGULATOR_DRIVER_NAME "qcom,spm-regulator"
+
+/* One selectable hardware voltage range of an SMPS. */
+struct voltage_range {
+ int min_uV; /* range lower bound used for VSET math */
+ int set_point_min_uV; /* lowest usable output set point */
+ int max_uV; /* range upper bound */
+ int step_uV; /* set-point granularity */
+};
+
+/* QPNP peripheral TYPE register values for supported SMPS variants. */
+enum qpnp_regulator_type {
+ QPNP_FTS2_TYPE = 0x1C,
+ QPNP_ULT_HF_TYPE = 0x22,
+};
+
+/* QPNP peripheral SUBTYPE register values for supported SMPS variants. */
+enum qpnp_regulator_subtype {
+ QPNP_FTS2_SUBTYPE = 0x08,
+ QPNP_ULT_HF_SUBTYPE = 0x0D,
+};
+
+/* Properties for FTS2 type QPNP PMIC regulators. */
+
+static const struct voltage_range fts2_range0 = {0, 350000, 1275000, 5000};
+static const struct voltage_range fts2_range1 = {0, 700000, 2040000, 10000};
+static const struct voltage_range ult_hf_range0 = {375000, 375000, 1562500,
+ 12500};
+static const struct voltage_range ult_hf_range1 = {750000, 750000, 1525000,
+ 25000};
+
+#define QPNP_SMPS_REG_TYPE 0x04
+#define QPNP_SMPS_REG_SUBTYPE 0x05
+#define QPNP_FTS2_REG_VOLTAGE_RANGE 0x40
+#define QPNP_SMPS_REG_VOLTAGE_SETPOINT 0x41
+#define QPNP_SMPS_REG_MODE 0x45
+#define QPNP_SMPS_REG_STEP_CTRL 0x61
+
+#define QPNP_SMPS_MODE_PWM 0x80
+#define QPNP_FTS2_MODE_AUTO 0x40
+
+#define QPNP_FTS2_STEP_CTRL_STEP_MASK 0x18
+#define QPNP_FTS2_STEP_CTRL_STEP_SHIFT 3
+#define QPNP_SMPS_STEP_CTRL_DELAY_MASK 0x07
+#define QPNP_SMPS_STEP_CTRL_DELAY_SHIFT 0
+
+/* Clock rate in kHz of the FTS2 regulator reference clock. */
+#define QPNP_FTS2_CLOCK_RATE 19200
+
+/* Time to delay in us to ensure that a mode change has completed. */
+#define QPNP_FTS2_MODE_CHANGE_DELAY 50
+
+/* Minimum time in us that it takes to complete a single SPMI write. */
+#define QPNP_SPMI_WRITE_MIN_DELAY 8
+
+/* Minimum voltage stepper delay for each step. */
+#define QPNP_FTS2_STEP_DELAY 8
+#define QPNP_ULT_HF_STEP_DELAY 20
+
+/*
+ * The ratio QPNP_FTS2_STEP_MARGIN_NUM/QPNP_FTS2_STEP_MARGIN_DEN is use to
+ * adjust the step rate in order to account for oscillator variance.
+ */
+#define QPNP_FTS2_STEP_MARGIN_NUM 4
+#define QPNP_FTS2_STEP_MARGIN_DEN 5
+
+/* VSET value to decide the range of ULT SMPS */
+#define ULT_SMPS_RANGE_SPLIT 0x60
+
+/* Per-regulator state for an SPM-controlled SMPS. */
+struct spm_vreg {
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+ struct spmi_device *spmi_dev;
+ const struct voltage_range *range; /* fixed at probe from hardware */
+ int uV; /* last requested voltage */
+ int last_set_uV; /* last voltage actually programmed */
+ unsigned vlevel; /* VSET encoding of uV */
+ unsigned last_set_vlevel; /* VSET encoding last programmed */
+ bool online; /* software enable state */
+ u16 spmi_base_addr;
+ u8 init_mode; /* mode programmed/read at probe */
+ int step_rate; /* voltage slew rate in uV/us */
+ enum qpnp_regulator_type regulator_type;
+ u32 cpu_num; /* CPU whose SPM drives this supply */
+};
+
+/* Write the FTS2 mode register (PWM/AUTO) over SPMI. */
+static int qpnp_fts2_set_mode(struct spm_vreg *vreg, u8 mode)
+{
+	int rc;
+
+	rc = spmi_ext_register_writel(vreg->spmi_dev,
+		vreg->spmi_base_addr + QPNP_SMPS_REG_MODE, &mode, 1);
+	if (rc)
+		dev_err(&vreg->spmi_dev->dev, "%s: could not write to mode register, rc=%d\n",
+			__func__, rc);
+
+	return rc;
+}
+
+/*
+ * Program the cached vlevel into hardware via the SPM and wait for the
+ * output to settle. For FTS2 parts not already in PWM mode, voltage
+ * increases are bracketed by a temporary switch to PWM mode so ramping
+ * is fast, then back to AUTO to save power.
+ */
+static int _spm_regulator_set_voltage(struct regulator_dev *rdev)
+{
+ struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+ int rc;
+
+ /* Nothing to do if hardware already holds this set point. */
+ if (vreg->vlevel == vreg->last_set_vlevel)
+ return 0;
+
+ if ((vreg->regulator_type == QPNP_FTS2_TYPE)
+ && !(vreg->init_mode & QPNP_SMPS_MODE_PWM)
+ && vreg->uV > vreg->last_set_uV) {
+ /* Switch to PWM mode so that voltage ramping is fast. */
+ rc = qpnp_fts2_set_mode(vreg, QPNP_SMPS_MODE_PWM);
+ if (rc)
+ return rc;
+ }
+
+ rc = msm_spm_set_vdd(vreg->cpu_num, vreg->vlevel);
+ if (rc) {
+ pr_err("%s: msm_spm_set_vdd failed %d\n", vreg->rdesc.name, rc);
+ return rc;
+ }
+
+ if (vreg->uV > vreg->last_set_uV) {
+ /* Wait for voltage stepping to complete. */
+ udelay(DIV_ROUND_UP(vreg->uV - vreg->last_set_uV,
+ vreg->step_rate));
+ }
+
+ if ((vreg->regulator_type == QPNP_FTS2_TYPE)
+ && !(vreg->init_mode & QPNP_SMPS_MODE_PWM)
+ && vreg->uV > vreg->last_set_uV) {
+ /* Wait for mode transition to complete. */
+ udelay(QPNP_FTS2_MODE_CHANGE_DELAY - QPNP_SPMI_WRITE_MIN_DELAY);
+ /* Switch to AUTO mode so that power consumption is lowered. */
+ rc = qpnp_fts2_set_mode(vreg, QPNP_FTS2_MODE_AUTO);
+ if (rc)
+ return rc;
+ }
+
+ /* Remember what hardware now holds to skip redundant updates. */
+ vreg->last_set_uV = vreg->uV;
+ vreg->last_set_vlevel = vreg->vlevel;
+
+ return rc;
+}
+
+/*
+ * regulator_ops set_voltage callback: map [min_uV, max_uV] onto the
+ * hardware range, cache the resulting VSET level, and program it if the
+ * regulator is currently online.
+ */
+static int spm_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
+ int max_uV, unsigned *selector)
+{
+ struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+ const struct voltage_range *range = vreg->range;
+ int uV = min_uV;
+ unsigned vlevel;
+
+ /* Bump up to the lowest usable set point if the window permits. */
+ if (uV < range->set_point_min_uV && max_uV >= range->set_point_min_uV)
+ uV = range->set_point_min_uV;
+
+ if (uV < range->set_point_min_uV || uV > range->max_uV) {
+ pr_err("%s: request v=[%d, %d] is outside possible v=[%d, %d]\n",
+ vreg->rdesc.name, min_uV, max_uV,
+ range->set_point_min_uV, range->max_uV);
+ return -EINVAL;
+ }
+
+ /* Round up to the next set point and recompute the exact voltage. */
+ vlevel = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
+ uV = vlevel * range->step_uV + range->min_uV;
+
+ if (uV > max_uV) {
+ pr_err("%s: request v=[%d, %d] cannot be met by any set point\n",
+ vreg->rdesc.name, min_uV, max_uV);
+ return -EINVAL;
+ }
+
+ /* Selector is relative to the first usable set point of the range. */
+ *selector = vlevel -
+ (vreg->range->set_point_min_uV - vreg->range->min_uV)
+ / vreg->range->step_uV;
+
+ /* Fix VSET for ULT HF Buck */
+ if ((vreg->regulator_type == QPNP_ULT_HF_TYPE) &&
+ (range == &ult_hf_range1)) {
+
+ /* Range 1 uses a 5-bit VSET with the range-select bit set. */
+ vlevel &= 0x1F;
+ vlevel |= ULT_SMPS_RANGE_SPLIT;
+ }
+
+ vreg->vlevel = vlevel;
+ vreg->uV = uV;
+
+ /* Defer the hardware write until the regulator is enabled. */
+ if (!vreg->online)
+ return 0;
+
+ return _spm_regulator_set_voltage(rdev);
+}
+
+/* Report the cached output voltage last requested for this regulator. */
+static int spm_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	const struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->uV;
+}
+
+/* Map a selector to its voltage; 0 for out-of-range selectors. */
+static int spm_regulator_list_voltage(struct regulator_dev *rdev,
+					unsigned selector)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	if (selector < vreg->rdesc.n_voltages)
+		return vreg->range->set_point_min_uV
+			+ selector * vreg->range->step_uV;
+
+	return 0;
+}
+
+/* Enable: program the cached voltage, then mark the regulator online. */
+static int spm_regulator_enable(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+	int rc = _spm_regulator_set_voltage(rdev);
+
+	if (rc == 0)
+		vreg->online = true;
+
+	return rc;
+}
+
+/* Disable: software-only; the supply itself is never switched off. */
+static int spm_regulator_disable(struct regulator_dev *rdev)
+{
+	struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	vreg->online = false;
+	return 0;
+}
+
+/* Report the software enable state tracked by enable/disable. */
+static int spm_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	const struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->online;
+}
+
+/* regulator_ops for SPM-controlled SMPS supplies. */
+static struct regulator_ops spm_regulator_ops = {
+ .get_voltage = spm_regulator_get_voltage,
+ .set_voltage = spm_regulator_set_voltage,
+ .list_voltage = spm_regulator_list_voltage,
+ .enable = spm_regulator_enable,
+ .disable = spm_regulator_disable,
+ .is_enabled = spm_regulator_is_enabled,
+};
+
+/*
+ * Read the peripheral TYPE/SUBTYPE registers and verify this is a
+ * supported SMPS variant; records the type in vreg->regulator_type.
+ * Returns 0 on success, -ENODEV for unrecognized hardware, or the SPMI
+ * read error.
+ */
+static int qpnp_smps_check_type(struct spm_vreg *vreg)
+{
+	int rc;
+	u8 type[2];
+
+	rc = spmi_ext_register_readl(vreg->spmi_dev,
+		vreg->spmi_base_addr + QPNP_SMPS_REG_TYPE, type, 2);
+	if (rc) {
+		dev_err(&vreg->spmi_dev->dev, "%s: could not read type register, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	switch (type[0]) {
+	case QPNP_FTS2_TYPE:
+		if (type[1] != QPNP_FTS2_SUBTYPE) {
+			dev_err(&vreg->spmi_dev->dev, "%s: invalid subtype=0x%02X register\n",
+				__func__, type[1]);
+			return -ENODEV;
+		}
+		vreg->regulator_type = QPNP_FTS2_TYPE;
+		break;
+	case QPNP_ULT_HF_TYPE:
+		if (type[1] != QPNP_ULT_HF_SUBTYPE) {
+			dev_err(&vreg->spmi_dev->dev, "%s: invalid subtype=0x%02X register\n",
+				__func__, type[1]);
+			return -ENODEV;
+		}
+		vreg->regulator_type = QPNP_ULT_HF_TYPE;
+		break;
+	default:
+		dev_err(&vreg->spmi_dev->dev, "%s: invalid type=0x%02X register\n",
+			__func__, type[0]);
+		return -ENODEV;
+	}
+
+	return rc;
+}
+
+/*
+ * Determine which hardware voltage range the FTS2 was configured with
+ * at PMIC power-on and cache the matching range table.
+ */
+static int qpnp_fts2_init_range(struct spm_vreg *vreg)
+{
+	u8 range_sel = 0;
+	int rc;
+
+	rc = spmi_ext_register_readl(vreg->spmi_dev,
+		vreg->spmi_base_addr + QPNP_FTS2_REG_VOLTAGE_RANGE,
+		&range_sel, 1);
+	if (rc) {
+		dev_err(&vreg->spmi_dev->dev, "%s: could not read voltage range register, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	switch (range_sel) {
+	case 0x00:
+		vreg->range = &fts2_range0;
+		break;
+	case 0x01:
+		vreg->range = &fts2_range1;
+		break;
+	default:
+		dev_err(&vreg->spmi_dev->dev, "%s: voltage range=%d is invalid\n",
+			__func__, range_sel);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * Determine the ULT HF buck's active range. ULT HF has no dedicated
+ * range register; the range is inferred from the VSET (voltage
+ * setpoint) register: values at or above ULT_SMPS_RANGE_SPLIT select
+ * range 1.
+ */
+static int qpnp_ult_hf_init_range(struct spm_vreg *vreg)
+{
+	int rc;
+	u8 reg = 0;
+
+	rc = spmi_ext_register_readl(vreg->spmi_dev,
+		vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_SETPOINT,
+		&reg, 1);
+	if (rc) {
+		/* Fix: this reads the setpoint register, not a range register. */
+		dev_err(&vreg->spmi_dev->dev, "%s: could not read voltage setpoint register, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	vreg->range = (reg < ULT_SMPS_RANGE_SPLIT) ? &ult_hf_range0 :
+			&ult_hf_range1;
+	return rc;
+}
+
+/*
+ * Read the current VSET register and derive the regulator's present
+ * output voltage so software state starts in sync with hardware.
+ */
+static int qpnp_smps_init_voltage(struct spm_vreg *vreg)
+{
+ int rc;
+ u8 reg = 0;
+
+ rc = spmi_ext_register_readl(vreg->spmi_dev, vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_SETPOINT, &reg, 1);
+ if (rc) {
+ dev_err(&vreg->spmi_dev->dev, "%s: could not read voltage setpoint register, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ vreg->vlevel = reg;
+ /*
+ * Calculate ULT HF buck VSET based on range:
+ * In case of range 0: VSET is a 7 bit value.
+ * In case of range 1: VSET is a 5 bit value
+ *
+ */
+ if ((vreg->regulator_type == QPNP_ULT_HF_TYPE) &&
+ (vreg->range == &ult_hf_range1))
+ vreg->vlevel &= ~ULT_SMPS_RANGE_SPLIT;
+
+ /* Hardware is the source of truth at probe: seed both caches. */
+ vreg->uV = vreg->vlevel * vreg->range->step_uV + vreg->range->min_uV;
+ vreg->last_set_uV = vreg->uV;
+
+ return rc;
+}
+
+/*
+ * Initialize the SMPS operating mode: if the device tree specifies
+ * qcom,mode ("pwm", or "auto" for FTS2 only) program it into hardware;
+ * otherwise read back the mode hardware already holds.
+ */
+static int qpnp_smps_init_mode(struct spm_vreg *vreg)
+{
+ const char *mode_name;
+ int rc;
+
+ rc = of_property_read_string(vreg->spmi_dev->dev.of_node, "qcom,mode",
+ &mode_name);
+ if (!rc) {
+ if (strcmp("pwm", mode_name) == 0) {
+ vreg->init_mode = QPNP_SMPS_MODE_PWM;
+ } else if ((strcmp("auto", mode_name) == 0) &&
+ (vreg->regulator_type == QPNP_FTS2_TYPE)) {
+ /* AUTO mode is only supported by FTS2 parts. */
+ vreg->init_mode = QPNP_FTS2_MODE_AUTO;
+ } else {
+ dev_err(&vreg->spmi_dev->dev, "%s: unknown regulator mode: %s\n",
+ __func__, mode_name);
+ return -EINVAL;
+ }
+
+ rc = spmi_ext_register_writel(vreg->spmi_dev,
+ vreg->spmi_base_addr + QPNP_SMPS_REG_MODE,
+ &vreg->init_mode, 1);
+ if (rc)
+ dev_err(&vreg->spmi_dev->dev, "%s: could not write mode register, rc=%d\n",
+ __func__, rc);
+ } else {
+ /* No DT override: preserve whatever mode hardware is in. */
+ rc = spmi_ext_register_readl(vreg->spmi_dev,
+ vreg->spmi_base_addr + QPNP_SMPS_REG_MODE,
+ &vreg->init_mode, 1);
+ if (rc)
+ dev_err(&vreg->spmi_dev->dev, "%s: could not read mode register, rc=%d\n",
+ __func__, rc);
+ }
+
+ return rc;
+}
+
+/*
+ * Compute the voltage stepper slew rate (uV/us) from the step control
+ * register so voltage-change waits in _spm_regulator_set_voltage() can
+ * be sized correctly. A safety margin is applied for oscillator
+ * variance, and the result is clamped to at least 1 uV/us.
+ */
+static int qpnp_smps_init_step_rate(struct spm_vreg *vreg)
+{
+ int rc;
+ u8 reg = 0;
+ int step = 0, delay;
+
+ rc = spmi_ext_register_readl(vreg->spmi_dev,
+ vreg->spmi_base_addr + QPNP_SMPS_REG_STEP_CTRL, &reg, 1);
+ if (rc) {
+ dev_err(&vreg->spmi_dev->dev, "%s: could not read stepping control register, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ /* ULT buck does not support steps */
+ if (vreg->regulator_type != QPNP_ULT_HF_TYPE)
+ step = (reg & QPNP_FTS2_STEP_CTRL_STEP_MASK)
+ >> QPNP_FTS2_STEP_CTRL_STEP_SHIFT;
+
+ delay = (reg & QPNP_SMPS_STEP_CTRL_DELAY_MASK)
+ >> QPNP_SMPS_STEP_CTRL_DELAY_SHIFT;
+
+ /* step_rate has units of uV/us. */
+ vreg->step_rate = QPNP_FTS2_CLOCK_RATE * vreg->range->step_uV
+ * (1 << step);
+
+ /* Divide by the per-variant step delay scaled by the delay field. */
+ if (vreg->regulator_type == QPNP_ULT_HF_TYPE)
+ vreg->step_rate /= 1000 * (QPNP_ULT_HF_STEP_DELAY << delay);
+ else
+ vreg->step_rate /= 1000 * (QPNP_FTS2_STEP_DELAY << delay);
+
+ /* Derate to tolerate reference clock variance. */
+ vreg->step_rate = vreg->step_rate * QPNP_FTS2_STEP_MARGIN_NUM
+ / QPNP_FTS2_STEP_MARGIN_DEN;
+
+ /* Ensure that the stepping rate is greater than 0. */
+ vreg->step_rate = max(vreg->step_rate, 1);
+
+ return rc;
+}
+
+/*
+ * Probe an SPM-controlled SMPS: identify the QPNP hardware variant,
+ * read back its power-on range, voltage, mode and slew-rate settings,
+ * and register a regulator framework device for it. Defers if the SPM
+ * driver has not probed yet.
+ */
+static int spm_regulator_probe(struct spmi_device *spmi)
+{
+	struct regulator_config reg_config = {};
+	struct device_node *node = spmi->dev.of_node;
+	struct regulator_init_data *init_data;
+	struct spm_vreg *vreg;
+	struct resource *res;
+	int rc;
+
+	if (!node) {
+		dev_err(&spmi->dev, "%s: device node missing\n", __func__);
+		return -ENODEV;
+	}
+
+	rc = msm_spm_probe_done();
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			dev_err(&spmi->dev, "%s: spm unavailable, rc=%d\n",
+				__func__, rc);
+		return rc;
+	}
+
+	vreg = devm_kzalloc(&spmi->dev, sizeof(*vreg), GFP_KERNEL);
+	if (!vreg) {
+		pr_err("allocation failed.\n");
+		return -ENOMEM;
+	}
+	vreg->spmi_dev = spmi;
+
+	res = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&spmi->dev, "%s: node is missing base address\n",
+			__func__);
+		return -EINVAL;
+	}
+	vreg->spmi_base_addr = res->start;
+
+	rc = qpnp_smps_check_type(vreg);
+	if (rc)
+		return rc;
+
+	/* Specify CPU 0 as default in order to handle shared regulator case. */
+	vreg->cpu_num = 0;
+	of_property_read_u32(vreg->spmi_dev->dev.of_node, "qcom,cpu-num",
+		&vreg->cpu_num);
+
+	/*
+	 * The regulator must be initialized to range 0 or range 1 during
+	 * PMIC power on sequence. Once it is set, it cannot be changed
+	 * dynamically.
+	 */
+	if (vreg->regulator_type == QPNP_FTS2_TYPE)
+		rc = qpnp_fts2_init_range(vreg);
+	else if (vreg->regulator_type == QPNP_ULT_HF_TYPE)
+		rc = qpnp_ult_hf_init_range(vreg);
+	if (rc)
+		return rc;
+
+	rc = qpnp_smps_init_voltage(vreg);
+	if (rc)
+		return rc;
+
+	rc = qpnp_smps_init_mode(vreg);
+	if (rc)
+		return rc;
+
+	rc = qpnp_smps_init_step_rate(vreg);
+	if (rc)
+		return rc;
+
+	init_data = of_get_regulator_init_data(&spmi->dev, node);
+	if (!init_data) {
+		dev_err(&spmi->dev, "%s: unable to allocate memory\n",
+			__func__);
+		return -ENOMEM;
+	}
+	init_data->constraints.input_uV = init_data->constraints.max_uV;
+	init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS
+		| REGULATOR_CHANGE_VOLTAGE;
+
+	if (!init_data->constraints.name) {
+		dev_err(&spmi->dev, "%s: node is missing regulator name\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	vreg->rdesc.name = init_data->constraints.name;
+	vreg->rdesc.type = REGULATOR_VOLTAGE;
+	vreg->rdesc.owner = THIS_MODULE;
+	vreg->rdesc.ops = &spm_regulator_ops;
+	vreg->rdesc.n_voltages
+		= (vreg->range->max_uV - vreg->range->set_point_min_uV)
+			/ vreg->range->step_uV + 1;
+
+	reg_config.dev = &spmi->dev;
+	reg_config.init_data = init_data;
+	reg_config.driver_data = vreg;
+	reg_config.of_node = node;
+	vreg->rdev = regulator_register(&vreg->rdesc, &reg_config);
+	if (IS_ERR(vreg->rdev)) {
+		rc = PTR_ERR(vreg->rdev);
+		dev_err(&spmi->dev, "%s: regulator_register failed, rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	dev_set_drvdata(&spmi->dev, vreg);
+
+	pr_info("name=%s, range=%s, voltage=%d uV, mode=%s, step rate=%d uV/us\n",
+		vreg->rdesc.name, vreg->range == &fts2_range0 ? "LV" : "MV",
+		vreg->uV,
+		vreg->init_mode & QPNP_SMPS_MODE_PWM ? "PWM" :
+		    (vreg->init_mode & QPNP_FTS2_MODE_AUTO ? "AUTO" : "PFM"),
+		vreg->step_rate);
+
+	return rc;
+}
+
+/* Unregister the regulator; vreg memory itself is devm-managed. */
+static void spm_regulator_remove(struct spmi_device *spmi)
+{
+ struct spm_vreg *vreg = dev_get_drvdata(&spmi->dev);
+
+ regulator_unregister(vreg->rdev);
+}
+
+/* OF match table; devices bind via compatible = "qcom,spm-regulator". */
+static struct of_device_id spm_regulator_match_table[] = {
+ { .compatible = SPM_REGULATOR_DRIVER_NAME, },
+ {}
+};
+
+static const struct spmi_device_id spm_regulator_id[] = {
+ { SPM_REGULATOR_DRIVER_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(spmi, spm_regulator_id);
+
+static struct spmi_driver spm_regulator_driver = {
+ .driver = {
+ .name = SPM_REGULATOR_DRIVER_NAME,
+ .of_match_table = spm_regulator_match_table,
+ .owner = THIS_MODULE,
+ },
+ .probe = spm_regulator_probe,
+ .remove = spm_regulator_remove,
+/* NOTE(review): id_table registration disabled — confirm whether spm_regulator_id should be wired up or removed. */
+// .id_table = spm_regulator_id,
+};
+
+/**
+ * spm_regulator_init() - register spmi driver for spm-regulator
+ *
+ * This initialization function should be called in systems in which driver
+ * registration ordering must be controlled precisely. It is safe to call
+ * more than once; registration happens only on the first call.
+ *
+ * Returns 0 on success or errno on failure.
+ */
+int __init spm_regulator_init(void)
+{
+	static bool has_registered;
+
+	if (has_registered)
+		return 0;
+	has_registered = true;
+
+	return spmi_driver_register(&spm_regulator_driver);
+}
+EXPORT_SYMBOL(spm_regulator_init);
+
+/* Module teardown: unregister the SPMI driver. */
+static void __exit spm_regulator_exit(void)
+{
+ spmi_driver_unregister(&spm_regulator_driver);
+}
+
+/* NOTE(review): initcall disabled; spm_regulator_init() must be invoked explicitly by platform code — confirm intent. */
+//arch_initcall(spm_regulator_init);
+module_exit(spm_regulator_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SPM regulator driver");
+MODULE_ALIAS("platform:spm-regulator");
diff --git a/drivers/regulator/stub-regulator.c b/drivers/regulator/stub-regulator.c
new file mode 100644
index 000000000000..04302f05f685
--- /dev/null
+++ b/drivers/regulator/stub-regulator.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/stub-regulator.h>
+
+#define STUB_REGULATOR_MAX_NAME 40
+
+/* Per-regulator software state for a stub (no-hardware) regulator. */
+struct regulator_stub {
+	struct regulator_desc	rdesc;
+	struct regulator_dev	*rdev;
+	int			voltage;	/* last requested voltage, in uV */
+	bool			enabled;	/* software-only enable state */
+	int			mode;		/* REGULATOR_MODE_NORMAL or _IDLE */
+	int			hpm_min_load;	/* uA threshold for NORMAL mode */
+	int			system_uA;	/* baseline load added to requests */
+	char			name[STUB_REGULATOR_MAX_NAME];
+};
+
+/* Record the requested voltage (always the range minimum); no HW access. */
+static int regulator_stub_set_voltage(struct regulator_dev *rdev, int min_uV,
+				  int max_uV, unsigned *selector)
+{
+	struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+	vreg_priv->voltage = min_uV;
+	return 0;
+}
+
+/* Report the last voltage recorded by set_voltage(). */
+static int regulator_stub_get_voltage(struct regulator_dev *rdev)
+{
+	struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+	return vreg_priv->voltage;
+}
+
+/*
+ * Expose exactly two selectable voltages: the constraint minimum
+ * (selector 0) and the constraint maximum (selector 1).
+ */
+static int regulator_stub_list_voltage(struct regulator_dev *rdev,
+				    unsigned selector)
+{
+	struct regulation_constraints *constraints = rdev->constraints;
+
+	if (selector >= 2)
+		return -EINVAL;
+	else if (selector == 0)
+		return constraints->min_uV;
+	else
+		return constraints->max_uV;
+}
+
+/* Report the cached operating mode. */
+static unsigned int regulator_stub_get_mode(struct regulator_dev *rdev)
+{
+	struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+	return vreg_priv->mode;
+}
+
+/* Accept only NORMAL or IDLE mode and cache it; no hardware to program. */
+static int regulator_stub_set_mode(struct regulator_dev *rdev,
+				   unsigned int mode)
+{
+	struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+
+	if (mode != REGULATOR_MODE_NORMAL && mode != REGULATOR_MODE_IDLE) {
+		dev_err(&rdev->dev, "%s: invalid mode requested %u\n",
+							__func__, mode);
+		return -EINVAL;
+	}
+	vreg_priv->mode = mode;
+	return 0;
+}
+
+/*
+ * Pick NORMAL (high-power) mode when the total load — client request plus
+ * the static system load — meets the HPM threshold; otherwise IDLE.
+ */
+static unsigned int regulator_stub_get_optimum_mode(struct regulator_dev *rdev,
+		int input_uV, int output_uV, int load_uA)
+{
+	struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+	unsigned int mode;
+
+	if (load_uA + vreg_priv->system_uA >= vreg_priv->hpm_min_load)
+		mode = REGULATOR_MODE_NORMAL;
+	else
+		mode = REGULATOR_MODE_IDLE;
+
+	return mode;
+}
+
+/* Mark the regulator enabled in software only. */
+static int regulator_stub_enable(struct regulator_dev *rdev)
+{
+	struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+	vreg_priv->enabled = true;
+	return 0;
+}
+
+/* Mark the regulator disabled in software only. */
+static int regulator_stub_disable(struct regulator_dev *rdev)
+{
+	struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+	vreg_priv->enabled = false;
+	return 0;
+}
+
+/* Report the software enable state. */
+static int regulator_stub_is_enabled(struct regulator_dev *rdev)
+{
+	struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+	return vreg_priv->enabled;
+}
+
+/*
+ * Regulator ops backed purely by driver-private state — no hardware is
+ * touched, so the stub can stand in for regulators during bring-up.
+ */
+static struct regulator_ops regulator_stub_ops = {
+	.enable			= regulator_stub_enable,
+	.disable		= regulator_stub_disable,
+	.is_enabled		= regulator_stub_is_enabled,
+	.set_voltage		= regulator_stub_set_voltage,
+	.get_voltage		= regulator_stub_get_voltage,
+	.list_voltage		= regulator_stub_list_voltage,
+	.set_mode		= regulator_stub_set_mode,
+	.get_mode		= regulator_stub_get_mode,
+	.get_optimum_mode	= regulator_stub_get_optimum_mode,
+};
+
+/* Free the stub regulator, unregistering it first if registration succeeded. */
+static void regulator_stub_cleanup(struct regulator_stub *vreg_priv)
+{
+	if (vreg_priv && vreg_priv->rdev)
+		regulator_unregister(vreg_priv->rdev);
+	kfree(vreg_priv);
+}
+
+/*
+ * Probe: build a software-only regulator from either device tree or legacy
+ * platform data and register it with the regulator framework.
+ *
+ * Returns 0 on success or a negative errno; -EPROBE_DEFER from
+ * regulator_register() is passed through silently so the driver core retries.
+ */
+static int regulator_stub_probe(struct platform_device *pdev)
+{
+	struct regulator_config reg_config = {};
+	struct regulator_init_data *init_data = NULL;
+	struct device *dev = &pdev->dev;
+	struct stub_regulator_pdata *vreg_pdata;
+	struct regulator_desc *rdesc;
+	struct regulator_stub *vreg_priv;
+	int rc;
+
+	vreg_priv = kzalloc(sizeof(*vreg_priv), GFP_KERNEL);
+	if (!vreg_priv) {
+		dev_err(dev, "%s: Unable to allocate memory\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	if (dev->of_node) {
+		/* Use device tree. */
+		init_data = of_get_regulator_init_data(dev,
+						       dev->of_node);
+		if (!init_data) {
+			dev_err(dev, "%s: unable to allocate memory\n",
+					__func__);
+			rc = -ENOMEM;
+			goto err_probe;
+		}
+
+		if (init_data->constraints.name == NULL) {
+			dev_err(dev, "%s: regulator name not specified\n",
+				__func__);
+			rc = -EINVAL;
+			goto err_probe;
+		}
+
+		if (of_get_property(dev->of_node, "parent-supply", NULL))
+			init_data->supply_regulator = "parent";
+
+		of_property_read_u32(dev->of_node, "qcom,system-load",
+					&vreg_priv->system_uA);
+		of_property_read_u32(dev->of_node, "qcom,hpm-min-load",
+					&vreg_priv->hpm_min_load);
+
+		init_data->constraints.input_uV	= init_data->constraints.max_uV;
+
+		/* A stub can always toggle status, voltage, mode and load. */
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_STATUS;
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_VOLTAGE;
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_DRMS;
+		init_data->constraints.valid_modes_mask
+			= REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE;
+	} else {
+		/* Use platform data. */
+		vreg_pdata = dev->platform_data;
+		if (!vreg_pdata) {
+			dev_err(dev, "%s: no platform data\n", __func__);
+			rc = -EINVAL;
+			goto err_probe;
+		}
+		init_data = &vreg_pdata->init_data;
+
+		vreg_priv->system_uA = vreg_pdata->system_uA;
+		vreg_priv->hpm_min_load = vreg_pdata->hpm_min_load;
+	}
+
+	dev_set_drvdata(dev, vreg_priv);
+
+	/* Keep a private copy of the name; init_data may not outlive us. */
+	rdesc = &vreg_priv->rdesc;
+	strlcpy(vreg_priv->name, init_data->constraints.name,
+						STUB_REGULATOR_MAX_NAME);
+	rdesc->name = vreg_priv->name;
+	rdesc->ops = &regulator_stub_ops;
+
+	/*
+	 * Ensure that voltage set points are handled correctly for regulators
+	 * which have a specified voltage constraint range, as well as those
+	 * that do not.
+	 */
+	if (init_data->constraints.min_uV == 0 &&
+	    init_data->constraints.max_uV == 0)
+		rdesc->n_voltages = 0;
+	else
+		rdesc->n_voltages = 2;
+
+	rdesc->id    = pdev->id;
+	rdesc->owner = THIS_MODULE;
+	rdesc->type  = REGULATOR_VOLTAGE;
+	vreg_priv->voltage = init_data->constraints.min_uV;
+	/* Initial mode mirrors get_optimum_mode() for the static load. */
+	if (vreg_priv->system_uA >= vreg_priv->hpm_min_load)
+		vreg_priv->mode = REGULATOR_MODE_NORMAL;
+	else
+		vreg_priv->mode = REGULATOR_MODE_IDLE;
+
+	reg_config.dev = dev;
+	reg_config.init_data = init_data;
+	reg_config.driver_data = vreg_priv;
+	reg_config.of_node = dev->of_node;
+	vreg_priv->rdev = regulator_register(rdesc, &reg_config);
+
+	if (IS_ERR(vreg_priv->rdev)) {
+		rc = PTR_ERR(vreg_priv->rdev);
+		vreg_priv->rdev = NULL;
+		if (rc != -EPROBE_DEFER)
+			dev_err(dev, "%s: regulator_register failed\n",
+				__func__);
+		goto err_probe;
+	}
+
+	return 0;
+
+err_probe:
+	regulator_stub_cleanup(vreg_priv);
+	return rc;
+}
+
+/* Platform unbind: unregister and free the stub regulator. */
+static int regulator_stub_remove(struct platform_device *pdev)
+{
+	struct regulator_stub *vreg_priv = dev_get_drvdata(&pdev->dev);
+
+	regulator_stub_cleanup(vreg_priv);
+	return 0;
+}
+
+/* Matches "qcom,<STUB_REGULATOR_DRIVER_NAME>" device-tree nodes. */
+static struct of_device_id regulator_stub_match_table[] = {
+	{ .compatible = "qcom," STUB_REGULATOR_DRIVER_NAME, },
+	{}
+};
+
+static struct platform_driver regulator_stub_driver = {
+	.probe	= regulator_stub_probe,
+	.remove	= regulator_stub_remove,
+	.driver	= {
+		.name		= STUB_REGULATOR_DRIVER_NAME,
+		.owner		= THIS_MODULE,
+		.of_match_table = regulator_stub_match_table,
+	},
+};
+
+/*
+ * Register the stub regulator platform driver exactly once.  Exported so
+ * platform code can force early registration in addition to the initcall.
+ */
+int __init regulator_stub_init(void)
+{
+	/* Guard against double registration (initcall + explicit call). */
+	static int registered;
+
+	if (registered)
+		return 0;
+	else
+		registered = 1;
+	return platform_driver_register(&regulator_stub_driver);
+}
+postcore_initcall(regulator_stub_init);
+EXPORT_SYMBOL(regulator_stub_init);
+
+/* Module unload: unregister the platform driver. */
+static void __exit regulator_stub_exit(void)
+{
+	platform_driver_unregister(&regulator_stub_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("stub regulator driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform: " STUB_REGULATOR_DRIVER_NAME);
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index 76d6bd4da138..8d6c1a3eeb88 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -3,5 +3,5 @@ menu "SOC (System On Chip) specific Drivers"
source "drivers/soc/qcom/Kconfig"
source "drivers/soc/ti/Kconfig"
source "drivers/soc/versatile/Kconfig"
-
+
endmenu
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 7bd2c94f54a4..348682044e83 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -9,3 +9,410 @@ config QCOM_GSBI
functions for connecting the underlying serial UART, SPI, and I2C
devices to the output pins.
+# When adding new entries keep the list in alphabetical order
+
+if ARCH_MSM || ARCH_QCOM
+
+config MSM_BAM_DMUX
+ bool "BAM Data Mux Driver"
+ depends on SPS
+ help
+ Support Muxed Data Channels over BAM interface.
+ BAM has a limited number of pipes. This driver
+ provides a means to support more logical channels
+ via muxing than BAM could without muxing.
+
+config MSM_EVENT_TIMER
+ bool "Event timer"
+ help
+	  This option enables a module that manages a list of event timers that
+	  need to be monitored by the PM. This enables the PM code to monitor
+	  events that require the core to be awake and ready to handle the
+	  event.
+
+config MSM_IPC_ROUTER_SMD_XPRT
+ depends on MSM_SMD
+ depends on IPC_ROUTER
+ bool "MSM SMD XPRT Layer"
+ help
+ SMD Transport Layer that enables IPC Router communication within
+ a System-on-Chip(SoC). When the SMD channels become available,
+ this layer registers a transport with IPC Router and enable
+ message exchange.
+
+config MSM_IPC_ROUTER_HSIC_XPRT
+ depends on USB_QCOM_IPC_BRIDGE
+ depends on IPC_ROUTER
+ bool "MSM HSIC XPRT Layer"
+ help
+ HSIC Transport Layer that enables off-chip communication of
+ IPC Router. When the HSIC endpoint becomes available, this layer
+ registers the transport with IPC Router and enable message
+ exchange.
+
+config MSM_JTAG
+ bool "Debug and ETM trace support across power collapse"
+ help
+ Enables support for debugging (specifically breakpoints) and ETM
+ processor tracing across power collapse both for JTag and OS hosted
+ software running on the target. Enabling this will ensure debug
+ and ETM registers are saved and restored across power collapse.
+
+ If unsure, say 'N' here to avoid potential power, performance and
+ memory penalty.
+
+config MSM_JTAG_MM
+ bool "Debug and ETM trace support across power collapse using memory mapped access"
+ help
+ Enables support for debugging (specifically breakpoints) and ETM
+ processor tracing across power collapse both for JTag and OS hosted
+ software running on the target. Enabling this will ensure debug
+ and ETM registers are saved and restored across power collapse.
+
+ Required on targets on which cp14 access to debug and ETM registers is
+ not permitted and so memory mapped access is necessary.
+
+ If unsure, say 'N' here to avoid potential power, performance and
+ memory penalty.
+
+config MSM_JTAGV8
+ bool "Debug and ETM trace support across power collapse for ARMv8"
+ help
+ Enables support for debugging (specifically breakpoints) and ETM
+ processor tracing across power collapse both for JTag and OS hosted
+ software running on ARMv8 target. Enabling this will ensure debug
+ and ETM registers are saved and restored across power collapse.
+
+ If unsure, say 'N' here to avoid potential power, performance and
+ memory penalty.
+
+config MSM_QMI_INTERFACE
+ depends on IPC_ROUTER
+ depends on QMI_ENCDEC
+ bool "MSM QMI Interface Library"
+ help
+ Library to send and receive QMI messages over IPC Router.
+ This library provides interface functions to the kernel drivers
+ to perform QMI message marshaling and transport them over IPC
+ Router.
+
+config MSM_SMD
+ depends on MSM_SMEM
+ bool "MSM Shared Memory Driver (SMD)"
+ help
+ Support for the shared memory interprocessor communication protocol
+	  which provides virtual point to point serial channels between processes
+ on the apps processor and processes on other processors in the SoC.
+ Also includes support for the Shared Memory State Machine (SMSM)
+ protocol which provides a mechanism to publish single bit state
+ information to one or more processors in the SoC.
+
+config MSM_SMD_DEBUG
+ depends on MSM_SMD
+ bool "MSM SMD debug support"
+ help
+ Support for debugging SMD and SMSM communication between apps and
+ other processors in the SoC. Debug support primarily consists of
+ logs consisting of information such as what interrupts were processed,
+ what channels caused interrupt activity, and when internal state
+ change events occur.
+
+config MSM_RPM_SMD
+ select MSM_MPM_OF
+ bool "RPM driver using SMD protocol"
+ help
+ RPM is the dedicated hardware engine for managing shared SoC
+ resources. This config adds driver support for using SMD as a
+ transport layer communication with RPM hardware. It also selects
+ the MSM_MPM config that programs the MPM module to monitor interrupts
+ during sleep modes.
+
+config MSM_RPM_RBCPR_STATS_V2_LOG
+ tristate "MSM Resource Power Manager RPBCPR Stat Driver"
+ depends on DEBUG_FS
+ help
+ This option enables v2 of the rpmrbcpr_stats driver which reads RPM
+ memory for statistics pertaining to RPM's RBCPR(Rapid Bridge Core
+ Power Reduction) driver. The drivers outputs the message via a
+ debugfs node.
+
+config MSM_RPM_LOG
+ tristate "MSM Resource Power Manager Log Driver"
+ depends on DEBUG_FS
+ depends on MSM_RPM_SMD
+ default n
+ help
+ This option enables a driver which can read from a circular buffer
+ of messages produced by the RPM. These messages provide diagnostic
+ information about RPM operation. The driver outputs the messages
+ via a debugfs node.
+
+config MSM_RPM_STATS_LOG
+ tristate "MSM Resource Power Manager Stat Driver"
+ depends on DEBUG_FS
+ depends on MSM_RPM_SMD
+ default n
+ help
+ This option enables a driver which reads RPM messages from a shared
+ memory location. These messages provide statistical information about
+ the low power modes that RPM enters. The drivers outputs the message
+ via a debugfs node.
+
+config MSM_RUN_QUEUE_STATS
+ bool "Enable collection and exporting of MSM Run Queue stats to userspace"
+ default n
+ help
+	  This option enables the driver to periodically collect the statistics
+	  of kernel run queue information and calculate the load of the system.
+	  This information is exported to userspace via sysfs entries and userspace
+	  algorithms use this info to decide when to turn on/off the cpu cores.
+
+config MSM_SMEM
+ depends on REMOTE_SPINLOCK_MSM
+ bool "MSM Shared Memory (SMEM)"
+ help
+ Support for the shared memory interface between the various
+ processors in the System on a Chip (SoC) which allows basic
+ inter-processor communication.
+
+config MSM_SMEM_LOGGING
+ depends on MSM_SMEM
+ bool "MSM Shared Memory Logger"
+ help
+ Enable the shared memory logging to log the events between
+ the various processors in the system. This option exposes
+ the shared memory logger at /dev/smem_log and a debugfs node
+ named smem_log.
+
+config MSM_SMP2P
+ bool "SMSM Point-to-Point (SMP2P)"
+ depends on MSM_SMEM
+ help
+ Provide point-to-point remote signaling support.
+ SMP2P enables transferring 32-bit values between
+ the local and a remote system using shared
+ memory and interrupts. A client can open multiple
+ 32-bit values by specifying a unique string and
+ remote processor ID.
+
+config MSM_SMP2P_TEST
+ bool "SMSM Point-to-Point Test"
+ depends on MSM_SMP2P
+ help
+ Enables loopback and unit testing support for
+ SMP2P. Loopback support is used by other
+ processors to do unit testing. Unit tests
+ are used to verify the local and remote
+ implementations.
+
+config MSM_SPM_V2
+ bool "Driver support for SPM Version 2"
+ help
+ Enables the support for Version 2 of the SPM driver. SPM hardware is
+ used to manage the processor power during sleep. The driver allows
+ configuring SPM to allow different low power modes for both core and
+ L2.
+
+config MSM_L2_SPM
+ bool "SPM support for L2 cache"
+ depends on MSM_SPM_V2
+ help
+ Enable SPM driver support for L2 cache. Some MSM chipsets allow
+ control of L2 cache low power mode with a Subsystem Power manager.
+ Enabling this driver allows configuring L2 SPM for low power modes
+ on supported chipsets.
+
+config MSM_QDSP6_APRV2
+ bool "Audio QDSP6 APRv2 support"
+ depends on MSM_SMD
+ help
+ Enable APRv2 IPC protocol support between
+ application processor and QDSP6. APR is
+ used by audio driver to configure QDSP6's
+ ASM, ADM and AFE.
+
+config MSM_QDSP6_APRV3
+ bool "Audio QDSP6 APRv3 support"
+ depends on MSM_SMD
+ help
+	  Enable APRv3 IPC protocol support between
+ application processor and QDSP6. APR is
+ used by audio driver to configure QDSP6v2's
+ ASM, ADM and AFE.
+
+config MSM_ADSP_LOADER
+ tristate "ADSP loader support"
+ select SND_SOC_MSM_APRV2_INTF
+ depends on MSM_QDSP6_APRV2 || MSM_QDSP6_APRV3
+ help
+ Enable ADSP image loader.
+ The ADSP loader brings ADSP out of reset
+ for the platforms that use APRv2.
+ Say M if you want to enable this module.
+
+config MSM_MEMORY_DUMP
+ bool "MSM Memory Dump Support"
+ help
+ This enables memory dump feature. It allows various client
+ subsystems to register respective dump regions. At the time
+ of deadlocks or cpu hangs these dump regions are captured to
+ give a snapshot of the system at the time of the crash.
+
+config MSM_MEMORY_DUMP_V2
+ bool "MSM Memory Dump v2 Support"
+ help
+ This enables memory dump feature. It allows various client
+ subsystems to register respective dump regions. At the time
+ of deadlocks or cpu hangs these dump regions are captured to
+ give a snapshot of the system at the time of the crash.
+
+config MSM_COMMON_LOG
+ bool "MSM Common Log Support"
+ help
+ Use this to export symbols of some log address and variables
+ that need to parse crash dump files to a memory dump table. This
+ table can be used by post analysis tools to extract information
+ from memory when device crashes.
+
+config MSM_WATCHDOG_V2
+ bool "MSM Watchdog Support"
+ help
+ This enables the watchdog module. It causes kernel panic if the
+ watchdog times out. It allows for detection of cpu hangs and
+ deadlocks. It does not run during the bootup process, so it will
+ not catch any early lockups.
+
+config MSM_SUBSYSTEM_RESTART
+ bool "MSM Subsystem Restart"
+ help
+ This option enables the MSM subsystem restart framework.
+
+ The MSM subsystem restart framework provides support to boot,
+ shutdown, and restart subsystems with a reference counted API.
+ It also notifies userspace of transitions between these states via
+ sysfs.
+
+config MSM_SYSMON_COMM
+ bool "MSM System Monitor communication support"
+ depends on MSM_SMD && MSM_SUBSYSTEM_RESTART
+ help
+ This option adds support for MSM System Monitor library, which
+ provides an API that may be used for notifying subsystems within
+ the SoC about other subsystems' power-up/down state-changes.
+
+config MSM_PIL
+ bool "Peripheral image loading"
+ select FW_LOADER
+ default n
+ help
+ Some peripherals need to be loaded into memory before they can be
+ brought out of reset.
+
+ Say yes to support these devices.
+
+config MSM_PIL_SSR_GENERIC
+ tristate "MSM Subsystem Boot Support"
+ depends on MSM_PIL && MSM_SUBSYSTEM_RESTART
+ help
+ Support for booting and shutting down MSM Subsystem processors.
+ This driver also monitors the SMSM status bits and the watchdog
+ interrupt for the subsystem and restarts it on a watchdog bite
+ or a fatal error. Subsystems include LPASS, Venus, VPU, WCNSS and
+ BCSS.
+
+config MSM_PIL_MSS_QDSP6V5
+ tristate "MSS QDSP6v5 (Hexagon) Boot Support"
+ depends on MSM_PIL && MSM_SUBSYSTEM_RESTART
+ help
+ Support for booting and shutting down QDSP6v5 (Hexagon) processors
+ in modem subsystems. If you would like to make or receive phone
+ calls then say Y here.
+
+ If unsure, say N.
+
+config MSM_PIL_FEMTO
+ tristate "FSM99XX Boot Support"
+ depends on MSM_PIL && ARCH_FSM9900
+ help
+ Support for loading and booting firmware images for multiple
+ modems on the FSM9900 family targets.
+ Select Y if you want the modems to boot.
+ If unsure, select N.
+
+config MSM_OCMEM
+ bool "MSM On-Chip memory driver (OCMEM)"
+ help
+ Enable support for On-Chip Memory available on certain MSM chipsets.
+ OCMEM is a low latency, high performance pool shared by subsystems.
+
+config MSM_OCMEM_LOCAL_POWER_CTRL
+ bool "OCMEM Local Power Control"
+ depends on MSM_OCMEM
+ help
+ Enable direct power management of the OCMEM core by the
+ OCMEM driver. By default power management is delegated to
+ the RPM. Selecting this option causes the OCMEM driver to
+ directly handle the various macro power transitions.
+
+config MSM_OCMEM_DEBUG
+ bool "OCMEM Debug Support"
+ depends on MSM_OCMEM
+ help
+ Enable debug options for On-chip Memory (OCMEM) driver.
+ Various debug options include memory, power and latency.
+ Choosing one of these options allows debugging of each
+ individual subsystem separately.
+
+config MSM_OCMEM_NONSECURE
+ bool "OCMEM Non Secure Mode"
+ depends on MSM_OCMEM_DEBUG
+ help
+ Disable OCMEM interaction with secure processor.
+ By default OCMEM is secured and accesses for each master
+ is requested by the OCMEM driver. Selecting this option
+ causes the OCMEM memory to be in non-secure state unless
+ its locked down by the secure processor.
+
+config MSM_OCMEM_POWER_DEBUG
+ bool "OCMEM Power Debug Support"
+ depends on MSM_OCMEM_DEBUG
+ help
+ Enable debug support for OCMEM power management.
+ This adds support for verifying all power management
+ related operations of OCMEM. Both local power management
+ and RPM assisted power management operations are supported.
+
+config MSM_OCMEM_DEBUG_ALWAYS_ON
+ bool "Keep OCMEM always turned ON"
+ depends on MSM_OCMEM_DEBUG
+ help
+ Always vote for all OCMEM clocks and keep all OCMEM
+ macros turned ON and never allow them to be turned OFF.
+ Both local power management and RPM assisted power modes
+ are supported for individual macro power control operations.
+
+config MSM_OCMEM_POWER_DISABLE
+ bool "OCMEM Disable Power Control"
+ depends on MSM_OCMEM
+ help
+ Disable all OCMEM power management.
+ Skip all OCMEM power operations that turn ON or
+ turn OFF the macros. Both local power management and
+ RPM assisted power management operations are skipped.
+ Enable this configuration if OCMEM is being exclusively
+ used as GMEM or OCIMEM.
+
+config MSM_SCM
+ bool "Secure Channel Manager (SCM) support"
+ default n
+
+config MAXIMUM_CURRENT_THROTTLING
+ tristate "CPU current throttling driver"
+ help
+ Say Y to enable maximum current throttling.
+ The maximum current throttling driver enables application to
+ turn on/off CPU based current throttling by setting the
+ credit/upper limit in the CPU registers.
+
+
+endif # ARCH_MSM || ARCH_QCOM
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 438901257ac1..e45e5b432597 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1 +1,54 @@
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
+# When adding new entries keep the list in alphabetical order
+CFLAGS_scm.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1)
+
+obj-$(CONFIG_MSM_RUN_QUEUE_STATS) += msm_rq_stats.o
+#obj-$(CONFIG_DEBUG_FS) += nohlt.o
+obj-$(CONFIG_ARM64) += idle-v8.o cpu_ops.o
+obj-$(CONFIG_CPU_V7) += idle-v7.o
+obj-$(CONFIG_MAXIMUM_CURRENT_THROTTLING) += mct.o
+obj-$(CONFIG_MSM_BAM_DMUX) += bam_dmux.o
+obj-$(CONFIG_MSM_EVENT_TIMER) += event_timer.o
+obj-$(CONFIG_MSM_IPC_ROUTER_HSIC_XPRT) += ipc_router_hsic_xprt.o
+obj-$(CONFIG_MSM_IPC_ROUTER_SMD_XPRT) += ipc_router_smd_xprt.o
+obj-$(CONFIG_MSM_MEMORY_DUMP) += memory_dump.o
+obj-$(CONFIG_MSM_MEMORY_DUMP_V2) += memory_dump_v2.o
+obj-$(CONFIG_MSM_RPM_SMD) += rpm-smd.o
+ifdef CONFIG_DEBUG_FS
+obj-$(CONFIG_MSM_RPM_SMD) += rpm-smd-debug.o
+endif
+obj-$(CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG) += rpm_rbcpr_stats_v2.o
+obj-$(CONFIG_MSM_RPM_STATS_LOG) += rpm_stats.o rpm_master_stat.o
+obj-$(CONFIG_MSM_RPM_LOG) += rpm_log.o
+obj-$(CONFIG_MSM_JTAG) += jtag-fuse.o jtag.o
+obj-$(CONFIG_MSM_JTAG_MM) += jtag-fuse.o jtag-mm.o
+obj-$(CONFIG_MSM_JTAGV8) += jtag-fuse.o jtagv8.o jtagv8-mm.o
+obj-$(CONFIG_MSM_QMI_INTERFACE) += qmi_interface.o
+
+#obj-$(CONFIG_MSM_SCM) += scm.o scm-boot.o
+
+obj-$(CONFIG_MSM_SMD) += smd.o smd_debug.o smd_private.o smd_init_dt.o smsm_debug.o
+obj-$(CONFIG_MSM_SMEM) += smem.o smem_debug.o
+obj-$(CONFIG_MSM_SMEM_LOGGING) += smem_log.o
+obj-$(CONFIG_MSM_COMMON_LOG) += common_log.o
+obj-$(CONFIG_MSM_SMP2P) += smp2p.o
+obj-$(CONFIG_MSM_SMP2P_TEST) += smp2p_loopback.o smp2p_test.o smp2p_spinlock_test.o
+obj-$(CONFIG_MSM_WATCHDOG_V2) += watchdog_v2.o
+#obj-$(CONFIG_MEM_SHARE_QMI_SERVICE) += memshare/
+obj-$(CONFIG_MSM_SPM_V2) += spm-v2.o spm_devices.o
+obj-y += socinfo.o
+
+obj-$(CONFIG_MSM_PIL) += peripheral-loader.o
+obj-$(CONFIG_MSM_PIL_SSR_GENERIC) += subsys-pil-tz.o
+obj-$(CONFIG_MSM_PIL_MSS_QDSP6V5) += pil-q6v5.o pil-msa.o pil-q6v5-mss.o
+obj-$(CONFIG_MSM_PIL_FEMTO) += pil-q6v5.o pil-msa.o pil-femto-modem.o
+
+obj-$(CONFIG_MSM_OCMEM) += ocmem.o ocmem_allocator.o ocmem_notifier.o
+obj-$(CONFIG_MSM_OCMEM) += ocmem_sched.o ocmem_api.o ocmem_rdm.o ocmem_core.o
+
+ifdef CONFIG_MSM_SUBSYSTEM_RESTART
+ obj-y += subsystem_notif.o
+ obj-y += subsystem_restart.o
+ obj-y += ramdump.o
+endif
+obj-$(CONFIG_MSM_SYSMON_COMM) += sysmon.o
diff --git a/drivers/soc/qcom/common_log.c b/drivers/soc/qcom/common_log.c
new file mode 100644
index 000000000000..1034095fc379
--- /dev/null
+++ b/drivers/soc/qcom/common_log.c
@@ -0,0 +1,83 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/kallsyms.h>
+#include <linux/slab.h>
+#include <soc/qcom/memory_dump.h>
+
+/*
+ * Register the kernel log buffer (and its first-index cursor) with the MSM
+ * memory dump table so post-mortem tools can locate dmesg in a RAM dump.
+ * log_buf/log_buf_len/log_first_idx are not exported, so their addresses
+ * are resolved at runtime via kallsyms.
+ */
+static void __init common_log_register_log_buf(void)
+{
+	char **log_bufp;
+	uint32_t *log_buf_lenp;
+	uint32_t *fist_idxp;
+	struct msm_client_dump dump_log_buf, dump_first_idx;
+	struct msm_dump_entry entry_log_buf, entry_first_idx;
+	struct msm_dump_data *dump_data;
+
+	log_bufp = (char **)kallsyms_lookup_name("log_buf");
+	log_buf_lenp = (uint32_t *)kallsyms_lookup_name("log_buf_len");
+	if (!log_bufp || !log_buf_lenp) {
+		pr_err("Unable to find log_buf by kallsyms!\n");
+		return;
+	}
+	fist_idxp = (uint32_t *)kallsyms_lookup_name("log_first_idx");
+	/* v1 dump tables take start/end addresses directly ... */
+	if (MSM_DUMP_MAJOR(msm_dump_table_version()) == 1) {
+		dump_log_buf.id = MSM_LOG_BUF;
+		dump_log_buf.start_addr = virt_to_phys(*log_bufp);
+		dump_log_buf.end_addr = virt_to_phys(*log_bufp + *log_buf_lenp);
+		if (msm_dump_tbl_register(&dump_log_buf))
+			pr_err("Unable to register %d.\n", dump_log_buf.id);
+		dump_first_idx.id = MSM_LOG_BUF_FIRST_IDX;
+		if (fist_idxp) {
+			dump_first_idx.start_addr = virt_to_phys(fist_idxp);
+			if (msm_dump_tbl_register(&dump_first_idx))
+				pr_err("Unable to register %d.\n", dump_first_idx.id);
+		}
+	} else {
+		/* ... v2 tables reference a heap-allocated msm_dump_data. */
+		dump_data = kzalloc(sizeof(struct msm_dump_data), GFP_KERNEL);
+		if (!dump_data) {
+			pr_err("Unable to alloc data space.\n");
+			return;
+		}
+		dump_data->len = *log_buf_lenp;
+		dump_data->addr = virt_to_phys(*log_bufp);
+		entry_log_buf.id = MSM_DUMP_DATA_LOG_BUF;
+		entry_log_buf.addr = virt_to_phys(dump_data);
+		if (msm_dump_data_register(MSM_DUMP_TABLE_APPS, &entry_log_buf)) {
+			kfree(dump_data);
+			pr_err("Unable to register %d.\n", entry_log_buf.id);
+		}
+		if (fist_idxp) {
+			dump_data = kzalloc(sizeof(struct msm_dump_data), GFP_KERNEL);
+			if (!dump_data) {
+				pr_err("Unable to alloc data space.\n");
+				return;
+			}
+			dump_data->addr = virt_to_phys(fist_idxp);
+			entry_first_idx.id = MSM_DUMP_DATA_LOG_BUF_FIRST_IDX;
+			entry_first_idx.addr = virt_to_phys(dump_data);
+			/* NOTE(review): unlike the log_buf path above, dump_data
+			 * is not kfree'd if this registration fails — leak. */
+			if (msm_dump_data_register(MSM_DUMP_TABLE_APPS, &entry_first_idx))
+				pr_err("Unable to register %d.\n", entry_first_idx.id);
+		}
+	}
+}
+
+/* Late initcall: log_buf must exist and dump tables must be up by now. */
+static int __init msm_common_log_init(void)
+{
+	common_log_register_log_buf();
+	return 0;
+}
+late_initcall(msm_common_log_init);
diff --git a/drivers/soc/qcom/cpu_ops.c b/drivers/soc/qcom/cpu_ops.c
new file mode 100644
index 000000000000..b8673668c312
--- /dev/null
+++ b/drivers/soc/qcom/cpu_ops.c
@@ -0,0 +1,266 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* MSM ARMv8 CPU Operations
+ * Based on arch/arm64/kernel/smp_spin_table.c
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/smp.h>
+
+#include <soc/qcom/scm-boot.h>
+#include <soc/qcom/socinfo.h>
+
+#include <asm/barrier.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu_ops.h>
+#include <asm/cputype.h>
+#include <asm/smp_plat.h>
+#include <soc/qcom/pm.h>
+
+/* Offsets into a CPU's ACC (power control) register block. */
+#define CPU_PWR_CTL_OFFSET 0x4
+#define CPU_PWR_GATE_CTL_OFFSET 0x14
+
+/* Serializes the pen-release handshake during secondary CPU boot. */
+static DEFINE_RAW_SPINLOCK(boot_lock);
+
+/* SCM cold-boot flag per logical CPU, indexed by cpu number.
+ * NOTE(review): only 4 entries — assumes at most 4 CPUs; confirm for target. */
+static int cold_boot_flags[] = {
+	0,
+	SCM_FLAG_COLDBOOT_CPU1,
+	SCM_FLAG_COLDBOOT_CPU2,
+	SCM_FLAG_COLDBOOT_CPU3,
+};
+
+/* Set once a CPU has completed its one-time cold-boot unclamp sequence. */
+DEFINE_PER_CPU(int, cold_boot_done);
+
+/*
+ * Update the holding-pen value and flush it to memory so a secondary CPU
+ * spinning with caches off observes the new value.
+ */
+static void write_pen_release(u64 val)
+{
+	void *start = (void *)&secondary_holding_pen_release;
+	unsigned long size = sizeof(secondary_holding_pen_release);
+
+	secondary_holding_pen_release = val;
+	smp_wmb();	/* order the store before the cache flush */
+	__flush_dcache_area(start, size);
+}
+
+/*
+ * Release CPU @cpu from the holding pen and wait up to 1s for it to
+ * acknowledge by resetting the pen to INVALID_HWID.
+ * Returns 0 on success, -ENOSYS if the CPU never acknowledged.
+ */
+static int secondary_pen_release(unsigned int cpu)
+{
+	unsigned long timeout;
+
+	/*
+	 * Set synchronisation state between this boot processor
+	 * and the secondary one
+	 */
+	raw_spin_lock(&boot_lock);
+	write_pen_release(cpu_logical_map(cpu));
+
+	/*
+	 * Wake up the cpu with an IPI
+	 */
+	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+
+	timeout = jiffies + (1 * HZ);
+	while (time_before(jiffies, timeout)) {
+		if (secondary_holding_pen_release == INVALID_HWID)
+			break;
+		udelay(10);
+	}
+	raw_spin_unlock(&boot_lock);
+
+	return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
+}
+
+/*
+ * Power up a secondary core on simulator targets by writing the simulator
+ * power-control values into the CPU's ACC register block (found via the
+ * cpu node's "qcom,acc" phandle).
+ *
+ * Returns 0 on success, -ENODEV if the DT nodes are missing, -ENOMEM if
+ * the ACC registers cannot be mapped.
+ *
+ * Fix over original: the acc_node reference obtained from
+ * of_parse_phandle() was never dropped (missing of_node_put).
+ */
+static int unclamp_secondary_sim(unsigned int cpu)
+{
+	int ret = 0;
+	struct device_node *cpu_node, *acc_node = NULL;
+	void __iomem *reg;
+
+	cpu_node = of_get_cpu_node(cpu, NULL);
+	if (!cpu_node)
+		return -ENODEV;
+
+	acc_node = of_parse_phandle(cpu_node, "qcom,acc", 0);
+	if (!acc_node) {
+		ret = -ENODEV;
+		goto out_acc;
+	}
+
+	reg = of_iomap(acc_node, 0);
+	if (!reg) {
+		ret = -ENOMEM;
+		goto out_acc;
+	}
+
+	writel_relaxed(0x800, reg + CPU_PWR_CTL_OFFSET);
+	writel_relaxed(0x3FFF, reg + CPU_PWR_GATE_CTL_OFFSET);
+	mb();	/* complete the register writes before unmapping */
+	iounmap(reg);
+
+out_acc:
+	of_node_put(acc_node);	/* no-op when NULL; fixes refcount leak */
+	of_node_put(cpu_node);
+
+	return ret;
+}
+
+/*
+ * Power-up sequence for a secondary core on silicon: de-assert resets and
+ * clamps on the CPU's ACC register block in the documented order, with the
+ * required settling delays between steps.
+ *
+ * Returns 0 on success, -ENODEV if the DT nodes are missing, -ENOMEM if
+ * the ACC registers cannot be mapped.
+ *
+ * Fix over original: the acc_node reference obtained from
+ * of_parse_phandle() was never dropped (missing of_node_put).
+ */
+static int unclamp_secondary_cpu(unsigned int cpu)
+{
+	int ret = 0;
+	struct device_node *cpu_node, *acc_node = NULL;
+	void __iomem *reg;
+
+	cpu_node = of_get_cpu_node(cpu, NULL);
+	if (!cpu_node)
+		return -ENODEV;
+
+	acc_node = of_parse_phandle(cpu_node, "qcom,acc", 0);
+	if (!acc_node) {
+		ret = -ENODEV;
+		goto out_acc;
+	}
+
+	reg = of_iomap(acc_node, 0);
+	if (!reg) {
+		ret = -ENOMEM;
+		goto out_acc;
+	}
+
+	/* Assert Reset on cpu-n */
+	writel_relaxed(0x00000033, reg + CPU_PWR_CTL_OFFSET);
+	mb();
+
+	/* Program skew to 16 X0 clock cycles */
+	writel_relaxed(0x10000001, reg + CPU_PWR_GATE_CTL_OFFSET);
+	mb();
+	udelay(2);
+
+	/* De-assert coremem clamp */
+	writel_relaxed(0x00000031, reg + CPU_PWR_CTL_OFFSET);
+	mb();
+
+	/* Close coremem array gdhs */
+	writel_relaxed(0x00000039, reg + CPU_PWR_CTL_OFFSET);
+	mb();
+	udelay(2);
+
+	/* De-assert cpu-n clamp */
+	writel_relaxed(0x00020038, reg + CPU_PWR_CTL_OFFSET);
+	mb();
+	udelay(2);
+
+	/* De-assert cpu-n reset */
+	writel_relaxed(0x00020008, reg + CPU_PWR_CTL_OFFSET);
+	mb();
+
+	/* Assert PWRDUP signal on core-n */
+	writel_relaxed(0x00020088, reg + CPU_PWR_CTL_OFFSET);
+	mb();
+
+	/* Secondary CPU-N is now alive */
+	iounmap(reg);
+out_acc:
+	of_node_put(acc_node);	/* no-op when NULL; fixes refcount leak */
+	of_node_put(cpu_node);
+
+	return ret;
+}
+
+/* cpu_ops .cpu_init hook — no per-CPU setup required on this platform. */
+static int __init msm_cpu_init(struct device_node *dn, unsigned int cpu)
+{
+	/* Nothing to do here but needed to keep framework happy */
+	return 0;
+}
+
+/*
+ * Tell secure firmware (via SCM) where CPU @cpu should start executing.
+ * NOTE(review): indexes the 4-entry cold_boot_flags[] with @cpu — assumes
+ * cpu < 4; confirm against the target's CPU count.
+ */
+static int __init msm_cpu_prepare(unsigned int cpu)
+{
+
+	if (scm_set_boot_addr(virt_to_phys(secondary_holding_pen),
+				cold_boot_flags[cpu])) {
+		pr_warn("Failed to set CPU %u boot address\n", cpu);
+		return -ENOSYS;
+	}
+	return 0;
+}
+
+/*
+ * Boot CPU @cpu: run the one-time power-up (unclamp) sequence on first
+ * boot — simulator or silicon variant — then release it from the pen.
+ */
+static int msm_cpu_boot(unsigned int cpu)
+{
+	int ret = 0;
+
+	if (per_cpu(cold_boot_done, cpu) == false) {
+		if (of_board_is_sim()) {
+			ret = unclamp_secondary_sim(cpu);
+			if (ret)
+				return ret;
+		} else {
+			ret = unclamp_secondary_cpu(cpu);
+			if (ret)
+				return ret;
+		}
+		per_cpu(cold_boot_done, cpu) = true;
+	}
+	return secondary_pen_release(cpu);
+}
+
+/* Runs on the secondary CPU itself once it leaves the holding pen. */
+void msm_cpu_postboot(void)
+{
+	/*
+	 * Let the primary processor know we're out of the pen.
+	 */
+	write_pen_release(INVALID_HWID);
+
+	/*
+	 * Synchronise with the boot thread.
+	 */
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Hotplug .cpu_die: park the dying CPU in the low-power hotplug state and
+ * only return when it is legitimately re-released via the holding pen.
+ * Any other wakeup is treated as fatal (BUG).
+ */
+static void msm_wfi_cpu_die(unsigned int cpu)
+{
+	/* Must be invoked on the CPU that is dying. */
+	if (unlikely(cpu != smp_processor_id())) {
+		pr_crit("%s: running on %u, should be %u\n",
+			__func__, smp_processor_id(), cpu);
+		BUG();
+	}
+	for (;;) {
+		lpm_cpu_hotplug_enter(cpu);
+		if (secondary_holding_pen_release == cpu_logical_map(cpu)) {
+			/* Proper wake up */
+			break;
+		}
+		pr_debug("CPU%u: spurious wakeup call\n", cpu);
+		BUG();
+	}
+}
+#endif
+
+/* cpu_operations for "qcom,arm-cortex-acc" enable-method DT entries. */
+static const struct cpu_operations msm_cortex_a_ops = {
+	.name		= "qcom,arm-cortex-acc",
+	.cpu_init	= msm_cpu_init,
+	.cpu_prepare	= msm_cpu_prepare,
+	.cpu_boot	= msm_cpu_boot,
+	.cpu_postboot	= msm_cpu_postboot,
+#ifdef CONFIG_HOTPLUG_CPU
+	.cpu_die	= msm_wfi_cpu_die,
+#endif
+	.cpu_suspend	= msm_pm_collapse,
+};
+CPU_METHOD_OF_DECLARE(msm_cortex_a_ops, &msm_cortex_a_ops);
diff --git a/drivers/soc/qcom/event_timer.c b/drivers/soc/qcom/event_timer.c
new file mode 100644
index 000000000000..dd6b1fe2425a
--- /dev/null
+++ b/drivers/soc/qcom/event_timer.c
@@ -0,0 +1,322 @@
+/* Copyright (c) 2012, 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <soc/qcom/event_timer.h>
+
+#define __INIT_HEAD(x) { .head = RB_ROOT,\
+ .next = NULL, }
+
+#define DEFINE_TIME_HEAD(x) struct timerqueue_head x = __INIT_HEAD(x)
+
+/**
+ * struct event_timer_info - basic event timer structure
+ * @node: timerqueue node to track time ordered data structure
+ * of event timers
+ * @timer: hrtimer created for this event.
+ * @function : callback function for event timer.
+ * @data : callback data for event timer.
+ */
+struct event_timer_info {
+ struct timerqueue_node node;
+ void (*function)(void *);
+ void *data;
+};
+
+static DEFINE_TIME_HEAD(timer_head);
+static DEFINE_SPINLOCK(event_timer_lock);
+static DEFINE_SPINLOCK(event_setup_lock);
+static struct hrtimer event_hrtimer;
+static enum hrtimer_restart event_hrtimer_cb(struct hrtimer *hrtimer);
+
+static int msm_event_debug_mask;
+module_param_named(
+ debug_mask, msm_event_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
+);
+
+enum {
+ MSM_EVENT_TIMER_DEBUG = 1U << 0,
+};
+
+
+/**
+ * add_event_timer() : Add a wakeup event. Intended to be called
+ * by clients once. Returns a handle to be used
+ * for future transactions.
+ * @function : The callback function will be called when event
+ * timer expires.
+ * @data: callback data provided by client.
+ */
+struct event_timer_info *add_event_timer(void (*function)(void *), void *data)
+{
+	struct event_timer_info *event_info =
+			kzalloc(sizeof(struct event_timer_info), GFP_KERNEL);
+
+	if (!event_info)
+		return NULL;
+
+	event_info->function = function;
+	event_info->data = data;
+	/* Init rb node and hr timer */
+	timerqueue_init(&event_info->node);
+	pr_debug("%s: New Event Added. Event %p.",
+			__func__,
+			event_info);
+
+	/* Caller owns the handle; release it with destroy_event_timer(). */
+	return event_info;
+}
+
+/**
+ * is_event_next(): Helper function to check if the event is the next
+ * next expiring event
+ * @event : handle to the event to be checked.
+ */
+/* Callers hold event_timer_lock (see deactivate/destroy_event_timer). */
+static bool is_event_next(struct event_timer_info *event)
+{
+	struct event_timer_info *next_event;
+	struct timerqueue_node *next;
+	bool ret = false;
+
+	next = timerqueue_getnext(&timer_head);
+	if (!next)
+		goto exit_is_next_event;
+
+	next_event = container_of(next, struct event_timer_info, node);
+	if (!next_event)
+		goto exit_is_next_event;
+
+	if (next_event == event)
+		ret = true;
+
+exit_is_next_event:
+	return ret;
+}
+
+/**
+ * is_event_active(): Helper function to check if the timer for a given event
+ * has been started.
+ * @event : handle to the event to be checked.
+ */
+/*
+ * Linear scan of the time-ordered queue; callers hold event_timer_lock.
+ */
+static bool is_event_active(struct event_timer_info *event)
+{
+	struct timerqueue_node *next;
+	struct event_timer_info *next_event;
+	bool ret = false;
+
+	for (next = timerqueue_getnext(&timer_head); next;
+			next = timerqueue_iterate_next(next)) {
+		next_event = container_of(next, struct event_timer_info, node);
+
+		if (event == next_event) {
+			ret = true;
+			break;
+		}
+	}
+	return ret;
+}
+
+/**
+ * create_hrtimer(): Helper function to setup hrtimer.
+ * @expires: absolute expiry time to arm the shared hrtimer with.
+ *
+ * Lazily initializes the single shared event_hrtimer on first use, then
+ * (re)starts it. Callers hold event_timer_lock.
+ */
+static void create_hrtimer(ktime_t expires)
+{
+	static bool timer_initialized;
+
+	if (!timer_initialized) {
+		hrtimer_init(&event_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+		timer_initialized = true;
+	}
+
+	event_hrtimer.function = event_hrtimer_cb;
+	hrtimer_start(&event_hrtimer, expires, HRTIMER_MODE_ABS);
+}
+
+/**
+ * event_hrtimer_cb() : Callback function for hr timer.
+ *			Fires the client callback for every expired event and
+ *			removes each from the time ordered queue, then re-arms
+ *			the hrtimer for the next pending event (if any).
+ */
+static enum hrtimer_restart event_hrtimer_cb(struct hrtimer *hrtimer)
+{
+	struct event_timer_info *event;
+	struct timerqueue_node *next;
+	unsigned long flags;
+
+	spin_lock_irqsave(&event_timer_lock, flags);
+	next = timerqueue_getnext(&timer_head);
+
+	/*
+	 * The loop condition already guarantees next != NULL, and
+	 * container_of() of a valid node cannot yield NULL, so the old
+	 * in-loop NULL checks (which could goto PAST the unlock below and
+	 * return with the spinlock held and IRQs disabled) are dropped.
+	 */
+	while (next && (ktime_to_ns(next->expires)
+				<= ktime_to_ns(hrtimer->node.expires))) {
+		event = container_of(next, struct event_timer_info, node);
+
+		/* Bitwise test: debug_mask is a bitmask of categories. */
+		if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+			pr_info("%s: Deleting event %p @ %lu", __func__,
+				event,
+				(unsigned long)ktime_to_ns(next->expires));
+
+		timerqueue_del(&timer_head, &event->node);
+
+		/* Client callback runs with event_timer_lock held. */
+		if (event->function)
+			event->function(event->data);
+		next = timerqueue_getnext(&timer_head);
+	}
+
+	if (next)
+		create_hrtimer(next->expires);
+
+	spin_unlock_irqrestore(&event_timer_lock, flags);
+	return HRTIMER_NORESTART;
+}
+
+/**
+ * create_timer_smp(): Helper function used setting up timer on core 0.
+ * @data: the event (struct event_timer_info *) to queue and arm.
+ *
+ * Runs on CPU 0 via smp_call_function_single(). Re-queues the event in
+ * the time ordered queue and re-arms the hrtimer if this event is now
+ * the earliest to expire.
+ */
+static void create_timer_smp(void *data)
+{
+	unsigned long flags;
+	struct event_timer_info *event =
+		(struct event_timer_info *)data;
+	struct timerqueue_node *next;
+
+	spin_lock_irqsave(&event_timer_lock, flags);
+	/* If already queued, remove before re-adding with the new expiry. */
+	if (is_event_active(event))
+		timerqueue_del(&timer_head, &event->node);
+
+	next = timerqueue_getnext(&timer_head);
+	timerqueue_add(&timer_head, &event->node);
+	/* Bitwise test: debug_mask is a bitmask of categories. */
+	if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+		pr_info("%s: Adding Event %p for %lu", __func__,
+			event,
+			(unsigned long)ktime_to_ns(event->node.expires));
+
+	/*
+	 * Re-arm only if this event expires before the previous head of
+	 * the queue (or the queue was empty). The redundant "next &&" in
+	 * the original condition is dropped: !next already short-circuits.
+	 */
+	if (!next || ktime_to_ns(event->node.expires) <
+				ktime_to_ns(next->expires)) {
+		if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+			pr_info("%s: Setting timer for %lu", __func__,
+				(unsigned long)ktime_to_ns(event->node.expires));
+		create_hrtimer(event->node.expires);
+	}
+	spin_unlock_irqrestore(&event_timer_lock, flags);
+}
+
+/**
+ * setup_event_hrtimer() : Helper function to setup timer on the primary
+ *			   core (CPU 0).
+ * @event: event handle causing the wakeup.
+ *
+ * Queuing and arming is funneled through CPU 0 via a synchronous
+ * cross-call (wait = 1).
+ */
+static void setup_event_hrtimer(struct event_timer_info *event)
+{
+	smp_call_function_single(0, create_timer_smp, event, 1);
+}
+
+/**
+ * activate_event_timer() : Set the expiration time for an event in absolute
+ *			    ktime. This is a oneshot event timer, clients
+ *			    should call this again to set another expiration.
+ * @event : event handle.
+ * @event_time : event time in absolute ktime.
+ */
+void activate_event_timer(struct event_timer_info *event, ktime_t event_time)
+{
+	if (!event)
+		return;
+
+	/* Bitwise test: debug_mask is a bitmask of categories. */
+	if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+		pr_info("%s: Adding event timer @ %lu", __func__,
+			(unsigned long)ktime_to_us(event_time));
+
+	/* Serialize concurrent activations while the expiry is updated. */
+	spin_lock(&event_setup_lock);
+	event->node.expires = event_time;
+	/* Start hr timer and add event to rb tree */
+	setup_event_hrtimer(event);
+	spin_unlock(&event_setup_lock);
+}
+
+
+/**
+ * deactivate_event_timer() : Deactivate an event timer, this removes the
+ *			      event from the time ordered queue of event
+ *			      timers.
+ * @event: event handle.
+ */
+void deactivate_event_timer(struct event_timer_info *event)
+{
+	unsigned long flags;
+
+	/* Bitwise test: debug_mask is a bitmask of categories. */
+	if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+		pr_info("%s: Deactivate timer", __func__);
+
+	spin_lock_irqsave(&event_timer_lock, flags);
+	if (is_event_active(event)) {
+		/* If this event is armed in the hrtimer, try to cancel it. */
+		if (is_event_next(event))
+			hrtimer_try_to_cancel(&event_hrtimer);
+
+		timerqueue_del(&timer_head, &event->node);
+	}
+	spin_unlock_irqrestore(&event_timer_lock, flags);
+}
+
+/**
+ * destroy_event_timer() : Free the event info data structure allocated during
+ *			   add_event_timer().
+ * @event: event handle.
+ *
+ * Deactivates the event first (if queued/armed), then frees the handle.
+ * The handle must not be used after this call.
+ */
+void destroy_event_timer(struct event_timer_info *event)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&event_timer_lock, flags);
+	if (is_event_active(event)) {
+		if (is_event_next(event))
+			hrtimer_try_to_cancel(&event_hrtimer);
+
+		timerqueue_del(&timer_head, &event->node);
+	}
+	spin_unlock_irqrestore(&event_timer_lock, flags);
+	kfree(event);
+}
+
+/**
+ * get_next_event_time() - Get the next wakeup event. Returns
+ *			   a ktime value of the next expiring event.
+ *
+ * Returns ktime 0 when no event is queued.
+ */
+ktime_t get_next_event_time(void)
+{
+	unsigned long flags;
+	struct timerqueue_node *next;
+	ktime_t next_event = ns_to_ktime(0);
+
+	spin_lock_irqsave(&event_timer_lock, flags);
+	next = timerqueue_getnext(&timer_head);
+	spin_unlock_irqrestore(&event_timer_lock, flags);
+
+	if (!next)
+		return next_event;
+
+	/*
+	 * NOTE(review): the hrtimer is read after dropping the lock, so a
+	 * concurrent re-arm can race with this read — confirm callers
+	 * tolerate a slightly stale remaining time.
+	 */
+	next_event = hrtimer_get_remaining(&event_hrtimer);
+	/* Bitwise test: debug_mask is a bitmask of categories. */
+	if (msm_event_debug_mask & MSM_EVENT_TIMER_DEBUG)
+		pr_info("%s: Next Event %lu", __func__,
+			(unsigned long)ktime_to_us(next_event));
+
+	return next_event;
+}
diff --git a/drivers/soc/qcom/idle-v7.S b/drivers/soc/qcom/idle-v7.S
new file mode 100644
index 000000000000..575f5a933582
--- /dev/null
+++ b/drivers/soc/qcom/idle-v7.S
@@ -0,0 +1,64 @@
+/*
+ * Idle processing for ARMv7-based Qualcomm SoCs.
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2009, 2011-2014 The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <linux/threads.h>
+#include <asm/assembler.h>
+
+	.arm
+/*
+ * Warm-boot entry point. Bumps the per-CPU warmboot debug counter in
+ * IMEM (when msm_pc_debug_counters_phys is set) and then jumps through
+ * this CPU's slot in msm_pm_boot_vector. Runs with the MMU off, so all
+ * virtual addresses are translated to physical via the delta computed
+ * from label 3 below.
+ */
+ENTRY(msm_pm_boot_entry)
+THUMB(	adr	r9, BSYM(2f)	)	/* Kernel is always entered in ARM. */
+THUMB(	bx	r9		)	/* If this is a Thumb-2 kernel, */
+THUMB(	.thumb			)	/* switch to Thumb now.		*/
+THUMB(2:			)
+	mrc	p15, 0, r0, c0, c0, 5	/* MPIDR */
+	bic	r0, #0xff000000		/* what CPU am I */
+
+	adr	r3, 3f
+	ldr	r1, [r3]
+	sub	r3, r1, r3		/* r3 = virt - phys delta */
+	ldr	r1, =msm_pc_debug_counters_phys	/* phys addr for IMEM reg */
+	sub	r1, r1, r3		/* translate virt to phys */
+	ldr	r1, [r1]
+
+	cmp	r1, #0			/* counters configured? */
+	beq	skip_pc_debug3
+	add	r1, r1, r0, LSL #4	/* debug location for this CPU */
+	add	r1, #4			/* warmboot entry counter */
+	ldr	r2, [r1]
+	add	r2, #1
+	str	r2, [r1]
+
+skip_pc_debug3:
+	ldr	r1, =msm_pm_boot_vector
+	sub	r1, r1, r3		/* translate virt to phys */
+
+	add	r1, r1, r0, LSL #2	/* locate boot vector for our cpu */
+	ldr	pc, [r1]		/* jump */
+ENDPROC(msm_pm_boot_entry)
+
+3:	.long	.			/* link-time address of itself */
+
+ .data
+
+ .globl msm_pm_boot_vector
+msm_pm_boot_vector:
+ .space 4 * NR_CPUS
+
+ .globl msm_pc_debug_counters_phys
+msm_pc_debug_counters_phys:
+ .long 0x0
diff --git a/drivers/soc/qcom/idle-v8.S b/drivers/soc/qcom/idle-v8.S
new file mode 100644
index 000000000000..12f9c4d9ceec
--- /dev/null
+++ b/drivers/soc/qcom/idle-v8.S
@@ -0,0 +1,59 @@
+/*
+ * Idle processing for ARMv8-based Qualcomm SoCs.
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2009, 2011-2014 The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <linux/threads.h>
+#include <asm/assembler.h>
+
+/*
+ * ARM64 warm-boot entry point; same layout as the v7 variant but with
+ * 8-byte boot-vector slots (LSL #3). Runs with the MMU off; virtual
+ * addresses are translated to physical via the delta from "align".
+ */
+ENTRY(msm_pm_boot_entry)
+	mrs	x0, mpidr_el1
+	and	x0, x0, #15		/* what CPU am I */
+
+	adr	x3, align
+	ldr	x1, [x3]
+	sub	x3, x1, x3		/* x3 = virt - phys delta */
+	ldr	x1, =msm_pc_debug_counters_phys	/* phys addr for IMEM reg */
+	sub	x1, x1, x3		/* translate virt to phys */
+	ldr	x1, [x1]
+
+	cmp	x1, #0			/* counters configured? */
+	/*
+	 * NOTE(review): A64 conditional branch is conventionally spelled
+	 * "b.eq"; confirm the assembler in use accepts the "beq" alias.
+	 */
+	beq	skip_pc_debug3
+	add	x1, x1, x0, LSL #4	/* debug location for this CPU */
+	add	x1, x1, #4		/* warmboot entry counter */
+	ldr	x2, [x1]
+	add	x2, x2, #1
+	str	x2, [x1]
+
+skip_pc_debug3:
+	ldr	x1, =msm_pm_boot_vector
+	sub	x1, x1, x3		/* translate virt to phys */
+
+	add	x1, x1, x0, LSL #3	/* locate boot vector for our cpu */
+	ldr	x1, [x1]
+	ret	x1			/* jump */
+ENDPROC(msm_pm_boot_entry)
+
+	__ALIGN
+align:	.quad	.			/* link-time address of itself */
+ .data
+ .globl msm_pm_boot_vector
+msm_pm_boot_vector:
+ .space 8 * NR_CPUS
+
+ .globl msm_pc_debug_counters_phys
+msm_pc_debug_counters_phys:
+ .long 0x0
diff --git a/drivers/soc/qcom/ipc_router_hsic_xprt.c b/drivers/soc/qcom/ipc_router_hsic_xprt.c
new file mode 100644
index 000000000000..7f07f1970a1d
--- /dev/null
+++ b/drivers/soc/qcom/ipc_router_hsic_xprt.c
@@ -0,0 +1,720 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * IPC ROUTER HSIC XPRT module.
+ */
+#define DEBUG
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/ipc_router_xprt.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <soc/qcom/subsystem_restart.h>
+
+#include <mach/ipc_bridge.h>
+
+static int msm_ipc_router_hsic_xprt_debug_mask;
+module_param_named(debug_mask, msm_ipc_router_hsic_xprt_debug_mask,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#if defined(DEBUG)
+#define D(x...) do { \
+if (msm_ipc_router_hsic_xprt_debug_mask) \
+ pr_info(x); \
+} while (0)
+#else
+#define D(x...) do { } while (0)
+#endif
+
+#define NUM_HSIC_XPRTS 1
+#define XPRT_NAME_LEN 32
+
+/**
+ * msm_ipc_router_hsic_xprt - IPC Router's HSIC XPRT structure
+ * @list: IPC router's HSIC XPRTs list.
+ * @ch_name: Name of the HSIC endpoint exported by ipc_bridge driver.
+ * @xprt_name: Name of the XPRT to be registered with IPC Router.
+ * @driver: Platform drivers register by this XPRT.
+ * @xprt: IPC Router XPRT structure to contain HSIC XPRT specific info.
+ * @pdev: Platform device registered by IPC Bridge function driver.
+ * @hsic_xprt_wq: Workqueue to queue read & other XPRT related works.
+ * @read_work: Read Work to perform read operation from HSIC's ipc_bridge.
+ * @in_pkt: Pointer to any partially read packet.
+ * @ss_reset_lock: Lock to protect access to the ss_reset flag.
+ * @ss_reset: flag used to check SSR state.
+ * @sft_close_complete: Variable to indicate completion of SSR handling
+ * by IPC Router.
+ * @xprt_version: IPC Router header version supported by this XPRT.
+ * @xprt_option: XPRT specific options to be handled by IPC Router.
+ */
+struct msm_ipc_router_hsic_xprt {
+ struct list_head list;
+ char ch_name[XPRT_NAME_LEN];
+ char xprt_name[XPRT_NAME_LEN];
+ struct platform_driver driver;
+ struct msm_ipc_router_xprt xprt;
+ struct platform_device *pdev;
+ struct workqueue_struct *hsic_xprt_wq;
+ struct delayed_work read_work;
+ struct rr_packet *in_pkt;
+ struct mutex ss_reset_lock;
+ int ss_reset;
+ struct completion sft_close_complete;
+ unsigned xprt_version;
+ unsigned xprt_option;
+};
+
+struct msm_ipc_router_hsic_xprt_work {
+ struct msm_ipc_router_xprt *xprt;
+ struct work_struct work;
+};
+
+static void hsic_xprt_read_data(struct work_struct *work);
+
+/**
+ * msm_ipc_router_hsic_xprt_config - Config. Info. of each HSIC XPRT
+ * @ch_name: Name of the HSIC endpoint exported by ipc_bridge driver.
+ * @xprt_name: Name of the XPRT to be registered with IPC Router.
+ * @hsic_pdev_id: ID to differentiate among multiple ipc_bridge endpoints.
+ * @link_id: Network Cluster ID to which this XPRT belongs to.
+ * @xprt_version: IPC Router header version supported by this XPRT.
+ */
+struct msm_ipc_router_hsic_xprt_config {
+ char ch_name[XPRT_NAME_LEN];
+ char xprt_name[XPRT_NAME_LEN];
+ int hsic_pdev_id;
+ uint32_t link_id;
+ unsigned xprt_version;
+};
+
+struct msm_ipc_router_hsic_xprt_config hsic_xprt_cfg[] = {
+ {"ipc_bridge", "ipc_rtr_ipc_bridge1", 1, 1, 3},
+};
+
+#define MODULE_NAME "ipc_router_hsic_xprt"
+#define IPC_ROUTER_HSIC_XPRT_WAIT_TIMEOUT 3000
+static int ipc_router_hsic_xprt_probe_done;
+static struct delayed_work ipc_router_hsic_xprt_probe_work;
+static DEFINE_MUTEX(hsic_remote_xprt_list_lock_lha1);
+static LIST_HEAD(hsic_remote_xprt_list);
+
+/**
+ * find_hsic_xprt_list() - Find xprt item specific to an HSIC endpoint
+ * @name: Name of the platform device to find in list
+ *
+ * @return: pointer to msm_ipc_router_hsic_xprt if matching endpoint is found,
+ * else NULL.
+ *
+ * This function is used to find specific xprt item from the global xprt list
+ */
+static struct msm_ipc_router_hsic_xprt *
+	find_hsic_xprt_list(const char *name)
+{
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
+
+	/* Walk the global XPRT list under its mutex; match by channel name. */
+	mutex_lock(&hsic_remote_xprt_list_lock_lha1);
+	list_for_each_entry(hsic_xprtp, &hsic_remote_xprt_list, list) {
+		if (!strcmp(name, hsic_xprtp->ch_name)) {
+			mutex_unlock(&hsic_remote_xprt_list_lock_lha1);
+			return hsic_xprtp;
+		}
+	}
+	mutex_unlock(&hsic_remote_xprt_list_lock_lha1);
+	return NULL;
+}
+
+/**
+ * msm_ipc_router_hsic_get_xprt_version() - Get IPC Router header version
+ * supported by the XPRT
+ * @xprt: XPRT for which the version information is required.
+ *
+ * @return: IPC Router header version supported by the XPRT.
+ */
+static int msm_ipc_router_hsic_get_xprt_version(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
+	if (!xprt)
+		return -EINVAL;
+	/* Recover the containing HSIC XPRT from the generic XPRT member. */
+	hsic_xprtp = container_of(xprt, struct msm_ipc_router_hsic_xprt, xprt);
+
+	return (int)hsic_xprtp->xprt_version;
+}
+
+/**
+ * msm_ipc_router_hsic_get_xprt_option() - Get XPRT options
+ * @xprt: XPRT for which the option information is required.
+ *
+ * @return: Options supported by the XPRT.
+ */
+static int msm_ipc_router_hsic_get_xprt_option(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
+	if (!xprt)
+		return -EINVAL;
+	/* Recover the containing HSIC XPRT from the generic XPRT member. */
+	hsic_xprtp = container_of(xprt, struct msm_ipc_router_hsic_xprt, xprt);
+
+	return (int)hsic_xprtp->xprt_option;
+}
+
+/**
+ * msm_ipc_router_hsic_remote_write_avail() - Get available write space
+ * @xprt: XPRT for which the available write space info. is required.
+ *
+ * @return: Write space in bytes on success, 0 on SSR.
+ */
+static int msm_ipc_router_hsic_remote_write_avail(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct ipc_bridge_platform_data *pdata;
+	int write_avail;
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp =
+		container_of(xprt, struct msm_ipc_router_hsic_xprt, xprt);
+
+	/* ss_reset_lock also guards pdev against teardown during SSR. */
+	mutex_lock(&hsic_xprtp->ss_reset_lock);
+	if (hsic_xprtp->ss_reset || !hsic_xprtp->pdev) {
+		/* Link reset or closed: nothing can be written. */
+		write_avail = 0;
+	} else {
+		pdata = hsic_xprtp->pdev->dev.platform_data;
+		write_avail = pdata->max_write_size;
+	}
+	mutex_unlock(&hsic_xprtp->ss_reset_lock);
+	return write_avail;
+}
+
+/**
+ * msm_ipc_router_hsic_remote_write() - Write to XPRT
+ * @data: Data to be written to the XPRT.
+ * @len: Length of the data to be written.
+ * @xprt: XPRT to which the data has to be written.
+ *
+ * @return: Data Length on success, standard Linux error codes on failure.
+ */
+static int msm_ipc_router_hsic_remote_write(void *data,
+		uint32_t len, struct msm_ipc_router_xprt *xprt)
+{
+	struct rr_packet *pkt = (struct rr_packet *)data;
+	struct sk_buff *skb;
+	struct ipc_bridge_platform_data *pdata;
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
+	int ret;
+
+	if (!pkt || pkt->length != len || !xprt) {
+		pr_err("%s: Invalid input parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	hsic_xprtp = container_of(xprt, struct msm_ipc_router_hsic_xprt, xprt);
+	/* Hold ss_reset_lock across the whole write so SSR cannot tear the
+	 * link down underneath us. */
+	mutex_lock(&hsic_xprtp->ss_reset_lock);
+	if (hsic_xprtp->ss_reset) {
+		pr_err("%s: Trying to write on a reset link\n", __func__);
+		mutex_unlock(&hsic_xprtp->ss_reset_lock);
+		return -ENETRESET;
+	}
+
+	if (!hsic_xprtp->pdev) {
+		pr_err("%s: Trying to write on a closed link\n", __func__);
+		mutex_unlock(&hsic_xprtp->ss_reset_lock);
+		return -ENODEV;
+	}
+
+	pdata = hsic_xprtp->pdev->dev.platform_data;
+	if (!pdata || !pdata->write) {
+		pr_err("%s on a uninitialized link\n", __func__);
+		mutex_unlock(&hsic_xprtp->ss_reset_lock);
+		return -EFAULT;
+	}
+
+	/* Only the first fragment is written; presumably the HSIC bridge
+	 * carries the whole packet in one SKB — TODO confirm. */
+	skb = skb_peek(pkt->pkt_fragment_q);
+	if (!skb) {
+		pr_err("%s SKB is NULL\n", __func__);
+		mutex_unlock(&hsic_xprtp->ss_reset_lock);
+		return -EINVAL;
+	}
+	D("%s: About to write %d bytes\n", __func__, len);
+	ret = pdata->write(hsic_xprtp->pdev, skb->data, skb->len);
+	/* A full write of the SKB is reported as the caller's len. */
+	if (ret == skb->len)
+		ret = len;
+	D("%s: Finished writing %d bytes\n", __func__, len);
+	mutex_unlock(&hsic_xprtp->ss_reset_lock);
+	return ret;
+}
+
+/**
+ * msm_ipc_router_hsic_remote_close() - Close the XPRT
+ * @xprt: XPRT which needs to be closed.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+static int msm_ipc_router_hsic_remote_close(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
+	struct ipc_bridge_platform_data *pdata;
+
+	if (!xprt)
+		return -EINVAL;
+	hsic_xprtp = container_of(xprt, struct msm_ipc_router_hsic_xprt, xprt);
+
+	/* Mark the link reset first so readers/writers bail out, then
+	 * drain and destroy the read workqueue before closing. */
+	mutex_lock(&hsic_xprtp->ss_reset_lock);
+	hsic_xprtp->ss_reset = 1;
+	mutex_unlock(&hsic_xprtp->ss_reset_lock);
+	flush_workqueue(hsic_xprtp->hsic_xprt_wq);
+	destroy_workqueue(hsic_xprtp->hsic_xprt_wq);
+	pdata = hsic_xprtp->pdev->dev.platform_data;
+	if (pdata && pdata->close)
+		pdata->close(hsic_xprtp->pdev);
+	hsic_xprtp->pdev = NULL;
+	return 0;
+}
+
+/**
+ * hsic_xprt_read_data() - Read work to read from the XPRT
+ * @work: Read work to be executed.
+ *
+ * This function is a read work item queued on a XPRT specific workqueue.
+ * The work parameter contains information regarding the XPRT on which this
+ * read work has to be performed. The work item keeps reading from the HSIC
+ * endpoint, until the endpoint returns an error.
+ */
+static void hsic_xprt_read_data(struct work_struct *work)
+{
+	int pkt_size;
+	struct sk_buff *skb = NULL;
+	void *data;
+	struct ipc_bridge_platform_data *pdata;
+	struct delayed_work *rwork = to_delayed_work(work);
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp =
+		container_of(rwork, struct msm_ipc_router_hsic_xprt, read_work);
+
+	while (1) {
+		mutex_lock(&hsic_xprtp->ss_reset_lock);
+		if (hsic_xprtp->ss_reset) {
+			mutex_unlock(&hsic_xprtp->ss_reset_lock);
+			break;
+		}
+		pdata = hsic_xprtp->pdev->dev.platform_data;
+		mutex_unlock(&hsic_xprtp->ss_reset_lock);
+		/* Allocation failures are retried forever with a backoff;
+		 * the endpoint is useless without a packet to read into. */
+		while (!hsic_xprtp->in_pkt) {
+			hsic_xprtp->in_pkt = kzalloc(sizeof(struct rr_packet),
+						     GFP_KERNEL);
+			if (hsic_xprtp->in_pkt)
+				break;
+			pr_err("%s: packet allocation failure\n", __func__);
+			msleep(100);
+		}
+		while (!hsic_xprtp->in_pkt->pkt_fragment_q) {
+			hsic_xprtp->in_pkt->pkt_fragment_q =
+				kmalloc(sizeof(struct sk_buff_head),
+					GFP_KERNEL);
+			if (hsic_xprtp->in_pkt->pkt_fragment_q)
+				break;
+			pr_err("%s: Couldn't alloc pkt_fragment_q\n",
+				__func__);
+			msleep(100);
+		}
+		skb_queue_head_init(hsic_xprtp->in_pkt->pkt_fragment_q);
+		D("%s: Allocated rr_packet\n", __func__);
+
+		while (!skb) {
+			skb = alloc_skb(pdata->max_read_size, GFP_KERNEL);
+			if (skb)
+				break;
+			pr_err("%s: Couldn't alloc SKB\n", __func__);
+			msleep(100);
+		}
+		data = skb_put(skb, pdata->max_read_size);
+		pkt_size = pdata->read(hsic_xprtp->pdev, data,
+					pdata->max_read_size);
+		if (pkt_size < 0) {
+			pr_err("%s: Error %d @ read operation\n",
+				__func__, pkt_size);
+			kfree_skb(skb);
+			kfree(hsic_xprtp->in_pkt->pkt_fragment_q);
+			kfree(hsic_xprtp->in_pkt);
+			/*
+			 * Clear the stale pointer: the next invocation of
+			 * this work item must not dereference or reuse the
+			 * freed packet (use-after-free in the original).
+			 */
+			hsic_xprtp->in_pkt = NULL;
+			break;
+		}
+		skb_queue_tail(hsic_xprtp->in_pkt->pkt_fragment_q, skb);
+		hsic_xprtp->in_pkt->length = pkt_size;
+		D("%s: Packet size read %d\n", __func__, pkt_size);
+		/* Hand the packet to IPC Router, then release our reference. */
+		msm_ipc_router_xprt_notify(&hsic_xprtp->xprt,
+			IPC_ROUTER_XPRT_EVENT_DATA, (void *)hsic_xprtp->in_pkt);
+		release_pkt(hsic_xprtp->in_pkt);
+		hsic_xprtp->in_pkt = NULL;
+		skb = NULL;
+	}
+}
+
+/**
+ * hsic_xprt_sft_close_done() - Completion of XPRT reset
+ * @xprt: XPRT on which the reset operation is complete.
+ *
+ * This function is used by IPC Router to signal this HSIC XPRT Abstraction
+ * Layer(XAL) that the reset of XPRT is completely handled by IPC Router.
+ */
+static void hsic_xprt_sft_close_done(struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp =
+		container_of(xprt, struct msm_ipc_router_hsic_xprt, xprt);
+
+	/* Unblocks msm_ipc_router_hsic_remote_remove()'s wait during SSR. */
+	complete_all(&hsic_xprtp->sft_close_complete);
+}
+
+/**
+ * msm_ipc_router_hsic_remote_remove() - Remove an HSIC endpoint
+ * @pdev: Platform device corresponding to HSIC endpoint.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying ipc_bridge driver unregisters
+ * a platform device, mapped to an HSIC endpoint, during SSR.
+ */
+static int msm_ipc_router_hsic_remote_remove(struct platform_device *pdev)
+{
+	struct ipc_bridge_platform_data *pdata;
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
+
+	hsic_xprtp = find_hsic_xprt_list(pdev->name);
+	if (!hsic_xprtp) {
+		pr_err("%s No device with name %s\n", __func__, pdev->name);
+		return -ENODEV;
+	}
+
+	/* Flag the reset, drain the read work, then notify IPC Router and
+	 * wait for it to acknowledge the close before freeing the link. */
+	mutex_lock(&hsic_xprtp->ss_reset_lock);
+	hsic_xprtp->ss_reset = 1;
+	mutex_unlock(&hsic_xprtp->ss_reset_lock);
+	flush_workqueue(hsic_xprtp->hsic_xprt_wq);
+	destroy_workqueue(hsic_xprtp->hsic_xprt_wq);
+	init_completion(&hsic_xprtp->sft_close_complete);
+	msm_ipc_router_xprt_notify(&hsic_xprtp->xprt,
+				   IPC_ROUTER_XPRT_EVENT_CLOSE, NULL);
+	D("%s: Notified IPC Router of %s CLOSE\n",
+	  __func__, hsic_xprtp->xprt.name);
+	/* Completed by hsic_xprt_sft_close_done(). */
+	wait_for_completion(&hsic_xprtp->sft_close_complete);
+	hsic_xprtp->pdev = NULL;
+	pdata = pdev->dev.platform_data;
+	if (pdata && pdata->close)
+		pdata->close(pdev);
+	return 0;
+}
+
+/**
+ * msm_ipc_router_hsic_remote_probe() - Probe an HSIC endpoint
+ * @pdev: Platform device corresponding to HSIC endpoint.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying ipc_bridge driver registers
+ * a platform device, mapped to an HSIC endpoint.
+ */
+static int msm_ipc_router_hsic_remote_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct ipc_bridge_platform_data *pdata;
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
+
+	/* All four bridge operations are mandatory for this XPRT. */
+	pdata = pdev->dev.platform_data;
+	if (!pdata || !pdata->open || !pdata->read ||
+	    !pdata->write || !pdata->close) {
+		pr_err("%s: pdata or pdata->operations is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	hsic_xprtp = find_hsic_xprt_list(pdev->name);
+	if (!hsic_xprtp) {
+		pr_err("%s No device with name %s\n", __func__, pdev->name);
+		return -ENODEV;
+	}
+
+	/* Dedicated single-threaded workqueue serializes the read work. */
+	hsic_xprtp->hsic_xprt_wq =
+		create_singlethread_workqueue(pdev->name);
+	if (!hsic_xprtp->hsic_xprt_wq) {
+		pr_err("%s: WQ creation failed for %s\n",
+			__func__, pdev->name);
+		return -EFAULT;
+	}
+
+	rc = pdata->open(pdev);
+	if (rc < 0) {
+		pr_err("%s: Channel open failed for %s.%d\n",
+			__func__, pdev->name, pdev->id);
+		destroy_workqueue(hsic_xprtp->hsic_xprt_wq);
+		return rc;
+	}
+	hsic_xprtp->pdev = pdev;
+	msm_ipc_router_xprt_notify(&hsic_xprtp->xprt,
+				   IPC_ROUTER_XPRT_EVENT_OPEN, NULL);
+	D("%s: Notified IPC Router of %s OPEN\n",
+	  __func__, hsic_xprtp->xprt.name);
+	/* Kick off the perpetual read loop immediately. */
+	queue_delayed_work(hsic_xprtp->hsic_xprt_wq,
+			   &hsic_xprtp->read_work, 0);
+	return 0;
+}
+
+/**
+ * msm_ipc_router_hsic_driver_register() - register HSIC XPRT drivers
+ *
+ * @hsic_xprtp: pointer to IPC router hsic xprt structure.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when a new XPRT is added to register platform
+ * drivers for new XPRT.
+ */
+static int msm_ipc_router_hsic_driver_register(
+		struct msm_ipc_router_hsic_xprt *hsic_xprtp)
+{
+	int ret;
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp_item;
+
+	/* Check whether a platform driver for this channel name already
+	 * exists before adding ourselves to the global list. */
+	hsic_xprtp_item = find_hsic_xprt_list(hsic_xprtp->ch_name);
+
+	mutex_lock(&hsic_remote_xprt_list_lock_lha1);
+	list_add(&hsic_xprtp->list, &hsic_remote_xprt_list);
+	mutex_unlock(&hsic_remote_xprt_list_lock_lha1);
+
+	if (!hsic_xprtp_item) {
+		/* First XPRT for this channel: register the platform driver
+		 * whose probe fires when ipc_bridge exports the endpoint. */
+		hsic_xprtp->driver.driver.name = hsic_xprtp->ch_name;
+		hsic_xprtp->driver.driver.owner = THIS_MODULE;
+		hsic_xprtp->driver.probe = msm_ipc_router_hsic_remote_probe;
+		hsic_xprtp->driver.remove = msm_ipc_router_hsic_remote_remove;
+
+		ret = platform_driver_register(&hsic_xprtp->driver);
+		if (ret) {
+			pr_err("%s: Failed to register platform driver[%s]\n",
+				__func__, hsic_xprtp->ch_name);
+			return ret;
+		}
+	} else {
+		pr_err("%s Already driver registered %s\n",
+			__func__, hsic_xprtp->ch_name);
+	}
+
+	return 0;
+}
+
+/**
+ * msm_ipc_router_hsic_config_init() - init HSIC xprt configs
+ *
+ * @hsic_xprt_config: pointer to HSIC xprt configurations.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called to initialize the HSIC XPRT pointer with
+ * the HSIC XPRT configurations either from device tree or static arrays.
+ */
+/**
+ * msm_ipc_router_hsic_config_init() - init HSIC xprt configs
+ *
+ * @hsic_xprt_config: pointer to HSIC xprt configurations.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * Allocates and populates an HSIC XPRT from the given configuration
+ * (device tree or static array) and registers its platform driver.
+ */
+static int msm_ipc_router_hsic_config_init(
+		struct msm_ipc_router_hsic_xprt_config *hsic_xprt_config)
+{
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
+
+	hsic_xprtp = kzalloc(sizeof(struct msm_ipc_router_hsic_xprt),
+			     GFP_KERNEL);
+	/* kzalloc() returns NULL on failure, never an ERR_PTR, so a plain
+	 * NULL check is the correct test (was IS_ERR_OR_NULL). */
+	if (!hsic_xprtp) {
+		pr_err("%s: kzalloc() failed for hsic_xprtp id:%s\n",
+			__func__, hsic_xprt_config->ch_name);
+		return -ENOMEM;
+	}
+
+	hsic_xprtp->xprt.link_id = hsic_xprt_config->link_id;
+	hsic_xprtp->xprt_version = hsic_xprt_config->xprt_version;
+
+	strlcpy(hsic_xprtp->ch_name, hsic_xprt_config->ch_name,
+		XPRT_NAME_LEN);
+
+	strlcpy(hsic_xprtp->xprt_name, hsic_xprt_config->xprt_name,
+		XPRT_NAME_LEN);
+	hsic_xprtp->xprt.name = hsic_xprtp->xprt_name;
+
+	/* This XPRT is write-driven: reads are pushed from the read work,
+	 * so the pull-style read hooks stay NULL. */
+	hsic_xprtp->xprt.get_version =
+		msm_ipc_router_hsic_get_xprt_version;
+	hsic_xprtp->xprt.get_option =
+		msm_ipc_router_hsic_get_xprt_option;
+	hsic_xprtp->xprt.read_avail = NULL;
+	hsic_xprtp->xprt.read = NULL;
+	hsic_xprtp->xprt.write_avail =
+		msm_ipc_router_hsic_remote_write_avail;
+	hsic_xprtp->xprt.write = msm_ipc_router_hsic_remote_write;
+	hsic_xprtp->xprt.close = msm_ipc_router_hsic_remote_close;
+	hsic_xprtp->xprt.sft_close_done = hsic_xprt_sft_close_done;
+	hsic_xprtp->xprt.priv = NULL;
+
+	hsic_xprtp->in_pkt = NULL;
+	INIT_DELAYED_WORK(&hsic_xprtp->read_work, hsic_xprt_read_data);
+	mutex_init(&hsic_xprtp->ss_reset_lock);
+	hsic_xprtp->ss_reset = 0;
+	hsic_xprtp->xprt_option = 0;
+
+	/* Propagate registration failures instead of silently dropping
+	 * them; still returns 0 on success as before. */
+	return msm_ipc_router_hsic_driver_register(hsic_xprtp);
+}
+
+/**
+ * parse_devicetree() - parse device tree binding
+ *
+ * @node: pointer to device tree node
+ * @hsic_xprt_config: pointer to HSIC XPRT configurations
+ *
+ * @return: 0 on success, -ENODEV on failure.
+ */
+static int parse_devicetree(struct device_node *node,
+		struct msm_ipc_router_hsic_xprt_config *hsic_xprt_config)
+{
+	int ret;
+	int link_id;
+	int version;
+	char *key;
+	const char *ch_name;
+	const char *remote_ss;
+
+	/* "key" tracks the property being parsed so the error path can
+	 * report exactly which one was missing. */
+	key = "qcom,ch-name";
+	ch_name = of_get_property(node, key, NULL);
+	if (!ch_name)
+		goto error;
+	strlcpy(hsic_xprt_config->ch_name, ch_name, XPRT_NAME_LEN);
+
+	key = "qcom,xprt-remote";
+	remote_ss = of_get_property(node, key, NULL);
+	if (!remote_ss)
+		goto error;
+
+	key = "qcom,xprt-linkid";
+	ret = of_property_read_u32(node, key, &link_id);
+	if (ret)
+		goto error;
+	hsic_xprt_config->link_id = link_id;
+
+	key = "qcom,xprt-version";
+	ret = of_property_read_u32(node, key, &version);
+	if (ret)
+		goto error;
+	hsic_xprt_config->xprt_version = version;
+
+	/* XPRT name is "<remote subsystem>_<channel name>". */
+	scnprintf(hsic_xprt_config->xprt_name, XPRT_NAME_LEN, "%s_%s",
+		  remote_ss, hsic_xprt_config->ch_name);
+
+	return 0;
+
+error:
+	pr_err("%s: missing key: %s\n", __func__, key);
+	return -ENODEV;
+}
+
+/**
+ * msm_ipc_router_hsic_xprt_probe() - Probe an HSIC xprt
+ * @pdev: Platform device corresponding to HSIC xprt.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying device tree driver registers
+ * a platform device, mapped to an HSIC transport.
+ */
+/**
+ * msm_ipc_router_hsic_xprt_probe() - Probe an HSIC xprt
+ * @pdev: Platform device corresponding to HSIC xprt.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * Called when the device tree driver registers a platform device mapped
+ * to an HSIC transport; parses the DT config and initializes the XPRT.
+ */
+static int msm_ipc_router_hsic_xprt_probe(
+				struct platform_device *pdev)
+{
+	/* Initialize to 0: the original returned an uninitialized value
+	 * when probed without an of_node (nothing to do => success). */
+	int ret = 0;
+	struct msm_ipc_router_hsic_xprt_config hsic_xprt_config;
+
+	if (pdev && pdev->dev.of_node) {
+		/* Tell the fallback worker that DT probing happened. */
+		mutex_lock(&hsic_remote_xprt_list_lock_lha1);
+		ipc_router_hsic_xprt_probe_done = 1;
+		mutex_unlock(&hsic_remote_xprt_list_lock_lha1);
+
+		ret = parse_devicetree(pdev->dev.of_node,
+				       &hsic_xprt_config);
+		if (ret) {
+			pr_err(" failed to parse device tree\n");
+			return ret;
+		}
+
+		ret = msm_ipc_router_hsic_config_init(
+				&hsic_xprt_config);
+		if (ret) {
+			pr_err(" %s init failed\n", __func__);
+			return ret;
+		}
+	}
+	return ret;
+}
+
+/**
+ * ipc_router_hsic_xprt_probe_worker() - probe worker for non DT configurations
+ *
+ * @work: work item to process
+ *
+ * This function is called by schedule_delay_work after 3sec and check if
+ * device tree probe is done or not. If device tree probe fails the default
+ * configurations read from static array.
+ */
+static void ipc_router_hsic_xprt_probe_worker(struct work_struct *work)
+{
+	int i, ret;
+
+	/* Static config table must stay in sync with NUM_HSIC_XPRTS. */
+	BUG_ON(ARRAY_SIZE(hsic_xprt_cfg) != NUM_HSIC_XPRTS);
+
+	/* Fall back to the static configuration table only if no device
+	 * tree probe happened within the wait timeout. */
+	mutex_lock(&hsic_remote_xprt_list_lock_lha1);
+	if (!ipc_router_hsic_xprt_probe_done) {
+		mutex_unlock(&hsic_remote_xprt_list_lock_lha1);
+		for (i = 0; i < ARRAY_SIZE(hsic_xprt_cfg); i++) {
+			ret = msm_ipc_router_hsic_config_init(
+						&hsic_xprt_cfg[i]);
+			if (ret)
+				pr_err(" %s init failed config idx %d\n",
+					__func__, i);
+		}
+		mutex_lock(&hsic_remote_xprt_list_lock_lha1);
+	}
+	mutex_unlock(&hsic_remote_xprt_list_lock_lha1);
+}
+
+static struct of_device_id msm_ipc_router_hsic_xprt_match_table[] = {
+ { .compatible = "qcom,ipc_router_hsic_xprt" },
+ {},
+};
+
+static struct platform_driver msm_ipc_router_hsic_xprt_driver = {
+ .probe = msm_ipc_router_hsic_xprt_probe,
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_ipc_router_hsic_xprt_match_table,
+ },
+};
+
+static int __init msm_ipc_router_hsic_xprt_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&msm_ipc_router_hsic_xprt_driver);
+	if (rc) {
+		pr_err("%s: msm_ipc_router_hsic_xprt_driver register failed %d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	/* Schedule the non-DT fallback; it only acts if no DT probe has
+	 * completed within IPC_ROUTER_HSIC_XPRT_WAIT_TIMEOUT ms. */
+	INIT_DELAYED_WORK(&ipc_router_hsic_xprt_probe_work,
+			  ipc_router_hsic_xprt_probe_worker);
+	schedule_delayed_work(&ipc_router_hsic_xprt_probe_work,
+			msecs_to_jiffies(IPC_ROUTER_HSIC_XPRT_WAIT_TIMEOUT));
+	return 0;
+}
+
+module_init(msm_ipc_router_hsic_xprt_init);
+MODULE_DESCRIPTION("IPC Router HSIC XPRT");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/ipc_router_smd_xprt.c b/drivers/soc/qcom/ipc_router_smd_xprt.c
new file mode 100644
index 000000000000..690682419435
--- /dev/null
+++ b/drivers/soc/qcom/ipc_router_smd_xprt.c
@@ -0,0 +1,923 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * IPC ROUTER SMD XPRT module.
+ */
+#define DEBUG
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/ipc_router_xprt.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include <soc/qcom/smd.h>
+#include <soc/qcom/smsm.h>
+#include <soc/qcom/subsystem_restart.h>
+
+static int msm_ipc_router_smd_xprt_debug_mask;
+module_param_named(debug_mask, msm_ipc_router_smd_xprt_debug_mask,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#if defined(DEBUG)
+#define D(x...) do { \
+if (msm_ipc_router_smd_xprt_debug_mask) \
+ pr_info(x); \
+} while (0)
+#else
+#define D(x...) do { } while (0)
+#endif
+
+#define MIN_FRAG_SZ (IPC_ROUTER_HDR_SIZE + sizeof(union rr_control_msg))
+
+#define NUM_SMD_XPRTS 4
+#define XPRT_NAME_LEN (SMD_MAX_CH_NAME_LEN + 12)
+
+/**
+ * msm_ipc_router_smd_xprt - IPC Router's SMD XPRT structure
+ * @list: IPC router's SMD XPRTs list.
+ * @ch_name: Name of the SMD channel exported by the SMD driver.
+ * @xprt_name: Name of the XPRT to be registered with IPC Router.
+ * @edge: SMD channel edge.
+ * @driver: Platform driver registered by this XPRT.
+ * @xprt: IPC Router XPRT structure to contain XPRT specific info.
+ * @channel: SMD channel specific info.
+ * @smd_xprt_wq: Workqueue to queue read & other XPRT related works.
+ * @write_avail_wait_q: wait queue for writer thread.
+ * @in_pkt: Pointer to any partially read packet.
+ * @is_partial_in_pkt: check pkt completion.
+ * @read_work: Read Work to perform read operation from SMD.
+ * @ss_reset_lock: Lock to protect access to the ss_reset flag.
+ * @ss_reset: flag used to check SSR state.
+ * @pil: handle to the remote subsystem.
+ * @sft_close_complete: Variable to indicate completion of SSR handling
+ *			by IPC Router.
+ * @xprt_version: IPC Router header version supported by this XPRT.
+ * @xprt_option: XPRT specific options to be handled by IPC Router.
+ */
+struct msm_ipc_router_smd_xprt {
+	struct list_head list;
+	char ch_name[SMD_MAX_CH_NAME_LEN];
+	char xprt_name[XPRT_NAME_LEN];
+	uint32_t edge;
+	struct platform_driver driver;
+	struct msm_ipc_router_xprt xprt;
+	smd_channel_t *channel;
+	struct workqueue_struct *smd_xprt_wq;
+	wait_queue_head_t write_avail_wait_q;
+	struct rr_packet *in_pkt;
+	int is_partial_in_pkt;
+	struct delayed_work read_work;
+	spinlock_t ss_reset_lock; /*Subsystem reset lock*/
+	int ss_reset;
+	void *pil;
+	struct completion sft_close_complete;
+	unsigned xprt_version;
+	unsigned xprt_option;
+};
+
+/**
+ * msm_ipc_router_smd_xprt_work - deferred OPEN/CLOSE event notification
+ * @xprt: XPRT on which the channel event occurred.
+ * @work: Work item processed on the XPRT's workqueue; the handler frees
+ *	  this structure when done.
+ */
+struct msm_ipc_router_smd_xprt_work {
+	struct msm_ipc_router_xprt *xprt;
+	struct work_struct work;
+};
+
+static void smd_xprt_read_data(struct work_struct *work);
+static void smd_xprt_open_event(struct work_struct *work);
+static void smd_xprt_close_event(struct work_struct *work);
+
+/**
+ * msm_ipc_router_smd_xprt_config - Config. Info. of each SMD XPRT
+ * @ch_name: Name of the SMD endpoint exported by SMD driver.
+ * @xprt_name: Name of the XPRT to be registered with IPC Router.
+ * @edge: ID to differentiate among multiple SMD endpoints.
+ * @link_id: Network Cluster ID to which this XPRT belongs to.
+ * @xprt_version: IPC Router header version supported by this XPRT.
+ * @xprt_option: XPRT specific options to be handled by IPC Router.
+ */
+struct msm_ipc_router_smd_xprt_config {
+	char ch_name[SMD_MAX_CH_NAME_LEN];
+	char xprt_name[XPRT_NAME_LEN];
+	uint32_t edge;
+	uint32_t link_id;
+	unsigned xprt_version;
+	unsigned xprt_option;
+};
+
+/* Default (non-DT) XPRT configurations, consumed only by the fallback
+ * probe worker in this file; static to keep it out of the global namespace.
+ */
+static struct msm_ipc_router_smd_xprt_config smd_xprt_cfg[] = {
+	{"RPCRPY_CNTL", "ipc_rtr_smd_rpcrpy_cntl", SMD_APPS_MODEM, 1, 1},
+	{"IPCRTR", "ipc_rtr_smd_ipcrtr", SMD_APPS_MODEM, 1, 1},
+	{"IPCRTR", "ipc_rtr_q6_ipcrtr", SMD_APPS_QDSP, 1, 1},
+	{"IPCRTR", "ipc_rtr_wcnss_ipcrtr", SMD_APPS_WCNSS, 1, 1},
+};
+
+#define MODULE_NAME "ipc_router_smd_xprt"
+#define IPC_ROUTER_SMD_XPRT_WAIT_TIMEOUT 3000
+static int ipc_router_smd_xprt_probe_done;
+static struct delayed_work ipc_router_smd_xprt_probe_work;
+static DEFINE_MUTEX(smd_remote_xprt_list_lock_lha1);
+static LIST_HEAD(smd_remote_xprt_list);
+
+static void pil_vote_load_worker(struct work_struct *work);
+static void pil_vote_unload_worker(struct work_struct *work);
+static struct workqueue_struct *pil_vote_wq;
+
+/* Return the IPC Router header version supported by @xprt,
+ * or -EINVAL if @xprt is NULL.
+ */
+static int msm_ipc_router_smd_get_xprt_version(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_smd_xprt *xprtp;
+
+	if (!xprt)
+		return -EINVAL;
+
+	xprtp = container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);
+	return (int)xprtp->xprt_version;
+}
+
+/* Return the XPRT-specific option flags of @xprt,
+ * or -EINVAL if @xprt is NULL.
+ */
+static int msm_ipc_router_smd_get_xprt_option(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_smd_xprt *xprtp;
+
+	if (!xprt)
+		return -EINVAL;
+
+	xprtp = container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);
+	return (int)xprtp->xprt_option;
+}
+
+/* Report the number of bytes that can currently be written to the
+ * underlying SMD channel of @xprt.
+ */
+static int msm_ipc_router_smd_remote_write_avail(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_smd_xprt *xprtp;
+
+	xprtp = container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);
+	return smd_write_avail(xprtp->channel);
+}
+
+/* Write one rr_packet (@data, total length @len) to the SMD channel of
+ * @xprt.  Returns @len on success, -EINVAL on bad arguments, -ENETRESET
+ * if the subsystem resets mid-write, or the smd_write_start() error.
+ * May sleep (msleep + wait_event); must not be called in atomic context.
+ */
+static int msm_ipc_router_smd_remote_write(void *data,
+					   uint32_t len,
+					   struct msm_ipc_router_xprt *xprt)
+{
+	struct rr_packet *pkt = (struct rr_packet *)data;
+	struct sk_buff *ipc_rtr_pkt;
+	int offset, sz_written = 0;
+	int ret, num_retries = 0;
+	unsigned long flags;
+	struct msm_ipc_router_smd_xprt *smd_xprtp =
+		container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);
+
+	if (!pkt)
+		return -EINVAL;
+
+	if (!len || pkt->length != len)
+		return -EINVAL;
+
+	/* Reserve @len bytes in the channel; retry up to 5 times at 50 ms
+	 * intervals, bailing out immediately on subsystem reset.
+	 */
+	while ((ret = smd_write_start(smd_xprtp->channel, len)) < 0) {
+		spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
+		if (smd_xprtp->ss_reset) {
+			spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock,
+						flags);
+			pr_err("%s: %s chnl reset\n", __func__, xprt->name);
+			return -ENETRESET;
+		}
+		spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
+		if (num_retries >= 5) {
+			pr_err("%s: Error %d @smd_write_start for %s\n",
+				__func__, ret, xprt->name);
+			return ret;
+		}
+		msleep(50);
+		num_retries++;
+	}
+
+	D("%s: Ready to write %d bytes\n", __func__, len);
+	/* Stream each fragment; a fragment may need several segment writes. */
+	skb_queue_walk(pkt->pkt_fragment_q, ipc_rtr_pkt) {
+		offset = 0;
+		while (offset < ipc_rtr_pkt->len) {
+			/* Enable the read interrupt only while we must wait
+			 * for segment space; the DATA event handler wakes
+			 * write_avail_wait_q.
+			 */
+			if (!smd_write_segment_avail(smd_xprtp->channel))
+				smd_enable_read_intr(smd_xprtp->channel);
+
+			wait_event(smd_xprtp->write_avail_wait_q,
+				(smd_write_segment_avail(smd_xprtp->channel) ||
+				smd_xprtp->ss_reset));
+			smd_disable_read_intr(smd_xprtp->channel);
+			/* Re-check reset under the lock before writing. */
+			spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
+			if (smd_xprtp->ss_reset) {
+				spin_unlock_irqrestore(
+					&smd_xprtp->ss_reset_lock, flags);
+				pr_err("%s: %s chnl reset\n",
+					__func__, xprt->name);
+				return -ENETRESET;
+			}
+			spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock,
+						flags);
+
+			sz_written = smd_write_segment(smd_xprtp->channel,
+					  ipc_rtr_pkt->data + offset,
+					  (ipc_rtr_pkt->len - offset), 0);
+			offset += sz_written;
+			sz_written = 0;
+		}
+		D("%s: Wrote %d bytes over %s\n",
+		  __func__, offset, xprt->name);
+	}
+
+	if (!smd_write_end(smd_xprtp->channel))
+		D("%s: Finished writing\n", __func__);
+	return len;
+}
+
+/* Close the SMD channel of @xprt and drop this XPRT's PIL vote on the
+ * remote subsystem, if one is held.  Returns the smd_close() result.
+ */
+static int msm_ipc_router_smd_remote_close(struct msm_ipc_router_xprt *xprt)
+{
+	int rc;
+	struct msm_ipc_router_smd_xprt *smd_xprtp =
+		container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);
+
+	rc = smd_close(smd_xprtp->channel);
+	if (smd_xprtp->pil) {
+		subsystem_put(smd_xprtp->pil);
+		smd_xprtp->pil = NULL;
+	}
+	return rc;
+}
+
+/* Callback from IPC Router once it has finished SSR cleanup for @xprt;
+ * releases the waiter in smd_xprt_close_event().
+ */
+static void smd_xprt_sft_close_done(struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_smd_xprt *smd_xprtp =
+		container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);
+
+	complete_all(&smd_xprtp->sft_close_complete);
+}
+
+/* Read work handler: drain complete/partial packets from the SMD channel
+ * into rr_packet fragment queues and hand complete packets to IPC Router.
+ *
+ * Fix vs. original: every release_pkt()/kfree() of smd_xprtp->in_pkt now
+ * also NULLs the pointer.  The original left it dangling, so a subsequent
+ * invocation taking the ss_reset path (or any later cleanup checking
+ * in_pkt) would release the same packet twice.
+ */
+static void smd_xprt_read_data(struct work_struct *work)
+{
+	int pkt_size, sz_read, sz;
+	struct sk_buff *ipc_rtr_pkt;
+	void *data;
+	unsigned long flags;
+	struct delayed_work *rwork = to_delayed_work(work);
+	struct msm_ipc_router_smd_xprt *smd_xprtp =
+		container_of(rwork, struct msm_ipc_router_smd_xprt, read_work);
+
+	spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
+	if (smd_xprtp->ss_reset) {
+		spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
+		/* Subsystem went down: discard any partially read packet. */
+		if (smd_xprtp->in_pkt) {
+			release_pkt(smd_xprtp->in_pkt);
+			smd_xprtp->in_pkt = NULL;
+		}
+		smd_xprtp->is_partial_in_pkt = 0;
+		pr_err("%s: %s channel reset\n",
+			__func__, smd_xprtp->xprt.name);
+		return;
+	}
+	spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
+
+	D("%s pkt_size: %d, read_avail: %d\n", __func__,
+		smd_cur_packet_size(smd_xprtp->channel),
+		smd_read_avail(smd_xprtp->channel));
+	while ((pkt_size = smd_cur_packet_size(smd_xprtp->channel)) &&
+		smd_read_avail(smd_xprtp->channel)) {
+		if (!smd_xprtp->is_partial_in_pkt) {
+			/* Start a fresh packet with an empty fragment queue. */
+			smd_xprtp->in_pkt = kzalloc(sizeof(struct rr_packet),
+						    GFP_KERNEL);
+			if (!smd_xprtp->in_pkt) {
+				pr_err("%s: Couldn't alloc rr_packet\n",
+					__func__);
+				return;
+			}
+
+			smd_xprtp->in_pkt->pkt_fragment_q =
+				kmalloc(sizeof(struct sk_buff_head),
+					GFP_KERNEL);
+			if (!smd_xprtp->in_pkt->pkt_fragment_q) {
+				pr_err("%s: Couldn't alloc pkt_fragment_q\n",
+					__func__);
+				kfree(smd_xprtp->in_pkt);
+				smd_xprtp->in_pkt = NULL;
+				return;
+			}
+			skb_queue_head_init(smd_xprtp->in_pkt->pkt_fragment_q);
+			smd_xprtp->is_partial_in_pkt = 1;
+			D("%s: Allocated rr_packet\n", __func__);
+		}
+
+		/* Wait for at least a header-sized fragment (or the whole
+		 * packet when it is smaller than MIN_FRAG_SZ) to arrive.
+		 */
+		if (((pkt_size >= MIN_FRAG_SZ) &&
+		     (smd_read_avail(smd_xprtp->channel) < MIN_FRAG_SZ)) ||
+		    ((pkt_size < MIN_FRAG_SZ) &&
+		     (smd_read_avail(smd_xprtp->channel) < pkt_size)))
+			return;
+
+		sz = smd_read_avail(smd_xprtp->channel);
+		/* Halve the request on allocation failure; below PAGE_SIZE/2
+		 * give up and retry the whole read in 100 ms.
+		 */
+		do {
+			ipc_rtr_pkt = alloc_skb(sz, GFP_KERNEL);
+			if (!ipc_rtr_pkt) {
+				if (sz <= (PAGE_SIZE/2)) {
+					queue_delayed_work(
+						smd_xprtp->smd_xprt_wq,
+						&smd_xprtp->read_work,
+						msecs_to_jiffies(100));
+					return;
+				}
+				sz = sz / 2;
+			}
+		} while (!ipc_rtr_pkt);
+
+		D("%s: Allocated the sk_buff of size %d\n", __func__, sz);
+		data = skb_put(ipc_rtr_pkt, sz);
+		sz_read = smd_read(smd_xprtp->channel, data, sz);
+		if (sz_read != sz) {
+			pr_err("%s: Couldn't read %s completely\n",
+				__func__, smd_xprtp->xprt.name);
+			kfree_skb(ipc_rtr_pkt);
+			release_pkt(smd_xprtp->in_pkt);
+			smd_xprtp->in_pkt = NULL;
+			smd_xprtp->is_partial_in_pkt = 0;
+			return;
+		}
+		skb_queue_tail(smd_xprtp->in_pkt->pkt_fragment_q, ipc_rtr_pkt);
+		smd_xprtp->in_pkt->length += sz_read;
+		if (sz_read != pkt_size)
+			smd_xprtp->is_partial_in_pkt = 1;
+		else
+			smd_xprtp->is_partial_in_pkt = 0;
+
+		if (!smd_xprtp->is_partial_in_pkt) {
+			/* Packet complete: notify IPC Router, then drop our
+			 * reference to it.
+			 */
+			D("%s: Packet size read %d\n",
+			  __func__, smd_xprtp->in_pkt->length);
+			msm_ipc_router_xprt_notify(&smd_xprtp->xprt,
+				IPC_ROUTER_XPRT_EVENT_DATA,
+				(void *)smd_xprtp->in_pkt);
+			release_pkt(smd_xprtp->in_pkt);
+			smd_xprtp->in_pkt = NULL;
+		}
+	}
+}
+
+/* Workqueue handler for SMD_EVENT_OPEN: clear the reset flag and tell
+ * IPC Router the XPRT is open.  Frees the event container allocated in
+ * msm_ipc_router_smd_remote_notify().
+ */
+static void smd_xprt_open_event(struct work_struct *work)
+{
+	struct msm_ipc_router_smd_xprt_work *xprt_work =
+		container_of(work, struct msm_ipc_router_smd_xprt_work, work);
+	struct msm_ipc_router_smd_xprt *smd_xprtp =
+		container_of(xprt_work->xprt,
+			     struct msm_ipc_router_smd_xprt, xprt);
+	unsigned long flags;
+
+	spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
+	smd_xprtp->ss_reset = 0;
+	spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
+	msm_ipc_router_xprt_notify(xprt_work->xprt,
+				IPC_ROUTER_XPRT_EVENT_OPEN, NULL);
+	D("%s: Notified IPC Router of %s OPEN\n",
+	   __func__, xprt_work->xprt->name);
+	kfree(xprt_work);
+}
+
+/* Workqueue handler for SMD_EVENT_CLOSE: notify IPC Router and block
+ * until it signals SSR cleanup completion via smd_xprt_sft_close_done().
+ * Frees the event container allocated in the notify callback.
+ */
+static void smd_xprt_close_event(struct work_struct *work)
+{
+	struct msm_ipc_router_smd_xprt_work *xprt_work =
+		container_of(work, struct msm_ipc_router_smd_xprt_work, work);
+	struct msm_ipc_router_smd_xprt *smd_xprtp =
+		container_of(xprt_work->xprt,
+			     struct msm_ipc_router_smd_xprt, xprt);
+
+	init_completion(&smd_xprtp->sft_close_complete);
+	msm_ipc_router_xprt_notify(xprt_work->xprt,
+				IPC_ROUTER_XPRT_EVENT_CLOSE, NULL);
+	D("%s: Notified IPC Router of %s CLOSE\n",
+	   __func__, xprt_work->xprt->name);
+	wait_for_completion(&smd_xprtp->sft_close_complete);
+	kfree(xprt_work);
+}
+
+/* SMD channel event callback registered via smd_named_open_on_edge().
+ * Uses GFP_ATOMIC and defers all sleeping work to smd_xprt_wq, since
+ * this callback may run in atomic context - NOTE(review): confirm against
+ * the SMD driver's callback contract.
+ */
+static void msm_ipc_router_smd_remote_notify(void *_dev, unsigned event)
+{
+	unsigned long flags;
+	struct msm_ipc_router_smd_xprt *smd_xprtp;
+	struct msm_ipc_router_smd_xprt_work *xprt_work;
+
+	smd_xprtp = (struct msm_ipc_router_smd_xprt *)_dev;
+	if (!smd_xprtp)
+		return;
+
+	switch (event) {
+	case SMD_EVENT_DATA:
+		/* Kick the reader; wake any writer waiting for segment room. */
+		if (smd_read_avail(smd_xprtp->channel))
+			queue_delayed_work(smd_xprtp->smd_xprt_wq,
+					   &smd_xprtp->read_work, 0);
+		if (smd_write_segment_avail(smd_xprtp->channel))
+			wake_up(&smd_xprtp->write_avail_wait_q);
+		break;
+
+	case SMD_EVENT_OPEN:
+		xprt_work = kmalloc(sizeof(struct msm_ipc_router_smd_xprt_work),
+				    GFP_ATOMIC);
+		if (!xprt_work) {
+			pr_err("%s: Couldn't notify %d event to IPC Router\n",
+				__func__, event);
+			return;
+		}
+		xprt_work->xprt = &smd_xprtp->xprt;
+		INIT_WORK(&xprt_work->work, smd_xprt_open_event);
+		queue_work(smd_xprtp->smd_xprt_wq, &xprt_work->work);
+		break;
+
+	case SMD_EVENT_CLOSE:
+		/* Mark the reset first so in-flight writers abort promptly. */
+		spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
+		smd_xprtp->ss_reset = 1;
+		spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
+		wake_up(&smd_xprtp->write_avail_wait_q);
+		xprt_work = kmalloc(sizeof(struct msm_ipc_router_smd_xprt_work),
+				    GFP_ATOMIC);
+		if (!xprt_work) {
+			pr_err("%s: Couldn't notify %d event to IPC Router\n",
+				__func__, event);
+			return;
+		}
+		xprt_work->xprt = &smd_xprtp->xprt;
+		INIT_WORK(&xprt_work->work, smd_xprt_close_event);
+		queue_work(smd_xprtp->smd_xprt_wq, &xprt_work->work);
+		break;
+	}
+}
+
+/* Vote to bring up the remote subsystem on @edge via PIL.
+ * Returns the subsystem handle, or NULL if the edge has no PIL string
+ * or the load fails.
+ */
+static void *msm_ipc_load_subsystem(uint32_t edge)
+{
+	const char *pil_str;
+	void *handle;
+
+	pil_str = smd_edge_to_pil_str(edge);
+	if (IS_ERR_OR_NULL(pil_str))
+		return NULL;
+
+	handle = subsystem_get(pil_str);
+	if (IS_ERR(handle)) {
+		pr_err("%s: Failed to load %s\n",
+			__func__, pil_str);
+		return NULL;
+	}
+	return handle;
+}
+
+/**
+ * find_smd_xprt_list() - Find xprt item specific to an SMD endpoint
+ * @pdev: Platform device registered by the SMD driver
+ *
+ * @return: pointer to msm_ipc_router_smd_xprt if matching endpoint is found,
+ *	    else NULL.
+ *
+ * This function is used to find specific xprt item from the global xprt list.
+ * An entry matches when both the channel name and the edge match the
+ * platform device's name and id.
+ */
+static struct msm_ipc_router_smd_xprt *
+		find_smd_xprt_list(struct platform_device *pdev)
+{
+	struct msm_ipc_router_smd_xprt *smd_xprtp;
+
+	mutex_lock(&smd_remote_xprt_list_lock_lha1);
+	list_for_each_entry(smd_xprtp, &smd_remote_xprt_list, list) {
+		if (!strcmp(pdev->name, smd_xprtp->ch_name)
+				&& (pdev->id == smd_xprtp->edge)) {
+			mutex_unlock(&smd_remote_xprt_list_lock_lha1);
+			return smd_xprtp;
+		}
+	}
+	mutex_unlock(&smd_remote_xprt_list_lock_lha1);
+	return NULL;
+}
+
+/**
+ * msm_ipc_router_smd_remote_probe() - Probe an SMD endpoint
+ *
+ * @pdev: Platform device corresponding to SMD endpoint.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying SMD driver registers
+ * a platform device, mapped to SMD endpoint.  It creates the XPRT's
+ * workqueue, votes the remote subsystem up and opens the SMD channel.
+ */
+static int msm_ipc_router_smd_remote_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct msm_ipc_router_smd_xprt *smd_xprtp;
+
+	smd_xprtp = find_smd_xprt_list(pdev);
+	if (!smd_xprtp) {
+		pr_err("%s No device with name %s\n", __func__, pdev->name);
+		return -EPROBE_DEFER;
+	}
+	/* Defensive re-check of the match returned by find_smd_xprt_list(). */
+	if (strcmp(pdev->name, smd_xprtp->ch_name)
+			|| (pdev->id != smd_xprtp->edge)) {
+		pr_err("%s wrong item name:%s edge:%d\n",
+			__func__, smd_xprtp->ch_name, smd_xprtp->edge);
+		return -ENODEV;
+	}
+	smd_xprtp->smd_xprt_wq =
+		create_singlethread_workqueue(pdev->name);
+	if (!smd_xprtp->smd_xprt_wq) {
+		pr_err("%s: WQ creation failed for %s\n",
+			__func__, pdev->name);
+		/* Allocation failure is -ENOMEM, not -EFAULT (bad address). */
+		return -ENOMEM;
+	}
+
+	/* Vote the remote subsystem up before opening the channel. */
+	smd_xprtp->pil = msm_ipc_load_subsystem(
+				smd_xprtp->edge);
+	rc = smd_named_open_on_edge(smd_xprtp->ch_name,
+				    smd_xprtp->edge,
+				    &smd_xprtp->channel,
+				    smd_xprtp,
+				    msm_ipc_router_smd_remote_notify);
+	if (rc < 0) {
+		pr_err("%s: Channel open failed for %s\n",
+			__func__, smd_xprtp->ch_name);
+		if (smd_xprtp->pil) {
+			subsystem_put(smd_xprtp->pil);
+			smd_xprtp->pil = NULL;
+		}
+		destroy_workqueue(smd_xprtp->smd_xprt_wq);
+		return rc;
+	}
+
+	/* Reads are driven from the workqueue; the read interrupt is only
+	 * enabled while a writer waits for segment space.
+	 */
+	smd_disable_read_intr(smd_xprtp->channel);
+
+	smsm_change_state(SMSM_APPS_STATE, 0, SMSM_RPCINIT);
+
+	return 0;
+}
+
+/**
+ * pil_vote_info - book-keeping for one modem load/unload vote
+ * @pil_handle: Handle returned by subsystem_get(); NULL if the load failed.
+ * @load_work: Work item performing the load vote.
+ * @unload_work: Work item performing the unload vote; its handler frees
+ *		 this structure.
+ */
+struct pil_vote_info {
+	void *pil_handle;
+	struct work_struct load_work;
+	struct work_struct unload_work;
+};
+
+/**
+ * pil_vote_load_worker() - Process vote to load the modem
+ *
+ * @work: Work item to process
+ *
+ * This function is called to process votes to load the modem that have been
+ * queued by msm_ipc_load_default_node().  On failure pil_handle is left
+ * NULL so the matching unload vote becomes a no-op.
+ */
+static void pil_vote_load_worker(struct work_struct *work)
+{
+	const char *peripheral;
+	struct pil_vote_info *vote_info;
+
+	vote_info = container_of(work, struct pil_vote_info, load_work);
+	peripheral = smd_edge_to_pil_str(SMD_APPS_MODEM);
+
+	/* Only the modem peripheral is eligible for default-node voting. */
+	if (!IS_ERR_OR_NULL(peripheral) && !strcmp(peripheral, "modem")) {
+		vote_info->pil_handle = subsystem_get(peripheral);
+		if (IS_ERR(vote_info->pil_handle)) {
+			pr_err("%s: Failed to load %s\n",
+				__func__, peripheral);
+			vote_info->pil_handle = NULL;
+		}
+	} else {
+		vote_info->pil_handle = NULL;
+	}
+}
+
+/**
+ * pil_vote_unload_worker() - Process vote to unload the modem
+ *
+ * @work: Work item to process
+ *
+ * This function is called to process votes to unload the modem that have been
+ * queued by msm_ipc_unload_default_node().  It releases the PIL handle (if
+ * the corresponding load succeeded) and frees the vote_info structure
+ * allocated by msm_ipc_load_default_node().
+ */
+static void pil_vote_unload_worker(struct work_struct *work)
+{
+	struct pil_vote_info *vote_info;
+
+	vote_info = container_of(work, struct pil_vote_info, unload_work);
+
+	if (vote_info->pil_handle) {
+		subsystem_put(vote_info->pil_handle);
+		vote_info->pil_handle = NULL;
+	}
+	kfree(vote_info);
+}
+
+/**
+ * msm_ipc_load_default_node() - Queue a vote to load the modem.
+ *
+ * @return: PIL vote info structure on success, NULL on failure.
+ *
+ * This function places a work item that loads the modem on the
+ * single-threaded workqueue used for processing PIL votes to load
+ * or unload the modem.  The returned pointer must later be passed to
+ * msm_ipc_unload_default_node(), which frees it.
+ */
+void *msm_ipc_load_default_node(void)
+{
+	struct pil_vote_info *vote_info;
+
+	vote_info = kmalloc(sizeof(struct pil_vote_info), GFP_KERNEL);
+	if (vote_info == NULL) {
+		pr_err("%s: mem alloc for pil_vote_info failed\n", __func__);
+		return NULL;
+	}
+
+	INIT_WORK(&vote_info->load_work, pil_vote_load_worker);
+	queue_work(pil_vote_wq, &vote_info->load_work);
+
+	return vote_info;
+}
+EXPORT_SYMBOL(msm_ipc_load_default_node);
+
+/**
+ * msm_ipc_unload_default_node() - Queue a vote to unload the modem.
+ *
+ * @pil_vote: PIL vote info structure returned by msm_ipc_load_default_node().
+ *
+ * Queues a work item on the single-threaded PIL vote workqueue that drops
+ * the modem vote and frees @pil_vote.  A NULL @pil_vote is ignored.
+ */
+void msm_ipc_unload_default_node(void *pil_vote)
+{
+	struct pil_vote_info *vote_info = pil_vote;
+
+	if (!vote_info)
+		return;
+
+	INIT_WORK(&vote_info->unload_work, pil_vote_unload_worker);
+	queue_work(pil_vote_wq, &vote_info->unload_work);
+}
+EXPORT_SYMBOL(msm_ipc_unload_default_node);
+
+/**
+ * msm_ipc_router_smd_driver_register() - register SMD XPRT drivers
+ *
+ * @smd_xprtp: pointer to IPC router SMD xprt structure.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when a new XPRT is added.  It registers a
+ * platform driver keyed on the channel name unless one is already
+ * registered for that name (e.g. the same channel on another edge).
+ *
+ * Fix vs. original: on platform_driver_register() failure the entry is
+ * now removed from the global list again.  The original left it there,
+ * so a later registration of the same channel name would be skipped as
+ * "already registered" even though no driver exists for it.
+ */
+static int msm_ipc_router_smd_driver_register(
+			struct msm_ipc_router_smd_xprt *smd_xprtp)
+{
+	int ret;
+	struct msm_ipc_router_smd_xprt *item;
+	unsigned already_registered = 0;
+
+	mutex_lock(&smd_remote_xprt_list_lock_lha1);
+	list_for_each_entry(item, &smd_remote_xprt_list, list) {
+		if (!strcmp(smd_xprtp->ch_name, item->ch_name))
+			already_registered = 1;
+	}
+	list_add(&smd_xprtp->list, &smd_remote_xprt_list);
+	mutex_unlock(&smd_remote_xprt_list_lock_lha1);
+
+	if (!already_registered) {
+		smd_xprtp->driver.driver.name = smd_xprtp->ch_name;
+		smd_xprtp->driver.driver.owner = THIS_MODULE;
+		smd_xprtp->driver.probe = msm_ipc_router_smd_remote_probe;
+
+		ret = platform_driver_register(&smd_xprtp->driver);
+		if (ret) {
+			pr_err("%s: Failed to register platform driver [%s]\n",
+				__func__, smd_xprtp->ch_name);
+			mutex_lock(&smd_remote_xprt_list_lock_lha1);
+			list_del(&smd_xprtp->list);
+			mutex_unlock(&smd_remote_xprt_list_lock_lha1);
+			return ret;
+		}
+	} else {
+		pr_err("%s Already driver registered %s\n",
+			__func__, smd_xprtp->ch_name);
+	}
+	return 0;
+}
+
+/**
+ * msm_ipc_router_smd_config_init() - init SMD xprt configs
+ *
+ * @smd_xprt_config: pointer to SMD xprt configurations.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called to initialize the SMD XPRT pointer with
+ * the SMD XPRT configurations either from device tree or static arrays.
+ *
+ * Fixes vs. original: kzalloc() returns NULL on failure, never an
+ * ERR_PTR, so the check is a plain NULL test; and the return value of
+ * msm_ipc_router_smd_driver_register() is propagated instead of being
+ * silently dropped.
+ */
+static int msm_ipc_router_smd_config_init(
+		struct msm_ipc_router_smd_xprt_config *smd_xprt_config)
+{
+	struct msm_ipc_router_smd_xprt *smd_xprtp;
+
+	smd_xprtp = kzalloc(sizeof(struct msm_ipc_router_smd_xprt), GFP_KERNEL);
+	if (!smd_xprtp) {
+		pr_err("%s: kzalloc() failed for smd_xprtp id:%s\n",
+			__func__, smd_xprt_config->ch_name);
+		return -ENOMEM;
+	}
+
+	smd_xprtp->xprt.link_id = smd_xprt_config->link_id;
+	smd_xprtp->xprt_version = smd_xprt_config->xprt_version;
+	smd_xprtp->edge = smd_xprt_config->edge;
+	smd_xprtp->xprt_option = smd_xprt_config->xprt_option;
+
+	strlcpy(smd_xprtp->ch_name, smd_xprt_config->ch_name,
+					SMD_MAX_CH_NAME_LEN);
+
+	strlcpy(smd_xprtp->xprt_name, smd_xprt_config->xprt_name,
+					XPRT_NAME_LEN);
+	smd_xprtp->xprt.name = smd_xprtp->xprt_name;
+
+	/* Wire up the XPRT operations; reads are pushed via the notify
+	 * callback, so read/read_avail stay NULL.
+	 */
+	smd_xprtp->xprt.get_version =
+		msm_ipc_router_smd_get_xprt_version;
+	smd_xprtp->xprt.get_option =
+		msm_ipc_router_smd_get_xprt_option;
+	smd_xprtp->xprt.read_avail = NULL;
+	smd_xprtp->xprt.read = NULL;
+	smd_xprtp->xprt.write_avail =
+		msm_ipc_router_smd_remote_write_avail;
+	smd_xprtp->xprt.write = msm_ipc_router_smd_remote_write;
+	smd_xprtp->xprt.close = msm_ipc_router_smd_remote_close;
+	smd_xprtp->xprt.sft_close_done = smd_xprt_sft_close_done;
+	smd_xprtp->xprt.priv = NULL;
+
+	init_waitqueue_head(&smd_xprtp->write_avail_wait_q);
+	smd_xprtp->in_pkt = NULL;
+	smd_xprtp->is_partial_in_pkt = 0;
+	INIT_DELAYED_WORK(&smd_xprtp->read_work, smd_xprt_read_data);
+	spin_lock_init(&smd_xprtp->ss_reset_lock);
+	smd_xprtp->ss_reset = 0;
+
+	return msm_ipc_router_smd_driver_register(smd_xprtp);
+}
+
+/**
+ * parse_devicetree() - parse device tree binding
+ *
+ * @node: pointer to device tree node
+ * @smd_xprt_config: pointer to SMD XPRT configurations
+ *
+ * @return: 0 on success, -ENODEV on failure.
+ *
+ * Reads the mandatory properties qcom,ch-name, qcom,xprt-remote,
+ * qcom,xprt-linkid and qcom,xprt-version, plus the optional boolean
+ * qcom,fragmented-data, and derives the XPRT name as
+ * "<remote>_<channel>".  @key tracks the property currently being
+ * parsed so the error path can report which one was missing.
+ */
+static int parse_devicetree(struct device_node *node,
+		struct msm_ipc_router_smd_xprt_config *smd_xprt_config)
+{
+	int ret;
+	int edge;
+	int link_id;
+	int version;
+	char *key;
+	const char *ch_name;
+	const char *remote_ss;
+
+	key = "qcom,ch-name";
+	ch_name = of_get_property(node, key, NULL);
+	if (!ch_name)
+		goto error;
+	strlcpy(smd_xprt_config->ch_name, ch_name, SMD_MAX_CH_NAME_LEN);
+
+	key = "qcom,xprt-remote";
+	remote_ss = of_get_property(node, key, NULL);
+	if (!remote_ss)
+		goto error;
+	edge = smd_remote_ss_to_edge(remote_ss);
+	if (edge < 0)
+		goto error;
+	smd_xprt_config->edge = edge;
+
+	key = "qcom,xprt-linkid";
+	ret = of_property_read_u32(node, key, &link_id);
+	if (ret)
+		goto error;
+	smd_xprt_config->link_id = link_id;
+
+	key = "qcom,xprt-version";
+	ret = of_property_read_u32(node, key, &version);
+	if (ret)
+		goto error;
+	smd_xprt_config->xprt_version = version;
+
+	key = "qcom,fragmented-data";
+	smd_xprt_config->xprt_option = of_property_read_bool(node, key);
+
+	scnprintf(smd_xprt_config->xprt_name, XPRT_NAME_LEN, "%s_%s",
+		  remote_ss, smd_xprt_config->ch_name);
+
+	return 0;
+
+error:
+	pr_err("%s: missing key: %s\n", __func__, key);
+	return -ENODEV;
+}
+
+/**
+ * msm_ipc_router_smd_xprt_probe() - Probe an SMD xprt
+ *
+ * @pdev: Platform device corresponding to SMD xprt.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * Called when the underlying device tree driver registers a platform
+ * device mapped to an SMD transport.  Marks DT probing as done (so the
+ * static-table fallback worker stands down), then parses the node and
+ * initializes the XPRT from it.
+ */
+static int msm_ipc_router_smd_xprt_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct msm_ipc_router_smd_xprt_config smd_xprt_config;
+
+	if (!pdev || !pdev->dev.of_node)
+		return 0;
+
+	mutex_lock(&smd_remote_xprt_list_lock_lha1);
+	ipc_router_smd_xprt_probe_done = 1;
+	mutex_unlock(&smd_remote_xprt_list_lock_lha1);
+
+	ret = parse_devicetree(pdev->dev.of_node, &smd_xprt_config);
+	if (ret) {
+		pr_err(" failed to parse device tree\n");
+		return ret;
+	}
+
+	ret = msm_ipc_router_smd_config_init(&smd_xprt_config);
+	if (ret) {
+		pr_err("%s init failed\n", __func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * ipc_router_smd_xprt_probe_worker() - probe worker for non DT configurations
+ *
+ * @work: work item to process
+ *
+ * Runs IPC_ROUTER_SMD_XPRT_WAIT_TIMEOUT ms after module init.  If no
+ * device tree probe has set ipc_router_smd_xprt_probe_done by then, the
+ * XPRTs are initialized from the static smd_xprt_cfg[] table instead.
+ */
+static void ipc_router_smd_xprt_probe_worker(struct work_struct *work)
+{
+	int i, ret;
+
+	/* Static table must stay in sync with NUM_SMD_XPRTS. */
+	BUG_ON(ARRAY_SIZE(smd_xprt_cfg) != NUM_SMD_XPRTS);
+
+	mutex_lock(&smd_remote_xprt_list_lock_lha1);
+	if (!ipc_router_smd_xprt_probe_done) {
+		/* Drop the lock while initializing: config init registers
+		 * platform drivers and may sleep.  The flag is not rechecked
+		 * per-iteration, so a DT probe racing in here could cause a
+		 * duplicate init attempt - NOTE(review): confirm intended.
+		 */
+		mutex_unlock(&smd_remote_xprt_list_lock_lha1);
+		for (i = 0; i < ARRAY_SIZE(smd_xprt_cfg); i++) {
+			ret = msm_ipc_router_smd_config_init(&smd_xprt_cfg[i]);
+			if (ret)
+				pr_err(" %s init failed config idx %d\n",
+					__func__, i);
+		}
+		mutex_lock(&smd_remote_xprt_list_lock_lha1);
+	}
+	mutex_unlock(&smd_remote_xprt_list_lock_lha1);
+}
+
+/* Device-tree match table; const because .of_match_table takes a
+ * const struct of_device_id pointer.
+ */
+static const struct of_device_id msm_ipc_router_smd_xprt_match_table[] = {
+	{ .compatible = "qcom,ipc_router_smd_xprt" },
+	{},
+};
+
+/* Platform driver matched either via DT (table above) or by MODULE_NAME. */
+static struct platform_driver msm_ipc_router_smd_xprt_driver = {
+	.probe = msm_ipc_router_smd_xprt_probe,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = msm_ipc_router_smd_xprt_match_table,
+	},
+};
+
+/* Module init: register the DT platform driver, create the PIL vote
+ * workqueue and arm the static-table fallback probe work.
+ *
+ * Fixes vs. original: create_singlethread_workqueue() returns NULL on
+ * failure (never an ERR_PTR), so test for NULL; on that failure the
+ * already-registered platform driver is now unregistered instead of
+ * being leaked; and the error code is -ENOMEM rather than -EFAULT.
+ */
+static int __init msm_ipc_router_smd_xprt_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&msm_ipc_router_smd_xprt_driver);
+	if (rc) {
+		pr_err("%s: msm_ipc_router_smd_xprt_driver register failed %d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	pil_vote_wq = create_singlethread_workqueue("pil_vote_wq");
+	if (!pil_vote_wq) {
+		pr_err("%s: create_singlethread_workqueue failed\n", __func__);
+		platform_driver_unregister(&msm_ipc_router_smd_xprt_driver);
+		return -ENOMEM;
+	}
+
+	INIT_DELAYED_WORK(&ipc_router_smd_xprt_probe_work,
+			  ipc_router_smd_xprt_probe_worker);
+	schedule_delayed_work(&ipc_router_smd_xprt_probe_work,
+			      msecs_to_jiffies(IPC_ROUTER_SMD_XPRT_WAIT_TIMEOUT));
+	return 0;
+}
+
+module_init(msm_ipc_router_smd_xprt_init);
+MODULE_DESCRIPTION("IPC Router SMD XPRT");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/mct.c b/drivers/soc/qcom/mct.c
new file mode 100644
index 000000000000..d3bfdfb638af
--- /dev/null
+++ b/drivers/soc/qcom/mct.c
@@ -0,0 +1,926 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#define pr_fmt(fmt) "%s: " fmt, KBUILD_MODNAME
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/cpu_pm.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/sysfs.h>
+#include <linux/of.h>
+#include <linux/syscore_ops.h>
+#include <linux/regulator/consumer.h>
+#include <linux/err.h>
+
+#define MCT_DEV_NAME "mct"
+#define MCT_NAME_LENGTH 20
+#define MCT_SYSFS_MAX_LENGTH 20
+#define DIFFERENT_CONFIG "different_config"
+#define MCT_ENABLE "enable"
+#define MCT_TYPE_NAME "msm_mct"
+
+static struct kobject *mct_kobj;
+static bool mct_boot_enable, mct_deferred, mct_notify_pmic;
+static uint32_t mct_ulim, mct_dcnt, mct_wr, mct_vxwr, mct_vlswr, mct_vaw;
+
+/*
+ * Maximum Current Throttling Weight Register
+ */
+#define write_mct_wr(val) asm("mcr p15, 7, %0, c15, c2, 7" : : "r" (val))
+
+/*
+ * Maximum Current Throttling Venum eXecution-pipe Weight Register
+ */
+#define write_mct_vxwr(val) asm("mcr p15, 7, %0, c15, c3, 1" : : "r" (val))
+
+/*
+ * Maximum Current Throttling Venum L/S-pipe Weight Register
+ */
+#define write_mct_vlswr(val) asm("mcr p15, 7, %0, c15, c3, 2" : : "r" (val))
+
+/*
+ * Maximum Current Throttling Control Register
+ */
+#define write_mct_cr(val) asm("mcr p15, 7, %0, c15, c2, 6" : : "r" (val))
+
+/*
+ * Maximum Current Throttling Count Register
+ */
+#define write_mct_cntr(val) asm("mcr p15, 7, %0, c15, c3, 0" : : "r" (val))
+
+/*
+ * Read MCT CNTR Register value
+ */
+#define read_mct_cntr(val) asm("mrc p15, 7, %0, c15, c3, 0" : "=r" (val))
+
+/*
+ * Default Maximum Current Throttling Weight Register Value
+ */
+#define MCT_DEFAULT_WR 0x14221120
+
+/*
+ * Default Maximum Current Throttling Venum eXecution-pipe Weight Register Value
+ */
+#define MCT_DEFAULT_VXWR 0xF8436430
+
+/*
+ * Default Maximum Current Throttling Venum L/S-pipe Weight Register Value
+ */
+#define MCT_DEFAULT_VLSWR 0xA5846330
+
+/*
+ * Default Maximum Current Throttling ULIM
+ */
+#define MCT_DEFAULT_ULIM 0x007F
+
+/*
+ * Default Maximum Current Throttling DCNT
+ */
+#define MCT_DEFAULT_DCNT 0x0C
+
+/*
+ * Default Maximum Current Throttling VAW
+ */
+#define MCT_DEFAULT_VAW 0x1
+
+/*
+ * Default Maximum Current Throttling Control Register Value for Enabling MCT
+ */
+#define MCT_DEFAULT_ENABLE_CR ((mct_ulim << 16) | \
+ (mct_dcnt << 8) | \
+ (mct_vaw << 4) | 0x3)
+
+/*
+ * Default Maximum Current Throttling Control Register Value for Disabling MCT
+ */
+#define MCT_DEFAULT_DISABLE_CR ((mct_ulim << 16) | \
+ (mct_dcnt << 8) | \
+ (mct_vaw << 4))
+
+/*
+ * Maximum Current Throttling Control Register Value From ULIM and DCNT
+ */
+#define MCT_CR_FROM_ULIM_DCNT(ulim, dcnt) ((ulim << 16) | (dcnt << 8) | \
+ (mct_vaw << 4) | 0x3)
+
+/**
+ * MCT control block
+ * @kobj: Pointer to hold the Kobject of the per cpu instance.
+ * @mct_enabled: Indicates MCT mode enabled or not.
+ * @mct_type: holds the MCT type.
+ * @mct_ulim: holds the MCT upper Limit.
+ * @mct_dcnt: holds the MCT Decrement Count.
+ * @mct_cntr: holds the MCTCNTR register value.
+ * @mct_regulator: Pointer to the MCT regulator.
+ * @mct_reg_enabled: Indicates whether MCT regulator is enabled or not.
+ */
+struct mct_context {
+ struct kobject *kobj;
+ bool mct_enabled;
+ char mct_type[MCT_NAME_LENGTH];
+ u32 mct_ulim;
+ u32 mct_dcnt;
+ u32 mct_cntr;
+ struct regulator *mct_regulator;
+ bool mct_reg_enabled;
+};
+
+static DEFINE_PER_CPU(struct mct_context *, gmct);
+
+/*
+ * Apply MCT CPU register
+ */
+static void mct_apply_cpu_register(void *arg)
+{
+ uint32_t value;
+ /* Invoked on the target CPU (smp_call_function_single, CPU_STARTING,
+ * CPU_PM_EXIT, syscore resume), so smp_processor_id() is stable.
+ */
+ unsigned int cpu_index = smp_processor_id();
+
+ if (!per_cpu(gmct, cpu_index))
+ return;
+
+ if (per_cpu(gmct, cpu_index)->mct_enabled) {
+ /* Program the weight registers, zero the throttle counter,
+ * then enable MCT with this CPU's ULIM/DCNT via the control
+ * register (cp15 writes, see the write_mct_* macros above).
+ */
+ write_mct_wr(mct_wr);
+ write_mct_vxwr(mct_vxwr);
+ write_mct_vlswr(mct_vlswr);
+ write_mct_cntr(0);
+ value = MCT_CR_FROM_ULIM_DCNT(
+ per_cpu(gmct, cpu_index)->mct_ulim,
+ per_cpu(gmct, cpu_index)->mct_dcnt);
+ write_mct_cr(value);
+ } else {
+ /* Disabled: write the control register with the enable bits
+ * (0x3) clear.
+ */
+ write_mct_cr(MCT_DEFAULT_DISABLE_CR);
+ }
+}
+
+/*
+ * Read MCTCNTR Register value
+ */
+/*
+ * SMP-call target: sample this CPU's MCTCNTR register into its per-cpu
+ * context so cntr_show() can report it.
+ */
+static void mct_read_cntr_register(void *arg)
+{
+	uint32_t cntr_val;
+
+	read_mct_cntr(cntr_val);
+	per_cpu(gmct, smp_processor_id())->mct_cntr = cntr_val;
+}
+
+/*
+ * Shared reader for the "enable" attribute.  Serves both the global
+ * node (kobj_name == MCT_ENABLE) and the per-cpu "cpuN" nodes.
+ * Returns 'Y'/'N', or DIFFERENT_CONFIG when the global node is read
+ * while CPUs disagree; -EPERM before mct_init_context() has run.
+ */
+static int validate_and_show(const char *kobj_name, char *buf)
+{
+ unsigned int cpu_index = 0;
+
+ if (!per_cpu(gmct, cpu_index)) {
+ pr_err("%s: MCT variables not initalized\n", __func__);
+ return -EPERM;
+ }
+
+ if (!strcmp(kobj_name, MCT_ENABLE)) {
+ /* Global node: only report a value if every CPU agrees. */
+ for_each_possible_cpu(cpu_index) {
+ if (per_cpu(gmct, cpu_index)->mct_enabled !=
+ per_cpu(gmct, 0)->mct_enabled)
+ goto diff_mode;
+ }
+ cpu_index = 0;
+ } else {
+ /* Per-cpu node: the kobject is named "cpuN". */
+ sscanf(kobj_name, "cpu%u", &cpu_index);
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%c\n",
+ (per_cpu(gmct, cpu_index)->mct_enabled) ? 'Y' : 'N');
+diff_mode:
+ return snprintf(buf, PAGE_SIZE, "%s\n", DIFFERENT_CONFIG);
+}
+
+/* sysfs show for "enable": delegate to the shared reader, keyed by the
+ * kobject's name (global node vs per-cpu "cpuN" directory).
+ */
+static ssize_t enable_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	const char *node_name = kobj->name;
+
+	return validate_and_show(node_name, buf);
+}
+
+/*
+ * sysfs show for "ulim" (upper limit).  The common "ulim" node reports
+ * a value only when all CPUs agree, otherwise DIFFERENT_CONFIG; the
+ * per-cpu "cpuN" node reports that CPU's value.  -EPERM before init.
+ */
+static ssize_t ulim_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ unsigned int cpu_index = 0;
+
+ if (!per_cpu(gmct, cpu_index)) {
+ pr_err("%s: MCT variables not initalized\n", __func__);
+ return -EPERM;
+ }
+
+ if (!strcmp(kobj->name, "ulim")) {
+ for_each_possible_cpu(cpu_index) {
+ if (per_cpu(gmct, cpu_index)->mct_ulim !=
+ per_cpu(gmct, 0)->mct_ulim)
+ goto diff_ulim;
+ }
+ cpu_index = 0;
+ } else {
+ sscanf(kobj->name, "cpu%u", &cpu_index);
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%x\n",
+ per_cpu(gmct, cpu_index)->mct_ulim);
+diff_ulim:
+ return snprintf(buf, PAGE_SIZE, "%s\n", DIFFERENT_CONFIG);
+}
+
+/*
+ * sysfs show for "dcnt" (decrement count).  Mirrors ulim_show(): the
+ * common node requires all CPUs to agree, per-cpu nodes report one CPU.
+ */
+static ssize_t dcnt_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ unsigned int cpu_index = 0;
+
+ if (!per_cpu(gmct, cpu_index)) {
+ pr_err("%s: MCT variables not initalized\n", __func__);
+ return -EPERM;
+ }
+
+ if (!strcmp(kobj->name, "dcnt")) {
+ for_each_possible_cpu(cpu_index) {
+ if (per_cpu(gmct, cpu_index)->mct_dcnt !=
+ per_cpu(gmct, 0)->mct_dcnt)
+ goto diff_dcnt;
+ }
+ cpu_index = 0;
+ } else {
+ sscanf(kobj->name, "cpu%u", &cpu_index);
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%x\n",
+ per_cpu(gmct, cpu_index)->mct_dcnt);
+diff_dcnt:
+ return snprintf(buf, PAGE_SIZE, "%s\n", DIFFERENT_CONFIG);
+}
+
+/*
+ * sysfs show for the per-cpu "cntr" node: sample MCTCNTR on the target
+ * CPU (0 is reported for offline CPUs, whose counter can't be read).
+ *
+ * Fixes: unlike every sibling *_show handler, the original dereferenced
+ * the per-cpu context with no NULL guard, crashing if read before
+ * mct_init_context() populated gmct.
+ */
+static ssize_t cntr_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	unsigned int cpu_index = 0;
+
+	if (!per_cpu(gmct, cpu_index)) {
+		pr_err("%s: MCT variables not initalized\n", __func__);
+		return -EPERM;
+	}
+
+	sscanf(kobj->name, "cpu%u", &cpu_index);
+	if (cpu_online(cpu_index))
+		smp_call_function_single(cpu_index,
+					mct_read_cntr_register,
+					NULL, 1);
+	else
+		per_cpu(gmct, cpu_index)->mct_cntr = 0;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			per_cpu(gmct, cpu_index)->mct_cntr);
+}
+
+/* sysfs show for "type": the type string is written identically for
+ * every CPU by mct_init_context(), so CPU0's copy is authoritative.
+ */
+static ssize_t type_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	if (!per_cpu(gmct, 0)) {
+		pr_err("%s: MCT var. not initialized\n", __func__);
+		return -EPERM;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", per_cpu(gmct, 0)->mct_type);
+}
+
+/*
+ * Sync the per-cpu MCT regulator with the throttling state.  No-op when
+ * the CPU has no regulator or PMIC notification (notify_pmic module
+ * param) is off.  mct_reg_enabled tracks whether we currently hold an
+ * enable, keeping the regulator refcount balanced across toggles.
+ */
+static void mct_update_regulator(uint32_t cpu_index)
+{
+ int ret = 0;
+ bool enable = per_cpu(gmct, cpu_index)->mct_enabled;
+
+ if (!per_cpu(gmct, cpu_index)->mct_regulator)
+ return;
+
+ if (!mct_notify_pmic)
+ return;
+
+ /* Only act on a real state transition. */
+ if (enable && !per_cpu(gmct, cpu_index)->mct_reg_enabled)
+ ret = regulator_enable(
+ per_cpu(gmct, cpu_index)->mct_regulator);
+ else if (!enable && per_cpu(gmct, cpu_index)->mct_reg_enabled)
+ ret = regulator_disable(
+ per_cpu(gmct, cpu_index)->mct_regulator);
+ else
+ goto reg_update_exit;
+
+ if (ret) {
+ pr_err("%s: regulator %s failed for CPU%d. err:%d\n",
+ __func__, (enable) ? "enable" : "disable",
+ cpu_index, ret);
+ goto reg_update_exit;
+ } else {
+ pr_debug("%s: regulator %s for CPU%d.\n",
+ __func__, (enable) ? "enabled" : "disabled",
+ cpu_index);
+ per_cpu(gmct, cpu_index)->mct_reg_enabled = enable;
+ }
+
+reg_update_exit:
+ return;
+}
+
+/*
+ * Propagate one CPU's mct_enabled state.  Ordering is deliberate:
+ * when enabling, the CPU registers are programmed before the PMIC
+ * regulator is notified; when disabling, the regulator is released
+ * first and the registers cleared afterwards.  Offline CPUs skip the
+ * register write (restored later by the CPU_STARTING notifier).
+ */
+static void update_enable(uint32_t cpu_index)
+{
+ if (per_cpu(gmct, cpu_index)->mct_enabled) {
+ if (cpu_online(cpu_index))
+ smp_call_function_single(cpu_index,
+ mct_apply_cpu_register, NULL, 1);
+ mct_update_regulator(cpu_index);
+ } else {
+ mct_update_regulator(cpu_index);
+ if (cpu_online(cpu_index))
+ smp_call_function_single(cpu_index,
+ mct_apply_cpu_register, NULL, 1);
+ }
+}
+
+/*
+ * Shared writer for the "enable" attribute (sysfs store and module
+ * param).  Parses 'buf' with param_set_bool(); the global node applies
+ * the value to every possible CPU, a "cpuN" node to that CPU only.
+ * Returns 0 on success, -EPERM before init, or the parse error.
+ */
+static int validate_and_store(const char *kobj_name, const char *buf)
+{
+ int ret = 0, cpu_index = 0;
+ /* Stack kernel_param so param_set_bool() can write into 'enable'. */
+ struct kernel_param kp;
+ bool enable;
+
+ if (!per_cpu(gmct, cpu_index)) {
+ ret = -EPERM;
+ pr_err("%s: MCT var. not initialized\n", __func__);
+ goto store_exit;
+ }
+
+ kp.arg = &enable;
+ ret = param_set_bool(buf, &kp);
+ if (ret) {
+ pr_err("%s: Invalid input:%s. err:%d\n", __func__, buf, ret);
+ goto store_exit;
+ }
+
+ if (!strcmp(kobj_name, MCT_ENABLE)) {
+ for_each_possible_cpu(cpu_index) {
+ per_cpu(gmct, cpu_index)->mct_enabled = enable;
+ update_enable(cpu_index);
+ }
+ } else {
+ sscanf(kobj_name, "cpu%u", &cpu_index);
+ per_cpu(gmct, cpu_index)->mct_enabled = enable;
+ update_enable(cpu_index);
+ }
+
+store_exit:
+ return ret;
+}
+
+/* sysfs store for "enable": forward to the shared writer and report
+ * the consumed byte count on success.
+ */
+static ssize_t enable_store(struct kobject *kobj, struct kobj_attribute *attr,
+		const char *buf, size_t count)
+{
+	int err = validate_and_store(kobj->name, buf);
+
+	if (err)
+		return err;
+	return count;
+}
+
+/* Set one CPU's ULIM and re-program its MCT registers if online. */
+static void update_ulim(int cpu_index, uint32_t value)
+{
+ per_cpu(gmct, cpu_index)->mct_ulim = value;
+ if (cpu_online(cpu_index))
+ smp_call_function_single(cpu_index,
+ mct_apply_cpu_register, NULL, 1);
+}
+
+/*
+ * sysfs store for "ulim".  The common node writes every CPU, skipping
+ * (and flagging -EINVAL for) CPUs with MCT disabled; a "cpuN" node
+ * writes one CPU.  Parses the value with param_set_uint().
+ */
+static ssize_t ulim_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int cpu_index = 0, ret = 0;
+ uint32_t value;
+ struct kernel_param kp;
+
+ if (!per_cpu(gmct, cpu_index)) {
+ ret = -EPERM;
+ pr_err("%s: MCT var. not initialized\n", __func__);
+ goto ulim_fail;
+ }
+
+ kp.arg = &value;
+ ret = param_set_uint(buf, &kp);
+ if (ret) {
+ pr_err("%s: Invalid input:%s. err:%d\n", __func__, buf, ret);
+ goto ulim_fail;
+ }
+
+ if (!strcmp(kobj->name, "ulim")) {
+ for_each_possible_cpu(cpu_index) {
+ if (!per_cpu(gmct, cpu_index)->mct_enabled) {
+ pr_err("%s: MCT is disabled for cpu%d.\n",
+ __func__, cpu_index);
+ ret = -EINVAL;
+ continue;
+ }
+ update_ulim(cpu_index, value);
+ }
+ } else {
+ sscanf(kobj->name, "cpu%u", &cpu_index);
+ if (!per_cpu(gmct, cpu_index)->mct_enabled) {
+ pr_err("%s: MCT is disabled for cpu%d.\n",
+ __func__, cpu_index);
+ ret = -EINVAL;
+ goto ulim_fail;
+ }
+ update_ulim(cpu_index, value);
+ }
+
+ulim_fail:
+ return (ret) ? ret : count;
+}
+
+/* Set one CPU's DCNT and re-program its MCT registers if online. */
+static void update_dcnt(int cpu_index, uint32_t value)
+{
+ per_cpu(gmct, cpu_index)->mct_dcnt = value;
+ if (cpu_online(cpu_index))
+ smp_call_function_single(cpu_index,
+ mct_apply_cpu_register, NULL, 1);
+}
+
+/*
+ * sysfs store for "dcnt".  Mirrors ulim_store(): common node writes
+ * all enabled CPUs, "cpuN" node writes one; disabled CPUs give -EINVAL.
+ */
+static ssize_t dcnt_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int cpu_index = 0, ret = 0;
+ uint32_t value;
+ struct kernel_param kp;
+
+ if (!per_cpu(gmct, cpu_index)) {
+ ret = -EPERM;
+ pr_err("%s: MCT var. not initialized\n", __func__);
+ goto dcnt_fail;
+ }
+
+ kp.arg = &value;
+ ret = param_set_uint(buf, &kp);
+ if (ret) {
+ pr_err("%s: Invalid input:%s. err:%d\n", __func__, buf, ret);
+ goto dcnt_fail;
+ }
+
+ if (!strcmp(kobj->name, "dcnt")) {
+ for_each_possible_cpu(cpu_index) {
+ if (!per_cpu(gmct, cpu_index)->mct_enabled) {
+ pr_err("%s: MCT is disabled for cpu%d.\n",
+ __func__, cpu_index);
+ ret = -EINVAL;
+ continue;
+ }
+ update_dcnt(cpu_index, value);
+ }
+ } else {
+ sscanf(kobj->name, "cpu%u", &cpu_index);
+ if (!per_cpu(gmct, cpu_index)->mct_enabled) {
+ pr_err("%s: MCT is disabled for cpu%d.\n",
+ __func__, cpu_index);
+ ret = -EINVAL;
+ goto dcnt_fail;
+ }
+ update_dcnt(cpu_index, value);
+ }
+
+dcnt_fail:
+ return (ret) ? ret : count;
+}
+
+/* Module-param set hook: route writes through the same path as sysfs. */
+static int mct_enable_store(const char *val, const struct kernel_param *kp)
+{
+	return validate_and_store(MCT_ENABLE, val);
+}
+
+/* Module-param get hook: same formatting as the sysfs "enable" node. */
+static int mct_enable_show(char *buf, const struct kernel_param *kp)
+{
+	return validate_and_show(MCT_ENABLE, buf);
+}
+
+static struct kernel_param_ops pmic_notify_ops = {
+	.set = param_set_bool,
+	.get = param_get_bool,
+};
+
+static struct kernel_param_ops module_ops = {
+	.set = mct_enable_store,
+	.get = mct_enable_show,
+};
+
+module_param_cb(enable, &module_ops, &mct_boot_enable, 0644);
+MODULE_PARM_DESC(enable, "Enable maximum current throttling feature");
+
+module_param_cb(notify_pmic, &pmic_notify_ops, &mct_notify_pmic, 0644);
+/* Fixes: the original duplicated MODULE_PARM_DESC(enable, ...) here,
+ * leaving notify_pmic undocumented in modinfo.
+ */
+MODULE_PARM_DESC(notify_pmic, "Enable/disable MCT notification to PMIC");
+
+/*
+ * MCT device attributes
+ */
+static __refdata struct kobj_attribute type_attr =
+ __ATTR(type, 0444, type_show, NULL);
+static __refdata struct kobj_attribute ulim_attr =
+ __ATTR(ulim, 0644, ulim_show, ulim_store);
+static __refdata struct kobj_attribute dcnt_attr =
+ __ATTR(dcnt, 0644, dcnt_show, dcnt_store);
+static __refdata struct kobj_attribute enable_attr =
+ __ATTR(enable, 0644, enable_show, enable_store);
+static __refdata struct kobj_attribute cntr_attr =
+ __ATTR(cntr, 0444, cntr_show, NULL);
+
+static __refdata struct attribute *common_attrs[] = {
+ &type_attr.attr,
+ &ulim_attr.attr,
+ &dcnt_attr.attr,
+ NULL,
+};
+static __refdata struct attribute_group common_attr_group = {
+ .attrs = common_attrs,
+};
+
+static __refdata struct attribute *per_cpu_attrs[] = {
+ &ulim_attr.attr,
+ &dcnt_attr.attr,
+ &enable_attr.attr,
+ &cntr_attr.attr,
+ NULL,
+};
+static __refdata struct attribute_group per_cpu_attr_group = {
+ .attrs = per_cpu_attrs,
+};
+
+/*
+ * Create the module's common attribute group plus one "cpuN" directory
+ * (with per-cpu attrs) per possible CPU.  On failure, every kobject
+ * created so far is torn down before returning the error.
+ * NOTE(review): the cleanup path uses kobject_del(), which does not
+ * drop the reference taken by kobject_create_and_add() — confirm
+ * whether kobject_put() was intended here.
+ */
+static int create_mct_sysfs(void)
+{
+ int i;
+ unsigned int cpu_index = 0;
+ int ret = 0;
+
+ /* Anchor under /sys/module/<KBUILD_MODNAME>/. */
+ mct_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!mct_kobj) {
+ pr_err("%s: Unable to find kobject for MCT\n",
+ __func__);
+ ret = -ENODEV;
+ goto create_sysfs_exit;
+ }
+
+ ret = sysfs_create_group(mct_kobj, &common_attr_group);
+ if (ret) {
+ pr_err("%s: cannot create attr group. err:%d\n",
+ __func__, ret);
+ goto create_sysfs_exit;
+ }
+ for_each_possible_cpu(cpu_index) {
+ char cpu_node[10] = "";
+
+ snprintf(cpu_node, sizeof(cpu_node), "cpu%u", cpu_index);
+ per_cpu(gmct, cpu_index)->kobj =
+ kobject_create_and_add(cpu_node, mct_kobj);
+ if (!per_cpu(gmct, cpu_index)->kobj) {
+ pr_err("%s: cannot create kobject [%s]\n",
+ __func__, cpu_node);
+ ret = -ENOMEM;
+ goto sysfs_cleanup;
+ }
+
+ ret = sysfs_create_group(per_cpu(gmct,
+ cpu_index)->kobj, &per_cpu_attr_group);
+ if (ret) {
+ pr_err("%s: cannot create per cpu attr group. err:%d\n",
+ __func__, ret);
+ goto sysfs_cleanup;
+ }
+ }
+ goto create_sysfs_exit;
+
+sysfs_cleanup:
+ /* Unwind up to and including the CPU that failed (its kobj is NULL
+ * if kobject creation itself failed, hence the check).
+ */
+ for (i = 0; i <= cpu_index; i++) {
+ if (per_cpu(gmct, i)->kobj) {
+ kobject_del(per_cpu(gmct, i)->kobj);
+ per_cpu(gmct, i)->kobj = NULL;
+ }
+ }
+ kobject_del(mct_kobj);
+create_sysfs_exit:
+ return ret;
+}
+
+/* Tear down the per-cpu and common MCT sysfs attribute groups. */
+static void remove_mct_sysfs(void)
+{
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct mct_context *ctx = per_cpu(gmct, cpu);
+
+		if (ctx->kobj)
+			sysfs_remove_group(ctx->kobj, &per_cpu_attr_group);
+	}
+
+	if (mct_kobj)
+		sysfs_remove_group(mct_kobj, &common_attr_group);
+}
+
+/*
+ * Use the cpu notifier to reconfig MCT for the online CPU
+ * when necessary.
+ */
+/*
+ * CPU hotplug callback: CPU_STARTING runs on the incoming CPU, so
+ * mct_apply_cpu_register() reprograms that CPU's (reset) MCT registers.
+ */
+static int mct_cpu_up_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ /* Restore MCT register values after hotplug */
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_STARTING:
+ mct_apply_cpu_register(NULL);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block mct_cpu_notifier = {
+ .notifier_call = mct_cpu_up_callback,
+};
+
+/* CPU PM callback: registers are lost across idle power collapse and
+ * restored on CPU_PM_EXIT (runs on the waking CPU).
+ */
+static int mct_cpu_pm_notifier(struct notifier_block *nb,
+ unsigned long cmd, void *v)
+{
+ /* Restore MCT register values after Idle power collapse */
+ switch (cmd) {
+ case CPU_PM_EXIT:
+ mct_apply_cpu_register(NULL);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block mct_cpupm_nb = {
+ .notifier_call = mct_cpu_pm_notifier,
+};
+
+/* Syscore resume: restores the boot CPU's registers after suspend
+ * (secondary CPUs are covered by the hotplug notifier above).
+ */
+static void mct_cpu_suspend_resume(void)
+{
+ /* Restore MCT register value for cpu0 after suspend */
+ mct_apply_cpu_register(NULL);
+}
+
+static struct syscore_ops mct_suspend_ops = {
+ .resume = mct_cpu_suspend_resume,
+};
+
+/*
+ * Allocate and seed one mct_context per possible CPU from the
+ * boot/DT-derived globals.  Allocations are devm-managed, so a probe
+ * failure frees them automatically.  Returns 0 or -ENOMEM.
+ *
+ * Fixes: 'ret' was declared unsigned int in a function that returns
+ * negative error codes — a latent sign-conversion trap.
+ */
+static int mct_init_context(struct device *dev)
+{
+	unsigned int cpu_index = 0;
+	int ret = 0;
+	struct mct_context *mct;
+
+	for_each_possible_cpu(cpu_index) {
+		mct = devm_kzalloc(dev, sizeof(struct mct_context), GFP_KERNEL);
+		if (!mct) {
+			pr_err("%s: Cannot allocate mct_context\n",
+					__func__);
+			return -ENOMEM;
+		}
+
+		mct->mct_enabled = mct_boot_enable;
+		mct->mct_ulim = mct_ulim;
+		mct->mct_dcnt = mct_dcnt;
+		mct->mct_cntr = 0;
+		strlcpy(mct->mct_type, MCT_TYPE_NAME, sizeof(mct->mct_type));
+		mct->mct_regulator = NULL;
+
+		per_cpu(gmct, cpu_index) = mct;
+	}
+
+	return ret;
+}
+
+/*
+ * Optional per-cpu regulator hookup from the "qcom,mct-regulators"
+ * string list (index N = CPU N).  Missing or failed regulators are
+ * logged and skipped; only -EPROBE_DEFER aborts the loop so the probe
+ * can be retried once the regulator appears.
+ */
+static int probe_regulator(struct platform_device *pdev)
+{
+ int ret = 0, reg_cnt = 0, cpu_index = 0;
+ char *key = NULL;
+ struct device_node *node = pdev->dev.of_node;
+
+ key = "qcom,mct-regulators";
+ reg_cnt = of_property_count_strings(node, key);
+ if ((reg_cnt > num_possible_cpus()) || (reg_cnt < 1)) {
+ pr_err("Invalid regulator count:%d\n", reg_cnt);
+ ret = -EINVAL;
+ goto probe_exit;
+ }
+
+ for_each_possible_cpu(cpu_index) {
+ const char *reg_name;
+ struct mct_context *mct = per_cpu(gmct, cpu_index);
+
+ /* Skip CPUs already wired up (re-entry after deferral). */
+ if (!mct || mct->mct_regulator)
+ continue;
+ if (cpu_index >= reg_cnt)
+ break;
+
+ ret = of_property_read_string_index(node, key, cpu_index,
+ &reg_name);
+ if (ret) {
+ pr_err("Error in read:%s index:%d err:%d\n",
+ key, cpu_index, ret);
+ continue;
+ }
+ if (!strlen(reg_name)) {
+ pr_err("No regulator specified for cpu:%d\n",
+ cpu_index);
+ continue;
+ }
+
+ mct->mct_regulator = devm_regulator_get(&pdev->dev, reg_name);
+ if (IS_ERR_OR_NULL(mct->mct_regulator)) {
+ ret = PTR_ERR(mct->mct_regulator);
+ mct->mct_regulator = NULL;
+ if (ret != -EPROBE_DEFER) {
+ pr_err("Failed to get regulator:%s err:%d\n",
+ reg_name, ret);
+ continue;
+ } else {
+ pr_err("probe deferred for regulator:%s\n",
+ reg_name);
+ break;
+ }
+ }
+ }
+
+probe_exit:
+ return ret;
+}
+
+/*
+ * Initialise the per-cpu contexts and the optional regulators.  Only
+ * context-init errors and -EPROBE_DEFER from the regulator lookup are
+ * propagated; any other regulator failure is swallowed because the
+ * regulator property is optional.
+ */
+static int probe_deferrable_property(struct platform_device *pdev)
+{
+	int err;
+
+	err = mct_init_context(&pdev->dev);
+	if (err) {
+		pr_err("%s: Error initializing MCT variables. err:%d\n",
+				__func__, err);
+		return err;
+	}
+
+	err = probe_regulator(pdev);
+	if (err != -EPROBE_DEFER)
+		return 0;
+
+	/* Remember we deferred so re-probe skips the DT re-parse. */
+	mct_deferred = true;
+	return err;
+}
+
+/*
+ * Platform probe: read the weight/limit tunables from DT (each falls
+ * back to a documented default on a missing property), set up per-cpu
+ * state, sysfs, and the hotplug/PM/syscore hooks, then apply the
+ * enable state to every CPU.  A re-probe after -EPROBE_DEFER jumps
+ * straight to the deferrable part (the DT reads already succeeded).
+ */
+static int mct_probe(struct platform_device *pdev)
+{
+ int ret = 0, cpu_index = 0;
+ char *key = NULL;
+ struct device_node *node = pdev->dev.of_node;
+
+ if (mct_deferred)
+ goto deferred_entry;
+
+ key = "qcom,mct-wr-weight";
+ ret = of_property_read_u32(node, key, &mct_wr);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s:Failed reading node:%s, Key:%s. MCT continues.\n",
+ __func__, node->full_name, key);
+ mct_wr = MCT_DEFAULT_WR;
+ }
+
+ key = "qcom,mct-vxwr-weight";
+ ret = of_property_read_u32(node, key, &mct_vxwr);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s:Failed reading node:%s, Key:%s. MCT continues.\n",
+ __func__, node->full_name, key);
+ mct_vxwr = MCT_DEFAULT_VXWR;
+ }
+
+ key = "qcom,mct-vlswr-weight";
+ ret = of_property_read_u32(node, key, &mct_vlswr);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s:Failed reading node:%s, Key:%s. MCT continues.\n",
+ __func__, node->full_name, key);
+ mct_vlswr = MCT_DEFAULT_VLSWR;
+ }
+
+ key = "qcom,mct-vaw-energy";
+ ret = of_property_read_u32(node, key, &mct_vaw);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s:Failed reading node:%s, Key:%s. MCT continues.\n",
+ __func__, node->full_name, key);
+ mct_vaw = MCT_DEFAULT_VAW;
+ }
+
+ key = "qcom,mct-ulim";
+ ret = of_property_read_u32(node, key, &mct_ulim);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s:Failed reading node:%s, Key:%s. MCT continues.\n",
+ __func__, node->full_name, key);
+ mct_ulim = MCT_DEFAULT_ULIM;
+ }
+
+ key = "qcom,mct-dcnt";
+ ret = of_property_read_u32(node, key, &mct_dcnt);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s:Failed reading node:%s, Key:%s. MCT continues.\n",
+ __func__, node->full_name, key);
+ mct_dcnt = MCT_DEFAULT_DCNT;
+ }
+ /* MCT starts enabled; PMIC notification starts off. */
+ mct_boot_enable = true;
+ mct_notify_pmic = false;
+
+deferred_entry:
+ /* probe_deferrable_property will not return any error other than
+ ** -EPROBE_DEFER and error during variable init. Regulator is an
+ ** optional property. So if the regulator property is not defined
+ ** or if the regulator init fails, this function ignores the error
+ ** and returns 0.
+ */
+ ret = probe_deferrable_property(pdev);
+ if (ret)
+ return ret;
+
+ ret = create_mct_sysfs();
+ if (ret) {
+ pr_err("%s: Cannot create mct sysfs. err:%d\n",
+ __func__, ret);
+ goto mct_probe_exit;
+ }
+ cpu_pm_register_notifier(&mct_cpupm_nb);
+ register_cpu_notifier(&mct_cpu_notifier);
+ register_syscore_ops(&mct_suspend_ops);
+ platform_set_drvdata(pdev, gmct);
+ /* Apply the boot enable state (registers + regulator) per CPU. */
+ for_each_possible_cpu(cpu_index) {
+ update_enable(cpu_index);
+ }
+
+mct_probe_exit:
+ /* On failure, drop the per-cpu references (memory is devm-owned). */
+ if (ret) {
+ for_each_possible_cpu(cpu_index) {
+ per_cpu(gmct, cpu_index) = NULL;
+ }
+ }
+ return ret;
+}
+
+/* Platform remove: drop sysfs nodes and the per-cpu context references
+ * (the contexts themselves are devm-allocated and freed by the core).
+ */
+static int mct_remove(struct platform_device *pdev)
+{
+	unsigned int cpu;
+
+	remove_mct_sysfs();
+	for_each_possible_cpu(cpu)
+		per_cpu(gmct, cpu) = NULL;
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+/* Device-tree match: binds to "qcom,max-current-throttling" nodes. */
+static struct of_device_id mct_of_match_table[] = {
+ {.compatible = "qcom,max-current-throttling"},
+ {},
+};
+static struct platform_driver mct_driver = {
+ .probe = mct_probe,
+ .remove = mct_remove,
+ .driver = {
+ .name = MCT_DEV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = mct_of_match_table,
+ },
+};
+
+/* Registered as late_initcall so regulators/DT are up before probe. */
+static int __init mct_init(void)
+{
+ return platform_driver_register(&mct_driver);
+}
+
+static void __exit mct_exit(void)
+{
+ platform_driver_unregister(&mct_driver);
+}
+
+late_initcall(mct_init);
+module_exit(mct_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("maximum current throttling");
+MODULE_ALIAS("platform:" MCT_DEV_NAME);
diff --git a/drivers/soc/qcom/memory_dump.c b/drivers/soc/qcom/memory_dump.c
new file mode 100644
index 000000000000..b93a2e4a189d
--- /dev/null
+++ b/drivers/soc/qcom/memory_dump.c
@@ -0,0 +1,95 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <asm/cacheflush.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <soc/qcom/memory_dump.h>
+
+#define MSM_DUMP_TABLE_VERSION MSM_DUMP_MAKE_VERSION(1, 0)
+
+struct msm_dump_table {
+ u32 version;
+ u32 num_entries;
+ struct msm_client_dump client_entries[MAX_NUM_CLIENTS];
+};
+
+struct msm_memory_dump {
+ unsigned long dump_table_phys;
+ struct msm_dump_table *dump_table_ptr;
+};
+
+static struct msm_memory_dump mem_dump_data;
+
+/* Report the layout version of the v1 dump table (currently 1.0). */
+uint32_t msm_dump_table_version(void)
+{
+ return MSM_DUMP_TABLE_VERSION;
+}
+EXPORT_SYMBOL(msm_dump_table_version);
+
+/*
+ * Append a client's dump region to the shared dump table and flush it
+ * from the cache so the dump agent reads the updated entry.
+ * Returns 0, or -EINVAL if the table is absent or full.
+ *
+ * Fixes: the flush end was 'table + sizeof(struct msm_dump_table)' —
+ * typed pointer arithmetic scales by the element size, so that
+ * expression flushed sizeof(table)^2 bytes, far past the allocation.
+ * 'table + 1' is the correct one-past-the-end address (the v2 file
+ * already uses the equivalent byte-offset form).
+ */
+int msm_dump_tbl_register(struct msm_client_dump *client_entry)
+{
+	struct msm_client_dump *entry;
+	struct msm_dump_table *table = mem_dump_data.dump_table_ptr;
+
+	if (!table || table->num_entries >= MAX_NUM_CLIENTS)
+		return -EINVAL;
+	entry = &table->client_entries[table->num_entries];
+	entry->id = client_entry->id;
+	entry->start_addr = client_entry->start_addr;
+	entry->end_addr = client_entry->end_addr;
+	table->num_entries++;
+	/* flush cache */
+	dmac_flush_range(table, table + 1);
+	return 0;
+}
+EXPORT_SYMBOL(msm_dump_tbl_register);
+
+/*
+ * Allocate the v1 dump table and publish its physical address to the
+ * IMEM location named by "qcom,msm-imem-mem_dump_table" in DT.
+ *
+ * Fixes vs. original: add a barrier between the relaxed IMEM write and
+ * iounmap() (writel_relaxed() is unordered on its own; the v2 file
+ * already does this), drop the pointless 'static' on a local in a
+ * once-only __init function, and use pr_err/pr_info consistently
+ * instead of mixing in raw printk().
+ */
+static int __init init_memory_dump(void)
+{
+	struct msm_dump_table *table;
+	struct device_node *np;
+	void __iomem *imem_base;
+
+	np = of_find_compatible_node(NULL, NULL,
+				     "qcom,msm-imem-mem_dump_table");
+	if (!np) {
+		pr_err("unable to find DT imem dump table node\n");
+		return -ENODEV;
+	}
+	imem_base = of_iomap(np, 0);
+	if (!imem_base) {
+		pr_err("unable to map imem dump table offset\n");
+		return -ENOMEM;
+	}
+
+	mem_dump_data.dump_table_ptr = kzalloc(sizeof(struct msm_dump_table),
+						GFP_KERNEL);
+	if (!mem_dump_data.dump_table_ptr) {
+		iounmap(imem_base);
+		pr_err("unable to allocate memory for dump table\n");
+		return -ENOMEM;
+	}
+	table = mem_dump_data.dump_table_ptr;
+	table->version = MSM_DUMP_TABLE_VERSION;
+	mem_dump_data.dump_table_phys = virt_to_phys(table);
+	writel_relaxed(mem_dump_data.dump_table_phys, imem_base);
+	/* Ensure the IMEM write has completed before unmapping */
+	mb();
+	pr_info("MSM Memory Dump table set up\n");
+	iounmap(imem_base);
+
+	return 0;
+}
+
+early_initcall(init_memory_dump);
+
diff --git a/drivers/soc/qcom/memory_dump_v2.c b/drivers/soc/qcom/memory_dump_v2.c
new file mode 100644
index 000000000000..dddb1a2d1925
--- /dev/null
+++ b/drivers/soc/qcom/memory_dump_v2.c
@@ -0,0 +1,173 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <asm/cacheflush.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <soc/qcom/memory_dump.h>
+
+#define MSM_DUMP_TABLE_VERSION MSM_DUMP_MAKE_VERSION(2, 0)
+
+struct msm_dump_table {
+ uint32_t version;
+ uint32_t num_entries;
+ struct msm_dump_entry entries[MAX_NUM_ENTRIES];
+};
+
+struct msm_memory_dump {
+ uint64_t table_phys;
+ struct msm_dump_table *table;
+};
+
+static struct msm_memory_dump memdump;
+
+uint32_t msm_dump_table_version(void)
+{
+ return MSM_DUMP_TABLE_VERSION;
+}
+EXPORT_SYMBOL(msm_dump_table_version);
+
+/*
+ * Append a sub-table entry (MSM_DUMP_TYPE_TABLE) to the base table and
+ * flush it so the dump agent sees the update.  Returns 0, or -EINVAL
+ * if the base table is absent or full.
+ */
+static int msm_dump_table_register(struct msm_dump_entry *entry)
+{
+ struct msm_dump_entry *e;
+ struct msm_dump_table *table = memdump.table;
+
+ if (!table || table->num_entries >= MAX_NUM_ENTRIES)
+ return -EINVAL;
+
+ e = &table->entries[table->num_entries];
+ e->id = entry->id;
+ e->type = MSM_DUMP_TYPE_TABLE;
+ e->addr = entry->addr;
+ table->num_entries++;
+
+ /* Byte-offset arithmetic via (void *): flush exactly one table. */
+ dmac_flush_range(table, (void *)table + sizeof(struct msm_dump_table));
+ return 0;
+}
+
+/*
+ * Look up a registered sub-table by id in the base table and return
+ * its kernel-virtual address, or ERR_PTR(-EINVAL) if the base table is
+ * missing or the id was never registered.
+ */
+static struct msm_dump_table *msm_dump_get_table(enum msm_dump_table_ids id)
+{
+ struct msm_dump_table *table = memdump.table;
+ int i;
+
+ if (!table) {
+ pr_err("mem dump base table does not exist\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ for (i = 0; i < MAX_NUM_ENTRIES; i++) {
+ if (table->entries[i].id == id)
+ break;
+ }
+ if (i == MAX_NUM_ENTRIES || !table->entries[i].addr) {
+ pr_err("mem dump base table entry %d invalid\n", id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Get the apps table pointer */
+ table = phys_to_virt(table->entries[i].addr);
+
+ return table;
+}
+
+/*
+ * Register a data region (MSM_DUMP_TYPE_DATA) under the sub-table
+ * identified by 'id'.  Returns 0, a lookup error from
+ * msm_dump_get_table(), or -EINVAL if the sub-table is full.
+ */
+int msm_dump_data_register(enum msm_dump_table_ids id,
+ struct msm_dump_entry *entry)
+{
+ struct msm_dump_entry *e;
+ struct msm_dump_table *table;
+
+ table = msm_dump_get_table(id);
+ if (IS_ERR(table))
+ return PTR_ERR(table);
+
+ /* Defensive !table check; the capacity check is the real gate. */
+ if (!table || table->num_entries >= MAX_NUM_ENTRIES)
+ return -EINVAL;
+
+ e = &table->entries[table->num_entries];
+ e->id = entry->id;
+ e->type = MSM_DUMP_TYPE_DATA;
+ e->addr = entry->addr;
+ table->num_entries++;
+
+ dmac_flush_range(table, (void *)table + sizeof(struct msm_dump_table));
+ return 0;
+}
+EXPORT_SYMBOL(msm_dump_data_register);
+
+/*
+ * v2 init: allocate the base dump table, publish its physical address
+ * to IMEM, then allocate and register the apps sub-table under
+ * MSM_DUMP_TABLE_APPS.  Error labels unwind in acquisition order
+ * (err0: imem mapped; err1: base table; err2: apps table).
+ */
+static int __init init_memory_dump(void)
+{
+ struct msm_dump_table *table;
+ struct msm_dump_entry entry;
+ struct device_node *np;
+ void __iomem *imem_base;
+ int ret;
+
+ np = of_find_compatible_node(NULL, NULL,
+ "qcom,msm-imem-mem_dump_table");
+ if (!np) {
+ pr_err("mem dump base table DT node does not exist\n");
+ return -ENODEV;
+ }
+
+ imem_base = of_iomap(np, 0);
+ if (!imem_base) {
+ pr_err("mem dump base table imem offset mapping failed\n");
+ return -ENOMEM;
+ }
+
+ memdump.table = kzalloc(sizeof(struct msm_dump_table), GFP_KERNEL);
+ if (!memdump.table) {
+ pr_err("mem dump base table allocation failed\n");
+ ret = -ENOMEM;
+ goto err0;
+ }
+ memdump.table->version = MSM_DUMP_TABLE_VERSION;
+ memdump.table_phys = virt_to_phys(memdump.table);
+ writel_relaxed(memdump.table_phys, imem_base);
+ /* Ensure write to imem_base is complete before unmapping */
+ mb();
+ pr_info("MSM Memory Dump base table set up\n");
+
+ iounmap(imem_base);
+
+ table = kzalloc(sizeof(struct msm_dump_table), GFP_KERNEL);
+ if (!table) {
+ pr_err("mem dump apps data table allocation failed\n");
+ ret = -ENOMEM;
+ goto err1;
+ }
+ table->version = MSM_DUMP_TABLE_VERSION;
+
+ entry.id = MSM_DUMP_TABLE_APPS;
+ entry.addr = virt_to_phys(table);
+ ret = msm_dump_table_register(&entry);
+ if (ret) {
+ pr_info("mem dump apps data table register failed\n");
+ goto err2;
+ }
+ pr_info("MSM Memory Dump apps data table set up\n");
+
+ return 0;
+err2:
+ kfree(table);
+err1:
+ kfree(memdump.table);
+ return ret;
+err0:
+ iounmap(imem_base);
+ return ret;
+}
diff --git a/drivers/soc/qcom/msm_rq_stats.c b/drivers/soc/qcom/msm_rq_stats.c
new file mode 100644
index 000000000000..3ff597d4de45
--- /dev/null
+++ b/drivers/soc/qcom/msm_rq_stats.c
@@ -0,0 +1,390 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * Qualcomm MSM Runqueue Stats and cpu utilization Interface for Userspace
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/hrtimer.h>
+#include <linux/cpu.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/rq_stats.h>
+#include <linux/cpufreq.h>
+#include <linux/kernel_stat.h>
+#include <linux/tick.h>
+#include <asm/smp_plat.h>
+#include <linux/suspend.h>
+
+#define MAX_LONG_SIZE 24
+#define DEFAULT_RQ_POLL_JIFFIES 1
+#define DEFAULT_DEF_TIMER_JIFFIES 5
+
+struct notifier_block freq_transition;
+struct notifier_block cpu_hotplug;
+
+/*
+ * Per-CPU load-tracking state.  Updated under cpu_load_mutex; the load is
+ * accumulated as a weighted average over a window and normalized to the
+ * policy's maximum frequency.
+ */
+struct cpu_load_data {
+ cputime64_t prev_cpu_idle; /* idle time at last sample */
+ cputime64_t prev_cpu_wall; /* wall time at last sample */
+ unsigned int avg_load_maxfreq; /* windowed load scaled to max freq */
+ unsigned int samples;
+ unsigned int window_size; /* total wall time folded into avg */
+ unsigned int cur_freq; /* current cpufreq frequency */
+ unsigned int policy_max; /* cpuinfo.max_freq for scaling */
+ cpumask_var_t related_cpus; /* CPUs sharing this freq policy */
+ struct mutex cpu_load_mutex;
+};
+
+static DEFINE_PER_CPU(struct cpu_load_data, cpuload);
+
+
+/*
+ * Fold the time spent at @freq since the last sample into @cpu's windowed
+ * load average.  Caller holds the per-cpu cpu_load_mutex.
+ */
+static int update_average_load(unsigned int freq, unsigned int cpu)
+{
+	struct cpu_load_data *cld = &per_cpu(cpuload, cpu);
+	cputime64_t wall_now, idle_now;
+	unsigned int delta_wall, delta_idle;
+	unsigned int load_pct, scaled_load;
+
+	idle_now = get_cpu_idle_time(cpu, &wall_now, 0);
+
+	delta_wall = (unsigned int) (wall_now - cld->prev_cpu_wall);
+	cld->prev_cpu_wall = wall_now;
+
+	delta_idle = (unsigned int) (idle_now - cld->prev_cpu_idle);
+	cld->prev_cpu_idle = idle_now;
+
+	/* Discard empty or inconsistent (idle > wall) intervals. */
+	if (unlikely(!delta_wall || delta_wall < delta_idle))
+		return 0;
+
+	load_pct = 100 * (delta_wall - delta_idle) / delta_wall;
+
+	/* Scale the busy percentage to what it represents at max freq. */
+	scaled_load = (load_pct * freq) / cld->policy_max;
+
+	if (cld->avg_load_maxfreq) {
+		/*
+		 * A sample already exists in this window; take the
+		 * time-weighted average so the result stays precise.
+		 */
+		cld->avg_load_maxfreq =
+			((cld->avg_load_maxfreq * cld->window_size) +
+			 (scaled_load * delta_wall)) /
+			(delta_wall + cld->window_size);
+		cld->window_size += delta_wall;
+	} else {
+		/* First sample in this window. */
+		cld->avg_load_maxfreq = scaled_load;
+		cld->window_size = delta_wall;
+	}
+
+	return 0;
+}
+
+/*
+ * Sum the windowed, max-frequency-normalized load over all online CPUs,
+ * resetting each CPU's window in the process.
+ */
+static unsigned int report_load_at_max_freq(void)
+{
+	unsigned int sum = 0;
+	struct cpu_load_data *cld;
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		cld = &per_cpu(cpuload, cpu);
+		mutex_lock(&cld->cpu_load_mutex);
+		update_average_load(cld->cur_freq, cpu);
+		sum += cld->avg_load_maxfreq;
+		cld->avg_load_maxfreq = 0;
+		mutex_unlock(&cld->cpu_load_mutex);
+	}
+	return sum;
+}
+
+/*
+ * cpufreq transition callback: on POSTCHANGE, close out the old-frequency
+ * interval for every CPU sharing the policy and record the new frequency.
+ */
+static int cpufreq_transition_handler(struct notifier_block *nb,
+			unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freqs = data;
+	struct cpu_load_data *this_cpu = &per_cpu(cpuload, freqs->cpu);
+	int j;
+
+	switch (val) {
+	case CPUFREQ_POSTCHANGE:
+		for_each_cpu(j, this_cpu->related_cpus) {
+			struct cpu_load_data *pcpu = &per_cpu(cpuload, j);
+
+			mutex_lock(&pcpu->cpu_load_mutex);
+			/*
+			 * Fold the time spent at the old frequency into CPU
+			 * j's own window.  The original passed freqs->cpu
+			 * here, updating one CPU's stats while holding a
+			 * different CPU's mutex.
+			 */
+			update_average_load(freqs->old, j);
+			pcpu->cur_freq = freqs->new;
+			mutex_unlock(&pcpu->cpu_load_mutex);
+		}
+		break;
+	}
+	return 0;
+}
+
+/*
+ * Refresh each online CPU's related_cpus mask from its cpufreq policy.
+ */
+static void update_related_cpus(void)
+{
+	unsigned cpu;
+
+	for_each_cpu(cpu, cpu_online_mask) {
+		struct cpu_load_data *this_cpu = &per_cpu(cpuload, cpu);
+		struct cpufreq_policy cpu_policy;
+
+		/*
+		 * cpufreq_get_policy() fails when no governor is attached
+		 * yet; cpu_policy would then be uninitialized, so skip the
+		 * copy (the original ignored the return value).
+		 */
+		if (cpufreq_get_policy(&cpu_policy, cpu))
+			continue;
+		cpumask_copy(this_cpu->related_cpus, cpu_policy.cpus);
+	}
+}
+/*
+ * CPU hotplug callback: seed cur_freq and reset the load window for a CPU
+ * coming online.  Registered via register_hotcpu_notifier().
+ */
+static int cpu_hotplug_handler(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ unsigned int cpu = (unsigned long)data;
+ struct cpu_load_data *this_cpu = &per_cpu(cpuload, cpu);
+
+ switch (val) {
+ case CPU_ONLINE:
+ if (!this_cpu->cur_freq)
+ this_cpu->cur_freq = cpufreq_quick_get(cpu);
+ update_related_cpus();
+ /* fallthrough: a freshly-online CPU also resets its window */
+ case CPU_ONLINE_FROZEN:
+ this_cpu->avg_load_maxfreq = 0;
+ }
+
+ return NOTIFY_OK;
+}
+
+/*
+ * PM notifier: inhibit hotplug while a suspend/hibernate transition is in
+ * flight, and re-enable it once the transition completes.
+ */
+static int system_suspend_handler(struct notifier_block *nb,
+				unsigned long val, void *data)
+{
+	switch (val) {
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		/* Entering suspend/hibernate: block hotplug activity. */
+		rq_info.hotplug_disabled = 1;
+		break;
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+	case PM_POST_RESTORE:
+		/* Transition finished (or aborted): allow hotplug again. */
+		rq_info.hotplug_disabled = 0;
+		break;
+	default:
+		return NOTIFY_DONE;
+	}
+	return NOTIFY_OK;
+}
+
+
+/* sysfs read: report whether hotplug is currently inhibited (see PM
+ * notifier above in this file). */
+static ssize_t hotplug_disable_show(struct kobject *kobj,
+			struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, MAX_LONG_SIZE, "%d\n", rq_info.hotplug_disabled);
+}
+
+static struct kobj_attribute hotplug_disabled_attr = __ATTR_RO(hotplug_disable);
+
+/* Deferred-timer work: publish the elapsed interval and wake sysfs pollers. */
+static void def_work_fn(struct work_struct *work)
+{
+	int64_t elapsed_ns = ktime_to_ns(ktime_get()) - rq_info.def_start_time;
+
+	/* ns -> ms; do_div() is needed for 64-bit division on 32-bit ARM. */
+	do_div(elapsed_ns, 1000 * 1000);
+	rq_info.def_interval = (unsigned int) elapsed_ns;
+
+	/* Notify polling threads on change of value */
+	sysfs_notify(rq_info.kobj, NULL, "def_timer_ms");
+}
+
+/* sysfs read: report and clear the run-queue average (stored in tenths). */
+static ssize_t run_queue_avg_show(struct kobject *kobj,
+			struct kobj_attribute *attr, char *buf)
+{
+	unsigned long flags = 0;
+	unsigned int avg;
+
+	spin_lock_irqsave(&rq_lock, flags);
+	/* rq avg currently available only on one core */
+	avg = rq_info.rq_avg;
+	rq_info.rq_avg = 0;
+	spin_unlock_irqrestore(&rq_lock, flags);
+
+	return snprintf(buf, PAGE_SIZE, "%d.%d\n", avg / 10, avg % 10);
+}
+
+static struct kobj_attribute run_queue_avg_attr = __ATTR_RO(run_queue_avg);
+
+/* sysfs read: current run-queue polling interval in milliseconds. */
+static ssize_t show_run_queue_poll_ms(struct kobject *kobj,
+				      struct kobj_attribute *attr, char *buf)
+{
+	unsigned long flags = 0;
+	int len = 0;
+
+	spin_lock_irqsave(&rq_lock, flags);
+	len = snprintf(buf, MAX_LONG_SIZE, "%u\n",
+		       jiffies_to_msecs(rq_info.rq_poll_jiffies));
+	spin_unlock_irqrestore(&rq_lock, flags);
+
+	return len;
+}
+
+/*
+ * sysfs write: set the run-queue polling interval (milliseconds).
+ * Returns @count on success, -EINVAL on malformed input.
+ */
+static ssize_t store_run_queue_poll_ms(struct kobject *kobj,
+				       struct kobj_attribute *attr,
+				       const char *buf, size_t count)
+{
+	unsigned int val = 0;
+	unsigned long flags = 0;
+	static DEFINE_MUTEX(lock_poll_ms);
+
+	/*
+	 * Reject malformed input.  The original ignored sscanf() failure
+	 * and silently programmed a 0 ms (0-jiffy) polling interval.
+	 * Parse before taking the locks; no lock is needed for the parse.
+	 */
+	if (sscanf(buf, "%u", &val) != 1)
+		return -EINVAL;
+
+	/* The mutex serializes concurrent writers against each other. */
+	mutex_lock(&lock_poll_ms);
+	spin_lock_irqsave(&rq_lock, flags);
+	rq_info.rq_poll_jiffies = msecs_to_jiffies(val);
+	spin_unlock_irqrestore(&rq_lock, flags);
+	mutex_unlock(&lock_poll_ms);
+
+	return count;
+}
+
+static struct kobj_attribute run_queue_poll_ms_attr =
+	__ATTR(run_queue_poll_ms, S_IWUSR | S_IRUSR, show_run_queue_poll_ms,
+			store_run_queue_poll_ms);
+
+/* sysfs read: last measured deferred-timer interval, in milliseconds. */
+static ssize_t show_def_timer_ms(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	unsigned int interval_ms = rq_info.def_interval;
+
+	return snprintf(buf, MAX_LONG_SIZE, "%u\n", interval_ms);
+}
+
+/*
+ * sysfs write: set the deferred-timer interval (milliseconds) and restart
+ * the measurement window.  Returns @count or -EINVAL on malformed input.
+ */
+static ssize_t store_def_timer_ms(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int val = 0;
+
+	/* Original ignored sscanf() failure and programmed a 0-jiffy timer. */
+	if (sscanf(buf, "%u", &val) != 1)
+		return -EINVAL;
+
+	rq_info.def_timer_jiffies = msecs_to_jiffies(val);
+
+	rq_info.def_start_time = ktime_to_ns(ktime_get());
+	return count;
+}
+
+static struct kobj_attribute def_timer_ms_attr =
+	__ATTR(def_timer_ms, S_IWUSR | S_IRUSR, show_def_timer_ms,
+			store_def_timer_ms);
+
+/* sysfs read: total online-CPU load normalized to max frequency. */
+static ssize_t show_cpu_normalized_load(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	unsigned int load = report_load_at_max_freq();
+
+	return snprintf(buf, MAX_LONG_SIZE, "%u\n", load);
+}
+
+static struct kobj_attribute cpu_normalized_load_attr =
+	__ATTR(cpu_normalized_load, S_IWUSR | S_IRUSR, show_cpu_normalized_load,
+			NULL);
+
+/* Attributes exported under /sys/devices/system/cpu/cpu0/rq-stats/ */
+static struct attribute *rq_attrs[] = {
+ &cpu_normalized_load_attr.attr,
+ &def_timer_ms_attr.attr,
+ &run_queue_avg_attr.attr,
+ &run_queue_poll_ms_attr.attr,
+ &hotplug_disabled_attr.attr,
+ NULL,
+};
+
+static struct attribute_group rq_attr_group = {
+ .attrs = rq_attrs,
+};
+
+/*
+ * Create the rq-stats sysfs group under cpu0's device kobject.
+ * Returns 0 on success or a negative errno.
+ */
+static int init_rq_attribs(void)
+{
+	int err;
+
+	rq_info.rq_avg = 0;
+	rq_info.attr_group = &rq_attr_group;
+
+	/* Create /sys/devices/system/cpu/cpu0/rq-stats/... */
+	rq_info.kobj = kobject_create_and_add("rq-stats",
+			&get_cpu_device(0)->kobj);
+	if (!rq_info.kobj)
+		return -ENOMEM;
+
+	err = sysfs_create_group(rq_info.kobj, rq_info.attr_group);
+	if (err) {
+		kobject_put(rq_info.kobj);
+		/*
+		 * Clear the stale pointer: def_work_fn() passes rq_info.kobj
+		 * to sysfs_notify(), which must not see a released kobject.
+		 */
+		rq_info.kobj = NULL;
+	} else {
+		kobject_uevent(rq_info.kobj, KOBJ_ADD);
+	}
+
+	return err;
+}
+
+/*
+ * Module init: set up the rq-stats workqueue, sysfs attributes, per-cpu
+ * load state, and the cpufreq/hotplug notifiers.
+ */
+static int __init msm_rq_stats_init(void)
+{
+ int ret;
+ int i;
+ struct cpufreq_policy cpu_policy;
+
+#ifndef CONFIG_SMP
+ /* Bail out if this is not an SMP Target */
+ rq_info.init = 0;
+ return -ENOSYS;
+#endif
+
+ rq_wq = create_singlethread_workqueue("rq_stats");
+ BUG_ON(!rq_wq);
+ INIT_WORK(&rq_info.def_timer_work, def_work_fn);
+ spin_lock_init(&rq_lock);
+ rq_info.rq_poll_jiffies = DEFAULT_RQ_POLL_JIFFIES;
+ rq_info.def_timer_jiffies = DEFAULT_DEF_TIMER_JIFFIES;
+ rq_info.rq_poll_last_jiffy = 0;
+ rq_info.def_timer_last_jiffy = 0;
+ rq_info.hotplug_disabled = 0;
+ ret = init_rq_attribs();
+
+ /* NOTE(review): init is set even if init_rq_attribs() failed; only the
+ * final return propagates the error — confirm this is intended. */
+ rq_info.init = 1;
+
+ for_each_possible_cpu(i) {
+ struct cpu_load_data *pcpu = &per_cpu(cpuload, i);
+ mutex_init(&pcpu->cpu_load_mutex);
+ /* NOTE(review): cpufreq_get_policy() return is ignored, and
+ * related_cpus is never allocated with alloc_cpumask_var() —
+ * safe only when cpumask_var_t is an inline array (i.e.
+ * !CONFIG_CPUMASK_OFFSTACK).  TODO confirm for this config. */
+ cpufreq_get_policy(&cpu_policy, i);
+ pcpu->policy_max = cpu_policy.cpuinfo.max_freq;
+ if (cpu_online(i))
+ pcpu->cur_freq = cpufreq_quick_get(i);
+ cpumask_copy(pcpu->related_cpus, cpu_policy.cpus);
+ }
+ freq_transition.notifier_call = cpufreq_transition_handler;
+ cpu_hotplug.notifier_call = cpu_hotplug_handler;
+ cpufreq_register_notifier(&freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ register_hotcpu_notifier(&cpu_hotplug);
+
+ return ret;
+}
+late_initcall(msm_rq_stats_init);
+
+/*
+ * Early init: register the suspend/hibernate PM notifier so that
+ * hotplug_disabled tracking works before the main init runs.
+ */
+static int __init msm_rq_stats_early_init(void)
+{
+#ifndef CONFIG_SMP
+ /* Bail out if this is not an SMP Target */
+ rq_info.init = 0;
+ return -ENOSYS;
+#endif
+
+ pm_notifier(system_suspend_handler, 0);
+ return 0;
+}
+core_initcall(msm_rq_stats_early_init);
diff --git a/drivers/soc/qcom/nohlt.c b/drivers/soc/qcom/nohlt.c
new file mode 100644
index 000000000000..33585e9218a1
--- /dev/null
+++ b/drivers/soc/qcom/nohlt.c
@@ -0,0 +1,49 @@
+/* Copyright (c) 2009, 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * MSM architecture driver to control arm halt behavior
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/cpu.h>
+
+/* debugfs write: any nonzero value forces the idle loop to poll
+ * instead of halting the CPU. */
+static int set_nohalt(void *data, u64 val)
+{
+	cpu_idle_poll_ctrl(val != 0);
+	return 0;
+}
+
+extern int cpu_idle_force_poll;
+
+/* debugfs read: report the current idle-poll override state. */
+static int get_nohalt(void *data, u64 *val)
+{
+	*val = (unsigned int)cpu_idle_force_poll;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(nohalt_ops, get_nohalt, set_nohalt, "%llu\n");
+
+/* Create the /sys/kernel/debug/nohlt control file (root read/write). */
+static int __init init_hlt_debug(void)
+{
+ debugfs_create_file("nohlt", 0600, NULL, NULL, &nohalt_ops);
+
+ return 0;
+}
+
+late_initcall(init_hlt_debug);
diff --git a/drivers/soc/qcom/rpm-smd-debug.c b/drivers/soc/qcom/rpm-smd-debug.c
new file mode 100644
index 000000000000..c08668149636
--- /dev/null
+++ b/drivers/soc/qcom/rpm-smd-debug.c
@@ -0,0 +1,141 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "rpm-smd-debug: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <soc/qcom/rpm-smd.h>
+
+#define MAX_MSG_BUFFER 350
+#define MAX_KEY_VALUE_PAIRS 20
+
+static struct dentry *rpm_debugfs_dir;
+
+/* Pack up to the first four characters of @str into a little-endian u32,
+ * matching the RPM's encoding of resource-type and key names. */
+static u32 string_to_uint(const u8 *str)
+{
+	u32 packed = 0;
+	int i;
+
+	for (i = strnlen(str, sizeof(u32)) - 1; i >= 0; i--)
+		packed = (packed << 8) | str[i];
+
+	return packed;
+}
+
+/*
+ * debugfs write handler: parse "<set> <type> <id> <nelems> {<key> <val>}..."
+ * and send the resulting request to the RPM.
+ *
+ * Always consumes @count; parse errors are logged rather than returned so a
+ * partial echo does not loop.  Fixes vs. the original: sscanf() results are
+ * checked (rsc_id/nelems/data were used uninitialized on malformed input),
+ * and key-parse failures inside the loop release @req (they jumped to 'err'
+ * and leaked the request).
+ */
+static ssize_t rsc_ops_write(struct file *fp, const char __user *user_buffer,
+				size_t count, loff_t *position)
+{
+	char buf[MAX_MSG_BUFFER], rsc_type_str[6] = {}, rpm_set[8] = {},
+						key_str[6] = {};
+	int i, pos, set = -1, nelems;
+	char *cmp;
+	uint32_t rsc_type, rsc_id, key, data;
+	struct msm_rpm_request *req;
+
+	count = min(count, sizeof(buf) - 1);
+	if (copy_from_user(&buf, user_buffer, count))
+		return -EFAULT;
+	buf[count] = '\0';
+	cmp = strstrip(buf);
+
+	if (sscanf(cmp, "%7s %5s %u %d %n", rpm_set, rsc_type_str, &rsc_id,
+				&nelems, &pos) != 4) {
+		pr_err("Invalid number of arguments passed\n");
+		goto err;
+	}
+
+	if (strlen(rpm_set) > 6 || strlen(rsc_type_str) > 4) {
+		pr_err("Invalid value of set or resource type\n");
+		goto err;
+	}
+
+	if (!strcmp(rpm_set, "active"))
+		set = 0;
+	else if (!strcmp(rpm_set, "sleep"))
+		set = 1;
+
+	rsc_type = string_to_uint(rsc_type_str);
+
+	if (set < 0 || nelems < 0) {
+		pr_err("Invalid value of set or nelems\n");
+		goto err;
+	}
+	if (nelems > MAX_KEY_VALUE_PAIRS) {
+		pr_err("Exceeded max no of key-value entries\n");
+		goto err;
+	}
+
+	req = msm_rpm_create_request(set, rsc_type, rsc_id, nelems);
+	if (!req)
+		return -ENOMEM;
+
+	for (i = 0; i < nelems; i++) {
+		cmp += pos;
+		if (sscanf(cmp, "%5s %n", key_str, &pos) != 1) {
+			pr_err("Invalid number of arguments passed\n");
+			goto err_request;
+		}
+		if (strlen(key_str) > 4) {
+			pr_err("Key value cannot be more than 4 characters");
+			goto err_request;
+		}
+		key = string_to_uint(key_str);
+		if (!key) {
+			pr_err("Key values entered incorrectly\n");
+			goto err_request;
+		}
+
+		cmp += pos;
+		if (sscanf(cmp, "%u %n", &data, &pos) != 1) {
+			pr_err("Invalid number of arguments passed\n");
+			goto err_request;
+		}
+		if (msm_rpm_add_kvp_data(req, key,
+				(void *)&data, sizeof(data)))
+			goto err_request;
+	}
+
+	if (msm_rpm_wait_for_ack(msm_rpm_send_request(req)))
+		pr_err("Sending the RPM message failed\n");
+	else
+		pr_info("RPM message sent successfully\n");
+
+err_request:
+	msm_rpm_free_request(req);
+err:
+	return count;
+}
+
+static const struct file_operations rsc_ops = {
+ .write = rsc_ops_write,
+};
+
+/*
+ * Create /sys/kernel/debug/rpm_send_msg/message.  Returns 0 on success or
+ * -ENOMEM; the directory is removed again if the file cannot be created
+ * (the original leaked it on that path).
+ */
+static int __init rpm_smd_debugfs_init(void)
+{
+	rpm_debugfs_dir = debugfs_create_dir("rpm_send_msg", NULL);
+	if (!rpm_debugfs_dir)
+		return -ENOMEM;
+
+	if (!debugfs_create_file("message", S_IWUSR, rpm_debugfs_dir, NULL,
+								&rsc_ops)) {
+		debugfs_remove_recursive(rpm_debugfs_dir);
+		rpm_debugfs_dir = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+late_initcall(rpm_smd_debugfs_init);
+
+/* Module exit: tear down the whole rpm_send_msg debugfs tree. */
+static void __exit rpm_smd_debugfs_exit(void)
+{
+ debugfs_remove_recursive(rpm_debugfs_dir);
+}
+module_exit(rpm_smd_debugfs_exit);
+
+MODULE_DESCRIPTION("RPM SMD Debug Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/qcom/rpm-smd.c b/drivers/soc/qcom/rpm-smd.c
new file mode 100644
index 000000000000..5db15cde9a17
--- /dev/null
+++ b/drivers/soc/qcom/rpm-smd.c
@@ -0,0 +1,1433 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/rbtree.h>
+#include <linux/err.h>
+
+#include <soc/qcom/rpm-notifier.h>
+#include <soc/qcom/rpm-smd.h>
+#include <soc/qcom/smd.h>
+#define CREATE_TRACE_POINTS
+
+/* Debug Definitions */
+enum {
+ MSM_RPM_LOG_REQUEST_PRETTY = BIT(0),
+ MSM_RPM_LOG_REQUEST_RAW = BIT(1),
+ MSM_RPM_LOG_REQUEST_SHOW_MSG_ID = BIT(2),
+};
+
+static int msm_rpm_debug_mask;
+module_param_named(
+ debug_mask, msm_rpm_debug_mask, int, S_IRUGO | S_IWUSR
+);
+
+struct msm_rpm_driver_data {
+ const char *ch_name;
+ uint32_t ch_type;
+ smd_channel_t *ch_info;
+ struct work_struct work;
+ spinlock_t smd_lock_write;
+ spinlock_t smd_lock_read;
+ struct completion smd_open;
+};
+
+#define DEFAULT_BUFFER_SIZE 256
+#define DEBUG_PRINT_BUFFER_SIZE 512
+#define MAX_SLEEP_BUFFER 128
+#define GFP_FLAG(noirq) (noirq ? GFP_ATOMIC : GFP_KERNEL)
+#define INV_RSC "resource does not exist"
+#define ERR "err\0"
+#define MAX_ERR_BUFFER_SIZE 128
+#define MAX_WAIT_ON_ACK 24
+#define INIT_ERROR 1
+
+static ATOMIC_NOTIFIER_HEAD(msm_rpm_sleep_notifier);
+static bool standalone;
+static int probe_status = -EPROBE_DEFER;
+
+/* Register @nb to be called for every sleep-set KVP sent to the RPM. */
+int msm_rpm_register_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&msm_rpm_sleep_notifier, nb);
+}
+
+/* Remove @nb from the sleep-set notifier chain. */
+int msm_rpm_unregister_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&msm_rpm_sleep_notifier, nb);
+}
+
+static struct workqueue_struct *msm_rpm_smd_wq;
+
+enum {
+ MSM_RPM_MSG_REQUEST_TYPE = 0,
+ MSM_RPM_MSG_TYPE_NR,
+};
+
+static const uint32_t msm_rpm_request_service[MSM_RPM_MSG_TYPE_NR] = {
+ 0x716572, /* 'req\0' */
+};
+
+/*the order of fields matter and reflect the order expected by the RPM*/
+struct rpm_request_header {
+ uint32_t service_type;
+ uint32_t request_len;
+};
+
+struct rpm_message_header {
+ uint32_t msg_id;
+ enum msm_rpm_set set;
+ uint32_t resource_type;
+ uint32_t resource_id;
+ uint32_t data_len;
+};
+
+/* On-wire key-value-pair header: key followed by payload size in bytes. */
+struct kvp {
+ unsigned int k;
+ unsigned int s;
+};
+
+struct msm_rpm_kvp_data {
+ uint32_t key;
+ uint32_t nbytes; /* number of bytes */
+ uint8_t *value;
+ bool valid;
+};
+
+/*
+ * One cached sleep-set request, keyed by (resource_type, resource_id) in
+ * the tr_root red-black tree.  'valid' marks entries not yet flushed.
+ */
+struct slp_buf {
+ struct rb_node node;
+ char ubuf[MAX_SLEEP_BUFFER];
+ char *buf; /* 4-byte-aligned pointer into ubuf */
+ bool valid;
+};
+static struct rb_root tr_root = RB_ROOT;
+static int msm_rpm_send_smd_buffer(char *buf, uint32_t size, bool noirq);
+static uint32_t msm_rpm_get_next_msg_id(void);
+
+/* Resource type of a raw request buffer (message header follows the
+ * request header). */
+static inline unsigned int get_rsc_type(char *buf)
+{
+	struct rpm_message_header *hdr =
+		(struct rpm_message_header *)
+			(buf + sizeof(struct rpm_request_header));
+
+	return hdr->resource_type;
+}
+
+/* Resource id of a raw request buffer. */
+static inline unsigned int get_rsc_id(char *buf)
+{
+	struct rpm_message_header *hdr =
+		(struct rpm_message_header *)
+			(buf + sizeof(struct rpm_request_header));
+
+	return hdr->resource_id;
+}
+
+/* lvalue accessors into a raw request buffer (request header, then message
+ * header, then packed KVPs).  These are macros, not functions, because the
+ * flush path assigns through them. */
+#define get_data_len(buf) \
+ (((struct rpm_message_header *) \
+ (buf + sizeof(struct rpm_request_header)))->data_len)
+
+#define get_req_len(buf) \
+ (((struct rpm_request_header *)(buf))->request_len)
+
+#define get_msg_id(buf) \
+ (((struct rpm_message_header *) \
+ (buf + sizeof(struct rpm_request_header)))->msg_id)
+
+
+/* Total on-wire size: payload length plus the request header itself. */
+static inline int get_buf_len(char *buf)
+{
+ return get_req_len(buf) + sizeof(struct rpm_request_header);
+}
+
+/* First packed KVP, located after both headers. */
+static inline struct kvp *get_first_kvp(char *buf)
+{
+ return (struct kvp *)(buf + sizeof(struct rpm_request_header)
+ + sizeof(struct rpm_message_header));
+}
+
+/* Next KVP: skip this header plus its variable-size payload. */
+static inline struct kvp *get_next_kvp(struct kvp *k)
+{
+ return (struct kvp *)((void *)k + sizeof(*k) + k->s);
+}
+
+/* Payload of a KVP, immediately after its header. */
+static inline void *get_data(struct kvp *k)
+{
+ return (void *)k + sizeof(*k);
+}
+
+
+/*
+ * Remove KVP @d from packed buffer @msg by sliding the remaining KVPs down
+ * over it, then shrink the recorded data/request lengths.
+ */
+static void delete_kvp(char *msg, struct kvp *d)
+{
+ struct kvp *n;
+ int dec;
+ uint32_t size;
+
+ n = get_next_kvp(d);
+ /* Bytes occupied by d (header + payload). */
+ dec = (void *)n - (void *)d;
+ /* Bytes of KVP data that follow d and must be moved down. */
+ size = get_data_len(msg) - ((void *)n - (void *)get_first_kvp(msg));
+
+ memcpy((void *)d, (void *)n, size);
+
+ get_data_len(msg) -= dec;
+ get_req_len(msg) -= dec;
+}
+
+/* Overwrite dest's payload with src's; caller guarantees equal sizes. */
+static inline void update_kvp_data(struct kvp *dest, struct kvp *src)
+{
+ memcpy(get_data(dest), get_data(src), src->s);
+}
+
+/*
+ * Append KVP @n (header + payload) to packed buffer @buf and grow the
+ * recorded lengths.  BUG if the fixed sleep buffer would overflow.
+ */
+static void add_kvp(char *buf, struct kvp *n)
+{
+ uint32_t inc = sizeof(*n) + n->s;
+ BUG_ON((get_req_len(buf) + inc) > MAX_SLEEP_BUFFER);
+
+ memcpy(buf + get_buf_len(buf), n, inc);
+
+ get_data_len(buf) += inc;
+ get_req_len(buf) += inc;
+}
+
+/*
+ * Look up the cached sleep buffer whose (resource_type, resource_id) pair
+ * matches that of raw request @slp.  Returns NULL if no entry exists.
+ */
+static struct slp_buf *tr_search(struct rb_root *root, char *slp)
+{
+	unsigned int type = get_rsc_type(slp);
+	unsigned int id = get_rsc_id(slp);
+	struct rb_node *node = root->rb_node;
+
+	while (node) {
+		struct slp_buf *cand = rb_entry(node, struct slp_buf, node);
+		unsigned int ctype = get_rsc_type(cand->buf);
+		unsigned int cid = get_rsc_id(cand->buf);
+
+		/* Order by type first, then by id within a type. */
+		if (type != ctype)
+			node = (type < ctype) ? node->rb_left : node->rb_right;
+		else if (id != cid)
+			node = (id < cid) ? node->rb_left : node->rb_right;
+		else
+			return cand;
+	}
+	return NULL;
+}
+
+/*
+ * Insert sleep buffer @slp into the tree, ordered by (type, id).
+ * Returns -EINVAL if an entry with the same key already exists;
+ * marks the new entry valid (pending flush) on success.
+ */
+static int tr_insert(struct rb_root *root, struct slp_buf *slp)
+{
+ unsigned int type = get_rsc_type(slp->buf);
+ unsigned int id = get_rsc_id(slp->buf);
+
+ struct rb_node **node = &(root->rb_node), *parent = NULL;
+
+ while (*node) {
+ struct slp_buf *curr = rb_entry(*node, struct slp_buf, node);
+ unsigned int ctype = get_rsc_type(curr->buf);
+ unsigned int cid = get_rsc_id(curr->buf);
+
+ parent = *node;
+
+ if (type < ctype)
+ node = &((*node)->rb_left);
+ else if (type > ctype)
+ node = &((*node)->rb_right);
+ else if (id < cid)
+ node = &((*node)->rb_left);
+ else if (id > cid)
+ node = &((*node)->rb_right);
+ else
+ return -EINVAL;
+ }
+
+ rb_link_node(&slp->node, parent, node);
+ rb_insert_color(&slp->node, root);
+ slp->valid = true;
+ return 0;
+}
+
+/* Iterate over every packed KVP in raw request buffer @buf. */
+#define for_each_kvp(buf, k) \
+ for (k = (struct kvp *)get_first_kvp(buf); \
+ ((void *)k - (void *)get_first_kvp(buf)) < get_data_len(buf);\
+ k = get_next_kvp(k))
+
+
+/*
+ * Merge the KVPs of new request @buf into cached sleep buffer @s.
+ * Matching keys with equal sizes are updated in place (only if the data
+ * changed); size mismatches delete and re-append the KVP; unseen keys are
+ * appended.  s->valid is set whenever anything actually changed, so the
+ * flush path can skip untouched entries.
+ */
+static void tr_update(struct slp_buf *s, char *buf)
+{
+ struct kvp *e, *n;
+
+ for_each_kvp(buf, n) {
+ bool found = false;
+ for_each_kvp(s->buf, e) {
+ if (n->k == e->k) {
+ found = true;
+ if (n->s == e->s) {
+ void *e_data = get_data(e);
+ void *n_data = get_data(n);
+ if (memcmp(e_data, n_data, n->s)) {
+ update_kvp_data(e, n);
+ s->valid = true;
+ }
+ } else {
+ /* Size changed: replace the entry. */
+ delete_kvp(s->buf, e);
+ add_kvp(s->buf, n);
+ s->valid = true;
+ }
+ break;
+ }
+
+ }
+ if (!found) {
+ add_kvp(s->buf, n);
+ s->valid = true;
+ }
+ }
+}
+
+/*
+ * Cache a sleep-set request instead of sending it immediately: look it up
+ * by (type, id) in the tree and either merge into the existing entry or
+ * insert a fresh copy.  The cached entries are sent later by
+ * msm_rpm_flush_requests().  Returns 0 or -ENOMEM.
+ */
+int msm_rpm_smd_buffer_request(char *buf, uint32_t size, gfp_t flag)
+{
+ struct slp_buf *slp;
+ static DEFINE_SPINLOCK(slp_buffer_lock);
+ unsigned long flags;
+
+ if (size > MAX_SLEEP_BUFFER)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&slp_buffer_lock, flags);
+ slp = tr_search(&tr_root, buf);
+
+ if (!slp) {
+ /* GFP_ATOMIC: we are under a spinlock with irqs off. */
+ slp = kzalloc(sizeof(struct slp_buf), GFP_ATOMIC);
+ if (!slp) {
+ spin_unlock_irqrestore(&slp_buffer_lock, flags);
+ return -ENOMEM;
+ }
+ /* Keep the packed request 4-byte aligned inside ubuf. */
+ slp->buf = PTR_ALIGN(&slp->ubuf[0], sizeof(u32));
+ memcpy(slp->buf, buf, size);
+ if (tr_insert(&tr_root, slp))
+ pr_err("%s(): Error updating sleep request\n",
+ __func__);
+ } else {
+ /* handle unsent requests */
+ tr_update(slp, buf);
+ }
+
+ spin_unlock_irqrestore(&slp_buffer_lock, flags);
+
+ return 0;
+}
+/*
+ * Pretty-print one cached sleep request (type, id, and each key/size/data)
+ * to the kernel log.  No-op for NULL or already-flushed entries.
+ */
+static void msm_rpm_print_sleep_buffer(struct slp_buf *s)
+{
+	char buf[DEBUG_PRINT_BUFFER_SIZE] = {0};
+	int pos;
+	int buflen = DEBUG_PRINT_BUFFER_SIZE;
+	char ch[5] = {0};
+	u32 type;
+	struct kvp *e;
+
+	if (!s)
+		return;
+
+	if (!s->valid)
+		return;
+
+	/* Decode the packed 4-char resource-type name for readability. */
+	type = get_rsc_type(s->buf);
+	memcpy(ch, &type, sizeof(u32));
+
+	pos = scnprintf(buf, buflen,
+			"Sleep request type = 0x%08x(%s)",
+			get_rsc_type(s->buf), ch);
+	pos += scnprintf(buf + pos, buflen - pos, " id = 0%x",
+			get_rsc_id(s->buf));
+	for_each_kvp(s->buf, e) {
+		uint32_t i;
+		char *data = get_data(e);
+
+		memcpy(ch, &e->k, sizeof(u32));
+
+		pos += scnprintf(buf + pos, buflen - pos,
+				"\n\t\tkey = 0x%08x(%s)",
+				e->k, ch);
+		pos += scnprintf(buf + pos, buflen - pos,
+				" sz= %d data =", e->s);
+
+		for (i = 0; i < e->s; i++)
+			pos += scnprintf(buf + pos, buflen - pos,
+					" 0x%02X", data[i]);
+	}
+	pos += scnprintf(buf + pos, buflen - pos, "\n");
+	/*
+	 * Use a constant format string: the original printk(buf) treated
+	 * key names/data as a format string, so any '%' in them would be
+	 * interpreted as conversions (format-string bug).
+	 */
+	printk("%s", buf);
+}
+
+static struct msm_rpm_driver_data msm_rpm_data = {
+ .smd_open = COMPLETION_INITIALIZER(msm_rpm_data.smd_open),
+};
+
+/*
+ * Send every still-valid cached sleep request over SMD, assigning each a
+ * fresh message id.  Called on the suspend path; optionally logs each
+ * request when @print is set.  Always returns 0.
+ */
+static int msm_rpm_flush_requests(bool print)
+{
+ struct rb_node *t;
+ int ret;
+ int pkt_sz;
+ char buf[MAX_ERR_BUFFER_SIZE] = {0};
+ int count = 0;
+
+ for (t = rb_first(&tr_root); t; t = rb_next(t)) {
+
+ struct slp_buf *s = rb_entry(t, struct slp_buf, node);
+
+ if (!s->valid)
+ continue;
+
+ if (print)
+ msm_rpm_print_sleep_buffer(s);
+
+ /* get_msg_id() is an lvalue macro into the packed buffer. */
+ get_msg_id(s->buf) = msm_rpm_get_next_msg_id();
+ ret = msm_rpm_send_smd_buffer(s->buf,
+ get_buf_len(s->buf), true);
+
+ /*
+ * RPM acks need to be handled here if we have sent over
+ * 24 messages such that we do not overrun SMD buffer. Since
+ * we expect only sleep sets at this point (RPM PC would be
+ * disallowed if we had pending active requests), we need not
+ * process these sleep set acks.
+ */
+ count++;
+ if (count > MAX_WAIT_ON_ACK) {
+ /* NOTE(review): the ack packet is read purely to drain
+ * the SMD fifo; 'len' and the read data are discarded
+ * by design here. */
+ int len;
+ pkt_sz = smd_cur_packet_size(msm_rpm_data.ch_info);
+ if (pkt_sz)
+ len = smd_read(msm_rpm_data.ch_info, buf,
+ pkt_sz);
+ count--;
+ }
+
+ WARN_ON(ret != get_buf_len(s->buf));
+
+ s->valid = false;
+ }
+ return 0;
+}
+
+
+/* Monotonic source for message ids; 0 and 1 are reserved (see
+ * msm_rpm_get_next_msg_id()). */
+static atomic_t msm_rpm_msg_id = ATOMIC_INIT(0);
+
+/* A client request under construction: headers plus a KVP array that is
+ * serialized into 'buf' when sent. */
+struct msm_rpm_request {
+ struct rpm_request_header req_hdr;
+ struct rpm_message_header msg_hdr;
+ struct msm_rpm_kvp_data *kvp;
+ uint32_t num_elements;
+ uint32_t write_idx;
+ uint8_t *buf;
+ uint32_t numbytes;
+};
+
+/*
+ * Data related to message acknowledgement
+ */
+
+LIST_HEAD(msm_rpm_wait_list);
+
+/* One outstanding request awaiting its ack from the RPM. */
+struct msm_rpm_wait_data {
+ struct list_head list;
+ uint32_t msg_id;
+ bool ack_recd;
+ int errno;
+ struct completion ack;
+};
+/* Protects msm_rpm_wait_list. */
+DEFINE_SPINLOCK(msm_rpm_list_lock);
+
+/* Layout of an ack packet as received from the RPM over SMD. */
+struct msm_rpm_ack_msg {
+ uint32_t req;
+ uint32_t req_len;
+ uint32_t rsc_id;
+ uint32_t msg_len;
+ uint32_t id_ack;
+};
+
+LIST_HEAD(msm_rpm_ack_list);
+
+/* Signalled by the SMD notify callback when inbound data is available. */
+static DECLARE_COMPLETION(data_ready);
+
+/* Broadcast one KVP of a sleep-set request to registered listeners. */
+static void msm_rpm_notify_sleep_chain(struct rpm_message_header *hdr,
+		struct msm_rpm_kvp_data *kvp)
+{
+	struct msm_rpm_notifier_data notif = {
+		.rsc_type = hdr->resource_type,
+		.rsc_id = hdr->resource_id,
+		.key = kvp->key,
+		.size = kvp->nbytes,
+		.value = kvp->value,
+	};
+
+	atomic_notifier_call_chain(&msm_rpm_sleep_notifier, 0, &notif);
+}
+
+/*
+ * Add (or update) one key/value pair on request @handle.
+ *
+ * If @key already exists with the same size, the value is overwritten in
+ * place (and 0 returned early when unchanged); with a different size the
+ * old value buffer is reallocated.  New keys take the next free slot up to
+ * num_elements.  @noirq selects GFP_ATOMIC allocations.
+ *
+ * Returns 0 on success, -EINVAL/-ENOMEM on error, or the pending
+ * probe_status if the driver has not probed yet.
+ */
+static int msm_rpm_add_kvp_data_common(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int size, bool noirq)
+{
+ uint32_t i;
+ uint32_t data_size, msg_size;
+
+ if (probe_status)
+ return probe_status;
+
+ if (!handle) {
+ pr_err("%s(): Invalid handle\n", __func__);
+ return -EINVAL;
+ }
+
+ if (size < 0)
+ return -EINVAL;
+
+ /* Payloads are padded to 4-byte multiples on the wire. */
+ data_size = ALIGN(size, SZ_4);
+ /* NOTE(review): presumably this accounts for the 8-byte on-wire KVP
+ * header (struct kvp), which happens to equal
+ * sizeof(struct rpm_request_header) — confirm intent. */
+ msg_size = data_size + sizeof(struct rpm_request_header);
+
+ /* Find an existing slot for this key, if any. */
+ for (i = 0; i < handle->write_idx; i++) {
+ if (handle->kvp[i].key != key)
+ continue;
+ if (handle->kvp[i].nbytes != data_size) {
+ kfree(handle->kvp[i].value);
+ handle->kvp[i].value = NULL;
+ } else {
+ if (!memcmp(handle->kvp[i].value, data, data_size))
+ return 0;
+ }
+ break;
+ }
+
+ if (i >= handle->num_elements) {
+ pr_err("%s(): Number of resources exceeds max allocated\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ if (i == handle->write_idx)
+ handle->write_idx++;
+
+ if (!handle->kvp[i].value) {
+ handle->kvp[i].value = kzalloc(data_size, GFP_FLAG(noirq));
+
+ if (!handle->kvp[i].value) {
+ pr_err("%s(): Failed malloc\n", __func__);
+ return -ENOMEM;
+ }
+ } else {
+ /* We enter the else case, if a key already exists but the
+ * data doesn't match. In which case, we should zero the data
+ * out.
+ */
+ memset(handle->kvp[i].value, 0, data_size);
+ }
+
+ /* Grow data_len by the full entry for new keys, or only by the size
+ * delta when an existing key changes size. */
+ if (!handle->kvp[i].valid)
+ handle->msg_hdr.data_len += msg_size;
+ else
+ handle->msg_hdr.data_len += (data_size - handle->kvp[i].nbytes);
+
+ handle->kvp[i].nbytes = data_size;
+ handle->kvp[i].key = key;
+ memcpy(handle->kvp[i].value, data, size);
+ handle->kvp[i].valid = true;
+
+ return 0;
+
+}
+
+/*
+ * Allocate a request handle for @num_elements key/value pairs targeting
+ * (set, rsc_type, rsc_id).  @noirq selects GFP_ATOMIC allocations.
+ *
+ * Returns the handle, NULL on allocation failure, or ERR_PTR(probe_status)
+ * while the driver has not probed.  Callers free via msm_rpm_free_request().
+ */
+static struct msm_rpm_request *msm_rpm_create_request_common(
+		enum msm_rpm_set set, uint32_t rsc_type, uint32_t rsc_id,
+		int num_elements, bool noirq)
+{
+	struct msm_rpm_request *cdata;
+
+	if (probe_status)
+		return ERR_PTR(probe_status);
+
+	cdata = kzalloc(sizeof(struct msm_rpm_request),
+			GFP_FLAG(noirq));
+
+	if (!cdata) {
+		pr_err("%s():Cannot allocate memory for client data\n",
+				__func__);
+		goto cdata_alloc_fail;
+	}
+
+	cdata->msg_hdr.set = set;
+	cdata->msg_hdr.resource_type = rsc_type;
+	cdata->msg_hdr.resource_id = rsc_id;
+	cdata->msg_hdr.data_len = 0;
+
+	cdata->num_elements = num_elements;
+	cdata->write_idx = 0;
+
+	/*
+	 * kcalloc() instead of kzalloc(a * b): it checks the element-count
+	 * multiplication for overflow (num_elements is caller-controlled).
+	 */
+	cdata->kvp = kcalloc(num_elements, sizeof(struct msm_rpm_kvp_data),
+		GFP_FLAG(noirq));
+
+	if (!cdata->kvp) {
+		pr_warn("%s(): Cannot allocate memory for key value data\n",
+				__func__);
+		goto kvp_alloc_fail;
+	}
+
+	cdata->buf = kzalloc(DEFAULT_BUFFER_SIZE, GFP_FLAG(noirq));
+
+	if (!cdata->buf)
+		goto buf_alloc_fail;
+
+	cdata->numbytes = DEFAULT_BUFFER_SIZE;
+	return cdata;
+
+buf_alloc_fail:
+	kfree(cdata->kvp);
+kvp_alloc_fail:
+	kfree(cdata);
+cdata_alloc_fail:
+	return NULL;
+
+}
+
+/* Release a request handle and every per-KVP value buffer it owns.
+ * NULL handles are tolerated, mirroring kfree() semantics. */
+void msm_rpm_free_request(struct msm_rpm_request *handle)
+{
+	int idx;
+
+	if (!handle)
+		return;
+
+	for (idx = 0; idx < handle->num_elements; idx++)
+		kfree(handle->kvp[idx].value);
+	kfree(handle->kvp);
+	kfree(handle->buf);
+	kfree(handle);
+}
+EXPORT_SYMBOL(msm_rpm_free_request);
+
+/* Public wrapper: create a request using sleeping (GFP_KERNEL) allocs. */
+struct msm_rpm_request *msm_rpm_create_request(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements)
+{
+ return msm_rpm_create_request_common(set, rsc_type, rsc_id,
+ num_elements, false);
+}
+EXPORT_SYMBOL(msm_rpm_create_request);
+
+/* Public wrapper: as above, but safe in irq-disabled contexts (GFP_ATOMIC). */
+struct msm_rpm_request *msm_rpm_create_request_noirq(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements)
+{
+ return msm_rpm_create_request_common(set, rsc_type, rsc_id,
+ num_elements, true);
+}
+EXPORT_SYMBOL(msm_rpm_create_request_noirq);
+
+/* Public wrapper: add a KVP using sleeping allocations. */
+int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int size)
+{
+ return msm_rpm_add_kvp_data_common(handle, key, data, size, false);
+
+}
+EXPORT_SYMBOL(msm_rpm_add_kvp_data);
+
+/* Public wrapper: add a KVP from irq-disabled context (GFP_ATOMIC). */
+int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int size)
+{
+ return msm_rpm_add_kvp_data_common(handle, key, data, size, true);
+}
+EXPORT_SYMBOL(msm_rpm_add_kvp_data_noirq);
+
+/* Runs in interrupt context */
+/*
+ * SMD event callback: wake the reader on inbound data, and signal channel
+ * open completion so probe can proceed.  Other events are ignored.
+ */
+static void msm_rpm_notify(void *data, unsigned event)
+{
+ struct msm_rpm_driver_data *pdata = (struct msm_rpm_driver_data *)data;
+ BUG_ON(!pdata);
+
+ /* Events may arrive before the channel pointer is recorded. */
+ if (!(pdata->ch_info))
+ return;
+
+ switch (event) {
+ case SMD_EVENT_DATA:
+ complete(&data_ready);
+ break;
+ case SMD_EVENT_OPEN:
+ complete(&pdata->smd_open);
+ break;
+ case SMD_EVENT_CLOSE:
+ case SMD_EVENT_STATUS:
+ case SMD_EVENT_REOPEN_READY:
+ break;
+ default:
+ pr_info("Unknown SMD event\n");
+
+ }
+}
+
+/* True while at least one sent request is still awaiting its RPM ack. */
+bool msm_rpm_waiting_for_ack(void)
+{
+	unsigned long flags;
+	bool pending;
+
+	spin_lock_irqsave(&msm_rpm_list_lock, flags);
+	pending = !list_empty(&msm_rpm_wait_list);
+	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+
+	return pending;
+}
+
+/* Find the wait-list entry for @msg_id, or NULL if none is outstanding. */
+static struct msm_rpm_wait_data *msm_rpm_get_entry_from_msg_id(uint32_t msg_id)
+{
+	struct msm_rpm_wait_data *w, *found = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_rpm_list_lock, flags);
+
+	list_for_each_entry(w, &msm_rpm_wait_list, list) {
+		if (w->msg_id == msg_id) {
+			found = w;
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+	return found;
+}
+
+/*
+ * Allocate the next unused message id.
+ * Ids 0 and 1 are reserved (0 = error, 1 = unmodified/unsent data),
+ * and ids still on the wait list are skipped to avoid aliasing acks.
+ */
+static uint32_t msm_rpm_get_next_msg_id(void)
+{
+	uint32_t id;
+
+	/*
+	 * A message id of 0 is used by the driver to indicate a error
+	 * condition. The RPM driver uses a id of 1 to indicate unsent data
+	 * when the data sent over hasn't been modified. This isn't a error
+	 * scenario and wait for ack returns a success when the message id is 1.
+	 */
+
+	do {
+		id = atomic_inc_return(&msm_rpm_msg_id);
+	} while ((id == 0) || (id == 1) || msm_rpm_get_entry_from_msg_id(id));
+
+	return id;
+}
+
+/*
+ * Register @msg_id on the wait list so its ack can be matched later.
+ * GFP_ATOMIC: callers may hold spinlocks / run with irqs off.
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int msm_rpm_add_wait_list(uint32_t msg_id)
+{
+	unsigned long flags;
+	struct msm_rpm_wait_data *data =
+		kzalloc(sizeof(struct msm_rpm_wait_data), GFP_ATOMIC);
+
+	if (!data)
+		return -ENOMEM;
+
+	init_completion(&data->ack);
+	data->ack_recd = false;
+	data->msg_id = msg_id;
+	/* INIT_ERROR marks "no ack received yet" for the noirq poll path */
+	data->errno = INIT_ERROR;
+	spin_lock_irqsave(&msm_rpm_list_lock, flags);
+	list_add(&data->list, &msm_rpm_wait_list);
+	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+
+	return 0;
+}
+
+/* Unlink a wait-list entry under the list lock and free it. */
+static void msm_rpm_free_list_entry(struct msm_rpm_wait_data *elem)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_rpm_list_lock, flags);
+	list_del(&elem->list);
+	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+	kfree(elem);
+}
+
+/*
+ * Deliver an ack: record @errno on the matching wait-list entry and
+ * complete its waiter. An unmatched msg_id is silently dropped (see
+ * the sleep-set comment below).
+ */
+static void msm_rpm_process_ack(uint32_t msg_id, int errno)
+{
+	struct list_head *ptr;
+	struct msm_rpm_wait_data *elem = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&msm_rpm_list_lock, flags);
+
+	list_for_each(ptr, &msm_rpm_wait_list) {
+		elem = list_entry(ptr, struct msm_rpm_wait_data, list);
+		if (elem && (elem->msg_id == msg_id)) {
+			elem->errno = errno;
+			elem->ack_recd = true;
+			complete(&elem->ack);
+			break;
+		}
+		elem = NULL;
+	}
+	/* Special case where the sleep driver doesn't
+	 * wait for ACKs. This would decrease the latency involved with
+	 * entering RPM assisted power collapse.
+	 */
+
+	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+}
+
+/* On-the-wire layout of a single KVP element (id, length, value). */
+struct msm_rpm_kvp_packet {
+	uint32_t id;
+	uint32_t len;
+	uint32_t val;
+};
+
+/* Extract the acknowledged message id from a raw ack packet. */
+static inline uint32_t msm_rpm_get_msg_id_from_ack(uint8_t *buf)
+{
+	return ((struct msm_rpm_ack_msg *)buf)->id_ack;
+}
+
+/*
+ * Decode the error payload of an ack packet.
+ * Returns 0 when the ack carries no error payload; otherwise the RPM
+ * sent a NACK: -EINVAL for an unsupported resource, -ENODEV for any
+ * other (invalid header) NACK.
+ * NOTE(review): the length arithmetic assumes the ack header is
+ * followed by an "err\0" marker word pair — protocol-defined; the
+ * BUG_ON enforces that assumption.
+ */
+static inline int msm_rpm_get_error_from_ack(uint8_t *buf)
+{
+	uint8_t *tmp;
+	uint32_t req_len = ((struct msm_rpm_ack_msg *)buf)->req_len;
+
+	int rc = -ENODEV;
+
+	req_len -= sizeof(struct msm_rpm_ack_msg);
+	req_len += 2 * sizeof(uint32_t);
+	if (!req_len)
+		return 0;
+
+	tmp = buf + sizeof(struct msm_rpm_ack_msg);
+
+	BUG_ON(memcmp(tmp, ERR, sizeof(uint32_t)));
+
+	tmp += 2 * sizeof(uint32_t);
+
+	if (!(memcmp(tmp, INV_RSC, min_t(uint32_t, req_len,
+					sizeof(INV_RSC))-1))) {
+		pr_err("%s(): RPM NACK Unsupported resource\n", __func__);
+		rc = -EINVAL;
+	} else {
+		pr_err("%s(): RPM NACK Invalid header\n", __func__);
+	}
+
+	return rc;
+}
+
+/*
+ * Read one complete packet from the RPM SMD channel into @buf.
+ * Returns 0 on success, -EAGAIN when no full packet is available yet.
+ * @buf must be at least MAX_ERR_BUFFER_SIZE bytes.
+ */
+static int msm_rpm_read_smd_data(char *buf)
+{
+	uint32_t pkt_sz;
+	int bytes_read = 0;
+
+	pkt_sz = smd_cur_packet_size(msm_rpm_data.ch_info);
+
+	if (!pkt_sz)
+		return -EAGAIN;
+
+	BUG_ON(pkt_sz > MAX_ERR_BUFFER_SIZE);
+
+	/* wait until the whole packet has landed in the FIFO */
+	if (pkt_sz != smd_read_avail(msm_rpm_data.ch_info))
+		return -EAGAIN;
+
+	do {
+		int len;
+
+		len = smd_read(msm_rpm_data.ch_info, buf + bytes_read, pkt_sz);
+		/*
+		 * Trap read failures here: pkt_sz is unsigned, so the old
+		 * post-loop BUG_ON(pkt_sz < 0) could never fire, and a
+		 * negative len would underflow pkt_sz and spin forever.
+		 */
+		BUG_ON(len < 0);
+		pkt_sz -= len;
+		bytes_read += len;
+
+	} while (pkt_sz > 0);
+
+	return 0;
+}
+
+/*
+ * Worker loop: woken by msm_rpm_notify() whenever SMD data arrives,
+ * drains all available ack packets and dispatches them to waiters.
+ * Runs forever on the dedicated rpm-smd workqueue.
+ */
+static void msm_rpm_smd_work(struct work_struct *work)
+{
+	uint32_t msg_id;
+	int errno;
+	char buf[MAX_ERR_BUFFER_SIZE] = {0};
+
+	while (1) {
+		wait_for_completion(&data_ready);
+
+		/* serialise against the noirq ack-poll path */
+		spin_lock(&msm_rpm_data.smd_lock_read);
+		while (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
+			if (msm_rpm_read_smd_data(buf))
+				break;
+			msg_id = msm_rpm_get_msg_id_from_ack(buf);
+			errno = msm_rpm_get_error_from_ack(buf);
+			msm_rpm_process_ack(msg_id, errno);
+		}
+		spin_unlock(&msm_rpm_data.smd_lock_read);
+	}
+}
+
+/*
+ * Pretty/raw-print an outgoing request according to msm_rpm_debug_mask.
+ * The whole line is assembled in a local buffer first so it reaches the
+ * log as a single printk.
+ */
+static void msm_rpm_log_request(struct msm_rpm_request *cdata)
+{
+	char buf[DEBUG_PRINT_BUFFER_SIZE];
+	size_t buflen = DEBUG_PRINT_BUFFER_SIZE;
+	char name[5];
+	u32 value;
+	uint32_t i;
+	int j, prev_valid;
+	int valid_count = 0;
+	int pos = 0;
+
+	/* resource/key names are 4 ASCII chars packed into a u32 */
+	name[4] = 0;
+
+	for (i = 0; i < cdata->write_idx; i++)
+		if (cdata->kvp[i].valid)
+			valid_count++;
+
+	pos += scnprintf(buf + pos, buflen - pos, "%sRPM req: ", KERN_INFO);
+	if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_SHOW_MSG_ID)
+		pos += scnprintf(buf + pos, buflen - pos, "msg_id=%u, ",
+				cdata->msg_hdr.msg_id);
+	pos += scnprintf(buf + pos, buflen - pos, "s=%s",
+		(cdata->msg_hdr.set == MSM_RPM_CTX_ACTIVE_SET ? "act" : "slp"));
+
+	if ((msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY)
+	    && (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_RAW)) {
+		/* Both pretty and raw formatting */
+		memcpy(name, &cdata->msg_hdr.resource_type, sizeof(uint32_t));
+		pos += scnprintf(buf + pos, buflen - pos,
+			", rsc_type=0x%08X (%s), rsc_id=%u; ",
+			cdata->msg_hdr.resource_type, name,
+			cdata->msg_hdr.resource_id);
+
+		for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
+			if (!cdata->kvp[i].valid)
+				continue;
+
+			memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
+			pos += scnprintf(buf + pos, buflen - pos,
+					"[key=0x%08X (%s), value=%s",
+					cdata->kvp[i].key, name,
+					(cdata->kvp[i].nbytes ? "0x" : "null"));
+
+			for (j = 0; j < cdata->kvp[i].nbytes; j++)
+				pos += scnprintf(buf + pos, buflen - pos,
+						"%02X ",
+						cdata->kvp[i].value[j]);
+
+			if (cdata->kvp[i].nbytes)
+				pos += scnprintf(buf + pos, buflen - pos, "(");
+
+			for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
+				value = 0;
+				memcpy(&value, &cdata->kvp[i].value[j],
+					min_t(uint32_t, sizeof(uint32_t),
+						cdata->kvp[i].nbytes - j));
+				pos += scnprintf(buf + pos, buflen - pos, "%u",
+						value);
+				if (j + 4 < cdata->kvp[i].nbytes)
+					pos += scnprintf(buf + pos,
+						buflen - pos, " ");
+			}
+			if (cdata->kvp[i].nbytes)
+				pos += scnprintf(buf + pos, buflen - pos, ")");
+			pos += scnprintf(buf + pos, buflen - pos, "]");
+			if (prev_valid + 1 < valid_count)
+				pos += scnprintf(buf + pos, buflen - pos, ", ");
+			prev_valid++;
+		}
+	} else if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY) {
+		/* Pretty formatting only */
+		memcpy(name, &cdata->msg_hdr.resource_type, sizeof(uint32_t));
+		pos += scnprintf(buf + pos, buflen - pos, " %s %u; ", name,
+			cdata->msg_hdr.resource_id);
+
+		for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
+			if (!cdata->kvp[i].valid)
+				continue;
+
+			memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
+			pos += scnprintf(buf + pos, buflen - pos, "%s=%s",
+				name, (cdata->kvp[i].nbytes ? "" : "null"));
+
+			for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
+				value = 0;
+				memcpy(&value, &cdata->kvp[i].value[j],
+					min_t(uint32_t, sizeof(uint32_t),
+						cdata->kvp[i].nbytes - j));
+				pos += scnprintf(buf + pos, buflen - pos, "%u",
+						value);
+
+				if (j + 4 < cdata->kvp[i].nbytes)
+					pos += scnprintf(buf + pos,
+						buflen - pos, " ");
+			}
+			if (prev_valid + 1 < valid_count)
+				pos += scnprintf(buf + pos, buflen - pos, ", ");
+			prev_valid++;
+		}
+	} else {
+		/* Raw formatting only */
+		pos += scnprintf(buf + pos, buflen - pos,
+			", rsc_type=0x%08X, rsc_id=%u; ",
+			cdata->msg_hdr.resource_type,
+			cdata->msg_hdr.resource_id);
+
+		for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
+			if (!cdata->kvp[i].valid)
+				continue;
+
+			pos += scnprintf(buf + pos, buflen - pos,
+					"[key=0x%08X, value=%s",
+					cdata->kvp[i].key,
+					(cdata->kvp[i].nbytes ? "0x" : "null"));
+			for (j = 0; j < cdata->kvp[i].nbytes; j++) {
+				pos += scnprintf(buf + pos, buflen - pos,
+						"%02X",
+						cdata->kvp[i].value[j]);
+				if (j + 1 < cdata->kvp[i].nbytes)
+					pos += scnprintf(buf + pos,
+						buflen - pos, " ");
+			}
+			pos += scnprintf(buf + pos, buflen - pos, "]");
+			if (prev_valid + 1 < valid_count)
+				pos += scnprintf(buf + pos, buflen - pos, ", ");
+			prev_valid++;
+		}
+	}
+
+	pos += scnprintf(buf + pos, buflen - pos, "\n");
+	/*
+	 * Never pass the assembled buffer as the printk format string:
+	 * any '%' inside logged KVP data would be treated as a conversion
+	 * specifier (format-string bug). The KERN_INFO prefix embedded at
+	 * the start of buf is still honoured by the log core.
+	 */
+	printk("%s", buf);
+}
+/*
+ * Write @size bytes of @buf to the RPM SMD channel, spinning until the
+ * FIFO has room. In !noirq mode the write lock is dropped while
+ * waiting; in noirq mode we busy-wait with udelay under the lock.
+ * Returns the byte count from smd_write(), or a negative error if the
+ * channel reports a failure.
+ */
+static int msm_rpm_send_smd_buffer(char *buf, uint32_t size, bool noirq)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
+
+	/*
+	 * Note: the original code queried smd_write_avail() once before
+	 * the loop and discarded the result; the loop condition performs
+	 * the same query, so the extra call was removed.
+	 */
+	while ((ret = smd_write_avail(msm_rpm_data.ch_info)) < size) {
+		if (ret < 0)
+			break;
+		if (!noirq) {
+			spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write,
+					flags);
+			cpu_relax();
+			spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
+		} else
+			udelay(5);
+	}
+
+	if (ret < 0) {
+		pr_err("%s(): SMD not initialized\n", __func__);
+		spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
+		return ret;
+	}
+
+	ret = smd_write(msm_rpm_data.ch_info, buf, size);
+	spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
+	return ret;
+
+}
+/*
+ * Serialise @cdata into its wire format and hand it to the RPM.
+ * Returns the assigned message id on success, 1 when there is nothing
+ * to send (no data, or sleep-set data absorbed by the local buffer),
+ * 0 on failure, or a stashed probe error.
+ * Valid KVPs are consumed (marked invalid) on successful send.
+ */
+static int msm_rpm_send_data(struct msm_rpm_request *cdata,
+		int msg_type, bool noirq)
+{
+	uint8_t *tmpbuff;
+	int ret;
+	uint32_t i;
+	uint32_t msg_size;
+	int req_hdr_sz, msg_hdr_sz;
+
+	if (probe_status)
+		return probe_status;
+
+	/* msg id 1 == "nothing modified since last send" */
+	if (!cdata->msg_hdr.data_len)
+		return 1;
+
+	req_hdr_sz = sizeof(cdata->req_hdr);
+	msg_hdr_sz = sizeof(cdata->msg_hdr);
+
+	cdata->req_hdr.service_type = msm_rpm_request_service[msg_type];
+
+	cdata->req_hdr.request_len = cdata->msg_hdr.data_len + msg_hdr_sz;
+	msg_size = cdata->req_hdr.request_len + req_hdr_sz;
+
+	/* grow the scratch buffer if this request no longer fits */
+	if (msg_size > cdata->numbytes) {
+		kfree(cdata->buf);
+		cdata->numbytes = msg_size;
+		cdata->buf = kzalloc(msg_size, GFP_FLAG(noirq));
+	}
+
+	if (!cdata->buf) {
+		pr_err("%s(): Failed malloc\n", __func__);
+		/* 0 is the driver's "invalid msg id" error sentinel */
+		return 0;
+	}
+
+	tmpbuff = cdata->buf;
+
+	tmpbuff += req_hdr_sz + msg_hdr_sz;
+
+	for (i = 0; (i < cdata->write_idx); i++) {
+		/* Sanity check */
+		BUG_ON((tmpbuff - cdata->buf) > cdata->numbytes);
+
+		if (!cdata->kvp[i].valid)
+			continue;
+
+		memcpy(tmpbuff, &cdata->kvp[i].key, sizeof(uint32_t));
+		tmpbuff += sizeof(uint32_t);
+
+		memcpy(tmpbuff, &cdata->kvp[i].nbytes, sizeof(uint32_t));
+		tmpbuff += sizeof(uint32_t);
+
+		memcpy(tmpbuff, cdata->kvp[i].value, cdata->kvp[i].nbytes);
+		tmpbuff += cdata->kvp[i].nbytes;
+
+		if (cdata->msg_hdr.set == MSM_RPM_CTX_SLEEP_SET)
+			msm_rpm_notify_sleep_chain(&cdata->msg_hdr,
+					&cdata->kvp[i]);
+
+	}
+
+	memcpy(cdata->buf, &cdata->req_hdr, req_hdr_sz + msg_hdr_sz);
+
+	/* sleep-set data may be buffered locally and flushed at PC entry */
+	if ((cdata->msg_hdr.set == MSM_RPM_CTX_SLEEP_SET) &&
+		!msm_rpm_smd_buffer_request(cdata->buf, msg_size,
+			GFP_FLAG(noirq)))
+		return 1;
+
+	cdata->msg_hdr.msg_id = msm_rpm_get_next_msg_id();
+
+	memcpy(cdata->buf + req_hdr_sz, &cdata->msg_hdr, msg_hdr_sz);
+
+	if (msm_rpm_debug_mask
+	    & (MSM_RPM_LOG_REQUEST_PRETTY | MSM_RPM_LOG_REQUEST_RAW))
+		msm_rpm_log_request(cdata);
+
+	if (standalone) {
+		for (i = 0; (i < cdata->write_idx); i++)
+			cdata->kvp[i].valid = false;
+
+		cdata->msg_hdr.data_len = 0;
+		ret = cdata->msg_hdr.msg_id;
+		return ret;
+	}
+
+	msm_rpm_add_wait_list(cdata->msg_hdr.msg_id);
+
+	ret = msm_rpm_send_smd_buffer(&cdata->buf[0], msg_size, noirq);
+
+	if (ret == msg_size) {
+		for (i = 0; (i < cdata->write_idx); i++)
+			cdata->kvp[i].valid = false;
+		cdata->msg_hdr.data_len = 0;
+		ret = cdata->msg_hdr.msg_id;
+	} else if (ret < msg_size) {
+		struct msm_rpm_wait_data *rc;
+		/*
+		 * Log the real smd_write() result before clobbering it;
+		 * the original code zeroed ret first, so the message
+		 * always reported "ret:0".
+		 */
+		pr_err("Failed to write data msg_size:%d ret:%d\n",
+				msg_size, ret);
+		ret = 0;
+		rc = msm_rpm_get_entry_from_msg_id(cdata->msg_hdr.msg_id);
+		if (rc)
+			msm_rpm_free_list_entry(rc);
+	}
+	return ret;
+}
+
+/*
+ * msm_rpm_send_request() - send a prepared request (may sleep).
+ * Serialised with a local mutex so concurrent senders don't interleave
+ * writes on the SMD channel. Returns the msg id, 1, 0, or an errno
+ * (see msm_rpm_send_data()).
+ */
+int msm_rpm_send_request(struct msm_rpm_request *handle)
+{
+	int ret;
+	static DEFINE_MUTEX(send_mtx);
+
+	mutex_lock(&send_mtx);
+	ret = msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, false);
+	mutex_unlock(&send_mtx);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_rpm_send_request);
+
+/* Atomic-context send; no mutex, relies on the SMD write spinlock. */
+int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
+{
+	return msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, true);
+}
+EXPORT_SYMBOL(msm_rpm_send_request_noirq);
+
+/*
+ * msm_rpm_wait_for_ack() - block until the RPM acks @msg_id.
+ * msg_id 0 is the error sentinel; 1 means "nothing was sent" and is a
+ * success. In standalone mode, or when no entry is pending, returns 0.
+ * Otherwise returns the errno carried by the ack.
+ */
+int msm_rpm_wait_for_ack(uint32_t msg_id)
+{
+	struct msm_rpm_wait_data *elem;
+	int rc = 0;
+
+	if (!msg_id) {
+		pr_err("%s(): Invalid msg id\n", __func__);
+		return -ENOMEM;
+	}
+
+	if (msg_id == 1)
+		return rc;
+
+	if (standalone)
+		return rc;
+
+	elem = msm_rpm_get_entry_from_msg_id(msg_id);
+	if (!elem)
+		return rc;
+
+	wait_for_completion(&elem->ack);
+
+	rc = elem->errno;
+	msm_rpm_free_list_entry(elem);
+
+	return rc;
+}
+EXPORT_SYMBOL(msm_rpm_wait_for_ack);
+
+/*
+ * Atomic-context ack wait: instead of sleeping, polls the SMD channel
+ * under smd_lock_read and processes every ack packet until the one for
+ * @msg_id shows up. Sentinel handling matches msm_rpm_wait_for_ack().
+ * Any further pending packet is handed back to the worker thread via
+ * data_ready on exit.
+ */
+int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
+{
+	struct msm_rpm_wait_data *elem;
+	unsigned long flags;
+	int rc = 0;
+	uint32_t id = 0;
+
+	if (!msg_id) {
+		pr_err("%s(): Invalid msg id\n", __func__);
+		return -ENOMEM;
+	}
+
+	if (msg_id == 1)
+		return 0;
+
+	if (standalone)
+		return 0;
+
+	spin_lock_irqsave(&msm_rpm_data.smd_lock_read, flags);
+
+	elem = msm_rpm_get_entry_from_msg_id(msg_id);
+
+	if (!elem)
+		/* Should this be a bug
+		 * Is it ok for another thread to read the msg?
+		 */
+		goto wait_ack_cleanup;
+
+	/* the worker thread may already have delivered this ack */
+	if (elem->errno != INIT_ERROR) {
+		rc = elem->errno;
+		msm_rpm_free_list_entry(elem);
+		goto wait_ack_cleanup;
+	}
+
+	/* busy-poll: drain and dispatch acks until ours arrives */
+	while (id != msg_id) {
+		if (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
+			int errno;
+			char buf[MAX_ERR_BUFFER_SIZE] = {};
+
+			msm_rpm_read_smd_data(buf);
+			id = msm_rpm_get_msg_id_from_ack(buf);
+			errno = msm_rpm_get_error_from_ack(buf);
+			msm_rpm_process_ack(id, errno);
+		}
+	}
+
+	rc = elem->errno;
+
+	msm_rpm_free_list_entry(elem);
+wait_ack_cleanup:
+	spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags);
+
+	/* let the worker pick up anything we left in the FIFO */
+	if (smd_is_pkt_avail(msm_rpm_data.ch_info))
+		complete(&data_ready);
+	return rc;
+}
+EXPORT_SYMBOL(msm_rpm_wait_for_ack_noirq);
+
+/*
+ * msm_rpm_send_message() - one-shot convenience wrapper: build a
+ * request from @kvp[0..nelems), send it, and wait for the ack.
+ * May sleep. Returns 0 on success or a negative errno.
+ */
+int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+	int i, rc;
+	struct msm_rpm_request *req =
+		msm_rpm_create_request(set, rsc_type, rsc_id, nelems);
+
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+
+	if (!req)
+		return -ENOMEM;
+
+	for (i = 0; i < nelems; i++) {
+		rc = msm_rpm_add_kvp_data(req, kvp[i].key,
+				kvp[i].data, kvp[i].length);
+		if (rc)
+			goto bail;
+	}
+
+	rc = msm_rpm_wait_for_ack(msm_rpm_send_request(req));
+bail:
+	msm_rpm_free_request(req);
+	return rc;
+}
+EXPORT_SYMBOL(msm_rpm_send_message);
+
+/* Atomic-context counterpart of msm_rpm_send_message(). */
+int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
+		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+	int i, rc;
+	struct msm_rpm_request *req =
+		msm_rpm_create_request_noirq(set, rsc_type, rsc_id, nelems);
+
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+
+	if (!req)
+		return -ENOMEM;
+
+	for (i = 0; i < nelems; i++) {
+		rc = msm_rpm_add_kvp_data_noirq(req, kvp[i].key,
+				kvp[i].data, kvp[i].length);
+		if (rc)
+			goto bail;
+	}
+
+	rc = msm_rpm_wait_for_ack_noirq(msm_rpm_send_request_noirq(req));
+bail:
+	msm_rpm_free_request(req);
+	return rc;
+}
+EXPORT_SYMBOL(msm_rpm_send_message_noirq);
+
+/**
+ * msm_rpm_enter_sleep() - prepare the RPM link for power collapse.
+ * @print: forwarded to the sleep-set flush for request logging
+ * @cpumask: CPUs whose SMD receive interrupt should be masked
+ *
+ * Flushes buffered sleep-set requests and masks the SMD interrupt so
+ * it cannot wake us from sleep. No-op (returns 0) in standalone mode.
+ */
+int msm_rpm_enter_sleep(bool print, const struct cpumask *cpumask)
+{
+	if (standalone)
+		return 0;
+
+	msm_rpm_flush_requests(print);
+
+	return smd_mask_receive_interrupt(msm_rpm_data.ch_info, true, cpumask);
+}
+EXPORT_SYMBOL(msm_rpm_enter_sleep);
+
+/**
+ * msm_rpm_exit_sleep() - undo msm_rpm_enter_sleep() on resume by
+ * re-enabling the SMD receive interrupt. No-op in standalone mode.
+ */
+void msm_rpm_exit_sleep(void)
+{
+	if (standalone)
+		return;
+
+	smd_mask_receive_interrupt(msm_rpm_data.ch_info, false, NULL);
+}
+EXPORT_SYMBOL(msm_rpm_exit_sleep);
+
+/*
+ * Probe: read the SMD channel name/type from DT, open the channel,
+ * and start the ack-reader worker. "rpm-standalone" skips SMD entirely
+ * (requests are accepted but never sent). The final status is stashed
+ * in probe_status so msm_rpm_send_data() can fail fast before probe
+ * succeeds (or propagate -EPROBE_DEFER).
+ */
+static int msm_rpm_dev_probe(struct platform_device *pdev)
+{
+	char *key = NULL;
+	int ret = 0;
+
+	key = "rpm-channel-name";
+	ret = of_property_read_string(pdev->dev.of_node, key,
+					&msm_rpm_data.ch_name);
+	if (ret) {
+		pr_err("%s(): Failed to read node: %s, key=%s\n", __func__,
+			pdev->dev.of_node->full_name, key);
+		goto fail;
+	}
+
+	key = "rpm-channel-type";
+	ret = of_property_read_u32(pdev->dev.of_node, key,
+					&msm_rpm_data.ch_type);
+	if (ret) {
+		pr_err("%s(): Failed to read node: %s, key=%s\n", __func__,
+			pdev->dev.of_node->full_name, key);
+		goto fail;
+	}
+
+	key = "rpm-standalone";
+	standalone = of_property_read_bool(pdev->dev.of_node, key);
+	if (standalone)
+		goto skip_smd_init;
+
+	ret = smd_named_open_on_edge(msm_rpm_data.ch_name,
+				msm_rpm_data.ch_type,
+				&msm_rpm_data.ch_info,
+				&msm_rpm_data,
+				msm_rpm_notify);
+	if (ret) {
+		if (ret != -EPROBE_DEFER) {
+			pr_err("%s: Cannot open RPM channel %s %d\n",
+				__func__, msm_rpm_data.ch_name,
+				msm_rpm_data.ch_type);
+		}
+		goto fail;
+	}
+
+	spin_lock_init(&msm_rpm_data.smd_lock_write);
+	spin_lock_init(&msm_rpm_data.smd_lock_read);
+	INIT_WORK(&msm_rpm_data.work, msm_rpm_smd_work);
+
+	/* msm_rpm_notify() completes smd_open once the channel is up */
+	wait_for_completion(&msm_rpm_data.smd_open);
+
+	/* reads are driven by the worker, not the SMD read interrupt */
+	smd_disable_read_intr(msm_rpm_data.ch_info);
+
+	msm_rpm_smd_wq = alloc_workqueue("rpm-smd",
+			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
+	if (!msm_rpm_smd_wq) {
+		pr_err("%s: Unable to alloc rpm-smd workqueue\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+	queue_work(msm_rpm_smd_wq, &msm_rpm_data.work);
+
+skip_smd_init:
+	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+
+	if (standalone)
+		pr_info("%s: RPM running in standalone mode\n", __func__);
+fail:
+	probe_status = ret;
+	return probe_status;
+}
+
+/* DT match table and platform driver for the qcom,rpm-smd node. */
+static struct of_device_id msm_rpm_match_table[] = {
+	{.compatible = "qcom,rpm-smd"},
+	{},
+};
+
+static struct platform_driver msm_rpm_device_driver = {
+	.probe = msm_rpm_dev_probe,
+	.driver = {
+		.name = "rpm-smd",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_rpm_match_table,
+	},
+};
+
+/*
+ * Register the driver; exported and guarded so early callers and the
+ * arch_initcall can both invoke it without double registration.
+ */
+int __init msm_rpm_driver_init(void)
+{
+	static bool registered;
+
+	if (registered)
+		return 0;
+	registered = true;
+
+	return platform_driver_register(&msm_rpm_device_driver);
+}
+EXPORT_SYMBOL(msm_rpm_driver_init);
+arch_initcall(msm_rpm_driver_init);
diff --git a/drivers/soc/qcom/rpm_log.c b/drivers/soc/qcom/rpm_log.c
new file mode 100644
index 000000000000..a667355690c4
--- /dev/null
+++ b/drivers/soc/qcom/rpm_log.c
@@ -0,0 +1,527 @@
+/* Copyright (c) 2010-2011, 2013-2014, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include "rpm_log.h"
+
+/* registers in MSM_RPM_LOG_PAGE_INDICES */
+enum {
+ MSM_RPM_LOG_TAIL,
+ MSM_RPM_LOG_HEAD
+};
+
+/* used to 4 byte align message lengths */
+#define PADDED_LENGTH(x) (0xFFFFFFFC & ((x) + 3))
+
+/* calculates the character string length of a message of byte length x */
+#define PRINTED_LENGTH(x) ((x) * 6 + 3)
+
+/* number of ms to wait between checking for new messages in the RPM log */
+#define RECHECK_TIME (50)
+
+#define VERSION_8974 0x1000
+#define RPM_ULOG_LENGTH_SHIFT 16
+#define RPM_ULOG_LENGTH_MASK 0xFFFF0000
+
+/* Per-reader state: one private copy buffer per open()ed file. */
+struct msm_rpm_log_buffer {
+	char *data;     /* local copy of printable log text */
+	u32 len;        /* bytes of valid text in data */
+	u32 pos;        /* read cursor into data */
+	u32 max_len;    /* capacity of data */
+	u32 read_idx;   /* index into the shared RPM log buffer */
+	struct msm_rpm_log_platform_data *pdata;
+};
+
+/******************************************************************************
+ * Internal functions
+ *****************************************************************************/
+
+/* Read one u32 register @reg of log @page from the mapped RPM memory. */
+static inline u32
+msm_rpm_log_read(const struct msm_rpm_log_platform_data *pdata, u32 page,
+		 u32 reg)
+{
+	return readl_relaxed(pdata->reg_base + pdata->reg_offsets[page]
+				+ reg * 4);
+}
+
+/*
+ * msm_rpm_log_copy() - Copies messages from a volatile circular buffer in
+ * the RPM's shared memory into a private local buffer
+ * msg_buffer: pointer to local buffer (string)
+ * buf_len: length of local buffer in bytes
+ * read_idx: in/out index into shared memory buffer
+ *
+ * Return value: number of bytes written to the local buffer
+ *
+ * Copies messages stored in a circular buffer in the RPM Message Memory into
+ * a specified local buffer. The RPM processor is unaware of these reading
+ * efforts, so care is taken to make sure that messages are valid both before
+ * and after reading. The RPM processor utilizes a ULog driver to write the
+ * log. The RPM processor maintains tail and head indices. These correspond
+ * to the next byte to write into, and the first valid byte, respectively.
+ * Both indices increase monotonically (except for rollover).
+ *
+ * Messages take the form of [(u32)length] [(char)data0,1,...] in which the
+ * length specifies the number of payload bytes. Messages must be 4 byte
+ * aligned, so padding is added at the end of a message as needed.
+ *
+ * Print format:
+ * - 0xXX, 0xXX, 0xXX
+ * - 0xXX
+ * etc...
+ *
+ * NOTE(review): this is a lock-free reader racing the RPM writer; the
+ * repeated head/tail re-reads and the roll-back at the end are what make
+ * a torn read safe. Do not reorder these reads.
+ */
+static u32 msm_rpm_log_copy(const struct msm_rpm_log_platform_data *pdata,
+			    char *msg_buffer, u32 buf_len, u32 *read_idx)
+{
+	u32 head_idx, tail_idx;
+	u32 pos = 0;
+	u32 i = 0;
+	u32 msg_len;
+	u32 pos_start;
+	char temp[4];
+
+	tail_idx = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_INDICES,
+				    MSM_RPM_LOG_TAIL);
+	head_idx = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_INDICES,
+				    MSM_RPM_LOG_HEAD);
+
+	/* loop while the remote buffer has valid messages left to read */
+	while (tail_idx - head_idx > 0 && tail_idx - *read_idx > 0) {
+		head_idx = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_INDICES,
+					    MSM_RPM_LOG_HEAD);
+		tail_idx = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_INDICES,
+					    MSM_RPM_LOG_TAIL);
+		/* check if the message to be read is valid */
+		if (tail_idx - *read_idx > tail_idx - head_idx) {
+			*read_idx = head_idx;
+			continue;
+		}
+
+		/*
+		 * Ensure that all indices are 4 byte aligned.
+		 * This conditions is required to interact with a ULog buffer
+		 * properly.
+		 */
+		if (!IS_ALIGNED((tail_idx | head_idx | *read_idx), 4))
+			break;
+
+		msg_len = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_BUFFER,
+				((*read_idx) & pdata->log_len_mask) >> 2);
+
+		/* Message length for 8974 is first 2 bytes.
+		 * Exclude message length and format from message length.
+		 */
+		if (pdata->version == VERSION_8974) {
+			msg_len = (msg_len & RPM_ULOG_LENGTH_MASK) >>
+					RPM_ULOG_LENGTH_SHIFT;
+			msg_len -= 4;
+		}
+
+		/* handle messages that claim to be longer than the log */
+		if (PADDED_LENGTH(msg_len) > tail_idx - *read_idx - 4)
+			msg_len = tail_idx - *read_idx - 4;
+
+		/* check that the local buffer has enough space for this msg */
+		if (pos + PRINTED_LENGTH(msg_len) > buf_len)
+			break;
+
+		pos_start = pos;
+		pos += scnprintf(msg_buffer + pos, buf_len - pos, "- ");
+
+		/* copy message payload to local buffer */
+		for (i = 0; i < msg_len; i++) {
+			/* read from shared memory 4 bytes at a time */
+			if (IS_ALIGNED(i, 4))
+				*((u32 *)temp) = msm_rpm_log_read(pdata,
+						MSM_RPM_LOG_PAGE_BUFFER,
+						((*read_idx + 4 + i) &
+							pdata->log_len_mask) >> 2);
+
+			pos += scnprintf(msg_buffer + pos, buf_len - pos,
+					 "0x%02X, ", temp[i & 0x03]);
+		}
+
+		pos += scnprintf(msg_buffer + pos, buf_len - pos, "\n");
+
+		head_idx = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_INDICES,
+					    MSM_RPM_LOG_HEAD);
+		tail_idx = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_INDICES,
+					    MSM_RPM_LOG_TAIL);
+
+		/* roll back if message that was read is not still valid */
+		if (tail_idx - *read_idx > tail_idx - head_idx)
+			pos = pos_start;
+
+		*read_idx += PADDED_LENGTH(msg_len) + 4;
+	}
+
+	return pos;
+}
+
+
+/*
+ * msm_rpm_log_file_read() - Reads in log buffer messages then outputs them to a
+ * user buffer
+ *
+ * Blocks (polling every RECHECK_TIME ms, interruptible) until text is
+ * available, unless O_NONBLOCK is set.
+ *
+ * Return value:
+ * >0: number of bytes copied to the user
+ * 0: interrupted before any text arrived
+ * -ENOMEM: no memory available
+ * -EINVAL: user buffer null or requested bytes 0
+ * -EFAULT: user buffer not writeable
+ * -EAGAIN: no bytes available at the moment (O_NONBLOCK)
+ */
+static ssize_t msm_rpm_log_file_read(struct file *file, char __user *bufu,
+				     size_t count, loff_t *ppos)
+{
+	u32 out_len, remaining;
+	struct msm_rpm_log_platform_data *pdata;
+	struct msm_rpm_log_buffer *buf;
+
+	buf = file->private_data;
+
+	if (!buf)
+		return -ENOMEM;
+
+	pdata = buf->pdata;
+
+	if (!pdata)
+		return -EINVAL;
+	if (!buf->data)
+		return -ENOMEM;
+	if (!bufu || count == 0)
+		return -EINVAL;
+	if (!access_ok(VERIFY_WRITE, bufu, count))
+		return -EFAULT;
+
+	/* check for more messages if local buffer empty */
+	if (buf->pos == buf->len) {
+		buf->pos = 0;
+		buf->len = msm_rpm_log_copy(pdata, buf->data, buf->max_len,
+						&(buf->read_idx));
+	}
+
+	if ((file->f_flags & O_NONBLOCK) && buf->len == 0)
+		return -EAGAIN;
+
+	/* loop until new messages arrive (break on signal) */
+	while (buf->len == 0) {
+		cond_resched();
+		if (msleep_interruptible(RECHECK_TIME))
+			break;
+		buf->len = msm_rpm_log_copy(pdata, buf->data, buf->max_len,
+						&(buf->read_idx));
+	}
+
+	out_len = ((buf->len - buf->pos) < count ? buf->len - buf->pos : count);
+
+	remaining = __copy_to_user(bufu, &(buf->data[buf->pos]), out_len);
+	buf->pos += out_len - remaining;
+
+	return out_len - remaining;
+}
+
+
+/*
+ * msm_rpm_log_file_open() - Allows a new reader to open the RPM log virtual
+ * file
+ *
+ * One local buffer is kmalloc'ed for each reader, so no resource sharing has
+ * to take place (besides the read only access to the RPM log buffer).
+ * The reader starts at the writer's current HEAD index.
+ *
+ * Return value:
+ * 0: success
+ * -EINVAL: no platform data attached to the inode
+ * -ENOMEM: no memory available
+ */
+static int msm_rpm_log_file_open(struct inode *inode, struct file *file)
+{
+	struct msm_rpm_log_buffer *buf;
+	struct msm_rpm_log_platform_data *pdata;
+
+	pdata = inode->i_private;
+	if (!pdata)
+		return -EINVAL;
+
+	file->private_data =
+		kmalloc(sizeof(struct msm_rpm_log_buffer), GFP_KERNEL);
+	if (!file->private_data) {
+		pr_err("%s: ERROR kmalloc failed to allocate %zu bytes\n",
+			__func__, sizeof(struct msm_rpm_log_buffer));
+		return -ENOMEM;
+	}
+	buf = file->private_data;
+
+	/* worst-case printable size of the whole shared log */
+	buf->data = kmalloc(PRINTED_LENGTH(pdata->log_len), GFP_KERNEL);
+	if (!buf->data) {
+		kfree(file->private_data);
+		file->private_data = NULL;
+		pr_err("%s: ERROR kmalloc failed to allocate %d bytes\n",
+			__func__, PRINTED_LENGTH(pdata->log_len));
+		return -ENOMEM;
+	}
+
+	buf->pdata = pdata;
+	buf->len = 0;
+	buf->pos = 0;
+	buf->max_len = PRINTED_LENGTH(pdata->log_len);
+	buf->read_idx = msm_rpm_log_read(pdata, MSM_RPM_LOG_PAGE_INDICES,
+					 MSM_RPM_LOG_HEAD);
+	return 0;
+}
+
+/* Release: free the per-reader copy buffer and its bookkeeping. */
+static int msm_rpm_log_file_close(struct inode *inode, struct file *file)
+{
+	kfree(((struct msm_rpm_log_buffer *)file->private_data)->data);
+	kfree(file->private_data);
+	return 0;
+}
+
+
+/* debugfs file operations for /sys/kernel/debug/rpm_log */
+static const struct file_operations msm_rpm_log_file_fops = {
+	.owner = THIS_MODULE,
+	.open = msm_rpm_log_file_open,
+	.read = msm_rpm_log_file_read,
+	.release = msm_rpm_log_file_close,
+};
+
+/*
+ * Probe: map the RPM log region (from DT resource or legacy platform
+ * data), parse the ULog header layout for version 0x1000 (8974-style),
+ * and expose the log as a debugfs file.
+ * NOTE(review): in the DT path pdata is kzalloc'd but never stored in
+ * dev.platform_data — the remove path cannot find it; confirm intended.
+ */
+static int msm_rpm_log_probe(struct platform_device *pdev)
+{
+	struct dentry *dent;
+	struct msm_rpm_log_platform_data *pdata;
+	struct resource *res = NULL;
+	struct device_node *node = NULL;
+	phys_addr_t page_buffer_address, rpm_addr_phys;
+	int ret = 0;
+	char *key = NULL;
+	uint32_t val = 0;
+
+	node = pdev->dev.of_node;
+
+	if (node) {
+		pdata = kzalloc(sizeof(struct msm_rpm_log_platform_data),
+				GFP_KERNEL);
+		if (!pdata)
+			return -ENOMEM;
+
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (!res) {
+			kfree(pdata);
+			return -EINVAL;
+		}
+
+		pdata->phys_addr_base = res->start;
+		pdata->phys_size = resource_size(res);
+
+		pdata->reg_base = ioremap_nocache(pdata->phys_addr_base,
+					pdata->phys_size);
+		if (!pdata->reg_base) {
+			pr_err("%s: ERROR could not ioremap: start=%pa, len=%u\n",
+				__func__, &pdata->phys_addr_base,
+				pdata->phys_size);
+			kfree(pdata);
+			return -EBUSY;
+		}
+		/* Read various parameters from the header if the
+		 * version of the RPM Ulog is 0x1000. This version
+		 * corresponds to the node in the rpm header which
+		 * holds RPM log on 8974.
+		 *
+		 * offset-page-buffer-addr: At this offset header
+		 * contains address of the location where raw log
+		 * starts
+		 * offset-log-len: At this offset header contains
+		 * the length of the log buffer.
+		 * offset-log-len-mask: At this offset header contains
+		 * the log length mask for the buffer.
+		 * offset-page-indices: At this offset header contains
+		 * the index for writer. */
+
+		key = "qcom,offset-version";
+		ret = of_property_read_u32(node, key, &val);
+		if (ret) {
+			pr_err("%s: Error in name %s key %s\n",
+				__func__, node->full_name, key);
+			ret = -EFAULT;
+			goto fail;
+		}
+
+		pdata->version = readl_relaxed(pdata->reg_base + val);
+		if (pdata->version == VERSION_8974) {
+			key = "qcom,rpm-addr-phys";
+			ret = of_property_read_u32(node, key, &val);
+			if (ret) {
+				pr_err("%s: Error in name %s key %s\n",
+					__func__, node->full_name, key);
+				ret = -EFAULT;
+				goto fail;
+			}
+
+			rpm_addr_phys = val;
+
+			key = "qcom,offset-page-buffer-addr";
+			ret = of_property_read_u32(node, key, &val);
+			if (ret) {
+				pr_err("%s: Error in name %s key %s\n",
+					__func__, node->full_name, key);
+				ret = -EFAULT;
+				goto fail;
+			}
+
+			/* header stores the raw-log address; convert to an
+			 * offset from our own mapping base */
+			page_buffer_address = rpm_addr_phys +
+					readl_relaxed(pdata->reg_base + val);
+			pdata->reg_offsets[MSM_RPM_LOG_PAGE_BUFFER] =
+				page_buffer_address - pdata->phys_addr_base;
+
+			key = "qcom,offset-log-len";
+			ret = of_property_read_u32(node, key, &val);
+			if (ret) {
+				pr_err("%s: Error in name %s key %s\n",
+					__func__, node->full_name, key);
+				ret = -EFAULT;
+				goto fail;
+			}
+			pdata->log_len = readl_relaxed(pdata->reg_base + val);
+
+			if (pdata->log_len > pdata->phys_size) {
+				pr_err("%s: Error phy size: %d should be atleast log length: %d\n",
+					__func__, pdata->phys_size,
+					pdata->log_len);
+
+				ret = -EINVAL;
+				goto fail;
+			}
+
+			key = "qcom,offset-log-len-mask";
+			ret = of_property_read_u32(node, key, &val);
+			if (ret) {
+				pr_err("%s: Error in name %s key %s\n",
+					__func__, node->full_name, key);
+				ret = -EFAULT;
+				goto fail;
+			}
+			pdata->log_len_mask = readl_relaxed(pdata->reg_base
+					+ val);
+
+			key = "qcom,offset-page-indices";
+			ret = of_property_read_u32(node, key, &val);
+			if (ret) {
+				pr_err("%s: Error in name %s key %s\n",
+					__func__, node->full_name, key);
+				ret = -EFAULT;
+				goto fail;
+			}
+			pdata->reg_offsets[MSM_RPM_LOG_PAGE_INDICES] =
+				val;
+		} else{
+			ret = -EINVAL;
+			goto fail;
+		}
+
+	} else{
+		pdata = pdev->dev.platform_data;
+		if (!pdata)
+			return -EINVAL;
+
+		pdata->reg_base = ioremap(pdata->phys_addr_base,
+				pdata->phys_size);
+		if (!pdata->reg_base) {
+			pr_err("%s: ERROR could not ioremap: start=%pa, len=%u\n",
+				__func__, &pdata->phys_addr_base,
+				pdata->phys_size);
+			return -EBUSY;
+		}
+	}
+
+	dent = debugfs_create_file("rpm_log", S_IRUGO, NULL,
+			pdata, &msm_rpm_log_file_fops);
+	if (!dent) {
+		pr_err("%s: ERROR debugfs_create_file failed\n", __func__);
+		if (pdata->version == VERSION_8974) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, dent);
+
+	pr_notice("%s: OK\n", __func__);
+	return 0;
+
+fail:
+	iounmap(pdata->reg_base);
+	kfree(pdata);
+	return ret;
+}
+
+/*
+ * Remove: tear down the debugfs file and unmap the log region.
+ * When the device was probed via device tree, probe kzalloc'd its
+ * pdata without storing it in dev.platform_data, so pdata is NULL
+ * here — guard the iounmap instead of dereferencing NULL on unbind.
+ */
+static int msm_rpm_log_remove(struct platform_device *pdev)
+{
+	struct dentry *dent;
+	struct msm_rpm_log_platform_data *pdata;
+
+	pdata = pdev->dev.platform_data;
+
+	if (pdata)
+		iounmap(pdata->reg_base);
+
+	dent = platform_get_drvdata(pdev);
+	debugfs_remove(dent);
+	platform_set_drvdata(pdev, NULL);
+
+	pr_notice("%s: OK\n", __func__);
+	return 0;
+}
+
+/* DT match table and platform driver for the qcom,rpm-log node. */
+static struct of_device_id rpm_log_table[] = {
+	{.compatible = "qcom,rpm-log"},
+	{},
+};
+
+static struct platform_driver msm_rpm_log_driver = {
+	.probe = msm_rpm_log_probe,
+	.remove = msm_rpm_log_remove,
+	.driver = {
+		.name = "msm_rpm_log",
+		.owner = THIS_MODULE,
+		.of_match_table = rpm_log_table,
+	},
+};
+
+/* Standard module register/unregister boilerplate. */
+static int __init msm_rpm_log_init(void)
+{
+	return platform_driver_register(&msm_rpm_log_driver);
+}
+
+static void __exit msm_rpm_log_exit(void)
+{
+	platform_driver_unregister(&msm_rpm_log_driver);
+}
+
+module_init(msm_rpm_log_init);
+module_exit(msm_rpm_log_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM RPM Log driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:msm_rpm_log");
diff --git a/drivers/soc/qcom/rpm_log.h b/drivers/soc/qcom/rpm_log.h
new file mode 100644
index 000000000000..f75937e59cc3
--- /dev/null
+++ b/drivers/soc/qcom/rpm_log.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2010, 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_RPM_LOG_H
+#define __ARCH_ARM_MACH_MSM_RPM_LOG_H
+
+#include <linux/types.h>
+
+/* Indices into msm_rpm_log_platform_data.reg_offsets; COUNT is the
+ * array size, not a real page. */
+enum {
+	MSM_RPM_LOG_PAGE_INDICES,
+	MSM_RPM_LOG_PAGE_BUFFER,
+	MSM_RPM_LOG_PAGE_COUNT
+};
+
+/*
+ * Describes where the RPM log lives in MSG RAM.  reg_base is filled in
+ * at probe time by ioremapping phys_addr_base/phys_size.
+ */
+struct msm_rpm_log_platform_data {
+	u32 reg_offsets[MSM_RPM_LOG_PAGE_COUNT];	/* page offsets from reg_base */
+	u32 log_len;		/* log buffer length in bytes */
+	u32 log_len_mask;	/* presumably log_len - 1 for wrap-around -- TODO confirm */
+	phys_addr_t phys_addr_base;	/* physical base of the log region */
+	u32 phys_size;		/* size of the region to map */
+	u32 version;		/* log layout version (e.g. VERSION_8974 in rpm_log.c) */
+	void __iomem *reg_base;	/* ioremapped base, set during probe */
+};
+
+#endif /* __ARCH_ARM_MACH_MSM_RPM_LOG_H */
diff --git a/drivers/soc/qcom/rpm_master_stat.c b/drivers/soc/qcom/rpm_master_stat.c
new file mode 100644
index 000000000000..0a6b6485ef63
--- /dev/null
+++ b/drivers/soc/qcom/rpm_master_stat.c
@@ -0,0 +1,436 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/uaccess.h>
+
+#include "rpm_stats.h"
+
+/* Size of the per-open formatting buffer. */
+#define RPM_MASTERS_BUF_LEN 400
+
+/*
+ * snprintf into a moving window: advances buf and shrinks size by the
+ * amount written, clamping to the end of the buffer on truncation.
+ * NOTE(review): a negative snprintf() return (encoding error) would move
+ * buf backwards; callers rely on the format strings being well-formed.
+ */
+#define SNPRINTF(buf, size, format, ...) \
+	do { \
+		if (size > 0) { \
+			int ret; \
+			ret = snprintf(buf, size, format, ## __VA_ARGS__); \
+			if (ret > size) { \
+				buf += size; \
+				size = 0; \
+			} else { \
+				buf += ret; \
+				size -= ret; \
+			} \
+		} \
+	} while (0)
+
+/* Bounds-checked lookup of a master's name from the platform data. */
+#define GET_MASTER_NAME(a, prvdata) \
+	((a >= prvdata->num_masters) ? "Invalid Master Name" : \
+	 prvdata->master_names[a])
+
+/* Stringify "record.field" and keep only the text after the '.'. */
+#define GET_FIELD(a) ((strnstr(#a, ".", 80) + 1))
+
+/*
+ * Layout of one master's statistics record as published by the RPM in
+ * MSG RAM (v2 layout); fields are read individually with
+ * readl_relaxed()/readq_relaxed() using offsetof() into this struct.
+ */
+struct msm_rpm_master_stats {
+	uint32_t active_cores;	/* bitmask, one bit per active core */
+	uint32_t numshutdowns;
+	uint64_t shutdown_req;
+	uint64_t wakeup_ind;
+	uint64_t bringup_req;
+	uint64_t bringup_ack;
+	uint32_t wakeup_reason; /* 0 = rude wakeup, 1 = scheduled wakeup */
+	uint32_t last_sleep_transition_duration;
+	uint32_t last_wake_transition_duration;
+};
+
+/* Per-open state for the rpm_master_stats debugfs file. */
+struct msm_rpm_master_stats_private_data {
+	void __iomem *reg_base;	/* per-open mapping of the stats region */
+	u32 len;		/* bytes of valid data in buf */
+	char **master_names;
+	u32 num_masters;
+	char buf[RPM_MASTERS_BUF_LEN];	/* holds one master's formatted stats */
+	struct msm_rpm_master_stats_platform_data *platform_data;
+};
+
+/*
+ * debugfs release: unmap the per-open MSG RAM mapping and free the
+ * private state allocated in open().  Made static: it is only referenced
+ * through msm_rpm_master_stats_fops below, and every sibling handler in
+ * this file is static -- the original leaked the symbol into the global
+ * namespace.
+ */
+static int msm_rpm_master_stats_file_close(struct inode *inode,
+		struct file *file)
+{
+	struct msm_rpm_master_stats_private_data *private = file->private_data;
+
+	if (private->reg_base)
+		iounmap(private->reg_base);
+	kfree(file->private_data);
+
+	return 0;
+}
+
+/*
+ * Format one master's statistics into prvdata->buf.
+ *
+ * A static cursor (master_cnt) remembers which master to dump next, so
+ * successive calls walk all masters and a final call returns 0 to signal
+ * EOF to the read handler.  The cursor and the formatting are serialized
+ * by a local static mutex.
+ *
+ * Returns the number of bytes written into prvdata->buf (0 when all
+ * masters have been reported).
+ */
+static int msm_rpm_master_copy_stats(
+		struct msm_rpm_master_stats_private_data *prvdata)
+{
+	struct msm_rpm_master_stats record;
+	struct msm_rpm_master_stats_platform_data *pdata;
+	static int master_cnt;
+	int count, j = 0;
+	char *buf;
+	static DEFINE_MUTEX(msm_rpm_master_stats_mutex);
+
+	mutex_lock(&msm_rpm_master_stats_mutex);
+
+	/* Iterate possible number of masters */
+	if (master_cnt > prvdata->num_masters - 1) {
+		master_cnt = 0;
+		mutex_unlock(&msm_rpm_master_stats_mutex);
+		return 0;
+	}
+
+	pdata = prvdata->platform_data;
+	count = RPM_MASTERS_BUF_LEN;
+	buf = prvdata->buf;
+
+	/* v2 layout: read each field of the record straight from MSG RAM
+	 * at (master_cnt * master_offset + offsetof(field)). */
+	if (prvdata->platform_data->version == 2) {
+		SNPRINTF(buf, count, "%s\n",
+			GET_MASTER_NAME(master_cnt, prvdata));
+
+		record.shutdown_req = readq_relaxed(prvdata->reg_base +
+			(master_cnt * pdata->master_offset +
+			offsetof(struct msm_rpm_master_stats, shutdown_req)));
+
+		SNPRINTF(buf, count, "\t%s:0x%llX\n",
+			GET_FIELD(record.shutdown_req),
+			record.shutdown_req);
+
+		record.wakeup_ind = readq_relaxed(prvdata->reg_base +
+			(master_cnt * pdata->master_offset +
+			offsetof(struct msm_rpm_master_stats, wakeup_ind)));
+
+		SNPRINTF(buf, count, "\t%s:0x%llX\n",
+			GET_FIELD(record.wakeup_ind),
+			record.wakeup_ind);
+
+		record.bringup_req = readq_relaxed(prvdata->reg_base +
+			(master_cnt * pdata->master_offset +
+			offsetof(struct msm_rpm_master_stats, bringup_req)));
+
+		SNPRINTF(buf, count, "\t%s:0x%llX\n",
+			GET_FIELD(record.bringup_req),
+			record.bringup_req);
+
+		record.bringup_ack = readq_relaxed(prvdata->reg_base +
+			(master_cnt * pdata->master_offset +
+			offsetof(struct msm_rpm_master_stats, bringup_ack)));
+
+		SNPRINTF(buf, count, "\t%s:0x%llX\n",
+			GET_FIELD(record.bringup_ack),
+			record.bringup_ack);
+
+		record.last_sleep_transition_duration =
+				readl_relaxed(prvdata->reg_base +
+				(master_cnt * pdata->master_offset +
+				offsetof(struct msm_rpm_master_stats,
+				last_sleep_transition_duration)));
+
+		SNPRINTF(buf, count, "\t%s:0x%x\n",
+			GET_FIELD(record.last_sleep_transition_duration),
+			record.last_sleep_transition_duration);
+
+		record.last_wake_transition_duration =
+				readl_relaxed(prvdata->reg_base +
+				(master_cnt * pdata->master_offset +
+				offsetof(struct msm_rpm_master_stats,
+				last_wake_transition_duration)));
+
+		SNPRINTF(buf, count, "\t%s:0x%x\n",
+			GET_FIELD(record.last_wake_transition_duration),
+			record.last_wake_transition_duration);
+
+		record.wakeup_reason = readl_relaxed(prvdata->reg_base +
+				(master_cnt * pdata->master_offset +
+				offsetof(struct msm_rpm_master_stats,
+				wakeup_reason)));
+
+		SNPRINTF(buf, count, "\t%s:0x%x\n",
+			GET_FIELD(record.wakeup_reason),
+			record.wakeup_reason);
+
+		record.numshutdowns = readl_relaxed(prvdata->reg_base +
+			(master_cnt * pdata->master_offset +
+			offsetof(struct msm_rpm_master_stats, numshutdowns)));
+
+		SNPRINTF(buf, count, "\t%s:0x%x\n",
+			GET_FIELD(record.numshutdowns),
+			record.numshutdowns);
+
+		record.active_cores = readl_relaxed(prvdata->reg_base +
+			(master_cnt * pdata->master_offset) +
+			offsetof(struct msm_rpm_master_stats, active_cores));
+
+		SNPRINTF(buf, count, "\t%s:0x%x\n",
+			GET_FIELD(record.active_cores),
+			record.active_cores);
+	} else {
+		/* v1 layout: only numshutdowns (+0x0) and active_cores
+		 * (+0x4) are published per master. */
+		SNPRINTF(buf, count, "%s\n",
+			GET_MASTER_NAME(master_cnt, prvdata));
+
+		record.numshutdowns = readl_relaxed(prvdata->reg_base +
+			(master_cnt * pdata->master_offset) + 0x0);
+
+		SNPRINTF(buf, count, "\t%s:0x%0x\n",
+			GET_FIELD(record.numshutdowns),
+			record.numshutdowns);
+
+		record.active_cores = readl_relaxed(prvdata->reg_base +
+			(master_cnt * pdata->master_offset) + 0x4);
+
+		SNPRINTF(buf, count, "\t%s:0x%0x\n",
+			GET_FIELD(record.active_cores),
+			record.active_cores);
+	}
+
+	/* List each set bit in active_cores as an active core number. */
+	j = find_first_bit((unsigned long *)&record.active_cores,
+			BITS_PER_LONG);
+	while (j < BITS_PER_LONG) {
+		SNPRINTF(buf, count, "\t\tcore%d\n", j);
+		j = find_next_bit((unsigned long *)&record.active_cores,
+				BITS_PER_LONG, j + 1);
+	}
+
+	master_cnt++;
+	mutex_unlock(&msm_rpm_master_stats_mutex);
+	return RPM_MASTERS_BUF_LEN - count;
+}
+
+/*
+ * debugfs read: refresh the buffer with the next master's stats and let
+ * simple_read_from_buffer() stream it out.  *ppos is reset to 0 after
+ * each refresh; EOF is reached when copy_stats() returns 0 (len == 0).
+ * NOTE(review): the "*ppos <= phys_size" gate assumes the region size
+ * always exceeds any buffer offset -- confirm for small regions.
+ */
+static ssize_t msm_rpm_master_stats_file_read(struct file *file,
+				char __user *bufu, size_t count, loff_t *ppos)
+{
+	struct msm_rpm_master_stats_private_data *prvdata;
+	struct msm_rpm_master_stats_platform_data *pdata;
+
+	prvdata = file->private_data;
+	if (!prvdata)
+		return -EINVAL;
+
+	pdata = prvdata->platform_data;
+	if (!pdata)
+		return -EINVAL;
+
+	if (!bufu || count == 0)
+		return -EINVAL;
+
+	if ((*ppos <= pdata->phys_size)) {
+		prvdata->len = msm_rpm_master_copy_stats(prvdata);
+		*ppos = 0;
+	}
+
+	return simple_read_from_buffer(bufu, count, ppos,
+			prvdata->buf, prvdata->len);
+}
+
+/*
+ * debugfs open: allocate per-open state and map the stats region.
+ * The platform data is stashed in inode->i_private by
+ * debugfs_create_file() at probe time.
+ */
+static int msm_rpm_master_stats_file_open(struct inode *inode,
+		struct file *file)
+{
+	struct msm_rpm_master_stats_private_data *prvdata;
+	struct msm_rpm_master_stats_platform_data *pdata;
+
+	pdata = inode->i_private;
+
+	file->private_data =
+		kzalloc(sizeof(struct msm_rpm_master_stats_private_data),
+			GFP_KERNEL);
+
+	if (!file->private_data)
+		return -ENOMEM;
+	prvdata = file->private_data;
+
+	/* Each open gets its own mapping; it is torn down in release(). */
+	prvdata->reg_base = ioremap(pdata->phys_addr_base,
+						pdata->phys_size);
+	if (!prvdata->reg_base) {
+		kfree(file->private_data);
+		prvdata = NULL;
+		pr_err("%s: ERROR could not ioremap start=%pa, len=%u\n",
+			__func__, &pdata->phys_addr_base,
+			pdata->phys_size);
+		return -EBUSY;
+	}
+
+	prvdata->len = 0;
+	prvdata->num_masters = pdata->num_masters;
+	prvdata->master_names = pdata->masters;
+	prvdata->platform_data = pdata;
+	return 0;
+}
+
+/* File operations for the "rpm_master_stats" debugfs node. */
+static const struct file_operations msm_rpm_master_stats_fops = {
+	.owner	  = THIS_MODULE,
+	.open	  = msm_rpm_master_stats_file_open,
+	.read	  = msm_rpm_master_stats_file_read,
+	.release  = msm_rpm_master_stats_file_close,
+	.llseek   = no_llseek,
+};
+
+/*
+ * Build platform data from the device tree: stats layout version, the
+ * per-master stride in MSG RAM, and the list of master names.  All
+ * allocations are devm-managed, so error paths can simply return NULL.
+ *
+ * Fix: the original ignored the return value of
+ * of_property_read_string_index(); on failure master_name stayed
+ * uninitialized and was passed to strlen()/strlcpy() -- undefined
+ * behavior.  The return code is now checked.
+ */
+static struct msm_rpm_master_stats_platform_data
+			*msm_rpm_master_populate_pdata(struct device *dev)
+{
+	struct msm_rpm_master_stats_platform_data *pdata;
+	struct device_node *node = dev->of_node;
+	int rc = 0, i;
+
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata) {
+		dev_err(dev, "could not allocate memory for platform data\n");
+		goto err;
+	}
+
+	rc = of_property_read_u32(node, "qcom,master-stats-version",
+							&pdata->version);
+	if (rc) {
+		dev_err(dev, "master-stats-version missing rc=%d\n", rc);
+		goto err;
+	}
+
+	rc = of_property_read_u32(node, "qcom,master-offset",
+							&pdata->master_offset);
+	if (rc) {
+		dev_err(dev, "master-offset missing rc=%d\n", rc);
+		goto err;
+	}
+
+	pdata->num_masters = of_property_count_strings(node, "qcom,masters");
+	if (pdata->num_masters < 0) {
+		dev_err(dev, "Failed to get number of masters =%d\n",
+						pdata->num_masters);
+		goto err;
+	}
+
+	pdata->masters = devm_kzalloc(dev, sizeof(char *) * pdata->num_masters,
+								GFP_KERNEL);
+	if (!pdata->masters) {
+		dev_err(dev, "%s:Failed to allocated memory\n", __func__);
+		goto err;
+	}
+
+	/*
+	 * Read master names from DT
+	 */
+	for (i = 0; i < pdata->num_masters; i++) {
+		const char *master_name;
+
+		rc = of_property_read_string_index(node, "qcom,masters",
+							i, &master_name);
+		if (rc) {
+			dev_err(dev, "%s: Failed to read master %d rc=%d\n",
+							__func__, i, rc);
+			goto err;
+		}
+		pdata->masters[i] = devm_kzalloc(dev, sizeof(char) *
+				strlen(master_name) + 1, GFP_KERNEL);
+		if (!pdata->masters[i]) {
+			dev_err(dev, "%s:Failed to get memory\n", __func__);
+			goto err;
+		}
+		strlcpy(pdata->masters[i], master_name,
+					strlen(master_name) + 1);
+	}
+	return pdata;
+err:
+	return NULL;
+}
+
+/*
+ * Driver probe: obtain platform data (DT or legacy), record the MSG RAM
+ * stats region from the MEM resource and expose it via the
+ * "rpm_master_stats" debugfs file.  The region itself is mapped lazily
+ * per open, not here.
+ */
+static int msm_rpm_master_stats_probe(struct platform_device *pdev)
+{
+	struct dentry *dent;
+	struct msm_rpm_master_stats_platform_data *pdata;
+	struct resource *res = NULL;
+
+	if (!pdev)
+		return -EINVAL;
+
+	if (pdev->dev.of_node)
+		pdata = msm_rpm_master_populate_pdata(&pdev->dev);
+	else
+		pdata = pdev->dev.platform_data;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "%s: Unable to get pdata\n", __func__);
+		return -ENOMEM;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	if (!res) {
+		dev_err(&pdev->dev,
+			"%s: Failed to get IO resource from platform device",
+			__func__);
+		return -ENXIO;
+	}
+
+	pdata->phys_addr_base = res->start;
+	pdata->phys_size = resource_size(res);
+
+	/* pdata is handed to the file via inode->i_private. */
+	dent = debugfs_create_file("rpm_master_stats", S_IRUGO, NULL,
+					pdata, &msm_rpm_master_stats_fops);
+
+	if (!dent) {
+		dev_err(&pdev->dev, "%s: ERROR debugfs_create_file failed\n",
+								__func__);
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, dent);
+	return 0;
+}
+
+/* Driver remove: drop the debugfs file created at probe time. */
+static int msm_rpm_master_stats_remove(struct platform_device *pdev)
+{
+	struct dentry *root = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+	debugfs_remove(root);
+	return 0;
+}
+
+/* DT match table; made const to match of_match_table's const pointer. */
+static const struct of_device_id rpm_master_table[] = {
+	{.compatible = "qcom,rpm-master-stats"},
+	{},
+};
+
+/* Platform driver; binds by name or the "qcom,rpm-master-stats" node. */
+static struct platform_driver msm_rpm_master_stats_driver = {
+	.probe	= msm_rpm_master_stats_probe,
+	.remove = msm_rpm_master_stats_remove,
+	.driver = {
+		.name = "msm_rpm_master_stats",
+		.owner = THIS_MODULE,
+		.of_match_table = rpm_master_table,
+	},
+};
+
+/* Module entry point: register the master-stats platform driver. */
+static int __init msm_rpm_master_stats_init(void)
+{
+	return platform_driver_register(&msm_rpm_master_stats_driver);
+}
+
+/* Module exit point: unregister the master-stats platform driver. */
+static void __exit msm_rpm_master_stats_exit(void)
+{
+	platform_driver_unregister(&msm_rpm_master_stats_driver);
+}
+
+module_init(msm_rpm_master_stats_init);
+module_exit(msm_rpm_master_stats_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM RPM Master Statistics driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:msm_master_stat_log");
diff --git a/drivers/soc/qcom/rpm_rbcpr_stats_v2.c b/drivers/soc/qcom/rpm_rbcpr_stats_v2.c
new file mode 100644
index 000000000000..5163f839ad3b
--- /dev/null
+++ b/drivers/soc/qcom/rpm_rbcpr_stats_v2.c
@@ -0,0 +1,420 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/sort.h>
+#include <linux/uaccess.h>
+
+/* Formatted-output buffer size and limits of the shared stats blob. */
+#define RBCPR_BUF_LEN 8000
+#define RBCPR_STATS_MAX_SIZE SZ_2K
+#define RBCPR_MAX_RAILS 4
+#define RBCPR_NUM_RECMNDS 3
+#define RBCPR_NUM_CORNERS 3
+
+/* Stringify "ptr->field" and keep only the text after the "->". */
+#define FIELD(a) ((strnstr(#a, "->", 80) + 2))
+/* Bounds-checked snprintf at offset pos into a RBCPR_BUF_LEN buffer. */
+#define PRINT(buf, pos, format, ...) \
+	((pos < RBCPR_BUF_LEN) ? snprintf((buf + pos), (RBCPR_BUF_LEN - pos),\
+	format, ## __VA_ARGS__) : 0)
+
+/* Voltage corner identifiers; index into rbcpr_corner_string[]. */
+enum {
+	CORNER_OFF,
+	CORNER_RETENTION,
+	CORNER_SVS_KRAIT,
+	CORNER_SVS_SOC,
+	CORNER_NOMINAL,
+	CORNER_TURBO,
+	CORNER_SUPER_TURBO,
+	CORNER_MAX,
+};
+
+/* One saved voltage recommendation from the RPM (value + timestamp). */
+struct rbcpr_recmnd_data_type {
+	uint32_t microvolts;
+	uint64_t timestamp;
+};
+
+/* Per-corner counters plus the last RBCPR_NUM_RECMNDS recommendations.
+ * Layout mirrors what the RPM writes to shared memory (memcpy_fromio'd
+ * as a unit in msm_rpmrbcpr_read_rpm_data()). */
+struct rbcpr_corners_data_type {
+	int32_t efuse_adjustment;
+	uint32_t programmed_voltage;
+	uint32_t isr_count;
+	uint32_t min_count;
+	uint32_t max_count;
+	struct rbcpr_recmnd_data_type rbcpr_recmnd[RBCPR_NUM_RECMNDS];
+};
+
+/* Header preceding each rail's corner records in the blob. */
+struct rbcpr_rail_stats_header_type {
+	uint32_t num_corners;
+	uint32_t num_latest_recommends;
+};
+
+/* Trailer following each rail's corner records. */
+struct rbcpr_rail_stats_footer_type {
+	uint32_t current_corner;
+	uint32_t railway_voltage;
+	uint32_t off_corner;
+	uint32_t margin;
+};
+
+/* Global header at the start of the blob; status doubles as a change
+ * counter (see msm_rpmrbcpr_file_read()). */
+struct rbcpr_stats_type {
+	uint32_t num_rails;
+	uint32_t status;
+};
+
+/* Driver-side state: mapping of the blob plus the formatted text. */
+struct rbcpr_data_type {
+	void __iomem *start;
+	uint32_t len;
+	char buf[RBCPR_BUF_LEN];
+};
+
+/*
+ * Rail names by index.
+ * NOTE(review): only 2 labels are defined while RBCPR_MAX_RAILS is 4 and
+ * msm_rpmrbcpr_read_rpm_data() indexes this array by the RPM-reported
+ * rail count -- confirm the RPM never reports more than 2 rails.
+ */
+static char *rbcpr_rail_labels[] = {
+	[0] = "VDD-CX",
+	[1] = "VDD-GFX",
+};
+
+/* Printable corner names, indexed by the corner enum above. */
+static char *rbcpr_corner_string[] = {
+	[CORNER_OFF] = "CORNERS_OFF",
+	[CORNER_RETENTION] = "RETENTION",
+	[CORNER_SVS_KRAIT] = "SVS",
+	[CORNER_SVS_SOC] = "SVS_SOC",
+	[CORNER_NOMINAL] = "NOMINAL",
+	[CORNER_TURBO] = "TURBO",
+	[CORNER_SUPER_TURBO] = "SUPER_TURBO",
+};
+
+/* Bounds-checked corner-name lookup. */
+#define CORNER_STRING(a) \
+	((a >= CORNER_MAX) ? "INVALID Corner" : rbcpr_corner_string[a])
+
+/* Singleton driver state, allocated in probe. */
+static struct rbcpr_data_type *rbcpr_data;
+
+/* Append the global blob header (rail count + status) to the buffer. */
+static void msm_rpmrbcpr_print_stats_header(
+			struct rbcpr_stats_type *hdr, char *buf,
+			uint32_t *pos)
+{
+	uint32_t off = *pos;
+
+	off += PRINT(buf, off, "\n:RBCPR STATS ");
+	off += PRINT(buf, off, "(%s: %d)", FIELD(hdr->num_rails),
+			hdr->num_rails);
+	off += PRINT(buf, off, "(%s: %d)", FIELD(hdr->status), hdr->status);
+	*pos = off;
+}
+
+/* Append one rail's header (corner and recommendation counts). */
+static void msm_rpmrbcpr_print_rail_header(
+			struct rbcpr_rail_stats_header_type *hdr, char *buf,
+			uint32_t *pos)
+{
+	uint32_t off = *pos;
+
+	off += PRINT(buf, off, "(%s: %d)", FIELD(hdr->num_corners),
+			hdr->num_corners);
+	off += PRINT(buf, off, "(%s: %d)",
+			FIELD(hdr->num_latest_recommends),
+			hdr->num_latest_recommends);
+	*pos = off;
+}
+
+/*
+ * Print one saved voltage recommendation (microvolts + RPM timestamp).
+ *
+ * Fixes: the FIELD() arguments stringified a misspelled identifier
+ * ("rbcpr_recmd"); FIELD() only keeps the text after "->", so output was
+ * unchanged, but the arguments now name the actual parameter.  The
+ * uint64_t timestamp is also printed with %llu instead of %lld.
+ */
+static void msm_rpmrbcpr_print_corner_recmnds(
+			struct rbcpr_recmnd_data_type *rbcpr_recmnd, char *buf,
+			uint32_t *pos)
+{
+	*pos += PRINT(buf, *pos, "\n\t\t\t :(%s: %d) ",
+			FIELD(rbcpr_recmnd->microvolts),
+			rbcpr_recmnd->microvolts);
+	*pos += PRINT(buf, *pos, " (%s: %llu)",
+			FIELD(rbcpr_recmnd->timestamp),
+			rbcpr_recmnd->timestamp);
+}
+
+/* Dump one corner's counters followed by its saved recommendations. */
+static void msm_rpmrbcpr_print_corner_data(
+			struct rbcpr_corners_data_type *c, char *buf,
+			uint32_t num_corners, uint32_t *pos)
+{
+	int idx;
+
+	*pos += PRINT(buf, *pos, "(%s: %d)",
+			FIELD(c->efuse_adjustment), c->efuse_adjustment);
+	*pos += PRINT(buf, *pos, "(%s: %d)",
+			FIELD(c->programmed_voltage), c->programmed_voltage);
+	*pos += PRINT(buf, *pos, "(%s: %d)",
+			FIELD(c->isr_count), c->isr_count);
+	*pos += PRINT(buf, *pos, "(%s: %d)",
+			FIELD(c->min_count), c->min_count);
+	*pos += PRINT(buf, *pos, "(%s: %d)\n",
+			FIELD(c->max_count), c->max_count);
+	*pos += PRINT(buf, *pos, "\t\t\t:Latest Recommends");
+	for (idx = 0; idx < num_corners; idx++)
+		msm_rpmrbcpr_print_corner_recmnds(&c->rbcpr_recmnd[idx], buf,
+				pos);
+}
+
+/* Append the rail's trailing status block to the output buffer. */
+static void msm_rpmrbcpr_print_rail_footer(
+			struct rbcpr_rail_stats_footer_type *ft, char *buf,
+			uint32_t *pos)
+{
+	uint32_t off = *pos;
+
+	off += PRINT(buf, off, "(%s: %s)", FIELD(ft->current_corner),
+			CORNER_STRING(ft->current_corner));
+	off += PRINT(buf, off, "(%s: %d)",
+			FIELD(ft->railway_voltage), ft->railway_voltage);
+	off += PRINT(buf, off, "(%s: %d)",
+			FIELD(ft->off_corner), ft->off_corner);
+	off += PRINT(buf, off, "(%s: %d)\n",
+			FIELD(ft->margin), ft->margin);
+	*pos = off;
+}
+
+/*
+ * Walk the shared-memory blob (global header, then per-rail
+ * header/corners/footer) and format it into rbcpr_data->buf.
+ * Returns the number of bytes written.
+ *
+ * The static locals avoid large stack frames; this is safe only because
+ * the sole caller (msm_rpmrbcpr_file_read) holds rbcpr_lock.
+ */
+static uint32_t msm_rpmrbcpr_read_rpm_data(void)
+{
+	uint32_t read_offset = 0;
+	static struct rbcpr_stats_type rbcpr_stats_header;
+	uint32_t buffer_offset = 0;
+	char *buf = rbcpr_data->buf;
+	int i, j;
+
+	memcpy_fromio(&rbcpr_stats_header, rbcpr_data->start,
+			sizeof(rbcpr_stats_header));
+	read_offset += sizeof(rbcpr_stats_header);
+	msm_rpmrbcpr_print_stats_header(&rbcpr_stats_header, buf,
+				&buffer_offset);
+
+	for (i = 0; i < rbcpr_stats_header.num_rails; i++) {
+		static struct rbcpr_rail_stats_header_type rail_header;
+		static struct rbcpr_rail_stats_footer_type rail_footer;
+
+		memcpy_fromio(&rail_header, (rbcpr_data->start + read_offset),
+				sizeof(rail_header));
+		read_offset += sizeof(rail_header);
+		buffer_offset += PRINT(buf, buffer_offset, "\n:%s Rail Data ",
+				rbcpr_rail_labels[i]);
+		msm_rpmrbcpr_print_rail_header(&rail_header, buf,
+				&buffer_offset);
+
+		for (j = 0; j < rail_header.num_corners; j++) {
+			static struct rbcpr_corners_data_type corner;
+			uint32_t corner_index;
+
+			memcpy_fromio(&corner,
+				(rbcpr_data->start + read_offset),
+				sizeof(corner));
+			read_offset += sizeof(corner);
+
+			/*
+			 * RPM doesn't include corner type in the data for the
+			 * corner. For now add this hack to know which corners
+			 * are used based on number of corners for the rail.
+			 */
+			corner_index = j + 3;
+			if (rail_header.num_corners == 3 && j == 2)
+				corner_index++;
+
+			buffer_offset += PRINT(buf, buffer_offset,
+					"\n\t\t:Corner Data: %s ",
+					CORNER_STRING(corner_index));
+			msm_rpmrbcpr_print_corner_data(&corner, buf,
+				rail_header.num_latest_recommends,
+				&buffer_offset);
+		}
+		buffer_offset += PRINT(buf, buffer_offset,
+					"\n\t\t");
+		memcpy_fromio(&rail_footer, (rbcpr_data->start + read_offset),
+				sizeof(rail_footer));
+		read_offset += sizeof(rail_footer);
+		msm_rpmrbcpr_print_rail_footer(&rail_footer, buf,
+				&buffer_offset);
+	}
+	return buffer_offset;
+}
+
+/*
+ * seq_file show handler: re-read the shared blob only when the RPM's
+ * status counter changed since the last read, then emit the cached text.
+ * NOTE(review): pdata->len is assigned and then immediately zeroed; the
+ * seq_printf() below consumes buf directly, so len appears to be a dead
+ * store -- confirm before removing.
+ */
+static int msm_rpmrbcpr_file_read(struct seq_file *m, void *data)
+{
+	struct rbcpr_data_type *pdata = m->private;
+	int ret = 0;
+	int curr_status_counter;
+	static int prev_status_counter;
+	static DEFINE_MUTEX(rbcpr_lock);
+
+	mutex_lock(&rbcpr_lock);
+	if (!pdata) {
+		pr_err("%s pdata is null", __func__);
+		ret = -EINVAL;
+		goto exit_rpmrbcpr_file_read;
+	}
+
+	/* Read RPM stats */
+	curr_status_counter = readl_relaxed(pdata->start +
+			offsetof(struct rbcpr_stats_type, status));
+	if (curr_status_counter != prev_status_counter) {
+		pdata->len = msm_rpmrbcpr_read_rpm_data();
+		pdata->len = 0;
+		prev_status_counter = curr_status_counter;
+	}
+
+	seq_printf(m, "%s", pdata->buf);
+
+exit_rpmrbcpr_file_read:
+	mutex_unlock(&rbcpr_lock);
+	return ret;
+}
+
+/* Open handler: refuse access until probe has mapped the stats region. */
+static int msm_rpmrbcpr_file_open(struct inode *inode, struct file *file)
+{
+	if (rbcpr_data->start)
+		return single_open(file, msm_rpmrbcpr_file_read,
+				inode->i_private);
+	return -ENODEV;
+}
+
+/* File operations for the "rpm_rbcpr" debugfs node (seq_file based). */
+static const struct file_operations msm_rpmrbcpr_fops = {
+	.open		= msm_rpmrbcpr_file_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/* Sanity-check the rail count the RPM wrote at the start of the blob. */
+static int msm_rpmrbcpr_validate(struct platform_device *pdev)
+{
+	uint32_t rails = readl_relaxed(rbcpr_data->start);
+
+	if (rails <= RBCPR_MAX_RAILS)
+		return 0;
+
+	pr_err("%s: Invalid number of RPM RBCPR rails %d",
+			__func__, rails);
+	return -EFAULT;
+}
+
+/*
+ * Driver probe: locate the RBCPR stats blob inside the MEM resource (the
+ * DT gives an offset to a pointer word, which in turn holds the blob's
+ * offset), map it, validate it, and expose it via debugfs.
+ *
+ * Fixes over the original:
+ *  - several failure paths jumped to rbcpr_probe_fail with ret still 0,
+ *    so probe reported success after ioremap/validation failures;
+ *    each path now sets a proper error code.
+ *  - iounmap() was called unconditionally, including on the early path
+ *    where start_ptr was never mapped; it is now guarded.
+ */
+static int msm_rpmrbcpr_probe(struct platform_device *pdev)
+{
+	struct dentry *dent;
+	int ret = 0;
+	struct resource *res = NULL;
+	void __iomem *start_ptr = NULL;
+	uint32_t rbcpr_start_addr = 0;
+	char *key = NULL;
+	uint32_t start_addr;
+
+	rbcpr_data = devm_kzalloc(&pdev->dev,
+			sizeof(struct rbcpr_data_type), GFP_KERNEL);
+
+	if (!rbcpr_data)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	if (!res) {
+		pr_err("%s: Failed to get IO resource from platform device",
+				__func__);
+		ret = -ENXIO;
+		goto rbcpr_probe_fail;
+	}
+
+	key = "qcom,start-offset";
+	ret = of_property_read_u32(pdev->dev.of_node, key, &start_addr);
+
+	if (ret) {
+		pr_err("%s: Failed to get start offset", __func__);
+		goto rbcpr_probe_fail;
+	}
+
+	/* Map just the 4-byte pointer word that locates the blob. */
+	start_addr += res->start;
+	start_ptr = ioremap_nocache(start_addr, 4);
+
+	if (!start_ptr) {
+		pr_err("%s: Failed to remap RBCPR start pointer",
+				__func__);
+		ret = -ENOMEM;
+		goto rbcpr_probe_fail;
+	}
+
+	rbcpr_start_addr = res->start + readl_relaxed(start_ptr);
+
+	/* The blob must fit inside the resource and follow the pointer. */
+	if ((rbcpr_start_addr > (res->end - RBCPR_STATS_MAX_SIZE)) ||
+			(rbcpr_start_addr < start_addr)) {
+		pr_err("%s: Invalid start address for rbcpr stats 0x%x",
+				__func__, rbcpr_start_addr);
+		ret = -EINVAL;
+		goto rbcpr_probe_fail;
+	}
+
+	rbcpr_data->start = devm_ioremap_nocache(&pdev->dev, rbcpr_start_addr,
+					RBCPR_STATS_MAX_SIZE);
+
+	if (!rbcpr_data->start) {
+		pr_err("%s: Failed to remap RBCPR start address",
+				__func__);
+		ret = -ENOMEM;
+		goto rbcpr_probe_fail;
+	}
+
+	ret = msm_rpmrbcpr_validate(pdev);
+
+	if (ret)
+		goto rbcpr_probe_fail;
+
+	dent = debugfs_create_file("rpm_rbcpr", S_IRUGO, NULL,
+			rbcpr_data, &msm_rpmrbcpr_fops);
+
+	if (!dent) {
+		pr_err("%s: error debugfs_create_file failed\n", __func__);
+		ret = -ENOMEM;
+		goto rbcpr_probe_fail;
+	}
+
+	platform_set_drvdata(pdev, dent);
+rbcpr_probe_fail:
+	if (start_ptr)
+		iounmap(start_ptr);
+	return ret;
+}
+
+/* Driver remove: drop the debugfs node created at probe time. */
+static int msm_rpmrbcpr_remove(struct platform_device *pdev)
+{
+	struct dentry *dbg = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+	debugfs_remove(dbg);
+	return 0;
+}
+
+/* DT match table; made const to match of_match_table's const pointer. */
+static const struct of_device_id rpmrbcpr_stats_table[] = {
+	{.compatible = "qcom,rpmrbcpr-stats"},
+	{},
+};
+
+/* Platform driver; binds by name or the "qcom,rpmrbcpr-stats" node. */
+static struct platform_driver msm_rpmrbcpr_driver = {
+	.probe	= msm_rpmrbcpr_probe,
+	.remove = msm_rpmrbcpr_remove,
+	.driver = {
+		.name = "msm_rpmrbcpr_stats",
+		.owner = THIS_MODULE,
+		.of_match_table = rpmrbcpr_stats_table,
+	},
+};
+
+/* Module entry point: register the RBCPR stats platform driver. */
+static int __init msm_rpmrbcpr_init(void)
+{
+	return platform_driver_register(&msm_rpmrbcpr_driver);
+}
+
+/* Module exit point: unregister the RBCPR stats platform driver. */
+static void __exit msm_rpmrbcpr_exit(void)
+{
+	platform_driver_unregister(&msm_rpmrbcpr_driver);
+}
+
+module_init(msm_rpmrbcpr_init);
+module_exit(msm_rpmrbcpr_exit);
diff --git a/drivers/soc/qcom/rpm_stats.c b/drivers/soc/qcom/rpm_stats.c
new file mode 100644
index 000000000000..ab7c9ba9eb1b
--- /dev/null
+++ b/drivers/soc/qcom/rpm_stats.c
@@ -0,0 +1,405 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/uaccess.h>
+#include <asm/arch_timer.h>
+#include "rpm_stats.h"
+
+
+/* Record-type IDs used by the v1 stats layout. */
+enum {
+	ID_COUNTER,
+	ID_ACCUM_TIME_SCLK,
+	ID_MAX,
+};
+
+/* Human-readable labels indexed by the IDs above. */
+static char *msm_rpmstats_id_labels[ID_MAX] = {
+	[ID_COUNTER] = "Count",
+	[ID_ACCUM_TIME_SCLK] = "Total time(uSec)",
+};
+
+#define SCLK_HZ 32768			/* sleep-clock rate (Hz) */
+#define MSM_ARCH_TIMER_FREQ 19200000	/* arch counter rate (Hz) */
+
+/* One decoded v1 record (name + id + raw value). */
+struct msm_rpmstats_record {
+	char		name[32];
+	uint32_t	id;
+	uint32_t	val;
+};
+
+/* Per-open state for the rpm_stats debugfs file. */
+struct msm_rpmstats_private_data {
+	void __iomem *reg_base;	/* per-open mapping of the stats region */
+	u32 num_records;	/* total records (v1: read from region head) */
+	u32 read_idx;		/* next record to format */
+	u32 len;		/* bytes of valid data in buf */
+	char buf[320];
+	struct msm_rpmstats_platform_data *platform_data;
+};
+
+/* v2 record layout as written by the RPM; timestamps are in arch-counter
+ * ticks (19.2 MHz). */
+struct msm_rpm_stats_data_v2 {
+	u32 stat_type;		/* 4 ASCII chars naming the low-power mode */
+	u32 count;
+	u64 last_entered_at;
+	u64 last_exited_at;
+	u64 accumulated;
+	u32 client_votes;
+	u32 reserved[3];
+};
+
+/* Convert a 19.2 MHz arch-counter delta to whole seconds (truncating). */
+static inline u64 get_time_in_sec(u64 counter)
+{
+	do_div(counter, MSM_ARCH_TIMER_FREQ);
+	return counter;
+}
+
+/*
+ * Convert an arch-counter delta to milliseconds.  Dividing before the
+ * multiply truncates to whole-second granularity -- presumably a
+ * deliberate trade against 64-bit overflow; TODO confirm.
+ */
+static inline u64 get_time_in_msec(u64 counter)
+{
+	do_div(counter, MSM_ARCH_TIMER_FREQ);
+	counter *= MSEC_PER_SEC;
+	return counter;
+}
+
+/*
+ * Format one v2 record into buf.  stat_type is a u32 holding 4 ASCII
+ * characters, turned into a NUL-terminated string.  Returns snprintf()'s
+ * would-have-written length (callers pass a shrinking buflength).
+ */
+static inline int msm_rpmstats_append_data_to_buf(char *buf,
+		struct msm_rpm_stats_data_v2 *data, int buflength)
+{
+	char stat_type[5];
+	u64 time_in_last_mode;
+	u64 time_since_last_mode;
+	u64 actual_last_sleep;
+
+	stat_type[4] = 0;
+	memcpy(stat_type, &data->stat_type, sizeof(u32));
+
+	time_in_last_mode = data->last_exited_at - data->last_entered_at;
+	time_in_last_mode = get_time_in_msec(time_in_last_mode);
+	time_since_last_mode = arch_counter_get_cntpct() - data->last_exited_at;
+	time_since_last_mode = get_time_in_sec(time_since_last_mode);
+	actual_last_sleep = get_time_in_msec(data->accumulated);
+
+	return snprintf(buf , buflength,
+		"RPM Mode:%s\n\t count:%d\ntime in last mode(msec):%llu\n"
+		"time since last mode(sec):%llu\nactual last sleep(msec):%llu\n"
+		"client votes: %#010x\n\n",
+		stat_type, data->count, time_in_last_mode,
+		time_since_last_mode, actual_last_sleep,
+		data->client_votes);
+}
+
+/* Read a 32-bit field at @offset of record @index (v2 layout). */
+static inline u32 msm_rpmstats_read_long_register_v2(void __iomem *regbase,
+		int index, int offset)
+{
+	return readl_relaxed(regbase + offset +
+			index * sizeof(struct msm_rpm_stats_data_v2));
+}
+
+/* Read a 64-bit field at @offset of record @index (v2 layout);
+ * copied via memcpy_fromio, presumably because a 64-bit MMIO read is
+ * not available on all targets -- TODO confirm. */
+static inline u64 msm_rpmstats_read_quad_register_v2(void __iomem *regbase,
+		int index, int offset)
+{
+	u64 dst;
+	memcpy_fromio(&dst,
+		regbase + offset + index * sizeof(struct msm_rpm_stats_data_v2),
+		8);
+	return dst;
+}
+
+/*
+ * Format all v2 records into prvdata->buf, reading each field
+ * individually from the mapped region.  Advances read_idx per record and
+ * returns the total formatted length.
+ */
+static inline int msm_rpmstats_copy_stats_v2(
+			struct msm_rpmstats_private_data *prvdata)
+{
+	void __iomem *reg;
+	struct msm_rpm_stats_data_v2 data;
+	int i, length;
+
+	reg = prvdata->reg_base;
+
+	for (i = 0, length = 0; i < prvdata->num_records; i++) {
+
+		data.stat_type = msm_rpmstats_read_long_register_v2(reg, i,
+				offsetof(struct msm_rpm_stats_data_v2,
+					stat_type));
+		data.count = msm_rpmstats_read_long_register_v2(reg, i,
+				offsetof(struct msm_rpm_stats_data_v2, count));
+		data.last_entered_at = msm_rpmstats_read_quad_register_v2(reg,
+				i, offsetof(struct msm_rpm_stats_data_v2,
+					last_entered_at));
+		data.last_exited_at = msm_rpmstats_read_quad_register_v2(reg,
+				i, offsetof(struct msm_rpm_stats_data_v2,
+					last_exited_at));
+
+		data.accumulated = msm_rpmstats_read_quad_register_v2(reg,
+				i, offsetof(struct msm_rpm_stats_data_v2,
+					accumulated));
+		data.client_votes = msm_rpmstats_read_long_register_v2(reg,
+				i, offsetof(struct msm_rpm_stats_data_v2,
+					client_votes));
+		length += msm_rpmstats_append_data_to_buf(prvdata->buf + length,
+				&data, sizeof(prvdata->buf) - length);
+		prvdata->read_idx++;
+	}
+	return length;
+}
+
+/*
+ * v1 accessor: records are 12 bytes apart and word @offset of a record
+ * sits at (offset + 1) * 4, i.e. past a leading header word.
+ * NOTE(review): layout inferred from the arithmetic -- verify against
+ * the RPM v1 stats format.
+ */
+static inline unsigned long msm_rpmstats_read_register(void __iomem *regbase,
+		int index, int offset)
+{
+	return readl_relaxed(regbase + index * 12 + (offset + 1) * 4);
+}
+/*
+ * Copy a NUL-terminated string out of the stats region one 32-bit word
+ * at a time (the region is device memory, read via readl_relaxed()).
+ * NOTE(review): the copy is unbounded -- dest must be large enough for
+ * the source (callers pass record.name[32]); src is declared char * but
+ * is really __iomem.  Both are worth tightening.
+ */
+static void msm_rpmstats_strcpy(char *dest, char *src)
+{
+	union {
+		char ch[4];
+		unsigned long word;
+	} string;
+	int index = 0;
+
+	do {
+		int i;
+		string.word = readl_relaxed(src + 4 * index);
+		for (i = 0; i < 4; i++) {
+			*dest++ = string.ch[i];
+			if (!string.ch[i])
+				break;
+		}
+		index++;
+	} while (*(dest-1));
+
+}
+/*
+ * Format the next v1 record into pdata->buf.  The record holds a pointer
+ * to its name string; if that string lies outside the mapped window it
+ * is temporarily ioremapped.  Returns the formatted length and advances
+ * read_idx.
+ *
+ * Fix: record.val (u32) was multiplied by USEC_PER_SEC (a long) in
+ * 32-bit arithmetic on 32-bit kernels, overflowing for large counts;
+ * the value is now widened to 64 bits before the multiply.
+ */
+static int msm_rpmstats_copy_stats(struct msm_rpmstats_private_data *pdata)
+{
+
+	struct msm_rpmstats_record record;
+	unsigned long ptr;
+	unsigned long offset;
+	char *str;
+	uint64_t usec;
+
+	ptr = msm_rpmstats_read_register(pdata->reg_base, pdata->read_idx, 0);
+	offset = (ptr - (unsigned long)pdata->platform_data->phys_addr_base);
+
+	/* Name string outside the mapped window needs its own mapping. */
+	if (offset > pdata->platform_data->phys_size)
+		str = (char *)ioremap(ptr, SZ_256);
+	else
+		str = (char *) pdata->reg_base + offset;
+
+	msm_rpmstats_strcpy(record.name, str);
+
+	if (offset > pdata->platform_data->phys_size)
+		iounmap(str);
+
+	record.id = msm_rpmstats_read_register(pdata->reg_base,
+						pdata->read_idx, 1);
+	record.val = msm_rpmstats_read_register(pdata->reg_base,
+						pdata->read_idx, 2);
+
+	/* Accumulated time is stored in sleep-clock ticks; convert. */
+	if (record.id == ID_ACCUM_TIME_SCLK) {
+		usec = (uint64_t)record.val * USEC_PER_SEC;
+		do_div(usec, SCLK_HZ);
+	} else
+		usec = (unsigned long)record.val;
+
+	pdata->read_idx++;
+
+	return snprintf(pdata->buf, sizeof(pdata->buf),
+			"RPM Mode:%s\n\t%s:%llu\n",
+			record.name,
+			msm_rpmstats_id_labels[record.id],
+			usec);
+}
+
+/*
+ * debugfs read: refill the buffer with the next record(s) whenever the
+ * previous contents have been fully consumed (*ppos >= len) and more
+ * records remain, resetting *ppos so simple_read_from_buffer() streams
+ * the fresh text.  v1 lazily learns the record count from the first
+ * word of the region.
+ */
+static ssize_t msm_rpmstats_file_read(struct file *file, char __user *bufu,
+				size_t count, loff_t *ppos)
+{
+	struct msm_rpmstats_private_data *prvdata;
+	prvdata = file->private_data;
+
+	if (!prvdata)
+		return -EINVAL;
+
+	if (!bufu || count == 0)
+		return -EINVAL;
+
+	if (prvdata->platform_data->version == 1) {
+		if (!prvdata->num_records)
+			prvdata->num_records = readl_relaxed(prvdata->reg_base);
+	}
+
+	if ((*ppos >= prvdata->len)
+		&& (prvdata->read_idx < prvdata->num_records)) {
+			if (prvdata->platform_data->version == 1)
+				prvdata->len = msm_rpmstats_copy_stats(prvdata);
+			else if (prvdata->platform_data->version == 2)
+				prvdata->len = msm_rpmstats_copy_stats_v2(
+						prvdata);
+			*ppos = 0;
+	}
+
+	return simple_read_from_buffer(bufu, count, ppos,
+			prvdata->buf, prvdata->len);
+}
+
+/*
+ * debugfs open: allocate per-open state and map the stats region.
+ * kmalloc (not kzalloc) is fine here because every field the reader
+ * consults is initialized explicitly below; buf is write-before-read.
+ * v2 hard-codes two records ("vmin"/"xo shutdown" -- presumably; the
+ * types are named by the stat_type words at runtime).
+ */
+static int msm_rpmstats_file_open(struct inode *inode, struct file *file)
+{
+	struct msm_rpmstats_private_data *prvdata;
+	struct msm_rpmstats_platform_data *pdata;
+
+	pdata = inode->i_private;
+
+	file->private_data =
+		kmalloc(sizeof(struct msm_rpmstats_private_data), GFP_KERNEL);
+
+	if (!file->private_data)
+		return -ENOMEM;
+	prvdata = file->private_data;
+
+	prvdata->reg_base = ioremap_nocache(pdata->phys_addr_base,
+					pdata->phys_size);
+	if (!prvdata->reg_base) {
+		kfree(file->private_data);
+		prvdata = NULL;
+		pr_err("%s: ERROR could not ioremap start=%pa, len=%u\n",
+			__func__, &pdata->phys_addr_base,
+			pdata->phys_size);
+		return -EBUSY;
+	}
+
+	prvdata->read_idx = prvdata->num_records =  prvdata->len = 0;
+	prvdata->platform_data = pdata;
+	if (pdata->version == 2)
+		prvdata->num_records = 2;
+
+	return 0;
+}
+
+/* Release handler: undo the mapping and allocation made in open(). */
+static int msm_rpmstats_file_close(struct inode *inode, struct file *file)
+{
+	struct msm_rpmstats_private_data *prvdata = file->private_data;
+
+	if (prvdata->reg_base)
+		iounmap(prvdata->reg_base);
+	kfree(prvdata);
+
+	return 0;
+}
+
+/* File operations for the "rpm_stats" debugfs node. */
+static const struct file_operations msm_rpmstats_fops = {
+	.owner	  = THIS_MODULE,
+	.open	  = msm_rpmstats_file_open,
+	.read	  = msm_rpmstats_file_read,
+	.release  = msm_rpmstats_file_close,
+	.llseek   = no_llseek,
+};
+
+/*
+ * Driver probe: record the stats region from the MEM resource, read the
+ * layout version from platform data or DT, and expose the region via the
+ * "rpm_stats" debugfs file (mapped lazily per open).
+ *
+ * Fix: the original leaked pdata when platform_get_resource() failed;
+ * that path now frees it like the other error paths.
+ */
+static int msm_rpmstats_probe(struct platform_device *pdev)
+{
+	struct dentry *dent = NULL;
+	struct msm_rpmstats_platform_data *pdata;
+	struct msm_rpmstats_platform_data *pd;
+	struct resource *res = NULL;
+	struct device_node *node = NULL;
+	int ret = 0;
+
+	if (!pdev)
+		return -EINVAL;
+
+	pdata = kzalloc(sizeof(struct msm_rpmstats_platform_data), GFP_KERNEL);
+
+	if (!pdata)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	if (!res) {
+		kfree(pdata);
+		return -EINVAL;
+	}
+
+	pdata->phys_addr_base  = res->start;
+
+	pdata->phys_size = resource_size(res);
+	node = pdev->dev.of_node;
+	if (pdev->dev.platform_data) {
+		pd = pdev->dev.platform_data;
+		pdata->version = pd->version;
+
+	} else if (node)
+		ret = of_property_read_u32(node,
+			"qcom,sleep-stats-version", &pdata->version);
+
+	if (!ret) {
+
+		dent = debugfs_create_file("rpm_stats", S_IRUGO, NULL,
+				pdata, &msm_rpmstats_fops);
+
+		if (!dent) {
+			pr_err("%s: ERROR debugfs_create_file failed\n",
+								__func__);
+			kfree(pdata);
+			return -ENOMEM;
+		}
+
+	} else {
+		kfree(pdata);
+		return -EINVAL;
+	}
+	platform_set_drvdata(pdev, dent);
+	return 0;
+}
+
+/* Driver remove: drop the debugfs file created at probe time. */
+static int msm_rpmstats_remove(struct platform_device *pdev)
+{
+	struct dentry *root = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+	debugfs_remove(root);
+	return 0;
+}
+
+/* DT match table; made const to match of_match_table's const pointer. */
+static const struct of_device_id rpm_stats_table[] = {
+	{.compatible = "qcom,rpm-stats"},
+	{},
+};
+
+/* Platform driver; binds by name or the "qcom,rpm-stats" DT node. */
+static struct platform_driver msm_rpmstats_driver = {
+	.probe	= msm_rpmstats_probe,
+	.remove = msm_rpmstats_remove,
+	.driver = {
+		.name = "msm_rpm_stat",
+		.owner = THIS_MODULE,
+		.of_match_table = rpm_stats_table,
+	},
+};
+/* Module entry point: register the RPM stats platform driver. */
+static int __init msm_rpmstats_init(void)
+{
+	return platform_driver_register(&msm_rpmstats_driver);
+}
+/* Module exit point: unregister the RPM stats platform driver. */
+static void __exit msm_rpmstats_exit(void)
+{
+	platform_driver_unregister(&msm_rpmstats_driver);
+}
+module_init(msm_rpmstats_init);
+module_exit(msm_rpmstats_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM RPM Statistics driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:msm_stat_log");
diff --git a/drivers/soc/qcom/rpm_stats.h b/drivers/soc/qcom/rpm_stats.h
new file mode 100644
index 000000000000..34c1b99f1264
--- /dev/null
+++ b/drivers/soc/qcom/rpm_stats.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_RPM_STATS_H
+#define __ARCH_ARM_MACH_MSM_RPM_STATS_H
+
+#include <linux/types.h>
+
+/* Describes the RPM sleep-stats region handed to the rpm_stats driver. */
+struct msm_rpmstats_platform_data {
+ phys_addr_t phys_addr_base; /* physical base of the stats region */
+ u32 phys_size; /* size of the region in bytes */
+ u32 version; /* layout version ("qcom,sleep-stats-version") */
+};
+
+/* Describes the per-master power-collapse stats region kept by the RPM. */
+struct msm_rpm_master_stats_platform_data {
+ phys_addr_t phys_addr_base; /* physical base of master stats region */
+ u32 phys_size; /* total size of the region in bytes */
+ char **masters; /* master names; presumably num_masters entries — confirm */
+ /*
+ * RPM maintains PC stats for each master in MSG RAM,
+ * it allocates 256 bytes for this use.
+ * No of masters differs for different targets.
+ * Based on the number of masters, linux rpm stat
+ * driver reads (32 * num_masters) bytes to display
+ * master stats.
+ */
+ s32 num_masters;
+ u32 master_offset; /* offset of master records — TODO confirm units */
+ u32 version; /* layout version */
+};
+#endif
diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c
new file mode 100644
index 000000000000..c20faf2ed2db
--- /dev/null
+++ b/drivers/soc/qcom/smd.c
@@ -0,0 +1,3335 @@
+/* drivers/soc/qcom/smd.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/termios.h>
+#include <linux/ctype.h>
+#include <linux/remote_spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/kfifo.h>
+#include <linux/pm.h>
+#include <linux/notifier.h>
+#include <linux/suspend.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/ipc_logging.h>
+
+#include <soc/qcom/ramdump.h>
+#include <soc/qcom/smd.h>
+#include <soc/qcom/smem.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+
+#include "smd_private.h"
+#include "smem_private.h"
+
+#define SMSM_SNAPSHOT_CNT 64
+#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4 + sizeof(uint64_t))
+#define RSPIN_INIT_WAIT_MS 1000
+#define SMD_FIFO_FULL_RESERVE 4
+#define SMD_FIFO_ADDR_ALIGN_BYTES 3
+
+uint32_t SMSM_NUM_ENTRIES = 8;
+uint32_t SMSM_NUM_HOSTS = 3;
+
+/* Legacy SMSM interrupt notifications */
+#define LEGACY_MODEM_SMSM_MASK (SMSM_RESET | SMSM_INIT | SMSM_SMDINIT)
+
+/* Cached pointers into the shared-memory SMSM state/interrupt arrays. */
+struct smsm_shared_info {
+ uint32_t *state;
+ uint32_t *intr_mask;
+ uint32_t *intr_mux;
+};
+
+static struct smsm_shared_info smsm_info;
+static struct kfifo smsm_snapshot_fifo;
+static struct wakeup_source smsm_snapshot_ws;
+static int smsm_snapshot_count;
+static DEFINE_SPINLOCK(smsm_snapshot_count_lock);
+
+/* Layout of the SMSM size-info item published in shared memory. */
+struct smsm_size_info_type {
+ uint32_t num_hosts;
+ uint32_t num_entries;
+ uint32_t reserved0;
+ uint32_t reserved1;
+};
+
+/* One registered SMSM state-change callback, kept on a per-entry list. */
+struct smsm_state_cb_info {
+ struct list_head cb_list;
+ uint32_t mask; /* state bits the client wants to be notified about */
+ void *data; /* opaque client cookie passed back to notify() */
+ void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
+};
+
+/* Per-SMSM-entry bookkeeping: callback list and cached state/masks. */
+struct smsm_state_info {
+ struct list_head callbacks;
+ uint32_t last_value; /* last observed value, for old/new deltas */
+ uint32_t intr_mask_set;
+ uint32_t intr_mask_clear;
+};
+
+static irqreturn_t smsm_irq_handler(int irq, void *data);
+
+/*
+ * Interrupt configuration consists of static configuration for the supported
+ * processors that is done here along with interrupt configuration that is
+ * added by the separate initialization modules (device tree, platform data, or
+ * hard coded).
+ */
+static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
+ [SMD_MODEM] = {
+ .smd.irq_handler = smd_modem_irq_handler,
+ .smsm.irq_handler = smsm_modem_irq_handler,
+ },
+ [SMD_Q6] = {
+ .smd.irq_handler = smd_dsp_irq_handler,
+ .smsm.irq_handler = smsm_dsp_irq_handler,
+ },
+ [SMD_DSPS] = {
+ .smd.irq_handler = smd_dsps_irq_handler,
+ .smsm.irq_handler = smsm_dsps_irq_handler,
+ },
+ [SMD_WCNSS] = {
+ .smd.irq_handler = smd_wcnss_irq_handler,
+ .smsm.irq_handler = smsm_wcnss_irq_handler,
+ },
+ [SMD_MODEM_Q6_FW] = {
+ .smd.irq_handler = smd_modemfw_irq_handler,
+ .smsm.irq_handler = NULL, /* does not support smsm */
+ },
+ [SMD_RPM] = {
+ .smd.irq_handler = smd_rpm_irq_handler,
+ .smsm.irq_handler = NULL, /* does not support smsm */
+ },
+};
+
+/* Overlay used by the FIFO copy helpers for byte and double-word access. */
+union fifo_mem {
+ uint64_t u64;
+ uint8_t u8;
+};
+
+struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
+
+#define SMSM_STATE_ADDR(entry) (smsm_info.state + entry)
+#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
+ entry * SMSM_NUM_HOSTS + host)
+#define SMSM_INTR_MUX_ADDR(entry) (smsm_info.intr_mux + entry)
+
+int msm_smd_debug_mask = MSM_SMD_POWER_INFO | MSM_SMD_INFO |
+ MSM_SMSM_POWER_INFO;
+module_param_named(debug_mask, msm_smd_debug_mask,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+void *smd_log_ctx;
+void *smsm_log_ctx;
+#define NUM_LOG_PAGES 4
+
+#define IPC_LOG_SMD(level, x...) do { \
+ if (smd_log_ctx) \
+ ipc_log_string(smd_log_ctx, x); \
+ else \
+ printk(level x); \
+ } while (0)
+
+#define IPC_LOG_SMSM(level, x...) do { \
+ if (smsm_log_ctx) \
+ ipc_log_string(smsm_log_ctx, x); \
+ else \
+ printk(level x); \
+ } while (0)
+
+#if defined(CONFIG_MSM_SMD_DEBUG)
+#define SMD_DBG(x...) do { \
+ if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
+ IPC_LOG_SMD(KERN_DEBUG, x); \
+ } while (0)
+
+#define SMSM_DBG(x...) do { \
+ if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
+ IPC_LOG_SMSM(KERN_DEBUG, x); \
+ } while (0)
+
+#define SMD_INFO(x...) do { \
+ if (msm_smd_debug_mask & MSM_SMD_INFO) \
+ IPC_LOG_SMD(KERN_INFO, x); \
+ } while (0)
+
+#define SMSM_INFO(x...) do { \
+ if (msm_smd_debug_mask & MSM_SMSM_INFO) \
+ IPC_LOG_SMSM(KERN_INFO, x); \
+ } while (0)
+
+#define SMD_POWER_INFO(x...) do { \
+ if (msm_smd_debug_mask & MSM_SMD_POWER_INFO) \
+ IPC_LOG_SMD(KERN_INFO, x); \
+ } while (0)
+
+#define SMSM_POWER_INFO(x...) do { \
+ if (msm_smd_debug_mask & MSM_SMSM_POWER_INFO) \
+ IPC_LOG_SMSM(KERN_INFO, x); \
+ } while (0)
+#else
+#define SMD_DBG(x...) do { } while (0)
+#define SMSM_DBG(x...) do { } while (0)
+#define SMD_INFO(x...) do { } while (0)
+#define SMSM_INFO(x...) do { } while (0)
+#define SMD_POWER_INFO(x...) do { } while (0)
+#define SMSM_POWER_INFO(x...) do { } while (0)
+#endif
+
+static void smd_fake_irq_handler(unsigned long arg);
+static void smsm_cb_snapshot(uint32_t use_wakeup_source);
+
+static struct workqueue_struct *smsm_cb_wq;
+static void notify_smsm_cb_clients_worker(struct work_struct *work);
+static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
+static DEFINE_MUTEX(smsm_lock);
+static struct smsm_state_info *smsm_states;
+
+static int smd_stream_write_avail(struct smd_channel *ch);
+static int smd_stream_read_avail(struct smd_channel *ch);
+
+static bool pid_is_on_edge(uint32_t edge_num, unsigned pid);
+
+/*
+ * Write an outgoing interrupt register. The wmb() orders all prior
+ * shared-memory writes before the interrupt is raised at the remote.
+ */
+static inline void smd_write_intr(unsigned int val, void __iomem *addr)
+{
+ wmb();
+ __raw_writel(val, addr);
+}
+
+/**
+ * smd_memcpy_to_fifo() - copy to SMD channel FIFO
+ * @dest: Destination address
+ * @src: Source address
+ * @num_bytes: Number of bytes to copy
+ * @from_user: true if data being copied is from userspace, false otherwise
+ *
+ * @return: Address of destination
+ *
+ * This function copies num_bytes from src to dest. This is used as the memcpy
+ * function to copy data to SMD FIFO in case the SMD FIFO is naturally aligned.
+ */
+static void *smd_memcpy_to_fifo(void *dest, const void *src, size_t num_bytes,
+ bool from_user)
+{
+ union fifo_mem *temp_dst = (union fifo_mem *)dest;
+ union fifo_mem *temp_src = (union fifo_mem *)src;
+ uintptr_t mask = sizeof(union fifo_mem) - 1;
+ int ret;
+
+ /*
+ * NOTE(review): BUG_ON for failed user copies assumes the caller
+ * already validated the user buffer — confirm at call sites.
+ */
+ /* Do byte copies until we hit 8-byte (double word) alignment */
+ while ((uintptr_t)temp_dst & mask && num_bytes) {
+ if (from_user) {
+ ret = copy_from_user(temp_dst, temp_src, 1);
+ BUG_ON(ret != 0);
+ } else {
+ __raw_writeb(temp_src->u8, temp_dst);
+ }
+
+ temp_src = (union fifo_mem *)((uintptr_t)temp_src + 1);
+ temp_dst = (union fifo_mem *)((uintptr_t)temp_dst + 1);
+ num_bytes--;
+ }
+
+ /* Do double word copies */
+ while (num_bytes >= sizeof(union fifo_mem)) {
+ if (from_user) {
+ ret = copy_from_user(temp_dst, temp_src,
+ sizeof(union fifo_mem));
+ BUG_ON(ret != 0);
+ } else {
+ __raw_writeq(temp_src->u64, temp_dst);
+ }
+
+ temp_dst++;
+ temp_src++;
+ num_bytes -= sizeof(union fifo_mem);
+ }
+
+ /* Copy remaining bytes */
+ while (num_bytes--) {
+ if (from_user) {
+ ret = copy_from_user(temp_dst, temp_src, 1);
+ BUG_ON(ret != 0);
+ } else {
+ __raw_writeb(temp_src->u8, temp_dst);
+ }
+
+ temp_src = (union fifo_mem *)((uintptr_t)temp_src + 1);
+ temp_dst = (union fifo_mem *)((uintptr_t)temp_dst + 1);
+ }
+
+ return dest;
+}
+
+/**
+ * smd_memcpy_from_fifo() - copy from SMD channel FIFO
+ * @dest: Destination address
+ * @src: Source address
+ * @num_bytes: Number of bytes to copy
+ * @to_user: true if data is being copied to userspace, false otherwise
+ *
+ * @return: Address of destination
+ *
+ * This function copies num_bytes from src to dest. This is used as the memcpy
+ * function to copy data from SMD FIFO in case the SMD FIFO is naturally
+ * aligned.
+ */
+static void *smd_memcpy_from_fifo(void *dest, const void *src, size_t num_bytes,
+ bool to_user)
+{
+ union fifo_mem *temp_dst = (union fifo_mem *)dest;
+ union fifo_mem *temp_src = (union fifo_mem *)src;
+ uintptr_t mask = sizeof(union fifo_mem) - 1;
+ int ret;
+
+ /*
+ * Alignment loop keys off the FIFO (source) pointer here, unlike
+ * smd_memcpy_to_fifo() which aligns the FIFO destination.
+ * NOTE(review): BUG_ON for failed user copies assumes the caller
+ * already validated the user buffer — confirm at call sites.
+ */
+ /* Do byte copies until we hit 8-byte (double word) alignment */
+ while ((uintptr_t)temp_src & mask && num_bytes) {
+ if (to_user) {
+ ret = copy_to_user(temp_dst, temp_src, 1);
+ BUG_ON(ret != 0);
+ } else {
+ temp_dst->u8 = __raw_readb(temp_src);
+ }
+
+ temp_src = (union fifo_mem *)((uintptr_t)temp_src + 1);
+ temp_dst = (union fifo_mem *)((uintptr_t)temp_dst + 1);
+ num_bytes--;
+ }
+
+ /* Do double word copies */
+ while (num_bytes >= sizeof(union fifo_mem)) {
+ if (to_user) {
+ ret = copy_to_user(temp_dst, temp_src,
+ sizeof(union fifo_mem));
+ BUG_ON(ret != 0);
+ } else {
+ temp_dst->u64 = __raw_readq(temp_src);
+ }
+
+ temp_dst++;
+ temp_src++;
+ num_bytes -= sizeof(union fifo_mem);
+ }
+
+ /* Copy remaining bytes */
+ while (num_bytes--) {
+ if (to_user) {
+ ret = copy_to_user(temp_dst, temp_src, 1);
+ BUG_ON(ret != 0);
+ } else {
+ temp_dst->u8 = __raw_readb(temp_src);
+ }
+
+ temp_src = (union fifo_mem *)((uintptr_t)temp_src + 1);
+ temp_dst = (union fifo_mem *)((uintptr_t)temp_dst + 1);
+ }
+
+ return dest;
+}
+
+/**
+ * smd_memcpy32_to_fifo() - Copy to SMD channel FIFO
+ *
+ * @dest: Destination address
+ * @src: Source address
+ * @num_bytes: Number of bytes to copy
+ * @from_user: always false
+ *
+ * @return: On Success, address of destination
+ *
+ * This function copies num_bytes data from src to dest. This is used as the
+ * memcpy function to copy data to SMD FIFO in case the SMD FIFO is 4 byte
+ * aligned.
+ */
+static void *smd_memcpy32_to_fifo(void *dest, const void *src, size_t num_bytes,
+ bool from_user)
+{
+ uint32_t *dest_local = (uint32_t *)dest;
+ uint32_t *src_local = (uint32_t *)src;
+
+ /* userspace copies are not supported on word-access channels */
+ if (from_user) {
+ panic("%s: Word Based Access not supported",
+ __func__);
+ }
+
+ /* size and both addresses must be 32-bit aligned for word access */
+ BUG_ON(num_bytes & SMD_FIFO_ADDR_ALIGN_BYTES);
+ BUG_ON(!dest_local ||
+ ((uintptr_t)dest_local & SMD_FIFO_ADDR_ALIGN_BYTES));
+ BUG_ON(!src_local ||
+ ((uintptr_t)src_local & SMD_FIFO_ADDR_ALIGN_BYTES));
+ num_bytes /= sizeof(uint32_t);
+
+ while (num_bytes--)
+ __raw_writel(*src_local++, dest_local++);
+
+ return dest;
+}
+
+/**
+ * smd_memcpy32_from_fifo() - Copy from SMD channel FIFO
+ * @dest: Destination address
+ * @src: Source address
+ * @num_bytes: Number of bytes to copy
+ * @to_user: true if data is being copied to userspace, false otherwise
+ *
+ * @return: On Success, destination address
+ *
+ * This function copies num_bytes data from SMD FIFO to dest. This is used as
+ * the memcpy function to copy data from SMD FIFO in case the SMD FIFO is 4 byte
+ * aligned.
+ */
+static void *smd_memcpy32_from_fifo(void *dest, const void *src,
+ size_t num_bytes, bool to_user)
+{
+
+ uint32_t *dest_local = (uint32_t *)dest;
+ uint32_t *src_local = (uint32_t *)src;
+
+ /* userspace copies are not supported on word-access channels */
+ if (to_user) {
+ panic("%s: Word Based Access not supported",
+ __func__);
+ }
+
+ /* size and both addresses must be 32-bit aligned for word access */
+ BUG_ON(num_bytes & SMD_FIFO_ADDR_ALIGN_BYTES);
+ BUG_ON(!dest_local ||
+ ((uintptr_t)dest_local & SMD_FIFO_ADDR_ALIGN_BYTES));
+ BUG_ON(!src_local ||
+ ((uintptr_t)src_local & SMD_FIFO_ADDR_ALIGN_BYTES));
+ num_bytes /= sizeof(uint32_t);
+
+ while (num_bytes--)
+ *dest_local++ = __raw_readl(src_local++);
+
+ return dest;
+}
+
+/*
+ * Log an outgoing SMD interrupt with channel/FIFO details. The (void)
+ * cast keeps subsys "used" when SMD power logging is compiled out and
+ * SMD_POWER_INFO() expands to nothing.
+ */
+static inline void log_notify(uint32_t subsystem, smd_channel_t *ch)
+{
+ const char *subsys = smd_edge_to_subsystem(subsystem);
+
+ (void) subsys;
+
+ if (!ch)
+ SMD_POWER_INFO("Apps->%s\n", subsys);
+ else
+ SMD_POWER_INFO(
+ "Apps->%s ch%d '%s': tx%d/rx%d %dr/%dw : %dr/%dw\n",
+ subsys, ch->n, ch->name,
+ ch->fifo_size -
+ (smd_stream_write_avail(ch) + 1),
+ smd_stream_read_avail(ch),
+ ch->half_ch->get_tail(ch->send),
+ ch->half_ch->get_head(ch->send),
+ ch->half_ch->get_tail(ch->recv),
+ ch->half_ch->get_head(ch->recv)
+ );
+}
+
+/*
+ * notify_*_smd(): ring the outgoing SMD doorbell for the given remote
+ * processor, if its interrupt is configured, and bump the matching
+ * smd_out_count statistic. notify_rpm_smd() only logs when the interrupt
+ * is actually configured.
+ */
+static inline void notify_modem_smd(smd_channel_t *ch)
+{
+ static const struct interrupt_config_item *intr
+ = &private_intr_config[SMD_MODEM].smd;
+
+ log_notify(SMD_APPS_MODEM, ch);
+ if (intr->out_base) {
+ ++interrupt_stats[SMD_MODEM].smd_out_count;
+ smd_write_intr(intr->out_bit_pos,
+ intr->out_base + intr->out_offset);
+ }
+}
+
+static inline void notify_dsp_smd(smd_channel_t *ch)
+{
+ static const struct interrupt_config_item *intr
+ = &private_intr_config[SMD_Q6].smd;
+
+ log_notify(SMD_APPS_QDSP, ch);
+ if (intr->out_base) {
+ ++interrupt_stats[SMD_Q6].smd_out_count;
+ smd_write_intr(intr->out_bit_pos,
+ intr->out_base + intr->out_offset);
+ }
+}
+
+static inline void notify_dsps_smd(smd_channel_t *ch)
+{
+ static const struct interrupt_config_item *intr
+ = &private_intr_config[SMD_DSPS].smd;
+
+ log_notify(SMD_APPS_DSPS, ch);
+ if (intr->out_base) {
+ ++interrupt_stats[SMD_DSPS].smd_out_count;
+ smd_write_intr(intr->out_bit_pos,
+ intr->out_base + intr->out_offset);
+ }
+}
+
+static inline void notify_wcnss_smd(struct smd_channel *ch)
+{
+ static const struct interrupt_config_item *intr
+ = &private_intr_config[SMD_WCNSS].smd;
+
+ log_notify(SMD_APPS_WCNSS, ch);
+ if (intr->out_base) {
+ ++interrupt_stats[SMD_WCNSS].smd_out_count;
+ smd_write_intr(intr->out_bit_pos,
+ intr->out_base + intr->out_offset);
+ }
+}
+
+static inline void notify_modemfw_smd(smd_channel_t *ch)
+{
+ static const struct interrupt_config_item *intr
+ = &private_intr_config[SMD_MODEM_Q6_FW].smd;
+
+ log_notify(SMD_APPS_Q6FW, ch);
+ if (intr->out_base) {
+ ++interrupt_stats[SMD_MODEM_Q6_FW].smd_out_count;
+ smd_write_intr(intr->out_bit_pos,
+ intr->out_base + intr->out_offset);
+ }
+}
+
+static inline void notify_rpm_smd(smd_channel_t *ch)
+{
+ static const struct interrupt_config_item *intr
+ = &private_intr_config[SMD_RPM].smd;
+
+ if (intr->out_base) {
+ log_notify(SMD_APPS_RPM, ch);
+ ++interrupt_stats[SMD_RPM].smd_out_count;
+ smd_write_intr(intr->out_bit_pos,
+ intr->out_base + intr->out_offset);
+ }
+}
+
+/*
+ * notify_*_smsm(): ring the outgoing SMSM doorbell for the given remote
+ * processor, if configured, and bump the matching smsm_out_count stat.
+ */
+static inline void notify_modem_smsm(void)
+{
+ static const struct interrupt_config_item *intr
+ = &private_intr_config[SMD_MODEM].smsm;
+
+ SMSM_POWER_INFO("SMSM Apps->%s", "MODEM");
+
+ if (intr->out_base) {
+ ++interrupt_stats[SMD_MODEM].smsm_out_count;
+ smd_write_intr(intr->out_bit_pos,
+ intr->out_base + intr->out_offset);
+ }
+}
+
+static inline void notify_dsp_smsm(void)
+{
+ static const struct interrupt_config_item *intr
+ = &private_intr_config[SMD_Q6].smsm;
+
+ SMSM_POWER_INFO("SMSM Apps->%s", "ADSP");
+
+ if (intr->out_base) {
+ ++interrupt_stats[SMD_Q6].smsm_out_count;
+ smd_write_intr(intr->out_bit_pos,
+ intr->out_base + intr->out_offset);
+ }
+}
+
+static inline void notify_dsps_smsm(void)
+{
+ static const struct interrupt_config_item *intr
+ = &private_intr_config[SMD_DSPS].smsm;
+
+ SMSM_POWER_INFO("SMSM Apps->%s", "DSPS");
+
+ if (intr->out_base) {
+ ++interrupt_stats[SMD_DSPS].smsm_out_count;
+ smd_write_intr(intr->out_bit_pos,
+ intr->out_base + intr->out_offset);
+ }
+}
+
+static inline void notify_wcnss_smsm(void)
+{
+ static const struct interrupt_config_item *intr
+ = &private_intr_config[SMD_WCNSS].smsm;
+
+ SMSM_POWER_INFO("SMSM Apps->%s", "WCNSS");
+
+ if (intr->out_base) {
+ ++interrupt_stats[SMD_WCNSS].smsm_out_count;
+ smd_write_intr(intr->out_bit_pos,
+ intr->out_base + intr->out_offset);
+ }
+}
+
+/**
+ * notify_other_smsm() - interrupt every host whose interrupt mask for
+ * this SMSM entry intersects the changed bits
+ * @smsm_entry: SMSM entry that changed
+ * @notify_mask: bits of the entry that changed
+ *
+ * Reads each host's interrupt mask from shared memory and raises that
+ * host's SMSM interrupt when it asked to see any of the changed bits.
+ * The local (APPS) mask triggers a callback snapshot instead.
+ *
+ * The original re-tested smsm_info.intr_mask before every host; the
+ * pointer is invariant across the function, so test it once up front.
+ */
+static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
+{
+ if (!smsm_info.intr_mask)
+ return;
+
+ if (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
+ & notify_mask)
+ notify_modem_smsm();
+
+ if (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
+ & notify_mask)
+ notify_dsp_smsm();
+
+ if (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
+ & notify_mask)
+ notify_wcnss_smsm();
+
+ if (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
+ & notify_mask)
+ notify_dsps_smsm();
+
+ if (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS))
+ & notify_mask)
+ smsm_cb_snapshot(1);
+}
+
+/*
+ * PM notifier: updates the SMSM_PROC_AWAKE bit in the APPS state entry
+ * around system suspend/resume.
+ * NOTE(review): which of the two mask arguments is clear vs. set depends
+ * on smsm_change_state()'s signature — confirm the intended direction.
+ */
+static int smsm_pm_notifier(struct notifier_block *nb,
+ unsigned long event, void *unused)
+{
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ smsm_change_state(SMSM_APPS_STATE, SMSM_PROC_AWAKE, 0);
+ break;
+
+ case PM_POST_SUSPEND:
+ smsm_change_state(SMSM_APPS_STATE, 0, SMSM_PROC_AWAKE);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block smsm_pm_nb = {
+ .notifier_call = smsm_pm_notifier,
+ .priority = 0,
+};
+
+/* the spinlock is used to synchronize between the
+ * irq handler and code that mutates the channel
+ * list or fiddles with channel state
+ */
+static DEFINE_SPINLOCK(smd_lock);
+DEFINE_SPINLOCK(smem_lock);
+
+/* the mutex is used during open() and close()
+ * operations to avoid races while creating or
+ * destroying smd_channel structures
+ */
+static DEFINE_MUTEX(smd_creation_mutex);
+
+/* Byte-access shared channel pair; get_remote_ch() treats ch1 as remote. */
+struct smd_shared {
+ struct smd_half_channel ch0;
+ struct smd_half_channel ch1;
+};
+
+/* Word-access variant of smd_shared; ch1 is again the remote half. */
+struct smd_shared_word_access {
+ struct smd_half_channel_word_access ch0;
+ struct smd_half_channel_word_access ch1;
+};
+
+/**
+ * Maps edge type to local and remote processor ID's.
+ */
+static struct edge_to_pid edge_to_pids[] = {
+ [SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
+ [SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "adsp"},
+ [SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
+ [SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
+ [SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
+ [SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
+ [SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
+ [SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
+ [SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
+ [SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
+ [SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
+ [SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
+ [SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
+ [SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
+ [SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
+ [SMD_APPS_RPM] = {SMD_APPS, SMD_RPM},
+ [SMD_MODEM_RPM] = {SMD_MODEM, SMD_RPM},
+ [SMD_QDSP_RPM] = {SMD_Q6, SMD_RPM},
+ [SMD_WCNSS_RPM] = {SMD_WCNSS, SMD_RPM},
+ [SMD_TZ_RPM] = {SMD_TZ, SMD_RPM},
+};
+
+struct restart_notifier_block {
+ unsigned processor;
+ char *name;
+ struct notifier_block nb;
+};
+
+static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};
+
+static LIST_HEAD(smd_ch_closed_list);
+static LIST_HEAD(smd_ch_closing_list);
+static LIST_HEAD(smd_ch_to_close_list);
+
+/* Per-remote-processor bookkeeping for channel scanning and restart. */
+struct remote_proc_info {
+ unsigned remote_pid;
+ unsigned free_space;
+ struct work_struct probe_work; /* runs smd_channel_probe_worker() */
+ struct list_head ch_list;
+ /* 2 total supported tables of channels */
+ unsigned char ch_allocated[SMEM_NUM_SMD_STREAM_CHANNELS * 2];
+ bool skip_pil;
+};
+
+static struct remote_proc_info remote_info[NUM_SMD_SUBSYSTEMS];
+
+static void finalize_channel_close_fn(struct work_struct *work);
+static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
+static struct workqueue_struct *channel_close_wq;
+
+#define PRI_ALLOC_TBL 1
+#define SEC_ALLOC_TBL 2
+static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm, int table_id,
+ struct remote_proc_info *r_info);
+
+/* Whether the edge's entry in edge_to_pids has been initialized. */
+static bool smd_edge_inited(int edge)
+{
+ return edge_to_pids[edge].initialized;
+}
+
+/* on smp systems, the probe might get called from multiple cores,
+ hence use a lock */
+static DEFINE_MUTEX(smd_probe_lock);
+
+/**
+ * scan_alloc_table - Scans a specified SMD channel allocation table in SMEM for
+ * newly created channels that need to be made locally
+ * visible
+ *
+ * @shared: pointer to the table array in SMEM
+ * @smd_ch_allocated: pointer to an array indicating already allocated channels
+ * @table_id: identifier for this channel allocation table
+ * @num_entries: number of entries in this allocation table
+ * @r_info: pointer to the info structure of the remote proc we care about
+ *
+ * The smd_probe_lock must be locked by the calling function. Shared and
+ * smd_ch_allocated are assumed to be valid pointers.
+ */
+static void scan_alloc_table(struct smd_alloc_elm *shared,
+ char *smd_ch_allocated,
+ int table_id,
+ unsigned num_entries,
+ struct remote_proc_info *r_info)
+{
+ unsigned n;
+ uint32_t type;
+
+ for (n = 0; n < num_entries; n++) {
+ /* already made locally visible in an earlier scan */
+ if (smd_ch_allocated[n])
+ continue;
+
+ /*
+ * channel should be allocated only if APPS processor is
+ * involved
+ */
+ type = SMD_CHANNEL_TYPE(shared[n].type);
+ if (!pid_is_on_edge(type, SMD_APPS) ||
+ !pid_is_on_edge(type, r_info->remote_pid))
+ continue;
+ /* skip entries the remote side has not claimed or named yet */
+ if (!shared[n].ref_count)
+ continue;
+ if (!shared[n].name[0])
+ continue;
+
+ if (!smd_edge_inited(type)) {
+ SMD_INFO(
+ "Probe skipping proc %d, tbl %d, ch %d, edge not inited\n",
+ r_info->remote_pid, table_id, n);
+ continue;
+ }
+
+ /* mark success so later scans skip this entry */
+ if (!smd_alloc_channel(&shared[n], table_id, r_info))
+ smd_ch_allocated[n] = 1;
+ else
+ SMD_INFO(
+ "Probe skipping proc %d, tbl %d, ch %d, not allocated\n",
+ r_info->remote_pid, table_id, n);
+ }
+}
+
+/**
+ * smd_channel_probe_worker() - Scan for newly created SMD channels and init
+ * local structures so the channels are visible to
+ * local clients
+ *
+ * @work: work_struct corresponding to an instance of this function running on
+ * a workqueue.
+ */
+static void smd_channel_probe_worker(struct work_struct *work)
+{
+ struct smd_alloc_elm *shared;
+ struct remote_proc_info *r_info;
+ unsigned tbl_size;
+
+ r_info = container_of(work, struct remote_proc_info, probe_work);
+
+ /* primary allocation table is mandatory */
+ shared = smem_get_entry(ID_CH_ALLOC_TBL, &tbl_size,
+ r_info->remote_pid, 0);
+
+ if (!shared) {
+ pr_err("%s: allocation table not initialized\n", __func__);
+ return;
+ }
+
+ mutex_lock(&smd_probe_lock);
+
+ scan_alloc_table(shared, r_info->ch_allocated, PRI_ALLOC_TBL,
+ tbl_size / sizeof(*shared),
+ r_info);
+
+ /* secondary table is optional; scan it only when present */
+ shared = smem_get_entry(SMEM_CHANNEL_ALLOC_TBL_2, &tbl_size,
+ r_info->remote_pid, 0);
+ if (shared)
+ scan_alloc_table(shared,
+ &(r_info->ch_allocated[SMEM_NUM_SMD_STREAM_CHANNELS]),
+ SEC_ALLOC_TBL,
+ tbl_size / sizeof(*shared),
+ r_info);
+
+ mutex_unlock(&smd_probe_lock);
+}
+
+/**
+ * get_remote_ch() - gathers remote channel info
+ *
+ * @shared2: Pointer to v2 shared channel structure
+ * @type: Edge type
+ * @pid: Processor ID of processor on edge
+ * @remote_ch: Channel that belongs to processor @pid
+ * @is_word_access_ch: Bool, is this a word aligned access channel
+ *
+ * @returns: 0 on success, error code on failure
+ */
+static int get_remote_ch(void *shared2,
+ uint32_t type, uint32_t pid,
+ void **remote_ch,
+ int is_word_access_ch
+ )
+{
+ if (!remote_ch || !shared2 || !pid_is_on_edge(type, pid) ||
+ !pid_is_on_edge(type, SMD_APPS))
+ return -EINVAL;
+
+ /* ch1 holds the remote processor's half of the shared pair */
+ if (is_word_access_ch)
+ *remote_ch =
+ &((struct smd_shared_word_access *)(shared2))->ch1;
+ else
+ *remote_ch = &((struct smd_shared *)(shared2))->ch1;
+
+ return 0;
+}
+
+/**
+ * smd_remote_ss_to_edge() - return edge type from remote ss type
+ * @name: remote subsystem name
+ *
+ * Returns the edge type connected between the local subsystem(APPS)
+ * and remote subsystem @name.
+ */
+/**
+ * smd_remote_ss_to_edge() - return edge type from remote ss type
+ * @name: remote subsystem name
+ *
+ * Returns the edge type connected between the local subsystem(APPS)
+ * and remote subsystem @name, or -EINVAL when no edge matches.
+ *
+ * Fix: the original strncmp(subsys_name, name, strlen(name)) treated any
+ * prefix of a subsystem name as a match (e.g. "mod" matched "modem");
+ * compare the full strings instead.
+ */
+int smd_remote_ss_to_edge(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
+ if (edge_to_pids[i].subsys_name[0] != 0x0) {
+ if (!strcmp(edge_to_pids[i].subsys_name, name))
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(smd_remote_ss_to_edge);
+
+/**
+ * smd_edge_to_pil_str - Returns the PIL string used to load the remote side of
+ * the indicated edge.
+ *
+ * @type - Edge definition
+ * @returns - The PIL string to load the remote side of @type or NULL if the
+ * PIL string does not exist.
+ */
+const char *smd_edge_to_pil_str(uint32_t type)
+{
+ const char *pil_str = NULL;
+
+ if (type < ARRAY_SIZE(edge_to_pids)) {
+ /* edge not configured yet: ask the caller to retry later */
+ if (!edge_to_pids[type].initialized)
+ return ERR_PTR(-EPROBE_DEFER);
+ /* empty subsys_name or skip_pil means nothing to load */
+ if (!remote_info[smd_edge_to_remote_pid(type)].skip_pil) {
+ pil_str = edge_to_pids[type].subsys_name;
+ if (pil_str[0] == 0x0)
+ pil_str = NULL;
+ }
+ }
+ return pil_str;
+}
+EXPORT_SYMBOL(smd_edge_to_pil_str);
+
+/*
+ * Returns a pointer to the subsystem name or NULL if no
+ * subsystem name is available.
+ *
+ * @type - Edge definition
+ */
+/* NOTE: may return ERR_PTR(-EPROBE_DEFER); callers must IS_ERR-check. */
+const char *smd_edge_to_subsystem(uint32_t type)
+{
+ const char *subsys = NULL;
+
+ if (type < ARRAY_SIZE(edge_to_pids)) {
+ subsys = edge_to_pids[type].subsys_name;
+ if (subsys[0] == 0x0)
+ subsys = NULL;
+ if (!edge_to_pids[type].initialized)
+ subsys = ERR_PTR(-EPROBE_DEFER);
+ }
+ return subsys;
+}
+EXPORT_SYMBOL(smd_edge_to_subsystem);
+
+/*
+ * Returns a pointer to the subsystem name given the
+ * remote processor ID.
+ * subsystem is not necessarily PIL-loadable
+ *
+ * @pid Remote processor ID
+ * @returns Pointer to subsystem name or NULL if not found
+ */
+/*
+ * The RPM has no name in edge_to_pids, so it is special-cased to "rpm".
+ * May return ERR_PTR(-EPROBE_DEFER) before edges are initialized.
+ */
+const char *smd_pid_to_subsystem(uint32_t pid)
+{
+ const char *subsys = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
+ if (pid == edge_to_pids[i].remote_pid) {
+ if (!edge_to_pids[i].initialized) {
+ subsys = ERR_PTR(-EPROBE_DEFER);
+ break;
+ }
+ if (edge_to_pids[i].subsys_name[0] != 0x0) {
+ subsys = edge_to_pids[i].subsys_name;
+ break;
+ } else if (pid == SMD_RPM) {
+ subsys = "rpm";
+ break;
+ }
+ }
+ }
+
+ return subsys;
+}
+EXPORT_SYMBOL(smd_pid_to_subsystem);
+
+/*
+ * Force a half-channel that is not already CLOSED into @new_state,
+ * clearing the DSR/CTS/CD handshake flags and raising fSTATE so the
+ * state change is noticed.
+ */
+static void smd_reset_edge(void *void_ch, unsigned new_state,
+ int is_word_access_ch)
+{
+ if (is_word_access_ch) {
+ struct smd_half_channel_word_access *ch =
+ (struct smd_half_channel_word_access *)(void_ch);
+ if (ch->state != SMD_SS_CLOSED) {
+ ch->state = new_state;
+ ch->fDSR = 0;
+ ch->fCTS = 0;
+ ch->fCD = 0;
+ ch->fSTATE = 1;
+ }
+ } else {
+ struct smd_half_channel *ch =
+ (struct smd_half_channel *)(void_ch);
+ if (ch->state != SMD_SS_CLOSED) {
+ ch->state = new_state;
+ ch->fDSR = 0;
+ ch->fCTS = 0;
+ ch->fCD = 0;
+ ch->fSTATE = 1;
+ }
+ }
+}
+
+/**
+ * smd_channel_reset_state() - find channels in an allocation table and set them
+ * to the specified state
+ *
+ * @shared: Pointer to the allocation table to scan
+ * @table_id: ID of the table
+ * @new_state: New state that channels should be set to
+ * @pid: Processor ID of the remote processor for the channels
+ * @num_entries: Number of entries in the table
+ *
+ * Scan the indicated table for channels between Apps and @pid. If a valid
+ * channel is found, set the remote side of the channel to @new_state.
+ */
+static void smd_channel_reset_state(struct smd_alloc_elm *shared, int table_id,
+ unsigned new_state, unsigned pid, unsigned num_entries)
+{
+ unsigned n;
+ void *shared2;
+ uint32_t type;
+ void *remote_ch;
+ int is_word_access;
+ unsigned base_id;
+
+ switch (table_id) {
+ case PRI_ALLOC_TBL:
+ base_id = SMEM_SMD_BASE_ID;
+ break;
+ case SEC_ALLOC_TBL:
+ base_id = SMEM_SMD_BASE_ID_2;
+ break;
+ default:
+ SMD_INFO("%s: invalid table_id:%d\n", __func__, table_id);
+ return;
+ }
+
+ /* the SMEM item for channel n is base_id + n */
+ for (n = 0; n < num_entries; n++) {
+ /* skip unclaimed or unnamed table entries */
+ if (!shared[n].ref_count)
+ continue;
+ if (!shared[n].name[0])
+ continue;
+
+ type = SMD_CHANNEL_TYPE(shared[n].type);
+ is_word_access = is_word_access_ch(type);
+ if (is_word_access)
+ shared2 = smem_find(base_id + n,
+ sizeof(struct smd_shared_word_access), pid,
+ 0);
+ else
+ shared2 = smem_find(base_id + n,
+ sizeof(struct smd_shared), pid, 0);
+ if (!shared2)
+ continue;
+
+ if (!get_remote_ch(shared2, type, pid,
+ &remote_ch, is_word_access))
+ smd_reset_edge(remote_ch, new_state, is_word_access);
+ }
+}
+
+/**
+ * pid_is_on_edge() - checks to see if the processor with id pid is on the
+ * edge specified by edge_num
+ *
+ * @edge_num: the number of the edge which is being tested
+ * @pid: the id of the processor being tested
+ *
+ * @returns: true if on edge, false otherwise
+ */
+static bool pid_is_on_edge(uint32_t edge_num, unsigned pid)
+{
+ const struct edge_to_pid *e;
+
+ /* out-of-range edge numbers are on no edge */
+ if (edge_num >= ARRAY_SIZE(edge_to_pids))
+ return false;
+
+ e = &edge_to_pids[edge_num];
+ return e->local_pid == pid || e->remote_pid == pid;
+}
+
+/**
+ * smd_channel_reset() - clean up SMD/SMSM shared state after a remote
+ * processor restart
+ * @restart_pid: processor ID of the restarting remote processor
+ *
+ * Zeroes the dead processor's SMSM state entry, re-signals the other
+ * hosts, then walks both channel allocation tables driving each remote
+ * half-channel through CLOSING and then CLOSED so local clients see a
+ * normal close sequence.
+ */
+void smd_channel_reset(uint32_t restart_pid)
+{
+ struct smd_alloc_elm *shared_pri;
+ struct smd_alloc_elm *shared_sec;
+ unsigned long flags;
+ unsigned pri_size;
+ unsigned sec_size;
+
+ SMD_POWER_INFO("%s: starting reset\n", __func__);
+
+ shared_pri = smem_get_entry(ID_CH_ALLOC_TBL, &pri_size, restart_pid, 0);
+ if (!shared_pri) {
+ pr_err("%s: allocation table not initialized\n", __func__);
+ return;
+ }
+ shared_sec = smem_get_entry(SMEM_CHANNEL_ALLOC_TBL_2, &sec_size,
+ restart_pid, 0);
+
+ /* reset SMSM entry */
+ if (smsm_info.state) {
+ writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));
+
+ /* restart SMSM init handshake */
+ if (restart_pid == SMSM_MODEM) {
+ smsm_change_state(SMSM_APPS_STATE,
+ SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
+ 0);
+ }
+
+ /* notify SMSM processors */
+ smsm_irq_handler(0, 0);
+ notify_modem_smsm();
+ notify_dsp_smsm();
+ notify_dsps_smsm();
+ notify_wcnss_smsm();
+ }
+
+ /* change all remote states to CLOSING */
+ mutex_lock(&smd_probe_lock);
+ spin_lock_irqsave(&smd_lock, flags);
+ smd_channel_reset_state(shared_pri, PRI_ALLOC_TBL, SMD_SS_CLOSING,
+ restart_pid, pri_size / sizeof(*shared_pri));
+ if (shared_sec)
+ smd_channel_reset_state(shared_sec, SEC_ALLOC_TBL,
+ SMD_SS_CLOSING, restart_pid,
+ sec_size / sizeof(*shared_sec));
+ spin_unlock_irqrestore(&smd_lock, flags);
+ mutex_unlock(&smd_probe_lock);
+
+ /* make state writes visible before running the local fake-irq pass */
+ mb();
+ smd_fake_irq_handler(0);
+
+ /* change all remote states to CLOSED */
+ mutex_lock(&smd_probe_lock);
+ spin_lock_irqsave(&smd_lock, flags);
+ smd_channel_reset_state(shared_pri, PRI_ALLOC_TBL, SMD_SS_CLOSED,
+ restart_pid, pri_size / sizeof(*shared_pri));
+ if (shared_sec)
+ smd_channel_reset_state(shared_sec, SEC_ALLOC_TBL,
+ SMD_SS_CLOSED, restart_pid,
+ sec_size / sizeof(*shared_sec));
+ spin_unlock_irqrestore(&smd_lock, flags);
+ mutex_unlock(&smd_probe_lock);
+
+ mb();
+ smd_fake_irq_handler(0);
+
+ SMD_POWER_INFO("%s: finished reset\n", __func__);
+}
+
+/* how many bytes are available for reading */
+/* Ring arithmetic — fifo_mask wraps the indices (power-of-two FIFO
+ * size presumed; confirm where fifo_mask is assigned). */
+static int smd_stream_read_avail(struct smd_channel *ch)
+{
+ return (ch->half_ch->get_head(ch->recv) -
+ ch->half_ch->get_tail(ch->recv)) & ch->fifo_mask;
+}
+
+/* how many bytes we are free to write */
+static int smd_stream_write_avail(struct smd_channel *ch)
+{
+ int bytes_avail;
+
+ bytes_avail = ch->fifo_mask - ((ch->half_ch->get_head(ch->send) -
+ ch->half_ch->get_tail(ch->send)) & ch->fifo_mask) + 1;
+
+ if (bytes_avail < SMD_FIFO_FULL_RESERVE)
+ bytes_avail = 0;
+ else
+ bytes_avail -= SMD_FIFO_FULL_RESERVE;
+ return bytes_avail;
+}
+
+static int smd_packet_read_avail(struct smd_channel *ch)
+{
+ if (ch->current_packet) {
+ int n = smd_stream_read_avail(ch);
+ if (n > ch->current_packet)
+ n = ch->current_packet;
+ return n;
+ } else {
+ return 0;
+ }
+}
+
+static int smd_packet_write_avail(struct smd_channel *ch)
+{
+ int n = smd_stream_write_avail(ch);
+ return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
+}
+
+static int ch_is_open(struct smd_channel *ch)
+{
+ return (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED ||
+ ch->half_ch->get_state(ch->recv) == SMD_SS_FLUSHING)
+ && (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED);
+}
+
+/* provide a pointer and length to readable data in the fifo */
+static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
+{
+	unsigned head = ch->half_ch->get_head(ch->recv);
+	unsigned tail = ch->half_ch->get_tail(ch->recv);
+	unsigned fifo_size = ch->fifo_size;
+
+	/* sanity-check the shared-memory indices before deriving a pointer
+	 * from them; the remote processor could have corrupted them
+	 */
+	BUG_ON(fifo_size >= SZ_1M);
+	BUG_ON(head >= fifo_size);
+	BUG_ON(tail >= fifo_size);
+	BUG_ON(OVERFLOW_ADD_UNSIGNED(uintptr_t, (uintptr_t)ch->recv_data,
+								tail));
+	*ptr = (void *) (ch->recv_data + tail);
+	/* return only the contiguous span; the caller loops for wrap-around */
+	if (tail <= head)
+		return head - tail;
+	else
+		return fifo_size - tail;
+}
+
+/* nonzero if the remote side asked not to be interrupted on reads */
+static int read_intr_blocked(struct smd_channel *ch)
+{
+	return ch->half_ch->get_fBLOCKREADINTR(ch->recv);
+}
+
+/* advance the fifo read pointer after data from ch_read_buffer is consumed */
+static void ch_read_done(struct smd_channel *ch, unsigned count)
+{
+	BUG_ON(count > smd_stream_read_avail(ch));
+	ch->half_ch->set_tail(ch->recv,
+		(ch->half_ch->get_tail(ch->recv) + count) & ch->fifo_mask);
+	/* make the tail update visible before raising the event flag */
+	wmb();
+	ch->half_ch->set_fTAIL(ch->send, 1);
+}
+
+/* basic read interface to ch_read_{buffer,done} used
+ * by smd_*_read() and update_packet_state()
+ * will read-and-discard if the _data pointer is null
+ * returns the number of bytes consumed (may be short if the fifo empties)
+ */
+static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
+{
+	void *ptr;
+	unsigned n;
+	unsigned char *data = _data;
+	int orig_len = len;
+
+	while (len > 0) {
+		n = ch_read_buffer(ch, &ptr);
+		if (n == 0)
+			break;
+
+		if (n > len)
+			n = len;
+		if (_data)
+			ch->read_from_fifo(data, ptr, n, user_buf);
+
+		data += n;
+		len -= n;
+		/* advance the tail even when discarding so the fifo drains */
+		ch_read_done(ch, n);
+	}
+
+	return orig_len - len;
+}
+
+static void update_stream_state(struct smd_channel *ch)
+{
+	/* streams have no special state requiring updating */
+}
+
+/* consume packet headers until the next non-empty packet is found */
+static void update_packet_state(struct smd_channel *ch)
+{
+	unsigned hdr[5];
+	int r;
+	const char *peripheral = NULL;
+
+	/* can't do anything if we're in the middle of a packet */
+	while (ch->current_packet == 0) {
+		/* discard 0 length packets if any */
+
+		/* don't bother unless we can get the full header */
+		if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
+			return;
+
+		r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
+		BUG_ON(r != SMD_HEADER_SIZE);
+
+		/* hdr[0] carries the payload length; hdr[1..4] are unused */
+		ch->current_packet = hdr[0];
+		if (ch->current_packet > (uint32_t)INT_MAX) {
+			pr_err("%s: Invalid packet size of %d bytes detected. Edge: %d, Channel : %s, RPTR: %d, WPTR: %d",
+				__func__, ch->current_packet, ch->type,
+				ch->name, ch->half_ch->get_tail(ch->recv),
+				ch->half_ch->get_head(ch->recv));
+			/* a bogus size means the remote proc state is corrupt:
+			 * restart that subsystem, or panic if we cannot
+			 */
+			peripheral = smd_edge_to_pil_str(ch->type);
+			if (peripheral) {
+				if (subsystem_restart(peripheral) < 0)
+					BUG();
+			} else {
+				BUG();
+			}
+		}
+	}
+}
+
+/**
+ * ch_write_buffer() - Provide a pointer and length for the next segment of
+ * free space in the FIFO.
+ * @ch: channel
+ * @ptr: Address to pointer for the next segment write
+ * @returns: Maximum size that can be written until the FIFO is either full
+ * or the end of the FIFO has been reached.
+ *
+ * The returned pointer and length are passed to memcpy, so the next segment is
+ * defined as either the space available between the read index (tail) and the
+ * write index (head) or the space available to the end of the FIFO.
+ */
+static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
+{
+	unsigned head = ch->half_ch->get_head(ch->send);
+	unsigned tail = ch->half_ch->get_tail(ch->send);
+	unsigned fifo_size = ch->fifo_size;
+
+	/* validate shared indices before trusting them for pointer math */
+	BUG_ON(fifo_size >= SZ_1M);
+	BUG_ON(head >= fifo_size);
+	BUG_ON(tail >= fifo_size);
+	BUG_ON(OVERFLOW_ADD_UNSIGNED(uintptr_t, (uintptr_t)ch->send_data,
+								head));
+
+	*ptr = (void *) (ch->send_data + head);
+	if (head < tail) {
+		return tail - head - SMD_FIFO_FULL_RESERVE;
+	} else {
+		/* the free region wraps; the full-reserve only shortens the
+		 * returned span when it falls inside this contiguous piece
+		 */
+		if (tail < SMD_FIFO_FULL_RESERVE)
+			return fifo_size + tail - head
+				- SMD_FIFO_FULL_RESERVE;
+		else
+			return fifo_size - head;
+	}
+}
+
+/* advance the fifo write pointer after freespace
+ * from ch_write_buffer is filled
+ */
+static void ch_write_done(struct smd_channel *ch, unsigned count)
+{
+	BUG_ON(count > smd_stream_write_avail(ch));
+	ch->half_ch->set_head(ch->send,
+		(ch->half_ch->get_head(ch->send) + count) & ch->fifo_mask);
+	/* make the head update visible before raising the event flag */
+	wmb();
+	ch->half_ch->set_fHEAD(ch->send, 1);
+}
+
+/* set our half-channel state and signal the remote processor */
+static void ch_set_state(struct smd_channel *ch, unsigned n)
+{
+	if (n == SMD_SS_OPENED) {
+		/* assert the modem-control lines while open */
+		ch->half_ch->set_fDSR(ch->send, 1);
+		ch->half_ch->set_fCTS(ch->send, 1);
+		ch->half_ch->set_fCD(ch->send, 1);
+	} else {
+		ch->half_ch->set_fDSR(ch->send, 0);
+		ch->half_ch->set_fCTS(ch->send, 0);
+		ch->half_ch->set_fCD(ch->send, 0);
+	}
+	ch->half_ch->set_state(ch->send, n);
+	ch->half_ch->set_fSTATE(ch->send, 1);
+	ch->notify_other_cpu(ch);
+}
+
+/**
+ * do_smd_probe() - Look for newly created SMD channels from a specific
+ * processor
+ *
+ * @remote_pid: remote processor id of the proc that may have created channels
+ */
+static void do_smd_probe(unsigned remote_pid)
+{
+	unsigned free_space;
+
+	/* a change in SMEM free space implies new allocations may have
+	 * happened, so rescan for new channels from the probe workqueue
+	 */
+	free_space = smem_get_free_space(remote_pid);
+	if (free_space != remote_info[remote_pid].free_space) {
+		remote_info[remote_pid].free_space = free_space;
+		schedule_work(&remote_info[remote_pid].probe_work);
+	}
+}
+
+/* react to a remote half-channel state transition; called with smd_lock
+ * held (see handle_smd_irq() and smd_named_open_on_edge())
+ */
+static void smd_state_change(struct smd_channel *ch,
+		unsigned last, unsigned next)
+{
+	ch->last_state = next;
+
+	SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);
+
+	switch (next) {
+	case SMD_SS_OPENING:
+		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSING ||
+		    ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
+			/* fresh session: zero our indices and follow suit */
+			ch->half_ch->set_tail(ch->recv, 0);
+			ch->half_ch->set_head(ch->send, 0);
+			ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
+			ch_set_state(ch, SMD_SS_OPENING);
+		}
+		break;
+	case SMD_SS_OPENED:
+		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENING) {
+			ch_set_state(ch, SMD_SS_OPENED);
+			ch->notify(ch->priv, SMD_EVENT_OPEN);
+		}
+		break;
+	case SMD_SS_FLUSHING:
+	case SMD_SS_RESET:
+		/* we should force them to close? */
+		break;
+	case SMD_SS_CLOSED:
+		if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED) {
+			/* remote closed first: drop pending packet state and
+			 * tell the client before our side finishes closing
+			 */
+			ch_set_state(ch, SMD_SS_CLOSING);
+			ch->current_packet = 0;
+			ch->pending_pkt_sz = 0;
+			ch->notify(ch->priv, SMD_EVENT_CLOSE);
+		}
+		break;
+	case SMD_SS_CLOSING:
+		if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
+			/* both sides closed: finish teardown in workqueue */
+			list_move(&ch->ch_list,
+					&smd_ch_to_close_list);
+			queue_work(channel_close_wq,
+						&finalize_channel_close_work);
+		}
+		break;
+	}
+}
+
+/* process state-flag events for channels waiting on smd_ch_closing_list so
+ * their close handshakes can complete
+ */
+static void handle_smd_irq_closing_list(void)
+{
+	unsigned long flags;
+	struct smd_channel *ch;
+	struct smd_channel *index;
+	unsigned tmp;
+
+	spin_lock_irqsave(&smd_lock, flags);
+	list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
+		if (ch->half_ch->get_fSTATE(ch->recv))
+			ch->half_ch->set_fSTATE(ch->recv, 0);
+		tmp = ch->half_ch->get_state(ch->recv);
+		if (tmp != ch->last_state)
+			smd_state_change(ch, ch->last_state, tmp);
+	}
+	spin_unlock_irqrestore(&smd_lock, flags);
+}
+
+/* service head/tail/state events for every open channel on one edge and
+ * dispatch the matching SMD_EVENT_* client notifications
+ * NOTE(review): the @notify argument is not referenced in this body —
+ * confirm whether it is vestigial
+ */
+static void handle_smd_irq(struct remote_proc_info *r_info,
+		void (*notify)(smd_channel_t *ch))
+{
+	unsigned long flags;
+	struct smd_channel *ch;
+	unsigned ch_flags;
+	unsigned tmp;
+	unsigned char state_change;
+	struct list_head *list;
+
+	list = &r_info->ch_list;
+
+	spin_lock_irqsave(&smd_lock, flags);
+	list_for_each_entry(ch, list, ch_list) {
+		state_change = 0;
+		ch_flags = 0;
+		if (ch_is_open(ch)) {
+			/* bit 0: head moved (rx data available)
+			 * bit 1: tail moved (tx space freed)
+			 * bit 2: state flag raised
+			 */
+			if (ch->half_ch->get_fHEAD(ch->recv)) {
+				ch->half_ch->set_fHEAD(ch->recv, 0);
+				ch_flags |= 1;
+			}
+			if (ch->half_ch->get_fTAIL(ch->recv)) {
+				ch->half_ch->set_fTAIL(ch->recv, 0);
+				ch_flags |= 2;
+			}
+			if (ch->half_ch->get_fSTATE(ch->recv)) {
+				ch->half_ch->set_fSTATE(ch->recv, 0);
+				ch_flags |= 4;
+			}
+		}
+		tmp = ch->half_ch->get_state(ch->recv);
+		if (tmp != ch->last_state) {
+			SMD_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
+					ch->n, ch->name, ch->last_state, tmp);
+			smd_state_change(ch, ch->last_state, tmp);
+			state_change = 1;
+		}
+		if (ch_flags & 0x3) {
+			ch->update_state(ch);
+			SMD_POWER_INFO(
+				"SMD ch%d '%s' Data event 0x%x tx%d/rx%d %dr/%dw : %dr/%dw\n",
+				ch->n, ch->name,
+				ch_flags,
+				ch->fifo_size -
+					(smd_stream_write_avail(ch) + 1),
+				smd_stream_read_avail(ch),
+				ch->half_ch->get_tail(ch->send),
+				ch->half_ch->get_head(ch->send),
+				ch->half_ch->get_tail(ch->recv),
+				ch->half_ch->get_head(ch->recv)
+				);
+			ch->notify(ch->priv, SMD_EVENT_DATA);
+		}
+		/* a pure state-flag event that caused no state transition is
+		 * reported as a status (signal-line) change
+		 */
+		if (ch_flags & 0x4 && !state_change) {
+			SMD_POWER_INFO("SMD ch%d '%s' State update\n",
+					ch->n, ch->name);
+			ch->notify(ch->priv, SMD_EVENT_STATUS);
+		}
+	}
+	spin_unlock_irqrestore(&smd_lock, flags);
+	do_smd_probe(r_info->remote_pid);
+}
+
+/* log an incoming SMD interrupt from the given edge's subsystem */
+static inline void log_irq(uint32_t subsystem)
+{
+	const char *subsys = smd_edge_to_subsystem(subsystem);
+
+	/* keeps subsys "used" — presumably for builds where SMD_POWER_INFO
+	 * compiles to nothing; confirm against the macro definition
+	 */
+	(void) subsys;
+
+	SMD_POWER_INFO("SMD Int %s->Apps\n", subsys);
+}
+
+/* Per-edge SMD interrupt handlers.  Each one bails out early if its edge
+ * has not finished initializing, accounts the interrupt, services the
+ * edge's channel list, and then sweeps the closing list.
+ */
+irqreturn_t smd_modem_irq_handler(int irq, void *data)
+{
+	if (unlikely(!edge_to_pids[SMD_APPS_MODEM].initialized))
+		return IRQ_HANDLED;
+	log_irq(SMD_APPS_MODEM);
+	++interrupt_stats[SMD_MODEM].smd_in_count;
+	handle_smd_irq(&remote_info[SMD_MODEM], notify_modem_smd);
+	handle_smd_irq_closing_list();
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smd_dsp_irq_handler(int irq, void *data)
+{
+	if (unlikely(!edge_to_pids[SMD_APPS_QDSP].initialized))
+		return IRQ_HANDLED;
+	log_irq(SMD_APPS_QDSP);
+	++interrupt_stats[SMD_Q6].smd_in_count;
+	handle_smd_irq(&remote_info[SMD_Q6], notify_dsp_smd);
+	handle_smd_irq_closing_list();
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smd_dsps_irq_handler(int irq, void *data)
+{
+	if (unlikely(!edge_to_pids[SMD_APPS_DSPS].initialized))
+		return IRQ_HANDLED;
+	log_irq(SMD_APPS_DSPS);
+	++interrupt_stats[SMD_DSPS].smd_in_count;
+	handle_smd_irq(&remote_info[SMD_DSPS], notify_dsps_smd);
+	handle_smd_irq_closing_list();
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
+{
+	if (unlikely(!edge_to_pids[SMD_APPS_WCNSS].initialized))
+		return IRQ_HANDLED;
+	log_irq(SMD_APPS_WCNSS);
+	++interrupt_stats[SMD_WCNSS].smd_in_count;
+	handle_smd_irq(&remote_info[SMD_WCNSS], notify_wcnss_smd);
+	handle_smd_irq_closing_list();
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smd_modemfw_irq_handler(int irq, void *data)
+{
+	if (unlikely(!edge_to_pids[SMD_APPS_Q6FW].initialized))
+		return IRQ_HANDLED;
+	log_irq(SMD_APPS_Q6FW);
+	++interrupt_stats[SMD_MODEM_Q6_FW].smd_in_count;
+	handle_smd_irq(&remote_info[SMD_MODEM_Q6_FW], notify_modemfw_smd);
+	handle_smd_irq_closing_list();
+	return IRQ_HANDLED;
+}
+
+irqreturn_t smd_rpm_irq_handler(int irq, void *data)
+{
+	if (unlikely(!edge_to_pids[SMD_APPS_RPM].initialized))
+		return IRQ_HANDLED;
+	log_irq(SMD_APPS_RPM);
+	++interrupt_stats[SMD_RPM].smd_in_count;
+	handle_smd_irq(&remote_info[SMD_RPM], notify_rpm_smd);
+	handle_smd_irq_closing_list();
+	return IRQ_HANDLED;
+}
+
+/* poll every edge as if its interrupt fired; used after channel resets
+ * (see smd_channel_reset flow above) to flush pending events
+ */
+static void smd_fake_irq_handler(unsigned long arg)
+{
+	handle_smd_irq(&remote_info[SMD_MODEM], notify_modem_smd);
+	handle_smd_irq(&remote_info[SMD_Q6], notify_dsp_smd);
+	handle_smd_irq(&remote_info[SMD_DSPS], notify_dsps_smd);
+	handle_smd_irq(&remote_info[SMD_WCNSS], notify_wcnss_smd);
+	handle_smd_irq(&remote_info[SMD_MODEM_Q6_FW], notify_modemfw_smd);
+	handle_smd_irq(&remote_info[SMD_RPM], notify_rpm_smd);
+	handle_smd_irq_closing_list();
+}
+
+/* decode the transfer type from an allocation-table entry:
+ * 1 = stream channel (returns 0), 2 = packet channel (returns 1);
+ * any other value is unsupported and panics
+ */
+static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
+{
+	if (SMD_XFER_TYPE(alloc_elm->type) == 1)
+		return 0;
+	else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
+		return 1;
+
+	panic("Unsupported SMD xfer type: %d name:%s edge:%d\n",
+		SMD_XFER_TYPE(alloc_elm->type),
+		alloc_elm->name,
+		SMD_CHANNEL_TYPE(alloc_elm->type));
+}
+
+/* write up to @len bytes into the stream fifo; returns bytes written.
+ * @user_buf: nonzero if @_data is a userspace pointer
+ * @intr_ntfy: whether to interrupt the remote after a successful write
+ */
+static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
+		int user_buf, bool intr_ntfy)
+{
+	void *ptr;
+	const unsigned char *buf = _data;
+	unsigned xfer;
+	int orig_len = len;
+
+	SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
+	if (len < 0)
+		return -EINVAL;
+	else if (len == 0)
+		return 0;
+
+	while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
+		if (!ch_is_open(ch)) {
+			/* channel dropped mid-write: report zero written */
+			len = orig_len;
+			break;
+		}
+		if (xfer > len)
+			xfer = len;
+
+		ch->write_to_fifo(ptr, buf, xfer, user_buf);
+		ch_write_done(ch, xfer);
+		len -= xfer;
+		buf += xfer;
+		if (len == 0)
+			break;
+	}
+
+	/* only interrupt the remote if something was actually written and
+	 * the caller requested notification
+	 */
+	if (orig_len - len && intr_ntfy)
+		ch->notify_other_cpu(ch);
+
+	return orig_len - len;
+}
+
+/* write one complete packet (header + payload); all-or-nothing.
+ * Returns @len on success, -ENOMEM if the fifo cannot hold the whole
+ * packet, -EINVAL/-EFAULT on bad input or header-write failure.
+ */
+static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
+		int user_buf, bool intr_ntfy)
+{
+	int ret;
+	unsigned hdr[5];
+
+	SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
+	if (len < 0)
+		return -EINVAL;
+	else if (len == 0)
+		return 0;
+
+	/* reserve room for header and payload up front so the two stream
+	 * writes below cannot partially fail
+	 */
+	if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
+		return -ENOMEM;
+
+	hdr[0] = len;
+	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
+
+
+	/* header write suppresses the interrupt (intr_ntfy=false) so the
+	 * remote sees a single event after the payload lands
+	 */
+	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0, false);
+	if (ret < 0 || ret != sizeof(hdr)) {
+		SMD_DBG("%s failed to write pkt header: %d returned\n",
+								__func__, ret);
+		return -EFAULT;
+	}
+
+
+	ret = smd_stream_write(ch, _data, len, user_buf, true);
+	if (ret < 0 || ret != len) {
+		SMD_DBG("%s failed to write pkt data: %d returned\n",
+								__func__, ret);
+		return ret;
+	}
+
+	return len;
+}
+
+/* read up to @len stream bytes; interrupts the remote (unless blocked)
+ * so it can refill the fifo
+ */
+static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
+{
+	int r;
+
+	if (len < 0)
+		return -EINVAL;
+
+	r = ch_read(ch, data, len, user_buf);
+	if (r > 0)
+		if (!read_intr_blocked(ch))
+			ch->notify_other_cpu(ch);
+
+	return r;
+}
+
+/* read payload bytes of the current packet, never crossing a packet
+ * boundary; advances packet bookkeeping under smd_lock
+ */
+static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
+{
+	unsigned long flags;
+	int r;
+
+	if (len < 0)
+		return -EINVAL;
+
+	if (ch->current_packet > (uint32_t)INT_MAX) {
+		pr_err("%s: Invalid packet size for Edge %d and Channel %s",
+			__func__, ch->type, ch->name);
+		return -EFAULT;
+	}
+
+	/* clamp to the remainder of the in-flight packet */
+	if (len > ch->current_packet)
+		len = ch->current_packet;
+
+	r = ch_read(ch, data, len, user_buf);
+	if (r > 0)
+		if (!read_intr_blocked(ch))
+			ch->notify_other_cpu(ch);
+
+	spin_lock_irqsave(&smd_lock, flags);
+	ch->current_packet -= r;
+	update_packet_state(ch);
+	spin_unlock_irqrestore(&smd_lock, flags);
+
+	return r;
+}
+
+/* as smd_packet_read(), but for use from notify callbacks where smd_lock
+ * is already held (see the locking note on smd_tiocmset_from_cb)
+ */
+static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
+					int user_buf)
+{
+	int r;
+
+	if (len < 0)
+		return -EINVAL;
+
+	if (ch->current_packet > (uint32_t)INT_MAX) {
+		pr_err("%s: Invalid packet size for Edge %d and Channel %s",
+			__func__, ch->type, ch->name);
+		return -EFAULT;
+	}
+
+	if (len > ch->current_packet)
+		len = ch->current_packet;
+
+	r = ch_read(ch, data, len, user_buf);
+	if (r > 0)
+		if (!read_intr_blocked(ch))
+			ch->notify_other_cpu(ch);
+
+	ch->current_packet -= r;
+	update_packet_state(ch);
+
+	return r;
+}
+
+/**
+ * smd_alloc() - Init local channel structure with information stored in SMEM
+ *
+ * @ch: pointer to the local structure for this channel
+ * @table_id: the id of the table this channel resides in. 1 = first table, 2 =
+ *		second table, etc
+ * @r_info: pointer to the info structure of the remote proc for this channel
+ * @returns: -EINVAL for failure; 0 for success
+ *
+ * ch must point to an allocated instance of struct smd_channel that is zeroed
+ * out, and has the n and type members already initialized to the correct
+ * values.  The SMEM FIFO region is split in half: the first half is our
+ * transmit fifo, the second half receive.
+ */
+static int smd_alloc(struct smd_channel *ch, int table_id,
+				struct remote_proc_info *r_info)
+{
+	void *buffer;
+	unsigned buffer_sz;
+	unsigned base_id;
+	unsigned fifo_id;
+
+	switch (table_id) {
+	case PRI_ALLOC_TBL:
+		base_id = SMEM_SMD_BASE_ID;
+		fifo_id = SMEM_SMD_FIFO_BASE_ID;
+		break;
+	case SEC_ALLOC_TBL:
+		base_id = SMEM_SMD_BASE_ID_2;
+		fifo_id = SMEM_SMD_FIFO_BASE_ID_2;
+		break;
+	default:
+		SMD_INFO("Invalid table_id:%d passed to smd_alloc\n", table_id);
+		return -EINVAL;
+	}
+
+	/* the shared structure layout differs for word-access edges */
+	if (is_word_access_ch(ch->type)) {
+		struct smd_shared_word_access *shared2;
+		shared2 = smem_find(base_id + ch->n, sizeof(*shared2),
+							r_info->remote_pid, 0);
+		if (!shared2) {
+			SMD_INFO("smem_find failed ch=%d\n", ch->n);
+			return -EINVAL;
+		}
+		ch->send = &shared2->ch0;
+		ch->recv = &shared2->ch1;
+	} else {
+		struct smd_shared *shared2;
+		shared2 = smem_find(base_id + ch->n, sizeof(*shared2),
+							r_info->remote_pid, 0);
+		if (!shared2) {
+			SMD_INFO("smem_find failed ch=%d\n", ch->n);
+			return -EINVAL;
+		}
+		ch->send = &shared2->ch0;
+		ch->recv = &shared2->ch1;
+	}
+	ch->half_ch = get_half_ch_funcs(ch->type);
+
+	buffer = smem_get_entry(fifo_id + ch->n, &buffer_sz,
+							r_info->remote_pid, 0);
+	if (!buffer) {
+		SMD_INFO("smem_get_entry failed\n");
+		return -EINVAL;
+	}
+
+	/* buffer must be a power-of-two size */
+	if (buffer_sz & (buffer_sz - 1)) {
+		SMD_INFO("Buffer size: %u not power of two\n", buffer_sz);
+		return -EINVAL;
+	}
+	buffer_sz /= 2;
+	ch->send_data = buffer;
+	ch->recv_data = buffer + buffer_sz;
+	ch->fifo_size = buffer_sz;
+
+	return 0;
+}
+
+/**
+ * smd_alloc_channel() - Create and init local structures for a newly allocated
+ *			SMD channel
+ *
+ * @alloc_elm: the allocation element stored in SMEM for this channel
+ * @table_id: the id of the table this channel resides in. 1 = first table, 2 =
+ *		second table, etc
+ * @r_info: pointer to the info structure of the remote proc for this channel
+ * @returns: error code for failure; 0 for success
+ */
+static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm, int table_id,
+				struct remote_proc_info *r_info)
+{
+	struct smd_channel *ch;
+
+	ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
+	if (ch == 0) {
+		pr_err("smd_alloc_channel() out of memory\n");
+		return -ENOMEM;
+	}
+	ch->n = alloc_elm->cid;
+	ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);
+
+	if (smd_alloc(ch, table_id, r_info)) {
+		kfree(ch);
+		return -ENODEV;
+	}
+
+	/* fifo_size is a power of two, so size-1 is a valid index mask */
+	ch->fifo_mask = ch->fifo_size - 1;
+
+	/* probe_worker guarantees ch->type will be a valid type */
+	if (ch->type == SMD_APPS_MODEM)
+		ch->notify_other_cpu = notify_modem_smd;
+	else if (ch->type == SMD_APPS_QDSP)
+		ch->notify_other_cpu = notify_dsp_smd;
+	else if (ch->type == SMD_APPS_DSPS)
+		ch->notify_other_cpu = notify_dsps_smd;
+	else if (ch->type == SMD_APPS_WCNSS)
+		ch->notify_other_cpu = notify_wcnss_smd;
+	else if (ch->type == SMD_APPS_Q6FW)
+		ch->notify_other_cpu = notify_modemfw_smd;
+	else if (ch->type == SMD_APPS_RPM)
+		ch->notify_other_cpu = notify_rpm_smd;
+
+	/* install the stream or packet operation table */
+	if (smd_is_packet(alloc_elm)) {
+		ch->read = smd_packet_read;
+		ch->write = smd_packet_write;
+		ch->read_avail = smd_packet_read_avail;
+		ch->write_avail = smd_packet_write_avail;
+		ch->update_state = update_packet_state;
+		ch->read_from_cb = smd_packet_read_from_cb;
+		ch->is_pkt_ch = 1;
+	} else {
+		ch->read = smd_stream_read;
+		ch->write = smd_stream_write;
+		ch->read_avail = smd_stream_read_avail;
+		ch->write_avail = smd_stream_write_avail;
+		ch->update_state = update_stream_state;
+		ch->read_from_cb = smd_stream_read;
+	}
+
+	if (is_word_access_ch(ch->type)) {
+		ch->read_from_fifo = smd_memcpy32_from_fifo;
+		ch->write_to_fifo = smd_memcpy32_to_fifo;
+	} else {
+		ch->read_from_fifo = smd_memcpy_from_fifo;
+		ch->write_to_fifo = smd_memcpy_to_fifo;
+	}
+
+	/* the name lives in shared memory, so copy it via the fifo copier
+	 * and force NUL termination locally
+	 */
+	smd_memcpy_from_fifo(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN,
+		false);
+	ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;
+
+	ch->pdev.name = ch->name;
+	ch->pdev.id = ch->type;
+
+	SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
+		ch->name, ch->n);
+
+	mutex_lock(&smd_creation_mutex);
+	list_add(&ch->ch_list, &smd_ch_closed_list);
+	mutex_unlock(&smd_creation_mutex);
+
+	platform_device_register(&ch->pdev);
+	if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
+		/* create a platform driver to be used by smd_tty driver
+		 * so that it can access the loopback port
+		 */
+		loopback_tty_pdev.id = ch->type;
+		platform_device_register(&loopback_tty_pdev);
+	}
+	return 0;
+}
+
+/* placeholder notify callback installed when a client supplies none */
+static void do_nothing_notify(void *priv, unsigned flags)
+{
+}
+
+/* workqueue body: move fully closed channels from the to-close list back
+ * to the closed (reusable) list and tell clients they may reopen
+ */
+static void finalize_channel_close_fn(struct work_struct *work)
+{
+	unsigned long flags;
+	struct smd_channel *ch;
+	struct smd_channel *index;
+
+	mutex_lock(&smd_creation_mutex);
+	spin_lock_irqsave(&smd_lock, flags);
+	list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
+		list_del(&ch->ch_list);
+		list_add(&ch->ch_list, &smd_ch_closed_list);
+		ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
+		ch->notify = do_nothing_notify;
+	}
+	spin_unlock_irqrestore(&smd_lock, flags);
+	mutex_unlock(&smd_creation_mutex);
+}
+
+/* claim a closed channel matching @name/@type, removing it from the
+ * closed list; returns NULL if no such channel is available
+ */
+struct smd_channel *smd_get_channel(const char *name, uint32_t type)
+{
+	struct smd_channel *ch;
+
+	mutex_lock(&smd_creation_mutex);
+	list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
+		if (!strcmp(name, ch->name) &&
+			(type == ch->type)) {
+			list_del(&ch->ch_list);
+			mutex_unlock(&smd_creation_mutex);
+			return ch;
+		}
+	}
+	mutex_unlock(&smd_creation_mutex);
+
+	return NULL;
+}
+
+/* open the channel @name on @edge and begin the OPENING handshake.
+ * Returns 0 with *_ch set on success; -EPROBE_DEFER before edge init,
+ * -EBUSY if already open, -EAGAIN while a previous close is finishing,
+ * -ENODEV if the channel does not exist.
+ */
+int smd_named_open_on_edge(const char *name, uint32_t edge,
+			smd_channel_t **_ch,
+			void *priv, void (*notify)(void *, unsigned))
+{
+	struct smd_channel *ch;
+	unsigned long flags;
+
+	if (!smd_edge_inited(edge)) {
+		pr_info("smd_open() before smd_init()\n");
+		return -EPROBE_DEFER;
+	}
+
+	pr_info("smd_open('%s', %p, %p)\n", name, priv, notify);
+
+	ch = smd_get_channel(name, edge);
+	if (!ch) {
+		spin_lock_irqsave(&smd_lock, flags);
+		/* check opened list for port */
+		list_for_each_entry(ch,
+			&remote_info[edge_to_pids[edge].remote_pid].ch_list,
+			ch_list) {
+			if (!strcmp(name, ch->name)) {
+				/* channel is already open */
+				spin_unlock_irqrestore(&smd_lock, flags);
+				pr_info("smd_open: channel '%s' already open\n",
+					ch->name);
+				return -EBUSY;
+			}
+		}
+
+		/* check closing list for port */
+		list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
+			if (!strncmp(name, ch->name, 20) &&
+				(edge == ch->type)) {
+				/* channel exists, but is being closed */
+				spin_unlock_irqrestore(&smd_lock, flags);
+				return -EAGAIN;
+			}
+		}
+
+		/* check closing workqueue list for port */
+		list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
+			if (!strncmp(name, ch->name, 20) &&
+				(edge == ch->type)) {
+				/* channel exists, but is being closed */
+				spin_unlock_irqrestore(&smd_lock, flags);
+				return -EAGAIN;
+			}
+		}
+		spin_unlock_irqrestore(&smd_lock, flags);
+
+		/* one final check to handle closing->closed race condition */
+		ch = smd_get_channel(name, edge);
+		if (!ch)
+			return -ENODEV;
+	}
+
+	if (notify == 0)
+		notify = do_nothing_notify;
+
+	ch->notify = notify;
+	ch->current_packet = 0;
+	ch->last_state = SMD_SS_CLOSED;
+	ch->priv = priv;
+
+	*_ch = ch;
+
+	pr_info("smd_open: opening '%s'\n", ch->name);
+
+	spin_lock_irqsave(&smd_lock, flags);
+	list_add(&ch->ch_list,
+		&remote_info[edge_to_pids[ch->type].remote_pid].ch_list);
+
+	pr_info("%s: opening ch %d\n", __func__, ch->n);
+
+	smd_state_change(ch, ch->last_state, SMD_SS_OPENING);
+
+	spin_unlock_irqrestore(&smd_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(smd_named_open_on_edge);
+
+/* close @ch: drop it from its edge list and either park it on the closing
+ * list (remote still open, handshake pending) or return it directly to
+ * the closed list
+ */
+int smd_close(smd_channel_t *ch)
+{
+	unsigned long flags;
+
+	if (ch == 0)
+		return -EINVAL;
+
+	SMD_INFO("smd_close(%s)\n", ch->name);
+
+	spin_lock_irqsave(&smd_lock, flags);
+	list_del(&ch->ch_list);
+
+	ch_set_state(ch, SMD_SS_CLOSED);
+
+	if (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED) {
+		/* remote side still open: wait on closing list for its
+		 * state change before recycling the channel
+		 */
+		list_add(&ch->ch_list, &smd_ch_closing_list);
+		spin_unlock_irqrestore(&smd_lock, flags);
+	} else {
+		spin_unlock_irqrestore(&smd_lock, flags);
+		ch->notify = do_nothing_notify;
+		mutex_lock(&smd_creation_mutex);
+		list_add(&ch->ch_list, &smd_ch_closed_list);
+		mutex_unlock(&smd_creation_mutex);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(smd_close);
+
+/* begin a segmented packet write of @len payload bytes on a packet
+ * channel: records the pending size and writes the packet header.
+ * Segments follow via smd_write_segment().
+ */
+int smd_write_start(smd_channel_t *ch, int len)
+{
+	int ret;
+	unsigned hdr[5];
+
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+	if (!ch->is_pkt_ch) {
+		pr_err("%s: non-packet channel specified\n", __func__);
+		return -EACCES;
+	}
+	if (len < 1) {
+		pr_err("%s: invalid length: %d\n", __func__, len);
+		return -EINVAL;
+	}
+
+	/* only one segmented packet may be in flight per channel */
+	if (ch->pending_pkt_sz) {
+		pr_err("%s: packet of size: %d in progress\n", __func__,
+			ch->pending_pkt_sz);
+		return -EBUSY;
+	}
+	ch->pending_pkt_sz = len;
+
+	if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
+		ch->pending_pkt_sz = 0;
+		SMD_DBG("%s: no space to write packet header\n", __func__);
+		return -EAGAIN;
+	}
+
+	hdr[0] = len;
+	hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
+
+
+	ret = smd_stream_write(ch, hdr, sizeof(hdr), 0, true);
+	if (ret < 0 || ret != sizeof(hdr)) {
+		/* avail was checked above, so a short header write should
+		 * not happen; reset state and report
+		 */
+		ch->pending_pkt_sz = 0;
+		pr_err("%s: packet header failed to write\n", __func__);
+		return -EPERM;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(smd_write_start);
+
+/* write the next @len payload bytes of a packet started with
+ * smd_write_start(); returns bytes written and decrements the pending
+ * packet size accordingly
+ */
+int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
+{
+	int bytes_written;
+
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+	if (len < 1) {
+		pr_err("%s: invalid length: %d\n", __func__, len);
+		return -EINVAL;
+	}
+
+	if (!ch->pending_pkt_sz) {
+		pr_err("%s: no transaction in progress\n", __func__);
+		return -ENOEXEC;
+	}
+	/* compare directly instead of testing (pending_pkt_sz - len < 0):
+	 * if pending_pkt_sz is an unsigned type the subtraction is computed
+	 * modulo 2^N and can never be negative, which would defeat the
+	 * overrun check entirely
+	 */
+	if (len > ch->pending_pkt_sz) {
+		pr_err("%s: segment of size: %d will make packet go over length\n",
+				__func__, len);
+		return -EINVAL;
+	}
+
+	bytes_written = smd_stream_write(ch, data, len, user_buf, true);
+
+	ch->pending_pkt_sz -= bytes_written;
+
+	return bytes_written;
+}
+EXPORT_SYMBOL(smd_write_segment);
+
+/* finish a segmented packet write; fails with -E2BIG if the declared
+ * packet length has not been fully written yet
+ */
+int smd_write_end(smd_channel_t *ch)
+{
+
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+	if (ch->pending_pkt_sz) {
+		pr_err("%s: current packet not completely written\n", __func__);
+		return -E2BIG;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(smd_write_end);
+
+/* writable payload bytes for the next segment of a packet write */
+int smd_write_segment_avail(smd_channel_t *ch)
+{
+	int n;
+
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+	if (!ch->is_pkt_ch) {
+		pr_err("%s: non-packet channel specified\n", __func__);
+		return -ENODEV;
+	}
+
+	n = smd_stream_write_avail(ch);
+
+	/* pkt hdr already written, no need to reserve space for it */
+	if (ch->pending_pkt_sz)
+		return n;
+
+	return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
+}
+EXPORT_SYMBOL(smd_write_segment_avail);
+
+/* Public read/write entry points.  Each dispatches through the per-channel
+ * op table (stream or packet variant, chosen in smd_alloc_channel()); the
+ * *_user_buffer variants flag @data as a userspace pointer for the copier.
+ */
+int smd_read(smd_channel_t *ch, void *data, int len)
+{
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+
+	return ch->read(ch, data, len, 0);
+}
+EXPORT_SYMBOL(smd_read);
+
+int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
+{
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+
+	return ch->read(ch, data, len, 1);
+}
+EXPORT_SYMBOL(smd_read_user_buffer);
+
+/* read variant safe to call from notify callbacks (smd_lock held) */
+int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
+{
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+
+	return ch->read_from_cb(ch, data, len, 0);
+}
+EXPORT_SYMBOL(smd_read_from_cb);
+
+/* whole-buffer write; refused while a segmented write is in progress */
+int smd_write(smd_channel_t *ch, const void *data, int len)
+{
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+
+	return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0, true);
+}
+EXPORT_SYMBOL(smd_write);
+
+int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
+{
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+
+	return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1, true);
+}
+EXPORT_SYMBOL(smd_write_user_buffer);
+
+/* readable bytes on @ch (stream bytes, or remaining packet payload) */
+int smd_read_avail(smd_channel_t *ch)
+{
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+
+	/* guard against a corrupt packet size leaking out as a huge avail */
+	if (ch->current_packet > (uint32_t)INT_MAX) {
+		pr_err("%s: Invalid packet size for Edge %d and Channel %s",
+			__func__, ch->type, ch->name);
+		return -EFAULT;
+	}
+	return ch->read_avail(ch);
+}
+EXPORT_SYMBOL(smd_read_avail);
+
+/* writable bytes on @ch (header already accounted for packet channels) */
+int smd_write_avail(smd_channel_t *ch)
+{
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+
+	return ch->write_avail(ch);
+}
+EXPORT_SYMBOL(smd_write_avail);
+
+/* clear/set our fBLOCKREADINTR flag, which tells the remote whether it
+ * should interrupt us when it writes data for us to read
+ */
+void smd_enable_read_intr(smd_channel_t *ch)
+{
+	if (ch)
+		ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
+}
+EXPORT_SYMBOL(smd_enable_read_intr);
+
+void smd_disable_read_intr(smd_channel_t *ch)
+{
+	if (ch)
+		ch->half_ch->set_fBLOCKREADINTR(ch->send, 1);
+}
+EXPORT_SYMBOL(smd_disable_read_intr);
+
+/**
+ * Enable/disable receive interrupts for the remote processor used by a
+ * particular channel.
+ * @ch:     open channel handle to use for the edge
+ * @mask:   1 = mask interrupts; 0 = unmask interrupts
+ * @cpumask cpumask for the next cpu scheduled to be woken up
+ * @returns: 0 for success; < 0 for failure
+ *
+ * Note that this enables/disables all interrupts from the remote subsystem for
+ * all channels. As such, it should be used with care and only for specific
+ * use cases such as power-collapse sequencing.
+ */
+int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask,
+		const struct cpumask *cpumask)
+{
+	struct irq_chip *irq_chip;
+	struct irq_data *irq_data;
+	struct interrupt_config_item *int_cfg;
+
+	if (!ch)
+		return -EINVAL;
+
+	if (ch->type >= ARRAY_SIZE(edge_to_pids))
+		return -ENODEV;
+
+	int_cfg = &private_intr_config[edge_to_pids[ch->type].remote_pid].smd;
+
+	if (int_cfg->irq_id < 0)
+		return -ENODEV;
+
+	irq_chip = irq_get_chip(int_cfg->irq_id);
+	if (!irq_chip)
+		return -ENODEV;
+
+	irq_data = irq_get_irq_data(int_cfg->irq_id);
+	if (!irq_data)
+		return -ENODEV;
+
+	/* NOTE(review): irq_mask/irq_unmask are called unconditionally —
+	 * assumes every chip backing these IRQs implements both; confirm
+	 */
+	if (mask) {
+		SMD_POWER_INFO("SMD Masking interrupts from %s\n",
+				edge_to_pids[ch->type].subsys_name);
+		irq_chip->irq_mask(irq_data);
+		if (cpumask)
+			irq_set_affinity(int_cfg->irq_id, cpumask);
+	} else {
+		SMD_POWER_INFO("SMD Unmasking interrupts from %s\n",
+				edge_to_pids[ch->type].subsys_name);
+		irq_chip->irq_unmask(irq_data);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(smd_mask_receive_interrupt);
+
+/* remaining payload bytes of the packet currently being received; 0 when
+ * no packet is in flight
+ */
+int smd_cur_packet_size(smd_channel_t *ch)
+{
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+
+	if (ch->current_packet > (uint32_t)INT_MAX) {
+		pr_err("%s: Invalid packet size for Edge %d and Channel %s",
+			__func__, ch->type, ch->name);
+		return -EFAULT;
+	}
+	return ch->current_packet;
+}
+EXPORT_SYMBOL(smd_cur_packet_size);
+
+/* report the modem-control line state as TIOCM_* bits: remote-driven
+ * lines come from recv, our own asserted lines map to RTS/DTR from send
+ */
+int smd_tiocmget(smd_channel_t *ch)
+{
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+
+	return  (ch->half_ch->get_fDSR(ch->recv) ? TIOCM_DSR : 0) |
+		(ch->half_ch->get_fCTS(ch->recv) ? TIOCM_CTS : 0) |
+		(ch->half_ch->get_fCD(ch->recv) ? TIOCM_CD : 0) |
+		(ch->half_ch->get_fRI(ch->recv) ? TIOCM_RI : 0) |
+		(ch->half_ch->get_fCTS(ch->send) ? TIOCM_RTS : 0) |
+		(ch->half_ch->get_fDSR(ch->send) ? TIOCM_DTR : 0);
+}
+EXPORT_SYMBOL(smd_tiocmget);
+
+/* this api will be called while holding smd_lock */
+int
+smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
+{
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+
+	/* DTR/RTS requests map onto our DSR/CTS shared flags */
+	if (set & TIOCM_DTR)
+		ch->half_ch->set_fDSR(ch->send, 1);
+
+	if (set & TIOCM_RTS)
+		ch->half_ch->set_fCTS(ch->send, 1);
+
+	if (clear & TIOCM_DTR)
+		ch->half_ch->set_fDSR(ch->send, 0);
+
+	if (clear & TIOCM_RTS)
+		ch->half_ch->set_fCTS(ch->send, 0);
+
+	/* raise the state flag and interrupt the remote so it re-reads
+	 * the signal lines
+	 */
+	ch->half_ch->set_fSTATE(ch->send, 1);
+	barrier();
+	ch->notify_other_cpu(ch);
+
+	return 0;
+}
+EXPORT_SYMBOL(smd_tiocmset_from_cb);
+
+/* locked wrapper around smd_tiocmset_from_cb() for general callers */
+int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
+{
+	unsigned long flags;
+
+	if (!ch) {
+		pr_err("%s: Invalid channel specified\n", __func__);
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&smd_lock, flags);
+	smd_tiocmset_from_cb(ch, set, clear);
+	spin_unlock_irqrestore(&smd_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(smd_tiocmset);
+
+/* 1 if a packet is ready (re-scanning headers if needed), else 0;
+ * -EINVAL on NULL or non-packet channels
+ */
+int smd_is_pkt_avail(smd_channel_t *ch)
+{
+	unsigned long flags;
+
+	if (!ch || !ch->is_pkt_ch)
+		return -EINVAL;
+
+	if (ch->current_packet)
+		return 1;
+
+	spin_lock_irqsave(&smd_lock, flags);
+	update_packet_state(ch);
+	spin_unlock_irqrestore(&smd_lock, flags);
+
+	return ch->current_packet ? 1 : 0;
+}
+EXPORT_SYMBOL(smd_is_pkt_avail);
+
+static int smsm_cb_init(void)
+{
+ struct smsm_state_info *state_info;
+ int n;
+ int ret = 0;
+
+ smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
+ GFP_KERNEL);
+
+ if (!smsm_states) {
+ pr_err("%s: SMSM init failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ smsm_cb_wq = create_singlethread_workqueue("smsm_cb_wq");
+ if (!smsm_cb_wq) {
+ pr_err("%s: smsm_cb_wq creation failed\n", __func__);
+ kfree(smsm_states);
+ return -EFAULT;
+ }
+
+ mutex_lock(&smsm_lock);
+ for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
+ state_info = &smsm_states[n];
+ state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
+ state_info->intr_mask_set = 0x0;
+ state_info->intr_mask_clear = 0x0;
+ INIT_LIST_HEAD(&state_info->callbacks);
+ }
+ mutex_unlock(&smsm_lock);
+
+ return ret;
+}
+
+static int smsm_init(void)
+{
+ int i;
+ struct smsm_size_info_type *smsm_size_info;
+ unsigned long flags;
+ unsigned long j_start;
+ static int first = 1;
+ remote_spinlock_t *remote_spinlock;
+
+ if (!first)
+ return 0;
+ first = 0;
+
+ /* Verify that remote spinlock is not deadlocked */
+ remote_spinlock = smem_get_remote_spinlock();
+ j_start = jiffies;
+ while (!remote_spin_trylock_irqsave(remote_spinlock, flags)) {
+ if (jiffies_to_msecs(jiffies - j_start) > RSPIN_INIT_WAIT_MS) {
+ panic("%s: Remote processor %d will not release spinlock\n",
+ __func__, remote_spin_owner(remote_spinlock));
+ }
+ }
+ remote_spin_unlock_irqrestore(remote_spinlock, flags);
+
+ smsm_size_info = smem_find(SMEM_SMSM_SIZE_INFO,
+ sizeof(struct smsm_size_info_type), 0,
+ SMEM_ANY_HOST_FLAG);
+ if (smsm_size_info) {
+ SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
+ SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
+ }
+
+ i = kfifo_alloc(&smsm_snapshot_fifo,
+ sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
+ GFP_KERNEL);
+ if (i) {
+ pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
+ return i;
+ }
+ wakeup_source_init(&smsm_snapshot_ws, "smsm_snapshot");
+
+ if (!smsm_info.state) {
+ smsm_info.state = smem_alloc(ID_SHARED_STATE,
+ SMSM_NUM_ENTRIES *
+ sizeof(uint32_t), 0,
+ SMEM_ANY_HOST_FLAG);
+
+ if (smsm_info.state)
+ __raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
+ }
+
+ if (!smsm_info.intr_mask) {
+ smsm_info.intr_mask = smem_alloc(SMEM_SMSM_CPU_INTR_MASK,
+ SMSM_NUM_ENTRIES *
+ SMSM_NUM_HOSTS *
+ sizeof(uint32_t), 0,
+ SMEM_ANY_HOST_FLAG);
+
+ if (smsm_info.intr_mask) {
+ for (i = 0; i < SMSM_NUM_ENTRIES; i++)
+ __raw_writel(0x0,
+ SMSM_INTR_MASK_ADDR(i, SMSM_APPS));
+
+ /* Configure legacy modem bits */
+ __raw_writel(LEGACY_MODEM_SMSM_MASK,
+ SMSM_INTR_MASK_ADDR(SMSM_MODEM_STATE,
+ SMSM_APPS));
+ }
+ }
+
+ i = smsm_cb_init();
+ if (i)
+ return i;
+
+ wmb();
+
+ smsm_pm_notifier(&smsm_pm_nb, PM_POST_SUSPEND, NULL);
+ i = register_pm_notifier(&smsm_pm_nb);
+ if (i)
+ pr_err("%s: power state notif error %d\n", __func__, i);
+
+ return 0;
+}
+
+static void smsm_cb_snapshot(uint32_t use_wakeup_source)
+{
+ int n;
+ uint32_t new_state;
+ unsigned long flags;
+ int ret;
+ uint64_t timestamp;
+
+ timestamp = sched_clock();
+ ret = kfifo_avail(&smsm_snapshot_fifo);
+ if (ret < SMSM_SNAPSHOT_SIZE) {
+ pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
+ return;
+ }
+
+ /*
+ * To avoid a race condition with notify_smsm_cb_clients_worker, the
+ * following sequence must be followed:
+ * 1) increment snapshot count
+ * 2) insert data into FIFO
+ *
+ * Potentially in parallel, the worker:
+ * a) verifies >= 1 snapshots are in FIFO
+ * b) processes snapshot
+ * c) decrements reference count
+ *
+ * This order ensures that 1 will always occur before abc.
+ */
+ if (use_wakeup_source) {
+ spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
+ if (smsm_snapshot_count == 0) {
+ SMSM_POWER_INFO("SMSM snapshot wake lock\n");
+ __pm_stay_awake(&smsm_snapshot_ws);
+ }
+ ++smsm_snapshot_count;
+ spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
+ }
+
+ /* queue state entries */
+ for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
+ new_state = __raw_readl(SMSM_STATE_ADDR(n));
+
+ ret = kfifo_in(&smsm_snapshot_fifo,
+ &new_state, sizeof(new_state));
+ if (ret != sizeof(new_state)) {
+ pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
+ goto restore_snapshot_count;
+ }
+ }
+
+ ret = kfifo_in(&smsm_snapshot_fifo, &timestamp, sizeof(timestamp));
+ if (ret != sizeof(timestamp)) {
+ pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
+ goto restore_snapshot_count;
+ }
+
+ /* queue wakelock usage flag */
+ ret = kfifo_in(&smsm_snapshot_fifo,
+ &use_wakeup_source, sizeof(use_wakeup_source));
+ if (ret != sizeof(use_wakeup_source)) {
+ pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
+ goto restore_snapshot_count;
+ }
+
+ queue_work(smsm_cb_wq, &smsm_cb_work);
+ return;
+
+restore_snapshot_count:
+ if (use_wakeup_source) {
+ spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
+ if (smsm_snapshot_count) {
+ --smsm_snapshot_count;
+ if (smsm_snapshot_count == 0) {
+ SMSM_POWER_INFO("SMSM snapshot wake unlock\n");
+ __pm_relax(&smsm_snapshot_ws);
+ }
+ } else {
+ pr_err("%s: invalid snapshot count\n", __func__);
+ }
+ spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
+ }
+}
+
+static irqreturn_t smsm_irq_handler(int irq, void *data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&smem_lock, flags);
+ if (!smsm_info.state) {
+ SMSM_INFO("<SM NO STATE>\n");
+ } else {
+ unsigned old_apps, apps;
+ unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));
+
+ old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));
+
+ SMSM_DBG("<SM %08x %08x>\n", apps, modm);
+ if (modm & SMSM_RESET) {
+ pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
+ } else if (modm & SMSM_INIT) {
+ if (!(apps & SMSM_INIT))
+ apps |= SMSM_INIT;
+ if (modm & SMSM_SMDINIT)
+ apps |= SMSM_SMDINIT;
+ }
+
+ if (old_apps != apps) {
+ SMSM_DBG("<SM %08x NOTIFY>\n", apps);
+ __raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
+ notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
+ }
+
+ smsm_cb_snapshot(1);
+ }
+ spin_unlock_irqrestore(&smem_lock, flags);
+ return IRQ_HANDLED;
+}
+
+irqreturn_t smsm_modem_irq_handler(int irq, void *data)
+{
+ SMSM_POWER_INFO("SMSM Int Modem->Apps\n");
+ ++interrupt_stats[SMD_MODEM].smsm_in_count;
+ return smsm_irq_handler(irq, data);
+}
+
+irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
+{
+ SMSM_POWER_INFO("SMSM Int LPASS->Apps\n");
+ ++interrupt_stats[SMD_Q6].smsm_in_count;
+ return smsm_irq_handler(irq, data);
+}
+
+irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
+{
+ SMSM_POWER_INFO("SMSM Int DSPS->Apps\n");
+ ++interrupt_stats[SMD_DSPS].smsm_in_count;
+ return smsm_irq_handler(irq, data);
+}
+
+irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
+{
+ SMSM_POWER_INFO("SMSM Int WCNSS->Apps\n");
+ ++interrupt_stats[SMD_WCNSS].smsm_in_count;
+ return smsm_irq_handler(irq, data);
+}
+
+/*
+ * Changes the global interrupt mask. The set and clear masks are re-applied
+ * every time the global interrupt mask is updated for callback registration
+ * and de-registration.
+ *
+ * The clear mask is applied first, so if a bit is set to 1 in both the clear
+ * mask and the set mask, the result will be that the interrupt is set.
+ *
+ * @smsm_entry SMSM entry to change
+ * @clear_mask 1 = clear bit, 0 = no-op
+ * @set_mask 1 = set bit, 0 = no-op
+ *
+ * @returns 0 for success, < 0 for error
+ */
+int smsm_change_intr_mask(uint32_t smsm_entry,
+ uint32_t clear_mask, uint32_t set_mask)
+{
+ uint32_t old_mask, new_mask;
+ unsigned long flags;
+
+ if (smsm_entry >= SMSM_NUM_ENTRIES) {
+ pr_err("smsm_change_state: Invalid entry %d\n",
+ smsm_entry);
+ return -EINVAL;
+ }
+
+ if (!smsm_info.intr_mask) {
+ pr_err("smsm_change_intr_mask <SM NO STATE>\n");
+ return -EIO;
+ }
+
+ spin_lock_irqsave(&smem_lock, flags);
+ smsm_states[smsm_entry].intr_mask_clear = clear_mask;
+ smsm_states[smsm_entry].intr_mask_set = set_mask;
+
+ old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
+ new_mask = (old_mask & ~clear_mask) | set_mask;
+ __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
+
+ wmb();
+ spin_unlock_irqrestore(&smem_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(smsm_change_intr_mask);
+
+int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
+{
+ if (smsm_entry >= SMSM_NUM_ENTRIES) {
+ pr_err("smsm_change_state: Invalid entry %d\n",
+ smsm_entry);
+ return -EINVAL;
+ }
+
+ if (!smsm_info.intr_mask) {
+ pr_err("smsm_change_intr_mask <SM NO STATE>\n");
+ return -EIO;
+ }
+
+ *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
+ return 0;
+}
+EXPORT_SYMBOL(smsm_get_intr_mask);
+
+int smsm_change_state(uint32_t smsm_entry,
+ uint32_t clear_mask, uint32_t set_mask)
+{
+ unsigned long flags;
+ uint32_t old_state, new_state;
+
+ if (smsm_entry >= SMSM_NUM_ENTRIES) {
+ pr_err("smsm_change_state: Invalid entry %d",
+ smsm_entry);
+ return -EINVAL;
+ }
+
+ if (!smsm_info.state) {
+ pr_err("smsm_change_state <SM NO STATE>\n");
+ return -EIO;
+ }
+ spin_lock_irqsave(&smem_lock, flags);
+
+ old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
+ new_state = (old_state & ~clear_mask) | set_mask;
+ __raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
+ SMSM_POWER_INFO("%s %d:%08x->%08x", __func__, smsm_entry,
+ old_state, new_state);
+ notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));
+
+ spin_unlock_irqrestore(&smem_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(smsm_change_state);
+
+uint32_t smsm_get_state(uint32_t smsm_entry)
+{
+ uint32_t rv = 0;
+
+ /* needs interface change to return error code */
+ if (smsm_entry >= SMSM_NUM_ENTRIES) {
+ pr_err("smsm_change_state: Invalid entry %d",
+ smsm_entry);
+ return 0;
+ }
+
+ if (!smsm_info.state)
+ pr_err("smsm_get_state <SM NO STATE>\n");
+ else
+ rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
+
+ return rv;
+}
+EXPORT_SYMBOL(smsm_get_state);
+
+/**
+ * Performs SMSM callback client notification.
+ */
+void notify_smsm_cb_clients_worker(struct work_struct *work)
+{
+ struct smsm_state_cb_info *cb_info;
+ struct smsm_state_info *state_info;
+ int n;
+ uint32_t new_state;
+ uint32_t state_changes;
+ uint32_t use_wakeup_source;
+ int ret;
+ unsigned long flags;
+ uint64_t t_snapshot;
+ uint64_t t_start;
+ unsigned long nanosec_rem;
+
+ while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
+ t_start = sched_clock();
+ mutex_lock(&smsm_lock);
+ for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
+ state_info = &smsm_states[n];
+
+ ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
+ sizeof(new_state));
+ if (ret != sizeof(new_state)) {
+ pr_err("%s: snapshot underflow %d\n",
+ __func__, ret);
+ mutex_unlock(&smsm_lock);
+ return;
+ }
+
+ state_changes = state_info->last_value ^ new_state;
+ if (state_changes) {
+ SMSM_POWER_INFO("SMSM Change %d: %08x->%08x\n",
+ n, state_info->last_value,
+ new_state);
+ list_for_each_entry(cb_info,
+ &state_info->callbacks, cb_list) {
+
+ if (cb_info->mask & state_changes)
+ cb_info->notify(cb_info->data,
+ state_info->last_value,
+ new_state);
+ }
+ state_info->last_value = new_state;
+ }
+ }
+
+ ret = kfifo_out(&smsm_snapshot_fifo, &t_snapshot,
+ sizeof(t_snapshot));
+ if (ret != sizeof(t_snapshot)) {
+ pr_err("%s: snapshot underflow %d\n",
+ __func__, ret);
+ mutex_unlock(&smsm_lock);
+ return;
+ }
+
+ /* read wakelock flag */
+ ret = kfifo_out(&smsm_snapshot_fifo, &use_wakeup_source,
+ sizeof(use_wakeup_source));
+ if (ret != sizeof(use_wakeup_source)) {
+ pr_err("%s: snapshot underflow %d\n",
+ __func__, ret);
+ mutex_unlock(&smsm_lock);
+ return;
+ }
+ mutex_unlock(&smsm_lock);
+
+ if (use_wakeup_source) {
+ spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
+ if (smsm_snapshot_count) {
+ --smsm_snapshot_count;
+ if (smsm_snapshot_count == 0) {
+ SMSM_POWER_INFO(
+ "SMSM snapshot wake unlock\n");
+ __pm_relax(&smsm_snapshot_ws);
+ }
+ } else {
+ pr_err("%s: invalid snapshot count\n",
+ __func__);
+ }
+ spin_unlock_irqrestore(&smsm_snapshot_count_lock,
+ flags);
+ }
+
+ t_start = t_start - t_snapshot;
+ nanosec_rem = do_div(t_start, 1000000000U);
+ SMSM_POWER_INFO(
+ "SMSM snapshot queue response time %6u.%09lu s\n",
+ (unsigned)t_start, nanosec_rem);
+ }
+}
+
+
+/**
+ * Registers callback for SMSM state notifications when the specified
+ * bits change.
+ *
+ * @smsm_entry Processor entry to register
+ * @mask Bits to register (callback is invoked when any of these change)
+ * @notify Notification function to register
+ * @data Opaque data passed in to callback
+ *
+ * @returns Status code
+ * <0 error code
+ * 0 inserted new entry
+ * 1 updated mask of existing entry
+ */
+int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
+ void (*notify)(void *, uint32_t, uint32_t), void *data)
+{
+ struct smsm_state_info *state;
+ struct smsm_state_cb_info *cb_info;
+ struct smsm_state_cb_info *cb_found = 0;
+ uint32_t new_mask = 0;
+ int ret = 0;
+
+ if (smsm_entry >= SMSM_NUM_ENTRIES)
+ return -EINVAL;
+
+ mutex_lock(&smsm_lock);
+
+ if (!smsm_states) {
+ /* smsm not yet initialized */
+ ret = -ENODEV;
+ goto cleanup;
+ }
+
+ state = &smsm_states[smsm_entry];
+ list_for_each_entry(cb_info,
+ &state->callbacks, cb_list) {
+ if (!ret && (cb_info->notify == notify) &&
+ (cb_info->data == data)) {
+ cb_info->mask |= mask;
+ cb_found = cb_info;
+ ret = 1;
+ }
+ new_mask |= cb_info->mask;
+ }
+
+ if (!cb_found) {
+ cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
+ GFP_ATOMIC);
+ if (!cb_info) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ cb_info->mask = mask;
+ cb_info->notify = notify;
+ cb_info->data = data;
+ INIT_LIST_HEAD(&cb_info->cb_list);
+ list_add_tail(&cb_info->cb_list,
+ &state->callbacks);
+ new_mask |= mask;
+ }
+
+ /* update interrupt notification mask */
+ if (smsm_entry == SMSM_MODEM_STATE)
+ new_mask |= LEGACY_MODEM_SMSM_MASK;
+
+ if (smsm_info.intr_mask) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&smem_lock, flags);
+ new_mask = (new_mask & ~state->intr_mask_clear)
+ | state->intr_mask_set;
+ __raw_writel(new_mask,
+ SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
+ wmb();
+ spin_unlock_irqrestore(&smem_lock, flags);
+ }
+
+cleanup:
+ mutex_unlock(&smsm_lock);
+ return ret;
+}
+EXPORT_SYMBOL(smsm_state_cb_register);
+
+
+/**
+ * Deregisters for SMSM state notifications for the specified bits.
+ *
+ * @smsm_entry Processor entry to deregister
+ * @mask Bits to deregister (if result is 0, callback is removed)
+ * @notify Notification function to deregister
+ * @data Opaque data passed in to callback
+ *
+ * @returns Status code
+ * <0 error code
+ * 0 not found
+ * 1 updated mask
+ * 2 removed callback
+ */
+int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
+ void (*notify)(void *, uint32_t, uint32_t), void *data)
+{
+ struct smsm_state_cb_info *cb_info;
+ struct smsm_state_cb_info *cb_tmp;
+ struct smsm_state_info *state;
+ uint32_t new_mask = 0;
+ int ret = 0;
+
+ if (smsm_entry >= SMSM_NUM_ENTRIES)
+ return -EINVAL;
+
+ mutex_lock(&smsm_lock);
+
+ if (!smsm_states) {
+ /* smsm not yet initialized */
+ mutex_unlock(&smsm_lock);
+ return -ENODEV;
+ }
+
+ state = &smsm_states[smsm_entry];
+ list_for_each_entry_safe(cb_info, cb_tmp,
+ &state->callbacks, cb_list) {
+ if (!ret && (cb_info->notify == notify) &&
+ (cb_info->data == data)) {
+ cb_info->mask &= ~mask;
+ ret = 1;
+ if (!cb_info->mask) {
+ /* no mask bits set, remove callback */
+ list_del(&cb_info->cb_list);
+ kfree(cb_info);
+ ret = 2;
+ continue;
+ }
+ }
+ new_mask |= cb_info->mask;
+ }
+
+ /* update interrupt notification mask */
+ if (smsm_entry == SMSM_MODEM_STATE)
+ new_mask |= LEGACY_MODEM_SMSM_MASK;
+
+ if (smsm_info.intr_mask) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&smem_lock, flags);
+ new_mask = (new_mask & ~state->intr_mask_clear)
+ | state->intr_mask_set;
+ __raw_writel(new_mask,
+ SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
+ wmb();
+ spin_unlock_irqrestore(&smem_lock, flags);
+ }
+
+ mutex_unlock(&smsm_lock);
+ return ret;
+}
+EXPORT_SYMBOL(smsm_state_cb_deregister);
+
+static int restart_notifier_cb(struct notifier_block *this,
+ unsigned long code,
+ void *data);
+
+static struct restart_notifier_block restart_notifiers[] = {
+ {SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
+ {SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
+ {SMD_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
+ {SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
+ {SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
+ {SMD_Q6, "adsp", .nb.notifier_call = restart_notifier_cb},
+};
+
+static int restart_notifier_cb(struct notifier_block *this,
+ unsigned long code,
+ void *data)
+{
+ remote_spinlock_t *remote_spinlock;
+
+ /*
+ * Some SMD or SMSM clients assume SMD/SMSM SSR handling will be
+ * done in the AFTER_SHUTDOWN level. If this ever changes, extra
+ * care should be taken to verify no clients are broken.
+ */
+ if (code == SUBSYS_AFTER_SHUTDOWN) {
+ struct restart_notifier_block *notifier;
+
+ notifier = container_of(this,
+ struct restart_notifier_block, nb);
+ SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
+ __func__, notifier->processor,
+ notifier->name);
+
+ remote_spinlock = smem_get_remote_spinlock();
+ remote_spin_release(remote_spinlock, notifier->processor);
+ remote_spin_release_all(notifier->processor);
+
+ smd_channel_reset(notifier->processor);
+ }
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * smd_post_init() - SMD post initialization
+ * @remote_pid: remote pid that has been initialized. Ignored when is_legacy=1
+ *
+ * This function is used by the device tree initialization to complete the SMD
+ * init sequence.
+ */
+void smd_post_init(unsigned remote_pid)
+{
+ schedule_work(&remote_info[remote_pid].probe_work);
+}
+
+/**
+ * smsm_post_init() - SMSM post initialization
+ * @returns: 0 for success, standard Linux error code otherwise
+ *
+ * This function is used by the legacy and device tree initialization
+ * to complete the SMSM init sequence.
+ */
+int smsm_post_init(void)
+{
+ int ret;
+
+ ret = smsm_init();
+ if (ret) {
+ pr_err("smsm_init() failed ret = %d\n", ret);
+ return ret;
+ }
+ smsm_irq_handler(0, 0);
+
+ return ret;
+}
+
+/**
+ * smd_get_intr_config() - Get interrupt configuration structure
+ * @edge: edge type identifies local and remote processor
+ * @returns: pointer to interrupt configuration
+ *
+ * This function returns the interrupt configuration of remote processor
+ * based on the edge type.
+ */
+struct interrupt_config *smd_get_intr_config(uint32_t edge)
+{
+ if (edge >= ARRAY_SIZE(edge_to_pids))
+ return NULL;
+ return &private_intr_config[edge_to_pids[edge].remote_pid];
+}
+
+/**
+ * smd_edge_to_remote_pid() - Get the remote processor ID
+ * @edge: edge type identifies local and remote processor
+ * @returns: remote processor ID
+ *
+ * This function returns remote processor ID based on edge type.
+ */
+int smd_edge_to_remote_pid(uint32_t edge)
+{
+ if (edge >= ARRAY_SIZE(edge_to_pids))
+ return -EINVAL;
+ return edge_to_pids[edge].remote_pid;
+}
+
+/**
+ * smd_edge_to_local_pid() - Get the local processor ID
+ * @edge: edge type identifies local and remote processor
+ * @returns: local processor ID
+ *
+ * This function returns local processor ID based on edge type.
+ */
+int smd_edge_to_local_pid(uint32_t edge)
+{
+ if (edge >= ARRAY_SIZE(edge_to_pids))
+ return -EINVAL;
+ return edge_to_pids[edge].local_pid;
+}
+
+/**
+ * smd_proc_set_skip_pil() - Mark whether the indicated processor is loaded by PIL
+ * @pid: the processor id to mark
+ * @skip_pil: true if @pid cannot be loaded by PIL
+ */
+void smd_proc_set_skip_pil(unsigned pid, bool skip_pil)
+{
+ if (pid >= NUM_SMD_SUBSYSTEMS) {
+ pr_err("%s: invalid pid:%d\n", __func__, pid);
+ return;
+ }
+ remote_info[pid].skip_pil = skip_pil;
+}
+
+/**
+ * smd_set_edge_subsys_name() - Set the subsystem name
+ * @edge: edge type identifies local and remote processor
+ * @subsys_name: pointer to subsystem name
+ *
+ * This function is used to set the subsystem name for given edge type.
+ */
+void smd_set_edge_subsys_name(uint32_t edge, const char *subsys_name)
+{
+ if (edge < ARRAY_SIZE(edge_to_pids))
+ if (subsys_name)
+ strlcpy(edge_to_pids[edge].subsys_name,
+ subsys_name, SMD_MAX_CH_NAME_LEN);
+ else
+ strlcpy(edge_to_pids[edge].subsys_name,
+ "", SMD_MAX_CH_NAME_LEN);
+ else
+ pr_err("%s: Invalid edge type[%d]\n", __func__, edge);
+}
+
+/**
+ * smd_reset_all_edge_subsys_name() - Reset the subsystem name
+ *
+ * This function is used to reset the subsystem name of all edges in
+ * targets where configuration information is available through
+ * device tree.
+ */
+void smd_reset_all_edge_subsys_name(void)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(edge_to_pids); i++)
+ strlcpy(edge_to_pids[i].subsys_name,
+ "", sizeof(""));
+}
+
+/**
+ * smd_set_edge_initialized() - Set the edge initialized status
+ * @edge: edge type identifies local and remote processor
+ *
+ * This function sets the initialized variable based on edge type.
+ */
+void smd_set_edge_initialized(uint32_t edge)
+{
+ if (edge < ARRAY_SIZE(edge_to_pids))
+ edge_to_pids[edge].initialized = true;
+ else
+ pr_err("%s: Invalid edge type[%d]\n", __func__, edge);
+}
+
+/**
+ * smd_cfg_smd_intr() - Set the SMD interrupt configuration
+ * @proc: remote processor ID
+ * @mask: bit position in IRQ register
+ * @ptr: IRQ register
+ *
+ * This function is called in Legacy init sequence and used to set
+ * the SMD interrupt configurations for particular processor.
+ */
+void smd_cfg_smd_intr(uint32_t proc, uint32_t mask, void *ptr)
+{
+ private_intr_config[proc].smd.out_bit_pos = mask;
+ private_intr_config[proc].smd.out_base = ptr;
+ private_intr_config[proc].smd.out_offset = 0;
+}
+
+/**
+ * smd_cfg_smsm_intr() - Set the SMSM interrupt configuration
+ * @proc: remote processor ID
+ * @mask: bit position in IRQ register
+ * @ptr: IRQ register
+ *
+ * This function is called in Legacy init sequence and used to set
+ * the SMSM interrupt configurations for particular processor.
+ */
+void smd_cfg_smsm_intr(uint32_t proc, uint32_t mask, void *ptr)
+{
+ private_intr_config[proc].smsm.out_bit_pos = mask;
+ private_intr_config[proc].smsm.out_base = ptr;
+ private_intr_config[proc].smsm.out_offset = 0;
+}
+
+static __init int modem_restart_late_init(void)
+{
+ int i;
+ void *handle;
+ struct restart_notifier_block *nb;
+
+ for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
+ nb = &restart_notifiers[i];
+ handle = subsys_notif_register_notifier(nb->name, &nb->nb);
+ SMD_DBG("%s: registering notif for '%s', handle=%p\n",
+ __func__, nb->name, handle);
+ }
+
+ return 0;
+}
+late_initcall(modem_restart_late_init);
+
+int __init msm_smd_init(void)
+{
+ static bool registered;
+ int rc;
+ int i;
+
+ if (registered)
+ return 0;
+
+ smd_log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smd");
+ if (!smd_log_ctx) {
+ pr_err("%s: unable to create SMD logging context\n", __func__);
+ msm_smd_debug_mask = 0;
+ }
+
+ smsm_log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smsm");
+ if (!smsm_log_ctx) {
+ pr_err("%s: unable to create SMSM logging context\n", __func__);
+ msm_smd_debug_mask = 0;
+ }
+
+ registered = true;
+
+ for (i = 0; i < NUM_SMD_SUBSYSTEMS; ++i) {
+ remote_info[i].remote_pid = i;
+ remote_info[i].free_space = UINT_MAX;
+ INIT_WORK(&remote_info[i].probe_work, smd_channel_probe_worker);
+ INIT_LIST_HEAD(&remote_info[i].ch_list);
+ }
+
+ channel_close_wq = create_singlethread_workqueue("smd_channel_close");
+ if (IS_ERR(channel_close_wq)) {
+ pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
+ return -ENOMEM;
+ }
+
+ rc = msm_smd_driver_register();
+ if (rc) {
+ pr_err("%s: msm_smd_driver register failed %d\n",
+ __func__, rc);
+ return rc;
+ }
+ return 0;
+}
+
+arch_initcall(msm_smd_init);
+
+MODULE_DESCRIPTION("MSM Shared Memory Core");
+MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/qcom/smd_debug.c b/drivers/soc/qcom/smd_debug.c
new file mode 100644
index 000000000000..283914440136
--- /dev/null
+++ b/drivers/soc/qcom/smd_debug.c
@@ -0,0 +1,404 @@
+/* drivers/soc/qcom/smd_debug.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/jiffies.h>
+
+#include <soc/qcom/smem.h>
+
+#include "smd_private.h"
+
+#if defined(CONFIG_DEBUG_FS)
+
+static char *chstate(unsigned n)
+{
+ switch (n) {
+ case SMD_SS_CLOSED:
+ return "CLOSED";
+ case SMD_SS_OPENING:
+ return "OPENING";
+ case SMD_SS_OPENED:
+ return "OPENED";
+ case SMD_SS_FLUSHING:
+ return "FLUSHING";
+ case SMD_SS_CLOSING:
+ return "CLOSING";
+ case SMD_SS_RESET:
+ return "RESET";
+ case SMD_SS_RESET_OPENING:
+ return "ROPENING";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static void debug_int_stats(struct seq_file *s)
+{
+ int subsys;
+ struct interrupt_stat *stats = interrupt_stats;
+ const char *subsys_name;
+
+ seq_puts(s,
+ " Subsystem | Interrupt ID | In | Out |\n");
+
+ for (subsys = 0; subsys < NUM_SMD_SUBSYSTEMS; ++subsys) {
+ subsys_name = smd_pid_to_subsystem(subsys);
+ if (!IS_ERR_OR_NULL(subsys_name)) {
+ seq_printf(s, "%-10s %4s | %9d | %9u | %9u |\n",
+ smd_pid_to_subsystem(subsys), "smd",
+ stats->smd_interrupt_id,
+ stats->smd_in_count,
+ stats->smd_out_count);
+
+ seq_printf(s, "%-10s %4s | %9d | %9u | %9u |\n",
+ smd_pid_to_subsystem(subsys), "smsm",
+ stats->smsm_interrupt_id,
+ stats->smsm_in_count,
+ stats->smsm_out_count);
+ }
+ ++stats;
+ }
+}
+
+static void debug_int_stats_reset(struct seq_file *s)
+{
+ int subsys;
+ struct interrupt_stat *stats = interrupt_stats;
+
+ seq_puts(s, "Resetting interrupt stats.\n");
+
+ for (subsys = 0; subsys < NUM_SMD_SUBSYSTEMS; ++subsys) {
+ stats->smd_in_count = 0;
+ stats->smd_out_count = 0;
+ stats->smsm_in_count = 0;
+ stats->smsm_out_count = 0;
+ ++stats;
+ }
+}
+
+/* NNV: revisit, it may not be smd version */
+static void debug_read_smd_version(struct seq_file *s)
+{
+ uint32_t *smd_ver;
+ uint32_t n, version;
+
+ smd_ver = smem_find(SMEM_VERSION_SMD, 32 * sizeof(uint32_t),
+ 0, SMEM_ANY_HOST_FLAG);
+
+ if (smd_ver)
+ for (n = 0; n < 32; n++) {
+ version = smd_ver[n];
+ seq_printf(s, "entry %d: %d.%d\n", n,
+ version >> 16,
+ version & 0xffff);
+ }
+}
+
+/**
+ * pid_to_str - Convert a numeric processor id value into a human readable
+ * string value.
+ *
+ * @pid: the processor id to convert
+ * @returns: a string representation of @pid
+ */
+static char *pid_to_str(int pid)
+{
+ switch (pid) {
+ case SMD_APPS:
+ return "APPS";
+ case SMD_MODEM:
+ return "MDMSW";
+ case SMD_Q6:
+ return "ADSP";
+ case SMD_TZ:
+ return "TZ";
+ case SMD_WCNSS:
+ return "WCNSS";
+ case SMD_MODEM_Q6_FW:
+ return "MDMFW";
+ case SMD_RPM:
+ return "RPM";
+ default:
+ return "???";
+ }
+}
+
+/**
+ * print_half_ch_state - Print the state of half of a SMD channel in a human
+ * readable format.
+ *
+ * @s: the sequential file to print to
+ * @half_ch: half of a SMD channel that should have its state printed
+ * @half_ch_funcs: the relevant channel access functions for @half_ch
+ * @size: size of the fifo in bytes associated with @half_ch
+ * @proc: the processor id that owns the part of the SMD channel associated with
+ * @half_ch
+ */
+static void print_half_ch_state(struct seq_file *s,
+ void *half_ch,
+ struct smd_half_channel_access *half_ch_funcs,
+ unsigned size,
+ int proc)
+{
+ seq_printf(s, "%-5s|%-7s|0x%05X|0x%05X|0x%05X|%c%c%c%c%c%c%c%c|0x%05X",
+ pid_to_str(proc),
+ chstate(half_ch_funcs->get_state(half_ch)),
+ size,
+ half_ch_funcs->get_tail(half_ch),
+ half_ch_funcs->get_head(half_ch),
+ half_ch_funcs->get_fDSR(half_ch) ? 'D' : 'd',
+ half_ch_funcs->get_fCTS(half_ch) ? 'C' : 'c',
+ half_ch_funcs->get_fCD(half_ch) ? 'C' : 'c',
+ half_ch_funcs->get_fRI(half_ch) ? 'I' : 'i',
+ half_ch_funcs->get_fHEAD(half_ch) ? 'W' : 'w',
+ half_ch_funcs->get_fTAIL(half_ch) ? 'R' : 'r',
+ half_ch_funcs->get_fSTATE(half_ch) ? 'S' : 's',
+ half_ch_funcs->get_fBLOCKREADINTR(half_ch) ? 'B' : 'b',
+ (half_ch_funcs->get_head(half_ch) -
+ half_ch_funcs->get_tail(half_ch)) & (size - 1));
+}
+
+/**
+ * smd_xfer_type_to_str - Convert a numeric transfer type value into a human
+ * readable string value.
+ *
+ * @xfer_type: the transfer type to convert
+ * @returns: a string representation of @xfer_type
+ */
+static char *smd_xfer_type_to_str(uint32_t xfer_type)
+{
+ if (xfer_type == 1)
+ return "S"; /* streaming type */
+ else if (xfer_type == 2)
+ return "P"; /* packet type */
+ else
+ return "L"; /* legacy type */
+}
+
+/**
+ * print_smd_ch_table - Print the current state of every valid SMD channel in a
+ * specific SMD channel allocation table to a human
+ * readable formatted output.
+ *
+ * @s: the sequential file to print to
+ * @tbl: a valid pointer to the channel allocation table to print from
+ * @num_tbl_entries: total number of entries in the table referenced by @tbl
+ * @ch_base_id: the SMEM item id corresponding to the array of channel
+ * structures for the channels found in @tbl
+ * @fifo_base_id: the SMEM item id corresponding to the array of channel fifos
+ * for the channels found in @tbl
+ * @pid: processor id to use for any SMEM operations
+ * @flags: flags to use for any SMEM operations
+ */
+static void print_smd_ch_table(struct seq_file *s,
+ struct smd_alloc_elm *tbl,
+ unsigned num_tbl_entries,
+ unsigned ch_base_id,
+ unsigned fifo_base_id,
+ unsigned pid,
+ unsigned flags)
+{
+ void *half_ch;
+ unsigned half_ch_size;
+ uint32_t ch_type;
+ void *buffer;
+ unsigned buffer_size;
+ int n;
+
+/*
+ * formatted, human readable channel state output, ie:
+ID|CHANNEL NAME |T|PROC |STATE |FIFO SZ|RDPTR |WRPTR |FLAGS |DATAPEN
+-------------------------------------------------------------------------------
+00|DS |S|APPS |CLOSED |0x02000|0x00000|0x00000|dcCiwrsb|0x00000
+ | | |MDMSW|OPENING|0x02000|0x00000|0x00000|dcCiwrsb|0x00000
+-------------------------------------------------------------------------------
+ */
+
+ seq_printf(s, "%2s|%-19s|%1s|%-5s|%-7s|%-7s|%-7s|%-7s|%-8s|%-7s\n",
+ "ID",
+ "CHANNEL NAME",
+ "T",
+ "PROC",
+ "STATE",
+ "FIFO SZ",
+ "RDPTR",
+ "WRPTR",
+ "FLAGS",
+ "DATAPEN");
+ seq_puts(s,
+ "-------------------------------------------------------------------------------\n");
+ for (n = 0; n < num_tbl_entries; ++n) {
+ if (strlen(tbl[n].name) == 0)
+ continue;
+
+ seq_printf(s, "%2u|%-19s|%s|", tbl[n].cid, tbl[n].name,
+ smd_xfer_type_to_str(SMD_XFER_TYPE(tbl[n].type)));
+ ch_type = SMD_CHANNEL_TYPE(tbl[n].type);
+ if (is_word_access_ch(ch_type))
+ half_ch_size =
+ sizeof(struct smd_half_channel_word_access);
+ else
+ half_ch_size = sizeof(struct smd_half_channel);
+
+ half_ch = smem_find(ch_base_id + n, 2 * half_ch_size,
+ pid, flags);
+ buffer = smem_get_entry(fifo_base_id + n, &buffer_size,
+ pid, flags);
+ if (half_ch && buffer)
+ print_half_ch_state(s,
+ half_ch,
+ get_half_ch_funcs(ch_type),
+ buffer_size / 2,
+ smd_edge_to_local_pid(ch_type));
+
+ seq_puts(s, "\n");
+ seq_printf(s, "%2s|%-19s|%1s|", "", "", "");
+
+ if (half_ch && buffer)
+ print_half_ch_state(s,
+ half_ch + half_ch_size,
+ get_half_ch_funcs(ch_type),
+ buffer_size / 2,
+ smd_edge_to_remote_pid(ch_type));
+
+ seq_puts(s, "\n");
+ seq_puts(s,
+ "-------------------------------------------------------------------------------\n");
+ }
+}
+
+/**
+ * debug_ch - Print the current state of every valid SMD channel in a human
+ * readable formatted table.
+ *
+ * @s: the sequential file to print to
+ */
+static void debug_ch(struct seq_file *s)
+{
+ struct smd_alloc_elm *tbl;
+ struct smd_alloc_elm *default_pri_tbl;
+ struct smd_alloc_elm *default_sec_tbl;
+ unsigned tbl_size;
+ int i;
+
+ tbl = smem_get_entry(ID_CH_ALLOC_TBL, &tbl_size, 0, SMEM_ANY_HOST_FLAG);
+ default_pri_tbl = tbl;
+
+ if (!tbl) {
+ seq_puts(s, "Channel allocation table not found\n");
+ return;
+ }
+
+ seq_puts(s, "Primary allocation table:\n");
+ print_smd_ch_table(s, tbl, tbl_size / sizeof(*tbl), ID_SMD_CHANNELS,
+ SMEM_SMD_FIFO_BASE_ID,
+ 0,
+ SMEM_ANY_HOST_FLAG);
+
+ tbl = smem_get_entry(SMEM_CHANNEL_ALLOC_TBL_2, &tbl_size, 0,
+ SMEM_ANY_HOST_FLAG);
+ default_sec_tbl = tbl;
+ if (tbl) {
+ seq_puts(s, "\n\nSecondary allocation table:\n");
+ print_smd_ch_table(s, tbl, tbl_size / sizeof(*tbl),
+ SMEM_SMD_BASE_ID_2,
+ SMEM_SMD_FIFO_BASE_ID_2,
+ 0,
+ SMEM_ANY_HOST_FLAG);
+ }
+
+ for (i = 1; i < NUM_SMD_SUBSYSTEMS; ++i) {
+ tbl = smem_get_entry(ID_CH_ALLOC_TBL, &tbl_size, i, 0);
+ if (tbl && tbl != default_pri_tbl) {
+ seq_puts(s, "\n\n");
+ seq_printf(s, "%s <-> %s Primary allocation table:\n",
+ pid_to_str(SMD_APPS),
+ pid_to_str(i));
+ print_smd_ch_table(s, tbl, tbl_size / sizeof(*tbl),
+ ID_SMD_CHANNELS,
+ SMEM_SMD_FIFO_BASE_ID,
+ i,
+ 0);
+ }
+
+ tbl = smem_get_entry(SMEM_CHANNEL_ALLOC_TBL_2, &tbl_size, i, 0);
+ if (tbl && tbl != default_sec_tbl) {
+ seq_puts(s, "\n\n");
+ seq_printf(s, "%s <-> %s Secondary allocation table:\n",
+ pid_to_str(SMD_APPS),
+ pid_to_str(i));
+ print_smd_ch_table(s, tbl, tbl_size / sizeof(*tbl),
+ SMEM_SMD_BASE_ID_2,
+ SMEM_SMD_FIFO_BASE_ID_2,
+ i,
+ 0);
+ }
+ }
+}
+
+static int debugfs_show(struct seq_file *s, void *data)
+{
+ void (*show)(struct seq_file *) = s->private;
+
+ show(s);
+
+ return 0;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debugfs_show, inode->i_private);
+}
+
+static const struct file_operations debug_ops = {
+ .open = debug_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static void debug_create(const char *name, umode_t mode,
+ struct dentry *dent,
+ void (*show)(struct seq_file *))
+{
+ struct dentry *file;
+
+ file = debugfs_create_file(name, mode, dent, show, &debug_ops);
+ if (!file)
+ pr_err("%s: unable to create file '%s'\n", __func__, name);
+}
+
+static int __init smd_debugfs_init(void)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_dir("smd", 0);
+ if (IS_ERR(dent))
+ return PTR_ERR(dent);
+
+ debug_create("ch", 0444, dent, debug_ch);
+ debug_create("version", 0444, dent, debug_read_smd_version);
+ debug_create("int_stats", 0444, dent, debug_int_stats);
+ debug_create("int_stats_reset", 0444, dent, debug_int_stats_reset);
+
+ return 0;
+}
+
+late_initcall(smd_debugfs_init);
+#endif
diff --git a/drivers/soc/qcom/smd_init_dt.c b/drivers/soc/qcom/smd_init_dt.c
new file mode 100644
index 000000000000..993f3536ae04
--- /dev/null
+++ b/drivers/soc/qcom/smd_init_dt.c
@@ -0,0 +1,346 @@
+/* drivers/soc/qcom/smd_init_dt.c
+ *
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ipc_logging.h>
+
+#include "smd_private.h"
+
+#define MODULE_NAME "msm_smd"
+#define IPC_LOG(level, x...) do { \
+ if (smd_log_ctx) \
+ ipc_log_string(smd_log_ctx, x); \
+ else \
+ printk(level x); \
+ } while (0)
+
+#if defined(CONFIG_MSM_SMD_DEBUG)
+#define SMD_DBG(x...) do { \
+ if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
+ IPC_LOG(KERN_DEBUG, x); \
+ } while (0)
+
+#define SMSM_DBG(x...) do { \
+ if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
+ IPC_LOG(KERN_DEBUG, x); \
+ } while (0)
+#else
+#define SMD_DBG(x...) do { } while (0)
+#define SMSM_DBG(x...) do { } while (0)
+#endif
+
+static DEFINE_MUTEX(smd_probe_lock);
+static int first_probe_done;
+
+/*
+ * Probe one SMSM edge described in device tree: map the outgoing interrupt
+ * register from the parent's "irq-reg-base" resource, read the edge/irq
+ * configuration, register the incoming interrupt and run SMSM post-init.
+ *
+ * Fix over the original: every error path after the ioremap now releases
+ * the mapping (and the IRQ where one was requested) instead of leaking it.
+ */
+static int msm_smsm_probe(struct platform_device *pdev)
+{
+	uint32_t edge;
+	char *key;
+	int ret;
+	uint32_t irq_offset;
+	uint32_t irq_bitmask;
+	uint32_t irq_line;
+	struct interrupt_config_item *private_irq;
+	struct device_node *node;
+	void *irq_out_base;
+	resource_size_t irq_out_size;
+	struct platform_device *parent_pdev;
+	struct resource *r;
+	struct interrupt_config *private_intr_config;
+	uint32_t remote_pid;
+
+	node = pdev->dev.of_node;
+
+	if (!pdev->dev.parent) {
+		pr_err("%s: missing link to parent device\n", __func__);
+		return -ENODEV;
+	}
+
+	parent_pdev = to_platform_device(pdev->dev.parent);
+
+	key = "irq-reg-base";
+	r = platform_get_resource_byname(parent_pdev, IORESOURCE_MEM, key);
+	if (!r)
+		goto missing_key;
+	irq_out_size = resource_size(r);
+	irq_out_base = ioremap_nocache(r->start, irq_out_size);
+	if (!irq_out_base) {
+		pr_err("%s: ioremap_nocache() of irq_out_base addr:%pr size:%pr\n",
+				__func__, &r->start, &irq_out_size);
+		return -ENOMEM;
+	}
+	SMSM_DBG("%s: %s = %p", __func__, key, irq_out_base);
+
+	key = "qcom,smsm-edge";
+	ret = of_property_read_u32(node, key, &edge);
+	if (ret)
+		goto missing_key_unmap;
+	SMSM_DBG("%s: %s = %d", __func__, key, edge);
+
+	key = "qcom,smsm-irq-offset";
+	ret = of_property_read_u32(node, key, &irq_offset);
+	if (ret)
+		goto missing_key_unmap;
+	SMSM_DBG("%s: %s = %x", __func__, key, irq_offset);
+
+	key = "qcom,smsm-irq-bitmask";
+	ret = of_property_read_u32(node, key, &irq_bitmask);
+	if (ret)
+		goto missing_key_unmap;
+	SMSM_DBG("%s: %s = %x", __func__, key, irq_bitmask);
+
+	key = "interrupts";
+	irq_line = irq_of_parse_and_map(node, 0);
+	if (!irq_line)
+		goto missing_key_unmap;
+	SMSM_DBG("%s: %s = %d", __func__, key, irq_line);
+
+	private_intr_config = smd_get_intr_config(edge);
+	if (!private_intr_config) {
+		pr_err("%s: invalid edge\n", __func__);
+		ret = -ENODEV;
+		goto err_unmap;
+	}
+	private_irq = &private_intr_config->smsm;
+	private_irq->out_bit_pos = irq_bitmask;
+	private_irq->out_offset = irq_offset;
+	private_irq->out_base = irq_out_base;
+	private_irq->irq_id = irq_line;
+	remote_pid = smd_edge_to_remote_pid(edge);
+	interrupt_stats[remote_pid].smsm_interrupt_id = irq_line;
+
+	ret = request_irq(irq_line,
+			private_irq->irq_handler,
+			IRQF_TRIGGER_RISING,
+			node->name,
+			NULL);
+	if (ret < 0) {
+		pr_err("%s: request_irq() failed on %d\n", __func__, irq_line);
+		private_irq->out_base = NULL;
+		goto err_unmap;
+	}
+	/* wake failure is logged but non-fatal, matching legacy behavior */
+	ret = enable_irq_wake(irq_line);
+	if (ret < 0)
+		pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
+				irq_line);
+
+	ret = smsm_post_init();
+	if (ret) {
+		pr_err("smd_post_init() failed ret=%d\n", ret);
+		free_irq(irq_line, NULL);
+		private_irq->out_base = NULL;
+		goto err_unmap;
+	}
+
+	return 0;
+
+missing_key_unmap:
+	iounmap(irq_out_base);
+missing_key:
+	pr_err("%s: missing key: %s", __func__, key);
+	return -ENODEV;
+
+err_unmap:
+	iounmap(irq_out_base);
+	return ret;
+}
+
+/*
+ * Probe one SMD edge described in device tree: map the outgoing interrupt
+ * register, read the edge/irq/subsystem configuration, register the
+ * incoming interrupt and kick off SMD post-init for the remote processor.
+ *
+ * Fix over the original: every error path after the ioremap now releases
+ * the mapping instead of leaking it.
+ */
+static int msm_smd_probe(struct platform_device *pdev)
+{
+	uint32_t edge;
+	char *key;
+	int ret;
+	uint32_t irq_offset;
+	uint32_t irq_bitmask;
+	uint32_t irq_line;
+	unsigned long irq_flags = IRQF_TRIGGER_RISING;
+	const char *subsys_name;
+	struct interrupt_config_item *private_irq;
+	struct device_node *node;
+	void *irq_out_base;
+	resource_size_t irq_out_size;
+	struct platform_device *parent_pdev;
+	struct resource *r;
+	struct interrupt_config *private_intr_config;
+	uint32_t remote_pid;
+	bool skip_pil;
+
+	node = pdev->dev.of_node;
+
+	if (!pdev->dev.parent) {
+		pr_err("%s: missing link to parent device\n", __func__);
+		return -ENODEV;
+	}
+
+	/* one-time global reset of edge subsystem names, first probe only */
+	mutex_lock(&smd_probe_lock);
+	if (!first_probe_done) {
+		smd_reset_all_edge_subsys_name();
+		first_probe_done = 1;
+	}
+	mutex_unlock(&smd_probe_lock);
+
+	parent_pdev = to_platform_device(pdev->dev.parent);
+
+	key = "irq-reg-base";
+	r = platform_get_resource_byname(parent_pdev, IORESOURCE_MEM, key);
+	if (!r)
+		goto missing_key;
+	irq_out_size = resource_size(r);
+	irq_out_base = ioremap_nocache(r->start, irq_out_size);
+	if (!irq_out_base) {
+		pr_err("%s: ioremap_nocache() of irq_out_base addr:%pr size:%pr\n",
+				__func__, &r->start, &irq_out_size);
+		return -ENOMEM;
+	}
+	SMD_DBG("%s: %s = %p", __func__, key, irq_out_base);
+
+	key = "qcom,smd-edge";
+	ret = of_property_read_u32(node, key, &edge);
+	if (ret)
+		goto missing_key_unmap;
+	SMD_DBG("%s: %s = %d", __func__, key, edge);
+
+	key = "qcom,smd-irq-offset";
+	ret = of_property_read_u32(node, key, &irq_offset);
+	if (ret)
+		goto missing_key_unmap;
+	SMD_DBG("%s: %s = %x", __func__, key, irq_offset);
+
+	key = "qcom,smd-irq-bitmask";
+	ret = of_property_read_u32(node, key, &irq_bitmask);
+	if (ret)
+		goto missing_key_unmap;
+	SMD_DBG("%s: %s = %x", __func__, key, irq_bitmask);
+
+	key = "interrupts";
+	irq_line = irq_of_parse_and_map(node, 0);
+	if (!irq_line)
+		goto missing_key_unmap;
+	SMD_DBG("%s: %s = %d", __func__, key, irq_line);
+
+	key = "label";
+	subsys_name = of_get_property(node, key, NULL);
+	SMD_DBG("%s: %s = %s", __func__, key, subsys_name);
+	/*
+	 * Backwards compatibility. Although label is required, some DTs may
+	 * still list the legacy pil-string. Sanely handle pil-string.
+	 */
+	if (!subsys_name) {
+		pr_warn("msm_smd: Missing required property - label. Using legacy parsing\n");
+		key = "qcom,pil-string";
+		subsys_name = of_get_property(node, key, NULL);
+		SMD_DBG("%s: %s = %s", __func__, key, subsys_name);
+		skip_pil = !subsys_name;
+	} else {
+		key = "qcom,not-loadable";
+		skip_pil = of_property_read_bool(node, key);
+		SMD_DBG("%s: %s = %d\n", __func__, key, skip_pil);
+	}
+
+	key = "qcom,irq-no-suspend";
+	if (of_property_read_bool(node, key))
+		irq_flags |= IRQF_NO_SUSPEND;
+
+	private_intr_config = smd_get_intr_config(edge);
+	if (!private_intr_config) {
+		pr_err("%s: invalid edge\n", __func__);
+		ret = -ENODEV;
+		goto err_unmap;
+	}
+	private_irq = &private_intr_config->smd;
+	private_irq->out_bit_pos = irq_bitmask;
+	private_irq->out_offset = irq_offset;
+	private_irq->out_base = irq_out_base;
+	private_irq->irq_id = irq_line;
+	remote_pid = smd_edge_to_remote_pid(edge);
+	interrupt_stats[remote_pid].smd_interrupt_id = irq_line;
+
+	ret = request_irq(irq_line,
+			private_irq->irq_handler,
+			irq_flags,
+			node->name,
+			NULL);
+	if (ret < 0) {
+		pr_err("%s: request_irq() failed on %d\n", __func__, irq_line);
+		private_irq->out_base = NULL;
+		goto err_unmap;
+	}
+	/* wake failure is logged but non-fatal, matching legacy behavior */
+	ret = enable_irq_wake(irq_line);
+	if (ret < 0)
+		pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
+				irq_line);
+
+	smd_set_edge_subsys_name(edge, subsys_name);
+	smd_proc_set_skip_pil(remote_pid, skip_pil);
+
+	smd_set_edge_initialized(edge);
+	smd_post_init(remote_pid);
+	return 0;
+
+missing_key_unmap:
+	iounmap(irq_out_base);
+missing_key:
+	pr_err("%s: missing key: %s", __func__, key);
+	return -ENODEV;
+
+err_unmap:
+	iounmap(irq_out_base);
+	return ret;
+}
+
+/* DT compatible strings handled by the SMD edge driver. */
+static struct of_device_id msm_smd_match_table[] = {
+	{ .compatible = "qcom,smd" },
+	{},
+};
+
+/* Platform driver for "qcom,smd" edge nodes. */
+static struct platform_driver msm_smd_driver = {
+	.probe = msm_smd_probe,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = msm_smd_match_table,
+	},
+};
+
+/* DT compatible strings handled by the SMSM edge driver. */
+static struct of_device_id msm_smsm_match_table[] = {
+	{ .compatible = "qcom,smsm" },
+	{},
+};
+
+/* Platform driver for "qcom,smsm" edge nodes. */
+static struct platform_driver msm_smsm_driver = {
+	.probe = msm_smsm_probe,
+	.driver = {
+		.name = "msm_smsm",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_smsm_match_table,
+	},
+};
+
+/*
+ * Register both the SMD and SMSM platform drivers.  Returns 0 on success
+ * or the first registration error.  If the SMSM driver fails to register,
+ * the already-registered SMD driver is unregistered so the caller never
+ * sees a half-registered state (the original left it dangling).
+ */
+int msm_smd_driver_register(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&msm_smd_driver);
+	if (rc) {
+		pr_err("%s: smd_driver register failed %d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	rc = platform_driver_register(&msm_smsm_driver);
+	if (rc) {
+		pr_err("%s: msm_smsm_driver register failed %d\n",
+			__func__, rc);
+		platform_driver_unregister(&msm_smd_driver);
+		return rc;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_smd_driver_register);
+
+MODULE_DESCRIPTION("MSM SMD Device Tree Init");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smd_private.c b/drivers/soc/qcom/smd_private.c
new file mode 100644
index 000000000000..a7ef87fc723d
--- /dev/null
+++ b/drivers/soc/qcom/smd_private.c
@@ -0,0 +1,333 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "smd_private.h"
+
+/*
+ * Accessors for the byte-layout struct smd_half_channel.  Each helper
+ * force-casts the __iomem half-channel pointer and touches exactly one
+ * field.  They populate the byte_access vtable returned by
+ * get_half_ch_funcs() for non-RPM edges.
+ */
+void set_state(volatile void __iomem *half_channel, unsigned data)
+{
+	((struct smd_half_channel __force *)(half_channel))->state = data;
+}
+
+unsigned get_state(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel __force *)(half_channel))->state;
+}
+
+void set_fDSR(volatile void __iomem *half_channel, unsigned char data)
+{
+	((struct smd_half_channel __force *)(half_channel))->fDSR = data;
+}
+
+unsigned get_fDSR(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel __force *)(half_channel))->fDSR;
+}
+
+void set_fCTS(volatile void __iomem *half_channel, unsigned char data)
+{
+	((struct smd_half_channel __force *)(half_channel))->fCTS = data;
+}
+
+unsigned get_fCTS(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel __force *)(half_channel))->fCTS;
+}
+
+void set_fCD(volatile void __iomem *half_channel, unsigned char data)
+{
+	((struct smd_half_channel __force *)(half_channel))->fCD = data;
+}
+
+unsigned get_fCD(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel __force *)(half_channel))->fCD;
+}
+
+void set_fRI(volatile void __iomem *half_channel, unsigned char data)
+{
+	((struct smd_half_channel __force *)(half_channel))->fRI = data;
+}
+
+unsigned get_fRI(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel __force *)(half_channel))->fRI;
+}
+
+void set_fHEAD(volatile void __iomem *half_channel, unsigned char data)
+{
+	((struct smd_half_channel __force *)(half_channel))->fHEAD = data;
+}
+
+unsigned get_fHEAD(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel __force *)(half_channel))->fHEAD;
+}
+
+void set_fTAIL(volatile void __iomem *half_channel, unsigned char data)
+{
+	((struct smd_half_channel __force *)(half_channel))->fTAIL = data;
+}
+
+unsigned get_fTAIL(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel __force *)(half_channel))->fTAIL;
+}
+
+void set_fSTATE(volatile void __iomem *half_channel, unsigned char data)
+{
+	((struct smd_half_channel __force *)(half_channel))->fSTATE = data;
+}
+
+unsigned get_fSTATE(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel __force *)(half_channel))->fSTATE;
+}
+
+void set_fBLOCKREADINTR(volatile void __iomem *half_channel, unsigned char data)
+{
+	((struct smd_half_channel __force *)
+		(half_channel))->fBLOCKREADINTR = data;
+}
+
+unsigned get_fBLOCKREADINTR(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel __force *)
+		(half_channel))->fBLOCKREADINTR;
+}
+
+void set_tail(volatile void __iomem *half_channel, unsigned data)
+{
+	((struct smd_half_channel __force *)(half_channel))->tail = data;
+}
+
+unsigned get_tail(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel __force *)(half_channel))->tail;
+}
+
+void set_head(volatile void __iomem *half_channel, unsigned data)
+{
+	((struct smd_half_channel __force *)(half_channel))->head = data;
+}
+
+unsigned get_head(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel __force *)(half_channel))->head;
+}
+
+/*
+ * Accessors for the word-layout struct smd_half_channel_word_access (same
+ * fields as the byte layout, but each is a full word).  They populate the
+ * word_access vtable returned by get_half_ch_funcs() for RPM edges, which
+ * is_word_access_ch() identifies.
+ */
+void set_state_word_access(volatile void __iomem *half_channel, unsigned data)
+{
+	((struct smd_half_channel_word_access __force *)
+		(half_channel))->state = data;
+}
+
+unsigned get_state_word_access(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel_word_access __force *)
+		(half_channel))->state;
+}
+
+void set_fDSR_word_access(volatile void __iomem *half_channel,
+						unsigned char data)
+{
+	((struct smd_half_channel_word_access __force *)
+		(half_channel))->fDSR = data;
+}
+
+unsigned get_fDSR_word_access(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel_word_access __force *)
+		(half_channel))->fDSR;
+}
+
+void set_fCTS_word_access(volatile void __iomem *half_channel,
+						unsigned char data)
+{
+	((struct smd_half_channel_word_access __force *)
+		(half_channel))->fCTS = data;
+}
+
+unsigned get_fCTS_word_access(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel_word_access __force *)
+		(half_channel))->fCTS;
+}
+
+void set_fCD_word_access(volatile void __iomem *half_channel,
+						unsigned char data)
+{
+	((struct smd_half_channel_word_access __force *)
+		(half_channel))->fCD = data;
+}
+
+unsigned get_fCD_word_access(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel_word_access __force *)
+		(half_channel))->fCD;
+}
+
+void set_fRI_word_access(volatile void __iomem *half_channel,
+						unsigned char data)
+{
+	((struct smd_half_channel_word_access __force *)
+		(half_channel))->fRI = data;
+}
+
+unsigned get_fRI_word_access(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel_word_access __force *)
+		(half_channel))->fRI;
+}
+
+void set_fHEAD_word_access(volatile void __iomem *half_channel,
+						unsigned char data)
+{
+	((struct smd_half_channel_word_access __force *)
+		(half_channel))->fHEAD = data;
+}
+
+unsigned get_fHEAD_word_access(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel_word_access __force *)
+		(half_channel))->fHEAD;
+}
+
+void set_fTAIL_word_access(volatile void __iomem *half_channel,
+						unsigned char data)
+{
+	((struct smd_half_channel_word_access __force *)
+		(half_channel))->fTAIL = data;
+}
+
+unsigned get_fTAIL_word_access(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel_word_access __force *)
+		(half_channel))->fTAIL;
+}
+
+void set_fSTATE_word_access(volatile void __iomem *half_channel,
+						unsigned char data)
+{
+	((struct smd_half_channel_word_access __force *)
+		(half_channel))->fSTATE = data;
+}
+
+unsigned get_fSTATE_word_access(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel_word_access __force *)
+		(half_channel))->fSTATE;
+}
+
+void set_fBLOCKREADINTR_word_access(volatile void __iomem *half_channel,
+						unsigned char data)
+{
+	((struct smd_half_channel_word_access __force *)
+		(half_channel))->fBLOCKREADINTR = data;
+}
+
+unsigned get_fBLOCKREADINTR_word_access(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel_word_access __force *)
+		(half_channel))->fBLOCKREADINTR;
+}
+
+void set_tail_word_access(volatile void __iomem *half_channel, unsigned data)
+{
+	((struct smd_half_channel_word_access __force *)
+		(half_channel))->tail = data;
+}
+
+unsigned get_tail_word_access(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel_word_access __force *)
+		(half_channel))->tail;
+}
+
+void set_head_word_access(volatile void __iomem *half_channel, unsigned data)
+{
+	((struct smd_half_channel_word_access __force *)
+		(half_channel))->head = data;
+}
+
+unsigned get_head_word_access(volatile void __iomem *half_channel)
+{
+	return ((struct smd_half_channel_word_access __force *)
+		(half_channel))->head;
+}
+
+/* Return 1 iff @ch_type is an RPM edge, which uses word-sized accesses. */
+int is_word_access_ch(unsigned ch_type)
+{
+	switch (ch_type) {
+	case SMD_APPS_RPM:
+	case SMD_MODEM_RPM:
+	case SMD_QDSP_RPM:
+	case SMD_WCNSS_RPM:
+	case SMD_TZ_RPM:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/*
+ * Return the accessor vtable matching @ch_type's shared-memory layout:
+ * word-wide accessors for RPM edges (see is_word_access_ch()), byte-wide
+ * accessors for everything else.  The tables are static and shared by all
+ * callers; callers must not modify them.
+ */
+struct smd_half_channel_access *get_half_ch_funcs(unsigned ch_type)
+{
+	static struct smd_half_channel_access byte_access = {
+		.set_state = set_state,
+		.get_state = get_state,
+		.set_fDSR = set_fDSR,
+		.get_fDSR = get_fDSR,
+		.set_fCTS = set_fCTS,
+		.get_fCTS = get_fCTS,
+		.set_fCD = set_fCD,
+		.get_fCD = get_fCD,
+		.set_fRI = set_fRI,
+		.get_fRI = get_fRI,
+		.set_fHEAD = set_fHEAD,
+		.get_fHEAD = get_fHEAD,
+		.set_fTAIL = set_fTAIL,
+		.get_fTAIL = get_fTAIL,
+		.set_fSTATE = set_fSTATE,
+		.get_fSTATE = get_fSTATE,
+		.set_fBLOCKREADINTR = set_fBLOCKREADINTR,
+		.get_fBLOCKREADINTR = get_fBLOCKREADINTR,
+		.set_tail = set_tail,
+		.get_tail = get_tail,
+		.set_head = set_head,
+		.get_head = get_head,
+	};
+	static struct smd_half_channel_access word_access = {
+		.set_state = set_state_word_access,
+		.get_state = get_state_word_access,
+		.set_fDSR = set_fDSR_word_access,
+		.get_fDSR = get_fDSR_word_access,
+		.set_fCTS = set_fCTS_word_access,
+		.get_fCTS = get_fCTS_word_access,
+		.set_fCD = set_fCD_word_access,
+		.get_fCD = get_fCD_word_access,
+		.set_fRI = set_fRI_word_access,
+		.get_fRI = get_fRI_word_access,
+		.set_fHEAD = set_fHEAD_word_access,
+		.get_fHEAD = get_fHEAD_word_access,
+		.set_fTAIL = set_fTAIL_word_access,
+		.get_fTAIL = get_fTAIL_word_access,
+		.set_fSTATE = set_fSTATE_word_access,
+		.get_fSTATE = get_fSTATE_word_access,
+		.set_fBLOCKREADINTR = set_fBLOCKREADINTR_word_access,
+		.get_fBLOCKREADINTR = get_fBLOCKREADINTR_word_access,
+		.set_tail = set_tail_word_access,
+		.get_tail = get_tail_word_access,
+		.set_head = set_head_word_access,
+		.get_head = get_head_word_access,
+	};
+
+	if (is_word_access_ch(ch_type))
+		return &word_access;
+	else
+		return &byte_access;
+}
+
diff --git a/drivers/soc/qcom/smd_private.h b/drivers/soc/qcom/smd_private.h
new file mode 100644
index 000000000000..090dfb631410
--- /dev/null
+++ b/drivers/soc/qcom/smd_private.h
@@ -0,0 +1,247 @@
+/* drivers/soc/qcom/smd_private.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _ARCH_ARM_MACH_MSM_MSM_SMD_PRIVATE_H_
+#define _ARCH_ARM_MACH_MSM_MSM_SMD_PRIVATE_H_
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/remote_spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+
+#include <soc/qcom/smd.h>
+#include <soc/qcom/smsm.h>
+
+#define VERSION_QDSP6 4
+#define VERSION_APPS_SBL 6
+#define VERSION_MODEM_SBL 7
+#define VERSION_APPS 8
+#define VERSION_MODEM 9
+#define VERSION_DSPS 10
+
+#define ID_SMD_CHANNELS SMEM_SMD_BASE_ID
+#define ID_SHARED_STATE SMEM_SMSM_SHARED_STATE
+#define ID_CH_ALLOC_TBL SMEM_CHANNEL_ALLOC_TBL
+
+#define SMD_SS_CLOSED 0x00000000
+#define SMD_SS_OPENING 0x00000001
+#define SMD_SS_OPENED 0x00000002
+#define SMD_SS_FLUSHING 0x00000003
+#define SMD_SS_CLOSING 0x00000004
+#define SMD_SS_RESET 0x00000005
+#define SMD_SS_RESET_OPENING 0x00000006
+
+#define SMD_HEADER_SIZE 20
+
+/* 'type' field of smd_alloc_elm structure
+ * has the following breakup
+ * bits 0-7 -> channel type
+ * bits 8-11 -> xfer type
+ * bits 12-31 -> reserved
+ */
+struct smd_alloc_elm {
+ char name[20];
+ uint32_t cid;
+ uint32_t type;
+ uint32_t ref_count;
+};
+
+#define SMD_CHANNEL_TYPE(x) ((x) & 0x000000FF)
+#define SMD_XFER_TYPE(x) (((x) & 0x00000F00) >> 8)
+
+/*
+ * One direction of an SMD channel as laid out in shared memory: channel
+ * state, byte-wide handshake/signal flags, and the FIFO tail/head indices.
+ * Accessed only through the set_*/get_* helpers in smd_private.c.
+ */
+struct smd_half_channel {
+	unsigned state;
+	unsigned char fDSR;
+	unsigned char fCTS;
+	unsigned char fCD;
+	unsigned char fRI;
+	unsigned char fHEAD;
+	unsigned char fTAIL;
+	unsigned char fSTATE;
+	unsigned char fBLOCKREADINTR;
+	unsigned tail;
+	unsigned head;
+};
+
+/*
+ * Word-access variant of struct smd_half_channel: identical fields, but
+ * every flag is widened to a full word.  Used for RPM edges (see
+ * is_word_access_ch()).
+ */
+struct smd_half_channel_word_access {
+	unsigned state;
+	unsigned fDSR;
+	unsigned fCTS;
+	unsigned fCD;
+	unsigned fRI;
+	unsigned fHEAD;
+	unsigned fTAIL;
+	unsigned fSTATE;
+	unsigned fBLOCKREADINTR;
+	unsigned tail;
+	unsigned head;
+};
+
+/*
+ * Vtable of field accessors that abstracts the byte vs word half-channel
+ * layouts; obtain the right instance via get_half_ch_funcs().
+ */
+struct smd_half_channel_access {
+	void (*set_state)(volatile void __iomem *half_channel, unsigned data);
+	unsigned (*get_state)(volatile void __iomem *half_channel);
+	void (*set_fDSR)(volatile void __iomem *half_channel,
+					unsigned char data);
+	unsigned (*get_fDSR)(volatile void __iomem *half_channel);
+	void (*set_fCTS)(volatile void __iomem *half_channel,
+					unsigned char data);
+	unsigned (*get_fCTS)(volatile void __iomem *half_channel);
+	void (*set_fCD)(volatile void __iomem *half_channel,
+					unsigned char data);
+	unsigned (*get_fCD)(volatile void __iomem *half_channel);
+	void (*set_fRI)(volatile void __iomem *half_channel,
+					unsigned char data);
+	unsigned (*get_fRI)(volatile void __iomem *half_channel);
+	void (*set_fHEAD)(volatile void __iomem *half_channel,
+					unsigned char data);
+	unsigned (*get_fHEAD)(volatile void __iomem *half_channel);
+	void (*set_fTAIL)(volatile void __iomem *half_channel,
+					unsigned char data);
+	unsigned (*get_fTAIL)(volatile void __iomem *half_channel);
+	void (*set_fSTATE)(volatile void __iomem *half_channel,
+					unsigned char data);
+	unsigned (*get_fSTATE)(volatile void __iomem *half_channel);
+	void (*set_fBLOCKREADINTR)(volatile void __iomem *half_channel,
+					unsigned char data);
+	unsigned (*get_fBLOCKREADINTR)(volatile void __iomem *half_channel);
+	void (*set_tail)(volatile void __iomem *half_channel, unsigned data);
+	unsigned (*get_tail)(volatile void __iomem *half_channel);
+	void (*set_head)(volatile void __iomem *half_channel, unsigned data);
+	unsigned (*get_head)(volatile void __iomem *half_channel);
+};
+
+int is_word_access_ch(unsigned ch_type);
+
+struct smd_half_channel_access *get_half_ch_funcs(unsigned ch_type);
+
+/*
+ * In-kernel representation of one SMD channel.  The read/write/avail
+ * function pointers are filled in per transfer type, and half_ch hides the
+ * byte-vs-word shared-memory layout of *send/*recv.
+ */
+struct smd_channel {
+	volatile void __iomem *send; /* some variant of smd_half_channel */
+	volatile void __iomem *recv; /* some variant of smd_half_channel */
+	unsigned char *send_data;    /* outgoing FIFO data area */
+	unsigned char *recv_data;    /* incoming FIFO data area */
+	unsigned fifo_size;
+	unsigned fifo_mask;
+	struct list_head ch_list;
+
+	unsigned current_packet;     /* bytes left in the packet being read */
+	unsigned n;                  /* channel id (cid) */
+	void *priv;
+	void (*notify)(void *priv, unsigned flags);
+
+	int (*read)(smd_channel_t *ch, void *data, int len, int user_buf);
+	int (*write)(smd_channel_t *ch, const void *data, int len,
+			int user_buf, bool int_ntfy);
+	int (*read_avail)(smd_channel_t *ch);
+	int (*write_avail)(smd_channel_t *ch);
+	int (*read_from_cb)(smd_channel_t *ch, void *data, int len,
+			int user_buf);
+
+	void (*update_state)(smd_channel_t *ch);
+	unsigned last_state;         /* last remote state seen */
+	void (*notify_other_cpu)(smd_channel_t *ch);
+	void *(*read_from_fifo)(void *dest, const void *src, size_t num_bytes,
+				bool to_user);
+	void *(*write_to_fifo)(void *dest, const void *src, size_t num_bytes,
+				bool from_user);
+
+	char name[20];
+	struct platform_device pdev;
+	unsigned type;               /* edge/channel type (SMD_CHANNEL_TYPE) */
+
+	int pending_pkt_sz;
+
+	char is_pkt_ch;              /* non-zero for packet-mode channels */
+
+	/*
+	 * private internal functions to access *send and *recv.
+	 * never to be exported outside of smd
+	 */
+	struct smd_half_channel_access *half_ch;
+};
+
+extern spinlock_t smem_lock;
+
+struct interrupt_stat {
+ uint32_t smd_in_count;
+ uint32_t smd_out_count;
+ uint32_t smd_interrupt_id;
+
+ uint32_t smsm_in_count;
+ uint32_t smsm_out_count;
+ uint32_t smsm_interrupt_id;
+};
+extern struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
+
+struct interrupt_config_item {
+ /* must be initialized */
+ irqreturn_t (*irq_handler)(int req, void *data);
+ /* outgoing interrupt config (set from platform data) */
+ uint32_t out_bit_pos;
+ void __iomem *out_base;
+ uint32_t out_offset;
+ int irq_id;
+};
+
+enum {
+ MSM_SMD_DEBUG = 1U << 0,
+ MSM_SMSM_DEBUG = 1U << 1,
+ MSM_SMD_INFO = 1U << 2,
+ MSM_SMSM_INFO = 1U << 3,
+ MSM_SMD_POWER_INFO = 1U << 4,
+ MSM_SMSM_POWER_INFO = 1U << 5,
+};
+
+struct interrupt_config {
+ struct interrupt_config_item smd;
+ struct interrupt_config_item smsm;
+};
+
+/* Maps an SMD edge to its local/remote processor ids and subsystem name. */
+struct edge_to_pid {
+	uint32_t local_pid;
+	uint32_t remote_pid;
+	char subsys_name[SMD_MAX_CH_NAME_LEN];
+	bool initialized;	/* set once the edge's probe has completed */
+};
+
+extern void *smd_log_ctx;
+extern int msm_smd_debug_mask;
+
+extern irqreturn_t smd_modem_irq_handler(int irq, void *data);
+extern irqreturn_t smsm_modem_irq_handler(int irq, void *data);
+extern irqreturn_t smd_dsp_irq_handler(int irq, void *data);
+extern irqreturn_t smsm_dsp_irq_handler(int irq, void *data);
+extern irqreturn_t smd_dsps_irq_handler(int irq, void *data);
+extern irqreturn_t smsm_dsps_irq_handler(int irq, void *data);
+extern irqreturn_t smd_wcnss_irq_handler(int irq, void *data);
+extern irqreturn_t smsm_wcnss_irq_handler(int irq, void *data);
+extern irqreturn_t smd_rpm_irq_handler(int irq, void *data);
+extern irqreturn_t smd_modemfw_irq_handler(int irq, void *data);
+
+extern int msm_smd_driver_register(void);
+extern void smd_post_init(unsigned remote_pid);
+extern int smsm_post_init(void);
+
+extern struct interrupt_config *smd_get_intr_config(uint32_t edge);
+extern int smd_edge_to_remote_pid(uint32_t edge);
+extern int smd_edge_to_local_pid(uint32_t edge);
+extern void smd_set_edge_subsys_name(uint32_t edge, const char *subsys_name);
+extern void smd_reset_all_edge_subsys_name(void);
+extern void smd_proc_set_skip_pil(unsigned pid, bool skip_pil);
+extern void smd_set_edge_initialized(uint32_t edge);
+extern void smd_cfg_smd_intr(uint32_t proc, uint32_t mask, void *ptr);
+extern void smd_cfg_smsm_intr(uint32_t proc, uint32_t mask, void *ptr);
+#endif
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
new file mode 100644
index 000000000000..fd011b8c4e81
--- /dev/null
+++ b/drivers/soc/qcom/smem.c
@@ -0,0 +1,1493 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/ipc_logging.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/ramdump.h>
+
+#include <soc/qcom/smem.h>
+
+
+#include "smem_private.h"
+
+/* Index of the Modem SBL entry in the SMEM version table. */
+#define MODEM_SBL_VERSION_INDEX 7
+/* The version table is 32 x 32-bit entries. */
+#define SMEM_VERSION_INFO_SIZE (32 * 4)
+/* Expected SMEM protocol version (compared against the SBL entry). */
+#define SMEM_VERSION 0x000B
+
+/* Bit flags for msm_smem_debug_mask. */
+enum {
+ MSM_SMEM_DEBUG = 1U << 0,
+ MSM_SMEM_INFO = 1U << 1,
+};
+
+/* Runtime-tunable log verbosity, exposed as module param "debug_mask". */
+static int msm_smem_debug_mask = MSM_SMEM_INFO;
+module_param_named(debug_mask, msm_smem_debug_mask,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+/* IPC logging context; logging macros are no-ops until this is set. */
+static void *smem_ipc_log_ctx;
+#define NUM_LOG_PAGES 4
+
+/* Log to the IPC log only (silently dropped if the context isn't ready). */
+#define IPC_LOG(x...) do { \
+ if (smem_ipc_log_ctx) \
+ ipc_log_string(smem_ipc_log_ctx, x); \
+ } while (0)
+
+
+/* Errors go both to the kernel log and the IPC log. */
+#define LOG_ERR(x...) do { \
+ pr_err(x); \
+ IPC_LOG(x); \
+ } while (0)
+#define SMEM_DBG(x...) do { \
+ if (msm_smem_debug_mask & MSM_SMEM_DEBUG) \
+ IPC_LOG(x); \
+ } while (0)
+#define SMEM_INFO(x...) do { \
+ if (msm_smem_debug_mask & MSM_SMEM_INFO) \
+ IPC_LOG(x); \
+ } while (0)
+
+/* Name of the remote spinlock protecting the SMEM allocation heap. */
+#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"
+
+/* Main SMEM RAM region (virtual base, size, physical base). */
+static void *smem_ram_base;
+static resource_size_t smem_ram_size;
+static phys_addr_t smem_ram_phys;
+/* Inter-processor lock serializing heap access with remote processors. */
+static remote_spinlock_t remote_spinlock;
+/* Table of mapped SMEM regions; index 0 is the main (secured) region. */
+static uint32_t num_smem_areas;
+static struct smem_area *smem_areas;
+static struct ramdump_segment *smem_ramdump_segments;
+static int spinlocks_initialized;
+static void *smem_ramdump_dev;
+static DEFINE_MUTEX(spinlock_init_lock);
+static DEFINE_SPINLOCK(smem_init_check_lock);
+static int smem_module_inited;
+static RAW_NOTIFIER_HEAD(smem_module_init_notifier_list);
+static DEFINE_MUTEX(smem_module_init_notifier_lock);
+/* Set true once probe() completes; gates EPROBE_DEFER in public APIs. */
+static bool probe_done;
+
+/* smem security feature components */
+#define SMEM_TOC_IDENTIFIER 0x434f5424 /* "$TOC" */
+#define SMEM_TOC_MAX_EXCLUSIONS 4
+#define SMEM_PART_HDR_IDENTIFIER 0x54525024 /* "$PRT" */
+/* Written at the head of every allocation; mismatch implies corruption. */
+#define SMEM_ALLOCATION_CANARY 0xa5a5
+
+/* One secured-partition descriptor inside the security TOC. */
+struct smem_toc_entry {
+ uint32_t offset;
+ uint32_t size;
+ uint32_t flags;
+ uint16_t host0;
+ uint16_t host1;
+ uint32_t size_cacheline;
+ uint32_t reserved[3];
+ uint32_t exclusion_sizes[SMEM_TOC_MAX_EXCLUSIONS];
+};
+
+/* Security table of contents located at the end of the main SMEM region. */
+struct smem_toc {
+ /* Identifier is a constant, set to SMEM_TOC_IDENTIFIER. */
+ uint32_t identifier;
+ uint32_t version;
+ uint32_t num_entries;
+ uint32_t reserved[5];
+ struct smem_toc_entry entry[];
+};
+
+/*
+ * Header at the start of a secured partition.  Uncached allocations grow
+ * up from offset_free_uncached; cached allocations grow down from
+ * offset_free_cached.
+ */
+struct smem_partition_header {
+ /* Identifier is a constant, set to SMEM_PART_HDR_IDENTIFIER. */
+ uint32_t identifier;
+ uint16_t host0;
+ uint16_t host1;
+ uint32_t size;
+ uint32_t offset_free_uncached;
+ uint32_t offset_free_cached;
+ uint32_t reserved[3];
+};
+
+/* Per-item header inside a secured partition. */
+struct smem_partition_allocation_header {
+ /* Canary is a constant, set to SMEM_ALLOCATION_CANARY */
+ uint16_t canary;
+ uint16_t smem_type;
+ uint32_t size; /* includes padding bytes */
+ uint16_t padding_data;
+ uint16_t padding_hdr;
+ uint32_t reserved[1];
+};
+
+/* Apps-local view of one secured partition; offset == 0 means "none". */
+struct smem_partition_info {
+ uint32_t partition_num;
+ uint32_t offset;
+ uint32_t size_cacheline;
+};
+
+/* Indexed by remote host ID; filled in by smem_init_security_partition(). */
+static struct smem_partition_info partitions[NUM_SMEM_SUBSYSTEMS];
+/* end smem security feature components */
+
+/* Identifier for the SMEM target info struct. */
+#define SMEM_TARG_INFO_IDENTIFIER 0x49494953 /* "SIII" in little-endian. */
+
+struct smem_targ_info_type {
+ /* Identifier is a constant, set to SMEM_TARG_INFO_IDENTIFIER. */
+ uint32_t identifier;
+ uint32_t size;
+ phys_addr_t phys_base_addr;
+};
+
+/* Associates a subsystem-restart notifier with its SMEM processor ID. */
+struct restart_notifier_block {
+ unsigned processor;
+ char *name;
+ struct notifier_block nb;
+};
+
+static int restart_notifier_cb(struct notifier_block *this,
+ unsigned long code,
+ void *data);
+
+/*
+ * Subsystems whose restart must release SMEM remote spinlocks.  Note the
+ * same processor ID may appear under multiple names (e.g. modem/gss,
+ * lpass/adsp) to cover alternate subsystem naming.
+ */
+static struct restart_notifier_block restart_notifiers[] = {
+ {SMEM_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
+ {SMEM_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
+ {SMEM_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
+ {SMEM_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
+ {SMEM_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
+ {SMEM_Q6, "adsp", .nb.notifier_call = restart_notifier_cb},
+};
+
+static int init_smem_remote_spinlock(void);
+
+/**
+ * is_probe_done() - Did the probe function successfully complete
+ *
+ * @return - true if probe successfully completed, false if otherwise
+ *
+ * Helper function for EPROBE_DEFER support. If this function returns false,
+ * the calling function should immediately return -EPROBE_DEFER.
+ */
+static bool is_probe_done(void)
+{
+ /* NOTE(review): probe_done is presumably set at the end of the
+  * platform driver probe(); the probe function is outside this chunk. */
+ return probe_done;
+}
+
+/**
+ * smem_phys_to_virt() - Convert a physical base and offset to virtual address
+ *
+ * @base: physical base address to check
+ * @offset: offset from the base to get the final address
+ * @returns: virtual SMEM address; NULL for failure
+ *
+ * Takes a physical address and an offset and checks if the resulting physical
+ * address would fit into one of the smem regions. If so, returns the
+ * corresponding virtual address. Otherwise returns NULL.
+ */
+static void *smem_phys_to_virt(phys_addr_t base, unsigned offset)
+{
+ int i;
+ phys_addr_t phys_addr;
+ resource_size_t size;
+
+ /* Reject base+offset combinations that wrap the physical address. */
+ if (OVERFLOW_ADD_UNSIGNED(phys_addr_t, base, offset))
+ return NULL;
+
+ if (!smem_areas) {
+ /*
+ * Early boot - no area configuration yet, so default
+ * to using the main memory region.
+ *
+ * To remove the MSM_SHARED_RAM_BASE and the static
+ * mapping of SMEM in the future, add dump_stack()
+ * to identify the early callers of smem_get_entry()
+ * (which calls this function) and replace those calls
+ * with a new function that knows how to lookup the
+ * SMEM base address before SMEM has been probed.
+ */
+ phys_addr = smem_ram_phys;
+ size = smem_ram_size;
+
+ if (base >= phys_addr && base + offset < phys_addr + size) {
+ /* Also guard the virtual-address addition against wrap. */
+ if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
+ (uintptr_t)smem_ram_base, offset)) {
+ SMEM_INFO("%s: overflow %p %x\n", __func__,
+ smem_ram_base, offset);
+ return NULL;
+ }
+
+ return smem_ram_base + offset;
+ } else {
+ return NULL;
+ }
+ }
+ /* Normal path: find the configured region containing base+offset. */
+ for (i = 0; i < num_smem_areas; ++i) {
+ phys_addr = smem_areas[i].phys_addr;
+ size = smem_areas[i].size;
+
+ if (base < phys_addr || base + offset >= phys_addr + size)
+ continue;
+
+ if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
+ (uintptr_t)smem_areas[i].virt_addr, offset)) {
+ SMEM_INFO("%s: overflow %p %x\n", __func__,
+ smem_areas[i].virt_addr, offset);
+ return NULL;
+ }
+
+ return smem_areas[i].virt_addr + offset;
+ }
+
+ return NULL;
+}
+
+/**
+ * smem_virt_to_phys() - Convert SMEM address to physical address.
+ *
+ * @smem_address: Address of SMEM item (returned by smem_alloc(), etc)
+ * @returns: Physical address (or NULL if there is a failure)
+ *
+ * Only intended for handing an SMEM item to a DMA engine.  Unlike the other
+ * public APIs this never returns EPROBE_DEFER: the caller must already hold
+ * a valid @smem_address obtained from an API that performed that check.
+ */
+phys_addr_t smem_virt_to_phys(void *smem_address)
+{
+ int idx;
+
+ if (!smem_areas)
+ return 0;
+
+ /* Scan the mapped regions for the one containing the address. */
+ for (idx = 0; idx < num_smem_areas; ++idx) {
+ void *start = smem_areas[idx].virt_addr;
+ void *end = start + smem_areas[idx].size;
+
+ if (smem_address < start || smem_address >= end)
+ continue;
+
+ /* Same offset into the region's physical window. */
+ return smem_areas[idx].phys_addr +
+ (smem_address - start);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(smem_virt_to_phys);
+
+/**
+ * __smem_get_entry_nonsecure - Get pointer and size of existing SMEM item
+ *
+ * @id: ID of SMEM item
+ * @size: Pointer to size variable for storing the result
+ * @skip_init_check: True means do not verify that SMEM has been initialized
+ * @use_rspinlock: True to use the remote spinlock
+ * @returns: Pointer to SMEM item or NULL if it doesn't exist
+ */
+static void *__smem_get_entry_nonsecure(unsigned id, unsigned *size,
+ bool skip_init_check, bool use_rspinlock)
+{
+ struct smem_shared *shared = smem_ram_base;
+ struct smem_heap_entry *toc = shared->heap_toc;
+ /* Only take the lock if it has actually been initialized. */
+ int use_spinlocks = spinlocks_initialized && use_rspinlock;
+ void *ret = 0;
+ unsigned long flags = 0;
+
+ if (!skip_init_check && !smem_initialized_check())
+ return ret;
+
+ if (id >= SMEM_NUM_ITEMS)
+ return ret;
+
+ if (use_spinlocks)
+ remote_spin_lock_irqsave(&remote_spinlock, flags);
+ /* toc is in device memory and cannot be speculatively accessed */
+ if (toc[id].allocated) {
+ phys_addr_t phys_base;
+
+ *size = toc[id].size;
+ /* Read size before the base/offset used to form the pointer. */
+ barrier();
+
+ phys_base = toc[id].reserved & BASE_ADDR_MASK;
+ /* A zero base means the item lives in the main SMEM region. */
+ if (!phys_base)
+ phys_base = smem_ram_phys;
+ ret = smem_phys_to_virt(phys_base, toc[id].offset);
+ } else {
+ *size = 0;
+ }
+ if (use_spinlocks)
+ remote_spin_unlock_irqrestore(&remote_spinlock, flags);
+
+ return ret;
+}
+
+/**
+ * __smem_get_entry_secure - Get pointer and size of existing SMEM item with
+ * security support
+ *
+ * @id: ID of SMEM item
+ * @size: Pointer to size variable for storing the result
+ * @to_proc: SMEM host that shares the item with apps
+ * @flags: Item attribute flags
+ * @skip_init_check: True means do not verify that SMEM has been initialized
+ * @use_rspinlock: True to use the remote spinlock
+ * @returns: Pointer to SMEM item or NULL if it doesn't exist
+ *
+ * BUG()s on partition header or allocation canary corruption, since
+ * continuing with a corrupted shared heap is unsafe.
+ */
+static void *__smem_get_entry_secure(unsigned id,
+ unsigned *size,
+ unsigned to_proc,
+ unsigned flags,
+ bool skip_init_check,
+ bool use_rspinlock)
+{
+ struct smem_partition_header *hdr;
+ unsigned long lflags = 0;
+ void *item = NULL;
+ struct smem_partition_allocation_header *alloc_hdr;
+ uint32_t partition_num;
+ uint32_t a_hdr_size;
+ int rc;
+
+ SMEM_DBG("%s(%u, %u, %u, %u, %d, %d)\n", __func__, id, *size, to_proc,
+ flags, skip_init_check, use_rspinlock);
+
+ if (!skip_init_check && !smem_initialized_check())
+ return NULL;
+
+ if (id >= SMEM_NUM_ITEMS) {
+ SMEM_INFO("%s: invalid id %d\n", __func__, id);
+ return NULL;
+ }
+
+ if (!(flags & SMEM_ANY_HOST_FLAG) && to_proc >= NUM_SMEM_SUBSYSTEMS) {
+ SMEM_INFO("%s: id %u invalid to_proc %d\n", __func__, id,
+ to_proc);
+ return NULL;
+ }
+
+ /* No secured partition for this host: fall back to the common heap. */
+ if (flags & SMEM_ANY_HOST_FLAG || !partitions[to_proc].offset)
+ return __smem_get_entry_nonsecure(id, size, skip_init_check,
+ use_rspinlock);
+
+ partition_num = partitions[to_proc].partition_num;
+ hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
+ if (unlikely(!spinlocks_initialized)) {
+ rc = init_smem_remote_spinlock();
+ if (unlikely(rc)) {
+ SMEM_INFO(
+ "%s: id:%u remote spinlock init failed %d\n",
+ __func__, id, rc);
+ return NULL;
+ }
+ }
+ if (use_rspinlock)
+ remote_spin_lock_irqsave(&remote_spinlock, lflags);
+ if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
+ LOG_ERR(
+ "%s: SMEM corruption detected. Partition %d to %d at %p\n",
+ __func__,
+ partition_num,
+ to_proc,
+ hdr);
+ BUG();
+ }
+
+ if (flags & SMEM_ITEM_CACHED_FLAG) {
+ /* Cached items grow down from the end of the partition;
+  * walk headers backward until the cached free boundary. */
+ a_hdr_size = ALIGN(sizeof(*alloc_hdr),
+ partitions[to_proc].size_cacheline);
+ for (alloc_hdr = (void *)(hdr) + hdr->size - a_hdr_size;
+ (void *)(alloc_hdr) > (void *)(hdr) +
+ hdr->offset_free_cached;
+ alloc_hdr = (void *)(alloc_hdr) -
+ alloc_hdr->size - a_hdr_size) {
+ if (alloc_hdr->canary != SMEM_ALLOCATION_CANARY) {
+ LOG_ERR(
+ "%s: SMEM corruption detected. Partition %d to %d at %p\n",
+ __func__,
+ partition_num,
+ to_proc,
+ alloc_hdr);
+ BUG();
+
+ }
+ if (alloc_hdr->smem_type == id) {
+ /* 8 byte alignment to match legacy */
+ *size = ALIGN(alloc_hdr->size -
+ alloc_hdr->padding_data, 8);
+ item = (void *)(alloc_hdr) - alloc_hdr->size;
+ break;
+ }
+ }
+ } else {
+ /* Uncached items grow up from just past the partition header;
+  * walk headers forward until the uncached free boundary. */
+ for (alloc_hdr = (void *)(hdr) + sizeof(*hdr);
+ (void *)(alloc_hdr) < (void *)(hdr) +
+ hdr->offset_free_uncached;
+ alloc_hdr = (void *)(alloc_hdr) +
+ sizeof(*alloc_hdr) +
+ alloc_hdr->padding_hdr +
+ alloc_hdr->size) {
+ if (alloc_hdr->canary != SMEM_ALLOCATION_CANARY) {
+ LOG_ERR(
+ "%s: SMEM corruption detected. Partition %d to %d at %p\n",
+ __func__,
+ partition_num,
+ to_proc,
+ alloc_hdr);
+ BUG();
+
+ }
+ if (alloc_hdr->smem_type == id) {
+ /* 8 byte alignment to match legacy */
+ *size = ALIGN(alloc_hdr->size -
+ alloc_hdr->padding_data, 8);
+ item = (void *)(alloc_hdr) +
+ sizeof(*alloc_hdr) +
+ alloc_hdr->padding_hdr;
+ break;
+ }
+ }
+ }
+ if (use_rspinlock)
+ remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
+
+ return item;
+}
+
+/*
+ * __smem_find - Look up an existing SMEM item and validate its size
+ *
+ * @id: ID of SMEM item
+ * @size_in: expected item size (rounded up to an 8-byte multiple)
+ * @skip_init_check: true to skip the SMEM-initialized verification
+ *
+ * Returns the item pointer when it exists and its stored size matches the
+ * aligned expected size; returns NULL (and logs on a size mismatch)
+ * otherwise.
+ */
+static void *__smem_find(unsigned id, unsigned size_in, bool skip_init_check)
+{
+ unsigned actual_size;
+ unsigned aligned_size = ALIGN(size_in, 8);
+ void *item = __smem_get_entry_nonsecure(id, &actual_size,
+ skip_init_check, true);
+
+ if (item && aligned_size == actual_size)
+ return item;
+
+ if (item)
+ SMEM_INFO("smem_find(%u, %u): wrong size %u\n",
+ id, aligned_size, actual_size);
+
+ return NULL;
+}
+
+/**
+ * smem_find - Find existing item with security support
+ *
+ * @id: ID of SMEM item
+ * @size_in: Size of the SMEM item
+ * @to_proc: SMEM host that shares the item with apps
+ * @flags: Item attribute flags
+ * @returns: Pointer to SMEM item, NULL if it doesn't exist, or -EPROBE_DEFER
+ * if the driver is not ready
+ */
+void *smem_find(unsigned id, unsigned size_in, unsigned to_proc, unsigned flags)
+{
+ unsigned size;
+ void *ptr;
+
+ SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, size_in, to_proc,
+ flags);
+
+ /*
+ * Handle the circular dependecy between SMEM and software implemented
+ * remote spinlocks. SMEM must initialize the remote spinlocks in
+ * probe() before it is done. EPROBE_DEFER handling will not resolve
+ * this code path, so we must be intellegent to know that the spinlock
+ * item is a special case.
+ */
+ if (!is_probe_done() && id != SMEM_SPINLOCK_ARRAY)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ ptr = smem_get_entry(id, &size, to_proc, flags);
+ if (!ptr)
+ return 0;
+
+ /* Stored sizes are 8-byte aligned; compare against the aligned
+  * caller expectation to avoid false mismatches. */
+ size_in = ALIGN(size_in, 8);
+ if (size_in != size) {
+ SMEM_INFO("smem_find(%u, %u, %u, %u): wrong size %u\n",
+ id, size_in, to_proc, flags, size);
+ return 0;
+ }
+
+ return ptr;
+}
+EXPORT_SYMBOL(smem_find);
+
+/**
+ * alloc_item_nonsecure - Allocate an SMEM item in the nonsecure partition
+ *
+ * @id: ID of SMEM item
+ * @size_in: Size to allocate
+ * @returns: Pointer to SMEM item or NULL for error
+ *
+ * Assumes the id parameter is valid and does not already exist. Assumes
+ * size_in is already adjusted for alignment, if necessary. Requires the
+ * remote spinlock to already be locked.
+ */
+static void *alloc_item_nonsecure(unsigned id, unsigned size_in)
+{
+ void *smem_base = smem_ram_base;
+ struct smem_shared *shared = smem_base;
+ struct smem_heap_entry *toc = shared->heap_toc;
+ void *ret = NULL;
+
+ if (shared->heap_info.heap_remaining >= size_in) {
+ /* Carve the item from the current free offset. */
+ toc[id].offset = shared->heap_info.free_offset;
+ toc[id].size = size_in;
+ /*
+ * wmb() is necessary to ensure the allocation data is
+ * consistent before setting the allocated flag to prevent race
+ * conditions with remote processors
+ */
+ wmb();
+ toc[id].allocated = 1;
+
+ shared->heap_info.free_offset += size_in;
+ shared->heap_info.heap_remaining -= size_in;
+ ret = smem_base + toc[id].offset;
+ /*
+ * wmb() is necessary to ensure the heap data is consistent
+ * before continuing to prevent race conditions with remote
+ * processors
+ */
+ wmb();
+ } else {
+ SMEM_INFO("%s: id %u not enough memory %u (required %u)\n",
+ __func__, id, shared->heap_info.heap_remaining,
+ size_in);
+ }
+
+ return ret;
+}
+
+/**
+ * alloc_item_secure - Allocate an SMEM item in a secure partition
+ *
+ * @id: ID of SMEM item
+ * @size_in: Size to allocate
+ * @to_proc: SMEM host that shares the item with apps
+ * @flags: Item attribute flags
+ * @returns: Pointer to SMEM item or NULL for error
+ *
+ * Assumes the id parameter is valid and does not already exist. Assumes
+ * size_in is the raw size requested by the client. Assumes to_proc is a valid
+ * host, and a valid partition to that host exists. Requires the remote
+ * spinlock to already be locked.
+ */
+static void *alloc_item_secure(unsigned id, unsigned size_in, unsigned to_proc,
+ unsigned flags)
+{
+ void *smem_base = smem_ram_base;
+ struct smem_partition_header *hdr;
+ struct smem_partition_allocation_header *alloc_hdr;
+ uint32_t a_hdr_size;
+ uint32_t a_data_size;
+ uint32_t size_cacheline;
+ uint32_t free_space;
+ uint32_t partition_num;
+ void *ret = NULL;
+
+ hdr = smem_base + partitions[to_proc].offset;
+ partition_num = partitions[to_proc].partition_num;
+
+ if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
+ LOG_ERR(
+ "%s: SMEM corruption detected. Partition %d to %d at %p\n",
+ __func__,
+ partition_num,
+ to_proc,
+ hdr);
+ BUG();
+ }
+
+ size_cacheline = partitions[to_proc].size_cacheline;
+ /* Free space is the gap between the uncached (growing up) and
+  * cached (growing down) allocation fronts. */
+ free_space = hdr->offset_free_cached -
+ hdr->offset_free_uncached;
+
+ if (flags & SMEM_ITEM_CACHED_FLAG) {
+ /* Cached: header and data are cacheline aligned and the item
+  * is placed below the cached front (header above data). */
+ a_hdr_size = ALIGN(sizeof(*alloc_hdr), size_cacheline);
+ a_data_size = ALIGN(size_in, size_cacheline);
+ if (free_space < a_hdr_size + a_data_size) {
+ SMEM_INFO(
+ "%s: id %u not enough memory %u (required %u)\n",
+ __func__, id, free_space,
+ a_hdr_size + a_data_size);
+ return ret;
+ }
+ alloc_hdr = (void *)(hdr) + hdr->offset_free_cached -
+ a_hdr_size;
+ alloc_hdr->canary = SMEM_ALLOCATION_CANARY;
+ alloc_hdr->smem_type = id;
+ alloc_hdr->size = a_data_size;
+ alloc_hdr->padding_data = a_data_size - size_in;
+ alloc_hdr->padding_hdr = a_hdr_size - sizeof(*alloc_hdr);
+ hdr->offset_free_cached = hdr->offset_free_cached -
+ a_hdr_size - a_data_size;
+ ret = (void *)(alloc_hdr) - a_data_size;
+ /*
+ * The SMEM protocol currently does not support cacheable
+ * areas within the smem region, but if it ever does in the
+ * future, then cache management needs to be done here.
+ * The area of memory this item is allocated from will need to
+ * be dynamically made cachable, and a cache flush of the
+ * allocation header using __cpuc_flush_dcache_area and
+ * outer_flush_area will need to be done.
+ */
+ } else {
+ /* Uncached: 8-byte-aligned data placed just above the
+  * uncached front, preceded by its allocation header. */
+ a_hdr_size = sizeof(*alloc_hdr);
+ a_data_size = ALIGN(size_in, 8);
+ if (free_space < a_hdr_size + a_data_size) {
+ SMEM_INFO(
+ "%s: id %u not enough memory %u (required %u)\n",
+ __func__, id, free_space,
+ a_hdr_size + a_data_size);
+ return ret;
+ }
+ alloc_hdr = (void *)(hdr) + hdr->offset_free_uncached;
+ alloc_hdr->canary = SMEM_ALLOCATION_CANARY;
+ alloc_hdr->smem_type = id;
+ alloc_hdr->size = a_data_size;
+ alloc_hdr->padding_data = a_data_size - size_in;
+ alloc_hdr->padding_hdr = a_hdr_size - sizeof(*alloc_hdr);
+ hdr->offset_free_uncached = hdr->offset_free_uncached +
+ a_hdr_size + a_data_size;
+ ret = alloc_hdr + 1;
+ }
+ /*
+ * wmb() is necessary to ensure the heap and allocation data is
+ * consistent before continuing to prevent race conditions with remote
+ * processors
+ */
+ wmb();
+
+ return ret;
+}
+
+/**
+ * smem_alloc - Find an existing item, otherwise allocate it with security
+ * support
+ *
+ * @id: ID of SMEM item
+ * @size_in: Size of the SMEM item
+ * @to_proc: SMEM host that shares the item with apps
+ * @flags: Item attribute flags
+ * @returns: Pointer to SMEM item, NULL if it couldn't be found/allocated,
+ * or -EPROBE_DEFER if the driver is not ready
+ */
+void *smem_alloc(unsigned id, unsigned size_in, unsigned to_proc,
+ unsigned flags)
+{
+ unsigned long lflags;
+ void *ret = NULL;
+ int rc;
+ unsigned size_out;
+ unsigned a_size_in;
+
+ SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, size_in, to_proc,
+ flags);
+
+ if (!is_probe_done())
+ return ERR_PTR(-EPROBE_DEFER);
+
+ if (!smem_initialized_check())
+ return NULL;
+
+ if (id >= SMEM_NUM_ITEMS) {
+ SMEM_INFO("%s: invalid id %u\n", __func__, id);
+ return NULL;
+ }
+
+ if (!(flags & SMEM_ANY_HOST_FLAG) && to_proc >= NUM_SMEM_SUBSYSTEMS) {
+ SMEM_INFO("%s: invalid to_proc %u for id %u\n", __func__,
+ to_proc, id);
+ return NULL;
+ }
+
+ if (unlikely(!spinlocks_initialized)) {
+ rc = init_smem_remote_spinlock();
+ if (unlikely(rc)) {
+ SMEM_INFO("%s: id:%u remote spinlock init failed %d\n",
+ __func__, id, rc);
+ return NULL;
+ }
+ }
+
+ a_size_in = ALIGN(size_in, 8);
+ /* Hold the remote spinlock across lookup+allocate so the check and
+  * the allocation are atomic with respect to remote processors. */
+ remote_spin_lock_irqsave(&remote_spinlock, lflags);
+
+ ret = __smem_get_entry_secure(id, &size_out, to_proc, flags, true,
+ false);
+ if (ret) {
+ /* Item already exists: succeed only on an exact size match. */
+ SMEM_INFO("%s: %u already allocated\n", __func__, id);
+ if (a_size_in == size_out) {
+ remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
+ return ret;
+ } else {
+ remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
+ SMEM_INFO("%s: id %u wrong size %u (expected %u)\n",
+ __func__, id, size_out, a_size_in);
+ return NULL;
+ }
+ }
+
+ if (id > SMEM_FIXED_ITEM_LAST) {
+ SMEM_INFO("%s: allocating %u size %u to_proc %u flags %u\n",
+ __func__, id, size_in, to_proc, flags);
+ if (flags & SMEM_ANY_HOST_FLAG || !partitions[to_proc].offset)
+ ret = alloc_item_nonsecure(id, a_size_in);
+ else
+ ret = alloc_item_secure(id, size_in, to_proc, flags);
+
+ } else {
+ /* Fixed items are pre-allocated; dynamic allocation refused. */
+ SMEM_INFO("%s: attempted to allocate non-dynamic item %u\n",
+ __func__, id);
+ }
+
+ remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
+ return ret;
+}
+EXPORT_SYMBOL(smem_alloc);
+
+/**
+ * smem_get_entry - Get existing item with security support
+ *
+ * @id: ID of SMEM item
+ * @size: Pointer to size variable for storing the result
+ * @to_proc: SMEM host that shares the item with apps
+ * @flags: Item attribute flags
+ * @returns: Pointer to SMEM item, NULL if it doesn't exist, or -EPROBE_DEFER
+ * if the driver isn't ready
+ */
+void *smem_get_entry(unsigned id, unsigned *size, unsigned to_proc,
+ unsigned flags)
+{
+ SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, *size, to_proc, flags);
+
+ /*
+ * Handle the circular dependecy between SMEM and software implemented
+ * remote spinlocks. SMEM must initialize the remote spinlocks in
+ * probe() before it is done. EPROBE_DEFER handling will not resolve
+ * this code path, so we must be intellegent to know that the spinlock
+ * item is a special case.
+ */
+ if (!is_probe_done() && id != SMEM_SPINLOCK_ARRAY)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ /* Take the remote spinlock; do not skip the init check. */
+ return __smem_get_entry_secure(id, size, to_proc, flags, false, true);
+}
+EXPORT_SYMBOL(smem_get_entry);
+
+/**
+ * smem_get_entry_no_rlock - Get existing item without using remote spinlock
+ *
+ * @id: ID of SMEM item
+ * @size_out: Pointer to size variable for storing the result
+ * @to_proc: SMEM host that shares the item with apps
+ * @flags: Item attribute flags
+ * @returns: Pointer to SMEM item, NULL if it doesn't exist, or -EPROBE_DEFER
+ * if the driver isn't ready
+ *
+ * This function does not lock the remote spinlock and should only be used in
+ * failure-recover cases such as retrieving the subsystem failure reason during
+ * subsystem restart.
+ */
+void *smem_get_entry_no_rlock(unsigned id, unsigned *size_out, unsigned to_proc,
+ unsigned flags)
+{
+ if (!is_probe_done())
+ return ERR_PTR(-EPROBE_DEFER);
+
+ /* use_rspinlock=false: the dead remote side may still hold the lock. */
+ return __smem_get_entry_secure(id, size_out, to_proc, flags, false,
+ false);
+}
+EXPORT_SYMBOL(smem_get_entry_no_rlock);
+
+/**
+ * smem_get_remote_spinlock - Remote spinlock pointer for unit testing.
+ *
+ * @returns: pointer to SMEM remote spinlock
+ */
+remote_spinlock_t *smem_get_remote_spinlock(void)
+{
+ /* NOTE(review): init failure is ignored here; the returned lock may
+  * be uninitialized if init_smem_remote_spinlock() fails. */
+ if (unlikely(!spinlocks_initialized))
+ init_smem_remote_spinlock();
+ return &remote_spinlock;
+}
+EXPORT_SYMBOL(smem_get_remote_spinlock);
+
+/**
+ * smem_get_free_space() - Get the available allocation free space for a
+ * partition
+ *
+ * @to_proc: remote SMEM host. Determines the applicable partition
+ * @returns: size in bytes available to allocate
+ *
+ * Helper function for SMD so that SMD only scans the channel allocation
+ * table for a partition when it is reasonably certain that a channel has
+ * actually been created, because scanning can be expensive. Creating a channel
+ * will consume some of the free space in a partition, so SMD can compare the
+ * last free space size against the current free space size to determine if
+ * a channel may have been created. SMD can't do this directly, because the
+ * necessary partition internals are restricted to just SMEM.
+ */
+unsigned smem_get_free_space(unsigned to_proc)
+{
+ struct smem_partition_header *hdr;
+ struct smem_shared *shared;
+
+ if (to_proc >= NUM_SMEM_SUBSYSTEMS) {
+ pr_err("%s: invalid to_proc:%d\n", __func__, to_proc);
+ return UINT_MAX;
+ }
+
+ if (partitions[to_proc].offset) {
+ /* Secured partition: gap between the cached (growing down)
+  * and uncached (growing up) allocation fronts. */
+ if (unlikely(OVERFLOW_ADD_UNSIGNED(uintptr_t,
+ (uintptr_t)smem_areas[0].virt_addr,
+ partitions[to_proc].offset))) {
+ pr_err("%s: unexpected overflow detected\n", __func__);
+ return UINT_MAX;
+ }
+ hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
+ return hdr->offset_free_cached - hdr->offset_free_uncached;
+ } else {
+ /* No secured partition: report the common heap remainder. */
+ shared = smem_ram_base;
+ return shared->heap_info.heap_remaining;
+ }
+}
+EXPORT_SYMBOL(smem_get_free_space);
+
+/**
+ * smem_get_version() - Get the smem user version number
+ *
+ * @idx: SMEM user idx in SMEM_VERSION_INFO table.
+ * @returns: smem version number if success otherwise zero.
+ */
+unsigned smem_get_version(unsigned idx)
+{
+ int *version_array;
+
+ /*
+ * The version table holds SMEM_VERSION_INFO_SIZE / 4 == 32 entries,
+ * so the valid index range is 0..31; idx == 32 would read one entry
+ * past the end of the table.
+ */
+ if (idx >= 32) {
+ pr_err("%s: invalid idx:%u\n", __func__, idx);
+ return 0;
+ }
+
+ version_array = __smem_find(SMEM_VERSION_INFO, SMEM_VERSION_INFO_SIZE,
+ true);
+ if (version_array == NULL)
+ return 0;
+
+ return version_array[idx];
+}
+EXPORT_SYMBOL(smem_get_version);
+
+/**
+ * init_smem_remote_spinlock - Reentrant remote spinlock initialization
+ *
+ * @returns: success or error code for failure
+ */
+static int init_smem_remote_spinlock(void)
+{
+ int rc = 0;
+
+ /*
+ * Optimistic locking. Init only needs to be done once by the first
+ * caller. After that, serializing inits between different callers
+ * is unnecessary. The second check after the lock ensures init
+ * wasn't previously completed by someone else before the lock could
+ * be grabbed.
+ */
+ if (!spinlocks_initialized) {
+ mutex_lock(&spinlock_init_lock);
+ if (!spinlocks_initialized) {
+ rc = remote_spin_lock_init(&remote_spinlock,
+ SMEM_SPINLOCK_SMEM_ALLOC);
+ /* Only mark initialized on success so later callers retry. */
+ if (!rc)
+ spinlocks_initialized = 1;
+ }
+ mutex_unlock(&spinlock_init_lock);
+ }
+ return rc;
+}
+
+/**
+ * smem_initialized_check - Reentrant check that smem has been initialized
+ *
+ * @returns: true if initialized, false if not.
+ *
+ * The result is computed once and cached in @checked/@is_inited; subsequent
+ * calls take the lock-free fast path.
+ */
+bool smem_initialized_check(void)
+{
+ static int checked;
+ static int is_inited;
+ unsigned long flags;
+ struct smem_shared *smem;
+
+ /* Fast path: result already cached, no locking needed. */
+ if (likely(checked)) {
+ if (unlikely(!is_inited))
+ LOG_ERR("%s: smem not initialized\n", __func__);
+ return is_inited;
+ }
+
+ spin_lock_irqsave(&smem_init_check_lock, flags);
+ /* Re-check under the lock: another CPU may have finished first. */
+ if (checked) {
+ spin_unlock_irqrestore(&smem_init_check_lock, flags);
+ if (unlikely(!is_inited))
+ LOG_ERR("%s: smem not initialized\n", __func__);
+ return is_inited;
+ }
+
+ smem = smem_ram_base;
+
+ if (smem->heap_info.initialized != 1)
+ goto failed;
+ if (smem->heap_info.reserved != 0)
+ goto failed;
+
+ /*
+ * The Modem SBL is now the Master SBL version and is required to
+ * pre-initialize SMEM and fill in any necessary configuration
+ * structures. Without the extra configuration data, the SMEM driver
+ * cannot be properly initialized.
+ */
+ if (smem_get_version(MODEM_SBL_VERSION_INDEX) != SMEM_VERSION << 16) {
+ pr_err("%s: SBL version not correct\n", __func__);
+ goto failed;
+ }
+
+ is_inited = 1;
+ checked = 1;
+ spin_unlock_irqrestore(&smem_init_check_lock, flags);
+ return is_inited;
+
+failed:
+ is_inited = 0;
+ checked = 1;
+ spin_unlock_irqrestore(&smem_init_check_lock, flags);
+ LOG_ERR(
+ "%s: shared memory needs to be initialized by SBL before booting\n",
+ __func__);
+ return is_inited;
+}
+EXPORT_SYMBOL(smem_initialized_check);
+
+/*
+ * restart_notifier_cb - subsystem-restart notifier callback
+ *
+ * On SUBSYS_AFTER_SHUTDOWN, force-release any remote spinlocks the dead
+ * processor may still hold and capture an SMEM ramdump if a ramdump
+ * device was created.  All other notifications are ignored.
+ */
+static int restart_notifier_cb(struct notifier_block *this,
+ unsigned long code,
+ void *data)
+{
+ if (code == SUBSYS_AFTER_SHUTDOWN) {
+ struct restart_notifier_block *notifier;
+
+ notifier = container_of(this,
+ struct restart_notifier_block, nb);
+ SMEM_INFO("%s: ssrestart for processor %d ('%s')\n",
+ __func__, notifier->processor,
+ notifier->name);
+
+ /* Release locks the crashed processor may still own. */
+ remote_spin_release(&remote_spinlock, notifier->processor);
+ remote_spin_release_all(notifier->processor);
+
+ if (smem_ramdump_dev) {
+ int ret;
+
+ SMEM_DBG("%s: saving ramdump\n", __func__);
+ /*
+ * XPU protection does not currently allow the
+ * auxiliary memory regions to be dumped. If this
+ * changes, then num_smem_areas + 1 should be passed
+ * into do_elf_ramdump() to dump all regions.
+ */
+ ret = do_elf_ramdump(smem_ramdump_dev,
+ smem_ramdump_segments, 1);
+ if (ret < 0)
+ LOG_ERR("%s: unable to dump smem %d\n",
+ __func__, ret);
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+
+/*
+ * modem_restart_late_init - late-initcall setup for subsystem restart
+ *
+ * Creates the "smem" ramdump device (non-fatal if it fails) and registers
+ * a restart notifier for each entry in restart_notifiers[].  Always
+ * returns 0.
+ */
+static __init int modem_restart_late_init(void)
+{
+ int i;
+ void *handle;
+ struct restart_notifier_block *nb;
+
+ smem_ramdump_dev = create_ramdump_device("smem", NULL);
+ if (IS_ERR_OR_NULL(smem_ramdump_dev)) {
+ LOG_ERR("%s: Unable to create smem ramdump device.\n",
+ __func__);
+ /* Continue without ramdump support rather than failing init. */
+ smem_ramdump_dev = NULL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
+ nb = &restart_notifiers[i];
+ handle = subsys_notif_register_notifier(nb->name, &nb->nb);
+ SMEM_DBG("%s: registering notif for '%s', handle=%p\n",
+ __func__, nb->name, handle);
+ }
+
+ return 0;
+}
+late_initcall(modem_restart_late_init);
+
+/*
+ * smem_module_init_notifier_register - register for SMEM-module-init events
+ * @nb: notifier block to add; must be non-NULL
+ *
+ * If the module has already been initialized, the callback is invoked
+ * immediately (under the notifier mutex) so late registrants don't miss
+ * the event.  Returns the chain-register result or -EINVAL for NULL @nb.
+ */
+int smem_module_init_notifier_register(struct notifier_block *nb)
+{
+ int ret;
+ if (!nb)
+ return -EINVAL;
+ mutex_lock(&smem_module_init_notifier_lock);
+ ret = raw_notifier_chain_register(&smem_module_init_notifier_list, nb);
+ if (smem_module_inited)
+ nb->notifier_call(nb, 0, NULL);
+ mutex_unlock(&smem_module_init_notifier_lock);
+ return ret;
+}
+EXPORT_SYMBOL(smem_module_init_notifier_register);
+
+/*
+ * smem_module_init_notifier_unregister - remove a previously registered
+ * SMEM-module-init notifier
+ * @nb: notifier block to remove; must be non-NULL
+ *
+ * Returns the chain-unregister result or -EINVAL for NULL @nb.
+ */
+int smem_module_init_notifier_unregister(struct notifier_block *nb)
+{
+ int ret;
+ if (!nb)
+ return -EINVAL;
+ mutex_lock(&smem_module_init_notifier_lock);
+ ret = raw_notifier_chain_unregister(&smem_module_init_notifier_list,
+ nb);
+ mutex_unlock(&smem_module_init_notifier_lock);
+ return ret;
+}
+EXPORT_SYMBOL(smem_module_init_notifier_unregister);
+
+/*
+ * smem_module_init_notify - mark the module initialized and fire the chain
+ * @state: state value passed to each notifier
+ * @data: opaque data passed to each notifier
+ *
+ * Sets smem_module_inited under the notifier mutex so that registrations
+ * racing with this call either see the flag or receive the chain callback.
+ */
+static void smem_module_init_notify(uint32_t state, void *data)
+{
+ mutex_lock(&smem_module_init_notifier_lock);
+ smem_module_inited = 1;
+ raw_notifier_call_chain(&smem_module_init_notifier_list,
+ state, data);
+ mutex_unlock(&smem_module_init_notifier_lock);
+}
+
+/**
+ * smem_init_security_partition - Init local structures for a secured smem
+ * partition that has apps as one of the hosts
+ *
+ * @entry: Entry in the security TOC for the partition to init
+ * @num: Partition ID
+ *
+ * Initialize local data structures to point to a secured smem partition
+ * that is accessible by apps and another processor. Assumes that one of the
+ * listed hosts is apps. Verifiess that the partition is valid, otherwise will
+ * skip. Checks for memory corruption and will BUG() if detected. Assumes
+ * smem_areas is already initialized and that smem_areas[0] corresponds to the
+ * smem region with the secured partitions.
+ */
+static void smem_init_security_partition(struct smem_toc_entry *entry,
+ uint32_t num)
+{
+ uint16_t remote_host;
+ struct smem_partition_header *hdr;
+
+ /* Malformed TOC entries are skipped, not fatal. */
+ if (!entry->offset) {
+ SMEM_INFO("Skipping smem partition %d - bad offset\n", num);
+ return;
+ }
+ if (!entry->size) {
+ SMEM_INFO("Skipping smem partition %d - bad size\n", num);
+ return;
+ }
+ if (!entry->size_cacheline) {
+ SMEM_INFO("Skipping smem partition %d - bad cacheline\n", num);
+ return;
+ }
+
+ /* The non-apps host of the pair is the remote host. */
+ if (entry->host0 == SMEM_APPS)
+ remote_host = entry->host1;
+ else
+ remote_host = entry->host0;
+
+ if (remote_host >= NUM_SMEM_SUBSYSTEMS) {
+ SMEM_INFO("Skipping smem partition %d - bad remote:%d\n", num,
+ remote_host);
+ return;
+ }
+ if (partitions[remote_host].offset) {
+ SMEM_INFO("Skipping smem partition %d - duplicate of %d\n", num,
+ partitions[remote_host].partition_num);
+ return;
+ }
+
+ hdr = smem_areas[0].virt_addr + entry->offset;
+
+ /* Header corruption or TOC/header disagreement is fatal. */
+ if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
+ LOG_ERR("Smem partition %d hdr magic is bad\n", num);
+ BUG();
+ }
+ if (!hdr->size) {
+ LOG_ERR("Smem partition %d size is 0\n", num);
+ BUG();
+ }
+ if (hdr->offset_free_uncached > hdr->size) {
+ LOG_ERR("Smem partition %d uncached heap exceeds size\n", num);
+ BUG();
+ }
+ if (hdr->offset_free_cached > hdr->size) {
+ LOG_ERR("Smem partition %d cached heap exceeds size\n", num);
+ BUG();
+ }
+ if (hdr->host0 != SMEM_APPS && hdr->host1 != SMEM_APPS) {
+ LOG_ERR("Smem partition %d hosts don't match TOC\n", num);
+ BUG();
+ }
+ if (hdr->host0 != remote_host && hdr->host1 != remote_host) {
+ LOG_ERR("Smem partition %d hosts don't match TOC\n", num);
+ BUG();
+ }
+
+ partitions[remote_host].partition_num = num;
+ partitions[remote_host].offset = entry->offset;
+ partitions[remote_host].size_cacheline = entry->size_cacheline;
+ SMEM_INFO("Partition %d offset:%x remote:%d\n", num, entry->offset,
+ remote_host);
+}
+
+/**
+ * smem_init_security - Init local support for secured smem
+ *
+ * Looks for a valid security TOC, and if one is found, parses it looking for
+ * partitions that apps can access. If any such partitions are found, do the
+ * required local initialization to support them. Assumes smem_areas is inited
+ * and smem_areas[0] corresponds to the smem region with the TOC.
+ */
+static void smem_init_security(void)
+{
+ struct smem_toc *toc;
+ uint32_t i;
+
+ SMEM_DBG("%s\n", __func__);
+
+ toc = smem_areas[0].virt_addr + smem_areas[0].size - 4 * 1024;
+
+ if (toc->identifier != SMEM_TOC_IDENTIFIER) {
+ LOG_ERR("%s failed: invalid TOC magic\n", __func__);
+ return;
+ }
+
+ for (i = 0; i < toc->num_entries; ++i) {
+ SMEM_DBG("Partition %d host0:%d host1:%d\n", i,
+ toc->entry[i].host0,
+ toc->entry[i].host1);
+
+ if (toc->entry[i].host0 == SMEM_APPS ||
+ toc->entry[i].host1 == SMEM_APPS)
+ smem_init_security_partition(&toc->entry[i], i);
+ }
+
+ SMEM_DBG("%s done\n", __func__);
+}
+
+/**
+ * smem_init_target_info - Init smem target information
+ *
+ * @info_addr : smem target info physical address.
+ * @size : size of the smem target info structure.
+ *
+ * This function is used to initialize the smem_targ_info structure and checks
+ * for valid identifier, if identifier is valid initialize smem variables.
+ */
+static int smem_init_target_info(phys_addr_t info_addr, resource_size_t size)
+{
+ struct smem_targ_info_type *smem_targ_info;
+ void *smem_targ_info_addr;
+
+ pr_err("%s: PA=0x%llx, size=0x%llx\n", __func__, (u64)info_addr, (u64)size);
+
+ smem_targ_info_addr = ioremap_nocache(info_addr, size);
+ if (!smem_targ_info_addr) {
+ LOG_ERR("%s: failed ioremap_nocache() of addr:%p size:%p\n",
+ __func__, (void *)info_addr, (void *)size);
+ return -ENODEV;
+ }
+ smem_targ_info =
+ (struct smem_targ_info_type __iomem *)smem_targ_info_addr;
+
+ if (smem_targ_info->identifier != SMEM_TARG_INFO_IDENTIFIER) {
+ LOG_ERR("%s failed: invalid TARGET INFO magic\n", __func__);
+ return -ENODEV;
+ }
+ smem_ram_phys = smem_targ_info->phys_base_addr;
+ smem_ram_size = smem_targ_info->size;
+ iounmap(smem_targ_info_addr);
+ return 0;
+}
+
+static int msm_smem_probe(struct platform_device *pdev)
+{
+ char *key;
+ struct resource *r;
+ phys_addr_t aux_mem_base;
+ resource_size_t aux_mem_size;
+ int temp_string_size = 11; /* max 3 digit count */
+ char temp_string[temp_string_size];
+ int ret;
+ struct ramdump_segment *ramdump_segments_tmp = NULL;
+ struct smem_area *smem_areas_tmp = NULL;
+ int smem_idx = 0;
+ bool security_enabled;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "smem_targ_info_imem");
+ if (r) {
+ pr_err("imem\n");
+ if (smem_init_target_info(r->start, resource_size(r)))
+ goto smem_targ_info_legacy;
+ goto smem_targ_info_done;
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "smem_targ_info_reg");
+ if (r) {
+ void *reg_base_addr;
+ uint64_t base_addr;
+ reg_base_addr = ioremap_nocache(r->start, resource_size(r));
+ if (!reg_base_addr) {
+ pr_err("failed to remap smem regs at 0x%llX size 0x%llX\n", (u64)r->start, (u64)resource_size(r));
+ return -ENODEV;
+ }
+ base_addr = (uint32_t)readl_relaxed(reg_base_addr);
+ base_addr |=
+ ((uint64_t)readl_relaxed(reg_base_addr + 0x4) << 32);
+ iounmap(reg_base_addr);
+ if ((base_addr == 0) || ((base_addr >> 32) != 0)) {
+ SMEM_INFO("%s: Invalid SMEM address\n", __func__);
+ goto smem_targ_info_legacy;
+ }
+ if (smem_init_target_info(base_addr,
+ sizeof(struct smem_targ_info_type)))
+ goto smem_targ_info_legacy;
+ goto smem_targ_info_done;
+ }
+
+smem_targ_info_legacy:
+ SMEM_INFO("%s: reading dt-specified SMEM address\n", __func__);
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smem");
+ if (r) {
+ smem_ram_size = resource_size(r);
+ smem_ram_phys = r->start;
+ }
+
+smem_targ_info_done:
+ if (!smem_ram_phys || !smem_ram_size) {
+ LOG_ERR("%s: Missing SMEM TARGET INFO\n", __func__);
+ return -ENODEV;
+ }
+
+ smem_ram_base = ioremap_nocache(smem_ram_phys, smem_ram_size);
+
+ if (!smem_ram_base) {
+ LOG_ERR("%s: ioremap_nocache() of addr:%pa size: %pa\n",
+ __func__,
+ &smem_ram_phys, &smem_ram_size);
+ return -ENODEV;
+ }
+
+ if (!smem_initialized_check())
+ return -ENODEV;
+
+ /*
+ * The software implementation requires smem_find(), which needs
+ * smem_ram_base to be initialized. The remote spinlock item is
+ * guaranteed to be allocated by the bootloader, so this is the
+ * safest and earliest place to init the spinlock.
+ */
+ ret = init_smem_remote_spinlock();
+ if (ret) {
+ LOG_ERR("%s: remote spinlock init failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ key = "irq-reg-base";
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+ if (!r) {
+ LOG_ERR("%s: missing '%s'\n", __func__, key);
+ return -ENODEV;
+ }
+
+ num_smem_areas = 1;
+ while (1) {
+ scnprintf(temp_string, temp_string_size, "aux-mem%d",
+ num_smem_areas);
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ temp_string);
+ if (!r)
+ break;
+
+ ++num_smem_areas;
+ if (num_smem_areas > 999) {
+ LOG_ERR("%s: max num aux mem regions reached\n",
+ __func__);
+ break;
+ }
+ }
+ /* Initialize main SMEM region and SSR ramdump region */
+ smem_areas_tmp = kmalloc_array(num_smem_areas, sizeof(struct smem_area),
+ GFP_KERNEL);
+ if (!smem_areas_tmp) {
+ LOG_ERR("%s: smem areas kmalloc failed\n", __func__);
+ ret = -ENOMEM;
+ goto free_smem_areas;
+ }
+
+ ramdump_segments_tmp = kmalloc_array(num_smem_areas,
+ sizeof(struct ramdump_segment), GFP_KERNEL);
+ if (!ramdump_segments_tmp) {
+ LOG_ERR("%s: ramdump segment kmalloc failed\n", __func__);
+ ret = -ENOMEM;
+ goto free_smem_areas;
+ }
+ smem_areas_tmp[smem_idx].phys_addr = smem_ram_phys;
+ smem_areas_tmp[smem_idx].size = smem_ram_size;
+ smem_areas_tmp[smem_idx].virt_addr = smem_ram_base;
+
+ ramdump_segments_tmp[smem_idx].address = smem_ram_phys;
+ ramdump_segments_tmp[smem_idx].size = smem_ram_size;
+ ++smem_idx;
+
+ /* Configure auxiliary SMEM regions */
+ while (1) {
+ scnprintf(temp_string, temp_string_size, "aux-mem%d",
+ smem_idx);
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ temp_string);
+ if (!r)
+ break;
+ aux_mem_base = r->start;
+ aux_mem_size = resource_size(r);
+
+ ramdump_segments_tmp[smem_idx].address = aux_mem_base;
+ ramdump_segments_tmp[smem_idx].size = aux_mem_size;
+
+ smem_areas_tmp[smem_idx].phys_addr = aux_mem_base;
+ smem_areas_tmp[smem_idx].size = aux_mem_size;
+ smem_areas_tmp[smem_idx].virt_addr = ioremap_nocache(
+ (unsigned long)(smem_areas_tmp[smem_idx].phys_addr),
+ smem_areas_tmp[smem_idx].size);
+ SMEM_DBG("%s: %s = %pa %pa -> %p", __func__, temp_string,
+ &aux_mem_base, &aux_mem_size,
+ smem_areas_tmp[smem_idx].virt_addr);
+
+ if (!smem_areas_tmp[smem_idx].virt_addr) {
+ LOG_ERR("%s: ioremap_nocache() of addr:%pa size: %pa\n",
+ __func__,
+ &smem_areas_tmp[smem_idx].phys_addr,
+ &smem_areas_tmp[smem_idx].size);
+ ret = -ENOMEM;
+ goto free_smem_areas;
+ }
+
+ if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
+ (uintptr_t)smem_areas_tmp[smem_idx].virt_addr,
+ smem_areas_tmp[smem_idx].size)) {
+ LOG_ERR(
+ "%s: invalid virtual address block %i: %p:%pa\n",
+ __func__, smem_idx,
+ smem_areas_tmp[smem_idx].virt_addr,
+ &smem_areas_tmp[smem_idx].size);
+ ++smem_idx;
+ ret = -EINVAL;
+ goto free_smem_areas;
+ }
+
+ ++smem_idx;
+ if (smem_idx > 999) {
+ LOG_ERR("%s: max num aux mem regions reached\n",
+ __func__);
+ break;
+ }
+ }
+
+ smem_areas = smem_areas_tmp;
+ smem_ramdump_segments = ramdump_segments_tmp;
+
+ key = "qcom,mpu-enabled";
+ security_enabled = of_property_read_bool(pdev->dev.of_node, key);
+ if (security_enabled) {
+ SMEM_INFO("smem security enabled\n");
+ smem_init_security();
+ }
+
+ probe_done = true;
+
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (ret)
+ LOG_ERR("%s: of_platform_populate failed %d\n", __func__, ret);
+
+ return 0;
+
+free_smem_areas:
+ for (smem_idx = smem_idx - 1; smem_idx >= 1; --smem_idx)
+ iounmap(smem_areas_tmp[smem_idx].virt_addr);
+
+ num_smem_areas = 0;
+ kfree(ramdump_segments_tmp);
+ kfree(smem_areas_tmp);
+ return ret;
+}
+
+static struct of_device_id msm_smem_match_table[] = {
+ { .compatible = "qcom,smem" },
+ {},
+};
+
+static struct platform_driver msm_smem_driver = {
+ .probe = msm_smem_probe,
+ .driver = {
+ .name = "msm_smem",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_smem_match_table,
+ },
+};
+
+int __init msm_smem_init(void)
+{
+ static bool registered;
+ int rc;
+
+ if (registered)
+ return 0;
+
+ registered = true;
+
+ smem_ipc_log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smem");
+ if (!smem_ipc_log_ctx) {
+ pr_err("%s: unable to create logging context\n", __func__);
+ msm_smem_debug_mask = 0;
+ }
+
+ rc = platform_driver_register(&msm_smem_driver);
+ if (rc) {
+ LOG_ERR("%s: msm_smem_driver register failed %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ smem_module_init_notify(0, NULL);
+
+ pr_err("smem inited\n");
+
+ return 0;
+}
+
+arch_initcall(msm_smem_init);
diff --git a/drivers/soc/qcom/smem_debug.c b/drivers/soc/qcom/smem_debug.c
new file mode 100644
index 000000000000..ace89afb614c
--- /dev/null
+++ b/drivers/soc/qcom/smem_debug.c
@@ -0,0 +1,139 @@
+/* arch/arm/mach-msm/smem_debug.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/jiffies.h>
+
+#include <soc/qcom/smem.h>
+
+#include "smem_private.h"
+
+#if defined(CONFIG_DEBUG_FS)
+
+#define SZ_SMEM_ALLOCATION_TABLE 8192
+
+static void debug_read_mem(struct seq_file *s)
+{
+ unsigned n;
+ struct smem_heap_info *heap_info;
+ struct smem_heap_entry *toc;
+
+ heap_info = smem_find(SMEM_HEAP_INFO, sizeof(struct smem_heap_info),
+ 0,
+ SMEM_ANY_HOST_FLAG);
+ if (!heap_info) {
+ seq_puts(s, "SMEM_HEAP_INFO is NULL\n");
+ return;
+ }
+ toc = smem_find(SMEM_ALLOCATION_TABLE, SZ_SMEM_ALLOCATION_TABLE,
+ 0, SMEM_ANY_HOST_FLAG);
+ if (!toc) {
+ seq_puts(s, "SMEM_ALLOCATION_TABLE is NULL\n");
+ return;
+ }
+
+ seq_printf(s, "heap: init=%d free=%d remain=%d\n",
+ heap_info->initialized,
+ heap_info->free_offset,
+ heap_info->heap_remaining);
+
+ for (n = 0; n < SMEM_NUM_ITEMS; n++) {
+ if (toc[n].allocated == 0)
+ continue;
+ seq_printf(s, "%04d: offset %08x size %08x\n",
+ n, toc[n].offset, toc[n].size);
+ }
+}
+
+static void debug_read_smem_version(struct seq_file *s)
+{
+ uint32_t n, version;
+
+ for (n = 0; n < 32; n++) {
+ version = smem_get_version(n);
+ seq_printf(s, "entry %d: smem = %d proc_comm = %d\n", n,
+ version >> 16,
+ version & 0xffff);
+ }
+}
+
+static void debug_read_build_id(struct seq_file *s)
+{
+ unsigned size;
+ void *data;
+
+ data = smem_get_entry(SMEM_HW_SW_BUILD_ID, &size, 0,
+ SMEM_ANY_HOST_FLAG);
+ if (!data)
+ return;
+
+ seq_write(s, data, size);
+}
+
+static int debugfs_show(struct seq_file *s, void *data)
+{
+ void (*show)(struct seq_file *) = s->private;
+
+ show(s);
+
+ return 0;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debugfs_show, inode->i_private);
+}
+
+static const struct file_operations debug_ops = {
+ .open = debug_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static void debug_create(const char *name, umode_t mode,
+ struct dentry *dent,
+ void (*show)(struct seq_file *))
+{
+ struct dentry *file;
+
+ file = debugfs_create_file(name, mode, dent, show, &debug_ops);
+ if (!file)
+ pr_err("%s: unable to create file '%s'\n", __func__, name);
+}
+
+static int __init smem_debugfs_init(void)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_dir("smem", 0);
+ if (IS_ERR(dent))
+ return PTR_ERR(dent);
+
+ debug_create("mem", 0444, dent, debug_read_mem);
+ debug_create("version", 0444, dent, debug_read_smem_version);
+
+ /* NNV: this is google only stuff */
+ debug_create("build", 0444, dent, debug_read_build_id);
+
+ return 0;
+}
+
+late_initcall(smem_debugfs_init);
+#endif
diff --git a/drivers/soc/qcom/smem_log.c b/drivers/soc/qcom/smem_log.c
new file mode 100644
index 000000000000..16265b9e8d9a
--- /dev/null
+++ b/drivers/soc/qcom/smem_log.c
@@ -0,0 +1,1043 @@
+/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * Shared memory logging implementation.
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/remote_spinlock.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+
+#include <soc/qcom/smem.h>
+#include <soc/qcom/smem_log.h>
+
+#include <asm/arch_timer.h>
+
+#include "smem_private.h"
+
+#define DEBUG
+#undef DEBUG
+
+#ifdef DEBUG
+#define D_DUMP_BUFFER(prestr, cnt, buf) \
+do { \
+ int i; \
+ printk(KERN_ERR "%s", prestr); \
+ for (i = 0; i < cnt; i++) \
+ printk(KERN_ERR "%.2x", buf[i]); \
+ printk(KERN_ERR "\n"); \
+} while (0)
+#else
+#define D_DUMP_BUFFER(prestr, cnt, buf)
+#endif
+
+#ifdef DEBUG
+#define D(x...) printk(x)
+#else
+#define D(x...) do {} while (0)
+#endif
+
+struct smem_log_item {
+ uint32_t identifier;
+ uint32_t timetick;
+ uint32_t data1;
+ uint32_t data2;
+ uint32_t data3;
+};
+
+#define SMEM_LOG_NUM_ENTRIES 2000
+#define SMEM_LOG_EVENTS_SIZE (sizeof(struct smem_log_item) * \
+ SMEM_LOG_NUM_ENTRIES)
+
+#define SMEM_SPINLOCK_SMEM_LOG "S:2"
+
+static remote_spinlock_t remote_spinlock;
+static uint32_t smem_log_enable;
+static int smem_log_initialized;
+
+module_param_named(log_enable, smem_log_enable, int,
+ S_IRUGO | S_IWUSR | S_IWGRP);
+
+
+struct smem_log_inst {
+ int which_log;
+ struct smem_log_item __iomem *events;
+ uint32_t __iomem *idx;
+ uint32_t num;
+ uint32_t read_idx;
+ uint32_t last_read_avail;
+ wait_queue_head_t read_wait;
+ remote_spinlock_t *remote_spinlock;
+};
+
+enum smem_logs {
+ GEN = 0,
+ NUM
+};
+
+static struct smem_log_inst inst[NUM];
+
+#if defined(CONFIG_DEBUG_FS)
+
+#define HSIZE 13
+
+struct sym {
+ uint32_t val;
+ char *str;
+ struct hlist_node node;
+};
+
+struct sym id_syms[] = {
+ { SMEM_LOG_PROC_ID_MODEM, "MODM" },
+ { SMEM_LOG_PROC_ID_Q6, "QDSP" },
+ { SMEM_LOG_PROC_ID_APPS, "APPS" },
+ { SMEM_LOG_PROC_ID_WCNSS, "WCNSS" },
+};
+
+struct sym base_syms[] = {
+ { SMEM_LOG_SMEM_EVENT_BASE, "SMEM" },
+ { SMEM_LOG_ERROR_EVENT_BASE, "ERROR" },
+ { SMEM_LOG_QMI_CCI_EVENT_BASE, "QCCI" },
+ { SMEM_LOG_QMI_CSI_EVENT_BASE, "QCSI" },
+};
+
+struct sym event_syms[] = {
+ { ERR_ERROR_FATAL, "ERR_ERROR_FATAL" },
+ { ERR_ERROR_FATAL_TASK, "ERR_ERROR_FATAL_TASK" },
+ { SMEM_LOG_EVENT_CB, "CB" },
+ { SMEM_LOG_EVENT_START, "START" },
+ { SMEM_LOG_EVENT_INIT, "INIT" },
+ { SMEM_LOG_EVENT_RUNNING, "RUNNING" },
+ { SMEM_LOG_EVENT_STOP, "STOP" },
+ { SMEM_LOG_EVENT_RESTART, "RESTART" },
+ { SMEM_LOG_EVENT_SS, "SS" },
+ { SMEM_LOG_EVENT_READ, "READ" },
+ { SMEM_LOG_EVENT_WRITE, "WRITE" },
+ { SMEM_LOG_EVENT_SIGS1, "SIGS1" },
+ { SMEM_LOG_EVENT_SIGS2, "SIGS2" },
+ { SMEM_LOG_EVENT_WRITE_DM, "WRITE_DM" },
+ { SMEM_LOG_EVENT_READ_DM, "READ_DM" },
+ { SMEM_LOG_EVENT_SKIP_DM, "SKIP_DM" },
+ { SMEM_LOG_EVENT_STOP_DM, "STOP_DM" },
+ { SMEM_LOG_EVENT_ISR, "ISR" },
+ { SMEM_LOG_EVENT_TASK, "TASK" },
+ { SMEM_LOG_EVENT_RS, "RS" },
+};
+
+struct sym smsm_syms[] = {
+ { 0x80000000, "UN" },
+ { 0x7F000000, "ERR" },
+ { 0x00800000, "SMLP" },
+ { 0x00400000, "ADWN" },
+ { 0x00200000, "PWRS" },
+ { 0x00100000, "DWLD" },
+ { 0x00080000, "SRBT" },
+ { 0x00040000, "SDWN" },
+ { 0x00020000, "ARBT" },
+ { 0x00010000, "REL" },
+ { 0x00008000, "SLE" },
+ { 0x00004000, "SLP" },
+ { 0x00002000, "WFPI" },
+ { 0x00001000, "EEX" },
+ { 0x00000800, "TIN" },
+ { 0x00000400, "TWT" },
+ { 0x00000200, "PWRC" },
+ { 0x00000100, "RUN" },
+ { 0x00000080, "SA" },
+ { 0x00000040, "RES" },
+ { 0x00000020, "RIN" },
+ { 0x00000010, "RWT" },
+ { 0x00000008, "SIN" },
+ { 0x00000004, "SWT" },
+ { 0x00000002, "OE" },
+ { 0x00000001, "I" },
+};
+
+struct sym smsm_entry_type_syms[] = {
+ { 0, "SMSM_APPS_STATE" },
+ { 1, "SMSM_MODEM_STATE" },
+ { 2, "SMSM_Q6_STATE" },
+ { 3, "SMSM_APPS_DEM" },
+ { 4, "SMSM_MODEM_DEM" },
+ { 5, "SMSM_Q6_DEM" },
+ { 6, "SMSM_POWER_MASTER_DEM" },
+ { 7, "SMSM_TIME_MASTER_DEM" },
+};
+
+struct sym smsm_state_syms[] = {
+ { 0x00000001, "INIT" },
+ { 0x00000002, "OSENTERED" },
+ { 0x00000004, "SMDWAIT" },
+ { 0x00000008, "SMDINIT" },
+ { 0x00000010, "RPCWAIT" },
+ { 0x00000020, "RPCINIT" },
+ { 0x00000040, "RESET" },
+ { 0x00000080, "RSA" },
+ { 0x00000100, "RUN" },
+ { 0x00000200, "PWRC" },
+ { 0x00000400, "TIMEWAIT" },
+ { 0x00000800, "TIMEINIT" },
+ { 0x00001000, "PWRC_EARLY_EXIT" },
+ { 0x00002000, "WFPI" },
+ { 0x00004000, "SLEEP" },
+ { 0x00008000, "SLEEPEXIT" },
+ { 0x00010000, "OEMSBL_RELEASE" },
+ { 0x00020000, "APPS_REBOOT" },
+ { 0x00040000, "SYSTEM_POWER_DOWN" },
+ { 0x00080000, "SYSTEM_REBOOT" },
+ { 0x00100000, "SYSTEM_DOWNLOAD" },
+ { 0x00200000, "PWRC_SUSPEND" },
+ { 0x00400000, "APPS_SHUTDOWN" },
+ { 0x00800000, "SMD_LOOPBACK" },
+ { 0x01000000, "RUN_QUIET" },
+ { 0x02000000, "MODEM_WAIT" },
+ { 0x04000000, "MODEM_BREAK" },
+ { 0x08000000, "MODEM_CONTINUE" },
+ { 0x80000000, "UNKNOWN" },
+};
+
+enum sym_tables {
+ ID_SYM,
+ BASE_SYM,
+ EVENT_SYM,
+ SMSM_SYM,
+ SMSM_ENTRY_TYPE_SYM,
+ SMSM_STATE_SYM,
+};
+
+static struct sym_tbl {
+ struct sym *data;
+ int size;
+ struct hlist_head hlist[HSIZE];
+} tbl[] = {
+ { id_syms, ARRAY_SIZE(id_syms) },
+ { base_syms, ARRAY_SIZE(base_syms) },
+ { event_syms, ARRAY_SIZE(event_syms) },
+ { smsm_syms, ARRAY_SIZE(smsm_syms) },
+ { smsm_entry_type_syms, ARRAY_SIZE(smsm_entry_type_syms) },
+ { smsm_state_syms, ARRAY_SIZE(smsm_state_syms) },
+};
+
+union fifo_mem {
+ uint64_t u64;
+ uint8_t u8;
+};
+
+/**
+ * memcpy_to_log() - copy to SMEM log FIFO
+ * @dest: Destination address
+ * @src: Source address
+ * @num_bytes: Number of bytes to copy
+ * @from_user: true if data being copied is from userspace, false otherwise
+ *
+ * @return: Address of destination
+ *
+ * This function copies num_bytes from src to dest maintaining natural alignment
+ * for accesses to dest as required for Device memory.
+ */
+static void *memcpy_to_log(void *dest, const void *src, size_t num_bytes,
+ bool from_user)
+{
+ union fifo_mem *temp_dst = (union fifo_mem *)dest;
+ union fifo_mem *temp_src = (union fifo_mem *)src;
+ uintptr_t mask = sizeof(union fifo_mem) - 1;
+ int ret;
+
+ /* Do byte copies until we hit 8-byte (double word) alignment */
+ while ((uintptr_t)temp_dst & mask && num_bytes) {
+ if (from_user) {
+ ret = copy_from_user(temp_dst, temp_src, 1);
+ BUG_ON(ret != 0);
+ } else {
+ __raw_writeb(temp_src->u8, temp_dst);
+ }
+
+ temp_src = (union fifo_mem *)((uintptr_t)temp_src + 1);
+ temp_dst = (union fifo_mem *)((uintptr_t)temp_dst + 1);
+ num_bytes--;
+ }
+
+ /* Do double word copies */
+ while (num_bytes >= sizeof(union fifo_mem)) {
+ if (from_user) {
+ ret = copy_from_user(temp_dst, temp_src,
+ sizeof(union fifo_mem));
+ BUG_ON(ret != 0);
+ } else {
+ __raw_writeq(temp_src->u64, temp_dst);
+ }
+
+ temp_dst++;
+ temp_src++;
+ num_bytes -= sizeof(union fifo_mem);
+ }
+
+ /* Copy remaining bytes */
+ while (num_bytes--) {
+ if (from_user) {
+ ret = copy_from_user(temp_dst, temp_src, 1);
+ BUG_ON(ret != 0);
+ } else {
+ __raw_writeb(temp_src->u8, temp_dst);
+ }
+
+ temp_src = (union fifo_mem *)((uintptr_t)temp_src + 1);
+ temp_dst = (union fifo_mem *)((uintptr_t)temp_dst + 1);
+ }
+
+ return dest;
+}
+
+#define hash(val) (val % HSIZE)
+
+static void init_syms(void)
+{
+ int i;
+ int j;
+
+ for (i = 0; i < ARRAY_SIZE(tbl); ++i)
+ for (j = 0; j < HSIZE; ++j)
+ INIT_HLIST_HEAD(&tbl[i].hlist[j]);
+
+ for (i = 0; i < ARRAY_SIZE(tbl); ++i)
+ for (j = 0; j < tbl[i].size; ++j) {
+ INIT_HLIST_NODE(&tbl[i].data[j].node);
+ hlist_add_head(&tbl[i].data[j].node,
+ &tbl[i].hlist[hash(tbl[i].data[j].val)]);
+ }
+}
+
+static char *find_sym(uint32_t id, uint32_t val)
+{
+ struct hlist_node *n;
+ struct sym *s;
+
+ hlist_for_each(n, &tbl[id].hlist[hash(val)]) {
+ s = hlist_entry(n, struct sym, node);
+ if (s->val == val)
+ return s->str;
+ }
+
+ return 0;
+}
+
+#else
+static void init_syms(void) {}
+#endif
+
+static inline unsigned int read_timestamp(void)
+{
+ return 0;
+ //return (unsigned int)(arch_counter_get_cntpct());
+}
+
+static void smem_log_event_from_user(struct smem_log_inst *inst,
+ const char __user *buf, int size, int num)
+{
+ uint32_t idx;
+ uint32_t next_idx;
+ unsigned long flags;
+ uint32_t identifier = 0;
+ uint32_t timetick = 0;
+ int first = 1;
+
+ if (!inst->idx) {
+ pr_err("%s: invalid write index\n", __func__);
+ return;
+ }
+
+ remote_spin_lock_irqsave(inst->remote_spinlock, flags);
+
+ while (num--) {
+ idx = *inst->idx;
+
+ if (idx < inst->num) {
+ memcpy_to_log(&inst->events[idx], buf, size, true);
+
+ if (first) {
+ identifier =
+ inst->events[idx].
+ identifier;
+ timetick = read_timestamp();
+ first = 0;
+ } else {
+ identifier |= SMEM_LOG_CONT;
+ }
+ inst->events[idx].identifier =
+ identifier;
+ inst->events[idx].timetick =
+ timetick;
+ }
+
+ next_idx = idx + 1;
+ if (next_idx >= inst->num)
+ next_idx = 0;
+ *inst->idx = next_idx;
+ buf += sizeof(struct smem_log_item);
+ }
+
+ wmb();
+ remote_spin_unlock_irqrestore(inst->remote_spinlock, flags);
+}
+
+static void _smem_log_event(
+ struct smem_log_item __iomem *events,
+ uint32_t __iomem *_idx,
+ remote_spinlock_t *lock,
+ int num,
+ uint32_t id, uint32_t data1, uint32_t data2,
+ uint32_t data3)
+{
+ struct smem_log_item item;
+ uint32_t idx;
+ uint32_t next_idx;
+ unsigned long flags;
+
+ item.timetick = read_timestamp();
+ item.identifier = id;
+ item.data1 = data1;
+ item.data2 = data2;
+ item.data3 = data3;
+
+ remote_spin_lock_irqsave(lock, flags);
+
+ idx = *_idx;
+
+ if (idx < num)
+ memcpy_to_log(&events[idx], &item, sizeof(item), false);
+
+ next_idx = idx + 1;
+ if (next_idx >= num)
+ next_idx = 0;
+ *_idx = next_idx;
+ wmb();
+
+ remote_spin_unlock_irqrestore(lock, flags);
+}
+
+static void _smem_log_event6(
+ struct smem_log_item __iomem *events,
+ uint32_t __iomem *_idx,
+ remote_spinlock_t *lock,
+ int num,
+ uint32_t id, uint32_t data1, uint32_t data2,
+ uint32_t data3, uint32_t data4, uint32_t data5,
+ uint32_t data6)
+{
+ struct smem_log_item item[2];
+ uint32_t idx;
+ uint32_t next_idx;
+ unsigned long flags;
+
+ item[0].timetick = read_timestamp();
+ item[0].identifier = id;
+ item[0].data1 = data1;
+ item[0].data2 = data2;
+ item[0].data3 = data3;
+ item[1].identifier = item[0].identifier;
+ item[1].timetick = item[0].timetick;
+ item[1].data1 = data4;
+ item[1].data2 = data5;
+ item[1].data3 = data6;
+
+ remote_spin_lock_irqsave(lock, flags);
+
+ idx = *_idx;
+
+ /* FIXME: Wrap around */
+ if (idx < (num-1))
+ memcpy_to_log(&events[idx], &item, sizeof(item), false);
+
+ next_idx = idx + 2;
+ if (next_idx >= num)
+ next_idx = 0;
+ *_idx = next_idx;
+
+ wmb();
+ remote_spin_unlock_irqrestore(lock, flags);
+}
+
+void smem_log_event(uint32_t id, uint32_t data1, uint32_t data2,
+ uint32_t data3)
+{
+ if (smem_log_enable)
+ _smem_log_event(inst[GEN].events, inst[GEN].idx,
+ inst[GEN].remote_spinlock,
+ SMEM_LOG_NUM_ENTRIES, id,
+ data1, data2, data3);
+}
+
+void smem_log_event6(uint32_t id, uint32_t data1, uint32_t data2,
+ uint32_t data3, uint32_t data4, uint32_t data5,
+ uint32_t data6)
+{
+ if (smem_log_enable)
+ _smem_log_event6(inst[GEN].events, inst[GEN].idx,
+ inst[GEN].remote_spinlock,
+ SMEM_LOG_NUM_ENTRIES, id,
+ data1, data2, data3, data4, data5, data6);
+}
+
+static int _smem_log_init(void)
+{
+ int ret;
+
+ inst[GEN].which_log = GEN;
+ inst[GEN].events =
+ (struct smem_log_item *)smem_alloc(SMEM_SMEM_LOG_EVENTS,
+ SMEM_LOG_EVENTS_SIZE,
+ 0,
+ SMEM_ANY_HOST_FLAG);
+ inst[GEN].idx = (uint32_t *)smem_alloc(SMEM_SMEM_LOG_IDX,
+ sizeof(uint32_t),
+ 0,
+ SMEM_ANY_HOST_FLAG);
+ if (IS_ERR_OR_NULL(inst[GEN].events) || IS_ERR_OR_NULL(inst[GEN].idx)) {
+ pr_err("%s: no log or log_idx allocated\n", __func__);
+ return -ENODEV;
+ }
+
+ inst[GEN].num = SMEM_LOG_NUM_ENTRIES;
+ inst[GEN].read_idx = 0;
+ inst[GEN].last_read_avail = SMEM_LOG_NUM_ENTRIES;
+ init_waitqueue_head(&inst[GEN].read_wait);
+ inst[GEN].remote_spinlock = &remote_spinlock;
+
+ ret = remote_spin_lock_init(&remote_spinlock,
+ SMEM_SPINLOCK_SMEM_LOG);
+ if (ret) {
+ mb();
+ return ret;
+ }
+
+ init_syms();
+ mb();
+
+ return 0;
+}
+
+static ssize_t smem_log_write_bin(struct file *fp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ if (count < sizeof(struct smem_log_item))
+ return -EINVAL;
+
+ if (smem_log_enable)
+ smem_log_event_from_user(fp->private_data, buf,
+ sizeof(struct smem_log_item),
+ count / sizeof(struct smem_log_item));
+ return count;
+}
+
+static int smem_log_open(struct inode *ip, struct file *fp)
+{
+ fp->private_data = &inst[GEN];
+
+ return 0;
+}
+
+static int smem_log_release(struct inode *ip, struct file *fp)
+{
+ return 0;
+}
+
+static const struct file_operations smem_log_bin_fops = {
+ .owner = THIS_MODULE,
+ .write = smem_log_write_bin,
+ .open = smem_log_open,
+ .release = smem_log_release,
+};
+
+static struct miscdevice smem_log_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "smem_log",
+ .fops = &smem_log_bin_fops,
+};
+
+#if defined(CONFIG_DEBUG_FS)
+
+#define SMEM_LOG_ITEM_PRINT_SIZE 160
+
+#define EVENTS_PRINT_SIZE \
+(SMEM_LOG_ITEM_PRINT_SIZE * SMEM_LOG_NUM_ENTRIES)
+
+static uint32_t smem_log_timeout_ms;
+module_param_named(timeout_ms, smem_log_timeout_ms,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static int smem_log_debug_mask;
+module_param_named(debug_mask, smem_log_debug_mask, int,
+ S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define DBG(x...) do {\
+ if (smem_log_debug_mask) \
+ printk(KERN_DEBUG x);\
+ } while (0)
+
+static int update_read_avail(struct smem_log_inst *inst)
+{
+ int curr_read_avail;
+ unsigned long flags = 0;
+
+ if (!inst->idx)
+ return 0;
+
+ remote_spin_lock_irqsave(inst->remote_spinlock, flags);
+ curr_read_avail = (*inst->idx - inst->read_idx);
+ if (curr_read_avail < 0)
+ curr_read_avail = inst->num - inst->read_idx + *inst->idx;
+
+ DBG("%s: read = %d write = %d curr = %d last = %d\n", __func__,
+ inst->read_idx, *inst->idx, curr_read_avail, inst->last_read_avail);
+
+ if (curr_read_avail < inst->last_read_avail) {
+ if (inst->last_read_avail != inst->num)
+ pr_info("smem_log: skipping %d log entries\n",
+ inst->last_read_avail);
+ inst->read_idx = *inst->idx + 1;
+ inst->last_read_avail = inst->num - 1;
+ } else
+ inst->last_read_avail = curr_read_avail;
+
+ remote_spin_unlock_irqrestore(inst->remote_spinlock, flags);
+
+ DBG("%s: read = %d write = %d curr = %d last = %d\n", __func__,
+ inst->read_idx, *inst->idx, curr_read_avail, inst->last_read_avail);
+
+ return inst->last_read_avail;
+}
+
+static int _debug_dump(int log, char *buf, int max, uint32_t cont)
+{
+ unsigned int idx;
+ int write_idx, read_avail = 0;
+ unsigned long flags;
+ int i = 0;
+
+ if (!inst[log].events)
+ return 0;
+
+ if (cont && update_read_avail(&inst[log]) == 0)
+ return 0;
+
+ remote_spin_lock_irqsave(inst[log].remote_spinlock, flags);
+
+ if (cont) {
+ idx = inst[log].read_idx;
+ write_idx = (inst[log].read_idx + inst[log].last_read_avail);
+ if (write_idx >= inst[log].num)
+ write_idx -= inst[log].num;
+ } else {
+ write_idx = *inst[log].idx;
+ idx = (write_idx + 1);
+ }
+
+ DBG("%s: read %d write %d idx %d num %d\n", __func__,
+ inst[log].read_idx, write_idx, idx, inst[log].num - 1);
+
+ while ((max - i) > 50) {
+ if ((inst[log].num - 1) < idx)
+ idx = 0;
+
+ if (idx == write_idx)
+ break;
+
+ if (inst[log].events[idx].identifier) {
+
+ i += scnprintf(buf + i, max - i,
+ "%08x %08x %08x %08x %08x\n",
+ inst[log].events[idx].identifier,
+ inst[log].events[idx].timetick,
+ inst[log].events[idx].data1,
+ inst[log].events[idx].data2,
+ inst[log].events[idx].data3);
+ }
+ idx++;
+ }
+ if (cont) {
+ inst[log].read_idx = idx;
+ read_avail = (write_idx - inst[log].read_idx);
+ if (read_avail < 0)
+ read_avail = inst->num - inst->read_idx + write_idx;
+ inst[log].last_read_avail = read_avail;
+ }
+
+ remote_spin_unlock_irqrestore(inst[log].remote_spinlock, flags);
+
+ DBG("%s: read %d write %d idx %d num %d\n", __func__,
+ inst[log].read_idx, write_idx, idx, inst[log].num);
+
+ return i;
+}
+
+/*
+ * _debug_dump_sym() - Format log events into @buf with symbolic decoding.
+ * @log:  log instance index into inst[]
+ * @buf:  output buffer
+ * @max:  size of @buf in bytes
+ * @cont: non-zero for continuous mode (consume starting at read_idx)
+ *
+ * Returns the number of bytes written to @buf.
+ */
+static int _debug_dump_sym(int log, char *buf, int max, uint32_t cont)
+{
+	unsigned int idx;
+	int write_idx, read_avail = 0;
+	unsigned long flags;
+	int i = 0;
+
+	char *proc;
+	char *sub;
+	char *id;
+	const char *sym = NULL;
+
+	uint32_t proc_val = 0;
+	uint32_t sub_val = 0;
+	uint32_t id_val = 0;
+	uint32_t id_only_val = 0;
+	uint32_t data1 = 0;
+	uint32_t data2 = 0;
+	uint32_t data3 = 0;
+
+	if (!inst[log].events)
+		return 0;
+
+	if (cont && update_read_avail(&inst[log]) == 0)
+		return 0;
+
+	remote_spin_lock_irqsave(inst[log].remote_spinlock, flags);
+
+	if (cont) {
+		idx = inst[log].read_idx;
+		write_idx = (inst[log].read_idx + inst[log].last_read_avail);
+		if (write_idx >= inst[log].num)
+			write_idx -= inst[log].num;
+	} else {
+		write_idx = *inst[log].idx;
+		idx = (write_idx + 1);
+	}
+
+	DBG("%s: read %d write %d idx %d num %d\n", __func__,
+	    inst[log].read_idx, write_idx, idx, inst[log].num - 1);
+
+	for (; (max - i) > SMEM_LOG_ITEM_PRINT_SIZE; idx++) {
+		if (idx > (inst[log].num - 1))
+			idx = 0;
+
+		if (idx == write_idx)
+			break;
+
+		if (idx < inst[log].num) {
+			if (!inst[log].events[idx].identifier)
+				continue;
+
+			proc_val = PROC & inst[log].events[idx].identifier;
+			sub_val = SUB & inst[log].events[idx].identifier;
+			id_val = (SUB | ID) & inst[log].events[idx].identifier;
+			id_only_val = ID & inst[log].events[idx].identifier;
+			data1 = inst[log].events[idx].data1;
+			data2 = inst[log].events[idx].data2;
+			data3 = inst[log].events[idx].data3;
+
+			if (!(proc_val & SMEM_LOG_CONT)) {
+				i += scnprintf(buf + i, max - i, "\n");
+
+				proc = find_sym(ID_SYM, proc_val);
+
+				if (proc)
+					i += scnprintf(buf + i, max - i,
+						       "%4s: ", proc);
+				else
+					i += scnprintf(buf + i, max - i,
+						       "%04x: ",
+						       PROC &
+						       inst[log].events[idx].
+						       identifier);
+
+				i += scnprintf(buf + i, max - i, "%10u ",
+					       inst[log].events[idx].timetick);
+
+				sub = find_sym(BASE_SYM, sub_val);
+
+				if (sub)
+					i += scnprintf(buf + i, max - i,
+						       "%9s: ", sub);
+				else
+					i += scnprintf(buf + i, max - i,
+						       "%08x: ", sub_val);
+
+				id = find_sym(EVENT_SYM, id_val);
+
+				if (id)
+					i += scnprintf(buf + i, max - i,
+						       "%11s: ", id);
+				else
+					i += scnprintf(buf + i, max - i,
+						       "%08x: ", id_only_val);
+			}
+
+			if (proc_val & SMEM_LOG_CONT) {
+				i += scnprintf(buf + i, max - i,
+					       " %08x %08x %08x",
+					       data1, data2, data3);
+			} else if (id_val == SMEM_LOG_EVENT_CB) {
+				/* decode SMSM state words bit-by-bit */
+				unsigned vals[] = {data2, data3};
+				unsigned j;
+				unsigned mask;
+				unsigned tmp;
+				unsigned once;
+				i += scnprintf(buf + i, max - i, "%08x ",
+					       data1);
+				for (j = 0; j < ARRAY_SIZE(vals); ++j) {
+					i += scnprintf(buf + i, max - i, "[");
+					mask = 0x80000000;
+					once = 0;
+					while (mask) {
+						tmp = vals[j] & mask;
+						mask >>= 1;
+						if (!tmp)
+							continue;
+						sym = find_sym(SMSM_SYM, tmp);
+
+						if (once)
+							i += scnprintf(buf + i,
+								max - i,
+								" ");
+						if (sym)
+							i += scnprintf(buf + i,
+								max - i,
+								"%s",
+								sym);
+						else
+							i += scnprintf(buf + i,
+								max - i,
+								"%08x",
+								tmp);
+						once = 1;
+					}
+					i += scnprintf(buf + i, max - i, "] ");
+				}
+			} else {
+				i += scnprintf(buf + i, max - i,
+					       "%08x %08x %08x",
+					       data1, data2, data3);
+			}
+		}
+	}
+	if (cont) {
+		inst[log].read_idx = idx;
+		read_avail = (write_idx - inst[log].read_idx);
+		if (read_avail < 0) {
+			/*
+			 * Fix: the wrap-around count must come from this log
+			 * instance; the previous inst->num / inst->read_idx
+			 * dereferenced inst[0] regardless of @log.
+			 */
+			read_avail = inst[log].num - inst[log].read_idx +
+					write_idx;
+		}
+		inst[log].last_read_avail = read_avail;
+	}
+
+	remote_spin_unlock_irqrestore(inst[log].remote_spinlock, flags);
+
+	DBG("%s: read %d write %d idx %d num %d\n", __func__,
+	    inst[log].read_idx, write_idx, idx, inst[log].num);
+
+	return i;
+}
+
+/*
+ * Dump the GEN log into @buf.  In continuous mode, block until new
+ * events arrive or the wait is interrupted / times out.
+ */
+static int debug_dump(char *buf, int max, uint32_t cont)
+{
+	int rc;
+
+	if (!inst[GEN].idx || !inst[GEN].events)
+		return -ENODEV;
+
+	if (cont) {
+		for (;;) {
+			update_read_avail(&inst[GEN]);
+			rc = wait_event_interruptible_timeout(
+					inst[GEN].read_wait,
+					inst[GEN].last_read_avail,
+					smem_log_timeout_ms * HZ / 1000);
+			DBG("%s: read available %d\n", __func__,
+			    inst[GEN].last_read_avail);
+			if (rc < 0)
+				return 0;
+			if (inst[GEN].last_read_avail)
+				break;
+		}
+	}
+
+	return _debug_dump(GEN, buf, max, cont);
+}
+
+/*
+ * Dump the GEN log with symbolic decoding into @buf.  In continuous
+ * mode, block until new events arrive or the wait is interrupted /
+ * times out.  Returns bytes written, 0 on interrupt, or -ENODEV.
+ */
+static int debug_dump_sym(char *buf, int max, uint32_t cont)
+{
+	int r;
+
+	if (!inst[GEN].idx || !inst[GEN].events)
+		return -ENODEV;
+
+	while (cont) {
+		update_read_avail(&inst[GEN]);
+		r = wait_event_interruptible_timeout(inst[GEN].read_wait,
+					inst[GEN].last_read_avail,
+					smem_log_timeout_ms *
+					HZ / 1000);
+		/* typo fix: "readavailable" -> "read available", matching
+		 * the wording used by debug_dump() */
+		DBG("%s: read available %d\n", __func__,
+			inst[GEN].last_read_avail);
+		if (r < 0)
+			return 0;
+		else if (inst[GEN].last_read_avail)
+			break;
+	}
+
+	return _debug_dump_sym(GEN, buf, max, cont);
+}
+
+static char debug_buffer[EVENTS_PRINT_SIZE];
+
+/*
+ * Non-continuous debugfs read: take a snapshot of the log into
+ * debug_buffer at offset 0, then serve subsequent reads from it.
+ */
+static ssize_t debug_read(struct file *file, char __user *buf,
+			  size_t count, loff_t *ppos)
+{
+	int r;
+	/*
+	 * Fix: bsize must persist across read() calls.  With an automatic
+	 * variable, any read at *ppos > 0 (fill() not re-run) passed a
+	 * zero length to simple_read_from_buffer(), truncating the dump
+	 * to the first read's worth of data.  The backing buffer is
+	 * already a single shared static, so a static length matches the
+	 * existing (unlocked) design.
+	 */
+	static int bsize;
+	int (*fill)(char *, int, uint32_t) = file->private_data;
+
+	/* (re)build the snapshot only at the start of the file */
+	if (!(*ppos)) {
+		bsize = fill(debug_buffer, EVENTS_PRINT_SIZE, 0);
+
+		if (bsize < 0)
+			bsize = scnprintf(debug_buffer,
+				EVENTS_PRINT_SIZE, "Log not available\n");
+	}
+	DBG("%s: count %zu ppos %d\n", __func__, count, (unsigned int)*ppos);
+	r = simple_read_from_buffer(buf, count, ppos, debug_buffer,
+				    bsize);
+	return r;
+}
+
+/*
+ * Continuous-mode debugfs read: allocate a temporary buffer of the
+ * requested size and ask the fill callback (stashed in
+ * file->private_data by debug_open()) for the next batch of log data.
+ *
+ * NOTE(review): @count is caller-controlled and passed straight to
+ * kmalloc(); very large reads will simply fail with -ENOMEM.
+ */
+static ssize_t debug_read_cont(struct file *file, char __user *buf,
+			       size_t count, loff_t *ppos)
+{
+	int (*fill)(char *, int, uint32_t) = file->private_data;
+	char *buffer = kmalloc(count, GFP_KERNEL);
+	int bsize;
+	if (!buffer)
+		return -ENOMEM;
+
+	bsize = fill(buffer, count, 1);
+	if (bsize < 0) {
+		/* report "not available" only once, at the start of file */
+		if (*ppos == 0)
+			bsize = scnprintf(buffer, count, "Log not available\n");
+		else
+			bsize = 0;	/* EOF on later reads */
+	}
+
+	DBG("%s: count %zu bsize %d\n", __func__, count, bsize);
+	if (copy_to_user(buf, buffer, bsize)) {
+		kfree(buffer);
+		return -EFAULT;
+	}
+	*ppos += bsize;
+	kfree(buffer);
+	return bsize;
+}
+
+/* Stash the per-file fill callback (debugfs i_private) for the read
+ * handlers above. */
+static int debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+/* One-shot snapshot read of the whole log. */
+static const struct file_operations debug_ops = {
+	.read = debug_read,
+	.open = debug_open,
+};
+
+/* Blocking, continuous read that consumes new log entries. */
+static const struct file_operations debug_ops_cont = {
+	.read = debug_read_cont,
+	.open = debug_open,
+};
+
+/*
+ * Create a debugfs file whose i_private holds the dump callback; the
+ * callback is recovered in debug_open() and invoked by the read handler.
+ */
+static void debug_create(const char *name, mode_t mode,
+			 struct dentry *dent,
+			 int (*fill)(char *buf, int max, uint32_t cont),
+			 const struct file_operations *fops)
+{
+	debugfs_create_file(name, mode, dent, fill, fops);
+}
+
+/* Create the smem_log debugfs directory and its dump files. */
+static void smem_log_debugfs_init(void)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_dir("smem_log", 0);
+	/*
+	 * Fix: debugfs_create_dir() returns NULL on failure (ERR_PTR only
+	 * when debugfs is not configured), so an IS_ERR()-only check could
+	 * pass a NULL parent to the debugfs_create_file() calls below.
+	 */
+	if (IS_ERR_OR_NULL(dent))
+		return;
+
+	debug_create("dump", 0444, dent, debug_dump, &debug_ops);
+	debug_create("dump_sym", 0444, dent, debug_dump_sym, &debug_ops);
+
+	debug_create("dump_cont", 0444, dent, debug_dump, &debug_ops_cont);
+	debug_create("dump_sym_cont", 0444, dent,
+		     debug_dump_sym, &debug_ops_cont);
+
+	/* defaults used by the continuous dump wait loops */
+	smem_log_timeout_ms = 500;
+	smem_log_debug_mask = 0;
+}
+#else
+static void smem_log_debugfs_init(void) {}
+#endif
+
+/*
+ * Perform the core log initialization, register the misc device, and
+ * create the debugfs interface.  Returns 0 on success or a negative
+ * errno from the failing step.
+ */
+static int smem_log_initialize(void)
+{
+	int ret;
+
+	ret = _smem_log_init();
+	if (ret < 0) {
+		pr_err("%s: init failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	ret = misc_register(&smem_log_dev);
+	if (ret < 0) {
+		pr_err("%s: device register failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	/* only flip the globals once everything above has succeeded */
+	smem_log_enable = 1;
+	smem_log_initialized = 1;
+	smem_log_debugfs_init();
+	return ret;
+}
+
+/*
+ * SMEM module-init notifier callback: run the one-time log bring-up the
+ * first time we are notified; later notifications are no-ops.
+ */
+static int smem_module_init_notifier(struct notifier_block *this,
+				unsigned long code,
+				void *_cmd)
+{
+	if (smem_log_initialized)
+		return 0;
+
+	return smem_log_initialize();
+}
+
+/* Fires smem_log_initialize() once SMEM signals init complete. */
+static struct notifier_block nb = {
+	.notifier_call = smem_module_init_notifier,
+};
+
+static int __init smem_log_init(void)
+{
+	/* defer real initialization until SMEM itself is ready */
+	return smem_module_init_notifier_register(&nb);
+}
+
+
+module_init(smem_log_init);
+
+MODULE_DESCRIPTION("smem log");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smem_private.h b/drivers/soc/qcom/smem_private.h
new file mode 100644
index 000000000000..7aeca5eed8d2
--- /dev/null
+++ b/drivers/soc/qcom/smem_private.h
@@ -0,0 +1,104 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_SMEM_PRIVATE_H_
+#define _ARCH_ARM_MACH_MSM_SMEM_PRIVATE_H_
+
+#include <linux/remote_spinlock.h>
+#include <soc/qcom/ramdump.h>
+
+
+#define SMD_HEAP_SIZE 512
+
+/* SMEM heap bookkeeping header, shared with remote processors. */
+struct smem_heap_info {
+	unsigned initialized;
+	unsigned free_offset;
+	unsigned heap_remaining;
+	unsigned reserved;
+};
+
+/* One entry of the SMEM heap table of contents. */
+struct smem_heap_entry {
+	unsigned allocated;
+	unsigned offset;
+	unsigned size;
+	unsigned reserved; /* bits 1:0 reserved, bits 31:2 aux smem base addr */
+};
+#define BASE_ADDR_MASK 0xfffffffc
+
+/* Legacy processor-communication mailbox layout. */
+struct smem_proc_comm {
+	unsigned command;
+	unsigned status;
+	unsigned data1;
+	unsigned data2;
+};
+
+/* Fixed layout of the start of shared memory. */
+struct smem_shared {
+	struct smem_proc_comm proc_comm[4];
+	unsigned version[32];
+	struct smem_heap_info heap_info;
+	struct smem_heap_entry heap_toc[SMD_HEAP_SIZE];
+};
+
+/* One physically-contiguous SMEM region and its iomem mapping. */
+struct smem_area {
+	phys_addr_t phys_addr;
+	resource_size_t size;
+	void __iomem *virt_addr;
+};
+
+/* used for unit testing spinlocks */
+remote_spinlock_t *smem_get_remote_spinlock(void);
+
+bool smem_initialized_check(void);
+
+/**
+ * smem_module_init_notifier_register() - Register a smem module
+ * init notifier block
+ * @nb: Notifier block to be registered
+ *
+ * In order to mark the dependency on SMEM Driver module initialization
+ * register a notifier using this API. Once the smem module_init is
+ * done, notification will be passed to the registered module.
+ */
+int smem_module_init_notifier_register(struct notifier_block *nb);
+
+/**
+ * smem_module_init_notifier_unregister() - Unregister a smem module
+ * init notifier block
+ * @nb: Notifier block to be unregistered
+ */
+int smem_module_init_notifier_unregister(struct notifier_block *nb);
+
+/**
+ * smem_get_free_space() - Get the available allocation free space for a
+ * partition
+ *
+ * @to_proc: remote SMEM host. Determines the applicable partition
+ * @returns: size in bytes available to allocate
+ *
+ * Helper function for SMD so that SMD only scans the channel allocation
+ * table for a partition when it is reasonably certain that a channel has
+ * actually been created, because scanning can be expensive. Creating a channel
+ * will consume some of the free space in a partition, so SMD can compare the
+ * last free space size against the current free space size to determine if
+ * a channel may have been created. SMD can't do this directly, because the
+ * necessary partition internals are restricted to just SMEM.
+ */
+unsigned smem_get_free_space(unsigned to_proc);
+
+/**
+ * smem_get_version() - Get the smem user version number
+ *
+ * @idx: SMEM user idx in SMEM_VERSION_INFO table.
+ * @returns: smem version number if success otherwise zero.
+ */
+unsigned smem_get_version(unsigned idx);
+#endif /* _ARCH_ARM_MACH_MSM_SMEM_PRIVATE_H_ */
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
new file mode 100644
index 000000000000..9aa1eecda0b7
--- /dev/null
+++ b/drivers/soc/qcom/smp2p.c
@@ -0,0 +1,1934 @@
+/* drivers/soc/qcom/smp2p.c
+ *
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/ipc_logging.h>
+#include <linux/err.h>
+#include <soc/qcom/smem.h>
+#include "smp2p_private_api.h"
+#include "smp2p_private.h"
+
+#define NUM_LOG_PAGES 3
+
+/**
+ * struct msm_smp2p_out - This structure represents the outbound SMP2P entry.
+ *
+ * @remote_pid: Outbound processor ID.
+ * @name: Entry name.
+ * @out_edge_list: Adds this structure into smp2p_out_list_item::list.
+ * @msm_smp2p_notifier_list: Notifier block head used to notify for open event.
+ * @open_nb: Notifier block used to notify for open event.
+ * @l_smp2p_entry: Pointer to the actual entry in the SMEM item.
+ */
+struct msm_smp2p_out {
+	int remote_pid;
+	char name[SMP2P_MAX_ENTRY_NAME];
+	struct list_head out_edge_list;
+	struct raw_notifier_head msm_smp2p_notifier_list;
+	struct notifier_block *open_nb;
+	uint32_t __iomem *l_smp2p_entry;	/* NULL until entry created */
+};
+
+/**
+ * struct smp2p_out_list_item - Maintains the state of outbound edge.
+ *
+ * @out_item_lock_lha1: Lock protecting all elements of the structure.
+ * @list: list of outbound entries (struct msm_smp2p_out).
+ * @smem_edge_out: Pointer to outbound smem item.
+ * @smem_edge_state: State of the outbound edge.
+ * @ops_ptr: Pointer to internal version-specific SMEM item access functions.
+ *
+ * @feature_ssr_ack_enabled: SSR ACK Support Enabled
+ * @restart_ack: Current cached state of the local ack bit
+ */
+struct smp2p_out_list_item {
+	spinlock_t out_item_lock_lha1;
+
+	struct list_head list;
+	struct smp2p_smem __iomem *smem_edge_out;
+	enum msm_smp2p_edge_state smem_edge_state;
+	struct smp2p_version_if *ops_ptr;	/* set by version negotiation */
+
+	bool feature_ssr_ack_enabled;
+	bool restart_ack;
+};
+/* Per-remote-processor outbound edge state, indexed by remote pid. */
+static struct smp2p_out_list_item out_list[SMP2P_NUM_PROCS];
+
+/* IPC log context returned by smp2p_get_log_ctx(); NULL until created. */
+static void *log_ctx;
+/* Runtime-tunable debug mask; writable via the debug_mask module param. */
+static int smp2p_debug_mask = MSM_SMP2P_INFO | MSM_SMP2P_DEBUG;
+module_param_named(debug_mask, smp2p_debug_mask,
+		   int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+/**
+ * struct smp2p_in - Represents the entry on remote processor.
+ *
+ * @name: Name of the entry.
+ * @remote_pid: Outbound processor ID.
+ * @in_edge_list: Adds this structure into smp2p_in_list_item::list.
+ * @in_notifier_list: List for notifier block for entry opening/updates.
+ * @prev_entry_val: Previous value of the entry.
+ * @entry_ptr: Points to the current value in smem item.
+ * @notifier_count: Counts the number of notifier registered per pid,entry.
+ */
+struct smp2p_in {
+	int remote_pid;
+	char name[SMP2P_MAX_ENTRY_NAME];
+	struct list_head in_edge_list;
+	struct raw_notifier_head in_notifier_list;
+	uint32_t prev_entry_val;
+	uint32_t __iomem *entry_ptr;	/* NULL until remote entry seen */
+	uint32_t notifier_count;
+};
+
+/**
+ * struct smp2p_in_list_item - Maintains the inbound edge state.
+ *
+ * @in_item_lock_lhb1: Lock protecting all elements of the structure.
+ * @list: List head for the entries on remote processor.
+ * @smem_edge_in: Pointer to the remote smem item.
+ */
+struct smp2p_in_list_item {
+	spinlock_t in_item_lock_lhb1;
+	struct list_head list;
+	struct smp2p_smem __iomem *smem_edge_in;
+	uint32_t item_size;		/* size of the remote SMEM item */
+	uint32_t safe_total_entries;	/* entry count clamped to item_size */
+};
+/* Per-remote-processor inbound edge state, indexed by remote pid. */
+static struct smp2p_in_list_item in_list[SMP2P_NUM_PROCS];
+
+/**
+ * SMEM Item access function interface.
+ *
+ * This interface is used to help isolate the implementation of
+ * the functionality from any changes in the shared data structures
+ * that may happen as versions are changed.
+ *
+ * @is_supported: True if this version is supported by SMP2P
+ * @negotiate_features: Returns (sub)set of supported features
+ * @negotiation_complete: Called when negotiation has been completed
+ * @find_entry: Finds existing / next empty entry
+ * @create_entry: Creates a new entry
+ * @read_entry: Reads the value of an entry
+ * @write_entry: Writes a new value to an entry
+ * @modify_entry: Does a read/modify/write of an entry
+ * @validate_size: Verifies the size of the remote SMEM item to ensure that
+ * an invalid item size doesn't result in an out-of-bounds
+ * memory access.
+ */
+struct smp2p_version_if {
+	/* common functions */
+	bool is_supported;	/* zero-init (false) marks unsupported versions */
+	uint32_t (*negotiate_features)(uint32_t features);
+	void (*negotiation_complete)(struct smp2p_out_list_item *);
+	void (*find_entry)(struct smp2p_smem __iomem *item,
+			uint32_t entries_total, char *name,
+			uint32_t **entry_ptr, int *empty_spot);
+
+	/* outbound entry functions */
+	int (*create_entry)(struct msm_smp2p_out *);
+	int (*read_entry)(struct msm_smp2p_out *, uint32_t *);
+	int (*write_entry)(struct msm_smp2p_out *, uint32_t);
+	int (*modify_entry)(struct msm_smp2p_out *, uint32_t, uint32_t);
+
+	/* inbound entry functions */
+	struct smp2p_smem __iomem *(*validate_size)(int remote_pid,
+			struct smp2p_smem __iomem *, uint32_t);
+};
+
+static int smp2p_do_negotiation(int remote_pid, struct smp2p_out_list_item *p);
+static void smp2p_send_interrupt(int remote_pid);
+
+/* v0 (uninitialized SMEM item) interface functions */
+static uint32_t smp2p_negotiate_features_v0(uint32_t features);
+static void smp2p_negotiation_complete_v0(struct smp2p_out_list_item *out_item);
+static void smp2p_find_entry_v0(struct smp2p_smem __iomem *item,
+ uint32_t entries_total, char *name, uint32_t **entry_ptr,
+ int *empty_spot);
+static int smp2p_out_create_v0(struct msm_smp2p_out *);
+static int smp2p_out_read_v0(struct msm_smp2p_out *, uint32_t *);
+static int smp2p_out_write_v0(struct msm_smp2p_out *, uint32_t);
+static int smp2p_out_modify_v0(struct msm_smp2p_out *, uint32_t, uint32_t);
+static struct smp2p_smem __iomem *smp2p_in_validate_size_v0(int remote_pid,
+ struct smp2p_smem __iomem *smem_item, uint32_t size);
+
+/* v1 interface functions */
+static uint32_t smp2p_negotiate_features_v1(uint32_t features);
+static void smp2p_negotiation_complete_v1(struct smp2p_out_list_item *out_item);
+static void smp2p_find_entry_v1(struct smp2p_smem __iomem *item,
+ uint32_t entries_total, char *name, uint32_t **entry_ptr,
+ int *empty_spot);
+static int smp2p_out_create_v1(struct msm_smp2p_out *);
+static int smp2p_out_read_v1(struct msm_smp2p_out *, uint32_t *);
+static int smp2p_out_write_v1(struct msm_smp2p_out *, uint32_t);
+static int smp2p_out_modify_v1(struct msm_smp2p_out *, uint32_t, uint32_t);
+static struct smp2p_smem __iomem *smp2p_in_validate_size_v1(int remote_pid,
+ struct smp2p_smem __iomem *smem_item, uint32_t size);
+
+/* Version interface functions */
+/* Dispatch table indexed by protocol version. */
+static struct smp2p_version_if version_if[] = {
+	/* v0: uninitialized SMEM item; .is_supported left false (zero-init) */
+	[0] = {
+		.negotiate_features = smp2p_negotiate_features_v0,
+		.negotiation_complete = smp2p_negotiation_complete_v0,
+		.find_entry = smp2p_find_entry_v0,
+		.create_entry = smp2p_out_create_v0,
+		.read_entry = smp2p_out_read_v0,
+		.write_entry = smp2p_out_write_v0,
+		.modify_entry = smp2p_out_modify_v0,
+		.validate_size = smp2p_in_validate_size_v0,
+	},
+	[1] = {
+		.is_supported = true,
+		.negotiate_features = smp2p_negotiate_features_v1,
+		.negotiation_complete = smp2p_negotiation_complete_v1,
+		.find_entry = smp2p_find_entry_v1,
+		.create_entry = smp2p_out_create_v1,
+		.read_entry = smp2p_out_read_v1,
+		.write_entry = smp2p_out_write_v1,
+		.modify_entry = smp2p_out_modify_v1,
+		.validate_size = smp2p_in_validate_size_v1,
+	},
+};
+
+/* interrupt configuration (filled by device tree) */
+static struct smp2p_interrupt_config smp2p_int_cfgs[SMP2P_NUM_PROCS] = {
+	[SMP2P_MODEM_PROC].name = "modem",
+	[SMP2P_AUDIO_PROC].name = "lpass",	/* low-power audio subsystem */
+	[SMP2P_WIRELESS_PROC].name = "wcnss",
+	[SMP2P_REMOTE_MOCK_PROC].name = "mock",	/* unit-test loopback */
+};
+
+/**
+ * smp2p_get_log_ctx - Return log context for other SMP2P modules.
+ *
+ * @returns: Log context or NULL if none.
+ */
+void *smp2p_get_log_ctx(void)
+{
+	return log_ctx;	/* may be NULL if logging was never initialized */
+}
+
+/**
+ * smp2p_get_debug_mask - Return debug mask.
+ *
+ * @returns: Current debug mask.
+ */
+int smp2p_get_debug_mask(void)
+{
+	return smp2p_debug_mask;	/* tunable via module parameter */
+}
+
+/**
+ * smp2p_interrupt_config - Return interrupt configuration.
+ *
+ * @returns interrupt configuration array for usage by debugfs.
+ */
+struct smp2p_interrupt_config *smp2p_get_interrupt_config(void)
+{
+	return smp2p_int_cfgs;	/* array of SMP2P_NUM_PROCS elements */
+}
+
+/**
+ * smp2p_pid_to_name - Lookup name for remote pid.
+ *
+ * @returns: name (may be NULL).
+ */
+const char *smp2p_pid_to_name(int remote_pid)
+{
+	/*
+	 * Fix: remote_pid is signed, so also reject negative values to
+	 * avoid indexing smp2p_int_cfgs[] out of bounds.
+	 */
+	if (remote_pid < 0 || remote_pid >= SMP2P_NUM_PROCS)
+		return NULL;
+
+	return smp2p_int_cfgs[remote_pid].name;
+}
+
+/**
+ * smp2p_get_in_item - Return pointer to remote smem item.
+ *
+ * @remote_pid: Processor ID of the remote system.
+ * @returns: Pointer to inbound SMEM item
+ *
+ * This is used by debugfs to print the smem items.
+ */
+struct smp2p_smem __iomem *smp2p_get_in_item(int remote_pid)
+{
+	void *ret = NULL;
+	unsigned long flags;
+
+	/*
+	 * Fix: validate remote_pid before using it as an array index.
+	 * The previous code took in_list[remote_pid]'s spinlock first,
+	 * which itself indexed out of bounds for an invalid pid.
+	 */
+	if (remote_pid < 0 || remote_pid >= SMP2P_NUM_PROCS)
+		return NULL;
+
+	spin_lock_irqsave(&in_list[remote_pid].in_item_lock_lhb1, flags);
+	ret = in_list[remote_pid].smem_edge_in;
+	spin_unlock_irqrestore(&in_list[remote_pid].in_item_lock_lhb1,
+			flags);
+
+	return ret;
+}
+
+/**
+ * smp2p_get_out_item - Return pointer to outbound SMEM item.
+ *
+ * @remote_pid: Processor ID of remote system.
+ * @state: Edge state of the outbound SMEM item.
+ * @returns: Pointer to outbound (remote) SMEM item.
+ */
+struct smp2p_smem __iomem *smp2p_get_out_item(int remote_pid, int *state)
+{
+	void *ret = NULL;
+	unsigned long flags;
+
+	/*
+	 * Fix: validate remote_pid before using it as an array index.
+	 * The previous code took out_list[remote_pid]'s spinlock before
+	 * the bounds check, reading out of bounds for an invalid pid.
+	 */
+	if (remote_pid < 0 || remote_pid >= SMP2P_NUM_PROCS)
+		return NULL;
+
+	spin_lock_irqsave(&out_list[remote_pid].out_item_lock_lha1, flags);
+	ret = out_list[remote_pid].smem_edge_out;
+	if (state)
+		*state = out_list[remote_pid].smem_edge_state;
+	spin_unlock_irqrestore(&out_list[remote_pid].out_item_lock_lha1, flags);
+
+	return ret;
+}
+
+/**
+ * smp2p_get_smem_item_id - Return the proper SMEM item ID.
+ *
+ * @write_id: Processor that will write to the item.
+ * @read_id: Processor that will read from the item.
+ * @returns: SMEM ID
+ */
+/*
+ * Map a (writer, reader) processor pair to its SMEM item ID; each
+ * writing processor owns a contiguous range of IDs, offset by the
+ * reading processor.  Returns -EINVAL for writers with no SMEM base.
+ */
+static int smp2p_get_smem_item_id(int write_pid, int read_pid)
+{
+	if (write_pid == SMP2P_APPS_PROC)
+		return SMEM_SMP2P_APPS_BASE + read_pid;
+	if (write_pid == SMP2P_MODEM_PROC)
+		return SMEM_SMP2P_MODEM_BASE + read_pid;
+	if (write_pid == SMP2P_AUDIO_PROC)
+		return SMEM_SMP2P_AUDIO_BASE + read_pid;
+	if (write_pid == SMP2P_WIRELESS_PROC)
+		return SMEM_SMP2P_WIRLESS_BASE + read_pid;
+	if (write_pid == SMP2P_POWER_PROC)
+		return SMEM_SMP2P_POWER_BASE + read_pid;
+
+	return -EINVAL;
+}
+
+/**
+ * Return pointer to SMEM item owned by the local processor.
+ *
+ * @remote_pid: Remote processor ID
+ * @returns: NULL for failure; otherwise pointer to SMEM item
+ *
+ * Must be called with out_item_lock_lha1 locked for mock proc.
+ */
+static void *smp2p_get_local_smem_item(int remote_pid)
+{
+	struct smp2p_smem __iomem *item_ptr = NULL;
+
+	if (remote_pid < SMP2P_REMOTE_MOCK_PROC) {
+		unsigned size;
+		int smem_id;
+
+		/* lookup or allocate SMEM item */
+		smem_id = smp2p_get_smem_item_id(SMP2P_APPS_PROC, remote_pid);
+		if (smem_id >= 0) {
+			item_ptr = smem_get_entry(smem_id, &size,
+							remote_pid, 0);
+
+			/* not found yet - allocate it now */
+			if (!item_ptr) {
+				size = sizeof(struct smp2p_smem_item);
+				item_ptr = smem_alloc(smem_id, size,
+							remote_pid, 0);
+			}
+		}
+	} else if (remote_pid == SMP2P_REMOTE_MOCK_PROC) {
+		/*
+		 * This path is only used during unit testing so
+		 * the GFP_ATOMIC allocation should not be a
+		 * concern.
+		 */
+		if (!out_list[SMP2P_REMOTE_MOCK_PROC].smem_edge_out)
+			item_ptr = kzalloc(
+					sizeof(struct smp2p_smem_item),
+					GFP_ATOMIC);
+	}
+	/* pids > SMP2P_REMOTE_MOCK_PROC fall through and return NULL */
+	return item_ptr;
+}
+
+/**
+ * smp2p_get_remote_smem_item - Return remote SMEM item.
+ *
+ * @remote_pid: Remote processor ID
+ * @out_item: Pointer to the output item structure
+ * @returns: NULL for failure; otherwise pointer to SMEM item
+ *
+ * Return pointer to SMEM item owned by the remote processor.
+ *
+ * Note that this function does an SMEM lookup which uses a remote spinlock,
+ * so this function should not be called more than necessary.
+ *
+ * Must be called with out_item_lock_lha1 and in_item_lock_lhb1 locked.
+ */
+static void *smp2p_get_remote_smem_item(int remote_pid,
+		struct smp2p_out_list_item *out_item)
+{
+	void *item_ptr = NULL;
+	unsigned size = 0;
+
+	if (!out_item)
+		return item_ptr;
+
+	if (remote_pid < SMP2P_REMOTE_MOCK_PROC) {
+		int smem_id;
+
+		/* the remote writes this item: remote -> apps direction */
+		smem_id = smp2p_get_smem_item_id(remote_pid, SMP2P_APPS_PROC);
+		if (smem_id >= 0)
+			item_ptr = smem_get_entry(smem_id, &size,
+							remote_pid, 0);
+	} else if (remote_pid == SMP2P_REMOTE_MOCK_PROC) {
+		item_ptr = msm_smp2p_get_remote_mock_smem_item(&size);
+	}
+	/* validate_size() tolerates a NULL item_ptr and returns NULL */
+	item_ptr = out_item->ops_ptr->validate_size(remote_pid, item_ptr, size);
+
+	return item_ptr;
+}
+
+/**
+ * smp2p_ssr_ack_needed - Returns true if SSR ACK required
+ *
+ * @rpid: Remote processor ID
+ *
+ * Must be called with out_item_lock_lha1 and in_item_lock_lhb1 locked.
+ */
+/*
+ * An ACK is needed when the feature is enabled and the remote's
+ * restart-done flag no longer matches our cached local ack bit.
+ */
+static bool smp2p_ssr_ack_needed(uint32_t rpid)
+{
+	bool remote_restart_done;
+
+	if (!out_list[rpid].feature_ssr_ack_enabled)
+		return false;
+
+	remote_restart_done =
+		SMP2P_GET_RESTART_DONE(in_list[rpid].smem_edge_in->flags);
+
+	return remote_restart_done != out_list[rpid].restart_ack;
+}
+
+/**
+ * smp2p_do_ssr_ack - Handles SSR ACK
+ *
+ * @rpid: Remote processor ID
+ *
+ * Must be called with out_item_lock_lha1 and in_item_lock_lhb1 locked.
+ */
+/*
+ * Toggle and publish the local SSR ack bit, then interrupt the remote,
+ * but only when smp2p_ssr_ack_needed() says an ack is outstanding.
+ */
+static void smp2p_do_ssr_ack(uint32_t rpid)
+{
+	bool new_ack;
+
+	if (!smp2p_ssr_ack_needed(rpid))
+		return;
+
+	new_ack = !out_list[rpid].restart_ack;
+	SMP2P_INFO("%s: ssr ack pid %d: %d -> %d\n", __func__, rpid,
+			out_list[rpid].restart_ack, new_ack);
+	out_list[rpid].restart_ack = new_ack;
+	SMP2P_SET_RESTART_ACK(out_list[rpid].smem_edge_out->flags, new_ack);
+	smp2p_send_interrupt(rpid);
+}
+
+/**
+ * smp2p_negotiate_features_v1 - Initial feature negotiation.
+ *
+ * @features: Inbound feature set.
+ * @returns: Supported features (will be a same/subset of @features).
+ */
+static uint32_t smp2p_negotiate_features_v1(uint32_t features)
+{
+	/*
+	 * NOTE(review): the local supported set is returned unconditionally
+	 * rather than intersected with @features, despite the kernel-doc's
+	 * "subset" wording; presumably the caller performs the intersection
+	 * during negotiation - verify against smp2p_do_negotiation().
+	 */
+	return SMP2P_FEATURE_SSR_ACK;
+}
+
+/**
+ * smp2p_negotiation_complete_v1 - Negotiation completed
+ *
+ * @out_item: Pointer to the output item structure
+ *
+ * Can be used to do final configuration based upon the negotiated feature set.
+ *
+ * Must be called with out_item_lock_lha1 locked.
+ */
+static void smp2p_negotiation_complete_v1(struct smp2p_out_list_item *out_item)
+{
+	/* latch the SSR-ACK feature if the negotiated set includes it */
+	if (SMP2P_GET_FEATURES(out_item->smem_edge_out->feature_version)
+			& SMP2P_FEATURE_SSR_ACK)
+		out_item->feature_ssr_ack_enabled = true;
+}
+
+/**
+ * smp2p_find_entry_v1 - Search for an entry in SMEM item.
+ *
+ * @item: Pointer to the smem item.
+ * @entries_total: Total number of entries in @item.
+ * @name: Name of the entry.
+ * @entry_ptr: Set to pointer of entry if found, NULL otherwise.
+ * @empty_spot: If non-null, set to the value of the next empty entry.
+ *
+ * Searches for entry @name in the SMEM item. If found, a pointer
+ * to the item is returned. If it isn't found, the first empty
+ * index is returned in @empty_spot.
+ */
+static void smp2p_find_entry_v1(struct smp2p_smem __iomem *item,
+		uint32_t entries_total, char *name, uint32_t **entry_ptr,
+		int *empty_spot)
+{
+	/* fix: loop counter is unsigned to match entries_total and avoid a
+	 * signed/unsigned comparison */
+	uint32_t i;
+	struct smp2p_entry_v1 *pos;
+
+	if (!item || !name || !entry_ptr) {
+		SMP2P_ERR("%s: invalid arguments %p, %p, %p\n",
+				__func__, item, name, entry_ptr);
+		return;
+	}
+
+	*entry_ptr = NULL;
+	if (empty_spot)
+		*empty_spot = -1;
+
+	/* entries start immediately after the item header */
+	pos = (struct smp2p_entry_v1 *)(item + 1);
+	for (i = 0; i < entries_total; i++, ++pos) {
+		if (pos->name[0]) {
+			if (!strncmp(pos->name, name, SMP2P_MAX_ENTRY_NAME)) {
+				*entry_ptr = &pos->entry;
+				break;
+			}
+		} else if (empty_spot && *empty_spot < 0) {
+			/* remember the first unused slot for the caller */
+			*empty_spot = i;
+		}
+	}
+}
+
+/**
+ * smp2p_out_create_v1 - Creates a outbound SMP2P entry.
+ *
+ * @out_entry: Pointer to the SMP2P entry structure.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Must be called with out_item_lock_lha1 locked.
+ */
+static int smp2p_out_create_v1(struct msm_smp2p_out *out_entry)
+{
+	struct smp2p_smem __iomem *smp2p_h_ptr;
+	struct smp2p_out_list_item *p_list;
+	uint32_t *state_entry_ptr;
+	/*
+	 * Fix: find_entry() takes an int * and stores -1 when no empty
+	 * slot exists, so this must be a signed int; the previous
+	 * uint32_t made &empty_spot an incompatible pointer type.
+	 */
+	int empty_spot;
+	uint32_t entries_total;
+	uint32_t entries_valid;
+
+	if (!out_entry)
+		return -EINVAL;
+
+	p_list = &out_list[out_entry->remote_pid];
+	if (p_list->smem_edge_state != SMP2P_EDGE_STATE_OPENED) {
+		SMP2P_ERR("%s: item '%s':%d opened - wrong create called\n",
+			__func__, out_entry->name, out_entry->remote_pid);
+		return -ENODEV;
+	}
+
+	smp2p_h_ptr = p_list->smem_edge_out;
+	entries_total = SMP2P_GET_ENT_TOTAL(smp2p_h_ptr->valid_total_ent);
+	entries_valid = SMP2P_GET_ENT_VALID(smp2p_h_ptr->valid_total_ent);
+
+	p_list->ops_ptr->find_entry(smp2p_h_ptr, entries_total,
+			out_entry->name, &state_entry_ptr, &empty_spot);
+	if (state_entry_ptr) {
+		/* re-use existing entry */
+		out_entry->l_smp2p_entry = state_entry_ptr;
+
+		SMP2P_DBG("%s: item '%s':%d reused\n", __func__,
+				out_entry->name, out_entry->remote_pid);
+	} else if (entries_valid >= entries_total) {
+		/* need to allocate entry, but no more space */
+		SMP2P_ERR("%s: no space for item '%s':%d\n",
+			__func__, out_entry->name, out_entry->remote_pid);
+		return -ENOMEM;
+	} else {
+		/* allocate a new entry */
+		struct smp2p_entry_v1 *entry_ptr;
+
+		entry_ptr = (struct smp2p_entry_v1 *)((char *)(smp2p_h_ptr + 1)
+				+ empty_spot * sizeof(struct smp2p_entry_v1));
+		memcpy_toio(entry_ptr->name, out_entry->name,
+				sizeof(entry_ptr->name));
+		out_entry->l_smp2p_entry = &entry_ptr->entry;
+		++entries_valid;
+		SMP2P_DBG("%s: item '%s':%d fully created as entry %d of %d\n",
+				__func__, out_entry->name,
+				out_entry->remote_pid,
+				entries_valid, entries_total);
+		SMP2P_SET_ENT_VALID(smp2p_h_ptr->valid_total_ent,
+				entries_valid);
+		smp2p_send_interrupt(out_entry->remote_pid);
+	}
+	/* notify local listeners that the entry is now open */
+	raw_notifier_call_chain(&out_entry->msm_smp2p_notifier_list,
+			SMP2P_OPEN, 0);
+
+	return 0;
+}
+
+/**
+ * smp2p_out_read_v1 - Read the data from an outbound entry.
+ *
+ * @out_entry: Pointer to the SMP2P entry structure.
+ * @data: Out pointer, the data is available in this argument on success.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Must be called with out_item_lock_lha1 locked.
+ */
+static int smp2p_out_read_v1(struct msm_smp2p_out *out_entry, uint32_t *data)
+{
+	struct smp2p_smem __iomem *hdr;
+	uint32_t rpid;
+
+	if (!out_entry)
+		return -EINVAL;
+
+	/* sanity-check that the item header matches this entry's pid */
+	hdr = out_list[out_entry->remote_pid].smem_edge_out;
+	rpid = SMP2P_GET_REMOTE_PID(hdr->rem_loc_proc_id);
+	if (rpid != out_entry->remote_pid)
+		return -EINVAL;
+
+	if (!out_entry->l_smp2p_entry) {
+		SMP2P_ERR("%s: '%s':%d not yet OPEN\n", __func__,
+				out_entry->name, rpid);
+		return -ENODEV;
+	}
+
+	*data = readl_relaxed(out_entry->l_smp2p_entry);
+	return 0;
+}
+
+/**
+ * smp2p_out_write_v1 - Writes an outbound entry value.
+ *
+ * @out_entry: Pointer to the SMP2P entry structure.
+ * @data: The data to be written.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Must be called with out_item_lock_lha1 locked.
+ */
+static int smp2p_out_write_v1(struct msm_smp2p_out *out_entry, uint32_t data)
+{
+	struct smp2p_smem __iomem *hdr;
+	uint32_t rpid;
+
+	if (!out_entry)
+		return -EINVAL;
+
+	/* sanity-check that the item header matches this entry's pid */
+	hdr = out_list[out_entry->remote_pid].smem_edge_out;
+	rpid = SMP2P_GET_REMOTE_PID(hdr->rem_loc_proc_id);
+	if (rpid != out_entry->remote_pid)
+		return -EINVAL;
+
+	if (!out_entry->l_smp2p_entry) {
+		SMP2P_ERR("%s: '%s':%d not yet OPEN\n", __func__,
+				out_entry->name, rpid);
+		return -ENODEV;
+	}
+
+	writel_relaxed(data, out_entry->l_smp2p_entry);
+	smp2p_send_interrupt(rpid);
+	return 0;
+}
+
+/**
+ * smp2p_out_modify_v1 - Modifies and outbound value.
+ *
+ * @set_mask: Mask containing the bits that needs to be set.
+ * @clear_mask: Mask containing the bits that needs to be cleared.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * The clear mask is applied first, so if a bit is set in both clear and
+ * set mask, the result will be that the bit is set.
+ *
+ * Must be called with out_item_lock_lha1 locked.
+ */
+static int smp2p_out_modify_v1(struct msm_smp2p_out *out_entry,
+		uint32_t set_mask, uint32_t clear_mask)
+{
+	struct smp2p_smem __iomem *hdr;
+	uint32_t rpid;
+	uint32_t val;
+
+	if (!out_entry)
+		return -EINVAL;
+
+	/* sanity-check that the item header matches this entry's pid */
+	hdr = out_list[out_entry->remote_pid].smem_edge_out;
+	rpid = SMP2P_GET_REMOTE_PID(hdr->rem_loc_proc_id);
+	if (rpid != out_entry->remote_pid)
+		return -EINVAL;
+
+	if (!out_entry->l_smp2p_entry) {
+		SMP2P_ERR("%s: '%s':%d not yet OPEN\n", __func__,
+				out_entry->name, rpid);
+		return -ENODEV;
+	}
+
+	/* clear first, then set: a bit in both masks ends up set */
+	val = readl_relaxed(out_entry->l_smp2p_entry);
+	val = (val & ~clear_mask) | set_mask;
+	writel_relaxed(val, out_entry->l_smp2p_entry);
+
+	smp2p_send_interrupt(rpid);
+	return 0;
+}
+
+/**
+ * smp2p_in_validate_size_v1 - Size validation for version 1.
+ *
+ * @remote_pid: Remote processor ID.
+ * @smem_item: Pointer to the inbound SMEM item.
+ * @size: Size of the SMEM item.
+ * @returns: Validated smem_item pointer (or NULL if size is too small).
+ *
+ * Validates we don't end up with out-of-bounds array access due to invalid
+ * smem item size. If out-of-bound array access can't be avoided, then an
+ * error message is printed and NULL is returned to prevent usage of the
+ * item.
+ *
+ * Must be called with in_item_lock_lhb1 locked.
+ */
+static struct smp2p_smem __iomem *smp2p_in_validate_size_v1(int remote_pid,
+		struct smp2p_smem __iomem *smem_item, uint32_t size)
+{
+	uint32_t total_entries;
+	unsigned expected_size;
+	struct smp2p_smem __iomem *item_ptr;
+	struct smp2p_in_list_item *in_item;
+
+	if (remote_pid >= SMP2P_NUM_PROCS || !smem_item)
+		return NULL;
+
+	in_item = &in_list[remote_pid];
+	item_ptr = (struct smp2p_smem __iomem *)smem_item;
+
+	total_entries = SMP2P_GET_ENT_TOTAL(item_ptr->valid_total_ent);
+	if (total_entries > 0) {
+		in_item->safe_total_entries = total_entries;
+		in_item->item_size = size;
+
+		/* size the item must have for the advertised entry count */
+		expected_size = sizeof(struct smp2p_smem) +
+			(total_entries * sizeof(struct smp2p_entry_v1));
+
+		if (size < expected_size) {
+			unsigned new_size;
+
+			/*
+			 * Item is smaller than its header advertises: clamp
+			 * the usable entry count to what actually fits in
+			 * @size so later entry walks stay in bounds.
+			 */
+			new_size = size;
+			new_size -= sizeof(struct smp2p_smem);
+			new_size /= sizeof(struct smp2p_entry_v1);
+			in_item->safe_total_entries = new_size;
+
+			SMP2P_ERR(
+			"%s pid %d item too small for %d entries; expected: %d actual: %d; reduced to %d entries\n",
+				__func__, remote_pid, total_entries,
+				expected_size, size, new_size);
+		}
+	} else {
+		/*
+		 * Total entries is 0, so the entry is still being initialized
+		 * or is invalid. Either way, treat it as if the item does
+		 * not exist yet.
+		 */
+		in_item->safe_total_entries = 0;
+		in_item->item_size = 0;
+	}
+	return item_ptr;
+}
+
+/**
+ * smp2p_negotiate_features_v0 - Initial feature negotiation.
+ *
+ * @features: Inbound feature set (ignored for version 0).
+ * @returns: 0 (no features supported for v0).
+ */
+static uint32_t smp2p_negotiate_features_v0(uint32_t features)
+{
+	/* version 0 of the protocol defines no optional features */
+	return 0;
+}
+
+/**
+ * smp2p_negotiation_complete_v0 - Negotiation completed
+ *
+ * @out_item: Pointer to the output item structure
+ *
+ * Can be used to do final configuration based upon the negotiated feature set.
+ *
+ * Negotiation can never complete while the v0 (un-negotiated) ops table is
+ * still installed, so reaching this function indicates a logic error and is
+ * logged as such.
+ */
+static void smp2p_negotiation_complete_v0(struct smp2p_out_list_item *out_item)
+{
+	SMP2P_ERR("%s: invalid negotiation complete for v0 pid %d\n",
+		__func__,
+		SMP2P_GET_REMOTE_PID(out_item->smem_edge_out->rem_loc_proc_id));
+}
+
+/**
+ * smp2p_find_entry_v0 - Stub function.
+ *
+ * @item: Pointer to the smem item.
+ * @entries_total: Total number of entries in @item.
+ * @name: Name of the entry.
+ * @entry_ptr: Set to NULL (no entries exist before negotiation).
+ * @empty_spot: If non-null, set to -1 (no empty slot available).
+ *
+ * Entries cannot be searched for until item negotiation has been completed.
+ */
+static void smp2p_find_entry_v0(struct smp2p_smem __iomem *item,
+		uint32_t entries_total, char *name, uint32_t **entry_ptr,
+		int *empty_spot)
+{
+	if (entry_ptr != NULL)
+		*entry_ptr = NULL;
+	if (empty_spot != NULL)
+		*empty_spot = -1;
+
+	SMP2P_ERR("%s: invalid - item negotiation incomplete\n", __func__);
+}
+
+/**
+ * smp2p_out_create_v0 - Initial creation function.
+ *
+ * @out_entry: Pointer to the SMP2P entry structure.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * If the outbound SMEM item negotiation is not complete, then
+ * this function is called to start the negotiation process.
+ * Eventually when the negotiation process is complete, this
+ * function pointer is switched with the appropriate function
+ * for the version of SMP2P being created.
+ *
+ * Must be called with out_item_lock_lha1 locked.
+ */
+static int smp2p_out_create_v0(struct msm_smp2p_out *out_entry)
+{
+	int edge_state;
+	struct smp2p_out_list_item *item_ptr;
+
+	if (!out_entry)
+		return -EINVAL;
+
+	edge_state = out_list[out_entry->remote_pid].smem_edge_state;
+
+	switch (edge_state) {
+	case SMP2P_EDGE_STATE_CLOSED:
+		/* start negotiation */
+		item_ptr = &out_list[out_entry->remote_pid];
+		/*
+		 * NOTE(review): smp2p_do_negotiation() returns an errno, not
+		 * an edge state; the assignment below is a dead store since
+		 * edge_state is not read afterwards — presumably a debug
+		 * leftover, confirm intent.
+		 */
+		edge_state = smp2p_do_negotiation(out_entry->remote_pid,
+				item_ptr);
+		break;
+
+	case SMP2P_EDGE_STATE_OPENING:
+		/* still negotiating */
+		break;
+
+	case SMP2P_EDGE_STATE_OPENED:
+		/* edge already open; v0 create should have been replaced */
+		SMP2P_ERR("%s: item '%s':%d opened - wrong create called\n",
+			__func__, out_entry->name, out_entry->remote_pid);
+		break;
+
+	default:
+		SMP2P_ERR("%s: item '%s':%d invalid SMEM item state %d\n",
+			__func__, out_entry->name, out_entry->remote_pid,
+			edge_state);
+		break;
+	}
+	return 0;
+}
+
+/**
+ * smp2p_out_read_v0 - Stub function.
+ *
+ * @out_entry: Pointer to the SMP2P entry structure.
+ * @data: Out pointer, the data is available in this argument on success.
+ * @returns: -ENODEV (reads are impossible before negotiation completes)
+ */
+static int smp2p_out_read_v0(struct msm_smp2p_out *out_entry, uint32_t *data)
+{
+	SMP2P_ERR("%s: item '%s':%d not OPEN\n",
+		__func__, out_entry->name, out_entry->remote_pid);
+	return -ENODEV;
+}
+
+/**
+ * smp2p_out_write_v0 - Stub function.
+ *
+ * @out_entry: Pointer to the SMP2P entry structure.
+ * @data: The data to be written.
+ * @returns: -ENODEV (writes are impossible before negotiation completes)
+ */
+static int smp2p_out_write_v0(struct msm_smp2p_out *out_entry, uint32_t data)
+{
+	SMP2P_ERR("%s: item '%s':%d not yet OPEN\n",
+		__func__, out_entry->name, out_entry->remote_pid);
+	return -ENODEV;
+}
+
+/**
+ * smp2p_out_modify_v0 - Stub function.
+ *
+ * @out_entry: Pointer to the SMP2P entry structure.
+ * @set_mask: Mask containing the bits that needs to be set.
+ * @clear_mask: Mask containing the bits that needs to be cleared.
+ * @returns: -ENODEV (modifies are impossible before negotiation completes)
+ */
+static int smp2p_out_modify_v0(struct msm_smp2p_out *out_entry,
+		uint32_t set_mask, uint32_t clear_mask)
+{
+	SMP2P_ERR("%s: item '%s':%d not yet OPEN\n",
+		__func__, out_entry->name, out_entry->remote_pid);
+
+	return -ENODEV;
+}
+
+/**
+ * smp2p_in_validate_size_v0 - Stub function.
+ *
+ * @remote_pid: Remote processor ID.
+ * @smem_item: Pointer to the inbound SMEM item.
+ * @size: Size of the SMEM item.
+ * @returns: Validated smem_item pointer (or NULL if size is too small).
+ *
+ * Validates we don't end up with out-of-bounds array access due to invalid
+ * smem item size. If out-of-bound array access can't be avoided, then an
+ * error message is printed and NULL is returned to prevent usage of the
+ * item.
+ *
+ * Must be called with in_item_lock_lhb1 locked.
+ */
+static struct smp2p_smem __iomem *smp2p_in_validate_size_v0(int remote_pid,
+		struct smp2p_smem __iomem *smem_item, uint32_t size)
+{
+	struct smp2p_in_list_item *in_item;
+
+	if (remote_pid >= SMP2P_NUM_PROCS || !smem_item)
+		return NULL;
+
+	in_item = &in_list[remote_pid];
+
+	/* before negotiation, only the fixed-size header can be trusted */
+	if (size < sizeof(struct smp2p_smem)) {
+		SMP2P_ERR(
+			"%s pid %d item size too small; expected: %zu actual: %d\n",
+			__func__, remote_pid,
+			sizeof(struct smp2p_smem), size);
+		smem_item = NULL;
+		in_item->item_size = 0;
+	} else {
+		in_item->item_size = size;
+	}
+	return smem_item;
+}
+
+/**
+ * smp2p_init_header - Initializes the header of the smem item.
+ *
+ * @header_ptr: Pointer to the smp2p header.
+ * @local_pid: Local processor ID.
+ * @remote_pid: Remote processor ID.
+ * @features: Features of smp2p implementation.
+ * @version: Version of smp2p implementation.
+ *
+ * Initializes the header as defined in the protocol specification.
+ *
+ * The version field is written last, after a write barrier: the remote
+ * side treats a non-zero version as "item fully initialized", so the
+ * ordering here is load-bearing and must not be changed.
+ */
+void smp2p_init_header(struct smp2p_smem __iomem *header_ptr,
+		int local_pid, int remote_pid,
+		uint32_t features, uint32_t version)
+{
+	header_ptr->magic = SMP2P_MAGIC;
+	SMP2P_SET_LOCAL_PID(header_ptr->rem_loc_proc_id, local_pid);
+	SMP2P_SET_REMOTE_PID(header_ptr->rem_loc_proc_id, remote_pid);
+	SMP2P_SET_FEATURES(header_ptr->feature_version, features);
+	SMP2P_SET_ENT_TOTAL(header_ptr->valid_total_ent, SMP2P_MAX_ENTRY);
+	SMP2P_SET_ENT_VALID(header_ptr->valid_total_ent, 0);
+	header_ptr->flags = 0;
+
+	/* ensure that all fields are valid before version is written */
+	wmb();
+	SMP2P_SET_VERSION(header_ptr->feature_version, version);
+}
+
+/**
+ * smp2p_do_negotiation - Implements negotiation algorithm.
+ *
+ * @remote_pid: Remote processor ID.
+ * @out_item: Pointer to the outbound list item.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Must be called with out_item_lock_lha1 locked. Will internally lock
+ * in_item_lock_lhb1.
+ */
+static int smp2p_do_negotiation(int remote_pid,
+		struct smp2p_out_list_item *out_item)
+{
+	struct smp2p_smem __iomem *r_smem_ptr;
+	struct smp2p_smem __iomem *l_smem_ptr;
+	uint32_t r_version;
+	uint32_t r_feature;
+	uint32_t l_version, l_feature;
+	int prev_state;
+
+	if (remote_pid >= SMP2P_NUM_PROCS || !out_item)
+		return -EINVAL;
+	/* a failed edge stays failed; do not retry negotiation */
+	if (out_item->smem_edge_state == SMP2P_EDGE_STATE_FAILED)
+		return -EPERM;
+
+	prev_state = out_item->smem_edge_state;
+
+	/* create local item */
+	if (!out_item->smem_edge_out) {
+		out_item->smem_edge_out = smp2p_get_local_smem_item(remote_pid);
+		if (!out_item->smem_edge_out) {
+			SMP2P_ERR(
+				"%s unable to allocate SMEM item for pid %d\n",
+				__func__, remote_pid);
+			return -ENODEV;
+		}
+		out_item->smem_edge_state = SMP2P_EDGE_STATE_OPENING;
+	}
+	l_smem_ptr = out_item->smem_edge_out;
+
+	/* retrieve remote side and version */
+	spin_lock(&in_list[remote_pid].in_item_lock_lhb1);
+	r_smem_ptr = smp2p_get_remote_smem_item(remote_pid, out_item);
+	spin_unlock(&in_list[remote_pid].in_item_lock_lhb1);
+
+	r_version = 0;
+	if (r_smem_ptr) {
+		r_version = SMP2P_GET_VERSION(r_smem_ptr->feature_version);
+		r_feature = SMP2P_GET_FEATURES(r_smem_ptr->feature_version);
+	}
+
+	if (r_version == 0) {
+		/*
+		 * Either remote side doesn't exist, or is in the
+		 * process of being initialized (the version is set last).
+		 *
+		 * In either case, treat as if the other side doesn't exist
+		 * and write out our maximum supported version.
+		 */
+		r_smem_ptr = NULL;
+		r_version = ARRAY_SIZE(version_if) - 1;
+		r_feature = ~0U;
+	}
+
+	/* find maximum supported version and feature set */
+	l_version = min(r_version, (uint32_t)ARRAY_SIZE(version_if) - 1);
+	for (; l_version > 0; --l_version) {
+		if (!version_if[l_version].is_supported)
+			continue;
+
+		/* found valid version */
+		l_feature = version_if[l_version].negotiate_features(~0U);
+		/* features can only be ANDed once versions agree */
+		if (l_version == r_version)
+			l_feature &= r_feature;
+		break;
+	}
+
+	if (l_version == 0) {
+		SMP2P_ERR(
+			"%s: negotiation failure pid %d: RV %d RF %x\n",
+			__func__, remote_pid, r_version, r_feature
+			);
+		/*
+		 * NOTE(review): this writes the local edge-state enum value
+		 * (SMP2P_EDGE_STATE_FAILED) into the protocol version field
+		 * to signal failure to the remote side — confirm against the
+		 * SMP2P protocol spec that this is the intended sentinel.
+		 */
+		SMP2P_SET_VERSION(l_smem_ptr->feature_version,
+			SMP2P_EDGE_STATE_FAILED);
+		smp2p_send_interrupt(remote_pid);
+		out_item->smem_edge_state = SMP2P_EDGE_STATE_FAILED;
+		return -EPERM;
+	}
+
+	/* update header and notify remote side */
+	smp2p_init_header(l_smem_ptr, SMP2P_APPS_PROC, remote_pid,
+		l_feature, l_version);
+	smp2p_send_interrupt(remote_pid);
+
+	/* handle internal state changes */
+	if (r_smem_ptr && l_version == r_version &&
+			l_feature == r_feature) {
+		struct msm_smp2p_out *pos;
+
+		/* negotiation complete */
+		out_item->ops_ptr = &version_if[l_version];
+		out_item->ops_ptr->negotiation_complete(out_item);
+		out_item->smem_edge_state = SMP2P_EDGE_STATE_OPENED;
+		SMP2P_INFO(
+			"%s: negotiation complete pid %d: State %d->%d F0x%08x\n",
+			__func__, remote_pid, prev_state,
+			out_item->smem_edge_state, l_feature);
+
+		/* create any pending outbound entries */
+		list_for_each_entry(pos, &out_item->list, out_edge_list) {
+			out_item->ops_ptr->create_entry(pos);
+		}
+
+		/* update inbound edge */
+		spin_lock(&in_list[remote_pid].in_item_lock_lhb1);
+		(void)out_item->ops_ptr->validate_size(remote_pid, r_smem_ptr,
+			in_list[remote_pid].item_size);
+		in_list[remote_pid].smem_edge_in = r_smem_ptr;
+		spin_unlock(&in_list[remote_pid].in_item_lock_lhb1);
+	} else {
+		/* still waiting for the remote side to converge */
+		SMP2P_INFO("%s: negotiation pid %d: State %d->%d F0x%08x\n",
+			__func__, remote_pid, prev_state,
+			out_item->smem_edge_state, l_feature);
+	}
+	return 0;
+}
+
+/**
+ * msm_smp2p_out_open - Opens an outbound entry.
+ *
+ * @remote_pid: Outbound processor ID.
+ * @name: Name of the entry.
+ * @open_notifier: Notifier block for the open notification.
+ * @handle: Handle to the smem entry structure.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Opens an outbound entry with the name specified by entry, from the
+ * local processor to the remote processor(remote_pid). If the entry, remote_pid
+ * and open_notifier are valid, then handle will be set and zero will be
+ * returned. The smem item that holds this entry will be created if it has
+ * not been created according to the version negotiation algorithm.
+ * The open_notifier will be used to notify the clients about the
+ * availability of the entry.
+ */
+int msm_smp2p_out_open(int remote_pid, const char *name,
+		struct notifier_block *open_notifier,
+		struct msm_smp2p_out **handle)
+{
+	struct msm_smp2p_out *out_entry;
+	struct msm_smp2p_out *pos;
+	int ret = 0;
+	unsigned long flags;
+
+	if (handle)
+		*handle = NULL;
+
+	if (remote_pid >= SMP2P_NUM_PROCS || !name || !open_notifier || !handle)
+		return -EINVAL;
+
+	/* interrupt config for the edge must exist before any API use */
+	if ((remote_pid != SMP2P_REMOTE_MOCK_PROC) &&
+			!smp2p_int_cfgs[remote_pid].is_configured) {
+		SMP2P_INFO("%s before msm_smp2p_init(): pid[%d] name[%s]\n",
+			__func__, remote_pid, name);
+		return -EPROBE_DEFER;
+	}
+
+	/* Allocate the smp2p object and node (outside the spinlock) */
+	out_entry = kzalloc(sizeof(*out_entry), GFP_KERNEL);
+	if (!out_entry)
+		return -ENOMEM;
+
+	/* Handle duplicate registration */
+	spin_lock_irqsave(&out_list[remote_pid].out_item_lock_lha1, flags);
+	list_for_each_entry(pos, &out_list[remote_pid].list,
+			out_edge_list) {
+		if (!strcmp(pos->name, name)) {
+			spin_unlock_irqrestore(
+				&out_list[remote_pid].out_item_lock_lha1,
+				flags);
+			kfree(out_entry);
+			SMP2P_ERR("%s: duplicate registration '%s':%d\n",
+				__func__, name, remote_pid);
+			return -EBUSY;
+		}
+	}
+
+	out_entry->remote_pid = remote_pid;
+	RAW_INIT_NOTIFIER_HEAD(&out_entry->msm_smp2p_notifier_list);
+	strlcpy(out_entry->name, name, SMP2P_MAX_ENTRY_NAME);
+	out_entry->open_nb = open_notifier;
+	raw_notifier_chain_register(&out_entry->msm_smp2p_notifier_list,
+		out_entry->open_nb);
+	list_add(&out_entry->out_edge_list, &out_list[remote_pid].list);
+
+	/* create_entry runs under out_item_lock_lha1, per the ops contract */
+	ret = out_list[remote_pid].ops_ptr->create_entry(out_entry);
+	if (ret) {
+		/* unwind everything done above before reporting failure */
+		list_del(&out_entry->out_edge_list);
+		raw_notifier_chain_unregister(
+			&out_entry->msm_smp2p_notifier_list,
+			out_entry->open_nb);
+		spin_unlock_irqrestore(
+			&out_list[remote_pid].out_item_lock_lha1, flags);
+		kfree(out_entry);
+		SMP2P_ERR("%s: unable to open '%s':%d error %d\n",
+			__func__, name, remote_pid, ret);
+		return ret;
+	}
+	spin_unlock_irqrestore(&out_list[remote_pid].out_item_lock_lha1,
+		flags);
+	*handle = out_entry;
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_smp2p_out_open);
+
+/**
+ * msm_smp2p_out_close - Closes the handle to an outbound entry.
+ *
+ * @handle: Pointer to smp2p out entry handle.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * The actual entry will not be deleted and can be re-opened at a later
+ * time. The handle will be set to NULL.
+ */
+int msm_smp2p_out_close(struct msm_smp2p_out **handle)
+{
+	unsigned long flags;
+	struct msm_smp2p_out *out_entry;
+	struct smp2p_out_list_item *out_item;
+
+	if (!handle || !*handle)
+		return -EINVAL;
+
+	/* take ownership of the handle before touching any state */
+	out_entry = *handle;
+	*handle = NULL;
+
+	if ((out_entry->remote_pid != SMP2P_REMOTE_MOCK_PROC) &&
+			!smp2p_int_cfgs[out_entry->remote_pid].is_configured) {
+		SMP2P_INFO("%s before msm_smp2p_init(): pid[%d] name[%s]\n",
+			__func__, out_entry->remote_pid, out_entry->name);
+		return -EPROBE_DEFER;
+	}
+
+	out_item = &out_list[out_entry->remote_pid];
+	spin_lock_irqsave(&out_item->out_item_lock_lha1, flags);
+	list_del(&out_entry->out_edge_list);
+	raw_notifier_chain_unregister(&out_entry->msm_smp2p_notifier_list,
+		out_entry->open_nb);
+	spin_unlock_irqrestore(&out_item->out_item_lock_lha1, flags);
+
+	/* only the local bookkeeping object is freed, not the SMEM entry */
+	kfree(out_entry);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_smp2p_out_close);
+
+/**
+ * msm_smp2p_out_read - Allows reading the entry.
+ *
+ * @handle: Handle to the smem entry structure.
+ * @data: Out pointer that holds the read data.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Allows reading of the outbound entry for read-modify-write
+ * operation.
+ */
+int msm_smp2p_out_read(struct msm_smp2p_out *handle, uint32_t *data)
+{
+	struct smp2p_out_list_item *item;
+	unsigned long irq_flags;
+	int rc;
+
+	if (!handle || !data)
+		return -EINVAL;
+
+	if (handle->remote_pid != SMP2P_REMOTE_MOCK_PROC &&
+			!smp2p_int_cfgs[handle->remote_pid].is_configured) {
+		SMP2P_INFO("%s before msm_smp2p_init(): pid[%d] name[%s]\n",
+			__func__, handle->remote_pid, handle->name);
+		return -EPROBE_DEFER;
+	}
+
+	/* delegate to the negotiated version's read op under the edge lock */
+	item = &out_list[handle->remote_pid];
+	spin_lock_irqsave(&item->out_item_lock_lha1, irq_flags);
+	rc = item->ops_ptr->read_entry(handle, data);
+	spin_unlock_irqrestore(&item->out_item_lock_lha1, irq_flags);
+
+	return rc;
+}
+EXPORT_SYMBOL(msm_smp2p_out_read);
+
+/**
+ * msm_smp2p_out_write - Allows writing to the entry.
+ *
+ * @handle: Handle to smem entry structure.
+ * @data: Data that has to be written.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Writes a new value to the output entry. Multiple back-to-back writes
+ * may overwrite previous writes before the remote processor get a chance
+ * to see them leading to ABA race condition. The client must implement
+ * their own synchronization mechanism (such as echo mechanism) if this is
+ * not acceptable.
+ */
+int msm_smp2p_out_write(struct msm_smp2p_out *handle, uint32_t data)
+{
+	struct smp2p_out_list_item *item;
+	unsigned long irq_flags;
+	int rc;
+
+	if (!handle)
+		return -EINVAL;
+
+	if (handle->remote_pid != SMP2P_REMOTE_MOCK_PROC &&
+			!smp2p_int_cfgs[handle->remote_pid].is_configured) {
+		SMP2P_INFO("%s before msm_smp2p_init(): pid[%d] name[%s]\n",
+			__func__, handle->remote_pid, handle->name);
+		return -EPROBE_DEFER;
+	}
+
+	/* delegate to the negotiated version's write op under the edge lock */
+	item = &out_list[handle->remote_pid];
+	spin_lock_irqsave(&item->out_item_lock_lha1, irq_flags);
+	rc = item->ops_ptr->write_entry(handle, data);
+	spin_unlock_irqrestore(&item->out_item_lock_lha1, irq_flags);
+
+	return rc;
+}
+EXPORT_SYMBOL(msm_smp2p_out_write);
+
+/**
+ * msm_smp2p_out_modify - Modifies the entry.
+ *
+ * @handle: Handle to the smem entry structure.
+ * @set_mask: Specifies the bits that needs to be set.
+ * @clear_mask: Specifies the bits that needs to be cleared.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * The modification is done by doing a bitwise AND of clear mask followed by
+ * the bit wise OR of set mask. The clear bit mask is applied first to the
+ * data, so if a bit is set in both the clear mask and the set mask, then in
+ * the result is a set bit. Multiple back-to-back modifications may overwrite
+ * previous values before the remote processor gets a chance to see them
+ * leading to ABA race condition. The client must implement their own
+ * synchronization mechanism (such as echo mechanism) if this is not
+ * acceptable.
+ */
+int msm_smp2p_out_modify(struct msm_smp2p_out *handle, uint32_t set_mask,
+		uint32_t clear_mask)
+{
+	struct smp2p_out_list_item *item;
+	unsigned long irq_flags;
+	int rc;
+
+	if (!handle)
+		return -EINVAL;
+
+	if (handle->remote_pid != SMP2P_REMOTE_MOCK_PROC &&
+			!smp2p_int_cfgs[handle->remote_pid].is_configured) {
+		SMP2P_INFO("%s before msm_smp2p_init(): pid[%d] name[%s]\n",
+			__func__, handle->remote_pid, handle->name);
+		return -EPROBE_DEFER;
+	}
+
+	/* delegate to the negotiated version's modify op under the edge lock */
+	item = &out_list[handle->remote_pid];
+	spin_lock_irqsave(&item->out_item_lock_lha1, irq_flags);
+	rc = item->ops_ptr->modify_entry(handle, set_mask, clear_mask);
+	spin_unlock_irqrestore(&item->out_item_lock_lha1, irq_flags);
+
+	return rc;
+}
+EXPORT_SYMBOL(msm_smp2p_out_modify);
+
+/**
+ * msm_smp2p_in_read - Read an entry on a remote processor.
+ *
+ * @remote_pid: Processor ID of the remote processor.
+ * @name: Name of the entry that is to be read.
+ * @data: Output pointer, the value will be placed here if successful.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ */
+int msm_smp2p_in_read(int remote_pid, const char *name, uint32_t *data)
+{
+	unsigned long flags;
+	struct smp2p_out_list_item *out_item;
+	uint32_t *entry_ptr = NULL;
+
+	/*
+	 * Validate all arguments up front. @name and @data were previously
+	 * unchecked: a NULL @data crashed at the final readl_relaxed() and
+	 * a NULL @name was passed straight into the entry search. This now
+	 * matches the validation done by msm_smp2p_in_register().
+	 */
+	if (remote_pid >= SMP2P_NUM_PROCS || !name || !data)
+		return -EINVAL;
+
+	if ((remote_pid != SMP2P_REMOTE_MOCK_PROC) &&
+			!smp2p_int_cfgs[remote_pid].is_configured) {
+		SMP2P_INFO("%s before msm_smp2p_init(): pid[%d] name[%s]\n",
+			__func__, remote_pid, name);
+		return -EPROBE_DEFER;
+	}
+
+	out_item = &out_list[remote_pid];
+	spin_lock_irqsave(&out_item->out_item_lock_lha1, flags);
+	spin_lock(&in_list[remote_pid].in_item_lock_lhb1);
+
+	/* entry search is only meaningful once the inbound item exists */
+	if (in_list[remote_pid].smem_edge_in)
+		out_item->ops_ptr->find_entry(
+			in_list[remote_pid].smem_edge_in,
+			in_list[remote_pid].safe_total_entries,
+			(char *)name, &entry_ptr, NULL);
+
+	spin_unlock(&in_list[remote_pid].in_item_lock_lhb1);
+	spin_unlock_irqrestore(&out_item->out_item_lock_lha1, flags);
+
+	if (!entry_ptr)
+		return -ENODEV;
+
+	*data = readl_relaxed(entry_ptr);
+	return 0;
+}
+EXPORT_SYMBOL(msm_smp2p_in_read);
+
+/**
+ * msm_smp2p_in_register - Notifies the change in value of the entry.
+ *
+ * @pid: Remote processor ID.
+ * @name: Name of the entry.
+ * @in_notifier: Notifier block used to notify about the event.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Register for change notifications for a remote entry. If the remote entry
+ * does not exist yet, then the registration request will be held until the
+ * remote side opens. Once the entry is open, then the SMP2P_OPEN notification
+ * will be sent. Any changes to the entry will trigger a call to the notifier
+ * block with an SMP2P_ENTRY_UPDATE event and the data field will point to an
+ * msm_smp2p_update_notif structure containing the current and previous value.
+ */
+int msm_smp2p_in_register(int pid, const char *name,
+		struct notifier_block *in_notifier)
+{
+	struct smp2p_in *pos;
+	struct smp2p_in *in = NULL;
+	int ret;
+	unsigned long flags;
+	struct msm_smp2p_update_notif data;
+	uint32_t *entry_ptr;
+
+	if (pid >= SMP2P_NUM_PROCS || !name || !in_notifier)
+		return -EINVAL;
+
+	if ((pid != SMP2P_REMOTE_MOCK_PROC) &&
+			!smp2p_int_cfgs[pid].is_configured) {
+		SMP2P_INFO("%s before msm_smp2p_init(): pid[%d] name[%s]\n",
+			__func__, pid, name);
+		return -EPROBE_DEFER;
+	}
+
+	/* Pre-allocate before spinlock since we will likely needed it */
+	in = kzalloc(sizeof(*in), GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	/* Search for existing entry */
+	spin_lock_irqsave(&out_list[pid].out_item_lock_lha1, flags);
+	spin_lock(&in_list[pid].in_item_lock_lhb1);
+
+	list_for_each_entry(pos, &in_list[pid].list, in_edge_list) {
+		if (!strncmp(pos->name, name,
+				SMP2P_MAX_ENTRY_NAME)) {
+			/* found: discard pre-allocation, reuse the entry */
+			kfree(in);
+			in = pos;
+			break;
+		}
+	}
+
+	/*
+	 * Create and add it to the list.  notifier_count == 0 means @in is
+	 * the fresh kzalloc'd object (an existing entry is always removed
+	 * from the list when its count drops to zero).
+	 */
+	if (!in->notifier_count) {
+		in->remote_pid = pid;
+		strlcpy(in->name, name, SMP2P_MAX_ENTRY_NAME);
+		RAW_INIT_NOTIFIER_HEAD(&in->in_notifier_list);
+		list_add(&in->in_edge_list, &in_list[pid].list);
+	}
+
+	ret = raw_notifier_chain_register(&in->in_notifier_list,
+		in_notifier);
+	if (ret) {
+		/* only tear down entries we just created */
+		if (!in->notifier_count) {
+			list_del(&in->in_edge_list);
+			kfree(in);
+		}
+		SMP2P_DBG("%s: '%s':%d failed %d\n", __func__, name, pid, ret);
+		goto bail;
+	}
+	in->notifier_count++;
+
+	/* if the edge is already open, deliver SMP2P_OPEN immediately */
+	if (out_list[pid].smem_edge_state == SMP2P_EDGE_STATE_OPENED) {
+		out_list[pid].ops_ptr->find_entry(
+			in_list[pid].smem_edge_in,
+			in_list[pid].safe_total_entries, (char *)name,
+			&entry_ptr, NULL);
+		if (entry_ptr) {
+			in->entry_ptr = entry_ptr;
+			in->prev_entry_val = readl_relaxed(entry_ptr);
+
+			data.previous_value = in->prev_entry_val;
+			data.current_value = in->prev_entry_val;
+			in_notifier->notifier_call(in_notifier, SMP2P_OPEN,
+				(void *)&data);
+		}
+	}
+	SMP2P_DBG("%s: '%s':%d registered\n", __func__, name, pid);
+
+bail:
+	spin_unlock(&in_list[pid].in_item_lock_lhb1);
+	spin_unlock_irqrestore(&out_list[pid].out_item_lock_lha1, flags);
+	return ret;
+
+}
+EXPORT_SYMBOL(msm_smp2p_in_register);
+
+/**
+ * msm_smp2p_in_unregister - Unregister the notifier for remote entry.
+ *
+ * @remote_pid: Processor Id of the remote processor.
+ * @name: The name of the entry.
+ * @in_notifier: Notifier block passed during registration.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ */
+int msm_smp2p_in_unregister(int remote_pid, const char *name,
+		struct notifier_block *in_notifier)
+{
+	struct smp2p_in *iter;
+	struct smp2p_in *entry = NULL;
+	int ret = -ENODEV;
+	unsigned long flags;
+
+	if (remote_pid >= SMP2P_NUM_PROCS || !name || !in_notifier)
+		return -EINVAL;
+
+	if (remote_pid != SMP2P_REMOTE_MOCK_PROC &&
+			!smp2p_int_cfgs[remote_pid].is_configured) {
+		SMP2P_INFO("%s before msm_smp2p_init(): pid[%d] name[%s]\n",
+			__func__, remote_pid, name);
+		return -EPROBE_DEFER;
+	}
+
+	spin_lock_irqsave(&in_list[remote_pid].in_item_lock_lhb1, flags);
+	list_for_each_entry(iter, &in_list[remote_pid].list, in_edge_list) {
+		if (!strncmp(iter->name, name, SMP2P_MAX_ENTRY_NAME)) {
+			entry = iter;
+			break;
+		}
+	}
+	if (!entry)
+		goto done;
+
+	ret = raw_notifier_chain_unregister(&entry->in_notifier_list,
+		in_notifier);
+	if (ret) {
+		SMP2P_ERR("%s: unregister failure '%s':%d\n", __func__,
+			name, remote_pid);
+		ret = -ENODEV;
+		goto done;
+	}
+
+	/* drop the entry itself once the last registered client is gone */
+	entry->notifier_count--;
+	if (!entry->notifier_count) {
+		list_del(&entry->in_edge_list);
+		kfree(entry);
+	}
+
+done:
+	spin_unlock_irqrestore(&in_list[remote_pid].in_item_lock_lhb1, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_smp2p_in_unregister);
+
+/**
+ * smp2p_send_interrupt - Send interrupt to remote system.
+ *
+ * @remote_pid: Processor ID of the remote system
+ *
+ * Must be called with out_item_lock_lha1 locked.
+ */
+static void smp2p_send_interrupt(int remote_pid)
+{
+	if (smp2p_int_cfgs[remote_pid].name)
+		SMP2P_DBG("SMP2P Int Apps->%s(%d)\n",
+			smp2p_int_cfgs[remote_pid].name, remote_pid);
+
+	++smp2p_int_cfgs[remote_pid].out_interrupt_count;
+	if (remote_pid != SMP2P_REMOTE_MOCK_PROC &&
+			smp2p_int_cfgs[remote_pid].out_int_mask) {
+		/* flush any pending writes before triggering interrupt */
+		wmb();
+		writel_relaxed(smp2p_int_cfgs[remote_pid].out_int_mask,
+			smp2p_int_cfgs[remote_pid].out_int_ptr);
+	} else {
+		/* mock edge (or unconfigured mask): loop back for testing */
+		smp2p_remote_mock_rx_interrupt();
+	}
+}
+
+/**
+ * smp2p_in_edge_notify - Notifies the entry changed on remote processor.
+ *
+ * @pid: Processor ID of the remote processor.
+ *
+ * This function is invoked on an incoming interrupt, it scans
+ * the list of the clients registered for the entries on the remote
+ * processor and notifies them if the data changes.
+ *
+ * Note: Edge state must be OPENED to avoid a race condition with
+ * out_list[pid].ops_ptr->find_entry.
+ */
+static void smp2p_in_edge_notify(int pid)
+{
+	struct smp2p_in *pos;
+	uint32_t *entry_ptr;
+	unsigned long flags;
+	struct smp2p_smem __iomem *smem_h_ptr;
+	uint32_t curr_data;
+	struct msm_smp2p_update_notif data;
+
+	spin_lock_irqsave(&in_list[pid].in_item_lock_lhb1, flags);
+	smem_h_ptr = in_list[pid].smem_edge_in;
+	if (!smem_h_ptr) {
+		SMP2P_DBG("%s: No remote SMEM item for pid %d\n",
+			__func__, pid);
+		spin_unlock_irqrestore(&in_list[pid].in_item_lock_lhb1, flags);
+		return;
+	}
+
+	list_for_each_entry(pos, &in_list[pid].list, in_edge_list) {
+		if (pos->entry_ptr == NULL) {
+			/* entry not open - try to open it */
+			out_list[pid].ops_ptr->find_entry(smem_h_ptr,
+				in_list[pid].safe_total_entries, pos->name,
+				&entry_ptr, NULL);
+
+			if (entry_ptr) {
+				/* newly opened: previous value reported as 0 */
+				pos->entry_ptr = entry_ptr;
+				pos->prev_entry_val = 0;
+				data.previous_value = 0;
+				data.current_value = readl_relaxed(entry_ptr);
+				raw_notifier_call_chain(
+					&pos->in_notifier_list,
+					SMP2P_OPEN, (void *)&data);
+			}
+		}
+
+		if (pos->entry_ptr != NULL) {
+			/* send update notification */
+			curr_data = readl_relaxed(pos->entry_ptr);
+			if (curr_data != pos->prev_entry_val) {
+				data.previous_value = pos->prev_entry_val;
+				data.current_value = curr_data;
+				pos->prev_entry_val = curr_data;
+				raw_notifier_call_chain(
+					&pos->in_notifier_list,
+					SMP2P_ENTRY_UPDATE, (void *)&data);
+			}
+		}
+	}
+	spin_unlock_irqrestore(&in_list[pid].in_item_lock_lhb1, flags);
+}
+
+/**
+ * smp2p_interrupt_handler - Incoming interrupt handler.
+ *
+ * @irq: Interrupt ID
+ * @data: Edge (remote processor ID cast into the cookie pointer)
+ * @returns: IRQ_HANDLED or IRQ_NONE for invalid interrupt
+ */
+static irqreturn_t smp2p_interrupt_handler(int irq, void *data)
+{
+	unsigned long flags;
+	uint32_t remote_pid = (uint32_t)(uintptr_t)data;
+
+	if (remote_pid >= SMP2P_NUM_PROCS) {
+		SMP2P_ERR("%s: invalid interrupt pid %d\n",
+			__func__, remote_pid);
+		return IRQ_NONE;
+	}
+
+	if (smp2p_int_cfgs[remote_pid].name)
+		SMP2P_DBG("SMP2P Int %s(%d)->Apps\n",
+			smp2p_int_cfgs[remote_pid].name, remote_pid);
+
+	spin_lock_irqsave(&out_list[remote_pid].out_item_lock_lha1, flags);
+	++smp2p_int_cfgs[remote_pid].in_interrupt_count;
+
+	/* an interrupt before the edge opens drives negotiation forward */
+	if (out_list[remote_pid].smem_edge_state != SMP2P_EDGE_STATE_OPENED)
+		smp2p_do_negotiation(remote_pid, &out_list[remote_pid]);
+
+	if (out_list[remote_pid].smem_edge_state == SMP2P_EDGE_STATE_OPENED) {
+		bool do_restart_ack;
+
+		/*
+		 * Follow double-check pattern for restart ack since:
+		 * 1) we must notify clients of the X->0 transition
+		 *    that is part of the restart
+		 * 2) lock cannot be held during the
+		 *    smp2p_in_edge_notify() call because clients may do
+		 *    re-entrant calls into our APIs.
+		 *
+		 * smp2p_do_ssr_ack() will only do the ack if it is
+		 * necessary to handle the race condition exposed by
+		 * unlocking the spinlocks.
+		 */
+		spin_lock(&in_list[remote_pid].in_item_lock_lhb1);
+		do_restart_ack = smp2p_ssr_ack_needed(remote_pid);
+		spin_unlock(&in_list[remote_pid].in_item_lock_lhb1);
+		spin_unlock_irqrestore(&out_list[remote_pid].out_item_lock_lha1,
+			flags);
+
+		/* notify clients with all locks dropped (re-entrancy) */
+		smp2p_in_edge_notify(remote_pid);
+
+		if (do_restart_ack) {
+			/* re-acquire both locks in the documented order */
+			spin_lock_irqsave(
+				&out_list[remote_pid].out_item_lock_lha1,
+				flags);
+			spin_lock(&in_list[remote_pid].in_item_lock_lhb1);
+
+			smp2p_do_ssr_ack(remote_pid);
+
+			spin_unlock(&in_list[remote_pid].in_item_lock_lhb1);
+			spin_unlock_irqrestore(
+				&out_list[remote_pid].out_item_lock_lha1,
+				flags);
+		}
+	} else {
+		spin_unlock_irqrestore(&out_list[remote_pid].out_item_lock_lha1,
+			flags);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * smp2p_reset_mock_edge - Reinitializes the mock edge.
+ *
+ * @returns: 0 on success, -EAGAIN to retry later.
+ *
+ * Reinitializes the mock edge to initial power-up state values.
+ */
+int smp2p_reset_mock_edge(void)
+{
+	const int rpid = SMP2P_REMOTE_MOCK_PROC;
+	unsigned long irq_flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&out_list[rpid].out_item_lock_lha1, irq_flags);
+	spin_lock(&in_list[rpid].in_item_lock_lhb1);
+
+	/* cannot reset while clients still hold entries on this edge */
+	if (!list_empty(&out_list[rpid].list) ||
+			!list_empty(&in_list[rpid].list)) {
+		rc = -EAGAIN;
+	} else {
+		kfree(out_list[rpid].smem_edge_out);
+		out_list[rpid].smem_edge_out = NULL;
+		out_list[rpid].ops_ptr = &version_if[0];
+		out_list[rpid].smem_edge_state = SMP2P_EDGE_STATE_CLOSED;
+		out_list[rpid].feature_ssr_ack_enabled = false;
+		out_list[rpid].restart_ack = false;
+
+		in_list[rpid].smem_edge_in = NULL;
+		in_list[rpid].item_size = 0;
+		in_list[rpid].safe_total_entries = 0;
+	}
+
+	spin_unlock(&in_list[rpid].in_item_lock_lhb1);
+	spin_unlock_irqrestore(&out_list[rpid].out_item_lock_lha1, irq_flags);
+
+	return rc;
+}
+
+/**
+ * msm_smp2p_interrupt_handler - Triggers incoming interrupt.
+ *
+ * @remote_pid: Remote processor ID
+ *
+ * This function is used with the remote mock infrastructure
+ * used for testing. It simulates triggering of interrupt in
+ * a testing environment.
+ */
+void msm_smp2p_interrupt_handler(int remote_pid)
+{
+	/* irq number is unused by the handler, so 0 is passed */
+	smp2p_interrupt_handler(0, (void *)(uintptr_t)remote_pid);
+}
+
+/**
+ * msm_smp2p_probe - Device tree probe function.
+ *
+ * @pdev: Pointer to device tree data.
+ * @returns: 0 on success; standard Linux error code otherwise
+ *           (-ENODEV on missing/invalid DT data, -ENOMEM on ioremap
+ *           failure, -EPROBE_DEFER if SMEM or the IRQ is not ready).
+ */
+static int msm_smp2p_probe(struct platform_device *pdev)
+{
+	struct resource *r;
+	void *irq_out_ptr = NULL;
+	char *key;
+	uint32_t edge;
+	int ret;
+	struct device_node *node;
+	uint32_t irq_bitmask;
+	int irq_line;
+	void *temp_p;
+	unsigned temp_sz;
+
+	node = pdev->dev.of_node;
+
+	key = "qcom,remote-pid";
+	ret = of_property_read_u32(node, key, &edge);
+	if (ret) {
+		SMP2P_ERR("%s: missing edge '%s'\n", __func__, key);
+		ret = -ENODEV;
+		goto fail;
+	}
+	/*
+	 * The edge indexes smp2p_int_cfgs[]/out_list[]/in_list[], so an
+	 * out-of-range value from the device tree must be rejected here
+	 * to avoid out-of-bounds array accesses below.
+	 */
+	if (edge >= SMP2P_NUM_PROCS) {
+		SMP2P_ERR("%s: invalid remote pid %d\n", __func__, edge);
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r) {
+		SMP2P_ERR("%s: failed gathering irq-reg resource for edge %d\n"
+			, __func__, edge);
+		ret = -ENODEV;
+		goto fail;
+	}
+	irq_out_ptr = ioremap_nocache(r->start, resource_size(r));
+	if (!irq_out_ptr) {
+		SMP2P_ERR("%s: failed remap from phys to virt for edge %d\n",
+			__func__, edge);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	key = "qcom,irq-bitmask";
+	ret = of_property_read_u32(node, key, &irq_bitmask);
+	if (ret)
+		goto missing_key;
+
+	key = "interrupts";
+	irq_line = platform_get_irq(pdev, 0);
+	if (irq_line == -ENXIO)
+		goto missing_key;
+	if (irq_line < 0) {
+		/*
+		 * platform_get_irq() can also return -EPROBE_DEFER or other
+		 * errors; previously only -ENXIO was handled and any other
+		 * negative value was passed to request_irq() as a bogus IRQ
+		 * number (irq_line was unsigned). Propagate the error.
+		 */
+		SMP2P_ERR("%s: failed to get irq for edge %d (%d)\n",
+			__func__, edge, irq_line);
+		ret = irq_line;
+		goto fail;
+	}
+
+	/*
+	 * We depend on the SMEM driver, so do a test access to see if SMEM is
+	 * ready. We don't want any side effects at this time (so no alloc)
+	 * and the return doesn't matter, so long as it is not -EPROBE_DEFER.
+	 */
+	temp_p = smem_get_entry(
+		smp2p_get_smem_item_id(SMP2P_APPS_PROC, SMP2P_MODEM_PROC),
+		&temp_sz,
+		0,
+		SMEM_ANY_HOST_FLAG);
+	if (PTR_ERR(temp_p) == -EPROBE_DEFER) {
+		SMP2P_INFO("%s: edge:%d probe before smem ready\n", __func__,
+			edge);
+		ret = -EPROBE_DEFER;
+		goto fail;
+	}
+
+	ret = request_irq(irq_line, smp2p_interrupt_handler,
+		IRQF_TRIGGER_RISING, "smp2p", (void *)(uintptr_t)edge);
+	if (ret < 0) {
+		SMP2P_ERR("%s: request_irq() failed on %d (edge %d)\n",
+			__func__, irq_line, edge);
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	/* non-fatal: the edge works without wakeup capability */
+	ret = enable_irq_wake(irq_line);
+	if (ret < 0)
+		SMP2P_ERR("%s: enable_irq_wake() failed on %d (edge %d)\n",
+			__func__, irq_line, edge);
+
+	/*
+	 * Set entry (keep is_configured last to prevent usage before
+	 * initialization).
+	 */
+	smp2p_int_cfgs[edge].in_int_id = irq_line;
+	smp2p_int_cfgs[edge].out_int_mask = irq_bitmask;
+	smp2p_int_cfgs[edge].out_int_ptr = irq_out_ptr;
+	smp2p_int_cfgs[edge].is_configured = true;
+	return 0;
+
+missing_key:
+	SMP2P_ERR("%s: missing '%s' for edge %d\n", __func__, key, edge);
+	ret = -ENODEV;
+fail:
+	if (irq_out_ptr)
+		iounmap(irq_out_ptr);
+	return ret;
+}
+
+/* Device tree match: binds this driver to "qcom,smp2p" nodes */
+static struct of_device_id msm_smp2p_match_table[] = {
+	{ .compatible = "qcom,smp2p" },
+	{},
+};
+
+/* Platform driver: one probe per SMP2P edge described in the device tree */
+static struct platform_driver msm_smp2p_driver = {
+	.probe = msm_smp2p_probe,
+	.driver = {
+		.name = "msm_smp2p",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_smp2p_match_table,
+	},
+};
+
+/**
+ * msm_smp2p_init - Initialization function for the module.
+ *
+ * @returns: 0 on success, standard Linux error code otherwise.
+ */
+static int __init msm_smp2p_init(void)
+{
+	int pid;
+	int rc;
+
+	/* reset every edge to its power-up (closed, un-negotiated) state */
+	for (pid = 0; pid < SMP2P_NUM_PROCS; pid++) {
+		struct smp2p_out_list_item *out = &out_list[pid];
+		struct smp2p_in_list_item *in = &in_list[pid];
+
+		spin_lock_init(&out->out_item_lock_lha1);
+		INIT_LIST_HEAD(&out->list);
+		out->smem_edge_out = NULL;
+		out->smem_edge_state = SMP2P_EDGE_STATE_CLOSED;
+		out->ops_ptr = &version_if[0];
+		out->feature_ssr_ack_enabled = false;
+		out->restart_ack = false;
+
+		spin_lock_init(&in->in_item_lock_lhb1);
+		INIT_LIST_HEAD(&in->list);
+		in->smem_edge_in = NULL;
+	}
+
+	/* logging failure is non-fatal; the driver works without it */
+	log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smp2p");
+	if (!log_ctx)
+		SMP2P_ERR("%s: unable to create log context\n", __func__);
+
+	rc = platform_driver_register(&msm_smp2p_driver);
+	if (rc)
+		SMP2P_ERR("%s: msm_smp2p_driver register failed %d\n",
+			__func__, rc);
+
+	return rc;
+}
+module_init(msm_smp2p_init);
+
+MODULE_DESCRIPTION("MSM Shared Memory Point to Point");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smp2p_loopback.c b/drivers/soc/qcom/smp2p_loopback.c
new file mode 100644
index 000000000000..c840eea94bfa
--- /dev/null
+++ b/drivers/soc/qcom/smp2p_loopback.c
@@ -0,0 +1,449 @@
+/* drivers/soc/qcom/smp2p_loopback.c
+ *
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/termios.h>
+#include <linux/module.h>
+#include <linux/remote_spinlock.h>
+#include "smem_private.h"
+#include "smp2p_private.h"
+
+/**
+ * struct smp2p_loopback_ctx - Representation of remote loopback object.
+ *
+ * @proc_id: Processor id of the processor that sends the loopback commands.
+ * @out: Handle to the smem entry structure for providing the response.
+ * @out_nb: Notifies the opening of local entry.
+ * @out_is_active: Outbound entry events should be processed.
+ * @in_nb: Notifies changes in the remote entry.
+ * @in_is_active: Inbound entry events should be processed.
+ * @rmt_lpb_work: Work item that handles the incoming loopback commands.
+ * @rmt_cmd: Structure that holds the current and previous value of the entry.
+ */
+struct smp2p_loopback_ctx {
+ int proc_id;
+ struct msm_smp2p_out *out;
+ struct notifier_block out_nb;
+ bool out_is_active;
+ struct notifier_block in_nb;
+ bool in_is_active;
+ struct work_struct rmt_lpb_work;
+ struct msm_smp2p_update_notif rmt_cmd;
+};
+
+static struct smp2p_loopback_ctx remote_loopback[SMP2P_NUM_PROCS];
+static struct msm_smp2p_remote_mock remote_mock;
+
/**
 * remote_spinlock_test - Handles remote spinlock test.
 *
 * @ctx: Loopback context
 *
 * Runs the responder side of the remote spinlock unit test: repeatedly
 * acquires the shared SMEM remote spinlock, advertises LOCKED/UNLOCKED
 * state through the outbound entry, and stops once the remote side
 * writes SMP2P_LB_CMD_RSPIN_END, replying with the total lock count.
 */
static void remote_spinlock_test(struct smp2p_loopback_ctx *ctx)
{
	uint32_t test_request;
	uint32_t test_response;
	unsigned long flags;
	int n;
	unsigned lock_count = 0;
	remote_spinlock_t *smem_spinlock;

	test_request = 0x0;
	SMP2P_SET_RMT_CMD_TYPE_REQ(test_request);
	smem_spinlock = smem_get_remote_spinlock();
	if (!smem_spinlock) {
		pr_err("%s: unable to get remote spinlock\n", __func__);
		return;
	}

	for (;;) {
		remote_spin_lock_irqsave(smem_spinlock, flags);
		++lock_count;
		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_LOCKED);
		(void)msm_smp2p_out_write(ctx->out, test_request);

		/*
		 * Poll the inbound entry while the lock is held; the remote
		 * side is expected to report only UNLOCKED (or END) during
		 * this window.
		 */
		for (n = 0; n < 10000; ++n) {
			(void)msm_smp2p_in_read(ctx->proc_id,
					"smp2p", &test_response);
			test_response = SMP2P_GET_RMT_CMD(test_response);

			if (test_response == SMP2P_LB_CMD_RSPIN_END)
				break;

			if (test_response != SMP2P_LB_CMD_RSPIN_UNLOCKED)
				SMP2P_ERR("%s: invalid spinlock command %x\n",
					__func__, test_response);
		}

		if (test_response == SMP2P_LB_CMD_RSPIN_END) {
			/*
			 * Remote side is done: reply with the final lock
			 * count.  The lock is still held here and is
			 * released by the unlock after the loop.
			 */
			SMP2P_SET_RMT_CMD_TYPE_RESP(test_request);
			SMP2P_SET_RMT_CMD(test_request,
					SMP2P_LB_CMD_RSPIN_END);
			SMP2P_SET_RMT_DATA(test_request, lock_count);
			(void)msm_smp2p_out_write(ctx->out, test_request);
			break;
		}

		/* advertise UNLOCKED, then actually drop the lock */
		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_UNLOCKED);
		(void)msm_smp2p_out_write(ctx->out, test_request);
		remote_spin_unlock_irqrestore(smem_spinlock, flags);
	}
	remote_spin_unlock_irqrestore(smem_spinlock, flags);
}
+
/**
 * smp2p_rmt_lpb_worker - Handles incoming remote loopback commands.
 *
 * @work: Work Item scheduled to handle the incoming commands.
 *
 * Scheduled from smp2p_rmt_in_edge_notify(); decodes the latest inbound
 * entry value saved in ctx->rmt_cmd and writes any required response to
 * the outbound entry.
 */
static void smp2p_rmt_lpb_worker(struct work_struct *work)
{
	struct smp2p_loopback_ctx *ctx;
	int lpb_cmd;
	int lpb_cmd_type;
	int lpb_data;

	ctx = container_of(work, struct smp2p_loopback_ctx, rmt_lpb_work);

	/* both directions of the loopback must still be open */
	if (!ctx->in_is_active || !ctx->out_is_active)
		return;

	/* entry value unchanged -- nothing to process */
	if (ctx->rmt_cmd.previous_value == ctx->rmt_cmd.current_value)
		return;

	/* cmd type is extracted but not used by any command below */
	lpb_cmd_type = SMP2P_GET_RMT_CMD_TYPE(ctx->rmt_cmd.current_value);
	lpb_cmd = SMP2P_GET_RMT_CMD(ctx->rmt_cmd.current_value);
	lpb_data = SMP2P_GET_RMT_DATA(ctx->rmt_cmd.current_value);

	/* commands with the ignore bit set are not acted upon */
	if (lpb_cmd & SMP2P_RLPB_IGNORE)
		return;

	switch (lpb_cmd) {
	case SMP2P_LB_CMD_NOOP:
		/* Do nothing */
		break;

	case SMP2P_LB_CMD_ECHO:
		/* echo the data field back with the cmd-type bit cleared */
		SMP2P_SET_RMT_CMD_TYPE(ctx->rmt_cmd.current_value, 0);
		SMP2P_SET_RMT_DATA(ctx->rmt_cmd.current_value,
				lpb_data);
		(void)msm_smp2p_out_write(ctx->out,
					ctx->rmt_cmd.current_value);
		break;

	case SMP2P_LB_CMD_CLEARALL:
		ctx->rmt_cmd.current_value = 0;
		(void)msm_smp2p_out_write(ctx->out,
					ctx->rmt_cmd.current_value);
		break;

	case SMP2P_LB_CMD_PINGPONG:
		/* decrement the counter and bounce it back until it hits 0 */
		SMP2P_SET_RMT_CMD_TYPE(ctx->rmt_cmd.current_value, 0);
		if (lpb_data) {
			lpb_data--;
			SMP2P_SET_RMT_DATA(ctx->rmt_cmd.current_value,
					lpb_data);
			(void)msm_smp2p_out_write(ctx->out,
					ctx->rmt_cmd.current_value);
		}
		break;

	case SMP2P_LB_CMD_RSPIN_START:
		remote_spinlock_test(ctx);
		break;

	case SMP2P_LB_CMD_RSPIN_LOCKED:
	case SMP2P_LB_CMD_RSPIN_UNLOCKED:
	case SMP2P_LB_CMD_RSPIN_END:
		/* not used for remote spinlock test */
		break;

	default:
		SMP2P_DBG("%s: Unknown loopback command %x\n",
				__func__, lpb_cmd);
		break;
	}
}
+
+/**
+ * smp2p_rmt_in_edge_notify - Schedules a work item to handle the commands.
+ *
+ * @nb: Notifier block, this is called when the value in remote entry changes.
+ * @event: Takes value SMP2P_ENTRY_UPDATE or SMP2P_OPEN based on the event.
+ * @data: Consists of previous and current value in case of entry update.
+ * @returns: 0 for success (return value required for notifier chains).
+ */
+static int smp2p_rmt_in_edge_notify(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct smp2p_loopback_ctx *ctx;
+
+ if (!(event == SMP2P_ENTRY_UPDATE || event == SMP2P_OPEN))
+ return 0;
+
+ ctx = container_of(nb, struct smp2p_loopback_ctx, in_nb);
+ if (data && ctx->in_is_active) {
+ ctx->rmt_cmd =
+ *(struct msm_smp2p_update_notif *)data;
+ schedule_work(&ctx->rmt_lpb_work);
+ }
+
+ return 0;
+}
+
+/**
+ * smp2p_rmt_out_edge_notify - Notifies on the opening of the outbound entry.
+ *
+ * @nb: Notifier block, this is called when the local entry is open.
+ * @event: Takes on value SMP2P_OPEN when the local entry is open.
+ * @data: Consist of current value of the remote entry, if entry is open.
+ * @returns: 0 for success (return value required for notifier chains).
+ */
+static int smp2p_rmt_out_edge_notify(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct smp2p_loopback_ctx *ctx;
+
+ ctx = container_of(nb, struct smp2p_loopback_ctx, out_nb);
+ if (event == SMP2P_OPEN)
+ SMP2P_DBG("%s: 'smp2p':%d opened\n", __func__,
+ ctx->proc_id);
+
+ return 0;
+}
+
+/**
+ * msm_smp2p_init_rmt_lpb - Initializes the remote loopback object.
+ *
+ * @ctx: Pointer to remote loopback object that needs to be initialized.
+ * @pid: Processor id of the processor that is sending the commands.
+ * @entry: Name of the entry that needs to be opened locally.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ */
+static int msm_smp2p_init_rmt_lpb(struct smp2p_loopback_ctx *ctx,
+ int pid, const char *entry)
+{
+ int ret = 0;
+ int tmp;
+
+ if (!ctx || !entry || pid > SMP2P_NUM_PROCS)
+ return -EINVAL;
+
+ ctx->in_nb.notifier_call = smp2p_rmt_in_edge_notify;
+ ctx->out_nb.notifier_call = smp2p_rmt_out_edge_notify;
+ ctx->proc_id = pid;
+ ctx->in_is_active = true;
+ ctx->out_is_active = true;
+ tmp = msm_smp2p_out_open(pid, entry, &ctx->out_nb,
+ &ctx->out);
+ if (tmp) {
+ SMP2P_ERR("%s: open failed outbound entry '%s':%d - ret %d\n",
+ __func__, entry, pid, tmp);
+ ret = tmp;
+ }
+
+ tmp = msm_smp2p_in_register(ctx->proc_id,
+ SMP2P_RLPB_ENTRY_NAME,
+ &ctx->in_nb);
+ if (tmp) {
+ SMP2P_ERR("%s: unable to open inbound entry '%s':%d - ret %d\n",
+ __func__, entry, pid, tmp);
+ ret = tmp;
+ }
+
+ return ret;
+}
+
+/**
+ * msm_smp2p_init_rmt_lpb_proc - Wrapper over msm_smp2p_init_rmt_lpb
+ *
+ * @remote_pid: Processor ID of the processor that sends loopback command.
+ * @returns: Pointer to outbound entry handle.
+ */
+void *msm_smp2p_init_rmt_lpb_proc(int remote_pid)
+{
+ int tmp;
+ void *ret = NULL;
+
+ tmp = msm_smp2p_init_rmt_lpb(&remote_loopback[remote_pid],
+ remote_pid, SMP2P_RLPB_ENTRY_NAME);
+ if (!tmp)
+ ret = remote_loopback[remote_pid].out;
+
+ return ret;
+}
+EXPORT_SYMBOL(msm_smp2p_init_rmt_lpb_proc);
+
+/**
+ * msm_smp2p_deinit_rmt_lpb_proc - Unregister support for remote processor.
+ *
+ * @remote_pid: Processor ID of the remote system.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Unregister loopback support for remote processor.
+ */
+int msm_smp2p_deinit_rmt_lpb_proc(int remote_pid)
+{
+ int ret = 0;
+ int tmp;
+ struct smp2p_loopback_ctx *ctx;
+
+ if (remote_pid >= SMP2P_NUM_PROCS)
+ return -EINVAL;
+
+ ctx = &remote_loopback[remote_pid];
+
+ /* abort any pending notifications */
+ remote_loopback[remote_pid].out_is_active = false;
+ remote_loopback[remote_pid].in_is_active = false;
+ flush_work(&ctx->rmt_lpb_work);
+
+ /* unregister entries */
+ tmp = msm_smp2p_out_close(&remote_loopback[remote_pid].out);
+ remote_loopback[remote_pid].out = NULL;
+ if (tmp) {
+ SMP2P_ERR("%s: outbound 'smp2p':%d close failed %d\n",
+ __func__, remote_pid, tmp);
+ ret = tmp;
+ }
+
+ tmp = msm_smp2p_in_unregister(remote_pid,
+ SMP2P_RLPB_ENTRY_NAME, &remote_loopback[remote_pid].in_nb);
+ if (tmp) {
+ SMP2P_ERR("%s: inbound 'smp2p':%d close failed %d\n",
+ __func__, remote_pid, tmp);
+ ret = tmp;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(msm_smp2p_deinit_rmt_lpb_proc);
+
/**
 * msm_smp2p_set_remote_mock_exists - Sets the remote mock configuration.
 *
 * @item_exists: true = Remote mock SMEM item exists
 *
 * This is used in the testing environment to simulate the existence of the
 * remote smem item in order to test the negotiation algorithm.  When set
 * to false, msm_smp2p_get_remote_mock_smem_item() returns NULL.
 */
void msm_smp2p_set_remote_mock_exists(bool item_exists)
{
	remote_mock.item_exists = item_exists;
}
EXPORT_SYMBOL(msm_smp2p_set_remote_mock_exists);
+
/**
 * msm_smp2p_get_remote_mock - Get remote mock object.
 *
 * @returns: Pointer to the (file-scope, singleton) remote mock object.
 */
void *msm_smp2p_get_remote_mock(void)
{
	return &remote_mock;
}
EXPORT_SYMBOL(msm_smp2p_get_remote_mock);
+
+/**
+ * msm_smp2p_get_remote_mock_smem_item - Returns a pointer to remote item.
+ *
+ * @size: Size of item.
+ * @returns: Pointer to mock remote smem item.
+ */
+void *msm_smp2p_get_remote_mock_smem_item(uint32_t *size)
+{
+ void *ptr = NULL;
+ if (remote_mock.item_exists) {
+ *size = sizeof(remote_mock.remote_item);
+ ptr = &(remote_mock.remote_item);
+ }
+
+ return ptr;
+}
+EXPORT_SYMBOL(msm_smp2p_get_remote_mock_smem_item);
+
/**
 * smp2p_remote_mock_rx_interrupt - Triggers receive interrupt for mock proc.
 *
 * @returns: 0 for success
 *
 * This function simulates the receiving of interrupt by the mock remote
 * processor in a testing environment.  Installed as
 * remote_mock.rx_interrupt by smp2p_remote_mock_init(); also wakes any
 * waiter on remote_mock.cb_completion once the mock is initialized.
 */
int smp2p_remote_mock_rx_interrupt(void)
{
	remote_mock.rx_interrupt_count++;
	if (remote_mock.initialized)
		complete(&remote_mock.cb_completion);
	return 0;
}
EXPORT_SYMBOL(smp2p_remote_mock_rx_interrupt);
+
/**
 * smp2p_remote_mock_tx_interrupt - Calls the SMP2P interrupt handler.
 *
 * This function calls the interrupt handler of the Apps processor to simulate
 * receiving interrupts from a remote processor.  Installed as
 * remote_mock.tx_interrupt by smp2p_remote_mock_init().
 */
static void smp2p_remote_mock_tx_interrupt(void)
{
	msm_smp2p_interrupt_handler(SMP2P_REMOTE_MOCK_PROC);
}
+
+/**
+ * smp2p_remote_mock_init - Initialize the remote mock and loopback objects.
+ *
+ * @returns: 0 for success
+ */
+static int __init smp2p_remote_mock_init(void)
+{
+ int i;
+ struct smp2p_interrupt_config *int_cfg;
+
+ smp2p_init_header(&remote_mock.remote_item.header,
+ SMP2P_REMOTE_MOCK_PROC, SMP2P_APPS_PROC,
+ 0, 0);
+ remote_mock.rx_interrupt_count = 0;
+ remote_mock.rx_interrupt = smp2p_remote_mock_rx_interrupt;
+ remote_mock.tx_interrupt = smp2p_remote_mock_tx_interrupt;
+ remote_mock.item_exists = false;
+ init_completion(&remote_mock.cb_completion);
+ remote_mock.initialized = true;
+
+ for (i = 0; i < SMP2P_NUM_PROCS; i++) {
+ INIT_WORK(&(remote_loopback[i].rmt_lpb_work),
+ smp2p_rmt_lpb_worker);
+ if (i == SMP2P_REMOTE_MOCK_PROC)
+ /* do not register loopback for remote mock proc */
+ continue;
+
+ int_cfg = smp2p_get_interrupt_config();
+ if (!int_cfg) {
+ SMP2P_ERR("Remote processor config unavailable\n");
+ return 0;
+ }
+ if (!int_cfg[i].is_configured)
+ continue;
+
+ msm_smp2p_init_rmt_lpb(&remote_loopback[i],
+ i, SMP2P_RLPB_ENTRY_NAME);
+ }
+ return 0;
+}
+module_init(smp2p_remote_mock_init);
diff --git a/drivers/soc/qcom/smp2p_private.h b/drivers/soc/qcom/smp2p_private.h
new file mode 100644
index 000000000000..b1c1f09975f9
--- /dev/null
+++ b/drivers/soc/qcom/smp2p_private.h
@@ -0,0 +1,252 @@
+/* drivers/soc/qcom/smp2p_private.h
+ *
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _ARCH_ARM_MACH_MSM_MSM_SMP2P_PRIVATE_H_
+#define _ARCH_ARM_MACH_MSM_MSM_SMP2P_PRIVATE_H_
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/ipc_logging.h>
+#include "smp2p_private_api.h"
+
+#define SMP2P_MAX_ENTRY 16
+#define SMP2P_FEATURE_SSR_ACK 0x1
+
+/* SMEM Item Header Macros */
+#define SMP2P_MAGIC 0x504D5324
+#define SMP2P_LOCAL_PID_MASK 0x0000ffff
+#define SMP2P_LOCAL_PID_BIT 0
+#define SMP2P_REMOTE_PID_MASK 0xffff0000
+#define SMP2P_REMOTE_PID_BIT 16
+#define SMP2P_VERSION_MASK 0x000000ff
+#define SMP2P_VERSION_BIT 0
+#define SMP2P_FEATURE_MASK 0xffffff00
+#define SMP2P_FEATURE_BIT 8
+#define SMP2P_ENT_TOTAL_MASK 0x0000ffff
+#define SMP2P_ENT_TOTAL_BIT 0
+#define SMP2P_ENT_VALID_MASK 0xffff0000
+#define SMP2P_ENT_VALID_BIT 16
+#define SMP2P_FLAGS_RESTART_DONE_BIT 0
+#define SMP2P_FLAGS_RESTART_DONE_MASK 0x1
+#define SMP2P_FLAGS_RESTART_ACK_BIT 1
+#define SMP2P_FLAGS_RESTART_ACK_MASK 0x2
+
/* Extract the field selected by (mask, bit) from a header word. */
#define SMP2P_GET_BITS(hdr_val, mask, bit) \
	(((hdr_val) & (mask)) >> (bit))
/*
 * Replace the field selected by (mask, bit) with new_value.  Wrapped in
 * do { } while (0) so it expands to a single statement: a bare { } block
 * followed by the caller's ';' breaks inside an un-braced if/else.  The
 * hdr_val argument is also parenthesized against operator-precedence
 * surprises in the expansion.
 */
#define SMP2P_SET_BITS(hdr_val, mask, bit, new_value) \
	do { \
		(hdr_val) = ((hdr_val) & ~(mask)) \
				| (((new_value) << (bit)) & (mask)); \
	} while (0)
+
+#define SMP2P_GET_LOCAL_PID(hdr) \
+ SMP2P_GET_BITS(hdr, SMP2P_LOCAL_PID_MASK, SMP2P_LOCAL_PID_BIT)
+#define SMP2P_SET_LOCAL_PID(hdr, pid) \
+ SMP2P_SET_BITS(hdr, SMP2P_LOCAL_PID_MASK, SMP2P_LOCAL_PID_BIT, pid)
+
+#define SMP2P_GET_REMOTE_PID(hdr) \
+ SMP2P_GET_BITS(hdr, SMP2P_REMOTE_PID_MASK, SMP2P_REMOTE_PID_BIT)
+#define SMP2P_SET_REMOTE_PID(hdr, pid) \
+ SMP2P_SET_BITS(hdr, SMP2P_REMOTE_PID_MASK, SMP2P_REMOTE_PID_BIT, pid)
+
+#define SMP2P_GET_VERSION(hdr) \
+ SMP2P_GET_BITS(hdr, SMP2P_VERSION_MASK, SMP2P_VERSION_BIT)
+#define SMP2P_SET_VERSION(hdr, version) \
+ SMP2P_SET_BITS(hdr, SMP2P_VERSION_MASK, SMP2P_VERSION_BIT, version)
+
+#define SMP2P_GET_FEATURES(hdr) \
+ SMP2P_GET_BITS(hdr, SMP2P_FEATURE_MASK, SMP2P_FEATURE_BIT)
+#define SMP2P_SET_FEATURES(hdr, features) \
+ SMP2P_SET_BITS(hdr, SMP2P_FEATURE_MASK, SMP2P_FEATURE_BIT, features)
+
+#define SMP2P_GET_ENT_TOTAL(hdr) \
+ SMP2P_GET_BITS(hdr, SMP2P_ENT_TOTAL_MASK, SMP2P_ENT_TOTAL_BIT)
+#define SMP2P_SET_ENT_TOTAL(hdr, entries) \
+ SMP2P_SET_BITS(hdr, SMP2P_ENT_TOTAL_MASK, SMP2P_ENT_TOTAL_BIT, entries)
+
+#define SMP2P_GET_ENT_VALID(hdr) \
+ SMP2P_GET_BITS(hdr, SMP2P_ENT_VALID_MASK, SMP2P_ENT_VALID_BIT)
+#define SMP2P_SET_ENT_VALID(hdr, entries) \
+ SMP2P_SET_BITS(hdr, SMP2P_ENT_VALID_MASK, SMP2P_ENT_VALID_BIT,\
+ entries)
+
+#define SMP2P_GET_RESTART_DONE(hdr) \
+ SMP2P_GET_BITS(hdr, SMP2P_FLAGS_RESTART_DONE_MASK, \
+ SMP2P_FLAGS_RESTART_DONE_BIT)
+#define SMP2P_SET_RESTART_DONE(hdr, value) \
+ SMP2P_SET_BITS(hdr, SMP2P_FLAGS_RESTART_DONE_MASK, \
+ SMP2P_FLAGS_RESTART_DONE_BIT, value)
+
+#define SMP2P_GET_RESTART_ACK(hdr) \
+ SMP2P_GET_BITS(hdr, SMP2P_FLAGS_RESTART_ACK_MASK, \
+ SMP2P_FLAGS_RESTART_ACK_BIT)
+#define SMP2P_SET_RESTART_ACK(hdr, value) \
+ SMP2P_SET_BITS(hdr, SMP2P_FLAGS_RESTART_ACK_MASK, \
+ SMP2P_FLAGS_RESTART_ACK_BIT, value)
+
+/* Loopback Command Macros */
+#define SMP2P_RMT_CMD_TYPE_MASK 0x80000000
+#define SMP2P_RMT_CMD_TYPE_BIT 31
+#define SMP2P_RMT_IGNORE_MASK 0x40000000
+#define SMP2P_RMT_IGNORE_BIT 30
+#define SMP2P_RMT_CMD_MASK 0x3f000000
+#define SMP2P_RMT_CMD_BIT 24
+#define SMP2P_RMT_DATA_MASK 0x00ffffff
+#define SMP2P_RMT_DATA_BIT 0
+
+#define SMP2P_GET_RMT_CMD_TYPE(val) \
+ SMP2P_GET_BITS(val, SMP2P_RMT_CMD_TYPE_MASK, SMP2P_RMT_CMD_TYPE_BIT)
+#define SMP2P_GET_RMT_CMD(val) \
+ SMP2P_GET_BITS(val, SMP2P_RMT_CMD_MASK, SMP2P_RMT_CMD_BIT)
+
+#define SMP2P_GET_RMT_DATA(val) \
+ SMP2P_GET_BITS(val, SMP2P_RMT_DATA_MASK, SMP2P_RMT_DATA_BIT)
+
+#define SMP2P_SET_RMT_CMD_TYPE(val, cmd_type) \
+ SMP2P_SET_BITS(val, SMP2P_RMT_CMD_TYPE_MASK, SMP2P_RMT_CMD_TYPE_BIT, \
+ cmd_type)
+#define SMP2P_SET_RMT_CMD_TYPE_REQ(val) \
+ SMP2P_SET_RMT_CMD_TYPE(val, 1)
+#define SMP2P_SET_RMT_CMD_TYPE_RESP(val) \
+ SMP2P_SET_RMT_CMD_TYPE(val, 0)
+
+#define SMP2P_SET_RMT_CMD(val, cmd) \
+ SMP2P_SET_BITS(val, SMP2P_RMT_CMD_MASK, SMP2P_RMT_CMD_BIT, \
+ cmd)
+#define SMP2P_SET_RMT_DATA(val, data) \
+ SMP2P_SET_BITS(val, SMP2P_RMT_DATA_MASK, SMP2P_RMT_DATA_BIT, data)
+
+enum {
+ SMP2P_LB_CMD_NOOP = 0x0,
+ SMP2P_LB_CMD_ECHO,
+ SMP2P_LB_CMD_CLEARALL,
+ SMP2P_LB_CMD_PINGPONG,
+ SMP2P_LB_CMD_RSPIN_START,
+ SMP2P_LB_CMD_RSPIN_LOCKED,
+ SMP2P_LB_CMD_RSPIN_UNLOCKED,
+ SMP2P_LB_CMD_RSPIN_END,
+};
+#define SMP2P_RLPB_IGNORE 0x40
+#define SMP2P_RLPB_ENTRY_NAME "smp2p"
+
+/* Debug Logging Macros */
+enum {
+ MSM_SMP2P_INFO = 1U << 0,
+ MSM_SMP2P_DEBUG = 1U << 1,
+ MSM_SMP2P_GPIO = 1U << 2,
+};
+
/*
 * Log to the SMP2P IPC log context when one exists.  The context is
 * fetched once into a local so the check and the use see the same value
 * (the original expansion called smp2p_get_log_ctx() twice).
 */
#define SMP2P_IPC_LOG_STR(x...) do { \
	void *ipc_log_ctx_once = smp2p_get_log_ctx(); \
	if (ipc_log_ctx_once) \
		ipc_log_string(ipc_log_ctx_once, x); \
} while (0)
+
+#define SMP2P_DBG(x...) do { \
+ if (smp2p_get_debug_mask() & MSM_SMP2P_DEBUG) \
+ SMP2P_IPC_LOG_STR(x); \
+} while (0)
+
+#define SMP2P_INFO(x...) do { \
+ if (smp2p_get_debug_mask() & MSM_SMP2P_INFO) \
+ SMP2P_IPC_LOG_STR(x); \
+} while (0)
+
+#define SMP2P_ERR(x...) do { \
+ pr_err(x); \
+ SMP2P_IPC_LOG_STR(x); \
+} while (0)
+
+#define SMP2P_GPIO(x...) do { \
+ if (smp2p_get_debug_mask() & MSM_SMP2P_GPIO) \
+ SMP2P_IPC_LOG_STR(x); \
+} while (0)
+
+
/*
 * State of an outbound SMEM edge; stored in out_list[].smem_edge_state
 * and initialized to SMP2P_EDGE_STATE_CLOSED by msm_smp2p_init().
 */
enum msm_smp2p_edge_state {
	SMP2P_EDGE_STATE_CLOSED,
	SMP2P_EDGE_STATE_OPENING,
	SMP2P_EDGE_STATE_OPENED,
	SMP2P_EDGE_STATE_FAILED = 0xff,	/* distinct sentinel for failure */
};
+
+/**
+ * struct smp2p_smem - SMP2P SMEM Item Header
+ *
+ * @magic: Set to "$SMP" -- used for identification / debug purposes
+ * @feature_version: Feature and version fields
+ * @rem_loc_proc_id: Remote (31:16) and Local (15:0) processor IDs
+ * @valid_total_ent: Valid (31:16) and total (15:0) entries
+ * @flags: Flags (bits 31:2 reserved)
+ */
+struct smp2p_smem {
+ uint32_t magic;
+ uint32_t feature_version;
+ uint32_t rem_loc_proc_id;
+ uint32_t valid_total_ent;
+ uint32_t flags;
+};
+
/* Version 1 entry layout: fixed-size name plus one 32-bit entry value. */
struct smp2p_entry_v1 {
	char name[SMP2P_MAX_ENTRY_NAME];	/* includes trailing NUL */
	uint32_t entry;
};

/* Complete SMEM item: header followed by a fixed array of v1 entries. */
struct smp2p_smem_item {
	struct smp2p_smem header;
	struct smp2p_entry_v1 entries[SMP2P_MAX_ENTRY];
};
+
/*
 * Mock object for internal loopback testing.
 *
 * @remote_item:        simulated SMEM item of the mock remote processor
 * @rx_interrupt_count: incremented each time the mock receives an interrupt
 * @rx_interrupt:       delivers an interrupt to the mock processor
 * @tx_interrupt:       called by the mock to interrupt the Apps processor
 * @item_exists:        true = pretend the remote SMEM item exists
 * @initialized:        set at end of init; gates use of @cb_completion
 * @cb_completion:      signaled when the mock receives an interrupt
 */
struct msm_smp2p_remote_mock {
	struct smp2p_smem_item remote_item;
	int rx_interrupt_count;
	int (*rx_interrupt)(void);
	void (*tx_interrupt)(void);

	bool item_exists;
	bool initialized;
	struct completion cb_completion;
};
+
+void smp2p_init_header(struct smp2p_smem *header_ptr, int local_pid,
+ int remote_pid, uint32_t features, uint32_t version);
+void *msm_smp2p_get_remote_mock(void);
+int smp2p_remote_mock_rx_interrupt(void);
+int smp2p_reset_mock_edge(void);
+void msm_smp2p_interrupt_handler(int);
+void msm_smp2p_set_remote_mock_exists(bool item_exists);
+void *msm_smp2p_get_remote_mock_smem_item(uint32_t *size);
+void *msm_smp2p_init_rmt_lpb_proc(int remote_pid);
+int msm_smp2p_deinit_rmt_lpb_proc(int remote_pid);
+void *smp2p_get_log_ctx(void);
+int smp2p_get_debug_mask(void);
+
/* Inbound / outbound Interrupt configuration. */
struct smp2p_interrupt_config {
	bool is_configured;	/* set last during probe so partially
				 * filled entries are never used
				 */
	uint32_t *out_int_ptr;	/* ioremapped outbound interrupt register */
	uint32_t out_int_mask;	/* bit mask written to *out_int_ptr */
	int in_int_id;		/* inbound IRQ line */
	const char *name;

	/* interrupt stats */
	unsigned in_interrupt_count;
	unsigned out_interrupt_count;
};
+
+struct smp2p_interrupt_config *smp2p_get_interrupt_config(void);
+const char *smp2p_pid_to_name(int remote_pid);
+struct smp2p_smem *smp2p_get_in_item(int remote_pid);
+struct smp2p_smem *smp2p_get_out_item(int remote_pid, int *state);
+void smp2p_gpio_open_test_entry(const char *name, int remote_pid, bool do_open);
+#endif
diff --git a/drivers/soc/qcom/smp2p_private_api.h b/drivers/soc/qcom/smp2p_private_api.h
new file mode 100644
index 000000000000..8cecbfc27063
--- /dev/null
+++ b/drivers/soc/qcom/smp2p_private_api.h
@@ -0,0 +1,79 @@
+/* drivers/soc/qcom/smp2p_private_api.h
+ *
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _ARCH_ARM_MACH_MSM_SMP2P_PRIVATE_API_H_
+#define _ARCH_ARM_MACH_MSM_SMP2P_PRIVATE_API_H_
+
+#include <linux/notifier.h>
+
+struct msm_smp2p_out;
+
+/* Maximum size of the entry name and trailing null. */
+#define SMP2P_MAX_ENTRY_NAME 16
+
+/* Bits per entry */
+#define SMP2P_BITS_PER_ENTRY 32
+
+/* Processor ID's */
+enum {
+ SMP2P_APPS_PROC = 0,
+ SMP2P_MODEM_PROC = 1,
+ SMP2P_AUDIO_PROC = 2,
+ SMP2P_RESERVED_PROC_1 = 3,
+ SMP2P_WIRELESS_PROC = 4,
+ SMP2P_RESERVED_PROC_2 = 5,
+ SMP2P_POWER_PROC = 6,
+ /* add new processors here */
+
+ SMP2P_REMOTE_MOCK_PROC,
+ SMP2P_NUM_PROCS,
+};
+
/**
 * Notification events that are passed to notifier for incoming and outgoing
 * entries.
 *
 * If the @data argument in the notifier is non-null, then it will
 * point to the associated struct msm_smp2p_update_notif structure.
 */
enum msm_smp2p_events {
	SMP2P_OPEN,		/* data is NULL */
	SMP2P_ENTRY_UPDATE,	/* data => struct msm_smp2p_update_notif */
};
+
/**
 * Passed in response to a SMP2P_ENTRY_UPDATE event.
 *
 * @previous_value: previous value of entry
 * @current_value: latest value of entry
 */
struct msm_smp2p_update_notif {
	uint32_t previous_value;
	uint32_t current_value;
};
+
+int msm_smp2p_out_open(int remote_pid, const char *entry,
+ struct notifier_block *open_notifier,
+ struct msm_smp2p_out **handle);
+int msm_smp2p_out_close(struct msm_smp2p_out **handle);
+int msm_smp2p_out_read(struct msm_smp2p_out *handle, uint32_t *data);
+int msm_smp2p_out_write(struct msm_smp2p_out *handle, uint32_t data);
+int msm_smp2p_out_modify(struct msm_smp2p_out *handle, uint32_t set_mask,
+ uint32_t clear_mask);
+int msm_smp2p_in_read(int remote_pid, const char *entry, uint32_t *data);
+int msm_smp2p_in_register(int remote_pid, const char *entry,
+ struct notifier_block *in_notifier);
+int msm_smp2p_in_unregister(int remote_pid, const char *entry,
+ struct notifier_block *in_notifier);
+
+#endif /* _ARCH_ARM_MACH_MSM_SMP2P_PRIVATE_API_H_ */
diff --git a/drivers/soc/qcom/smp2p_spinlock_test.c b/drivers/soc/qcom/smp2p_spinlock_test.c
new file mode 100644
index 000000000000..35086ea20658
--- /dev/null
+++ b/drivers/soc/qcom/smp2p_spinlock_test.c
@@ -0,0 +1,499 @@
+/* drivers/soc/qcom/smp2p_spinlock_test.c
+ *
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/ctype.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/remote_spinlock.h>
+#include <soc/qcom/smem.h>
+#include "smem_private.h"
+#include "smp2p_private.h"
+#include "smp2p_test_common.h"
+
+#define REMOTE_SPIN_PID 1
+#define RS_END_THIEF_PID_BIT 20
+#define RS_END_THIEF_MASK 0x00f00000
+
+/* Spinlock commands used for testing Apps<->RPM spinlocks. */
+enum RPM_SPINLOCK_CMDS {
+ RPM_CMD_INVALID,
+ RPM_CMD_START,
+ RPM_CMD_LOCKED,
+ RPM_CMD_UNLOCKED,
+ RPM_CMD_END,
+};
+
+/* Shared structure for testing Apps<->RPM spinlocks. */
+struct rpm_spinlock_test {
+ uint32_t apps_cmd;
+ uint32_t apps_lock_count;
+ uint32_t rpm_cmd;
+ uint32_t rpm_lock_count;
+};
+
+static uint32_t ut_remote_spinlock_run_time = 1;
+
/**
 * smp2p_ut_remote_spinlock_core - Verify remote spinlock.
 *
 * @s: Pointer to output file
 * @remote_pid: Remote processor to test
 * @use_trylock: Use trylock to prevent an Apps deadlock if the
 *		remote spinlock fails.
 *
 * Drives the requester side of the remote spinlock loopback test: opens
 * the loopback entries, sends RSPIN_START, then repeatedly takes the SMEM
 * remote spinlock while verifying the remote side never claims it at the
 * same time.  NOTE: the UT_ASSERT_* macros break out of the enclosing
 * do/while(0) on failure, so the statement order below is significant.
 */
static void smp2p_ut_remote_spinlock_core(struct seq_file *s, int remote_pid,
		bool use_trylock)
{
	int failed = 0;
	unsigned lock_count = 0;
	struct msm_smp2p_out *handle = NULL;
	int ret;
	uint32_t test_request;
	uint32_t test_response;
	struct mock_cb_data cb_out;
	struct mock_cb_data cb_in;
	unsigned long flags;
	unsigned n;
	bool have_lock;
	bool timeout;
	int failed_tmp;
	int spinlock_owner;
	remote_spinlock_t *smem_spinlock;
	unsigned long end;

	seq_printf(s, "Running %s for '%s' remote pid %d\n",
		   __func__, smp2p_pid_to_name(remote_pid), remote_pid);

	cb_out.initialized = false;
	cb_in.initialized = false;
	mock_cb_data_init(&cb_out);
	mock_cb_data_init(&cb_in);
	do {
		smem_spinlock = smem_get_remote_spinlock();
		UT_ASSERT_PTR(smem_spinlock, !=, NULL);

		/* Open output entry */
		ret = msm_smp2p_out_open(remote_pid, SMP2P_RLPB_ENTRY_NAME,
			&cb_out.nb, &handle);
		UT_ASSERT_INT(ret, ==, 0);
		UT_ASSERT_INT(
			(int)wait_for_completion_timeout(
					&cb_out.cb_completion, HZ * 2),
			>, 0);
		UT_ASSERT_INT(cb_out.cb_count, ==, 1);
		UT_ASSERT_INT(cb_out.event_open, ==, 1);

		/* Open inbound entry */
		ret = msm_smp2p_in_register(remote_pid, SMP2P_RLPB_ENTRY_NAME,
				&cb_in.nb);
		UT_ASSERT_INT(ret, ==, 0);
		UT_ASSERT_INT(
			(int)wait_for_completion_timeout(
					&cb_in.cb_completion, HZ * 2),
			>, 0);
		UT_ASSERT_INT(cb_in.cb_count, ==, 1);
		UT_ASSERT_INT(cb_in.event_open, ==, 1);

		/* Send start */
		mock_cb_data_reset(&cb_in);
		mock_cb_data_reset(&cb_out);
		test_request = 0x0;
		SMP2P_SET_RMT_CMD_TYPE_REQ(test_request);
		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_START);
		SMP2P_SET_RMT_DATA(test_request, 0x0);
		ret = msm_smp2p_out_write(handle, test_request);
		UT_ASSERT_INT(ret, ==, 0);

		UT_ASSERT_INT(
			(int)wait_for_completion_timeout(
					&cb_in.cb_completion, HZ * 2),
			>, 0);
		UT_ASSERT_INT(cb_in.cb_count, ==, 1);
		UT_ASSERT_INT(cb_in.event_entry_update, ==, 1);
		ret = msm_smp2p_in_read(remote_pid, SMP2P_RLPB_ENTRY_NAME,
				&test_response);
		UT_ASSERT_INT(ret, ==, 0);

		test_response = SMP2P_GET_RMT_CMD(test_response);
		if (test_response != SMP2P_LB_CMD_RSPIN_LOCKED &&
				test_response != SMP2P_LB_CMD_RSPIN_UNLOCKED) {
			/* invalid response from remote - abort test */
			test_request = 0x0;
			SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
			SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
			SMP2P_SET_RMT_DATA(test_request, 0x0);
			ret = msm_smp2p_out_write(handle, test_request);
			UT_ASSERT_HEX(SMP2P_LB_CMD_RSPIN_LOCKED, ==,
					test_response);
		}

		/* Run spinlock test */
		if (use_trylock)
			seq_puts(s, "\tUsing remote_spin_trylock\n");
		else
			seq_puts(s, "\tUsing remote_spin_lock\n");

		flags = 0;
		have_lock = false;
		timeout = false;
		spinlock_owner = 0;
		test_request = 0x0;
		SMP2P_SET_RMT_CMD_TYPE_REQ(test_request);
		end = jiffies + (ut_remote_spinlock_run_time * HZ);
		if (ut_remote_spinlock_run_time < 300) {
			seq_printf(s, "\tRunning test for %u seconds; ",
				ut_remote_spinlock_run_time);
			seq_puts(s,
				"on physical hardware please run >= 300 seconds by doing 'echo 300 > ut_remote_spinlock_time'\n");
		}
		while (time_is_after_jiffies(end)) {
			/* try to acquire spinlock */
			if (use_trylock) {
				unsigned long j_start = jiffies;
				while (!remote_spin_trylock_irqsave(
						smem_spinlock, flags)) {
					if (jiffies_to_msecs(jiffies - j_start)
							> 1000) {
						seq_puts(s,
							"\tFail: Timeout trying to get the lock\n");
						timeout = true;
						break;
					}
				}
				if (timeout)
					break;
			} else {
				remote_spin_lock_irqsave(smem_spinlock, flags);
			}
			have_lock = true;
			++lock_count;

			/* tell the remote side that we have the lock */
			SMP2P_SET_RMT_DATA(test_request, lock_count);
			SMP2P_SET_RMT_CMD(test_request,
					SMP2P_LB_CMD_RSPIN_LOCKED);
			ret = msm_smp2p_out_write(handle, test_request);
			UT_ASSERT_INT(ret, ==, 0);

			/* verify the other side doesn't say it has the lock */
			for (n = 0; n < 1000; ++n) {
				spinlock_owner =
					remote_spin_owner(smem_spinlock);
				if (spinlock_owner != REMOTE_SPIN_PID) {
					/* lock stolen by remote side */
					seq_puts(s, "\tFail: Remote side: ");
					seq_printf(s, "%d stole lock pid: %d\n",
							remote_pid, spinlock_owner);
					failed = true;
					break;
				}
				spinlock_owner = 0;

				ret = msm_smp2p_in_read(remote_pid,
					SMP2P_RLPB_ENTRY_NAME, &test_response);
				UT_ASSERT_INT(ret, ==, 0);
				test_response =
					SMP2P_GET_RMT_CMD(test_response);
				UT_ASSERT_HEX(SMP2P_LB_CMD_RSPIN_UNLOCKED, ==,
						test_response);
			}
			if (failed)
				break;

			/* tell remote side we are unlocked and release lock */
			SMP2P_SET_RMT_CMD(test_request,
					SMP2P_LB_CMD_RSPIN_UNLOCKED);
			(void)msm_smp2p_out_write(handle, test_request);
			have_lock = false;
			remote_spin_unlock_irqrestore(smem_spinlock, flags);
		}
		if (have_lock)
			remote_spin_unlock_irqrestore(smem_spinlock, flags);

		/* End test */
		mock_cb_data_reset(&cb_in);
		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
		SMP2P_SET_RMT_DATA(test_request, lock_count |
				(spinlock_owner << RS_END_THIEF_PID_BIT));
		(void)msm_smp2p_out_write(handle, test_request);

		/*
		 * Temporarily clear 'failed' so the UT_ASSERTs in the wait
		 * loop below can report their own failures; the prior
		 * verdict is restored afterwards from failed_tmp.
		 */
		failed_tmp = failed;
		failed = false;
		do {
			UT_ASSERT_INT(
				(int)wait_for_completion_timeout(
					&cb_in.cb_completion, HZ * 2),
				>, 0);
			reinit_completion(&cb_in.cb_completion);
			ret = msm_smp2p_in_read(remote_pid,
					SMP2P_RLPB_ENTRY_NAME, &test_response);
			UT_ASSERT_INT(ret, ==, 0);
		} while (!failed &&
			SMP2P_GET_RMT_CMD(test_response) !=
			SMP2P_LB_CMD_RSPIN_END);
		if (failed)
			break;
		failed = failed_tmp;

		test_response = SMP2P_GET_RMT_DATA(test_response);
		seq_puts(s, "\tLocked spinlock ");
		seq_printf(s, "local %u times; remote %u times",
			lock_count,
			test_response & ((1 << RS_END_THIEF_PID_BIT) - 1)
			);
		if (test_response & RS_END_THIEF_MASK) {
			seq_puts(s, "Remote side reporting lock stolen by ");
			seq_printf(s, "pid %d.\n",
				SMP2P_GET_BITS(test_response,
					RS_END_THIEF_MASK,
					RS_END_THIEF_PID_BIT));
			failed = 1;
		}
		seq_puts(s, "\n");

		/* Cleanup */
		ret = msm_smp2p_out_close(&handle);
		UT_ASSERT_INT(ret, ==, 0);
		UT_ASSERT_PTR(handle, ==, NULL);
		ret = msm_smp2p_in_unregister(remote_pid,
				SMP2P_RLPB_ENTRY_NAME, &cb_in.nb);
		UT_ASSERT_INT(ret, ==, 0);

		if (!failed && !timeout)
			seq_puts(s, "\tOK\n");
	} while (0);

	if (failed) {
		if (handle) {
			/* send end command */
			test_request = 0;
			SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
			SMP2P_SET_RMT_DATA(test_request, lock_count);
			(void)msm_smp2p_out_write(handle, test_request);
			(void)msm_smp2p_out_close(&handle);
		}
		(void)msm_smp2p_in_unregister(remote_pid,
			SMP2P_RLPB_ENTRY_NAME, &cb_in.nb);

		pr_err("%s: Failed\n", __func__);
		seq_puts(s, "\tFailed\n");
	}
}
+
+/**
+ * smp2p_ut_remote_spinlock_pid - Run the remote spinlock test for one CPU.
+ *
+ * @s:           Pointer to output file
+ * @pid:         Processor to test
+ * @use_trylock: Use trylock to prevent an Apps deadlock if the
+ *               remote spinlock fails.
+ *
+ * Processors that are out of range or not configured are silently
+ * skipped.  The remote loopback server is torn down for the duration of
+ * the core test and restored afterwards (presumably so the test owns
+ * the loopback entry exclusively — confirm against the lpb server).
+ */
+static void smp2p_ut_remote_spinlock_pid(struct seq_file *s, int pid,
+				bool use_trylock)
+{
+	struct smp2p_interrupt_config *cfg = smp2p_get_interrupt_config();
+
+	if (!cfg) {
+		seq_puts(s, "Remote processor config unavailable\n");
+		return;
+	}
+
+	if (pid < SMP2P_NUM_PROCS && cfg[pid].is_configured) {
+		msm_smp2p_deinit_rmt_lpb_proc(pid);
+		smp2p_ut_remote_spinlock_core(s, pid, use_trylock);
+		msm_smp2p_init_rmt_lpb_proc(pid);
+	}
+}
+
+/**
+ * smp2p_ut_remote_spinlock - Verify remote spinlock for all processors.
+ *
+ * @s: pointer to output file
+ *
+ * Iterates every known processor id; unconfigured ones are filtered
+ * out inside smp2p_ut_remote_spinlock_pid().
+ */
+static void smp2p_ut_remote_spinlock(struct seq_file *s)
+{
+	int proc;
+
+	for (proc = 0; proc < SMP2P_NUM_PROCS; ++proc)
+		smp2p_ut_remote_spinlock_pid(s, proc, false);
+}
+
+/**
+ * smp2p_ut_remote_spin_trylock - Verify remote trylock for all processors.
+ *
+ * @s: Pointer to output file
+ *
+ * Same sweep as smp2p_ut_remote_spinlock(), but asks the core test to
+ * use trylock so a broken remote spinlock cannot deadlock Apps.
+ */
+static void smp2p_ut_remote_spin_trylock(struct seq_file *s)
+{
+	int proc;
+
+	for (proc = 0; proc < SMP2P_NUM_PROCS; ++proc)
+		smp2p_ut_remote_spinlock_pid(s, proc, true);
+}
+
+/**
+ * smp2p_ut_remote_spinlock_modem - Verify remote spinlock for the modem.
+ *
+ * @s: pointer to output file
+ *
+ * Runs the standard (non-trylock) remote spinlock test against the
+ * modem processor only.
+ */
+static void smp2p_ut_remote_spinlock_modem(struct seq_file *s)
+{
+	smp2p_ut_remote_spinlock_pid(s, SMP2P_MODEM_PROC, false);
+}
+
+/**
+ * smp2p_ut_remote_spinlock_adsp - Verify remote spinlock for the ADSP.
+ *
+ * @s: pointer to output file
+ *
+ * Runs the standard (non-trylock) remote spinlock test against the
+ * audio DSP only.
+ */
+static void smp2p_ut_remote_spinlock_adsp(struct seq_file *s)
+{
+	smp2p_ut_remote_spinlock_pid(s, SMP2P_AUDIO_PROC, false);
+}
+
+/**
+ * smp2p_ut_remote_spinlock_wcnss - Verify remote spinlock for WCNSS.
+ *
+ * @s: pointer to output file
+ *
+ * Runs the standard (non-trylock) remote spinlock test against the
+ * wireless subsystem only.
+ */
+static void smp2p_ut_remote_spinlock_wcnss(struct seq_file *s)
+{
+	smp2p_ut_remote_spinlock_pid(s, SMP2P_WIRELESS_PROC, false);
+}
+
+/**
+ * smp2p_ut_remote_spinlock_rpm - Verify remote spinlock against the RPM.
+ *
+ * @s: pointer to output file
+ *
+ * Unlike the other processors, the RPM handshake runs over a shared
+ * SMEM region (struct rpm_spinlock_test) instead of smp2p entries:
+ * each side publishes its lock state in its cmd word and checks that
+ * the peer never claims the lock concurrently.
+ */
+static void smp2p_ut_remote_spinlock_rpm(struct seq_file *s)
+{
+	int failed = 0;
+	unsigned long flags;
+	unsigned n;
+	unsigned test_num;
+	struct rpm_spinlock_test *data_ptr;
+	remote_spinlock_t *smem_spinlock;
+	bool have_lock;
+
+	seq_printf(s, "Running %s for Apps<->RPM Test\n",
+			__func__);
+	do {
+		smem_spinlock = smem_get_remote_spinlock();
+		UT_ASSERT_PTR(smem_spinlock, !=, NULL);
+
+		/* shared handshake structure lives in SMEM */
+		data_ptr = smem_alloc(SMEM_ID_VENDOR0,
+				sizeof(struct rpm_spinlock_test), 0,
+				SMEM_ANY_HOST_FLAG);
+		UT_ASSERT_PTR(0, !=, data_ptr);
+
+		/* Send start */
+		writel_relaxed(0, &data_ptr->apps_lock_count);
+		writel_relaxed(RPM_CMD_START, &data_ptr->apps_cmd);
+
+		/* poll up to ~1s for the RPM to acknowledge the start */
+		seq_puts(s, "\tWaiting for RPM to start test\n");
+		for (n = 0; n < 1000; ++n) {
+			if (readl_relaxed(&data_ptr->rpm_cmd) !=
+					RPM_CMD_INVALID)
+				break;
+			msleep(1);
+		}
+		if (readl_relaxed(&data_ptr->rpm_cmd) == RPM_CMD_INVALID) {
+			/* timeout waiting for RPM */
+			writel_relaxed(RPM_CMD_INVALID, &data_ptr->apps_cmd);
+			/*
+			 * NOTE(review): this compares two distinct constants,
+			 * so the assert always passes and a timeout is NOT
+			 * reported as a failure — confirm this is intended.
+			 */
+			UT_ASSERT_INT(RPM_CMD_LOCKED, !=, RPM_CMD_INVALID);
+		}
+
+		/* Run spinlock test */
+		flags = 0;
+		have_lock = false;
+		for (test_num = 0; !failed && test_num < 10000; ++test_num) {
+			/* acquire spinlock */
+			remote_spin_lock_irqsave(smem_spinlock, flags);
+			have_lock = true;
+			data_ptr->apps_lock_count++;
+			writel_relaxed(data_ptr->apps_lock_count,
+				&data_ptr->apps_lock_count);
+			writel_relaxed(RPM_CMD_LOCKED, &data_ptr->apps_cmd);
+			/*
+			 * Ensure that the remote side sees our lock has
+			 * been acquired before we start polling their status.
+			 */
+			wmb();
+
+			/* verify the other side doesn't say it has the lock */
+			for (n = 0; n < 1000; ++n) {
+				UT_ASSERT_HEX(RPM_CMD_UNLOCKED, ==,
+					readl_relaxed(&data_ptr->rpm_cmd));
+			}
+			if (failed)
+				break;
+
+			/* release spinlock */
+			have_lock = false;
+			writel_relaxed(RPM_CMD_UNLOCKED, &data_ptr->apps_cmd);
+			/*
+			 * Ensure that our status-update write was committed
+			 * before we unlock the spinlock.
+			 */
+			wmb();
+			remote_spin_unlock_irqrestore(smem_spinlock, flags);
+		}
+		/* drop the lock if an assert broke out while holding it */
+		if (have_lock)
+			remote_spin_unlock_irqrestore(smem_spinlock, flags);
+
+		/* End test */
+		writel_relaxed(RPM_CMD_INVALID, &data_ptr->apps_cmd);
+		seq_printf(s, "\tLocked spinlock local %u remote %u\n",
+				readl_relaxed(&data_ptr->apps_lock_count),
+				readl_relaxed(&data_ptr->rpm_lock_count));
+
+		if (!failed)
+			seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+	}
+}
+
+static int __init smp2p_debugfs_init(void)
+{
+	/*
+	 * Add Unit Test entries.
+	 *
+	 * The idea with unit tests is that you can run all of them
+	 * from ADB shell by doing:
+	 *  adb shell
+	 *  cat ut*
+	 *
+	 * And if particular tests fail, you can then repeatedly run the
+	 * failing tests as you debug and resolve the failing test.
+	 */
+	static const struct {
+		const char *name;
+		void (*show)(struct seq_file *s);
+	} tests[] = {
+		{"ut_remote_spinlock", smp2p_ut_remote_spinlock},
+		{"ut_remote_spin_trylock", smp2p_ut_remote_spin_trylock},
+		{"ut_remote_spinlock_modem", smp2p_ut_remote_spinlock_modem},
+		{"ut_remote_spinlock_adsp", smp2p_ut_remote_spinlock_adsp},
+		{"ut_remote_spinlock_wcnss", smp2p_ut_remote_spinlock_wcnss},
+		{"ut_remote_spinlock_rpm", smp2p_ut_remote_spinlock_rpm},
+	};
+	unsigned i;
+
+	/* registration order matches the original explicit call list */
+	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); ++i)
+		smp2p_debug_create(tests[i].name, tests[i].show);
+
+	smp2p_debug_create_u32("ut_remote_spinlock_time",
+		&ut_remote_spinlock_run_time);
+
+	return 0;
+}
+module_init(smp2p_debugfs_init);
diff --git a/drivers/soc/qcom/smp2p_test.c b/drivers/soc/qcom/smp2p_test.c
new file mode 100644
index 000000000000..f5573ddbbf17
--- /dev/null
+++ b/drivers/soc/qcom/smp2p_test.c
@@ -0,0 +1,1248 @@
+/* drivers/soc/qcom/smp2p_test.c
+ *
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/ctype.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <soc/qcom/subsystem_restart.h>
+#include "smp2p_private.h"
+#include "smp2p_test_common.h"
+
+/**
+ * smp2p_ut_local_basic - Basic sanity test using local loopback.
+ *
+ * @s: pointer to output file
+ *
+ * This test simulates a simple write and read
+ * when remote processor does not exist.
+ */
+static void smp2p_ut_local_basic(struct seq_file *s)
+{
+	int failed = 0;
+	/*
+	 * Must start as NULL: the failure path below unconditionally calls
+	 * msm_smp2p_out_close(&smp2p_obj), and an assert can fire before
+	 * msm_smp2p_out_open() ever assigns the pointer.
+	 */
+	struct msm_smp2p_out *smp2p_obj = NULL;
+	struct msm_smp2p_remote_mock *rmp = NULL;
+	int ret;
+	uint32_t test_request;
+	uint32_t test_response = 0;
+	static struct mock_cb_data cb_data;
+
+	seq_printf(s, "Running %s\n", __func__);
+	mock_cb_data_init(&cb_data);
+	do {
+		/* initialize mock edge and start opening */
+		ret = smp2p_reset_mock_edge();
+		UT_ASSERT_INT(ret, ==, 0);
+
+		rmp = msm_smp2p_get_remote_mock();
+		UT_ASSERT_PTR(rmp, !=, NULL);
+
+		rmp->rx_interrupt_count = 0;
+		memset(&rmp->remote_item, 0,
+			sizeof(struct smp2p_smem_item));
+
+		msm_smp2p_set_remote_mock_exists(false);
+
+		ret = msm_smp2p_out_open(SMP2P_REMOTE_MOCK_PROC, "smp2p",
+			&cb_data.nb, &smp2p_obj);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 0);
+		rmp->rx_interrupt_count = 0;
+
+		/* simulate response from remote side */
+		rmp->remote_item.header.magic = SMP2P_MAGIC;
+		SMP2P_SET_LOCAL_PID(
+			rmp->remote_item.header.rem_loc_proc_id,
+			SMP2P_REMOTE_MOCK_PROC);
+		SMP2P_SET_REMOTE_PID(
+			rmp->remote_item.header.rem_loc_proc_id,
+			SMP2P_APPS_PROC);
+		SMP2P_SET_VERSION(
+			rmp->remote_item.header.feature_version, 1);
+		SMP2P_SET_FEATURES(
+			rmp->remote_item.header.feature_version, 0);
+		SMP2P_SET_ENT_TOTAL(
+			rmp->remote_item.header.valid_total_ent, SMP2P_MAX_ENTRY);
+		SMP2P_SET_ENT_VALID(
+			rmp->remote_item.header.valid_total_ent, 0);
+		rmp->remote_item.header.flags = 0x0;
+		msm_smp2p_set_remote_mock_exists(true);
+		rmp->tx_interrupt();
+
+		/* verify port was opened */
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_data.cb_completion, HZ / 2), >, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_open, ==, 1);
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 2);
+
+		/* do write (test outbound entries) */
+		rmp->rx_interrupt_count = 0;
+		test_request = 0xC0DE;
+		ret = msm_smp2p_out_write(smp2p_obj, test_request);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
+
+		/* do read (test inbound entries) */
+		ret = msm_smp2p_out_read(smp2p_obj, &test_response);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(test_request, ==, test_response);
+
+		ret = msm_smp2p_out_close(&smp2p_obj);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_PTR(smp2p_obj, ==, 0);
+
+		seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+		(void)msm_smp2p_out_close(&smp2p_obj);
+	}
+}
+
+/**
+ * smp2p_ut_local_late_open - Verify post-negotiation opening.
+ *
+ * @s: pointer to output file
+ *
+ * Verify entry creation for opening entries after negotiation is complete.
+ */
+static void smp2p_ut_local_late_open(struct seq_file *s)
+{
+	int failed = 0;
+	/*
+	 * Must start as NULL: the failure path closes smp2p_obj even when
+	 * an assert fires before msm_smp2p_out_open() has assigned it.
+	 */
+	struct msm_smp2p_out *smp2p_obj = NULL;
+	struct msm_smp2p_remote_mock *rmp = NULL;
+	int ret;
+	uint32_t test_request;
+	uint32_t test_response = 0;
+	static struct mock_cb_data cb_data;
+
+	seq_printf(s, "Running %s\n", __func__);
+	mock_cb_data_init(&cb_data);
+	do {
+		/* initialize mock edge */
+		ret = smp2p_reset_mock_edge();
+		UT_ASSERT_INT(ret, ==, 0);
+
+		rmp = msm_smp2p_get_remote_mock();
+		UT_ASSERT_PTR(rmp, !=, NULL);
+
+		/* mock edge is fully negotiated before the open below */
+		rmp->rx_interrupt_count = 0;
+		memset(&rmp->remote_item, 0,
+			sizeof(struct smp2p_smem_item));
+		rmp->remote_item.header.magic = SMP2P_MAGIC;
+		SMP2P_SET_LOCAL_PID(
+			rmp->remote_item.header.rem_loc_proc_id,
+			SMP2P_REMOTE_MOCK_PROC);
+		SMP2P_SET_REMOTE_PID(
+			rmp->remote_item.header.rem_loc_proc_id,
+			SMP2P_APPS_PROC);
+		SMP2P_SET_VERSION(
+			rmp->remote_item.header.feature_version, 1);
+		SMP2P_SET_FEATURES(
+			rmp->remote_item.header.feature_version, 0);
+		SMP2P_SET_ENT_TOTAL(
+			rmp->remote_item.header.valid_total_ent,
+			SMP2P_MAX_ENTRY);
+		SMP2P_SET_ENT_VALID(
+			rmp->remote_item.header.valid_total_ent, 0);
+		rmp->remote_item.header.flags = 0x0;
+
+		msm_smp2p_set_remote_mock_exists(true);
+
+		ret = msm_smp2p_out_open(SMP2P_REMOTE_MOCK_PROC, "smp2p",
+			&cb_data.nb, &smp2p_obj);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		/* verify port was opened */
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_data.cb_completion, HZ / 2),
+			>, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_open, ==, 1);
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 2);
+
+		/* do write (test outbound entries) */
+		rmp->rx_interrupt_count = 0;
+		test_request = 0xC0DE;
+		ret = msm_smp2p_out_write(smp2p_obj, test_request);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
+
+		/* do read (test inbound entries) */
+		ret = msm_smp2p_out_read(smp2p_obj, &test_response);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(test_request, ==, test_response);
+
+		ret = msm_smp2p_out_close(&smp2p_obj);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_PTR(smp2p_obj, ==, 0);
+
+		seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+		(void)msm_smp2p_out_close(&smp2p_obj);
+	}
+}
+
+/**
+ * smp2p_ut_local_early_open - Verify pre-negotiation opening.
+ *
+ * @s: pointer to output file
+ *
+ * Verify entry creation for opening entries before negotiation is complete.
+ */
+static void smp2p_ut_local_early_open(struct seq_file *s)
+{
+	int failed = 0;
+	/*
+	 * Must start as NULL: the failure path closes smp2p_obj even when
+	 * an assert fires before msm_smp2p_out_open() has assigned it.
+	 */
+	struct msm_smp2p_out *smp2p_obj = NULL;
+	struct msm_smp2p_remote_mock *rmp = NULL;
+	struct smp2p_smem *outbound_item;
+	int negotiation_state;
+	int ret;
+	uint32_t test_request;
+	uint32_t test_response = 0;
+	static struct mock_cb_data cb_data;
+
+	seq_printf(s, "Running %s\n", __func__);
+	mock_cb_data_init(&cb_data);
+	do {
+		/* initialize mock edge, but don't enable, yet */
+		ret = smp2p_reset_mock_edge();
+		UT_ASSERT_INT(ret, ==, 0);
+
+		rmp = msm_smp2p_get_remote_mock();
+		UT_ASSERT_PTR(rmp, !=, NULL);
+
+		rmp->rx_interrupt_count = 0;
+		memset(&rmp->remote_item, 0,
+			sizeof(struct smp2p_smem_item));
+		rmp->remote_item.header.magic = SMP2P_MAGIC;
+		SMP2P_SET_LOCAL_PID(
+			rmp->remote_item.header.rem_loc_proc_id,
+			SMP2P_REMOTE_MOCK_PROC);
+		SMP2P_SET_REMOTE_PID(
+			rmp->remote_item.header.rem_loc_proc_id,
+			SMP2P_APPS_PROC);
+		SMP2P_SET_VERSION(
+			rmp->remote_item.header.feature_version, 1);
+		SMP2P_SET_FEATURES(
+			rmp->remote_item.header.feature_version, 0);
+		SMP2P_SET_ENT_TOTAL(
+			rmp->remote_item.header.valid_total_ent, SMP2P_MAX_ENTRY);
+		SMP2P_SET_ENT_VALID(
+			rmp->remote_item.header.valid_total_ent, 0);
+		rmp->remote_item.header.flags = 0x0;
+
+		msm_smp2p_set_remote_mock_exists(false);
+		UT_ASSERT_PTR(NULL, ==,
+				smp2p_get_in_item(SMP2P_REMOTE_MOCK_PROC));
+
+		/* initiate open, but verify it doesn't complete */
+		ret = msm_smp2p_out_open(SMP2P_REMOTE_MOCK_PROC, "smp2p",
+			&cb_data.nb, &smp2p_obj);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_data.cb_completion, HZ / 8),
+			==, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 0);
+		UT_ASSERT_INT(cb_data.event_open, ==, 0);
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
+
+		outbound_item = smp2p_get_out_item(SMP2P_REMOTE_MOCK_PROC,
+				&negotiation_state);
+		UT_ASSERT_PTR(outbound_item, !=, NULL);
+		UT_ASSERT_INT(negotiation_state, ==, SMP2P_EDGE_STATE_OPENING);
+		UT_ASSERT_INT(0, ==,
+			SMP2P_GET_ENT_VALID(outbound_item->valid_total_ent));
+
+		/* verify that read/write don't work yet */
+		rmp->rx_interrupt_count = 0;
+		test_request = 0x0;
+		ret = msm_smp2p_out_write(smp2p_obj, test_request);
+		UT_ASSERT_INT(ret, ==, -ENODEV);
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 0);
+
+		ret = msm_smp2p_out_read(smp2p_obj, &test_response);
+		UT_ASSERT_INT(ret, ==, -ENODEV);
+
+		/* allocate remote entry and verify open */
+		msm_smp2p_set_remote_mock_exists(true);
+		rmp->tx_interrupt();
+
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_data.cb_completion, HZ / 2),
+			>, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_open, ==, 1);
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 2);
+
+		/* do write (test outbound entries) */
+		rmp->rx_interrupt_count = 0;
+		test_request = 0xC0DE;
+		ret = msm_smp2p_out_write(smp2p_obj, test_request);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
+
+		/* do read (test inbound entries) */
+		ret = msm_smp2p_out_read(smp2p_obj, &test_response);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(test_request, ==, test_response);
+
+		ret = msm_smp2p_out_close(&smp2p_obj);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_PTR(smp2p_obj, ==, 0);
+
+		seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+		(void)msm_smp2p_out_close(&smp2p_obj);
+	}
+}
+
+/**
+ * smp2p_ut_mock_loopback - Exercise the remote loopback using remote mock.
+ *
+ * @s: pointer to output file
+ *
+ * This test exercises the remote loopback code using
+ * remote mock object. The remote mock object simulates the remote
+ * processor sending remote loopback commands to the local processor.
+ */
+static void smp2p_ut_mock_loopback(struct seq_file *s)
+{
+	int failed = 0;
+	struct msm_smp2p_remote_mock *rmp = NULL;
+	int ret;
+	uint32_t test_request = 0;
+	uint32_t test_response = 0;
+	struct msm_smp2p_out *local;
+
+	seq_printf(s, "Running %s\n", __func__);
+	do {
+		/* Initialize the mock edge */
+		ret = smp2p_reset_mock_edge();
+		UT_ASSERT_INT(ret, ==, 0);
+
+		rmp = msm_smp2p_get_remote_mock();
+		UT_ASSERT_PTR(rmp, !=, NULL);
+
+		memset(&rmp->remote_item, 0,
+			sizeof(struct smp2p_smem_item));
+		rmp->remote_item.header.magic = SMP2P_MAGIC;
+		SMP2P_SET_LOCAL_PID(
+			rmp->remote_item.header.rem_loc_proc_id,
+			SMP2P_REMOTE_MOCK_PROC);
+		SMP2P_SET_REMOTE_PID(
+			rmp->remote_item.header.rem_loc_proc_id,
+			SMP2P_APPS_PROC);
+		SMP2P_SET_VERSION(
+			rmp->remote_item.header.feature_version, 1);
+		SMP2P_SET_FEATURES(
+			rmp->remote_item.header.feature_version, 0);
+		SMP2P_SET_ENT_TOTAL(
+			rmp->remote_item.header.valid_total_ent, SMP2P_MAX_ENTRY);
+		SMP2P_SET_ENT_VALID(
+			rmp->remote_item.header.valid_total_ent, 1);
+		rmp->remote_item.header.flags = 0x0;
+		msm_smp2p_set_remote_mock_exists(true);
+
+		/* Create test entry and attach loopback server */
+		rmp->rx_interrupt_count = 0;
+		reinit_completion(&rmp->cb_completion);
+		strlcpy(rmp->remote_item.entries[0].name, "smp2p",
+			SMP2P_MAX_ENTRY_NAME);
+		rmp->remote_item.entries[0].entry = 0;
+		rmp->tx_interrupt();
+
+		/*
+		 * NOTE(review): 'local' is never NULL-checked before the
+		 * msm_smp2p_out_read() calls below — presumably a failed
+		 * init returns NULL and the read tolerates it; confirm.
+		 */
+		local = msm_smp2p_init_rmt_lpb_proc(SMP2P_REMOTE_MOCK_PROC);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&rmp->cb_completion, HZ / 2),
+			>, 0);
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 2);
+
+		/* Send Echo Command */
+		rmp->rx_interrupt_count = 0;
+		reinit_completion(&rmp->cb_completion);
+		SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
+		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_ECHO);
+		SMP2P_SET_RMT_DATA(test_request, 10);
+		rmp->remote_item.entries[0].entry = test_request;
+		rmp->tx_interrupt();
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&rmp->cb_completion, HZ / 2),
+			>, 0);
+
+		/* Verify Echo Response */
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
+		ret = msm_smp2p_out_read(local,
+				&test_response);
+		UT_ASSERT_INT(ret, ==, 0);
+		test_response = SMP2P_GET_RMT_DATA(test_response);
+		UT_ASSERT_INT(test_response, ==, 10);
+
+		/* Send PINGPONG command */
+		test_request = 0;
+		test_response = 0;
+		rmp->rx_interrupt_count = 0;
+		reinit_completion(&rmp->cb_completion);
+		SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
+		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_PINGPONG);
+		SMP2P_SET_RMT_DATA(test_request, 10);
+		rmp->remote_item.entries[0].entry = test_request;
+		rmp->tx_interrupt();
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&rmp->cb_completion, HZ / 2),
+			>, 0);
+
+		/* Verify PINGPONG Response (server decrements the data) */
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
+		ret = msm_smp2p_out_read(local, &test_response);
+		UT_ASSERT_INT(ret, ==, 0);
+		test_response = SMP2P_GET_RMT_DATA(test_response);
+		UT_ASSERT_INT(test_response, ==, 9);
+
+		/* Send CLEARALL command */
+		test_request = 0;
+		test_response = 0;
+		rmp->rx_interrupt_count = 0;
+		reinit_completion(&rmp->cb_completion);
+		SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
+		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_CLEARALL);
+		SMP2P_SET_RMT_DATA(test_request, 10);
+		rmp->remote_item.entries[0].entry = test_request;
+		rmp->tx_interrupt();
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&rmp->cb_completion, HZ / 2),
+			>, 0);
+
+		/* Verify CLEARALL response */
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
+		ret = msm_smp2p_out_read(local, &test_response);
+		UT_ASSERT_INT(ret, ==, 0);
+		test_response = SMP2P_GET_RMT_DATA(test_response);
+		UT_ASSERT_INT(test_response, ==, 0);
+
+		ret = msm_smp2p_deinit_rmt_lpb_proc(SMP2P_REMOTE_MOCK_PROC);
+		UT_ASSERT_INT(ret, ==, 0);
+		seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+		msm_smp2p_deinit_rmt_lpb_proc(SMP2P_REMOTE_MOCK_PROC);
+	}
+}
+
+/**
+ * smp2p_ut_remote_inout_core - Verify inbound/outbound functionality.
+ *
+ * @s: pointer to output file
+ * @remote_pid: Remote processor to test
+ *
+ * This test verifies inbound/outbound functionality for the remote processor.
+ */
+static void smp2p_ut_remote_inout_core(struct seq_file *s, int remote_pid)
+{
+	int failed = 0;
+	/*
+	 * Must start as NULL: the failure path tests 'if (handle)', which
+	 * reads an indeterminate value if an assert fires before
+	 * msm_smp2p_out_open() has assigned it.
+	 */
+	struct msm_smp2p_out *handle = NULL;
+	int ret;
+	uint32_t test_request;
+	uint32_t test_response = 0;
+	static struct mock_cb_data cb_out;
+	static struct mock_cb_data cb_in;
+
+	seq_printf(s, "Running %s for '%s' remote pid %d\n",
+		   __func__, smp2p_pid_to_name(remote_pid), remote_pid);
+	mock_cb_data_init(&cb_out);
+	mock_cb_data_init(&cb_in);
+	do {
+		/* Open output entry */
+		ret = msm_smp2p_out_open(remote_pid, "smp2p",
+			&cb_out.nb, &handle);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_out.cb_completion, HZ / 2),
+			>, 0);
+		UT_ASSERT_INT(cb_out.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_out.event_open, ==, 1);
+
+		/* Open inbound entry */
+		ret = msm_smp2p_in_register(remote_pid, "smp2p",
+				&cb_in.nb);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_in.cb_completion, HZ / 2),
+			>, 0);
+		UT_ASSERT_INT(cb_in.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_in.event_open, ==, 1);
+
+		/* Write an echo request */
+		mock_cb_data_reset(&cb_out);
+		mock_cb_data_reset(&cb_in);
+		test_request = 0x0;
+		SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
+		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_ECHO);
+		SMP2P_SET_RMT_DATA(test_request, 0xAA55);
+		ret = msm_smp2p_out_write(handle, test_request);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		/* Verify inbound reply */
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_in.cb_completion, HZ / 2),
+			>, 0);
+		UT_ASSERT_INT(cb_in.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_in.event_entry_update, ==, 1);
+		UT_ASSERT_INT(SMP2P_GET_RMT_DATA(
+			cb_in.entry_data.current_value), ==, 0xAA55);
+
+		ret = msm_smp2p_in_read(remote_pid, "smp2p", &test_response);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(0, ==, SMP2P_GET_RMT_CMD_TYPE(test_response));
+		UT_ASSERT_INT(SMP2P_LB_CMD_ECHO, ==,
+				SMP2P_GET_RMT_CMD(test_response));
+		UT_ASSERT_INT(0xAA55, ==, SMP2P_GET_RMT_DATA(test_response));
+
+		/* Write a clear all request */
+		mock_cb_data_reset(&cb_in);
+		test_request = 0x0;
+		SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
+		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_CLEARALL);
+		SMP2P_SET_RMT_DATA(test_request, 0xAA55);
+		ret = msm_smp2p_out_write(handle, test_request);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		/* Verify inbound reply */
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_in.cb_completion, HZ / 2),
+			>, 0);
+		UT_ASSERT_INT(cb_in.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_in.event_entry_update, ==, 1);
+		UT_ASSERT_INT(SMP2P_GET_RMT_DATA(
+			cb_in.entry_data.current_value), ==, 0x0000);
+
+		ret = msm_smp2p_in_read(remote_pid, "smp2p", &test_response);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(0, ==, SMP2P_GET_RMT_CMD_TYPE(test_response));
+		UT_ASSERT_INT(0x0000, ==, SMP2P_GET_RMT_DATA(test_response));
+
+		/* Write a decrement request */
+		mock_cb_data_reset(&cb_in);
+		test_request = 0x0;
+		SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
+		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_PINGPONG);
+		SMP2P_SET_RMT_DATA(test_request, 0xAA55);
+		ret = msm_smp2p_out_write(handle, test_request);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		/* Verify inbound reply */
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_in.cb_completion, HZ / 2),
+			>, 0);
+		UT_ASSERT_INT(cb_in.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_in.event_entry_update, ==, 1);
+		UT_ASSERT_INT(SMP2P_GET_RMT_DATA(
+			cb_in.entry_data.current_value), ==, 0xAA54);
+
+		ret = msm_smp2p_in_read(remote_pid, "smp2p", &test_response);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(0, ==, SMP2P_GET_RMT_CMD_TYPE(test_response));
+		UT_ASSERT_INT(SMP2P_LB_CMD_PINGPONG, ==,
+				SMP2P_GET_RMT_CMD(test_response));
+		UT_ASSERT_INT(0xAA54, ==, SMP2P_GET_RMT_DATA(test_response));
+
+		/* Test the ignore flag */
+		mock_cb_data_reset(&cb_in);
+		test_request = 0x0;
+		SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
+		SMP2P_SET_RMT_CMD(test_request, SMP2P_RLPB_IGNORE);
+		SMP2P_SET_RMT_DATA(test_request, 0xAA55);
+		ret = msm_smp2p_out_write(handle, test_request);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&cb_in.cb_completion, HZ / 2),
+			==, 0);
+		UT_ASSERT_INT(cb_in.cb_count, ==, 0);
+		UT_ASSERT_INT(cb_in.event_entry_update, ==, 0);
+		ret = msm_smp2p_in_read(remote_pid, "smp2p", &test_response);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(0xAA54, ==, SMP2P_GET_RMT_DATA(test_response));
+
+		/* Cleanup */
+		ret = msm_smp2p_out_close(&handle);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_PTR(handle, ==, 0);
+		ret = msm_smp2p_in_unregister(remote_pid, "smp2p", &cb_in.nb);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		if (handle)
+			(void)msm_smp2p_out_close(&handle);
+		(void)msm_smp2p_in_unregister(remote_pid, "smp2p", &cb_in.nb);
+
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+	}
+}
+
+/**
+ * smp2p_ut_remote_inout - Verify inbound/outbound functionality for all.
+ *
+ * @s: pointer to output file
+ *
+ * Runs the inbound/outbound loopback test against every configured
+ * remote processor, pausing the remote loopback server around each run.
+ */
+static void smp2p_ut_remote_inout(struct seq_file *s)
+{
+	struct smp2p_interrupt_config *cfg = smp2p_get_interrupt_config();
+	int proc;
+
+	if (!cfg) {
+		seq_puts(s, "Remote processor config unavailable\n");
+		return;
+	}
+
+	for (proc = 0; proc < SMP2P_NUM_PROCS; ++proc) {
+		if (!cfg[proc].is_configured)
+			continue;
+
+		msm_smp2p_deinit_rmt_lpb_proc(proc);
+		smp2p_ut_remote_inout_core(s, proc);
+		msm_smp2p_init_rmt_lpb_proc(proc);
+	}
+}
+
+/**
+ * smp2p_ut_remote_out_max_entries_core - Verify open functionality.
+ *
+ * @s: pointer to output file
+ * @remote_pid: Remote processor for which the test is executed.
+ *
+ * This test verifies open functionality by creating maximum outbound entries.
+ */
+static void smp2p_ut_remote_out_max_entries_core(struct seq_file *s,
+	int remote_pid)
+{
+	int j = 0;
+	int failed = 0;
+	struct msm_smp2p_out *handle[SMP2P_MAX_ENTRY];
+	int ret;
+	static struct mock_cb_data cb_out[SMP2P_MAX_ENTRY];
+	char entry_name[SMP2P_MAX_ENTRY_NAME];
+	int num_created;
+
+	seq_printf(s, "Running %s for '%s' remote pid %d\n",
+		   __func__, smp2p_pid_to_name(remote_pid), remote_pid);
+
+	/* pre-clear handles so the unconditional cleanup below is safe */
+	for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
+		handle[j] = NULL;
+		mock_cb_data_init(&cb_out[j]);
+	}
+
+	do {
+		num_created = 0;
+		for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
+			/* Open as many output entries as possible */
+			scnprintf((char *)entry_name, SMP2P_MAX_ENTRY_NAME,
+				"smp2p%d", j);
+			ret = msm_smp2p_out_open(remote_pid, entry_name,
+				&cb_out[j].nb, &handle[j]);
+			if (ret == -ENOMEM)
+				/* hit max number */
+				break;
+			UT_ASSERT_INT(ret, ==, 0);
+			++num_created;
+		}
+		if (failed)
+			break;
+
+		/* verify at least one entry (and no more than the max) */
+		UT_ASSERT_INT(num_created, <=, SMP2P_MAX_ENTRY);
+		UT_ASSERT_INT(num_created, >, 0);
+
+		seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+	}
+
+	/*
+	 * cleanup — closes every slot; unopened slots are still NULL, so
+	 * msm_smp2p_out_close() presumably rejects them harmlessly.
+	 */
+	for (j = 0; j < SMP2P_MAX_ENTRY; j++)
+		ret = msm_smp2p_out_close(&handle[j]);
+}
+
+/**
+ * smp2p_ut_remote_out_max_entries - Verify open for all configured processors.
+ *
+ * @s: pointer to output file
+ *
+ * Runs the max-outbound-entries test against every configured remote
+ * processor.
+ */
+static void smp2p_ut_remote_out_max_entries(struct seq_file *s)
+{
+	struct smp2p_interrupt_config *cfg = smp2p_get_interrupt_config();
+	int proc;
+
+	if (!cfg) {
+		seq_puts(s, "Remote processor config unavailable\n");
+		return;
+	}
+
+	for (proc = 0; proc < SMP2P_NUM_PROCS; ++proc)
+		if (cfg[proc].is_configured)
+			smp2p_ut_remote_out_max_entries_core(s, proc);
+}
+
+/**
+ * smp2p_ut_local_in_max_entries - Verify registering and unregistering.
+ *
+ * @s: pointer to output file
+ *
+ * This test verifies registering and unregistering for inbound entries using
+ * the remote mock processor.
+ */
+static void smp2p_ut_local_in_max_entries(struct seq_file *s)
+{
+	int j = 0;
+	int failed = 0;
+	struct msm_smp2p_remote_mock *rmp = NULL;
+	int ret;
+	static struct mock_cb_data cb_in[SMP2P_MAX_ENTRY];
+	static struct mock_cb_data cb_out;
+
+	seq_printf(s, "Running %s\n", __func__);
+
+	for (j = 0; j < SMP2P_MAX_ENTRY; j++)
+		mock_cb_data_init(&cb_in[j]);
+
+	mock_cb_data_init(&cb_out);
+
+	do {
+		/* Initialize mock edge */
+		ret = smp2p_reset_mock_edge();
+		UT_ASSERT_INT(ret, ==, 0);
+
+		rmp = msm_smp2p_get_remote_mock();
+		UT_ASSERT_PTR(rmp, !=, NULL);
+
+		rmp->rx_interrupt_count = 0;
+		memset(&rmp->remote_item, 0,
+			sizeof(struct smp2p_smem_item));
+		rmp->remote_item.header.magic = SMP2P_MAGIC;
+		SMP2P_SET_LOCAL_PID(
+			rmp->remote_item.header.rem_loc_proc_id,
+			SMP2P_REMOTE_MOCK_PROC);
+		SMP2P_SET_REMOTE_PID(
+			rmp->remote_item.header.rem_loc_proc_id,
+			SMP2P_APPS_PROC);
+		SMP2P_SET_VERSION(
+			rmp->remote_item.header.feature_version, 1);
+		SMP2P_SET_FEATURES(
+			rmp->remote_item.header.feature_version, 0);
+		SMP2P_SET_ENT_TOTAL(
+			rmp->remote_item.header.valid_total_ent, SMP2P_MAX_ENTRY);
+		SMP2P_SET_ENT_VALID(
+			rmp->remote_item.header.valid_total_ent, 0);
+		rmp->remote_item.header.flags = 0x0;
+		msm_smp2p_set_remote_mock_exists(true);
+
+		/* Create Max Entries in the remote mock object */
+		for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
+			scnprintf(rmp->remote_item.entries[j].name,
+				SMP2P_MAX_ENTRY_NAME, "smp2p%d", j);
+			rmp->remote_item.entries[j].entry = 0;
+			rmp->tx_interrupt();
+		}
+
+		/* Register for in entries */
+		for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
+			ret = msm_smp2p_in_register(SMP2P_REMOTE_MOCK_PROC,
+					rmp->remote_item.entries[j].name,
+					&(cb_in[j].nb));
+			UT_ASSERT_INT(ret, ==, 0);
+			UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&(cb_in[j].cb_completion), HZ / 2),
+				>, 0);
+			UT_ASSERT_INT(cb_in[j].cb_count, ==, 1);
+			UT_ASSERT_INT(cb_in[j].event_entry_update, ==, 0);
+		}
+		UT_ASSERT_INT(j, ==, SMP2P_MAX_ENTRY);
+
+		/* Unregister */
+		for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
+			ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
+					rmp->remote_item.entries[j].name,
+					&(cb_in[j].nb));
+			UT_ASSERT_INT(ret, ==, 0);
+		}
+		UT_ASSERT_INT(j, ==, SMP2P_MAX_ENTRY);
+
+		seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+
+		/*
+		 * rmp is still NULL if the mock edge could not be acquired;
+		 * dereferencing it here would be a NULL pointer access.
+		 */
+		if (rmp)
+			for (j = 0; j < SMP2P_MAX_ENTRY; j++)
+				ret = msm_smp2p_in_unregister(
+					SMP2P_REMOTE_MOCK_PROC,
+					rmp->remote_item.entries[j].name,
+					&(cb_in[j].nb));
+	}
+}
+
+/**
+ * smp2p_ut_local_in_multiple - Verify Multiple Inbound Registration.
+ *
+ * @s: pointer to output file
+ *
+ * This test verifies multiple clients registering for same inbound entries
+ * using the remote mock processor.
+ */
+static void smp2p_ut_local_in_multiple(struct seq_file *s)
+{
+	int failed = 0;
+	struct msm_smp2p_remote_mock *rmp = NULL;
+	int ret;
+	static struct mock_cb_data cb_in_1;
+	static struct mock_cb_data cb_in_2;
+	static struct mock_cb_data cb_out;
+
+	seq_printf(s, "Running %s\n", __func__);
+
+	mock_cb_data_init(&cb_in_1);
+	mock_cb_data_init(&cb_in_2);
+	mock_cb_data_init(&cb_out);
+
+	do {
+		/* Initialize mock edge */
+		ret = smp2p_reset_mock_edge();
+		UT_ASSERT_INT(ret, ==, 0);
+
+		rmp = msm_smp2p_get_remote_mock();
+		UT_ASSERT_PTR(rmp, !=, NULL);
+
+		rmp->rx_interrupt_count = 0;
+		memset(&rmp->remote_item, 0,
+			sizeof(struct smp2p_smem_item));
+		rmp->remote_item.header.magic = SMP2P_MAGIC;
+		SMP2P_SET_LOCAL_PID(
+			rmp->remote_item.header.rem_loc_proc_id,
+			SMP2P_REMOTE_MOCK_PROC);
+		SMP2P_SET_REMOTE_PID(
+			rmp->remote_item.header.rem_loc_proc_id,
+			SMP2P_APPS_PROC);
+		SMP2P_SET_VERSION(
+			rmp->remote_item.header.feature_version, 1);
+		SMP2P_SET_FEATURES(
+			rmp->remote_item.header.feature_version, 0);
+		SMP2P_SET_ENT_TOTAL(
+			rmp->remote_item.header.valid_total_ent, 1);
+		SMP2P_SET_ENT_VALID(
+			rmp->remote_item.header.valid_total_ent, 0);
+		rmp->remote_item.header.flags = 0x0;
+		msm_smp2p_set_remote_mock_exists(true);
+
+		/* Create an Entry in the remote mock object */
+		scnprintf(rmp->remote_item.entries[0].name,
+			SMP2P_MAX_ENTRY_NAME, "smp2p%d", 1);
+		rmp->remote_item.entries[0].entry = 0;
+		rmp->tx_interrupt();
+
+		/* Register multiple clients for the inbound entry */
+		ret = msm_smp2p_in_register(SMP2P_REMOTE_MOCK_PROC,
+				rmp->remote_item.entries[0].name,
+				&cb_in_1.nb);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&(cb_in_1.cb_completion), HZ / 2),
+			>, 0);
+		UT_ASSERT_INT(cb_in_1.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_in_1.event_entry_update, ==, 0);
+
+		ret = msm_smp2p_in_register(SMP2P_REMOTE_MOCK_PROC,
+				rmp->remote_item.entries[0].name,
+				&cb_in_2.nb);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+				&(cb_in_2.cb_completion), HZ / 2),
+			>, 0);
+		UT_ASSERT_INT(cb_in_2.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_in_2.event_entry_update, ==, 0);
+
+
+		/* Unregister the clients */
+		ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
+				rmp->remote_item.entries[0].name,
+				&(cb_in_1.nb));
+		UT_ASSERT_INT(ret, ==, 0);
+
+		ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
+				rmp->remote_item.entries[0].name,
+				&(cb_in_2.nb));
+		UT_ASSERT_INT(ret, ==, 0);
+
+		seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+
+		/*
+		 * rmp is still NULL if the mock edge could not be acquired;
+		 * dereferencing it here would be a NULL pointer access.
+		 */
+		if (rmp) {
+			ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
+					rmp->remote_item.entries[0].name,
+					&(cb_in_1.nb));
+
+			ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
+					rmp->remote_item.entries[0].name,
+					&(cb_in_2.nb));
+		}
+	}
+}
+
+/**
+ * smp2p_ut_local_ssr_ack - Verify SSR Done/ACK Feature
+ *
+ * @s: pointer to output file
+ */
+static void smp2p_ut_local_ssr_ack(struct seq_file *s)
+{
+ int failed = 0;
+ struct msm_smp2p_remote_mock *rmp = NULL;
+ int ret;
+
+ seq_printf(s, "Running %s\n", __func__);
+ do {
+ /* rhdr: mock remote->local header; lhdr: local outbound header */
+ struct smp2p_smem *rhdr;
+ struct smp2p_smem *lhdr;
+ int negotiation_state;
+
+ /* initialize v1 without SMP2P_FEATURE_SSR_ACK enabled */
+ ret = smp2p_reset_mock_edge();
+ UT_ASSERT_INT(ret, ==, 0);
+ rmp = msm_smp2p_get_remote_mock();
+ UT_ASSERT_PTR(rmp, !=, NULL);
+ rhdr = &rmp->remote_item.header;
+
+ rmp->rx_interrupt_count = 0;
+ memset(&rmp->remote_item, 0, sizeof(struct smp2p_smem_item));
+ rhdr->magic = SMP2P_MAGIC;
+ SMP2P_SET_LOCAL_PID(rhdr->rem_loc_proc_id,
+ SMP2P_REMOTE_MOCK_PROC);
+ SMP2P_SET_REMOTE_PID(rhdr->rem_loc_proc_id, SMP2P_APPS_PROC);
+ SMP2P_SET_VERSION(rhdr->feature_version, 1);
+ SMP2P_SET_FEATURES(rhdr->feature_version, 0);
+ SMP2P_SET_ENT_TOTAL(rhdr->valid_total_ent, SMP2P_MAX_ENTRY);
+ SMP2P_SET_ENT_VALID(rhdr->valid_total_ent, 0);
+ rhdr->flags = 0x0;
+ msm_smp2p_set_remote_mock_exists(true);
+ rmp->tx_interrupt();
+
+ /* verify edge is open */
+ lhdr = smp2p_get_out_item(SMP2P_REMOTE_MOCK_PROC,
+ &negotiation_state);
+ UT_ASSERT_PTR(NULL, !=, lhdr);
+ UT_ASSERT_INT(negotiation_state, ==, SMP2P_EDGE_STATE_OPENED);
+ UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
+
+ /* verify no response to ack feature */
+ /* with the feature off, RESTART_DONE must be ignored silently */
+ rmp->rx_interrupt_count = 0;
+ SMP2P_SET_RESTART_DONE(rhdr->flags, 1);
+ rmp->tx_interrupt();
+ UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_DONE(lhdr->flags));
+ UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_ACK(lhdr->flags));
+ UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 0);
+
+ /* initialize v1 with SMP2P_FEATURE_SSR_ACK enabled */
+ ret = smp2p_reset_mock_edge();
+ UT_ASSERT_INT(ret, ==, 0);
+ rmp = msm_smp2p_get_remote_mock();
+ UT_ASSERT_PTR(rmp, !=, NULL);
+ rhdr = &rmp->remote_item.header;
+
+ rmp->rx_interrupt_count = 0;
+ memset(&rmp->remote_item, 0, sizeof(struct smp2p_smem_item));
+ rhdr->magic = SMP2P_MAGIC;
+ SMP2P_SET_LOCAL_PID(rhdr->rem_loc_proc_id,
+ SMP2P_REMOTE_MOCK_PROC);
+ SMP2P_SET_REMOTE_PID(rhdr->rem_loc_proc_id, SMP2P_APPS_PROC);
+ SMP2P_SET_VERSION(rhdr->feature_version, 1);
+ SMP2P_SET_FEATURES(rhdr->feature_version,
+ SMP2P_FEATURE_SSR_ACK);
+ SMP2P_SET_ENT_TOTAL(rhdr->valid_total_ent, SMP2P_MAX_ENTRY);
+ SMP2P_SET_ENT_VALID(rhdr->valid_total_ent, 0);
+ rmp->rx_interrupt_count = 0;
+ rhdr->flags = 0x0;
+ msm_smp2p_set_remote_mock_exists(true);
+ rmp->tx_interrupt();
+
+ /* verify edge is open */
+ lhdr = smp2p_get_out_item(SMP2P_REMOTE_MOCK_PROC,
+ &negotiation_state);
+ UT_ASSERT_PTR(NULL, !=, lhdr);
+ UT_ASSERT_INT(negotiation_state, ==, SMP2P_EDGE_STATE_OPENED);
+ UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
+
+ /* verify response to ack feature */
+ /* asserting RESTART_DONE must be acked with RESTART_ACK=1 */
+ rmp->rx_interrupt_count = 0;
+ SMP2P_SET_RESTART_DONE(rhdr->flags, 1);
+ rmp->tx_interrupt();
+ UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_DONE(lhdr->flags));
+ UT_ASSERT_INT(1, ==, SMP2P_GET_RESTART_ACK(lhdr->flags));
+ UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
+
+ /* deasserting RESTART_DONE must clear the ack as well */
+ rmp->rx_interrupt_count = 0;
+ SMP2P_SET_RESTART_DONE(rhdr->flags, 0);
+ rmp->tx_interrupt();
+ UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_DONE(lhdr->flags));
+ UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_ACK(lhdr->flags));
+ UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
+
+ seq_puts(s, "\tOK\n");
+ } while (0);
+
+ if (failed) {
+ pr_err("%s: Failed\n", __func__);
+ seq_puts(s, "\tFailed\n");
+ }
+}
+
+/**
+ * smp2p_ut_remotesubsys_ssr_ack - Verify SSR Done/ACK Feature for one subsystem.
+ *
+ * @s: pointer to output file
+ * @rpid: Remote processor ID
+ * @int_cfg: Interrupt config
+ */
+static void smp2p_ut_remotesubsys_ssr_ack(struct seq_file *s, uint32_t rpid,
+ struct smp2p_interrupt_config *int_cfg)
+{
+ int failed = 0;
+
+ seq_printf(s, "Running %s\n", __func__);
+ do {
+ struct smp2p_smem *rhdr;
+ struct smp2p_smem *lhdr;
+ int negotiation_state;
+ bool ssr_ack_enabled;
+ uint32_t ssr_done_start;
+
+ lhdr = smp2p_get_out_item(rpid, &negotiation_state);
+ UT_ASSERT_PTR(NULL, !=, lhdr);
+ UT_ASSERT_INT(SMP2P_EDGE_STATE_OPENED, ==, negotiation_state);
+
+ rhdr = smp2p_get_in_item(rpid);
+ UT_ASSERT_PTR(NULL, !=, rhdr);
+
+ /* get initial state of SSR flags */
+ if (SMP2P_GET_FEATURES(rhdr->feature_version)
+ & SMP2P_FEATURE_SSR_ACK)
+ ssr_ack_enabled = true;
+ else
+ ssr_ack_enabled = false;
+
+ /* our ack must currently mirror the remote's done bit */
+ ssr_done_start = SMP2P_GET_RESTART_DONE(rhdr->flags);
+ UT_ASSERT_INT(ssr_done_start, ==,
+ SMP2P_GET_RESTART_ACK(lhdr->flags));
+
+ /* trigger restart */
+ seq_printf(s, "Restarting '%s'\n", int_cfg->name);
+ subsystem_restart(int_cfg->name);
+ /* NOTE(review): fixed 10 s wait for the restart to complete;
+  * not event-driven, so a slow restart could race the checks.
+  */
+ msleep(10*1000);
+
+ /* verify ack signaling */
+ if (ssr_ack_enabled) {
+ /* done/ack bits toggle (XOR) on every restart cycle */
+ ssr_done_start ^= 1;
+ UT_ASSERT_INT(ssr_done_start, ==,
+ SMP2P_GET_RESTART_ACK(lhdr->flags));
+ UT_ASSERT_INT(ssr_done_start, ==,
+ SMP2P_GET_RESTART_DONE(rhdr->flags));
+ UT_ASSERT_INT(0, ==,
+ SMP2P_GET_RESTART_DONE(lhdr->flags));
+ seq_puts(s, "\tSSR ACK Enabled and Toggled\n");
+ } else {
+ /* feature off: all done/ack bits must stay clear */
+ UT_ASSERT_INT(0, ==,
+ SMP2P_GET_RESTART_DONE(lhdr->flags));
+ UT_ASSERT_INT(0, ==,
+ SMP2P_GET_RESTART_ACK(lhdr->flags));
+
+ UT_ASSERT_INT(0, ==,
+ SMP2P_GET_RESTART_DONE(rhdr->flags));
+ UT_ASSERT_INT(0, ==,
+ SMP2P_GET_RESTART_ACK(rhdr->flags));
+ seq_puts(s, "\tSSR ACK Disabled\n");
+ }
+
+ seq_puts(s, "\tOK\n");
+ } while (0);
+
+ if (failed) {
+ pr_err("%s: Failed\n", __func__);
+ seq_puts(s, "\tFailed\n");
+ }
+}
+
+/**
+ * smp2p_ut_remote_ssr_ack - Verify SSR Done/ACK Feature
+ *
+ * @s: pointer to output file
+ *
+ * Triggers SSR for each subsystem.
+ */
+static void smp2p_ut_remote_ssr_ack(struct seq_file *s)
+{
+ struct smp2p_interrupt_config *int_cfg;
+ int pid;
+
+ int_cfg = smp2p_get_interrupt_config();
+ if (!int_cfg) {
+ seq_puts(s,
+ "Remote processor config unavailable\n");
+ return;
+ }
+
+ /* run the per-subsystem SSR ack test on every configured edge */
+ for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid) {
+ if (!int_cfg[pid].is_configured)
+ continue;
+
+ /* suspend remote loopback on this edge for the duration
+  * of the restart test, then restore it */
+ msm_smp2p_deinit_rmt_lpb_proc(pid);
+ smp2p_ut_remotesubsys_ssr_ack(s, pid, &int_cfg[pid]);
+ msm_smp2p_init_rmt_lpb_proc(pid);
+ }
+}
+
+/* Root dentry of the smp2p_test debugfs directory. */
+static struct dentry *dent;
+
+/* seq_file show wrapper: the per-file show function was stashed in
+ * ->private by single_open() below. */
+static int debugfs_show(struct seq_file *s, void *data)
+{
+ void (*show)(struct seq_file *) = s->private;
+
+ show(s);
+
+ return 0;
+}
+
+/* Bind the file's i_private (the show callback) to the seq_file. */
+static int debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debugfs_show, inode->i_private);
+}
+
+/* Read-only seq_file operations shared by all test files. */
+static const struct file_operations debug_ops = {
+ .open = debug_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+/* Create a read-only debugfs test file; @show is stored as i_private
+ * and invoked by debugfs_show() when the file is read. */
+void smp2p_debug_create(const char *name,
+ void (*show)(struct seq_file *))
+{
+ struct dentry *file;
+
+ /* NOTE(review): only the NULL return is checked; an ERR_PTR
+  * return (debugfs disabled) would pass silently — confirm. */
+ file = debugfs_create_file(name, 0444, dent, show, &debug_ops);
+ if (!file)
+ pr_err("%s: unable to create file '%s'\n", __func__, name);
+}
+
+/* Create a read/write debugfs file exposing a raw u32. */
+void smp2p_debug_create_u32(const char *name, uint32_t *value)
+{
+ struct dentry *file;
+
+ file = debugfs_create_u32(name, S_IRUGO | S_IWUSR, dent, value);
+ if (!file)
+ pr_err("%s: unable to create file '%s'\n", __func__, name);
+}
+
+/* Create the smp2p_test debugfs directory and register all unit tests.
+ * Returns 0 on success or a negative errno if the directory cannot be
+ * created.
+ */
+static int __init smp2p_debugfs_init(void)
+{
+ dent = debugfs_create_dir("smp2p_test", 0);
+ /*
+  * debugfs_create_dir() returns an ERR_PTR when debugfs is not
+  * available but NULL on allocation failure; handle both so the
+  * files below are never created against a NULL parent (which
+  * would silently land them in the debugfs root).
+  */
+ if (IS_ERR_OR_NULL(dent))
+ return dent ? PTR_ERR(dent) : -ENOMEM;
+
+ /*
+  * Add Unit Test entries.
+  *
+  * The idea with unit tests is that you can run all of them
+  * from ADB shell by doing:
+  * adb shell
+  * cat ut*
+  *
+  * And if particular tests fail, you can then repeatedly run the
+  * failing tests as you debug and resolve the failing test.
+  */
+ smp2p_debug_create("ut_local_basic",
+ smp2p_ut_local_basic);
+ smp2p_debug_create("ut_local_late_open",
+ smp2p_ut_local_late_open);
+ smp2p_debug_create("ut_local_early_open",
+ smp2p_ut_local_early_open);
+ smp2p_debug_create("ut_mock_loopback",
+ smp2p_ut_mock_loopback);
+ smp2p_debug_create("ut_remote_inout",
+ smp2p_ut_remote_inout);
+ smp2p_debug_create("ut_local_in_max_entries",
+ smp2p_ut_local_in_max_entries);
+ smp2p_debug_create("ut_remote_out_max_entries",
+ smp2p_ut_remote_out_max_entries);
+ smp2p_debug_create("ut_local_in_multiple",
+ smp2p_ut_local_in_multiple);
+ smp2p_debug_create("ut_local_ssr_ack",
+ smp2p_ut_local_ssr_ack);
+ smp2p_debug_create("ut_remote_ssr_ack",
+ smp2p_ut_remote_ssr_ack);
+
+ return 0;
+}
+module_init(smp2p_debugfs_init);
diff --git a/drivers/soc/qcom/smp2p_test_common.h b/drivers/soc/qcom/smp2p_test_common.h
new file mode 100644
index 000000000000..747a812d82c5
--- /dev/null
+++ b/drivers/soc/qcom/smp2p_test_common.h
@@ -0,0 +1,213 @@
+/* drivers/soc/qcom/smp2p_test_common.h
+ *
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SMP2P_TEST_COMMON_H_
+#define _SMP2P_TEST_COMMON_H_
+
+#include <linux/debugfs.h>
+
+/**
+ * Unit test assertion for logging test cases.
+ *
+ * @a lval
+ * @b rval
+ * @cmp comparison operator
+ *
+ * Assertion fails if (@a cmp @b) is not true which then
+ * logs the function and line number where the error occurred
+ * along with the values of @a and @b.
+ *
+ * Assumes that the following local variables exist:
+ * @s - sequential output file pointer
+ * @failed - set to true if test fails
+ */
+#define UT_ASSERT_INT(a, cmp, b) \
+ { \
+ int a_tmp = (a); \
+ int b_tmp = (b); \
+ if (!((a_tmp)cmp(b_tmp))) { \
+ seq_printf(s, "%s:%d Fail: " #a "(%d) " #cmp " " #b "(%d)\n", \
+ __func__, __LINE__, \
+ a_tmp, b_tmp); \
+ failed = 1; \
+ break; \
+ } \
+ }
+
+/* Pointer-comparison variant of UT_ASSERT_INT; logs values with %p. */
+#define UT_ASSERT_PTR(a, cmp, b) \
+ { \
+ void *a_tmp = (a); \
+ void *b_tmp = (b); \
+ if (!((a_tmp)cmp(b_tmp))) { \
+ seq_printf(s, "%s:%d Fail: " #a "(%p) " #cmp " " #b "(%p)\n", \
+ __func__, __LINE__, \
+ a_tmp, b_tmp); \
+ failed = 1; \
+ break; \
+ } \
+ }
+
+/* Unsigned variant of UT_ASSERT_INT; logs values in decimal (%u). */
+#define UT_ASSERT_UINT(a, cmp, b) \
+ { \
+ unsigned a_tmp = (a); \
+ unsigned b_tmp = (b); \
+ if (!((a_tmp)cmp(b_tmp))) { \
+ seq_printf(s, "%s:%d Fail: " #a "(%u) " #cmp " " #b "(%u)\n", \
+ __func__, __LINE__, \
+ a_tmp, b_tmp); \
+ failed = 1; \
+ break; \
+ } \
+ }
+
+/* Unsigned variant of UT_ASSERT_INT; logs values in hex (%x). */
+#define UT_ASSERT_HEX(a, cmp, b) \
+ { \
+ unsigned a_tmp = (a); \
+ unsigned b_tmp = (b); \
+ if (!((a_tmp)cmp(b_tmp))) { \
+ seq_printf(s, "%s:%d Fail: " #a "(%x) " #cmp " " #b "(%x)\n", \
+ __func__, __LINE__, \
+ a_tmp, b_tmp); \
+ failed = 1; \
+ break; \
+ } \
+ }
+
+/**
+ * In-range unit test assertion for test cases.
+ *
+ * @a lval
+ * @minv Minimum value
+ * @maxv Maximum value
+ *
+ * Assertion fails if @a is outside the inclusive range [@minv, @maxv]
+ * ((@a < @minv) or (@a > @maxv)). In the failure case, the macro
+ * logs the function and line number where the error occurred along
+ * with the values of @a and @minv, @maxv.
+ *
+ * Assumes that the following local variables exist:
+ * @s - sequential output file pointer
+ * @failed - set to true if test fails
+ */
+#define UT_ASSERT_INT_IN_RANGE(a, minv, maxv) \
+ { \
+ int a_tmp = (a); \
+ int minv_tmp = (minv); \
+ int maxv_tmp = (maxv); \
+ if (((a_tmp) < (minv_tmp)) || ((a_tmp) > (maxv_tmp))) { \
+ seq_printf(s, "%s:%d Fail: " #a "(%d) < " #minv "(%d) or " \
+ #a "(%d) > " #maxv "(%d)\n", \
+ __func__, __LINE__, \
+ a_tmp, minv_tmp, a_tmp, maxv_tmp); \
+ failed = 1; \
+ break; \
+ } \
+ }
+
+/* Structure to track state changes for the notifier callback. */
+struct mock_cb_data {
+ bool initialized; /* one-time init done (see mock_cb_data_init) */
+ spinlock_t lock; /* protects the event fields below */
+ struct notifier_block nb; /* registered with SMP2P */
+
+ /* events */
+ struct completion cb_completion; /* signaled on every callback */
+ int cb_count; /* total callbacks received */
+ int event_open; /* count of SMP2P_OPEN events */
+ int event_entry_update; /* count of SMP2P_ENTRY_UPDATE events */
+ struct msm_smp2p_update_notif entry_data; /* last event payload */
+};
+
+void smp2p_debug_create(const char *name, void (*show)(struct seq_file *));
+void smp2p_debug_create_u32(const char *name, uint32_t *value);
+static inline int smp2p_test_notify(struct notifier_block *self,
+ unsigned long event, void *data);
+
+/**
+ * Reset mock callback data to default values.
+ *
+ * @cb: Mock callback data
+ */
+static inline void mock_cb_data_reset(struct mock_cb_data *cb)
+{
+ /* clears event counters/payload only; the notifier block, lock
+  * and 'initialized' flag are preserved across resets */
+ reinit_completion(&cb->cb_completion);
+ cb->cb_count = 0;
+ cb->event_open = 0;
+ cb->event_entry_update = 0;
+ memset(&cb->entry_data, 0,
+ sizeof(struct msm_smp2p_update_notif));
+}
+
+
+/**
+ * Initialize mock callback data.
+ *
+ * @cb: Mock callback data
+ */
+static inline void mock_cb_data_init(struct mock_cb_data *cb)
+{
+ /* one-time setup guarded by 'initialized' (callers use static
+  * mock_cb_data instances); safe to call before every test run */
+ if (!cb->initialized) {
+ init_completion(&cb->cb_completion);
+ spin_lock_init(&cb->lock);
+ cb->initialized = true;
+ cb->nb.notifier_call = smp2p_test_notify;
+ memset(&cb->entry_data, 0,
+ sizeof(struct msm_smp2p_update_notif));
+ }
+ /* always reset the per-run counters */
+ mock_cb_data_reset(cb);
+}
+
+/**
+ * Notifier function passed into SMP2P for testing.
+ *
+ * @self: Pointer to calling notifier block
+ * @event: Event
+ * @data: Event-specific data
+ * @returns: 0
+ */
+static inline int smp2p_test_notify(struct notifier_block *self,
+ unsigned long event, void *data)
+{
+ struct mock_cb_data *cb_data_ptr;
+ unsigned long flags;
+
+ /* recover the owning mock_cb_data from the embedded notifier */
+ cb_data_ptr = container_of(self, struct mock_cb_data, nb);
+
+ /* irqsave: callback may run in any context */
+ spin_lock_irqsave(&cb_data_ptr->lock, flags);
+
+ switch (event) {
+ case SMP2P_OPEN:
+ ++cb_data_ptr->event_open;
+ if (data) {
+ cb_data_ptr->entry_data =
+ *(struct msm_smp2p_update_notif *)(data);
+ }
+ break;
+ case SMP2P_ENTRY_UPDATE:
+ ++cb_data_ptr->event_entry_update;
+ if (data) {
+ cb_data_ptr->entry_data =
+ *(struct msm_smp2p_update_notif *)(data);
+ }
+ break;
+ default:
+ /* unknown events are counted (cb_count) but not classified */
+ pr_err("%s Unknown event\n", __func__);
+ break;
+ }
+
+ ++cb_data_ptr->cb_count;
+ complete(&cb_data_ptr->cb_completion);
+ spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
+ return 0;
+}
+#endif /* _SMP2P_TEST_COMMON_H_ */
diff --git a/drivers/soc/qcom/smsm_debug.c b/drivers/soc/qcom/smsm_debug.c
new file mode 100644
index 000000000000..b9e42e46c888
--- /dev/null
+++ b/drivers/soc/qcom/smsm_debug.c
@@ -0,0 +1,330 @@
+/* drivers/soc/qcom/smsm_debug.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/jiffies.h>
+
+#include <soc/qcom/smem.h>
+#include <soc/qcom/smsm.h>
+
+#if defined(CONFIG_DEBUG_FS)
+
+
+/* Dump every SMSM shared-state entry as "entry N: 0xXXXXXXXX".
+ * Prints nothing (silently) if the SMEM item cannot be found. */
+static void debug_read_smsm_state(struct seq_file *s)
+{
+ uint32_t *smsm;
+ int n;
+
+ smsm = smem_find(SMEM_SMSM_SHARED_STATE,
+ SMSM_NUM_ENTRIES * sizeof(uint32_t),
+ 0,
+ SMEM_ANY_HOST_FLAG);
+
+ if (smsm)
+ for (n = 0; n < SMSM_NUM_ENTRIES; n++)
+ seq_printf(s, "entry %d: 0x%08x\n", n, smsm[n]);
+}
+
+/* Snapshot of the most recent smsm_state_cb() invocation. */
+struct SMSM_CB_DATA {
+ int cb_count; /* total callbacks seen since SMSM_CB_TEST_INIT() */
+ void *data; /* opaque cookie passed at registration */
+ uint32_t old_state;
+ uint32_t new_state;
+};
+static struct SMSM_CB_DATA smsm_cb_data;
+static struct completion smsm_cb_completion;
+
+/* SMSM state-change callback used by debug_test_smsm().
+ * NOTE(review): updates shared state without locking — assumes the
+ * single-threaded debugfs test context; confirm before reuse. */
+static void smsm_state_cb(void *data, uint32_t old_state, uint32_t new_state)
+{
+ smsm_cb_data.cb_count++;
+ smsm_cb_data.old_state = old_state;
+ smsm_cb_data.new_state = new_state;
+ smsm_cb_data.data = data;
+ complete_all(&smsm_cb_completion);
+}
+
+/* Break out of the enclosing do/while and log unless (a) == (b).
+ * Assumes a local seq_file pointer 's' is in scope. */
+#define UT_EQ_INT(a, b) \
+ { \
+ if ((a) != (b)) { \
+ seq_printf(s, "%s:%d " #a "(%d) != " #b "(%d)\n", \
+ __func__, __LINE__, \
+ a, b); \
+ break; \
+ } \
+ }
+
+/* Break out of the enclosing do/while and log unless (a) > (b). */
+#define UT_GT_INT(a, b) \
+ { \
+ if ((a) <= (b)) { \
+ seq_printf(s, "%s:%d " #a "(%d) > " #b "(%d)\n", \
+ __func__, __LINE__, \
+ a, b); \
+ break; \
+ } \
+ }
+
+/* Reset the shared smsm_cb_data snapshot before each test case. */
+#define SMSM_CB_TEST_INIT() \
+ do { \
+ smsm_cb_data.cb_count = 0; \
+ smsm_cb_data.old_state = 0; \
+ smsm_cb_data.new_state = 0; \
+ smsm_cb_data.data = 0; \
+ } while (0)
+
+
+/* Exercise smsm_state_cb_register()/deregister() on SMSM_APPS_STATE.
+ * Each test case is a do/while(0) block that the UT_* macros exit
+ * early on failure; a case prints "Test N - PASS" only if it runs to
+ * completion. */
+static void debug_test_smsm(struct seq_file *s)
+{
+ int test_num = 0;
+ int ret;
+
+ /* Test case 1 - Register new callback for notification */
+ do {
+ test_num++;
+ SMSM_CB_TEST_INIT();
+ ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_SMDINIT,
+ smsm_state_cb, (void *)0x1234);
+ UT_EQ_INT(ret, 0);
+
+ /* de-assert SMSM_SMD_INIT to trigger state update */
+ UT_EQ_INT(smsm_cb_data.cb_count, 0);
+ reinit_completion(&smsm_cb_completion);
+ smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
+ UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+
+ UT_EQ_INT(smsm_cb_data.cb_count, 1);
+ UT_EQ_INT(smsm_cb_data.old_state & SMSM_SMDINIT, SMSM_SMDINIT);
+ UT_EQ_INT(smsm_cb_data.new_state & SMSM_SMDINIT, 0x0);
+ UT_EQ_INT((int)(uintptr_t)smsm_cb_data.data, 0x1234);
+
+ /* re-assert SMSM_SMD_INIT to trigger state update */
+ reinit_completion(&smsm_cb_completion);
+ smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
+ UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+ UT_EQ_INT(smsm_cb_data.cb_count, 2);
+ UT_EQ_INT(smsm_cb_data.old_state & SMSM_SMDINIT, 0x0);
+ UT_EQ_INT(smsm_cb_data.new_state & SMSM_SMDINIT, SMSM_SMDINIT);
+
+ /* deregister callback */
+ ret = smsm_state_cb_deregister(SMSM_APPS_STATE, SMSM_SMDINIT,
+ smsm_state_cb, (void *)0x1234);
+ UT_EQ_INT(ret, 2);
+
+ /* make sure state change doesn't cause any more callbacks */
+ reinit_completion(&smsm_cb_completion);
+ smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
+ smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
+ UT_EQ_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+ UT_EQ_INT(smsm_cb_data.cb_count, 2);
+
+ seq_printf(s, "Test %d - PASS\n", test_num);
+ } while (0);
+
+ /* Test case 2 - Update already registered callback */
+ do {
+ test_num++;
+ SMSM_CB_TEST_INIT();
+ ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_SMDINIT,
+ smsm_state_cb, (void *)0x1234);
+ UT_EQ_INT(ret, 0);
+ /* ret == 1 indicates the existing registration was updated */
+ ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_INIT,
+ smsm_state_cb, (void *)0x1234);
+ UT_EQ_INT(ret, 1);
+
+ /* verify both callback bits work */
+ reinit_completion(&smsm_cb_completion);
+ UT_EQ_INT(smsm_cb_data.cb_count, 0);
+ smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
+ UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+ UT_EQ_INT(smsm_cb_data.cb_count, 1);
+ reinit_completion(&smsm_cb_completion);
+ smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
+ UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+ UT_EQ_INT(smsm_cb_data.cb_count, 2);
+
+ reinit_completion(&smsm_cb_completion);
+ smsm_change_state(SMSM_APPS_STATE, SMSM_INIT, 0x0);
+ UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+ UT_EQ_INT(smsm_cb_data.cb_count, 3);
+ reinit_completion(&smsm_cb_completion);
+ smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_INIT);
+ UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+ UT_EQ_INT(smsm_cb_data.cb_count, 4);
+
+ /* deregister 1st callback */
+ ret = smsm_state_cb_deregister(SMSM_APPS_STATE, SMSM_SMDINIT,
+ smsm_state_cb, (void *)0x1234);
+ UT_EQ_INT(ret, 1);
+ reinit_completion(&smsm_cb_completion);
+ smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
+ smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
+ UT_EQ_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+ UT_EQ_INT(smsm_cb_data.cb_count, 4);
+
+ /* SMSM_INIT bit must still be live after the partial deregister */
+ reinit_completion(&smsm_cb_completion);
+ smsm_change_state(SMSM_APPS_STATE, SMSM_INIT, 0x0);
+ UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+ UT_EQ_INT(smsm_cb_data.cb_count, 5);
+ reinit_completion(&smsm_cb_completion);
+ smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_INIT);
+ UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+ UT_EQ_INT(smsm_cb_data.cb_count, 6);
+
+ /* deregister 2nd callback */
+ ret = smsm_state_cb_deregister(SMSM_APPS_STATE, SMSM_INIT,
+ smsm_state_cb, (void *)0x1234);
+ UT_EQ_INT(ret, 2);
+
+ /* make sure state change doesn't cause any more callbacks */
+ reinit_completion(&smsm_cb_completion);
+ smsm_change_state(SMSM_APPS_STATE, SMSM_INIT, 0x0);
+ smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_INIT);
+ UT_EQ_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+ UT_EQ_INT(smsm_cb_data.cb_count, 6);
+
+ seq_printf(s, "Test %d - PASS\n", test_num);
+ } while (0);
+
+ /* Test case 3 - Two callback registrations with different data */
+ do {
+ test_num++;
+ SMSM_CB_TEST_INIT();
+ ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_SMDINIT,
+ smsm_state_cb, (void *)0x1234);
+ UT_EQ_INT(ret, 0);
+ ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_INIT,
+ smsm_state_cb, (void *)0x3456);
+ UT_EQ_INT(ret, 0);
+
+ /* verify both callbacks work */
+ reinit_completion(&smsm_cb_completion);
+ UT_EQ_INT(smsm_cb_data.cb_count, 0);
+ smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
+ UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+ UT_EQ_INT(smsm_cb_data.cb_count, 1);
+ UT_EQ_INT((int)(uintptr_t)smsm_cb_data.data, 0x1234);
+
+ reinit_completion(&smsm_cb_completion);
+ smsm_change_state(SMSM_APPS_STATE, SMSM_INIT, 0x0);
+ UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+ UT_EQ_INT(smsm_cb_data.cb_count, 2);
+ UT_EQ_INT((int)(uintptr_t)smsm_cb_data.data, 0x3456);
+
+ /* cleanup and unregister
+ * degregister in reverse to verify data field is
+ * being used
+ */
+ smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
+ smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_INIT);
+ ret = smsm_state_cb_deregister(SMSM_APPS_STATE,
+ SMSM_INIT,
+ smsm_state_cb, (void *)0x3456);
+ UT_EQ_INT(ret, 2);
+ ret = smsm_state_cb_deregister(SMSM_APPS_STATE,
+ SMSM_SMDINIT,
+ smsm_state_cb, (void *)0x1234);
+ UT_EQ_INT(ret, 2);
+
+ seq_printf(s, "Test %d - PASS\n", test_num);
+ } while (0);
+}
+
+/* Dump the per-host SMSM interrupt mask table (SMSM_NUM_ENTRIES rows x
+ * SMSM_NUM_HOSTS columns); prints nothing if the SMEM item is absent. */
+static void debug_read_intr_mask(struct seq_file *s)
+{
+ uint32_t *smsm;
+ int m, n;
+
+ smsm = smem_find(SMEM_SMSM_CPU_INTR_MASK,
+ SMSM_NUM_ENTRIES * SMSM_NUM_HOSTS * sizeof(uint32_t),
+ 0,
+ SMEM_ANY_HOST_FLAG);
+
+ if (smsm)
+ for (m = 0; m < SMSM_NUM_ENTRIES; m++) {
+ seq_printf(s, "entry %d:", m);
+ for (n = 0; n < SMSM_NUM_HOSTS; n++)
+ /* row-major layout: entry m, host n */
+ seq_printf(s, " host %d: 0x%08x",
+ n, smsm[m * SMSM_NUM_HOSTS + n]);
+ seq_puts(s, "\n");
+ }
+}
+
+/* seq_file show wrapper: ->private carries the real show callback. */
+static int debugfs_show(struct seq_file *s, void *data)
+{
+ void (*show)(struct seq_file *) = s->private;
+
+ show(s);
+
+ return 0;
+}
+
+/* Bind the file's i_private (the show callback) to the seq_file. */
+static int debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debugfs_show, inode->i_private);
+}
+
+/* Read-only seq_file operations shared by all smsm debugfs files. */
+static const struct file_operations debug_ops = {
+ .open = debug_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+/* Create one debugfs file under @dent; @show is stored as i_private
+ * and invoked by debugfs_show() on read. Failure is logged only. */
+static void debug_create(const char *name, umode_t mode,
+ struct dentry *dent,
+ void (*show)(struct seq_file *))
+{
+ struct dentry *file;
+
+ file = debugfs_create_file(name, mode, dent, show, &debug_ops);
+ if (!file)
+ pr_err("%s: unable to create file '%s'\n", __func__, name);
+}
+
+/* Create the smsm debugfs directory and its state/test files.
+ * Returns 0 on success or a negative errno if the directory cannot be
+ * created.
+ */
+static int __init smsm_debugfs_init(void)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_dir("smsm", 0);
+ /*
+  * debugfs_create_dir() returns an ERR_PTR when debugfs is not
+  * available but NULL on allocation failure; handle both so the
+  * files below are never created against a NULL parent.
+  */
+ if (IS_ERR_OR_NULL(dent))
+ return dent ? PTR_ERR(dent) : -ENOMEM;
+
+ /*
+  * Initialize the completion before exposing smsm_test: the test
+  * file may be opened (and wait on this completion) as soon as
+  * debug_create() returns.
+  */
+ init_completion(&smsm_cb_completion);
+
+ debug_create("state", 0444, dent, debug_read_smsm_state);
+ debug_create("intr_mask", 0444, dent, debug_read_intr_mask);
+ debug_create("smsm_test", 0444, dent, debug_test_smsm);
+
+ return 0;
+}
+
+late_initcall(smsm_debugfs_init);
+#endif
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
new file mode 100644
index 000000000000..d482225183b2
--- /dev/null
+++ b/drivers/soc/qcom/socinfo.c
@@ -0,0 +1,1355 @@
+/* Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * SOC Info Routines
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sys_soc.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <asm/system_misc.h>
+
+#include <soc/qcom/socinfo.h>
+#include <soc/qcom/smem.h>
+#include <soc/qcom/boot_stats.h>
+
+#define BUILD_ID_LENGTH 32
+#define SMEM_IMAGE_VERSION_BLOCKS_COUNT 32
+#define SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE 128
+#define SMEM_IMAGE_VERSION_SIZE 4096
+#define SMEM_IMAGE_VERSION_NAME_SIZE 75
+#define SMEM_IMAGE_VERSION_VARIANT_SIZE 20
+#define SMEM_IMAGE_VERSION_VARIANT_OFFSET 75
+#define SMEM_IMAGE_VERSION_OEM_SIZE 32
+#define SMEM_IMAGE_VERSION_OEM_OFFSET 96
+#define SMEM_IMAGE_VERSION_PARTITION_APPS 10
+
+/* Hardware platform IDs reported via socinfo (values come from the
+ * CDT/boot chain; gaps are intentionally unassigned). */
+enum {
+ HW_PLATFORM_UNKNOWN = 0,
+ HW_PLATFORM_SURF = 1,
+ HW_PLATFORM_FFA = 2,
+ HW_PLATFORM_FLUID = 3,
+ HW_PLATFORM_SVLTE_FFA = 4,
+ HW_PLATFORM_SVLTE_SURF = 5,
+ HW_PLATFORM_MTP = 8,
+ HW_PLATFORM_LIQUID = 9,
+ /* Dragonboard platform id is assigned as 10 in CDT */
+ HW_PLATFORM_DRAGON = 10,
+ HW_PLATFORM_QRD = 11,
+ HW_PLATFORM_HRD = 13,
+ HW_PLATFORM_DTV = 14,
+ HW_PLATFORM_STP = 23,
+ HW_PLATFORM_SBC = 24,
+ HW_PLATFORM_INVALID
+};
+
+/* Platform names exposed to userspace, indexed by the IDs above. */
+const char *hw_platform[] = {
+ [HW_PLATFORM_UNKNOWN] = "Unknown",
+ [HW_PLATFORM_SURF] = "Surf",
+ [HW_PLATFORM_FFA] = "FFA",
+ [HW_PLATFORM_FLUID] = "Fluid",
+ [HW_PLATFORM_SVLTE_FFA] = "SVLTE_FFA",
+ /* NOTE(review): "SLVTE" looks transposed vs. the enum ("SVLTE");
+  * string is userspace-visible, so left unchanged — confirm. */
+ [HW_PLATFORM_SVLTE_SURF] = "SLVTE_SURF",
+ [HW_PLATFORM_MTP] = "MTP",
+ [HW_PLATFORM_LIQUID] = "Liquid",
+ [HW_PLATFORM_DRAGON] = "Dragon",
+ [HW_PLATFORM_QRD] = "QRD",
+ [HW_PLATFORM_HRD] = "HRD",
+ [HW_PLATFORM_DTV] = "DTV",
+ [HW_PLATFORM_STP] = "STP",
+ [HW_PLATFORM_SBC] = "SBC",
+};
+
+/* Accessory chip IDs (socinfo v5 accessory_chip field). */
+enum {
+ ACCESSORY_CHIP_UNKNOWN = 0,
+ ACCESSORY_CHIP_CHARM = 58,
+};
+
+/* Platform subtypes used when the platform is HW_PLATFORM_QRD. */
+enum {
+ PLATFORM_SUBTYPE_QRD = 0x0,
+ PLATFORM_SUBTYPE_SKUAA = 0x1,
+ PLATFORM_SUBTYPE_SKUF = 0x2,
+ PLATFORM_SUBTYPE_SKUAB = 0x3,
+ /* 0x4 intentionally unassigned */
+ PLATFORM_SUBTYPE_SKUG = 0x5,
+ PLATFORM_SUBTYPE_QRD_INVALID,
+};
+
+/* QRD subtype names, indexed by the enum above. Note: index 0x4 is a
+ * gap and therefore a NULL entry in this array. */
+const char *qrd_hw_platform_subtype[] = {
+ [PLATFORM_SUBTYPE_QRD] = "QRD",
+ [PLATFORM_SUBTYPE_SKUAA] = "SKUAA",
+ [PLATFORM_SUBTYPE_SKUF] = "SKUF",
+ [PLATFORM_SUBTYPE_SKUAB] = "SKUAB",
+ [PLATFORM_SUBTYPE_SKUG] = "SKUG",
+ [PLATFORM_SUBTYPE_QRD_INVALID] = "INVALID",
+};
+
+/* Platform subtypes for non-QRD platforms. */
+enum {
+ PLATFORM_SUBTYPE_UNKNOWN = 0x0,
+ PLATFORM_SUBTYPE_CHARM = 0x1,
+ PLATFORM_SUBTYPE_STRANGE = 0x2,
+ PLATFORM_SUBTYPE_STRANGE_2A = 0x3,
+ PLATFORM_SUBTYPE_INVALID,
+};
+
+/* Subtype names exposed to userspace, indexed by the enum above. */
+const char *hw_platform_subtype[] = {
+ [PLATFORM_SUBTYPE_UNKNOWN] = "Unknown",
+ [PLATFORM_SUBTYPE_CHARM] = "charm",
+ [PLATFORM_SUBTYPE_STRANGE] = "strange",
+ /*
+  * Fix: the initializer's separator comma was inside the string
+  * literal ("strange_2a,"), so the reported subtype name carried
+  * a stray trailing comma.
+  */
+ [PLATFORM_SUBTYPE_STRANGE_2A] = "strange_2a",
+};
+
+/* Used to parse shared memory. Must match the modem. */
+struct socinfo_v1 {
+ uint32_t format; /* structure version; gates which fields are valid */
+ uint32_t id; /* SoC id, index into cpu_of_id[] */
+ uint32_t version;
+ char build_id[BUILD_ID_LENGTH];
+};
+
+struct socinfo_v2 {
+ struct socinfo_v1 v1;
+
+ /* only valid when format==2 */
+ uint32_t raw_id;
+ uint32_t raw_version;
+};
+
+struct socinfo_v3 {
+ struct socinfo_v2 v2;
+
+ /* only valid when format==3 */
+ uint32_t hw_platform;
+};
+
+struct socinfo_v4 {
+ struct socinfo_v3 v3;
+
+ /* only valid when format==4 */
+ uint32_t platform_version;
+};
+
+struct socinfo_v5 {
+ struct socinfo_v4 v4;
+
+ /* only valid when format==5 */
+ uint32_t accessory_chip;
+};
+
+struct socinfo_v6 {
+ struct socinfo_v5 v5;
+
+ /* only valid when format==6 */
+ uint32_t hw_platform_subtype;
+};
+
+struct socinfo_v7 {
+ struct socinfo_v6 v6;
+
+ /* only valid when format==7 */
+ uint32_t pmic_model;
+ uint32_t pmic_die_revision;
+};
+
+struct socinfo_v8 {
+ struct socinfo_v7 v7;
+
+ /* only valid when format==8*/
+ uint32_t pmic_model_1;
+ uint32_t pmic_die_revision_1;
+ uint32_t pmic_model_2;
+ uint32_t pmic_die_revision_2;
+};
+
+struct socinfo_v9 {
+ struct socinfo_v8 v8;
+
+ /* only valid when format==9*/
+ uint32_t foundry_id;
+};
+
+/* View of the shared-memory socinfo item; each version struct embeds
+ * its predecessor, and only fields covered by v1.format are valid. */
+static union {
+ struct socinfo_v1 v1;
+ struct socinfo_v2 v2;
+ struct socinfo_v3 v3;
+ struct socinfo_v4 v4;
+ struct socinfo_v5 v5;
+ struct socinfo_v6 v6;
+ struct socinfo_v7 v7;
+ struct socinfo_v8 v8;
+ struct socinfo_v9 v9;
+} *socinfo;
+
/*
 * Map from the SMEM-reported SoC id (array index) to the generic CPU
 * type and marketing name.  Indexed directly by socinfo->v1.id in
 * socinfo_init() and msm_read_hardware_id(); ids beyond the array
 * bound are rejected in socinfo_init().
 */
static struct msm_soc_info cpu_of_id[] = {

	/* 7x01 IDs */
	[0] = {MSM_CPU_UNKNOWN, "Unknown CPU"},
	[1] = {MSM_CPU_7X01, "MSM7X01"},
	[16] = {MSM_CPU_7X01, "MSM7X01"},
	[17] = {MSM_CPU_7X01, "MSM7X01"},
	[18] = {MSM_CPU_7X01, "MSM7X01"},
	[19] = {MSM_CPU_7X01, "MSM7X01"},
	[23] = {MSM_CPU_7X01, "MSM7X01"},
	[25] = {MSM_CPU_7X01, "MSM7X01"},
	[26] = {MSM_CPU_7X01, "MSM7X01"},
	[32] = {MSM_CPU_7X01, "MSM7X01"},
	[33] = {MSM_CPU_7X01, "MSM7X01"},
	[34] = {MSM_CPU_7X01, "MSM7X01"},
	[35] = {MSM_CPU_7X01, "MSM7X01"},

	/* 7x25 IDs */
	[20] = {MSM_CPU_7X25, "MSM7X25"},
	[21] = {MSM_CPU_7X25, "MSM7X25"},
	[24] = {MSM_CPU_7X25, "MSM7X25"},
	[27] = {MSM_CPU_7X25, "MSM7X25"},
	[39] = {MSM_CPU_7X25, "MSM7X25"},
	[40] = {MSM_CPU_7X25, "MSM7X25"},
	[41] = {MSM_CPU_7X25, "MSM7X25"},
	[42] = {MSM_CPU_7X25, "MSM7X25"},
	[62] = {MSM_CPU_7X25, "MSM7X25"},
	[63] = {MSM_CPU_7X25, "MSM7X25"},
	[66] = {MSM_CPU_7X25, "MSM7X25"},


	/* 7x27 IDs */
	[43] = {MSM_CPU_7X27, "MSM7X27"},
	[44] = {MSM_CPU_7X27, "MSM7X27"},
	[61] = {MSM_CPU_7X27, "MSM7X27"},
	[67] = {MSM_CPU_7X27, "MSM7X27"},
	[68] = {MSM_CPU_7X27, "MSM7X27"},
	[69] = {MSM_CPU_7X27, "MSM7X27"},


	/* 8x50 IDs */
	[30] = {MSM_CPU_8X50, "MSM8X50"},
	[36] = {MSM_CPU_8X50, "MSM8X50"},
	[37] = {MSM_CPU_8X50, "MSM8X50"},
	[38] = {MSM_CPU_8X50, "MSM8X50"},

	/* 7x30 IDs */
	[59] = {MSM_CPU_7X30, "MSM7X30"},
	[60] = {MSM_CPU_7X30, "MSM7X30"},

	/* 8x55 IDs */
	[74] = {MSM_CPU_8X55, "MSM8X55"},
	[75] = {MSM_CPU_8X55, "MSM8X55"},
	[85] = {MSM_CPU_8X55, "MSM8X55"},

	/* 8x60 IDs */
	[70] = {MSM_CPU_8X60, "MSM8X60"},
	[71] = {MSM_CPU_8X60, "MSM8X60"},
	[86] = {MSM_CPU_8X60, "MSM8X60"},

	/* 8960 IDs */
	[87] = {MSM_CPU_8960, "MSM8960"},

	/* 7x25A IDs */
	[88] = {MSM_CPU_7X25A, "MSM7X25A"},
	[89] = {MSM_CPU_7X25A, "MSM7X25A"},
	[96] = {MSM_CPU_7X25A, "MSM7X25A"},

	/* 7x27A IDs */
	[90] = {MSM_CPU_7X27A, "MSM7X27A"},
	[91] = {MSM_CPU_7X27A, "MSM7X27A"},
	[92] = {MSM_CPU_7X27A, "MSM7X27A"},
	[97] = {MSM_CPU_7X27A, "MSM7X27A"},

	/* FSM9xxx ID */
	[94] = {FSM_CPU_9XXX, "FSM9XXX"},
	[95] = {FSM_CPU_9XXX, "FSM9XXX"},

	/* 7x25AA ID */
	[98] = {MSM_CPU_7X25AA, "MSM7X25AA"},
	[99] = {MSM_CPU_7X25AA, "MSM7X25AA"},
	[100] = {MSM_CPU_7X25AA, "MSM7X25AA"},

	/* 7x27AA ID */
	[101] = {MSM_CPU_7X27AA, "MSM7X27AA"},
	[102] = {MSM_CPU_7X27AA, "MSM7X27AA"},
	[103] = {MSM_CPU_7X27AA, "MSM7X27AA"},
	[136] = {MSM_CPU_7X27AA, "MSM7X27AA"},

	/* 9x15 ID */
	[104] = {MSM_CPU_9615, "MSM9615"},
	[105] = {MSM_CPU_9615, "MSM9615"},
	[106] = {MSM_CPU_9615, "MSM9615"},
	[107] = {MSM_CPU_9615, "MSM9615"},
	[171] = {MSM_CPU_9615, "MSM9615"},

	/* 8064 IDs */
	[109] = {MSM_CPU_8064, "APQ8064"},

	/* 8930 IDs */
	[116] = {MSM_CPU_8930, "MSM8930"},
	[117] = {MSM_CPU_8930, "MSM8930"},
	[118] = {MSM_CPU_8930, "MSM8930"},
	[119] = {MSM_CPU_8930, "MSM8930"},
	[179] = {MSM_CPU_8930, "MSM8930"},

	/* 8627 IDs */
	[120] = {MSM_CPU_8627, "MSM8627"},
	[121] = {MSM_CPU_8627, "MSM8627"},

	/* 8660A ID */
	[122] = {MSM_CPU_8960, "MSM8960"},

	/* 8260A ID */
	[123] = {MSM_CPU_8960, "MSM8960"},

	/* 8060A ID */
	[124] = {MSM_CPU_8960, "MSM8960"},

	/* 8974 IDs */
	[126] = {MSM_CPU_8974, "MSM8974"},
	[184] = {MSM_CPU_8974, "MSM8974"},
	[185] = {MSM_CPU_8974, "MSM8974"},
	[186] = {MSM_CPU_8974, "MSM8974"},

	/* 8974AA IDs */
	[208] = {MSM_CPU_8974PRO_AA, "MSM8974PRO-AA"},
	[211] = {MSM_CPU_8974PRO_AA, "MSM8974PRO-AA"},
	[214] = {MSM_CPU_8974PRO_AA, "MSM8974PRO-AA"},
	[217] = {MSM_CPU_8974PRO_AA, "MSM8974PRO-AA"},

	/* 8974AB IDs */
	[209] = {MSM_CPU_8974PRO_AB, "MSM8974PRO-AB"},
	[212] = {MSM_CPU_8974PRO_AB, "MSM8974PRO-AB"},
	[215] = {MSM_CPU_8974PRO_AB, "MSM8974PRO-AB"},
	[218] = {MSM_CPU_8974PRO_AB, "MSM8974PRO-AB"},

	/* 8974AC IDs */
	[194] = {MSM_CPU_8974PRO_AC, "MSM8974PRO-AC"},
	[210] = {MSM_CPU_8974PRO_AC, "MSM8974PRO-AC"},
	[213] = {MSM_CPU_8974PRO_AC, "MSM8974PRO-AC"},
	[216] = {MSM_CPU_8974PRO_AC, "MSM8974PRO-AC"},

	/* 8625 IDs */
	[127] = {MSM_CPU_8625, "MSM8625"},
	[128] = {MSM_CPU_8625, "MSM8625"},
	[129] = {MSM_CPU_8625, "MSM8625"},
	[137] = {MSM_CPU_8625, "MSM8625"},
	[167] = {MSM_CPU_8625, "MSM8625"},

	/* 8064 MPQ ID */
	[130] = {MSM_CPU_8064, "APQ8064"},

	/* 7x25AB IDs */
	[131] = {MSM_CPU_7X25AB, "MSM7X25AB"},
	[132] = {MSM_CPU_7X25AB, "MSM7X25AB"},
	[133] = {MSM_CPU_7X25AB, "MSM7X25AB"},
	[135] = {MSM_CPU_7X25AB, "MSM7X25AB"},

	/* 9625 IDs */
	[134] = {MSM_CPU_9625, "MSM9625"},
	[148] = {MSM_CPU_9625, "MSM9625"},
	[149] = {MSM_CPU_9625, "MSM9625"},
	[150] = {MSM_CPU_9625, "MSM9625"},
	[151] = {MSM_CPU_9625, "MSM9625"},
	[152] = {MSM_CPU_9625, "MSM9625"},
	[173] = {MSM_CPU_9625, "MSM9625"},
	[174] = {MSM_CPU_9625, "MSM9625"},
	[175] = {MSM_CPU_9625, "MSM9625"},

	/* 8960AB IDs */
	[138] = {MSM_CPU_8960AB, "MSM8960AB"},
	[139] = {MSM_CPU_8960AB, "MSM8960AB"},
	[140] = {MSM_CPU_8960AB, "MSM8960AB"},
	[141] = {MSM_CPU_8960AB, "MSM8960AB"},

	/* 8930AA IDs */
	[142] = {MSM_CPU_8930AA, "MSM8930AA"},
	[143] = {MSM_CPU_8930AA, "MSM8930AA"},
	[144] = {MSM_CPU_8930AA, "MSM8930AA"},
	[160] = {MSM_CPU_8930AA, "MSM8930AA"},
	[180] = {MSM_CPU_8930AA, "MSM8930AA"},

	/* 8226 IDs */
	[145] = {MSM_CPU_8226, "MSM8626"},
	[158] = {MSM_CPU_8226, "MSM8226"},
	[159] = {MSM_CPU_8226, "MSM8526"},
	[198] = {MSM_CPU_8226, "MSM8126"},
	[199] = {MSM_CPU_8226, "APQ8026"},
	[200] = {MSM_CPU_8226, "MSM8926"},
	[205] = {MSM_CPU_8226, "MSM8326"},
	[219] = {MSM_CPU_8226, "APQ8028"},
	[220] = {MSM_CPU_8226, "MSM8128"},
	[221] = {MSM_CPU_8226, "MSM8228"},
	[222] = {MSM_CPU_8226, "MSM8528"},
	[223] = {MSM_CPU_8226, "MSM8628"},
	[224] = {MSM_CPU_8226, "MSM8928"},

	/* 8092 IDs */
	[146] = {MSM_CPU_8092, "MPQ8092"},

	/* 8610 IDs */
	[147] = {MSM_CPU_8610, "MSM8610"},
	[161] = {MSM_CPU_8610, "MSM8110"},
	[162] = {MSM_CPU_8610, "MSM8210"},
	[163] = {MSM_CPU_8610, "MSM8810"},
	[164] = {MSM_CPU_8610, "MSM8212"},
	[165] = {MSM_CPU_8610, "MSM8612"},
	[166] = {MSM_CPU_8610, "MSM8112"},
	[225] = {MSM_CPU_8610, "MSM8510"},
	[226] = {MSM_CPU_8610, "MSM8512"},

	/* 8064AB IDs */
	[153] = {MSM_CPU_8064AB, "APQ8064AB"},

	/* 8930AB IDs */
	[154] = {MSM_CPU_8930AB, "MSM8930AB"},
	[155] = {MSM_CPU_8930AB, "MSM8930AB"},
	[156] = {MSM_CPU_8930AB, "MSM8930AB"},
	[157] = {MSM_CPU_8930AB, "MSM8930AB"},
	[181] = {MSM_CPU_8930AB, "MSM8930AB"},

	/* 8625Q IDs */
	[168] = {MSM_CPU_8625Q, "MSM8225Q"},
	[169] = {MSM_CPU_8625Q, "MSM8625Q"},
	[170] = {MSM_CPU_8625Q, "MSM8125Q"},

	/* 8064AA IDs */
	[172] = {MSM_CPU_8064AA, "APQ8064AA"},

	/* 8084 IDs */
	[178] = {MSM_CPU_8084, "APQ8084"},

	/* 9630 IDs */
	[187] = {MSM_CPU_9630, "MDM9630"},
	[227] = {MSM_CPU_9630, "MDM9630"},
	[228] = {MSM_CPU_9630, "MDM9630"},
	[229] = {MSM_CPU_9630, "MDM9630"},
	[230] = {MSM_CPU_9630, "MDM9630"},
	[231] = {MSM_CPU_9630, "MDM9630"},

	/* FSM9900 ID */
	[188] = {FSM_CPU_9900, "FSM9900"},
	[189] = {FSM_CPU_9900, "FSM9900"},
	[190] = {FSM_CPU_9900, "FSM9900"},
	[191] = {FSM_CPU_9900, "FSM9900"},
	[192] = {FSM_CPU_9900, "FSM9900"},
	[193] = {FSM_CPU_9900, "FSM9900"},

	/* 8916 IDs */
	[206] = {MSM_CPU_8916, "MSM8916"},
	[247] = {MSM_CPU_8916, "APQ8016"},
	[248] = {MSM_CPU_8916, "MSM8216"},
	[249] = {MSM_CPU_8916, "MSM8116"},
	[250] = {MSM_CPU_8916, "MSM8616"},

	/* 8936 IDs */
	[233] = {MSM_CPU_8936, "MSM8936"},

	/* 8939 IDs */
	[239] = {MSM_CPU_8939, "MSM8939"},

	/* ZIRC IDs */
	[234] = {MSM_CPU_ZIRC, "MSMZIRC"},
	[235] = {MSM_CPU_ZIRC, "MSMZIRC"},
	[236] = {MSM_CPU_ZIRC, "MSMZIRC"},
	[237] = {MSM_CPU_ZIRC, "MSMZIRC"},
	[238] = {MSM_CPU_ZIRC, "MSMZIRC"},

	/* Uninitialized IDs are not known to run Linux.
	   MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
	   considered as unknown CPU. */
};
+
/* Generic CPU type resolved once by socinfo_init() from cpu_of_id[]. */
static enum msm_cpu cur_cpu;
/* Image-version table block currently selected via the select_image sysfs file. */
static int current_image;

/* Minimal (format-1) record used when SMEM carries no socinfo entry. */
static struct socinfo_v1 dummy_socinfo = {
	.format = 1,
	.version = 1,
};
+
+uint32_t socinfo_get_id(void)
+{
+ return (socinfo) ? socinfo->v1.id : 0;
+}
+EXPORT_SYMBOL_GPL(socinfo_get_id);
+
+uint32_t socinfo_get_version(void)
+{
+ return (socinfo) ? socinfo->v1.version : 0;
+}
+
+char *socinfo_get_build_id(void)
+{
+ return (socinfo) ? socinfo->v1.build_id : NULL;
+}
+
+static char *msm_read_hardware_id(void)
+{
+ static char msm_soc_str[256] = "Qualcomm Technologies, Inc ";
+ static bool string_generated;
+ int ret = 0;
+
+ if (string_generated)
+ return msm_soc_str;
+ if (!socinfo)
+ goto err_path;
+ if (!cpu_of_id[socinfo->v1.id].soc_id_string)
+ goto err_path;
+
+ ret = strlcat(msm_soc_str, cpu_of_id[socinfo->v1.id].soc_id_string,
+ sizeof(msm_soc_str));
+ if (ret > sizeof(msm_soc_str))
+ goto err_path;
+
+ string_generated = true;
+ return msm_soc_str;
+err_path:
+ return "UNKNOWN SOC TYPE";
+}
+
+uint32_t socinfo_get_raw_id(void)
+{
+ return socinfo ?
+ (socinfo->v1.format >= 2 ? socinfo->v2.raw_id : 0)
+ : 0;
+}
+
+uint32_t socinfo_get_raw_version(void)
+{
+ return socinfo ?
+ (socinfo->v1.format >= 2 ? socinfo->v2.raw_version : 0)
+ : 0;
+}
+
+uint32_t socinfo_get_platform_type(void)
+{
+ return socinfo ?
+ (socinfo->v1.format >= 3 ? socinfo->v3.hw_platform : 0)
+ : 0;
+}
+
+
+uint32_t socinfo_get_platform_version(void)
+{
+ return socinfo ?
+ (socinfo->v1.format >= 4 ? socinfo->v4.platform_version : 0)
+ : 0;
+}
+
+/* This information is directly encoded by the machine id */
+/* Thus no external callers rely on this information at the moment */
+static uint32_t socinfo_get_accessory_chip(void)
+{
+ return socinfo ?
+ (socinfo->v1.format >= 5 ? socinfo->v5.accessory_chip : 0)
+ : 0;
+}
+
+uint32_t socinfo_get_platform_subtype(void)
+{
+ return socinfo ?
+ (socinfo->v1.format >= 6 ? socinfo->v6.hw_platform_subtype : 0)
+ : 0;
+}
+
+static uint32_t socinfo_get_foundry_id(void)
+{
+ return socinfo ?
+ (socinfo->v1.format >= 9 ? socinfo->v9.foundry_id : 0)
+ : 0;
+}
+
+enum pmic_model socinfo_get_pmic_model(void)
+{
+ return socinfo ?
+ (socinfo->v1.format >= 7 ? socinfo->v7.pmic_model
+ : PMIC_MODEL_UNKNOWN)
+ : PMIC_MODEL_UNKNOWN;
+}
+
+uint32_t socinfo_get_pmic_die_revision(void)
+{
+ return socinfo ?
+ (socinfo->v1.format >= 7 ? socinfo->v7.pmic_die_revision : 0)
+ : 0;
+}
+
/*
 * Locate the image-version table in SMEM.  May return an ERR_PTR or
 * NULL; every caller checks with IS_ERR_OR_NULL().
 */
static char *socinfo_get_image_version_base_address(void)
{
	return smem_find(SMEM_IMAGE_VERSION_TABLE,
			SMEM_IMAGE_VERSION_SIZE, 0, SMEM_ANY_HOST_FLAG);
}
+
+static uint32_t socinfo_get_format(void)
+{
+ return socinfo ? socinfo->v1.format : 0;
+}
+
+enum msm_cpu socinfo_get_msm_cpu(void)
+{
+ return cur_cpu;
+}
+EXPORT_SYMBOL_GPL(socinfo_get_msm_cpu);
+
+static ssize_t
+msm_get_vendor(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "Qualcomm\n");
+}
+
+static ssize_t
+msm_get_raw_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ socinfo_get_raw_id());
+}
+
+static ssize_t
+msm_get_raw_version(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ socinfo_get_raw_version());
+}
+
+static ssize_t
+msm_get_build_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%-.32s\n",
+ socinfo_get_build_id());
+}
+
+static ssize_t
+msm_get_hw_platform(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ uint32_t hw_type;
+ hw_type = socinfo_get_platform_type();
+
+ return snprintf(buf, PAGE_SIZE, "%-.32s\n",
+ hw_platform[hw_type]);
+}
+
+static ssize_t
+msm_get_platform_version(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ socinfo_get_platform_version());
+}
+
+static ssize_t
+msm_get_accessory_chip(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ socinfo_get_accessory_chip());
+}
+
/*
 * sysfs show: platform subtype name.  QRD boards use their own name
 * table (bounds-checked); all other platforms use the generic one.
 * NOTE(review): the generic hw_platform_subtype[] lookup is NOT
 * bounds-checked, unlike the QRD path - verify table size vs SMEM values.
 */
static ssize_t
msm_get_platform_subtype(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	uint32_t hw_subtype;
	hw_subtype = socinfo_get_platform_subtype();
	if (HW_PLATFORM_QRD == socinfo_get_platform_type()) {
		/* clamp out-of-range QRD subtypes to the "invalid" entry */
		if (hw_subtype >= PLATFORM_SUBTYPE_QRD_INVALID) {
			pr_err("%s: Invalid hardware platform sub type for qrd found\n",
				__func__);
			hw_subtype = PLATFORM_SUBTYPE_QRD_INVALID;
		}
		return snprintf(buf, PAGE_SIZE, "%-.32s\n",
					qrd_hw_platform_subtype[hw_subtype]);
	}

	return snprintf(buf, PAGE_SIZE, "%-.32s\n",
		hw_platform_subtype[hw_subtype]);
}
+
+static ssize_t
+msm_get_platform_subtype_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ uint32_t hw_subtype;
+ hw_subtype = socinfo_get_platform_subtype();
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ hw_subtype);
+}
+
+static ssize_t
+msm_get_foundry_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ socinfo_get_foundry_id());
+}
+
+static ssize_t
+msm_get_pmic_model(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ socinfo_get_pmic_model());
+}
+
+static ssize_t
+msm_get_pmic_die_revision(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ socinfo_get_pmic_die_revision());
+}
+
/*
 * sysfs show: version string of the currently selected image block.
 * Reads from the SMEM image-version table at
 * current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE.
 */
static ssize_t
msm_get_image_version(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	char *string_address;

	string_address = socinfo_get_image_version_base_address();
	if (IS_ERR_OR_NULL(string_address)) {
		pr_err("%s : Failed to get image version base address",
				__func__);
		return snprintf(buf, SMEM_IMAGE_VERSION_NAME_SIZE, "Unknown");
	}
	string_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
	return snprintf(buf, SMEM_IMAGE_VERSION_NAME_SIZE, "%-.75s\n",
			string_address);
}
+
/*
 * sysfs store: write the version string of the selected image block.
 * Only the APPS partition's entry may be written from Linux; all other
 * selections are silently accepted without effect.
 */
static ssize_t
msm_set_image_version(struct device *dev,
			struct device_attribute *attr,
			const char *buf,
			size_t count)
{
	char *store_address;

	if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS)
		return count;
	store_address = socinfo_get_image_version_base_address();
	if (IS_ERR_OR_NULL(store_address)) {
		pr_err("%s : Failed to get image version base address",
				__func__);
		return count;
	}
	store_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
	snprintf(store_address, SMEM_IMAGE_VERSION_NAME_SIZE, "%-.75s", buf);
	return count;
}
+
/*
 * sysfs show: variant string of the selected image block, located at
 * SMEM_IMAGE_VERSION_VARIANT_OFFSET within the block.
 */
static ssize_t
msm_get_image_variant(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	char *string_address;

	string_address = socinfo_get_image_version_base_address();
	if (IS_ERR_OR_NULL(string_address)) {
		pr_err("%s : Failed to get image version base address",
				__func__);
		return snprintf(buf, SMEM_IMAGE_VERSION_VARIANT_SIZE,
		"Unknown");
	}
	string_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
	string_address += SMEM_IMAGE_VERSION_VARIANT_OFFSET;
	return snprintf(buf, SMEM_IMAGE_VERSION_VARIANT_SIZE, "%-.20s\n",
			string_address);
}
+
/*
 * sysfs store: write the variant string of the selected image block.
 * Restricted to the APPS partition, like msm_set_image_version().
 */
static ssize_t
msm_set_image_variant(struct device *dev,
			struct device_attribute *attr,
			const char *buf,
			size_t count)
{
	char *store_address;

	if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS)
		return count;
	store_address = socinfo_get_image_version_base_address();
	if (IS_ERR_OR_NULL(store_address)) {
		pr_err("%s : Failed to get image version base address",
				__func__);
		return count;
	}
	store_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
	store_address += SMEM_IMAGE_VERSION_VARIANT_OFFSET;
	snprintf(store_address, SMEM_IMAGE_VERSION_VARIANT_SIZE, "%-.20s", buf);
	return count;
}
+
/*
 * sysfs show: OEM/CRM version string of the selected image block,
 * located at SMEM_IMAGE_VERSION_OEM_OFFSET within the block.
 */
static ssize_t
msm_get_image_crm_version(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	char *string_address;

	string_address = socinfo_get_image_version_base_address();
	if (IS_ERR_OR_NULL(string_address)) {
		pr_err("%s : Failed to get image version base address",
				__func__);
		return snprintf(buf, SMEM_IMAGE_VERSION_OEM_SIZE, "Unknown");
	}
	string_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
	string_address += SMEM_IMAGE_VERSION_OEM_OFFSET;
	return snprintf(buf, SMEM_IMAGE_VERSION_OEM_SIZE, "%-.32s\n",
			string_address);
}
+
/*
 * sysfs store: write the OEM/CRM version string of the selected image
 * block.  Restricted to the APPS partition.
 */
static ssize_t
msm_set_image_crm_version(struct device *dev,
			struct device_attribute *attr,
			const char *buf,
			size_t count)
{
	char *store_address;

	if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS)
		return count;
	store_address = socinfo_get_image_version_base_address();
	if (IS_ERR_OR_NULL(store_address)) {
		pr_err("%s : Failed to get image version base address",
				__func__);
		return count;
	}
	store_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
	store_address += SMEM_IMAGE_VERSION_OEM_OFFSET;
	snprintf(store_address, SMEM_IMAGE_VERSION_OEM_SIZE, "%-.32s", buf);
	return count;
}
+
+static ssize_t
+msm_get_image_number(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ current_image);
+}
+
+static ssize_t
+msm_select_image(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret, digit;
+
+ ret = kstrtoint(buf, 10, &digit);
+ if (ret)
+ return ret;
+ if (0 <= digit && digit < SMEM_IMAGE_VERSION_BLOCKS_COUNT)
+ current_image = digit;
+ else
+ current_image = 0;
+ return count;
+}
+
+
/*
 * sysfs attribute definitions for the SoC device.  Read-only socinfo
 * attributes use S_IRUGO; the image_* attributes are also writable by
 * root (S_IWUSR) so the APPS image strings can be filled in at boot.
 */
static struct device_attribute msm_soc_attr_raw_version =
	__ATTR(raw_version, S_IRUGO, msm_get_raw_version, NULL);

static struct device_attribute msm_soc_attr_raw_id =
	__ATTR(raw_id, S_IRUGO, msm_get_raw_id, NULL);

static struct device_attribute msm_soc_attr_vendor =
	__ATTR(vendor, S_IRUGO, msm_get_vendor, NULL);

static struct device_attribute msm_soc_attr_build_id =
	__ATTR(build_id, S_IRUGO, msm_get_build_id, NULL);

static struct device_attribute msm_soc_attr_hw_platform =
	__ATTR(hw_platform, S_IRUGO, msm_get_hw_platform, NULL);


static struct device_attribute msm_soc_attr_platform_version =
	__ATTR(platform_version, S_IRUGO,
			msm_get_platform_version, NULL);

static struct device_attribute msm_soc_attr_accessory_chip =
	__ATTR(accessory_chip, S_IRUGO,
			msm_get_accessory_chip, NULL);

static struct device_attribute msm_soc_attr_platform_subtype =
	__ATTR(platform_subtype, S_IRUGO,
			msm_get_platform_subtype, NULL);

/* Platform Subtype String is being deprecated. Use Platform
 * Subtype ID instead.
 */
static struct device_attribute msm_soc_attr_platform_subtype_id =
	__ATTR(platform_subtype_id, S_IRUGO,
			msm_get_platform_subtype_id, NULL);

static struct device_attribute msm_soc_attr_foundry_id =
	__ATTR(foundry_id, S_IRUGO,
			msm_get_foundry_id, NULL);

static struct device_attribute msm_soc_attr_pmic_model =
	__ATTR(pmic_model, S_IRUGO,
			msm_get_pmic_model, NULL);

static struct device_attribute msm_soc_attr_pmic_die_revision =
	__ATTR(pmic_die_revision, S_IRUGO,
			msm_get_pmic_die_revision, NULL);

static struct device_attribute image_version =
	__ATTR(image_version, S_IRUGO | S_IWUSR,
			msm_get_image_version, msm_set_image_version);

static struct device_attribute image_variant =
	__ATTR(image_variant, S_IRUGO | S_IWUSR,
			msm_get_image_variant, msm_set_image_variant);

static struct device_attribute image_crm_version =
	__ATTR(image_crm_version, S_IRUGO | S_IWUSR,
			msm_get_image_crm_version, msm_set_image_crm_version);

static struct device_attribute select_image =
	__ATTR(select_image, S_IRUGO | S_IWUSR,
			msm_get_image_number, msm_select_image);
+
/*
 * Fill dummy_socinfo with the id and build-id prefix matching the
 * machine detected from the device tree, and return it so boot can
 * proceed when SMEM has no socinfo record.  Targets not listed here
 * keep id 0 (MSM_CPU_UNKNOWN).
 */
static void * __init setup_dummy_socinfo(void)
{
	if (early_machine_is_mpq8092()) {
		dummy_socinfo.id = 146;
		strlcpy(dummy_socinfo.build_id, "mpq8092 - ",
			sizeof(dummy_socinfo.build_id));
	} else if (early_machine_is_apq8084()) {
		dummy_socinfo.id = 178;
		strlcpy(dummy_socinfo.build_id, "apq8084 - ",
			sizeof(dummy_socinfo.build_id));
	} else if (early_machine_is_mdm9630()) {
		dummy_socinfo.id = 187;
		strlcpy(dummy_socinfo.build_id, "mdm9630 - ",
			sizeof(dummy_socinfo.build_id));
	} else if (early_machine_is_msmsamarium()) {
		dummy_socinfo.id = 195;
		strlcpy(dummy_socinfo.build_id, "msmsamarium - ",
			sizeof(dummy_socinfo.build_id));
	} else if (early_machine_is_msm8916()) {
		dummy_socinfo.id = 206;
		strlcpy(dummy_socinfo.build_id, "msm8916 - ",
			sizeof(dummy_socinfo.build_id));
	} else if (early_machine_is_msm8939()) {
		dummy_socinfo.id = 239;
		strlcpy(dummy_socinfo.build_id, "msm8939 - ",
			sizeof(dummy_socinfo.build_id));
	} else if (early_machine_is_msm8936()) {
		dummy_socinfo.id = 233;
		strlcpy(dummy_socinfo.build_id, "msm8936 - ",
			sizeof(dummy_socinfo.build_id));
	} else if (early_machine_is_msmzirc()) {
		dummy_socinfo.id = 238;
		strlcpy(dummy_socinfo.build_id, "msmzirc - ",
			sizeof(dummy_socinfo.build_id));
	}

	/* Mark the record so it is recognisable as a fallback in logs */
	strlcat(dummy_socinfo.build_id, "Dummy socinfo",
		sizeof(dummy_socinfo.build_id));
	return (void *) &dummy_socinfo;
}
+
/*
 * Create the sysfs attribute files appropriate for the detected
 * socinfo format.  The switch intentionally falls through: each newer
 * format exposes everything the older formats do plus its own fields.
 */
static void __init populate_soc_sysfs_files(struct device *msm_soc_device)
{
	uint32_t legacy_format = socinfo_get_format();

	/* Attributes available regardless of format */
	device_create_file(msm_soc_device, &msm_soc_attr_vendor);
	device_create_file(msm_soc_device, &image_version);
	device_create_file(msm_soc_device, &image_variant);
	device_create_file(msm_soc_device, &image_crm_version);
	device_create_file(msm_soc_device, &select_image);

	switch (legacy_format) {
	case 9:
		device_create_file(msm_soc_device,
					&msm_soc_attr_foundry_id);
		/* fallthrough */
	case 8:
		/* fallthrough: format 8 adds no sysfs-visible fields */
	case 7:
		device_create_file(msm_soc_device,
					&msm_soc_attr_pmic_model);
		device_create_file(msm_soc_device,
					&msm_soc_attr_pmic_die_revision);
		/* fallthrough */
	case 6:
		device_create_file(msm_soc_device,
					&msm_soc_attr_platform_subtype);
		device_create_file(msm_soc_device,
					&msm_soc_attr_platform_subtype_id);
		/* fallthrough */
	case 5:
		device_create_file(msm_soc_device,
					&msm_soc_attr_accessory_chip);
		/* fallthrough */
	case 4:
		device_create_file(msm_soc_device,
					&msm_soc_attr_platform_version);
		/* fallthrough */
	case 3:
		device_create_file(msm_soc_device,
					&msm_soc_attr_hw_platform);
		/* fallthrough */
	case 2:
		device_create_file(msm_soc_device,
					&msm_soc_attr_raw_id);
		device_create_file(msm_soc_device,
					&msm_soc_attr_raw_version);
		/* fallthrough */
	case 1:
		device_create_file(msm_soc_device,
					&msm_soc_attr_build_id);
		break;
	default:
		pr_err("%s:Unknown socinfo format:%u\n", __func__,
				legacy_format);
		break;
	}

	return;
}
+
+static void __init soc_info_populate(struct soc_device_attribute *soc_dev_attr)
+{
+ uint32_t soc_version = socinfo_get_version();
+
+ soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "%d", socinfo_get_id());
+ soc_dev_attr->machine = "Snapdragon";
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%u.%u",
+ SOCINFO_VERSION_MAJOR(soc_version),
+ SOCINFO_VERSION_MINOR(soc_version));
+ return;
+
+}
+
+static int __init socinfo_init_sysfs(void)
+{
+ struct device *msm_soc_device;
+ struct soc_device *soc_dev;
+ struct soc_device_attribute *soc_dev_attr;
+
+ if (!socinfo) {
+ pr_err("%s: No socinfo found!\n", __func__);
+ return -ENODEV;
+ }
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr) {
+ pr_err("%s: Soc Device alloc failed!\n", __func__);
+ return -ENOMEM;
+ }
+
+ soc_info_populate(soc_dev_attr);
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR_OR_NULL(soc_dev)) {
+ kfree(soc_dev_attr);
+ pr_err("%s: Soc device register failed\n", __func__);
+ return -EIO;
+ }
+
+ msm_soc_device = soc_device_to_device(soc_dev);
+ populate_soc_sysfs_files(msm_soc_device);
+ return 0;
+}
+
+late_initcall(socinfo_init_sysfs);
+
/*
 * Log a one-shot summary of the socinfo record, printing only the
 * fields the detected format guarantees are present.  Formats 7 and 8
 * share a body because format 8 adds no fields printed here.
 */
static void socinfo_print(void)
{
	switch (socinfo->v1.format) {
	case 1:
		pr_info("%s: v%u, id=%u, ver=%u.%u\n",
			__func__, socinfo->v1.format, socinfo->v1.id,
			SOCINFO_VERSION_MAJOR(socinfo->v1.version),
			SOCINFO_VERSION_MINOR(socinfo->v1.version));
		break;
	case 2:
		pr_info("%s: v%u, id=%u, ver=%u.%u, "
			 "raw_id=%u, raw_ver=%u\n",
			__func__, socinfo->v1.format, socinfo->v1.id,
			SOCINFO_VERSION_MAJOR(socinfo->v1.version),
			SOCINFO_VERSION_MINOR(socinfo->v1.version),
			socinfo->v2.raw_id, socinfo->v2.raw_version);
		break;
	case 3:
		pr_info("%s: v%u, id=%u, ver=%u.%u, "
			 "raw_id=%u, raw_ver=%u, hw_plat=%u\n",
			__func__, socinfo->v1.format, socinfo->v1.id,
			SOCINFO_VERSION_MAJOR(socinfo->v1.version),
			SOCINFO_VERSION_MINOR(socinfo->v1.version),
			socinfo->v2.raw_id, socinfo->v2.raw_version,
			socinfo->v3.hw_platform);
		break;
	case 4:
		pr_info("%s: v%u, id=%u, ver=%u.%u, "
			 "raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n",
			__func__, socinfo->v1.format, socinfo->v1.id,
			SOCINFO_VERSION_MAJOR(socinfo->v1.version),
			SOCINFO_VERSION_MINOR(socinfo->v1.version),
			socinfo->v2.raw_id, socinfo->v2.raw_version,
			socinfo->v3.hw_platform, socinfo->v4.platform_version);
		break;
	case 5:
		pr_info("%s: v%u, id=%u, ver=%u.%u, "
			 "raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n"
			" accessory_chip=%u\n", __func__, socinfo->v1.format,
			socinfo->v1.id,
			SOCINFO_VERSION_MAJOR(socinfo->v1.version),
			SOCINFO_VERSION_MINOR(socinfo->v1.version),
			socinfo->v2.raw_id, socinfo->v2.raw_version,
			socinfo->v3.hw_platform, socinfo->v4.platform_version,
			socinfo->v5.accessory_chip);
		break;
	case 6:
		pr_info("%s: v%u, id=%u, ver=%u.%u, "
			 "raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n"
			" accessory_chip=%u hw_plat_subtype=%u\n", __func__,
			socinfo->v1.format,
			socinfo->v1.id,
			SOCINFO_VERSION_MAJOR(socinfo->v1.version),
			SOCINFO_VERSION_MINOR(socinfo->v1.version),
			socinfo->v2.raw_id, socinfo->v2.raw_version,
			socinfo->v3.hw_platform, socinfo->v4.platform_version,
			socinfo->v5.accessory_chip,
			socinfo->v6.hw_platform_subtype);
		break;
	case 8:
		/* fallthrough: v8 fields (second/third PMIC) not printed */
	case 7:
		pr_info("%s: v%u, id=%u, ver=%u.%u, raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n accessory_chip=%u, hw_plat_subtype=%u, pmic_model=%u, pmic_die_revision=%u\n",
			__func__,
			socinfo->v1.format,
			socinfo->v1.id,
			SOCINFO_VERSION_MAJOR(socinfo->v1.version),
			SOCINFO_VERSION_MINOR(socinfo->v1.version),
			socinfo->v2.raw_id, socinfo->v2.raw_version,
			socinfo->v3.hw_platform, socinfo->v4.platform_version,
			socinfo->v5.accessory_chip,
			socinfo->v6.hw_platform_subtype,
			socinfo->v7.pmic_model,
			socinfo->v7.pmic_die_revision);
		break;
	case 9:
		pr_info("%s: v%u, id=%u, ver=%u.%u, raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n accessory_chip=%u, hw_plat_subtype=%u, pmic_model=%u, pmic_die_revision=%u foundry_id=%u\n",
			__func__,
			socinfo->v1.format,
			socinfo->v1.id,
			SOCINFO_VERSION_MAJOR(socinfo->v1.version),
			SOCINFO_VERSION_MINOR(socinfo->v1.version),
			socinfo->v2.raw_id, socinfo->v2.raw_version,
			socinfo->v3.hw_platform, socinfo->v4.platform_version,
			socinfo->v5.accessory_chip,
			socinfo->v6.hw_platform_subtype,
			socinfo->v7.pmic_model,
			socinfo->v7.pmic_die_revision,
			socinfo->v9.foundry_id);
		break;

	default:
		pr_err("%s: Unknown format found\n", __func__);
		break;
	}
}
+
+int __init socinfo_init(void)
+{
+ static bool socinfo_init_done;
+
+ if (socinfo_init_done)
+ return 0;
+
+ socinfo = smem_find(SMEM_HW_SW_BUILD_ID,
+ sizeof(struct socinfo_v9),
+ 0,
+ SMEM_ANY_HOST_FLAG);
+
+ if (IS_ERR_OR_NULL(socinfo))
+ socinfo = smem_find(SMEM_HW_SW_BUILD_ID,
+ sizeof(struct socinfo_v8),
+ 0,
+ SMEM_ANY_HOST_FLAG);
+
+ if (IS_ERR_OR_NULL(socinfo))
+ socinfo = smem_find(SMEM_HW_SW_BUILD_ID,
+ sizeof(struct socinfo_v7),
+ 0,
+ SMEM_ANY_HOST_FLAG);
+
+ if (IS_ERR_OR_NULL(socinfo))
+ socinfo = smem_find(SMEM_HW_SW_BUILD_ID,
+ sizeof(struct socinfo_v6),
+ 0,
+ SMEM_ANY_HOST_FLAG);
+
+ if (IS_ERR_OR_NULL(socinfo))
+ socinfo = smem_find(SMEM_HW_SW_BUILD_ID,
+ sizeof(struct socinfo_v5),
+ 0,
+ SMEM_ANY_HOST_FLAG);
+
+ if (IS_ERR_OR_NULL(socinfo))
+ socinfo = smem_find(SMEM_HW_SW_BUILD_ID,
+ sizeof(struct socinfo_v4),
+ 0,
+ SMEM_ANY_HOST_FLAG);
+
+ if (IS_ERR_OR_NULL(socinfo))
+ socinfo = smem_find(SMEM_HW_SW_BUILD_ID,
+ sizeof(struct socinfo_v3),
+ 0,
+ SMEM_ANY_HOST_FLAG);
+
+ if (IS_ERR_OR_NULL(socinfo))
+ socinfo = smem_find(SMEM_HW_SW_BUILD_ID,
+ sizeof(struct socinfo_v2),
+ 0,
+ SMEM_ANY_HOST_FLAG);
+
+ if (IS_ERR_OR_NULL(socinfo))
+ socinfo = smem_find(SMEM_HW_SW_BUILD_ID,
+ sizeof(struct socinfo_v1),
+ 0,
+ SMEM_ANY_HOST_FLAG);
+
+ if (IS_ERR_OR_NULL(socinfo)) {
+ pr_warn("%s: Can't find SMEM_HW_SW_BUILD_ID; falling back on dummy values.\n",
+ __func__);
+ socinfo = setup_dummy_socinfo();
+ }
+
+ WARN(!socinfo_get_id(), "Unknown SOC ID!\n");
+
+ if (socinfo_get_id() >= ARRAY_SIZE(cpu_of_id))
+ BUG_ON("New IDs added! ID => CPU mapping needs an update.\n");
+ else
+ cur_cpu = cpu_of_id[socinfo->v1.id].generic_soc_type;
+
+ boot_stats_init();
+ socinfo_print();
+ arch_read_hardware_id = msm_read_hardware_id;
+ socinfo_init_done = true;
+
+ return 0;
+}
+subsys_initcall(socinfo_init);
+
+const int get_core_count(void)
+{
+ if (!(read_cpuid_mpidr() & BIT(31)))
+ return 1;
+
+ if (read_cpuid_mpidr() & BIT(30))
+ return 1;
+
+ /* 1 + the PART[1:0] field of MIDR */
+ return ((read_cpuid_id() >> 4) & 3) + 1;
+}
+
+const int read_msm_cpu_type(void)
+{
+ if (socinfo_get_msm_cpu() != MSM_CPU_UNKNOWN)
+ return socinfo_get_msm_cpu();
+
+ switch (read_cpuid_id()) {
+ case 0x510F02D0:
+ case 0x510F02D2:
+ case 0x510F02D4:
+ return MSM_CPU_8X60;
+
+ case 0x510F04D0:
+ case 0x510F04D1:
+ case 0x510F04D2:
+ case 0x511F04D0:
+ case 0x512F04D0:
+ return MSM_CPU_8960;
+
+ case 0x51404D11: /* We can't get here unless we are in bringup */
+ return MSM_CPU_8930;
+
+ case 0x510F06F0:
+ return MSM_CPU_8064;
+
+ case 0x511F06F1:
+ case 0x511F06F2:
+ case 0x512F06F0:
+ return MSM_CPU_8974;
+
+ default:
+ return MSM_CPU_UNKNOWN;
+ };
+}
+
+const int cpu_is_krait(void)
+{
+ return ((read_cpuid_id() & 0xFF00FC00) == 0x51000400);
+}
+
+const int cpu_is_krait_v1(void)
+{
+ switch (read_cpuid_id()) {
+ case 0x510F04D0:
+ case 0x510F04D1:
+ case 0x510F04D2:
+ return 1;
+
+ default:
+ return 0;
+ };
+}
+
+const int cpu_is_krait_v2(void)
+{
+ switch (read_cpuid_id()) {
+ case 0x511F04D0:
+ case 0x511F04D1:
+ case 0x511F04D2:
+ case 0x511F04D3:
+ case 0x511F04D4:
+
+ case 0x510F06F0:
+ case 0x510F06F1:
+ case 0x510F06F2:
+ return 1;
+
+ default:
+ return 0;
+ };
+}
+
+const int cpu_is_krait_v3(void)
+{
+ switch (read_cpuid_id()) {
+ case 0x512F04D0:
+ case 0x511F06F0:
+ case 0x511F06F1:
+ case 0x511F06F2:
+ case 0x510F05D0:
+ case 0x510F07F0:
+ return 1;
+
+ default:
+ return 0;
+ };
+}
diff --git a/drivers/soc/qcom/spm-v2.c b/drivers/soc/qcom/spm-v2.c
new file mode 100644
index 000000000000..480388e2f6ed
--- /dev/null
+++ b/drivers/soc/qcom/spm-v2.c
@@ -0,0 +1,541 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include "spm_driver.h"
+
+/* PMIC arbiter FSM state value that means "idle, last request complete" */
+#define MSM_SPM_PMIC_STATE_IDLE 0
+
+/* Bits accepted by the debug_mask module parameter below */
+enum {
+	MSM_SPM_DEBUG_SHADOW = 1U << 0,	/* dump shadow regs on LPM entry */
+	MSM_SPM_DEBUG_VCTL = 1U << 1,	/* trace voltage-control requests */
+};
+
+static int msm_spm_debug_mask;
+module_param_named(
+	debug_mask, msm_spm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
+);
+
+/* Describes one supported SAW2 hardware revision and its register map */
+struct saw2_data {
+	const char *ver_name;		/* human-readable version string */
+	uint32_t major;			/* expected VERSION major field */
+	uint32_t minor;			/* expected VERSION minor field */
+	uint32_t *spm_reg_offset_ptr;	/* MSM_SPM_REG_NR-sized offset table */
+};
+
+/* Register byte offsets for SAW2 v2.1 (SEQ_ENTRY bank lives at 0x80) */
+static uint32_t msm_spm_reg_offsets_saw2_v2_1[MSM_SPM_REG_NR] = {
+	[MSM_SPM_REG_SAW2_SECURE] = 0x00,
+	[MSM_SPM_REG_SAW2_ID] = 0x04,
+	[MSM_SPM_REG_SAW2_CFG] = 0x08,
+	[MSM_SPM_REG_SAW2_SPM_STS] = 0x0C,
+	[MSM_SPM_REG_SAW2_AVS_STS] = 0x10,
+	[MSM_SPM_REG_SAW2_PMIC_STS] = 0x14,
+	[MSM_SPM_REG_SAW2_RST] = 0x18,
+	[MSM_SPM_REG_SAW2_VCTL] = 0x1C,
+	[MSM_SPM_REG_SAW2_AVS_CTL] = 0x20,
+	[MSM_SPM_REG_SAW2_AVS_LIMIT] = 0x24,
+	[MSM_SPM_REG_SAW2_AVS_DLY] = 0x28,
+	[MSM_SPM_REG_SAW2_AVS_HYSTERESIS] = 0x2C,
+	[MSM_SPM_REG_SAW2_SPM_CTL] = 0x30,
+	[MSM_SPM_REG_SAW2_SPM_DLY] = 0x34,
+	[MSM_SPM_REG_SAW2_PMIC_DATA_0] = 0x40,
+	[MSM_SPM_REG_SAW2_PMIC_DATA_1] = 0x44,
+	[MSM_SPM_REG_SAW2_PMIC_DATA_2] = 0x48,
+	[MSM_SPM_REG_SAW2_PMIC_DATA_3] = 0x4C,
+	[MSM_SPM_REG_SAW2_PMIC_DATA_4] = 0x50,
+	[MSM_SPM_REG_SAW2_PMIC_DATA_5] = 0x54,
+	[MSM_SPM_REG_SAW2_PMIC_DATA_6] = 0x58,
+	[MSM_SPM_REG_SAW2_PMIC_DATA_7] = 0x5C,
+	[MSM_SPM_REG_SAW2_SEQ_ENTRY] = 0x80,
+	[MSM_SPM_REG_SAW2_VERSION] = 0xFD0,
+};
+
+/*
+ * Register byte offsets for SAW2 v3.0: adds STS2 at 0x38 and moves the
+ * SEQ_ENTRY bank to 0x400 (v2.1 has it at 0x80).
+ */
+static uint32_t msm_spm_reg_offsets_saw2_v3_0[MSM_SPM_REG_NR] = {
+	[MSM_SPM_REG_SAW2_SECURE] = 0x00,
+	[MSM_SPM_REG_SAW2_ID] = 0x04,
+	[MSM_SPM_REG_SAW2_CFG] = 0x08,
+	[MSM_SPM_REG_SAW2_SPM_STS] = 0x0C,
+	[MSM_SPM_REG_SAW2_AVS_STS] = 0x10,
+	[MSM_SPM_REG_SAW2_PMIC_STS] = 0x14,
+	[MSM_SPM_REG_SAW2_RST] = 0x18,
+	[MSM_SPM_REG_SAW2_VCTL] = 0x1C,
+	[MSM_SPM_REG_SAW2_AVS_CTL] = 0x20,
+	[MSM_SPM_REG_SAW2_AVS_LIMIT] = 0x24,
+	[MSM_SPM_REG_SAW2_AVS_DLY] = 0x28,
+	[MSM_SPM_REG_SAW2_AVS_HYSTERESIS] = 0x2C,
+	[MSM_SPM_REG_SAW2_SPM_CTL] = 0x30,
+	[MSM_SPM_REG_SAW2_SPM_DLY] = 0x34,
+	[MSM_SPM_REG_SAW2_STS2] = 0x38,
+	[MSM_SPM_REG_SAW2_PMIC_DATA_0] = 0x40,
+	[MSM_SPM_REG_SAW2_PMIC_DATA_1] = 0x44,
+	[MSM_SPM_REG_SAW2_PMIC_DATA_2] = 0x48,
+	[MSM_SPM_REG_SAW2_PMIC_DATA_3] = 0x4C,
+	[MSM_SPM_REG_SAW2_PMIC_DATA_4] = 0x50,
+	[MSM_SPM_REG_SAW2_PMIC_DATA_5] = 0x54,
+	[MSM_SPM_REG_SAW2_PMIC_DATA_6] = 0x58,
+	[MSM_SPM_REG_SAW2_PMIC_DATA_7] = 0x5C,
+	[MSM_SPM_REG_SAW2_SEQ_ENTRY] = 0x400,
+	[MSM_SPM_REG_SAW2_VERSION] = 0xFD0,
+};
+
+/* Supported SAW2 revisions; matched against the VERSION register at init */
+static struct saw2_data saw2_info[] = {
+	[0] = {
+		"SAW2_v2.1",
+		0x2,
+		0x1,
+		msm_spm_reg_offsets_saw2_v2_1,
+	},
+	[1] = {
+		"SAW2_v3.0",
+		0x3,
+		0x0,
+		msm_spm_reg_offsets_saw2_v3_0,
+	},
+};
+
+/* Number of 32-bit SEQ_ENTRY words; fixed at 32 on both supported revs */
+static inline uint32_t msm_spm_drv_get_num_spm_entry(
+		struct msm_spm_driver_data *dev)
+{
+	return 32;
+}
+
+/* Write one cached (shadow) register value out to the hardware */
+static void msm_spm_drv_flush_shadow(struct msm_spm_driver_data *dev,
+		unsigned int reg_index)
+{
+	__raw_writel(dev->reg_shadow[reg_index],
+		dev->reg_base_addr + dev->reg_offsets[reg_index]);
+}
+
+/* Refresh one shadow entry from the hardware register */
+static void msm_spm_drv_load_shadow(struct msm_spm_driver_data *dev,
+		unsigned int reg_index)
+{
+	dev->reg_shadow[reg_index] =
+		__raw_readl(dev->reg_base_addr +
+				dev->reg_offsets[reg_index]);
+}
+
+/*
+ * Program the sequence start address into SPM_CTL bits [10:4] (shadow
+ * only; caller flushes).  On SAW2 v3 also set/clear the power-collapse
+ * mode flag in bit 16 according to pc_mode.
+ */
+static inline void msm_spm_drv_set_start_addr(
+		struct msm_spm_driver_data *dev, uint32_t addr, bool pc_mode)
+{
+	addr &= 0x7F;
+	addr <<= 4;
+	dev->reg_shadow[MSM_SPM_REG_SAW2_SPM_CTL] &= 0xFFFFF80F;
+	dev->reg_shadow[MSM_SPM_REG_SAW2_SPM_CTL] |= addr;
+
+	/* The PC-mode bit only exists on v3 hardware */
+	if (dev->major != 0x3)
+		return;
+
+	dev->reg_shadow[MSM_SPM_REG_SAW2_SPM_CTL] &= 0xFFFEFFFF;
+	if (pc_mode)
+		dev->reg_shadow[MSM_SPM_REG_SAW2_SPM_CTL] |= 0x00010000;
+}
+
+/* Bit 2 of the ID register reports whether a PMIC arbiter is present */
+static inline bool msm_spm_pmic_arb_present(struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW2_ID);
+	return (dev->reg_shadow[MSM_SPM_REG_SAW2_ID] >> 2) & 0x1;
+}
+
+/*
+ * Push a voltage level out through VCTL, mirroring it into PMIC_DATA_3
+ * so the SPM sequencer restores the same level.  Low byte carries the
+ * level, bits [18:16] the PMIC port index.
+ */
+static inline void msm_spm_drv_set_vctl2(struct msm_spm_driver_data *dev,
+		uint32_t vlevel)
+{
+	unsigned int pmic_data = 0;
+
+	/**
+	 * VCTL_PORT has to be 0, for PMIC_STS register to be updated.
+	 * Ensure that vctl_port is always set to 0.
+	 */
+	WARN_ON(dev->vctl_port);
+
+	pmic_data |= vlevel;
+	pmic_data |= (dev->vctl_port & 0x7) << 16;
+
+	dev->reg_shadow[MSM_SPM_REG_SAW2_VCTL] &= ~0x700FF;
+	dev->reg_shadow[MSM_SPM_REG_SAW2_VCTL] |= pmic_data;
+
+	dev->reg_shadow[MSM_SPM_REG_SAW2_PMIC_DATA_3] &= ~0x700FF;
+	dev->reg_shadow[MSM_SPM_REG_SAW2_PMIC_DATA_3] |= pmic_data;
+
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW2_VCTL);
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW2_PMIC_DATA_3);
+}
+
+/* Read the PMIC arbiter FSM state from PMIC_STS bits [17:16] */
+static inline uint32_t msm_spm_drv_get_sts_pmic_state(
+		struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW2_PMIC_STS);
+	return (dev->reg_shadow[MSM_SPM_REG_SAW2_PMIC_STS] >> 16) &
+				0x03;
+}
+
+/* Return the voltage level last acknowledged by the PMIC (PMIC_STS[7:0]) */
+uint32_t msm_spm_drv_get_sts_curr_pmic_data(
+		struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW2_PMIC_STS);
+	return dev->reg_shadow[MSM_SPM_REG_SAW2_PMIC_STS] & 0xFF;
+}
+
+/*
+ * Read the hardware VERSION register (via the DT-provided ver_reg offset,
+ * since the layout-specific offset table is not selected yet) and split
+ * it into major [31:28] and minor [27:16] fields.
+ */
+static inline void msm_spm_drv_get_saw2_ver(struct msm_spm_driver_data *dev,
+		uint32_t *major, uint32_t *minor)
+{
+	uint32_t val = 0;
+
+	dev->reg_shadow[MSM_SPM_REG_SAW2_VERSION] =
+		__raw_readl(dev->reg_base_addr + dev->ver_reg);
+
+	val = dev->reg_shadow[MSM_SPM_REG_SAW2_VERSION];
+
+	*major = (val >> 28) & 0xF;
+	*minor = (val >> 16) & 0xFFF;
+}
+
+/*
+ * Set or clear the SPM enable bit (SPM_CTL bit 0).  The XOR test skips
+ * the register write when the bit already has the requested value;
+ * wmb() makes the write visible before the caller continues.
+ */
+inline int msm_spm_drv_set_spm_enable(
+		struct msm_spm_driver_data *dev, bool enable)
+{
+	uint32_t value = enable ? 0x01 : 0x00;
+
+	if (!dev)
+		return -EINVAL;
+
+	if ((dev->reg_shadow[MSM_SPM_REG_SAW2_SPM_CTL] & 0x01) ^ value) {
+
+		dev->reg_shadow[MSM_SPM_REG_SAW2_SPM_CTL] &= ~0x1;
+		dev->reg_shadow[MSM_SPM_REG_SAW2_SPM_CTL] |= value;
+
+		msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW2_SPM_CTL);
+		wmb();
+	}
+	return 0;
+}
+/*
+ * Copy the cached SPM command sequence into the SAW2 SEQ_ENTRY register
+ * bank, one 32-bit word at a time.  mb() ensures the writes reach the
+ * hardware before the caller proceeds.
+ */
+void msm_spm_drv_flush_seq_entry(struct msm_spm_driver_data *dev)
+{
+	int i;
+	int num_spm_entry;
+
+	/* Validate dev before it is used anywhere, including helpers */
+	if (!dev) {
+		__WARN();
+		return;
+	}
+
+	num_spm_entry = msm_spm_drv_get_num_spm_entry(dev);
+
+	for (i = 0; i < num_spm_entry; i++) {
+		__raw_writel(dev->reg_seq_entry_shadow[i],
+			dev->reg_base_addr
+			+ dev->reg_offsets[MSM_SPM_REG_SAW2_SEQ_ENTRY]
+			+ 4 * i);
+	}
+	mb();
+}
+
+/*
+ * Pack a 0x0F-terminated byte command sequence into the 32-bit
+ * reg_seq_entry_shadow words starting at *offset (a byte offset).
+ * *offset is advanced past the terminator so the caller can append
+ * the next mode's sequence.
+ *
+ * NOTE(review): neither dev nor the length of reg_seq_entry_shadow is
+ * checked here; a sequence longer than the remaining shadow space would
+ * overrun it -- callers must keep total sequence bytes within the
+ * 32-entry bank.  Verify against DT contents.
+ */
+int msm_spm_drv_write_seq_data(struct msm_spm_driver_data *dev,
+		uint8_t *cmd, uint32_t *offset)
+{
+	uint32_t cmd_w;
+	uint32_t offset_w = *offset / 4;
+	uint8_t last_cmd;
+
+	if (!cmd)
+		return -EINVAL;
+
+	while (1) {
+		int i;
+		cmd_w = 0;
+		last_cmd = 0;
+		/* Start from the partially-filled word, if any */
+		cmd_w = dev->reg_seq_entry_shadow[offset_w];
+
+		for (i = (*offset % 4); i < 4; i++) {
+			last_cmd = *(cmd++);
+			cmd_w |= last_cmd << (i * 8);
+			(*offset)++;
+			if (last_cmd == 0x0f)
+				break;
+		}
+
+		dev->reg_seq_entry_shadow[offset_w++] = cmd_w;
+		if (last_cmd == 0x0f)
+			break;
+	}
+
+	return 0;
+}
+
+/*
+ * Select the low-power-mode sequence by programming its start address
+ * into SPM_CTL and flushing it to hardware.  Optionally dumps all
+ * shadow registers when the SHADOW debug bit is set.
+ */
+int msm_spm_drv_set_low_power_mode(struct msm_spm_driver_data *dev,
+		uint32_t addr, bool pc_mode)
+{
+
+	/* SPM is configured to reset start address to zero after end of Program
+	 */
+	if (!dev)
+		return -EINVAL;
+
+	msm_spm_drv_set_start_addr(dev, addr, pc_mode);
+
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW2_SPM_CTL);
+	wmb();
+
+	if (msm_spm_debug_mask & MSM_SPM_DEBUG_SHADOW) {
+		int i;
+		for (i = 0; i < MSM_SPM_REG_NR; i++)
+			pr_info("%s: reg %02x = 0x%08x\n", __func__,
+				dev->reg_offsets[i], dev->reg_shadow[i]);
+	}
+
+	return 0;
+}
+
+/*
+ * AVS (Adaptive Voltage Scaling) helpers.  Real implementations when
+ * CONFIG_MSM_AVS_HW is set; no-op stubs otherwise so callers need no
+ * ifdefs.  Bit 0 of AVS_CTL reports AVS support; bit 27 gates it.
+ */
+#ifdef CONFIG_MSM_AVS_HW
+static bool msm_spm_drv_is_avs_enabled(struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW2_AVS_CTL);
+	return dev->reg_shadow[MSM_SPM_REG_SAW2_AVS_CTL] & BIT(0);
+}
+
+static void msm_spm_drv_disable_avs(struct msm_spm_driver_data *dev)
+{
+	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW2_AVS_CTL);
+	dev->reg_shadow[MSM_SPM_REG_SAW2_AVS_CTL] &= ~BIT(27);
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW2_AVS_CTL);
+}
+
+static void msm_spm_drv_enable_avs(struct msm_spm_driver_data *dev)
+{
+	dev->reg_shadow[MSM_SPM_REG_SAW2_AVS_CTL] |= BIT(27);
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW2_AVS_CTL);
+}
+
+/*
+ * Program the AVS floor (vlevel - 4, bits [15:10]) and ceiling (vlevel,
+ * bits [22:17]) around the requested level.
+ */
+static void msm_spm_drv_set_avs_vlevel(struct msm_spm_driver_data *dev,
+		unsigned int vlevel)
+{
+	vlevel &= 0x3f;
+	dev->reg_shadow[MSM_SPM_REG_SAW2_AVS_CTL] &= ~0x7efc00;
+	dev->reg_shadow[MSM_SPM_REG_SAW2_AVS_CTL] |= ((vlevel - 4) << 10);
+	dev->reg_shadow[MSM_SPM_REG_SAW2_AVS_CTL] |= (vlevel << 17);
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW2_AVS_CTL);
+}
+
+#else
+static bool msm_spm_drv_is_avs_enabled(struct msm_spm_driver_data *dev)
+{
+	return false;
+}
+
+static void msm_spm_drv_disable_avs(struct msm_spm_driver_data *dev) { }
+
+static void msm_spm_drv_enable_avs(struct msm_spm_driver_data *dev) { }
+
+static void msm_spm_drv_set_avs_vlevel(struct msm_spm_driver_data *dev,
+		unsigned int vlevel) { }
+#endif
+
+/*
+ * msm_spm_drv_set_vdd() - request a new core voltage through the SAW2
+ * PMIC interface and poll until the PMIC acknowledges it.
+ *
+ * AVS is disabled around the change and re-enabled (with a min/max
+ * window around the new level) afterwards, including on failure.
+ * Returns 0 on success, -EINVAL on NULL dev, -ENOSYS when no PMIC
+ * arbiter is present, -EIO if the level is not confirmed within
+ * vctl_timeout_us microseconds.
+ */
+int msm_spm_drv_set_vdd(struct msm_spm_driver_data *dev, unsigned int vlevel)
+{
+	uint32_t timeout_us, new_level;
+	bool avs_enabled;
+
+	if (!dev)
+		return -EINVAL;
+
+	avs_enabled = msm_spm_drv_is_avs_enabled(dev);
+
+	if (!msm_spm_pmic_arb_present(dev))
+		return -ENOSYS;
+
+	if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL)
+		pr_info("%s: requesting vlevel %#x\n", __func__, vlevel);
+
+	/* Disable AVS so it cannot fight the explicit voltage change */
+	if (avs_enabled)
+		msm_spm_drv_disable_avs(dev);
+
+	/* Kick the state machine back to idle */
+	dev->reg_shadow[MSM_SPM_REG_SAW2_RST] = 1;
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW2_RST);
+
+	msm_spm_drv_set_vctl2(dev, vlevel);
+
+	timeout_us = dev->vctl_timeout_us;
+	/* Confirm the voltage we set was what hardware sent */
+	do {
+		new_level = msm_spm_drv_get_sts_curr_pmic_data(dev);
+		if (new_level == vlevel)
+			break;
+		udelay(1);
+	} while (--timeout_us);
+	if (!timeout_us) {
+		pr_info("Wrong level %#x\n", new_level);
+		goto set_vdd_bail;
+	}
+
+	if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL)
+		pr_info("%s: done, remaining timeout %u us\n",
+			__func__, timeout_us);
+
+	/* Set AVS min/max */
+	if (avs_enabled) {
+		msm_spm_drv_set_avs_vlevel(dev, vlevel);
+		msm_spm_drv_enable_avs(dev);
+	}
+
+	return 0;
+
+set_vdd_bail:
+	if (avs_enabled)
+		msm_spm_drv_enable_avs(dev);
+
+	pr_err("%s: failed %#x, remaining timeout %uus, vlevel %#x\n",
+		__func__, vlevel, timeout_us, new_level);
+	return -EIO;
+}
+
+/*
+ * Map a logical PMIC port to the DT-configured port index for this SAW.
+ * Returns -1 when the port kind is unknown (ports left unconfigured in
+ * DT are themselves initialised to -1 by the probe code).
+ */
+static int msm_spm_drv_get_pmic_port(struct msm_spm_driver_data *dev,
+		enum msm_spm_pmic_port port)
+{
+	int index = -1;
+
+	switch (port) {
+	case MSM_SPM_PMIC_VCTL_PORT:
+		index = dev->vctl_port;
+		break;
+	case MSM_SPM_PMIC_PHASE_PORT:
+		index = dev->phase_port;
+		break;
+	case MSM_SPM_PMIC_PFM_PORT:
+		index = dev->pfm_port;
+		break;
+	default:
+		break;
+	}
+
+	return index;
+}
+
+/*
+ * msm_spm_drv_set_pmic_data() - send a raw data byte to a PMIC port
+ * (phase count, PFM mode, ...) via the VCTL register and wait for the
+ * PMIC FSM to return to idle.
+ *
+ * Returns 0 on success, -ENOSYS without a PMIC arbiter, -ENODEV for an
+ * unconfigured port, -EIO on timeout.  NOTE(review): the failure
+ * message prints the *remaining* timeout, which is always 0 there; the
+ * configured budget is dev->vctl_timeout_us.
+ */
+int msm_spm_drv_set_pmic_data(struct msm_spm_driver_data *dev,
+		enum msm_spm_pmic_port port, unsigned int data)
+{
+	unsigned int pmic_data = 0;
+	unsigned int timeout_us = 0;
+	int index = 0;
+
+	if (!msm_spm_pmic_arb_present(dev))
+		return -ENOSYS;
+
+	index = msm_spm_drv_get_pmic_port(dev, port);
+	if (index < 0)
+		return -ENODEV;
+
+	/* Data byte in [7:0], port index in [18:16] */
+	pmic_data |= data & 0xFF;
+	pmic_data |= (index & 0x7) << 16;
+
+	dev->reg_shadow[MSM_SPM_REG_SAW2_VCTL] &= ~0x700FF;
+	dev->reg_shadow[MSM_SPM_REG_SAW2_VCTL] |= pmic_data;
+	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW2_VCTL);
+	mb();
+
+	timeout_us = dev->vctl_timeout_us;
+	/**
+	 * Confirm the pmic data set was what hardware sent by
+	 * checking the PMIC FSM state.
+	 * We cannot use the sts_pmic_data and check it against
+	 * the value like we do fot set_vdd, since the PMIC_STS
+	 * is only updated for SAW_VCTL sent with port index 0.
+	 */
+	do {
+		if (msm_spm_drv_get_sts_pmic_state(dev) ==
+				MSM_SPM_PMIC_STATE_IDLE)
+			break;
+		udelay(1);
+	} while (--timeout_us);
+
+	if (!timeout_us) {
+		pr_err("%s: failed, remaining timeout %u us, data %d\n",
+				__func__, timeout_us, data);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * Re-program all initialisable registers and the command sequences from
+ * the shadow copies (e.g. after the SAW lost state).
+ */
+void msm_spm_drv_reinit(struct msm_spm_driver_data *dev)
+{
+	int i;
+
+	for (i = 0; i < MSM_SPM_REG_NR_INITIALIZE; i++)
+		msm_spm_drv_flush_shadow(dev, i);
+
+	msm_spm_drv_flush_seq_entry(dev);
+	mb();
+}
+
+/*
+ * msm_spm_drv_init() - one-time initialisation of a SAW2 instance.
+ *
+ * Copies the platform configuration into @dev, detects the hardware
+ * revision to select the register-offset table (BUG on an unknown
+ * revision), programs the initial register values, re-reads them into
+ * the shadow copies, and allocates the command-sequence shadow buffer.
+ * Returns 0 on success or -ENOMEM.
+ */
+int msm_spm_drv_init(struct msm_spm_driver_data *dev,
+		struct msm_spm_platform_data *data)
+{
+	int i;
+	int num_spm_entry;
+	bool found = false;
+
+	BUG_ON(!dev || !data);
+
+	dev->vctl_port = data->vctl_port;
+	dev->phase_port = data->phase_port;
+	dev->pfm_port = data->pfm_port;
+	dev->reg_base_addr = data->reg_base_addr;
+	memcpy(dev->reg_shadow, data->reg_init_values,
+			sizeof(data->reg_init_values));
+
+	dev->vctl_timeout_us = data->vctl_timeout_us;
+
+	msm_spm_drv_get_saw2_ver(dev, &dev->major, &dev->minor);
+
+	/* Select the register layout matching the detected HW revision */
+	for (i = 0; i < ARRAY_SIZE(saw2_info); i++)
+		if (dev->major == saw2_info[i].major &&
+			dev->minor == saw2_info[i].minor) {
+			pr_debug("%s: Version found\n",
+					saw2_info[i].ver_name);
+			dev->reg_offsets = saw2_info[i].spm_reg_offset_ptr;
+			found = true;
+			break;
+		}
+
+	if (!found) {
+		pr_err("%s: No SAW2 version found\n", __func__);
+		/* found is known false here; BUG() directly */
+		BUG();
+	}
+
+	for (i = 0; i < MSM_SPM_REG_NR_INITIALIZE; i++)
+		msm_spm_drv_flush_shadow(dev, i);
+	/* barrier to ensure write completes before we update shadow
+	 * registers
+	 */
+	mb();
+
+	for (i = 0; i < MSM_SPM_REG_NR_INITIALIZE; i++)
+		msm_spm_drv_load_shadow(dev, i);
+
+	/* barrier to ensure read completes before we proceed further*/
+	mb();
+
+	num_spm_entry = msm_spm_drv_get_num_spm_entry(dev);
+
+	/* kcalloc: zeroed and overflow-checked, unlike kzalloc(a * b) */
+	dev->reg_seq_entry_shadow =
+		kcalloc(num_spm_entry, sizeof(*dev->reg_seq_entry_shadow),
+				GFP_KERNEL);
+
+	if (!dev->reg_seq_entry_shadow)
+		return -ENOMEM;
+
+	return 0;
+}
diff --git a/drivers/soc/qcom/spm_devices.c b/drivers/soc/qcom/spm_devices.c
new file mode 100644
index 000000000000..778e8e48ea94
--- /dev/null
+++ b/drivers/soc/qcom/spm_devices.c
@@ -0,0 +1,562 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <soc/qcom/spm.h>
+#include "spm_driver.h"
+
+/* One supported low-power mode and where its sequence starts in the SAW */
+struct msm_spm_power_modes {
+	uint32_t mode;		/* MSM_SPM_MODE_* / MSM_SPM_L2_MODE_* id */
+	bool notify_rpm;	/* variant that notifies the RPM */
+	uint32_t start_addr;	/* sequence start offset in SEQ_ENTRY */
+
+};
+
+/* Per-SAW device state (one per CPU plus one for L2) */
+struct msm_spm_device {
+	bool initialized;	/* set once msm_spm_dev_init() succeeds */
+	struct msm_spm_driver_data reg_data;
+	struct msm_spm_power_modes *modes;
+	uint32_t num_modes;
+	uint32_t cpu_vdd;	/* last voltage level requested */
+};
+
+/* Arguments marshalled through smp_call_function_single() */
+struct msm_spm_vdd_info {
+	uint32_t cpu;
+	uint32_t vlevel;
+	int err;		/* result written by msm_spm_smp_set_vdd() */
+};
+
+static struct msm_spm_device msm_spm_l2_device;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_spm_device, msm_cpu_spm_device);
+/* True when the L2 SAW, not the per-CPU SAWs, controls the rail */
+static bool msm_spm_L2_apcs_master;
+
+/*
+ * Worker invoked (possibly via smp_call_function_single) to apply a
+ * voltage change on the SAW that owns the rail: the L2 SAW when it is
+ * the APCS master, otherwise the target CPU's own SAW.  Result is
+ * returned through info->err; a no-op if the device is uninitialised.
+ */
+static void msm_spm_smp_set_vdd(void *data)
+{
+	struct msm_spm_device *dev;
+	struct msm_spm_vdd_info *info = (struct msm_spm_vdd_info *)data;
+
+	if (msm_spm_L2_apcs_master)
+		dev = &msm_spm_l2_device;
+	else
+		dev = &per_cpu(msm_cpu_spm_device, info->cpu);
+
+	if (!dev->initialized)
+		return;
+
+	/* Pin this CPU (disable preemption) while touching the L2 SAW */
+	if (msm_spm_L2_apcs_master)
+		get_cpu();
+
+	dev->cpu_vdd = info->vlevel;
+	info->err = msm_spm_drv_set_vdd(&dev->reg_data, info->vlevel);
+
+	if (msm_spm_L2_apcs_master)
+		put_cpu();
+}
+
+/**
+ * msm_spm_probe_done(): Verify and return the status of the cpu(s) and l2
+ * probe.
+ * Return: 0 if all spm devices have been probed, else return -EPROBE_DEFER.
+ */
+int msm_spm_probe_done(void)
+{
+	struct msm_spm_device *dev;
+	int cpu;
+
+	/* When the L2 SAW masters the rail it must have probed too */
+	if (msm_spm_L2_apcs_master && !msm_spm_l2_device.initialized)
+		return -EPROBE_DEFER;
+
+	for_each_possible_cpu(cpu) {
+		dev = &per_cpu(msm_cpu_spm_device, cpu);
+		if (!dev->initialized)
+			return -EPROBE_DEFER;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_spm_probe_done);
+
+/**
+ * msm_spm_set_vdd(): Set core voltage
+ * @cpu: core id
+ * @vlevel: Encoded PMIC data.
+ *
+ * Return: 0 on success or a negative errno from the SPM driver.
+ * NOTE(review): current_cpu (int) is compared against cpu (unsigned);
+ * harmless for valid core ids but worth normalising.
+ */
+int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
+{
+	struct msm_spm_vdd_info info;
+	int ret;
+	int current_cpu;
+
+	info.cpu = cpu;
+	info.vlevel = vlevel;
+	info.err = -ENODEV;
+
+	current_cpu = get_cpu();
+	if (!msm_spm_L2_apcs_master && (current_cpu != cpu) &&
+			cpu_online(cpu)) {
+		/**
+		 * We do not want to set the voltage of another core from
+		 * this core, as its possible that we may race the vdd change
+		 * with the SPM state machine of that core, which could also
+		 * be changing the voltage of that core during power collapse.
+		 * Hence, set the function to be executed on that core and block
+		 * until the vdd change is complete.
+		 */
+		ret = smp_call_function_single(cpu, msm_spm_smp_set_vdd,
+				&info, true);
+		if (!ret)
+			ret = info.err;
+	} else {
+		/**
+		 * Reached when setting our own core's voltage, when the
+		 * target core is offline, or when the L2 SAW masters the
+		 * rail -- in each case it is safe to set the vdd directly
+		 * from this context.
+		 */
+		msm_spm_smp_set_vdd(&info);
+		ret = info.err;
+	}
+	put_cpu();
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_spm_set_vdd);
+
+/**
+ * msm_spm_get_vdd(): Get core voltage
+ * @cpu: core id
+ * @return: Returns encoded PMIC data.
+ *
+ * Returns the cached level recorded by the last msm_spm_set_vdd()
+ * request, not a fresh hardware read.
+ */
+unsigned int msm_spm_get_vdd(unsigned int cpu)
+{
+	struct msm_spm_device *dev;
+
+	if (msm_spm_L2_apcs_master)
+		dev = &msm_spm_l2_device;
+	else
+		dev = &per_cpu(msm_cpu_spm_device, cpu);
+	return dev->cpu_vdd;
+}
+EXPORT_SYMBOL(msm_spm_get_vdd);
+
+/*
+ * Look up the (mode, notify_rpm) pair in the device's mode table and
+ * program its sequence start address; MSM_SPM_MODE_DISABLED just turns
+ * the SPM off.
+ *
+ * NOTE(review): if no table entry matches, start_addr stays 0 and the
+ * sequence at offset 0 is silently selected -- confirm this fallback is
+ * intended rather than returning an error.
+ */
+static int msm_spm_dev_set_low_power_mode(struct msm_spm_device *dev,
+		unsigned int mode, bool notify_rpm, bool pc_mode)
+{
+	uint32_t i;
+	uint32_t start_addr = 0;
+	int ret = -EINVAL;
+
+	if (!dev->initialized)
+		return -ENXIO;
+
+	if (mode == MSM_SPM_MODE_DISABLED) {
+		ret = msm_spm_drv_set_spm_enable(&dev->reg_data, false);
+	} else if (!msm_spm_drv_set_spm_enable(&dev->reg_data, true)) {
+		for (i = 0; i < dev->num_modes; i++) {
+			if ((dev->modes[i].mode == mode) &&
+				(dev->modes[i].notify_rpm == notify_rpm)) {
+				start_addr = dev->modes[i].start_addr;
+				break;
+			}
+		}
+		ret = msm_spm_drv_set_low_power_mode(&dev->reg_data,
+					start_addr, pc_mode);
+	}
+	return ret;
+}
+
+/*
+ * msm_spm_dev_init() - initialise one SPM device from platform data:
+ * allocate the mode table, initialise the low-level driver, write every
+ * mode's command sequence into the shadow buffer (recording each mode's
+ * start offset), then flush the sequences to hardware.
+ * Returns 0 on success or a negative errno; dev->initialized is only
+ * set on full success.
+ */
+static int msm_spm_dev_init(struct msm_spm_device *dev,
+		struct msm_spm_platform_data *data)
+{
+	int i, ret = -ENOMEM;
+	uint32_t offset = 0;
+
+	dev->num_modes = data->num_modes;
+	/* kmalloc_array checks num_modes * size for overflow */
+	dev->modes = kmalloc_array(dev->num_modes,
+			sizeof(struct msm_spm_power_modes), GFP_KERNEL);
+
+	if (!dev->modes)
+		goto spm_failed_malloc;
+
+	dev->reg_data.ver_reg = data->ver_reg;
+	ret = msm_spm_drv_init(&dev->reg_data, data);
+
+	if (ret)
+		goto spm_failed_init;
+
+	for (i = 0; i < dev->num_modes; i++) {
+
+		/* Default offset is 0 and gets updated as we write more
+		 * sequences into SPM
+		 */
+		dev->modes[i].start_addr = offset;
+		ret = msm_spm_drv_write_seq_data(&dev->reg_data,
+						data->modes[i].cmd, &offset);
+		if (ret < 0)
+			goto spm_failed_init;
+
+		dev->modes[i].mode = data->modes[i].mode;
+		dev->modes[i].notify_rpm = data->modes[i].notify_rpm;
+	}
+	msm_spm_drv_flush_seq_entry(&dev->reg_data);
+	dev->initialized = true;
+	return 0;
+
+spm_failed_init:
+	kfree(dev->modes);
+	dev->modes = NULL;	/* don't leave a dangling pointer behind */
+spm_failed_malloc:
+	return ret;
+}
+
+/**
+ * msm_spm_turn_on_cpu_rail(): Power on cpu rail before turning on core
+ * @base: core 0's base SAW address
+ * @cpu: core id
+ *
+ * Writes the secondary core's SAW PMIC_DATA register (offset 0x1C of
+ * that core's 64 KiB SAW block) to set the FTS2 supply to 1.15 V and
+ * then enable it.  Return: 0 on success, -EINVAL for core 0 or an
+ * out-of-range core id, -ENOMEM if the mapping fails.
+ */
+int msm_spm_turn_on_cpu_rail(unsigned long base, unsigned int cpu)
+{
+	uint32_t val = 0;
+	uint32_t timeout = 512; /* delay for voltage to settle on the core */
+	void __iomem *saw_base;
+	void __iomem *reg;
+
+	if (cpu == 0 || cpu >= num_possible_cpus())
+		return -EINVAL;
+
+	saw_base = ioremap_nocache(base + (cpu * 0x10000), SZ_4K);
+	if (!saw_base)
+		return -ENOMEM;
+
+	reg = saw_base + 0x1C;
+
+	/*
+	 * Set FTS2 type CPU supply regulator to 1.15 V. This assumes that the
+	 * regulator is already configured in LV range.
+	 */
+	val = 0x40000E6;
+	writel_relaxed(val, reg);
+	mb();
+	udelay(timeout);
+
+	/* Enable CPU supply regulator */
+	val = 0x2030080;
+	writel_relaxed(val, reg);
+	mb();
+	udelay(timeout);
+
+	/* Unmap the original ioremap cookie, not the offset pointer */
+	iounmap(saw_base);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_spm_turn_on_cpu_rail);
+
+/*
+ * Re-program every per-CPU SAW from its shadow state (e.g. after the
+ * hardware lost context).  NOTE(review): unlike msm_spm_l2_reinit()
+ * this does not skip uninitialised devices -- confirm all per-CPU SAWs
+ * are guaranteed probed before this runs.
+ */
+void msm_spm_reinit(void)
+{
+	unsigned int cpu;
+	for_each_possible_cpu(cpu)
+		msm_spm_drv_reinit(&per_cpu(msm_cpu_spm_device.reg_data, cpu));
+}
+EXPORT_SYMBOL(msm_spm_reinit);
+
+/**
+ * msm_spm_set_low_power_mode() - Configure SPM start address for low power mode
+ * @mode: SPM LPM mode to enter
+ * @notify_rpm: Notify RPM in this mode
+ *
+ * Operates on the calling CPU's SAW (__get_cpu_var), so presumably it
+ * runs from the idle path with preemption disabled -- confirm callers.
+ */
+int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm)
+{
+	struct msm_spm_device *dev = &__get_cpu_var(msm_cpu_spm_device);
+	/* Only full power collapse uses the SAW's PC handshake */
+	bool pc_mode = (mode == MSM_SPM_MODE_POWER_COLLAPSE) ? true : false;
+	return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm, pc_mode);
+}
+EXPORT_SYMBOL(msm_spm_set_low_power_mode);
+
+/**
+ * msm_spm_init(): Board initalization function
+ * @data: platform specific SPM register configuration data
+ * @nr_devs: Number of SPM devices being initialized
+ *
+ * Initialises one SPM device per possible CPU from data[cpu]; stops at
+ * the first failure and returns its errno (0 on success).
+ */
+int __init msm_spm_init(struct msm_spm_platform_data *data, int nr_devs)
+{
+	unsigned int cpu;
+	int ret = 0;
+
+	BUG_ON((nr_devs < num_possible_cpus()) || !data);
+
+	for_each_possible_cpu(cpu) {
+		struct msm_spm_device *dev = &per_cpu(msm_cpu_spm_device, cpu);
+		ret = msm_spm_dev_init(dev, &data[cpu]);
+		if (ret < 0) {
+			pr_warn("%s():failed CPU:%u ret:%d\n", __func__,
+					cpu, ret);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+/* L2 SAW interface; compiled only when CONFIG_MSM_L2_SPM is enabled */
+#ifdef CONFIG_MSM_L2_SPM
+
+/**
+ * msm_spm_l2_set_low_power_mode(): Configure L2 SPM start address
+ * for low power mode
+ * @mode: SPM LPM mode to enter
+ * @notify_rpm: Notify RPM in this mode
+ */
+int msm_spm_l2_set_low_power_mode(unsigned int mode, bool notify_rpm)
+{
+	bool pc_mode = true;
+
+	/* Only DISABLED and RETENTION avoid the power-collapse handshake */
+	if (mode == MSM_SPM_L2_MODE_DISABLED ||
+			mode == MSM_SPM_L2_MODE_RETENTION)
+		pc_mode = false;
+	return msm_spm_dev_set_low_power_mode(
+			&msm_spm_l2_device, mode, notify_rpm, pc_mode);
+}
+EXPORT_SYMBOL(msm_spm_l2_set_low_power_mode);
+
+/* Re-program the L2 SAW from shadow state; no-op before probe */
+void msm_spm_l2_reinit(void)
+{
+	if (!msm_spm_l2_device.initialized)
+		return;
+	msm_spm_drv_reinit(&msm_spm_l2_device.reg_data);
+}
+EXPORT_SYMBOL(msm_spm_l2_reinit);
+
+/**
+ * msm_spm_apcs_set_phase(): Set number of SMPS phases.
+ * phase_cnt: Number of phases to be set active
+ */
+int msm_spm_apcs_set_phase(unsigned int phase_cnt)
+{
+	if (!msm_spm_l2_device.initialized)
+		return -ENXIO;
+	return msm_spm_drv_set_pmic_data(&msm_spm_l2_device.reg_data,
+			MSM_SPM_PMIC_PHASE_PORT, phase_cnt);
+}
+EXPORT_SYMBOL(msm_spm_apcs_set_phase);
+
+/** msm_spm_enable_fts_lpm() : Enable FTS to switch to low power
+ *                             when the cores are in low power modes
+ * @mode: The mode configuration for FTS
+ */
+int msm_spm_enable_fts_lpm(uint32_t mode)
+{
+	if (!msm_spm_l2_device.initialized)
+		return -ENXIO;
+	return msm_spm_drv_set_pmic_data(&msm_spm_l2_device.reg_data,
+			MSM_SPM_PMIC_PFM_PORT, mode);
+}
+EXPORT_SYMBOL(msm_spm_enable_fts_lpm);
+
+/**
+ * msm_spm_l2_init(): Board initialization function
+ * @data: SPM target specific register configuration
+ */
+int __init msm_spm_l2_init(struct msm_spm_platform_data *data)
+{
+	return msm_spm_dev_init(&msm_spm_l2_device, data);
+}
+#endif
+
+/*
+ * msm_spm_dev_probe() - parse a qcom,spm-v2 DT node and initialise the
+ * matching SPM device: per-CPU SAW for core-id 0..NR_CPUS-1, L2 SAW for
+ * core-id 0xFFFF.  Reads register init values, PMIC port numbers and
+ * the per-mode command sequences from DT, then hands everything to
+ * msm_spm_dev_init().  Returns 0 on success, -EFAULT for a missing
+ * required property, or the init error.
+ */
+static int msm_spm_dev_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	int cpu = 0;
+	int i = 0;
+	struct device_node *node = pdev->dev.of_node;
+	struct msm_spm_platform_data spm_data;
+	char *key = NULL;
+	uint32_t val = 0;
+	struct msm_spm_seq_entry modes[MSM_SPM_MODE_NR];
+	int len = 0;
+	struct msm_spm_device *dev = NULL;
+	struct resource *res = NULL;
+	uint32_t mode_count = 0;
+
+	struct spm_of {
+		char *key;
+		uint32_t id;
+	};
+
+	struct spm_of spm_of_data[] = {
+		{"qcom,saw2-cfg", MSM_SPM_REG_SAW2_CFG},
+		{"qcom,saw2-avs-ctl", MSM_SPM_REG_SAW2_AVS_CTL},
+		{"qcom,saw2-avs-hysteresis", MSM_SPM_REG_SAW2_AVS_HYSTERESIS},
+		{"qcom,saw2-avs-limit", MSM_SPM_REG_SAW2_AVS_LIMIT},
+		{"qcom,saw2-avs-dly", MSM_SPM_REG_SAW2_AVS_DLY},
+		{"qcom,saw2-spm-dly", MSM_SPM_REG_SAW2_SPM_DLY},
+		{"qcom,saw2-spm-ctl", MSM_SPM_REG_SAW2_SPM_CTL},
+		{"qcom,saw2-pmic-data0", MSM_SPM_REG_SAW2_PMIC_DATA_0},
+		{"qcom,saw2-pmic-data1", MSM_SPM_REG_SAW2_PMIC_DATA_1},
+		{"qcom,saw2-pmic-data2", MSM_SPM_REG_SAW2_PMIC_DATA_2},
+		{"qcom,saw2-pmic-data3", MSM_SPM_REG_SAW2_PMIC_DATA_3},
+		{"qcom,saw2-pmic-data4", MSM_SPM_REG_SAW2_PMIC_DATA_4},
+		{"qcom,saw2-pmic-data5", MSM_SPM_REG_SAW2_PMIC_DATA_5},
+		{"qcom,saw2-pmic-data6", MSM_SPM_REG_SAW2_PMIC_DATA_6},
+		{"qcom,saw2-pmic-data7", MSM_SPM_REG_SAW2_PMIC_DATA_7},
+	};
+
+	struct mode_of {
+		char *key;
+		uint32_t id;
+		uint32_t notify_rpm;
+	};
+
+	struct mode_of of_cpu_modes[] = {
+		{"qcom,saw2-spm-cmd-wfi", MSM_SPM_MODE_CLOCK_GATING, 0},
+		{"qcom,saw2-spm-cmd-ret", MSM_SPM_MODE_POWER_RETENTION, 0},
+		{"qcom,saw2-spm-cmd-spc", MSM_SPM_MODE_POWER_COLLAPSE, 0},
+		{"qcom,saw2-spm-cmd-pc", MSM_SPM_MODE_POWER_COLLAPSE, 1},
+	};
+
+	struct mode_of of_l2_modes[] = {
+		{"qcom,saw2-spm-cmd-ret", MSM_SPM_L2_MODE_RETENTION, 1},
+		{"qcom,saw2-spm-cmd-gdhs", MSM_SPM_L2_MODE_GDHS, 1},
+		{"qcom,saw2-spm-cmd-pc-no-rpm", MSM_SPM_L2_MODE_PC_NO_RPM, 1},
+		{"qcom,saw2-spm-cmd-pc", MSM_SPM_L2_MODE_POWER_COLLAPSE, 1},
+	};
+
+	struct mode_of *mode_of_data;
+	int num_modes;
+
+	memset(&spm_data, 0, sizeof(struct msm_spm_platform_data));
+	/* Zero the whole array: the previous (MSM_SPM_MODE_NR - 2) element
+	 * count left the last two entries as uninitialised stack data.
+	 */
+	memset(&modes, 0, sizeof(modes));
+
+	key = "qcom,core-id";
+	ret = of_property_read_u32(node, key, &val);
+	if (ret)
+		goto fail;
+	cpu = val;
+
+	/*
+	 * Device with id 0..NR_CPUS are SPM for apps cores
+	 * Device with id 0xFFFF is for L2 SPM.
+	 */
+	if (cpu >= 0 && cpu < num_possible_cpus()) {
+		mode_of_data = of_cpu_modes;
+		num_modes = ARRAY_SIZE(of_cpu_modes);
+		dev = &per_cpu(msm_cpu_spm_device, cpu);
+
+	} else if (cpu == 0xffff) {
+		mode_of_data = of_l2_modes;
+		num_modes = ARRAY_SIZE(of_l2_modes);
+		dev = &msm_spm_l2_device;
+	} else
+		/* Unrecognised core-id: ignore the node (ret is 0 here) */
+		return ret;
+
+	key = "qcom,saw2-ver-reg";
+	ret = of_property_read_u32(node, key, &val);
+	if (ret)
+		goto fail;
+	spm_data.ver_reg = val;
+
+	key = "qcom,vctl-timeout-us";
+	ret = of_property_read_u32(node, key, &val);
+	if (!ret)
+		spm_data.vctl_timeout_us = val;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		goto fail;
+
+	spm_data.reg_base_addr = devm_ioremap(&pdev->dev, res->start,
+					resource_size(res));
+	if (!spm_data.reg_base_addr)
+		return -ENOMEM;
+
+	/* -1 marks a PMIC port as unconfigured */
+	spm_data.vctl_port = -1;
+	spm_data.phase_port = -1;
+	spm_data.pfm_port = -1;
+
+	key = "qcom,vctl-port";
+	of_property_read_u32(node, key, &spm_data.vctl_port);
+
+	key = "qcom,phase-port";
+	of_property_read_u32(node, key, &spm_data.phase_port);
+
+	key = "qcom,pfm-port";
+	of_property_read_u32(node, key, &spm_data.pfm_port);
+
+	/* optional */
+	if (dev == &msm_spm_l2_device) {
+		key = "qcom,L2-spm-is-apcs-master";
+		msm_spm_L2_apcs_master =
+			of_property_read_bool(pdev->dev.of_node, key);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(spm_of_data); i++) {
+		ret = of_property_read_u32(node, spm_of_data[i].key, &val);
+		if (ret)
+			continue;
+		spm_data.reg_init_values[spm_of_data[i].id] = val;
+	}
+
+	/* Collect only the command sequences present in this node */
+	for (i = 0; i < num_modes; i++) {
+		key = mode_of_data[i].key;
+		modes[mode_count].cmd =
+			(uint8_t *)of_get_property(node, key, &len);
+		if (!modes[mode_count].cmd)
+			continue;
+		modes[mode_count].mode = mode_of_data[i].id;
+		modes[mode_count].notify_rpm = mode_of_data[i].notify_rpm;
+		mode_count++;
+	}
+
+	spm_data.modes = modes;
+	spm_data.num_modes = mode_count;
+
+	ret = msm_spm_dev_init(dev, &spm_data);
+
+	if (ret < 0)
+		pr_warn("%s():failed core-id:%u ret:%d\n", __func__, cpu, ret);
+
+	return ret;
+
+fail:
+	pr_err("%s: Failed reading node=%s, key=%s\n",
+			__func__, node->full_name, key);
+	return -EFAULT;
+}
+
+/* DT match: one node per CPU SAW plus one for L2 (qcom,core-id) */
+static struct of_device_id msm_spm_match_table[] = {
+	{.compatible = "qcom,spm-v2"},
+	{},
+};
+
+static struct platform_driver msm_spm_device_driver = {
+	.probe = msm_spm_dev_probe,
+	.driver = {
+		.name = "spm-v2",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_spm_match_table,
+	},
+};
+
+/**
+ * msm_spm_device_init(): Device tree initialization function
+ */
+int __init msm_spm_device_init(void)
+{
+	return platform_driver_register(&msm_spm_device_driver);
+}
+arch_initcall(msm_spm_device_init);
diff --git a/drivers/soc/qcom/spm_driver.h b/drivers/soc/qcom/spm_driver.h
new file mode 100644
index 000000000000..13e6adb8af57
--- /dev/null
+++ b/drivers/soc/qcom/spm_driver.h
@@ -0,0 +1,54 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __ARCH_ARM_MACH_MSM_SPM_DEVICES_H
+#define __ARCH_ARM_MACH_MSM_SPM_DEVICES_H
+
+#include <soc/qcom/spm.h>
+
+/* Logical PMIC ports addressable through the SAW2 VCTL register */
+enum msm_spm_pmic_port {
+	MSM_SPM_PMIC_VCTL_PORT,
+	MSM_SPM_PMIC_PHASE_PORT,
+	MSM_SPM_PMIC_PFM_PORT,
+};
+
+/* Low-level per-SAW2-instance state shared between spm-v2.c users */
+struct msm_spm_driver_data {
+	uint32_t major;		/* HW revision from the VERSION register */
+	uint32_t minor;
+	uint32_t ver_reg;	/* byte offset of the VERSION register */
+	uint32_t vctl_port;	/* PMIC port indices from DT (-1 = unset) */
+	uint32_t phase_port;
+	uint32_t pfm_port;
+	void __iomem *reg_base_addr;
+	uint32_t vctl_timeout_us;	/* poll budget for PMIC requests */
+	uint32_t avs_timeout_us;
+	uint32_t reg_shadow[MSM_SPM_REG_NR];	/* cached register values */
+	uint32_t *reg_seq_entry_shadow;	/* cached SPM command sequences */
+	uint32_t *reg_offsets;	/* revision-specific register layout */
+};
+
+int msm_spm_drv_init(struct msm_spm_driver_data *dev,
+		struct msm_spm_platform_data *data);
+void msm_spm_drv_reinit(struct msm_spm_driver_data *dev);
+int msm_spm_drv_set_low_power_mode(struct msm_spm_driver_data *dev,
+		uint32_t addr, bool pc_mode);
+int msm_spm_drv_set_vdd(struct msm_spm_driver_data *dev,
+		unsigned int vlevel);
+uint32_t msm_spm_drv_get_sts_curr_pmic_data(
+		struct msm_spm_driver_data *dev);
+int msm_spm_drv_write_seq_data(struct msm_spm_driver_data *dev,
+		uint8_t *cmd, uint32_t *offset);
+void msm_spm_drv_flush_seq_entry(struct msm_spm_driver_data *dev);
+int msm_spm_drv_set_spm_enable(struct msm_spm_driver_data *dev,
+		bool enable);
+int msm_spm_drv_set_pmic_data(struct msm_spm_driver_data *dev,
+		enum msm_spm_pmic_port port, unsigned int data);
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
new file mode 100644
index 000000000000..44ab4d543296
--- /dev/null
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -0,0 +1,964 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include <linux/dma-mapping.h>
+
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/ramdump.h>
+#include <soc/qcom/scm.h>
+
+#include <soc/qcom/smem.h>
+
+#include "peripheral-loader.h"
+
+#define XO_FREQ			19200000	/* XO rate, Hz (default clk rate) */
+#define PROXY_TIMEOUT_MS	10000		/* default proxy-vote timeout */
+#define MAX_SSR_REASON_LEN	81U		/* SFR string budget, incl. NUL */
+#define STOP_ACK_TIMEOUT_MS	1000		/* wait for stop-ack IRQ */
+#define CRASH_STOP_ACK_TO_MS	200		/* quiesce delay in crash path */
+
+/* Recover the pil_tz_data from its embedded pil_desc / subsys_desc */
+#define desc_to_data(d) container_of(d, struct pil_tz_data, desc)
+#define subsys_to_data(d) container_of(d, struct pil_tz_data, subsys_desc)
+
+/**
+ * struct reg_info - regulator info
+ * @reg: regulator handle
+ * @uV: voltage in uV
+ * @uA: current in uA
+ */
+struct reg_info {
+ struct regulator *reg;
+ int uV;
+ int uA;
+};
+
+/**
+ * struct pil_tz_data
+ * @regs: regulators that should be always on when the subsystem is
+ * brought out of reset
+ * @proxy_regs: regulators that should be on during pil proxy voting
+ * @clks: clocks that should be always on when the subsystem is
+ * brought out of reset
+ * @proxy_clks: clocks that should be on during pil proxy voting
+ * @reg_count: the number of always on regulators
+ * @proxy_reg_count: the number of proxy voting regulators
+ * @clk_count: the number of always on clocks
+ * @proxy_clk_count: the number of proxy voting clocks
+ * @smem_id: the smem id used to read the subsystem crash reason
+ * @ramdump_dev: ramdump device pointer
+ * @pas_id: the PAS id for tz
+ * @bus_client: bus client id
+ * @stop_ack: state of completion of stop ack
+ * @desc: PIL descriptor
+ * @subsys: subsystem device pointer
+ * @subsys_desc: subsystem descriptor
+ */
+struct pil_tz_data {
+ struct reg_info *regs;
+ struct reg_info *proxy_regs;
+ struct clk **clks;
+ struct clk **proxy_clks;
+ int reg_count;
+ int proxy_reg_count;
+ int clk_count;
+ int proxy_clk_count;
+ int smem_id;
+ void *ramdump_dev;
+ u32 pas_id;
+ u32 bus_client;
+ struct completion stop_ack;
+ struct pil_desc desc;
+ struct subsys_device *subsys;
+ struct subsys_desc subsys_desc;
+};
+
+/*
+ * SCM command IDs for SCM_SVC_PIL.  The explicit values (and the gap at
+ * 3/4) presumably match the TZ interface -- do not renumber.
+ */
+enum scm_cmd {
+	PAS_INIT_IMAGE_CMD = 1,
+	PAS_MEM_SETUP_CMD,
+	PAS_AUTH_AND_RESET_CMD = 5,
+	PAS_SHUTDOWN_CMD,
+};
+
+/*
+ * Peripheral Authentication Service (PAS) image identifiers passed to
+ * TZ in the "proc" field of the requests below.  Ordering presumably
+ * mirrors the TZ ABI -- do not reorder.
+ */
+enum pas_id {
+	PAS_MODEM,
+	PAS_Q6,
+	PAS_DSPS,
+	PAS_TZAPPS,
+	PAS_MODEM_SW,
+	PAS_MODEM_FW,
+	PAS_WCNSS,
+	PAS_SECAPP,
+	PAS_GSS,
+	PAS_VIDC,
+	PAS_VPU,
+	PAS_BCSS,
+};
+
+/* Indices into scm_clock_names[]/scm_clocks[] */
+enum scm_clock_ids {
+	BUS_CLK = 0,
+	CORE_CLK,
+	IFACE_CLK,
+	CORE_CLK_SRC,
+	NUM_CLKS
+};
+
+/* con_ids of the "scm" device clocks, indexed by enum scm_clock_ids */
+static const char * const scm_clock_names[NUM_CLKS] = {
+	[BUS_CLK] = "bus_clk",
+	[CORE_CLK] = "core_clk",
+	[IFACE_CLK] = "iface_clk",
+	[CORE_CLK_SRC] = "core_clk_src",
+};
+
+/* Filled by scm_pas_init(); NULL entries mean the clock is not provided */
+static struct clk *scm_clocks[NUM_CLKS];
+
+/*
+ * Bus usecases for the SCM PAS client: index 0 = idle (no bandwidth),
+ * index 1 = active.  The .src fields are patched in scm_pas_init().
+ * NOTE(review): .ib uses a 1000000UL multiplier but .ab uses 100000UL
+ * (one order of magnitude lower) -- confirm this is intentional.
+ */
+static struct msm_bus_paths scm_pas_bw_tbl[] = {
+	{
+		.vectors = (struct msm_bus_vectors[]){
+			{
+				.src = MSM_BUS_MASTER_SPS,
+				.dst = MSM_BUS_SLAVE_EBI_CH0,
+			},
+		},
+		.num_paths = 1,
+	},
+	{
+		.vectors = (struct msm_bus_vectors[]){
+			{
+				.src = MSM_BUS_MASTER_SPS,
+				.dst = MSM_BUS_SLAVE_EBI_CH0,
+				.ib = 492 * 8 * 1000000UL,
+				.ab = 492 * 8 * 100000UL,
+			},
+		},
+		.num_paths = 1,
+	},
+};
+
+static struct msm_bus_scale_pdata scm_pas_bus_pdata = {
+	.usecase = scm_pas_bw_tbl,
+	.num_usecases = ARRAY_SIZE(scm_pas_bw_tbl),
+	.name = "scm_pas",
+};
+
+/* Bus client handle and refcount shared by scm_pas_enable_bw()/disable_bw() */
+static uint32_t scm_perf_client;
+static int scm_pas_bw_count;
+static DEFINE_MUTEX(scm_pas_bw_mutex);	/* guards the count, vote and clocks */
+
+/*
+ * Vote for bus bandwidth and enable the clocks the secure world needs
+ * while it authenticates/processes an image.  Balanced by
+ * scm_pas_disable_bw().  Returns 0 on success or a negative errno; on
+ * failure no vote and no clock is left held.
+ *
+ * Fixes vs. original: (1) a clock failure used to fall through to the
+ * bus-error path and return 0 (success) while leaking the refcount;
+ * (2) the refcount was only incremented when it was zero, so nested
+ * enable/disable pairs underflowed; (3) "scm-pas;" message typo.
+ */
+static int scm_pas_enable_bw(void)
+{
+	int ret = 0, i;
+
+	if (!scm_perf_client)
+		return -EINVAL;
+
+	mutex_lock(&scm_pas_bw_mutex);
+	if (!scm_pas_bw_count) {
+		ret = msm_bus_scale_client_update_request(scm_perf_client, 1);
+		if (ret) {
+			pr_err("scm-pas: Bandwidth request failed (%d)\n",
+								ret);
+			goto err_bus;
+		}
+	}
+	/* Count every caller, not only the one that placed the vote */
+	scm_pas_bw_count++;
+
+	for (i = 0; i < NUM_CLKS; i++) {
+		if (clk_prepare_enable(scm_clocks[i])) {
+			pr_err("scm-pas: clk prepare_enable failed (%s)\n",
+							scm_clock_names[i]);
+			ret = -EIO;
+			goto err_clk;
+		}
+	}
+
+	mutex_unlock(&scm_pas_bw_mutex);
+	return 0;
+
+err_clk:
+	for (i--; i >= 0; i--)
+		clk_disable_unprepare(scm_clocks[i]);
+	if (!--scm_pas_bw_count)
+		msm_bus_scale_client_update_request(scm_perf_client, 0);
+err_bus:
+	mutex_unlock(&scm_pas_bw_mutex);
+	return ret;
+}
+
+/*
+ * Drop the bandwidth vote taken by scm_pas_enable_bw() and disable the
+ * SCM clocks.  The bus vote is removed only when the last user leaves.
+ * NOTE(review): assumes calls are balanced with scm_pas_enable_bw();
+ * unbalanced calls drive scm_pas_bw_count negative -- confirm callers.
+ */
+static void scm_pas_disable_bw(void)
+{
+	int i;
+	mutex_lock(&scm_pas_bw_mutex);
+	if (scm_pas_bw_count-- == 1)
+		msm_bus_scale_client_update_request(scm_perf_client, 0);
+
+	/* Undo clk_prepare_enable() in reverse order */
+	for (i = NUM_CLKS - 1; i >= 0; i--)
+		clk_disable_unprepare(scm_clocks[i]);
+	mutex_unlock(&scm_pas_bw_mutex);
+}
+
+/*
+ * One-time SCM/PAS setup: look up the "scm" system clocks, pick a rate
+ * for the core clock source, patch the bus-vector source to @id and
+ * register the bus-scaling client used while TZ processes images.
+ * NOTE(review): not lock-protected; assumes probe-time serialization.
+ */
+static void scm_pas_init(int id)
+{
+	int i, rate;
+	static int is_inited;	/* run-once guard */
+
+	if (is_inited)
+		return;
+
+	for (i = 0; i < NUM_CLKS; i++) {
+		scm_clocks[i] = clk_get_sys("scm", scm_clock_names[i]);
+		if (IS_ERR(scm_clocks[i]))
+			scm_clocks[i] = NULL;	/* clock optional: skip */
+	}
+
+	/* Fail silently if this clock is not supported */
+	rate = clk_round_rate(scm_clocks[CORE_CLK_SRC], 1);
+	clk_set_rate(scm_clocks[CORE_CLK_SRC], rate);
+
+	scm_pas_bw_tbl[0].vectors[0].src = id;
+	scm_pas_bw_tbl[1].vectors[0].src = id;
+
+	clk_set_rate(scm_clocks[BUS_CLK], 64000000);
+
+	scm_perf_client = msm_bus_scale_register_client(&scm_pas_bus_pdata);
+	if (!scm_perf_client)
+		pr_warn("scm-pas: Unable to register bus client\n");
+
+	is_inited = 1;
+}
+
+/*
+ * Parse @propname (a string list of clock names) from @dev's DT node and
+ * get each clock.  On success stores a devm-allocated array in *@clks_ref
+ * and returns its length; returns 0 when the property is absent, or a
+ * negative errno (including -EPROBE_DEFER from devm_clk_get) on failure.
+ */
+static int of_read_clocks(struct device *dev, struct clk ***clks_ref,
+		const char *propname)
+{
+	int clk_count, i, len;
+	struct clk **clks;
+
+	if (!of_find_property(dev->of_node, propname, &len))
+		return 0;
+
+	clk_count = of_property_count_strings(dev->of_node, propname);
+	if (IS_ERR_VALUE(clk_count)) {
+		dev_err(dev, "Failed to get clock names\n");
+		return -EINVAL;
+	}
+
+	clks = devm_kzalloc(dev, sizeof(struct clk *) * clk_count,
+				GFP_KERNEL);
+	if (!clks)
+		return -ENOMEM;
+
+	for (i = 0; i < clk_count; i++) {
+		const char *clock_name;
+		of_property_read_string_index(dev->of_node,
+						propname, i,
+						&clock_name);
+
+		clks[i] = devm_clk_get(dev, clock_name);
+		if (IS_ERR(clks[i])) {
+			int rc = PTR_ERR(clks[i]);
+			if (rc != -EPROBE_DEFER)
+				dev_err(dev, "Failed to get %s clock\n",
+								clock_name);
+			return rc;
+		}
+
+		/* Make sure rate-settable clocks' rates are set */
+		if (clk_get_rate(clks[i]) == 0)
+			clk_set_rate(clks[i], clk_round_rate(clks[i],
+						XO_FREQ));
+	}
+
+	*clks_ref = clks;
+	return clk_count;
+}
+
+/*
+ * Parse @propname (a string list of regulator names) from @dev's DT
+ * node.  An optional "qcom,<name>-uV-uA" property per regulator gives
+ * the voltage/load to request at enable time.  On success stores a
+ * devm-allocated array in *@regs_ref and returns its length; returns 0
+ * when the property is absent, or a negative errno on failure.
+ *
+ * Fixes vs. original: misplaced '\n' inside the "Failed to get ...
+ * regulator" message; inner declaration shadowed the outer rc; the
+ * snprintf length check could never detect truncation.
+ */
+static int of_read_regs(struct device *dev, struct reg_info **regs_ref,
+		const char *propname)
+{
+	int reg_count, i, len, rc;
+	struct reg_info *regs;
+
+	if (!of_find_property(dev->of_node, propname, &len))
+		return 0;
+
+	reg_count = of_property_count_strings(dev->of_node, propname);
+	if (IS_ERR_VALUE(reg_count)) {
+		dev_err(dev, "Failed to get regulator names\n");
+		return -EINVAL;
+	}
+
+	regs = devm_kzalloc(dev, sizeof(struct reg_info) * reg_count,
+			GFP_KERNEL);
+	if (!regs)
+		return -ENOMEM;
+
+	for (i = 0; i < reg_count; i++) {
+		const char *reg_name;
+		char reg_uV_uA_name[50];
+		u32 vdd_uV_uA[2];
+
+		of_property_read_string_index(dev->of_node,
+				propname, i,
+				&reg_name);
+
+		regs[i].reg = devm_regulator_get(dev, reg_name);
+		if (IS_ERR(regs[i].reg)) {
+			rc = PTR_ERR(regs[i].reg);
+			if (rc != -EPROBE_DEFER)
+				dev_err(dev, "Failed to get %s regulator\n",
+								reg_name);
+			return rc;
+		}
+
+		/*
+		 * Read the voltage and current values for the corresponding
+		 * regulator. The device tree property name is "qcom," +
+		 * "regulator_name" + "-uV-uA".
+		 */
+		rc = snprintf(reg_uV_uA_name, ARRAY_SIZE(reg_uV_uA_name),
+			 "qcom,%s-uV-uA", reg_name);
+		/* snprintf returns the untruncated length: detect overflow */
+		if (rc < 0 || rc >= (int)ARRAY_SIZE(reg_uV_uA_name)) {
+			dev_err(dev, "Failed to hold reg_uV_uA_name\n");
+			return -EINVAL;
+		}
+
+		if (!of_find_property(dev->of_node, reg_uV_uA_name, &len))
+			continue;
+
+		len /= sizeof(vdd_uV_uA[0]);
+
+		/* There should be two entries: one for uV and one for uA */
+		if (len != 2) {
+			dev_err(dev, "Missing uV/uA value\n");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32_array(dev->of_node, reg_uV_uA_name,
+						vdd_uV_uA, len);
+		if (rc) {
+			dev_err(dev, "Failed to read uV/uA values\n");
+			return rc;
+		}
+
+		regs[i].uV = vdd_uV_uA[0];
+		regs[i].uA = vdd_uV_uA[1];
+	}
+
+	*regs_ref = regs;
+	return reg_count;
+}
+
+/*
+ * Register a bus-scaling client from the "qcom,msm-bus,*" DT data of
+ * @pdev, storing the client handle in @d->bus_client.
+ */
+static int of_read_bus_pdata(struct platform_device *pdev,
+		struct pil_tz_data *d)
+{
+	struct msm_bus_scale_pdata *bus_pdata = msm_bus_cl_get_pdata(pdev);
+
+	if (!bus_pdata)
+		return -EINVAL;
+
+	d->bus_client = msm_bus_scale_register_client(bus_pdata);
+
+	return d->bus_client ? 0 : -EINVAL;
+}
+
+/*
+ * Gather all DT-described resources for the subsystem: active and proxy
+ * clocks, active and proxy regulators, and (optionally) a bus-scaling
+ * client.  Populates @d and returns 0, or the first error -- which may
+ * be -EPROBE_DEFER from the clock/regulator lookups.
+ */
+static int piltz_resc_init(struct platform_device *pdev, struct pil_tz_data *d)
+{
+	int len, count, rc;
+	struct device *dev = &pdev->dev;
+
+	count = of_read_clocks(dev, &d->clks, "qcom,active-clock-names");
+	if (count < 0) {
+		dev_err(dev, "Failed to setup clocks.\n");
+		return count;
+	}
+	d->clk_count = count;
+
+	count = of_read_clocks(dev, &d->proxy_clks, "qcom,proxy-clock-names");
+	if (count < 0) {
+		dev_err(dev, "Failed to setup proxy clocks.\n");
+		return count;
+	}
+	d->proxy_clk_count = count;
+
+	count = of_read_regs(dev, &d->regs, "qcom,active-reg-names");
+	if (count < 0) {
+		dev_err(dev, "Failed to setup regulators.\n");
+		return count;
+	}
+	d->reg_count = count;
+
+	count = of_read_regs(dev, &d->proxy_regs, "qcom,proxy-reg-names");
+	if (count < 0) {
+		dev_err(dev, "Failed to setup proxy regulators.\n");
+		return count;
+	}
+	d->proxy_reg_count = count;
+
+	/* Bus scaling is optional: only set up when the DT describes it */
+	if (of_find_property(dev->of_node, "qcom,msm-bus,name", &len)) {
+		rc = of_read_bus_pdata(pdev, d);
+		if (rc) {
+			dev_err(dev, "Failed to setup bus scaling client.\n");
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Request the configured voltage/load and enable each regulator in
+ * @regs.  On failure everything done so far (including settings on the
+ * regulator that failed) is rolled back, so no regulator is left
+ * enabled or holding a voltage/mode request.  Returns 0 or the first
+ * error.
+ *
+ * Fix vs. original: the err_enable unwind reset the voltage under the
+ * uA condition; it now undoes only the optimum-mode request and falls
+ * through to the voltage undo, which is keyed off uV.
+ */
+static int enable_regulators(struct device *dev, struct reg_info *regs,
+				int reg_count)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < reg_count; i++) {
+		if (regs[i].uV > 0) {
+			rc = regulator_set_voltage(regs[i].reg,
+					regs[i].uV, INT_MAX);
+			if (rc) {
+				dev_err(dev, "Failed to request voltage.\n");
+				goto err_voltage;
+			}
+		}
+
+		if (regs[i].uA > 0) {
+			rc = regulator_set_optimum_mode(regs[i].reg,
+						regs[i].uA);
+			if (rc < 0) {
+				dev_err(dev, "Failed to set regulator mode\n");
+				goto err_mode;
+			}
+		}
+
+		rc = regulator_enable(regs[i].reg);
+		if (rc) {
+			dev_err(dev, "Regulator enable failed\n");
+			goto err_enable;
+		}
+	}
+
+	return 0;
+err_enable:
+	/* Undo the load request of the regulator that failed to enable */
+	if (regs[i].uA > 0)
+		regulator_set_optimum_mode(regs[i].reg, 0);
+err_mode:
+	/* Undo its voltage request */
+	if (regs[i].uV > 0)
+		regulator_set_voltage(regs[i].reg, 0, INT_MAX);
+err_voltage:
+	/* Fully unwind every regulator enabled before the failing one */
+	for (i--; i >= 0; i--) {
+		if (regs[i].uV > 0)
+			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
+
+		if (regs[i].uA > 0)
+			regulator_set_optimum_mode(regs[i].reg, 0);
+
+		regulator_disable(regs[i].reg);
+	}
+
+	return rc;
+}
+
+/* Release voltage/load requests and disable every regulator in @regs. */
+static void disable_regulators(struct reg_info *regs, int reg_count)
+{
+	struct reg_info *r;
+
+	for (r = regs; r < regs + reg_count; r++) {
+		if (r->uV > 0)
+			regulator_set_voltage(r->reg, 0, INT_MAX);
+
+		if (r->uA > 0)
+			regulator_set_optimum_mode(r->reg, 0);
+
+		regulator_disable(r->reg);
+	}
+}
+
+/*
+ * Prepare and enable @clk_count clocks in order; if one fails, roll
+ * back the ones already enabled and return the error.
+ */
+static int prepare_enable_clocks(struct device *dev, struct clk **clks,
+					int clk_count)
+{
+	int i = 0;
+
+	while (i < clk_count) {
+		int rc = clk_prepare_enable(clks[i]);
+
+		if (rc) {
+			dev_err(dev, "Clock enable failed\n");
+			while (--i >= 0)
+				clk_disable_unprepare(clks[i]);
+			return rc;
+		}
+		i++;
+	}
+
+	return 0;
+}
+
+/* Disable and unprepare every clock handed to prepare_enable_clocks(). */
+static void disable_unprepare_clocks(struct clk **clks, int clk_count)
+{
+	struct clk **c;
+
+	for (c = clks; c < clks + clk_count; c++)
+		clk_disable_unprepare(*c);
+}
+
+/*
+ * PIL proxy-vote hook: hold the proxy regulators, clocks and bus
+ * bandwidth the peripheral needs until it can take over its own votes.
+ * Undone by pil_remove_proxy_vote().  On failure everything acquired
+ * here is released before returning.
+ */
+static int pil_make_proxy_vote(struct pil_desc *pil)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	int rc;
+
+	rc = enable_regulators(pil->dev, d->proxy_regs, d->proxy_reg_count);
+	if (rc)
+		return rc;
+
+	rc = prepare_enable_clocks(pil->dev, d->proxy_clks,
+					d->proxy_clk_count);
+	if (rc)
+		goto err_clks;
+
+	/* Bus client is optional (see piltz_resc_init) */
+	if (d->bus_client) {
+		rc = msm_bus_scale_client_update_request(d->bus_client, 1);
+		if (rc) {
+			dev_err(pil->dev, "bandwidth request failed\n");
+			goto err_bw;
+		}
+	}
+
+	return 0;
+err_bw:
+	disable_unprepare_clocks(d->proxy_clks, d->proxy_clk_count);
+err_clks:
+	disable_regulators(d->proxy_regs, d->proxy_reg_count);
+
+	return rc;
+}
+
+/* Release everything acquired by pil_make_proxy_vote(), in reverse order. */
+static void pil_remove_proxy_vote(struct pil_desc *pil)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+
+	if (d->bus_client)
+		msm_bus_scale_client_update_request(d->bus_client, 0);
+
+	disable_unprepare_clocks(d->proxy_clks, d->proxy_clk_count);
+
+	disable_regulators(d->proxy_regs, d->proxy_reg_count);
+}
+
+/*
+ * Hand the image metadata to TZ (PAS_INIT_IMAGE_CMD) for authentication.
+ *
+ * The metadata is copied into a strongly-ordered DMA buffer whose
+ * physical address is passed to TZ.  Returns 0, a negative errno, or
+ * the (positive) TZ status word on secure-side failure.
+ */
+static int pil_init_image_trusted(struct pil_desc *pil,
+		const u8 *metadata, size_t size)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	struct pas_init_image_req {
+		u32 proc;
+		u32 image_addr;
+	} request;
+	u32 scm_ret = 0;
+	void *mdata_buf;
+	dma_addr_t mdata_phys;
+	int ret;
+	DEFINE_DMA_ATTRS(attrs);
+	/* Throwaway device used purely to call dma_alloc_attrs() */
+	struct device dev = {0};
+
+	ret = scm_pas_enable_bw();
+	if (ret)
+		return ret;
+	dev.coherent_dma_mask =
+		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
+	mdata_buf = dma_alloc_attrs(&dev, size, &mdata_phys, GFP_KERNEL,
+					&attrs);
+	if (!mdata_buf) {
+		pr_err("scm-pas: Allocation for metadata failed.\n");
+		scm_pas_disable_bw();
+		return -ENOMEM;
+	}
+
+	memcpy(mdata_buf, metadata, size);
+
+	request.proc = d->pas_id;
+	request.image_addr = mdata_phys;
+
+	ret = scm_call(SCM_SVC_PIL, PAS_INIT_IMAGE_CMD, &request,
+			sizeof(request), &scm_ret, sizeof(scm_ret));
+
+	/* Buffer may be freed once TZ has consumed the metadata */
+	dma_free_attrs(&dev, size, mdata_buf, mdata_phys, &attrs);
+	scm_pas_disable_bw();
+	if (ret)
+		return ret;
+	return scm_ret;
+}
+
+/*
+ * Tell TZ (PAS_MEM_SETUP_CMD) the physical region (@addr, @size) the
+ * subsystem image will occupy.  Returns 0, a negative errno, or the
+ * TZ status word on secure-side failure.
+ */
+static int pil_mem_setup_trusted(struct pil_desc *pil, phys_addr_t addr,
+			       size_t size)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	/* Local tag renamed: this is the mem-setup request, not init-image */
+	struct pas_mem_setup_req {
+		u32 proc;
+		u32 start_addr;
+		u32 len;
+	} request;
+	u32 scm_ret = 0;
+	int ret;
+
+	request.proc = d->pas_id;
+	request.start_addr = addr;
+	request.len = size;
+
+	ret = scm_call(SCM_SVC_PIL, PAS_MEM_SETUP_CMD, &request,
+			sizeof(request), &scm_ret, sizeof(scm_ret));
+	if (ret)
+		return ret;
+	return scm_ret;
+}
+
+/*
+ * Power the always-on regulators/clocks, then ask TZ
+ * (PAS_AUTH_AND_RESET_CMD) to authenticate the loaded image and bring
+ * the subsystem out of reset.  Returns 0, a negative errno, or the TZ
+ * status word (scm_ret; presumed 0 on success -- confirm TZ spec).
+ */
+static int pil_auth_and_reset(struct pil_desc *pil)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	int rc;
+	u32 proc = d->pas_id, scm_ret = 0;
+
+	rc = enable_regulators(pil->dev, d->regs, d->reg_count);
+	if (rc)
+		return rc;
+
+	rc = prepare_enable_clocks(pil->dev, d->clks, d->clk_count);
+	if (rc)
+		goto err_clks;
+
+	/* TZ needs bus bandwidth/clocks only for the duration of the call */
+	rc = scm_pas_enable_bw();
+	if (rc)
+		goto err_reset;
+
+	rc = scm_call(SCM_SVC_PIL, PAS_AUTH_AND_RESET_CMD, &proc,
+			sizeof(proc), &scm_ret, sizeof(scm_ret));
+	scm_pas_disable_bw();
+	if (rc)
+		goto err_reset;
+
+	return scm_ret;
+err_reset:
+	disable_unprepare_clocks(d->clks, d->clk_count);
+err_clks:
+	disable_regulators(d->regs, d->reg_count);
+
+	return rc;
+}
+
+/*
+ * Ask TZ (PAS_SHUTDOWN_CMD) to shut the subsystem down.  Proxy
+ * resources are held only around the scm call; on success the
+ * always-on clocks/regulators taken in pil_auth_and_reset() are also
+ * released.  Returns 0, a negative errno, or the TZ status word.
+ */
+static int pil_shutdown_trusted(struct pil_desc *pil)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	u32 proc = d->pas_id, scm_ret = 0;
+	int rc;
+
+	rc = enable_regulators(pil->dev, d->proxy_regs, d->proxy_reg_count);
+	if (rc)
+		return rc;
+
+	rc = prepare_enable_clocks(pil->dev, d->proxy_clks,
+					d->proxy_clk_count);
+	if (rc)
+		goto err_clks;
+
+	rc = scm_call(SCM_SVC_PIL, PAS_SHUTDOWN_CMD, &proc, sizeof(proc),
+			&scm_ret, sizeof(scm_ret));
+
+	disable_unprepare_clocks(d->proxy_clks, d->proxy_clk_count);
+	disable_regulators(d->proxy_regs, d->proxy_reg_count);
+
+	if (rc)
+		return rc;
+
+	/* Subsystem is down: drop the always-on resources too */
+	disable_unprepare_clocks(d->clks, d->clk_count);
+	disable_regulators(d->regs, d->reg_count);
+
+	return scm_ret;
+err_clks:
+	disable_regulators(d->proxy_regs, d->proxy_reg_count);
+	return rc;
+}
+
+/* PIL callbacks for peripherals authenticated and reset via TZ (PAS) */
+static struct pil_reset_ops pil_ops_trusted = {
+	.init_image = pil_init_image_trusted,
+	.mem_setup = pil_mem_setup_trusted,
+	.auth_and_reset = pil_auth_and_reset,
+	.shutdown = pil_shutdown_trusted,
+	.proxy_vote = pil_make_proxy_vote,
+	.proxy_unvote = pil_remove_proxy_vote,
+};
+
+/*
+ * Read and log the subsystem failure reason (SFR) string published in
+ * SMEM, then clear it so a stale reason is not re-reported.  A
+ * smem_id of -1 means no SFR region was configured (see probe).
+ */
+static void log_failure_reason(const struct pil_tz_data *d)
+{
+	u32 size;
+	char *smem_reason, reason[MAX_SSR_REASON_LEN];
+	const char *name = d->subsys_desc.name;
+
+	if (d->smem_id == -1)
+		return;
+
+	smem_reason = smem_get_entry_no_rlock(d->smem_id, &size, 0,
+							SMEM_ANY_HOST_FLAG);
+	if (!smem_reason || !size) {
+		pr_err("%s SFR: (unknown, smem_get_entry_no_rlock failed).\n",
+									name);
+		return;
+	}
+	if (!smem_reason[0]) {
+		pr_err("%s SFR: (unknown, empty string found).\n", name);
+		return;
+	}
+
+	strlcpy(reason, smem_reason, min(size, MAX_SSR_REASON_LEN));
+	pr_err("%s subsystem failure reason: %s.\n", name, reason);
+
+	smem_reason[0] = '\0';
+	wmb(); /* commit the cleared reason; smem is shared with the remote */
+}
+
+/*
+ * subsystem_restart shutdown hook.  When the peripheral has not
+ * already crashed and a force-stop GPIO exists, perform the graceful
+ * handshake (assert GPIO, wait for the stop-ack completion, deassert),
+ * then power the peripheral down through PIL.  Always returns 0.
+ */
+static int subsys_shutdown(const struct subsys_desc *subsys, bool force_stop)
+{
+	struct pil_tz_data *d = subsys_to_data(subsys);
+	int ret;
+
+	if (!subsys_get_crash_status(d->subsys) && force_stop &&
+	    subsys->force_stop_gpio) {
+		gpio_set_value(subsys->force_stop_gpio, 1);
+		ret = wait_for_completion_timeout(&d->stop_ack,
+				msecs_to_jiffies(STOP_ACK_TIMEOUT_MS));
+		if (!ret)
+			pr_warn("Timed out on stop ack from %s.\n",
+							subsys->name);
+		gpio_set_value(subsys->force_stop_gpio, 0);
+	}
+
+	pil_shutdown(&d->desc);
+	return 0;
+}
+
+/*
+ * subsystem_restart powerup hook: re-arm the stop-ack completion when
+ * a stop-ack IRQ is wired up, then boot the peripheral through PIL.
+ */
+static int subsys_powerup(const struct subsys_desc *subsys)
+{
+	struct pil_tz_data *d = subsys_to_data(subsys);
+	int ret = 0;
+
+	if (subsys->stop_ack_irq)
+		INIT_COMPLETION(d->stop_ack);
+	ret = pil_boot(&d->desc);
+
+	return ret;
+}
+
+/* Ramdump hook: collect a dump via PIL, or do nothing when disabled. */
+static int subsys_ramdump(int enable, const struct subsys_desc *subsys)
+{
+	struct pil_tz_data *d = subsys_to_data(subsys);
+
+	return enable ? pil_do_ramdump(&d->desc, d->ramdump_dev) : 0;
+}
+
+/*
+ * Crash-path shutdown: assert the force-stop GPIO and busy-wait
+ * (mdelay, so presumably safe in atomic/panic context -- confirm) to
+ * give the peripheral time to quiesce.  No stop-ack is waited for.
+ */
+static void subsys_crash_shutdown(const struct subsys_desc *subsys)
+{
+	struct pil_tz_data *d = subsys_to_data(subsys);
+
+	if (subsys->force_stop_gpio > 0 &&
+				!subsys_get_crash_status(d->subsys)) {
+		gpio_set_value(subsys->force_stop_gpio, 1);
+		mdelay(CRASH_STOP_ACK_TO_MS);
+	}
+}
+
+static irqreturn_t subsys_err_fatal_intr_handler (int irq, void *dev_id)
+{
+ struct pil_tz_data *d = subsys_to_data(dev_id);
+
+ pr_err("Fatal error on %s!\n", d->subsys_desc.name);
+ if (subsys_get_crash_status(d->subsys)) {
+ pr_err("%s: Ignoring error fatal, restart in progress\n",
+ d->subsys_desc.name);
+ return IRQ_HANDLED;
+ }
+ subsys_set_crash_status(d->subsys, true);
+ log_failure_reason(d);
+ subsystem_restart_dev(d->subsys);
+
+ return IRQ_HANDLED;
+}
+
+/* IRQ: watchdog bite from the peripheral; log the reason and restart it. */
+static irqreturn_t subsys_wdog_bite_irq_handler(int irq, void *dev_id)
+{
+	struct pil_tz_data *d = subsys_to_data(dev_id);
+
+	pr_err("Watchdog bite received from %s!\n", d->subsys_desc.name);
+	if (!subsys_get_crash_status(d->subsys)) {
+		subsys_set_crash_status(d->subsys, true);
+		log_failure_reason(d);
+		subsystem_restart_dev(d->subsys);
+	} else {
+		pr_err("%s: Ignoring wdog bite IRQ, restart in progress\n",
+			d->subsys_desc.name);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* IRQ: the peripheral acknowledged a force-stop request (see shutdown). */
+static irqreturn_t subsys_stop_ack_intr_handler(int irq, void *dev_id)
+{
+	struct pil_tz_data *d = subsys_to_data(dev_id);
+
+	pr_info("Received stop ack interrupt from %s\n", d->subsys_desc.name);
+	complete(&d->stop_ack);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Probe a "qcom,pil-tz-generic" device: parse clock/regulator/bus
+ * resources and PAS parameters from DT, then register the PIL
+ * descriptor, the ramdump device and the subsystem-restart hooks.
+ *
+ * Fix vs. original: piltz_resc_init() failure returned -ENOENT, which
+ * masked -EPROBE_DEFER from clock/regulator lookups and broke deferred
+ * probing; the real error is now propagated.
+ */
+static int pil_tz_driver_probe(struct platform_device *pdev)
+{
+	struct pil_tz_data *d;
+	u32 proxy_timeout;
+	int len, rc;
+
+	d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, d);
+
+	rc = piltz_resc_init(pdev, d);
+	if (rc)
+		return rc;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,pas-id", &d->pas_id);
+	if (rc) {
+		dev_err(&pdev->dev, "Failed to find the pas_id.\n");
+		return rc;
+	}
+
+	rc = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
+				      &d->desc.name);
+	if (rc)
+		return rc;
+
+	/* Defaulting smem_id to be not present */
+	d->smem_id = -1;
+
+	if (of_find_property(pdev->dev.of_node, "qcom,smem-id", &len)) {
+		rc = of_property_read_u32(pdev->dev.of_node, "qcom,smem-id",
+						&d->smem_id);
+		if (rc) {
+			dev_err(&pdev->dev, "Failed to get the smem_id.\n");
+			return rc;
+		}
+	}
+
+	d->desc.dev = &pdev->dev;
+	d->desc.owner = THIS_MODULE;
+	d->desc.ops = &pil_ops_trusted;
+
+	/* DT may override the default proxy-vote timeout */
+	d->desc.proxy_timeout = PROXY_TIMEOUT_MS;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,proxy-timeout-ms",
+					&proxy_timeout);
+	if (!rc)
+		d->desc.proxy_timeout = proxy_timeout;
+
+	scm_pas_init(MSM_BUS_MASTER_CRYPTO_CORE0);
+
+	rc = pil_desc_init(&d->desc);
+	if (rc)
+		return rc;
+
+	init_completion(&d->stop_ack);
+
+	d->subsys_desc.name = d->desc.name;
+	d->subsys_desc.owner = THIS_MODULE;
+	d->subsys_desc.dev = &pdev->dev;
+	d->subsys_desc.shutdown = subsys_shutdown;
+	d->subsys_desc.powerup = subsys_powerup;
+	d->subsys_desc.ramdump = subsys_ramdump;
+	d->subsys_desc.crash_shutdown = subsys_crash_shutdown;
+	d->subsys_desc.err_fatal_handler = subsys_err_fatal_intr_handler;
+	d->subsys_desc.wdog_bite_handler = subsys_wdog_bite_irq_handler;
+	d->subsys_desc.stop_ack_handler = subsys_stop_ack_intr_handler;
+
+	d->ramdump_dev = create_ramdump_device(d->subsys_desc.name,
+								&pdev->dev);
+	if (!d->ramdump_dev) {
+		rc = -ENOMEM;
+		goto err_ramdump;
+	}
+
+	d->subsys = subsys_register(&d->subsys_desc);
+	if (IS_ERR(d->subsys)) {
+		rc = PTR_ERR(d->subsys);
+		goto err_subsys;
+	}
+
+	return 0;
+err_subsys:
+	destroy_ramdump_device(d->ramdump_dev);
+err_ramdump:
+	pil_desc_release(&d->desc);
+
+	return rc;
+}
+
+/* Remove hook: unwind pil_tz_driver_probe() in reverse order. */
+static int pil_tz_driver_exit(struct platform_device *pdev)
+{
+	struct pil_tz_data *d = platform_get_drvdata(pdev);
+
+	subsys_unregister(d->subsys);
+	destroy_ramdump_device(d->ramdump_dev);
+	pil_desc_release(&d->desc);
+
+	return 0;
+}
+
+/* const: of_match_table points at immutable data */
+static const struct of_device_id pil_tz_match_table[] = {
+	{.compatible = "qcom,pil-tz-generic"},
+	{}
+};
+
+/* Generic TZ/PAS PIL platform driver, bound via "qcom,pil-tz-generic" */
+static struct platform_driver pil_tz_driver = {
+	.probe = pil_tz_driver_probe,
+	.remove = pil_tz_driver_exit,
+	.driver = {
+		.name = "subsys-pil-tz",
+		.of_match_table = pil_tz_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+/* NOTE(review): init/exit boilerplate; module_platform_driver() would do. */
+static int __init pil_tz_init(void)
+{
+	return platform_driver_register(&pil_tz_driver);
+}
+module_init(pil_tz_init);
+
+/* Module unload: unregister the platform driver. */
+static void __exit pil_tz_exit(void)
+{
+	platform_driver_unregister(&pil_tz_driver);
+}
+module_exit(pil_tz_exit);
+
+MODULE_DESCRIPTION("Support for booting subsystems");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/subsystem_notif.c b/drivers/soc/qcom/subsystem_notif.c
new file mode 100644
index 000000000000..431bbd8cee6f
--- /dev/null
+++ b/drivers/soc/qcom/subsystem_notif.c
@@ -0,0 +1,222 @@
+/* Copyright (c) 2011, 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Subsystem Notifier -- Provides notifications
+ * of subsys events.
+ *
+ * Use subsys_notif_register_notifier to register for notifications
+ * and subsys_notif_queue_notification to send notifications.
+ *
+ */
+
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/stringify.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <soc/qcom/subsystem_notif.h>
+
+
+/* One entry per named subsystem; entries stay on subsystem_list forever. */
+struct subsys_notif_info {
+	char name[50];		/* subsystem name, the lookup key */
+	struct srcu_notifier_head subsys_notif_rcvr_list; /* receivers */
+	struct list_head list;	/* link on subsystem_list */
+};
+
+static LIST_HEAD(subsystem_list);	/* all known subsystems */
+static DEFINE_MUTEX(notif_lock);	/* guards subsystem_list */
+static DEFINE_MUTEX(notif_add_lock);	/* serializes add_subsys */
+
+#if defined(SUBSYS_RESTART_DEBUG)
+static void subsys_notif_reg_test_notifier(const char *);
+#endif
+
+/*
+ * Find the entry named @subsys_name on subsystem_list, or return NULL.
+ * The pointer stays valid after notif_lock is dropped because entries
+ * are never removed from the list (see subsys_notif_add_subsys).
+ */
+static struct subsys_notif_info *_notif_find_subsys(const char *subsys_name)
+{
+	struct subsys_notif_info *subsys;
+
+	mutex_lock(&notif_lock);
+	list_for_each_entry(subsys, &subsystem_list, list)
+		if (!strncmp(subsys->name, subsys_name,
+				ARRAY_SIZE(subsys->name))) {
+			mutex_unlock(&notif_lock);
+			return subsys;
+		}
+	mutex_unlock(&notif_lock);
+
+	return NULL;
+}
+
+/*
+ * Register @nb for notifications about @subsys_name, creating the
+ * subsystem entry on first use.  Returns a handle for
+ * subsys_notif_unregister_notifier() or an ERR_PTR.
+ *
+ * Fix vs. original: subsys_notif_add_subsys() can return an ERR_PTR,
+ * which passed the plain NULL check and was then dereferenced.
+ */
+void *subsys_notif_register_notifier(
+			const char *subsys_name, struct notifier_block *nb)
+{
+	int ret;
+	struct subsys_notif_info *subsys = _notif_find_subsys(subsys_name);
+
+	if (!subsys) {
+
+		/* Possible first time reference to this subsystem. Add it. */
+		subsys = (struct subsys_notif_info *)
+				subsys_notif_add_subsys(subsys_name);
+
+		if (IS_ERR_OR_NULL(subsys))
+			return ERR_PTR(-EINVAL);
+	}
+
+	ret = srcu_notifier_chain_register(
+		&subsys->subsys_notif_rcvr_list, nb);
+
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	return subsys;
+}
+EXPORT_SYMBOL(subsys_notif_register_notifier);
+
+/* Detach @nb from the chain behind a register_notifier() handle. */
+int subsys_notif_unregister_notifier(void *subsys_handle,
+				struct notifier_block *nb)
+{
+	struct subsys_notif_info *subsys = subsys_handle;
+
+	if (!subsys)
+		return -EINVAL;
+
+	return srcu_notifier_chain_unregister(
+			&subsys->subsys_notif_rcvr_list, nb);
+}
+EXPORT_SYMBOL(subsys_notif_unregister_notifier);
+
+/*
+ * Find or create the notifier entry for @subsys_name.  Returns the
+ * entry, NULL when @subsys_name is NULL, or an ERR_PTR on allocation
+ * failure -- check results with IS_ERR_OR_NULL().
+ *
+ * Fixes vs. original: allocation failure now reports -ENOMEM (was
+ * -EINVAL), and kzalloc guarantees the tail of name[] is initialized.
+ */
+void *subsys_notif_add_subsys(const char *subsys_name)
+{
+	struct subsys_notif_info *subsys = NULL;
+
+	if (!subsys_name)
+		goto done;
+
+	mutex_lock(&notif_add_lock);
+
+	subsys = _notif_find_subsys(subsys_name);
+
+	if (subsys) {
+		mutex_unlock(&notif_add_lock);
+		goto done;
+	}
+
+	subsys = kzalloc(sizeof(struct subsys_notif_info), GFP_KERNEL);
+
+	if (!subsys) {
+		mutex_unlock(&notif_add_lock);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	strlcpy(subsys->name, subsys_name, ARRAY_SIZE(subsys->name));
+
+	srcu_init_notifier_head(&subsys->subsys_notif_rcvr_list);
+
+	INIT_LIST_HEAD(&subsys->list);
+
+	mutex_lock(&notif_lock);
+	list_add_tail(&subsys->list, &subsystem_list);
+	mutex_unlock(&notif_lock);
+
+	#if defined(SUBSYS_RESTART_DEBUG)
+	subsys_notif_reg_test_notifier(subsys->name);
+	#endif
+
+	mutex_unlock(&notif_add_lock);
+
+done:
+	return subsys;
+}
+EXPORT_SYMBOL(subsys_notif_add_subsys);
+
+/*
+ * Deliver @notif_type (with optional @data) to every notifier
+ * registered on the subsystem behind @subsys_handle.
+ */
+int subsys_notif_queue_notification(void *subsys_handle,
+					enum subsys_notif_type notif_type,
+					void *data)
+{
+	struct subsys_notif_info *subsys = subsys_handle;
+
+	if (!subsys || notif_type < 0 ||
+			notif_type >= SUBSYS_NOTIF_TYPE_COUNT)
+		return -EINVAL;
+
+	return srcu_notifier_call_chain(
+		&subsys->subsys_notif_rcvr_list, notif_type, data);
+}
+EXPORT_SYMBOL(subsys_notif_queue_notification);
+
+#if defined(SUBSYS_RESTART_DEBUG)
+/* Map a notification code to its symbolic name (debug-build helper). */
+static const char *notif_to_string(enum subsys_notif_type notif_type)
+{
+	switch (notif_type) {
+	case SUBSYS_BEFORE_SHUTDOWN:
+		return __stringify(SUBSYS_BEFORE_SHUTDOWN);
+	case SUBSYS_AFTER_SHUTDOWN:
+		return __stringify(SUBSYS_AFTER_SHUTDOWN);
+	case SUBSYS_BEFORE_POWERUP:
+		return __stringify(SUBSYS_BEFORE_POWERUP);
+	case SUBSYS_AFTER_POWERUP:
+		return __stringify(SUBSYS_AFTER_POWERUP);
+	default:
+		return "unknown";
+	}
+}
+
+/* Debug notifier callback: logs every notification it receives. */
+static int subsys_notifier_test_call(struct notifier_block *this,
+				unsigned long code,
+				void *data)
+{
+	switch (code) {
+
+	default:
+		printk(KERN_WARNING "%s: Notification %s from subsystem %p\n",
+			__func__, notif_to_string(code), data);
+		break;
+
+	}
+
+	return NOTIFY_DONE;
+}
+
+/* Test notifier registered by subsys_notif_reg_test_notifier() below */
+static struct notifier_block nb = {
+	.notifier_call = subsys_notifier_test_call,
+};
+
+/*
+ * Debug helper: attach the test notifier block to @subsys_name's chain.
+ * Fix vs. original: the printk lacked a trailing newline, so the
+ * message merged with the next log line.
+ */
+static void subsys_notif_reg_test_notifier(const char *subsys_name)
+{
+	void *handle = subsys_notif_register_notifier(subsys_name, &nb);
+
+	printk(KERN_WARNING "%s: Registered test notifier, handle=%p\n",
+			__func__, handle);
+}
+#endif
+
+MODULE_DESCRIPTION("Subsystem Restart Notifier");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
new file mode 100644
index 000000000000..deb238bf112a
--- /dev/null
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -0,0 +1,1491 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "subsys-restart: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/time.h>
+#include <linux/wakelock.h>
+#include <linux/suspend.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/of_gpio.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/socinfo.h>
+#include <soc/qcom/sysmon.h>
+
+#include <asm/current.h>
+
+static int enable_debug;
+module_param(enable_debug, int, S_IRUGO | S_IWUSR);
+
+/**
+ * enum p_subsys_state - state of a subsystem (private)
+ * @SUBSYS_NORMAL: subsystem is operating normally
+ * @SUBSYS_CRASHED: subsystem has crashed and hasn't been shutdown
+ * @SUBSYS_RESTARTING: subsystem has been shutdown and is now restarting
+ *
+ * The 'private' side of the subsystem state used to determine where in the
+ * restart process the subsystem is.
+ */
+enum p_subsys_state {
+ SUBSYS_NORMAL,
+ SUBSYS_CRASHED,
+ SUBSYS_RESTARTING,
+};
+
+/**
+ * enum subsys_state - state of a subsystem (public)
+ * @SUBSYS_OFFLINE: subsystem is offline
+ * @SUBSYS_ONLINE: subsystem is online
+ *
+ * The 'public' side of the subsystem state, exposed to userspace.
+ */
+enum subsys_state {
+ SUBSYS_OFFLINE,
+ SUBSYS_ONLINE,
+};
+
+static const char * const subsys_states[] = {
+ [SUBSYS_OFFLINE] = "OFFLINE",
+ [SUBSYS_ONLINE] = "ONLINE",
+};
+
+static const char * const restart_levels[] = {
+ [RESET_SOC] = "SYSTEM",
+ [RESET_SUBSYS_COUPLED] = "RELATED",
+};
+
+/**
+ * struct subsys_tracking - track state of a subsystem or restart order
+ * @p_state: private state of subsystem/order
+ * @state: public state of subsystem/order
+ * @s_lock: protects p_state
+ * @lock: protects subsystem/order callbacks and state
+ *
+ * Tracks the state of a subsystem or a set of subsystems (restart order).
+ * Doing this avoids the need to grab each subsystem's lock and update
+ * each subsystems state when restarting an order.
+ */
+struct subsys_tracking {
+ enum p_subsys_state p_state;
+ spinlock_t s_lock;
+ enum subsys_state state;
+ struct mutex lock;
+};
+
+/**
+ * struct subsys_soc_restart_order - subsystem restart order
+ * @device_ptrs: device tree nodes of the subsystems in this restart order
+ * @count: number of subsystems in order
+ * @track: state tracking and locking
+ * @subsys_ptrs: pointers to subsystems in this restart order
+ */
+struct subsys_soc_restart_order {
+ struct device_node **device_ptrs;
+ int count;
+
+ struct subsys_tracking track;
+ struct subsys_device **subsys_ptrs;
+ struct list_head list;
+};
+
+struct restart_log {
+ struct timeval time;
+ struct subsys_device *dev;
+ struct list_head list;
+};
+
+/**
+ * struct subsys_device - subsystem device
+ * @desc: subsystem descriptor
+ * @work: context for subsystem_restart_wq_func() for this device
+ * @ssr_wlock: prevents suspend during subsystem_restart()
+ * @wlname: name of wakeup source
+ * @device_restart_work: work struct for device restart
+ * @track: state tracking and locking
+ * @notify: subsys notify handle
+ * @dev: device
+ * @owner: module that provides @desc
+ * @count: reference count of subsystem_get()/subsystem_put()
+ * @id: ida
+ * @restart_level: restart level (0 - panic, 1 - related, 2 - independent, etc.)
+ * @restart_order: order of other devices this devices restarts with
+ * @crash_count: number of times the device has crashed
+ * @dentry: debugfs directory for this device
+ * @do_ramdump_on_put: ramdump on subsystem_put() if true
+ * @err_ready: completion variable to record error ready from subsystem
+ * @crashed: indicates if subsystem has crashed
+ */
+struct subsys_device {
+ struct subsys_desc *desc;
+ struct work_struct work;
+ struct wakeup_source ssr_wlock;
+ char wlname[64];
+ struct work_struct device_restart_work;
+ struct subsys_tracking track;
+
+ void *notify;
+ struct device dev;
+ struct module *owner;
+ int count;
+ int id;
+ int restart_level;
+ int crash_count;
+ struct subsys_soc_restart_order *restart_order;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dentry;
+#endif
+ bool do_ramdump_on_put;
+ struct cdev char_dev;
+ dev_t dev_no;
+ struct completion err_ready;
+ bool crashed;
+ struct list_head list;
+};
+
+/* Convert a driver-model device embedded in a subsys_device back to it. */
+static struct subsys_device *to_subsys(struct device *d)
+{
+	return container_of(d, struct subsys_device, dev);
+}
+
+/* sysfs: subsystem name, e.g. "modem". */
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", to_subsys(dev)->desc->name);
+}
+
+/* sysfs: public state, "OFFLINE" or "ONLINE" (see subsys_states[]). */
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	enum subsys_state state = to_subsys(dev)->track.state;
+	return snprintf(buf, PAGE_SIZE, "%s\n", subsys_states[state]);
+}
+
+/* sysfs: number of times this subsystem has crashed since boot. */
+static ssize_t crash_count_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", to_subsys(dev)->crash_count);
+}
+
+/* sysfs: current restart level as a string from restart_levels[]. */
+static ssize_t
+restart_level_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	int level = to_subsys(dev)->restart_level;
+	return snprintf(buf, PAGE_SIZE, "%s\n", restart_levels[level]);
+}
+
+/*
+ * sysfs: set the restart level by (case-insensitive) name.
+ *
+ * Fix over the original: strncasecmp() with only 'count' characters is a
+ * prefix match, so an empty write (count == 0) or a bare prefix such as
+ * "SYS" would silently select the first level.  Require the input length
+ * to exactly match the level name before accepting it.
+ *
+ * Returns the number of bytes consumed, or -EPERM if no level matches.
+ */
+static ssize_t restart_level_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+	int i;
+	const char *p;
+
+	/* Strip a trailing newline so "echo SYSTEM" from a shell matches. */
+	p = memchr(buf, '\n', count);
+	if (p)
+		count = p - buf;
+
+	for (i = 0; i < ARRAY_SIZE(restart_levels); i++)
+		if (count == strlen(restart_levels[i]) &&
+		    !strncasecmp(buf, restart_levels[i], count)) {
+			subsys->restart_level = i;
+			return count;
+		}
+	return -EPERM;
+}
+
+/* Accessor for the current restart level of a subsystem device. */
+int subsys_get_restart_level(struct subsys_device *dev)
+{
+	return dev->restart_level;
+}
+EXPORT_SYMBOL(subsys_get_restart_level);
+
+/*
+ * subsys_set_state() - update the public state under track.s_lock and, only
+ * when the state actually changed, poke sysfs pollers of the "state" file.
+ * sysfs_notify() is called after dropping the spinlock since it may sleep.
+ */
+static void subsys_set_state(struct subsys_device *subsys,
+			enum subsys_state state)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&subsys->track.s_lock, flags);
+	if (subsys->track.state != state) {
+		subsys->track.state = state;
+		spin_unlock_irqrestore(&subsys->track.s_lock, flags);
+		sysfs_notify(&subsys->dev.kobj, NULL, "state");
+		return;
+	}
+	spin_unlock_irqrestore(&subsys->track.s_lock, flags);
+}
+
+/**
+ * subsys_default_online() - Mark a subsystem as online by default
+ * @dev: subsystem to mark as online
+ *
+ * Marks a subsystem as "online" without increasing the reference count
+ * on the subsystem. This is typically used by subsystems that are already
+ * online when the kernel boots up.
+ */
+/* Mark a subsystem online without taking a subsystem_get() reference. */
+void subsys_default_online(struct subsys_device *dev)
+{
+	subsys_set_state(dev, SUBSYS_ONLINE);
+}
+EXPORT_SYMBOL(subsys_default_online);
+
+static struct device_attribute subsys_attrs[] = {
+ __ATTR_RO(name),
+ __ATTR_RO(state),
+ __ATTR_RO(crash_count),
+ __ATTR(restart_level, 0644, restart_level_show, restart_level_store),
+ __ATTR_NULL,
+};
+
+static struct bus_type subsys_bus_type = {
+ .name = "msm_subsys",
+ .dev_attrs = subsys_attrs,
+};
+
+static DEFINE_IDA(subsys_ida);
+
+static int enable_ramdumps;
+module_param(enable_ramdumps, int, S_IRUGO | S_IWUSR);
+
+struct workqueue_struct *ssr_wq;
+static struct class *char_class;
+
+static LIST_HEAD(restart_log_list);
+static LIST_HEAD(subsys_list);
+static LIST_HEAD(ssr_order_list);
+static DEFINE_MUTEX(soc_order_reg_lock);
+static DEFINE_MUTEX(restart_log_mutex);
+static DEFINE_MUTEX(subsys_list_lock);
+static DEFINE_MUTEX(char_device_lock);
+static DEFINE_MUTEX(ssr_order_mutex);
+
+/*
+ * update_restart_order() - find the restart order (group) containing this
+ * device's OF node and record the subsys_device pointer in its slot.
+ *
+ * Returns the matching order, or NULL if the device belongs to no group.
+ * Walks under soc_order_reg_lock so no restart can traverse the group
+ * while its subsys_ptrs[] is being updated.
+ */
+static struct subsys_soc_restart_order *
+update_restart_order(struct subsys_device *dev)
+{
+	int i;
+	struct subsys_soc_restart_order *order;
+	struct device_node *device = dev->desc->dev->of_node;
+
+	mutex_lock(&soc_order_reg_lock);
+	list_for_each_entry(order, &ssr_order_list, list) {
+		for (i = 0; i < order->count; i++) {
+			if (order->device_ptrs[i] == device) {
+				order->subsys_ptrs[i] = dev;
+				goto found;
+			}
+		}
+	}
+	order = NULL;
+found:
+	mutex_unlock(&soc_order_reg_lock);
+
+	return order;
+}
+
+static int max_restarts;
+module_param(max_restarts, int, 0644);
+
+static long max_history_time = 3600;
+module_param(max_history_time, long, 0644);
+
+/*
+ * do_epoch_check() - record this restart and panic if subsystems have
+ * crashed max_restarts times within max_history_time seconds.
+ *
+ * Appends a timestamped entry to restart_log_list, prunes entries older
+ * than the history window, then compares the oldest surviving entry with
+ * the new one.  Disabled when the max_restarts module param is 0.
+ */
+static void do_epoch_check(struct subsys_device *dev)
+{
+	int n = 0;
+	struct timeval *time_first = NULL, *curr_time;
+	struct restart_log *r_log, *temp;
+	/* static, but only read/written under restart_log_mutex below */
+	static int max_restarts_check;
+	static long max_history_time_check;
+
+	mutex_lock(&restart_log_mutex);
+
+	/* Snapshot the tunables so one pass uses consistent values. */
+	max_restarts_check = max_restarts;
+	max_history_time_check = max_history_time;
+
+	/* Check if epoch checking is enabled */
+	if (!max_restarts_check)
+		goto out;
+
+	r_log = kmalloc(sizeof(struct restart_log), GFP_KERNEL);
+	if (!r_log)
+		goto out;
+	r_log->dev = dev;
+	do_gettimeofday(&r_log->time);
+	curr_time = &r_log->time;
+	INIT_LIST_HEAD(&r_log->list);
+
+	list_add_tail(&r_log->list, &restart_log_list);
+
+	/* Prune stale entries; count survivors and note the oldest one. */
+	list_for_each_entry_safe(r_log, temp, &restart_log_list, list) {
+
+		if ((curr_time->tv_sec - r_log->time.tv_sec) >
+				max_history_time_check) {
+
+			pr_debug("Deleted node with restart_time = %ld\n",
+					r_log->time.tv_sec);
+			list_del(&r_log->list);
+			kfree(r_log);
+			continue;
+		}
+		if (!n) {
+			time_first = &r_log->time;
+			pr_debug("Time_first: %ld\n", time_first->tv_sec);
+		}
+		n++;
+		pr_debug("Restart_time: %ld\n", r_log->time.tv_sec);
+	}
+
+	if (time_first && n >= max_restarts_check) {
+		if ((curr_time->tv_sec - time_first->tv_sec) <
+				max_history_time_check)
+			panic("Subsystems have crashed %d times in less than "
+				"%ld seconds!", max_restarts_check,
+				max_history_time_check);
+	}
+
+out:
+	mutex_unlock(&restart_log_mutex);
+}
+
+/* Apply fn() to each non-NULL device in a restart-order array. */
+static void for_each_subsys_device(struct subsys_device **list, unsigned count,
+		void *data, void (*fn)(struct subsys_device *, void *))
+{
+	while (count--) {
+		struct subsys_device *dev = *list++;
+		if (!dev)
+			continue;
+		fn(dev, data);
+	}
+}
+
+/*
+ * notify_each_subsys_device() - for every device in the array, tell all
+ * *other* registered subsystems about the event via sysmon, then queue the
+ * notification (with crash/ramdump info) on the device's own notifier chain.
+ */
+static void notify_each_subsys_device(struct subsys_device **list,
+		unsigned count,
+		enum subsys_notif_type notif, void *data)
+{
+	struct subsys_device *subsys;
+
+	while (count--) {
+		struct subsys_device *dev = *list++;
+		struct notif_data notif_data;
+
+		if (!dev)
+			continue;
+
+		/* Fan the event out to every other subsystem over sysmon. */
+		mutex_lock(&subsys_list_lock);
+		list_for_each_entry(subsys, &subsys_list, list)
+			if (dev != subsys)
+				sysmon_send_event(subsys->desc->name,
+						dev->desc->name,
+						notif);
+		mutex_unlock(&subsys_list_lock);
+
+		notif_data.crashed = subsys_get_crash_status(dev);
+		notif_data.enable_ramdump = enable_ramdumps;
+
+		subsys_notif_queue_notification(dev->notify, notif,
+						&notif_data);
+	}
+}
+
+/*
+ * Enable every per-subsystem IRQ that has both a number and (where needed)
+ * a handler; the wdog-bite IRQ is also armed as a wakeup source.
+ */
+static void enable_all_irqs(struct subsys_device *dev)
+{
+	if (dev->desc->err_ready_irq)
+		enable_irq(dev->desc->err_ready_irq);
+	if (dev->desc->wdog_bite_irq && dev->desc->wdog_bite_handler) {
+		enable_irq(dev->desc->wdog_bite_irq);
+		irq_set_irq_wake(dev->desc->wdog_bite_irq, 1);
+	}
+	if (dev->desc->err_fatal_irq && dev->desc->err_fatal_handler)
+		enable_irq(dev->desc->err_fatal_irq);
+	if (dev->desc->stop_ack_irq && dev->desc->stop_ack_handler)
+		enable_irq(dev->desc->stop_ack_irq);
+}
+
+/* Mirror of enable_all_irqs(): disable the IRQs and drop wakeup arming. */
+static void disable_all_irqs(struct subsys_device *dev)
+{
+	if (dev->desc->err_ready_irq)
+		disable_irq(dev->desc->err_ready_irq);
+	if (dev->desc->wdog_bite_irq && dev->desc->wdog_bite_handler) {
+		disable_irq(dev->desc->wdog_bite_irq);
+		irq_set_irq_wake(dev->desc->wdog_bite_irq, 0);
+	}
+	if (dev->desc->err_fatal_irq && dev->desc->err_fatal_handler)
+		disable_irq(dev->desc->err_fatal_irq);
+	if (dev->desc->stop_ack_irq && dev->desc->stop_ack_handler)
+		disable_irq(dev->desc->stop_ack_irq);
+}
+
+/*
+ * Block up to 10s for the subsystem to signal "error ready" (completion set
+ * from subsys_err_ready_intr_handler).  Skipped when the subsystem has no
+ * err-ready IRQ or when debugging is enabled (enable_debug == 1).
+ * Returns 0 on success or -ETIMEDOUT.
+ */
+static int wait_for_err_ready(struct subsys_device *subsys)
+{
+	int ret;
+
+	if (!subsys->desc->err_ready_irq || enable_debug == 1)
+		return 0;
+
+	ret = wait_for_completion_timeout(&subsys->err_ready,
+					  msecs_to_jiffies(10000));
+	if (!ret) {
+		pr_err("[%s]: Error ready timed out\n", subsys->desc->name);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/*
+ * Restart-sequence step: force the subsystem down.  A failed shutdown is
+ * unrecoverable and panics the system.  Bumps crash_count and quiesces IRQs.
+ */
+static void subsystem_shutdown(struct subsys_device *dev, void *data)
+{
+	const char *name = dev->desc->name;
+
+	pr_info("[%p]: Shutting down %s\n", current, name);
+	if (dev->desc->shutdown(dev->desc, true) < 0)
+		panic("subsys-restart: [%p]: Failed to shutdown %s!",
+			current, name);
+	dev->crash_count++;
+	subsys_set_state(dev, SUBSYS_OFFLINE);
+	disable_all_irqs(dev);
+}
+
+/*
+ * Restart-sequence step: collect a ramdump if the descriptor provides a
+ * callback; failure is only logged.  Clears the deferred-ramdump flag.
+ */
+static void subsystem_ramdump(struct subsys_device *dev, void *data)
+{
+	const char *name = dev->desc->name;
+
+	if (dev->desc->ramdump)
+		if (dev->desc->ramdump(enable_ramdumps, dev->desc) < 0)
+			pr_warn("%s[%p]: Ramdump failed.\n", name, current);
+	dev->do_ramdump_on_put = false;
+}
+
+/*
+ * Restart-sequence step: power the subsystem back up and wait for it to
+ * report error-ready.  Any failure here panics (after notifying listeners
+ * with SUBSYS_POWERUP_FAILURE), since a half-restarted subsystem cannot be
+ * recovered.  On success the crash flag is cleared.
+ */
+static void subsystem_powerup(struct subsys_device *dev, void *data)
+{
+	const char *name = dev->desc->name;
+	int ret;
+
+	pr_info("[%p]: Powering up %s\n", current, name);
+	init_completion(&dev->err_ready);
+
+	if (dev->desc->powerup(dev->desc) < 0) {
+		notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
+					  NULL);
+		panic("[%p]: Powerup error: %s!", current, name);
+	}
+	enable_all_irqs(dev);
+
+	ret = wait_for_err_ready(dev);
+	if (ret) {
+		notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
+					  NULL);
+		panic("[%p]: Timed out waiting for error ready: %s!",
+			current, name);
+	}
+	subsys_set_state(dev, SUBSYS_ONLINE);
+	subsys_set_crash_status(dev, false);
+}
+
+/* bus_find_device() match callback: compare subsystem name to 'data'. */
+static int __find_subsys(struct device *dev, void *data)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+	return !strcmp(subsys->desc->name, data);
+}
+
+/*
+ * Look up a subsys_device by name on the msm_subsys bus.  On success the
+ * device's reference count is elevated (bus_find_device takes a get);
+ * callers must put_device(&subsys->dev) when done.
+ */
+static struct subsys_device *find_subsys(const char *str)
+{
+	struct device *dev;
+
+	if (!str)
+		return NULL;
+
+	dev = bus_find_device(&subsys_bus_type, NULL, (void *)str,
+			__find_subsys);
+	return dev ? to_subsys(dev) : NULL;
+}
+
+/*
+ * subsys_start() - first-boot powerup path used by subsystem_get().
+ * Notifies BEFORE/AFTER_POWERUP around desc->powerup(); on err-ready
+ * timeout the subsystem is shut back down and the error returned instead
+ * of panicking (unlike subsystem_powerup() in the restart path).
+ */
+static int subsys_start(struct subsys_device *subsys)
+{
+	int ret;
+
+	notify_each_subsys_device(&subsys, 1, SUBSYS_BEFORE_POWERUP,
+								NULL);
+
+	init_completion(&subsys->err_ready);
+	ret = subsys->desc->powerup(subsys->desc);
+	if (ret) {
+		notify_each_subsys_device(&subsys, 1, SUBSYS_POWERUP_FAILURE,
+								NULL);
+		return ret;
+	}
+	enable_all_irqs(subsys);
+
+	/* Non-loadable subsystems have nothing to wait for. */
+	if (subsys->desc->is_not_loadable) {
+		subsys_set_state(subsys, SUBSYS_ONLINE);
+		return 0;
+	}
+
+	ret = wait_for_err_ready(subsys);
+	if (ret) {
+		/* pil-boot succeeded but we need to shutdown
+		 * the device because error ready timed out.
+		 */
+		notify_each_subsys_device(&subsys, 1, SUBSYS_POWERUP_FAILURE,
+								NULL);
+		subsys->desc->shutdown(subsys->desc, false);
+		disable_all_irqs(subsys);
+		return ret;
+	} else {
+		subsys_set_state(subsys, SUBSYS_ONLINE);
+	}
+
+	notify_each_subsys_device(&subsys, 1, SUBSYS_AFTER_POWERUP,
+								NULL);
+	return ret;
+}
+
+/* Orderly shutdown used by subsystem_put(), with notifications around it. */
+static void subsys_stop(struct subsys_device *subsys)
+{
+	notify_each_subsys_device(&subsys, 1, SUBSYS_BEFORE_SHUTDOWN, NULL);
+	subsys->desc->shutdown(subsys->desc, false);
+	subsys_set_state(subsys, SUBSYS_OFFLINE);
+	disable_all_irqs(subsys);
+	notify_each_subsys_device(&subsys, 1, SUBSYS_AFTER_SHUTDOWN, NULL);
+}
+
+/*
+ * State tracking lives on the restart order when the device belongs to one
+ * (so the whole group shares a lock), otherwise on the device itself.
+ */
+static struct subsys_tracking *subsys_get_track(struct subsys_device *subsys)
+{
+	struct subsys_soc_restart_order *order = subsys->restart_order;
+
+	if (order)
+		return &order->track;
+	else
+		return &subsys->track;
+}
+
+/**
+ * subsystem_get() - Boot a subsystem
+ * @name: pointer to a string containing the name of the subsystem to boot
+ *
+ * This function returns a pointer if it succeeds. If an error occurs an
+ * ERR_PTR is returned.
+ *
+ * If this feature is disabled, the value %NULL will be returned.
+ */
+/*
+ * Boot (or take another reference on) the named subsystem.  Recursively
+ * boots the subsystem named in desc->depends_on first.  Holds: a device
+ * reference (from find_subsys), a module reference, and a count under the
+ * tracking lock — all released by subsystem_put().
+ * Returns the subsystem handle, NULL for a NULL name, or an ERR_PTR.
+ */
+void *subsystem_get(const char *name)
+{
+	struct subsys_device *subsys;
+	struct subsys_device *subsys_d;
+	int ret;
+	void *retval;
+	struct subsys_tracking *track;
+
+	if (!name)
+		return NULL;
+
+	subsys = retval = find_subsys(name);
+	if (!subsys)
+		return ERR_PTR(-ENODEV);
+	if (!try_module_get(subsys->owner)) {
+		retval = ERR_PTR(-ENODEV);
+		goto err_module;
+	}
+
+	/* Boot the dependency chain first; NULL depends_on returns NULL. */
+	subsys_d = subsystem_get(subsys->desc->depends_on);
+	if (IS_ERR(subsys_d)) {
+		retval = subsys_d;
+		goto err_depends;
+	}
+
+	track = subsys_get_track(subsys);
+	mutex_lock(&track->lock);
+	/* Only the first getter actually powers the subsystem up. */
+	if (!subsys->count) {
+		ret = subsys_start(subsys);
+		if (ret) {
+			retval = ERR_PTR(ret);
+			goto err_start;
+		}
+	}
+	subsys->count++;
+	mutex_unlock(&track->lock);
+	return retval;
+err_start:
+	mutex_unlock(&track->lock);
+	subsystem_put(subsys_d);
+err_depends:
+	module_put(subsys->owner);
+err_module:
+	put_device(&subsys->dev);
+	return retval;
+}
+EXPORT_SYMBOL(subsystem_get);
+
+/**
+ * subsystem_put() - Shutdown a subsystem
+ * @peripheral_handle: pointer from a previous call to subsystem_get()
+ *
+ * This doesn't imply that a subsystem is shutdown until all callers of
+ * subsystem_get() have called subsystem_put().
+ */
+/*
+ * Drop one reference taken by subsystem_get(); the last put shuts the
+ * subsystem down (and collects a ramdump if one was requested via
+ * subsystem_crashed()).  Also unwinds the dependency, module and device
+ * references.  A WARN fires on count underflow and the put is ignored.
+ */
+void subsystem_put(void *subsystem)
+{
+	struct subsys_device *subsys_d, *subsys = subsystem;
+	struct subsys_tracking *track;
+
+	if (IS_ERR_OR_NULL(subsys))
+		return;
+
+	track = subsys_get_track(subsys);
+	mutex_lock(&track->lock);
+	if (WARN(!subsys->count, "%s: %s: Reference count mismatch\n",
+			subsys->desc->name, __func__))
+		goto err_out;
+	if (!--subsys->count) {
+		subsys_stop(subsys);
+		if (subsys->do_ramdump_on_put)
+			subsystem_ramdump(subsys, NULL);
+	}
+	mutex_unlock(&track->lock);
+
+	/* Release the dependency reference taken in subsystem_get(). */
+	subsys_d = find_subsys(subsys->desc->depends_on);
+	if (subsys_d) {
+		subsystem_put(subsys_d);
+		put_device(&subsys_d->dev);
+	}
+	module_put(subsys->owner);
+	put_device(&subsys->dev);
+	return;
+err_out:
+	mutex_unlock(&track->lock);
+}
+EXPORT_SYMBOL(subsystem_put);
+
+/*
+ * Workqueue body for a full restart sequence: shutdown, ramdump, powerup —
+ * applied to either the whole restart order or the single device.
+ * Sequencing is deliberate (notify -> act -> notify per phase) and the two
+ * locks nest track->lock outside soc_order_reg_lock; the wakeup source is
+ * only released once p_state returns to SUBSYS_NORMAL.
+ */
+static void subsystem_restart_wq_func(struct work_struct *work)
+{
+	struct subsys_device *dev = container_of(work,
+						struct subsys_device, work);
+	struct subsys_device **list;
+	struct subsys_desc *desc = dev->desc;
+	struct subsys_soc_restart_order *order = dev->restart_order;
+	struct subsys_tracking *track;
+	unsigned count;
+	unsigned long flags;
+
+	/*
+	 * It's OK to not take the registration lock at this point.
+	 * This is because the subsystem list inside the relevant
+	 * restart order is not being traversed.
+	 */
+	if (order) {
+		list = order->subsys_ptrs;
+		count = order->count;
+		track = &order->track;
+	} else {
+		list = &dev;
+		count = 1;
+		track = &dev->track;
+	}
+
+	mutex_lock(&track->lock);
+	do_epoch_check(dev);
+
+	/*
+	 * It's necessary to take the registration lock because the subsystem
+	 * list in the SoC restart order will be traversed and it shouldn't be
+	 * changed until _this_ restart sequence completes.
+	 */
+	mutex_lock(&soc_order_reg_lock);
+
+	pr_debug("[%p]: Starting restart sequence for %s\n", current,
+			desc->name);
+	notify_each_subsys_device(list, count, SUBSYS_BEFORE_SHUTDOWN, NULL);
+	for_each_subsys_device(list, count, NULL, subsystem_shutdown);
+	notify_each_subsys_device(list, count, SUBSYS_AFTER_SHUTDOWN, NULL);
+
+	notify_each_subsys_device(list, count, SUBSYS_RAMDUMP_NOTIFICATION,
+									NULL);
+
+	spin_lock_irqsave(&track->s_lock, flags);
+	track->p_state = SUBSYS_RESTARTING;
+	spin_unlock_irqrestore(&track->s_lock, flags);
+
+	/* Collect ram dumps for all subsystems in order here */
+	for_each_subsys_device(list, count, NULL, subsystem_ramdump);
+
+	notify_each_subsys_device(list, count, SUBSYS_BEFORE_POWERUP, NULL);
+	for_each_subsys_device(list, count, NULL, subsystem_powerup);
+	notify_each_subsys_device(list, count, SUBSYS_AFTER_POWERUP, NULL);
+
+	pr_info("[%p]: Restart sequence for %s completed.\n",
+			current, desc->name);
+
+	mutex_unlock(&soc_order_reg_lock);
+	mutex_unlock(&track->lock);
+
+	spin_lock_irqsave(&track->s_lock, flags);
+	track->p_state = SUBSYS_NORMAL;
+	__pm_relax(&dev->ssr_wlock);
+	spin_unlock_irqrestore(&track->s_lock, flags);
+}
+
+/*
+ * Queue a restart for the device's group unless one is already pending.
+ * Repeated calls before shutdown coalesce (p_state == SUBSYS_CRASHED);
+ * a crash reported while the group is mid-restart is fatal.
+ */
+static void __subsystem_restart_dev(struct subsys_device *dev)
+{
+	struct subsys_desc *desc = dev->desc;
+	const char *name = dev->desc->name;
+	struct subsys_tracking *track;
+	unsigned long flags;
+
+	pr_debug("Restarting %s [level=%s]!\n", desc->name,
+			restart_levels[dev->restart_level]);
+
+	track = subsys_get_track(dev);
+	/*
+	 * Allow drivers to call subsystem_restart{_dev}() as many times as
+	 * they want up until the point where the subsystem is shutdown.
+	 */
+	spin_lock_irqsave(&track->s_lock, flags);
+	if (track->p_state != SUBSYS_CRASHED) {
+		if (dev->track.state == SUBSYS_ONLINE &&
+		    track->p_state != SUBSYS_RESTARTING) {
+			track->p_state = SUBSYS_CRASHED;
+			/* Held until the work item finishes the restart. */
+			__pm_stay_awake(&dev->ssr_wlock);
+			queue_work(ssr_wq, &dev->work);
+		} else {
+			panic("Subsystem %s crashed during SSR!", name);
+		}
+	}
+	spin_unlock_irqrestore(&track->s_lock, flags);
+}
+
+/* RESET_SOC path: announce the reset to listeners, then panic the SoC. */
+static void device_restart_work_hdlr(struct work_struct *work)
+{
+	struct subsys_device *dev = container_of(work, struct subsys_device,
+							device_restart_work);
+
+	notify_each_subsys_device(&dev, 1, SUBSYS_SOC_RESET, NULL);
+	panic("subsys-restart: Resetting the SoC - %s crashed.",
+							dev->desc->name);
+}
+
+/*
+ * subsystem_restart_dev() - kick off recovery for a crashed subsystem.
+ *
+ * Takes a device reference and a module reference for the duration of the
+ * call.  RESET_SUBSYS_COUPLED queues the restart work; RESET_SOC schedules
+ * a deliberate whole-SoC panic (the references are intentionally kept,
+ * since the machine is about to go down anyway).
+ *
+ * Fix over the original: the early return during a system reboot/poweroff
+ * leaked both the module reference and the device reference taken above —
+ * release them before returning -EBUSY.
+ */
+int subsystem_restart_dev(struct subsys_device *dev)
+{
+	const char *name;
+
+	if (!get_device(&dev->dev))
+		return -ENODEV;
+
+	if (!try_module_get(dev->owner)) {
+		put_device(&dev->dev);
+		return -ENODEV;
+	}
+
+	name = dev->desc->name;
+
+	/*
+	 * If a system reboot/shutdown is underway, ignore subsystem errors.
+	 * However, print a message so that we know that a subsystem behaved
+	 * unexpectedly here.
+	 */
+	if (system_state == SYSTEM_RESTART
+		|| system_state == SYSTEM_POWER_OFF) {
+		pr_err("%s crashed during a system poweroff/shutdown.\n", name);
+		module_put(dev->owner);
+		put_device(&dev->dev);
+		return -EBUSY;
+	}
+
+	pr_info("Restart sequence requested for %s, restart_level = %s.\n",
+		name, restart_levels[dev->restart_level]);
+
+	switch (dev->restart_level) {
+
+	case RESET_SUBSYS_COUPLED:
+		__subsystem_restart_dev(dev);
+		break;
+	case RESET_SOC:
+		__pm_stay_awake(&dev->ssr_wlock);
+		schedule_work(&dev->device_restart_work);
+		return 0;
+	default:
+		panic("subsys-restart: Unknown restart level!\n");
+		break;
+	}
+	module_put(dev->owner);
+	put_device(&dev->dev);
+
+	return 0;
+}
+EXPORT_SYMBOL(subsystem_restart_dev);
+
+/*
+ * Name-based wrapper around subsystem_restart_dev(); drops the device
+ * reference taken by find_subsys() before returning.
+ */
+int subsystem_restart(const char *name)
+{
+	int ret;
+	struct subsys_device *dev = find_subsys(name);
+
+	if (!dev)
+		return -ENODEV;
+
+	ret = subsystem_restart_dev(dev);
+	put_device(&dev->dev);
+	return ret;
+}
+EXPORT_SYMBOL(subsystem_restart);
+
+/*
+ * Record that the named subsystem crashed: the next final subsystem_put()
+ * will collect a ramdump.  Does not itself trigger a restart.
+ */
+int subsystem_crashed(const char *name)
+{
+	struct subsys_device *dev = find_subsys(name);
+	struct subsys_tracking *track;
+
+	if (!dev)
+		return -ENODEV;
+
+	if (!get_device(&dev->dev))
+		return -ENODEV;
+
+	track = subsys_get_track(dev);
+
+	mutex_lock(&track->lock);
+	dev->do_ramdump_on_put = true;
+	/*
+	 * TODO: Make this work with multiple consumers where one is calling
+	 * subsystem_restart() and another is calling this function. To do
+	 * so would require updating private state, etc.
+	 */
+	mutex_unlock(&track->lock);
+
+	put_device(&dev->dev);
+	return 0;
+}
+EXPORT_SYMBOL(subsystem_crashed);
+
+/* Record whether the subsystem is currently considered crashed. */
+void subsys_set_crash_status(struct subsys_device *dev, bool crashed)
+{
+	dev->crashed = crashed;
+}
+
+/* Query the crash flag set by subsys_set_crash_status(). */
+bool subsys_get_crash_status(struct subsys_device *dev)
+{
+	return dev->crashed;
+}
+
+/*
+ * Map a descriptor's struct device back to its subsys_device by scanning
+ * the global list; returns NULL (0) if not registered.
+ */
+static struct subsys_device *desc_to_subsys(struct device *d)
+{
+	struct subsys_device *device, *subsys_dev = 0;
+
+	mutex_lock(&subsys_list_lock);
+	list_for_each_entry(device, &subsys_list, list)
+		if (device->desc->dev == d)
+			subsys_dev = device;
+	mutex_unlock(&subsys_list_lock);
+	return subsys_dev;
+}
+
+/* Broadcast SUBSYS_PROXY_VOTE for the subsystem owning this device. */
+void notify_proxy_vote(struct device *device)
+{
+	struct subsys_device *dev = desc_to_subsys(device);
+
+	if (dev)
+		notify_each_subsys_device(&dev, 1, SUBSYS_PROXY_VOTE, NULL);
+}
+
+/* Broadcast SUBSYS_PROXY_UNVOTE for the subsystem owning this device. */
+void notify_proxy_unvote(struct device *device)
+{
+	struct subsys_device *dev = desc_to_subsys(device);
+
+	if (dev)
+		notify_each_subsys_device(&dev, 1, SUBSYS_PROXY_UNVOTE, NULL);
+}
+
+#ifdef CONFIG_DEBUG_FS
+/* debugfs read: report the subsystem's current reference count. */
+static ssize_t subsys_debugfs_read(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	int r;
+	char buf[40];
+	struct subsys_device *subsys = filp->private_data;
+
+	r = snprintf(buf, sizeof(buf), "%d\n", subsys->count);
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+/*
+ * debugfs write: accept the commands "restart", "get" and "put" to drive
+ * the subsystem from userspace for testing.
+ */
+static ssize_t subsys_debugfs_write(struct file *filp,
+		const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	struct subsys_device *subsys = filp->private_data;
+	char buf[10];
+	char *cmp;
+
+	/* Leave room for the terminating NUL. */
+	cnt = min(cnt, sizeof(buf) - 1);
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+	buf[cnt] = '\0';
+	cmp = strstrip(buf);
+
+	if (!strcmp(cmp, "restart")) {
+		if (subsystem_restart_dev(subsys))
+			return -EIO;
+	} else if (!strcmp(cmp, "get")) {
+		if (subsystem_get(subsys->desc->name))
+			return -EIO;
+	} else if (!strcmp(cmp, "put")) {
+		subsystem_put(subsys);
+	} else {
+		return -EINVAL;
+	}
+
+	return cnt;
+}
+
+static const struct file_operations subsys_debugfs_fops = {
+	.open	= simple_open,
+	.read	= subsys_debugfs_read,
+	.write	= subsys_debugfs_write,
+};
+
+static struct dentry *subsys_base_dir;
+
+/* Create the top-level "msm_subsys" debugfs directory. */
+static int __init subsys_debugfs_init(void)
+{
+	subsys_base_dir = debugfs_create_dir("msm_subsys", NULL);
+	return !subsys_base_dir ? -ENOMEM : 0;
+}
+
+static void subsys_debugfs_exit(void)
+{
+	debugfs_remove_recursive(subsys_base_dir);
+}
+
+/* Create the per-subsystem debugfs control file under msm_subsys/. */
+static int subsys_debugfs_add(struct subsys_device *subsys)
+{
+	if (!subsys_base_dir)
+		return -ENOMEM;
+
+	subsys->dentry = debugfs_create_file(subsys->desc->name,
+				S_IRUGO | S_IWUSR, subsys_base_dir,
+				subsys, &subsys_debugfs_fops);
+	return !subsys->dentry ? -ENOMEM : 0;
+}
+
+static void subsys_debugfs_remove(struct subsys_device *subsys)
+{
+	debugfs_remove(subsys->dentry);
+}
+#else
+/* No-op stubs when CONFIG_DEBUG_FS is disabled. */
+static int __init subsys_debugfs_init(void) { return 0; };
+static void subsys_debugfs_exit(void) { }
+static int subsys_debugfs_add(struct subsys_device *subsys) { return 0; }
+static void subsys_debugfs_remove(struct subsys_device *subsys) { }
+#endif
+
+/*
+ * Char-device open: locate the subsys_device whose minor matches the inode
+ * and take a subsystem_get() reference on it (boots it if needed).
+ */
+static int subsys_device_open(struct inode *inode, struct file *file)
+{
+	struct subsys_device *device, *subsys_dev = 0;
+	void *retval;
+
+	mutex_lock(&subsys_list_lock);
+	list_for_each_entry(device, &subsys_list, list)
+		if (MINOR(device->dev_no) == iminor(inode))
+			subsys_dev = device;
+	mutex_unlock(&subsys_list_lock);
+
+	if (!subsys_dev)
+		return -EINVAL;
+
+	retval = subsystem_get(subsys_dev->desc->name);
+	if (IS_ERR(retval))
+		return PTR_ERR(retval);
+
+	return 0;
+}
+
+/* Char-device release: drop the reference taken in subsys_device_open(). */
+static int subsys_device_close(struct inode *inode, struct file *file)
+{
+	struct subsys_device *device, *subsys_dev = 0;
+
+	mutex_lock(&subsys_list_lock);
+	list_for_each_entry(device, &subsys_list, list)
+		if (MINOR(device->dev_no) == iminor(inode))
+			subsys_dev = device;
+	mutex_unlock(&subsys_list_lock);
+
+	if (!subsys_dev)
+		return -EINVAL;
+
+	subsystem_put(subsys_dev);
+	return 0;
+}
+
+static const struct file_operations subsys_device_fops = {
+		.owner = THIS_MODULE,
+		.open = subsys_device_open,
+		.release = subsys_device_close,
+};
+
+/* Device-model release callback: free everything owned by the device. */
+static void subsys_device_release(struct device *dev)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+
+	wakeup_source_trash(&subsys->ssr_wlock);
+	mutex_destroy(&subsys->track.lock);
+	ida_simple_remove(&subsys_ida, subsys->id);
+	kfree(subsys);
+}
+
+/*
+ * IRQ handler for the subsystem's "error ready" line: completes err_ready
+ * so wait_for_err_ready() can proceed.  Non-loadable subsystems only log.
+ */
+static irqreturn_t subsys_err_ready_intr_handler(int irq, void *subsys)
+{
+	struct subsys_device *subsys_dev = subsys;
+	dev_info(subsys_dev->desc->dev,
+		"Subsystem error monitoring/handling services are up\n");
+
+	if (subsys_dev->desc->is_not_loadable)
+		return IRQ_HANDLED;
+
+	complete(&subsys_dev->err_ready);
+	return IRQ_HANDLED;
+}
+
+/*
+ * subsys_char_device_add() - create the /dev/subsys_<name> char device.
+ *
+ * The first caller allocates a 4-minor chrdev region; later callers hand
+ * out successive minors from it.  All static bookkeeping is protected by
+ * char_device_lock.
+ *
+ * Fixes over the original: device_create() returns an ERR_PTR on failure,
+ * never NULL, so the old `if (!device_create(...))` could not detect an
+ * error; and that failure path left `ret` at 0, reporting success after
+ * unwinding.  Check with IS_ERR() and propagate PTR_ERR().
+ */
+static int subsys_char_device_add(struct subsys_device *subsys_dev)
+{
+	int ret = 0;
+	static int major, minor;
+	dev_t dev_no;
+	struct device *device;
+
+	mutex_lock(&char_device_lock);
+	if (!major) {
+		ret = alloc_chrdev_region(&dev_no, 0, 4, "subsys");
+		if (ret < 0) {
+			pr_err("Failed to alloc subsys_dev region, err %d\n",
+					ret);
+			goto fail;
+		}
+		major = MAJOR(dev_no);
+		minor = MINOR(dev_no);
+	} else
+		dev_no = MKDEV(major, minor);
+
+	device = device_create(char_class, subsys_dev->desc->dev, dev_no,
+			NULL, "subsys_%s", subsys_dev->desc->name);
+	if (IS_ERR(device)) {
+		pr_err("Failed to create subsys_%s device\n",
+				subsys_dev->desc->name);
+		ret = PTR_ERR(device);
+		goto fail_unregister_cdev_region;
+	}
+
+	cdev_init(&subsys_dev->char_dev, &subsys_device_fops);
+	subsys_dev->char_dev.owner = THIS_MODULE;
+	ret = cdev_add(&subsys_dev->char_dev, dev_no, 1);
+	if (ret < 0)
+		goto fail_destroy_device;
+
+	subsys_dev->dev_no = dev_no;
+	minor++;
+	mutex_unlock(&char_device_lock);
+
+	return 0;
+
+fail_destroy_device:
+	device_destroy(char_class, dev_no);
+fail_unregister_cdev_region:
+	unregister_chrdev_region(dev_no, 1);
+fail:
+	mutex_unlock(&char_device_lock);
+	return ret;
+}
+
+/* Tear down the char device created by subsys_char_device_add(). */
+static void subsys_char_device_remove(struct subsys_device *subsys_dev)
+{
+	cdev_del(&subsys_dev->char_dev);
+	device_destroy(char_class, subsys_dev->dev_no);
+	unregister_chrdev_region(subsys_dev->dev_no, 1);
+}
+
+/*
+ * ssr_parse_restart_orders() - build (or find) the restart order described
+ * by the device's "qcom,restart-group" property.
+ *
+ * Returns NULL when the property is absent, an existing identical order if
+ * one is already registered, the newly created order, or an ERR_PTR on
+ * allocation/parse failure or a partially-overlapping group.
+ *
+ * Fixes over the original:
+ *  - the early `return tmp` / `return ERR_PTR(-EINVAL)` inside the list
+ *    walk escaped without releasing ssr_order_mutex, deadlocking the next
+ *    caller — unlock on every exit path;
+ *  - ssr_node->name was dereferenced after of_node_put(), i.e. after the
+ *    reference was dropped — log before putting the node.
+ */
+static struct subsys_soc_restart_order *ssr_parse_restart_orders(struct
+						subsys_desc * desc)
+{
+	int i, j, count, num = 0;
+	struct subsys_soc_restart_order *order, *tmp;
+	struct device *dev = desc->dev;
+	struct device_node *ssr_node;
+	uint32_t len;
+
+	if (!of_get_property(dev->of_node, "qcom,restart-group", &len))
+		return NULL;
+
+	count = len/sizeof(uint32_t);
+
+	order = devm_kzalloc(dev, sizeof(*order), GFP_KERNEL);
+	if (!order)
+		return ERR_PTR(-ENOMEM);
+
+	order->subsys_ptrs = devm_kzalloc(dev,
+				count * sizeof(struct subsys_device *),
+				GFP_KERNEL);
+	if (!order->subsys_ptrs)
+		return ERR_PTR(-ENOMEM);
+
+	order->device_ptrs = devm_kzalloc(dev,
+				count * sizeof(struct device_node *),
+				GFP_KERNEL);
+	if (!order->device_ptrs)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < count; i++) {
+		ssr_node = of_parse_phandle(dev->of_node,
+						"qcom,restart-group", i);
+		if (!ssr_node)
+			return ERR_PTR(-ENXIO);
+		pr_info("%s device has been added to %s's restart group\n",
+			ssr_node->name, desc->name);
+		of_node_put(ssr_node);
+		/* Stored for identity comparison only (see above put). */
+		order->device_ptrs[i] = ssr_node;
+	}
+
+	/*
+	 * Check for similar restart groups. If found, return
+	 * without adding the new group to the ssr_order_list.
+	 */
+	mutex_lock(&ssr_order_mutex);
+	list_for_each_entry(tmp, &ssr_order_list, list) {
+		for (i = 0; i < count; i++) {
+			for (j = 0; j < count; j++) {
+				if (order->device_ptrs[j] !=
+					tmp->device_ptrs[i])
+					continue;
+				else
+					num++;
+			}
+		}
+
+		if (num == count && tmp->count == count) {
+			/* Identical group already registered: reuse it. */
+			mutex_unlock(&ssr_order_mutex);
+			return tmp;
+		} else if (num) {
+			/* Partial overlap with an existing group: invalid. */
+			mutex_unlock(&ssr_order_mutex);
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	order->count = count;
+	mutex_init(&order->track.lock);
+	spin_lock_init(&order->track.s_lock);
+
+	INIT_LIST_HEAD(&order->list);
+	list_add_tail(&order->list, &ssr_order_list);
+	mutex_unlock(&ssr_order_mutex);
+
+	return order;
+}
+
+/*
+ * Read a named GPIO property from the descriptor's OF node.
+ * Returns 0 with *gpio set, -ENOENT if the property is absent, or the
+ * negative error from of_get_named_gpio().
+ */
+static int __get_gpio(struct subsys_desc *desc, const char *prop,
+		int *gpio)
+{
+	struct device_node *dnode = desc->dev->of_node;
+	int ret = -ENOENT;
+
+	if (of_find_property(dnode, prop, NULL)) {
+		*gpio = of_get_named_gpio(dnode, prop, 0);
+		ret = *gpio < 0 ? *gpio : 0;
+	}
+
+	return ret;
+}
+
+/*
+ * Resolve a named GPIO property to an IRQ number.  -ENOENT from
+ * gpio_to_irq() is remapped to -ENXIO so callers can distinguish "property
+ * missing" (-ENOENT from __get_gpio) from "gpio has no irq".
+ */
+static int __get_irq(struct subsys_desc *desc, const char *prop,
+		unsigned int *irq)
+{
+	int ret, gpio, irql;
+
+	ret = __get_gpio(desc, prop, &gpio);
+	if (ret)
+		return ret;
+
+	irql = gpio_to_irq(gpio);
+
+	if (irql == -ENOENT)
+		irql = -ENXIO;
+
+	if (irql < 0) {
+		pr_err("[%s]: Error getting IRQ \"%s\"\n", desc->name,
+				prop);
+		return irql;
+	} else {
+		*irq = irql;
+	}
+
+	return 0;
+}
+
+/*
+ * Populate the descriptor's IRQ/GPIO fields and restart order from the
+ * device tree.  Missing optional properties (-ENOENT) are not errors;
+ * IRQ 0 from platform_get_irq() means "no wdog-bite interrupt".
+ */
+static int subsys_parse_devicetree(struct subsys_desc *desc)
+{
+	struct subsys_soc_restart_order *order;
+	int ret;
+
+	struct platform_device *pdev = container_of(desc->dev,
+					struct platform_device, dev);
+
+	ret = __get_irq(desc, "qcom,gpio-err-fatal", &desc->err_fatal_irq);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = __get_irq(desc, "qcom,gpio-err-ready", &desc->err_ready_irq);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = __get_irq(desc, "qcom,gpio-stop-ack", &desc->stop_ack_irq);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = __get_gpio(desc, "qcom,gpio-force-stop", &desc->force_stop_gpio);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = platform_get_irq(pdev, 0);
+	if (ret > 0)
+		desc->wdog_bite_irq = ret;
+
+	order = ssr_parse_restart_orders(desc);
+	if (IS_ERR(order)) {
+		pr_err("Could not initialize SSR restart order, err = %ld\n",
+			PTR_ERR(order));
+		return PTR_ERR(order);
+	}
+
+	return 0;
+}
+
+/*
+ * subsys_setup_irqs() - request the subsystem's notification IRQs
+ *
+ * Each of the first three IRQs is requested only when both the IRQ
+ * number and a handler were provided in the descriptor.  Every IRQ is
+ * disabled right after the request; presumably it is re-enabled once
+ * the subsystem is brought up (enable path not visible here).
+ */
+static int subsys_setup_irqs(struct subsys_device *subsys)
+{
+	struct subsys_desc *desc = subsys->desc;
+	int ret;
+
+	if (desc->err_fatal_irq && desc->err_fatal_handler) {
+		ret = devm_request_irq(desc->dev, desc->err_fatal_irq,
+				desc->err_fatal_handler,
+				IRQF_TRIGGER_RISING, desc->name, desc);
+		if (ret < 0) {
+			dev_err(desc->dev, "[%s]: Unable to register error fatal IRQ handler!: %d\n",
+				desc->name, ret);
+			return ret;
+		}
+		disable_irq(desc->err_fatal_irq);
+	}
+
+	if (desc->stop_ack_irq && desc->stop_ack_handler) {
+		ret = devm_request_irq(desc->dev, desc->stop_ack_irq,
+			desc->stop_ack_handler,
+			IRQF_TRIGGER_RISING, desc->name, desc);
+		if (ret < 0) {
+			dev_err(desc->dev, "[%s]: Unable to register stop ack handler!: %d\n",
+				desc->name, ret);
+			return ret;
+		}
+		disable_irq(desc->stop_ack_irq);
+	}
+
+	if (desc->wdog_bite_irq && desc->wdog_bite_handler) {
+		ret = devm_request_irq(desc->dev, desc->wdog_bite_irq,
+			desc->wdog_bite_handler,
+			IRQF_TRIGGER_RISING, desc->name, desc);
+		if (ret < 0) {
+			dev_err(desc->dev, "[%s]: Unable to register wdog bite handler!: %d\n",
+				desc->name, ret);
+			return ret;
+		}
+		disable_irq(desc->wdog_bite_irq);
+	}
+
+	/* err_ready uses a driver-internal handler rather than one from
+	 * the descriptor, and needs only the IRQ number to be present.
+	 * Note it passes @subsys (not @desc) as the cookie.
+	 */
+	if (desc->err_ready_irq) {
+		ret = devm_request_irq(desc->dev,
+					desc->err_ready_irq,
+					subsys_err_ready_intr_handler,
+					IRQF_TRIGGER_RISING,
+					"error_ready_interrupt", subsys);
+		if (ret < 0) {
+			dev_err(desc->dev,
+				"[%s]: Unable to register err ready handler\n",
+				desc->name);
+			return ret;
+		}
+		disable_irq(desc->err_ready_irq);
+	}
+
+	return 0;
+}
+
+/*
+ * subsys_register() - create and register a subsystem restart device
+ * @desc: descriptor supplied by the subsystem driver (must outlive the
+ *        returned device)
+ *
+ * Returns the new device or an ERR_PTR on failure.
+ *
+ * Error handling follows the driver-core contract: before
+ * device_register() succeeds, resources are torn down manually; once it
+ * has succeeded, teardown goes through put_device()/device_unregister()
+ * so the release callback performs the final free.  The original code
+ * called device_unregister() on a *failed* device_register() (forbidden
+ * by the device_register() kerneldoc, which requires put_device()) and
+ * then fell through to kfree(), a potential double free.
+ */
+struct subsys_device *subsys_register(struct subsys_desc *desc)
+{
+	struct subsys_device *subsys;
+	int ret;
+
+	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
+	if (!subsys)
+		return ERR_PTR(-ENOMEM);
+
+	subsys->desc = desc;
+	subsys->owner = desc->owner;
+	subsys->dev.parent = desc->dev;
+	subsys->dev.bus = &subsys_bus_type;
+	subsys->dev.release = subsys_device_release;
+
+	subsys->notify = subsys_notif_add_subsys(desc->name);
+
+	snprintf(subsys->wlname, sizeof(subsys->wlname), "ssr(%s)", desc->name);
+	wakeup_source_init(&subsys->ssr_wlock, subsys->wlname);
+	INIT_WORK(&subsys->work, subsystem_restart_wq_func);
+	INIT_WORK(&subsys->device_restart_work, device_restart_work_hdlr);
+	spin_lock_init(&subsys->track.s_lock);
+
+	subsys->id = ida_simple_get(&subsys_ida, 0, 0, GFP_KERNEL);
+	if (subsys->id < 0) {
+		ret = subsys->id;
+		goto err_ida;
+	}
+	dev_set_name(&subsys->dev, "subsys%d", subsys->id);
+
+	mutex_init(&subsys->track.lock);
+
+	ret = subsys_debugfs_add(subsys);
+	if (ret)
+		goto err_debugfs;
+
+	ret = device_register(&subsys->dev);
+	if (ret) {
+		/* put_device() (never device_unregister()) after a failed
+		 * device_register(); the release callback is then expected
+		 * to free @subsys -- TODO confirm subsys_device_release()
+		 * performs the kfree()/ida/wakeup-source cleanup.
+		 */
+		subsys_debugfs_remove(subsys);
+		put_device(&subsys->dev);
+		return ERR_PTR(ret);
+	}
+
+	ret = subsys_char_device_add(subsys);
+	if (ret)
+		goto err_unregister;
+
+	if (desc->dev->of_node) {
+		ret = subsys_parse_devicetree(desc);
+		if (ret)
+			goto err_chardev;
+
+		subsys->restart_order = update_restart_order(subsys);
+
+		ret = subsys_setup_irqs(subsys);
+		if (ret < 0)
+			goto err_chardev;
+	}
+
+	mutex_lock(&subsys_list_lock);
+	INIT_LIST_HEAD(&subsys->list);
+	list_add_tail(&subsys->list, &subsys_list);
+	mutex_unlock(&subsys_list_lock);
+
+	return subsys;
+
+err_chardev:
+	subsys_char_device_remove(subsys);
+err_unregister:
+	/* Once registered, the release callback owns the final free, so
+	 * do NOT fall into the manual kfree() path below.
+	 */
+	subsys_debugfs_remove(subsys);
+	device_unregister(&subsys->dev);
+	return ERR_PTR(ret);
+err_debugfs:
+	mutex_destroy(&subsys->track.lock);
+	ida_simple_remove(&subsys_ida, subsys->id);
+err_ida:
+	wakeup_source_trash(&subsys->ssr_wlock);
+	kfree(subsys);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(subsys_register);
+
+/*
+ * subsys_unregister() - remove a subsystem device from the framework
+ * @subsys: device returned by subsys_register(); NULL/ERR_PTR is a no-op
+ *
+ * Takes a temporary reference so the device stays valid while it is
+ * unlinked from subsys_list and torn down; the final put_device() may
+ * free it via the release callback.
+ */
+void subsys_unregister(struct subsys_device *subsys)
+{
+	struct subsys_device *subsys_dev, *tmp;
+
+	if (IS_ERR_OR_NULL(subsys))
+		return;
+
+	if (get_device(&subsys->dev)) {
+		mutex_lock(&subsys_list_lock);
+		list_for_each_entry_safe(subsys_dev, tmp, &subsys_list, list)
+			if (subsys_dev == subsys)
+				list_del(&subsys->list);
+		mutex_unlock(&subsys_list_lock);
+		mutex_lock(&subsys->track.lock);
+		/* NOTE(review): warns if the subsystem is apparently still
+		 * in use; device_unregister() is invoked while track.lock
+		 * is held -- confirm nothing on the unregister path takes
+		 * the same lock.
+		 */
+		WARN_ON(subsys->count);
+		device_unregister(&subsys->dev);
+		mutex_unlock(&subsys->track.lock);
+		subsys_debugfs_remove(subsys);
+		subsys_char_device_remove(subsys);
+		put_device(&subsys->dev);
+	}
+}
+EXPORT_SYMBOL(subsys_unregister);
+
+/* Bus iterator callback: let a subsystem save crash context during a
+ * kernel panic.  Always returns 0 so iteration covers every device on
+ * the bus.
+ */
+static int subsys_panic(struct device *dev, void *data)
+{
+	struct subsys_desc *d = to_subsys(dev)->desc;
+
+	if (d->crash_shutdown)
+		d->crash_shutdown(d);
+	return 0;
+}
+
+/*
+ * Panic notifier: give every registered subsystem a chance to run its
+ * crash_shutdown hook (via subsys_panic) before the system goes down.
+ */
+static int ssr_panic_handler(struct notifier_block *this,
+				unsigned long event, void *ptr)
+{
+	bus_for_each_dev(&subsys_bus_type, NULL, NULL, subsys_panic);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block panic_nb = {
+	.notifier_call = ssr_panic_handler,
+};
+
+/*
+ * subsys_restart_init() - register the subsystem-restart bus, debugfs,
+ * char-device class and panic notifier.
+ *
+ * Fixes vs. the original: allocation failure of the workqueue returns
+ * -ENOMEM instead of BUG_ON() (panicking on a recoverable OOM), and a
+ * class_create() failure propagates PTR_ERR() instead of a hard-coded
+ * -ENOMEM.
+ */
+static int __init subsys_restart_init(void)
+{
+	int ret;
+
+	ssr_wq = alloc_workqueue("ssr_wq", WQ_CPU_INTENSIVE, 0);
+	if (!ssr_wq)
+		return -ENOMEM;
+
+	ret = bus_register(&subsys_bus_type);
+	if (ret)
+		goto err_bus;
+	ret = subsys_debugfs_init();
+	if (ret)
+		goto err_debugfs;
+
+	char_class = class_create(THIS_MODULE, "subsys");
+	if (IS_ERR(char_class)) {
+		ret = PTR_ERR(char_class);
+		pr_err("Failed to create subsys_dev class\n");
+		goto err_class;
+	}
+
+	ret = atomic_notifier_chain_register(&panic_notifier_list,
+			&panic_nb);
+	if (ret)
+		goto err_soc;
+
+	return 0;
+
+err_soc:
+	class_destroy(char_class);
+err_class:
+	subsys_debugfs_exit();
+err_debugfs:
+	bus_unregister(&subsys_bus_type);
+err_bus:
+	destroy_workqueue(ssr_wq);
+	return ret;
+}
+arch_initcall(subsys_restart_init);
+
+MODULE_DESCRIPTION("Subsystem Restart Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/sysmon.c b/drivers/soc/qcom/sysmon.c
new file mode 100644
index 000000000000..ec6d4bd5183d
--- /dev/null
+++ b/drivers/soc/qcom/sysmon.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <soc/qcom/hsic_sysmon.h>
+#include <soc/qcom/sysmon.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/smd.h>
+
+#define TX_BUF_SIZE 50
+#define RX_BUF_SIZE 500
+#define TIMEOUT_MS 5000
+
+/* Transport used to reach a subsystem's sysmon endpoint. */
+enum transports {
+	TRANSPORT_SMD,	/* shared-memory-driver channel */
+	TRANSPORT_HSIC,	/* HSIC link (external modem) */
+};
+
+/* Per-subsystem connection state. */
+struct sysmon_subsys {
+	struct mutex lock;		/* serializes send/response transactions */
+	struct smd_channel *chan;	/* SMD channel handle (SMD transport only) */
+	bool chan_open;			/* tracked via SMD open/close events */
+	struct completion resp_ready;	/* completed when a response lands in rx_buf */
+	char rx_buf[RX_BUF_SIZE];	/* last response received */
+	enum transports transport;	/* which transport this entry uses */
+	struct device *dev;		/* non-NULL once probed; gates the send APIs */
+};
+
+/* Static transport assignment per supported subsystem ID. */
+static struct sysmon_subsys subsys[SYSMON_NUM_SS] = {
+	[SYSMON_SS_MODEM].transport = TRANSPORT_SMD,
+	[SYSMON_SS_LPASS].transport = TRANSPORT_SMD,
+	[SYSMON_SS_WCNSS].transport = TRANSPORT_SMD,
+	[SYSMON_SS_DSPS].transport = TRANSPORT_SMD,
+	[SYSMON_SS_Q6FW].transport = TRANSPORT_SMD,
+	[SYSMON_SS_EXT_MODEM].transport = TRANSPORT_HSIC,
+};
+
+/* Wire names for each notification type, used in "ssr:%s:%s" messages. */
+static const char *notif_name[SUBSYS_NOTIF_TYPE_COUNT] = {
+	[SUBSYS_BEFORE_SHUTDOWN] = "before_shutdown",
+	[SUBSYS_AFTER_SHUTDOWN] = "after_shutdown",
+	[SUBSYS_BEFORE_POWERUP] = "before_powerup",
+	[SUBSYS_AFTER_POWERUP] = "after_powerup",
+};
+
+/* Maps a subsystem ID to the string name callers pass to
+ * sysmon_send_event().
+ */
+struct enum_name_map {
+	int id;
+	const char name[50];
+};
+
+static struct enum_name_map map[SYSMON_NUM_SS] = {
+	{SYSMON_SS_WCNSS, "wcnss"},
+	{SYSMON_SS_MODEM, "modem"},
+	{SYSMON_SS_LPASS, "adsp"},
+	{SYSMON_SS_Q6FW, "modem_fw"},
+	{SYSMON_SS_EXT_MODEM, "external_modem"},
+	{SYSMON_SS_DSPS, "dsps"},
+};
+
+/*
+ * Send @tx_buf over SMD and wait up to TIMEOUT_MS for the reply, which
+ * sysmon_smd_notify() deposits in ss->rx_buf before completing
+ * resp_ready.  Returns 0 on success, -ENODEV if the channel is closed,
+ * a negative smd_write() error or -EIO on a short write, or -ETIMEDOUT
+ * if no reply arrives.
+ */
+static int sysmon_send_smd(struct sysmon_subsys *ss, const char *tx_buf,
+			   size_t len)
+{
+	int ret;
+
+	if (!ss->chan_open)
+		return -ENODEV;
+
+	init_completion(&ss->resp_ready);
+	pr_debug("Sending SMD message: %s\n", tx_buf);
+	/* The smd_write() result was previously discarded; a failed or
+	 * short write would then block for the full timeout waiting for
+	 * a response that can never arrive.
+	 */
+	ret = smd_write(ss->chan, tx_buf, len);
+	if (ret < 0)
+		return ret;
+	if (ret != (int)len)
+		return -EIO;
+
+	ret = wait_for_completion_timeout(&ss->resp_ready,
+				msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+/* Send @tx_buf over the HSIC sysmon channel and read the peer's reply
+ * into ss->rx_buf.  Returns 0 on success or a transport error code.
+ */
+static int sysmon_send_hsic(struct sysmon_subsys *ss, const char *tx_buf,
+			    size_t len)
+{
+	size_t rx_len;
+	int ret;
+
+	pr_debug("Sending HSIC message: %s\n", tx_buf);
+	ret = hsic_sysmon_write(HSIC_SYSMON_DEV_EXT_MODEM,
+				tx_buf, len, TIMEOUT_MS);
+	if (ret)
+		return ret;
+
+	return hsic_sysmon_read(HSIC_SYSMON_DEV_EXT_MODEM, ss->rx_buf,
+				ARRAY_SIZE(ss->rx_buf), &rx_len, TIMEOUT_MS);
+}
+
+/* Dispatch @tx_buf to the subsystem over whichever transport @ss uses.
+ * On success the response is available in ss->rx_buf.
+ */
+static int sysmon_send_msg(struct sysmon_subsys *ss, const char *tx_buf,
+			   size_t len)
+{
+	int ret = -EINVAL;
+
+	if (ss->transport == TRANSPORT_SMD)
+		ret = sysmon_send_smd(ss, tx_buf, len);
+	else if (ss->transport == TRANSPORT_HSIC)
+		ret = sysmon_send_hsic(ss, tx_buf, len);
+
+	if (!ret)
+		pr_debug("Received response: %s\n", ss->rx_buf);
+
+	return ret;
+}
+
+/**
+ * sysmon_send_event() - Notify a subsystem of another's state change
+ * @dest_ss:	String name of the subsystem to notify (see map[])
+ * @event_ss:	String name of the subsystem that generated the event
+ * @notif:	Notification type (e.g. SUBSYS_BEFORE_SHUTDOWN)
+ *
+ * Returns 0 for success, -EINVAL for invalid destination or
+ * notification IDs, -ENODEV if the transport channel is not open,
+ * -ETIMEDOUT if the destination subsystem does not respond, and
+ * -ENOSYS if the destination subsystem responds, but with something
+ * other than an acknowledgement.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_send_event(const char *dest_ss, const char *event_ss,
+		      enum subsys_notif_type notif)
+{
+	struct sysmon_subsys *ss = NULL;
+	char tx_buf[TX_BUF_SIZE];
+	int i, ret;
+
+	/* Translate the destination name into its subsys[] slot. */
+	for (i = 0; i < ARRAY_SIZE(map); i++)
+		if (!strcmp(map[i].name, dest_ss)) {
+			ss = &subsys[map[i].id];
+			break;
+		}
+
+	if (ss == NULL)
+		return -EINVAL;
+
+	if (ss->dev == NULL)
+		return -ENODEV;
+
+	if (notif < 0 || notif >= SUBSYS_NOTIF_TYPE_COUNT ||
+	    event_ss == NULL || notif_name[notif] == NULL)
+		return -EINVAL;
+
+	snprintf(tx_buf, ARRAY_SIZE(tx_buf), "ssr:%s:%s", event_ss,
+		 notif_name[notif]);
+
+	mutex_lock(&ss->lock);
+	ret = sysmon_send_msg(ss, tx_buf, strlen(tx_buf));
+	if (!ret && strncmp(ss->rx_buf, "ssr:ack", ARRAY_SIZE(ss->rx_buf)))
+		ret = -ENOSYS;
+	mutex_unlock(&ss->lock);
+
+	return ret;
+}
+
+/**
+ * sysmon_send_shutdown() - send shutdown command to a subsystem.
+ * @dest_ss: ID of subsystem to send to.
+ *
+ * Returns 0 for success, -EINVAL for an invalid destination, -ENODEV if
+ * the SMD transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -ENOSYS if the destination subsystem
+ * responds with something unexpected.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_send_shutdown(enum subsys_id dest_ss)
+{
+	const char tx_buf[] = "system:shutdown";
+	const char expect[] = "system:ack";
+	size_t prefix_len = ARRAY_SIZE(expect) - 1;
+	struct sysmon_subsys *ss;
+	int ret;
+
+	/* Range-check dest_ss BEFORE using it as an index; the previous
+	 * code read subsys[dest_ss].dev first, which is an out-of-bounds
+	 * access for an invalid ID.
+	 */
+	if (dest_ss < 0 || dest_ss >= SYSMON_NUM_SS)
+		return -EINVAL;
+
+	ss = &subsys[dest_ss];
+	if (ss->dev == NULL)
+		return -ENODEV;
+
+	mutex_lock(&ss->lock);
+	ret = sysmon_send_msg(ss, tx_buf, ARRAY_SIZE(tx_buf));
+	if (ret)
+		goto out;
+
+	/* Anything other than a "system:ack" prefix means the peer did
+	 * not understand the command.
+	 */
+	if (strncmp(ss->rx_buf, expect, prefix_len))
+		ret = -ENOSYS;
+out:
+	mutex_unlock(&ss->lock);
+	return ret;
+}
+
+/**
+ * sysmon_get_reason() - Retrieve failure reason from a subsystem.
+ * @dest_ss: ID of subsystem to query
+ * @buf: Caller-allocated buffer for the returned NUL-terminated reason
+ * @len: Length of @buf
+ *
+ * Returns 0 for success, -EINVAL for an invalid destination, -ENODEV if
+ * the SMD transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -ENOSYS if the destination subsystem
+ * responds with something unexpected.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_get_reason(enum subsys_id dest_ss, char *buf, size_t len)
+{
+	const char tx_buf[] = "ssr:retrieve:sfr";
+	const char expect[] = "ssr:return:";
+	size_t prefix_len = ARRAY_SIZE(expect) - 1;
+	struct sysmon_subsys *ss;
+	int ret;
+
+	/* Validate every argument BEFORE touching subsys[]; the previous
+	 * code indexed the array and read ss->dev ahead of this check,
+	 * an out-of-bounds access for an invalid ID.
+	 */
+	if (dest_ss < 0 || dest_ss >= SYSMON_NUM_SS ||
+	    buf == NULL || len == 0)
+		return -EINVAL;
+
+	ss = &subsys[dest_ss];
+	if (ss->dev == NULL)
+		return -ENODEV;
+
+	mutex_lock(&ss->lock);
+	ret = sysmon_send_msg(ss, tx_buf, ARRAY_SIZE(tx_buf));
+	if (ret)
+		goto out;
+
+	if (strncmp(ss->rx_buf, expect, prefix_len)) {
+		ret = -ENOSYS;
+		goto out;
+	}
+	/* Copy only the payload after the "ssr:return:" prefix. */
+	strlcpy(buf, ss->rx_buf + prefix_len, len);
+out:
+	mutex_unlock(&ss->lock);
+	return ret;
+}
+
+/* SMD event callback: track channel open/close state, and on incoming
+ * data copy the payload into rx_buf and wake the waiting sender.
+ */
+static void sysmon_smd_notify(void *priv, unsigned int smd_event)
+{
+	struct sysmon_subsys *ss = priv;
+
+	if (smd_event == SMD_EVENT_OPEN) {
+		ss->chan_open = true;
+	} else if (smd_event == SMD_EVENT_CLOSE) {
+		ss->chan_open = false;
+	} else if (smd_event == SMD_EVENT_DATA &&
+		   smd_read_avail(ss->chan) > 0) {
+		smd_read_from_cb(ss->chan, ss->rx_buf,
+				 ARRAY_SIZE(ss->rx_buf));
+		complete(&ss->resp_ready);
+	}
+}
+
+/*
+ * sysmon_probe() - open the transport for the subsystem identified by
+ * pdev->id and publish it by setting ss->dev.
+ *
+ * pdev->id doubles as the subsys[] index and, for SMD entries, the SMD
+ * edge ID -- hence the SMD_NUM_TYPE boundary checks below.
+ */
+static int sysmon_probe(struct platform_device *pdev)
+{
+	struct sysmon_subsys *ss;
+	int ret;
+
+	if (pdev->id < 0 || pdev->id >= SYSMON_NUM_SS)
+		return -ENODEV;
+
+	ss = &subsys[pdev->id];
+	/* NOTE(review): mutex_init() runs on every (re)probe of the same
+	 * id; re-initializing a mutex that could be held is undefined --
+	 * confirm remove/probe cycles cannot race an in-flight sender.
+	 */
+	mutex_init(&ss->lock);
+
+	switch (ss->transport) {
+	case TRANSPORT_SMD:
+		if (pdev->id >= SMD_NUM_TYPE)
+			return -EINVAL;
+
+		ret = smd_named_open_on_edge("sys_mon", pdev->id, &ss->chan, ss,
+						sysmon_smd_notify);
+		if (ret) {
+			pr_err("SMD open failed\n");
+			return ret;
+		}
+
+		smd_disable_read_intr(ss->chan);
+		break;
+	case TRANSPORT_HSIC:
+		if (pdev->id < SMD_NUM_TYPE)
+			return -EINVAL;
+
+		ret = hsic_sysmon_open(HSIC_SYSMON_DEV_EXT_MODEM);
+		if (ret) {
+			pr_err("HSIC open failed\n");
+			return ret;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+	/* Publish last: the send APIs treat a non-NULL dev as "ready". */
+	ss->dev = &pdev->dev;
+
+	return 0;
+}
+
+/*
+ * sysmon_remove() - unpublish the subsystem entry and close its
+ * transport.
+ */
+static int sysmon_remove(struct platform_device *pdev)
+{
+	struct sysmon_subsys *ss = &subsys[pdev->id];
+
+	/* Clear dev first so new callers get -ENODEV.
+	 * NOTE(review): a sender that already passed its ss->dev check
+	 * can still race this teardown; the lock below only serializes
+	 * transactions that have not yet taken ss->lock.
+	 */
+	ss->dev = NULL;
+
+	mutex_lock(&ss->lock);
+	switch (ss->transport) {
+	case TRANSPORT_SMD:
+		smd_close(ss->chan);
+		break;
+	case TRANSPORT_HSIC:
+		hsic_sysmon_close(HSIC_SYSMON_DEV_EXT_MODEM);
+		break;
+	}
+	mutex_unlock(&ss->lock);
+
+	return 0;
+}
+
+/* Matches the "sys_mon" platform devices created per subsystem. */
+static struct platform_driver sysmon_driver = {
+	.probe = sysmon_probe,
+	.remove = sysmon_remove,
+	.driver = {
+		.name = "sys_mon",
+		.owner = THIS_MODULE,
+	},
+};
+
+/* subsys_initcall so the channels exist before restart clients run. */
+static int __init sysmon_init(void)
+{
+	return platform_driver_register(&sysmon_driver);
+}
+subsys_initcall(sysmon_init);
+
+static void __exit sysmon_exit(void)
+{
+	platform_driver_unregister(&sysmon_driver);
+}
+module_exit(sysmon_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("system monitor communication library");
+MODULE_ALIAS("platform:sys_mon");
diff --git a/drivers/spmi/Makefile b/drivers/spmi/Makefile
index fc75104a5aab..3d214abfdb87 100644
--- a/drivers/spmi/Makefile
+++ b/drivers/spmi/Makefile
@@ -1,6 +1,6 @@
#
# Makefile for kernel SPMI framework.
#
-obj-$(CONFIG_SPMI) += spmi.o
+obj-$(CONFIG_SPMI) += spmi.o spmi-resources.o
obj-$(CONFIG_SPMI_MSM_PMIC_ARB) += spmi-pmic-arb.o
diff --git a/drivers/spmi/spmi-resources.c b/drivers/spmi/spmi-resources.c
new file mode 100644
index 000000000000..d2e06fdb0be8
--- /dev/null
+++ b/drivers/spmi/spmi-resources.c
@@ -0,0 +1,151 @@
+/* Copyright (c) 2002-3 Patrick Mochel
+ * Copyright (c) 2002-3 Open Source Development Labs
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Resource handling based on platform.c.
+ */
+
+#include <linux/export.h>
+#include <linux/spmi.h>
+#include <linux/string.h>
+
+/**
+ * spmi_get_resource - get a resource for a device
+ * @dev: spmi device
+ * @node: device node resource
+ * @type: resource type
+ * @res_num: resource index
+ *
+ * If 'node' is specified as NULL, then the API treats this as a special
+ * case to assume the first devnode. For configurations that do not use
+ * spmi-dev-container, there is only one node to begin with, so NULL should
+ * be passed in this case.
+ *
+ * Returns
+ * NULL on failure.
+ */
+struct resource *spmi_get_resource(struct spmi_device *dev,
+				   struct spmi_resource *node,
+				   unsigned int type, unsigned int res_num)
+{
+	int i;
+
+	/* default to the first devnode when none was given */
+	if (node == NULL)
+		node = &dev->res;
+
+	for (i = 0; i < node->num_resources; i++) {
+		struct resource *r = &node->resource[i];
+
+		if (resource_type(r) != type)
+			continue;
+		/* return the res_num'th resource of the requested type */
+		if (res_num == 0)
+			return r;
+		res_num--;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(spmi_get_resource);
+
+#define SPMI_MAX_RES_NAME 256
+
+/**
+ * spmi_get_resource_byname - get a resource for a device given a name
+ * @dev: spmi device handle
+ * @node: device node resource
+ * @type: resource type
+ * @name: resource name to lookup
+ *
+ * If @node is NULL the device's first devnode is searched.  Returns
+ * NULL when no resource of @type named @name exists.
+ */
+struct resource *spmi_get_resource_byname(struct spmi_device *dev,
+					  struct spmi_resource *node,
+					  unsigned int type,
+					  const char *name)
+{
+	int i;
+
+	/* default to the first devnode when none was given */
+	if (node == NULL)
+		node = &dev->res;
+
+	for (i = 0; i < node->num_resources; i++) {
+		struct resource *r = &node->resource[i];
+
+		if (resource_type(r) != type || !r->name)
+			continue;
+		if (!strncmp(r->name, name, SPMI_MAX_RES_NAME))
+			return r;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(spmi_get_resource_byname);
+
+/**
+ * spmi_get_irq - get an IRQ for a device
+ * @dev: spmi device
+ * @node: device node resource
+ * @res_num: IRQ number index
+ *
+ * Returns the IRQ number, or -ENXIO when no matching IRQ resource
+ * exists.
+ */
+int spmi_get_irq(struct spmi_device *dev, struct spmi_resource *node,
+		 unsigned int res_num)
+{
+	struct resource *r;
+
+	r = spmi_get_resource(dev, node, IORESOURCE_IRQ, res_num);
+	if (!r)
+		return -ENXIO;
+	return r->start;
+}
+EXPORT_SYMBOL_GPL(spmi_get_irq);
+
+/**
+ * spmi_get_irq_byname - get an IRQ for a device given a name
+ * @dev: spmi device handle
+ * @node: device node resource
+ * @name: resource name to lookup
+ *
+ * Returns the IRQ number, or -ENXIO when no IRQ resource named @name
+ * exists.
+ */
+int spmi_get_irq_byname(struct spmi_device *dev,
+			struct spmi_resource *node, const char *name)
+{
+	struct resource *r;
+
+	r = spmi_get_resource_byname(dev, node, IORESOURCE_IRQ, name);
+	if (!r)
+		return -ENXIO;
+	return r->start;
+}
+EXPORT_SYMBOL_GPL(spmi_get_irq_byname);
+
+/**
+ * spmi_get_dev_container_byname - get a device node resource by label
+ * @dev: spmi device handle
+ * @label: device name to lookup
+ *
+ * Only usable in spmi-dev-container configurations. Given a name,
+ * find the associated spmi_resource that matches the name.
+ *
+ * Return NULL if the spmi_device is not a dev-container,
+ * or if the lookup fails.
+ */
+struct spmi_resource *spmi_get_dev_container_byname(struct spmi_device *dev,
+						    const char *label)
+{
+	int i;
+
+	if (!label)
+		return NULL;
+
+	for (i = 0; i < dev->num_dev_node; i++) {
+		struct spmi_resource *r = &dev->dev_node[i];
+
+		/* &dev->dev_node[i] can never be NULL, so only the label
+		 * itself needs checking (the old "r &&" test was dead).
+		 */
+		if (r->label && !strncmp(r->label, label, SPMI_MAX_RES_NAME))
+			return r;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(spmi_get_dev_container_byname);
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 1d92f5103ebf..6de645eca3f2 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -22,6 +22,8 @@
#include <dt-bindings/spmi/spmi.h>
+#define DEBUG 1
+
static DEFINE_IDA(ctrl_ida);
static void spmi_dev_release(struct device *dev)
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index b24aa010f68c..fa42c2f69bb5 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -333,6 +333,25 @@ config N_GSM
This line discipline provides support for the GSM MUX protocol and
presents the mux as a set of 61 individual tty devices.
+config N_SMUX
+ tristate "SMUX line discipline support"
+ depends on NET && SERIAL_MSM_HS
+ help
+ This line discipline provides support for the Serial MUX protocol
+ and provides a TTY and kernel API for multiple logical channels.
+
+config N_SMUX_LOOPBACK
+ tristate "SMUX line discipline loopback support"
+ depends on N_SMUX
+ help
+ Provides loopback and unit testing support for the Serial MUX Protocol.
+
+config SMUX_CTL
+ tristate "SMUX control driver"
+ depends on N_SMUX
+ help
+ Support for SMUX control driver on top of serial MUX.
+
config TRACE_ROUTER
tristate "Trace data router for MIPI P1149.7 cJTAG standard"
depends on TRACE_SINK
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index 58ad1c05b7f8..23de780eaf4d 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -6,6 +6,9 @@ obj-$(CONFIG_AUDIT) += tty_audit.o
obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
obj-$(CONFIG_N_HDLC) += n_hdlc.o
obj-$(CONFIG_N_GSM) += n_gsm.o
+obj-$(CONFIG_N_SMUX) += n_smux.o smux_debug.o
+obj-$(CONFIG_N_SMUX_LOOPBACK) += smux_test.o smux_loopback.o
+obj-$(CONFIG_SMUX_CTL) += smux_ctl.o
obj-$(CONFIG_TRACE_ROUTER) += n_tracerouter.o
obj-$(CONFIG_TRACE_SINK) += n_tracesink.o
obj-$(CONFIG_R3964) += n_r3964.o
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 4b6c78331a64..4c9bcc6925bc 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -1065,13 +1065,16 @@ static int msm_serial_probe(struct platform_device *pdev)
msm_port->is_uartdm = 0;
msm_port->clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(msm_port->clk))
+ if (IS_ERR(msm_port->clk)) {
+ pr_err("clk\n");
return PTR_ERR(msm_port->clk);
-
+ }
if (msm_port->is_uartdm) {
msm_port->pclk = devm_clk_get(&pdev->dev, "iface");
- if (IS_ERR(msm_port->pclk))
+ if (IS_ERR(msm_port->pclk)) {
+ pr_err("clk2\n");
return PTR_ERR(msm_port->pclk);
+ }
clk_set_rate(msm_port->clk, 1843200);
}
@@ -1090,7 +1093,7 @@ static int msm_serial_probe(struct platform_device *pdev)
port->irq = irq;
platform_set_drvdata(pdev, port);
-
+pr_err("c\n");
return uart_add_one_port(&msm_uart_driver, port);
}
diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h
index 292c571750f0..f7aae4d28bf6 100644
--- a/include/asm-generic/dma-contiguous.h
+++ b/include/asm-generic/dma-contiguous.h
@@ -1,9 +1,25 @@
#ifndef _ASM_GENERIC_DMA_CONTIGUOUS_H
#define _ASM_GENERIC_DMA_CONTIGUOUS_H
+#include <linux/device.h>
+#include <linux/dma-contiguous.h>
+
#include <linux/types.h>
static inline void
dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+ if (dev && dev->cma_area)
+ return dev->cma_area;
+ return dma_contiguous_def_area;
+}
+
+static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
+{
+ if (dev)
+ dev->cma_area = cma;
+}
+
#endif
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 383ade1a211b..d38a29488d0d 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -27,7 +27,7 @@
*/
#ifndef ARCH_NR_GPIOS
-#define ARCH_NR_GPIOS 512
+#define ARCH_NR_GPIOS 1024
#endif
/*
diff --git a/include/asm-generic/percpu-defs.h b/include/asm-generic/percpu-defs.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/include/asm-generic/percpu-defs.h
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index aa70cbda327c..a0a37cfc16ea 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -165,9 +165,18 @@
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk)
#define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
-#define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method)
+/* #define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method) */
#define EARLYCON_OF_TABLES() OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon)
+#ifdef CONFIG_OF
+#define CPU_METHOD_OF_TABLES() . = ALIGN(8); \
+ VMLINUX_SYMBOL(__cpu_method_of_table) = .; \
+ *(__cpu_method_of_table) \
+ VMLINUX_SYMBOL(__cpu_method_of_table_end) = .;
+#else
+#define CPU_METHOD_OF_TABLES()
+#endif
+
#define KERNEL_DTB() \
STRUCT_ALIGN(); \
VMLINUX_SYMBOL(__dtb_start) = .; \
@@ -496,6 +505,7 @@
MEM_DISCARD(init.rodata) \
CLK_OF_TABLES() \
RESERVEDMEM_OF_TABLES() \
+ CPU_METHOD_OF_TABLES() \
CLKSRC_OF_TABLES() \
CPU_METHOD_OF_TABLES() \
KERNEL_DTB() \
@@ -653,6 +663,11 @@
*(.security_initcall.init) \
VMLINUX_SYMBOL(__security_initcall_end) = .;
+#define COMPAT_EXPORTS \
+ VMLINUX_SYMBOL(__compat_exports_start) = .; \
+ *(.exportcompat.init) \
+ VMLINUX_SYMBOL(__compat_exports_end) = .;
+
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
. = ALIGN(4); \
diff --git a/include/dt-bindings/clock/msm-clocks-8916.h b/include/dt-bindings/clock/msm-clocks-8916.h
new file mode 100644
index 000000000000..c609f3e2969e
--- /dev/null
+++ b/include/dt-bindings/clock/msm-clocks-8916.h
@@ -0,0 +1,222 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CLOCKS_8916_H
+#define __MSM_CLOCKS_8916_H
+
+/* GPLLs */
+#define clk_gpll0_clk_src 0x5933b69f
+#define clk_gpll0_ao_clk_src 0x6b2fb034
+#define clk_gpll1_clk_src 0x916f8847
+#define clk_gpll2_clk_src 0x7c34503b
+
+/* SR2PLL */
+#define clk_a53sspll 0xf761da94
+
+/* SRCs */
+#define clk_apss_ahb_clk_src 0x36f8495f
+#define clk_blsp1_qup1_i2c_apps_clk_src 0x17f78f5e
+#define clk_blsp1_qup1_spi_apps_clk_src 0xf534c4fa
+#define clk_blsp1_qup2_i2c_apps_clk_src 0x8de71c79
+#define clk_blsp1_qup2_spi_apps_clk_src 0x33cf809a
+#define clk_blsp1_qup3_i2c_apps_clk_src 0xf161b902
+#define clk_blsp1_qup3_spi_apps_clk_src 0x5e95683f
+#define clk_blsp1_qup4_i2c_apps_clk_src 0xb2ecce68
+#define clk_blsp1_qup4_spi_apps_clk_src 0xddb5bbdb
+#define clk_blsp1_qup5_i2c_apps_clk_src 0x71ea7804
+#define clk_blsp1_qup5_spi_apps_clk_src 0x9752f35f
+#define clk_blsp1_qup6_i2c_apps_clk_src 0x28806803
+#define clk_blsp1_qup6_spi_apps_clk_src 0x44a1edc4
+#define clk_blsp1_uart1_apps_clk_src 0xf8146114
+#define clk_blsp1_uart2_apps_clk_src 0xfc9c2f73
+#define clk_byte0_clk_src 0x75cc885b
+#define clk_cci_clk_src 0x822f3d97
+#define clk_cpp_clk_src 0x8382f56d
+#define clk_camss_ahb_clk_src 0xa68afe9c
+#define clk_camss_gp0_clk_src 0x43b063e9
+#define clk_camss_gp1_clk_src 0xa3315f1b
+#define clk_crypto_clk_src 0x37a21414
+#define clk_csi0_clk_src 0x227e65bc
+#define clk_csi1_clk_src 0x6a2a6c36
+#define clk_csi0phytimer_clk_src 0xc8a309be
+#define clk_csi1phytimer_clk_src 0x7c0fe23a
+#define clk_esc0_clk_src 0xb41d7c38
+#define clk_gfx3d_clk_src 0x917f76ef
+#define clk_gp1_clk_src 0xad85b97a
+#define clk_gp2_clk_src 0xfb1f0065
+#define clk_gp3_clk_src 0x63b693d6
+#define clk_jpeg0_clk_src 0x9a0a0ac3
+#define clk_mdp_clk_src 0x6dc1f8f1
+#define clk_mclk0_clk_src 0x266b3853
+#define clk_mclk1_clk_src 0xa73cad0c
+#define clk_pclk0_clk_src 0xccac1f35
+#define clk_pdm2_clk_src 0x31e494fd
+#define clk_sdcc1_apps_clk_src 0xd4975db2
+#define clk_sdcc2_apps_clk_src 0xfc46c821
+#define clk_usb_hs_system_clk_src 0x28385546
+#define clk_vsync_clk_src 0xecb43940
+#define clk_vfe0_clk_src 0xa0c2bd8f
+#define clk_vcodec0_clk_src 0xbc193019
+
+/* BRANCHEs*/
+#define clk_gcc_blsp1_ahb_clk 0x8caa5b4f
+#define clk_gcc_boot_rom_ahb_clk 0xde2adeb1
+#define clk_gcc_crypto_ahb_clk 0x94de4919
+#define clk_gcc_crypto_axi_clk 0xd4415c9b
+#define clk_gcc_crypto_clk 0x00d390d2
+#define clk_gcc_prng_ahb_clk 0x397e7eaa
+#define clk_gcc_apss_tcu_clk 0xaf56a329
+#define clk_gcc_gfx_tbu_clk 0x18bb9a90
+#define clk_gcc_gtcu_ahb_clk 0xb432168e
+#define clk_gcc_jpeg_tbu_clk 0xcf8fd944
+#define clk_gcc_mdp_tbu_clk 0x82287f76
+#define clk_gcc_smmu_cfg_clk 0x75eaefa5
+#define clk_gcc_venus_tbu_clk 0x7e0b97ce
+#define clk_gcc_vfe_tbu_clk 0x061f2f95
+#define clk_gcc_blsp1_qup1_i2c_apps_clk 0xc303fae9
+#define clk_gcc_blsp1_qup1_spi_apps_clk 0x759a76b0
+#define clk_gcc_blsp1_qup2_i2c_apps_clk 0x1076f220
+#define clk_gcc_blsp1_qup2_spi_apps_clk 0x3e77d48f
+#define clk_gcc_blsp1_qup3_i2c_apps_clk 0x9e25ac82
+#define clk_gcc_blsp1_qup3_spi_apps_clk 0xfb978880
+#define clk_gcc_blsp1_qup4_i2c_apps_clk 0xd7f40f6f
+#define clk_gcc_blsp1_qup4_spi_apps_clk 0x80f8722f
+#define clk_gcc_blsp1_qup5_i2c_apps_clk 0xacae5604
+#define clk_gcc_blsp1_qup5_spi_apps_clk 0xbf3e15d7
+#define clk_gcc_blsp1_qup6_i2c_apps_clk 0x5c6ad820
+#define clk_gcc_blsp1_qup6_spi_apps_clk 0x780d9f85
+#define clk_gcc_blsp1_uart1_apps_clk 0xc7c62f90
+#define clk_gcc_blsp1_uart2_apps_clk 0xf8a61c96
+#define clk_gcc_camss_cci_ahb_clk 0xa81c11ba
+#define clk_gcc_camss_cci_clk 0xb7dd8824
+#define clk_gcc_camss_csi0_ahb_clk 0x175d672a
+#define clk_gcc_camss_csi0_clk 0x6b01b3e1
+#define clk_gcc_camss_csi0phy_clk 0x06a41ff7
+#define clk_gcc_camss_csi0pix_clk 0x61a8a930
+#define clk_gcc_camss_csi0rdi_clk 0x7053c7ae
+#define clk_gcc_camss_csi1_ahb_clk 0x2c2dc261
+#define clk_gcc_camss_csi1_clk 0x1aba4a8c
+#define clk_gcc_camss_csi1phy_clk 0x0fd1d1fa
+#define clk_gcc_camss_csi1pix_clk 0x87fc98d8
+#define clk_gcc_camss_csi1rdi_clk 0x6ac996fe
+#define clk_gcc_camss_csi_vfe0_clk 0xcc73453c
+#define clk_gcc_camss_gp0_clk 0xd2bc3892
+#define clk_gcc_camss_gp1_clk 0xe4c013e1
+#define clk_gcc_camss_ispif_ahb_clk 0x3c0a858f
+#define clk_gcc_camss_jpeg0_clk 0x1ed3f032
+#define clk_gcc_camss_jpeg_ahb_clk 0x3bfa7603
+#define clk_gcc_camss_jpeg_axi_clk 0x3e278896
+#define clk_gcc_camss_mclk0_clk 0x80902deb
+#define clk_gcc_camss_mclk1_clk 0x5002d85f
+#define clk_gcc_camss_micro_ahb_clk 0xfbbee8cf
+#define clk_gcc_camss_csi0phytimer_clk 0xf8897589
+#define clk_gcc_camss_csi1phytimer_clk 0x4d26438f
+#define clk_gcc_camss_ahb_clk 0x9894b414
+#define clk_gcc_camss_top_ahb_clk 0x4e814a78
+#define clk_gcc_camss_cpp_ahb_clk 0x4ac95e14
+#define clk_gcc_camss_cpp_clk 0x7118a0de
+#define clk_gcc_camss_vfe0_clk 0xaaa3cd97
+#define clk_gcc_camss_vfe_ahb_clk 0x4050f47a
+#define clk_gcc_camss_vfe_axi_clk 0x77fe2384
+#define clk_gcc_oxili_gmem_clk 0x5620913a
+#define clk_gcc_gp1_clk 0x057f7b69
+#define clk_gcc_gp2_clk 0x9bf83ffd
+#define clk_gcc_gp3_clk 0xec6539ee
+#define clk_gcc_mdss_ahb_clk 0xbfb92ed3
+#define clk_gcc_mdss_axi_clk 0x668f51de
+#define clk_gcc_mdss_byte0_clk 0x35da7862
+#define clk_gcc_mdss_esc0_clk 0xaec5cb25
+#define clk_gcc_mdss_mdp_clk 0x22f3521f
+#define clk_gcc_mdss_pclk0_clk 0xcc5c5c77
+#define clk_gcc_mdss_vsync_clk 0x32a09f1f
+#define clk_gcc_mss_cfg_ahb_clk 0x111cde81
+#define clk_gcc_mss_q6_bimc_axi_clk 0x67544d62
+#define clk_gcc_oxili_ahb_clk 0xd15c8a00
+#define clk_gcc_oxili_gfx3d_clk 0x49a51fd9
+#define clk_gcc_pdm2_clk 0x99d55711
+#define clk_gcc_pdm_ahb_clk 0x365664f6
+#define clk_gcc_sdcc1_ahb_clk 0x691e0caa
+#define clk_gcc_sdcc1_apps_clk 0x9ad6fb96
+#define clk_gcc_sdcc2_ahb_clk 0x23d5727f
+#define clk_gcc_sdcc2_apps_clk 0x861b20ac
+#define clk_gcc_usb2a_phy_sleep_clk 0x6caa736f
+#define clk_gcc_usb_hs_ahb_clk 0x72ce8032
+#define clk_gcc_usb_hs_system_clk 0xa11972e5
+#define clk_gcc_venus0_ahb_clk 0x08d778c6
+#define clk_gcc_venus0_axi_clk 0xcdf4c8f6
+#define clk_gcc_venus0_vcodec0_clk 0xf76a02bb
+#define clk_gcc_gfx_tcu_clk 0x59505e55
+#define clk_gcc_gtcu_ahb_bridge_clk 0x19d2c5fe
+#define clk_gcc_bimc_gpu_clk 0x19922503
+#define clk_gcc_bimc_gfx_clk 0x3edd69ad
+
+#define clk_pixel_clk_src 0x8b6f83d8
+#define clk_byte_clk_src 0x3a911c53
+
+/* RPM */
+#define clk_pcnoc_clk 0xc1296d0f
+#define clk_pcnoc_a_clk 0x9bcffee4
+#define clk_pcnoc_msmbus_clk 0x2b53b688
+#define clk_pcnoc_msmbus_a_clk 0x9753a54f
+#define clk_pcnoc_keepalive_a_clk 0x9464f720
+#define clk_pcnoc_sps_clk 0x23d3f584
+#define clk_pcnoc_usb_a_clk 0x11d6a74e
+#define clk_snoc_clk 0x2c341aa0
+#define clk_snoc_a_clk 0x8fcef2af
+#define clk_snoc_msmbus_clk 0xe6900bb6
+#define clk_snoc_msmbus_a_clk 0x5d4683bd
+#define clk_snoc_mmnoc_axi_clk 0xfedd4bd5
+#define clk_snoc_mmnoc_ahb_clk 0xd2149dbb
+#define clk_snoc_usb_a_clk 0x34b7821b
+#define clk_bimc_clk 0x4b80bf00
+#define clk_bimc_a_clk 0x4b25668a
+#define clk_bimc_acpu_a_clk 0x4446311b
+#define clk_bimc_msmbus_clk 0xd212feea
+#define clk_bimc_msmbus_a_clk 0x71d1a499
+#define clk_bimc_usb_a_clk 0xea410834
+#define clk_qdss_clk 0x1492202a
+#define clk_qdss_a_clk 0xdd121669
+#define clk_xo_clk_src 0x23f5649f
+#define clk_xo_a_clk_src 0x2fdd2c7c
+#define clk_xo_otg_clk 0x79bca5cc
+#define clk_xo_a2 0xeba5a83d
+#define clk_xo_dwc3_clk 0xfad488ce
+#define clk_xo_ehci_host_clk 0xc7c340b1
+#define clk_xo_lpm_clk 0x2be48257
+#define clk_xo_pil_mss_clk 0xe97a8354
+#define clk_xo_pil_pronto_clk 0x89dae6d0
+#define clk_xo_wlan_clk 0x0116b76f
+
+#define clk_bb_clk1 0xf5304268
+#define clk_bb_clk1_pin 0x6dd0a779
+#define clk_bb_clk2 0xfe15cb87
+#define clk_bb_clk2_pin 0x498938e5
+#define clk_rf_clk1 0xaabeea5a
+#define clk_rf_clk1_pin 0x8f463562
+#define clk_rf_clk2 0x24a30992
+#define clk_rf_clk2_pin 0xa7c5602a
+
+/* DEBUG */
+#define clk_gcc_debug_mux 0x8121ac15
+#define clk_rpm_debug_mux 0x25cd1f3a
+#define clk_wcnss_m_clk 0x709f430b
+#define clk_apss_debug_pri_mux 0xc691ff55
+#define clk_apss_debug_sec_mux 0xc0b680f9
+#define clk_apss_debug_ter_mux 0x32041c48
+#define clk_apc0_m_clk 0xce1e9473
+#define clk_apc1_m_clk 0x990fbaf7
+#define clk_apc2_m_clk 0x252cd4ae
+#define clk_apc3_m_clk 0x78c64486
+#define clk_l2_m_clk 0x4bedf4d0
+
+
+#endif
diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h
new file mode 100644
index 000000000000..73e15977a093
--- /dev/null
+++ b/include/dt-bindings/msm/msm-bus-ids.h
@@ -0,0 +1,628 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BUS_IDS_H
+#define __MSM_BUS_IDS_H
+
+/* Topology related enums */
+#define MSM_BUS_FAB_DEFAULT 0
+#define MSM_BUS_FAB_APPSS 0
+#define MSM_BUS_FAB_SYSTEM 1024
+#define MSM_BUS_FAB_MMSS 2048
+#define MSM_BUS_FAB_SYSTEM_FPB 3072
+#define MSM_BUS_FAB_CPSS_FPB 4096
+
+#define MSM_BUS_FAB_BIMC 0
+#define MSM_BUS_FAB_SYS_NOC 1024
+#define MSM_BUS_FAB_MMSS_NOC 2048
+#define MSM_BUS_FAB_OCMEM_NOC 3072
+#define MSM_BUS_FAB_PERIPH_NOC 4096
+#define MSM_BUS_FAB_CONFIG_NOC 5120
+#define MSM_BUS_FAB_OCMEM_VNOC 6144
+
+#define MSM_BUS_MASTER_FIRST 1
+#define MSM_BUS_MASTER_AMPSS_M0 1
+#define MSM_BUS_MASTER_AMPSS_M1 2
+#define MSM_BUS_APPSS_MASTER_FAB_MMSS 3
+#define MSM_BUS_APPSS_MASTER_FAB_SYSTEM 4
+#define MSM_BUS_SYSTEM_MASTER_FAB_APPSS 5
+#define MSM_BUS_MASTER_SPS 6
+#define MSM_BUS_MASTER_ADM_PORT0 7
+#define MSM_BUS_MASTER_ADM_PORT1 8
+#define MSM_BUS_SYSTEM_MASTER_ADM1_PORT0 9
+#define MSM_BUS_MASTER_ADM1_PORT1 10
+#define MSM_BUS_MASTER_LPASS_PROC 11
+#define MSM_BUS_MASTER_MSS_PROCI 12
+#define MSM_BUS_MASTER_MSS_PROCD 13
+#define MSM_BUS_MASTER_MSS_MDM_PORT0 14
+#define MSM_BUS_MASTER_LPASS 15
+#define MSM_BUS_SYSTEM_MASTER_CPSS_FPB 16
+#define MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB 17
+#define MSM_BUS_SYSTEM_MASTER_MMSS_FPB 18
+#define MSM_BUS_MASTER_ADM1_CI 19
+#define MSM_BUS_MASTER_ADM0_CI 20
+#define MSM_BUS_MASTER_MSS_MDM_PORT1 21
+#define MSM_BUS_MASTER_MDP_PORT0 22
+#define MSM_BUS_MASTER_MDP_PORT1 23
+#define MSM_BUS_MMSS_MASTER_ADM1_PORT0 24
+#define MSM_BUS_MASTER_ROTATOR 25
+#define MSM_BUS_MASTER_GRAPHICS_3D 26
+#define MSM_BUS_MASTER_JPEG_DEC 27
+#define MSM_BUS_MASTER_GRAPHICS_2D_CORE0 28
+#define MSM_BUS_MASTER_VFE 29
+#define MSM_BUS_MASTER_VPE 30
+#define MSM_BUS_MASTER_JPEG_ENC 31
+#define MSM_BUS_MASTER_GRAPHICS_2D_CORE1 32
+#define MSM_BUS_MMSS_MASTER_APPS_FAB 33
+#define MSM_BUS_MASTER_HD_CODEC_PORT0 34
+#define MSM_BUS_MASTER_HD_CODEC_PORT1 35
+#define MSM_BUS_MASTER_SPDM 36
+#define MSM_BUS_MASTER_RPM 37
+#define MSM_BUS_MASTER_MSS 38
+#define MSM_BUS_MASTER_RIVA 39
+#define MSM_BUS_SYSTEM_MASTER_UNUSED_6 40
+#define MSM_BUS_MASTER_MSS_SW_PROC 41
+#define MSM_BUS_MASTER_MSS_FW_PROC 42
+#define MSM_BUS_MMSS_MASTER_UNUSED_2 43
+#define MSM_BUS_MASTER_GSS_NAV 44
+#define MSM_BUS_MASTER_PCIE 45
+#define MSM_BUS_MASTER_SATA 46
+#define MSM_BUS_MASTER_CRYPTO 47
+#define MSM_BUS_MASTER_VIDEO_CAP 48
+#define MSM_BUS_MASTER_GRAPHICS_3D_PORT1 49
+#define MSM_BUS_MASTER_VIDEO_ENC 50
+#define MSM_BUS_MASTER_VIDEO_DEC 51
+#define MSM_BUS_MASTER_LPASS_AHB 52
+#define MSM_BUS_MASTER_QDSS_BAM 53
+#define MSM_BUS_MASTER_SNOC_CFG 54
+#define MSM_BUS_MASTER_CRYPTO_CORE0 55
+#define MSM_BUS_MASTER_CRYPTO_CORE1 56
+#define MSM_BUS_MASTER_MSS_NAV 57
+#define MSM_BUS_MASTER_OCMEM_DMA 58
+#define MSM_BUS_MASTER_WCSS 59
+#define MSM_BUS_MASTER_QDSS_ETR 60
+#define MSM_BUS_MASTER_USB3 61
+#define MSM_BUS_MASTER_JPEG 62
+#define MSM_BUS_MASTER_VIDEO_P0 63
+#define MSM_BUS_MASTER_VIDEO_P1 64
+#define MSM_BUS_MASTER_MSS_PROC 65
+#define MSM_BUS_MASTER_JPEG_OCMEM 66
+#define MSM_BUS_MASTER_MDP_OCMEM 67
+#define MSM_BUS_MASTER_VIDEO_P0_OCMEM 68
+#define MSM_BUS_MASTER_VIDEO_P1_OCMEM 69
+#define MSM_BUS_MASTER_VFE_OCMEM 70
+#define MSM_BUS_MASTER_CNOC_ONOC_CFG 71
+#define MSM_BUS_MASTER_RPM_INST 72
+#define MSM_BUS_MASTER_RPM_DATA 73
+#define MSM_BUS_MASTER_RPM_SYS 74
+#define MSM_BUS_MASTER_DEHR 75
+#define MSM_BUS_MASTER_QDSS_DAP 76
+#define MSM_BUS_MASTER_TIC 77
+#define MSM_BUS_MASTER_SDCC_1 78
+#define MSM_BUS_MASTER_SDCC_3 79
+#define MSM_BUS_MASTER_SDCC_4 80
+#define MSM_BUS_MASTER_SDCC_2 81
+#define MSM_BUS_MASTER_TSIF 82
+#define MSM_BUS_MASTER_BAM_DMA 83
+#define MSM_BUS_MASTER_BLSP_2 84
+#define MSM_BUS_MASTER_USB_HSIC 85
+#define MSM_BUS_MASTER_BLSP_1 86
+#define MSM_BUS_MASTER_USB_HS 87
+#define MSM_BUS_MASTER_PNOC_CFG 88
+#define MSM_BUS_MASTER_V_OCMEM_GFX3D 89
+#define MSM_BUS_MASTER_IPA 90
+#define MSM_BUS_MASTER_QPIC 91
+#define MSM_BUS_MASTER_MDPE 92
+#define MSM_BUS_MASTER_USB_HS2 93
+#define MSM_BUS_MASTER_VPU 94
+#define MSM_BUS_MASTER_UFS 95
+#define MSM_BUS_MASTER_BCAST 96
+#define MSM_BUS_MASTER_CRYPTO_CORE2 97
+#define MSM_BUS_MASTER_EMAC 98
+#define MSM_BUS_MASTER_VPU_1 99
+#define MSM_BUS_MASTER_PCIE_1 100
+#define MSM_BUS_MASTER_USB3_1 101
+#define MSM_BUS_MASTER_CNOC_MNOC_MMSS_CFG 102
+#define MSM_BUS_MASTER_CNOC_MNOC_CFG 103
+#define MSM_BUS_MASTER_TCU_0 104
+#define MSM_BUS_MASTER_TCU_1 105
+#define MSM_BUS_MASTER_CPP 106
+#define MSM_BUS_MASTER_LAST 107
+
+#define MSM_BUS_SYSTEM_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB
+#define MSM_BUS_CPSS_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_CPSS_FPB
+
+#define MSM_BUS_SNOC_MM_INT_0 10000
+#define MSM_BUS_SNOC_MM_INT_1 10001
+#define MSM_BUS_SNOC_MM_INT_2 10002
+#define MSM_BUS_SNOC_MM_INT_BIMC 10003
+#define MSM_BUS_SNOC_INT_0 10004
+#define MSM_BUS_SNOC_INT_1 10005
+#define MSM_BUS_SNOC_INT_BIMC 10006
+#define MSM_BUS_SNOC_BIMC_0_MAS 10007
+#define MSM_BUS_SNOC_BIMC_1_MAS 10008
+#define MSM_BUS_SNOC_QDSS_INT 10009
+#define MSM_BUS_PNOC_SNOC_MAS 10010
+#define MSM_BUS_PNOC_SNOC_SLV 10011
+#define MSM_BUS_PNOC_INT_0 10012
+#define MSM_BUS_PNOC_INT_1 10013
+#define MSM_BUS_PNOC_M_0 10014
+#define MSM_BUS_PNOC_M_1 10015
+#define MSM_BUS_BIMC_SNOC_MAS 10016
+#define MSM_BUS_BIMC_SNOC_SLV 10017
+#define MSM_BUS_PNOC_SLV_0 10018
+#define MSM_BUS_PNOC_SLV_1 10019
+#define MSM_BUS_PNOC_SLV_2 10020
+#define MSM_BUS_PNOC_SLV_3 10021
+#define MSM_BUS_PNOC_SLV_4 10022
+#define MSM_BUS_PNOC_SLV_8 10023
+#define MSM_BUS_PNOC_SLV_9 10024
+#define MSM_BUS_SNOC_BIMC_0_SLV 10025
+#define MSM_BUS_SNOC_BIMC_1_SLV 10026
+#define MSM_BUS_MNOC_BIMC_MAS 10027
+#define MSM_BUS_MNOC_BIMC_SLV 10028
+#define MSM_BUS_BIMC_MNOC_MAS 10029
+#define MSM_BUS_BIMC_MNOC_SLV 10030
+#define MSM_BUS_SNOC_BIMC_MAS 10031
+#define MSM_BUS_SNOC_BIMC_SLV 10032
+#define MSM_BUS_CNOC_SNOC_MAS 10033
+#define MSM_BUS_CNOC_SNOC_SLV 10034
+#define MSM_BUS_SNOC_CNOC_MAS 10035
+#define MSM_BUS_SNOC_CNOC_SLV 10036
+#define MSM_BUS_OVNOC_SNOC_MAS 10037
+#define MSM_BUS_OVNOC_SNOC_SLV 10038
+#define MSM_BUS_SNOC_OVNOC_MAS 10039
+#define MSM_BUS_SNOC_OVNOC_SLV 10040
+#define MSM_BUS_SNOC_PNOC_MAS 10041
+#define MSM_BUS_SNOC_PNOC_SLV 10042
+#define MSM_BUS_BIMC_INT_APPS_EBI 10043
+#define MSM_BUS_BIMC_INT_APPS_SNOC 10044
+#define MSM_BUS_SNOC_BIMC_2_MAS 10045
+#define MSM_BUS_SNOC_BIMC_2_SLV 10046
+#define MSM_BUS_INT_LAST 10047
+
+#define MSM_BUS_SLAVE_FIRST 512
+#define MSM_BUS_SLAVE_EBI_CH0 512
+#define MSM_BUS_SLAVE_EBI_CH1 513
+#define MSM_BUS_SLAVE_AMPSS_L2 514
+#define MSM_BUS_APPSS_SLAVE_FAB_MMSS 515
+#define MSM_BUS_APPSS_SLAVE_FAB_SYSTEM 516
+#define MSM_BUS_SYSTEM_SLAVE_FAB_APPS 517
+#define MSM_BUS_SLAVE_SPS 518
+#define MSM_BUS_SLAVE_SYSTEM_IMEM 519
+#define MSM_BUS_SLAVE_AMPSS 520
+#define MSM_BUS_SLAVE_MSS 521
+#define MSM_BUS_SLAVE_LPASS 522
+#define MSM_BUS_SYSTEM_SLAVE_CPSS_FPB 523
+#define MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB 524
+#define MSM_BUS_SYSTEM_SLAVE_MMSS_FPB 525
+#define MSM_BUS_SLAVE_CORESIGHT 526
+#define MSM_BUS_SLAVE_RIVA 527
+#define MSM_BUS_SLAVE_SMI 528
+#define MSM_BUS_MMSS_SLAVE_FAB_APPS 529
+#define MSM_BUS_MMSS_SLAVE_FAB_APPS_1 530
+#define MSM_BUS_SLAVE_MM_IMEM 531
+#define MSM_BUS_SLAVE_CRYPTO 532
+#define MSM_BUS_SLAVE_SPDM 533
+#define MSM_BUS_SLAVE_RPM 534
+#define MSM_BUS_SLAVE_RPM_MSG_RAM 535
+#define MSM_BUS_SLAVE_MPM 536
+#define MSM_BUS_SLAVE_PMIC1_SSBI1_A 537
+#define MSM_BUS_SLAVE_PMIC1_SSBI1_B 538
+#define MSM_BUS_SLAVE_PMIC1_SSBI1_C 539
+#define MSM_BUS_SLAVE_PMIC2_SSBI2_A 540
+#define MSM_BUS_SLAVE_PMIC2_SSBI2_B 541
+#define MSM_BUS_SLAVE_GSBI1_UART 542
+#define MSM_BUS_SLAVE_GSBI2_UART 543
+#define MSM_BUS_SLAVE_GSBI3_UART 544
+#define MSM_BUS_SLAVE_GSBI4_UART 545
+#define MSM_BUS_SLAVE_GSBI5_UART 546
+#define MSM_BUS_SLAVE_GSBI6_UART 547
+#define MSM_BUS_SLAVE_GSBI7_UART 548
+#define MSM_BUS_SLAVE_GSBI8_UART 549
+#define MSM_BUS_SLAVE_GSBI9_UART 550
+#define MSM_BUS_SLAVE_GSBI10_UART 551
+#define MSM_BUS_SLAVE_GSBI11_UART 552
+#define MSM_BUS_SLAVE_GSBI12_UART 553
+#define MSM_BUS_SLAVE_GSBI1_QUP 554
+#define MSM_BUS_SLAVE_GSBI2_QUP 555
+#define MSM_BUS_SLAVE_GSBI3_QUP 556
+#define MSM_BUS_SLAVE_GSBI4_QUP 557
+#define MSM_BUS_SLAVE_GSBI5_QUP 558
+#define MSM_BUS_SLAVE_GSBI6_QUP 559
+#define MSM_BUS_SLAVE_GSBI7_QUP 560
+#define MSM_BUS_SLAVE_GSBI8_QUP 561
+#define MSM_BUS_SLAVE_GSBI9_QUP 562
+#define MSM_BUS_SLAVE_GSBI10_QUP 563
+#define MSM_BUS_SLAVE_GSBI11_QUP 564
+#define MSM_BUS_SLAVE_GSBI12_QUP 565
+#define MSM_BUS_SLAVE_EBI2_NAND 566
+#define MSM_BUS_SLAVE_EBI2_CS0 567
+#define MSM_BUS_SLAVE_EBI2_CS1 568
+#define MSM_BUS_SLAVE_EBI2_CS2 569
+#define MSM_BUS_SLAVE_EBI2_CS3 570
+#define MSM_BUS_SLAVE_EBI2_CS4 571
+#define MSM_BUS_SLAVE_EBI2_CS5 572
+#define MSM_BUS_SLAVE_USB_FS1 573
+#define MSM_BUS_SLAVE_USB_FS2 574
+#define MSM_BUS_SLAVE_TSIF 575
+#define MSM_BUS_SLAVE_MSM_TSSC 576
+#define MSM_BUS_SLAVE_MSM_PDM 577
+#define MSM_BUS_SLAVE_MSM_DIMEM 578
+#define MSM_BUS_SLAVE_MSM_TCSR 579
+#define MSM_BUS_SLAVE_MSM_PRNG 580
+#define MSM_BUS_SLAVE_GSS 581
+#define MSM_BUS_SLAVE_SATA 582
+#define MSM_BUS_SLAVE_USB3 583
+#define MSM_BUS_SLAVE_WCSS 584
+#define MSM_BUS_SLAVE_OCIMEM 585
+#define MSM_BUS_SLAVE_SNOC_OCMEM 586
+#define MSM_BUS_SLAVE_SERVICE_SNOC 587
+#define MSM_BUS_SLAVE_QDSS_STM 588
+#define MSM_BUS_SLAVE_CAMERA_CFG 589
+#define MSM_BUS_SLAVE_DISPLAY_CFG 590
+#define MSM_BUS_SLAVE_OCMEM_CFG 591
+#define MSM_BUS_SLAVE_CPR_CFG 592
+#define MSM_BUS_SLAVE_CPR_XPU_CFG 593
+#define MSM_BUS_SLAVE_MISC_CFG 594
+#define MSM_BUS_SLAVE_MISC_XPU_CFG 595
+#define MSM_BUS_SLAVE_VENUS_CFG 596
+#define MSM_BUS_SLAVE_MISC_VENUS_CFG 597
+#define MSM_BUS_SLAVE_GRAPHICS_3D_CFG 598
+#define MSM_BUS_SLAVE_MMSS_CLK_CFG 599
+#define MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG 600
+#define MSM_BUS_SLAVE_MNOC_MPU_CFG 601
+#define MSM_BUS_SLAVE_ONOC_MPU_CFG 602
+#define MSM_BUS_SLAVE_SERVICE_MNOC 603
+#define MSM_BUS_SLAVE_OCMEM 604
+#define MSM_BUS_SLAVE_SERVICE_ONOC 605
+#define MSM_BUS_SLAVE_SDCC_1 606
+#define MSM_BUS_SLAVE_SDCC_3 607
+#define MSM_BUS_SLAVE_SDCC_2 608
+#define MSM_BUS_SLAVE_SDCC_4 609
+#define MSM_BUS_SLAVE_BAM_DMA 610
+#define MSM_BUS_SLAVE_BLSP_2 611
+#define MSM_BUS_SLAVE_USB_HSIC 612
+#define MSM_BUS_SLAVE_BLSP_1 613
+#define MSM_BUS_SLAVE_USB_HS 614
+#define MSM_BUS_SLAVE_PDM 615
+#define MSM_BUS_SLAVE_PERIPH_APU_CFG 616
+#define MSM_BUS_SLAVE_PNOC_MPU_CFG 617
+#define MSM_BUS_SLAVE_PRNG 618
+#define MSM_BUS_SLAVE_SERVICE_PNOC 619
+#define MSM_BUS_SLAVE_CLK_CTL 620
+#define MSM_BUS_SLAVE_CNOC_MSS 621
+#define MSM_BUS_SLAVE_SECURITY 622
+#define MSM_BUS_SLAVE_TCSR 623
+#define MSM_BUS_SLAVE_TLMM 624
+#define MSM_BUS_SLAVE_CRYPTO_0_CFG 625
+#define MSM_BUS_SLAVE_CRYPTO_1_CFG 626
+#define MSM_BUS_SLAVE_IMEM_CFG 627
+#define MSM_BUS_SLAVE_MESSAGE_RAM 628
+#define MSM_BUS_SLAVE_BIMC_CFG 629
+#define MSM_BUS_SLAVE_BOOT_ROM 630
+#define MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG 631
+#define MSM_BUS_SLAVE_PMIC_ARB 632
+#define MSM_BUS_SLAVE_SPDM_WRAPPER 633
+#define MSM_BUS_SLAVE_DEHR_CFG 634
+#define MSM_BUS_SLAVE_QDSS_CFG 635
+#define MSM_BUS_SLAVE_RBCPR_CFG 636
+#define MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG 637
+#define MSM_BUS_SLAVE_SNOC_MPU_CFG 638
+#define MSM_BUS_SLAVE_CNOC_ONOC_CFG 639
+#define MSM_BUS_SLAVE_CNOC_MNOC_CFG 640
+#define MSM_BUS_SLAVE_PNOC_CFG 641
+#define MSM_BUS_SLAVE_SNOC_CFG 642
+#define MSM_BUS_SLAVE_EBI1_DLL_CFG 643
+#define MSM_BUS_SLAVE_PHY_APU_CFG 644
+#define MSM_BUS_SLAVE_EBI1_PHY_CFG 645
+#define MSM_BUS_SLAVE_SERVICE_CNOC 646
+#define MSM_BUS_SLAVE_IPS_CFG 647
+#define MSM_BUS_SLAVE_QPIC 648
+#define MSM_BUS_SLAVE_DSI_CFG 649
+#define MSM_BUS_SLAVE_UFS_CFG 650
+#define MSM_BUS_SLAVE_RBCPR_CX_CFG 651
+#define MSM_BUS_SLAVE_RBCPR_MX_CFG 652
+#define MSM_BUS_SLAVE_PCIE_CFG 653
+#define MSM_BUS_SLAVE_USB_PHYS_CFG 654
+#define MSM_BUS_SLAVE_VIDEO_CAP_CFG 655
+#define MSM_BUS_SLAVE_AVSYNC_CFG 656
+#define MSM_BUS_SLAVE_CRYPTO_2_CFG 657
+#define MSM_BUS_SLAVE_VPU_CFG 658
+#define MSM_BUS_SLAVE_BCAST_CFG 659
+#define MSM_BUS_SLAVE_KLM_CFG 660
+#define MSM_BUS_SLAVE_GENI_IR_CFG 661
+#define MSM_BUS_SLAVE_OCMEM_GFX 662
+#define MSM_BUS_SLAVE_CATS_128 663
+#define MSM_BUS_SLAVE_OCMEM_64 664
+#define MSM_BUS_SLAVE_PCIE_0 665
+#define MSM_BUS_SLAVE_PCIE_1 666
+#define MSM_BUS_SLAVE_PCIE_0_CFG 667
+#define MSM_BUS_SLAVE_PCIE_1_CFG 668
+#define MSM_BUS_SLAVE_SRVC_MNOC 669
+#define MSM_BUS_SLAVE_LAST 670
+
+#define MSM_BUS_SYSTEM_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB
+#define MSM_BUS_CPSS_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_CPSS_FPB
+
+/*
+ * IDs used in RPM messages
+ */
+#define ICBID_MASTER_APPSS_PROC 0
+#define ICBID_MASTER_MSS_PROC 1
+#define ICBID_MASTER_MNOC_BIMC 2
+#define ICBID_MASTER_SNOC_BIMC 3
+#define ICBID_MASTER_SNOC_BIMC_0 ICBID_MASTER_SNOC_BIMC
+#define ICBID_MASTER_CNOC_MNOC_MMSS_CFG 4
+#define ICBID_MASTER_CNOC_MNOC_CFG 5
+#define ICBID_MASTER_GFX3D 6
+#define ICBID_MASTER_JPEG 7
+#define ICBID_MASTER_MDP 8
+#define ICBID_MASTER_MDP0 ICBID_MASTER_MDP
+#define ICBID_MASTER_MDPS ICBID_MASTER_MDP
+#define ICBID_MASTER_VIDEO 9
+#define ICBID_MASTER_VIDEO_P0 ICBID_MASTER_VIDEO
+#define ICBID_MASTER_VIDEO_P1 10
+#define ICBID_MASTER_VFE 11
+#define ICBID_MASTER_CNOC_ONOC_CFG 12
+#define ICBID_MASTER_JPEG_OCMEM 13
+#define ICBID_MASTER_MDP_OCMEM 14
+#define ICBID_MASTER_VIDEO_P0_OCMEM 15
+#define ICBID_MASTER_VIDEO_P1_OCMEM 16
+#define ICBID_MASTER_VFE_OCMEM 17
+#define ICBID_MASTER_LPASS_AHB 18
+#define ICBID_MASTER_QDSS_BAM 19
+#define ICBID_MASTER_SNOC_CFG 20
+#define ICBID_MASTER_BIMC_SNOC 21
+#define ICBID_MASTER_CNOC_SNOC 22
+#define ICBID_MASTER_CRYPTO 23
+#define ICBID_MASTER_CRYPTO_CORE0 ICBID_MASTER_CRYPTO
+#define ICBID_MASTER_CRYPTO_CORE1 24
+#define ICBID_MASTER_LPASS_PROC 25
+#define ICBID_MASTER_MSS 26
+#define ICBID_MASTER_MSS_NAV 27
+#define ICBID_MASTER_OCMEM_DMA 28
+#define ICBID_MASTER_PNOC_SNOC 29
+#define ICBID_MASTER_WCSS 30
+#define ICBID_MASTER_QDSS_ETR 31
+#define ICBID_MASTER_USB3 32
+#define ICBID_MASTER_USB3_0 ICBID_MASTER_USB3
+#define ICBID_MASTER_SDCC_1 33
+#define ICBID_MASTER_SDCC_3 34
+#define ICBID_MASTER_SDCC_2 35
+#define ICBID_MASTER_SDCC_4 36
+#define ICBID_MASTER_TSIF 37
+#define ICBID_MASTER_BAM_DMA 38
+#define ICBID_MASTER_BLSP_2 39
+#define ICBID_MASTER_USB_HSIC 40
+#define ICBID_MASTER_BLSP_1 41
+#define ICBID_MASTER_USB_HS 42
+#define ICBID_MASTER_USB_HS1 ICBID_MASTER_USB_HS
+#define ICBID_MASTER_PNOC_CFG 43
+#define ICBID_MASTER_SNOC_PNOC 44
+#define ICBID_MASTER_RPM_INST 45
+#define ICBID_MASTER_RPM_DATA 46
+#define ICBID_MASTER_RPM_SYS 47
+#define ICBID_MASTER_DEHR 48
+#define ICBID_MASTER_QDSS_DAP 49
+#define ICBID_MASTER_SPDM 50
+#define ICBID_MASTER_TIC 51
+#define ICBID_MASTER_SNOC_CNOC 52
+#define ICBID_MASTER_GFX3D_OCMEM 53
+#define ICBID_MASTER_GFX3D_GMEM ICBID_MASTER_GFX3D_OCMEM
+#define ICBID_MASTER_OVIRT_SNOC 54
+#define ICBID_MASTER_SNOC_OVIRT 55
+#define ICBID_MASTER_SNOC_GVIRT ICBID_MASTER_SNOC_OVIRT
+#define ICBID_MASTER_ONOC_OVIRT 56
+#define ICBID_MASTER_USB_HS2 57
+#define ICBID_MASTER_QPIC 58
+#define ICBID_MASTER_IPA 59
+#define ICBID_MASTER_DSI 60
+#define ICBID_MASTER_MDP1 61
+#define ICBID_MASTER_MDPE ICBID_MASTER_MDP1
+#define ICBID_MASTER_VPU_PROC 62
+#define ICBID_MASTER_VPU 63
+#define ICBID_MASTER_VPU0 ICBID_MASTER_VPU
+#define ICBID_MASTER_CRYPTO_CORE2 64
+#define ICBID_MASTER_PCIE_0 65
+#define ICBID_MASTER_PCIE_1 66
+#define ICBID_MASTER_SATA 67
+#define ICBID_MASTER_UFS 68
+#define ICBID_MASTER_USB3_1 69
+#define ICBID_MASTER_VIDEO_OCMEM 70
+#define ICBID_MASTER_VPU1 71
+#define ICBID_MASTER_VCAP 72
+#define ICBID_MASTER_EMAC 73
+#define ICBID_MASTER_BCAST 74
+#define ICBID_MASTER_MMSS_PROC 75
+#define ICBID_MASTER_SNOC_BIMC_1 76
+#define ICBID_MASTER_SNOC_PCNOC 77
+#define ICBID_MASTER_AUDIO 78
+#define ICBID_MASTER_MM_INT_0 79
+#define ICBID_MASTER_MM_INT_1 80
+#define ICBID_MASTER_MM_INT_2 81
+#define ICBID_MASTER_MM_INT_BIMC 82
+#define ICBID_MASTER_MSS_INT 83
+#define ICBID_MASTER_PCNOC_CFG 84
+#define ICBID_MASTER_PCNOC_INT_0 85
+#define ICBID_MASTER_PCNOC_INT_1 86
+#define ICBID_MASTER_PCNOC_M_0 87
+#define ICBID_MASTER_PCNOC_M_1 88
+#define ICBID_MASTER_PCNOC_S_0 89
+#define ICBID_MASTER_PCNOC_S_1 90
+#define ICBID_MASTER_PCNOC_S_2 91
+#define ICBID_MASTER_PCNOC_S_3 92
+#define ICBID_MASTER_PCNOC_S_4 93
+#define ICBID_MASTER_PCNOC_S_6 94
+#define ICBID_MASTER_PCNOC_S_7 95
+#define ICBID_MASTER_PCNOC_S_8 96
+#define ICBID_MASTER_PCNOC_S_9 97
+#define ICBID_MASTER_QDSS_INT 98
+#define ICBID_MASTER_SNOC_INT_0 99
+#define ICBID_MASTER_SNOC_INT_1 100
+#define ICBID_MASTER_SNOC_INT_BIMC 101
+#define ICBID_MASTER_TCU_0 102
+#define ICBID_MASTER_TCU_1 103
+#define ICBID_MASTER_BIMC_INT_0 104
+#define ICBID_MASTER_BIMC_INT_1 105
+#define ICBID_MASTER_CAMERA 106
+#define ICBID_MASTER_RICA 107
+
+#define ICBID_SLAVE_EBI1 0
+#define ICBID_SLAVE_APPSS_L2 1
+#define ICBID_SLAVE_BIMC_SNOC 2
+#define ICBID_SLAVE_CAMERA_CFG 3
+#define ICBID_SLAVE_DISPLAY_CFG 4
+#define ICBID_SLAVE_OCMEM_CFG 5
+#define ICBID_SLAVE_CPR_CFG 6
+#define ICBID_SLAVE_CPR_XPU_CFG 7
+#define ICBID_SLAVE_MISC_CFG 8
+#define ICBID_SLAVE_MISC_XPU_CFG 9
+#define ICBID_SLAVE_VENUS_CFG 10
+#define ICBID_SLAVE_GFX3D_CFG 11
+#define ICBID_SLAVE_MMSS_CLK_CFG 12
+#define ICBID_SLAVE_MMSS_CLK_XPU_CFG 13
+#define ICBID_SLAVE_MNOC_MPU_CFG 14
+#define ICBID_SLAVE_ONOC_MPU_CFG 15
+#define ICBID_SLAVE_MNOC_BIMC 16
+#define ICBID_SLAVE_SERVICE_MNOC 17
+#define ICBID_SLAVE_OCMEM 18
+#define ICBID_SLAVE_GMEM ICBID_SLAVE_OCMEM
+#define ICBID_SLAVE_SERVICE_ONOC 19
+#define ICBID_SLAVE_APPSS 20
+#define ICBID_SLAVE_LPASS 21
+#define ICBID_SLAVE_USB3 22
+#define ICBID_SLAVE_USB3_0 ICBID_SLAVE_USB3
+#define ICBID_SLAVE_WCSS 23
+#define ICBID_SLAVE_SNOC_BIMC 24
+#define ICBID_SLAVE_SNOC_BIMC_0 ICBID_SLAVE_SNOC_BIMC
+#define ICBID_SLAVE_SNOC_CNOC 25
+#define ICBID_SLAVE_IMEM 26
+#define ICBID_SLAVE_OCIMEM ICBID_SLAVE_IMEM
+#define ICBID_SLAVE_SNOC_OVIRT 27
+#define ICBID_SLAVE_SNOC_GVIRT ICBID_SLAVE_SNOC_OVIRT
+#define ICBID_SLAVE_SNOC_PNOC 28
+#define ICBID_SLAVE_SNOC_PCNOC ICBID_SLAVE_SNOC_PNOC
+#define ICBID_SLAVE_SERVICE_SNOC 29
+#define ICBID_SLAVE_QDSS_STM 30
+#define ICBID_SLAVE_SDCC_1 31
+#define ICBID_SLAVE_SDCC_3 32
+#define ICBID_SLAVE_SDCC_2 33
+#define ICBID_SLAVE_SDCC_4 34
+#define ICBID_SLAVE_TSIF 35
+#define ICBID_SLAVE_BAM_DMA 36
+#define ICBID_SLAVE_BLSP_2 37
+#define ICBID_SLAVE_USB_HSIC 38
+#define ICBID_SLAVE_BLSP_1 39
+#define ICBID_SLAVE_USB_HS 40
+#define ICBID_SLAVE_USB_HS1 ICBID_SLAVE_USB_HS
+#define ICBID_SLAVE_PDM 41
+#define ICBID_SLAVE_PERIPH_APU_CFG 42
+#define ICBID_SLAVE_PNOC_MPU_CFG 43
+#define ICBID_SLAVE_PRNG 44
+#define ICBID_SLAVE_PNOC_SNOC 45
+#define ICBID_SLAVE_PCNOC_SNOC ICBID_SLAVE_PNOC_SNOC
+#define ICBID_SLAVE_SERVICE_PNOC 46
+#define ICBID_SLAVE_CLK_CTL 47
+#define ICBID_SLAVE_CNOC_MSS 48
+#define ICBID_SLAVE_PCNOC_MSS ICBID_SLAVE_CNOC_MSS
+#define ICBID_SLAVE_SECURITY 49
+#define ICBID_SLAVE_TCSR 50
+#define ICBID_SLAVE_TLMM 51
+#define ICBID_SLAVE_CRYPTO_0_CFG 52
+#define ICBID_SLAVE_CRYPTO_1_CFG 53
+#define ICBID_SLAVE_IMEM_CFG 54
+#define ICBID_SLAVE_MESSAGE_RAM 55
+#define ICBID_SLAVE_BIMC_CFG 56
+#define ICBID_SLAVE_BOOT_ROM 57
+#define ICBID_SLAVE_CNOC_MNOC_MMSS_CFG 58
+#define ICBID_SLAVE_PMIC_ARB 59
+#define ICBID_SLAVE_SPDM_WRAPPER 60
+#define ICBID_SLAVE_DEHR_CFG 61
+#define ICBID_SLAVE_MPM 62
+#define ICBID_SLAVE_QDSS_CFG 63
+#define ICBID_SLAVE_RBCPR_CFG 64
+#define ICBID_SLAVE_RBCPR_CX_CFG ICBID_SLAVE_RBCPR_CFG
+#define ICBID_SLAVE_RBCPR_QDSS_APU_CFG 65
+#define ICBID_SLAVE_CNOC_MNOC_CFG 66
+#define ICBID_SLAVE_SNOC_MPU_CFG 67
+#define ICBID_SLAVE_CNOC_ONOC_CFG 68
+#define ICBID_SLAVE_PNOC_CFG 69
+#define ICBID_SLAVE_SNOC_CFG 70
+#define ICBID_SLAVE_EBI1_DLL_CFG 71
+#define ICBID_SLAVE_PHY_APU_CFG 72
+#define ICBID_SLAVE_EBI1_PHY_CFG 73
+#define ICBID_SLAVE_RPM 74
+#define ICBID_SLAVE_CNOC_SNOC 75
+#define ICBID_SLAVE_SERVICE_CNOC 76
+#define ICBID_SLAVE_OVIRT_SNOC 77
+#define ICBID_SLAVE_OVIRT_OCMEM 78
+#define ICBID_SLAVE_USB_HS2 79
+#define ICBID_SLAVE_QPIC 80
+#define ICBID_SLAVE_IPS_CFG 81
+#define ICBID_SLAVE_DSI_CFG 82
+#define ICBID_SLAVE_USB3_1 83
+#define ICBID_SLAVE_PCIE_0 84
+#define ICBID_SLAVE_PCIE_1 85
+#define ICBID_SLAVE_PSS_SMMU_CFG 86
+#define ICBID_SLAVE_CRYPTO_2_CFG 87
+#define ICBID_SLAVE_PCIE_0_CFG 88
+#define ICBID_SLAVE_PCIE_1_CFG 89
+#define ICBID_SLAVE_SATA_CFG 90
+#define ICBID_SLAVE_SPSS_GENI_IR 91
+#define ICBID_SLAVE_UFS_CFG 92
+#define ICBID_SLAVE_AVSYNC_CFG 93
+#define ICBID_SLAVE_VPU_CFG 94
+#define ICBID_SLAVE_USB_PHY_CFG 95
+#define ICBID_SLAVE_RBCPR_MX_CFG 96
+#define ICBID_SLAVE_PCIE_PARF 97
+#define ICBID_SLAVE_VCAP_CFG 98
+#define ICBID_SLAVE_EMAC_CFG 99
+#define ICBID_SLAVE_BCAST_CFG 100
+#define ICBID_SLAVE_KLM_CFG 101
+#define ICBID_SLAVE_DISPLAY_PWM 102
+#define ICBID_SLAVE_GENI 103
+#define ICBID_SLAVE_SNOC_BIMC_1 104
+#define ICBID_SLAVE_AUDIO 105
+#define ICBID_SLAVE_CATS_0 106
+#define ICBID_SLAVE_CATS_1 107
+#define ICBID_SLAVE_MM_INT_0 108
+#define ICBID_SLAVE_MM_INT_1 109
+#define ICBID_SLAVE_MM_INT_2 110
+#define ICBID_SLAVE_MM_INT_BIMC 111
+#define ICBID_SLAVE_MMU_MODEM_XPU_CFG 112
+#define ICBID_SLAVE_MSS_INT 113
+#define ICBID_SLAVE_PCNOC_INT_0 114
+#define ICBID_SLAVE_PCNOC_INT_1 115
+#define ICBID_SLAVE_PCNOC_M_0 116
+#define ICBID_SLAVE_PCNOC_M_1 117
+#define ICBID_SLAVE_PCNOC_S_0 118
+#define ICBID_SLAVE_PCNOC_S_1 119
+#define ICBID_SLAVE_PCNOC_S_2 120
+#define ICBID_SLAVE_PCNOC_S_3 121
+#define ICBID_SLAVE_PCNOC_S_4 122
+#define ICBID_SLAVE_PCNOC_S_6 123
+#define ICBID_SLAVE_PCNOC_S_7 124
+#define ICBID_SLAVE_PCNOC_S_8 125
+#define ICBID_SLAVE_PCNOC_S_9 126
+#define ICBID_SLAVE_PRNG_XPU_CFG 127
+#define ICBID_SLAVE_QDSS_INT 128
+#define ICBID_SLAVE_RPM_XPU_CFG 129
+#define ICBID_SLAVE_SNOC_INT_0 130
+#define ICBID_SLAVE_SNOC_INT_1 131
+#define ICBID_SLAVE_SNOC_INT_BIMC 132
+#define ICBID_SLAVE_TCU 133
+#define ICBID_SLAVE_BIMC_INT_0 134
+#define ICBID_SLAVE_BIMC_INT_1 135
+#define ICBID_SLAVE_RICA_CFG 136
+
+#endif
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 2839c639f092..d8df98c3568b 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
+typedef void (*of_clk_init_cb_t)(struct device_node *);
#ifdef CONFIG_COMMON_CLK
@@ -565,7 +566,6 @@ unsigned long __clk_round_rate(struct clk *clk, unsigned long rate);
struct of_device_id;
-typedef void (*of_clk_init_cb_t)(struct device_node *);
struct clk_onecell_data {
struct clk **clks;
diff --git a/include/linux/clk.h b/include/linux/clk.h
index c7f258a81761..a4c1834bbce0 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -424,7 +424,7 @@ int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
struct device_node;
struct of_phandle_args;
-#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+#if defined(CONFIG_OF)
struct clk *of_clk_get(struct device_node *np, int index);
struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec);
diff --git a/include/linux/clk/msm-clk-provider.h b/include/linux/clk/msm-clk-provider.h
new file mode 100644
index 000000000000..719bfd8cc22c
--- /dev/null
+++ b/include/linux/clk/msm-clk-provider.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MSM_CLK_PROVIDER_H
+#define __MSM_CLK_PROVIDER_H
+
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/seq_file.h>
+#include <linux/clk/msm-clk.h>
+
+/*
+ * Bit manipulation macros
+ */
+#define BM(msb, lsb) (((((uint32_t)-1) << (31-(msb))) >> (31-(msb)+(lsb))) << (lsb))
+#define BVAL(msb, lsb, val) (((val) << (lsb)) & BM(msb, lsb))
+
+/*
+ * Halt/Status Checking Mode Macros
+ */
+#define HALT 0 /* Bit pol: 1 = halted */
+#define NOCHECK 1 /* No bit to check, do nothing */
+#define HALT_VOTED 2 /* Bit pol: 1 = halted; delay on disable */
+#define ENABLE 3 /* Bit pol: 1 = running */
+#define ENABLE_VOTED 4 /* Bit pol: 1 = running; delay on disable */
+#define DELAY 5 /* No bit to check, just delay */
+
+struct clk_register_data {
+ char *name;
+ u32 offset;
+};
+#ifdef CONFIG_DEBUG_FS
+void clk_debug_print_hw(struct clk *clk, struct seq_file *f);
+#else
+static inline void clk_debug_print_hw(struct clk *clk, struct seq_file *f) {}
+#endif
+
+#define CLK_WARN(clk, cond, fmt, ...) do { \
+ clk_debug_print_hw(clk, NULL); \
+ WARN(cond, "%s: " fmt, (clk)->dbg_name, ##__VA_ARGS__); \
+} while (0)
+
+/**
+ * struct clk_vdd_class - Voltage scaling class
+ * @class_name: name of the class
+ * @regulator: array of regulators.
+ * @num_regulators: size of regulator array. Standard regulator APIs will be
+ used if this field > 0.
+ * @set_vdd: function to call when applying a new voltage setting.
+ * @vdd_uv: sorted 2D array of legal voltage settings. Indexed by level, then
+ regulator.
+ * @vdd_ua: sorted 2D array of legal current settings. Indexed by level, then
+ regulator. Optional parameter.
+ * @level_votes: array of votes for each level.
+ * @num_levels: specifies the size of level_votes array.
+ * @cur_level: the currently set voltage level
+ * @lock: lock to protect this struct
+ */
+struct clk_vdd_class {
+ const char *class_name;
+ struct regulator **regulator;
+ int num_regulators;
+ int (*set_vdd)(struct clk_vdd_class *v_class, int level);
+ int *vdd_uv;
+ int *vdd_ua;
+ int *level_votes;
+ int num_levels;
+ unsigned long cur_level;
+ struct mutex lock;
+};
+
+#define DEFINE_VDD_CLASS(_name, _set_vdd, _num_levels) \
+ struct clk_vdd_class _name = { \
+ .class_name = #_name, \
+ .set_vdd = _set_vdd, \
+ .level_votes = (int [_num_levels]) {}, \
+ .num_levels = _num_levels, \
+ .cur_level = _num_levels, \
+ .lock = __MUTEX_INITIALIZER(_name.lock) \
+ }
+
+#define DEFINE_VDD_REGULATORS(_name, _num_levels, _num_regulators, _vdd_uv, \
+ _vdd_ua) \
+ struct clk_vdd_class _name = { \
+ .class_name = #_name, \
+ .vdd_uv = _vdd_uv, \
+ .vdd_ua = _vdd_ua, \
+ .regulator = (struct regulator * [_num_regulators]) {}, \
+ .num_regulators = _num_regulators, \
+ .level_votes = (int [_num_levels]) {}, \
+ .num_levels = _num_levels, \
+ .cur_level = _num_levels, \
+ .lock = __MUTEX_INITIALIZER(_name.lock) \
+ }
+
+#define DEFINE_VDD_REGS_INIT(_name, _num_regulators) \
+ struct clk_vdd_class _name = { \
+ .class_name = #_name, \
+ .regulator = (struct regulator * [_num_regulators]) {}, \
+ .num_regulators = _num_regulators, \
+ .lock = __MUTEX_INITIALIZER(_name.lock) \
+ }
+
+enum handoff {
+ HANDOFF_ENABLED_CLK,
+ HANDOFF_DISABLED_CLK,
+};
+
+struct clk_ops {
+ int (*prepare)(struct clk *clk);
+ int (*enable)(struct clk *clk);
+ void (*disable)(struct clk *clk);
+ void (*unprepare)(struct clk *clk);
+ void (*enable_hwcg)(struct clk *clk);
+ void (*disable_hwcg)(struct clk *clk);
+ int (*in_hwcg_mode)(struct clk *clk);
+ enum handoff (*handoff)(struct clk *clk);
+ int (*reset)(struct clk *clk, enum clk_reset_action action);
+ int (*pre_set_rate)(struct clk *clk, unsigned long new_rate);
+ int (*set_rate)(struct clk *clk, unsigned long rate);
+ void (*post_set_rate)(struct clk *clk, unsigned long old_rate);
+ int (*set_max_rate)(struct clk *clk, unsigned long rate);
+ int (*set_flags)(struct clk *clk, unsigned flags);
+ unsigned long (*get_rate)(struct clk *clk);
+ long (*list_rate)(struct clk *clk, unsigned n);
+ int (*is_enabled)(struct clk *clk);
+ long (*round_rate)(struct clk *clk, unsigned long rate);
+ int (*set_parent)(struct clk *clk, struct clk *parent);
+ struct clk *(*get_parent)(struct clk *clk);
+ bool (*is_local)(struct clk *clk);
+ void __iomem *(*list_registers)(struct clk *clk, int n,
+ struct clk_register_data **regs, u32 *size);
+};
+
+/**
+ * struct clk
+ * @prepare_count: prepare refcount
+ * @prepare_lock: protects clk_prepare()/clk_unprepare() path and @prepare_count
+ * @count: enable refcount
+ * @lock: protects clk_enable()/clk_disable() path and @count
+ * @depends: non-direct parent of clock to enable when this clock is enabled
+ * @vdd_class: voltage scaling requirement class
+ * @fmax: maximum frequency in Hz supported at each voltage level
+ * @parent: the current source of this clock
+ */
+struct clk {
+ uint32_t flags;
+ struct clk_ops *ops;
+ const char *dbg_name;
+ struct clk *depends;
+ struct clk_vdd_class *vdd_class;
+ unsigned long *fmax;
+ int num_fmax;
+ unsigned long rate;
+ struct clk *parent;
+
+ struct list_head children;
+ struct list_head siblings;
+
+ unsigned count;
+ spinlock_t lock;
+ unsigned prepare_count;
+ struct mutex prepare_lock;
+
+ struct dentry *clk_dir;
+};
+
+#define CLK_INIT(name) \
+ .lock = __SPIN_LOCK_UNLOCKED((name).lock), \
+ .prepare_lock = __MUTEX_INITIALIZER((name).prepare_lock), \
+ .children = LIST_HEAD_INIT((name).children), \
+ .siblings = LIST_HEAD_INIT((name).siblings)
+
+int vote_vdd_level(struct clk_vdd_class *vdd_class, int level);
+int unvote_vdd_level(struct clk_vdd_class *vdd_class, int level);
+int __clk_pre_reparent(struct clk *c, struct clk *new, unsigned long *flags);
+void __clk_post_reparent(struct clk *c, struct clk *old, unsigned long *flags);
+
+/* Register clocks with the MSM clock driver */
+int msm_clock_register(struct clk_lookup *table, size_t size);
+int of_msm_clock_register(struct device_node *np, struct clk_lookup *table,
+ size_t size);
+
+extern struct clk dummy_clk;
+extern struct clk_ops clk_ops_dummy;
+
+#define CLK_DUMMY(clk_name, clk_id, clk_dev, flags) { \
+ .con_id = clk_name, \
+ .dev_id = clk_dev, \
+ .clk = &dummy_clk, \
+ }
+
+#define DEFINE_CLK_DUMMY(name, _rate) \
+ static struct fixed_clk name = { \
+ .c = { \
+ .dbg_name = #name, \
+ .rate = _rate, \
+ .ops = &clk_ops_dummy, \
+ CLK_INIT(name.c), \
+ }, \
+ };
+
+#define CLK_LOOKUP(con, c, dev) { .con_id = con, .clk = &c, .dev_id = dev }
+#define CLK_LOOKUP_OF(con, _c, dev) { .con_id = con, .clk = &(&_c)->c, \
+ .dev_id = dev, .of_idx = clk_##_c }
+#define CLK_LIST(_c) { .clk = &(&_c)->c, .of_idx = clk_##_c }
+
+static inline bool is_better_rate(unsigned long req, unsigned long best,
+ unsigned long new)
+{
+ if (IS_ERR_VALUE(new))
+ return false;
+
+ return (req <= new && new < best) || (best < req && best < new);
+}
+
+extern int of_clk_add_provider(struct device_node *np,
+ struct clk *(*clk_src_get)(struct of_phandle_args *args,
+ void *data),
+ void *data);
+extern void of_clk_del_provider(struct device_node *np);
+
+#endif
diff --git a/include/linux/clk/msm-clk.h b/include/linux/clk/msm-clk.h
new file mode 100644
index 000000000000..59365027ff68
--- /dev/null
+++ b/include/linux/clk/msm-clk.h
@@ -0,0 +1,48 @@
+/* Copyright (c) 2009, 2012-2013 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MACH_CLK_H
+#define __MACH_CLK_H
+
+#define CLKFLAG_INVERT 0x00000001
+#define CLKFLAG_NOINVERT 0x00000002
+#define CLKFLAG_NONEST 0x00000004
+#define CLKFLAG_NORESET 0x00000008
+#define CLKFLAG_RETAIN_PERIPH 0x00000010
+#define CLKFLAG_NORETAIN_PERIPH 0x00000020
+#define CLKFLAG_RETAIN_MEM 0x00000040
+#define CLKFLAG_NORETAIN_MEM 0x00000080
+#define CLKFLAG_SKIP_HANDOFF 0x00000100
+#define CLKFLAG_MIN 0x00000400
+#define CLKFLAG_MAX 0x00000800
+#define CLKFLAG_INIT_DONE 0x00001000
+#define CLKFLAG_INIT_ERR 0x00002000
+#define CLKFLAG_NO_RATE_CACHE 0x00004000
+#define CLKFLAG_MEASURE 0x00008000
+
+struct clk_lookup;
+struct clk;
+
+enum clk_reset_action {
+ CLK_RESET_DEASSERT = 0,
+ CLK_RESET_ASSERT = 1
+};
+
+/* Rate is maximum clock rate in Hz */
+int clk_set_max_rate(struct clk *clk, unsigned long rate);
+
+/* Assert/Deassert reset to a hardware block associated with a clock */
+int clk_reset(struct clk *clk, enum clk_reset_action action);
+
+/* Set clock-specific configuration parameters */
+int clk_set_flags(struct clk *clk, unsigned long flags);
+
+#endif
diff --git a/include/linux/clk/msm-clock-generic.h b/include/linux/clk/msm-clock-generic.h
new file mode 100644
index 000000000000..efb4730d5042
--- /dev/null
+++ b/include/linux/clk/msm-clock-generic.h
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CLOCK_GENERIC_H
+#define __MSM_CLOCK_GENERIC_H
+
+#include <linux/clk/msm-clk-provider.h>
+
+/**
+ * struct fixed_clk - fixed rate clock
+ * @c: clk
+ */
+struct fixed_clk {
+ struct clk c;
+};
+
+/* ==================== Mux clock ==================== */
+
+struct clk_src {
+ struct clk *src;
+ int sel;
+};
+
+struct mux_clk;
+
+struct clk_mux_ops {
+ int (*set_mux_sel)(struct mux_clk *clk, int sel);
+ int (*get_mux_sel)(struct mux_clk *clk);
+
+ /* Optional */
+ bool (*is_enabled)(struct mux_clk *clk);
+ int (*enable)(struct mux_clk *clk);
+ void (*disable)(struct mux_clk *clk);
+ void __iomem *(*list_registers)(struct mux_clk *clk, int n,
+ struct clk_register_data **regs, u32 *size);
+};
+
+#define MUX_SRC_LIST(...) \
+ .parents = (struct clk_src[]){__VA_ARGS__}, \
+ .num_parents = ARRAY_SIZE(((struct clk_src[]){__VA_ARGS__}))
+
+#define MUX_REC_SRC_LIST(...) \
+ .rec_parents = (struct clk * []){__VA_ARGS__}, \
+ .num_rec_parents = ARRAY_SIZE(((struct clk * []){__VA_ARGS__}))
+
+struct mux_clk {
+ /* Parents in decreasing order of preference for obtaining rates. */
+ struct clk_src *parents;
+ int num_parents;
+ /* Recursively search for the requested parent in rec_parents. */
+ struct clk **rec_parents;
+ int num_rec_parents;
+ struct clk *safe_parent;
+ int safe_sel;
+ struct clk_mux_ops *ops;
+
+ /* Fields not used by helper function. */
+ void *const __iomem *base;
+ u32 offset;
+ u32 en_offset;
+ int en_reg;
+ u32 mask;
+ u32 shift;
+ u32 en_mask;
+ void *priv;
+
+ struct clk c;
+};
+
+static inline struct mux_clk *to_mux_clk(struct clk *c)
+{
+ return container_of(c, struct mux_clk, c);
+}
+
+int parent_to_src_sel(struct clk_src *parents, int num_parents, struct clk *p);
+
+extern struct clk_ops clk_ops_gen_mux;
+
+/* ==================== Divider clock ==================== */
+
+struct div_clk;
+
+struct clk_div_ops {
+ int (*set_div)(struct div_clk *clk, int div);
+ int (*get_div)(struct div_clk *clk);
+ bool (*is_enabled)(struct div_clk *clk);
+ int (*enable)(struct div_clk *clk);
+ void (*disable)(struct div_clk *clk);
+ void __iomem *(*list_registers)(struct div_clk *clk, int n,
+ struct clk_register_data **regs, u32 *size);
+};
+
+struct div_data {
+ unsigned int div;
+ unsigned int min_div;
+ unsigned int max_div;
+ unsigned long rate_margin;
+ /*
+	 * Indicate whether this divider clock supports half-integer divider.
+ * If it is, all the min_div and max_div have been doubled. It means
+ * they are 2*N.
+ */
+ bool is_half_divider;
+};
+
+struct div_clk {
+ struct div_data data;
+
+ /* Optional */
+ struct clk_div_ops *ops;
+
+ /* Fields not used by helper function. */
+ void *const __iomem *base;
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 en_mask;
+ void *priv;
+ struct clk c;
+};
+
+static inline struct div_clk *to_div_clk(struct clk *c)
+{
+ return container_of(c, struct div_clk, c);
+}
+
+extern struct clk_ops clk_ops_div;
+extern struct clk_ops clk_ops_slave_div;
+
+struct ext_clk {
+ struct clk c;
+};
+
+long parent_round_rate(struct clk *c, unsigned long rate);
+unsigned long parent_get_rate(struct clk *c);
+extern struct clk_ops clk_ops_ext;
+
+#define DEFINE_FIXED_DIV_CLK(clk_name, _div, _parent) \
+static struct div_clk clk_name = { \
+ .data = { \
+ .max_div = _div, \
+ .min_div = _div, \
+ .div = _div, \
+ }, \
+ .c = { \
+ .parent = _parent, \
+ .dbg_name = #clk_name, \
+ .ops = &clk_ops_div, \
+ CLK_INIT(clk_name.c), \
+ } \
+}
+
+#define DEFINE_FIXED_SLAVE_DIV_CLK(clk_name, _div, _parent) \
+static struct div_clk clk_name = { \
+ .data = { \
+ .max_div = _div, \
+ .min_div = _div, \
+ .div = _div, \
+ }, \
+ .c = { \
+ .parent = _parent, \
+ .dbg_name = #clk_name, \
+ .ops = &clk_ops_slave_div, \
+ CLK_INIT(clk_name.c), \
+ } \
+}
+
+#define DEFINE_EXT_CLK(clk_name, _parent) \
+static struct ext_clk clk_name = { \
+ .c = { \
+ .parent = _parent, \
+ .dbg_name = #clk_name, \
+ .ops = &clk_ops_ext, \
+ CLK_INIT(clk_name.c), \
+ } \
+}
+
+/* ==================== Mux Div clock ==================== */
+
+struct mux_div_clk;
+
+/*
+ * struct mux_div_ops
+ * the enable and disable ops are optional.
+ */
+
+struct mux_div_ops {
+ int (*set_src_div)(struct mux_div_clk *, u32 src_sel, u32 div);
+ void (*get_src_div)(struct mux_div_clk *, u32 *src_sel, u32 *div);
+ int (*enable)(struct mux_div_clk *);
+ void (*disable)(struct mux_div_clk *);
+ bool (*is_enabled)(struct mux_div_clk *);
+ void __iomem *(*list_registers)(struct mux_div_clk *md, int n,
+ struct clk_register_data **regs, u32 *size);
+};
+
+/*
+ * struct mux_div_clk - combined mux/divider clock
+ * @priv
+ parameters needed by ops
+ * @safe_freq
+ when switching rates from A to B, the mux div clock will
+ instead switch from A -> safe_freq -> B. This allows the
+ mux_div clock to change rates while enabled, even if this
+ behavior is not supported by the parent clocks.
+
+ If changing the rate of parent A also causes the rate of
+ parent B to change, then safe_freq must be defined.
+
+ safe_freq is expected to have a source clock which is always
+ on and runs at only one rate.
+ * @parents
+	list of parents and mux indices
+ * @ops
+ function pointers for hw specific operations
+ * @src_sel
+ the mux index which will be used if the clock is enabled.
+ */
+
+struct mux_div_clk {
+ /* Required parameters */
+ struct mux_div_ops *ops;
+ struct div_data data;
+ struct clk_src *parents;
+ u32 num_parents;
+
+ struct clk c;
+
+ /* Internal */
+ u32 src_sel;
+
+ /* Optional parameters */
+ void *priv;
+ void __iomem *base;
+ u32 div_mask;
+ u32 div_offset;
+ u32 div_shift;
+ u32 src_mask;
+ u32 src_offset;
+ u32 src_shift;
+ u32 en_mask;
+ u32 en_offset;
+
+ u32 safe_div;
+ struct clk *safe_parent;
+ unsigned long safe_freq;
+};
+
+static inline struct mux_div_clk *to_mux_div_clk(struct clk *clk)
+{
+ return container_of(clk, struct mux_div_clk, c);
+}
+
+extern struct clk_ops clk_ops_mux_div_clk;
+
+#endif
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index 94bad77eeb4a..4a6ec35c3571 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -21,6 +21,7 @@ struct clk_lookup {
struct list_head node;
const char *dev_id;
const char *con_id;
+ int of_idx;
struct clk *clk;
};
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index b2d9a43012b2..10d981a245d3 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -267,4 +267,11 @@ void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
void arch_cpu_idle_dead(void);
+#define IDLE_START 1
+#define IDLE_END 2
+
+void idle_notifier_register(struct notifier_block *n);
+void idle_notifier_unregister(struct notifier_block *n);
+void idle_notifier_call_chain(unsigned long val);
+
#endif /* _LINUX_CPU_H_ */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 503b085b7832..5ae628e2ca7b 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -496,6 +496,7 @@ extern struct cpufreq_governor cpufreq_gov_conservative;
struct cpufreq_frequency_table {
unsigned int flags;
unsigned int driver_data; /* driver specific data, not used by core */
+ unsigned int index; /* any */
unsigned int frequency; /* kHz - doesn't need to be in ascending
* order */
};
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index f1863dcd83ea..c78816cc6dbd 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -52,6 +52,24 @@ struct devfreq_dev_status {
*/
#define DEVFREQ_FLAG_LEAST_UPPER_BOUND 0x1
+#define DEVFREQ_FLAG_FAST_HINT 0x2
+#define DEVFREQ_FLAG_SLOW_HINT 0x4
+#define DEVFREQ_FLAG_WAKEUP_MAXFREQ 0x8
+
+/**
+ * struct devfreq_governor_data - mapping to per device governor data
+ * @name: The name of the governor.
+ * @data: Private data for the governor.
+ *
+ * Devices may pass in an array of this structure to allow governors
+ * to get the correct data pointer when they are enabled after
+ * the devfreq_add_device() call.
+ */
+struct devfreq_governor_data {
+ const char *name;
+ void *data;
+};
+
/**
* struct devfreq_dev_profile - Devfreq's user device profile
* @initial_freq: The operating frequency when devfreq_add_device() is
@@ -75,6 +93,11 @@ struct devfreq_dev_status {
* this is the time to unregister it.
* @freq_table: Optional list of frequencies to support statistics.
* @max_state: The size of freq_table.
+ * @governor_data: Optional array of private data for governors.
+ * This is used to set devfreq->data correctly
+ * when a governor is enabled via sysfs or other
+ * mechanisms after the devfreq_add_device() call.
+ * @num_governor_data: Number of elements in governor_data.
*/
struct devfreq_dev_profile {
unsigned long initial_freq;
@@ -88,6 +111,8 @@ struct devfreq_dev_profile {
unsigned int *freq_table;
unsigned int max_state;
+ const struct devfreq_governor_data *governor_data;
+ unsigned int num_governor_data;
};
/**
@@ -111,7 +136,8 @@ struct devfreq_governor {
struct list_head node;
const char name[DEVFREQ_NAME_LEN];
- int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
+ int (*get_target_freq)(struct devfreq *this, unsigned long *freq,
+ u32 *flag);
int (*event_handler)(struct devfreq *devfreq,
unsigned int event, void *data);
};
@@ -214,6 +240,9 @@ extern void devm_devfreq_unregister_opp_notifier(struct device *dev,
* the governor may consider slowing the frequency down.
* Specify 0 to use the default. Valid value = 0 to 100.
* downdifferential < upthreshold must hold.
+ * @simple_scaling: Setting this flag will scale the clocks up only if the
+ * load is above @upthreshold and will scale the clocks
+ * down only if the load is below @downdifferential.
*
* If the fed devfreq_simple_ondemand_data pointer is NULL to the governor,
* the governor uses the default values.
@@ -221,6 +250,7 @@ extern void devm_devfreq_unregister_opp_notifier(struct device *dev,
struct devfreq_simple_ondemand_data {
unsigned int upthreshold;
unsigned int downdifferential;
+ unsigned int simple_scaling;
};
#endif
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h
index c8e1831d7572..eb1b9d727f39 100644
--- a/include/linux/dma-attrs.h
+++ b/include/linux/dma-attrs.h
@@ -18,6 +18,8 @@ enum dma_attr {
DMA_ATTR_NO_KERNEL_MAPPING,
DMA_ATTR_SKIP_CPU_SYNC,
DMA_ATTR_FORCE_CONTIGUOUS,
+ DMA_ATTR_STRONGLY_ORDERED,
+ DMA_ATTR_SKIP_ZEROING,
DMA_ATTR_MAX,
};
diff --git a/include/linux/esoc_client.h b/include/linux/esoc_client.h
new file mode 100644
index 000000000000..5da1b6dc6193
--- /dev/null
+++ b/include/linux/esoc_client.h
@@ -0,0 +1,52 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __ESOC_CLIENT_H_
+#define __ESOC_CLIENT_H_
+
+#include <linux/esoc_ctrl.h>
+#include <linux/notifier.h>
+
+/*
+ * struct esoc_desc: Describes an external soc
+ * @name: external soc name
+ * @priv: private data for external soc
+ */
+struct esoc_desc {
+ const char *name;
+ const char *link;
+ void *priv;
+};
+
+#ifdef CONFIG_ESOC_CLIENT
+/* Can return probe deferral */
+struct esoc_desc *devm_register_esoc_client(struct device *dev,
+ const char *name);
+void devm_unregister_esoc_client(struct device *dev,
+ struct esoc_desc *esoc_desc);
+int esoc_register_client_notifier(struct notifier_block *nb);
+#else
+static inline struct esoc_desc *devm_register_esoc_client(struct device *dev,
+ const char *name)
+{
+ return NULL;
+}
+static inline void devm_unregister_esoc_client(struct device *dev,
+ struct esoc_desc *esoc_desc)
+{
+ return;
+}
+static inline int esoc_register_client_notifier(struct notifier_block *nb)
+{
+ return -EIO;
+}
+#endif
+#endif
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 41b30fd4d041..105b35bd9a6a 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -34,6 +34,7 @@ struct vm_area_struct;
#define ___GFP_NO_KSWAPD 0x400000u
#define ___GFP_OTHER_NODE 0x800000u
#define ___GFP_WRITE 0x1000000u
+#define ___GFP_CMA 0x2000000u
/* If the above are modified, __GFP_BITS_SHIFT may need updating */
/*
@@ -49,7 +50,9 @@ struct vm_area_struct;
#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */
-#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
+#define __GFP_CMA ((__force gfp_t)___GFP_CMA)
+#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE| \
+ __GFP_CMA)
/*
* Action modifiers - doesn't change the zoning
*
@@ -97,7 +100,7 @@ struct vm_area_struct;
*/
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/* This equals 0, but use constants in case they ever change */
@@ -131,7 +134,7 @@ struct vm_area_struct;
#endif
/* This mask makes up all the page movable related flags */
-#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
+#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE|__GFP_CMA)
/* Control page allocator reclaim behavior */
#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
@@ -164,8 +167,14 @@ static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
return MIGRATE_UNMOVABLE;
/* Group based on mobility */
+#ifndef CONFIG_CMA
return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
((gfp_flags & __GFP_RECLAIMABLE) != 0);
+#else
+ return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
+ (((gfp_flags & __GFP_CMA) != 0) << 1) |
+ ((gfp_flags & __GFP_RECLAIMABLE) != 0);
+#endif
}
#ifdef CONFIG_HIGHMEM
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index cba442ec3c66..e70442c91d9f 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -11,6 +11,74 @@
extern void synchronize_irq(unsigned int irq);
extern void synchronize_hardirq(unsigned int irq);
+#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+#define NMI_OFFSET (1UL << NMI_SHIFT)
+
+#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+
+#ifndef PREEMPT_ACTIVE
+#define PREEMPT_ACTIVE_BITS 1
+#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
+#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
+#endif
+
+#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
+#error PREEMPT_ACTIVE is too low!
+#endif
+
+#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+ | NMI_MASK))
+
+/*
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
+ * in_softirq - Are we currently processing softirq or have bh disabled?
+ * in_serving_softirq - Are we currently processing softirq?
+ */
+#define in_irq() (hardirq_count())
+#define in_softirq() (softirq_count())
+#define in_interrupt() (irq_count())
+#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+
+/*
+ * Are we in NMI context?
+ */
+#define in_nmi() (preempt_count() & NMI_MASK)
+
+#if defined(CONFIG_PREEMPT_COUNT)
+# define PREEMPT_CHECK_OFFSET 1
+#else
+# define PREEMPT_CHECK_OFFSET 0
+#endif
+
+/*
+ * Are we running in atomic context? WARNING: this macro cannot
+ * always detect atomic context; in particular, it cannot know about
+ * held spinlocks in non-preemptible kernels. Thus it should not be
+ * used in the general case to determine whether sleeping is possible.
+ * Do not use in_atomic() in driver code.
+ */
+#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0)
+
+/*
+ * Check whether we were atomic before we did preempt_disable():
+ * (used by the scheduler, *after* releasing the kernel lock)
+ */
+#define in_atomic_preempt_off() \
+ ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
+
+#ifdef CONFIG_PREEMPT_COUNT
+# define preemptible() (preempt_count() == 0 && !irqs_disabled())
+#else
+# define preemptible() 0
+#endif
+
+extern void synchronize_irq(unsigned int irq);
+
#if defined(CONFIG_TINY_RCU)
static inline void rcu_nmi_enter(void)
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 9286a46b7d69..bc4af51ce3e4 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -39,6 +39,12 @@ extern unsigned long totalhigh_pages;
void kmap_flush_unused(void);
+#ifdef CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH
+void kmap_atomic_flush_unused(void);
+#else
+static inline void kmap_atomic_flush_unused(void) { }
+#endif
+
struct page *kmap_to_page(void *addr);
#else /* CONFIG_HIGHMEM */
@@ -79,6 +85,7 @@ static inline void __kunmap_atomic(void *addr)
#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
#define kmap_flush_unused() do {} while(0)
+#define kmap_atomic_flush_unused() do {} while (0)
#endif
#endif /* CONFIG_HIGHMEM */
@@ -179,9 +186,24 @@ static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
unsigned long vaddr)
{
+#ifndef CONFIG_CMA
return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
+#else
+ return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
+ vaddr);
+#endif
}
+#ifdef CONFIG_CMA
+static inline struct page *
+alloc_zeroed_user_highpage_movable_cma(struct vm_area_struct *vma,
+ unsigned long vaddr)
+{
+ return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
+ vaddr);
+}
+#endif
+
static inline void clear_highpage(struct page *page)
{
void *kaddr = kmap_atomic(page);
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
new file mode 100644
index 000000000000..d085e03a2c45
--- /dev/null
+++ b/include/linux/iopoll.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2012-2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_IOPOLL_H
+#define _LINUX_IOPOLL_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/hrtimer.h>
+#include <linux/delay.h>
+#include <asm-generic/errno.h>
+#include <asm/io.h>
+
+/**
+ * readl_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
+ * @addr: Address to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in uS (0 tight-loops)
+ * @timeout_us: Timeout in uS, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
+ */
+#define readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) \
+({ \
+ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
+ might_sleep_if(timeout_us); \
+ for (;;) { \
+ (val) = readl(addr); \
+ if (cond) \
+ break; \
+ if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
+ (val) = readl(addr); \
+ break; \
+ } \
+ if (sleep_us) \
+ usleep_range(DIV_ROUND_UP(sleep_us, 4), sleep_us); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
+
+/**
+ * readl_poll_timeout_noirq - Periodically poll an address until a condition is met or a timeout occurs
+ * @addr: Address to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @max_reads: Maximum number of reads before giving up
+ * @time_between_us: Time to udelay() between successive reads
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout.
+ */
+#define readl_poll_timeout_noirq(addr, val, cond, max_reads, time_between_us) \
+({ \
+ int count; \
+ for (count = (max_reads); count > 0; count--) { \
+ (val) = readl(addr); \
+ if (cond) \
+ break; \
+ udelay(time_between_us); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
+
+/**
+ * readl_poll - Periodically poll an address until a condition is met
+ * @addr: Address to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in uS (0 tight-loops)
+ *
+ * Must not be called from atomic context if sleep_us is used.
+ */
+#define readl_poll(addr, val, cond, sleep_us) \
+ readl_poll_timeout(addr, val, cond, sleep_us, 0)
+
+/**
+ * readl_tight_poll_timeout - Tight-loop on an address until a condition is met or a timeout occurs
+ * @addr: Address to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @timeout_us: Timeout in uS, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val. Must not
+ * be called from atomic context if timeout_us is used.
+ */
+#define readl_tight_poll_timeout(addr, val, cond, timeout_us) \
+ readl_poll_timeout(addr, val, cond, 0, timeout_us)
+
+/**
+ * readl_tight_poll - Tight-loop on an address until a condition is met
+ * @addr: Address to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ *
+ * May be called from atomic context.
+ */
+#define readl_tight_poll(addr, val, cond) \
+ readl_poll_timeout(addr, val, cond, 0, 0)
+
+#endif /* _LINUX_IOPOLL_H */
diff --git a/include/linux/ipc_logging.h b/include/linux/ipc_logging.h
new file mode 100644
index 000000000000..532452b133d8
--- /dev/null
+++ b/include/linux/ipc_logging.h
@@ -0,0 +1,265 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPC_LOGGING_H
+#define _IPC_LOGGING_H
+
+#include <linux/types.h>
+
+#define MAX_MSG_SIZE 255
+
+enum {
+ TSV_TYPE_MSG_START = 1,
+ TSV_TYPE_SKB = TSV_TYPE_MSG_START,
+ TSV_TYPE_STRING,
+ TSV_TYPE_MSG_END = TSV_TYPE_STRING,
+};
+
+struct tsv_header {
+ unsigned char type;
+ unsigned char size; /* size of data field */
+};
+
+struct encode_context {
+ struct tsv_header hdr;
+ char buff[MAX_MSG_SIZE];
+ int offset;
+};
+
+struct decode_context {
+ int output_format; /* 0 = debugfs */
+ char *buff; /* output buffer */
+ int size; /* size of output buffer */
+};
+
+#if defined(CONFIG_IPC_LOGGING)
+/*
+ * ipc_log_context_create: Create a debug log context
+ * Should not be called from atomic context
+ *
+ * @max_num_pages: Number of pages of logging space required (max. 10)
+ * @modname : Name of the directory entry under DEBUGFS
+ *
+ * returns context id on success, NULL on failure
+ */
+void *ipc_log_context_create(int max_num_pages, const char *modname);
+
+/*
+ * msg_encode_start: Start encoding a log message
+ *
+ * @ectxt: Temporary storage to hold the encoded message
+ * @type: Root event type defined by the module which is logging
+ */
+void msg_encode_start(struct encode_context *ectxt, uint32_t type);
+
+/*
+ * tsv_timestamp_write: Writes the current timestamp count
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ */
+int tsv_timestamp_write(struct encode_context *ectxt);
+
+/*
+ * tsv_pointer_write: Writes a data pointer
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ * @pointer: Pointer value to write
+ */
+int tsv_pointer_write(struct encode_context *ectxt, void *pointer);
+
+/*
+ * tsv_int32_write: Writes a 32-bit integer value
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ * @n: Integer to write
+ */
+int tsv_int32_write(struct encode_context *ectxt, int32_t n);
+
+/*
+ * tsv_byte_array_write: Writes a byte array
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ * @data: Byte array to write, @data_size: its size in bytes
+ */
+int tsv_byte_array_write(struct encode_context *ectxt,
+ void *data, int data_size);
+
+/*
+ * msg_encode_end: Complete the message encode process
+ *
+ * @ectxt: Temporary storage which holds the encoded message
+ */
+void msg_encode_end(struct encode_context *ectxt);
+
+/*
+ * ipc_log_write: Commits the encoded message to the debug log
+ *
+ * @ctxt: Debug log context, @ectxt: storage which holds the encoded message
+ */
+void ipc_log_write(void *ctxt, struct encode_context *ectxt);
+
+/*
+ * ipc_log_string: Helper function to log a string
+ *
+ * @ilctxt: Debug Log Context created using ipc_log_context_create()
+ * @fmt: Data specified using format specifiers
+ */
+int ipc_log_string(void *ilctxt, const char *fmt, ...) __printf(2, 3);
+
+/**
+ * ipc_log_extract - Reads and deserializes log
+ *
+ * @ilctxt: logging context
+ * @buff: buffer to receive the data
+ * @size: size of the buffer
+ * @returns: 0 if no data read; >0 number of bytes read; < 0 error
+ *
+ * If no data is available to be read, then the ilctxt::read_avail
+ * completion is reinitialized. This allows clients to block
+ * until new log data is saved.
+ */
+int ipc_log_extract(void *ilctxt, char *buff, int size);
+
+/*
+ * Print a string to decode context.
+ * @dctxt Decode context
+ * @args printf args
+ */
+#define IPC_SPRINTF_DECODE(dctxt, args...) \
+do { \
+ int i; \
+ i = scnprintf(dctxt->buff, dctxt->size, args); \
+ dctxt->buff += i; \
+ dctxt->size -= i; \
+} while (0)
+
+/*
+ * tsv_timestamp_read: Reads a timestamp
+ *
+ * @ectxt: Context retrieved by reading from log space
+ * @dctxt: Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_timestamp_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_pointer_read: Reads a data pointer
+ *
+ * @ectxt: Context retrieved by reading from log space
+ * @dctxt: Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_pointer_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_int32_read: Reads a 32-bit integer value
+ *
+ * @ectxt: Context retrieved by reading from log space
+ * @dctxt: Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+int32_t tsv_int32_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_byte_array_read: Reads a byte array
+ *
+ * @ectxt: Context retrieved by reading from log space
+ * @dctxt: Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_byte_array_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format);
+
+/*
+ * add_deserialization_func: Register a deserialization function to
+ * to unpack the subevents of a main event
+ *
+ * @ctxt: Debug log context to which the deserialization function has
+ * to be registered
+ * @type: Main/Root event, defined by the module which is logging, to
+ * which this deserialization function has to be registered.
+ * @dfunc: Deserialization function to be registered
+ *
+ * return 0 on success, -ve value on FAILURE
+ */
+int add_deserialization_func(void *ctxt, int type,
+ void (*dfunc)(struct encode_context *,
+ struct decode_context *));
+
+/*
+ * ipc_log_context_destroy: Destroy debug log context
+ *
+ * @ctxt: debug log context created by calling ipc_log_context_create API.
+ */
+int ipc_log_context_destroy(void *ctxt);
+
+#else
+
+static inline void *ipc_log_context_create(int max_num_pages,
+ const char *modname)
+{ return NULL; }
+
+static inline void msg_encode_start(struct encode_context *ectxt,
+ uint32_t type) { }
+
+static inline int tsv_timestamp_write(struct encode_context *ectxt)
+{ return -EINVAL; }
+
+static inline int tsv_pointer_write(struct encode_context *ectxt, void *pointer)
+{ return -EINVAL; }
+
+static inline int tsv_int32_write(struct encode_context *ectxt, int32_t n)
+{ return -EINVAL; }
+
+static inline int tsv_byte_array_write(struct encode_context *ectxt,
+ void *data, int data_size)
+{ return -EINVAL; }
+
+static inline void msg_encode_end(struct encode_context *ectxt) { }
+
+static inline void ipc_log_write(void *ctxt, struct encode_context *ectxt) { }
+
+static inline int ipc_log_string(void *ilctxt, const char *fmt, ...)
+{ return -EINVAL; }
+
+static inline int ipc_log_extract(void *ilctxt, char *buff, int size)
+{ return -EINVAL; }
+
+#define IPC_SPRINTF_DECODE(dctxt, args...) do { } while (0)
+
+static inline void tsv_timestamp_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format) { }
+
+static inline void tsv_pointer_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format) { }
+
+static inline int32_t tsv_int32_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format)
+{ return 0; }
+
+static inline void tsv_byte_array_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format) { }
+
+static inline int add_deserialization_func(void *ctxt, int type,
+ void (*dfunc)(struct encode_context *,
+ struct decode_context *))
+{ return 0; }
+
+static inline int ipc_log_context_destroy(void *ctxt)
+{ return 0; }
+
+#endif
+
+#endif
diff --git a/include/linux/ipc_router.h b/include/linux/ipc_router.h
new file mode 100644
index 000000000000..ace57220116f
--- /dev/null
+++ b/include/linux/ipc_router.h
@@ -0,0 +1,298 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPC_ROUTER_H
+#define _IPC_ROUTER_H
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/pm.h>
+#include <linux/msm_ipc.h>
+#include <linux/device.h>
+
+/* Maximum Wakeup Source Name Size */
+#define MAX_WS_NAME_SZ 32
+
+/**
+ * enum msm_ipc_router_event - Events that will be generated by IPC Router
+ */
+enum msm_ipc_router_event {
+ IPC_ROUTER_CTRL_CMD_DATA = 1,
+ IPC_ROUTER_CTRL_CMD_HELLO,
+ IPC_ROUTER_CTRL_CMD_BYE,
+ IPC_ROUTER_CTRL_CMD_NEW_SERVER,
+ IPC_ROUTER_CTRL_CMD_REMOVE_SERVER,
+ IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT,
+ IPC_ROUTER_CTRL_CMD_RESUME_TX,
+};
+
+/**
+ * rr_control_msg - Control message structure
+ * @cmd: Command identifier for HELLO message in Version 1.
+ * @hello: Message structure for HELLO message in Version 2.
+ * @srv: Message structure for NEW_SERVER/REMOVE_SERVER events.
+ * @cli: Message structure for REMOVE_CLIENT event.
+ */
+union rr_control_msg {
+ uint32_t cmd;
+ struct {
+ uint32_t cmd;
+ uint32_t magic;
+ uint32_t capability;
+ } hello;
+ struct {
+ uint32_t cmd;
+ uint32_t service;
+ uint32_t instance;
+ uint32_t node_id;
+ uint32_t port_id;
+ } srv;
+ struct {
+ uint32_t cmd;
+ uint32_t node_id;
+ uint32_t port_id;
+ } cli;
+};
+
+struct comm_mode_info {
+ int mode;
+ void *xprt_info;
+};
+
+/**
+ * msm_ipc_port - Definition of IPC Router port
+ * @list: List(local/control ports) in which this port is present.
+ * @this_port: Contains port's node_id and port_id information.
+ * @port_name: Contains service & instance info if the port hosts a service.
+ * @type: Type of the port - Client, Service, Control or Security Config.
+ * @flags: Flags to identify the port state.
+ * @port_lock: Lock to protect access to the port information.
+ * @mode_info: Communication mode of the port owner.
+ * @port_rx_q: Receive queue where incoming messages are queued.
+ * @port_rx_q_lock_lhb3: Lock to protect access to the port's rx_q.
+ * @rx_ws_name: Name of the receive wakeup source.
+ * @port_rx_ws: Wakeup source to prevent suspend until the rx_q is empty.
+ * @port_rx_wait_q: Wait queue to wait for the incoming messages.
+ * @restart_state: Flag to hold the restart state information.
+ * @restart_lock: Lock to protect access to the restart_state.
+ * @restart_wait: Wait Queue to wait for any restart events.
+ * @endpoint: Contains the information related to user-space interface.
+ * @notify: Function to notify the incoming events on the port.
+ * @check_send_permissions: Function to check access control from this port.
+ * @num_tx: Number of packets transmitted.
+ * @num_rx: Number of packets received.
+ * @num_tx_bytes: Number of bytes transmitted.
+ * @num_rx_bytes: Number of bytes received.
+ * @priv: Private information registered by the port owner.
+ */
+struct msm_ipc_port {
+ struct list_head list;
+
+ struct msm_ipc_port_addr this_port;
+ struct msm_ipc_port_name port_name;
+ uint32_t type;
+ unsigned flags;
+ spinlock_t port_lock;
+ struct comm_mode_info mode_info;
+
+ struct list_head port_rx_q;
+ struct mutex port_rx_q_lock_lhb3;
+ char rx_ws_name[MAX_WS_NAME_SZ];
+ struct wakeup_source port_rx_ws;
+ wait_queue_head_t port_rx_wait_q;
+
+ int restart_state;
+ spinlock_t restart_lock;
+ wait_queue_head_t restart_wait;
+
+ void *endpoint;
+ void (*notify)(unsigned event, void *oob_data,
+ size_t oob_data_len, void *priv);
+ int (*check_send_permissions)(void *data);
+
+ uint32_t num_tx;
+ uint32_t num_rx;
+ unsigned long num_tx_bytes;
+ unsigned long num_rx_bytes;
+ void *priv;
+};
+
+#ifdef CONFIG_IPC_ROUTER
+/**
+ * msm_ipc_router_create_port() - Create a IPC Router port/endpoint
+ * @notify: Callback function to notify any event on the port.
+ * @event: Event ID to be handled.
+ * @oob_data: Any out-of-band data associated with the event.
+ * @oob_data_len: Size of the out-of-band data, if valid.
+ * @priv: Private data registered during the port creation.
+ * @priv: Private info to be passed while the notification is generated.
+ *
+ * @return: Pointer to the port on success, NULL on error.
+ */
+struct msm_ipc_port *msm_ipc_router_create_port(
+ void (*notify)(unsigned event, void *oob_data,
+ size_t oob_data_len, void *priv),
+ void *priv);
+
+/**
+ * msm_ipc_router_bind_control_port() - Bind a port as a control port
+ * @port_ptr: Port which needs to be marked as a control port.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ */
+int msm_ipc_router_bind_control_port(struct msm_ipc_port *port_ptr);
+
+/**
+ * msm_ipc_router_lookup_server_name() - Resolve server address
+ * @srv_name: Name<service:instance> of the server to be resolved.
+ * @srv_info: Buffer to hold the resolved address.
+ * @num_entries_in_array: Number of server info the buffer can hold.
+ * @lookup_mask: Mask to specify the range of instances to be resolved.
+ *
+ * @return: Number of server addresses resolved on success, < 0 on error.
+ */
+int msm_ipc_router_lookup_server_name(struct msm_ipc_port_name *srv_name,
+ struct msm_ipc_server_info *srv_info,
+ int num_entries_in_array,
+ uint32_t lookup_mask);
+
+/**
+ * msm_ipc_router_send_msg() - Send a message/packet
+ * @src: Sender's address/port.
+ * @dest: Destination address.
+ * @data: Pointer to the data to be sent.
+ * @data_len: Length of the data to be sent.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int msm_ipc_router_send_msg(struct msm_ipc_port *src,
+ struct msm_ipc_addr *dest,
+ void *data, unsigned int data_len);
+
+/**
+ * msm_ipc_router_get_curr_pkt_size() - Get the packet size of the first
+ * packet in the rx queue
+ * @port_ptr: Port which owns the rx queue.
+ *
+ * @return: Returns the size of the first packet, if available.
+ * 0 if no packets available, < 0 on error.
+ */
+int msm_ipc_router_get_curr_pkt_size(struct msm_ipc_port *port_ptr);
+
+/**
+ * msm_ipc_router_read_msg() - Read a message/packet
+ * @port_ptr: Receiver's port/address.
+ * @data: Pointer containing the address of the received data.
+ * @src: Address of the sender/source.
+ * @len: Length of the data being read.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int msm_ipc_router_read_msg(struct msm_ipc_port *port_ptr,
+ struct msm_ipc_addr *src,
+ unsigned char **data,
+ unsigned int *len);
+
+/**
+ * msm_ipc_router_close_port() - Close the port
+ * @port_ptr: Pointer to the port to be closed.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int msm_ipc_router_close_port(struct msm_ipc_port *port_ptr);
+
+/**
+ * msm_ipc_router_register_server() - Register a service on a port
+ * @server_port: IPC Router port with which a service is registered.
+ * @name: Service name <service_id:instance_id> that gets registered.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ */
+int msm_ipc_router_register_server(struct msm_ipc_port *server_port,
+ struct msm_ipc_addr *name);
+
+/**
+ * msm_ipc_router_unregister_server() - Unregister a service from a port
+ * @server_port: Port with with a service is already registered.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ */
+int msm_ipc_router_unregister_server(struct msm_ipc_port *server_port);
+
+#else	/* !CONFIG_IPC_ROUTER: no-op stubs; must be static inline in a header */
+
+static inline struct msm_ipc_port *msm_ipc_router_create_port(
+	void (*notify)(unsigned event, void *oob_data,
+		       size_t oob_data_len, void *priv),
+	void *priv)
+{
+	return NULL;
+}
+
+static inline int msm_ipc_router_bind_control_port(
+		struct msm_ipc_port *port_ptr)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ipc_router_lookup_server_name(struct msm_ipc_port_name *srv_name,
+				      struct msm_ipc_server_info *srv_info,
+				      int num_entries_in_array,
+				      uint32_t lookup_mask)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ipc_router_send_msg(struct msm_ipc_port *src,
+			    struct msm_ipc_addr *dest,
+			    void *data, unsigned int data_len)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ipc_router_get_curr_pkt_size(struct msm_ipc_port *port_ptr)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ipc_router_read_msg(struct msm_ipc_port *port_ptr,
+			    struct msm_ipc_addr *src,
+			    unsigned char **data,
+			    unsigned int *len)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ipc_router_close_port(struct msm_ipc_port *port_ptr)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ipc_router_register_server(
+			struct msm_ipc_port *server_port,
+			struct msm_ipc_addr *name)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ipc_router_unregister_server(
+			struct msm_ipc_port *server_port)
+{
+	return -ENODEV;
+}
+
+#endif
+
+#endif
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 03f48d936f66..49908097445d 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -302,6 +302,7 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* @irq_retrigger: resend an IRQ to the CPU
* @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
* @irq_set_wake: enable/disable power-management wake-on of an IRQ
+ * @irq_read_line: return the current value on the irq line
* @irq_bus_lock: function to lock access to slow bus (i2c) chips
* @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
* @irq_cpu_online: configure an interrupt source for a secondary CPU
@@ -333,6 +334,7 @@ struct irq_chip {
int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
int (*irq_retrigger)(struct irq_data *data);
int (*irq_set_type)(struct irq_data *data, unsigned int flow_type);
+ int (*irq_read_line)(struct irq_data *data);
int (*irq_set_wake)(struct irq_data *data, unsigned int on);
void (*irq_bus_lock)(struct irq_data *data);
@@ -404,6 +406,7 @@ extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
extern int irq_set_affinity_locked(struct irq_data *data,
const struct cpumask *cpumask, bool force);
+extern int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask);
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
void irq_move_irq(struct irq_data *data);
@@ -442,6 +445,8 @@ extern void handle_nested_irq(unsigned int irq);
extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
irqreturn_t action_ret);
+/* Resending of interrupts :*/
+void check_irq_resend(struct irq_desc *desc, unsigned int irq);
/* Enable/disable irq debugging output: */
extern int noirqdebug_setup(char *str);
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 13eed92c7d24..671a6f724d1a 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -100,6 +100,13 @@ void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
void gic_cpu_if_down(void);
+bool gic_is_irq_pending(unsigned int irq);
+void gic_clear_irq_pending(unsigned int irq);
+#ifdef CONFIG_ARM_GIC
+void gic_set_irq_secure(unsigned int irq);
+#else
+static inline void gic_set_irq_secure(unsigned int irq) { }
+#endif
static inline void gic_init(unsigned int nr, int start,
void __iomem *dist , void __iomem *cpu)
{
@@ -117,5 +124,8 @@ static inline void __init register_routable_domain_ops
{
gic_routable_irq_domain_ops = ops;
}
+bool gic_is_spi_pending(unsigned int irq);
+void gic_clear_spi_pending(unsigned int irq);
+
#endif /* __ASSEMBLY */
#endif
diff --git a/include/linux/irqchip/msm-gpio-irq.h b/include/linux/irqchip/msm-gpio-irq.h
new file mode 100644
index 000000000000..33c2d80dd3c7
--- /dev/null
+++ b/include/linux/irqchip/msm-gpio-irq.h
@@ -0,0 +1,38 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_GPIO_IRQ_H
+#define MSM_GPIO_IRQ_H
+
+#include <linux/irq.h>
+
+#if (defined(CONFIG_GPIO_MSM_V1) || defined(CONFIG_GPIO_MSM_V2) \
+	|| defined(CONFIG_GPIO_MSM_V3) && !defined(CONFIG_USE_PINCTRL_IRQ)) /* NOTE(review): '&&' binds tighter than '||', so this is V1 || V2 || (V3 && !USE_PINCTRL_IRQ); if !USE_PINCTRL_IRQ was meant to gate all three, add parentheses -- confirm intent */
+int __init msm_gpio_of_init(struct device_node *node,
+			    struct device_node *parent);
+extern struct irq_chip msm_gpio_irq_extn;
+/* TLMM irq init is a no-op stub when the legacy msm_gpio path is selected */
+static inline int __init msm_tlmm_of_irq_init(struct device_node *node,
+					      struct device_node *parent)
+{
+	return 0;
+}
+#else
+int __init msm_tlmm_of_irq_init(struct device_node *node,
+				struct device_node *parent);
+extern struct irq_chip mpm_tlmm_irq_extn;
+/* Conversely, legacy msm_gpio init is a no-op stub on the pinctrl/TLMM path */
+static inline int __init msm_gpio_of_init(struct device_node *node,
+					  struct device_node *parent)
+{
+	return 0;
+}
+#endif
+#endif
diff --git a/include/linux/irqchip/msm-mpm-irq.h b/include/linux/irqchip/msm-mpm-irq.h
new file mode 100644
index 000000000000..7b8a6a476560
--- /dev/null
+++ b/include/linux/irqchip/msm-mpm-irq.h
@@ -0,0 +1,167 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MSM_MPM_IRQ_H
+#define __MSM_MPM_IRQ_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+#define MSM_MPM_NR_MPM_IRQS 64
+
+#if defined(CONFIG_MSM_MPM_OF)
+/**
+ * msm_mpm_enable_pin() - Enable/Disable a MPM pin for idle wakeups.
+ *
+ * @pin: MPM pin to set
+ * @enable: enable/disable the pin
+ *
+ * returns 0 on success or errorno
+ *
+ * Drivers can call the function to configure MPM pins for wakeup from idle low
+ * power modes. The API provides a direct access to the configuring MPM pins
+ * that are not connected to a IRQ/GPIO
+ */
+int msm_mpm_enable_pin(unsigned int pin, unsigned int enable);
+
+/**
+ * msm_mpm_set_pin_wake() - Enable/Disable a MPM pin during suspend
+ *
+ * @pin: MPM pin to set
+ * @enable: enable/disable the pin as wakeup
+ *
+ * returns 0 on success or errorno
+ *
+ * Drivers can call the function to configure MPM pins for wakeup from suspend
+ * low power modes. The API provides a direct access to the configuring MPM pins
+ * that are not connected to a IRQ/GPIO
+ */
+int msm_mpm_set_pin_wake(unsigned int pin, unsigned int on);
+/**
+ * msm_mpm_set_pin_type() - Set the flowtype of a MPM pin.
+ *
+ * @pin: MPM pin to configure
+ * @flow_type: flowtype of the MPM pin.
+ *
+ * returns 0 on success or errorno
+ *
+ * Drivers can call the function to configure the flowtype of the MPM pins
+ * The API provides a direct access to the configuring MPM pins that are not
+ * connected to a IRQ/GPIO
+ */
+int msm_mpm_set_pin_type(unsigned int pin, unsigned int flow_type);
+/**
+ * msm_mpm_irqs_detectable() - Check if active irqs can be monitored by MPM
+ *
+ * @from_idle: indicates if the sytem is entering low power mode as a part of
+ * suspend/idle task.
+ *
+ * returns true if all active interrupts can be monitored by the MPM
+ *
+ * Low power management code calls into this API to check if all active
+ * interrupts can be monitored by MPM and choose a level such that all active
+ * interrupts can wake the system up from low power mode.
+ */
+bool msm_mpm_irqs_detectable(bool from_idle);
+/**
+ * msm_mpm_gpio_detectable() - Check if active gpio irqs can be monitored by
+ * MPM
+ *
+ * @from_idle: indicates if the sytem is entering low power mode as a part of
+ * suspend/idle task.
+ *
+ * returns true if all active GPIO interrupts can be monitored by the MPM
+ *
+ * Low power management code calls into this API to check if all active
+ * GPIO interrupts can be monitored by MPM and choose a level such that all
+ * active interrupts can wake the system up from low power mode.
+ */
+bool msm_mpm_gpio_irqs_detectable(bool from_idle);
+/**
+ * msm_mpm_enter_sleep() -Called from PM code before entering low power mode
+ *
+ * @sclk_count: wakeup time in sclk counts for programmed RPM wakeup
+ * @from_idle: indicates if the sytem is entering low power mode as a part of
+ * suspend/idle task.
+ * @cpumask: the next cpu to wakeup.
+ *
+ * Low power management code calls into this API to configure the MPM to
+ * monitor the active irqs before going to sleep.
+ */
+void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle,
+ const struct cpumask *cpumask);
+/**
+ * msm_mpm_exit_sleep() -Called from PM code after resuming from low power mode
+ *
+ * @from_idle: indicates if the sytem is entering low power mode as a part of
+ * suspend/idle task.
+ *
+ * Low power management code calls into this API to query the MPM for the
+ * wakeup source and retriggering the appropriate interrupt.
+ */
+void msm_mpm_exit_sleep(bool from_idle);
+/**
+ * of_mpm_init() - Device tree initialization function
+ *
+ * The initialization function is called after * GPIO/GIC device initialization
+ * routines are called and before any device irqs are requested. MPM driver
+ * keeps track of all enabled/wakeup interrupts in the system to be able to
+ * configure MPM when entering a system wide low power mode. The MPM is a
+ * alway-on low power hardware block that monitors 64 wakeup interrupts when the
+ * system is in a low power mode. The initialization function constructs the MPM
+ * mapping between the IRQs and the MPM pin based on data in the device tree.
+ */
+void __init of_mpm_init(void);
+#else
+static inline int msm_mpm_enable_irq(unsigned int irq, unsigned int enable)
+{ return -ENODEV; }
+static inline int msm_mpm_set_irq_wake(unsigned int irq, unsigned int on)
+{ return -ENODEV; }
+static inline int msm_mpm_set_irq_type(unsigned int irq, unsigned int flow_type)
+{ return -ENODEV; }
+static inline int msm_mpm_enable_pin(unsigned int pin, unsigned int enable)
+{ return -ENODEV; }
+static inline int msm_mpm_set_pin_wake(unsigned int pin, unsigned int on)
+{ return -ENODEV; }
+static inline int msm_mpm_set_pin_type(unsigned int pin,
+ unsigned int flow_type)
+{ return -ENODEV; }
+static inline bool msm_mpm_irqs_detectable(bool from_idle)
+{ return false; }
+static inline bool msm_mpm_gpio_irqs_detectable(bool from_idle)
+{ return false; }
+static inline void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle,
+ const struct cpumask *cpumask) {}
+static inline void msm_mpm_exit_sleep(bool from_idle) {}
+static inline void __init of_mpm_init(void) {}
+#endif
+#ifdef CONFIG_MSM_MPM_OF
+/** msm_mpm_suspend_prepare() - Called at prepare_late() op during suspend
+ *
+ *
+ * When called the MPM driver checks if the wakeup interrupts can be monitored
+ * by MPM hardware and program them accordingly. If wake up interrupts cannot
+ * be monitored then it disallows system low power modes.
+ */
+void msm_mpm_suspend_prepare(void);
+/** msm_mpm_suspend_wake - Called during wake() op in suspend.
+ *
+ * When called MPM drivers sets the vote for system low power modes depending
+ * on the active interrupts.
+ */
+void msm_mpm_suspend_wake(void);
+#else
+static inline void msm_mpm_suspend_prepare(void) {}
+static inline void msm_mpm_suspend_wake(void) {}
+#endif
+#endif /* __MSM_MPM_IRQ_H */
diff --git a/include/linux/irqchip/qpnp-int.h b/include/linux/irqchip/qpnp-int.h
new file mode 100644
index 000000000000..614165ebf8b3
--- /dev/null
+++ b/include/linux/irqchip/qpnp-int.h
@@ -0,0 +1,131 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef QPNPINT_H
+#define QPNPINT_H
+
+#include <linux/spmi.h>
+
+struct qpnp_irq_spec {
+ uint8_t slave; /* 0-15 */
+ uint8_t per; /* 0-255 */
+ uint8_t irq; /* 0-7 */
+};
+
+struct qpnp_local_int {
+ /* mask - Invoke PMIC Arbiter local mask handler */
+ int (*mask)(struct spmi_controller *spmi_ctrl,
+ struct qpnp_irq_spec *spec,
+ uint32_t priv_d);
+ /* unmask - Invoke PMIC Arbiter local unmask handler */
+ int (*unmask)(struct spmi_controller *spmi_ctrl,
+ struct qpnp_irq_spec *spec,
+ uint32_t priv_d);
+ /* register_priv_data - Return per irq priv data */
+ int (*register_priv_data)(struct spmi_controller *spmi_ctrl,
+ struct qpnp_irq_spec *spec,
+ uint32_t *priv_d);
+};
+
+#ifdef CONFIG_MSM_QPNP_INT
+/**
+ * qpnpint_of_init() - Device Tree irq initialization
+ *
+ * Standard Device Tree init routine to be called from
+ * of_irq_init().
+ */
+int __init qpnpint_of_init(struct device_node *node,
+ struct device_node *parent);
+
+/**
+ * qpnpint_register_controller() - Register local interrupt callbacks
+ *
+ * Used by the PMIC Arbiter driver or equivalent to register
+ * callbacks for interrupt events.
+ */
+int qpnpint_register_controller(struct device_node *node,
+ struct spmi_controller *ctrl,
+ struct qpnp_local_int *li_cb);
+
+/**
+ * qpnpint_unregister_controller() - Unregister local interrupt callbacks
+ *
+ * Used by the PMIC Arbiter driver or equivalent to unregister
+ * callbacks for interrupt events.
+ */
+int qpnpint_unregister_controller(struct device_node *node);
+
+/**
+ * qpnpint_handle_irq - Main interrupt handling routine
+ *
+ * Pass a PMIC Arbiter interrupt to Linux.
+ */
+int qpnpint_handle_irq(struct spmi_controller *spmi_ctrl,
+ struct qpnp_irq_spec *spec);
+
+/**
+ * qpnpint_show_irq - Prints the Linux interrupt number
+ *
+ * Pass a PMIC Arbiter interrupt to Linux.
+ */
+int qpnpint_show_irq(struct spmi_controller *spmi_ctrl,
+ struct qpnp_irq_spec *spec);
+
+#ifdef CONFIG_MSM_SHOW_RESUME_IRQ
+extern int msm_show_resume_irq_mask;
+static inline bool qpnpint_show_resume_irq(void)
+{
+ return msm_show_resume_irq_mask;
+}
+#else
+static inline bool qpnpint_show_resume_irq(void)
+{
+ return false;
+}
+#endif
+
+#else	/* !CONFIG_MSM_QPNP_INT: no-op stubs; must be static inline in a header */
+static inline int __init qpnpint_of_init(struct device_node *node,
+				struct device_node *parent)
+{
+	return -ENXIO;
+}
+
+static inline int qpnpint_register_controller(struct device_node *node,
+					      struct spmi_controller *ctrl,
+					      struct qpnp_local_int *li_cb)
+{
+	return -ENXIO;
+}
+
+static inline int qpnpint_unregister_controller(struct device_node *node)
+{
+	return -ENXIO;
+}
+
+static inline int qpnpint_handle_irq(struct spmi_controller *spmi_ctrl,
+				     struct qpnp_irq_spec *spec)
+{
+	return -ENXIO;
+}
+static inline int qpnpint_show_irq(struct spmi_controller *spmi_ctrl,
+		     struct qpnp_irq_spec *spec)
+{
+	return -ENXIO;
+}
+
+static inline bool qpnpint_show_resume_irq(void)
+{
+	return false;
+}
+#endif /* CONFIG_MSM_QPNP_INT */
+#endif /* QPNPINT_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 3d770f5564b8..8bf73b6f66fd 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -597,7 +597,7 @@ do { \
__trace_printk_check_format(fmt, ##args); \
\
if (__builtin_constant_p(fmt)) \
- __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \
+ __trace_printk(_THIS_IP_, trace_printk_fmt, ##args); \
else \
__trace_printk(_THIS_IP_, fmt, ##args); \
} while (0)
@@ -814,4 +814,7 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
/* Other writable? Generally considered a bad idea. */ \
BUILD_BUG_ON_ZERO((perms) & 2) + \
(perms))
+/* To identify board information in panic logs, set this */
+extern char *mach_panic_string;
+
#endif
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index b9376cd5a187..20400e7854e7 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -8,9 +8,11 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/vtime.h>
-#include <asm/irq.h>
+#include <linux/irq.h>
#include <linux/cputime.h>
+#include <asm/irq.h>
+
/*
* 'kernel_stat.h' contains the definitions needed for doing
* some kernel statistics (CPU usage, context switches ...),
@@ -53,7 +55,12 @@ extern unsigned long long nr_context_switches(void);
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
extern void kstat_incr_irq_this_cpu(unsigned int irq);
-
+#if 0
+#define kstat_incr_irqs_this_cpu(irqno, DESC) do { \
+ __this_cpu_inc(*(DESC)->kstat_irqs); \
+ __this_cpu_inc(kstat.irqs_sum); \
+} while (0)
+#endif
static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
{
__this_cpu_inc(kstat.softirqs[irq]);
diff --git a/include/linux/keyreset.h b/include/linux/keyreset.h
new file mode 100644
index 000000000000..a2ac49e5b684
--- /dev/null
+++ b/include/linux/keyreset.h
@@ -0,0 +1,28 @@
+/*
+ * include/linux/keyreset.h - platform data structure for resetkeys driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_KEYRESET_H
+#define _LINUX_KEYRESET_H
+
+#define KEYRESET_NAME "keyreset"
+
+struct keyreset_platform_data {
+ int (*reset_fn)(void);
+ int *keys_up;
+ int keys_down[]; /* 0 terminated */
+};
+
+#endif /* _LINUX_KEYRESET_H */
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index e8cc45307f8f..96b7073ad452 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -285,6 +285,7 @@ phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
int memblock_is_memory(phys_addr_t addr);
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
+int memblock_overlaps_memory(phys_addr_t base, phys_addr_t size);
int memblock_is_reserved(phys_addr_t addr);
int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 44eeef0da186..03b4a765c2ea 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -432,12 +432,22 @@ struct spi_device_id {
kernel_ulong_t driver_data; /* Data private to the driver */
};
+#define SLIMBUS_NAME_SIZE 32
+#define SLIMBUS_MODULE_PREFIX "slim:"
+
+struct slim_device_id {
+ char name[SLIMBUS_NAME_SIZE];
+ kernel_ulong_t driver_data /* Data private to the driver */
+ __attribute__((aligned(sizeof(kernel_ulong_t))));
+};
+
#define SPMI_NAME_SIZE 32
#define SPMI_MODULE_PREFIX "spmi:"
struct spmi_device_id {
char name[SPMI_NAME_SIZE];
- kernel_ulong_t driver_data; /* Data private to the driver */
+ kernel_ulong_t driver_data /* Data private to the driver */
+ __attribute__((aligned(sizeof(kernel_ulong_t))));
};
/* dmi */
diff --git a/include/linux/msm-bus-board.h b/include/linux/msm-bus-board.h
new file mode 100644
index 000000000000..87f17915022f
--- /dev/null
+++ b/include/linux/msm-bus-board.h
@@ -0,0 +1,196 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ARCH_MSM_BUS_BOARD_H
+#define __ASM_ARCH_MSM_BUS_BOARD_H
+
+#include <linux/types.h>
+#include <linux/input.h>
+
+enum context {
+ DUAL_CTX,
+ ACTIVE_CTX,
+ NUM_CTX
+};
+
+struct msm_bus_fabric_registration {
+ unsigned int id;
+ const char *name;
+ struct msm_bus_node_info *info;
+ unsigned int len;
+ int ahb;
+ const char *fabclk[NUM_CTX];
+ const char *iface_clk;
+ unsigned int offset;
+ unsigned int haltid;
+ unsigned int rpm_enabled;
+ unsigned int nmasters;
+ unsigned int nslaves;
+ unsigned int ntieredslaves;
+ bool il_flag;
+ const struct msm_bus_board_algorithm *board_algo;
+ int hw_sel;
+ void *hw_data;
+ uint32_t qos_freq;
+ uint32_t qos_baseoffset;
+ u64 nr_lim_thresh;
+ uint32_t eff_fact;
+ uint32_t qos_delta;
+ bool virt;
+};
+
+struct msm_bus_device_node_registration {
+ struct msm_bus_node_device_type *info;
+ unsigned int num_devices;
+ bool virt;
+};
+
+enum msm_bus_bw_tier_type {
+ MSM_BUS_BW_TIER1 = 1,
+ MSM_BUS_BW_TIER2,
+ MSM_BUS_BW_COUNT,
+ MSM_BUS_BW_SIZE = 0x7FFFFFFF,
+};
+
+struct msm_bus_halt_vector {
+ uint32_t haltval;
+ uint32_t haltmask;
+};
+
+extern struct msm_bus_fabric_registration msm_bus_apps_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_sys_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_cpss_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_def_fab_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_8960_apps_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_sg_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_sys_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_cpss_fpb_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_8064_apps_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8064_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8064_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8064_sys_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8064_cpss_fpb_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_9615_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_9615_def_fab_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_8930_apps_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8930_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8930_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8930_sys_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8930_cpss_fpb_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_8974_sys_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_mmss_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_bimc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_ocmem_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_periph_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_config_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_ocmem_vnoc_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_9625_sys_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_9625_bimc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_9625_periph_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_9625_config_noc_pdata;
+
+extern int msm_bus_device_match_adhoc(struct device *dev, void *id);
+
+void msm_bus_rpm_set_mt_mask(void);
+int msm_bus_board_rpm_get_il_ids(uint16_t *id);
+int msm_bus_board_get_iid(int id);
+
+#define NFAB_MSM8226 6
+#define NFAB_MSM8610 5
+
+/*
+ * These macros specify the convention followed for allocating
+ * ids to fabrics, masters and slaves for 8x60.
+ *
+ * A node can be identified as a master/slave/fabric by using
+ * these ids.
+ */
+#define FABRIC_ID_KEY 1024
+#define SLAVE_ID_KEY ((FABRIC_ID_KEY) >> 1)
+#define MAX_FAB_KEY 7168 /* OR(All fabric ids) */
+#define INT_NODE_START 10000
+
+#define GET_FABID(id) ((id) & MAX_FAB_KEY)
+
+#define NODE_ID(id) ((id) & (FABRIC_ID_KEY - 1))
+#define IS_SLAVE(id) ((NODE_ID(id)) >= SLAVE_ID_KEY ? 1 : 0)
+#define CHECK_ID(iid, id) (((iid & id) != id) ? -ENXIO : iid)
+
+/*
+ * The following macros are used to format the data for port halt
+ * and unhalt requests.
+ */
+#define MSM_BUS_CLK_HALT 0x1
+#define MSM_BUS_CLK_HALT_MASK 0x1
+#define MSM_BUS_CLK_HALT_FIELDSIZE 0x1
+#define MSM_BUS_CLK_UNHALT 0x0
+
+#define MSM_BUS_MASTER_SHIFT(master, fieldsize) \
+ ((master) * (fieldsize))
+
+#define MSM_BUS_SET_BITFIELD(word, fieldmask, fieldvalue) \
+ { \
+ (word) &= ~(fieldmask); \
+ (word) |= (fieldvalue); \
+ }
+
+
+#define MSM_BUS_MASTER_HALT(u32haltmask, u32haltval, master) \
+ MSM_BUS_SET_BITFIELD(u32haltmask, \
+ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE), \
+ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE))\
+ MSM_BUS_SET_BITFIELD(u32haltval, \
+ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE), \
+ MSM_BUS_CLK_HALT<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE))\
+
+#define MSM_BUS_MASTER_UNHALT(u32haltmask, u32haltval, master) \
+ MSM_BUS_SET_BITFIELD(u32haltmask, \
+ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE), \
+ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE))\
+ MSM_BUS_SET_BITFIELD(u32haltval, \
+ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE), \
+ MSM_BUS_CLK_UNHALT<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE))\
+
+#define RPM_BUS_SLAVE_REQ 0x766c7362
+#define RPM_BUS_MASTER_REQ 0x73616d62
+
+enum msm_bus_rpm_slave_field_type {
+ RPM_SLAVE_FIELD_BW = 0x00007762,
+};
+
+enum msm_bus_rpm_mas_field_type {
+ RPM_MASTER_FIELD_BW = 0x00007762,
+ RPM_MASTER_FIELD_BW_T0 = 0x30747762,
+ RPM_MASTER_FIELD_BW_T1 = 0x31747762,
+ RPM_MASTER_FIELD_BW_T2 = 0x32747762,
+};
+
+#include <dt-bindings/msm/msm-bus-ids.h>
+#endif /*__ASM_ARCH_MSM_BUS_BOARD_H */
diff --git a/include/linux/msm-bus.h b/include/linux/msm-bus.h
new file mode 100644
index 000000000000..1f5edc964c49
--- /dev/null
+++ b/include/linux/msm-bus.h
@@ -0,0 +1,139 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_H
+#define _ARCH_ARM_MACH_MSM_BUS_H
+
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+
+/*
+ * Macros for clients to convert their data to ib and ab
+ * Ws : Time window over which to transfer the data in SECONDS
+ * Bs : Size of the data block in bytes
+ * Per : Recurrence period
+ * Tb : Throughput bandwidth to prevent stalling
+ * R : Ratio of actual bandwidth used to Tb
+ * Ib : Instantaneous bandwidth
+ * Ab : Arbitrated bandwidth
+ *
+ * IB_RECURRBLOCK and AB_RECURRBLOCK:
+ * These are used if the requirement is to transfer a
+ * recurring block of data over a known time window.
+ *
+ * IB_THROUGHPUTBW and AB_THROUGHPUTBW:
+ * These are used for CPU style masters. Here the requirement
+ * is to have minimum throughput bandwidth available to avoid
+ * stalling.
+ */
+#define IB_RECURRBLOCK(Ws, Bs) ((Ws) == 0 ? 0 : ((Bs)/(Ws)))
+#define AB_RECURRBLOCK(Bs, Per) ((Per) == 0 ? 0 : ((Bs)/(Per)))
+#define IB_THROUGHPUTBW(Tb) (Tb)
+#define AB_THROUGHPUTBW(Tb, R) ((Tb) * (R))
+
+struct msm_bus_vectors {
+ int src; /* Master */
+ int dst; /* Slave */
+ uint64_t ab; /* Arbitrated bandwidth */
+ uint64_t ib; /* Instantaneous bandwidth */
+};
+
+struct msm_bus_paths {
+ int num_paths;
+ struct msm_bus_vectors *vectors;
+};
+
+struct msm_bus_scale_pdata {
+ struct msm_bus_paths *usecase;
+ int num_usecases;
+ const char *name;
+ /*
+ * If the active_only flag is set to 1, the BW request is applied
+ * only when at least one CPU is active (powered on). If the flag
+ * is set to 0, then the BW request is always applied irrespective
+ * of the CPU state.
+ */
+ unsigned int active_only;
+};
+
+/* Scaling APIs */
+
+/*
+ * This function returns a handle to the client. This should be used to
+ * call msm_bus_scale_client_update_request.
+ * The function returns 0 if bus driver is unable to register a client
+ */
+
+#if (defined(CONFIG_MSM_BUS_SCALING) || defined(CONFIG_BUS_TOPOLOGY_ADHOC))
+int __init msm_bus_fabric_init_driver(void);
+uint32_t msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata);
+int msm_bus_scale_client_update_request(uint32_t cl, unsigned int index);
+void msm_bus_scale_unregister_client(uint32_t cl);
+/* AXI Port configuration APIs */
+int msm_bus_axi_porthalt(int master_port);
+int msm_bus_axi_portunhalt(int master_port);
+
+#else
+static inline int __init msm_bus_fabric_init_driver(void) { return 0; }
+
+static inline uint32_t
+msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata)
+{
+ return 1;
+}
+
+static inline int
+msm_bus_scale_client_update_request(uint32_t cl, unsigned int index)
+{
+ return 0;
+}
+
+static inline void
+msm_bus_scale_unregister_client(uint32_t cl)
+{
+}
+
+static inline int msm_bus_axi_porthalt(int master_port)
+{
+ return 0;
+}
+
+static inline int msm_bus_axi_portunhalt(int master_port)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_OF) && defined(CONFIG_MSM_BUS_SCALING)
+struct msm_bus_scale_pdata *msm_bus_pdata_from_node(
+ struct platform_device *pdev, struct device_node *of_node);
+struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev);
+void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata);
+#else
+static inline struct msm_bus_scale_pdata
+*msm_bus_cl_get_pdata(struct platform_device *pdev)
+{
+ return NULL;
+}
+
+static inline struct msm_bus_scale_pdata *msm_bus_pdata_from_node(
+ struct platform_device *pdev, struct device_node *of_node)
+{
+ return NULL;
+}
+
+static inline void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata)
+{
+}
+#endif
+#endif /*_ARCH_ARM_MACH_MSM_BUS_H*/
diff --git a/include/linux/msm_audio_ion.h b/include/linux/msm_audio_ion.h
new file mode 100644
index 000000000000..38b27bf8a053
--- /dev/null
+++ b/include/linux/msm_audio_ion.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_MSM_AUDIO_ION_H
+#define _LINUX_MSM_AUDIO_ION_H
+#ifdef CONFIG_SND_SOC_QDSP6V2
+#include <sound/q6asm-v2.h>
+#else
+#include <sound/q6asm.h>
+#endif
+#include <sound/pcm.h>
+#include <linux/msm_ion.h>
+
+
+int msm_audio_ion_alloc(const char *name, struct ion_client **client,
+ struct ion_handle **handle, size_t bufsz,
+ ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr);
+
+int msm_audio_ion_import(const char *name, struct ion_client **client,
+ struct ion_handle **handle, int fd,
+ unsigned long *ionflag, size_t bufsz,
+ ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr);
+int msm_audio_ion_free(struct ion_client *client, struct ion_handle *handle);
+int msm_audio_ion_mmap(struct audio_buffer *substream,
+ struct vm_area_struct *vma);
+
+bool msm_audio_ion_is_smmu_available(void);
+int msm_audio_ion_cache_operations(struct audio_buffer *abuff, int cache_op);
+
+#ifdef CONFIG_SND_SOC_QDSP6V2
+struct ion_client *msm_audio_ion_client_create(unsigned int heap_mask,
+ const char *name);
+void msm_audio_ion_client_destroy(struct ion_client *client);
+int msm_audio_ion_import_legacy(const char *name, struct ion_client *client,
+ struct ion_handle **handle, int fd,
+ unsigned long *ionflag, size_t bufsz,
+ ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr);
+int msm_audio_ion_free_legacy(struct ion_client *client,
+ struct ion_handle *handle);
+#else
+static inline struct ion_client *msm_audio_ion_client_create(
+				unsigned int heap_mask, const char *name)
+{ return NULL; }
+static inline void msm_audio_ion_client_destroy(struct ion_client *client)
+{}
+static inline int msm_audio_ion_import_legacy(const char *name,
+			struct ion_client *client,
+			struct ion_handle **handle, int fd,
+			unsigned long *ionflag, size_t bufsz,
+			ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr)
+{ return 0; }
+static inline int msm_audio_ion_free_legacy(struct ion_client *client,
+			struct ion_handle *handle)
+{ return 0; }
+
+#endif /* CONFIG_SND_SOC_QDSP6V2 */
+#endif /* _LINUX_MSM_AUDIO_ION_H */
+
diff --git a/include/linux/msm_kgsl.h b/include/linux/msm_kgsl.h
new file mode 100644
index 000000000000..75b7f39e5538
--- /dev/null
+++ b/include/linux/msm_kgsl.h
@@ -0,0 +1,113 @@
+#ifndef _MSM_KGSL_H
+#define _MSM_KGSL_H
+
+#include <uapi/linux/msm_kgsl.h>
+
+/* Clock flags to show which clocks should be controlled by a given platform */
+#define KGSL_CLK_SRC 0x00000001
+#define KGSL_CLK_CORE 0x00000002
+#define KGSL_CLK_IFACE 0x00000004
+#define KGSL_CLK_MEM 0x00000008
+#define KGSL_CLK_MEM_IFACE 0x00000010
+#define KGSL_CLK_AXI 0x00000020
+#define KGSL_CLK_ALT_MEM_IFACE 0x00000040
+#define KGSL_CLK_RBBMTIMER 0x00000080
+
+#define KGSL_MAX_PWRLEVELS 10
+
+#define KGSL_3D0_REG_MEMORY "kgsl_3d0_reg_memory"
+#define KGSL_3D0_SHADER_MEMORY "kgsl_3d0_shader_memory"
+#define KGSL_3D0_IRQ "kgsl_3d0_irq"
+
+enum kgsl_iommu_context_id {
+ KGSL_IOMMU_CONTEXT_USER = 0,
+ KGSL_IOMMU_CONTEXT_PRIV = 1,
+};
+
+/**
+ * struct kgsl_iommu_ctx - Struct holding context name and id
+ * @iommu_ctx_name: Context name
+ * @ctx_id: Iommu context ID - user or priv
+ */
+struct kgsl_iommu_ctx {
+ const char *iommu_ctx_name;
+ enum kgsl_iommu_context_id ctx_id;
+};
+
+/**
+ * struct kgsl_device_iommu_data - Struct holding iommu context data obtained
+ * from dtsi file
+ * @iommu_ctxs: Pointer to array of struct holding context name and id
+ * @iommu_ctx_count: Number of contexts defined in the dtsi file
+ * @iommu_halt_enable: Indicates if smmu halt h/w feature is supported
+ * @physstart: Start of iommu registers physical address
+ * @physend: End of iommu registers physical address
+ */
+struct kgsl_device_iommu_data {
+ const struct kgsl_iommu_ctx *iommu_ctxs;
+ int iommu_ctx_count;
+ int iommu_halt_enable;
+ unsigned int physstart;
+ unsigned int physend;
+};
+
+/**
+ * struct kgsl_pwrlevel - Struct holding different pwrlevel info obtained from
+ * the dtsi file
+ * @gpu_freq: GPU frequency vote in Hz
+ * @bus_freq: Bus bandwidth vote index
+ * @bus_min: Min bus index @gpu_freq
+ * @bus_max: Max bus index @gpu_freq
+ * @io_fraction: IO percentage vote to the CPU
+ */
+struct kgsl_pwrlevel {
+ unsigned int gpu_freq;
+ unsigned int bus_freq;
+ unsigned int bus_min;
+ unsigned int bus_max;
+ unsigned int io_fraction;
+};
+
+/**
+ * struct kgsl_device_platform_data - Struct holding all the device info
+ * obtained from the dtsi file
+ * @pwrlevel: Array of struct holding pwrlevel information
+ * @init_level: Pwrlevel device is initialized with
+ * @num_levels: Number of pwrlevels for the specific device
+ * @idle_timeout: Timeout for GPU to turn its resources off
+ * @strtstp_sleepwake: Flag to decide b/w SLEEP and SLUMBER
+ * @bus_control: Flag if independent bus voting is supported
+ * @clk_map: Clocks map per platform
+ * @bus_scale_table: Bus table with different b/w votes
+ * @iommu_data: Struct holding iommu context data
+ * @iommu_count: Number of IOMMU units for the GPU
+ * @csdev: Pointer to the coresight device for this device
+ * @coresight_pdata: Coresight configuration for specific device
+ * @chipid: Chip ID for the device's GPU
+ * @pm_qos_latency: latency value for cpu
+ */
+struct kgsl_device_platform_data {
+ struct kgsl_pwrlevel pwrlevel[KGSL_MAX_PWRLEVELS];
+ int init_level;
+ int num_levels;
+ unsigned int idle_timeout;
+ bool strtstp_sleepwake;
+ bool bus_control;
+ unsigned int clk_map;
+ unsigned int step_mul;
+ struct msm_bus_scale_pdata *bus_scale_table;
+ struct kgsl_device_iommu_data *iommu_data;
+ int iommu_count;
+ struct coresight_device *csdev;
+ struct coresight_platform_data *coresight_pdata;
+ unsigned int chipid;
+ unsigned int pm_qos_latency;
+};
+
+#ifdef CONFIG_MSM_KGSL_DRM
+int kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
+ unsigned long *len);
+#else
+#define kgsl_gem_obj_addr(...) 0
+#endif
+#endif /* _MSM_KGSL_H */
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
index fe722c1fb61d..1eb2e93e65a2 100644
--- a/include/linux/msm_mdp.h
+++ b/include/linux/msm_mdp.h
@@ -1,6 +1,7 @@
/* include/linux/msm_mdp.h
*
* Copyright (C) 2007 Google Incorporated
+ * Copyright (c) 2012-2014 The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -14,66 +15,22 @@
#ifndef _MSM_MDP_H_
#define _MSM_MDP_H_
-#include <linux/types.h>
-
-#define MSMFB_IOCTL_MAGIC 'm'
-#define MSMFB_GRP_DISP _IOW(MSMFB_IOCTL_MAGIC, 1, unsigned int)
-#define MSMFB_BLIT _IOW(MSMFB_IOCTL_MAGIC, 2, unsigned int)
-
-enum {
- MDP_RGB_565, /* RGB 565 planar */
- MDP_XRGB_8888, /* RGB 888 padded */
- MDP_Y_CBCR_H2V2, /* Y and CbCr, pseudo planar w/ Cb is in MSB */
- MDP_ARGB_8888, /* ARGB 888 */
- MDP_RGB_888, /* RGB 888 planar */
- MDP_Y_CRCB_H2V2, /* Y and CrCb, pseudo planar w/ Cr is in MSB */
- MDP_YCRYCB_H2V1, /* YCrYCb interleave */
- MDP_Y_CRCB_H2V1, /* Y and CrCb, pseduo planar w/ Cr is in MSB */
- MDP_Y_CBCR_H2V1, /* Y and CrCb, pseduo planar w/ Cr is in MSB */
- MDP_RGBA_8888, /* ARGB 888 */
- MDP_BGRA_8888, /* ABGR 888 */
- MDP_RGBX_8888, /* RGBX 888 */
- MDP_IMGTYPE_LIMIT /* Non valid image type after this enum */
-};
-
-enum {
- PMEM_IMG,
- FB_IMG,
-};
-
-/* flag values */
-#define MDP_ROT_NOP 0
-#define MDP_FLIP_LR 0x1
-#define MDP_FLIP_UD 0x2
-#define MDP_ROT_90 0x4
-#define MDP_ROT_180 (MDP_FLIP_UD|MDP_FLIP_LR)
-#define MDP_ROT_270 (MDP_ROT_90|MDP_FLIP_UD|MDP_FLIP_LR)
-#define MDP_DITHER 0x8
-#define MDP_BLUR 0x10
-
-#define MDP_TRANSP_NOP 0xffffffff
-#define MDP_ALPHA_NOP 0xff
-
-struct mdp_rect {
- u32 x, y, w, h;
-};
-
-struct mdp_img {
- u32 width, height, format, offset;
- int memory_id; /* the file descriptor */
-};
-
-struct mdp_blit_req {
- struct mdp_img src;
- struct mdp_img dst;
- struct mdp_rect src_rect;
- struct mdp_rect dst_rect;
- u32 alpha, transp_mask, flags;
-};
-
-struct mdp_blit_req_list {
- u32 count;
- struct mdp_blit_req req[];
-};
-
-#endif /* _MSM_MDP_H_ */
+#include <uapi/linux/msm_mdp.h>
+
+int msm_fb_get_iommu_domain(struct fb_info *info, int domain);
+/* get the framebuffer physical address information */
+int get_fb_phys_info(unsigned long *start, unsigned long *len, int fb_num,
+ int subsys_id);
+struct fb_info *msm_fb_get_writeback_fb(void);
+int msm_fb_writeback_init(struct fb_info *info);
+int msm_fb_writeback_start(struct fb_info *info);
+int msm_fb_writeback_queue_buffer(struct fb_info *info,
+ struct msmfb_data *data);
+int msm_fb_writeback_dequeue_buffer(struct fb_info *info,
+ struct msmfb_data *data);
+int msm_fb_writeback_stop(struct fb_info *info);
+int msm_fb_writeback_terminate(struct fb_info *info);
+int msm_fb_writeback_set_secure(struct fb_info *info, int enable);
+int msm_fb_writeback_iommu_ref(struct fb_info *info, int enable);
+
+#endif /*_MSM_MDP_H_*/
diff --git a/include/linux/msm_remote_spinlock.h b/include/linux/msm_remote_spinlock.h
new file mode 100644
index 000000000000..f777cef4e1d7
--- /dev/null
+++ b/include/linux/msm_remote_spinlock.h
@@ -0,0 +1,74 @@
+/* Copyright (c) 2009, 2011, 2013-2014 The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * Part of this code is based on the standard ARM spinlock
+ * implementation (asm/spinlock.h) found in the 2.6.29 kernel.
+ */
+
+#ifndef __ASM__ARCH_QC_REMOTE_SPINLOCK_H
+#define __ASM__ARCH_QC_REMOTE_SPINLOCK_H
+
+#include <linux/io.h>
+#include <linux/types.h>
+
+#define REMOTE_SPINLOCK_NUM_PID 128
+#define REMOTE_SPINLOCK_TID_START REMOTE_SPINLOCK_NUM_PID
+
+/* Remote spinlock definitions. */
+
+typedef struct {
+ volatile uint32_t lock;
+} raw_remote_spinlock_t;
+
+typedef raw_remote_spinlock_t *_remote_spinlock_t;
+
+#define remote_spinlock_id_t const char *
+
+#if defined(CONFIG_REMOTE_SPINLOCK_MSM)
+int _remote_spin_lock_init(remote_spinlock_id_t, _remote_spinlock_t *lock);
+void _remote_spin_release_all(uint32_t pid);
+void _remote_spin_lock(_remote_spinlock_t *lock);
+void _remote_spin_unlock(_remote_spinlock_t *lock);
+int _remote_spin_trylock(_remote_spinlock_t *lock);
+int _remote_spin_release(_remote_spinlock_t *lock, uint32_t pid);
+int _remote_spin_owner(_remote_spinlock_t *lock);
+void _remote_spin_lock_rlock_id(_remote_spinlock_t *lock, uint32_t tid);
+void _remote_spin_unlock_rlock(_remote_spinlock_t *lock);
+#else
+static inline
+int _remote_spin_lock_init(remote_spinlock_id_t id, _remote_spinlock_t *lock)
+{
+ return -EINVAL;
+}
+static inline void _remote_spin_release_all(uint32_t pid) {}
+static inline void _remote_spin_lock(_remote_spinlock_t *lock) {}
+static inline void _remote_spin_unlock(_remote_spinlock_t *lock) {}
+static inline int _remote_spin_trylock(_remote_spinlock_t *lock)
+{
+ return -ENODEV;
+}
+static inline int _remote_spin_release(_remote_spinlock_t *lock, uint32_t pid)
+{
+ return -ENODEV;
+}
+static inline int _remote_spin_owner(_remote_spinlock_t *lock)
+{
+ return -ENODEV;
+}
+static inline void _remote_spin_lock_rlock_id(_remote_spinlock_t *lock,
+ uint32_t tid) {}
+static inline void _remote_spin_unlock_rlock(_remote_spinlock_t *lock) {}
+#endif
+#endif /* __ASM__ARCH_QC_REMOTE_SPINLOCK_H */
diff --git a/include/linux/msm_rtb.h b/include/linux/msm_rtb.h
new file mode 100644
index 000000000000..f8033c02feb2
--- /dev/null
+++ b/include/linux/msm_rtb.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_RTB_H__
+#define __MSM_RTB_H__
+
+/*
+ * These numbers are used from the kernel command line and sysfs
+ * to control filtering. Remove items from here with extreme caution.
+ */
+enum logk_event_type {
+ LOGK_NONE = 0,
+ LOGK_READL = 1,
+ LOGK_WRITEL = 2,
+ LOGK_LOGBUF = 3,
+ LOGK_HOTPLUG = 4,
+ LOGK_CTXID = 5,
+ LOGK_TIMESTAMP = 6,
+ LOGK_L2CPREAD = 7,
+ LOGK_L2CPWRITE = 8,
+};
+
+#define LOGTYPE_NOPC 0x80
+
+struct msm_rtb_platform_data {
+ unsigned int size;
+};
+
+#if defined(CONFIG_MSM_RTB)
+/*
+ * returns 1 if data was logged, 0 otherwise
+ */
+int uncached_logk_pc(enum logk_event_type log_type, void *caller,
+ void *data);
+
+/*
+ * returns 1 if data was logged, 0 otherwise
+ */
+int uncached_logk(enum logk_event_type log_type, void *data);
+
+#define ETB_WAYPOINT do { \
+ BRANCH_TO_NEXT_ISTR; \
+ nop(); \
+ BRANCH_TO_NEXT_ISTR; \
+ nop(); \
+ } while (0)
+
+#define BRANCH_TO_NEXT_ISTR asm volatile("b .+4\n" : : : "memory")
+/*
+ * both the mb and the isb are needed to ensure enough waypoints for
+ * etb tracing
+ */
+#define LOG_BARRIER do { \
+ mb(); \
+ isb();\
+ } while (0)
+#else
+
+static inline int uncached_logk_pc(enum logk_event_type log_type,
+ void *caller,
+ void *data) { return 0; }
+
+static inline int uncached_logk(enum logk_event_type log_type,
+ void *data) { return 0; }
+
+#define ETB_WAYPOINT
+#define BRANCH_TO_NEXT_ISTR
+/*
+ * Due to a GCC bug, we need to have a nop here in order to prevent an extra
+ * read from being generated after the write.
+ */
+#define LOG_BARRIER nop()
+#endif
+#endif
diff --git a/include/linux/msm_smd_pkt.h b/include/linux/msm_smd_pkt.h
new file mode 100644
index 000000000000..cba9f6f27137
--- /dev/null
+++ b/include/linux/msm_smd_pkt.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2010, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __LINUX_MSM_SMD_PKT_H
+#define __LINUX_MSM_SMD_PKT_H
+
+#include <linux/ioctl.h>
+
+#define SMD_PKT_IOCTL_MAGIC (0xC2)
+
+#define SMD_PKT_IOCTL_BLOCKING_WRITE \
+ _IOR(SMD_PKT_IOCTL_MAGIC, 0, unsigned int)
+
+#endif /* __LINUX_MSM_SMD_PKT_H */
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 420032d41d27..f3a800001028 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -53,6 +53,19 @@
__attribute__((section(".discard"), unused))
/*
+ * Macro which verifies @ptr is a percpu pointer without evaluating
+ * @ptr. This is to be used in percpu accessors to verify that the
+ * input parameter is a percpu pointer.
+ *
+ * + 0 is required in order to convert the pointer type from a
+ * potential array type to a pointer to a single item of the array.
+ */
+#define __verify_pcpu_ptr(ptr) do { \
+ const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \
+ (void)__vpp_verify; \
+} while (0)
+
+/*
* s390 and alpha modules require percpu variables to be defined as
* weak to force the compiler to generate GOT based external
* references for them. This is necessary because percpu sections
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 893a0d07986f..7322ab87ad64 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -193,6 +193,8 @@ struct pmu {
struct perf_cpu_context * __percpu pmu_cpu_context;
int task_ctx_nr;
int hrtimer_interval_ms;
+ u32 events_across_hotplug:1,
+ reserved:31;
/*
* Fully disable/enable this PMU, can be used to protect from the PMI
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 0330217abfad..a70ce57ac2d2 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -16,7 +16,8 @@
#include <linux/err.h>
#include <linux/notifier.h>
-
+#include <linux/cpufreq.h>
+
struct dev_pm_opp;
struct device;
@@ -107,13 +108,4 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
}
#endif /* CONFIG_PM_OPP */
-#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
-int of_init_opp_table(struct device *dev);
-#else
-static inline int of_init_opp_table(struct device *dev)
-{
- return -EINVAL;
-}
-#endif
-
#endif /* __LINUX_OPP_H__ */
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 096dbced02ac..9a20b6f061b0 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -44,16 +44,19 @@ enum {
POWER_SUPPLY_CHARGE_TYPE_NONE,
POWER_SUPPLY_CHARGE_TYPE_TRICKLE,
POWER_SUPPLY_CHARGE_TYPE_FAST,
+ POWER_SUPPLY_CHARGE_TYPE_TAPER,
};
enum {
POWER_SUPPLY_HEALTH_UNKNOWN = 0,
POWER_SUPPLY_HEALTH_GOOD,
POWER_SUPPLY_HEALTH_OVERHEAT,
+ POWER_SUPPLY_HEALTH_WARM,
POWER_SUPPLY_HEALTH_DEAD,
POWER_SUPPLY_HEALTH_OVERVOLTAGE,
POWER_SUPPLY_HEALTH_UNSPEC_FAILURE,
POWER_SUPPLY_HEALTH_COLD,
+ POWER_SUPPLY_HEALTH_COOL,
POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE,
POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE,
};
@@ -91,6 +94,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_AUTHENTIC,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_VOLTAGE_MAX,
@@ -101,7 +105,11 @@ enum power_supply_property {
POWER_SUPPLY_PROP_VOLTAGE_AVG,
POWER_SUPPLY_PROP_VOLTAGE_OCV,
POWER_SUPPLY_PROP_VOLTAGE_BOOT,
+ POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION,
POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_TRIM,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CURRENT_AVG,
POWER_SUPPLY_PROP_CURRENT_BOOT,
@@ -114,6 +122,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_CHARGE_AVG,
POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
@@ -127,6 +136,8 @@ enum power_supply_property {
POWER_SUPPLY_PROP_ENERGY_EMPTY,
POWER_SUPPLY_PROP_ENERGY_NOW,
POWER_SUPPLY_PROP_ENERGY_AVG,
+ POWER_SUPPLY_PROP_HI_POWER,
+ POWER_SUPPLY_PROP_LOW_POWER,
POWER_SUPPLY_PROP_CAPACITY, /* in percents! */
POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN, /* in percents! */
POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX, /* in percents! */
@@ -136,6 +147,8 @@ enum power_supply_property {
POWER_SUPPLY_PROP_TEMP_MIN,
POWER_SUPPLY_PROP_TEMP_ALERT_MIN,
POWER_SUPPLY_PROP_TEMP_ALERT_MAX,
+ POWER_SUPPLY_PROP_COOL_TEMP,
+ POWER_SUPPLY_PROP_WARM_TEMP,
POWER_SUPPLY_PROP_TEMP_AMBIENT,
POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN,
POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX,
@@ -147,10 +160,18 @@ enum power_supply_property {
POWER_SUPPLY_PROP_SCOPE,
POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
POWER_SUPPLY_PROP_CALIBRATE,
+ POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
+ POWER_SUPPLY_PROP_RESISTANCE,
+ POWER_SUPPLY_PROP_RESISTANCE_CAPACITIVE,
+ /* Local extensions */
+ POWER_SUPPLY_PROP_USB_HC,
+ POWER_SUPPLY_PROP_USB_OTG,
+ POWER_SUPPLY_PROP_CHARGE_ENABLED,
/* Properties of type `const char *' */
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_SERIAL_NUMBER,
+ POWER_SUPPLY_PROP_BATTERY_TYPE,
};
enum power_supply_type {
@@ -162,6 +183,9 @@ enum power_supply_type {
POWER_SUPPLY_TYPE_USB_DCP, /* Dedicated Charging Port */
POWER_SUPPLY_TYPE_USB_CDP, /* Charging Downstream Port */
POWER_SUPPLY_TYPE_USB_ACA, /* Accessory Charger Adapters */
+	POWER_SUPPLY_TYPE_WIRELESS,		/* Wireless Charger */
+ POWER_SUPPLY_TYPE_BMS, /* Battery Monitor System */
+ POWER_SUPPLY_TYPE_USB_PARALLEL, /* USB Parallel Path */
};
enum power_supply_notifier_events {
@@ -255,6 +279,7 @@ struct power_supply_info {
extern struct atomic_notifier_head power_supply_notifier;
extern int power_supply_reg_notifier(struct notifier_block *nb);
extern void power_supply_unreg_notifier(struct notifier_block *nb);
+#if defined(CONFIG_POWER_SUPPLY)
extern struct power_supply *power_supply_get_by_name(const char *name);
#ifdef CONFIG_OF
extern struct power_supply *power_supply_get_by_phandle(struct device_node *np,
@@ -267,19 +292,71 @@ power_supply_get_by_phandle(struct device_node *np, const char *property)
extern void power_supply_changed(struct power_supply *psy);
extern int power_supply_am_i_supplied(struct power_supply *psy);
extern int power_supply_set_battery_charged(struct power_supply *psy);
-
-#ifdef CONFIG_POWER_SUPPLY
+extern int power_supply_set_current_limit(struct power_supply *psy, int limit);
+extern int power_supply_set_online(struct power_supply *psy, bool enable);
+extern int power_supply_set_health_state(struct power_supply *psy, int health);
+extern int power_supply_set_present(struct power_supply *psy, bool enable);
+extern int power_supply_set_scope(struct power_supply *psy, int scope);
+extern int power_supply_set_usb_otg(struct power_supply *psy, int otg);
+extern int power_supply_set_charge_type(struct power_supply *psy, int type);
+extern int power_supply_set_supply_type(struct power_supply *psy,
+ enum power_supply_type supply_type);
+extern int power_supply_set_hi_power_state(struct power_supply *psy, int value);
+extern int power_supply_set_low_power_state(struct power_supply *psy,
+ int value);
extern int power_supply_is_system_supplied(void);
-#else
-static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
-#endif
-
extern int power_supply_register(struct device *parent,
struct power_supply *psy);
extern int power_supply_register_no_ws(struct device *parent,
struct power_supply *psy);
extern void power_supply_unregister(struct power_supply *psy);
extern int power_supply_powers(struct power_supply *psy, struct device *dev);
+#else
+static inline struct power_supply *power_supply_get_by_name(const char *name)
+ { return NULL; }
+static inline void power_supply_changed(struct power_supply *psy) { }
+static inline int power_supply_am_i_supplied(struct power_supply *psy)
+ { return -ENOSYS; }
+static inline int power_supply_set_battery_charged(struct power_supply *psy)
+ { return -ENOSYS; }
+static inline int power_supply_set_current_limit(struct power_supply *psy,
+ int limit)
+ { return -ENOSYS; }
+static inline int power_supply_set_online(struct power_supply *psy,
+ bool enable)
+ { return -ENOSYS; }
+static inline int power_supply_set_health_state(struct power_supply *psy,
+ int health)
+ { return -ENOSYS; }
+static inline int power_supply_set_present(struct power_supply *psy,
+ bool enable)
+ { return -ENOSYS; }
+static inline int power_supply_set_scope(struct power_supply *psy,
+ int scope)
+ { return -ENOSYS; }
+static inline int power_supply_set_usb_otg(struct power_supply *psy, int otg)
+ { return -ENOSYS; }
+static inline int power_supply_set_charge_type(struct power_supply *psy,
+ int type)
+ { return -ENOSYS; }
+static inline int power_supply_set_supply_type(struct power_supply *psy,
+ enum power_supply_type supply_type)
+ { return -ENOSYS; }
+static inline int power_supply_set_hi_power_state(struct power_supply *psy,
+ int value)
+ { return -ENOSYS; }
+static inline int power_supply_set_low_power_state(struct power_supply *psy,
+ int value)
+ { return -ENOSYS; }
+static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
+static inline int power_supply_register(struct device *parent,
+ struct power_supply *psy)
+ { return -ENOSYS; }
+static inline void power_supply_unregister(struct power_supply *psy) { }
+static inline int power_supply_powers(struct power_supply *psy,
+ struct device *dev)
+ { return -ENOSYS; }
+#endif
/* For APM emulation, think legacy userspace. */
extern struct class *power_supply_class;
@@ -294,9 +371,11 @@ static inline bool power_supply_is_amp_property(enum power_supply_property psp)
case POWER_SUPPLY_PROP_CHARGE_NOW:
case POWER_SUPPLY_PROP_CHARGE_AVG:
case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW:
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
case POWER_SUPPLY_PROP_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX:
case POWER_SUPPLY_PROP_CURRENT_NOW:
case POWER_SUPPLY_PROP_CURRENT_AVG:
case POWER_SUPPLY_PROP_CURRENT_BOOT:
diff --git a/include/linux/qcomwlan_secif.h b/include/linux/qcomwlan_secif.h
new file mode 100644
index 000000000000..6334e3dc7b6b
--- /dev/null
+++ b/include/linux/qcomwlan_secif.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_WLAN_SECIF_H__
+#define __QCOM_WLAN_SECIF_H__
+
+#include <crypto/hash.h>
+
+#define CMAC_TLEN 8 /* CMAC TLen = 64 bits (8 octets) */
+
+/*
+ * Prototypes for WLAN Security Interface Functions
+ */
+
+extern struct crypto_ahash *
+wcnss_wlan_crypto_alloc_ahash(const char *alg_name, u32 type, u32 mask);
+
+extern int wcnss_wlan_crypto_ahash_digest(struct ahash_request *req);
+extern void wcnss_wlan_crypto_free_ahash(struct crypto_ahash *tfm);
+extern int wcnss_wlan_crypto_ahash_setkey(struct crypto_ahash *tfm,
+ const u8 *key, unsigned int keylen);
+extern struct crypto_ablkcipher *
+wcnss_wlan_crypto_alloc_ablkcipher(const char *alg_name, u32 type, u32 mask);
+extern void wcnss_wlan_ablkcipher_request_free(struct ablkcipher_request *req);
+extern void wcnss_wlan_crypto_free_cipher(struct crypto_cipher *tfm);
+extern void wcnss_wlan_crypto_free_ablkcipher(struct crypto_ablkcipher *tfm);
+extern struct crypto_cipher *
+wcnss_wlan_crypto_alloc_cipher(const char *alg_name, u32 type, u32 mask);
+extern void wcnss_wlan_cmac_calc_mic(struct crypto_cipher *tfm, u8 *m,
+ u16 length, u8 *mac);
+
+#endif /* __QCOM_WLAN_SECIF_H__ */
diff --git a/include/linux/qfp_fuse.h b/include/linux/qfp_fuse.h
new file mode 100644
index 000000000000..d2f89618adc0
--- /dev/null
+++ b/include/linux/qfp_fuse.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _QFP_FUSE_H_
+#define _QFP_FUSE_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define QFP_FUSE_IOC_MAGIC 0x92
+
+#define QFP_FUSE_IOC_WRITE _IO(QFP_FUSE_IOC_MAGIC, 1)
+#define QFP_FUSE_IOC_READ _IO(QFP_FUSE_IOC_MAGIC, 2)
+
+
+/*
+ * This structure is used to exchange the fuse parameters with the user
+ * space application. The pointer to this structure is passed to the ioctl
+ * function.
+ * offset = offset from the QFPROM base for the data to be read/written.
+ * size = number of 32-bit words to be read/written.
+ * data = pointer to the 32 bit word denoting userspace data.
+ */
+struct qfp_fuse_req {
+ u32 offset;
+ u32 size;
+ u32 *data;
+};
+
+#endif
diff --git a/include/linux/qpnp-misc.h b/include/linux/qpnp-misc.h
new file mode 100644
index 000000000000..c65e629dd3cc
--- /dev/null
+++ b/include/linux/qpnp-misc.h
@@ -0,0 +1,38 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QPNP_MISC_H
+#define __QPNP_MISC_H
+
+#include <linux/errno.h>
+
+#ifdef CONFIG_QPNP_MISC
+/**
+ * qpnp_misc_irqs_available - check if IRQs are available
+ *
+ * @consumer_dev: device struct
+ *
+ * This function returns true if the MISC interrupts are available
+ * based on a check in the MISC peripheral revision registers.
+ *
+ * Any consumer of this function needs to reference a MISC device phandle
+ * using the "qcom,misc-ref" property in their device tree node.
+ */
+
+int qpnp_misc_irqs_available(struct device *consumer_dev);
+#else
+static inline int qpnp_misc_irqs_available(struct device *consumer_dev)
+{
+	return 0;
+}
+#endif
+#endif
diff --git a/include/linux/qpnp-revid.h b/include/linux/qpnp-revid.h
new file mode 100644
index 000000000000..c5d6204afb88
--- /dev/null
+++ b/include/linux/qpnp-revid.h
@@ -0,0 +1,139 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QPNP_REVID
+#define __QPNP_REVID
+
+#define PM8226_V2P2_REV1 0x00
+#define PM8226_V2P2_REV2 0x00
+#define PM8226_V2P2_REV3 0x02
+#define PM8226_V2P2_REV4 0x02
+#define PM8226_V2P2_TYPE 0x51
+#define PM8226_V2P2_SUBTYPE 0x04
+
+#define PM8226_V2P1_REV1 0x00
+#define PM8226_V2P1_REV2 0x00
+#define PM8226_V2P1_REV3 0x01
+#define PM8226_V2P1_REV4 0x02
+#define PM8226_V2P1_TYPE 0x51
+#define PM8226_V2P1_SUBTYPE 0x04
+
+#define PM8226_V2P0_REV1 0x00
+#define PM8226_V2P0_REV2 0x00
+#define PM8226_V2P0_REV3 0x00
+#define PM8226_V2P0_REV4 0x02
+#define PM8226_V2P0_TYPE 0x51
+#define PM8226_V2P0_SUBTYPE 0x04
+
+#define PM8226_V1P0_REV1 0x00
+#define PM8226_V1P0_REV2 0x00
+#define PM8226_V1P0_REV3 0x00
+#define PM8226_V1P0_REV4 0x00
+#define PM8226_V1P0_TYPE 0x51
+#define PM8226_V1P0_SUBTYPE 0x04
+
+#define PM8941_V1P0_REV1 0x00
+#define PM8941_V1P0_REV2 0x00
+#define PM8941_V1P0_REV3 0x00
+#define PM8941_V1P0_REV4 0x01
+#define PM8941_V1P0_TYPE 0x51
+#define PM8941_V1P0_SUBTYPE 0x01
+
+#define PM8941_V2P0_REV1 0x00
+#define PM8941_V2P0_REV2 0x00
+#define PM8941_V2P0_REV3 0x00
+#define PM8941_V2P0_REV4 0x01
+#define PM8941_V2P0_TYPE 0x51
+#define PM8941_V2P0_SUBTYPE 0x01
+
+#define PM8941_V3P0_REV1 0x00
+#define PM8941_V3P0_REV2 0x00
+#define PM8941_V3P0_REV3 0x00
+#define PM8941_V3P0_REV4 0x03
+#define PM8941_V3P0_TYPE 0x51
+#define PM8941_V3P0_SUBTYPE 0x01
+
+#define PM8941_V3P1_REV1 0x00
+#define PM8941_V3P1_REV2 0x00
+#define PM8941_V3P1_REV3 0x01
+#define PM8941_V3P1_REV4 0x03
+#define PM8941_V3P1_TYPE 0x51
+#define PM8941_V3P1_SUBTYPE 0x01
+
+#define PM8110_V1P0_REV1 0x00
+#define PM8110_V1P0_REV2 0x00
+#define PM8110_V1P0_REV3 0x00
+#define PM8110_V1P0_REV4 0x01
+#define PM8110_V1P0_TYPE 0x51
+#define PM8110_V1P0_SUBTYPE 0x05
+
+#define PM8110_V1P1_REV1 0x00
+#define PM8110_V1P1_REV2 0x01
+#define PM8110_V1P1_REV3 0x00
+#define PM8110_V1P1_REV4 0x01
+#define PM8110_V1P1_TYPE 0x51
+#define PM8110_V1P1_SUBTYPE 0x05
+
+#define PM8110_V1P3_REV1 0x00
+#define PM8110_V1P3_REV2 0x03
+#define PM8110_V1P3_REV3 0x00
+#define PM8110_V1P3_REV4 0x01
+#define PM8110_V1P3_TYPE 0x51
+#define PM8110_V1P3_SUBTYPE 0x05
+
+#define PM8110_V2P0_REV1 0x00
+#define PM8110_V2P0_REV2 0x00
+#define PM8110_V2P0_REV3 0x00
+#define PM8110_V2P0_REV4 0x02
+#define PM8110_V2P0_TYPE 0x51
+#define PM8110_V2P0_SUBTYPE 0x05
+
+#define PM8916_V1P0_REV1 0x00
+#define PM8916_V1P0_REV2 0x00
+#define PM8916_V1P0_REV3 0x00
+#define PM8916_V1P0_REV4 0x01
+#define PM8916_V1P0_TYPE 0x51
+#define PM8916_V1P0_SUBTYPE 0x0B
+
+#define PM8916_V1P1_REV1 0x00
+#define PM8916_V1P1_REV2 0x00
+#define PM8916_V1P1_REV3 0x01
+#define PM8916_V1P1_REV4 0x01
+#define PM8916_V1P1_TYPE 0x51
+#define PM8916_V1P1_SUBTYPE 0x0B
+
+#define PM8916_V2P0_REV1 0x00
+#define PM8916_V2P0_REV2 0x00
+#define PM8916_V2P0_REV3 0x00
+#define PM8916_V2P0_REV4 0x02
+#define PM8916_V2P0_TYPE 0x51
+#define PM8916_V2P0_SUBTYPE 0x0B
+
+struct pmic_revid_data {
+ u8 rev1;
+ u8 rev2;
+ u8 rev3;
+ u8 rev4;
+ u8 pmic_type;
+ u8 pmic_subtype;
+};
+
+#ifdef CONFIG_QPNP_REVID
+struct pmic_revid_data *get_revid_data(struct device_node *dev_node);
+#else
+static inline
+struct pmic_revid_data *get_revid_data(struct device_node *dev_node)
+{
+ return NULL;
+}
+#endif
+#endif
diff --git a/include/linux/qpnp/clkdiv.h b/include/linux/qpnp/clkdiv.h
new file mode 100644
index 000000000000..52537115b2ff
--- /dev/null
+++ b/include/linux/qpnp/clkdiv.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef QPNP_CLKDIV_H
+#define QPNP_CLKDIV_H
+
+enum q_clkdiv_cfg {
+ Q_CLKDIV_NO_CLK = 0,
+ Q_CLKDIV_XO_DIV_1,
+ Q_CLKDIV_XO_DIV_2,
+ Q_CLKDIV_XO_DIV_4,
+ Q_CLKDIV_XO_DIV_8,
+ Q_CLKDIV_XO_DIV_16,
+ Q_CLKDIV_XO_DIV_32,
+ Q_CLKDIV_XO_DIV_64,
+ Q_CLKDIV_INVALID,
+};
+
+struct q_clkdiv;
+
+struct q_clkdiv *qpnp_clkdiv_get(struct device *dev, const char *name);
+int qpnp_clkdiv_enable(struct q_clkdiv *q_clkdiv);
+int qpnp_clkdiv_disable(struct q_clkdiv *q_clkdiv);
+int qpnp_clkdiv_config(struct q_clkdiv *q_clkdiv,
+ enum q_clkdiv_cfg cfg);
+#endif
diff --git a/include/linux/qpnp/pin.h b/include/linux/qpnp/pin.h
new file mode 100644
index 000000000000..4c23f091dcc0
--- /dev/null
+++ b/include/linux/qpnp/pin.h
@@ -0,0 +1,190 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* Mode select */
+#define QPNP_PIN_MODE_DIG_IN 0
+#define QPNP_PIN_MODE_DIG_OUT 1
+#define QPNP_PIN_MODE_DIG_IN_OUT 2
+#define QPNP_PIN_MODE_BIDIR 3
+#define QPNP_PIN_MODE_AIN 4
+#define QPNP_PIN_MODE_AOUT 5
+#define QPNP_PIN_MODE_SINK 6
+
+/* Invert source select (GPIO, MPP) */
+#define QPNP_PIN_INVERT_DISABLE 0
+#define QPNP_PIN_INVERT_ENABLE 1
+
+/* Output type (GPIO) */
+#define QPNP_PIN_OUT_BUF_CMOS 0
+#define QPNP_PIN_OUT_BUF_OPEN_DRAIN_NMOS 1
+#define QPNP_PIN_OUT_BUF_OPEN_DRAIN_PMOS 2
+
+/* Voltage select (GPIO, MPP) */
+#define QPNP_PIN_VIN0 0
+#define QPNP_PIN_VIN1 1
+#define QPNP_PIN_VIN2 2
+#define QPNP_PIN_VIN3 3
+#define QPNP_PIN_VIN4 4
+#define QPNP_PIN_VIN5 5
+#define QPNP_PIN_VIN6 6
+#define QPNP_PIN_VIN7 7
+
+/* Pull Up Values (GPIO) */
+#define QPNP_PIN_GPIO_PULL_UP_30 0
+#define QPNP_PIN_GPIO_PULL_UP_1P5 1
+#define QPNP_PIN_GPIO_PULL_UP_31P5 2
+#define QPNP_PIN_GPIO_PULL_UP_1P5_30 3
+#define QPNP_PIN_GPIO_PULL_DN 4
+#define QPNP_PIN_GPIO_PULL_NO 5
+
+/* Pull Up Values (MPP) */
+#define QPNP_PIN_MPP_PULL_UP_0P6KOHM 0
+#define QPNP_PIN_MPP_PULL_UP_OPEN 1
+#define QPNP_PIN_MPP_PULL_UP_10KOHM 2
+#define QPNP_PIN_MPP_PULL_UP_30KOHM 3
+
+/* Out Strength (GPIO) */
+#define QPNP_PIN_OUT_STRENGTH_LOW 1
+#define QPNP_PIN_OUT_STRENGTH_MED 2
+#define QPNP_PIN_OUT_STRENGTH_HIGH 3
+
+/* Source Select (GPIO) / Enable Select (MPP) */
+#define QPNP_PIN_SEL_FUNC_CONSTANT 0
+#define QPNP_PIN_SEL_FUNC_PAIRED 1
+#define QPNP_PIN_SEL_FUNC_1 2
+#define QPNP_PIN_SEL_FUNC_2 3
+#define QPNP_PIN_SEL_DTEST1 4
+#define QPNP_PIN_SEL_DTEST2 5
+#define QPNP_PIN_SEL_DTEST3 6
+#define QPNP_PIN_SEL_DTEST4 7
+
+/* Master enable (GPIO, MPP) */
+#define QPNP_PIN_MASTER_DISABLE 0
+#define QPNP_PIN_MASTER_ENABLE 1
+
+/* Analog Output (MPP) */
+#define QPNP_PIN_AOUT_1V25 0
+#define QPNP_PIN_AOUT_0V625 1
+#define QPNP_PIN_AOUT_0V3125 2
+#define QPNP_PIN_AOUT_MPP 3
+#define QPNP_PIN_AOUT_ABUS1 4
+#define QPNP_PIN_AOUT_ABUS2 5
+#define QPNP_PIN_AOUT_ABUS3 6
+#define QPNP_PIN_AOUT_ABUS4 7
+
+/* Analog Input (MPP) */
+#define QPNP_PIN_AIN_AMUX_CH5 0
+#define QPNP_PIN_AIN_AMUX_CH6 1
+#define QPNP_PIN_AIN_AMUX_CH7 2
+#define QPNP_PIN_AIN_AMUX_CH8 3
+#define QPNP_PIN_AIN_AMUX_ABUS1 4
+#define QPNP_PIN_AIN_AMUX_ABUS2 5
+#define QPNP_PIN_AIN_AMUX_ABUS3 6
+#define QPNP_PIN_AIN_AMUX_ABUS4 7
+
+/* Current Sink (MPP) */
+#define QPNP_PIN_CS_OUT_5MA 0
+#define QPNP_PIN_CS_OUT_10MA 1
+#define QPNP_PIN_CS_OUT_15MA 2
+#define QPNP_PIN_CS_OUT_20MA 3
+#define QPNP_PIN_CS_OUT_25MA 4
+#define QPNP_PIN_CS_OUT_30MA 5
+#define QPNP_PIN_CS_OUT_35MA 6
+#define QPNP_PIN_CS_OUT_40MA 7
+
+/**
+ * struct qpnp_pin_cfg - structure to specify pin configuration values
+ * @mode: indicates whether the pin should be input, output, or
+ * both for gpios. mpp pins also support bidirectional,
+ * analog in, analog out and current sink. This value
+ * should be of type QPNP_PIN_MODE_*.
+ * @output_type: indicates pin should be configured as CMOS or open
+ * drain. Should be of the type QPNP_PIN_OUT_BUF_*. This
+ * setting applies for gpios only.
+ * @invert: Invert the signal of the line -
+ * QPNP_PIN_INVERT_DISABLE or QPNP_PIN_INVERT_ENABLE.
+ * @pull: This parameter should be programmed to different values
+ * depending on whether it's GPIO or MPP.
+ * For GPIO, it indicates whether a pull up or pull down
+ * should be applied. If a pullup is required the
+ * current strength needs to be specified.
+ * Current values of 30uA, 1.5uA, 31.5uA, 1.5uA with 30uA
+ * boost are supported. This value should be one of
+ * the QPNP_PIN_GPIO_PULL_*. Note that the hardware ignores
+ * this configuration if the GPIO is not set to input or
+ * output open-drain mode.
+ * For MPP, it indicates whether a pullup should be
+ * applied for bidirectional mode only. The hardware
+ * ignores the configuration when operating in other modes.
+ * This value should be one of the QPNP_PIN_MPP_PULL_*.
+ * @vin_sel: specifies the voltage level when the output is set to 1.
+ * For an input gpio specifies the voltage level at which
+ * the input is interpreted as a logical 1.
+ * @out_strength: the amount of current supplied for an output gpio,
+ * should be of the type QPNP_PIN_STRENGTH_*.
+ * @src_sel: select alternate function for the pin. Certain pins
+ * can be paired (shorted) with each other. Some pins
+ * can act as alternate functions. In the context of
+ * gpio, this acts as a source select. For mpps,
+ * this is an enable select.
+ * This parameter should be of type QPNP_PIN_SEL_*.
+ * @master_en: QPNP_PIN_MASTER_ENABLE = Enable features within the
+ * pin block based on configurations.
+ * QPNP_PIN_MASTER_DISABLE = Completely disable the pin
+ * block and let the pin float with high impedance
+ * regardless of other settings.
+ * @aout_ref: Set the analog output reference. This parameter should
+ * be of type QPNP_PIN_AOUT_*. This parameter only applies
+ * to mpp pins.
+ * @ain_route: Set the source for analog input. This parameter
+ * should be of type QPNP_PIN_AIN_*. This parameter only
+ * applies to mpp pins.
+ * @cs_out: Set the amount of current to sink in mA. This
+ * parameter should be of type QPNP_PIN_CS_OUT_*. This
+ * parameter only applies to mpp pins.
+ */
+struct qpnp_pin_cfg {
+ int mode;
+ int output_type;
+ int invert;
+ int pull;
+ int vin_sel;
+ int out_strength;
+ int src_sel;
+ int master_en;
+ int aout_ref;
+ int ain_route;
+ int cs_out;
+};
+
+/**
+ * qpnp_pin_config - Apply pin configuration for Linux gpio
+ * @gpio: Linux gpio number to configure.
+ * @param: parameters to configure.
+ *
+ * This routine takes a Linux gpio number that corresponds with a
+ * PMIC pin and applies the configuration specified in 'param'.
+ * This gpio number can be ascertained by of_get_gpio_flags() or
+ * the qpnp_pin_map_gpio() API.
+ */
+int qpnp_pin_config(int gpio, struct qpnp_pin_cfg *param);
+
+/**
+ * qpnp_pin_map - Obtain Linux GPIO number from device spec
+ * @name: Name assigned by the 'label' binding for the primary node.
+ * @pmic_pin: PMIC pin number to lookup.
+ *
+ * This routine is used in legacy configurations that do not support
+ * Device Tree. If you are using Device Tree, you should not use this.
+ * For such cases, use of_get_gpio() or friends instead.
+ */
+int qpnp_pin_map(const char *name, uint32_t pmic_pin);
diff --git a/include/linux/qpnp/power-on.h b/include/linux/qpnp/power-on.h
new file mode 100644
index 000000000000..ae4e731aa90a
--- /dev/null
+++ b/include/linux/qpnp/power-on.h
@@ -0,0 +1,73 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef QPNP_PON_H
+#define QPNP_PON_H
+
+#include <linux/errno.h>
+
+/**
+ * enum pon_trigger_source: List of PON trigger sources
+ * %PON_SMPL: PON triggered by SMPL - Sudden Momentary Power Loss
+ * %PON_RTC: PON triggered by RTC alarm
+ * %PON_DC_CHG: PON triggered by insertion of DC charger
+ * %PON_USB_CHG: PON triggered by insertion of USB
+ * %PON_PON1: PON triggered by other PMIC (multi-PMIC option)
+ * %PON_CBLPWR_N: PON triggered by power-cable insertion
+ * %PON_KPDPWR_N: PON triggered by long press of the power-key
+ */
+enum pon_trigger_source {
+ PON_SMPL = 1,
+ PON_RTC,
+ PON_DC_CHG,
+ PON_USB_CHG,
+ PON_PON1,
+ PON_CBLPWR_N,
+ PON_KPDPWR_N,
+};
+
+/**
+ * enum pon_power_off_type: Possible power off actions to perform
+ * %PON_POWER_OFF_WARM_RESET: Reset the MSM but not all PMIC peripherals
+ * %PON_POWER_OFF_SHUTDOWN: Shutdown the MSM and PMIC completely
+ * %PON_POWER_OFF_HARD_RESET: Reset the MSM and all PMIC peripherals
+ *
+ */
+enum pon_power_off_type {
+ PON_POWER_OFF_WARM_RESET = 0x01,
+ PON_POWER_OFF_SHUTDOWN = 0x04,
+ PON_POWER_OFF_HARD_RESET = 0x07,
+};
+
+#ifdef CONFIG_QPNP_POWER_ON
+int qpnp_pon_system_pwr_off(enum pon_power_off_type type);
+int qpnp_pon_is_warm_reset(void);
+int qpnp_pon_trigger_config(enum pon_trigger_source pon_src, bool enable);
+int qpnp_pon_wd_config(bool enable);
+#else
+static inline int qpnp_pon_system_pwr_off(enum pon_power_off_type type)
+{
+	return -ENODEV;
+}
+static inline int qpnp_pon_is_warm_reset(void) { return -ENODEV; }
+static inline int qpnp_pon_trigger_config(enum pon_trigger_source pon_src,
+					bool enable)
+{
+	return -ENODEV;
+}
+static inline int qpnp_pon_wd_config(bool enable)
+{
+	return -ENODEV;
+}
+#endif
+
+#endif
diff --git a/include/linux/qpnp/pwm.h b/include/linux/qpnp/pwm.h
new file mode 100644
index 000000000000..50fb2e52a225
--- /dev/null
+++ b/include/linux/qpnp/pwm.h
@@ -0,0 +1,184 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QPNP_PWM_H__
+#define __QPNP_PWM_H__
+
+#include <linux/pwm.h>
+
+/* usec: 19.2M, n=6, m=0, pre=2 */
+#define PM_PWM_PERIOD_MIN 7
+/* 1K, n=9, m=7, pre=6 */
+#define PM_PWM_PERIOD_MAX (384 * USEC_PER_SEC)
+#define PM_PWM_LUT_RAMP_STEP_TIME_MAX 499
+#define PM_PWM_MAX_PAUSE_CNT 8191
+/*
+ * Formula from HSID,
+ * pause_time (hi/lo) = (pause_code - 1)*(duty_ms)
+ */
+#define PM_PWM_LUT_PAUSE_MAX \
+ ((PM_PWM_MAX_PAUSE_CNT - 1) * PM_PWM_LUT_RAMP_STEP_TIME_MAX) /* ms */
+
+/* Flags for Look Up Table */
+#define PM_PWM_LUT_LOOP 0x01
+#define PM_PWM_LUT_RAMP_UP 0x02
+#define PM_PWM_LUT_REVERSE 0x04
+#define PM_PWM_LUT_PAUSE_HI_EN 0x08
+#define PM_PWM_LUT_PAUSE_LO_EN 0x10
+
+#define PM_PWM_LUT_NO_TABLE 0x20
+#define PM_PWM_LUT_USE_RAW_VALUE 0x40
+
+/*
+ * PWM frequency/period control
+ *
+ * PWM Frequency = ClockFrequency / (N * T)
+ * or
+ * PWM Period = Clock Period * (N * T)
+ * where
+ * N = 2^9 or 2^6 for 9-bit or 6-bit PWM size
+ * T = Pre-divide * 2^m, m = 0..7 (exponent)
+ */
+
+/*
+ * enum pm_pwm_size - PWM bit mode selection
+ * %PM_PWM_SIZE_6BIT - Select 6 bit mode; 64 levels
+ * %PM_PWM_SIZE_9BIT - Select 9 bit mode; 512 levels
+ */
+enum pm_pwm_size {
+ PM_PWM_SIZE_6BIT = 6,
+ PM_PWM_SIZE_9BIT = 9,
+};
+
+/*
+ * enum pm_pwm_clk - PWM clock selection
+ * %PM_PWM_CLK_1KHZ - 1KHz clock
+ * %PM_PWM_CLK_32KHZ - 32KHz clock
+ * %PM_PWM_CLK_19P2MHZ - 19.2MHz clock
+ * Note: Here 1KHz = 1024Hz
+ */
+enum pm_pwm_clk {
+ PM_PWM_CLK_1KHZ,
+ PM_PWM_CLK_32KHZ,
+ PM_PWM_CLK_19P2MHZ,
+};
+
+/* PWM pre-divider selection */
+enum pm_pwm_pre_div {
+ PM_PWM_PDIV_2,
+ PM_PWM_PDIV_3,
+ PM_PWM_PDIV_5,
+ PM_PWM_PDIV_6,
+};
+
+/*
+ * struct pwm_period_config - PWM period configuration
+ * @pwm_size: enum pm_pwm_size
+ * @clk: enum pm_pwm_clk
+ * @pre_div: enum pm_pwm_pre_div
+ * @pre_div_exp: exponent of 2 as part of pre-divider: 0..7
+ */
+struct pwm_period_config {
+ enum pm_pwm_size pwm_size;
+ enum pm_pwm_clk clk;
+ enum pm_pwm_pre_div pre_div;
+ int pre_div_exp;
+};
+
+/*
+ * struct pwm_duty_cycles - PWM duty cycle info
+ * duty_pcts - pointer to an array of duty percentage for a pwm period
+ * num_duty_pcts - total entries in duty_pcts array
+ * duty_ms - duty cycle time in ms
+ * start_idx - index in the LUT
+ */
+struct pwm_duty_cycles {
+ int *duty_pcts;
+ int num_duty_pcts;
+ int duty_ms;
+ int start_idx;
+};
+
+int pwm_config_period(struct pwm_device *pwm,
+ struct pwm_period_config *pwm_p);
+
+int pwm_config_pwm_value(struct pwm_device *pwm, int pwm_value);
+
+/*
+ * enum pm_pwm_mode - PWM mode selection
+ * %PM_PWM_MODE_PWM - Select PWM mode
+ * %PM_PWM_MODE_LPG - Select LPG mode
+ */
+enum pm_pwm_mode {
+ PM_PWM_MODE_PWM,
+ PM_PWM_MODE_LPG,
+};
+
+int pwm_change_mode(struct pwm_device *pwm, enum pm_pwm_mode mode);
+
+/*
+ * lut_params: Lookup table (LUT) parameters
+ * @start_idx: start index in lookup table from 0 to MAX-1
+ * @idx_len: number of index
+ * @lut_pause_lo: pause time in millisecond at low index
+ * @lut_pause_hi: pause time in millisecond at high index
+ * @ramp_step_ms: time before loading next LUT pattern in millisecond
+ * @flags: control flags
+ */
+struct lut_params {
+ int start_idx;
+ int idx_len;
+ int lut_pause_hi;
+ int lut_pause_lo;
+ int ramp_step_ms;
+ int flags;
+};
+
+int pwm_lut_config(struct pwm_device *pwm, int period_us,
+ int duty_pct[], struct lut_params lut_params);
+
+/*
+ * support microsecond level configuration
+ */
+int pwm_config_us(struct pwm_device *pwm,
+ int duty_us, int period_us);
+
+/* Standard APIs supported */
+/*
+ * pwm_request - request a PWM device
+ * @pwm_id: PWM id or channel
+ * @label: the label to identify the user
+ */
+
+/*
+ * pwm_free - free a PWM device
+ * @pwm: the PWM device
+ */
+
+/*
+ * pwm_config - change a PWM device configuration
+ * @pwm: the PWM device
+ * @period_ns: period in nanosecond
+ * @duty_ns: duty cycle in nanosecond
+ */
+
+/*
+ * pwm_enable - start a PWM output toggling
+ * @pwm: the PWM device
+ */
+
+/*
+ * pwm_disable - stop a PWM output toggling
+ * @pwm: the PWM device
+ */
+
+#endif /* __QPNP_PWM_H__ */
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 67fc8fcdc4b0..c9c38aa25ac5 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -34,7 +34,6 @@ extern int reboot_default;
extern int reboot_cpu;
extern int reboot_force;
-
extern int register_reboot_notifier(struct notifier_block *);
extern int unregister_reboot_notifier(struct notifier_block *);
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index f540b1496e2f..75ae66fbdaef 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -36,6 +36,8 @@
#define __LINUX_REGULATOR_CONSUMER_H_
#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
struct device;
struct notifier_block;
@@ -138,6 +140,10 @@ struct regulator;
* using the bulk regulator APIs.
* @consumer: The regulator consumer for the supply. This will be managed
* by the bulk API.
+ * @min_uV: The minimum requested voltage for the regulator (in microvolts),
+ * or 0 to not set a voltage.
+ * @max_uV: The maximum requested voltage for the regulator (in microvolts),
+ * or 0 to use @min_uV.
*
* The regulator APIs provide a series of regulator_bulk_() API calls as
* a convenience to consumers which require multiple supplies. This
@@ -146,6 +152,8 @@ struct regulator;
struct regulator_bulk_data {
const char *supply;
struct regulator *consumer;
+ int min_uV;
+ int max_uV;
/* private: Internal use */
int ret;
@@ -210,6 +218,8 @@ int __must_check devm_regulator_bulk_get(struct device *dev, int num_consumers,
struct regulator_bulk_data *consumers);
int __must_check regulator_bulk_enable(int num_consumers,
struct regulator_bulk_data *consumers);
+int regulator_bulk_set_voltage(int num_consumers,
+ struct regulator_bulk_data *consumers);
int regulator_bulk_disable(int num_consumers,
struct regulator_bulk_data *consumers);
int regulator_bulk_force_disable(int num_consumers,
diff --git a/include/linux/regulator/cpr-regulator.h b/include/linux/regulator/cpr-regulator.h
new file mode 100644
index 000000000000..e49bc86707f1
--- /dev/null
+++ b/include/linux/regulator/cpr-regulator.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __REGULATOR_CPR_REGULATOR_H__
+#define __REGULATOR_CPR_REGULATOR_H__
+
+#include <linux/regulator/machine.h>
+
+#define CPR_REGULATOR_DRIVER_NAME "qcom,cpr-regulator"
+
+/**
+ * enum cpr_fuse_corner_enum - CPR fuse corner enum values
+ * %CPR_FUSE_CORNER_SVS: Lowest voltage for APC
+ * %CPR_FUSE_CORNER_NORMAL: Normal mode voltage
+ * %CPR_FUSE_CORNER_TURBO: Turbo mode voltage
+ * %CPR_FUSE_CORNER_SUPER_TURBO: Super Turbo mode voltage
+ *
+ * These should be used in regulator_set_voltage() for CPR
+ * regulator as if they had units of uV.
+ */
+enum cpr_fuse_corner_enum {
+ CPR_FUSE_CORNER_SVS = 1,
+ CPR_FUSE_CORNER_NORMAL,
+ CPR_FUSE_CORNER_TURBO,
+ CPR_FUSE_CORNER_MAX,
+};
+
+/**
+ * enum vdd_mx_vmin_method - Method to determine vmin for vdd-mx
+ * %VDD_MX_VMIN_APC: Equal to APC voltage
+ * %VDD_MX_VMIN_APC_CORNER_CEILING: Equal to PVS corner ceiling voltage
+ * %VDD_MX_VMIN_APC_SLOW_CORNER_CEILING:
+ * Equal to slow speed corner ceiling
+ * %VDD_MX_VMIN_MX_VMAX: Equal to specified vdd-mx-vmax voltage
+ * %VDD_MX_VMIN_APC_CORNER_MAP: Equal to the APC corner mapped MX
+ * voltage
+ */
+enum vdd_mx_vmin_method {
+ VDD_MX_VMIN_APC,
+ VDD_MX_VMIN_APC_CORNER_CEILING,
+ VDD_MX_VMIN_APC_SLOW_CORNER_CEILING,
+ VDD_MX_VMIN_MX_VMAX,
+ VDD_MX_VMIN_APC_CORNER_MAP,
+};
+
+#ifdef CONFIG_REGULATOR_CPR
+
+int __init cpr_regulator_init(void);
+
+#else
+
+static inline int __init cpr_regulator_init(void)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_REGULATOR_CPR */
+
+#endif /* __REGULATOR_CPR_REGULATOR_H__ */
diff --git a/include/linux/regulator/krait-regulator.h b/include/linux/regulator/krait-regulator.h
new file mode 100644
index 000000000000..10d7e2675f8f
--- /dev/null
+++ b/include/linux/regulator/krait-regulator.h
@@ -0,0 +1,39 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __KRAIT_REGULATOR_H__
+#define __KRAIT_REGULATOR_H__
+
+#define KRAIT_REGULATOR_DRIVER_NAME "krait-power-regulator"
+#define KRAIT_PDN_DRIVER_NAME "krait-pdn"
+
+/**
+ * krait_power_init - driver initialization function
+ *
+ * This function registers the krait-power-regulator platform driver. This
+ * should be called from appropriate initialization code. Returns 0 on
+ * success and error on failure.
+ */
+
+#ifdef CONFIG_KRAIT_REGULATOR
+int __init krait_power_init(void);
+void secondary_cpu_hs_init(void *base_ptr, int cpu);
+#else
+static inline int __init krait_power_init(void)
+{
+ return -ENOSYS;
+}
+
+static inline void secondary_cpu_hs_init(void *base_ptr, int cpu) {}
+#endif
+
+#endif
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 0b08d05d470b..f144748f3f9b 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -196,10 +196,20 @@ int regulator_suspend_finish(void);
#ifdef CONFIG_REGULATOR
void regulator_has_full_constraints(void);
+void regulator_use_dummy_regulator(void);
+void regulator_suppress_info_printing(void);
#else
static inline void regulator_has_full_constraints(void)
{
}
+
+static inline void regulator_use_dummy_regulator(void)
+{
+}
+
+static inline void regulator_suppress_info_printing(void)
+{
+}
#endif
#endif
diff --git a/include/linux/regulator/qpnp-regulator.h b/include/linux/regulator/qpnp-regulator.h
new file mode 100644
index 000000000000..c7afeb50f244
--- /dev/null
+++ b/include/linux/regulator/qpnp-regulator.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __REGULATOR_QPNP_REGULATOR_H__
+#define __REGULATOR_QPNP_REGULATOR_H__
+
+#include <linux/regulator/machine.h>
+
+#define QPNP_REGULATOR_DRIVER_NAME "qcom,qpnp-regulator"
+
+/* Pin control enable input pins. */
+#define QPNP_REGULATOR_PIN_CTRL_ENABLE_NONE 0x00
+#define QPNP_REGULATOR_PIN_CTRL_ENABLE_EN0 0x01
+#define QPNP_REGULATOR_PIN_CTRL_ENABLE_EN1 0x02
+#define QPNP_REGULATOR_PIN_CTRL_ENABLE_EN2 0x04
+#define QPNP_REGULATOR_PIN_CTRL_ENABLE_EN3 0x08
+#define QPNP_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT 0x10
+
+/* Pin control high power mode input pins. */
+#define QPNP_REGULATOR_PIN_CTRL_HPM_NONE 0x00
+#define QPNP_REGULATOR_PIN_CTRL_HPM_EN0 0x01
+#define QPNP_REGULATOR_PIN_CTRL_HPM_EN1 0x02
+#define QPNP_REGULATOR_PIN_CTRL_HPM_EN2 0x04
+#define QPNP_REGULATOR_PIN_CTRL_HPM_EN3 0x08
+#define QPNP_REGULATOR_PIN_CTRL_HPM_SLEEP_B 0x10
+#define QPNP_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT 0x20
+
+/*
+ * Used with enable parameters to specify that hardware default register values
+ * should be left unaltered.
+ */
+#define QPNP_REGULATOR_DISABLE 0
+#define QPNP_REGULATOR_ENABLE 1
+#define QPNP_REGULATOR_USE_HW_DEFAULT 2
+
+/* Soft start strength of a voltage switch type regulator */
+enum qpnp_vs_soft_start_str {
+ QPNP_VS_SOFT_START_STR_0P05_UA,
+ QPNP_VS_SOFT_START_STR_0P25_UA,
+ QPNP_VS_SOFT_START_STR_0P55_UA,
+ QPNP_VS_SOFT_START_STR_0P75_UA,
+ QPNP_VS_SOFT_START_STR_HW_DEFAULT,
+};
+
+/* Current limit of a boost type regulator */
+enum qpnp_boost_current_limit {
+ QPNP_BOOST_CURRENT_LIMIT_300_MA,
+ QPNP_BOOST_CURRENT_LIMIT_600_MA,
+ QPNP_BOOST_CURRENT_LIMIT_900_MA,
+ QPNP_BOOST_CURRENT_LIMIT_1200_MA,
+ QPNP_BOOST_CURRENT_LIMIT_1500_MA,
+ QPNP_BOOST_CURRENT_LIMIT_1800_MA,
+ QPNP_BOOST_CURRENT_LIMIT_2100_MA,
+ QPNP_BOOST_CURRENT_LIMIT_2400_MA,
+ QPNP_BOOST_CURRENT_LIMIT_HW_DEFAULT,
+};
+
+/**
+ * struct qpnp_regulator_platform_data - qpnp-regulator initialization data
+ * @init_data: regulator constraints
+ * @pull_down_enable: 1 = Enable output pull down resistor when the
+ * regulator is disabled
+ * 0 = Disable pull down resistor
+ * QPNP_REGULATOR_USE_HW_DEFAULT = do not modify
+ * pull down state
+ * @pin_ctrl_enable: Bit mask specifying which hardware pins should be
+ * used to enable the regulator, if any
+ * Value should be an ORing of
+ * QPNP_REGULATOR_PIN_CTRL_ENABLE_* constants. If
+ * the bit specified by
+ * QPNP_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT is
+ * set, then pin control enable hardware registers
+ * will not be modified.
+ * @pin_ctrl_hpm: Bit mask specifying which hardware pins should be
+ * used to force the regulator into high power
+ * mode, if any
+ * Value should be an ORing of
+ * QPNP_REGULATOR_PIN_CTRL_HPM_* constants. If
+ * the bit specified by
+ * QPNP_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT is
+ * set, then pin control mode hardware registers
+ * will not be modified.
+ * @system_load: Load in uA present on regulator that is not captured
+ * by any consumer request
+ * @enable_time: Time in us to delay after enabling the regulator
+ * @ocp_enable: 1 = Allow over current protection (OCP) to be
+ * enabled for voltage switch type regulators so
+ * that they latch off automatically when over
+ * current is detected. OCP is enabled when in HPM
+ * or auto mode.
+ * 0 = Disable OCP
+ * QPNP_REGULATOR_USE_HW_DEFAULT = do not modify
+ * OCP state
+ * @ocp_irq: IRQ number of the voltage switch OCP IRQ. If
+ * specified the voltage switch will be toggled off
+ * and back on when OCP triggers in order to handle
+ * high in-rush current.
+ * @ocp_max_retries: Maximum number of times to try toggling a voltage
+ * switch off and back on as a result of
+ * consecutive over current events.
+ * @ocp_retry_delay_ms: Time to delay in milliseconds between each
+ * voltage switch toggle after an over current
+ * event takes place.
+ * @boost_current_limit: This parameter sets the current limit of boost type
+ * regulators. Its value should be one of
+ * QPNP_BOOST_CURRENT_LIMIT_*. If its value is
+ * QPNP_BOOST_CURRENT_LIMIT_HW_DEFAULT, then the
+ * boost current limit will be left at its default
+ * hardware value.
+ * @soft_start_enable: 1 = Enable soft start for LDO and voltage switch
+ * type regulators so that output voltage slowly
+ * ramps up when the regulator is enabled
+ * 0 = Disable soft start
+ * QPNP_REGULATOR_USE_HW_DEFAULT = do not modify
+ * soft start state
+ * @vs_soft_start_strength: This parameter sets the soft start strength for
+ * voltage switch type regulators. Its value
+ * should be one of QPNP_VS_SOFT_START_STR_*. If
+ * its value is QPNP_VS_SOFT_START_STR_HW_DEFAULT,
+ * then the soft start strength will be left at its
+ * default hardware value.
+ * @auto_mode_enable: 1 = Enable automatic hardware selection of regulator
+ * mode (HPM vs LPM). Auto mode is not available
+ * on boost type regulators
+ * 0 = Disable auto mode selection
+ * QPNP_REGULATOR_USE_HW_DEFAULT = do not modify
+ * auto mode state
+ * @bypass_mode_enable: 1 = Enable bypass mode for an LDO type regulator so
+ * that it acts like a switch and simply outputs
+ * its input voltage
+ * 0 = Do not enable bypass mode
+ * QPNP_REGULATOR_USE_HW_DEFAULT = do not modify
+ * bypass mode state
+ * @hpm_enable: 1 = Enable high power mode (HPM), also referred to
+ * as NPM. HPM consumes more ground current than
+ * LPM, but it can source significantly higher load
+ * current. HPM is not available on boost type
+ * regulators. For voltage switch type regulators,
+ * HPM implies that over current protection and
+ * soft start are active all the time. This
+ * configuration can be overwritten by changing the
+ * regulator's mode dynamically.
+ * 0 = Do not enable HPM
+ * QPNP_REGULATOR_USE_HW_DEFAULT = do not modify
+ * HPM state
+ * @base_addr:		SPMI base address for the regulator peripheral
+ */
+struct qpnp_regulator_platform_data {
+ struct regulator_init_data init_data;
+ int pull_down_enable;
+ unsigned pin_ctrl_enable;
+ unsigned pin_ctrl_hpm;
+ int system_load;
+ int enable_time;
+ int ocp_enable;
+ int ocp_irq;
+ int ocp_max_retries;
+ int ocp_retry_delay_ms;
+ enum qpnp_boost_current_limit boost_current_limit;
+ int soft_start_enable;
+ enum qpnp_vs_soft_start_str vs_soft_start_strength;
+ int auto_mode_enable;
+ int bypass_mode_enable;
+ int hpm_enable;
+ u16 base_addr;
+};
+
+#ifdef CONFIG_REGULATOR_QPNP
+
+/**
+ * qpnp_regulator_init() - register spmi driver for qpnp-regulator
+ *
+ * This initialization function should be called in systems in which driver
+ * registration ordering must be controlled precisely.
+ */
+int __init qpnp_regulator_init(void);
+
+#else
+
+static inline int __init qpnp_regulator_init(void)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_REGULATOR_QPNP */
+
+#endif
diff --git a/include/linux/regulator/rpm-smd-regulator.h b/include/linux/regulator/rpm-smd-regulator.h
new file mode 100644
index 000000000000..139030cd1eae
--- /dev/null
+++ b/include/linux/regulator/rpm-smd-regulator.h
@@ -0,0 +1,108 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_REGULATOR_RPM_SMD_H
+#define _LINUX_REGULATOR_RPM_SMD_H
+
+#include <linux/device.h>
+
+struct rpm_regulator;
+
+/**
+ * enum rpm_regulator_voltage_corner - possible voltage corner values
+ *
+ * These should be used in regulator_set_voltage() and
+ * rpm_regulator_set_voltage() calls for corner type regulators as if they had
+ * units of uV.
+ *
+ * Note, the meaning of corner values is set by the RPM. It is possible that
+ * future platforms will utilize different corner values. The values specified
+ * in this enum correspond to MSM8974 for PMIC PM8841 SMPS 2 (VDD_Dig).
+ */
+enum rpm_regulator_voltage_corner {
+ RPM_REGULATOR_CORNER_NONE = 1,
+ RPM_REGULATOR_CORNER_RETENTION,
+ RPM_REGULATOR_CORNER_SVS_KRAIT,
+ RPM_REGULATOR_CORNER_SVS_SOC,
+ RPM_REGULATOR_CORNER_NORMAL,
+ RPM_REGULATOR_CORNER_TURBO,
+ RPM_REGULATOR_CORNER_SUPER_TURBO,
+};
+
+/**
+ * enum rpm_regulator_mode - control mode for LDO or SMPS type regulators
+ * %RPM_REGULATOR_MODE_AUTO: For SMPS type regulators, use SMPS auto mode so
+ * that the hardware can automatically switch
+ * between PFM and PWM modes based on realtime
+ * load.
+ * LDO type regulators do not support this mode.
+ * %RPM_REGULATOR_MODE_IPEAK: For SMPS type regulators, use aggregated
+ * software current requests to determine
+ * usage of PFM or PWM mode.
+ * For LDO type regulators, use aggregated
+ * software current requests to determine
+ * usage of LPM or HPM mode.
+ * %RPM_REGULATOR_MODE_HPM: For SMPS type regulators, force the
+ * usage of PWM mode.
+ * For LDO type regulators, force the
+ * usage of HPM mode.
+ *
+ * These values should be used in calls to rpm_regulator_set_mode().
+ */
+enum rpm_regulator_mode {
+ RPM_REGULATOR_MODE_AUTO,
+ RPM_REGULATOR_MODE_IPEAK,
+ RPM_REGULATOR_MODE_HPM,
+};
+
+#ifdef CONFIG_REGULATOR_RPM_SMD
+
+struct rpm_regulator *rpm_regulator_get(struct device *dev, const char *supply);
+
+void rpm_regulator_put(struct rpm_regulator *regulator);
+
+int rpm_regulator_enable(struct rpm_regulator *regulator);
+
+int rpm_regulator_disable(struct rpm_regulator *regulator);
+
+int rpm_regulator_set_voltage(struct rpm_regulator *regulator, int min_uV,
+ int max_uV);
+
+int rpm_regulator_set_mode(struct rpm_regulator *regulator,
+ enum rpm_regulator_mode mode);
+
+int __init rpm_smd_regulator_driver_init(void);
+
+#else
+
+static inline struct rpm_regulator *rpm_regulator_get(struct device *dev,
+ const char *supply) { return NULL; }
+
+static inline void rpm_regulator_put(struct rpm_regulator *regulator) { }
+
+static inline int rpm_regulator_enable(struct rpm_regulator *regulator)
+ { return 0; }
+
+static inline int rpm_regulator_disable(struct rpm_regulator *regulator)
+ { return 0; }
+
+static inline int rpm_regulator_set_voltage(struct rpm_regulator *regulator,
+ int min_uV, int max_uV) { return 0; }
+
+static inline int rpm_regulator_set_mode(struct rpm_regulator *regulator,
+ enum rpm_regulator_mode mode) { return 0; }
+
+static inline int __init rpm_smd_regulator_driver_init(void) { return 0; }
+
+#endif /* CONFIG_REGULATOR_RPM_SMD */
+
+#endif
diff --git a/include/linux/regulator/spm-regulator.h b/include/linux/regulator/spm-regulator.h
new file mode 100644
index 000000000000..bd5da2e3352b
--- /dev/null
+++ b/include/linux/regulator/spm-regulator.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_REGULATOR_SPM_H
+#define _LINUX_REGULATOR_SPM_H
+
+#include <linux/err.h>
+#include <linux/init.h>
+
+#ifdef CONFIG_REGULATOR_SPM
+int __init spm_regulator_init(void);
+#else
+static inline int __init spm_regulator_init(void) { return -ENODEV; }
+#endif
+
+#endif
diff --git a/include/linux/regulator/stub-regulator.h b/include/linux/regulator/stub-regulator.h
new file mode 100644
index 000000000000..1155d82ba27b
--- /dev/null
+++ b/include/linux/regulator/stub-regulator.h
@@ -0,0 +1,54 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __STUB_REGULATOR_H__
+#define __STUB_REGULATOR_H__
+
+#include <linux/regulator/machine.h>
+
+#define STUB_REGULATOR_DRIVER_NAME "stub-regulator"
+
+/**
+ * struct stub_regulator_pdata - stub regulator device data
+ * @init_data: regulator constraints
+ * @hpm_min_load: minimum load in uA that will result in the regulator
+ * being set to high power mode
+ * @system_uA: current drawn from regulator not accounted for by any
+ * regulator framework consumer
+ */
+struct stub_regulator_pdata {
+ struct regulator_init_data init_data;
+ int hpm_min_load;
+ int system_uA;
+};
+
+#ifdef CONFIG_REGULATOR_STUB
+
+/**
+ * regulator_stub_init() - register platform driver for stub-regulator
+ *
+ * This initialization function should be called in systems in which driver
+ * registration ordering must be controlled precisely.
+ */
+
+int __init regulator_stub_init(void);
+
+#else
+
+static inline int __init regulator_stub_init(void)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_REGULATOR_STUB */
+
+#endif
diff --git a/include/linux/remote_spinlock.h b/include/linux/remote_spinlock.h
new file mode 100644
index 000000000000..b1df5792ac78
--- /dev/null
+++ b/include/linux/remote_spinlock.h
@@ -0,0 +1,98 @@
+/* Copyright (c) 2008-2009, 2011, 2013-2014 The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __LINUX_REMOTE_SPINLOCK_H
+#define __LINUX_REMOTE_SPINLOCK_H
+
+#include <linux/spinlock.h>
+#include <linux/msm_remote_spinlock.h>
+
+/* Grabbing a local spin lock before going for a remote lock has several
+ * advantages:
+ * 1. Get calls to preempt enable/disable and IRQ save/restore for free.
+ * 2. For UP kernel, there is no overhead.
+ * 3. Reduces the possibility of executing the remote spin lock code. This is
+ * especially useful when the remote CPUs' mutual exclusion instructions
+ * don't work with the local CPUs' instructions. In such cases, one has to
+ * use software based mutex algorithms (e.g. Lamport's bakery algorithm)
+ * which could get expensive when the no. of contending CPUs is high.
+ * 4. In the case of software based mutex algorithm the execution time will be
+ * smaller since the no. of contending CPUs is reduced by having just one
+ * contender for all the local CPUs.
+ * 5. Get most of the spin lock debug features for free.
+ * 6. The code will continue to work "gracefully" even when the remote spin
+ * lock code is stubbed out for debug purposes or when there is no remote
+ * CPU in some board/machine types.
+ */
+typedef struct {
+ spinlock_t local;
+ _remote_spinlock_t remote;
+} remote_spinlock_t;
+
+#define remote_spin_lock_init(lock, id) \
+ ({ \
+ spin_lock_init(&((lock)->local)); \
+ _remote_spin_lock_init(id, &((lock)->remote)); \
+ })
+#define remote_spin_lock(lock) \
+ do { \
+ spin_lock(&((lock)->local)); \
+ _remote_spin_lock(&((lock)->remote)); \
+ } while (0)
+#define remote_spin_unlock(lock) \
+ do { \
+ _remote_spin_unlock(&((lock)->remote)); \
+ spin_unlock(&((lock)->local)); \
+ } while (0)
+#define remote_spin_lock_irqsave(lock, flags) \
+ do { \
+ spin_lock_irqsave(&((lock)->local), flags); \
+ _remote_spin_lock(&((lock)->remote)); \
+ } while (0)
+#define remote_spin_unlock_irqrestore(lock, flags) \
+ do { \
+ _remote_spin_unlock(&((lock)->remote)); \
+ spin_unlock_irqrestore(&((lock)->local), flags); \
+ } while (0)
+#define remote_spin_trylock(lock) \
+ ({ \
+ spin_trylock(&((lock)->local)) \
+ ? _remote_spin_trylock(&((lock)->remote)) \
+ ? 1 \
+ : ({ spin_unlock(&((lock)->local)); 0; }) \
+ : 0; \
+ })
+#define remote_spin_trylock_irqsave(lock, flags) \
+ ({ \
+ spin_trylock_irqsave(&((lock)->local), flags) \
+ ? _remote_spin_trylock(&((lock)->remote)) \
+ ? 1 \
+ : ({ spin_unlock_irqrestore(&((lock)->local), flags); \
+ 0; }) \
+ : 0; \
+ })
+#define remote_spin_lock_rlock_id(lock, tid) \
+ _remote_spin_lock_rlock_id(&((lock)->remote), tid)
+
+#define remote_spin_unlock_rlock(lock) \
+ _remote_spin_unlock_rlock(&((lock)->remote))
+
+#define remote_spin_release(lock, pid) \
+ _remote_spin_release(&((lock)->remote), pid)
+
+#define remote_spin_release_all(pid) \
+ _remote_spin_release_all(pid)
+
+#define remote_spin_owner(lock) \
+ _remote_spin_owner(&((lock)->remote))
+#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5e344bbe63ec..34687909c153 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -3031,4 +3031,6 @@ static inline unsigned long rlimit_max(unsigned int limit)
return task_rlimit_max(current, limit);
}
+extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
+
#endif
diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h
index efa931c5cef1..e10601e855c8 100644
--- a/include/linux/sched_clock.h
+++ b/include/linux/sched_clock.h
@@ -16,5 +16,10 @@ static inline void sched_clock_postinit(void) { }
extern void sched_clock_register(u64 (*read)(void), int bits,
unsigned long rate);
+extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
+extern void sched_clock_register(u64 (*read)(void), int bits,
+ unsigned long rate);
+
+extern unsigned long long (*sched_clock_func)(void);
#endif
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 93dff5fff524..6aaaddafd7a8 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -111,6 +111,27 @@ void generic_smp_call_function_single_interrupt(void);
generic_smp_call_function_single_interrupt
/*
+ * Call a function on all processors
+ */
+int on_each_cpu(smp_call_func_t func, void *info, int wait);
+
+/*
+ * Call a function on processors specified by mask, which might include
+ * the local one.
+ */
+void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
+ void *info, bool wait);
+
+/*
+ * Call a function on each processor for which the supplied function
+ * cond_func returns a positive value. This may include the local
+ * processor.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+ smp_call_func_t func, void *info, bool wait,
+ gfp_t gfp_flags);
+
+/*
* Mark the boot cpu "online" so that it can call console drivers in
* printk() and can access its per-cpu storage.
*/
diff --git a/include/linux/spmi.h b/include/linux/spmi.h
index 91f5eab9e428..68441024444a 100644
--- a/include/linux/spmi.h
+++ b/include/linux/spmi.h
@@ -39,6 +39,28 @@
#define SPMI_CMD_ZERO_WRITE 0x80
/**
+ * * struct spmi_resource: spmi_resource for one device_node
+ * * @num_resources: number of resources for this device node
+ * * @resources: array of resources for this device_node
+ * * @of_node: device_node of the resource in question
+ * * @label: name used to reference the device from the driver
+ * *
+ * * Note that we explicitly add a 'label' pointer here since per
+ * * the ePAPR 2.2.2, the device_node->name should be generic and not
+ * * reflect precise programming model. Thus label enables a
+ * * platform specific name to be assigned with the 'label' binding to
+ * * allow for unique query names.
+ * */
+struct spmi_resource {
+ struct resource *resource;
+ u32 num_resources;
+ struct device_node *of_node;
+ const char *label;
+};
+
+
+
+/**
* struct spmi_device - Basic representation of an SPMI device
* @dev: Driver model representation of the device.
* @ctrl: SPMI controller managing the bus hosting this device.
@@ -48,6 +70,11 @@ struct spmi_device {
struct device dev;
struct spmi_controller *ctrl;
u8 usid;
+
+ const char *name;
+ struct spmi_resource res;
+ struct spmi_resource *dev_node;
+ u32 num_dev_node;
};
static inline struct spmi_device *to_spmi_device(struct device *d)
@@ -77,6 +104,7 @@ int spmi_device_add(struct spmi_device *sdev);
void spmi_device_remove(struct spmi_device *sdev);
+
/**
* struct spmi_controller - interface to the SPMI master controller
* @dev: Driver model representation of the device.
@@ -188,4 +216,50 @@ int spmi_command_sleep(struct spmi_device *sdev);
int spmi_command_wakeup(struct spmi_device *sdev);
int spmi_command_shutdown(struct spmi_device *sdev);
+/**
+ * * spmi_for_each_container_dev - iterate over the array of devnode resources.
+ * * @res: spmi_resource pointer used as the array cursor
+ * * @spmi_dev: spmi_device to iterate
+ * *
+ * * Only usable in spmi-dev-container configurations.
+ * */
+#define spmi_for_each_container_dev(res, spmi_dev) \
+ for (res = ((spmi_dev)->dev_node ? &(spmi_dev)->dev_node[0] : NULL); \
+ (res - (spmi_dev)->dev_node) < (spmi_dev)->num_dev_node; res++)
+
+extern struct resource *spmi_get_resource(struct spmi_device *dev,
+ struct spmi_resource *node,
+ unsigned int type, unsigned int res_num);
+
+struct resource *spmi_get_resource_byname(struct spmi_device *dev,
+ struct spmi_resource *node,
+ unsigned int type,
+ const char *name);
+
+extern int spmi_get_irq(struct spmi_device *dev, struct spmi_resource *node,
+ unsigned int res_num);
+
+extern int spmi_get_irq_byname(struct spmi_device *dev,
+ struct spmi_resource *node, const char *name);
+
+/**
+ * * spmi_get_primary_dev_name - return device name for spmi node
+ * * @dev: spmi device handle
+ * *
+ * * Get the primary node name of a spmi_device corresponding
+ * * with the 'label' binding.
+ * *
+ * * Returns NULL if no primary dev name has been assigned to this spmi_device.
+ * */
+static inline const char *spmi_get_primary_dev_name(struct spmi_device *dev)
+{
+ if (dev->res.label)
+ return dev->res.label;
+ return NULL;
+}
+
+struct spmi_resource *spmi_get_dev_container_byname(struct spmi_device *dev,
+ const char *label);
+
+
#endif
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 909b6e43b694..0526d2449749 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -67,6 +67,115 @@ int arch_update_cpu_topology(void);
#define PENALTY_FOR_NODE_WITH_CPUS (1)
#endif
+/*
+ * Below are the 3 major initializers used in building sched_domains:
+ * SD_SIBLING_INIT, for SMT domains; SD_MC_INIT, for multi-core domains
+ * SD_CPU_INIT, for SMP domains
+ *
+ * Any architecture that cares to do any tuning to these values should do so
+ * by defining their own arch-specific initializer in include/asm/topology.h.
+ * A definition there will automagically override these default initializers
+ * and allow arch-specific performance tuning of sched_domains.
+ * (Only non-zero and non-null fields need be specified.)
+ */
+
+#ifdef CONFIG_SCHED_SMT
+/* MCD - Do we really need this? It is always on if CONFIG_SCHED_SMT is,
+ * so can't we drop this in favor of CONFIG_SCHED_SMT?
+ */
+#define ARCH_HAS_SCHED_WAKE_IDLE
+/* Common values for SMT siblings */
+#ifndef SD_SIBLING_INIT
+#define SD_SIBLING_INIT (struct sched_domain) { \
+ .min_interval = 1, \
+ .max_interval = 2, \
+ .busy_factor = 64, \
+ .imbalance_pct = 110, \
+ \
+ .flags = 1*SD_LOAD_BALANCE \
+ | 1*SD_BALANCE_NEWIDLE \
+ | 1*SD_BALANCE_EXEC \
+ | 1*SD_BALANCE_FORK \
+ | 0*SD_BALANCE_WAKE \
+ | 1*SD_WAKE_AFFINE \
+ | 1*SD_SHARE_CPUPOWER \
+ | 1*SD_SHARE_PKG_RESOURCES \
+ | 0*SD_SERIALIZE \
+ | 0*SD_PREFER_SIBLING \
+ | arch_sd_sibling_asym_packing() \
+ , \
+ .last_balance = jiffies, \
+ .balance_interval = 1, \
+ .smt_gain = 1178, /* 15% */ \
+}
+#endif
+#endif /* CONFIG_SCHED_SMT */
+
+#ifdef CONFIG_SCHED_MC
+/* Common values for MC siblings. for now mostly derived from SD_CPU_INIT */
+#ifndef SD_MC_INIT
+#define SD_MC_INIT (struct sched_domain) { \
+ .min_interval = 1, \
+ .max_interval = 4, \
+ .busy_factor = 1, \
+ .imbalance_pct = 125, \
+ .cache_nice_tries = 1, \
+ .busy_idx = 2, \
+ .wake_idx = 0, \
+ .forkexec_idx = 0, \
+ \
+ .flags = 1*SD_LOAD_BALANCE \
+ | 1*SD_BALANCE_NEWIDLE \
+ | 1*SD_BALANCE_EXEC \
+ | 1*SD_BALANCE_FORK \
+ | 0*SD_BALANCE_WAKE \
+ | 1*SD_WAKE_AFFINE \
+ | 0*SD_SHARE_CPUPOWER \
+ | 1*SD_SHARE_PKG_RESOURCES \
+ | 0*SD_SERIALIZE \
+ , \
+ .last_balance = jiffies, \
+ .balance_interval = 1, \
+}
+#endif
+#endif /* CONFIG_SCHED_MC */
+
+/* Common values for CPUs */
+#ifndef SD_CPU_INIT
+#define SD_CPU_INIT (struct sched_domain) { \
+ .min_interval = 1, \
+ .max_interval = 4, \
+ .busy_factor = 64, \
+ .imbalance_pct = 125, \
+ .cache_nice_tries = 1, \
+ .busy_idx = 2, \
+ .idle_idx = 1, \
+ .newidle_idx = 0, \
+ .wake_idx = 0, \
+ .forkexec_idx = 0, \
+ \
+ .flags = 1*SD_LOAD_BALANCE \
+ | 1*SD_BALANCE_NEWIDLE \
+ | 1*SD_BALANCE_EXEC \
+ | 1*SD_BALANCE_FORK \
+ | 0*SD_BALANCE_WAKE \
+ | 1*SD_WAKE_AFFINE \
+ | 0*SD_SHARE_CPUPOWER \
+ | 1*SD_SHARE_PKG_RESOURCES \
+ | 0*SD_SERIALIZE \
+ | 1*SD_PREFER_SIBLING \
+ , \
+ .last_balance = jiffies, \
+ .balance_interval = 1, \
+}
+#endif
+
+#ifdef CONFIG_SCHED_BOOK
+#ifndef SD_BOOK_INIT
+#error Please define an appropriate SD_BOOK_INIT in include/asm/topology.h!!!
+#endif
+#endif /* CONFIG_SCHED_BOOK */
+
#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DECLARE_PER_CPU(int, numa_node);
diff --git a/include/soc/qcom/avs.h b/include/soc/qcom/avs.h
new file mode 100644
index 000000000000..358fef46f5de
--- /dev/null
+++ b/include/soc/qcom/avs.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2009,2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef AVS_H
+#define AVS_H
+
+#ifdef CONFIG_MSM_AVS_HW
+u32 avs_get_avscsr(void);
+void avs_set_avscsr(u32 avscsr);
+u32 avs_get_avsdscr(void);
+void avs_set_avsdscr(u32 avsdscr);
+void avs_disable(int cpu);
+void avs_enable(int cpu, u32 avsdscr);
+#else
+static inline u32 avs_get_avscsr(void)
+{ return 0; }
+static inline void avs_set_avscsr(u32 avscsr) {}
+static inline u32 avs_get_avsdscr(void)
+{ return 0; }
+static inline void avs_set_avsdscr(u32 avsdscr) {}
+static inline void avs_disable(int cpu) {}
+static inline void avs_enable(int cpu, u32 avsdscr) {}
+#endif
+
+#define AVS_DISABLE(cpu) avs_disable(cpu)
+#define AVS_ENABLE(cpu, x) avs_enable(cpu, x)
+
+#endif
diff --git a/include/soc/qcom/boot_stats.h b/include/soc/qcom/boot_stats.h
new file mode 100644
index 000000000000..b607dc3fa5b8
--- /dev/null
+++ b/include/soc/qcom/boot_stats.h
@@ -0,0 +1,17 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef CONFIG_MSM_BOOT_STATS
+int boot_stats_init(void);
+#else
+static inline int boot_stats_init(void) { return 0; }
+#endif
diff --git a/include/soc/qcom/clock-alpha-pll.h b/include/soc/qcom/clock-alpha-pll.h
new file mode 100644
index 000000000000..668f418cb792
--- /dev/null
+++ b/include/soc/qcom/clock-alpha-pll.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_ALPHA_PLL_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_ALPHA_PLL_H
+
+#include <linux/spinlock.h>
+#include <linux/clk/msm-clk-provider.h>
+
+struct alpha_pll_masks {
+ u32 lock_mask;
+ u32 update_mask;
+ u32 vco_mask;
+ u32 vco_shift;
+ u32 alpha_en_mask;
+};
+
+struct alpha_pll_vco_tbl {
+ u32 vco_val;
+ unsigned long min_freq;
+ unsigned long max_freq;
+};
+
+#define VCO(a, b, c) { \
+ .vco_val = a,\
+ .min_freq = b,\
+ .max_freq = c,\
+}
+
+struct alpha_pll_clk {
+ struct alpha_pll_masks *masks;
+ void *const __iomem *base;
+ const u32 offset;
+
+ struct alpha_pll_vco_tbl *vco_tbl;
+ u32 num_vco;
+
+ struct clk c;
+};
+
+static inline struct alpha_pll_clk *to_alpha_pll_clk(struct clk *c)
+{
+ return container_of(c, struct alpha_pll_clk, c);
+}
+#endif
+
+extern struct clk_ops clk_ops_alpha_pll;
+extern struct clk_ops clk_ops_fixed_alpha_pll;
diff --git a/include/soc/qcom/clock-krait.h b/include/soc/qcom/clock-krait.h
new file mode 100644
index 000000000000..45270c6dbefb
--- /dev/null
+++ b/include/soc/qcom/clock-krait.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_KRAIT_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_KRAIT_H
+
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clock-generic.h>
+
+extern struct clk_mux_ops clk_mux_ops_kpss;
+extern struct clk_div_ops clk_div_ops_kpss_div2;
+
+#define DEFINE_KPSS_DIV2_CLK(clk_name, _parent, _offset, _lf_tree) \
+static struct div_clk clk_name = { \
+ .data = { \
+ .div = 2, \
+ .min_div = 2, \
+ .max_div = 2, \
+ }, \
+ .ops = &clk_div_ops_kpss_div2, \
+ .offset = _offset, \
+ .mask = 0x3, \
+ .shift = 6, \
+ .priv = (void *) _lf_tree, \
+ .c = { \
+ .parent = _parent, \
+ .dbg_name = #clk_name, \
+ .ops = &clk_ops_div, \
+ .flags = CLKFLAG_NO_RATE_CACHE, \
+ CLK_INIT(clk_name.c), \
+ } \
+}
+
+struct hfpll_data {
+ const u32 mode_offset;
+ const u32 l_offset;
+ const u32 m_offset;
+ const u32 n_offset;
+ const u32 user_offset;
+ const u32 droop_offset;
+ const u32 config_offset;
+ const u32 status_offset;
+
+ const u32 droop_val;
+ u32 config_val;
+ const u32 user_val;
+ u32 user_vco_mask;
+ unsigned long low_vco_max_rate;
+
+ unsigned long min_rate;
+ unsigned long max_rate;
+};
+
+struct hfpll_clk {
+ void * __iomem base;
+ struct hfpll_data const *d;
+ unsigned long src_rate;
+ int init_done;
+
+ struct clk c;
+};
+
+static inline struct hfpll_clk *to_hfpll_clk(struct clk *c)
+{
+ return container_of(c, struct hfpll_clk, c);
+}
+
+extern struct clk_ops clk_ops_hfpll;
+
+struct avs_data {
+ unsigned long *rate;
+ u32 *dscr;
+ int num;
+};
+
+struct kpss_core_clk {
+ int id;
+ u32 cp15_iaddr;
+ u32 l2_slp_delay;
+ struct avs_data *avs_tbl;
+ struct clk c;
+};
+
+static inline struct kpss_core_clk *to_kpss_core_clk(struct clk *c)
+{
+ return container_of(c, struct kpss_core_clk, c);
+}
+
+extern struct clk_ops clk_ops_kpss_cpu;
+extern struct clk_ops clk_ops_kpss_l2;
+
+extern struct kpss_core_clk krait0_clk;
+extern struct kpss_core_clk krait1_clk;
+extern struct kpss_core_clk krait2_clk;
+extern struct kpss_core_clk krait3_clk;
+extern struct kpss_core_clk l2_clk;
+
+#endif
diff --git a/include/soc/qcom/clock-local2.h b/include/soc/qcom/clock-local2.h
new file mode 100644
index 000000000000..fa723ae5910f
--- /dev/null
+++ b/include/soc/qcom/clock-local2.h
@@ -0,0 +1,239 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_LOCAL_2_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_LOCAL_2_H
+
+#include <linux/spinlock.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+
+/*
+ * Generic frequency-definition structs and macros
+ */
+
+/**
+ * @freq_hz: output rate
+ * @src_clk: source clock for freq_hz
+ * @m_val: M value corresponding to freq_hz
+ * @n_val: N value corresponding to freq_hz
+ * @d_val: D value corresponding to freq_hz
+ * @div_src_val: Pre divider value and source selection mux index for freq_hz
+ * @sys_vdd: Voltage level required for freq_hz
+ */
+struct clk_freq_tbl {
+ unsigned long freq_hz;
+ struct clk *src_clk;
+ u32 m_val;
+ u32 n_val;
+ u32 d_val;
+ u32 div_src_val;
+ const unsigned sys_vdd;
+};
+
+#define FREQ_END (ULONG_MAX-1)
+#define F_END { .freq_hz = FREQ_END }
+
+/*
+ * Generic clock-definition struct and macros
+ */
+/**
+ * struct rcg_clk - root clock generator
+ * @cmd_rcgr_reg: command register
+ * @set_rate: function to set frequency
+ * @freq_tbl: frequency table for this RCG
+ * @current_freq: current RCG frequency
+ * @c: generic clock data
+ * @base: pointer to base address of ioremapped registers.
+ */
+struct rcg_clk {
+ const u32 cmd_rcgr_reg;
+
+ void (*set_rate)(struct rcg_clk *, struct clk_freq_tbl *);
+
+ struct clk_freq_tbl *freq_tbl;
+ struct clk_freq_tbl *current_freq;
+ struct clk c;
+
+ void *const __iomem *base;
+};
+
+static inline struct rcg_clk *to_rcg_clk(struct clk *clk)
+{
+ return container_of(clk, struct rcg_clk, c);
+}
+
+extern struct clk_freq_tbl rcg_dummy_freq;
+
+/**
+ * struct branch_clk - branch clock
+ * @set_rate: Set the frequency of this branch clock.
+ * @c: clk
+ * @cbcr_reg: branch control register
+ * @bcr_reg: block reset register
+ * @has_sibling: true if other branches are derived from this branch's source
+ * @cur_div: current branch divider value
+ * @max_div: maximum branch divider value (if zero, no divider exists)
+ * @halt_check: halt checking type
+ * @base: pointer to base address of ioremapped registers.
+ */
+struct branch_clk {
+ void (*set_rate)(struct branch_clk *, struct clk_freq_tbl *);
+ struct clk c;
+ const u32 cbcr_reg;
+ const u32 bcr_reg;
+ int has_sibling;
+ u32 cur_div;
+ u32 max_div;
+ const u32 halt_check;
+ void *const __iomem *base;
+};
+
+static inline struct branch_clk *to_branch_clk(struct clk *clk)
+{
+ return container_of(clk, struct branch_clk, c);
+}
+
+/**
+ * struct local_vote_clk - Voteable branch clock
+ * @c: clk
+ * @cbcr_reg: branch control register
+ * @vote_reg: voting register
+ * @en_mask: enable mask
+ * @halt_check: halt checking type
+ * @base: pointer to base address of ioremapped registers.
+ * An on/off switch with a rate derived from the parent.
+ */
+struct local_vote_clk {
+ struct clk c;
+ const u32 cbcr_reg;
+ const u32 vote_reg;
+ const u32 bcr_reg;
+ const u32 en_mask;
+ const u32 halt_check;
+ void *const __iomem *base;
+};
+
+static inline struct local_vote_clk *to_local_vote_clk(struct clk *clk)
+{
+ return container_of(clk, struct local_vote_clk, c);
+}
+
+/**
+ * struct reset_clk - Reset clock
+ * @c: clk
+ * @reset_reg: block reset register
+ * @base: pointer to base address of ioremapped registers.
+ */
+struct reset_clk {
+ struct clk c;
+ const u32 reset_reg;
+ void *const __iomem *base;
+};
+
+static inline struct reset_clk *to_reset_clk(struct clk *clk)
+{
+ return container_of(clk, struct reset_clk, c);
+}
+/**
+ * struct measure_clk - for rate measurement debug use
+ * @sample_ticks: sample period in reference clock ticks
+ * @multiplier: measurement scale-up factor
+ * @divider: measurement scale-down factor
+ * @c: clk
+*/
+struct measure_clk {
+ u64 sample_ticks;
+ u32 multiplier;
+ u32 divider;
+
+ struct clk c;
+};
+
+struct measure_clk_data {
+ struct clk *cxo;
+ u32 plltest_reg;
+ u32 plltest_val;
+ u32 xo_div4_cbcr;
+ u32 ctl_reg;
+ u32 status_reg;
+ void *const __iomem *base;
+};
+
+static inline struct measure_clk *to_measure_clk(struct clk *clk)
+{
+ return container_of(clk, struct measure_clk, c);
+}
+
+/**
+ * struct gate_clk
+ * @c: clk
+ * @en_mask: ORed with @en_reg to enable gate clk
+ * @en_reg: register used to enable/disable gate clk
+ * @base: pointer to base address of ioremapped registers
+ */
+struct gate_clk {
+ struct clk c;
+ const u32 en_mask;
+ const u32 en_reg;
+ const unsigned int delay_us;
+ void *const __iomem *base;
+};
+
+static inline struct gate_clk *to_gate_clk(struct clk *clk)
+{
+ return container_of(clk, struct gate_clk, c);
+}
+
+/*
+ * Generic set-rate implementations
+ */
+void set_rate_mnd(struct rcg_clk *clk, struct clk_freq_tbl *nf);
+void set_rate_hid(struct rcg_clk *clk, struct clk_freq_tbl *nf);
+
+/*
+ * Variables from the clock-local driver
+ */
+extern spinlock_t local_clock_reg_lock;
+
+extern struct clk_ops clk_ops_empty;
+extern struct clk_ops clk_ops_rcg;
+extern struct clk_ops clk_ops_rcg_mnd;
+extern struct clk_ops clk_ops_branch;
+extern struct clk_ops clk_ops_vote;
+extern struct clk_ops clk_ops_rcg_hdmi;
+extern struct clk_ops clk_ops_rcg_edp;
+extern struct clk_ops clk_ops_byte;
+extern struct clk_ops clk_ops_pixel;
+extern struct clk_ops clk_ops_edppixel;
+extern struct clk_ops clk_ops_gate;
+extern struct clk_ops clk_ops_rst;
+extern struct clk_mux_ops mux_reg_ops;
+extern struct mux_div_ops rcg_mux_div_ops;
+
+enum handoff pixel_rcg_handoff(struct clk *clk);
+enum handoff byte_rcg_handoff(struct clk *clk);
+unsigned long measure_get_rate(struct clk *c);
+
+/*
+ * Clock definition macros
+ */
+#define DEFINE_CLK_MEASURE(name) \
+	struct clk name = { \
+		.ops = &clk_ops_empty, \
+		.dbg_name = #name, \
+		CLK_INIT(name), \
+	};
+
+#endif /* __ARCH_ARM_MACH_MSM_CLOCK_LOCAL_2_H */
+
diff --git a/include/soc/qcom/clock-pll.h b/include/soc/qcom/clock-pll.h
new file mode 100644
index 000000000000..92390ec2ad73
--- /dev/null
+++ b/include/soc/qcom/clock-pll.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_PLL_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_PLL_H
+
+#include <linux/clk/msm-clk-provider.h>
+
+/**
+ * struct pll_freq_tbl - generic PLL frequency definition
+ * @freq_hz: pll frequency in hz
+ * @l_val: pll l value
+ * @m_val: pll m value
+ * @n_val: pll n value
+ * @post_div_val: pll post divider value
+ * @pre_div_val: pll pre-divider value
+ * @vco_val: pll vco value
+ */
+struct pll_freq_tbl {
+ const u32 freq_hz;
+ const u32 l_val;
+ const u32 m_val;
+ const u32 n_val;
+ const u32 post_div_val;
+ const u32 pre_div_val;
+ const u32 vco_val;
+};
+
+/**
+ * struct pll_config_masks - PLL config masks struct
+ * @post_div_mask: mask for post divider bits location
+ * @pre_div_mask: mask for pre-divider bits location
+ * @vco_mask: mask for vco bits location
+ * @mn_en_mask: ORed with pll config register to enable the mn counter
+ * @main_output_mask: ORed with pll config register to enable the main output
+ */
+struct pll_config_masks {
+ u32 post_div_mask;
+ u32 pre_div_mask;
+ u32 vco_mask;
+ u32 mn_en_mask;
+ u32 main_output_mask;
+};
+
+#define PLL_FREQ_END (UINT_MAX-1)
+#define PLL_F_END { .freq_hz = PLL_FREQ_END }
+
+/**
+ * struct pll_vote_clk - phase locked loop (HW voteable)
+ * @soft_vote: soft voting variable for multiple PLL software instances
+ * @soft_vote_mask: soft voting mask for multiple PLL software instances
+ * @en_reg: enable register
+ * @en_mask: ORed with @en_reg to enable the clock
+ * @status_mask: ANDed with @status_reg to determine if PLL is active.
+ * @status_reg: status register
+ * @c: clock
+ */
+struct pll_vote_clk {
+ u32 *soft_vote;
+ const u32 soft_vote_mask;
+ void __iomem *const en_reg;
+ const u32 en_mask;
+ void __iomem *const status_reg;
+ const u32 status_mask;
+
+ struct clk c;
+ void *const __iomem *base;
+};
+
+extern struct clk_ops clk_ops_pll_vote;
+extern struct clk_ops clk_ops_pll_acpu_vote;
+
+/* Soft voting values */
+#define PLL_SOFT_VOTE_PRIMARY BIT(0)
+#define PLL_SOFT_VOTE_ACPU BIT(1)
+#define PLL_SOFT_VOTE_AUX BIT(2)
+
+static inline struct pll_vote_clk *to_pll_vote_clk(struct clk *c)
+{
+ return container_of(c, struct pll_vote_clk, c);
+}
+
+/**
+ * struct pll_clk - phase locked loop
+ * @mode_reg: enable register
+ * @l_reg: l value register
+ * @m_reg: m value register
+ * @n_reg: n value register
+ * @config_reg: configuration register, contains mn divider enable, pre divider,
+ * post divider and vco configuration. register name can be configure register
+ * or user_ctl register depending on targets
+ * @status_reg: status register, contains the lock detection bit
+ * @masks: masks used for settings in config_reg
+ * @freq_tbl: pll freq table
+ * @c: clk
+ * @base: pointer to base address of ioremapped registers.
+ */
+struct pll_clk {
+ void __iomem *const mode_reg;
+ void __iomem *const l_reg;
+ void __iomem *const m_reg;
+ void __iomem *const n_reg;
+ void __iomem *const config_reg;
+ void __iomem *const status_reg;
+
+ struct pll_config_masks masks;
+ struct pll_freq_tbl *freq_tbl;
+
+ struct clk c;
+ void *const __iomem *base;
+};
+
+extern struct clk_ops clk_ops_local_pll;
+extern struct clk_ops clk_ops_sr2_pll;
+
+static inline struct pll_clk *to_pll_clk(struct clk *c)
+{
+ return container_of(c, struct pll_clk, c);
+}
+
+int sr_pll_clk_enable(struct clk *c);
+int sr_hpm_lp_pll_clk_enable(struct clk *c);
+
+struct pll_alt_config {
+ u32 val;
+ u32 mask;
+};
+
+struct pll_config {
+ u32 l;
+ u32 m;
+ u32 n;
+ u32 vco_val;
+ u32 vco_mask;
+ u32 pre_div_val;
+ u32 pre_div_mask;
+ u32 post_div_val;
+ u32 post_div_mask;
+ u32 mn_ena_val;
+ u32 mn_ena_mask;
+ u32 main_output_val;
+ u32 main_output_mask;
+ u32 aux_output_val;
+ u32 aux_output_mask;
+ u32 cfg_ctl_val;
+ /* SR2 PLL specific fields */
+ u32 add_factor_val;
+ u32 add_factor_mask;
+ struct pll_alt_config alt_cfg;
+};
+
+struct pll_config_regs {
+ void __iomem *l_reg;
+ void __iomem *m_reg;
+ void __iomem *n_reg;
+ void __iomem *config_reg;
+ void __iomem *config_alt_reg;
+ void __iomem *config_ctl_reg;
+ void __iomem *mode_reg;
+ void *const __iomem *base;
+};
+
+void configure_sr_pll(struct pll_config *config, struct pll_config_regs *regs,
+ u32 ena_fsm_mode);
+void configure_sr_hpm_lp_pll(struct pll_config *config,
+ struct pll_config_regs *, u32 ena_fsm_mode);
+#endif
diff --git a/include/soc/qcom/clock-rpm.h b/include/soc/qcom/clock-rpm.h
new file mode 100644
index 000000000000..b6d5ac9227d9
--- /dev/null
+++ b/include/soc/qcom/clock-rpm.h
@@ -0,0 +1,151 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_RPM_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_RPM_H
+
+#include <linux/clk/msm-clk-provider.h>
+#include <soc/qcom/rpm-smd.h>
+
+#define RPM_SMD_KEY_RATE 0x007A484B
+#define RPM_SMD_KEY_ENABLE 0x62616E45
+#define RPM_SMD_KEY_STATE 0x54415453
+
+#define RPM_CLK_BUFFER_A_REQ 0x616B6C63
+#define RPM_KEY_SOFTWARE_ENABLE 0x6E657773
+#define RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY 0x62636370
+
+struct clk_ops;
+struct clk_rpmrs_data;
+extern struct clk_ops clk_ops_rpm;
+extern struct clk_ops clk_ops_rpm_branch;
+
+struct rpm_clk {
+ const int rpm_res_type;
+ const int rpm_key;
+ const int rpm_clk_id;
+ const int rpm_status_id;
+ const bool active_only;
+ bool enabled;
+ bool branch; /* true: RPM only accepts 1 for ON and 0 for OFF */
+ struct clk_rpmrs_data *rpmrs_data;
+ struct rpm_clk *peer;
+ struct clk c;
+};
+
+static inline struct rpm_clk *to_rpm_clk(struct clk *clk)
+{
+ return container_of(clk, struct rpm_clk, c);
+}
+
+/*
+ * RPM scaling enable function used for target that has an RPM resource for
+ * rpm clock scaling enable.
+ */
+int enable_rpm_scaling(void);
+
+extern struct clk_rpmrs_data clk_rpmrs_data_smd;
+
+#define __DEFINE_CLK_RPM(name, active, type, r_id, stat_id, dep, key, \
+ rpmrsdata) \
+ static struct rpm_clk active; \
+ static struct rpm_clk name = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
+ .peer = &active, \
+ .rpmrs_data = (rpmrsdata),\
+ .c = { \
+ .ops = &clk_ops_rpm, \
+ .dbg_name = #name, \
+ CLK_INIT(name.c), \
+ .depends = dep, \
+ }, \
+ }; \
+ static struct rpm_clk active = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
+ .peer = &name, \
+ .active_only = true, \
+ .rpmrs_data = (rpmrsdata),\
+ .c = { \
+ .ops = &clk_ops_rpm, \
+ .dbg_name = #active, \
+ CLK_INIT(active.c), \
+ .depends = dep, \
+ }, \
+ };
+
+#define __DEFINE_CLK_RPM_BRANCH(name, active, type, r_id, stat_id, r, \
+ key, rpmrsdata) \
+ static struct rpm_clk active; \
+ static struct rpm_clk name = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
+ .peer = &active, \
+ .branch = true, \
+ .rpmrs_data = (rpmrsdata),\
+ .c = { \
+ .ops = &clk_ops_rpm_branch, \
+ .dbg_name = #name, \
+ .rate = (r), \
+ CLK_INIT(name.c), \
+ }, \
+ }; \
+ static struct rpm_clk active = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
+ .peer = &name, \
+ .active_only = true, \
+ .branch = true, \
+ .rpmrs_data = (rpmrsdata),\
+ .c = { \
+ .ops = &clk_ops_rpm_branch, \
+ .dbg_name = #active, \
+ .rate = (r), \
+ CLK_INIT(active.c), \
+ }, \
+ };
+
+#define DEFINE_CLK_RPM_SMD(name, active, type, r_id, dep) \
+ __DEFINE_CLK_RPM(name, active, type, r_id, 0, dep, \
+ RPM_SMD_KEY_RATE, &clk_rpmrs_data_smd)
+
+#define DEFINE_CLK_RPM_SMD_BRANCH(name, active, type, r_id, r) \
+ __DEFINE_CLK_RPM_BRANCH(name, active, type, r_id, 0, r, \
+ RPM_SMD_KEY_ENABLE, &clk_rpmrs_data_smd)
+
+#define DEFINE_CLK_RPM_SMD_QDSS(name, active, type, r_id) \
+ __DEFINE_CLK_RPM(name, active, type, r_id, \
+ 0, 0, RPM_SMD_KEY_STATE, &clk_rpmrs_data_smd)
+/*
+ * The RPM XO buffer clock management code aggregates votes for pin-control mode
+ * and software mode separately. Software-enable has higher priority over pin-
+ * control, and if the software-mode aggregation results in a 'disable', the
+ * buffer will be left in pin-control mode if a pin-control vote is in place.
+ */
+#define DEFINE_CLK_RPM_SMD_XO_BUFFER(name, active, r_id) \
+ __DEFINE_CLK_RPM_BRANCH(name, active, RPM_CLK_BUFFER_A_REQ, r_id, 0, \
+ 1000, RPM_KEY_SOFTWARE_ENABLE, &clk_rpmrs_data_smd)
+
+#define DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(name, active, r_id) \
+ __DEFINE_CLK_RPM_BRANCH(name, active, RPM_CLK_BUFFER_A_REQ, r_id, 0, \
+ 1000, RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY, &clk_rpmrs_data_smd)
+#endif
diff --git a/include/soc/qcom/clock-voter.h b/include/soc/qcom/clock-voter.h
new file mode 100644
index 000000000000..9eb3898db1e8
--- /dev/null
+++ b/include/soc/qcom/clock-voter.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_VOTER_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_VOTER_H
+
+#include <linux/clk/msm-clk-provider.h>
+
+struct clk_ops;
+extern struct clk_ops clk_ops_voter;
+
+struct clk_voter {
+ int is_branch;
+ bool enabled;
+ struct clk c;
+};
+
+static inline struct clk_voter *to_clk_voter(struct clk *clk)
+{
+ return container_of(clk, struct clk_voter, c);
+}
+
+#define __DEFINE_CLK_VOTER(clk_name, _parent, _default_rate, _is_branch) \
+ struct clk_voter clk_name = { \
+ .is_branch = (_is_branch), \
+ .c = { \
+ .parent = _parent, \
+ .dbg_name = #clk_name, \
+ .ops = &clk_ops_voter, \
+ .rate = _default_rate, \
+ CLK_INIT(clk_name.c), \
+ }, \
+ }
+
+#define DEFINE_CLK_VOTER(clk_name, _parent, _default_rate) \
+ __DEFINE_CLK_VOTER(clk_name, _parent, _default_rate, 0)
+
+#define DEFINE_CLK_BRANCH_VOTER(clk_name, _parent) \
+ __DEFINE_CLK_VOTER(clk_name, _parent, 1000, 1)
+
+#endif
diff --git a/include/soc/qcom/cpufreq.h b/include/soc/qcom/cpufreq.h
new file mode 100644
index 000000000000..46872d78371e
--- /dev/null
+++ b/include/soc/qcom/cpufreq.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2013 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MACH_CPUFREQ_H
+#define __MACH_CPUFREQ_H
+
+#if defined(CONFIG_DEVFREQ_GOV_MSM_CPUFREQ)
+extern int devfreq_msm_cpufreq_update_bw(void);
+extern int register_devfreq_msm_cpufreq(void);
+#else
+static inline int devfreq_msm_cpufreq_update_bw(void)
+{
+	return 0;
+}
+static inline int register_devfreq_msm_cpufreq(void)
+{
+	return 0;
+}
+#endif
+
+#if defined(CONFIG_CPU_FREQ_MSM)
+extern unsigned long msm_cpufreq_get_bw(void);
+#else
+static inline unsigned long msm_cpufreq_get_bw(void)
+{
+	return ULONG_MAX;
+}
+#endif
+
+#endif
diff --git a/include/soc/qcom/event_timer.h b/include/soc/qcom/event_timer.h
new file mode 100644
index 000000000000..7a00b2375120
--- /dev/null
+++ b/include/soc/qcom/event_timer.h
@@ -0,0 +1,77 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_EVENT_TIMER_H
+#define __ARCH_ARM_MACH_MSM_EVENT_TIMER_H
+
+#include <linux/hrtimer.h>
+
+struct event_timer_info;
+
+#ifdef CONFIG_MSM_EVENT_TIMER
+/**
+ * add_event_timer() : Add a wakeup event. Intended to be called
+ * by clients once. Returns a handle to be used
+ * for future transactions.
+ * @function : The callback function will be called when event
+ * timer expires.
+ * @data : Callback data provided by client.
+ */
+struct event_timer_info *add_event_timer(void (*function)(void *), void *data);
+
+/** activate_event_timer() : Set the expiration time for an event in absolute
+ * ktime. This is a oneshot event timer, clients
+ * should call this again to set another expiration.
+ * @event : Event handle.
+ * @event_time : Event time in absolute ktime.
+ */
+void activate_event_timer(struct event_timer_info *event, ktime_t event_time);
+
+/**
+ * deactivate_event_timer() : Deactivate an event timer.
+ * @event: event handle.
+ */
+void deactivate_event_timer(struct event_timer_info *event);
+
+/**
+ * destroy_event_timer() : Free the event info data structure allocated during
+ * add_event_timer().
+ * @event: event handle.
+ */
+void destroy_event_timer(struct event_timer_info *event);
+
+/**
+ * get_next_event_time() : Get the next wakeup event.
+ * returns a ktime value of the next
+ * expiring event.
+ */
+ktime_t get_next_event_time(void);
+#else
+static inline void *add_event_timer(void (*function)(void *), void *data)
+{
+ return NULL;
+}
+
+static inline void activate_event_timer(void *event, ktime_t event_time) {}
+
+static inline void deactivate_event_timer(void *event) {}
+
+static inline void destroy_event_timer(void *event) {}
+
+static inline ktime_t get_next_event_time(void)
+{
+ return ns_to_ktime(0);
+}
+
+#endif /* CONFIG_MSM_EVENT_TIMER */
+#endif /* __ARCH_ARM_MACH_MSM_EVENT_TIMER_H */
diff --git a/include/soc/qcom/jtag.h b/include/soc/qcom/jtag.h
new file mode 100644
index 000000000000..86df834ee7cb
--- /dev/null
+++ b/include/soc/qcom/jtag.h
@@ -0,0 +1,54 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MACH_JTAG_H
+#define __MACH_JTAG_H
+
+#if defined(CONFIG_MSM_JTAG) || defined(CONFIG_MSM_JTAG_MM) || \
+ defined(CONFIG_MSM_JTAGV8)
+extern void msm_jtag_save_state(void);
+extern void msm_jtag_restore_state(void);
+extern void msm_jtag_mm_save_state(void);
+extern void msm_jtag_mm_restore_state(void);
+extern bool msm_jtag_fuse_apps_access_disabled(void);
+#else
+static inline void msm_jtag_save_state(void) {}
+static inline void msm_jtag_restore_state(void) {}
+static inline void msm_jtag_mm_save_state(void) {}
+static inline void msm_jtag_mm_restore_state(void){}
+static inline bool msm_jtag_fuse_apps_access_disabled(void) { return false; }
+#endif
+#ifdef CONFIG_MSM_JTAGV8
+extern int msm_jtag_save_register(struct notifier_block *nb);
+extern int msm_jtag_save_unregister(struct notifier_block *nb);
+extern int msm_jtag_restore_register(struct notifier_block *nb);
+extern int msm_jtag_restore_unregister(struct notifier_block *nb);
+#else
+static inline int msm_jtag_save_register(struct notifier_block *nb)
+{
+ return 0;
+}
+static inline int msm_jtag_save_unregister(struct notifier_block *nb)
+{
+ return 0;
+}
+static inline int msm_jtag_restore_register(struct notifier_block *nb)
+{
+ return 0;
+}
+static inline int msm_jtag_restore_unregister(struct notifier_block *nb)
+{
+ return 0;
+}
+#endif
+
+#endif
diff --git a/include/soc/qcom/pm.h b/include/soc/qcom/pm.h
new file mode 100644
index 000000000000..47de77c385fd
--- /dev/null
+++ b/include/soc/qcom/pm.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
+ * Author: San Mehat <san@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_PM_H
+#define __ARCH_ARM_MACH_MSM_PM_H
+
+#include <linux/types.h>
+#include <linux/cpuidle.h>
+#include <asm/smp_plat.h>
+
+#if !defined(CONFIG_SMP)
+#define msm_secondary_startup NULL
+#elif defined(CONFIG_CPU_V7)
+extern void msm_secondary_startup(void);
+#else
+#define msm_secondary_startup secondary_holding_pen
+#endif
+
+enum msm_pm_sleep_mode {
+ MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT,
+ MSM_PM_SLEEP_MODE_RETENTION,
+ MSM_PM_SLEEP_MODE_POWER_COLLAPSE_STANDALONE,
+ MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
+ MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND,
+ MSM_PM_SLEEP_MODE_NR,
+ MSM_PM_SLEEP_MODE_NOT_SELECTED,
+};
+
+enum msm_pm_l2_scm_flag {
+ MSM_SCM_L2_ON = 0,
+ MSM_SCM_L2_OFF = 1,
+ MSM_SCM_L2_RET = 2,
+ MSM_SCM_L2_GDHS = 3,
+};
+
+#define MSM_PM_MODE(cpu, mode_nr) ((cpu) * MSM_PM_SLEEP_MODE_NR + (mode_nr))
+
+struct msm_pm_time_params {
+ uint32_t latency_us;
+ uint32_t sleep_us;
+ uint32_t next_event_us;
+ uint32_t modified_time_us;
+};
+
+struct msm_pm_sleep_status_data {
+ void *base_addr;
+ uint32_t cpu_offset;
+ uint32_t mask;
+};
+
+int msm_pm_mode_sysfs_add(const char *);
+
+/**
+ * msm_pm_sleep_mode_allow() - API to determine if sleep mode is allowed.
+ * @cpu: CPU on which to check for the sleep mode.
+ * @mode: Sleep Mode to check for.
+ * @idle: Idle or Suspend Sleep Mode.
+ *
+ * Helper function to determine if a Idle or Suspend
+ * Sleep mode is allowed for a specific CPU.
+ *
+ * Return: 1 for allowed; 0 if not allowed.
+ */
+int msm_pm_sleep_mode_allow(unsigned int, unsigned int, bool);
+
+/**
+ * msm_pm_sleep_mode_supported() - API to determine if sleep mode is
+ * supported.
+ * @cpu: CPU on which to check for the sleep mode.
+ * @mode: Sleep Mode to check for.
+ * @idle: Idle or Suspend Sleep Mode.
+ *
+ * Helper function to determine if a Idle or Suspend
+ * Sleep mode is allowed and enabled for a specific CPU.
+ *
+ * Return: 1 for supported; 0 if not supported.
+ */
+int msm_pm_sleep_mode_supported(unsigned int, unsigned int, bool);
+
+struct msm_pm_cpr_ops {
+ void (*cpr_suspend)(void);
+ void (*cpr_resume)(void);
+};
+
+void __init msm_pm_set_tz_retention_flag(unsigned int flag);
+void msm_pm_enable_retention(bool enable);
+bool msm_pm_retention_enabled(void);
+void msm_cpu_pm_enter_sleep(enum msm_pm_sleep_mode mode, bool from_idle);
+
+#ifdef CONFIG_MSM_PM
+void msm_pm_set_rpm_wakeup_irq(unsigned int irq);
+int msm_pm_wait_cpu_shutdown(unsigned int cpu);
+int __init msm_pm_sleep_status_init(void);
+void msm_pm_set_l2_flush_flag(enum msm_pm_l2_scm_flag flag);
+void lpm_cpu_hotplug_enter(unsigned int cpu);
+s32 msm_cpuidle_get_deep_idle_latency(void);
+int msm_pm_collapse(unsigned long unused);
+#else
+static inline void msm_pm_set_rpm_wakeup_irq(unsigned int irq) {}
+static inline int msm_pm_wait_cpu_shutdown(unsigned int cpu) { return 0; }
+static inline int msm_pm_sleep_status_init(void) { return 0; }
+static inline void msm_pm_set_l2_flush_flag(enum msm_pm_l2_scm_flag flag) { }
+static inline void lpm_cpu_hotplug_enter(unsigned int cpu) {}
+static inline s32 msm_cpuidle_get_deep_idle_latency(void) { return 0; }
+#define msm_pm_collapse NULL
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+int msm_platform_secondary_init(unsigned int cpu);
+#else
+static inline int msm_platform_secondary_init(unsigned int cpu) { return 0; }
+#endif
+
+enum msm_pm_time_stats_id {
+ MSM_PM_STAT_REQUESTED_IDLE = 0,
+ MSM_PM_STAT_IDLE_SPIN,
+ MSM_PM_STAT_IDLE_WFI,
+ MSM_PM_STAT_RETENTION,
+ MSM_PM_STAT_IDLE_STANDALONE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_FAILED_STANDALONE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE,
+ MSM_PM_STAT_SUSPEND,
+ MSM_PM_STAT_FAILED_SUSPEND,
+ MSM_PM_STAT_NOT_IDLE,
+ MSM_PM_STAT_COUNT
+};
+
+#ifdef CONFIG_MSM_IDLE_STATS
+void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats, int size);
+void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t);
+void msm_pm_l2_add_stat(uint32_t id, int64_t t);
+#else
+static inline void msm_pm_add_stats(enum msm_pm_time_stats_id *enable_stats,
+ int size) {}
+static inline void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t) {}
+static inline void msm_pm_l2_add_stat(uint32_t id, int64_t t) {}
+#endif
+
+void msm_pm_set_cpr_ops(struct msm_pm_cpr_ops *ops);
+extern unsigned long msm_pc_debug_counters_phys;
+#endif /* __ARCH_ARM_MACH_MSM_PM_H */
diff --git a/include/soc/qcom/ramdump.h b/include/soc/qcom/ramdump.h
new file mode 100644
index 000000000000..7cd59dd89042
--- /dev/null
+++ b/include/soc/qcom/ramdump.h
@@ -0,0 +1,55 @@
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _RAMDUMP_HEADER
+#define _RAMDUMP_HEADER
+
+struct device;
+
+struct ramdump_segment {
+ unsigned long address;
+ unsigned long size;
+};
+
+#ifdef CONFIG_MSM_SUBSYSTEM_RESTART
+extern void *create_ramdump_device(const char *dev_name, struct device *parent);
+extern void destroy_ramdump_device(void *dev);
+extern int do_ramdump(void *handle, struct ramdump_segment *segments,
+ int nsegments);
+extern int do_elf_ramdump(void *handle, struct ramdump_segment *segments,
+ int nsegments);
+
+#else
+static inline void *create_ramdump_device(const char *dev_name,
+ struct device *parent)
+{
+ return NULL;
+}
+
+static inline void destroy_ramdump_device(void *dev)
+{
+}
+
+static inline int do_ramdump(void *handle, struct ramdump_segment *segments,
+ int nsegments)
+{
+ return -ENODEV;
+}
+
+static inline int do_elf_ramdump(void *handle, struct ramdump_segment *segments,
+ int nsegments)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_MSM_SUBSYSTEM_RESTART */
+
+#endif
diff --git a/include/soc/qcom/restart.h b/include/soc/qcom/restart.h
new file mode 100644
index 000000000000..bd0f139b548a
--- /dev/null
+++ b/include/soc/qcom/restart.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ASM_ARCH_MSM_RESTART_H_
+#define _ASM_ARCH_MSM_RESTART_H_
+
+#define RESTART_NORMAL 0x0
+#define RESTART_DLOAD 0x1
+
+void msm_set_restart_mode(int mode);
+extern int pmic_reset_irq;
+
+#endif
+
diff --git a/include/soc/qcom/rpm-notifier.h b/include/soc/qcom/rpm-notifier.h
new file mode 100644
index 000000000000..ea6d95e313a8
--- /dev/null
+++ b/include/soc/qcom/rpm-notifier.h
@@ -0,0 +1,63 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ARCH_ARM_MACH_MSM_RPM_NOTIF_H
+#define __ARCH_ARM_MACH_MSM_RPM_NOTIF_H
+
+struct msm_rpm_notifier_data {
+ uint32_t rsc_type;
+ uint32_t rsc_id;
+ uint32_t key;
+ uint32_t size;
+ uint8_t *value;
+};
+/**
+ * msm_rpm_register_notifier - Register for sleep set notifications
+ *
+ * @nb - notifier block to register
+ *
+ * return 0 on success, errno on failure.
+ */
+int msm_rpm_register_notifier(struct notifier_block *nb);
+
+/**
+ * msm_rpm_unregister_notifier - Unregister previously registered notifications
+ *
+ * @nb - notifier block to unregister
+ *
+ * return 0 on success, errno on failure.
+ */
+int msm_rpm_unregister_notifier(struct notifier_block *nb);
+
+/**
+ * msm_rpm_enter_sleep - Notify RPM driver to prepare for entering sleep
+ *
+ * @print - flag to enable printing contents of sleep buffer.
+ * @cpumask - cpumask of next wakeup cpu
+ *
+ * return 0 on success, errno on failure.
+ */
+int msm_rpm_enter_sleep(bool print, const struct cpumask *cpumask);
+
+/**
+ * msm_rpm_exit_sleep - Notify RPM driver about resuming from power collapse
+ */
+void msm_rpm_exit_sleep(void);
+
+/**
+ * msm_rpm_waiting_for_ack - Indicate if there is RPM message
+ * pending acknowledgement.
+ * returns true for pending messages and false otherwise
+ */
+bool msm_rpm_waiting_for_ack(void);
+
+#endif /*__ARCH_ARM_MACH_MSM_RPM_NOTIF_H */
diff --git a/include/soc/qcom/rpm-smd.h b/include/soc/qcom/rpm-smd.h
new file mode 100644
index 000000000000..79f9dff75f83
--- /dev/null
+++ b/include/soc/qcom/rpm-smd.h
@@ -0,0 +1,268 @@
+/* Copyright (c) 2012, 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_RPM_SMD_H
+#define __ARCH_ARM_MACH_MSM_RPM_SMD_H
+
+/**
+ * enum msm_rpm_set - RPM enumerations for sleep/active set
+ * %MSM_RPM_CTX_ACTIVE_SET: Set resource parameters for active mode.
+ * %MSM_RPM_CTX_SLEEP_SET: Set resource parameters for sleep.
+ */
+enum msm_rpm_set {
+ MSM_RPM_CTX_ACTIVE_SET,
+ MSM_RPM_CTX_SLEEP_SET,
+};
+
+struct msm_rpm_request;
+
+struct msm_rpm_kvp {
+ uint32_t key;
+ uint32_t length;
+ uint8_t *data;
+};
+#ifdef CONFIG_MSM_RPM_SMD
+/**
+ * msm_rpm_create_request() - Creates a parent element to identify the
+ * resource on the RPM, that stores the KVPs for different fields modified
+ * for a hardware resource
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @num_elements: number of KVP pairs associated with the resource
+ *
+ * returns pointer to a msm_rpm_request on success, NULL on error
+ */
+struct msm_rpm_request *msm_rpm_create_request(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements);
+
+/**
+ * msm_rpm_create_request_noirq() - Creates a parent element to identify the
+ * resource on the RPM, that stores the KVPs for different fields modified
+ * for a hardware resource. This function is similar to msm_rpm_create_request
+ * except that it has to be called with interrupts masked.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @num_elements: number of KVP pairs associated with the resource
+ *
+ * returns pointer to a msm_rpm_request on success, NULL on error
+ */
+struct msm_rpm_request *msm_rpm_create_request_noirq(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements);
+
+/**
+ * msm_rpm_add_kvp_data() - Adds a Key value pair to an existing RPM resource.
+ *
+ * @handle: RPM resource handle to which the data should be appended
+ * @key: unsigned integer identify the parameter modified
+ * @data: byte array that contains the value corresponding to key.
+ * @size: size of data in bytes.
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int size);
+
+/**
+ * msm_rpm_add_kvp_data_noirq() - Adds a Key value pair to an existing RPM
+ * resource. This function is similar to msm_rpm_add_kvp_data except that it
+ * has to be called with interrupts masked.
+ *
+ * @handle: RPM resource handle to which the data should be appended
+ * @key: unsigned integer identify the parameter modified
+ * @data: byte array that contains the value corresponding to key.
+ * @size: size of data in bytes.
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int size);
+
+/** msm_rpm_free_request() - clean up the RPM request handle created with
+ * msm_rpm_create_request
+ *
+ * @handle: RPM resource handle to be cleared.
+ */
+
+void msm_rpm_free_request(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request() - Send the RPM messages using SMD. The function
+ * assigns a message id before sending the data out to the RPM. RPM hardware
+ * uses the message id to acknowledge the messages.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns non-zero message id on success and zero on a failed transaction.
+ * The drivers use message id to wait for ACK from RPM.
+ */
+int msm_rpm_send_request(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request_noirq() - Send the RPM messages using SMD. The
+ * function assigns a message id before sending the data out to the RPM.
+ * RPM hardware uses the message id to acknowledge the messages. This function
+ * is similar to msm_rpm_send_request except that it has to be called with
+ * interrupts masked.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns non-zero message id on success and zero on a failed transaction.
+ * The drivers use message id to wait for ACK from RPM.
+ */
+int msm_rpm_send_request_noirq(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_wait_for_ack() - A blocking call that waits for acknowledgment of
+ * a message from RPM.
+ *
+ * @msg_id: the return from msm_rpm_send_requests
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_wait_for_ack(uint32_t msg_id);
+
+/**
+ * msm_rpm_wait_for_ack_noirq() - A blocking call that waits for acknowledgment
+ * of a message from RPM. This function is similar to msm_rpm_wait_for_ack
+ * except that it has to be called with interrupts masked.
+ *
+ * @msg_id: the return from msm_rpm_send_request
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_wait_for_ack_noirq(uint32_t msg_id);
+
+/**
+ * msm_rpm_send_message() -Wrapper function for clients to send data given an
+ * array of key value pairs.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @kvp: array of KVP data.
+ * @nelem: number of KVP pairs associated with the message.
+ *
+ * returns 0 on success and errno on failure.
+ */
+int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
+
+/**
+ * msm_rpm_send_message_noirq() -Wrapper function for clients to send data
+ * given an array of key value pairs. This function is similar to the
+ * msm_rpm_send_message() except that it has to be called with interrupts
+ * disabled. Clients should choose the irq version when possible for system
+ * performance.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @kvp: array of KVP data.
+ * @nelem: number of KVP pairs associated with the message.
+ *
+ * returns 0 on success and errno on failure.
+ */
+int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
+
+/**
+ * msm_rpm_driver_init() - Initialization function that registers for a
+ * rpm platform driver.
+ *
+ * returns 0 on success.
+ */
+int __init msm_rpm_driver_init(void);
+
+#else
+
+static inline struct msm_rpm_request *msm_rpm_create_request(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements)
+{
+ return NULL;
+}
+
+static inline struct msm_rpm_request *msm_rpm_create_request_noirq(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements)
+{
+ return NULL;
+
+}
+static inline uint32_t msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int count)
+{
+ return 0;
+}
+static inline uint32_t msm_rpm_add_kvp_data_noirq(
+ struct msm_rpm_request *handle, uint32_t key,
+ const uint8_t *data, int count)
+{
+ return 0;
+}
+
+static inline void msm_rpm_free_request(struct msm_rpm_request *handle)
+{
+ return;
+}
+
+static inline int msm_rpm_send_request(struct msm_rpm_request *handle)
+{
+ return 0;
+}
+
+static inline int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
+{
+ return 0;
+
+}
+
+static inline int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+ return 0;
+}
+
+static inline int msm_rpm_send_message_noirq(enum msm_rpm_set set,
+ uint32_t rsc_type, uint32_t rsc_id, struct msm_rpm_kvp *kvp,
+ int nelems)
+{
+ return 0;
+}
+
+static inline int msm_rpm_wait_for_ack(uint32_t msg_id)
+{
+ return 0;
+
+}
+static inline int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
+{
+ return 0;
+}
+
+static inline int __init msm_rpm_driver_init(void)
+{
+ return 0;
+}
+#endif
+#endif /*__ARCH_ARM_MACH_MSM_RPM_SMD_H*/
diff --git a/include/soc/qcom/scm-boot.h b/include/soc/qcom/scm-boot.h
new file mode 100644
index 000000000000..0b2e05f80b6a
--- /dev/null
+++ b/include/soc/qcom/scm-boot.h
@@ -0,0 +1,65 @@
+/* Copyright (c) 2010, 2012, 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MACH_SCM_BOOT_H
+#define __MACH_SCM_BOOT_H
+
+#define SCM_BOOT_ADDR 0x1
+#define SCM_FLAG_COLDBOOT_CPU1 0x01
+#define SCM_FLAG_COLDBOOT_CPU2 0x08
+#define SCM_FLAG_COLDBOOT_CPU3 0x20
+#define SCM_FLAG_WARMBOOT_CPU1 0x02
+#define SCM_FLAG_WARMBOOT_CPU0 0x04
+#define SCM_FLAG_WARMBOOT_CPU2 0x10
+#define SCM_FLAG_WARMBOOT_CPU3 0x40
+
+/* Multicluster Variants */
+#define SCM_BOOT_ADDR_MC 0x11
+#define SCM_FLAG_COLDBOOT_MC 0x02
+#define SCM_FLAG_WARMBOOT_MC 0x04
+
+#ifdef CONFIG_ARM64
+#define SCM_FLAG_HLOS 0x01
+#else
+#define SCM_FLAG_HLOS 0x0
+#endif
+
+#ifdef CONFIG_MSM_SCM
+int scm_set_boot_addr(phys_addr_t addr, unsigned int flags);
+int scm_set_boot_addr_mc(phys_addr_t addr, u32 aff0,
+ u32 aff1, u32 aff2, u32 flags);
+int scm_set_warm_boot_addr_mc_for_all(phys_addr_t addr);
+int scm_is_mc_boot_available(void);
+#else
+static inline int scm_set_boot_addr(phys_addr_t addr, unsigned int flags)
+{
+ WARN_ONCE(1, "CONFIG_MSM_SCM disabled, SCM call will fail silently\n");
+ return 0;
+}
+static inline int scm_set_boot_addr_mc(phys_addr_t addr, u32 aff0,
+ u32 aff1, u32 aff2, u32 flags)
+{
+ WARN_ONCE(1, "CONFIG_MSM_SCM disabled, SCM call will fail silently\n");
+ return 0;
+}
+static inline int scm_set_warm_boot_addr_mc_for_all(phys_addr_t addr)
+{
+ WARN_ONCE(1, "CONFIG_MSM_SCM disabled, SCM call will fail silently\n");
+ return 0;
+}
+static inline int scm_is_mc_boot_available(void)
+{
+ WARN_ONCE(1, "CONFIG_MSM_SCM disabled, SCM call will fail silently\n");
+ return 0;
+}
+#endif
+
+#endif
diff --git a/include/soc/qcom/scm.h b/include/soc/qcom/scm.h
new file mode 100644
index 000000000000..b05a7cb61d05
--- /dev/null
+++ b/include/soc/qcom/scm.h
@@ -0,0 +1,124 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MACH_SCM_H
+#define __MACH_SCM_H
+
+#define SCM_SVC_BOOT 0x1
+#define SCM_SVC_PIL 0x2
+#define SCM_SVC_UTIL 0x3
+#define SCM_SVC_TZ 0x4
+#define SCM_SVC_IO 0x5
+#define SCM_SVC_INFO 0x6
+#define SCM_SVC_SSD 0x7
+#define SCM_SVC_FUSE 0x8
+#define SCM_SVC_PWR 0x9
+#define SCM_SVC_MP 0xC
+#define SCM_SVC_DCVS 0xD
+#define SCM_SVC_ES 0x10
+#define SCM_SVC_HDCP 0x11
+#define SCM_SVC_TZSCHEDULER 0xFC
+
+#define SCM_FUSE_READ 0x7
+#define SCM_CMD_HDCP 0x01
+
+/* SCM Features */
+#define SCM_SVC_SEC_CAMERA 0xD
+
+#define DEFINE_SCM_BUFFER(__n) \
+static char __n[PAGE_SIZE] __aligned(PAGE_SIZE);
+
+#define SCM_BUFFER_SIZE(__buf) sizeof(__buf)
+
+#define SCM_BUFFER_PHYS(__buf) virt_to_phys(__buf)
+
+#ifdef CONFIG_MSM_SCM
+extern int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
+ void *resp_buf, size_t resp_len);
+
+extern int scm_call_noalloc(u32 svc_id, u32 cmd_id, const void *cmd_buf,
+ size_t cmd_len, void *resp_buf, size_t resp_len,
+ void *scm_buf, size_t scm_buf_size);
+
+
+extern s32 scm_call_atomic1(u32 svc, u32 cmd, u32 arg1);
+extern s32 scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2);
+extern s32 scm_call_atomic3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3);
+extern s32 scm_call_atomic4_3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3,
+ u32 arg4, u32 *ret1, u32 *ret2);
+
+#define SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
+
+extern u32 scm_get_version(void);
+extern int scm_is_call_available(u32 svc_id, u32 cmd_id);
+extern int scm_get_feat_version(u32 feat);
+
+#define SCM_HDCP_MAX_REG 5
+
+struct scm_hdcp_req {
+ u32 addr;
+ u32 val;
+};
+
+#else
+
+static inline int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf,
+ size_t cmd_len, void *resp_buf, size_t resp_len)
+{
+ return 0;
+}
+
+static inline int scm_call_noalloc(u32 svc_id, u32 cmd_id,
+ const void *cmd_buf, size_t cmd_len, void *resp_buf,
+ size_t resp_len, void *scm_buf, size_t scm_buf_size)
+{
+ return 0;
+}
+
+static inline s32 scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
+{
+ return 0;
+}
+
+static inline s32 scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
+{
+ return 0;
+}
+
+static inline s32 scm_call_atomic3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
+ u32 arg3)
+{
+ return 0;
+}
+
+static inline s32 scm_call_atomic4_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
+ u32 arg3, u32 arg4, u32 *ret1, u32 *ret2)
+{
+ return 0;
+}
+
+static inline u32 scm_get_version(void)
+{
+ return 0;
+}
+
+static inline int scm_is_call_available(u32 svc_id, u32 cmd_id)
+{
+ return 0;
+}
+
+static inline int scm_get_feat_version(u32 feat)
+{
+ return 0;
+}
+
+#endif
+#endif
diff --git a/include/soc/qcom/smd.h b/include/soc/qcom/smd.h
new file mode 100644
index 000000000000..926990ed0533
--- /dev/null
+++ b/include/soc/qcom/smd.h
@@ -0,0 +1,401 @@
+/* include/soc/qcom/smd.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ASM_ARCH_MSM_SMD_H
+#define __ASM_ARCH_MSM_SMD_H
+
+#include <linux/io.h>
+
+#include <soc/qcom/smem.h>
+
+typedef struct smd_channel smd_channel_t;
+struct cpumask;
+
+#define SMD_MAX_CH_NAME_LEN 20 /* includes null char at end */
+
+#define SMD_EVENT_DATA 1
+#define SMD_EVENT_OPEN 2
+#define SMD_EVENT_CLOSE 3
+#define SMD_EVENT_STATUS 4
+#define SMD_EVENT_REOPEN_READY 5
+
+/*
+ * SMD Processor ID's.
+ *
+ * For all processors that have both SMSM and SMD clients,
+ * the SMSM Processor ID and the SMD Processor ID will
+ * be the same. In cases where a processor only supports
+ * SMD, the entry will only exist in this enum.
+ */
+enum {
+ SMD_APPS = SMEM_APPS,
+ SMD_MODEM = SMEM_MODEM,
+ SMD_Q6 = SMEM_Q6,
+ SMD_DSPS = SMEM_DSPS,
+ SMD_TZ = SMEM_DSPS,
+ SMD_WCNSS = SMEM_WCNSS,
+ SMD_MODEM_Q6_FW = SMEM_MODEM_Q6_FW,
+ SMD_RPM = SMEM_RPM,
+ NUM_SMD_SUBSYSTEMS,
+};
+
+enum {
+ SMD_APPS_MODEM = 0,
+ SMD_APPS_QDSP,
+ SMD_MODEM_QDSP,
+ SMD_APPS_DSPS,
+ SMD_MODEM_DSPS,
+ SMD_QDSP_DSPS,
+ SMD_APPS_WCNSS,
+ SMD_MODEM_WCNSS,
+ SMD_QDSP_WCNSS,
+ SMD_DSPS_WCNSS,
+ SMD_APPS_Q6FW,
+ SMD_MODEM_Q6FW,
+ SMD_QDSP_Q6FW,
+ SMD_DSPS_Q6FW,
+ SMD_WCNSS_Q6FW,
+ SMD_APPS_RPM,
+ SMD_MODEM_RPM,
+ SMD_QDSP_RPM,
+ SMD_WCNSS_RPM,
+ SMD_TZ_RPM,
+ SMD_NUM_TYPE,
+
+};
+
+#ifdef CONFIG_MSM_SMD
+int smd_close(smd_channel_t *ch);
+
+/* passing a null pointer for data reads and discards */
+int smd_read(smd_channel_t *ch, void *data, int len);
+int smd_read_from_cb(smd_channel_t *ch, void *data, int len);
+/* Same as smd_read() but takes a data buffer from userspace
+ * The function might sleep. Only safe to call from user context
+ */
+int smd_read_user_buffer(smd_channel_t *ch, void *data, int len);
+
+/* Write to stream channels may do a partial write and return
+** the length actually written.
+** Write to packet channels will never do a partial write --
+** it will return the requested length written or an error.
+*/
+int smd_write(smd_channel_t *ch, const void *data, int len);
+/* Same as smd_write() but takes a data buffer from userspace
+ * The function might sleep. Only safe to call from user context
+ */
+int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len);
+
+int smd_write_avail(smd_channel_t *ch);
+int smd_read_avail(smd_channel_t *ch);
+
+/* Returns the total size of the current packet being read.
+** Returns 0 if no packets available or a stream channel.
+*/
+int smd_cur_packet_size(smd_channel_t *ch);
+
+/* these are used to get and set the IF sigs of a channel.
+ * DTR and RTS can be set; DSR, CTS, CD and RI can be read.
+ */
+int smd_tiocmget(smd_channel_t *ch);
+int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear);
+int
+smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear);
+int smd_named_open_on_edge(const char *name, uint32_t edge, smd_channel_t **_ch,
+ void *priv, void (*notify)(void *, unsigned));
+
+/* Tells the other end of the smd channel that this end wants to receive
+ * interrupts when the written data is read. Read interrupts should only
+ * be enabled when there is no space left in the buffer to write to, thus
+ * the interrupt acts as notification that space may be available. If the
+ * other side does not support enabling/disabling interrupts on demand,
+ * then this function has no effect if called.
+ */
+void smd_enable_read_intr(smd_channel_t *ch);
+
+/* Tells the other end of the smd channel that this end does not want
+ * interrupts when written data is read. The interrupts should be
+ * disabled by default. If the other side does not support enabling/
+ * disabling interrupts on demand, then this function has no effect if
+ * called.
+ */
+void smd_disable_read_intr(smd_channel_t *ch);
+
+/**
+ * Enable/disable receive interrupts for the remote processor used by a
+ * particular channel.
+ * @ch: open channel handle to use for the edge
+ * @mask: 1 = mask interrupts; 0 = unmask interrupts
+ * @cpumask cpumask for the next cpu scheduled to be woken up
+ * @returns: 0 for success; < 0 for failure
+ *
+ * Note that this enables/disables all interrupts from the remote subsystem for
+ * all channels. As such, it should be used with care and only for specific
+ * use cases such as power-collapse sequencing.
+ */
+int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask,
+ const struct cpumask *cpumask);
+
+/* Starts a packet transaction. The size of the packet may exceed the total
+ * size of the smd ring buffer.
+ *
+ * @ch: channel to write the packet to
+ * @len: total length of the packet
+ *
+ * Returns:
+ * 0 - success
+ * -ENODEV - invalid smd channel
+ * -EACCES - non-packet channel specified
+ * -EINVAL - invalid length
+ * -EBUSY - transaction already in progress
+ * -EAGAIN - not enough memory in ring buffer to start transaction
+ * -EPERM - unable to successfully start transaction due to write error
+ */
+int smd_write_start(smd_channel_t *ch, int len);
+
+/* Writes a segment of the packet for a packet transaction.
+ *
+ * @ch: channel to write packet to
+ * @data: buffer of data to write
+ * @len: length of data buffer
+ * @user_buf: (0) - buffer from kernelspace (1) - buffer from userspace
+ *
+ * Returns:
+ * number of bytes written
+ * -ENODEV - invalid smd channel
+ * -EINVAL - invalid length
+ * -ENOEXEC - transaction not started
+ */
+int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf);
+
+/* Completes a packet transaction. Do not call from interrupt context.
+ *
+ * @ch: channel to complete transaction on
+ *
+ * Returns:
+ * 0 - success
+ * -ENODEV - invalid smd channel
+ * -E2BIG - some amount of packet is not yet written
+ */
+int smd_write_end(smd_channel_t *ch);
+
+/**
+ * smd_write_segment_avail() - available write space for packet transactions
+ * @ch: channel to write packet to
+ * @returns: number of bytes available to write to, or -ENODEV for invalid ch
+ *
+ * This is a version of smd_write_avail() intended for use with packet
+ * transactions. This version correctly accounts for any internal reserved
+ * space at all stages of the transaction.
+ */
+int smd_write_segment_avail(smd_channel_t *ch);
+
+/*
+ * Returns a pointer to the subsystem name or NULL if no
+ * subsystem name is available.
+ *
+ * @type - Edge definition
+ */
+const char *smd_edge_to_subsystem(uint32_t type);
+
+/*
+ * Returns a pointer to the subsystem name given the
+ * remote processor ID.
+ *
+ * @pid Remote processor ID
+ * @returns Pointer to subsystem name or NULL if not found
+ */
+const char *smd_pid_to_subsystem(uint32_t pid);
+
+/*
+ * Checks to see if a new packet has arrived on the channel. Only to be
+ * called with interrupts disabled.
+ *
+ * @ch: channel to check if a packet has arrived
+ *
+ * Returns:
+ * 0 - packet not available
+ * 1 - packet available
+ * -EINVAL - NULL parameter or non-packet based channel provided
+ */
+int smd_is_pkt_avail(smd_channel_t *ch);
+
+/*
+ * SMD initialization function that registers for a SMD platform driver.
+ *
+ * returns success on successful driver registration.
+ */
+int __init msm_smd_init(void);
+
+/**
+ * smd_remote_ss_to_edge() - return edge type from remote ss type
+ * @name: remote subsystem name
+ *
+ * Returns the edge type connected between the local subsystem(APPS)
+ * and remote subsystem @name.
+ */
+int smd_remote_ss_to_edge(const char *name);
+
+/**
+ * smd_edge_to_pil_str - Returns the PIL string used to load the remote side of
+ * the indicated edge.
+ *
+ * @type - Edge definition
+ * @returns - The PIL string to load the remote side of @type or NULL if the
+ * PIL string does not exist.
+ */
+const char *smd_edge_to_pil_str(uint32_t type);
+
+#else
+
+static inline int smd_close(smd_channel_t *ch)
+{
+ return -ENODEV;
+}
+
+static inline int smd_read(smd_channel_t *ch, void *data, int len)
+{
+ return -ENODEV;
+}
+
+static inline int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
+{
+ return -ENODEV;
+}
+
+static inline int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
+{
+ return -ENODEV;
+}
+
+static inline int smd_write(smd_channel_t *ch, const void *data, int len)
+{
+ return -ENODEV;
+}
+
+static inline int
+smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
+{
+ return -ENODEV;
+}
+
+static inline int smd_write_avail(smd_channel_t *ch)
+{
+ return -ENODEV;
+}
+
+static inline int smd_read_avail(smd_channel_t *ch)
+{
+ return -ENODEV;
+}
+
+static inline int smd_cur_packet_size(smd_channel_t *ch)
+{
+ return -ENODEV;
+}
+
+static inline int smd_tiocmget(smd_channel_t *ch)
+{
+ return -ENODEV;
+}
+
+static inline int
+smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
+{
+ return -ENODEV;
+}
+
+static inline int
+smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
+{
+ return -ENODEV;
+}
+
+static inline int
+smd_named_open_on_edge(const char *name, uint32_t edge, smd_channel_t **_ch,
+ void *priv, void (*notify)(void *, unsigned))
+{
+ return -ENODEV;
+}
+
+static inline void smd_enable_read_intr(smd_channel_t *ch)
+{
+}
+
+static inline void smd_disable_read_intr(smd_channel_t *ch)
+{
+}
+
+static inline int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask,
+ const struct cpumask *cpumask)
+{
+ return -ENODEV;
+}
+
+static inline int smd_write_start(smd_channel_t *ch, int len)
+{
+ return -ENODEV;
+}
+
+static inline int
+smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
+{
+ return -ENODEV;
+}
+
+static inline int smd_write_end(smd_channel_t *ch)
+{
+ return -ENODEV;
+}
+
+static inline int smd_write_segment_avail(smd_channel_t *ch)
+{
+ return -ENODEV;
+}
+
+static inline const char *smd_edge_to_subsystem(uint32_t type)
+{
+ return NULL;
+}
+
+static inline const char *smd_pid_to_subsystem(uint32_t pid)
+{
+ return NULL;
+}
+
+static inline int smd_is_pkt_avail(smd_channel_t *ch)
+{
+ return -ENODEV;
+}
+
+static inline int __init msm_smd_init(void)
+{
+ return 0;
+}
+
+static inline int smd_remote_ss_to_edge(const char *name)
+{
+ return -EINVAL;
+}
+
+static inline const char *smd_edge_to_pil_str(uint32_t type)
+{
+ return NULL;
+}
+#endif
+
+#endif
diff --git a/include/soc/qcom/smem.h b/include/soc/qcom/smem.h
new file mode 100644
index 000000000000..c734dee2d9d0
--- /dev/null
+++ b/include/soc/qcom/smem.h
@@ -0,0 +1,241 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_SMEM_H_
+#define _ARCH_ARM_MACH_MSM_SMEM_H_
+
+#include <linux/types.h>
+
+enum {
+ SMEM_APPS,
+ SMEM_MODEM,
+ SMEM_Q6,
+ SMEM_DSPS,
+ SMEM_WCNSS,
+ SMEM_MODEM_Q6_FW,
+ SMEM_RPM,
+ NUM_SMEM_SUBSYSTEMS,
+};
+
+/*
+ * Flag options for the XXX_to_proc() API
+ *
+ * SMEM_ITEM_CACHED_FLAG - Indicates this operation should use cachable smem
+ *
+ * SMEM_ANY_HOST_FLAG - Indicates this operation should not apply to smem items
+ * which are limited to a specific host pairing. Will
+ * cause this operation to ignore the to_proc parameter.
+ */
+#define SMEM_ITEM_CACHED_FLAG 1
+#define SMEM_ANY_HOST_FLAG 2
+
+#define SMEM_NUM_SMD_STREAM_CHANNELS 64
+
+/**
+ * OVERFLOW_ADD_UNSIGNED() - check for unsigned overflow
+ *
+ * @type: type to check for overflow
+ * @a: left value to use
+ * @b: right value to use
+ * @returns: true if a + b will result in overflow; false otherwise
+ */
+#define OVERFLOW_ADD_UNSIGNED(type, a, b) \
+ (((type)~0 - (a)) < (b) ? true : false)
+
+enum {
+ /* fixed items */
+ SMEM_PROC_COMM = 0,
+ SMEM_HEAP_INFO,
+ SMEM_ALLOCATION_TABLE,
+ SMEM_VERSION_INFO,
+ SMEM_HW_RESET_DETECT,
+ SMEM_AARM_WARM_BOOT,
+ SMEM_DIAG_ERR_MESSAGE,
+ SMEM_SPINLOCK_ARRAY,
+ SMEM_MEMORY_BARRIER_LOCATION,
+ SMEM_FIXED_ITEM_LAST = SMEM_MEMORY_BARRIER_LOCATION,
+
+ /* dynamic items */
+ SMEM_AARM_PARTITION_TABLE,
+ SMEM_AARM_BAD_BLOCK_TABLE,
+ SMEM_RESERVE_BAD_BLOCKS,
+ SMEM_WM_UUID,
+ SMEM_CHANNEL_ALLOC_TBL,
+ SMEM_SMD_BASE_ID,
+ SMEM_SMEM_LOG_IDX = SMEM_SMD_BASE_ID + SMEM_NUM_SMD_STREAM_CHANNELS,
+ SMEM_SMEM_LOG_EVENTS,
+ SMEM_SMEM_STATIC_LOG_IDX,
+ SMEM_SMEM_STATIC_LOG_EVENTS,
+ SMEM_SMEM_SLOW_CLOCK_SYNC,
+ SMEM_SMEM_SLOW_CLOCK_VALUE,
+ SMEM_BIO_LED_BUF,
+ SMEM_SMSM_SHARED_STATE,
+ SMEM_SMSM_INT_INFO,
+ SMEM_SMSM_SLEEP_DELAY,
+ SMEM_SMSM_LIMIT_SLEEP,
+ SMEM_SLEEP_POWER_COLLAPSE_DISABLED,
+ SMEM_KEYPAD_KEYS_PRESSED,
+ SMEM_KEYPAD_STATE_UPDATED,
+ SMEM_KEYPAD_STATE_IDX,
+ SMEM_GPIO_INT,
+ SMEM_MDDI_LCD_IDX,
+ SMEM_MDDI_HOST_DRIVER_STATE,
+ SMEM_MDDI_LCD_DISP_STATE,
+ SMEM_LCD_CUR_PANEL,
+ SMEM_MARM_BOOT_SEGMENT_INFO,
+ SMEM_AARM_BOOT_SEGMENT_INFO,
+ SMEM_SLEEP_STATIC,
+ SMEM_SCORPION_FREQUENCY,
+ SMEM_SMD_PROFILES,
+ SMEM_TSSC_BUSY,
+ SMEM_HS_SUSPEND_FILTER_INFO,
+ SMEM_BATT_INFO,
+ SMEM_APPS_BOOT_MODE,
+ SMEM_VERSION_FIRST,
+ SMEM_VERSION_SMD = SMEM_VERSION_FIRST,
+ SMEM_VERSION_LAST = SMEM_VERSION_FIRST + 24,
+ SMEM_OSS_RRCASN1_BUF1,
+ SMEM_OSS_RRCASN1_BUF2,
+ SMEM_ID_VENDOR0,
+ SMEM_ID_VENDOR1,
+ SMEM_ID_VENDOR2,
+ SMEM_HW_SW_BUILD_ID,
+ SMEM_SMD_BASE_ID_2,
+ SMEM_SMD_FIFO_BASE_ID_2 = SMEM_SMD_BASE_ID_2 +
+ SMEM_NUM_SMD_STREAM_CHANNELS,
+ SMEM_CHANNEL_ALLOC_TBL_2 = SMEM_SMD_FIFO_BASE_ID_2 +
+ SMEM_NUM_SMD_STREAM_CHANNELS,
+ SMEM_I2C_MUTEX = SMEM_CHANNEL_ALLOC_TBL_2 +
+ SMEM_NUM_SMD_STREAM_CHANNELS,
+ SMEM_SCLK_CONVERSION,
+ SMEM_SMD_SMSM_INTR_MUX,
+ SMEM_SMSM_CPU_INTR_MASK,
+ SMEM_APPS_DEM_SLAVE_DATA,
+ SMEM_QDSP6_DEM_SLAVE_DATA,
+ SMEM_CLKREGIM_BSP,
+ SMEM_CLKREGIM_SOURCES,
+ SMEM_SMD_FIFO_BASE_ID,
+ SMEM_USABLE_RAM_PARTITION_TABLE = SMEM_SMD_FIFO_BASE_ID +
+ SMEM_NUM_SMD_STREAM_CHANNELS,
+ SMEM_POWER_ON_STATUS_INFO,
+ SMEM_DAL_AREA,
+ SMEM_SMEM_LOG_POWER_IDX,
+ SMEM_SMEM_LOG_POWER_WRAP,
+ SMEM_SMEM_LOG_POWER_EVENTS,
+ SMEM_ERR_CRASH_LOG,
+ SMEM_ERR_F3_TRACE_LOG,
+ SMEM_SMD_BRIDGE_ALLOC_TABLE,
+ SMEM_SMDLITE_TABLE,
+ SMEM_SD_IMG_UPGRADE_STATUS,
+ SMEM_SEFS_INFO,
+ SMEM_RESET_LOG,
+ SMEM_RESET_LOG_SYMBOLS,
+ SMEM_MODEM_SW_BUILD_ID,
+ SMEM_SMEM_LOG_MPROC_WRAP,
+ SMEM_BOOT_INFO_FOR_APPS,
+ SMEM_SMSM_SIZE_INFO,
+ SMEM_SMD_LOOPBACK_REGISTER,
+ SMEM_SSR_REASON_MSS0,
+ SMEM_SSR_REASON_WCNSS0,
+ SMEM_SSR_REASON_LPASS0,
+ SMEM_SSR_REASON_DSPS0,
+ SMEM_SSR_REASON_VCODEC0,
+ SMEM_SMP2P_APPS_BASE = 427,
+ SMEM_SMP2P_MODEM_BASE = SMEM_SMP2P_APPS_BASE + 8, /* 435 */
+ SMEM_SMP2P_AUDIO_BASE = SMEM_SMP2P_MODEM_BASE + 8, /* 443 */
+ SMEM_SMP2P_WIRLESS_BASE = SMEM_SMP2P_AUDIO_BASE + 8, /* 451 */
+ SMEM_SMP2P_POWER_BASE = SMEM_SMP2P_WIRLESS_BASE + 8, /* 459 */
+ SMEM_FLASH_DEVICE_INFO = SMEM_SMP2P_POWER_BASE + 8, /* 467 */
+ SMEM_BAM_PIPE_MEMORY, /* 468 */
+ SMEM_IMAGE_VERSION_TABLE, /* 469 */
+ SMEM_LC_DEBUGGER, /* 470 */
+ SMEM_FLASH_NAND_DEV_INFO, /* 471 */
+ SMEM_A2_BAM_DESCRIPTOR_FIFO, /* 472 */
+ SMEM_CPR_CONFIG, /* 473 */
+ SMEM_CLOCK_INFO, /* 474 */
+ SMEM_IPC_FIFO, /* 475 */
+ SMEM_NUM_ITEMS,
+};
+
+#ifdef CONFIG_MSM_SMEM
+void *smem_alloc(unsigned id, unsigned size_in, unsigned to_proc,
+ unsigned flags);
+void *smem_find(unsigned id, unsigned size_in, unsigned to_proc,
+ unsigned flags);
+void *smem_get_entry(unsigned id, unsigned *size, unsigned to_proc,
+ unsigned flags);
+
+/**
+ * smem_get_entry_no_rlock - Get existing item without using remote spinlock
+ *
+ * @id: ID of SMEM item
+ * @size_out: Pointer to size variable for storing the result
+ * @to_proc: SMEM host that shares the item with apps
+ * @flags: Item attribute flags
+ * @returns: Pointer to SMEM item or NULL if it doesn't exist
+ *
+ * This function does not lock the remote spinlock and should only be used in
+ * failure-recover cases such as retrieving the subsystem failure reason during
+ * subsystem restart.
+ */
+void *smem_get_entry_no_rlock(unsigned id, unsigned *size_out, unsigned to_proc,
+ unsigned flags);
+
+/**
+ * smem_virt_to_phys() - Convert SMEM address to physical address.
+ *
+ * @smem_address: Virtual address returned by smem_alloc()
+ * @returns: Physical address (or NULL if there is a failure)
+ *
+ * This function should only be used if an SMEM item needs to be handed
+ * off to a DMA engine.
+ */
+phys_addr_t smem_virt_to_phys(void *smem_address);
+
+/**
+ * SMEM initialization function that registers for a SMEM platform driver.
+ *
+ * @returns: success on successful driver registration.
+ */
+int __init msm_smem_init(void);
+
+#else
+static inline void *smem_alloc(unsigned id, unsigned size_in, unsigned to_proc,
+ unsigned flags)
+{
+ return NULL;
+}
+static inline void *smem_find(unsigned id, unsigned size_in,
+ unsigned to_proc, unsigned flags)
+{
+ return NULL;
+}
+static inline void *smem_get_entry(unsigned id, unsigned *size,
+ unsigned to_proc, unsigned flags)
+{
+ return NULL;
+}
+static inline void *smem_get_entry_no_rlock(unsigned id, unsigned *size_out,
+ unsigned to_proc, unsigned flags)
+{
+ return NULL;
+}
+static inline phys_addr_t smem_virt_to_phys(void *smem_address)
+{
+ return (phys_addr_t) NULL;
+}
+static inline int __init msm_smem_init(void)
+{
+ return 0;
+}
+#endif /* CONFIG_MSM_SMEM */
+#endif /* _ARCH_ARM_MACH_MSM_SMEM_H_ */
diff --git a/include/soc/qcom/smem_log.h b/include/soc/qcom/smem_log.h
new file mode 100644
index 000000000000..85b8bfd6d514
--- /dev/null
+++ b/include/soc/qcom/smem_log.h
@@ -0,0 +1,72 @@
+/* Copyright (c) 2008-2009, 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+
+/* Event identifier format:
+ * bit 31-28 is processor ID 8 => apps, 4 => Q6, 0 => modem
+ * bits 27-16 are subsystem id (event base)
+ * bits 15-0 are event id
+ */
+
+#define PROC 0xF0000000
+#define SUB 0x0FFF0000
+#define ID 0x0000FFFF
+
+#define SMEM_LOG_PROC_ID_MODEM 0x00000000
+#define SMEM_LOG_PROC_ID_Q6 0x40000000
+#define SMEM_LOG_PROC_ID_APPS 0x80000000
+#define SMEM_LOG_PROC_ID_WCNSS 0xC0000000
+
+#define SMEM_LOG_CONT 0x10000000
+
+#define SMEM_LOG_SMEM_EVENT_BASE 0x00020000
+#define SMEM_LOG_ERROR_EVENT_BASE 0x00060000
+#define SMEM_LOG_IPC_ROUTER_EVENT_BASE 0x000D0000
+#define SMEM_LOG_QMI_CCI_EVENT_BASE 0x000E0000
+#define SMEM_LOG_QMI_CSI_EVENT_BASE 0x000F0000
+#define ERR_ERROR_FATAL (SMEM_LOG_ERROR_EVENT_BASE + 1)
+#define ERR_ERROR_FATAL_TASK (SMEM_LOG_ERROR_EVENT_BASE + 2)
+#define SMEM_LOG_EVENT_CB (SMEM_LOG_SMEM_EVENT_BASE + 0)
+#define SMEM_LOG_EVENT_START (SMEM_LOG_SMEM_EVENT_BASE + 1)
+#define SMEM_LOG_EVENT_INIT (SMEM_LOG_SMEM_EVENT_BASE + 2)
+#define SMEM_LOG_EVENT_RUNNING (SMEM_LOG_SMEM_EVENT_BASE + 3)
+#define SMEM_LOG_EVENT_STOP (SMEM_LOG_SMEM_EVENT_BASE + 4)
+#define SMEM_LOG_EVENT_RESTART (SMEM_LOG_SMEM_EVENT_BASE + 5)
+#define SMEM_LOG_EVENT_SS (SMEM_LOG_SMEM_EVENT_BASE + 6)
+#define SMEM_LOG_EVENT_READ (SMEM_LOG_SMEM_EVENT_BASE + 7)
+#define SMEM_LOG_EVENT_WRITE (SMEM_LOG_SMEM_EVENT_BASE + 8)
+#define SMEM_LOG_EVENT_SIGS1 (SMEM_LOG_SMEM_EVENT_BASE + 9)
+#define SMEM_LOG_EVENT_SIGS2 (SMEM_LOG_SMEM_EVENT_BASE + 10)
+#define SMEM_LOG_EVENT_WRITE_DM (SMEM_LOG_SMEM_EVENT_BASE + 11)
+#define SMEM_LOG_EVENT_READ_DM (SMEM_LOG_SMEM_EVENT_BASE + 12)
+#define SMEM_LOG_EVENT_SKIP_DM (SMEM_LOG_SMEM_EVENT_BASE + 13)
+#define SMEM_LOG_EVENT_STOP_DM (SMEM_LOG_SMEM_EVENT_BASE + 14)
+#define SMEM_LOG_EVENT_ISR (SMEM_LOG_SMEM_EVENT_BASE + 15)
+#define SMEM_LOG_EVENT_TASK (SMEM_LOG_SMEM_EVENT_BASE + 16)
+#define SMEM_LOG_EVENT_RS (SMEM_LOG_SMEM_EVENT_BASE + 17)
+
+#ifdef CONFIG_MSM_SMEM_LOGGING
+void smem_log_event(uint32_t id, uint32_t data1, uint32_t data2,
+ uint32_t data3);
+void smem_log_event6(uint32_t id, uint32_t data1, uint32_t data2,
+ uint32_t data3, uint32_t data4, uint32_t data5,
+ uint32_t data6);
+#else
+static inline void smem_log_event(uint32_t id, uint32_t data1, uint32_t data2,
+		    uint32_t data3) { }
+static inline void smem_log_event6(uint32_t id, uint32_t data1, uint32_t data2,
+		    uint32_t data3, uint32_t data4, uint32_t data5,
+		    uint32_t data6) { }
+#endif
+
diff --git a/include/soc/qcom/smsm.h b/include/soc/qcom/smsm.h
new file mode 100644
index 000000000000..96ca0c86912c
--- /dev/null
+++ b/include/soc/qcom/smsm.h
@@ -0,0 +1,147 @@
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_SMSM_H_
+#define _ARCH_ARM_MACH_MSM_SMSM_H_
+
+#include <soc/qcom/smem.h>
+
+enum {
+ SMSM_APPS_STATE,
+ SMSM_MODEM_STATE,
+ SMSM_Q6_STATE,
+ SMSM_APPS_DEM,
+ SMSM_WCNSS_STATE = SMSM_APPS_DEM,
+ SMSM_MODEM_DEM,
+ SMSM_DSPS_STATE = SMSM_MODEM_DEM,
+ SMSM_Q6_DEM,
+ SMSM_POWER_MASTER_DEM,
+ SMSM_TIME_MASTER_DEM,
+};
+extern uint32_t SMSM_NUM_ENTRIES;
+
+/*
+ * Ordered by when processors adopted the SMSM protocol. May not be 1-to-1
+ * with SMEM PIDs, despite initial expectations.
+ */
+enum {
+ SMSM_APPS = SMEM_APPS,
+ SMSM_MODEM = SMEM_MODEM,
+ SMSM_Q6 = SMEM_Q6,
+ SMSM_WCNSS,
+ SMSM_DSPS,
+};
+extern uint32_t SMSM_NUM_HOSTS;
+
+#define SMSM_INIT 0x00000001
+#define SMSM_SMDINIT 0x00000008
+#define SMSM_RPCINIT 0x00000020
+#define SMSM_RESET 0x00000040
+#define SMSM_TIMEWAIT 0x00000400
+#define SMSM_TIMEINIT 0x00000800
+#define SMSM_PROC_AWAKE 0x00001000
+#define SMSM_SMD_LOOPBACK 0x00800000
+
+#define SMSM_USB_PLUG_UNPLUG 0x00002000
+
+#define SMSM_A2_POWER_CONTROL 0x00000002
+#define SMSM_A2_POWER_CONTROL_ACK 0x00000800
+
+#ifdef CONFIG_MSM_SMD
+int smsm_change_state(uint32_t smsm_entry,
+ uint32_t clear_mask, uint32_t set_mask);
+
+/*
+ * Changes the global interrupt mask. The set and clear masks are re-applied
+ * every time the global interrupt mask is updated for callback registration
+ * and de-registration.
+ *
+ * The clear mask is applied first, so if a bit is set to 1 in both the clear
+ * mask and the set mask, the result will be that the interrupt is set.
+ *
+ * @smsm_entry SMSM entry to change
+ * @clear_mask 1 = clear bit, 0 = no-op
+ * @set_mask 1 = set bit, 0 = no-op
+ *
+ * @returns 0 for success, < 0 for error
+ */
+int smsm_change_intr_mask(uint32_t smsm_entry,
+ uint32_t clear_mask, uint32_t set_mask);
+int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask);
+uint32_t smsm_get_state(uint32_t smsm_entry);
+int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
+ void (*notify)(void *, uint32_t old_state, uint32_t new_state),
+ void *data);
+int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
+ void (*notify)(void *, uint32_t, uint32_t), void *data);
+
+#else
+static inline int smsm_change_state(uint32_t smsm_entry,
+ uint32_t clear_mask, uint32_t set_mask)
+{
+ return -ENODEV;
+}
+
+/*
+ * Changes the global interrupt mask. The set and clear masks are re-applied
+ * every time the global interrupt mask is updated for callback registration
+ * and de-registration.
+ *
+ * The clear mask is applied first, so if a bit is set to 1 in both the clear
+ * mask and the set mask, the result will be that the interrupt is set.
+ *
+ * @smsm_entry SMSM entry to change
+ * @clear_mask 1 = clear bit, 0 = no-op
+ * @set_mask 1 = set bit, 0 = no-op
+ *
+ * @returns 0 for success, < 0 for error
+ */
+static inline int smsm_change_intr_mask(uint32_t smsm_entry,
+ uint32_t clear_mask, uint32_t set_mask)
+{
+ return -ENODEV;
+}
+
+static inline int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
+{
+ return -ENODEV;
+}
+static inline uint32_t smsm_get_state(uint32_t smsm_entry)
+{
+ return 0;
+}
+static inline int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
+ void (*notify)(void *, uint32_t old_state, uint32_t new_state),
+ void *data)
+{
+ return -ENODEV;
+}
+static inline int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
+ void (*notify)(void *, uint32_t, uint32_t), void *data)
+{
+ return -ENODEV;
+}
+static inline void smsm_reset_modem(unsigned mode)
+{
+}
+static inline void smsm_reset_modem_cont(void)
+{
+}
+static inline void smd_sleep_exit(void)
+{
+}
+static inline int smsm_check_for_modem_crash(void)
+{
+ return -ENODEV;
+}
+#endif
+#endif
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
new file mode 100644
index 000000000000..9f18fd90a94b
--- /dev/null
+++ b/include/soc/qcom/socinfo.h
@@ -0,0 +1,604 @@
+/* Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_SOCINFO_H_
+#define _ARCH_ARM_MACH_MSM_SOCINFO_H_
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/of_fdt.h>
+#include <linux/of.h>
+
+#include <asm/cputype.h>
+/*
+ * SOC version type with major number in the upper 16 bits and minor
+ * number in the lower 16 bits. For example:
+ * 1.0 -> 0x00010000
+ * 2.3 -> 0x00020003
+ */
+#define SOCINFO_VERSION_MAJOR(ver) ((ver & 0xffff0000) >> 16)
+#define SOCINFO_VERSION_MINOR(ver) (ver & 0x0000ffff)
+
+#ifdef CONFIG_OF
+#define of_board_is_sim() of_machine_is_compatible("qcom,sim")
+#define of_board_is_rumi() of_machine_is_compatible("qcom,rumi")
+#define of_board_is_fluid() of_machine_is_compatible("qcom,fluid")
+#define of_board_is_liquid() of_machine_is_compatible("qcom,liquid")
+#define of_board_is_dragonboard() \
+ of_machine_is_compatible("qcom,dragonboard")
+#define of_board_is_cdp() of_machine_is_compatible("qcom,cdp")
+#define of_board_is_mtp() of_machine_is_compatible("qcom,mtp")
+#define of_board_is_qrd() of_machine_is_compatible("qcom,qrd")
+#define of_board_is_xpm() of_machine_is_compatible("qcom,xpm")
+#define of_board_is_skuf() of_machine_is_compatible("qcom,skuf")
+#define of_board_is_sbc() of_machine_is_compatible("qcom,sbc")
+
+#define machine_is_msm8974() of_machine_is_compatible("qcom,msm8974")
+#define machine_is_msm9625() of_machine_is_compatible("qcom,msm9625")
+#define machine_is_msm8610() of_machine_is_compatible("qcom,msm8610")
+#define machine_is_msm8226() of_machine_is_compatible("qcom,msm8226")
+#define machine_is_apq8074() of_machine_is_compatible("qcom,apq8074")
+#define machine_is_msm8926() of_machine_is_compatible("qcom,msm8926")
+
+#define early_machine_is_msm8610() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8610")
+#define early_machine_is_mpq8092() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,mpq8092")
+#define early_machine_is_msm8916() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8916")
+#define early_machine_is_msm8936() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8936")
+#define early_machine_is_msm8939() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8939")
+#define early_machine_is_apq8084() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,apq8084")
+#define early_machine_is_mdm9630() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,mdm9630")
+#define early_machine_is_msmzirc() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmzirc")
+#define early_machine_is_fsm9900() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,fsm9900")
+#define early_machine_is_msmsamarium() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmsamarium")
+#else
+#define of_board_is_sim() 0
+#define of_board_is_rumi() 0
+#define of_board_is_fluid() 0
+#define of_board_is_liquid() 0
+#define of_board_is_dragonboard() 0
+#define of_board_is_cdp() 0
+#define of_board_is_mtp() 0
+#define of_board_is_qrd() 0
+#define of_board_is_xpm() 0
+#define of_board_is_skuf() 0
+#define of_board_is_sbc() 0
+
+#define machine_is_msm8974() 0
+#define machine_is_msm9625() 0
+#define machine_is_msm8610() 0
+#define machine_is_msm8226() 0
+#define machine_is_apq8074() 0
+#define machine_is_msm8926() 0
+
+#define early_machine_is_msm8610() 0
+#define early_machine_is_mpq8092() 0
+#define early_machine_is_msm8916() 0
+#define early_machine_is_msm8936() 0
+#define early_machine_is_msm8939() 0
+#define early_machine_is_apq8084() 0
+#define early_machine_is_mdm9630() 0
+#define early_machine_is_msmzirc() 0
+#define early_machine_is_fsm9900() 0
+#define early_machine_is_msmsamarium() 0
+#endif
+
+#define PLATFORM_SUBTYPE_MDM 1
+#define PLATFORM_SUBTYPE_INTERPOSERV3 2
+#define PLATFORM_SUBTYPE_SGLTE 6
+
+enum msm_cpu {
+ MSM_CPU_UNKNOWN = 0,
+ MSM_CPU_7X01,
+ MSM_CPU_7X25,
+ MSM_CPU_7X27,
+ MSM_CPU_8X50,
+ MSM_CPU_8X50A,
+ MSM_CPU_7X30,
+ MSM_CPU_8X55,
+ MSM_CPU_8X60,
+ MSM_CPU_8960,
+ MSM_CPU_8960AB,
+ MSM_CPU_7X27A,
+ FSM_CPU_9XXX,
+ MSM_CPU_7X25A,
+ MSM_CPU_7X25AA,
+ MSM_CPU_7X25AB,
+ MSM_CPU_8064,
+ MSM_CPU_8064AB,
+ MSM_CPU_8064AA,
+ MSM_CPU_8930,
+ MSM_CPU_8930AA,
+ MSM_CPU_8930AB,
+ MSM_CPU_7X27AA,
+ MSM_CPU_9615,
+ MSM_CPU_8974,
+ MSM_CPU_8974PRO_AA,
+ MSM_CPU_8974PRO_AB,
+ MSM_CPU_8974PRO_AC,
+ MSM_CPU_8627,
+ MSM_CPU_8625,
+ MSM_CPU_9625,
+ MSM_CPU_8092,
+ MSM_CPU_8916,
+ MSM_CPU_8936,
+ MSM_CPU_8939,
+ MSM_CPU_8226,
+ MSM_CPU_8610,
+ MSM_CPU_8625Q,
+ MSM_CPU_8084,
+ MSM_CPU_9630,
+ FSM_CPU_9900,
+ MSM_CPU_ZIRC,
+};
+
+struct msm_soc_info {
+ enum msm_cpu generic_soc_type;
+ char *soc_id_string;
+};
+
+enum pmic_model {
+ PMIC_MODEL_PM8058 = 13,
+ PMIC_MODEL_PM8028 = 14,
+ PMIC_MODEL_PM8901 = 15,
+ PMIC_MODEL_PM8027 = 16,
+ PMIC_MODEL_ISL_9519 = 17,
+ PMIC_MODEL_PM8921 = 18,
+ PMIC_MODEL_PM8018 = 19,
+ PMIC_MODEL_PM8015 = 20,
+ PMIC_MODEL_PM8014 = 21,
+ PMIC_MODEL_PM8821 = 22,
+ PMIC_MODEL_PM8038 = 23,
+ PMIC_MODEL_PM8922 = 24,
+ PMIC_MODEL_PM8917 = 25,
+ PMIC_MODEL_UNKNOWN = 0xFFFFFFFF
+};
+
+enum msm_cpu socinfo_get_msm_cpu(void);
+uint32_t socinfo_get_id(void);
+uint32_t socinfo_get_version(void);
+uint32_t socinfo_get_raw_id(void);
+char *socinfo_get_build_id(void);
+uint32_t socinfo_get_platform_type(void);
+uint32_t socinfo_get_platform_subtype(void);
+uint32_t socinfo_get_platform_version(void);
+enum pmic_model socinfo_get_pmic_model(void);
+uint32_t socinfo_get_pmic_die_revision(void);
+int __init socinfo_init(void) __must_check;
+const int read_msm_cpu_type(void);
+const int get_core_count(void);
+const int cpu_is_krait(void);
+const int cpu_is_krait_v1(void);
+const int cpu_is_krait_v2(void);
+const int cpu_is_krait_v3(void);
+
+static inline int cpu_is_msm7x01(void)
+{
+#ifdef CONFIG_ARCH_MSM7X01A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X01;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x25(void)
+{
+#ifdef CONFIG_ARCH_MSM7X25
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X25;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x27(void)
+{
+#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X27;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x27a(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X27A;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x27aa(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X27AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x25a(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X25A;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x25aa(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X25AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x25ab(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X25AB;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x30(void)
+{
+#ifdef CONFIG_ARCH_MSM7X30
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X30;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_qsd8x50(void)
+{
+#ifdef CONFIG_ARCH_QSD8X50
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8X50;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8x55(void)
+{
+#ifdef CONFIG_ARCH_MSM7X30
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8X55;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8x60(void)
+{
+#ifdef CONFIG_ARCH_MSM8X60
+ return read_msm_cpu_type() == MSM_CPU_8X60;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8960(void)
+{
+ return 0;
+}
+
+static inline int cpu_is_msm8960ab(void)
+{
+ return 0;
+}
+
+static inline int cpu_is_apq8064(void)
+{
+#ifdef CONFIG_ARCH_APQ8064
+ return read_msm_cpu_type() == MSM_CPU_8064;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_apq8064ab(void)
+{
+#ifdef CONFIG_ARCH_APQ8064
+ return read_msm_cpu_type() == MSM_CPU_8064AB;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_apq8064aa(void)
+{
+#ifdef CONFIG_ARCH_APQ8064
+ return read_msm_cpu_type() == MSM_CPU_8064AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8930(void)
+{
+#ifdef CONFIG_ARCH_MSM8930
+ return read_msm_cpu_type() == MSM_CPU_8930;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8930aa(void)
+{
+#ifdef CONFIG_ARCH_MSM8930
+ return read_msm_cpu_type() == MSM_CPU_8930AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8930ab(void)
+{
+#ifdef CONFIG_ARCH_MSM8930
+ return read_msm_cpu_type() == MSM_CPU_8930AB;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8627(void)
+{
+/* 8930 and 8627 will share the same CONFIG_ARCH type unless otherwise needed */
+#ifdef CONFIG_ARCH_MSM8930
+ return read_msm_cpu_type() == MSM_CPU_8627;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_fsm9xxx(void)
+{
+#ifdef CONFIG_ARCH_FSM9XXX
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == FSM_CPU_9XXX;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm9615(void)
+{
+#ifdef CONFIG_ARCH_MSM9615
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_9615;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8625(void)
+{
+#ifdef CONFIG_ARCH_MSM8625
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8625;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8974(void)
+{
+#ifdef CONFIG_ARCH_MSM8974
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8974;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8974pro_aa(void)
+{
+#ifdef CONFIG_ARCH_MSM8974
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8974PRO_AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8974pro_ab(void)
+{
+#ifdef CONFIG_ARCH_MSM8974
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8974PRO_AB;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8974pro_ac(void)
+{
+#ifdef CONFIG_ARCH_MSM8974
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8974PRO_AC;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_mpq8092(void)
+{
+#ifdef CONFIG_ARCH_MPQ8092
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8092;
+#else
+ return 0;
+#endif
+
+}
+
+static inline int cpu_is_msm8916(void)
+{
+#ifdef CONFIG_ARCH_MSM8916
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8916;
+#else
+ return 0;
+#endif
+
+}
+
+static inline int cpu_is_msm8936(void)
+{
+#ifdef CONFIG_ARCH_MSM8916
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8936;
+#else
+ return 0;
+#endif
+
+}
+
+static inline int cpu_is_msm8939(void)
+{
+#ifdef CONFIG_ARCH_MSM8916
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8939;
+#else
+ return 0;
+#endif
+
+}
+
+static inline int cpu_is_msm8226(void)
+{
+#ifdef CONFIG_ARCH_MSM8226
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8226;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8610(void)
+{
+#ifdef CONFIG_ARCH_MSM8610
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8610;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8625q(void)
+{
+#ifdef CONFIG_ARCH_MSM8625
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8625Q;
+#else
+ return 0;
+#endif
+}
+
+static inline int soc_class_is_msm8960(void)
+{
+ return cpu_is_msm8960() || cpu_is_msm8960ab();
+}
+
+static inline int soc_class_is_apq8064(void)
+{
+ return cpu_is_apq8064() || cpu_is_apq8064ab() || cpu_is_apq8064aa();
+}
+
+static inline int soc_class_is_msm8930(void)
+{
+ return cpu_is_msm8930() || cpu_is_msm8930aa() || cpu_is_msm8930ab() ||
+ cpu_is_msm8627();
+}
+
+static inline int soc_class_is_msm8974(void)
+{
+ return cpu_is_msm8974() || cpu_is_msm8974pro_aa() ||
+ cpu_is_msm8974pro_ab() || cpu_is_msm8974pro_ac();
+}
+
+#endif
diff --git a/include/soc/qcom/spm.h b/include/soc/qcom/spm.h
new file mode 100644
index 000000000000..1e0fc0505704
--- /dev/null
+++ b/include/soc/qcom/spm.h
@@ -0,0 +1,185 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_SPM_H
+#define __ARCH_ARM_MACH_MSM_SPM_H
+enum {
+ MSM_SPM_MODE_DISABLED,
+ MSM_SPM_MODE_CLOCK_GATING,
+ MSM_SPM_MODE_POWER_RETENTION,
+ MSM_SPM_MODE_POWER_COLLAPSE,
+ MSM_SPM_MODE_NR
+};
+
+enum {
+ MSM_SPM_L2_MODE_DISABLED = MSM_SPM_MODE_DISABLED,
+ MSM_SPM_L2_MODE_RETENTION,
+ MSM_SPM_L2_MODE_GDHS,
+ MSM_SPM_L2_MODE_PC_NO_RPM,
+ MSM_SPM_L2_MODE_POWER_COLLAPSE,
+ MSM_SPM_L2_MODE_LAST,
+};
+
+enum {
+ MSM_SPM_REG_SAW2_CFG,
+ MSM_SPM_REG_SAW2_AVS_CTL,
+ MSM_SPM_REG_SAW2_AVS_HYSTERESIS,
+ MSM_SPM_REG_SAW2_SPM_CTL,
+ MSM_SPM_REG_SAW2_PMIC_DLY,
+ MSM_SPM_REG_SAW2_AVS_LIMIT,
+ MSM_SPM_REG_SAW2_AVS_DLY,
+ MSM_SPM_REG_SAW2_SPM_DLY,
+ MSM_SPM_REG_SAW2_PMIC_DATA_0,
+ MSM_SPM_REG_SAW2_PMIC_DATA_1,
+ MSM_SPM_REG_SAW2_PMIC_DATA_2,
+ MSM_SPM_REG_SAW2_PMIC_DATA_3,
+ MSM_SPM_REG_SAW2_PMIC_DATA_4,
+ MSM_SPM_REG_SAW2_PMIC_DATA_5,
+ MSM_SPM_REG_SAW2_PMIC_DATA_6,
+ MSM_SPM_REG_SAW2_PMIC_DATA_7,
+ MSM_SPM_REG_SAW2_RST,
+
+ MSM_SPM_REG_NR_INITIALIZE = MSM_SPM_REG_SAW2_RST,
+
+ MSM_SPM_REG_SAW2_ID,
+ MSM_SPM_REG_SAW2_SECURE,
+ MSM_SPM_REG_SAW2_STS0,
+ MSM_SPM_REG_SAW2_STS1,
+ MSM_SPM_REG_SAW2_STS2,
+ MSM_SPM_REG_SAW2_VCTL,
+ MSM_SPM_REG_SAW2_SEQ_ENTRY,
+ MSM_SPM_REG_SAW2_SPM_STS,
+ MSM_SPM_REG_SAW2_AVS_STS,
+ MSM_SPM_REG_SAW2_PMIC_STS,
+ MSM_SPM_REG_SAW2_VERSION,
+
+ MSM_SPM_REG_NR,
+};
+
+struct msm_spm_seq_entry {
+ uint32_t mode;
+ uint8_t *cmd;
+ bool notify_rpm;
+};
+
+struct msm_spm_platform_data {
+ void __iomem *reg_base_addr;
+ uint32_t reg_init_values[MSM_SPM_REG_NR_INITIALIZE];
+
+ uint32_t ver_reg;
+ uint32_t vctl_port;
+ uint32_t phase_port;
+ uint32_t pfm_port;
+
+ uint8_t awake_vlevel;
+ uint32_t vctl_timeout_us;
+ uint32_t avs_timeout_us;
+
+ uint32_t num_modes;
+ struct msm_spm_seq_entry *modes;
+};
+
+#if defined(CONFIG_MSM_SPM_V2)
+
+/* Public functions */
+
+int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm);
+int msm_spm_probe_done(void);
+int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel);
+unsigned int msm_spm_get_vdd(unsigned int cpu);
+int msm_spm_turn_on_cpu_rail(unsigned long base, unsigned int cpu);
+
+/* Internal low power management specific functions */
+
+void msm_spm_reinit(void);
+int msm_spm_init(struct msm_spm_platform_data *data, int nr_devs);
+int msm_spm_device_init(void);
+
+#if defined(CONFIG_MSM_L2_SPM)
+
+/* Public functions */
+
+int msm_spm_l2_set_low_power_mode(unsigned int mode, bool notify_rpm);
+int msm_spm_apcs_set_phase(unsigned int phase_cnt);
+int msm_spm_enable_fts_lpm(uint32_t mode);
+
+/* Internal low power management specific functions */
+
+int msm_spm_l2_init(struct msm_spm_platform_data *data);
+void msm_spm_l2_reinit(void);
+
+#else
+
+static inline int msm_spm_l2_set_low_power_mode(unsigned int mode,
+ bool notify_rpm)
+{
+ return -ENOSYS;
+}
+
+static inline int msm_spm_l2_init(struct msm_spm_platform_data *data)
+{
+ return -ENOSYS;
+}
+
+static inline void msm_spm_l2_reinit(void)
+{
+ /* empty */
+}
+
+static inline int msm_spm_apcs_set_phase(unsigned int phase_cnt)
+{
+ return -ENOSYS;
+}
+
+static inline int msm_spm_enable_fts_lpm(uint32_t mode)
+{
+ return -ENOSYS;
+}
+#endif /* defined(CONFIG_MSM_L2_SPM) */
+#else /* defined(CONFIG_MSM_SPM_V2) */
+static inline int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm)
+{
+ return -ENOSYS;
+}
+
+static inline int msm_spm_probe_done(void)
+{
+ return -ENOSYS;
+}
+
+static inline int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
+{
+ return -ENOSYS;
+}
+
+static inline unsigned int msm_spm_get_vdd(unsigned int cpu)
+{
+ return 0;
+}
+
+static inline void msm_spm_reinit(void)
+{
+ /* empty */
+}
+
+static inline int msm_spm_turn_on_cpu_rail(unsigned long base, unsigned int cpu)
+{
+ return -ENOSYS;
+}
+
+static inline int msm_spm_device_init(void)
+{
+ return -ENOSYS;
+}
+
+#endif /* defined (CONFIG_MSM_SPM_V2) */
+#endif /* __ARCH_ARM_MACH_MSM_SPM_H */
diff --git a/include/soc/qcom/subsystem_notif.h b/include/soc/qcom/subsystem_notif.h
new file mode 100644
index 000000000000..db421ca6e54a
--- /dev/null
+++ b/include/soc/qcom/subsystem_notif.h
@@ -0,0 +1,87 @@
+/* Copyright (c) 2011, 2013 - 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Subsystem restart notifier API header
+ *
+ */
+
+#ifndef _SUBSYS_NOTIFIER_H
+#define _SUBSYS_NOTIFIER_H
+
+#include <linux/notifier.h>
+
+enum subsys_notif_type {
+ SUBSYS_BEFORE_SHUTDOWN,
+ SUBSYS_AFTER_SHUTDOWN,
+ SUBSYS_BEFORE_POWERUP,
+ SUBSYS_AFTER_POWERUP,
+ SUBSYS_RAMDUMP_NOTIFICATION,
+ SUBSYS_POWERUP_FAILURE,
+ SUBSYS_PROXY_VOTE,
+ SUBSYS_PROXY_UNVOTE,
+ SUBSYS_SOC_RESET,
+ SUBSYS_NOTIF_TYPE_COUNT
+};
+
+#if defined(CONFIG_MSM_SUBSYSTEM_RESTART)
+/* Use the subsys_notif_register_notifier API to register for notifications for
+ * a particular subsystem. This API will return a handle that can be used to
+ * un-reg for notifications using the subsys_notif_unregister_notifier API by
+ * passing in that handle as an argument.
+ *
+ * On receiving a notification, the second (unsigned long) argument of the
+ * notifier callback will contain the notification type, and the third (void *)
+ * argument will contain the handle that was returned by
+ * subsys_notif_register_notifier.
+ */
+void *subsys_notif_register_notifier(
+ const char *subsys_name, struct notifier_block *nb);
+int subsys_notif_unregister_notifier(void *subsys_handle,
+ struct notifier_block *nb);
+
+/* Use the subsys_notif_init_subsys API to initialize the notifier chains form
+ * a particular subsystem. This API will return a handle that can be used to
+ * queue notifications using the subsys_notif_queue_notification API by passing
+ * in that handle as an argument.
+ */
+void *subsys_notif_add_subsys(const char *);
+int subsys_notif_queue_notification(void *subsys_handle,
+ enum subsys_notif_type notif_type,
+ void *data);
+#else
+
+static inline void *subsys_notif_register_notifier(
+ const char *subsys_name, struct notifier_block *nb)
+{
+ return NULL;
+}
+
+static inline int subsys_notif_unregister_notifier(void *subsys_handle,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline void *subsys_notif_add_subsys(const char *subsys_name)
+{
+ return NULL;
+}
+
+static inline int subsys_notif_queue_notification(void *subsys_handle,
+ enum subsys_notif_type notif_type,
+ void *data)
+{
+ return 0;
+}
+#endif /* CONFIG_MSM_SUBSYSTEM_RESTART */
+
+#endif
diff --git a/include/soc/qcom/subsystem_restart.h b/include/soc/qcom/subsystem_restart.h
new file mode 100644
index 000000000000..3f979faa9ad6
--- /dev/null
+++ b/include/soc/qcom/subsystem_restart.h
@@ -0,0 +1,143 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SUBSYS_RESTART_H
+#define __SUBSYS_RESTART_H
+
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+
+#define SUBSYS_NAME_MAX_LENGTH 40
+
+struct subsys_device;
+
+enum {
+ RESET_SOC = 0,
+ RESET_SUBSYS_COUPLED,
+ RESET_LEVEL_MAX
+};
+
+struct device;
+struct module;
+
+/**
+ * struct subsys_desc - subsystem descriptor
+ * @name: name of subsystem
+ * @depends_on: subsystem this subsystem depends on to operate
+ * @dev: parent device
+ * @owner: module the descriptor belongs to
+ * @shutdown: Stop a subsystem
+ * @powerup: Start a subsystem
+ * @crash_shutdown: Shutdown a subsystem when the system crashes (can't sleep)
+ * @ramdump: Collect a ramdump of the subsystem
+ * @is_not_loadable: Indicate if subsystem firmware is not loadable via pil
+ * framework
+ */
+struct subsys_desc {
+ const char *name;
+ const char *depends_on;
+ struct device *dev;
+ struct module *owner;
+
+ int (*shutdown)(const struct subsys_desc *desc, bool force_stop);
+ int (*powerup)(const struct subsys_desc *desc);
+ void (*crash_shutdown)(const struct subsys_desc *desc);
+ int (*ramdump)(int, const struct subsys_desc *desc);
+ irqreturn_t (*err_fatal_handler) (int irq, void *dev_id);
+ irqreturn_t (*stop_ack_handler) (int irq, void *dev_id);
+ irqreturn_t (*wdog_bite_handler) (int irq, void *dev_id);
+ int is_not_loadable;
+ unsigned int err_fatal_irq;
+ unsigned int err_ready_irq;
+ unsigned int stop_ack_irq;
+ unsigned int wdog_bite_irq;
+ int force_stop_gpio;
+};
+
+/**
+ * struct notif_data - additional notif information
+ * @crashed: indicates if subsystem has crashed
+ * @enable_ramdump: ramdumps disabled if set to 0
+ */
+struct notif_data {
+ bool crashed;
+ int enable_ramdump;
+};
+
+#if defined(CONFIG_MSM_SUBSYSTEM_RESTART)
+
+extern int subsys_get_restart_level(struct subsys_device *dev);
+extern int subsystem_restart_dev(struct subsys_device *dev);
+extern int subsystem_restart(const char *name);
+extern int subsystem_crashed(const char *name);
+
+extern void *subsystem_get(const char *name);
+extern void subsystem_put(void *subsystem);
+
+extern struct subsys_device *subsys_register(struct subsys_desc *desc);
+extern void subsys_unregister(struct subsys_device *dev);
+
+extern void subsys_default_online(struct subsys_device *dev);
+extern void subsys_set_crash_status(struct subsys_device *dev, bool crashed);
+extern bool subsys_get_crash_status(struct subsys_device *dev);
+void notify_proxy_vote(struct device *device);
+void notify_proxy_unvote(struct device *device);
+#else
+
+static inline int subsys_get_restart_level(struct subsys_device *dev)
+{
+ return 0;
+}
+
+static inline int subsystem_restart_dev(struct subsys_device *dev)
+{
+ return 0;
+}
+
+static inline int subsystem_restart(const char *name)
+{
+ return 0;
+}
+
+static inline int subsystem_crashed(const char *name)
+{
+ return 0;
+}
+
+static inline void *subsystem_get(const char *name)
+{
+ return NULL;
+}
+
+static inline void subsystem_put(void *subsystem) { }
+
+static inline
+struct subsys_device *subsys_register(struct subsys_desc *desc)
+{
+ return NULL;
+}
+
+static inline void subsys_unregister(struct subsys_device *dev) { }
+
+static inline void subsys_default_online(struct subsys_device *dev) { }
+static inline
+void subsys_set_crash_status(struct subsys_device *dev, bool crashed) { }
+static inline bool subsys_get_crash_status(struct subsys_device *dev)
+{
+ return false;
+}
+static inline void notify_proxy_vote(struct device *device) { }
+static inline void notify_proxy_unvote(struct device *device) { }
+#endif /* CONFIG_MSM_SUBSYSTEM_RESTART */
+
+#endif
diff --git a/include/soc/qcom/sysmon.h b/include/soc/qcom/sysmon.h
new file mode 100644
index 000000000000..8b2f85baba10
--- /dev/null
+++ b/include/soc/qcom/sysmon.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MSM_SYSMON_H
+#define __MSM_SYSMON_H
+
+#include <soc/qcom/smd.h>
+#include <soc/qcom/subsystem_notif.h>
+
+/**
+ * enum subsys_id - Destination subsystems for events.
+ */
+enum subsys_id {
+ /* SMD subsystems */
+ SYSMON_SS_MODEM = SMD_APPS_MODEM,
+ SYSMON_SS_LPASS = SMD_APPS_QDSP,
+ SYSMON_SS_WCNSS = SMD_APPS_WCNSS,
+ SYSMON_SS_DSPS = SMD_APPS_DSPS,
+ SYSMON_SS_Q6FW = SMD_APPS_Q6FW,
+
+ /* Non-SMD subsystems */
+ SYSMON_SS_EXT_MODEM = SMD_NUM_TYPE,
+ SYSMON_NUM_SS
+};
+
+#ifdef CONFIG_MSM_SYSMON_COMM
+int sysmon_send_event(const char *dest_ss, const char *event_ss,
+ enum subsys_notif_type notif);
+int sysmon_get_reason(enum subsys_id dest_ss, char *buf, size_t len);
+int sysmon_send_shutdown(enum subsys_id dest_ss);
+#else
+static inline int sysmon_send_event(const char *dest_ss,
+ const char *event_ss,
+ enum subsys_notif_type notif)
+{
+ return 0;
+}
+static inline int sysmon_get_reason(enum subsys_id dest_ss, char *buf,
+ size_t len)
+{
+ return 0;
+}
+static inline int sysmon_send_shutdown(enum subsys_id dest_ss)
+{
+ return 0;
+}
+#endif
+
+#endif
diff --git a/init/Kconfig b/init/Kconfig
index 2081a4d3d917..7bc67dab58f1 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1345,6 +1345,12 @@ config HAVE_PCSPKR_PLATFORM
config BPF
bool
+config PANIC_TIMEOUT
+ int "Default panic timeout"
+ default 0
+ help
+ Set default panic timeout.
+
menuconfig EXPERT
bool "Configure standard kernel features (expert users)"
# Unhide debug options, to make the on-by-default options visible
diff --git a/init/main.c b/init/main.c
index 321d0ceb26d3..2a6ba80da31f 100644
--- a/init/main.c
+++ b/init/main.c
@@ -393,6 +393,7 @@ static __initdata DECLARE_COMPLETION(kthreadd_done);
static noinline void __init_refok rest_init(void)
{
int pid;
+ const struct sched_param param = { .sched_priority = 1 };
rcu_scheduler_starting();
/*
@@ -406,6 +407,7 @@ static noinline void __init_refok rest_init(void)
rcu_read_lock();
kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
rcu_read_unlock();
+ sched_setscheduler_nocheck(kthreadd_task, SCHED_FIFO, &param);
complete(&kthreadd_done);
/*
@@ -511,11 +513,6 @@ asmlinkage __visible void __init start_kernel(void)
smp_setup_processor_id();
debug_objects_early_init();
- /*
- * Set up the the initial canary ASAP:
- */
- boot_init_stack_canary();
-
cgroup_init_early();
local_irq_disable();
@@ -529,6 +526,11 @@ asmlinkage __visible void __init start_kernel(void)
page_address_init();
pr_notice("%s", linux_banner);
setup_arch(&command_line);
+ /*
+	 * Set up the initial canary ASAP:
+ */
+ boot_init_stack_canary();
+ mm_init_owner(&init_mm, &init_task);
mm_init_cpumask(&init_mm);
setup_command_line(command_line);
setup_nr_cpu_ids();
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 90a3d017b90c..53e409d529b0 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -791,3 +791,25 @@ void init_cpu_online(const struct cpumask *src)
{
cpumask_copy(to_cpumask(cpu_online_bits), src);
}
+
+
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+ atomic_notifier_chain_register(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+ atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_unregister);
+
+void idle_notifier_call_chain(unsigned long val)
+{
+ atomic_notifier_call_chain(&idle_notifier, val, NULL);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
+
diff --git a/kernel/fork.c b/kernel/fork.c
index 9b7d746d6d62..28d5475fa9e4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -541,12 +541,13 @@ static void mm_init_aio(struct mm_struct *mm)
#endif
}
-static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
+void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
#ifdef CONFIG_MEMCG
mm->owner = p;
#endif
}
+EXPORT_SYMBOL(mm_init_owner);
static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
{
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0a9104b4608b..57ad5ddc319a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -547,6 +547,32 @@ int irq_set_irq_wake(unsigned int irq, unsigned int on)
}
EXPORT_SYMBOL(irq_set_irq_wake);
+/**
+ * irq_read_line - read the value on an irq line
+ * @irq: Interrupt number representing a hardware line
+ *
+ * This function is meant to be called from within the irq handler.
+ * Slowbus irq controllers might sleep, but it is assumed that the irq
+ * handler for slowbus interrupts will execute in thread context, so
+ * sleeping is okay.
+ */
+int irq_read_line(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ int val;
+
+ if (!desc || !desc->irq_data.chip->irq_read_line)
+ return -EINVAL;
+
+ chip_bus_lock(desc);
+ raw_spin_lock(&desc->lock);
+ val = desc->irq_data.chip->irq_read_line(&desc->irq_data);
+ raw_spin_unlock(&desc->lock);
+ chip_bus_sync_unlock(desc);
+ return val;
+}
+EXPORT_SYMBOL_GPL(irq_read_line);
+
/*
* Internal function that tells the architecture code whether a
* particular irq has been exclusively allocated or is available
@@ -1326,6 +1352,12 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
if (!desc->action) {
irq_shutdown(desc);
irq_release_resources(desc);
+
+ /* Explicitly mask the interrupt */
+ if (desc->irq_data.chip->irq_mask)
+ desc->irq_data.chip->irq_mask(&desc->irq_data);
+ else if (desc->irq_data.chip->irq_mask_ack)
+ desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
}
#ifdef CONFIG_SMP
@@ -1564,6 +1596,19 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler,
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
+void irq_set_pending(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ unsigned long flags;
+
+ if (desc) {
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ desc->istate |= IRQS_PENDING;
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+}
+EXPORT_SYMBOL_GPL(irq_set_pending);
+
void enable_percpu_irq(unsigned int irq, unsigned int type)
{
unsigned int cpu = smp_processor_id();
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 54be19a0fa51..5f7772fcc953 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -295,6 +295,12 @@ _dtbinst_pre_:
%.dtb_dtbinst_: $(obj)/%.dtb _dtbinst_pre_
$(call cmd,dtb_install,$(INSTALL_DTBS_PATH))
+# cat
+# ---------------------------------------------------------------------------
+# Concatenate multiple files together
+quiet_cmd_cat = CAT $@
+cmd_cat = (cat $(filter-out FORCE,$^) > $@) || (rm -f $@; false)
+
# Bzip2
# ---------------------------------------------------------------------------