authorSrinivas Kandagatla <srinivas.kandagatla@linaro.org>2015-04-07 15:16:03 +0100
committerSrinivas Kandagatla <srinivas.kandagatla@linaro.org>2015-04-07 15:16:03 +0100
commit8cb3a2dfb116925da331ce743b81fde4d9201267 (patch)
tree74f1432f12f73546599c317c5c7f25d15db2e33a
parent0b9702eefb577338e5ee6fea30fde1487f2fa1a7 (diff)
parentbf2ae3fb98a15e9ca6f947fe3a1ce0706f9ef882 (diff)
Merge branch 'tracking-qcomlt-msm8916-smd-rpm' into integration-linux-qcomlt (qcomlt-v4.0-rc7)

* tracking-qcomlt-msm8916-smd-rpm:
  smd: perform SMD channel scanning synchronously at probe
  regulator: add support for RPM SMD regulator - build fix
  regulator: add support for RPM SMD regulator
  soc: qcom: add RPM SMD driver
  soc: qcom: add support for SMD
  soc: qcom: add subsystem_restart.h header file
  soc: qcom: add SMSM header file
  qcom: add support for SMEM
  qcom: add subsystem_notif header file
  qcom: ramdump: add header file
  msm: add ipc logging header
  hwspinlock: msm: add hardware spinlock support

Conflicts:
	drivers/soc/qcom/Kconfig
	drivers/soc/qcom/Makefile
-rw-r--r-- Documentation/arm/msm/rpm.txt | 157
-rw-r--r-- Documentation/devicetree/bindings/arm/msm/ipc-spinlock.txt | 27
-rw-r--r-- Documentation/devicetree/bindings/arm/msm/rpm-smd.txt | 34
-rw-r--r-- Documentation/devicetree/bindings/arm/msm/smem.txt | 122
-rw-r--r-- Documentation/devicetree/bindings/regulator/rpm-smd-regulator.txt | 222
-rw-r--r-- drivers/Kconfig | 2
-rw-r--r-- drivers/hwspinlock/Kconfig | 11
-rw-r--r-- drivers/hwspinlock/Makefile | 1
-rw-r--r-- drivers/hwspinlock/msm_remote_spinlock.c | 484
-rw-r--r-- drivers/regulator/Kconfig | 11
-rw-r--r-- drivers/regulator/Makefile | 1
-rw-r--r-- drivers/regulator/rpm-smd-regulator.c | 1722
-rw-r--r-- drivers/soc/qcom/Kconfig | 38
-rw-r--r-- drivers/soc/qcom/Makefile | 6
-rw-r--r-- drivers/soc/qcom/rpm-smd-debug.c | 141
-rw-r--r-- drivers/soc/qcom/rpm-smd.c | 1475
-rw-r--r-- drivers/soc/qcom/smd.c | 3359
-rw-r--r-- drivers/soc/qcom/smd_debug.c | 423
-rw-r--r-- drivers/soc/qcom/smd_init_dt.c | 346
-rw-r--r-- drivers/soc/qcom/smd_private.c | 333
-rw-r--r-- drivers/soc/qcom/smd_private.h | 246
-rw-r--r-- drivers/soc/qcom/smem.c | 1483
-rw-r--r-- drivers/soc/qcom/smem_debug.c | 139
-rw-r--r-- drivers/soc/qcom/smem_private.h | 104
-rw-r--r-- drivers/soc/qcom/smsm_debug.c | 330
-rw-r--r-- include/linux/ipc_logging.h | 267
-rw-r--r-- include/linux/msm_remote_spinlock.h | 74
-rw-r--r-- include/linux/regulator/rpm-smd-regulator.h | 108
-rw-r--r-- include/linux/remote_spinlock.h | 98
-rw-r--r-- include/soc/qcom/ramdump.h | 55
-rw-r--r-- include/soc/qcom/rpm-notifier.h | 63
-rw-r--r-- include/soc/qcom/rpm-smd.h | 268
-rw-r--r-- include/soc/qcom/smd.h | 401
-rw-r--r-- include/soc/qcom/smem.h | 243
-rw-r--r-- include/soc/qcom/smsm.h | 147
-rw-r--r-- include/soc/qcom/subsystem_notif.h | 87
-rw-r--r-- include/soc/qcom/subsystem_restart.h | 154
-rw-r--r-- include/trace/events/trace_rpm_smd.h | 77
38 files changed, 13259 insertions, 0 deletions
diff --git a/Documentation/arm/msm/rpm.txt b/Documentation/arm/msm/rpm.txt
new file mode 100644
index 000000000000..9c9511fb0302
--- /dev/null
+++ b/Documentation/arm/msm/rpm.txt
@@ -0,0 +1,157 @@
+Introduction
+============
+
+Resource Power Manager (RPM)
+
+RPM is a dedicated hardware engine for managing shared SoC resources,
+which includes buses, clocks, power rails, etc. The goal of RPM is
+to achieve the maximum power savings while satisfying the SoC's
+operational and performance requirements. RPM accepts resource
+requests from multiple RPM masters. It arbitrates and aggregates the
+requests, and configures the shared resources. The RPM masters are
+the application processor, the modem processor, as well as some
+hardware accelerators.
+
+The RPM driver provides an API for interacting with RPM. Kernel code
+calls the RPM driver to request RPM-managed, shared resources.
+Kernel code can also register with the driver for RPM notifications,
+which are sent when the status of shared resources change.
+
+Hardware description
+====================
+
+RPM exposes a separate region of registers to each of the RPM masters.
+In general, each register represents some shared resource(s). At a
+very basic level, a master requests resources by writing to the
+registers, then generating an interrupt to RPM. RPM processes the
+request, writes acknowledgement to the registers, then generates an
+interrupt to the master.
+
+In addition to the master-specific regions, RPM also exposes a shared
+region that contains the current status of the shared resources. Only
+RPM can write to the status region, but every master can read from it.
+
+RPM contains internal logic that aggregates and arbitrates among
+requests from the various RPM masters. It interfaces with the PMIC,
+the bus arbitration block, and the clock controller block in order to
+configure the shared resources.
+
+Software description
+====================
+
+The RPM driver encapsulates the low level RPM interactions, which
+rely on reading/writing registers and generating/processing
+interrupts, and provides a higher level synchronous set/clear/get
+interface. Most functions take an array of id-value pairs.
+The ids identify the RPM registers corresponding to particular
+RPM resources; the values specify the new resource values.
+
+The RPM driver synchronizes accesses to RPM. It protects against
+simultaneous accesses from multiple tasks, on SMP cores, in task
+contexts, and in atomic contexts.
+
+Design
+======
+
+Design goals:
+- Encapsulate low level RPM interactions.
+- Provide a synchronous set/clear/get interface.
+- Synchronize simultaneous software accesses to RPM.
+
+Power Management
+================
+
+RPM is part of the power management architecture for MSM 8660. RPM
+manages shared system resources to lower system power.
+
+SMP/multi-core
+==============
+
+The RPM driver uses a mutex to synchronize client accesses among tasks.
+It uses spinlocks to synchronize accesses from atomic contexts and
+SMP cores.
+
+Security
+========
+
+None.
+
+Performance
+===========
+
+None.
+
+Interface
+=========
+
+msm_rpm_get_status():
+The function reads the shared status region and returns the current
+resource values, which are the arbitrated/aggregated results across
+all RPM masters.
+
+msm_rpm_set():
+The function makes a resource request to RPM.
+
+msm_rpm_set_noirq():
+The function is similar to msm_rpm_set() except that it must be
+called with interrupts masked. If possible, use msm_rpm_set()
+instead, to maximize CPU throughput.
+
+msm_rpm_clear():
+The function makes a resource request to RPM to clear resource values.
+Once the values are cleared, the resources revert back to their default
+values for this RPM master. RPM internally uses the default values as
+the requests from this RPM master when arbitrating and aggregating with
+requests from other RPM masters.
+
+msm_rpm_clear_noirq():
+The function is similar to msm_rpm_clear() except that it must be
+called with interrupts masked. If possible, use msm_rpm_clear()
+instead, to maximize CPU throughput.
+
+msm_rpm_register_notification():
+The function registers for RPM notification. When the specified
+resources change their status on RPM, RPM sends out notifications
+and the driver will "up" the semaphore in struct
+msm_rpm_notification.
+
+msm_rpm_unregister_notification():
+The function unregisters a notification.
+
+msm_rpm_init():
+The function initializes the RPM driver with platform specific data.
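+
+Example (an illustrative sketch only; the id-value pair type and the
+resource ids shown here are assumptions for illustration, not
+definitions from this driver):
+
+	struct msm_rpm_iv_pair req[2];
+	int rc;
+
+	/* Hypothetical resource ids. */
+	req[0].id = MSM_RPM_ID_EXAMPLE_RAIL;
+	req[0].value = 1;		/* enable the rail */
+	req[1].id = MSM_RPM_ID_EXAMPLE_FREQ;
+	req[1].value = 128000;		/* new resource value */
+
+	rc = msm_rpm_set(req, ARRAY_SIZE(req));
+	if (rc)
+		pr_err("rpm set failed, rc=%d\n", rc);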
+
+Driver parameters
+=================
+
+None.
+
+Config options
+==============
+
+MSM_RPM
+
+Dependencies
+============
+
+None.
+
+User space utilities
+====================
+
+None.
+
+Other
+=====
+
+None.
+
+Known issues
+============
+
+None.
+
+To do
+=====
+
+None.
diff --git a/Documentation/devicetree/bindings/arm/msm/ipc-spinlock.txt b/Documentation/devicetree/bindings/arm/msm/ipc-spinlock.txt
new file mode 100644
index 000000000000..24dbb4b83d99
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/ipc-spinlock.txt
@@ -0,0 +1,27 @@
+Qualcomm Interprocessor Communication Spinlock
+
+--Dedicated Hardware Implementation--
+Required properties:
+- compatible : should be "qcom,ipc-spinlock-sfpb"
+- reg : the location and size of the spinlock hardware
+- qcom,num-locks : the number of locks supported
+
+Example:
+
+ qcom,ipc-spinlock@fd484000 {
+ compatible = "qcom,ipc-spinlock-sfpb";
+ reg = <0xfd484000 0x1000>;
+ qcom,num-locks = <32>;
+ };
+
+--LDREX Implementation--
+Required properties:
+- compatible : should be "qcom,ipc-spinlock-ldrex"
+- reg : the location and size of the shared lock memory
+
+Example:
+
+ qcom,ipc-spinlock@fa00000 {
+ compatible = "qcom,ipc-spinlock-ldrex";
+ reg = <0xfa00000 0x200000>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/msm/rpm-smd.txt b/Documentation/devicetree/bindings/arm/msm/rpm-smd.txt
new file mode 100644
index 000000000000..c422477fd2a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/rpm-smd.txt
@@ -0,0 +1,34 @@
+Resource Power Manager (RPM)
+
+RPM is a dedicated hardware engine for managing shared SoC resources,
+which includes buses, clocks, power rails, etc. The goal of RPM is
+to achieve the maximum power savings while satisfying the SoC's
+operational and performance requirements. RPM accepts resource
+requests from multiple RPM masters. It arbitrates and aggregates the
+requests, and configures the shared resources. The RPM masters are
+the application processor, the modem processor, as well as hardware
+accelerators. The RPM driver communicates with the hardware engine using
+SMD.
+
+The devicetree representation of the RPM block should be:
+
+Required properties
+
+- compatible: "qcom,rpm-smd"
+- qcom,rpm-channel-name: The string corresponding to the channel name of the
+		 peripheral subsystem
+- qcom,rpm-channel-type: The internal SMD edge for this subsystem found in
+		 <soc/qcom/smd.h>
+
+Optional properties
+- rpm-standalone: Allow RPM driver to run in standalone mode irrespective of RPM
+ channel presence.
+
+Example:
+
+	qcom,rpm-smd {
+		compatible = "qcom,rpm-smd";
+		qcom,rpm-channel-name = "rpm_requests";
+		qcom,rpm-channel-type = <15>; /* SMD_APPS_RPM */
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/smem.txt b/Documentation/devicetree/bindings/arm/msm/smem.txt
new file mode 100644
index 000000000000..2f92c0c2c4c4
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/smem.txt
@@ -0,0 +1,122 @@
+MSM Shared Memory
+
+[Root level node]
+Required properties:
+-compatible : should be "qcom,smem"
+-reg : the location and size of smem (optional)
+ the irq register base address (required)
+ the location and size of auxiliary smem areas (optional)
+ the smem target info either from IMEM or register (optional)
+-reg-names : "smem" - optional string to identify the shared memory region
+ "irq-reg-base" - string to identify the irq register region
+ "aux-mem1", "aux-mem2", "aux-mem3", ... - optional strings to
+ identify any auxiliary shared memory regions
+ "smem_targ_info_imem" - optional string to identify
+ the smem target info from IMEM memory
+ "smem_targ_info_reg" - optional string to identify
+ the smem target info from registers
+ one of the optional register names smem_targ_info_imem,
+ smem_targ_info_reg, or smem is required.
+
+Optional properties:
+-qcom,mpu-enabled : boolean value indicating that Memory Protection Unit based
+ security is enabled on the "smem" shared memory region
+
+[Second level nodes]
+
+qcom,smd
+Required properties:
+-compatible : should be "qcom,smd"
+-qcom,smd-edge : the smd edge
+-qcom,smd-irq-offset : the offset into the irq register base memory for sending
+ interrupts
+-qcom,smd-irq-bitmask : the sending irq bitmask
+-interrupts : the receiving interrupt line
+-label : the name of the remote subsystem for this edge
+
+Optional properties:
+-qcom,irq-no-suspend: configure the incoming irq line as active during suspend
+-qcom,not-loadable : indicates this processor cannot be loaded by PIL
+
+qcom,smsm
+Required properties:
+-compatible : should be "qcom,smsm"
+-qcom,smsm-edge : the smsm edge
+-qcom,smsm-irq-offset : the offset into the irq register base memory for sending
+ interrupts
+-qcom,smsm-irq-bitmask : the sending irq bitmask
+-interrupts : the receiving interrupt line
+
+
+Example:
+
+ qcom,smem@fa00000 {
+ compatible = "qcom,smem";
+ reg = <0xfa00000 0x200000>,
+ <0xfa006000 0x1000>,
+ <0xfc428000 0x4000>;
+ reg-names = "smem", "irq-reg-base", "aux-mem1";
+
+ qcom,smd-modem {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <0>;
+ qcom,smd-irq-offset = <0x8>;
+ qcom,smd-irq-bitmask = <0x1000>;
+ interrupts = <0 25 1>;
+ label = "modem";
+ };
+
+ qcom,smsm-modem {
+ compatible = "qcom,smsm";
+ qcom,smsm-edge = <0>;
+ qcom,smsm-irq-offset = <0x8>;
+ qcom,smsm-irq-bitmask = <0x2000>;
+ interrupts = <0 26 1>;
+ };
+
+ qcom,smd-adsp {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <1>;
+ qcom,smd-irq-offset = <0x8>;
+ qcom,smd-irq-bitmask = <0x100>;
+ interrupts = <0 156 1>;
+ label = "adsp";
+ };
+
+ qcom,smsm-adsp {
+ compatible = "qcom,smsm";
+ qcom,smsm-edge = <1>;
+ qcom,smsm-irq-offset = <0x8>;
+ qcom,smsm-irq-bitmask = <0x200>;
+ interrupts = <0 157 1>;
+ };
+
+ qcom,smd-wcnss {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <6>;
+ qcom,smd-irq-offset = <0x8>;
+ qcom,smd-irq-bitmask = <0x20000>;
+ interrupts = <0 142 1>;
+ label = "wcnss";
+ };
+
+ qcom,smsm-wcnss {
+ compatible = "qcom,smsm";
+ qcom,smsm-edge = <6>;
+ qcom,smsm-irq-offset = <0x8>;
+ qcom,smsm-irq-bitmask = <0x80000>;
+ interrupts = <0 144 1>;
+ };
+
+ qcom,smd-rpm {
+ compatible = "qcom,smd";
+ qcom,smd-edge = <15>;
+ qcom,smd-irq-offset = <0x8>;
+ qcom,smd-irq-bitmask = <0x1>;
+ interrupts = <0 168 1>;
+ label = "rpm";
+		qcom,irq-no-suspend;
+ qcom,not-loadable;
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/regulator/rpm-smd-regulator.txt b/Documentation/devicetree/bindings/regulator/rpm-smd-regulator.txt
new file mode 100644
index 000000000000..e9ddf6771c2d
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/rpm-smd-regulator.txt
@@ -0,0 +1,222 @@
+Qualcomm RPM Regulators
+
+rpm-regulator-smd is a regulator driver which supports regulators inside of
+PMICs which are controlled by the RPM processor. Communication with the RPM
+processor takes place over SMD.
+
+Required structure:
+- RPM regulators must be described in two levels of device nodes. The first
+ level describes the interface with the RPM. The second level describes
+ properties of one regulator framework interface (of potentially many) to
+ the regulator.
+
+[First Level Nodes]
+
+Required properties:
+- compatible: Must be "qcom,rpm-smd-regulator-resource"
+- qcom,resource-name: Resource name string for this regulator to be used in RPM
+ transactions. Length is 4 characters max.
+- qcom,resource-id: Resource instance ID for this regulator to be used in RPM
+ transactions.
+- qcom,regulator-type: Type of this regulator. Supported values are:
+ 0 = LDO
+ 1 = SMPS
+ 2 = VS
+ 3 = NCP
+
+Optional properties:
+- qcom,allow-atomic: Flag specifying if atomic access is allowed for this
+ regulator. Supported values are:
+ 0 or not present = mutex locks used
+ 1 = spinlocks used
+- qcom,enable-time: Time in us to delay after enabling the regulator
+- qcom,hpm-min-load: Load current in uA which corresponds to the minimum load
+ which requires the regulator to be in high power mode.
+- qcom,apps-only: Flag which indicates that the regulator only has
+ consumers on the application processor. If this flag
+ is specified, then voltage and current updates are
+ only sent to the RPM if the regulator is enabled.
+
+[Second Level Nodes]
+
+Required properties:
+- compatible: Must be "qcom,rpm-smd-regulator"
+- regulator-name: A string used as a descriptive name for regulator outputs
+- qcom,set:             Specifies which sets requests made with this
+ regulator interface should be sent to. Regulator
+ requests sent in the active set take effect immediately.
+ Requests sent in the sleep set take effect when the Apps
+ processor transitions into RPM assisted power collapse.
+ Supported values are:
+ 1 = Active set only
+ 2 = Sleep set only
+ 3 = Both active and sleep sets
+
+
+Optional properties:
+- parent-supply: phandle to the parent supply/regulator node
+- qcom,system-load: Load in uA present on regulator that is not
+ captured by any consumer request
+- qcom,use-voltage-corner: Flag that signifies if regulator_set_voltage
+ calls should modify the corner parameter instead
+ of the voltage parameter. When used, voltages
+ specified inside of the regulator framework
+ represent corners that have been incremented by
+ 1. This value shift is necessary to work around
+ limitations in the regulator framework which
+ treat 0 uV as an error.
+- qcom,use-voltage-floor-corner: Flag that signifies if regulator_set_voltage
+ calls should modify the floor corner parameter
+ instead of the voltage parameter. When used,
+ voltages specified inside of the regulator
+ framework represent corners that have been
+ incremented by 1. The properties
+ qcom,use-voltage-corner and
+ qcom,use-voltage-floor-corner are mutually
+ exclusive. Only one may be specified for a
+ given regulator.
+- qcom,always-send-voltage: Flag which indicates that updates to the voltage
+ or voltage corner set point should always be
+ sent immediately to the RPM. If this flag is
+ not specified, then voltage set point updates
+ are only sent if the given regulator has also
+ been enabled by a Linux consumer.
+- qcom,always-send-current: Flag which indicates that updates to the load
+ current should always be sent immediately to the
+ RPM. If this flag is not specified, then load
+ current updates are only sent if the given
+ regulator has also been enabled by a Linux
+ consumer.
+
+The following properties specify initial values for parameters to be sent to the
+RPM in regulator requests.
+- qcom,init-enable: 0 = regulator disabled
+ 1 = regulator enabled
+- qcom,init-voltage: Voltage in uV
+- qcom,init-current: Current in mA
+- qcom,init-ldo-mode: Operating mode to be used with LDO regulators
+ Supported values are:
+ 0 = mode determined by current requests
+ 1 = force HPM (NPM)
+- qcom,init-smps-mode: Operating mode to be used with SMPS regulators
+ Supported values are:
+ 0 = auto; hardware determines mode
+ 1 = mode determined by current requests
+ 2 = force HPM (PWM)
+- qcom,init-pin-ctrl-enable: Bit mask specifying which hardware pins should be
+ used to enable the regulator, if any; supported
+ bits are:
+ 0 = ignore all hardware enable signals
+ BIT(0) = follow HW0_EN signal
+ BIT(1) = follow HW1_EN signal
+ BIT(2) = follow HW2_EN signal
+ BIT(3) = follow HW3_EN signal
+- qcom,init-pin-ctrl-mode: Bit mask specifying which hardware pins should be
+ used to force the regulator into high power
+ mode, if any. Supported bits are:
+ 0 = ignore all hardware enable signals
+ BIT(0) = follow HW0_EN signal
+ BIT(1) = follow HW1_EN signal
+ BIT(2) = follow HW2_EN signal
+ BIT(3) = follow HW3_EN signal
+ BIT(4) = follow PMIC awake state
+- qcom,init-frequency: Switching frequency divisor for SMPS regulators.
+ Supported values are n = 0 to 31 where
+ freq = 19.2 MHz / (n + 1).
+- qcom,init-head-room: Voltage head room in mV required for the
+ regulator. This head room value should be used
+ in situations where the device connected to the
+ output of the regulator has low noise tolerance.
+ Note that the RPM independently enforces a
+ safety head room value for subregulated LDOs
+ which is sufficient to account for LDO drop-out
+ voltage.
+- qcom,init-quiet-mode: Specify that quiet mode is needed for an SMPS
+ regulator in order to have lower output noise.
+ Supported values are:
+ 0 = No quiet mode
+ 1 = Quiet mode
+ 2 = Super quiet mode
+- qcom,init-freq-reason: Consumer requiring specified frequency for an
+ SMPS regulator. Supported values are:
+ 0 = None
+ 1 = Bluetooth
+ 2 = GPS
+ 4 = WLAN
+ 8 = WAN
+- qcom,init-voltage-corner: Performance corner to use in order to determine
+ voltage set point. This value corresponds to
+ the actual value that will be sent and is not
+ incremented by 1 like the values used inside of
+ the regulator framework. The meaning of corner
+ values is set by the RPM. It is possible that
+ different regulators on a given platform or
+ similar regulators on different platforms will
+ utilize different corner values. These are
+ corner values supported on MSM8974 for PMIC
+ PM8841 SMPS 2 (VDD_Dig); nominal voltages for
+ these corners are also shown:
+ 0 = None (don't care)
+ 1 = Retention (0.5000 V)
+ 2 = SVS Krait (0.7250 V)
+ 3 = SVS SOC (0.8125 V)
+ 4 = Normal (0.9000 V)
+ 5 = Turbo (0.9875 V)
+ 6 = Super Turbo (1.0500 V)
+- qcom,init-disallow-bypass: Specify that bypass mode should not be used for a
+ given LDO regulator. When in bypass mode, an
+ LDO performs no regulation and acts as a simple
+ switch. The RPM can utilize this mode for an
+ LDO that is subregulated from an SMPS when it is
+ possible to reduce the SMPS voltage to the
+ desired LDO output level. Bypass mode may be
+ disallowed if lower LDO output noise is
+ required. Supported values are:
+ 0 = Allow RPM to utilize LDO bypass mode
+ if possible
+ 1 = Disallow LDO bypass mode
+- qcom,init-voltage-floor-corner: Minimum performance corner to use if any
+ processor in the system is awake. This property
+ supports the same values as
+ qcom,init-voltage-corner.
+
+All properties specified within the core regulator framework can also be used in
+second level nodes. These bindings can be found in:
+Documentation/devicetree/bindings/regulator/regulator.txt.
+
+Example:
+
+rpm-regulator-smpb1 {
+ qcom,resource-name = "smpb";
+ qcom,resource-id = <1>;
+ qcom,regulator-type = <1>;
+ qcom,hpm-min-load = <100000>;
+ compatible = "qcom,rpm-smd-regulator-resource";
+ status = "disabled";
+
+ pm8841_s1: regulator-s1 {
+ regulator-name = "8841_s1";
+ qcom,set = <3>;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1150000>;
+ qcom,init-voltage = <1150000>;
+ compatible = "qcom,rpm-smd-regulator";
+ };
+ pm8841_s1_ao: regulator-s1-ao {
+ regulator-name = "8841_s1_ao";
+ qcom,set = <1>;
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1150000>;
+ compatible = "qcom,rpm-smd-regulator";
+ };
+ pm8841_s1_corner: regulator-s1-corner {
+ regulator-name = "8841_s1_corner";
+ qcom,set = <3>;
+ regulator-min-microvolt = <1>;
+ regulator-max-microvolt = <6>;
+ qcom,init-voltage-corner = <3>;
+ qcom,use-voltage-corner;
+ compatible = "qcom,rpm-smd-regulator";
+ };
+};
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 1a0f99e474ea..6767a2fc3294 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -172,6 +172,8 @@ source "drivers/fmc/Kconfig"
source "drivers/phy/Kconfig"
+source "drivers/soc/Kconfig"
+
source "drivers/powercap/Kconfig"
source "drivers/mcb/Kconfig"
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
index 3612cb5b30b2..ee8453029f45 100644
--- a/drivers/hwspinlock/Kconfig
+++ b/drivers/hwspinlock/Kconfig
@@ -29,4 +29,15 @@ config HSEM_U8500
If unsure, say N.
+config REMOTE_SPINLOCK_MSM
+ bool "MSM Remote Spinlock Functionality"
+ depends on ARCH_QCOM
+ select HWSPINLOCK
+ help
+ Say y here to support the MSM Remote Spinlock functionality, which
+	  provides a synchronisation mechanism for the various processors on
+	  the SoC.
+
+ If unsure, say N.
+
endmenu
diff --git a/drivers/hwspinlock/Makefile b/drivers/hwspinlock/Makefile
index 93eb64b66486..cb3ce4c08f97 100644
--- a/drivers/hwspinlock/Makefile
+++ b/drivers/hwspinlock/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_HWSPINLOCK) += hwspinlock_core.o
obj-$(CONFIG_HWSPINLOCK_OMAP) += omap_hwspinlock.o
obj-$(CONFIG_HSEM_U8500) += u8500_hsem.o
+obj-$(CONFIG_REMOTE_SPINLOCK_MSM) += msm_remote_spinlock.o
diff --git a/drivers/hwspinlock/msm_remote_spinlock.c b/drivers/hwspinlock/msm_remote_spinlock.c
new file mode 100644
index 000000000000..179b4f8bdea1
--- /dev/null
+++ b/drivers/hwspinlock/msm_remote_spinlock.c
@@ -0,0 +1,484 @@
+/* Copyright (c) 2008-2009, 2011-2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/msm_remote_spinlock.h>
+
+#include <soc/qcom/smem.h>
+
+
+#define SPINLOCK_PID_APPS 1
+
+static int is_hw_lock_type;
+static DEFINE_MUTEX(ops_init_lock);
+
+struct spinlock_ops {
+ void (*lock)(raw_remote_spinlock_t *lock);
+ void (*unlock)(raw_remote_spinlock_t *lock);
+ int (*trylock)(raw_remote_spinlock_t *lock);
+ int (*release)(raw_remote_spinlock_t *lock, uint32_t pid);
+ int (*owner)(raw_remote_spinlock_t *lock);
+ void (*lock_rlock_id)(raw_remote_spinlock_t *lock, uint32_t tid);
+ void (*unlock_rlock)(raw_remote_spinlock_t *lock);
+};
+
+static struct spinlock_ops current_ops;
+
+static int remote_spinlock_init_address(int id, _remote_spinlock_t *lock);
+
+/* ldrex implementation ----------------------------------------------------- */
+static char *ldrex_compatible_string = "qcom,ipc-spinlock-ldrex";
+
+#ifdef CONFIG_ARM
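+/*
+ * Acquire the lock word with LDREX/STREX: load it exclusively, store
+ * our PID only if it currently reads 0 (unlocked), and retry whenever
+ * the lock is held or the exclusive store fails.
+ */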
+static void __raw_remote_ex_spin_lock(raw_remote_spinlock_t *lock)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+"1: ldrex %0, [%1]\n"
+" teq %0, #0\n"
+" strexeq %0, %2, [%1]\n"
+" teqeq %0, #0\n"
+" bne 1b"
+ : "=&r" (tmp)
+ : "r" (&lock->lock), "r" (SPINLOCK_PID_APPS)
+ : "cc");
+
+ smp_mb();
+}
+
+static int __raw_remote_ex_spin_trylock(raw_remote_spinlock_t *lock)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+" ldrex %0, [%1]\n"
+" teq %0, #0\n"
+" strexeq %0, %2, [%1]\n"
+ : "=&r" (tmp)
+ : "r" (&lock->lock), "r" (SPINLOCK_PID_APPS)
+ : "cc");
+
+ if (tmp == 0) {
+ smp_mb();
+ return 1;
+ }
+ return 0;
+}
+
+static void __raw_remote_ex_spin_unlock(raw_remote_spinlock_t *lock)
+{
+ int lock_owner;
+
+ smp_mb();
+ lock_owner = readl_relaxed(&lock->lock);
+ if (lock_owner != SPINLOCK_PID_APPS) {
+ pr_err("%s: spinlock not owned by Apps (actual owner is %d)\n",
+ __func__, lock_owner);
+ }
+
+ __asm__ __volatile__(
+" str %1, [%0]\n"
+ :
+ : "r" (&lock->lock), "r" (0)
+ : "cc");
+}
+#else
+static void __raw_remote_ex_spin_lock(raw_remote_spinlock_t *lock)
+{
+}
+
+static int __raw_remote_ex_spin_trylock(raw_remote_spinlock_t *lock)
+{
+ return 0;
+}
+
+static void __raw_remote_ex_spin_unlock(raw_remote_spinlock_t *lock)
+{
+}
+#endif /* CONFIG_ARM */
+/* end ldrex implementation ------------------------------------------------- */
+
+/* sfpb implementation ------------------------------------------------------ */
+static uint32_t lock_count;
+static phys_addr_t reg_base;
+static uint32_t reg_size;
+static uint32_t lock_offset; /* offset into the hardware block before lock 0 */
+static uint32_t lock_size;
+
+static void *hw_mutex_reg_base;
+static DEFINE_MUTEX(hw_map_init_lock);
+
+static char *sfpb_compatible_string = "qcom,ipc-spinlock-sfpb";
+
+static int init_hw_mutex(struct device_node *node)
+{
+ struct resource r;
+ int rc;
+
+ rc = of_address_to_resource(node, 0, &r);
+ if (rc)
+ BUG();
+
+ rc = of_property_read_u32(node, "qcom,num-locks", &lock_count);
+ if (rc)
+ BUG();
+
+ reg_base = r.start;
+ reg_size = (uint32_t)(resource_size(&r));
+ lock_offset = 0;
+ lock_size = reg_size / lock_count;
+
+ return 0;
+}
+
+static void find_and_init_hw_mutex(void)
+{
+ struct device_node *node;
+
+ node = of_find_compatible_node(NULL, NULL, sfpb_compatible_string);
+ BUG_ON(node == NULL);
+ init_hw_mutex(node);
+ hw_mutex_reg_base = ioremap(reg_base, reg_size);
+ BUG_ON(hw_mutex_reg_base == NULL);
+}
+
+static int remote_spinlock_init_address_hw(int id, _remote_spinlock_t *lock)
+{
+ /*
+ * Optimistic locking. Init only needs to be done once by the first
+ * caller. After that, serializing inits between different callers
+ * is unnecessary. The second check after the lock ensures init
+ * wasn't previously completed by someone else before the lock could
+ * be grabbed.
+ */
+ if (!hw_mutex_reg_base) {
+ mutex_lock(&hw_map_init_lock);
+ if (!hw_mutex_reg_base)
+ find_and_init_hw_mutex();
+ mutex_unlock(&hw_map_init_lock);
+ }
+
+ if (id >= lock_count)
+ return -EINVAL;
+
+ *lock = hw_mutex_reg_base + lock_offset + id * lock_size;
+ return 0;
+}
+
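+/*
+ * The SFPB mutex hardware only accepts the write when the lock word is
+ * free, so writing our PID and reading the word back tells us whether
+ * the lock was actually acquired; spin until it was.
+ */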
+static void __raw_remote_sfpb_spin_lock(raw_remote_spinlock_t *lock)
+{
+ do {
+ writel_relaxed(SPINLOCK_PID_APPS, lock);
+ smp_mb();
+ } while (readl_relaxed(lock) != SPINLOCK_PID_APPS);
+}
+
+static int __raw_remote_sfpb_spin_trylock(raw_remote_spinlock_t *lock)
+{
+ writel_relaxed(SPINLOCK_PID_APPS, lock);
+ smp_mb();
+ return readl_relaxed(lock) == SPINLOCK_PID_APPS;
+}
+
+static void __raw_remote_sfpb_spin_unlock(raw_remote_spinlock_t *lock)
+{
+ int lock_owner;
+
+ lock_owner = readl_relaxed(lock);
+ if (lock_owner != SPINLOCK_PID_APPS) {
+ pr_err("%s: spinlock not owned by Apps (actual owner is %d)\n",
+ __func__, lock_owner);
+ }
+
+ writel_relaxed(0, lock);
+ smp_mb();
+}
+
+static void __raw_remote_sfpb_spin_lock_rlock_id(raw_remote_spinlock_t *lock,
+ uint32_t tid)
+{
+ if (unlikely(!tid)) {
+ pr_err("%s: unsupported rlock tid=0\n", __func__);
+ BUG();
+ }
+
+ do {
+ writel_relaxed(tid, lock);
+ smp_mb();
+ } while (readl_relaxed(lock) != tid);
+}
+
+static void __raw_remote_sfpb_spin_unlock_rlock(raw_remote_spinlock_t *lock)
+{
+ writel_relaxed(0, lock);
+ smp_mb();
+}
+
+/* end sfpb implementation -------------------------------------------------- */
+
+/* common spinlock API ------------------------------------------------------ */
+/**
+ * Release spinlock if it is owned by @pid.
+ *
+ * This is only to be used for situations where the processor owning
+ * the spinlock has crashed and the spinlock must be released.
+ *
+ * @lock: lock structure
+ * @pid: processor ID of processor to release
+ */
+static int __raw_remote_gen_spin_release(raw_remote_spinlock_t *lock,
+ uint32_t pid)
+{
+ int ret = 1;
+
+ if (readl_relaxed(&lock->lock) == pid) {
+ writel_relaxed(0, &lock->lock);
+ wmb();
+ ret = 0;
+ }
+ return ret;
+}
+
+/**
+ * Return owner of the spinlock.
+ *
+ * @lock: pointer to lock structure
+ * @returns: >= 0 owned PID; < 0 for error case
+ *
+ * Used for testing. PIDs are assumed to be 31 bits or less.
+ */
+static int __raw_remote_gen_spin_owner(raw_remote_spinlock_t *lock)
+{
+ rmb();
+ return readl_relaxed(&lock->lock);
+}
+
+
+static int dt_node_is_valid(const struct device_node *node)
+{
+ const char *status;
+ int statlen;
+
+ status = of_get_property(node, "status", &statlen);
+ if (status == NULL)
+ return 1;
+
+ if (statlen > 0) {
+ if (!strcmp(status, "okay") || !strcmp(status, "ok"))
+ return 1;
+ }
+
+ return 0;
+}
+
+static void initialize_ops(void)
+{
+ struct device_node *node;
+
+ /*
+ * of_find_compatible_node() returns a valid pointer even if
+ * the status property is "disabled", so the validity needs
+ * to be checked
+ */
+ node = of_find_compatible_node(NULL, NULL, sfpb_compatible_string);
+ if (node && dt_node_is_valid(node)) {
+ current_ops.lock = __raw_remote_sfpb_spin_lock;
+ current_ops.unlock = __raw_remote_sfpb_spin_unlock;
+ current_ops.trylock = __raw_remote_sfpb_spin_trylock;
+ current_ops.release = __raw_remote_gen_spin_release;
+ current_ops.owner = __raw_remote_gen_spin_owner;
+ current_ops.lock_rlock_id =
+ __raw_remote_sfpb_spin_lock_rlock_id;
+ current_ops.unlock_rlock = __raw_remote_sfpb_spin_unlock_rlock;
+ is_hw_lock_type = 1;
+ return;
+ }
+
+ node = of_find_compatible_node(NULL, NULL, ldrex_compatible_string);
+ if (node && dt_node_is_valid(node)) {
+ current_ops.lock = __raw_remote_ex_spin_lock;
+ current_ops.unlock = __raw_remote_ex_spin_unlock;
+ current_ops.trylock = __raw_remote_ex_spin_trylock;
+ current_ops.release = __raw_remote_gen_spin_release;
+ current_ops.owner = __raw_remote_gen_spin_owner;
+ is_hw_lock_type = 0;
+ return;
+ }
+
+ current_ops.lock = __raw_remote_ex_spin_lock;
+ current_ops.unlock = __raw_remote_ex_spin_unlock;
+ current_ops.trylock = __raw_remote_ex_spin_trylock;
+ current_ops.release = __raw_remote_gen_spin_release;
+ current_ops.owner = __raw_remote_gen_spin_owner;
+ is_hw_lock_type = 0;
+	pr_warn("Falling back to LDREX remote spinlock implementation\n");
+}
+
+/**
+ * Release all spinlocks owned by @pid.
+ *
+ * This is only to be used for situations where the processor owning
+ * spinlocks has crashed and the spinlocks must be released.
+ *
+ * @pid - processor ID of processor to release
+ */
+static void remote_spin_release_all_locks(uint32_t pid, int count)
+{
+ int n;
+ _remote_spinlock_t lock;
+
+ if (pid >= REMOTE_SPINLOCK_NUM_PID) {
+ pr_err("%s: Unsupported PID %d\n", __func__, pid);
+ return;
+ }
+
+ for (n = 0; n < count; ++n) {
+ if (remote_spinlock_init_address(n, &lock) == 0)
+ _remote_spin_release(&lock, pid);
+ }
+}
+
+void _remote_spin_release_all(uint32_t pid)
+{
+ remote_spin_release_all_locks(pid, lock_count);
+}
+
+#define SMEM_SPINLOCK_COUNT 8
+#define SMEM_SPINLOCK_ARRAY_SIZE (SMEM_SPINLOCK_COUNT * sizeof(uint32_t))
+
+static int remote_spinlock_init_address_smem(int id, _remote_spinlock_t *lock)
+{
+ _remote_spinlock_t spinlock_start;
+
+ if (id >= SMEM_SPINLOCK_COUNT)
+ return -EINVAL;
+
+ spinlock_start = smem_find(SMEM_SPINLOCK_ARRAY,
+ SMEM_SPINLOCK_ARRAY_SIZE,
+ 0,
+ SMEM_ANY_HOST_FLAG);
+ if (spinlock_start == NULL)
+ return -ENXIO;
+
+ *lock = spinlock_start + id;
+
+ lock_count = SMEM_SPINLOCK_COUNT;
+
+ return 0;
+}
+
+static int remote_spinlock_init_address(int id, _remote_spinlock_t *lock)
+{
+ if (is_hw_lock_type)
+ return remote_spinlock_init_address_hw(id, lock);
+ else
+ return remote_spinlock_init_address_smem(id, lock);
+}
+
+int _remote_spin_lock_init(remote_spinlock_id_t id, _remote_spinlock_t *lock)
+{
+ BUG_ON(id == NULL);
+
+ /*
+ * Optimistic locking. Init only needs to be done once by the first
+ * caller. After that, serializing inits between different callers
+ * is unnecessary. The second check after the lock ensures init
+ * wasn't previously completed by someone else before the lock could
+ * be grabbed.
+ */
+ if (!current_ops.lock) {
+ mutex_lock(&ops_init_lock);
+ if (!current_ops.lock)
+ initialize_ops();
+ mutex_unlock(&ops_init_lock);
+ }
+
+ if (id[0] == 'S' && id[1] == ':') {
+ /* Single-digit lock ID follows "S:" */
+ BUG_ON(id[3] != '\0');
+
+ return remote_spinlock_init_address((((uint8_t)id[2])-'0'),
+ lock);
+ } else {
+ return -EINVAL;
+ }
+}
+
+/*
+ * lock comes in as a pointer to a pointer to the lock location, so it must
+ * be dereferenced and cast to the right type for the actual lock
+ * implementation functions
+ */
+void _remote_spin_lock(_remote_spinlock_t *lock)
+{
+ if (unlikely(!current_ops.lock))
+ BUG();
+ current_ops.lock((raw_remote_spinlock_t *)(*lock));
+}
+EXPORT_SYMBOL(_remote_spin_lock);
+
+void _remote_spin_unlock(_remote_spinlock_t *lock)
+{
+ if (unlikely(!current_ops.unlock))
+ BUG();
+ current_ops.unlock((raw_remote_spinlock_t *)(*lock));
+}
+EXPORT_SYMBOL(_remote_spin_unlock);
+
+int _remote_spin_trylock(_remote_spinlock_t *lock)
+{
+ if (unlikely(!current_ops.trylock))
+ BUG();
+ return current_ops.trylock((raw_remote_spinlock_t *)(*lock));
+}
+EXPORT_SYMBOL(_remote_spin_trylock);
+
+int _remote_spin_release(_remote_spinlock_t *lock, uint32_t pid)
+{
+ if (unlikely(!current_ops.release))
+ BUG();
+ return current_ops.release((raw_remote_spinlock_t *)(*lock), pid);
+}
+EXPORT_SYMBOL(_remote_spin_release);
+
+int _remote_spin_owner(_remote_spinlock_t *lock)
+{
+ if (unlikely(!current_ops.owner))
+ BUG();
+ return current_ops.owner((raw_remote_spinlock_t *)(*lock));
+}
+EXPORT_SYMBOL(_remote_spin_owner);
+
+void _remote_spin_lock_rlock_id(_remote_spinlock_t *lock, uint32_t tid)
+{
+ if (unlikely(!current_ops.lock_rlock_id))
+ BUG();
+ current_ops.lock_rlock_id((raw_remote_spinlock_t *)(*lock), tid);
+}
+EXPORT_SYMBOL(_remote_spin_lock_rlock_id);
+
+void _remote_spin_unlock_rlock(_remote_spinlock_t *lock)
+{
+ if (unlikely(!current_ops.unlock_rlock))
+ BUG();
+ current_ops.unlock_rlock((raw_remote_spinlock_t *)(*lock));
+}
+EXPORT_SYMBOL(_remote_spin_unlock_rlock);
+
+/* end common spinlock API -------------------------------------------------- */
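+
+/*
+ * Example usage (an illustrative sketch only; the "S:7" lock id and the
+ * calling context are hypothetical):
+ *
+ *	_remote_spinlock_t lock;
+ *
+ *	if (_remote_spin_lock_init("S:7", &lock))
+ *		return -ENODEV;
+ *	_remote_spin_lock(&lock);
+ *	... critical section shared with a remote processor ...
+ *	_remote_spin_unlock(&lock);
+ */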
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index a6f116aa5235..1c7b04fbdbfb 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -750,5 +750,16 @@ config REGULATOR_WM8994
This driver provides support for the voltage regulators on the
WM8994 CODEC.
+config REGULATOR_RPM_SMD
+ bool "RPM SMD regulator driver"
+ depends on OF
+ depends on MSM_RPM_SMD
+ help
+ Compile in support for the RPM SMD regulator driver which is used for
+ setting voltages and other parameters of the various power rails
+ supplied by some Qualcomm PMICs. The RPM SMD regulator driver should
+ be used on systems which contain an RPM which communicates with the
+ application processor over SMD.
+
endif
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 2c4da15e1545..fe43fca12544 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -98,6 +98,7 @@ obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o
obj-$(CONFIG_REGULATOR_WM8350) += wm8350-regulator.o
obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o
+obj-$(CONFIG_REGULATOR_RPM_SMD) += rpm-smd-regulator.o
ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/rpm-smd-regulator.c b/drivers/regulator/rpm-smd-regulator.c
new file mode 100644
index 000000000000..577a021c4e0b
--- /dev/null
+++ b/drivers/regulator/rpm-smd-regulator.c
@@ -0,0 +1,1722 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <soc/qcom/rpm-smd.h>
+
+/* Debug Definitions */
+
+enum {
+ RPM_VREG_DEBUG_REQUEST = BIT(0),
+ RPM_VREG_DEBUG_FULL_REQUEST = BIT(1),
+ RPM_VREG_DEBUG_DUPLICATE = BIT(2),
+};
+
+static int rpm_vreg_debug_mask;
+module_param_named(
+ debug_mask, rpm_vreg_debug_mask, int, S_IRUSR | S_IWUSR
+);
+
+#define vreg_err(req, fmt, ...) \
+ pr_err("%s: " fmt, req->rdesc.name, ##__VA_ARGS__)
+
+/* RPM regulator request types */
+enum rpm_regulator_type {
+ RPM_REGULATOR_TYPE_LDO,
+ RPM_REGULATOR_TYPE_SMPS,
+ RPM_REGULATOR_TYPE_VS,
+ RPM_REGULATOR_TYPE_NCP,
+ RPM_REGULATOR_TYPE_MAX,
+};
+
+/* RPM resource parameters */
+enum rpm_regulator_param_index {
+ RPM_REGULATOR_PARAM_ENABLE,
+ RPM_REGULATOR_PARAM_VOLTAGE,
+ RPM_REGULATOR_PARAM_CURRENT,
+ RPM_REGULATOR_PARAM_MODE_LDO,
+ RPM_REGULATOR_PARAM_MODE_SMPS,
+ RPM_REGULATOR_PARAM_PIN_CTRL_ENABLE,
+ RPM_REGULATOR_PARAM_PIN_CTRL_MODE,
+ RPM_REGULATOR_PARAM_FREQUENCY,
+ RPM_REGULATOR_PARAM_HEAD_ROOM,
+ RPM_REGULATOR_PARAM_QUIET_MODE,
+ RPM_REGULATOR_PARAM_FREQ_REASON,
+ RPM_REGULATOR_PARAM_CORNER,
+ RPM_REGULATOR_PARAM_BYPASS,
+ RPM_REGULATOR_PARAM_FLOOR_CORNER,
+ RPM_REGULATOR_PARAM_MAX,
+};
+
+enum rpm_regulator_smps_mode {
+ RPM_REGULATOR_SMPS_MODE_AUTO = 0,
+ RPM_REGULATOR_SMPS_MODE_IPEAK = 1,
+ RPM_REGULATOR_SMPS_MODE_PWM = 2,
+};
+
+enum rpm_regulator_ldo_mode {
+ RPM_REGULATOR_LDO_MODE_IPEAK = 0,
+ RPM_REGULATOR_LDO_MODE_HPM = 1,
+};
+
+#define RPM_SET_CONFIG_ACTIVE BIT(0)
+#define RPM_SET_CONFIG_SLEEP BIT(1)
+#define RPM_SET_CONFIG_BOTH (RPM_SET_CONFIG_ACTIVE \
+ | RPM_SET_CONFIG_SLEEP)
+struct rpm_regulator_param {
+ char *name;
+ char *property_name;
+ u32 key;
+ u32 min;
+ u32 max;
+ u32 supported_regulator_types;
+};
+
+#define PARAM(_idx, _support_ldo, _support_smps, _support_vs, _support_ncp, \
+ _name, _min, _max, _property_name) \
+ [RPM_REGULATOR_PARAM_##_idx] = { \
+ .name = _name, \
+ .property_name = _property_name, \
+ .min = _min, \
+ .max = _max, \
+ .supported_regulator_types = \
+ _support_ldo << RPM_REGULATOR_TYPE_LDO | \
+ _support_smps << RPM_REGULATOR_TYPE_SMPS | \
+ _support_vs << RPM_REGULATOR_TYPE_VS | \
+ _support_ncp << RPM_REGULATOR_TYPE_NCP, \
+ }
+
+static struct rpm_regulator_param params[RPM_REGULATOR_PARAM_MAX] = {
+ /* ID LDO SMPS VS NCP name min max property-name */
+ PARAM(ENABLE, 1, 1, 1, 1, "swen", 0, 1, "qcom,init-enable"),
+ PARAM(VOLTAGE, 1, 1, 0, 1, "uv", 0, 0x7FFFFFF, "qcom,init-voltage"),
+ PARAM(CURRENT, 1, 1, 0, 0, "ma", 0, 0x1FFF, "qcom,init-current"),
+ PARAM(MODE_LDO, 1, 0, 0, 0, "lsmd", 0, 1, "qcom,init-ldo-mode"),
+ PARAM(MODE_SMPS, 0, 1, 0, 0, "ssmd", 0, 2, "qcom,init-smps-mode"),
+ PARAM(PIN_CTRL_ENABLE, 1, 1, 1, 0, "pcen", 0, 0xF, "qcom,init-pin-ctrl-enable"),
+ PARAM(PIN_CTRL_MODE, 1, 1, 1, 0, "pcmd", 0, 0x1F, "qcom,init-pin-ctrl-mode"),
+ PARAM(FREQUENCY, 0, 1, 0, 1, "freq", 0, 31, "qcom,init-frequency"),
+ PARAM(HEAD_ROOM, 1, 0, 0, 1, "hr", 0, 0x7FFFFFFF, "qcom,init-head-room"),
+ PARAM(QUIET_MODE, 0, 1, 0, 0, "qm", 0, 2, "qcom,init-quiet-mode"),
+ PARAM(FREQ_REASON, 0, 1, 0, 1, "resn", 0, 8, "qcom,init-freq-reason"),
+ PARAM(CORNER, 1, 1, 0, 0, "corn", 0, 6, "qcom,init-voltage-corner"),
+ PARAM(BYPASS, 1, 0, 0, 0, "bypa", 0, 1, "qcom,init-disallow-bypass"),
+ PARAM(FLOOR_CORNER, 1, 1, 0, 0, "vfc", 0, 6, "qcom,init-voltage-floor-corner"),
+};
+
+struct rpm_regulator_mode_map {
+ int ldo_mode;
+ int smps_mode;
+};
+
+static struct rpm_regulator_mode_map mode_mapping[] = {
+ [RPM_REGULATOR_MODE_AUTO]
+ = {-1, RPM_REGULATOR_SMPS_MODE_AUTO},
+ [RPM_REGULATOR_MODE_IPEAK]
+ = {RPM_REGULATOR_LDO_MODE_IPEAK, RPM_REGULATOR_SMPS_MODE_IPEAK},
+ [RPM_REGULATOR_MODE_HPM]
+ = {RPM_REGULATOR_LDO_MODE_HPM, RPM_REGULATOR_SMPS_MODE_PWM},
+};
+
+struct rpm_vreg_request {
+ u32 param[RPM_REGULATOR_PARAM_MAX];
+ u32 valid;
+ u32 modified;
+};
+
+struct rpm_vreg {
+ struct rpm_vreg_request aggr_req_active;
+ struct rpm_vreg_request aggr_req_sleep;
+ struct list_head reg_list;
+ const char *resource_name;
+ u32 resource_id;
+ bool allow_atomic;
+ int regulator_type;
+ int hpm_min_load;
+ int enable_time;
+ spinlock_t slock;
+ struct mutex mlock;
+ unsigned long flags;
+ bool sleep_request_sent;
+ bool apps_only;
+ struct msm_rpm_request *handle_active;
+ struct msm_rpm_request *handle_sleep;
+};
+
+struct rpm_regulator {
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+ struct rpm_vreg *rpm_vreg;
+ struct list_head list;
+ bool set_active;
+ bool set_sleep;
+ bool always_send_voltage;
+ bool always_send_current;
+ struct rpm_vreg_request req;
+ int system_load;
+ int min_uV;
+ int max_uV;
+};
+
+/*
+ * This voltage in uV is returned by get_voltage functions when there is no way
+ * to determine the current voltage level. It is needed because the regulator
+ * framework treats a 0 uV voltage as an error.
+ */
+#define VOLTAGE_UNKNOWN 1
+
+/*
+ * Regulator requests sent in the active set take effect immediately. Requests
+ * sent in the sleep set take effect when the Apps processor transitions into
+ * RPM assisted power collapse. For any given regulator, if an active set
+ * request is present, but not a sleep set request, then the active set request
+ * is used at all times, even when the Apps processor is power collapsed.
+ *
+ * The rpm-regulator-smd takes advantage of this default usage of the active set
+ * request by only sending a sleep set request if it differs from the
+ * corresponding active set request.
+ */
+#define RPM_SET_ACTIVE MSM_RPM_CTX_ACTIVE_SET
+#define RPM_SET_SLEEP MSM_RPM_CTX_SLEEP_SET
+
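+/*
+ * Pack up to the first four characters of an RPM parameter name into a
+ * little-endian u32 key, e.g. "uv" becomes 0x7675 ('u' | 'v' << 8).
+ */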
+static u32 rpm_vreg_string_to_int(const u8 *str)
+{
+ int i, len;
+ u32 output = 0;
+
+ len = strnlen(str, sizeof(u32));
+ for (i = 0; i < len; i++)
+ output |= str[i] << (i * 8);
+
+ return output;
+}
+
+static inline void rpm_vreg_lock(struct rpm_vreg *rpm_vreg)
+{
+ if (rpm_vreg->allow_atomic)
+ spin_lock_irqsave(&rpm_vreg->slock, rpm_vreg->flags);
+ else
+ mutex_lock(&rpm_vreg->mlock);
+}
+
+static inline void rpm_vreg_unlock(struct rpm_vreg *rpm_vreg)
+{
+ if (rpm_vreg->allow_atomic)
+ spin_unlock_irqrestore(&rpm_vreg->slock, rpm_vreg->flags);
+ else
+ mutex_unlock(&rpm_vreg->mlock);
+}
+
+static inline bool rpm_vreg_active_or_sleep_enabled(struct rpm_vreg *rpm_vreg)
+{
+ return (rpm_vreg->aggr_req_active.param[RPM_REGULATOR_PARAM_ENABLE]
+ && (rpm_vreg->aggr_req_active.valid
+ & BIT(RPM_REGULATOR_PARAM_ENABLE)))
+ || ((rpm_vreg->aggr_req_sleep.param[RPM_REGULATOR_PARAM_ENABLE])
+ && (rpm_vreg->aggr_req_sleep.valid
+ & BIT(RPM_REGULATOR_PARAM_ENABLE)));
+}
+
+static inline bool rpm_vreg_shared_active_or_sleep_enabled_valid
+ (struct rpm_vreg *rpm_vreg)
+{
+ return !rpm_vreg->apps_only &&
+ ((rpm_vreg->aggr_req_active.valid
+ & BIT(RPM_REGULATOR_PARAM_ENABLE))
+ || (rpm_vreg->aggr_req_sleep.valid
+ & BIT(RPM_REGULATOR_PARAM_ENABLE)));
+}
+
+/*
+ * This is used when voting for LPM or HPM by subtracting or adding to the
+ * hpm_min_load of a regulator. It has units of uA.
+ */
+#define LOAD_THRESHOLD_STEP 1000
+
+static inline int rpm_vreg_hpm_min_uA(struct rpm_vreg *rpm_vreg)
+{
+ return rpm_vreg->hpm_min_load;
+}
+
+static inline int rpm_vreg_lpm_max_uA(struct rpm_vreg *rpm_vreg)
+{
+ return rpm_vreg->hpm_min_load - LOAD_THRESHOLD_STEP;
+}
+
+#define MICRO_TO_MILLI(uV) ((uV) / 1000)
+#define MILLI_TO_MICRO(uV) ((uV) * 1000)
+
+#define DEBUG_PRINT_BUFFER_SIZE 512
+#define REQ_SENT 0
+#define REQ_PREV 1
+#define REQ_CACHED 2
+#define REQ_TYPES 3
+
+static void rpm_regulator_req(struct rpm_regulator *regulator, int set,
+ bool sent)
+{
+ char buf[DEBUG_PRINT_BUFFER_SIZE];
+ size_t buflen = DEBUG_PRINT_BUFFER_SIZE;
+ struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+ struct rpm_vreg_request *aggr;
+ bool first;
+ u32 mask[REQ_TYPES] = {0, 0, 0};
+ const char *req_names[REQ_TYPES] = {"sent", "prev", "cached"};
+ int pos = 0;
+ int i, j;
+
+ aggr = (set == RPM_SET_ACTIVE)
+ ? &rpm_vreg->aggr_req_active : &rpm_vreg->aggr_req_sleep;
+
+ if (rpm_vreg_debug_mask & RPM_VREG_DEBUG_DUPLICATE) {
+ mask[REQ_SENT] = aggr->modified;
+ mask[REQ_PREV] = aggr->valid & ~aggr->modified;
+ } else if (sent
+ && (rpm_vreg_debug_mask & RPM_VREG_DEBUG_FULL_REQUEST)) {
+ mask[REQ_SENT] = aggr->modified;
+ mask[REQ_PREV] = aggr->valid & ~aggr->modified;
+ } else if (sent && (rpm_vreg_debug_mask & RPM_VREG_DEBUG_REQUEST)) {
+ mask[REQ_SENT] = aggr->modified;
+ }
+
+ if (!(mask[REQ_SENT] | mask[REQ_PREV]))
+ return;
+
+ if (set == RPM_SET_SLEEP && !rpm_vreg->sleep_request_sent) {
+ mask[REQ_CACHED] = mask[REQ_SENT] | mask[REQ_PREV];
+ mask[REQ_SENT] = 0;
+ mask[REQ_PREV] = 0;
+ }
+
+ pos += scnprintf(buf + pos, buflen - pos, "%s%s: ",
+ KERN_INFO, __func__);
+
+ pos += scnprintf(buf + pos, buflen - pos, "%s %u (%s): s=%s",
+ rpm_vreg->resource_name, rpm_vreg->resource_id,
+ regulator->rdesc.name,
+ (set == RPM_SET_ACTIVE ? "act" : "slp"));
+
+ for (i = 0; i < REQ_TYPES; i++) {
+ if (mask[i])
+ pos += scnprintf(buf + pos, buflen - pos, "; %s: ",
+ req_names[i]);
+
+ first = true;
+ for (j = 0; j < RPM_REGULATOR_PARAM_MAX; j++) {
+ if (mask[i] & BIT(j)) {
+ pos += scnprintf(buf + pos, buflen - pos,
+ "%s%s=%u", (first ? "" : ", "),
+ params[j].name, aggr->param[j]);
+ first = false;
+ }
+ }
+ }
+
+ pos += scnprintf(buf + pos, buflen - pos, "\n");
+ printk(buf);
+}
+
+#define RPM_VREG_SET_PARAM(_regulator, _param, _val) \
+{ \
+	(_regulator)->req.param[RPM_REGULATOR_PARAM_##_param] = _val; \
+	(_regulator)->req.modified |= BIT(RPM_REGULATOR_PARAM_##_param); \
+}
+
+static int rpm_vreg_add_kvp_to_request(struct rpm_vreg *rpm_vreg,
+ const u32 *param, int idx, u32 set)
+{
+ struct msm_rpm_request *handle;
+
+ handle = (set == RPM_SET_ACTIVE ? rpm_vreg->handle_active
+ : rpm_vreg->handle_sleep);
+
+ if (rpm_vreg->allow_atomic)
+ return msm_rpm_add_kvp_data_noirq(handle, params[idx].key,
+ (u8 *)&param[idx], 4);
+ else
+ return msm_rpm_add_kvp_data(handle, params[idx].key,
+ (u8 *)&param[idx], 4);
+}
+
+static void rpm_vreg_check_modified_requests(const u32 *prev_param,
+ const u32 *param, u32 prev_valid, u32 *modified)
+{
+ u32 value_changed = 0;
+ int i;
+
+ for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+ if (param[i] != prev_param[i])
+ value_changed |= BIT(i);
+ }
+
+ /*
+ * Only keep bits that are for changed parameters or previously
+ * invalid parameters.
+ */
+ *modified &= value_changed | ~prev_valid;
+}
+
+static int rpm_vreg_add_modified_requests(struct rpm_regulator *regulator,
+ u32 set, const u32 *param, u32 modified)
+{
+ struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+ int rc = 0;
+ int i;
+
+ for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+ /* Only send requests for modified parameters. */
+ if (modified & BIT(i)) {
+ rc = rpm_vreg_add_kvp_to_request(rpm_vreg, param, i,
+ set);
+ if (rc) {
+ vreg_err(regulator,
+ "add KVP failed: %s %u; %s, rc=%d\n",
+ rpm_vreg->resource_name,
+ rpm_vreg->resource_id, params[i].name,
+ rc);
+ return rc;
+ }
+ }
+ }
+
+ return rc;
+}
+
+static int rpm_vreg_send_request(struct rpm_regulator *regulator, u32 set)
+{
+ struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+ struct msm_rpm_request *handle
+ = (set == RPM_SET_ACTIVE ? rpm_vreg->handle_active
+ : rpm_vreg->handle_sleep);
+ int rc;
+
+ if (rpm_vreg->allow_atomic)
+ rc = msm_rpm_wait_for_ack_noirq(msm_rpm_send_request_noirq(
+ handle));
+ else
+ rc = msm_rpm_wait_for_ack(msm_rpm_send_request(handle));
+
+ if (rc)
+ vreg_err(regulator,
+ "msm rpm send failed: %s %u; set=%s, rc=%d\n",
+ rpm_vreg->resource_name,
+ rpm_vreg->resource_id,
+ (set == RPM_SET_ACTIVE ? "act" : "slp"), rc);
+
+ return rc;
+}
+
+#define RPM_VREG_AGGR_MIN(_idx, _param_aggr, _param_reg) \
+{ \
+ _param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+ = min(_param_aggr[RPM_REGULATOR_PARAM_##_idx], \
+ _param_reg[RPM_REGULATOR_PARAM_##_idx]); \
+}
+
+#define RPM_VREG_AGGR_MAX(_idx, _param_aggr, _param_reg) \
+{ \
+ _param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+ = max(_param_aggr[RPM_REGULATOR_PARAM_##_idx], \
+ _param_reg[RPM_REGULATOR_PARAM_##_idx]); \
+}
+
+#define RPM_VREG_AGGR_SUM(_idx, _param_aggr, _param_reg) \
+{ \
+ _param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+ += _param_reg[RPM_REGULATOR_PARAM_##_idx]; \
+}
+
+#define RPM_VREG_AGGR_OR(_idx, _param_aggr, _param_reg) \
+{ \
+ _param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+ |= _param_reg[RPM_REGULATOR_PARAM_##_idx]; \
+}
+
+/*
+ * Aggregation is performed on each parameter based on the way that the RPM
+ * aggregates that type internally between RPM masters.
+ */
+static void rpm_vreg_aggregate_params(u32 *param_aggr, const u32 *param_reg)
+{
+ RPM_VREG_AGGR_MAX(ENABLE, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(VOLTAGE, param_aggr, param_reg);
+ RPM_VREG_AGGR_SUM(CURRENT, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(MODE_LDO, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(MODE_SMPS, param_aggr, param_reg);
+ RPM_VREG_AGGR_OR(PIN_CTRL_ENABLE, param_aggr, param_reg);
+ RPM_VREG_AGGR_OR(PIN_CTRL_MODE, param_aggr, param_reg);
+ RPM_VREG_AGGR_MIN(FREQUENCY, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(HEAD_ROOM, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(QUIET_MODE, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(FREQ_REASON, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(CORNER, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(BYPASS, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(FLOOR_CORNER, param_aggr, param_reg);
+}
+
+static int rpm_vreg_aggregate_requests(struct rpm_regulator *regulator)
+{
+ struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+ u32 param_active[RPM_REGULATOR_PARAM_MAX];
+ u32 param_sleep[RPM_REGULATOR_PARAM_MAX];
+ u32 modified_active, modified_sleep;
+ struct rpm_regulator *reg;
+ bool sleep_set_differs = false;
+ bool send_active = false;
+ bool send_sleep = false;
+ int rc = 0;
+ int i;
+
+ memset(param_active, 0, sizeof(param_active));
+ memset(param_sleep, 0, sizeof(param_sleep));
+ modified_active = rpm_vreg->aggr_req_active.modified;
+ modified_sleep = rpm_vreg->aggr_req_sleep.modified;
+
+ /*
+ * Aggregate all of the requests for this regulator in both active
+ * and sleep sets.
+ */
+ list_for_each_entry(reg, &rpm_vreg->reg_list, list) {
+ if (reg->set_active) {
+ rpm_vreg_aggregate_params(param_active, reg->req.param);
+ modified_active |= reg->req.modified;
+ }
+ if (reg->set_sleep) {
+ rpm_vreg_aggregate_params(param_sleep, reg->req.param);
+ modified_sleep |= reg->req.modified;
+ }
+ }
+
+ /*
+ * Check if the aggregated sleep set parameter values differ from the
+ * aggregated active set parameter values.
+ */
+ if (!rpm_vreg->sleep_request_sent) {
+ for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+ if ((param_active[i] != param_sleep[i])
+ && (modified_sleep & BIT(i))) {
+ sleep_set_differs = true;
+ break;
+ }
+ }
+ }
+
+ /* Add KVPs to the active set RPM request if they have new values. */
+ rpm_vreg_check_modified_requests(rpm_vreg->aggr_req_active.param,
+ param_active, rpm_vreg->aggr_req_active.valid,
+ &modified_active);
+ rc = rpm_vreg_add_modified_requests(regulator, RPM_SET_ACTIVE,
+ param_active, modified_active);
+ if (rc)
+ return rc;
+ send_active = modified_active;
+
+ /*
+ * Sleep set configurations are only sent if they differ from the
+ * active set values. This is because the active set values will take
+ * effect during rpm assisted power collapse in the absence of sleep set
+ * values.
+ *
+ * However, once a sleep set request is sent for a given regulator,
+ * additional sleep set requests must be sent in the future even if they
+ * match the corresponding active set requests.
+ */
+ if (rpm_vreg->sleep_request_sent || sleep_set_differs) {
+ /* Add KVPs to the sleep set RPM request if they are new. */
+ rpm_vreg_check_modified_requests(rpm_vreg->aggr_req_sleep.param,
+ param_sleep, rpm_vreg->aggr_req_sleep.valid,
+ &modified_sleep);
+ rc = rpm_vreg_add_modified_requests(regulator, RPM_SET_SLEEP,
+ param_sleep, modified_sleep);
+ if (rc)
+ return rc;
+ send_sleep = modified_sleep;
+ }
+
+ /* Send active set request to the RPM if it contains new KVPs. */
+ if (send_active) {
+ rc = rpm_vreg_send_request(regulator, RPM_SET_ACTIVE);
+ if (rc)
+ return rc;
+ rpm_vreg->aggr_req_active.valid |= modified_active;
+ }
+ /* Store the results of the aggregation. */
+ rpm_vreg->aggr_req_active.modified = modified_active;
+ memcpy(rpm_vreg->aggr_req_active.param, param_active,
+ sizeof(param_active));
+
+ /* Handle debug printing of the active set request. */
+ rpm_regulator_req(regulator, RPM_SET_ACTIVE, send_active);
+ if (send_active)
+ rpm_vreg->aggr_req_active.modified = 0;
+
+ /* Send sleep set request to the RPM if it contains new KVPs. */
+ if (send_sleep) {
+ rc = rpm_vreg_send_request(regulator, RPM_SET_SLEEP);
+ if (rc)
+ return rc;
+ else
+ rpm_vreg->sleep_request_sent = true;
+ rpm_vreg->aggr_req_sleep.valid |= modified_sleep;
+ }
+ /* Store the results of the aggregation. */
+ rpm_vreg->aggr_req_sleep.modified = modified_sleep;
+ memcpy(rpm_vreg->aggr_req_sleep.param, param_sleep,
+ sizeof(param_sleep));
+
+ /* Handle debug printing of the sleep set request. */
+ rpm_regulator_req(regulator, RPM_SET_SLEEP, send_sleep);
+ if (send_sleep)
+ rpm_vreg->aggr_req_sleep.modified = 0;
+
+ /*
+ * Loop over all requests for this regulator to update the valid and
+ * modified values for use in future aggregation.
+ */
+ list_for_each_entry(reg, &rpm_vreg->reg_list, list) {
+ reg->req.valid |= reg->req.modified;
+ reg->req.modified = 0;
+ }
+
+ return rc;
+}
+
+static int rpm_vreg_is_enabled(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+ return reg->req.param[RPM_REGULATOR_PARAM_ENABLE];
+}
+
+static int rpm_vreg_enable(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int rc;
+ u32 prev_enable;
+
+ rpm_vreg_lock(reg->rpm_vreg);
+
+ prev_enable = reg->req.param[RPM_REGULATOR_PARAM_ENABLE];
+ RPM_VREG_SET_PARAM(reg, ENABLE, 1);
+ rc = rpm_vreg_aggregate_requests(reg);
+ if (rc) {
+ vreg_err(reg, "enable failed, rc=%d", rc);
+ RPM_VREG_SET_PARAM(reg, ENABLE, prev_enable);
+ }
+
+ rpm_vreg_unlock(reg->rpm_vreg);
+
+ return rc;
+}
+
+static int rpm_vreg_disable(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int rc;
+ u32 prev_enable;
+
+ rpm_vreg_lock(reg->rpm_vreg);
+
+ prev_enable = reg->req.param[RPM_REGULATOR_PARAM_ENABLE];
+ RPM_VREG_SET_PARAM(reg, ENABLE, 0);
+ rc = rpm_vreg_aggregate_requests(reg);
+ if (rc) {
+		vreg_err(reg, "disable failed, rc=%d", rc);
+ RPM_VREG_SET_PARAM(reg, ENABLE, prev_enable);
+ }
+
+ rpm_vreg_unlock(reg->rpm_vreg);
+
+ return rc;
+}
+
+static int rpm_vreg_set_voltage(struct regulator_dev *rdev, int min_uV,
+ int max_uV, unsigned *selector)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int rc = 0;
+ u32 prev_voltage;
+
+ rpm_vreg_lock(reg->rpm_vreg);
+
+ prev_voltage = reg->req.param[RPM_REGULATOR_PARAM_VOLTAGE];
+ RPM_VREG_SET_PARAM(reg, VOLTAGE, min_uV);
+
+ /*
+ * Only send a new voltage if the regulator is currently enabled or
+ * if the regulator has been configured to always send voltage updates.
+ */
+ if (reg->always_send_voltage
+ || rpm_vreg_active_or_sleep_enabled(reg->rpm_vreg)
+ || rpm_vreg_shared_active_or_sleep_enabled_valid(reg->rpm_vreg))
+ rc = rpm_vreg_aggregate_requests(reg);
+
+ if (rc) {
+ vreg_err(reg, "set voltage failed, rc=%d", rc);
+ RPM_VREG_SET_PARAM(reg, VOLTAGE, prev_voltage);
+ }
+
+ rpm_vreg_unlock(reg->rpm_vreg);
+
+ return rc;
+}
+
+static int rpm_vreg_get_voltage(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int uV;
+
+ uV = reg->req.param[RPM_REGULATOR_PARAM_VOLTAGE];
+ if (uV == 0)
+ uV = VOLTAGE_UNKNOWN;
+
+ return uV;
+}
+
+static int rpm_vreg_set_voltage_corner(struct regulator_dev *rdev, int min_uV,
+ int max_uV, unsigned *selector)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int rc = 0;
+ int corner;
+ u32 prev_corner;
+
+ /*
+ * Translate from values which work as inputs in the
+ * regulator_set_voltage function to the actual corner values
+ * sent to the RPM.
+ */
+ corner = min_uV - RPM_REGULATOR_CORNER_NONE;
+
+ if (corner < params[RPM_REGULATOR_PARAM_CORNER].min
+ || corner > params[RPM_REGULATOR_PARAM_CORNER].max) {
+ vreg_err(reg, "corner=%d is not within allowed range: [%u, %u]\n",
+ corner, params[RPM_REGULATOR_PARAM_CORNER].min,
+ params[RPM_REGULATOR_PARAM_CORNER].max);
+ return -EINVAL;
+ }
+
+ rpm_vreg_lock(reg->rpm_vreg);
+
+ prev_corner = reg->req.param[RPM_REGULATOR_PARAM_CORNER];
+ RPM_VREG_SET_PARAM(reg, CORNER, corner);
+
+ /*
+ * Only send a new voltage corner if the regulator is currently enabled
+ * or if the regulator has been configured to always send voltage
+ * updates.
+ */
+ if (reg->always_send_voltage
+ || rpm_vreg_active_or_sleep_enabled(reg->rpm_vreg)
+ || rpm_vreg_shared_active_or_sleep_enabled_valid(reg->rpm_vreg))
+ rc = rpm_vreg_aggregate_requests(reg);
+
+ if (rc) {
+ vreg_err(reg, "set voltage corner failed, rc=%d", rc);
+ RPM_VREG_SET_PARAM(reg, CORNER, prev_corner);
+ }
+
+ rpm_vreg_unlock(reg->rpm_vreg);
+
+ return rc;
+}
+
+static int rpm_vreg_get_voltage_corner(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+ return reg->req.param[RPM_REGULATOR_PARAM_CORNER]
+ + RPM_REGULATOR_CORNER_NONE;
+}
+
+static int rpm_vreg_set_voltage_floor_corner(struct regulator_dev *rdev,
+ int min_uV, int max_uV, unsigned *selector)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int rc = 0;
+ int corner;
+ u32 prev_corner;
+
+ /*
+ * Translate from values which work as inputs in the
+ * regulator_set_voltage function to the actual corner values
+ * sent to the RPM.
+ */
+ corner = min_uV - RPM_REGULATOR_CORNER_NONE;
+
+ if (corner < params[RPM_REGULATOR_PARAM_FLOOR_CORNER].min
+ || corner > params[RPM_REGULATOR_PARAM_FLOOR_CORNER].max) {
+ vreg_err(reg, "corner=%d is not within allowed range: [%u, %u]\n",
+ corner, params[RPM_REGULATOR_PARAM_FLOOR_CORNER].min,
+ params[RPM_REGULATOR_PARAM_FLOOR_CORNER].max);
+ return -EINVAL;
+ }
+
+ rpm_vreg_lock(reg->rpm_vreg);
+
+ prev_corner = reg->req.param[RPM_REGULATOR_PARAM_FLOOR_CORNER];
+ RPM_VREG_SET_PARAM(reg, FLOOR_CORNER, corner);
+
+ /*
+ * Only send a new voltage floor corner if the regulator is currently
+ * enabled or if the regulator has been configured to always send
+ * voltage updates.
+ */
+ if (reg->always_send_voltage
+ || rpm_vreg_active_or_sleep_enabled(reg->rpm_vreg)
+ || rpm_vreg_shared_active_or_sleep_enabled_valid(reg->rpm_vreg))
+ rc = rpm_vreg_aggregate_requests(reg);
+
+ if (rc) {
+ vreg_err(reg, "set voltage corner failed, rc=%d", rc);
+ RPM_VREG_SET_PARAM(reg, FLOOR_CORNER, prev_corner);
+ }
+
+ rpm_vreg_unlock(reg->rpm_vreg);
+
+ return rc;
+}
+
+static int rpm_vreg_get_voltage_floor_corner(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+ return reg->req.param[RPM_REGULATOR_PARAM_FLOOR_CORNER]
+ + RPM_REGULATOR_CORNER_NONE;
+}
+
+static int rpm_vreg_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int rc = 0;
+ u32 prev_current;
+ int prev_uA;
+
+ rpm_vreg_lock(reg->rpm_vreg);
+
+ prev_current = reg->req.param[RPM_REGULATOR_PARAM_CURRENT];
+ prev_uA = MILLI_TO_MICRO(prev_current);
+
+ if (mode == REGULATOR_MODE_NORMAL) {
+ /* Make sure that request current is in HPM range. */
+ if (prev_uA < rpm_vreg_hpm_min_uA(reg->rpm_vreg))
+ RPM_VREG_SET_PARAM(reg, CURRENT,
+ MICRO_TO_MILLI(rpm_vreg_hpm_min_uA(reg->rpm_vreg)));
+	} else if (mode == REGULATOR_MODE_IDLE) {
+ /* Make sure that request current is in LPM range. */
+ if (prev_uA > rpm_vreg_lpm_max_uA(reg->rpm_vreg))
+ RPM_VREG_SET_PARAM(reg, CURRENT,
+ MICRO_TO_MILLI(rpm_vreg_lpm_max_uA(reg->rpm_vreg)));
+ } else {
+ vreg_err(reg, "invalid mode: %u\n", mode);
+ rpm_vreg_unlock(reg->rpm_vreg);
+ return -EINVAL;
+ }
+
+ /*
+ * Only send a new load current value if the regulator is currently
+ * enabled or if the regulator has been configured to always send
+ * current updates.
+ */
+ if (reg->always_send_current
+ || rpm_vreg_active_or_sleep_enabled(reg->rpm_vreg)
+ || rpm_vreg_shared_active_or_sleep_enabled_valid(reg->rpm_vreg))
+ rc = rpm_vreg_aggregate_requests(reg);
+
+ if (rc) {
+ vreg_err(reg, "set mode failed, rc=%d", rc);
+ RPM_VREG_SET_PARAM(reg, CURRENT, prev_current);
+ }
+
+ rpm_vreg_unlock(reg->rpm_vreg);
+
+ return rc;
+}
+
+static unsigned int rpm_vreg_get_mode(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+ return (reg->req.param[RPM_REGULATOR_PARAM_CURRENT]
+ >= MICRO_TO_MILLI(reg->rpm_vreg->hpm_min_load))
+ ? REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;
+}
+
+static unsigned int rpm_vreg_get_optimum_mode(struct regulator_dev *rdev,
+ int input_uV, int output_uV, int load_uA)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ u32 load_mA;
+
+ load_uA += reg->system_load;
+
+ load_mA = MICRO_TO_MILLI(load_uA);
+ if (load_mA > params[RPM_REGULATOR_PARAM_CURRENT].max)
+ load_mA = params[RPM_REGULATOR_PARAM_CURRENT].max;
+
+ rpm_vreg_lock(reg->rpm_vreg);
+ RPM_VREG_SET_PARAM(reg, CURRENT, load_mA);
+ rpm_vreg_unlock(reg->rpm_vreg);
+
+ return (load_uA >= reg->rpm_vreg->hpm_min_load)
+ ? REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;
+}
+
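+/*
+ * Illustrative arithmetic for the threshold used in
+ * rpm_vreg_get_optimum_mode() above (the 10000 uA hpm-min-load is
+ * hypothetical): with qcom,hpm-min-load = <10000>, a consumer requesting
+ * 2000 uA (plus any qcom,system-load) maps to REGULATOR_MODE_IDLE, while
+ * 15000 uA maps to REGULATOR_MODE_NORMAL.  The load is converted to mA
+ * and clamped to the maximum of the RPM current parameter before being
+ * stored in the request.
+ */
+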
+static int rpm_vreg_enable_time(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+ return reg->rpm_vreg->enable_time;
+}
+
+/**
+ * rpm_regulator_get() - lookup and obtain a handle to an RPM regulator
+ * @dev: device for regulator consumer
+ * @supply: supply name
+ *
+ * Returns a struct rpm_regulator corresponding to the regulator producer,
+ * or ERR_PTR() containing errno.
+ *
+ * This function may only be called from nonatomic context.
+ */
+struct rpm_regulator *rpm_regulator_get(struct device *dev, const char *supply)
+{
+ struct rpm_regulator *framework_reg;
+ struct rpm_regulator *priv_reg = NULL;
+ struct regulator *regulator;
+ struct rpm_vreg *rpm_vreg;
+
+ regulator = regulator_get(dev, supply);
+ if (IS_ERR(regulator)) {
+ pr_err("could not find regulator for: dev=%s, supply=%s, rc=%ld\n",
+ (dev ? dev_name(dev) : ""), (supply ? supply : ""),
+ PTR_ERR(regulator));
+ return ERR_CAST(regulator);
+ }
+
+ framework_reg = regulator_get_drvdata(regulator);
+ if (framework_reg == NULL) {
+ pr_err("regulator structure not found.\n");
+ regulator_put(regulator);
+ return ERR_PTR(-ENODEV);
+ }
+ regulator_put(regulator);
+
+ rpm_vreg = framework_reg->rpm_vreg;
+
+ priv_reg = kzalloc(sizeof(struct rpm_regulator), GFP_KERNEL);
+ if (priv_reg == NULL) {
+ vreg_err(framework_reg,
+ "could not allocate memory for regulator\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * Allocate a regulator_dev struct so that framework callback functions
+ * can be called from the private API functions.
+ */
+ priv_reg->rdev = kzalloc(sizeof(struct regulator_dev), GFP_KERNEL);
+ if (priv_reg->rdev == NULL) {
+ vreg_err(framework_reg,
+ "could not allocate memory for regulator_dev\n");
+ kfree(priv_reg);
+ return ERR_PTR(-ENOMEM);
+ }
+ priv_reg->rdev->reg_data = priv_reg;
+ priv_reg->rpm_vreg = rpm_vreg;
+ priv_reg->rdesc.name = framework_reg->rdesc.name;
+ priv_reg->rdesc.ops = framework_reg->rdesc.ops;
+ priv_reg->set_active = framework_reg->set_active;
+ priv_reg->set_sleep = framework_reg->set_sleep;
+ priv_reg->min_uV = framework_reg->min_uV;
+ priv_reg->max_uV = framework_reg->max_uV;
+ priv_reg->system_load = framework_reg->system_load;
+
+ might_sleep_if(!rpm_vreg->allow_atomic);
+ rpm_vreg_lock(rpm_vreg);
+ list_add(&priv_reg->list, &rpm_vreg->reg_list);
+ rpm_vreg_unlock(rpm_vreg);
+
+ return priv_reg;
+}
+EXPORT_SYMBOL(rpm_regulator_get);
+
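+/*
+ * Illustrative private-API consumer sketch; the supply name and voltage
+ * are hypothetical and error handling for the intermediate calls is
+ * elided:
+ *
+ *	struct rpm_regulator *vreg;
+ *	int rc;
+ *
+ *	vreg = rpm_regulator_get(dev, "vdd-example");
+ *	if (IS_ERR(vreg))
+ *		return PTR_ERR(vreg);
+ *	rc = rpm_regulator_set_voltage(vreg, 1200000, 1200000);
+ *	rc = rpm_regulator_enable(vreg);
+ *	...
+ *	rc = rpm_regulator_disable(vreg);
+ *	rpm_regulator_put(vreg);
+ */
+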
+static int rpm_regulator_check_input(struct rpm_regulator *regulator)
+{
+ if (IS_ERR_OR_NULL(regulator) || regulator->rpm_vreg == NULL) {
+ pr_err("invalid rpm_regulator pointer\n");
+ return -EINVAL;
+ }
+
+ might_sleep_if(!regulator->rpm_vreg->allow_atomic);
+
+ return 0;
+}
+
+/**
+ * rpm_regulator_put() - free the RPM regulator handle
+ * @regulator: RPM regulator handle
+ *
+ * Parameter reaggregation does not take place when rpm_regulator_put is called.
+ * Therefore, regulator enable state and voltage must be configured
+ * appropriately before calling rpm_regulator_put.
+ *
+ * This function may be called from either atomic or nonatomic context. If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+void rpm_regulator_put(struct rpm_regulator *regulator)
+{
+ struct rpm_vreg *rpm_vreg;
+ int rc = rpm_regulator_check_input(regulator);
+
+ if (rc)
+ return;
+
+ rpm_vreg = regulator->rpm_vreg;
+
+ might_sleep_if(!rpm_vreg->allow_atomic);
+ rpm_vreg_lock(rpm_vreg);
+ list_del(&regulator->list);
+ rpm_vreg_unlock(rpm_vreg);
+
+ kfree(regulator->rdev);
+ kfree(regulator);
+}
+EXPORT_SYMBOL(rpm_regulator_put);
+
+/**
+ * rpm_regulator_enable() - enable regulator output
+ * @regulator: RPM regulator handle
+ *
+ * Returns 0 on success or errno on failure.
+ *
+ * This function may be called from either atomic or nonatomic context. If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+int rpm_regulator_enable(struct rpm_regulator *regulator)
+{
+ int rc = rpm_regulator_check_input(regulator);
+
+ if (rc)
+ return rc;
+
+ return rpm_vreg_enable(regulator->rdev);
+}
+EXPORT_SYMBOL(rpm_regulator_enable);
+
+/**
+ * rpm_regulator_disable() - disable regulator output
+ * @regulator: RPM regulator handle
+ *
+ * Returns 0 on success or errno on failure.
+ *
+ * The enable state of the regulator is determined by aggregating the requests
+ * of all consumers. Therefore, it is possible that the regulator will remain
+ * enabled even after rpm_regulator_disable is called.
+ *
+ * This function may be called from either atomic or nonatomic context. If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+int rpm_regulator_disable(struct rpm_regulator *regulator)
+{
+ int rc = rpm_regulator_check_input(regulator);
+
+ if (rc)
+ return rc;
+
+ return rpm_vreg_disable(regulator->rdev);
+}
+EXPORT_SYMBOL(rpm_regulator_disable);
+
+/**
+ * rpm_regulator_set_voltage() - set regulator output voltage
+ * @regulator: RPM regulator handle
+ * @min_uV: minimum required voltage in uV
+ * @max_uV: maximum acceptable voltage in uV
+ *
+ * Sets a voltage regulator to the desired output voltage. This can be set
+ * while the regulator is disabled or enabled. If the regulator is enabled then
+ * the voltage will change to the new value immediately; otherwise, if the
+ * regulator is disabled, then the regulator will output at the new voltage when
+ * enabled.
+ *
+ * The min_uV to max_uV voltage range requested must intersect with the
+ * voltage constraint range configured for the regulator.
+ *
+ * Returns 0 on success or errno on failure.
+ *
+ * The final voltage value that is sent to the RPM is aggregated based upon the
+ * values requested by all consumers of the regulator. This corresponds to the
+ * maximum min_uV value.
+ *
+ * This function may be called from either atomic or nonatomic context. If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+int rpm_regulator_set_voltage(struct rpm_regulator *regulator, int min_uV,
+ int max_uV)
+{
+ int rc = rpm_regulator_check_input(regulator);
+ int uV = min_uV;
+
+ if (rc)
+ return rc;
+
+ if (regulator->rpm_vreg->regulator_type == RPM_REGULATOR_TYPE_VS) {
+ vreg_err(regulator, "unsupported regulator type: %d\n",
+ regulator->rpm_vreg->regulator_type);
+ return -EINVAL;
+ }
+
+ if (min_uV > max_uV) {
+		vreg_err(regulator, "min_uV=%d must not be greater than max_uV=%d\n",
+ min_uV, max_uV);
+ return -EINVAL;
+ }
+
+ if (uV < regulator->min_uV && max_uV >= regulator->min_uV)
+ uV = regulator->min_uV;
+
+ if (uV < regulator->min_uV || uV > regulator->max_uV) {
+ vreg_err(regulator,
+ "request v=[%d, %d] is outside allowed v=[%d, %d]\n",
+ min_uV, max_uV, regulator->min_uV, regulator->max_uV);
+ return -EINVAL;
+ }
+
+ return regulator->rdesc.ops->set_voltage(regulator->rdev, uV, uV, NULL);
+}
+EXPORT_SYMBOL(rpm_regulator_set_voltage);
+
+/**
+ * rpm_regulator_set_mode() - set regulator operating mode
+ * @regulator: RPM regulator handle
+ * @mode: operating mode requested for the regulator
+ *
+ * Requests that the mode of the regulator be set to the mode specified. This
+ * parameter is aggregated using a max function such that AUTO < IPEAK < HPM.
+ *
+ * Returns 0 on success or errno on failure.
+ */
+int rpm_regulator_set_mode(struct rpm_regulator *regulator,
+ enum rpm_regulator_mode mode)
+{
+ int index = 0;
+ u32 new_mode, prev_mode;
+ int rc;
+
+ rc = rpm_regulator_check_input(regulator);
+ if (rc)
+ return rc;
+
+ if (mode < 0 || mode >= ARRAY_SIZE(mode_mapping)) {
+ vreg_err(regulator, "invalid mode requested: %d\n", mode);
+ return -EINVAL;
+ }
+
+ switch (regulator->rpm_vreg->regulator_type) {
+ case RPM_REGULATOR_TYPE_SMPS:
+ index = RPM_REGULATOR_PARAM_MODE_SMPS;
+ new_mode = mode_mapping[mode].smps_mode;
+ break;
+ case RPM_REGULATOR_TYPE_LDO:
+ index = RPM_REGULATOR_PARAM_MODE_LDO;
+ new_mode = mode_mapping[mode].ldo_mode;
+ break;
+ default:
+ vreg_err(regulator, "unsupported regulator type: %d\n",
+ regulator->rpm_vreg->regulator_type);
+ return -EINVAL;
+	}
+
+ if (new_mode < params[index].min || new_mode > params[index].max) {
+ vreg_err(regulator, "invalid mode requested: %d for type: %d\n",
+ mode, regulator->rpm_vreg->regulator_type);
+ return -EINVAL;
+ }
+
+ rpm_vreg_lock(regulator->rpm_vreg);
+
+ prev_mode = regulator->req.param[index];
+ regulator->req.param[index] = new_mode;
+ regulator->req.modified |= BIT(index);
+
+ rc = rpm_vreg_aggregate_requests(regulator);
+ if (rc) {
+ vreg_err(regulator, "set mode failed, rc=%d", rc);
+ regulator->req.param[index] = prev_mode;
+ }
+
+ rpm_vreg_unlock(regulator->rpm_vreg);
+
+ return rc;
+}
+EXPORT_SYMBOL(rpm_regulator_set_mode);
+
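+/*
+ * Illustrative sketch of forcing high power mode on a handle obtained
+ * from rpm_regulator_get(); assumes the rpm_regulator_mode enum exposes
+ * RPM_REGULATOR_MODE_HPM, matching the AUTO < IPEAK < HPM aggregation
+ * order documented above:
+ *
+ *	rc = rpm_regulator_set_mode(vreg, RPM_REGULATOR_MODE_HPM);
+ *	if (rc)
+ *		pr_err("forcing HPM failed, rc=%d\n", rc);
+ */
+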
+static struct regulator_ops ldo_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .set_voltage = rpm_vreg_set_voltage,
+ .get_voltage = rpm_vreg_get_voltage,
+ .set_mode = rpm_vreg_set_mode,
+ .get_mode = rpm_vreg_get_mode,
+ .get_optimum_mode = rpm_vreg_get_optimum_mode,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+static struct regulator_ops ldo_corner_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .set_voltage = rpm_vreg_set_voltage_corner,
+ .get_voltage = rpm_vreg_get_voltage_corner,
+ .set_mode = rpm_vreg_set_mode,
+ .get_mode = rpm_vreg_get_mode,
+ .get_optimum_mode = rpm_vreg_get_optimum_mode,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+static struct regulator_ops ldo_floor_corner_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .set_voltage = rpm_vreg_set_voltage_floor_corner,
+ .get_voltage = rpm_vreg_get_voltage_floor_corner,
+ .set_mode = rpm_vreg_set_mode,
+ .get_mode = rpm_vreg_get_mode,
+ .get_optimum_mode = rpm_vreg_get_optimum_mode,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+static struct regulator_ops smps_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .set_voltage = rpm_vreg_set_voltage,
+ .get_voltage = rpm_vreg_get_voltage,
+ .set_mode = rpm_vreg_set_mode,
+ .get_mode = rpm_vreg_get_mode,
+ .get_optimum_mode = rpm_vreg_get_optimum_mode,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+static struct regulator_ops smps_corner_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .set_voltage = rpm_vreg_set_voltage_corner,
+ .get_voltage = rpm_vreg_get_voltage_corner,
+ .set_mode = rpm_vreg_set_mode,
+ .get_mode = rpm_vreg_get_mode,
+ .get_optimum_mode = rpm_vreg_get_optimum_mode,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+static struct regulator_ops smps_floor_corner_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .set_voltage = rpm_vreg_set_voltage_floor_corner,
+ .get_voltage = rpm_vreg_get_voltage_floor_corner,
+ .set_mode = rpm_vreg_set_mode,
+ .get_mode = rpm_vreg_get_mode,
+ .get_optimum_mode = rpm_vreg_get_optimum_mode,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+static struct regulator_ops switch_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+static struct regulator_ops ncp_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .set_voltage = rpm_vreg_set_voltage,
+ .get_voltage = rpm_vreg_get_voltage,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+static struct regulator_ops *vreg_ops[] = {
+ [RPM_REGULATOR_TYPE_LDO] = &ldo_ops,
+ [RPM_REGULATOR_TYPE_SMPS] = &smps_ops,
+ [RPM_REGULATOR_TYPE_VS] = &switch_ops,
+ [RPM_REGULATOR_TYPE_NCP] = &ncp_ops,
+};
+
+static int rpm_vreg_device_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rpm_regulator *reg;
+ struct rpm_vreg *rpm_vreg;
+
+ reg = platform_get_drvdata(pdev);
+ if (reg) {
+ rpm_vreg = reg->rpm_vreg;
+ rpm_vreg_lock(rpm_vreg);
+ regulator_unregister(reg->rdev);
+ list_del(&reg->list);
+ kfree(reg);
+ rpm_vreg_unlock(rpm_vreg);
+ } else {
+ dev_err(dev, "%s: drvdata missing\n", __func__);
+ return -EINVAL;
+ }
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static int rpm_vreg_resource_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rpm_regulator *reg, *reg_temp;
+ struct rpm_vreg *rpm_vreg;
+
+ rpm_vreg = platform_get_drvdata(pdev);
+ if (rpm_vreg) {
+ rpm_vreg_lock(rpm_vreg);
+ list_for_each_entry_safe(reg, reg_temp, &rpm_vreg->reg_list,
+ list) {
+ /* Only touch data for private consumers. */
+ if (reg->rdev->desc == NULL) {
+ list_del(&reg->list);
+ kfree(reg->rdev);
+ kfree(reg);
+ } else {
+ dev_err(dev, "%s: not all child devices have been removed\n",
+ __func__);
+ }
+ }
+ rpm_vreg_unlock(rpm_vreg);
+
+ msm_rpm_free_request(rpm_vreg->handle_active);
+ msm_rpm_free_request(rpm_vreg->handle_sleep);
+
+ kfree(rpm_vreg);
+ } else {
+ dev_err(dev, "%s: drvdata missing\n", __func__);
+ return -EINVAL;
+ }
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+/*
+ * This probe is called for child rpm-regulator devices whose properties
+ * are required to configure individual regulator framework regulators
+ * for a given RPM regulator resource.
+ */
+static int rpm_vreg_device_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct regulator_init_data *init_data;
+ struct rpm_vreg *rpm_vreg;
+ struct rpm_regulator *reg;
+ struct regulator_config reg_config = {};
+ int rc = 0;
+ int i, regulator_type;
+ u32 val;
+
+ if (!dev->of_node) {
+ dev_err(dev, "%s: device tree information missing\n", __func__);
+ return -ENODEV;
+ }
+
+ if (pdev->dev.parent == NULL) {
+ dev_err(dev, "%s: parent device missing\n", __func__);
+ return -ENODEV;
+ }
+
+ rpm_vreg = dev_get_drvdata(pdev->dev.parent);
+ if (rpm_vreg == NULL) {
+ dev_err(dev, "%s: rpm_vreg not found in parent device\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ reg = kzalloc(sizeof(struct rpm_regulator), GFP_KERNEL);
+ if (reg == NULL) {
+ dev_err(dev, "%s: could not allocate memory for reg\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ regulator_type = rpm_vreg->regulator_type;
+ reg->rpm_vreg = rpm_vreg;
+ reg->rdesc.ops = vreg_ops[regulator_type];
+ reg->rdesc.owner = THIS_MODULE;
+ reg->rdesc.type = REGULATOR_VOLTAGE;
+
+ /*
+ * Switch to voltage corner regulator ops if qcom,use-voltage-corner
+ * is specified in the device node (SMPS and LDO only).
+ */
+ if (of_property_read_bool(node, "qcom,use-voltage-corner")) {
+ if (of_property_read_bool(node,
+ "qcom,use-voltage-floor-corner")) {
+			dev_err(dev, "%s: invalid properties: both qcom,use-voltage-corner and qcom,use-voltage-floor-corner specified\n",
+				__func__);
+			rc = -EINVAL;
+			goto fail_free_reg;
+ }
+
+ if (regulator_type == RPM_REGULATOR_TYPE_SMPS)
+ reg->rdesc.ops = &smps_corner_ops;
+ else if (regulator_type == RPM_REGULATOR_TYPE_LDO)
+ reg->rdesc.ops = &ldo_corner_ops;
+ } else if (of_property_read_bool(node,
+ "qcom,use-voltage-floor-corner")) {
+ if (regulator_type == RPM_REGULATOR_TYPE_SMPS)
+ reg->rdesc.ops = &smps_floor_corner_ops;
+ else if (regulator_type == RPM_REGULATOR_TYPE_LDO)
+ reg->rdesc.ops = &ldo_floor_corner_ops;
+ }
+
+ reg->always_send_voltage
+ = of_property_read_bool(node, "qcom,always-send-voltage");
+ reg->always_send_current
+ = of_property_read_bool(node, "qcom,always-send-current");
+
+ if (regulator_type == RPM_REGULATOR_TYPE_VS)
+ reg->rdesc.n_voltages = 0;
+ else
+ reg->rdesc.n_voltages = 2;
+
+ rc = of_property_read_u32(node, "qcom,set", &val);
+ if (rc) {
+ dev_err(dev, "%s: sleep set and/or active set must be configured via qcom,set property, rc=%d\n",
+ __func__, rc);
+ goto fail_free_reg;
+ } else if (!(val & RPM_SET_CONFIG_BOTH)) {
+ dev_err(dev, "%s: qcom,set=%u property is invalid\n", __func__,
+ val);
+ rc = -EINVAL;
+ goto fail_free_reg;
+ }
+
+ reg->set_active = !!(val & RPM_SET_CONFIG_ACTIVE);
+ reg->set_sleep = !!(val & RPM_SET_CONFIG_SLEEP);
+
+ init_data = of_get_regulator_init_data(dev, node, &reg->rdesc);
+ if (init_data == NULL) {
+ dev_err(dev, "%s: unable to allocate memory\n", __func__);
+ rc = -ENOMEM;
+ goto fail_free_reg;
+ }
+ if (init_data->constraints.name == NULL) {
+ dev_err(dev, "%s: regulator name not specified\n", __func__);
+ rc = -EINVAL;
+ goto fail_free_reg;
+ }
+
+ init_data->constraints.input_uV = init_data->constraints.max_uV;
+
+ if (of_get_property(node, "parent-supply", NULL))
+ init_data->supply_regulator = "parent";
+
+ /*
+ * Fill in ops and mode masks based on callbacks specified for
+ * this type of regulator.
+ */
+ if (reg->rdesc.ops->enable)
+ init_data->constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_STATUS;
+ if (reg->rdesc.ops->get_voltage)
+ init_data->constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_VOLTAGE;
+ if (reg->rdesc.ops->get_mode) {
+ init_data->constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_DRMS;
+ init_data->constraints.valid_modes_mask
+ |= REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE;
+ }
+
+ reg->rdesc.name = init_data->constraints.name;
+ reg->min_uV = init_data->constraints.min_uV;
+ reg->max_uV = init_data->constraints.max_uV;
+
+ /* Initialize the param array based on optional properties. */
+ for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+ rc = of_property_read_u32(node, params[i].property_name, &val);
+ if (rc == 0) {
+ if (params[i].supported_regulator_types
+ & BIT(regulator_type)) {
+ if (val < params[i].min
+ || val > params[i].max) {
+				pr_warn("%s: device tree property: %s=%u is outside the allowed range [%u, %u]\n",
+ reg->rdesc.name,
+ params[i].property_name, val,
+ params[i].min, params[i].max);
+ continue;
+ }
+ reg->req.param[i] = val;
+ reg->req.modified |= BIT(i);
+ } else {
+ pr_warn("%s: regulator type=%d does not support device tree property: %s\n",
+ reg->rdesc.name, regulator_type,
+ params[i].property_name);
+ }
+ }
+ }
+
+ of_property_read_u32(node, "qcom,system-load", &reg->system_load);
+
+ rpm_vreg_lock(rpm_vreg);
+ list_add(&reg->list, &rpm_vreg->reg_list);
+ rpm_vreg_unlock(rpm_vreg);
+
+ reg_config.dev = dev;
+ reg_config.init_data = init_data;
+ reg_config.of_node = node;
+ reg_config.driver_data = reg;
+ reg->rdev = regulator_register(&reg->rdesc, &reg_config);
+ if (IS_ERR(reg->rdev)) {
+ rc = PTR_ERR(reg->rdev);
+ reg->rdev = NULL;
+ pr_err("regulator_register failed: %s, rc=%d\n",
+ reg->rdesc.name, rc);
+ goto fail_remove_from_list;
+ }
+
+ platform_set_drvdata(pdev, reg);
+
+ pr_debug("successfully probed: %s\n", reg->rdesc.name);
+
+ return 0;
+
+fail_remove_from_list:
+ rpm_vreg_lock(rpm_vreg);
+ list_del(&reg->list);
+ rpm_vreg_unlock(rpm_vreg);
+
+fail_free_reg:
+ kfree(reg);
+ return rc;
+}
+
+/*
+ * This probe is called for parent rpm-regulator devices whose properties
+ * are required to identify a given RPM resource.
+ */
+static int rpm_vreg_resource_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct rpm_vreg *rpm_vreg;
+	u32 val = 0;
+ u32 resource_type;
+ int rc;
+
+ if (!dev->of_node) {
+ dev_err(dev, "%s: device tree information missing\n", __func__);
+ return -ENODEV;
+ }
+
+ /* Create new rpm_vreg entry. */
+ rpm_vreg = kzalloc(sizeof(struct rpm_vreg), GFP_KERNEL);
+ if (rpm_vreg == NULL) {
+ dev_err(dev, "%s: could not allocate memory for vreg\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* Required device tree properties: */
+ rc = of_property_read_string(node, "qcom,resource-name",
+ &rpm_vreg->resource_name);
+ if (rc) {
+ dev_err(dev, "%s: qcom,resource-name missing in DT node\n",
+ __func__);
+ goto fail_free_vreg;
+ }
+ resource_type = rpm_vreg_string_to_int(rpm_vreg->resource_name);
+
+ rc = of_property_read_u32(node, "qcom,resource-id",
+ &rpm_vreg->resource_id);
+ if (rc) {
+ dev_err(dev, "%s: qcom,resource-id missing in DT node\n",
+ __func__);
+ goto fail_free_vreg;
+ }
+
+ rc = of_property_read_u32(node, "qcom,regulator-type",
+ &rpm_vreg->regulator_type);
+ if (rc) {
+ dev_err(dev, "%s: qcom,regulator-type missing in DT node\n",
+ __func__);
+ goto fail_free_vreg;
+ }
+
+ if ((rpm_vreg->regulator_type < 0)
+ || (rpm_vreg->regulator_type >= RPM_REGULATOR_TYPE_MAX)) {
+ dev_err(dev, "%s: invalid regulator type: %d\n", __func__,
+ rpm_vreg->regulator_type);
+ rc = -EINVAL;
+ goto fail_free_vreg;
+ }
+
+ /* Optional device tree properties: */
+ of_property_read_u32(node, "qcom,allow-atomic", &val);
+ rpm_vreg->allow_atomic = !!val;
+ of_property_read_u32(node, "qcom,enable-time", &rpm_vreg->enable_time);
+ of_property_read_u32(node, "qcom,hpm-min-load",
+ &rpm_vreg->hpm_min_load);
+ rpm_vreg->apps_only = of_property_read_bool(node, "qcom,apps-only");
+
+ rpm_vreg->handle_active = msm_rpm_create_request(RPM_SET_ACTIVE,
+ resource_type, rpm_vreg->resource_id, RPM_REGULATOR_PARAM_MAX);
+ if (rpm_vreg->handle_active == NULL
+ || IS_ERR(rpm_vreg->handle_active)) {
+		rc = rpm_vreg->handle_active ?
+			PTR_ERR(rpm_vreg->handle_active) : -ENOMEM;
+ dev_err(dev, "%s: failed to create active RPM handle, rc=%d\n",
+ __func__, rc);
+ goto fail_free_vreg;
+ }
+
+ rpm_vreg->handle_sleep = msm_rpm_create_request(RPM_SET_SLEEP,
+ resource_type, rpm_vreg->resource_id, RPM_REGULATOR_PARAM_MAX);
+ if (rpm_vreg->handle_sleep == NULL || IS_ERR(rpm_vreg->handle_sleep)) {
+		rc = rpm_vreg->handle_sleep ?
+			PTR_ERR(rpm_vreg->handle_sleep) : -ENOMEM;
+ dev_err(dev, "%s: failed to create sleep RPM handle, rc=%d\n",
+ __func__, rc);
+ goto fail_free_handle_active;
+ }
+
+ INIT_LIST_HEAD(&rpm_vreg->reg_list);
+
+ if (rpm_vreg->allow_atomic)
+ spin_lock_init(&rpm_vreg->slock);
+ else
+ mutex_init(&rpm_vreg->mlock);
+
+ platform_set_drvdata(pdev, rpm_vreg);
+
+ rc = of_platform_populate(node, NULL, NULL, dev);
+ if (rc) {
+ dev_err(dev, "%s: failed to add child nodes, rc=%d\n", __func__,
+ rc);
+ goto fail_unset_drvdata;
+ }
+
+ pr_debug("successfully probed: %s (%08X) %u\n", rpm_vreg->resource_name,
+ resource_type, rpm_vreg->resource_id);
+
+ return rc;
+
+fail_unset_drvdata:
+ platform_set_drvdata(pdev, NULL);
+ msm_rpm_free_request(rpm_vreg->handle_sleep);
+
+fail_free_handle_active:
+ msm_rpm_free_request(rpm_vreg->handle_active);
+
+fail_free_vreg:
+ kfree(rpm_vreg);
+
+ return rc;
+}
+
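+/*
+ * Illustrative (hypothetical) devicetree layout consumed by the two
+ * probes above; node names and values are made up, and the accompanying
+ * devicetree binding documentation is the authoritative reference:
+ *
+ *	rpm-regulator-smpa1 {
+ *		compatible = "qcom,rpm-smd-regulator-resource";
+ *		qcom,resource-name = "smpa";
+ *		qcom,resource-id = <1>;
+ *		qcom,regulator-type = <1>;
+ *		qcom,hpm-min-load = <100000>;
+ *
+ *		regulator-s1 {
+ *			compatible = "qcom,rpm-smd-regulator";
+ *			regulator-name = "8916_s1";
+ *			qcom,set = <3>;
+ *			regulator-min-microvolt = <500000>;
+ *			regulator-max-microvolt = <1300000>;
+ *		};
+ *	};
+ */
+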
+static struct of_device_id rpm_vreg_match_table_device[] = {
+ { .compatible = "qcom,rpm-smd-regulator", },
+ {}
+};
+
+static struct of_device_id rpm_vreg_match_table_resource[] = {
+ { .compatible = "qcom,rpm-smd-regulator-resource", },
+ {}
+};
+
+static struct platform_driver rpm_vreg_device_driver = {
+ .probe = rpm_vreg_device_probe,
+ .remove = rpm_vreg_device_remove,
+ .driver = {
+ .name = "qcom,rpm-smd-regulator",
+ .owner = THIS_MODULE,
+ .of_match_table = rpm_vreg_match_table_device,
+ },
+};
+
+static struct platform_driver rpm_vreg_resource_driver = {
+ .probe = rpm_vreg_resource_probe,
+ .remove = rpm_vreg_resource_remove,
+ .driver = {
+ .name = "qcom,rpm-smd-regulator-resource",
+ .owner = THIS_MODULE,
+ .of_match_table = rpm_vreg_match_table_resource,
+ },
+};
+
+/**
+ * rpm_smd_regulator_driver_init() - initialize the RPM SMD regulator drivers
+ *
+ * This function registers the RPM SMD regulator platform drivers.
+ *
+ * Returns 0 on success or errno on failure.
+ */
+int __init rpm_smd_regulator_driver_init(void)
+{
+ static bool initialized;
+ int i, rc;
+
+	if (initialized)
+		return 0;
+	initialized = true;
+
+ /* Store parameter string names as integers */
+ for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++)
+ params[i].key = rpm_vreg_string_to_int(params[i].name);
+
+ rc = platform_driver_register(&rpm_vreg_device_driver);
+ if (rc)
+ return rc;
+
+ return platform_driver_register(&rpm_vreg_resource_driver);
+}
+EXPORT_SYMBOL(rpm_smd_regulator_driver_init);
+
+static void __exit rpm_vreg_exit(void)
+{
+ platform_driver_unregister(&rpm_vreg_device_driver);
+ platform_driver_unregister(&rpm_vreg_resource_driver);
+}
+
+arch_initcall(rpm_smd_regulator_driver_init);
+module_exit(rpm_vreg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM RPM SMD regulator driver");
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 628bcbacbf38..47b0e4180845 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -16,3 +16,41 @@ config QCOM_PM
QCOM Platform specific power driver to manage cores and L2 low power
modes. It interface with various system drivers to put the cores in
low power modes.
+
+config MSM_SMD
+ depends on MSM_SMEM
+ bool "MSM Shared Memory Driver (SMD)"
+ help
+	  Support for the shared memory interprocessor communication protocol
+	  which provides virtual point-to-point serial channels between processes
+	  on the apps processor and processes on other processors in the SoC.
+ Also includes support for the Shared Memory State Machine (SMSM)
+ protocol which provides a mechanism to publish single bit state
+ information to one or more processors in the SoC.
+
+config MSM_SMD_DEBUG
+ depends on MSM_SMD
+ bool "MSM SMD debug support"
+ help
+ Support for debugging SMD and SMSM communication between apps and
+	  other processors in the SoC. Debug support primarily consists of
+	  logs containing information such as what interrupts were processed,
+ what channels caused interrupt activity, and when internal state
+ change events occur.
+
+config MSM_RPM_SMD
+ bool "RPM driver using SMD protocol"
+ help
+ RPM is the dedicated hardware engine for managing shared SoC
+	  resources. This config adds driver support for using SMD as the
+	  transport layer for communication with the RPM hardware. It also selects
+ the MSM_MPM config that programs the MPM module to monitor interrupts
+ during sleep modes.
+
+config MSM_SMEM
+ depends on REMOTE_SPINLOCK_MSM
+ bool "MSM Shared Memory (SMEM)"
+ help
+ Support for the shared memory interface between the various
+ processors in the System on a Chip (SoC) which allows basic
+ inter-processor communication.
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 931d385386c5..bc9dc1fdd137 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,2 +1,8 @@
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_PM) += spm.o
+obj-$(CONFIG_MSM_RPM_SMD) += rpm-smd.o
+ifdef CONFIG_DEBUG_FS
+obj-$(CONFIG_MSM_RPM_SMD) += rpm-smd-debug.o
+endif
+obj-$(CONFIG_MSM_SMD) += smd.o smd_debug.o smd_private.o smd_init_dt.o smsm_debug.o
+obj-$(CONFIG_MSM_SMEM) += smem.o smem_debug.o
diff --git a/drivers/soc/qcom/rpm-smd-debug.c b/drivers/soc/qcom/rpm-smd-debug.c
new file mode 100644
index 000000000000..c08668149636
--- /dev/null
+++ b/drivers/soc/qcom/rpm-smd-debug.c
@@ -0,0 +1,141 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "rpm-smd-debug: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <soc/qcom/rpm-smd.h>
+
+#define MAX_MSG_BUFFER 350
+#define MAX_KEY_VALUE_PAIRS 20
+
+static struct dentry *rpm_debugfs_dir;
+
+static u32 string_to_uint(const u8 *str)
+{
+ int i, len;
+ u32 output = 0;
+
+ len = strnlen(str, sizeof(u32));
+ for (i = 0; i < len; i++)
+ output |= str[i] << (i * 8);
+
+ return output;
+}
+
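+/*
+ * The write format parsed below is:
+ *
+ *	<set> <rsc_type> <rsc_id> <nelems> [<key> <value>]...
+ *
+ * where <set> is "active" or "sleep" and <rsc_type>/<key> strings are
+ * packed little-endian into integers by string_to_uint() (e.g. "clk"
+ * becomes 0x006b6c63).  A hypothetical example sending one key-value
+ * pair:
+ *
+ *	echo "active clka 1 1 KEY1 1" > /sys/kernel/debug/rpm_send_msg/message
+ */
+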
+static ssize_t rsc_ops_write(struct file *fp, const char __user *user_buffer,
+ size_t count, loff_t *position)
+{
+ char buf[MAX_MSG_BUFFER], rsc_type_str[6] = {}, rpm_set[8] = {},
+ key_str[6] = {};
+	int i, pos, set = -1, nelems = -1;
+ char *cmp;
+ uint32_t rsc_type, rsc_id, key, data;
+ struct msm_rpm_request *req;
+
+ count = min(count, sizeof(buf) - 1);
+ if (copy_from_user(&buf, user_buffer, count))
+ return -EFAULT;
+ buf[count] = '\0';
+ cmp = strstrip(buf);
+
+ sscanf(cmp, "%7s %5s %u %d %n", rpm_set, rsc_type_str, &rsc_id,
+ &nelems, &pos);
+ if (strlen(rpm_set) > 6 || strlen(rsc_type_str) > 4) {
+ pr_err("Invalid value of set or resource type\n");
+ goto err;
+ }
+
+ if (!strcmp(rpm_set, "active"))
+ set = 0;
+ else if (!strcmp(rpm_set, "sleep"))
+ set = 1;
+
+ rsc_type = string_to_uint(rsc_type_str);
+
+ if (set < 0 || nelems < 0) {
+ pr_err("Invalid value of set or nelems\n");
+ goto err;
+ }
+ if (nelems > MAX_KEY_VALUE_PAIRS) {
+ pr_err("Exceeded max no of key-value entries\n");
+ goto err;
+ }
+
+ req = msm_rpm_create_request(set, rsc_type, rsc_id, nelems);
+ if (!req)
+ return -ENOMEM;
+
+ for (i = 0; i < nelems; i++) {
+ cmp += pos;
+ sscanf(cmp, "%5s %n", key_str, &pos);
+ if (strlen(key_str) > 4) {
+			pr_err("Key value cannot be more than 4 characters\n");
+ goto err;
+ }
+ key = string_to_uint(key_str);
+ if (!key) {
+ pr_err("Key values entered incorrectly\n");
+ goto err;
+ }
+
+ cmp += pos;
+ sscanf(cmp, "%u %n", &data, &pos);
+ if (msm_rpm_add_kvp_data(req, key,
+ (void *)&data, sizeof(data)))
+ goto err_request;
+ }
+
+ if (msm_rpm_wait_for_ack(msm_rpm_send_request(req)))
+ pr_err("Sending the RPM message failed\n");
+ else
+		pr_info("RPM message sent successfully\n");
+
+err_request:
+ msm_rpm_free_request(req);
+err:
+ return count;
+}
+
+static const struct file_operations rsc_ops = {
+ .write = rsc_ops_write,
+};
+
+static int __init rpm_smd_debugfs_init(void)
+{
+ rpm_debugfs_dir = debugfs_create_dir("rpm_send_msg", NULL);
+ if (!rpm_debugfs_dir)
+ return -ENOMEM;
+
+ if (!debugfs_create_file("message", S_IWUSR, rpm_debugfs_dir, NULL,
+ &rsc_ops))
+ return -ENOMEM;
+
+ return 0;
+}
+late_initcall(rpm_smd_debugfs_init);
+
+static void __exit rpm_smd_debugfs_exit(void)
+{
+ debugfs_remove_recursive(rpm_debugfs_dir);
+}
+module_exit(rpm_smd_debugfs_exit);
+
+MODULE_DESCRIPTION("RPM SMD Debug Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/qcom/rpm-smd.c b/drivers/soc/qcom/rpm-smd.c
new file mode 100644
index 000000000000..fb0ff067f365
--- /dev/null
+++ b/drivers/soc/qcom/rpm-smd.c
@@ -0,0 +1,1475 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/rbtree.h>
+#include <soc/qcom/rpm-notifier.h>
+#include <soc/qcom/rpm-smd.h>
+#include <soc/qcom/smd.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/trace_rpm_smd.h>
+
+/* Debug Definitions */
+enum {
+ MSM_RPM_LOG_REQUEST_PRETTY = BIT(0),
+ MSM_RPM_LOG_REQUEST_RAW = BIT(1),
+ MSM_RPM_LOG_REQUEST_SHOW_MSG_ID = BIT(2),
+};
+
+static int msm_rpm_debug_mask;
+module_param_named(
+ debug_mask, msm_rpm_debug_mask, int, S_IRUGO | S_IWUSR
+);
+
+struct msm_rpm_driver_data {
+ const char *ch_name;
+ uint32_t ch_type;
+ smd_channel_t *ch_info;
+ struct work_struct work;
+ spinlock_t smd_lock_write;
+ spinlock_t smd_lock_read;
+ struct completion smd_open;
+};
+
+#define DEFAULT_BUFFER_SIZE 256
+#define DEBUG_PRINT_BUFFER_SIZE 512
+#define MAX_SLEEP_BUFFER 128
+#define GFP_FLAG(noirq) (noirq ? GFP_ATOMIC : GFP_KERNEL)
+#define INV_RSC "resource does not exist"
+#define ERR "err\0"
+#define MAX_ERR_BUFFER_SIZE 128
+#define MAX_WAIT_ON_ACK 24
+#define INIT_ERROR 1
+
+static ATOMIC_NOTIFIER_HEAD(msm_rpm_sleep_notifier);
+static bool standalone;
+static int probe_status = -EPROBE_DEFER;
+
+int msm_rpm_register_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&msm_rpm_sleep_notifier, nb);
+}
+
+int msm_rpm_unregister_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&msm_rpm_sleep_notifier, nb);
+}
+
+static struct workqueue_struct *msm_rpm_smd_wq;
+
+enum {
+ MSM_RPM_MSG_REQUEST_TYPE = 0,
+ MSM_RPM_MSG_TYPE_NR,
+};
+
+static const uint32_t msm_rpm_request_service[MSM_RPM_MSG_TYPE_NR] = {
+ 0x716572, /* 'req\0' */
+};
+
+/* The order of fields matters and reflects the order expected by the RPM. */
+struct rpm_request_header {
+ uint32_t service_type;
+ uint32_t request_len;
+};
+
+struct rpm_message_header {
+ uint32_t msg_id;
+ enum msm_rpm_set set;
+ uint32_t resource_type;
+ uint32_t resource_id;
+ uint32_t data_len;
+};
+
+struct kvp {
+ unsigned int k;
+ unsigned int s;
+};
+
+struct msm_rpm_kvp_data {
+ uint32_t key;
+ uint32_t nbytes; /* number of bytes */
+ uint8_t *value;
+ bool valid;
+};
+
+struct slp_buf {
+ struct rb_node node;
+ char ubuf[MAX_SLEEP_BUFFER];
+ char *buf;
+ bool valid;
+};
+static struct rb_root tr_root = RB_ROOT;
+static int msm_rpm_send_smd_buffer(char *buf, uint32_t size, bool noirq);
+static uint32_t msm_rpm_get_next_msg_id(void);
+
+static inline unsigned int get_rsc_type(char *buf)
+{
+ struct rpm_message_header *h;
+ h = (struct rpm_message_header *)
+ (buf + sizeof(struct rpm_request_header));
+ return h->resource_type;
+}
+
+static inline unsigned int get_rsc_id(char *buf)
+{
+ struct rpm_message_header *h;
+ h = (struct rpm_message_header *)
+ (buf + sizeof(struct rpm_request_header));
+ return h->resource_id;
+}
+
+#define get_data_len(buf) \
+ (((struct rpm_message_header *) \
+ (buf + sizeof(struct rpm_request_header)))->data_len)
+
+#define get_req_len(buf) \
+ (((struct rpm_request_header *)(buf))->request_len)
+
+#define get_msg_id(buf) \
+ (((struct rpm_message_header *) \
+ (buf + sizeof(struct rpm_request_header)))->msg_id)
+
+
+static inline int get_buf_len(char *buf)
+{
+ return get_req_len(buf) + sizeof(struct rpm_request_header);
+}
+
+static inline struct kvp *get_first_kvp(char *buf)
+{
+ return (struct kvp *)(buf + sizeof(struct rpm_request_header)
+ + sizeof(struct rpm_message_header));
+}
+
+static inline struct kvp *get_next_kvp(struct kvp *k)
+{
+ return (struct kvp *)((void *)k + sizeof(*k) + k->s);
+}
+
+static inline void *get_data(struct kvp *k)
+{
+ return (void *)k + sizeof(*k);
+}
+
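+/*
+ * Sketch of the marshalled request layout assumed by the accessors above
+ * (derived from the accessors themselves, not a separate wire format
+ * definition):
+ *
+ *	| rpm_request_header | rpm_message_header | kvp 0 | data 0 | kvp 1 | ...
+ *
+ * request_len covers everything after rpm_request_header and data_len
+ * covers the kvp/data region after rpm_message_header; both are kept in
+ * sync by add_kvp() and delete_kvp() below.
+ */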
+
+static void delete_kvp(char *msg, struct kvp *d)
+{
+ struct kvp *n;
+ int dec;
+ uint32_t size;
+
+ n = get_next_kvp(d);
+ dec = (void *)n - (void *)d;
+ size = get_data_len(msg) - ((void *)n - (void *)get_first_kvp(msg));
+
+ memcpy((void *)d, (void *)n, size);
+
+ get_data_len(msg) -= dec;
+ get_req_len(msg) -= dec;
+}
+
+static inline void update_kvp_data(struct kvp *dest, struct kvp *src)
+{
+ memcpy(get_data(dest), get_data(src), src->s);
+}
+
+static void add_kvp(char *buf, struct kvp *n)
+{
+ uint32_t inc = sizeof(*n) + n->s;
+ BUG_ON((get_req_len(buf) + inc) > MAX_SLEEP_BUFFER);
+
+ memcpy(buf + get_buf_len(buf), n, inc);
+
+ get_data_len(buf) += inc;
+ get_req_len(buf) += inc;
+}
+
+static struct slp_buf *tr_search(struct rb_root *root, char *slp)
+{
+ unsigned int type = get_rsc_type(slp);
+ unsigned int id = get_rsc_id(slp);
+
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct slp_buf *cur = rb_entry(node, struct slp_buf, node);
+ unsigned int ctype = get_rsc_type(cur->buf);
+ unsigned int cid = get_rsc_id(cur->buf);
+
+ if (type < ctype)
+ node = node->rb_left;
+ else if (type > ctype)
+ node = node->rb_right;
+ else if (id < cid)
+ node = node->rb_left;
+ else if (id > cid)
+ node = node->rb_right;
+ else
+ return cur;
+ }
+ return NULL;
+}
+
+static int tr_insert(struct rb_root *root, struct slp_buf *slp)
+{
+ unsigned int type = get_rsc_type(slp->buf);
+ unsigned int id = get_rsc_id(slp->buf);
+
+ struct rb_node **node = &(root->rb_node), *parent = NULL;
+
+ while (*node) {
+ struct slp_buf *curr = rb_entry(*node, struct slp_buf, node);
+ unsigned int ctype = get_rsc_type(curr->buf);
+ unsigned int cid = get_rsc_id(curr->buf);
+
+ parent = *node;
+
+ if (type < ctype)
+ node = &((*node)->rb_left);
+ else if (type > ctype)
+ node = &((*node)->rb_right);
+ else if (id < cid)
+ node = &((*node)->rb_left);
+ else if (id > cid)
+ node = &((*node)->rb_right);
+ else
+ return -EINVAL;
+ }
+
+ rb_link_node(&slp->node, parent, node);
+ rb_insert_color(&slp->node, root);
+ slp->valid = true;
+ return 0;
+}
+
+#define for_each_kvp(buf, k) \
+ for (k = (struct kvp *)get_first_kvp(buf); \
+ ((void *)k - (void *)get_first_kvp(buf)) < get_data_len(buf);\
+ k = get_next_kvp(k))
+
+
+static void tr_update(struct slp_buf *s, char *buf)
+{
+ struct kvp *e, *n;
+
+ for_each_kvp(buf, n) {
+ bool found = false;
+ for_each_kvp(s->buf, e) {
+ if (n->k == e->k) {
+ found = true;
+ if (n->s == e->s) {
+ void *e_data = get_data(e);
+ void *n_data = get_data(n);
+ if (memcmp(e_data, n_data, n->s)) {
+ update_kvp_data(e, n);
+ s->valid = true;
+ }
+ } else {
+ delete_kvp(s->buf, e);
+ add_kvp(s->buf, n);
+ s->valid = true;
+ }
+ break;
+ }
+
+ }
+ if (!found) {
+ add_kvp(s->buf, n);
+ s->valid = true;
+ }
+ }
+}
+
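+/*
+ * Illustrative flow of the sleep buffer cache above: the first sleep set
+ * request for a given (resource type, id) pair is inserted into tr_root
+ * and marked valid; a later request for the same pair is merged
+ * KVP-by-KVP via tr_update(), which re-marks the entry valid only when a
+ * key's data actually changed or a new key was added.
+ * msm_rpm_flush_requests() then sends only the valid entries, so
+ * repeated identical sleep set writes cost no SMD traffic.
+ */
+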
+int msm_rpm_smd_buffer_request(char *buf, uint32_t size, gfp_t flag)
+{
+ struct slp_buf *slp;
+ static DEFINE_SPINLOCK(slp_buffer_lock);
+ unsigned long flags;
+
+ if (size > MAX_SLEEP_BUFFER)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&slp_buffer_lock, flags);
+ slp = tr_search(&tr_root, buf);
+
+ if (!slp) {
+ slp = kzalloc(sizeof(struct slp_buf), GFP_ATOMIC);
+ if (!slp) {
+ spin_unlock_irqrestore(&slp_buffer_lock, flags);
+ return -ENOMEM;
+ }
+ slp->buf = PTR_ALIGN(&slp->ubuf[0], sizeof(u32));
+ memcpy(slp->buf, buf, size);
+ if (tr_insert(&tr_root, slp))
+ pr_err("%s(): Error updating sleep request\n",
+ __func__);
+ } else {
+ /* handle unsent requests */
+ tr_update(slp, buf);
+ }
+
+ spin_unlock_irqrestore(&slp_buffer_lock, flags);
+
+ return 0;
+}
+static void msm_rpm_print_sleep_buffer(struct slp_buf *s)
+{
+ char buf[DEBUG_PRINT_BUFFER_SIZE] = {0};
+ int pos;
+ int buflen = DEBUG_PRINT_BUFFER_SIZE;
+ char ch[5] = {0};
+ u32 type;
+ struct kvp *e;
+
+ if (!s)
+ return;
+
+ if (!s->valid)
+ return;
+
+ type = get_rsc_type(s->buf);
+ memcpy(ch, &type, sizeof(u32));
+
+ pos = scnprintf(buf, buflen,
+ "Sleep request type = 0x%08x(%s)",
+ get_rsc_type(s->buf), ch);
+ pos += scnprintf(buf + pos, buflen - pos, " id = 0%x",
+ get_rsc_id(s->buf));
+ for_each_kvp(s->buf, e) {
+ uint32_t i;
+ char *data = get_data(e);
+
+ memcpy(ch, &e->k, sizeof(u32));
+
+ pos += scnprintf(buf + pos, buflen - pos,
+ "\n\t\tkey = 0x%08x(%s)",
+ e->k, ch);
+ pos += scnprintf(buf + pos, buflen - pos,
+ " sz= %d data =", e->s);
+
+ for (i = 0; i < e->s; i++)
+ pos += scnprintf(buf + pos, buflen - pos,
+ " 0x%02X", data[i]);
+ }
+ pos += scnprintf(buf + pos, buflen - pos, "\n");
+	printk("%s", buf);
+}
+
+static struct msm_rpm_driver_data msm_rpm_data = {
+ .smd_open = COMPLETION_INITIALIZER(msm_rpm_data.smd_open),
+};
+
+static int msm_rpm_flush_requests(bool print)
+{
+ struct rb_node *t;
+ int ret;
+ int pkt_sz;
+ char buf[MAX_ERR_BUFFER_SIZE] = {0};
+ int count = 0;
+
+ for (t = rb_first(&tr_root); t; t = rb_next(t)) {
+
+ struct slp_buf *s = rb_entry(t, struct slp_buf, node);
+
+ if (!s->valid)
+ continue;
+
+ if (print)
+ msm_rpm_print_sleep_buffer(s);
+
+ get_msg_id(s->buf) = msm_rpm_get_next_msg_id();
+ ret = msm_rpm_send_smd_buffer(s->buf,
+ get_buf_len(s->buf), true);
+
+ WARN_ON(ret != get_buf_len(s->buf));
+
+ s->valid = false;
+ count++;
+
+ trace_rpm_send_message(true, MSM_RPM_CTX_SLEEP_SET,
+ get_rsc_type(s->buf),
+ get_rsc_id(s->buf),
+ get_msg_id(s->buf));
+
+		/*
+		 * RPM acks need to be handled here once we have sent
+		 * MAX_WAIT_ON_ACK messages so that we do not overrun the SMD
+		 * buffer. Since we expect only sleep sets at this point (RPM
+		 * PC would be disallowed if we had pending active requests),
+		 * we need not process these sleep set acks.
+		 */
+ if (count >= MAX_WAIT_ON_ACK) {
+ int len;
+ int timeout = 10;
+
+ while (timeout) {
+ if (smd_is_pkt_avail(msm_rpm_data.ch_info))
+ break;
+ /*
+ * Sleep for 50us at a time before checking
+ * for packet availability. The 50us is based
+				 * on the time the RPM could take to process
+ * and send an ack for the sleep set request.
+ */
+ udelay(50);
+ timeout--;
+ }
+ /*
+			 * On timeout, return an error and release spinlock
+			 * control on this CPU so that any other core that has
+			 * woken up and is trying to acquire the spinlock is
+			 * not locked out.
+ */
+ if (!timeout) {
+ pr_err("%s: Timed out waiting for RPM ACK\n",
+ __func__);
+ return -EAGAIN;
+ }
+
+ pkt_sz = smd_cur_packet_size(msm_rpm_data.ch_info);
+ len = smd_read(msm_rpm_data.ch_info, buf, pkt_sz);
+ count--;
+ }
+ }
+ return 0;
+}
+
+static atomic_t msm_rpm_msg_id = ATOMIC_INIT(0);
+
+struct msm_rpm_request {
+ struct rpm_request_header req_hdr;
+ struct rpm_message_header msg_hdr;
+ struct msm_rpm_kvp_data *kvp;
+ uint32_t num_elements;
+ uint32_t write_idx;
+ uint8_t *buf;
+ uint32_t numbytes;
+};
+
+/*
+ * Data related to message acknowledgement
+ */
+
+LIST_HEAD(msm_rpm_wait_list);
+
+struct msm_rpm_wait_data {
+ struct list_head list;
+ uint32_t msg_id;
+ bool ack_recd;
+ int errno;
+ struct completion ack;
+};
+DEFINE_SPINLOCK(msm_rpm_list_lock);
+
+struct msm_rpm_ack_msg {
+ uint32_t req;
+ uint32_t req_len;
+ uint32_t rsc_id;
+ uint32_t msg_len;
+ uint32_t id_ack;
+};
+
+LIST_HEAD(msm_rpm_ack_list);
+
+static DECLARE_COMPLETION(data_ready);
+
+static void msm_rpm_notify_sleep_chain(struct rpm_message_header *hdr,
+ struct msm_rpm_kvp_data *kvp)
+{
+ struct msm_rpm_notifier_data notif;
+
+ notif.rsc_type = hdr->resource_type;
+ notif.rsc_id = hdr->resource_id;
+ notif.key = kvp->key;
+ notif.size = kvp->nbytes;
+ notif.value = kvp->value;
+ atomic_notifier_call_chain(&msm_rpm_sleep_notifier, 0, &notif);
+}
+
+static int msm_rpm_add_kvp_data_common(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int size, bool noirq)
+{
+ uint32_t i;
+ uint32_t data_size, msg_size;
+
+ if (probe_status)
+ return probe_status;
+
+ if (!handle) {
+ pr_err("%s(): Invalid handle\n", __func__);
+ return -EINVAL;
+ }
+
+ if (size < 0)
+ return -EINVAL;
+
+ data_size = ALIGN(size, SZ_4);
+ msg_size = data_size + sizeof(struct rpm_request_header);
+
+ for (i = 0; i < handle->write_idx; i++) {
+ if (handle->kvp[i].key != key)
+ continue;
+ if (handle->kvp[i].nbytes != data_size) {
+ kfree(handle->kvp[i].value);
+ handle->kvp[i].value = NULL;
+ } else {
+ if (!memcmp(handle->kvp[i].value, data, data_size))
+ return 0;
+ }
+ break;
+ }
+
+ if (i >= handle->num_elements) {
+ pr_err("%s(): Number of resources exceeds max allocated\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ if (i == handle->write_idx)
+ handle->write_idx++;
+
+ if (!handle->kvp[i].value) {
+ handle->kvp[i].value = kzalloc(data_size, GFP_FLAG(noirq));
+
+ if (!handle->kvp[i].value) {
+ pr_err("%s(): Failed malloc\n", __func__);
+ return -ENOMEM;
+ }
+ } else {
+		/*
+		 * We get here if the key already exists with the same data
+		 * size but different contents. In that case, zero the buffer
+		 * before the new value is copied in.
+		 */
+ memset(handle->kvp[i].value, 0, data_size);
+ }
+
+ if (!handle->kvp[i].valid)
+ handle->msg_hdr.data_len += msg_size;
+ else
+ handle->msg_hdr.data_len += (data_size - handle->kvp[i].nbytes);
+
+ handle->kvp[i].nbytes = data_size;
+ handle->kvp[i].key = key;
+ memcpy(handle->kvp[i].value, data, size);
+ handle->kvp[i].valid = true;
+
+ return 0;
+
+}
+
+static struct msm_rpm_request *msm_rpm_create_request_common(
+ enum msm_rpm_set set, uint32_t rsc_type, uint32_t rsc_id,
+ int num_elements, bool noirq)
+{
+ struct msm_rpm_request *cdata;
+
+ if (probe_status)
+ return ERR_PTR(probe_status);
+
+ cdata = kzalloc(sizeof(struct msm_rpm_request),
+ GFP_FLAG(noirq));
+
+ if (!cdata) {
+		pr_err("%s(): Cannot allocate memory for client data\n",
+ __func__);
+ goto cdata_alloc_fail;
+ }
+
+ cdata->msg_hdr.set = set;
+ cdata->msg_hdr.resource_type = rsc_type;
+ cdata->msg_hdr.resource_id = rsc_id;
+ cdata->msg_hdr.data_len = 0;
+
+ cdata->num_elements = num_elements;
+ cdata->write_idx = 0;
+
+ cdata->kvp = kzalloc(sizeof(struct msm_rpm_kvp_data) * num_elements,
+ GFP_FLAG(noirq));
+
+ if (!cdata->kvp) {
+ pr_warn("%s(): Cannot allocate memory for key value data\n",
+ __func__);
+ goto kvp_alloc_fail;
+ }
+
+ cdata->buf = kzalloc(DEFAULT_BUFFER_SIZE, GFP_FLAG(noirq));
+
+ if (!cdata->buf)
+ goto buf_alloc_fail;
+
+ cdata->numbytes = DEFAULT_BUFFER_SIZE;
+ return cdata;
+
+buf_alloc_fail:
+ kfree(cdata->kvp);
+kvp_alloc_fail:
+ kfree(cdata);
+cdata_alloc_fail:
+ return NULL;
+
+}
+
+void msm_rpm_free_request(struct msm_rpm_request *handle)
+{
+ int i;
+
+ if (!handle)
+ return;
+ for (i = 0; i < handle->num_elements; i++)
+ kfree(handle->kvp[i].value);
+ kfree(handle->kvp);
+ kfree(handle->buf);
+ kfree(handle);
+}
+EXPORT_SYMBOL(msm_rpm_free_request);
+
+struct msm_rpm_request *msm_rpm_create_request(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements)
+{
+ return msm_rpm_create_request_common(set, rsc_type, rsc_id,
+ num_elements, false);
+}
+EXPORT_SYMBOL(msm_rpm_create_request);
+
+struct msm_rpm_request *msm_rpm_create_request_noirq(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements)
+{
+ return msm_rpm_create_request_common(set, rsc_type, rsc_id,
+ num_elements, true);
+}
+EXPORT_SYMBOL(msm_rpm_create_request_noirq);
+
+int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int size)
+{
+ return msm_rpm_add_kvp_data_common(handle, key, data, size, false);
+
+}
+EXPORT_SYMBOL(msm_rpm_add_kvp_data);
+
+int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int size)
+{
+ return msm_rpm_add_kvp_data_common(handle, key, data, size, true);
+}
+EXPORT_SYMBOL(msm_rpm_add_kvp_data_noirq);
+
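+/*
+ * Illustrative producer sketch mirroring the debugfs client in
+ * rpm-smd-debug.c; the resource type, id, key and value below are
+ * hypothetical:
+ *
+ *	struct msm_rpm_request *req;
+ *	uint32_t value = 1;
+ *
+ *	req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET, rsc_type,
+ *					rsc_id, 1);
+ *	if (!req)
+ *		return -ENOMEM;
+ *	if (!msm_rpm_add_kvp_data(req, key, (uint8_t *)&value,
+ *					sizeof(value)))
+ *		msm_rpm_wait_for_ack(msm_rpm_send_request(req));
+ *	msm_rpm_free_request(req);
+ */
+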
+/* Runs in interrupt context */
+static void msm_rpm_notify(void *data, unsigned event)
+{
+ struct msm_rpm_driver_data *pdata = (struct msm_rpm_driver_data *)data;
+ BUG_ON(!pdata);
+
+ if (!(pdata->ch_info))
+ return;
+
+ switch (event) {
+ case SMD_EVENT_DATA:
+ complete(&data_ready);
+ break;
+ case SMD_EVENT_OPEN:
+ complete(&pdata->smd_open);
+ break;
+ case SMD_EVENT_CLOSE:
+ case SMD_EVENT_STATUS:
+ case SMD_EVENT_REOPEN_READY:
+ break;
+ default:
+ pr_info("Unknown SMD event\n");
+
+ }
+}
+
+bool msm_rpm_waiting_for_ack(void)
+{
+ bool ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&msm_rpm_list_lock, flags);
+ ret = list_empty(&msm_rpm_wait_list);
+ spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+
+ return !ret;
+}
+
+static struct msm_rpm_wait_data *msm_rpm_get_entry_from_msg_id(uint32_t msg_id)
+{
+ struct list_head *ptr;
+ struct msm_rpm_wait_data *elem = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&msm_rpm_list_lock, flags);
+
+ list_for_each(ptr, &msm_rpm_wait_list) {
+ elem = list_entry(ptr, struct msm_rpm_wait_data, list);
+ if (elem && (elem->msg_id == msg_id))
+ break;
+ elem = NULL;
+ }
+ spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+ return elem;
+}
+
+static uint32_t msm_rpm_get_next_msg_id(void)
+{
+ uint32_t id;
+
+ /*
+	 * A message id of 0 is used by the driver to indicate an error
+	 * condition. The RPM driver uses an id of 1 to indicate unsent data
+	 * when the data sent over hasn't been modified. This isn't an error
+	 * scenario, and wait for ack returns success when the message id is 1.
+ */
+
+ do {
+ id = atomic_inc_return(&msm_rpm_msg_id);
+ } while ((id == 0) || (id == 1) || msm_rpm_get_entry_from_msg_id(id));
+
+ return id;
+}
+
+static int msm_rpm_add_wait_list(uint32_t msg_id)
+{
+ unsigned long flags;
+ struct msm_rpm_wait_data *data =
+ kzalloc(sizeof(struct msm_rpm_wait_data), GFP_ATOMIC);
+
+ if (!data)
+ return -ENOMEM;
+
+ init_completion(&data->ack);
+ data->ack_recd = false;
+ data->msg_id = msg_id;
+ data->errno = INIT_ERROR;
+ spin_lock_irqsave(&msm_rpm_list_lock, flags);
+ list_add(&data->list, &msm_rpm_wait_list);
+ spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+
+ return 0;
+}
+
+static void msm_rpm_free_list_entry(struct msm_rpm_wait_data *elem)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&msm_rpm_list_lock, flags);
+ list_del(&elem->list);
+ spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+ kfree(elem);
+}
+
+static void msm_rpm_process_ack(uint32_t msg_id, int errno)
+{
+ struct list_head *ptr;
+ struct msm_rpm_wait_data *elem = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&msm_rpm_list_lock, flags);
+
+ list_for_each(ptr, &msm_rpm_wait_list) {
+ elem = list_entry(ptr, struct msm_rpm_wait_data, list);
+ if (elem && (elem->msg_id == msg_id)) {
+ elem->errno = errno;
+ elem->ack_recd = true;
+ complete(&elem->ack);
+ break;
+ }
+ elem = NULL;
+ }
+ /* Special case where the sleep driver doesn't
+ * wait for ACKs. This decreases the latency involved in
+ * entering RPM-assisted power collapse.
+ */
+ if (!elem)
+ trace_rpm_ack_recd(0, msg_id);
+
+ spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
+}
+
+struct msm_rpm_kvp_packet {
+ uint32_t id;
+ uint32_t len;
+ uint32_t val;
+};
+
+static inline uint32_t msm_rpm_get_msg_id_from_ack(uint8_t *buf)
+{
+ return ((struct msm_rpm_ack_msg *)buf)->id_ack;
+}
+
+static inline int msm_rpm_get_error_from_ack(uint8_t *buf)
+{
+ uint8_t *tmp;
+ uint32_t req_len = ((struct msm_rpm_ack_msg *)buf)->req_len;
+
+ int rc = -ENODEV;
+
+ req_len -= sizeof(struct msm_rpm_ack_msg);
+ req_len += 2 * sizeof(uint32_t);
+ if (!req_len)
+ return 0;
+
+ tmp = buf + sizeof(struct msm_rpm_ack_msg);
+
+ BUG_ON(memcmp(tmp, ERR, sizeof(uint32_t)));
+
+ tmp += 2 * sizeof(uint32_t);
+
+ if (!(memcmp(tmp, INV_RSC, min_t(uint32_t, req_len,
+ sizeof(INV_RSC))-1))) {
+ pr_err("%s(): RPM NACK Unsupported resource\n", __func__);
+ rc = -EINVAL;
+ } else {
+ pr_err("%s(): RPM NACK Invalid header\n", __func__);
+ }
+
+ return rc;
+}
+
+static int msm_rpm_read_smd_data(char *buf)
+{
+ int pkt_sz;
+ int bytes_read = 0;
+
+ pkt_sz = smd_cur_packet_size(msm_rpm_data.ch_info);
+
+ if (!pkt_sz)
+ return -EAGAIN;
+
+ BUG_ON(pkt_sz > MAX_ERR_BUFFER_SIZE);
+
+ if (pkt_sz != smd_read_avail(msm_rpm_data.ch_info))
+ return -EAGAIN;
+
+ do {
+ int len;
+
+ len = smd_read(msm_rpm_data.ch_info, buf + bytes_read, pkt_sz);
+ pkt_sz -= len;
+ bytes_read += len;
+
+ } while (pkt_sz > 0);
+
+ BUG_ON(pkt_sz < 0);
+
+ return 0;
+}
+
+static void msm_rpm_smd_work(struct work_struct *work)
+{
+ uint32_t msg_id;
+ int errno;
+ char buf[MAX_ERR_BUFFER_SIZE] = {0};
+
+ while (1) {
+ wait_for_completion(&data_ready);
+
+ spin_lock(&msm_rpm_data.smd_lock_read);
+ while (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
+ if (msm_rpm_read_smd_data(buf))
+ break;
+ msg_id = msm_rpm_get_msg_id_from_ack(buf);
+ errno = msm_rpm_get_error_from_ack(buf);
+ msm_rpm_process_ack(msg_id, errno);
+ }
+ spin_unlock(&msm_rpm_data.smd_lock_read);
+ }
+}
+
+static void msm_rpm_log_request(struct msm_rpm_request *cdata)
+{
+ char buf[DEBUG_PRINT_BUFFER_SIZE];
+ size_t buflen = DEBUG_PRINT_BUFFER_SIZE;
+ char name[5];
+ u32 value;
+ uint32_t i;
+ int j, prev_valid;
+ int valid_count = 0;
+ int pos = 0;
+
+ name[4] = 0;
+
+ for (i = 0; i < cdata->write_idx; i++)
+ if (cdata->kvp[i].valid)
+ valid_count++;
+
+ pos += scnprintf(buf + pos, buflen - pos, "%sRPM req: ", KERN_INFO);
+ if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_SHOW_MSG_ID)
+ pos += scnprintf(buf + pos, buflen - pos, "msg_id=%u, ",
+ cdata->msg_hdr.msg_id);
+ pos += scnprintf(buf + pos, buflen - pos, "s=%s",
+ (cdata->msg_hdr.set == MSM_RPM_CTX_ACTIVE_SET ? "act" : "slp"));
+
+ if ((msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY)
+ && (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_RAW)) {
+ /* Both pretty and raw formatting */
+ memcpy(name, &cdata->msg_hdr.resource_type, sizeof(uint32_t));
+ pos += scnprintf(buf + pos, buflen - pos,
+ ", rsc_type=0x%08X (%s), rsc_id=%u; ",
+ cdata->msg_hdr.resource_type, name,
+ cdata->msg_hdr.resource_id);
+
+ for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
+ if (!cdata->kvp[i].valid)
+ continue;
+
+ memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
+ pos += scnprintf(buf + pos, buflen - pos,
+ "[key=0x%08X (%s), value=%s",
+ cdata->kvp[i].key, name,
+ (cdata->kvp[i].nbytes ? "0x" : "null"));
+
+ for (j = 0; j < cdata->kvp[i].nbytes; j++)
+ pos += scnprintf(buf + pos, buflen - pos,
+ "%02X ",
+ cdata->kvp[i].value[j]);
+
+ if (cdata->kvp[i].nbytes)
+ pos += scnprintf(buf + pos, buflen - pos, "(");
+
+ for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
+ value = 0;
+ memcpy(&value, &cdata->kvp[i].value[j],
+ min_t(uint32_t, sizeof(uint32_t),
+ cdata->kvp[i].nbytes - j));
+ pos += scnprintf(buf + pos, buflen - pos, "%u",
+ value);
+ if (j + 4 < cdata->kvp[i].nbytes)
+ pos += scnprintf(buf + pos,
+ buflen - pos, " ");
+ }
+ if (cdata->kvp[i].nbytes)
+ pos += scnprintf(buf + pos, buflen - pos, ")");
+ pos += scnprintf(buf + pos, buflen - pos, "]");
+ if (prev_valid + 1 < valid_count)
+ pos += scnprintf(buf + pos, buflen - pos, ", ");
+ prev_valid++;
+ }
+ } else if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY) {
+ /* Pretty formatting only */
+ memcpy(name, &cdata->msg_hdr.resource_type, sizeof(uint32_t));
+ pos += scnprintf(buf + pos, buflen - pos, " %s %u; ", name,
+ cdata->msg_hdr.resource_id);
+
+ for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
+ if (!cdata->kvp[i].valid)
+ continue;
+
+ memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
+ pos += scnprintf(buf + pos, buflen - pos, "%s=%s",
+ name, (cdata->kvp[i].nbytes ? "" : "null"));
+
+ for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
+ value = 0;
+ memcpy(&value, &cdata->kvp[i].value[j],
+ min_t(uint32_t, sizeof(uint32_t),
+ cdata->kvp[i].nbytes - j));
+ pos += scnprintf(buf + pos, buflen - pos, "%u",
+ value);
+
+ if (j + 4 < cdata->kvp[i].nbytes)
+ pos += scnprintf(buf + pos,
+ buflen - pos, " ");
+ }
+ if (prev_valid + 1 < valid_count)
+ pos += scnprintf(buf + pos, buflen - pos, ", ");
+ prev_valid++;
+ }
+ } else {
+ /* Raw formatting only */
+ pos += scnprintf(buf + pos, buflen - pos,
+ ", rsc_type=0x%08X, rsc_id=%u; ",
+ cdata->msg_hdr.resource_type,
+ cdata->msg_hdr.resource_id);
+
+ for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
+ if (!cdata->kvp[i].valid)
+ continue;
+
+ pos += scnprintf(buf + pos, buflen - pos,
+ "[key=0x%08X, value=%s",
+ cdata->kvp[i].key,
+ (cdata->kvp[i].nbytes ? "0x" : "null"));
+ for (j = 0; j < cdata->kvp[i].nbytes; j++) {
+ pos += scnprintf(buf + pos, buflen - pos,
+ "%02X",
+ cdata->kvp[i].value[j]);
+ if (j + 1 < cdata->kvp[i].nbytes)
+ pos += scnprintf(buf + pos,
+ buflen - pos, " ");
+ }
+ pos += scnprintf(buf + pos, buflen - pos, "]");
+ if (prev_valid + 1 < valid_count)
+ pos += scnprintf(buf + pos, buflen - pos, ", ");
+ prev_valid++;
+ }
+ }
+
+ pos += scnprintf(buf + pos, buflen - pos, "\n");
+ printk("%s", buf);
+}
+
+static int msm_rpm_send_smd_buffer(char *buf, uint32_t size, bool noirq)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
+
+ while ((ret = smd_write_avail(msm_rpm_data.ch_info)) < size) {
+ if (ret < 0)
+ break;
+ if (!noirq) {
+ spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write,
+ flags);
+ cpu_relax();
+ spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
+ } else
+ udelay(5);
+ }
+
+ if (ret < 0) {
+ pr_err("%s(): SMD not initialized\n", __func__);
+ spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
+ return ret;
+ }
+
+ ret = smd_write(msm_rpm_data.ch_info, buf, size);
+ spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
+ return ret;
+}
+
+static int msm_rpm_send_data(struct msm_rpm_request *cdata,
+ int msg_type, bool noirq)
+{
+ uint8_t *tmpbuff;
+ int ret;
+ uint32_t i;
+ uint32_t msg_size;
+ int req_hdr_sz, msg_hdr_sz;
+
+ if (probe_status)
+ return probe_status;
+
+ if (!cdata->msg_hdr.data_len)
+ return 1;
+
+ req_hdr_sz = sizeof(cdata->req_hdr);
+ msg_hdr_sz = sizeof(cdata->msg_hdr);
+
+ cdata->req_hdr.service_type = msm_rpm_request_service[msg_type];
+
+ cdata->req_hdr.request_len = cdata->msg_hdr.data_len + msg_hdr_sz;
+ msg_size = cdata->req_hdr.request_len + req_hdr_sz;
+
+ /* grow the request buffer if the serialized message no longer fits */
+ if (msg_size > cdata->numbytes) {
+ kfree(cdata->buf);
+ cdata->numbytes = msg_size;
+ cdata->buf = kzalloc(msg_size, GFP_FLAG(noirq));
+ }
+
+ if (!cdata->buf) {
+ pr_err("%s(): Failed malloc\n", __func__);
+ return 0;
+ }
+
+ tmpbuff = cdata->buf;
+
+ tmpbuff += req_hdr_sz + msg_hdr_sz;
+
+ for (i = 0; (i < cdata->write_idx); i++) {
+ /* Sanity check */
+ BUG_ON((tmpbuff - cdata->buf) > cdata->numbytes);
+
+ if (!cdata->kvp[i].valid)
+ continue;
+
+ memcpy(tmpbuff, &cdata->kvp[i].key, sizeof(uint32_t));
+ tmpbuff += sizeof(uint32_t);
+
+ memcpy(tmpbuff, &cdata->kvp[i].nbytes, sizeof(uint32_t));
+ tmpbuff += sizeof(uint32_t);
+
+ memcpy(tmpbuff, cdata->kvp[i].value, cdata->kvp[i].nbytes);
+ tmpbuff += cdata->kvp[i].nbytes;
+
+ if (cdata->msg_hdr.set == MSM_RPM_CTX_SLEEP_SET)
+ msm_rpm_notify_sleep_chain(&cdata->msg_hdr,
+ &cdata->kvp[i]);
+
+ }
+
+ memcpy(cdata->buf, &cdata->req_hdr, req_hdr_sz + msg_hdr_sz);
+
+ if ((cdata->msg_hdr.set == MSM_RPM_CTX_SLEEP_SET) &&
+ !msm_rpm_smd_buffer_request(cdata->buf, msg_size,
+ GFP_FLAG(noirq)))
+ return 1;
+
+ cdata->msg_hdr.msg_id = msm_rpm_get_next_msg_id();
+
+ memcpy(cdata->buf + req_hdr_sz, &cdata->msg_hdr, msg_hdr_sz);
+
+ if (msm_rpm_debug_mask
+ & (MSM_RPM_LOG_REQUEST_PRETTY | MSM_RPM_LOG_REQUEST_RAW))
+ msm_rpm_log_request(cdata);
+
+ if (standalone) {
+ for (i = 0; (i < cdata->write_idx); i++)
+ cdata->kvp[i].valid = false;
+
+ cdata->msg_hdr.data_len = 0;
+ ret = cdata->msg_hdr.msg_id;
+ return ret;
+ }
+
+ msm_rpm_add_wait_list(cdata->msg_hdr.msg_id);
+
+ ret = msm_rpm_send_smd_buffer(&cdata->buf[0], msg_size, noirq);
+
+ if (ret == msg_size) {
+ trace_rpm_send_message(noirq, cdata->msg_hdr.set,
+ cdata->msg_hdr.resource_type,
+ cdata->msg_hdr.resource_id,
+ cdata->msg_hdr.msg_id);
+ for (i = 0; (i < cdata->write_idx); i++)
+ cdata->kvp[i].valid = false;
+ cdata->msg_hdr.data_len = 0;
+ ret = cdata->msg_hdr.msg_id;
+ } else if (ret < msg_size) {
+ struct msm_rpm_wait_data *rc;
+ pr_err("Failed to write data msg_size:%d ret:%d\n",
+ msg_size, ret);
+ ret = 0;
+ rc = msm_rpm_get_entry_from_msg_id(cdata->msg_hdr.msg_id);
+ if (rc)
+ msm_rpm_free_list_entry(rc);
+ }
+ return ret;
+}
+
+int msm_rpm_send_request(struct msm_rpm_request *handle)
+{
+ int ret;
+ static DEFINE_MUTEX(send_mtx);
+
+ mutex_lock(&send_mtx);
+ ret = msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, false);
+ mutex_unlock(&send_mtx);
+
+ return ret;
+}
+EXPORT_SYMBOL(msm_rpm_send_request);
+
+int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
+{
+ return msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, true);
+}
+EXPORT_SYMBOL(msm_rpm_send_request_noirq);
+
+int msm_rpm_wait_for_ack(uint32_t msg_id)
+{
+ struct msm_rpm_wait_data *elem;
+ int rc = 0;
+
+ if (!msg_id) {
+ pr_err("%s(): Invalid msg id\n", __func__);
+ return -ENOMEM;
+ }
+
+ if (msg_id == 1)
+ return rc;
+
+ if (standalone)
+ return rc;
+
+ elem = msm_rpm_get_entry_from_msg_id(msg_id);
+ if (!elem)
+ return rc;
+
+ wait_for_completion(&elem->ack);
+ trace_rpm_ack_recd(0, msg_id);
+
+ rc = elem->errno;
+ msm_rpm_free_list_entry(elem);
+
+ return rc;
+}
+EXPORT_SYMBOL(msm_rpm_wait_for_ack);
+
+int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
+{
+ struct msm_rpm_wait_data *elem;
+ unsigned long flags;
+ int rc = 0;
+ uint32_t id = 0;
+
+ if (!msg_id) {
+ pr_err("%s(): Invalid msg id\n", __func__);
+ return -ENOMEM;
+ }
+
+ if (msg_id == 1)
+ return 0;
+
+ if (standalone)
+ return 0;
+
+ spin_lock_irqsave(&msm_rpm_data.smd_lock_read, flags);
+
+ elem = msm_rpm_get_entry_from_msg_id(msg_id);
+
+ if (!elem)
+ /* Should this be a bug
+ * Is it ok for another thread to read the msg?
+ */
+ goto wait_ack_cleanup;
+
+ if (elem->errno != INIT_ERROR) {
+ rc = elem->errno;
+ msm_rpm_free_list_entry(elem);
+ goto wait_ack_cleanup;
+ }
+
+ while (id != msg_id) {
+ if (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
+ int errno;
+ char buf[MAX_ERR_BUFFER_SIZE] = {};
+
+ msm_rpm_read_smd_data(buf);
+ id = msm_rpm_get_msg_id_from_ack(buf);
+ errno = msm_rpm_get_error_from_ack(buf);
+ msm_rpm_process_ack(id, errno);
+ }
+ }
+
+ rc = elem->errno;
+ trace_rpm_ack_recd(1, msg_id);
+
+ msm_rpm_free_list_entry(elem);
+wait_ack_cleanup:
+ spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags);
+
+ if (smd_is_pkt_avail(msm_rpm_data.ch_info))
+ complete(&data_ready);
+ return rc;
+}
+EXPORT_SYMBOL(msm_rpm_wait_for_ack_noirq);
+
+int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+ int i, rc;
+ struct msm_rpm_request *req =
+ msm_rpm_create_request(set, rsc_type, rsc_id, nelems);
+
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ if (!req)
+ return -ENOMEM;
+
+ for (i = 0; i < nelems; i++) {
+ rc = msm_rpm_add_kvp_data(req, kvp[i].key,
+ kvp[i].data, kvp[i].length);
+ if (rc)
+ goto bail;
+ }
+
+ rc = msm_rpm_wait_for_ack(msm_rpm_send_request(req));
+bail:
+ msm_rpm_free_request(req);
+ return rc;
+}
+EXPORT_SYMBOL(msm_rpm_send_message);
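+
+/*
+ * Usage sketch (illustrative only, not part of this patch): a client can
+ * issue a one-shot active-set request through msm_rpm_send_message(). The
+ * rsc_type, rsc_id and KVP key below are hypothetical placeholders; real
+ * values come from the RPM resource tables for the target SoC.
+ *
+ *   uint32_t level = 3;
+ *   struct msm_rpm_kvp kvp = {
+ *           .key = 0x12345678,
+ *           .data = (uint8_t *)&level,
+ *           .length = sizeof(level),
+ *   };
+ *   int rc = msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET,
+ *           0x87654321, 0, &kvp, 1);
+ *   if (rc)
+ *           pr_err("RPM request failed: %d\n", rc);
+ */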
+
+int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+ int i, rc;
+ struct msm_rpm_request *req =
+ msm_rpm_create_request_noirq(set, rsc_type, rsc_id, nelems);
+
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ if (!req)
+ return -ENOMEM;
+
+ for (i = 0; i < nelems; i++) {
+ rc = msm_rpm_add_kvp_data_noirq(req, kvp[i].key,
+ kvp[i].data, kvp[i].length);
+ if (rc)
+ goto bail;
+ }
+
+ rc = msm_rpm_wait_for_ack_noirq(msm_rpm_send_request_noirq(req));
+bail:
+ msm_rpm_free_request(req);
+ return rc;
+}
+EXPORT_SYMBOL(msm_rpm_send_message_noirq);
+
+/**
+ * During power collapse, the RPM driver disables the SMD interrupts to make
+ * sure that an interrupt doesn't wake us from sleep.
+ */
+int msm_rpm_enter_sleep(bool print, const struct cpumask *cpumask)
+{
+ int ret = 0;
+
+ if (standalone)
+ return 0;
+
+ ret = smd_mask_receive_interrupt(msm_rpm_data.ch_info, true, cpumask);
+ if (!ret) {
+ ret = msm_rpm_flush_requests(print);
+ if (ret)
+ smd_mask_receive_interrupt(msm_rpm_data.ch_info,
+ false, NULL);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(msm_rpm_enter_sleep);
+
+/**
+ * When the system resumes from power collapse, the SMD interrupt disabled by
+ * the enter function has to be re-enabled to continue processing SMD
+ * messages.
+ */
+void msm_rpm_exit_sleep(void)
+{
+ if (standalone)
+ return;
+
+ smd_mask_receive_interrupt(msm_rpm_data.ch_info, false, NULL);
+}
+EXPORT_SYMBOL(msm_rpm_exit_sleep);
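+
+/*
+ * Usage sketch (illustrative only, not part of this patch): a platform idle
+ * driver would bracket RPM-assisted power collapse with this pair of calls.
+ * enter_power_collapse() below stands in for the platform's actual low-power
+ * entry point.
+ *
+ *   if (!msm_rpm_enter_sleep(false, cpumask_of(smp_processor_id()))) {
+ *           enter_power_collapse();
+ *           msm_rpm_exit_sleep();
+ *   }
+ */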
+
+static int msm_rpm_dev_probe(struct platform_device *pdev)
+{
+ char *key = NULL;
+ int ret = 0;
+
+ key = "rpm-channel-name";
+ ret = of_property_read_string(pdev->dev.of_node, key,
+ &msm_rpm_data.ch_name);
+ if (ret) {
+ pr_err("%s(): Failed to read node: %s, key=%s\n", __func__,
+ pdev->dev.of_node->full_name, key);
+ goto fail;
+ }
+
+ key = "rpm-channel-type";
+ ret = of_property_read_u32(pdev->dev.of_node, key,
+ &msm_rpm_data.ch_type);
+ if (ret) {
+ pr_err("%s(): Failed to read node: %s, key=%s\n", __func__,
+ pdev->dev.of_node->full_name, key);
+ goto fail;
+ }
+
+ key = "rpm-standalone";
+ standalone = of_property_read_bool(pdev->dev.of_node, key);
+ if (standalone)
+ goto skip_smd_init;
+
+ ret = smd_named_open_on_edge(msm_rpm_data.ch_name,
+ msm_rpm_data.ch_type,
+ &msm_rpm_data.ch_info,
+ &msm_rpm_data,
+ msm_rpm_notify);
+ if (ret) {
+ if (ret != -EPROBE_DEFER) {
+ pr_err("%s: Cannot open RPM channel %s %d\n",
+ __func__, msm_rpm_data.ch_name,
+ msm_rpm_data.ch_type);
+ }
+ goto fail;
+ }
+
+ spin_lock_init(&msm_rpm_data.smd_lock_write);
+ spin_lock_init(&msm_rpm_data.smd_lock_read);
+ INIT_WORK(&msm_rpm_data.work, msm_rpm_smd_work);
+
+ wait_for_completion(&msm_rpm_data.smd_open);
+
+ smd_disable_read_intr(msm_rpm_data.ch_info);
+
+ msm_rpm_smd_wq = alloc_workqueue("rpm-smd",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
+ if (!msm_rpm_smd_wq) {
+ pr_err("%s: Unable to alloc rpm-smd workqueue\n", __func__);
+ ret = -EINVAL;
+ goto fail;
+ }
+ queue_work(msm_rpm_smd_wq, &msm_rpm_data.work);
+
+ probe_status = ret;
+skip_smd_init:
+ of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+
+ if (standalone)
+ pr_info("%s: RPM running in standalone mode\n", __func__);
+fail:
+ return probe_status;
+}
+
+static struct of_device_id msm_rpm_match_table[] = {
+ {.compatible = "qcom,rpm-smd"},
+ {},
+};
+
+static struct platform_driver msm_rpm_device_driver = {
+ .probe = msm_rpm_dev_probe,
+ .driver = {
+ .name = "rpm-smd",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_rpm_match_table,
+ },
+};
+
+int __init msm_rpm_driver_init(void)
+{
+ static bool registered;
+
+ if (registered)
+ return 0;
+ registered = true;
+
+ return platform_driver_register(&msm_rpm_device_driver);
+}
+EXPORT_SYMBOL(msm_rpm_driver_init);
+arch_initcall(msm_rpm_driver_init);
diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c
new file mode 100644
index 000000000000..a7d714914771
--- /dev/null
+++ b/drivers/soc/qcom/smd.c
@@ -0,0 +1,3359 @@
+/* drivers/soc/qcom/smd.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/termios.h>
+#include <linux/ctype.h>
+#include <linux/remote_spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/kfifo.h>
+#include <linux/pm.h>
+#include <linux/notifier.h>
+#include <linux/suspend.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/ipc_logging.h>
+
+#include <soc/qcom/ramdump.h>
+#include <soc/qcom/smd.h>
+#include <soc/qcom/smem.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+
+#include "smd_private.h"
+#include "smem_private.h"
+
+#define SMSM_SNAPSHOT_CNT 64
+#define SMSM_SNAPSHOT_SIZE ((SMSM_NUM_ENTRIES + 1) * 4 + sizeof(uint64_t))
+#define RSPIN_INIT_WAIT_MS 1000
+#define SMD_FIFO_FULL_RESERVE 4
+#define SMD_FIFO_ADDR_ALIGN_BYTES 3
+
+uint32_t SMSM_NUM_ENTRIES = 8;
+uint32_t SMSM_NUM_HOSTS = 3;
+
+/* Legacy SMSM interrupt notifications */
+#define LEGACY_MODEM_SMSM_MASK (SMSM_RESET | SMSM_INIT | SMSM_SMDINIT)
+
+struct smsm_shared_info {
+ uint32_t *state;
+ uint32_t *intr_mask;
+ uint32_t *intr_mux;
+};
+
+static struct smsm_shared_info smsm_info;
+static struct kfifo smsm_snapshot_fifo;
+static struct wakeup_source smsm_snapshot_ws;
+static int smsm_snapshot_count;
+static DEFINE_SPINLOCK(smsm_snapshot_count_lock);
+
+struct smsm_size_info_type {
+ uint32_t num_hosts;
+ uint32_t num_entries;
+ uint32_t reserved0;
+ uint32_t reserved1;
+};
+
+struct smsm_state_cb_info {
+ struct list_head cb_list;
+ uint32_t mask;
+ void *data;
+ void (*notify)(void *data, uint32_t old_state, uint32_t new_state);
+};
+
+struct smsm_state_info {
+ struct list_head callbacks;
+ uint32_t last_value;
+ uint32_t intr_mask_set;
+ uint32_t intr_mask_clear;
+};
+
+static irqreturn_t smsm_irq_handler(int irq, void *data);
+
+/*
+ * Interrupt configuration consists of the static configuration for the
+ * supported processors, done here, plus the interrupt configuration added by
+ * the separate initialization modules (device tree, platform data, or hard
+ * coded).
+ */
+static struct interrupt_config private_intr_config[NUM_SMD_SUBSYSTEMS] = {
+ [SMD_MODEM] = {
+ .smd.irq_handler = smd_modem_irq_handler,
+ .smsm.irq_handler = smsm_modem_irq_handler,
+ },
+ [SMD_Q6] = {
+ .smd.irq_handler = smd_dsp_irq_handler,
+ .smsm.irq_handler = smsm_dsp_irq_handler,
+ },
+ [SMD_DSPS] = {
+ .smd.irq_handler = smd_dsps_irq_handler,
+ .smsm.irq_handler = smsm_dsps_irq_handler,
+ },
+ [SMD_WCNSS] = {
+ .smd.irq_handler = smd_wcnss_irq_handler,
+ .smsm.irq_handler = smsm_wcnss_irq_handler,
+ },
+ [SMD_MODEM_Q6_FW] = {
+ .smd.irq_handler = smd_modemfw_irq_handler,
+ .smsm.irq_handler = NULL, /* does not support smsm */
+ },
+ [SMD_RPM] = {
+ .smd.irq_handler = smd_rpm_irq_handler,
+ .smsm.irq_handler = NULL, /* does not support smsm */
+ },
+};
+
+union fifo_mem {
+ uint64_t u64;
+ uint8_t u8;
+};
+
+struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
+
+#define SMSM_STATE_ADDR(entry) (smsm_info.state + entry)
+#define SMSM_INTR_MASK_ADDR(entry, host) (smsm_info.intr_mask + \
+ entry * SMSM_NUM_HOSTS + host)
+#define SMSM_INTR_MUX_ADDR(entry) (smsm_info.intr_mux + entry)
+
+int msm_smd_debug_mask = MSM_SMD_POWER_INFO | MSM_SMD_INFO |
+ MSM_SMSM_POWER_INFO;
+module_param_named(debug_mask, msm_smd_debug_mask,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
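+/*
+ * debug_mask can typically be adjusted at runtime, e.g. (the exact sysfs
+ * path depends on how this file is built into the kernel):
+ *
+ *   echo 0xffff > /sys/module/smd/parameters/debug_mask
+ */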
+void *smd_log_ctx;
+void *smsm_log_ctx;
+#define NUM_LOG_PAGES 4
+
+#define IPC_LOG_SMD(level, x...) do { \
+ if (smd_log_ctx) \
+ ipc_log_string(smd_log_ctx, x); \
+ else \
+ printk(level x); \
+ } while (0)
+
+#define IPC_LOG_SMSM(level, x...) do { \
+ if (smsm_log_ctx) \
+ ipc_log_string(smsm_log_ctx, x); \
+ else \
+ printk(level x); \
+ } while (0)
+
+#if defined(CONFIG_MSM_SMD_DEBUG)
+#define SMD_DBG(x...) do { \
+ if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
+ IPC_LOG_SMD(KERN_DEBUG, x); \
+ } while (0)
+
+#define SMSM_DBG(x...) do { \
+ if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
+ IPC_LOG_SMSM(KERN_DEBUG, x); \
+ } while (0)
+
+#define SMD_INFO(x...) do { \
+ if (msm_smd_debug_mask & MSM_SMD_INFO) \
+ IPC_LOG_SMD(KERN_INFO, x); \
+ } while (0)
+
+#define SMSM_INFO(x...) do { \
+ if (msm_smd_debug_mask & MSM_SMSM_INFO) \
+ IPC_LOG_SMSM(KERN_INFO, x); \
+ } while (0)
+
+#define SMD_POWER_INFO(x...) do { \
+ if (msm_smd_debug_mask & MSM_SMD_POWER_INFO) \
+ IPC_LOG_SMD(KERN_INFO, x); \
+ } while (0)
+
+#define SMSM_POWER_INFO(x...) do { \
+ if (msm_smd_debug_mask & MSM_SMSM_POWER_INFO) \
+ IPC_LOG_SMSM(KERN_INFO, x); \
+ } while (0)
+#else
+#define SMD_DBG(x...) do { } while (0)
+#define SMSM_DBG(x...) do { } while (0)
+#define SMD_INFO(x...) do { } while (0)
+#define SMSM_INFO(x...) do { } while (0)
+#define SMD_POWER_INFO(x...) do { } while (0)
+#define SMSM_POWER_INFO(x...) do { } while (0)
+#endif
+
+static void smd_fake_irq_handler(unsigned long arg);
+static void smsm_cb_snapshot(uint32_t use_wakeup_source);
+
+static struct workqueue_struct *smsm_cb_wq;
+static void notify_smsm_cb_clients_worker(struct work_struct *work);
+static DECLARE_WORK(smsm_cb_work, notify_smsm_cb_clients_worker);
+static DEFINE_MUTEX(smsm_lock);
+static struct smsm_state_info *smsm_states;
+
+static int smd_stream_write_avail(struct smd_channel *ch);
+static int smd_stream_read_avail(struct smd_channel *ch);
+
+static bool pid_is_on_edge(uint32_t edge_num, unsigned pid);
+
+static inline void smd_write_intr(unsigned int val, void __iomem *addr)
+{
+ wmb();
+ __raw_writel(val, addr);
+}
+
+/**
+ * smd_memcpy_to_fifo() - copy to SMD channel FIFO
+ * @dest: Destination address
+ * @src: Source address
+ * @num_bytes: Number of bytes to copy
+ * @from_user: true if data being copied is from userspace, false otherwise
+ *
+ * @return: Address of destination
+ *
+ * This function copies num_bytes from src to dest. This is used as the memcpy
+ * function to copy data to SMD FIFO in case the SMD FIFO is naturally aligned.
+ */
+static void *smd_memcpy_to_fifo(void *dest, const void *src, size_t num_bytes,
+ bool from_user)
+{
+ union fifo_mem *temp_dst = (union fifo_mem *)dest;
+ union fifo_mem *temp_src = (union fifo_mem *)src;
+ uintptr_t mask = sizeof(union fifo_mem) - 1;
+ int ret;
+
+ /* Do byte copies until we hit 8-byte (double word) alignment */
+ while ((uintptr_t)temp_dst & mask && num_bytes) {
+ if (from_user) {
+ ret = copy_from_user(temp_dst, temp_src, 1);
+ BUG_ON(ret != 0);
+ } else {
+ __raw_writeb(temp_src->u8, temp_dst);
+ }
+
+ temp_src = (union fifo_mem *)((uintptr_t)temp_src + 1);
+ temp_dst = (union fifo_mem *)((uintptr_t)temp_dst + 1);
+ num_bytes--;
+ }
+
+ /* Do double word copies */
+ while (num_bytes >= sizeof(union fifo_mem)) {
+ if (from_user) {
+ ret = copy_from_user(temp_dst, temp_src,
+ sizeof(union fifo_mem));
+ BUG_ON(ret != 0);
+ } else {
+ __raw_writeq(temp_src->u64, temp_dst);
+ }
+
+ temp_dst++;
+ temp_src++;
+ num_bytes -= sizeof(union fifo_mem);
+ }
+
+ /* Copy remaining bytes */
+ while (num_bytes--) {
+ if (from_user) {
+ ret = copy_from_user(temp_dst, temp_src, 1);
+ BUG_ON(ret != 0);
+ } else {
+ __raw_writeb(temp_src->u8, temp_dst);
+ }
+
+ temp_src = (union fifo_mem *)((uintptr_t)temp_src + 1);
+ temp_dst = (union fifo_mem *)((uintptr_t)temp_dst + 1);
+ }
+
+ return dest;
+}
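+
+/*
+ * Worked example for the copy above: a 13-byte write whose destination is
+ * 5 bytes past an 8-byte boundary is split into three single-byte writes to
+ * reach alignment, one 8-byte double-word write, and two trailing
+ * single-byte writes (3 + 8 + 2 = 13).
+ */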
+
+/**
+ * smd_memcpy_from_fifo() - copy from SMD channel FIFO
+ * @dest: Destination address
+ * @src: Source address
+ * @num_bytes: Number of bytes to copy
+ * @to_user: true if the data is being copied to userspace, false otherwise
+ *
+ * @return: Address of destination
+ *
+ * This function copies num_bytes from src to dest. This is used as the memcpy
+ * function to copy data from SMD FIFO in case the SMD FIFO is naturally
+ * aligned.
+ */
+static void *smd_memcpy_from_fifo(void *dest, const void *src, size_t num_bytes,
+ bool to_user)
+{
+ union fifo_mem *temp_dst = (union fifo_mem *)dest;
+ union fifo_mem *temp_src = (union fifo_mem *)src;
+ uintptr_t mask = sizeof(union fifo_mem) - 1;
+ int ret;
+
+ /* Do byte copies until we hit 8-byte (double word) alignment */
+ while ((uintptr_t)temp_src & mask && num_bytes) {
+ if (to_user) {
+ ret = copy_to_user(temp_dst, temp_src, 1);
+ BUG_ON(ret != 0);
+ } else {
+ temp_dst->u8 = __raw_readb(temp_src);
+ }
+
+ temp_src = (union fifo_mem *)((uintptr_t)temp_src + 1);
+ temp_dst = (union fifo_mem *)((uintptr_t)temp_dst + 1);
+ num_bytes--;
+ }
+
+ /* Do double word copies */
+ while (num_bytes >= sizeof(union fifo_mem)) {
+ if (to_user) {
+ ret = copy_to_user(temp_dst, temp_src,
+ sizeof(union fifo_mem));
+ BUG_ON(ret != 0);
+ } else {
+ temp_dst->u64 = __raw_readq(temp_src);
+ }
+
+ temp_dst++;
+ temp_src++;
+ num_bytes -= sizeof(union fifo_mem);
+ }
+
+ /* Copy remaining bytes */
+ while (num_bytes--) {
+ if (to_user) {
+ ret = copy_to_user(temp_dst, temp_src, 1);
+ BUG_ON(ret != 0);
+ } else {
+ temp_dst->u8 = __raw_readb(temp_src);
+ }
+
+ temp_src = (union fifo_mem *)((uintptr_t)temp_src + 1);
+ temp_dst = (union fifo_mem *)((uintptr_t)temp_dst + 1);
+ }
+
+ return dest;
+}
+
+/**
+ * smd_memcpy32_to_fifo() - Copy to SMD channel FIFO
+ *
+ * @dest: Destination address
+ * @src: Source address
+ * @num_bytes: Number of bytes to copy
+ * @from_user: always false
+ *
+ * @return: On Success, address of destination
+ *
+ * This function copies num_bytes data from src to dest. This is used as the
+ * memcpy function to copy data to SMD FIFO in case the SMD FIFO is 4 byte
+ * aligned.
+ */
+static void *smd_memcpy32_to_fifo(void *dest, const void *src, size_t num_bytes,
+ bool from_user)
+{
+ uint32_t *dest_local = (uint32_t *)dest;
+ uint32_t *src_local = (uint32_t *)src;
+
+ if (from_user) {
+ panic("%s: Word Based Access not supported",
+ __func__);
+ }
+
+ BUG_ON(num_bytes & SMD_FIFO_ADDR_ALIGN_BYTES);
+ BUG_ON(!dest_local ||
+ ((uintptr_t)dest_local & SMD_FIFO_ADDR_ALIGN_BYTES));
+ BUG_ON(!src_local ||
+ ((uintptr_t)src_local & SMD_FIFO_ADDR_ALIGN_BYTES));
+ num_bytes /= sizeof(uint32_t);
+
+ while (num_bytes--)
+ __raw_writel(*src_local++, dest_local++);
+
+ return dest;
+}
+
+/**
+ * smd_memcpy32_from_fifo() - Copy from SMD channel FIFO
+ * @dest: Destination address
+ * @src: Source address
+ * @num_bytes: Number of bytes to copy
+ * @to_user: true if the data is being copied to userspace, false otherwise
+ *
+ * @return: On Success, destination address
+ *
+ * This function copies num_bytes data from SMD FIFO to dest. This is used as
+ * the memcpy function to copy data from SMD FIFO in case the SMD FIFO is 4 byte
+ * aligned.
+ */
+static void *smd_memcpy32_from_fifo(void *dest, const void *src,
+ size_t num_bytes, bool to_user)
+{
+ uint32_t *dest_local = (uint32_t *)dest;
+ uint32_t *src_local = (uint32_t *)src;
+
+ if (to_user) {
+ panic("%s: Word Based Access not supported",
+ __func__);
+ }
+
+ BUG_ON(num_bytes & SMD_FIFO_ADDR_ALIGN_BYTES);
+ BUG_ON(!dest_local ||
+ ((uintptr_t)dest_local & SMD_FIFO_ADDR_ALIGN_BYTES));
+ BUG_ON(!src_local ||
+ ((uintptr_t)src_local & SMD_FIFO_ADDR_ALIGN_BYTES));
+ num_bytes /= sizeof(uint32_t);
+
+ while (num_bytes--)
+ *dest_local++ = __raw_readl(src_local++);
+
+ return dest;
+}
+
+static inline void log_notify(uint32_t subsystem, smd_channel_t *ch)
+{
+ const char *subsys = smd_edge_to_subsystem(subsystem);
+
+ (void) subsys;
+
+ if (!ch)
+ SMD_POWER_INFO("Apps->%s\n", subsys);
+ else
+ SMD_POWER_INFO(
+ "Apps->%s ch%d '%s': tx%d/rx%d %dr/%dw : %dr/%dw\n",
+ subsys, ch->n, ch->name,
+ ch->fifo_size -
+ (smd_stream_write_avail(ch) + 1),
+ smd_stream_read_avail(ch),
+ ch->half_ch->get_tail(ch->send),
+ ch->half_ch->get_head(ch->send),
+ ch->half_ch->get_tail(ch->recv),
+ ch->half_ch->get_head(ch->recv)
+ );
+}
+
+static inline void notify_modem_smd(smd_channel_t *ch)
+{
+ static const struct interrupt_config_item *intr
+ = &private_intr_config[SMD_MODEM].smd;
+
+ log_notify(SMD_APPS_MODEM, ch);
+ if (intr->out_base) {
+ ++interrupt_stats[SMD_MODEM].smd_out_count;
+ smd_write_intr(intr->out_bit_pos,
+ intr->out_base + intr->out_offset);
+ }
+}
+
+static inline void notify_dsp_smd(smd_channel_t *ch)
+{
+ static const struct interrupt_config_item *intr
+ = &private_intr_config[SMD_Q6].smd;
+
+ log_notify(SMD_APPS_QDSP, ch);
+ if (intr->out_base) {
+ ++interrupt_stats[SMD_Q6].smd_out_count;
+ smd_write_intr(intr->out_bit_pos,
+ intr->out_base + intr->out_offset);
+ }
+}
+
+static inline void notify_dsps_smd(smd_channel_t *ch)
+{
+ static const struct interrupt_config_item *intr
+ = &private_intr_config[SMD_DSPS].smd;
+
+ log_notify(SMD_APPS_DSPS, ch);
+ if (intr->out_base) {
+ ++interrupt_stats[SMD_DSPS].smd_out_count;
+ smd_write_intr(intr->out_bit_pos,
+ intr->out_base + intr->out_offset);
+ }
+}
+
+static inline void notify_wcnss_smd(struct smd_channel *ch)
+{
+ static const struct interrupt_config_item *intr
+ = &private_intr_config[SMD_WCNSS].smd;
+
+ log_notify(SMD_APPS_WCNSS, ch);
+ if (intr->out_base) {
+ ++interrupt_stats[SMD_WCNSS].smd_out_count;
+ smd_write_intr(intr->out_bit_pos,
+ intr->out_base + intr->out_offset);
+ }
+}
+
+static inline void notify_modemfw_smd(smd_channel_t *ch)
+{
+ static const struct interrupt_config_item *intr
+ = &private_intr_config[SMD_MODEM_Q6_FW].smd;
+
+ log_notify(SMD_APPS_Q6FW, ch);
+ if (intr->out_base) {
+ ++interrupt_stats[SMD_MODEM_Q6_FW].smd_out_count;
+ smd_write_intr(intr->out_bit_pos,
+ intr->out_base + intr->out_offset);
+ }
+}
+
+static inline void notify_rpm_smd(smd_channel_t *ch)
+{
+ static const struct interrupt_config_item *intr
+ = &private_intr_config[SMD_RPM].smd;
+
+ if (intr->out_base) {
+ log_notify(SMD_APPS_RPM, ch);
+ ++interrupt_stats[SMD_RPM].smd_out_count;
+ smd_write_intr(intr->out_bit_pos,
+ intr->out_base + intr->out_offset);
+ }
+}
+
+static inline void notify_modem_smsm(void)
+{
+ static const struct interrupt_config_item *intr
+ = &private_intr_config[SMD_MODEM].smsm;
+
+ SMSM_POWER_INFO("SMSM Apps->%s", "MODEM");
+
+ if (intr->out_base) {
+ ++interrupt_stats[SMD_MODEM].smsm_out_count;
+ smd_write_intr(intr->out_bit_pos,
+ intr->out_base + intr->out_offset);
+ }
+}
+
+static inline void notify_dsp_smsm(void)
+{
+ static const struct interrupt_config_item *intr
+ = &private_intr_config[SMD_Q6].smsm;
+
+ SMSM_POWER_INFO("SMSM Apps->%s", "ADSP");
+
+ if (intr->out_base) {
+ ++interrupt_stats[SMD_Q6].smsm_out_count;
+ smd_write_intr(intr->out_bit_pos,
+ intr->out_base + intr->out_offset);
+ }
+}
+
+static inline void notify_dsps_smsm(void)
+{
+ static const struct interrupt_config_item *intr
+ = &private_intr_config[SMD_DSPS].smsm;
+
+ SMSM_POWER_INFO("SMSM Apps->%s", "DSPS");
+
+ if (intr->out_base) {
+ ++interrupt_stats[SMD_DSPS].smsm_out_count;
+ smd_write_intr(intr->out_bit_pos,
+ intr->out_base + intr->out_offset);
+ }
+}
+
+static inline void notify_wcnss_smsm(void)
+{
+ static const struct interrupt_config_item *intr
+ = &private_intr_config[SMD_WCNSS].smsm;
+
+ SMSM_POWER_INFO("SMSM Apps->%s", "WCNSS");
+
+ if (intr->out_base) {
+ ++interrupt_stats[SMD_WCNSS].smsm_out_count;
+ smd_write_intr(intr->out_bit_pos,
+ intr->out_base + intr->out_offset);
+ }
+}
+
+static void notify_other_smsm(uint32_t smsm_entry, uint32_t notify_mask)
+{
+ if (smsm_info.intr_mask &&
+ (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_MODEM))
+ & notify_mask))
+ notify_modem_smsm();
+
+ if (smsm_info.intr_mask &&
+ (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_Q6))
+ & notify_mask))
+ notify_dsp_smsm();
+
+ if (smsm_info.intr_mask &&
+ (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_WCNSS))
+ & notify_mask)) {
+ notify_wcnss_smsm();
+ }
+
+ if (smsm_info.intr_mask &&
+ (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_DSPS))
+ & notify_mask)) {
+ notify_dsps_smsm();
+ }
+
+ if (smsm_info.intr_mask &&
+ (__raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS))
+ & notify_mask)) {
+ smsm_cb_snapshot(1);
+ }
+}
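+
+/*
+ * Example: if the modem's entry in the interrupt-mask table has a bit of
+ * notify_mask set for the SMSM entry being changed, the update above lands in
+ * notify_modem_smsm() and raises the outgoing interrupt to the modem; hosts
+ * whose mask doesn't cover the changed bits are left undisturbed.
+ */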
+
+static int smsm_pm_notifier(struct notifier_block *nb,
+ unsigned long event, void *unused)
+{
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ smsm_change_state(SMSM_APPS_STATE, SMSM_PROC_AWAKE, 0);
+ break;
+
+ case PM_POST_SUSPEND:
+ smsm_change_state(SMSM_APPS_STATE, 0, SMSM_PROC_AWAKE);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block smsm_pm_nb = {
+ .notifier_call = smsm_pm_notifier,
+ .priority = 0,
+};
+
+/* the spinlock is used to synchronize between the
+ * irq handler and code that mutates the channel
+ * list or fiddles with channel state
+ */
+static DEFINE_SPINLOCK(smd_lock);
+DEFINE_SPINLOCK(smem_lock);
+
+/* the mutex is used during open() and close()
+ * operations to avoid races while creating or
+ * destroying smd_channel structures
+ */
+static DEFINE_MUTEX(smd_creation_mutex);
+
+struct smd_shared {
+ struct smd_half_channel ch0;
+ struct smd_half_channel ch1;
+};
+
+struct smd_shared_word_access {
+ struct smd_half_channel_word_access ch0;
+ struct smd_half_channel_word_access ch1;
+};
+
+/**
+ * Maps edge type to local and remote processor IDs.
+ */
+static struct edge_to_pid edge_to_pids[] = {
+ [SMD_APPS_MODEM] = {SMD_APPS, SMD_MODEM, "modem"},
+ [SMD_APPS_QDSP] = {SMD_APPS, SMD_Q6, "adsp"},
+ [SMD_MODEM_QDSP] = {SMD_MODEM, SMD_Q6},
+ [SMD_APPS_DSPS] = {SMD_APPS, SMD_DSPS, "dsps"},
+ [SMD_MODEM_DSPS] = {SMD_MODEM, SMD_DSPS},
+ [SMD_QDSP_DSPS] = {SMD_Q6, SMD_DSPS},
+ [SMD_APPS_WCNSS] = {SMD_APPS, SMD_WCNSS, "wcnss"},
+ [SMD_MODEM_WCNSS] = {SMD_MODEM, SMD_WCNSS},
+ [SMD_QDSP_WCNSS] = {SMD_Q6, SMD_WCNSS},
+ [SMD_DSPS_WCNSS] = {SMD_DSPS, SMD_WCNSS},
+ [SMD_APPS_Q6FW] = {SMD_APPS, SMD_MODEM_Q6_FW},
+ [SMD_MODEM_Q6FW] = {SMD_MODEM, SMD_MODEM_Q6_FW},
+ [SMD_QDSP_Q6FW] = {SMD_Q6, SMD_MODEM_Q6_FW},
+ [SMD_DSPS_Q6FW] = {SMD_DSPS, SMD_MODEM_Q6_FW},
+ [SMD_WCNSS_Q6FW] = {SMD_WCNSS, SMD_MODEM_Q6_FW},
+ [SMD_APPS_RPM] = {SMD_APPS, SMD_RPM},
+ [SMD_MODEM_RPM] = {SMD_MODEM, SMD_RPM},
+ [SMD_QDSP_RPM] = {SMD_Q6, SMD_RPM},
+ [SMD_WCNSS_RPM] = {SMD_WCNSS, SMD_RPM},
+ [SMD_TZ_RPM] = {SMD_TZ, SMD_RPM},
+};
+
+struct restart_notifier_block {
+ unsigned processor;
+ char *name;
+ struct notifier_block nb;
+};
+
+static struct platform_device loopback_tty_pdev = {.name = "LOOPBACK_TTY"};
+
+static LIST_HEAD(smd_ch_closed_list);
+static LIST_HEAD(smd_ch_closing_list);
+static LIST_HEAD(smd_ch_to_close_list);
+
+struct remote_proc_info {
+ unsigned remote_pid;
+ unsigned free_space;
+ struct work_struct probe_work;
+ struct list_head ch_list;
+ /* 2 total supported tables of channels */
+ unsigned char ch_allocated[SMEM_NUM_SMD_STREAM_CHANNELS * 2];
+ bool skip_pil;
+};
+
+static struct remote_proc_info remote_info[NUM_SMD_SUBSYSTEMS];
+
+static void finalize_channel_close_fn(struct work_struct *work);
+static DECLARE_WORK(finalize_channel_close_work, finalize_channel_close_fn);
+static struct workqueue_struct *channel_close_wq;
+
+#define PRI_ALLOC_TBL 1
+#define SEC_ALLOC_TBL 2
+static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm, int table_id,
+ struct remote_proc_info *r_info);
+
+static bool smd_edge_inited(int edge)
+{
+ return edge_to_pids[edge].initialized;
+}
+
+/* on smp systems, the probe might get called from multiple cores,
+ hence use a lock */
+static DEFINE_MUTEX(smd_probe_lock);
+
+/**
+ * scan_alloc_table - Scans a specified SMD channel allocation table in SMEM for
+ * newly created channels that need to be made locally
+ * visible
+ *
+ * @shared: pointer to the table array in SMEM
+ * @smd_ch_allocated: pointer to an array indicating already allocated channels
+ * @table_id: identifier for this channel allocation table
+ * @num_entries: number of entries in this allocation table
+ * @r_info: pointer to the info structure of the remote proc we care about
+ *
+ * The smd_probe_lock must be locked by the calling function. Shared and
+ * smd_ch_allocated are assumed to be valid pointers.
+ */
+static void scan_alloc_table(struct smd_alloc_elm *shared,
+ char *smd_ch_allocated,
+ int table_id,
+ unsigned num_entries,
+ struct remote_proc_info *r_info)
+{
+ unsigned n;
+ uint32_t type;
+
+ for (n = 0; n < num_entries; n++) {
+ if (smd_ch_allocated[n])
+ continue;
+
+ /*
+ * channel should be allocated only if APPS processor is
+ * involved
+ */
+ type = SMD_CHANNEL_TYPE(shared[n].type);
+ if (!pid_is_on_edge(type, SMD_APPS) ||
+ !pid_is_on_edge(type, r_info->remote_pid))
+ continue;
+ if (!shared[n].ref_count)
+ continue;
+ if (!shared[n].name[0])
+ continue;
+
+ if (!smd_edge_inited(type)) {
+ SMD_INFO(
+ "Probe skipping proc %d, tbl %d, ch %d, edge not inited\n",
+ r_info->remote_pid, table_id, n);
+ continue;
+ }
+
+ if (!smd_alloc_channel(&shared[n], table_id, r_info))
+ smd_ch_allocated[n] = 1;
+ else
+ SMD_INFO(
+ "Probe skipping proc %d, tbl %d, ch %d, not allocated\n",
+ r_info->remote_pid, table_id, n);
+ }
+}
+
+static void smd_channel_probe_now(struct remote_proc_info *r_info)
+{
+ struct smd_alloc_elm *shared;
+ unsigned tbl_size;
+
+ shared = smem_get_entry(ID_CH_ALLOC_TBL, &tbl_size,
+ r_info->remote_pid, 0);
+
+ if (!shared) {
+ pr_err("%s: allocation table not initialized\n", __func__);
+ return;
+ }
+
+ mutex_lock(&smd_probe_lock);
+
+ scan_alloc_table(shared, r_info->ch_allocated, PRI_ALLOC_TBL,
+ tbl_size / sizeof(*shared),
+ r_info);
+
+ shared = smem_get_entry(SMEM_CHANNEL_ALLOC_TBL_2, &tbl_size,
+ r_info->remote_pid, 0);
+ if (shared)
+ scan_alloc_table(shared,
+ &(r_info->ch_allocated[SMEM_NUM_SMD_STREAM_CHANNELS]),
+ SEC_ALLOC_TBL,
+ tbl_size / sizeof(*shared),
+ r_info);
+
+ mutex_unlock(&smd_probe_lock);
+}
+
+/**
+ * smd_channel_probe_worker() - Scan for newly created SMD channels and init
+ * local structures so the channels are visible to
+ * local clients
+ *
+ * @work: work_struct corresponding to an instance of this function running on
+ * a workqueue.
+ */
+static void smd_channel_probe_worker(struct work_struct *work)
+{
+ struct remote_proc_info *r_info;
+
+ r_info = container_of(work, struct remote_proc_info, probe_work);
+
+ smd_channel_probe_now(r_info);
+}
+
+/**
+ * get_remote_ch() - gathers remote channel info
+ *
+ * @shared2: Pointer to v2 shared channel structure
+ * @type: Edge type
+ * @pid: Processor ID of processor on edge
+ * @remote_ch: Channel that belongs to processor @pid
+ * @is_word_access_ch: Bool, is this a word aligned access channel
+ *
+ * @returns: 0 on success, error code on failure
+ */
+static int get_remote_ch(void *shared2,
+ uint32_t type, uint32_t pid,
+ void **remote_ch,
+ int is_word_access_ch
+ )
+{
+ if (!remote_ch || !shared2 || !pid_is_on_edge(type, pid) ||
+ !pid_is_on_edge(type, SMD_APPS))
+ return -EINVAL;
+
+ if (is_word_access_ch)
+ *remote_ch =
+ &((struct smd_shared_word_access *)(shared2))->ch1;
+ else
+ *remote_ch = &((struct smd_shared *)(shared2))->ch1;
+
+ return 0;
+}
+
+/**
+ * smd_remote_ss_to_edge() - return edge type from remote ss type
+ * @name: remote subsystem name
+ *
+ * Returns the edge type connected between the local subsystem(APPS)
+ * and remote subsystem @name.
+ */
+int smd_remote_ss_to_edge(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
+ if (edge_to_pids[i].subsys_name[0] != 0x0) {
+ if (!strncmp(edge_to_pids[i].subsys_name, name,
+ strlen(name)))
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(smd_remote_ss_to_edge);
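+
+/*
+ * Example: given the edge_to_pids table above, smd_remote_ss_to_edge("modem")
+ * returns SMD_APPS_MODEM, and the reverse lookup
+ * smd_edge_to_subsystem(SMD_APPS_MODEM) returns "modem" once that edge has
+ * been initialized.
+ */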
+
+/**
+ * smd_edge_to_pil_str - Returns the PIL string used to load the remote side of
+ * the indicated edge.
+ *
+ * @type - Edge definition
+ * @returns - The PIL string to load the remote side of @type or NULL if the
+ * PIL string does not exist.
+ */
+const char *smd_edge_to_pil_str(uint32_t type)
+{
+ const char *pil_str = NULL;
+
+ if (type < ARRAY_SIZE(edge_to_pids)) {
+ if (!edge_to_pids[type].initialized)
+ return ERR_PTR(-EPROBE_DEFER);
+ if (!remote_info[smd_edge_to_remote_pid(type)].skip_pil) {
+ pil_str = edge_to_pids[type].subsys_name;
+ if (pil_str[0] == 0x0)
+ pil_str = NULL;
+ }
+ }
+ return pil_str;
+}
+EXPORT_SYMBOL(smd_edge_to_pil_str);
+
+/*
+ * Returns a pointer to the subsystem name or NULL if no
+ * subsystem name is available.
+ *
+ * @type - Edge definition
+ */
+const char *smd_edge_to_subsystem(uint32_t type)
+{
+ const char *subsys = NULL;
+
+ if (type < ARRAY_SIZE(edge_to_pids)) {
+ subsys = edge_to_pids[type].subsys_name;
+ if (subsys[0] == 0x0)
+ subsys = NULL;
+ if (!edge_to_pids[type].initialized)
+ subsys = ERR_PTR(-EPROBE_DEFER);
+ }
+ return subsys;
+}
+EXPORT_SYMBOL(smd_edge_to_subsystem);
+
+/*
+ * Returns a pointer to the subsystem name given the
+ * remote processor ID.
+ * The subsystem is not necessarily PIL-loadable.
+ *
+ * @pid Remote processor ID
+ * @returns Pointer to subsystem name or NULL if not found
+ */
+const char *smd_pid_to_subsystem(uint32_t pid)
+{
+ const char *subsys = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(edge_to_pids); ++i) {
+ if (pid == edge_to_pids[i].remote_pid) {
+ if (!edge_to_pids[i].initialized) {
+ subsys = ERR_PTR(-EPROBE_DEFER);
+ break;
+ }
+ if (edge_to_pids[i].subsys_name[0] != 0x0) {
+ subsys = edge_to_pids[i].subsys_name;
+ break;
+ } else if (pid == SMD_RPM) {
+ subsys = "rpm";
+ break;
+ }
+ }
+ }
+
+ return subsys;
+}
+EXPORT_SYMBOL(smd_pid_to_subsystem);
+
+static void smd_reset_edge(void *void_ch, unsigned new_state,
+ int is_word_access_ch)
+{
+ if (is_word_access_ch) {
+ struct smd_half_channel_word_access *ch =
+ (struct smd_half_channel_word_access *)(void_ch);
+ if (ch->state != SMD_SS_CLOSED) {
+ ch->state = new_state;
+ ch->fDSR = 0;
+ ch->fCTS = 0;
+ ch->fCD = 0;
+ ch->fSTATE = 1;
+ }
+ } else {
+ struct smd_half_channel *ch =
+ (struct smd_half_channel *)(void_ch);
+ if (ch->state != SMD_SS_CLOSED) {
+ ch->state = new_state;
+ ch->fDSR = 0;
+ ch->fCTS = 0;
+ ch->fCD = 0;
+ ch->fSTATE = 1;
+ }
+ }
+}
+
+/**
+ * smd_channel_reset_state() - find channels in an allocation table and set them
+ * to the specified state
+ *
+ * @shared: Pointer to the allocation table to scan
+ * @table_id: ID of the table
+ * @new_state: New state that channels should be set to
+ * @pid: Processor ID of the remote processor for the channels
+ * @num_entries: Number of entries in the table
+ *
+ * Scan the indicated table for channels between Apps and @pid. If a valid
+ * channel is found, set the remote side of the channel to @new_state.
+ */
+static void smd_channel_reset_state(struct smd_alloc_elm *shared, int table_id,
+ unsigned new_state, unsigned pid, unsigned num_entries)
+{
+ unsigned n;
+ void *shared2;
+ uint32_t type;
+ void *remote_ch;
+ int is_word_access;
+ unsigned base_id;
+
+ switch (table_id) {
+ case PRI_ALLOC_TBL:
+ base_id = SMEM_SMD_BASE_ID;
+ break;
+ case SEC_ALLOC_TBL:
+ base_id = SMEM_SMD_BASE_ID_2;
+ break;
+ default:
+ SMD_INFO("%s: invalid table_id:%d\n", __func__, table_id);
+ return;
+ }
+
+ for (n = 0; n < num_entries; n++) {
+ if (!shared[n].ref_count)
+ continue;
+ if (!shared[n].name[0])
+ continue;
+
+ type = SMD_CHANNEL_TYPE(shared[n].type);
+ is_word_access = is_word_access_ch(type);
+ if (is_word_access)
+ shared2 = smem_find(base_id + n,
+ sizeof(struct smd_shared_word_access), pid,
+ 0);
+ else
+ shared2 = smem_find(base_id + n,
+ sizeof(struct smd_shared), pid, 0);
+ if (!shared2)
+ continue;
+
+ if (!get_remote_ch(shared2, type, pid,
+ &remote_ch, is_word_access))
+ smd_reset_edge(remote_ch, new_state, is_word_access);
+ }
+}
+
+/**
+ * pid_is_on_edge() - checks to see if the processor with id pid is on the
+ * edge specified by edge_num
+ *
+ * @edge_num: the number of the edge which is being tested
+ * @pid: the id of the processor being tested
+ *
+ * @returns: true if on edge, false otherwise
+ */
+static bool pid_is_on_edge(uint32_t edge_num, unsigned pid)
+{
+ struct edge_to_pid edge;
+
+ if (edge_num >= ARRAY_SIZE(edge_to_pids))
+ return 0;
+
+ edge = edge_to_pids[edge_num];
+ return (edge.local_pid == pid || edge.remote_pid == pid);
+}
+
+void smd_channel_reset(uint32_t restart_pid)
+{
+ struct smd_alloc_elm *shared_pri;
+ struct smd_alloc_elm *shared_sec;
+ unsigned long flags;
+ unsigned pri_size;
+ unsigned sec_size;
+
+ SMD_POWER_INFO("%s: starting reset\n", __func__);
+
+ shared_pri = smem_get_entry(ID_CH_ALLOC_TBL, &pri_size, restart_pid, 0);
+ if (!shared_pri) {
+ pr_err("%s: allocation table not initialized\n", __func__);
+ return;
+ }
+ shared_sec = smem_get_entry(SMEM_CHANNEL_ALLOC_TBL_2, &sec_size,
+ restart_pid, 0);
+
+ /* reset SMSM entry */
+ if (smsm_info.state) {
+ writel_relaxed(0, SMSM_STATE_ADDR(restart_pid));
+
+ /* restart SMSM init handshake */
+ if (restart_pid == SMSM_MODEM) {
+ smsm_change_state(SMSM_APPS_STATE,
+ SMSM_INIT | SMSM_SMD_LOOPBACK | SMSM_RESET,
+ 0);
+ }
+
+ /* notify SMSM processors */
+ smsm_irq_handler(0, 0);
+ notify_modem_smsm();
+ notify_dsp_smsm();
+ notify_dsps_smsm();
+ notify_wcnss_smsm();
+ }
+
+ /* change all remote states to CLOSING */
+ mutex_lock(&smd_probe_lock);
+ spin_lock_irqsave(&smd_lock, flags);
+ smd_channel_reset_state(shared_pri, PRI_ALLOC_TBL, SMD_SS_CLOSING,
+ restart_pid, pri_size / sizeof(*shared_pri));
+ if (shared_sec)
+ smd_channel_reset_state(shared_sec, SEC_ALLOC_TBL,
+ SMD_SS_CLOSING, restart_pid,
+ sec_size / sizeof(*shared_sec));
+ spin_unlock_irqrestore(&smd_lock, flags);
+ mutex_unlock(&smd_probe_lock);
+
+ mb();
+ smd_fake_irq_handler(0);
+
+ /* change all remote states to CLOSED */
+ mutex_lock(&smd_probe_lock);
+ spin_lock_irqsave(&smd_lock, flags);
+ smd_channel_reset_state(shared_pri, PRI_ALLOC_TBL, SMD_SS_CLOSED,
+ restart_pid, pri_size / sizeof(*shared_pri));
+ if (shared_sec)
+ smd_channel_reset_state(shared_sec, SEC_ALLOC_TBL,
+ SMD_SS_CLOSED, restart_pid,
+ sec_size / sizeof(*shared_sec));
+ spin_unlock_irqrestore(&smd_lock, flags);
+ mutex_unlock(&smd_probe_lock);
+
+ mb();
+ smd_fake_irq_handler(0);
+
+ SMD_POWER_INFO("%s: finished reset\n", __func__);
+}
+
+/* how many bytes are available for reading */
+static int smd_stream_read_avail(struct smd_channel *ch)
+{
+ unsigned head = ch->half_ch->get_head(ch->recv);
+ unsigned tail = ch->half_ch->get_tail(ch->recv);
+ unsigned fifo_size = ch->fifo_size;
+ unsigned bytes_avail = head - tail;
+
+ if (head < tail)
+ bytes_avail += fifo_size;
+
+ BUG_ON(bytes_avail >= fifo_size);
+ return bytes_avail;
+}
+
+/* how many bytes we are free to write */
+static int smd_stream_write_avail(struct smd_channel *ch)
+{
+ unsigned head = ch->half_ch->get_head(ch->send);
+ unsigned tail = ch->half_ch->get_tail(ch->send);
+ unsigned fifo_size = ch->fifo_size;
+ unsigned bytes_avail = tail - head;
+
+ if (tail <= head)
+ bytes_avail += fifo_size;
+ if (bytes_avail < SMD_FIFO_FULL_RESERVE)
+ bytes_avail = 0;
+ else
+ bytes_avail -= SMD_FIFO_FULL_RESERVE;
+
+ BUG_ON(bytes_avail >= fifo_size);
+ return bytes_avail;
+}
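+
+/*
+ * Worked example for the ring arithmetic above: with fifo_size = 1024,
+ * head = 1000 and tail = 100, the writer sees tail - head + fifo_size = 124
+ * bytes, minus SMD_FIFO_FULL_RESERVE (4), i.e. 120 usable bytes. The reserve
+ * keeps head from ever catching up to tail, which would make a full FIFO
+ * indistinguishable from an empty one.
+ */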
+
+static int smd_packet_read_avail(struct smd_channel *ch)
+{
+ if (ch->current_packet) {
+ int n = smd_stream_read_avail(ch);
+ if (n > ch->current_packet)
+ n = ch->current_packet;
+ return n;
+ } else {
+ return 0;
+ }
+}
+
+static int smd_packet_write_avail(struct smd_channel *ch)
+{
+ int n = smd_stream_write_avail(ch);
+ return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
+}
+
+static int ch_is_open(struct smd_channel *ch)
+{
+ return (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED ||
+ ch->half_ch->get_state(ch->recv) == SMD_SS_FLUSHING)
+ && (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED);
+}
+
+/* provide a pointer and length to readable data in the fifo */
+static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
+{
+ unsigned head = ch->half_ch->get_head(ch->recv);
+ unsigned tail = ch->half_ch->get_tail(ch->recv);
+ unsigned fifo_size = ch->fifo_size;
+
+ BUG_ON(fifo_size >= SZ_1M);
+ BUG_ON(head >= fifo_size);
+ BUG_ON(tail >= fifo_size);
+ BUG_ON(OVERFLOW_ADD_UNSIGNED(uintptr_t, (uintptr_t)ch->recv_data,
+ tail));
+ *ptr = (void *) (ch->recv_data + tail);
+ if (tail <= head)
+ return head - tail;
+ else
+ return fifo_size - tail;
+}
+
+static int read_intr_blocked(struct smd_channel *ch)
+{
+ return ch->half_ch->get_fBLOCKREADINTR(ch->recv);
+}
+
+/* advance the fifo read pointer after data from ch_read_buffer is consumed */
+static void ch_read_done(struct smd_channel *ch, unsigned count)
+{
+ unsigned tail = ch->half_ch->get_tail(ch->recv);
+ unsigned fifo_size = ch->fifo_size;
+
+ BUG_ON(count > smd_stream_read_avail(ch));
+
+ tail += count;
+ if (tail >= fifo_size)
+ tail -= fifo_size;
+ ch->half_ch->set_tail(ch->recv, tail);
+ wmb();
+ ch->half_ch->set_fTAIL(ch->send, 1);
+}
+
+/* basic read interface to ch_read_{buffer,done} used
+ * by smd_*_read() and update_packet_state()
+ * will read-and-discard if the _data pointer is null
+ */
+static int ch_read(struct smd_channel *ch, void *_data, int len, int user_buf)
+{
+ void *ptr;
+ unsigned n;
+ unsigned char *data = _data;
+ int orig_len = len;
+
+ while (len > 0) {
+ n = ch_read_buffer(ch, &ptr);
+ if (n == 0)
+ break;
+
+ if (n > len)
+ n = len;
+ if (_data)
+ ch->read_from_fifo(data, ptr, n, user_buf);
+
+ data += n;
+ len -= n;
+ ch_read_done(ch, n);
+ }
+
+ return orig_len - len;
+}
+
+static void update_stream_state(struct smd_channel *ch)
+{
+ /* streams have no special state requiring updating */
+}
+
+static void update_packet_state(struct smd_channel *ch)
+{
+ unsigned hdr[5];
+ int r;
+ const char *peripheral = NULL;
+
+ /* can't parse a new packet header while we're in the middle of a packet */
+ while (ch->current_packet == 0) {
+ /* discard 0 length packets if any */
+
+ /* don't bother unless we can get the full header */
+ if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
+ return;
+
+ r = ch_read(ch, hdr, SMD_HEADER_SIZE, 0);
+ BUG_ON(r != SMD_HEADER_SIZE);
+
+ ch->current_packet = hdr[0];
+ if (ch->current_packet > (uint32_t)INT_MAX) {
+ pr_err("%s: Invalid packet size of %d bytes detected. Edge: %d, Channel : %s, RPTR: %d, WPTR: %d",
+ __func__, ch->current_packet, ch->type,
+ ch->name, ch->half_ch->get_tail(ch->recv),
+ ch->half_ch->get_head(ch->recv));
+ peripheral = smd_edge_to_pil_str(ch->type);
+ if (peripheral) {
+ if (subsystem_restart(peripheral) < 0)
+ BUG();
+ } else {
+ BUG();
+ }
+ }
+ }
+}
+
+/**
+ * ch_write_buffer() - Provide a pointer and length for the next segment of
+ * free space in the FIFO.
+ * @ch: channel
+ * @ptr: Address to pointer for the next segment write
+ * @returns: Maximum size that can be written until the FIFO is either full
+ * or the end of the FIFO has been reached.
+ *
+ * The returned pointer and length are passed to memcpy, so the next segment is
+ * defined as either the space available between the read index (tail) and the
+ * write index (head) or the space available to the end of the FIFO.
+ */
+static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
+{
+ unsigned head = ch->half_ch->get_head(ch->send);
+ unsigned tail = ch->half_ch->get_tail(ch->send);
+ unsigned fifo_size = ch->fifo_size;
+
+ BUG_ON(fifo_size >= SZ_1M);
+ BUG_ON(head >= fifo_size);
+ BUG_ON(tail >= fifo_size);
+ BUG_ON(OVERFLOW_ADD_UNSIGNED(uintptr_t, (uintptr_t)ch->send_data,
+ head));
+
+ *ptr = (void *) (ch->send_data + head);
+ if (head < tail) {
+ return tail - head - SMD_FIFO_FULL_RESERVE;
+ } else {
+ if (tail < SMD_FIFO_FULL_RESERVE)
+ return fifo_size + tail - head
+ - SMD_FIFO_FULL_RESERVE;
+ else
+ return fifo_size - head;
+ }
+}
+
+/* advance the fifo write pointer after free space
+ * from ch_write_buffer is filled
+ */
+static void ch_write_done(struct smd_channel *ch, unsigned count)
+{
+ unsigned head = ch->half_ch->get_head(ch->send);
+ unsigned fifo_size = ch->fifo_size;
+
+ BUG_ON(count > smd_stream_write_avail(ch));
+ head += count;
+ if (head >= fifo_size)
+ head -= fifo_size;
+ ch->half_ch->set_head(ch->send, head);
+ wmb();
+ ch->half_ch->set_fHEAD(ch->send, 1);
+}
+
+static void ch_set_state(struct smd_channel *ch, unsigned n)
+{
+ if (n == SMD_SS_OPENED) {
+ ch->half_ch->set_fDSR(ch->send, 1);
+ ch->half_ch->set_fCTS(ch->send, 1);
+ ch->half_ch->set_fCD(ch->send, 1);
+ } else {
+ ch->half_ch->set_fDSR(ch->send, 0);
+ ch->half_ch->set_fCTS(ch->send, 0);
+ ch->half_ch->set_fCD(ch->send, 0);
+ }
+ ch->half_ch->set_state(ch->send, n);
+ ch->half_ch->set_fSTATE(ch->send, 1);
+ ch->notify_other_cpu(ch);
+}
+
+/**
+ * do_smd_probe() - Look for newly created SMD channels from a specific processor
+ *
+ * @remote_pid: remote processor id of the proc that may have created channels
+ */
+static void do_smd_probe(unsigned remote_pid)
+{
+ unsigned free_space;
+
+ free_space = smem_get_free_space(remote_pid);
+ if (free_space != remote_info[remote_pid].free_space) {
+ remote_info[remote_pid].free_space = free_space;
+ schedule_work(&remote_info[remote_pid].probe_work);
+ }
+}
+
+static void smd_state_change(struct smd_channel *ch,
+ unsigned last, unsigned next)
+{
+ ch->last_state = next;
+
+ SMD_INFO("SMD: ch %d %d -> %d\n", ch->n, last, next);
+
+ switch (next) {
+ case SMD_SS_OPENING:
+ if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSING ||
+ ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
+ ch->half_ch->set_tail(ch->recv, 0);
+ ch->half_ch->set_head(ch->send, 0);
+ ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
+ ch_set_state(ch, SMD_SS_OPENING);
+ }
+ break;
+ case SMD_SS_OPENED:
+ if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENING) {
+ ch_set_state(ch, SMD_SS_OPENED);
+ ch->notify(ch->priv, SMD_EVENT_OPEN);
+ }
+ break;
+ case SMD_SS_FLUSHING:
+ case SMD_SS_RESET:
+ /* we should force them to close? */
+ break;
+ case SMD_SS_CLOSED:
+ if (ch->half_ch->get_state(ch->send) == SMD_SS_OPENED) {
+ ch_set_state(ch, SMD_SS_CLOSING);
+ ch->current_packet = 0;
+ ch->pending_pkt_sz = 0;
+ ch->notify(ch->priv, SMD_EVENT_CLOSE);
+ }
+ break;
+ case SMD_SS_CLOSING:
+ if (ch->half_ch->get_state(ch->send) == SMD_SS_CLOSED) {
+ list_move(&ch->ch_list,
+ &smd_ch_to_close_list);
+ queue_work(channel_close_wq,
+ &finalize_channel_close_work);
+ }
+ break;
+ }
+}
+
+static void handle_smd_irq_closing_list(void)
+{
+ unsigned long flags;
+ struct smd_channel *ch;
+ struct smd_channel *index;
+ unsigned tmp;
+
+ spin_lock_irqsave(&smd_lock, flags);
+ list_for_each_entry_safe(ch, index, &smd_ch_closing_list, ch_list) {
+ if (ch->half_ch->get_fSTATE(ch->recv))
+ ch->half_ch->set_fSTATE(ch->recv, 0);
+ tmp = ch->half_ch->get_state(ch->recv);
+ if (tmp != ch->last_state)
+ smd_state_change(ch, ch->last_state, tmp);
+ }
+ spin_unlock_irqrestore(&smd_lock, flags);
+}
+
+static void handle_smd_irq(struct remote_proc_info *r_info,
+ void (*notify)(smd_channel_t *ch))
+{
+ unsigned long flags;
+ struct smd_channel *ch;
+ unsigned ch_flags;
+ unsigned tmp;
+ unsigned char state_change;
+ struct list_head *list;
+
+ list = &r_info->ch_list;
+
+ spin_lock_irqsave(&smd_lock, flags);
+ list_for_each_entry(ch, list, ch_list) {
+ state_change = 0;
+ ch_flags = 0;
+ if (ch_is_open(ch)) {
+ if (ch->half_ch->get_fHEAD(ch->recv)) {
+ ch->half_ch->set_fHEAD(ch->recv, 0);
+ ch_flags |= 1;
+ }
+ if (ch->half_ch->get_fTAIL(ch->recv)) {
+ ch->half_ch->set_fTAIL(ch->recv, 0);
+ ch_flags |= 2;
+ }
+ if (ch->half_ch->get_fSTATE(ch->recv)) {
+ ch->half_ch->set_fSTATE(ch->recv, 0);
+ ch_flags |= 4;
+ }
+ }
+ tmp = ch->half_ch->get_state(ch->recv);
+ if (tmp != ch->last_state) {
+ SMD_POWER_INFO("SMD ch%d '%s' State change %d->%d\n",
+ ch->n, ch->name, ch->last_state, tmp);
+ smd_state_change(ch, ch->last_state, tmp);
+ state_change = 1;
+ }
+ if (ch_flags & 0x3) {
+ ch->update_state(ch);
+ SMD_POWER_INFO(
+ "SMD ch%d '%s' Data event 0x%x tx%d/rx%d %dr/%dw : %dr/%dw\n",
+ ch->n, ch->name,
+ ch_flags,
+ ch->fifo_size -
+ (smd_stream_write_avail(ch) + 1),
+ smd_stream_read_avail(ch),
+ ch->half_ch->get_tail(ch->send),
+ ch->half_ch->get_head(ch->send),
+ ch->half_ch->get_tail(ch->recv),
+ ch->half_ch->get_head(ch->recv)
+ );
+ ch->notify(ch->priv, SMD_EVENT_DATA);
+ }
+ if (ch_flags & 0x4 && !state_change) {
+ SMD_POWER_INFO("SMD ch%d '%s' State update\n",
+ ch->n, ch->name);
+ ch->notify(ch->priv, SMD_EVENT_STATUS);
+ }
+ }
+ spin_unlock_irqrestore(&smd_lock, flags);
+ do_smd_probe(r_info->remote_pid);
+}
+
+static inline void log_irq(uint32_t subsystem)
+{
+ const char *subsys = smd_edge_to_subsystem(subsystem);
+
+ (void) subsys;
+
+ SMD_POWER_INFO("SMD Int %s->Apps\n", subsys);
+}
+
+irqreturn_t smd_modem_irq_handler(int irq, void *data)
+{
+ if (unlikely(!edge_to_pids[SMD_APPS_MODEM].initialized))
+ return IRQ_HANDLED;
+ log_irq(SMD_APPS_MODEM);
+ ++interrupt_stats[SMD_MODEM].smd_in_count;
+ handle_smd_irq(&remote_info[SMD_MODEM], notify_modem_smd);
+ handle_smd_irq_closing_list();
+ return IRQ_HANDLED;
+}
+
+irqreturn_t smd_dsp_irq_handler(int irq, void *data)
+{
+ if (unlikely(!edge_to_pids[SMD_APPS_QDSP].initialized))
+ return IRQ_HANDLED;
+ log_irq(SMD_APPS_QDSP);
+ ++interrupt_stats[SMD_Q6].smd_in_count;
+ handle_smd_irq(&remote_info[SMD_Q6], notify_dsp_smd);
+ handle_smd_irq_closing_list();
+ return IRQ_HANDLED;
+}
+
+irqreturn_t smd_dsps_irq_handler(int irq, void *data)
+{
+ if (unlikely(!edge_to_pids[SMD_APPS_DSPS].initialized))
+ return IRQ_HANDLED;
+ log_irq(SMD_APPS_DSPS);
+ ++interrupt_stats[SMD_DSPS].smd_in_count;
+ handle_smd_irq(&remote_info[SMD_DSPS], notify_dsps_smd);
+ handle_smd_irq_closing_list();
+ return IRQ_HANDLED;
+}
+
+irqreturn_t smd_wcnss_irq_handler(int irq, void *data)
+{
+ if (unlikely(!edge_to_pids[SMD_APPS_WCNSS].initialized))
+ return IRQ_HANDLED;
+ log_irq(SMD_APPS_WCNSS);
+ ++interrupt_stats[SMD_WCNSS].smd_in_count;
+ handle_smd_irq(&remote_info[SMD_WCNSS], notify_wcnss_smd);
+ handle_smd_irq_closing_list();
+ return IRQ_HANDLED;
+}
+
+irqreturn_t smd_modemfw_irq_handler(int irq, void *data)
+{
+ if (unlikely(!edge_to_pids[SMD_APPS_Q6FW].initialized))
+ return IRQ_HANDLED;
+ log_irq(SMD_APPS_Q6FW);
+ ++interrupt_stats[SMD_MODEM_Q6_FW].smd_in_count;
+ handle_smd_irq(&remote_info[SMD_MODEM_Q6_FW], notify_modemfw_smd);
+ handle_smd_irq_closing_list();
+ return IRQ_HANDLED;
+}
+
+irqreturn_t smd_rpm_irq_handler(int irq, void *data)
+{
+ if (unlikely(!edge_to_pids[SMD_APPS_RPM].initialized))
+ return IRQ_HANDLED;
+ log_irq(SMD_APPS_RPM);
+ ++interrupt_stats[SMD_RPM].smd_in_count;
+ handle_smd_irq(&remote_info[SMD_RPM], notify_rpm_smd);
+ handle_smd_irq_closing_list();
+ return IRQ_HANDLED;
+}
+
+static void smd_fake_irq_handler(unsigned long arg)
+{
+ handle_smd_irq(&remote_info[SMD_MODEM], notify_modem_smd);
+ handle_smd_irq(&remote_info[SMD_Q6], notify_dsp_smd);
+ handle_smd_irq(&remote_info[SMD_DSPS], notify_dsps_smd);
+ handle_smd_irq(&remote_info[SMD_WCNSS], notify_wcnss_smd);
+ handle_smd_irq(&remote_info[SMD_MODEM_Q6_FW], notify_modemfw_smd);
+ handle_smd_irq(&remote_info[SMD_RPM], notify_rpm_smd);
+ handle_smd_irq_closing_list();
+}
+
+static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
+{
+ if (SMD_XFER_TYPE(alloc_elm->type) == 1)
+ return 0;
+ else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
+ return 1;
+
+ panic("Unsupported SMD xfer type: %d name:%s edge:%d\n",
+ SMD_XFER_TYPE(alloc_elm->type),
+ alloc_elm->name,
+ SMD_CHANNEL_TYPE(alloc_elm->type));
+}
+
+static int smd_stream_write(smd_channel_t *ch, const void *_data, int len,
+ int user_buf, bool intr_ntfy)
+{
+ void *ptr;
+ const unsigned char *buf = _data;
+ unsigned xfer;
+ int orig_len = len;
+
+ SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
+ if (len < 0)
+ return -EINVAL;
+ else if (len == 0)
+ return 0;
+
+ while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
+ if (!ch_is_open(ch)) {
+ len = orig_len;
+ break;
+ }
+ if (xfer > len)
+ xfer = len;
+
+ ch->write_to_fifo(ptr, buf, xfer, user_buf);
+ ch_write_done(ch, xfer);
+ len -= xfer;
+ buf += xfer;
+ if (len == 0)
+ break;
+ }
+
+ if (orig_len - len && intr_ntfy)
+ ch->notify_other_cpu(ch);
+
+ return orig_len - len;
+}
+
+static int smd_packet_write(smd_channel_t *ch, const void *_data, int len,
+ int user_buf, bool intr_ntfy)
+{
+ int ret;
+ unsigned hdr[5];
+
+ SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
+ if (len < 0)
+ return -EINVAL;
+ else if (len == 0)
+ return 0;
+
+ if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
+ return -ENOMEM;
+
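+ /*
+ * SMD packet header: five 32-bit words (SMD_HEADER_SIZE bytes).
+ * Word 0 carries the payload length; the remaining words are
+ * reserved and written as zero.
+ */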
+ hdr[0] = len;
+ hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
+
+ ret = smd_stream_write(ch, hdr, sizeof(hdr), 0, false);
+ if (ret < 0 || ret != sizeof(hdr)) {
+ SMD_DBG("%s failed to write pkt header: %d returned\n",
+ __func__, ret);
+ return -EFAULT;
+ }
+
+ ret = smd_stream_write(ch, _data, len, user_buf, true);
+ if (ret < 0 || ret != len) {
+ SMD_DBG("%s failed to write pkt data: %d returned\n",
+ __func__, ret);
+ return ret;
+ }
+
+ return len;
+}
+
+static int smd_stream_read(smd_channel_t *ch, void *data, int len, int user_buf)
+{
+ int r;
+
+ if (len < 0)
+ return -EINVAL;
+
+ r = ch_read(ch, data, len, user_buf);
+ if (r > 0)
+ if (!read_intr_blocked(ch))
+ ch->notify_other_cpu(ch);
+
+ return r;
+}
+
+static int smd_packet_read(smd_channel_t *ch, void *data, int len, int user_buf)
+{
+ unsigned long flags;
+ int r;
+
+ if (len < 0)
+ return -EINVAL;
+
+ if (ch->current_packet > (uint32_t)INT_MAX) {
+ pr_err("%s: Invalid packet size for Edge %d and Channel %s",
+ __func__, ch->type, ch->name);
+ return -EFAULT;
+ }
+
+ if (len > ch->current_packet)
+ len = ch->current_packet;
+
+ r = ch_read(ch, data, len, user_buf);
+ if (r > 0)
+ if (!read_intr_blocked(ch))
+ ch->notify_other_cpu(ch);
+
+ spin_lock_irqsave(&smd_lock, flags);
+ ch->current_packet -= r;
+ update_packet_state(ch);
+ spin_unlock_irqrestore(&smd_lock, flags);
+
+ return r;
+}
+
+static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len,
+ int user_buf)
+{
+ int r;
+
+ if (len < 0)
+ return -EINVAL;
+
+ if (ch->current_packet > (uint32_t)INT_MAX) {
+ pr_err("%s: Invalid packet size for Edge %d and Channel %s",
+ __func__, ch->type, ch->name);
+ return -EFAULT;
+ }
+
+ if (len > ch->current_packet)
+ len = ch->current_packet;
+
+ r = ch_read(ch, data, len, user_buf);
+ if (r > 0)
+ if (!read_intr_blocked(ch))
+ ch->notify_other_cpu(ch);
+
+ ch->current_packet -= r;
+ update_packet_state(ch);
+
+ return r;
+}
+
+/**
+ * smd_alloc() - Init local channel structure with information stored in SMEM
+ *
+ * @ch: pointer to the local structure for this channel
+ * @table_id: the id of the table this channel resides in. 1 = first table, 2 =
+ * second table, etc
+ * @r_info: pointer to the info structure of the remote proc for this channel
+ * @returns: -EINVAL for failure; 0 for success
+ *
+ * ch must point to an allocated instance of struct smd_channel that is zeroed
+ * out, and has the n and type members already initialized to the correct values
+ */
+static int smd_alloc(struct smd_channel *ch, int table_id,
+ struct remote_proc_info *r_info)
+{
+ void *buffer;
+ unsigned buffer_sz;
+ unsigned base_id;
+ unsigned fifo_id;
+
+ switch (table_id) {
+ case PRI_ALLOC_TBL:
+ base_id = SMEM_SMD_BASE_ID;
+ fifo_id = SMEM_SMD_FIFO_BASE_ID;
+ break;
+ case SEC_ALLOC_TBL:
+ base_id = SMEM_SMD_BASE_ID_2;
+ fifo_id = SMEM_SMD_FIFO_BASE_ID_2;
+ break;
+ default:
+ SMD_INFO("Invalid table_id:%d passed to smd_alloc\n", table_id);
+ return -EINVAL;
+ }
+
+ if (is_word_access_ch(ch->type)) {
+ struct smd_shared_word_access *shared2;
+ shared2 = smem_find(base_id + ch->n, sizeof(*shared2),
+ r_info->remote_pid, 0);
+ if (!shared2) {
+ SMD_INFO("smem_find failed ch=%d\n", ch->n);
+ return -EINVAL;
+ }
+ ch->send = &shared2->ch0;
+ ch->recv = &shared2->ch1;
+ } else {
+ struct smd_shared *shared2;
+ shared2 = smem_find(base_id + ch->n, sizeof(*shared2),
+ r_info->remote_pid, 0);
+ if (!shared2) {
+ SMD_INFO("smem_find failed ch=%d\n", ch->n);
+ return -EINVAL;
+ }
+ ch->send = &shared2->ch0;
+ ch->recv = &shared2->ch1;
+ }
+ ch->half_ch = get_half_ch_funcs(ch->type);
+
+ buffer = smem_get_entry(fifo_id + ch->n, &buffer_sz,
+ r_info->remote_pid, 0);
+ if (!buffer) {
+ SMD_INFO("smem_get_entry failed\n");
+ return -EINVAL;
+ }
+
+ /* buffer size must be a multiple of 32 */
+ if ((buffer_sz & (SZ_32 - 1)) != 0) {
+ SMD_INFO("Buffer size: %u not multiple of 32\n", buffer_sz);
+ return -EINVAL;
+ }
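+ /*
+ * The shared region holds both FIFOs back to back: the first
+ * half is this processor's transmit FIFO and the second half is
+ * its receive FIFO.
+ */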
+ buffer_sz /= 2;
+ ch->send_data = buffer;
+ ch->recv_data = buffer + buffer_sz;
+ ch->fifo_size = buffer_sz;
+
+ return 0;
+}
+
+/**
+ * smd_alloc_channel() - Create and init local structures for a newly allocated
+ * SMD channel
+ *
+ * @alloc_elm: the allocation element stored in SMEM for this channel
+ * @table_id: the id of the table this channel resides in. 1 = first table, 2 =
+ * second table, etc
+ * @r_info: pointer to the info structure of the remote proc for this channel
+ * @returns: error code for failure; 0 for success
+ */
+static int smd_alloc_channel(struct smd_alloc_elm *alloc_elm, int table_id,
+ struct remote_proc_info *r_info)
+{
+ struct smd_channel *ch;
+
+ ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
+ if (ch == 0) {
+ pr_err("smd_alloc_channel() out of memory\n");
+ return -ENOMEM;
+ }
+ ch->n = alloc_elm->cid;
+ ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);
+
+ if (smd_alloc(ch, table_id, r_info)) {
+ kfree(ch);
+ return -ENODEV;
+ }
+
+ /* probe_worker guarantees ch->type will be a valid type */
+ if (ch->type == SMD_APPS_MODEM)
+ ch->notify_other_cpu = notify_modem_smd;
+ else if (ch->type == SMD_APPS_QDSP)
+ ch->notify_other_cpu = notify_dsp_smd;
+ else if (ch->type == SMD_APPS_DSPS)
+ ch->notify_other_cpu = notify_dsps_smd;
+ else if (ch->type == SMD_APPS_WCNSS)
+ ch->notify_other_cpu = notify_wcnss_smd;
+ else if (ch->type == SMD_APPS_Q6FW)
+ ch->notify_other_cpu = notify_modemfw_smd;
+ else if (ch->type == SMD_APPS_RPM)
+ ch->notify_other_cpu = notify_rpm_smd;
+
+ if (smd_is_packet(alloc_elm)) {
+ ch->read = smd_packet_read;
+ ch->write = smd_packet_write;
+ ch->read_avail = smd_packet_read_avail;
+ ch->write_avail = smd_packet_write_avail;
+ ch->update_state = update_packet_state;
+ ch->read_from_cb = smd_packet_read_from_cb;
+ ch->is_pkt_ch = 1;
+ } else {
+ ch->read = smd_stream_read;
+ ch->write = smd_stream_write;
+ ch->read_avail = smd_stream_read_avail;
+ ch->write_avail = smd_stream_write_avail;
+ ch->update_state = update_stream_state;
+ ch->read_from_cb = smd_stream_read;
+ }
+
+ if (is_word_access_ch(ch->type)) {
+ ch->read_from_fifo = smd_memcpy32_from_fifo;
+ ch->write_to_fifo = smd_memcpy32_to_fifo;
+ } else {
+ ch->read_from_fifo = smd_memcpy_from_fifo;
+ ch->write_to_fifo = smd_memcpy_to_fifo;
+ }
+
+ smd_memcpy_from_fifo(ch->name, alloc_elm->name, SMD_MAX_CH_NAME_LEN,
+ false);
+ ch->name[SMD_MAX_CH_NAME_LEN-1] = 0;
+
+ ch->pdev.name = ch->name;
+ ch->pdev.id = ch->type;
+
+ SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
+ ch->name, ch->n);
+
+ mutex_lock(&smd_creation_mutex);
+ list_add(&ch->ch_list, &smd_ch_closed_list);
+ mutex_unlock(&smd_creation_mutex);
+
+ platform_device_register(&ch->pdev);
+ if (!strncmp(ch->name, "LOOPBACK", 8) && ch->type == SMD_APPS_MODEM) {
+ /* create a platform driver to be used by smd_tty driver
+ * so that it can access the loopback port
+ */
+ loopback_tty_pdev.id = ch->type;
+ platform_device_register(&loopback_tty_pdev);
+ }
+ return 0;
+}
+
+static void do_nothing_notify(void *priv, unsigned flags)
+{
+}
+
+static void finalize_channel_close_fn(struct work_struct *work)
+{
+ unsigned long flags;
+ struct smd_channel *ch;
+ struct smd_channel *index;
+
+ mutex_lock(&smd_creation_mutex);
+ spin_lock_irqsave(&smd_lock, flags);
+ list_for_each_entry_safe(ch, index, &smd_ch_to_close_list, ch_list) {
+ list_del(&ch->ch_list);
+ list_add(&ch->ch_list, &smd_ch_closed_list);
+ ch->notify(ch->priv, SMD_EVENT_REOPEN_READY);
+ ch->notify = do_nothing_notify;
+ }
+ spin_unlock_irqrestore(&smd_lock, flags);
+ mutex_unlock(&smd_creation_mutex);
+}
+
+struct smd_channel *smd_get_channel(const char *name, uint32_t type)
+{
+ struct smd_channel *ch;
+
+ mutex_lock(&smd_creation_mutex);
+ list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
+ if (!strcmp(name, ch->name) &&
+ (type == ch->type)) {
+ list_del(&ch->ch_list);
+ mutex_unlock(&smd_creation_mutex);
+ return ch;
+ }
+ }
+ mutex_unlock(&smd_creation_mutex);
+
+ return NULL;
+}
+
+int smd_named_open_on_edge(const char *name, uint32_t edge,
+ smd_channel_t **_ch,
+ void *priv, void (*notify)(void *, unsigned))
+{
+ struct smd_channel *ch;
+ unsigned long flags;
+
+ if (!smd_edge_inited(edge)) {
+ SMD_INFO("smd_open() before smd_init()\n");
+ return -EPROBE_DEFER;
+ }
+
+ SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);
+
+ ch = smd_get_channel(name, edge);
+ if (!ch) {
+ spin_lock_irqsave(&smd_lock, flags);
+ /* check opened list for port */
+ list_for_each_entry(ch,
+ &remote_info[edge_to_pids[edge].remote_pid].ch_list,
+ ch_list) {
+ if (!strcmp(name, ch->name)) {
+ /* channel is already open */
+ spin_unlock_irqrestore(&smd_lock, flags);
+ SMD_DBG("smd_open: channel '%s' already open\n",
+ ch->name);
+ return -EBUSY;
+ }
+ }
+
+ /* check closing list for port */
+ list_for_each_entry(ch, &smd_ch_closing_list, ch_list) {
+ if (!strncmp(name, ch->name, 20) &&
+ (edge == ch->type)) {
+ /* channel exists, but is being closed */
+ spin_unlock_irqrestore(&smd_lock, flags);
+ return -EAGAIN;
+ }
+ }
+
+ /* check closing workqueue list for port */
+ list_for_each_entry(ch, &smd_ch_to_close_list, ch_list) {
+ if (!strncmp(name, ch->name, 20) &&
+ (edge == ch->type)) {
+ /* channel exists, but is being closed */
+ spin_unlock_irqrestore(&smd_lock, flags);
+ return -EAGAIN;
+ }
+ }
+ spin_unlock_irqrestore(&smd_lock, flags);
+
+ /* one final check to handle closing->closed race condition */
+ ch = smd_get_channel(name, edge);
+ if (!ch)
+ return -ENODEV;
+ }
+
+ if (notify == 0)
+ notify = do_nothing_notify;
+
+ ch->notify = notify;
+ ch->current_packet = 0;
+ ch->last_state = SMD_SS_CLOSED;
+ ch->priv = priv;
+
+ *_ch = ch;
+
+ SMD_DBG("smd_open: opening '%s'\n", ch->name);
+
+ spin_lock_irqsave(&smd_lock, flags);
+ list_add(&ch->ch_list,
+ &remote_info[edge_to_pids[ch->type].remote_pid].ch_list);
+
+ SMD_DBG("%s: opening ch %d\n", __func__, ch->n);
+
+ smd_state_change(ch, ch->last_state, SMD_SS_OPENING);
+
+ spin_unlock_irqrestore(&smd_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(smd_named_open_on_edge);
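+
+/*
+ * Illustrative client usage (not part of this driver; the names below
+ * are hypothetical): open a named channel on an edge and wait for
+ * SMD_EVENT_OPEN in the notify callback before transferring data.
+ *
+ * static void my_notify(void *priv, unsigned event)
+ * {
+ * if (event == SMD_EVENT_OPEN)
+ * complete(&my_open_done);
+ * }
+ *
+ * err = smd_named_open_on_edge("DATA5", SMD_APPS_MODEM, &my_ch,
+ * my_priv, my_notify);
+ * if (err == -EPROBE_DEFER)
+ * ... edge not initialized yet; retry later from probe ...
+ */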
+
+int smd_close(smd_channel_t *ch)
+{
+ unsigned long flags;
+
+ if (ch == 0)
+ return -EINVAL;
+
+ SMD_INFO("smd_close(%s)\n", ch->name);
+
+ spin_lock_irqsave(&smd_lock, flags);
+ list_del(&ch->ch_list);
+
+ ch_set_state(ch, SMD_SS_CLOSED);
+
+ if (ch->half_ch->get_state(ch->recv) == SMD_SS_OPENED) {
+ list_add(&ch->ch_list, &smd_ch_closing_list);
+ spin_unlock_irqrestore(&smd_lock, flags);
+ } else {
+ spin_unlock_irqrestore(&smd_lock, flags);
+ ch->notify = do_nothing_notify;
+ mutex_lock(&smd_creation_mutex);
+ list_add(&ch->ch_list, &smd_ch_closed_list);
+ mutex_unlock(&smd_creation_mutex);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(smd_close);
+
+int smd_write_start(smd_channel_t *ch, int len)
+{
+ int ret;
+ unsigned hdr[5];
+
+ if (!ch) {
+ pr_err("%s: Invalid channel specified\n", __func__);
+ return -ENODEV;
+ }
+ if (!ch->is_pkt_ch) {
+ pr_err("%s: non-packet channel specified\n", __func__);
+ return -EACCES;
+ }
+ if (len < 1) {
+ pr_err("%s: invalid length: %d\n", __func__, len);
+ return -EINVAL;
+ }
+
+ if (ch->pending_pkt_sz) {
+ pr_err("%s: packet of size: %d in progress\n", __func__,
+ ch->pending_pkt_sz);
+ return -EBUSY;
+ }
+ ch->pending_pkt_sz = len;
+
+ if (smd_stream_write_avail(ch) < (SMD_HEADER_SIZE)) {
+ ch->pending_pkt_sz = 0;
+ SMD_DBG("%s: no space to write packet header\n", __func__);
+ return -EAGAIN;
+ }
+
+ hdr[0] = len;
+ hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
+
+ ret = smd_stream_write(ch, hdr, sizeof(hdr), 0, true);
+ if (ret < 0 || ret != sizeof(hdr)) {
+ ch->pending_pkt_sz = 0;
+ pr_err("%s: packet header failed to write\n", __func__);
+ return -EPERM;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(smd_write_start);
+
+int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
+{
+ int bytes_written;
+
+ if (!ch) {
+ pr_err("%s: Invalid channel specified\n", __func__);
+ return -ENODEV;
+ }
+ if (len < 1) {
+ pr_err("%s: invalid length: %d\n", __func__, len);
+ return -EINVAL;
+ }
+
+ if (!ch->pending_pkt_sz) {
+ pr_err("%s: no transaction in progress\n", __func__);
+ return -ENOEXEC;
+ }
+ if (len > ch->pending_pkt_sz) {
+ pr_err("%s: segment of size: %d will make packet go over length\n",
+ __func__, len);
+ return -EINVAL;
+ }
+
+ bytes_written = smd_stream_write(ch, data, len, user_buf, true);
+
+ ch->pending_pkt_sz -= bytes_written;
+
+ return bytes_written;
+}
+EXPORT_SYMBOL(smd_write_segment);
+
+int smd_write_end(smd_channel_t *ch)
+{
+ if (!ch) {
+ pr_err("%s: Invalid channel specified\n", __func__);
+ return -ENODEV;
+ }
+ if (ch->pending_pkt_sz) {
+ pr_err("%s: current packet not completely written\n", __func__);
+ return -E2BIG;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(smd_write_end);
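+
+/*
+ * Illustrative sketch of the segmented-write sequence (hypothetical
+ * buffer names, minimal error handling): smd_write_start() commits the
+ * packet header, the payload is streamed in pieces as FIFO space
+ * allows, and smd_write_end() confirms the full length was written. A
+ * real client waits for SMD_EVENT_DATA between segments when
+ * smd_write_segment_avail() reports no space.
+ *
+ * err = smd_write_start(ch, total_len);
+ * while (!err && written < total_len) {
+ * n = smd_write_segment(ch, buf + written,
+ * total_len - written, 0);
+ * if (n < 0)
+ * break;
+ * written += n;
+ * }
+ * err = smd_write_end(ch);
+ */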
+
+int smd_write_segment_avail(smd_channel_t *ch)
+{
+ int n;
+
+ if (!ch) {
+ pr_err("%s: Invalid channel specified\n", __func__);
+ return -ENODEV;
+ }
+ if (!ch->is_pkt_ch) {
+ pr_err("%s: non-packet channel specified\n", __func__);
+ return -ENODEV;
+ }
+
+ n = smd_stream_write_avail(ch);
+
+ /* pkt hdr already written, no need to reserve space for it */
+ if (ch->pending_pkt_sz)
+ return n;
+
+ return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
+}
+EXPORT_SYMBOL(smd_write_segment_avail);
+
+int smd_read(smd_channel_t *ch, void *data, int len)
+{
+ if (!ch) {
+ pr_err("%s: Invalid channel specified\n", __func__);
+ return -ENODEV;
+ }
+
+ return ch->read(ch, data, len, 0);
+}
+EXPORT_SYMBOL(smd_read);
+
+int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
+{
+ if (!ch) {
+ pr_err("%s: Invalid channel specified\n", __func__);
+ return -ENODEV;
+ }
+
+ return ch->read(ch, data, len, 1);
+}
+EXPORT_SYMBOL(smd_read_user_buffer);
+
+int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
+{
+ if (!ch) {
+ pr_err("%s: Invalid channel specified\n", __func__);
+ return -ENODEV;
+ }
+
+ return ch->read_from_cb(ch, data, len, 0);
+}
+EXPORT_SYMBOL(smd_read_from_cb);
+
+int smd_write(smd_channel_t *ch, const void *data, int len)
+{
+ if (!ch) {
+ pr_err("%s: Invalid channel specified\n", __func__);
+ return -ENODEV;
+ }
+
+ return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 0, true);
+}
+EXPORT_SYMBOL(smd_write);
+
+int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
+{
+ if (!ch) {
+ pr_err("%s: Invalid channel specified\n", __func__);
+ return -ENODEV;
+ }
+
+ return ch->pending_pkt_sz ? -EBUSY : ch->write(ch, data, len, 1, true);
+}
+EXPORT_SYMBOL(smd_write_user_buffer);
+
+int smd_read_avail(smd_channel_t *ch)
+{
+ if (!ch) {
+ pr_err("%s: Invalid channel specified\n", __func__);
+ return -ENODEV;
+ }
+
+ if (ch->current_packet > (uint32_t)INT_MAX) {
+ pr_err("%s: Invalid packet size for Edge %d and Channel %s",
+ __func__, ch->type, ch->name);
+ return -EFAULT;
+ }
+ return ch->read_avail(ch);
+}
+EXPORT_SYMBOL(smd_read_avail);
+
+int smd_write_avail(smd_channel_t *ch)
+{
+ if (!ch) {
+ pr_err("%s: Invalid channel specified\n", __func__);
+ return -ENODEV;
+ }
+
+ return ch->write_avail(ch);
+}
+EXPORT_SYMBOL(smd_write_avail);
+
+void smd_enable_read_intr(smd_channel_t *ch)
+{
+ if (ch)
+ ch->half_ch->set_fBLOCKREADINTR(ch->send, 0);
+}
+EXPORT_SYMBOL(smd_enable_read_intr);
+
+void smd_disable_read_intr(smd_channel_t *ch)
+{
+ if (ch)
+ ch->half_ch->set_fBLOCKREADINTR(ch->send, 1);
+}
+EXPORT_SYMBOL(smd_disable_read_intr);
+
+/**
+ * smd_mask_receive_interrupt() - Enable/disable receive interrupts for the
+ * remote processor used by a particular channel
+ * @ch: open channel handle to use for the edge
+ * @mask: 1 = mask interrupts; 0 = unmask interrupts
+ * @cpumask: cpumask for the next cpu scheduled to be woken up
+ * @returns: 0 for success; < 0 for failure
+ *
+ * Note that this enables/disables all interrupts from the remote subsystem for
+ * all channels. As such, it should be used with care and only for specific
+ * use cases such as power-collapse sequencing.
+ */
+int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask,
+ const struct cpumask *cpumask)
+{
+ struct irq_chip *irq_chip;
+ struct irq_data *irq_data;
+ struct interrupt_config_item *int_cfg;
+
+ if (!ch)
+ return -EINVAL;
+
+ if (ch->type >= ARRAY_SIZE(edge_to_pids))
+ return -ENODEV;
+
+ int_cfg = &private_intr_config[edge_to_pids[ch->type].remote_pid].smd;
+
+ if (int_cfg->irq_id < 0)
+ return -ENODEV;
+
+ irq_chip = irq_get_chip(int_cfg->irq_id);
+ if (!irq_chip)
+ return -ENODEV;
+
+ irq_data = irq_get_irq_data(int_cfg->irq_id);
+ if (!irq_data)
+ return -ENODEV;
+
+ if (mask) {
+ SMD_POWER_INFO("SMD Masking interrupts from %s\n",
+ edge_to_pids[ch->type].subsys_name);
+ irq_chip->irq_mask(irq_data);
+ if (cpumask)
+ irq_set_affinity(int_cfg->irq_id, cpumask);
+ } else {
+ SMD_POWER_INFO("SMD Unmasking interrupts from %s\n",
+ edge_to_pids[ch->type].subsys_name);
+ irq_chip->irq_unmask(irq_data);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(smd_mask_receive_interrupt);
+
+int smd_cur_packet_size(smd_channel_t *ch)
+{
+ if (!ch) {
+ pr_err("%s: Invalid channel specified\n", __func__);
+ return -ENODEV;
+ }
+
+ if (ch->current_packet > (uint32_t)INT_MAX) {
+ pr_err("%s: Invalid packet size for Edge %d and Channel %s",
+ __func__, ch->type, ch->name);
+ return -EFAULT;
+ }
+ return ch->current_packet;
+}
+EXPORT_SYMBOL(smd_cur_packet_size);
+
+int smd_tiocmget(smd_channel_t *ch)
+{
+ if (!ch) {
+ pr_err("%s: Invalid channel specified\n", __func__);
+ return -ENODEV;
+ }
+
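+ /*
+ * Incoming modem signals are taken from the remote half of the
+ * channel; our own transmitted fCTS/fDSR are reflected back as
+ * RTS/DTR.
+ */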
+ return (ch->half_ch->get_fDSR(ch->recv) ? TIOCM_DSR : 0) |
+ (ch->half_ch->get_fCTS(ch->recv) ? TIOCM_CTS : 0) |
+ (ch->half_ch->get_fCD(ch->recv) ? TIOCM_CD : 0) |
+ (ch->half_ch->get_fRI(ch->recv) ? TIOCM_RI : 0) |
+ (ch->half_ch->get_fCTS(ch->send) ? TIOCM_RTS : 0) |
+ (ch->half_ch->get_fDSR(ch->send) ? TIOCM_DTR : 0);
+}
+EXPORT_SYMBOL(smd_tiocmget);
+
+/* this API will be called while holding smd_lock */
+int
+smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
+{
+ if (!ch) {
+ pr_err("%s: Invalid channel specified\n", __func__);
+ return -ENODEV;
+ }
+
+ if (set & TIOCM_DTR)
+ ch->half_ch->set_fDSR(ch->send, 1);
+
+ if (set & TIOCM_RTS)
+ ch->half_ch->set_fCTS(ch->send, 1);
+
+ if (clear & TIOCM_DTR)
+ ch->half_ch->set_fDSR(ch->send, 0);
+
+ if (clear & TIOCM_RTS)
+ ch->half_ch->set_fCTS(ch->send, 0);
+
+ ch->half_ch->set_fSTATE(ch->send, 1);
+ barrier();
+ ch->notify_other_cpu(ch);
+
+ return 0;
+}
+EXPORT_SYMBOL(smd_tiocmset_from_cb);
+
+int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
+{
+ unsigned long flags;
+
+ if (!ch) {
+ pr_err("%s: Invalid channel specified\n", __func__);
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&smd_lock, flags);
+ smd_tiocmset_from_cb(ch, set, clear);
+ spin_unlock_irqrestore(&smd_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(smd_tiocmset);
+
+int smd_is_pkt_avail(smd_channel_t *ch)
+{
+ unsigned long flags;
+
+ if (!ch || !ch->is_pkt_ch)
+ return -EINVAL;
+
+ if (ch->current_packet)
+ return 1;
+
+ spin_lock_irqsave(&smd_lock, flags);
+ update_packet_state(ch);
+ spin_unlock_irqrestore(&smd_lock, flags);
+
+ return ch->current_packet ? 1 : 0;
+}
+EXPORT_SYMBOL(smd_is_pkt_avail);
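+
+/*
+ * Illustrative read loop for a packet channel (hypothetical buffer;
+ * typically run from the client's SMD_EVENT_DATA callback): drain one
+ * complete packet at a time once it has fully arrived.
+ *
+ * while (smd_is_pkt_avail(ch) > 0) {
+ * int sz = smd_cur_packet_size(ch);
+ * if (sz <= 0 || sz > sizeof(rx_buf))
+ * break;
+ * if (smd_read_avail(ch) < sz)
+ * break; // wait for the rest of the packet
+ * smd_read(ch, rx_buf, sz);
+ * }
+ */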
+
+static int smsm_cb_init(void)
+{
+ struct smsm_state_info *state_info;
+ int n;
+ int ret = 0;
+
+ smsm_states = kmalloc(sizeof(struct smsm_state_info)*SMSM_NUM_ENTRIES,
+ GFP_KERNEL);
+
+ if (!smsm_states) {
+ pr_err("%s: SMSM init failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ smsm_cb_wq = create_singlethread_workqueue("smsm_cb_wq");
+ if (!smsm_cb_wq) {
+ pr_err("%s: smsm_cb_wq creation failed\n", __func__);
+ kfree(smsm_states);
+ return -EFAULT;
+ }
+
+ mutex_lock(&smsm_lock);
+ for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
+ state_info = &smsm_states[n];
+ state_info->last_value = __raw_readl(SMSM_STATE_ADDR(n));
+ state_info->intr_mask_set = 0x0;
+ state_info->intr_mask_clear = 0x0;
+ INIT_LIST_HEAD(&state_info->callbacks);
+ }
+ mutex_unlock(&smsm_lock);
+
+ return ret;
+}
+
+static int smsm_init(void)
+{
+ int i;
+ struct smsm_size_info_type *smsm_size_info;
+ unsigned long flags;
+ unsigned long j_start;
+ static int first = 1;
+ remote_spinlock_t *remote_spinlock;
+
+ if (!first)
+ return 0;
+ first = 0;
+
+ /* Verify that remote spinlock is not deadlocked */
+ remote_spinlock = smem_get_remote_spinlock();
+ j_start = jiffies;
+ while (!remote_spin_trylock_irqsave(remote_spinlock, flags)) {
+ if (jiffies_to_msecs(jiffies - j_start) > RSPIN_INIT_WAIT_MS) {
+ panic("%s: Remote processor %d will not release spinlock\n",
+ __func__, remote_spin_owner(remote_spinlock));
+ }
+ }
+ remote_spin_unlock_irqrestore(remote_spinlock, flags);
+
+ smsm_size_info = smem_find(SMEM_SMSM_SIZE_INFO,
+ sizeof(struct smsm_size_info_type), 0,
+ SMEM_ANY_HOST_FLAG);
+ if (smsm_size_info) {
+ SMSM_NUM_ENTRIES = smsm_size_info->num_entries;
+ SMSM_NUM_HOSTS = smsm_size_info->num_hosts;
+ }
+
+ i = kfifo_alloc(&smsm_snapshot_fifo,
+ sizeof(uint32_t) * SMSM_NUM_ENTRIES * SMSM_SNAPSHOT_CNT,
+ GFP_KERNEL);
+ if (i) {
+ pr_err("%s: SMSM state fifo alloc failed %d\n", __func__, i);
+ return i;
+ }
+ wakeup_source_init(&smsm_snapshot_ws, "smsm_snapshot");
+
+ if (!smsm_info.state) {
+ smsm_info.state = smem_alloc(ID_SHARED_STATE,
+ SMSM_NUM_ENTRIES *
+ sizeof(uint32_t), 0,
+ SMEM_ANY_HOST_FLAG);
+
+ if (smsm_info.state)
+ __raw_writel(0, SMSM_STATE_ADDR(SMSM_APPS_STATE));
+ }
+
+ if (!smsm_info.intr_mask) {
+ smsm_info.intr_mask = smem_alloc(SMEM_SMSM_CPU_INTR_MASK,
+ SMSM_NUM_ENTRIES *
+ SMSM_NUM_HOSTS *
+ sizeof(uint32_t), 0,
+ SMEM_ANY_HOST_FLAG);
+
+ if (smsm_info.intr_mask) {
+ for (i = 0; i < SMSM_NUM_ENTRIES; i++)
+ __raw_writel(0x0,
+ SMSM_INTR_MASK_ADDR(i, SMSM_APPS));
+
+ /* Configure legacy modem bits */
+ __raw_writel(LEGACY_MODEM_SMSM_MASK,
+ SMSM_INTR_MASK_ADDR(SMSM_MODEM_STATE,
+ SMSM_APPS));
+ }
+ }
+
+ i = smsm_cb_init();
+ if (i)
+ return i;
+
+ wmb();
+
+ smsm_pm_notifier(&smsm_pm_nb, PM_POST_SUSPEND, NULL);
+ i = register_pm_notifier(&smsm_pm_nb);
+ if (i)
+ pr_err("%s: power state notif error %d\n", __func__, i);
+
+ return 0;
+}
+
+static void smsm_cb_snapshot(uint32_t use_wakeup_source)
+{
+ int n;
+ uint32_t new_state;
+ unsigned long flags;
+ int ret;
+ uint64_t timestamp;
+
+ timestamp = sched_clock();
+ ret = kfifo_avail(&smsm_snapshot_fifo);
+ if (ret < SMSM_SNAPSHOT_SIZE) {
+ pr_err("%s: SMSM snapshot full %d\n", __func__, ret);
+ return;
+ }
+
+ /*
+ * To avoid a race condition with notify_smsm_cb_clients_worker, the
+ * following sequence must be followed:
+ * 1) increment snapshot count
+ * 2) insert data into FIFO
+ *
+ * Potentially in parallel, the worker:
+ * a) verifies >= 1 snapshots are in FIFO
+ * b) processes snapshot
+ * c) decrements reference count
+ *
+ * This order ensures that 1 will always occur before abc.
+ */
+ if (use_wakeup_source) {
+ spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
+ if (smsm_snapshot_count == 0) {
+ SMSM_POWER_INFO("SMSM snapshot wake lock\n");
+ __pm_stay_awake(&smsm_snapshot_ws);
+ }
+ ++smsm_snapshot_count;
+ spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
+ }
+
+ /* queue state entries */
+ for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
+ new_state = __raw_readl(SMSM_STATE_ADDR(n));
+
+ ret = kfifo_in(&smsm_snapshot_fifo,
+ &new_state, sizeof(new_state));
+ if (ret != sizeof(new_state)) {
+ pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
+ goto restore_snapshot_count;
+ }
+ }
+
+ ret = kfifo_in(&smsm_snapshot_fifo, &timestamp, sizeof(timestamp));
+ if (ret != sizeof(timestamp)) {
+ pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
+ goto restore_snapshot_count;
+ }
+
+ /* queue wakelock usage flag */
+ ret = kfifo_in(&smsm_snapshot_fifo,
+ &use_wakeup_source, sizeof(use_wakeup_source));
+ if (ret != sizeof(use_wakeup_source)) {
+ pr_err("%s: SMSM snapshot failure %d\n", __func__, ret);
+ goto restore_snapshot_count;
+ }
+
+ queue_work(smsm_cb_wq, &smsm_cb_work);
+ return;
+
+restore_snapshot_count:
+ if (use_wakeup_source) {
+ spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
+ if (smsm_snapshot_count) {
+ --smsm_snapshot_count;
+ if (smsm_snapshot_count == 0) {
+ SMSM_POWER_INFO("SMSM snapshot wake unlock\n");
+ __pm_relax(&smsm_snapshot_ws);
+ }
+ } else {
+ pr_err("%s: invalid snapshot count\n", __func__);
+ }
+ spin_unlock_irqrestore(&smsm_snapshot_count_lock, flags);
+ }
+}
+
+static irqreturn_t smsm_irq_handler(int irq, void *data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&smem_lock, flags);
+ if (!smsm_info.state) {
+ SMSM_INFO("<SM NO STATE>\n");
+ } else {
+ unsigned old_apps, apps;
+ unsigned modm = __raw_readl(SMSM_STATE_ADDR(SMSM_MODEM_STATE));
+
+ old_apps = apps = __raw_readl(SMSM_STATE_ADDR(SMSM_APPS_STATE));
+
+ SMSM_DBG("<SM %08x %08x>\n", apps, modm);
+ if (modm & SMSM_RESET) {
+ pr_err("\nSMSM: Modem SMSM state changed to SMSM_RESET.");
+ } else if (modm & SMSM_INIT) {
+ if (!(apps & SMSM_INIT))
+ apps |= SMSM_INIT;
+ if (modm & SMSM_SMDINIT)
+ apps |= SMSM_SMDINIT;
+ }
+
+ if (old_apps != apps) {
+ SMSM_DBG("<SM %08x NOTIFY>\n", apps);
+ __raw_writel(apps, SMSM_STATE_ADDR(SMSM_APPS_STATE));
+ notify_other_smsm(SMSM_APPS_STATE, (old_apps ^ apps));
+ }
+
+ smsm_cb_snapshot(1);
+ }
+ spin_unlock_irqrestore(&smem_lock, flags);
+ return IRQ_HANDLED;
+}
+
+irqreturn_t smsm_modem_irq_handler(int irq, void *data)
+{
+ SMSM_POWER_INFO("SMSM Int Modem->Apps\n");
+ ++interrupt_stats[SMD_MODEM].smsm_in_count;
+ return smsm_irq_handler(irq, data);
+}
+
+irqreturn_t smsm_dsp_irq_handler(int irq, void *data)
+{
+ SMSM_POWER_INFO("SMSM Int LPASS->Apps\n");
+ ++interrupt_stats[SMD_Q6].smsm_in_count;
+ return smsm_irq_handler(irq, data);
+}
+
+irqreturn_t smsm_dsps_irq_handler(int irq, void *data)
+{
+ SMSM_POWER_INFO("SMSM Int DSPS->Apps\n");
+ ++interrupt_stats[SMD_DSPS].smsm_in_count;
+ return smsm_irq_handler(irq, data);
+}
+
+irqreturn_t smsm_wcnss_irq_handler(int irq, void *data)
+{
+ SMSM_POWER_INFO("SMSM Int WCNSS->Apps\n");
+ ++interrupt_stats[SMD_WCNSS].smsm_in_count;
+ return smsm_irq_handler(irq, data);
+}
+
+/*
+ * Changes the global interrupt mask. The set and clear masks are re-applied
+ * every time the global interrupt mask is updated for callback registration
+ * and de-registration.
+ *
+ * The clear mask is applied first, so if a bit is set to 1 in both the clear
+ * mask and the set mask, the result will be that the interrupt is set.
+ *
+ * @smsm_entry SMSM entry to change
+ * @clear_mask 1 = clear bit, 0 = no-op
+ * @set_mask 1 = set bit, 0 = no-op
+ *
+ * @returns 0 for success, < 0 for error
+ */
+int smsm_change_intr_mask(uint32_t smsm_entry,
+ uint32_t clear_mask, uint32_t set_mask)
+{
+ uint32_t old_mask, new_mask;
+ unsigned long flags;
+
+ if (smsm_entry >= SMSM_NUM_ENTRIES) {
+ pr_err("smsm_change_state: Invalid entry %d\n",
+ smsm_entry);
+ return -EINVAL;
+ }
+
+ if (!smsm_info.intr_mask) {
+ pr_err("smsm_change_intr_mask <SM NO STATE>\n");
+ return -EIO;
+ }
+
+ spin_lock_irqsave(&smem_lock, flags);
+ smsm_states[smsm_entry].intr_mask_clear = clear_mask;
+ smsm_states[smsm_entry].intr_mask_set = set_mask;
+
+ old_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
+ new_mask = (old_mask & ~clear_mask) | set_mask;
+ __raw_writel(new_mask, SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
+
+ wmb();
+ spin_unlock_irqrestore(&smem_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(smsm_change_intr_mask);
+
+int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
+{
+ if (smsm_entry >= SMSM_NUM_ENTRIES) {
+ pr_err("smsm_change_state: Invalid entry %d\n",
+ smsm_entry);
+ return -EINVAL;
+ }
+
+ if (!smsm_info.intr_mask) {
+ pr_err("smsm_change_intr_mask <SM NO STATE>\n");
+ return -EIO;
+ }
+
+ *intr_mask = __raw_readl(SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
+ return 0;
+}
+EXPORT_SYMBOL(smsm_get_intr_mask);
+
+int smsm_change_state(uint32_t smsm_entry,
+ uint32_t clear_mask, uint32_t set_mask)
+{
+ unsigned long flags;
+ uint32_t old_state, new_state;
+
+ if (smsm_entry >= SMSM_NUM_ENTRIES) {
+ pr_err("smsm_change_state: Invalid entry %d",
+ smsm_entry);
+ return -EINVAL;
+ }
+
+ if (!smsm_info.state) {
+ pr_err("smsm_change_state <SM NO STATE>\n");
+ return -EIO;
+ }
+ spin_lock_irqsave(&smem_lock, flags);
+
+ old_state = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
+ new_state = (old_state & ~clear_mask) | set_mask;
+ __raw_writel(new_state, SMSM_STATE_ADDR(smsm_entry));
+ SMSM_POWER_INFO("%s %d:%08x->%08x", __func__, smsm_entry,
+ old_state, new_state);
+ notify_other_smsm(SMSM_APPS_STATE, (old_state ^ new_state));
+
+ spin_unlock_irqrestore(&smem_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(smsm_change_state);
+
+uint32_t smsm_get_state(uint32_t smsm_entry)
+{
+ uint32_t rv = 0;
+
+ /* needs interface change to return error code */
+ if (smsm_entry >= SMSM_NUM_ENTRIES) {
+ pr_err("smsm_change_state: Invalid entry %d",
+ smsm_entry);
+ return 0;
+ }
+
+ if (!smsm_info.state)
+ pr_err("smsm_get_state <SM NO STATE>\n");
+ else
+ rv = __raw_readl(SMSM_STATE_ADDR(smsm_entry));
+
+ return rv;
+}
+EXPORT_SYMBOL(smsm_get_state);
+
+/**
+ * Performs SMSM callback client notification.
+ */
+void notify_smsm_cb_clients_worker(struct work_struct *work)
+{
+ struct smsm_state_cb_info *cb_info;
+ struct smsm_state_info *state_info;
+ int n;
+ uint32_t new_state;
+ uint32_t state_changes;
+ uint32_t use_wakeup_source;
+ int ret;
+ unsigned long flags;
+ uint64_t t_snapshot;
+ uint64_t t_start;
+ unsigned long nanosec_rem;
+
+ while (kfifo_len(&smsm_snapshot_fifo) >= SMSM_SNAPSHOT_SIZE) {
+ t_start = sched_clock();
+ mutex_lock(&smsm_lock);
+ for (n = 0; n < SMSM_NUM_ENTRIES; n++) {
+ state_info = &smsm_states[n];
+
+ ret = kfifo_out(&smsm_snapshot_fifo, &new_state,
+ sizeof(new_state));
+ if (ret != sizeof(new_state)) {
+ pr_err("%s: snapshot underflow %d\n",
+ __func__, ret);
+ mutex_unlock(&smsm_lock);
+ return;
+ }
+
+ state_changes = state_info->last_value ^ new_state;
+ if (state_changes) {
+ SMSM_POWER_INFO("SMSM Change %d: %08x->%08x\n",
+ n, state_info->last_value,
+ new_state);
+ list_for_each_entry(cb_info,
+ &state_info->callbacks, cb_list) {
+
+ if (cb_info->mask & state_changes)
+ cb_info->notify(cb_info->data,
+ state_info->last_value,
+ new_state);
+ }
+ state_info->last_value = new_state;
+ }
+ }
+
+ ret = kfifo_out(&smsm_snapshot_fifo, &t_snapshot,
+ sizeof(t_snapshot));
+ if (ret != sizeof(t_snapshot)) {
+ pr_err("%s: snapshot underflow %d\n",
+ __func__, ret);
+ mutex_unlock(&smsm_lock);
+ return;
+ }
+
+ /* read wakelock flag */
+ ret = kfifo_out(&smsm_snapshot_fifo, &use_wakeup_source,
+ sizeof(use_wakeup_source));
+ if (ret != sizeof(use_wakeup_source)) {
+ pr_err("%s: snapshot underflow %d\n",
+ __func__, ret);
+ mutex_unlock(&smsm_lock);
+ return;
+ }
+ mutex_unlock(&smsm_lock);
+
+ if (use_wakeup_source) {
+ spin_lock_irqsave(&smsm_snapshot_count_lock, flags);
+ if (smsm_snapshot_count) {
+ --smsm_snapshot_count;
+ if (smsm_snapshot_count == 0) {
+ SMSM_POWER_INFO(
+ "SMSM snapshot wake unlock\n");
+ __pm_relax(&smsm_snapshot_ws);
+ }
+ } else {
+ pr_err("%s: invalid snapshot count\n",
+ __func__);
+ }
+ spin_unlock_irqrestore(&smsm_snapshot_count_lock,
+ flags);
+ }
+
+ t_start = t_start - t_snapshot;
+ nanosec_rem = do_div(t_start, 1000000000U);
+ SMSM_POWER_INFO(
+ "SMSM snapshot queue response time %6u.%09lu s\n",
+ (unsigned)t_start, nanosec_rem);
+ }
+}
+
+/**
+ * Registers callback for SMSM state notifications when the specified
+ * bits change.
+ *
+ * @smsm_entry Processor entry to register
+ * @mask Bits to register (callback fires when a registered bit changes)
+ * @notify Notification function to register
+ * @data Opaque data passed in to callback
+ *
+ * @returns Status code
+ * <0 error code
+ * 0 inserted new entry
+ * 1 updated mask of existing entry
+ */
+int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
+ void (*notify)(void *, uint32_t, uint32_t), void *data)
+{
+ struct smsm_state_info *state;
+ struct smsm_state_cb_info *cb_info;
+ struct smsm_state_cb_info *cb_found = 0;
+ uint32_t new_mask = 0;
+ int ret = 0;
+
+ if (smsm_entry >= SMSM_NUM_ENTRIES)
+ return -EINVAL;
+
+ mutex_lock(&smsm_lock);
+
+ if (!smsm_states) {
+ /* smsm not yet initialized */
+ ret = -ENODEV;
+ goto cleanup;
+ }
+
+ state = &smsm_states[smsm_entry];
+ list_for_each_entry(cb_info,
+ &state->callbacks, cb_list) {
+ if (!ret && (cb_info->notify == notify) &&
+ (cb_info->data == data)) {
+ cb_info->mask |= mask;
+ cb_found = cb_info;
+ ret = 1;
+ }
+ new_mask |= cb_info->mask;
+ }
+
+ if (!cb_found) {
+ cb_info = kmalloc(sizeof(struct smsm_state_cb_info),
+ GFP_ATOMIC);
+ if (!cb_info) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ cb_info->mask = mask;
+ cb_info->notify = notify;
+ cb_info->data = data;
+ INIT_LIST_HEAD(&cb_info->cb_list);
+ list_add_tail(&cb_info->cb_list,
+ &state->callbacks);
+ new_mask |= mask;
+ }
+
+ /* update interrupt notification mask */
+ if (smsm_entry == SMSM_MODEM_STATE)
+ new_mask |= LEGACY_MODEM_SMSM_MASK;
+
+ if (smsm_info.intr_mask) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&smem_lock, flags);
+ new_mask = (new_mask & ~state->intr_mask_clear)
+ | state->intr_mask_set;
+ __raw_writel(new_mask,
+ SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
+ wmb();
+ spin_unlock_irqrestore(&smem_lock, flags);
+ }
+
+cleanup:
+ mutex_unlock(&smsm_lock);
+ return ret;
+}
+EXPORT_SYMBOL(smsm_state_cb_register);
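+
+/*
+ * Illustrative registration (hypothetical handler and work item): watch
+ * the modem entry for SMSM_RESET transitions. The callback receives the
+ * previous and new state words; deregister with the same notify/data
+ * pair via smsm_state_cb_deregister().
+ *
+ * static void my_smsm_cb(void *data, uint32_t old_state,
+ * uint32_t new_state)
+ * {
+ * if (new_state & SMSM_RESET)
+ * schedule_work(&my_recovery_work);
+ * }
+ *
+ * smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_RESET,
+ * my_smsm_cb, my_data);
+ */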
+
+/**
+ * Deregisters for SMSM state notifications for the specified bits.
+ *
+ * @smsm_entry Processor entry to deregister
+ * @mask Bits to deregister (if result is 0, callback is removed)
+ * @notify Notification function to deregister
+ * @data Opaque data passed in to callback
+ *
+ * @returns Status code
+ * <0 error code
+ * 0 not found
+ * 1 updated mask
+ * 2 removed callback
+ */
+int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
+ void (*notify)(void *, uint32_t, uint32_t), void *data)
+{
+ struct smsm_state_cb_info *cb_info;
+ struct smsm_state_cb_info *cb_tmp;
+ struct smsm_state_info *state;
+ uint32_t new_mask = 0;
+ int ret = 0;
+
+ if (smsm_entry >= SMSM_NUM_ENTRIES)
+ return -EINVAL;
+
+ mutex_lock(&smsm_lock);
+
+ if (!smsm_states) {
+ /* smsm not yet initialized */
+ mutex_unlock(&smsm_lock);
+ return -ENODEV;
+ }
+
+ state = &smsm_states[smsm_entry];
+ list_for_each_entry_safe(cb_info, cb_tmp,
+ &state->callbacks, cb_list) {
+ if (!ret && (cb_info->notify == notify) &&
+ (cb_info->data == data)) {
+ cb_info->mask &= ~mask;
+ ret = 1;
+ if (!cb_info->mask) {
+ /* no mask bits set, remove callback */
+ list_del(&cb_info->cb_list);
+ kfree(cb_info);
+ ret = 2;
+ continue;
+ }
+ }
+ new_mask |= cb_info->mask;
+ }
+
+ /* update interrupt notification mask */
+ if (smsm_entry == SMSM_MODEM_STATE)
+ new_mask |= LEGACY_MODEM_SMSM_MASK;
+
+ if (smsm_info.intr_mask) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&smem_lock, flags);
+ new_mask = (new_mask & ~state->intr_mask_clear)
+ | state->intr_mask_set;
+ __raw_writel(new_mask,
+ SMSM_INTR_MASK_ADDR(smsm_entry, SMSM_APPS));
+ wmb();
+ spin_unlock_irqrestore(&smem_lock, flags);
+ }
+
+ mutex_unlock(&smsm_lock);
+ return ret;
+}
+EXPORT_SYMBOL(smsm_state_cb_deregister);
+
+static int restart_notifier_cb(struct notifier_block *this,
+ unsigned long code,
+ void *data);
+
+static struct restart_notifier_block restart_notifiers[] = {
+ {SMD_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
+ {SMD_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
+ {SMD_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
+ {SMD_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
+ {SMD_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
+ {SMD_Q6, "adsp", .nb.notifier_call = restart_notifier_cb},
+};
+
+static int restart_notifier_cb(struct notifier_block *this,
+ unsigned long code,
+ void *data)
+{
+ remote_spinlock_t *remote_spinlock;
+
+ /*
+ * Some SMD or SMSM clients assume SMD/SMSM SSR handling will be
+ * done in the AFTER_SHUTDOWN level. If this ever changes, extra
+ * care should be taken to verify no clients are broken.
+ */
+ if (code == SUBSYS_AFTER_SHUTDOWN) {
+ struct restart_notifier_block *notifier;
+
+ notifier = container_of(this,
+ struct restart_notifier_block, nb);
+ SMD_INFO("%s: ssrestart for processor %d ('%s')\n",
+ __func__, notifier->processor,
+ notifier->name);
+
+ remote_spinlock = smem_get_remote_spinlock();
+ remote_spin_release(remote_spinlock, notifier->processor);
+ remote_spin_release_all(notifier->processor);
+
+ smd_channel_reset(notifier->processor);
+ }
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * smd_post_init() - SMD post initialization
+ * @remote_pid: remote pid that has been initialized
+ *
+ * This function is used by the device tree initialization to complete the SMD
+ * init sequence.
+ */
+void smd_post_init(unsigned remote_pid)
+{
+ smd_channel_probe_now(&remote_info[remote_pid]);
+}
+
+/**
+ * smsm_post_init() - SMSM post initialization
+ * @returns: 0 for success, standard Linux error code otherwise
+ *
+ * This function is used by the legacy and device tree initialization
+ * to complete the SMSM init sequence.
+ */
+int smsm_post_init(void)
+{
+ int ret;
+
+ ret = smsm_init();
+ if (ret) {
+ pr_err("smsm_init() failed ret = %d\n", ret);
+ return ret;
+ }
+ smsm_irq_handler(0, 0);
+
+ return ret;
+}
+
+/**
+ * smd_get_intr_config() - Get interrupt configuration structure
+ * @edge: edge type identifies local and remote processor
+ * @returns: pointer to interrupt configuration
+ *
+ * This function returns the interrupt configuration of remote processor
+ * based on the edge type.
+ */
+struct interrupt_config *smd_get_intr_config(uint32_t edge)
+{
+ if (edge >= ARRAY_SIZE(edge_to_pids))
+ return NULL;
+ return &private_intr_config[edge_to_pids[edge].remote_pid];
+}
+
+/**
+ * smd_edge_to_remote_pid() - Get the remote processor ID
+ * @edge: edge type identifies local and remote processor
+ * @returns: remote processor ID
+ *
+ * This function returns remote processor ID based on edge type.
+ */
+int smd_edge_to_remote_pid(uint32_t edge)
+{
+ if (edge >= ARRAY_SIZE(edge_to_pids))
+ return -EINVAL;
+ return edge_to_pids[edge].remote_pid;
+}
+
+/**
+ * smd_edge_to_local_pid() - Get the local processor ID
+ * @edge: edge type identifies local and remote processor
+ * @returns: local processor ID
+ *
+ * This function returns local processor ID based on edge type.
+ */
+int smd_edge_to_local_pid(uint32_t edge)
+{
+ if (edge >= ARRAY_SIZE(edge_to_pids))
+ return -EINVAL;
+ return edge_to_pids[edge].local_pid;
+}
+
+/**
+ * smd_proc_set_skip_pil() - Mark whether the indicated processor is loaded by PIL
+ * @pid: the processor id to mark
+ * @skip_pil: true if @pid cannot be loaded by PIL
+ */
+void smd_proc_set_skip_pil(unsigned pid, bool skip_pil)
+{
+ if (pid >= NUM_SMD_SUBSYSTEMS) {
+ pr_err("%s: invalid pid:%d\n", __func__, pid);
+ return;
+ }
+ remote_info[pid].skip_pil = skip_pil;
+}
+
+/**
+ * smd_set_edge_subsys_name() - Set the subsystem name
+ * @edge: edge type identifies local and remote processor
+ * @subsys_name: pointer to subsystem name
+ *
+ * This function is used to set the subsystem name for given edge type.
+ */
+void smd_set_edge_subsys_name(uint32_t edge, const char *subsys_name)
+{
+ if (edge < ARRAY_SIZE(edge_to_pids)) {
+ if (subsys_name)
+ strlcpy(edge_to_pids[edge].subsys_name,
+ subsys_name, SMD_MAX_CH_NAME_LEN);
+ else
+ strlcpy(edge_to_pids[edge].subsys_name,
+ "", SMD_MAX_CH_NAME_LEN);
+ } else {
+ pr_err("%s: Invalid edge type[%d]\n", __func__, edge);
+ }
+}
+
+/**
+ * smd_reset_all_edge_subsys_name() - Reset the subsystem name
+ *
+ * This function is used to reset the subsystem name of all edges in
+ * targets where configuration information is available through
+ * device tree.
+ */
+void smd_reset_all_edge_subsys_name(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(edge_to_pids); i++)
+ strlcpy(edge_to_pids[i].subsys_name,
+ "", sizeof(""));
+}
+
+/**
+ * smd_set_edge_initialized() - Set the edge initialized status
+ * @edge: edge type identifies local and remote processor
+ *
+ * This function sets the initialized variable based on edge type.
+ */
+void smd_set_edge_initialized(uint32_t edge)
+{
+ if (edge < ARRAY_SIZE(edge_to_pids))
+ edge_to_pids[edge].initialized = true;
+ else
+ pr_err("%s: Invalid edge type[%d]\n", __func__, edge);
+}
+
+/**
+ * smd_cfg_smd_intr() - Set the SMD interrupt configuration
+ * @proc: remote processor ID
+ * @mask: bit position in IRQ register
+ * @ptr: IRQ register
+ *
+ * This function is called in Legacy init sequence and used to set
+ * the SMD interrupt configurations for particular processor.
+ */
+void smd_cfg_smd_intr(uint32_t proc, uint32_t mask, void *ptr)
+{
+ private_intr_config[proc].smd.out_bit_pos = mask;
+ private_intr_config[proc].smd.out_base = ptr;
+ private_intr_config[proc].smd.out_offset = 0;
+}
+
+/**
+ * smd_cfg_smsm_intr() - Set the SMSM interrupt configuration
+ * @proc: remote processor ID
+ * @mask: bit position in IRQ register
+ * @ptr: IRQ register
+ *
+ * This function is called in Legacy init sequence and used to set
+ * the SMSM interrupt configurations for particular processor.
+ */
+void smd_cfg_smsm_intr(uint32_t proc, uint32_t mask, void *ptr)
+{
+ private_intr_config[proc].smsm.out_bit_pos = mask;
+ private_intr_config[proc].smsm.out_base = ptr;
+ private_intr_config[proc].smsm.out_offset = 0;
+}
+
+static __init int modem_restart_late_init(void)
+{
+ int i;
+ void *handle;
+ struct restart_notifier_block *nb;
+
+ for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
+ nb = &restart_notifiers[i];
+ handle = subsys_notif_register_notifier(nb->name, &nb->nb);
+ SMD_DBG("%s: registering notif for '%s', handle=%p\n",
+ __func__, nb->name, handle);
+ }
+
+ return 0;
+}
+late_initcall(modem_restart_late_init);
+
+int __init msm_smd_init(void)
+{
+ static bool registered;
+ int rc;
+ int i;
+
+ if (registered)
+ return 0;
+
+ smd_log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smd", 0);
+ if (!smd_log_ctx) {
+ pr_err("%s: unable to create SMD logging context\n", __func__);
+ msm_smd_debug_mask = 0;
+ }
+
+ smsm_log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smsm", 0);
+ if (!smsm_log_ctx) {
+ pr_err("%s: unable to create SMSM logging context\n", __func__);
+ msm_smd_debug_mask = 0;
+ }
+
+ registered = true;
+
+ for (i = 0; i < NUM_SMD_SUBSYSTEMS; ++i) {
+ remote_info[i].remote_pid = i;
+ remote_info[i].free_space = UINT_MAX;
+ INIT_WORK(&remote_info[i].probe_work, smd_channel_probe_worker);
+ INIT_LIST_HEAD(&remote_info[i].ch_list);
+ }
+
+ channel_close_wq = create_singlethread_workqueue("smd_channel_close");
+ if (!channel_close_wq) {
+ pr_err("%s: create_singlethread_workqueue ENOMEM\n", __func__);
+ return -ENOMEM;
+ }
+
+ rc = msm_smd_driver_register();
+ if (rc) {
+ pr_err("%s: msm_smd_driver register failed %d\n",
+ __func__, rc);
+ return rc;
+ }
+ return 0;
+}
+
+arch_initcall(msm_smd_init);
+
+MODULE_DESCRIPTION("MSM Shared Memory Core");
+MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/qcom/smd_debug.c b/drivers/soc/qcom/smd_debug.c
new file mode 100644
index 000000000000..67b24ccbd987
--- /dev/null
+++ b/drivers/soc/qcom/smd_debug.c
@@ -0,0 +1,423 @@
+/* drivers/soc/qcom/smd_debug.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/jiffies.h>
+
+#include <soc/qcom/smem.h>
+
+#include "smd_private.h"
+
+#if defined(CONFIG_DEBUG_FS)
+
+static char *chstate(unsigned n)
+{
+ switch (n) {
+ case SMD_SS_CLOSED:
+ return "CLOSED";
+ case SMD_SS_OPENING:
+ return "OPENING";
+ case SMD_SS_OPENED:
+ return "OPENED";
+ case SMD_SS_FLUSHING:
+ return "FLUSHING";
+ case SMD_SS_CLOSING:
+ return "CLOSING";
+ case SMD_SS_RESET:
+ return "RESET";
+ case SMD_SS_RESET_OPENING:
+ return "ROPENING";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static void debug_int_stats(struct seq_file *s)
+{
+ int subsys;
+ struct interrupt_stat *stats = interrupt_stats;
+ const char *subsys_name;
+
+ seq_puts(s,
+ " Subsystem | Interrupt ID | In | Out |\n");
+
+ for (subsys = 0; subsys < NUM_SMD_SUBSYSTEMS; ++subsys) {
+ subsys_name = smd_pid_to_subsystem(subsys);
+ if (!IS_ERR_OR_NULL(subsys_name)) {
+ seq_printf(s, "%-10s %4s | %9d | %9u | %9u |\n",
+ smd_pid_to_subsystem(subsys), "smd",
+ stats->smd_interrupt_id,
+ stats->smd_in_count,
+ stats->smd_out_count);
+
+ seq_printf(s, "%-10s %4s | %9d | %9u | %9u |\n",
+ smd_pid_to_subsystem(subsys), "smsm",
+ stats->smsm_interrupt_id,
+ stats->smsm_in_count,
+ stats->smsm_out_count);
+ }
+ ++stats;
+ }
+}
+
+static void debug_int_stats_reset(struct seq_file *s)
+{
+ int subsys;
+ struct interrupt_stat *stats = interrupt_stats;
+
+ seq_puts(s, "Resetting interrupt stats.\n");
+
+ for (subsys = 0; subsys < NUM_SMD_SUBSYSTEMS; ++subsys) {
+ stats->smd_in_count = 0;
+ stats->smd_out_count = 0;
+ stats->smsm_in_count = 0;
+ stats->smsm_out_count = 0;
+ ++stats;
+ }
+}
+
+/* NNV: revisit, it may not be smd version */
+static void debug_read_smd_version(struct seq_file *s)
+{
+ uint32_t *smd_ver;
+ uint32_t n, version;
+
+ smd_ver = smem_find(SMEM_VERSION_SMD, 32 * sizeof(uint32_t),
+ 0, SMEM_ANY_HOST_FLAG);
+
+ if (smd_ver)
+ for (n = 0; n < 32; n++) {
+ version = smd_ver[n];
+ seq_printf(s, "entry %d: %d.%d\n", n,
+ version >> 16,
+ version & 0xffff);
+ }
+}
+
+/**
+ * pid_to_str - Convert a numeric processor id value into a human readable
+ * string value.
+ *
+ * @pid: the processor id to convert
+ * @returns: a string representation of @pid
+ */
+static char *pid_to_str(int pid)
+{
+ switch (pid) {
+ case SMD_APPS:
+ return "APPS";
+ case SMD_MODEM:
+ return "MDMSW";
+ case SMD_Q6:
+ return "ADSP";
+ case SMD_TZ:
+ return "TZ";
+ case SMD_WCNSS:
+ return "WCNSS";
+ case SMD_MODEM_Q6_FW:
+ return "MDMFW";
+ case SMD_RPM:
+ return "RPM";
+ default:
+ return "???";
+ }
+}
+
+/**
+ * print_half_ch_state - Print the state of half of a SMD channel in a human
+ * readable format.
+ *
+ * @s: the sequential file to print to
+ * @half_ch: half of a SMD channel that should have its state printed
+ * @half_ch_funcs: the relevant channel access functions for @half_ch
+ * @size: size of the fifo in bytes associated with @half_ch
+ * @proc: the processor id that owns the part of the SMD channel associated with
+ * @half_ch
+ * @is_restricted: true if memory access is restricted
+ */
+static void print_half_ch_state(struct seq_file *s,
+ void *half_ch,
+ struct smd_half_channel_access *half_ch_funcs,
+ unsigned size,
+ int proc,
+ bool is_restricted)
+{
+ seq_printf(s, "%-5s|", pid_to_str(proc));
+
+ if (!is_restricted) {
+ seq_printf(s, "%-7s|0x%05X|0x%05X|0x%05X",
+ chstate(half_ch_funcs->get_state(half_ch)),
+ size,
+ half_ch_funcs->get_tail(half_ch),
+ half_ch_funcs->get_head(half_ch));
+ seq_printf(s, "|%c%c%c%c%c%c%c%c|0x%05X",
+ half_ch_funcs->get_fDSR(half_ch) ? 'D' : 'd',
+ half_ch_funcs->get_fCTS(half_ch) ? 'C' : 'c',
+ half_ch_funcs->get_fCD(half_ch) ? 'C' : 'c',
+ half_ch_funcs->get_fRI(half_ch) ? 'I' : 'i',
+ half_ch_funcs->get_fHEAD(half_ch) ? 'W' : 'w',
+ half_ch_funcs->get_fTAIL(half_ch) ? 'R' : 'r',
+ half_ch_funcs->get_fSTATE(half_ch) ? 'S' : 's',
+ half_ch_funcs->get_fBLOCKREADINTR(half_ch) ? 'B' : 'b',
+ (half_ch_funcs->get_head(half_ch) -
+ half_ch_funcs->get_tail(half_ch)) & (size - 1));
+ } else {
+ seq_puts(s, " Access Restricted");
+ }
+}
+
+/**
+ * smd_xfer_type_to_str - Convert a numeric transfer type value into a human
+ * readable string value.
+ *
+ * @xfer_type: the transfer type value to convert
+ * @returns: a string representation of @xfer_type
+ */
+static char *smd_xfer_type_to_str(uint32_t xfer_type)
+{
+ if (xfer_type == 1)
+ return "S"; /* streaming type */
+ else if (xfer_type == 2)
+ return "P"; /* packet type */
+ else
+ return "L"; /* legacy type */
+}
+
+/**
+ * print_smd_ch_table - Print the current state of every valid SMD channel in a
+ * specific SMD channel allocation table to a human
+ * readable formatted output.
+ *
+ * @s: the sequential file to print to
+ * @tbl: a valid pointer to the channel allocation table to print from
+ * @num_tbl_entries: total number of entries in the table referenced by @tbl
+ * @ch_base_id: the SMEM item id corresponding to the array of channel
+ * structures for the channels found in @tbl
+ * @fifo_base_id: the SMEM item id corresponding to the array of channel fifos
+ * for the channels found in @tbl
+ * @pid: processor id to use for any SMEM operations
+ * @flags: flags to use for any SMEM operations
+ */
+static void print_smd_ch_table(struct seq_file *s,
+ struct smd_alloc_elm *tbl,
+ unsigned num_tbl_entries,
+ unsigned ch_base_id,
+ unsigned fifo_base_id,
+ unsigned pid,
+ unsigned flags)
+{
+ void *half_ch;
+ unsigned half_ch_size;
+ uint32_t ch_type;
+ void *buffer;
+ unsigned buffer_size;
+ int n;
+ bool is_restricted;
+
+/*
+ * formatted, human readable channel state output, ie:
+ID|CHANNEL NAME |T|PROC |STATE |FIFO SZ|RDPTR |WRPTR |FLAGS |DATAPEN
+-------------------------------------------------------------------------------
+00|DS |S|APPS |CLOSED |0x02000|0x00000|0x00000|dcCiwrsb|0x00000
+ | | |MDMSW|OPENING|0x02000|0x00000|0x00000|dcCiwrsb|0x00000
+-------------------------------------------------------------------------------
+ */
+
+ seq_printf(s, "%2s|%-19s|%1s|%-5s|%-7s|%-7s|%-7s|%-7s|%-8s|%-7s\n",
+ "ID",
+ "CHANNEL NAME",
+ "T",
+ "PROC",
+ "STATE",
+ "FIFO SZ",
+ "RDPTR",
+ "WRPTR",
+ "FLAGS",
+ "DATAPEN");
+ seq_puts(s,
+ "-------------------------------------------------------------------------------\n");
+ for (n = 0; n < num_tbl_entries; ++n) {
+ if (strlen(tbl[n].name) == 0)
+ continue;
+
+ seq_printf(s, "%2u|%-19s|%s|", tbl[n].cid, tbl[n].name,
+ smd_xfer_type_to_str(SMD_XFER_TYPE(tbl[n].type)));
+ ch_type = SMD_CHANNEL_TYPE(tbl[n].type);
+
+ if (smd_edge_to_remote_pid(ch_type) == SMD_RPM &&
+ smd_edge_to_local_pid(ch_type) != SMD_APPS)
+ is_restricted = true;
+ else
+ is_restricted = false;
+
+ if (is_word_access_ch(ch_type))
+ half_ch_size =
+ sizeof(struct smd_half_channel_word_access);
+ else
+ half_ch_size = sizeof(struct smd_half_channel);
+
+ half_ch = smem_find(ch_base_id + n, 2 * half_ch_size,
+ pid, flags);
+ buffer = smem_get_entry(fifo_base_id + n, &buffer_size,
+ pid, flags);
+ if (half_ch && buffer)
+ print_half_ch_state(s,
+ half_ch,
+ get_half_ch_funcs(ch_type),
+ buffer_size / 2,
+ smd_edge_to_local_pid(ch_type),
+ is_restricted);
+
+ seq_puts(s, "\n");
+ seq_printf(s, "%2s|%-19s|%1s|", "", "", "");
+
+ if (half_ch && buffer)
+ print_half_ch_state(s,
+ half_ch + half_ch_size,
+ get_half_ch_funcs(ch_type),
+ buffer_size / 2,
+ smd_edge_to_remote_pid(ch_type),
+ is_restricted);
+
+ seq_puts(s, "\n");
+ seq_puts(s,
+ "-------------------------------------------------------------------------------\n");
+ }
+}
+
+/**
+ * debug_ch - Print the current state of every valid SMD channel in a human
+ * readable formatted table.
+ *
+ * @s: the sequential file to print to
+ */
+static void debug_ch(struct seq_file *s)
+{
+ struct smd_alloc_elm *tbl;
+ struct smd_alloc_elm *default_pri_tbl;
+ struct smd_alloc_elm *default_sec_tbl;
+ unsigned tbl_size;
+ int i;
+
+ tbl = smem_get_entry(ID_CH_ALLOC_TBL, &tbl_size, 0, SMEM_ANY_HOST_FLAG);
+ default_pri_tbl = tbl;
+
+ if (!tbl) {
+ seq_puts(s, "Channel allocation table not found\n");
+ return;
+ }
+
+ seq_puts(s, "Primary allocation table:\n");
+ print_smd_ch_table(s, tbl, tbl_size / sizeof(*tbl), ID_SMD_CHANNELS,
+ SMEM_SMD_FIFO_BASE_ID,
+ 0,
+ SMEM_ANY_HOST_FLAG);
+
+ tbl = smem_get_entry(SMEM_CHANNEL_ALLOC_TBL_2, &tbl_size, 0,
+ SMEM_ANY_HOST_FLAG);
+ default_sec_tbl = tbl;
+ if (tbl) {
+ seq_puts(s, "\n\nSecondary allocation table:\n");
+ print_smd_ch_table(s, tbl, tbl_size / sizeof(*tbl),
+ SMEM_SMD_BASE_ID_2,
+ SMEM_SMD_FIFO_BASE_ID_2,
+ 0,
+ SMEM_ANY_HOST_FLAG);
+ }
+
+ for (i = 1; i < NUM_SMD_SUBSYSTEMS; ++i) {
+ tbl = smem_get_entry(ID_CH_ALLOC_TBL, &tbl_size, i, 0);
+ if (tbl && tbl != default_pri_tbl) {
+ seq_puts(s, "\n\n");
+ seq_printf(s, "%s <-> %s Primary allocation table:\n",
+ pid_to_str(SMD_APPS),
+ pid_to_str(i));
+ print_smd_ch_table(s, tbl, tbl_size / sizeof(*tbl),
+ ID_SMD_CHANNELS,
+ SMEM_SMD_FIFO_BASE_ID,
+ i,
+ 0);
+ }
+
+ tbl = smem_get_entry(SMEM_CHANNEL_ALLOC_TBL_2, &tbl_size, i, 0);
+ if (tbl && tbl != default_sec_tbl) {
+ seq_puts(s, "\n\n");
+ seq_printf(s, "%s <-> %s Secondary allocation table:\n",
+ pid_to_str(SMD_APPS),
+ pid_to_str(i));
+ print_smd_ch_table(s, tbl, tbl_size / sizeof(*tbl),
+ SMEM_SMD_BASE_ID_2,
+ SMEM_SMD_FIFO_BASE_ID_2,
+ i,
+ 0);
+ }
+ }
+}
+
+static int debugfs_show(struct seq_file *s, void *data)
+{
+ void (*show)(struct seq_file *) = s->private;
+
+ show(s);
+
+ return 0;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debugfs_show, inode->i_private);
+}
+
+static const struct file_operations debug_ops = {
+ .open = debug_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static void debug_create(const char *name, umode_t mode,
+ struct dentry *dent,
+ void (*show)(struct seq_file *))
+{
+ struct dentry *file;
+
+ file = debugfs_create_file(name, mode, dent, show, &debug_ops);
+ if (!file)
+ pr_err("%s: unable to create file '%s'\n", __func__, name);
+}
+
+static int __init smd_debugfs_init(void)
+{
+ struct dentry *dent;
+
+	dent = debugfs_create_dir("smd", NULL);
+ if (IS_ERR(dent))
+ return PTR_ERR(dent);
+
+ debug_create("ch", 0444, dent, debug_ch);
+ debug_create("version", 0444, dent, debug_read_smd_version);
+ debug_create("int_stats", 0444, dent, debug_int_stats);
+ debug_create("int_stats_reset", 0444, dent, debug_int_stats_reset);
+
+ return 0;
+}
+
+late_initcall(smd_debugfs_init);
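+
+/*
+ * Illustrative usage (not part of this patch): with debugfs mounted at
+ * /sys/kernel/debug, the nodes created above can be read directly, e.g.:
+ *
+ *	cat /sys/kernel/debug/smd/ch
+ *	cat /sys/kernel/debug/smd/int_stats
+ *
+ * Reading int_stats_reset appears to clear the interrupt counters as a
+ * side effect of its show function.
+ */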
+#endif
diff --git a/drivers/soc/qcom/smd_init_dt.c b/drivers/soc/qcom/smd_init_dt.c
new file mode 100644
index 000000000000..993f3536ae04
--- /dev/null
+++ b/drivers/soc/qcom/smd_init_dt.c
@@ -0,0 +1,346 @@
+/* drivers/soc/qcom/smd_init_dt.c
+ *
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ipc_logging.h>
+
+#include "smd_private.h"
+
+#define MODULE_NAME "msm_smd"
+#define IPC_LOG(level, x...) do { \
+ if (smd_log_ctx) \
+ ipc_log_string(smd_log_ctx, x); \
+ else \
+ printk(level x); \
+ } while (0)
+
+#if defined(CONFIG_MSM_SMD_DEBUG)
+#define SMD_DBG(x...) do { \
+ if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
+ IPC_LOG(KERN_DEBUG, x); \
+ } while (0)
+
+#define SMSM_DBG(x...) do { \
+ if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
+ IPC_LOG(KERN_DEBUG, x); \
+ } while (0)
+#else
+#define SMD_DBG(x...) do { } while (0)
+#define SMSM_DBG(x...) do { } while (0)
+#endif
+
+static DEFINE_MUTEX(smd_probe_lock);
+static int first_probe_done;
+
+static int msm_smsm_probe(struct platform_device *pdev)
+{
+ uint32_t edge;
+ char *key;
+ int ret;
+ uint32_t irq_offset;
+ uint32_t irq_bitmask;
+ uint32_t irq_line;
+ struct interrupt_config_item *private_irq;
+ struct device_node *node;
+ void *irq_out_base;
+ resource_size_t irq_out_size;
+ struct platform_device *parent_pdev;
+ struct resource *r;
+ struct interrupt_config *private_intr_config;
+ uint32_t remote_pid;
+
+ node = pdev->dev.of_node;
+
+ if (!pdev->dev.parent) {
+ pr_err("%s: missing link to parent device\n", __func__);
+ return -ENODEV;
+ }
+
+ parent_pdev = to_platform_device(pdev->dev.parent);
+
+ key = "irq-reg-base";
+ r = platform_get_resource_byname(parent_pdev, IORESOURCE_MEM, key);
+ if (!r)
+ goto missing_key;
+ irq_out_size = resource_size(r);
+ irq_out_base = ioremap_nocache(r->start, irq_out_size);
+ if (!irq_out_base) {
+		pr_err("%s: ioremap_nocache() failed for irq_out_base addr:%pa size:%pa\n",
+ __func__, &r->start, &irq_out_size);
+ return -ENOMEM;
+ }
+ SMSM_DBG("%s: %s = %p", __func__, key, irq_out_base);
+
+ key = "qcom,smsm-edge";
+ ret = of_property_read_u32(node, key, &edge);
+ if (ret)
+ goto missing_key;
+ SMSM_DBG("%s: %s = %d", __func__, key, edge);
+
+ key = "qcom,smsm-irq-offset";
+ ret = of_property_read_u32(node, key, &irq_offset);
+ if (ret)
+ goto missing_key;
+ SMSM_DBG("%s: %s = %x", __func__, key, irq_offset);
+
+ key = "qcom,smsm-irq-bitmask";
+ ret = of_property_read_u32(node, key, &irq_bitmask);
+ if (ret)
+ goto missing_key;
+ SMSM_DBG("%s: %s = %x", __func__, key, irq_bitmask);
+
+ key = "interrupts";
+ irq_line = irq_of_parse_and_map(node, 0);
+ if (!irq_line)
+ goto missing_key;
+ SMSM_DBG("%s: %s = %d", __func__, key, irq_line);
+
+ private_intr_config = smd_get_intr_config(edge);
+ if (!private_intr_config) {
+ pr_err("%s: invalid edge\n", __func__);
+ return -ENODEV;
+ }
+ private_irq = &private_intr_config->smsm;
+ private_irq->out_bit_pos = irq_bitmask;
+ private_irq->out_offset = irq_offset;
+ private_irq->out_base = irq_out_base;
+ private_irq->irq_id = irq_line;
+ remote_pid = smd_edge_to_remote_pid(edge);
+ interrupt_stats[remote_pid].smsm_interrupt_id = irq_line;
+
+ ret = request_irq(irq_line,
+ private_irq->irq_handler,
+ IRQF_TRIGGER_RISING,
+ node->name,
+ NULL);
+ if (ret < 0) {
+ pr_err("%s: request_irq() failed on %d\n", __func__, irq_line);
+ return ret;
+ } else {
+ ret = enable_irq_wake(irq_line);
+ if (ret < 0)
+ pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
+ irq_line);
+ }
+
+ ret = smsm_post_init();
+ if (ret) {
+		pr_err("smsm_post_init() failed ret=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+
+missing_key:
+	pr_err("%s: missing key: %s\n", __func__, key);
+ return -ENODEV;
+}
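+
+/*
+ * Illustrative DT fragment for the node parsed above (all values are
+ * placeholders, not taken from a real board file):
+ *
+ *	smsm {
+ *		compatible = "qcom,smsm";
+ *		qcom,smsm-edge = <0>;
+ *		qcom,smsm-irq-offset = <0x8>;
+ *		qcom,smsm-irq-bitmask = <0x2000>;
+ *		interrupts = <0 26 1>;
+ *	};
+ */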
+
+static int msm_smd_probe(struct platform_device *pdev)
+{
+ uint32_t edge;
+ char *key;
+ int ret;
+ uint32_t irq_offset;
+ uint32_t irq_bitmask;
+ uint32_t irq_line;
+ unsigned long irq_flags = IRQF_TRIGGER_RISING;
+ const char *subsys_name;
+ struct interrupt_config_item *private_irq;
+ struct device_node *node;
+ void *irq_out_base;
+ resource_size_t irq_out_size;
+ struct platform_device *parent_pdev;
+ struct resource *r;
+ struct interrupt_config *private_intr_config;
+ uint32_t remote_pid;
+ bool skip_pil;
+
+ node = pdev->dev.of_node;
+
+ if (!pdev->dev.parent) {
+ pr_err("%s: missing link to parent device\n", __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&smd_probe_lock);
+ if (!first_probe_done) {
+ smd_reset_all_edge_subsys_name();
+ first_probe_done = 1;
+ }
+ mutex_unlock(&smd_probe_lock);
+
+ parent_pdev = to_platform_device(pdev->dev.parent);
+
+ key = "irq-reg-base";
+ r = platform_get_resource_byname(parent_pdev, IORESOURCE_MEM, key);
+ if (!r)
+ goto missing_key;
+ irq_out_size = resource_size(r);
+ irq_out_base = ioremap_nocache(r->start, irq_out_size);
+ if (!irq_out_base) {
+		pr_err("%s: ioremap_nocache() failed for irq_out_base addr:%pa size:%pa\n",
+ __func__, &r->start, &irq_out_size);
+ return -ENOMEM;
+ }
+ SMD_DBG("%s: %s = %p", __func__, key, irq_out_base);
+
+ key = "qcom,smd-edge";
+ ret = of_property_read_u32(node, key, &edge);
+ if (ret)
+ goto missing_key;
+ SMD_DBG("%s: %s = %d", __func__, key, edge);
+
+ key = "qcom,smd-irq-offset";
+ ret = of_property_read_u32(node, key, &irq_offset);
+ if (ret)
+ goto missing_key;
+ SMD_DBG("%s: %s = %x", __func__, key, irq_offset);
+
+ key = "qcom,smd-irq-bitmask";
+ ret = of_property_read_u32(node, key, &irq_bitmask);
+ if (ret)
+ goto missing_key;
+ SMD_DBG("%s: %s = %x", __func__, key, irq_bitmask);
+
+ key = "interrupts";
+ irq_line = irq_of_parse_and_map(node, 0);
+ if (!irq_line)
+ goto missing_key;
+ SMD_DBG("%s: %s = %d", __func__, key, irq_line);
+
+ key = "label";
+ subsys_name = of_get_property(node, key, NULL);
+ SMD_DBG("%s: %s = %s", __func__, key, subsys_name);
+ /*
+ * Backwards compatibility. Although label is required, some DTs may
+ * still list the legacy pil-string. Sanely handle pil-string.
+ */
+ if (!subsys_name) {
+ pr_warn("msm_smd: Missing required property - label. Using legacy parsing\n");
+ key = "qcom,pil-string";
+ subsys_name = of_get_property(node, key, NULL);
+ SMD_DBG("%s: %s = %s", __func__, key, subsys_name);
+		skip_pil = !subsys_name;
+ } else {
+ key = "qcom,not-loadable";
+ skip_pil = of_property_read_bool(node, key);
+ SMD_DBG("%s: %s = %d\n", __func__, key, skip_pil);
+ }
+
+ key = "qcom,irq-no-suspend";
+ ret = of_property_read_bool(node, key);
+ if (ret)
+ irq_flags |= IRQF_NO_SUSPEND;
+
+ private_intr_config = smd_get_intr_config(edge);
+ if (!private_intr_config) {
+ pr_err("%s: invalid edge\n", __func__);
+ return -ENODEV;
+ }
+ private_irq = &private_intr_config->smd;
+ private_irq->out_bit_pos = irq_bitmask;
+ private_irq->out_offset = irq_offset;
+ private_irq->out_base = irq_out_base;
+ private_irq->irq_id = irq_line;
+ remote_pid = smd_edge_to_remote_pid(edge);
+ interrupt_stats[remote_pid].smd_interrupt_id = irq_line;
+
+ ret = request_irq(irq_line,
+ private_irq->irq_handler,
+ irq_flags,
+ node->name,
+ NULL);
+ if (ret < 0) {
+ pr_err("%s: request_irq() failed on %d\n", __func__, irq_line);
+ return ret;
+ } else {
+ ret = enable_irq_wake(irq_line);
+ if (ret < 0)
+ pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
+ irq_line);
+ }
+
+ smd_set_edge_subsys_name(edge, subsys_name);
+ smd_proc_set_skip_pil(smd_edge_to_remote_pid(edge), skip_pil);
+
+ smd_set_edge_initialized(edge);
+ smd_post_init(remote_pid);
+ return 0;
+
+missing_key:
+	pr_err("%s: missing key: %s\n", __func__, key);
+ return -ENODEV;
+}
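+
+/*
+ * Illustrative DT fragment for the node parsed above (all values are
+ * placeholders; see the smem DT binding added in this series for the
+ * authoritative format):
+ *
+ *	smd-modem {
+ *		compatible = "qcom,smd";
+ *		qcom,smd-edge = <0>;
+ *		qcom,smd-irq-offset = <0x8>;
+ *		qcom,smd-irq-bitmask = <0x1000>;
+ *		interrupts = <0 25 1>;
+ *		label = "modem";
+ *	};
+ */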
+
+static const struct of_device_id msm_smd_match_table[] = {
+ { .compatible = "qcom,smd" },
+ {},
+};
+
+static struct platform_driver msm_smd_driver = {
+ .probe = msm_smd_probe,
+ .driver = {
+		.name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_smd_match_table,
+ },
+};
+
+static const struct of_device_id msm_smsm_match_table[] = {
+ { .compatible = "qcom,smsm" },
+ {},
+};
+
+static struct platform_driver msm_smsm_driver = {
+ .probe = msm_smsm_probe,
+ .driver = {
+ .name = "msm_smsm",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_smsm_match_table,
+ },
+};
+
+int msm_smd_driver_register(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&msm_smd_driver);
+ if (rc) {
+ pr_err("%s: smd_driver register failed %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ rc = platform_driver_register(&msm_smsm_driver);
+ if (rc) {
+ pr_err("%s: msm_smsm_driver register failed %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_smd_driver_register);
+
+MODULE_DESCRIPTION("MSM SMD Device Tree Init");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smd_private.c b/drivers/soc/qcom/smd_private.c
new file mode 100644
index 000000000000..a7ef87fc723d
--- /dev/null
+++ b/drivers/soc/qcom/smd_private.c
@@ -0,0 +1,333 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "smd_private.h"
+
+void set_state(volatile void __iomem *half_channel, unsigned data)
+{
+ ((struct smd_half_channel __force *)(half_channel))->state = data;
+}
+
+unsigned get_state(volatile void __iomem *half_channel)
+{
+ return ((struct smd_half_channel __force *)(half_channel))->state;
+}
+
+void set_fDSR(volatile void __iomem *half_channel, unsigned char data)
+{
+ ((struct smd_half_channel __force *)(half_channel))->fDSR = data;
+}
+
+unsigned get_fDSR(volatile void __iomem *half_channel)
+{
+ return ((struct smd_half_channel __force *)(half_channel))->fDSR;
+}
+
+void set_fCTS(volatile void __iomem *half_channel, unsigned char data)
+{
+ ((struct smd_half_channel __force *)(half_channel))->fCTS = data;
+}
+
+unsigned get_fCTS(volatile void __iomem *half_channel)
+{
+ return ((struct smd_half_channel __force *)(half_channel))->fCTS;
+}
+
+void set_fCD(volatile void __iomem *half_channel, unsigned char data)
+{
+ ((struct smd_half_channel __force *)(half_channel))->fCD = data;
+}
+
+unsigned get_fCD(volatile void __iomem *half_channel)
+{
+ return ((struct smd_half_channel __force *)(half_channel))->fCD;
+}
+
+void set_fRI(volatile void __iomem *half_channel, unsigned char data)
+{
+ ((struct smd_half_channel __force *)(half_channel))->fRI = data;
+}
+
+unsigned get_fRI(volatile void __iomem *half_channel)
+{
+ return ((struct smd_half_channel __force *)(half_channel))->fRI;
+}
+
+void set_fHEAD(volatile void __iomem *half_channel, unsigned char data)
+{
+ ((struct smd_half_channel __force *)(half_channel))->fHEAD = data;
+}
+
+unsigned get_fHEAD(volatile void __iomem *half_channel)
+{
+ return ((struct smd_half_channel __force *)(half_channel))->fHEAD;
+}
+
+void set_fTAIL(volatile void __iomem *half_channel, unsigned char data)
+{
+ ((struct smd_half_channel __force *)(half_channel))->fTAIL = data;
+}
+
+unsigned get_fTAIL(volatile void __iomem *half_channel)
+{
+ return ((struct smd_half_channel __force *)(half_channel))->fTAIL;
+}
+
+void set_fSTATE(volatile void __iomem *half_channel, unsigned char data)
+{
+ ((struct smd_half_channel __force *)(half_channel))->fSTATE = data;
+}
+
+unsigned get_fSTATE(volatile void __iomem *half_channel)
+{
+ return ((struct smd_half_channel __force *)(half_channel))->fSTATE;
+}
+
+void set_fBLOCKREADINTR(volatile void __iomem *half_channel, unsigned char data)
+{
+ ((struct smd_half_channel __force *)
+ (half_channel))->fBLOCKREADINTR = data;
+}
+
+unsigned get_fBLOCKREADINTR(volatile void __iomem *half_channel)
+{
+ return ((struct smd_half_channel __force *)
+ (half_channel))->fBLOCKREADINTR;
+}
+
+void set_tail(volatile void __iomem *half_channel, unsigned data)
+{
+ ((struct smd_half_channel __force *)(half_channel))->tail = data;
+}
+
+unsigned get_tail(volatile void __iomem *half_channel)
+{
+ return ((struct smd_half_channel __force *)(half_channel))->tail;
+}
+
+void set_head(volatile void __iomem *half_channel, unsigned data)
+{
+ ((struct smd_half_channel __force *)(half_channel))->head = data;
+}
+
+unsigned get_head(volatile void __iomem *half_channel)
+{
+ return ((struct smd_half_channel __force *)(half_channel))->head;
+}
+
+void set_state_word_access(volatile void __iomem *half_channel, unsigned data)
+{
+ ((struct smd_half_channel_word_access __force *)
+ (half_channel))->state = data;
+}
+
+unsigned get_state_word_access(volatile void __iomem *half_channel)
+{
+ return ((struct smd_half_channel_word_access __force *)
+ (half_channel))->state;
+}
+
+void set_fDSR_word_access(volatile void __iomem *half_channel,
+ unsigned char data)
+{
+ ((struct smd_half_channel_word_access __force *)
+ (half_channel))->fDSR = data;
+}
+
+unsigned get_fDSR_word_access(volatile void __iomem *half_channel)
+{
+ return ((struct smd_half_channel_word_access __force *)
+ (half_channel))->fDSR;
+}
+
+void set_fCTS_word_access(volatile void __iomem *half_channel,
+ unsigned char data)
+{
+ ((struct smd_half_channel_word_access __force *)
+ (half_channel))->fCTS = data;
+}
+
+unsigned get_fCTS_word_access(volatile void __iomem *half_channel)
+{
+ return ((struct smd_half_channel_word_access __force *)
+ (half_channel))->fCTS;
+}
+
+void set_fCD_word_access(volatile void __iomem *half_channel,
+ unsigned char data)
+{
+ ((struct smd_half_channel_word_access __force *)
+ (half_channel))->fCD = data;
+}
+
+unsigned get_fCD_word_access(volatile void __iomem *half_channel)
+{
+ return ((struct smd_half_channel_word_access __force *)
+ (half_channel))->fCD;
+}
+
+void set_fRI_word_access(volatile void __iomem *half_channel,
+ unsigned char data)
+{
+ ((struct smd_half_channel_word_access __force *)
+ (half_channel))->fRI = data;
+}
+
+unsigned get_fRI_word_access(volatile void __iomem *half_channel)
+{
+ return ((struct smd_half_channel_word_access __force *)
+ (half_channel))->fRI;
+}
+
+void set_fHEAD_word_access(volatile void __iomem *half_channel,
+ unsigned char data)
+{
+ ((struct smd_half_channel_word_access __force *)
+ (half_channel))->fHEAD = data;
+}
+
+unsigned get_fHEAD_word_access(volatile void __iomem *half_channel)
+{
+ return ((struct smd_half_channel_word_access __force *)
+ (half_channel))->fHEAD;
+}
+
+void set_fTAIL_word_access(volatile void __iomem *half_channel,
+ unsigned char data)
+{
+ ((struct smd_half_channel_word_access __force *)
+ (half_channel))->fTAIL = data;
+}
+
+unsigned get_fTAIL_word_access(volatile void __iomem *half_channel)
+{
+ return ((struct smd_half_channel_word_access __force *)
+ (half_channel))->fTAIL;
+}
+
+void set_fSTATE_word_access(volatile void __iomem *half_channel,
+ unsigned char data)
+{
+ ((struct smd_half_channel_word_access __force *)
+ (half_channel))->fSTATE = data;
+}
+
+unsigned get_fSTATE_word_access(volatile void __iomem *half_channel)
+{
+ return ((struct smd_half_channel_word_access __force *)
+ (half_channel))->fSTATE;
+}
+
+void set_fBLOCKREADINTR_word_access(volatile void __iomem *half_channel,
+ unsigned char data)
+{
+ ((struct smd_half_channel_word_access __force *)
+ (half_channel))->fBLOCKREADINTR = data;
+}
+
+unsigned get_fBLOCKREADINTR_word_access(volatile void __iomem *half_channel)
+{
+ return ((struct smd_half_channel_word_access __force *)
+ (half_channel))->fBLOCKREADINTR;
+}
+
+void set_tail_word_access(volatile void __iomem *half_channel, unsigned data)
+{
+ ((struct smd_half_channel_word_access __force *)
+ (half_channel))->tail = data;
+}
+
+unsigned get_tail_word_access(volatile void __iomem *half_channel)
+{
+ return ((struct smd_half_channel_word_access __force *)
+ (half_channel))->tail;
+}
+
+void set_head_word_access(volatile void __iomem *half_channel, unsigned data)
+{
+ ((struct smd_half_channel_word_access __force *)
+ (half_channel))->head = data;
+}
+
+unsigned get_head_word_access(volatile void __iomem *half_channel)
+{
+ return ((struct smd_half_channel_word_access __force *)
+ (half_channel))->head;
+}
+
+int is_word_access_ch(unsigned ch_type)
+{
+ if (ch_type == SMD_APPS_RPM || ch_type == SMD_MODEM_RPM ||
+ ch_type == SMD_QDSP_RPM || ch_type == SMD_WCNSS_RPM ||
+ ch_type == SMD_TZ_RPM)
+ return 1;
+ else
+ return 0;
+}
+
+struct smd_half_channel_access *get_half_ch_funcs(unsigned ch_type)
+{
+ static struct smd_half_channel_access byte_access = {
+ .set_state = set_state,
+ .get_state = get_state,
+ .set_fDSR = set_fDSR,
+ .get_fDSR = get_fDSR,
+ .set_fCTS = set_fCTS,
+ .get_fCTS = get_fCTS,
+ .set_fCD = set_fCD,
+ .get_fCD = get_fCD,
+ .set_fRI = set_fRI,
+ .get_fRI = get_fRI,
+ .set_fHEAD = set_fHEAD,
+ .get_fHEAD = get_fHEAD,
+ .set_fTAIL = set_fTAIL,
+ .get_fTAIL = get_fTAIL,
+ .set_fSTATE = set_fSTATE,
+ .get_fSTATE = get_fSTATE,
+ .set_fBLOCKREADINTR = set_fBLOCKREADINTR,
+ .get_fBLOCKREADINTR = get_fBLOCKREADINTR,
+ .set_tail = set_tail,
+ .get_tail = get_tail,
+ .set_head = set_head,
+ .get_head = get_head,
+ };
+ static struct smd_half_channel_access word_access = {
+ .set_state = set_state_word_access,
+ .get_state = get_state_word_access,
+ .set_fDSR = set_fDSR_word_access,
+ .get_fDSR = get_fDSR_word_access,
+ .set_fCTS = set_fCTS_word_access,
+ .get_fCTS = get_fCTS_word_access,
+ .set_fCD = set_fCD_word_access,
+ .get_fCD = get_fCD_word_access,
+ .set_fRI = set_fRI_word_access,
+ .get_fRI = get_fRI_word_access,
+ .set_fHEAD = set_fHEAD_word_access,
+ .get_fHEAD = get_fHEAD_word_access,
+ .set_fTAIL = set_fTAIL_word_access,
+ .get_fTAIL = get_fTAIL_word_access,
+ .set_fSTATE = set_fSTATE_word_access,
+ .get_fSTATE = get_fSTATE_word_access,
+ .set_fBLOCKREADINTR = set_fBLOCKREADINTR_word_access,
+ .get_fBLOCKREADINTR = get_fBLOCKREADINTR_word_access,
+ .set_tail = set_tail_word_access,
+ .get_tail = get_tail_word_access,
+ .set_head = set_head_word_access,
+ .get_head = get_head_word_access,
+ };
+
+ if (is_word_access_ch(ch_type))
+ return &word_access;
+ else
+ return &byte_access;
+}
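+
+/*
+ * Illustrative caller (not part of this patch): SMD core code is expected
+ * to select the accessor table once per channel and then touch the shared
+ * half-channel only through it, so the byte- and word-access layouts share
+ * a single code path, e.g.:
+ *
+ *	struct smd_half_channel_access *f = get_half_ch_funcs(ch->type);
+ *
+ *	f->set_state(ch->send, SMD_SS_OPENING);
+ *	f->set_fSTATE(ch->send, 1);
+ */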
diff --git a/drivers/soc/qcom/smd_private.h b/drivers/soc/qcom/smd_private.h
new file mode 100644
index 000000000000..914899195fbc
--- /dev/null
+++ b/drivers/soc/qcom/smd_private.h
@@ -0,0 +1,246 @@
+/* drivers/soc/qcom/smd_private.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _ARCH_ARM_MACH_MSM_MSM_SMD_PRIVATE_H_
+#define _ARCH_ARM_MACH_MSM_MSM_SMD_PRIVATE_H_
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/remote_spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+
+#include <soc/qcom/smd.h>
+#include <soc/qcom/smsm.h>
+
+#define VERSION_QDSP6 4
+#define VERSION_APPS_SBL 6
+#define VERSION_MODEM_SBL 7
+#define VERSION_APPS 8
+#define VERSION_MODEM 9
+#define VERSION_DSPS 10
+
+#define ID_SMD_CHANNELS SMEM_SMD_BASE_ID
+#define ID_SHARED_STATE SMEM_SMSM_SHARED_STATE
+#define ID_CH_ALLOC_TBL SMEM_CHANNEL_ALLOC_TBL
+
+#define SMD_SS_CLOSED 0x00000000
+#define SMD_SS_OPENING 0x00000001
+#define SMD_SS_OPENED 0x00000002
+#define SMD_SS_FLUSHING 0x00000003
+#define SMD_SS_CLOSING 0x00000004
+#define SMD_SS_RESET 0x00000005
+#define SMD_SS_RESET_OPENING 0x00000006
+
+#define SMD_HEADER_SIZE 20
+
+/* 'type' field of smd_alloc_elm structure
+ * has the following breakup
+ * bits 0-7 -> channel type
+ * bits 8-11 -> xfer type
+ * bits 12-31 -> reserved
+ */
+struct smd_alloc_elm {
+ char name[20];
+ uint32_t cid;
+ uint32_t type;
+ uint32_t ref_count;
+};
+
+#define SMD_CHANNEL_TYPE(x) ((x) & 0x000000FF)
+#define SMD_XFER_TYPE(x) (((x) & 0x00000F00) >> 8)
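+
+/*
+ * Worked example: a type value of 0x0103 decodes as
+ * SMD_CHANNEL_TYPE(0x0103) = 0x03 (one of the SMD_* edge values) and
+ * SMD_XFER_TYPE(0x0103) = 0x1; bits 12-31 are reserved and ignored.
+ */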
+
+struct smd_half_channel {
+ unsigned state;
+ unsigned char fDSR;
+ unsigned char fCTS;
+ unsigned char fCD;
+ unsigned char fRI;
+ unsigned char fHEAD;
+ unsigned char fTAIL;
+ unsigned char fSTATE;
+ unsigned char fBLOCKREADINTR;
+ unsigned tail;
+ unsigned head;
+};
+
+struct smd_half_channel_word_access {
+ unsigned state;
+ unsigned fDSR;
+ unsigned fCTS;
+ unsigned fCD;
+ unsigned fRI;
+ unsigned fHEAD;
+ unsigned fTAIL;
+ unsigned fSTATE;
+ unsigned fBLOCKREADINTR;
+ unsigned tail;
+ unsigned head;
+};
+
+struct smd_half_channel_access {
+ void (*set_state)(volatile void __iomem *half_channel, unsigned data);
+ unsigned (*get_state)(volatile void __iomem *half_channel);
+ void (*set_fDSR)(volatile void __iomem *half_channel,
+ unsigned char data);
+ unsigned (*get_fDSR)(volatile void __iomem *half_channel);
+ void (*set_fCTS)(volatile void __iomem *half_channel,
+ unsigned char data);
+ unsigned (*get_fCTS)(volatile void __iomem *half_channel);
+ void (*set_fCD)(volatile void __iomem *half_channel,
+ unsigned char data);
+ unsigned (*get_fCD)(volatile void __iomem *half_channel);
+ void (*set_fRI)(volatile void __iomem *half_channel,
+ unsigned char data);
+ unsigned (*get_fRI)(volatile void __iomem *half_channel);
+ void (*set_fHEAD)(volatile void __iomem *half_channel,
+ unsigned char data);
+ unsigned (*get_fHEAD)(volatile void __iomem *half_channel);
+ void (*set_fTAIL)(volatile void __iomem *half_channel,
+ unsigned char data);
+ unsigned (*get_fTAIL)(volatile void __iomem *half_channel);
+ void (*set_fSTATE)(volatile void __iomem *half_channel,
+ unsigned char data);
+ unsigned (*get_fSTATE)(volatile void __iomem *half_channel);
+ void (*set_fBLOCKREADINTR)(volatile void __iomem *half_channel,
+ unsigned char data);
+ unsigned (*get_fBLOCKREADINTR)(volatile void __iomem *half_channel);
+ void (*set_tail)(volatile void __iomem *half_channel, unsigned data);
+ unsigned (*get_tail)(volatile void __iomem *half_channel);
+ void (*set_head)(volatile void __iomem *half_channel, unsigned data);
+ unsigned (*get_head)(volatile void __iomem *half_channel);
+};
+
+int is_word_access_ch(unsigned ch_type);
+
+struct smd_half_channel_access *get_half_ch_funcs(unsigned ch_type);
+
+struct smd_channel {
+ volatile void __iomem *send; /* some variant of smd_half_channel */
+ volatile void __iomem *recv; /* some variant of smd_half_channel */
+ unsigned char *send_data;
+ unsigned char *recv_data;
+ unsigned fifo_size;
+ struct list_head ch_list;
+
+ unsigned current_packet;
+ unsigned n;
+ void *priv;
+ void (*notify)(void *priv, unsigned flags);
+
+ int (*read)(smd_channel_t *ch, void *data, int len, int user_buf);
+ int (*write)(smd_channel_t *ch, const void *data, int len,
+ int user_buf, bool int_ntfy);
+ int (*read_avail)(smd_channel_t *ch);
+ int (*write_avail)(smd_channel_t *ch);
+ int (*read_from_cb)(smd_channel_t *ch, void *data, int len,
+ int user_buf);
+
+ void (*update_state)(smd_channel_t *ch);
+ unsigned last_state;
+ void (*notify_other_cpu)(smd_channel_t *ch);
+ void *(*read_from_fifo)(void *dest, const void *src, size_t num_bytes,
+ bool to_user);
+ void *(*write_to_fifo)(void *dest, const void *src, size_t num_bytes,
+ bool from_user);
+
+ char name[20];
+ struct platform_device pdev;
+ unsigned type;
+
+ int pending_pkt_sz;
+
+ char is_pkt_ch;
+
+ /*
+ * private internal functions to access *send and *recv.
+ * never to be exported outside of smd
+ */
+ struct smd_half_channel_access *half_ch;
+};
+
+extern spinlock_t smem_lock;
+
+struct interrupt_stat {
+ uint32_t smd_in_count;
+ uint32_t smd_out_count;
+ uint32_t smd_interrupt_id;
+
+ uint32_t smsm_in_count;
+ uint32_t smsm_out_count;
+ uint32_t smsm_interrupt_id;
+};
+extern struct interrupt_stat interrupt_stats[NUM_SMD_SUBSYSTEMS];
+
+struct interrupt_config_item {
+ /* must be initialized */
+	irqreturn_t (*irq_handler)(int irq, void *data);
+ /* outgoing interrupt config (set from platform data) */
+ uint32_t out_bit_pos;
+ void __iomem *out_base;
+ uint32_t out_offset;
+ int irq_id;
+};
+
+enum {
+ MSM_SMD_DEBUG = 1U << 0,
+ MSM_SMSM_DEBUG = 1U << 1,
+ MSM_SMD_INFO = 1U << 2,
+ MSM_SMSM_INFO = 1U << 3,
+ MSM_SMD_POWER_INFO = 1U << 4,
+ MSM_SMSM_POWER_INFO = 1U << 5,
+};
+
+struct interrupt_config {
+ struct interrupt_config_item smd;
+ struct interrupt_config_item smsm;
+};
+
+struct edge_to_pid {
+ uint32_t local_pid;
+ uint32_t remote_pid;
+ char subsys_name[SMD_MAX_CH_NAME_LEN];
+ bool initialized;
+};
+
+extern void *smd_log_ctx;
+extern int msm_smd_debug_mask;
+
+extern irqreturn_t smd_modem_irq_handler(int irq, void *data);
+extern irqreturn_t smsm_modem_irq_handler(int irq, void *data);
+extern irqreturn_t smd_dsp_irq_handler(int irq, void *data);
+extern irqreturn_t smsm_dsp_irq_handler(int irq, void *data);
+extern irqreturn_t smd_dsps_irq_handler(int irq, void *data);
+extern irqreturn_t smsm_dsps_irq_handler(int irq, void *data);
+extern irqreturn_t smd_wcnss_irq_handler(int irq, void *data);
+extern irqreturn_t smsm_wcnss_irq_handler(int irq, void *data);
+extern irqreturn_t smd_rpm_irq_handler(int irq, void *data);
+extern irqreturn_t smd_modemfw_irq_handler(int irq, void *data);
+
+extern int msm_smd_driver_register(void);
+extern void smd_post_init(unsigned remote_pid);
+extern int smsm_post_init(void);
+
+extern struct interrupt_config *smd_get_intr_config(uint32_t edge);
+extern int smd_edge_to_remote_pid(uint32_t edge);
+extern int smd_edge_to_local_pid(uint32_t edge);
+extern void smd_set_edge_subsys_name(uint32_t edge, const char *subsys_name);
+extern void smd_reset_all_edge_subsys_name(void);
+extern void smd_proc_set_skip_pil(unsigned pid, bool skip_pil);
+extern void smd_set_edge_initialized(uint32_t edge);
+extern void smd_cfg_smd_intr(uint32_t proc, uint32_t mask, void *ptr);
+extern void smd_cfg_smsm_intr(uint32_t proc, uint32_t mask, void *ptr);
+#endif
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
new file mode 100644
index 000000000000..6fe203eda3cc
--- /dev/null
+++ b/drivers/soc/qcom/smem.c
@@ -0,0 +1,1483 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/ipc_logging.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/ramdump.h>
+
+#include <soc/qcom/smem.h>
+
+#include "smem_private.h"
+
+#define MODEM_SBL_VERSION_INDEX 7
+#define SMEM_VERSION_INFO_SIZE (32 * 4)
+#define SMEM_VERSION 0x000B
+
+enum {
+ MSM_SMEM_DEBUG = 1U << 0,
+ MSM_SMEM_INFO = 1U << 1,
+};
+
+static int msm_smem_debug_mask = MSM_SMEM_INFO;
+module_param_named(debug_mask, msm_smem_debug_mask,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+static void *smem_ipc_log_ctx;
+#define NUM_LOG_PAGES 4
+
+#define IPC_LOG(x...) do { \
+ if (smem_ipc_log_ctx) \
+ ipc_log_string(smem_ipc_log_ctx, x); \
+ } while (0)
+
+
+#define LOG_ERR(x...) do { \
+ pr_err(x); \
+ IPC_LOG(x); \
+ } while (0)
+#define SMEM_DBG(x...) do { \
+ if (msm_smem_debug_mask & MSM_SMEM_DEBUG) \
+ IPC_LOG(x); \
+ } while (0)
+#define SMEM_INFO(x...) do { \
+ if (msm_smem_debug_mask & MSM_SMEM_INFO) \
+ IPC_LOG(x); \
+ } while (0)
+
+#define SMEM_SPINLOCK_SMEM_ALLOC "S:3"
+
+static void *smem_ram_base;
+static resource_size_t smem_ram_size;
+static phys_addr_t smem_ram_phys;
+static remote_spinlock_t remote_spinlock;
+static uint32_t num_smem_areas;
+static struct smem_area *smem_areas;
+static struct ramdump_segment *smem_ramdump_segments;
+static int spinlocks_initialized;
+static void *smem_ramdump_dev;
+static DEFINE_MUTEX(spinlock_init_lock);
+static DEFINE_SPINLOCK(smem_init_check_lock);
+static int smem_module_inited;
+static RAW_NOTIFIER_HEAD(smem_module_init_notifier_list);
+static DEFINE_MUTEX(smem_module_init_notifier_lock);
+static bool probe_done;
+
+/* smem security feature components */
+#define SMEM_TOC_IDENTIFIER 0x434f5424 /* "$TOC" */
+#define SMEM_TOC_MAX_EXCLUSIONS 4
+#define SMEM_PART_HDR_IDENTIFIER 0x54525024 /* "$PRT" */
+#define SMEM_ALLOCATION_CANARY 0xa5a5
+
+struct smem_toc_entry {
+ uint32_t offset;
+ uint32_t size;
+ uint32_t flags;
+ uint16_t host0;
+ uint16_t host1;
+ uint32_t size_cacheline;
+ uint32_t reserved[3];
+ uint32_t exclusion_sizes[SMEM_TOC_MAX_EXCLUSIONS];
+};
+
+struct smem_toc {
+ /* Identifier is a constant, set to SMEM_TOC_IDENTIFIER. */
+ uint32_t identifier;
+ uint32_t version;
+ uint32_t num_entries;
+ uint32_t reserved[5];
+ struct smem_toc_entry entry[];
+};
+
+struct smem_partition_header {
+ /* Identifier is a constant, set to SMEM_PART_HDR_IDENTIFIER. */
+ uint32_t identifier;
+ uint16_t host0;
+ uint16_t host1;
+ uint32_t size;
+ uint32_t offset_free_uncached;
+ uint32_t offset_free_cached;
+ uint32_t reserved[3];
+};
+
+struct smem_partition_allocation_header {
+ /* Canary is a constant, set to SMEM_ALLOCATION_CANARY */
+ uint16_t canary;
+ uint16_t smem_type;
+ uint32_t size; /* includes padding bytes */
+ uint16_t padding_data;
+ uint16_t padding_hdr;
+ uint32_t reserved[1];
+};
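+
+/*
+ * Sketch of a secured partition's layout, as implied by
+ * alloc_item_secure() below (illustrative, not normative documentation):
+ *
+ *	+---------------------------+  base of partition
+ *	| smem_partition_header     |
+ *	+---------------------------+
+ *	| alloc_hdr | item data     |  uncached items grow upward;
+ *	| alloc_hdr | item data ... |  offset_free_uncached marks the first
+ *	+---------------------------+  free uncached byte
+ *	|        free space         |
+ *	+---------------------------+
+ *	| item data | alloc_hdr ... |  cached items grow downward from the
+ *	| item data | alloc_hdr     |  end; offset_free_cached marks the
+ *	+---------------------------+  lowest byte in use by cached items
+ */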
+
+struct smem_partition_info {
+ uint32_t partition_num;
+ uint32_t offset;
+ uint32_t size_cacheline;
+};
+
+static struct smem_partition_info partitions[NUM_SMEM_SUBSYSTEMS];
+/* end smem security feature components */
+
+/* Identifier for the SMEM target info struct. */
+#define SMEM_TARG_INFO_IDENTIFIER 0x49494953 /* "SIII" in little-endian. */
+
+struct smem_targ_info_type {
+ /* Identifier is a constant, set to SMEM_TARG_INFO_IDENTIFIER. */
+ uint32_t identifier;
+ uint32_t size;
+ phys_addr_t phys_base_addr;
+};
+
+struct restart_notifier_block {
+ unsigned processor;
+ char *name;
+ struct notifier_block nb;
+};
+
+static int restart_notifier_cb(struct notifier_block *this,
+ unsigned long code,
+ void *data);
+
+static struct restart_notifier_block restart_notifiers[] = {
+ {SMEM_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
+ {SMEM_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
+ {SMEM_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
+ {SMEM_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
+ {SMEM_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
+ {SMEM_Q6, "adsp", .nb.notifier_call = restart_notifier_cb},
+};
+
+static int init_smem_remote_spinlock(void);
+
+/**
+ * is_probe_done() - Did the probe function successfully complete
+ *
+ * @return - true if probe successfully completed, false otherwise
+ *
+ * Helper function for EPROBE_DEFER support. If this function returns false,
+ * the calling function should immediately return -EPROBE_DEFER.
+ */
+static bool is_probe_done(void)
+{
+ return probe_done;
+}
+
+/**
+ * smem_phys_to_virt() - Convert a physical base and offset to virtual address
+ *
+ * @base: physical base address to check
+ * @offset: offset from the base to get the final address
+ * @returns: virtual SMEM address; NULL for failure
+ *
+ * Takes a physical address and an offset and checks if the resulting physical
+ * address would fit into one of the smem regions. If so, returns the
+ * corresponding virtual address. Otherwise returns NULL.
+ */
+static void *smem_phys_to_virt(phys_addr_t base, unsigned offset)
+{
+ int i;
+ phys_addr_t phys_addr;
+ resource_size_t size;
+
+ if (OVERFLOW_ADD_UNSIGNED(phys_addr_t, base, offset))
+ return NULL;
+
+ if (!smem_areas) {
+ /*
+ * Early boot - no area configuration yet, so default
+ * to using the main memory region.
+ *
+ * To remove the MSM_SHARED_RAM_BASE and the static
+ * mapping of SMEM in the future, add dump_stack()
+ * to identify the early callers of smem_get_entry()
+ * (which calls this function) and replace those calls
+ * with a new function that knows how to lookup the
+ * SMEM base address before SMEM has been probed.
+ */
+ phys_addr = smem_ram_phys;
+ size = smem_ram_size;
+
+ if (base >= phys_addr && base + offset < phys_addr + size) {
+ if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
+ (uintptr_t)smem_ram_base, offset)) {
+ SMEM_INFO("%s: overflow %p %x\n", __func__,
+ smem_ram_base, offset);
+ return NULL;
+ }
+
+ return smem_ram_base + offset;
+ } else {
+ return NULL;
+ }
+ }
+ for (i = 0; i < num_smem_areas; ++i) {
+ phys_addr = smem_areas[i].phys_addr;
+ size = smem_areas[i].size;
+
+ if (base < phys_addr || base + offset >= phys_addr + size)
+ continue;
+
+ if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
+ (uintptr_t)smem_areas[i].virt_addr, offset)) {
+ SMEM_INFO("%s: overflow %p %x\n", __func__,
+ smem_areas[i].virt_addr, offset);
+ return NULL;
+ }
+
+ return smem_areas[i].virt_addr + offset;
+ }
+
+ return NULL;
+}
+
+/**
+ * smem_virt_to_phys() - Convert SMEM address to physical address.
+ *
+ * @smem_address: Address of SMEM item (returned by smem_alloc(), etc)
+ * @returns: Physical address (or NULL if there is a failure)
+ *
+ * This function should only be used if an SMEM item needs to be handed
+ * off to a DMA engine. This function does not return -EPROBE_DEFER when the
+ * driver is not ready, since the caller should have obtained @smem_address
+ * from one of the other public APIs and received -EPROBE_DEFER at that time,
+ * if applicable.
+ */
+phys_addr_t smem_virt_to_phys(void *smem_address)
+{
+ phys_addr_t phys_addr = 0;
+ int i;
+ void *vend;
+
+ if (!smem_areas)
+ return phys_addr;
+
+ for (i = 0; i < num_smem_areas; ++i) {
+ vend = (void *)(smem_areas[i].virt_addr + smem_areas[i].size);
+
+ if (smem_address >= smem_areas[i].virt_addr &&
+ smem_address < vend) {
+ phys_addr = smem_address - smem_areas[i].virt_addr;
+ phys_addr += smem_areas[i].phys_addr;
+ break;
+ }
+ }
+
+ return phys_addr;
+}
+EXPORT_SYMBOL(smem_virt_to_phys);
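+
+/*
+ * Illustrative use (not part of this patch): handing an SMEM item off to a
+ * DMA engine. program_dma_descriptor() is a hypothetical stand-in for the
+ * engine-specific setup.
+ *
+ *	unsigned size;
+ *	void *item = smem_get_entry(id, &size, to_proc, flags);
+ *	phys_addr_t pa;
+ *
+ *	if (!IS_ERR_OR_NULL(item)) {
+ *		pa = smem_virt_to_phys(item);
+ *		if (pa)
+ *			program_dma_descriptor(pa, size);
+ *	}
+ */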
+
+/**
+ * __smem_get_entry_nonsecure - Get pointer and size of existing SMEM item
+ *
+ * @id: ID of SMEM item
+ * @size: Pointer to size variable for storing the result
+ * @skip_init_check: True means do not verify that SMEM has been initialized
+ * @use_rspinlock: True to use the remote spinlock
+ * @returns: Pointer to SMEM item or NULL if it doesn't exist
+ */
+static void *__smem_get_entry_nonsecure(unsigned id, unsigned *size,
+ bool skip_init_check, bool use_rspinlock)
+{
+ struct smem_shared *shared = smem_ram_base;
+ struct smem_heap_entry *toc = shared->heap_toc;
+ int use_spinlocks = spinlocks_initialized && use_rspinlock;
+	void *ret = NULL;
+ unsigned long flags = 0;
+
+ if (!skip_init_check && !smem_initialized_check())
+ return ret;
+
+ if (id >= SMEM_NUM_ITEMS)
+ return ret;
+
+ if (use_spinlocks)
+ remote_spin_lock_irqsave(&remote_spinlock, flags);
+ /* toc is in device memory and cannot be speculatively accessed */
+ if (toc[id].allocated) {
+ phys_addr_t phys_base;
+
+ *size = toc[id].size;
+ barrier();
+
+ phys_base = toc[id].reserved & BASE_ADDR_MASK;
+ if (!phys_base)
+ phys_base = smem_ram_phys;
+ ret = smem_phys_to_virt(phys_base, toc[id].offset);
+ } else {
+ *size = 0;
+ }
+ if (use_spinlocks)
+ remote_spin_unlock_irqrestore(&remote_spinlock, flags);
+
+ return ret;
+}
+
+/**
+ * __smem_get_entry_secure - Get pointer and size of existing SMEM item with
+ * security support
+ *
+ * @id: ID of SMEM item
+ * @size: Pointer to size variable for storing the result
+ * @to_proc: SMEM host that shares the item with apps
+ * @flags: Item attribute flags
+ * @skip_init_check: True means do not verify that SMEM has been initialized
+ * @use_rspinlock: True to use the remote spinlock
+ * @returns: Pointer to SMEM item or NULL if it doesn't exist
+ */
+static void *__smem_get_entry_secure(unsigned id,
+ unsigned *size,
+ unsigned to_proc,
+ unsigned flags,
+ bool skip_init_check,
+ bool use_rspinlock)
+{
+ struct smem_partition_header *hdr;
+ unsigned long lflags = 0;
+ void *item = NULL;
+ struct smem_partition_allocation_header *alloc_hdr;
+ uint32_t partition_num;
+ uint32_t a_hdr_size;
+ int rc;
+
+ SMEM_DBG("%s(%u, %u, %u, %u, %d, %d)\n", __func__, id, *size, to_proc,
+ flags, skip_init_check, use_rspinlock);
+
+ if (!skip_init_check && !smem_initialized_check())
+ return NULL;
+
+ if (id >= SMEM_NUM_ITEMS) {
+ SMEM_INFO("%s: invalid id %d\n", __func__, id);
+ return NULL;
+ }
+
+ if (!(flags & SMEM_ANY_HOST_FLAG) && to_proc >= NUM_SMEM_SUBSYSTEMS) {
+ SMEM_INFO("%s: id %u invalid to_proc %d\n", __func__, id,
+ to_proc);
+ return NULL;
+ }
+
+ if (flags & SMEM_ANY_HOST_FLAG || !partitions[to_proc].offset)
+ return __smem_get_entry_nonsecure(id, size, skip_init_check,
+ use_rspinlock);
+
+ partition_num = partitions[to_proc].partition_num;
+ hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
+ if (unlikely(!spinlocks_initialized)) {
+ rc = init_smem_remote_spinlock();
+ if (unlikely(rc)) {
+ SMEM_INFO(
+ "%s: id:%u remote spinlock init failed %d\n",
+ __func__, id, rc);
+ return NULL;
+ }
+ }
+ if (use_rspinlock)
+ remote_spin_lock_irqsave(&remote_spinlock, lflags);
+ if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
+ LOG_ERR(
+ "%s: SMEM corruption detected. Partition %d to %d at %p\n",
+ __func__,
+ partition_num,
+ to_proc,
+ hdr);
+ BUG();
+ }
+
+ if (flags & SMEM_ITEM_CACHED_FLAG) {
+ a_hdr_size = ALIGN(sizeof(*alloc_hdr),
+ partitions[to_proc].size_cacheline);
+ for (alloc_hdr = (void *)(hdr) + hdr->size - a_hdr_size;
+ (void *)(alloc_hdr) > (void *)(hdr) +
+ hdr->offset_free_cached;
+ alloc_hdr = (void *)(alloc_hdr) -
+ alloc_hdr->size - a_hdr_size) {
+ if (alloc_hdr->canary != SMEM_ALLOCATION_CANARY) {
+ LOG_ERR(
+ "%s: SMEM corruption detected. Partition %d to %d at %p\n",
+ __func__,
+ partition_num,
+ to_proc,
+ alloc_hdr);
+ BUG();
+
+ }
+ if (alloc_hdr->smem_type == id) {
+ /* 8 byte alignment to match legacy */
+ *size = ALIGN(alloc_hdr->size -
+ alloc_hdr->padding_data, 8);
+ item = (void *)(alloc_hdr) - alloc_hdr->size;
+ break;
+ }
+ }
+ } else {
+ for (alloc_hdr = (void *)(hdr) + sizeof(*hdr);
+ (void *)(alloc_hdr) < (void *)(hdr) +
+ hdr->offset_free_uncached;
+ alloc_hdr = (void *)(alloc_hdr) +
+ sizeof(*alloc_hdr) +
+ alloc_hdr->padding_hdr +
+ alloc_hdr->size) {
+ if (alloc_hdr->canary != SMEM_ALLOCATION_CANARY) {
+ LOG_ERR(
+ "%s: SMEM corruption detected. Partition %d to %d at %p\n",
+ __func__,
+ partition_num,
+ to_proc,
+ alloc_hdr);
+ BUG();
+
+ }
+ if (alloc_hdr->smem_type == id) {
+ /* 8 byte alignment to match legacy */
+ *size = ALIGN(alloc_hdr->size -
+ alloc_hdr->padding_data, 8);
+ item = (void *)(alloc_hdr) +
+ sizeof(*alloc_hdr) +
+ alloc_hdr->padding_hdr;
+ break;
+ }
+ }
+ }
+ if (use_rspinlock)
+ remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
+
+ return item;
+}
+
+static void *__smem_find(unsigned id, unsigned size_in, bool skip_init_check)
+{
+ unsigned size;
+ void *ptr;
+
+ ptr = __smem_get_entry_nonsecure(id, &size, skip_init_check, true);
+ if (!ptr)
+		return NULL;
+
+ size_in = ALIGN(size_in, 8);
+ if (size_in != size) {
+ SMEM_INFO("smem_find(%u, %u): wrong size %u\n",
+ id, size_in, size);
+		return NULL;
+ }
+
+ return ptr;
+}
+
+/**
+ * smem_find - Find existing item with security support
+ *
+ * @id: ID of SMEM item
+ * @size_in: Size of the SMEM item
+ * @to_proc: SMEM host that shares the item with apps
+ * @flags: Item attribute flags
+ * @returns: Pointer to SMEM item, NULL if it doesn't exist, or -EPROBE_DEFER
+ * if the driver is not ready
+ */
+void *smem_find(unsigned id, unsigned size_in, unsigned to_proc, unsigned flags)
+{
+ unsigned size;
+ void *ptr;
+
+ SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, size_in, to_proc,
+ flags);
+
+ /*
+	 * Handle the circular dependency between SMEM and software-implemented
+	 * remote spinlocks. SMEM must initialize the remote spinlocks in
+	 * probe() before probe() completes. EPROBE_DEFER handling will not
+	 * resolve this code path, so the spinlock item must be treated as a
+	 * special case here.
+ */
+ if (!is_probe_done() && id != SMEM_SPINLOCK_ARRAY)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ ptr = smem_get_entry(id, &size, to_proc, flags);
+ if (!ptr)
+		return NULL;
+
+ size_in = ALIGN(size_in, 8);
+ if (size_in != size) {
+ SMEM_INFO("smem_find(%u, %u, %u, %u): wrong size %u\n",
+ id, size_in, to_proc, flags, size);
+		return NULL;
+ }
+
+ return ptr;
+}
+EXPORT_SYMBOL(smem_find);
+
+/**
+ * alloc_item_nonsecure - Allocate an SMEM item in the nonsecure partition
+ *
+ * @id: ID of SMEM item
+ * @size_in: Size to allocate
+ * @returns: Pointer to SMEM item or NULL for error
+ *
+ * Assumes the id parameter is valid and does not already exist. Assumes
+ * size_in is already adjusted for alignment, if necessary. Requires the
+ * remote spinlock to already be locked.
+ */
+static void *alloc_item_nonsecure(unsigned id, unsigned size_in)
+{
+ void *smem_base = smem_ram_base;
+ struct smem_shared *shared = smem_base;
+ struct smem_heap_entry *toc = shared->heap_toc;
+ void *ret = NULL;
+
+ if (shared->heap_info.heap_remaining >= size_in) {
+ toc[id].offset = shared->heap_info.free_offset;
+ toc[id].size = size_in;
+ /*
+ * wmb() is necessary to ensure the allocation data is
+ * consistent before setting the allocated flag to prevent race
+ * conditions with remote processors
+ */
+ wmb();
+ toc[id].allocated = 1;
+
+ shared->heap_info.free_offset += size_in;
+ shared->heap_info.heap_remaining -= size_in;
+ ret = smem_base + toc[id].offset;
+ /*
+ * wmb() is necessary to ensure the heap data is consistent
+ * before continuing to prevent race conditions with remote
+ * processors
+ */
+ wmb();
+ } else {
+ SMEM_INFO("%s: id %u not enough memory %u (required %u)\n",
+ __func__, id, shared->heap_info.heap_remaining,
+ size_in);
+ }
+
+ return ret;
+}
+
+/**
+ * alloc_item_secure - Allocate an SMEM item in a secure partition
+ *
+ * @id: ID of SMEM item
+ * @size_in: Size to allocate
+ * @to_proc: SMEM host that shares the item with apps
+ * @flags: Item attribute flags
+ * @returns: Pointer to SMEM item or NULL for error
+ *
+ * Assumes the id parameter is valid and does not already exist. Assumes
+ * size_in is the raw size requested by the client. Assumes to_proc is a valid
+ * host, and a valid partition to that host exists. Requires the remote
+ * spinlock to already be locked.
+ */
+static void *alloc_item_secure(unsigned id, unsigned size_in, unsigned to_proc,
+ unsigned flags)
+{
+ void *smem_base = smem_ram_base;
+ struct smem_partition_header *hdr;
+ struct smem_partition_allocation_header *alloc_hdr;
+ uint32_t a_hdr_size;
+ uint32_t a_data_size;
+ uint32_t size_cacheline;
+ uint32_t free_space;
+ uint32_t partition_num;
+ void *ret = NULL;
+
+ hdr = smem_base + partitions[to_proc].offset;
+ partition_num = partitions[to_proc].partition_num;
+
+ if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
+ LOG_ERR(
+ "%s: SMEM corruption detected. Partition %d to %d at %p\n",
+ __func__,
+ partition_num,
+ to_proc,
+ hdr);
+ BUG();
+ }
+
+ size_cacheline = partitions[to_proc].size_cacheline;
+ free_space = hdr->offset_free_cached -
+ hdr->offset_free_uncached;
+
+ if (flags & SMEM_ITEM_CACHED_FLAG) {
+ a_hdr_size = ALIGN(sizeof(*alloc_hdr), size_cacheline);
+ a_data_size = ALIGN(size_in, size_cacheline);
+ if (free_space < a_hdr_size + a_data_size) {
+ SMEM_INFO(
+ "%s: id %u not enough memory %u (required %u)\n",
+ __func__, id, free_space,
+ a_hdr_size + a_data_size);
+ return ret;
+ }
+ alloc_hdr = (void *)(hdr) + hdr->offset_free_cached -
+ a_hdr_size;
+ alloc_hdr->canary = SMEM_ALLOCATION_CANARY;
+ alloc_hdr->smem_type = id;
+ alloc_hdr->size = a_data_size;
+ alloc_hdr->padding_data = a_data_size - size_in;
+ alloc_hdr->padding_hdr = a_hdr_size - sizeof(*alloc_hdr);
+ hdr->offset_free_cached = hdr->offset_free_cached -
+ a_hdr_size - a_data_size;
+ ret = (void *)(alloc_hdr) - a_data_size;
+ /*
+ * The SMEM protocol currently does not support cacheable
+ * areas within the smem region, but if it ever does in the
+ * future, then cache management needs to be done here.
+ * The area of memory this item is allocated from will need to
+ * be dynamically made cachable, and a cache flush of the
+ * allocation header using __cpuc_flush_dcache_area and
+ * outer_flush_area will need to be done.
+ */
+ } else {
+ a_hdr_size = sizeof(*alloc_hdr);
+ a_data_size = ALIGN(size_in, 8);
+ if (free_space < a_hdr_size + a_data_size) {
+ SMEM_INFO(
+ "%s: id %u not enough memory %u (required %u)\n",
+ __func__, id, free_space,
+ a_hdr_size + a_data_size);
+ return ret;
+ }
+ alloc_hdr = (void *)(hdr) + hdr->offset_free_uncached;
+ alloc_hdr->canary = SMEM_ALLOCATION_CANARY;
+ alloc_hdr->smem_type = id;
+ alloc_hdr->size = a_data_size;
+ alloc_hdr->padding_data = a_data_size - size_in;
+ alloc_hdr->padding_hdr = a_hdr_size - sizeof(*alloc_hdr);
+ hdr->offset_free_uncached = hdr->offset_free_uncached +
+ a_hdr_size + a_data_size;
+ ret = alloc_hdr + 1;
+ }
+ /*
+ * wmb() is necessary to ensure the heap and allocation data is
+ * consistent before continuing to prevent race conditions with remote
+ * processors
+ */
+ wmb();
+
+ return ret;
+}
+
+/**
+ * smem_alloc - Find an existing item, otherwise allocate it with security
+ * support
+ *
+ * @id: ID of SMEM item
+ * @size_in: Size of the SMEM item
+ * @to_proc: SMEM host that shares the item with apps
+ * @flags: Item attribute flags
+ * @returns: Pointer to SMEM item, NULL if it couldn't be found/allocated,
+ * or -EPROBE_DEFER if the driver is not ready
+ */
+void *smem_alloc(unsigned id, unsigned size_in, unsigned to_proc,
+ unsigned flags)
+{
+ unsigned long lflags;
+ void *ret = NULL;
+ int rc;
+ unsigned size_out;
+ unsigned a_size_in;
+
+ SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, size_in, to_proc,
+ flags);
+
+ if (!is_probe_done())
+ return ERR_PTR(-EPROBE_DEFER);
+
+ if (!smem_initialized_check())
+ return NULL;
+
+ if (id >= SMEM_NUM_ITEMS) {
+ SMEM_INFO("%s: invalid id %u\n", __func__, id);
+ return NULL;
+ }
+
+ if (!(flags & SMEM_ANY_HOST_FLAG) && to_proc >= NUM_SMEM_SUBSYSTEMS) {
+ SMEM_INFO("%s: invalid to_proc %u for id %u\n", __func__,
+ to_proc, id);
+ return NULL;
+ }
+
+ if (unlikely(!spinlocks_initialized)) {
+ rc = init_smem_remote_spinlock();
+ if (unlikely(rc)) {
+ SMEM_INFO("%s: id:%u remote spinlock init failed %d\n",
+ __func__, id, rc);
+ return NULL;
+ }
+ }
+
+ a_size_in = ALIGN(size_in, 8);
+ remote_spin_lock_irqsave(&remote_spinlock, lflags);
+
+ ret = __smem_get_entry_secure(id, &size_out, to_proc, flags, true,
+ false);
+ if (ret) {
+ SMEM_INFO("%s: %u already allocated\n", __func__, id);
+ if (a_size_in == size_out) {
+ remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
+ return ret;
+ } else {
+ remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
+ SMEM_INFO("%s: id %u wrong size %u (expected %u)\n",
+ __func__, id, size_out, a_size_in);
+ return NULL;
+ }
+ }
+
+ if (id > SMEM_FIXED_ITEM_LAST) {
+ SMEM_INFO("%s: allocating %u size %u to_proc %u flags %u\n",
+ __func__, id, size_in, to_proc, flags);
+ if (flags & SMEM_ANY_HOST_FLAG || !partitions[to_proc].offset)
+ ret = alloc_item_nonsecure(id, a_size_in);
+ else
+ ret = alloc_item_secure(id, size_in, to_proc, flags);
+
+ } else {
+ SMEM_INFO("%s: attempted to allocate non-dynamic item %u\n",
+ __func__, id);
+ }
+
+ remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
+ return ret;
+}
+EXPORT_SYMBOL(smem_alloc);
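+
+/*
+ * Illustrative client pattern (not part of this patch): callers must
+ * distinguish all three return classes. SMEM_ID_VENDOR0 and the size are
+ * stand-ins only. An ERR_PTR return (-EPROBE_DEFER) means "retry after
+ * SMEM probes"; NULL means the item does not exist and could not be
+ * allocated.
+ *
+ *	void *item = smem_alloc(SMEM_ID_VENDOR0, 64, SMEM_MODEM, 0);
+ *
+ *	if (IS_ERR(item))
+ *		return PTR_ERR(item);
+ *	if (!item)
+ *		return -ENOMEM;
+ */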
+
+/**
+ * smem_get_entry - Get existing item with security support
+ *
+ * @id: ID of SMEM item
+ * @size: Pointer to size variable for storing the result
+ * @to_proc: SMEM host that shares the item with apps
+ * @flags: Item attribute flags
+ * @returns: Pointer to SMEM item, NULL if it doesn't exist, or -EPROBE_DEFER
+ * if the driver isn't ready
+ */
+void *smem_get_entry(unsigned id, unsigned *size, unsigned to_proc,
+ unsigned flags)
+{
+ SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, *size, to_proc, flags);
+
+ /*
+	 * Handle the circular dependency between SMEM and software-implemented
+	 * remote spinlocks. SMEM must initialize the remote spinlocks in
+	 * probe() before probe() completes. EPROBE_DEFER handling will not
+	 * resolve this code path, so the spinlock item must be treated as a
+	 * special case here.
+ */
+ if (!is_probe_done() && id != SMEM_SPINLOCK_ARRAY)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return __smem_get_entry_secure(id, size, to_proc, flags, false, true);
+}
+EXPORT_SYMBOL(smem_get_entry);
+
+/**
+ * smem_get_entry_no_rlock - Get existing item without using remote spinlock
+ *
+ * @id: ID of SMEM item
+ * @size_out: Pointer to size variable for storing the result
+ * @to_proc: SMEM host that shares the item with apps
+ * @flags: Item attribute flags
+ * @returns: Pointer to SMEM item, NULL if it doesn't exist, or -EPROBE_DEFER
+ * if the driver isn't ready
+ *
+ * This function does not lock the remote spinlock and should only be used in
+ * failure-recovery cases such as retrieving the subsystem failure reason during
+ * subsystem restart.
+ */
+void *smem_get_entry_no_rlock(unsigned id, unsigned *size_out, unsigned to_proc,
+ unsigned flags)
+{
+ if (!is_probe_done())
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return __smem_get_entry_secure(id, size_out, to_proc, flags, false,
+ false);
+}
+EXPORT_SYMBOL(smem_get_entry_no_rlock);
+
+/**
+ * smem_get_remote_spinlock - Remote spinlock pointer for unit testing.
+ *
+ * @returns: pointer to SMEM remote spinlock
+ */
+remote_spinlock_t *smem_get_remote_spinlock(void)
+{
+ if (unlikely(!spinlocks_initialized))
+ init_smem_remote_spinlock();
+ return &remote_spinlock;
+}
+EXPORT_SYMBOL(smem_get_remote_spinlock);
+
+/**
+ * smem_get_free_space() - Get the available allocation free space for a
+ * partition
+ *
+ * @to_proc: remote SMEM host. Determines the applicable partition
+ * @returns: size in bytes available to allocate
+ *
+ * Helper function for SMD so that SMD only scans the channel allocation
+ * table for a partition when it is reasonably certain that a channel has
+ * actually been created, because scanning can be expensive. Creating a channel
+ * will consume some of the free space in a partition, so SMD can compare the
+ * last free space size against the current free space size to determine if
+ * a channel may have been created. SMD can't do this directly, because the
+ * necessary partition internals are restricted to just SMEM.
+ */
+unsigned smem_get_free_space(unsigned to_proc)
+{
+ struct smem_partition_header *hdr;
+ struct smem_shared *shared;
+
+ if (to_proc >= NUM_SMEM_SUBSYSTEMS) {
+ pr_err("%s: invalid to_proc:%d\n", __func__, to_proc);
+ return UINT_MAX;
+ }
+
+ if (partitions[to_proc].offset) {
+ if (unlikely(OVERFLOW_ADD_UNSIGNED(uintptr_t,
+ (uintptr_t)smem_areas[0].virt_addr,
+ partitions[to_proc].offset))) {
+ pr_err("%s: unexpected overflow detected\n", __func__);
+ return UINT_MAX;
+ }
+ hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
+ return hdr->offset_free_cached - hdr->offset_free_uncached;
+ } else {
+ shared = smem_ram_base;
+ return shared->heap_info.heap_remaining;
+ }
+}
+EXPORT_SYMBOL(smem_get_free_space);
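+
+/*
+ * Illustrative SMD-side pattern (a sketch of the intended use described in
+ * the kerneldoc above; scan_alloc_table() is hypothetical):
+ *
+ *	static unsigned last_free = UINT_MAX;
+ *	unsigned free_space = smem_get_free_space(remote_pid);
+ *
+ *	if (free_space != last_free) {
+ *		last_free = free_space;
+ *		scan_alloc_table(remote_pid);
+ *	}
+ */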
+
+/**
+ * smem_get_version() - Get the smem user version number
+ *
+ * @idx: SMEM user idx in SMEM_VERSION_INFO table.
+ * @returns: smem version number on success, otherwise zero.
+ */
+unsigned smem_get_version(unsigned idx)
+{
+ int *version_array;
+
+	if (idx >= 32) {
+ pr_err("%s: invalid idx:%d\n", __func__, idx);
+ return 0;
+ }
+
+ version_array = __smem_find(SMEM_VERSION_INFO, SMEM_VERSION_INFO_SIZE,
+ true);
+ if (version_array == NULL)
+ return 0;
+
+ return version_array[idx];
+}
+EXPORT_SYMBOL(smem_get_version);
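+
+/*
+ * Note: the version word appears to pack the major version in the upper 16
+ * bits; smem_initialized_check() below compares the entry at
+ * MODEM_SBL_VERSION_INDEX against SMEM_VERSION << 16, so with
+ * SMEM_VERSION = 0x000B a matching entry reads 0x000B0000.
+ */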
+
+/**
+ * init_smem_remote_spinlock - Reentrant remote spinlock initialization
+ *
+ * @returns: success or error code for failure
+ */
+static int init_smem_remote_spinlock(void)
+{
+ int rc = 0;
+
+ /*
+ * Optimistic locking. Init only needs to be done once by the first
+ * caller. After that, serializing inits between different callers
+ * is unnecessary. The second check after the lock ensures init
+ * wasn't previously completed by someone else before the lock could
+ * be grabbed.
+ */
+ if (!spinlocks_initialized) {
+ mutex_lock(&spinlock_init_lock);
+ if (!spinlocks_initialized) {
+ rc = remote_spin_lock_init(&remote_spinlock,
+ SMEM_SPINLOCK_SMEM_ALLOC);
+ if (!rc)
+ spinlocks_initialized = 1;
+ }
+ mutex_unlock(&spinlock_init_lock);
+ }
+ return rc;
+}
+
+/**
+ * smem_initialized_check - Reentrant check that smem has been initialized
+ *
+ * @returns: true if initialized, false if not.
+ */
+bool smem_initialized_check(void)
+{
+ static int checked;
+ static int is_inited;
+ unsigned long flags;
+ struct smem_shared *smem;
+
+ if (likely(checked)) {
+ if (unlikely(!is_inited))
+ LOG_ERR("%s: smem not initialized\n", __func__);
+ return is_inited;
+ }
+
+ spin_lock_irqsave(&smem_init_check_lock, flags);
+ if (checked) {
+ spin_unlock_irqrestore(&smem_init_check_lock, flags);
+ if (unlikely(!is_inited))
+ LOG_ERR("%s: smem not initialized\n", __func__);
+ return is_inited;
+ }
+
+ smem = smem_ram_base;
+
+ if (smem->heap_info.initialized != 1)
+ goto failed;
+ if (smem->heap_info.reserved != 0)
+ goto failed;
+
+ /*
+ * The Modem SBL is now the Master SBL version and is required to
+ * pre-initialize SMEM and fill in any necessary configuration
+ * structures. Without the extra configuration data, the SMEM driver
+ * cannot be properly initialized.
+ */
+ if (smem_get_version(MODEM_SBL_VERSION_INDEX) != SMEM_VERSION << 16) {
+ pr_err("%s: SBL version not correct\n", __func__);
+ goto failed;
+ }
+
+ is_inited = 1;
+ checked = 1;
+ spin_unlock_irqrestore(&smem_init_check_lock, flags);
+ return is_inited;
+
+failed:
+ is_inited = 0;
+ checked = 1;
+ spin_unlock_irqrestore(&smem_init_check_lock, flags);
+ LOG_ERR(
+ "%s: shared memory needs to be initialized by SBL before booting\n",
+ __func__);
+ return is_inited;
+}
+EXPORT_SYMBOL(smem_initialized_check);
+
+static int restart_notifier_cb(struct notifier_block *this,
+ unsigned long code,
+ void *data)
+{
+ if (code == SUBSYS_AFTER_SHUTDOWN) {
+ struct restart_notifier_block *notifier;
+
+ notifier = container_of(this,
+ struct restart_notifier_block, nb);
+ SMEM_INFO("%s: ssrestart for processor %d ('%s')\n",
+ __func__, notifier->processor,
+ notifier->name);
+
+ remote_spin_release(&remote_spinlock, notifier->processor);
+ remote_spin_release_all(notifier->processor);
+
+ if (smem_ramdump_dev) {
+ int ret;
+
+ SMEM_DBG("%s: saving ramdump\n", __func__);
+ /*
+ * XPU protection does not currently allow the
+ * auxiliary memory regions to be dumped. If this
+ * changes, then num_smem_areas + 1 should be passed
+ * into do_elf_ramdump() to dump all regions.
+ */
+ ret = do_elf_ramdump(smem_ramdump_dev,
+ smem_ramdump_segments, 1);
+ if (ret < 0)
+ LOG_ERR("%s: unable to dump smem %d\n",
+ __func__, ret);
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+
+static __init int modem_restart_late_init(void)
+{
+ int i;
+ void *handle;
+ struct restart_notifier_block *nb;
+
+ smem_ramdump_dev = create_ramdump_device("smem", NULL);
+ if (IS_ERR_OR_NULL(smem_ramdump_dev)) {
+ LOG_ERR("%s: Unable to create smem ramdump device.\n",
+ __func__);
+ smem_ramdump_dev = NULL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
+ nb = &restart_notifiers[i];
+ handle = subsys_notif_register_notifier(nb->name, &nb->nb);
+ SMEM_DBG("%s: registering notif for '%s', handle=%p\n",
+ __func__, nb->name, handle);
+ }
+
+ return 0;
+}
+late_initcall(modem_restart_late_init);
+
+int smem_module_init_notifier_register(struct notifier_block *nb)
+{
+ int ret;
+ if (!nb)
+ return -EINVAL;
+ mutex_lock(&smem_module_init_notifier_lock);
+ ret = raw_notifier_chain_register(&smem_module_init_notifier_list, nb);
+ if (smem_module_inited)
+ nb->notifier_call(nb, 0, NULL);
+ mutex_unlock(&smem_module_init_notifier_lock);
+ return ret;
+}
+EXPORT_SYMBOL(smem_module_init_notifier_register);
+
+int smem_module_init_notifier_unregister(struct notifier_block *nb)
+{
+ int ret;
+ if (!nb)
+ return -EINVAL;
+ mutex_lock(&smem_module_init_notifier_lock);
+ ret = raw_notifier_chain_unregister(&smem_module_init_notifier_list,
+ nb);
+ mutex_unlock(&smem_module_init_notifier_lock);
+ return ret;
+}
+EXPORT_SYMBOL(smem_module_init_notifier_unregister);
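As a minimal sketch of how a dependent driver might consume this notifier API
(the callback and block names here are hypothetical, not part of the patch):

	static int my_smem_ready_cb(struct notifier_block *nb,
				    unsigned long action, void *data)
	{
		/* SMEM module init has completed; SMEM-dependent
		 * setup is now safe to start.
		 */
		return NOTIFY_OK;
	}

	static struct notifier_block my_smem_nb = {
		.notifier_call = my_smem_ready_cb,
	};

	/* In the dependent driver's init path: */
	smem_module_init_notifier_register(&my_smem_nb);

Note that registering after smem_module_inited is set invokes the callback
immediately under the lock, so late registrants are not missed.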
+
+static void smem_module_init_notify(uint32_t state, void *data)
+{
+ mutex_lock(&smem_module_init_notifier_lock);
+ smem_module_inited = 1;
+ raw_notifier_call_chain(&smem_module_init_notifier_list,
+ state, data);
+ mutex_unlock(&smem_module_init_notifier_lock);
+}
+
+/**
+ * smem_init_security_partition - Init local structures for a secured smem
+ * partition that has apps as one of the hosts
+ *
+ * @entry: Entry in the security TOC for the partition to init
+ * @num: Partition ID
+ *
+ * Initialize local data structures to point to a secured smem partition
+ * that is accessible by apps and another processor. Assumes that one of the
+ * listed hosts is apps. Verifies that the partition is valid, otherwise it
+ * is skipped. Checks for memory corruption and will BUG() if detected. Assumes
+ * smem_areas is already initialized and that smem_areas[0] corresponds to the
+ * smem region with the secured partitions.
+ */
+static void smem_init_security_partition(struct smem_toc_entry *entry,
+ uint32_t num)
+{
+ uint16_t remote_host;
+ struct smem_partition_header *hdr;
+
+ if (!entry->offset) {
+ SMEM_INFO("Skipping smem partition %d - bad offset\n", num);
+ return;
+ }
+ if (!entry->size) {
+ SMEM_INFO("Skipping smem partition %d - bad size\n", num);
+ return;
+ }
+ if (!entry->size_cacheline) {
+ SMEM_INFO("Skipping smem partition %d - bad cacheline\n", num);
+ return;
+ }
+
+ if (entry->host0 == SMEM_APPS)
+ remote_host = entry->host1;
+ else
+ remote_host = entry->host0;
+
+ if (remote_host >= NUM_SMEM_SUBSYSTEMS) {
+ SMEM_INFO("Skipping smem partition %d - bad remote:%d\n", num,
+ remote_host);
+ return;
+ }
+ if (partitions[remote_host].offset) {
+ SMEM_INFO("Skipping smem partition %d - duplicate of %d\n", num,
+ partitions[remote_host].partition_num);
+ return;
+ }
+
+ hdr = smem_areas[0].virt_addr + entry->offset;
+
+ if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
+ LOG_ERR("Smem partition %d hdr magic is bad\n", num);
+ BUG();
+ }
+ if (!hdr->size) {
+ LOG_ERR("Smem partition %d size is 0\n", num);
+ BUG();
+ }
+ if (hdr->offset_free_uncached > hdr->size) {
+ LOG_ERR("Smem partition %d uncached heap exceeds size\n", num);
+ BUG();
+ }
+ if (hdr->offset_free_cached > hdr->size) {
+ LOG_ERR("Smem partition %d cached heap exceeds size\n", num);
+ BUG();
+ }
+ if (hdr->host0 != SMEM_APPS && hdr->host1 != SMEM_APPS) {
+ LOG_ERR("Smem partition %d hosts don't match TOC\n", num);
+ BUG();
+ }
+ if (hdr->host0 != remote_host && hdr->host1 != remote_host) {
+ LOG_ERR("Smem partition %d hosts don't match TOC\n", num);
+ BUG();
+ }
+
+ partitions[remote_host].partition_num = num;
+ partitions[remote_host].offset = entry->offset;
+ partitions[remote_host].size_cacheline = entry->size_cacheline;
+ SMEM_INFO("Partition %d offset:%x remote:%d\n", num, entry->offset,
+ remote_host);
+}
+
+/**
+ * smem_init_security - Init local support for secured smem
+ *
+ * Looks for a valid security TOC, and if one is found, parses it looking for
+ * partitions that apps can access. If any such partitions are found, do the
+ * required local initialization to support them. Assumes smem_areas is inited
+ * and smem_area[0] corresponds to the smem region with the TOC.
+ */
+static void smem_init_security(void)
+{
+ struct smem_toc *toc;
+ uint32_t i;
+
+ SMEM_DBG("%s\n", __func__);
+
+ toc = smem_areas[0].virt_addr + smem_areas[0].size - 4 * 1024;
+
+ if (toc->identifier != SMEM_TOC_IDENTIFIER) {
+ LOG_ERR("%s failed: invalid TOC magic\n", __func__);
+ return;
+ }
+
+ for (i = 0; i < toc->num_entries; ++i) {
+ SMEM_DBG("Partition %d host0:%d host1:%d\n", i,
+ toc->entry[i].host0,
+ toc->entry[i].host1);
+
+ if (toc->entry[i].host0 == SMEM_APPS ||
+ toc->entry[i].host1 == SMEM_APPS)
+ smem_init_security_partition(&toc->entry[i], i);
+ }
+
+ SMEM_DBG("%s done\n", __func__);
+}
+
+/**
+ * smem_init_target_info - Init smem target information
+ *
+ * @info_addr : smem target info physical address.
+ * @size : size of the smem target info structure.
+ *
+ * This function is used to initialize the smem_targ_info structure and checks
+ * for valid identifier, if identifier is valid initialize smem variables.
+ */
+static int smem_init_target_info(phys_addr_t info_addr, resource_size_t size)
+{
+ struct smem_targ_info_type *smem_targ_info;
+ void *smem_targ_info_addr;
+
+ smem_targ_info_addr = ioremap_nocache(info_addr, size);
+ if (!smem_targ_info_addr) {
+ LOG_ERR("%s: failed ioremap_nocache() of addr:%pa size:%pa\n",
+ __func__, &info_addr, &size);
+ return -ENODEV;
+ }
+ smem_targ_info =
+ (struct smem_targ_info_type __iomem *)smem_targ_info_addr;
+
+ if (smem_targ_info->identifier != SMEM_TARG_INFO_IDENTIFIER) {
+ LOG_ERR("%s failed: invalid TARGET INFO magic\n", __func__);
+ iounmap(smem_targ_info_addr);
+ return -ENODEV;
+ }
+ smem_ram_phys = smem_targ_info->phys_base_addr;
+ smem_ram_size = smem_targ_info->size;
+ iounmap(smem_targ_info_addr);
+ return 0;
+}
+
+static int msm_smem_probe(struct platform_device *pdev)
+{
+ char *key;
+ struct resource *r;
+ phys_addr_t aux_mem_base;
+ resource_size_t aux_mem_size;
+ int temp_string_size = 11; /* max 3 digit count */
+ char temp_string[temp_string_size];
+ int ret;
+ struct ramdump_segment *ramdump_segments_tmp = NULL;
+ struct smem_area *smem_areas_tmp = NULL;
+ int smem_idx = 0;
+ bool security_enabled;
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "smem_targ_info_imem");
+ if (r) {
+ if (smem_init_target_info(r->start, resource_size(r)))
+ goto smem_targ_info_legacy;
+ goto smem_targ_info_done;
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "smem_targ_info_reg");
+ if (r) {
+ void *reg_base_addr;
+ uint64_t base_addr;
+ reg_base_addr = ioremap_nocache(r->start, resource_size(r));
+ if (!reg_base_addr)
+ goto smem_targ_info_legacy;
+ base_addr = (uint32_t)readl_relaxed(reg_base_addr);
+ base_addr |=
+ ((uint64_t)readl_relaxed(reg_base_addr + 0x4) << 32);
+ iounmap(reg_base_addr);
+ if ((base_addr == 0) || ((base_addr >> 32) != 0)) {
+ SMEM_INFO("%s: Invalid SMEM address\n", __func__);
+ goto smem_targ_info_legacy;
+ }
+ if (smem_init_target_info(base_addr,
+ sizeof(struct smem_targ_info_type)))
+ goto smem_targ_info_legacy;
+ goto smem_targ_info_done;
+ }
+
+smem_targ_info_legacy:
+ SMEM_INFO("%s: reading dt-specified SMEM address\n", __func__);
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smem");
+ if (r) {
+ smem_ram_size = resource_size(r);
+ smem_ram_phys = r->start;
+ }
+
+smem_targ_info_done:
+ if (!smem_ram_phys || !smem_ram_size) {
+ LOG_ERR("%s: Missing SMEM TARGET INFO\n", __func__);
+ return -ENODEV;
+ }
+
+ smem_ram_base = ioremap_nocache(smem_ram_phys, smem_ram_size);
+
+ if (!smem_ram_base) {
+ LOG_ERR("%s: ioremap_nocache() of addr:%pa size: %pa\n",
+ __func__,
+ &smem_ram_phys, &smem_ram_size);
+ return -ENODEV;
+ }
+
+ if (!smem_initialized_check())
+ return -ENODEV;
+
+ /*
+ * The software implementation requires smem_find(), which needs
+ * smem_ram_base to be initialized. The remote spinlock item is
+ * guaranteed to be allocated by the bootloader, so this is the
+ * safest and earliest place to init the spinlock.
+ */
+ ret = init_smem_remote_spinlock();
+ if (ret) {
+ LOG_ERR("%s: remote spinlock init failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ key = "irq-reg-base";
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+ if (!r) {
+ LOG_ERR("%s: missing '%s'\n", __func__, key);
+ return -ENODEV;
+ }
+
+ num_smem_areas = 1;
+ while (1) {
+ scnprintf(temp_string, temp_string_size, "aux-mem%d",
+ num_smem_areas);
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ temp_string);
+ if (!r)
+ break;
+
+ ++num_smem_areas;
+ if (num_smem_areas > 999) {
+ LOG_ERR("%s: max num aux mem regions reached\n",
+ __func__);
+ break;
+ }
+ }
+ /* Initialize main SMEM region and SSR ramdump region */
+ smem_areas_tmp = kmalloc_array(num_smem_areas, sizeof(struct smem_area),
+ GFP_KERNEL);
+ if (!smem_areas_tmp) {
+ LOG_ERR("%s: smem areas kmalloc failed\n", __func__);
+ ret = -ENOMEM;
+ goto free_smem_areas;
+ }
+
+ ramdump_segments_tmp = kmalloc_array(num_smem_areas,
+ sizeof(struct ramdump_segment), GFP_KERNEL);
+ if (!ramdump_segments_tmp) {
+ LOG_ERR("%s: ramdump segment kmalloc failed\n", __func__);
+ ret = -ENOMEM;
+ goto free_smem_areas;
+ }
+ smem_areas_tmp[smem_idx].phys_addr = smem_ram_phys;
+ smem_areas_tmp[smem_idx].size = smem_ram_size;
+ smem_areas_tmp[smem_idx].virt_addr = smem_ram_base;
+
+ ramdump_segments_tmp[smem_idx].address = smem_ram_phys;
+ ramdump_segments_tmp[smem_idx].size = smem_ram_size;
+ ++smem_idx;
+
+ /* Configure auxiliary SMEM regions */
+ while (1) {
+ scnprintf(temp_string, temp_string_size, "aux-mem%d",
+ smem_idx);
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ temp_string);
+ if (!r)
+ break;
+ aux_mem_base = r->start;
+ aux_mem_size = resource_size(r);
+
+ ramdump_segments_tmp[smem_idx].address = aux_mem_base;
+ ramdump_segments_tmp[smem_idx].size = aux_mem_size;
+
+ smem_areas_tmp[smem_idx].phys_addr = aux_mem_base;
+ smem_areas_tmp[smem_idx].size = aux_mem_size;
+ smem_areas_tmp[smem_idx].virt_addr = ioremap_nocache(
+ (unsigned long)(smem_areas_tmp[smem_idx].phys_addr),
+ smem_areas_tmp[smem_idx].size);
+ SMEM_DBG("%s: %s = %pa %pa -> %p", __func__, temp_string,
+ &aux_mem_base, &aux_mem_size,
+ smem_areas_tmp[smem_idx].virt_addr);
+
+ if (!smem_areas_tmp[smem_idx].virt_addr) {
+ LOG_ERR("%s: ioremap_nocache() of addr:%pa size: %pa\n",
+ __func__,
+ &smem_areas_tmp[smem_idx].phys_addr,
+ &smem_areas_tmp[smem_idx].size);
+ ret = -ENOMEM;
+ goto free_smem_areas;
+ }
+
+ if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
+ (uintptr_t)smem_areas_tmp[smem_idx].virt_addr,
+ smem_areas_tmp[smem_idx].size)) {
+ LOG_ERR(
+ "%s: invalid virtual address block %i: %p:%pa\n",
+ __func__, smem_idx,
+ smem_areas_tmp[smem_idx].virt_addr,
+ &smem_areas_tmp[smem_idx].size);
+ ++smem_idx;
+ ret = -EINVAL;
+ goto free_smem_areas;
+ }
+
+ ++smem_idx;
+ if (smem_idx > 999) {
+ LOG_ERR("%s: max num aux mem regions reached\n",
+ __func__);
+ break;
+ }
+ }
+
+ smem_areas = smem_areas_tmp;
+ smem_ramdump_segments = ramdump_segments_tmp;
+
+ key = "qcom,mpu-enabled";
+ security_enabled = of_property_read_bool(pdev->dev.of_node, key);
+ if (security_enabled) {
+ SMEM_INFO("smem security enabled\n");
+ smem_init_security();
+ }
+
+ probe_done = true;
+
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (ret)
+ LOG_ERR("%s: of_platform_populate failed %d\n", __func__, ret);
+
+ return 0;
+
+free_smem_areas:
+ for (smem_idx = smem_idx - 1; smem_idx >= 1; --smem_idx)
+ iounmap(smem_areas_tmp[smem_idx].virt_addr);
+
+ num_smem_areas = 0;
+ kfree(ramdump_segments_tmp);
+ kfree(smem_areas_tmp);
+ return ret;
+}
+
+static struct of_device_id msm_smem_match_table[] = {
+ { .compatible = "qcom,smem" },
+ {},
+};
+
+static struct platform_driver msm_smem_driver = {
+ .probe = msm_smem_probe,
+ .driver = {
+ .name = "msm_smem",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_smem_match_table,
+ },
+};
+
+int __init msm_smem_init(void)
+{
+ static bool registered;
+ int rc;
+
+ if (registered)
+ return 0;
+
+ registered = true;
+
+ smem_ipc_log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smem", 0);
+ if (!smem_ipc_log_ctx) {
+ pr_err("%s: unable to create logging context\n", __func__);
+ msm_smem_debug_mask = 0;
+ }
+
+ rc = platform_driver_register(&msm_smem_driver);
+ if (rc) {
+ LOG_ERR("%s: msm_smem_driver register failed %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ smem_module_init_notify(0, NULL);
+
+ return 0;
+}
+
+arch_initcall(msm_smem_init);
diff --git a/drivers/soc/qcom/smem_debug.c b/drivers/soc/qcom/smem_debug.c
new file mode 100644
index 000000000000..ace89afb614c
--- /dev/null
+++ b/drivers/soc/qcom/smem_debug.c
@@ -0,0 +1,139 @@
+/* drivers/soc/qcom/smem_debug.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/jiffies.h>
+
+#include <soc/qcom/smem.h>
+
+#include "smem_private.h"
+
+#if defined(CONFIG_DEBUG_FS)
+
+#define SZ_SMEM_ALLOCATION_TABLE 8192
+
+static void debug_read_mem(struct seq_file *s)
+{
+ unsigned n;
+ struct smem_heap_info *heap_info;
+ struct smem_heap_entry *toc;
+
+ heap_info = smem_find(SMEM_HEAP_INFO, sizeof(struct smem_heap_info),
+ 0,
+ SMEM_ANY_HOST_FLAG);
+ if (!heap_info) {
+ seq_puts(s, "SMEM_HEAP_INFO is NULL\n");
+ return;
+ }
+ toc = smem_find(SMEM_ALLOCATION_TABLE, SZ_SMEM_ALLOCATION_TABLE,
+ 0, SMEM_ANY_HOST_FLAG);
+ if (!toc) {
+ seq_puts(s, "SMEM_ALLOCATION_TABLE is NULL\n");
+ return;
+ }
+
+ seq_printf(s, "heap: init=%d free=%d remain=%d\n",
+ heap_info->initialized,
+ heap_info->free_offset,
+ heap_info->heap_remaining);
+
+ for (n = 0; n < SMEM_NUM_ITEMS; n++) {
+ if (toc[n].allocated == 0)
+ continue;
+ seq_printf(s, "%04d: offset %08x size %08x\n",
+ n, toc[n].offset, toc[n].size);
+ }
+}
+
+static void debug_read_smem_version(struct seq_file *s)
+{
+ uint32_t n, version;
+
+ for (n = 0; n < 32; n++) {
+ version = smem_get_version(n);
+ seq_printf(s, "entry %d: smem = %d proc_comm = %d\n", n,
+ version >> 16,
+ version & 0xffff);
+ }
+}
+
+static void debug_read_build_id(struct seq_file *s)
+{
+ unsigned size;
+ void *data;
+
+ data = smem_get_entry(SMEM_HW_SW_BUILD_ID, &size, 0,
+ SMEM_ANY_HOST_FLAG);
+ if (!data)
+ return;
+
+ seq_write(s, data, size);
+}
+
+static int debugfs_show(struct seq_file *s, void *data)
+{
+ void (*show)(struct seq_file *) = s->private;
+
+ show(s);
+
+ return 0;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debugfs_show, inode->i_private);
+}
+
+static const struct file_operations debug_ops = {
+ .open = debug_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static void debug_create(const char *name, umode_t mode,
+ struct dentry *dent,
+ void (*show)(struct seq_file *))
+{
+ struct dentry *file;
+
+ file = debugfs_create_file(name, mode, dent, show, &debug_ops);
+ if (!file)
+ pr_err("%s: unable to create file '%s'\n", __func__, name);
+}
+
+static int __init smem_debugfs_init(void)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_dir("smem", 0);
+ if (IS_ERR(dent))
+ return PTR_ERR(dent);
+
+ debug_create("mem", 0444, dent, debug_read_mem);
+ debug_create("version", 0444, dent, debug_read_smem_version);
+
+ /* NNV: this is google only stuff */
+ debug_create("build", 0444, dent, debug_read_build_id);
+
+ return 0;
+}
+
+late_initcall(smem_debugfs_init);
+#endif
diff --git a/drivers/soc/qcom/smem_private.h b/drivers/soc/qcom/smem_private.h
new file mode 100644
index 000000000000..7aeca5eed8d2
--- /dev/null
+++ b/drivers/soc/qcom/smem_private.h
@@ -0,0 +1,104 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_SMEM_PRIVATE_H_
+#define _ARCH_ARM_MACH_MSM_SMEM_PRIVATE_H_
+
+#include <linux/remote_spinlock.h>
+#include <soc/qcom/ramdump.h>
+
+
+#define SMD_HEAP_SIZE 512
+
+struct smem_heap_info {
+ unsigned initialized;
+ unsigned free_offset;
+ unsigned heap_remaining;
+ unsigned reserved;
+};
+
+struct smem_heap_entry {
+ unsigned allocated;
+ unsigned offset;
+ unsigned size;
+ unsigned reserved; /* bits 1:0 reserved, bits 31:2 aux smem base addr */
+};
+#define BASE_ADDR_MASK 0xfffffffc
+
+struct smem_proc_comm {
+ unsigned command;
+ unsigned status;
+ unsigned data1;
+ unsigned data2;
+};
+
+struct smem_shared {
+ struct smem_proc_comm proc_comm[4];
+ unsigned version[32];
+ struct smem_heap_info heap_info;
+ struct smem_heap_entry heap_toc[SMD_HEAP_SIZE];
+};
+
+struct smem_area {
+ phys_addr_t phys_addr;
+ resource_size_t size;
+ void __iomem *virt_addr;
+};
+
+/* used for unit testing spinlocks */
+remote_spinlock_t *smem_get_remote_spinlock(void);
+
+bool smem_initialized_check(void);
+
+/**
+ * smem_module_init_notifier_register() - Register a smem module
+ * init notifier block
+ * @nb: Notifier block to be registered
+ *
+ * To mark a dependency on SMEM driver module initialization,
+ * register a notifier using this API. Once smem module_init is
+ * done, the notification is passed to the registered module.
+ */
+int smem_module_init_notifier_register(struct notifier_block *nb);
+
+/**
+ * smem_module_init_notifier_register() - Unregister a smem module
+ * init notifier block
+ * @nb: Notifier block to be unregistered
+ */
+int smem_module_init_notifier_unregister(struct notifier_block *nb);
+
+/**
+ * smem_get_free_space() - Get the available allocation free space for a
+ * partition
+ *
+ * @to_proc: remote SMEM host. Determines the applicable partition
+ * @returns: size in bytes available to allocate
+ *
+ * Helper function for SMD so that SMD only scans the channel allocation
+ * table for a partition when it is reasonably certain that a channel has
+ * actually been created, because scanning can be expensive. Creating a channel
+ * will consume some of the free space in a partition, so SMD can compare the
+ * last free space size against the current free space size to determine if
+ * a channel may have been created. SMD can't do this directly, because the
+ * necessary partition internals are restricted to just SMEM.
+ */
+unsigned smem_get_free_space(unsigned to_proc);
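A hypothetical sketch of the SMD-side pattern this helper enables, comparing
the last observed free-space value against the current one before doing an
expensive rescan (names are illustrative):

	static unsigned last_free_space;

	static void smd_check_for_new_channels(unsigned to_proc)
	{
		unsigned cur = smem_get_free_space(to_proc);

		/* A change in free space implies an allocation occurred,
		 * so a channel may have been created since the last scan.
		 */
		if (cur != last_free_space) {
			last_free_space = cur;
			/* ... rescan the channel allocation table ... */
		}
	}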
+
+/**
+ * smem_get_version() - Get the smem user version number
+ *
+ * @idx: SMEM user idx in SMEM_VERSION_INFO table.
+ * @returns: smem version number if success otherwise zero.
+ */
+unsigned smem_get_version(unsigned idx);
+#endif /* _ARCH_ARM_MACH_MSM_SMEM_PRIVATE_H_ */
diff --git a/drivers/soc/qcom/smsm_debug.c b/drivers/soc/qcom/smsm_debug.c
new file mode 100644
index 000000000000..b9e42e46c888
--- /dev/null
+++ b/drivers/soc/qcom/smsm_debug.c
@@ -0,0 +1,330 @@
+/* drivers/soc/qcom/smsm_debug.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/jiffies.h>
+
+#include <soc/qcom/smem.h>
+#include <soc/qcom/smsm.h>
+
+#if defined(CONFIG_DEBUG_FS)
+
+
+static void debug_read_smsm_state(struct seq_file *s)
+{
+ uint32_t *smsm;
+ int n;
+
+ smsm = smem_find(SMEM_SMSM_SHARED_STATE,
+ SMSM_NUM_ENTRIES * sizeof(uint32_t),
+ 0,
+ SMEM_ANY_HOST_FLAG);
+
+ if (smsm)
+ for (n = 0; n < SMSM_NUM_ENTRIES; n++)
+ seq_printf(s, "entry %d: 0x%08x\n", n, smsm[n]);
+}
+
+struct SMSM_CB_DATA {
+ int cb_count;
+ void *data;
+ uint32_t old_state;
+ uint32_t new_state;
+};
+static struct SMSM_CB_DATA smsm_cb_data;
+static struct completion smsm_cb_completion;
+
+static void smsm_state_cb(void *data, uint32_t old_state, uint32_t new_state)
+{
+ smsm_cb_data.cb_count++;
+ smsm_cb_data.old_state = old_state;
+ smsm_cb_data.new_state = new_state;
+ smsm_cb_data.data = data;
+ complete_all(&smsm_cb_completion);
+}
+
+#define UT_EQ_INT(a, b) \
+ { \
+ if ((a) != (b)) { \
+ seq_printf(s, "%s:%d " #a "(%d) != " #b "(%d)\n", \
+ __func__, __LINE__, \
+ a, b); \
+ break; \
+ } \
+ }
+
+#define UT_GT_INT(a, b) \
+ { \
+ if ((a) <= (b)) { \
+ seq_printf(s, "%s:%d " #a "(%d) > " #b "(%d)\n", \
+ __func__, __LINE__, \
+ a, b); \
+ break; \
+ } \
+ }
+
+#define SMSM_CB_TEST_INIT() \
+ do { \
+ smsm_cb_data.cb_count = 0; \
+ smsm_cb_data.old_state = 0; \
+ smsm_cb_data.new_state = 0; \
+ smsm_cb_data.data = 0; \
+ } while (0)
+
+
+static void debug_test_smsm(struct seq_file *s)
+{
+ int test_num = 0;
+ int ret;
+
+ /* Test case 1 - Register new callback for notification */
+ do {
+ test_num++;
+ SMSM_CB_TEST_INIT();
+ ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_SMDINIT,
+ smsm_state_cb, (void *)0x1234);
+ UT_EQ_INT(ret, 0);
+
+ /* de-assert SMSM_SMDINIT to trigger state update */
+ UT_EQ_INT(smsm_cb_data.cb_count, 0);
+ reinit_completion(&smsm_cb_completion);
+ smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
+ UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+
+ UT_EQ_INT(smsm_cb_data.cb_count, 1);
+ UT_EQ_INT(smsm_cb_data.old_state & SMSM_SMDINIT, SMSM_SMDINIT);
+ UT_EQ_INT(smsm_cb_data.new_state & SMSM_SMDINIT, 0x0);
+ UT_EQ_INT((int)(uintptr_t)smsm_cb_data.data, 0x1234);
+
+ /* re-assert SMSM_SMDINIT to trigger state update */
+ reinit_completion(&smsm_cb_completion);
+ smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
+ UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+ UT_EQ_INT(smsm_cb_data.cb_count, 2);
+ UT_EQ_INT(smsm_cb_data.old_state & SMSM_SMDINIT, 0x0);
+ UT_EQ_INT(smsm_cb_data.new_state & SMSM_SMDINIT, SMSM_SMDINIT);
+
+ /* deregister callback */
+ ret = smsm_state_cb_deregister(SMSM_APPS_STATE, SMSM_SMDINIT,
+ smsm_state_cb, (void *)0x1234);
+ UT_EQ_INT(ret, 2);
+
+ /* make sure state change doesn't cause any more callbacks */
+ reinit_completion(&smsm_cb_completion);
+ smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
+ smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
+ UT_EQ_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+ UT_EQ_INT(smsm_cb_data.cb_count, 2);
+
+ seq_printf(s, "Test %d - PASS\n", test_num);
+ } while (0);
+
+ /* Test case 2 - Update already registered callback */
+ do {
+ test_num++;
+ SMSM_CB_TEST_INIT();
+ ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_SMDINIT,
+ smsm_state_cb, (void *)0x1234);
+ UT_EQ_INT(ret, 0);
+ ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_INIT,
+ smsm_state_cb, (void *)0x1234);
+ UT_EQ_INT(ret, 1);
+
+ /* verify both callback bits work */
+ reinit_completion(&smsm_cb_completion);
+ UT_EQ_INT(smsm_cb_data.cb_count, 0);
+ smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
+ UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+ UT_EQ_INT(smsm_cb_data.cb_count, 1);
+ reinit_completion(&smsm_cb_completion);
+ smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
+ UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+ UT_EQ_INT(smsm_cb_data.cb_count, 2);
+
+ reinit_completion(&smsm_cb_completion);
+ smsm_change_state(SMSM_APPS_STATE, SMSM_INIT, 0x0);
+ UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+ UT_EQ_INT(smsm_cb_data.cb_count, 3);
+ reinit_completion(&smsm_cb_completion);
+ smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_INIT);
+ UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+ UT_EQ_INT(smsm_cb_data.cb_count, 4);
+
+ /* deregister 1st callback */
+ ret = smsm_state_cb_deregister(SMSM_APPS_STATE, SMSM_SMDINIT,
+ smsm_state_cb, (void *)0x1234);
+ UT_EQ_INT(ret, 1);
+ reinit_completion(&smsm_cb_completion);
+ smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
+ smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
+ UT_EQ_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+ UT_EQ_INT(smsm_cb_data.cb_count, 4);
+
+ reinit_completion(&smsm_cb_completion);
+ smsm_change_state(SMSM_APPS_STATE, SMSM_INIT, 0x0);
+ UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+ UT_EQ_INT(smsm_cb_data.cb_count, 5);
+ reinit_completion(&smsm_cb_completion);
+ smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_INIT);
+ UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+ UT_EQ_INT(smsm_cb_data.cb_count, 6);
+
+ /* deregister 2nd callback */
+ ret = smsm_state_cb_deregister(SMSM_APPS_STATE, SMSM_INIT,
+ smsm_state_cb, (void *)0x1234);
+ UT_EQ_INT(ret, 2);
+
+ /* make sure state change doesn't cause any more callbacks */
+ reinit_completion(&smsm_cb_completion);
+ smsm_change_state(SMSM_APPS_STATE, SMSM_INIT, 0x0);
+ smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_INIT);
+ UT_EQ_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+ UT_EQ_INT(smsm_cb_data.cb_count, 6);
+
+ seq_printf(s, "Test %d - PASS\n", test_num);
+ } while (0);
+
+ /* Test case 3 - Two callback registrations with different data */
+ do {
+ test_num++;
+ SMSM_CB_TEST_INIT();
+ ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_SMDINIT,
+ smsm_state_cb, (void *)0x1234);
+ UT_EQ_INT(ret, 0);
+ ret = smsm_state_cb_register(SMSM_APPS_STATE, SMSM_INIT,
+ smsm_state_cb, (void *)0x3456);
+ UT_EQ_INT(ret, 0);
+
+ /* verify both callbacks work */
+ reinit_completion(&smsm_cb_completion);
+ UT_EQ_INT(smsm_cb_data.cb_count, 0);
+ smsm_change_state(SMSM_APPS_STATE, SMSM_SMDINIT, 0x0);
+ UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+ UT_EQ_INT(smsm_cb_data.cb_count, 1);
+ UT_EQ_INT((int)(uintptr_t)smsm_cb_data.data, 0x1234);
+
+ reinit_completion(&smsm_cb_completion);
+ smsm_change_state(SMSM_APPS_STATE, SMSM_INIT, 0x0);
+ UT_GT_INT((int)wait_for_completion_timeout(&smsm_cb_completion,
+ msecs_to_jiffies(20)), 0);
+ UT_EQ_INT(smsm_cb_data.cb_count, 2);
+ UT_EQ_INT((int)(uintptr_t)smsm_cb_data.data, 0x3456);
+
+ /* cleanup and unregister
+ * deregister in reverse to verify the data field is
+ * being used
+ */
+ smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_SMDINIT);
+ smsm_change_state(SMSM_APPS_STATE, 0x0, SMSM_INIT);
+ ret = smsm_state_cb_deregister(SMSM_APPS_STATE,
+ SMSM_INIT,
+ smsm_state_cb, (void *)0x3456);
+ UT_EQ_INT(ret, 2);
+ ret = smsm_state_cb_deregister(SMSM_APPS_STATE,
+ SMSM_SMDINIT,
+ smsm_state_cb, (void *)0x1234);
+ UT_EQ_INT(ret, 2);
+
+ seq_printf(s, "Test %d - PASS\n", test_num);
+ } while (0);
+}
+
+static void debug_read_intr_mask(struct seq_file *s)
+{
+ uint32_t *smsm;
+ int m, n;
+
+ smsm = smem_find(SMEM_SMSM_CPU_INTR_MASK,
+ SMSM_NUM_ENTRIES * SMSM_NUM_HOSTS * sizeof(uint32_t),
+ 0,
+ SMEM_ANY_HOST_FLAG);
+
+ if (smsm)
+ for (m = 0; m < SMSM_NUM_ENTRIES; m++) {
+ seq_printf(s, "entry %d:", m);
+ for (n = 0; n < SMSM_NUM_HOSTS; n++)
+ seq_printf(s, " host %d: 0x%08x",
+ n, smsm[m * SMSM_NUM_HOSTS + n]);
+ seq_puts(s, "\n");
+ }
+}
+
+static int debugfs_show(struct seq_file *s, void *data)
+{
+ void (*show)(struct seq_file *) = s->private;
+
+ show(s);
+
+ return 0;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debugfs_show, inode->i_private);
+}
+
+static const struct file_operations debug_ops = {
+ .open = debug_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static void debug_create(const char *name, umode_t mode,
+ struct dentry *dent,
+ void (*show)(struct seq_file *))
+{
+ struct dentry *file;
+
+ file = debugfs_create_file(name, mode, dent, show, &debug_ops);
+ if (!file)
+ pr_err("%s: unable to create file '%s'\n", __func__, name);
+}
+
+static int __init smsm_debugfs_init(void)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_dir("smsm", 0);
+ if (IS_ERR(dent))
+ return PTR_ERR(dent);
+
+ debug_create("state", 0444, dent, debug_read_smsm_state);
+ debug_create("intr_mask", 0444, dent, debug_read_intr_mask);
+ debug_create("smsm_test", 0444, dent, debug_test_smsm);
+
+ init_completion(&smsm_cb_completion);
+
+ return 0;
+}
+
+late_initcall(smsm_debugfs_init);
+#endif
diff --git a/include/linux/ipc_logging.h b/include/linux/ipc_logging.h
new file mode 100644
index 000000000000..9d10e0ac0234
--- /dev/null
+++ b/include/linux/ipc_logging.h
@@ -0,0 +1,267 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPC_LOGGING_H
+#define _IPC_LOGGING_H
+
+#include <linux/types.h>
+
+#define MAX_MSG_SIZE 255
+
+enum {
+ TSV_TYPE_MSG_START = 1,
+ TSV_TYPE_SKB = TSV_TYPE_MSG_START,
+ TSV_TYPE_STRING,
+ TSV_TYPE_MSG_END = TSV_TYPE_STRING,
+};
+
+struct tsv_header {
+ unsigned char type;
+ unsigned char size; /* size of data field */
+};
+
+struct encode_context {
+ struct tsv_header hdr;
+ char buff[MAX_MSG_SIZE];
+ int offset;
+};
+
+struct decode_context {
+ int output_format; /* 0 = debugfs */
+ char *buff; /* output buffer */
+ int size; /* size of output buffer */
+};
+
+#if defined(CONFIG_IPC_LOGGING)
+/*
+ * ipc_log_context_create: Create a debug log context
+ * Should not be called from atomic context
+ *
+ * @max_num_pages: Number of pages of logging space required (max. 10)
+ * @modname: Name of the directory entry under DEBUGFS
+ * @user_version: Version number of user-defined message formats
+ *
+ * returns context id on success, NULL on failure
+ */
+void *ipc_log_context_create(int max_num_pages, const char *modname,
+ uint16_t user_version);
+
+/*
+ * msg_encode_start: Start encoding a log message
+ *
+ * @ectxt: Temporary storage to hold the encoded message
+ * @type: Root event type defined by the module which is logging
+ */
+void msg_encode_start(struct encode_context *ectxt, uint32_t type);
+
+/*
+ * tsv_timestamp_write: Writes the current timestamp count
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ */
+int tsv_timestamp_write(struct encode_context *ectxt);
+
+/*
+ * tsv_pointer_write: Writes a data pointer
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ * @pointer: Pointer value to write
+ */
+int tsv_pointer_write(struct encode_context *ectxt, void *pointer);
+
+/*
+ * tsv_int32_write: Writes a 32-bit integer value
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ * @n: Integer to write
+ */
+int tsv_int32_write(struct encode_context *ectxt, int32_t n);
+
+/*
+ * tsv_byte_array_write: Writes a byte array
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ * @data: Byte array to write
+ * @data_size: Size of data in bytes
+ */
+int tsv_byte_array_write(struct encode_context *ectxt,
+ void *data, int data_size);
+
+/*
+ * msg_encode_end: Complete the message encode process
+ *
+ * @ectxt: Temporary storage which holds the encoded message
+ */
+void msg_encode_end(struct encode_context *ectxt);
+
+/*
+ * ipc_log_write: Commits a fully encoded message to the log
+ *
+ * @ctxt: Debug log context created using ipc_log_context_create()
+ * @ectxt: Temporary storage which holds the encoded message
+ */
+void ipc_log_write(void *ctxt, struct encode_context *ectxt);
+
+/*
+ * ipc_log_string: Helper function to log a string
+ *
+ * @ilctxt: Debug Log Context created using ipc_log_context_create()
+ * @fmt: Data specified using format specifiers
+ */
+int ipc_log_string(void *ilctxt, const char *fmt, ...) __printf(2, 3);
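A minimal usage sketch, assuming a driver-chosen context name and a page
count within the documented limit of 10 ('irq' is a placeholder variable):

	void *ctx = ipc_log_context_create(4, "my_driver", 0);

	if (ctx)
		ipc_log_string(ctx, "probe complete, irq=%d", irq);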
+
+/**
+ * ipc_log_extract - Reads and deserializes log
+ *
+ * @ilctxt: logging context
+ * @buff: buffer to receive the data
+ * @size: size of the buffer
+ * @returns: 0 if no data read; >0 number of bytes read; < 0 error
+ *
+ * If no data is available to be read, then the ilctxt::read_avail
+ * completion is reinitialized. This allows clients to block
+ * until new log data is saved.
+ */
+int ipc_log_extract(void *ilctxt, char *buff, int size);
+
+/*
+ * Print a string to decode context.
+ * @dctxt Decode context
+ * @args printf args
+ */
+#define IPC_SPRINTF_DECODE(dctxt, args...) \
+do { \
+ int i; \
+ i = scnprintf(dctxt->buff, dctxt->size, args); \
+ dctxt->buff += i; \
+ dctxt->size -= i; \
+} while (0)
+
+/*
+ * tsv_timestamp_read: Reads a timestamp
+ *
+ * @ectxt: Context retrieved by reading from log space
+ * @dctxt: Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_timestamp_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_pointer_read: Reads a data pointer
+ *
+ * @ectxt: Context retrieved by reading from log space
+ * @dctxt: Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_pointer_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_int32_read: Reads a 32-bit integer value
+ *
+ * @ectxt: Context retrieved by reading from log space
+ * @dctxt: Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+int32_t tsv_int32_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_byte_array_read: Reads a byte array
+ *
+ * @ectxt: Context retrieved by reading from log space
+ * @dctxt: Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_byte_array_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format);
+
+/*
+ * add_deserialization_func: Register a deserialization function
+ * to unpack the subevents of a main event
+ *
+ * @ctxt: Debug log context to which the deserialization function has
+ * to be registered
+ * @type: Main/Root event, defined by the module which is logging, to
+ * which this deserialization function has to be registered.
+ * @dfunc: Deserialization function to be registered
+ *
+ * return 0 on success, negative value on failure
+ */
+int add_deserialization_func(void *ctxt, int type,
+ void (*dfunc)(struct encode_context *,
+ struct decode_context *));
+
+/*
+ * ipc_log_context_destroy: Destroy debug log context
+ *
+ * @ctxt: debug log context created by calling ipc_log_context_create API.
+ */
+int ipc_log_context_destroy(void *ctxt);
+
+#else
+
+static inline void *ipc_log_context_create(int max_num_pages,
+ const char *modname, uint16_t user_version)
+{ return NULL; }
+
+static inline void msg_encode_start(struct encode_context *ectxt,
+ uint32_t type) { }
+
+static inline int tsv_timestamp_write(struct encode_context *ectxt)
+{ return -EINVAL; }
+
+static inline int tsv_pointer_write(struct encode_context *ectxt, void *pointer)
+{ return -EINVAL; }
+
+static inline int tsv_int32_write(struct encode_context *ectxt, int32_t n)
+{ return -EINVAL; }
+
+static inline int tsv_byte_array_write(struct encode_context *ectxt,
+ void *data, int data_size)
+{ return -EINVAL; }
+
+static inline void msg_encode_end(struct encode_context *ectxt) { }
+
+static inline void ipc_log_write(void *ctxt, struct encode_context *ectxt) { }
+
+static inline int ipc_log_string(void *ilctxt, const char *fmt, ...)
+{ return -EINVAL; }
+
+static inline int ipc_log_extract(void *ilctxt, char *buff, int size)
+{ return -EINVAL; }
+
+#define IPC_SPRINTF_DECODE(dctxt, args...) do { } while (0)
+
+static inline void tsv_timestamp_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format) { }
+
+static inline void tsv_pointer_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format) { }
+
+static inline int32_t tsv_int32_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format)
+{ return 0; }
+
+static inline void tsv_byte_array_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format) { }
+
+static inline int add_deserialization_func(void *ctxt, int type,
+ void (*dfunc)(struct encode_context *,
+ struct decode_context *))
+{ return 0; }
+
+static inline int ipc_log_context_destroy(void *ctxt)
+{ return 0; }
+
+#endif
+
+#endif
diff --git a/include/linux/msm_remote_spinlock.h b/include/linux/msm_remote_spinlock.h
new file mode 100644
index 000000000000..f777cef4e1d7
--- /dev/null
+++ b/include/linux/msm_remote_spinlock.h
@@ -0,0 +1,74 @@
+/* Copyright (c) 2009, 2011, 2013-2014 The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * Part of this code is based on the standard ARM spinlock
+ * implementation (asm/spinlock.h) found in the 2.6.29 kernel.
+ */
+
+#ifndef __ASM__ARCH_QC_REMOTE_SPINLOCK_H
+#define __ASM__ARCH_QC_REMOTE_SPINLOCK_H
+
+#include <linux/io.h>
+#include <linux/types.h>
+
+#define REMOTE_SPINLOCK_NUM_PID 128
+#define REMOTE_SPINLOCK_TID_START REMOTE_SPINLOCK_NUM_PID
+
+/* Remote spinlock definitions. */
+
+typedef struct {
+ volatile uint32_t lock;
+} raw_remote_spinlock_t;
+
+typedef raw_remote_spinlock_t *_remote_spinlock_t;
+
+#define remote_spinlock_id_t const char *
+
+#if defined(CONFIG_REMOTE_SPINLOCK_MSM)
+int _remote_spin_lock_init(remote_spinlock_id_t, _remote_spinlock_t *lock);
+void _remote_spin_release_all(uint32_t pid);
+void _remote_spin_lock(_remote_spinlock_t *lock);
+void _remote_spin_unlock(_remote_spinlock_t *lock);
+int _remote_spin_trylock(_remote_spinlock_t *lock);
+int _remote_spin_release(_remote_spinlock_t *lock, uint32_t pid);
+int _remote_spin_owner(_remote_spinlock_t *lock);
+void _remote_spin_lock_rlock_id(_remote_spinlock_t *lock, uint32_t tid);
+void _remote_spin_unlock_rlock(_remote_spinlock_t *lock);
+#else
+static inline
+int _remote_spin_lock_init(remote_spinlock_id_t id, _remote_spinlock_t *lock)
+{
+ return -EINVAL;
+}
+static inline void _remote_spin_release_all(uint32_t pid) {}
+static inline void _remote_spin_lock(_remote_spinlock_t *lock) {}
+static inline void _remote_spin_unlock(_remote_spinlock_t *lock) {}
+static inline int _remote_spin_trylock(_remote_spinlock_t *lock)
+{
+ return -ENODEV;
+}
+static inline int _remote_spin_release(_remote_spinlock_t *lock, uint32_t pid)
+{
+ return -ENODEV;
+}
+static inline int _remote_spin_owner(_remote_spinlock_t *lock)
+{
+ return -ENODEV;
+}
+static inline void _remote_spin_lock_rlock_id(_remote_spinlock_t *lock,
+ uint32_t tid) {}
+static inline void _remote_spin_unlock_rlock(_remote_spinlock_t *lock) {}
+#endif
+#endif /* __ASM__ARCH_QC_REMOTE_SPINLOCK_H */
diff --git a/include/linux/regulator/rpm-smd-regulator.h b/include/linux/regulator/rpm-smd-regulator.h
new file mode 100644
index 000000000000..139030cd1eae
--- /dev/null
+++ b/include/linux/regulator/rpm-smd-regulator.h
@@ -0,0 +1,108 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_REGULATOR_RPM_SMD_H
+#define _LINUX_REGULATOR_RPM_SMD_H
+
+#include <linux/device.h>
+
+struct rpm_regulator;
+
+/**
+ * enum rpm_regulator_voltage_corner - possible voltage corner values
+ *
+ * These should be used in regulator_set_voltage() and
+ * rpm_regulator_set_voltage() calls for corner type regulators as if they had
+ * units of uV.
+ *
+ * Note, the meaning of corner values is set by the RPM. It is possible that
+ * future platforms will utilize different corner values. The values specified
+ * in this enum correspond to MSM8974 for PMIC PM8841 SMPS 2 (VDD_Dig).
+ */
+enum rpm_regulator_voltage_corner {
+ RPM_REGULATOR_CORNER_NONE = 1,
+ RPM_REGULATOR_CORNER_RETENTION,
+ RPM_REGULATOR_CORNER_SVS_KRAIT,
+ RPM_REGULATOR_CORNER_SVS_SOC,
+ RPM_REGULATOR_CORNER_NORMAL,
+ RPM_REGULATOR_CORNER_TURBO,
+ RPM_REGULATOR_CORNER_SUPER_TURBO,
+};
+
+/**
+ * enum rpm_regulator_mode - control mode for LDO or SMPS type regulators
+ * %RPM_REGULATOR_MODE_AUTO: For SMPS type regulators, use SMPS auto mode so
+ * that the hardware can automatically switch
+ * between PFM and PWM modes based on realtime
+ * load.
+ * LDO type regulators do not support this mode.
+ * %RPM_REGULATOR_MODE_IPEAK: For SMPS type regulators, use aggregated
+ * software current requests to determine
+ * usage of PFM or PWM mode.
+ * For LDO type regulators, use aggregated
+ * software current requests to determine
+ * usage of LPM or HPM mode.
+ * %RPM_REGULATOR_MODE_HPM: For SMPS type regulators, force the
+ * usage of PWM mode.
+ * For LDO type regulators, force the
+ * usage of HPM mode.
+ *
+ * These values should be used in calls to rpm_regulator_set_mode().
+ */
+enum rpm_regulator_mode {
+ RPM_REGULATOR_MODE_AUTO,
+ RPM_REGULATOR_MODE_IPEAK,
+ RPM_REGULATOR_MODE_HPM,
+};
+
+#ifdef CONFIG_REGULATOR_RPM_SMD
+
+struct rpm_regulator *rpm_regulator_get(struct device *dev, const char *supply);
+
+void rpm_regulator_put(struct rpm_regulator *regulator);
+
+int rpm_regulator_enable(struct rpm_regulator *regulator);
+
+int rpm_regulator_disable(struct rpm_regulator *regulator);
+
+int rpm_regulator_set_voltage(struct rpm_regulator *regulator, int min_uV,
+ int max_uV);
+
+int rpm_regulator_set_mode(struct rpm_regulator *regulator,
+ enum rpm_regulator_mode mode);
+
+int __init rpm_smd_regulator_driver_init(void);
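A hypothetical consumer sketch; the supply name "vdd_dig" and the corner/mode
choices are illustrative only, and corner enum values are passed to the
voltage call as if they were microvolts, per the note above:

	struct rpm_regulator *reg = rpm_regulator_get(dev, "vdd_dig");

	if (!IS_ERR_OR_NULL(reg)) {
		rpm_regulator_set_voltage(reg, RPM_REGULATOR_CORNER_SVS_SOC,
					  RPM_REGULATOR_CORNER_TURBO);
		rpm_regulator_set_mode(reg, RPM_REGULATOR_MODE_HPM);
		rpm_regulator_enable(reg);
	}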
+
+#else
+
+static inline struct rpm_regulator *rpm_regulator_get(struct device *dev,
+ const char *supply) { return NULL; }
+
+static inline void rpm_regulator_put(struct rpm_regulator *regulator) { }
+
+static inline int rpm_regulator_enable(struct rpm_regulator *regulator)
+ { return 0; }
+
+static inline int rpm_regulator_disable(struct rpm_regulator *regulator)
+ { return 0; }
+
+static inline int rpm_regulator_set_voltage(struct rpm_regulator *regulator,
+ int min_uV, int max_uV) { return 0; }
+
+static inline int rpm_regulator_set_mode(struct rpm_regulator *regulator,
+ enum rpm_regulator_mode mode) { return 0; }
+
+static inline int __init rpm_smd_regulator_driver_init(void) { return 0; }
+
+#endif /* CONFIG_REGULATOR_RPM_SMD */
+
+#endif
diff --git a/include/linux/remote_spinlock.h b/include/linux/remote_spinlock.h
new file mode 100644
index 000000000000..b1df5792ac78
--- /dev/null
+++ b/include/linux/remote_spinlock.h
@@ -0,0 +1,98 @@
+/* Copyright (c) 2008-2009, 2011, 2013-2014 The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __LINUX_REMOTE_SPINLOCK_H
+#define __LINUX_REMOTE_SPINLOCK_H
+
+#include <linux/spinlock.h>
+#include <linux/msm_remote_spinlock.h>
+
+/* Grabbing a local spin lock before going for a remote lock has several
+ * advantages:
+ * 1. Get calls to preempt enable/disable and IRQ save/restore for free.
+ * 2. For UP kernel, there is no overhead.
+ * 3. Reduces the possibility of executing the remote spin lock code. This is
+ * especially useful when the remote CPUs' mutual exclusion instructions
+ * don't work with the local CPUs' instructions. In such cases, one has to
+ * use software based mutex algorithms (e.g. Lamport's bakery algorithm)
+ * which could get expensive when the no. of contending CPUs is high.
+ * 4. In the case of a software-based mutex algorithm the execution time will
+ * be smaller since the no. of contending CPUs is reduced by having just one
+ * contender for all the local CPUs.
+ * 5. Get most of the spin lock debug features for free.
+ * 6. The code will continue to work "gracefully" even when the remote spin
+ * lock code is stubbed out for debug purposes or when there is no remote
+ * CPU in some board/machine types.
+ */
+typedef struct {
+ spinlock_t local;
+ _remote_spinlock_t remote;
+} remote_spinlock_t;
+
+#define remote_spin_lock_init(lock, id) \
+ ({ \
+ spin_lock_init(&((lock)->local)); \
+ _remote_spin_lock_init(id, &((lock)->remote)); \
+ })
+#define remote_spin_lock(lock) \
+ do { \
+ spin_lock(&((lock)->local)); \
+ _remote_spin_lock(&((lock)->remote)); \
+ } while (0)
+#define remote_spin_unlock(lock) \
+ do { \
+ _remote_spin_unlock(&((lock)->remote)); \
+ spin_unlock(&((lock)->local)); \
+ } while (0)
+#define remote_spin_lock_irqsave(lock, flags) \
+ do { \
+ spin_lock_irqsave(&((lock)->local), flags); \
+ _remote_spin_lock(&((lock)->remote)); \
+ } while (0)
+#define remote_spin_unlock_irqrestore(lock, flags) \
+ do { \
+ _remote_spin_unlock(&((lock)->remote)); \
+ spin_unlock_irqrestore(&((lock)->local), flags); \
+ } while (0)
+#define remote_spin_trylock(lock) \
+ ({ \
+ spin_trylock(&((lock)->local)) \
+ ? _remote_spin_trylock(&((lock)->remote)) \
+ ? 1 \
+ : ({ spin_unlock(&((lock)->local)); 0; }) \
+ : 0; \
+ })
+#define remote_spin_trylock_irqsave(lock, flags) \
+ ({ \
+ spin_trylock_irqsave(&((lock)->local), flags) \
+ ? _remote_spin_trylock(&((lock)->remote)) \
+ ? 1 \
+ : ({ spin_unlock_irqrestore(&((lock)->local), flags); \
+ 0; }) \
+ : 0; \
+ })
+#define remote_spin_lock_rlock_id(lock, tid) \
+ _remote_spin_lock_rlock_id(&((lock)->remote), tid)
+
+#define remote_spin_unlock_rlock(lock) \
+ _remote_spin_unlock_rlock(&((lock)->remote))
+
+#define remote_spin_release(lock, pid) \
+ _remote_spin_release(&((lock)->remote), pid)
+
+#define remote_spin_release_all(pid) \
+ _remote_spin_release_all(pid)
+
+#define remote_spin_owner(lock) \
+ _remote_spin_owner(&((lock)->remote))
+#endif
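A minimal usage sketch of the combined local-plus-remote lock; the lock id
string "S:7" is hypothetical (smem.c above uses SMEM_SPINLOCK_SMEM_ALLOC for
its own lock):

	remote_spinlock_t my_lock;
	unsigned long flags;

	if (!remote_spin_lock_init(&my_lock, "S:7")) {
		remote_spin_lock_irqsave(&my_lock, flags);
		/* ... access memory shared with the remote processor ... */
		remote_spin_unlock_irqrestore(&my_lock, flags);
	}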
diff --git a/include/soc/qcom/ramdump.h b/include/soc/qcom/ramdump.h
new file mode 100644
index 000000000000..7cd59dd89042
--- /dev/null
+++ b/include/soc/qcom/ramdump.h
@@ -0,0 +1,55 @@
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _RAMDUMP_HEADER
+#define _RAMDUMP_HEADER
+
+struct device;
+
+struct ramdump_segment {
+ unsigned long address;
+ unsigned long size;
+};
+
+#ifdef CONFIG_MSM_SUBSYSTEM_RESTART
+extern void *create_ramdump_device(const char *dev_name, struct device *parent);
+extern void destroy_ramdump_device(void *dev);
+extern int do_ramdump(void *handle, struct ramdump_segment *segments,
+ int nsegments);
+extern int do_elf_ramdump(void *handle, struct ramdump_segment *segments,
+ int nsegments);
+
+#else
+static inline void *create_ramdump_device(const char *dev_name,
+ struct device *parent)
+{
+ return NULL;
+}
+
+static inline void destroy_ramdump_device(void *dev)
+{
+}
+
+static inline int do_ramdump(void *handle, struct ramdump_segment *segments,
+ int nsegments)
+{
+ return -ENODEV;
+}
+
+static inline int do_elf_ramdump(void *handle, struct ramdump_segment *segments,
+ int nsegments)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_MSM_SUBSYSTEM_RESTART */
+
+#endif
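A sketch mirroring the smem.c usage earlier in this patch; the device name,
segment address, and size are placeholders:

	void *dump_dev = create_ramdump_device("mydev", NULL);
	struct ramdump_segment seg = {
		.address = 0x80000000,	/* placeholder physical address */
		.size = 0x100000,	/* placeholder size */
	};

	if (!IS_ERR_OR_NULL(dump_dev))
		do_elf_ramdump(dump_dev, &seg, 1);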
diff --git a/include/soc/qcom/rpm-notifier.h b/include/soc/qcom/rpm-notifier.h
new file mode 100644
index 000000000000..ea6d95e313a8
--- /dev/null
+++ b/include/soc/qcom/rpm-notifier.h
@@ -0,0 +1,63 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ARCH_ARM_MACH_MSM_RPM_NOTIF_H
+#define __ARCH_ARM_MACH_MSM_RPM_NOTIF_H
+
+struct msm_rpm_notifier_data {
+ uint32_t rsc_type;
+ uint32_t rsc_id;
+ uint32_t key;
+ uint32_t size;
+ uint8_t *value;
+};
+/**
+ * msm_rpm_register_notifier - Register for sleep set notifications
+ *
+ * @nb - notifier block to register
+ *
+ * return 0 on success, errno on failure.
+ */
+int msm_rpm_register_notifier(struct notifier_block *nb);
+
+/**
+ * msm_rpm_unregister_notifier - Unregister previously registered notifications
+ *
+ * @nb - notifier block to unregister
+ *
+ * return 0 on success, errno on failure.
+ */
+int msm_rpm_unregister_notifier(struct notifier_block *nb);
+
+/**
+ * msm_rpm_enter_sleep - Notify RPM driver to prepare for entering sleep
+ *
+ * @print - flag to enable printing the contents of the sleep buffer.
+ * @cpumask - cpumask of next wakeup cpu
+ *
+ * return 0 on success, errno on failure.
+ */
+int msm_rpm_enter_sleep(bool print, const struct cpumask *cpumask);
+
+/**
+ * msm_rpm_exit_sleep - Notify RPM driver about resuming from power collapse
+ */
+void msm_rpm_exit_sleep(void);
+
+/**
+ * msm_rpm_waiting_for_ack - Indicate if there is RPM message
+ * pending acknowledgement.
+ * returns true for pending messages and false otherwise
+ */
+bool msm_rpm_waiting_for_ack(void);
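A hypothetical ordering sketch for a low-power-mode driver using these hooks;
the choice of wakeup cpu and the surrounding power-collapse steps are
placeholders:

	if (!msm_rpm_waiting_for_ack()) {
		msm_rpm_enter_sleep(false, cpumask_of(0));
		/* ... enter power collapse ... */
		msm_rpm_exit_sleep();
	}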
+
+#endif /*__ARCH_ARM_MACH_MSM_RPM_NOTIF_H */
diff --git a/include/soc/qcom/rpm-smd.h b/include/soc/qcom/rpm-smd.h
new file mode 100644
index 000000000000..79f9dff75f83
--- /dev/null
+++ b/include/soc/qcom/rpm-smd.h
@@ -0,0 +1,268 @@
+/* Copyright (c) 2012, 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_RPM_SMD_H
+#define __ARCH_ARM_MACH_MSM_RPM_SMD_H
+
+/**
+ * enum msm_rpm_set - RPM enumerations for sleep/active set
+ * %MSM_RPM_CTX_ACTIVE_SET: Set resource parameters for active mode.
+ * %MSM_RPM_CTX_SLEEP_SET: Set resource parameters for sleep.
+ */
+enum msm_rpm_set {
+ MSM_RPM_CTX_ACTIVE_SET,
+ MSM_RPM_CTX_SLEEP_SET,
+};
+
+struct msm_rpm_request;
+
+struct msm_rpm_kvp {
+ uint32_t key;
+ uint32_t length;
+ uint8_t *data;
+};
+#ifdef CONFIG_MSM_RPM_SMD
+/**
+ * msm_rpm_create_request() - Creates a parent element to identify the
+ * resource on the RPM, that stores the KVPs for different fields modified
+ * for a hardware resource
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit integer that uniquely identifies a resource
+ * within a type
+ * @num_elements: number of KVP pairs associated with the resource
+ *
+ * returns pointer to a msm_rpm_request on success, NULL on error
+ */
+struct msm_rpm_request *msm_rpm_create_request(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements);
+
+/**
+ * msm_rpm_create_request_noirq() - Creates a parent element to identify the
+ * resource on the RPM, that stores the KVPs for different fields modified
+ * for a hardware resource. This function is similar to msm_rpm_create_request
+ * except that it has to be called with interrupts masked.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit integer that uniquely identifies a resource
+ * within a type
+ * @num_elements: number of KVP pairs associated with the resource
+ *
+ * returns pointer to a msm_rpm_request on success, NULL on error
+ */
+struct msm_rpm_request *msm_rpm_create_request_noirq(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements);
+
+/**
+ * msm_rpm_add_kvp_data() - Adds a key-value pair to an existing RPM resource.
+ *
+ * @handle: RPM resource handle to which the data should be appended
+ * @key: unsigned integer identifying the parameter being modified
+ * @data: byte array that contains the value corresponding to key.
+ * @size: size of data in bytes.
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int size);
+
+/**
+ * msm_rpm_add_kvp_data_noirq() - Adds a key-value pair to an existing RPM
+ * resource. This function is similar to msm_rpm_add_kvp_data except that it
+ * has to be called with interrupts masked.
+ *
+ * @handle: RPM resource handle to which the data should be appended
+ * @key: unsigned integer identifying the parameter being modified
+ * @data: byte array that contains the value corresponding to key.
+ * @size: size of data in bytes.
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int size);
+
+/**
+ * msm_rpm_free_request() - Clean up the RPM request handle created with
+ * msm_rpm_create_request
+ *
+ * @handle: RPM resource handle to be cleared.
+ */
+void msm_rpm_free_request(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request() - Send the RPM messages using SMD. The function
+ * assigns a message id before sending the data out to the RPM. RPM hardware
+ * uses the message id to acknowledge the messages.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns non-zero message id on success and zero on a failed transaction.
+ * The drivers use message id to wait for ACK from RPM.
+ */
+int msm_rpm_send_request(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request_noirq() - Send the RPM messages using SMD. The
+ * function assigns a message id before sending the data out to the RPM.
+ * RPM hardware uses the message id to acknowledge the messages. This function
+ * is similar to msm_rpm_send_request except that it has to be called with
+ * interrupts masked.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns non-zero message id on success and zero on a failed transaction.
+ * The drivers use message id to wait for ACK from RPM.
+ */
+int msm_rpm_send_request_noirq(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_wait_for_ack() - A blocking call that waits for acknowledgment of
+ * a message from RPM.
+ *
+ * @msg_id: the message id returned by msm_rpm_send_request()
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_wait_for_ack(uint32_t msg_id);
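+
+/*
+ * Illustrative sketch (not part of this patch): a typical client flow for
+ * the handle-based API above. The resource type, resource id, and key
+ * below are made-up placeholders; real clients use the identifiers
+ * published for their target RPM resource.
+ */
+#if 0
+static int example_rpm_vote(void)
+{
+	struct msm_rpm_request *req;
+	uint32_t value = 1;
+	int msg_id, rc;
+
+	/* One KVP will be attached to this active-set request. */
+	req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET,
+			0x12345678 /* placeholder rsc_type */,
+			0 /* placeholder rsc_id */, 1);
+	if (!req)
+		return -ENOMEM;
+
+	rc = msm_rpm_add_kvp_data(req, 0x706d7473 /* placeholder key */,
+			(const uint8_t *)&value, sizeof(value));
+	if (rc)
+		goto out;
+
+	/* A non-zero message id indicates the send succeeded. */
+	msg_id = msm_rpm_send_request(req);
+	if (!msg_id) {
+		rc = -EIO;
+		goto out;
+	}
+	rc = msm_rpm_wait_for_ack(msg_id);
+out:
+	msm_rpm_free_request(req);
+	return rc;
+}
+#endif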
+
+/**
+ * msm_rpm_wait_for_ack_noirq() - A blocking call that waits for acknowledgment
+ * of a message from RPM. This function is similar to msm_rpm_wait_for_ack
+ * except that it has to be called with interrupts masked.
+ *
+ * @msg_id: the message id returned by msm_rpm_send_request_noirq()
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_wait_for_ack_noirq(uint32_t msg_id);
+
+/**
+ * msm_rpm_send_message() - Wrapper function for clients to send data given
+ * an array of key-value pairs.
+ *
+ * @set: the active/sleep set that the message applies to
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit integer that uniquely identifies a resource within
+ * a type
+ * @kvp: array of KVP data.
+ * @nelems: number of key-value pairs in @kvp.
+ *
+ * returns 0 on success and errno on failure.
+ */
+int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
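+
+/*
+ * Illustrative sketch (not part of this patch): sending a single request
+ * without managing a handle. The resource and key identifiers below are
+ * placeholders.
+ */
+#if 0
+static int example_rpm_oneshot(void)
+{
+	uint32_t enable = 1;
+	struct msm_rpm_kvp kvp = {
+		.key = 0x62616e65 /* placeholder key */,
+		.length = sizeof(enable),
+		.data = (uint8_t *)&enable,
+	};
+
+	/* Wrapper that builds and sends the request in one call. */
+	return msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET,
+			0x12345678 /* placeholder rsc_type */,
+			0 /* placeholder rsc_id */, &kvp, 1);
+}
+#endif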
+
+/**
+ * msm_rpm_send_message_noirq() - Wrapper function for clients to send data
+ * given an array of key-value pairs. This function is identical to
+ * msm_rpm_send_message() except that it has to be called with interrupts
+ * disabled. Clients should prefer msm_rpm_send_message() whenever possible,
+ * for better system performance.
+ *
+ * @set: the active/sleep set that the message applies to
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit integer that uniquely identifies a resource within
+ * a type
+ * @kvp: array of KVP data.
+ * @nelems: number of key-value pairs in @kvp.
+ *
+ * returns 0 on success and errno on failure.
+ */
+int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
+
+/**
+ * msm_rpm_driver_init() - Initialization function that registers the RPM SMD
+ * platform driver.
+ *
+ * returns 0 on success.
+ */
+int __init msm_rpm_driver_init(void);
+
+#else
+
+static inline struct msm_rpm_request *msm_rpm_create_request(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements)
+{
+ return NULL;
+}
+
+static inline struct msm_rpm_request *msm_rpm_create_request_noirq(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements)
+{
+ return NULL;
+}
+static inline int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int count)
+{
+ return 0;
+}
+static inline int msm_rpm_add_kvp_data_noirq(
+ struct msm_rpm_request *handle, uint32_t key,
+ const uint8_t *data, int count)
+{
+ return 0;
+}
+
+static inline void msm_rpm_free_request(struct msm_rpm_request *handle)
+{
+}
+
+static inline int msm_rpm_send_request(struct msm_rpm_request *handle)
+{
+ return 0;
+}
+
+static inline int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
+{
+ return 0;
+}
+
+static inline int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+ return 0;
+}
+
+static inline int msm_rpm_send_message_noirq(enum msm_rpm_set set,
+ uint32_t rsc_type, uint32_t rsc_id, struct msm_rpm_kvp *kvp,
+ int nelems)
+{
+ return 0;
+}
+
+static inline int msm_rpm_wait_for_ack(uint32_t msg_id)
+{
+ return 0;
+}
+static inline int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
+{
+ return 0;
+}
+
+static inline int __init msm_rpm_driver_init(void)
+{
+ return 0;
+}
+#endif
+#endif /*__ARCH_ARM_MACH_MSM_RPM_SMD_H*/
diff --git a/include/soc/qcom/smd.h b/include/soc/qcom/smd.h
new file mode 100644
index 000000000000..926990ed0533
--- /dev/null
+++ b/include/soc/qcom/smd.h
@@ -0,0 +1,401 @@
+/* include/soc/qcom/smd.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ASM_ARCH_MSM_SMD_H
+#define __ASM_ARCH_MSM_SMD_H
+
+#include <linux/io.h>
+
+#include <soc/qcom/smem.h>
+
+typedef struct smd_channel smd_channel_t;
+struct cpumask;
+
+#define SMD_MAX_CH_NAME_LEN 20 /* includes null char at end */
+
+#define SMD_EVENT_DATA 1
+#define SMD_EVENT_OPEN 2
+#define SMD_EVENT_CLOSE 3
+#define SMD_EVENT_STATUS 4
+#define SMD_EVENT_REOPEN_READY 5
+
+/*
+ * SMD Processor ID's.
+ *
+ * For all processors that have both SMSM and SMD clients,
+ * the SMSM Processor ID and the SMD Processor ID will
+ * be the same. In cases where a processor only supports
+ * SMD, the entry will only exist in this enum.
+ */
+enum {
+ SMD_APPS = SMEM_APPS,
+ SMD_MODEM = SMEM_MODEM,
+ SMD_Q6 = SMEM_Q6,
+ SMD_DSPS = SMEM_DSPS,
+ SMD_TZ = SMEM_DSPS,
+ SMD_WCNSS = SMEM_WCNSS,
+ SMD_MODEM_Q6_FW = SMEM_MODEM_Q6_FW,
+ SMD_RPM = SMEM_RPM,
+ NUM_SMD_SUBSYSTEMS,
+};
+
+enum {
+ SMD_APPS_MODEM = 0,
+ SMD_APPS_QDSP,
+ SMD_MODEM_QDSP,
+ SMD_APPS_DSPS,
+ SMD_MODEM_DSPS,
+ SMD_QDSP_DSPS,
+ SMD_APPS_WCNSS,
+ SMD_MODEM_WCNSS,
+ SMD_QDSP_WCNSS,
+ SMD_DSPS_WCNSS,
+ SMD_APPS_Q6FW,
+ SMD_MODEM_Q6FW,
+ SMD_QDSP_Q6FW,
+ SMD_DSPS_Q6FW,
+ SMD_WCNSS_Q6FW,
+ SMD_APPS_RPM,
+ SMD_MODEM_RPM,
+ SMD_QDSP_RPM,
+ SMD_WCNSS_RPM,
+ SMD_TZ_RPM,
+ SMD_NUM_TYPE,
+};
+
+#ifdef CONFIG_MSM_SMD
+int smd_close(smd_channel_t *ch);
+
+/* Passing a NULL data pointer reads and discards the available data. */
+int smd_read(smd_channel_t *ch, void *data, int len);
+int smd_read_from_cb(smd_channel_t *ch, void *data, int len);
+/* Same as smd_read(), but takes a data buffer from userspace.
+ * The function might sleep and is only safe to call from user context.
+ */
+int smd_read_user_buffer(smd_channel_t *ch, void *data, int len);
+
+/* A write to a stream channel may be partial and returns
+** the length actually written.
+** A write to a packet channel will never be partial --
+** it returns the requested length written or an error.
+*/
+int smd_write(smd_channel_t *ch, const void *data, int len);
+/* Same as smd_write(), but takes a data buffer from userspace.
+ * The function might sleep and is only safe to call from user context.
+ */
+int smd_write_user_buffer(smd_channel_t *ch, const void *data, int len);
+
+int smd_write_avail(smd_channel_t *ch);
+int smd_read_avail(smd_channel_t *ch);
+
+/* Returns the total size of the current packet being read.
+** Returns 0 if no packet is available or the channel is a stream channel.
+*/
+int smd_cur_packet_size(smd_channel_t *ch);
+
+/* These are used to get and set the interface signals of a channel.
+ * DTR and RTS can be set; DSR, CTS, CD and RI can be read.
+ */
+int smd_tiocmget(smd_channel_t *ch);
+int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear);
+int
+smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear);
+int smd_named_open_on_edge(const char *name, uint32_t edge, smd_channel_t **_ch,
+ void *priv, void (*notify)(void *, unsigned));
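+
+/*
+ * Illustrative sketch (not part of this patch): opening a channel on the
+ * apps<->RPM edge and draining data from the notify callback. The channel
+ * name is a placeholder.
+ */
+#if 0
+static smd_channel_t *example_ch;
+
+static void example_smd_notify(void *priv, unsigned event)
+{
+	char buf[64];
+	int avail, n;
+
+	if (event != SMD_EVENT_DATA)
+		return;
+
+	/* Drain whatever has arrived, one buffer at a time. */
+	while ((avail = smd_read_avail(example_ch)) > 0) {
+		n = avail < (int)sizeof(buf) ? avail : (int)sizeof(buf);
+		smd_read(example_ch, buf, n);
+	}
+}
+
+static int example_smd_open(void)
+{
+	return smd_named_open_on_edge("example_chan", SMD_APPS_RPM,
+			&example_ch, NULL, example_smd_notify);
+}
+#endif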
+
+/* Tells the other end of the smd channel that this end wants to receive
+ * interrupts when the written data is read. Read interrupts should only be
+ * enabled when there is no space left in the buffer to write to, so the
+ * interrupt acts as notification that space may be available. If the
+ * other side does not support enabling/disabling interrupts on demand,
+ * then this function has no effect if called.
+ */
+void smd_enable_read_intr(smd_channel_t *ch);
+
+/* Tells the other end of the smd channel that this end does not want
+ * interrupts when written data is read. The interrupts should be
+ * disabled by default. If the other side does not support enabling/
+ * disabling interrupts on demand, then this function has no effect if
+ * called.
+ */
+void smd_disable_read_intr(smd_channel_t *ch);
+
+/**
+ * Enable/disable receive interrupts for the remote processor used by a
+ * particular channel.
+ * @ch: open channel handle to use for the edge
+ * @mask: 1 = mask interrupts; 0 = unmask interrupts
+ * @cpumask: cpumask for the next cpu scheduled to be woken up
+ * @returns: 0 for success; < 0 for failure
+ *
+ * Note that this enables/disables all interrupts from the remote subsystem for
+ * all channels. As such, it should be used with care and only for specific
+ * use cases such as power-collapse sequencing.
+ */
+int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask,
+ const struct cpumask *cpumask);
+
+/* Starts a packet transaction. The size of the packet may exceed the total
+ * size of the smd ring buffer.
+ *
+ * @ch: channel to write the packet to
+ * @len: total length of the packet
+ *
+ * Returns:
+ * 0 - success
+ * -ENODEV - invalid smd channel
+ * -EACCES - non-packet channel specified
+ * -EINVAL - invalid length
+ * -EBUSY - transaction already in progress
+ * -EAGAIN - not enough memory in ring buffer to start transaction
+ * -EPERM - unable to successfully start transaction due to write error
+ */
+int smd_write_start(smd_channel_t *ch, int len);
+
+/* Writes a segment of the packet for a packet transaction.
+ *
+ * @ch: channel to write packet to
+ * @data: buffer of data to write
+ * @len: length of data buffer
+ * @user_buf: 0 = buffer from kernelspace; 1 = buffer from userspace
+ *
+ * Returns:
+ * number of bytes written
+ * -ENODEV - invalid smd channel
+ * -EINVAL - invalid length
+ * -ENOEXEC - transaction not started
+ */
+int smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf);
+
+/* Completes a packet transaction. Do not call from interrupt context.
+ *
+ * @ch: channel to complete transaction on
+ *
+ * Returns:
+ * 0 - success
+ * -ENODEV - invalid smd channel
+ * -E2BIG - some amount of the packet has not yet been written
+ */
+int smd_write_end(smd_channel_t *ch);
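+
+/*
+ * Illustrative sketch (not part of this patch): writing one packet that
+ * may exceed the FIFO size using the transaction API above. A real client
+ * would wait for write space between segments instead of busy-looping.
+ */
+#if 0
+static int example_smd_send_pkt(smd_channel_t *ch, void *pkt, int len)
+{
+	int written = 0;
+	int rc;
+
+	rc = smd_write_start(ch, len);
+	if (rc)
+		return rc;
+
+	while (written < len) {
+		rc = smd_write_segment(ch, (char *)pkt + written,
+				len - written, 0 /* kernel buffer */);
+		if (rc < 0)
+			return rc;
+		written += rc;
+	}
+
+	return smd_write_end(ch);
+}
+#endif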
+
+/**
+ * smd_write_segment_avail() - available write space for packet transactions
+ * @ch: channel to write packet to
+ * @returns: number of bytes available to write to, or -ENODEV for invalid ch
+ *
+ * This is a version of smd_write_avail() intended for use with packet
+ * transactions. This version correctly accounts for any internal reserved
+ * space at all stages of the transaction.
+ */
+int smd_write_segment_avail(smd_channel_t *ch);
+
+/*
+ * Returns a pointer to the subsystem name or NULL if no
+ * subsystem name is available.
+ *
+ * @type - Edge definition
+ */
+const char *smd_edge_to_subsystem(uint32_t type);
+
+/*
+ * Returns a pointer to the subsystem name given the
+ * remote processor ID.
+ *
+ * @pid Remote processor ID
+ * @returns Pointer to subsystem name or NULL if not found
+ */
+const char *smd_pid_to_subsystem(uint32_t pid);
+
+/*
+ * Checks to see if a new packet has arrived on the channel. Only to be
+ * called with interrupts disabled.
+ *
+ * @ch: channel to check if a packet has arrived
+ *
+ * Returns:
+ * 0 - packet not available
+ * 1 - packet available
+ * -EINVAL - NULL parameter or non-packet based channel provided
+ */
+int smd_is_pkt_avail(smd_channel_t *ch);
+
+/*
+ * SMD initialization function that registers the SMD platform driver.
+ *
+ * returns 0 on successful driver registration.
+ */
+int __init msm_smd_init(void);
+
+/**
+ * smd_remote_ss_to_edge() - return edge type from remote ss type
+ * @name: remote subsystem name
+ *
+ * Returns the edge type connected between the local subsystem (APPS)
+ * and remote subsystem @name.
+ */
+int smd_remote_ss_to_edge(const char *name);
+
+/**
+ * smd_edge_to_pil_str - Returns the PIL string used to load the remote side of
+ * the indicated edge.
+ *
+ * @type - Edge definition
+ * @returns - The PIL string to load the remote side of @type or NULL if the
+ * PIL string does not exist.
+ */
+const char *smd_edge_to_pil_str(uint32_t type);
+
+#else
+
+static inline int smd_close(smd_channel_t *ch)
+{
+ return -ENODEV;
+}
+
+static inline int smd_read(smd_channel_t *ch, void *data, int len)
+{
+ return -ENODEV;
+}
+
+static inline int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
+{
+ return -ENODEV;
+}
+
+static inline int smd_read_user_buffer(smd_channel_t *ch, void *data, int len)
+{
+ return -ENODEV;
+}
+
+static inline int smd_write(smd_channel_t *ch, const void *data, int len)
+{
+ return -ENODEV;
+}
+
+static inline int
+smd_write_user_buffer(smd_channel_t *ch, const void *data, int len)
+{
+ return -ENODEV;
+}
+
+static inline int smd_write_avail(smd_channel_t *ch)
+{
+ return -ENODEV;
+}
+
+static inline int smd_read_avail(smd_channel_t *ch)
+{
+ return -ENODEV;
+}
+
+static inline int smd_cur_packet_size(smd_channel_t *ch)
+{
+ return -ENODEV;
+}
+
+static inline int smd_tiocmget(smd_channel_t *ch)
+{
+ return -ENODEV;
+}
+
+static inline int
+smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
+{
+ return -ENODEV;
+}
+
+static inline int
+smd_tiocmset_from_cb(smd_channel_t *ch, unsigned int set, unsigned int clear)
+{
+ return -ENODEV;
+}
+
+static inline int
+smd_named_open_on_edge(const char *name, uint32_t edge, smd_channel_t **_ch,
+ void *priv, void (*notify)(void *, unsigned))
+{
+ return -ENODEV;
+}
+
+static inline void smd_enable_read_intr(smd_channel_t *ch)
+{
+}
+
+static inline void smd_disable_read_intr(smd_channel_t *ch)
+{
+}
+
+static inline int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask,
+ const struct cpumask *cpumask)
+{
+ return -ENODEV;
+}
+
+static inline int smd_write_start(smd_channel_t *ch, int len)
+{
+ return -ENODEV;
+}
+
+static inline int
+smd_write_segment(smd_channel_t *ch, void *data, int len, int user_buf)
+{
+ return -ENODEV;
+}
+
+static inline int smd_write_end(smd_channel_t *ch)
+{
+ return -ENODEV;
+}
+
+static inline int smd_write_segment_avail(smd_channel_t *ch)
+{
+ return -ENODEV;
+}
+
+static inline const char *smd_edge_to_subsystem(uint32_t type)
+{
+ return NULL;
+}
+
+static inline const char *smd_pid_to_subsystem(uint32_t pid)
+{
+ return NULL;
+}
+
+static inline int smd_is_pkt_avail(smd_channel_t *ch)
+{
+ return -ENODEV;
+}
+
+static inline int __init msm_smd_init(void)
+{
+ return 0;
+}
+
+static inline int smd_remote_ss_to_edge(const char *name)
+{
+ return -EINVAL;
+}
+
+static inline const char *smd_edge_to_pil_str(uint32_t type)
+{
+ return NULL;
+}
+#endif
+
+#endif
diff --git a/include/soc/qcom/smem.h b/include/soc/qcom/smem.h
new file mode 100644
index 000000000000..7550c533151e
--- /dev/null
+++ b/include/soc/qcom/smem.h
@@ -0,0 +1,243 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_SMEM_H_
+#define _ARCH_ARM_MACH_MSM_SMEM_H_
+
+#include <linux/types.h>
+
+enum {
+ SMEM_APPS,
+ SMEM_MODEM,
+ SMEM_Q6,
+ SMEM_DSPS,
+ SMEM_WCNSS,
+ SMEM_MODEM_Q6_FW,
+ SMEM_RPM,
+ NUM_SMEM_SUBSYSTEMS,
+};
+
+/*
+ * Flag options for the XXX_to_proc() API
+ *
+ * SMEM_ITEM_CACHED_FLAG - Indicates this operation should use cacheable SMEM
+ *
+ * SMEM_ANY_HOST_FLAG - Indicates this operation applies to SMEM items that
+ *                      are not limited to a specific host pairing. Causes
+ *                      this operation to ignore the to_proc parameter.
+ */
+#define SMEM_ITEM_CACHED_FLAG 1
+#define SMEM_ANY_HOST_FLAG 2
+
+#define SMEM_NUM_SMD_STREAM_CHANNELS 64
+
+/**
+ * OVERFLOW_ADD_UNSIGNED() - check for unsigned overflow
+ *
+ * @type: type to check for overflow
+ * @a: left value to use
+ * @b: right value to use
+ * @returns: true if a + b will result in overflow; false otherwise
+ */
+#define OVERFLOW_ADD_UNSIGNED(type, a, b) \
+ (((type)~0 - (a)) < (b) ? true : false)
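+
+/*
+ * Illustrative use (not part of this patch): guard an offset + size
+ * computation before indexing into shared memory. Names are placeholders.
+ */
+#if 0
+static bool example_item_fits(unsigned item_offset, unsigned item_size)
+{
+	/* True only if item_offset + item_size does not wrap. */
+	return !OVERFLOW_ADD_UNSIGNED(unsigned, item_offset, item_size);
+}
+#endif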
+
+enum {
+ /* fixed items */
+ SMEM_PROC_COMM = 0,
+ SMEM_HEAP_INFO,
+ SMEM_ALLOCATION_TABLE,
+ SMEM_VERSION_INFO,
+ SMEM_HW_RESET_DETECT,
+ SMEM_AARM_WARM_BOOT,
+ SMEM_DIAG_ERR_MESSAGE,
+ SMEM_SPINLOCK_ARRAY,
+ SMEM_MEMORY_BARRIER_LOCATION,
+ SMEM_FIXED_ITEM_LAST = SMEM_MEMORY_BARRIER_LOCATION,
+
+ /* dynamic items */
+ SMEM_AARM_PARTITION_TABLE,
+ SMEM_AARM_BAD_BLOCK_TABLE,
+ SMEM_RESERVE_BAD_BLOCKS,
+ SMEM_WM_UUID,
+ SMEM_CHANNEL_ALLOC_TBL,
+ SMEM_SMD_BASE_ID,
+ SMEM_SMEM_LOG_IDX = SMEM_SMD_BASE_ID + SMEM_NUM_SMD_STREAM_CHANNELS,
+ SMEM_SMEM_LOG_EVENTS,
+ SMEM_SMEM_STATIC_LOG_IDX,
+ SMEM_SMEM_STATIC_LOG_EVENTS,
+ SMEM_SMEM_SLOW_CLOCK_SYNC,
+ SMEM_SMEM_SLOW_CLOCK_VALUE,
+ SMEM_BIO_LED_BUF,
+ SMEM_SMSM_SHARED_STATE,
+ SMEM_SMSM_INT_INFO,
+ SMEM_SMSM_SLEEP_DELAY,
+ SMEM_SMSM_LIMIT_SLEEP,
+ SMEM_SLEEP_POWER_COLLAPSE_DISABLED,
+ SMEM_KEYPAD_KEYS_PRESSED,
+ SMEM_KEYPAD_STATE_UPDATED,
+ SMEM_KEYPAD_STATE_IDX,
+ SMEM_GPIO_INT,
+ SMEM_MDDI_LCD_IDX,
+ SMEM_MDDI_HOST_DRIVER_STATE,
+ SMEM_MDDI_LCD_DISP_STATE,
+ SMEM_LCD_CUR_PANEL,
+ SMEM_MARM_BOOT_SEGMENT_INFO,
+ SMEM_AARM_BOOT_SEGMENT_INFO,
+ SMEM_SLEEP_STATIC,
+ SMEM_SCORPION_FREQUENCY,
+ SMEM_SMD_PROFILES,
+ SMEM_TSSC_BUSY,
+ SMEM_HS_SUSPEND_FILTER_INFO,
+ SMEM_BATT_INFO,
+ SMEM_APPS_BOOT_MODE,
+ SMEM_VERSION_FIRST,
+ SMEM_VERSION_SMD = SMEM_VERSION_FIRST,
+ SMEM_VERSION_LAST = SMEM_VERSION_FIRST + 24,
+ SMEM_OSS_RRCASN1_BUF1,
+ SMEM_OSS_RRCASN1_BUF2,
+ SMEM_ID_VENDOR0,
+ SMEM_ID_VENDOR1,
+ SMEM_ID_VENDOR2,
+ SMEM_HW_SW_BUILD_ID,
+ SMEM_SMD_BASE_ID_2,
+ SMEM_SMD_FIFO_BASE_ID_2 = SMEM_SMD_BASE_ID_2 +
+ SMEM_NUM_SMD_STREAM_CHANNELS,
+ SMEM_CHANNEL_ALLOC_TBL_2 = SMEM_SMD_FIFO_BASE_ID_2 +
+ SMEM_NUM_SMD_STREAM_CHANNELS,
+ SMEM_I2C_MUTEX = SMEM_CHANNEL_ALLOC_TBL_2 +
+ SMEM_NUM_SMD_STREAM_CHANNELS,
+ SMEM_SCLK_CONVERSION,
+ SMEM_SMD_SMSM_INTR_MUX,
+ SMEM_SMSM_CPU_INTR_MASK,
+ SMEM_APPS_DEM_SLAVE_DATA,
+ SMEM_QDSP6_DEM_SLAVE_DATA,
+ SMEM_CLKREGIM_BSP,
+ SMEM_CLKREGIM_SOURCES,
+ SMEM_SMD_FIFO_BASE_ID,
+ SMEM_USABLE_RAM_PARTITION_TABLE = SMEM_SMD_FIFO_BASE_ID +
+ SMEM_NUM_SMD_STREAM_CHANNELS,
+ SMEM_POWER_ON_STATUS_INFO,
+ SMEM_DAL_AREA,
+ SMEM_SMEM_LOG_POWER_IDX,
+ SMEM_SMEM_LOG_POWER_WRAP,
+ SMEM_SMEM_LOG_POWER_EVENTS,
+ SMEM_ERR_CRASH_LOG,
+ SMEM_ERR_F3_TRACE_LOG,
+ SMEM_SMD_BRIDGE_ALLOC_TABLE,
+ SMEM_SMDLITE_TABLE,
+ SMEM_SD_IMG_UPGRADE_STATUS,
+ SMEM_SEFS_INFO,
+ SMEM_RESET_LOG,
+ SMEM_RESET_LOG_SYMBOLS,
+ SMEM_MODEM_SW_BUILD_ID,
+ SMEM_SMEM_LOG_MPROC_WRAP,
+ SMEM_BOOT_INFO_FOR_APPS,
+ SMEM_SMSM_SIZE_INFO,
+ SMEM_SMD_LOOPBACK_REGISTER,
+ SMEM_SSR_REASON_MSS0,
+ SMEM_SSR_REASON_WCNSS0,
+ SMEM_SSR_REASON_LPASS0,
+ SMEM_SSR_REASON_DSPS0,
+ SMEM_SSR_REASON_VCODEC0,
+ SMEM_SMP2P_APPS_BASE = 427,
+ SMEM_SMP2P_MODEM_BASE = SMEM_SMP2P_APPS_BASE + 8, /* 435 */
+ SMEM_SMP2P_AUDIO_BASE = SMEM_SMP2P_MODEM_BASE + 8, /* 443 */
+ SMEM_SMP2P_WIRLESS_BASE = SMEM_SMP2P_AUDIO_BASE + 8, /* 451 */
+ SMEM_SMP2P_POWER_BASE = SMEM_SMP2P_WIRLESS_BASE + 8, /* 459 */
+ SMEM_FLASH_DEVICE_INFO = SMEM_SMP2P_POWER_BASE + 8, /* 467 */
+ SMEM_BAM_PIPE_MEMORY, /* 468 */
+ SMEM_IMAGE_VERSION_TABLE, /* 469 */
+ SMEM_LC_DEBUGGER, /* 470 */
+ SMEM_FLASH_NAND_DEV_INFO, /* 471 */
+ SMEM_A2_BAM_DESCRIPTOR_FIFO, /* 472 */
+ SMEM_CPR_CONFIG, /* 473 */
+ SMEM_CLOCK_INFO, /* 474 */
+ SMEM_IPC_FIFO, /* 475 */
+ SMEM_RF_EEPROM_DATA, /* 476 */
+ SMEM_COEX_MDM_WCN, /* 477 */
+ SMEM_NUM_ITEMS,
+};
+
+#ifdef CONFIG_MSM_SMEM
+void *smem_alloc(unsigned id, unsigned size_in, unsigned to_proc,
+ unsigned flags);
+void *smem_find(unsigned id, unsigned size_in, unsigned to_proc,
+ unsigned flags);
+void *smem_get_entry(unsigned id, unsigned *size, unsigned to_proc,
+ unsigned flags);
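+
+/*
+ * Illustrative sketch (not part of this patch): allocating (or looking up)
+ * a vendor item shared with the modem. The size is a placeholder; with
+ * SMEM_ANY_HOST_FLAG the to_proc argument is ignored.
+ */
+#if 0
+static void *example_smem_item(void)
+{
+	/* Returns the existing item, or allocates it on first use. */
+	return smem_alloc(SMEM_ID_VENDOR0, 64, SMEM_MODEM,
+			SMEM_ANY_HOST_FLAG);
+}
+#endif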
+
+/**
+ * smem_get_entry_no_rlock - Get existing item without using remote spinlock
+ *
+ * @id: ID of SMEM item
+ * @size_out: Pointer to size variable for storing the result
+ * @to_proc: SMEM host that shares the item with apps
+ * @flags: Item attribute flags
+ * @returns: Pointer to SMEM item or NULL if it doesn't exist
+ *
+ * This function does not lock the remote spinlock and should only be used in
+ * failure-recover cases such as retrieving the subsystem failure reason during
+ * subsystem restart.
+ */
+void *smem_get_entry_no_rlock(unsigned id, unsigned *size_out, unsigned to_proc,
+ unsigned flags);
+
+/**
+ * smem_virt_to_phys() - Convert SMEM address to physical address.
+ *
+ * @smem_address: Virtual address returned by smem_alloc()
+ * @returns: Physical address (or 0 if there is a failure)
+ *
+ * This function should only be used if an SMEM item needs to be handed
+ * off to a DMA engine.
+ */
+phys_addr_t smem_virt_to_phys(void *smem_address);
+
+/**
+ * SMEM initialization function that registers the SMEM platform driver.
+ *
+ * @returns: 0 on successful driver registration.
+ */
+int __init msm_smem_init(void);
+
+#else
+static inline void *smem_alloc(unsigned id, unsigned size_in, unsigned to_proc,
+ unsigned flags)
+{
+ return NULL;
+}
+static inline void *smem_find(unsigned id, unsigned size_in,
+ unsigned to_proc, unsigned flags)
+{
+ return NULL;
+}
+static inline void *smem_get_entry(unsigned id, unsigned *size,
+ unsigned to_proc, unsigned flags)
+{
+ return NULL;
+}
+static inline void *smem_get_entry_no_rlock(unsigned id, unsigned *size_out,
+ unsigned to_proc, unsigned flags)
+{
+ return NULL;
+}
+static inline phys_addr_t smem_virt_to_phys(void *smem_address)
+{
+ return (phys_addr_t) NULL;
+}
+static inline int __init msm_smem_init(void)
+{
+ return 0;
+}
+#endif /* CONFIG_MSM_SMEM */
+#endif /* _ARCH_ARM_MACH_MSM_SMEM_H_ */
diff --git a/include/soc/qcom/smsm.h b/include/soc/qcom/smsm.h
new file mode 100644
index 000000000000..96ca0c86912c
--- /dev/null
+++ b/include/soc/qcom/smsm.h
@@ -0,0 +1,147 @@
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_SMSM_H_
+#define _ARCH_ARM_MACH_MSM_SMSM_H_
+
+#include <soc/qcom/smem.h>
+
+enum {
+ SMSM_APPS_STATE,
+ SMSM_MODEM_STATE,
+ SMSM_Q6_STATE,
+ SMSM_APPS_DEM,
+ SMSM_WCNSS_STATE = SMSM_APPS_DEM,
+ SMSM_MODEM_DEM,
+ SMSM_DSPS_STATE = SMSM_MODEM_DEM,
+ SMSM_Q6_DEM,
+ SMSM_POWER_MASTER_DEM,
+ SMSM_TIME_MASTER_DEM,
+};
+extern uint32_t SMSM_NUM_ENTRIES;
+
+/*
+ * Ordered by when processors adopted the SMSM protocol. May not be 1-to-1
+ * with SMEM PIDs, despite initial expectations.
+ */
+enum {
+ SMSM_APPS = SMEM_APPS,
+ SMSM_MODEM = SMEM_MODEM,
+ SMSM_Q6 = SMEM_Q6,
+ SMSM_WCNSS,
+ SMSM_DSPS,
+};
+extern uint32_t SMSM_NUM_HOSTS;
+
+#define SMSM_INIT 0x00000001
+#define SMSM_SMDINIT 0x00000008
+#define SMSM_RPCINIT 0x00000020
+#define SMSM_RESET 0x00000040
+#define SMSM_TIMEWAIT 0x00000400
+#define SMSM_TIMEINIT 0x00000800
+#define SMSM_PROC_AWAKE 0x00001000
+#define SMSM_SMD_LOOPBACK 0x00800000
+
+#define SMSM_USB_PLUG_UNPLUG 0x00002000
+
+#define SMSM_A2_POWER_CONTROL 0x00000002
+#define SMSM_A2_POWER_CONTROL_ACK 0x00000800
+
+#ifdef CONFIG_MSM_SMD
+int smsm_change_state(uint32_t smsm_entry,
+ uint32_t clear_mask, uint32_t set_mask);
+
+/*
+ * Changes the global interrupt mask. The set and clear masks are re-applied
+ * every time the global interrupt mask is updated for callback registration
+ * and de-registration.
+ *
+ * The clear mask is applied first, so if a bit is set to 1 in both the clear
+ * mask and the set mask, the result will be that the interrupt is set.
+ *
+ * @smsm_entry SMSM entry to change
+ * @clear_mask 1 = clear bit, 0 = no-op
+ * @set_mask 1 = set bit, 0 = no-op
+ *
+ * @returns 0 for success, < 0 for error
+ */
+int smsm_change_intr_mask(uint32_t smsm_entry,
+ uint32_t clear_mask, uint32_t set_mask);
+int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask);
+uint32_t smsm_get_state(uint32_t smsm_entry);
+int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
+ void (*notify)(void *, uint32_t old_state, uint32_t new_state),
+ void *data);
+int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
+ void (*notify)(void *, uint32_t, uint32_t), void *data);
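+
+/*
+ * Illustrative sketch (not part of this patch): marking this host as
+ * initialized and watching the modem entry for its reset bit.
+ */
+#if 0
+static void example_smsm_notify(void *data, uint32_t old_state,
+		uint32_t new_state)
+{
+	if (new_state & SMSM_RESET)
+		pr_err("example: remote side signalled reset\n");
+}
+
+static int example_smsm_watch(void)
+{
+	/* Advertise our own init state, then register for changes. */
+	smsm_change_state(SMSM_APPS_STATE, 0, SMSM_INIT);
+	return smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_RESET,
+			example_smsm_notify, NULL);
+}
+#endif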
+
+#else
+static inline int smsm_change_state(uint32_t smsm_entry,
+ uint32_t clear_mask, uint32_t set_mask)
+{
+ return -ENODEV;
+}
+
+/*
+ * Changes the global interrupt mask. The set and clear masks are re-applied
+ * every time the global interrupt mask is updated for callback registration
+ * and de-registration.
+ *
+ * The clear mask is applied first, so if a bit is set to 1 in both the clear
+ * mask and the set mask, the result will be that the interrupt is set.
+ *
+ * @smsm_entry SMSM entry to change
+ * @clear_mask 1 = clear bit, 0 = no-op
+ * @set_mask 1 = set bit, 0 = no-op
+ *
+ * @returns 0 for success, < 0 for error
+ */
+static inline int smsm_change_intr_mask(uint32_t smsm_entry,
+ uint32_t clear_mask, uint32_t set_mask)
+{
+ return -ENODEV;
+}
+
+static inline int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
+{
+ return -ENODEV;
+}
+static inline uint32_t smsm_get_state(uint32_t smsm_entry)
+{
+ return 0;
+}
+static inline int smsm_state_cb_register(uint32_t smsm_entry, uint32_t mask,
+ void (*notify)(void *, uint32_t old_state, uint32_t new_state),
+ void *data)
+{
+ return -ENODEV;
+}
+static inline int smsm_state_cb_deregister(uint32_t smsm_entry, uint32_t mask,
+ void (*notify)(void *, uint32_t, uint32_t), void *data)
+{
+ return -ENODEV;
+}
+static inline void smsm_reset_modem(unsigned mode)
+{
+}
+static inline void smsm_reset_modem_cont(void)
+{
+}
+static inline void smd_sleep_exit(void)
+{
+}
+static inline int smsm_check_for_modem_crash(void)
+{
+ return -ENODEV;
+}
+#endif
+#endif
diff --git a/include/soc/qcom/subsystem_notif.h b/include/soc/qcom/subsystem_notif.h
new file mode 100644
index 000000000000..db421ca6e54a
--- /dev/null
+++ b/include/soc/qcom/subsystem_notif.h
@@ -0,0 +1,87 @@
+/* Copyright (c) 2011, 2013 - 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Subsystem restart notifier API header
+ *
+ */
+
+#ifndef _SUBSYS_NOTIFIER_H
+#define _SUBSYS_NOTIFIER_H
+
+#include <linux/notifier.h>
+
+enum subsys_notif_type {
+ SUBSYS_BEFORE_SHUTDOWN,
+ SUBSYS_AFTER_SHUTDOWN,
+ SUBSYS_BEFORE_POWERUP,
+ SUBSYS_AFTER_POWERUP,
+ SUBSYS_RAMDUMP_NOTIFICATION,
+ SUBSYS_POWERUP_FAILURE,
+ SUBSYS_PROXY_VOTE,
+ SUBSYS_PROXY_UNVOTE,
+ SUBSYS_SOC_RESET,
+ SUBSYS_NOTIF_TYPE_COUNT
+};
+
+#if defined(CONFIG_MSM_SUBSYSTEM_RESTART)
+/* Use the subsys_notif_register_notifier API to register for notifications
+ * for a particular subsystem. This API will return a handle that can be used
+ * to unregister for notifications using the subsys_notif_unregister_notifier
+ * API by passing in that handle as an argument.
+ *
+ * On receiving a notification, the second (unsigned long) argument of the
+ * notifier callback will contain the notification type, and the third (void *)
+ * argument will contain the handle that was returned by
+ * subsys_notif_register_notifier.
+ */
+void *subsys_notif_register_notifier(
+ const char *subsys_name, struct notifier_block *nb);
+int subsys_notif_unregister_notifier(void *subsys_handle,
+ struct notifier_block *nb);
+
+/* Use the subsys_notif_add_subsys API to initialize the notifier chains for
+ * a particular subsystem. This API will return a handle that can be used to
+ * queue notifications using the subsys_notif_queue_notification API by passing
+ * in that handle as an argument.
+ */
+void *subsys_notif_add_subsys(const char *);
+int subsys_notif_queue_notification(void *subsys_handle,
+ enum subsys_notif_type notif_type,
+ void *data);
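+
+/*
+ * Illustrative sketch (not part of this patch): a client reacting to
+ * restart events of the "modem" subsystem. The name is a placeholder for
+ * whatever name the subsystem was registered under.
+ */
+#if 0
+static int example_ssr_cb(struct notifier_block *nb, unsigned long code,
+		void *data)
+{
+	switch (code) {
+	case SUBSYS_BEFORE_SHUTDOWN:
+		/* Quiesce traffic towards the subsystem here. */
+		break;
+	case SUBSYS_AFTER_POWERUP:
+		/* Re-establish communication here. */
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block example_nb = {
+	.notifier_call = example_ssr_cb,
+};
+
+static void *example_ssr_handle;
+
+static int example_ssr_register(void)
+{
+	example_ssr_handle = subsys_notif_register_notifier("modem",
+			&example_nb);
+	return example_ssr_handle ? 0 : -ENODEV;
+}
+#endif
+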
+#else
+
+static inline void *subsys_notif_register_notifier(
+ const char *subsys_name, struct notifier_block *nb)
+{
+ return NULL;
+}
+
+static inline int subsys_notif_unregister_notifier(void *subsys_handle,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline void *subsys_notif_add_subsys(const char *subsys_name)
+{
+ return NULL;
+}
+
+static inline int subsys_notif_queue_notification(void *subsys_handle,
+ enum subsys_notif_type notif_type,
+ void *data)
+{
+ return 0;
+}
+#endif /* CONFIG_MSM_SUBSYSTEM_RESTART */
+
+#endif
diff --git a/include/soc/qcom/subsystem_restart.h b/include/soc/qcom/subsystem_restart.h
new file mode 100644
index 000000000000..8dc44023756a
--- /dev/null
+++ b/include/soc/qcom/subsystem_restart.h
@@ -0,0 +1,154 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SUBSYS_RESTART_H
+#define __SUBSYS_RESTART_H
+
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+
+struct subsys_device;
+
+enum {
+ RESET_SOC = 0,
+ RESET_SUBSYS_COUPLED,
+ RESET_LEVEL_MAX
+};
+
+struct device;
+struct module;
+
+/**
+ * struct subsys_desc - subsystem descriptor
+ * @name: name of subsystem
+ * @depends_on: subsystem this subsystem depends on to operate
+ * @dev: parent device
+ * @owner: module the descriptor belongs to
+ * @shutdown: Stop a subsystem
+ * @powerup: Start a subsystem
+ * @crash_shutdown: Shutdown a subsystem when the system crashes (can't sleep)
+ * @ramdump: Collect a ramdump of the subsystem
+ * @is_not_loadable: Indicate if subsystem firmware is not loadable via pil
+ * framework
+ * @no_auth: Set if subsystem does not rely on PIL to authenticate and bring
+ * it out of reset
+ * @ssctl_instance_id: Instance id used to connect with SSCTL service
+ * @sysmon_pid: pdev id that sysmon is probed with for the subsystem
+ * @sysmon_shutdown_ret: Return value for the call to sysmon_send_shutdown
+ */
+struct subsys_desc {
+ const char *name;
+ const char *depends_on;
+ struct device *dev;
+ struct module *owner;
+
+ int (*shutdown)(const struct subsys_desc *desc, bool force_stop);
+ int (*powerup)(const struct subsys_desc *desc);
+ void (*crash_shutdown)(const struct subsys_desc *desc);
+ int (*ramdump)(int, const struct subsys_desc *desc);
+ irqreturn_t (*err_fatal_handler) (int irq, void *dev_id);
+ irqreturn_t (*stop_ack_handler) (int irq, void *dev_id);
+ irqreturn_t (*wdog_bite_handler) (int irq, void *dev_id);
+ int is_not_loadable;
+ unsigned int err_fatal_irq;
+ unsigned int err_ready_irq;
+ unsigned int stop_ack_irq;
+ unsigned int wdog_bite_irq;
+ int force_stop_gpio;
+ bool no_auth;
+ int ssctl_instance_id;
+ u32 sysmon_pid;
+ int sysmon_shutdown_ret;
+};
+
+/**
+ * struct notif_data - additional notif information
+ * @crashed: indicates if subsystem has crashed
+ * @enable_ramdump: ramdumps disabled if set to 0
+ * @no_auth: set if subsystem does not use PIL to bring it out of reset
+ * @pdev: subsystem platform device pointer
+ */
+struct notif_data {
+ bool crashed;
+ int enable_ramdump;
+ bool no_auth;
+ struct platform_device *pdev;
+};
+
+#if defined(CONFIG_MSM_SUBSYSTEM_RESTART)
+
+extern int subsys_get_restart_level(struct subsys_device *dev);
+extern int subsystem_restart_dev(struct subsys_device *dev);
+extern int subsystem_restart(const char *name);
+extern int subsystem_crashed(const char *name);
+
+extern void *subsystem_get(const char *name);
+extern void subsystem_put(void *subsystem);
+
+extern struct subsys_device *subsys_register(struct subsys_desc *desc);
+extern void subsys_unregister(struct subsys_device *dev);
+
+extern void subsys_default_online(struct subsys_device *dev);
+extern void subsys_set_crash_status(struct subsys_device *dev, bool crashed);
+extern bool subsys_get_crash_status(struct subsys_device *dev);
+void notify_proxy_vote(struct device *device);
+void notify_proxy_unvote(struct device *device);
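+
+/*
+ * Illustrative sketch (not part of this patch): registering a minimal
+ * subsystem with shutdown/powerup hooks; other callbacks and IRQ fields
+ * keep their zero defaults. Error handling is simplified.
+ */
+#if 0
+static int example_shutdown(const struct subsys_desc *desc, bool force_stop)
+{
+	/* Halt the subsystem here. */
+	return 0;
+}
+
+static int example_powerup(const struct subsys_desc *desc)
+{
+	/* Boot the subsystem here. */
+	return 0;
+}
+
+static struct subsys_desc example_desc = {
+	.name = "example",
+	.shutdown = example_shutdown,
+	.powerup = example_powerup,
+};
+
+static struct subsys_device *example_subsys;
+
+static int example_register(struct device *dev)
+{
+	example_desc.dev = dev;
+	example_subsys = subsys_register(&example_desc);
+	return example_subsys ? 0 : -ENODEV;
+}
+#endif
+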
+#else
+
+static inline int subsys_get_restart_level(struct subsys_device *dev)
+{
+ return 0;
+}
+
+static inline int subsystem_restart_dev(struct subsys_device *dev)
+{
+ return 0;
+}
+
+static inline int subsystem_restart(const char *name)
+{
+ return 0;
+}
+
+static inline int subsystem_crashed(const char *name)
+{
+ return 0;
+}
+
+static inline void *subsystem_get(const char *name)
+{
+ return NULL;
+}
+
+static inline void subsystem_put(void *subsystem) { }
+
+static inline
+struct subsys_device *subsys_register(struct subsys_desc *desc)
+{
+ return NULL;
+}
+
+static inline void subsys_unregister(struct subsys_device *dev) { }
+
+static inline void subsys_default_online(struct subsys_device *dev) { }
+static inline
+void subsys_set_crash_status(struct subsys_device *dev, bool crashed) { }
+static inline bool subsys_get_crash_status(struct subsys_device *dev)
+{
+ return false;
+}
+static inline void notify_proxy_vote(struct device *device) { }
+static inline void notify_proxy_unvote(struct device *device) { }
+#endif /* CONFIG_MSM_SUBSYSTEM_RESTART */
+
+#endif
diff --git a/include/trace/events/trace_rpm_smd.h b/include/trace/events/trace_rpm_smd.h
new file mode 100644
index 000000000000..f93baf4228bd
--- /dev/null
+++ b/include/trace/events/trace_rpm_smd.h
@@ -0,0 +1,77 @@
+/* Copyright (c) 2012, 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpm_smd
+
+#if !defined(_TRACE_RPM_SMD_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RPM_SMD_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(rpm_ack_recd,
+
+ TP_PROTO(unsigned int irq, unsigned int msg_id),
+
+ TP_ARGS(irq, msg_id),
+
+ TP_STRUCT__entry(
+ __field(int, irq)
+ __field(int, msg_id)
+ ),
+
+ TP_fast_assign(
+ __entry->irq = irq;
+ __entry->msg_id = msg_id;
+ ),
+
+ TP_printk("ctx:%s id:%d",
+ __entry->irq ? "noslp" : "sleep",
+ __entry->msg_id)
+);
+
+TRACE_EVENT(rpm_send_message,
+
+ TP_PROTO(unsigned int irq, unsigned int set, unsigned int rsc_type,
+ unsigned int rsc_id, unsigned int msg_id),
+
+ TP_ARGS(irq, set, rsc_type, rsc_id, msg_id),
+
+ TP_STRUCT__entry(
+ __field(u32, irq)
+ __field(u32, set)
+ __field(u32, rsc_type)
+ __field(u32, rsc_id)
+ __field(u32, msg_id)
+ __array(char, name, 5)
+ ),
+
+ TP_fast_assign(
+ __entry->irq = irq;
+ __entry->name[4] = 0;
+ __entry->set = set;
+ __entry->rsc_type = rsc_type;
+ __entry->rsc_id = rsc_id;
+ __entry->msg_id = msg_id;
+ memcpy(__entry->name, &rsc_type, sizeof(uint32_t));
+	),
+
+ TP_printk("ctx:%s set:%s rsc_type:0x%08x(%s), rsc_id:0x%08x, id:%d",
+ __entry->irq ? "noslp" : "sleep",
+ __entry->set ? "slp" : "act",
+ __entry->rsc_type, __entry->name,
+ __entry->rsc_id, __entry->msg_id)
+);
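+
+/*
+ * Note: the rpm-smd driver emits these events through the generated
+ * trace_<event>() helpers, e.g.:
+ *
+ *	trace_rpm_send_message(irq, set, rsc_type, rsc_id, msg_id);
+ *	trace_rpm_ack_recd(irq, msg_id);
+ */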
+#endif /* _TRACE_RPM_SMD_H */
+#define TRACE_INCLUDE_FILE trace_rpm_smd
+#include <trace/define_trace.h>