Diffstat (limited to 'drivers')
-rw-r--r--  drivers/amba/bus.c | 139
-rw-r--r--  drivers/base/platform.c | 9
-rw-r--r--  drivers/base/power/opp.c | 102
-rw-r--r--  drivers/clk/clk-mux.c | 75
-rw-r--r--  drivers/clk/clk.c | 100
-rw-r--r--  drivers/clk/qcom/Kconfig | 68
-rw-r--r--  drivers/clk/qcom/Makefile | 11
-rw-r--r--  drivers/clk/qcom/clk-a53.c | 192
-rw-r--r--  drivers/clk/qcom/clk-hfpll.c | 253
-rw-r--r--  drivers/clk/qcom/clk-hfpll.h | 54
-rw-r--r--  drivers/clk/qcom/clk-krait.c | 170
-rw-r--r--  drivers/clk/qcom/clk-krait.h | 49
-rw-r--r--  drivers/clk/qcom/clk-rcg.h | 4
-rw-r--r--  drivers/clk/qcom/clk-rcg2.c | 159
-rw-r--r--  drivers/clk/qcom/clk-regmap-mux-div.c | 295
-rw-r--r--  drivers/clk/qcom/clk-regmap-mux-div.h | 63
-rw-r--r--  drivers/clk/qcom/clk-smd-rpm.c | 261
-rw-r--r--  drivers/clk/qcom/clk-smd-rpm.h | 142
-rw-r--r--  drivers/clk/qcom/common.c | 16
-rw-r--r--  drivers/clk/qcom/common.h | 2
-rw-r--r--  drivers/clk/qcom/gcc-apq8084.c | 38
-rw-r--r--  drivers/clk/qcom/gcc-ipq806x.c | 83
-rw-r--r--  drivers/clk/qcom/gcc-msm8916.c | 573
-rw-r--r--  drivers/clk/qcom/gcc-msm8960.c | 198
-rw-r--r--  drivers/clk/qcom/gcc-msm8974.c | 14
-rw-r--r--  drivers/clk/qcom/gdsc.c | 228
-rw-r--r--  drivers/clk/qcom/gdsc.h | 50
-rw-r--r--  drivers/clk/qcom/hfpll.c | 109
-rw-r--r--  drivers/clk/qcom/kpss-xcc.c | 95
-rw-r--r--  drivers/clk/qcom/krait-cc.c | 352
-rw-r--r--  drivers/clk/qcom/mmcc-apq8084.c | 74
-rw-r--r--  drivers/clk/qcom/mmcc-msm8974.c | 65
-rw-r--r--  drivers/clk/qcom/rpmcc-apq8064.c | 347
-rw-r--r--  drivers/clk/qcom/rpmcc.c | 211
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 9
-rw-r--r--  drivers/cpufreq/Makefile | 1
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c | 124
-rw-r--r--  drivers/cpufreq/qcom-cpufreq.c | 204
-rw-r--r--  drivers/cpuidle/Kconfig.arm | 7
-rw-r--r--  drivers/dma/Kconfig | 19
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/qcom_adm.c | 856
-rw-r--r--  drivers/firmware/Makefile | 3
-rw-r--r--  drivers/firmware/qcom_scm-32.c | 418
-rw-r--r--  drivers/firmware/qcom_scm-64.c | 863
-rw-r--r--  drivers/firmware/qcom_scm.c | 175
-rw-r--r--  drivers/firmware/qcom_scm.h | 72
-rw-r--r--  drivers/gpio/Kconfig | 15
-rw-r--r--  drivers/gpio/Makefile | 2
-rw-r--r--  drivers/gpio/gpio-qcom-smp2p.c | 591
-rw-r--r--  drivers/gpio/gpio-qcom-smsm.c | 328
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 12
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c | 161
-rw-r--r--  drivers/gpu/drm/i2c/Makefile | 2
-rw-r--r--  drivers/gpu/drm/i2c/adv7511.c | 592
-rw-r--r--  drivers/gpu/drm/i2c/adv7511.h | 58
-rw-r--r--  drivers/gpu/drm/i2c/adv7511_audio.c | 312
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 59
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 94
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | 1
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 7
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c | 43
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 83
-rw-r--r--  drivers/hid/hid-core.c | 1
-rw-r--r--  drivers/hid/hid-ids.h | 4
-rw-r--r--  drivers/hid/hid-multitouch.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-qup.c | 943
-rw-r--r--  drivers/iommu/Kconfig | 3
-rw-r--r--  drivers/iommu/Makefile | 3
-rw-r--r--  drivers/iommu/msm_iommu.c | 536
-rw-r--r--  drivers/iommu/msm_iommu.h | 73
-rw-r--r--  drivers/iommu/msm_iommu_dev.c | 392
-rw-r--r--  drivers/iommu/msm_iommu_hw-8xxx.h | 6
-rw-r--r--  drivers/iommu/of_iommu.c | 29
-rw-r--r--  drivers/iommu/qcom/Kconfig | 43
-rw-r--r--  drivers/iommu/qcom/Makefile | 7
-rw-r--r--  drivers/iommu/qcom/msm_iommu-v1.c | 1538
-rw-r--r--  drivers/iommu/qcom/msm_iommu.c | 206
-rw-r--r--  drivers/iommu/qcom/msm_iommu_dev-v1.c | 627
-rw-r--r--  drivers/iommu/qcom/msm_iommu_hw-v1.h | 2320
-rw-r--r--  drivers/iommu/qcom/msm_iommu_pagetable.c | 645
-rw-r--r--  drivers/iommu/qcom/msm_iommu_pagetable.h | 33
-rw-r--r--  drivers/iommu/qcom/msm_iommu_perfmon.h | 233
-rw-r--r--  drivers/iommu/qcom/msm_iommu_priv.h | 71
-rw-r--r--  drivers/iommu/qcom/msm_iommu_sec.c | 876
-rw-r--r--  drivers/mfd/qcom_rpm.c | 36
-rw-r--r--  drivers/mfd/ssbi.c | 7
-rw-r--r--  drivers/mmc/Kconfig | 2
-rw-r--r--  drivers/mmc/core/Kconfig | 8
-rw-r--r--  drivers/mmc/core/Makefile | 4
-rw-r--r--  drivers/mmc/core/pwrseq.c | 92
-rw-r--r--  drivers/mmc/core/pwrseq.h | 25
-rw-r--r--  drivers/mmc/core/pwrseq_emmc.c | 91
-rw-r--r--  drivers/mmc/core/pwrseq_simple.c | 118
-rw-r--r--  drivers/mmc/host/mmci.c | 10
-rw-r--r--  drivers/mmc/host/sdhci-msm.c | 1
-rw-r--r--  drivers/mmc/host/sdhci.c | 9
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/Makefile | 5
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/dxe.c | 63
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/dxe.h | 1
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/hal.h | 18
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/main.c | 53
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/smd.c | 34
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/smd.h | 9
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/wcn36xx-msm.c | 273
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/wcn36xx.h | 40
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/wcnss_core.c | 296
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/wcnss_core.h | 99
-rw-r--r--  drivers/of/address.c | 20
-rw-r--r--  drivers/of/device.c | 77
-rw-r--r--  drivers/of/of_pci.c | 3
-rw-r--r--  drivers/of/platform.c | 16
-rw-r--r--  drivers/pci/host/Kconfig | 5
-rw-r--r--  drivers/pci/host/Makefile | 1
-rw-r--r--  drivers/pci/host/pci-qcom.c | 967
-rw-r--r--  drivers/power/avs/Kconfig | 14
-rw-r--r--  drivers/power/avs/Makefile | 1
-rw-r--r--  drivers/power/avs/qcom-cpr.c | 1988
-rw-r--r--  drivers/regulator/Kconfig | 12
-rw-r--r--  drivers/regulator/Makefile | 1
-rw-r--r--  drivers/regulator/qcom_smd-regulator.c | 116
-rw-r--r--  drivers/remoteproc/Kconfig | 16
-rw-r--r--  drivers/remoteproc/Makefile | 2
-rw-r--r--  drivers/remoteproc/qcom_q6v5_pil.c | 1075
-rw-r--r--  drivers/remoteproc/qcom_tz_pil.c | 719
-rw-r--r--  drivers/remoteproc/remoteproc_core.c | 44
-rw-r--r--  drivers/soc/qcom/Makefile | 1
-rw-r--r--  drivers/soc/qcom/cpu_ops.c | 343
-rw-r--r--  drivers/soc/qcom/smd-rpm.c | 3
-rw-r--r--  drivers/soc/qcom/smd.c | 146
-rw-r--r--  drivers/soc/qcom/smem.c | 122
-rw-r--r--  drivers/soc/qcom/spm.c | 172
-rw-r--r--  drivers/thermal/Kconfig | 5
-rw-r--r--  drivers/thermal/Makefile | 1
-rw-r--r--  drivers/thermal/qcom/Kconfig | 10
-rw-r--r--  drivers/thermal/qcom/Makefile | 2
-rw-r--r--  drivers/thermal/qcom/tsens-8916.c | 107
-rw-r--r--  drivers/thermal/qcom/tsens-8960.c | 291
-rw-r--r--  drivers/thermal/qcom/tsens-8974.c | 239
-rw-r--r--  drivers/thermal/qcom/tsens-common.c | 130
-rw-r--r--  drivers/thermal/qcom/tsens.c | 206
-rw-r--r--  drivers/thermal/qcom/tsens.h | 69
-rw-r--r--  drivers/tty/serial/Kconfig | 8
-rw-r--r--  drivers/tty/serial/Makefile | 1
-rw-r--r--  drivers/tty/serial/msm_serial.c | 618
-rw-r--r--  drivers/tty/serial/msm_serial.h | 55
-rw-r--r--  drivers/tty/serial/msm_smd_tty.c | 417
-rw-r--r--  drivers/usb/chipidea/Kconfig | 1
-rw-r--r--  drivers/usb/chipidea/core.c | 125
-rw-r--r--  drivers/usb/chipidea/otg.c | 39
-rw-r--r--  drivers/usb/phy/phy-msm-usb.c | 52
152 files changed, 26284 insertions, 1559 deletions
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index f0099360039e..65438123f087 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -24,11 +24,80 @@
#define to_amba_driver(d) container_of(d, struct amba_driver, drv)
+static int amba_get_enable_pclk(struct amba_device *pcdev)
+{
+ int ret;
+
+ pcdev->pclk = clk_get(&pcdev->dev, "apb_pclk");
+ if (IS_ERR(pcdev->pclk))
+ return PTR_ERR(pcdev->pclk);
+
+ ret = clk_prepare_enable(pcdev->pclk);
+ if (ret)
+ clk_put(pcdev->pclk);
+
+ return ret;
+}
+
+static void amba_put_disable_pclk(struct amba_device *pcdev)
+{
+ clk_disable_unprepare(pcdev->pclk);
+ clk_put(pcdev->pclk);
+}
+
+static int amba_read_periphid(struct amba_device *adev)
+{
+ resource_size_t size;
+ void __iomem *tmp;
+ int r, i;
+
+ if (adev->periphid)
+ return 0;
+
+ /*
+ * Dynamically calculate the size of the resource
+ * and use this for iomap
+ */
+ size = resource_size(&adev->res);
+ tmp = ioremap(adev->res.start, size);
+ if (!tmp)
+ return -ENODEV;
+
+ r = amba_get_enable_pclk(adev);
+ if (r == 0) {
+ u32 pid, cid;
+
+ /*
+ * Read pid and cid based on size of resource
+ * they are located at end of region
+ */
+ for (pid = 0, i = 0; i < 4; i++)
+ pid |= (readl(tmp + size - 0x20 + 4 * i) & 255) <<
+ (i * 8);
+ for (cid = 0, i = 0; i < 4; i++)
+ cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) <<
+ (i * 8);
+
+ amba_put_disable_pclk(adev);
+
+ if (cid == AMBA_CID || cid == CORESIGHT_CID)
+ adev->periphid = pid;
+
+ }
+
+ iounmap(tmp);
+
+ return adev->periphid ? 0 : -ENODEV;
+}
+
static const struct amba_id *
amba_lookup(const struct amba_id *table, struct amba_device *dev)
{
int ret = 0;
+ if (amba_read_periphid(dev))
+ return NULL;
+
while (table->mask) {
ret = (dev->periphid & table->mask) == table->id;
if (ret)
@@ -204,27 +273,6 @@ static int __init amba_init(void)
postcore_initcall(amba_init);
-static int amba_get_enable_pclk(struct amba_device *pcdev)
-{
- int ret;
-
- pcdev->pclk = clk_get(&pcdev->dev, "apb_pclk");
- if (IS_ERR(pcdev->pclk))
- return PTR_ERR(pcdev->pclk);
-
- ret = clk_prepare_enable(pcdev->pclk);
- if (ret)
- clk_put(pcdev->pclk);
-
- return ret;
-}
-
-static void amba_put_disable_pclk(struct amba_device *pcdev)
-{
- clk_disable_unprepare(pcdev->pclk);
- clk_put(pcdev->pclk);
-}
-
/*
* These are the device model conversion veneers; they convert the
* device model structures to our more specific structures.
@@ -347,9 +395,7 @@ static void amba_device_release(struct device *dev)
*/
int amba_device_add(struct amba_device *dev, struct resource *parent)
{
- u32 size;
- void __iomem *tmp;
- int i, ret;
+ int ret;
WARN_ON(dev->irq[0] == (unsigned int)-1);
WARN_ON(dev->irq[1] == (unsigned int)-1);
@@ -358,51 +404,6 @@ int amba_device_add(struct amba_device *dev, struct resource *parent)
if (ret)
goto err_out;
- /* Hard-coded primecell ID instead of plug-n-play */
- if (dev->periphid != 0)
- goto skip_probe;
-
- /*
- * Dynamically calculate the size of the resource
- * and use this for iomap
- */
- size = resource_size(&dev->res);
- tmp = ioremap(dev->res.start, size);
- if (!tmp) {
- ret = -ENOMEM;
- goto err_release;
- }
-
- ret = amba_get_enable_pclk(dev);
- if (ret == 0) {
- u32 pid, cid;
-
- /*
- * Read pid and cid based on size of resource
- * they are located at end of region
- */
- for (pid = 0, i = 0; i < 4; i++)
- pid |= (readl(tmp + size - 0x20 + 4 * i) & 255) <<
- (i * 8);
- for (cid = 0, i = 0; i < 4; i++)
- cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) <<
- (i * 8);
-
- amba_put_disable_pclk(dev);
-
- if (cid == AMBA_CID || cid == CORESIGHT_CID)
- dev->periphid = pid;
-
- if (!dev->periphid)
- ret = -ENODEV;
- }
-
- iounmap(tmp);
-
- if (ret)
- goto err_release;
-
- skip_probe:
ret = device_add(&dev->dev);
if (ret)
goto err_release;
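The two hunks above move PrimeCell ID probing out of amba_device_add() and into amba_lookup(), so the peripheral ID is read lazily when a driver is matched rather than eagerly when the device is added. For reference, a minimal stand-alone sketch of the layout the copied loop decodes (illustrative only, not part of the patch): PrimeCells keep eight byte-wide ID registers in the last 0x20 bytes of their region, PeriphID0-3 at offset size-0x20 and CellID0-3 at size-0x10, each register carrying 8 bits of a 32-bit value, least-significant byte first.

#include <stdint.h>

/* Assemble a 32-bit PrimeCell identifier from four consecutive
 * 32-bit registers that each carry one byte of the value. */
static uint32_t primecell_assemble_id(const volatile uint32_t regs[4])
{
	uint32_t id = 0;
	int i;

	for (i = 0; i < 4; i++)
		id |= (regs[i] & 0xff) << (i * 8);

	return id;
}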
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index f80aaaf9f610..af0dd0284675 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -512,6 +512,10 @@ static int platform_drv_probe(struct device *_dev)
if (ret < 0)
return ret;
+ ret = of_dma_configure_ops(_dev, _dev->of_node);
+ if (ret < 0)
+ goto done;
+
ret = dev_pm_domain_attach(_dev, true);
if (ret != -EPROBE_DEFER) {
ret = drv->probe(dev);
@@ -519,6 +523,10 @@ static int platform_drv_probe(struct device *_dev)
dev_pm_domain_detach(_dev, true);
}
+ if (ret)
+ of_dma_deconfigure(_dev);
+
+done:
if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
dev_warn(_dev, "probe deferral not supported\n");
ret = -ENXIO;
@@ -540,6 +548,7 @@ static int platform_drv_remove(struct device *_dev)
ret = drv->remove(dev);
dev_pm_domain_detach(_dev, true);
+ of_dma_deconfigure(_dev);
return ret;
}
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 7ae7cd990fbf..c25624e829b0 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -146,10 +146,11 @@ static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);
-#define opp_rcu_lockdep_assert() \
+#define opp_rcu_lockdep_assert(s) \
do { \
RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
- !lockdep_is_held(&dev_opp_list_lock), \
+ !((s && srcu_read_lock_held(s)) || \
+ lockdep_is_held(&dev_opp_list_lock)), \
"Missing rcu_read_lock() or " \
"dev_opp_list_lock protection"); \
} while (0)
@@ -236,9 +237,10 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
struct dev_pm_opp *tmp_opp;
unsigned long v = 0;
- opp_rcu_lockdep_assert();
+ opp_rcu_lockdep_assert(&opp->dev_opp->srcu_head.srcu);
- tmp_opp = rcu_dereference(opp);
+ tmp_opp = srcu_dereference_check(opp, &opp->dev_opp->srcu_head.srcu,
+ rcu_read_lock_held());
if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
pr_err("%s: Invalid parameters\n", __func__);
else
@@ -268,9 +270,10 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
struct dev_pm_opp *tmp_opp;
unsigned long f = 0;
- opp_rcu_lockdep_assert();
+ opp_rcu_lockdep_assert(&opp->dev_opp->srcu_head.srcu);
- tmp_opp = rcu_dereference(opp);
+ tmp_opp = srcu_dereference_check(opp, &opp->dev_opp->srcu_head.srcu,
+ rcu_read_lock_held());
if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
pr_err("%s: Invalid parameters\n", __func__);
else
@@ -302,7 +305,7 @@ bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
struct dev_pm_opp *tmp_opp;
- opp_rcu_lockdep_assert();
+ opp_rcu_lockdep_assert(&opp->dev_opp->srcu_head.srcu);
tmp_opp = rcu_dereference(opp);
if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
@@ -357,7 +360,7 @@ struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
{
struct device_opp *dev_opp;
- opp_rcu_lockdep_assert();
+ opp_rcu_lockdep_assert(NULL);
dev_opp = _find_device_opp(dev);
if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
@@ -437,7 +440,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
struct device_opp *dev_opp;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
- opp_rcu_lockdep_assert();
+ opp_rcu_lockdep_assert(NULL);
dev_opp = _find_device_opp(dev);
if (IS_ERR(dev_opp)) {
@@ -485,7 +488,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
struct device_opp *dev_opp;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
- opp_rcu_lockdep_assert();
+ opp_rcu_lockdep_assert(NULL);
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -535,7 +538,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
struct device_opp *dev_opp;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
- opp_rcu_lockdep_assert();
+ opp_rcu_lockdep_assert(NULL);
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -1143,6 +1146,83 @@ unlock:
}
/**
+ * dev_pm_opp_adjust_voltage() - helper to change the voltage of an opp
+ * @dev: device for which we do this operation
+ * @freq: OPP frequency to adjust voltage of
+ * @u_volt: new OPP voltage
+ *
+ * Change the voltage of an OPP with an RCU operation.
+ *
+ * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
+ * copy operation, returns 0 if no modification was done OR modification was
+ * successful.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks to
+ * keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex locking or synchronize_rcu() blocking calls cannot be used.
+ */
+int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+ unsigned long u_volt)
+{
+ struct device_opp *dev_opp;
+ struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
+ int r = 0;
+
+ /* keep the node allocated */
+ new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
+ if (!new_opp)
+ return -ENOMEM;
+
+ mutex_lock(&dev_opp_list_lock);
+
+ /* Find the device_opp */
+ dev_opp = _find_device_opp(dev);
+ if (IS_ERR(dev_opp)) {
+ r = PTR_ERR(dev_opp);
+ dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
+ goto unlock;
+ }
+
+ /* Do we have the frequency? */
+ list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
+ if (tmp_opp->rate == freq) {
+ opp = tmp_opp;
+ break;
+ }
+ }
+ if (IS_ERR(opp)) {
+ r = PTR_ERR(opp);
+ goto unlock;
+ }
+
+ /* Is update really needed? */
+ if (opp->u_volt == u_volt)
+ goto unlock;
+ /* copy the old data over */
+ *new_opp = *opp;
+
+ /* plug in new node */
+ new_opp->u_volt = u_volt;
+
+ list_replace_rcu(&opp->node, &new_opp->node);
+ mutex_unlock(&dev_opp_list_lock);
+ call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+
+ /* Notify the change of the OPP */
+ srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADJUST_VOLTAGE,
+ new_opp);
+
+ return 0;
+
+unlock:
+ mutex_unlock(&dev_opp_list_lock);
+ kfree(new_opp);
+ return r;
+}
+
+/**
* dev_pm_opp_enable() - Enable a specific OPP
* @dev: device for which we do this operation
* @freq: OPP frequency to enable
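The new dev_pm_opp_adjust_voltage() above follows the usual RCU copy-update pattern: allocate the replacement node up front, swap it in with list_replace_rcu() under dev_opp_list_lock, free the old node with call_srcu() after a grace period, then notify listeners via OPP_EVENT_ADJUST_VOLTAGE. A hypothetical caller (device pointer, frequency and voltage are illustrative; must run in sleepable context since the helper takes a mutex):

/* Sketch: lower the voltage of the 998.4 MHz OPP to 1.05 V. */
static int trim_opp_voltage(struct device *cpu_dev)
{
	int ret;

	ret = dev_pm_opp_adjust_voltage(cpu_dev, 998400000, 1050000);
	if (ret)
		dev_err(cpu_dev, "failed to adjust OPP voltage: %d\n", ret);

	return ret;
}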
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 7129c86a79db..2c97af0f978c 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -28,35 +28,25 @@
#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw)
-static u8 clk_mux_get_parent(struct clk_hw *hw)
+unsigned int clk_mux_get_parent(struct clk_hw *hw, unsigned int val,
+ unsigned int *table, unsigned long flags)
{
struct clk_mux *mux = to_clk_mux(hw);
int num_parents = clk_hw_get_num_parents(hw);
- u32 val;
- /*
- * FIXME need a mux-specific flag to determine if val is bitwise or numeric
- * e.g. sys_clkin_ck's clksel field is 3 bits wide, but ranges from 0x1
- * to 0x7 (index starts at one)
- * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so
- * val = 0x4 really means "bit 2, index starts at bit 0"
- */
- val = clk_readl(mux->reg) >> mux->shift;
- val &= mux->mask;
-
- if (mux->table) {
+ if (table) {
int i;
for (i = 0; i < num_parents; i++)
- if (mux->table[i] == val)
+ if (table[i] == val)
return i;
return -EINVAL;
}
- if (val && (mux->flags & CLK_MUX_INDEX_BIT))
+ if (val && (flags & CLK_MUX_INDEX_BIT))
val = ffs(val) - 1;
- if (val && (mux->flags & CLK_MUX_INDEX_ONE))
+ if (val && (flags & CLK_MUX_INDEX_ONE))
val--;
if (val >= num_parents)
@@ -64,24 +54,53 @@ static u8 clk_mux_get_parent(struct clk_hw *hw)
return val;
}
+EXPORT_SYMBOL_GPL(clk_mux_get_parent);
-static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
+static u8 _clk_mux_get_parent(struct clk_hw *hw)
{
struct clk_mux *mux = to_clk_mux(hw);
u32 val;
- unsigned long flags = 0;
- if (mux->table)
- index = mux->table[index];
+ /*
+ * FIXME need a mux-specific flag to determine if val is bitwise or numeric
+ * e.g. sys_clkin_ck's clksel field is 3 bits wide, but ranges from 0x1
+ * to 0x7 (index starts at one)
+ * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so
+ * val = 0x4 really means "bit 2, index starts at bit 0"
+ */
+ val = clk_readl(mux->reg) >> mux->shift;
+ val &= mux->mask;
- else {
- if (mux->flags & CLK_MUX_INDEX_BIT)
- index = 1 << index;
+ return clk_mux_get_parent(hw, val, mux->table, mux->flags);
+}
- if (mux->flags & CLK_MUX_INDEX_ONE)
- index++;
+unsigned int clk_mux_reindex(u8 index, unsigned int *table,
+ unsigned long flags)
+{
+ unsigned int val = index;
+
+ if (table) {
+ val = table[val];
+ } else {
+ if (flags & CLK_MUX_INDEX_BIT)
+ val = 1 << index;
+
+ if (flags & CLK_MUX_INDEX_ONE)
+ val++;
}
+ return val;
+}
+EXPORT_SYMBOL_GPL(clk_mux_reindex);
+
+static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ u32 val;
+ unsigned long flags = 0;
+
+ index = clk_mux_reindex(index, mux->table, mux->flags);
+
if (mux->lock)
spin_lock_irqsave(mux->lock, flags);
else
@@ -105,14 +124,14 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
}
const struct clk_ops clk_mux_ops = {
- .get_parent = clk_mux_get_parent,
+ .get_parent = _clk_mux_get_parent,
.set_parent = clk_mux_set_parent,
.determine_rate = __clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_mux_ops);
const struct clk_ops clk_mux_ro_ops = {
- .get_parent = clk_mux_get_parent,
+ .get_parent = _clk_mux_get_parent,
};
EXPORT_SYMBOL_GPL(clk_mux_ro_ops);
@@ -120,7 +139,7 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents,
unsigned long flags,
void __iomem *reg, u8 shift, u32 mask,
- u8 clk_mux_flags, u32 *table, spinlock_t *lock)
+ u8 clk_mux_flags, unsigned int *table, spinlock_t *lock)
{
struct clk_mux *mux;
struct clk *clk;
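This change splits the register decoding out of the old static clk_mux_get_parent(), so clk_mux_get_parent() and clk_mux_reindex() become exported helpers that map between a parent index and a register field value in both directions; other mux implementations (such as the Krait mux later in this series) can then reuse the table/flag handling. A minimal sketch of a driver reusing them (function names hypothetical; no translation table and no CLK_MUX_INDEX_* flags assumed):

#include <linux/clk-provider.h>

/* Decode a register field value into a parent index. */
static u8 my_mux_get_parent(struct clk_hw *hw, unsigned int regval)
{
	return clk_mux_get_parent(hw, regval, NULL, 0);
}

/* Encode a parent index into the register field value. */
static unsigned int my_mux_sel(u8 index)
{
	return clk_mux_reindex(index, NULL, 0);
}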
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 0ebcf449778a..1fa4aaaa6d65 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -51,9 +51,13 @@ struct clk_core {
struct clk_core **parents;
u8 num_parents;
u8 new_parent_index;
+ u8 safe_parent_index;
unsigned long rate;
unsigned long req_rate;
+ unsigned long old_rate;
unsigned long new_rate;
+ unsigned long safe_freq;
+ struct clk_core *safe_parent;
struct clk_core *new_parent;
struct clk_core *new_child;
unsigned long flags;
@@ -1266,7 +1270,9 @@ out:
static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
struct clk_core *new_parent, u8 p_index)
{
- struct clk_core *child;
+ struct clk_core *child, *parent;
+ struct clk_hw *parent_hw;
+ unsigned long safe_freq = 0;
core->new_rate = new_rate;
core->new_parent = new_parent;
@@ -1276,6 +1282,23 @@ static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
if (new_parent && new_parent != core->parent)
new_parent->new_child = core;
+ if (core->ops->get_safe_parent) {
+ parent_hw = core->ops->get_safe_parent(core->hw, &safe_freq);
+ if (parent_hw) {
+ parent = parent_hw->core;
+ p_index = clk_fetch_parent_index(core, parent);
+ core->safe_parent_index = p_index;
+ core->safe_parent = parent;
+ if (safe_freq)
+ core->safe_freq = safe_freq;
+ else
+ core->safe_freq = 0;
+ }
+ } else {
+ core->safe_parent = NULL;
+ core->safe_freq = 0;
+ }
+
hlist_for_each_entry(child, &core->children, child_node) {
child->new_rate = clk_recalc(child, new_rate);
clk_calc_subtree(child, child->new_rate, NULL, 0);
@@ -1388,14 +1411,51 @@ static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
unsigned long event)
{
struct clk_core *child, *tmp_clk, *fail_clk = NULL;
+ struct clk_core *old_parent;
int ret = NOTIFY_DONE;
- if (core->rate == core->new_rate)
+ if (core->rate == core->new_rate && event != POST_RATE_CHANGE)
return NULL;
+ switch (event) {
+ case PRE_RATE_CHANGE:
+ if (core->safe_parent) {
+ if (core->safe_freq)
+ core->ops->set_rate_and_parent(core->hw,
+ core->safe_freq,
+ core->safe_parent->rate,
+ core->safe_parent_index);
+ else
+ core->ops->set_parent(core->hw,
+ core->safe_parent_index);
+ }
+ core->old_rate = core->rate;
+ break;
+ case POST_RATE_CHANGE:
+ if (core->safe_parent) {
+ old_parent = __clk_set_parent_before(core,
+ core->new_parent);
+ if (core->ops->set_rate_and_parent) {
+ core->ops->set_rate_and_parent(core->hw,
+ core->new_rate,
+ core->new_parent ?
+ core->new_parent->rate : 0,
+ core->new_parent_index);
+ } else if (core->ops->set_parent) {
+ core->ops->set_parent(core->hw,
+ core->new_parent_index);
+ }
+ __clk_set_parent_after(core, core->new_parent,
+ old_parent);
+ }
+ break;
+ }
+
if (core->notifier_count) {
- ret = __clk_notify(core, event, core->rate, core->new_rate);
- if (ret & NOTIFY_STOP_MASK)
+ if (event != POST_RATE_CHANGE || core->old_rate != core->rate)
+ ret = __clk_notify(core, event, core->old_rate,
+ core->new_rate);
+ if (ret & NOTIFY_STOP_MASK && event != POST_RATE_CHANGE)
fail_clk = core;
}
@@ -1422,23 +1482,19 @@ static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
* walk down a subtree and set the new rates notifying the rate
* change on the way
*/
-static void clk_change_rate(struct clk_core *core)
+static void
+clk_change_rate(struct clk_core *core, unsigned long best_parent_rate)
{
struct clk_core *child;
struct hlist_node *tmp;
unsigned long old_rate;
- unsigned long best_parent_rate = 0;
bool skip_set_rate = false;
struct clk_core *old_parent;
old_rate = core->rate;
- if (core->new_parent)
- best_parent_rate = core->new_parent->rate;
- else if (core->parent)
- best_parent_rate = core->parent->rate;
-
- if (core->new_parent && core->new_parent != core->parent) {
+ if (core->new_parent && core->new_parent != core->parent &&
+ !core->safe_parent) {
old_parent = __clk_set_parent_before(core, core->new_parent);
trace_clk_set_parent(core, core->new_parent);
@@ -1463,6 +1519,7 @@ static void clk_change_rate(struct clk_core *core)
trace_clk_set_rate_complete(core, core->new_rate);
core->rate = clk_recalc(core, best_parent_rate);
+ core->rate = core->new_rate;
if (core->notifier_count && old_rate != core->rate)
__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
@@ -1478,12 +1535,13 @@ static void clk_change_rate(struct clk_core *core)
/* Skip children who will be reparented to another clock */
if (child->new_parent && child->new_parent != core)
continue;
- clk_change_rate(child);
+ if (child->new_rate != child->rate)
+ clk_change_rate(child, core->new_rate);
}
- /* handle the new child who might not be in core->children yet */
- if (core->new_child)
- clk_change_rate(core->new_child);
+ /* handle the new child who might not be in core->children yet */
+ if (core->new_child && core->new_child->new_rate != core->new_child->rate)
+ clk_change_rate(core->new_child, core->new_rate);
}
static int clk_core_set_rate_nolock(struct clk_core *core,
@@ -1492,6 +1550,7 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
struct clk_core *top, *fail_clk;
unsigned long rate = req_rate;
int ret = 0;
+ unsigned long parent_rate;
if (!core)
return 0;
@@ -1517,11 +1576,18 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
return -EBUSY;
}
+ if (top->parent)
+ parent_rate = top->parent->rate;
+ else
+ parent_rate = 0;
+
/* change the rates */
- clk_change_rate(top);
+ clk_change_rate(top, parent_rate);
core->req_rate = req_rate;
+ clk_propagate_rate_change(top, POST_RATE_CHANGE);
+
return ret;
}
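The clk.c changes teach the core about "safe" parents: before a rate change (PRE_RATE_CHANGE) a clock that implements the new get_safe_parent op is parked on that parent, optionally at a safe frequency, and only after the tree has been reprogrammed (POST_RATE_CHANGE) is it moved to its new parent and rate. This lets a CPU mux ride out PLL relocking on a stable source. A provider opting in would implement something like the following (hypothetical hook, mirroring krait_mux_get_safe_parent later in this series):

/* Park on parent 0 while the fast PLL is reprogrammed; a zero
 * *safe_freq means "keep the safe parent's current rate". */
static struct clk_hw *my_get_safe_parent(struct clk_hw *hw,
					 unsigned long *safe_freq)
{
	*safe_freq = 0;

	return clk_hw_get_parent_by_index(hw, 0);
}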
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 59d16668bdf5..5b85d27d8622 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -5,8 +5,23 @@ config COMMON_CLK_QCOM
select REGMAP_MMIO
select RESET_CONTROLLER
+config QCOM_CLK_SMD_RPM
+ tristate "Qualcomm SMD based RPM clock driver"
+ depends on QCOM_SMD_RPM && COMMON_CLK_QCOM
+ help
+ Support for the clocks exposed by the Resource Power Manager
+ processor found in Qualcomm based devices like apq8016.
+
+config QCOM_RPMCC
+ tristate "Qualcomm RPM Clock Controller"
+ depends on QCOM_CLK_SMD_RPM
+ help
+ Support for the clocks exposed by the Resource Power Manager
+ processor on devices like apq8016, apq8084 and msm8974.
+
config APQ_GCC_8084
tristate "APQ8084 Global Clock Controller"
+ select QCOM_GDSC
depends on COMMON_CLK_QCOM
help
Support for the global clock controller on apq8084 devices.
@@ -16,6 +31,7 @@ config APQ_GCC_8084
config APQ_MMCC_8084
tristate "APQ8084 Multimedia Clock Controller"
select APQ_GCC_8084
+ select QCOM_GDSC
depends on COMMON_CLK_QCOM
help
Support for the multimedia clock controller on apq8084 devices.
@@ -39,6 +55,11 @@ config IPQ_LCC_806X
Say Y if you want to use audio devices such as i2s, pcm,
S/PDIF, etc.
+config QCOM_GDSC
+ bool
+ select PM_GENERIC_DOMAINS if PM
+ depends on COMMON_CLK_QCOM
+
config MSM_GCC_8660
tristate "MSM8660 Global Clock Controller"
depends on COMMON_CLK_QCOM
@@ -49,6 +70,7 @@ config MSM_GCC_8660
config MSM_GCC_8916
tristate "MSM8916 Global Clock Controller"
+ select QCOM_GDSC
depends on COMMON_CLK_QCOM
help
Support for the global clock controller on msm8916 devices.
@@ -83,6 +105,7 @@ config MSM_MMCC_8960
config MSM_GCC_8974
tristate "MSM8974 Global Clock Controller"
+ select QCOM_GDSC
depends on COMMON_CLK_QCOM
help
Support for the global clock controller on msm8974 devices.
@@ -92,8 +115,53 @@ config MSM_GCC_8974
config MSM_MMCC_8974
tristate "MSM8974 Multimedia Clock Controller"
select MSM_GCC_8974
+ select QCOM_GDSC
depends on COMMON_CLK_QCOM
help
Support for the multimedia clock controller on msm8974 devices.
Say Y if you want to support multimedia devices such as display,
graphics, video encode/decode, camera, etc.
+
+config QCOM_HFPLL
+ tristate "High-Frequency PLL (HFPLL) Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the high-frequency PLLs present on Qualcomm devices.
+ Say Y if you want to support CPU frequency scaling on devices
+ such as MSM8974, APQ8084, etc.
+
+config KPSS_XCC
+ tristate "KPSS Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the Krait ACC and GCC clock controllers. Say Y
+ if you want to support CPU frequency scaling on devices such
+ as MSM8960, APQ8064, etc.
+
+config KRAITCC
+ tristate "Krait Clock Controller"
+ depends on COMMON_CLK_QCOM && ARM
+ select KRAIT_CLOCKS
+ help
+ Support for the Krait CPU clocks on Qualcomm devices.
+ Say Y if you want to support CPU frequency scaling.
+
+config KRAIT_CLOCKS
+ bool
+ select KRAIT_L2_ACCESSORS
+
+config MSM_RPMCC_8064
+ tristate "MSM8064 RPM Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the RPM clock controller which controls fabric clocks
+ on SoCs like APQ8064.
+ Say Y if you want to use fabric clocks like AFAB, DAYTONA, etc.
+
+config QCOM_A53
+ tristate "A53 Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the A53 clock controller on Qualcomm devices.
+ Say Y if you want to support CPU frequency scaling on devices
+ such as MSM8916.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 50b337a24a87..dbc98a97c0ca 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -8,7 +8,13 @@ clk-qcom-y += clk-rcg2.o
clk-qcom-y += clk-branch.o
clk-qcom-y += clk-regmap-divider.o
clk-qcom-y += clk-regmap-mux.o
+clk-qcom-y += clk-regmap-mux-div.o
+clk-qcom-$(CONFIG_KRAIT_CLOCKS) += clk-krait.o
+clk-qcom-y += clk-hfpll.o
clk-qcom-y += reset.o
+clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
+clk-qcom-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
+clk-qcom-$(CONFIG_QCOM_RPMCC) += rpmcc.o
obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
@@ -18,6 +24,11 @@ obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o
obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
+obj-$(CONFIG_MSM_RPMCC_8064) += rpmcc-apq8064.o
obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
+obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o
+obj-$(CONFIG_QCOM_HFPLL) += hfpll.o
+obj-$(CONFIG_KRAITCC) += krait-cc.o
+obj-$(CONFIG_QCOM_A53) += clk-a53.o
diff --git a/drivers/clk/qcom/clk-a53.c b/drivers/clk/qcom/clk-a53.c
new file mode 100644
index 000000000000..7320c7b4b3ff
--- /dev/null
+++ b/drivers/clk/qcom/clk-a53.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "clk-pll.h"
+#include "clk-regmap.h"
+#include "clk-regmap-mux-div.h"
+
+#define F_APCS_PLL(f, l, m, n) { (f), (l), (m), (n), 0 }
+
+static struct pll_freq_tbl apcs_pll_freq[] = {
+ F_APCS_PLL( 998400000, 52, 0x0, 0x1),
+ F_APCS_PLL(1094400000, 57, 0x0, 0x1),
+ F_APCS_PLL(1152000000, 62, 0x0, 0x1),
+ F_APCS_PLL(1209600000, 65, 0x0, 0x1),
+ F_APCS_PLL(1401600000, 73, 0x0, 0x1),
+};
+
+static struct clk_pll a53sspll = {
+ .l_reg = 0x04,
+ .m_reg = 0x08,
+ .n_reg = 0x0c,
+ .config_reg = 0x14,
+ .mode_reg = 0x00,
+ .status_reg = 0x1c,
+ .status_bit = 16,
+ .freq_tbl = apcs_pll_freq,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "a53sspll",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_pll_sr2_ops,
+ },
+};
+
+static const struct regmap_config a53sspll_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x40,
+ .fast_io = true,
+};
+
+static struct clk *a53ss_add_pll(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *base;
+ struct regmap *regmap;
+ struct clk_pll *pll;
+
+ pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return base;
+
+ pll = &a53sspll;
+
+ regmap = devm_regmap_init_mmio(dev, base, &a53sspll_regmap_config);
+ if (IS_ERR(regmap))
+ return ERR_CAST(regmap);
+
+ return devm_clk_register_regmap(dev, &pll->clkr);
+}
+
+enum {
+ P_GPLL0,
+ P_A53SSPLL,
+};
+
+static const struct parent_map gpll0_a53sspll_map[] = {
+ { P_GPLL0, 4 },
+ { P_A53SSPLL, 5 },
+};
+
+static const char *gpll0_a53sspll[] = {
+ "gpll0_vote",
+ "a53sspll",
+};
+
+static struct clk_regmap_mux_div a53ssmux = {
+ .reg_offset = 0x50,
+ .hid_width = 5,
+ .hid_shift = 0,
+ .src_width = 3,
+ .src_shift = 8,
+ .safe_src = 4,
+ .safe_freq = 400000000,
+ .parent_map = gpll0_a53sspll_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "a53ssmux",
+ .parent_names = gpll0_a53sspll,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_div_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk *a53ss_add_mux(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct regmap *regmap;
+ struct clk_regmap_mux_div *mux;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ mux = &a53ssmux;
+
+ regmap = syscon_regmap_lookup_by_phandle(np, "qcom,apcs");
+ if (IS_ERR(regmap))
+ return ERR_CAST(regmap);
+
+ mux->clkr.regmap = regmap;
+ return devm_clk_register(dev, &mux->clkr.hw);
+}
+
+static const struct of_device_id qcom_a53_match_table[] = {
+ { .compatible = "qcom,clock-a53-msm8916" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_a53_match_table);
+
+static int qcom_a53_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk *clk_pll, *clk_mux;
+ struct clk_onecell_data *data;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->clks = devm_kcalloc(dev, 2, sizeof(struct clk *), GFP_KERNEL);
+ if (!data->clks)
+ return -ENOMEM;
+
+ clk_pll = a53ss_add_pll(pdev);
+ if (IS_ERR(clk_pll))
+ return PTR_ERR(clk_pll);
+
+ clk_mux = a53ss_add_mux(pdev);
+ if (IS_ERR(clk_mux))
+ return PTR_ERR(clk_mux);
+
+ data->clks[0] = clk_pll;
+ data->clks[1] = clk_mux;
+ data->clk_num = 2;
+
+ clk_prepare_enable(clk_pll);
+
+ return of_clk_add_provider(dev->of_node, of_clk_src_onecell_get, data);
+}
+
+static struct platform_driver qcom_a53_driver = {
+ .probe = qcom_a53_probe,
+ .driver = {
+ .name = "qcom-a53",
+ .of_match_table = qcom_a53_match_table,
+ },
+};
+
+module_platform_driver(qcom_a53_driver);
+
+MODULE_DESCRIPTION("Qualcomm A53 Clock Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-a53");
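The probe above registers the PLL at index 0 and the CPU mux at index 1 of a onecell clock provider. A hypothetical consumer (for example a cpufreq driver; node pointer and rate are illustrative) would pick up the mux output and scale it:

#include <linux/clk.h>
#include <linux/of.h>

static int a53_set_cpu_rate(struct device_node *np, unsigned long rate)
{
	/* Index 1 is the a53ssmux output in the provider above. */
	struct clk *mux = of_clk_get(np, 1);

	if (IS_ERR(mux))
		return PTR_ERR(mux);

	/* e.g. 1094400000, one of the rates in apcs_pll_freq */
	return clk_set_rate(mux, rate);
}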
diff --git a/drivers/clk/qcom/clk-hfpll.c b/drivers/clk/qcom/clk-hfpll.c
new file mode 100644
index 000000000000..eacf853c132e
--- /dev/null
+++ b/drivers/clk/qcom/clk-hfpll.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+
+#include "clk-regmap.h"
+#include "clk-hfpll.h"
+
+#define PLL_OUTCTRL BIT(0)
+#define PLL_BYPASSNL BIT(1)
+#define PLL_RESET_N BIT(2)
+
+/* Initialize an HFPLL at a given rate and enable it. */
+static void __clk_hfpll_init_once(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+
+ if (likely(h->init_done))
+ return;
+
+ /* Configure PLL parameters for integer mode. */
+ if (hd->config_val)
+ regmap_write(regmap, hd->config_reg, hd->config_val);
+ regmap_write(regmap, hd->m_reg, 0);
+ regmap_write(regmap, hd->n_reg, 1);
+
+ if (hd->user_reg) {
+ u32 regval = hd->user_val;
+ unsigned long rate;
+
+ rate = clk_hw_get_rate(hw);
+
+ /* Pick the right VCO. */
+ if (hd->user_vco_mask && rate > hd->low_vco_max_rate)
+ regval |= hd->user_vco_mask;
+ regmap_write(regmap, hd->user_reg, regval);
+ }
+
+ if (hd->droop_reg)
+ regmap_write(regmap, hd->droop_reg, hd->droop_val);
+
+ h->init_done = true;
+}
+
+static void __clk_hfpll_enable(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 val;
+
+ __clk_hfpll_init_once(hw);
+
+ /* Disable PLL bypass mode. */
+ regmap_update_bits(regmap, hd->mode_reg, PLL_BYPASSNL, PLL_BYPASSNL);
+
+ /*
+ * H/W requires a 5us delay between disabling the bypass and
+ * de-asserting the reset. Delay 10us just to be safe.
+ */
+ udelay(10);
+
+ /* De-assert active-low PLL reset. */
+ regmap_update_bits(regmap, hd->mode_reg, PLL_RESET_N, PLL_RESET_N);
+
+ /* Wait for PLL to lock. */
+ if (hd->status_reg) {
+ do {
+ regmap_read(regmap, hd->status_reg, &val);
+ } while (!(val & BIT(hd->lock_bit)));
+ } else {
+ udelay(60);
+ }
+
+ /* Enable PLL output. */
+ regmap_update_bits(regmap, hd->mode_reg, PLL_OUTCTRL, PLL_OUTCTRL);
+}
+
+/* Enable an already-configured HFPLL. */
+static int clk_hfpll_enable(struct clk_hw *hw)
+{
+ unsigned long flags;
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 mode;
+
+ spin_lock_irqsave(&h->lock, flags);
+ regmap_read(regmap, hd->mode_reg, &mode);
+ if (!(mode & (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL)))
+ __clk_hfpll_enable(hw);
+ spin_unlock_irqrestore(&h->lock, flags);
+
+ return 0;
+}
+
+static void __clk_hfpll_disable(struct clk_hfpll *h)
+{
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+
+ /*
+ * Disable the PLL output, disable test mode, enable the bypass mode,
+ * and assert the reset.
+ */
+ regmap_update_bits(regmap, hd->mode_reg,
+ PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL, 0);
+}
+
+static void clk_hfpll_disable(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&h->lock, flags);
+ __clk_hfpll_disable(h);
+ spin_unlock_irqrestore(&h->lock, flags);
+}
+
+static long clk_hfpll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ unsigned long rrate;
+
+ rate = clamp(rate, hd->min_rate, hd->max_rate);
+
+ rrate = DIV_ROUND_UP(rate, *parent_rate) * *parent_rate;
+ if (rrate > hd->max_rate)
+ rrate -= *parent_rate;
+
+ return rrate;
+}
+
+/*
+ * For optimization reasons, assumes no downstream clocks are actively using
+ * it.
+ */
+static int clk_hfpll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ unsigned long flags;
+ u32 l_val, val;
+ bool enabled;
+
+ l_val = rate / parent_rate;
+
+ spin_lock_irqsave(&h->lock, flags);
+
+ enabled = __clk_is_enabled(hw->clk);
+ if (enabled)
+ __clk_hfpll_disable(h);
+
+ /* Pick the right VCO. */
+ if (hd->user_reg && hd->user_vco_mask) {
+ regmap_read(regmap, hd->user_reg, &val);
+ if (rate <= hd->low_vco_max_rate)
+ val &= ~hd->user_vco_mask;
+ else
+ val |= hd->user_vco_mask;
+ regmap_write(regmap, hd->user_reg, val);
+ }
+
+ regmap_write(regmap, hd->l_reg, l_val);
+
+ if (enabled)
+ __clk_hfpll_enable(hw);
+
+ spin_unlock_irqrestore(&h->lock, flags);
+
+ return 0;
+}
+
+static unsigned long clk_hfpll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 l_val;
+
+ regmap_read(regmap, hd->l_reg, &l_val);
+
+ return l_val * parent_rate;
+}
+
+static void clk_hfpll_init(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 mode, status;
+
+ regmap_read(regmap, hd->mode_reg, &mode);
+ if (mode != (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL)) {
+ __clk_hfpll_init_once(hw);
+ return;
+ }
+
+ if (hd->status_reg) {
+ regmap_read(regmap, hd->status_reg, &status);
+ if (!(status & BIT(hd->lock_bit))) {
+ WARN(1, "HFPLL %s is ON, but not locked!\n",
+ __clk_get_name(hw->clk));
+ clk_hfpll_disable(hw);
+ __clk_hfpll_init_once(hw);
+ }
+ }
+}
+
+static int hfpll_is_enabled(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 mode;
+
+ regmap_read(regmap, hd->mode_reg, &mode);
+ mode &= 0x7;
+ return mode == (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL);
+}
+
+const struct clk_ops clk_ops_hfpll = {
+ .enable = clk_hfpll_enable,
+ .disable = clk_hfpll_disable,
+ .is_enabled = hfpll_is_enabled,
+ .round_rate = clk_hfpll_round_rate,
+ .set_rate = clk_hfpll_set_rate,
+ .recalc_rate = clk_hfpll_recalc_rate,
+ .init = clk_hfpll_init,
+};
+EXPORT_SYMBOL_GPL(clk_ops_hfpll);
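clk_ops_hfpll encodes the bring-up sequence the hardware requires: disable bypass, wait at least 5 us (10 us here for margin), de-assert reset, poll the lock bit (or wait 60 us when there is no status register), then enable the output. A driver instantiates it by describing the register layout in a hfpll_data; the sketch below uses placeholder offsets, values and rates, not those of a real SoC:

#include "clk-hfpll.h"

static const struct hfpll_data example_hfpll_data = {
	.mode_reg	= 0x00,
	.l_reg		= 0x04,
	.m_reg		= 0x08,
	.n_reg		= 0x0c,
	.user_reg	= 0x10,
	.config_reg	= 0x14,
	.status_reg	= 0x1c,
	.lock_bit	= 16,
	.user_val	= 0x8,		/* placeholder */
	.config_val	= 0x04d0405d,	/* placeholder */
	.min_rate	= 600000000UL,
	.max_rate	= 1800000000UL,
};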
diff --git a/drivers/clk/qcom/clk-hfpll.h b/drivers/clk/qcom/clk-hfpll.h
new file mode 100644
index 000000000000..48c18d664f4e
--- /dev/null
+++ b/drivers/clk/qcom/clk-hfpll.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __QCOM_CLK_HFPLL_H__
+#define __QCOM_CLK_HFPLL_H__
+
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+#include "clk-regmap.h"
+
+struct hfpll_data {
+ u32 mode_reg;
+ u32 l_reg;
+ u32 m_reg;
+ u32 n_reg;
+ u32 user_reg;
+ u32 droop_reg;
+ u32 config_reg;
+ u32 status_reg;
+ u8 lock_bit;
+
+ u32 droop_val;
+ u32 config_val;
+ u32 user_val;
+ u32 user_vco_mask;
+ unsigned long low_vco_max_rate;
+
+ unsigned long min_rate;
+ unsigned long max_rate;
+};
+
+struct clk_hfpll {
+ struct hfpll_data const *d;
+ int init_done;
+
+ struct clk_regmap clkr;
+ spinlock_t lock;
+};
+
+#define to_clk_hfpll(_hw) \
+ container_of(to_clk_regmap(_hw), struct clk_hfpll, clkr)
+
+extern const struct clk_ops clk_ops_hfpll;
+
+#endif
diff --git a/drivers/clk/qcom/clk-krait.c b/drivers/clk/qcom/clk-krait.c
new file mode 100644
index 000000000000..dd69146a22f5
--- /dev/null
+++ b/drivers/clk/qcom/clk-krait.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+
+#include <asm/krait-l2-accessors.h>
+
+#include "clk-krait.h"
+
+/* Secondary and primary muxes share the same cp15 register */
+static DEFINE_SPINLOCK(krait_clock_reg_lock);
+
+#define LPL_SHIFT 8
+static void __krait_mux_set_sel(struct krait_mux_clk *mux, int sel)
+{
+ unsigned long flags;
+ u32 regval;
+
+ spin_lock_irqsave(&krait_clock_reg_lock, flags);
+ regval = krait_get_l2_indirect_reg(mux->offset);
+ regval &= ~(mux->mask << mux->shift);
+ regval |= (sel & mux->mask) << mux->shift;
+ if (mux->lpl) {
+ regval &= ~(mux->mask << (mux->shift + LPL_SHIFT));
+ regval |= (sel & mux->mask) << (mux->shift + LPL_SHIFT);
+ }
+ krait_set_l2_indirect_reg(mux->offset, regval);
+ spin_unlock_irqrestore(&krait_clock_reg_lock, flags);
+
+ /* Wait for switch to complete. */
+ mb();
+ udelay(1);
+}
+
+static int krait_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct krait_mux_clk *mux = to_krait_mux_clk(hw);
+ u32 sel;
+
+ sel = clk_mux_reindex(index, mux->parent_map, 0);
+ mux->en_mask = sel;
+ /* Don't touch mux if CPU is off as it won't work */
+ if (__clk_is_enabled(hw->clk))
+ __krait_mux_set_sel(mux, sel);
+ return 0;
+}
+
+static u8 krait_mux_get_parent(struct clk_hw *hw)
+{
+ struct krait_mux_clk *mux = to_krait_mux_clk(hw);
+ u32 sel;
+
+ sel = krait_get_l2_indirect_reg(mux->offset);
+ sel >>= mux->shift;
+ sel &= mux->mask;
+ mux->en_mask = sel;
+
+ return clk_mux_get_parent(hw, sel, mux->parent_map, 0);
+}
+
+static struct clk_hw *krait_mux_get_safe_parent(struct clk_hw *hw,
+ unsigned long *safe_freq)
+{
+ int i;
+ struct krait_mux_clk *mux = to_krait_mux_clk(hw);
+ int num_parents = clk_hw_get_num_parents(hw);
+
+ for (i = 0; i < num_parents; i++)
+ if (mux->safe_sel == mux->parent_map[i])
+ break;
+
+ if (safe_freq)
+ *safe_freq = 0;
+
+ return clk_hw_get_parent_by_index(hw, i);
+}
+
+static int krait_mux_enable(struct clk_hw *hw)
+{
+ struct krait_mux_clk *mux = to_krait_mux_clk(hw);
+
+ __krait_mux_set_sel(mux, mux->en_mask);
+
+ return 0;
+}
+
+static void krait_mux_disable(struct clk_hw *hw)
+{
+ struct krait_mux_clk *mux = to_krait_mux_clk(hw);
+
+ __krait_mux_set_sel(mux, mux->safe_sel);
+}
+
+const struct clk_ops krait_mux_clk_ops = {
+ .enable = krait_mux_enable,
+ .disable = krait_mux_disable,
+ .set_parent = krait_mux_set_parent,
+ .get_parent = krait_mux_get_parent,
+ .determine_rate = __clk_mux_determine_rate_closest,
+ .get_safe_parent = krait_mux_get_safe_parent,
+};
+EXPORT_SYMBOL_GPL(krait_mux_clk_ops);
+
+/* The divider can divide by 2, 4, 6 and 8. But we only really need div-2. */
+static long krait_div2_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ *parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), rate * 2);
+ return DIV_ROUND_UP(*parent_rate, 2);
+}
+
+static int krait_div2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct krait_div2_clk *d = to_krait_div2_clk(hw);
+ unsigned long flags;
+ u32 val;
+ u32 mask = BIT(d->width) - 1;
+
+ if (d->lpl)
+ mask = mask << (d->shift + LPL_SHIFT) | mask << d->shift;
+
+ spin_lock_irqsave(&krait_clock_reg_lock, flags);
+ val = krait_get_l2_indirect_reg(d->offset);
+ val &= ~mask;
+ krait_set_l2_indirect_reg(d->offset, val);
+ spin_unlock_irqrestore(&krait_clock_reg_lock, flags);
+
+ return 0;
+}
+
+static unsigned long
+krait_div2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct krait_div2_clk *d = to_krait_div2_clk(hw);
+ u32 mask = BIT(d->width) - 1;
+ u32 div;
+
+ div = krait_get_l2_indirect_reg(d->offset);
+ div >>= d->shift;
+ div &= mask;
+ div = (div + 1) * 2;
+
+ return DIV_ROUND_UP(parent_rate, div);
+}
+
+const struct clk_ops krait_div2_clk_ops = {
+ .round_rate = krait_div2_round_rate,
+ .set_rate = krait_div2_set_rate,
+ .recalc_rate = krait_div2_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(krait_div2_clk_ops);
diff --git a/drivers/clk/qcom/clk-krait.h b/drivers/clk/qcom/clk-krait.h
new file mode 100644
index 000000000000..5d0063538e5d
--- /dev/null
+++ b/drivers/clk/qcom/clk-krait.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_KRAIT_H
+#define __QCOM_CLK_KRAIT_H
+
+#include <linux/clk-provider.h>
+
+struct krait_mux_clk {
+ unsigned int *parent_map;
+ bool has_safe_parent;
+ u8 safe_sel;
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 en_mask;
+ bool lpl;
+
+ struct clk_hw hw;
+};
+
+#define to_krait_mux_clk(_hw) container_of(_hw, struct krait_mux_clk, hw)
+
+extern const struct clk_ops krait_mux_clk_ops;
+
+struct krait_div2_clk {
+ u32 offset;
+ u8 width;
+ u32 shift;
+ bool lpl;
+
+ struct clk_hw hw;
+};
+
+#define to_krait_div2_clk(_hw) container_of(_hw, struct krait_div2_clk, hw)
+
+extern const struct clk_ops krait_div2_clk_ops;
+
+#endif
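Unlike the generic mux, a krait_mux_clk is programmed through the CP15-indirect L2 registers (krait_get/set_l2_indirect_reg) rather than MMIO, and the lpl flag mirrors the select field into a second copy LPL_SHIFT bits higher. A hypothetical instance (offset, field layout and parent names are illustrative only):

#include "clk-krait.h"

static unsigned int example_sec_mux_map[] = { 2, 0 };

static struct krait_mux_clk example_sec_mux = {
	.offset		= 0x39,		/* L2 indirect register, illustrative */
	.mask		= 0x3,
	.shift		= 2,
	.safe_sel	= 0,
	.lpl		= true,
	.parent_map	= example_sec_mux_map,
	.hw.init	= &(struct clk_init_data){
		.name		= "example_sec_mux",
		.parent_names	= (const char *[]){ "qsb", "acpu_aux" },
		.num_parents	= 2,
		.ops		= &krait_mux_clk_ops,
	},
};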
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 56028bb31d87..9b3ef5d67895 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -153,6 +153,7 @@ extern const struct clk_ops clk_dyn_rcg_ops;
* @hid_width: number of bits in half integer divider
* @parent_map: map from software's parent index to hardware's src_sel field
* @freq_tbl: frequency table
+ * @current_freq: cached frequency, used for shared branches
* @clkr: regmap clock handle
* @lock: register lock
*
@@ -163,14 +164,17 @@ struct clk_rcg2 {
u8 hid_width;
const struct parent_map *parent_map;
const struct freq_tbl *freq_tbl;
+ unsigned long current_freq;
struct clk_regmap clkr;
};
#define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
extern const struct clk_ops clk_rcg2_ops;
+extern const struct clk_ops clk_rcg2_shared_ops;
extern const struct clk_ops clk_edp_pixel_ops;
extern const struct clk_ops clk_byte_ops;
+extern const struct clk_ops clk_byte2_ops;
extern const struct clk_ops clk_pixel_ops;
#endif
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 9aec1761fd29..3e8d0bde4704 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -300,6 +300,74 @@ const struct clk_ops clk_rcg2_ops = {
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);
+static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const char *name = __clk_get_name(hw->clk);
+ int ret, count;
+
+ /* cache the frequency */
+ rcg->current_freq = rate;
+
+ /* do not set any rate while the clock is off */
+ if (!__clk_is_enabled(hw->clk))
+ return 0;
+
+ /* force enable RCG */
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_ROOT_EN, CMD_ROOT_EN);
+ if (ret)
+ return ret;
+
+ /* wait for RCG to turn ON */
+ for (count = 500; count > 0; count--) {
+ ret = clk_rcg2_is_enabled(hw);
+ if (ret)
+ break;
+ udelay(1);
+ }
+ if (!count)
+ pr_err("%s: RCG did not turn on\n", name);
+
+ /* set clock rate */
+ ret = __clk_rcg2_set_rate(hw, rate);
+ if (ret)
+ return ret;
+
+ /* clear force enable RCG */
+ return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_ROOT_EN, 0);
+}
+
+static int clk_rcg2_shared_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ if (!rcg->current_freq)
+ return 0;
+
+ return clk_rcg2_shared_set_rate(hw, rcg->current_freq, 0);
+}
+
+static void clk_rcg2_shared_disable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ /* switch to XO, which is the lowest entry in the freq table */
+ clk_rcg2_shared_set_rate(hw, rcg->freq_tbl[0].freq, 0);
+}
+
+const struct clk_ops clk_rcg2_shared_ops = {
+ .enable = clk_rcg2_shared_enable,
+ .disable = clk_rcg2_shared_disable,
+ .get_parent = clk_rcg2_get_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .determine_rate = clk_rcg2_determine_rate,
+ .set_rate = clk_rcg2_shared_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
+
struct frac_entry {
int num;
int den;
@@ -485,6 +553,76 @@ const struct clk_ops clk_byte_ops = {
};
EXPORT_SYMBOL_GPL(clk_byte_ops);
+static int clk_byte2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ unsigned long parent_rate, div;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ struct clk_hw *p;
+ unsigned long rate = req->rate;
+
+ if (rate == 0)
+ return -EINVAL;
+
+ p = req->best_parent_hw;
+ req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);
+
+ div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
+ div = min_t(u32, div, mask);
+
+ req->rate = calc_rate(parent_rate, 0, 0, 0, div);
+
+ return 0;
+}
+
+static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = { 0 };
+ unsigned long div;
+ int i, num_parents = clk_hw_get_num_parents(hw);
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 cfg;
+
+ div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
+ div = min_t(u32, div, mask);
+
+ f.pre_div = div;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++) {
+ if (cfg == rcg->parent_map[i].cfg) {
+ f.src = rcg->parent_map[i].src;
+ return clk_rcg2_configure(rcg, &f);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ /* Read the hardware to determine parent during set_rate */
+ return clk_byte2_set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops clk_byte2_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_byte2_set_rate,
+ .set_rate_and_parent = clk_byte2_set_rate_and_parent,
+ .determine_rate = clk_byte2_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_byte2_ops);
+
static const struct frac_entry frac_table_pixel[] = {
{ 3, 8 },
{ 2, 9 },
@@ -496,14 +634,9 @@ static const struct frac_entry frac_table_pixel[] = {
static int clk_pixel_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
- struct clk_rcg2 *rcg = to_clk_rcg2(hw);
unsigned long request, src_rate;
int delta = 100000;
- const struct freq_tbl *f = rcg->freq_tbl;
const struct frac_entry *frac = frac_table_pixel;
- int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
-
- req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
for (; frac->num; frac++) {
request = (req->rate * frac->den) / frac->num;
@@ -525,12 +658,23 @@ static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
- struct freq_tbl f = *rcg->freq_tbl;
+ struct freq_tbl f = { 0 };
const struct frac_entry *frac = frac_table_pixel;
unsigned long request;
int delta = 100000;
u32 mask = BIT(rcg->hid_width) - 1;
- u32 hid_div;
+ u32 hid_div, cfg;
+ int i, num_parents = clk_hw_get_num_parents(hw);
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++)
+ if (cfg == rcg->parent_map[i].cfg) {
+ f.src = rcg->parent_map[i].src;
+ break;
+ }
for (; frac->num; frac++) {
request = (rate * frac->den) / frac->num;
@@ -555,7 +699,6 @@ static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate, u8 index)
{
- /* Parent index is set statically in frequency table */
return clk_pixel_set_rate(hw, rate, parent_rate);
}
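The new clk_rcg2_shared_ops are meant for RCGs whose output also feeds branches owned by another processor: set_rate force-enables the root (CMD_ROOT_EN) across reprogramming so the downstream never sees it drop, enable replays the cached current_freq, and disable parks the RCG on the first (XO) entry of its frequency table instead of turning it off. Declaring an RCG with these ops looks like any other clk_rcg2, only the ops differ; the field values below are illustrative, and gcc_xo_gpll0_map, gcc_xo_gpll0 and ftbl_example are assumed to exist:

static struct clk_rcg2 example_shared_clk_src = {
	.cmd_rcgr	= 0x0644,	/* illustrative offset */
	.hid_width	= 5,
	.parent_map	= gcc_xo_gpll0_map,
	.freq_tbl	= ftbl_example,	/* XO rate must be entry 0 */
	.clkr.hw.init	= &(struct clk_init_data){
		.name		= "example_shared_clk_src",
		.parent_names	= gcc_xo_gpll0,
		.num_parents	= 2,
		.ops		= &clk_rcg2_shared_ops,
	},
};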
diff --git a/drivers/clk/qcom/clk-regmap-mux-div.c b/drivers/clk/qcom/clk-regmap-mux-div.c
new file mode 100644
index 000000000000..f43da2acfab3
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-mux-div.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+
+#include "clk-regmap-mux-div.h"
+
+#define CMD_RCGR 0x0
+#define CMD_RCGR_UPDATE BIT(0)
+#define CMD_RCGR_DIRTY_CFG BIT(4)
+#define CMD_RCGR_ROOT_OFF BIT(31)
+#define CFG_RCGR 0x4
+
+static int __mux_div_update_config(struct clk_regmap_mux_div *md)
+{
+ int ret;
+ u32 val, count;
+ const char *name = __clk_get_name(md->clkr.hw.clk);
+
+ ret = regmap_update_bits(md->clkr.regmap, CMD_RCGR + md->reg_offset,
+ CMD_RCGR_UPDATE, CMD_RCGR_UPDATE);
+ if (ret)
+ return ret;
+
+ /* Wait for update to take effect */
+ for (count = 500; count > 0; count--) {
+ ret = regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset,
+ &val);
+ if (ret)
+ return ret;
+ if (!(val & CMD_RCGR_UPDATE))
+ return 0;
+ udelay(1);
+ }
+
+ pr_err("%s: rcg did not update its configuration.", name);
+ return -EBUSY;
+}
+
+static int __mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src_sel,
+ u32 src_div)
+{
+ int ret;
+ u32 val, mask;
+
+ val = (src_div << md->hid_shift) | (src_sel << md->src_shift);
+ mask = ((BIT(md->hid_width) - 1) << md->hid_shift) |
+ ((BIT(md->src_width) - 1) << md->src_shift);
+
+ ret = regmap_update_bits(md->clkr.regmap, CFG_RCGR + md->reg_offset,
+ mask, val);
+ if (ret)
+ return ret;
+
+ ret = __mux_div_update_config(md);
+ return ret;
+}
+
+static void __mux_div_get_src_div(struct clk_regmap_mux_div *md, u32 *src_sel,
+ u32 *src_div)
+{
+ u32 val, div, src;
+ const char *name = __clk_get_name(md->clkr.hw.clk);
+
+ regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset, &val);
+
+ if (val & CMD_RCGR_DIRTY_CFG) {
+ pr_err("%s: rcg configuration is pending.\n", name);
+ return;
+ }
+
+ regmap_read(md->clkr.regmap, CFG_RCGR + md->reg_offset, &val);
+ src = (val >> md->src_shift);
+ src &= BIT(md->src_width) - 1;
+ *src_sel = src;
+
+ div = (val >> md->hid_shift);
+ div &= BIT(md->hid_width) - 1;
+ *src_div = div;
+}
+
+static int mux_div_enable(struct clk_hw *hw)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+
+ return __mux_div_set_src_div(md, md->src_sel, md->div);
+}
+
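+/*
+ * Prefer the lowest candidate rate that still meets the request; while no
+ * candidate meets it, prefer the highest candidate seen so far.
+ */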
+static inline bool is_better_rate(unsigned long req, unsigned long best,
+ unsigned long new)
+{
+ return (req <= new && new < best) || (best < req && best < new);
+}
+
+static int mux_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+ unsigned int i, div, max_div;
+ unsigned long actual_rate, rrate = 0;
+ unsigned long rate = req->rate;
+
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);
+ unsigned long parent_rate = clk_hw_get_rate(parent);
+
+ max_div = BIT(md->hid_width) - 1;
+ for (div = 1; div < max_div; div++) {
+ parent_rate = mult_frac(rate, div, 2);
+ parent_rate = clk_hw_round_rate(parent, parent_rate);
+ actual_rate = mult_frac(parent_rate, 2, div);
+
+ if (is_better_rate(rate, rrate, actual_rate)) {
+ rrate = actual_rate;
+ req->best_parent_rate = parent_rate;
+ req->best_parent_hw = parent;
+ }
+
+ if (actual_rate < rate || rrate <= rate)
+ break;
+ }
+ }
+
+ if (!rrate)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __mux_div_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate, u32 src_sel)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+ int ret, i;
+ u32 div, max_div, best_src = 0, best_div = 0;
+ unsigned long actual_rate = 0, rrate = 0;
+
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);
+ unsigned long parent_rate = clk_hw_get_rate(parent);
+
+ max_div = BIT(md->hid_width) - 1;
+ for (div = 1; div < max_div; div++) {
+ parent_rate = mult_frac(rate, div, 2);
+ parent_rate = clk_hw_round_rate(parent, parent_rate);
+ actual_rate = mult_frac(parent_rate, 2, div);
+
+ if (is_better_rate(rate, rrate, actual_rate)) {
+ rrate = actual_rate;
+ best_src = md->parent_map[i].cfg;
+ best_div = div - 1;
+ }
+
+ if (actual_rate < rate || rrate <= rate)
+ break;
+ }
+ }
+
+ ret = __mux_div_set_src_div(md, best_src, best_div);
+ if (!ret) {
+ md->div = best_div;
+ md->src_sel = best_src;
+ }
+
+ return ret;
+}
+
+static u8 mux_div_get_parent(struct clk_hw *hw)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+ int num_parents = clk_hw_get_num_parents(hw);
+ const char *name = __clk_get_name(hw->clk);
+ u32 i, div, src;
+
+ __mux_div_get_src_div(md, &src, &div);
+
+ for (i = 0; i < num_parents; i++)
+ if (src == md->parent_map[i].cfg)
+ return i;
+
+ pr_err("%s: Can't find parent %d\n", name, src);
+ return 0;
+}
+
+static int mux_div_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+
+ return __mux_div_set_src_div(md, md->parent_map[index].cfg, md->div);
+}
+
+static int mux_div_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long prate)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+ u8 pindex = mux_div_get_parent(hw);
+ struct clk_hw *parent = clk_hw_get_parent_by_index(hw, pindex);
+ unsigned long current_prate = clk_hw_get_rate(parent);
+
+ if (rate > current_prate)
+ return -EINVAL;
+
+ return __mux_div_set_rate_and_parent(hw, rate, prate, md->src_sel);
+}
+
+static int mux_div_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate, u8 index)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+
+ return __mux_div_set_rate_and_parent(hw, rate, prate,
+ md->parent_map[index].cfg);
+}
+
+static unsigned long mux_div_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+ u32 div, src;
+ int i, num_parents = clk_hw_get_num_parents(hw);
+ const char *name = __clk_get_name(hw->clk);
+
+ __mux_div_get_src_div(md, &src, &div);
+ for (i = 0; i < num_parents; i++)
+ if (src == md->parent_map[i].cfg) {
+ struct clk_hw *p = clk_hw_get_parent_by_index(hw, i);
+ unsigned long parent_rate = clk_hw_get_rate(p);
+
+ return mult_frac(parent_rate, 2, div + 1);
+ }
+
+ pr_err("%s: Can't find parent %d\n", name, src);
+ return 0;
+}
+
+static struct clk_hw *mux_div_get_safe_parent(struct clk_hw *hw,
+ unsigned long *safe_freq)
+{
+ int i;
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+ int num_parents = clk_hw_get_num_parents(hw);
+
+ if (md->safe_freq)
+ *safe_freq = md->safe_freq;
+
+ for (i = 0; i < num_parents; i++)
+ if (md->safe_src == md->parent_map[i].cfg)
+ break;
+
+ return clk_hw_get_parent_by_index(hw, i);
+}
+
+static void mux_div_disable(struct clk_hw *hw)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+ struct clk_hw *parent;
+ u32 div;
+
+ if (!md->safe_freq || !md->safe_src)
+ return;
+
+ parent = mux_div_get_safe_parent(hw, &md->safe_freq);
+ div = divider_get_val(md->safe_freq, clk_hw_get_rate(parent), NULL,
+ md->hid_width, CLK_DIVIDER_ROUND_CLOSEST);
+ div = 2 * div + 1;
+
+ __mux_div_set_src_div(md, md->safe_src, div);
+}
+
+const struct clk_ops clk_regmap_mux_div_ops = {
+ .enable = mux_div_enable,
+ .disable = mux_div_disable,
+ .get_parent = mux_div_get_parent,
+ .set_parent = mux_div_set_parent,
+ .set_rate = mux_div_set_rate,
+ .set_rate_and_parent = mux_div_set_rate_and_parent,
+ .determine_rate = mux_div_determine_rate,
+ .recalc_rate = mux_div_recalc_rate,
+ .get_safe_parent = mux_div_get_safe_parent,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_mux_div_ops);
diff --git a/drivers/clk/qcom/clk-regmap-mux-div.h b/drivers/clk/qcom/clk-regmap-mux-div.h
new file mode 100644
index 000000000000..b887a900ad89
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-mux-div.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_REGMAP_MUX_DIV_H__
+#define __QCOM_CLK_REGMAP_MUX_DIV_H__
+
+#include <linux/clk-provider.h>
+#include "clk-regmap.h"
+#include "clk-rcg.h"
+
+/**
+ * struct clk_regmap_mux_div - combined mux and divider clock
+ * @reg_offset: offset of the mux/divider register
+ * @hid_width: number of bits in half integer divider
+ * @hid_shift: lowest bit of hid value field
+ * @src_width: number of bits in source select
+ * @src_shift: lowest bit of source select field
+ * @div: the divider configuration value
+ * @src_sel: the mux index which will be used if the clock is enabled
+ * @safe_src: value for safe source
+ * @safe_freq: When switching rates from A to B, the mux div clock will
+ * instead switch from A -> safe_freq -> B. This allows the
+ * mux_div clock to change rates while enabled, even if this
+ * behavior is not supported by the parent clocks.
+ * If changing the rate of parent A also causes the rate of
+ * parent B to change, then safe_freq must be defined.
+ * safe_freq is expected to have a source clock which is always
+ * on and runs at only one rate.
+ * @parent_map: pointer to parent_map struct
+ * @clkr: handle between common and hardware-specific interfaces
+ */
+struct clk_regmap_mux_div {
+ u32 reg_offset;
+ u32 hid_width;
+ u32 hid_shift;
+ u32 src_width;
+ u32 src_shift;
+ u32 div;
+ u32 src_sel;
+ u32 safe_src;
+ unsigned long safe_freq;
+ const struct parent_map *parent_map;
+ struct clk_regmap clkr;
+};
+
+#define to_clk_regmap_mux_div(_hw) \
+ container_of(to_clk_regmap(_hw), struct clk_regmap_mux_div, clkr)
+
+extern const struct clk_ops clk_regmap_mux_div_ops;
+
+#endif
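
As a usage illustration, a provider built on this helper might be defined
along the lines of the minimal sketch below; the register offset, field
widths, parent names, and parent_map entries are hypothetical, not taken
from this series:

    static const struct parent_map example_parent_map[] = {
            { .src = 0, .cfg = 0 },  /* hypothetical: gpll0 on select 0 */
            { .src = 1, .cfg = 5 },  /* hypothetical: hfpll0 on select 5 */
    };

    static struct clk_regmap_mux_div example_mux_div = {
            .reg_offset = 0x50,      /* hypothetical CMD_RCGR offset */
            .hid_width = 5,
            .hid_shift = 0,
            .src_width = 3,
            .src_shift = 8,
            .safe_src = 0,
            .safe_freq = 400000000,
            .parent_map = example_parent_map,
            .clkr.hw.init = &(struct clk_init_data){
                    .name = "example_mux_div",
                    .parent_names = (const char *[]){ "gpll0_vote", "hfpll0" },
                    .num_parents = 2,
                    .ops = &clk_regmap_mux_div_ops,
                    .flags = CLK_SET_RATE_PARENT,
            },
    };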
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
new file mode 100644
index 000000000000..859b709385f6
--- /dev/null
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+#include "clk-smd-rpm.h"
+
+#define to_clk_smd_rpm(_hw) container_of(_hw, struct clk_smd_rpm, hw)
+
+static DEFINE_MUTEX(rpm_clk_lock);
+
+static int clk_smd_rpm_set_rate_active(struct clk_smd_rpm *r,
+ unsigned long value)
+{
+ struct clk_smd_rpm_req req = {
+ .key = cpu_to_le32(r->rpm_key),
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(DIV_ROUND_UP(value, 1000)), /* RPM expects kHz */
+ };
+
+ return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ r->rpm_res_type, r->rpm_clk_id, &req,
+ sizeof(req));
+}
+
+static int clk_smd_rpm_set_rate_sleep(struct clk_smd_rpm *r,
+ unsigned long value)
+{
+ struct clk_smd_rpm_req req = {
+ .key = cpu_to_le32(r->rpm_key),
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(DIV_ROUND_UP(value, 1000)), /* RPM expects kHz */
+ };
+
+ return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE,
+ r->rpm_res_type, r->rpm_clk_id, &req,
+ sizeof(req));
+}
+
+static void to_active_sleep(struct clk_smd_rpm *r, unsigned long rate,
+ unsigned long *active, unsigned long *sleep)
+{
+ *active = rate;
+
+ /*
+ * Active-only clocks don't care what the rate is during sleep. So,
+ * they vote for zero.
+ */
+ if (r->active_only)
+ *sleep = 0;
+ else
+ *sleep = *active;
+}
+
+static int clk_smd_rpm_prepare(struct clk_hw *hw)
+{
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+ struct clk_smd_rpm *peer = r->peer;
+ unsigned long this_rate = 0, this_sleep_rate = 0;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ unsigned long active_rate, sleep_rate;
+ int ret = 0;
+
+ mutex_lock(&rpm_clk_lock);
+
+ /* Don't send requests to the RPM if the rate has not been set. */
+ if (!r->rate)
+ goto out;
+
+ to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate);
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate,
+ &peer_rate, &peer_sleep_rate);
+
+ active_rate = max(this_rate, peer_rate);
+
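+ /* A branch clock's vote is only on/off, so collapse the rate to 0 or 1 */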
+ if (r->branch)
+ active_rate = !!active_rate;
+
+ ret = clk_smd_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = max(this_sleep_rate, peer_sleep_rate);
+ if (r->branch)
+ sleep_rate = !!sleep_rate;
+
+ ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ /* Undo the active-set vote by restoring the peer's rate */
+ ret = clk_smd_rpm_set_rate_active(r, peer_rate);
+
+out:
+ if (!ret)
+ r->enabled = true;
+
+ mutex_unlock(&rpm_clk_lock);
+
+ return ret;
+}
+
+static void clk_smd_rpm_unprepare(struct clk_hw *hw)
+{
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+
+ mutex_lock(&rpm_clk_lock);
+
+ if (r->rate) {
+ struct clk_smd_rpm *peer = r->peer;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ unsigned long active_rate, sleep_rate;
+ int ret;
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate, &peer_rate,
+ &peer_sleep_rate);
+
+ active_rate = r->branch ? !!peer_rate : peer_rate;
+ ret = clk_smd_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
+ ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ goto out;
+ }
+ r->enabled = false;
+
+out:
+ mutex_unlock(&rpm_clk_lock);
+}
+
+static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+ int ret = 0;
+
+ mutex_lock(&rpm_clk_lock);
+
+ if (r->enabled) {
+ struct clk_smd_rpm *peer = r->peer;
+ unsigned long active_rate, sleep_rate;
+ unsigned long this_rate = 0, this_sleep_rate = 0;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+
+ to_active_sleep(r, rate, &this_rate, &this_sleep_rate);
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate,
+ &peer_rate, &peer_sleep_rate);
+
+ active_rate = max(this_rate, peer_rate);
+ ret = clk_smd_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = max(this_sleep_rate, peer_sleep_rate);
+ ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ goto out;
+ }
+ r->rate = rate;
+out:
+ mutex_unlock(&rpm_clk_lock);
+
+ return ret;
+}
+
+static long clk_smd_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ /*
+ * RPM handles rate rounding and we don't have a way to
+ * know what the rate will be, so just return whatever
+ * rate is requested.
+ */
+ return rate;
+}
+
+static unsigned long clk_smd_rpm_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+
+ /*
+ * RPM handles rate rounding and we don't have a way to
+ * know what the rate will be, so just return whatever
+ * rate was set.
+ */
+ return r->rate;
+}
+
+int clk_smd_rpm_enable_scaling(struct qcom_smd_rpm *rpm)
+{
+ int ret;
+ struct clk_smd_rpm_req req = {
+ .key = cpu_to_le32(QCOM_RPM_SMD_KEY_ENABLE),
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(1),
+ };
+
+ ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_SLEEP_STATE,
+ QCOM_SMD_RPM_MISC_CLK,
+ QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req));
+ if (ret) {
+ pr_err("RPM clock scaling (sleep set) not enabled!\n");
+ return ret;
+ }
+
+ ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ QCOM_SMD_RPM_MISC_CLK,
+ QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req));
+ if (ret) {
+ pr_err("RPM clock scaling (active set) not enabled!\n");
+ return ret;
+ }
+
+ pr_debug("%s: RPM clock scaling is enabled\n", __func__);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(clk_smd_rpm_enable_scaling);
+
+const struct clk_ops clk_smd_rpm_ops = {
+ .prepare = clk_smd_rpm_prepare,
+ .unprepare = clk_smd_rpm_unprepare,
+ .set_rate = clk_smd_rpm_set_rate,
+ .round_rate = clk_smd_rpm_round_rate,
+ .recalc_rate = clk_smd_rpm_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_smd_rpm_ops);
+
+const struct clk_ops clk_smd_rpm_branch_ops = {
+ .prepare = clk_smd_rpm_prepare,
+ .unprepare = clk_smd_rpm_unprepare,
+ .round_rate = clk_smd_rpm_round_rate,
+ .recalc_rate = clk_smd_rpm_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_smd_rpm_branch_ops);
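
A clock-controller driver built on these ops would typically enable RPM
rate scaling once at probe, before registering its clk_smd_rpm instances.
A rough sketch follows; the probe wiring is illustrative rather than code
from this series:

    static int example_rpmcc_probe(struct platform_device *pdev)
    {
            /* the SMD RPM handle lives on the parent smd-rpm device */
            struct qcom_smd_rpm *rpm = dev_get_drvdata(pdev->dev.parent);
            int ret;

            if (!rpm)
                    return -ENODEV;

            ret = clk_smd_rpm_enable_scaling(rpm);
            if (ret)
                    return ret;

            /* ... assign each clk_smd_rpm's ->rpm and register its hw ... */
            return 0;
    }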
diff --git a/drivers/clk/qcom/clk-smd-rpm.h b/drivers/clk/qcom/clk-smd-rpm.h
new file mode 100644
index 000000000000..d5dafad6b0fc
--- /dev/null
+++ b/drivers/clk/qcom/clk-smd-rpm.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_SMD_RPM_H__
+#define __QCOM_CLK_SMD_RPM_H__
+
+#include <linux/clk-provider.h>
+
+#define QCOM_RPM_KEY_SOFTWARE_ENABLE 0x6e657773
+#define QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY 0x62636370
+#define QCOM_RPM_SMD_KEY_RATE 0x007a484b
+#define QCOM_RPM_SMD_KEY_ENABLE 0x62616e45
+#define QCOM_RPM_SMD_KEY_STATE 0x54415453
+#define QCOM_RPM_SCALING_ENABLE_ID 0x2
+
+struct qcom_smd_rpm;
+
+struct clk_smd_rpm {
+ const int rpm_res_type;
+ const int rpm_key;
+ const int rpm_clk_id;
+ const int rpm_status_id;
+ const bool active_only;
+ bool enabled;
+ bool branch;
+ struct clk_smd_rpm *peer;
+ struct clk_hw hw;
+ unsigned long rate;
+ struct qcom_smd_rpm *rpm;
+};
+
+struct clk_smd_rpm_req {
+ __le32 key;
+ __le32 nbytes;
+ __le32 value;
+};
+
+extern const struct clk_ops clk_smd_rpm_ops;
+extern const struct clk_ops clk_smd_rpm_branch_ops;
+int clk_smd_rpm_enable_scaling(struct qcom_smd_rpm *rpm);
+
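+/*
+ * Each RPM clock is defined as a pair: a normal clock that votes in both
+ * the active and sleep sets, and an active-only peer whose sleep-set vote
+ * is always zero. The two reference each other through ->peer so either
+ * one can aggregate the other's vote when it is enabled.
+ */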
+#define __DEFINE_CLK_SMD_RPM(_name, active, type, r_id, stat_id, dep, key) \
+ static struct clk_smd_rpm active; \
+ static struct clk_smd_rpm _name = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
+ .peer = &active, \
+ .rate = INT_MAX, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_smd_rpm_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_smd_rpm active = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
+ .peer = &_name, \
+ .active_only = true, \
+ .rate = INT_MAX, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_smd_rpm_ops, \
+ .name = #active, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ };
+
+#define __DEFINE_CLK_SMD_RPM_BRANCH(_name, active, type, r_id, stat_id, r, \
+ key) \
+ static struct clk_smd_rpm active; \
+ static struct clk_smd_rpm _name = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
+ .peer = &active, \
+ .branch = true, \
+ .rate = (r), \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_smd_rpm_branch_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_smd_rpm active = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
+ .peer = &_name, \
+ .active_only = true, \
+ .branch = true, \
+ .rate = (r), \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_smd_rpm_branch_ops, \
+ .name = #active, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ };
+
+#define DEFINE_CLK_SMD_RPM(_name, active, type, r_id, dep) \
+ __DEFINE_CLK_SMD_RPM(_name, active, type, r_id, 0, dep, \
+ QCOM_RPM_SMD_KEY_RATE)
+
+#define DEFINE_CLK_SMD_RPM_BRANCH(_name, active, type, r_id, r) \
+ __DEFINE_CLK_SMD_RPM_BRANCH(_name, active, type, r_id, 0, r, \
+ QCOM_RPM_SMD_KEY_ENABLE)
+
+#define DEFINE_CLK_SMD_RPM_QDSS(_name, active, type, r_id) \
+ __DEFINE_CLK_SMD_RPM(_name, active, type, r_id, \
+ 0, 0, QCOM_RPM_SMD_KEY_STATE)
+
+#define DEFINE_CLK_SMD_RPM_XO_BUFFER(_name, active, r_id) \
+ __DEFINE_CLK_SMD_RPM_BRANCH(_name, active, \
+ QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, 1000, \
+ QCOM_RPM_KEY_SOFTWARE_ENABLE)
+
+#define DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(_name, active, r_id) \
+ __DEFINE_CLK_SMD_RPM_BRANCH(_name, active, \
+ QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, 1000, \
+ QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY)
+
+#endif
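
For illustration, one DEFINE_CLK_SMD_RPM() invocation expands to the
clock/active-only pair described above; the resource type, ID, and names
below are placeholders rather than definitions from this series:

    /* defines example_bus_clk and its active-only peer example_bus_a_clk */
    DEFINE_CLK_SMD_RPM(example_bus_clk, example_bus_a_clk,
                       QCOM_SMD_RPM_BUS_CLK, 1, 0);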
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 2dedceefd21d..62490df44e4e 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -22,6 +22,7 @@
#include "clk-rcg.h"
#include "clk-regmap.h"
#include "reset.h"
+#include "gdsc.h"
struct qcom_cc {
struct qcom_reset_controller reset;
@@ -121,8 +122,20 @@ int qcom_cc_really_probe(struct platform_device *pdev,
ret = reset_controller_register(&reset->rcdev);
if (ret)
- of_clk_del_provider(dev->of_node);
+ goto err_reset;
+ if (desc->gdscs && desc->num_gdscs) {
+ ret = gdsc_register(dev, desc->gdscs, desc->num_gdscs, regmap);
+ if (ret)
+ goto err_pd;
+ }
+
+ return 0;
+err_pd:
+ dev_err(dev, "Failed to register power domains\n");
+ reset_controller_unregister(&reset->rcdev);
+err_reset:
+ of_clk_del_provider(dev->of_node);
return ret;
}
EXPORT_SYMBOL_GPL(qcom_cc_really_probe);
@@ -141,6 +154,7 @@ EXPORT_SYMBOL_GPL(qcom_cc_probe);
void qcom_cc_remove(struct platform_device *pdev)
{
+ gdsc_unregister(&pdev->dev);
of_clk_del_provider(pdev->dev.of_node);
reset_controller_unregister(platform_get_drvdata(pdev));
}
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index 7a0e73713063..2892b71fbd71 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -28,6 +28,8 @@ struct qcom_cc_desc {
size_t num_clks;
const struct qcom_reset_map *resets;
size_t num_resets;
+ struct gdsc **gdscs;
+ size_t num_gdscs;
};
extern const struct freq_tbl *qcom_find_freq(const struct freq_tbl *f,
diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
index 3563019b8e3c..0c93bf8f2da5 100644
--- a/drivers/clk/qcom/gcc-apq8084.c
+++ b/drivers/clk/qcom/gcc-apq8084.c
@@ -31,6 +31,7 @@
#include "clk-rcg.h"
#include "clk-branch.h"
#include "reset.h"
+#include "gdsc.h"
enum {
P_XO,
@@ -3254,6 +3255,34 @@ static struct clk_branch gcc_usb_hsic_system_clk = {
},
};
+static struct gdsc usb_hs_hsic_gdsc = {
+ .gdscr = 0x404,
+ .pd = {
+ .name = "usb_hs_hsic",
+ },
+};
+
+static struct gdsc pcie0_gdsc = {
+ .gdscr = 0x1ac4,
+ .pd = {
+ .name = "pcie0",
+ },
+};
+
+static struct gdsc pcie1_gdsc = {
+ .gdscr = 0x1b44,
+ .pd = {
+ .name = "pcie1",
+ },
+};
+
+static struct gdsc usb30_gdsc = {
+ .gdscr = 0x1e84,
+ .pd = {
+ .name = "usb30",
+ },
+};
+
static struct clk_regmap *gcc_apq8084_clocks[] = {
[GPLL0] = &gpll0.clkr,
[GPLL0_VOTE] = &gpll0_vote,
@@ -3447,6 +3476,13 @@ static struct clk_regmap *gcc_apq8084_clocks[] = {
[GCC_USB_HSIC_SYSTEM_CLK] = &gcc_usb_hsic_system_clk.clkr,
};
+static struct gdsc *gcc_apq8084_gdscs[] = {
+ [USB_HS_HSIC_GDSC] = &usb_hs_hsic_gdsc,
+ [PCIE0_GDSC] = &pcie0_gdsc,
+ [PCIE1_GDSC] = &pcie1_gdsc,
+ [USB30_GDSC] = &usb30_gdsc,
+};
+
static const struct qcom_reset_map gcc_apq8084_resets[] = {
[GCC_SYSTEM_NOC_BCR] = { 0x0100 },
[GCC_CONFIG_NOC_BCR] = { 0x0140 },
@@ -3555,6 +3591,8 @@ static const struct qcom_cc_desc gcc_apq8084_desc = {
.num_clks = ARRAY_SIZE(gcc_apq8084_clocks),
.resets = gcc_apq8084_resets,
.num_resets = ARRAY_SIZE(gcc_apq8084_resets),
+ .gdscs = gcc_apq8084_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_apq8084_gdscs),
};
static const struct of_device_id gcc_apq8084_match_table[] = {
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index 40e480220cd3..c6b59f199e97 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -30,6 +30,7 @@
#include "clk-pll.h"
#include "clk-rcg.h"
#include "clk-branch.h"
+#include "clk-hfpll.h"
#include "reset.h"
static struct clk_pll pll0 = {
@@ -113,6 +114,85 @@ static struct clk_regmap pll8_vote = {
},
};
+static struct hfpll_data hfpll0_data = {
+ .mode_reg = 0x3200,
+ .l_reg = 0x3208,
+ .m_reg = 0x320c,
+ .n_reg = 0x3210,
+ .config_reg = 0x3204,
+ .status_reg = 0x321c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3214,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll0 = {
+ .d = &hfpll0_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll0",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll0.lock),
+};
+
+static struct hfpll_data hfpll1_data = {
+ .mode_reg = 0x3240,
+ .l_reg = 0x3248,
+ .m_reg = 0x324c,
+ .n_reg = 0x3250,
+ .config_reg = 0x3244,
+ .status_reg = 0x325c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3314,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll1 = {
+ .d = &hfpll1_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll1",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll1.lock),
+};
+
+static struct hfpll_data hfpll_l2_data = {
+ .mode_reg = 0x3300,
+ .l_reg = 0x3308,
+ .m_reg = 0x330c,
+ .n_reg = 0x3310,
+ .config_reg = 0x3304,
+ .status_reg = 0x331c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3314,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll_l2 = {
+ .d = &hfpll_l2_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll_l2",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll_l2.lock),
+};
+
static struct clk_pll pll14 = {
.l_reg = 0x31c4,
.m_reg = 0x31c8,
@@ -2837,6 +2917,9 @@ static struct clk_regmap *gcc_ipq806x_clks[] = {
[UBI32_CORE2_CLK_SRC] = &ubi32_core2_src_clk.clkr,
[NSSTCM_CLK_SRC] = &nss_tcm_src.clkr,
[NSSTCM_CLK] = &nss_tcm_clk.clkr,
+ [PLL9] = &hfpll0.clkr,
+ [PLL10] = &hfpll1.clkr,
+ [PLL12] = &hfpll_l2.clkr,
};
static const struct qcom_reset_map gcc_ipq806x_resets[] = {
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index 22a4e1e732c0..ea2345e1abe2 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -31,6 +31,7 @@
#include "clk-rcg.h"
#include "clk-branch.h"
#include "reset.h"
+#include "gdsc.h"
enum {
P_XO,
@@ -44,6 +45,9 @@ enum {
P_SLEEP_CLK,
P_DSI0_PHYPLL_BYTE,
P_DSI0_PHYPLL_DSI,
+ P_EXT_PRI_I2S,
+ P_EXT_SEC_I2S,
+ P_EXT_MCLK,
};
static const struct parent_map gcc_xo_gpll0_map[] = {
@@ -190,6 +194,76 @@ static const char * const gcc_xo_gpll0a_gpll1_gpll2[] = {
"gpll2_vote",
};
+static const struct parent_map gcc_xo_gpll0_gpll1_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL1, 2 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const char *gcc_xo_gpll0_gpll1_sleep[] = {
+ "xo",
+ "gpll0_vote",
+ "gpll1_vote",
+ "sleep_clk",
+};
+
+static const struct parent_map gcc_xo_gpll1_epi2s_emclk_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL1, 1 },
+ { P_EXT_PRI_I2S, 2 },
+ { P_EXT_MCLK, 3 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const char *gcc_xo_gpll1_epi2s_emclk_sleep[] = {
+ "xo",
+ "gpll1_vote",
+ "ext_pri_i2s",
+ "ext_mclk",
+ "sleep_clk",
+};
+
+static const struct parent_map gcc_xo_gpll1_esi2s_emclk_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL1, 1 },
+ { P_EXT_SEC_I2S, 2 },
+ { P_EXT_MCLK, 3 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const char *gcc_xo_gpll1_esi2s_emclk_sleep[] = {
+ "xo",
+ "gpll1_vote",
+ "ext_sec_i2s",
+ "ext_mclk",
+ "sleep_clk",
+};
+
+static const struct parent_map gcc_xo_sleep_map[] = {
+ { P_XO, 0 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const char *gcc_xo_sleep[] = {
+ "xo",
+ "sleep_clk",
+};
+
+static const struct parent_map gcc_xo_gpll1_emclk_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL1, 1 },
+ { P_EXT_MCLK, 2 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const char *gcc_xo_gpll1_emclk_sleep[] = {
+ "xo",
+ "gpll1_vote",
+ "ext_mclk",
+ "sleep_clk",
+};
+
#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
static struct clk_pll gpll0 = {
@@ -906,21 +980,15 @@ static struct clk_rcg2 gp3_clk_src = {
},
};
-static struct freq_tbl ftbl_gcc_mdss_byte0_clk[] = {
- { .src = P_DSI0_PHYPLL_BYTE },
- { }
-};
-
static struct clk_rcg2 byte0_clk_src = {
.cmd_rcgr = 0x4d044,
.hid_width = 5,
.parent_map = gcc_xo_gpll0a_dsibyte_map,
- .freq_tbl = ftbl_gcc_mdss_byte0_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "byte0_clk_src",
.parent_names = gcc_xo_gpll0a_dsibyte,
.num_parents = 3,
- .ops = &clk_byte_ops,
+ .ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -968,17 +1036,11 @@ static struct clk_rcg2 mdp_clk_src = {
},
};
-static struct freq_tbl ftbl_gcc_mdss_pclk[] = {
- { .src = P_DSI0_PHYPLL_DSI },
- { }
-};
-
static struct clk_rcg2 pclk0_clk_src = {
.cmd_rcgr = 0x4d000,
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0a_dsiphy_map,
- .freq_tbl = ftbl_gcc_mdss_pclk,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk0_clk_src",
.parent_names = gcc_xo_gpll0a_dsiphy,
@@ -1094,6 +1156,29 @@ static struct clk_rcg2 apss_tcu_clk_src = {
},
};
+static const struct freq_tbl ftbl_gcc_bimc_gpu_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266500000, P_BIMC, 4, 0, 0),
+ F(533000000, P_BIMC, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 bimc_gpu_clk_src = {
+ .cmd_rcgr = 0x31028,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_bimc_map,
+ .freq_tbl = ftbl_gcc_bimc_gpu_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "bimc_gpu_clk_src",
+ .parent_names = gcc_xo_gpll0_bimc,
+ .num_parents = 3,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
static const struct freq_tbl ftbl_gcc_usb_hs_system_clk[] = {
F(80000000, P_GPLL0, 10, 0, 0),
{ }
@@ -1112,6 +1197,305 @@ static struct clk_rcg2 usb_hs_system_clk_src = {
},
};
+static const struct freq_tbl ftbl_gcc_ultaudio_ahb_clk[] = {
+ F(3200000, P_XO, 6, 0, 0),
+ F(6400000, P_XO, 3, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(19200000, P_XO, 1, 0, 0),
+ F(40000000, P_GPLL0, 10, 1, 2),
+ F(66670000, P_GPLL0, 12, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ultaudio_ahbfabric_clk_src = {
+ .cmd_rcgr = 0x1c010,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_xo_gpll0_gpll1_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_ahb_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_ahbfabric_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll1_sleep,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_ahbfabric_ixfabric_clk = {
+ .halt_reg = 0x1c028,
+ .clkr = {
+ .enable_reg = 0x1c028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_ahbfabric_ixfabric_clk",
+ .parent_names = (const char *[]){
+ "ultaudio_ahbfabric_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ultaudio_ahbfabric_ixfabric_lpm_clk = {
+ .halt_reg = 0x1c024,
+ .clkr = {
+ .enable_reg = 0x1c024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_ahbfabric_ixfabric_lpm_clk",
+ .parent_names = (const char *[]){
+ "ultaudio_ahbfabric_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ultaudio_lpaif_i2s_clk[] = {
+ F(256000, P_XO, 5, 1, 15),
+ F(512000, P_XO, 5, 2, 15),
+ F(705600, P_GPLL1, 16, 1, 80),
+ F(768000, P_XO, 5, 1, 5),
+ F(800000, P_XO, 5, 5, 24),
+ F(1024000, P_GPLL1, 14, 1, 63),
+ F(1152000, P_XO, 1, 3, 50),
+ F(1411200, P_GPLL1, 16, 1, 40),
+ F(1536000, P_XO, 1, 2, 25),
+ F(1600000, P_XO, 12, 0, 0),
+ F(2048000, P_GPLL1, 9, 1, 49),
+ F(2400000, P_XO, 8, 0, 0),
+ F(2822400, P_GPLL1, 16, 1, 20),
+ F(3072000, P_GPLL1, 14, 1, 21),
+ F(4096000, P_GPLL1, 9, 2, 49),
+ F(4800000, P_XO, 4, 0, 0),
+ F(5644800, P_GPLL1, 16, 1, 10),
+ F(6144000, P_GPLL1, 7, 1, 21),
+ F(8192000, P_GPLL1, 9, 4, 49),
+ F(9600000, P_XO, 2, 0, 0),
+ F(11289600, P_GPLL1, 16, 1, 5),
+ F(12288000, P_GPLL1, 7, 2, 21),
+ { }
+};
+
+static struct clk_rcg2 ultaudio_lpaif_pri_i2s_clk_src = {
+ .cmd_rcgr = 0x1c054,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_xo_gpll1_epi2s_emclk_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_lpaif_pri_i2s_clk_src",
+ .parent_names = gcc_xo_gpll1_epi2s_emclk_sleep,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_lpaif_pri_i2s_clk = {
+ .halt_reg = 0x1c068,
+ .clkr = {
+ .enable_reg = 0x1c068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_lpaif_pri_i2s_clk",
+ .parent_names = (const char *[]){
+ "ultaudio_lpaif_pri_i2s_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_rcg2 ultaudio_lpaif_sec_i2s_clk_src = {
+ .cmd_rcgr = 0x1c06c,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_xo_gpll1_esi2s_emclk_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_lpaif_sec_i2s_clk_src",
+ .parent_names = gcc_xo_gpll1_esi2s_emclk_sleep,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_lpaif_sec_i2s_clk = {
+ .halt_reg = 0x1c080,
+ .clkr = {
+ .enable_reg = 0x1c080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_lpaif_sec_i2s_clk",
+ .parent_names = (const char *[]){
+ "ultaudio_lpaif_sec_i2s_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_rcg2 ultaudio_lpaif_aux_i2s_clk_src = {
+ .cmd_rcgr = 0x1c084,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_xo_gpll1_emclk_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_lpaif_aux_i2s_clk_src",
+ .parent_names = gcc_xo_gpll1_emclk_sleep,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_lpaif_aux_i2s_clk = {
+ .halt_reg = 0x1c098,
+ .clkr = {
+ .enable_reg = 0x1c098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_lpaif_aux_i2s_clk",
+ .parent_names = (const char *[]){
+ "ultaudio_lpaif_aux_i2s_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ultaudio_xo_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ultaudio_xo_clk_src = {
+ .cmd_rcgr = 0x1c034,
+ .hid_width = 5,
+ .parent_map = gcc_xo_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_xo_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_xo_clk_src",
+ .parent_names = gcc_xo_sleep,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_avsync_xo_clk = {
+ .halt_reg = 0x1c04c,
+ .clkr = {
+ .enable_reg = 0x1c04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_avsync_xo_clk",
+ .parent_names = (const char *[]){
+ "ultaudio_xo_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ultaudio_stc_xo_clk = {
+ .halt_reg = 0x1c050,
+ .clkr = {
+ .enable_reg = 0x1c050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_stc_xo_clk",
+ .parent_names = (const char *[]){
+ "ultaudio_xo_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_codec_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(11289600, P_EXT_MCLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 codec_digcodec_clk_src = {
+ .cmd_rcgr = 0x1c09c,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll1_emclk_sleep_map,
+ .freq_tbl = ftbl_codec_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "codec_digcodec_clk_src",
+ .parent_names = gcc_xo_gpll1_emclk_sleep,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_codec_digcodec_clk = {
+ .halt_reg = 0x1c0b0,
+ .clkr = {
+ .enable_reg = 0x1c0b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_codec_digcodec_clk",
+ .parent_names = (const char *[]){
+ "codec_digcodec_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ultaudio_pcnoc_mport_clk = {
+ .halt_reg = 0x1c000,
+ .clkr = {
+ .enable_reg = 0x1c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_pcnoc_mport_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ultaudio_pcnoc_sway_clk = {
+ .halt_reg = 0x1c004,
+ .clkr = {
+ .enable_reg = 0x1c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_pcnoc_sway_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static const struct freq_tbl ftbl_gcc_venus0_vcodec0_clk[] = {
F(100000000, P_GPLL0, 8, 0, 0),
F(160000000, P_GPLL0, 5, 0, 0),
@@ -2358,6 +2742,51 @@ static struct clk_branch gcc_sdcc2_apps_clk = {
},
};
+static struct clk_rcg2 bimc_ddr_clk_src = {
+ .cmd_rcgr = 0x32004,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_bimc_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "bimc_ddr_clk_src",
+ .parent_names = gcc_xo_gpll0_bimc,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_branch gcc_apss_tcu_clk = {
+ .halt_reg = 0x12018,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apss_tcu_clk",
+ .parent_names = (const char *[]){
+ "bimc_ddr_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gfx_tcu_clk = {
+ .halt_reg = 0x12020,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gfx_tcu_clk",
+ .parent_names = (const char *[]){
+ "bimc_ddr_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_gtcu_ahb_clk = {
.halt_reg = 0x12044,
.clkr = {
@@ -2375,6 +2804,40 @@ static struct clk_branch gcc_gtcu_ahb_clk = {
},
};
+static struct clk_branch gcc_bimc_gfx_clk = {
+ .halt_reg = 0x31024,
+ .clkr = {
+ .enable_reg = 0x31024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_gfx_clk",
+ .parent_names = (const char *[]){
+ "bimc_gpu_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_gpu_clk = {
+ .halt_reg = 0x31040,
+ .clkr = {
+ .enable_reg = 0x31040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_gpu_clk",
+ .parent_names = (const char *[]){
+ "bimc_gpu_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_jpeg_tbu_clk = {
.halt_reg = 0x12034,
.clkr = {
@@ -2562,6 +3025,42 @@ static struct clk_branch gcc_venus0_vcodec0_clk = {
},
};
+static struct gdsc venus_gdsc = {
+ .gdscr = 0x4c018,
+ .pd = {
+ .name = "venus",
+ },
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x4d078,
+ .pd = {
+ .name = "mdss",
+ },
+};
+
+static struct gdsc jpeg_gdsc = {
+ .gdscr = 0x5701c,
+ .pd = {
+ .name = "jpeg",
+ },
+};
+
+static struct gdsc vfe_gdsc = {
+ .gdscr = 0x58034,
+ .pd = {
+ .name = "vfe",
+ },
+};
+
+static struct gdsc oxili_gdsc = {
+ .gdscr = 0x5901c,
+ .pd = {
+ .name = "oxili",
+ },
+ .root_con_id = "gfx3d_clk_src",
+};
+
static struct clk_regmap *gcc_msm8916_clocks[] = {
[GPLL0] = &gpll0.clkr,
[GPLL0_VOTE] = &gpll0_vote,
@@ -2701,6 +3200,36 @@ static struct clk_regmap *gcc_msm8916_clocks[] = {
[GCC_VENUS0_AHB_CLK] = &gcc_venus0_ahb_clk.clkr,
[GCC_VENUS0_AXI_CLK] = &gcc_venus0_axi_clk.clkr,
[GCC_VENUS0_VCODEC0_CLK] = &gcc_venus0_vcodec0_clk.clkr,
+ [BIMC_DDR_CLK_SRC] = &bimc_ddr_clk_src.clkr,
+ [GCC_APSS_TCU_CLK] = &gcc_apss_tcu_clk.clkr,
+ [GCC_GFX_TCU_CLK] = &gcc_gfx_tcu_clk.clkr,
+ [BIMC_GPU_CLK_SRC] = &bimc_gpu_clk_src.clkr,
+ [GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr,
+ [GCC_BIMC_GPU_CLK] = &gcc_bimc_gpu_clk.clkr,
+ [ULTAUDIO_AHBFABRIC_CLK_SRC] = &ultaudio_ahbfabric_clk_src.clkr,
+ [ULTAUDIO_LPAIF_PRI_I2S_CLK_SRC] = &ultaudio_lpaif_pri_i2s_clk_src.clkr,
+ [ULTAUDIO_LPAIF_SEC_I2S_CLK_SRC] = &ultaudio_lpaif_sec_i2s_clk_src.clkr,
+ [ULTAUDIO_LPAIF_AUX_I2S_CLK_SRC] = &ultaudio_lpaif_aux_i2s_clk_src.clkr,
+ [ULTAUDIO_XO_CLK_SRC] = &ultaudio_xo_clk_src.clkr,
+ [CODEC_DIGCODEC_CLK_SRC] = &codec_digcodec_clk_src.clkr,
+ [GCC_ULTAUDIO_PCNOC_MPORT_CLK] = &gcc_ultaudio_pcnoc_mport_clk.clkr,
+ [GCC_ULTAUDIO_PCNOC_SWAY_CLK] = &gcc_ultaudio_pcnoc_sway_clk.clkr,
+ [GCC_ULTAUDIO_AVSYNC_XO_CLK] = &gcc_ultaudio_avsync_xo_clk.clkr,
+ [GCC_ULTAUDIO_STC_XO_CLK] = &gcc_ultaudio_stc_xo_clk.clkr,
+ [GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK] = &gcc_ultaudio_ahbfabric_ixfabric_clk.clkr,
+ [GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_LPM_CLK] = &gcc_ultaudio_ahbfabric_ixfabric_lpm_clk.clkr,
+ [GCC_ULTAUDIO_LPAIF_PRI_I2S_CLK] = &gcc_ultaudio_lpaif_pri_i2s_clk.clkr,
+ [GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK] = &gcc_ultaudio_lpaif_sec_i2s_clk.clkr,
+ [GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK] = &gcc_ultaudio_lpaif_aux_i2s_clk.clkr,
+ [GCC_CODEC_DIGCODEC_CLK] = &gcc_codec_digcodec_clk.clkr,
+};
+
+static struct gdsc *gcc_msm8916_gdscs[] = {
+ [VENUS_GDSC] = &venus_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+ [JPEG_GDSC] = &jpeg_gdsc,
+ [VFE_GDSC] = &vfe_gdsc,
+ [OXILI_GDSC] = &oxili_gdsc,
};
static const struct qcom_reset_map gcc_msm8916_resets[] = {
@@ -2810,6 +3339,8 @@ static const struct qcom_cc_desc gcc_msm8916_desc = {
.num_clks = ARRAY_SIZE(gcc_msm8916_clocks),
.resets = gcc_msm8916_resets,
.num_resets = ARRAY_SIZE(gcc_msm8916_resets),
+ .gdscs = gcc_msm8916_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8916_gdscs),
};
static const struct of_device_id gcc_msm8916_match_table[] = {
@@ -2823,15 +3354,15 @@ static int gcc_msm8916_probe(struct platform_device *pdev)
struct clk *clk;
struct device *dev = &pdev->dev;
- /* Temporary until RPM clocks supported */
- clk = clk_register_fixed_rate(dev, "xo", NULL, CLK_IS_ROOT, 19200000);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ if (!IS_ENABLED(CONFIG_QCOM_RPMCC)) {
+ /* The RPM clock driver is disabled; register fixed substitutes */
+ clk = clk_register_fixed_factor(dev, "xo", "xo_board", 0, 1, 1);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
- clk = clk_register_fixed_rate(dev, "sleep_clk_src", NULL,
- CLK_IS_ROOT, 32768);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ clk_register_fixed_rate(dev, "sleep_clk_src", NULL,
+ CLK_IS_ROOT, 32768);
+ }
return qcom_cc_probe(pdev, &gcc_msm8916_desc);
}
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index aa294b1bad34..e7cdf482d427 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -30,6 +30,7 @@
#include "clk-pll.h"
#include "clk-rcg.h"
#include "clk-branch.h"
+#include "clk-hfpll.h"
#include "reset.h"
static struct clk_pll pll3 = {
@@ -86,6 +87,164 @@ static struct clk_regmap pll8_vote = {
},
};
+static struct hfpll_data hfpll0_data = {
+ .mode_reg = 0x3200,
+ .l_reg = 0x3208,
+ .m_reg = 0x320c,
+ .n_reg = 0x3210,
+ .config_reg = 0x3204,
+ .status_reg = 0x321c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3214,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll0 = {
+ .d = &hfpll0_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll0",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll0.lock),
+};
+
+static struct hfpll_data hfpll1_8064_data = {
+ .mode_reg = 0x3240,
+ .l_reg = 0x3248,
+ .m_reg = 0x324c,
+ .n_reg = 0x3250,
+ .config_reg = 0x3244,
+ .status_reg = 0x325c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3254,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct hfpll_data hfpll1_data = {
+ .mode_reg = 0x3300,
+ .l_reg = 0x3308,
+ .m_reg = 0x330c,
+ .n_reg = 0x3310,
+ .config_reg = 0x3304,
+ .status_reg = 0x331c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3314,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll1 = {
+ .d = &hfpll1_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll1",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll1.lock),
+};
+
+static struct hfpll_data hfpll2_data = {
+ .mode_reg = 0x3280,
+ .l_reg = 0x3288,
+ .m_reg = 0x328c,
+ .n_reg = 0x3290,
+ .config_reg = 0x3284,
+ .status_reg = 0x329c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3294,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll2 = {
+ .d = &hfpll2_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll2",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll2.lock),
+};
+
+static struct hfpll_data hfpll3_data = {
+ .mode_reg = 0x32c0,
+ .l_reg = 0x32c8,
+ .m_reg = 0x32cc,
+ .n_reg = 0x32d0,
+ .config_reg = 0x32c4,
+ .status_reg = 0x32dc,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x32d4,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll3 = {
+ .d = &hfpll3_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll3",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll3.lock),
+};
+
+static struct hfpll_data hfpll_l2_8064_data = {
+ .mode_reg = 0x3300,
+ .l_reg = 0x3308,
+ .m_reg = 0x330c,
+ .n_reg = 0x3310,
+ .config_reg = 0x3304,
+ .status_reg = 0x331c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3314,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct hfpll_data hfpll_l2_data = {
+ .mode_reg = 0x3400,
+ .l_reg = 0x3408,
+ .m_reg = 0x340c,
+ .n_reg = 0x3410,
+ .config_reg = 0x3404,
+ .status_reg = 0x341c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3414,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll_l2 = {
+ .d = &hfpll_l2_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll_l2",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll_l2.lock),
+};
+
static struct clk_pll pll14 = {
.l_reg = 0x31c4,
.m_reg = 0x31c8,
@@ -3154,6 +3313,9 @@ static struct clk_regmap *gcc_msm8960_clks[] = {
[PMIC_ARB1_H_CLK] = &pmic_arb1_h_clk.clkr,
[PMIC_SSBI2_CLK] = &pmic_ssbi2_clk.clkr,
[RPM_MSG_RAM_H_CLK] = &rpm_msg_ram_h_clk.clkr,
+ [PLL9] = &hfpll0.clkr,
+ [PLL10] = &hfpll1.clkr,
+ [PLL12] = &hfpll_l2.clkr,
};
static const struct qcom_reset_map gcc_msm8960_resets[] = {
@@ -3365,6 +3527,11 @@ static struct clk_regmap *gcc_apq8064_clks[] = {
[PMIC_ARB1_H_CLK] = &pmic_arb1_h_clk.clkr,
[PMIC_SSBI2_CLK] = &pmic_ssbi2_clk.clkr,
[RPM_MSG_RAM_H_CLK] = &rpm_msg_ram_h_clk.clkr,
+ [PLL9] = &hfpll0.clkr,
+ [PLL10] = &hfpll1.clkr,
+ [PLL12] = &hfpll_l2.clkr,
+ [PLL16] = &hfpll2.clkr,
+ [PLL17] = &hfpll3.clkr,
};
static const struct qcom_reset_map gcc_apq8064_resets[] = {
@@ -3503,28 +3670,39 @@ MODULE_DEVICE_TABLE(of, gcc_msm8960_match_table);
static int gcc_msm8960_probe(struct platform_device *pdev)
{
- struct clk *clk;
- struct device *dev = &pdev->dev;
const struct of_device_id *match;
+ struct platform_device *tsens;
+ int ret;
match = of_match_device(gcc_msm8960_match_table, &pdev->dev);
if (!match)
return -EINVAL;
- /* Temporary until RPM clocks supported */
- clk = clk_register_fixed_rate(dev, "cxo", NULL, CLK_IS_ROOT, 19200000);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ if (match->data == &gcc_apq8064_desc) {
+ hfpll1.d = &hfpll1_8064_data;
+ hfpll_l2.d = &hfpll_l2_8064_data;
+ }
+
+ ret = qcom_cc_probe(pdev, match->data);
+ if (ret)
+ return ret;
- clk = clk_register_fixed_rate(dev, "pxo", NULL, CLK_IS_ROOT, 27000000);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ tsens = platform_device_register_data(&pdev->dev, "qcom-tsens", -1,
+ NULL, 0);
+ if (IS_ERR(tsens)) {
+ qcom_cc_remove(pdev);
+ return PTR_ERR(tsens);
+ }
+ platform_set_drvdata(pdev, tsens);
- return qcom_cc_probe(pdev, match->data);
+ return 0;
}
static int gcc_msm8960_remove(struct platform_device *pdev)
{
+ struct platform_device *tsens = platform_get_drvdata(pdev);
+
+ platform_device_unregister(tsens);
qcom_cc_remove(pdev);
return 0;
}
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index 2bcf87538f9d..c9c010fcf650 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -31,6 +31,7 @@
#include "clk-rcg.h"
#include "clk-branch.h"
#include "reset.h"
+#include "gdsc.h"
enum {
P_XO,
@@ -2432,6 +2433,13 @@ static struct clk_branch gcc_usb_hsic_system_clk = {
},
};
+static struct gdsc usb_hs_hsic_gdsc = {
+ .gdscr = 0x404,
+ .pd = {
+ .name = "usb_hs_hsic",
+ },
+};
+
static struct clk_regmap *gcc_msm8974_clocks[] = {
[GPLL0] = &gpll0.clkr,
[GPLL0_VOTE] = &gpll0_vote,
@@ -2661,6 +2669,10 @@ static const struct qcom_reset_map gcc_msm8974_resets[] = {
[GCC_VENUS_RESTART] = { 0x1740 },
};
+static struct gdsc *gcc_msm8974_gdscs[] = {
+ [USB_HS_HSIC_GDSC] = &usb_hs_hsic_gdsc,
+};
+
static const struct regmap_config gcc_msm8974_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -2675,6 +2687,8 @@ static const struct qcom_cc_desc gcc_msm8974_desc = {
.num_clks = ARRAY_SIZE(gcc_msm8974_clocks),
.resets = gcc_msm8974_resets,
.num_resets = ARRAY_SIZE(gcc_msm8974_resets),
+ .gdscs = gcc_msm8974_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8974_gdscs),
};
static const struct of_device_id gcc_msm8974_match_table[] = {
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
new file mode 100644
index 000000000000..480ebf6e0657
--- /dev/null
+++ b/drivers/clk/qcom/gdsc.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/jiffies.h>
+#include <linux/pm_clock.h>
+#include <linux/slab.h>
+#include "gdsc.h"
+
+#define PWR_ON_MASK BIT(31)
+#define EN_REST_WAIT_MASK GENMASK(23, 20)
+#define EN_FEW_WAIT_MASK GENMASK(19, 16)
+#define CLK_DIS_WAIT_MASK GENMASK(15, 12)
+#define SW_OVERRIDE_MASK BIT(2)
+#define HW_CONTROL_MASK BIT(1)
+#define SW_COLLAPSE_MASK BIT(0)
+
+/*
+ * Wait 2^n CXO cycles between state transitions: n = 2 (4 cycles) for the
+ * restore and clock-disable waits, n = 8 for the enable-few wait.
+ */
+#define EN_REST_WAIT_VAL (0x2 << 20)
+#define EN_FEW_WAIT_VAL (0x8 << 16)
+#define CLK_DIS_WAIT_VAL (0x2 << 12)
+
+#define TIMEOUT_US 100
+
+#define domain_to_gdsc(domain) container_of(domain, struct gdsc, pd)
+
+static int gdsc_is_enabled(struct gdsc *sc)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(sc->regmap, sc->gdscr, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & PWR_ON_MASK);
+}
+
+static int gdsc_toggle_logic(struct gdsc *sc, bool en)
+{
+ int ret;
+ u32 val = en ? 0 : SW_COLLAPSE_MASK;
+ u32 check = en ? PWR_ON_MASK : 0;
+ unsigned long timeout;
+
+ ret = regmap_update_bits(sc->regmap, sc->gdscr, SW_COLLAPSE_MASK, val);
+ if (ret)
+ return ret;
+
+ timeout = jiffies + usecs_to_jiffies(TIMEOUT_US);
+ do {
+ ret = regmap_read(sc->regmap, sc->gdscr, &val);
+ if (ret)
+ return ret;
+
+ if ((val & PWR_ON_MASK) == check)
+ return 0;
+ } while (time_before(jiffies, timeout));
+
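+ /* Re-check once in case the poll loop was preempted past the timeout */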
+ ret = regmap_read(sc->regmap, sc->gdscr, &val);
+ if (ret)
+ return ret;
+
+ if ((val & PWR_ON_MASK) == check)
+ return 0;
+
+ return -ETIMEDOUT;
+}
+
+static int gdsc_enable(struct generic_pm_domain *domain)
+{
+ struct gdsc *sc = domain_to_gdsc(domain);
+ int ret;
+
+ if (sc->root_clk)
+ clk_prepare_enable(sc->root_clk);
+
+ ret = gdsc_toggle_logic(sc, true);
+ if (ret)
+ return ret;
+ /*
+ * If clocks to this power domain were already on, they will take an
+ * additional 4 clock cycles to re-enable after the power domain is
+ * enabled. Delay to account for this. A delay is also needed to ensure
+ * clocks are not enabled within 400ns of enabling power to the
+ * memories.
+ */
+ udelay(1);
+
+ return 0;
+}
+
+static int gdsc_disable(struct generic_pm_domain *domain)
+{
+ int ret;
+ struct gdsc *sc = domain_to_gdsc(domain);
+
+ ret = gdsc_toggle_logic(sc, false);
+
+ if (sc->root_clk)
+ clk_disable_unprepare(sc->root_clk);
+
+ return ret;
+}
+
+static int gdsc_attach(struct generic_pm_domain *domain, struct device *dev)
+{
+ int ret;
+ struct gdsc *sc = domain_to_gdsc(domain);
+ char **con_id;
+
+ /* Take the root clock first so gdscs without con_ids still get it */
+ if (sc->root_con_id) {
+ sc->root_clk = clk_get(dev, sc->root_con_id);
+ if (IS_ERR(sc->root_clk)) {
+ ret = PTR_ERR(sc->root_clk);
+ sc->root_clk = NULL;
+ dev_err(dev, "failed to get root clock\n");
+ return ret;
+ }
+ }
+
+ if (!sc->con_ids[0])
+ return 0;
+
+ ret = pm_clk_create(dev);
+ if (ret) {
+ dev_err(dev, "pm_clk_create failed %d\n", ret);
+ return ret;
+ }
+
+ for (con_id = sc->con_ids; *con_id; con_id++) {
+ ret = pm_clk_add(dev, *con_id);
+ if (ret) {
+ dev_err(dev, "pm_clk_add failed %d\n", ret);
+ goto fail;
+ }
+ }
+
+ return 0;
+fail:
+ pm_clk_destroy(dev);
+ return ret;
+}
+
+static void gdsc_detach(struct generic_pm_domain *domain, struct device *dev)
+{
+ struct gdsc *sc = domain_to_gdsc(domain);
+
+ /* Release the root clock taken in gdsc_attach() */
+ if (sc->root_clk) {
+ clk_put(sc->root_clk);
+ sc->root_clk = NULL;
+ }
+
+ if (!sc->con_ids[0])
+ return;
+
+ pm_clk_destroy(dev);
+}
+
+static int gdsc_init(struct gdsc *sc)
+{
+ u32 mask, val;
+ int on, ret;
+
+ /*
+ * Disable HW trigger: collapse/restore occur based on registers writes.
+ * Disable SW override: Use hardware state-machine for sequencing.
+ * Configure wait time between states.
+ */
+ mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK |
+ EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK;
+ val = EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
+ ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val);
+ if (ret)
+ return ret;
+
+ on = gdsc_is_enabled(sc);
+ if (on < 0)
+ return on;
+
+ sc->pd.power_off = gdsc_disable;
+ sc->pd.power_on = gdsc_enable;
+ sc->pd.attach_dev = gdsc_attach;
+ sc->pd.detach_dev = gdsc_detach;
+ sc->pd.flags = GENPD_FLAG_PM_CLK;
+ pm_genpd_init(&sc->pd, NULL, !on);
+
+ return 0;
+}
+
+int gdsc_register(struct device *dev, struct gdsc **scs, size_t num,
+ struct regmap *regmap)
+{
+ int i, ret;
+ struct genpd_onecell_data *data;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->domains = devm_kcalloc(dev, num, sizeof(*data->domains),
+ GFP_KERNEL);
+ if (!data->domains)
+ return -ENOMEM;
+
+ data->num_domains = num;
+ for (i = 0; i < num; i++) {
+ if (!scs[i])
+ continue;
+ scs[i]->regmap = regmap;
+ ret = gdsc_init(scs[i]);
+ if (ret)
+ return ret;
+ data->domains[i] = &scs[i]->pd;
+ }
+
+ return of_genpd_add_provider_onecell(dev->of_node, data);
+}
+
+void gdsc_unregister(struct device *dev)
+{
+ of_genpd_del_provider(dev->of_node);
+}
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
new file mode 100644
index 000000000000..1ad9d53fd776
--- /dev/null
+++ b/drivers/clk/qcom/gdsc.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_GDSC_H__
+#define __QCOM_GDSC_H__
+
+#include <linux/clk.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+
+/**
+ * struct gdsc - Globally Distributed Switch Controller
+ * @pd: generic power domain
+ * @regmap: regmap for MMIO accesses
+ * @gdscr: gdsc control register
+ * @root_con_id: con_id of the root clock to keep enabled, if any
+ * @root_clk: root clock handle, looked up at attach time via @root_con_id
+ * @con_ids: List of clocks to be controlled for the gdsc
+ */
+struct gdsc {
+ struct generic_pm_domain pd;
+ struct regmap *regmap;
+ unsigned int gdscr;
+ char *root_con_id;
+ struct clk *root_clk;
+ char *con_ids[];
+};
+
+#ifdef CONFIG_QCOM_GDSC
+int gdsc_register(struct device *, struct gdsc **, size_t n, struct regmap *);
+void gdsc_unregister(struct device *);
+#else
+static inline int gdsc_register(struct device *d, struct gdsc **g, size_t n,
+ struct regmap *r)
+{
+ return -ENOSYS;
+}
+
+static inline void gdsc_unregister(struct device *d)
+{
+}
+#endif /* CONFIG_QCOM_GDSC */
+#endif /* __QCOM_GDSC_H__ */
diff --git a/drivers/clk/qcom/hfpll.c b/drivers/clk/qcom/hfpll.c
new file mode 100644
index 000000000000..1492f4c79c35
--- /dev/null
+++ b/drivers/clk/qcom/hfpll.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include "clk-regmap.h"
+#include "clk-hfpll.h"
+
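+/*
+ * Register layout and rate limits shared by every instance of the
+ * "qcom,hfpll" compatible handled by this driver.
+ */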
+static const struct hfpll_data hdata = {
+ .mode_reg = 0x00,
+ .l_reg = 0x04,
+ .m_reg = 0x08,
+ .n_reg = 0x0c,
+ .user_reg = 0x10,
+ .config_reg = 0x14,
+ .config_val = 0x430405d,
+ .status_reg = 0x1c,
+ .lock_bit = 16,
+
+ .user_val = 0x8,
+ .user_vco_mask = 0x100000,
+ .low_vco_max_rate = 1248000000,
+ .min_rate = 537600000UL,
+ .max_rate = 2900000000UL,
+};
+
+static const struct of_device_id qcom_hfpll_match_table[] = {
+ { .compatible = "qcom,hfpll" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_hfpll_match_table);
+
+static const struct regmap_config hfpll_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x30,
+ .fast_io = true,
+};
+
+static int qcom_hfpll_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+ struct regmap *regmap;
+ struct clk_hfpll *h;
+ struct clk_init_data init = {
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_ops_hfpll,
+ };
+
+ h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(&pdev->dev, base, &hfpll_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ if (of_property_read_string_index(dev->of_node, "clock-output-names",
+ 0, &init.name))
+ return -ENODEV;
+
+ h->d = &hdata;
+ h->clkr.hw.init = &init;
+ spin_lock_init(&h->lock);
+
+ clk = devm_clk_register_regmap(&pdev->dev, &h->clkr);
+
+ return PTR_ERR_OR_ZERO(clk);
+}
+
+static struct platform_driver qcom_hfpll_driver = {
+ .probe = qcom_hfpll_probe,
+ .driver = {
+ .name = "qcom-hfpll",
+ .of_match_table = qcom_hfpll_match_table,
+ },
+};
+module_platform_driver(qcom_hfpll_driver);
+
+MODULE_DESCRIPTION("QCOM HFPLL Clock Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-hfpll");
diff --git a/drivers/clk/qcom/kpss-xcc.c b/drivers/clk/qcom/kpss-xcc.c
new file mode 100644
index 000000000000..abf6bfd053c1
--- /dev/null
+++ b/drivers/clk/qcom/kpss-xcc.c
@@ -0,0 +1,95 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+static const char *aux_parents[] = {
+ "pll8_vote",
+ "pxo",
+};
+
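+/* Mux selector values, in aux_parents order: pll8_vote = 3, pxo = 0 */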
+static unsigned int aux_parent_map[] = {
+ 3,
+ 0,
+};
+
+static const struct of_device_id kpss_xcc_match_table[] = {
+ { .compatible = "qcom,kpss-acc-v1", .data = (void *)1UL },
+ { .compatible = "qcom,kpss-gcc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, kpss_xcc_match_table);
+
+static int kpss_xcc_driver_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *id;
+ struct clk *clk;
+ struct resource *res;
+ void __iomem *base;
+ const char *name;
+
+ id = of_match_device(kpss_xcc_match_table, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (id->data) {
+ if (of_property_read_string_index(pdev->dev.of_node,
+ "clock-output-names", 0, &name))
+ return -ENODEV;
+ base += 0x14;
+ } else {
+ name = "acpu_l2_aux";
+ base += 0x28;
+ }
+
+ clk = clk_register_mux_table(&pdev->dev, name, aux_parents,
+ ARRAY_SIZE(aux_parents), 0, base, 0, 0x3,
+ 0, aux_parent_map, NULL);
+
+ platform_set_drvdata(pdev, clk);
+
+ return PTR_ERR_OR_ZERO(clk);
+}
+
+static int kpss_xcc_driver_remove(struct platform_device *pdev)
+{
+ clk_unregister_mux(platform_get_drvdata(pdev));
+ return 0;
+}
+
+static struct platform_driver kpss_xcc_driver = {
+ .probe = kpss_xcc_driver_probe,
+ .remove = kpss_xcc_driver_remove,
+ .driver = {
+ .name = "kpss-xcc",
+ .of_match_table = kpss_xcc_match_table,
+ },
+};
+module_platform_driver(kpss_xcc_driver);
+
+MODULE_DESCRIPTION("Krait Processor Sub System (KPSS) Clock Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:kpss-xcc");
diff --git a/drivers/clk/qcom/krait-cc.c b/drivers/clk/qcom/krait-cc.c
new file mode 100644
index 000000000000..f55b5ecd0df8
--- /dev/null
+++ b/drivers/clk/qcom/krait-cc.c
@@ -0,0 +1,352 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+
+#include "clk-krait.h"
+
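+/*
+ * Hardware mux selector values, in parent order: the secondary mux
+ * selects { acpu_aux = 2, qsb = 0 }, the primary mux selects
+ * { hfpll = 1, hfpll_div = 2, sec_mux = 0 }.
+ */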
+static unsigned int sec_mux_map[] = {
+ 2,
+ 0,
+};
+
+static unsigned int pri_mux_map[] = {
+ 1,
+ 2,
+ 0,
+};
+
+static int
+krait_add_div(struct device *dev, int id, const char *s, unsigned offset)
+{
+ struct krait_div2_clk *div;
+ struct clk_init_data init = {
+ .num_parents = 1,
+ .ops = &krait_div2_clk_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ };
+ const char *p_names[1];
+ struct clk *clk;
+
+ div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return -ENOMEM;
+
+ div->width = 2;
+ div->shift = 6;
+ div->lpl = id >= 0;
+ div->offset = offset;
+ div->hw.init = &init;
+
+ init.name = kasprintf(GFP_KERNEL, "hfpll%s_div", s);
+ if (!init.name)
+ return -ENOMEM;
+
+ init.parent_names = p_names;
+ p_names[0] = kasprintf(GFP_KERNEL, "hfpll%s", s);
+ if (!p_names[0]) {
+ kfree(init.name);
+ return -ENOMEM;
+ }
+
+ clk = devm_clk_register(dev, &div->hw);
+ kfree(p_names[0]);
+ kfree(init.name);
+
+ return PTR_ERR_OR_ZERO(clk);
+}
+
+static int
+krait_add_sec_mux(struct device *dev, int id, const char *s, unsigned offset,
+ bool unique_aux)
+{
+ struct krait_mux_clk *mux;
+ static const char *sec_mux_list[] = {
+ "acpu_aux",
+ "qsb",
+ };
+ struct clk_init_data init = {
+ .parent_names = sec_mux_list,
+ .num_parents = ARRAY_SIZE(sec_mux_list),
+ .ops = &krait_mux_clk_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ };
+ struct clk *clk;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return -ENOMEM;
+
+ mux->offset = offset;
+ mux->lpl = id >= 0;
+ mux->has_safe_parent = true;
+ mux->safe_sel = 2;
+ mux->mask = 0x3;
+ mux->shift = 2;
+ mux->parent_map = sec_mux_map;
+ mux->hw.init = &init;
+
+ init.name = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s);
+ if (!init.name)
+ return -ENOMEM;
+
+ if (unique_aux) {
+ sec_mux_list[0] = kasprintf(GFP_KERNEL, "acpu%s_aux", s);
+ if (!sec_mux_list[0]) {
+ clk = ERR_PTR(-ENOMEM);
+ goto err_aux;
+ }
+ }
+
+ clk = devm_clk_register(dev, &mux->hw);
+
+ if (unique_aux)
+ kfree(sec_mux_list[0]);
+err_aux:
+ kfree(init.name);
+ return PTR_ERR_OR_ZERO(clk);
+}
+
+static struct clk *
+krait_add_pri_mux(struct device *dev, int id, const char *s, unsigned offset)
+{
+ struct krait_mux_clk *mux;
+ const char *p_names[3];
+ struct clk_init_data init = {
+ .parent_names = p_names,
+ .num_parents = ARRAY_SIZE(p_names),
+ .ops = &krait_mux_clk_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ };
+ struct clk *clk;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ mux->has_safe_parent = true;
+ mux->safe_sel = 0;
+ mux->mask = 0x3;
+ mux->shift = 0;
+ mux->offset = offset;
+ mux->lpl = id >= 0;
+ mux->parent_map = pri_mux_map;
+ mux->hw.init = &init;
+
+ init.name = kasprintf(GFP_KERNEL, "krait%s_pri_mux", s);
+ if (!init.name)
+ return ERR_PTR(-ENOMEM);
+
+ p_names[0] = kasprintf(GFP_KERNEL, "hfpll%s", s);
+ if (!p_names[0]) {
+ clk = ERR_PTR(-ENOMEM);
+ goto err_p0;
+ }
+
+ p_names[1] = kasprintf(GFP_KERNEL, "hfpll%s_div", s);
+ if (!p_names[1]) {
+ clk = ERR_PTR(-ENOMEM);
+ goto err_p1;
+ }
+
+ p_names[2] = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s);
+ if (!p_names[2]) {
+ clk = ERR_PTR(-ENOMEM);
+ goto err_p2;
+ }
+
+ clk = devm_clk_register(dev, &mux->hw);
+
+ kfree(p_names[2]);
+err_p2:
+ kfree(p_names[1]);
+err_p1:
+ kfree(p_names[0]);
+err_p0:
+ kfree(init.name);
+ return clk;
+}
+
+/* id < 0 for L2, otherwise id == physical CPU number */
+static struct clk *krait_add_clks(struct device *dev, int id, bool unique_aux)
+{
+ int ret;
+ unsigned offset;
+ void *p = NULL;
+ const char *s;
+ struct clk *clk;
+
+ if (id >= 0) {
+ offset = 0x4501 + (0x1000 * id);
+ s = p = kasprintf(GFP_KERNEL, "%d", id);
+ if (!s)
+ return ERR_PTR(-ENOMEM);
+ } else {
+ offset = 0x500;
+ s = "_l2";
+ }
+
+ ret = krait_add_div(dev, id, s, offset);
+ if (ret) {
+ clk = ERR_PTR(ret);
+ goto err;
+ }
+
+ ret = krait_add_sec_mux(dev, id, s, offset, unique_aux);
+ if (ret) {
+ clk = ERR_PTR(ret);
+ goto err;
+ }
+
+ clk = krait_add_pri_mux(dev, id, s, offset);
+err:
+ kfree(p);
+ return clk;
+}
+
+static struct clk *krait_of_get(struct of_phandle_args *clkspec, void *data)
+{
+ unsigned int idx = clkspec->args[0];
+ struct clk **clks = data;
+
+ if (idx >= 5) {
+ pr_err("%s: invalid clock index %d\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return clks[idx] ? : ERR_PTR(-ENODEV);
+}
+
+static const struct of_device_id krait_cc_match_table[] = {
+ { .compatible = "qcom,krait-cc-v1", (void *)1UL },
+ { .compatible = "qcom,krait-cc-v2" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, krait_cc_match_table);
+
+static int krait_cc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *id;
+ unsigned long cur_rate, aux_rate;
+ int cpu;
+ struct clk *clk;
+ struct clk **clks;
+ struct clk *l2_pri_mux_clk;
+
+ id = of_match_device(krait_cc_match_table, dev);
+ if (!id)
+ return -ENODEV;
+
+ /* Rate is 1 because 0 causes problems for __clk_mux_determine_rate */
+ clk = clk_register_fixed_rate(dev, "qsb", NULL, CLK_IS_ROOT, 1);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ if (!id->data) {
+ clk = clk_register_fixed_factor(dev, "acpu_aux",
+ "gpll0_vote", 0, 1, 2);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ }
+
+ /* Krait configurations have at most 4 CPUs and one L2 */
+ clks = devm_kcalloc(dev, 5, sizeof(*clks), GFP_KERNEL);
+ if (!clks)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu) {
+ clk = krait_add_clks(dev, cpu, id->data);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ clks[cpu] = clk;
+ }
+
+ l2_pri_mux_clk = krait_add_clks(dev, -1, id->data);
+ if (IS_ERR(l2_pri_mux_clk))
+ return PTR_ERR(l2_pri_mux_clk);
+ clks[4] = l2_pri_mux_clk;
+
+ /*
+ * We don't want the CPU or L2 clocks to be turned off at late init
+ * if CPUFREQ or HOTPLUG configs are disabled. So, bump up the
+ * refcount of these clocks. Any cpufreq/hotplug manager can assume
+ * that the clocks have already been prepared and enabled by the time
+ * they take over.
+ */
+ for_each_online_cpu(cpu) {
+ clk_prepare_enable(l2_pri_mux_clk);
+ WARN(clk_prepare_enable(clks[cpu]),
+ "Unable to turn on CPU%d clock", cpu);
+ }
+
+ /*
+ * Force reinit of HFPLLs and muxes to overwrite any potential
+ * incorrect configuration of HFPLLs and muxes by the bootloader.
+ * While at it, also make sure the cores are running at known rates
+ * and print the current rate.
+ *
+ * The clocks are set to aux clock rate first to make sure the
+ * secondary mux is not sourcing off of QSB. The rate is then set to
+ * two different rates to force a HFPLL reinit under all
+ * circumstances.
+ */
+ cur_rate = clk_get_rate(l2_pri_mux_clk);
+ aux_rate = 384000000;
+ if (cur_rate == 1) {
+ pr_info("L2 @ QSB rate. Forcing new rate.\n");
+ cur_rate = aux_rate;
+ }
+ clk_set_rate(l2_pri_mux_clk, aux_rate);
+ clk_set_rate(l2_pri_mux_clk, 2);
+ clk_set_rate(l2_pri_mux_clk, cur_rate);
+ pr_info("L2 @ %lu KHz\n", clk_get_rate(l2_pri_mux_clk) / 1000);
+ for_each_possible_cpu(cpu) {
+ clk = clks[cpu];
+ cur_rate = clk_get_rate(clk);
+ if (cur_rate == 1) {
+ pr_info("CPU%d @ QSB rate. Forcing new rate.\n", cpu);
+ cur_rate = aux_rate;
+ }
+ clk_set_rate(clk, aux_rate);
+ clk_set_rate(clk, 2);
+ clk_set_rate(clk, cur_rate);
+ pr_info("CPU%d @ %lu KHz\n", cpu, clk_get_rate(clk) / 1000);
+ }
+
+ return of_clk_add_provider(dev->of_node, krait_of_get, clks);
+}
+
+static struct platform_driver krait_cc_driver = {
+ .probe = krait_cc_probe,
+ .driver = {
+ .name = "krait-cc",
+ .of_match_table = krait_cc_match_table,
+ },
+};
+module_platform_driver(krait_cc_driver);
+
+MODULE_DESCRIPTION("Krait CPU Clock Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:krait-cc");
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index f0ee6bde11af..2a817d2b8d76 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -26,6 +26,7 @@
#include "clk-rcg.h"
#include "clk-branch.h"
#include "reset.h"
+#include "gdsc.h"
enum {
P_XO,
@@ -571,17 +572,11 @@ static struct clk_rcg2 jpeg2_clk_src = {
},
};
-static struct freq_tbl pixel_freq_tbl[] = {
- { .src = P_DSI0PLL },
- { }
-};
-
static struct clk_rcg2 pclk0_clk_src = {
.cmd_rcgr = 0x2000,
.mnd_width = 8,
.hid_width = 5,
.parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
- .freq_tbl = pixel_freq_tbl,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk0_clk_src",
.parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
@@ -596,7 +591,6 @@ static struct clk_rcg2 pclk1_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
- .freq_tbl = pixel_freq_tbl,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk1_clk_src",
.parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
@@ -844,21 +838,15 @@ static struct clk_rcg2 cpp_clk_src = {
},
};
-static struct freq_tbl byte_freq_tbl[] = {
- { .src = P_DSI0PLL_BYTE },
- { }
-};
-
static struct clk_rcg2 byte0_clk_src = {
.cmd_rcgr = 0x2120,
.hid_width = 5,
.parent_map = mmcc_xo_dsibyte_hdmi_edp_gpll0_map,
- .freq_tbl = byte_freq_tbl,
.clkr.hw.init = &(struct clk_init_data){
.name = "byte0_clk_src",
.parent_names = mmcc_xo_dsibyte_hdmi_edp_gpll0,
.num_parents = 6,
- .ops = &clk_byte_ops,
+ .ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -867,12 +855,11 @@ static struct clk_rcg2 byte1_clk_src = {
.cmd_rcgr = 0x2140,
.hid_width = 5,
.parent_map = mmcc_xo_dsibyte_hdmi_edp_gpll0_map,
- .freq_tbl = byte_freq_tbl,
.clkr.hw.init = &(struct clk_init_data){
.name = "byte1_clk_src",
.parent_names = mmcc_xo_dsibyte_hdmi_edp_gpll0,
.num_parents = 6,
- .ops = &clk_byte_ops,
+ .ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -3077,6 +3064,48 @@ static const struct pll_config mmpll3_config = {
.aux_output_mask = BIT(1),
};
+static struct gdsc venus0_gdsc = {
+ .gdscr = 0x1024,
+ .pd = {
+ .name = "venus0",
+ },
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x2304,
+ .pd = {
+ .name = "mdss",
+ },
+};
+
+static struct gdsc camss_jpeg_gdsc = {
+ .gdscr = 0x35a4,
+ .pd = {
+ .name = "camss_jpeg",
+ },
+};
+
+static struct gdsc camss_vfe_gdsc = {
+ .gdscr = 0x36a4,
+ .pd = {
+ .name = "camss_vfe",
+ },
+};
+
+static struct gdsc oxili_gdsc = {
+ .gdscr = 0x4024,
+ .pd = {
+ .name = "oxili",
+ },
+};
+
+static struct gdsc oxilicx_gdsc = {
+ .gdscr = 0x4034,
+ .pd = {
+ .name = "oxilicx",
+ },
+};
+
static struct clk_regmap *mmcc_apq8084_clocks[] = {
[MMSS_AHB_CLK_SRC] = &mmss_ahb_clk_src.clkr,
[MMSS_AXI_CLK_SRC] = &mmss_axi_clk_src.clkr,
@@ -3294,6 +3323,15 @@ static const struct qcom_reset_map mmcc_apq8084_resets[] = {
[MMSSNOCAXI_RESET] = { 0x5060 },
};
+static struct gdsc *mmcc_apq8084_gdscs[] = {
+ [VENUS0_GDSC] = &venus0_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+ [CAMSS_JPEG_GDSC] = &camss_jpeg_gdsc,
+ [CAMSS_VFE_GDSC] = &camss_vfe_gdsc,
+ [OXILI_GDSC] = &oxili_gdsc,
+ [OXILICX_GDSC] = &oxilicx_gdsc,
+};
+
static const struct regmap_config mmcc_apq8084_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -3308,6 +3346,8 @@ static const struct qcom_cc_desc mmcc_apq8084_desc = {
.num_clks = ARRAY_SIZE(mmcc_apq8084_clocks),
.resets = mmcc_apq8084_resets,
.num_resets = ARRAY_SIZE(mmcc_apq8084_resets),
+ .gdscs = mmcc_apq8084_gdscs,
+ .num_gdscs = ARRAY_SIZE(mmcc_apq8084_gdscs),
};
static const struct of_device_id mmcc_apq8084_match_table[] = {
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index 0987bf443e1f..8606f88ae926 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -31,6 +31,7 @@
#include "clk-rcg.h"
#include "clk-branch.h"
#include "reset.h"
+#include "gdsc.h"
enum {
P_XO,
@@ -522,17 +523,11 @@ static struct clk_rcg2 jpeg2_clk_src = {
},
};
-static struct freq_tbl pixel_freq_tbl[] = {
- { .src = P_DSI0PLL },
- { }
-};
-
static struct clk_rcg2 pclk0_clk_src = {
.cmd_rcgr = 0x2000,
.mnd_width = 8,
.hid_width = 5,
.parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
- .freq_tbl = pixel_freq_tbl,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk0_clk_src",
.parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
@@ -547,7 +542,6 @@ static struct clk_rcg2 pclk1_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
- .freq_tbl = pixel_freq_tbl,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk1_clk_src",
.parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
@@ -785,7 +779,7 @@ static struct clk_rcg2 byte0_clk_src = {
.name = "byte0_clk_src",
.parent_names = mmcc_xo_dsibyte_hdmi_edp_gpll0,
.num_parents = 6,
- .ops = &clk_byte_ops,
+ .ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -799,7 +793,7 @@ static struct clk_rcg2 byte1_clk_src = {
.name = "byte1_clk_src",
.parent_names = mmcc_xo_dsibyte_hdmi_edp_gpll0,
.num_parents = 6,
- .ops = &clk_byte_ops,
+ .ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2349,6 +2343,48 @@ static struct pll_config mmpll3_config = {
.aux_output_mask = BIT(1),
};
+static struct gdsc venus0_gdsc = {
+ .gdscr = 0x1024,
+ .pd = {
+ .name = "venus0",
+ },
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x2304,
+ .pd = {
+ .name = "mdss",
+ },
+};
+
+static struct gdsc camss_jpeg_gdsc = {
+ .gdscr = 0x35a4,
+ .pd = {
+ .name = "camss_jpeg",
+ },
+};
+
+static struct gdsc camss_vfe_gdsc = {
+ .gdscr = 0x36a4,
+ .pd = {
+ .name = "camss_vfe",
+ },
+};
+
+static struct gdsc oxili_gdsc = {
+ .gdscr = 0x4024,
+ .pd = {
+ .name = "oxili",
+ },
+};
+
+static struct gdsc oxilicx_gdsc = {
+ .gdscr = 0x4034,
+ .pd = {
+ .name = "oxilicx",
+ },
+};
+
static struct clk_regmap *mmcc_msm8974_clocks[] = {
[MMSS_AHB_CLK_SRC] = &mmss_ahb_clk_src.clkr,
[MMSS_AXI_CLK_SRC] = &mmss_axi_clk_src.clkr,
@@ -2525,6 +2561,15 @@ static const struct qcom_reset_map mmcc_msm8974_resets[] = {
[OCMEMNOC_RESET] = { 0x50b0 },
};
+static struct gdsc *mmcc_msm8974_gdscs[] = {
+ [VENUS0_GDSC] = &venus0_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+ [CAMSS_JPEG_GDSC] = &camss_jpeg_gdsc,
+ [CAMSS_VFE_GDSC] = &camss_vfe_gdsc,
+ [OXILI_GDSC] = &oxili_gdsc,
+ [OXILICX_GDSC] = &oxilicx_gdsc,
+};
+
static const struct regmap_config mmcc_msm8974_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -2539,6 +2584,8 @@ static const struct qcom_cc_desc mmcc_msm8974_desc = {
.num_clks = ARRAY_SIZE(mmcc_msm8974_clocks),
.resets = mmcc_msm8974_resets,
.num_resets = ARRAY_SIZE(mmcc_msm8974_resets),
+ .gdscs = mmcc_msm8974_gdscs,
+ .num_gdscs = ARRAY_SIZE(mmcc_msm8974_gdscs),
};
static const struct of_device_id mmcc_msm8974_match_table[] = {
diff --git a/drivers/clk/qcom/rpmcc-apq8064.c b/drivers/clk/qcom/rpmcc-apq8064.c
new file mode 100644
index 000000000000..392cca7d69c1
--- /dev/null
+++ b/drivers/clk/qcom/rpmcc-apq8064.c
@@ -0,0 +1,347 @@
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/mfd/qcom_rpm.h>
+#include <dt-bindings/mfd/qcom-rpm.h>
+
+struct rpm_cc {
+ struct clk_onecell_data data;
+ struct clk *clks[];
+};
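+
+/*
+ * One RPM-owned clock. Rates are exchanged with the RPM in kHz;
+ * @factor converts the kHz value back to Hz for the clk framework.
+ */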
+struct rpm_clk {
+ const int rpm_clk_id;
+ struct qcom_rpm *rpm;
+ unsigned last_set_khz;
+ bool enabled;
+ bool branch; /* true: RPM only accepts 1 for ON and 0 for OFF */
+ unsigned factor;
+ struct clk_hw hw;
+};
+
+#define to_rpm_clk(_hw) container_of(_hw, struct rpm_clk, hw)
+
+static int rpm_clk_prepare(struct clk_hw *hw)
+{
+ struct rpm_clk *r = to_rpm_clk(hw);
+ uint32_t value;
+ int rc = 0;
+ unsigned long this_khz;
+
+ this_khz = r->last_set_khz;
+ /* Don't send requests to the RPM if the rate has not been set. */
+ if (r->last_set_khz == 0)
+ goto out;
+
+ value = this_khz;
+ if (r->branch)
+ value = !!value;
+
+ rc = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
+ r->rpm_clk_id, &value, 1);
+
+out:
+ if (!rc)
+ r->enabled = true;
+ return rc;
+}
+
+static void rpm_clk_unprepare(struct clk_hw *hw)
+{
+ struct rpm_clk *r = to_rpm_clk(hw);
+
+ if (r->last_set_khz) {
+ uint32_t value = 0;
+ int rc;
+
+ rc = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
+ r->rpm_clk_id, &value, 1);
+ if (rc)
+ return;
+
+ }
+ r->enabled = false;
+}
+
+static int rpm_clk_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long prate)
+{
+ struct rpm_clk *r = to_rpm_clk(hw);
+ unsigned long this_khz;
+ int rc = 0;
+
+ this_khz = DIV_ROUND_UP(rate, r->factor);
+
+ if (r->enabled) {
+ uint32_t value = this_khz;
+
+ rc = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
+ r->rpm_clk_id, &value, 1);
+ if (rc)
+ goto out;
+ }
+
+ r->last_set_khz = this_khz;
+
+out:
+ return rc;
+}
+
+static long rpm_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ return rate;
+}
+
+static unsigned long rpm_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rpm_clk *r = to_rpm_clk(hw);
+ u32 val;
+ int rc;
+
+ rc = qcom_rpm_read(r->rpm, r->rpm_clk_id, &val, 1);
+ if (rc < 0)
+ return 0;
+
+ return val * r->factor;
+}
+
+static unsigned long rpm_branch_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rpm_clk *r = to_rpm_clk(hw);
+
+ return r->last_set_khz * r->factor;
+}
+
+static const struct clk_ops branch_clk_ops_rpm = {
+ .prepare = rpm_clk_prepare,
+ .unprepare = rpm_clk_unprepare,
+ .recalc_rate = rpm_branch_clk_recalc_rate,
+ .round_rate = rpm_clk_round_rate,
+};
+
+static const struct clk_ops clk_ops_rpm = {
+ .prepare = rpm_clk_prepare,
+ .unprepare = rpm_clk_unprepare,
+ .set_rate = rpm_clk_set_rate,
+ .recalc_rate = rpm_clk_recalc_rate,
+ .round_rate = rpm_clk_round_rate,
+};
+
+static struct rpm_clk pxo_clk = {
+ .rpm_clk_id = QCOM_RPM_PXO_CLK,
+ .branch = true,
+ .factor = 1000,
+ .last_set_khz = 27000,
+ .hw.init = &(struct clk_init_data){
+ .name = "pxo",
+ .ops = &branch_clk_ops_rpm,
+ },
+};
+
+static struct rpm_clk cxo_clk = {
+ .rpm_clk_id = QCOM_RPM_CXO_CLK,
+ .branch = true,
+ .factor = 1000,
+ .last_set_khz = 19200,
+ .hw.init = &(struct clk_init_data){
+ .name = "cxo",
+ .ops = &branch_clk_ops_rpm,
+ },
+};
+
+static struct rpm_clk afab_clk = {
+ .rpm_clk_id = QCOM_RPM_APPS_FABRIC_CLK,
+ .factor = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "afab_clk",
+ .ops = &clk_ops_rpm,
+ },
+};
+
+static struct rpm_clk cfpb_clk = {
+ .rpm_clk_id = QCOM_RPM_CFPB_CLK,
+ .factor = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "cfpb_clk",
+ .ops = &clk_ops_rpm,
+ },
+};
+
+static struct rpm_clk daytona_clk = {
+ .rpm_clk_id = QCOM_RPM_DAYTONA_FABRIC_CLK,
+ .factor = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "daytona_clk",
+ .ops = &clk_ops_rpm,
+ },
+};
+
+static struct rpm_clk ebi1_clk = {
+ .rpm_clk_id = QCOM_RPM_EBI1_CLK,
+ .factor = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "ebi1_clk",
+ .ops = &clk_ops_rpm,
+ },
+};
+
+static struct rpm_clk mmfab_clk = {
+ .rpm_clk_id = QCOM_RPM_MM_FABRIC_CLK,
+ .factor = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "mmfab_clk",
+ .ops = &clk_ops_rpm,
+ },
+};
+
+static struct rpm_clk mmfpb_clk = {
+ .rpm_clk_id = QCOM_RPM_MMFPB_CLK,
+ .factor = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "mmfpb_clk",
+ .ops = &clk_ops_rpm,
+ },
+};
+
+static struct rpm_clk sfab_clk = {
+ .rpm_clk_id = QCOM_RPM_SYS_FABRIC_CLK,
+ .factor = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "sfab_clk",
+ .ops = &clk_ops_rpm,
+ },
+};
+
+static struct rpm_clk sfpb_clk = {
+ .rpm_clk_id = QCOM_RPM_SFPB_CLK,
+ .factor = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "sfpb_clk",
+ .ops = &clk_ops_rpm,
+ },
+};
+
+/*
+static struct rpm_clk qdss_clk = {
+ .rpm_clk_id = QCOM_RPM_QDSS_CLK,
+ .factor = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "qdss_clk",
+ .ops = &clk_ops_rpm,
+ },
+};
+*/
+
+static struct rpm_clk *rpm_clks[] = {
+ [QCOM_RPM_PXO_CLK] = &pxo_clk,
+ [QCOM_RPM_CXO_CLK] = &cxo_clk,
+ [QCOM_RPM_APPS_FABRIC_CLK] = &afab_clk,
+ [QCOM_RPM_CFPB_CLK] = &cfpb_clk,
+ [QCOM_RPM_DAYTONA_FABRIC_CLK] = &daytona_clk,
+ [QCOM_RPM_EBI1_CLK] = &ebi1_clk,
+ [QCOM_RPM_MM_FABRIC_CLK] = &mmfab_clk,
+ [QCOM_RPM_MMFPB_CLK] = &mmfpb_clk,
+ [QCOM_RPM_SYS_FABRIC_CLK] = &sfab_clk,
+ [QCOM_RPM_SFPB_CLK] = &sfpb_clk,
+ /* [QCOM_RPM_QDSS_CLK] = &qdss_clk, needs more checking */
+};
+
+static int rpm_clk_probe(struct platform_device *pdev)
+{
+ struct clk **clks;
+ struct clk *clk;
+ struct rpm_cc *cc;
+ struct qcom_rpm *rpm;
+ int num_clks = ARRAY_SIZE(rpm_clks);
+ struct clk_onecell_data *data;
+ int i, ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ cc = devm_kzalloc(&pdev->dev, sizeof(*cc) + sizeof(*clks) * num_clks,
+ GFP_KERNEL);
+ if (!cc)
+ return -ENOMEM;
+
+ clks = cc->clks;
+ data = &cc->data;
+ data->clks = clks;
+ data->clk_num = num_clks;
+
+ rpm = dev_get_drvdata(pdev->dev.parent);
+ if (!rpm) {
+ dev_err(&pdev->dev, "unable to retrieve handle to rpm\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < num_clks; i++) {
+ if (!rpm_clks[i]) {
+ clks[i] = ERR_PTR(-ENOENT);
+ continue;
+ }
+ rpm_clks[i]->rpm = rpm;
+ clk = devm_clk_register(&pdev->dev, &rpm_clks[i]->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ clks[i] = clk;
+ }
+
+ ret = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get,
+ data);
+ if (ret)
+ return ret;
+
+ /* Hold an active set vote for the Apps fabric clock */
+ clk_set_rate(afab_clk.hw.clk, INT_MAX);
+ clk_prepare_enable(afab_clk.hw.clk);
+
+ return 0;
+}
+
+static int rpm_clk_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ return 0;
+}
+
+static const struct of_device_id rpm_clk_of_match[] = {
+ { .compatible = "qcom,apq8064-rpm-clk" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, rpm_clk_of_match);
+
+static struct platform_driver rpm_clk_driver = {
+ .driver = {
+ .name = "qcom-rpm-clk",
+ .of_match_table = rpm_clk_of_match,
+ },
+ .probe = rpm_clk_probe,
+ .remove = rpm_clk_remove,
+};
+
+static int __init rpm_clk_init(void)
+{
+ return platform_driver_register(&rpm_clk_driver);
+}
+subsys_initcall(rpm_clk_init);
+
+static void __exit rpm_clk_exit(void)
+{
+ platform_driver_unregister(&rpm_clk_driver);
+}
+module_exit(rpm_clk_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
+MODULE_DESCRIPTION("Driver for the RPM clocks");
diff --git a/drivers/clk/qcom/rpmcc.c b/drivers/clk/qcom/rpmcc.c
new file mode 100644
index 000000000000..d0a3f1c63f8c
--- /dev/null
+++ b/drivers/clk/qcom/rpmcc.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+#include "clk-smd-rpm.h"
+#include <dt-bindings/clock/qcom,rpmcc-msm8916.h>
+
+#define CXO_ID 0x0
+#define QDSS_ID 0x1
+#define BUS_SCALING 0x2
+
+#define PCNOC_ID 0x0
+#define SNOC_ID 0x1
+#define BIMC_ID 0x0
+
+#define BB_CLK1_ID 0x1
+#define BB_CLK2_ID 0x2
+#define RF_CLK1_ID 0x4
+#define RF_CLK2_ID 0x5
+
+struct rpm_cc {
+ struct qcom_rpm *rpm;
+ struct clk_onecell_data data;
+ struct clk *clks[];
+};
+
+/* SMD clocks */
+DEFINE_CLK_SMD_RPM(pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, PCNOC_ID, NULL);
+DEFINE_CLK_SMD_RPM(snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, SNOC_ID, NULL);
+DEFINE_CLK_SMD_RPM(bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, BIMC_ID, NULL);
+
+DEFINE_CLK_SMD_RPM_BRANCH(xo, xo_a, QCOM_SMD_RPM_MISC_CLK, CXO_ID, 19200000);
+DEFINE_CLK_SMD_RPM_QDSS(qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK, QDSS_ID);
+
+/* SMD_XO_BUFFER */
+DEFINE_CLK_SMD_RPM_XO_BUFFER(bb_clk1, bb_clk1_a, BB_CLK1_ID);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(bb_clk2, bb_clk2_a, BB_CLK2_ID);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(rf_clk1, rf_clk1_a, RF_CLK1_ID);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(rf_clk2, rf_clk2_a, RF_CLK2_ID);
+
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(bb_clk1_pin, bb_clk1_a_pin, BB_CLK1_ID);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(bb_clk2_pin, bb_clk2_a_pin, BB_CLK2_ID);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(rf_clk1_pin, rf_clk1_a_pin, RF_CLK1_ID);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(rf_clk2_pin, rf_clk2_a_pin, RF_CLK2_ID);
+
+static struct clk_smd_rpm *rpmcc_msm8916_clks[] = {
+ [RPM_XO_CLK_SRC] = &xo,
+ [RPM_XO_A_CLK_SRC] = &xo_a,
+ [RPM_PCNOC_CLK] = &pcnoc_clk,
+ [RPM_PCNOC_A_CLK] = &pcnoc_a_clk,
+ [RPM_SNOC_CLK] = &snoc_clk,
+ [RPM_SNOC_A_CLK] = &snoc_a_clk,
+ [RPM_BIMC_CLK] = &bimc_clk,
+ [RPM_BIMC_A_CLK] = &bimc_a_clk,
+ [RPM_QDSS_CLK] = &qdss_clk,
+ [RPM_QDSS_A_CLK] = &qdss_a_clk,
+ [RPM_BB_CLK1] = &bb_clk1,
+ [RPM_BB_CLK1_A] = &bb_clk1_a,
+ [RPM_BB_CLK2] = &bb_clk2,
+ [RPM_BB_CLK2_A] = &bb_clk2_a,
+ [RPM_RF_CLK1] = &rf_clk1,
+ [RPM_RF_CLK1_A] = &rf_clk1_a,
+ [RPM_RF_CLK2] = &rf_clk2,
+ [RPM_RF_CLK2_A] = &rf_clk2_a,
+ [RPM_BB_CLK1_PIN] = &bb_clk1_pin,
+ [RPM_BB_CLK1_A_PIN] = &bb_clk1_a_pin,
+ [RPM_BB_CLK2_PIN] = &bb_clk2_pin,
+ [RPM_BB_CLK2_A_PIN] = &bb_clk2_a_pin,
+ [RPM_RF_CLK1_PIN] = &rf_clk1_pin,
+ [RPM_RF_CLK1_A_PIN] = &rf_clk1_a_pin,
+ [RPM_RF_CLK2_PIN] = &rf_clk2_pin,
+ [RPM_RF_CLK2_A_PIN] = &rf_clk2_a_pin,
+};
+
+struct rpmcc_desc {
+ struct clk_smd_rpm **clks;
+ size_t num_clks;
+};
+
+static const struct rpmcc_desc rpmcc_msm8916 = {
+ .clks = rpmcc_msm8916_clks,
+ .num_clks = ARRAY_SIZE(rpmcc_msm8916_clks),
+};
+
+static const struct of_device_id rpmcc_match_table[] = {
+ { .compatible = "qcom,rpmcc-msm8916", .data = &rpmcc_msm8916},
+ { }
+};
+MODULE_DEVICE_TABLE(of, rpmcc_match_table);
+
+static int rpmcc_probe(struct platform_device *pdev)
+{
+ struct clk **clks;
+ struct clk *clk;
+ struct clk_smd_rpm **rpm_clks;
+ struct rpm_cc *rcc;
+ const struct rpmcc_desc *desc;
+ struct qcom_smd_rpm *rpm;
+ struct clk_onecell_data *data;
+ int ret, i;
+ size_t num_clks;
+ const struct of_device_id *match;
+
+ rpm = dev_get_drvdata(pdev->dev.parent);
+ if (!rpm) {
+ dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
+ return -ENODEV;
+ }
+
+ match = of_match_device(rpmcc_match_table, &pdev->dev);
+ if (!match)
+ return -EINVAL;
+
+ desc = match->data;
+ rpm_clks = desc->clks;
+ num_clks = desc->num_clks;
+
+ rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc) + sizeof(*clks) * num_clks,
+ GFP_KERNEL);
+ if (!rcc)
+ return -ENOMEM;
+
+ clks = rcc->clks;
+ data = &rcc->data;
+ data->clks = clks;
+ data->clk_num = num_clks;
+
+ for (i = 0; i < num_clks; i++) {
+ if (!rpm_clks[i]) {
+ clks[i] = ERR_PTR(-ENOENT);
+ continue;
+ }
+
+ rpm_clks[i]->rpm = rpm;
+ clk = devm_clk_register(&pdev->dev, &rpm_clks[i]->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ clks[i] = clk;
+ }
+
+ ret = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get,
+ data);
+ if (ret)
+ return ret;
+
+ ret = clk_smd_rpm_enable_scaling(rpm);
+ if (ret)
+ return ret;
+
+ /* Hold a vote for max rates */
+ clk_set_rate(bimc_a_clk.hw.clk, INT_MAX);
+ clk_prepare_enable(bimc_a_clk.hw.clk);
+ clk_set_rate(bimc_clk.hw.clk, INT_MAX);
+ clk_prepare_enable(bimc_clk.hw.clk);
+ clk_set_rate(snoc_clk.hw.clk, INT_MAX);
+ clk_prepare_enable(snoc_clk.hw.clk);
+ clk_prepare_enable(xo.hw.clk);
+
+ return 0;
+}
+
+static int rpmcc_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ return 0;
+}
+
+static struct platform_driver rpmcc_driver = {
+ .driver = {
+ .name = "qcom-rpmcc",
+ .of_match_table = rpmcc_match_table,
+ },
+ .probe = rpmcc_probe,
+ .remove = rpmcc_remove,
+};
+
+static int __init rpmcc_init(void)
+{
+ return platform_driver_register(&rpmcc_driver);
+}
+core_initcall(rpmcc_init);
+
+static void __exit rpmcc_exit(void)
+{
+ platform_driver_unregister(&rpmcc_driver);
+}
+module_exit(rpmcc_exit);
+
+MODULE_DESCRIPTION("Qualcomm RPM Clock Controller Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:rpmcc");
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index c737f7359974..852d8ef2ee8b 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -94,6 +94,15 @@ config ARM_OMAP2PLUS_CPUFREQ
depends on ARCH_OMAP2PLUS
default ARCH_OMAP2PLUS
+config ARM_QCOM_CPUFREQ
+ tristate "Qualcomm based"
+ depends on ARCH_QCOM
+ select PM_OPP
+ help
+ This adds the CPUFreq driver for Qualcomm SoC based boards.
+
+ If in doubt, say N.
+
config ARM_S3C_CPUFREQ
bool
help
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index e8bce2b44666..849e88d50aef 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_ARM_MT8173_CPUFREQ) += mt8173-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
+obj-$(CONFIG_ARM_QCOM_CPUFREQ) += qcom-cpufreq.o
obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o
obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 7c0d70e2a861..e3152155a180 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -34,6 +34,9 @@ struct private_data {
struct regulator *cpu_reg;
struct thermal_cooling_device *cdev;
unsigned int voltage_tolerance; /* in percentage */
+ struct notifier_block opp_nb;
+ struct mutex lock;
+ unsigned long opp_freq;
};
static struct freq_attr *cpufreq_dt_attr[] = {
@@ -42,17 +45,56 @@ static struct freq_attr *cpufreq_dt_attr[] = {
NULL,
};
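+/*
+ * When the OPP core adjusts the voltage of an OPP, re-apply it to the
+ * regulator if that OPP is the one the CPU is currently running at.
+ */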
+static int opp_notifier(struct notifier_block *nb, unsigned long event,
+ void *data)
+{
+ struct dev_pm_opp *opp = data;
+ struct private_data *priv = container_of(nb, struct private_data,
+ opp_nb);
+ struct device *cpu_dev = priv->cpu_dev;
+ struct regulator *cpu_reg = priv->cpu_reg;
+ unsigned long volt, tol, freq;
+ int ret = 0;
+
+ switch (event) {
+ case OPP_EVENT_ADJUST_VOLTAGE:
+ volt = dev_pm_opp_get_voltage(opp);
+ freq = dev_pm_opp_get_freq(opp);
+ tol = volt * priv->voltage_tolerance / 100;
+
+ mutex_lock(&priv->lock);
+ if (freq == priv->opp_freq)
+ ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
+ mutex_unlock(&priv->lock);
+ if (ret) {
+ dev_err(cpu_dev, "failed to adjust voltage: %d\n",
+ ret);
+ return ret;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
struct dev_pm_opp *opp;
struct cpufreq_frequency_table *freq_table = policy->freq_table;
struct clk *cpu_clk = policy->clk;
+ struct clk *l2_clk = policy->l2_clk;
struct private_data *priv = policy->driver_data;
struct device *cpu_dev = priv->cpu_dev;
struct regulator *cpu_reg = priv->cpu_reg;
unsigned long volt = 0, volt_old = 0, tol = 0;
- unsigned int old_freq, new_freq;
+ unsigned int old_freq, new_freq, l2_freq;
+ unsigned long new_l2_freq = 0;
long freq_Hz, freq_exact;
+ unsigned long opp_freq = 0;
int ret;
freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
@@ -63,8 +105,8 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
new_freq = freq_Hz / 1000;
old_freq = clk_get_rate(cpu_clk) / 1000;
+ mutex_lock(&priv->lock);
if (!IS_ERR(cpu_reg)) {
- unsigned long opp_freq;
rcu_read_lock();
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
@@ -72,7 +114,8 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
rcu_read_unlock();
dev_err(cpu_dev, "failed to find OPP for %ld\n",
freq_Hz);
- return PTR_ERR(opp);
+ ret = PTR_ERR(opp);
+ goto out;
}
volt = dev_pm_opp_get_voltage(opp);
opp_freq = dev_pm_opp_get_freq(opp);
@@ -93,7 +136,7 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
if (ret) {
dev_err(cpu_dev, "failed to scale voltage up: %d\n",
ret);
- return ret;
+ goto out;
}
}
@@ -102,7 +145,31 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
if (!IS_ERR(cpu_reg) && volt_old > 0)
regulator_set_voltage_tol(cpu_reg, volt_old, tol);
- return ret;
+ goto out;
+ }
+
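+ /*
+ * Each CPU votes for one of three L2 rate buckets based on its own
+ * frequency; the highest vote among all present CPUs decides the L2
+ * rate actually set below.
+ */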
+ if (!IS_ERR(l2_clk) && policy->l2_rate[0] && policy->l2_rate[1] &&
+ policy->l2_rate[2]) {
+ static unsigned long krait_l2[CONFIG_NR_CPUS] = { };
+ int cpu;
+
+ if (freq_exact >= policy->l2_rate[2])
+ new_l2_freq = policy->l2_rate[2];
+ else if (freq_exact >= policy->l2_rate[1])
+ new_l2_freq = policy->l2_rate[1];
+ else
+ new_l2_freq = policy->l2_rate[0];
+
+ krait_l2[policy->cpu] = new_l2_freq;
+ for_each_present_cpu(cpu)
+ new_l2_freq = max(new_l2_freq, krait_l2[cpu]);
+
+ l2_freq = clk_get_rate(l2_clk);
+
+ /* Scale the L2 with the core; a failed L2 scale is not fatal */
+ if (l2_freq != new_l2_freq)
+ clk_set_rate(l2_clk, new_l2_freq);
}
/* scaling down? scale voltage after frequency */
@@ -112,18 +179,24 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
dev_err(cpu_dev, "failed to scale voltage down: %d\n",
ret);
clk_set_rate(cpu_clk, old_freq * 1000);
+ goto out;
}
}
+ priv->opp_freq = opp_freq;
+
+out:
+ mutex_unlock(&priv->lock);
return ret;
}
static int allocate_resources(int cpu, struct device **cdev,
- struct regulator **creg, struct clk **cclk)
+ struct regulator **creg, struct clk **cclk,
+ struct clk **l2)
{
struct device *cpu_dev;
struct regulator *cpu_reg;
- struct clk *cpu_clk;
+ struct clk *cpu_clk, *l2_clk = NULL;
int ret = 0;
char *reg_cpu0 = "cpu0", *reg_cpu = "cpu", *reg;
@@ -183,6 +256,10 @@ try_again:
*cdev = cpu_dev;
*creg = cpu_reg;
*cclk = cpu_clk;
+
+ l2_clk = clk_get(cpu_dev, "l2");
+ if (!IS_ERR(l2_clk))
+ *l2 = l2_clk;
}
return ret;
@@ -192,17 +269,20 @@ static int cpufreq_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *freq_table;
struct device_node *np;
+ struct device_node *l2_np;
struct private_data *priv;
struct device *cpu_dev;
struct regulator *cpu_reg;
- struct clk *cpu_clk;
struct dev_pm_opp *suspend_opp;
+ struct clk *cpu_clk, *l2_clk;
unsigned long min_uV = ~0, max_uV = 0;
unsigned int transition_latency;
bool need_update = false;
int ret;
+ struct srcu_notifier_head *opp_srcu_head;
- ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
+ ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk,
+ &l2_clk);
if (ret) {
pr_err("%s: Failed to allocate resources: %d\n", __func__, ret);
return ret;
@@ -277,6 +357,19 @@ static int cpufreq_init(struct cpufreq_policy *policy)
goto out_free_opp;
}
+ mutex_init(&priv->lock);
+
+ opp_srcu_head = dev_pm_opp_get_notifier(cpu_dev);
+ if (IS_ERR(opp_srcu_head)) {
+ ret = PTR_ERR(opp_srcu_head);
+ goto out_free_priv;
+ }
+
+ priv->opp_nb.notifier_call = opp_notifier;
+ ret = srcu_notifier_chain_register(opp_srcu_head, &priv->opp_nb);
+ if (ret)
+ goto out_free_priv;
+
of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
if (!transition_latency)
@@ -326,7 +419,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
pr_err("failed to init cpufreq table: %d\n", ret);
- goto out_free_priv;
+ goto out_unregister_nb;
}
priv->cpu_dev = cpu_dev;
@@ -340,6 +433,11 @@ static int cpufreq_init(struct cpufreq_policy *policy)
if (suspend_opp)
policy->suspend_freq = dev_pm_opp_get_freq(suspend_opp) / 1000;
rcu_read_unlock();
+ policy->l2_clk = l2_clk;
+
+ l2_np = of_find_node_by_name(NULL, "qcom,l2");
+ if (l2_np) {
+ of_property_read_u32_array(l2_np, "qcom,l2-rates",
+ policy->l2_rate, 3);
+ of_node_put(l2_np);
+ }
ret = cpufreq_table_validate_and_show(policy, freq_table);
if (ret) {
@@ -365,6 +463,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
out_free_cpufreq_table:
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_unregister_nb:
+ srcu_notifier_chain_unregister(opp_srcu_head, &priv->opp_nb);
out_free_priv:
kfree(priv);
out_free_opp:
@@ -438,7 +538,7 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
{
struct device *cpu_dev;
struct regulator *cpu_reg;
- struct clk *cpu_clk;
+ struct clk *cpu_clk, *l2_clk;
int ret;
/*
@@ -448,7 +548,7 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
*
* FIXME: Is checking this only for CPU0 sufficient ?
*/
- ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk);
+ ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk, &l2_clk);
if (ret)
return ret;
diff --git a/drivers/cpufreq/qcom-cpufreq.c b/drivers/cpufreq/qcom-cpufreq.c
new file mode 100644
index 000000000000..c9f86a062cdd
--- /dev/null
+++ b/drivers/cpufreq/qcom-cpufreq.c
@@ -0,0 +1,204 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/cpufreq-dt.h>
+
+static void __init get_krait_bin_format_a(int *speed, int *pvs, int *pvs_ver)
+{
+ void __iomem *base;
+ u32 pte_efuse;
+
+ *speed = *pvs = *pvs_ver = 0;
+
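+ /* PTE efuse word holding the speed and PVS bins on format A parts */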
+ base = ioremap(0x007000c0, 4);
+ if (!base) {
+ pr_warn("Unable to read efuse data. Defaulting to 0!\n");
+ return;
+ }
+
+ pte_efuse = readl_relaxed(base);
+ iounmap(base);
+
+ *speed = pte_efuse & 0xf;
+ if (*speed == 0xf)
+ *speed = (pte_efuse >> 4) & 0xf;
+
+ if (*speed == 0xf) {
+ *speed = 0;
+ pr_warn("Speed bin: Defaulting to %d\n", *speed);
+ } else {
+ pr_info("Speed bin: %d\n", *speed);
+ }
+
+ *pvs = (pte_efuse >> 10) & 0x7;
+ if (*pvs == 0x7)
+ *pvs = (pte_efuse >> 13) & 0x7;
+
+ if (*pvs == 0x7) {
+ *pvs = 0;
+ pr_warn("PVS bin: Defaulting to %d\n", *pvs);
+ } else {
+ pr_info("PVS bin: %d\n", *pvs);
+ }
+}
+
+static void __init get_krait_bin_format_b(int *speed, int *pvs, int *pvs_ver)
+{
+ u32 pte_efuse, redundant_sel;
+ void __iomem *base;
+
+ *speed = 0;
+ *pvs = 0;
+ *pvs_ver = 0;
+
+ base = ioremap(0xfc4b80b0, 8);
+ if (!base) {
+ pr_warn("Unable to read efuse data. Defaulting to 0!\n");
+ return;
+ }
+
+ pte_efuse = readl_relaxed(base);
+ redundant_sel = (pte_efuse >> 24) & 0x7;
+ *speed = pte_efuse & 0x7;
+ /* 4 bits of PVS are in efuse register bits 31, 8-6. */
+ *pvs = ((pte_efuse >> 28) & 0x8) | ((pte_efuse >> 6) & 0x7);
+ *pvs_ver = (pte_efuse >> 4) & 0x3;
+
+ switch (redundant_sel) {
+ case 1:
+ *speed = (pte_efuse >> 27) & 0xf;
+ break;
+ case 2:
+ *pvs = (pte_efuse >> 27) & 0xf;
+ break;
+ }
+
+ /* Check SPEED_BIN_BLOW_STATUS */
+ if (pte_efuse & BIT(3)) {
+ pr_info("Speed bin: %d\n", *speed);
+ } else {
+ pr_warn("Speed bin not set. Defaulting to 0!\n");
+ *speed = 0;
+ }
+
+ /* Check PVS_BLOW_STATUS */
+ pte_efuse = readl_relaxed(base + 0x4) & BIT(21);
+ if (pte_efuse) {
+ pr_info("PVS bin: %d\n", *pvs);
+ } else {
+ pr_warn("PVS bin not set. Defaulting to 0!\n");
+ *pvs = 0;
+ }
+
+ pr_info("PVS version: %d\n", *pvs_ver);
+ iounmap(base);
+}
+
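+/*
+ * The PVS tables are read from a "qcom,pvs" node holding one
+ * "qcom,speed<S>-pvs<P>-bin-v<V>" array per fused bin. As a purely
+ * illustrative (hypothetical) example of a format B node:
+ *
+ * qcom,pvs {
+ * qcom,pvs-format-b;
+ * qcom,speed0-pvs0-bin-v0 =
+ * <384000000 950000 100000>,
+ * <1190400000 1150000 450000>;
+ * };
+ *
+ * Each row is <freq-Hz volt-uV [curr-uA]>; the optional current column
+ * is skipped when the OPPs are added.
+ */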
+static int __init qcom_cpufreq_populate_opps(void)
+{
+ int len, rows, cols, i, k, speed, pvs, pvs_ver;
+ char table_name[] = "qcom,speedXX-pvsXX-bin-vXX";
+ struct device_node *np;
+ struct device *dev;
+ int cpu = 0;
+
+ np = of_find_node_by_name(NULL, "qcom,pvs");
+ if (!np)
+ return -ENODEV;
+
+ if (of_property_read_bool(np, "qcom,pvs-format-a")) {
+ get_krait_bin_format_a(&speed, &pvs, &pvs_ver);
+ cols = 2;
+ } else if (of_property_read_bool(np, "qcom,pvs-format-b")) {
+ get_krait_bin_format_b(&speed, &pvs, &pvs_ver);
+ cols = 3;
+ } else {
+ return -ENODEV;
+ }
+
+ snprintf(table_name, sizeof(table_name),
+ "qcom,speed%d-pvs%d-bin-v%d", speed, pvs, pvs_ver);
+
+ if (!of_find_property(np, table_name, &len))
+ return -EINVAL;
+
+ len /= sizeof(u32);
+ if (len % cols || len == 0)
+ return -EINVAL;
+
+ rows = len / cols;
+
+ for (i = 0, k = 0; i < rows; i++) {
+ u32 freq, volt;
+
+ of_property_read_u32_index(np, table_name, k++, &freq);
+ of_property_read_u32_index(np, table_name, k++, &volt);
+ while (k % cols)
+ k++; /* Skip uA entries if present */
+ for_each_possible_cpu(cpu) {
+ dev = get_cpu_device(cpu);
+ if (!dev)
+ return -ENODEV;
+ if (dev_pm_opp_add(dev, freq, volt))
+ pr_warn("failed to add OPP %u\n", freq);
+ }
+ }
+
+ return 0;
+}
+
+static int __init qcom_cpufreq_driver_init(void)
+{
+ struct cpufreq_dt_platform_data pdata = { .independent_clocks = true };
+ struct platform_device_info devinfo = {
+ .name = "cpufreq-dt",
+ .data = &pdata,
+ .size_data = sizeof(pdata),
+ };
+ struct device *cpu_dev;
+ struct device_node *np;
+ int ret;
+
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev)
+ return -ENODEV;
+
+ np = of_node_get(cpu_dev->of_node);
+ if (!np)
+ return -ENOENT;
+
+ if (!of_device_is_compatible(np, "qcom,krait")) {
+ of_node_put(np);
+ return -ENODEV;
+ }
+ of_node_put(np);
+
+ ret = qcom_cpufreq_populate_opps();
+ if (ret)
+ return ret;
+
+ return PTR_ERR_OR_ZERO(platform_device_register_full(&devinfo));
+}
+module_init(qcom_cpufreq_driver_init);
+
+MODULE_DESCRIPTION("Qualcomm CPUfreq driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 21340e0be73e..325be7fe4e8d 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -74,3 +74,10 @@ config ARM_MVEBU_V7_CPUIDLE
depends on ARCH_MVEBU && !ARM64
help
Select this to enable cpuidle on Armada 370, 38x and XP processors.
+
+config ARM_QCOM_CPUIDLE
+ bool "CPU Idle Driver for QCOM processors"
+ depends on ARCH_QCOM
+ select ARM_CPUIDLE
+ help
+ Select this to enable cpuidle on QCOM processors.
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index b4584757dae0..b7336aec3386 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -558,4 +558,23 @@ config DMATEST
config DMA_ENGINE_RAID
bool
+config QCOM_BAM_DMA
+ tristate "QCOM BAM DMA support"
+ depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ ---help---
+ Enable support for the QCOM BAM DMA controller. This controller
+ provides DMA capabilities for a variety of on-chip devices.
+
+config QCOM_ADM
+ tristate "Qualcomm ADM support"
+ depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ ---help---
+ Enable support for the Qualcomm ADM DMA controller. This controller
+ provides DMA capabilities for both general purpose and on-chip
+ peripheral devices.
+
endif
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 7711a7180726..ea264ee33eff 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
+obj-$(CONFIG_QCOM_ADM) += qcom_adm.o
obj-$(CONFIG_RENESAS_DMA) += sh/
obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
diff --git a/drivers/dma/qcom_adm.c b/drivers/dma/qcom_adm.c
new file mode 100644
index 000000000000..9c4f8962f0c6
--- /dev/null
+++ b/drivers/dma/qcom_adm.c
@@ -0,0 +1,856 @@
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+/* ADM registers - calculated from channel number and security domain */
+#define ADM_CHAN_MULTI 0x4
+#define ADM_CI_MULTI 0x4
+#define ADM_CRCI_MULTI 0x4
+#define ADM_EE_MULTI 0x800
+#define ADM_CHAN_OFFS(chan) (ADM_CHAN_MULTI * (chan))
+#define ADM_EE_OFFS(ee) (ADM_EE_MULTI * (ee))
+#define ADM_CHAN_EE_OFFS(chan, ee) (ADM_CHAN_OFFS(chan) + ADM_EE_OFFS(ee))
+#define ADM_CI_OFFS(ci) (ADM_CI_MULTI * (ci))
+#define ADM_CH_CMD_PTR(chan, ee) (ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_CH_RSLT(chan, ee) (0x40 + ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_CH_FLUSH_STATE0(chan, ee) (0x80 + ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_CH_STATUS_SD(chan, ee) (0x200 + ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_CH_CONF(chan) (0x240 + ADM_CHAN_OFFS(chan))
+#define ADM_CH_RSLT_CONF(chan, ee) (0x300 + ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_SEC_DOMAIN_IRQ_STATUS(ee) (0x380 + ADM_EE_OFFS(ee))
+#define ADM_CI_CONF(ci) (0x390 + (ci) * ADM_CI_MULTI)
+#define ADM_GP_CTL 0x3d8
+#define ADM_CRCI_CTL(crci, ee) (0x400 + (crci) * ADM_CRCI_MULTI + \
+ ADM_EE_OFFS(ee))
+
+/* channel status */
+#define ADM_CH_STATUS_VALID BIT(1)
+
+/* channel result */
+#define ADM_CH_RSLT_VALID BIT(31)
+#define ADM_CH_RSLT_ERR BIT(3)
+#define ADM_CH_RSLT_FLUSH BIT(2)
+#define ADM_CH_RSLT_TPD BIT(1)
+
+/* channel conf */
+#define ADM_CH_CONF_SHADOW_EN BIT(12)
+#define ADM_CH_CONF_MPU_DISABLE BIT(11)
+#define ADM_CH_CONF_PERM_MPU_CONF BIT(9)
+#define ADM_CH_CONF_FORCE_RSLT_EN BIT(7)
+#define ADM_CH_CONF_SEC_DOMAIN(ee) (((ee & 0x3) << 4) | ((ee & 0x4) << 11))
+
+/* channel result conf */
+#define ADM_CH_RSLT_CONF_FLUSH_EN BIT(1)
+#define ADM_CH_RSLT_CONF_IRQ_EN BIT(0)
+
+/* CRCI CTL */
+#define ADM_CRCI_CTL_MUX_SEL BIT(18)
+#define ADM_CRCI_CTL_RST BIT(17)
+
+/* CI configuration */
+#define ADM_CI_RANGE_END(x) ((x) << 24)
+#define ADM_CI_RANGE_START(x) ((x) << 16)
+#define ADM_CI_BURST_4_WORDS BIT(2)
+#define ADM_CI_BURST_8_WORDS BIT(3)
+
+/* GP CTL */
+#define ADM_GP_CTL_LP_EN BIT(12)
+#define ADM_GP_CTL_LP_CNT(x) ((x) << 8)
+
+/* Command pointer list entry */
+#define ADM_CPLE_LP BIT(31)
+#define ADM_CPLE_CMD_PTR_LIST BIT(29)
+
+/* Command list entry */
+#define ADM_CMD_LC BIT(31)
+#define ADM_CMD_DST_CRCI(n) (((n) & 0xf) << 7)
+#define ADM_CMD_SRC_CRCI(n) (((n) & 0xf) << 3)
+
+#define ADM_CMD_TYPE_SINGLE 0x0
+#define ADM_CMD_TYPE_BOX 0x3
+
+#define ADM_CRCI_MUX_SEL BIT(4)
+#define ADM_DESC_ALIGN 8
+#define ADM_MAX_XFER (SZ_64K-1)
+#define ADM_MAX_ROWS (SZ_64K-1)
+#define ADM_MAX_CHANNELS 16
+
+struct adm_desc_hw_box {
+ u32 cmd;
+ u32 src_addr;
+ u32 dst_addr;
+ u32 row_len;
+ u32 num_rows;
+ u32 row_offset;
+};
+
+struct adm_desc_hw_single {
+ u32 cmd;
+ u32 src_addr;
+ u32 dst_addr;
+ u32 len;
+};
+
+struct adm_async_desc {
+ struct virt_dma_desc vd;
+ struct adm_device *adev;
+
+ size_t length;
+ enum dma_transfer_direction dir;
+ dma_addr_t dma_addr;
+ size_t dma_len;
+
+ void *cpl;
+ dma_addr_t cp_addr;
+ u32 crci;
+ u32 mux;
+ u32 blk_size;
+};
+
+struct adm_chan {
+ struct virt_dma_chan vc;
+ struct adm_device *adev;
+
+ /* parsed from DT */
+ u32 id; /* channel id */
+
+ struct adm_async_desc *curr_txd;
+ struct dma_slave_config slave;
+ struct list_head node;
+
+ int error;
+ int initialized;
+};
+
+static inline struct adm_chan *to_adm_chan(struct dma_chan *common)
+{
+ return container_of(common, struct adm_chan, vc.chan);
+}
+
+struct adm_device {
+ void __iomem *regs;
+ struct device *dev;
+ struct dma_device common;
+ struct device_dma_parameters dma_parms;
+ struct adm_chan *channels;
+
+ u32 ee;
+
+ struct clk *core_clk;
+ struct clk *iface_clk;
+
+ int irq;
+};
+
+/**
+ * adm_free_chan - Frees dma resources associated with the specific channel
+ * @chan: dma channel
+ *
+ * Frees all allocated descriptors associated with this channel
+ */
+static void adm_free_chan(struct dma_chan *chan)
+{
+ /* free all queued descriptors */
+ vchan_free_chan_resources(to_virt_chan(chan));
+}
+
+/**
+ * adm_get_blksize - Get block size from burst value
+ * @burst: maxburst value from the slave configuration, in bytes
+ */
+static int adm_get_blksize(unsigned int burst)
+{
+ int ret;
+
+ switch (burst) {
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+		ret = ffs(burst >> 4) - 1;
+ break;
+ case 192:
+ ret = 4;
+ break;
+ case 256:
+ ret = 5;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
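+
+/*
+ * The mapping above works out as: bursts of 16/32/64/128 bytes give codes
+ * 0/1/2/3 (e.g. for 64: ffs(64 >> 4) - 1 = ffs(4) - 1 = 2), while 192 and
+ * 256 bytes are special-cased to codes 4 and 5.
+ */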
+
+/**
+ * adm_process_fc_descriptors - Process descriptors for flow controlled xfers
+ *
+ * @achan: ADM channel
+ * @desc: Descriptor memory pointer
+ * @sg: Scatterlist entry
+ * @crci: CRCI value
+ * @burst: Burst size of transaction
+ * @direction: DMA transfer direction
+ */
+static void *adm_process_fc_descriptors(struct adm_chan *achan,
+ void *desc, struct scatterlist *sg, u32 crci, u32 burst,
+ enum dma_transfer_direction direction)
+{
+ struct adm_desc_hw_box *box_desc = NULL;
+ struct adm_desc_hw_single *single_desc;
+ u32 remainder = sg_dma_len(sg);
+ u32 rows, row_offset, crci_cmd;
+ u32 mem_addr = sg_dma_address(sg);
+ u32 *incr_addr = &mem_addr;
+ u32 *src, *dst;
+
+ if (direction == DMA_DEV_TO_MEM) {
+ crci_cmd = ADM_CMD_SRC_CRCI(crci);
+ row_offset = burst;
+ src = &achan->slave.src_addr;
+ dst = &mem_addr;
+ } else {
+ crci_cmd = ADM_CMD_DST_CRCI(crci);
+ row_offset = burst << 16;
+ src = &mem_addr;
+ dst = &achan->slave.dst_addr;
+ }
+
+ while (remainder >= burst) {
+ box_desc = desc;
+ box_desc->cmd = ADM_CMD_TYPE_BOX | crci_cmd;
+ box_desc->row_offset = row_offset;
+ box_desc->src_addr = *src;
+ box_desc->dst_addr = *dst;
+
+ rows = remainder / burst;
+ rows = min_t(u32, rows, ADM_MAX_ROWS);
+ box_desc->num_rows = rows << 16 | rows;
+ box_desc->row_len = burst << 16 | burst;
+
+ *incr_addr += burst * rows;
+ remainder -= burst * rows;
+ desc += sizeof(*box_desc);
+ }
+
+ /* if leftover bytes, do one single descriptor */
+ if (remainder) {
+ single_desc = desc;
+ single_desc->cmd = ADM_CMD_TYPE_SINGLE | crci_cmd;
+ single_desc->len = remainder;
+ single_desc->src_addr = *src;
+ single_desc->dst_addr = *dst;
+ desc += sizeof(*single_desc);
+
+ if (sg_is_last(sg))
+ single_desc->cmd |= ADM_CMD_LC;
+ } else {
+ if (box_desc && sg_is_last(sg))
+ box_desc->cmd |= ADM_CMD_LC;
+ }
+
+ return desc;
+}
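+
+/*
+ * Worked example (illustrative): a 100-byte DMA_DEV_TO_MEM entry with a
+ * 16-byte burst is split by the loop above into one box descriptor covering
+ * 6 rows x 16 bytes, plus one single descriptor for the remaining 4 bytes.
+ */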
+
+/**
+ * adm_process_non_fc_descriptors - Process descriptors for non-fc xfers
+ *
+ * @achan: ADM channel
+ * @desc: Descriptor memory pointer
+ * @sg: Scatterlist entry
+ * @direction: DMA transfer direction
+ */
+static void *adm_process_non_fc_descriptors(struct adm_chan *achan,
+ void *desc, struct scatterlist *sg,
+ enum dma_transfer_direction direction)
+{
+ struct adm_desc_hw_single *single_desc;
+ u32 remainder = sg_dma_len(sg);
+ u32 mem_addr = sg_dma_address(sg);
+ u32 *incr_addr = &mem_addr;
+ u32 *src, *dst;
+
+ if (direction == DMA_DEV_TO_MEM) {
+ src = &achan->slave.src_addr;
+ dst = &mem_addr;
+ } else {
+ src = &mem_addr;
+ dst = &achan->slave.dst_addr;
+ }
+
+ do {
+ single_desc = desc;
+ single_desc->cmd = ADM_CMD_TYPE_SINGLE;
+ single_desc->src_addr = *src;
+ single_desc->dst_addr = *dst;
+ single_desc->len = (remainder > ADM_MAX_XFER) ?
+ ADM_MAX_XFER : remainder;
+
+ remainder -= single_desc->len;
+ *incr_addr += single_desc->len;
+ desc += sizeof(*single_desc);
+ } while (remainder);
+
+ /* set last command if this is the end of the whole transaction */
+ if (sg_is_last(sg))
+ single_desc->cmd |= ADM_CMD_LC;
+
+ return desc;
+}
+
+/**
+ * adm_prep_slave_sg - Prep slave sg transaction
+ *
+ * @chan: dma channel
+ * @sgl: scatter gather list
+ * @sg_len: number of entries in the scatter gather list
+ * @direction: DMA transfer direction
+ * @flags: DMA flags
+ * @context: transfer context (unused)
+ */
+static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
+ struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction direction, unsigned long flags,
+ void *context)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ struct adm_device *adev = achan->adev;
+ struct adm_async_desc *async_desc;
+ struct scatterlist *sg;
+ u32 i, burst;
+ u32 single_count = 0, box_count = 0, crci = 0;
+ void *desc;
+ u32 *cple;
+ int blk_size = 0;
+
+ if (!is_slave_direction(direction)) {
+ dev_err(adev->dev, "invalid dma direction\n");
+ return NULL;
+ }
+
+ /*
+ * get burst value from slave configuration
+ */
+ burst = (direction == DMA_MEM_TO_DEV) ?
+ achan->slave.dst_maxburst :
+ achan->slave.src_maxburst;
+
+ /* if using flow control, validate burst and crci values */
+	if (achan->slave.device_fc) {
+ blk_size = adm_get_blksize(burst);
+ if (blk_size < 0) {
+ dev_err(adev->dev, "invalid burst value: %d\n",
+ burst);
+ return ERR_PTR(-EINVAL);
+ }
+
+ crci = achan->slave.slave_id & 0xf;
+ if (!crci || achan->slave.slave_id > 0x1f) {
+ dev_err(adev->dev, "invalid crci value\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* iterate through sgs and compute allocation size of structures */
+ for_each_sg(sgl, sg, sg_len, i) {
+ if (achan->slave.device_fc) {
+ box_count += DIV_ROUND_UP(sg_dma_len(sg) / burst,
+ ADM_MAX_ROWS);
+ if (sg_dma_len(sg) % burst)
+ single_count++;
+ } else {
+ single_count += DIV_ROUND_UP(sg_dma_len(sg),
+ ADM_MAX_XFER);
+ }
+ }
+
+ async_desc = kzalloc(sizeof(*async_desc), GFP_NOWAIT);
+ if (!async_desc)
+ return ERR_PTR(-ENOMEM);
+
+ if (crci)
+ async_desc->mux = achan->slave.slave_id & ADM_CRCI_MUX_SEL ?
+ ADM_CRCI_CTL_MUX_SEL : 0;
+ async_desc->crci = crci;
+ async_desc->blk_size = blk_size;
+ async_desc->dma_len = single_count * sizeof(struct adm_desc_hw_single) +
+ box_count * sizeof(struct adm_desc_hw_box) +
+ sizeof(*cple) + 2 * ADM_DESC_ALIGN;
+
+ async_desc->cpl = dma_alloc_writecombine(adev->dev, async_desc->dma_len,
+ &async_desc->dma_addr, GFP_NOWAIT);
+
+ if (!async_desc->cpl) {
+ kfree(async_desc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ async_desc->adev = adev;
+
+ /* both command list entry and descriptors must be 8 byte aligned */
+ cple = PTR_ALIGN(async_desc->cpl, ADM_DESC_ALIGN);
+ desc = PTR_ALIGN(cple + 1, ADM_DESC_ALIGN);
+
+ /* init cmd list */
+ *cple = ADM_CPLE_LP;
+ *cple |= (desc - async_desc->cpl + async_desc->dma_addr) >> 3;
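+	/* CPLE addresses are stored in 8-byte units, hence the shift by 3 */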
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ async_desc->length += sg_dma_len(sg);
+
+ if (achan->slave.device_fc)
+ desc = adm_process_fc_descriptors(achan, desc, sg, crci,
+ burst, direction);
+ else
+ desc = adm_process_non_fc_descriptors(achan, desc, sg,
+ direction);
+ }
+
+ return vchan_tx_prep(&achan->vc, &async_desc->vd, flags);
+}
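+
+/*
+ * Illustrative client usage (not part of this driver): a slave device
+ * driver would drive the above through the generic dmaengine API, for
+ * example (fifo_phys_addr and crci are placeholders):
+ *
+ *	struct dma_chan *chan = dma_request_slave_channel(dev, "rx");
+ *	struct dma_slave_config cfg = {
+ *		.direction = DMA_DEV_TO_MEM,
+ *		.src_addr = fifo_phys_addr,
+ *		.src_maxburst = 16,
+ *		.device_fc = true,
+ *		.slave_id = crci,
+ *	};
+ *	struct dma_async_tx_descriptor *txd;
+ *
+ *	dmaengine_slave_config(chan, &cfg);
+ *	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
+ *				      DMA_PREP_INTERRUPT);
+ *	dmaengine_submit(txd);
+ *	dma_async_issue_pending(chan);
+ */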
+
+/**
+ * adm_terminate_all - terminate all transactions on a channel
+ * @chan: dma channel
+ *
+ * Dequeues and frees all transactions, and aborts the current transaction.
+ * No callbacks are invoked.
+ */
+static int adm_terminate_all(struct dma_chan *chan)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ struct adm_device *adev = achan->adev;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&achan->vc.lock, flags);
+ vchan_get_all_descriptors(&achan->vc, &head);
+
+ /* send flush command to terminate current transaction */
+ writel_relaxed(BIT(31),
+ adev->regs + ADM_CH_FLUSH_STATE0(achan->id, adev->ee));
+
+ spin_unlock_irqrestore(&achan->vc.lock, flags);
+
+ vchan_dma_desc_free_list(&achan->vc, &head);
+
+ return 0;
+}
+
+static int adm_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ unsigned long flag;
+
+ spin_lock_irqsave(&achan->vc.lock, flag);
+ memcpy(&achan->slave, cfg, sizeof(struct dma_slave_config));
+ spin_unlock_irqrestore(&achan->vc.lock, flag);
+
+ return 0;
+}
+
+/**
+ * adm_start_dma - start next transaction
+ * @achan: ADM dma channel
+ */
+static void adm_start_dma(struct adm_chan *achan)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&achan->vc);
+ struct adm_device *adev = achan->adev;
+ struct adm_async_desc *async_desc;
+
+ lockdep_assert_held(&achan->vc.lock);
+
+ if (!vd)
+ return;
+
+ list_del(&vd->node);
+
+ /* write next command list out to the CMD FIFO */
+ async_desc = container_of(vd, struct adm_async_desc, vd);
+ achan->curr_txd = async_desc;
+
+ /* reset channel error */
+ achan->error = 0;
+
+ if (!achan->initialized) {
+ /* enable interrupts */
+ writel(ADM_CH_CONF_SHADOW_EN |
+ ADM_CH_CONF_PERM_MPU_CONF |
+ ADM_CH_CONF_MPU_DISABLE |
+ ADM_CH_CONF_SEC_DOMAIN(adev->ee),
+ adev->regs + ADM_CH_CONF(achan->id));
+
+ writel(ADM_CH_RSLT_CONF_IRQ_EN | ADM_CH_RSLT_CONF_FLUSH_EN,
+ adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee));
+
+ achan->initialized = 1;
+ }
+
+ /* set the crci block size if this transaction requires CRCI */
+ if (async_desc->crci) {
+ writel(async_desc->mux | async_desc->blk_size,
+ adev->regs + ADM_CRCI_CTL(async_desc->crci, adev->ee));
+ }
+
+ /* make sure IRQ enable doesn't get reordered */
+ wmb();
+
+ /* write next command list out to the CMD FIFO */
+ writel(ALIGN(async_desc->dma_addr, ADM_DESC_ALIGN) >> 3,
+ adev->regs + ADM_CH_CMD_PTR(achan->id, adev->ee));
+}
+
+/**
+ * adm_dma_irq - irq handler for ADM controller
+ * @irq: IRQ of interrupt
+ * @data: callback data
+ *
+ * IRQ handler for the ADM controller
+ */
+static irqreturn_t adm_dma_irq(int irq, void *data)
+{
+ struct adm_device *adev = data;
+ u32 srcs, i;
+ struct adm_async_desc *async_desc;
+ unsigned long flags;
+
+ srcs = readl_relaxed(adev->regs +
+ ADM_SEC_DOMAIN_IRQ_STATUS(adev->ee));
+
+ for (i = 0; i < ADM_MAX_CHANNELS; i++) {
+ struct adm_chan *achan = &adev->channels[i];
+ u32 status, result;
+
+ if (srcs & BIT(i)) {
+ status = readl_relaxed(adev->regs +
+ ADM_CH_STATUS_SD(i, adev->ee));
+
+ /* if no result present, skip */
+ if (!(status & ADM_CH_STATUS_VALID))
+ continue;
+
+ result = readl_relaxed(adev->regs +
+ ADM_CH_RSLT(i, adev->ee));
+
+ /* no valid results, skip */
+ if (!(result & ADM_CH_RSLT_VALID))
+ continue;
+
+ /* flag error if transaction was flushed or failed */
+ if (result & (ADM_CH_RSLT_ERR | ADM_CH_RSLT_FLUSH))
+ achan->error = 1;
+
+ spin_lock_irqsave(&achan->vc.lock, flags);
+ async_desc = achan->curr_txd;
+
+ achan->curr_txd = NULL;
+
+ if (async_desc)
+ vchan_cookie_complete(&async_desc->vd);
+
+ /* kick off next DMA */
+ adm_start_dma(achan);
+
+ spin_unlock_irqrestore(&achan->vc.lock, flags);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * adm_tx_status - returns status of transaction
+ * @chan: dma channel
+ * @cookie: transaction cookie
+ * @txstate: DMA transaction state
+ *
+ * Return status of dma transaction
+ */
+static enum dma_status adm_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ struct virt_dma_desc *vd;
+ enum dma_status ret;
+ unsigned long flags;
+ size_t residue = 0;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE || !txstate)
+ return ret;
+
+ spin_lock_irqsave(&achan->vc.lock, flags);
+
+ vd = vchan_find_desc(&achan->vc, cookie);
+ if (vd)
+ residue = container_of(vd, struct adm_async_desc, vd)->length;
+
+ spin_unlock_irqrestore(&achan->vc.lock, flags);
+
+	/*
+	 * The residue is either the full length if the descriptor is still in
+	 * the issued list, or 0 if it is in progress. There is no reliable
+	 * way of determining anything in between.
+	 */
+ dma_set_residue(txstate, residue);
+
+ if (achan->error)
+ return DMA_ERROR;
+
+ return ret;
+}
+
+/**
+ * adm_issue_pending - starts pending transactions
+ * @chan: dma channel
+ *
+ * Issues all pending transactions and starts DMA
+ */
+static void adm_issue_pending(struct dma_chan *chan)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&achan->vc.lock, flags);
+
+ if (vchan_issue_pending(&achan->vc) && !achan->curr_txd)
+ adm_start_dma(achan);
+ spin_unlock_irqrestore(&achan->vc.lock, flags);
+}
+
+/**
+ * adm_dma_free_desc - free descriptor memory
+ * @vd: virtual descriptor
+ *
+ */
+static void adm_dma_free_desc(struct virt_dma_desc *vd)
+{
+ struct adm_async_desc *async_desc = container_of(vd,
+ struct adm_async_desc, vd);
+
+ dma_free_writecombine(async_desc->adev->dev, async_desc->dma_len,
+ async_desc->cpl, async_desc->dma_addr);
+ kfree(async_desc);
+}
+
+static void adm_channel_init(struct adm_device *adev, struct adm_chan *achan,
+ u32 index)
+{
+ achan->id = index;
+ achan->adev = adev;
+
+ vchan_init(&achan->vc, &adev->common);
+ achan->vc.desc_free = adm_dma_free_desc;
+}
+
+static int adm_dma_probe(struct platform_device *pdev)
+{
+ struct adm_device *adev;
+ struct resource *iores;
+ int ret;
+ u32 i;
+
+ adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ adev->dev = &pdev->dev;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ adev->regs = devm_ioremap_resource(&pdev->dev, iores);
+ if (IS_ERR(adev->regs))
+ return PTR_ERR(adev->regs);
+
+ adev->irq = platform_get_irq(pdev, 0);
+ if (adev->irq < 0)
+ return adev->irq;
+
+ ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &adev->ee);
+ if (ret) {
+ dev_err(adev->dev, "Execution environment unspecified\n");
+ return ret;
+ }
+
+ adev->core_clk = devm_clk_get(adev->dev, "core");
+ if (IS_ERR(adev->core_clk))
+ return PTR_ERR(adev->core_clk);
+
+ ret = clk_prepare_enable(adev->core_clk);
+ if (ret) {
+ dev_err(adev->dev, "failed to prepare/enable core clock\n");
+ return ret;
+ }
+
+ adev->iface_clk = devm_clk_get(adev->dev, "iface");
+ if (IS_ERR(adev->iface_clk)) {
+ ret = PTR_ERR(adev->iface_clk);
+ goto err_disable_core_clk;
+ }
+
+ ret = clk_prepare_enable(adev->iface_clk);
+ if (ret) {
+ dev_err(adev->dev, "failed to prepare/enable iface clock\n");
+ goto err_disable_core_clk;
+ }
+
+ adev->channels = devm_kcalloc(adev->dev, ADM_MAX_CHANNELS,
+ sizeof(*adev->channels), GFP_KERNEL);
+
+ if (!adev->channels) {
+ ret = -ENOMEM;
+ goto err_disable_clks;
+ }
+
+ /* allocate and initialize channels */
+ INIT_LIST_HEAD(&adev->common.channels);
+
+ for (i = 0; i < ADM_MAX_CHANNELS; i++)
+ adm_channel_init(adev, &adev->channels[i], i);
+
+ /* reset CRCIs */
+ for (i = 0; i < 16; i++)
+ writel(ADM_CRCI_CTL_RST, adev->regs +
+ ADM_CRCI_CTL(i, adev->ee));
+
+ /* configure client interfaces */
+ writel(ADM_CI_RANGE_START(0x40) | ADM_CI_RANGE_END(0xb0) |
+ ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(0));
+ writel(ADM_CI_RANGE_START(0x2a) | ADM_CI_RANGE_END(0x2c) |
+ ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(1));
+ writel(ADM_CI_RANGE_START(0x12) | ADM_CI_RANGE_END(0x28) |
+ ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(2));
+ writel(ADM_GP_CTL_LP_EN | ADM_GP_CTL_LP_CNT(0xf),
+ adev->regs + ADM_GP_CTL);
+
+ ret = devm_request_irq(adev->dev, adev->irq, adm_dma_irq,
+ 0, "adm_dma", adev);
+ if (ret)
+ goto err_disable_clks;
+
+ platform_set_drvdata(pdev, adev);
+
+ adev->common.dev = adev->dev;
+ adev->common.dev->dma_parms = &adev->dma_parms;
+
+ /* set capabilities */
+ dma_cap_zero(adev->common.cap_mask);
+ dma_cap_set(DMA_SLAVE, adev->common.cap_mask);
+ dma_cap_set(DMA_PRIVATE, adev->common.cap_mask);
+
+ /* initialize dmaengine apis */
+	adev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ adev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+	adev->common.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	adev->common.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ adev->common.device_free_chan_resources = adm_free_chan;
+ adev->common.device_prep_slave_sg = adm_prep_slave_sg;
+ adev->common.device_issue_pending = adm_issue_pending;
+ adev->common.device_tx_status = adm_tx_status;
+ adev->common.device_terminate_all = adm_terminate_all;
+ adev->common.device_config = adm_slave_config;
+
+ ret = dma_async_device_register(&adev->common);
+ if (ret) {
+ dev_err(adev->dev, "failed to register dma async device\n");
+ goto err_disable_clks;
+ }
+
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ of_dma_xlate_by_chan_id,
+ &adev->common);
+ if (ret)
+ goto err_unregister_dma;
+
+ return 0;
+
+err_unregister_dma:
+ dma_async_device_unregister(&adev->common);
+err_disable_clks:
+ clk_disable_unprepare(adev->iface_clk);
+err_disable_core_clk:
+ clk_disable_unprepare(adev->core_clk);
+
+ return ret;
+}
+
+static int adm_dma_remove(struct platform_device *pdev)
+{
+ struct adm_device *adev = platform_get_drvdata(pdev);
+ struct adm_chan *achan;
+ u32 i;
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&adev->common);
+
+ for (i = 0; i < ADM_MAX_CHANNELS; i++) {
+ achan = &adev->channels[i];
+
+ /* mask IRQs for this channel/EE pair */
+ writel(0, adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee));
+
+ adm_terminate_all(&adev->channels[i].vc.chan);
+ }
+
+ devm_free_irq(adev->dev, adev->irq, adev);
+
+	clk_disable_unprepare(adev->iface_clk);
+	clk_disable_unprepare(adev->core_clk);
+
+ return 0;
+}
+
+static const struct of_device_id adm_of_match[] = {
+ { .compatible = "qcom,adm", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, adm_of_match);
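+
+/*
+ * Illustrative device tree node (unit address, interrupt specifier and
+ * clock specifiers are placeholders; the binding document is authoritative):
+ *
+ *	adm_dma: dma@18300000 {
+ *		compatible = "qcom,adm";
+ *		reg = <0x18300000 0x100000>;
+ *		interrupts = <0 170 0>;
+ *		#dma-cells = <1>;
+ *		clocks = <&gcc 101>, <&gcc 102>;
+ *		clock-names = "core", "iface";
+ *		qcom,ee = <0>;
+ *	};
+ */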
+
+static struct platform_driver adm_dma_driver = {
+ .probe = adm_dma_probe,
+ .remove = adm_dma_remove,
+ .driver = {
+ .name = "adm-dma-engine",
+ .of_match_table = adm_of_match,
+ },
+};
+
+module_platform_driver(adm_dma_driver);
+
+MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
+MODULE_DESCRIPTION("QCOM ADM DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index b8a521741418..3a0455496efe 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -14,8 +14,11 @@ obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
obj-$(CONFIG_QCOM_SCM) += qcom_scm.o
+ifdef CONFIG_64BIT
obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o
+else
obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
+endif
CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1)
obj-y += broadcom/
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index 29e6850665eb..0a7bcfa44409 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
* Copyright (C) 2015 Linaro Ltd.
*
* This program is free software; you can redistribute it and/or modify
@@ -23,11 +24,19 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/qcom_scm.h>
+#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include "qcom_scm.h"
+#define QCOM_SCM_ENOMEM -5
+#define QCOM_SCM_EOPNOTSUPP -4
+#define QCOM_SCM_EINVAL_ADDR -3
+#define QCOM_SCM_EINVAL_ARG -2
+#define QCOM_SCM_ERROR -1
+#define QCOM_SCM_INTERRUPTED 1
+
#define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00
#define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01
#define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08
@@ -38,6 +47,15 @@
#define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10
#define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40
+#define IOMMU_SECURE_PTBL_SIZE 3
+#define IOMMU_SECURE_PTBL_INIT 4
+#define IOMMU_SET_CP_POOL_SIZE 5
+#define IOMMU_SECURE_MAP 6
+#define IOMMU_SECURE_UNMAP 7
+#define IOMMU_SECURE_MAP2 0xb
+#define IOMMU_SECURE_MAP2_FLAT 0x12
+#define IOMMU_SECURE_UNMAP2 0xc
+
struct qcom_scm_entry {
int flag;
void *entry;
@@ -168,23 +186,6 @@ static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response
return (void *)rsp + le32_to_cpu(rsp->buf_offset);
}
-static int qcom_scm_remap_error(int err)
-{
- pr_err("qcom_scm_call failed with error code %d\n", err);
- switch (err) {
- case QCOM_SCM_ERROR:
- return -EIO;
- case QCOM_SCM_EINVAL_ADDR:
- case QCOM_SCM_EINVAL_ARG:
- return -EINVAL;
- case QCOM_SCM_EOPNOTSUPP:
- return -EOPNOTSUPP;
- case QCOM_SCM_ENOMEM:
- return -ENOMEM;
- }
- return -EINVAL;
-}
-
static u32 smc(u32 cmd_addr)
{
int context_id;
@@ -499,3 +500,386 @@ int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
return qcom_scm_call(QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP,
req, req_cnt * sizeof(*req), resp, sizeof(*resp));
}
+
+bool __qcom_scm_pas_supported(u32 peripheral)
+{
+ u32 ret_val;
+ int ret;
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_IS_SUPPORTED_CMD,
+ &peripheral, sizeof(peripheral),
+ &ret_val, sizeof(ret_val));
+
+ return ret ? false : !!ret_val;
+}
+
+int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral, const void *metadata, size_t size)
+{
+ dma_addr_t mdata_phys;
+ void *mdata_buf;
+ u32 scm_ret;
+ int ret;
+ struct pas_init_image_req {
+ u32 proc;
+ u32 image_addr;
+ } request;
+
+ /*
+	 * During the scm call, memory protection will be enabled for the
+	 * metadata blob, so make sure it's physically contiguous, 4K aligned
+	 * and non-cacheable to avoid XPU violations.
+ */
+ mdata_buf = dma_alloc_coherent(dev, size, &mdata_phys, GFP_KERNEL);
+ if (!mdata_buf) {
+ pr_err("Allocation of metadata buffer failed.\n");
+ return -ENOMEM;
+ }
+ memcpy(mdata_buf, metadata, size);
+
+ request.proc = peripheral;
+ request.image_addr = mdata_phys;
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_INIT_IMAGE_CMD,
+ &request, sizeof(request),
+ &scm_ret, sizeof(scm_ret));
+
+ dma_free_coherent(dev, size, mdata_buf, mdata_phys);
+
+ return ret ? : scm_ret;
+}
+
+int __qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+{
+ u32 scm_ret;
+ int ret;
+ struct pas_init_image_req {
+ u32 proc;
+ u32 addr;
+ u32 len;
+ } request;
+
+ request.proc = peripheral;
+ request.addr = addr;
+ request.len = size;
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MEM_SETUP_CMD,
+ &request, sizeof(request),
+ &scm_ret, sizeof(scm_ret));
+
+ return ret ? : scm_ret;
+}
+
+int __qcom_scm_pas_auth_and_reset(u32 peripheral)
+{
+ u32 scm_ret;
+ int ret;
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
+ &peripheral, sizeof(peripheral),
+ &scm_ret, sizeof(scm_ret));
+
+ return ret ? : scm_ret;
+}
+
+int __qcom_scm_pas_shutdown(u32 peripheral)
+{
+ u32 scm_ret;
+ int ret;
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_SHUTDOWN_CMD,
+ &peripheral, sizeof(peripheral),
+ &scm_ret, sizeof(scm_ret));
+
+ return ret ? : scm_ret;
+}
+
+int __qcom_scm_pil_init_image_cmd(u32 proc, u64 image_addr)
+{
+ int ret;
+ u32 scm_ret = 0;
+ struct {
+ u32 proc;
+ u32 image_addr;
+ } req;
+
+ req.proc = proc;
+ req.image_addr = image_addr;
+
+ ret = qcom_scm_call(SCM_SVC_PIL, PAS_INIT_IMAGE_CMD, &req,
+ sizeof(req), &scm_ret, sizeof(scm_ret));
+ if (ret)
+ return ret;
+
+ return scm_ret;
+}
+
+int __qcom_scm_pil_mem_setup_cmd(u32 proc, u64 start_addr, u32 len)
+{
+ u32 scm_ret = 0;
+ int ret;
+ struct {
+ u32 proc;
+ u32 start_addr;
+ u32 len;
+ } req;
+
+ req.proc = proc;
+ req.start_addr = start_addr;
+ req.len = len;
+
+ ret = qcom_scm_call(SCM_SVC_PIL, PAS_MEM_SETUP_CMD, &req,
+ sizeof(req), &scm_ret, sizeof(scm_ret));
+ if (ret)
+ return ret;
+
+ return scm_ret;
+}
+
+int __qcom_scm_pil_auth_and_reset_cmd(u32 proc)
+{
+ u32 scm_ret = 0;
+ int ret;
+ u32 req;
+
+ req = proc;
+
+ ret = qcom_scm_call(SCM_SVC_PIL, PAS_AUTH_AND_RESET_CMD, &req,
+ sizeof(req), &scm_ret, sizeof(scm_ret));
+ if (ret)
+ return ret;
+
+ return scm_ret;
+}
+
+int __qcom_scm_pil_shutdown_cmd(u32 proc)
+{
+ u32 scm_ret = 0;
+ int ret;
+ u32 req;
+
+ req = proc;
+
+ ret = qcom_scm_call(SCM_SVC_PIL, PAS_SHUTDOWN_CMD, &req,
+ sizeof(req), &scm_ret, sizeof(scm_ret));
+ if (ret)
+ return ret;
+
+ return scm_ret;
+}
+
+#define SCM_SVC_UTIL 0x3
+#define SCM_SVC_MP 0xc
+#define IOMMU_DUMP_SMMU_FAULT_REGS 0x0c
+
+int __qcom_scm_iommu_dump_fault_regs(u32 id, u32 context, u64 addr, u32 len)
+{
+	struct {
+		u32 id;
+		u32 cb_num;
+		u32 buff;
+		u32 len;
+	} req;
+	int resp = 0;
+
+	req.id = id;
+	req.cb_num = context;
+	req.buff = addr;
+	req.len = len;
+
+	return qcom_scm_call(SCM_SVC_UTIL, IOMMU_DUMP_SMMU_FAULT_REGS,
+		&req, sizeof(req), &resp, 1);
+}
+
+int __qcom_scm_iommu_set_cp_pool_size(u32 size, u32 spare)
+{
+ struct {
+ u32 size;
+ u32 spare;
+ } req;
+ int retval;
+
+ req.size = size;
+ req.spare = spare;
+
+ return qcom_scm_call(SCM_SVC_MP, IOMMU_SET_CP_POOL_SIZE,
+ &req, sizeof(req), &retval, sizeof(retval));
+}
+
+int __qcom_scm_iommu_secure_ptbl_size(u32 spare, int psize[2])
+{
+ struct {
+ u32 spare;
+ } req;
+
+ req.spare = spare;
+
+	return qcom_scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_SIZE, &req,
+			  sizeof(req), psize, 2 * sizeof(*psize));
+}
+
+int __qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
+{
+ struct {
+ u32 addr;
+ u32 size;
+ u32 spare;
+ } req = {0};
+ int ret, ptbl_ret = 0;
+
+ req.addr = addr;
+ req.size = size;
+ req.spare = spare;
+
+ ret = qcom_scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_INIT, &req,
+ sizeof(req), &ptbl_ret, sizeof(ptbl_ret));
+
+ if (ret)
+ return ret;
+
+ if (ptbl_ret)
+ return ptbl_ret;
+
+ return 0;
+}
+
+int __qcom_scm_iommu_secure_map(u64 list, u32 list_size, u32 size,
+ u32 id, u32 ctx_id, u64 va, u32 info_size,
+ u32 flags)
+{
+ struct {
+ struct {
+ unsigned int list;
+ unsigned int list_size;
+ unsigned int size;
+ } plist;
+ struct {
+ unsigned int id;
+ unsigned int ctx_id;
+ unsigned int va;
+ unsigned int size;
+ } info;
+ unsigned int flags;
+ } req;
+ u32 resp;
+ int ret;
+
+ req.plist.list = list;
+ req.plist.list_size = list_size;
+ req.plist.size = size;
+ req.info.id = id;
+ req.info.ctx_id = ctx_id;
+ req.info.va = va;
+ req.info.size = info_size;
+ req.flags = flags;
+
+ ret = qcom_scm_call(SCM_SVC_MP, IOMMU_SECURE_MAP2, &req, sizeof(req),
+ &resp, sizeof(resp));
+
+ if (ret || resp)
+ return -EINVAL;
+
+ return 0;
+}
+
+int __qcom_scm_iommu_secure_unmap(u32 id, u32 ctx_id, u64 va,
+ u32 size, u32 flags)
+{
+ struct {
+ struct {
+ unsigned int id;
+ unsigned int ctx_id;
+ unsigned int va;
+ unsigned int size;
+ } info;
+ unsigned int flags;
+ } req;
+	int scm_ret;
+
+ req.info.id = id;
+ req.info.ctx_id = ctx_id;
+ req.info.va = va;
+ req.info.size = size;
+ req.flags = flags;
+
+ return qcom_scm_call(SCM_SVC_MP, IOMMU_SECURE_UNMAP2, &req,
+ sizeof(req), &scm_ret, sizeof(scm_ret));
+}
+
+int __qcom_scm_get_feat_version(u32 feat)
+{
+ if (__qcom_scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD)) {
+ u32 version;
+
+ if (!qcom_scm_call(SCM_SVC_INFO, GET_FEAT_VERSION_CMD, &feat,
+ sizeof(feat), &version, sizeof(version)))
+ return version;
+ }
+
+ return 0;
+}
+
+#define RESTORE_SEC_CFG 2
+int __qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
+{
+ struct {
+ u32 device_id;
+ u32 spare;
+ } req;
+ int ret, scm_ret = 0;
+
+ req.device_id = device_id;
+ req.spare = spare;
+
+ ret = qcom_scm_call(SCM_SVC_MP, RESTORE_SEC_CFG, &req, sizeof(req),
+			    &scm_ret, sizeof(scm_ret));
+ if (ret || scm_ret)
+ return ret ? ret : -EINVAL;
+
+ return 0;
+}
+
+#define TZBSP_VIDEO_SET_STATE 0xa
+int __qcom_scm_set_video_state(u32 state, u32 spare)
+{
+ struct {
+ u32 state;
+ u32 spare;
+ } req;
+ int scm_ret = 0;
+ int ret;
+
+ req.state = state;
+ req.spare = spare;
+
+ ret = qcom_scm_call(SCM_SVC_BOOT, TZBSP_VIDEO_SET_STATE, &req,
+ sizeof(req), &scm_ret, sizeof(scm_ret));
+ if (ret || scm_ret)
+ return ret ? ret : -EINVAL;
+
+ return 0;
+}
+
+#define TZBSP_MEM_PROTECT_VIDEO_VAR 0x8
+
+int __qcom_scm_mem_protect_video_var(u32 start, u32 size, u32 nonpixel_start,
+ u32 nonpixel_size)
+{
+ struct {
+ u32 cp_start;
+ u32 cp_size;
+ u32 cp_nonpixel_start;
+ u32 cp_nonpixel_size;
+ } req;
+ int ret, scm_ret;
+
+ req.cp_start = start;
+ req.cp_size = size;
+ req.cp_nonpixel_start = nonpixel_start;
+ req.cp_nonpixel_size = nonpixel_size;
+
+ ret = qcom_scm_call(SCM_SVC_MP, TZBSP_MEM_PROTECT_VIDEO_VAR, &req,
+ sizeof(req), &scm_ret, sizeof(scm_ret));
+
+ if (ret || scm_ret)
+ return ret ? ret : -EINVAL;
+
+ return 0;
+}
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
index bb6555f6d63b..b3a9784c15ef 100644
--- a/drivers/firmware/qcom_scm-64.c
+++ b/drivers/firmware/qcom_scm-64.c
@@ -8,56 +8,869 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
*/
-#include <linux/io.h>
-#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/types.h>
#include <linux/qcom_scm.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/cacheflush.h>
+#include <asm/compiler.h>
+#include <asm/smp_plat.h>
+
+#include "qcom_scm.h"
+
+#define QCOM_SCM_SIP_FNID(s, c) (((((s) & 0xFF) << 8) | ((c) & 0xFF)) | 0x02000000)
+
+#define MAX_QCOM_SCM_ARGS 10
+#define MAX_QCOM_SCM_RETS 3
+
+enum qcom_scm_arg_types {
+ QCOM_SCM_VAL,
+ QCOM_SCM_RO,
+ QCOM_SCM_RW,
+ QCOM_SCM_BUFVAL,
+};
+
+#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\
+			(((a) & 0x3) << 4) | \
+			(((b) & 0x3) << 6) | \
+			(((c) & 0x3) << 8) | \
+			(((d) & 0x3) << 10) | \
+			(((e) & 0x3) << 12) | \
+			(((f) & 0x3) << 14) | \
+			(((g) & 0x3) << 16) | \
+			(((h) & 0x3) << 18) | \
+			(((i) & 0x3) << 20) | \
+			(((j) & 0x3) << 22) | \
+			((num) & 0xf))
+
+#define QCOM_SCM_ARGS(...) QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
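+
+/*
+ * Example: QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW) encodes an argument
+ * count of 2 in the low nibble, QCOM_SCM_VAL (0) for the first argument in
+ * bits [5:4] and QCOM_SCM_RW (2) for the second in bits [7:6], giving an
+ * arginfo value of 0x82.
+ */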
/**
- * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
- *
- * Set the cold boot address of the cpus. Any cpu outside the supported
- * range would be removed from the cpu present mask.
+ * struct qcom_scm_desc
+ * @arginfo: Metadata describing the arguments in args[]
+ * @args: The array of arguments for the secure syscall
+ * @ret: The values returned by the secure syscall
+ * @extra_arg_buf: The buffer containing extra arguments
+ *		   (that don't fit in the available registers)
+ * @x5: The 4th argument to the secure syscall, or the physical address of
+ *	extra_arg_buf
*/
-int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
+struct qcom_scm_desc {
+ u32 arginfo;
+ u64 args[MAX_QCOM_SCM_ARGS];
+ u64 ret[MAX_QCOM_SCM_RETS];
+
+ /* private */
+ void *extra_arg_buf;
+ u64 x5;
+};
+
+#define QCOM_SCM_ENOMEM -5
+#define QCOM_SCM_EOPNOTSUPP -4
+#define QCOM_SCM_EINVAL_ADDR -3
+#define QCOM_SCM_EINVAL_ARG -2
+#define QCOM_SCM_ERROR -1
+#define QCOM_SCM_INTERRUPTED 1
+#define QCOM_SCM_EBUSY -55
+#define QCOM_SCM_V2_EBUSY -12
+
+static DEFINE_MUTEX(qcom_scm_lock);
+
+#define QCOM_SCM_EBUSY_WAIT_MS 30
+#define QCOM_SCM_EBUSY_MAX_RETRY 20
+
+#define N_EXT_QCOM_SCM_ARGS 7
+#define FIRST_EXT_ARG_IDX 3
+#define SMC_ATOMIC_SYSCALL 31
+#define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1)
+#define SMC64_MASK 0x40000000
+#define SMC_ATOMIC_MASK 0x80000000
+#define IS_CALL_AVAIL_CMD 1
+
+#define R0_STR "x0"
+#define R1_STR "x1"
+#define R2_STR "x2"
+#define R3_STR "x3"
+#define R4_STR "x4"
+#define R5_STR "x5"
+#define R6_STR "x6"
+
+int __qcom_scm_call_armv8_64(u64 x0, u64 x1, u64 x2, u64 x3, u64 x4, u64 x5,
+ u64 *ret1, u64 *ret2, u64 *ret3)
+{
+	register u64 r0 asm("x0") = x0;
+	register u64 r1 asm("x1") = x1;
+	register u64 r2 asm("x2") = x2;
+	register u64 r3 asm("x3") = x3;
+	register u64 r4 asm("x4") = x4;
+	register u64 r5 asm("x5") = x5;
+	register u64 r6 asm("x6") = 0;
+
+ do {
+ asm volatile(
+ __asmeq("%0", R0_STR)
+ __asmeq("%1", R1_STR)
+ __asmeq("%2", R2_STR)
+ __asmeq("%3", R3_STR)
+ __asmeq("%4", R0_STR)
+ __asmeq("%5", R1_STR)
+ __asmeq("%6", R2_STR)
+ __asmeq("%7", R3_STR)
+ __asmeq("%8", R4_STR)
+ __asmeq("%9", R5_STR)
+ __asmeq("%10", R6_STR)
+#ifdef REQUIRES_SEC
+ ".arch_extension sec\n"
+#endif
+ "smc #0\n"
+ : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
+ : "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
+ "r" (r5), "r" (r6)
+ : "x7", "x8", "x9", "x10", "x11", "x12", "x13",
+ "x14", "x15", "x16", "x17");
+ } while (r0 == QCOM_SCM_INTERRUPTED);
+
+ if (ret1)
+ *ret1 = r1;
+ if (ret2)
+ *ret2 = r2;
+ if (ret3)
+ *ret3 = r3;
+
+ return r0;
+}
+
+int __qcom_scm_call_armv8_32(u32 w0, u32 w1, u32 w2, u32 w3, u32 w4, u32 w5,
+ u64 *ret1, u64 *ret2, u64 *ret3)
{
- return -ENOTSUPP;
+	register u32 r0 asm("x0") = w0;
+	register u32 r1 asm("x1") = w1;
+	register u32 r2 asm("x2") = w2;
+	register u32 r3 asm("x3") = w3;
+	register u32 r4 asm("x4") = w4;
+	register u32 r5 asm("x5") = w5;
+	register u32 r6 asm("x6") = 0;
+
+ do {
+ asm volatile(
+ __asmeq("%0", R0_STR)
+ __asmeq("%1", R1_STR)
+ __asmeq("%2", R2_STR)
+ __asmeq("%3", R3_STR)
+ __asmeq("%4", R0_STR)
+ __asmeq("%5", R1_STR)
+ __asmeq("%6", R2_STR)
+ __asmeq("%7", R3_STR)
+ __asmeq("%8", R4_STR)
+ __asmeq("%9", R5_STR)
+ __asmeq("%10", R6_STR)
+#ifdef REQUIRES_SEC
+ ".arch_extension sec\n"
+#endif
+ "smc #0\n"
+ : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
+ : "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
+ "r" (r5), "r" (r6)
+ : "x7", "x8", "x9", "x10", "x11", "x12", "x13",
+ "x14", "x15", "x16", "x17");
+
+ } while (r0 == QCOM_SCM_INTERRUPTED);
+
+ if (ret1)
+ *ret1 = r1;
+ if (ret2)
+ *ret2 = r2;
+ if (ret3)
+ *ret3 = r3;
+
+ return r0;
+}
+
+struct qcom_scm_extra_arg {
+ union {
+ u32 args32[N_EXT_QCOM_SCM_ARGS];
+ u64 args64[N_EXT_QCOM_SCM_ARGS];
+ };
+};
+
+static enum qcom_scm_interface_version {
+ QCOM_SCM_UNKNOWN,
+ QCOM_SCM_LEGACY,
+ QCOM_SCM_ARMV8_32,
+ QCOM_SCM_ARMV8_64,
+} qcom_scm_version = QCOM_SCM_UNKNOWN;
+
+/* This will be set to specify SMC32 or SMC64 */
+static u32 qcom_scm_version_mask;
+
+/*
+ * If there are more than N_REGISTER_ARGS, allocate a buffer and place
+ * the additional arguments in it. The extra argument buffer will be
+ * pointed to by X5.
+ */
+static int allocate_extra_arg_buffer(struct qcom_scm_desc *desc, gfp_t flags)
+{
+ int i, j;
+ struct qcom_scm_extra_arg *argbuf;
+ int arglen = desc->arginfo & 0xf;
+ size_t argbuflen = PAGE_ALIGN(sizeof(struct qcom_scm_extra_arg));
+
+ desc->x5 = desc->args[FIRST_EXT_ARG_IDX];
+
+ if (likely(arglen <= N_REGISTER_ARGS)) {
+ desc->extra_arg_buf = NULL;
+ return 0;
+ }
+
+ argbuf = kzalloc(argbuflen, flags);
+ if (!argbuf) {
+ pr_err("qcom_scm_call: failed to alloc mem for extended argument buffer\n");
+ return -ENOMEM;
+ }
+
+ desc->extra_arg_buf = argbuf;
+
+ j = FIRST_EXT_ARG_IDX;
+ if (qcom_scm_version == QCOM_SCM_ARMV8_64)
+ for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++)
+ argbuf->args64[i] = desc->args[j++];
+ else
+ for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++)
+ argbuf->args32[i] = desc->args[j++];
+ desc->x5 = virt_to_phys(argbuf);
+ __flush_dcache_area(argbuf, argbuflen);
+
+ return 0;
}
/**
- * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
+ * qcom_scm_call() - Invoke a syscall in the secure world
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @desc: Descriptor structure containing arguments and return values
*
- * Set the Linux entry point for the SCM to transfer control to when coming
- * out of a power down. CPU power down may be executed on cpuidle or hotplug.
- */
-int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in pre-emptible context.
+ *
+ * A note on cache maintenance:
+ * Any buffer that is expected to be accessed by the secure world must be
+ * flushed before invoking qcom_scm_call and invalidated in the cache
+ * immediately after qcom_scm_call returns. Note that on ARMv8, invalidating
+ * a dirty cache line also cleans it (flush + clear dirty bit), so the flush
+ * before the call is required even if you don't care about the buffer's
+ * contents.
+ *
+ * Cache maintenance on the argument buffer (desc->args) is taken care of by
+ * qcom_scm_call; however, callers are responsible for any other cached
+ * buffers passed over to the secure world.
+ */
+static int qcom_scm_call(u32 svc_id, u32 cmd_id, struct qcom_scm_desc *desc)
{
- return -ENOTSUPP;
+ int arglen = desc->arginfo & 0xf;
+ int ret, retry_count = 0;
+ u32 fn_id = QCOM_SCM_SIP_FNID(svc_id, cmd_id);
+ u64 x0;
+
+ ret = allocate_extra_arg_buffer(desc, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ x0 = fn_id | qcom_scm_version_mask;
+
+ do {
+ mutex_lock(&qcom_scm_lock);
+
+ desc->ret[0] = desc->ret[1] = desc->ret[2] = 0;
+
+ pr_debug("qcom_scm_call: func id %#llx, args: %#x, %#llx, %#llx, %#llx, %#llx\n",
+ x0, desc->arginfo, desc->args[0], desc->args[1],
+ desc->args[2], desc->x5);
+
+ if (qcom_scm_version == QCOM_SCM_ARMV8_64)
+ ret = __qcom_scm_call_armv8_64(x0, desc->arginfo,
+ desc->args[0], desc->args[1],
+ desc->args[2], desc->x5,
+ &desc->ret[0], &desc->ret[1],
+ &desc->ret[2]);
+ else
+ ret = __qcom_scm_call_armv8_32(x0, desc->arginfo,
+ desc->args[0], desc->args[1],
+ desc->args[2], desc->x5,
+ &desc->ret[0], &desc->ret[1],
+ &desc->ret[2]);
+ mutex_unlock(&qcom_scm_lock);
+
+ if (ret == QCOM_SCM_V2_EBUSY)
+ msleep(QCOM_SCM_EBUSY_WAIT_MS);
+ } while (ret == QCOM_SCM_V2_EBUSY && (retry_count++ < QCOM_SCM_EBUSY_MAX_RETRY));
+
+ if (ret < 0)
+ pr_err("qcom_scm_call failed: func id %#llx, arginfo: %#x, args: %#llx, %#llx, %#llx, %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
+ x0, desc->arginfo, desc->args[0], desc->args[1],
+ desc->args[2], desc->x5, ret, desc->ret[0],
+ desc->ret[1], desc->ret[2]);
+
+ if (arglen > N_REGISTER_ARGS)
+ kfree(desc->extra_arg_buf);
+ if (ret < 0)
+ return qcom_scm_remap_error(ret);
+ return 0;
}
/**
- * qcom_scm_cpu_power_down() - Power down the cpu
- * @flags - Flags to flush cache
+ * qcom_scm_call_atomic() - Invoke a syscall in the secure world
+ * @s: service identifier
+ * @c: command identifier
+ * @desc: Descriptor structure containing arguments and return values
*
- * This is an end point to power down cpu. If there was a pending interrupt,
- * the control would return from this function, otherwise, the cpu jumps to the
- * warm boot entry point set for this cpu upon reset.
+ * Similar to qcom_scm_call except that this can be invoked in atomic context.
+ * There is also no retry mechanism implemented. Please ensure that the
+ * secure world syscall can be executed in such a context and can complete
+ * in a timely manner.
*/
+static int qcom_scm_call_atomic(u32 s, u32 c, struct qcom_scm_desc *desc)
+{
+ int arglen = desc->arginfo & 0xf;
+ int ret;
+ u32 fn_id = QCOM_SCM_SIP_FNID(s, c);
+ u64 x0;
+
+ ret = allocate_extra_arg_buffer(desc, GFP_ATOMIC);
+ if (ret)
+ return ret;
+
+ x0 = fn_id | BIT(SMC_ATOMIC_SYSCALL) | qcom_scm_version_mask;
+
+ pr_debug("qcom_scm_call: func id %#llx, args: %#x, %#llx, %#llx, %#llx, %#llx\n",
+ x0, desc->arginfo, desc->args[0], desc->args[1],
+ desc->args[2], desc->x5);
+
+ if (qcom_scm_version == QCOM_SCM_ARMV8_64)
+ ret = __qcom_scm_call_armv8_64(x0, desc->arginfo, desc->args[0],
+ desc->args[1], desc->args[2],
+ desc->x5, &desc->ret[0],
+ &desc->ret[1], &desc->ret[2]);
+ else
+ ret = __qcom_scm_call_armv8_32(x0, desc->arginfo, desc->args[0],
+ desc->args[1], desc->args[2],
+ desc->x5, &desc->ret[0],
+ &desc->ret[1], &desc->ret[2]);
+ if (ret < 0)
+ pr_err("qcom_scm_call failed: func id %#llx, arginfo: %#x, args: %#llx, %#llx, %#llx, %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
+ x0, desc->arginfo, desc->args[0], desc->args[1],
+ desc->args[2], desc->x5, ret, desc->ret[0],
+ desc->ret[1], desc->ret[2]);
+
+ if (arglen > N_REGISTER_ARGS)
+ kfree(desc->extra_arg_buf);
+ if (ret < 0)
+ return qcom_scm_remap_error(ret);
+ return ret;
+}
+
+static int qcom_scm_set_boot_addr(void *entry, const cpumask_t *cpus, int flags)
+{
+ struct qcom_scm_desc desc = {0};
+ unsigned int cpu = cpumask_first(cpus);
+ u64 mpidr_el1 = cpu_logical_map(cpu);
+
+ /* For now we assume only a single cpu is set in the mask */
+ WARN_ON(cpumask_weight(cpus) != 1);
+
+ if (mpidr_el1 & ~MPIDR_HWID_BITMASK) {
+		pr_err("CPU%d: Failed to set boot address\n", cpu);
+ return -ENOSYS;
+ }
+
+ desc.args[0] = virt_to_phys(entry);
+ desc.args[1] = BIT(MPIDR_AFFINITY_LEVEL(mpidr_el1, 0));
+ desc.args[2] = BIT(MPIDR_AFFINITY_LEVEL(mpidr_el1, 1));
+ desc.args[3] = BIT(MPIDR_AFFINITY_LEVEL(mpidr_el1, 2));
+ desc.args[4] = ~0ULL;
+ desc.args[5] = QCOM_SCM_FLAG_HLOS | flags;
+ desc.arginfo = QCOM_SCM_ARGS(6);
+
+ return qcom_scm_call(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR_MC, &desc);
+}
+
+int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
+{
+ int flags = QCOM_SCM_FLAG_COLDBOOT_MC;
+
+ return qcom_scm_set_boot_addr(entry, cpus, flags);
+}
+
+int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
+{
+ int flags = QCOM_SCM_FLAG_WARMBOOT_MC;
+
+ return qcom_scm_set_boot_addr(entry, cpus, flags);
+}
+
void __qcom_scm_cpu_power_down(u32 flags)
{
+	struct qcom_scm_desc desc = {0};
+
+ desc.args[0] = QCOM_SCM_CMD_CORE_HOTPLUGGED |
+ (flags & QCOM_SCM_FLUSH_FLAG_MASK);
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ qcom_scm_call_atomic(QCOM_SCM_SVC_BOOT, QCOM_SCM_CMD_TERMINATE_PC, &desc);
}
int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id)
{
- return -ENOTSUPP;
+ int ret;
+ struct qcom_scm_desc desc = {0};
+
+ desc.arginfo = QCOM_SCM_ARGS(1);
+ desc.args[0] = QCOM_SCM_SIP_FNID(svc_id, cmd_id);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD, &desc);
+
+ if (ret)
+ return ret;
+
+ return desc.ret[0];
}
int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
- return -ENOTSUPP;
+ int ret;
+ struct qcom_scm_desc desc = {0};
+
+ if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
+ return -ERANGE;
+
+ desc.args[0] = req[0].addr;
+ desc.args[1] = req[0].val;
+ desc.args[2] = req[1].addr;
+ desc.args[3] = req[1].val;
+ desc.args[4] = req[2].addr;
+ desc.args[5] = req[2].val;
+ desc.args[6] = req[3].addr;
+ desc.args[7] = req[3].val;
+ desc.args[8] = req[4].addr;
+ desc.args[9] = req[4].val;
+ desc.arginfo = QCOM_SCM_ARGS(10);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP, &desc);
+ *resp = desc.ret[0];
+
+ return ret;
+}
+
+bool __qcom_scm_pas_supported(u32 peripheral)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+
+ desc.args[0] = peripheral;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_IS_SUPPORTED_CMD,
+ &desc);
+
+ return ret ? false : !!desc.ret[0];
+}
+
+int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral, const void *metadata, size_t size)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ u32 scm_ret;
+ dma_addr_t mdata_phys;
+ void *mdata_buf;
+
+	dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+
+ /*
+	 * During the scm call, memory protection will be enabled for the
+	 * metadata blob, so make sure it's physically contiguous, 4K aligned
+	 * and non-cacheable to avoid XPU violations.
+ */
+ mdata_buf = dma_alloc_coherent(dev, size, &mdata_phys, GFP_KERNEL);
+ if (!mdata_buf) {
+ pr_err("Allocation of metadata buffer failed.\n");
+ return -ENOMEM;
+ }
+ memcpy(mdata_buf, metadata, size);
+
+ desc.args[0] = peripheral;
+ desc.args[1] = mdata_phys;
+ desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_INIT_IMAGE_CMD,
+ &desc);
+ scm_ret = desc.ret[0];
+
+ dma_free_coherent(dev, size, mdata_buf, mdata_phys);
+ return ret ? : scm_ret;
+}
+
+int __qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ u32 scm_ret;
+
+ desc.args[0] = peripheral;
+ desc.args[1] = addr;
+ desc.args[2] = size;
+ desc.arginfo = QCOM_SCM_ARGS(3);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MEM_SETUP_CMD,
+ &desc);
+ scm_ret = desc.ret[0];
+
+ return ret ? : scm_ret;
+}
+
+int __qcom_scm_pas_auth_and_reset(u32 peripheral)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ u32 scm_ret;
+
+ desc.args[0] = peripheral;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
+ &desc);
+ scm_ret = desc.ret[0];
+
+ return ret ? : scm_ret;
+}
+
+int __qcom_scm_pas_shutdown(u32 peripheral)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ u32 scm_ret;
+
+ desc.args[0] = peripheral;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_SHUTDOWN_CMD,
+ &desc);
+ scm_ret = desc.ret[0];
+
+ return ret ? : scm_ret;
+}
+
+int __qcom_scm_pil_init_image_cmd(u32 proc, u64 image_addr)
+{
+ struct qcom_scm_desc desc = {0};
+ int ret, scm_ret;
+
+ desc.args[0] = proc;
+ desc.args[1] = image_addr;
+ desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, PAS_INIT_IMAGE_CMD, &desc);
+ scm_ret = desc.ret[0];
+
+ if (ret)
+ return ret;
+
+ return scm_ret;
+}
+
+int __qcom_scm_pil_mem_setup_cmd(u32 proc, u64 start_addr, u32 len)
+{
+ struct qcom_scm_desc desc = {0};
+ int ret, scm_ret;
+
+ desc.args[0] = proc;
+ desc.args[1] = start_addr;
+ desc.args[2] = len;
+ desc.arginfo = QCOM_SCM_ARGS(3);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, PAS_MEM_SETUP_CMD, &desc);
+ scm_ret = desc.ret[0];
+
+ if (ret)
+ return ret;
+
+ return scm_ret;
+}
+
+int __qcom_scm_pil_auth_and_reset_cmd(u32 proc)
+{
+ struct qcom_scm_desc desc = {0};
+ int ret, scm_ret;
+
+ desc.args[0] = proc;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, PAS_AUTH_AND_RESET_CMD, &desc);
+ scm_ret = desc.ret[0];
+
+ if (ret)
+ return ret;
+
+ return scm_ret;
+}
+
+int __qcom_scm_pil_shutdown_cmd(u32 proc)
+{
+ struct qcom_scm_desc desc = {0};
+ int ret, scm_ret;
+
+ desc.args[0] = proc;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, PAS_SHUTDOWN_CMD, &desc);
+ scm_ret = desc.ret[0];
+
+ if (ret)
+ return ret;
+
+ return scm_ret;
+}
+
+#define SCM_SVC_UTIL 0x3
+#define SCM_SVC_MP 0xc
+#define IOMMU_DUMP_SMMU_FAULT_REGS 0xc
+
+#define IOMMU_SECURE_PTBL_SIZE 3
+#define IOMMU_SECURE_PTBL_INIT 4
+#define IOMMU_SET_CP_POOL_SIZE 5
+#define IOMMU_SECURE_MAP 6
+#define IOMMU_SECURE_UNMAP 7
+#define IOMMU_SECURE_MAP2 0xb
+#define IOMMU_SECURE_MAP2_FLAT 0x12
+#define IOMMU_SECURE_UNMAP2 0xc
+#define IOMMU_SECURE_UNMAP2_FLAT 0x13
+
+int __qcom_scm_iommu_dump_fault_regs(u32 id, u32 context, u64 addr, u32 len)
+{
+ struct qcom_scm_desc desc = {0};
+
+ desc.args[0] = id;
+ desc.args[1] = context;
+ desc.args[2] = addr;
+ desc.args[3] = len;
+ desc.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL, QCOM_SCM_RW, QCOM_SCM_VAL);
+
+ return qcom_scm_call(SCM_SVC_UTIL, IOMMU_DUMP_SMMU_FAULT_REGS, &desc);
+}
+
+int __qcom_scm_iommu_set_cp_pool_size(u32 size, u32 spare)
+{
+ struct qcom_scm_desc desc = {0};
+
+ desc.args[0] = size;
+ desc.args[1] = spare;
+ desc.arginfo = QCOM_SCM_ARGS(2);
+
+ return qcom_scm_call(SCM_SVC_MP, IOMMU_SET_CP_POOL_SIZE, &desc);
+}
+
+int __qcom_scm_iommu_secure_ptbl_size(u32 spare, int psize[2])
+{
+ struct qcom_scm_desc desc = {0};
+	int ret;
+
+	desc.args[0] = spare;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_SIZE, &desc);
+
+ psize[0] = desc.ret[0];
+ psize[1] = desc.ret[1];
+
+ return ret;
+}
+
+int __qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
+{
+ struct qcom_scm_desc desc = {0};
+ int ret;
+ u64 ptbl_ret;
+
+ desc.args[0] = addr;
+ desc.args[1] = size;
+ desc.args[2] = spare;
+ desc.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL, QCOM_SCM_VAL);
+
+ ret = qcom_scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_INIT, &desc);
+
+ ptbl_ret = desc.ret[0];
+
+ if (ret)
+ return ret;
+
+ if (ptbl_ret)
+ return ptbl_ret;
+
+ return 0;
+}
+
+int __qcom_scm_iommu_secure_map(u64 list, u32 list_size, u32 size,
+ u32 id, u32 ctx_id, u64 va, u32 info_size,
+ u32 flags)
+{
+ struct qcom_scm_desc desc = {0};
+ u32 resp;
+ int ret;
+
+ desc.args[0] = list;
+ desc.args[1] = list_size;
+ desc.args[2] = size;
+ desc.args[3] = id;
+ desc.args[4] = ctx_id;
+ desc.args[5] = va;
+ desc.args[6] = info_size;
+ desc.args[7] = flags;
+ desc.arginfo = QCOM_SCM_ARGS(8, QCOM_SCM_RW, QCOM_SCM_VAL, QCOM_SCM_VAL, QCOM_SCM_VAL,
+ QCOM_SCM_VAL, QCOM_SCM_VAL, QCOM_SCM_VAL, QCOM_SCM_VAL);
+
+ ret = qcom_scm_call(SCM_SVC_MP, IOMMU_SECURE_MAP2_FLAT, &desc);
+
+ resp = desc.ret[0];
+
+ if (ret || resp)
+ return -EINVAL;
+
+ return 0;
+}
+
+int __qcom_scm_iommu_secure_unmap(u32 id, u32 ctx_id, u64 va,
+ u32 size, u32 flags)
+{
+ struct qcom_scm_desc desc = {0};
+
+ desc.args[0] = id;
+ desc.args[1] = ctx_id;
+ desc.args[2] = va;
+ desc.args[3] = size;
+ desc.args[4] = flags;
+ desc.arginfo = QCOM_SCM_ARGS(5);
+
+ return qcom_scm_call(SCM_SVC_MP, IOMMU_SECURE_UNMAP2_FLAT, &desc);
+}
+
+int __qcom_scm_get_feat_version(u32 feat)
+{
+ struct qcom_scm_desc desc = {0};
+ int ret;
+
+ ret = __qcom_scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD);
+ if (ret <= 0)
+ return 0;
+
+ desc.args[0] = feat;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(SCM_SVC_INFO, GET_FEAT_VERSION_CMD, &desc);
+ if (!ret)
+ return desc.ret[0];
+
+ return 0;
+}
+
+#define RESTORE_SEC_CFG 2
+int __qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
+{
+ struct qcom_scm_desc desc = {0};
+ int ret, scm_ret = 0;
+
+ desc.args[0] = device_id;
+ desc.args[1] = spare;
+ desc.arginfo = QCOM_SCM_ARGS(2);
+
+ ret = qcom_scm_call(SCM_SVC_MP, RESTORE_SEC_CFG, &desc);
+
+ scm_ret = desc.ret[0];
+
+ if (ret || scm_ret)
+ return ret ? ret : -EINVAL;
+
+ return 0;
+}
+
+#define TZBSP_VIDEO_SET_STATE 0xa
+int __qcom_scm_set_video_state(u32 state, u32 spare)
+{
+ struct qcom_scm_desc desc = {0};
+ int ret, scm_ret;
+
+ desc.args[0] = state;
+ desc.args[1] = spare;
+ desc.arginfo = QCOM_SCM_ARGS(2);
+
+ ret = qcom_scm_call(SCM_SVC_BOOT, TZBSP_VIDEO_SET_STATE, &desc);
+
+ scm_ret = desc.ret[0];
+
+ if (ret || scm_ret)
+ return ret ? ret : -EINVAL;
+
+ return 0;
+}
+
+#define TZBSP_MEM_PROTECT_VIDEO_VAR 0x8
+
+int __qcom_scm_mem_protect_video_var(u32 start, u32 size, u32 nonpixel_start,
+ u32 nonpixel_size)
+{
+ struct qcom_scm_desc desc = {0};
+ int ret, scm_ret;
+
+ desc.args[0] = start;
+ desc.args[1] = size;
+ desc.args[2] = nonpixel_start;
+ desc.args[3] = nonpixel_size;
+ desc.arginfo = QCOM_SCM_ARGS(4);
+
+ ret = qcom_scm_call(SCM_SVC_MP, TZBSP_MEM_PROTECT_VIDEO_VAR, &desc);
+
+ scm_ret = desc.ret[0];
+
+ if (ret || scm_ret)
+ return ret ? ret : -EINVAL;
+
+ return 0;
+}
+
+#define QCOM_SCM_SVC_INFO 0x6
+static int __init qcom_scm_init(void)
+{
+ int ret;
+ u64 ret1 = 0, x0;
+
+ /* First try a SMC64 call */
+ qcom_scm_version = QCOM_SCM_ARMV8_64;
+ x0 = QCOM_SCM_SIP_FNID(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD) | SMC_ATOMIC_MASK;
+ ret = __qcom_scm_call_armv8_64(x0 | SMC64_MASK, QCOM_SCM_ARGS(1), x0, 0, 0, 0,
+ &ret1, NULL, NULL);
+ if (ret || !ret1) {
+ /* Try SMC32 call */
+ ret1 = 0;
+ ret = __qcom_scm_call_armv8_32(x0, QCOM_SCM_ARGS(1), x0, 0, 0,
+ 0, &ret1, NULL, NULL);
+ if (ret || !ret1)
+ qcom_scm_version = QCOM_SCM_LEGACY;
+ else
+ qcom_scm_version = QCOM_SCM_ARMV8_32;
+	} else {
+		qcom_scm_version_mask = SMC64_MASK;
+	}
+
+ pr_debug("qcom_scm_call: qcom_scm version is %x, mask is %x\n",
+ qcom_scm_version, qcom_scm_version_mask);
+
+ return 0;
}
+early_initcall(qcom_scm_init);
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 45c008d68891..5fff0858f7ba 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -16,6 +16,7 @@
* 02110-1301, USA.
*/
+#include <linux/platform_device.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/types.h>
@@ -94,3 +95,177 @@ int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
return __qcom_scm_hdcp_req(req, req_cnt, resp);
}
EXPORT_SYMBOL(qcom_scm_hdcp_req);
+
+/**
+ * qcom_scm_pas_supported() - Check if the peripheral authentication service is
+ *			      available for the given peripheral
+ * @peripheral: peripheral id
+ *
+ * Returns true if PAS is supported for this peripheral, otherwise false.
+ */
+bool qcom_scm_pas_supported(u32 peripheral)
+{
+ int ret;
+
+ ret = __qcom_scm_is_call_available(QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PAS_IS_SUPPORTED_CMD);
+ if (ret <= 0)
+ return false;
+
+ return __qcom_scm_pas_supported(peripheral);
+}
+EXPORT_SYMBOL(qcom_scm_pas_supported);
+
+/**
+ * qcom_scm_pas_init_image() - Initialize peripheral authentication service
+ * state machine for a given peripheral, using the
+ * metadata
+ * @dev: device used to allocate the DMA-coherent metadata buffer
+ * @peripheral: peripheral id
+ * @metadata: pointer to memory containing ELF header, program header table
+ * and optional blob of data used for authenticating the metadata
+ * and the rest of the firmware
+ * @size: size of the metadata
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_init_image(struct device *dev, u32 peripheral, const void *metadata, size_t size)
+{
+ return __qcom_scm_pas_init_image(dev, peripheral, metadata, size);
+}
+EXPORT_SYMBOL(qcom_scm_pas_init_image);
+
+/**
+ * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
+ * for firmware loading
+ * @peripheral: peripheral id
+ * @addr: start address of memory area to prepare
+ * @size: size of the memory area to prepare
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+{
+ return __qcom_scm_pas_mem_setup(peripheral, addr, size);
+}
+EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
+
+/**
+ * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
+ * and reset the remote processor
+ * @peripheral: peripheral id
+ *
+ * Return 0 on success.
+ */
+int qcom_scm_pas_auth_and_reset(u32 peripheral)
+{
+ return __qcom_scm_pas_auth_and_reset(peripheral);
+}
+EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
+
+/**
+ * qcom_scm_pas_shutdown() - Shut down the remote processor
+ * @peripheral: peripheral id
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_shutdown(u32 peripheral)
+{
+ return __qcom_scm_pas_shutdown(peripheral);
+}
+EXPORT_SYMBOL(qcom_scm_pas_shutdown);
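+
+/*
+ * Typical PAS boot sequence (illustrative sketch; dev, peripheral, mdata,
+ * mdata_size, mem_phys and mem_size are placeholders supplied by the
+ * caller):
+ *
+ *	if (!qcom_scm_pas_supported(peripheral))
+ *		return -ENOTSUPP;
+ *
+ *	ret = qcom_scm_pas_init_image(dev, peripheral, mdata, mdata_size);
+ *	if (!ret)
+ *		ret = qcom_scm_pas_mem_setup(peripheral, mem_phys, mem_size);
+ *	if (!ret)
+ *		ret = qcom_scm_pas_auth_and_reset(peripheral);
+ *
+ *	and on teardown:
+ *	qcom_scm_pas_shutdown(peripheral);
+ */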
+
+int qcom_scm_pil_init_image_cmd(u32 proc, u64 image_addr)
+{
+ return __qcom_scm_pil_init_image_cmd(proc, image_addr);
+}
+EXPORT_SYMBOL(qcom_scm_pil_init_image_cmd);
+
+int qcom_scm_pil_mem_setup_cmd(u32 proc, u64 start_addr, u32 len)
+{
+ return __qcom_scm_pil_mem_setup_cmd(proc, start_addr, len);
+}
+EXPORT_SYMBOL(qcom_scm_pil_mem_setup_cmd);
+
+int qcom_scm_pil_auth_and_reset_cmd(u32 proc)
+{
+ return __qcom_scm_pil_auth_and_reset_cmd(proc);
+}
+EXPORT_SYMBOL(qcom_scm_pil_auth_and_reset_cmd);
+
+int qcom_scm_pil_shutdown_cmd(u32 proc)
+{
+ return __qcom_scm_pil_shutdown_cmd(proc);
+}
+EXPORT_SYMBOL(qcom_scm_pil_shutdown_cmd);
+
+int qcom_scm_iommu_dump_fault_regs(u32 id, u32 context, u64 addr, u32 len)
+{
+ return __qcom_scm_iommu_dump_fault_regs(id, context, addr, len);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_dump_fault_regs);
+
+int qcom_scm_iommu_set_cp_pool_size(u32 size, u32 spare)
+{
+ return __qcom_scm_iommu_set_cp_pool_size(size, spare);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_set_cp_pool_size);
+
+int qcom_scm_iommu_secure_ptbl_size(u32 spare, int psize[2])
+{
+ return __qcom_scm_iommu_secure_ptbl_size(spare, psize);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);
+
+int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
+{
+ return __qcom_scm_iommu_secure_ptbl_init(addr, size, spare);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
+
+int qcom_scm_iommu_secure_map(u64 list, u32 list_size, u32 size,
+ u32 id, u32 ctx_id, u64 va, u32 info_size,
+ u32 flags)
+{
+ return __qcom_scm_iommu_secure_map(list, list_size, size, id,
+ ctx_id, va, info_size, flags);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_secure_map);
+
+int qcom_scm_iommu_secure_unmap(u32 id, u32 ctx_id, u64 va, u32 size, u32 flags)
+{
+ return __qcom_scm_iommu_secure_unmap(id, ctx_id, va, size, flags);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_secure_unmap);
+
+int qcom_scm_is_call_available(u32 svc_id, u32 cmd_id)
+{
+ return __qcom_scm_is_call_available(svc_id, cmd_id);
+}
+EXPORT_SYMBOL(qcom_scm_is_call_available);
+
+int qcom_scm_get_feat_version(u32 feat)
+{
+ return __qcom_scm_get_feat_version(feat);
+}
+EXPORT_SYMBOL(qcom_scm_get_feat_version);
+
+int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
+{
+ return __qcom_scm_restore_sec_cfg(device_id, spare);
+}
+EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);
+
+int qcom_scm_set_video_state(u32 state, u32 spare)
+{
+ return __qcom_scm_set_video_state(state, spare);
+}
+EXPORT_SYMBOL(qcom_scm_set_video_state);
+
+int qcom_scm_mem_protect_video_var(u32 start, u32 size,
+ u32 nonpixel_start,
+ u32 nonpixel_size)
+{
+ return __qcom_scm_mem_protect_video_var(start, size, nonpixel_start,
+ nonpixel_size);
+}
+EXPORT_SYMBOL(qcom_scm_mem_protect_video_var);
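Taken together, the PAS exports above back a typical peripheral boot flow: validate the firmware metadata, stake out the memory region, let the caller copy the segments in, then authenticate and release the remote processor. A minimal sketch, assuming the qcom_scm_pas_init_image() wrapper exported earlier in this file; all caller-side names are illustrative:

	static int pil_boot_sketch(u32 pas_id, const void *metadata, size_t meta_len,
				   phys_addr_t mem, phys_addr_t mem_size)
	{
		int ret;

		ret = qcom_scm_pas_init_image(pas_id, metadata, meta_len);
		if (ret)
			return ret;

		ret = qcom_scm_pas_mem_setup(pas_id, mem, mem_size);
		if (ret)
			return ret;

		/* the caller copies the firmware segments into 'mem' here */

		return qcom_scm_pas_auth_and_reset(pas_id);
	}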
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 2cce75c08b99..5d7f0ef6fee7 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -36,6 +36,18 @@ extern int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id);
extern int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
u32 *resp);
+#define QCOM_SCM_SVC_PIL 0x2
+#define QCOM_SCM_PAS_INIT_IMAGE_CMD 0x1
+#define QCOM_SCM_PAS_MEM_SETUP_CMD 0x2
+#define QCOM_SCM_PAS_AUTH_AND_RESET_CMD 0x5
+#define QCOM_SCM_PAS_SHUTDOWN_CMD 0x6
+#define QCOM_SCM_PAS_IS_SUPPORTED_CMD 0x7
+extern bool __qcom_scm_pas_supported(u32 peripheral);
+extern int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral, const void *metadata, size_t size);
+extern int __qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size);
+extern int __qcom_scm_pas_auth_and_reset(u32 peripheral);
+extern int __qcom_scm_pas_shutdown(u32 peripheral);
+
/* common error codes */
#define QCOM_SCM_ENOMEM -5
#define QCOM_SCM_EOPNOTSUPP -4
@@ -43,5 +55,65 @@ extern int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
#define QCOM_SCM_EINVAL_ARG -2
#define QCOM_SCM_ERROR -1
#define QCOM_SCM_INTERRUPTED 1
+#define QCOM_SCM_EBUSY -55
+#define QCOM_SCM_V2_EBUSY -12
+
+static inline int qcom_scm_remap_error(int err)
+{
+ switch (err) {
+ case QCOM_SCM_ERROR:
+ return -EIO;
+ case QCOM_SCM_EINVAL_ADDR:
+ case QCOM_SCM_EINVAL_ARG:
+ return -EINVAL;
+ case QCOM_SCM_EOPNOTSUPP:
+ return -EOPNOTSUPP;
+ case QCOM_SCM_ENOMEM:
+ return -ENOMEM;
+ case QCOM_SCM_EBUSY:
+ return QCOM_SCM_EBUSY;
+ case QCOM_SCM_V2_EBUSY:
+ return QCOM_SCM_V2_EBUSY;
+ }
+ return -EINVAL;
+}
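The helper maps the firmware's return codes onto Linux errno values; the two busy codes are passed through unchanged so callers can recognize a retryable condition. A sketch of the intended call-site pattern, where the low-level smc() call is purely illustrative:

	ret = smc(...);		/* raw return code from the secure monitor */
	if (ret < 0)
		return qcom_scm_remap_error(ret);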
+
+enum scm_cmd {
+ PAS_INIT_IMAGE_CMD = 1,
+ PAS_MEM_SETUP_CMD,
+ PAS_AUTH_AND_RESET_CMD = 5,
+ PAS_SHUTDOWN_CMD,
+};
+
+#define SCM_SVC_BOOT 0x1
+#define SCM_SVC_PIL 0x2
+#define SCM_SVC_INFO 0x6
+
+#define GET_FEAT_VERSION_CMD 3
+
+extern int __qcom_scm_pil_init_image_cmd(u32 proc, u64 image_addr);
+extern int __qcom_scm_pil_mem_setup_cmd(u32 proc, u64 start_addr, u32 len);
+extern int __qcom_scm_pil_auth_and_reset_cmd(u32 proc);
+extern int __qcom_scm_pil_shutdown_cmd(u32 proc);
+
+extern int __qcom_scm_iommu_dump_fault_regs(u32 id, u32 context, u64 addr,
+ u32 len);
+extern int __qcom_scm_iommu_set_cp_pool_size(u32 size, u32 spare);
+extern int __qcom_scm_iommu_secure_ptbl_size(u32 spare, int psize[2]);
+extern int __qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
+extern int __qcom_scm_iommu_secure_map(u64 list, u32 list_size, u32 size,
+ u32 id, u32 ctx_id, u64 va,
+ u32 info_size, u32 flags);
+extern int __qcom_scm_iommu_secure_unmap(u32 id, u32 ctx_id, u64 va,
+ u32 size, u32 flags);
+
+extern int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id);
+extern int __qcom_scm_get_feat_version(u32 feat);
+extern int __qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
+extern int __qcom_scm_set_video_state(u32 state, u32 spare);
+extern int __qcom_scm_mem_protect_video_var(u32 start, u32 size,
+ u32 nonpixel_start,
+ u32 nonpixel_size);
#endif
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 8949b3f6f74d..c7bb70a3894e 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -354,6 +354,21 @@ config GPIO_PXA
help
Say yes here to support the PXA GPIO device
+config GPIO_QCOM_SMSM
+ bool "Qualcomm Shared Memory State Machine"
+ depends on QCOM_SMEM
+ help
+ Say yes here to support the Qualcomm Shared Memory State Machine.
+ The state machine is represented by bits in shared memory and is
+ exposed to the system as GPIOs.
+
+config GPIO_QCOM_SMP2P
+ bool "Qualcomm Shared Memory Point to Point support"
+ depends on QCOM_SMEM
+ help
+ Say yes here to support the Qualcomm Shared Memory Point to Point
+ protocol, exposed to the system as GPIOs.
+
config GPIO_RCAR
tristate "Renesas R-Car GPIO"
depends on ARCH_SHMOBILE || COMPILE_TEST
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index f79a7c482a99..cc5c4221ccf2 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -75,6 +75,8 @@ obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o
obj-$(CONFIG_GPIO_PCH) += gpio-pch.o
obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
+obj-$(CONFIG_GPIO_QCOM_SMSM) += gpio-qcom-smsm.o
+obj-$(CONFIG_GPIO_QCOM_SMP2P) += gpio-qcom-smp2p.o
obj-$(CONFIG_GPIO_RC5T583) += gpio-rc5t583.o
obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o
obj-$(CONFIG_GPIO_RCAR) += gpio-rcar.o
diff --git a/drivers/gpio/gpio-qcom-smp2p.c b/drivers/gpio/gpio-qcom-smp2p.c
new file mode 100644
index 000000000000..6e4d09a40c4a
--- /dev/null
+++ b/drivers/gpio/gpio-qcom-smp2p.c
@@ -0,0 +1,591 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+#include <linux/list.h>
+#include <linux/gpio.h>
+#include <linux/mfd/syscon.h>
+
+#include <linux/delay.h>
+
+#include <linux/soc/qcom/smem.h>
+
+/*
+ * The Shared Memory Point to Point (SMP2P) protocol facilitates communication
+ * of a single 32-bit value between two processors. Each value has a single
+ * writer (the local side) and a single reader (the remote side). Values are
+ * uniquely identified in the system by the directed edge (local processor ID
+ * to remote processor ID) and a string identifier.
+ *
+ * Each processor is responsible for creating the outgoing SMEM items and each
+ * item is writable by the local processor and readable by the remote
+ * processor. By using two separate SMEM items that are single-reader and
+ * single-writer, SMP2P does not require any remote locking mechanisms.
+ *
+ * The driver uses the Linux GPIO and interrupt framework to expose a virtual
+ * GPIO for each outbound entry and a virtual interrupt controller for each
+ * inbound entry.
+ */
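On the consumer side these virtual pins go through the ordinary kernel frameworks. A minimal sketch, assuming the gpio descriptor and irq number were obtained through standard gpiod/DT interrupt lookups; all names are illustrative:

	static int smp2p_consumer_sketch(struct gpio_desc *smp2p_out, int smp2p_in_irq,
					 irq_handler_t handler, void *data)
	{
		/* outbound entry: raise a bit toward the remote processor */
		gpiod_set_value(smp2p_out, 1);

		/* inbound entry: fire when the remote toggles its bit */
		return request_threaded_irq(smp2p_in_irq, NULL, handler,
					    IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					    "smp2p-in", data);
	}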
+
+#define SMP2P_MAX_ENTRY 16
+#define SMP2P_MAX_ENTRY_NAME 16
+
+#define SMP2P_FEATURE_SSR_ACK 0x1
+
+#define SMP2P_MAGIC 0x504d5324
+
+/**
+ * struct smp2p_smem_item - in memory communication structure
+ * @magic: magic number
+ * @version: version - must be 1
+ * @features: features flag - currently unused
+ * @local_pid: processor id of sending end
+ * @remote_pid: processor id of receiving end
+ * @total_entries: number of entries - always SMP2P_MAX_ENTRY
+ * @valid_entries: number of allocated entries
+ * @flags: reserved field - currently unused
+ * @entries: individual communication entries
+ * @name: name of the entry
+ * @value: content of the entry
+ */
+struct smp2p_smem_item {
+ u32 magic;
+ u8 version;
+ unsigned features:24;
+ u16 local_pid;
+ u16 remote_pid;
+ u16 total_entries;
+ u16 valid_entries;
+ u32 flags;
+
+ struct {
+ u8 name[SMP2P_MAX_ENTRY_NAME];
+ u32 value;
+ } entries[SMP2P_MAX_ENTRY];
+} __packed;
+
+/**
+ * struct smp2p_entry - driver context matching one entry
+ * @node: list entry to keep track of allocated entries
+ * @smp2p: reference to the device driver context
+ * @name: name of the entry, to match against smp2p_smem_item
+ * @value: pointer to smp2p_smem_item entry value
+ * @last_value: last handled value
+ * @domain: irq_domain for inbound entries
+ * @irq_enabled: bitmap to track enabled irq bits
+ * @irq_rising: bitmap to mark irq bits for rising detection
+ * @irq_falling: bitmap to mark irq bits for falling detection
+ * @chip: gpio_chip for outbound entries
+ */
+struct smp2p_entry {
+ struct list_head node;
+ struct qcom_smp2p *smp2p;
+
+ const char *name;
+ u32 *value;
+ u32 last_value;
+
+ struct irq_domain *domain;
+ DECLARE_BITMAP(irq_enabled, 32);
+ DECLARE_BITMAP(irq_rising, 32);
+ DECLARE_BITMAP(irq_falling, 32);
+
+ struct gpio_chip chip;
+};
+
+#define SMP2P_INBOUND 0
+#define SMP2P_OUTBOUND 1
+
+/**
+ * struct qcom_smp2p - device driver context
+ * @dev: device driver handle
+ * @in: pointer to the inbound smem item
+ * @out: pointer to the outbound smem item
+ * @smem_items: ids of the two smem items
+ * @valid_entries: already scanned inbound entries
+ * @local_pid: processor id of the inbound edge
+ * @remote_pid: processor id of the outbound edge
+ * @ipc_regmap: regmap for the outbound ipc
+ * @ipc_offset: offset within the regmap
+ * @ipc_bit: bit in regmap@offset to kick to signal remote processor
+ * @inbound: list of inbound entries
+ * @outbound: list of outbound entries
+ */
+struct qcom_smp2p {
+ struct device *dev;
+
+ struct smp2p_smem_item *in;
+ struct smp2p_smem_item *out;
+
+ unsigned smem_items[SMP2P_OUTBOUND + 1];
+
+ unsigned valid_entries;
+
+ unsigned local_pid;
+ unsigned remote_pid;
+
+ struct regmap *ipc_regmap;
+ int ipc_offset;
+ int ipc_bit;
+
+ struct list_head inbound;
+ struct list_head outbound;
+};
+
+static void qcom_smp2p_kick(struct qcom_smp2p *smp2p)
+{
+ /* Make sure any updated data is written before the kick */
+ wmb();
+ regmap_write(smp2p->ipc_regmap, smp2p->ipc_offset, BIT(smp2p->ipc_bit));
+}
+
+static irqreturn_t qcom_smp2p_intr(int irq, void *data)
+{
+ struct smp2p_smem_item *in;
+ struct smp2p_entry *entry;
+ struct qcom_smp2p *smp2p = data;
+ unsigned smem_id = smp2p->smem_items[SMP2P_INBOUND];
+ unsigned pid = smp2p->remote_pid;
+ size_t size;
+ int irq_pin;
+ u32 status;
+ u32 val;
+ int ret;
+ int i;
+ u8 tmp_name[SMP2P_MAX_ENTRY_NAME];
+
+ in = smp2p->in;
+
+ /* Acquire smem item, if not already found */
+ if (!in) {
+ ret = qcom_smem_get(pid, smem_id, (void **)&in, &size);
+ if (ret < 0) {
+ dev_err(smp2p->dev,
+ "Unable to acquire remote smp2p item\n");
+ return IRQ_HANDLED;
+ }
+
+ smp2p->in = in;
+ }
+
+ /* Match newly created entries */
+ for (i = smp2p->valid_entries; i < in->valid_entries; i++) {
+ list_for_each_entry(entry, &smp2p->inbound, node) {
+ memcpy_fromio(tmp_name, in->entries[i].name,
+ SMP2P_MAX_ENTRY_NAME);
+ if (!strcmp(tmp_name, entry->name)) {
+ entry->value = &in->entries[i].value;
+ break;
+ }
+ }
+ }
+ smp2p->valid_entries = i;
+
+ /* Fire interrupts based on any value changes */
+ list_for_each_entry(entry, &smp2p->inbound, node) {
+ /* Ignore entries not yet allocated by the remote side */
+ if (!entry->value)
+ continue;
+
+ val = *entry->value;
+
+ status = val ^ entry->last_value;
+ entry->last_value = val;
+
+ /* No changes of this entry? */
+ if (!status)
+ continue;
+
+ for_each_set_bit(i, entry->irq_enabled, 32) {
+ if (!(status & BIT(i)))
+ continue;
+
+ if (val & BIT(i)) {
+ if (test_bit(i, entry->irq_rising)) {
+ irq_pin = irq_find_mapping(entry->domain, i);
+ handle_nested_irq(irq_pin);
+ }
+ } else {
+ if (test_bit(i, entry->irq_falling)) {
+ irq_pin = irq_find_mapping(entry->domain, i);
+ handle_nested_irq(irq_pin);
+ }
+ }
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void smp2p_mask_irq(struct irq_data *irqd)
+{
+ struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+ clear_bit(irq, entry->irq_enabled);
+}
+
+static void smp2p_unmask_irq(struct irq_data *irqd)
+{
+ struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+ set_bit(irq, entry->irq_enabled);
+}
+
+static int smp2p_set_irq_type(struct irq_data *irqd, unsigned int type)
+{
+ struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+ if (!(type & IRQ_TYPE_EDGE_BOTH))
+ return -EINVAL;
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ set_bit(irq, entry->irq_rising);
+ else
+ clear_bit(irq, entry->irq_rising);
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ set_bit(irq, entry->irq_falling);
+ else
+ clear_bit(irq, entry->irq_falling);
+
+ return 0;
+}
+
+static struct irq_chip smp2p_irq_chip = {
+ .name = "smp2p",
+ .irq_mask = smp2p_mask_irq,
+ .irq_unmask = smp2p_unmask_irq,
+ .irq_set_type = smp2p_set_irq_type,
+};
+
+static int smp2p_irq_map(struct irq_domain *d,
+ unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct smp2p_entry *entry = d->host_data;
+
+ irq_set_chip_and_handler(irq, &smp2p_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, entry);
+ irq_set_nested_thread(irq, 1);
+
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops smp2p_irq_ops = {
+ .map = smp2p_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static int smp2p_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset,
+ int value)
+{
+ struct smp2p_entry *entry = container_of(chip, struct smp2p_entry, chip);
+
+ if (value)
+ *entry->value |= BIT(offset);
+ else
+ *entry->value &= ~BIT(offset);
+
+ qcom_smp2p_kick(entry->smp2p);
+
+ return 0;
+}
+
+static void smp2p_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ smp2p_gpio_direction_output(chip, offset, value);
+}
+
+static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
+ struct smp2p_entry *entry,
+ struct device_node *node)
+{
+ entry->domain = irq_domain_add_linear(node, 32, &smp2p_irq_ops, entry);
+ if (!entry->domain) {
+ dev_err(smp2p->dev, "failed to add irq_domain\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p,
+ struct smp2p_entry *entry,
+ struct device_node *node)
+{
+ struct smp2p_smem_item *out = smp2p->out;
+ struct gpio_chip *chip;
+ int ret;
+
+ /* Allocate an entry from the smem item */
+ memcpy_toio(out->entries[out->valid_entries].name, entry->name, SMP2P_MAX_ENTRY_NAME);
+
+ /* Make the logical entry reference the physical value */
+ entry->value = &out->entries[out->valid_entries].value;
+
+ out->valid_entries++;
+
+ chip = &entry->chip;
+ chip->base = -1;
+ chip->ngpio = 32;
+ chip->label = entry->name;
+ chip->dev = smp2p->dev;
+ chip->owner = THIS_MODULE;
+ chip->of_node = node;
+
+ chip->set = smp2p_gpio_set;
+ chip->direction_output = smp2p_gpio_direction_output;
+
+ ret = gpiochip_add(chip);
+ if (ret)
+ dev_err(smp2p->dev, "failed register gpiochip\n");
+
+ return 0;
+}
+
+static int qcom_smp2p_alloc_outbound_item(struct qcom_smp2p *smp2p)
+{
+ struct smp2p_smem_item *out;
+ unsigned smem_id = smp2p->smem_items[SMP2P_OUTBOUND];
+ unsigned pid = smp2p->remote_pid;
+ int ret;
+
+ ret = qcom_smem_alloc(pid, smem_id, sizeof(*out));
+ if (ret < 0 && ret != -EEXIST) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(smp2p->dev,
+ "unable to allocate local smp2p item\n");
+ return ret;
+ }
+
+ ret = qcom_smem_get(pid, smem_id, (void **)&out, NULL);
+ if (ret < 0) {
+ dev_err(smp2p->dev, "Unable to acquire local smp2p item\n");
+ return ret;
+ }
+
+ memset_io(out, 0, sizeof(*out));
+ out->magic = SMP2P_MAGIC;
+ out->local_pid = smp2p->local_pid;
+ out->remote_pid = smp2p->remote_pid;
+ out->total_entries = SMP2P_MAX_ENTRY;
+ out->valid_entries = 0;
+
+ /*
+ * Make sure the rest of the header is written before we validate the
+ * item by writing a valid version number.
+ */
+ wmb();
+ out->version = 1;
+
+ qcom_smp2p_kick(smp2p);
+
+ smp2p->out = out;
+
+ return 0;
+}
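The item is published with a write-then-barrier-then-version sequence, so a reader must gate on the header before trusting any entries. A reader-side sketch, illustrative of what the remote end is expected to do:

	if (in->magic != SMP2P_MAGIC || in->version != 1)
		return -EINVAL;		/* not yet published by the writer */
	rmb();				/* pairs with the writer's wmb() */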
+
+static int smp2p_parse_ipc(struct qcom_smp2p *smp2p)
+{
+ struct device_node *syscon;
+ struct device *dev = smp2p->dev;
+ const char *key;
+ int ret;
+
+ syscon = of_parse_phandle(dev->of_node, "qcom,ipc", 0);
+ if (!syscon) {
+ dev_err(dev, "no qcom,ipc node\n");
+ return -ENODEV;
+ }
+
+ smp2p->ipc_regmap = syscon_node_to_regmap(syscon);
+ if (IS_ERR(smp2p->ipc_regmap))
+ return PTR_ERR(smp2p->ipc_regmap);
+
+ key = "qcom,ipc";
+ ret = of_property_read_u32_index(dev->of_node, key, 1, &smp2p->ipc_offset);
+ if (ret < 0) {
+ dev_err(dev, "no offset in %s\n", key);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(dev->of_node, key, 2, &smp2p->ipc_bit);
+ if (ret < 0) {
+ dev_err(dev, "no bit in %s\n", key);
+ return -EINVAL;
+ }
+
+ return 0;
+}
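The property parsed here is a three-cell reference, for example qcom,ipc = <&apcs 8 14>;, that is, a phandle to the IPC syscon followed by the register offset and the bit to set, matching the two indices read above; the concrete values are illustrative.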
+
+static int qcom_smp2p_probe(struct platform_device *pdev)
+{
+ struct smp2p_entry *entry;
+ struct device_node *node;
+ struct qcom_smp2p *smp2p;
+ const char *key;
+ int irq;
+ int ret;
+
+ smp2p = devm_kzalloc(&pdev->dev, sizeof(*smp2p), GFP_KERNEL);
+ if (!smp2p)
+ return -ENOMEM;
+
+ smp2p->dev = &pdev->dev;
+ INIT_LIST_HEAD(&smp2p->inbound);
+ INIT_LIST_HEAD(&smp2p->outbound);
+
+ platform_set_drvdata(pdev, smp2p);
+
+ ret = smp2p_parse_ipc(smp2p);
+ if (ret)
+ return ret;
+
+ key = "qcom,smem";
+ ret = of_property_read_u32_array(pdev->dev.of_node, key,
+ smp2p->smem_items, 2);
+ if (ret)
+ return ret;
+
+ key = "qcom,local-pid";
+ ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->local_pid);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to read %s\n", key);
+ return -EINVAL;
+ }
+
+ key = "qcom,remote-pid";
+ ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->remote_pid);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to read %s\n", key);
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "unable to acquire smp2p interrupt\n");
+ return irq;
+ }
+
+ ret = qcom_smp2p_alloc_outbound_item(smp2p);
+ if (ret < 0)
+ return ret;
+
+ for_each_available_child_of_node(pdev->dev.of_node, node) {
+ entry = devm_kzalloc(&pdev->dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ ret = -ENOMEM;
+ goto unwind_interfaces;
+ }
+
+ entry->smp2p = smp2p;
+
+ ret = of_property_read_string(node, "qcom,entry-name", &entry->name);
+ if (ret < 0)
+ goto unwind_interfaces;
+
+ if (of_property_read_bool(node, "qcom,inbound")) {
+ ret = qcom_smp2p_inbound_entry(smp2p, entry, node);
+ if (ret < 0)
+ goto unwind_interfaces;
+
+ list_add(&entry->node, &smp2p->inbound);
+ } else if (of_property_read_bool(node, "qcom,outbound")) {
+ ret = qcom_smp2p_outbound_entry(smp2p, entry, node);
+ if (ret < 0)
+ goto unwind_interfaces;
+
+ list_add(&entry->node, &smp2p->outbound);
+ } else {
+ dev_err(&pdev->dev, "neither inbound nor outbound\n");
+ ret = -EINVAL;
+ goto unwind_interfaces;
+ }
+ }
+
+ /* Kick the outgoing edge after allocating entries */
+ qcom_smp2p_kick(smp2p);
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ NULL, qcom_smp2p_intr,
+ IRQF_ONESHOT,
+ "smp2p", (void *)smp2p);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request interrupt\n");
+ goto unwind_interfaces;
+ }
+
+ return 0;
+
+unwind_interfaces:
+ list_for_each_entry(entry, &smp2p->inbound, node)
+ irq_domain_remove(entry->domain);
+
+ list_for_each_entry(entry, &smp2p->outbound, node)
+ gpiochip_remove(&entry->chip);
+
+ smp2p->out->valid_entries = 0;
+
+ return ret;
+}
+
+static int qcom_smp2p_remove(struct platform_device *pdev)
+{
+ struct qcom_smp2p *smp2p = platform_get_drvdata(pdev);
+ struct smp2p_entry *entry;
+
+ list_for_each_entry(entry, &smp2p->inbound, node)
+ irq_domain_remove(entry->domain);
+
+ list_for_each_entry(entry, &smp2p->outbound, node)
+ gpiochip_remove(&entry->chip);
+
+ smp2p->out->valid_entries = 0;
+
+ return 0;
+}
+
+static const struct of_device_id qcom_smp2p_of_match[] = {
+ { .compatible = "qcom,smp2p" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smp2p_of_match);
+
+static struct platform_driver qcom_smp2p_driver = {
+ .probe = qcom_smp2p_probe,
+ .remove = qcom_smp2p_remove,
+ .driver = {
+ .name = "qcom_smp2p",
+ .of_match_table = qcom_smp2p_of_match,
+ },
+};
+module_platform_driver(qcom_smp2p_driver);
+
+MODULE_DESCRIPTION("Qualcomm Shared Memory Point to Point driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-qcom-smsm.c b/drivers/gpio/gpio-qcom-smsm.c
new file mode 100644
index 000000000000..3bbe9bb73f84
--- /dev/null
+++ b/drivers/gpio/gpio-qcom-smsm.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) 2014, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/hwspinlock.h>
+#include <linux/regmap.h>
+#include <linux/gpio.h>
+#include <linux/mfd/syscon.h>
+
+#include <linux/delay.h>
+
+#include <linux/soc/qcom/smem.h>
+
+#define SMSM_APPS_STATE 0
+#define SMEM_SMSM_SHARED_STATE 85
+
+#define SMSM_MAX_STATES 8
+
+struct qcom_smsm_state {
+ unsigned state_id;
+ struct gpio_chip chip;
+
+ int irq;
+
+ struct regmap *ipc_regmap;
+ int ipc_bit;
+ int ipc_offset;
+};
+
+struct qcom_smsm {
+ struct device *dev;
+
+ u32 *shared_state;
+ size_t shared_state_size;
+
+ struct qcom_smsm_state states[SMSM_MAX_STATES];
+};
+
+#if 0
+int qcom_smsm_change_state(struct qcom_smsm *smsm, u32 clear_mask, u32 set_mask)
+{
+ u32 state;
+
+ dev_dbg(smsm->dev, "SMSM_APPS_STATE clear 0x%x set 0x%x\n", clear_mask, set_mask);
+ print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_OFFSET, 16, 1, smsm->shared_state, smsm->shared_state_size, true);
+
+ state = readl(&smsm->shared_state[SMSM_APPS_STATE]);
+ state &= ~clear_mask;
+ state |= set_mask;
+ writel(state, &smsm->shared_state[SMSM_APPS_STATE]);
+
+ print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_OFFSET, 16, 1, smsm->shared_state, smsm->shared_state_size, true);
+
+ // qcom_smem_signal(-1, smsm->smem, smsm->signal_offset, smsm->signal_bit);
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_smsm_change_state);
+#endif
+
+static struct qcom_smsm_state *to_smsm_state(struct gpio_chip *chip)
+{
+ return container_of(chip, struct qcom_smsm_state, chip);
+}
+
+static struct qcom_smsm *to_smsm(struct qcom_smsm_state *state)
+{
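+ /* Valid because 'state' always points at smsm->states[state->state_id] */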
+ return container_of(state, struct qcom_smsm, states[state->state_id]);
+}
+
+static int smsm_gpio_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ struct qcom_smsm_state *state = to_smsm_state(chip);
+
+ if (state->state_id == SMSM_APPS_STATE)
+ return -EINVAL;
+ return 0;
+}
+
+static int smsm_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset,
+ int value)
+{
+ struct qcom_smsm_state *ipc_state;
+ struct qcom_smsm_state *state = to_smsm_state(chip);
+ struct qcom_smsm *smsm = to_smsm(state);
+ unsigned word;
+ unsigned bit;
+ u32 val;
+ int i;
+
+ /* Only SMSM_APPS_STATE supports writing */
+ if (state->state_id != SMSM_APPS_STATE)
+ return -EINVAL;
+
+ offset += state->state_id * 32;
+
+ word = offset / 32;
+ bit = offset % 32;
+
+ val = readl(smsm->shared_state + word);
+ if (value)
+ val |= BIT(bit);
+ else
+ val &= ~BIT(bit);
+ writel(val, smsm->shared_state + word);
+
+ /* XXX: send out interrupts */
+ for (i = 0; i < SMSM_MAX_STATES; i++) {
+ ipc_state = &smsm->states[i];
+ if (!ipc_state->ipc_regmap)
+ continue;
+
+ regmap_write(ipc_state->ipc_regmap, ipc_state->ipc_offset, BIT(ipc_state->ipc_bit));
+ }
+
+ dev_err(smsm->dev, "set %d %d\n", offset, value);
+
+ return 0;
+}
+
+static int smsm_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct qcom_smsm_state *state = to_smsm_state(chip);
+ struct qcom_smsm *smsm = to_smsm(state);
+ unsigned word;
+ unsigned bit;
+ u32 val;
+
+ offset += state->state_id * 32;
+
+ word = offset / 32;
+ bit = offset % 32;
+
+ val = readl(smsm->shared_state + word);
+
+ return !!(val & BIT(bit));
+}
+
+static void smsm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ smsm_gpio_direction_output(chip, offset, value);
+}
+
+static int smsm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ return -EINVAL;
+}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+static void smsm_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ struct qcom_smsm_state *state = to_smsm_state(chip);
+ struct qcom_smsm *smsm = to_smsm(state);
+ unsigned i;
+ u32 val;
+
+ val = readl(smsm->shared_state + state->state_id);
+
+ for (i = 0; i < 32; i++) {
+ if (val & BIT(i))
+ seq_puts(s, "1");
+ else
+ seq_puts(s, "0");
+
+ if (i == 7 || i == 15 || i == 23)
+ seq_puts(s, " ");
+ }
+ seq_puts(s, "\n");
+}
+
+#else
+#define smsm_gpio_dbg_show NULL
+#endif
+
+static struct gpio_chip smsm_gpio_template = {
+ .direction_input = smsm_gpio_direction_input,
+ .direction_output = smsm_gpio_direction_output,
+ .get = smsm_gpio_get,
+ .set = smsm_gpio_set,
+ .to_irq = smsm_gpio_to_irq,
+ .dbg_show = smsm_gpio_dbg_show,
+ .owner = THIS_MODULE,
+};
+
+static int qcom_smsm_probe(struct platform_device *pdev)
+{
+ struct qcom_smsm_state *state;
+ struct device_node *syscon_np;
+ struct device_node *node;
+ struct qcom_smsm *smsm;
+ char *key;
+ u32 sid;
+ int ret;
+
+ smsm = devm_kzalloc(&pdev->dev, sizeof(*smsm), GFP_KERNEL);
+ if (!smsm)
+ return -ENOMEM;
+ smsm->dev = &pdev->dev;
+
+ ret = qcom_smem_alloc(-1, SMEM_SMSM_SHARED_STATE, 8 * sizeof(uint32_t));
+ if (ret < 0 && ret != -EEXIST) {
+ dev_err(&pdev->dev, "unable to allocate shared state entry\n");
+ return ret;
+ }
+
+ ret = qcom_smem_get(-1, SMEM_SMSM_SHARED_STATE,
+ (void**)&smsm->shared_state,
+ &smsm->shared_state_size);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to acquire shared state entry\n");
+ return ret;
+ }
+
+ dev_err(smsm->dev, "SMEM_SMSM_SHARED_STATE: %d, %zu\n", ret, smsm->shared_state_size);
+ print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_OFFSET, 16, 1, smsm->shared_state, smsm->shared_state_size, true);
+
+ for_each_child_of_node(pdev->dev.of_node, node) {
+ key = "reg";
+ ret = of_property_read_u32(node, key, &sid);
+ if (ret || sid >= SMSM_MAX_STATES) {
+ dev_err(&pdev->dev, "smsm state missing %s\n", key);
+ return -EINVAL;
+ }
+ state = &smsm->states[sid];
+ state->state_id = sid;
+
+ state->chip = smsm_gpio_template;
+ state->chip.base = -1;
+ state->chip.dev = &pdev->dev;
+ state->chip.of_node = node;
+ state->chip.label = node->name;
+ state->chip.ngpio = 8 * sizeof(u32);
+ ret = gpiochip_add(&state->chip);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register gpiochip\n");
+ return ret;
+ }
+
+ /* The remaining properties are only for non-apps state */
+ if (sid == SMSM_APPS_STATE)
+ continue;
+
+ state->irq = irq_of_parse_and_map(node, 0);
+ if (!state->irq) { /* irq_of_parse_and_map() returns 0 on failure */
+ dev_err(&pdev->dev, "failed to parse smsm interrupt\n");
+ return -EINVAL;
+ }
+
+ syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
+ if (!syscon_np) {
+ dev_err(&pdev->dev, "no qcom,ipc node\n");
+ return -ENODEV;
+ }
+
+ state->ipc_regmap = syscon_node_to_regmap(syscon_np);
+ if (IS_ERR(state->ipc_regmap))
+ return PTR_ERR(state->ipc_regmap);
+
+ key = "qcom,ipc";
+ ret = of_property_read_u32_index(node, key, 1, &state->ipc_offset);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no offset in %s\n", key);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(node, key, 2, &state->ipc_bit);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no bit in %s\n", key);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id qcom_smsm_of_match[] = {
+ { .compatible = "qcom,smsm" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smsm_of_match);
+
+static struct platform_driver qcom_smsm_driver = {
+ .probe = qcom_smsm_probe,
+ .driver = {
+ .name = "qcom_smsm",
+ .owner = THIS_MODULE,
+ .of_match_table = qcom_smsm_of_match,
+ },
+};
+
+static int __init qcom_smsm_init(void)
+{
+ return platform_driver_register(&qcom_smsm_driver);
+}
+arch_initcall(qcom_smsm_init);
+
+static void __exit qcom_smsm_exit(void)
+{
+ platform_driver_unregister(&qcom_smsm_driver);
+}
+module_exit(qcom_smsm_exit);
+
+MODULE_DESCRIPTION("Qualcomm Shared Memory Signaling Mechanism");
+MODULE_LICENSE("GPLv2");
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 05bb7311ac5d..f32e090b3d85 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1388,6 +1388,14 @@ struct edid *drm_get_edid(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_get_edid);
+struct edid *drm_get_edid_early(struct i2c_adapter *adapter)
+{
+ struct drm_connector dummy_connector = { };
+
+ return drm_get_edid(&dummy_connector, adapter);
+}
+EXPORT_SYMBOL(drm_get_edid_early);
+
/**
* drm_edid_duplicate - duplicate an EDID and the extensions
* @edid: EDID to duplicate
@@ -1409,7 +1417,7 @@ EXPORT_SYMBOL(drm_edid_duplicate);
*
* Returns true if @vendor is in @edid, false otherwise
*/
-static bool edid_vendor(struct edid *edid, char *vendor)
+bool edid_vendor(struct edid *edid, char *vendor)
{
char edid_vendor[3];
@@ -1420,7 +1428,7 @@ static bool edid_vendor(struct edid *edid, char *vendor)
return !strncmp(edid_vendor, vendor, 3);
}
+EXPORT_SYMBOL(edid_vendor);

/**
* edid_get_quirks - return quirk flags for a given EDID
* @edid: EDID to process
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 2d5ca8eec13a..e487f56ffc4c 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -45,9 +45,26 @@
* subset of the MIPI DCS command set.
*/
+static const struct device_type mipi_dsi_device_type;
+
static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
{
- return of_driver_match_device(dev, drv);
+ struct mipi_dsi_device *dsi;
+
+ dsi = dev->type == &mipi_dsi_device_type ?
+ to_mipi_dsi_device(dev) : NULL;
+
+ if (!dsi)
+ return 0;
+
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ if (!strcmp(drv->name, "mipi_dsi_dummy") &&
+ !strcmp(dsi->name, "dummy"))
+ return 1;
+
+ return 0;
}
static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
@@ -102,9 +119,41 @@ static const struct device_type mipi_dsi_device_type = {
.release = mipi_dsi_dev_release,
};
-static struct mipi_dsi_device *mipi_dsi_device_alloc(struct mipi_dsi_host *host)
+struct mipi_dsi_device_info {
+ char name[DSI_DEV_NAME_SIZE];
+ u32 reg;
+ struct device_node *node;
+};
+
+static int __dsi_check_chan_busy(struct device *dev, void *data)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+ u32 reg = *(u32 *) data;
+
+ if (dsi && dsi->channel == reg)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int mipi_dsi_check_chan_busy(struct mipi_dsi_host *host, u32 reg)
+{
+ return device_for_each_child(host->dev, &reg, __dsi_check_chan_busy);
+}
+
+static struct mipi_dsi_device *
+mipi_dsi_device_new(struct mipi_dsi_host *host,
+ struct mipi_dsi_device_info *info)
{
+ struct device *dev = host->dev;
struct mipi_dsi_device *dsi;
+ int r;
+
+ if (info->reg > 3) {
+ dev_err(dev, "dsi device %s has invalid channel value: %u\n",
+ info->name, info->reg);
+ return ERR_PTR(-EINVAL);
+ }
dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
if (!dsi)
@@ -114,62 +163,90 @@ static struct mipi_dsi_device *mipi_dsi_device_alloc(struct mipi_dsi_host *host)
dsi->dev.bus = &mipi_dsi_bus_type;
dsi->dev.parent = host->dev;
dsi->dev.type = &mipi_dsi_device_type;
+ dsi->dev.of_node = info->node;
+ dsi->channel = info->reg;
+ strlcpy(dsi->name, info->name, sizeof(dsi->name));
- device_initialize(&dsi->dev);
+ dev_set_name(&dsi->dev, "%s.%d", dev_name(host->dev), info->reg);
- return dsi;
-}
-
-static int mipi_dsi_device_add(struct mipi_dsi_device *dsi)
-{
- struct mipi_dsi_host *host = dsi->host;
+ r = mipi_dsi_check_chan_busy(host, info->reg);
+ if (r)
+ goto err;
- dev_set_name(&dsi->dev, "%s.%d", dev_name(host->dev), dsi->channel);
+ r = device_register(&dsi->dev);
+ if (r) {
+ /* a failed device_register() needs put_device(), not kfree() */
+ put_device(&dsi->dev);
+ return ERR_PTR(r);
+ }
- return device_add(&dsi->dev);
+ return dsi;
+err:
+ kfree(dsi);
+ return ERR_PTR(r);
}
static struct mipi_dsi_device *
of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
{
- struct mipi_dsi_device *dsi;
struct device *dev = host->dev;
+ struct mipi_dsi_device_info info = { };
int ret;
- u32 reg;
- ret = of_property_read_u32(node, "reg", &reg);
+ if (of_modalias_node(node, info.name, sizeof(info.name)) < 0) {
+ dev_err(dev, "modalias failure on %s\n", node->full_name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = of_property_read_u32(node, "reg", &info.reg);
if (ret) {
dev_err(dev, "device node %s has no valid reg property: %d\n",
node->full_name, ret);
return ERR_PTR(-EINVAL);
}
- if (reg > 3) {
- dev_err(dev, "device node %s has invalid reg property: %u\n",
- node->full_name, reg);
- return ERR_PTR(-EINVAL);
- }
+ info.node = of_node_get(node);
- dsi = mipi_dsi_device_alloc(host);
- if (IS_ERR(dsi)) {
- dev_err(dev, "failed to allocate DSI device %s: %ld\n",
- node->full_name, PTR_ERR(dsi));
- return dsi;
- }
+ return mipi_dsi_device_new(host, &info);
+}
+
+static struct mipi_dsi_driver dummy_dsi_driver = {
+ .driver.name = "mipi_dsi_dummy",
+};
- dsi->dev.of_node = of_node_get(node);
- dsi->channel = reg;
+struct mipi_dsi_device *mipi_dsi_new_dummy(struct mipi_dsi_host *host, u32 reg)
+{
+ struct mipi_dsi_device_info info = { "dummy", reg, NULL, };
- ret = mipi_dsi_device_add(dsi);
- if (ret) {
- dev_err(dev, "failed to add DSI device %s: %d\n",
- node->full_name, ret);
- kfree(dsi);
- return ERR_PTR(ret);
+ return mipi_dsi_device_new(host, &info);
+}
+EXPORT_SYMBOL(mipi_dsi_new_dummy);
+
+void mipi_dsi_unregister_device(struct mipi_dsi_device *dsi)
+{
+ if (dsi)
+ device_unregister(&dsi->dev);
+}
+EXPORT_SYMBOL(mipi_dsi_unregister_device);
+
+static DEFINE_MUTEX(host_lock);
+static LIST_HEAD(host_list);
+
+struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node)
+{
+ struct mipi_dsi_host *host;
+
+ mutex_lock(&host_lock);
+
+ list_for_each_entry(host, &host_list, list) {
+ if (host->dev->of_node == node) {
+ mutex_unlock(&host_lock);
+ return host;
+ }
}
- return dsi;
+ mutex_unlock(&host_lock);
+
+ return NULL;
}
+EXPORT_SYMBOL(of_find_mipi_dsi_host_by_node);
int mipi_dsi_host_register(struct mipi_dsi_host *host)
{
@@ -182,6 +259,10 @@ int mipi_dsi_host_register(struct mipi_dsi_host *host)
of_mipi_dsi_device_add(host, node);
}
+ mutex_lock(&host_lock);
+ list_add_tail(&host->list, &host_list);
+ mutex_unlock(&host_lock);
+
return 0;
}
EXPORT_SYMBOL(mipi_dsi_host_register);
@@ -198,6 +279,10 @@ static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
void mipi_dsi_host_unregister(struct mipi_dsi_host *host)
{
device_for_each_child(host->dev, NULL, mipi_dsi_remove_device_fn);
+
+ mutex_lock(&host_lock);
+ list_del_init(&host->list);
+ mutex_unlock(&host_lock);
}
EXPORT_SYMBOL(mipi_dsi_host_unregister);
@@ -924,7 +1009,13 @@ EXPORT_SYMBOL(mipi_dsi_driver_unregister);
static int __init mipi_dsi_bus_init(void)
{
- return bus_register(&mipi_dsi_bus_type);
+ int ret;
+
+ ret = bus_register(&mipi_dsi_bus_type);
+ if (ret < 0)
+ return ret;
+
+ return mipi_dsi_driver_register(&dummy_dsi_driver);
}
postcore_initcall(mipi_dsi_bus_init);
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
index 59aea5aa1fff..a4f55974fef2 100644
--- a/drivers/gpu/drm/i2c/Makefile
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -1,6 +1,6 @@
ccflags-y := -Iinclude/drm
-obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
+obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o adv7511_audio.o
ch7006-y := ch7006_drv.o ch7006_mode.o
obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index 00416f23b5cb..7298f794ae20 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -12,41 +12,19 @@
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/of_graph.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder_slave.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_mipi_dsi.h>
#include "adv7511.h"
-struct adv7511 {
- struct i2c_client *i2c_main;
- struct i2c_client *i2c_edid;
-
- struct regmap *regmap;
- struct regmap *packet_memory_regmap;
- enum drm_connector_status status;
- bool powered;
-
- unsigned int f_tmds;
-
- unsigned int current_edid_segment;
- uint8_t edid_buf[256];
- bool edid_read;
-
- wait_queue_head_t wq;
- struct drm_encoder *encoder;
-
- bool embedded_sync;
- enum adv7511_sync_polarity vsync_polarity;
- enum adv7511_sync_polarity hsync_polarity;
- bool rgb;
-
- struct edid *edid;
-
- struct gpio_desc *gpio_pd;
-};
+#define HPD_ENABLE 0
static struct adv7511 *encoder_to_adv7511(struct drm_encoder *encoder)
{
@@ -66,6 +44,24 @@ static const struct reg_sequence adv7511_fixed_registers[] = {
{ 0x55, 0x02 },
};
+/* ADI recommended values for proper operation. */
+static const struct reg_sequence adv7533_fixed_registers[] = {
+ { 0x16, 0x20 },
+ { 0x9a, 0xe0 },
+ { 0xba, 0x70 },
+ { 0xde, 0x82 },
+ { 0xe4, 0x40 },
+ { 0xe5, 0x80 },
+};
+
+static const struct reg_sequence adv7533_cec_fixed_registers[] = {
+ { 0x15, 0xd0 },
+ { 0x17, 0xd0 },
+ { 0x24, 0x20 },
+ { 0x57, 0x11 },
+ { 0x05, 0xc8 },
+};
+
/* -----------------------------------------------------------------------------
* Register access
*/
@@ -158,6 +154,15 @@ static const struct regmap_config adv7511_regmap_config = {
.volatile_reg = adv7511_register_volatile,
};
+static const struct regmap_config adv7533_cec_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_RBTREE,
+};
+
/* -----------------------------------------------------------------------------
* Hardware configuration
*/
@@ -193,7 +198,7 @@ static void adv7511_set_colormap(struct adv7511 *adv7511, bool enable,
ADV7511_CSC_UPDATE_MODE, 0);
}
-static int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet)
+int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet)
{
if (packet & 0xff)
regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
@@ -208,7 +213,7 @@ static int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet)
return 0;
}
-static int adv7511_packet_disable(struct adv7511 *adv7511, unsigned int packet)
+int adv7511_packet_disable(struct adv7511 *adv7511, unsigned int packet)
{
if (packet & 0xff)
regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
@@ -358,6 +363,73 @@ static void adv7511_set_link_config(struct adv7511 *adv7511,
adv7511->rgb = config->input_colorspace == HDMI_COLORSPACE_RGB;
}
+static void adv7511_dsi_config_tgen(struct adv7511 *adv7511)
+{
+ struct mipi_dsi_device *dsi = adv7511->dsi;
+ struct drm_display_mode *mode = &adv7511->curr_mode;
+ u8 clock_div_by_lanes[] = { 6, 4, 3 }; /* 2, 3, 4 lanes */
+ unsigned int hsw, hfp, hbp, vsw, vfp, vbp;
+
+ hsw = mode->hsync_end - mode->hsync_start;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hbp = mode->htotal - mode->hsync_end;
+ vsw = mode->vsync_end - mode->vsync_start;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ /* set pixel clock divider mode */
+ regmap_write(adv7511->regmap_cec, 0x16,
+ clock_div_by_lanes[dsi->lanes - 2] << 3);
+
+ /* horizontal porch params */
+ regmap_write(adv7511->regmap_cec, 0x28, mode->htotal >> 4);
+ regmap_write(adv7511->regmap_cec, 0x29, (mode->htotal << 4) & 0xff);
+ regmap_write(adv7511->regmap_cec, 0x2a, hsw >> 4);
+ regmap_write(adv7511->regmap_cec, 0x2b, (hsw << 4) & 0xff);
+ regmap_write(adv7511->regmap_cec, 0x2c, hfp >> 4);
+ regmap_write(adv7511->regmap_cec, 0x2d, (hfp << 4) & 0xff);
+ regmap_write(adv7511->regmap_cec, 0x2e, hbp >> 4);
+ regmap_write(adv7511->regmap_cec, 0x2f, (hbp << 4) & 0xff);
+
+ /* vertical porch params */
+ regmap_write(adv7511->regmap_cec, 0x30, mode->vtotal >> 4);
+ regmap_write(adv7511->regmap_cec, 0x31, (mode->vtotal << 4) & 0xff);
+ regmap_write(adv7511->regmap_cec, 0x32, vsw >> 4);
+ regmap_write(adv7511->regmap_cec, 0x33, (vsw << 4) & 0xff);
+ regmap_write(adv7511->regmap_cec, 0x34, vfp >> 4);
+ regmap_write(adv7511->regmap_cec, 0x35, (vfp << 4) & 0xff);
+ regmap_write(adv7511->regmap_cec, 0x36, vbp >> 4);
+ regmap_write(adv7511->regmap_cec, 0x37, (vbp << 4) & 0xff);
+}
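Each timing parameter written above is a 12-bit value split across a register pair: the upper eight bits first, then the low nibble left-aligned in the next register. A hypothetical helper showing the same encoding (not taken from the patch):

	static void adv7533_wr12(struct regmap *map, unsigned int reg, u16 val)
	{
		regmap_write(map, reg, val >> 4);		/* bits 11:4 */
		regmap_write(map, reg + 1, (val << 4) & 0xff);	/* bits 3:0 in the high nibble */
	}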
+
+static void adv7511_dsi_receiver_dpms(struct adv7511 *adv7511)
+{
+ if (adv7511->type != ADV7533)
+ return;
+
+ if (adv7511->powered) {
+ struct mipi_dsi_device *dsi = adv7511->dsi;
+
+ adv7511_dsi_config_tgen(adv7511);
+
+ /* set number of dsi lanes */
+ regmap_write(adv7511->regmap_cec, 0x1c, dsi->lanes << 4);
+
+ /* reset internal timing generator */
+ regmap_write(adv7511->regmap_cec, 0x27, 0xcb);
+ regmap_write(adv7511->regmap_cec, 0x27, 0x8b);
+ regmap_write(adv7511->regmap_cec, 0x27, 0xcb);
+
+ /* enable hdmi */
+ regmap_write(adv7511->regmap_cec, 0x03, 0x89);
+ /* disable test mode */
+ regmap_write(adv7511->regmap_cec, 0x55, 0x00);
+ } else {
+ regmap_write(adv7511->regmap_cec, 0x03, 0x0b);
+ regmap_write(adv7511->regmap_cec, 0x27, 0x0b);
+ }
+}
+
static void adv7511_power_on(struct adv7511 *adv7511)
{
adv7511->current_edid_segment = -1;
@@ -386,7 +458,13 @@ static void adv7511_power_on(struct adv7511 *adv7511)
*/
regcache_sync(adv7511->regmap);
+ if (adv7511->type == ADV7533)
+ regmap_register_patch(adv7511->regmap_cec,
+ adv7533_cec_fixed_registers,
+ ARRAY_SIZE(adv7533_cec_fixed_registers));
adv7511->powered = true;
+
+ adv7511_dsi_receiver_dpms(adv7511);
}
static void adv7511_power_off(struct adv7511 *adv7511)
@@ -398,12 +476,15 @@ static void adv7511_power_off(struct adv7511 *adv7511)
regcache_mark_dirty(adv7511->regmap);
adv7511->powered = false;
+
+ adv7511_dsi_receiver_dpms(adv7511);
}
/* -----------------------------------------------------------------------------
* Interrupt and hotplug detection
*/
+#if HPD_ENABLE
static bool adv7511_hpd(struct adv7511 *adv7511)
{
unsigned int irq0;
@@ -421,8 +502,9 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
return false;
}
+#endif
-static int adv7511_irq_process(struct adv7511 *adv7511)
+static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
{
unsigned int irq0, irq1;
int ret;
@@ -438,7 +520,7 @@ static int adv7511_irq_process(struct adv7511 *adv7511)
regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
- if (irq0 & ADV7511_INT0_HDP && adv7511->encoder)
+ if (process_hpd && irq0 & ADV7511_INT0_HDP && adv7511->encoder)
drm_helper_hpd_irq_event(adv7511->encoder->dev);
if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
@@ -456,7 +538,7 @@ static irqreturn_t adv7511_irq_handler(int irq, void *devid)
struct adv7511 *adv7511 = devid;
int ret;
- ret = adv7511_irq_process(adv7511);
+ ret = adv7511_irq_process(adv7511, true);
return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
}
@@ -473,7 +555,7 @@ static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
adv7511->edid_read, msecs_to_jiffies(timeout));
} else {
for (; timeout > 0; timeout -= 25) {
- ret = adv7511_irq_process(adv7511);
+ ret = adv7511_irq_process(adv7511, false);
if (ret < 0)
break;
@@ -555,18 +637,19 @@ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
}
/* -----------------------------------------------------------------------------
- * Encoder operations
+ * ADV75xx helpers
*/
-
-static int adv7511_get_modes(struct drm_encoder *encoder,
- struct drm_connector *connector)
+static int adv7511_get_modes(struct adv7511 *adv7511,
+ struct drm_connector *connector)
{
- struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
struct edid *edid;
unsigned int count;
/* Reading the EDID only works if the device is powered */
if (!adv7511->powered) {
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7511_REG_POWER2_HDP_SRC_MASK,
+ ADV7511_REG_POWER2_HDP_SRC_NONE);
regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
ADV7511_INT0_EDID_READY);
regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
@@ -596,24 +679,15 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
return count;
}
-static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
-
- if (mode == DRM_MODE_DPMS_ON)
- adv7511_power_on(adv7511);
- else
- adv7511_power_off(adv7511);
-}
-
static enum drm_connector_status
-adv7511_encoder_detect(struct drm_encoder *encoder,
+adv7511_detect(struct adv7511 *adv7511,
struct drm_connector *connector)
{
- struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
enum drm_connector_status status;
unsigned int val;
+#if HPD_ENABLE
bool hpd;
+#endif
int ret;
ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
@@ -625,6 +699,7 @@ adv7511_encoder_detect(struct drm_encoder *encoder,
else
status = connector_status_disconnected;
+#if HPD_ENABLE
hpd = adv7511_hpd(adv7511);
/* The chip resets itself when the cable is disconnected, so in case
@@ -634,7 +709,7 @@ adv7511_encoder_detect(struct drm_encoder *encoder,
if (status == connector_status_connected && hpd && adv7511->powered) {
regcache_mark_dirty(adv7511->regmap);
adv7511_power_on(adv7511);
- adv7511_get_modes(encoder, connector);
+ adv7511_get_modes(adv7511, connector);
if (adv7511->status == connector_status_connected)
status = connector_status_disconnected;
} else {
@@ -643,13 +718,14 @@ adv7511_encoder_detect(struct drm_encoder *encoder,
ADV7511_REG_POWER2_HDP_SRC_MASK,
ADV7511_REG_POWER2_HDP_SRC_BOTH);
}
+#endif
adv7511->status = status;
return status;
}
-static int adv7511_encoder_mode_valid(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+static int adv7511_mode_valid(struct adv7511 *adv7511,
+ const struct drm_display_mode *mode)
{
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
@@ -657,11 +733,10 @@ static int adv7511_encoder_mode_valid(struct drm_encoder *encoder,
return MODE_OK;
}
-static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
+static void adv7511_mode_set(struct adv7511 *adv7511,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
- struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
unsigned int low_refresh_rate;
unsigned int hsync_polarity = 0;
unsigned int vsync_polarity = 0;
@@ -744,6 +819,28 @@ static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
regmap_update_bits(adv7511->regmap, 0x17,
0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
+ if (adv7511->type == ADV7533 && adv7511->num_dsi_lanes == 4) {
+ struct mipi_dsi_device *dsi = adv7511->dsi;
+ int lanes, ret;
+
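+ /* 4 lanes are needed above ~80 MHz pixel clock; 3 suffice below it */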
+ if (adj_mode->clock > 80000)
+ lanes = 4;
+ else
+ lanes = 3;
+
+ if (lanes != dsi->lanes) {
+ mipi_dsi_detach(dsi);
+ dsi->lanes = lanes;
+ ret = mipi_dsi_attach(dsi);
+ if (ret) {
+ DRM_ERROR("Failed to change host lanes\n");
+ return;
+ }
+ }
+ }
+
+ drm_mode_copy(&adv7511->curr_mode, adj_mode);
+
/*
* TODO Test first order 4:2:2 to 4:4:4 up conversion method, which is
* supposed to give better results.
@@ -752,12 +849,247 @@ static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
adv7511->f_tmds = mode->clock;
}
+/* -----------------------------------------------------------------------------
+ * Encoder operations
+ */
+
+static int adv7511_encoder_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+
+ return adv7511_get_modes(adv7511, connector);
+}
+
+static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+
+ if (mode == DRM_MODE_DPMS_ON)
+ adv7511_power_on(adv7511);
+ else
+ adv7511_power_off(adv7511);
+}
+
+static enum drm_connector_status
+adv7511_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+
+ return adv7511_detect(adv7511, connector);
+}
+
+static int adv7511_encoder_mode_valid(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+
+ return adv7511_mode_valid(adv7511, mode);
+}
+
+static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+
+ adv7511_mode_set(adv7511, mode, adj_mode);
+}
+
static struct drm_encoder_slave_funcs adv7511_encoder_funcs = {
.dpms = adv7511_encoder_dpms,
.mode_valid = adv7511_encoder_mode_valid,
.mode_set = adv7511_encoder_mode_set,
.detect = adv7511_encoder_detect,
- .get_modes = adv7511_get_modes,
+ .get_modes = adv7511_encoder_get_modes,
+};
+
+/* -----------------------------------------------------------------------------
+ * Bridge and connector functions
+ */
+
+static struct adv7511 *connector_to_adv7511(struct drm_connector *connector)
+{
+ return container_of(connector, struct adv7511, connector);
+}
+
+/* Connector helper functions */
+static int adv7533_connector_get_modes(struct drm_connector *connector)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv7511_get_modes(adv, connector);
+}
+
+static struct drm_encoder *
+adv7533_connector_best_encoder(struct drm_connector *connector)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv->bridge.encoder;
+}
+
+static enum drm_mode_status
+adv7533_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv7511_mode_valid(adv, mode);
+}
+
+static struct drm_connector_helper_funcs adv7533_connector_helper_funcs = {
+ .get_modes = adv7533_connector_get_modes,
+ .best_encoder = adv7533_connector_best_encoder,
+ .mode_valid = adv7533_connector_mode_valid,
+};
+
+static enum drm_connector_status
+adv7533_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv7511_detect(adv, connector);
+}
+
+static struct drm_connector_funcs adv7533_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = adv7533_connector_detect,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+/* Bridge funcs */
+static struct adv7511 *bridge_to_adv7511(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct adv7511, bridge);
+}
+
+static void adv7533_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+ adv7511_power_on(adv);
+}
+
+static void adv7533_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+#if HPD_ENABLE
+ if (!adv->powered)
+ return;
+#endif
+
+ adv7511_power_off(adv);
+}
+
+static void adv7533_bridge_enable(struct drm_bridge *bridge)
+{
+}
+
+static void adv7533_bridge_disable(struct drm_bridge *bridge)
+{
+}
+
+static void adv7533_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+ adv7511_mode_set(adv, mode, adj_mode);
+}
+
+static int adv7533_attach_dsi(struct adv7511 *adv7511)
+{
+ struct device *dev = &adv7511->i2c_main->dev;
+ struct mipi_dsi_device *dsi;
+ struct mipi_dsi_host *host;
+ int ret;
+
+ host = of_find_mipi_dsi_host_by_node(adv7511->host_node);
+ if (!host) {
+ dev_err(dev, "failed to find dsi host\n");
+ return -EPROBE_DEFER;
+ }
+
+ /* can adv7533 virtual channel be non-zero? */
+ dsi = mipi_dsi_new_dummy(host, 0);
+ if (IS_ERR(dsi)) {
+ dev_err(dev, "failed to create dummy dsi device\n");
+ ret = PTR_ERR(dsi);
+ goto err_dsi_device;
+ }
+
+ adv7511->dsi = dsi;
+
+ dsi->lanes = adv7511->num_dsi_lanes;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE
+ | MIPI_DSI_MODE_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "failed to attach dsi to host\n");
+ goto err_dsi_attach;
+ }
+
+ return 0;
+
+err_dsi_attach:
+ mipi_dsi_unregister_device(dsi);
+err_dsi_device:
+ return ret;
+}
+
+static int adv7533_bridge_attach(struct drm_bridge *bridge)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+ int ret;
+
+ if (!bridge->encoder) {
+ DRM_ERROR("Parent encoder object not found");
+ return -ENODEV;
+ }
+
+ adv->encoder = bridge->encoder;
+
+#if HPD_ENABLE
+ adv->connector.polled = DRM_CONNECTOR_POLL_HPD;
+#endif
+
+ ret = drm_connector_init(bridge->dev, &adv->connector,
+ &adv7533_connector_funcs, DRM_MODE_CONNECTOR_HDMIA);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ return ret;
+ }
+ drm_connector_helper_add(&adv->connector,
+ &adv7533_connector_helper_funcs);
+ drm_connector_register(&adv->connector);
+ drm_mode_connector_attach_encoder(&adv->connector, adv->encoder);
+
+#if HPD_ENABLE
+ drm_helper_hpd_irq_event(adv->connector.dev);
+#endif
+
+ ret = adv7533_attach_dsi(adv);
+
+ return ret;
+}
+
+static struct drm_bridge_funcs adv7533_bridge_funcs = {
+ .pre_enable = adv7533_bridge_pre_enable,
+ .enable = adv7533_bridge_enable,
+ .disable = adv7533_bridge_disable,
+ .post_disable = adv7533_bridge_post_disable,
+ .mode_set = adv7533_bridge_mode_set,
+ .attach = adv7533_bridge_attach,
};
/* -----------------------------------------------------------------------------
@@ -770,8 +1102,6 @@ static int adv7511_parse_dt(struct device_node *np,
const char *str;
int ret;
- memset(config, 0, sizeof(*config));
-
of_property_read_u32(np, "adi,input-depth", &config->input_color_depth);
if (config->input_color_depth != 8 && config->input_color_depth != 10 &&
config->input_color_depth != 12)
@@ -849,10 +1179,54 @@ static int adv7511_parse_dt(struct device_node *np,
return 0;
}
+static int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv7511)
+{
+ u32 num_lanes = 0;
+ struct device_node *endpoint;
+
+ of_property_read_u32(np, "adi,dsi-lanes", &num_lanes);
+
+ if (num_lanes < 1 || num_lanes > 4)
+ return -EINVAL;
+
+ adv7511->num_dsi_lanes = num_lanes;
+
+ endpoint = of_graph_get_next_endpoint(np, NULL);
+ if (!endpoint) {
+ DRM_ERROR("adv dsi input endpoint not found\n");
+ return -ENODEV;
+ }
+
+ adv7511->host_node = of_graph_get_remote_port_parent(endpoint);
+ if (!adv7511->host_node) {
+ DRM_ERROR("dsi host node not found\n");
+ of_node_put(endpoint);
+ return -ENODEV;
+ }
+
+ of_node_put(endpoint);
+ of_node_put(adv7511->host_node);
+
+ /* TODO: Check if these need to be parsed by DT or not */
+ adv7511->rgb = true;
+ adv7511->embedded_sync = false;
+
+ return 0;
+}
+
static const int edid_i2c_addr = 0x7e;
static const int packet_i2c_addr = 0x70;
static const int cec_i2c_addr = 0x78;
+static const struct of_device_id adv7511_of_ids[] = {
+ { .compatible = "adi,adv7511", .data = (void *) ADV7511 },
+ { .compatible = "adi,adv7511w", .data = (void *) ADV7511 },
+ { .compatible = "adi,adv7513", .data = (void *) ADV7511 },
+ { .compatible = "adi,adv7533", .data = (void *) ADV7533 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adv7511_of_ids);
+
static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
{
struct adv7511_link_config link_config;
@@ -871,7 +1245,21 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
adv7511->powered = false;
adv7511->status = connector_status_disconnected;
- ret = adv7511_parse_dt(dev->of_node, &link_config);
+ if (dev->of_node) {
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(adv7511_of_ids, dev->of_node);
+ adv7511->type = (enum adv7511_type) of_id->data;
+ } else {
+ adv7511->type = id->driver_data;
+ }
+
+ memset(&link_config, 0, sizeof(link_config));
+
+ if (adv7511->type == ADV7511)
+ ret = adv7511_parse_dt(dev->of_node, &link_config);
+ else
+ ret = adv7533_parse_dt(dev->of_node, adv7511);
if (ret)
return ret;
@@ -897,10 +1285,19 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
return ret;
dev_dbg(dev, "Rev. %d\n", val);
- ret = regmap_register_patch(adv7511->regmap, adv7511_fixed_registers,
- ARRAY_SIZE(adv7511_fixed_registers));
- if (ret)
- return ret;
+ if (adv7511->type == ADV7511) {
+ ret = regmap_register_patch(adv7511->regmap,
+ adv7511_fixed_registers,
+ ARRAY_SIZE(adv7511_fixed_registers));
+ if (ret)
+ return ret;
+ } else {
+ ret = regmap_register_patch(adv7511->regmap,
+ adv7533_fixed_registers,
+ ARRAY_SIZE(adv7533_fixed_registers));
+ if (ret)
+ return ret;
+ }
regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, edid_i2c_addr);
regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
@@ -913,6 +1310,27 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
if (!adv7511->i2c_edid)
return -ENOMEM;
+ adv7511->i2c_cec = i2c_new_dummy(i2c->adapter, cec_i2c_addr >> 1);
+ if (!adv7511->i2c_cec) {
+ ret = -ENOMEM;
+ goto err_i2c_unregister_edid;
+ }
+
+ adv7511->regmap_cec = devm_regmap_init_i2c(adv7511->i2c_cec,
+ &adv7533_cec_regmap_config);
+ if (IS_ERR(adv7511->regmap_cec)) {
+ ret = PTR_ERR(adv7511->regmap_cec);
+ goto err_i2c_unregister_cec;
+ }
+
+ if (adv7511->type == ADV7533) {
+ ret = regmap_register_patch(adv7511->regmap_cec,
+ adv7533_cec_fixed_registers,
+ ARRAY_SIZE(adv7533_cec_fixed_registers));
+ if (ret)
+ goto err_i2c_unregister_cec;
+ }
+
if (i2c->irq) {
init_waitqueue_head(&adv7511->wq);
@@ -921,7 +1339,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
IRQF_ONESHOT, dev_name(dev),
adv7511);
if (ret)
- goto err_i2c_unregister_device;
+ goto err_i2c_unregister_cec;
}
/* CEC is unused for now */
@@ -932,11 +1350,27 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
i2c_set_clientdata(i2c, adv7511);
- adv7511_set_link_config(adv7511, &link_config);
+ if (adv7511->type == ADV7511)
+ adv7511_set_link_config(adv7511, &link_config);
+
+ if (adv7511->type == ADV7533) {
+ adv7511->bridge.funcs = &adv7533_bridge_funcs;
+ adv7511->bridge.of_node = dev->of_node;
+
+ ret = drm_bridge_add(&adv7511->bridge);
+ if (ret) {
+ dev_err(dev, "failed to add adv7533 bridge\n");
+ goto err_i2c_unregister_cec;
+ }
+ }
+
+ adv7511_audio_init(dev);
return 0;
-err_i2c_unregister_device:
+err_i2c_unregister_cec:
+ i2c_unregister_device(adv7511->i2c_cec);
+err_i2c_unregister_edid:
i2c_unregister_device(adv7511->i2c_edid);
return ret;
@@ -946,10 +1380,18 @@ static int adv7511_remove(struct i2c_client *i2c)
{
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+ adv7511_audio_exit(&i2c->dev);
+ i2c_unregister_device(adv7511->i2c_cec);
i2c_unregister_device(adv7511->i2c_edid);
kfree(adv7511->edid);
+ if (adv7511->type == ADV7533) {
+ mipi_dsi_detach(adv7511->dsi);
+ mipi_dsi_unregister_device(adv7511->dsi);
+ drm_bridge_remove(&adv7511->bridge);
+ }
+
return 0;
}
@@ -959,6 +1401,9 @@ static int adv7511_encoder_init(struct i2c_client *i2c, struct drm_device *dev,
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+ if (adv7511->type == ADV7533)
+ return -ENODEV;
+
encoder->slave_priv = adv7511;
encoder->slave_funcs = &adv7511_encoder_funcs;
@@ -968,21 +1413,14 @@ static int adv7511_encoder_init(struct i2c_client *i2c, struct drm_device *dev,
}
static const struct i2c_device_id adv7511_i2c_ids[] = {
- { "adv7511", 0 },
- { "adv7511w", 0 },
- { "adv7513", 0 },
+ { "adv7511", ADV7511 },
+ { "adv7511w", ADV7511 },
+ { "adv7513", ADV7511 },
+ { "adv7533", ADV7533 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids);
-static const struct of_device_id adv7511_of_ids[] = {
- { .compatible = "adi,adv7511", },
- { .compatible = "adi,adv7511w", },
- { .compatible = "adi,adv7513", },
- { }
-};
-MODULE_DEVICE_TABLE(of, adv7511_of_ids);
-
static struct drm_i2c_encoder_driver adv7511_driver = {
.i2c_driver = {
.driver = {
diff --git a/drivers/gpu/drm/i2c/adv7511.h b/drivers/gpu/drm/i2c/adv7511.h
index 6599ed538426..e9008bf69caa 100644
--- a/drivers/gpu/drm/i2c/adv7511.h
+++ b/drivers/gpu/drm/i2c/adv7511.h
@@ -10,6 +10,16 @@
#define __DRM_I2C_ADV7511_H__
#include <linux/hdmi.h>
+#include <drm/drm_crtc_helper.h>
+
+struct regmap;
+struct adv7511;
+
+int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet);
+int adv7511_packet_disable(struct adv7511 *adv7511, unsigned int packet);
+
+int adv7511_audio_init(struct device *dev);
+void adv7511_audio_exit(struct device *dev);
#define ADV7511_REG_CHIP_REVISION 0x00
#define ADV7511_REG_N0 0x01
@@ -229,6 +239,54 @@ enum adv7511_sync_polarity {
ADV7511_SYNC_POLARITY_HIGH,
};
+enum adv7511_type {
+ ADV7511,
+ ADV7533,
+};
+
+struct adv7511 {
+ struct i2c_client *i2c_main;
+ struct i2c_client *i2c_edid;
+ struct i2c_client *i2c_cec;
+
+ struct regmap *regmap;
+ struct regmap *regmap_cec;
+ enum drm_connector_status status;
+ bool powered;
+
+ struct drm_display_mode curr_mode;
+
+ unsigned int f_tmds;
+ unsigned int f_audio;
+ unsigned int audio_source;
+
+ unsigned int current_edid_segment;
+ uint8_t edid_buf[256];
+ bool edid_read;
+
+ wait_queue_head_t wq;
+ struct drm_encoder *encoder;
+
+ struct drm_connector connector;
+ struct drm_bridge bridge;
+
+ bool embedded_sync;
+ enum adv7511_sync_polarity vsync_polarity;
+ enum adv7511_sync_polarity hsync_polarity;
+ bool rgb;
+
+ struct edid *edid;
+
+ struct gpio_desc *gpio_pd;
+
+ /* ADV7533 DSI RX related params */
+ struct device_node *host_node;
+ struct mipi_dsi_device *dsi;
+ u8 num_dsi_lanes;
+
+ enum adv7511_type type;
+};
+
/**
* struct adv7511_link_config - Describes adv7511 hardware configuration
* @input_color_depth: Number of bits per color component (8, 10 or 12)
diff --git a/drivers/gpu/drm/i2c/adv7511_audio.c b/drivers/gpu/drm/i2c/adv7511_audio.c
new file mode 100644
index 000000000000..52019e95d007
--- /dev/null
+++ b/drivers/gpu/drm/i2c/adv7511_audio.c
@@ -0,0 +1,312 @@
+/*
+ * Analog Devices ADV7511 HDMI transmitter driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "adv7511.h"
+
+static const struct snd_soc_dapm_widget adv7511_dapm_widgets[] = {
+ SND_SOC_DAPM_OUTPUT("TMDS"),
+ SND_SOC_DAPM_AIF_IN("AIFIN", "Playback", 0, SND_SOC_NOPM, 0, 0),
+};
+
+static const struct snd_soc_dapm_route adv7511_routes[] = {
+ { "TMDS", NULL, "AIFIN" },
+};
+
+static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs,
+ unsigned int *cts, unsigned int *n)
+{
+ switch (fs) {
+ case 32000:
+ *n = 4096;
+ break;
+ case 44100:
+ *n = 6272;
+ break;
+ case 48000:
+ *n = 6144;
+ break;
+ case 88200:
+ *n = 12544;
+ break;
+ case 96000:
+ *n = 12288;
+ break;
+ case 176400:
+ *n = 25088;
+ break;
+ case 192000:
+ *n = 24576;
+ break;
+ }
+
+ *cts = ((f_tmds * *n) / (128 * fs)) * 1000;
+}
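For reference, HDMI audio clock regeneration satisfies 128 x fs = f_TMDS x N / CTS, so the sink recovers the audio clock from the N/CTS pair carried in ACR packets. A standalone sanity check of the arithmetic above (not driver code; note that f_tmds is stored in kHz here, hence the final x1000 scaling):

	/* hypothetical host-side check of adv7511_calc_cts_n() */
	unsigned int f_tmds = 148500;	/* kHz, i.e. 1080p at 148.5 MHz */
	unsigned int fs = 48000, n = 6144;
	unsigned int cts = ((f_tmds * n) / (128 * fs)) * 1000;
	/* cts == 148000 here; the ideal value is 148500, so the kHz
	 * division truncates sub-kHz precision */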
+
+static int adv7511_update_cts_n(struct adv7511 *adv7511)
+{
+ unsigned int cts = 0;
+ unsigned int n = 0;
+
+ adv7511_calc_cts_n(adv7511->f_tmds, adv7511->f_audio, &cts, &n);
+
+ regmap_write(adv7511->regmap, ADV7511_REG_N0, (n >> 16) & 0xf);
+ regmap_write(adv7511->regmap, ADV7511_REG_N1, (n >> 8) & 0xff);
+ regmap_write(adv7511->regmap, ADV7511_REG_N2, n & 0xff);
+
+ regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL0,
+ (cts >> 16) & 0xf);
+ regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL1,
+ (cts >> 8) & 0xff);
+ regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL2,
+ cts & 0xff);
+
+ return 0;
+}
+
+static int adv7511_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct adv7511 *adv7511 = snd_soc_codec_get_drvdata(codec);
+ unsigned int rate;
+ unsigned int len;
+ switch (params_rate(params)) {
+ case 32000:
+ rate = ADV7511_SAMPLE_FREQ_32000;
+ break;
+ case 44100:
+ rate = ADV7511_SAMPLE_FREQ_44100;
+ break;
+ case 48000:
+ rate = ADV7511_SAMPLE_FREQ_48000;
+ break;
+ case 88200:
+ rate = ADV7511_SAMPLE_FREQ_88200;
+ break;
+ case 96000:
+ rate = ADV7511_SAMPLE_FREQ_96000;
+ break;
+ case 176400:
+ rate = ADV7511_SAMPLE_FREQ_176400;
+ break;
+ case 192000:
+ rate = ADV7511_SAMPLE_FREQ_192000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ len = ADV7511_I2S_SAMPLE_LEN_16;
+ break;
+ case SNDRV_PCM_FORMAT_S18_3LE:
+ len = ADV7511_I2S_SAMPLE_LEN_18;
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ len = ADV7511_I2S_SAMPLE_LEN_20;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ len = ADV7511_I2S_SAMPLE_LEN_24;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ adv7511->f_audio = params_rate(params);
+
+ adv7511_update_cts_n(adv7511);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CFG3,
+ ADV7511_AUDIO_CFG3_LEN_MASK, len);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG,
+ ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4);
+ regmap_write(adv7511->regmap, 0x73, 0x1);
+
+ return 0;
+}
+
+static int adv7511_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct adv7511 *adv7511 = snd_soc_codec_get_drvdata(codec);
+ unsigned int audio_source, i2s_format = 0;
+ unsigned int invert_clock;
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ audio_source = ADV7511_AUDIO_SOURCE_I2S;
+ i2s_format = ADV7511_I2S_FORMAT_I2S;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ audio_source = ADV7511_AUDIO_SOURCE_I2S;
+ i2s_format = ADV7511_I2S_FORMAT_RIGHT_J;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ audio_source = ADV7511_AUDIO_SOURCE_I2S;
+ i2s_format = ADV7511_I2S_FORMAT_LEFT_J;
+ break;
+// case SND_SOC_DAIFMT_SPDIF:
+// audio_source = ADV7511_AUDIO_SOURCE_SPDIF;
+// break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ invert_clock = 0;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ invert_clock = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_SOURCE, 0x70,
+ audio_source << 4);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG, BIT(6),
+ invert_clock << 6);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_I2S_CONFIG, 0x03,
+ i2s_format);
+
+ adv7511->audio_source = audio_source;
+
+ return 0;
+}
+
+static int adv7511_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct adv7511 *adv7511 = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ switch (adv7511->audio_source) {
+ case ADV7511_AUDIO_SOURCE_I2S:
+ break;
+ case ADV7511_AUDIO_SOURCE_SPDIF:
+ regmap_update_bits(adv7511->regmap,
+ ADV7511_REG_AUDIO_CONFIG, BIT(7),
+ BIT(7));
+ break;
+ }
+ break;
+ case SND_SOC_BIAS_PREPARE:
+ if (dapm->bias_level == SND_SOC_BIAS_STANDBY) {
+ adv7511_packet_enable(adv7511,
+ ADV7511_PACKET_ENABLE_AUDIO_SAMPLE);
+ adv7511_packet_enable(adv7511,
+ ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME);
+ adv7511_packet_enable(adv7511,
+ ADV7511_PACKET_ENABLE_N_CTS);
+ } else {
+ adv7511_packet_disable(adv7511,
+ ADV7511_PACKET_ENABLE_AUDIO_SAMPLE);
+ adv7511_packet_disable(adv7511,
+ ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME);
+ adv7511_packet_disable(adv7511,
+ ADV7511_PACKET_ENABLE_N_CTS);
+ }
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
+ BIT(7), 0);
+ break;
+ case SND_SOC_BIAS_OFF:
+ break;
+ }
+ dapm->bias_level = level;
+ return 0;
+}
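ASoC walks the bias ladder one level at a time, so the PREPARE branch above only ever sees STANDBY->PREPARE (start of playback, packets enabled) or ON->PREPARE (end of playback, packets disabled). Sketched as a sequence:

	/* playback start: OFF -> STANDBY -> PREPARE (enable audio packets) -> ON
	 * playback stop:  ON -> PREPARE (disable audio packets) -> STANDBY   */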
+
+#define ADV7511_RATES (SNDRV_PCM_RATE_32000 |\
+ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
+ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 |\
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)
+
+#define ADV7511_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S18_3LE |\
+ SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE)
+
+static const struct snd_soc_dai_ops adv7511_dai_ops = {
+ .hw_params = adv7511_hw_params,
+ /*.set_sysclk = adv7511_set_dai_sysclk,*/
+ .set_fmt = adv7511_set_dai_fmt,
+};
+
+static struct snd_soc_dai_driver adv7511_dai = {
+ .name = "adv7511",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = ADV7511_RATES,
+ .formats = ADV7511_FORMATS,
+ },
+ .ops = &adv7511_dai_ops,
+};
+
+static int adv7511_suspend(struct snd_soc_codec *codec)
+{
+ return adv7511_set_bias_level(codec, SND_SOC_BIAS_OFF);
+}
+
+static int adv7511_resume(struct snd_soc_codec *codec)
+{
+ return adv7511_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+}
+
+static int adv7511_probe(struct snd_soc_codec *codec)
+{
+ return adv7511_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+}
+
+static int adv7511_remove(struct snd_soc_codec *codec)
+{
+ adv7511_set_bias_level(codec, SND_SOC_BIAS_OFF);
+ return 0;
+}
+
+static struct snd_soc_codec_driver adv7511_codec_driver = {
+ .probe = adv7511_probe,
+ .remove = adv7511_remove,
+ .suspend = adv7511_suspend,
+ .resume = adv7511_resume,
+ .set_bias_level = adv7511_set_bias_level,
+
+ .dapm_widgets = adv7511_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(adv7511_dapm_widgets),
+ .dapm_routes = adv7511_routes,
+ .num_dapm_routes = ARRAY_SIZE(adv7511_routes),
+};
+
+int adv7511_audio_init(struct device *dev)
+{
+ return snd_soc_register_codec(dev, &adv7511_codec_driver,
+ &adv7511_dai, 1);
+}
+
+void adv7511_audio_exit(struct device *dev)
+{
+ snd_soc_unregister_codec(dev);
+}
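The codec registered above is consumed by a machine driver; a minimal, hypothetical DAI link is sketched below. Only the codec DAI name is fixed by this file; the codec device name follows the I2C bus/address and the CPU/platform names are board-specific placeholders:

	/* hypothetical machine-driver snippet, assuming the ADV7511 at 0-0039 */
	static struct snd_soc_dai_link hdmi_dai_link = {
		.name		= "HDMI",
		.stream_name	= "HDMI Playback",
		.codec_dai_name	= "adv7511",	/* matches adv7511_dai.name */
		.codec_name	= "0-0039",	/* i2c bus-addr, board-specific */
		.cpu_dai_name	= "cpu-dai",	/* placeholder */
		.platform_name	= "platform",	/* placeholder */
	};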
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 8d82973fe9db..d62e11eb3a55 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -131,6 +131,7 @@ struct msm_dsi_host {
enum mipi_dsi_pixel_format format;
unsigned long mode_flags;
+ u32 dlane_swap;
u32 dma_cmd_ctrl_restore;
bool registered;
@@ -684,19 +685,9 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
data = DSI_CTRL_CLK_EN;
DBG("lane number=%d", msm_host->lanes);
- if (msm_host->lanes == 2) {
- data |= DSI_CTRL_LANE1 | DSI_CTRL_LANE2;
- /* swap lanes for 2-lane panel for better performance */
- dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
- DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_1230));
- } else {
- /* Take 4 lanes as default */
- data |= DSI_CTRL_LANE0 | DSI_CTRL_LANE1 | DSI_CTRL_LANE2 |
- DSI_CTRL_LANE3;
- /* Do not swap lanes for 4-lane panel */
- dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
- DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_0123));
- }
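+ /* (LANE0 << n) - LANE0 builds a contiguous mask of the first n lane-enable bits */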
+ data |= ((DSI_CTRL_LANE0 << msm_host->lanes) - DSI_CTRL_LANE0);
+ dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
+ DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap));
if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
dsi_write(msm_host, REG_DSI_LANE_CTRL,
@@ -765,7 +756,9 @@ static void dsi_sw_reset(struct msm_dsi_host *msm_host)
dsi_write(msm_host, REG_DSI_RESET, 1);
wmb(); /* make sure reset happen */
+ mdelay(100);
dsi_write(msm_host, REG_DSI_RESET, 0);
+ wmb();
}
static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
@@ -1289,20 +1282,21 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
int ret;
+ if (dsi->lanes > 4 || dsi->channel > 3)
+ return -EINVAL;
+
msm_host->channel = dsi->channel;
msm_host->lanes = dsi->lanes;
msm_host->format = dsi->format;
msm_host->mode_flags = dsi->mode_flags;
- WARN_ON(dsi->dev.of_node != msm_host->device_node);
-
/* Some gpios defined in panel DT need to be controlled by host */
ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
if (ret)
return ret;
DBG("id=%d", msm_host->id);
- if (msm_host->dev)
+ if (msm_host->dev && of_drm_find_panel(msm_host->device_node))
drm_helper_hpd_irq_event(msm_host->dev);
return 0;
@@ -1316,7 +1310,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host,
msm_host->device_node = NULL;
DBG("id=%d", msm_host->id);
- if (msm_host->dev)
+ if (msm_host->dev && of_drm_find_panel(msm_host->device_node))
drm_helper_hpd_irq_event(msm_host->dev);
return 0;
@@ -1344,6 +1338,33 @@ static struct mipi_dsi_host_ops dsi_host_ops = {
.transfer = dsi_host_transfer,
};
+static void dsi_parse_dlane_swap(struct msm_dsi_host *msm_host,
+ struct device_node *np)
+{
+ const char *lane_swap;
+
+ lane_swap = of_get_property(np, "qcom,dsi-logical-lane-swap", NULL);
+
+ if (!lane_swap)
+ msm_host->dlane_swap = LANE_SWAP_0123;
+ else if (!strncmp(lane_swap, "3012", 5))
+ msm_host->dlane_swap = LANE_SWAP_3012;
+ else if (!strncmp(lane_swap, "2301", 5))
+ msm_host->dlane_swap = LANE_SWAP_2301;
+ else if (!strncmp(lane_swap, "1230", 5))
+ msm_host->dlane_swap = LANE_SWAP_1230;
+ else if (!strncmp(lane_swap, "0321", 5))
+ msm_host->dlane_swap = LANE_SWAP_0321;
+ else if (!strncmp(lane_swap, "1032", 5))
+ msm_host->dlane_swap = LANE_SWAP_1032;
+ else if (!strncmp(lane_swap, "2103", 5))
+ msm_host->dlane_swap = LANE_SWAP_2103;
+ else if (!strncmp(lane_swap, "3210", 5))
+ msm_host->dlane_swap = LANE_SWAP_3210;
+ else
+ msm_host->dlane_swap = LANE_SWAP_0123;
+}
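The strncmp chain is easy to extend but repetitive; an equivalent table-driven sketch (a hypothetical alternative using the same LANE_SWAP_* enums, not part of the patch):

	/* sketch only: map the DT string to a LANE_SWAP_* value */
	static const struct { const char *str; int swap; } lane_swap_map[] = {
		{ "3012", LANE_SWAP_3012 }, { "2301", LANE_SWAP_2301 },
		{ "1230", LANE_SWAP_1230 }, { "0321", LANE_SWAP_0321 },
		{ "1032", LANE_SWAP_1032 }, { "2103", LANE_SWAP_2103 },
		{ "3210", LANE_SWAP_3210 },
	};
	int i;

	msm_host->dlane_swap = LANE_SWAP_0123;	/* default, also covers "0123" */
	if (lane_swap)
		for (i = 0; i < ARRAY_SIZE(lane_swap_map); i++)
			if (!strcmp(lane_swap, lane_swap_map[i].str)) {
				msm_host->dlane_swap = lane_swap_map[i].swap;
				break;
			}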
+
static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
{
struct device *dev = &msm_host->pdev->dev;
@@ -1358,6 +1379,8 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
return ret;
}
+ dsi_parse_dlane_swap(msm_host, np);
+
/*
* Get the first endpoint node. In our case, dsi has one output port
* to which the panel is connected. Don't return an error if a port
@@ -1862,7 +1885,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
dsi_phy_sw_reset(msm_host);
ret = msm_dsi_manager_phy_enable(msm_host->id,
msm_host->byte_clk_rate * 8,
- clk_get_rate(msm_host->esc_clk),
+ 19200000,/*clk_get_rate(msm_host->esc_clk),*/
&clk_pre, &clk_post);
dsi_bus_clk_disable(msm_host);
if (ret) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 077f7521a971..5e812b60a69d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -182,9 +182,20 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
}
+static const char *iommu_ports[] = {
+ "mdp_port0_cb0", "mdp_port1_cb0",
+};
+
static void mdp4_destroy(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ struct msm_mmu *mmu = mdp4_kms->mmu;
+
+ if (mmu) {
+ mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
+ mmu->funcs->destroy(mmu);
+ }
+
if (mdp4_kms->blank_cursor_iova)
msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
if (mdp4_kms->blank_cursor_bo)
@@ -262,6 +273,11 @@ static struct drm_panel *detect_panel(struct drm_device *dev)
of_node_put(endpoint);
+ if (!of_device_is_available(panel_node)) {
+ dev_err(dev->dev, "panel is not enabled in DT\n");
+ of_node_put(panel_node);
+ return ERR_PTR(-ENODEV);
+ }
+
panel = of_drm_find_panel(panel_node);
if (!panel) {
of_node_put(panel_node);
@@ -313,45 +329,55 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
if (IS_ERR(panel)) {
ret = PTR_ERR(panel);
dev_err(dev->dev, "failed to detect LVDS panel: %d\n", ret);
- goto fail;
- }
+ /*
+ * Only fail if there is a panel but it is not ready yet;
+ * continue with the other outputs if no panel is connected.
+ */
+ if (ret == -EPROBE_DEFER)
+ goto fail;
+ } else {
+ plane = mdp4_plane_init(dev, RGB2, true);
+ if (IS_ERR(plane)) {
+ dev_err(dev->dev,
+ "failed to construct plane for RGB2\n");
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
- plane = mdp4_plane_init(dev, RGB2, true);
- if (IS_ERR(plane)) {
- dev_err(dev->dev, "failed to construct plane for RGB2\n");
- ret = PTR_ERR(plane);
- goto fail;
- }
+ crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 0, DMA_P);
+ if (IS_ERR(crtc)) {
+ dev_err(dev->dev,
+ "failed to construct crtc for DMA_P\n");
+ ret = PTR_ERR(crtc);
+ goto fail;
+ }
- crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 0, DMA_P);
- if (IS_ERR(crtc)) {
- dev_err(dev->dev, "failed to construct crtc for DMA_P\n");
- ret = PTR_ERR(crtc);
- goto fail;
- }
+ encoder = mdp4_lcdc_encoder_init(dev, panel);
+ if (IS_ERR(encoder)) {
+ dev_err(dev->dev,
+ "failed to construct LCDC encoder\n");
+ ret = PTR_ERR(encoder);
+ goto fail;
+ }
- encoder = mdp4_lcdc_encoder_init(dev, panel);
- if (IS_ERR(encoder)) {
- dev_err(dev->dev, "failed to construct LCDC encoder\n");
- ret = PTR_ERR(encoder);
- goto fail;
- }
+ /* LCDC can be hooked to DMA_P: */
+ encoder->possible_crtcs = 1 << priv->num_crtcs;
- /* LCDC can be hooked to DMA_P: */
- encoder->possible_crtcs = 1 << priv->num_crtcs;
+ priv->crtcs[priv->num_crtcs++] = crtc;
+ priv->encoders[priv->num_encoders++] = encoder;
- priv->crtcs[priv->num_crtcs++] = crtc;
- priv->encoders[priv->num_encoders++] = encoder;
+ connector = mdp4_lvds_connector_init(dev, panel, encoder);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ dev_err(dev->dev,
+ "failed to initialize LVDS connector: %d\n",
+ ret);
+ goto fail;
+ }
- connector = mdp4_lvds_connector_init(dev, panel, encoder);
- if (IS_ERR(connector)) {
- ret = PTR_ERR(connector);
- dev_err(dev->dev, "failed to initialize LVDS connector: %d\n", ret);
- goto fail;
+ priv->connectors[priv->num_connectors++] = connector;
}
- priv->connectors[priv->num_connectors++] = connector;
-
/*
* Setup DTV/HDMI path: RGB1 -> DMA_E -> DTV -> HDMI:
*/
@@ -398,10 +424,6 @@ fail:
return ret;
}
-static const char *iommu_ports[] = {
- "mdp_port0_cb0", "mdp_port1_cb0",
-};
-
struct msm_kms *mdp4_kms_init(struct drm_device *dev)
{
struct platform_device *pdev = dev->platformdev;
@@ -506,6 +528,8 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
ARRAY_SIZE(iommu_ports));
if (ret)
goto fail;
+
+ mdp4_kms->mmu = mmu;
} else {
dev_info(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 8a7f6e1e2bca..b3ab4ea0ecf0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -45,6 +45,7 @@ struct mdp4_kms {
struct clk *pclk;
struct clk *lut_clk;
struct clk *axi_clk;
+ struct msm_mmu *mmu;
struct mdp_irq error_handler;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 047cb0433ccb..b50cc5c65b65 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -32,6 +32,7 @@ static int mdp5_hw_init(struct msm_kms *kms)
unsigned long flags;
pm_runtime_get_sync(dev->dev);
+ mdp5_enable(mdp5_kms);
/* Magic unknown register writes:
*
@@ -63,7 +64,8 @@ static int mdp5_hw_init(struct msm_kms *kms)
mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
- pm_runtime_put_sync(dev->dev);
+ mdp5_disable(mdp5_kms);
+ //pm_runtime_put_sync(dev->dev);
return 0;
}
@@ -578,7 +580,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
continue;
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
}
- mdp5_disable(mdp5_kms);
+ /* TODO: Remove this after runtime pm adaptation */
+ //mdp5_disable(mdp5_kms);
mdelay(16);
if (config->platform.iommu) {
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 0339c5d82d37..28c9a2aeb314 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -415,7 +415,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
pm_runtime_get_sync(dev->dev);
ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
- pm_runtime_put_sync(dev->dev);
+ //pm_runtime_put_sync(dev->dev);
if (ret < 0) {
dev_err(dev->dev, "failed to install IRQ handler\n");
goto fail;
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 7ac2f1997e4a..f033d48cd1f3 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -18,6 +18,8 @@
#include "msm_drv.h"
#include "msm_mmu.h"
+#define DUMMY_CONTEXT 0x1
+
struct msm_iommu {
struct msm_mmu base;
struct iommu_domain *domain;
@@ -33,14 +35,51 @@ static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
{
+ struct device *dev = mmu->dev;
struct msm_iommu *iommu = to_msm_iommu(mmu);
- return iommu_attach_device(iommu->domain, mmu->dev);
+ int i, ret;
+
+ for (i = 0; i < cnt; i++) {
+ struct device *msm_iommu_get_ctx(const char *ctx_name);
+ struct device *ctx = msm_iommu_get_ctx(names[i]);
+ if (IS_ERR_OR_NULL(ctx)) {
+ dev_warn(dev, "couldn't get %s context", names[i]);
+ continue;
+ }
+
+ if (ctx == (struct device *)DUMMY_CONTEXT) {
+ return iommu_attach_device(iommu->domain, mmu->dev);
+ } else {
+ ret = iommu_attach_device(iommu->domain, ctx);
+ }
+
+ if (ret) {
+ dev_warn(dev, "could not attach iommu to %s", names[i]);
+ return ret;
+ }
+ }
+
+ return 0;
}
static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- iommu_detach_device(iommu->domain, mmu->dev);
+ int i;
+
+ for (i = 0; i < cnt; i++) {
+ struct device *msm_iommu_get_ctx(const char *ctx_name);
+ struct device *ctx = msm_iommu_get_ctx(names[i]);
+ if (IS_ERR_OR_NULL(ctx))
+ continue;
+
+ if (ctx == (struct device *)DUMMY_CONTEXT) {
+ iommu_detach_device(iommu->domain, mmu->dev);
+ break;
+ } else {
+ iommu_detach_device(iommu->domain, ctx);
+ }
+ }
}
static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index f97b73ec4713..34d73a32e78a 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -32,6 +32,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
+#include <drm/drm_edid.h>
#include <video/display_timing.h>
#include <video/videomode.h>
@@ -70,6 +71,18 @@ struct panel_desc {
u32 bus_format;
};
+#define PANEL_PICKER_ENTRY(vend, pid, pdesc) \
+ .vendor = vend, \
+ .product_id = (pid), \
+ .data = (pdesc)
+
+/* Panel picker entry with vendor and product id */
+struct panel_picker_entry {
+ char vendor[4]; /* Vendor string */
+ int product_id; /* product id field */
+ const struct panel_desc *data;
+};
+
struct panel_simple {
struct drm_panel base;
bool prepared;
@@ -84,6 +97,8 @@ struct panel_simple {
struct gpio_desc *enable_gpio;
};
+static const struct panel_desc *panel_picker_find_panel(struct edid *edid);
+
static inline struct panel_simple *to_panel_simple(struct drm_panel *panel)
{
return container_of(panel, struct panel_simple, base);
@@ -276,11 +291,28 @@ static const struct drm_panel_funcs panel_simple_funcs = {
.get_timings = panel_simple_get_timings,
};
+static void simple_panel_node_disable(struct device_node *node)
+{
+ struct property *prop;
+
+ prop = kzalloc(sizeof(*prop), GFP_KERNEL);
+ if (!prop)
+ return;
+
+ prop->name = "status";
+ prop->value = "disabled";
+ prop->length = strlen((char *)prop->value) + 1;
+
+ of_update_property(node, prop);
+}
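Since of_device_is_available() treats any status other than "okay"/"ok" as disabled, later lookups will skip the node once this runs:

	/* e.g. in a subsequent probe path */
	if (!of_device_is_available(node))
		return -ENODEV;	/* panel was marked disabled above */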
+
static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
{
struct device_node *backlight, *ddc;
struct panel_simple *panel;
int err;
+ struct edid *edid;
panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
if (!panel)
@@ -288,7 +320,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
panel->enabled = false;
panel->prepared = false;
- panel->desc = desc;
panel->supply = devm_regulator_get(dev, "power");
if (IS_ERR(panel->supply))
@@ -316,7 +347,25 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
panel->ddc = of_find_i2c_adapter_by_node(ddc);
of_node_put(ddc);
- if (!panel->ddc) {
+ if (panel->ddc) {
+ /* detect panel presence */
+ if (!drm_probe_ddc(panel->ddc)) {
+ err = -ENODEV;
+ goto nodev;
+ }
+
+ /* get panel from edid */
+ if (of_device_is_compatible(dev->of_node,
+ "panel-simple")) {
+ edid = drm_get_edid_early(panel->ddc);
+ if (edid) {
+ desc = panel_picker_find_panel(edid);
+ } else {
+ err = -ENODEV;
+ goto nodev;
+ }
+ }
+ } else {
err = -EPROBE_DEFER;
goto free_backlight;
}
@@ -325,6 +374,7 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
drm_panel_init(&panel->base);
panel->base.dev = dev;
panel->base.funcs = &panel_simple_funcs;
+ panel->desc = desc;
err = drm_panel_add(&panel->base);
if (err < 0)
@@ -334,6 +384,10 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
return 0;
+nodev:
+ /* mark the dt as disabled */
+ simple_panel_node_disable(dev->of_node);
+
free_ddc:
if (panel->ddc)
put_device(&panel->ddc->dev);
@@ -1096,6 +1150,10 @@ static const struct panel_desc shelly_sca07010_bfn_lnn = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct panel_picker_entry panel_picker_list[] = {
+ { PANEL_PICKER_ENTRY("AUO", 0x10dc, &auo_b101xtn01) },
+};
+
static const struct of_device_id platform_of_match[] = {
{
.compatible = "ampire,am800480r3tmqwa1h",
@@ -1191,11 +1249,32 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "shelly,sca07010-bfn-lnn",
.data = &shelly_sca07010_bfn_lnn,
}, {
+ /* Panel Picker Vendor ID and Product ID based Lookup */
+ .compatible = "panel-simple",
+ }, {
/* sentinel */
}
};
MODULE_DEVICE_TABLE(of, platform_of_match);
+static const struct panel_desc *panel_picker_find_panel(struct edid *edid)
+{
+ int i;
+ const struct panel_desc *desc = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(panel_picker_list); i++) {
+ const struct panel_picker_entry *vp = &panel_picker_list[i];
+
+ if (edid_vendor(edid, (char *)vp->vendor) &&
+ (EDID_PRODUCT_ID(edid) == vp->product_id)) {
+ desc = vp->data;
+ break;
+ }
+ }
+
+ return desc;
+}
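edid_vendor() matches against the 3-letter PNP ID that EDID packs into two big-endian bytes at offsets 8-9, five bits per letter with 'A' encoded as 1; a sketch of the decoding, for illustration only:

	/* sketch: recover the "AUO"-style vendor string from raw EDID bytes */
	static void edid_decode_pnp_id(const unsigned char *raw, char id[4])
	{
		unsigned int v = (raw[8] << 8) | raw[9];

		id[0] = '@' + ((v >> 10) & 0x1f);
		id[1] = '@' + ((v >> 5) & 0x1f);
		id[2] = '@' + (v & 0x1f);
		id[3] = '\0';
	}

The product id compared via EDID_PRODUCT_ID() is the little-endian 16-bit word at offsets 10-11 (0x10dc for the AUO panel above).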
+
static int panel_simple_platform_probe(struct platform_device *pdev)
{
const struct of_device_id *id;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 70a11ac38119..47031883d8d1 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2043,6 +2043,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LILLIPUT, USB_PROD_ID_LILLIPUT) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index f769208276ae..e10908b7ec88 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -1047,4 +1047,8 @@
#define USB_DEVICE_ID_RAPHNET_2NES2SNES 0x0002
#define USB_DEVICE_ID_RAPHNET_4NES4SNES 0x0003
+/* Lilliput Capacitive TouchScreen */
+#define USB_VENDOR_ID_LILLIPUT 0x1391
+#define USB_PROD_ID_LILLIPUT 0x2112
+
#endif
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 5c6795509001..0d5880021590 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1169,6 +1169,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_CJTOUCH,
USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040) },
+ /* Lilliput multitouch panels */
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_LILLIPUT,
+ USB_PROD_ID_LILLIPUT) },
+
/* CVTouch panels */
{ .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_CVTOUCH,
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index fdcbdab808e9..810b021c7ea0 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -24,6 +24,11 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/atomic.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
/* QUP Registers */
#define QUP_CONFIG 0x000
@@ -33,6 +38,7 @@
#define QUP_OPERATIONAL 0x018
#define QUP_ERROR_FLAGS 0x01c
#define QUP_ERROR_FLAGS_EN 0x020
+#define QUP_OPERATIONAL_MASK 0x028
#define QUP_HW_VERSION 0x030
#define QUP_MX_OUTPUT_CNT 0x100
#define QUP_OUT_FIFO_BASE 0x110
@@ -43,6 +49,8 @@
#define QUP_I2C_CLK_CTL 0x400
#define QUP_I2C_STATUS 0x404
+#define QUP_I2C_MASTER_GEN 0x408
+
/* QUP States and reset values */
#define QUP_RESET_STATE 0
#define QUP_RUN_STATE 1
@@ -51,6 +59,7 @@
#define QUP_STATE_VALID BIT(2)
#define QUP_I2C_MAST_GEN BIT(4)
+#define QUP_I2C_FLUSH BIT(6)
#define QUP_OPERATIONAL_RESET 0x000ff0
#define QUP_I2C_STATUS_RESET 0xfffffc
@@ -69,16 +78,22 @@
#define QUP_CLOCK_AUTO_GATE BIT(13)
#define I2C_MINI_CORE (2 << 8)
#define I2C_N_VAL 15
+#define I2C_N_VAL_V2 7
+
/* Most significant word offset in FIFO port */
#define QUP_MSW_SHIFT (I2C_N_VAL + 1)
/* Packing/Unpacking words in FIFOs, and IO modes */
#define QUP_OUTPUT_BLK_MODE (1 << 10)
+#define QUP_OUTPUT_BAM_MODE (3 << 10)
#define QUP_INPUT_BLK_MODE (1 << 12)
+#define QUP_INPUT_BAM_MODE (3 << 12)
+#define QUP_BAM_MODE (QUP_OUTPUT_BAM_MODE | QUP_INPUT_BAM_MODE)
#define QUP_UNPACK_EN BIT(14)
#define QUP_PACK_EN BIT(15)
#define QUP_REPACK_EN (QUP_UNPACK_EN | QUP_PACK_EN)
+#define QUP_V2_TAGS_EN 1
#define QUP_OUTPUT_BLOCK_SIZE(x)(((x) >> 0) & 0x03)
#define QUP_OUTPUT_FIFO_SIZE(x) (((x) >> 2) & 0x07)
@@ -90,6 +105,15 @@
#define QUP_TAG_DATA (2 << 8)
#define QUP_TAG_STOP (3 << 8)
#define QUP_TAG_REC (4 << 8)
+#define QUP_BAM_INPUT_EOT 0x93
+#define QUP_BAM_FLUSH_STOP 0x96
+
+/* QUP v2 tags */
+#define QUP_TAG_V2_START 0x81
+#define QUP_TAG_V2_DATAWR 0x82
+#define QUP_TAG_V2_DATAWR_STOP 0x83
+#define QUP_TAG_V2_DATARD 0x85
+#define QUP_TAG_V2_DATARD_STOP 0x87
/* Status, Error flags */
#define I2C_STATUS_WR_BUFFER_FULL BIT(0)
@@ -98,6 +122,43 @@
#define QUP_STATUS_ERROR_FLAGS 0x7c
#define QUP_READ_LIMIT 256
+#define SET_BIT 0x1
+#define RESET_BIT 0x0
+#define ONE_BYTE 0x1
+#define QUP_I2C_MX_CONFIG_DURING_RUN BIT(31)
+
+#define MX_TX_RX_LEN SZ_64K
+#define MX_BLOCKS (MX_TX_RX_LEN / QUP_READ_LIMIT)
+
+/* Max transfer timeout, in seconds (used as TOUT_MAX * HZ jiffies) */
+#define TOUT_MAX 300
+
+struct qup_i2c_block {
+ int count;
+ int pos;
+ int tx_tag_len;
+ int rx_tag_len;
+ int data_len;
+ u8 tags[6];
+ int config_run;
+};
+
+struct qup_i2c_tag {
+ u8 *start;
+ dma_addr_t addr;
+};
+
+struct qup_i2c_bam_rx {
+ struct qup_i2c_tag scratch_tag;
+ struct dma_chan *dma_rx;
+ struct scatterlist *sg_rx;
+};
+
+struct qup_i2c_bam_tx {
+ struct qup_i2c_tag footer_tag;
+ struct dma_chan *dma_tx;
+ struct scatterlist *sg_tx;
+};
struct qup_i2c_dev {
struct device *dev;
@@ -114,6 +175,7 @@ struct qup_i2c_dev {
int in_blk_sz;
unsigned long one_byte_t;
+ struct qup_i2c_block blk;
struct i2c_msg *msg;
/* Current posion in user message buffer */
@@ -123,6 +185,24 @@ struct qup_i2c_dev {
/* QUP core errors */
u32 qup_err;
+ int use_v2_tags;
+
+ int (*qup_i2c_write_one)(struct qup_i2c_dev *qup,
+ struct i2c_msg *msg);
+ int (*qup_i2c_read_one)(struct qup_i2c_dev *qup,
+ struct i2c_msg *msg);
+
+ /* Current i2c_msg in i2c_msgs */
+ int cmsg;
+ /* total num of i2c_msgs */
+ int num;
+
+ /* dma parameters */
+ bool is_dma;
+ struct dma_pool *dpool;
+ struct qup_i2c_tag start_tag;
+ struct qup_i2c_bam_rx brx;
+ struct qup_i2c_bam_tx btx;
struct completion xfer;
};
@@ -199,6 +279,14 @@ static int qup_i2c_poll_state(struct qup_i2c_dev *qup, u32 req_state)
return qup_i2c_poll_state_mask(qup, req_state, QUP_STATE_MASK);
}
+static void qup_i2c_flush(struct qup_i2c_dev *qup)
+{
+ u32 val = readl(qup->base + QUP_STATE);
+
+ val |= QUP_I2C_FLUSH;
+ writel(val, qup->base + QUP_STATE);
+}
+
static int qup_i2c_poll_state_valid(struct qup_i2c_dev *qup)
{
return qup_i2c_poll_state_mask(qup, 0, 0);
@@ -221,53 +309,72 @@ static int qup_i2c_change_state(struct qup_i2c_dev *qup, u32 state)
return 0;
}
-static int qup_i2c_wait_writeready(struct qup_i2c_dev *qup)
+/**
+ * qup_i2c_wait_ready - wait for a given number of bytes in the tx/rx path
+ * @qup: The qup_i2c_dev device
+ * @op: The bit/event to wait on
+ * @val: value of the bit to wait on, 0 or 1
+ * @len: The number of bytes to be transferred
+ */
+static int qup_i2c_wait_ready(struct qup_i2c_dev *qup, int op, bool val,
+ int len)
{
unsigned long timeout;
u32 opflags;
u32 status;
+ u32 shift = __ffs(op);
- timeout = jiffies + HZ;
+ len *= qup->one_byte_t;
+ /* timeout after a wait of twice the max time */
+ timeout = jiffies + len * 4;
for (;;) {
opflags = readl(qup->base + QUP_OPERATIONAL);
status = readl(qup->base + QUP_I2C_STATUS);
- if (!(opflags & QUP_OUT_NOT_EMPTY) &&
- !(status & I2C_STATUS_BUS_ACTIVE))
- return 0;
+ if (((opflags & op) >> shift) == val) {
+ if ((op == QUP_OUT_NOT_EMPTY) &&
+ (qup->cmsg == (qup->num - 1))) {
+ if (!(status & I2C_STATUS_BUS_ACTIVE))
+ return 0;
+ } else {
+ return 0;
+ }
+ }
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
- usleep_range(qup->one_byte_t, qup->one_byte_t * 2);
+ usleep_range(len, len * 2);
}
}
static void qup_i2c_set_write_mode(struct qup_i2c_dev *qup, struct i2c_msg *msg)
{
/* Number of entries to shift out, including the start */
- int total = msg->len + 1;
+ int total = msg->len + qup->blk.tx_tag_len;
if (total < qup->out_fifo_sz) {
/* FIFO mode */
writel(QUP_REPACK_EN, qup->base + QUP_IO_MODE);
- writel(total, qup->base + QUP_MX_WRITE_CNT);
+ writel(total | qup->blk.config_run,
+ qup->base + QUP_MX_WRITE_CNT);
} else {
/* BLOCK mode (transfer data on chunks) */
writel(QUP_OUTPUT_BLK_MODE | QUP_REPACK_EN,
qup->base + QUP_IO_MODE);
- writel(total, qup->base + QUP_MX_OUTPUT_CNT);
+ writel(total | qup->blk.config_run,
+ qup->base + QUP_MX_OUTPUT_CNT);
}
}
-static void qup_i2c_issue_write(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+static int qup_i2c_issue_write(struct qup_i2c_dev *qup, struct i2c_msg *msg)
{
u32 addr = msg->addr << 1;
u32 qup_tag;
- u32 opflags;
int idx;
u32 val;
+ int ret = 0;
if (qup->pos == 0) {
val = QUP_TAG_START | addr;
@@ -279,9 +386,10 @@ static void qup_i2c_issue_write(struct qup_i2c_dev *qup, struct i2c_msg *msg)
while (qup->pos < msg->len) {
/* Check that there's space in the FIFO for our pair */
- opflags = readl(qup->base + QUP_OPERATIONAL);
- if (opflags & QUP_OUT_FULL)
- break;
+ ret = qup_i2c_wait_ready(qup, QUP_OUT_FULL, RESET_BIT,
+ 4 * ONE_BYTE);
+ if (ret)
+ return ret;
if (qup->pos == msg->len - 1)
qup_tag = QUP_TAG_STOP;
@@ -300,55 +408,238 @@ static void qup_i2c_issue_write(struct qup_i2c_dev *qup, struct i2c_msg *msg)
qup->pos++;
idx++;
}
+
+ return ret;
}
-static int qup_i2c_write_one(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+static void qup_i2c_get_blk_data(struct qup_i2c_dev *qup,
+ struct i2c_msg *msg)
{
- unsigned long left;
+ memset(&qup->blk, 0, sizeof(qup->blk));
+
+ if (!qup->use_v2_tags) {
+ if (!(msg->flags & I2C_M_RD))
+ qup->blk.tx_tag_len = 1;
+ return;
+ }
+
+ qup->blk.data_len = msg->len;
+ qup->blk.count = (msg->len + QUP_READ_LIMIT - 1) / QUP_READ_LIMIT;
+
+ /* 4 bytes for first block and 2 writes for rest */
+ qup->blk.tx_tag_len = 4 + (qup->blk.count - 1) * 2;
+
+ /* There are 2 tag bytes that are read in to fifo for every block */
+ if (msg->flags & I2C_M_RD)
+ qup->blk.rx_tag_len = qup->blk.count * 2;
+
+ if (qup->cmsg)
+ qup->blk.config_run = QUP_I2C_MX_CONFIG_DURING_RUN;
+}
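For concreteness, a 600-byte v2 read works out as:

	/* count = (600 + 255) / 256 = 3 blocks (256 + 256 + 88 bytes)
	 * tx_tag_len = 4 + (3 - 1) * 2 = 8 tag bytes written out
	 * rx_tag_len = 3 * 2 = 6 tag bytes read back with the data  */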
+
+static int qup_i2c_send_data(struct qup_i2c_dev *qup, int tlen, u8 *tbuf,
+ int dlen, u8 *dbuf)
+{
+ u32 val = 0, idx = 0, pos = 0, i = 0, t;
+ int len = tlen + dlen;
+ u8 *buf = tbuf;
+ int ret = 0;
+
+ while (len > 0) {
+ ret = qup_i2c_wait_ready(qup, QUP_OUT_FULL,
+ RESET_BIT, 4 * ONE_BYTE);
+ if (ret) {
+ dev_err(qup->dev, "timeout for fifo out full");
+ return ret;
+ }
+
+ t = (len >= 4) ? 4 : len;
+
+ while (idx < t) {
+ if (!i && (pos >= tlen)) {
+ buf = dbuf;
+ pos = 0;
+ i = 1;
+ }
+ val |= buf[pos++] << (idx++ * 8);
+ }
+
+ writel(val, qup->base + QUP_OUT_FIFO_BASE);
+ idx = 0;
+ val = 0;
+ len -= 4;
+ }
+
+ return ret;
+}
+
+static int qup_i2c_get_data_len(struct qup_i2c_dev *qup)
+{
+ int data_len;
+
+ if (qup->blk.data_len > QUP_READ_LIMIT)
+ data_len = QUP_READ_LIMIT;
+ else
+ data_len = qup->blk.data_len;
+
+ return data_len;
+}
+
+static int qup_i2c_get_tags(u8 *tags, struct qup_i2c_dev *qup,
+ struct i2c_msg *msg, int is_dma)
+{
+ u16 addr = (msg->addr << 1) | ((msg->flags & I2C_M_RD) == I2C_M_RD);
+ int len = 0;
+ int data_len;
+
+ int last = (qup->blk.pos == (qup->blk.count - 1)) &&
+ (qup->cmsg == (qup->num - 1));
+
+ if (qup->blk.pos == 0) {
+ tags[len++] = QUP_TAG_V2_START;
+ tags[len++] = addr & 0xff;
+
+ if (msg->flags & I2C_M_TEN)
+ tags[len++] = addr >> 8;
+ }
+
+ /* Send _STOP commands for the last block */
+ if (last) {
+ if (msg->flags & I2C_M_RD)
+ tags[len++] = QUP_TAG_V2_DATARD_STOP;
+ else
+ tags[len++] = QUP_TAG_V2_DATAWR_STOP;
+ } else {
+ if (msg->flags & I2C_M_RD)
+ tags[len++] = QUP_TAG_V2_DATARD;
+ else
+ tags[len++] = QUP_TAG_V2_DATAWR;
+ }
+
+ data_len = qup_i2c_get_data_len(qup);
+
+ /* 0 implies 256 bytes */
+ if (data_len == QUP_READ_LIMIT)
+ tags[len++] = 0;
+ else
+ tags[len++] = data_len;
+
+ if ((msg->flags & I2C_M_RD) && last && is_dma) {
+ tags[len++] = QUP_BAM_INPUT_EOT;
+ tags[len++] = QUP_BAM_FLUSH_STOP;
+ }
+
+ return len;
+}
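As an example of the resulting tag stream, a 300-byte FIFO-mode read from address 0x52 splits into two blocks and, per the defines above, produces:

	/* block 0: START, addr|RD, DATARD, len (0 encodes 256 bytes) */
	{ 0x81, 0xa5, 0x85, 0x00 }
	/* block 1 (last): DATARD_STOP, remaining length */
	{ 0x87, 44 }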
+
+static int qup_i2c_issue_xfer_v2(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ int data_len = 0, tag_len, index;
int ret;
- qup->msg = msg;
- qup->pos = 0;
+ tag_len = qup_i2c_get_tags(qup->blk.tags, qup, msg, 0);
+ index = msg->len - qup->blk.data_len;
- enable_irq(qup->irq);
+ /* only tags are written for read */
+ if (!(msg->flags & I2C_M_RD))
+ data_len = qup_i2c_get_data_len(qup);
- qup_i2c_set_write_mode(qup, msg);
+ ret = qup_i2c_send_data(qup, tag_len, qup->blk.tags,
+ data_len, &msg->buf[index]);
+ qup->blk.data_len -= data_len;
- ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
- if (ret)
- goto err;
+ return ret;
+}
- writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);
+static int qup_i2c_wait_for_complete(struct qup_i2c_dev *qup,
+ struct i2c_msg *msg)
+{
+ unsigned long left;
+ int ret = 0;
+
+ left = wait_for_completion_timeout(&qup->xfer, HZ);
+ if (!left) {
+ writel(1, qup->base + QUP_SW_RESET);
+ ret = -ETIMEDOUT;
+ }
+
+ if (qup->bus_err || qup->qup_err) {
+ if (qup->bus_err & QUP_I2C_NACK_FLAG) {
+ dev_err(qup->dev, "NACK from %x\n", msg->addr);
+ ret = -EIO;
+ }
+ }
+
+ return ret;
+}
+
+static int qup_i2c_write_one_v2(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ int ret = 0;
do {
- ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE);
+ ret = qup_i2c_issue_xfer_v2(qup, msg);
if (ret)
goto err;
- qup_i2c_issue_write(qup, msg);
-
- ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ ret = qup_i2c_wait_for_complete(qup, msg);
if (ret)
goto err;
- left = wait_for_completion_timeout(&qup->xfer, HZ);
- if (!left) {
- writel(1, qup->base + QUP_SW_RESET);
- ret = -ETIMEDOUT;
- goto err;
- }
+ qup->blk.pos++;
+ } while (qup->blk.pos < qup->blk.count);
- if (qup->bus_err || qup->qup_err) {
- if (qup->bus_err & QUP_I2C_NACK_FLAG)
- dev_err(qup->dev, "NACK from %x\n", msg->addr);
- ret = -EIO;
- goto err;
- }
+err:
+ return ret;
+}
+
+static int qup_i2c_write_one_v1(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ int ret = 0;
+
+ do {
+ ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE);
+ if (ret)
+ return ret;
+
+ ret = qup_i2c_issue_write(qup, msg);
+ if (ret)
+ return ret;
+
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret)
+ return ret;
+
+ ret = qup_i2c_wait_for_complete(qup, msg);
+ if (ret)
+ return ret;
} while (qup->pos < msg->len);
- /* Wait for the outstanding data in the fifo to drain */
- ret = qup_i2c_wait_writeready(qup);
+ return ret;
+}
+
+static int qup_i2c_write(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ int ret;
+
+ qup->msg = msg;
+ qup->pos = 0;
+ enable_irq(qup->irq);
+ qup_i2c_get_blk_data(qup, msg);
+
+ qup_i2c_set_write_mode(qup, msg);
+
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret)
+ goto err;
+
+ writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);
+ ret = qup->qup_i2c_write_one(qup, msg);
+ if (ret)
+ goto err;
+
+ ret = qup_i2c_wait_ready(qup, QUP_OUT_NOT_EMPTY, RESET_BIT, ONE_BYTE);
err:
disable_irq(qup->irq);
qup->msg = NULL;
@@ -358,15 +649,22 @@ err:
static void qup_i2c_set_read_mode(struct qup_i2c_dev *qup, int len)
{
+ int tx_len = qup->blk.tx_tag_len;
+
+ len += qup->blk.rx_tag_len;
+ tx_len |= qup->blk.config_run;
+
if (len < qup->in_fifo_sz) {
/* FIFO mode */
writel(QUP_REPACK_EN, qup->base + QUP_IO_MODE);
- writel(len, qup->base + QUP_MX_READ_CNT);
+ writel(tx_len, qup->base + QUP_MX_WRITE_CNT);
+ writel(len | qup->blk.config_run, qup->base + QUP_MX_READ_CNT);
} else {
/* BLOCK mode (transfer data on chunks) */
writel(QUP_INPUT_BLK_MODE | QUP_REPACK_EN,
qup->base + QUP_IO_MODE);
- writel(len, qup->base + QUP_MX_INPUT_CNT);
+ writel(tx_len, qup->base + QUP_MX_OUTPUT_CNT);
+ writel(len | qup->blk.config_run, qup->base + QUP_MX_INPUT_CNT);
}
}
@@ -383,19 +681,19 @@ static void qup_i2c_issue_read(struct qup_i2c_dev *qup, struct i2c_msg *msg)
writel(val, qup->base + QUP_OUT_FIFO_BASE);
}
-
-static void qup_i2c_read_fifo(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+static int qup_i2c_read_fifo(struct qup_i2c_dev *qup, struct i2c_msg *msg)
{
- u32 opflags;
u32 val = 0;
int idx;
+ int ret = 0;
for (idx = 0; qup->pos < msg->len; idx++) {
if ((idx & 1) == 0) {
/* Check that FIFO have data */
- opflags = readl(qup->base + QUP_OPERATIONAL);
- if (!(opflags & QUP_IN_NOT_EMPTY))
- break;
+ ret = qup_i2c_wait_ready(qup, QUP_IN_NOT_EMPTY,
+ SET_BIT, 4 * ONE_BYTE);
+ if (ret)
+ return ret;
/* Reading 2 words at time */
val = readl(qup->base + QUP_IN_FIFO_BASE);
@@ -405,17 +703,117 @@ static void qup_i2c_read_fifo(struct qup_i2c_dev *qup, struct i2c_msg *msg)
msg->buf[qup->pos++] = val >> QUP_MSW_SHIFT;
}
}
+
+ return ret;
}
-static int qup_i2c_read_one(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+static int qup_i2c_read_fifo_v2(struct qup_i2c_dev *qup,
+ struct i2c_msg *msg)
+{
+ u32 val;
+ int idx, pos = 0, ret = 0, total;
+
+ total = qup_i2c_get_data_len(qup);
+
+ /* 2 extra bytes for read tags */
+ while (pos < (total + 2)) {
+ /* Check that FIFO have data */
+ ret = qup_i2c_wait_ready(qup, QUP_IN_NOT_EMPTY,
+ SET_BIT, 4 * ONE_BYTE);
+ if (ret) {
+ dev_err(qup->dev, "timeout for fifo not empty");
+ return ret;
+ }
+ val = readl(qup->base + QUP_IN_FIFO_BASE);
+
+ for (idx = 0; idx < 4; idx++, val >>= 8, pos++) {
+ /* first 2 bytes are tag bytes */
+ if (pos < 2)
+ continue;
+
+ if (pos >= (total + 2))
+ goto out;
+
+ msg->buf[qup->pos++] = val & 0xff;
+ }
+ }
+
+out:
+ qup->blk.data_len -= total;
+
+ return ret;
+}
+
+static int qup_i2c_read_one_v2(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ int ret = 0;
+
+ do {
+ ret = qup_i2c_issue_xfer_v2(qup, msg);
+ if (ret)
+ goto err;
+
+ ret = qup_i2c_wait_for_complete(qup, msg);
+ if (ret)
+ goto err;
+
+ ret = qup_i2c_read_fifo_v2(qup, msg);
+ if (ret)
+ goto err;
+
+ qup->blk.pos++;
+ } while (qup->blk.pos < qup->blk.count);
+
+err:
+ return ret;
+}
+
+static int qup_i2c_read_one_v1(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ int ret = 0;
+
+ /*
+ * The QUP block will issue a NACK and STOP on the bus when reaching
+ * the end of the read, the length of the read is specified as one byte
+ * which limits the possible read to 256 (QUP_READ_LIMIT) bytes.
+ */
+ if (msg->len > QUP_READ_LIMIT) {
+ dev_err(qup->dev, "HW not capable of reads over %d bytes\n",
+ QUP_READ_LIMIT);
+ return -EINVAL;
+ }
+
+ ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE);
+ if (ret)
+ return ret;
+
+ qup_i2c_issue_read(qup, msg);
+
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret)
+ return ret;
+
+ do {
+ ret = qup_i2c_wait_for_complete(qup, msg);
+ if (ret)
+ return ret;
+ ret = qup_i2c_read_fifo(qup, msg);
+ if (ret)
+ return ret;
+ } while (qup->pos < msg->len);
+
+ return ret;
+}
+
+static int qup_i2c_read(struct qup_i2c_dev *qup, struct i2c_msg *msg)
{
- unsigned long left;
int ret;
qup->msg = msg;
qup->pos = 0;
enable_irq(qup->irq);
+ qup_i2c_get_blk_data(qup, msg);
qup_i2c_set_read_mode(qup, msg->len);
@@ -425,38 +823,263 @@ static int qup_i2c_read_one(struct qup_i2c_dev *qup, struct i2c_msg *msg)
writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);
- ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE);
- if (ret)
- goto err;
+ ret = qup->qup_i2c_read_one(qup, msg);
- qup_i2c_issue_read(qup, msg);
+err:
+ disable_irq(qup->irq);
+ qup->msg = NULL;
- ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
- if (ret)
- goto err;
+ return ret;
+}
- do {
- left = wait_for_completion_timeout(&qup->xfer, HZ);
- if (!left) {
- writel(1, qup->base + QUP_SW_RESET);
- ret = -ETIMEDOUT;
- goto err;
+static void qup_i2c_bam_cb(void *data)
+{
+ struct qup_i2c_dev *qup = data;
+
+ complete(&qup->xfer);
+}
+
+static void qup_sg_set_buf(struct scatterlist *sg, void *buf,
+ struct qup_i2c_tag *tg, unsigned int buflen,
+ struct qup_i2c_dev *qup, int map, int dir)
+{
+ sg_set_buf(sg, buf, buflen);
+ dma_map_sg(qup->dev, sg, 1, dir);
+
+ if (!map)
+ sg_dma_address(sg) = tg->addr + ((u8 *)buf - tg->start);
+}
+
+static void qup_i2c_rel_dma(struct qup_i2c_dev *qup)
+{
+ if (qup->btx.dma_tx)
+ dma_release_channel(qup->btx.dma_tx);
+ if (qup->brx.dma_rx)
+ dma_release_channel(qup->brx.dma_rx);
+ qup->btx.dma_tx = NULL;
+ qup->brx.dma_rx = NULL;
+}
+
+static int qup_i2c_req_dma(struct qup_i2c_dev *qup)
+{
+ if (!qup->btx.dma_tx) {
+ qup->btx.dma_tx = dma_request_slave_channel(qup->dev, "tx");
+ if (!qup->btx.dma_tx) {
+ dev_err(qup->dev, "\n tx channel not available");
+ return -ENODEV;
+ }
+ }
+
+ if (!qup->brx.dma_rx) {
+ qup->brx.dma_rx = dma_request_slave_channel(qup->dev, "rx");
+ if (!qup->brx.dma_rx) {
+ dev_err(qup->dev, "\n rx channel not available");
+ qup_i2c_rel_dma(qup);
+ return -ENODEV;
}
+ }
+ return 0;
+}
- if (qup->bus_err || qup->qup_err) {
- if (qup->bus_err & QUP_I2C_NACK_FLAG)
- dev_err(qup->dev, "NACK from %x\n", msg->addr);
+static int bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ struct dma_async_tx_descriptor *txd, *rxd = NULL;
+ int ret = 0;
+ dma_cookie_t cookie_rx, cookie_tx;
+ u32 rx_nents = 0, tx_nents = 0, len, blocks, rem;
+ u32 i, tlen, tx_len, tx_buf = 0, rx_buf = 0, off = 0;
+ u8 *tags;
+
+ while (qup->cmsg < qup->num) {
+ blocks = (msg->len + QUP_READ_LIMIT) / QUP_READ_LIMIT;
+ rem = msg->len % QUP_READ_LIMIT;
+ tx_len = 0, len = 0, i = 0;
+
+ qup_i2c_get_blk_data(qup, msg);
+
+ if (msg->flags & I2C_M_RD) {
+ rx_nents += (blocks * 2) + 1;
+ tx_nents += 1;
+
+ while (qup->blk.pos < blocks) {
+ /* length set to '0' implies 256 bytes */
+ tlen = (i == (blocks - 1)) ? rem : 0;
+ tags = &qup->start_tag.start[off + len];
+ len += qup_i2c_get_tags(tags, qup, msg, 1);
+
+ /* scratch buf to read the start and len tags */
+ qup_sg_set_buf(&qup->brx.sg_rx[rx_buf++],
+ &qup->brx.scratch_tag.start[0],
+ &qup->brx.scratch_tag,
+ 2, qup, 0, 0);
+
+ qup_sg_set_buf(&qup->brx.sg_rx[rx_buf++],
+ &msg->buf[QUP_READ_LIMIT * i],
+ NULL, tlen, qup,
+ 1, DMA_FROM_DEVICE);
+ i++;
+ qup->blk.pos = i;
+ }
+ qup_sg_set_buf(&qup->btx.sg_tx[tx_buf++],
+ &qup->start_tag.start[off],
+ &qup->start_tag, len, qup, 0, 0);
+ off += len;
+ /* scratch buf to read the BAM EOT and FLUSH tags */
+ qup_sg_set_buf(&qup->brx.sg_rx[rx_buf++],
+ &qup->brx.scratch_tag.start[0],
+ &qup->brx.scratch_tag, 2,
+ qup, 0, 0);
+ } else {
+ tx_nents += (blocks * 2);
+
+ while (qup->blk.pos < blocks) {
+ tlen = (i == (blocks - 1)) ? rem : 0;
+ tags = &qup->start_tag.start[off + tx_len];
+ len = qup_i2c_get_tags(tags, qup, msg, 1);
+
+ qup_sg_set_buf(&qup->btx.sg_tx[tx_buf++],
+ tags,
+ &qup->start_tag, len,
+ qup, 0, 0);
+
+ tx_len += len;
+ qup_sg_set_buf(&qup->btx.sg_tx[tx_buf++],
+ &msg->buf[QUP_READ_LIMIT * i],
+ NULL, tlen, qup, 1,
+ DMA_TO_DEVICE);
+ i++;
+ qup->blk.pos = i;
+ }
+ off += tx_len;
+
+ if (qup->cmsg == (qup->num - 1)) {
+ qup->btx.footer_tag.start[0] =
+ QUP_BAM_FLUSH_STOP;
+ qup->btx.footer_tag.start[1] =
+ QUP_BAM_FLUSH_STOP;
+ qup_sg_set_buf(&qup->btx.sg_tx[tx_buf++],
+ &qup->btx.footer_tag.start[0],
+ &qup->btx.footer_tag, 2,
+ qup, 0, 0);
+ tx_nents += 1;
+ }
+ }
+ qup->cmsg++;
+ msg++;
+ }
+
+ txd = dmaengine_prep_slave_sg(qup->btx.dma_tx, qup->btx.sg_tx, tx_nents,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_PREP_FENCE);
+ if (!txd) {
+ dev_err(qup->dev, "failed to get tx desc\n");
+ ret = -EINVAL;
+ goto desc_err;
+ }
+
+ if (!rx_nents) {
+ txd->callback = qup_i2c_bam_cb;
+ txd->callback_param = qup;
+ }
+
+ cookie_tx = dmaengine_submit(txd);
+ dma_async_issue_pending(qup->btx.dma_tx);
+
+ if (rx_nents) {
+ rxd = dmaengine_prep_slave_sg(qup->brx.dma_rx, qup->brx.sg_rx,
+ rx_nents, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!rxd) {
+ dev_err(qup->dev, "failed to get rx desc\n");
+ ret = -EINVAL;
+
+ /* abort TX descriptors */
+ dmaengine_terminate_all(qup->btx.dma_tx);
+ goto desc_err;
+ }
+
+ rxd->callback = qup_i2c_bam_cb;
+ rxd->callback_param = qup;
+ cookie_rx = dmaengine_submit(rxd);
+ dma_async_issue_pending(qup->brx.dma_rx);
+ }
+
+ if (!wait_for_completion_timeout(&qup->xfer, TOUT_MAX * HZ)) {
+ dev_err(qup->dev, "normal trans timed out\n");
+ ret = -ETIMEDOUT;
+ }
+
+ if (ret || qup->bus_err || qup->qup_err) {
+ if (qup->bus_err & QUP_I2C_NACK_FLAG) {
+ msg--;
+ dev_err(qup->dev, "NACK from %x\n", msg->addr);
ret = -EIO;
- goto err;
+
+ if (qup_i2c_change_state(qup, QUP_RUN_STATE)) {
+ dev_err(qup->dev, "change to run state timed out");
+ return ret;
+ }
+
+ if (rx_nents)
+ writel(QUP_BAM_INPUT_EOT,
+ qup->base + QUP_OUT_FIFO_BASE);
+
+ writel(QUP_BAM_FLUSH_STOP,
+ qup->base + QUP_OUT_FIFO_BASE);
+
+ qup_i2c_flush(qup);
+
+ /* wait for remaining interrupts to occur */
+ if (!wait_for_completion_timeout(&qup->xfer, HZ))
+ dev_err(qup->dev, "flush timed out\n");
+
+ qup_i2c_rel_dma(qup);
}
+ }
- qup_i2c_read_fifo(qup, msg);
- } while (qup->pos < msg->len);
+ dma_unmap_sg(qup->dev, qup->btx.sg_tx, tx_nents, DMA_TO_DEVICE);
-err:
+ if (rx_nents)
+ dma_unmap_sg(qup->dev, qup->brx.sg_rx, rx_nents,
+ DMA_FROM_DEVICE);
+desc_err:
+ return ret;
+}
+
+static int qup_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg)
+{
+ struct qup_i2c_dev *qup = i2c_get_adapdata(adap);
+ int ret = 0;
+
+ enable_irq(qup->irq);
+ if (qup_i2c_req_dma(qup))
+ goto out;
+
+ qup->bus_err = 0;
+ qup->qup_err = 0;
+
+ writel(0, qup->base + QUP_MX_INPUT_CNT);
+ writel(0, qup->base + QUP_MX_OUTPUT_CNT);
+
+ /* set BAM mode */
+ writel(QUP_REPACK_EN | QUP_BAM_MODE, qup->base + QUP_IO_MODE);
+
+ /* mask fifo irqs */
+ writel((0x3 << 8), qup->base + QUP_OPERATIONAL_MASK);
+
+ /* set RUN STATE */
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret)
+ goto out;
+
+ writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);
+
+ qup->msg = msg;
+ ret = bam_do_xfer(qup, qup->msg);
+out:
disable_irq(qup->irq);
- qup->msg = NULL;
+ qup->msg = NULL;
return ret;
}
@@ -467,6 +1090,8 @@ static int qup_i2c_xfer(struct i2c_adapter *adap,
struct qup_i2c_dev *qup = i2c_get_adapdata(adap);
int ret, idx;
+ qup->num = 1;
+
ret = pm_runtime_get_sync(qup->dev);
if (ret < 0)
goto out;
@@ -491,9 +1116,9 @@ static int qup_i2c_xfer(struct i2c_adapter *adap,
}
if (msgs[idx].flags & I2C_M_RD)
- ret = qup_i2c_read_one(qup, &msgs[idx]);
+ ret = qup_i2c_read(qup, &msgs[idx]);
else
- ret = qup_i2c_write_one(qup, &msgs[idx]);
+ ret = qup_i2c_write(qup, &msgs[idx]);
if (ret)
break;
@@ -513,6 +1138,87 @@ out:
return ret;
}
+static int qup_i2c_xfer_v2(struct i2c_adapter *adap,
+ struct i2c_msg msgs[],
+ int num)
+{
+ struct qup_i2c_dev *qup = i2c_get_adapdata(adap);
+ int ret, idx, len, use_dma = 0;
+
+ qup->num = num;
+ qup->cmsg = 0;
+
+ ret = pm_runtime_get_sync(qup->dev);
+ if (ret < 0)
+ goto out;
+
+ writel(1, qup->base + QUP_SW_RESET);
+ ret = qup_i2c_poll_state(qup, QUP_RESET_STATE);
+ if (ret)
+ goto out;
+
+ /* Configure QUP as I2C mini core */
+ writel(I2C_MINI_CORE | I2C_N_VAL_V2, qup->base + QUP_CONFIG);
+ writel(QUP_V2_TAGS_EN, qup->base + QUP_I2C_MASTER_GEN);
+
+ if (qup->is_dma) {
+ /* All i2c_msgs should be transferred using either dma or cpu */
+ for (idx = 0; idx < num; idx++) {
+ if (msgs[idx].len == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ len = (msgs[idx].len > qup->out_fifo_sz) ||
+ (msgs[idx].len > qup->in_fifo_sz);
+
+ if ((!is_vmalloc_addr(msgs[idx].buf)) && len) {
+ use_dma = 1;
+ } else {
+ use_dma = 0;
+ break;
+ }
+ }
+ }
+
+ for (idx = 0; idx < num; idx++) {
+ if (qup_i2c_poll_state_i2c_master(qup)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ reinit_completion(&qup->xfer);
+
+ len = msgs[idx].len;
+
+ if (use_dma) {
+ ret = qup_bam_xfer(adap, &msgs[idx]);
+ idx = num;
+ } else {
+ if (msgs[idx].flags & I2C_M_RD)
+ ret = qup_i2c_read(qup, &msgs[idx]);
+ else
+ ret = qup_i2c_write(qup, &msgs[idx]);
+ }
+
+ if (ret)
+ break;
+
+ qup->cmsg++;
+ }
+
+ if (!ret)
+ ret = qup_i2c_change_state(qup, QUP_RESET_STATE);
+
+ if (ret == 0)
+ ret = num;
+out:
+ pm_runtime_mark_last_busy(qup->dev);
+ pm_runtime_put_autosuspend(qup->dev);
+
+ return ret;
+}
+
static u32 qup_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
@@ -532,6 +1238,11 @@ static struct i2c_adapter_quirks qup_i2c_quirks = {
.max_read_len = QUP_READ_LIMIT,
};
+static const struct i2c_algorithm qup_i2c_algo_v2 = {
+ .master_xfer = qup_i2c_xfer_v2,
+ .functionality = qup_i2c_func,
+};
+
static void qup_i2c_enable_clocks(struct qup_i2c_dev *qup)
{
clk_prepare_enable(qup->clk);
@@ -561,6 +1272,7 @@ static int qup_i2c_probe(struct platform_device *pdev)
int ret, fs_div, hs_div;
int src_clk_freq;
u32 clk_freq = 100000;
+ int blocks, size;
qup = devm_kzalloc(&pdev->dev, sizeof(*qup), GFP_KERNEL);
if (!qup)
@@ -572,6 +1284,69 @@ static int qup_i2c_probe(struct platform_device *pdev)
of_property_read_u32(node, "clock-frequency", &clk_freq);
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,i2c-qup-v1.1.1")) {
+ qup->adap.algo = &qup_i2c_algo;
+ qup->qup_i2c_write_one = qup_i2c_write_one_v1;
+ qup->qup_i2c_read_one = qup_i2c_read_one_v1;
+ } else {
+ qup->adap.algo = &qup_i2c_algo_v2;
+ qup->qup_i2c_write_one = qup_i2c_write_one_v2;
+ qup->qup_i2c_read_one = qup_i2c_read_one_v2;
+ qup->use_v2_tags = 1;
+
+ if (qup_i2c_req_dma(qup))
+ goto nodma;
+
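+ /* enough sg entries for a data and a tag segment per block, plus one spare */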
+ blocks = (MX_BLOCKS << 1) + 1;
+ qup->btx.sg_tx = devm_kzalloc(&pdev->dev,
+ sizeof(*qup->btx.sg_tx) * blocks,
+ GFP_KERNEL);
+ if (!qup->btx.sg_tx) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ sg_init_table(qup->btx.sg_tx, blocks);
+
+ qup->brx.sg_rx = devm_kzalloc(&pdev->dev,
+ sizeof(*qup->brx.sg_rx) * blocks,
+ GFP_KERNEL);
+ if (!qup->brx.sg_rx) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ sg_init_table(qup->brx.sg_rx, blocks);
+
+ /* 2 tag bytes for each block + 5 for start, stop tags */
+ size = blocks * 2 + 5;
+ qup->dpool = dma_pool_create("qup_i2c-dma-pool", &pdev->dev,
+ size, 4, 0);
+ if (!qup->dpool) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ qup->start_tag.start = dma_pool_alloc(qup->dpool, GFP_KERNEL,
+ &qup->start_tag.addr);
+ if (!qup->start_tag.start) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ qup->brx.scratch_tag.start = dma_pool_alloc(qup->dpool,
+ GFP_KERNEL,
+ &qup->brx.scratch_tag.addr);
+ if (!qup->brx.scratch_tag.start) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ qup->btx.footer_tag.start = dma_pool_alloc(qup->dpool,
+ GFP_KERNEL,
+ &qup->btx.footer_tag.addr);
+ if (!qup->btx.footer_tag.start) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ qup->is_dma = 1;
+ }
+
+nodma:
/* We support frequencies up to FAST Mode (400KHz) */
if (!clk_freq || clk_freq > 400000) {
dev_err(qup->dev, "clock frequency not supported %d\n",
@@ -667,7 +1442,6 @@ static int qup_i2c_probe(struct platform_device *pdev)
qup->out_blk_sz, qup->out_fifo_sz);
i2c_set_adapdata(&qup->adap, qup);
- qup->adap.algo = &qup_i2c_algo;
qup->adap.quirks = &qup_i2c_quirks;
qup->adap.dev.parent = qup->dev;
qup->adap.dev.of_node = pdev->dev.of_node;
@@ -688,6 +1462,11 @@ fail_runtime:
pm_runtime_disable(qup->dev);
pm_runtime_set_suspended(qup->dev);
fail:
+ if (qup->btx.dma_tx)
+ dma_release_channel(qup->btx.dma_tx);
+ if (qup->brx.dma_rx)
+ dma_release_channel(qup->brx.dma_rx);
+
qup_i2c_disable_clocks(qup);
return ret;
}
@@ -696,6 +1475,18 @@ static int qup_i2c_remove(struct platform_device *pdev)
{
struct qup_i2c_dev *qup = platform_get_drvdata(pdev);
+ if (qup->is_dma) {
+ dma_pool_free(qup->dpool, qup->start_tag.start,
+ qup->start_tag.addr);
+ dma_pool_free(qup->dpool, qup->brx.scratch_tag.start,
+ qup->brx.scratch_tag.addr);
+ dma_pool_free(qup->dpool, qup->btx.footer_tag.start,
+ qup->btx.footer_tag.addr);
+ dma_pool_destroy(qup->dpool);
+ dma_release_channel(qup->btx.dma_tx);
+ dma_release_channel(qup->brx.dma_rx);
+ }
+
disable_irq(qup->irq);
qup_i2c_disable_clocks(qup);
i2c_del_adapter(&qup->adap);
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index cbe6a890a93a..2cc40e352c6a 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -64,7 +64,6 @@ config MSM_IOMMU
bool "MSM IOMMU Support"
depends on ARM
depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST
- depends on BROKEN
select IOMMU_API
help
Support for the IOMMUs found on certain Qualcomm SOCs.
@@ -77,6 +76,8 @@ config IOMMU_PGTABLES_L2
def_bool y
depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n
+source "drivers/iommu/qcom/Kconfig"
+
# AMD IOMMU support
config AMD_IOMMU
bool "AMD IOMMU support"
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index c6dcc513d711..c665bbb5704d 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -5,7 +5,8 @@ obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
-obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
+obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
+obj-$(CONFIG_QCOM_IOMMU_V1) += qcom/
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index e321fa517a45..cf871ffc2aa3 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -27,6 +27,8 @@
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of_iommu.h>
#include <asm/cacheflush.h>
#include <asm/sizes.h>
@@ -39,15 +41,12 @@ __asm__ __volatile__ ( \
" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
: "=r" (reg))
-#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
-#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
-
/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
-
-static int msm_iommu_tex_class[4];
+#define DUMMY_CONTEXT 1
DEFINE_SPINLOCK(msm_iommu_lock);
+static LIST_HEAD(qcom_iommu_devices);
struct msm_priv {
unsigned long *pgtable;
@@ -55,40 +54,87 @@ struct msm_priv {
struct iommu_domain domain;
};
+struct device *msm_iommu_get_ctx(const char *ctx_name)
+{
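+ /* legacy stub: callers only NULL-check the result, so a dummy token is enough */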
+ return (struct device *) DUMMY_CONTEXT;
+}
+EXPORT_SYMBOL(msm_iommu_get_ctx);
+
static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
return container_of(dom, struct msm_priv, domain);
}
-static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
+static int __enable_clocks(struct msm_iommu_dev *iommu)
{
int ret;
- ret = clk_enable(drvdata->pclk);
+ ret = clk_enable(iommu->pclk);
if (ret)
goto fail;
- if (drvdata->clk) {
- ret = clk_enable(drvdata->clk);
+ if (iommu->clk) {
+ ret = clk_enable(iommu->clk);
if (ret)
- clk_disable(drvdata->pclk);
+ clk_disable(iommu->pclk);
}
fail:
return ret;
}
-static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
+static void __disable_clocks(struct msm_iommu_dev *iommu)
{
- clk_disable(drvdata->clk);
- clk_disable(drvdata->pclk);
+ if (iommu->clk)
+ clk_disable(iommu->clk);
+ clk_disable(iommu->pclk);
+}
+
+static void msm_iommu_reset(void __iomem *base, int ncb)
+{
+ int ctx;
+
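+ /* put the global register file and every context bank into a known state */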
+ SET_RPUE(base, 0);
+ SET_RPUEIE(base, 0);
+ SET_ESRRESTORE(base, 0);
+ SET_TBE(base, 0);
+ SET_CR(base, 0);
+ SET_SPDMBE(base, 0);
+ SET_TESTBUSCR(base, 0);
+ SET_TLBRSW(base, 0);
+ SET_GLOBAL_TLBIALL(base, 0);
+ SET_RPU_ACR(base, 0);
+ SET_TLBLKCRWE(base, 1);
+
+ for (ctx = 0; ctx < ncb; ctx++) {
+ SET_BPRCOSH(base, ctx, 0);
+ SET_BPRCISH(base, ctx, 0);
+ SET_BPRCNSH(base, ctx, 0);
+ SET_BPSHCFG(base, ctx, 0);
+ SET_BPMTCFG(base, ctx, 0);
+ SET_ACTLR(base, ctx, 0);
+ SET_SCTLR(base, ctx, 0);
+ SET_FSRRESTORE(base, ctx, 0);
+ SET_TTBR0(base, ctx, 0);
+ SET_TTBR1(base, ctx, 0);
+ SET_TTBCR(base, ctx, 0);
+ SET_BFBCR(base, ctx, 0);
+ SET_PAR(base, ctx, 0);
+ SET_FAR(base, ctx, 0);
+ SET_CTX_TLBIALL(base, ctx, 0);
+ SET_TLBFLPTER(base, ctx, 0);
+ SET_TLBSLPTER(base, ctx, 0);
+ SET_TLBLKCR(base, ctx, 0);
+ SET_CONTEXTIDR(base, ctx, 0);
+ }
}
static int __flush_iotlb(struct iommu_domain *domain)
{
struct msm_priv *priv = to_msm_priv(domain);
- struct msm_iommu_drvdata *iommu_drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_iommu_dev *iommu = NULL;
+ struct msm_iommu_ctx_dev *master;
int ret = 0;
+
#ifndef CONFIG_IOMMU_PGTABLES_L2
unsigned long *fl_table = priv->pgtable;
int i;
@@ -105,24 +151,67 @@ static int __flush_iotlb(struct iommu_domain *domain)
}
#endif
- list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
-
- BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
-
- iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
- BUG_ON(!iommu_drvdata);
-
- ret = __enable_clocks(iommu_drvdata);
+ list_for_each_entry(iommu, &priv->list_attached, dom_node) {
+ ret = __enable_clocks(iommu);
if (ret)
goto fail;
- SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0);
- __disable_clocks(iommu_drvdata);
+ list_for_each_entry(master, &iommu->ctx_list, list)
+ SET_CTX_TLBIALL(iommu->base, master->num, 0);
+
+ __disable_clocks(iommu);
}
fail:
return ret;
}
+static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
+{
+ int idx;
+
+ do {
+ idx = find_next_zero_bit(map, end, start);
+ if (idx == end)
+ return -ENOSPC;
+ } while (test_and_set_bit(idx, map));
+
+ return idx;
+}
+
+static void msm_iommu_free_ctx(unsigned long *map, int idx)
+{
+ clear_bit(idx, map);
+}
+
+static void config_mids(struct msm_iommu_dev *iommu,
+ struct msm_iommu_ctx_dev *master)
+{
+ int mid, ctx, i;
+
+ for (i = 0; i < master->num_mids; i++) {
+ mid = master->mids[i];
+ ctx = master->num;
+
+ SET_M2VCBR_N(iommu->base, mid, 0);
+ SET_CBACR_N(iommu->base, ctx, 0);
+
+ /* Set VMID = 0 */
+ SET_VMID(iommu->base, mid, 0);
+
+ /* Set the context number for that MID to this context */
+ SET_CBNDX(iommu->base, mid, ctx);
+
+ /* Set MID associated with this context bank to 0 */
+ SET_CBVMID(iommu->base, ctx, 0);
+
+ /* Set the ASID for TLB tagging for this context */
+ SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);
+
+ /* Set security bit override to be Non-secure */
+ SET_NSCFG(iommu->base, mid, 3);
+ }
+}
+
static void __reset_context(void __iomem *base, int ctx)
{
SET_BPRCOSH(base, ctx, 0);
@@ -143,13 +232,10 @@ static void __reset_context(void __iomem *base, int ctx)
SET_TLBFLPTER(base, ctx, 0);
SET_TLBSLPTER(base, ctx, 0);
SET_TLBLKCR(base, ctx, 0);
- SET_PRRR(base, ctx, 0);
- SET_NMRR(base, ctx, 0);
}
static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
{
- unsigned int prrr, nmrr;
__reset_context(base, ctx);
/* Set up HTW mode */
@@ -179,15 +265,6 @@ static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
SET_RCOSH(base, ctx, 1);
SET_RCNSH(base, ctx, 1);
- /* Turn on TEX Remap */
- SET_TRE(base, ctx, 1);
-
- /* Set TEX remap attributes */
- RCP15_PRRR(prrr);
- RCP15_NMRR(nmrr);
- SET_PRRR(base, ctx, prrr);
- SET_NMRR(base, ctx, nmrr);
-
/* Turn on BFB prefetch */
SET_BFBDFE(base, ctx, 1);
@@ -272,94 +349,76 @@ static void msm_iommu_domain_free(struct iommu_domain *domain)
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
- struct msm_priv *priv;
- struct msm_iommu_ctx_dev *ctx_dev;
- struct msm_iommu_drvdata *iommu_drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
- struct msm_iommu_ctx_drvdata *tmp_drvdata;
int ret = 0;
unsigned long flags;
+ struct msm_iommu_dev *iommu;
+ struct msm_priv *priv = to_msm_priv(domain);
+ struct msm_iommu_ctx_dev *master;
spin_lock_irqsave(&msm_iommu_lock, flags);
-
- priv = to_msm_priv(domain);
-
- if (!dev) {
- ret = -EINVAL;
- goto fail;
- }
-
- iommu_drvdata = dev_get_drvdata(dev->parent);
- ctx_drvdata = dev_get_drvdata(dev);
- ctx_dev = dev->platform_data;
-
- if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
- ret = -EINVAL;
- goto fail;
- }
-
- if (!list_empty(&ctx_drvdata->attached_elm)) {
- ret = -EBUSY;
- goto fail;
- }
-
- list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
- if (tmp_drvdata == ctx_drvdata) {
- ret = -EBUSY;
- goto fail;
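+ /* attach every registered IOMMU whose masters reference this device's of_node */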
+ list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
+ master = list_first_entry(&iommu->ctx_list,
+ struct msm_iommu_ctx_dev,
+ list);
+ if (master->of_node == dev->of_node) {
+ ret = __enable_clocks(iommu);
+ if (ret)
+ goto fail;
+
+ list_for_each_entry(master, &iommu->ctx_list, list) {
+ if (master->num) {
+ dev_err(dev, "domain already attached\n");
+ ret = -EEXIST;
+ __disable_clocks(iommu);
+ goto fail;
+ }
+ master->num =
+ msm_iommu_alloc_ctx(iommu->context_map,
+ 0, iommu->ncb);
+ if (IS_ERR_VALUE(master->num)) {
+ ret = -ENODEV;
+ __disable_clocks(iommu);
+ goto fail;
+ }
+ config_mids(iommu, master);
+ __program_context(iommu->base, master->num,
+ __pa(priv->pgtable));
+ }
+ __disable_clocks(iommu);
+ list_add(&iommu->dom_node, &priv->list_attached);
}
+ }
- ret = __enable_clocks(iommu_drvdata);
- if (ret)
- goto fail;
-
- __program_context(iommu_drvdata->base, ctx_dev->num,
- __pa(priv->pgtable));
-
- __disable_clocks(iommu_drvdata);
- list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
ret = __flush_iotlb(domain);
-
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
+
return ret;
}
static void msm_iommu_detach_dev(struct iommu_domain *domain,
struct device *dev)
{
- struct msm_priv *priv;
- struct msm_iommu_ctx_dev *ctx_dev;
- struct msm_iommu_drvdata *iommu_drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_priv *priv = to_msm_priv(domain);
unsigned long flags;
+ struct msm_iommu_dev *iommu;
+ struct msm_iommu_ctx_dev *master;
int ret;
spin_lock_irqsave(&msm_iommu_lock, flags);
- priv = to_msm_priv(domain);
-
- if (!dev)
- goto fail;
-
- iommu_drvdata = dev_get_drvdata(dev->parent);
- ctx_drvdata = dev_get_drvdata(dev);
- ctx_dev = dev->platform_data;
-
- if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
- goto fail;
-
ret = __flush_iotlb(domain);
if (ret)
goto fail;
- ret = __enable_clocks(iommu_drvdata);
- if (ret)
- goto fail;
-
- __reset_context(iommu_drvdata->base, ctx_dev->num);
- __disable_clocks(iommu_drvdata);
- list_del_init(&ctx_drvdata->attached_elm);
+ list_for_each_entry(iommu, &priv->list_attached, dom_node) {
+ ret = __enable_clocks(iommu);
+ if (ret)
+ goto fail;
+ list_for_each_entry(master, &iommu->ctx_list, list) {
+ msm_iommu_free_ctx(iommu->context_map, master->num);
+ __reset_context(iommu->base, master->num);
+ }
+ __disable_clocks(iommu);
+ }
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
@@ -376,12 +435,14 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
unsigned long *sl_pte;
unsigned long sl_offset;
unsigned int pgprot;
- int ret = 0, tex, sh;
+ int ret = 0, tex = 0, sh;
spin_lock_irqsave(&msm_iommu_lock, flags);
sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0;
- tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK];
+
+ if (prot & IOMMU_CACHE)
+ tex = FL_BUFFERABLE | FL_CACHEABLE;
if (tex < 0 || tex > NUM_TEX_CLASS - 1) {
ret = -EINVAL;
@@ -424,12 +485,12 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
int i = 0;
for (i = 0; i < 16; i++)
*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
- FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
+ FL_AP1 | FL_AP0 | FL_TYPE_SECT |
FL_SHARED | FL_NG | pgprot;
}
if (len == SZ_1M)
- *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG |
+ *fl_pte = (pa & 0xFFF00000) | FL_AP1 | FL_AP0 | FL_NG |
FL_TYPE_SECT | FL_SHARED | pgprot;
/* Need a 2nd level table */
@@ -555,47 +616,46 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t va)
{
struct msm_priv *priv;
- struct msm_iommu_drvdata *iommu_drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_iommu_dev *iommu;
+ struct msm_iommu_ctx_dev *master;
unsigned int par;
unsigned long flags;
- void __iomem *base;
phys_addr_t ret = 0;
- int ctx;
spin_lock_irqsave(&msm_iommu_lock, flags);
priv = to_msm_priv(domain);
- if (list_empty(&priv->list_attached))
- goto fail;
+ iommu = list_first_entry(&priv->list_attached,
+ struct msm_iommu_dev, dom_node);
- ctx_drvdata = list_entry(priv->list_attached.next,
- struct msm_iommu_ctx_drvdata, attached_elm);
- iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+ if (list_empty(&iommu->ctx_list))
+ goto fail;
- base = iommu_drvdata->base;
- ctx = ctx_drvdata->num;
+ master = list_first_entry(&iommu->ctx_list,
+ struct msm_iommu_ctx_dev, list);
+ if (!master)
+ goto fail;
- ret = __enable_clocks(iommu_drvdata);
+ ret = __enable_clocks(iommu);
if (ret)
goto fail;
/* Invalidate context TLB */
- SET_CTX_TLBIALL(base, ctx, 0);
- SET_V2PPR(base, ctx, va & V2Pxx_VA);
+ SET_CTX_TLBIALL(iommu->base, master->num, 0);
+ SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);
- par = GET_PAR(base, ctx);
+ par = GET_PAR(iommu->base, master->num);
/* We are dealing with a supersection */
- if (GET_NOFAULT_SS(base, ctx))
+ if (GET_NOFAULT_SS(iommu->base, master->num))
ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
else /* Upper 20 bits from PAR, lower 12 from VA */
ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
- if (GET_FAULT(base, ctx))
+ if (GET_FAULT(iommu->base, master->num))
ret = 0;
- __disable_clocks(iommu_drvdata);
+ __disable_clocks(iommu);
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
return ret;
@@ -629,49 +689,82 @@ static void print_ctx_regs(void __iomem *base, int ctx)
GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
pr_err("SCTLR = %08x ACTLR = %08x\n",
GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
- pr_err("PRRR = %08x NMRR = %08x\n",
- GET_PRRR(base, ctx), GET_NMRR(base, ctx));
+}
+
+static void insert_iommu_master(struct device *dev,
+ struct msm_iommu_dev *iommu,
+ struct of_phandle_args *spec)
+{
+ struct msm_iommu_ctx_dev *master;
+ int sid;
+
+ /* called under msm_iommu_lock, so the allocation must not sleep */
+ master = kzalloc(sizeof(*master), GFP_ATOMIC);
+ if (!master)
+ return;
+
+ master->of_node = dev->of_node;
+ list_add(&master->list, &iommu->ctx_list);
+
+ for (sid = 0; sid < spec->args_count; sid++)
+ master->mids[sid] = spec->args[sid];
+
+ master->num_mids = spec->args_count;
+}
+
+static int qcom_iommu_of_xlate(struct device *dev,
+ struct of_phandle_args *spec)
+{
+ struct msm_iommu_dev *iommu = NULL, *iter;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&msm_iommu_lock, flags);
+ list_for_each_entry(iter, &qcom_iommu_devices, dev_node) {
+ if (iter->dev->of_node == spec->np) {
+ iommu = iter;
+ break;
+ }
+ }
+
+ /* no IOMMU registered for this node; drop the lock on this path too */
+ if (!iommu) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ insert_iommu_master(dev, iommu, spec);
+out:
+ spin_unlock_irqrestore(&msm_iommu_lock, flags);
+
+ return ret;
}
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
- struct msm_iommu_drvdata *drvdata = dev_id;
- void __iomem *base;
+ struct msm_iommu_dev *iommu = dev_id;
unsigned int fsr;
int i, ret;
spin_lock(&msm_iommu_lock);
- if (!drvdata) {
+ if (!iommu) {
pr_err("Invalid device ID in context interrupt handler\n");
goto fail;
}
- base = drvdata->base;
-
pr_err("Unexpected IOMMU page fault!\n");
- pr_err("base = %08x\n", (unsigned int) base);
+ pr_err("base = %08x\n", (unsigned int)iommu->base);
- ret = __enable_clocks(drvdata);
+ ret = __enable_clocks(iommu);
if (ret)
goto fail;
- for (i = 0; i < drvdata->ncb; i++) {
- fsr = GET_FSR(base, i);
+ for (i = 0; i < iommu->ncb; i++) {
+ fsr = GET_FSR(iommu->base, i);
if (fsr) {
pr_err("Fault occurred in context %d.\n", i);
pr_err("Interesting registers:\n");
- print_ctx_regs(base, i);
- SET_FSR(base, i, 0x4000000F);
+ print_ctx_regs(iommu->base, i);
+ SET_FSR(iommu->base, i, 0x4000000F);
}
}
- __disable_clocks(drvdata);
+ __disable_clocks(iommu);
fail:
spin_unlock(&msm_iommu_lock);
return 0;
}
-static const struct iommu_ops msm_iommu_ops = {
+static struct iommu_ops msm_iommu_ops = {
.capable = msm_iommu_capable,
.domain_alloc = msm_iommu_domain_alloc,
.domain_free = msm_iommu_domain_free,
@@ -682,54 +775,163 @@ static const struct iommu_ops msm_iommu_ops = {
.map_sg = default_iommu_map_sg,
.iova_to_phys = msm_iommu_iova_to_phys,
.pgsize_bitmap = MSM_IOMMU_PGSIZES,
+ .of_xlate = qcom_iommu_of_xlate,
};
-static int __init get_tex_class(int icp, int ocp, int mt, int nos)
+static int msm_iommu_probe(struct platform_device *pdev)
{
- int i = 0;
- unsigned int prrr = 0;
- unsigned int nmrr = 0;
- int c_icp, c_ocp, c_mt, c_nos;
-
- RCP15_PRRR(prrr);
- RCP15_NMRR(nmrr);
-
- for (i = 0; i < NUM_TEX_CLASS; i++) {
- c_nos = PRRR_NOS(prrr, i);
- c_mt = PRRR_MT(prrr, i);
- c_icp = NMRR_ICP(nmrr, i);
- c_ocp = NMRR_OCP(nmrr, i);
-
- if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
- return i;
+ struct resource *r;
+ struct msm_iommu_dev *iommu;
+ int ret, par, val;
+
+ iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
+ if (!iommu)
+ return -ENOMEM;
+
+ iommu->dev = &pdev->dev;
+ INIT_LIST_HEAD(&iommu->ctx_list);
+
+ iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
+ if (IS_ERR(iommu->pclk)) {
+ dev_err(iommu->dev, "could not get smmu_pclk\n");
+ return PTR_ERR(iommu->pclk);
+ }
+ ret = clk_prepare(iommu->pclk);
+ if (ret) {
+ dev_err(iommu->dev, "could not prepare smmu_pclk\n");
+ return ret;
+ }
+
+ iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
+ if (IS_ERR(iommu->clk)) {
+ dev_err(iommu->dev, "could not get iommu_clk\n");
+ clk_unprepare(iommu->pclk);
+ return PTR_ERR(iommu->clk);
+ }
+ ret = clk_prepare(iommu->clk);
+ if (ret) {
+ dev_err(iommu->dev, "could not prepare iommu_clk\n");
+ clk_unprepare(iommu->pclk);
+ return ret;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iommu->base = devm_ioremap_resource(iommu->dev, r);
+ if (IS_ERR(iommu->base)) {
+ dev_err(iommu->dev, "could not get iommu base\n");
+ ret = PTR_ERR(iommu->base);
+ goto fail;
+ }
+
+ iommu->irq = platform_get_irq(pdev, 0);
+ if (iommu->irq < 0) {
+ dev_err(iommu->dev, "could not get iommu irq\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ ret = of_property_read_u32(iommu->dev->of_node, "ncb", &val);
+ if (ret) {
+ dev_err(iommu->dev, "could not get ncb\n");
+ goto fail;
+ }
+ iommu->ncb = val;
+
+ msm_iommu_reset(iommu->base, iommu->ncb);
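+ /* sanity check: a probe-time V2P translation should produce a non-zero PAR */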
+ SET_M(iommu->base, 0, 1);
+ SET_PAR(iommu->base, 0, 0);
+ SET_V2PCFG(iommu->base, 0, 1);
+ SET_V2PPR(iommu->base, 0, 0);
+ par = GET_PAR(iommu->base, 0);
+ SET_V2PCFG(iommu->base, 0, 0);
+ SET_M(iommu->base, 0, 0);
+
+ if (!par) {
+ pr_err("Invalid PAR value detected\n");
+ ret = -ENODEV;
+ goto fail;
}
- return -ENODEV;
+ ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
+ msm_iommu_fault_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "msm_iommu_secure_irpt_handler",
+ iommu);
+ if (ret) {
+ pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
+ goto fail;
+ }
+
+ list_add(&iommu->dev_node, &qcom_iommu_devices);
+ of_iommu_set_ops(pdev->dev.of_node, &msm_iommu_ops);
+
+ platform_set_drvdata(pdev, iommu);
+
+ pr_info("device mapped at %p, irq %d with %d ctx banks\n",
+ iommu->base, iommu->irq, iommu->ncb);
+
+ return 0;
+fail:
+ clk_unprepare(iommu->clk);
+ clk_unprepare(iommu->pclk);
+ return ret;
}
-static void __init setup_iommu_tex_classes(void)
+static const struct of_device_id msm_iommu_dt_match[] = {
+ { .compatible = "qcom,iommu-v0"},
+ {}
+};
+
+static int msm_iommu_remove(struct platform_device *pdev)
+{
+ struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);
+
+ clk_unprepare(iommu->clk);
+ clk_unprepare(iommu->pclk);
+ return 0;
+}
+
+static struct platform_driver msm_iommu_driver = {
+ .driver = {
+ .name = "msm_iommu",
+ .of_match_table = msm_iommu_dt_match,
+ },
+ .probe = msm_iommu_probe,
+ .remove = msm_iommu_remove,
+};
+
+static int __init msm_iommu_driver_init(void)
{
- msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
- get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);
+ int ret;
- msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
- get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);
+ ret = platform_driver_register(&msm_iommu_driver);
+ if (ret != 0)
+ pr_err("Failed to register IOMMU driver\n");
- msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
- get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);
+ return ret;
+}
- msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
- get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
+static void __exit msm_iommu_driver_exit(void)
+{
+ platform_driver_unregister(&msm_iommu_driver);
}
+subsys_initcall(msm_iommu_driver_init);
+module_exit(msm_iommu_driver_exit);
+
static int __init msm_iommu_init(void)
{
- setup_iommu_tex_classes();
bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
return 0;
}
-subsys_initcall(msm_iommu_init);
+static int __init msm_iommu_of_setup(struct device_node *np)
+{
+ msm_iommu_init();
+ return 0;
+}
+
+IOMMU_OF_DECLARE(msm_iommu_of, "qcom,iommu-v0", msm_iommu_of_setup);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
diff --git a/drivers/iommu/msm_iommu.h b/drivers/iommu/msm_iommu.h
index 5c7c955e6d25..4ca25d50d679 100644
--- a/drivers/iommu/msm_iommu.h
+++ b/drivers/iommu/msm_iommu.h
@@ -42,74 +42,53 @@
*/
#define MAX_NUM_MIDS 32
+/* Maximum number of context banks that can be present in IOMMU */
+#define IOMMU_MAX_CBS 128
+
/**
* struct msm_iommu_dev - a single IOMMU hardware instance
- * name Human-readable name given to this IOMMU HW instance
* ncb Number of context banks present on this IOMMU HW instance
+ * dev: IOMMU device
+ * irq: Interrupt number
+ * clk: The bus clock for this IOMMU hardware instance
+ * pclk: The clock for the IOMMU bus interconnect
+ * dev_node: list head in the global qcom_iommu_devices list
+ * dom_node: list head for domain
+ * ctx_list: list of 'struct msm_iommu_ctx_dev'
+ * context_map: Bitmap to track allocated context banks
*/
struct msm_iommu_dev {
- const char *name;
+ void __iomem *base;
int ncb;
+ struct device *dev;
+ int irq;
+ struct clk *clk;
+ struct clk *pclk;
+ struct list_head dev_node;
+ struct list_head dom_node;
+ struct list_head ctx_list;
+ DECLARE_BITMAP(context_map, IOMMU_MAX_CBS);
};
/**
* struct msm_iommu_ctx_dev - an IOMMU context bank instance
- * name Human-readable name given to this context bank
+ * of_node: node pointer of the client device
* num Index of this context bank within the hardware
* mids List of Machine IDs that are to be mapped into this context
* bank, terminated by -1. The MID is a set of signals on the
* AXI bus that identifies the function associated with a specific
* memory request. (See ARM spec).
+ * num_mids: total number of MIDs
+ * list: list head in ctx_list
*/
struct msm_iommu_ctx_dev {
- const char *name;
+ struct device_node *of_node;
int num;
int mids[MAX_NUM_MIDS];
+ int num_mids;
+ struct list_head list;
};
-
-/**
- * struct msm_iommu_drvdata - A single IOMMU hardware instance
- * @base: IOMMU config port base address (VA)
- * @ncb The number of contexts on this IOMMU
- * @irq: Interrupt number
- * @clk: The bus clock for this IOMMU hardware instance
- * @pclk: The clock for the IOMMU bus interconnect
- *
- * A msm_iommu_drvdata holds the global driver data about a single piece
- * of an IOMMU hardware instance.
- */
-struct msm_iommu_drvdata {
- void __iomem *base;
- int irq;
- int ncb;
- struct clk *clk;
- struct clk *pclk;
-};
-
-/**
- * struct msm_iommu_ctx_drvdata - an IOMMU context bank instance
- * @num: Hardware context number of this context
- * @pdev: Platform device associated wit this HW instance
- * @attached_elm: List element for domains to track which devices are
- * attached to them
- *
- * A msm_iommu_ctx_drvdata holds the driver data for a single context bank
- * within each IOMMU hardware instance
- */
-struct msm_iommu_ctx_drvdata {
- int num;
- struct platform_device *pdev;
- struct list_head attached_elm;
-};
-
-/*
- * Look up an IOMMU context device by its context name. NULL if none found.
- * Useful for testing and drivers that do not yet fully have IOMMU stuff in
- * their platform devices.
- */
-struct device *msm_iommu_get_ctx(const char *ctx_name);
-
/*
* Interrupt handler for the IOMMU context fault interrupt. Hooking the
* interrupt is not supported in the API yet, but this will print an error
diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c
deleted file mode 100644
index b6d01f97e537..000000000000
--- a/drivers/iommu/msm_iommu_dev.c
+++ /dev/null
@@ -1,392 +0,0 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/iommu.h>
-#include <linux/interrupt.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-
-#include "msm_iommu_hw-8xxx.h"
-#include "msm_iommu.h"
-
-struct iommu_ctx_iter_data {
- /* input */
- const char *name;
-
- /* output */
- struct device *dev;
-};
-
-static struct platform_device *msm_iommu_root_dev;
-
-static int each_iommu_ctx(struct device *dev, void *data)
-{
- struct iommu_ctx_iter_data *res = data;
- struct msm_iommu_ctx_dev *c = dev->platform_data;
-
- if (!res || !c || !c->name || !res->name)
- return -EINVAL;
-
- if (!strcmp(res->name, c->name)) {
- res->dev = dev;
- return 1;
- }
- return 0;
-}
-
-static int each_iommu(struct device *dev, void *data)
-{
- return device_for_each_child(dev, data, each_iommu_ctx);
-}
-
-struct device *msm_iommu_get_ctx(const char *ctx_name)
-{
- struct iommu_ctx_iter_data r;
- int found;
-
- if (!msm_iommu_root_dev) {
- pr_err("No root IOMMU device.\n");
- goto fail;
- }
-
- r.name = ctx_name;
- found = device_for_each_child(&msm_iommu_root_dev->dev, &r, each_iommu);
-
- if (!found) {
- pr_err("Could not find context <%s>\n", ctx_name);
- goto fail;
- }
-
- return r.dev;
-fail:
- return NULL;
-}
-EXPORT_SYMBOL(msm_iommu_get_ctx);
-
-static void msm_iommu_reset(void __iomem *base, int ncb)
-{
- int ctx;
-
- SET_RPUE(base, 0);
- SET_RPUEIE(base, 0);
- SET_ESRRESTORE(base, 0);
- SET_TBE(base, 0);
- SET_CR(base, 0);
- SET_SPDMBE(base, 0);
- SET_TESTBUSCR(base, 0);
- SET_TLBRSW(base, 0);
- SET_GLOBAL_TLBIALL(base, 0);
- SET_RPU_ACR(base, 0);
- SET_TLBLKCRWE(base, 1);
-
- for (ctx = 0; ctx < ncb; ctx++) {
- SET_BPRCOSH(base, ctx, 0);
- SET_BPRCISH(base, ctx, 0);
- SET_BPRCNSH(base, ctx, 0);
- SET_BPSHCFG(base, ctx, 0);
- SET_BPMTCFG(base, ctx, 0);
- SET_ACTLR(base, ctx, 0);
- SET_SCTLR(base, ctx, 0);
- SET_FSRRESTORE(base, ctx, 0);
- SET_TTBR0(base, ctx, 0);
- SET_TTBR1(base, ctx, 0);
- SET_TTBCR(base, ctx, 0);
- SET_BFBCR(base, ctx, 0);
- SET_PAR(base, ctx, 0);
- SET_FAR(base, ctx, 0);
- SET_CTX_TLBIALL(base, ctx, 0);
- SET_TLBFLPTER(base, ctx, 0);
- SET_TLBSLPTER(base, ctx, 0);
- SET_TLBLKCR(base, ctx, 0);
- SET_PRRR(base, ctx, 0);
- SET_NMRR(base, ctx, 0);
- SET_CONTEXTIDR(base, ctx, 0);
- }
-}
-
-static int msm_iommu_probe(struct platform_device *pdev)
-{
- struct resource *r;
- struct clk *iommu_clk;
- struct clk *iommu_pclk;
- struct msm_iommu_drvdata *drvdata;
- struct msm_iommu_dev *iommu_dev = dev_get_platdata(&pdev->dev);
- void __iomem *regs_base;
- int ret, irq, par;
-
- if (pdev->id == -1) {
- msm_iommu_root_dev = pdev;
- return 0;
- }
-
- drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
-
- if (!drvdata) {
- ret = -ENOMEM;
- goto fail;
- }
-
- if (!iommu_dev) {
- ret = -ENODEV;
- goto fail;
- }
-
- iommu_pclk = clk_get(NULL, "smmu_pclk");
- if (IS_ERR(iommu_pclk)) {
- ret = -ENODEV;
- goto fail;
- }
-
- ret = clk_prepare_enable(iommu_pclk);
- if (ret)
- goto fail_enable;
-
- iommu_clk = clk_get(&pdev->dev, "iommu_clk");
-
- if (!IS_ERR(iommu_clk)) {
- if (clk_get_rate(iommu_clk) == 0)
- clk_set_rate(iommu_clk, 1);
-
- ret = clk_prepare_enable(iommu_clk);
- if (ret) {
- clk_put(iommu_clk);
- goto fail_pclk;
- }
- } else
- iommu_clk = NULL;
-
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "physbase");
- regs_base = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(regs_base)) {
- ret = PTR_ERR(regs_base);
- goto fail_clk;
- }
-
- irq = platform_get_irq_byname(pdev, "secure_irq");
- if (irq < 0) {
- ret = -ENODEV;
- goto fail_clk;
- }
-
- msm_iommu_reset(regs_base, iommu_dev->ncb);
-
- SET_M(regs_base, 0, 1);
- SET_PAR(regs_base, 0, 0);
- SET_V2PCFG(regs_base, 0, 1);
- SET_V2PPR(regs_base, 0, 0);
- par = GET_PAR(regs_base, 0);
- SET_V2PCFG(regs_base, 0, 0);
- SET_M(regs_base, 0, 0);
-
- if (!par) {
- pr_err("%s: Invalid PAR value detected\n", iommu_dev->name);
- ret = -ENODEV;
- goto fail_clk;
- }
-
- ret = request_irq(irq, msm_iommu_fault_handler, 0,
- "msm_iommu_secure_irpt_handler", drvdata);
- if (ret) {
- pr_err("Request IRQ %d failed with ret=%d\n", irq, ret);
- goto fail_clk;
- }
-
-
- drvdata->pclk = iommu_pclk;
- drvdata->clk = iommu_clk;
- drvdata->base = regs_base;
- drvdata->irq = irq;
- drvdata->ncb = iommu_dev->ncb;
-
- pr_info("device %s mapped at %p, irq %d with %d ctx banks\n",
- iommu_dev->name, regs_base, irq, iommu_dev->ncb);
-
- platform_set_drvdata(pdev, drvdata);
-
- clk_disable(iommu_clk);
-
- clk_disable(iommu_pclk);
-
- return 0;
-fail_clk:
- if (iommu_clk) {
- clk_disable(iommu_clk);
- clk_put(iommu_clk);
- }
-fail_pclk:
- clk_disable_unprepare(iommu_pclk);
-fail_enable:
- clk_put(iommu_pclk);
-fail:
- kfree(drvdata);
- return ret;
-}
-
-static int msm_iommu_remove(struct platform_device *pdev)
-{
- struct msm_iommu_drvdata *drv = NULL;
-
- drv = platform_get_drvdata(pdev);
- if (drv) {
- if (drv->clk) {
- clk_unprepare(drv->clk);
- clk_put(drv->clk);
- }
- clk_unprepare(drv->pclk);
- clk_put(drv->pclk);
- memset(drv, 0, sizeof(*drv));
- kfree(drv);
- }
- return 0;
-}
-
-static int msm_iommu_ctx_probe(struct platform_device *pdev)
-{
- struct msm_iommu_ctx_dev *c = dev_get_platdata(&pdev->dev);
- struct msm_iommu_drvdata *drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
- int i, ret;
-
- if (!c || !pdev->dev.parent)
- return -EINVAL;
-
- drvdata = dev_get_drvdata(pdev->dev.parent);
- if (!drvdata)
- return -ENODEV;
-
- ctx_drvdata = kzalloc(sizeof(*ctx_drvdata), GFP_KERNEL);
- if (!ctx_drvdata)
- return -ENOMEM;
-
- ctx_drvdata->num = c->num;
- ctx_drvdata->pdev = pdev;
-
- INIT_LIST_HEAD(&ctx_drvdata->attached_elm);
- platform_set_drvdata(pdev, ctx_drvdata);
-
- ret = clk_prepare_enable(drvdata->pclk);
- if (ret)
- goto fail;
-
- if (drvdata->clk) {
- ret = clk_prepare_enable(drvdata->clk);
- if (ret) {
- clk_disable_unprepare(drvdata->pclk);
- goto fail;
- }
- }
-
- /* Program the M2V tables for this context */
- for (i = 0; i < MAX_NUM_MIDS; i++) {
- int mid = c->mids[i];
- if (mid == -1)
- break;
-
- SET_M2VCBR_N(drvdata->base, mid, 0);
- SET_CBACR_N(drvdata->base, c->num, 0);
-
- /* Set VMID = 0 */
- SET_VMID(drvdata->base, mid, 0);
-
- /* Set the context number for that MID to this context */
- SET_CBNDX(drvdata->base, mid, c->num);
-
- /* Set MID associated with this context bank to 0*/
- SET_CBVMID(drvdata->base, c->num, 0);
-
- /* Set the ASID for TLB tagging for this context */
- SET_CONTEXTIDR_ASID(drvdata->base, c->num, c->num);
-
- /* Set security bit override to be Non-secure */
- SET_NSCFG(drvdata->base, mid, 3);
- }
-
- clk_disable(drvdata->clk);
- clk_disable(drvdata->pclk);
-
- dev_info(&pdev->dev, "context %s using bank %d\n", c->name, c->num);
- return 0;
-fail:
- kfree(ctx_drvdata);
- return ret;
-}
-
-static int msm_iommu_ctx_remove(struct platform_device *pdev)
-{
- struct msm_iommu_ctx_drvdata *drv = NULL;
- drv = platform_get_drvdata(pdev);
- if (drv) {
- memset(drv, 0, sizeof(struct msm_iommu_ctx_drvdata));
- kfree(drv);
- }
- return 0;
-}
-
-static struct platform_driver msm_iommu_driver = {
- .driver = {
- .name = "msm_iommu",
- },
- .probe = msm_iommu_probe,
- .remove = msm_iommu_remove,
-};
-
-static struct platform_driver msm_iommu_ctx_driver = {
- .driver = {
- .name = "msm_iommu_ctx",
- },
- .probe = msm_iommu_ctx_probe,
- .remove = msm_iommu_ctx_remove,
-};
-
-static int __init msm_iommu_driver_init(void)
-{
- int ret;
- ret = platform_driver_register(&msm_iommu_driver);
- if (ret != 0) {
- pr_err("Failed to register IOMMU driver\n");
- goto error;
- }
-
- ret = platform_driver_register(&msm_iommu_ctx_driver);
- if (ret != 0) {
- platform_driver_unregister(&msm_iommu_driver);
- pr_err("Failed to register IOMMU context driver\n");
- goto error;
- }
-
-error:
- return ret;
-}
-
-static void __exit msm_iommu_driver_exit(void)
-{
- platform_driver_unregister(&msm_iommu_ctx_driver);
- platform_driver_unregister(&msm_iommu_driver);
-}
-
-subsys_initcall(msm_iommu_driver_init);
-module_exit(msm_iommu_driver_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
diff --git a/drivers/iommu/msm_iommu_hw-8xxx.h b/drivers/iommu/msm_iommu_hw-8xxx.h
index fc160101dead..84ba573927d1 100644
--- a/drivers/iommu/msm_iommu_hw-8xxx.h
+++ b/drivers/iommu/msm_iommu_hw-8xxx.h
@@ -61,8 +61,9 @@ do { \
#define FL_TYPE_TABLE (1 << 0)
#define FL_TYPE_SECT (2 << 0)
#define FL_SUPERSECTION (1 << 18)
-#define FL_AP_WRITE (1 << 10)
-#define FL_AP_READ (1 << 11)
+#define FL_AP0 (1 << 10)
+#define FL_AP1 (1 << 11)
+#define FL_AP2 (1 << 15)
#define FL_SHARED (1 << 16)
#define FL_BUFFERABLE (1 << 2)
#define FL_CACHEABLE (1 << 3)
@@ -77,6 +78,7 @@ do { \
#define SL_TYPE_SMALL (2 << 0)
#define SL_AP0 (1 << 4)
#define SL_AP1 (2 << 4)
+#define SL_AP2 (1 << 9)
#define SL_SHARED (1 << 10)
#define SL_BUFFERABLE (1 << 2)
#define SL_CACHEABLE (1 << 3)
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 60ba238090d9..37b7c3e4c0b6 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -133,6 +133,19 @@ struct iommu_ops *of_iommu_get_ops(struct device_node *np)
return ops;
}
+/**
+ * of_iommu_configure - Configure and return the IOMMU for a device
+ * @dev: device for which to configure the IOMMU
+ * @master_np: device node of the bus master connected to the IOMMU
+ *
+ * The master_np parameter specifies the device node of the bus master seen by
+ * the IOMMU. This is usually the device node of the dev device, but can be the
+ * device node of a bridge when the device is dynamically discovered and
+ * instantiated and thus has no device node (such as PCI devices for instance).
+ *
+ * Return a pointer to the iommu_ops for the device, NULL if the device isn't
+ * connected to an IOMMU, or an ERR_PTR-encoded error (such as -EPROBE_DEFER)
+ * if an error occurs.
+ */
struct iommu_ops *of_iommu_configure(struct device *dev,
struct device_node *master_np)
{
@@ -159,8 +172,18 @@ struct iommu_ops *of_iommu_configure(struct device *dev,
np = iommu_spec.np;
ops = of_iommu_get_ops(np);
- if (!ops || !ops->of_xlate || ops->of_xlate(dev, &iommu_spec))
+ if (!ops) {
+ const struct of_device_id *oid;
+
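+ /* matched in the IOMMU OF table but not yet probed: request probe deferral */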
+ oid = of_match_node(&__iommu_of_table, np);
+ ops = oid ? ERR_PTR(-EPROBE_DEFER) : NULL;
goto err_put_node;
+ }
+
+ if (!ops->of_xlate || ops->of_xlate(dev, &iommu_spec)) {
+ ops = NULL;
+ goto err_put_node;
+ }
of_node_put(np);
idx++;
@@ -170,7 +193,7 @@ struct iommu_ops *of_iommu_configure(struct device *dev,
err_put_node:
of_node_put(np);
- return NULL;
+ return ops;
}
void __init of_iommu_init(void)
@@ -181,7 +204,7 @@ void __init of_iommu_init(void)
for_each_matching_node_and_match(np, matches, &match) {
const of_iommu_init_fn init_fn = match->data;
- if (init_fn(np))
+ if (init_fn && init_fn(np))
pr_err("Failed to initialise IOMMU %s\n",
of_node_full_name(np));
}
diff --git a/drivers/iommu/qcom/Kconfig b/drivers/iommu/qcom/Kconfig
new file mode 100644
index 000000000000..a0e41bb305af
--- /dev/null
+++ b/drivers/iommu/qcom/Kconfig
@@ -0,0 +1,43 @@
+# Qualcomm IOMMU support
+
+# QCOM IOMMUv1 support
+config QCOM_IOMMU_V1
+ bool "Qualcomm IOMMUv1 Support"
+ depends on ARCH_QCOM
+ select IOMMU_API
+ select ARM_DMA_USE_IOMMU if ARM
+ select ARM64_DMA_USE_IOMMU if ARM64
+ help
+ Support for the IOMMUs (v1) found on certain Qualcomm SOCs.
+ These IOMMUs allow virtualization of the address space used by most
+ cores within the multimedia subsystem.
+
+ If unsure, say N here.
+
+config MMU500_ACTIVE_PREFETCH_BUG_WITH_SECTION_MAPPING
+ bool "Don't align virtual address at 1MB boundary"
+ depends on QCOM_IOMMU_V1
+ help
+ Say Y here if the MMU500 revision has a bug in active prefetch
+ which can cause TLB corruption due to 1MB alignment of a buffer.
+ The following sequence will surface this bug:
+ 1) Create a 2-level mapping in v7S format for a 1MB buffer. The
+ start of the buffer should be at an even MB boundary.
+ 2) Create a section mapping for a 1MB buffer adjacent to the
+ mapping from step 1.
+ 3) Access the last page of the 2-level mapping, followed by an
+ access into the section-mapped area.
+ 4) Step 3 will result in TLB corruption, and this corruption can
+ lead to misbehavior (such as a permission fault) for subsequent
+ transactions.
+
+ If unsure, say Y here as long as IOMMU mappings will not exhaust
+ the VA space.
+
+config IOMMU_PGTABLES_L2
+ bool "Allow SMMU page tables in the L2 cache (Experimental)"
+ depends on QCOM_IOMMU_V1 && MMU && SMP && CPU_DCACHE_DISABLE=n
+ help
+ Improves TLB miss latency at the expense of potential L2 pollution.
+ However, with large multimedia buffers, the TLB should mostly contain
+ section mappings and TLB misses should be quite infrequent.
+ Most people can probably say Y here.
diff --git a/drivers/iommu/qcom/Makefile b/drivers/iommu/qcom/Makefile
new file mode 100644
index 000000000000..e0b1159227be
--- /dev/null
+++ b/drivers/iommu/qcom/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_QCOM_IOMMU_V1) += qcom-iommu.o
+
+qcom-iommu-y += msm_iommu.o
+qcom-iommu-y += msm_iommu-v1.o
+qcom-iommu-y += msm_iommu_dev-v1.o
+qcom-iommu-y += msm_iommu_sec.o
+qcom-iommu-y += msm_iommu_pagetable.o
diff --git a/drivers/iommu/qcom/msm_iommu-v1.c b/drivers/iommu/qcom/msm_iommu-v1.c
new file mode 100644
index 000000000000..82c7f2f16658
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu-v1.c
@@ -0,0 +1,1538 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/msm-bus.h>
+#include <linux/clk.h>
+#include <linux/scatterlist.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/notifier.h>
+#include <linux/iopoll.h>
+#include <linux/qcom_iommu.h>
+#include <asm/sizes.h>
+
+#include "msm_iommu_hw-v1.h"
+#include "msm_iommu_priv.h"
+#include "msm_iommu_perfmon.h"
+#include "msm_iommu_pagetable.h"
+
+#ifdef CONFIG_IOMMU_LPAE
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_2M | SZ_32M | SZ_1G)
+#else
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+#endif
+
+#define IOMMU_MSEC_STEP 10
+#define IOMMU_MSEC_TIMEOUT 5000
+
+static DEFINE_MUTEX(msm_iommu_lock);
+struct dump_regs_tbl_entry dump_regs_tbl[MAX_DUMP_REGS];
+
+static int __enable_regulators(struct msm_iommu_drvdata *drvdata)
+{
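+ /* regulators are assumed to be always on here; only track the power state */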
+ ++drvdata->powered_on;
+
+ return 0;
+}
+
+static void __disable_regulators(struct msm_iommu_drvdata *drvdata)
+{
+ --drvdata->powered_on;
+}
+
+static int apply_bus_vote(struct msm_iommu_drvdata *drvdata, unsigned int vote)
+{
+ int ret = 0;
+
+ if (drvdata->bus_client) {
+ ret = msm_bus_scale_client_update_request(drvdata->bus_client,
+ vote);
+ if (ret)
+ pr_err("%s: Failed to vote for bus: %d\n", __func__,
+ vote);
+ }
+
+ return ret;
+}
+
+static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
+{
+ int ret;
+
+ ret = clk_prepare_enable(drvdata->iface);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(drvdata->core);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ clk_disable_unprepare(drvdata->iface);
+ return ret;
+}
+
+static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
+{
+ clk_disable_unprepare(drvdata->core);
+ clk_disable_unprepare(drvdata->iface);
+}
+
+static void _iommu_lock_acquire(unsigned int need_extra_lock)
+{
+ mutex_lock(&msm_iommu_lock);
+}
+
+static void _iommu_lock_release(unsigned int need_extra_lock)
+{
+ mutex_unlock(&msm_iommu_lock);
+}
+
+struct iommu_access_ops iommu_access_ops_v1 = {
+ .iommu_power_on = __enable_regulators,
+ .iommu_power_off = __disable_regulators,
+ .iommu_bus_vote = apply_bus_vote,
+ .iommu_clk_on = __enable_clocks,
+ .iommu_clk_off = __disable_clocks,
+ .iommu_lock_acquire = _iommu_lock_acquire,
+ .iommu_lock_release = _iommu_lock_release,
+};
+
+static BLOCKING_NOTIFIER_HEAD(msm_iommu_notifier_list);
+
+void msm_iommu_register_notify(struct notifier_block *nb)
+{
+ blocking_notifier_chain_register(&msm_iommu_notifier_list, nb);
+}
+EXPORT_SYMBOL(msm_iommu_register_notify);
+
+#ifdef CONFIG_MSM_IOMMU_VBIF_CHECK
+
+#define VBIF_XIN_HALT_CTRL0 0x200
+#define VBIF_XIN_HALT_CTRL1 0x204
+#define VBIF_AXI_HALT_CTRL0 0x208
+#define VBIF_AXI_HALT_CTRL1 0x20C
+
+static void __halt_vbif_xin(void __iomem *vbif_base)
+{
+ pr_err("Halting VBIF_XIN\n");
+ writel_relaxed(0xFFFFFFFF, vbif_base + VBIF_XIN_HALT_CTRL0);
+}
+
+static void __dump_vbif_state(void __iomem *base, void __iomem *vbif_base)
+{
+ unsigned int reg_val;
+
+ reg_val = readl_relaxed(base + MICRO_MMU_CTRL);
+ pr_err("Value of SMMU_IMPLDEF_MICRO_MMU_CTRL = 0x%x\n", reg_val);
+
+ reg_val = readl_relaxed(vbif_base + VBIF_XIN_HALT_CTRL0);
+ pr_err("Value of VBIF_XIN_HALT_CTRL0 = 0x%x\n", reg_val);
+ reg_val = readl_relaxed(vbif_base + VBIF_XIN_HALT_CTRL1);
+ pr_err("Value of VBIF_XIN_HALT_CTRL1 = 0x%x\n", reg_val);
+ reg_val = readl_relaxed(vbif_base + VBIF_AXI_HALT_CTRL0);
+ pr_err("Value of VBIF_AXI_HALT_CTRL0 = 0x%x\n", reg_val);
+ reg_val = readl_relaxed(vbif_base + VBIF_AXI_HALT_CTRL1);
+ pr_err("Value of VBIF_AXI_HALT_CTRL1 = 0x%x\n", reg_val);
+}
+
+static int __check_vbif_state(struct msm_iommu_drvdata const *drvdata)
+{
+ phys_addr_t addr = (phys_addr_t) (drvdata->phys_base
+ - (phys_addr_t) 0x4000);
+ void __iomem *base = ioremap(addr, 0x1000);
+ int ret = 0;
+
+ if (base) {
+ __dump_vbif_state(drvdata->base, base);
+ __halt_vbif_xin(base);
+ __dump_vbif_state(drvdata->base, base);
+ iounmap(base);
+ } else {
+ pr_err("%s: Unable to ioremap\n", __func__);
+ ret = -ENOMEM;
+ }
+ return ret;
+}
+
+static void check_halt_state(struct msm_iommu_drvdata const *drvdata)
+{
+ int res;
+ unsigned int val;
+ void __iomem *base = drvdata->base;
+ char const *name = drvdata->name;
+
+ pr_err("Timed out waiting for IOMMU halt to complete for %s\n", name);
+ res = __check_vbif_state(drvdata);
+ if (res)
+ BUG();
+
+ pr_err("Checking if IOMMU halt completed for %s\n", name);
+
+ res = readl_poll_timeout(GLB_REG(MICRO_MMU_CTRL, base), val,
+ (val & MMU_CTRL_IDLE) == MMU_CTRL_IDLE, 0, 5000000);
+
+ if (res) {
+ pr_err("Timed out (again) waiting for IOMMU halt to complete for %s\n",
+ name);
+ } else {
+ pr_err("IOMMU halt completed. VBIF FIFO most likely not getting drained by master\n");
+ }
+ BUG();
+}
+
+static void check_tlb_sync_state(struct msm_iommu_drvdata const *drvdata,
+ int ctx, struct msm_iommu_priv *priv)
+{
+ int res;
+ unsigned int val;
+ void __iomem *base = drvdata->cb_base;
+ char const *name = drvdata->name;
+
+ pr_err("Timed out waiting for TLB SYNC to complete for %s (client: %s)\n",
+ name, priv->client_name);
+ blocking_notifier_call_chain(&msm_iommu_notifier_list, TLB_SYNC_TIMEOUT,
+ (void *) priv->client_name);
+ res = __check_vbif_state(drvdata);
+ if (res)
+ BUG();
+
+ pr_err("Checking if TLB sync completed for %s\n", name);
+
+ res = readl_poll_timeout(CTX_REG(CB_TLBSTATUS, base, ctx), val,
+ (val & CB_TLBSTATUS_SACTIVE) == 0, 0, 5000000);
+ if (res) {
+ pr_err("Timed out (again) waiting for TLB SYNC to complete for %s\n",
+ name);
+ } else {
+ pr_err("TLB Sync completed. VBIF FIFO most likely not getting drained by master\n");
+ }
+ BUG();
+}
+
+#else
+
+/*
+ * For targets without VBIF, or targets with the VBIF check disabled,
+ * we simply crash to capture the issue.
+ */
+static void check_halt_state(struct msm_iommu_drvdata const *drvdata)
+{
+ BUG();
+}
+
+static void check_tlb_sync_state(struct msm_iommu_drvdata const *drvdata,
+ int ctx, struct msm_iommu_priv *priv)
+{
+ BUG();
+}
+
+#endif
+
+void iommu_halt(struct msm_iommu_drvdata const *iommu_drvdata)
+{
+ void __iomem *base = iommu_drvdata->base;
+ unsigned int val;
+ int res;
+
+ if (!iommu_drvdata->halt_enabled)
+ return;
+
+ SET_MICRO_MMU_CTRL_HALT_REQ(base, 1);
+ res = readl_poll_timeout(GLB_REG(MICRO_MMU_CTRL, base), val,
+ (val & MMU_CTRL_IDLE) == MMU_CTRL_IDLE, 0, 5000000);
+ if (res)
+ check_halt_state(iommu_drvdata);
+
+ /* Ensure device is idle before continuing */
+ mb();
+}
+
+void iommu_resume(const struct msm_iommu_drvdata *iommu_drvdata)
+{
+ if (!iommu_drvdata->halt_enabled)
+ return;
+
+ /* Ensure transactions have completed before releasing the halt */
+ mb();
+
+ SET_MICRO_MMU_CTRL_HALT_REQ(iommu_drvdata->base, 0);
+
+ /*
+ * Ensure write is complete before continuing to ensure
+ * we don't turn off clocks while transaction is still
+ * pending.
+ */
+ mb();
+}
+
+static void __sync_tlb(struct msm_iommu_drvdata *iommu_drvdata, int ctx,
+ struct msm_iommu_priv *priv)
+{
+ void __iomem *base = iommu_drvdata->cb_base;
+ unsigned int val;
+ unsigned int res;
+
+ SET_TLBSYNC(base, ctx, 0);
+ /* No barrier needed due to read dependency */
+
+ res = readl_poll_timeout(CTX_REG(CB_TLBSTATUS, base, ctx), val,
+ (val & CB_TLBSTATUS_SACTIVE) == 0, 0, 5000000);
+ if (res)
+ check_tlb_sync_state(iommu_drvdata, ctx, priv);
+}
+
+#ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP
+static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
+{
+ struct msm_iommu_priv *priv = to_msm_priv(domain);
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret = 0;
+
+ list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
+ BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
+
+ iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+ BUG_ON(!iommu_drvdata);
+
+ ret = __enable_clocks(iommu_drvdata);
+ if (ret)
+ goto fail;
+
+ SET_TLBIVA(iommu_drvdata->cb_base, ctx_drvdata->num,
+ ctx_drvdata->asid | (va & CB_TLBIVA_VA));
+ mb();
+ __sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+ __disable_clocks(iommu_drvdata);
+ }
+
+fail:
+ return ret;
+}
+#endif
+
+static int __flush_iotlb(struct iommu_domain *domain)
+{
+ struct msm_iommu_priv *priv = to_msm_priv(domain);
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret = 0;
+
+ list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
+ BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
+
+ iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+ BUG_ON(!iommu_drvdata);
+
+ ret = __enable_clocks(iommu_drvdata);
+ if (ret)
+ goto fail;
+
+ SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num,
+ ctx_drvdata->asid);
+ __sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+ __disable_clocks(iommu_drvdata);
+ }
+
+fail:
+ return ret;
+}
+
+/*
+ * May only be called for non-secure iommus
+ */
+static void __reset_iommu(struct msm_iommu_drvdata *iommu_drvdata)
+{
+ void __iomem *base = iommu_drvdata->base;
+ int i, smt_size, res;
+ unsigned long val;
+
+ /*
+ * SMMU_ACR is an implementation-defined register.
+ * Resetting it is not required for some implementations.
+ */
+ if (iommu_drvdata->model != MMU_500)
+ SET_ACR(base, 0);
+ SET_CR2(base, 0);
+ SET_GFAR(base, 0);
+ SET_GFSRRESTORE(base, 0);
+
+ /* Invalidate the entire non-secure TLB */
+ SET_TLBIALLNSNH(base, 0);
+ SET_TLBGSYNC(base, 0);
+ res = readl_poll_timeout(GLB_REG(TLBGSTATUS, base), val,
+ (val & TLBGSTATUS_GSACTIVE) == 0, 0, 5000000);
+ if (res)
+ BUG();
+
+ smt_size = GET_IDR0_NUMSMRG(base);
+
+ for (i = 0; i < smt_size; i++)
+ SET_SMR_VALID(base, i, 0);
+
+ mb();
+}
+
+static void __reset_iommu_secure(struct msm_iommu_drvdata *iommu_drvdata)
+{
+ void __iomem *base = iommu_drvdata->base;
+
+ if (iommu_drvdata->model != MMU_500)
+ SET_NSACR(base, 0);
+ SET_NSCR2(base, 0);
+ SET_NSGFAR(base, 0);
+ SET_NSGFSRRESTORE(base, 0);
+ mb();
+}
+
+static void __program_iommu_secure(struct msm_iommu_drvdata *iommu_drvdata)
+{
+ void __iomem *base = iommu_drvdata->base;
+
+ if (iommu_drvdata->model == MMU_500) {
+ SET_NSACR_SMTNMC_BPTLBEN(base, 1);
+ SET_NSACR_MMUDIS_BPTLBEN(base, 1);
+ SET_NSACR_S2CR_BPTLBEN(base, 1);
+ }
+ SET_NSCR0_SMCFCFG(base, 1);
+ SET_NSCR0_USFCFG(base, 1);
+ SET_NSCR0_STALLD(base, 1);
+ SET_NSCR0_GCFGFIE(base, 1);
+ SET_NSCR0_GCFGFRE(base, 1);
+ SET_NSCR0_GFIE(base, 1);
+ SET_NSCR0_GFRE(base, 1);
+ SET_NSCR0_CLIENTPD(base, 0);
+}
+
+/*
+ * May only be called for non-secure iommus
+ */
+static void __program_iommu(struct msm_iommu_drvdata *drvdata)
+{
+ __reset_iommu(drvdata);
+
+ if (!msm_iommu_get_scm_call_avail())
+ __reset_iommu_secure(drvdata);
+
+ if (drvdata->model == MMU_500) {
+ SET_ACR_SMTNMC_BPTLBEN(drvdata->base, 1);
+ SET_ACR_MMUDIS_BPTLBEN(drvdata->base, 1);
+ SET_ACR_S2CR_BPTLBEN(drvdata->base, 1);
+ }
+ SET_CR0_SMCFCFG(drvdata->base, 1);
+ SET_CR0_USFCFG(drvdata->base, 1);
+ SET_CR0_STALLD(drvdata->base, 1);
+ SET_CR0_GCFGFIE(drvdata->base, 1);
+ SET_CR0_GCFGFRE(drvdata->base, 1);
+ SET_CR0_GFIE(drvdata->base, 1);
+ SET_CR0_GFRE(drvdata->base, 1);
+ SET_CR0_CLIENTPD(drvdata->base, 0);
+
+ if (!msm_iommu_get_scm_call_avail())
+ __program_iommu_secure(drvdata);
+
+ if (drvdata->smmu_local_base)
+ writel_relaxed(0xFFFFFFFF,
+ drvdata->smmu_local_base + SMMU_INTR_SEL_NS);
+
+ mb(); /* Make sure writes complete before returning */
+}
+
+void program_iommu_bfb_settings(void __iomem *base,
+ const struct msm_iommu_bfb_settings *bfb_settings)
+{
+ unsigned int i;
+
+ if (bfb_settings)
+ for (i = 0; i < bfb_settings->length; i++)
+ SET_GLOBAL_REG(base, bfb_settings->regs[i],
+ bfb_settings->data[i]);
+
+ /* Make sure writes complete before returning */
+ mb();
+}
+
+static void __reset_context(struct msm_iommu_drvdata *iommu_drvdata, int ctx)
+{
+ void __iomem *base = iommu_drvdata->cb_base;
+
+ /*
+ * Don't set ACTLR to zero: if the context bank is in bypass mode
+ * (say, after iommu_detach), the ACTLR value still matters for
+ * micro-TLB caching.
+ */
+ if (iommu_drvdata->model != MMU_500)
+ SET_ACTLR(base, ctx, 0);
+ SET_FAR(base, ctx, 0);
+ SET_FSRRESTORE(base, ctx, 0);
+ SET_NMRR(base, ctx, 0);
+ SET_PAR(base, ctx, 0);
+ SET_PRRR(base, ctx, 0);
+ SET_SCTLR(base, ctx, 0);
+ SET_TTBCR(base, ctx, 0);
+ SET_TTBR0(base, ctx, 0);
+ SET_TTBR1(base, ctx, 0);
+ mb();
+}
+
+static void __release_smg(void __iomem *base)
+{
+ int i, smt_size;
+ smt_size = GET_IDR0_NUMSMRG(base);
+
+ /* Invalidate all SMGs */
+ for (i = 0; i < smt_size; i++)
+ if (GET_SMR_VALID(base, i))
+ SET_SMR_VALID(base, i, 0);
+}
+
+#ifdef CONFIG_IOMMU_LPAE
+static void msm_iommu_set_ASID(void __iomem *base, unsigned int ctx_num,
+ unsigned int asid)
+{
+ SET_CB_TTBR0_ASID(base, ctx_num, asid);
+}
+#else
+static void msm_iommu_set_ASID(void __iomem *base, unsigned int ctx_num,
+ unsigned int asid)
+{
+ SET_CB_CONTEXTIDR_ASID(base, ctx_num, asid);
+}
+#endif
+
+static void msm_iommu_assign_ASID(const struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *curr_ctx,
+ struct msm_iommu_priv *priv)
+{
+ void __iomem *cb_base = iommu_drvdata->cb_base;
+
+ curr_ctx->asid = curr_ctx->num;
+ msm_iommu_set_ASID(cb_base, curr_ctx->num, curr_ctx->asid);
+}
+
+#ifdef CONFIG_IOMMU_LPAE
+static void msm_iommu_setup_ctx(void __iomem *base, unsigned int ctx)
+{
+ SET_CB_TTBCR_EAE(base, ctx, 1); /* Extended Address Enable (EAE) */
+}
+
+static void msm_iommu_setup_memory_remap(void __iomem *base, unsigned int ctx)
+{
+ SET_CB_MAIR0(base, ctx, msm_iommu_get_mair0());
+ SET_CB_MAIR1(base, ctx, msm_iommu_get_mair1());
+}
+
+static void msm_iommu_setup_pg_l2_redirect(void __iomem *base, unsigned int ctx)
+{
+ /*
+ * Configure page tables as inner-cacheable and shareable to reduce
+ * the TLB miss penalty.
+ */
+ SET_CB_TTBCR_SH0(base, ctx, 3); /* inner shareable */
+ SET_CB_TTBCR_ORGN0(base, ctx, 1); /* outer cacheable */
+ SET_CB_TTBCR_IRGN0(base, ctx, 1); /* inner cacheable */
+ SET_CB_TTBCR_T0SZ(base, ctx, 0); /* 0GB-4GB */
+
+ SET_CB_TTBCR_SH1(base, ctx, 3); /* inner shareable */
+ SET_CB_TTBCR_ORGN1(base, ctx, 1); /* outer cacheable */
+ SET_CB_TTBCR_IRGN1(base, ctx, 1); /* inner cacheable */
+ SET_CB_TTBCR_T1SZ(base, ctx, 0); /* TTBR1 not used */
+}
+
+#else
+
+static void msm_iommu_setup_ctx(void __iomem *base, unsigned int ctx)
+{
+ /* Turn on TEX Remap */
+ SET_CB_SCTLR_TRE(base, ctx, 1);
+}
+
+static void msm_iommu_setup_memory_remap(void __iomem *base, unsigned int ctx)
+{
+ SET_PRRR(base, ctx, msm_iommu_get_prrr());
+ SET_NMRR(base, ctx, msm_iommu_get_nmrr());
+}
+
+static void msm_iommu_setup_pg_l2_redirect(void __iomem *base, unsigned int ctx)
+{
+ /*
+ * Configure page tables as inner-cacheable and shareable to reduce
+ * the TLB miss penalty.
+ */
+ SET_CB_TTBR0_S(base, ctx, 1);
+ SET_CB_TTBR0_NOS(base, ctx, 1);
+ SET_CB_TTBR0_IRGN1(base, ctx, 0); /* WB, WA */
+ SET_CB_TTBR0_IRGN0(base, ctx, 1);
+ SET_CB_TTBR0_RGN(base, ctx, 1); /* WB, WA */
+}
+
+#endif
+
+static int program_m2v_table(struct device *dev, void __iomem *base)
+{
+ struct msm_iommu_ctx_drvdata *ctx_drvdata = dev_get_drvdata(dev);
+ u32 *sids = ctx_drvdata->sids;
+ u32 *sid_mask = ctx_drvdata->sid_mask;
+ unsigned int ctx = ctx_drvdata->num;
+ int num = 0, i, smt_size;
+ int len = ctx_drvdata->nsid;
+
+ smt_size = GET_IDR0_NUMSMRG(base);
+
+ /* Program the M2V tables for this context */
+ for (i = 0; i < len / sizeof(*sids); i++) {
+ for (; num < smt_size; num++)
+ if (GET_SMR_VALID(base, num) == 0)
+ break;
+ BUG_ON(num >= smt_size);
+
+ SET_SMR_VALID(base, num, 1);
+ SET_SMR_MASK(base, num, sid_mask[i]);
+ SET_SMR_ID(base, num, sids[i]);
+
+ SET_S2CR_N(base, num, 0);
+ SET_S2CR_CBNDX(base, num, ctx);
+ SET_S2CR_MEMATTR(base, num, 0x0A);
+ /* Set security bit override to be Non-secure */
+ SET_S2CR_NSCFG(base, num, 3);
+ }
+
+ return 0;
+}
+
+static void program_all_m2v_tables(struct msm_iommu_drvdata *iommu_drvdata)
+{
+ device_for_each_child(iommu_drvdata->dev, iommu_drvdata->base,
+ program_m2v_table);
+}
+
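+/*
+ * Full context-bank bring-up: reset the bank, install the page table in
+ * TTBR0, enable fault interrupts, assign the ASID, invalidate stale TLB
+ * entries for that ASID, and finally enable translation via SCTLR.M.
+ */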
+static void __program_context(struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata,
+ struct msm_iommu_priv *priv, bool is_secure,
+ bool program_m2v)
+{
+ phys_addr_t pn;
+ void __iomem *base = iommu_drvdata->base;
+ void __iomem *cb_base = iommu_drvdata->cb_base;
+ unsigned int ctx = ctx_drvdata->num;
+ phys_addr_t pgtable = __pa(priv->pt.fl_table);
+
+ __reset_context(iommu_drvdata, ctx);
+ msm_iommu_setup_ctx(cb_base, ctx);
+
+ if (priv->pt.redirect)
+ msm_iommu_setup_pg_l2_redirect(cb_base, ctx);
+
+ msm_iommu_setup_memory_remap(cb_base, ctx);
+
+ pn = pgtable >> CB_TTBR0_ADDR_SHIFT;
+ SET_CB_TTBR0_ADDR(cb_base, ctx, pn);
+
+ /* Enable context fault interrupt */
+ SET_CB_SCTLR_CFIE(cb_base, ctx, 1);
+
+ if (iommu_drvdata->model != MMU_500) {
+ /* Redirect all cacheable requests to L2 slave port. */
+ SET_CB_ACTLR_BPRCISH(cb_base, ctx, 1);
+ SET_CB_ACTLR_BPRCOSH(cb_base, ctx, 1);
+ SET_CB_ACTLR_BPRCNSH(cb_base, ctx, 1);
+ }
+
+ /* Enable private ASID namespace */
+ SET_CB_SCTLR_ASIDPNE(cb_base, ctx, 1);
+
+ if (!is_secure) {
+ if (program_m2v)
+ program_all_m2v_tables(iommu_drvdata);
+
+ SET_CBAR_N(base, ctx, 0);
+
+ /* Stage 1 Context with Stage 2 bypass */
+ SET_CBAR_TYPE(base, ctx, 1);
+
+ /* Route page faults to the non-secure interrupt */
+ SET_CBAR_IRPTNDX(base, ctx, 1);
+
+ /* Set VMID to non-secure HLOS */
+ SET_CBAR_VMID(base, ctx, 3);
+
+ /* Bypass is treated as inner-shareable */
+ SET_CBAR_BPSHCFG(base, ctx, 2);
+
+ /* Do not downgrade memory attributes */
+ SET_CBAR_MEMATTR(base, ctx, 0x0A);
+
+ }
+
+ msm_iommu_assign_ASID(iommu_drvdata, ctx_drvdata, priv);
+
+ /*
+ * Ensure that the ASID assignment has completed before the ASID is
+ * used for TLB invalidation; an explicit mb() is needed because the
+ * two registers are more than 1KB apart.
+ */
+ mb();
+
+ SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num,
+ ctx_drvdata->asid);
+ __sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+
+ /* Enable the MMU */
+ SET_CB_SCTLR_M(cb_base, ctx, 1);
+ mb();
+}
+
+#ifdef CONFIG_IOMMU_PGTABLES_L2
+#define INITIAL_REDIRECT_VAL 1
+#else
+#define INITIAL_REDIRECT_VAL 0
+#endif
+
+static struct iommu_domain *msm_iommu_domain_alloc(unsigned int type)
+{
+ struct msm_iommu_priv *priv;
+ int ret = -ENOMEM;
+ struct iommu_domain *domain;
+
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ goto fail_nomem;
+
+ priv->pt.redirect = INITIAL_REDIRECT_VAL;
+
+ INIT_LIST_HEAD(&priv->list_attached);
+
+ ret = msm_iommu_pagetable_alloc(&priv->pt);
+ if (ret)
+ goto fail_nomem;
+
+ domain = &priv->domain;
+
+ return domain;
+
+fail_nomem:
+ kfree(priv);
+ return NULL;
+}
+
+static void msm_iommu_domain_free(struct iommu_domain *domain)
+{
+ struct msm_iommu_priv *priv;
+
+ mutex_lock(&msm_iommu_lock);
+ priv = to_msm_priv(domain);
+
+ if (priv)
+ msm_iommu_pagetable_free(&priv->pt);
+
+ kfree(priv);
+ mutex_unlock(&msm_iommu_lock);
+}
+
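+/*
+ * Attaching powers the IOMMU (regulators, bus vote, clocks), performs the
+ * one-time global programming on the first attach, then programs the
+ * context bank with the SMMU halted. Repeat attaches of the same context
+ * only bump attach_count.
+ */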
+static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+ struct msm_iommu_priv *priv;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_iommu_ctx_drvdata *tmp_drvdata;
+ int ret = 0;
+ int is_secure;
+ bool set_m2v = false;
+
+ mutex_lock(&msm_iommu_lock);
+
+ priv = to_msm_priv(domain);
+ if (!priv || !dev) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ iommu_drvdata = dev_get_drvdata(dev->parent);
+ ctx_drvdata = dev_get_drvdata(dev);
+ if (!iommu_drvdata || !ctx_drvdata) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ++ctx_drvdata->attach_count;
+
+ if (ctx_drvdata->attach_count > 1)
+ goto already_attached;
+
+ if (!list_empty(&ctx_drvdata->attached_elm)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
+ if (tmp_drvdata == ctx_drvdata) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ is_secure = iommu_drvdata->sec_id != -1;
+
+ ret = __enable_regulators(iommu_drvdata);
+ if (ret)
+ goto unlock;
+
+ ret = apply_bus_vote(iommu_drvdata, 1);
+ if (ret)
+ goto unlock;
+
+ ret = __enable_clocks(iommu_drvdata);
+ if (ret) {
+ __disable_regulators(iommu_drvdata);
+ goto unlock;
+ }
+
+ /* We can only do this once */
+ if (!iommu_drvdata->ctx_attach_count) {
+ if (!is_secure) {
+ iommu_halt(iommu_drvdata);
+ __program_iommu(iommu_drvdata);
+ iommu_resume(iommu_drvdata);
+ } else {
+ ret = msm_iommu_sec_program_iommu(iommu_drvdata,
+ ctx_drvdata);
+ if (ret) {
+ __disable_regulators(iommu_drvdata);
+ __disable_clocks(iommu_drvdata);
+ goto unlock;
+ }
+ }
+ program_iommu_bfb_settings(iommu_drvdata->base,
+ iommu_drvdata->bfb_settings);
+ set_m2v = true;
+ }
+
+ iommu_halt(iommu_drvdata);
+
+ __program_context(iommu_drvdata, ctx_drvdata, priv, is_secure, set_m2v);
+
+ iommu_resume(iommu_drvdata);
+
+ /* Ensure TLB is clear */
+ if (iommu_drvdata->model != MMU_500) {
+ SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num,
+ ctx_drvdata->asid);
+ __sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+ }
+
+ __disable_clocks(iommu_drvdata);
+
+ list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
+ ctx_drvdata->attached_domain = domain;
+ ++iommu_drvdata->ctx_attach_count;
+
+already_attached:
+ mutex_unlock(&msm_iommu_lock);
+
+ msm_iommu_attached(dev->parent);
+ return ret;
+unlock:
+ mutex_unlock(&msm_iommu_lock);
+ return ret;
+}
+
+static void msm_iommu_detach_dev(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct msm_iommu_priv *priv;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret;
+ int is_secure;
+
+ if (!dev)
+ return;
+
+ msm_iommu_detached(dev->parent);
+
+ mutex_lock(&msm_iommu_lock);
+ priv = to_msm_priv(domain);
+ if (!priv)
+ goto unlock;
+
+ iommu_drvdata = dev_get_drvdata(dev->parent);
+ ctx_drvdata = dev_get_drvdata(dev);
+ if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
+ goto unlock;
+
+ --ctx_drvdata->attach_count;
+ BUG_ON(ctx_drvdata->attach_count < 0);
+
+ if (ctx_drvdata->attach_count > 0)
+ goto unlock;
+
+ ret = __enable_clocks(iommu_drvdata);
+ if (ret)
+ goto unlock;
+
+ is_secure = iommu_drvdata->sec_id != -1;
+
+ if (iommu_drvdata->model == MMU_500) {
+ SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num,
+ ctx_drvdata->asid);
+ __sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+ }
+
+ ctx_drvdata->asid = -1;
+
+ __reset_context(iommu_drvdata, ctx_drvdata->num);
+
+ /* Only reset the M2V tables on the very last detach */
+ if (!is_secure && iommu_drvdata->ctx_attach_count == 1) {
+ iommu_halt(iommu_drvdata);
+ __release_smg(iommu_drvdata->base);
+ iommu_resume(iommu_drvdata);
+ }
+
+ __disable_clocks(iommu_drvdata);
+
+ apply_bus_vote(iommu_drvdata, 0);
+
+ __disable_regulators(iommu_drvdata);
+
+ list_del_init(&ctx_drvdata->attached_elm);
+ ctx_drvdata->attached_domain = NULL;
+ BUG_ON(iommu_drvdata->ctx_attach_count == 0);
+ --iommu_drvdata->ctx_attach_count;
+unlock:
+ mutex_unlock(&msm_iommu_lock);
+}
+
+static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
+ phys_addr_t pa, size_t len, int prot)
+{
+ struct msm_iommu_priv *priv;
+ int ret = 0;
+
+ mutex_lock(&msm_iommu_lock);
+
+ priv = to_msm_priv(domain);
+ if (!priv) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ ret = msm_iommu_pagetable_map(&priv->pt, va, pa, len, prot);
+ if (ret)
+ goto fail;
+
+#ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP
+ ret = __flush_iotlb_va(domain, va);
+#endif
+
+fail:
+ mutex_unlock(&msm_iommu_lock);
+ return ret;
+}
+
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+ size_t len)
+{
+ struct msm_iommu_priv *priv;
+ int ret = -ENODEV;
+
+ mutex_lock(&msm_iommu_lock);
+
+ priv = to_msm_priv(domain);
+ if (!priv)
+ goto fail;
+
+ ret = msm_iommu_pagetable_unmap(&priv->pt, va, len);
+ if (ret < 0)
+ goto fail;
+
+ ret = __flush_iotlb(domain);
+
+ msm_iommu_pagetable_free_tables(&priv->pt, va, len);
+fail:
+ mutex_unlock(&msm_iommu_lock);
+
+ /* the IOMMU API requires us to return how many bytes were unmapped */
+ len = ret ? 0 : len;
+ return len;
+}
+
+static size_t msm_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents,
+ int prot)
+{
+ struct msm_iommu_priv *priv;
+ struct scatterlist *tmp;
+ size_t len = 0;
+ int ret, i;
+
+ mutex_lock(&msm_iommu_lock);
+
+ priv = to_msm_priv(domain);
+ if (!priv)
+ goto fail;
+
+ for_each_sg(sg, tmp, nents, i)
+ len += tmp->length;
+
+ ret = msm_iommu_pagetable_map_range(&priv->pt, iova, sg, len, prot);
+ if (ret) {
+ len = 0;
+ goto fail;
+ }
+
+#ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP
+ __flush_iotlb(domain);
+#endif
+
+fail:
+ mutex_unlock(&msm_iommu_lock);
+
+ /* the IOMMU API expects the number of bytes mapped; 0 means failure */
+ return len;
+}
+
+static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
+ unsigned int len)
+{
+ struct msm_iommu_priv *priv;
+
+ mutex_lock(&msm_iommu_lock);
+
+ priv = to_msm_priv(domain);
+ msm_iommu_pagetable_unmap_range(&priv->pt, va, len);
+
+ __flush_iotlb(domain);
+
+ msm_iommu_pagetable_free_tables(&priv->pt, va, len);
+ mutex_unlock(&msm_iommu_lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_IOMMU_LPAE
+static phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par)
+{
+ phys_addr_t phy;
+
+ /* Upper 28 bits from PAR, lower 12 from VA */
+ phy = (par & 0xFFFFFFF000ULL) | (va & 0x00000FFF);
+
+ return phy;
+}
+#else
+static phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par)
+{
+ phys_addr_t phy;
+
+ /* We are dealing with a supersection */
+ if (par & CB_PAR_SS)
+ phy = (par & 0xFF000000) | (va & 0x00FFFFFF);
+ else /* Upper 20 bits from PAR, lower 12 from VA */
+ phy = (par & 0xFFFFF000) | (va & 0x00000FFF);
+
+ return phy;
+}
+#endif
+
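+/*
+ * Translate an IOVA by asking the hardware: write the address to ATS1PR,
+ * poll ATSR until the walk finishes, then decode PAR. On MMU-500 this
+ * falls back to a software page-table walk instead.
+ */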
+static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
+ phys_addr_t va)
+{
+ struct msm_iommu_priv *priv;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ u64 par;
+ void __iomem *base;
+ phys_addr_t ret = 0;
+ int ctx;
+ int i;
+
+ mutex_lock(&msm_iommu_lock);
+
+ priv = to_msm_priv(domain);
+ if (list_empty(&priv->list_attached))
+ goto fail;
+
+ ctx_drvdata = list_entry(priv->list_attached.next,
+ struct msm_iommu_ctx_drvdata, attached_elm);
+ iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+
+ if (iommu_drvdata->model == MMU_500) {
+ ret = msm_iommu_iova_to_phys_soft(domain, va);
+ mutex_unlock(&msm_iommu_lock);
+ return ret;
+ }
+
+ base = iommu_drvdata->cb_base;
+ ctx = ctx_drvdata->num;
+
+ ret = __enable_clocks(iommu_drvdata);
+ if (ret) {
+ ret = 0; /* 0 indicates translation failed */
+ goto fail;
+ }
+
+ SET_ATS1PR(base, ctx, va & CB_ATS1PR_ADDR);
+ mb();
+ for (i = 0; i < IOMMU_MSEC_TIMEOUT; i += IOMMU_MSEC_STEP)
+ if (GET_CB_ATSR_ACTIVE(base, ctx) == 0)
+ break;
+ else
+ msleep(IOMMU_MSEC_STEP);
+
+ if (i >= IOMMU_MSEC_TIMEOUT) {
+ pr_err("%s: iova to phys timed out on %pa for %s (%s)\n",
+ __func__, &va, iommu_drvdata->name, ctx_drvdata->name);
+ ret = 0;
+ goto fail;
+ }
+
+ par = GET_PAR(base, ctx);
+ __disable_clocks(iommu_drvdata);
+
+ if (par & CB_PAR_F) {
+ unsigned int level = (par & CB_PAR_PLVL) >> CB_PAR_PLVL_SHIFT;
+ pr_err("IOMMU translation fault!\n");
+ pr_err("name = %s\n", iommu_drvdata->name);
+ pr_err("context = %s (%d)\n", ctx_drvdata->name,
+ ctx_drvdata->num);
+ pr_err("Interesting registers:\n");
+ pr_err("PAR = %16llx [%s%s%s%s%s%s%s%sPLVL%u %s]\n", par,
+ (par & CB_PAR_F) ? "F " : "",
+ (par & CB_PAR_TF) ? "TF " : "",
+ (par & CB_PAR_AFF) ? "AFF " : "",
+ (par & CB_PAR_PF) ? "PF " : "",
+ (par & CB_PAR_EF) ? "EF " : "",
+ (par & CB_PAR_TLBMCF) ? "TLBMCF " : "",
+ (par & CB_PAR_TLBLKF) ? "TLBLKF " : "",
+ (par & CB_PAR_ATOT) ? "ATOT " : "",
+ level,
+ (par & CB_PAR_STAGE) ? "S2 " : "S1 ");
+ ret = 0;
+ } else {
+ ret = msm_iommu_get_phy_from_PAR(va, par);
+ }
+
+fail:
+ mutex_unlock(&msm_iommu_lock);
+
+ return ret;
+}
+
+#ifdef CONFIG_IOMMU_LPAE
+static inline void print_ctx_mem_attr_regs(struct msm_iommu_context_reg regs[])
+{
+ pr_err("MAIR0 = %08x MAIR1 = %08x\n",
+ regs[DUMP_REG_MAIR0].val, regs[DUMP_REG_MAIR1].val);
+}
+#else
+static inline void print_ctx_mem_attr_regs(struct msm_iommu_context_reg regs[])
+{
+ pr_err("PRRR = %08x NMRR = %08x\n",
+ regs[DUMP_REG_PRRR].val, regs[DUMP_REG_NMRR].val);
+}
+#endif
+
+void print_ctx_regs(struct msm_iommu_context_reg regs[])
+{
+ u32 fsr = regs[DUMP_REG_FSR].val;
+ u64 ttbr;
+ enum dump_reg iter;
+
+ pr_err("FAR = %016llx\n",
+ COMBINE_DUMP_REG(
+ regs[DUMP_REG_FAR1].val,
+ regs[DUMP_REG_FAR0].val));
+ pr_err("PAR = %016llx\n",
+ COMBINE_DUMP_REG(
+ regs[DUMP_REG_PAR1].val,
+ regs[DUMP_REG_PAR0].val));
+ pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s]\n", fsr,
+ (fsr & 0x02) ? "TF " : "",
+ (fsr & 0x04) ? "AFF " : "",
+ (fsr & 0x08) ? "PF " : "",
+ (fsr & 0x10) ? "EF " : "",
+ (fsr & 0x20) ? "TLBMCF " : "",
+ (fsr & 0x40) ? "TLBLKF " : "",
+ (fsr & 0x80) ? "MHF " : "",
+ (fsr & 0x40000000) ? "SS " : "",
+ (fsr & 0x80000000) ? "MULTI " : "");
+
+ pr_err("FSYNR0 = %08x FSYNR1 = %08x\n",
+ regs[DUMP_REG_FSYNR0].val, regs[DUMP_REG_FSYNR1].val);
+
+ ttbr = COMBINE_DUMP_REG(regs[DUMP_REG_TTBR0_1].val,
+ regs[DUMP_REG_TTBR0_0].val);
+ if (regs[DUMP_REG_TTBR0_1].valid)
+ pr_err("TTBR0 = %016llx\n", ttbr);
+ else
+ pr_err("TTBR0 = %016llx (32b)\n", ttbr);
+
+ ttbr = COMBINE_DUMP_REG(regs[DUMP_REG_TTBR1_1].val,
+ regs[DUMP_REG_TTBR1_0].val);
+
+ if (regs[DUMP_REG_TTBR1_1].valid)
+ pr_err("TTBR1 = %016llx\n", ttbr);
+ else
+ pr_err("TTBR1 = %016llx (32b)\n", ttbr);
+
+ pr_err("SCTLR = %08x ACTLR = %08x\n",
+ regs[DUMP_REG_SCTLR].val, regs[DUMP_REG_ACTLR].val);
+ pr_err("CBAR = %08x CBFRSYNRA = %08x\n",
+ regs[DUMP_REG_CBAR_N].val, regs[DUMP_REG_CBFRSYNRA_N].val);
+ print_ctx_mem_attr_regs(regs);
+
+ for (iter = DUMP_REG_FIRST; iter < MAX_DUMP_REGS; ++iter)
+ if (!regs[iter].valid)
+ pr_err("NOTE: Value actually unknown for %s\n",
+ dump_regs_tbl[iter].name);
+}
+
+static void __print_ctx_regs(struct msm_iommu_drvdata *drvdata, int ctx,
+ unsigned int fsr)
+{
+ void __iomem *base = drvdata->base;
+ void __iomem *cb_base = drvdata->cb_base;
+ bool is_secure = drvdata->sec_id != -1;
+
+ struct msm_iommu_context_reg regs[MAX_DUMP_REGS];
+ unsigned int i;
+ memset(regs, 0, sizeof(regs));
+
+ for (i = DUMP_REG_FIRST; i < MAX_DUMP_REGS; ++i) {
+ struct msm_iommu_context_reg *r = &regs[i];
+ unsigned long regaddr = dump_regs_tbl[i].reg_offset;
+ if (is_secure &&
+ dump_regs_tbl[i].dump_reg_type != DRT_CTX_REG) {
+ r->valid = 0;
+ continue;
+ }
+ r->valid = 1;
+ switch (dump_regs_tbl[i].dump_reg_type) {
+ case DRT_CTX_REG:
+ r->val = GET_CTX_REG(regaddr, cb_base, ctx);
+ break;
+ case DRT_GLOBAL_REG:
+ r->val = GET_GLOBAL_REG(regaddr, base);
+ break;
+ case DRT_GLOBAL_REG_N:
+ r->val = GET_GLOBAL_REG_N(regaddr, ctx, base);
+ break;
+ default:
+ pr_info("Unknown dump_reg_type...\n");
+ r->valid = 0;
+ break;
+ }
+ }
+ print_ctx_regs(regs);
+}
+
+static void print_global_regs(void __iomem *base, unsigned int gfsr)
+{
+ pr_err("GFAR = %016llx\n", GET_GFAR(base));
+
+ pr_err("GFSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", gfsr,
+ (gfsr & 0x01) ? "ICF " : "",
+ (gfsr & 0x02) ? "USF " : "",
+ (gfsr & 0x04) ? "SMCF " : "",
+ (gfsr & 0x08) ? "UCBF " : "",
+ (gfsr & 0x10) ? "UCIF " : "",
+ (gfsr & 0x20) ? "CAF " : "",
+ (gfsr & 0x40) ? "EF " : "",
+ (gfsr & 0x80) ? "PF " : "",
+ (gfsr & 0x40000000) ? "SS " : "",
+ (gfsr & 0x80000000) ? "MULTI " : "");
+
+ pr_err("GFSYNR0 = %08x\n", GET_GFSYNR0(base));
+ pr_err("GFSYNR1 = %08x\n", GET_GFSYNR1(base));
+ pr_err("GFSYNR2 = %08x\n", GET_GFSYNR2(base));
+}
+
+irqreturn_t msm_iommu_global_fault_handler(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ struct msm_iommu_drvdata *drvdata;
+ unsigned int gfsr;
+ int ret;
+
+ mutex_lock(&msm_iommu_lock);
+ BUG_ON(!pdev);
+
+ drvdata = dev_get_drvdata(&pdev->dev);
+ BUG_ON(!drvdata);
+
+ if (!drvdata->powered_on) {
+ pr_err("Unexpected IOMMU global fault !!\n");
+ pr_err("name = %s\n", drvdata->name);
+ pr_err("Power is OFF. Can't read global fault information\n");
+ ret = IRQ_HANDLED;
+ goto fail;
+ }
+
+ if (drvdata->sec_id != -1) {
+ pr_err("NON-secure interrupt from secure %s\n", drvdata->name);
+ ret = IRQ_HANDLED;
+ goto fail;
+ }
+
+ ret = __enable_clocks(drvdata);
+ if (ret) {
+ ret = IRQ_NONE;
+ goto fail;
+ }
+
+ gfsr = GET_GFSR(drvdata->base);
+ if (gfsr) {
+ pr_err("Unexpected %s global fault !!\n", drvdata->name);
+ print_global_regs(drvdata->base, gfsr);
+ SET_GFSR(drvdata->base, gfsr);
+ ret = IRQ_HANDLED;
+ } else {
+ ret = IRQ_NONE;
+ }
+
+ __disable_clocks(drvdata);
+fail:
+ mutex_unlock(&msm_iommu_lock);
+
+ return ret;
+}
+
+irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ struct msm_iommu_drvdata *drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ unsigned int fsr;
+ int ret;
+
+ phys_addr_t pagetable_phys;
+ u64 faulty_iova = 0;
+
+ mutex_lock(&msm_iommu_lock);
+
+ BUG_ON(!pdev);
+
+ drvdata = dev_get_drvdata(pdev->dev.parent);
+ BUG_ON(!drvdata);
+
+ ctx_drvdata = dev_get_drvdata(&pdev->dev);
+ BUG_ON(!ctx_drvdata);
+
+ if (!drvdata->powered_on) {
+ pr_err("Unexpected IOMMU page fault!\n");
+ pr_err("name = %s\n", drvdata->name);
+ pr_err("Power is OFF. Unable to read page fault information\n");
+ /*
+ * We cannot determine which context bank caused the fault, so
+ * return IRQ_HANDLED to keep the IRQ core happy.
+ */
+ ret = IRQ_HANDLED;
+ goto fail;
+ }
+
+ ret = __enable_clocks(drvdata);
+ if (ret) {
+ ret = IRQ_NONE;
+ goto fail;
+ }
+
+ fsr = GET_FSR(drvdata->cb_base, ctx_drvdata->num);
+ if (fsr) {
+ if (!ctx_drvdata->attached_domain) {
+ pr_err("Bad domain in interrupt handler\n");
+ ret = -ENOSYS;
+ } else {
+ faulty_iova =
+ GET_FAR(drvdata->cb_base, ctx_drvdata->num);
+ ret = report_iommu_fault(ctx_drvdata->attached_domain,
+ &ctx_drvdata->pdev->dev,
+ faulty_iova, 0);
+ }
+ if (ret == -ENOSYS) {
+ pr_err("Unexpected IOMMU page fault!\n");
+ pr_err("name = %s\n", drvdata->name);
+ pr_err("context = %s (%d)\n", ctx_drvdata->name,
+ ctx_drvdata->num);
+ pr_err("Interesting registers:\n");
+ __print_ctx_regs(drvdata,
+ ctx_drvdata->num, fsr);
+
+ if (ctx_drvdata->attached_domain) {
+ pagetable_phys = msm_iommu_iova_to_phys_soft(
+ ctx_drvdata->attached_domain,
+ faulty_iova);
+ pr_err("Page table in DDR shows PA = %x\n",
+ (unsigned int) pagetable_phys);
+ }
+ }
+
+ if (ret != -EBUSY)
+ SET_FSR(drvdata->cb_base, ctx_drvdata->num, fsr);
+ ret = IRQ_HANDLED;
+ } else {
+ ret = IRQ_NONE;
+ }
+
+ __disable_clocks(drvdata);
+fail:
+ mutex_unlock(&msm_iommu_lock);
+
+ return ret;
+}
+
+static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
+{
+ struct msm_iommu_priv *priv = to_msm_priv(domain);
+
+ return __pa(priv->pt.fl_table);
+}
+
+#define DUMP_REG_INIT(dump_reg, cb_reg, mbp, drt) \
+ do { \
+ dump_regs_tbl[dump_reg].reg_offset = cb_reg; \
+ dump_regs_tbl[dump_reg].name = #cb_reg; \
+ dump_regs_tbl[dump_reg].must_be_present = mbp; \
+ dump_regs_tbl[dump_reg].dump_reg_type = drt; \
+ } while (0)
+
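+/*
+ * Populate dump_regs_tbl with the registers printed on faults. The upper
+ * TTBRx words are marked optional since they only exist with LPAE.
+ */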
+static void msm_iommu_build_dump_regs_table(void)
+{
+ DUMP_REG_INIT(DUMP_REG_FAR0, CB_FAR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_FAR1, CB_FAR + 4, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_PAR0, CB_PAR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_PAR1, CB_PAR + 4, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_FSR, CB_FSR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_FSYNR0, CB_FSYNR0, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_FSYNR1, CB_FSYNR1, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_TTBR0_0, CB_TTBR0, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_TTBR0_1, CB_TTBR0 + 4, 0, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_TTBR1_0, CB_TTBR1, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_TTBR1_1, CB_TTBR1 + 4, 0, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_SCTLR, CB_SCTLR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_ACTLR, CB_ACTLR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_PRRR, CB_PRRR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_NMRR, CB_NMRR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_CBAR_N, CBAR, 1, DRT_GLOBAL_REG_N);
+ DUMP_REG_INIT(DUMP_REG_CBFRSYNRA_N, CBFRSYNRA, 1, DRT_GLOBAL_REG_N);
+}
+
+#ifdef CONFIG_IOMMU_PGTABLES_L2
+static void __do_set_redirect(struct iommu_domain *domain, void *data)
+{
+ struct msm_iommu_priv *priv;
+ int *no_redirect = data;
+
+ mutex_lock(&msm_iommu_lock);
+ priv = to_msm_priv(domain);
+ priv->pt.redirect = !(*no_redirect);
+ mutex_unlock(&msm_iommu_lock);
+}
+
+static void __do_get_redirect(struct iommu_domain *domain, void *data)
+{
+ struct msm_iommu_priv *priv;
+ int *no_redirect = data;
+
+ mutex_lock(&msm_iommu_lock);
+ priv = to_msm_priv(domain);
+ *no_redirect = !priv->pt.redirect;
+ mutex_unlock(&msm_iommu_lock);
+}
+
+#else
+
+static void __do_set_redirect(struct iommu_domain *domain, void *data)
+{
+}
+
+static void __do_get_redirect(struct iommu_domain *domain, void *data)
+{
+}
+#endif
+
+static int msm_iommu_domain_set_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ switch (attr) {
+ case DOMAIN_ATTR_QCOM_COHERENT_HTW_DISABLE:
+ __do_set_redirect(domain, data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int msm_iommu_domain_get_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ switch (attr) {
+ case DOMAIN_ATTR_QCOM_COHERENT_HTW_DISABLE:
+ __do_get_redirect(domain, data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct iommu_ops msm_iommu_ops = {
+ .domain_alloc = msm_iommu_domain_alloc,
+ .domain_free = msm_iommu_domain_free,
+ .attach_dev = msm_iommu_attach_dev,
+ .detach_dev = msm_iommu_detach_dev,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+ .map_sg = msm_iommu_map_sg,
+/* .unmap_range = msm_iommu_unmap_range,*/
+ .iova_to_phys = msm_iommu_iova_to_phys,
+/* .get_pt_base_addr = msm_iommu_get_pt_base_addr,*/
+ .pgsize_bitmap = MSM_IOMMU_PGSIZES,
+ .domain_set_attr = msm_iommu_domain_set_attr,
+ .domain_get_attr = msm_iommu_domain_get_attr,
+};
+
+static int __init msm_iommu_init(void)
+{
+ int ret;
+
+ msm_iommu_pagetable_init();
+
+ ret = bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
+ if (ret)
+ return ret;
+
+ msm_iommu_build_dump_regs_table();
+
+ return 0;
+}
+
+subsys_initcall(msm_iommu_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM SMMU v2 Driver");
diff --git a/drivers/iommu/qcom/msm_iommu.c b/drivers/iommu/qcom/msm_iommu.c
new file mode 100644
index 000000000000..08d8d10da2c4
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu.c
@@ -0,0 +1,206 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/export.h>
+#include <linux/iommu.h>
+#include <linux/qcom_iommu.h>
+
+static DEFINE_MUTEX(iommu_list_lock);
+static LIST_HEAD(iommu_list);
+
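+/* Read a 32-bit CP15 coprocessor register into 'reg' */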
+#define MRC(reg, processor, op1, crn, crm, op2) \
+__asm__ __volatile__ ( \
+" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
+: "=r" (reg))
+
+#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
+#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
+
+#define RCP15_MAIR0(reg) MRC(reg, p15, 0, c10, c2, 0)
+#define RCP15_MAIR1(reg) MRC(reg, p15, 0, c10, c2, 1)
+
+/* These values come from proc-v7-2level.S */
+#define PRRR_VALUE 0xff0a81a8
+#define NMRR_VALUE 0x40e040e0
+
+/* These values come from proc-v7-3level.S */
+#define MAIR0_VALUE 0xeeaa4400
+#define MAIR1_VALUE 0xff000004
+
+static struct iommu_access_ops *iommu_access_ops;
+
+struct bus_type msm_iommu_sec_bus_type = {
+ .name = "msm_iommu_sec_bus",
+};
+
+void msm_set_iommu_access_ops(struct iommu_access_ops *ops)
+{
+ iommu_access_ops = ops;
+}
+
+struct iommu_access_ops *msm_get_iommu_access_ops(void)
+{
+ BUG_ON(iommu_access_ops == NULL);
+ return iommu_access_ops;
+}
+EXPORT_SYMBOL(msm_get_iommu_access_ops);
+
+void msm_iommu_add_drv(struct msm_iommu_drvdata *drv)
+{
+ mutex_lock(&iommu_list_lock);
+ list_add(&drv->list, &iommu_list);
+ mutex_unlock(&iommu_list_lock);
+}
+
+void msm_iommu_remove_drv(struct msm_iommu_drvdata *drv)
+{
+ mutex_lock(&iommu_list_lock);
+ list_del(&drv->list);
+ mutex_unlock(&iommu_list_lock);
+}
+
+static int find_iommu_ctx(struct device *dev, void *data)
+{
+ struct msm_iommu_ctx_drvdata *c;
+
+ c = dev_get_drvdata(dev);
+ if (!c || !c->name)
+ return 0;
+
+ return !strcmp(data, c->name);
+}
+
+static struct device *find_context(struct device *dev, const char *name)
+{
+ return device_find_child(dev, (void *)name, find_iommu_ctx);
+}
+
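+/*
+ * Look up a context device by its DT label across all registered IOMMUs.
+ * Returns -EPROBE_DEFER while the context has not probed yet so callers
+ * can retry.
+ */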
+struct device *msm_iommu_get_ctx(const char *ctx_name)
+{
+ struct msm_iommu_drvdata *drv;
+ struct device *dev = NULL;
+
+ mutex_lock(&iommu_list_lock);
+ list_for_each_entry(drv, &iommu_list, list) {
+ dev = find_context(drv->dev, ctx_name);
+ if (dev)
+ break;
+ }
+ mutex_unlock(&iommu_list_lock);
+
+ put_device(dev);
+
+ if (!dev || !dev_get_drvdata(dev)) {
+ pr_debug("Could not find context <%s>\n", ctx_name);
+ dev = ERR_PTR(-EPROBE_DEFER);
+ }
+
+ return dev;
+}
+EXPORT_SYMBOL(msm_iommu_get_ctx);
+
+#ifdef CONFIG_ARM
+#ifdef CONFIG_IOMMU_LPAE
+#ifdef CONFIG_ARM_LPAE
+/*
+ * If CONFIG_ARM_LPAE AND CONFIG_IOMMU_LPAE are enabled we can use the MAIR
+ * register directly
+ */
+u32 msm_iommu_get_mair0(void)
+{
+ unsigned int mair0;
+
+ RCP15_MAIR0(mair0);
+ return mair0;
+}
+
+u32 msm_iommu_get_mair1(void)
+{
+ unsigned int mair1;
+
+ RCP15_MAIR1(mair1);
+ return mair1;
+}
+#else
+/*
+ * If CONFIG_IOMMU_LPAE is enabled but CONFIG_ARM_LPAE is not, fall back
+ * to the hard-coded values.
+ */
+u32 msm_iommu_get_mair0(void)
+{
+ return MAIR0_VALUE;
+}
+
+u32 msm_iommu_get_mair1(void)
+{
+ return MAIR1_VALUE;
+}
+#endif
+
+#else
+#ifdef CONFIG_ARM_LPAE
+/*
+ * If CONFIG_ARM_LPAE is enabled AND CONFIG_IOMMU_LPAE is disabled
+ * we must use the hardcoded values.
+ */
+u32 msm_iommu_get_prrr(void)
+{
+ return PRRR_VALUE;
+}
+
+u32 msm_iommu_get_nmrr(void)
+{
+ return NMRR_VALUE;
+}
+#else
+/*
+ * If both CONFIG_ARM_LPAE AND CONFIG_IOMMU_LPAE are disabled
+ * we can use the registers directly.
+ */
+
+u32 msm_iommu_get_prrr(void)
+{
+ u32 prrr;
+
+ RCP15_PRRR(prrr);
+ return prrr;
+}
+
+u32 msm_iommu_get_nmrr(void)
+{
+ u32 nmrr;
+
+ RCP15_NMRR(nmrr);
+ return nmrr;
+}
+#endif
+#endif
+#endif
+#ifdef CONFIG_ARM64
+u32 msm_iommu_get_prrr(void)
+{
+ return PRRR_VALUE;
+}
+
+u32 msm_iommu_get_nmrr(void)
+{
+ return NMRR_VALUE;
+}
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_dev-v1.c b/drivers/iommu/qcom/msm_iommu_dev-v1.c
new file mode 100644
index 000000000000..9520cc296e3f
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_dev-v1.c
@@ -0,0 +1,627 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/iommu.h>
+#include <linux/interrupt.h>
+#include <linux/msm-bus.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+
+#include "msm_iommu_hw-v1.h"
+#include <linux/qcom_iommu.h>
+#include "msm_iommu_perfmon.h"
+
+static const struct of_device_id msm_iommu_ctx_match_table[];
+
+#ifdef CONFIG_IOMMU_LPAE
+static const char *BFB_REG_NODE_NAME = "qcom,iommu-lpae-bfb-regs";
+static const char *BFB_DATA_NODE_NAME = "qcom,iommu-lpae-bfb-data";
+#else
+static const char *BFB_REG_NODE_NAME = "qcom,iommu-bfb-regs";
+static const char *BFB_DATA_NODE_NAME = "qcom,iommu-bfb-data";
+#endif
+
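+/*
+ * BFB settings are (register, value) pairs read from the DT and written
+ * verbatim into the IOMMU's implementation-defined registers on the
+ * first attach.
+ */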
+static int msm_iommu_parse_bfb_settings(struct platform_device *pdev,
+ struct msm_iommu_drvdata *drvdata)
+{
+ struct msm_iommu_bfb_settings *bfb_settings;
+ u32 nreg, nval;
+ int ret;
+
+ /*
+ * It is not valid for a device to have the BFB_REG_NODE_NAME
+ * property but not the BFB_DATA_NODE_NAME property, and vice versa.
+ */
+ if (!of_get_property(pdev->dev.of_node, BFB_REG_NODE_NAME, &nreg)) {
+ if (of_get_property(pdev->dev.of_node, BFB_DATA_NODE_NAME,
+ &nval))
+ return -EINVAL;
+ return 0;
+ }
+
+ if (!of_get_property(pdev->dev.of_node, BFB_DATA_NODE_NAME, &nval))
+ return -EINVAL;
+
+ if (nreg >= sizeof(bfb_settings->regs))
+ return -EINVAL;
+
+ if (nval >= sizeof(bfb_settings->data))
+ return -EINVAL;
+
+ if (nval != nreg)
+ return -EINVAL;
+
+ bfb_settings = devm_kzalloc(&pdev->dev, sizeof(*bfb_settings),
+ GFP_KERNEL);
+ if (!bfb_settings)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(pdev->dev.of_node,
+ BFB_REG_NODE_NAME,
+ bfb_settings->regs,
+ nreg / sizeof(*bfb_settings->regs));
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_array(pdev->dev.of_node,
+ BFB_DATA_NODE_NAME,
+ bfb_settings->data,
+ nval / sizeof(*bfb_settings->data));
+ if (ret)
+ return ret;
+
+ bfb_settings->length = nreg / sizeof(*bfb_settings->regs);
+
+ drvdata->bfb_settings = bfb_settings;
+
+ return 0;
+}
+
+static int __get_bus_vote_client(struct platform_device *pdev,
+ struct msm_iommu_drvdata *drvdata)
+{
+ int ret = 0;
+ struct msm_bus_scale_pdata *bs_table;
+ const char *dummy;
+
+ /* Check whether bus scaling has been specified for this node */
+ ret = of_property_read_string(pdev->dev.of_node, "qcom,msm-bus,name",
+ &dummy);
+ if (ret)
+ return 0;
+
+ bs_table = msm_bus_cl_get_pdata(pdev);
+ if (bs_table) {
+ drvdata->bus_client = msm_bus_scale_register_client(bs_table);
+ if (!drvdata->bus_client) {
+ pr_err("%s(): Bus client register failed.\n", __func__);
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+static void __put_bus_vote_client(struct msm_iommu_drvdata *drvdata)
+{
+ msm_bus_scale_unregister_client(drvdata->bus_client);
+ drvdata->bus_client = 0;
+}
+
+/*
+ * CONFIG_IOMMU_NON_SECURE allows us to override the secure
+ * designation of SMMUs in device tree. With this config enabled
+ * all SMMUs will be programmed by this driver.
+ */
+#ifdef CONFIG_IOMMU_NON_SECURE
+static inline void get_secure_id(struct device_node *node,
+ struct msm_iommu_drvdata *drvdata)
+{
+}
+
+static inline void get_secure_ctx(struct device_node *node,
+ struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ ctx_drvdata->secure_context = 0;
+}
+#else
+static void get_secure_id(struct device_node *node,
+ struct msm_iommu_drvdata *drvdata)
+{
+ if (msm_iommu_get_scm_call_avail())
+ of_property_read_u32(node, "qcom,iommu-secure-id",
+ &drvdata->sec_id);
+}
+
+static void get_secure_ctx(struct device_node *node,
+ struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ u32 secure_ctx = 0;
+
+ if (msm_iommu_get_scm_call_avail())
+ secure_ctx = of_property_read_bool(node, "qcom,secure-context");
+
+ ctx_drvdata->secure_context = secure_ctx;
+}
+#endif
+
+static int msm_iommu_parse_dt(struct platform_device *pdev,
+ struct msm_iommu_drvdata *drvdata)
+{
+ struct device_node *child;
+ int ret;
+
+ drvdata->dev = &pdev->dev;
+
+ ret = __get_bus_vote_client(pdev, drvdata);
+ if (ret)
+ goto fail;
+
+ ret = msm_iommu_parse_bfb_settings(pdev, drvdata);
+ if (ret)
+ goto fail;
+
+ for_each_available_child_of_node(pdev->dev.of_node, child)
+ drvdata->ncb++;
+
+ ret = of_property_read_string(pdev->dev.of_node, "label",
+ &drvdata->name);
+ if (ret)
+ goto fail;
+
+ drvdata->sec_id = -1;
+ get_secure_id(pdev->dev.of_node, drvdata);
+
+ drvdata->halt_enabled = of_property_read_bool(pdev->dev.of_node,
+ "qcom,iommu-enable-halt");
+
+ msm_iommu_add_drv(drvdata);
+
+ return 0;
+
+fail:
+ __put_bus_vote_client(drvdata);
+ return ret;
+}
+
+static int msm_iommu_pmon_parse_dt(struct platform_device *pdev,
+ struct iommu_pmon *pmon_info)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ unsigned int cls_prop_size;
+ int ret, irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0 && irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (irq <= 0) {
+ pmon_info->iommu.evt_irq = -1;
+ return irq;
+ }
+
+ pmon_info->iommu.evt_irq = irq;
+
+ ret = of_property_read_u32(np, "qcom,iommu-pmu-ngroups",
+ &pmon_info->num_groups);
+ if (ret) {
+ dev_err(dev, "Error reading qcom,iommu-pmu-ngroups\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "qcom,iommu-pmu-ncounters",
+ &pmon_info->num_counters);
+ if (ret) {
+ dev_err(dev, "Error reading qcom,iommu-pmu-ncounters\n");
+ return ret;
+ }
+
+ if (!of_get_property(np, "qcom,iommu-pmu-event-classes",
+ &cls_prop_size)) {
+ dev_err(dev, "Error reading qcom,iommu-pmu-event-classes\n");
+ return -EINVAL;
+ }
+
+ pmon_info->event_cls_supported = devm_kzalloc(dev, cls_prop_size,
+ GFP_KERNEL);
+ if (!pmon_info->event_cls_supported) {
+ dev_err(dev, "Unable to get memory for event class array\n");
+ return -ENOMEM;
+ }
+
+ pmon_info->nevent_cls_supported = cls_prop_size / sizeof(u32);
+
+ ret = of_property_read_u32_array(np, "qcom,iommu-pmu-event-classes",
+ pmon_info->event_cls_supported,
+ pmon_info->nevent_cls_supported);
+ if (ret) {
+ dev_err(dev, "Error reading qcom,iommu-pmu-event-classes\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int msm_iommu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ struct iommu_pmon *pmon_info;
+ struct msm_iommu_drvdata *drvdata;
+ struct resource *res;
+ int ret;
+ int global_cfg_irq, global_client_irq;
+ u32 temp;
+ unsigned long rate;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iommu_base");
+ drvdata->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(drvdata->base))
+ return PTR_ERR(drvdata->base);
+
+ drvdata->glb_base = drvdata->base;
+ drvdata->phys_base = res->start;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "smmu_local_base");
+ drvdata->smmu_local_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(drvdata->smmu_local_base) &&
+ PTR_ERR(drvdata->smmu_local_base) != -EPROBE_DEFER)
+ drvdata->smmu_local_base = NULL;
+
+ if (of_device_is_compatible(np, "qcom,msm-mmu-500"))
+ drvdata->model = MMU_500;
+
+ drvdata->iface = devm_clk_get(dev, "iface_clk");
+ if (IS_ERR(drvdata->iface))
+ return PTR_ERR(drvdata->iface);
+
+ drvdata->core = devm_clk_get(dev, "core_clk");
+ if (IS_ERR(drvdata->core))
+ return PTR_ERR(drvdata->core);
+
+ if (!of_property_read_u32(np, "qcom,cb-base-offset", &temp))
+ drvdata->cb_base = drvdata->base + temp;
+ else
+ drvdata->cb_base = drvdata->base + 0x8000;
+
+ rate = clk_get_rate(drvdata->core);
+ if (!rate) {
+ rate = clk_round_rate(drvdata->core, 1000);
+ clk_set_rate(drvdata->core, rate);
+ }
+
+ dev_info(&pdev->dev, "iface: %lu, core: %lu\n",
+ clk_get_rate(drvdata->iface), clk_get_rate(drvdata->core));
+
+ ret = msm_iommu_parse_dt(pdev, drvdata);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "device %s (model: %d) mapped at %p, with %d ctx banks\n",
+ drvdata->name, drvdata->model, drvdata->base, drvdata->ncb);
+
+ platform_set_drvdata(pdev, drvdata);
+
+ pmon_info = msm_iommu_pm_alloc(dev);
+ if (pmon_info) {
+ ret = msm_iommu_pmon_parse_dt(pdev, pmon_info);
+ if (ret) {
+ msm_iommu_pm_free(dev);
+ dev_info(dev, "%s: pmon not available\n",
+ drvdata->name);
+ } else {
+ pmon_info->iommu.base = drvdata->base;
+ pmon_info->iommu.ops = msm_get_iommu_access_ops();
+ pmon_info->iommu.hw_ops = iommu_pm_get_hw_ops_v1();
+ pmon_info->iommu.iommu_name = drvdata->name;
+ ret = msm_iommu_pm_iommu_register(pmon_info);
+ if (ret) {
+ dev_err(dev, "%s iommu register fail\n",
+ drvdata->name);
+ msm_iommu_pm_free(dev);
+ } else {
+ dev_dbg(dev, "%s iommu registered for pmon\n",
+ pmon_info->iommu.iommu_name);
+ }
+ }
+ }
+
+ global_cfg_irq = platform_get_irq_byname(pdev, "global_cfg_NS_irq");
+ if (global_cfg_irq < 0 && global_cfg_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (global_cfg_irq > 0) {
+ ret = devm_request_threaded_irq(dev, global_cfg_irq,
+ NULL,
+ msm_iommu_global_fault_handler,
+ IRQF_ONESHOT | IRQF_SHARED |
+ IRQF_TRIGGER_RISING,
+ "msm_iommu_global_cfg_irq",
+ pdev);
+ if (ret < 0)
+ dev_err(dev, "Request Global CFG IRQ %d failed with ret=%d\n",
+ global_cfg_irq, ret);
+ }
+
+ global_client_irq =
+ platform_get_irq_byname(pdev, "global_client_NS_irq");
+ if (global_client_irq < 0 && global_client_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (global_client_irq > 0) {
+ ret = devm_request_threaded_irq(dev, global_client_irq,
+ NULL,
+ msm_iommu_global_fault_handler,
+ IRQF_ONESHOT | IRQF_SHARED |
+ IRQF_TRIGGER_RISING,
+ "msm_iommu_global_client_irq",
+ pdev);
+ if (ret < 0)
+ dev_err(dev, "Request Global Client IRQ %d failed with ret=%d\n",
+ global_client_irq, ret);
+ }
+
+ ret = of_platform_populate(np, msm_iommu_ctx_match_table, NULL, dev);
+ if (ret)
+ dev_err(dev, "Failed to create iommu context device\n");
+
+ return ret;
+}
+
+static int msm_iommu_remove(struct platform_device *pdev)
+{
+ struct msm_iommu_drvdata *drv;
+
+ msm_iommu_pm_iommu_unregister(&pdev->dev);
+ msm_iommu_pm_free(&pdev->dev);
+
+ drv = platform_get_drvdata(pdev);
+ if (drv) {
+ __put_bus_vote_client(drv);
+ msm_iommu_remove_drv(drv);
+ platform_set_drvdata(pdev, NULL);
+ }
+
+ return 0;
+}
+
+static int msm_iommu_ctx_parse_dt(struct platform_device *pdev,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ struct resource *r, rp;
+ int irq = 0, ret = 0;
+ struct msm_iommu_drvdata *drvdata;
+ u32 nsid;
+ u32 n_sid_mask;
+ unsigned long cb_offset;
+
+ drvdata = dev_get_drvdata(pdev->dev.parent);
+
+ get_secure_ctx(pdev->dev.of_node, drvdata, ctx_drvdata);
+
+ if (ctx_drvdata->secure_context) {
+ irq = platform_get_irq(pdev, 1);
+ if (irq < 0 && irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ msm_iommu_secure_fault_handler_v2,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "msm_iommu_secure_irq", pdev);
+ if (ret) {
+ pr_err("Request IRQ %d failed with ret=%d\n",
+ irq, ret);
+ return ret;
+ }
+ }
+ } else {
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0 && irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ msm_iommu_fault_handler_v2,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "msm_iommu_nonsecure_irq", pdev);
+ if (ret) {
+ pr_err("Request IRQ %d failed with ret=%d\n",
+ irq, ret);
+ goto out;
+ }
+ }
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = of_address_to_resource(pdev->dev.parent->of_node, 0, &rp);
+ if (ret)
+ goto out;
+
+ /*
+ * Calculate the context bank number from the base addresses.
+ * Typically the CB0 base address is 0x8000 bytes past the IOMMU
+ * base when there are <= 8 context banks, so assume an offset of
+ * 0x8000 unless one is given explicitly.
+ */
+ cb_offset = drvdata->cb_base - drvdata->base;
+ ctx_drvdata->num = (r->start - rp.start - cb_offset) >> CTX_SHIFT;
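+ /*
+ * Example: with the default cb_offset of 0x8000 and CTX_SHIFT of 12,
+ * a context whose registers start 0x3000 bytes past CB0 resolves to
+ * ctx_drvdata->num = 3.
+ */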
+
+ if (of_property_read_string(pdev->dev.of_node, "label",
+ &ctx_drvdata->name))
+ ctx_drvdata->name = dev_name(&pdev->dev);
+
+ if (!of_get_property(pdev->dev.of_node, "qcom,iommu-ctx-sids", &nsid)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (nsid >= sizeof(ctx_drvdata->sids)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iommu-ctx-sids",
+ ctx_drvdata->sids,
+ nsid / sizeof(*ctx_drvdata->sids))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ctx_drvdata->nsid = nsid;
+ ctx_drvdata->asid = -1;
+
+ if (!of_get_property(pdev->dev.of_node, "qcom,iommu-sid-mask",
+ &n_sid_mask)) {
+ memset(ctx_drvdata->sid_mask, 0,
+ MAX_NUM_SMR * sizeof(*ctx_drvdata->sid_mask));
+ goto out;
+ }
+
+ if (n_sid_mask != nsid) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iommu-sid-mask",
+ ctx_drvdata->sid_mask,
+ n_sid_mask / sizeof(*ctx_drvdata->sid_mask))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ctx_drvdata->n_sid_mask = n_sid_mask;
+
+out:
+ return ret;
+}
+
+static int msm_iommu_ctx_probe(struct platform_device *pdev)
+{
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret;
+
+ if (!pdev->dev.parent)
+ return -EINVAL;
+
+ ctx_drvdata = devm_kzalloc(&pdev->dev, sizeof(*ctx_drvdata),
+ GFP_KERNEL);
+ if (!ctx_drvdata)
+ return -ENOMEM;
+
+ ctx_drvdata->pdev = pdev;
+ INIT_LIST_HEAD(&ctx_drvdata->attached_elm);
+
+ ret = msm_iommu_ctx_parse_dt(pdev, ctx_drvdata);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, ctx_drvdata);
+
+ dev_info(&pdev->dev, "context %s using bank %d\n",
+ ctx_drvdata->name, ctx_drvdata->num);
+
+ return 0;
+}
+
+static int msm_iommu_ctx_remove(struct platform_device *pdev)
+{
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id msm_iommu_match_table[] = {
+ { .compatible = "qcom,msm-smmu-v1", },
+ { .compatible = "qcom,msm-smmu-v2", },
+ {}
+};
+
+static struct platform_driver msm_iommu_driver = {
+ .driver = {
+ .name = "msm_iommu",
+ .of_match_table = msm_iommu_match_table,
+ },
+ .probe = msm_iommu_probe,
+ .remove = msm_iommu_remove,
+};
+
+static const struct of_device_id msm_iommu_ctx_match_table[] = {
+ { .compatible = "qcom,msm-smmu-v1-ctx", },
+ { .compatible = "qcom,msm-smmu-v2-ctx", },
+ {}
+};
+
+static struct platform_driver msm_iommu_ctx_driver = {
+ .driver = {
+ .name = "msm_iommu_ctx",
+ .of_match_table = msm_iommu_ctx_match_table,
+ },
+ .probe = msm_iommu_ctx_probe,
+ .remove = msm_iommu_ctx_remove,
+};
+
+static int __init msm_iommu_driver_init(void)
+{
+ int ret;
+
+ msm_iommu_check_scm_call_avail();
+ msm_set_iommu_access_ops(&iommu_access_ops_v1);
+ msm_iommu_sec_set_access_ops(&iommu_access_ops_v1);
+
+ ret = platform_driver_register(&msm_iommu_driver);
+ if (ret) {
+ pr_err("Failed to register IOMMU driver\n");
+ return ret;
+ }
+
+ ret = platform_driver_register(&msm_iommu_ctx_driver);
+ if (ret) {
+ pr_err("Failed to register IOMMU context driver\n");
+ platform_driver_unregister(&msm_iommu_driver);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit msm_iommu_driver_exit(void)
+{
+ platform_driver_unregister(&msm_iommu_ctx_driver);
+ platform_driver_unregister(&msm_iommu_driver);
+}
+
+subsys_initcall(msm_iommu_driver_init);
+module_exit(msm_iommu_driver_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/qcom/msm_iommu_hw-v1.h b/drivers/iommu/qcom/msm_iommu_hw-v1.h
new file mode 100644
index 000000000000..53e2f4874adb
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_hw-v1.h
@@ -0,0 +1,2320 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_IOMMU_HW_V2_H
+#define __ARCH_ARM_MACH_MSM_IOMMU_HW_V2_H
+
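+/* Each context bank occupies a 4 KB (1 << CTX_SHIFT) block of register space */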
+#define CTX_SHIFT 12
+
+#define CTX_REG(reg, base, ctx) \
+ ((base) + (reg) + ((ctx) << CTX_SHIFT))
+#define GLB_REG(reg, base) \
+ ((base) + (reg))
+#define GLB_REG_N(b, n, r) GLB_REG(b, ((r) + ((n) << 2)))
+#define GLB_FIELD(b, r) ((b) + (r))
+#define GLB_CTX_FIELD(b, c, r) (GLB_FIELD(b, r) + ((c) << CTX_SHIFT))
+#define GLB_FIELD_N(b, n, r) (GLB_FIELD(b, r) + ((n) << 2))
+
+
+#define GET_GLOBAL_REG(reg, base) (readl_relaxed(GLB_REG(reg, base)))
+#define GET_GLOBAL_REG_Q(reg, base) (readq_relaxed(GLB_REG(reg, base)))
+#define GET_CTX_REG(reg, base, ctx) (readl_relaxed(CTX_REG(reg, base, ctx)))
+#define GET_CTX_REG_Q(reg, base, ctx) (readq_relaxed(CTX_REG(reg, base, ctx)))
+
+#define SET_GLOBAL_REG(reg, base, val) writel_relaxed((val), GLB_REG(reg, base))
+#define SET_GLOBAL_REG_Q(reg, base, val) \
+ (writeq_relaxed((val), GLB_REG(reg, base)))
+
+#define SET_CTX_REG(reg, base, ctx, val) \
+ writel_relaxed((val), (CTX_REG(reg, base, ctx)))
+#define SET_CTX_REG_Q(reg, base, ctx, val) \
+ writeq_relaxed((val), CTX_REG(reg, base, ctx))
+
+/* Wrappers for numbered registers */
+#define SET_GLOBAL_REG_N(b, n, r, v) writel_relaxed(((v)), GLB_REG_N(b, n, r))
+#define GET_GLOBAL_REG_N(b, n, r) (readl_relaxed(GLB_REG_N(b, n, r)))
+
+/* Field wrappers */
+#define GET_GLOBAL_FIELD(b, r, F) \
+ GET_FIELD(GLB_FIELD(b, r), r##_##F##_MASK, r##_##F##_SHIFT)
+#define GET_CONTEXT_FIELD(b, c, r, F) \
+ GET_FIELD(GLB_CTX_FIELD(b, c, r), \
+ r##_##F##_MASK, r##_##F##_SHIFT)
+#define GET_CONTEXT_FIELD_Q(b, c, r, F) \
+ GET_FIELD_Q(GLB_CTX_FIELD(b, c, r), \
+ r##_##F##_MASK, r##_##F##_SHIFT)
+
+#define SET_GLOBAL_FIELD(b, r, F, v) \
+ SET_FIELD(GLB_FIELD(b, r), r##_##F##_MASK, r##_##F##_SHIFT, (v))
+#define SET_CONTEXT_FIELD(b, c, r, F, v) \
+ SET_FIELD(GLB_CTX_FIELD(b, c, r), \
+ r##_##F##_MASK, r##_##F##_SHIFT, (v))
+#define SET_CONTEXT_FIELD_Q(b, c, r, F, v) \
+ SET_FIELD_Q(GLB_CTX_FIELD(b, c, r), \
+ r##_##F##_MASK, r##_##F##_SHIFT, (v))
+
+/* Wrappers for numbered field registers */
+#define SET_GLOBAL_FIELD_N(b, n, r, F, v) \
+ SET_FIELD(GLB_FIELD_N(b, n, r), r##_##F##_MASK, r##_##F##_SHIFT, v)
+#define GET_GLOBAL_FIELD_N(b, n, r, F) \
+ GET_FIELD(GLB_FIELD_N(b, n, r), r##_##F##_MASK, r##_##F##_SHIFT)
+
+#define GET_FIELD(addr, mask, shift) ((readl_relaxed(addr) >> (shift)) & (mask))
+#define GET_FIELD_Q(addr, mask, shift) \
+ ((readq_relaxed(addr) >> (shift)) & (mask))
+
+#define SET_FIELD(addr, mask, shift, v) \
+do { \
+ int t = readl_relaxed(addr); \
+ writel_relaxed((t & ~((mask) << (shift))) + (((v) & \
+ (mask)) << (shift)), addr); \
+} while (0)
+
+#define SET_FIELD_Q(addr, mask, shift, v) \
+do { \
+ u64 t = readq_relaxed(addr); \
+ writeq_relaxed((t & ~(((u64) mask) << (shift))) + (((v) & \
+ ((u64) mask)) << (shift)), addr); \
+} while (0)
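+
+/*
+ * SET_FIELD/SET_FIELD_Q do a read-modify-write: clear the field's bits,
+ * then insert the new value (the '+' is equivalent to '|' here because
+ * the value is masked before being shifted into the cleared field).
+ */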
+
+
+/* Global register space 0 setters / getters */
+#define SET_CR0(b, v) SET_GLOBAL_REG(CR0, (b), (v))
+#define SET_SCR1(b, v) SET_GLOBAL_REG(SCR1, (b), (v))
+#define SET_CR2(b, v) SET_GLOBAL_REG(CR2, (b), (v))
+#define SET_ACR(b, v) SET_GLOBAL_REG(ACR, (b), (v))
+#define SET_IDR0(b, N, v) SET_GLOBAL_REG(IDR0, (b), (v))
+#define SET_IDR1(b, N, v) SET_GLOBAL_REG(IDR1, (b), (v))
+#define SET_IDR2(b, N, v) SET_GLOBAL_REG(IDR2, (b), (v))
+#define SET_IDR7(b, N, v) SET_GLOBAL_REG(IDR7, (b), (v))
+#define SET_GFAR(b, v) SET_GLOBAL_REG_Q(GFAR, (b), (v))
+#define SET_GFSR(b, v) SET_GLOBAL_REG(GFSR, (b), (v))
+#define SET_GFSRRESTORE(b, v) SET_GLOBAL_REG(GFSRRESTORE, (b), (v))
+#define SET_GFSYNR0(b, v) SET_GLOBAL_REG(GFSYNR0, (b), (v))
+#define SET_GFSYNR1(b, v) SET_GLOBAL_REG(GFSYNR1, (b), (v))
+#define SET_GFSYNR2(b, v) SET_GLOBAL_REG(GFSYNR2, (b), (v))
+#define SET_TLBIVMID(b, v) SET_GLOBAL_REG(TLBIVMID, (b), (v))
+#define SET_TLBIALLNSNH(b, v) SET_GLOBAL_REG(TLBIALLNSNH, (b), (v))
+#define SET_TLBIALLH(b, v) SET_GLOBAL_REG(TLBIALLH, (b), (v))
+#define SET_TLBGSYNC(b, v) SET_GLOBAL_REG(TLBGSYNC, (b), (v))
+#define SET_TLBGSTATUS(b, v) SET_GLOBAL_REG(TLBGSTATUS, (b), (v))
+#define SET_TLBIVAH(b, v) SET_GLOBAL_REG(TLBIVAH, (b), (v))
+#define SET_GATS1UR(b, v) SET_GLOBAL_REG(GATS1UR, (b), (v))
+#define SET_GATS1UW(b, v) SET_GLOBAL_REG(GATS1UW, (b), (v))
+#define SET_GATS1PR(b, v) SET_GLOBAL_REG(GATS1PR, (b), (v))
+#define SET_GATS1PW(b, v) SET_GLOBAL_REG(GATS1PW, (b), (v))
+#define SET_GATS12UR(b, v) SET_GLOBAL_REG(GATS12UR, (b), (v))
+#define SET_GATS12UW(b, v) SET_GLOBAL_REG(GATS12UW, (b), (v))
+#define SET_GATS12PR(b, v) SET_GLOBAL_REG(GATS12PR, (b), (v))
+#define SET_GATS12PW(b, v) SET_GLOBAL_REG(GATS12PW, (b), (v))
+#define SET_GPAR(b, v) SET_GLOBAL_REG(GPAR, (b), (v))
+#define SET_GATSR(b, v) SET_GLOBAL_REG(GATSR, (b), (v))
+#define SET_NSCR0(b, v) SET_GLOBAL_REG(NSCR0, (b), (v))
+#define SET_NSCR2(b, v) SET_GLOBAL_REG(NSCR2, (b), (v))
+#define SET_NSACR(b, v) SET_GLOBAL_REG(NSACR, (b), (v))
+#define SET_NSGFAR(b, v) SET_GLOBAL_REG(NSGFAR, (b), (v))
+#define SET_NSGFSRRESTORE(b, v) SET_GLOBAL_REG(NSGFSRRESTORE, (b), (v))
+#define SET_PMCR(b, v) SET_GLOBAL_REG(PMCR, (b), (v))
+#define SET_SMR_N(b, N, v) SET_GLOBAL_REG_N(SMR, N, (b), (v))
+#define SET_S2CR_N(b, N, v) SET_GLOBAL_REG_N(S2CR, N, (b), (v))
+
+#define GET_CR0(b) GET_GLOBAL_REG(CR0, (b))
+#define GET_SCR1(b) GET_GLOBAL_REG(SCR1, (b))
+#define GET_CR2(b) GET_GLOBAL_REG(CR2, (b))
+#define GET_ACR(b) GET_GLOBAL_REG(ACR, (b))
+#define GET_IDR0(b, N) GET_GLOBAL_REG(IDR0, (b))
+#define GET_IDR1(b, N) GET_GLOBAL_REG(IDR1, (b))
+#define GET_IDR2(b, N) GET_GLOBAL_REG(IDR2, (b))
+#define GET_IDR7(b, N) GET_GLOBAL_REG(IDR7, (b))
+#define GET_GFAR(b) GET_GLOBAL_REG_Q(GFAR, (b))
+#define GET_GFSR(b) GET_GLOBAL_REG(GFSR, (b))
+#define GET_GFSRRESTORE(b) GET_GLOBAL_REG(GFSRRESTORE, (b))
+#define GET_GFSYNR0(b) GET_GLOBAL_REG(GFSYNR0, (b))
+#define GET_GFSYNR1(b) GET_GLOBAL_REG(GFSYNR1, (b))
+#define GET_GFSYNR2(b) GET_GLOBAL_REG(GFSYNR2, (b))
+#define GET_TLBIVMID(b) GET_GLOBAL_REG(TLBIVMID, (b))
+#define GET_TLBIALLNSNH(b) GET_GLOBAL_REG(TLBIALLNSNH, (b))
+#define GET_TLBIALLH(b) GET_GLOBAL_REG(TLBIALLH, (b))
+#define GET_TLBGSYNC(b) GET_GLOBAL_REG(TLBGSYNC, (b))
+#define GET_TLBGSTATUS(b) GET_GLOBAL_REG(TLBGSTATUS, (b))
+#define GET_TLBIVAH(b) GET_GLOBAL_REG(TLBIVAH, (b))
+#define GET_GATS1UR(b) GET_GLOBAL_REG(GATS1UR, (b))
+#define GET_GATS1UW(b) GET_GLOBAL_REG(GATS1UW, (b))
+#define GET_GATS1PR(b) GET_GLOBAL_REG(GATS1PR, (b))
+#define GET_GATS1PW(b) GET_GLOBAL_REG(GATS1PW, (b))
+#define GET_GATS12UR(b) GET_GLOBAL_REG(GATS12UR, (b))
+#define GET_GATS12UW(b) GET_GLOBAL_REG(GATS12UW, (b))
+#define GET_GATS12PR(b) GET_GLOBAL_REG(GATS12PR, (b))
+#define GET_GATS12PW(b) GET_GLOBAL_REG(GATS12PW, (b))
+#define GET_GPAR(b) GET_GLOBAL_REG(GPAR, (b))
+#define GET_GATSR(b) GET_GLOBAL_REG(GATSR, (b))
+#define GET_NSCR0(b) GET_GLOBAL_REG(NSCR0, (b))
+#define GET_NSCR2(b) GET_GLOBAL_REG(NSCR2, (b))
+#define GET_NSACR(b) GET_GLOBAL_REG(NSACR, (b))
+#define GET_PMCR(b) GET_GLOBAL_REG(PMCR, (b))
+#define GET_SMR_N(b, N) GET_GLOBAL_REG_N(SMR, N, (b))
+#define GET_S2CR_N(b, N) GET_GLOBAL_REG_N(S2CR, N, (b))
+
+/* Global register space 1 setters / getters */
+#define SET_CBAR_N(b, N, v) SET_GLOBAL_REG_N(CBAR, N, (b), (v))
+#define SET_CBFRSYNRA_N(b, N, v) SET_GLOBAL_REG_N(CBFRSYNRA, N, (b), (v))
+
+#define GET_CBAR_N(b, N) GET_GLOBAL_REG_N(CBAR, N, (b))
+#define GET_CBFRSYNRA_N(b, N) GET_GLOBAL_REG_N(CBFRSYNRA, N, (b))
+
+/* Implementation defined register setters/getters */
+#define SET_MICRO_MMU_CTRL_HALT_REQ(b, v) \
+ SET_GLOBAL_FIELD(b, MICRO_MMU_CTRL, HALT_REQ, v)
+#define GET_MICRO_MMU_CTRL_IDLE(b) \
+ GET_GLOBAL_FIELD(b, MICRO_MMU_CTRL, IDLE)
+#define SET_MICRO_MMU_CTRL_RESERVED(b, v) \
+ SET_GLOBAL_FIELD(b, MICRO_MMU_CTRL, RESERVED, v)
+
+#define MMU_CTRL_IDLE (MICRO_MMU_CTRL_IDLE_MASK << MICRO_MMU_CTRL_IDLE_SHIFT)
+
+#define SET_PREDICTIONDIS0(b, v) SET_GLOBAL_REG(PREDICTIONDIS0, (b), (v))
+#define SET_PREDICTIONDIS1(b, v) SET_GLOBAL_REG(PREDICTIONDIS1, (b), (v))
+#define SET_S1L1BFBLP0(b, v) SET_GLOBAL_REG(S1L1BFBLP0, (b), (v))
+
+/* SSD register setters/getters */
+#define SET_SSDR_N(b, N, v) SET_GLOBAL_REG_N(SSDR_N, N, (b), (v))
+
+#define GET_SSDR_N(b, N) GET_GLOBAL_REG_N(SSDR_N, N, (b))
+
+/* Context bank register setters/getters */
+#define SET_SCTLR(b, c, v) SET_CTX_REG(CB_SCTLR, (b), (c), (v))
+#define SET_ACTLR(b, c, v) SET_CTX_REG(CB_ACTLR, (b), (c), (v))
+#define SET_RESUME(b, c, v) SET_CTX_REG(CB_RESUME, (b), (c), (v))
+#define SET_TTBCR(b, c, v) SET_CTX_REG(CB_TTBCR, (b), (c), (v))
+#define SET_CONTEXTIDR(b, c, v) SET_CTX_REG(CB_CONTEXTIDR, (b), (c), (v))
+#define SET_PRRR(b, c, v) SET_CTX_REG(CB_PRRR, (b), (c), (v))
+#define SET_NMRR(b, c, v) SET_CTX_REG(CB_NMRR, (b), (c), (v))
+#define SET_PAR(b, c, v) SET_CTX_REG(CB_PAR, (b), (c), (v))
+#define SET_FSR(b, c, v) SET_CTX_REG(CB_FSR, (b), (c), (v))
+#define SET_FSRRESTORE(b, c, v) SET_CTX_REG(CB_FSRRESTORE, (b), (c), (v))
+#define SET_FAR(b, c, v) SET_CTX_REG(CB_FAR, (b), (c), (v))
+#define SET_FSYNR0(b, c, v) SET_CTX_REG(CB_FSYNR0, (b), (c), (v))
+#define SET_FSYNR1(b, c, v) SET_CTX_REG(CB_FSYNR1, (b), (c), (v))
+#define SET_TLBIVA(b, c, v) SET_CTX_REG(CB_TLBIVA, (b), (c), (v))
+#define SET_TLBIVAA(b, c, v) SET_CTX_REG(CB_TLBIVAA, (b), (c), (v))
+#define SET_TLBIASID(b, c, v) SET_CTX_REG(CB_TLBIASID, (b), (c), (v))
+#define SET_TLBIALL(b, c, v) SET_CTX_REG(CB_TLBIALL, (b), (c), (v))
+#define SET_TLBIVAL(b, c, v) SET_CTX_REG(CB_TLBIVAL, (b), (c), (v))
+#define SET_TLBIVAAL(b, c, v) SET_CTX_REG(CB_TLBIVAAL, (b), (c), (v))
+#define SET_TLBSYNC(b, c, v) SET_CTX_REG(CB_TLBSYNC, (b), (c), (v))
+#define SET_TLBSTATUS(b, c, v) SET_CTX_REG(CB_TLBSTATUS, (b), (c), (v))
+#define SET_ATS1PR(b, c, v) SET_CTX_REG(CB_ATS1PR, (b), (c), (v))
+#define SET_ATS1PW(b, c, v) SET_CTX_REG(CB_ATS1PW, (b), (c), (v))
+#define SET_ATS1UR(b, c, v) SET_CTX_REG(CB_ATS1UR, (b), (c), (v))
+#define SET_ATS1UW(b, c, v) SET_CTX_REG(CB_ATS1UW, (b), (c), (v))
+#define SET_ATSR(b, c, v) SET_CTX_REG(CB_ATSR, (b), (c), (v))
+
+#define GET_SCTLR(b, c) GET_CTX_REG(CB_SCTLR, (b), (c))
+#define GET_ACTLR(b, c) GET_CTX_REG(CB_ACTLR, (b), (c))
+#define GET_RESUME(b, c) GET_CTX_REG(CB_RESUME, (b), (c))
+#define GET_TTBR0(b, c) GET_CTX_REG(CB_TTBR0, (b), (c))
+#define GET_TTBR1(b, c) GET_CTX_REG(CB_TTBR1, (b), (c))
+#define GET_TTBCR(b, c) GET_CTX_REG(CB_TTBCR, (b), (c))
+#define GET_CONTEXTIDR(b, c) GET_CTX_REG(CB_CONTEXTIDR, (b), (c))
+#define GET_PRRR(b, c) GET_CTX_REG(CB_PRRR, (b), (c))
+#define GET_NMRR(b, c) GET_CTX_REG(CB_NMRR, (b), (c))
+#define GET_PAR(b, c) GET_CTX_REG_Q(CB_PAR, (b), (c))
+#define GET_FSR(b, c) GET_CTX_REG(CB_FSR, (b), (c))
+#define GET_FSRRESTORE(b, c) GET_CTX_REG(CB_FSRRESTORE, (b), (c))
+#define GET_FAR(b, c) GET_CTX_REG_Q(CB_FAR, (b), (c))
+#define GET_FSYNR0(b, c) GET_CTX_REG(CB_FSYNR0, (b), (c))
+#define GET_FSYNR1(b, c) GET_CTX_REG(CB_FSYNR1, (b), (c))
+#define GET_TLBIVA(b, c) GET_CTX_REG(CB_TLBIVA, (b), (c))
+#define GET_TLBIVAA(b, c) GET_CTX_REG(CB_TLBIVAA, (b), (c))
+#define GET_TLBIASID(b, c) GET_CTX_REG(CB_TLBIASID, (b), (c))
+#define GET_TLBIALL(b, c) GET_CTX_REG(CB_TLBIALL, (b), (c))
+#define GET_TLBIVAL(b, c) GET_CTX_REG(CB_TLBIVAL, (b), (c))
+#define GET_TLBIVAAL(b, c) GET_CTX_REG(CB_TLBIVAAL, (b), (c))
+#define GET_TLBSYNC(b, c) GET_CTX_REG(CB_TLBSYNC, (b), (c))
+#define GET_TLBSTATUS(b, c) GET_CTX_REG(CB_TLBSTATUS, (b), (c))
+#define GET_ATS1PR(b, c) GET_CTX_REG(CB_ATS1PR, (b), (c))
+#define GET_ATS1PW(b, c) GET_CTX_REG(CB_ATS1PW, (b), (c))
+#define GET_ATS1UR(b, c) GET_CTX_REG(CB_ATS1UR, (b), (c))
+#define GET_ATS1UW(b, c) GET_CTX_REG(CB_ATS1UW, (b), (c))
+#define GET_ATSR(b, c) GET_CTX_REG(CB_ATSR, (b), (c))
+
+/* Global Register field setters / getters */
+/* Configuration Register: CR0/NSCR0 */
+#define SET_CR0_NSCFG(b, v) SET_GLOBAL_FIELD(b, CR0, NSCFG, v)
+#define SET_CR0_WACFG(b, v) SET_GLOBAL_FIELD(b, CR0, WACFG, v)
+#define SET_CR0_RACFG(b, v) SET_GLOBAL_FIELD(b, CR0, RACFG, v)
+#define SET_CR0_SHCFG(b, v) SET_GLOBAL_FIELD(b, CR0, SHCFG, v)
+#define SET_CR0_SMCFCFG(b, v) SET_GLOBAL_FIELD(b, CR0, SMCFCFG, v)
+#define SET_NSCR0_SMCFCFG(b, v) SET_GLOBAL_FIELD(b, NSCR0, SMCFCFG, v)
+#define SET_CR0_MTCFG(b, v) SET_GLOBAL_FIELD(b, CR0, MTCFG, v)
+#define SET_CR0_BSU(b, v) SET_GLOBAL_FIELD(b, CR0, BSU, v)
+#define SET_CR0_FB(b, v) SET_GLOBAL_FIELD(b, CR0, FB, v)
+#define SET_CR0_PTM(b, v) SET_GLOBAL_FIELD(b, CR0, PTM, v)
+#define SET_CR0_VMIDPNE(b, v) SET_GLOBAL_FIELD(b, CR0, VMIDPNE, v)
+#define SET_CR0_USFCFG(b, v) SET_GLOBAL_FIELD(b, CR0, USFCFG, v)
+#define SET_NSCR0_USFCFG(b, v) SET_GLOBAL_FIELD(b, NSCR0, USFCFG, v)
+#define SET_CR0_GSE(b, v) SET_GLOBAL_FIELD(b, CR0, GSE, v)
+#define SET_CR0_STALLD(b, v) SET_GLOBAL_FIELD(b, CR0, STALLD, v)
+#define SET_NSCR0_STALLD(b, v) SET_GLOBAL_FIELD(b, NSCR0, STALLD, v)
+#define SET_CR0_TRANSIENTCFG(b, v) SET_GLOBAL_FIELD(b, CR0, TRANSIENTCFG, v)
+#define SET_CR0_GCFGFIE(b, v) SET_GLOBAL_FIELD(b, CR0, GCFGFIE, v)
+#define SET_NSCR0_GCFGFIE(b, v) SET_GLOBAL_FIELD(b, NSCR0, GCFGFIE, v)
+#define SET_CR0_GCFGFRE(b, v) SET_GLOBAL_FIELD(b, CR0, GCFGFRE, v)
+#define SET_NSCR0_GCFGFRE(b, v) SET_GLOBAL_FIELD(b, NSCR0, GCFGFRE, v)
+#define SET_CR0_GFIE(b, v) SET_GLOBAL_FIELD(b, CR0, GFIE, v)
+#define SET_NSCR0_GFIE(b, v) SET_GLOBAL_FIELD(b, NSCR0, GFIE, v)
+#define SET_CR0_GFRE(b, v) SET_GLOBAL_FIELD(b, CR0, GFRE, v)
+#define SET_NSCR0_GFRE(b, v) SET_GLOBAL_FIELD(b, NSCR0, GFRE, v)
+#define SET_CR0_CLIENTPD(b, v) SET_GLOBAL_FIELD(b, CR0, CLIENTPD, v)
+#define SET_NSCR0_CLIENTPD(b, v) SET_GLOBAL_FIELD(b, NSCR0, CLIENTPD, v)
+
+#define SET_ACR_SMTNMC_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, ACR, SMTNMC_BPTLBEN, v)
+#define SET_ACR_MMUDIS_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, ACR, MMUDIS_BPTLBEN, v)
+#define SET_ACR_S2CR_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, ACR, S2CR_BPTLBEN, v)
+
+#define SET_NSACR_SMTNMC_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, NSACR, SMTNMC_BPTLBEN, v)
+#define SET_NSACR_MMUDIS_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, NSACR, MMUDIS_BPTLBEN, v)
+#define SET_NSACR_S2CR_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, NSACR, S2CR_BPTLBEN, v)
+
+#define GET_CR0_NSCFG(b) GET_GLOBAL_FIELD(b, CR0, NSCFG)
+#define GET_CR0_WACFG(b) GET_GLOBAL_FIELD(b, CR0, WACFG)
+#define GET_CR0_RACFG(b) GET_GLOBAL_FIELD(b, CR0, RACFG)
+#define GET_CR0_SHCFG(b) GET_GLOBAL_FIELD(b, CR0, SHCFG)
+#define GET_CR0_SMCFCFG(b) GET_GLOBAL_FIELD(b, CR0, SMCFCFG)
+#define GET_CR0_MTCFG(b) GET_GLOBAL_FIELD(b, CR0, MTCFG)
+#define GET_CR0_BSU(b) GET_GLOBAL_FIELD(b, CR0, BSU)
+#define GET_CR0_FB(b) GET_GLOBAL_FIELD(b, CR0, FB)
+#define GET_CR0_PTM(b) GET_GLOBAL_FIELD(b, CR0, PTM)
+#define GET_CR0_VMIDPNE(b) GET_GLOBAL_FIELD(b, CR0, VMIDPNE)
+#define GET_CR0_USFCFG(b) GET_GLOBAL_FIELD(b, CR0, USFCFG)
+#define GET_CR0_GSE(b) GET_GLOBAL_FIELD(b, CR0, GSE)
+#define GET_CR0_STALLD(b) GET_GLOBAL_FIELD(b, CR0, STALLD)
+#define GET_CR0_TRANSIENTCFG(b) GET_GLOBAL_FIELD(b, CR0, TRANSIENTCFG)
+#define GET_CR0_GCFGFIE(b) GET_GLOBAL_FIELD(b, CR0, GCFGFIE)
+#define GET_CR0_GCFGFRE(b) GET_GLOBAL_FIELD(b, CR0, GCFGFRE)
+#define GET_CR0_GFIE(b) GET_GLOBAL_FIELD(b, CR0, GFIE)
+#define GET_CR0_GFRE(b) GET_GLOBAL_FIELD(b, CR0, GFRE)
+#define GET_CR0_CLIENTPD(b) GET_GLOBAL_FIELD(b, CR0, CLIENTPD)
+
+/* Configuration Register: CR2 */
+#define SET_CR2_BPVMID(b, v) SET_GLOBAL_FIELD(b, CR2, BPVMID, v)
+
+#define GET_CR2_BPVMID(b) GET_GLOBAL_FIELD(b, CR2, BPVMID)
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define SET_GATS1PR_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS1PR, ADDR, v)
+#define SET_GATS1PR_NDX(b, v) SET_GLOBAL_FIELD(b, GATS1PR, NDX, v)
+
+#define GET_GATS1PR_ADDR(b) GET_GLOBAL_FIELD(b, GATS1PR, ADDR)
+#define GET_GATS1PR_NDX(b) GET_GLOBAL_FIELD(b, GATS1PR, NDX)
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define SET_GATS1PW_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS1PW, ADDR, v)
+#define SET_GATS1PW_NDX(b, v) SET_GLOBAL_FIELD(b, GATS1PW, NDX, v)
+
+#define GET_GATS1PW_ADDR(b) GET_GLOBAL_FIELD(b, GATS1PW, ADDR)
+#define GET_GATS1PW_NDX(b) GET_GLOBAL_FIELD(b, GATS1PW, NDX)
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define SET_GATS1UR_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS1UR, ADDR, v)
+#define SET_GATS1UR_NDX(b, v) SET_GLOBAL_FIELD(b, GATS1UR, NDX, v)
+
+#define GET_GATS1UR_ADDR(b) GET_GLOBAL_FIELD(b, GATS1UR, ADDR)
+#define GET_GATS1UR_NDX(b) GET_GLOBAL_FIELD(b, GATS1UR, NDX)
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define SET_GATS1UW_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS1UW, ADDR, v)
+#define SET_GATS1UW_NDX(b, v) SET_GLOBAL_FIELD(b, GATS1UW, NDX, v)
+
+#define GET_GATS1UW_ADDR(b) GET_GLOBAL_FIELD(b, GATS1UW, ADDR)
+#define GET_GATS1UW_NDX(b) GET_GLOBAL_FIELD(b, GATS1UW, NDX)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define SET_GATS12PR_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS12PR, ADDR, v)
+#define SET_GATS12PR_NDX(b, v) SET_GLOBAL_FIELD(b, GATS12PR, NDX, v)
+
+#define GET_GATS12PR_ADDR(b) GET_GLOBAL_FIELD(b, GATS12PR, ADDR)
+#define GET_GATS12PR_NDX(b) GET_GLOBAL_FIELD(b, GATS12PR, NDX)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define SET_GATS12PW_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS12PW, ADDR, v)
+#define SET_GATS12PW_NDX(b, v) SET_GLOBAL_FIELD(b, GATS12PW, NDX, v)
+
+#define GET_GATS12PW_ADDR(b) GET_GLOBAL_FIELD(b, GATS12PW, ADDR)
+#define GET_GATS12PW_NDX(b) GET_GLOBAL_FIELD(b, GATS12PW, NDX)
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define SET_GATS12UR_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS12UR, ADDR, v)
+#define SET_GATS12UR_NDX(b, v) SET_GLOBAL_FIELD(b, GATS12UR, NDX, v)
+
+#define GET_GATS12UR_ADDR(b) GET_GLOBAL_FIELD(b, GATS12UR, ADDR)
+#define GET_GATS12UR_NDX(b) GET_GLOBAL_FIELD(b, GATS12UR, NDX)
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define SET_GATS12UW_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS12UW, ADDR, v)
+#define SET_GATS12UW_NDX(b, v) SET_GLOBAL_FIELD(b, GATS12UW, NDX, v)
+
+#define GET_GATS12UW_ADDR(b) GET_GLOBAL_FIELD(b, GATS12UW, ADDR)
+#define GET_GATS12UW_NDX(b) GET_GLOBAL_FIELD(b, GATS12UW, NDX)
+
+/* Global Address Translation Status Register: GATSR */
+#define SET_GATSR_ACTIVE(b, v) SET_GLOBAL_FIELD(b, GATSR, ACTIVE, v)
+
+#define GET_GATSR_ACTIVE(b) GET_GLOBAL_FIELD(b, GATSR, ACTIVE)
+
+/* Global Fault Address Register: GFAR */
+#define SET_GFAR_FADDR(b, v) SET_GLOBAL_FIELD(b, GFAR, FADDR, v)
+
+#define GET_GFAR_FADDR(b) GET_GLOBAL_FIELD(b, GFAR, FADDR)
+
+/* Global Fault Status Register: GFSR */
+#define SET_GFSR_ICF(b, v) SET_GLOBAL_FIELD(b, GFSR, ICF, v)
+#define SET_GFSR_USF(b, v) SET_GLOBAL_FIELD(b, GFSR, USF, v)
+#define SET_GFSR_SMCF(b, v) SET_GLOBAL_FIELD(b, GFSR, SMCF, v)
+#define SET_GFSR_UCBF(b, v) SET_GLOBAL_FIELD(b, GFSR, UCBF, v)
+#define SET_GFSR_UCIF(b, v) SET_GLOBAL_FIELD(b, GFSR, UCIF, v)
+#define SET_GFSR_CAF(b, v) SET_GLOBAL_FIELD(b, GFSR, CAF, v)
+#define SET_GFSR_EF(b, v) SET_GLOBAL_FIELD(b, GFSR, EF, v)
+#define SET_GFSR_PF(b, v) SET_GLOBAL_FIELD(b, GFSR, PF, v)
+#define SET_GFSR_MULTI(b, v) SET_GLOBAL_FIELD(b, GFSR, MULTI, v)
+
+#define GET_GFSR_ICF(b) GET_GLOBAL_FIELD(b, GFSR, ICF)
+#define GET_GFSR_USF(b) GET_GLOBAL_FIELD(b, GFSR, USF)
+#define GET_GFSR_SMCF(b) GET_GLOBAL_FIELD(b, GFSR, SMCF)
+#define GET_GFSR_UCBF(b) GET_GLOBAL_FIELD(b, GFSR, UCBF)
+#define GET_GFSR_UCIF(b) GET_GLOBAL_FIELD(b, GFSR, UCIF)
+#define GET_GFSR_CAF(b) GET_GLOBAL_FIELD(b, GFSR, CAF)
+#define GET_GFSR_EF(b) GET_GLOBAL_FIELD(b, GFSR, EF)
+#define GET_GFSR_PF(b) GET_GLOBAL_FIELD(b, GFSR, PF)
+#define GET_GFSR_MULTI(b) GET_GLOBAL_FIELD(b, GFSR, MULTI)
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define SET_GFSYNR0_NESTED(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, NESTED, v)
+#define SET_GFSYNR0_WNR(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, WNR, v)
+#define SET_GFSYNR0_PNU(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, PNU, v)
+#define SET_GFSYNR0_IND(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, IND, v)
+#define SET_GFSYNR0_NSSTATE(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, NSSTATE, v)
+#define SET_GFSYNR0_NSATTR(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, NSATTR, v)
+
+#define GET_GFSYNR0_NESTED(b) GET_GLOBAL_FIELD(b, GFSYNR0, NESTED)
+#define GET_GFSYNR0_WNR(b) GET_GLOBAL_FIELD(b, GFSYNR0, WNR)
+#define GET_GFSYNR0_PNU(b) GET_GLOBAL_FIELD(b, GFSYNR0, PNU)
+#define GET_GFSYNR0_IND(b) GET_GLOBAL_FIELD(b, GFSYNR0, IND)
+#define GET_GFSYNR0_NSSTATE(b) GET_GLOBAL_FIELD(b, GFSYNR0, NSSTATE)
+#define GET_GFSYNR0_NSATTR(b) GET_GLOBAL_FIELD(b, GFSYNR0, NSATTR)
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define SET_GFSYNR1_SID(b, v) SET_GLOBAL_FIELD(b, GFSYNR1, SID, v)
+
+#define GET_GFSYNR1_SID(b) GET_GLOBAL_FIELD(b, GFSYNR1, SID)
+
+/* Global Physical Address Register: GPAR */
+#define SET_GPAR_F(b, v) SET_GLOBAL_FIELD(b, GPAR, F, v)
+#define SET_GPAR_SS(b, v) SET_GLOBAL_FIELD(b, GPAR, SS, v)
+#define SET_GPAR_OUTER(b, v) SET_GLOBAL_FIELD(b, GPAR, OUTER, v)
+#define SET_GPAR_INNER(b, v) SET_GLOBAL_FIELD(b, GPAR, INNER, v)
+#define SET_GPAR_SH(b, v) SET_GLOBAL_FIELD(b, GPAR, SH, v)
+#define SET_GPAR_NS(b, v) SET_GLOBAL_FIELD(b, GPAR, NS, v)
+#define SET_GPAR_NOS(b, v) SET_GLOBAL_FIELD(b, GPAR, NOS, v)
+#define SET_GPAR_PA(b, v) SET_GLOBAL_FIELD(b, GPAR, PA, v)
+#define SET_GPAR_TF(b, v) SET_GLOBAL_FIELD(b, GPAR, TF, v)
+#define SET_GPAR_AFF(b, v) SET_GLOBAL_FIELD(b, GPAR, AFF, v)
+#define SET_GPAR_PF(b, v) SET_GLOBAL_FIELD(b, GPAR, PF, v)
+#define SET_GPAR_EF(b, v) SET_GLOBAL_FIELD(b, GPAR, EF, v)
+#define SET_GPAR_TLCMCF(b, v) SET_GLOBAL_FIELD(b, GPAR, TLCMCF, v)
+#define SET_GPAR_TLBLKF(b, v) SET_GLOBAL_FIELD(b, GPAR, TLBLKF, v)
+#define SET_GPAR_UCBF(b, v) SET_GLOBAL_FIELD(b, GPAR, UCBF, v)
+
+#define GET_GPAR_F(b) GET_GLOBAL_FIELD(b, GPAR, F)
+#define GET_GPAR_SS(b) GET_GLOBAL_FIELD(b, GPAR, SS)
+#define GET_GPAR_OUTER(b) GET_GLOBAL_FIELD(b, GPAR, OUTER)
+#define GET_GPAR_INNER(b) GET_GLOBAL_FIELD(b, GPAR, INNER)
+#define GET_GPAR_SH(b) GET_GLOBAL_FIELD(b, GPAR, SH)
+#define GET_GPAR_NS(b) GET_GLOBAL_FIELD(b, GPAR, NS)
+#define GET_GPAR_NOS(b) GET_GLOBAL_FIELD(b, GPAR, NOS)
+#define GET_GPAR_PA(b) GET_GLOBAL_FIELD(b, GPAR, PA)
+#define GET_GPAR_TF(b) GET_GLOBAL_FIELD(b, GPAR, TF)
+#define GET_GPAR_AFF(b) GET_GLOBAL_FIELD(b, GPAR, AFF)
+#define GET_GPAR_PF(b) GET_GLOBAL_FIELD(b, GPAR, PF)
+#define GET_GPAR_EF(b) GET_GLOBAL_FIELD(b, GPAR, EF)
+#define GET_GPAR_TLCMCF(b) GET_GLOBAL_FIELD(b, GPAR, TLCMCF)
+#define GET_GPAR_TLBLKF(b) GET_GLOBAL_FIELD(b, GPAR, TLBLKF)
+#define GET_GPAR_UCBF(b) GET_GLOBAL_FIELD(b, GPAR, UCBF)
+
+/* Identification Register: IDR0 */
+#define SET_IDR0_NUMSMRG(b, v) SET_GLOBAL_FIELD(b, IDR0, NUMSMRG, v)
+#define SET_IDR0_NUMSIDB(b, v) SET_GLOBAL_FIELD(b, IDR0, NUMSIDB, v)
+#define SET_IDR0_BTM(b, v) SET_GLOBAL_FIELD(b, IDR0, BTM, v)
+#define SET_IDR0_CTTW(b, v) SET_GLOBAL_FIELD(b, IDR0, CTTW, v)
+#define SET_IDR0_NUMIRPT(b, v) SET_GLOBAL_FIELD(b, IDR0, NUMIRPT, v)
+#define SET_IDR0_PTFS(b, v) SET_GLOBAL_FIELD(b, IDR0, PTFS, v)
+#define SET_IDR0_SMS(b, v) SET_GLOBAL_FIELD(b, IDR0, SMS, v)
+#define SET_IDR0_NTS(b, v) SET_GLOBAL_FIELD(b, IDR0, NTS, v)
+#define SET_IDR0_S2TS(b, v) SET_GLOBAL_FIELD(b, IDR0, S2TS, v)
+#define SET_IDR0_S1TS(b, v) SET_GLOBAL_FIELD(b, IDR0, S1TS, v)
+#define SET_IDR0_SES(b, v) SET_GLOBAL_FIELD(b, IDR0, SES, v)
+
+#define GET_IDR0_NUMSMRG(b) GET_GLOBAL_FIELD(b, IDR0, NUMSMRG)
+#define GET_IDR0_NUMSIDB(b) GET_GLOBAL_FIELD(b, IDR0, NUMSIDB)
+#define GET_IDR0_BTM(b) GET_GLOBAL_FIELD(b, IDR0, BTM)
+#define GET_IDR0_CTTW(b) GET_GLOBAL_FIELD(b, IDR0, CTTW)
+#define GET_IDR0_NUMIRPT(b) GET_GLOBAL_FIELD(b, IDR0, NUMIRPT)
+#define GET_IDR0_PTFS(b) GET_GLOBAL_FIELD(b, IDR0, PTFS)
+#define GET_IDR0_SMS(b) GET_GLOBAL_FIELD(b, IDR0, SMS)
+#define GET_IDR0_NTS(b) GET_GLOBAL_FIELD(b, IDR0, NTS)
+#define GET_IDR0_S2TS(b) GET_GLOBAL_FIELD(b, IDR0, S2TS)
+#define GET_IDR0_S1TS(b) GET_GLOBAL_FIELD(b, IDR0, S1TS)
+#define GET_IDR0_SES(b) GET_GLOBAL_FIELD(b, IDR0, SES)
+
+/* Identification Register: IDR1 */
+#define SET_IDR1_NUMCB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMCB, v)
+#define SET_IDR1_NUMSSDNDXB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMSSDNDXB, v)
+#define SET_IDR1_SSDTP(b, v) SET_GLOBAL_FIELD(b, IDR1, SSDTP, v)
+#define SET_IDR1_SMCD(b, v) SET_GLOBAL_FIELD(b, IDR1, SMCD, v)
+#define SET_IDR1_NUMS2CB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMS2CB, v)
+#define SET_IDR1_NUMPAGENDXB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMPAGENDXB, v)
+#define SET_IDR1_PAGESIZE(b, v) SET_GLOBAL_FIELD(b, IDR1, PAGESIZE, v)
+
+#define GET_IDR1_NUMCB(b) GET_GLOBAL_FIELD(b, IDR1, NUMCB)
+#define GET_IDR1_NUMSSDNDXB(b) GET_GLOBAL_FIELD(b, IDR1, NUMSSDNDXB)
+#define GET_IDR1_SSDTP(b) GET_GLOBAL_FIELD(b, IDR1, SSDTP)
+#define GET_IDR1_SMCD(b) GET_GLOBAL_FIELD(b, IDR1, SMCD)
+#define GET_IDR1_NUMS2CB(b) GET_GLOBAL_FIELD(b, IDR1, NUMS2CB)
+#define GET_IDR1_NUMPAGENDXB(b) GET_GLOBAL_FIELD(b, IDR1, NUMPAGENDXB)
+#define GET_IDR1_PAGESIZE(b) GET_GLOBAL_FIELD(b, IDR1, PAGESIZE)
+
+/* Identification Register: IDR2 */
+#define SET_IDR2_IAS(b, v) SET_GLOBAL_FIELD(b, IDR2, IAS, v)
+#define SET_IDR2_OAS(b, v) SET_GLOBAL_FIELD(b, IDR2, OAS, v)
+
+#define GET_IDR2_IAS(b) GET_GLOBAL_FIELD(b, IDR2, IAS)
+#define GET_IDR2_OAS(b) GET_GLOBAL_FIELD(b, IDR2, OAS)
+
+/* Identification Register: IDR7 */
+#define SET_IDR7_MINOR(b, v) SET_GLOBAL_FIELD(b, IDR7, MINOR, v)
+#define SET_IDR7_MAJOR(b, v) SET_GLOBAL_FIELD(b, IDR7, MAJOR, v)
+
+#define GET_IDR7_MINOR(b) GET_GLOBAL_FIELD(b, IDR7, MINOR)
+#define GET_IDR7_MAJOR(b) GET_GLOBAL_FIELD(b, IDR7, MAJOR)
+
+/* Stream to Context Register: S2CR_N */
+#define SET_S2CR_CBNDX(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, CBNDX, v)
+#define SET_S2CR_SHCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, SHCFG, v)
+#define SET_S2CR_MTCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, MTCFG, v)
+#define SET_S2CR_MEMATTR(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, MEMATTR, v)
+#define SET_S2CR_TYPE(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, TYPE, v)
+#define SET_S2CR_NSCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, NSCFG, v)
+#define SET_S2CR_RACFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, RACFG, v)
+#define SET_S2CR_WACFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, WACFG, v)
+#define SET_S2CR_PRIVCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, PRIVCFG, v)
+#define SET_S2CR_INSTCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, INSTCFG, v)
+#define SET_S2CR_TRANSIENTCFG(b, n, v) \
+ SET_GLOBAL_FIELD_N(b, n, S2CR, TRANSIENTCFG, v)
+#define SET_S2CR_VMID(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, VMID, v)
+#define SET_S2CR_BSU(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, BSU, v)
+#define SET_S2CR_FB(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, FB, v)
+
+#define GET_S2CR_CBNDX(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, CBNDX)
+#define GET_S2CR_SHCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, SHCFG)
+#define GET_S2CR_MTCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, MTCFG)
+#define GET_S2CR_MEMATTR(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, MEMATTR)
+#define GET_S2CR_TYPE(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, TYPE)
+#define GET_S2CR_NSCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, NSCFG)
+#define GET_S2CR_RACFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, RACFG)
+#define GET_S2CR_WACFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, WACFG)
+#define GET_S2CR_PRIVCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, PRIVCFG)
+#define GET_S2CR_INSTCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, INSTCFG)
+#define GET_S2CR_TRANSIENTCFG(b, n) \
+ GET_GLOBAL_FIELD_N(b, n, S2CR, TRANSIENTCFG)
+#define GET_S2CR_VMID(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, VMID)
+#define GET_S2CR_BSU(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, BSU)
+#define GET_S2CR_FB(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, FB)
+
+/* Stream Match Register: SMR_N */
+#define SET_SMR_ID(b, n, v) SET_GLOBAL_FIELD_N(b, n, SMR, ID, v)
+#define SET_SMR_MASK(b, n, v) SET_GLOBAL_FIELD_N(b, n, SMR, MASK, v)
+#define SET_SMR_VALID(b, n, v) SET_GLOBAL_FIELD_N(b, n, SMR, VALID, v)
+
+#define GET_SMR_ID(b, n) GET_GLOBAL_FIELD_N(b, n, SMR, ID)
+#define GET_SMR_MASK(b, n) GET_GLOBAL_FIELD_N(b, n, SMR, MASK)
+#define GET_SMR_VALID(b, n) GET_GLOBAL_FIELD_N(b, n, SMR, VALID)
+
+/* Global TLB Status: TLBGSTATUS */
+#define SET_TLBGSTATUS_GSACTIVE(b, v) \
+ SET_GLOBAL_FIELD(b, TLBGSTATUS, GSACTIVE, v)
+
+#define GET_TLBGSTATUS_GSACTIVE(b) \
+ GET_GLOBAL_FIELD(b, TLBGSTATUS, GSACTIVE)
+
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define SET_TLBIVAH_ADDR(b, v) SET_GLOBAL_FIELD(b, TLBIVAH, ADDR, v)
+
+#define GET_TLBIVAH_ADDR(b) GET_GLOBAL_FIELD(b, TLBIVAH, ADDR)
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define SET_TLBIVMID_VMID(b, v) SET_GLOBAL_FIELD(b, TLBIVMID, VMID, v)
+
+#define GET_TLBIVMID_VMID(b) GET_GLOBAL_FIELD(b, TLBIVMID, VMID)
+
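+/*
+ * Sketch of a TLB invalidate by VMID using the accessors above (an
+ * assumed sequence for illustration, not code from this driver):
+ * write the VMID, issue a global TLB sync via the TLBGSYNC register
+ * (here assuming a SET_GLOBAL_REG(reg, base, val) helper analogous to
+ * SET_CTX_REG above), then poll GSACTIVE until the sync has drained:
+ *
+ *	SET_TLBIVMID_VMID(base, vmid);
+ *	SET_GLOBAL_REG(TLBGSYNC, base, 0);
+ *	while (GET_TLBGSTATUS_GSACTIVE(base))
+ *		cpu_relax();
+ */
+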
+/* Global Register Space 1 field setters / getters */
+/* Context Bank Attribute Register: CBAR_N */
+#define SET_CBAR_VMID(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, VMID, v)
+#define SET_CBAR_CBNDX(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, CBNDX, v)
+#define SET_CBAR_BPSHCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, BPSHCFG, v)
+#define SET_CBAR_HYPC(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, HYPC, v)
+#define SET_CBAR_FB(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, FB, v)
+#define SET_CBAR_MEMATTR(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, MEMATTR, v)
+#define SET_CBAR_TYPE(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, TYPE, v)
+#define SET_CBAR_BSU(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, BSU, v)
+#define SET_CBAR_RACFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, RACFG, v)
+#define SET_CBAR_WACFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, WACFG, v)
+#define SET_CBAR_IRPTNDX(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, IRPTNDX, v)
+
+#define GET_CBAR_VMID(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, VMID)
+#define GET_CBAR_CBNDX(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, CBNDX)
+#define GET_CBAR_BPSHCFG(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, BPSHCFG)
+#define GET_CBAR_HYPC(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, HYPC)
+#define GET_CBAR_FB(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, FB)
+#define GET_CBAR_MEMATTR(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, MEMATTR)
+#define GET_CBAR_TYPE(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, TYPE)
+#define GET_CBAR_BSU(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, BSU)
+#define GET_CBAR_RACFG(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, RACFG)
+#define GET_CBAR_WACFG(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, WACFG)
+#define GET_CBAR_IRPTNDX(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, IRPTNDX)
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA_N */
+#define SET_CBFRSYNRA_SID(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBFRSYNRA, SID, v)
+
+#define GET_CBFRSYNRA_SID(b, n) GET_GLOBAL_FIELD_N(b, n, CBFRSYNRA, SID)
+
+/* Stage 1 Context Bank Format Fields */
+#define SET_CB_ACTLR_REQPRIORITY(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITY, v)
+#define SET_CB_ACTLR_REQPRIORITYCFG(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITYCFG, v)
+#define SET_CB_ACTLR_PRIVCFG(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_ACTLR, PRIVCFG, v)
+#define SET_CB_ACTLR_BPRCOSH(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCOSH, v)
+#define SET_CB_ACTLR_BPRCISH(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCISH, v)
+#define SET_CB_ACTLR_BPRCNSH(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCNSH, v)
+
+#define GET_CB_ACTLR_REQPRIORITY(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITY)
+#define GET_CB_ACTLR_REQPRIORITYCFG(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITYCFG)
+#define GET_CB_ACTLR_PRIVCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_ACTLR, PRIVCFG)
+#define GET_CB_ACTLR_BPRCOSH(b, c) GET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCOSH)
+#define GET_CB_ACTLR_BPRCISH(b, c) GET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCISH)
+#define GET_CB_ACTLR_BPRCNSH(b, c) GET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCNSH)
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define SET_CB_ATS1PR_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1PR, ADDR, v)
+
+#define GET_CB_ATS1PR_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_ATS1PR, ADDR)
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define SET_CB_ATS1PW_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1PW, ADDR, v)
+
+#define GET_CB_ATS1PW_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_ATS1PW, ADDR)
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define SET_CB_ATS1UR_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1UR, ADDR, v)
+
+#define GET_CB_ATS1UR_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_ATS1UR, ADDR)
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define SET_CB_ATS1UW_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1UW, ADDR, v)
+
+#define GET_CB_ATS1UW_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_ATS1UW, ADDR)
+
+/* Address Translation Status Register: CB_ATSR */
+#define SET_CB_ATSR_ACTIVE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATSR, ACTIVE, v)
+
+#define GET_CB_ATSR_ACTIVE(b, c) GET_CONTEXT_FIELD(b, c, CB_ATSR, ACTIVE)
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define SET_CB_CONTEXTIDR_ASID(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, ASID, v)
+#define SET_CB_CONTEXTIDR_PROCID(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, PROCID, v)
+
+#define GET_CB_CONTEXTIDR_ASID(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, ASID)
+#define GET_CB_CONTEXTIDR_PROCID(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, PROCID)
+
+/* Fault Address Register: CB_FAR */
+#define SET_CB_FAR_FADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FAR, FADDR, v)
+
+#define GET_CB_FAR_FADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_FAR, FADDR)
+
+/* Fault Status Register: CB_FSR */
+#define SET_CB_FSR_TF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, TF, v)
+#define SET_CB_FSR_AFF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, AFF, v)
+#define SET_CB_FSR_PF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, PF, v)
+#define SET_CB_FSR_EF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, EF, v)
+#define SET_CB_FSR_TLBMCF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, TLBMCF, v)
+#define SET_CB_FSR_TLBLKF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, TLBLKF, v)
+#define SET_CB_FSR_SS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, SS, v)
+#define SET_CB_FSR_MULTI(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, MULTI, v)
+
+#define GET_CB_FSR_TF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, TF)
+#define GET_CB_FSR_AFF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, AFF)
+#define GET_CB_FSR_PF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, PF)
+#define GET_CB_FSR_EF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, EF)
+#define GET_CB_FSR_TLBMCF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, TLBMCF)
+#define GET_CB_FSR_TLBLKF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, TLBLKF)
+#define GET_CB_FSR_SS(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, SS)
+#define GET_CB_FSR_MULTI(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, MULTI)
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define SET_CB_FSYNR0_PLVL(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, PLVL, v)
+#define SET_CB_FSYNR0_S1PTWF(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1PTWF, v)
+#define SET_CB_FSYNR0_WNR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, WNR, v)
+#define SET_CB_FSYNR0_PNU(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, PNU, v)
+#define SET_CB_FSYNR0_IND(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, IND, v)
+#define SET_CB_FSYNR0_NSSTATE(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSSTATE, v)
+#define SET_CB_FSYNR0_NSATTR(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSATTR, v)
+#define SET_CB_FSYNR0_ATOF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, ATOF, v)
+#define SET_CB_FSYNR0_PTWF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, PTWF, v)
+#define SET_CB_FSYNR0_AFR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, AFR, v)
+#define SET_CB_FSYNR0_S1CBNDX(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1CBNDX, v)
+
+#define GET_CB_FSYNR0_PLVL(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, PLVL)
+#define GET_CB_FSYNR0_S1PTWF(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1PTWF)
+#define GET_CB_FSYNR0_WNR(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, WNR)
+#define GET_CB_FSYNR0_PNU(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, PNU)
+#define GET_CB_FSYNR0_IND(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, IND)
+#define GET_CB_FSYNR0_NSSTATE(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSSTATE)
+#define GET_CB_FSYNR0_NSATTR(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSATTR)
+#define GET_CB_FSYNR0_ATOF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, ATOF)
+#define GET_CB_FSYNR0_PTWF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, PTWF)
+#define GET_CB_FSYNR0_AFR(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, AFR)
+#define GET_CB_FSYNR0_S1CBNDX(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1CBNDX)
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define SET_CB_NMRR_IR0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR0, v)
+#define SET_CB_NMRR_IR1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR1, v)
+#define SET_CB_NMRR_IR2(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR2, v)
+#define SET_CB_NMRR_IR3(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR3, v)
+#define SET_CB_NMRR_IR4(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR4, v)
+#define SET_CB_NMRR_IR5(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR5, v)
+#define SET_CB_NMRR_IR6(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR6, v)
+#define SET_CB_NMRR_IR7(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR7, v)
+#define SET_CB_NMRR_OR0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR0, v)
+#define SET_CB_NMRR_OR1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR1, v)
+#define SET_CB_NMRR_OR2(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR2, v)
+#define SET_CB_NMRR_OR3(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR3, v)
+#define SET_CB_NMRR_OR4(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR4, v)
+#define SET_CB_NMRR_OR5(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR5, v)
+#define SET_CB_NMRR_OR6(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR6, v)
+#define SET_CB_NMRR_OR7(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR7, v)
+
+#define GET_CB_NMRR_IR0(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR0)
+#define GET_CB_NMRR_IR1(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR1)
+#define GET_CB_NMRR_IR2(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR2)
+#define GET_CB_NMRR_IR3(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR3)
+#define GET_CB_NMRR_IR4(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR4)
+#define GET_CB_NMRR_IR5(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR5)
+#define GET_CB_NMRR_IR6(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR6)
+#define GET_CB_NMRR_IR7(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR7)
+#define GET_CB_NMRR_OR0(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR0)
+#define GET_CB_NMRR_OR1(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR1)
+#define GET_CB_NMRR_OR2(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR2)
+#define GET_CB_NMRR_OR3(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR3)
+#define GET_CB_NMRR_OR4(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR4)
+#define GET_CB_NMRR_OR5(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR5)
+#define GET_CB_NMRR_OR6(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR6)
+#define GET_CB_NMRR_OR7(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR7)
+
+/* Physical Address Register: CB_PAR */
+#define SET_CB_PAR_F(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, F, v)
+#define SET_CB_PAR_SS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, SS, v)
+#define SET_CB_PAR_OUTER(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, OUTER, v)
+#define SET_CB_PAR_INNER(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, INNER, v)
+#define SET_CB_PAR_SH(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, SH, v)
+#define SET_CB_PAR_NS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, NS, v)
+#define SET_CB_PAR_NOS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, NOS, v)
+#define SET_CB_PAR_PA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, PA, v)
+#define SET_CB_PAR_TF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, TF, v)
+#define SET_CB_PAR_AFF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, AFF, v)
+#define SET_CB_PAR_PF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, PF, v)
+#define SET_CB_PAR_TLBMCF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, TLBMCF, v)
+#define SET_CB_PAR_TLBLKF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, TLBLKF, v)
+#define SET_CB_PAR_ATOT(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, ATOT, v)
+#define SET_CB_PAR_PLVL(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, PLVL, v)
+#define SET_CB_PAR_STAGE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, STAGE, v)
+
+#define GET_CB_PAR_F(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, F)
+#define GET_CB_PAR_SS(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, SS)
+#define GET_CB_PAR_OUTER(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, OUTER)
+#define GET_CB_PAR_INNER(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, INNER)
+#define GET_CB_PAR_SH(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, SH)
+#define GET_CB_PAR_NS(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, NS)
+#define GET_CB_PAR_NOS(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, NOS)
+#define GET_CB_PAR_PA(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, PA)
+#define GET_CB_PAR_TF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, TF)
+#define GET_CB_PAR_AFF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, AFF)
+#define GET_CB_PAR_PF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, PF)
+#define GET_CB_PAR_TLBMCF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, TLBMCF)
+#define GET_CB_PAR_TLBLKF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, TLBLKF)
+#define GET_CB_PAR_ATOT(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, ATOT)
+#define GET_CB_PAR_PLVL(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, PLVL)
+#define GET_CB_PAR_STAGE(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, STAGE)
+
+/* Primary Region Remap Register: CB_PRRR */
+#define SET_CB_PRRR_TR0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR0, v)
+#define SET_CB_PRRR_TR1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR1, v)
+#define SET_CB_PRRR_TR2(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR2, v)
+#define SET_CB_PRRR_TR3(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR3, v)
+#define SET_CB_PRRR_TR4(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR4, v)
+#define SET_CB_PRRR_TR5(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR5, v)
+#define SET_CB_PRRR_TR6(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR6, v)
+#define SET_CB_PRRR_TR7(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR7, v)
+#define SET_CB_PRRR_DS0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, DS0, v)
+#define SET_CB_PRRR_DS1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, DS1, v)
+#define SET_CB_PRRR_NS0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NS0, v)
+#define SET_CB_PRRR_NS1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NS1, v)
+#define SET_CB_PRRR_NOS0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS0, v)
+#define SET_CB_PRRR_NOS1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS1, v)
+#define SET_CB_PRRR_NOS2(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS2, v)
+#define SET_CB_PRRR_NOS3(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS3, v)
+#define SET_CB_PRRR_NOS4(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS4, v)
+#define SET_CB_PRRR_NOS5(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS5, v)
+#define SET_CB_PRRR_NOS6(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS6, v)
+#define SET_CB_PRRR_NOS7(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS7, v)
+
+#define GET_CB_PRRR_TR0(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR0)
+#define GET_CB_PRRR_TR1(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR1)
+#define GET_CB_PRRR_TR2(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR2)
+#define GET_CB_PRRR_TR3(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR3)
+#define GET_CB_PRRR_TR4(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR4)
+#define GET_CB_PRRR_TR5(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR5)
+#define GET_CB_PRRR_TR6(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR6)
+#define GET_CB_PRRR_TR7(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR7)
+#define GET_CB_PRRR_DS0(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, DS0)
+#define GET_CB_PRRR_DS1(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, DS1)
+#define GET_CB_PRRR_NS0(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NS0)
+#define GET_CB_PRRR_NS1(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NS1)
+#define GET_CB_PRRR_NOS0(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS0)
+#define GET_CB_PRRR_NOS1(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS1)
+#define GET_CB_PRRR_NOS2(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS2)
+#define GET_CB_PRRR_NOS3(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS3)
+#define GET_CB_PRRR_NOS4(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS4)
+#define GET_CB_PRRR_NOS5(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS5)
+#define GET_CB_PRRR_NOS6(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS6)
+#define GET_CB_PRRR_NOS7(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS7)
+
+/* Transaction Resume: CB_RESUME */
+#define SET_CB_RESUME_TNR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_RESUME, TNR, v)
+
+#define GET_CB_RESUME_TNR(b, c) GET_CONTEXT_FIELD(b, c, CB_RESUME, TNR)
+
+/* System Control Register: CB_SCTLR */
+#define SET_CB_SCTLR_M(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, M, v)
+#define SET_CB_SCTLR_TRE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, TRE, v)
+#define SET_CB_SCTLR_AFE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, AFE, v)
+#define SET_CB_SCTLR_AFFD(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, AFFD, v)
+#define SET_CB_SCTLR_E(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, E, v)
+#define SET_CB_SCTLR_CFRE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, CFRE, v)
+#define SET_CB_SCTLR_CFIE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, CFIE, v)
+#define SET_CB_SCTLR_CFCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, CFCFG, v)
+#define SET_CB_SCTLR_HUPCF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, HUPCF, v)
+#define SET_CB_SCTLR_WXN(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, WXN, v)
+#define SET_CB_SCTLR_UWXN(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, UWXN, v)
+#define SET_CB_SCTLR_ASIDPNE(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_SCTLR, ASIDPNE, v)
+#define SET_CB_SCTLR_TRANSIENTCFG(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_SCTLR, TRANSIENTCFG, v)
+#define SET_CB_SCTLR_MEMATTR(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_SCTLR, MEMATTR, v)
+#define SET_CB_SCTLR_MTCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, MTCFG, v)
+#define SET_CB_SCTLR_SHCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, SHCFG, v)
+#define SET_CB_SCTLR_RACFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, RACFG, v)
+#define SET_CB_SCTLR_WACFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, WACFG, v)
+#define SET_CB_SCTLR_NSCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, NSCFG, v)
+
+#define GET_CB_SCTLR_M(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, M)
+#define GET_CB_SCTLR_TRE(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, TRE)
+#define GET_CB_SCTLR_AFE(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, AFE)
+#define GET_CB_SCTLR_AFFD(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, AFFD)
+#define GET_CB_SCTLR_E(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, E)
+#define GET_CB_SCTLR_CFRE(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, CFRE)
+#define GET_CB_SCTLR_CFIE(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, CFIE)
+#define GET_CB_SCTLR_CFCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, CFCFG)
+#define GET_CB_SCTLR_HUPCF(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, HUPCF)
+#define GET_CB_SCTLR_WXN(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, WXN)
+#define GET_CB_SCTLR_UWXN(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, UWXN)
+#define GET_CB_SCTLR_ASIDPNE(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_SCTLR, ASIDPNE)
+#define GET_CB_SCTLR_TRANSIENTCFG(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_SCTLR, TRANSIENTCFG)
+#define GET_CB_SCTLR_MEMATTR(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_SCTLR, MEMATTR)
+#define GET_CB_SCTLR_MTCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, MTCFG)
+#define GET_CB_SCTLR_SHCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, SHCFG)
+#define GET_CB_SCTLR_RACFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, RACFG)
+#define GET_CB_SCTLR_WACFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, WACFG)
+#define GET_CB_SCTLR_NSCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, NSCFG)
+
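+/*
+ * Sketch of a minimal context-bank enable using the CB_SCTLR setters
+ * above (an assumed bring-up order for illustration, not code copied
+ * from this driver). Translation tables (CB_TTBRn/CB_TTBCR) and memory
+ * attributes must already be programmed before the MMU enable bit is
+ * set last:
+ *
+ *	SET_CB_SCTLR_CFRE(base, ctx, 1);
+ *	SET_CB_SCTLR_CFIE(base, ctx, 1);
+ *	SET_CB_SCTLR_AFE(base, ctx, 1);
+ *	SET_CB_SCTLR_M(base, ctx, 1);
+ */
+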
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define SET_CB_TLBIASID_ASID(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TLBIASID, ASID, v)
+
+#define GET_CB_TLBIASID_ASID(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_TLBIASID, ASID)
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define SET_CB_TLBIVA_ASID(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVA, ASID, v)
+#define SET_CB_TLBIVA_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVA, VA, v)
+
+#define GET_CB_TLBIVA_ASID(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVA, ASID)
+#define GET_CB_TLBIVA_VA(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVA, VA)
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define SET_CB_TLBIVAA_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVAA, VA, v)
+
+#define GET_CB_TLBIVAA_VA(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVAA, VA)
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define SET_CB_TLBIVAAL_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVAAL, VA, v)
+
+#define GET_CB_TLBIVAAL_VA(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVAAL, VA)
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define SET_CB_TLBIVAL_ASID(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TLBIVAL, ASID, v)
+#define SET_CB_TLBIVAL_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVAL, VA, v)
+
+#define GET_CB_TLBIVAL_ASID(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_TLBIVAL, ASID)
+#define GET_CB_TLBIVAL_VA(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVAL, VA)
+
+/* TLB Status: CB_TLBSTATUS */
+#define SET_CB_TLBSTATUS_SACTIVE(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TLBSTATUS, SACTIVE, v)
+
+#define GET_CB_TLBSTATUS_SACTIVE(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_TLBSTATUS, SACTIVE)
+
+/* Translation Table Base Control Register: CB_TTBCR */
+/* These are shared between VMSA and LPAE */
+#define GET_CB_TTBCR_EAE(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, EAE)
+#define SET_CB_TTBCR_EAE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, EAE, v)
+
+#define SET_CB_TTBCR_NSCFG0(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG0, v)
+#define SET_CB_TTBCR_NSCFG1(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG1, v)
+
+#define GET_CB_TTBCR_NSCFG0(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG0)
+#define GET_CB_TTBCR_NSCFG1(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG1)
+
+#ifdef CONFIG_IOMMU_LPAE
+
+/* LPAE format */
+
+/* Translation Table Base Registers 0 and 1: CB_TTBR0/CB_TTBR1 */
+#define SET_TTBR0(b, c, v) SET_CTX_REG_Q(CB_TTBR0, (b), (c), (v))
+#define SET_TTBR1(b, c, v) SET_CTX_REG_Q(CB_TTBR1, (b), (c), (v))
+
+#define SET_CB_TTBR0_ASID(b, c, v) SET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ASID, v)
+#define SET_CB_TTBR0_ADDR(b, c, v) SET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ADDR, v)
+
+#define GET_CB_TTBR0_ASID(b, c) GET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ASID)
+#define GET_CB_TTBR0_ADDR(b, c) GET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ADDR)
+#define GET_CB_TTBR0(b, c) GET_CTX_REG_Q(CB_TTBR0, (b), (c))
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define SET_CB_TTBCR_T0SZ(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, T0SZ, v)
+#define SET_CB_TTBCR_T1SZ(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, T1SZ, v)
+#define SET_CB_TTBCR_EPD0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, EPD0, v)
+#define SET_CB_TTBCR_EPD1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, EPD1, v)
+#define SET_CB_TTBCR_IRGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, IRGN0, v)
+#define SET_CB_TTBCR_IRGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, IRGN1, v)
+#define SET_CB_TTBCR_ORGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, ORGN0, v)
+#define SET_CB_TTBCR_ORGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, ORGN1, v)
+#define SET_CB_TTBCR_NSCFG0(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG0, v)
+#define SET_CB_TTBCR_NSCFG1(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG1, v)
+
+#define SET_CB_TTBCR_SH0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, SH0, v)
+#define SET_CB_TTBCR_SH1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, SH1, v)
+#define SET_CB_TTBCR_A1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, A1, v)
+
+#define GET_CB_TTBCR_T0SZ(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, T0SZ)
+#define GET_CB_TTBCR_T1SZ(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, T1SZ)
+#define GET_CB_TTBCR_EPD0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, EPD0)
+#define GET_CB_TTBCR_EPD1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, EPD1)
+#define GET_CB_TTBCR_IRGN0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, IRGN0)
+#define GET_CB_TTBCR_IRGN1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, IRGN1)
+#define GET_CB_TTBCR_ORGN0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, ORGN0)
+#define GET_CB_TTBCR_ORGN1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, ORGN1)
+
+#define SET_CB_MAIR0(b, c, v) SET_CTX_REG(CB_MAIR0, (b), (c), (v))
+#define SET_CB_MAIR1(b, c, v) SET_CTX_REG(CB_MAIR1, (b), (c), (v))
+
+#define GET_CB_MAIR0(b, c) GET_CTX_REG(CB_MAIR0, (b), (c))
+#define GET_CB_MAIR1(b, c) GET_CTX_REG(CB_MAIR1, (b), (c))
+#else
+#define SET_TTBR0(b, c, v) SET_CTX_REG(CB_TTBR0, (b), (c), (v))
+#define SET_TTBR1(b, c, v) SET_CTX_REG(CB_TTBR1, (b), (c), (v))
+
+#define SET_CB_TTBCR_PD0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, PD0, v)
+#define SET_CB_TTBCR_PD1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, PD1, v)
+
+#define SET_CB_TTBR0_IRGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN1, v)
+#define SET_CB_TTBR0_S(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, S, v)
+#define SET_CB_TTBR0_RGN(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, RGN, v)
+#define SET_CB_TTBR0_NOS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, NOS, v)
+#define SET_CB_TTBR0_IRGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN0, v)
+#define SET_CB_TTBR0_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, ADDR, v)
+
+#define GET_CB_TTBR0_IRGN1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN1)
+#define GET_CB_TTBR0_S(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, S)
+#define GET_CB_TTBR0_RGN(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, RGN)
+#define GET_CB_TTBR0_NOS(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, NOS)
+#define GET_CB_TTBR0_IRGN0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN0)
+#define GET_CB_TTBR0_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, ADDR)
+#endif
+
+/* Translation Table Base Register 1: CB_TTBR1 */
+#define SET_CB_TTBR1_IRGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN1, v)
+#define SET_CB_TTBR1_S(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, S, v)
+#define SET_CB_TTBR1_RGN(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, RGN, v)
+#define SET_CB_TTBR1_NOS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, NOS, v)
+#define SET_CB_TTBR1_IRGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN0, v)
+#define SET_CB_TTBR1_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, ADDR, v)
+
+#define GET_CB_TTBR1_IRGN1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN1)
+#define GET_CB_TTBR1_S(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, S)
+#define GET_CB_TTBR1_RGN(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, RGN)
+#define GET_CB_TTBR1_NOS(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, NOS)
+#define GET_CB_TTBR1_IRGN0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN0)
+#define GET_CB_TTBR1_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, ADDR)
+
+/* Global Register Space 0 */
+#define CR0 (0x0000)
+#define SCR1 (0x0004)
+#define CR2 (0x0008)
+#define ACR (0x0010)
+#define IDR0 (0x0020)
+#define IDR1 (0x0024)
+#define IDR2 (0x0028)
+#define IDR7 (0x003C)
+#define GFAR (0x0040)
+#define GFSR (0x0048)
+#define GFSRRESTORE (0x004C)
+#define GFSYNR0 (0x0050)
+#define GFSYNR1 (0x0054)
+#define GFSYNR2 (0x0058)
+#define TLBIVMID (0x0064)
+#define TLBIALLNSNH (0x0068)
+#define TLBIALLH (0x006C)
+#define TLBGSYNC (0x0070)
+#define TLBGSTATUS (0x0074)
+#define TLBIVAH (0x0078)
+#define GATS1UR (0x0100)
+#define GATS1UW (0x0108)
+#define GATS1PR (0x0110)
+#define GATS1PW (0x0118)
+#define GATS12UR (0x0120)
+#define GATS12UW (0x0128)
+#define GATS12PR (0x0130)
+#define GATS12PW (0x0138)
+#define GPAR (0x0180)
+#define GATSR (0x0188)
+#define NSCR0 (0x0400)
+#define NSCR2 (0x0408)
+#define NSACR (0x0410)
+#define NSGFAR (0x0440)
+#define NSGFSRRESTORE (0x044C)
+#define SMR (0x0800)
+#define S2CR (0x0C00)
+
+/* SMMU_LOCAL */
+#define SMMU_INTR_SEL_NS (0x2000)
+
+/* Global Register Space 1 */
+#define CBAR (0x1000)
+#define CBFRSYNRA (0x1400)
+
+/* Implementation defined Register Space */
+#define MICRO_MMU_CTRL (0x2000)
+#define PREDICTIONDIS0 (0x204C)
+#define PREDICTIONDIS1 (0x2050)
+#define S1L1BFBLP0 (0x215C)
+
+/* Performance Monitoring Register Space */
+#define PMEVCNTR_N (0x3000)
+#define PMEVTYPER_N (0x3400)
+#define PMCGCR_N (0x3800)
+#define PMCGSMR_N (0x3A00)
+#define PMCNTENSET_N (0x3C00)
+#define PMCNTENCLR_N (0x3C20)
+#define PMINTENSET_N (0x3C40)
+#define PMINTENCLR_N (0x3C60)
+#define PMOVSCLR_N (0x3C80)
+#define PMOVSSET_N (0x3CC0)
+#define PMCFGR (0x3E00)
+#define PMCR (0x3E04)
+#define PMCEID0 (0x3E20)
+#define PMCEID1 (0x3E24)
+#define PMAUTHSTATUS (0x3FB8)
+#define PMDEVTYPE (0x3FCC)
+
+/* Secure Status Determination Address Space */
+#define SSDR_N (0x4000)
+
+/* Stage 1 Context Bank Format */
+#define CB_SCTLR (0x000)
+#define CB_ACTLR (0x004)
+#define CB_RESUME (0x008)
+#define CB_TTBR0 (0x020)
+#define CB_TTBR1 (0x028)
+#define CB_TTBCR (0x030)
+#define CB_CONTEXTIDR (0x034)
+#define CB_PRRR (0x038)
+#define CB_MAIR0 (0x038)
+#define CB_NMRR (0x03C)
+#define CB_MAIR1 (0x03C)
+#define CB_PAR (0x050)
+#define CB_FSR (0x058)
+#define CB_FSRRESTORE (0x05C)
+#define CB_FAR (0x060)
+#define CB_FSYNR0 (0x068)
+#define CB_FSYNR1 (0x06C)
+#define CB_TLBIVA (0x600)
+#define CB_TLBIVAA (0x608)
+#define CB_TLBIASID (0x610)
+#define CB_TLBIALL (0x618)
+#define CB_TLBIVAL (0x620)
+#define CB_TLBIVAAL (0x628)
+#define CB_TLBSYNC (0x7F0)
+#define CB_TLBSTATUS (0x7F4)
+#define CB_ATS1PR (0x800)
+#define CB_ATS1PW (0x808)
+#define CB_ATS1UR (0x810)
+#define CB_ATS1UW (0x818)
+#define CB_ATSR (0x8F0)
+#define CB_PMXEVCNTR_N (0xE00)
+#define CB_PMXEVTYPER_N (0xE80)
+#define CB_PMCFGR (0xF00)
+#define CB_PMCR (0xF04)
+#define CB_PMCEID0 (0xF20)
+#define CB_PMCEID1 (0xF24)
+#define CB_PMCNTENSET (0xF40)
+#define CB_PMCNTENCLR (0xF44)
+#define CB_PMINTENSET (0xF48)
+#define CB_PMINTENCLR (0xF4C)
+#define CB_PMOVSCLR (0xF50)
+#define CB_PMOVSSET (0xF58)
+#define CB_PMAUTHSTATUS (0xFB8)
+
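+/*
+ * Each field macro in the sections below is simply the field's mask
+ * shifted into register position, i.e.
+ * REG_FIELD == (REG_FIELD_MASK << REG_FIELD_SHIFT), with the matching
+ * _MASK/_SHIFT pairs assumed to be defined elsewhere in this header.
+ * A hand-decoded fault bit then looks like (illustrative only):
+ *
+ *	u32 fsr = (GET_FSR(base, ctx));
+ *	bool tf = fsr & CB_FSR_TF;
+ */
+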
+/* Global Register Fields */
+/* Configuration Register: CR0 */
+#define CR0_NSCFG (CR0_NSCFG_MASK << CR0_NSCFG_SHIFT)
+#define CR0_WACFG (CR0_WACFG_MASK << CR0_WACFG_SHIFT)
+#define CR0_RACFG (CR0_RACFG_MASK << CR0_RACFG_SHIFT)
+#define CR0_SHCFG (CR0_SHCFG_MASK << CR0_SHCFG_SHIFT)
+#define CR0_SMCFCFG (CR0_SMCFCFG_MASK << CR0_SMCFCFG_SHIFT)
+#define CR0_MTCFG (CR0_MTCFG_MASK << CR0_MTCFG_SHIFT)
+#define CR0_MEMATTR (CR0_MEMATTR_MASK << CR0_MEMATTR_SHIFT)
+#define CR0_BSU (CR0_BSU_MASK << CR0_BSU_SHIFT)
+#define CR0_FB (CR0_FB_MASK << CR0_FB_SHIFT)
+#define CR0_PTM (CR0_PTM_MASK << CR0_PTM_SHIFT)
+#define CR0_VMIDPNE (CR0_VMIDPNE_MASK << CR0_VMIDPNE_SHIFT)
+#define CR0_USFCFG (CR0_USFCFG_MASK << CR0_USFCFG_SHIFT)
+#define CR0_GSE (CR0_GSE_MASK << CR0_GSE_SHIFT)
+#define CR0_STALLD (CR0_STALLD_MASK << CR0_STALLD_SHIFT)
+#define CR0_TRANSIENTCFG (CR0_TRANSIENTCFG_MASK << CR0_TRANSIENTCFG_SHIFT)
+#define CR0_GCFGFIE (CR0_GCFGFIE_MASK << CR0_GCFGFIE_SHIFT)
+#define CR0_GCFGFRE (CR0_GCFGFRE_MASK << CR0_GCFGFRE_SHIFT)
+#define CR0_GFIE (CR0_GFIE_MASK << CR0_GFIE_SHIFT)
+#define CR0_GFRE (CR0_GFRE_MASK << CR0_GFRE_SHIFT)
+#define CR0_CLIENTPD (CR0_CLIENTPD_MASK << CR0_CLIENTPD_SHIFT)
+
+/* Configuration Register: CR2 */
+#define CR2_BPVMID (CR2_BPVMID_MASK << CR2_BPVMID_SHIFT)
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define GATS1PR_ADDR (GATS1PR_ADDR_MASK << GATS1PR_ADDR_SHIFT)
+#define GATS1PR_NDX (GATS1PR_NDX_MASK << GATS1PR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define GATS1PW_ADDR (GATS1PW_ADDR_MASK << GATS1PW_ADDR_SHIFT)
+#define GATS1PW_NDX (GATS1PW_NDX_MASK << GATS1PW_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define GATS1UR_ADDR (GATS1UR_ADDR_MASK << GATS1UR_ADDR_SHIFT)
+#define GATS1UR_NDX (GATS1UR_NDX_MASK << GATS1UR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define GATS1UW_ADDR (GATS1UW_ADDR_MASK << GATS1UW_ADDR_SHIFT)
+#define GATS1UW_NDX (GATS1UW_NDX_MASK << GATS1UW_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define GATS12PR_ADDR (GATS12PR_ADDR_MASK << GATS12PR_ADDR_SHIFT)
+#define GATS12PR_NDX (GATS12PR_NDX_MASK << GATS12PR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define GATS12PW_ADDR (GATS12PW_ADDR_MASK << GATS12PW_ADDR_SHIFT)
+#define GATS12PW_NDX (GATS12PW_NDX_MASK << GATS12PW_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define GATS12UR_ADDR (GATS12UR_ADDR_MASK << GATS12UR_ADDR_SHIFT)
+#define GATS12UR_NDX (GATS12UR_NDX_MASK << GATS12UR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define GATS12UW_ADDR (GATS12UW_ADDR_MASK << GATS12UW_ADDR_SHIFT)
+#define GATS12UW_NDX (GATS12UW_NDX_MASK << GATS12UW_NDX_SHIFT)
+
+/* Global Address Translation Status Register: GATSR */
+#define GATSR_ACTIVE (GATSR_ACTIVE_MASK << GATSR_ACTIVE_SHIFT)
+
+/* Global Fault Address Register: GFAR */
+#define GFAR_FADDR (GFAR_FADDR_MASK << GFAR_FADDR_SHIFT)
+
+/* Global Fault Status Register: GFSR */
+#define GFSR_ICF (GFSR_ICF_MASK << GFSR_ICF_SHIFT)
+#define GFSR_USF (GFSR_USF_MASK << GFSR_USF_SHIFT)
+#define GFSR_SMCF (GFSR_SMCF_MASK << GFSR_SMCF_SHIFT)
+#define GFSR_UCBF (GFSR_UCBF_MASK << GFSR_UCBF_SHIFT)
+#define GFSR_UCIF (GFSR_UCIF_MASK << GFSR_UCIF_SHIFT)
+#define GFSR_CAF (GFSR_CAF_MASK << GFSR_CAF_SHIFT)
+#define GFSR_EF (GFSR_EF_MASK << GFSR_EF_SHIFT)
+#define GFSR_PF (GFSR_PF_MASK << GFSR_PF_SHIFT)
+#define GFSR_MULTI (GFSR_MULTI_MASK << GFSR_MULTI_SHIFT)
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define GFSYNR0_NESTED (GFSYNR0_NESTED_MASK << GFSYNR0_NESTED_SHIFT)
+#define GFSYNR0_WNR (GFSYNR0_WNR_MASK << GFSYNR0_WNR_SHIFT)
+#define GFSYNR0_PNU (GFSYNR0_PNU_MASK << GFSYNR0_PNU_SHIFT)
+#define GFSYNR0_IND (GFSYNR0_IND_MASK << GFSYNR0_IND_SHIFT)
+#define GFSYNR0_NSSTATE (GFSYNR0_NSSTATE_MASK << GFSYNR0_NSSTATE_SHIFT)
+#define GFSYNR0_NSATTR (GFSYNR0_NSATTR_MASK << GFSYNR0_NSATTR_SHIFT)
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define GFSYNR1_SID (GFSYNR1_SID_MASK << GFSYNR1_SID_SHIFT)
+
+/* Global Physical Address Register: GPAR */
+#define GPAR_F (GPAR_F_MASK << GPAR_F_SHIFT)
+#define GPAR_SS (GPAR_SS_MASK << GPAR_SS_SHIFT)
+#define GPAR_OUTER (GPAR_OUTER_MASK << GPAR_OUTER_SHIFT)
+#define GPAR_INNER (GPAR_INNER_MASK << GPAR_INNER_SHIFT)
+#define GPAR_SH (GPAR_SH_MASK << GPAR_SH_SHIFT)
+#define GPAR_NS (GPAR_NS_MASK << GPAR_NS_SHIFT)
+#define GPAR_NOS (GPAR_NOS_MASK << GPAR_NOS_SHIFT)
+#define GPAR_PA (GPAR_PA_MASK << GPAR_PA_SHIFT)
+#define GPAR_TF (GPAR_TF_MASK << GPAR_TF_SHIFT)
+#define GPAR_AFF (GPAR_AFF_MASK << GPAR_AFF_SHIFT)
+#define GPAR_PF (GPAR_PF_MASK << GPAR_PF_SHIFT)
+#define GPAR_EF (GPAR_EF_MASK << GPAR_EF_SHIFT)
+#define GPAR_TLCMCF (GPAR_TLCMCF_MASK << GPAR_TLCMCF_SHIFT)
+#define GPAR_TLBLKF (GPAR_TLBLKF_MASK << GPAR_TLBLKF_SHIFT)
+#define GPAR_UCBF (GPAR_UCBF_MASK << GPAR_UCBF_SHIFT)
+
+/* Identification Register: IDR0 */
+#define IDR0_NUMSMRG (IDR0_NUMSMRG_MASK << IDR0_NUMSMRG_SHIFT)
+#define IDR0_NUMSIDB (IDR0_NUMSIDB_MASK << IDR0_NUMSIDB_SHIFT)
+#define IDR0_BTM (IDR0_BTM_MASK << IDR0_BTM_SHIFT)
+#define IDR0_CTTW (IDR0_CTTW_MASK << IDR0_CTTW_SHIFT)
+#define IDR0_NUMIRPT (IDR0_NUMIRPT_MASK << IDR0_NUMIRPT_SHIFT)
+#define IDR0_PTFS (IDR0_PTFS_MASK << IDR0_PTFS_SHIFT)
+#define IDR0_SMS (IDR0_SMS_MASK << IDR0_SMS_SHIFT)
+#define IDR0_NTS (IDR0_NTS_MASK << IDR0_NTS_SHIFT)
+#define IDR0_S2TS (IDR0_S2TS_MASK << IDR0_S2TS_SHIFT)
+#define IDR0_S1TS (IDR0_S1TS_MASK << IDR0_S1TS_SHIFT)
+#define IDR0_SES (IDR0_SES_MASK << IDR0_SES_SHIFT)
+
+/* Identification Register: IDR1 */
+#define IDR1_NUMCB (IDR1_NUMCB_MASK << IDR1_NUMCB_SHIFT)
+#define IDR1_NUMSSDNDXB (IDR1_NUMSSDNDXB_MASK << IDR1_NUMSSDNDXB_SHIFT)
+#define IDR1_SSDTP (IDR1_SSDTP_MASK << IDR1_SSDTP_SHIFT)
+#define IDR1_SMCD (IDR1_SMCD_MASK << IDR1_SMCD_SHIFT)
+#define IDR1_NUMS2CB (IDR1_NUMS2CB_MASK << IDR1_NUMS2CB_SHIFT)
+#define IDR1_NUMPAGENDXB (IDR1_NUMPAGENDXB_MASK << IDR1_NUMPAGENDXB_SHIFT)
+#define IDR1_PAGESIZE (IDR1_PAGESIZE_MASK << IDR1_PAGESIZE_SHIFT)
+
+/* Identification Register: IDR2 */
+#define IDR2_IAS (IDR2_IAS_MASK << IDR2_IAS_SHIFT)
+#define IDR2_OAS (IDR2_OAS_MASK << IDR2_OAS_SHIFT)
+
+/* Identification Register: IDR7 */
+#define IDR7_MINOR (IDR7_MINOR_MASK << IDR7_MINOR_SHIFT)
+#define IDR7_MAJOR (IDR7_MAJOR_MASK << IDR7_MAJOR_SHIFT)
+
+/* Stream to Context Register: S2CR */
+#define S2CR_CBNDX (S2CR_CBNDX_MASK << S2CR_CBNDX_SHIFT)
+#define S2CR_SHCFG (S2CR_SHCFG_MASK << S2CR_SHCFG_SHIFT)
+#define S2CR_MTCFG (S2CR_MTCFG_MASK << S2CR_MTCFG_SHIFT)
+#define S2CR_MEMATTR (S2CR_MEMATTR_MASK << S2CR_MEMATTR_SHIFT)
+#define S2CR_TYPE (S2CR_TYPE_MASK << S2CR_TYPE_SHIFT)
+#define S2CR_NSCFG (S2CR_NSCFG_MASK << S2CR_NSCFG_SHIFT)
+#define S2CR_RACFG (S2CR_RACFG_MASK << S2CR_RACFG_SHIFT)
+#define S2CR_WACFG (S2CR_WACFG_MASK << S2CR_WACFG_SHIFT)
+#define S2CR_PRIVCFG (S2CR_PRIVCFG_MASK << S2CR_PRIVCFG_SHIFT)
+#define S2CR_INSTCFG (S2CR_INSTCFG_MASK << S2CR_INSTCFG_SHIFT)
+#define S2CR_TRANSIENTCFG (S2CR_TRANSIENTCFG_MASK << S2CR_TRANSIENTCFG_SHIFT)
+#define S2CR_VMID (S2CR_VMID_MASK << S2CR_VMID_SHIFT)
+#define S2CR_BSU (S2CR_BSU_MASK << S2CR_BSU_SHIFT)
+#define S2CR_FB (S2CR_FB_MASK << S2CR_FB_SHIFT)
+
+/* Stream Match Register: SMR */
+#define SMR_ID (SMR_ID_MASK << SMR_ID_SHIFT)
+#define SMR_MASK (SMR_MASK_MASK << SMR_MASK_SHIFT)
+#define SMR_VALID (SMR_VALID_MASK << SMR_VALID_SHIFT)
+
+/* Global TLB Status: TLBGSTATUS */
+#define TLBGSTATUS_GSACTIVE (TLBGSTATUS_GSACTIVE_MASK << \
+ TLBGSTATUS_GSACTIVE_SHIFT)
+
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define TLBIVAH_ADDR (TLBIVAH_ADDR_MASK << TLBIVAH_ADDR_SHIFT)
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define TLBIVMID_VMID (TLBIVMID_VMID_MASK << TLBIVMID_VMID_SHIFT)
+
+/* Context Bank Attribute Register: CBAR */
+#define CBAR_VMID (CBAR_VMID_MASK << CBAR_VMID_SHIFT)
+#define CBAR_CBNDX (CBAR_CBNDX_MASK << CBAR_CBNDX_SHIFT)
+#define CBAR_BPSHCFG (CBAR_BPSHCFG_MASK << CBAR_BPSHCFG_SHIFT)
+#define CBAR_HYPC (CBAR_HYPC_MASK << CBAR_HYPC_SHIFT)
+#define CBAR_FB (CBAR_FB_MASK << CBAR_FB_SHIFT)
+#define CBAR_MEMATTR (CBAR_MEMATTR_MASK << CBAR_MEMATTR_SHIFT)
+#define CBAR_TYPE (CBAR_TYPE_MASK << CBAR_TYPE_SHIFT)
+#define CBAR_BSU (CBAR_BSU_MASK << CBAR_BSU_SHIFT)
+#define CBAR_RACFG (CBAR_RACFG_MASK << CBAR_RACFG_SHIFT)
+#define CBAR_WACFG (CBAR_WACFG_MASK << CBAR_WACFG_SHIFT)
+#define CBAR_IRPTNDX (CBAR_IRPTNDX_MASK << CBAR_IRPTNDX_SHIFT)
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
+#define CBFRSYNRA_SID (CBFRSYNRA_SID_MASK << CBFRSYNRA_SID_SHIFT)
+
+/* Performance Monitoring Register Fields */
+
+/* Stage 1 Context Bank Format Fields */
+/* Auxiliary Control Register: CB_ACTLR */
+#define CB_ACTLR_REQPRIORITY \
+ (CB_ACTLR_REQPRIORITY_MASK << CB_ACTLR_REQPRIORITY_SHIFT)
+#define CB_ACTLR_REQPRIORITYCFG \
+ (CB_ACTLR_REQPRIORITYCFG_MASK << CB_ACTLR_REQPRIORITYCFG_SHIFT)
+#define CB_ACTLR_PRIVCFG (CB_ACTLR_PRIVCFG_MASK << CB_ACTLR_PRIVCFG_SHIFT)
+#define CB_ACTLR_BPRCOSH (CB_ACTLR_BPRCOSH_MASK << CB_ACTLR_BPRCOSH_SHIFT)
+#define CB_ACTLR_BPRCISH (CB_ACTLR_BPRCISH_MASK << CB_ACTLR_BPRCISH_SHIFT)
+#define CB_ACTLR_BPRCNSH (CB_ACTLR_BPRCNSH_MASK << CB_ACTLR_BPRCNSH_SHIFT)
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define CB_ATS1PR_ADDR (CB_ATS1PR_ADDR_MASK << CB_ATS1PR_ADDR_SHIFT)
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define CB_ATS1PW_ADDR (CB_ATS1PW_ADDR_MASK << CB_ATS1PW_ADDR_SHIFT)
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define CB_ATS1UR_ADDR (CB_ATS1UR_ADDR_MASK << CB_ATS1UR_ADDR_SHIFT)
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define CB_ATS1UW_ADDR (CB_ATS1UW_ADDR_MASK << CB_ATS1UW_ADDR_SHIFT)
+
+/* Address Translation Status Register: CB_ATSR */
+#define CB_ATSR_ACTIVE (CB_ATSR_ACTIVE_MASK << CB_ATSR_ACTIVE_SHIFT)
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define CB_CONTEXTIDR_ASID (CB_CONTEXTIDR_ASID_MASK << \
+ CB_CONTEXTIDR_ASID_SHIFT)
+#define CB_CONTEXTIDR_PROCID (CB_CONTEXTIDR_PROCID_MASK << \
+ CB_CONTEXTIDR_PROCID_SHIFT)
+
+/* Fault Address Register: CB_FAR */
+#define CB_FAR_FADDR (CB_FAR_FADDR_MASK << CB_FAR_FADDR_SHIFT)
+
+/* Fault Status Register: CB_FSR */
+#define CB_FSR_TF (CB_FSR_TF_MASK << CB_FSR_TF_SHIFT)
+#define CB_FSR_AFF (CB_FSR_AFF_MASK << CB_FSR_AFF_SHIFT)
+#define CB_FSR_PF (CB_FSR_PF_MASK << CB_FSR_PF_SHIFT)
+#define CB_FSR_EF (CB_FSR_EF_MASK << CB_FSR_EF_SHIFT)
+#define CB_FSR_TLBMCF (CB_FSR_TLBMCF_MASK << CB_FSR_TLBMCF_SHIFT)
+#define CB_FSR_TLBLKF (CB_FSR_TLBLKF_MASK << CB_FSR_TLBLKF_SHIFT)
+#define CB_FSR_SS (CB_FSR_SS_MASK << CB_FSR_SS_SHIFT)
+#define CB_FSR_MULTI (CB_FSR_MULTI_MASK << CB_FSR_MULTI_SHIFT)
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define CB_FSYNR0_PLVL (CB_FSYNR0_PLVL_MASK << CB_FSYNR0_PLVL_SHIFT)
+#define CB_FSYNR0_S1PTWF (CB_FSYNR0_S1PTWF_MASK << CB_FSYNR0_S1PTWF_SHIFT)
+#define CB_FSYNR0_WNR (CB_FSYNR0_WNR_MASK << CB_FSYNR0_WNR_SHIFT)
+#define CB_FSYNR0_PNU (CB_FSYNR0_PNU_MASK << CB_FSYNR0_PNU_SHIFT)
+#define CB_FSYNR0_IND (CB_FSYNR0_IND_MASK << CB_FSYNR0_IND_SHIFT)
+#define CB_FSYNR0_NSSTATE (CB_FSYNR0_NSSTATE_MASK << CB_FSYNR0_NSSTATE_SHIFT)
+#define CB_FSYNR0_NSATTR (CB_FSYNR0_NSATTR_MASK << CB_FSYNR0_NSATTR_SHIFT)
+#define CB_FSYNR0_ATOF (CB_FSYNR0_ATOF_MASK << CB_FSYNR0_ATOF_SHIFT)
+#define CB_FSYNR0_PTWF (CB_FSYNR0_PTWF_MASK << CB_FSYNR0_PTWF_SHIFT)
+#define CB_FSYNR0_AFR (CB_FSYNR0_AFR_MASK << CB_FSYNR0_AFR_SHIFT)
+#define CB_FSYNR0_S1CBNDX (CB_FSYNR0_S1CBNDX_MASK << CB_FSYNR0_S1CBNDX_SHIFT)
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define CB_NMRR_IR0 (CB_NMRR_IR0_MASK << CB_NMRR_IR0_SHIFT)
+#define CB_NMRR_IR1 (CB_NMRR_IR1_MASK << CB_NMRR_IR1_SHIFT)
+#define CB_NMRR_IR2 (CB_NMRR_IR2_MASK << CB_NMRR_IR2_SHIFT)
+#define CB_NMRR_IR3 (CB_NMRR_IR3_MASK << CB_NMRR_IR3_SHIFT)
+#define CB_NMRR_IR4 (CB_NMRR_IR4_MASK << CB_NMRR_IR4_SHIFT)
+#define CB_NMRR_IR5 (CB_NMRR_IR5_MASK << CB_NMRR_IR5_SHIFT)
+#define CB_NMRR_IR6 (CB_NMRR_IR6_MASK << CB_NMRR_IR6_SHIFT)
+#define CB_NMRR_IR7 (CB_NMRR_IR7_MASK << CB_NMRR_IR7_SHIFT)
+#define CB_NMRR_OR0 (CB_NMRR_OR0_MASK << CB_NMRR_OR0_SHIFT)
+#define CB_NMRR_OR1 (CB_NMRR_OR1_MASK << CB_NMRR_OR1_SHIFT)
+#define CB_NMRR_OR2 (CB_NMRR_OR2_MASK << CB_NMRR_OR2_SHIFT)
+#define CB_NMRR_OR3 (CB_NMRR_OR3_MASK << CB_NMRR_OR3_SHIFT)
+#define CB_NMRR_OR4 (CB_NMRR_OR4_MASK << CB_NMRR_OR4_SHIFT)
+#define CB_NMRR_OR5 (CB_NMRR_OR5_MASK << CB_NMRR_OR5_SHIFT)
+#define CB_NMRR_OR6 (CB_NMRR_OR6_MASK << CB_NMRR_OR6_SHIFT)
+#define CB_NMRR_OR7 (CB_NMRR_OR7_MASK << CB_NMRR_OR7_SHIFT)
+
+/* Physical Address Register: CB_PAR */
+#define CB_PAR_F (CB_PAR_F_MASK << CB_PAR_F_SHIFT)
+#define CB_PAR_SS (CB_PAR_SS_MASK << CB_PAR_SS_SHIFT)
+#define CB_PAR_OUTER (CB_PAR_OUTER_MASK << CB_PAR_OUTER_SHIFT)
+#define CB_PAR_INNER (CB_PAR_INNER_MASK << CB_PAR_INNER_SHIFT)
+#define CB_PAR_SH (CB_PAR_SH_MASK << CB_PAR_SH_SHIFT)
+#define CB_PAR_NS (CB_PAR_NS_MASK << CB_PAR_NS_SHIFT)
+#define CB_PAR_NOS (CB_PAR_NOS_MASK << CB_PAR_NOS_SHIFT)
+#define CB_PAR_PA (CB_PAR_PA_MASK << CB_PAR_PA_SHIFT)
+#define CB_PAR_TF (CB_PAR_TF_MASK << CB_PAR_TF_SHIFT)
+#define CB_PAR_AFF (CB_PAR_AFF_MASK << CB_PAR_AFF_SHIFT)
+#define CB_PAR_PF (CB_PAR_PF_MASK << CB_PAR_PF_SHIFT)
+#define CB_PAR_EF (CB_PAR_EF_MASK << CB_PAR_EF_SHIFT)
+#define CB_PAR_TLBMCF (CB_PAR_TLBMCF_MASK << CB_PAR_TLBMCF_SHIFT)
+#define CB_PAR_TLBLKF (CB_PAR_TLBLKF_MASK << CB_PAR_TLBLKF_SHIFT)
+#define CB_PAR_ATOT (CB_PAR_ATOT_MASK << CB_PAR_ATOT_SHIFT)
+#define CB_PAR_PLVL (CB_PAR_PLVL_MASK << CB_PAR_PLVL_SHIFT)
+#define CB_PAR_STAGE (CB_PAR_STAGE_MASK << CB_PAR_STAGE_SHIFT)
+
+/* Primary Region Remap Register: CB_PRRR */
+#define CB_PRRR_TR0 (CB_PRRR_TR0_MASK << CB_PRRR_TR0_SHIFT)
+#define CB_PRRR_TR1 (CB_PRRR_TR1_MASK << CB_PRRR_TR1_SHIFT)
+#define CB_PRRR_TR2 (CB_PRRR_TR2_MASK << CB_PRRR_TR2_SHIFT)
+#define CB_PRRR_TR3 (CB_PRRR_TR3_MASK << CB_PRRR_TR3_SHIFT)
+#define CB_PRRR_TR4 (CB_PRRR_TR4_MASK << CB_PRRR_TR4_SHIFT)
+#define CB_PRRR_TR5 (CB_PRRR_TR5_MASK << CB_PRRR_TR5_SHIFT)
+#define CB_PRRR_TR6 (CB_PRRR_TR6_MASK << CB_PRRR_TR6_SHIFT)
+#define CB_PRRR_TR7 (CB_PRRR_TR7_MASK << CB_PRRR_TR7_SHIFT)
+#define CB_PRRR_DS0 (CB_PRRR_DS0_MASK << CB_PRRR_DS0_SHIFT)
+#define CB_PRRR_DS1 (CB_PRRR_DS1_MASK << CB_PRRR_DS1_SHIFT)
+#define CB_PRRR_NS0 (CB_PRRR_NS0_MASK << CB_PRRR_NS0_SHIFT)
+#define CB_PRRR_NS1 (CB_PRRR_NS1_MASK << CB_PRRR_NS1_SHIFT)
+#define CB_PRRR_NOS0 (CB_PRRR_NOS0_MASK << CB_PRRR_NOS0_SHIFT)
+#define CB_PRRR_NOS1 (CB_PRRR_NOS1_MASK << CB_PRRR_NOS1_SHIFT)
+#define CB_PRRR_NOS2 (CB_PRRR_NOS2_MASK << CB_PRRR_NOS2_SHIFT)
+#define CB_PRRR_NOS3 (CB_PRRR_NOS3_MASK << CB_PRRR_NOS3_SHIFT)
+#define CB_PRRR_NOS4 (CB_PRRR_NOS4_MASK << CB_PRRR_NOS4_SHIFT)
+#define CB_PRRR_NOS5 (CB_PRRR_NOS5_MASK << CB_PRRR_NOS5_SHIFT)
+#define CB_PRRR_NOS6 (CB_PRRR_NOS6_MASK << CB_PRRR_NOS6_SHIFT)
+#define CB_PRRR_NOS7 (CB_PRRR_NOS7_MASK << CB_PRRR_NOS7_SHIFT)
+
+/* Transaction Resume: CB_RESUME */
+#define CB_RESUME_TNR (CB_RESUME_TNR_MASK << CB_RESUME_TNR_SHIFT)
+
+/* System Control Register: CB_SCTLR */
+#define CB_SCTLR_M (CB_SCTLR_M_MASK << CB_SCTLR_M_SHIFT)
+#define CB_SCTLR_TRE (CB_SCTLR_TRE_MASK << CB_SCTLR_TRE_SHIFT)
+#define CB_SCTLR_AFE (CB_SCTLR_AFE_MASK << CB_SCTLR_AFE_SHIFT)
+#define CB_SCTLR_AFFD (CB_SCTLR_AFFD_MASK << CB_SCTLR_AFFD_SHIFT)
+#define CB_SCTLR_E (CB_SCTLR_E_MASK << CB_SCTLR_E_SHIFT)
+#define CB_SCTLR_CFRE (CB_SCTLR_CFRE_MASK << CB_SCTLR_CFRE_SHIFT)
+#define CB_SCTLR_CFIE (CB_SCTLR_CFIE_MASK << CB_SCTLR_CFIE_SHIFT)
+#define CB_SCTLR_CFCFG (CB_SCTLR_CFCFG_MASK << CB_SCTLR_CFCFG_SHIFT)
+#define CB_SCTLR_HUPCF (CB_SCTLR_HUPCF_MASK << CB_SCTLR_HUPCF_SHIFT)
+#define CB_SCTLR_WXN (CB_SCTLR_WXN_MASK << CB_SCTLR_WXN_SHIFT)
+#define CB_SCTLR_UWXN (CB_SCTLR_UWXN_MASK << CB_SCTLR_UWXN_SHIFT)
+#define CB_SCTLR_ASIDPNE (CB_SCTLR_ASIDPNE_MASK << CB_SCTLR_ASIDPNE_SHIFT)
+#define CB_SCTLR_TRANSIENTCFG (CB_SCTLR_TRANSIENTCFG_MASK << \
+ CB_SCTLR_TRANSIENTCFG_SHIFT)
+#define CB_SCTLR_MEMATTR (CB_SCTLR_MEMATTR_MASK << CB_SCTLR_MEMATTR_SHIFT)
+#define CB_SCTLR_MTCFG (CB_SCTLR_MTCFG_MASK << CB_SCTLR_MTCFG_SHIFT)
+#define CB_SCTLR_SHCFG (CB_SCTLR_SHCFG_MASK << CB_SCTLR_SHCFG_SHIFT)
+#define CB_SCTLR_RACFG (CB_SCTLR_RACFG_MASK << CB_SCTLR_RACFG_SHIFT)
+#define CB_SCTLR_WACFG (CB_SCTLR_WACFG_MASK << CB_SCTLR_WACFG_SHIFT)
+#define CB_SCTLR_NSCFG (CB_SCTLR_NSCFG_MASK << CB_SCTLR_NSCFG_SHIFT)
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define CB_TLBIASID_ASID (CB_TLBIASID_ASID_MASK << CB_TLBIASID_ASID_SHIFT)
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define CB_TLBIVA_ASID (CB_TLBIVA_ASID_MASK << CB_TLBIVA_ASID_SHIFT)
+#define CB_TLBIVA_VA (CB_TLBIVA_VA_MASK << CB_TLBIVA_VA_SHIFT)
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define CB_TLBIVAA_VA (CB_TLBIVAA_VA_MASK << CB_TLBIVAA_VA_SHIFT)
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define CB_TLBIVAAL_VA (CB_TLBIVAAL_VA_MASK << CB_TLBIVAAL_VA_SHIFT)
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define CB_TLBIVAL_ASID (CB_TLBIVAL_ASID_MASK << CB_TLBIVAL_ASID_SHIFT)
+#define CB_TLBIVAL_VA (CB_TLBIVAL_VA_MASK << CB_TLBIVAL_VA_SHIFT)
+
+/* TLB Status: CB_TLBSTATUS */
+#define CB_TLBSTATUS_SACTIVE (CB_TLBSTATUS_SACTIVE_MASK << \
+ CB_TLBSTATUS_SACTIVE_SHIFT)
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_EAE (CB_TTBCR_EAE_MASK << CB_TTBCR_EAE_SHIFT)
+
+#define CB_TTBR0_ADDR (CB_TTBR0_ADDR_MASK << CB_TTBR0_ADDR_SHIFT)
+
+#ifdef CONFIG_IOMMU_LPAE
+/* Translation Table Base Register: CB_TTBR */
+#define CB_TTBR0_ASID (CB_TTBR0_ASID_MASK << CB_TTBR0_ASID_SHIFT)
+#define CB_TTBR1_ASID (CB_TTBR1_ASID_MASK << CB_TTBR1_ASID_SHIFT)
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_T0SZ (CB_TTBCR_T0SZ_MASK << CB_TTBCR_T0SZ_SHIFT)
+#define CB_TTBCR_T1SZ (CB_TTBCR_T1SZ_MASK << CB_TTBCR_T1SZ_SHIFT)
+#define CB_TTBCR_EPD0 (CB_TTBCR_EPD0_MASK << CB_TTBCR_EPD0_SHIFT)
+#define CB_TTBCR_EPD1 (CB_TTBCR_EPD1_MASK << CB_TTBCR_EPD1_SHIFT)
+#define CB_TTBCR_IRGN0 (CB_TTBCR_IRGN0_MASK << CB_TTBCR_IRGN0_SHIFT)
+#define CB_TTBCR_IRGN1 (CB_TTBCR_IRGN1_MASK << CB_TTBCR_IRGN1_SHIFT)
+#define CB_TTBCR_ORGN0 (CB_TTBCR_ORGN0_MASK << CB_TTBCR_ORGN0_SHIFT)
+#define CB_TTBCR_ORGN1 (CB_TTBCR_ORGN1_MASK << CB_TTBCR_ORGN1_SHIFT)
+#define CB_TTBCR_NSCFG0 (CB_TTBCR_NSCFG0_MASK << CB_TTBCR_NSCFG0_SHIFT)
+#define CB_TTBCR_NSCFG1 (CB_TTBCR_NSCFG1_MASK << CB_TTBCR_NSCFG1_SHIFT)
+#define CB_TTBCR_SH0 (CB_TTBCR_SH0_MASK << CB_TTBCR_SH0_SHIFT)
+#define CB_TTBCR_SH1 (CB_TTBCR_SH1_MASK << CB_TTBCR_SH1_SHIFT)
+#define CB_TTBCR_A1 (CB_TTBCR_A1_MASK << CB_TTBCR_A1_SHIFT)
+
+#else
+
+/* Translation Table Base Register 0: CB_TTBR0 */
+#define CB_TTBR0_IRGN1 (CB_TTBR0_IRGN1_MASK << CB_TTBR0_IRGN1_SHIFT)
+#define CB_TTBR0_S (CB_TTBR0_S_MASK << CB_TTBR0_S_SHIFT)
+#define CB_TTBR0_RGN (CB_TTBR0_RGN_MASK << CB_TTBR0_RGN_SHIFT)
+#define CB_TTBR0_NOS (CB_TTBR0_NOS_MASK << CB_TTBR0_NOS_SHIFT)
+#define CB_TTBR0_IRGN0 (CB_TTBR0_IRGN0_MASK << CB_TTBR0_IRGN0_SHIFT)
+
+/* Translation Table Base Register 1: CB_TTBR1 */
+#define CB_TTBR1_IRGN1 (CB_TTBR1_IRGN1_MASK << CB_TTBR1_IRGN1_SHIFT)
+#define CB_TTBR1_S (CB_TTBR1_S_MASK << CB_TTBR1_S_SHIFT)
+#define CB_TTBR1_RGN (CB_TTBR1_RGN_MASK << CB_TTBR1_RGN_SHIFT)
+#define CB_TTBR1_NOS (CB_TTBR1_NOS_MASK << CB_TTBR1_NOS_SHIFT)
+#define CB_TTBR1_IRGN0 (CB_TTBR1_IRGN0_MASK << CB_TTBR1_IRGN0_SHIFT)
+#endif
+
+/* Global Register Masks */
+/* Configuration Register 0 */
+#define CR0_NSCFG_MASK 0x03
+#define CR0_WACFG_MASK 0x03
+#define CR0_RACFG_MASK 0x03
+#define CR0_SHCFG_MASK 0x03
+#define CR0_SMCFCFG_MASK 0x01
+#define NSCR0_SMCFCFG_MASK 0x01
+#define CR0_MTCFG_MASK 0x01
+#define CR0_MEMATTR_MASK 0x0F
+#define CR0_BSU_MASK 0x03
+#define CR0_FB_MASK 0x01
+#define CR0_PTM_MASK 0x01
+#define CR0_VMIDPNE_MASK 0x01
+#define CR0_USFCFG_MASK 0x01
+#define NSCR0_USFCFG_MASK 0x01
+#define CR0_GSE_MASK 0x01
+#define CR0_STALLD_MASK 0x01
+#define NSCR0_STALLD_MASK 0x01
+#define CR0_TRANSIENTCFG_MASK 0x03
+#define CR0_GCFGFIE_MASK 0x01
+#define NSCR0_GCFGFIE_MASK 0x01
+#define CR0_GCFGFRE_MASK 0x01
+#define NSCR0_GCFGFRE_MASK 0x01
+#define CR0_GFIE_MASK 0x01
+#define NSCR0_GFIE_MASK 0x01
+#define CR0_GFRE_MASK 0x01
+#define NSCR0_GFRE_MASK 0x01
+#define CR0_CLIENTPD_MASK 0x01
+#define NSCR0_CLIENTPD_MASK 0x01
+
+/* ACR */
+#define ACR_SMTNMC_BPTLBEN_MASK 0x01
+#define ACR_MMUDIS_BPTLBEN_MASK 0x01
+#define ACR_S2CR_BPTLBEN_MASK 0x01
+
+/* NSACR */
+#define NSACR_SMTNMC_BPTLBEN_MASK 0x01
+#define NSACR_MMUDIS_BPTLBEN_MASK 0x01
+#define NSACR_S2CR_BPTLBEN_MASK 0x01
+
+/* Configuration Register 2 */
+#define CR2_BPVMID_MASK 0xFF
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define GATS1PR_ADDR_MASK 0xFFFFF
+#define GATS1PR_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define GATS1PW_ADDR_MASK 0xFFFFF
+#define GATS1PW_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define GATS1UR_ADDR_MASK 0xFFFFF
+#define GATS1UR_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define GATS1UW_ADDR_MASK 0xFFFFF
+#define GATS1UW_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define GATS12PR_ADDR_MASK 0xFFFFF
+#define GATS12PR_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define GATS12PW_ADDR_MASK 0xFFFFF
+#define GATS12PW_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define GATS12UR_ADDR_MASK 0xFFFFF
+#define GATS12UR_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define GATS12UW_ADDR_MASK 0xFFFFF
+#define GATS12UW_NDX_MASK 0xFF
+
+/* Global Address Translation Status Register: GATSR */
+#define GATSR_ACTIVE_MASK 0x01
+
+/* Global Fault Address Register: GFAR */
+#define GFAR_FADDR_MASK 0xFFFFFFFF
+
+/* Global Fault Status Register: GFSR */
+#define GFSR_ICF_MASK 0x01
+#define GFSR_USF_MASK 0x01
+#define GFSR_SMCF_MASK 0x01
+#define GFSR_UCBF_MASK 0x01
+#define GFSR_UCIF_MASK 0x01
+#define GFSR_CAF_MASK 0x01
+#define GFSR_EF_MASK 0x01
+#define GFSR_PF_MASK 0x01
+#define GFSR_MULTI_MASK 0x01
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define GFSYNR0_NESTED_MASK 0x01
+#define GFSYNR0_WNR_MASK 0x01
+#define GFSYNR0_PNU_MASK 0x01
+#define GFSYNR0_IND_MASK 0x01
+#define GFSYNR0_NSSTATE_MASK 0x01
+#define GFSYNR0_NSATTR_MASK 0x01
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define GFSYNR1_SID_MASK 0x7FFF
+#define GFSYNR1_SSD_IDX_MASK 0x7FFF
+
+/* Global Physical Address Register: GPAR */
+#define GPAR_F_MASK 0x01
+#define GPAR_SS_MASK 0x01
+#define GPAR_OUTER_MASK 0x03
+#define GPAR_INNER_MASK 0x03
+#define GPAR_SH_MASK 0x01
+#define GPAR_NS_MASK 0x01
+#define GPAR_NOS_MASK 0x01
+#define GPAR_PA_MASK 0xFFFFF
+#define GPAR_TF_MASK 0x01
+#define GPAR_AFF_MASK 0x01
+#define GPAR_PF_MASK 0x01
+#define GPAR_EF_MASK 0x01
+#define GPAR_TLBMCF_MASK 0x01
+#define GPAR_TLBLKF_MASK 0x01
+#define GPAR_UCBF_MASK 0x01
+
+/* Identification Register: IDR0 */
+#define IDR0_NUMSMRG_MASK 0xFF
+#define IDR0_NUMSIDB_MASK 0x0F
+#define IDR0_BTM_MASK 0x01
+#define IDR0_CTTW_MASK 0x01
+#define IDR0_NUMIRPT_MASK 0xFF
+#define IDR0_PTFS_MASK 0x01
+#define IDR0_SMS_MASK 0x01
+#define IDR0_NTS_MASK 0x01
+#define IDR0_S2TS_MASK 0x01
+#define IDR0_S1TS_MASK 0x01
+#define IDR0_SES_MASK 0x01
+
+/* Identification Register: IDR1 */
+#define IDR1_NUMCB_MASK 0xFF
+#define IDR1_NUMSSDNDXB_MASK 0x0F
+#define IDR1_SSDTP_MASK 0x01
+#define IDR1_SMCD_MASK 0x01
+#define IDR1_NUMS2CB_MASK 0xFF
+#define IDR1_NUMPAGENDXB_MASK 0x07
+#define IDR1_PAGESIZE_MASK 0x01
+
+/* Identification Register: IDR2 */
+#define IDR2_IAS_MASK 0x0F
+#define IDR2_OAS_MASK 0x0F
+
+/* Identification Register: IDR7 */
+#define IDR7_MINOR_MASK 0x0F
+#define IDR7_MAJOR_MASK 0x0F
+
+/* Stream to Context Register: S2CR */
+#define S2CR_CBNDX_MASK 0xFF
+#define S2CR_SHCFG_MASK 0x03
+#define S2CR_MTCFG_MASK 0x01
+#define S2CR_MEMATTR_MASK 0x0F
+#define S2CR_TYPE_MASK 0x03
+#define S2CR_NSCFG_MASK 0x03
+#define S2CR_RACFG_MASK 0x03
+#define S2CR_WACFG_MASK 0x03
+#define S2CR_PRIVCFG_MASK 0x03
+#define S2CR_INSTCFG_MASK 0x03
+#define S2CR_TRANSIENTCFG_MASK 0x03
+#define S2CR_VMID_MASK 0xFF
+#define S2CR_BSU_MASK 0x03
+#define S2CR_FB_MASK 0x01
+
+/* Stream Match Register: SMR */
+#define SMR_ID_MASK 0x7FFF
+#define SMR_MASK_MASK 0x7FFF
+#define SMR_VALID_MASK 0x01
+
+/* Global TLB Status: TLBGSTATUS */
+#define TLBGSTATUS_GSACTIVE_MASK 0x01
+
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define TLBIVAH_ADDR_MASK 0xFFFFF
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define TLBIVMID_VMID_MASK 0xFF
+
+/* Global Register Space 1 Mask */
+/* Context Bank Attribute Register: CBAR */
+#define CBAR_VMID_MASK 0xFF
+#define CBAR_CBNDX_MASK 0x03
+#define CBAR_BPSHCFG_MASK 0x03
+#define CBAR_HYPC_MASK 0x01
+#define CBAR_FB_MASK 0x01
+#define CBAR_MEMATTR_MASK 0x0F
+#define CBAR_TYPE_MASK 0x03
+#define CBAR_BSU_MASK 0x03
+#define CBAR_RACFG_MASK 0x03
+#define CBAR_WACFG_MASK 0x03
+#define CBAR_IRPTNDX_MASK 0xFF
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
+#define CBFRSYNRA_SID_MASK 0x7FFF
+
+/* Implementation defined register space masks */
+#define MICRO_MMU_CTRL_RESERVED_MASK 0x03
+#define MICRO_MMU_CTRL_HALT_REQ_MASK 0x01
+#define MICRO_MMU_CTRL_IDLE_MASK 0x01
+
+/* Stage 1 Context Bank Format Masks */
+/* Auxiliary Control Register: CB_ACTLR */
+#define CB_ACTLR_REQPRIORITY_MASK 0x3
+#define CB_ACTLR_REQPRIORITYCFG_MASK 0x1
+#define CB_ACTLR_PRIVCFG_MASK 0x3
+#define CB_ACTLR_BPRCOSH_MASK 0x1
+#define CB_ACTLR_BPRCISH_MASK 0x1
+#define CB_ACTLR_BPRCNSH_MASK 0x1
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define CB_ATS1PR_ADDR_MASK 0xFFFFF
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define CB_ATS1PW_ADDR_MASK 0xFFFFF
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define CB_ATS1UR_ADDR_MASK 0xFFFFF
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define CB_ATS1UW_ADDR_MASK 0xFFFFF
+
+/* Address Translation Status Register: CB_ATSR */
+#define CB_ATSR_ACTIVE_MASK 0x01
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define CB_CONTEXTIDR_ASID_MASK 0xFF
+#define CB_CONTEXTIDR_PROCID_MASK 0xFFFFFF
+
+/* Fault Address Register: CB_FAR */
+#define CB_FAR_FADDR_MASK 0xFFFFFFFF
+
+/* Fault Status Register: CB_FSR */
+#define CB_FSR_TF_MASK 0x01
+#define CB_FSR_AFF_MASK 0x01
+#define CB_FSR_PF_MASK 0x01
+#define CB_FSR_EF_MASK 0x01
+#define CB_FSR_TLBMCF_MASK 0x01
+#define CB_FSR_TLBLKF_MASK 0x01
+#define CB_FSR_SS_MASK 0x01
+#define CB_FSR_MULTI_MASK 0x01
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define CB_FSYNR0_PLVL_MASK 0x03
+#define CB_FSYNR0_S1PTWF_MASK 0x01
+#define CB_FSYNR0_WNR_MASK 0x01
+#define CB_FSYNR0_PNU_MASK 0x01
+#define CB_FSYNR0_IND_MASK 0x01
+#define CB_FSYNR0_NSSTATE_MASK 0x01
+#define CB_FSYNR0_NSATTR_MASK 0x01
+#define CB_FSYNR0_ATOF_MASK 0x01
+#define CB_FSYNR0_PTWF_MASK 0x01
+#define CB_FSYNR0_AFR_MASK 0x01
+#define CB_FSYNR0_S1CBNDX_MASK 0xFF
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define CB_NMRR_IR0_MASK 0x03
+#define CB_NMRR_IR1_MASK 0x03
+#define CB_NMRR_IR2_MASK 0x03
+#define CB_NMRR_IR3_MASK 0x03
+#define CB_NMRR_IR4_MASK 0x03
+#define CB_NMRR_IR5_MASK 0x03
+#define CB_NMRR_IR6_MASK 0x03
+#define CB_NMRR_IR7_MASK 0x03
+#define CB_NMRR_OR0_MASK 0x03
+#define CB_NMRR_OR1_MASK 0x03
+#define CB_NMRR_OR2_MASK 0x03
+#define CB_NMRR_OR3_MASK 0x03
+#define CB_NMRR_OR4_MASK 0x03
+#define CB_NMRR_OR5_MASK 0x03
+#define CB_NMRR_OR6_MASK 0x03
+#define CB_NMRR_OR7_MASK 0x03
+
+/* Physical Address Register: CB_PAR */
+#define CB_PAR_F_MASK 0x01
+#define CB_PAR_SS_MASK 0x01
+#define CB_PAR_OUTER_MASK 0x03
+#define CB_PAR_INNER_MASK 0x07
+#define CB_PAR_SH_MASK 0x01
+#define CB_PAR_NS_MASK 0x01
+#define CB_PAR_NOS_MASK 0x01
+#define CB_PAR_PA_MASK 0xFFFFF
+#define CB_PAR_TF_MASK 0x01
+#define CB_PAR_AFF_MASK 0x01
+#define CB_PAR_PF_MASK 0x01
+#define CB_PAR_EF_MASK 0x01
+#define CB_PAR_TLBMCF_MASK 0x01
+#define CB_PAR_TLBLKF_MASK 0x01
+#define CB_PAR_ATOT_MASK 0x01ULL
+#define CB_PAR_PLVL_MASK 0x03ULL
+#define CB_PAR_STAGE_MASK 0x01ULL
+
+/* Primary Region Remap Register: CB_PRRR */
+#define CB_PRRR_TR0_MASK 0x03
+#define CB_PRRR_TR1_MASK 0x03
+#define CB_PRRR_TR2_MASK 0x03
+#define CB_PRRR_TR3_MASK 0x03
+#define CB_PRRR_TR4_MASK 0x03
+#define CB_PRRR_TR5_MASK 0x03
+#define CB_PRRR_TR6_MASK 0x03
+#define CB_PRRR_TR7_MASK 0x03
+#define CB_PRRR_DS0_MASK 0x01
+#define CB_PRRR_DS1_MASK 0x01
+#define CB_PRRR_NS0_MASK 0x01
+#define CB_PRRR_NS1_MASK 0x01
+#define CB_PRRR_NOS0_MASK 0x01
+#define CB_PRRR_NOS1_MASK 0x01
+#define CB_PRRR_NOS2_MASK 0x01
+#define CB_PRRR_NOS3_MASK 0x01
+#define CB_PRRR_NOS4_MASK 0x01
+#define CB_PRRR_NOS5_MASK 0x01
+#define CB_PRRR_NOS6_MASK 0x01
+#define CB_PRRR_NOS7_MASK 0x01
+
+/* Transaction Resume: CB_RESUME */
+#define CB_RESUME_TNR_MASK 0x01
+
+/* System Control Register: CB_SCTLR */
+#define CB_SCTLR_M_MASK 0x01
+#define CB_SCTLR_TRE_MASK 0x01
+#define CB_SCTLR_AFE_MASK 0x01
+#define CB_SCTLR_AFFD_MASK 0x01
+#define CB_SCTLR_E_MASK 0x01
+#define CB_SCTLR_CFRE_MASK 0x01
+#define CB_SCTLR_CFIE_MASK 0x01
+#define CB_SCTLR_CFCFG_MASK 0x01
+#define CB_SCTLR_HUPCF_MASK 0x01
+#define CB_SCTLR_WXN_MASK 0x01
+#define CB_SCTLR_UWXN_MASK 0x01
+#define CB_SCTLR_ASIDPNE_MASK 0x01
+#define CB_SCTLR_TRANSIENTCFG_MASK 0x03
+#define CB_SCTLR_MEMATTR_MASK 0x0F
+#define CB_SCTLR_MTCFG_MASK 0x01
+#define CB_SCTLR_SHCFG_MASK 0x03
+#define CB_SCTLR_RACFG_MASK 0x03
+#define CB_SCTLR_WACFG_MASK 0x03
+#define CB_SCTLR_NSCFG_MASK 0x03
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define CB_TLBIASID_ASID_MASK 0xFF
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define CB_TLBIVA_ASID_MASK 0xFF
+#define CB_TLBIVA_VA_MASK 0xFFFFF
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define CB_TLBIVAA_VA_MASK 0xFFFFF
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define CB_TLBIVAAL_VA_MASK 0xFFFFF
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define CB_TLBIVAL_ASID_MASK 0xFF
+#define CB_TLBIVAL_VA_MASK 0xFFFFF
+
+/* TLB Status: CB_TLBSTATUS */
+#define CB_TLBSTATUS_SACTIVE_MASK 0x01
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_T0SZ_MASK 0x07
+#define CB_TTBCR_T1SZ_MASK 0x07
+#define CB_TTBCR_EPD0_MASK 0x01
+#define CB_TTBCR_EPD1_MASK 0x01
+#define CB_TTBCR_IRGN0_MASK 0x03
+#define CB_TTBCR_IRGN1_MASK 0x03
+#define CB_TTBCR_ORGN0_MASK 0x03
+#define CB_TTBCR_ORGN1_MASK 0x03
+#define CB_TTBCR_NSCFG0_MASK 0x01
+#define CB_TTBCR_NSCFG1_MASK 0x01
+#define CB_TTBCR_SH0_MASK 0x03
+#define CB_TTBCR_SH1_MASK 0x03
+#define CB_TTBCR_A1_MASK 0x01
+#define CB_TTBCR_EAE_MASK 0x01
+
+/* Translation Table Base Register 0/1: CB_TTBR */
+#ifdef CONFIG_IOMMU_LPAE
+#define CB_TTBR0_ADDR_MASK 0x7FFFFFFFFULL
+#define CB_TTBR0_ASID_MASK 0xFF
+#define CB_TTBR1_ASID_MASK 0xFF
+#else
+#define CB_TTBR0_IRGN1_MASK 0x01
+#define CB_TTBR0_S_MASK 0x01
+#define CB_TTBR0_RGN_MASK 0x01
+#define CB_TTBR0_NOS_MASK 0x01
+#define CB_TTBR0_IRGN0_MASK 0x01
+#define CB_TTBR0_ADDR_MASK 0xFFFFFF
+
+#define CB_TTBR1_IRGN1_MASK 0x01
+#define CB_TTBR1_S_MASK 0x01
+#define CB_TTBR1_RGN_MASK 0x01
+#define CB_TTBR1_NOS_MASK 0x01
+#define CB_TTBR1_IRGN0_MASK 0x01
+#endif
+
+/* Global Register Shifts */
+/* Configuration Register: CR0 */
+#define CR0_NSCFG_SHIFT 28
+#define CR0_WACFG_SHIFT 26
+#define CR0_RACFG_SHIFT 24
+#define CR0_SHCFG_SHIFT 22
+#define CR0_SMCFCFG_SHIFT 21
+#define NSCR0_SMCFCFG_SHIFT 21
+#define CR0_MTCFG_SHIFT 20
+#define CR0_MEMATTR_SHIFT 16
+#define CR0_BSU_SHIFT 14
+#define CR0_FB_SHIFT 13
+#define CR0_PTM_SHIFT 12
+#define CR0_VMIDPNE_SHIFT 11
+#define CR0_USFCFG_SHIFT 10
+#define NSCR0_USFCFG_SHIFT 10
+#define CR0_GSE_SHIFT 9
+#define CR0_STALLD_SHIFT 8
+#define NSCR0_STALLD_SHIFT 8
+#define CR0_TRANSIENTCFG_SHIFT 6
+#define CR0_GCFGFIE_SHIFT 5
+#define NSCR0_GCFGFIE_SHIFT 5
+#define CR0_GCFGFRE_SHIFT 4
+#define NSCR0_GCFGFRE_SHIFT 4
+#define CR0_GFIE_SHIFT 2
+#define NSCR0_GFIE_SHIFT 2
+#define CR0_GFRE_SHIFT 1
+#define NSCR0_GFRE_SHIFT 1
+#define CR0_CLIENTPD_SHIFT 0
+#define NSCR0_CLIENTPD_SHIFT 0
+
+/* ACR */
+#define ACR_SMTNMC_BPTLBEN_SHIFT 8
+#define ACR_MMUDIS_BPTLBEN_SHIFT 9
+#define ACR_S2CR_BPTLBEN_SHIFT 10
+
+/* NSACR */
+#define NSACR_SMTNMC_BPTLBEN_SHIFT 8
+#define NSACR_MMUDIS_BPTLBEN_SHIFT 9
+#define NSACR_S2CR_BPTLBEN_SHIFT 10
+
+/* Configuration Register: CR2 */
+#define CR2_BPVMID_SHIFT 0
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define GATS1PR_ADDR_SHIFT 12
+#define GATS1PR_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define GATS1PW_ADDR_SHIFT 12
+#define GATS1PW_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define GATS1UR_ADDR_SHIFT 12
+#define GATS1UR_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define GATS1UW_ADDR_SHIFT 12
+#define GATS1UW_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define GATS12PR_ADDR_SHIFT 12
+#define GATS12PR_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define GATS12PW_ADDR_SHIFT 12
+#define GATS12PW_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define GATS12UR_ADDR_SHIFT 12
+#define GATS12UR_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define GATS12UW_ADDR_SHIFT 12
+#define GATS12UW_NDX_SHIFT 0
+
+/* Global Address Translation Status Register: GATSR */
+#define GATSR_ACTIVE_SHIFT 0
+
+/* Global Fault Address Register: GFAR */
+#define GFAR_FADDR_SHIFT 0
+
+/* Global Fault Status Register: GFSR */
+#define GFSR_ICF_SHIFT 0
+#define GFSR_USF_SHIFT 1
+#define GFSR_SMCF_SHIFT 2
+#define GFSR_UCBF_SHIFT 3
+#define GFSR_UCIF_SHIFT 4
+#define GFSR_CAF_SHIFT 5
+#define GFSR_EF_SHIFT 6
+#define GFSR_PF_SHIFT 7
+#define GFSR_MULTI_SHIFT 31
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define GFSYNR0_NESTED_SHIFT 0
+#define GFSYNR0_WNR_SHIFT 1
+#define GFSYNR0_PNU_SHIFT 2
+#define GFSYNR0_IND_SHIFT 3
+#define GFSYNR0_NSSTATE_SHIFT 4
+#define GFSYNR0_NSATTR_SHIFT 5
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define GFSYNR1_SID_SHIFT 0
+
+/* Global Physical Address Register: GPAR */
+#define GPAR_F_SHIFT 0
+#define GPAR_SS_SHIFT 1
+#define GPAR_OUTER_SHIFT 2
+#define GPAR_INNER_SHIFT 4
+#define GPAR_SH_SHIFT 7
+#define GPAR_NS_SHIFT 9
+#define GPAR_NOS_SHIFT 10
+#define GPAR_PA_SHIFT 12
+#define GPAR_TF_SHIFT 1
+#define GPAR_AFF_SHIFT 2
+#define GPAR_PF_SHIFT 3
+#define GPAR_EF_SHIFT 4
+#define GPAR_TLBMCF_SHIFT 5
+#define GPAR_TLBLKF_SHIFT 6
+#define GPAR_UCBF_SHIFT 30
+
+/* Identification Register: IDR0 */
+#define IDR0_NUMSMRG_SHIFT 0
+#define IDR0_NUMSIDB_SHIFT 9
+#define IDR0_BTM_SHIFT 13
+#define IDR0_CTTW_SHIFT 14
+#define IDR0_NUMIRPT_SHIFT 16
+#define IDR0_PTFS_SHIFT 24
+#define IDR0_SMS_SHIFT 27
+#define IDR0_NTS_SHIFT 28
+#define IDR0_S2TS_SHIFT 29
+#define IDR0_S1TS_SHIFT 30
+#define IDR0_SES_SHIFT 31
+
+/* Identification Register: IDR1 */
+#define IDR1_NUMCB_SHIFT 0
+#define IDR1_NUMSSDNDXB_SHIFT 8
+#define IDR1_SSDTP_SHIFT 12
+#define IDR1_SMCD_SHIFT 15
+#define IDR1_NUMS2CB_SHIFT 16
+#define IDR1_NUMPAGENDXB_SHIFT 28
+#define IDR1_PAGESIZE_SHIFT 31
+
+/* Identification Register: IDR2 */
+#define IDR2_IAS_SHIFT 0
+#define IDR2_OAS_SHIFT 4
+
+/* Identification Register: IDR7 */
+#define IDR7_MINOR_SHIFT 0
+#define IDR7_MAJOR_SHIFT 4
+
+/* Stream to Context Register: S2CR */
+#define S2CR_CBNDX_SHIFT 0
+#define S2CR_SHCFG_SHIFT 8
+#define S2CR_MTCFG_SHIFT 11
+#define S2CR_MEMATTR_SHIFT 12
+#define S2CR_TYPE_SHIFT 16
+#define S2CR_NSCFG_SHIFT 18
+#define S2CR_RACFG_SHIFT 20
+#define S2CR_WACFG_SHIFT 22
+#define S2CR_PRIVCFG_SHIFT 24
+#define S2CR_INSTCFG_SHIFT 26
+#define S2CR_TRANSIENTCFG_SHIFT 28
+#define S2CR_VMID_SHIFT 0
+#define S2CR_BSU_SHIFT 24
+#define S2CR_FB_SHIFT 26
+
+/* Stream Match Register: SMR */
+#define SMR_ID_SHIFT 0
+#define SMR_MASK_SHIFT 16
+#define SMR_VALID_SHIFT 31
+
+/* Global TLB Status: TLBGSTATUS */
+#define TLBGSTATUS_GSACTIVE_SHIFT 0
+
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define TLBIVAH_ADDR_SHIFT 12
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define TLBIVMID_VMID_SHIFT 0
+
+/* Context Bank Attribute Register: CBAR */
+#define CBAR_VMID_SHIFT 0
+#define CBAR_CBNDX_SHIFT 8
+#define CBAR_BPSHCFG_SHIFT 8
+#define CBAR_HYPC_SHIFT 10
+#define CBAR_FB_SHIFT 11
+#define CBAR_MEMATTR_SHIFT 12
+#define CBAR_TYPE_SHIFT 16
+#define CBAR_BSU_SHIFT 18
+#define CBAR_RACFG_SHIFT 20
+#define CBAR_WACFG_SHIFT 22
+#define CBAR_IRPTNDX_SHIFT 24
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
+#define CBFRSYNRA_SID_SHIFT 0
+
+/* Implementation defined register space shift */
+#define MICRO_MMU_CTRL_RESERVED_SHIFT 0
+#define MICRO_MMU_CTRL_HALT_REQ_SHIFT 2
+#define MICRO_MMU_CTRL_IDLE_SHIFT 3
+
+/* Stage 1 Context Bank Format Shifts */
+/* Auxiliary Control Register: CB_ACTLR */
+#define CB_ACTLR_REQPRIORITY_SHIFT 0
+#define CB_ACTLR_REQPRIORITYCFG_SHIFT 4
+#define CB_ACTLR_PRIVCFG_SHIFT 8
+#define CB_ACTLR_BPRCOSH_SHIFT 28
+#define CB_ACTLR_BPRCISH_SHIFT 29
+#define CB_ACTLR_BPRCNSH_SHIFT 30
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define CB_ATS1PR_ADDR_SHIFT 12
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define CB_ATS1PW_ADDR_SHIFT 12
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define CB_ATS1UR_ADDR_SHIFT 12
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define CB_ATS1UW_ADDR_SHIFT 12
+
+/* Address Translation Status Register: CB_ATSR */
+#define CB_ATSR_ACTIVE_SHIFT 0
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define CB_CONTEXTIDR_ASID_SHIFT 0
+#define CB_CONTEXTIDR_PROCID_SHIFT 8
+
+/* Fault Address Register: CB_FAR */
+#define CB_FAR_FADDR_SHIFT 0
+
+/* Fault Status Register: CB_FSR */
+#define CB_FSR_TF_SHIFT 1
+#define CB_FSR_AFF_SHIFT 2
+#define CB_FSR_PF_SHIFT 3
+#define CB_FSR_EF_SHIFT 4
+#define CB_FSR_TLBMCF_SHIFT 5
+#define CB_FSR_TLBLKF_SHIFT 6
+#define CB_FSR_SS_SHIFT 30
+#define CB_FSR_MULTI_SHIFT 31
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define CB_FSYNR0_PLVL_SHIFT 0
+#define CB_FSYNR0_S1PTWF_SHIFT 3
+#define CB_FSYNR0_WNR_SHIFT 4
+#define CB_FSYNR0_PNU_SHIFT 5
+#define CB_FSYNR0_IND_SHIFT 6
+#define CB_FSYNR0_NSSTATE_SHIFT 7
+#define CB_FSYNR0_NSATTR_SHIFT 8
+#define CB_FSYNR0_ATOF_SHIFT 9
+#define CB_FSYNR0_PTWF_SHIFT 10
+#define CB_FSYNR0_AFR_SHIFT 11
+#define CB_FSYNR0_S1CBNDX_SHIFT 16
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define CB_NMRR_IR0_SHIFT 0
+#define CB_NMRR_IR1_SHIFT 2
+#define CB_NMRR_IR2_SHIFT 4
+#define CB_NMRR_IR3_SHIFT 6
+#define CB_NMRR_IR4_SHIFT 8
+#define CB_NMRR_IR5_SHIFT 10
+#define CB_NMRR_IR6_SHIFT 12
+#define CB_NMRR_IR7_SHIFT 14
+#define CB_NMRR_OR0_SHIFT 16
+#define CB_NMRR_OR1_SHIFT 18
+#define CB_NMRR_OR2_SHIFT 20
+#define CB_NMRR_OR3_SHIFT 22
+#define CB_NMRR_OR4_SHIFT 24
+#define CB_NMRR_OR5_SHIFT 26
+#define CB_NMRR_OR6_SHIFT 28
+#define CB_NMRR_OR7_SHIFT 30
+
+/* Physical Address Register: CB_PAR */
+#define CB_PAR_F_SHIFT 0
+#define CB_PAR_SS_SHIFT 1
+#define CB_PAR_OUTER_SHIFT 2
+#define CB_PAR_INNER_SHIFT 4
+#define CB_PAR_SH_SHIFT 7
+#define CB_PAR_NS_SHIFT 9
+#define CB_PAR_NOS_SHIFT 10
+#define CB_PAR_PA_SHIFT 12
+#define CB_PAR_TF_SHIFT 1
+#define CB_PAR_AFF_SHIFT 2
+#define CB_PAR_PF_SHIFT 3
+#define CB_PAR_EF_SHIFT 4
+#define CB_PAR_TLBMCF_SHIFT 5
+#define CB_PAR_TLBLKF_SHIFT 6
+#define CB_PAR_ATOT_SHIFT 31
+#define CB_PAR_PLVL_SHIFT 32
+#define CB_PAR_STAGE_SHIFT 35
+
+/* Primary Region Remap Register: CB_PRRR */
+#define CB_PRRR_TR0_SHIFT 0
+#define CB_PRRR_TR1_SHIFT 2
+#define CB_PRRR_TR2_SHIFT 4
+#define CB_PRRR_TR3_SHIFT 6
+#define CB_PRRR_TR4_SHIFT 8
+#define CB_PRRR_TR5_SHIFT 10
+#define CB_PRRR_TR6_SHIFT 12
+#define CB_PRRR_TR7_SHIFT 14
+#define CB_PRRR_DS0_SHIFT 16
+#define CB_PRRR_DS1_SHIFT 17
+#define CB_PRRR_NS0_SHIFT 18
+#define CB_PRRR_NS1_SHIFT 19
+#define CB_PRRR_NOS0_SHIFT 24
+#define CB_PRRR_NOS1_SHIFT 25
+#define CB_PRRR_NOS2_SHIFT 26
+#define CB_PRRR_NOS3_SHIFT 27
+#define CB_PRRR_NOS4_SHIFT 28
+#define CB_PRRR_NOS5_SHIFT 29
+#define CB_PRRR_NOS6_SHIFT 30
+#define CB_PRRR_NOS7_SHIFT 31
+
+/* Transaction Resume: CB_RESUME */
+#define CB_RESUME_TNR_SHIFT 0
+
+/* System Control Register: CB_SCTLR */
+#define CB_SCTLR_M_SHIFT 0
+#define CB_SCTLR_TRE_SHIFT 1
+#define CB_SCTLR_AFE_SHIFT 2
+#define CB_SCTLR_AFFD_SHIFT 3
+#define CB_SCTLR_E_SHIFT 4
+#define CB_SCTLR_CFRE_SHIFT 5
+#define CB_SCTLR_CFIE_SHIFT 6
+#define CB_SCTLR_CFCFG_SHIFT 7
+#define CB_SCTLR_HUPCF_SHIFT 8
+#define CB_SCTLR_WXN_SHIFT 9
+#define CB_SCTLR_UWXN_SHIFT 10
+#define CB_SCTLR_ASIDPNE_SHIFT 12
+#define CB_SCTLR_TRANSIENTCFG_SHIFT 14
+#define CB_SCTLR_MEMATTR_SHIFT 16
+#define CB_SCTLR_MTCFG_SHIFT 20
+#define CB_SCTLR_SHCFG_SHIFT 22
+#define CB_SCTLR_RACFG_SHIFT 24
+#define CB_SCTLR_WACFG_SHIFT 26
+#define CB_SCTLR_NSCFG_SHIFT 28
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define CB_TLBIASID_ASID_SHIFT 0
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define CB_TLBIVA_ASID_SHIFT 0
+#define CB_TLBIVA_VA_SHIFT 12
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define CB_TLBIVAA_VA_SHIFT 12
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define CB_TLBIVAAL_VA_SHIFT 12
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define CB_TLBIVAL_ASID_SHIFT 0
+#define CB_TLBIVAL_VA_SHIFT 12
+
+/* TLB Status: CB_TLBSTATUS */
+#define CB_TLBSTATUS_SACTIVE_SHIFT 0
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_T0SZ_SHIFT 0
+#define CB_TTBCR_T1SZ_SHIFT 16
+#define CB_TTBCR_EPD0_SHIFT 4
+#define CB_TTBCR_EPD1_SHIFT 5
+#define CB_TTBCR_NSCFG0_SHIFT 14
+#define CB_TTBCR_NSCFG1_SHIFT 30
+#define CB_TTBCR_EAE_SHIFT 31
+#define CB_TTBCR_IRGN0_SHIFT 8
+#define CB_TTBCR_IRGN1_SHIFT 24
+#define CB_TTBCR_ORGN0_SHIFT 10
+#define CB_TTBCR_ORGN1_SHIFT 26
+#define CB_TTBCR_A1_SHIFT 22
+#define CB_TTBCR_SH0_SHIFT 12
+#define CB_TTBCR_SH1_SHIFT 28
+
+/* Translation Table Base Register 0/1: CB_TTBR */
+#ifdef CONFIG_IOMMU_LPAE
+#define CB_TTBR0_ADDR_SHIFT 5
+#define CB_TTBR0_ASID_SHIFT 48
+#define CB_TTBR1_ASID_SHIFT 48
+#else
+#define CB_TTBR0_IRGN1_SHIFT 0
+#define CB_TTBR0_S_SHIFT 1
+#define CB_TTBR0_RGN_SHIFT 3
+#define CB_TTBR0_NOS_SHIFT 5
+#define CB_TTBR0_IRGN0_SHIFT 6
+#define CB_TTBR0_ADDR_SHIFT 14
+
+#define CB_TTBR1_IRGN1_SHIFT 0
+#define CB_TTBR1_S_SHIFT 1
+#define CB_TTBR1_RGN_SHIFT 3
+#define CB_TTBR1_NOS_SHIFT 5
+#define CB_TTBR1_IRGN0_SHIFT 6
+#define CB_TTBR1_ADDR_SHIFT 14
+#endif
+
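+/*
+ * Illustrative usage sketch (editorial addition, not part of the original
+ * patch; "base" is a hypothetical mapped register base and the GFSYNR1
+ * register offset is assumed from the definitions earlier in this header):
+ *
+ *	u32 gfsynr1 = readl_relaxed(base + GFSYNR1);
+ *	u32 sid = (gfsynr1 >> GFSYNR1_SID_SHIFT) & GFSYNR1_SID_MASK;
+ *
+ * The pre-shifted field macros (e.g. CB_FSR_TF) give the mask in its
+ * in-register position, for tests such as (fsr & CB_FSR_TF).
+ */
+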
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_pagetable.c b/drivers/iommu/qcom/msm_iommu_pagetable.c
new file mode 100644
index 000000000000..1f11abb9db7b
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_pagetable.c
@@ -0,0 +1,645 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/scatterlist.h>
+
+#include <asm/cacheflush.h>
+
+#include <linux/qcom_iommu.h>
+#include "msm_iommu_priv.h"
+#include <trace/events/kmem.h>
+#include "msm_iommu_pagetable.h"
+
+#define NUM_FL_PTE 4096
+#define NUM_SL_PTE 256
+#define GUARD_PTE 2
+#define NUM_TEX_CLASS 8
+
+/* First-level page table bits */
+#define FL_BASE_MASK 0xFFFFFC00
+#define FL_TYPE_TABLE (1 << 0)
+#define FL_TYPE_SECT (2 << 0)
+#define FL_SUPERSECTION (1 << 18)
+#define FL_AP0 (1 << 10)
+#define FL_AP1 (1 << 11)
+#define FL_AP2 (1 << 15)
+#define FL_SHARED (1 << 16)
+#define FL_BUFFERABLE (1 << 2)
+#define FL_CACHEABLE (1 << 3)
+#define FL_TEX0 (1 << 12)
+#define FL_OFFSET(va) (((va) & 0xFFF00000) >> 20)
+#define FL_NG (1 << 17)
+
+/* Second-level page table bits */
+#define SL_BASE_MASK_LARGE 0xFFFF0000
+#define SL_BASE_MASK_SMALL 0xFFFFF000
+#define SL_TYPE_LARGE (1 << 0)
+#define SL_TYPE_SMALL (2 << 0)
+#define SL_AP0 (1 << 4)
+#define SL_AP1 (2 << 4)
+#define SL_AP2 (1 << 9)
+#define SL_SHARED (1 << 10)
+#define SL_BUFFERABLE (1 << 2)
+#define SL_CACHEABLE (1 << 3)
+#define SL_TEX0 (1 << 6)
+#define SL_OFFSET(va) (((va) & 0xFF000) >> 12)
+#define SL_NG (1 << 11)
+
+/* Memory type and cache policy attributes */
+#define MT_SO 0
+#define MT_DEV 1
+#define MT_IOMMU_NORMAL 2
+#define CP_NONCACHED 0
+#define CP_WB_WA 1
+#define CP_WT 2
+#define CP_WB_NWA 3
+
+/* Sharability attributes of MSM IOMMU mappings */
+#define MSM_IOMMU_ATTR_NON_SH 0x0
+#define MSM_IOMMU_ATTR_SH 0x4
+
+/* Cacheability attributes of MSM IOMMU mappings */
+#define MSM_IOMMU_ATTR_NONCACHED 0x0
+#define MSM_IOMMU_ATTR_CACHED_WB_WA 0x1
+#define MSM_IOMMU_ATTR_CACHED_WB_NWA 0x2
+#define MSM_IOMMU_ATTR_CACHED_WT 0x3
+
+static int msm_iommu_tex_class[4];
+
+/* TEX Remap Registers */
+#define NMRR_ICP(nmrr, n) (((nmrr) & (3 << ((n) * 2))) >> ((n) * 2))
+#define NMRR_OCP(nmrr, n) (((nmrr) & (3 << ((n) * 2 + 16))) >> ((n) * 2 + 16))
+
+#define PRRR_NOS(prrr, n) ((prrr) & (1 << ((n) + 24)) ? 1 : 0)
+#define PRRR_MT(prrr, n) ((((prrr) & (3 << ((n) * 2))) >> ((n) * 2)))
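+
+/*
+ * Editorial example of the remap accessors above: for TEX class n = 2,
+ * PRRR_MT() extracts PRRR bits [5:4] (memory type), PRRR_NOS() bit 26,
+ * NMRR_ICP() NMRR bits [5:4] (inner cache policy) and NMRR_OCP() bits
+ * [21:20] (outer cache policy).
+ */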
+
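+/*
+ * Editorial note (assumption based on usage in this file): "redirect"
+ * means the IOMMU walker fetches page tables through the L2 cache, so
+ * PTE updates need no cache maintenance; with redirect off, updated PTE
+ * ranges must be flushed to memory before the walker can observe them.
+ */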
+static inline void clean_pte(u32 *start, u32 *end, int redirect)
+{
+ if (!redirect)
+ dmac_flush_range(start, end);
+}
+
+int msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt)
+{
+ pt->fl_table = (u32 *)__get_free_pages(GFP_KERNEL, get_order(SZ_16K));
+ if (!pt->fl_table)
+ return -ENOMEM;
+
+ pt->fl_table_shadow = (u32 *)__get_free_pages(GFP_KERNEL,
+ get_order(SZ_16K));
+ if (!pt->fl_table_shadow) {
+ free_pages((unsigned long)pt->fl_table, get_order(SZ_16K));
+ return -ENOMEM;
+ }
+
+ memset(pt->fl_table, 0, SZ_16K);
+ memset(pt->fl_table_shadow, 0, SZ_16K);
+ clean_pte(pt->fl_table, pt->fl_table + NUM_FL_PTE, pt->redirect);
+
+ return 0;
+}
+
+void msm_iommu_pagetable_free(struct msm_iommu_pt *pt)
+{
+ u32 *fl_table;
+ u32 *fl_table_shadow;
+ int i;
+
+ fl_table = pt->fl_table;
+ fl_table_shadow = pt->fl_table_shadow;
+ for (i = 0; i < NUM_FL_PTE; i++)
+ if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
+ free_page((unsigned long) __va(((fl_table[i]) &
+ FL_BASE_MASK)));
+ free_pages((unsigned long)fl_table, get_order(SZ_16K));
+	pt->fl_table = NULL;
+
+ free_pages((unsigned long)fl_table_shadow, get_order(SZ_16K));
+	pt->fl_table_shadow = NULL;
+}
+
+void msm_iommu_pagetable_free_tables(struct msm_iommu_pt *pt, unsigned long va,
+ size_t len)
+{
+ /*
+ * Adding 2 for worst case. We could be spanning 3 second level pages
+ * if we unmapped just over 1MB.
+ */
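+	/*
+	 * Worked example (editorial): unmapping SZ_1M + SZ_8K starting
+	 * 4K below a 1MB boundary spans three second level tables, while
+	 * len / SZ_1M accounts for only one of them; hence the + 2.
+	 */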
+ u32 n_entries = len / SZ_1M + 2;
+ u32 fl_offset = FL_OFFSET(va);
+ u32 i;
+
+ for (i = 0; i < n_entries && fl_offset < NUM_FL_PTE; ++i) {
+ u32 *fl_pte_shadow = pt->fl_table_shadow + fl_offset;
+ void *sl_table_va = __va(((*fl_pte_shadow) & ~0x1FF));
+ u32 sl_table = *fl_pte_shadow;
+
+ if (sl_table && !(sl_table & 0x1FF)) {
+ free_pages((unsigned long) sl_table_va,
+ get_order(SZ_4K));
+ *fl_pte_shadow = 0;
+ }
+ ++fl_offset;
+ }
+}
+
+static int __get_pgprot(int prot, int len)
+{
+ unsigned int pgprot;
+ int tex;
+
+ if (!(prot & (IOMMU_READ | IOMMU_WRITE))) {
+ prot |= IOMMU_READ | IOMMU_WRITE;
+ WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n");
+ }
+
+ if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) {
+ prot |= IOMMU_READ;
+ WARN_ONCE(1, "Write-only unsupported; falling back to RW\n");
+ }
+
+ if (prot & IOMMU_CACHE)
+ tex = (pgprot_val(PAGE_KERNEL) >> 2) & 0x07;
+ else
+ tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];
+
+ if (tex < 0 || tex > NUM_TEX_CLASS - 1)
+ return 0;
+
+ if (len == SZ_16M || len == SZ_1M) {
+ pgprot = FL_SHARED;
+ pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
+ pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
+ pgprot |= tex & 0x04 ? FL_TEX0 : 0;
+ pgprot |= prot & IOMMU_PRIV ? FL_AP0 :
+ (FL_AP0 | FL_AP1);
+ pgprot |= prot & IOMMU_WRITE ? 0 : FL_AP2;
+ } else {
+ pgprot = SL_SHARED;
+ pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
+ pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
+ pgprot |= tex & 0x04 ? SL_TEX0 : 0;
+ pgprot |= prot & IOMMU_PRIV ? SL_AP0 :
+ (SL_AP0 | SL_AP1);
+ pgprot |= prot & IOMMU_WRITE ? 0 : SL_AP2;
+ }
+
+ return pgprot;
+}
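+
+/*
+ * Editorial example of the encoding above: for prot = IOMMU_READ |
+ * IOMMU_WRITE and len == SZ_4K this returns SL_SHARED | SL_AP0 | SL_AP1
+ * plus the cacheability bits of the selected TEX class (no SL_AP2,
+ * since the mapping is writable).
+ */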
+
+static u32 *make_second_level(struct msm_iommu_pt *pt, u32 *fl_pte,
+ u32 *fl_pte_shadow)
+{
+	u32 *sl;
+
+	sl = (u32 *)__get_free_pages(GFP_KERNEL, get_order(SZ_4K));
+
+ if (!sl) {
+ pr_debug("Could not allocate second level table\n");
+ goto fail;
+ }
+ memset(sl, 0, SZ_4K);
+ clean_pte(sl, sl + NUM_SL_PTE + GUARD_PTE, pt->redirect);
+
+	*fl_pte = (((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE;
+ *fl_pte_shadow = *fl_pte & ~0x1FF;
+
+ clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+fail:
+ return sl;
+}
+
+static int sl_4k(u32 *sl_pte, phys_addr_t pa, unsigned int pgprot)
+{
+ int ret = 0;
+
+ if (*sl_pte) {
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
+ | SL_TYPE_SMALL | pgprot;
+fail:
+ return ret;
+}
+
+static int sl_64k(u32 *sl_pte, phys_addr_t pa, unsigned int pgprot)
+{
+	int ret = 0;
+	int i;
+
+ for (i = 0; i < 16; i++)
+ if (*(sl_pte+i)) {
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ for (i = 0; i < 16; i++)
+ *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
+ | SL_SHARED | SL_TYPE_LARGE | pgprot;
+
+fail:
+ return ret;
+}
+
+static inline int fl_1m(u32 *fl_pte, phys_addr_t pa, int pgprot)
+{
+ if (*fl_pte)
+ return -EBUSY;
+
+ *fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
+ | pgprot;
+
+ return 0;
+}
+
+static inline int fl_16m(u32 *fl_pte, phys_addr_t pa, int pgprot)
+{
+ int i;
+ int ret = 0;
+ for (i = 0; i < 16; i++)
+ if (*(fl_pte+i)) {
+ ret = -EBUSY;
+ goto fail;
+ }
+ for (i = 0; i < 16; i++)
+ *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
+ | FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
+fail:
+ return ret;
+}
+
+int msm_iommu_pagetable_map(struct msm_iommu_pt *pt, unsigned long va,
+ phys_addr_t pa, size_t len, int prot)
+{
+ int ret;
+ struct scatterlist sg;
+
+ if (len != SZ_16M && len != SZ_1M &&
+ len != SZ_64K && len != SZ_4K) {
+		pr_debug("Bad size: %zu\n", len);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ sg_init_table(&sg, 1);
+ sg_dma_address(&sg) = pa;
+ sg.length = len;
+
+ ret = msm_iommu_pagetable_map_range(pt, va, &sg, len, prot);
+
+fail:
+ return ret;
+}
+
+size_t msm_iommu_pagetable_unmap(struct msm_iommu_pt *pt, unsigned long va,
+ size_t len)
+{
+ msm_iommu_pagetable_unmap_range(pt, va, len);
+ return len;
+}
+
+static phys_addr_t get_phys_addr(struct scatterlist *sg)
+{
+ /*
+ * Try sg_dma_address first so that we can
+ * map carveout regions that do not have a
+ * struct page associated with them.
+ */
+ phys_addr_t pa = sg_dma_address(sg);
+ if (pa == 0)
+ pa = sg_phys(sg);
+ return pa;
+}
+
+/*
+ * For debugging we may want to force mappings to be 4K only
+ */
+#ifdef CONFIG_IOMMU_FORCE_4K_MAPPINGS
+static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
+ int align)
+{
+ if (align == SZ_4K) {
+ return IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
+ && (len >= align);
+ } else {
+ return 0;
+ }
+}
+#else
+static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
+ int align)
+{
+ return IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
+ && (len >= align);
+}
+#endif
+
+int msm_iommu_pagetable_map_range(struct msm_iommu_pt *pt, unsigned int va,
+ struct scatterlist *sg, unsigned int len, int prot)
+{
+ phys_addr_t pa;
+ unsigned int start_va = va;
+ unsigned int offset = 0;
+ u32 *fl_pte;
+ u32 *fl_pte_shadow;
+ u32 fl_offset;
+ u32 *sl_table = NULL;
+ u32 sl_offset, sl_start;
+ unsigned int chunk_size, chunk_offset = 0;
+ int ret = 0;
+ unsigned int pgprot4k, pgprot64k, pgprot1m, pgprot16m;
+
+ BUG_ON(len & (SZ_4K - 1));
+
+ pgprot4k = __get_pgprot(prot, SZ_4K);
+ pgprot64k = __get_pgprot(prot, SZ_64K);
+ pgprot1m = __get_pgprot(prot, SZ_1M);
+ pgprot16m = __get_pgprot(prot, SZ_16M);
+ if (!pgprot4k || !pgprot64k || !pgprot1m || !pgprot16m) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ fl_offset = FL_OFFSET(va); /* Upper 12 bits */
+ fl_pte = pt->fl_table + fl_offset; /* int pointers, 4 bytes */
+ fl_pte_shadow = pt->fl_table_shadow + fl_offset;
+ pa = get_phys_addr(sg);
+
+ while (offset < len) {
+ chunk_size = SZ_4K;
+
+ if (is_fully_aligned(va, pa, sg->length - chunk_offset,
+ SZ_16M))
+ chunk_size = SZ_16M;
+ else if (is_fully_aligned(va, pa, sg->length - chunk_offset,
+ SZ_1M))
+ chunk_size = SZ_1M;
+ /* 64k or 4k determined later */
+
+		/* trace_iommu_map_range(va, pa, sg->length, chunk_size); */
+
+ /* for 1M and 16M, only first level entries are required */
+ if (chunk_size >= SZ_1M) {
+ if (chunk_size == SZ_16M) {
+ ret = fl_16m(fl_pte, pa, pgprot16m);
+ if (ret)
+ goto fail;
+ clean_pte(fl_pte, fl_pte + 16, pt->redirect);
+ fl_pte += 16;
+ fl_pte_shadow += 16;
+ } else if (chunk_size == SZ_1M) {
+ ret = fl_1m(fl_pte, pa, pgprot1m);
+ if (ret)
+ goto fail;
+ clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+ fl_pte++;
+ fl_pte_shadow++;
+ }
+
+ offset += chunk_size;
+ chunk_offset += chunk_size;
+ va += chunk_size;
+ pa += chunk_size;
+
+ if (chunk_offset >= sg->length && offset < len) {
+ chunk_offset = 0;
+ sg = sg_next(sg);
+ pa = get_phys_addr(sg);
+ }
+ continue;
+ }
+ /* for 4K or 64K, make sure there is a second level table */
+ if (*fl_pte == 0) {
+ if (!make_second_level(pt, fl_pte, fl_pte_shadow)) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ }
+ if (!(*fl_pte & FL_TYPE_TABLE)) {
+ ret = -EBUSY;
+ goto fail;
+ }
+ sl_table = __va(((*fl_pte) & FL_BASE_MASK));
+ sl_offset = SL_OFFSET(va);
+ /* Keep track of initial position so we
+ * don't clean more than we have to
+ */
+ sl_start = sl_offset;
+
+ /* Build the 2nd level page table */
+ while (offset < len && sl_offset < NUM_SL_PTE) {
+ /* Map a large 64K page if the chunk is large enough and
+ * the pa and va are aligned
+ */
+
+ if (is_fully_aligned(va, pa, sg->length - chunk_offset,
+ SZ_64K))
+ chunk_size = SZ_64K;
+ else
+ chunk_size = SZ_4K;
+
+			/* trace_iommu_map_range(va, pa, sg->length, chunk_size); */
+
+ if (chunk_size == SZ_4K) {
+ sl_4k(&sl_table[sl_offset], pa, pgprot4k);
+ sl_offset++;
+ /* Increment map count */
+ (*fl_pte_shadow)++;
+ } else {
+ BUG_ON(sl_offset + 16 > NUM_SL_PTE);
+ sl_64k(&sl_table[sl_offset], pa, pgprot64k);
+ sl_offset += 16;
+ /* Increment map count */
+ *fl_pte_shadow += 16;
+ }
+
+ offset += chunk_size;
+ chunk_offset += chunk_size;
+ va += chunk_size;
+ pa += chunk_size;
+
+ if (chunk_offset >= sg->length && offset < len) {
+ chunk_offset = 0;
+ sg = sg_next(sg);
+ pa = get_phys_addr(sg);
+ }
+ }
+
+ clean_pte(sl_table + sl_start, sl_table + sl_offset,
+ pt->redirect);
+ fl_pte++;
+ fl_pte_shadow++;
+ sl_offset = 0;
+ }
+
+fail:
+ if (ret && offset > 0)
+ msm_iommu_pagetable_unmap_range(pt, start_va, offset);
+
+ return ret;
+}
+
+void msm_iommu_pagetable_unmap_range(struct msm_iommu_pt *pt, unsigned int va,
+ unsigned int len)
+{
+ unsigned int offset = 0;
+ u32 *fl_pte;
+ u32 *fl_pte_shadow;
+ u32 fl_offset;
+ u32 *sl_table;
+ u32 sl_start, sl_end;
+ int used;
+
+ BUG_ON(len & (SZ_4K - 1));
+
+ fl_offset = FL_OFFSET(va); /* Upper 12 bits */
+ fl_pte = pt->fl_table + fl_offset; /* int pointers, 4 bytes */
+ fl_pte_shadow = pt->fl_table_shadow + fl_offset;
+
+ while (offset < len) {
+ if (*fl_pte & FL_TYPE_TABLE) {
+ unsigned int n_entries;
+
+ sl_start = SL_OFFSET(va);
+ sl_table = __va(((*fl_pte) & FL_BASE_MASK));
+ sl_end = ((len - offset) / SZ_4K) + sl_start;
+
+ if (sl_end > NUM_SL_PTE)
+ sl_end = NUM_SL_PTE;
+ n_entries = sl_end - sl_start;
+
+ memset(sl_table + sl_start, 0, n_entries * 4);
+ clean_pte(sl_table + sl_start, sl_table + sl_end,
+ pt->redirect);
+
+ offset += n_entries * SZ_4K;
+ va += n_entries * SZ_4K;
+
+ BUG_ON((*fl_pte_shadow & 0x1FF) < n_entries);
+
+ /* Decrement map count */
+ *fl_pte_shadow -= n_entries;
+ used = *fl_pte_shadow & 0x1FF;
+
+ if (!used) {
+ *fl_pte = 0;
+ clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+ }
+
+ sl_start = 0;
+ } else {
+ *fl_pte = 0;
+ *fl_pte_shadow = 0;
+
+ clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+ va += SZ_1M;
+ offset += SZ_1M;
+ sl_start = 0;
+ }
+ fl_pte++;
+ fl_pte_shadow++;
+ }
+}
+
+phys_addr_t msm_iommu_iova_to_phys_soft(struct iommu_domain *domain,
+ phys_addr_t va)
+{
+ struct msm_iommu_priv *priv = to_msm_priv(domain);
+ struct msm_iommu_pt *pt = &priv->pt;
+ u32 *fl_pte;
+ u32 fl_offset;
+ u32 *sl_table = NULL;
+ u32 sl_offset;
+ u32 *sl_pte;
+
+ if (!pt->fl_table) {
+ pr_err("Page table doesn't exist\n");
+ return 0;
+ }
+
+ fl_offset = FL_OFFSET(va);
+ fl_pte = pt->fl_table + fl_offset;
+
+ if (*fl_pte & FL_TYPE_TABLE) {
+ sl_table = __va(((*fl_pte) & FL_BASE_MASK));
+ sl_offset = SL_OFFSET(va);
+ sl_pte = sl_table + sl_offset;
+ /* 64 KB section */
+ if (*sl_pte & SL_TYPE_LARGE)
+ return (*sl_pte & 0xFFFF0000) | (va & ~0xFFFF0000);
+ /* 4 KB section */
+ if (*sl_pte & SL_TYPE_SMALL)
+ return (*sl_pte & 0xFFFFF000) | (va & ~0xFFFFF000);
+ } else {
+ /* 16 MB section */
+ if (*fl_pte & FL_SUPERSECTION)
+ return (*fl_pte & 0xFF000000) | (va & ~0xFF000000);
+ /* 1 MB section */
+ if (*fl_pte & FL_TYPE_SECT)
+ return (*fl_pte & 0xFFF00000) | (va & ~0xFFF00000);
+ }
+ return 0;
+}
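+
+/*
+ * Editorial example of the decode above: an iova that hits a 64 KB
+ * large-page entry keeps its low 16 bits as the page offset, i.e.
+ * pa = (*sl_pte & 0xFFFF0000) | (va & 0xFFFF); the 16 MB, 1 MB and
+ * 4 KB cases follow the same pattern with their respective masks.
+ */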
+
+static int __init get_tex_class(int icp, int ocp, int mt, int nos)
+{
+ int i = 0;
+ unsigned int prrr;
+ unsigned int nmrr;
+ int c_icp, c_ocp, c_mt, c_nos;
+
+ prrr = msm_iommu_get_prrr();
+ nmrr = msm_iommu_get_nmrr();
+
+ for (i = 0; i < NUM_TEX_CLASS; i++) {
+ c_nos = PRRR_NOS(prrr, i);
+ c_mt = PRRR_MT(prrr, i);
+ c_icp = NMRR_ICP(nmrr, i);
+ c_ocp = NMRR_OCP(nmrr, i);
+
+ if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
+ return i;
+ }
+
+ return -ENODEV;
+}
+
+static void __init setup_iommu_tex_classes(void)
+{
+ msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
+ get_tex_class(CP_NONCACHED, CP_NONCACHED,
+ MT_IOMMU_NORMAL, 1);
+
+ msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
+ get_tex_class(CP_WB_WA, CP_WB_WA, MT_IOMMU_NORMAL, 1);
+
+ msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
+ get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_IOMMU_NORMAL, 1);
+
+ msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
+ get_tex_class(CP_WT, CP_WT, MT_IOMMU_NORMAL, 1);
+}
+
+void __init msm_iommu_pagetable_init(void)
+{
+ setup_iommu_tex_classes();
+}
diff --git a/drivers/iommu/qcom/msm_iommu_pagetable.h b/drivers/iommu/qcom/msm_iommu_pagetable.h
new file mode 100644
index 000000000000..12a8d274f95e
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_pagetable.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_IOMMU_PAGETABLE_H
+#define __ARCH_ARM_MACH_MSM_IOMMU_PAGETABLE_H
+
+struct msm_iommu_pt;
+
+void msm_iommu_pagetable_init(void);
+int msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt);
+void msm_iommu_pagetable_free(struct msm_iommu_pt *pt);
+int msm_iommu_pagetable_map(struct msm_iommu_pt *pt, unsigned long va,
+ phys_addr_t pa, size_t len, int prot);
+size_t msm_iommu_pagetable_unmap(struct msm_iommu_pt *pt, unsigned long va,
+ size_t len);
+int msm_iommu_pagetable_map_range(struct msm_iommu_pt *pt, unsigned int va,
+ struct scatterlist *sg, unsigned int len, int prot);
+void msm_iommu_pagetable_unmap_range(struct msm_iommu_pt *pt, unsigned int va,
+ unsigned int len);
+phys_addr_t msm_iommu_iova_to_phys_soft(struct iommu_domain *domain,
+ phys_addr_t va);
+void msm_iommu_pagetable_free_tables(struct msm_iommu_pt *pt, unsigned long va,
+ size_t len);
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_perfmon.h b/drivers/iommu/qcom/msm_iommu_perfmon.h
new file mode 100644
index 000000000000..45683f4ebd88
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_perfmon.h
@@ -0,0 +1,233 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/irqreturn.h>
+
+#ifndef MSM_IOMMU_PERFMON_H
+#define MSM_IOMMU_PERFMON_H
+
+/**
+ * struct iommu_pmon_counter - container for a performance counter.
+ * @counter_no: counter number within the group
+ * @absolute_counter_no: counter number within IOMMU PMU
+ * @value: cached counter value
+ * @overflow_count: number of times the counter has overflowed
+ * @enabled: indicates whether the counter is enabled
+ * @current_event_class: current selected event class, -1 if none
+ * @counter_dir: debugfs directory for this counter
+ * @cnt_group: group this counter belongs to
+ */
+struct iommu_pmon_counter {
+ unsigned int counter_no;
+ unsigned int absolute_counter_no;
+ unsigned long value;
+ unsigned long overflow_count;
+ unsigned int enabled;
+ int current_event_class;
+ struct dentry *counter_dir;
+ struct iommu_pmon_cnt_group *cnt_group;
+};
+
+/**
+ * struct iommu_pmon_cnt_group - container for a perf mon counter group.
+ * @grp_no: group number
+ * @num_counters: number of counters in this group
+ * @counters: list of counters in this group
+ * @group_dir: debugfs directory for this group
+ * @pmon: pointer to the iommu_pmon object this group belongs to
+ */
+struct iommu_pmon_cnt_group {
+ unsigned int grp_no;
+ unsigned int num_counters;
+ struct iommu_pmon_counter *counters;
+ struct dentry *group_dir;
+ struct iommu_pmon *pmon;
+};
+
+/**
+ * struct iommu_info - container for a perf mon iommu info.
+ * @iommu_name: name of the iommu from device tree
+ * @base: virtual base address for this iommu
+ * @evt_irq: irq number for event overflow interrupt
+ * @iommu_dev: pointer to iommu device
+ * @ops: iommu access operations pointer.
+ * @hw_ops: iommu pm hw access operations pointer.
+ * @always_on: 1 if iommu is always on, 0 otherwise.
+ */
+struct iommu_info {
+ const char *iommu_name;
+ void *base;
+ int evt_irq;
+ struct device *iommu_dev;
+ struct iommu_access_ops *ops;
+ struct iommu_pm_hw_ops *hw_ops;
+ unsigned int always_on;
+};
+
+/**
+ * struct iommu_pmon - main container for a perf mon data.
+ * @iommu_dir: debugfs directory for this iommu
+ * @iommu: iommu_info instance
+ * @iommu_list: iommu_list head
+ * @cnt_grp: list of counter groups
+ * @num_groups: number of counter groups
+ * @num_counters: number of counters per group
+ * @event_cls_supported: an array of event classes supported for this PMU
+ * @nevent_cls_supported: number of event classes supported.
+ * @enabled: indicates whether the performance monitor is enabled
+ * @iommu_attach_count: number of times the iommu has been attached
+ * @lock: mutex used to synchronize access to shared data
+ */
+struct iommu_pmon {
+ struct dentry *iommu_dir;
+ struct iommu_info iommu;
+ struct list_head iommu_list;
+ struct iommu_pmon_cnt_group *cnt_grp;
+ u32 num_groups;
+ u32 num_counters;
+ u32 *event_cls_supported;
+ u32 nevent_cls_supported;
+ unsigned int enabled;
+ unsigned int iommu_attach_count;
+ struct mutex lock;
+};
+
+/**
+ * struct iommu_pm_hw_ops - Callbacks for accessing IOMMU HW
+ * @initialize_hw: Call to do any initialization before enabling ovf interrupts
+ * @is_hw_access_OK: Returns 1 if we can access HW, 0 otherwise
+ * @grp_enable: Call to enable a counter group
+ * @grp_disable: Call to disable a counter group
+ * @enable_pm: Call to enable PM
+ * @disable_pm: Call to disable PM
+ * @reset_counters: Call to reset counters
+ * @check_for_overflow: Call to check for overflow
+ * @evt_ovfl_int_handler: Overflow interrupt handler callback
+ * @counter_enable: Call to enable counters
+ * @counter_disable: Call to disable counters
+ * @ovfl_int_enable: Call to enable overflow interrupts
+ * @ovfl_int_disable: Call to disable overflow interrupts
+ * @set_event_class: Call to set event class
+ * @read_counter: Call to read a counter value
+ */
+struct iommu_pm_hw_ops {
+ void (*initialize_hw)(const struct iommu_pmon *);
+ unsigned int (*is_hw_access_OK)(const struct iommu_pmon *);
+ void (*grp_enable)(struct iommu_info *, unsigned int);
+ void (*grp_disable)(struct iommu_info *, unsigned int);
+ void (*enable_pm)(struct iommu_info *);
+ void (*disable_pm)(struct iommu_info *);
+ void (*reset_counters)(const struct iommu_info *);
+ void (*check_for_overflow)(struct iommu_pmon *);
+ irqreturn_t (*evt_ovfl_int_handler)(int, void *);
+ void (*counter_enable)(struct iommu_info *,
+ struct iommu_pmon_counter *);
+ void (*counter_disable)(struct iommu_info *,
+ struct iommu_pmon_counter *);
+ void (*ovfl_int_enable)(struct iommu_info *,
+ const struct iommu_pmon_counter *);
+ void (*ovfl_int_disable)(struct iommu_info *,
+ const struct iommu_pmon_counter *);
+ void (*set_event_class)(struct iommu_pmon *pmon, unsigned int,
+ unsigned int);
+ unsigned int (*read_counter)(struct iommu_pmon_counter *);
+};
+
+#define MSM_IOMMU_PMU_NO_EVENT_CLASS -1
+
+#ifdef CONFIG_MSM_IOMMU_PMON
+
+/**
+ * Get pointer to PMU hardware access functions for IOMMUv0 PMU
+ */
+struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v0(void);
+
+/**
+ * Get pointer to PMU hardware access functions for IOMMUv1 PMU
+ */
+struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v1(void);
+
+/**
+ * Allocate memory for performance monitor structure. Must
+ * be called before iommu_pm_iommu_register
+ */
+struct iommu_pmon *msm_iommu_pm_alloc(struct device *iommu_dev);
+
+/**
+ * Free memory previously allocated with msm_iommu_pm_alloc().
+ */
+void msm_iommu_pm_free(struct device *iommu_dev);
+
+/**
+ * Register iommu with the performance monitor module.
+ */
+int msm_iommu_pm_iommu_register(struct iommu_pmon *info);
+
+/**
+ * Unregister iommu with the performance monitor module.
+ */
+void msm_iommu_pm_iommu_unregister(struct device *dev);
+
+/**
+ * Called by iommu driver when attaching is complete
+ * Must NOT be called with IOMMU mutexes held.
+ * @param dev IOMMU device that is attached
+ */
+void msm_iommu_attached(struct device *dev);
+
+/**
+ * Called by iommu driver before detaching.
+ * Must NOT be called with IOMMU mutexes held.
+ * @param dev IOMMU device that is going to be detached
+ */
+void msm_iommu_detached(struct device *dev);
+#else
+static inline struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v0(void)
+{
+ return NULL;
+}
+
+static inline struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v1(void)
+{
+ return NULL;
+}
+
+static inline struct iommu_pmon *msm_iommu_pm_alloc(struct device *iommu_dev)
+{
+ return NULL;
+}
+
+static inline void msm_iommu_pm_free(struct device *iommu_dev)
+{
+ return;
+}
+
+static inline int msm_iommu_pm_iommu_register(struct iommu_pmon *info)
+{
+ return -EIO;
+}
+
+static inline void msm_iommu_pm_iommu_unregister(struct device *dev)
+{
+}
+
+static inline void msm_iommu_attached(struct device *dev)
+{
+}
+
+static inline void msm_iommu_detached(struct device *dev)
+{
+}
+#endif
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_priv.h b/drivers/iommu/qcom/msm_iommu_priv.h
new file mode 100644
index 000000000000..4de0d7ef19e6
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_priv.h
@@ -0,0 +1,71 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_IOMMU_PRIV_H
+#define MSM_IOMMU_PRIV_H
+
+/**
+ * struct msm_iommu_pt - Container for a first level page table and its
+ * attributes.
+ * fl_table: Pointer to the first level page table.
+ * redirect: Set to 1 if L2 redirect for page tables is enabled, 0 otherwise.
+ * unaligned_fl_table: Original address of the page table memory. fl_table
+ * is manually aligned (as per spec), but we need the original address to
+ * free the table.
+ * fl_table_shadow: A "copy" of fl_table with one difference: instead of
+ * storing the second level page table address plus the page table entry
+ * descriptor bits, it stores the second level page table address and the
+ * number of second level page table entries in use. This tells us when a
+ * second level page table can be freed, and lets us defer that free until
+ * after a TLB invalidate, which should catch bugs where clients unmap an
+ * address that is still in use.
+ * fl_table_shadow uses the lower 9 bits for the use count and the upper
+ * bits for the second level page table address.
+ * sl_table_shadow: Same concept as fl_table_shadow, but for LPAE 2nd
+ * level page tables.
+ */
+#ifdef CONFIG_IOMMU_LPAE
+struct msm_iommu_pt {
+ u64 *fl_table;
+ u64 **sl_table_shadow;
+ int redirect;
+ u64 *unaligned_fl_table;
+};
+#else
+struct msm_iommu_pt {
+ u32 *fl_table;
+ int redirect;
+ u32 *fl_table_shadow;
+};
+#endif
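
For illustration, a minimal sketch (helper names hypothetical, not part of
this patch) of how a non-LPAE fl_table_shadow entry described above could
be decoded:

	/* lower 9 bits: use count; upper bits: 2nd-level table address */
	#define FL_SHADOW_CNT_MASK	((1U << 9) - 1)

	static inline u32 fl_shadow_sl_addr(u32 entry)
	{
		return entry & ~FL_SHADOW_CNT_MASK;
	}

	static inline u32 fl_shadow_use_count(u32 entry)
	{
		return entry & FL_SHADOW_CNT_MASK;
	}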
+/**
+ * struct msm_iommu_priv - Container for page table attributes and other
+ * private iommu domain information.
+ * pt: Page table attribute structure.
+ * list_attached: List of devices (contexts) attached to this domain.
+ * domain: Generic iommu_domain embedded in this private structure.
+ * client_name: Name of the domain client.
+ */
+struct msm_iommu_priv {
+ struct msm_iommu_pt pt;
+ struct list_head list_attached;
+ struct iommu_domain domain;
+ const char *client_name;
+};
+
+static inline struct msm_iommu_priv *to_msm_priv(struct iommu_domain *dom)
+{
+ return container_of(dom, struct msm_iommu_priv, domain);
+}
+
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_sec.c b/drivers/iommu/qcom/msm_iommu_sec.c
new file mode 100644
index 000000000000..33a1e988ae1e
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_sec.c
@@ -0,0 +1,876 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/clk.h>
+#include <linux/scatterlist.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/kmemleak.h>
+#include <linux/dma-mapping.h>
+#include <linux/qcom_scm.h>
+
+#include <asm/cacheflush.h>
+#include <asm/sizes.h>
+
+#include "msm_iommu_perfmon.h"
+#include "msm_iommu_hw-v1.h"
+#include "msm_iommu_priv.h"
+#include <linux/qcom_iommu.h>
+#include <trace/events/kmem.h>
+
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
+/* commands for SCM_SVC_MP */
+#define IOMMU_SECURE_CFG 2
+#define IOMMU_SECURE_PTBL_SIZE 3
+#define IOMMU_SECURE_PTBL_INIT 4
+#define IOMMU_SET_CP_POOL_SIZE 5
+#define IOMMU_SECURE_MAP 6
+#define IOMMU_SECURE_UNMAP 7
+#define IOMMU_SECURE_MAP2 0x0B
+#define IOMMU_SECURE_MAP2_FLAT 0x12
+#define IOMMU_SECURE_UNMAP2 0x0C
+#define IOMMU_SECURE_UNMAP2_FLAT 0x13
+#define IOMMU_TLBINVAL_FLAG 0x00000001
+
+/* commands for SCM_SVC_UTIL */
+#define IOMMU_DUMP_SMMU_FAULT_REGS 0x0C
+#define MAXIMUM_VIRT_SIZE (300 * SZ_1M)
+
+
+#define MAKE_VERSION(major, minor, patch) \
+ (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
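+/* e.g. MAKE_VERSION(1, 1, 1) == 0x00401001 */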
+
+
+static struct iommu_access_ops *iommu_access_ops;
+static int is_secure;
+
+static const struct of_device_id msm_smmu_list[] = {
+ { .compatible = "qcom,msm-smmu-v1", },
+ { .compatible = "qcom,msm-smmu-v2", },
+ { }
+};
+
+struct msm_scm_paddr_list {
+ unsigned int list;
+ unsigned int list_size;
+ unsigned int size;
+};
+
+struct msm_scm_mapping_info {
+ unsigned int id;
+ unsigned int ctx_id;
+ unsigned int va;
+ unsigned int size;
+};
+
+struct msm_scm_map2_req {
+ struct msm_scm_paddr_list plist;
+ struct msm_scm_mapping_info info;
+ unsigned int flags;
+};
+
+struct msm_scm_unmap2_req {
+ struct msm_scm_mapping_info info;
+ unsigned int flags;
+};
+
+struct msm_cp_pool_size {
+ uint32_t size;
+ uint32_t spare;
+};
+
+#define NUM_DUMP_REGS 14
+/*
+ * some space to allow the number of registers returned by the secure
+ * environment to grow
+ */
+#define WIGGLE_ROOM (NUM_DUMP_REGS * 2)
+/* Each entry is a (reg_addr, reg_val) pair, hence the * 2 */
+#define SEC_DUMP_SIZE ((NUM_DUMP_REGS * 2) + WIGGLE_ROOM)
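+/* with NUM_DUMP_REGS == 14: 28 data words + 28 spare == 56 u32s */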
+
+struct msm_scm_fault_regs_dump {
+ uint32_t dump_size;
+ uint32_t dump_data[SEC_DUMP_SIZE];
+} __aligned(PAGE_SIZE);
+
+void msm_iommu_sec_set_access_ops(struct iommu_access_ops *access_ops)
+{
+ iommu_access_ops = access_ops;
+}
+
+static int msm_iommu_dump_fault_regs(int smmu_id, int cb_num,
+ struct msm_scm_fault_regs_dump *regs)
+{
+ int ret;
+
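+	/* write back our copy so the secure world reads coherent data */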
+ dmac_clean_range(regs, regs + 1);
+
+ ret = qcom_scm_iommu_dump_fault_regs(smmu_id, cb_num,
+ virt_to_phys(regs), sizeof(*regs));
+
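+	/* invalidate so we read what TZ wrote rather than stale lines */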
+ dmac_inv_range(regs, regs + 1);
+
+ return ret;
+}
+
+static int msm_iommu_reg_dump_to_regs(
+ struct msm_iommu_context_reg ctx_regs[],
+ struct msm_scm_fault_regs_dump *dump, struct msm_iommu_drvdata *drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ int i, j, ret = 0;
+ const uint32_t nvals = (dump->dump_size / sizeof(uint32_t));
+ uint32_t *it = (uint32_t *) dump->dump_data;
+ const uint32_t * const end = ((uint32_t *) dump) + nvals;
+ phys_addr_t phys_base = drvdata->phys_base;
+ int ctx = ctx_drvdata->num;
+
+ if (!nvals)
+ return -EINVAL;
+
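+	/* dump_data[] is a flat list of (register address, value) pairs */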
+ for (i = 1; it < end; it += 2, i += 2) {
+ unsigned int reg_offset;
+ uint32_t addr = *it;
+ uint32_t val = *(it + 1);
+ struct msm_iommu_context_reg *reg = NULL;
+ if (addr < phys_base) {
+			pr_err("Bogus-looking register (0x%x) for IOMMU with base at %pa. Skipping.\n",
+ addr, &phys_base);
+ continue;
+ }
+ reg_offset = addr - phys_base;
+
+ for (j = 0; j < MAX_DUMP_REGS; ++j) {
+ struct dump_regs_tbl_entry dump_reg = dump_regs_tbl[j];
+ void *test_reg;
+ unsigned int test_offset;
+ switch (dump_reg.dump_reg_type) {
+ case DRT_CTX_REG:
+ test_reg = CTX_REG(dump_reg.reg_offset,
+ drvdata->cb_base, ctx);
+ break;
+ case DRT_GLOBAL_REG:
+ test_reg = GLB_REG(
+ dump_reg.reg_offset, drvdata->glb_base);
+ break;
+ case DRT_GLOBAL_REG_N:
+ test_reg = GLB_REG_N(
+ drvdata->glb_base, ctx,
+ dump_reg.reg_offset);
+ break;
+ default:
+ pr_err("Unknown dump_reg_type: 0x%x\n",
+ dump_reg.dump_reg_type);
+ BUG();
+ break;
+ }
+ test_offset = test_reg - drvdata->glb_base;
+ if (test_offset == reg_offset) {
+ reg = &ctx_regs[j];
+ break;
+ }
+ }
+
+ if (reg == NULL) {
+ pr_debug("Unknown register in secure CB dump: %x\n",
+ addr);
+ continue;
+ }
+
+ if (reg->valid) {
+ WARN(1, "Invalid (repeated?) register in CB dump: %x\n",
+ addr);
+ continue;
+ }
+
+ reg->val = val;
+ reg->valid = true;
+ }
+
+ if (i != nvals) {
+ pr_err("Invalid dump! %d != %d\n", i, nvals);
+ ret = 1;
+ }
+
+ for (i = 0; i < MAX_DUMP_REGS; ++i) {
+ if (!ctx_regs[i].valid) {
+ if (dump_regs_tbl[i].must_be_present) {
+ pr_err("Register missing from dump for ctx %d: %s, 0x%x\n",
+ ctx,
+ dump_regs_tbl[i].name,
+ dump_regs_tbl[i].reg_offset);
+ ret = 1;
+ }
+ ctx_regs[i].val = 0xd00dfeed;
+ }
+ }
+
+ return ret;
+}
+
+irqreturn_t msm_iommu_secure_fault_handler_v2(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ struct msm_iommu_drvdata *drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_scm_fault_regs_dump *regs;
+ int tmp, ret = IRQ_HANDLED;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ BUG_ON(!pdev);
+
+ drvdata = dev_get_drvdata(pdev->dev.parent);
+ BUG_ON(!drvdata);
+
+ ctx_drvdata = dev_get_drvdata(&pdev->dev);
+ BUG_ON(!ctx_drvdata);
+
+ regs = kzalloc(sizeof(*regs), GFP_KERNEL);
+	if (!regs)
+		goto lock_release;
+
+ if (!drvdata->ctx_attach_count) {
+ pr_err("Unexpected IOMMU page fault from secure context bank!\n");
+ pr_err("name = %s\n", drvdata->name);
+ pr_err("Power is OFF. Unable to read page fault information\n");
+ /*
+ * We cannot determine which context bank caused the issue so
+ * we just return handled here to ensure IRQ handler code is
+ * happy
+ */
+ goto free_regs;
+ }
+
+ iommu_access_ops->iommu_clk_on(drvdata);
+ tmp = msm_iommu_dump_fault_regs(drvdata->sec_id,
+ ctx_drvdata->num, regs);
+ iommu_access_ops->iommu_clk_off(drvdata);
+
+ if (tmp) {
+ pr_err("%s: Couldn't dump fault registers (%d) %s, ctx: %d\n",
+ __func__, tmp, drvdata->name, ctx_drvdata->num);
+ goto free_regs;
+ } else {
+ struct msm_iommu_context_reg ctx_regs[MAX_DUMP_REGS];
+ memset(ctx_regs, 0, sizeof(ctx_regs));
+ tmp = msm_iommu_reg_dump_to_regs(
+ ctx_regs, regs, drvdata, ctx_drvdata);
+ if (tmp < 0) {
+ ret = IRQ_NONE;
+ pr_err("Incorrect response from secure environment\n");
+ goto free_regs;
+ }
+
+ if (ctx_regs[DUMP_REG_FSR].val) {
+ if (tmp)
+ pr_err("Incomplete fault register dump. Printout will be incomplete.\n");
+ if (!ctx_drvdata->attached_domain) {
+ pr_err("Bad domain in interrupt handler\n");
+ tmp = -ENOSYS;
+ } else {
+ tmp = report_iommu_fault(
+ ctx_drvdata->attached_domain,
+ &ctx_drvdata->pdev->dev,
+ COMBINE_DUMP_REG(
+ ctx_regs[DUMP_REG_FAR1].val,
+ ctx_regs[DUMP_REG_FAR0].val),
+ 0);
+ }
+
+ /* if the fault wasn't handled by someone else: */
+ if (tmp == -ENOSYS) {
+ pr_err("Unexpected IOMMU page fault from secure context bank!\n");
+ pr_err("name = %s\n", drvdata->name);
+ pr_err("context = %s (%d)\n", ctx_drvdata->name,
+ ctx_drvdata->num);
+ pr_err("Interesting registers:\n");
+ print_ctx_regs(ctx_regs);
+ }
+ } else {
+ ret = IRQ_NONE;
+ }
+ }
+free_regs:
+ kfree(regs);
+lock_release:
+ iommu_access_ops->iommu_lock_release(0);
+ return ret;
+}
+
+#define SCM_SVC_MP 0xc
+
+static int msm_iommu_sec_ptbl_init(void)
+{
+ struct device_node *np;
+ int psize[2] = {0, 0};
+ unsigned int spare = 0;
+ int ret;
+ int version;
+ /* Use a dummy device for dma_alloc_attrs allocation */
+ struct device dev = { 0 };
+ void *cpu_addr;
+ dma_addr_t paddr;
+ DEFINE_DMA_ATTRS(attrs);
+
+ for_each_matching_node(np, msm_smmu_list)
+ if (of_find_property(np, "qcom,iommu-secure-id", NULL) &&
+ of_device_is_available(np))
+ break;
+
+ if (!np)
+ return 0;
+
+ of_node_put(np);
+
+ version = qcom_scm_get_feat_version(SCM_SVC_MP);
+
+ if (version >= MAKE_VERSION(1, 1, 1)) {
+ ret = qcom_scm_iommu_set_cp_pool_size(MAXIMUM_VIRT_SIZE, 0);
+ if (ret) {
+ pr_err("scm call IOMMU_SET_CP_POOL_SIZE failed\n");
+ goto fail;
+ }
+ }
+
+ ret = qcom_scm_iommu_secure_ptbl_size(spare, psize);
+ if (ret) {
+ pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
+ goto fail;
+ }
+
+	pr_info("iommu sec: psize[0]: %d, psize[1]: %d\n", psize[0], psize[1]);
+
+	if (psize[1]) {
+		pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
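+	/* only the physical address is handed to TZ; no kernel mapping */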
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+ dev.coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+
+ cpu_addr = dma_alloc_attrs(&dev, psize[0], &paddr, GFP_KERNEL, &attrs);
+ if (!cpu_addr) {
+ pr_err("%s: Failed to allocate %d bytes for PTBL\n",
+ __func__, psize[0]);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize[0], spare);
+
+ if (ret) {
+ pr_err("scm call IOMMU_SECURE_PTBL_INIT failed (%d)\n", ret);
+ goto fail_mem;
+ }
+
+ return 0;
+
+fail_mem:
+ dma_free_attrs(&dev, psize[0], cpu_addr, paddr, &attrs);
+fail:
+ return ret;
+}
+
+int msm_iommu_sec_program_iommu(struct msm_iommu_drvdata *drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ if (drvdata->smmu_local_base) {
+ writel_relaxed(0xFFFFFFFF,
+ drvdata->smmu_local_base + SMMU_INTR_SEL_NS);
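+		/* order the interrupt-select write before the SCM call */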
+ mb();
+ }
+
+ return qcom_scm_restore_sec_cfg(drvdata->sec_id, ctx_drvdata->num);
+}
+
+static int msm_iommu_sec_map2(struct msm_scm_map2_req *map)
+{
+ u32 flags;
+
+#ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP
+ flags = IOMMU_TLBINVAL_FLAG;
+#else
+ flags = 0;
+#endif
+
+ return qcom_scm_iommu_secure_map(map->plist.list,
+ map->plist.list_size,
+ map->plist.size,
+ map->info.id,
+ map->info.ctx_id,
+ map->info.va,
+ map->info.size,
+ flags);
+}
+
+static int msm_iommu_sec_ptbl_map(struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata,
+ unsigned long va, phys_addr_t pa, size_t len)
+{
+ struct msm_scm_map2_req map;
+ void *flush_va, *flush_va_end;
+ int ret = 0;
+
+ if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M) ||
+ !IS_ALIGNED(pa, SZ_1M))
+ return -EINVAL;
+ map.plist.list = virt_to_phys(&pa);
+ map.plist.list_size = 1;
+ map.plist.size = len;
+ map.info.id = iommu_drvdata->sec_id;
+ map.info.ctx_id = ctx_drvdata->num;
+ map.info.va = va;
+ map.info.size = len;
+
+ flush_va = &pa;
+ flush_va_end = (void *)
+ (((unsigned long) flush_va) + sizeof(phys_addr_t));
+
+ /*
+ * Ensure that the buffer is in RAM by the time it gets to TZ
+ */
+ dmac_clean_range(flush_va, flush_va_end);
+
+ ret = msm_iommu_sec_map2(&map);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+static unsigned int get_phys_addr(struct scatterlist *sg)
+{
+ /*
+ * Try sg_dma_address first so that we can
+ * map carveout regions that do not have a
+ * struct page associated with them.
+ */
+ unsigned int pa = sg_dma_address(sg);
+ if (pa == 0)
+ pa = sg_phys(sg);
+ return pa;
+}
+
+static int msm_iommu_sec_ptbl_map_range(struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata,
+ unsigned long va, struct scatterlist *sg, size_t len)
+{
+ struct scatterlist *sgiter;
+ struct msm_scm_map2_req map;
+	unsigned int *pa_list = NULL;
+ unsigned int pa, cnt;
+ void *flush_va, *flush_va_end;
+ unsigned int offset = 0, chunk_offset = 0;
+ int ret;
+
+ if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
+ return -EINVAL;
+
+ map.info.id = iommu_drvdata->sec_id;
+ map.info.ctx_id = ctx_drvdata->num;
+ map.info.va = va;
+ map.info.size = len;
+
+ if (sg->length == len) {
+ /*
+ * physical address for secure mapping needs
+ * to be 1MB aligned
+ */
+ pa = get_phys_addr(sg);
+ if (!IS_ALIGNED(pa, SZ_1M))
+ return -EINVAL;
+ map.plist.list = virt_to_phys(&pa);
+ map.plist.list_size = 1;
+ map.plist.size = len;
+ flush_va = &pa;
+ } else {
+ sgiter = sg;
+ if (!IS_ALIGNED(sgiter->length, SZ_1M))
+ return -EINVAL;
+ cnt = sg->length / SZ_1M;
+ while ((sgiter = sg_next(sgiter))) {
+ if (!IS_ALIGNED(sgiter->length, SZ_1M))
+ return -EINVAL;
+ cnt += sgiter->length / SZ_1M;
+ }
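+		/* e.g. sg chunks of 2M + 1M give cnt = 3 SZ_1M entries */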
+
+ pa_list = kmalloc(cnt * sizeof(*pa_list), GFP_KERNEL);
+ if (!pa_list)
+ return -ENOMEM;
+
+ sgiter = sg;
+ cnt = 0;
+ pa = get_phys_addr(sgiter);
+ if (!IS_ALIGNED(pa, SZ_1M)) {
+ kfree(pa_list);
+ return -EINVAL;
+ }
+ while (offset < len) {
+ pa += chunk_offset;
+ pa_list[cnt] = pa;
+ chunk_offset += SZ_1M;
+ offset += SZ_1M;
+ cnt++;
+
+ if (chunk_offset >= sgiter->length && offset < len) {
+ chunk_offset = 0;
+ sgiter = sg_next(sgiter);
+ pa = get_phys_addr(sgiter);
+ }
+ }
+
+ map.plist.list = virt_to_phys(pa_list);
+ map.plist.list_size = cnt;
+ map.plist.size = SZ_1M;
+ flush_va = pa_list;
+ }
+
+ /*
+ * Ensure that the buffer is in RAM by the time it gets to TZ
+ */
+ flush_va_end = (void *) (((unsigned long) flush_va) +
+ (map.plist.list_size * sizeof(*pa_list)));
+ dmac_clean_range(flush_va, flush_va_end);
+
+ ret = msm_iommu_sec_map2(&map);
+ kfree(pa_list);
+
+ return ret;
+}
+
+static int msm_iommu_sec_ptbl_unmap(struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata,
+ unsigned long va, size_t len)
+{
+ if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
+ return -EINVAL;
+
+ return qcom_scm_iommu_secure_unmap(iommu_drvdata->sec_id,
+ ctx_drvdata->num,
+ va,
+ len,
+ IOMMU_TLBINVAL_FLAG);
+}
+
+static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
+{
+ struct msm_iommu_priv *priv;
+ struct iommu_domain *domain;
+
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+
+ INIT_LIST_HEAD(&priv->list_attached);
+ domain = &priv->domain;
+ return domain;
+}
+
+static void msm_iommu_domain_free(struct iommu_domain *domain)
+{
+ struct msm_iommu_priv *priv;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+ priv = to_msm_priv(domain);
+
+ kfree(priv);
+ iommu_access_ops->iommu_lock_release(0);
+}
+
+static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+ struct msm_iommu_priv *priv;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_iommu_ctx_drvdata *tmp_drvdata;
+ int ret = 0;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ priv = to_msm_priv(domain);
+ if (!priv || !dev) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ iommu_drvdata = dev_get_drvdata(dev->parent);
+ ctx_drvdata = dev_get_drvdata(dev);
+ if (!iommu_drvdata || !ctx_drvdata) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (!list_empty(&ctx_drvdata->attached_elm)) {
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
+ if (tmp_drvdata == ctx_drvdata) {
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ ret = iommu_access_ops->iommu_power_on(iommu_drvdata);
+ if (ret)
+ goto fail;
+
+ /* We can only do this once */
+ if (!iommu_drvdata->ctx_attach_count) {
+ ret = iommu_access_ops->iommu_clk_on(iommu_drvdata);
+ if (ret) {
+ iommu_access_ops->iommu_power_off(iommu_drvdata);
+ goto fail;
+ }
+
+ ret = msm_iommu_sec_program_iommu(iommu_drvdata,
+ ctx_drvdata);
+
+ /* bfb settings are always programmed by HLOS */
+ program_iommu_bfb_settings(iommu_drvdata->base,
+ iommu_drvdata->bfb_settings);
+
+ iommu_access_ops->iommu_clk_off(iommu_drvdata);
+ if (ret) {
+ iommu_access_ops->iommu_power_off(iommu_drvdata);
+ goto fail;
+ }
+ }
+
+ list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
+ ctx_drvdata->attached_domain = domain;
+ ++iommu_drvdata->ctx_attach_count;
+
+ iommu_access_ops->iommu_lock_release(0);
+
+ msm_iommu_attached(dev->parent);
+ return ret;
+fail:
+ iommu_access_ops->iommu_lock_release(0);
+ return ret;
+}
+
+static void msm_iommu_detach_dev(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+
+ if (!dev)
+ return;
+
+ msm_iommu_detached(dev->parent);
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ iommu_drvdata = dev_get_drvdata(dev->parent);
+ ctx_drvdata = dev_get_drvdata(dev);
+ if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
+ goto fail;
+
+ list_del_init(&ctx_drvdata->attached_elm);
+ ctx_drvdata->attached_domain = NULL;
+
+ iommu_access_ops->iommu_power_off(iommu_drvdata);
+ BUG_ON(iommu_drvdata->ctx_attach_count == 0);
+ --iommu_drvdata->ctx_attach_count;
+fail:
+ iommu_access_ops->iommu_lock_release(0);
+}
+
+static int get_drvdata(struct iommu_domain *domain,
+ struct msm_iommu_drvdata **iommu_drvdata,
+ struct msm_iommu_ctx_drvdata **ctx_drvdata)
+{
+ struct msm_iommu_priv *priv = to_msm_priv(domain);
+ struct msm_iommu_ctx_drvdata *ctx;
+
+	list_for_each_entry(ctx, &priv->list_attached, attached_elm) {
+		if (ctx->attached_domain == domain) {
+			*ctx_drvdata = ctx;
+			*iommu_drvdata = dev_get_drvdata(ctx->pdev->dev.parent);
+			return 0;
+		}
+	}
+
+	/* nothing attached: don't dereference the list head as an entry */
+	return -EINVAL;
+}
+
+static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
+ phys_addr_t pa, size_t len, int prot)
+{
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret = 0;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
+ if (ret)
+ goto fail;
+
+ iommu_access_ops->iommu_clk_on(iommu_drvdata);
+ ret = msm_iommu_sec_ptbl_map(iommu_drvdata, ctx_drvdata,
+ va, pa, len);
+ iommu_access_ops->iommu_clk_off(iommu_drvdata);
+fail:
+ iommu_access_ops->iommu_lock_release(0);
+ return ret;
+}
+
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+ size_t len)
+{
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	int ret;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
+ if (ret)
+ goto fail;
+
+ iommu_access_ops->iommu_clk_on(iommu_drvdata);
+ ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata,
+ va, len);
+ iommu_access_ops->iommu_clk_off(iommu_drvdata);
+fail:
+ iommu_access_ops->iommu_lock_release(0);
+
+ /* the IOMMU API requires us to return how many bytes were unmapped */
+ len = ret ? 0 : len;
+ return len;
+}
+
+static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
+ struct scatterlist *sg, unsigned int len,
+ int prot)
+{
+ int ret;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
+ if (ret)
+ goto fail;
+ iommu_access_ops->iommu_clk_on(iommu_drvdata);
+ ret = msm_iommu_sec_ptbl_map_range(iommu_drvdata, ctx_drvdata,
+ va, sg, len);
+ iommu_access_ops->iommu_clk_off(iommu_drvdata);
+fail:
+ iommu_access_ops->iommu_lock_release(0);
+ return ret;
+}
+
+
+static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
+ unsigned int len)
+{
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	int ret;
+
+ if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
+ return -EINVAL;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
+ if (ret)
+ goto fail;
+
+ iommu_access_ops->iommu_clk_on(iommu_drvdata);
+ ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata, va, len);
+ iommu_access_ops->iommu_clk_off(iommu_drvdata);
+
+fail:
+ iommu_access_ops->iommu_lock_release(0);
+	return ret;
+}
+
+static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
+ phys_addr_t va)
+{
+ return 0;
+}
+
+void msm_iommu_check_scm_call_avail(void)
+{
+ is_secure = qcom_scm_is_call_available(SCM_SVC_MP, IOMMU_SECURE_CFG);
+}
+
+int msm_iommu_get_scm_call_avail(void)
+{
+ return is_secure;
+}
+
+static struct iommu_ops msm_iommu_ops = {
+ .domain_alloc = msm_iommu_domain_alloc,
+ .domain_free = msm_iommu_domain_free,
+ .attach_dev = msm_iommu_attach_dev,
+ .detach_dev = msm_iommu_detach_dev,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+/* .map_range = msm_iommu_map_range,*/
+ .map_sg = default_iommu_map_sg,
+/* .unmap_range = msm_iommu_unmap_range,*/
+ .iova_to_phys = msm_iommu_iova_to_phys,
+ .pgsize_bitmap = MSM_IOMMU_PGSIZES,
+};
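
For context, these ops are exercised through the generic IOMMU API once the
bus is registered below. A hypothetical client sketch (helper name and error
handling are illustrative only):

	static int example_secure_map(struct device *dev, unsigned long iova,
				      phys_addr_t pa)
	{
		struct iommu_domain *dom;
		int ret;

		dom = iommu_domain_alloc(&msm_iommu_sec_bus_type);
		if (!dom)
			return -ENOMEM;

		ret = iommu_attach_device(dom, dev);
		if (!ret)
			/* secure mappings must be SZ_1M aligned, per above */
			ret = iommu_map(dom, iova, pa, SZ_1M,
					IOMMU_READ | IOMMU_WRITE);

		return ret;
	}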
+
+static int __init msm_iommu_sec_init(void)
+{
+ int ret;
+
+ ret = bus_register(&msm_iommu_sec_bus_type);
+ if (ret)
+ return ret;
+
+ ret = bus_set_iommu(&msm_iommu_sec_bus_type, &msm_iommu_ops);
+ if (ret) {
+ bus_unregister(&msm_iommu_sec_bus_type);
+ return ret;
+ }
+
+ ret = msm_iommu_sec_ptbl_init();
+
+ return ret;
+}
+
+subsys_initcall(msm_iommu_sec_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM SMMU Secure Driver");
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
index 6afc9fabd94c..63886e443a5e 100644
--- a/drivers/mfd/qcom_rpm.c
+++ b/drivers/mfd/qcom_rpm.c
@@ -373,6 +373,42 @@ static const struct of_device_id qcom_rpm_of_match[] = {
};
MODULE_DEVICE_TABLE(of, qcom_rpm_of_match);
+#define QCOM_RPM_STATUS_ID_SEQUENCE 7
+int qcom_rpm_read(struct qcom_rpm *rpm,
+ int resource,
+ u32 *val,
+ size_t count)
+{
+ const struct qcom_rpm_resource *res;
+ const struct qcom_rpm_data *data = rpm->data;
+	u32 seq_begin;
+	u32 seq_end;
+ int rc;
+ int i;
+
+ if (WARN_ON(resource < 0 || resource >= data->n_resources))
+ return -EINVAL;
+
+ res = &data->resource_table[resource];
+ if (WARN_ON(res->size != count))
+ return -EINVAL;
+
+ mutex_lock(&rpm->lock);
+ seq_begin = readl(RPM_STATUS_REG(rpm, QCOM_RPM_STATUS_ID_SEQUENCE));
+
+ for (i = 0; i < count; i++)
+ *val++ = readl(RPM_STATUS_REG(rpm, res->status_id));
+
+ seq_end = readl(RPM_STATUS_REG(rpm, QCOM_RPM_STATUS_ID_SEQUENCE));
+
+ rc = (seq_begin != seq_end || (seq_begin & 0x01)) ? -EBUSY : 0;
+
+ mutex_unlock(&rpm->lock);
+ return rc;
+}
+EXPORT_SYMBOL(qcom_rpm_read);
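
The sequence check above behaves like a seqlock: -EBUSY means the RPM was
updating its status words mid-read, so callers would typically retry. A
hypothetical sketch, assuming a resource whose status is one word:

	static int example_rpm_status_read(struct qcom_rpm *rpm, int resource,
					   u32 *val)
	{
		int tries = 5;
		int ret;

		do {
			ret = qcom_rpm_read(rpm, resource, val, 1);
		} while (ret == -EBUSY && --tries);

		return ret;
	}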
+
int qcom_rpm_write(struct qcom_rpm *rpm,
int state,
int resource,
diff --git a/drivers/mfd/ssbi.c b/drivers/mfd/ssbi.c
index 27986f641f7d..39734999fe40 100644
--- a/drivers/mfd/ssbi.c
+++ b/drivers/mfd/ssbi.c
@@ -330,7 +330,12 @@ static struct platform_driver ssbi_driver = {
.of_match_table = ssbi_match_table,
},
};
-module_platform_driver(ssbi_driver);
+
+static int ssbi_init(void)
+{
+ return platform_driver_register(&ssbi_driver);
+}
+subsys_initcall(ssbi_init);
MODULE_LICENSE("GPL v2");
MODULE_VERSION("1.0");
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index f2eeb38efa65..bc08c3b1335c 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -5,6 +5,8 @@
menuconfig MMC
tristate "MMC/SD/SDIO card support"
depends on HAS_IOMEM
+ select PWRSEQ_SIMPLE
+ select PWRSEQ_EMMC
help
This selects MultiMediaCard, Secure Digital and Secure
Digital I/O support.
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index f771bc3496a4..0525054f4078 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -26,3 +26,11 @@ config MMC_PARANOID_SD_INIT
about re-trying SD init requests. This can be a useful
work-around for buggy controllers and hardware. Enable
if you are experiencing issues with SD detection.
+
+config PWRSEQ_EMMC
+ tristate "PwrSeq EMMC"
+ depends on OF
+
+config PWRSEQ_SIMPLE
+ tristate "PwrSeq Simple"
+ depends on OF
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 2c25138f28b7..b281675edb92 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -8,5 +8,7 @@ mmc_core-y := core.o bus.o host.o \
sdio.o sdio_ops.o sdio_bus.o \
sdio_cis.o sdio_io.o sdio_irq.o \
quirks.o slot-gpio.o
-mmc_core-$(CONFIG_OF) += pwrseq.o pwrseq_simple.o pwrseq_emmc.o
+mmc_core-$(CONFIG_OF) += pwrseq.o
mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
+obj-$(CONFIG_PWRSEQ_SIMPLE) += pwrseq_simple.o
+obj-$(CONFIG_PWRSEQ_EMMC) += pwrseq_emmc.o
diff --git a/drivers/mmc/core/pwrseq.c b/drivers/mmc/core/pwrseq.c
index 4c1d1757dbf9..9af53a66e646 100644
--- a/drivers/mmc/core/pwrseq.c
+++ b/drivers/mmc/core/pwrseq.c
@@ -8,50 +8,28 @@
* MMC power sequence management
*/
#include <linux/kernel.h>
-#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
-
#include <linux/mmc/host.h>
-
#include "pwrseq.h"
-struct mmc_pwrseq_match {
- const char *compatible;
- struct mmc_pwrseq *(*alloc)(struct mmc_host *host, struct device *dev);
-};
-
-static struct mmc_pwrseq_match pwrseq_match[] = {
- {
- .compatible = "mmc-pwrseq-simple",
- .alloc = mmc_pwrseq_simple_alloc,
- }, {
- .compatible = "mmc-pwrseq-emmc",
- .alloc = mmc_pwrseq_emmc_alloc,
- },
-};
-
-static struct mmc_pwrseq_match *mmc_pwrseq_find(struct device_node *np)
+static DEFINE_MUTEX(pwrseq_list_mutex);
+static LIST_HEAD(pwrseq_list);
+
+static struct mmc_pwrseq *of_find_mmc_pwrseq(struct device_node *np)
{
- struct mmc_pwrseq_match *match = ERR_PTR(-ENODEV);
- int i;
+ struct mmc_pwrseq *p;
- for (i = 0; i < ARRAY_SIZE(pwrseq_match); i++) {
- if (of_device_is_compatible(np, pwrseq_match[i].compatible)) {
- match = &pwrseq_match[i];
- break;
- }
- }
+ list_for_each_entry(p, &pwrseq_list, list)
+ if (p->dev->of_node == np)
+ return p;
- return match;
+ return NULL;
}
int mmc_pwrseq_alloc(struct mmc_host *host)
{
- struct platform_device *pdev;
struct device_node *np;
- struct mmc_pwrseq_match *match;
struct mmc_pwrseq *pwrseq;
int ret = 0;
@@ -59,25 +37,18 @@ int mmc_pwrseq_alloc(struct mmc_host *host)
if (!np)
return 0;
- pdev = of_find_device_by_node(np);
- if (!pdev) {
- ret = -ENODEV;
- goto err;
- }
-
- match = mmc_pwrseq_find(np);
- if (IS_ERR(match)) {
- ret = PTR_ERR(match);
- goto err;
- }
+ pwrseq = of_find_mmc_pwrseq(np);
- pwrseq = match->alloc(host, &pdev->dev);
- if (IS_ERR(pwrseq)) {
- ret = PTR_ERR(pwrseq);
- goto err;
+	if (pwrseq && pwrseq->ops && pwrseq->ops->alloc) {
+		host->pwrseq = pwrseq;
+		ret = pwrseq->ops->alloc(host);
+		if (IS_ERR_VALUE(ret)) {
+			host->pwrseq = NULL;
+			goto err;
+		}
+		/* count the user only after the provider accepted the host */
+		pwrseq->users++;
	}
- host->pwrseq = pwrseq;
dev_info(host->parent, "allocated mmc-pwrseq\n");
err:
@@ -116,5 +87,32 @@ void mmc_pwrseq_free(struct mmc_host *host)
if (pwrseq && pwrseq->ops && pwrseq->ops->free)
pwrseq->ops->free(host);
+	if (pwrseq)
+		pwrseq->users--;
host->pwrseq = NULL;
}
+
+int mmc_pwrseq_register(struct mmc_pwrseq *pwrseq)
+{
+ if (!pwrseq || !pwrseq->ops || !pwrseq->dev)
+ return -EINVAL;
+
+ mutex_lock(&pwrseq_list_mutex);
+ list_add(&pwrseq->list, &pwrseq_list);
+ mutex_unlock(&pwrseq_list_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mmc_pwrseq_register);
+
+int mmc_pwrseq_unregister(struct mmc_pwrseq *pwrseq)
+{
+ if (!pwrseq->users) {
+ mutex_lock(&pwrseq_list_mutex);
+ list_del(&pwrseq->list);
+ mutex_unlock(&pwrseq_list_mutex);
+ return 0;
+ }
+
+ return -EBUSY;
+}
+EXPORT_SYMBOL_GPL(mmc_pwrseq_unregister);
diff --git a/drivers/mmc/core/pwrseq.h b/drivers/mmc/core/pwrseq.h
index 096da48c6a7e..462c0abfb651 100644
--- a/drivers/mmc/core/pwrseq.h
+++ b/drivers/mmc/core/pwrseq.h
@@ -8,7 +8,10 @@
#ifndef _MMC_CORE_PWRSEQ_H
#define _MMC_CORE_PWRSEQ_H
+#include <linux/mmc/host.h>
+
struct mmc_pwrseq_ops {
+ int (*alloc)(struct mmc_host *host);
void (*pre_power_on)(struct mmc_host *host);
void (*post_power_on)(struct mmc_host *host);
void (*power_off)(struct mmc_host *host);
@@ -17,23 +20,35 @@ struct mmc_pwrseq_ops {
struct mmc_pwrseq {
struct mmc_pwrseq_ops *ops;
+ struct device *dev;
+ struct list_head list;
+ int users;
};
#ifdef CONFIG_OF
+int mmc_pwrseq_register(struct mmc_pwrseq *pwrseq);
+int mmc_pwrseq_unregister(struct mmc_pwrseq *pwrseq);
+
int mmc_pwrseq_alloc(struct mmc_host *host);
void mmc_pwrseq_pre_power_on(struct mmc_host *host);
void mmc_pwrseq_post_power_on(struct mmc_host *host);
void mmc_pwrseq_power_off(struct mmc_host *host);
void mmc_pwrseq_free(struct mmc_host *host);
-struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
- struct device *dev);
-struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
- struct device *dev);
-
#else
+static inline int mmc_pwrseq_register(struct mmc_pwrseq *pwrseq)
+{
+ return -ENOSYS;
+}
+
+static inline int mmc_pwrseq_unregister(struct mmc_pwrseq *pwrseq)
+{
+ return -ENOSYS;
+}
+
static inline int mmc_pwrseq_alloc(struct mmc_host *host) { return 0; }
static inline void mmc_pwrseq_pre_power_on(struct mmc_host *host) {}
static inline void mmc_pwrseq_post_power_on(struct mmc_host *host) {}
diff --git a/drivers/mmc/core/pwrseq_emmc.c b/drivers/mmc/core/pwrseq_emmc.c
index 9d6d2fb21796..0b12bd79ba8e 100644
--- a/drivers/mmc/core/pwrseq_emmc.c
+++ b/drivers/mmc/core/pwrseq_emmc.c
@@ -9,6 +9,8 @@
*/
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -16,7 +18,6 @@
#include <linux/reboot.h>
#include <linux/mmc/host.h>
-
#include "pwrseq.h"
struct mmc_pwrseq_emmc {
@@ -25,6 +26,8 @@ struct mmc_pwrseq_emmc {
struct gpio_desc *reset_gpio;
};
+#define to_pwrseq_emmc(p) container_of(p, struct mmc_pwrseq_emmc, pwrseq)
+
static void __mmc_pwrseq_emmc_reset(struct mmc_pwrseq_emmc *pwrseq)
{
gpiod_set_value(pwrseq->reset_gpio, 1);
@@ -35,52 +38,37 @@ static void __mmc_pwrseq_emmc_reset(struct mmc_pwrseq_emmc *pwrseq)
static void mmc_pwrseq_emmc_reset(struct mmc_host *host)
{
- struct mmc_pwrseq_emmc *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_emmc, pwrseq);
+ struct mmc_pwrseq_emmc *pwrseq = to_pwrseq_emmc(host->pwrseq);
__mmc_pwrseq_emmc_reset(pwrseq);
}
static void mmc_pwrseq_emmc_free(struct mmc_host *host)
{
- struct mmc_pwrseq_emmc *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_emmc, pwrseq);
+ struct mmc_pwrseq_emmc *pwrseq = to_pwrseq_emmc(host->pwrseq);
unregister_restart_handler(&pwrseq->reset_nb);
gpiod_put(pwrseq->reset_gpio);
- kfree(pwrseq);
}
-static struct mmc_pwrseq_ops mmc_pwrseq_emmc_ops = {
- .post_power_on = mmc_pwrseq_emmc_reset,
- .free = mmc_pwrseq_emmc_free,
-};
-
static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
unsigned long mode, void *cmd)
{
struct mmc_pwrseq_emmc *pwrseq = container_of(this,
- struct mmc_pwrseq_emmc, reset_nb);
+ struct mmc_pwrseq_emmc, reset_nb);
__mmc_pwrseq_emmc_reset(pwrseq);
return NOTIFY_DONE;
}
-struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
- struct device *dev)
+static int mmc_pwrseq_emmc_alloc(struct mmc_host *host)
{
- struct mmc_pwrseq_emmc *pwrseq;
- int ret = 0;
-
- pwrseq = kzalloc(sizeof(struct mmc_pwrseq_emmc), GFP_KERNEL);
- if (!pwrseq)
- return ERR_PTR(-ENOMEM);
-
- pwrseq->reset_gpio = gpiod_get_index(dev, "reset", 0, GPIOD_OUT_LOW);
- if (IS_ERR(pwrseq->reset_gpio)) {
- ret = PTR_ERR(pwrseq->reset_gpio);
- goto free;
- }
+ struct mmc_pwrseq_emmc *pwrseq = to_pwrseq_emmc(host->pwrseq);
+
+ pwrseq->reset_gpio = gpiod_get_index(host->pwrseq->dev,
+ "reset", 0, GPIOD_OUT_LOW);
+ if (IS_ERR(pwrseq->reset_gpio))
+ return PTR_ERR(pwrseq->reset_gpio);
/*
* register reset handler to ensure emmc reset also from
@@ -91,10 +79,53 @@ struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
pwrseq->reset_nb.priority = 129;
register_restart_handler(&pwrseq->reset_nb);
+ return 0;
+}
+
+static struct mmc_pwrseq_ops mmc_pwrseq_emmc_ops = {
+ .alloc = mmc_pwrseq_emmc_alloc,
+ .post_power_on = mmc_pwrseq_emmc_reset,
+ .free = mmc_pwrseq_emmc_free,
+};
+
+static int mmc_pwrseq_emmc_probe(struct platform_device *pdev)
+{
+ struct mmc_pwrseq_emmc *pwrseq;
+ struct device *dev = &pdev->dev;
+
+ pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
+ if (!pwrseq)
+ return -ENOMEM;
+
pwrseq->pwrseq.ops = &mmc_pwrseq_emmc_ops;
+ pwrseq->pwrseq.dev = dev;
+
+ platform_set_drvdata(pdev, pwrseq);
- return &pwrseq->pwrseq;
-free:
- kfree(pwrseq);
- return ERR_PTR(ret);
+ return mmc_pwrseq_register(&pwrseq->pwrseq);
}
+
+static int mmc_pwrseq_emmc_remove(struct platform_device *pdev)
+{
+	struct mmc_pwrseq_emmc *pwrseq = platform_get_drvdata(pdev);
+
+	return mmc_pwrseq_unregister(&pwrseq->pwrseq);
+}
+
+static const struct of_device_id mmc_pwrseq_emmc_of_match[] = {
+ { .compatible = "mmc-pwrseq-emmc",},
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, mmc_pwrseq_emmc_of_match);
+
+static struct platform_driver mmc_pwrseq_emmc_driver = {
+ .probe = mmc_pwrseq_emmc_probe,
+ .remove = mmc_pwrseq_emmc_remove,
+ .driver = {
+ .name = "pwrseq_emmc",
+ .of_match_table = mmc_pwrseq_emmc_of_match,
+ },
+};
+
+module_platform_driver(mmc_pwrseq_emmc_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index 0b14b83a53d6..f2a7613fd1e5 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk.h>
#include <linux/kernel.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -27,6 +28,8 @@ struct mmc_pwrseq_simple {
struct gpio_desc *reset_gpios[0];
};
+#define to_pwrseq_simple(p) container_of(p, struct mmc_pwrseq_simple, pwrseq)
+
static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
int value)
{
@@ -39,8 +42,7 @@ static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
static void mmc_pwrseq_simple_pre_power_on(struct mmc_host *host)
{
- struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_simple, pwrseq);
+ struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
if (!IS_ERR(pwrseq->ext_clk) && !pwrseq->clk_enabled) {
clk_prepare_enable(pwrseq->ext_clk);
@@ -52,16 +54,14 @@ static void mmc_pwrseq_simple_pre_power_on(struct mmc_host *host)
static void mmc_pwrseq_simple_post_power_on(struct mmc_host *host)
{
- struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_simple, pwrseq);
+ struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
mmc_pwrseq_simple_set_gpios_value(pwrseq, 0);
}
static void mmc_pwrseq_simple_power_off(struct mmc_host *host)
{
- struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_simple, pwrseq);
+ struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
mmc_pwrseq_simple_set_gpios_value(pwrseq, 1);
@@ -73,8 +73,7 @@ static void mmc_pwrseq_simple_power_off(struct mmc_host *host)
static void mmc_pwrseq_simple_free(struct mmc_host *host)
{
- struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_simple, pwrseq);
+ struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
int i;
for (i = 0; i < pwrseq->nr_gpios; i++)
@@ -84,39 +83,22 @@ static void mmc_pwrseq_simple_free(struct mmc_host *host)
if (!IS_ERR(pwrseq->ext_clk))
clk_put(pwrseq->ext_clk);
- kfree(pwrseq);
}
-static struct mmc_pwrseq_ops mmc_pwrseq_simple_ops = {
- .pre_power_on = mmc_pwrseq_simple_pre_power_on,
- .post_power_on = mmc_pwrseq_simple_post_power_on,
- .power_off = mmc_pwrseq_simple_power_off,
- .free = mmc_pwrseq_simple_free,
-};
-
-struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
- struct device *dev)
+static int mmc_pwrseq_simple_alloc(struct mmc_host *host)
{
- struct mmc_pwrseq_simple *pwrseq;
- int i, nr_gpios, ret = 0;
-
- nr_gpios = of_gpio_named_count(dev->of_node, "reset-gpios");
- if (nr_gpios < 0)
- nr_gpios = 0;
-
- pwrseq = kzalloc(sizeof(struct mmc_pwrseq_simple) + nr_gpios *
- sizeof(struct gpio_desc *), GFP_KERNEL);
- if (!pwrseq)
- return ERR_PTR(-ENOMEM);
+ struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
+ struct device *dev = host->pwrseq->dev;
+	int i;
pwrseq->ext_clk = clk_get(dev, "ext_clock");
if (IS_ERR(pwrseq->ext_clk) &&
PTR_ERR(pwrseq->ext_clk) != -ENOENT) {
- ret = PTR_ERR(pwrseq->ext_clk);
- goto free;
+		return PTR_ERR(pwrseq->ext_clk);
}
- for (i = 0; i < nr_gpios; i++) {
+ for (i = 0; i < pwrseq->nr_gpios; i++) {
pwrseq->reset_gpios[i] = gpiod_get_index(dev, "reset", i,
GPIOD_OUT_HIGH);
if (IS_ERR(pwrseq->reset_gpios[i]) &&
@@ -127,18 +109,70 @@ struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
while (i--)
gpiod_put(pwrseq->reset_gpios[i]);
- goto clk_put;
+ if (!IS_ERR(pwrseq->ext_clk))
+ clk_put(pwrseq->ext_clk);
+
+ return -EINVAL;
}
}
- pwrseq->nr_gpios = nr_gpios;
- pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops;
- return &pwrseq->pwrseq;
-clk_put:
- if (!IS_ERR(pwrseq->ext_clk))
- clk_put(pwrseq->ext_clk);
-free:
- kfree(pwrseq);
- return ERR_PTR(ret);
+ return 0;
+}
+
+static struct mmc_pwrseq_ops mmc_pwrseq_simple_ops = {
+ .alloc = mmc_pwrseq_simple_alloc,
+ .pre_power_on = mmc_pwrseq_simple_pre_power_on,
+ .post_power_on = mmc_pwrseq_simple_post_power_on,
+ .power_off = mmc_pwrseq_simple_power_off,
+ .free = mmc_pwrseq_simple_free,
+};
+
+static const struct of_device_id mmc_pwrseq_simple_of_match[] = {
+ { .compatible = "mmc-pwrseq-simple",},
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, mmc_pwrseq_simple_of_match);
+
+static int mmc_pwrseq_simple_probe(struct platform_device *pdev)
+{
+ struct mmc_pwrseq_simple *spwrseq;
+ struct device *dev = &pdev->dev;
+ int nr_gpios;
+
+ nr_gpios = of_gpio_named_count(dev->of_node, "reset-gpios");
+ if (nr_gpios < 0)
+ nr_gpios = 0;
+
+ spwrseq = devm_kzalloc(dev, sizeof(struct mmc_pwrseq_simple) + nr_gpios *
+ sizeof(struct gpio_desc *), GFP_KERNEL);
+ if (!spwrseq)
+ return -ENOMEM;
+
+ spwrseq->pwrseq.dev = dev;
+ spwrseq->nr_gpios = nr_gpios;
+ spwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops;
+
+ platform_set_drvdata(pdev, spwrseq);
+
+ return mmc_pwrseq_register(&spwrseq->pwrseq);
}
+
+static int mmc_pwrseq_simple_remove(struct platform_device *pdev)
+{
+ struct mmc_pwrseq_simple *spwrseq = platform_get_drvdata(pdev);
+
+ return mmc_pwrseq_unregister(&spwrseq->pwrseq);
+}
+
+static struct platform_driver mmc_pwrseq_simple_driver = {
+ .probe = mmc_pwrseq_simple_probe,
+ .remove = mmc_pwrseq_simple_remove,
+ .driver = {
+ .name = "pwrseq_simple",
+ .of_match_table = mmc_pwrseq_simple_of_match,
+ },
+};
+
+module_platform_driver(mmc_pwrseq_simple_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index fb266745f824..70805ba9577c 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -78,6 +78,7 @@ static unsigned int fmax = 515633;
* @qcom_fifo: enables qcom specific fifo pio read logic.
* @qcom_dml: enables qcom specific dma glue for dma transfers.
* @reversed_irq_handling: handle data irq before cmd irq.
+ * @any_blksize: true if any block sizes are supported
*/
struct variant_data {
unsigned int clkreg;
@@ -104,6 +105,7 @@ struct variant_data {
bool qcom_fifo;
bool qcom_dml;
bool reversed_irq_handling;
+ bool any_blksize;
};
static struct variant_data variant_arm = {
@@ -200,6 +202,7 @@ static struct variant_data variant_ux500v2 = {
.pwrreg_clkgate = true,
.busy_detect = true,
.pwrreg_nopower = true,
+ .any_blksize = true,
};
static struct variant_data variant_qcom = {
@@ -218,6 +221,7 @@ static struct variant_data variant_qcom = {
.explicit_mclk_control = true,
.qcom_fifo = true,
.qcom_dml = true,
+ .any_blksize = true,
};
static int mmci_card_busy(struct mmc_host *mmc)
@@ -245,10 +249,11 @@ static int mmci_card_busy(struct mmc_host *mmc)
static int mmci_validate_data(struct mmci_host *host,
struct mmc_data *data)
{
+ struct variant_data *variant = host->variant;
+
if (!data)
return 0;
-
- if (!is_power_of_2(data->blksz)) {
+ if (!is_power_of_2(data->blksz) && !variant->any_blksize) {
dev_err(mmc_dev(host->mmc),
"unsupported block size (%d bytes)\n", data->blksz);
return -EINVAL;
@@ -804,7 +809,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
writel(host->size, base + MMCIDATALENGTH);
blksz_bits = ffs(data->blksz) - 1;
- BUG_ON(1 << blksz_bits != data->blksz);
if (variant->blksz_datactrl16)
datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 4bcee033feda..4fbbc9712e6a 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -522,6 +522,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
/* Set HC_MODE_EN bit in HC_MODE register */
writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
+ host->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET;
host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index fbc7efdddcb5..c21801a4556d 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1621,10 +1621,6 @@ static int sdhci_do_get_cd(struct sdhci_host *host)
if (host->flags & SDHCI_DEVICE_DEAD)
return 0;
- /* If nonremovable, assume that the card is always present. */
- if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
- return 1;
-
/*
* Try slot gpio detect, if defined it take precedence
* over build in controller functionality
@@ -1632,8 +1628,9 @@ static int sdhci_do_get_cd(struct sdhci_host *host)
if (!IS_ERR_VALUE(gpio_cd))
return !!gpio_cd;
- /* If polling, assume that the card is always present. */
- if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+ /* If polling/nonremovable, assume that the card is always present. */
+ if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
+ (host->mmc->caps & MMC_CAP_NONREMOVABLE))
return 1;
/* Host native card detect */
diff --git a/drivers/net/wireless/ath/wcn36xx/Makefile b/drivers/net/wireless/ath/wcn36xx/Makefile
index 50c43b4382ba..9f6370f0cabc 100644
--- a/drivers/net/wireless/ath/wcn36xx/Makefile
+++ b/drivers/net/wireless/ath/wcn36xx/Makefile
@@ -1,7 +1,10 @@
-obj-$(CONFIG_WCN36XX) := wcn36xx.o
+obj-$(CONFIG_WCN36XX) := wcn36xx.o wcn36xx-platform.o
wcn36xx-y += main.o \
dxe.o \
txrx.o \
smd.o \
pmc.o \
debug.o
+
+wcn36xx-platform-y += wcn36xx-msm.o \
+ wcnss_core.o
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index 086549b732b9..196c73fe8b27 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -46,7 +46,7 @@ static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
#define wcn36xx_dxe_write_register_x(wcn, reg, reg_data) \
do { \
- if (wcn->chip_version == WCN36XX_CHIP_3680) \
+ if (wcn->chip_version != WCN36XX_CHIP_3660) \
wcn36xx_dxe_write_register(wcn, reg ## _3680, reg_data); \
else \
wcn36xx_dxe_write_register(wcn, reg ## _3660, reg_data); \
@@ -79,6 +79,7 @@ static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
struct wcn36xx_dxe_ctl *cur_ctl = NULL;
int i;
+ spin_lock_init(&ch->lock);
for (i = 0; i < ch->desc_num; i++) {
cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
if (!cur_ctl)
@@ -169,7 +170,7 @@ void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
}
-static int wcn36xx_dxe_init_descs(struct wcn36xx_dxe_ch *wcn_ch)
+static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
{
struct wcn36xx_dxe_desc *cur_dxe = NULL;
struct wcn36xx_dxe_desc *prev_dxe = NULL;
@@ -178,7 +179,7 @@ static int wcn36xx_dxe_init_descs(struct wcn36xx_dxe_ch *wcn_ch)
int i;
size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
- wcn_ch->cpu_addr = dma_alloc_coherent(NULL, size, &wcn_ch->dma_addr,
+ wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
GFP_KERNEL);
if (!wcn_ch->cpu_addr)
return -ENOMEM;
@@ -270,7 +271,7 @@ static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
return 0;
}
-static int wcn36xx_dxe_fill_skb(struct wcn36xx_dxe_ctl *ctl)
+static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl)
{
struct wcn36xx_dxe_desc *dxe = ctl->desc;
struct sk_buff *skb;
@@ -279,7 +280,7 @@ static int wcn36xx_dxe_fill_skb(struct wcn36xx_dxe_ctl *ctl)
if (skb == NULL)
return -ENOMEM;
- dxe->dst_addr_l = dma_map_single(NULL,
+ dxe->dst_addr_l = dma_map_single(dev,
skb_tail_pointer(skb),
WCN36XX_PKT_SIZE,
DMA_FROM_DEVICE);
@@ -297,7 +298,7 @@ static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
cur_ctl = wcn_ch->head_blk_ctl;
for (i = 0; i < wcn_ch->desc_num; i++) {
- wcn36xx_dxe_fill_skb(cur_ctl);
+ wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl);
cur_ctl = cur_ctl->next;
}
@@ -345,7 +346,7 @@ void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
{
- struct wcn36xx_dxe_ctl *ctl = ch->tail_blk_ctl;
+ struct wcn36xx_dxe_ctl *ctl;
struct ieee80211_tx_info *info;
unsigned long flags;
@@ -354,23 +355,25 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
* completely full head and tail are pointing to the same element
* and while-do will not make any cycles.
*/
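+	/* ch->lock serializes ring reaping against wcn36xx_dxe_tx_frame() */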
+ spin_lock_irqsave(&ch->lock, flags);
+ ctl = ch->tail_blk_ctl;
do {
if (ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)
break;
if (ctl->skb) {
- dma_unmap_single(NULL, ctl->desc->src_addr_l,
+ dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
ctl->skb->len, DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(ctl->skb);
if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
/* Keep frame until TX status comes */
ieee80211_free_txskb(wcn->hw, ctl->skb);
}
- spin_lock_irqsave(&ctl->skb_lock, flags);
+ spin_lock(&ctl->skb_lock);
if (wcn->queues_stopped) {
wcn->queues_stopped = false;
ieee80211_wake_queues(wcn->hw);
}
- spin_unlock_irqrestore(&ctl->skb_lock, flags);
+ spin_unlock(&ctl->skb_lock);
ctl->skb = NULL;
}
@@ -379,6 +382,7 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
!(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK));
ch->tail_blk_ctl = ctl;
+ spin_unlock_irqrestore(&ch->lock, flags);
}
static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
@@ -474,7 +478,7 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
skb = ctl->skb;
dma_addr = dxe->dst_addr_l;
- wcn36xx_dxe_fill_skb(ctl);
+ wcn36xx_dxe_fill_skb(wcn->dev, ctl);
switch (ch->ch_type) {
case WCN36XX_DXE_CH_RX_L:
@@ -491,7 +495,7 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
wcn36xx_warn("Unknown channel\n");
}
- dma_unmap_single(NULL, dma_addr, WCN36XX_PKT_SIZE,
+ dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
DMA_FROM_DEVICE);
wcn36xx_rx_skb(wcn, skb);
ctl = ctl->next;
@@ -540,7 +544,7 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
16 - (WCN36XX_BD_CHUNK_SIZE % 8);
s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
- cpu_addr = dma_alloc_coherent(NULL, s, &wcn->mgmt_mem_pool.phy_addr,
+ cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->mgmt_mem_pool.phy_addr,
GFP_KERNEL);
if (!cpu_addr)
goto out_err;
@@ -555,7 +559,7 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
16 - (WCN36XX_BD_CHUNK_SIZE % 8);
s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
- cpu_addr = dma_alloc_coherent(NULL, s, &wcn->data_mem_pool.phy_addr,
+ cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->data_mem_pool.phy_addr,
GFP_KERNEL);
if (!cpu_addr)
goto out_err;
@@ -574,13 +578,13 @@ out_err:
void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
{
if (wcn->mgmt_mem_pool.virt_addr)
- dma_free_coherent(NULL, wcn->mgmt_mem_pool.chunk_size *
+ dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
WCN36XX_DXE_CH_DESC_NUMB_TX_H,
wcn->mgmt_mem_pool.virt_addr,
wcn->mgmt_mem_pool.phy_addr);
if (wcn->data_mem_pool.virt_addr) {
- dma_free_coherent(NULL, wcn->data_mem_pool.chunk_size *
+ dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
WCN36XX_DXE_CH_DESC_NUMB_TX_L,
wcn->data_mem_pool.virt_addr,
wcn->data_mem_pool.phy_addr);
@@ -596,12 +600,14 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
struct wcn36xx_dxe_desc *desc = NULL;
struct wcn36xx_dxe_ch *ch = NULL;
unsigned long flags;
+ int ret;
ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;
+ spin_lock_irqsave(&ch->lock, flags);
ctl = ch->head_blk_ctl;
- spin_lock_irqsave(&ctl->next->skb_lock, flags);
+ spin_lock(&ctl->next->skb_lock);
/*
* If skb is not null that means that we reached the tail of the ring
@@ -611,10 +617,11 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
if (NULL != ctl->next->skb) {
ieee80211_stop_queues(wcn->hw);
wcn->queues_stopped = true;
- spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
+ spin_unlock(&ctl->next->skb_lock);
+ spin_unlock_irqrestore(&ch->lock, flags);
return -EBUSY;
}
- spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
+ spin_unlock(&ctl->next->skb_lock);
ctl->skb = NULL;
desc = ctl->desc;
@@ -640,10 +647,11 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
desc = ctl->desc;
if (ctl->bd_cpu_addr) {
wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto unlock;
}
- desc->src_addr_l = dma_map_single(NULL,
+ desc->src_addr_l = dma_map_single(wcn->dev,
ctl->skb->data,
ctl->skb->len,
DMA_TO_DEVICE);
@@ -679,7 +687,10 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
ch->reg_ctrl, ch->def_ctrl);
}
- return 0;
+ ret = 0;
+unlock:
+ spin_unlock_irqrestore(&ch->lock, flags);
+ return ret;
}
int wcn36xx_dxe_init(struct wcn36xx *wcn)
@@ -696,7 +707,7 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
/***************************************/
/* Init descriptors for TX LOW channel */
/***************************************/
- wcn36xx_dxe_init_descs(&wcn->dxe_tx_l_ch);
+ wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);
/* Write channel head to a NEXT register */
@@ -714,7 +725,7 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
/***************************************/
/* Init descriptors for TX HIGH channel */
/***************************************/
- wcn36xx_dxe_init_descs(&wcn->dxe_tx_h_ch);
+ wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);
/* Write channel head to a NEXT register */
@@ -734,7 +745,7 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
/***************************************/
/* Init descriptors for RX LOW channel */
/***************************************/
- wcn36xx_dxe_init_descs(&wcn->dxe_rx_l_ch);
+ wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);
	/* For RX we need to preallocate buffers */
wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
@@ -764,7 +775,7 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
/***************************************/
/* Init descriptors for RX HIGH channel */
/***************************************/
- wcn36xx_dxe_init_descs(&wcn->dxe_rx_h_ch);
+ wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);
	/* For RX we need to preallocate buffers */
wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h
index 35ee7e966bd2..f869fd3a9951 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.h
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.h
@@ -243,6 +243,7 @@ struct wcn36xx_dxe_ctl {
};
struct wcn36xx_dxe_ch {
+ spinlock_t lock;
enum wcn36xx_dxe_ch_type ch_type;
void *cpu_addr;
dma_addr_t dma_addr;
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index a1f1127d7808..df4425d410e6 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -345,6 +345,10 @@ enum wcn36xx_hal_host_msg_type {
WCN36XX_HAL_DHCP_START_IND = 189,
WCN36XX_HAL_DHCP_STOP_IND = 190,
+ WCN36XX_HAL_AVOID_FREQ_RANGE_IND = 233,
+
+ WCN36XX_HAL_PRINT_REG_INFO_IND = 259,
+
WCN36XX_HAL_MSG_MAX = WCN36XX_HAL_MSG_TYPE_MAX_ENUM_SIZE
};
@@ -4381,6 +4385,20 @@ enum place_holder_in_cap_bitmap {
RTT = 20,
RATECTRL = 21,
WOW = 22,
+ WLAN_ROAM_SCAN_OFFLOAD = 23,
+ SPECULATIVE_PS_POLL = 24,
+ SCAN_SCH = 25,
+ IBSS_HEARTBEAT_OFFLOAD = 26,
+ WLAN_SCAN_OFFLOAD = 27,
+ WLAN_PERIODIC_TX_PTRN = 28,
+ ADVANCE_TDLS = 29,
+ BATCH_SCAN = 30,
+ FW_IN_TX_PATH = 31,
+ EXTENDED_NSOFFLOAD_SLOT = 32,
+ CH_SWITCH_V1 = 33,
+ HT40_OBSS_SCAN = 34,
+ UPDATE_CHANNEL_LIST = 35,
+
MAX_FEATURE_SUPPORTED = 128,
};
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 900e72a089d8..bbd4eca5e9f1 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -201,7 +201,21 @@ static const char * const wcn36xx_caps_names[] = {
"BCN_FILTER", /* 19 */
"RTT", /* 20 */
"RATECTRL", /* 21 */
- "WOW" /* 22 */
+ "WOW", /* 22 */
+ "WLAN_ROAM_SCAN_OFFLOAD", /* 23 */
+ "SPECULATIVE_PS_POLL", /* 24 */
+ "SCAN_SCH", /* 25 */
+ "IBSS_HEARTBEAT_OFFLOAD", /* 26 */
+ "WLAN_SCAN_OFFLOAD", /* 27 */
+ "WLAN_PERIODIC_TX_PTRN", /* 28 */
+ "ADVANCE_TDLS", /* 29 */
+ "BATCH_SCAN", /* 30 */
+ "FW_IN_TX_PATH", /* 31 */
+ "EXTENDED_NSOFFLOAD_SLOT", /* 32 */
+ "CH_SWITCH_V1", /* 33 */
+ "HT40_OBSS_SCAN", /* 34 */
+ "UPDATE_CHANNEL_LIST", /* 35 */
};
static const char *wcn36xx_get_cap_name(enum place_holder_in_cap_bitmap x)
@@ -221,17 +235,6 @@ static void wcn36xx_feat_caps_info(struct wcn36xx *wcn)
}
}
-static void wcn36xx_detect_chip_version(struct wcn36xx *wcn)
-{
- if (get_feat_caps(wcn->fw_feat_caps, DOT11AC)) {
- wcn36xx_info("Chip is 3680\n");
- wcn->chip_version = WCN36XX_CHIP_3680;
- } else {
- wcn36xx_info("Chip is 3660\n");
- wcn->chip_version = WCN36XX_CHIP_3660;
- }
-}
-
static int wcn36xx_start(struct ieee80211_hw *hw)
{
struct wcn36xx *wcn = hw->priv;
@@ -286,8 +289,6 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
wcn36xx_feat_caps_info(wcn);
}
- wcn36xx_detect_chip_version(wcn);
-
/* DMA channel initialization */
ret = wcn36xx_dxe_init(wcn);
if (ret) {
@@ -951,6 +952,10 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
ieee80211_hw_set(wcn->hw, SIGNAL_DBM);
ieee80211_hw_set(wcn->hw, HAS_RATE_CONTROL);
+ /* 3620 powersaving currently unstable */
+ if (wcn->chip_version == WCN36XX_CHIP_3620)
+ __clear_bit(IEEE80211_HW_SUPPORTS_PS, wcn->hw->flags);
+
wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_ADHOC) |
@@ -1036,11 +1041,25 @@ static int wcn36xx_probe(struct platform_device *pdev)
wcn = hw->priv;
wcn->hw = hw;
wcn->dev = &pdev->dev;
- wcn->ctrl_ops = pdev->dev.platform_data;
+ wcn->dev->dma_mask = kzalloc(sizeof(*wcn->dev->dma_mask), GFP_KERNEL);
+ if (!wcn->dev->dma_mask) {
+ ret = -ENOMEM;
+ goto dma_mask_err;
+ }
+ dma_set_mask_and_coherent(wcn->dev, DMA_BIT_MASK(32));
+ wcn->wcn36xx_data = pdev->dev.platform_data;
+ wcn->ctrl_ops = &wcn->wcn36xx_data->ctrl_ops;
+ wcn->wcn36xx_data->wcn = wcn;
+ if (!wcn->ctrl_ops->get_chip_type) {
+ dev_err(&pdev->dev, "Missing ops->get_chip_type\n");
+ ret = -EINVAL;
+ goto out_wq;
+ }
+ wcn->chip_version = wcn->ctrl_ops->get_chip_type(wcn);
mutex_init(&wcn->hal_mutex);
- if (!wcn->ctrl_ops->get_hw_mac(addr)) {
+ if (!wcn->ctrl_ops->get_hw_mac(wcn, addr)) {
wcn36xx_info("mac address: %pM\n", addr);
SET_IEEE80211_PERM_ADDR(wcn->hw, addr);
}
@@ -1059,6 +1078,8 @@ static int wcn36xx_probe(struct platform_device *pdev)
out_unmap:
iounmap(wcn->mmio);
out_wq:
+ kfree(wcn->dev->dma_mask);
+dma_mask_err:
ieee80211_free_hw(hw);
out_err:
return ret;
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index c9263e1c75d4..57cdeeee9dad 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -253,7 +253,7 @@ static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
init_completion(&wcn->hal_rsp_compl);
start = jiffies;
- ret = wcn->ctrl_ops->tx(wcn->hal_buf, len);
+ ret = wcn->ctrl_ops->tx(wcn, wcn->hal_buf, len);
if (ret) {
wcn36xx_err("HAL TX failed\n");
goto out;
@@ -302,6 +302,23 @@ static int wcn36xx_smd_rsp_status_check(void *buf, size_t len)
return 0;
}
+static int wcn36xx_smd_rsp_status_check_v2(struct wcn36xx *wcn, void *buf,
+ size_t len)
+{
+ struct wcn36xx_fw_msg_status_rsp_v2 *rsp;
+
+ if (wcn->chip_version != WCN36XX_CHIP_3620 ||
+ len < sizeof(struct wcn36xx_hal_msg_header) + sizeof(*rsp))
+ return wcn36xx_smd_rsp_status_check(buf, len);
+
+ rsp = buf + sizeof(struct wcn36xx_hal_msg_header);
+
+ if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status)
+ return rsp->status;
+
+ return 0;
+}
+
int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
{
struct nv_data *nv_d;
@@ -1582,7 +1599,8 @@ int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
wcn36xx_err("Sending hal_remove_bsskey failed\n");
goto out;
}
- ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ ret = wcn36xx_smd_rsp_status_check_v2(wcn, wcn->hal_buf,
+ wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_remove_bsskey response failed err=%d\n", ret);
goto out;
@@ -1951,7 +1969,8 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
wcn36xx_err("Sending hal_trigger_ba failed\n");
goto out;
}
- ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ ret = wcn36xx_smd_rsp_status_check_v2(wcn, wcn->hal_buf,
+ wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_trigger_ba response failed err=%d\n", ret);
goto out;
@@ -2128,6 +2147,10 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
complete(&wcn->hal_rsp_compl);
break;
+ case WCN36XX_HAL_DEL_BA_IND:
+ case WCN36XX_HAL_PRINT_REG_INFO_IND:
+ case WCN36XX_HAL_COEX_IND:
+ case WCN36XX_HAL_AVOID_FREQ_RANGE_IND:
case WCN36XX_HAL_OTA_TX_COMPL_IND:
case WCN36XX_HAL_MISSED_BEACON_IND:
case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
@@ -2174,6 +2197,9 @@ static void wcn36xx_ind_smd_work(struct work_struct *work)
msg_header = (struct wcn36xx_hal_msg_header *)hal_ind_msg->msg;
switch (msg_header->msg_type) {
+ case WCN36XX_HAL_COEX_IND:
+ case WCN36XX_HAL_AVOID_FREQ_RANGE_IND:
+ break;
case WCN36XX_HAL_OTA_TX_COMPL_IND:
wcn36xx_smd_tx_compl_ind(wcn,
hal_ind_msg->msg,
@@ -2227,7 +2253,7 @@ out:
void wcn36xx_smd_close(struct wcn36xx *wcn)
{
- wcn->ctrl_ops->close();
+ wcn->ctrl_ops->close(wcn);
destroy_workqueue(wcn->hal_ind_wq);
mutex_destroy(&wcn->hal_ind_mutex);
}
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index 008d03423dbf..8361f9e3995b 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -44,6 +44,15 @@ struct wcn36xx_fw_msg_status_rsp {
u32 status;
} __packed;
+/* wcn3620 returns this for trigger_ba and remove_bsskey */
+
+struct wcn36xx_fw_msg_status_rsp_v2 {
+ u8 bss_id[6];
+ u32 status __packed;
+ u16 count_following_candidates __packed;
+ /* candidate list follows */
+};
+
struct wcn36xx_hal_ind_msg {
struct list_head list;
u8 *msg;
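Because the two trailing members above are __packed, the v2 status response is a 12-byte blob after the HAL header, with status at offset 6 and the candidate count at offset 10; the length check in wcn36xx_smd_rsp_status_check_v2() relies on exactly this layout. A small standalone check (GCC/Clang attribute syntax):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fw_msg_status_rsp_v2 {
        uint8_t  bss_id[6];
        uint32_t status __attribute__((packed));
        uint16_t count_following_candidates __attribute__((packed));
};

int main(void)
{
        /* 6 + 4 + 2 = 12 bytes, no padding around the misaligned members */
        printf("status@%zu count@%zu size=%zu\n",
               offsetof(struct fw_msg_status_rsp_v2, status),
               offsetof(struct fw_msg_status_rsp_v2, count_following_candidates),
               sizeof(struct fw_msg_status_rsp_v2));  /* 6, 10, 12 */
        return 0;
}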
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx-msm.c b/drivers/net/wireless/ath/wcn36xx/wcn36xx-msm.c
new file mode 100644
index 000000000000..647cf026d37c
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx-msm.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/completion.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <linux/remoteproc.h>
+#include <linux/soc/qcom/smd.h>
+#include "wcn36xx.h"
+#include "wcnss_core.h"
+
+#define MAC_ADDR_0 "wlan/macaddr0"
+
+static int wcn36xx_msm_smsm_change_state(u32 clear_mask, u32 set_mask)
+{
+ return 0;
+}
+
+static int wcn36xx_msm_get_hw_mac(struct wcn36xx *wcn, u8 *addr)
+{
+ const struct firmware *addr_file = NULL;
+ int status;
+ char tmp[18];
+ static const u8 qcom_oui[3] = {0x00, 0x0A, 0xF5};
+ static const char *files = MAC_ADDR_0;
+ struct wcn36xx_platform_data *pdata = wcn->wcn36xx_data;
+
+ status = request_firmware(&addr_file, files, &pdata->core->dev);
+
+ if (status < 0) {
+ /* Assign a random mac with Qualcomm oui */
+ dev_err(&pdata->core->dev,
+ "Failed (%d) to read MAC address file %s, using a random address instead\n",
+ status, files);
+ memcpy(addr, qcom_oui, 3);
+ get_random_bytes(addr + 3, 3);
+ } else {
+ memset(tmp, 0, sizeof(tmp));
+ memcpy(tmp, addr_file->data, sizeof(tmp) - 1);
+ sscanf(tmp, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
+ &addr[0],
+ &addr[1],
+ &addr[2],
+ &addr[3],
+ &addr[4],
+ &addr[5]);
+
+ release_firmware(addr_file);
+ }
+
+ return 0;
+}
+
+static int wcn36xx_msm_smd_send_and_wait(struct wcn36xx *wcn, char *buf, size_t len)
+{
+ int ret = 0;
+ struct wcn36xx_platform_data *pdata = wcn->wcn36xx_data;
+
+ mutex_lock(&pdata->wlan_ctrl_lock);
+ ret = qcom_smd_send(pdata->wlan_ctrl_channel, buf, len);
+ if (ret) {
+ dev_err(wcn->dev, "wlan ctrl channel tx failed\n");
+ }
+ mutex_unlock(&pdata->wlan_ctrl_lock);
+
+ return ret;
+}
+
+static int wcn36xx_msm_smd_open(struct wcn36xx *wcn, void *rsp_cb)
+{
+ struct wcn36xx_platform_data *pdata = wcn->wcn36xx_data;
+
+ pdata->cb = rsp_cb;
+ return 0;
+}
+
+static void wcn36xx_msm_smd_close(struct wcn36xx *wcn)
+{
+ return;
+}
+
+static int wcn36xx_msm_get_chip_type(struct wcn36xx *wcn)
+{
+ struct wcn36xx_platform_data *pdata = wcn->wcn36xx_data;
+ return pdata->chip_type;
+}
+
+static struct wcn36xx_platform_data wcn36xx_data = {
+ .ctrl_ops = {
+ .open = wcn36xx_msm_smd_open,
+ .close = wcn36xx_msm_smd_close,
+ .tx = wcn36xx_msm_smd_send_and_wait,
+ .get_hw_mac = wcn36xx_msm_get_hw_mac,
+ .smsm_change_state = wcn36xx_msm_smsm_change_state,
+ .get_chip_type = wcn36xx_msm_get_chip_type,
+ },
+};
+
+static int qcom_smd_wlan_ctrl_probe(struct qcom_smd_device *sdev)
+{
+ pr_info("%s: enter\n", __func__);
+ mutex_init(&wcn36xx_data.wlan_ctrl_lock);
+ init_completion(&wcn36xx_data.wlan_ctrl_ack);
+
+ wcn36xx_data.sdev = sdev;
+
+ dev_set_drvdata(&sdev->dev, &wcn36xx_data);
+ wcn36xx_data.wlan_ctrl_channel = sdev->channel;
+
+ of_platform_populate(sdev->dev.of_node, NULL, NULL, &sdev->dev);
+
+ return 0;
+}
+
+static void qcom_smd_wlan_ctrl_remove(struct qcom_smd_device *sdev)
+{
+ of_platform_depopulate(&sdev->dev);
+}
+
+static int qcom_smd_wlan_ctrl_callback(struct qcom_smd_device *qsdev,
+ const void *data,
+ size_t count)
+{
+ struct wcn36xx_platform_data *pdata = dev_get_drvdata(&qsdev->dev);
+ void *buf = kzalloc(count, GFP_ATOMIC);
+ if (!buf) {
+ dev_err(&pdata->core->dev, "can't allocate buffer\n");
+ return -ENOMEM;
+ }
+
+ memcpy_fromio(buf, data, count);
+ pdata->cb(pdata->wcn, buf, count);
+ kfree(buf);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_smd_wlan_ctrl_of_match[] = {
+ { .compatible = "qcom,wlan-ctrl" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smd_wlan_ctrl_of_match);
+
+static struct qcom_smd_driver qcom_smd_wlan_ctrl_driver = {
+ .probe = qcom_smd_wlan_ctrl_probe,
+ .remove = qcom_smd_wlan_ctrl_remove,
+ .callback = qcom_smd_wlan_ctrl_callback,
+ .driver = {
+ .name = "qcom_smd_wlan_ctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = qcom_smd_wlan_ctrl_of_match,
+ },
+};
+
+static const struct of_device_id wcn36xx_msm_match_table[] = {
+ { .compatible = "qcom,wcn3660", .data = (void *)WCN36XX_CHIP_3660 },
+ { .compatible = "qcom,wcn3680", .data = (void *)WCN36XX_CHIP_3680 },
+ { .compatible = "qcom,wcn3620", .data = (void *)WCN36XX_CHIP_3620 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, wcn36xx_msm_match_table);
+
+static int wcn36xx_msm_probe(struct platform_device *pdev)
+{
+ int ret;
+ const struct of_device_id *of_id;
+ struct resource *r;
+ struct resource res[3];
+ static const char * const rnames[] = {
+ "wcnss_mmio", "wcnss_wlantx_irq", "wcnss_wlanrx_irq" };
+ static const int rtype[] = {
+ IORESOURCE_MEM, IORESOURCE_IRQ, IORESOURCE_IRQ };
+ struct device_node *dn;
+ int n;
+
+ wcnss_core_prepare(pdev);
+
+ dn = of_parse_phandle(pdev->dev.of_node, "rproc", 0);
+ if (!dn) {
+ dev_err(&pdev->dev, "No rproc specified in DT\n");
+ } else {
+ struct rproc *rproc = rproc_get_by_phandle(dn->phandle);
+
+ if (rproc)
+ rproc_boot(rproc);
+ else
+ dev_err(&pdev->dev, "No rproc registered\n");
+ }
+
+ qcom_smd_driver_register(&qcom_smd_wlan_ctrl_driver);
+ wcnss_core_init();
+
+ of_id = of_match_node(wcn36xx_msm_match_table, pdev->dev.of_node);
+ if (!of_id)
+ return -EINVAL;
+
+ wcn36xx_data.chip_type = (enum wcn36xx_chip_type)of_id->data;
+
+ wcn36xx_data.core = platform_device_alloc("wcn36xx", -1);
+
+ for (n = 0; n < ARRAY_SIZE(rnames); n++) {
+ r = platform_get_resource_byname(pdev, rtype[n], rnames[n]);
+ if (!r) {
+ dev_err(&wcn36xx_data.core->dev,
+ "Missing resource %s\n", rnames[n]);
+ ret = -EINVAL;
+ return ret;
+ }
+ res[n] = *r;
+ }
+
+ platform_device_add_resources(wcn36xx_data.core, res, n);
+ wcn36xx_data.core->dev.platform_data = &wcn36xx_data;
+
+ platform_device_add(wcn36xx_data.core);
+
+ dev_info(&pdev->dev, "%s initialized\n", __func__);
+
+ return 0;
+}
+
+static int wcn36xx_msm_remove(struct platform_device *pdev)
+{
+ platform_device_del(wcn36xx_data.core);
+ platform_device_put(wcn36xx_data.core);
+ return 0;
+}
+
+static struct platform_driver wcn36xx_msm_driver = {
+ .probe = wcn36xx_msm_probe,
+ .remove = wcn36xx_msm_remove,
+ .driver = {
+ .name = "wcn36xx-msm",
+ .owner = THIS_MODULE,
+ .of_match_table = wcn36xx_msm_match_table,
+ },
+};
+
+static int __init wcn36xx_msm_init(void)
+{
+ return platform_driver_register(&wcn36xx_msm_driver);
+}
+module_init(wcn36xx_msm_init);
+
+static void __exit wcn36xx_msm_exit(void)
+{
+ platform_driver_unregister(&wcn36xx_msm_driver);
+}
+module_exit(wcn36xx_msm_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Eugene Krasnikov k.eugene.e@gmail.com");
+MODULE_FIRMWARE(MAC_ADDR_0);
+
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 7b41e833e18c..d9f8b6450487 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -103,19 +103,45 @@ struct nv_data {
u8 table;
};
+enum wcn36xx_chip_type {
+ WCN36XX_CHIP_UNKNOWN,
+ WCN36XX_CHIP_3660,
+ WCN36XX_CHIP_3680,
+ WCN36XX_CHIP_3620,
+};
+
/* Interface for platform control path
*
* @open: hook must be called when wcn36xx wants to open control channel.
* @tx: sends a buffer.
*/
struct wcn36xx_platform_ctrl_ops {
- int (*open)(void *drv_priv, void *rsp_cb);
- void (*close)(void);
- int (*tx)(char *buf, size_t len);
- int (*get_hw_mac)(u8 *addr);
+ int (*open)(struct wcn36xx *wcn, void *rsp_cb);
+ void (*close)(struct wcn36xx *wcn);
+ int (*tx)(struct wcn36xx *wcn, char *buf, size_t len);
+ int (*get_hw_mac)(struct wcn36xx *wcn, u8 *addr);
+ int (*get_chip_type)(struct wcn36xx *wcn);
int (*smsm_change_state)(u32 clear_mask, u32 set_mask);
};
+struct wcn36xx_platform_data {
+ enum wcn36xx_chip_type chip_type;
+
+ struct platform_device *core;
+
+ struct qcom_smd_device *sdev;
+ struct qcom_smd_channel *wlan_ctrl_channel;
+ struct completion wlan_ctrl_ack;
+ struct mutex wlan_ctrl_lock;
+
+ struct pinctrl *pinctrl;
+
+ struct wcn36xx *wcn;
+
+ void (*cb)(struct wcn36xx *wcn, void *buf, size_t len);
+ struct wcn36xx_platform_ctrl_ops ctrl_ops;
+};
+
/**
* struct wcn36xx_vif - holds VIF related fields
*
@@ -193,7 +219,7 @@ struct wcn36xx {
u8 fw_minor;
u8 fw_major;
u32 fw_feat_caps[WCN36XX_HAL_CAPS_SIZE];
- u32 chip_version;
+ enum wcn36xx_chip_type chip_version;
/* extra byte for the NULL termination */
u8 crm_version[WCN36XX_HAL_VERSION_LENGTH + 1];
@@ -204,6 +230,7 @@ struct wcn36xx {
int rx_irq;
void __iomem *mmio;
+ struct wcn36xx_platform_data *wcn36xx_data;
struct wcn36xx_platform_ctrl_ops *ctrl_ops;
/*
* smd_buf must be protected with smd_mutex to guarantee
@@ -241,9 +268,6 @@ struct wcn36xx {
};
-#define WCN36XX_CHIP_3660 0
-#define WCN36XX_CHIP_3680 1
-
static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
u8 major,
u8 minor,
diff --git a/drivers/net/wireless/ath/wcn36xx/wcnss_core.c b/drivers/net/wireless/ath/wcn36xx/wcnss_core.c
new file mode 100644
index 000000000000..645968b39116
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/wcnss_core.c
@@ -0,0 +1,296 @@
+#include <linux/completion.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <linux/soc/qcom/smd.h>
+#include "wcn36xx.h"
+#include "wcnss_core.h"
+
+#define WCNSS_CTRL_TIMEOUT (msecs_to_jiffies(500))
+
+static int wcnss_core_config(struct platform_device *pdev, void __iomem *base)
+{
+ int ret = 0;
+ u32 value, iris_read_v = INVALID_IRIS_REG;
+ int clk_48m = 0;
+
+ value = readl_relaxed(base + SPARE_OFFSET);
+ value |= WCNSS_FW_DOWNLOAD_ENABLE;
+ writel_relaxed(value, base + SPARE_OFFSET);
+
+ writel_relaxed(0, base + PMU_OFFSET);
+ value = readl_relaxed(base + PMU_OFFSET);
+ value |= WCNSS_PMU_CFG_GC_BUS_MUX_SEL_TOP |
+ WCNSS_PMU_CFG_IRIS_XO_EN;
+ writel_relaxed(value, base + PMU_OFFSET);
+
+ iris_read_v = readl_relaxed(base + IRIS_REG_OFFSET);
+ pr_info("iris_read_v: 0x%x\n", iris_read_v);
+
+ iris_read_v &= 0xffff;
+ iris_read_v |= 0x04;
+ writel_relaxed(iris_read_v, base + IRIS_REG_OFFSET);
+
+ value = readl_relaxed(base + PMU_OFFSET);
+ value |= WCNSS_PMU_CFG_IRIS_XO_READ;
+ writel_relaxed(value, base + PMU_OFFSET);
+
+ while (readl_relaxed(base + PMU_OFFSET) &
+ WCNSS_PMU_CFG_IRIS_XO_READ_STS)
+ cpu_relax();
+
+ iris_read_v = readl_relaxed(base + 0x1134);
+ pr_info("wcnss: IRIS Reg: 0x%08x\n", iris_read_v);
+ clk_48m = (iris_read_v >> 30) ? 0 : 1;
+ value &= ~WCNSS_PMU_CFG_IRIS_XO_READ;
+
+ /* XO_MODE[b2:b1]. Clear implies 19.2MHz */
+ value &= ~WCNSS_PMU_CFG_IRIS_XO_MODE;
+
+ if (clk_48m)
+ value |= WCNSS_PMU_CFG_IRIS_XO_MODE_48;
+
+ writel_relaxed(value, base + PMU_OFFSET);
+
+ /* Reset IRIS */
+ value |= WCNSS_PMU_CFG_IRIS_RESET;
+ writel_relaxed(value, base + PMU_OFFSET);
+
+ while (readl_relaxed(base + PMU_OFFSET) &
+ WCNSS_PMU_CFG_IRIS_RESET_STS)
+ cpu_relax();
+
+ /* reset IRIS reset bit */
+ value &= ~WCNSS_PMU_CFG_IRIS_RESET;
+ writel_relaxed(value, base + PMU_OFFSET);
+
+ /* start IRIS XO configuration */
+ value |= WCNSS_PMU_CFG_IRIS_XO_CFG;
+ writel_relaxed(value, base + PMU_OFFSET);
+
+ /* Wait for XO configuration to finish */
+ while (readl_relaxed(base + PMU_OFFSET) &
+ WCNSS_PMU_CFG_IRIS_XO_CFG_STS)
+ cpu_relax();
+
+ /* Stop IRIS XO configuration */
+ value &= ~(WCNSS_PMU_CFG_GC_BUS_MUX_SEL_TOP |
+ WCNSS_PMU_CFG_IRIS_XO_CFG);
+ writel_relaxed(value, base + PMU_OFFSET);
+
+ msleep(200);
+
+ return ret;
+}
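The three loops above spin on WCNSS_PMU_CFG_IRIS_XO_READ_STS, ..._IRIS_RESET_STS and ..._IRIS_XO_CFG_STS with no bound, so a wedged IRIS would hang the CPU. A hedged kernel-side sketch of a bounded alternative under the same register semantics; the helper name and retry cap are illustrative, not part of this patch:

/*
 * Illustrative only: bounded wait for a PMU status bit to clear.
 * Assumes <linux/io.h>, <linux/delay.h> and <linux/errno.h>.
 */
static int wcnss_poll_bit_clear(void __iomem *base, u32 offset, u32 bit)
{
        int tries = 1000;                       /* arbitrary cap */

        while (readl_relaxed(base + offset) & bit) {
                if (--tries == 0)
                        return -ETIMEDOUT;      /* give up instead of hanging */
                usleep_range(10, 20);
        }
        return 0;
}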
+
+int wcnss_core_prepare(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res;
+ void __iomem *wcnss_base;
+
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "pronto_phy_base");
+ if (!res) {
+ ret = -EIO;
+ dev_err(&pdev->dev, "resource pronto_phy_base failed\n");
+ return ret;
+ }
+
+ wcnss_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(wcnss_base)) {
+ dev_err(&pdev->dev, "pronto_phy_base map failed\n");
+ return PTR_ERR(wcnss_base);
+ }
+
+ ret = wcnss_core_config(pdev, wcnss_base);
+ return ret;
+}
+
+struct wcnss_platform_data {
+ struct qcom_smd_channel *channel;
+ struct completion ack;
+ struct mutex lock;
+
+ struct work_struct rx_work;
+ struct work_struct download_work;
+
+ struct qcom_smd_device *sdev;
+};
+
+static struct completion fw_ready_compl;
+#define NV_FILE_NAME "wlan/prima/WCNSS_qcom_wlan_nv.bin"
+static void wcn36xx_nv_download_work(struct work_struct *worker)
+{
+ int ret = 0, i;
+ const struct firmware *nv = NULL;
+ struct wcnss_platform_data *wcnss_data =
+ container_of(worker, struct wcnss_platform_data, download_work);
+ struct device *dev = &wcnss_data->sdev->dev;
+
+ struct nvbin_dnld_req_msg *msg;
+ const void *nv_blob_start;
+ char *pkt = NULL;
+ int nv_blob_size = 0, fragments;
+
+ ret = request_firmware(&nv, NV_FILE_NAME, dev);
+ if (ret || !nv || !nv->data || !nv->size) {
+ dev_err(dev, "request firmware for %s (ret = %d)\n",
+ NV_FILE_NAME, ret);
+ return;
+ }
+
+ nv_blob_start = nv->data + 4;
+ nv_blob_size = nv->size - 4;
+
+ fragments = (nv_blob_size + NV_FRAGMENT_SIZE - 1) / NV_FRAGMENT_SIZE;
+
+ pkt = kzalloc(sizeof(struct nvbin_dnld_req_msg) + NV_FRAGMENT_SIZE,
+ GFP_KERNEL);
+ if (!pkt) {
+ dev_err(dev, "failed to allocate packet for nv download\n");
+ release_firmware(nv);
+ return;
+ }
+
+ msg = (struct nvbin_dnld_req_msg *)pkt;
+ msg->hdr.msg_type = WCNSS_NV_DOWNLOAD_REQ;
+ msg->dnld_req_params.msg_flags = 0;
+
+ i = 0;
+ do {
+ int pkt_len = 0;
+
+ msg->dnld_req_params.frag_number = i;
+ if (nv_blob_size > NV_FRAGMENT_SIZE) {
+ msg->dnld_req_params.msg_flags &=
+ ~LAST_FRAGMENT;
+ pkt_len = NV_FRAGMENT_SIZE;
+ } else {
+ pkt_len = nv_blob_size;
+ msg->dnld_req_params.msg_flags |=
+ LAST_FRAGMENT | CAN_RECEIVE_CALDATA;
+ }
+
+ msg->dnld_req_params.nvbin_buffer_size = pkt_len;
+ msg->hdr.msg_len =
+ sizeof(struct nvbin_dnld_req_msg) + pkt_len;
+
+ memcpy(pkt + sizeof(struct nvbin_dnld_req_msg),
+ nv_blob_start + i * NV_FRAGMENT_SIZE, pkt_len);
+
+ ret = qcom_smd_send(wcnss_data->channel, pkt, msg->hdr.msg_len);
+ if (ret) {
+ dev_err(dev, "nv download failed\n");
+ goto out;
+ }
+
+ i++;
+ nv_blob_size -= NV_FRAGMENT_SIZE;
+ msleep(100);
+ } while (nv_blob_size > 0);
+
+out:
+ kfree(pkt);
+ release_firmware(nv);
+ return;
+}
+
+static int qcom_smd_wcnss_ctrl_callback(struct qcom_smd_device *qsdev,
+ const void *data,
+ size_t count)
+{
+ struct wcnss_platform_data *wcnss_data = dev_get_drvdata(&qsdev->dev);
+ struct smd_msg_hdr phdr;
+ const unsigned char *tmp = data;
+
+ memcpy_fromio(&phdr, data, sizeof(struct smd_msg_hdr));
+
+ switch (phdr.msg_type) {
+ /* CBC COMPLETE means the firmware is ready to go */
+ case WCNSS_CBC_COMPLETE_IND:
+ complete(&fw_ready_compl);
+ pr_info("wcnss: received WCNSS_CBC_COMPLETE_IND from FW\n");
+ break;
+
+ case WCNSS_NV_DOWNLOAD_RSP:
+ pr_info("fw_status: %d\n", tmp[sizeof(struct smd_msg_hdr)]);
+ break;
+ }
+
+ complete(&wcnss_data->ack);
+ return 0;
+}
+
+static int qcom_smd_wcnss_ctrl_probe(struct qcom_smd_device *sdev)
+{
+ struct wcnss_platform_data *wcnss_data;
+
+ wcnss_data = devm_kzalloc(&sdev->dev, sizeof(*wcnss_data), GFP_KERNEL);
+ if (!wcnss_data)
+ return -ENOMEM;
+
+ mutex_init(&wcnss_data->lock);
+ init_completion(&wcnss_data->ack);
+
+ wcnss_data->sdev = sdev;
+
+ dev_set_drvdata(&sdev->dev, wcnss_data);
+ wcnss_data->channel = sdev->channel;
+
+ INIT_WORK(&wcnss_data->download_work, wcn36xx_nv_download_work);
+
+ of_platform_populate(sdev->dev.of_node, NULL, NULL, &sdev->dev);
+
+ /* We are ready for download here */
+ schedule_work(&wcnss_data->download_work);
+ return 0;
+}
+
+static void qcom_smd_wcnss_ctrl_remove(struct qcom_smd_device *sdev)
+{
+ of_platform_depopulate(&sdev->dev);
+}
+
+static const struct of_device_id qcom_smd_wcnss_ctrl_of_match[] = {
+ { .compatible = "qcom,wcnss-ctrl" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smd_wcnss_ctrl_of_match);
+
+static struct qcom_smd_driver qcom_smd_wcnss_ctrl_driver = {
+ .probe = qcom_smd_wcnss_ctrl_probe,
+ .remove = qcom_smd_wcnss_ctrl_remove,
+ .callback = qcom_smd_wcnss_ctrl_callback,
+ .driver = {
+ .name = "qcom_smd_wcnss_ctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = qcom_smd_wcnss_ctrl_of_match,
+ },
+};
+
+void wcnss_core_init(void)
+{
+ int ret = 0;
+
+ init_completion(&fw_ready_compl);
+ qcom_smd_driver_register(&qcom_smd_wcnss_ctrl_driver);
+
+ ret = wait_for_completion_interruptible_timeout(
+ &fw_ready_compl, msecs_to_jiffies(FW_READY_TIMEOUT));
+ if (ret <= 0) {
+ pr_err("timeout waiting for wcnss firmware ready indicator\n");
+ return;
+ }
+
+ return;
+}
+
+void wcnss_core_deinit(void)
+{
+ qcom_smd_driver_unregister(&qcom_smd_wcnss_ctrl_driver);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/wcnss_core.h b/drivers/net/wireless/ath/wcn36xx/wcnss_core.h
new file mode 100644
index 000000000000..49524c8dddfb
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/wcnss_core.h
@@ -0,0 +1,99 @@
+#ifndef _WCNSS_CORE_H_
+#define _WCNSS_CORE_H_
+
+#define PMU_OFFSET 0x1004
+#define SPARE_OFFSET 0x1088
+#define IRIS_REG_OFFSET 0x1134
+
+#define INVALID_IRIS_REG 0xbaadbaad
+
+#define WCNSS_PMU_CFG_IRIS_XO_CFG BIT(3)
+#define WCNSS_PMU_CFG_IRIS_XO_EN BIT(4)
+#define WCNSS_PMU_CFG_GC_BUS_MUX_SEL_TOP BIT(5)
+#define WCNSS_PMU_CFG_IRIS_XO_CFG_STS BIT(6) /* 1: in progress, 0: done */
+
+#define WCNSS_PMU_CFG_IRIS_RESET BIT(7)
+#define WCNSS_PMU_CFG_IRIS_RESET_STS BIT(8) /* 1: in progress, 0: done */
+#define WCNSS_PMU_CFG_IRIS_XO_READ BIT(9)
+#define WCNSS_PMU_CFG_IRIS_XO_READ_STS BIT(10)
+#define WCNSS_FW_DOWNLOAD_ENABLE BIT(25)
+
+#define WCNSS_PMU_CFG_IRIS_XO_MODE 0x6
+#define WCNSS_PMU_CFG_IRIS_XO_MODE_48 (3 << 1)
+
+#define NV_DOWNLOAD_TIMEOUT 500
+#define NV_FRAGMENT_SIZE 3072
+#define MAX_CALIBRATED_DATA_SIZE (64*1024)
+#define LAST_FRAGMENT (1 << 0)
+#define MESSAGE_TO_FOLLOW (1 << 1)
+#define CAN_RECEIVE_CALDATA (1 << 15)
+#define WCNSS_RESP_SUCCESS 1
+#define WCNSS_RESP_FAIL 0
+
+
+#define WCNSS_NV_DOWNLOAD_REQ 0x01000002
+#define WCNSS_NV_DOWNLOAD_RSP 0x01000003
+#define WCNSS_CBC_COMPLETE_IND 0x0100000C
+
+/* 10 s timeout for the firmware ready indicator */
+#define FW_READY_TIMEOUT (10000)
+
+
+struct smd_msg_hdr {
+ unsigned int msg_type;
+ unsigned int msg_len;
+};
+
+struct nvbin_dnld_req_params {
+ /* Fragment sequence number of the NV bin Image. NV Bin Image
+ * might not fit into one message due to size limitation of
+ * the SMD channel FIFO so entire NV blob is chopped into
+ * multiple fragments starting with sequence number 0. The
+ * last fragment is indicated by marking is_last_fragment field
+ * to 1. At receiving side, NV blobs would be concatenated
+ * together without any padding bytes in between.
+ */
+ unsigned short frag_number;
+
+ /* bit 0: When set to 1 it indicates that no more fragments will
+ * be sent.
+ * bit 1: When set, a new message will be followed by this message
+ * bit 2- bit 14: Reserved
+ * bit 15: when set, it indicates that the sender is capable of
+ * receiving Calibrated data.
+ */
+ unsigned short msg_flags;
+
+ /* NV Image size (number of bytes) */
+ unsigned int nvbin_buffer_size;
+
+ /* Following the 'nvbin_buffer_size', there should be
+ * nvbin_buffer_size bytes of NV bin Image i.e.
+ * uint8[nvbin_buffer_size].
+ */
+};
+
+struct nvbin_dnld_req_msg {
+ /* Note: The length specified in nvbin_dnld_req_msg messages
+ * should be hdr.msg_len = sizeof(nvbin_dnld_req_msg) +
+ * nvbin_buffer_size.
+ */
+ struct smd_msg_hdr hdr;
+ struct nvbin_dnld_req_params dnld_req_params;
+};
+
+struct wcnss_version {
+ struct smd_msg_hdr hdr;
+ unsigned char major;
+ unsigned char minor;
+ unsigned char version;
+ unsigned char revision;
+};
+
+
+int wcnss_core_prepare(struct platform_device *pdev);
+void wcnss_core_init(void);
+void wcnss_core_deinit(void);
+
+#endif
+
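The fragmentation protocol documented in nvbin_dnld_req_params above reduces to simple arithmetic: skip the 4-byte NV header, slice the blob into NV_FRAGMENT_SIZE pieces, and flag the final piece with LAST_FRAGMENT. A standalone sketch of the per-fragment sizing, with an assumed 9996-byte blob for illustration:

#include <stdio.h>

#define NV_FRAGMENT_SIZE 3072

int main(void)
{
        int nv_blob_size = 10000 - 4;   /* example blob, minus the 4-byte NV header */
        int fragments = (nv_blob_size + NV_FRAGMENT_SIZE - 1) / NV_FRAGMENT_SIZE;
        int i, remaining = nv_blob_size;

        for (i = 0; i < fragments; i++) {
                int len = remaining > NV_FRAGMENT_SIZE ? NV_FRAGMENT_SIZE : remaining;

                printf("frag %d: %d bytes%s\n", i, len,
                       remaining <= NV_FRAGMENT_SIZE ? " (LAST_FRAGMENT)" : "");
                remaining -= len;
        }
        return 0;       /* 9996 bytes -> 3072, 3072, 3072, 780 */
}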
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 384574c3987c..dbfab80f4d36 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -924,8 +924,8 @@ EXPORT_SYMBOL(of_io_request_and_map);
* CPU addr (phys_addr_t) : pna cells
* size : nsize cells
*
- * It returns -ENODEV if "dma-ranges" property was not found
- * for this device in DT.
+ * Return 0 on success, -ENODEV if the "dma-ranges" property was not found for
+ * this device in DT, or -EINVAL if the CPU address or size is invalid.
*/
int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size)
{
@@ -986,6 +986,22 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
*dma_addr = dmaaddr;
*size = of_read_number(ranges + naddr + pna, nsize);
+ /*
+ * DT nodes sometimes incorrectly set the size as a mask. Work around
+ * those incorrect DT by computing the size as mask + 1.
+ */
+ if (*size & 1) {
+ pr_warn("%s: size 0x%llx for dma-range in node(%s) set as mask\n",
+ __func__, *size, np->full_name);
+ *size = *size + 1;
+ }
+
+ if (!*size) {
+ pr_err("%s: invalid size zero for dma-range in node(%s)\n",
+ __func__, np->full_name);
+ ret = -EINVAL;
+ goto out;
+ }
pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
*dma_addr, *paddr, *size);
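The workaround above turns an odd size cell into mask + 1: a 32-bit range mistakenly written as the mask 0xffffffff becomes the true size 0x100000000, and an all-zero result is then rejected with -EINVAL. A standalone illustration of the normalization (fix_dma_size() is a user-space stand-in, not a kernel API):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the size-as-mask normalization in of_dma_get_range(). */
static uint64_t fix_dma_size(uint64_t size)
{
        if (size & 1)           /* odd size: the DT author wrote a mask */
                size += 1;
        return size;            /* caller must still reject a zero result */
}

int main(void)
{
        printf("0x%llx -> 0x%llx\n", 0xffffffffULL,
               (unsigned long long)fix_dma_size(0xffffffffULL)); /* 0x100000000 */
        return 0;
}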
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 8b91ea241b10..c4a2869c2cb0 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -70,7 +70,7 @@ int of_device_add(struct platform_device *ofdev)
}
/**
- * of_dma_configure - Setup DMA configuration
+ * of_dma_configure_masks - Setup DMA masks and offset
* @dev: Device to apply DMA configuration
* @np: Pointer to OF node having DMA configuration
*
@@ -81,13 +81,11 @@ int of_device_add(struct platform_device *ofdev)
* can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events
* to fix up DMA configuration.
*/
-void of_dma_configure(struct device *dev, struct device_node *np)
+void of_dma_configure_masks(struct device *dev, struct device_node *np)
{
- u64 dma_addr, paddr, size;
- int ret;
- bool coherent;
+ u64 dma_addr, paddr, size, range_mask;
unsigned long offset;
- struct iommu_ops *iommu;
+ int ret;
/*
* Set default coherent_dma_mask to 32 bit. Drivers are expected to
@@ -105,25 +103,11 @@ void of_dma_configure(struct device *dev, struct device_node *np)
ret = of_dma_get_range(np, &dma_addr, &paddr, &size);
if (ret < 0) {
- dma_addr = offset = 0;
- size = dev->coherent_dma_mask + 1;
+ range_mask = dev->coherent_dma_mask + 1;
+ offset = 0;
} else {
+ range_mask = DMA_BIT_MASK(ilog2(dma_addr + size));
offset = PFN_DOWN(paddr - dma_addr);
-
- /*
- * Add a work around to treat the size as mask + 1 in case
- * it is defined in DT as a mask.
- */
- if (size & 1) {
- dev_warn(dev, "Invalid size 0x%llx for dma-range\n",
- size);
- size = size + 1;
- }
-
- if (!size) {
- dev_err(dev, "Adjusted size 0x%llx invalid\n", size);
- return;
- }
dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset);
}
@@ -133,22 +117,59 @@ void of_dma_configure(struct device *dev, struct device_node *np)
* Limit coherent and dma mask based on size and default mask
* set by the driver.
*/
- dev->coherent_dma_mask = min(dev->coherent_dma_mask,
- DMA_BIT_MASK(ilog2(dma_addr + size)));
- *dev->dma_mask = min((*dev->dma_mask),
- DMA_BIT_MASK(ilog2(dma_addr + size)));
+ dev->coherent_dma_mask = min(dev->coherent_dma_mask, range_mask);
+ *dev->dma_mask = min((*dev->dma_mask), range_mask);
+}
+EXPORT_SYMBOL_GPL(of_dma_configure_masks);
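range_mask above clamps both DMA masks to the highest bit addressable through the range: DMA_BIT_MASK(ilog2(dma_addr + size)). For a window covering the first 1 GiB (dma_addr 0, size 0x40000000) this yields a 30-bit mask. A standalone sketch of the same arithmetic, with user-space stand-ins for the kernel's ilog2() and DMA_BIT_MASK():

#include <stdint.h>
#include <stdio.h>

static unsigned int ilog2_u64(uint64_t v)       /* floor(log2(v)), v > 0 */
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
        uint64_t dma_addr = 0, size = 0x40000000;       /* 1 GiB window */
        uint64_t range_mask = DMA_BIT_MASK(ilog2_u64(dma_addr + size));

        printf("range_mask = 0x%llx\n", (unsigned long long)range_mask);
        /* 0x3fffffff: the device's masks are clamped to 30 bits */
        return 0;
}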
+
+/**
+ * of_dma_configure_ops - Setup DMA operations
+ * @dev: Device to apply DMA configuration
+ * @np: Pointer to OF node having DMA configuration
+ *
+ * Try to get the device's DMA configuration from DT and update it
+ * accordingly.
+ */
+int of_dma_configure_ops(struct device *dev, struct device_node *np)
+{
+ u64 dma_addr, paddr, size;
+ struct iommu_ops *iommu;
+ bool coherent;
+ int ret;
+
+ ret = of_dma_get_range(np, &dma_addr, &paddr, &size);
+ if (ret < 0) {
+ dma_addr = 0;
+ size = dev->coherent_dma_mask + 1;
+ }
coherent = of_dma_is_coherent(np);
dev_dbg(dev, "device is%sdma coherent\n",
coherent ? " " : " not ");
iommu = of_iommu_configure(dev, np);
+ if (IS_ERR(iommu))
+ return PTR_ERR(iommu);
dev_dbg(dev, "device is%sbehind an iommu\n",
iommu ? " " : " not ");
arch_setup_dma_ops(dev, dma_addr, size, iommu, coherent);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_dma_configure_ops);
+
+/**
+ * of_dma_deconfigure - Clean up DMA configuration
+ * @dev: Device for which to clean up DMA configuration
+ *
+ * Clean up all configuration performed by of_dma_configure_ops() and free all
+ * resources that have been allocated.
+ */
+void of_dma_deconfigure(struct device *dev)
+{
+ arch_teardown_dma_ops(dev);
}
-EXPORT_SYMBOL_GPL(of_dma_configure);
int of_device_register(struct platform_device *pdev)
{
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index 5751dc5b6494..c65803da2067 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -132,7 +132,8 @@ void of_pci_dma_configure(struct pci_dev *pci_dev)
if (!bridge->parent)
return;
- of_dma_configure(dev, bridge->parent->of_node);
+ of_dma_configure_masks(dev, bridge->parent->of_node);
+ of_dma_configure_ops(dev, bridge->parent->of_node);
pci_put_host_bridge_device(bridge);
}
EXPORT_SYMBOL_GPL(of_pci_dma_configure);
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 1001efaedcb8..338b6744ff1e 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -150,11 +150,6 @@ struct platform_device *of_device_alloc(struct device_node *np,
}
EXPORT_SYMBOL(of_device_alloc);
-static void of_dma_deconfigure(struct device *dev)
-{
- arch_teardown_dma_ops(dev);
-}
-
/**
* of_platform_device_create_pdata - Alloc, initialize and register an of_device
* @np: pointer to node to create device for
@@ -183,11 +178,10 @@ static struct platform_device *of_platform_device_create_pdata(
dev->dev.bus = &platform_bus_type;
dev->dev.platform_data = platform_data;
- of_dma_configure(&dev->dev, dev->dev.of_node);
+ of_dma_configure_masks(&dev->dev, dev->dev.of_node);
of_msi_configure(&dev->dev, dev->dev.of_node);
if (of_device_add(dev) != 0) {
- of_dma_deconfigure(&dev->dev);
platform_device_put(dev);
goto err_clear_flag;
}
@@ -247,7 +241,8 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
dev_set_name(&dev->dev, "%s", bus_id);
else
of_device_make_bus_id(&dev->dev);
- of_dma_configure(&dev->dev, dev->dev.of_node);
+ of_dma_configure_masks(&dev->dev, dev->dev.of_node);
+ of_dma_configure_ops(&dev->dev, dev->dev.of_node);
/* Allow the HW Peripheral ID to be overridden */
prop = of_get_property(node, "arm,primecell-periphid", NULL);
@@ -479,11 +474,12 @@ static int of_platform_device_destroy(struct device *dev, void *data)
if (dev->bus == &platform_bus_type)
platform_device_unregister(to_platform_device(dev));
#ifdef CONFIG_ARM_AMBA
- else if (dev->bus == &amba_bustype)
+ else if (dev->bus == &amba_bustype) {
amba_device_unregister(to_amba_device(dev));
+ of_dma_deconfigure(dev);
+ }
#endif
- of_dma_deconfigure(dev);
of_node_clear_flag(dev->of_node, OF_POPULATED);
of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
return 0;
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index d5e58bae95cf..f87b1c98292f 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -30,6 +30,11 @@ config PCI_IMX6
select PCIEPORTBUS
select PCIE_DW
+config PCI_QCOM
+ bool "Qualcomm PCIe controller"
+ default y
+ depends on ARCH_QCOM && !ARM64
+
config PCI_TEGRA
bool "NVIDIA Tegra PCIe controller"
depends on ARCH_TEGRA && !ARM64
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 140d66f796e4..451d49e3df44 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o
obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
+obj-$(CONFIG_PCI_QCOM) += pci-qcom.o
diff --git a/drivers/pci/host/pci-qcom.c b/drivers/pci/host/pci-qcom.c
new file mode 100644
index 000000000000..5a39f6368aa3
--- /dev/null
+++ b/drivers/pci/host/pci-qcom.c
@@ -0,0 +1,967 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * QCOM MSM PCIe controller driver.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_gpio.h>
+#include <linux/msi.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of_address.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/delay.h>
+
+#define INT_PCI_MSI_NR (8 * 32)
+#define MSM_PCIE_MSI_PHY 0xa0000000
+
+#define PCIE20_MSI_CTRL_ADDR (0x820)
+#define PCIE20_MSI_CTRL_UPPER_ADDR (0x824)
+#define PCIE20_MSI_CTRL_INTR_EN (0x828)
+#define PCIE20_MSI_CTRL_INTR_MASK (0x82C)
+#define PCIE20_MSI_CTRL_INTR_STATUS (0x830)
+
+#define PCIE20_MSI_CTRL_MAX 8
+/* Root Complex Port vendor/device IDs */
+#define PCIE_VENDOR_ID_RCP 0x17cb
+#define PCIE_DEVICE_ID_RCP 0x0101
+
+#define __set(v, a, b) (((v) << (b)) & GENMASK(a, b))
+
+#define PCIE20_PARF_PCS_DEEMPH 0x34
+#define PCIE20_PARF_PCS_DEEMPH_TX_DEEMPH_GEN1(x) __set(x, 21, 16)
+#define PCIE20_PARF_PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) __set(x, 13, 8)
+#define PCIE20_PARF_PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) __set(x, 5, 0)
+
+#define PCIE20_PARF_PCS_SWING 0x38
+#define PCIE20_PARF_PCS_SWING_TX_SWING_FULL(x) __set(x, 14, 8)
+#define PCIE20_PARF_PCS_SWING_TX_SWING_LOW(x) __set(x, 6, 0)
+
+#define PCIE20_PARF_PHY_CTRL 0x40
+#define PCIE20_PARF_PHY_CTRL_PHY_TX0_TERM_OFFST(x) __set(x, 20, 16)
+#define PCIE20_PARF_PHY_CTRL_PHY_LOS_LEVEL(x) __set(x, 12, 8)
+#define PCIE20_PARF_PHY_CTRL_PHY_RTUNE_REQ (1 << 4)
+#define PCIE20_PARF_PHY_CTRL_PHY_TEST_BURNIN (1 << 2)
+#define PCIE20_PARF_PHY_CTRL_PHY_TEST_BYPASS (1 << 1)
+#define PCIE20_PARF_PHY_CTRL_PHY_TEST_PWR_DOWN (1 << 0)
+
+#define PCIE20_PARF_PHY_REFCLK 0x4C
+#define PCIE20_PARF_CONFIG_BITS 0x50
+
+#define PCIE20_ELBI_SYS_CTRL 0x04
+#define PCIE20_ELBI_SYS_CTRL_LTSSM_EN 0x01
+
+#define PCIE20_CAP 0x70
+#define PCIE20_CAP_LINKCTRLSTATUS (PCIE20_CAP + 0x10)
+
+#define PCIE20_COMMAND_STATUS 0x04
+#define PCIE20_BUSNUMBERS 0x18
+#define PCIE20_MEMORY_BASE_LIMIT 0x20
+
+#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818
+#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
+#define PCIE20_PLR_IATU_VIEWPORT 0x900
+#define PCIE20_PLR_IATU_CTRL1 0x904
+#define PCIE20_PLR_IATU_CTRL2 0x908
+#define PCIE20_PLR_IATU_LBAR 0x90C
+#define PCIE20_PLR_IATU_UBAR 0x910
+#define PCIE20_PLR_IATU_LAR 0x914
+#define PCIE20_PLR_IATU_LTAR 0x918
+#define PCIE20_PLR_IATU_UTAR 0x91c
+
+#define MSM_PCIE_DEV_CFG_ADDR 0x01000000
+
+#define RD 0
+#define WR 1
+
+#define MAX_RC_NUM 3
+#define PCIE_BUS_PRIV_DATA(pdev) \
+ (((struct pci_sys_data *)pdev->bus->sysdata)->private_data)
+
+/* PCIe TLP types that we are interested in */
+#define PCI_CFG0_RDWR 0x4
+#define PCI_CFG1_RDWR 0x5
+
+#define readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) \
+({ \
+ unsigned long timeout = jiffies + usecs_to_jiffies(timeout_us); \
+ might_sleep_if(sleep_us); \
+ for (;;) { \
+ (val) = readl(addr); \
+ if (cond) \
+ break; \
+ if (timeout_us && time_after(jiffies, timeout)) { \
+ (val) = readl(addr); \
+ break; \
+ } \
+ if (sleep_us) \
+ usleep_range(DIV_ROUND_UP(sleep_us, 4), sleep_us); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
+
+struct qcom_msi {
+ struct msi_controller chip;
+ DECLARE_BITMAP(used, INT_PCI_MSI_NR);
+ struct irq_domain *domain;
+ unsigned long pages;
+ struct mutex lock;
+ int irq;
+};
+
+struct qcom_pcie {
+ void __iomem *elbi_base;
+ void __iomem *parf_base;
+ void __iomem *dwc_base;
+ void __iomem *cfg_base;
+ struct device *dev;
+ int reset_gpio;
+ bool ext_phy_ref_clk;
+ struct clk *iface_clk;
+ struct clk *bus_clk;
+ struct clk *phy_clk;
+ int irq_int[4];
+ struct reset_control *axi_reset;
+ struct reset_control *ahb_reset;
+ struct reset_control *por_reset;
+ struct reset_control *pci_reset;
+ struct reset_control *phy_reset;
+
+ struct resource conf;
+ struct resource io;
+ struct resource mem;
+
+ struct regulator *vdd_supply;
+ struct regulator *avdd_supply;
+ struct regulator *pcie_clk_supply;
+ struct regulator *pcie_ext3p3v_supply;
+
+ struct qcom_msi msi;
+};
+
+static int nr_controllers;
+static DEFINE_SPINLOCK(qcom_hw_pci_lock);
+
+static inline struct qcom_pcie *sys_to_pcie(struct pci_sys_data *sys)
+{
+ return sys->private_data;
+}
+
+
+static inline int is_msm_pcie_rc(struct pci_bus *bus)
+{
+ return (bus->number == 0);
+}
+
+static int qcom_pcie_is_link_up(struct qcom_pcie *dev)
+{
+ return readl_relaxed(dev->dwc_base + PCIE20_CAP_LINKCTRLSTATUS) &
+ BIT(29);
+}
+
+static inline int msm_pcie_get_cfgtype(struct pci_bus *bus)
+{
+ /*
+ * http://www.tldp.org/LDP/tlk/dd/pci.html
+ * Pass it onto the secondary bus interface unchanged if the
+ * bus number specified is greater than the secondary bus
+ * number and less than or equal to the subordinate bus
+ * number.
+ *
+ * Read/Write to the RC and Device/Switch connected to the RC
+ * are CFG0 type transactions. Rest have to be forwarded
+ * down stream as CFG1 transactions.
+ *
+ */
+ if (bus->number <= 1)
+ return PCI_CFG0_RDWR;
+
+ return PCI_CFG1_RDWR;
+}
+
+void msm_pcie_config_cfgtype(struct pci_bus *bus, u32 devfn)
+{
+ uint32_t bdf, cfgtype;
+ struct qcom_pcie *dev = sys_to_pcie(bus->sysdata);
+
+ cfgtype = msm_pcie_get_cfgtype(bus);
+
+ if (cfgtype == PCI_CFG0_RDWR) {
+ bdf = MSM_PCIE_DEV_CFG_ADDR;
+ } else {
+ /*
+ * iATU Lower Target Address Register
+ * Bits Description
+ * *-1:0 Forms bits [*:0] of the
+ * start address of the new
+ * address of the translated
+ * region. The start address
+ * must be aligned to a
+ * CX_ATU_MIN_REGION_SIZE kB
+ * boundary, so these bits are
+ * always 0. A write to this
+ * location is ignored by the
+ * PCIe core.
+ * 31:* Forms bits [31:*] of the
+ * new address of the
+ * translated region.
+ *
+ * * is log2(CX_ATU_MIN_REGION_SIZE)
+ */
+ bdf = (((bus->number & 0xff) << 24) & 0xff000000) |
+ (((devfn & 0xff) << 16) & 0x00ff0000);
+ }
+
+ writel_relaxed(0, dev->dwc_base + PCIE20_PLR_IATU_VIEWPORT);
+ wmb();
+
+ /* Program Bdf Address */
+ writel_relaxed(bdf, dev->dwc_base + PCIE20_PLR_IATU_LTAR);
+ wmb();
+
+ /* Write Config Request Type */
+ writel_relaxed(cfgtype, dev->dwc_base + PCIE20_PLR_IATU_CTRL1);
+ wmb();
+}
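For CFG1 accesses the iATU lower target address carries the target's bus and devfn in its top two bytes, exactly as the long comment above describes. A standalone example of the encoding (the bus/device/function values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define PCI_DEVFN(slot, func)   ((((slot) & 0x1f) << 3) | ((func) & 0x07))

int main(void)
{
        uint32_t bus = 1, devfn = PCI_DEVFN(0, 0);      /* bus 1, dev 0, fn 0 */
        uint32_t bdf = (((bus & 0xff) << 24) & 0xff000000) |
                       (((devfn & 0xff) << 16) & 0x00ff0000);

        printf("iATU LTAR bdf = 0x%08x\n", bdf);        /* 0x01000000 */
        return 0;
}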
+
+static inline int msm_pcie_oper_conf(struct pci_bus *bus, u32 devfn, int oper,
+ int where, int size, u32 *val)
+{
+ uint32_t word_offset, byte_offset, mask;
+ uint32_t rd_val, wr_val;
+ struct qcom_pcie *dev = sys_to_pcie(bus->sysdata);
+ void __iomem *config_base;
+ int rc;
+
+ rc = is_msm_pcie_rc(bus);
+
+ /*
+ * For downstream bus, make sure link is up
+ */
+ if (rc && (devfn != 0)) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ } else if ((!rc) && (!qcom_pcie_is_link_up(dev))) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ msm_pcie_config_cfgtype(bus, devfn);
+
+ word_offset = where & ~0x3;
+ byte_offset = where & 0x3;
+ mask = (~0 >> (8 * (4 - size))) << (8 * byte_offset);
+
+ config_base = (rc) ? dev->dwc_base : dev->cfg_base;
+ rd_val = readl_relaxed(config_base + word_offset);
+
+ if (oper == RD) {
+ *val = ((rd_val & mask) >> (8 * byte_offset));
+ } else {
+ wr_val = (rd_val & ~mask) |
+ ((*val << (8 * byte_offset)) & mask);
+ writel_relaxed(wr_val, config_base + word_offset);
+ wmb(); /* ensure config data is written to hardware register */
+ }
+
+ return 0;
+}
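msm_pcie_oper_conf() above always reads and writes whole 32-bit words, synthesizing byte and halfword accesses with a byte-lane mask. A 2-byte access at config offset 0x06, for instance, targets word 0x04, byte offset 2, mask 0xffff0000. A standalone check of that arithmetic (using ~0u rather than the driver's signed ~0 to keep the shift well defined):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int where = 0x06, size = 2;    /* 16-bit access at offset 0x06 */
        unsigned int word_offset = where & ~0x3u;
        unsigned int byte_offset = where & 0x3u;
        uint32_t mask = (~0u >> (8 * (4 - size))) << (8 * byte_offset);

        printf("word=0x%02x byte=%u mask=0x%08x\n",
               word_offset, byte_offset, mask); /* word=0x04 byte=2 mask=0xffff0000 */
        return 0;
}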
+
+static int msm_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
+ return msm_pcie_oper_conf(bus, devfn, RD, where, size, val);
+}
+
+static int msm_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ /*
+ * An attempt to reset the secondary bus causes the PCIe core to
+ * reset; disable secondary bus reset functionality.
+ */
+ if ((bus->number == 0) && (where == PCI_BRIDGE_CONTROL) &&
+ (val & PCI_BRIDGE_CTL_BUS_RESET)) {
+ pr_info("PCIE secondary bus reset not supported\n");
+ val &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ }
+
+ return msm_pcie_oper_conf(bus, devfn, WR, where, size, &val);
+}
+
+static int qcom_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct qcom_pcie *pcie_dev = PCIE_BUS_PRIV_DATA(dev);
+
+ return pcie_dev->irq_int[pin-1];
+}
+
+static int qcom_pcie_setup(int nr, struct pci_sys_data *sys)
+{
+ struct qcom_pcie *qcom_pcie = sys->private_data;
+
+ /*
+ * Tell the Linux PCI framework to allocate device memory (BARs)
+ * from this controller's memory resource.
+ */
+ sys->mem_offset = 0;
+ sys->io_offset = 0;
+
+ pci_add_resource(&sys->resources, &qcom_pcie->mem);
+ pci_add_resource(&sys->resources, &qcom_pcie->io);
+
+ return 1;
+}
+
+static struct pci_ops qcom_pcie_ops = {
+ .read = msm_pcie_rd_conf,
+ .write = msm_pcie_wr_conf,
+};
+
+static struct hw_pci qcom_hw_pci[MAX_RC_NUM] = {
+ {
+ .ops = &qcom_pcie_ops,
+ .nr_controllers = 1,
+ .swizzle = pci_common_swizzle,
+ .setup = qcom_pcie_setup,
+ .map_irq = qcom_pcie_map_irq,
+ },
+ {
+ .ops = &qcom_pcie_ops,
+ .nr_controllers = 1,
+ .swizzle = pci_common_swizzle,
+ .setup = qcom_pcie_setup,
+ .map_irq = qcom_pcie_map_irq,
+ },
+ {
+ .ops = &qcom_pcie_ops,
+ .nr_controllers = 1,
+ .swizzle = pci_common_swizzle,
+ .setup = qcom_pcie_setup,
+ .map_irq = qcom_pcie_map_irq,
+ },
+};
+
+static inline void qcom_elbi_writel_relaxed(struct qcom_pcie *pcie,
+ u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->elbi_base + reg);
+}
+
+static inline u32 qcom_elbi_readl_relaxed(struct qcom_pcie *pcie, u32 reg)
+{
+ return readl_relaxed(pcie->elbi_base + reg);
+}
+
+static inline void qcom_parf_writel_relaxed(struct qcom_pcie *pcie,
+ u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->parf_base + reg);
+}
+
+static inline u32 qcom_parf_readl_relaxed(struct qcom_pcie *pcie, u32 reg)
+{
+ return readl_relaxed(pcie->parf_base + reg);
+}
+
+static void msm_pcie_write_mask(void __iomem *addr,
+ uint32_t clear_mask, uint32_t set_mask)
+{
+ uint32_t val;
+
+ val = (readl_relaxed(addr) & ~clear_mask) | set_mask;
+ writel_relaxed(val, addr);
+ wmb(); /* ensure data is written to hardware register */
+}
+
+static void qcom_pcie_config_controller(struct qcom_pcie *dev)
+{
+ /*
+ * program and enable address translation region 0 (device config
+ * address space); region type config;
+ * axi config address range to device config address range
+ */
+ writel_relaxed(0, dev->dwc_base + PCIE20_PLR_IATU_VIEWPORT);
+ /* ensure that hardware locks the region before programming it */
+ wmb();
+
+ writel_relaxed(4, dev->dwc_base + PCIE20_PLR_IATU_CTRL1);
+ writel_relaxed(BIT(31), dev->dwc_base + PCIE20_PLR_IATU_CTRL2);
+ writel_relaxed(dev->conf.start, dev->dwc_base + PCIE20_PLR_IATU_LBAR);
+ writel_relaxed(0, dev->dwc_base + PCIE20_PLR_IATU_UBAR);
+ writel_relaxed(dev->conf.end, dev->dwc_base + PCIE20_PLR_IATU_LAR);
+ writel_relaxed(MSM_PCIE_DEV_CFG_ADDR,
+ dev->dwc_base + PCIE20_PLR_IATU_LTAR);
+ writel_relaxed(0, dev->dwc_base + PCIE20_PLR_IATU_UTAR);
+ /* ensure that hardware registers the configuration */
+ wmb();
+
+ /*
+ * program and enable address translation region 2 (device resource
+ * address space); region type memory;
+ * axi device bar address range to device bar address range
+ */
+ writel_relaxed(2, dev->dwc_base + PCIE20_PLR_IATU_VIEWPORT);
+ /* ensure that hardware locks the region before programming it */
+ wmb();
+
+ writel_relaxed(0, dev->dwc_base + PCIE20_PLR_IATU_CTRL1);
+ writel_relaxed(BIT(31), dev->dwc_base + PCIE20_PLR_IATU_CTRL2);
+ writel_relaxed(dev->mem.start, dev->dwc_base + PCIE20_PLR_IATU_LBAR);
+ writel_relaxed(0, dev->dwc_base + PCIE20_PLR_IATU_UBAR);
+ writel_relaxed(dev->mem.end, dev->dwc_base + PCIE20_PLR_IATU_LAR);
+ writel_relaxed(dev->mem.start,
+ dev->dwc_base + PCIE20_PLR_IATU_LTAR);
+ writel_relaxed(0, dev->dwc_base + PCIE20_PLR_IATU_UTAR);
+ /* ensure that hardware registers the configuration */
+ wmb();
+
+ /* 1K PCIE buffer setting */
+ writel_relaxed(0x3, dev->dwc_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
+ writel_relaxed(0x1, dev->dwc_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
+ /* ensure that hardware registers the configuration */
+ wmb();
+}
+
+static int qcom_msi_alloc(struct qcom_msi *chip)
+{
+ int msi;
+
+ mutex_lock(&chip->lock);
+
+ msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
+ if (msi < INT_PCI_MSI_NR)
+ set_bit(msi, chip->used);
+ else
+ msi = -ENOSPC;
+
+ mutex_unlock(&chip->lock);
+
+ return msi;
+}
+
+static void qcom_msi_free(struct qcom_msi *chip, unsigned long irq)
+{
+ struct device *dev = chip->chip.dev;
+
+ mutex_lock(&chip->lock);
+
+ if (!test_bit(irq, chip->used))
+ dev_err(dev, "trying to free unused MSI#%lu\n", irq);
+ else
+ clear_bit(irq, chip->used);
+
+ mutex_unlock(&chip->lock);
+}
+
+
+static irqreturn_t handle_msi_irq(int irq, void *data)
+{
+ int i, j, index;
+ unsigned long val;
+ struct qcom_pcie *dev = data;
+ void __iomem *ctrl_status;
+ struct qcom_msi *msi = &dev->msi;
+
+ /*
+ * Scan each status register for set bits, ack each bit by writing
+ * it back, and dispatch the corresponding mapped IRQ.
+ */
+ for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++) {
+ ctrl_status = dev->dwc_base +
+ PCIE20_MSI_CTRL_INTR_STATUS + (i * 12);
+
+ val = readl_relaxed(ctrl_status);
+ while (val) {
+ j = find_first_bit(&val, 32);
+ index = j + (32 * i);
+ writel_relaxed(BIT(j), ctrl_status);
+ /* ensure that interrupt is cleared (acked) */
+ wmb();
+
+ irq = irq_find_mapping(msi->domain, index);
+ if (irq) {
+ if (test_bit(index, msi->used))
+ generic_handle_irq(irq);
+ else
+ dev_info(dev->dev, "unhandled MSI\n");
+ }
+ val = readl_relaxed(ctrl_status);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
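handle_msi_irq() above folds the eight 32-bit status registers into a flat MSI number, index = bit + 32 * register, stepping through the registers with a 12-byte stride (the status registers are interleaved with the enable and mask registers). A standalone illustration of that mapping:

#include <stdio.h>

#define PCIE20_MSI_CTRL_INTR_STATUS 0x830

int main(void)
{
        int i = 2, j = 5;               /* status register 2, bit 5 */
        int index = j + (32 * i);       /* flat MSI number: 69 */
        unsigned int reg = PCIE20_MSI_CTRL_INTR_STATUS + (i * 12);

        printf("MSI %d signalled via status reg 0x%03x bit %d\n", index, reg, j);
        /* -> MSI 69 via status reg 0x848 bit 5 */
        return 0;
}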
+
+static inline struct qcom_msi *to_qcom_msi(struct msi_controller *chip)
+{
+ return container_of(chip, struct qcom_msi, chip);
+}
+
+static int qcom_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
+ struct msi_desc *desc)
+{
+ struct qcom_msi *msi = to_qcom_msi(chip);
+ struct msi_msg msg;
+ unsigned int irq;
+ int hwirq;
+
+ hwirq = qcom_msi_alloc(msi);
+ if (hwirq < 0)
+ return hwirq;
+
+ irq = irq_create_mapping(msi->domain, hwirq);
+ if (!irq)
+ return -EINVAL;
+
+ irq_set_msi_desc(irq, desc);
+
+ msg.address_lo = MSM_PCIE_MSI_PHY;
+ /* 32 bit address only */
+ msg.address_hi = 0;
+ msg.data = hwirq;
+
+ write_msi_msg(irq, &msg);
+
+ return 0;
+}
+
+static void qcom_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
+{
+ struct qcom_msi *msi = to_qcom_msi(chip);
+ struct irq_data *d = irq_get_irq_data(irq);
+
+ qcom_msi_free(msi, d->hwirq);
+}
+
+static struct irq_chip qcom_msi_irq_chip = {
+ .name = "PCI-MSI",
+ .irq_enable = unmask_msi_irq,
+ .irq_disable = mask_msi_irq,
+ .irq_mask = mask_msi_irq,
+ .irq_unmask = unmask_msi_irq,
+};
+
+
+static int qcom_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &qcom_msi_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .map = qcom_pcie_msi_map,
+};
+uint32_t msm_pcie_msi_init(struct qcom_pcie *pcie, struct platform_device *pdev)
+{
+ int i, rc;
+ struct qcom_msi *msi = &pcie->msi;
+ int err;
+
+ mutex_init(&msi->lock);
+
+ msi->chip.dev = pcie->dev;
+ msi->chip.setup_irq = qcom_msi_setup_irq;
+ msi->chip.teardown_irq = qcom_msi_teardown_irq;
+ msi->domain = irq_domain_add_linear(pdev->dev.of_node, INT_PCI_MSI_NR,
+ &msi_domain_ops, &msi->chip);
+ if (!msi->domain) {
+ dev_err(&pdev->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+
+ err = platform_get_irq_byname(pdev, "msi");
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
+ return err;
+ }
+
+ msi->irq = err;
+
+ /* program MSI controller and enable all interrupts */
+ writel_relaxed(MSM_PCIE_MSI_PHY, pcie->dwc_base + PCIE20_MSI_CTRL_ADDR);
+ writel_relaxed(0, pcie->dwc_base + PCIE20_MSI_CTRL_UPPER_ADDR);
+
+ for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++)
+ writel_relaxed(~0, pcie->dwc_base +
+ PCIE20_MSI_CTRL_INTR_EN + (i * 12));
+
+ /* ensure that hardware is configured before proceeding */
+ wmb();
+
+ /* register handler for physical MSI interrupt line */
+ rc = request_irq(msi->irq, handle_msi_irq, IRQF_TRIGGER_RISING,
+ "msm_pcie_msi", pcie);
+ if (rc) {
+ pr_err("Unable to allocate msi interrupt\n");
+ return rc;
+ }
+
+ return rc;
+}
+
+static int qcom_pcie_vreg_on(struct qcom_pcie *qcom_pcie)
+{
+ int err;
+ /* enable regulators */
+ err = regulator_enable(qcom_pcie->vdd_supply);
+ if (err < 0) {
+ dev_err(qcom_pcie->dev, "failed to enable VDD regulator\n");
+ return err;
+ }
+
+ err = regulator_enable(qcom_pcie->pcie_clk_supply);
+ if (err < 0) {
+ dev_err(qcom_pcie->dev, "failed to enable pcie-clk regulator\n");
+ return err;
+ }
+
+ err = regulator_enable(qcom_pcie->avdd_supply);
+ if (err < 0) {
+ dev_err(qcom_pcie->dev, "failed to enable AVDD regulator\n");
+ return err;
+ }
+
+ err = regulator_enable(qcom_pcie->pcie_ext3p3v_supply);
+ if (err < 0) {
+ dev_err(qcom_pcie->dev, "failed to enable pcie_ext3p3v regulator\n");
+ return err;
+ }
+
+ return err;
+
+}
+
+static int qcom_pcie_parse_dt(struct qcom_pcie *qcom_pcie,
+ struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *elbi_base, *parf_base, *dwc_base;
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ int ret, i;
+
+ qcom_pcie->ext_phy_ref_clk = of_property_read_bool(np,
+ "qcom,external-phy-refclk");
+
+ elbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
+ qcom_pcie->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base);
+ if (IS_ERR(qcom_pcie->elbi_base)) {
+ dev_err(&pdev->dev, "Failed to ioremap elbi space\n");
+ return PTR_ERR(qcom_pcie->elbi_base);
+ }
+
+ parf_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
+ qcom_pcie->parf_base = devm_ioremap_resource(&pdev->dev, parf_base);
+ if (IS_ERR(qcom_pcie->parf_base)) {
+ dev_err(&pdev->dev, "Failed to ioremap parf space\n");
+ return PTR_ERR(qcom_pcie->parf_base);
+ }
+
+ dwc_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
+ qcom_pcie->dwc_base = devm_ioremap_resource(&pdev->dev, dwc_base);
+ if (IS_ERR(qcom_pcie->dwc_base)) {
+ dev_err(&pdev->dev, "Failed to ioremap dwc_base space\n");
+ return PTR_ERR(qcom_pcie->dwc_base);
+ }
+
+ if (of_pci_range_parser_init(&parser, np)) {
+ dev_err(&pdev->dev, "missing ranges property\n");
+ return -EINVAL;
+ }
+
+ /* Get the I/O and memory ranges from DT */
+ for_each_of_pci_range(&parser, &range) {
+ switch (range.pci_space & 0x3) {
+ case 0: /* cfg */
+ of_pci_range_to_resource(&range, np, &qcom_pcie->conf);
+ qcom_pcie->conf.flags = IORESOURCE_MEM;
+ break;
+ case 1: /* io */
+ of_pci_range_to_resource(&range, np, &qcom_pcie->io);
+ break;
+ default: /* mem */
+ of_pci_range_to_resource(&range, np, &qcom_pcie->mem);
+ break;
+ }
+ }
+
+ qcom_pcie->vdd_supply = devm_regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(qcom_pcie->vdd_supply)) {
+ dev_err(&pdev->dev, "Failed to get vdd supply\n");
+ return PTR_ERR(qcom_pcie->vdd_supply);
+ }
+
+ qcom_pcie->pcie_clk_supply = devm_regulator_get(&pdev->dev, "pcie-clk");
+ if (IS_ERR(qcom_pcie->pcie_clk_supply)) {
+ dev_err(&pdev->dev, "Failed to get pcie clk supply\n");
+ return PTR_ERR(qcom_pcie->pcie_clk_supply);
+ }
+ qcom_pcie->avdd_supply = devm_regulator_get(&pdev->dev, "avdd");
+ if (IS_ERR(qcom_pcie->avdd_supply)) {
+ dev_err(&pdev->dev, "Failed to get avdd supply\n");
+ return PTR_ERR(qcom_pcie->avdd_supply);
+ }
+
+ qcom_pcie->pcie_ext3p3v_supply = devm_regulator_get(&pdev->dev,
+ "ext-3p3v");
+ if (IS_ERR(qcom_pcie->pcie_ext3p3v_supply)) {
+ dev_err(&pdev->dev, "Failed to get pcie_ext3p3v supply\n");
+ return PTR_ERR(qcom_pcie->pcie_ext3p3v_supply);
+ }
+
+ qcom_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+ if (!gpio_is_valid(qcom_pcie->reset_gpio)) {
+ dev_err(&pdev->dev, "pcie reset gpio is not valid\n");
+ return -EINVAL;
+ }
+
+ ret = devm_gpio_request_one(&pdev->dev, qcom_pcie->reset_gpio,
+ GPIOF_DIR_OUT, "pcie_reset");
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request pcie reset gpio\n");
+ return ret;
+ }
+
+ qcom_pcie->iface_clk = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(qcom_pcie->iface_clk)) {
+ dev_err(&pdev->dev, "Failed to get pcie iface clock\n");
+ return PTR_ERR(qcom_pcie->iface_clk);
+ }
+
+ qcom_pcie->phy_clk = devm_clk_get(&pdev->dev, "phy");
+ if (IS_ERR(qcom_pcie->phy_clk)) {
+ dev_err(&pdev->dev, "Failed to get pcie phy clock\n");
+ return PTR_ERR(qcom_pcie->phy_clk);
+ }
+
+ qcom_pcie->bus_clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(qcom_pcie->bus_clk)) {
+ dev_err(&pdev->dev, "Failed to get pcie core clock\n");
+ return PTR_ERR(qcom_pcie->bus_clk);
+ }
+
+ qcom_pcie->axi_reset = devm_reset_control_get(&pdev->dev, "axi");
+ if (IS_ERR(qcom_pcie->axi_reset)) {
+ dev_err(&pdev->dev, "Failed to get axi reset\n");
+ return PTR_ERR(qcom_pcie->axi_reset);
+ }
+
+ qcom_pcie->ahb_reset = devm_reset_control_get(&pdev->dev, "ahb");
+ if (IS_ERR(qcom_pcie->ahb_reset)) {
+ dev_err(&pdev->dev, "Failed to get ahb reset\n");
+ return PTR_ERR(qcom_pcie->ahb_reset);
+ }
+
+ qcom_pcie->por_reset = devm_reset_control_get(&pdev->dev, "por");
+ if (IS_ERR(qcom_pcie->por_reset)) {
+ dev_err(&pdev->dev, "Failed to get por reset\n");
+ return PTR_ERR(qcom_pcie->por_reset);
+ }
+
+ qcom_pcie->pci_reset = devm_reset_control_get(&pdev->dev, "pci");
+ if (IS_ERR(qcom_pcie->pci_reset)) {
+ dev_err(&pdev->dev, "Failed to get pci reset\n");
+ return PTR_ERR(qcom_pcie->pci_reset);
+ }
+
+ qcom_pcie->phy_reset = devm_reset_control_get(&pdev->dev, "phy");
+ if (IS_ERR(qcom_pcie->phy_reset)) {
+ dev_err(&pdev->dev, "Failed to get phy reset\n");
+ return PTR_ERR(qcom_pcie->phy_reset);
+ }
+
+ for (i = 0; i < 4; i++) {
+ qcom_pcie->irq_int[i] = platform_get_irq(pdev, i+1);
+ if (qcom_pcie->irq_int[i] < 0) {
+ dev_err(&pdev->dev, "failed to get irq resource\n");
+ return qcom_pcie->irq_int[i];
+ }
+ }
+
+ return 0;
+}
+
+static int qcom_pcie_probe(struct platform_device *pdev)
+{
+ unsigned long flags;
+ struct qcom_pcie *qcom_pcie;
+ struct hw_pci *hw;
+ int ret;
+ u32 val;
+
+ qcom_pcie = devm_kzalloc(&pdev->dev, sizeof(*qcom_pcie), GFP_KERNEL);
+ if (!qcom_pcie) {
+ dev_err(&pdev->dev, "no memory for qcom_pcie\n");
+ return -ENOMEM;
+ }
+ qcom_pcie->dev = &pdev->dev;
+
+ ret = qcom_pcie_parse_dt(qcom_pcie, pdev);
+ if (IS_ERR_VALUE(ret))
+ return ret;
+
+ qcom_pcie->cfg_base = devm_ioremap_resource(&pdev->dev,
+ &qcom_pcie->conf);
+ if (IS_ERR(qcom_pcie->cfg_base)) {
+ dev_err(&pdev->dev, "Failed to ioremap PCIe cfg space\n");
+ return PTR_ERR(qcom_pcie->cfg_base);
+ }
+
+ gpio_set_value(qcom_pcie->reset_gpio, 0);
+ usleep_range(10000, 15000);
+
+ /* enable power */
+ qcom_pcie_vreg_on(qcom_pcie);
+ /* assert PCIe PARF reset while powering the core */
+ reset_control_assert(qcom_pcie->ahb_reset);
+
+ /* enable clocks */
+ ret = clk_prepare_enable(qcom_pcie->iface_clk);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(qcom_pcie->phy_clk);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(qcom_pcie->bus_clk);
+ if (ret)
+ return ret;
+
+ /*
+ * de-assert PCIe PARF reset;
+ * wait 1us before accessing PARF registers
+ */
+ reset_control_deassert(qcom_pcie->ahb_reset);
+ udelay(1);
+
+ /* enable PCIe clocks and resets */
+ msm_pcie_write_mask(qcom_pcie->parf_base + PCIE20_PARF_PHY_CTRL,
+ BIT(0), 0);
+
+ /* Set Tx Termination Offset */
+ val = qcom_parf_readl_relaxed(qcom_pcie, PCIE20_PARF_PHY_CTRL);
+ val |= PCIE20_PARF_PHY_CTRL_PHY_TX0_TERM_OFFST(7);
+ qcom_parf_writel_relaxed(qcom_pcie, val, PCIE20_PARF_PHY_CTRL);
+
+ /* PARF programming */
+ qcom_parf_writel_relaxed(qcom_pcie,
+ PCIE20_PARF_PCS_DEEMPH_TX_DEEMPH_GEN1(0x18) |
+ PCIE20_PARF_PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(0x18) |
+ PCIE20_PARF_PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(0x22),
+ PCIE20_PARF_PCS_DEEMPH);
+ qcom_parf_writel_relaxed(qcom_pcie,
+ PCIE20_PARF_PCS_SWING_TX_SWING_FULL(0x78) |
+ PCIE20_PARF_PCS_SWING_TX_SWING_LOW(0x78),
+ PCIE20_PARF_PCS_SWING);
+	qcom_parf_writel_relaxed(qcom_pcie, (4 << 24), PCIE20_PARF_CONFIG_BITS);
+ /* ensure that hardware registers the PARF configuration */
+ wmb();
+
+ /* enable reference clock */
+ msm_pcie_write_mask(qcom_pcie->parf_base + PCIE20_PARF_PHY_REFCLK,
+ qcom_pcie->ext_phy_ref_clk ? 0 : BIT(12),
+ BIT(16));
+
+ /* ensure that access is enabled before proceeding */
+ wmb();
+
+	/* de-assert PCIe PHY, Core, POR and AXI clk domain resets */
+ reset_control_deassert(qcom_pcie->phy_reset);
+ reset_control_deassert(qcom_pcie->pci_reset);
+ reset_control_deassert(qcom_pcie->por_reset);
+ reset_control_deassert(qcom_pcie->axi_reset);
+
+	/* wait for clock acquisition (10-15 ms) */
+ usleep_range(10000, 15000);
+
+ /* de-assert PCIe reset link to bring EP out of reset */
+ gpio_set_value(qcom_pcie->reset_gpio, 1);
+ usleep_range(10000, 15000);
+
+ /* enable link training */
+ val = qcom_elbi_readl_relaxed(qcom_pcie, PCIE20_ELBI_SYS_CTRL);
+ val |= PCIE20_ELBI_SYS_CTRL_LTSSM_EN;
+ qcom_elbi_writel_relaxed(qcom_pcie, val, PCIE20_ELBI_SYS_CTRL);
+ wmb();
+
+	/* poll up to 100 ms for the link to come up (DLL Link Active, bit 29) */
+ ret = readl_poll_timeout(
+ (qcom_pcie->dwc_base + PCIE20_CAP_LINKCTRLSTATUS),
+ val, (val & BIT(29)), 10000, 100000);
+
+ dev_info(&pdev->dev, "link initialized %d\n", ret);
+
+ qcom_pcie_config_controller(qcom_pcie);
+
+ platform_set_drvdata(pdev, qcom_pcie);
+
+ spin_lock_irqsave(&qcom_hw_pci_lock, flags);
+ qcom_hw_pci[nr_controllers].private_data = (void **)&qcom_pcie;
+ hw = &qcom_hw_pci[nr_controllers];
+
+#ifdef CONFIG_PCI_MSI
+ hw->msi_ctrl = &qcom_pcie->msi.chip;
+#endif
+ nr_controllers++;
+ spin_unlock_irqrestore(&qcom_hw_pci_lock, flags);
+
+ pci_common_init_dev(&pdev->dev, hw);
+
+ msm_pcie_msi_init(qcom_pcie, pdev);
+	return 0;
+
+err_phy:
+	clk_disable_unprepare(qcom_pcie->phy_clk);
+err_iface:
+	clk_disable_unprepare(qcom_pcie->iface_clk);
+	return ret;
+}
+
+static int qcom_pcie_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id qcom_pcie_match[] = {
+ { .compatible = "qcom,pcie-ipq8064", },
+ {}
+};
+
+static struct platform_driver qcom_pcie_driver = {
+ .probe = qcom_pcie_probe,
+ .remove = qcom_pcie_remove,
+ .driver = {
+ .name = "qcom_pcie",
+ .owner = THIS_MODULE,
+ .of_match_table = qcom_pcie_match,
+ },
+};
+
+static int qcom_pcie_init(void)
+{
+ return platform_driver_register(&qcom_pcie_driver);
+}
+subsys_initcall_sync(qcom_pcie_init);
+
+/* The RC does not report the correct class; set it to PCI_CLASS_BRIDGE_PCI */
+static void msm_pcie_fixup_early(struct pci_dev *dev)
+{
+	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+ dev->class = (dev->class & 0xff) | (PCI_CLASS_BRIDGE_PCI << 8);
+}
+DECLARE_PCI_FIXUP_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP,
+			msm_pcie_fixup_early);
diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig
index a67eeace6a89..67e77e4e86a9 100644
--- a/drivers/power/avs/Kconfig
+++ b/drivers/power/avs/Kconfig
@@ -11,6 +11,20 @@ menuconfig POWER_AVS
Say Y here to enable Adaptive Voltage Scaling class support.
+config QCOM_CPR
+ tristate "QCOM Core Power Reduction (CPR) support"
+ select PM_OPP
+ help
+ Say Y here to enable support for the CPR hardware found on Qualcomm
+ SoCs like MSM8916.
+
+	  This driver populates the CPU OPP tables and adjusts them based on
+	  feedback from the CPR hardware. If you want to do CPU frequency
+	  scaling, say Y here.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called qcom-cpr.
+
config ROCKCHIP_IODOMAIN
tristate "Rockchip IO domain support"
depends on POWER_AVS && ARCH_ROCKCHIP && OF
diff --git a/drivers/power/avs/Makefile b/drivers/power/avs/Makefile
index ba4c7bc69225..88f4d5d49cba 100644
--- a/drivers/power/avs/Makefile
+++ b/drivers/power/avs/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_POWER_AVS_OMAP) += smartreflex.o
obj-$(CONFIG_ROCKCHIP_IODOMAIN) += rockchip-io-domain.o
+obj-$(CONFIG_QCOM_CPR) += qcom-cpr.o
diff --git a/drivers/power/avs/qcom-cpr.c b/drivers/power/avs/qcom-cpr.c
new file mode 100644
index 000000000000..3daa2153d742
--- /dev/null
+++ b/drivers/power/avs/qcom-cpr.c
@@ -0,0 +1,1988 @@
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/cpufreq.h>
+#include <linux/regulator/qcom_smd-regulator.h>
+
+/* Register Offsets for RB-CPR and Bit Definitions */
+
+/* RBCPR Version Register */
+#define REG_RBCPR_VERSION 0
+#define RBCPR_VER_2 0x02
+
+/* RBCPR Gate Count and Target Registers */
+#define REG_RBCPR_GCNT_TARGET(n)	(0x60 + 4 * (n))
+
+#define RBCPR_GCNT_TARGET_TARGET_SHIFT 0
+#define RBCPR_GCNT_TARGET_TARGET_MASK GENMASK(11, 0)
+#define RBCPR_GCNT_TARGET_GCNT_SHIFT 12
+#define RBCPR_GCNT_TARGET_GCNT_MASK GENMASK(9, 0)
+
+/* RBCPR Timer Control */
+#define REG_RBCPR_TIMER_INTERVAL 0x44
+#define REG_RBIF_TIMER_ADJUST 0x4c
+
+#define RBIF_TIMER_ADJ_CONS_UP_MASK GENMASK(3, 0)
+#define RBIF_TIMER_ADJ_CONS_UP_SHIFT 0
+#define RBIF_TIMER_ADJ_CONS_DOWN_MASK GENMASK(3, 0)
+#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT 4
+#define RBIF_TIMER_ADJ_CLAMP_INT_MASK GENMASK(7, 0)
+#define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT 8
+
+/* RBCPR Config Register */
+#define REG_RBIF_LIMIT 0x48
+#define RBIF_LIMIT_CEILING_MASK GENMASK(5, 0)
+#define RBIF_LIMIT_CEILING_SHIFT 6
+#define RBIF_LIMIT_FLOOR_BITS 6
+#define RBIF_LIMIT_FLOOR_MASK GENMASK(5, 0)
+
+#define RBIF_LIMIT_CEILING_DEFAULT RBIF_LIMIT_CEILING_MASK
+#define RBIF_LIMIT_FLOOR_DEFAULT 0
+
+#define REG_RBIF_SW_VLEVEL 0x94
+#define RBIF_SW_VLEVEL_DEFAULT 0x20
+
+#define REG_RBCPR_STEP_QUOT 0x80
+#define RBCPR_STEP_QUOT_STEPQUOT_MASK GENMASK(7, 0)
+#define RBCPR_STEP_QUOT_IDLE_CLK_MASK GENMASK(3, 0)
+#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT 8
+
+/* RBCPR Control Register */
+#define REG_RBCPR_CTL 0x90
+
+#define RBCPR_CTL_LOOP_EN BIT(0)
+#define RBCPR_CTL_TIMER_EN BIT(3)
+#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN BIT(5)
+#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN BIT(6)
+#define RBCPR_CTL_COUNT_MODE BIT(10)
+#define RBCPR_CTL_UP_THRESHOLD_MASK GENMASK(3, 0)
+#define RBCPR_CTL_UP_THRESHOLD_SHIFT 24
+#define RBCPR_CTL_DN_THRESHOLD_MASK GENMASK(3, 0)
+#define RBCPR_CTL_DN_THRESHOLD_SHIFT 28
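+
+/*
+ * Note: the *_MASK values above are field-width masks; shift them by the
+ * matching *_SHIFT before programming the register, as cpr_ctl_enable()
+ * and cpr_scale() do below.
+ */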
+
+/* RBCPR Ack/Nack Response */
+#define REG_RBIF_CONT_ACK_CMD 0x98
+#define REG_RBIF_CONT_NACK_CMD 0x9c
+
+/* RBCPR Result status Register */
+#define REG_RBCPR_RESULT_0 0xa0
+
+#define RBCPR_RESULT0_BUSY_SHIFT 19
+#define RBCPR_RESULT0_BUSY_MASK BIT(RBCPR_RESULT0_BUSY_SHIFT)
+#define RBCPR_RESULT0_ERROR_LT0_SHIFT 18
+#define RBCPR_RESULT0_ERROR_SHIFT 6
+#define RBCPR_RESULT0_ERROR_MASK GENMASK(11, 0)
+#define RBCPR_RESULT0_ERROR_STEPS_SHIFT 2
+#define RBCPR_RESULT0_ERROR_STEPS_MASK GENMASK(3, 0)
+#define RBCPR_RESULT0_STEP_UP_SHIFT 1
+
+/* RBCPR Interrupt Control Register */
+#define REG_RBIF_IRQ_EN(n)	(0x100 + 4 * (n))
+#define REG_RBIF_IRQ_CLEAR 0x110
+#define REG_RBIF_IRQ_STATUS 0x114
+
+#define CPR_INT_DONE BIT(0)
+#define CPR_INT_MIN BIT(1)
+#define CPR_INT_DOWN BIT(2)
+#define CPR_INT_MID BIT(3)
+#define CPR_INT_UP BIT(4)
+#define CPR_INT_MAX BIT(5)
+#define CPR_INT_CLAMP BIT(6)
+#define CPR_INT_ALL (CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \
+ CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP)
+#define CPR_INT_DEFAULT (CPR_INT_UP | CPR_INT_DOWN)
+
+#define CPR_NUM_RING_OSC 8
+
+/* RBCPR Clock Control Register */
+#define RBCPR_CLK_SEL_MASK	BIT(0)
+#define RBCPR_CLK_SEL_19P2_MHZ 0
+#define RBCPR_CLK_SEL_AHB_CLK BIT(0)
+
+/* CPR eFuse parameters */
+#define CPR_FUSE_TARGET_QUOT_BITS_MASK GENMASK(11, 0)
+
+#define CPR_FUSE_MIN_QUOT_DIFF 50
+
+#define SPEED_BIN_NONE UINT_MAX
+
+#define FUSE_REVISION_UNKNOWN (-1)
+#define FUSE_MAP_NO_MATCH (-1)
+#define FUSE_PARAM_MATCH_ANY 0xffffffff
+
+/**
+ * enum vdd_mx_vmin_method - Method to determine vmin for vdd-mx
+ * @VDD_MX_VMIN_APC: Use APC voltage
+ * @VDD_MX_VMIN_APC_CORNER_CEILING: Use PVS corner ceiling voltage
+ * @VDD_MX_VMIN_APC_SLOW_CORNER_CEILING: Use slow speed corner ceiling
+ * @VDD_MX_VMIN_MX_VMAX: Use specified vdd-mx-vmax voltage
+ * @VDD_MX_VMIN_APC_CORNER_MAP: Use APC corner mapped MX voltage
+ */
+enum vdd_mx_vmin_method {
+ VDD_MX_VMIN_APC,
+ VDD_MX_VMIN_APC_CORNER_CEILING,
+ VDD_MX_VMIN_APC_SLOW_CORNER_CEILING,
+ VDD_MX_VMIN_MX_VMAX,
+ VDD_MX_VMIN_APC_CORNER_MAP,
+};
+/* TODO: trim this enum to the values actually used */
+
+enum voltage_change_dir {
+ NO_CHANGE,
+ DOWN,
+ UP,
+};
+
+struct qfprom_offset {
+ u16 offset;
+ u8 width;
+ u8 shift;
+};
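+
+/*
+ * For example, { 220, 6, 2 } describes a 6-bit field that starts at bit 2
+ * of the byte at offset 220 in the QFPROM block.
+ */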
+
+struct cpr_fuse {
+ struct qfprom_offset ring_osc;
+ struct qfprom_offset init_voltage;
+ struct qfprom_offset quotient;
+ struct qfprom_offset quotient_offset;
+};
+
+struct fuse_corner_data {
+ int ref_uV;
+ int max_uV;
+ int min_uV;
+ int max_quot_scale;
+ int quot_offset;
+ int quot_scale;
+ int max_volt_scale;
+ int vdd_mx_req;
+};
+
+struct cpr_fuses {
+ struct qfprom_offset redundant;
+ u8 redundant_value;
+ int init_voltage_step;
+ struct fuse_corner_data *fuse_corner_data;
+ struct cpr_fuse *cpr_fuse;
+ struct qfprom_offset *disable;
+};
+
+struct pvs_bin {
+ int *uV;
+};
+
+struct pvs_fuses {
+ struct qfprom_offset redundant;
+ u8 redundant_value;
+ struct qfprom_offset *pvs_fuse;
+ struct pvs_bin *pvs_bins;
+};
+
+struct corner_data {
+ unsigned int fuse_corner;
+ unsigned long freq;
+};
+
+struct freq_plan {
+ u32 speed_bin;
+ u32 pvs_version;
+ const struct corner_data **plan;
+};
+
+struct fuse_conditional_min_volt {
+ struct qfprom_offset redundant;
+ u8 expected;
+ int min_uV;
+};
+
+struct fuse_uplift_wa {
+ struct qfprom_offset redundant;
+ u8 expected;
+ int uV;
+ int *quot;
+ int max_uV;
+ int speed_bin;
+};
+
+struct corner_override {
+ u32 speed_bin;
+ u32 pvs_version;
+ int *max_uV;
+ int *min_uV;
+};
+
+struct corner_adjustment {
+ u32 speed_bin;
+ u32 pvs_version;
+ u32 cpr_rev;
+ u8 *ring_osc_idx;
+ int *fuse_quot;
+ int *fuse_quot_diff;
+ int *fuse_quot_min;
+ int *fuse_quot_offset;
+ int *fuse_init_uV;
+ int *quot;
+ int *init_uV;
+ bool disable_closed_loop;
+};
+
+struct cpr_desc {
+ unsigned int num_fuse_corners;
+ unsigned int num_corners;
+ enum vdd_mx_vmin_method vdd_mx_vmin_method;
+ int vdd_mx_vmax;
+ int min_diff_quot;
+ int *step_quot;
+ struct cpr_fuses cpr_fuses;
+ struct qfprom_offset fuse_revision;
+ struct qfprom_offset speed_bin;
+ struct qfprom_offset pvs_version;
+ struct corner_data *corner_data;
+ struct freq_plan *freq_plans;
+ size_t num_freq_plans;
+ struct pvs_fuses *pvs_fuses;
+ struct fuse_conditional_min_volt *min_volt_fuse;
+ struct fuse_uplift_wa *uplift_wa;
+ struct corner_override *corner_overrides;
+ size_t num_corner_overrides;
+ struct corner_adjustment *adjustments;
+ size_t num_adjustments;
+ bool reduce_to_fuse_uV;
+ bool reduce_to_corner_uV;
+};
+
+struct acc_desc {
+ unsigned int enable_reg;
+ u32 enable_mask;
+
+ struct reg_sequence *settings;
+ struct reg_sequence *override_settings;
+ int num_regs_per_fuse;
+
+ struct qfprom_offset override;
+ u8 override_value;
+};
+
+struct fuse_corner {
+ int min_uV;
+ int max_uV;
+ int uV;
+ int quot;
+ int step_quot;
+ const struct reg_sequence *accs;
+ int num_accs;
+ int vdd_mx_req;
+ unsigned long max_freq;
+ u8 ring_osc_idx;
+};
+
+struct corner {
+ int min_uV;
+ int max_uV;
+ int uV;
+ int last_uV;
+ int quot_adjust;
+ u32 save_ctl;
+ u32 save_irq;
+ unsigned long freq;
+ struct fuse_corner *fuse_corner;
+};
+
+struct cpr_drv {
+ unsigned int num_fuse_corners;
+ unsigned int num_corners;
+
+ unsigned int nb_count;
+ struct notifier_block cpufreq_nb;
+ bool switching_opp;
+ struct notifier_block reg_nb;
+
+ unsigned int ref_clk_khz;
+ unsigned int timer_delay_us;
+ unsigned int timer_cons_up;
+ unsigned int timer_cons_down;
+ unsigned int up_threshold;
+ unsigned int down_threshold;
+ unsigned int idle_clocks;
+ unsigned int gcnt_us;
+ unsigned int vdd_apc_step_up_limit;
+ unsigned int vdd_apc_step_down_limit;
+ unsigned int clamp_timer_interval;
+ int ceiling_max;
+ enum vdd_mx_vmin_method vdd_mx_vmin_method;
+ int vdd_mx_vmax;
+
+ struct mutex lock;
+ void __iomem *base;
+ struct corner *corner;
+ struct regulator *vdd_apc;
+ struct regulator *vdd_mx;
+ struct clk *cpu_clk;
+ struct device *cpu_dev;
+ struct regmap *tcsr;
+ bool loop_disabled;
+ bool suspended;
+ u32 gcnt;
+ unsigned long flags;
+#define FLAGS_IGNORE_1ST_IRQ_STATUS BIT(0)
+
+ struct fuse_corner *fuse_corners;
+ struct corner *corners;
+};
+
+static bool cpr_is_allowed(struct cpr_drv *drv)
+{
+	/* TODO: also honor a software disable control */
+	return !drv->loop_disabled;
+}
+
+static void cpr_write(struct cpr_drv *drv, u32 offset, u32 value)
+{
+ writel_relaxed(value, drv->base + offset);
+}
+
+static u32 cpr_read(struct cpr_drv *drv, u32 offset)
+{
+ return readl_relaxed(drv->base + offset);
+}
+
+static void
+cpr_masked_write(struct cpr_drv *drv, u32 offset, u32 mask, u32 value)
+{
+ u32 val;
+
+ val = readl_relaxed(drv->base + offset);
+ val &= ~mask;
+ val |= value & mask;
+ writel_relaxed(val, drv->base + offset);
+}
+
+static void cpr_irq_clr(struct cpr_drv *drv)
+{
+ cpr_write(drv, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL);
+}
+
+static void cpr_irq_clr_nack(struct cpr_drv *drv)
+{
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
+}
+
+static void cpr_irq_clr_ack(struct cpr_drv *drv)
+{
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
+}
+
+static void cpr_irq_set(struct cpr_drv *drv, u32 int_bits)
+{
+ cpr_write(drv, REG_RBIF_IRQ_EN(0), int_bits);
+}
+
+static void cpr_ctl_modify(struct cpr_drv *drv, u32 mask, u32 value)
+{
+ cpr_masked_write(drv, REG_RBCPR_CTL, mask, value);
+}
+
+static void cpr_ctl_enable(struct cpr_drv *drv, struct corner *corner)
+{
+ u32 val, mask;
+
+ if (drv->suspended)
+ return;
+
+ /* Program Consecutive Up & Down */
+ val = drv->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
+ val |= drv->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
+ mask = RBIF_TIMER_ADJ_CONS_UP_MASK | RBIF_TIMER_ADJ_CONS_DOWN_MASK;
+ cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, mask, val);
+ cpr_masked_write(drv, REG_RBCPR_CTL,
+ RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+ RBCPR_CTL_SW_AUTO_CONT_ACK_EN,
+ corner->save_ctl);
+ cpr_irq_set(drv, corner->save_irq);
+
+ if (cpr_is_allowed(drv) /*&& drv->vreg_enabled */ &&
+ corner->max_uV > corner->min_uV)
+ val = RBCPR_CTL_LOOP_EN;
+ else
+ val = 0;
+ cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, val);
+}
+
+static void cpr_ctl_disable(struct cpr_drv *drv)
+{
+ if (drv->suspended)
+ return;
+
+ cpr_irq_set(drv, 0);
+ cpr_ctl_modify(drv, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+ RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0);
+ cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST,
+ RBIF_TIMER_ADJ_CONS_UP_MASK |
+ RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0);
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
+ cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
+ cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, 0);
+}
+
+static bool cpr_ctl_is_enabled(struct cpr_drv *drv)
+{
+ u32 reg_val;
+
+ reg_val = cpr_read(drv, REG_RBCPR_CTL);
+ return reg_val & RBCPR_CTL_LOOP_EN;
+}
+
+static bool cpr_ctl_is_busy(struct cpr_drv *drv)
+{
+ u32 reg_val;
+
+ reg_val = cpr_read(drv, REG_RBCPR_RESULT_0);
+ return reg_val & RBCPR_RESULT0_BUSY_MASK;
+}
+
+static void cpr_corner_save(struct cpr_drv *drv, struct corner *corner)
+{
+ corner->save_ctl = cpr_read(drv, REG_RBCPR_CTL);
+ corner->save_irq = cpr_read(drv, REG_RBIF_IRQ_EN(0));
+}
+
+static void cpr_corner_restore(struct cpr_drv *drv, struct corner *corner)
+{
+ u32 gcnt, ctl, irq, ro_sel, step_quot;
+ struct fuse_corner *fuse = corner->fuse_corner;
+ int i;
+
+ ro_sel = fuse->ring_osc_idx;
+ gcnt = drv->gcnt;
+ gcnt |= fuse->quot - corner->quot_adjust;
+
+ /* Program the step quotient and idle clocks */
+ step_quot = drv->idle_clocks << RBCPR_STEP_QUOT_IDLE_CLK_SHIFT;
+ step_quot |= fuse->step_quot;
+ cpr_write(drv, REG_RBCPR_STEP_QUOT, step_quot);
+
+ /* Clear the target quotient value and gate count of all ROs */
+ for (i = 0; i < CPR_NUM_RING_OSC; i++)
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
+
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt);
+ ctl = corner->save_ctl;
+ cpr_write(drv, REG_RBCPR_CTL, ctl);
+ irq = corner->save_irq;
+ cpr_irq_set(drv, irq);
+ pr_debug("gcnt = 0x%08x, ctl = 0x%08x, irq = 0x%08x\n", gcnt, ctl, irq);
+}
+
+static void cpr_corner_switch(struct cpr_drv *drv, struct corner *corner)
+{
+ if (drv->corner == corner)
+ return;
+
+ cpr_corner_restore(drv, corner);
+}
+
+static int
+cpr_mx_get(struct cpr_drv *drv, struct fuse_corner *fuse, int apc_volt)
+{
+ int vdd_mx;
+ struct fuse_corner *highest_fuse;
+
+ highest_fuse = &drv->fuse_corners[drv->num_fuse_corners - 1];
+
+ switch (drv->vdd_mx_vmin_method) {
+ case VDD_MX_VMIN_APC:
+ vdd_mx = apc_volt;
+ break;
+ case VDD_MX_VMIN_APC_CORNER_CEILING:
+ vdd_mx = fuse->max_uV;
+ break;
+ case VDD_MX_VMIN_APC_SLOW_CORNER_CEILING:
+ vdd_mx = highest_fuse->max_uV;
+ break;
+ case VDD_MX_VMIN_MX_VMAX:
+ vdd_mx = drv->vdd_mx_vmax;
+ break;
+ case VDD_MX_VMIN_APC_CORNER_MAP:
+ vdd_mx = fuse->vdd_mx_req;
+ break;
+ default:
+ BUG();
+ }
+
+ return vdd_mx;
+}
+
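+/*
+ * Apply the TCSR ACC settings for every fuse corner crossed when moving
+ * from corner f to corner end, walking upwards or downwards depending on
+ * the direction of the voltage change.
+ */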
+static void cpr_set_acc(struct regmap *tcsr, struct fuse_corner *f,
+ struct fuse_corner *end)
+{
+ if (f < end) {
+ for (f += 1; f <= end; f++)
+ regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
+ } else {
+ for (f -= 1; f >= end; f--)
+ regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
+ }
+}
+
+static int cpr_pre_voltage(struct cpr_drv *drv,
+ struct fuse_corner *fuse_corner,
+ enum voltage_change_dir dir, int vdd_mx_vmin)
+{
+ int ret = 0;
+ struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
+
+ if (drv->tcsr && dir == DOWN)
+ cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
+
+ if (vdd_mx_vmin && dir == UP)
+ ret = qcom_rpm_set_corner(drv->vdd_mx, vdd_mx_vmin);
+
+ return ret;
+}
+
+static int cpr_post_voltage(struct cpr_drv *drv,
+ struct fuse_corner *fuse_corner,
+ enum voltage_change_dir dir, int vdd_mx_vmin)
+{
+ int ret = 0;
+ struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
+
+ if (drv->tcsr && dir == UP)
+ cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
+
+ if (vdd_mx_vmin && dir == DOWN)
+ ret = qcom_rpm_set_corner(drv->vdd_mx, vdd_mx_vmin);
+
+ return ret;
+}
+
+static int cpr_regulator_notifier(struct notifier_block *nb,
+ unsigned long event, void *d)
+{
+ struct cpr_drv *drv = container_of(nb, struct cpr_drv, reg_nb);
+ u32 val, mask;
+ int last_uV, new_uV;
+
+ switch (event) {
+ case REGULATOR_EVENT_VOLTAGE_CHANGE:
+ new_uV = (int)(uintptr_t)d;
+ break;
+ default:
+ return 0;
+ }
+
+ mutex_lock(&drv->lock);
+
+ last_uV = drv->corner->last_uV;
+
+ if (drv->switching_opp) {
+ goto unlock;
+ } else if (last_uV < new_uV) {
+ /* Disable auto nack down */
+ mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+ val = 0;
+ } else if (last_uV > new_uV) {
+ /* Restore default threshold for UP */
+ mask = RBCPR_CTL_UP_THRESHOLD_MASK;
+ mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val = drv->up_threshold;
+ val <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+	} else { /* voltage unchanged; nothing to adjust */
+ goto unlock;
+ }
+
+ cpr_ctl_modify(drv, mask, val);
+
+ /* Re-enable default interrupts */
+ cpr_irq_set(drv, CPR_INT_DEFAULT);
+
+ /* Ack */
+ cpr_irq_clr_ack(drv);
+
+ /* Save register values for the corner */
+ cpr_corner_save(drv, drv->corner);
+ drv->corner->last_uV = new_uV;
+unlock:
+ mutex_unlock(&drv->lock);
+
+ return 0;
+}
+
+static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir)
+{
+ u32 val, error_steps, reg_mask;
+	int last_uV, new_uV = 0, step_uV;
+ struct corner *corner;
+ struct fuse_corner *fuse;
+
+	/* step_uV = regulator_get_linear_step(drv->vdd_apc); */
+	step_uV = 12500; /* TODO: get the step voltage from the regulator API */
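+	/*
+	 * Each CPR error step corresponds to one regulator step; with the
+	 * 12500 uV step assumed above, two error steps move the target
+	 * voltage by 25000 uV, for example.
+	 */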
+ corner = drv->corner;
+ fuse = corner->fuse_corner;
+
+ val = cpr_read(drv, REG_RBCPR_RESULT_0);
+
+ error_steps = val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT;
+ error_steps &= RBCPR_RESULT0_ERROR_STEPS_MASK;
+ last_uV = corner->last_uV;
+
+ if (dir == UP) {
+ if (drv->clamp_timer_interval &&
+ error_steps < drv->up_threshold) {
+ /*
+ * Handle the case where another measurement started
+ * after the interrupt was triggered due to a core
+ * exiting from power collapse.
+ */
+ error_steps = max(drv->up_threshold,
+ drv->vdd_apc_step_up_limit);
+ }
+
+ if (last_uV >= corner->max_uV) {
+ cpr_irq_clr_nack(drv);
+
+ /* Maximize the UP threshold */
+ reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
+ reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val = reg_mask;
+ cpr_ctl_modify(drv, reg_mask, val);
+
+ /* Disable UP interrupt */
+ cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_UP);
+
+ return 0;
+ }
+
+ if (error_steps > drv->vdd_apc_step_up_limit)
+ error_steps = drv->vdd_apc_step_up_limit;
+
+ /* Calculate new voltage */
+ new_uV = last_uV + error_steps * step_uV;
+ if (new_uV > corner->max_uV)
+ new_uV = corner->max_uV;
+ } else if (dir == DOWN) {
+ if (drv->clamp_timer_interval
+ && error_steps < drv->down_threshold) {
+ /*
+ * Handle the case where another measurement started
+ * after the interrupt was triggered due to a core
+ * exiting from power collapse.
+ */
+ error_steps = max(drv->down_threshold,
+ drv->vdd_apc_step_down_limit);
+ }
+
+ if (last_uV <= corner->min_uV) {
+ cpr_irq_clr_nack(drv);
+
+ /* Enable auto nack down */
+ reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+ val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+
+ cpr_ctl_modify(drv, reg_mask, val);
+
+ /* Disable DOWN interrupt */
+ cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_DOWN);
+
+ return 0;
+ }
+
+ if (error_steps > drv->vdd_apc_step_down_limit)
+ error_steps = drv->vdd_apc_step_down_limit;
+
+ /* Calculate new voltage */
+ new_uV = last_uV - error_steps * step_uV;
+ if (new_uV < corner->min_uV)
+ new_uV = corner->min_uV;
+ }
+
+ return new_uV;
+}
+
+static irqreturn_t cpr_irq_handler(int irq, void *dev)
+{
+ struct cpr_drv *drv = dev;
+ u32 val;
+ int new_uV = 0;
+ struct corner *corner;
+
+ mutex_lock(&drv->lock);
+
+ val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+ if (drv->flags & FLAGS_IGNORE_1ST_IRQ_STATUS)
+ val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+
+ pr_debug("IRQ_STATUS = %#02x\n", val);
+
+ if (!cpr_ctl_is_enabled(drv)) {
+ pr_debug("CPR is disabled\n");
+ goto unlock;
+ } else if (cpr_ctl_is_busy(drv) && !drv->clamp_timer_interval) {
+ pr_debug("CPR measurement is not ready\n");
+ goto unlock;
+ } else if (!cpr_is_allowed(drv)) {
+ val = cpr_read(drv, REG_RBCPR_CTL);
+ pr_err_ratelimited("Interrupt broken? RBCPR_CTL = %#02x\n",
+ val);
+ goto unlock;
+ }
+
+ /* Following sequence of handling is as per each IRQ's priority */
+ if (val & CPR_INT_UP) {
+ new_uV = cpr_scale(drv, UP);
+ } else if (val & CPR_INT_DOWN) {
+ new_uV = cpr_scale(drv, DOWN);
+ } else if (val & CPR_INT_MIN) {
+ cpr_irq_clr_nack(drv);
+ } else if (val & CPR_INT_MAX) {
+ cpr_irq_clr_nack(drv);
+ } else if (val & CPR_INT_MID) {
+ /* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */
+ pr_debug("IRQ occurred for Mid Flag\n");
+ } else {
+ pr_debug("IRQ occurred for unknown flag (%#08x)\n", val);
+ }
+
+ /* Save register values for the corner */
+ corner = drv->corner;
+ cpr_corner_save(drv, corner);
+unlock:
+ mutex_unlock(&drv->lock);
+
+ if (new_uV)
+ dev_pm_opp_adjust_voltage(drv->cpu_dev, corner->freq, new_uV);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * TODO: Register for hotplug notifier and turn on/off CPR when CPUs are offline
+ */
+static int cpr_enable(struct cpr_drv *drv)
+{
+ int ret;
+
+ /* Enable dependency power before vdd_apc */
+ if (drv->vdd_mx) {
+ ret = regulator_enable(drv->vdd_mx);
+ if (ret)
+ return ret;
+ }
+
+ ret = regulator_enable(drv->vdd_apc);
+ if (ret)
+ return ret;
+
+ mutex_lock(&drv->lock);
+ //drv->vreg_enabled = true;
+ if (cpr_is_allowed(drv) && drv->corner) {
+ cpr_irq_clr(drv);
+ cpr_corner_restore(drv, drv->corner);
+ cpr_ctl_enable(drv, drv->corner);
+ }
+ mutex_unlock(&drv->lock);
+ pr_info("CPR is enabled!\n");
+
+ return 0;
+}
+
+static int cpr_disable(struct cpr_drv *drv)
+{
+ int ret;
+
+ ret = regulator_disable(drv->vdd_apc);
+ if (ret)
+ return ret;
+
+	if (drv->vdd_mx) {
+		ret = regulator_disable(drv->vdd_mx);
+		if (ret)
+			return ret;
+	}
+
+ mutex_lock(&drv->lock);
+ //drv->vreg_enabled = false;
+ if (cpr_is_allowed(drv))
+ cpr_ctl_disable(drv);
+ mutex_unlock(&drv->lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cpr_suspend(struct device *dev)
+{
+ struct cpr_drv *drv = platform_get_drvdata(to_platform_device(dev));
+
+ if (cpr_is_allowed(drv)) {
+ mutex_lock(&drv->lock);
+ cpr_ctl_disable(drv);
+ cpr_irq_clr(drv);
+ drv->suspended = true;
+ mutex_unlock(&drv->lock);
+ }
+
+ return 0;
+}
+
+static int cpr_resume(struct device *dev)
+{
+ struct cpr_drv *drv = platform_get_drvdata(to_platform_device(dev));
+
+ if (cpr_is_allowed(drv)) {
+ mutex_lock(&drv->lock);
+ drv->suspended = false;
+ cpr_irq_clr(drv);
+ cpr_ctl_enable(drv, drv->corner);
+ mutex_unlock(&drv->lock);
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cpr_pm_ops, cpr_suspend, cpr_resume);
+
+static int cpr_config(struct cpr_drv *drv)
+{
+ int i;
+ u32 val, gcnt;
+ struct corner *corner;
+
+ /* Disable interrupt and CPR */
+ cpr_write(drv, REG_RBIF_IRQ_EN(0), 0);
+ cpr_write(drv, REG_RBCPR_CTL, 0);
+
+ /* Program the default HW Ceiling, Floor and vlevel */
+ val = RBIF_LIMIT_CEILING_DEFAULT << RBIF_LIMIT_CEILING_SHIFT;
+ val |= RBIF_LIMIT_FLOOR_DEFAULT;
+ cpr_write(drv, REG_RBIF_LIMIT, val);
+ cpr_write(drv, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT);
+
+ /* Clear the target quotient value and gate count of all ROs */
+ for (i = 0; i < CPR_NUM_RING_OSC; i++)
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
+
+ /* Init and save gcnt */
+ gcnt = (drv->ref_clk_khz * drv->gcnt_us) / 1000;
+ gcnt = gcnt & RBCPR_GCNT_TARGET_GCNT_MASK;
+ gcnt <<= RBCPR_GCNT_TARGET_GCNT_SHIFT;
+ drv->gcnt = gcnt;
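+	/*
+	 * For example, with a 19200 kHz reference clock and gcnt_us = 1
+	 * (illustrative values), the gate count programmed here is 19
+	 * reference-clock cycles.
+	 */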
+
+ /* Program the delay count for the timer */
+ val = (drv->ref_clk_khz * drv->timer_delay_us) / 1000;
+ cpr_write(drv, REG_RBCPR_TIMER_INTERVAL, val);
+ pr_debug("Timer count: 0x%0x (for %d us)\n", val, drv->timer_delay_us);
+
+ /* Program Consecutive Up & Down */
+ val = drv->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
+ val |= drv->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
+ val |= drv->clamp_timer_interval << RBIF_TIMER_ADJ_CLAMP_INT_SHIFT;
+ cpr_write(drv, REG_RBIF_TIMER_ADJUST, val);
+
+ /* Program the control register */
+ val = drv->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val |= drv->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT;
+ val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE;
+ val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN;
+ cpr_write(drv, REG_RBCPR_CTL, val);
+
+ for (i = 0; i < drv->num_corners; i++) {
+ corner = &drv->corners[i];
+ corner->save_ctl = val;
+ corner->save_irq = CPR_INT_DEFAULT;
+ }
+
+ cpr_irq_set(drv, CPR_INT_DEFAULT);
+
+ val = cpr_read(drv, REG_RBCPR_VERSION);
+ if (val <= RBCPR_VER_2)
+ drv->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS;
+
+ return 0;
+}
+
+/* Called twice for each CPU in policy, one pre and one post event */
+static int
+cpr_cpufreq_notifier(struct notifier_block *nb, unsigned long event, void *f)
+{
+ struct cpr_drv *drv = container_of(nb, struct cpr_drv, cpufreq_nb);
+ struct cpufreq_freqs *freqs = f;
+ unsigned long old = freqs->old * 1000;
+ unsigned long new = freqs->new * 1000;
+ struct corner *corner, *end;
+ enum voltage_change_dir dir;
+ int ret = 0, new_uV;
+ int vdd_mx_vmin = 0;
+ struct fuse_corner *fuse_corner;
+
+ /* Determine direction */
+ if (old > new)
+ dir = DOWN;
+ else if (old < new)
+ dir = UP;
+ else
+ dir = NO_CHANGE;
+
+ /* Determine new corner we're going to */
+ corner = drv->corners;
+ end = &corner[drv->num_corners - 1];
+ for (; corner <= end; corner++)
+ if (corner->freq == new)
+ break;
+
+ if (corner > end)
+ return -EINVAL;
+
+ fuse_corner = corner->fuse_corner;
+
+ if (cpr_is_allowed(drv)) {
+ new_uV = corner->last_uV;
+ } else {
+ new_uV = corner->uV;
+ }
+
+ if (dir != NO_CHANGE && drv->vdd_mx)
+ vdd_mx_vmin = cpr_mx_get(drv, fuse_corner, new_uV);
+
+ mutex_lock(&drv->lock);
+ if (event == CPUFREQ_PRECHANGE) {
+ if (drv->nb_count++)
+ goto unlock;
+
+ pr_debug("Pre change [%ld] %p @ %lu?\n", corner - drv->corners,
+ corner, corner->freq);
+ if (cpr_is_allowed(drv))
+ cpr_ctl_disable(drv);
+
+ ret = cpr_pre_voltage(drv, fuse_corner, dir, vdd_mx_vmin);
+ if (ret)
+ goto unlock;
+
+ drv->switching_opp = true;
+ }
+
+ if (event == CPUFREQ_POSTCHANGE) {
+ if (--drv->nb_count)
+ goto unlock;
+
+ pr_debug("Post change [%ld] %p @ %lu?\n", corner - drv->corners,
+ corner, corner->freq);
+
+ ret = cpr_post_voltage(drv, fuse_corner, dir, vdd_mx_vmin);
+ if (ret)
+ goto unlock;
+
+ if (cpr_is_allowed(drv) /* && drv->vreg_enabled */) {
+ cpr_irq_clr(drv);
+ cpr_corner_switch(drv, corner);
+ cpr_ctl_enable(drv, corner);
+ }
+
+ drv->corner = corner;
+ drv->switching_opp = false;
+ }
+unlock:
+ mutex_unlock(&drv->lock);
+
+ return ret;
+}
+
+static u32 cpr_read_efuse(void __iomem *prom, const struct qfprom_offset *efuse)
+{
+ u64 buffer = 0;
+ u8 val;
+ int i, num_bytes;
+
+ num_bytes = DIV_ROUND_UP(efuse->width + efuse->shift, BITS_PER_BYTE);
+
+ for (i = 0; i < num_bytes; i++) {
+ val = readb_relaxed(prom + efuse->offset + i);
+ buffer |= val << (i * BITS_PER_BYTE);
+ }
+
+ buffer >>= efuse->shift;
+ buffer &= BIT(efuse->width) - 1;
+
+ return buffer;
+}
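+
+/*
+ * Worked example: for { .offset = 220, .width = 6, .shift = 2 } the loop
+ * reads the single byte at offset 220 (DIV_ROUND_UP(6 + 2, 8) == 1), then
+ * shifts right by two and masks to six bits, returning bits [7:2] of that
+ * byte.
+ */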
+
+static void
+cpr_populate_ring_osc_idx(const struct cpr_fuse *fuses, struct cpr_drv *drv,
+ void __iomem *prom)
+{
+ struct fuse_corner *fuse = drv->fuse_corners;
+ struct fuse_corner *end = fuse + drv->num_fuse_corners;
+
+ for (; fuse < end; fuse++, fuses++)
+ fuse->ring_osc_idx = cpr_read_efuse(prom, &fuses->ring_osc);
+}
+
+static const struct corner_adjustment *cpr_find_adjustment(u32 speed_bin,
+ u32 pvs_version, u32 cpr_rev, const struct cpr_desc *desc,
+ const struct cpr_drv *drv)
+{
+ int i, j;
+ u32 val, ro;
+ struct corner_adjustment *a;
+
+ for (i = 0; i < desc->num_adjustments; i++) {
+ a = &desc->adjustments[i];
+
+ if (a->speed_bin != speed_bin &&
+ a->speed_bin != FUSE_PARAM_MATCH_ANY)
+ continue;
+ if (a->pvs_version != pvs_version &&
+ a->pvs_version != FUSE_PARAM_MATCH_ANY)
+ continue;
+ if (a->cpr_rev != cpr_rev &&
+ a->cpr_rev != FUSE_PARAM_MATCH_ANY)
+ continue;
+ for (j = 0; j < drv->num_fuse_corners; j++) {
+ val = a->ring_osc_idx[j];
+ ro = drv->fuse_corners[j].ring_osc_idx;
+ if (val != ro && val != FUSE_PARAM_MATCH_ANY)
+ break;
+ }
+ if (j == drv->num_fuse_corners)
+ return a;
+ }
+
+ return NULL;
+}
+
+static void cpr_fuse_corner_init(struct cpr_drv *drv,
+ const struct cpr_desc *desc,
+ void __iomem *qfprom,
+ const struct cpr_fuse *fuses, u32 speed,
+ const struct corner_adjustment *adjustments,
+ const struct acc_desc *acc_desc)
+{
+ int i;
+ unsigned int idx = 0;
+ unsigned int step_volt;
+ int steps, step_size_uv;
+ const struct fuse_corner_data *fdata;
+ struct fuse_corner *fuse, *end, *prev;
+ const struct qfprom_offset *pvs_efuse;
+ const struct qfprom_offset *init_v_efuse;
+ const struct qfprom_offset *redun;
+ const struct fuse_conditional_min_volt *min_v;
+ const struct fuse_uplift_wa *up;
+ bool do_min_v = false, do_uplift = false;
+ const int *pvs_uV = NULL;
+ const int *adj_uV, *adj_quot, *adj_min, *min_diff_quot;
+ const int *step_quot;
+ int uV, diff;
+ u32 bits, bin;
+ u32 min_uV;
+ u8 expected;
+ const struct reg_sequence *accs;
+
+ redun = &acc_desc->override;
+ expected = acc_desc->override_value;
+ if (redun->width && cpr_read_efuse(qfprom, redun) == expected)
+ accs = acc_desc->override_settings;
+ else
+ accs = acc_desc->settings;
+
+ /* Figure out if we should apply workarounds */
+ min_v = desc->min_volt_fuse;
+ do_min_v = min_v &&
+ cpr_read_efuse(qfprom, &min_v->redundant) == min_v->expected;
+ if (do_min_v)
+ min_uV = min_v->min_uV;
+
+ up = desc->uplift_wa;
+ if (!do_min_v && up)
+ if (cpr_read_efuse(qfprom, &up->redundant) == up->expected)
+ do_uplift = up->speed_bin == speed;
+
+ adj_uV = adjustments ? adjustments->fuse_init_uV : NULL;
+ adj_quot = adjustments ? adjustments->fuse_quot : NULL;
+ adj_min = adjustments ? adjustments->fuse_quot_min : NULL;
+ min_diff_quot = adjustments ? adjustments->fuse_quot_diff : NULL;
+ fuse = drv->fuse_corners;
+ end = &fuse[drv->num_fuse_corners - 1];
+ fdata = desc->cpr_fuses.fuse_corner_data;
+ step_quot = desc->step_quot;
+
+	/*
+	 * The initial voltage for each fuse corner is determined in one of
+	 * two ways: either the initial voltages are encoded in a dedicated
+	 * fuse per fuse corner (fuses::init_voltage), or the PVS bin fuse
+	 * selects a row in a table of initial voltages (pvs_uV).
+	 */
+ if (fuses->init_voltage.width) {
+ //step_volt = regulator_get_linear_step(drv->vdd_apc);
+ step_volt = 12500; /* TODO: Replace with ^ when apc_reg ready */
+ step_size_uv = desc->cpr_fuses.init_voltage_step;
+ } else {
+ redun = &desc->pvs_fuses->redundant;
+ expected = desc->pvs_fuses->redundant_value;
+ if (redun->width)
+ idx = !!(cpr_read_efuse(qfprom, redun) == expected);
+
+ pvs_efuse = &desc->pvs_fuses->pvs_fuse[idx];
+ bin = cpr_read_efuse(qfprom, pvs_efuse);
+ pvs_uV = desc->pvs_fuses->pvs_bins[bin].uV;
+ }
+
+ /* Populate fuse_corner voltage and ring_osc_idx members */
+ prev = NULL;
+ for (i = 0; fuse <= end; fuse++, fuses++, i++) {
+ if (pvs_uV) {
+ uV = pvs_uV[i];
+ } else {
+ init_v_efuse = &fuses->init_voltage;
+ bits = cpr_read_efuse(qfprom, init_v_efuse);
+			/* Not two's complement; the highest bit is the sign bit */
+ steps = bits & BIT(init_v_efuse->width - 1) ? -1 : 1;
+ steps *= bits & ~BIT(init_v_efuse->width - 1);
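+			/*
+			 * e.g. for a 6-bit fuse, 0b100011 decodes to -3
+			 * steps and 0b000011 to +3 steps.
+			 */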
+
+ uV = fdata[i].ref_uV + steps * step_size_uv;
+ uV = DIV_ROUND_UP(uV, step_volt) * step_volt;
+ }
+
+ if (adj_uV)
+ uV += adj_uV[i];
+
+ fuse->min_uV = fdata[i].min_uV;
+ fuse->max_uV = fdata[i].max_uV;
+
+ if (do_min_v) {
+ if (fuse->max_uV < min_uV) {
+ fuse->max_uV = min_uV;
+ fuse->min_uV = min_uV;
+ } else if (fuse->min_uV < min_uV) {
+ fuse->min_uV = min_uV;
+ }
+ }
+
+ fuse->uV = clamp(uV, fuse->min_uV, fuse->max_uV);
+
+ if (fuse == end) {
+ if (do_uplift) {
+ end->uV += up->uV;
+ end->uV = clamp(end->uV, 0, up->max_uV);
+ }
+			/*
+			 * Allow the highest fuse corner's PVS voltage to
+			 * define the ceiling voltage for that corner in order
+			 * to support SoCs in which variable ceiling values
+			 * are required.
+			 */
+ end->max_uV = max(end->max_uV, end->uV);
+ }
+
+ /* Unpack the target quotient by scaling. */
+ fuse->quot = cpr_read_efuse(qfprom, &fuses->quotient);
+ fuse->quot *= fdata[i].quot_scale;
+ fuse->quot += fdata[i].quot_offset;
+
+ if (adj_quot) {
+ fuse->quot += adj_quot[i];
+
+ if (prev && min_diff_quot) {
+ diff = min_diff_quot[i];
+ if (fuse->quot - prev->quot <= diff)
+ fuse->quot = prev->quot + adj_min[i];
+ }
+ prev = fuse;
+ }
+
+ if (do_uplift)
+ fuse->quot += up->quot[i];
+
+ fuse->step_quot = step_quot[fuse->ring_osc_idx];
+
+ fuse->accs = accs;
+ fuse->num_accs = acc_desc->num_regs_per_fuse;
+ accs += acc_desc->num_regs_per_fuse;
+
+ fuse->vdd_mx_req = fdata[i].vdd_mx_req;
+ }
+
+ /*
+ * Restrict all fuse corner PVS voltages based upon per corner
+ * ceiling and floor voltages.
+ */
+ for (fuse = drv->fuse_corners, i = 0; fuse <= end; fuse++, i++) {
+ if (fuse->uV > fuse->max_uV)
+ fuse->uV = fuse->max_uV;
+ else if (fuse->uV < fuse->min_uV)
+ fuse->uV = fuse->min_uV;
+
+ pr_debug("fuse corner %d: [%d %d %d] RO%d quot %d squot %d\n", i,
+ fuse->min_uV, fuse->uV, fuse->max_uV,
+ fuse->ring_osc_idx, fuse->quot,
+ fuse->step_quot);
+ }
+
+ drv->ceiling_max = end->max_uV;
+}
+
+static int cpr_populate_opps(struct device_node *of_node, struct cpr_drv *drv,
+ const struct corner_data **plan)
+{
+ int i, j, ret, cpu;
+ struct device *cpu_dev;
+ struct device_node *np;
+ struct corner *corner;
+ const struct corner_data *p;
+
+ for (i = 0; (np = of_parse_phandle(of_node, "qcom,cpr-cpus", i)); i++) {
+ for_each_possible_cpu(cpu)
+ if (arch_find_n_match_cpu_physical_id(np, cpu, NULL))
+ break;
+
+		if (cpu >= nr_cpu_ids) {
+			pr_err("Failed to find logical CPU for %s\n",
+			       np->name);
+			of_node_put(np);
+			return -EINVAL;
+		}
+		of_node_put(np);
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ return -EINVAL;
+
+ /*
+ * Keep cpu_dev and its regulator and clock for monitoring
+ * voltage changes and updating OPPs later.
+ */
+ if (i == 0) {
+ drv->cpu_dev = cpu_dev;
+ drv->vdd_apc = devm_regulator_get(cpu_dev, "cpu");
+ if (IS_ERR(drv->vdd_apc))
+ return PTR_ERR(drv->vdd_apc);
+ drv->cpu_clk = devm_clk_get(cpu_dev, NULL);
+ if (IS_ERR(drv->cpu_clk))
+ return PTR_ERR(drv->cpu_clk);
+ }
+
+ for (j = 0, corner = drv->corners; plan[j]; j++, corner++) {
+ p = plan[j];
+ ret = dev_pm_opp_add(cpu_dev, p->freq, corner->uV);
+ if (ret)
+ return ret;
+ corner->freq = p->freq;
+ }
+ }
+
+ return 0;
+}
+
+static const struct corner_data **
+find_freq_plan(const struct cpr_desc *desc, u32 speed_bin, u32 pvs_version)
+{
+ int i;
+ const struct freq_plan *p;
+
+ for (i = 0; i < desc->num_freq_plans; i++) {
+ p = &desc->freq_plans[i];
+
+ if (p->speed_bin != speed_bin &&
+ p->speed_bin != FUSE_PARAM_MATCH_ANY)
+ continue;
+ if (p->pvs_version != pvs_version &&
+ p->pvs_version != FUSE_PARAM_MATCH_ANY)
+ continue;
+
+ return p->plan;
+ }
+
+ return NULL;
+}
+
+static struct corner_override *find_corner_override(const struct cpr_desc *desc,
+ u32 speed_bin, u32 pvs_version)
+{
+ int i;
+ struct corner_override *o;
+
+ for (i = 0; i < desc->num_corner_overrides; i++) {
+ o = &desc->corner_overrides[i];
+
+ if (o->speed_bin != speed_bin &&
+ o->speed_bin != FUSE_PARAM_MATCH_ANY)
+ continue;
+ if (o->pvs_version != pvs_version &&
+ o->pvs_version != FUSE_PARAM_MATCH_ANY)
+ continue;
+
+ return o;
+ }
+
+ return NULL;
+}
+
+static void cpr_corner_init(struct cpr_drv *drv, const struct cpr_desc *desc,
+ const struct cpr_fuse *fuses, u32 speed_bin,
+ u32 pvs_version, void __iomem *qfprom,
+ const struct corner_adjustment *adjustments,
+ const struct corner_data **plan)
+{
+ int i, fnum, quot_diff, scaling;
+ struct fuse_corner *fuse, *prev_fuse;
+ struct corner *corner, *end;
+ const struct corner_data *cdata, *p;
+ const struct fuse_corner_data *fdata;
+ bool apply_scaling = false;
+ const int *adj_quot, *adj_volt, *adj_quot_offset;
+ const struct qfprom_offset *quot_offset;
+ unsigned long freq_corner, freq_diff, freq_diff_mhz;
+ unsigned long freq_high, freq_low;
+ int volt_high;
+ u64 temp, temp_limit;
+ int step_volt = 12500; /* TODO: Get from regulator APIs */
+ const struct corner_override *override;
+
+ corner = drv->corners;
+ end = &corner[drv->num_corners - 1];
+ cdata = desc->corner_data;
+ fdata = desc->cpr_fuses.fuse_corner_data;
+ adj_quot = adjustments ? adjustments->quot : NULL;
+ adj_volt = adjustments ? adjustments->init_uV : NULL;
+ adj_quot_offset = adjustments ? adjustments->fuse_quot_offset : NULL;
+
+ override = find_corner_override(desc, speed_bin, pvs_version);
+
+ /*
+ * Store maximum frequency for each fuse corner based on the frequency
+ * plan
+ */
+ for (i = 0; plan[i]; i++) {
+ p = plan[i];
+ freq_corner = p->freq;
+ fnum = p->fuse_corner;
+ fuse = &drv->fuse_corners[fnum];
+ if (freq_corner > fuse->max_freq)
+ fuse->max_freq = freq_corner;
+ }
+
+ /*
+ * Get the quotient adjustment scaling factor, according to:
+ *
+ * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1))
+ * / (freq(corner_N) - freq(corner_N-1)), max_factor)
+ *
+ * QUOT(corner_N): quotient read from fuse for fuse corner N
+ * QUOT(corner_N-1): quotient read from fuse for fuse corner (N - 1)
+ * freq(corner_N): max frequency in MHz supported by fuse corner N
+ * freq(corner_N-1): max frequency in MHz supported by fuse corner
+ * (N - 1)
+ *
+ * Then walk through the corners mapped to each fuse corner
+ * and calculate the quotient adjustment for each one using the
+ * following formula:
+ *
+ * quot_adjust = (freq_max - freq_corner) * scaling / 1000
+ *
+ * freq_max: max frequency in MHz supported by the fuse corner
+ * freq_corner: frequency in MHz corresponding to the corner
+ * scaling: calculated from above equation
+ *
+ *
+ * + +
+ * | v |
+ * q | f c o | f c
+ * u | c l | c
+ * o | f t | f
+ * t | c a | c
+ * | c f g | c f
+ * | e |
+ * +--------------- +----------------
+ * 0 1 2 3 4 5 6 0 1 2 3 4 5 6
+ * corner corner
+ *
+ * c = corner
+ * f = fuse corner
+ *
+ */
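+	/*
+	 * Illustrative example (made-up numbers): with fused quotients of
+	 * 1400 and 1800 for two adjacent fuse corners whose max frequencies
+	 * are 800 MHz and 1200 MHz,
+	 *
+	 *   scaling = min(1000 * (1800 - 1400) / (1200 - 800), max_factor)
+	 *           = min(1000, max_factor)
+	 *
+	 * and a corner running 100 MHz below the 1200 MHz fuse corner gets
+	 *
+	 *   quot_adjust = (1200 - 1100) * 1000 / 1000 = 100
+	 *
+	 * so its target quotient is 100 less than the fused value.
+	 */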
+ for (apply_scaling = false, i = 0; corner <= end; corner++, i++) {
+ freq_corner = cdata[i].freq;
+ fnum = cdata[i].fuse_corner;
+ fuse = &drv->fuse_corners[fnum];
+ if (fnum)
+ prev_fuse = &drv->fuse_corners[fnum - 1];
+ else
+ prev_fuse = NULL;
+
+ corner->fuse_corner = fuse;
+ corner->uV = fuse->uV;
+ if (prev_fuse && cdata[i - 1].freq == prev_fuse->max_freq) {
+ quot_offset = &fuses[fnum].quotient_offset;
+ if (quot_offset->width) {
+ quot_diff = cpr_read_efuse(qfprom, quot_offset);
+				quot_diff *= fdata[fnum].quot_scale;
+ if (adj_quot_offset)
+ quot_diff += adj_quot_offset[fnum];
+ } else {
+ quot_diff = fuse->quot - prev_fuse->quot;
+ }
+
+ freq_diff = fuse->max_freq - prev_fuse->max_freq;
+ freq_diff /= 1000000; /* Convert to MHz */
+ scaling = 1000 * quot_diff / freq_diff;
+ scaling = min(scaling, fdata[fnum].max_quot_scale);
+
+ apply_scaling = true;
+ } else if (freq_corner == fuse->max_freq) {
+ /* This is a fuse corner; don't scale anything */
+ apply_scaling = false;
+ }
+
+ if (apply_scaling) {
+ freq_diff = fuse->max_freq - freq_corner;
+ freq_diff_mhz = freq_diff / 1000000;
+ corner->quot_adjust = scaling * freq_diff_mhz / 1000;
+
+ freq_high = fuse->max_freq;
+			freq_low = prev_fuse->max_freq;
+ volt_high = fuse->uV;
+
+ temp = freq_diff * (fuse->uV - prev_fuse->uV);
+ do_div(temp, freq_high - freq_low);
+
+			/*
+			 * max_volt_scale has units of uV/MHz while freq values
+			 * have units of Hz. Divide by 1000000 to convert the
+			 * result to uV.
+			 */
+ temp_limit = freq_diff * fdata[fnum].max_volt_scale;
+ do_div(temp_limit, 1000000);
+
+ corner->uV = volt_high - min(temp, temp_limit);
+ corner->uV = roundup(corner->uV, step_volt);
+ }
+
+ if (adj_quot)
+ corner->quot_adjust -= adj_quot[i];
+
+ if (adj_volt)
+ corner->uV += adj_volt[i];
+
+ /* Load per corner ceiling and floor voltages if they exist. */
+ if (override) {
+ corner->max_uV = override->max_uV[i];
+ corner->min_uV = override->min_uV[i];
+ } else {
+ corner->max_uV = fuse->max_uV;
+ corner->min_uV = fuse->min_uV;
+ }
+
+ if (drv->ceiling_max < corner->max_uV)
+ drv->ceiling_max = corner->max_uV;
+
+ corner->uV = clamp(corner->uV, corner->min_uV, corner->max_uV);
+ corner->last_uV = corner->uV;
+
+ /* Reduce the ceiling voltage if needed */
+ if (desc->reduce_to_corner_uV && corner->uV < corner->max_uV)
+ corner->max_uV = corner->uV;
+ else if (desc->reduce_to_fuse_uV && fuse->uV < corner->max_uV)
+ corner->max_uV = max(corner->min_uV, fuse->uV);
+
+ pr_debug("corner %d: [%d %d %d] quot %d\n", i,
+ corner->min_uV, corner->uV, corner->max_uV,
+ fuse->quot - corner->quot_adjust);
+ }
+}
+
+static const struct cpr_fuse *
+cpr_get_fuses(const struct cpr_desc *desc, void __iomem *qfprom)
+{
+ u32 expected = desc->cpr_fuses.redundant_value;
+ const struct qfprom_offset *fuse = &desc->cpr_fuses.redundant;
+ unsigned int idx;
+
+ idx = !!(fuse->width && cpr_read_efuse(qfprom, fuse) == expected);
+
+ return &desc->cpr_fuses.cpr_fuse[idx * desc->num_fuse_corners];
+}
+
+static bool cpr_is_close_loop_disabled(struct cpr_drv *drv,
+ const struct cpr_desc *desc, void __iomem *qfprom,
+ const struct cpr_fuse *fuses,
+ const struct corner_adjustment *adj)
+{
+ const struct qfprom_offset *disable;
+ unsigned int idx;
+ struct fuse_corner *highest_fuse, *second_highest_fuse;
+ int min_diff_quot, diff_quot;
+
+ if (adj && adj->disable_closed_loop)
+ return true;
+
+ if (!desc->cpr_fuses.disable)
+ return false;
+
+ /*
+ * Are the fuses the redundant ones? This avoids reading the fuse
+ * redundant bit again
+ */
+	idx = !!(fuses != desc->cpr_fuses.cpr_fuse);
+ disable = &desc->cpr_fuses.disable[idx];
+
+ if (cpr_read_efuse(qfprom, disable))
+ return true;
+
+ if (!fuses->quotient_offset.width) {
+ /*
+ * Check if the target quotients for the highest two fuse
+ * corners are too close together.
+ */
+ highest_fuse = &drv->fuse_corners[drv->num_fuse_corners - 1];
+ second_highest_fuse = highest_fuse - 1;
+
+ min_diff_quot = desc->min_diff_quot;
+ diff_quot = highest_fuse->quot - second_highest_fuse->quot;
+
+ return diff_quot < min_diff_quot;
+ }
+
+ return false;
+}
+
+static int cpr_init_parameters(struct platform_device *pdev,
+ struct cpr_drv *drv)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ int ret;
+
+ ret = of_property_read_u32(of_node, "qcom,cpr-ref-clk",
+ &drv->ref_clk_khz);
+ if (ret)
+ return ret;
+ ret = of_property_read_u32(of_node, "qcom,cpr-timer-delay-us",
+ &drv->timer_delay_us);
+ if (ret)
+ return ret;
+ ret = of_property_read_u32(of_node, "qcom,cpr-timer-cons-up",
+ &drv->timer_cons_up);
+ if (ret)
+ return ret;
+ ret = of_property_read_u32(of_node, "qcom,cpr-timer-cons-down",
+ &drv->timer_cons_down);
+ if (ret)
+ return ret;
+ drv->timer_cons_down &= RBIF_TIMER_ADJ_CONS_DOWN_MASK;
+
+ ret = of_property_read_u32(of_node, "qcom,cpr-up-threshold",
+ &drv->up_threshold);
+ drv->up_threshold &= RBCPR_CTL_UP_THRESHOLD_MASK;
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(of_node, "qcom,cpr-down-threshold",
+ &drv->down_threshold);
+ drv->down_threshold &= RBCPR_CTL_DN_THRESHOLD_MASK;
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(of_node, "qcom,cpr-idle-clocks",
+ &drv->idle_clocks);
+ drv->idle_clocks &= RBCPR_STEP_QUOT_IDLE_CLK_MASK;
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(of_node, "qcom,cpr-gcnt-us", &drv->gcnt_us);
+ if (ret)
+ return ret;
+ ret = of_property_read_u32(of_node, "qcom,vdd-apc-step-up-limit",
+ &drv->vdd_apc_step_up_limit);
+ if (ret)
+ return ret;
+ ret = of_property_read_u32(of_node, "qcom,vdd-apc-step-down-limit",
+ &drv->vdd_apc_step_down_limit);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(of_node, "qcom,cpr-clamp-timer-interval",
+ &drv->clamp_timer_interval);
+ if (ret && ret != -EINVAL)
+ return ret;
+
+ drv->clamp_timer_interval = min_t(unsigned int,
+ drv->clamp_timer_interval,
+ RBIF_TIMER_ADJ_CLAMP_INT_MASK);
+
+ pr_debug("up threshold = %u, down threshold = %u\n",
+ drv->up_threshold, drv->down_threshold);
+
+ return 0;
+}
+
+static int cpr_init_and_enable_corner(struct cpr_drv *drv)
+{
+ unsigned long rate;
+ struct corner *end;
+
+ end = &drv->corners[drv->num_corners - 1];
+ rate = clk_get_rate(drv->cpu_clk);
+
+ for (drv->corner = drv->corners; drv->corner <= end; drv->corner++)
+ if (drv->corner->freq == rate)
+ break;
+
+ if (drv->corner > end)
+ return -EINVAL;
+
+ return cpr_enable(drv);
+}
+
+static struct corner_data msm8916_corner_data[] = {
+ /* [corner] -> { fuse corner, freq } */
+ { 0, 200000000 },
+ { 0, 400000000 },
+ { 1, 533330000 },
+ { 1, 800000000 },
+ { 2, 998400000 },
+ { 2, 1094400000 },
+ { 2, 1152000000 },
+ { 2, 1209600000 },
+ { 2, 1363200000 },
+};
+
+static const struct cpr_desc msm8916_desc = {
+ .num_fuse_corners = 3,
+ .vdd_mx_vmin_method = VDD_MX_VMIN_APC_CORNER_MAP,
+ .min_diff_quot = CPR_FUSE_MIN_QUOT_DIFF,
+ .step_quot = (int []){ 26, 26, 26, 26, 26, 26, 26, 26 },
+ .cpr_fuses = {
+ .init_voltage_step = 10000,
+ .fuse_corner_data = (struct fuse_corner_data[]){
+ /* ref_uV max_uV min_uV max_q q_off q_scl v_scl mx */
+ { 1050000, 1050000, 1050000, 0, 0, 1, 0, 3 },
+ { 1150000, 1150000, 1050000, 0, 0, 1, 0, 4 },
+ { 1350000, 1350000, 1162500, 650, 0, 1, 0, 6 },
+ },
+ .cpr_fuse = (struct cpr_fuse[]){
+ {
+ .ring_osc = { 222, 3, 6},
+ .init_voltage = { 220, 6, 2 },
+ .quotient = { 221, 12, 2 },
+ },
+ {
+ .ring_osc = { 222, 3, 6},
+ .init_voltage = { 218, 6, 2 },
+ .quotient = { 219, 12, 0 },
+ },
+ {
+ .ring_osc = { 222, 3, 6},
+ .init_voltage = { 216, 6, 0 },
+ .quotient = { 216, 12, 6 },
+ },
+ },
+ .disable = &(struct qfprom_offset){ 223, 1, 1 },
+ },
+ .speed_bin = { 12, 3, 2 },
+ .pvs_version = { 6, 2, 7 },
+ .corner_data = msm8916_corner_data,
+ .num_corners = ARRAY_SIZE(msm8916_corner_data),
+ .num_freq_plans = 3,
+ .freq_plans = (struct freq_plan[]){
+ {
+ .speed_bin = 0,
+ .pvs_version = 0,
+ .plan = (const struct corner_data* []){
+ msm8916_corner_data + 0,
+ msm8916_corner_data + 1,
+ msm8916_corner_data + 2,
+ msm8916_corner_data + 3,
+ msm8916_corner_data + 4,
+ msm8916_corner_data + 5,
+ msm8916_corner_data + 6,
+ msm8916_corner_data + 7,
+ NULL
+ },
+ },
+ {
+ .speed_bin = 0,
+ .pvs_version = 1,
+ .plan = (const struct corner_data* []){
+ msm8916_corner_data + 0,
+ msm8916_corner_data + 1,
+ msm8916_corner_data + 2,
+ msm8916_corner_data + 3,
+ msm8916_corner_data + 4,
+ msm8916_corner_data + 5,
+ msm8916_corner_data + 6,
+ msm8916_corner_data + 7,
+ NULL
+ },
+ },
+ {
+ .speed_bin = 2,
+ .pvs_version = 0,
+ .plan = (const struct corner_data* []){
+ msm8916_corner_data + 0,
+ msm8916_corner_data + 1,
+ msm8916_corner_data + 2,
+ msm8916_corner_data + 3,
+ msm8916_corner_data + 4,
+ msm8916_corner_data + 5,
+ msm8916_corner_data + 6,
+ msm8916_corner_data + 7,
+ msm8916_corner_data + 8,
+ NULL
+ },
+ },
+ },
+};
+
+static const struct acc_desc msm8916_acc_desc = {
+ .settings = (struct reg_sequence[]){
+ { 0xf000, 0 },
+ { 0xf000, 0x100 },
+ { 0xf000, 0x101 }
+ },
+ .override_settings = (struct reg_sequence[]){
+ { 0xf000, 0 },
+ { 0xf000, 0x100 },
+ { 0xf000, 0x100 }
+ },
+ .num_regs_per_fuse = 1,
+ .override = { 6, 1, 4 },
+ .override_value = 1,
+};
+
+static const struct of_device_id cpr_descs[] = {
+ { .compatible = "qcom,qfprom-msm8916", .data = &msm8916_desc },
+ { }
+};
+
+static const struct of_device_id acc_descs[] = {
+ { .compatible = "qcom,tcsr-msm8916", .data = &msm8916_acc_desc },
+ { }
+};
+
+static int cpr_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct cpr_drv *drv;
+ const struct cpr_fuse *cpr_fuses;
+ const struct corner_adjustment *adj;
+ const struct corner_data **plan;
+ size_t len;
+ int irq, ret;
+ const struct cpr_desc *desc;
+ const struct acc_desc *acc_desc;
+ const struct of_device_id *match;
+ struct device_node *np;
+ void __iomem *qfprom;
+ u32 cpr_rev = FUSE_REVISION_UNKNOWN;
+ u32 speed_bin = SPEED_BIN_NONE;
+ u32 pvs_version = 0;
+ struct platform_device_info devinfo = { .name = "cpufreq-dt", };
+
+ np = of_parse_phandle(dev->of_node, "eeprom", 0);
+ if (!np)
+ return -ENODEV;
+
+ match = of_match_node(cpr_descs, np);
+ if (!match)
+ return -EINVAL;
+ desc = match->data;
+
+ /* TODO: Get from eeprom API */
+ qfprom = devm_ioremap(dev, 0x58000, 0x7000);
+ if (!qfprom)
+ return -ENOMEM;
+
+ len = sizeof(*drv) +
+ sizeof(*drv->fuse_corners) * desc->num_fuse_corners +
+ sizeof(*drv->corners) * desc->num_corners;
+
+ drv = devm_kzalloc(dev, len, GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ np = of_parse_phandle(dev->of_node, "acc-syscon", 0);
+ if (!np)
+ return -ENODEV;
+
+ match = of_match_node(acc_descs, np);
+ if (!match)
+ return -EINVAL;
+
+ acc_desc = match->data;
+ drv->tcsr = syscon_node_to_regmap(np);
+ if (IS_ERR(drv->tcsr))
+ return PTR_ERR(drv->tcsr);
+
+ drv->num_fuse_corners = desc->num_fuse_corners;
+ drv->num_corners = desc->num_corners;
+ drv->fuse_corners = (struct fuse_corner *)(drv + 1);
+ drv->corners = (struct corner *)(drv->fuse_corners +
+ drv->num_fuse_corners);
+ mutex_init(&drv->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ drv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(drv->base))
+ return PTR_ERR(drv->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -EINVAL;
+
+ drv->vdd_mx = devm_regulator_get(dev, "vdd-mx");
+ if (IS_ERR(drv->vdd_mx))
+ return PTR_ERR(drv->vdd_mx);
+
+ drv->vdd_mx_vmin_method = desc->vdd_mx_vmin_method;
+ drv->vdd_mx_vmax = desc->vdd_mx_vmax;
+
+ if (desc->fuse_revision.width)
+ cpr_rev = cpr_read_efuse(qfprom, &desc->fuse_revision);
+ if (desc->speed_bin.width)
+ speed_bin = cpr_read_efuse(qfprom, &desc->speed_bin);
+ if (desc->pvs_version.width)
+ pvs_version = cpr_read_efuse(qfprom, &desc->pvs_version);
+
+ plan = find_freq_plan(desc, speed_bin, pvs_version);
+ if (!plan)
+ return -EINVAL;
+
+ cpr_fuses = cpr_get_fuses(desc, qfprom);
+ cpr_populate_ring_osc_idx(cpr_fuses, drv, qfprom);
+
+ adj = cpr_find_adjustment(speed_bin, pvs_version, cpr_rev, desc, drv);
+
+ cpr_fuse_corner_init(drv, desc, qfprom, cpr_fuses, speed_bin, adj,
+ acc_desc);
+ cpr_corner_init(drv, desc, cpr_fuses, speed_bin, pvs_version, qfprom,
+ adj, plan);
+
+ ret = cpr_populate_opps(dev->of_node, drv, plan);
+ if (ret)
+ return ret;
+
+ drv->loop_disabled = cpr_is_close_loop_disabled(drv, desc, qfprom,
+ cpr_fuses, adj);
+ pr_info("CPR closed loop is %sabled\n",
+ drv->loop_disabled ? "dis" : "en");
+
+ ret = cpr_init_parameters(pdev, drv);
+ if (ret)
+ return ret;
+
+ /* Configure CPR HW but keep it disabled */
+ ret = cpr_config(drv);
+ if (ret)
+ return ret;
+
+ /* Enable ACC if required */
+ if (acc_desc->enable_mask)
+ regmap_update_bits(drv->tcsr, acc_desc->enable_reg,
+ acc_desc->enable_mask,
+ acc_desc->enable_mask);
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ cpr_irq_handler, IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "cpr", drv);
+ if (ret)
+ return ret;
+
+ ret = cpr_init_and_enable_corner(drv);
+ if (ret)
+ return ret;
+
+ drv->reg_nb.notifier_call = cpr_regulator_notifier;
+ ret = regulator_register_notifier(drv->vdd_apc, &drv->reg_nb);
+ if (ret)
+ return ret;
+
+ drv->cpufreq_nb.notifier_call = cpr_cpufreq_notifier;
+ ret = cpufreq_register_notifier(&drv->cpufreq_nb,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (ret) {
+ regulator_unregister_notifier(drv->vdd_apc, &drv->reg_nb);
+ return ret;
+ }
+
+ /*
+ * Ensure that enable state accurately reflects the case in which CPR
+ * is permanently disabled.
+ */
+ //cpr_vreg->enable &= !cpr_vreg->loop_disabled;
+
+ platform_set_drvdata(pdev, drv);
+
+ return PTR_ERR_OR_ZERO(platform_device_register_full(&devinfo));
+}
+
+static int cpr_remove(struct platform_device *pdev)
+{
+ struct cpr_drv *drv = platform_get_drvdata(pdev);
+
+ if (cpr_is_allowed(drv)) {
+ cpr_ctl_disable(drv);
+ cpr_irq_set(drv, 0);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id cpr_match_table[] = {
+ { .compatible = "qcom,cpr" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cpr_match_table);
+
+static struct platform_driver cpr_driver = {
+ .probe = cpr_probe,
+ .remove = cpr_remove,
+ .driver = {
+ .name = "qcom-cpr",
+ .of_match_table = cpr_match_table,
+ .pm = &cpr_pm_ops,
+ },
+};
+module_platform_driver(cpr_driver);
+
+MODULE_DESCRIPTION("Core Power Reduction (CPR) driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-cpr");
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index b5af2c676c78..9c29c975c6bd 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -562,6 +562,18 @@ config REGULATOR_QCOM_SPMI
Qualcomm SPMI PMICs as a module. The module will be named
"qcom_spmi-regulator".
+config REGULATOR_QCOM_SMD_RPM
+ tristate "Qualcomm SMD based RPM regulator driver"
+ depends on MFD_QCOM_SMD_RPM
+ help
+ If you say yes to this option, support will be included for the
+ regulators exposed by the Resource Power Manager found in Qualcomm
+ 8974 based devices.
+
+ Say M here if you want to include support for the regulators on the
+ Qualcomm RPM as a module. The module will be named
+ "qcom_smd-regulator".
+
config REGULATOR_RC5T583
tristate "RICOH RC5T583 Power regulators"
depends on MFD_RC5T583
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 9c6167dd2c8b..86b3cfcd0106 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -20,6 +20,9 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/soc/qcom/smd-rpm.h>
+#include <linux/regulator/qcom_smd-regulator.h>
+
+#include "internal.h"
struct qcom_rpm_reg {
struct device *dev;
@@ -44,6 +47,11 @@ struct rpm_regulator_req {
#define RPM_KEY_SWEN 0x6e657773 /* "swen" */
#define RPM_KEY_UV 0x00007675 /* "uv" */
#define RPM_KEY_MA 0x0000616d /* "ma" */
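+/* Request keys are ASCII tags packed as little-endian u32 values */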
+#define RPM_KEY_FLOOR 0x00636676 /* "vfc" */
+#define RPM_KEY_CORNER 0x6e726f63 /* "corn" */
+
+#define RPM_MIN_FLOOR_CORNER 0
+#define RPM_MAX_FLOOR_CORNER 6
static int rpm_reg_write_active(struct qcom_rpm_reg *vreg,
struct rpm_regulator_req *req,
@@ -56,6 +64,50 @@ static int rpm_reg_write_active(struct qcom_rpm_reg *vreg,
req, size);
}
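+/*
+ * Floor and corner values are abstract performance levels (0 to 6 here)
+ * that the RPM maps onto actual voltages for the shared rails.
+ */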
+int qcom_rpm_set_floor(struct regulator *regulator, int floor)
+{
+ struct regulator_dev *rdev = regulator->rdev;
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+ struct rpm_regulator_req req;
+ int ret;
+
+ if (floor < RPM_MIN_FLOOR_CORNER || floor > RPM_MAX_FLOOR_CORNER)
+ return -EINVAL;
+
+ req.key = RPM_KEY_FLOOR;
+ req.nbytes = sizeof(u32);
+ req.value = floor;
+
+ ret = rpm_reg_write_active(vreg, &req, sizeof(req));
+ if (ret)
+ dev_err(rdev_get_dev(rdev), "Failed to set floor %d\n", floor);
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_rpm_set_floor);
+
+int qcom_rpm_set_corner(struct regulator *regulator, int corner)
+{
+ struct regulator_dev *rdev = regulator->rdev;
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+ struct rpm_regulator_req req;
+ int ret;
+
+ if (corner < RPM_MIN_FLOOR_CORNER || corner > RPM_MAX_FLOOR_CORNER)
+ return -EINVAL;
+
+ req.key = RPM_KEY_CORNER;
+ req.nbytes = sizeof(u32);
+ req.value = corner;
+
+ ret = rpm_reg_write_active(vreg, &req, sizeof(req));
+ if (ret)
+ dev_err(rdev_get_dev(rdev), "Failed to set corner %d\n", corner);
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_rpm_set_corner);
+
static int rpm_reg_enable(struct regulator_dev *rdev)
{
struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
@@ -211,6 +263,43 @@ static const struct regulator_desc pm8941_switch = {
.ops = &rpm_switch_ops,
};
+static const struct regulator_desc pm8916_pldo = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(750000, 0, 208, 12500),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 209,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8916_nldo = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(375000, 0, 93, 12500),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 94,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8916_buck_lvo_smps = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(375000, 0, 95, 12500),
+ REGULATOR_LINEAR_RANGE(750000, 96, 127, 25000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 128,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8916_buck_hvo_smps = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1550000, 0, 31, 25000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 32,
+ .ops = &rpm_smps_ldo_ops,
+};
+
struct rpm_regulator_data {
const char *name;
u32 type;
@@ -272,9 +361,36 @@ static const struct rpm_regulator_data rpm_pm8941_regulators[] = {
{}
};
+static const struct rpm_regulator_data rpm_pm8916_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8916_buck_lvo_smps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8916_buck_lvo_smps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pm8916_buck_lvo_smps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pm8916_buck_hvo_smps, "vdd_s4" },
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm8916_nldo, "vdd_l1_l2_l3" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm8916_nldo, "vdd_l1_l2_l3" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm8916_nldo, "vdd_l1_l2_l3" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8916_pldo, "vdd_l4_l5_l6" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8916_pldo, "vdd_l4_l5_l6" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8916_pldo, "vdd_l4_l5_l6" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8916_pldo, "vdd_l7" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ {}
+};
+
static const struct of_device_id rpm_of_match[] = {
{ .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators },
{ .compatible = "qcom,rpm-pm8941-regulators", .data = &rpm_pm8941_regulators },
+ { .compatible = "qcom,rpm-pm8916-regulators", .data = &rpm_pm8916_regulators },
{}
};
MODULE_DEVICE_TABLE(of, rpm_of_match);
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 28c711f0ac6b..54d5b637d14c 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -77,4 +77,20 @@ config DA8XX_REMOTEPROC
It's safe to say n here if you're not interested in multimedia
offloading.
+config QCOM_Q6V5_PIL
+ tristate "Qualcomm Hexagon V5 Peripherial Image Loader"
+ depends on OF && ARCH_QCOM
+ select REMOTEPROC
+ help
+ Say y here to support the Qualcomm Peripheral Image Loader for the
+ Hexagon V5 based remote processors.
+
+config QCOM_TZ_PIL
+ tristate "Qualcomm TrustZone based Peripherial Image Loader"
+ depends on OF && ARCH_QCOM
+ select REMOTEPROC
+ help
+ Say y here to support the TrustZone based Qualcomm Peripheral Image
+ Loader.
+
endmenu
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 81b04d1e2e58..4351f2e462ad 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -11,3 +11,5 @@ obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o
obj-$(CONFIG_STE_MODEM_RPROC) += ste_modem_rproc.o
obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o
obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o
+obj-$(CONFIG_QCOM_Q6V5_PIL) += qcom_q6v5_pil.o
+obj-$(CONFIG_QCOM_TZ_PIL) += qcom_tz_pil.o
diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
new file mode 100644
index 000000000000..4d63a84db588
--- /dev/null
+++ b/drivers/remoteproc/qcom_q6v5_pil.c
@@ -0,0 +1,1075 @@
+/*
+ * Qualcomm Peripheral Image Loader
+ *
+ * Copyright (C) 2014 Sony Mobile Communications AB
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/remoteproc.h>
+#include <linux/interrupt.h>
+#include <linux/memblock.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/elf.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/reset.h>
+
+#include "remoteproc_internal.h"
+
+#include <linux/qcom_scm.h>
+
+#define SCM_SVC_PIL 0x2
+
+struct qproc {
+ struct device *dev;
+ struct rproc *rproc;
+
+ void __iomem *reg_base;
+ void __iomem *halt_base;
+ void __iomem *rmb_base;
+
+ struct reset_control *mss_restart;
+
+ int wdog_irq;
+ int fatal_irq;
+ int ready_irq;
+ int handover_irq;
+ int stop_ack_irq;
+
+ struct gpio_desc *stop_gpio;
+
+ struct regulator *vdd;
+ struct regulator *cx;
+ struct regulator *mx;
+ struct regulator *pll;
+
+ struct clk *ahb_clk;
+ struct clk *axi_clk;
+ struct clk *rom_clk;
+
+ struct completion start_done;
+
+ void *mba_va;
+ dma_addr_t mba_da;
+ size_t mba_size;
+ struct dma_attrs mba_attrs;
+};
+
+#define VDD_MSS_UV 1000000
+#define VDD_MSS_UV_MAX 1150000
+#define VDD_MSS_UA 100000
+
+/* Q6 Register Offsets */
+#define QDSP6SS_RST_EVB 0x010
+
+/* AXI Halting Registers */
+#define MSS_Q6_HALT_BASE 0x180
+#define MSS_MODEM_HALT_BASE 0x200
+#define MSS_NC_HALT_BASE 0x280
+
+/* RMB Status Register Values */
+#define STATUS_PBL_SUCCESS 0x1
+#define STATUS_XPU_UNLOCKED 0x1
+#define STATUS_XPU_UNLOCKED_SCRIBBLED 0x2
+
+/* PBL/MBA interface registers */
+#define RMB_MBA_IMAGE 0x00
+#define RMB_PBL_STATUS 0x04
+#define RMB_MBA_COMMAND 0x08
+#define RMB_MBA_STATUS 0x0C
+#define RMB_PMI_META_DATA 0x10
+#define RMB_PMI_CODE_START 0x14
+#define RMB_PMI_CODE_LENGTH 0x18
+
+#define POLL_INTERVAL_US 50
+
+#define CMD_META_DATA_READY 0x1
+#define CMD_LOAD_READY 0x2
+
+#define STATUS_META_DATA_AUTH_SUCCESS 0x3
+#define STATUS_AUTH_COMPLETE 0x4
+
+/* External BHS */
+#define EXTERNAL_BHS_ON BIT(0)
+#define EXTERNAL_BHS_STATUS BIT(4)
+#define BHS_TIMEOUT_US 50
+
+#define MSS_RESTART_ID 0xA
+
+/* QDSP6SS Register Offsets */
+#define QDSP6SS_RESET 0x014
+#define QDSP6SS_GFMUX_CTL 0x020
+#define QDSP6SS_PWR_CTL 0x030
+#define QDSP6SS_STRAP_ACC 0x110
+
+/* AXI Halt Register Offsets */
+#define AXI_HALTREQ 0x0
+#define AXI_HALTACK 0x4
+#define AXI_IDLE 0x8
+
+#define HALT_ACK_TIMEOUT_US 100000
+
+/* QDSP6SS_RESET */
+#define Q6SS_STOP_CORE BIT(0)
+#define Q6SS_CORE_ARES BIT(1)
+#define Q6SS_BUS_ARES_ENA BIT(2)
+
+/* QDSP6SS_GFMUX_CTL */
+#define Q6SS_CLK_ENA BIT(1)
+#define Q6SS_CLK_SRC_SEL_C BIT(3)
+#define Q6SS_CLK_SRC_SEL_FIELD 0xC
+#define Q6SS_CLK_SRC_SWITCH_CLK_OVR BIT(8)
+
+/* QDSP6SS_PWR_CTL */
+#define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0)
+#define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1)
+#define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2)
+#define Q6SS_L2TAG_SLP_NRET_N BIT(16)
+#define Q6SS_ETB_SLP_NRET_N BIT(17)
+#define Q6SS_L2DATA_STBY_N BIT(18)
+#define Q6SS_SLP_RET_N BIT(19)
+#define Q6SS_CLAMP_IO BIT(20)
+#define QDSS_BHS_ON BIT(21)
+#define QDSS_LDO_BYP BIT(22)
+
+/* QDSP6v55 parameters */
+#define QDSP6v55_LDO_ON BIT(26)
+#define QDSP6v55_LDO_BYP BIT(25)
+#define QDSP6v55_BHS_ON BIT(24)
+#define QDSP6v55_CLAMP_WL BIT(21)
+#define L1IU_SLP_NRET_N BIT(15)
+#define L1DU_SLP_NRET_N BIT(14)
+#define L2PLRU_SLP_NRET_N BIT(13)
+
+#define HALT_CHECK_MAX_LOOPS (200)
+#define QDSP6SS_XO_CBCR (0x0038)
+
+#define QDSP6SS_ACC_OVERRIDE_VAL 0x20
+
+static int qproc_sanity_check(struct rproc *rproc,
+ const struct firmware *fw)
+{
+ if (!fw) {
+ dev_err(&rproc->dev, "failed to load %s\n", rproc->name);
+ return -EINVAL;
+ }
+
+ /*
+ * The MBA firmware is a raw binary image, so beyond checking that it
+ * was actually loaded there is no header to validate here.
+ */
+
+ return 0;
+}
+
+static struct resource_table *qproc_find_rsc_table(struct rproc *rproc,
+ const struct firmware *fw,
+ int *tablesz)
+{
+ static struct resource_table table = { .ver = 1, };
+
+ *tablesz = sizeof(table);
+ return &table;
+}
+
+static int qproc_load(struct rproc *rproc, const struct firmware *fw)
+{
+ struct qproc *qproc = rproc->priv;
+ DEFINE_DMA_ATTRS(attrs);
+ dma_addr_t phys;
+ dma_addr_t end;
+ void *ptr;
+
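+ /*
+ * The PBL fetches the MBA image by physical address, so it must live
+ * in physically contiguous memory within the 32-bit DMA mask.
+ */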
+ dma_set_mask(qproc->dev, DMA_BIT_MASK(32));
+ dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);
+
+ ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, &attrs);
+ if (!ptr) {
+ dev_err(qproc->dev, "failed to allocate mba metadata buffer\n");
+ return -ENOMEM;
+ }
+
+ end = phys + fw->size;
+ dev_info(qproc->dev, "loading MBA from %pa to %pa\n", &phys, &end);
+
+ memcpy(ptr, fw->data, fw->size);
+
+ qproc->mba_va = ptr;
+ qproc->mba_da = phys;
+ qproc->mba_size = fw->size;
+ qproc->mba_attrs = attrs;
+
+ return 0;
+}
+
+static const struct rproc_fw_ops qproc_fw_ops = {
+ .find_rsc_table = qproc_find_rsc_table,
+ .load = qproc_load,
+ .sanity_check = qproc_sanity_check,
+};
+
+static void q6v5proc_reset(struct qproc *qproc)
+{
+ u32 val;
+
+ /* Assert resets, stop core */
+ val = readl_relaxed(qproc->reg_base + QDSP6SS_RESET);
+ val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA | Q6SS_STOP_CORE);
+ writel_relaxed(val, qproc->reg_base + QDSP6SS_RESET);
+
+ /* Enable power block headswitch, and wait for it to stabilize */
+ val = readl_relaxed(qproc->reg_base + QDSP6SS_PWR_CTL);
+ val |= QDSS_BHS_ON | QDSS_LDO_BYP;
+ writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);
+ mb();
+ udelay(1);
+
+ /*
+ * Turn on memories. L2 banks should be done individually
+ * to minimize inrush current.
+ */
+ val = readl_relaxed(qproc->reg_base + QDSP6SS_PWR_CTL);
+ val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
+ Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
+ writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);
+ val |= Q6SS_L2DATA_SLP_NRET_N_2;
+ writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);
+ val |= Q6SS_L2DATA_SLP_NRET_N_1;
+ writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);
+ val |= Q6SS_L2DATA_SLP_NRET_N_0;
+ writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);
+
+ /* Remove IO clamp */
+ val &= ~Q6SS_CLAMP_IO;
+ writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);
+
+ /* Bring core out of reset */
+ val = readl_relaxed(qproc->reg_base + QDSP6SS_RESET);
+ val &= ~Q6SS_CORE_ARES;
+ writel_relaxed(val, qproc->reg_base + QDSP6SS_RESET);
+
+ /* Turn on core clock */
+ val = readl_relaxed(qproc->reg_base + QDSP6SS_GFMUX_CTL);
+ val |= Q6SS_CLK_ENA;
+
+#if 0
+ /* Need a different clock source for v5.2.0 */
+ if (qproc->qdsp6v5_2_0) {
+ val &= ~Q6SS_CLK_SRC_SEL_FIELD;
+ val |= Q6SS_CLK_SRC_SEL_C;
+ }
+
+ /* force clock on during source switch */
+ if (qproc->qdsp6v56)
+ val |= Q6SS_CLK_SRC_SWITCH_CLK_OVR;
+#endif
+
+ writel_relaxed(val, qproc->reg_base + QDSP6SS_GFMUX_CTL);
+
+ /* Start core execution */
+ val = readl_relaxed(qproc->reg_base + QDSP6SS_RESET);
+ val &= ~Q6SS_STOP_CORE;
+ writel_relaxed(val, qproc->reg_base + QDSP6SS_RESET);
+}
+
+static void q6v5proc_halt_axi_port(struct qproc *qproc, void __iomem *halt)
+{
+ unsigned long timeout;
+
+ if (readl_relaxed(halt + AXI_IDLE))
+ return;
+
+ /* Assert halt request */
+ writel_relaxed(1, halt + AXI_HALTREQ);
+
+ /* Wait for halt */
+ timeout = jiffies + 10 * HZ;
+ for (;;) {
+ if (readl(halt + AXI_HALTACK) || time_after(jiffies, timeout))
+ break;
+
+ msleep(1);
+ }
+
+ if (!readl_relaxed(halt + AXI_IDLE))
+ dev_err(qproc->dev, "port %pa failed halt\n", &halt);
+
+ /* Clear halt request (port will remain halted until reset) */
+ writel_relaxed(0, halt + AXI_HALTREQ);
+}
+
+static int qproc_mba_load_mdt(struct qproc *qproc, const struct firmware *fw)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ unsigned long timeout;
+ dma_addr_t phys;
+ dma_addr_t end;
+ void *ptr;
+ int ret;
+ s32 val;
+
+ dma_set_mask(qproc->dev, DMA_BIT_MASK(32));
+ dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);
+
+ ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, &attrs);
+ if (!ptr) {
+ dev_err(qproc->dev, "failed to allocate mba metadata buffer\n");
+ return -ENOMEM;
+ }
+
+ end = phys + fw->size;
+ dev_info(qproc->dev, "loading mdt header from %pa to %pa\n", &phys, &end);
+
+ memcpy(ptr, fw->data, fw->size);
+
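+ /*
+ * Hand the physical address of the mdt header to the MBA and ask it
+ * to authenticate the metadata before any segments are transferred.
+ */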
+ writel_relaxed(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+
+ writel_relaxed(phys, qproc->rmb_base + RMB_PMI_META_DATA);
+ writel(CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND);
+
+ timeout = jiffies + HZ;
+ for (;;) {
+ msleep(1);
+
+ val = readl(qproc->rmb_base + RMB_MBA_STATUS);
+ if (val == STATUS_META_DATA_AUTH_SUCCESS || val < 0)
+ break;
+
+ if (time_after(jiffies, timeout))
+ break;
+ }
+ if (val == 0) {
+ dev_err(qproc->dev, "MBA authentication of headers timed out\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ } else if (val < 0) {
+ dev_err(qproc->dev, "MBA returned error %d for headers\n", val);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ dev_err(qproc->dev, "mdt authenticated\n");
+
+ ret = 0;
+out:
+ dma_free_attrs(qproc->dev, fw->size, ptr, phys, &attrs);
+
+ return ret;
+}
+
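+/*
+ * Bits 26:24 of p_flags encode the Qualcomm segment type; type 2 is a
+ * hash segment, which carries signature data and is never loaded.
+ */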
+#define segment_is_hash(flag) (((flag) & (0x7 << 24)) == (0x2 << 24))
+
+static int
+qproc_load_segments(struct qproc *qproc, const struct firmware *fw)
+{
+ struct device *dev = qproc->dev;
+ struct elf32_hdr *ehdr;
+ struct elf32_phdr *phdr;
+ int i, ret = 0;
+ const u8 *elf_data = fw->data;
+ const struct firmware *seg_fw;
+ char fw_name[20];
+
+ ehdr = (struct elf32_hdr *)elf_data;
+ phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
+
+ /* go through the available ELF segments */
+ for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+ u32 da = phdr->p_paddr;
+ u32 memsz = phdr->p_memsz;
+ u32 filesz = phdr->p_filesz;
+ void *ptr;
+
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ if (segment_is_hash(phdr->p_flags))
+ continue;
+
+ if (filesz == 0)
+ continue;
+
+ dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
+ phdr->p_type, da, memsz, filesz);
+
+ if (filesz > memsz) {
+ dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
+ filesz, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ ptr = ioremap(da, memsz);
+ if (!ptr) {
+ dev_err(qproc->dev, "failed to allocate mba metadata buffer\n");
+ ret = -ENOMEM;
+ break;
+ }
+
+ if (filesz) {
+ snprintf(fw_name, sizeof(fw_name), "modem.b%02d", i);
+ ret = request_firmware(&seg_fw, fw_name, qproc->dev);
+ if (ret) {
+ iounmap(ptr);
+ break;
+ }
+
+ memcpy(ptr, seg_fw->data, filesz);
+
+ release_firmware(seg_fw);
+ }
+
+ if (memsz > filesz)
+ memset(ptr + filesz, 0, memsz - filesz);
+
+ wmb();
+ iounmap(ptr);
+ }
+
+ return ret;
+}
+
+static int qproc_verify_segments(struct qproc *qproc, const struct firmware *fw)
+{
+ struct elf32_hdr *ehdr;
+ struct elf32_phdr *phdr;
+ const u8 *elf_data = fw->data;
+ unsigned long timeout;
+ phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX;
+ u32 size = 0;
+ s32 val;
+ int ret;
+ int i;
+
+ ehdr = (struct elf32_hdr *)elf_data;
+ phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
+
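+ /*
+ * Report the physical span covered by the loadable segments to the
+ * MBA, which authenticates the region once the code length is set.
+ */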
+ for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+ phys_addr_t da = phdr->p_paddr;
+ u32 memsz = phdr->p_memsz;
+
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ dev_err(qproc->dev, "0x%x %d %d\n", phdr->p_paddr, segment_is_hash(phdr->p_flags), !!(phdr->p_flags & BIT(27)));
+
+ if (segment_is_hash(phdr->p_flags))
+ continue;
+
+ if (memsz == 0)
+ continue;
+
+ if (da < min_addr)
+ min_addr = da;
+
+ size += memsz;
+ }
+
+ dev_err(qproc->dev, "verify: %pa:%pa\n", &min_addr, &size);
+
+ writel_relaxed(min_addr, qproc->rmb_base + RMB_PMI_CODE_START);
+ writel(CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND);
+ writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+
+ timeout = jiffies + 10 * HZ;
+ for (;;) {
+ msleep(1);
+
+ val = readl(qproc->rmb_base + RMB_MBA_STATUS);
+ if (val == STATUS_AUTH_COMPLETE || val < 0)
+ break;
+
+ if (time_after(jiffies, timeout))
+ break;
+ }
+ if (val == 0) {
+ dev_err(qproc->dev, "MBA authentication of headers timed out\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ } else if (val < 0) {
+ dev_err(qproc->dev, "MBA returned error %d for segments\n", val);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+static int qproc_load_modem(struct qproc *qproc)
+{
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, "modem.mdt", qproc->dev);
+ if (ret < 0) {
+ dev_err(qproc->dev, "unable to load modem.mdt\n");
+ return ret;
+ }
+
+ ret = qproc_mba_load_mdt(qproc, fw);
+ if (ret)
+ goto out;
+
+ ret = qproc_load_segments(qproc, fw);
+ if (ret)
+ goto out;
+
+ ret = qproc_verify_segments(qproc, fw);
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int qproc_start(struct rproc *rproc)
+{
+ struct qproc *qproc = (struct qproc *)rproc->priv;
+ unsigned long timeout;
+ int ret;
+ u32 val;
+
+ ret = regulator_enable(qproc->vdd);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable mss vdd\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(qproc->mss_restart);
+ if (ret) {
+ dev_err(qproc->dev, "failed to deassert mss restart\n");
+ goto disable_vdd;
+ }
+
+ ret = clk_prepare_enable(qproc->ahb_clk);
+ if (ret)
+ goto assert_reset;
+
+ ret = clk_prepare_enable(qproc->axi_clk);
+ if (ret)
+ goto disable_ahb_clk;
+
+ ret = clk_prepare_enable(qproc->rom_clk);
+ if (ret)
+ goto disable_axi_clk;
+
+ writel_relaxed(qproc->mba_da, qproc->rmb_base + RMB_MBA_IMAGE);
+
+ /* Ensure order of data/entry point and the following reset release */
+ wmb();
+
+ q6v5proc_reset(qproc);
+
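+ /* Wait up to a second for the PBL to pick up the MBA image */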
+ timeout = jiffies + HZ;
+ for (;;) {
+ msleep(1);
+
+ val = readl(qproc->rmb_base + RMB_PBL_STATUS);
+ if (val || time_after(jiffies, timeout))
+ break;
+ }
+ if (val == 0) {
+ dev_err(qproc->dev, "PBL boot timed out\n");
+ ret = -ETIMEDOUT;
+ goto halt_axi_ports;
+ } else if (val != STATUS_PBL_SUCCESS) {
+ dev_err(qproc->dev, "PBL returned unexpected status %d\n", val);
+ ret = -EINVAL;
+ goto halt_axi_ports;
+ }
+
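+ /* Then wait up to a second for the MBA to report its boot status */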
+ timeout = jiffies + HZ;
+ for (;;) {
+ msleep(1);
+
+ val = readl(qproc->rmb_base + RMB_MBA_STATUS);
+ if (val || time_after(jiffies, timeout))
+ break;
+ }
+ if (val == 0) {
+ dev_err(qproc->dev, "MBA boot timed out\n");
+ ret = -ETIMEDOUT;
+ goto halt_axi_ports;
+ } else if (val != STATUS_XPU_UNLOCKED && val != STATUS_XPU_UNLOCKED_SCRIBBLED) {
+ dev_err(qproc->dev, "MBA returned unexpected status %d\n", val);
+ ret = -EINVAL;
+ goto halt_axi_ports;
+ }
+
+ dev_info(qproc->dev, "MBA boot done\n");
+
+ ret = qproc_load_modem(qproc);
+ if (ret)
+ goto halt_axi_ports;
+
+ return 0;
+
+halt_axi_ports:
+ q6v5proc_halt_axi_port(qproc, qproc->halt_base + MSS_Q6_HALT_BASE);
+ q6v5proc_halt_axi_port(qproc, qproc->halt_base + MSS_MODEM_HALT_BASE);
+ q6v5proc_halt_axi_port(qproc, qproc->halt_base + MSS_NC_HALT_BASE);
+disable_axi_clk:
+ clk_disable_unprepare(qproc->axi_clk);
+disable_ahb_clk:
+ clk_disable_unprepare(qproc->ahb_clk);
+assert_reset:
+ reset_control_assert(qproc->mss_restart);
+disable_vdd:
+ regulator_disable(qproc->vdd);
+
+ dma_free_attrs(qproc->dev, qproc->mba_size, qproc->mba_va, qproc->mba_da, &qproc->mba_attrs);
+ return ret;
+}
+
+static int qproc_stop(struct rproc *rproc)
+{
+ struct qproc *qproc = (struct qproc *)rproc->priv;
+
+ q6v5proc_halt_axi_port(qproc, qproc->halt_base + MSS_Q6_HALT_BASE);
+ q6v5proc_halt_axi_port(qproc, qproc->halt_base + MSS_MODEM_HALT_BASE);
+ q6v5proc_halt_axi_port(qproc, qproc->halt_base + MSS_NC_HALT_BASE);
+
+ reset_control_assert(qproc->mss_restart);
+ clk_disable_unprepare(qproc->axi_clk);
+ clk_disable_unprepare(qproc->ahb_clk);
+ regulator_disable(qproc->vdd);
+
+ dma_free_attrs(qproc->dev, qproc->mba_size, qproc->mba_va, qproc->mba_da, &qproc->mba_attrs);
+
+ return 0;
+}
+
+static const struct rproc_ops qproc_ops = {
+ .start = qproc_start,
+ .stop = qproc_stop,
+};
+
+static irqreturn_t qproc_wdog_interrupt(int irq, void *dev)
+{
+ struct qproc *qproc = dev;
+
+ dev_err(qproc->dev, " WATCHDOG\n");
+
+ rproc_report_crash(qproc->rproc, RPROC_WATCHDOG);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qproc_fatal_interrupt(int irq, void *dev)
+{
+ struct qproc *qproc = dev;
+
+ dev_err(qproc->dev, " FATAL\n");
+
+ rproc_report_crash(qproc->rproc, RPROC_FATAL_ERROR);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qproc_ready_interrupt(int irq, void *dev)
+{
+ struct qproc *qproc = dev;
+
+ dev_err(qproc->dev, " READY\n");
+
+ complete(&qproc->start_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qproc_handover_interrupt(int irq, void *dev)
+{
+ struct qproc *qproc = dev;
+
+ dev_err(qproc->dev, " HANDOVER\n");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qproc_stop_ack_interrupt(int irq, void *dev)
+{
+ struct qproc *qproc = dev;
+
+ dev_err(qproc->dev, " STOP-ACK\n");
+
+ return IRQ_HANDLED;
+}
+
+static ssize_t qproc_boot_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct qproc *qproc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = rproc_boot(qproc->rproc);
+ return ret ? : size;
+}
+
+static ssize_t qproc_shutdown_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct qproc *qproc = dev_get_drvdata(dev);
+
+ rproc_shutdown(qproc->rproc);
+ return size;
+}
+
+static const struct device_attribute qproc_attrs[] = {
+ __ATTR(boot, S_IWUSR, 0, qproc_boot_store),
+ __ATTR(shutdown, S_IWUSR, 0, qproc_shutdown_store),
+};
+
+static int qproc_init_mem(struct qproc *qproc, struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6_base");
+ qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(qproc->reg_base))
+ return PTR_ERR(qproc->reg_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "halt_base");
+ qproc->halt_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(qproc->halt_base))
+ return PTR_ERR(qproc->halt_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb_base");
+ qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(qproc->rmb_base))
+ return PTR_ERR(qproc->rmb_base);
+
+ return 0;
+}
+
+static int qproc_init_clocks(struct qproc *qproc)
+{
+ qproc->ahb_clk = devm_clk_get(qproc->dev, "iface");
+ if (IS_ERR(qproc->ahb_clk))
+ return PTR_ERR(qproc->ahb_clk);
+
+ qproc->axi_clk = devm_clk_get(qproc->dev, "bus");
+ if (IS_ERR(qproc->axi_clk))
+ return PTR_ERR(qproc->axi_clk);
+
+ qproc->rom_clk = devm_clk_get(qproc->dev, "mem");
+ if (IS_ERR(qproc->rom_clk))
+ return PTR_ERR(qproc->rom_clk);
+
+ return 0;
+}
+
+static int qproc_init_regulators(struct qproc *qproc)
+{
+ int ret;
+ u32 uV;
+
+ qproc->vdd = devm_regulator_get(qproc->dev, "qcom,vdd");
+ if (IS_ERR(qproc->vdd))
+ return PTR_ERR(qproc->vdd);
+
+ regulator_set_voltage(qproc->vdd, VDD_MSS_UV, VDD_MSS_UV_MAX);
+ regulator_set_load(qproc->vdd, VDD_MSS_UA);
+
+ qproc->cx = devm_regulator_get(qproc->dev, "qcom,cx");
+ if (IS_ERR(qproc->cx))
+ return PTR_ERR(qproc->cx);
+
+ qproc->mx = devm_regulator_get(qproc->dev, "qcom,mx");
+ if (IS_ERR(qproc->mx))
+ return PTR_ERR(qproc->mx);
+
+ ret = of_property_read_u32(qproc->dev->of_node, "qcom,mx-uV", &uV);
+ if (!ret)
+ regulator_set_voltage(qproc->mx, uV, uV);
+
+ qproc->pll = devm_regulator_get(qproc->dev, "qcom,pll");
+ if (IS_ERR(qproc->pll))
+ return PTR_ERR(qproc->pll);
+
+ ret = of_property_read_u32(qproc->dev->of_node, "qcom,pll-uV", &uV);
+ if (!ret)
+ regulator_set_voltage(qproc->pll, uV, uV);
+
+ return 0;
+}
+
+static int qproc_init_reset(struct qproc *qproc)
+{
+ qproc->mss_restart = devm_reset_control_get(qproc->dev, NULL);
+ if (IS_ERR(qproc->mss_restart)) {
+ dev_err(qproc->dev, "failed to acquire mss restart\n");
+ return PTR_ERR(qproc->mss_restart);
+ }
+
+ return 0;
+}
+
+static int qproc_request_irq(struct qproc *qproc,
+ struct platform_device *pdev,
+ const char *name,
+ irq_handler_t thread_fn)
+{
+ int irq;
+ int ret;
+
+ irq = platform_get_irq_byname(pdev, name);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no %s IRQ defined\n", name);
+ return irq;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ NULL, thread_fn,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "qproc", qproc);
+ if (ret) {
+ dev_err(&pdev->dev, "request %s IRQ failed\n", name);
+ return ret;
+ }
+
+ /* Return the IRQ number so the callers can store it */
+ return irq;
+}
+
+static int qproc_probe(struct platform_device *pdev)
+{
+ struct qproc *qproc;
+ struct rproc *rproc;
+ int ret;
+ int i;
+
+ rproc = rproc_alloc(&pdev->dev, pdev->name, &qproc_ops,
+ "mba.b00", sizeof(*qproc));
+ if (!rproc)
+ return -ENOMEM;
+
+ rproc->fw_ops = &qproc_fw_ops;
+
+ qproc = (struct qproc *)rproc->priv;
+ qproc->dev = &pdev->dev;
+ qproc->rproc = rproc;
+ platform_set_drvdata(pdev, qproc);
+
+ init_completion(&qproc->start_done);
+
+ ret = qproc_init_mem(qproc, pdev);
+ if (ret)
+ goto free_rproc;
+
+ ret = qproc_init_clocks(qproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = qproc_init_regulators(qproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = qproc_init_reset(qproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = qproc_request_irq(qproc, pdev, "wdog", qproc_wdog_interrupt);
+ if (ret < 0)
+ goto free_rproc;
+ qproc->wdog_irq = ret;
+
+ ret = qproc_request_irq(qproc, pdev, "fatal", qproc_fatal_interrupt);
+ if (ret < 0)
+ goto free_rproc;
+ qproc->fatal_irq = ret;
+
+ ret = qproc_request_irq(qproc, pdev, "ready", qproc_ready_interrupt);
+ if (ret < 0)
+ goto free_rproc;
+ qproc->ready_irq = ret;
+
+ ret = qproc_request_irq(qproc, pdev, "handover", qproc_handover_interrupt);
+ if (ret < 0)
+ goto free_rproc;
+ qproc->handover_irq = ret;
+
+ ret = qproc_request_irq(qproc, pdev, "stop-ack", qproc_stop_ack_interrupt);
+ if (ret < 0)
+ goto free_rproc;
+ qproc->stop_ack_irq = ret;
+
+ qproc->stop_gpio = devm_gpiod_get(&pdev->dev, "qcom,stop", GPIOD_OUT_LOW);
+ if (IS_ERR(qproc->stop_gpio)) {
+ dev_err(&pdev->dev, "failed to acquire stop gpio\n");
+ ret = PTR_ERR(qproc->stop_gpio);
+ goto free_rproc;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(qproc_attrs); i++) {
+ ret = device_create_file(&pdev->dev, &qproc_attrs[i]);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to create sysfs file\n");
+ goto remove_device_files;
+ }
+ }
+
+ ret = rproc_add(rproc);
+ if (ret)
+ goto remove_device_files;
+
+ return 0;
+
+remove_device_files:
+ for (i--; i >= 0; i--)
+ device_remove_file(&pdev->dev, &qproc_attrs[i]);
+
+free_rproc:
+ rproc_put(rproc);
+
+ return ret;
+}
+
+static int qproc_remove(struct platform_device *pdev)
+{
+ struct qproc *qproc = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qproc_attrs); i++)
+ device_remove_file(&pdev->dev, &qproc_attrs[i]);
+
+ rproc_put(qproc->rproc);
+
+ return 0;
+}
+
+static const struct of_device_id qproc_of_match[] = {
+ { .compatible = "qcom,q6v5-pil", },
+ { },
+};
+
+static struct platform_driver qproc_driver = {
+ .probe = qproc_probe,
+ .remove = qproc_remove,
+ .driver = {
+ .name = "qcom-q6v5-pil",
+ .of_match_table = qproc_of_match,
+ },
+};
+
+module_platform_driver(qproc_driver);
diff --git a/drivers/remoteproc/qcom_tz_pil.c b/drivers/remoteproc/qcom_tz_pil.c
new file mode 100644
index 000000000000..2f43b00fee13
--- /dev/null
+++ b/drivers/remoteproc/qcom_tz_pil.c
@@ -0,0 +1,719 @@
+/*
+ * Qualcomm Peripheral Image Loader
+ *
+ * Copyright (C) 2014 Sony Mobile Communications AB
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/remoteproc.h>
+#include <linux/interrupt.h>
+#include <linux/memblock.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/elf.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/qcom_scm.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/soc/qcom/smd.h>
+
+#include "remoteproc_internal.h"
+
+#define PAS_INIT_IMAGE_CMD 1
+#define PAS_MEM_SETUP_CMD 2
+#define PAS_AUTH_AND_RESET_CMD 5
+#define PAS_SHUTDOWN_CMD 6
+#define PAS_IS_SUPPORTED_CMD 7
+
+struct qproc {
+ struct device *dev;
+ struct rproc *rproc;
+
+ int pas_id;
+
+ int wdog_irq;
+ int fatal_irq;
+ int ready_irq;
+ int handover_irq;
+ int stop_ack_irq;
+
+ struct gpio_desc *stop_gpio;
+
+ const char *name;
+ struct regulator *pll;
+
+ unsigned proxy_clk_count;
+ struct clk *scm_core_clk;
+ struct clk *scm_iface_clk;
+ struct clk *scm_bus_clk;
+ struct clk *scm_src_clk;
+
+ struct clk **proxy_clks;
+
+ struct completion start_done;
+ struct completion stop_done;
+
+ unsigned crash_reason;
+ struct device_node *smd_edge_node;
+
+ phys_addr_t reloc_phys;
+ size_t reloc_size;
+};
+
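+/* Clocks that must be held on around the secure-world PAS calls */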
+static int qproc_scm_clk_enable(struct qproc *qproc)
+{
+ int ret;
+
+ ret = clk_prepare_enable(qproc->scm_core_clk);
+ if (ret)
+ goto bail;
+ ret = clk_prepare_enable(qproc->scm_iface_clk);
+ if (ret)
+ goto disable_core;
+ ret = clk_prepare_enable(qproc->scm_bus_clk);
+ if (ret)
+ goto disable_iface;
+
+ ret = clk_prepare_enable(qproc->scm_src_clk);
+ if (ret)
+ goto disable_bus;
+
+ return 0;
+
+disable_bus:
+ clk_disable_unprepare(qproc->scm_bus_clk);
+disable_iface:
+ clk_disable_unprepare(qproc->scm_iface_clk);
+disable_core:
+ clk_disable_unprepare(qproc->scm_core_clk);
+bail:
+ return ret;
+}
+
+static void qproc_scm_clk_disable(struct qproc *qproc)
+{
+ /* Tear the clocks down in the reverse order of qproc_scm_clk_enable() */
+ clk_disable_unprepare(qproc->scm_src_clk);
+ clk_disable_unprepare(qproc->scm_bus_clk);
+ clk_disable_unprepare(qproc->scm_iface_clk);
+ clk_disable_unprepare(qproc->scm_core_clk);
+}
+
+/**
+ * struct mdt_hdr - representation of a <name>.mdt file in memory
+ * @hdr: ELF32 header
+ * @phdr: ELF32 program headers
+ */
+struct mdt_hdr {
+ struct elf32_hdr hdr;
+ struct elf32_phdr phdr[];
+};
+
+#define segment_is_hash(flag) (((flag) & (0x7 << 24)) == (0x2 << 24))
+
+static int segment_is_loadable(const struct elf32_phdr *p)
+{
+ return (p->p_type == PT_LOAD) &&
+ !segment_is_hash(p->p_flags) &&
+ p->p_memsz;
+}
+
+static bool segment_is_relocatable(const struct elf32_phdr *p)
+{
+ return !!(p->p_flags & BIT(27));
+}
+
+/**
+ * rproc_mdt_sanity_check() - sanity check mdt firmware header
+ * @rproc: the remote processor handle
+ * @fw: the mdt header firmware image
+ */
+static int qproc_sanity_check(struct rproc *rproc,
+ const struct firmware *fw)
+{
+ struct elf32_hdr *ehdr;
+ struct mdt_hdr *mdt;
+
+ if (!fw) {
+ dev_err(&rproc->dev, "failed to load %s\n", rproc->name);
+ return -EINVAL;
+ }
+
+ if (fw->size < sizeof(struct elf32_hdr)) {
+ dev_err(&rproc->dev, "image is too small\n");
+ return -EINVAL;
+ }
+
+ mdt = (struct mdt_hdr *)fw->data;
+ ehdr = &mdt->hdr;
+
+ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+ dev_err(&rproc->dev, "image is corrupted (bad magic)\n");
+ return -EINVAL;
+ }
+
+ if (ehdr->e_phnum == 0) {
+ dev_err(&rproc->dev, "no loadable segments\n");
+ return -EINVAL;
+ }
+
+ if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
+ sizeof(struct elf32_hdr) > fw->size) {
+ dev_err(&rproc->dev, "firmware size is too small\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct resource_table *qproc_find_rsc_table(struct rproc *rproc,
+ const struct firmware *fw,
+ int *tablesz)
+{
+ static struct resource_table table = { .ver = 1, };
+
+ *tablesz = sizeof(table);
+ return &table;
+}
+
+static int qproc_load_segment(struct rproc *rproc, const char *fw_name,
+ const struct elf32_phdr *phdr, phys_addr_t paddr)
+{
+ const struct firmware *fw;
+ void *ptr;
+ int ret = 0;
+
+ ptr = ioremap_nocache(paddr, phdr->p_memsz);
+ if (!ptr) {
+ dev_err(&rproc->dev, "failed to ioremap segment area (%pa+0x%x)\n", &paddr, phdr->p_memsz);
+ return -EBUSY;
+ }
+
+ if (phdr->p_filesz) {
+ ret = request_firmware(&fw, fw_name, &rproc->dev);
+ if (ret) {
+ dev_err(&rproc->dev, "failed to load %s\n", fw_name);
+ goto out;
+ }
+
+ memcpy_toio(ptr, fw->data, fw->size);
+
+ release_firmware(fw);
+ }
+
+ if (phdr->p_memsz > phdr->p_filesz)
+ memset_io(ptr + phdr->p_filesz, 0,
+ phdr->p_memsz - phdr->p_filesz);
+
+out:
+ iounmap(ptr);
+ return ret;
+}
+
+static int qproc_load(struct rproc *rproc, const struct firmware *fw)
+{
+ const struct elf32_phdr *phdr;
+ const struct elf32_hdr *ehdr;
+ const struct mdt_hdr *mdt;
+ phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX;
+ phys_addr_t max_addr = 0;
+ phys_addr_t diff_addr;
+ struct qproc *qproc = rproc->priv;
+ char *fw_name;
+ int ret;
+ int i;
+ size_t align = 0;
+ bool relocatable = false;
+ phys_addr_t paddr;
+
+ ret = qproc_scm_clk_enable(qproc);
+ if (ret)
+ return ret;
+
+ mdt = (struct mdt_hdr *)fw->data;
+ ehdr = &mdt->hdr;
+
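+ /*
+ * Compute the physical span of the loadable segments; bit 27 in a
+ * segment's flags marks the image as relocatable.
+ */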
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &mdt->phdr[i];
+
+ if (!segment_is_loadable(phdr))
+ continue;
+
+ if (phdr->p_paddr < min_addr) {
+ min_addr = phdr->p_paddr;
+
+ if (segment_is_relocatable(phdr)) {
+ align = phdr->p_align;
+ relocatable = true;
+ }
+ }
+
+ if (phdr->p_paddr + phdr->p_memsz > max_addr)
+ max_addr = round_up(phdr->p_paddr + phdr->p_memsz, SZ_4K);
+ }
+
+ ret = qcom_scm_pas_init_image(qproc->dev,
+ qproc->pas_id, fw->data, fw->size);
+ if (ret) {
+ dev_err(qproc->dev, "Invalid firmware metadata\n");
+ ret = -EINVAL;
+ goto out_disable;
+ }
+
+ diff_addr = max_addr - min_addr;
+ dev_dbg(qproc->dev, "pas_mem_setup %pa, %pa\n", &min_addr, &diff_addr);
+
+ ret = qcom_scm_pas_mem_setup(qproc->pas_id,
+ relocatable ? qproc->reloc_phys : min_addr, max_addr - min_addr);
+ if (ret) {
+ dev_err(qproc->dev, "unable to setup memory for image\n");
+ ret = -EINVAL;
+ goto out_disable;
+ }
+
+ fw_name = kzalloc(strlen(qproc->name) + 5, GFP_KERNEL);
+ if (!fw_name) {
+ ret = -ENOMEM;
+ goto out_disable;
+ }
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &mdt->phdr[i];
+
+ if (!segment_is_loadable(phdr))
+ continue;
+
+ paddr = relocatable ?
+ (phdr->p_paddr - min_addr + qproc->reloc_phys) :
+ phdr->p_paddr;
+ sprintf(fw_name, "%s.b%02d", qproc->name, i);
+ ret = qproc_load_segment(rproc, fw_name, phdr, paddr);
+ if (ret)
+ break;
+ }
+
+ kfree(fw_name);
+
+out_disable:
+ qproc_scm_clk_disable(qproc);
+
+ return ret;
+}
+
+static const struct rproc_fw_ops qproc_fw_ops = {
+ .find_rsc_table = qproc_find_rsc_table,
+ .load = qproc_load,
+ .sanity_check = qproc_sanity_check,
+};
+
+static int qproc_start(struct rproc *rproc)
+{
+ struct qproc *qproc = (struct qproc *)rproc->priv;
+ int ret;
+
+ ret = regulator_enable(qproc->pll);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable pll supply\n");
+ return ret;
+ }
+
+ ret = qproc_scm_clk_enable(qproc);
+ if (ret)
+ goto disable_regulator;
+
+ ret = qcom_scm_pas_auth_and_reset(qproc->pas_id);
+ if (ret) {
+ dev_err(qproc->dev,
+ "failed to authenticate image and release reset\n");
+ goto unroll_clocks;
+ }
+
+ /* If no ready IRQ was provided, skip waiting for the start signal */
+ if (qproc->ready_irq < 0)
+ goto done;
+
+ ret = wait_for_completion_timeout(&qproc->start_done, msecs_to_jiffies(10000));
+ if (ret == 0) {
+ dev_err(qproc->dev, "start timed out\n");
+
+ qcom_scm_pas_shutdown(qproc->pas_id);
+ goto unroll_clocks;
+ }
+
+done:
+ dev_info(qproc->dev, "start successful\n");
+
+ return 0;
+
+unroll_clocks:
+ qproc_scm_clk_disable(qproc);
+
+disable_regulator:
+ regulator_disable(qproc->pll);
+
+ return ret;
+}
+
+static int qproc_stop(struct rproc *rproc)
+{
+ return 0;
+}
+
+static const struct rproc_ops qproc_ops = {
+ .start = qproc_start,
+ .stop = qproc_stop,
+};
+
+static irqreturn_t qproc_wdog_interrupt(int irq, void *dev)
+{
+ struct qproc *qproc = dev;
+
+ rproc_report_crash(qproc->rproc, RPROC_WATCHDOG);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qproc_fatal_interrupt(int irq, void *dev)
+{
+ struct qproc *qproc = dev;
+ size_t len;
+ char *msg;
+ int ret;
+
+ ret = qcom_smem_get(-1, qproc->crash_reason, (void **)&msg, &len);
+ if (!ret && len > 0 && msg[0])
+ dev_err(qproc->dev, "fatal error received: %s\n", msg);
+
+ rproc_report_crash(qproc->rproc, RPROC_FATAL_ERROR);
+
+ if (!ret)
+ msg[0] = '\0';
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qproc_ready_interrupt(int irq, void *dev)
+{
+ struct qproc *qproc = dev;
+
+ complete(&qproc->start_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qproc_handover_interrupt(int irq, void *dev)
+{
+ struct qproc *qproc = dev;
+
+ qproc_scm_clk_disable(qproc);
+ regulator_disable(qproc->pll);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qproc_stop_ack_interrupt(int irq, void *dev)
+{
+ struct qproc *qproc = dev;
+
+ complete(&qproc->stop_done);
+ return IRQ_HANDLED;
+}
+
+static ssize_t qproc_boot_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct qproc *qproc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = rproc_boot(qproc->rproc);
+ return ret ? : size;
+}
+
+static ssize_t qproc_shutdown_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct qproc *qproc = dev_get_drvdata(dev);
+
+ rproc_shutdown(qproc->rproc);
+ return size;
+}
+
+static const struct device_attribute qproc_attrs[] = {
+ __ATTR(boot, S_IWUSR, 0, qproc_boot_store),
+ __ATTR(shutdown, S_IWUSR, 0, qproc_shutdown_store),
+};
+
+static int qproc_init_pas(struct qproc *qproc)
+{
+ char *key;
+ int ret;
+
+ key = "qcom,pas-id";
+ ret = of_property_read_u32(qproc->dev->of_node, key, &qproc->pas_id);
+ if (ret) {
+ dev_err(qproc->dev, "Missing or incorrect %s\n", key);
+ return -EINVAL;
+ }
+
+ if (!qcom_scm_pas_supported(qproc->pas_id)) {
+ dev_err(qproc->dev, "PAS is not available for %d\n", qproc->pas_id);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int qproc_init_clocks(struct qproc *qproc)
+{
+ long rate;
+ int ret;
+
+ qproc->scm_core_clk = devm_clk_get(qproc->dev, "scm_core_clk");
+ if (IS_ERR(qproc->scm_core_clk)) {
+ if (PTR_ERR(qproc->scm_core_clk) != -EPROBE_DEFER)
+ dev_err(qproc->dev, "failed to acquire scm_core_clk\n");
+ return PTR_ERR(qproc->scm_core_clk);
+ }
+
+ qproc->scm_iface_clk = devm_clk_get(qproc->dev, "scm_iface_clk");
+ if (IS_ERR(qproc->scm_iface_clk)) {
+ if (PTR_ERR(qproc->scm_iface_clk) != -EPROBE_DEFER)
+ dev_err(qproc->dev, "failed to acquire scm_iface_clk\n");
+ return PTR_ERR(qproc->scm_iface_clk);
+ }
+
+ qproc->scm_bus_clk = devm_clk_get(qproc->dev, "scm_bus_clk");
+ if (IS_ERR(qproc->scm_bus_clk)) {
+ if (PTR_ERR(qproc->scm_bus_clk) != -EPROBE_DEFER)
+ dev_err(qproc->dev, "failed to acquire scm_bus_clk\n");
+ return PTR_ERR(qproc->scm_bus_clk);
+ }
+
+ qproc->scm_src_clk = devm_clk_get(qproc->dev, "scm_src_clk");
+ if (IS_ERR(qproc->scm_src_clk)) {
+ if (PTR_ERR(qproc->scm_src_clk) != -EPROBE_DEFER)
+ dev_err(qproc->dev, "failed to acquire scm_src_clk\n");
+ return PTR_ERR(qproc->scm_src_clk);
+ }
+
+ ret = clk_set_rate(qproc->scm_core_clk,
+ clk_round_rate(qproc->scm_core_clk, 19200000));
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(qproc->scm_bus_clk,
+ clk_round_rate(qproc->scm_bus_clk, 19200000));
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(qproc->scm_iface_clk,
+ clk_round_rate(qproc->scm_iface_clk, 19200000));
+ if (ret)
+ return ret;
+
+ rate = clk_round_rate(qproc->scm_src_clk, 80000000);
+ ret = clk_set_rate(qproc->scm_src_clk, rate);
+ if (ret) {
+ dev_err(qproc->dev, "failed to set rate of scm_src_clk\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qproc_init_regulators(struct qproc *qproc)
+{
+ int ret;
+ u32 uA;
+ u32 uV;
+
+ qproc->pll = devm_regulator_get(qproc->dev, "qcom,pll");
+ if (IS_ERR(qproc->pll)) {
+ if (PTR_ERR(qproc->pll) != -EPROBE_DEFER)
+ dev_err(qproc->dev, "failed to aquire regulator\n");
+ return PTR_ERR(qproc->pll);
+ }
+
+ ret = of_property_read_u32(qproc->dev->of_node, "qcom,pll-uV", &uV);
+ if (ret)
+ dev_warn(qproc->dev, "failed to read qcom,pll_uV, skipping\n");
+ else
+ regulator_set_voltage(qproc->pll, uV, uV);
+
+ ret = of_property_read_u32(qproc->dev->of_node, "qcom,pll-uA", &uA);
+ if (ret)
+ dev_warn(qproc->dev, "failed to read qcom,pll_uA, skipping\n");
+ else
+ regulator_set_load(qproc->pll, uA);
+
+ return 0;
+}
+
+static int qproc_request_irq(struct qproc *qproc, struct platform_device *pdev, const char *name, irq_handler_t thread_fn)
+{
+ int ret;
+
+ ret = platform_get_irq_byname(pdev, name);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no %s IRQ defined\n", name);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, ret,
+ NULL, thread_fn,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "qproc", qproc);
+ if (ret)
+ dev_err(&pdev->dev, "request %s IRQ failed\n", name);
+ return ret;
+}
+
+static int qproc_probe(struct platform_device *pdev)
+{
+ struct qproc *qproc;
+ struct rproc *rproc;
+ char *fw_name;
+ const char *name;
+ const char *key;
+ int ret;
+ int i;
+ struct device_node *np;
+ struct resource r;
+
+ key = "qcom,firmware-name";
+ ret = of_property_read_string(pdev->dev.of_node, key, &name);
+ if (ret) {
+ dev_err(&pdev->dev, "missing or incorrect %s\n", key);
+ return -EINVAL;
+ }
+
+ fw_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s.mdt", name);
+ if (!fw_name)
+ return -ENOMEM;
+
+ rproc = rproc_alloc(&pdev->dev, pdev->name, &qproc_ops,
+ fw_name, sizeof(*qproc));
+ if (!rproc) {
+ dev_err(&pdev->dev, "unable to allocate remoteproc\n");
+ return -ENOMEM;
+ }
+
+ rproc->fw_ops = &qproc_fw_ops;
+
+ qproc = (struct qproc *)rproc->priv;
+ qproc->dev = &pdev->dev;
+ qproc->rproc = rproc;
+ qproc->name = name;
+ platform_set_drvdata(pdev, qproc);
+
+ init_completion(&qproc->start_done);
+ init_completion(&qproc->stop_done);
+
+ ret = of_property_read_u32(pdev->dev.of_node, "qcom,crash-reason",
+ &qproc->crash_reason);
+ if (ret)
+ dev_info(&pdev->dev, "no crash reason id\n");
+
+ qproc->smd_edge_node = of_parse_phandle(pdev->dev.of_node,
+ "qcom,smd-edges", 0);
+
+ ret = qproc_init_pas(qproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = qproc_init_clocks(qproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = qproc_init_regulators(qproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = qproc_request_irq(qproc, pdev, "wdog", qproc_wdog_interrupt);
+ qproc->wdog_irq = ret;
+
+ ret = qproc_request_irq(qproc, pdev, "fatal", qproc_fatal_interrupt);
+ qproc->fatal_irq = ret;
+
+ ret = qproc_request_irq(qproc, pdev, "ready", qproc_ready_interrupt);
+ qproc->ready_irq = ret;
+
+ ret = qproc_request_irq(qproc, pdev, "handover", qproc_handover_interrupt);
+ qproc->handover_irq = ret;
+
+ ret = qproc_request_irq(qproc, pdev, "stop-ack", qproc_stop_ack_interrupt);
+ qproc->stop_ack_irq = ret;
+
+ for (i = 0; i < ARRAY_SIZE(qproc_attrs); i++) {
+ ret = device_create_file(&pdev->dev, &qproc_attrs[i]);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to create sysfs file\n");
+ goto remove_device_files;
+ }
+ }
+
+ np = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "No memory region specified\n");
+ } else {
+ ret = of_address_to_resource(np, 0, &r);
+ of_node_put(np);
+ if (ret)
+ goto remove_device_files;
+
+ qproc->reloc_phys = r.start;
+ qproc->reloc_size = resource_size(&r);
+
+ dev_info(&pdev->dev, "Found relocation area %lu@%pad\n",
+ qproc->reloc_size, &qproc->reloc_phys);
+ }
+
+ ret = rproc_add(rproc);
+ if (ret)
+ goto remove_device_files;
+
+ return 0;
+
+remove_device_files:
+ for (i--; i >= 0; i--)
+ device_remove_file(&pdev->dev, &qproc_attrs[i]);
+
+free_rproc:
+ rproc_put(rproc);
+
+ return ret;
+}
+
+static int qproc_remove(struct platform_device *pdev)
+{
+ struct qproc *qproc = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qproc_attrs); i++)
+ device_remove_file(&pdev->dev, &qproc_attrs[i]);
+
+ rproc_put(qproc->rproc);
+
+ return 0;
+}
+
+static const struct of_device_id qproc_of_match[] = {
+ { .compatible = "qcom,tz-pil", },
+ { },
+};
+
+static struct platform_driver qproc_driver = {
+ .probe = qproc_probe,
+ .remove = qproc_remove,
+ .driver = {
+ .name = "qcom-tz-pil",
+ .of_match_table = qproc_of_match,
+ },
+};
+
+module_platform_driver(qproc_driver);
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 8b3130f22b42..39656aa723b6 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -57,6 +57,8 @@ static DEFINE_IDA(rproc_dev_index);
static const char * const rproc_crash_names[] = {
[RPROC_MMUFAULT] = "mmufault",
+ [RPROC_WATCHDOG] = "watchdog",
+ [RPROC_FATAL_ERROR] = "fatal error",
};
/* translate rproc_crash_type to string */
@@ -789,6 +791,8 @@ static void rproc_resource_cleanup(struct rproc *rproc)
}
}
+static int __rproc_fw_config_virtio(struct rproc *rproc, const struct firmware *fw);
+
/*
* take a firmware and boot a remote processor with it.
*/
@@ -799,13 +803,16 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
struct resource_table *table, *loaded_table;
int ret, tablesz;
- if (!rproc->table_ptr)
- return -ENOMEM;
-
ret = rproc_fw_sanity_check(rproc, fw);
if (ret)
return ret;
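+ /*
+ * If no cached resource table exists yet (e.g. the firmware was not
+ * available when the rproc was registered), process it now.
+ */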
+ if (!rproc->table_ptr) {
+ ret = __rproc_fw_config_virtio(rproc, fw);
+ if (ret)
+ return ret;
+ }
+
dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);
/*
@@ -854,12 +861,8 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
* copy this information to device memory.
*/
loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
- if (!loaded_table) {
- ret = -EINVAL;
- goto clean_up;
- }
-
- memcpy(loaded_table, rproc->cached_table, tablesz);
+ if (loaded_table)
+ memcpy(loaded_table, rproc->cached_table, tablesz);
/* power up the remote processor */
ret = rproc->ops->start(rproc);
@@ -895,19 +898,15 @@ clean_up:
* to unregister the device. one other option is just to use kref here,
* that might be cleaner).
*/
-static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
+static int __rproc_fw_config_virtio(struct rproc *rproc, const struct firmware *fw)
{
- struct rproc *rproc = context;
struct resource_table *table;
int ret, tablesz;
- if (rproc_fw_sanity_check(rproc, fw) < 0)
- goto out;
-
/* look for the resource table */
table = rproc_find_rsc_table(rproc, fw, &tablesz);
if (!table)
- goto out;
+ return -EINVAL;
rproc->table_csum = crc32(0, table, tablesz);
@@ -919,7 +918,7 @@ static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
*/
rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL);
if (!rproc->cached_table)
- goto out;
+ return -ENOMEM;
rproc->table_ptr = rproc->cached_table;
@@ -928,12 +927,21 @@ static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
ret = rproc_handle_resources(rproc, tablesz,
rproc_count_vrings_handler);
if (ret)
- goto out;
+ return ret;
/* look for virtio devices and register them */
ret = rproc_handle_resources(rproc, tablesz, rproc_vdev_handler);
-out:
+ return ret;
+}
+
+static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
+{
+ struct rproc *rproc = context;
+
+ if (rproc_fw_sanity_check(rproc, fw) >= 0)
+ __rproc_fw_config_virtio(rproc, fw);
+
release_firmware(fw);
/* allow rproc_del() contexts, if any, to proceed */
complete_all(&rproc->firmware_loading_complete);
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 10a93d168e0e..41b668b0d836 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,3 +1,4 @@
+obj-$(CONFIG_ARM64) += cpu_ops.o
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_PM) += spm.o
obj-$(CONFIG_QCOM_SMD) += smd.o
diff --git a/drivers/soc/qcom/cpu_ops.c b/drivers/soc/qcom/cpu_ops.c
new file mode 100644
index 000000000000..d831cb071d3d
--- /dev/null
+++ b/drivers/soc/qcom/cpu_ops.c
@@ -0,0 +1,343 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* MSM ARMv8 CPU Operations
+ * Based on arch/arm64/kernel/smp_spin_table.c
+ */
+
+#include <linux/bitops.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/smp.h>
+#include <linux/qcom_scm.h>
+
+#include <asm/barrier.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu_ops.h>
+#include <asm/cputype.h>
+#include <asm/smp_plat.h>
+
+static DEFINE_RAW_SPINLOCK(boot_lock);
+
+DEFINE_PER_CPU(bool, cold_boot_done);
+
+#if 0
+static int cold_boot_flags[] = {
+ 0,
+ QCOM_SCM_FLAG_COLDBOOT_CPU1,
+ QCOM_SCM_FLAG_COLDBOOT_CPU2,
+ QCOM_SCM_FLAG_COLDBOOT_CPU3,
+};
+#endif
+
+/* CPU power domain register offsets */
+#define CPU_PWR_CTL 0x4
+#define CPU_PWR_GATE_CTL 0x14
+#define LDO_BHS_PWR_CTL 0x28
+
+/* L2 power domain register offsets */
+#define L2_PWR_CTL_OVERRIDE 0xc
+#define L2_PWR_CTL 0x14
+#define L2_PWR_STATUS 0x18
+#define L2_CORE_CBCR 0x58
+#define L1_RST_DIS 0x284
+
+#define L2_SPM_STS 0xc
+#define L2_VREG_CTL 0x1c
+
+#define SCM_IO_READ 1
+#define SCM_IO_WRITE 2
+
+/*
+ * struct msm_l2ccc_of_info - data for the L2 cache clock controller
+ * @compat: compatible string for the L2 cache clock controller
+ * @l2_power_on: L2 cache power-on routine
+ * @l2_power_on_mask: power-on status mask passed to @l2_power_on
+ */
+struct msm_l2ccc_of_info {
+ const char *compat;
+	int (*l2_power_on)(struct device_node *dn, u32 l2_mask, int cpu);
+ u32 l2_power_on_mask;
+};
+
+static int power_on_l2_msm8916(struct device_node *l2ccc_node, u32 pon_mask,
+ int cpu)
+{
+ u32 pon_status;
+ void __iomem *l2_base;
+
+ l2_base = of_iomap(l2ccc_node, 0);
+ if (!l2_base)
+ return -ENOMEM;
+
+	/* Skip the power-on sequence if the L2 cache is already powered up */
+ pon_status = (__raw_readl(l2_base + L2_PWR_STATUS) & pon_mask)
+ == pon_mask;
+ if (pon_status) {
+ iounmap(l2_base);
+ return 0;
+ }
+
+ /* Close L2/SCU Logic GDHS and power up the cache */
+ writel_relaxed(0x10D700, l2_base + L2_PWR_CTL);
+
+ /* Assert PRESETDBGn */
+ writel_relaxed(0x400000, l2_base + L2_PWR_CTL_OVERRIDE);
+ mb();
+ udelay(2);
+
+ /* De-assert L2/SCU memory Clamp */
+ writel_relaxed(0x101700, l2_base + L2_PWR_CTL);
+
+ /* Wakeup L2/SCU RAMs by deasserting sleep signals */
+ writel_relaxed(0x101703, l2_base + L2_PWR_CTL);
+ mb();
+ udelay(2);
+
+ /* Enable clocks via SW_CLK_EN */
+ writel_relaxed(0x01, l2_base + L2_CORE_CBCR);
+
+ /* De-assert L2/SCU logic clamp */
+ writel_relaxed(0x101603, l2_base + L2_PWR_CTL);
+ mb();
+ udelay(2);
+
+	/* De-assert PRESETDBGn */
+ writel_relaxed(0x0, l2_base + L2_PWR_CTL_OVERRIDE);
+
+ /* De-assert L2/SCU Logic reset */
+ writel_relaxed(0x100203, l2_base + L2_PWR_CTL);
+ mb();
+ udelay(54);
+
+ /* Turn on the PMIC_APC */
+ writel_relaxed(0x10100203, l2_base + L2_PWR_CTL);
+
+ /* Set H/W clock control for the cpu CBC block */
+ writel_relaxed(0x03, l2_base + L2_CORE_CBCR);
+ mb();
+ iounmap(l2_base);
+
+ return 0;
+}
+
+static const struct msm_l2ccc_of_info l2ccc_info[] = {
+ {
+ .compat = "qcom,8916-l2ccc",
+ .l2_power_on = power_on_l2_msm8916,
+ .l2_power_on_mask = BIT(9),
+ },
+};
+
+static int power_on_l2_cache(struct device_node *l2ccc_node, int cpu)
+{
+ int ret, i;
+ const char *compat;
+
+ ret = of_property_read_string(l2ccc_node, "compatible", &compat);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(l2ccc_info); i++) {
+ const struct msm_l2ccc_of_info *ptr = &l2ccc_info[i];
+
+ if (!of_compat_cmp(ptr->compat, compat, strlen(compat)))
+ return ptr->l2_power_on(l2ccc_node,
+ ptr->l2_power_on_mask, cpu);
+ }
+ pr_err("Compat string not found for L2CCC node\n");
+ return -EIO;
+}
+
+static int msm_unclamp_secondary_arm_cpu(unsigned int cpu)
+{
+	int ret = 0;
+ struct device_node *cpu_node, *acc_node, *l2_node, *l2ccc_node;
+ void __iomem *reg;
+
+ cpu_node = of_get_cpu_node(cpu, NULL);
+ if (!cpu_node)
+ return -ENODEV;
+
+ acc_node = of_parse_phandle(cpu_node, "qcom,acc", 0);
+ if (!acc_node) {
+ ret = -ENODEV;
+ goto out_acc;
+ }
+
+ l2_node = of_parse_phandle(cpu_node, "next-level-cache", 0);
+ if (!l2_node) {
+ ret = -ENODEV;
+ goto out_l2;
+ }
+
+ l2ccc_node = of_parse_phandle(l2_node, "power-domain", 0);
+ if (!l2ccc_node) {
+ ret = -ENODEV;
+		goto out_l2ccc;
+ }
+
+ /* Ensure L2-cache of the CPU is powered on before
+ * unclamping cpu power rails.
+ */
+ ret = power_on_l2_cache(l2ccc_node, cpu);
+ if (ret) {
+ pr_err("L2 cache power up failed for CPU%d\n", cpu);
+		goto out_acc_reg;
+ }
+
+ reg = of_iomap(acc_node, 0);
+ if (!reg) {
+ ret = -ENOMEM;
+ goto out_acc_reg;
+ }
+
+ /* Assert Reset on cpu-n */
+ writel_relaxed(0x00000033, reg + CPU_PWR_CTL);
+ mb();
+
+	/* Program skew to 16 X0 clock cycles */
+ writel_relaxed(0x10000001, reg + CPU_PWR_GATE_CTL);
+ mb();
+ udelay(2);
+
+ /* De-assert coremem clamp */
+ writel_relaxed(0x00000031, reg + CPU_PWR_CTL);
+ mb();
+
+ /* Close coremem array gdhs */
+ writel_relaxed(0x00000039, reg + CPU_PWR_CTL);
+ mb();
+ udelay(2);
+
+ /* De-assert cpu-n clamp */
+ writel_relaxed(0x00020038, reg + CPU_PWR_CTL);
+ mb();
+ udelay(2);
+
+ /* De-assert cpu-n reset */
+ writel_relaxed(0x00020008, reg + CPU_PWR_CTL);
+ mb();
+
+ /* Assert PWRDUP signal on core-n */
+ writel_relaxed(0x00020088, reg + CPU_PWR_CTL);
+ mb();
+
+ /* Secondary CPU-N is now alive */
+ iounmap(reg);
+out_acc_reg:
+ of_node_put(l2ccc_node);
+out_l2ccc:
+ of_node_put(l2_node);
+out_l2:
+ of_node_put(acc_node);
+out_acc:
+ of_node_put(cpu_node);
+
+ return ret;
+}
+
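+/*
+ * Publish the pen release value and clean it to the point of coherency,
+ * so a waiting secondary core that may still have its caches disabled
+ * observes the update.
+ */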
+static void write_pen_release(u64 val)
+{
+ void *start = (void *)&secondary_holding_pen_release;
+ unsigned long size = sizeof(secondary_holding_pen_release);
+
+ secondary_holding_pen_release = val;
+ smp_wmb();
+ __flush_dcache_area(start, size);
+}
+
+static int secondary_pen_release(unsigned int cpu)
+{
+ unsigned long timeout;
+
+ /*
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+ raw_spin_lock(&boot_lock);
+ write_pen_release(cpu_logical_map(cpu));
+
+ timeout = jiffies + (1 * HZ);
+ while (time_before(jiffies, timeout)) {
+ if (secondary_holding_pen_release == INVALID_HWID)
+ break;
+ udelay(10);
+ }
+ raw_spin_unlock(&boot_lock);
+
+ return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
+}
+
+static int __init msm_cpu_init(struct device_node *dn, unsigned int cpu)
+{
+ /* Mark CPU0 cold boot flag as done */
+ if (!cpu && !per_cpu(cold_boot_done, cpu))
+ per_cpu(cold_boot_done, cpu) = true;
+
+ return 0;
+}
+
+static int __init msm_cpu_prepare(unsigned int cpu)
+{
+ const cpumask_t *mask = cpumask_of(cpu);
+
+ if (qcom_scm_set_cold_boot_addr(secondary_holding_pen, mask)) {
+ pr_warn("CPU%d:Failed to set boot address\n", cpu);
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
+static int msm_cpu_boot(unsigned int cpu)
+{
+ int ret = 0;
+
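+	/* Unclamp and power on the core only once; later boots reuse the pen */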
+	if (!per_cpu(cold_boot_done, cpu)) {
+ ret = msm_unclamp_secondary_arm_cpu(cpu);
+ if (ret)
+ return ret;
+ per_cpu(cold_boot_done, cpu) = true;
+ }
+ return secondary_pen_release(cpu);
+}
+
+void msm_cpu_postboot(void)
+{
+ /*
+ * Let the primary processor know we're out of the pen.
+ */
+ write_pen_release(INVALID_HWID);
+
+ /*
+ * Synchronise with the boot thread.
+ */
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
+}
+
+static const struct cpu_operations msm_cortex_a_ops = {
+ .name = "qcom,arm-cortex-acc",
+ .cpu_init = msm_cpu_init,
+ .cpu_prepare = msm_cpu_prepare,
+ .cpu_boot = msm_cpu_boot,
+ .cpu_postboot = msm_cpu_postboot,
+};
+CPU_METHOD_OF_DECLARE(msm_cortex_a_ops, &msm_cortex_a_ops);
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index 1392ccf14a20..f58d02e51bb8 100644
--- a/drivers/soc/qcom/smd-rpm.c
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -121,7 +121,7 @@ int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
pkt.hdr.length = sizeof(struct qcom_rpm_request) + count;
pkt.req.msg_id = msg_id++;
- pkt.req.flags = BIT(state);
+ pkt.req.flags = state;
pkt.req.type = type;
pkt.req.id = id;
pkt.req.data_len = count;
@@ -212,6 +212,7 @@ static void qcom_smd_rpm_remove(struct qcom_smd_device *sdev)
static const struct of_device_id qcom_smd_rpm_of_match[] = {
{ .compatible = "qcom,rpm-msm8974" },
+ { .compatible = "qcom,rpm-msm8916" },
{}
};
MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match);
diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c
index a6155c917d52..576c89fd8965 100644
--- a/drivers/soc/qcom/smd.c
+++ b/drivers/soc/qcom/smd.c
@@ -131,18 +131,6 @@ struct qcom_smd_edge {
struct work_struct work;
};
-/*
- * SMD channel states.
- */
-enum smd_channel_state {
- SMD_CHANNEL_CLOSED,
- SMD_CHANNEL_OPENING,
- SMD_CHANNEL_OPENED,
- SMD_CHANNEL_FLUSHING,
- SMD_CHANNEL_CLOSING,
- SMD_CHANNEL_RESET,
- SMD_CHANNEL_RESET_OPENING
-};
/**
* struct qcom_smd_channel - smd channel struct
@@ -166,38 +154,6 @@ enum smd_channel_state {
* @pkt_size: size of the currently handled packet
* @list: lite entry for @channels in qcom_smd_edge
*/
-struct qcom_smd_channel {
- struct qcom_smd_edge *edge;
-
- struct qcom_smd_device *qsdev;
-
- char *name;
- enum smd_channel_state state;
- enum smd_channel_state remote_state;
-
- struct smd_channel_info *tx_info;
- struct smd_channel_info *rx_info;
-
- struct smd_channel_info_word *tx_info_word;
- struct smd_channel_info_word *rx_info_word;
-
- struct mutex tx_lock;
- wait_queue_head_t fblockread_event;
-
- void *tx_fifo;
- void *rx_fifo;
- int fifo_size;
-
- void *bounce_buffer;
- int (*cb)(struct qcom_smd_device *, const void *, size_t);
-
- spinlock_t recv_lock;
-
- int pkt_size;
-
- struct list_head list;
-};
-
/**
* struct qcom_smd - smd struct
* @dev: device struct
@@ -362,43 +318,37 @@ static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel,
}
/*
- * Copy count bytes of data using 32bit accesses, if that's required.
+ * Copy count bytes of data from memory to device memory using 32bit accesses
*/
-static void smd_copy_to_fifo(void __iomem *_dst,
- const void *_src,
- size_t count,
- bool word_aligned)
+static void smd_copy_to_fifo(void __iomem *_dst, const void *_src,
+			     size_t count, bool word_aligned)
{
- u32 *dst = (u32 *)_dst;
- u32 *src = (u32 *)_src;
-
- if (word_aligned) {
- count /= sizeof(u32);
- while (count--)
- writel_relaxed(*src++, dst++);
- } else {
- memcpy_toio(_dst, _src, count);
- }
+ u32 *dst = (u32 *)_dst;
+ u32 *src = (u32 *)_src;
+
+ if (word_aligned) {
+ count /= sizeof(u32);
+ while (count--)
+ writel_relaxed(*src++, dst++);
+ } else {
+ memcpy_toio(_dst, _src, count);
+ }
}
/*
- * Copy count bytes of data using 32bit accesses, if that is required.
+ * Copy count bytes of data from device memory to memory using 32bit accesses
*/
-static void smd_copy_from_fifo(void *_dst,
- const void __iomem *_src,
- size_t count,
- bool word_aligned)
+static void smd_copy_from_fifo(void *_dst, const void __iomem *_src,
+			       size_t count, bool word_aligned)
{
- u32 *dst = (u32 *)_dst;
- u32 *src = (u32 *)_src;
-
- if (word_aligned) {
- count /= sizeof(u32);
- while (count--)
- *dst++ = readl_relaxed(src++);
- } else {
- memcpy_fromio(_dst, _src, count);
- }
+ u32 *dst = (u32 *)_dst;
+ u32 *src = (u32 *)_src;
+
+ if (word_aligned) {
+ count /= sizeof(u32);
+ while (count--)
+ *dst++ = readl_relaxed(src++);
+ } else {
+ memcpy_fromio(_dst, _src, count);
+ }
}
/*
@@ -416,19 +366,11 @@ static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel,
tail = GET_RX_CHANNEL_INFO(channel, tail);
len = min_t(size_t, count, channel->fifo_size - tail);
- if (len) {
- smd_copy_from_fifo(buf,
- channel->rx_fifo + tail,
- len,
- word_aligned);
- }
+ if (len)
+		smd_copy_from_fifo(buf, channel->rx_fifo + tail,
+				   len, word_aligned);
- if (len != count) {
- smd_copy_from_fifo(buf + len,
- channel->rx_fifo,
- count - len,
- word_aligned);
- }
+ if (len != count)
+		smd_copy_from_fifo(buf + len, channel->rx_fifo,
+				   count - len, word_aligned);
return count;
}
@@ -631,19 +573,11 @@ static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
head = GET_TX_CHANNEL_INFO(channel, head);
len = min_t(size_t, count, channel->fifo_size - head);
- if (len) {
- smd_copy_to_fifo(channel->tx_fifo + head,
- data,
- len,
- word_aligned);
- }
+ if (len)
+		smd_copy_to_fifo(channel->tx_fifo + head, data,
+				 len, word_aligned);
- if (len != count) {
- smd_copy_to_fifo(channel->tx_fifo,
- data + len,
- count - len,
- word_aligned);
- }
+ if (len != count)
+		smd_copy_to_fifo(channel->tx_fifo, data + len,
+				 count - len, word_aligned);
head += count;
head &= (channel->fifo_size - 1);
@@ -667,16 +601,19 @@ int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len)
{
u32 hdr[5] = {len,};
int tlen = sizeof(hdr) + len;
	int ret;
/* Word aligned channels only accept word size aligned data */
if (channel->rx_info_word != NULL && len % 4)
return -EINVAL;
ret = mutex_lock_interruptible(&channel->tx_lock);
if (ret)
return ret;
while (qcom_smd_get_tx_avail(channel) < tlen) {
if (channel->state != SMD_CHANNEL_OPENED) {
ret = -EPIPE;
@@ -696,9 +633,12 @@ int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len)
SET_TX_CHANNEL_INFO(channel, fTAIL, 0);
qcom_smd_write_fifo(channel, hdr, sizeof(hdr));
qcom_smd_write_fifo(channel, data, len);
SET_TX_CHANNEL_INFO(channel, fHEAD, 1);
/* Ensure ordering of channel info updates */
@@ -1008,7 +948,7 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
/* The channel consist of a rx and tx fifo of equal size */
fifo_size /= 2;
	dev_dbg(smd->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n",
name, info_size, fifo_size);
channel->tx_fifo = fifo_base;
@@ -1190,7 +1130,11 @@ static int qcom_smd_parse_edge(struct device *dev,
edge->remote_pid = QCOM_SMEM_HOST_ANY;
key = "qcom,remote-pid";
- of_property_read_u32(node, key, &edge->remote_pid);
+ ret = of_property_read_u32(node, key, &edge->remote_pid);
+ if (ret) {
+ dev_err(dev, "edge missing %s property\n", key);
+ return -EINVAL;
+ }
syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
if (!syscon_np) {
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 52365188a1c2..7fddf3b491b6 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -20,6 +20,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>
+#include <linux/debugfs.h>
/*
* The Qualcomm shared memory system is a allocate only heap structure that
@@ -85,6 +86,8 @@
/* Max number of processors/hosts in a system */
#define SMEM_HOST_COUNT 9
+#define SMEM_HEAP_INFO 1
+
/**
* struct smem_proc_comm - proc_comm communication struct (legacy)
* @command: current command to be executed
@@ -238,6 +241,9 @@ struct qcom_smem {
struct smem_partition_header *partitions[SMEM_HOST_COUNT];
+ struct dentry *dent;
+ u32 version;
+
unsigned num_regions;
struct smem_region regions[0];
};
@@ -642,6 +648,104 @@ static int qcom_smem_count_mem_regions(struct platform_device *pdev)
return num_regions;
}
+static void smem_debug_read_mem(struct seq_file *s)
+{
+ u32 *info;
+ size_t size;
+ int ret, i;
+	unsigned long flags;
+
+ ret = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HEAP_INFO,
+ (void **)&info, &size);
+
+	if (ret < 0) {
+		seq_puts(s, "Can't get global heap information pool\n");
+	} else {
+		seq_puts(s, "global heap\n");
+ seq_printf(s, " initialized: %d offset: %08x avail: %08x\n",
+ info[0], info[1], info[2]);
+
+ for (i = 0; i < 512; i++) {
+ ret = qcom_smem_get(QCOM_SMEM_HOST_ANY, i,
+ (void **)&info, &size);
+ if (ret < 0)
+ continue;
+
+ seq_printf(s, " [%d]: p: %p s: %li\n", i, info,
+ size);
+ }
+ }
+
+ seq_printf(s, "\nSecure partitions accessible from APPS:\n");
+
+	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
+					  HWSPINLOCK_TIMEOUT,
+					  &flags);
+	if (ret)
+		return;
+
+ for (i = 0; i < SMEM_HOST_COUNT; i++) {
+ struct smem_partition_header *part_hdr = __smem->partitions[i];
+ void *p;
+
+ if (!part_hdr)
+ continue;
+
+ if (part_hdr->magic != SMEM_PART_MAGIC) {
+ seq_printf(s, " part[%d]: incorrect magic\n", i);
+ continue;
+ }
+
+ seq_printf(s, " part[%d]: (%d <-> %d) size: %d off: %08x\n",
+ i, part_hdr->host0, part_hdr->host1, part_hdr->size,
+ part_hdr->offset_free_uncached);
+
+ p = (void *)part_hdr + sizeof(*part_hdr);
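+		/* Walk the private entries up to the uncached free offset */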
+ while (p < (void *)part_hdr + part_hdr->offset_free_uncached) {
+ struct smem_private_entry *entry = p;
+
+ seq_printf(s,
+ " [%d]: %s size: %d pd: %d\n",
+ entry->item,
+ (entry->canary == SMEM_PRIVATE_CANARY) ?
+ "valid" : "invalid",
+ entry->size,
+ entry->padding_data);
+
+ p += sizeof(*entry) + entry->padding_hdr + entry->size;
+ }
+ }
+
+ hwspin_unlock_irqrestore(__smem->hwlock, &flags);
+}
+
+static void smem_debug_read_version(struct seq_file *s)
+{
+ seq_printf(s, "SBL version: %08x\n", __smem->version >> 16);
+}
+
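+/*
+ * The debugfs file's private data holds the show routine itself, letting
+ * one set of fops back both the "mem" and "version" entries.
+ */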
+static int debugfs_show(struct seq_file *s, void *data)
+{
+ void (*show)(struct seq_file *) = s->private;
+
+ show(s);
+
+ return 0;
+}
+
+static int smem_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debugfs_show, inode->i_private);
+}
+
+static const struct file_operations smem_debug_ops = {
+ .open = smem_debug_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
static int qcom_smem_probe(struct platform_device *pdev)
{
struct smem_header *header;
@@ -652,7 +756,6 @@ static int qcom_smem_probe(struct platform_device *pdev)
size_t array_size;
int num_regions = 0;
int hwlock_id;
- u32 version;
int ret;
int i;
@@ -703,9 +806,9 @@ static int qcom_smem_probe(struct platform_device *pdev)
return -EINVAL;
}
- version = qcom_smem_get_sbl_version(smem);
- if (version >> 16 != SMEM_EXPECTED_VERSION) {
- dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
+ smem->version = qcom_smem_get_sbl_version(smem);
+ if (smem->version >> 16 != SMEM_EXPECTED_VERSION) {
+ dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", smem->version);
return -EINVAL;
}
@@ -725,6 +828,17 @@ static int qcom_smem_probe(struct platform_device *pdev)
__smem = smem;
+ /* setup debugfs information */
+ __smem->dent = debugfs_create_dir("smem", 0);
+ if (IS_ERR(__smem->dent))
+ dev_info(smem->dev, "unable to create debugfs\n");
+
+ if (!debugfs_create_file("mem", 0444, __smem->dent, smem_debug_read_mem, &smem_debug_ops))
+ dev_err(smem->dev, "couldnt create mem file\n");
+ if (!debugfs_create_file("version", 0444, __smem->dent, smem_debug_read_version,
+ &smem_debug_ops))
+ dev_err(smem->dev, "couldnt create mem file\n");
+
return 0;
}
diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c
index b04b05a0904e..7f751c5a4c94 100644
--- a/drivers/soc/qcom/spm.c
+++ b/drivers/soc/qcom/spm.c
@@ -20,11 +20,14 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/qcom_scm.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
#include <asm/cpuidle.h>
#include <asm/proc-fns.h>
@@ -36,6 +39,9 @@
#define SPM_CTL_INDEX_SHIFT 4
#define SPM_CTL_EN BIT(0)
+/* Specifies the PMIC internal slew rate in uV/us. */
+#define REGULATOR_SLEW_RATE 1250
+
enum pm_sleep_mode {
PM_SLEEP_MODE_STBY,
PM_SLEEP_MODE_RET,
@@ -51,6 +57,8 @@ enum spm_reg {
SPM_REG_PMIC_DLY,
SPM_REG_PMIC_DATA_0,
SPM_REG_PMIC_DATA_1,
+ SPM_REG_RST,
+ SPM_REG_STS_1,
SPM_REG_VCTL,
SPM_REG_SEQ_ENTRY,
SPM_REG_SPM_STS,
@@ -68,9 +76,23 @@ struct spm_reg_data {
u8 start_index[PM_SLEEP_MODE_NR];
};
+struct spm_vlevel_data {
+ struct spm_driver_data *drv;
+ unsigned selector;
+};
+
+struct saw2_vreg {
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+ unsigned int uV;
+ u32 vlevel;
+ struct spm_driver_data *drv;
+};
+
struct spm_driver_data {
void __iomem *reg_base;
const struct spm_reg_data *reg_data;
+ struct saw2_vreg *vreg;
};
static const u8 spm_reg_offset_v2_1[SPM_REG_NR] = {
@@ -94,10 +116,13 @@ static const struct spm_reg_data spm_reg_8974_8084_cpu = {
static const u8 spm_reg_offset_v1_1[SPM_REG_NR] = {
[SPM_REG_CFG] = 0x08,
+ [SPM_REG_STS_1] = 0x10,
+ [SPM_REG_VCTL] = 0x14,
[SPM_REG_SPM_CTL] = 0x20,
[SPM_REG_PMIC_DLY] = 0x24,
[SPM_REG_PMIC_DATA_0] = 0x28,
[SPM_REG_PMIC_DATA_1] = 0x2C,
+ [SPM_REG_RST] = 0x30,
[SPM_REG_SEQ_ENTRY] = 0x80,
};
@@ -282,6 +307,146 @@ static struct cpuidle_ops qcom_cpuidle_ops __initdata = {
CPUIDLE_METHOD_OF_DECLARE(qcom_idle_v1, "qcom,kpss-acc-v1", &qcom_cpuidle_ops);
CPUIDLE_METHOD_OF_DECLARE(qcom_idle_v2, "qcom,kpss-acc-v2", &qcom_cpuidle_ops);
+static const unsigned int saw2_volt_table[] = {
+ 850000, 862500, 875000, 887500, 900000, 912500,
+ 925000, 937500, 950000, 962500, 975000, 987500,
+ 1000000, 1012500, 1025000, 1037500, 1050000, 1062500,
+ 1075000, 1087500, 1100000, 1112500, 1125000, 1137000,
+ 1137500, 1150000, 1162500, 1175000, 1187500, 1200000,
+ 1212500, 1225000, 1237500, 1250000, 1287500
+};
+
+static const u32 vlevels[] = {
+ 0x94, 0x94, 0x94, 0x94, 0x94, 0x94,
+ 0x94, 0x96, 0x96, 0x96, 0x98, 0x98,
+ 0x98, 0x9a, 0x9a, 0x9e, 0xa0, 0xa0,
+ 0xa2, 0xa6, 0xa8, 0xa8, 0xaa, 0xaa,
+ 0xac, 0xac, 0xac, 0xac, 0xac, 0xac,
+ 0xac, 0xac, 0xac, 0xac, 0xac
+};
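+
+/* vlevels[i] is the PMIC voltage code programmed for saw2_volt_table[i] */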
+
+static int saw2_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct spm_driver_data *drv = rdev_get_drvdata(rdev);
+
+ return drv->vreg->uV;
+}
+
+static void spm_smp_set_vdd(void *data)
+{
+ struct spm_vlevel_data *vdata = (struct spm_vlevel_data *)data;
+ struct spm_driver_data *drv = vdata->drv;
+ struct saw2_vreg *vreg = drv->vreg;
+ unsigned long sel = vdata->selector;
+ u32 new_vlevel;
+ u32 vctl, data0, data1;
+ int timeout_us = 50;
+
+ if (vreg->vlevel == vlevels[sel])
+ return;
+
+ vctl = spm_register_read(drv, SPM_REG_VCTL);
+ data0 = spm_register_read(drv, SPM_REG_PMIC_DATA_0);
+ data1 = spm_register_read(drv, SPM_REG_PMIC_DATA_1);
+
+ vctl &= ~0xff;
+ vctl |= vlevels[sel];
+
+ data0 &= ~0xff;
+ data0 |= vlevels[sel];
+
+ data1 &= ~0x3f;
+ data1 |= (vlevels[sel] & 0x3f);
+ data1 &= ~0x3F0000;
+ data1 |= ((vlevels[sel] & 0x3f) << 16);
+
+ spm_register_write(drv, SPM_REG_RST, 1);
+ spm_register_write(drv, SPM_REG_VCTL, vctl);
+ spm_register_write(drv, SPM_REG_PMIC_DATA_0, data0);
+ spm_register_write(drv, SPM_REG_PMIC_DATA_1, data1);
+
+ do {
+ new_vlevel = spm_register_read(drv, SPM_REG_STS_1) & 0xff;
+ if (new_vlevel == vlevels[sel])
+ break;
+ udelay(1);
+ } while (--timeout_us);
+
+ if (!timeout_us) {
+ pr_info("%s: Voltage not changed %#x\n", __func__, new_vlevel);
+ return;
+ }
+
+ if (saw2_volt_table[sel] > vreg->uV) {
+		/* Wait for the voltage to stabilize. */
+ udelay((saw2_volt_table[sel] - vreg->uV) / REGULATOR_SLEW_RATE);
+ }
+
+ vreg->uV = saw2_volt_table[sel];
+ vreg->vlevel = vlevels[sel];
+}
+
+static int saw2_regulator_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct spm_driver_data *drv = rdev_get_drvdata(rdev);
+ struct spm_vlevel_data data;
+ int ret;
+ int cpu = rdev_get_id(rdev);
+
+ data.drv = drv;
+ data.selector = selector;
+
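+	/*
+	 * The SAW instance is per-CPU, so apply the update from the CPU
+	 * that owns this SPM instead of poking its registers remotely.
+	 */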
+ ret = smp_call_function_single(cpu, spm_smp_set_vdd, &data, true);
+
+ return ret;
+}
+
+static struct regulator_ops saw2_regulator_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .get_voltage = saw2_regulator_get_voltage,
+ .set_voltage_sel = saw2_regulator_set_voltage_sel,
+};
+
+static struct regulator_desc saw2_regulator = {
+ .owner = THIS_MODULE,
+ .type = REGULATOR_VOLTAGE,
+ .ops = &saw2_regulator_ops,
+ .volt_table = saw2_volt_table,
+ .n_voltages = ARRAY_SIZE(saw2_volt_table),
+};
+
+static int register_saw2_regulator(struct spm_driver_data *drv,
+ struct platform_device *pdev, int cpu)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct saw2_vreg *vreg;
+ struct regulator_config config = { };
+
+ vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL);
+ if (!vreg)
+ return -ENOMEM;
+
+ drv->vreg = vreg;
+ config.driver_data = drv;
+ config.dev = &pdev->dev;
+ config.of_node = np;
+
+ vreg->rdesc = saw2_regulator;
+ vreg->rdesc.id = cpu;
+ vreg->rdesc.name = of_get_property(np, "regulator-name", NULL);
+ config.init_data = of_get_regulator_init_data(&pdev->dev,
+ pdev->dev.of_node,
+ &vreg->rdesc);
+
+ vreg->rdev = devm_regulator_register(&pdev->dev, &vreg->rdesc, &config);
+ if (IS_ERR(vreg->rdev))
+ return PTR_ERR(vreg->rdev);
+
+ return 0;
+}
+
static struct spm_driver_data *spm_get_drv(struct platform_device *pdev,
int *spm_cpu)
{
@@ -327,7 +492,7 @@ static int spm_dev_probe(struct platform_device *pdev)
struct resource *res;
const struct of_device_id *match_id;
void __iomem *addr;
- int cpu;
+ int cpu, ret;
drv = spm_get_drv(pdev, &cpu);
if (!drv)
@@ -356,6 +521,7 @@ static int spm_dev_probe(struct platform_device *pdev)
* machine, before the sequences are completely written.
*/
spm_register_write(drv, SPM_REG_CFG, drv->reg_data->spm_cfg);
spm_register_write(drv, SPM_REG_DLY, drv->reg_data->spm_dly);
spm_register_write(drv, SPM_REG_PMIC_DLY, drv->reg_data->pmic_dly);
spm_register_write(drv, SPM_REG_PMIC_DATA_0,
@@ -368,6 +534,10 @@ static int spm_dev_probe(struct platform_device *pdev)
per_cpu(cpu_spm_drv, cpu) = drv;
+ ret = register_saw2_regulator(drv, pdev, cpu);
+ if (ret)
+ dev_err(&pdev->dev, "error registering SAW2 regulator\n");
+
return 0;
}
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 5aabc4bc0d75..d49f2bd3d4d7 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -374,4 +374,9 @@ config QCOM_SPMI_TEMP_ALARM
real time die temperature if an ADC is present or an estimate of the
temperature based upon the over temperature stage value.
+menu "Qualcomm thermal drivers"
+depends on ARCH_QCOM && OF
+source "drivers/thermal/qcom/Kconfig"
+endmenu
+
endif
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 26f160809959..cdaa55f9d4eb 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -43,5 +43,6 @@ obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/
obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/
obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o
obj-$(CONFIG_ST_THERMAL) += st/
+obj-$(CONFIG_QCOM_TSENS) += qcom/
obj-$(CONFIG_TEGRA_SOCTHERM) += tegra_soctherm.o
obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
new file mode 100644
index 000000000000..f7e8e4074d80
--- /dev/null
+++ b/drivers/thermal/qcom/Kconfig
@@ -0,0 +1,10 @@
+config QCOM_TSENS
+ tristate "Qualcomm TSENS Temperature Alarm"
+ depends on THERMAL
+ depends on QCOM_QFPROM
+	help
+	  This enables the thermal sysfs driver for the TSENS device. It shows
+	  up in sysfs as a thermal zone with multiple trip points. Disabling
+	  the thermal zone device via the mode file disables the sensor. The
+	  driver can also set threshold temperatures for both hot and cold
+	  trips and notify when a threshold is reached.
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
new file mode 100644
index 000000000000..f3cefd19e898
--- /dev/null
+++ b/drivers/thermal/qcom/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_QCOM_TSENS) += qcom_tsens.o
+qcom_tsens-y += tsens.o tsens-common.o tsens-8916.o tsens-8974.o tsens-8960.o
diff --git a/drivers/thermal/qcom/tsens-8916.c b/drivers/thermal/qcom/tsens-8916.c
new file mode 100644
index 000000000000..a69aea35bf81
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-8916.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include "tsens.h"
+
+/* eeprom layout data for 8916 */
+#define BASE0_MASK 0x0000007f
+#define BASE1_MASK 0xfe000000
+#define BASE0_SHIFT 0
+#define BASE1_SHIFT 25
+
+#define S0_P1_MASK 0x00000f80
+#define S1_P1_MASK 0x003e0000
+#define S2_P1_MASK 0xf8000000
+#define S3_P1_MASK 0x000003e0
+#define S4_P1_MASK 0x000f8000
+
+#define S0_P2_MASK 0x0001f000
+#define S1_P2_MASK 0x07c00000
+#define S2_P2_MASK 0x0000001f
+#define S3_P2_MASK 0x00007c00
+#define S4_P2_MASK 0x01f00000
+
+#define S0_P1_SHIFT 7
+#define S1_P1_SHIFT 17
+#define S2_P1_SHIFT 27
+#define S3_P1_SHIFT 5
+#define S4_P1_SHIFT 15
+
+#define S0_P2_SHIFT 12
+#define S1_P2_SHIFT 22
+#define S2_P2_SHIFT 0
+#define S3_P2_SHIFT 10
+#define S4_P2_SHIFT 20
+
+#define CAL_SEL_MASK 0xe0000000
+#define CAL_SEL_SHIFT 29
+
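+/*
+ * Each calibration point is a small bitfield packed into the 32-bit
+ * qfprom words; e.g. sensor 0's point-1 code sits in bits [11:7] of
+ * word 0 and is recovered as (word[0] & S0_P1_MASK) >> S0_P1_SHIFT.
+ */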
+static int calibrate_8916(struct tsens_device *tmdev)
+{
+ int base0 = 0, base1 = 0, i;
+ u32 p1[5], p2[5];
+ int mode = 0;
+ u32 *qfprom_cdata, *qfprom_csel;
+
+ qfprom_cdata = (u32 *)qfprom_read(tmdev->dev, "calib");
+ if (IS_ERR(qfprom_cdata))
+ return PTR_ERR(qfprom_cdata);
+
+ qfprom_csel = (u32 *)qfprom_read(tmdev->dev, "calib_sel");
+ if (IS_ERR(qfprom_csel))
+ return PTR_ERR(qfprom_csel);
+
+ mode = (qfprom_csel[0] & CAL_SEL_MASK) >> CAL_SEL_SHIFT;
+ dev_dbg(tmdev->dev, "calibration mode is %d\n", mode);
+
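+	/*
+	 * The fused codes are stored truncated; adding the base value and
+	 * shifting left by 3 rebuilds the full ADC code for each sensor.
+	 */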
+ switch (mode) {
+ case TWO_PT_CALIB:
+ base1 = (qfprom_cdata[1] & BASE1_MASK) >> BASE1_SHIFT;
+ p2[0] = (qfprom_cdata[0] & S0_P2_MASK) >> S0_P2_SHIFT;
+ p2[1] = (qfprom_cdata[0] & S1_P2_MASK) >> S1_P2_SHIFT;
+ p2[2] = (qfprom_cdata[1] & S2_P2_MASK) >> S2_P2_SHIFT;
+ p2[3] = (qfprom_cdata[1] & S3_P2_MASK) >> S3_P2_SHIFT;
+ p2[4] = (qfprom_cdata[1] & S4_P2_MASK) >> S4_P2_SHIFT;
+ for (i = 0; i < tmdev->num_sensors; i++)
+ p2[i] = ((base1 + p2[i]) << 3);
+ /* Fall through */
+ case ONE_PT_CALIB2:
+ base0 = (qfprom_cdata[0] & BASE0_MASK);
+ p1[0] = (qfprom_cdata[0] & S0_P1_MASK) >> S0_P1_SHIFT;
+ p1[1] = (qfprom_cdata[0] & S1_P1_MASK) >> S1_P1_SHIFT;
+ p1[2] = (qfprom_cdata[0] & S2_P1_MASK) >> S2_P1_SHIFT;
+ p1[3] = (qfprom_cdata[1] & S3_P1_MASK) >> S3_P1_SHIFT;
+ p1[4] = (qfprom_cdata[1] & S4_P1_MASK) >> S4_P1_SHIFT;
+ for (i = 0; i < tmdev->num_sensors; i++)
+			p1[i] = ((base0 + p1[i]) << 3);
+ break;
+ default:
+ for (i = 0; i < tmdev->num_sensors; i++) {
+ p1[i] = 500;
+ p2[i] = 780;
+ }
+ break;
+ }
+
+ compute_intercept_slope(tmdev, p1, p2, mode);
+
+ return 0;
+}
+
+const struct tsens_ops ops_8916 = {
+ .init = init_common,
+ .calibrate = calibrate_8916,
+ .get_temp = get_temp_common,
+};
diff --git a/drivers/thermal/qcom/tsens-8960.c b/drivers/thermal/qcom/tsens-8960.c
new file mode 100644
index 000000000000..00f45e7f872f
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-8960.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include "tsens.h"
+
+#define CAL_MDEGC 30000
+
+#define CONFIG_ADDR 0x3640
+#define CONFIG_ADDR_8660 0x3620
+/* CONFIG_ADDR bitmasks */
+#define CONFIG 0x9b
+#define CONFIG_MASK 0xf
+#define CONFIG_8660 1
+#define CONFIG_SHIFT_8660 28
+#define CONFIG_MASK_8660 (3 << CONFIG_SHIFT_8660)
+
+#define STATUS_CNTL_ADDR_8064 0x3660
+#define CNTL_ADDR 0x3620
+/* CNTL_ADDR bitmasks */
+#define EN BIT(0)
+#define SW_RST BIT(1)
+#define SENSOR0_EN BIT(3)
+#define SLP_CLK_ENA BIT(26)
+#define SLP_CLK_ENA_8660 BIT(24)
+#define MEASURE_PERIOD 1
+#define SENSOR0_SHIFT 3
+
+/* INT_STATUS_ADDR bitmasks */
+#define MIN_STATUS_MASK BIT(0)
+#define LOWER_STATUS_CLR BIT(1)
+#define UPPER_STATUS_CLR BIT(2)
+#define MAX_STATUS_MASK BIT(3)
+
+#define THRESHOLD_ADDR 0x3624
+/* THRESHOLD_ADDR bitmasks */
+#define THRESHOLD_MAX_LIMIT_SHIFT 24
+#define THRESHOLD_MIN_LIMIT_SHIFT 16
+#define THRESHOLD_UPPER_LIMIT_SHIFT 8
+#define THRESHOLD_LOWER_LIMIT_SHIFT 0
+
+/* Initial temperature threshold values */
+#define LOWER_LIMIT_TH 0x50
+#define UPPER_LIMIT_TH 0xdf
+#define MIN_LIMIT_TH 0x0
+#define MAX_LIMIT_TH 0xff
+
+#define S0_STATUS_ADDR 0x3628
+#define INT_STATUS_ADDR 0x363c
+#define TRDY_MASK BIT(7)
+
+static int suspend_8960(struct tsens_device *tmdev)
+{
+ int ret;
+ unsigned int mask;
+ struct regmap *map = tmdev->map;
+
+ ret = regmap_read(map, THRESHOLD_ADDR, &tmdev->ctx.threshold);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(map, CNTL_ADDR, &tmdev->ctx.control);
+ if (ret)
+ return ret;
+
+ if (tmdev->num_sensors > 1)
+ mask = SLP_CLK_ENA | EN;
+ else
+ mask = SLP_CLK_ENA_8660 | EN;
+
+ ret = regmap_update_bits(map, CNTL_ADDR, mask, 0);
+ if (ret)
+ return ret;
+
+ tmdev->trdy = false;
+
+ return 0;
+}
+
+static int resume_8960(struct tsens_device *tmdev)
+{
+ int ret;
+ struct regmap *map = tmdev->map;
+
+ ret = regmap_update_bits(map, CNTL_ADDR, SW_RST, SW_RST);
+ if (ret)
+ return ret;
+
+	/*
+	 * A separate CONFIG restore is only unnecessary on 8660, where the
+	 * config is part of the CNTL register and is restored along with it.
+	 */
+ if (tmdev->num_sensors > 1) {
+ ret = regmap_update_bits(map, CONFIG_ADDR, CONFIG_MASK, CONFIG);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_write(map, THRESHOLD_ADDR, tmdev->ctx.threshold);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(map, CNTL_ADDR, tmdev->ctx.control);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int enable_8960(struct tsens_device *tmdev, int id)
+{
+ int ret;
+ u32 reg, mask;
+
+ ret = regmap_read(tmdev->map, CNTL_ADDR, &reg);
+ if (ret)
+ return ret;
+
+ mask = BIT(id + SENSOR0_SHIFT);
+ ret = regmap_write(tmdev->map, CNTL_ADDR, reg | SW_RST);
+ if (ret)
+ return ret;
+
+ if (tmdev->num_sensors > 1)
+ reg |= mask | SLP_CLK_ENA | EN;
+ else
+ reg |= mask | SLP_CLK_ENA_8660 | EN;
+
+ tmdev->trdy = false;
+ ret = regmap_write(tmdev->map, CNTL_ADDR, reg);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void disable_8960(struct tsens_device *tmdev)
+{
+ int ret;
+ u32 reg_cntl;
+ u32 mask;
+
+ mask = GENMASK(tmdev->num_sensors - 1, 0);
+ mask <<= SENSOR0_SHIFT;
+ mask |= EN;
+
+ ret = regmap_read(tmdev->map, CNTL_ADDR, &reg_cntl);
+ if (ret)
+ return;
+
+ reg_cntl &= ~mask;
+
+ if (tmdev->num_sensors > 1)
+ reg_cntl &= ~SLP_CLK_ENA;
+ else
+ reg_cntl &= ~SLP_CLK_ENA_8660;
+
+ regmap_write(tmdev->map, CNTL_ADDR, reg_cntl);
+}
+
+static int init_8960(struct tsens_device *tmdev)
+{
+ int ret, i;
+ u32 reg_cntl;
+
+ tmdev->map = dev_get_regmap(tmdev->dev, NULL);
+ if (!tmdev->map)
+ return -ENODEV;
+
+ /*
+ * The status registers for each sensor are discontiguous
+ * because some SoCs have 5 sensors while others have more
+	 * but the control registers stay in the same place, i.e.
+ * directly after the first 5 status registers.
+ */
+	for (i = 0; i < tmdev->num_sensors; i++) {
+		if (i >= 5)
+			tmdev->sensor[i].status = S0_STATUS_ADDR + 40;
+		else
+			tmdev->sensor[i].status = S0_STATUS_ADDR;
+		tmdev->sensor[i].status += i * 4;
+	}
+
+ reg_cntl = SW_RST;
+ ret = regmap_update_bits(tmdev->map, CNTL_ADDR, SW_RST, reg_cntl);
+ if (ret)
+ return ret;
+
+ if (tmdev->num_sensors > 1) {
+ reg_cntl |= SLP_CLK_ENA | (MEASURE_PERIOD << 18);
+ reg_cntl &= ~SW_RST;
+ ret = regmap_update_bits(tmdev->map, CONFIG_ADDR,
+ CONFIG_MASK, CONFIG);
+ } else {
+ reg_cntl |= SLP_CLK_ENA_8660 | (MEASURE_PERIOD << 16);
+ reg_cntl &= ~CONFIG_MASK_8660;
+ reg_cntl |= CONFIG_8660 << CONFIG_SHIFT_8660;
+ }
+
+ reg_cntl |= GENMASK(tmdev->num_sensors - 1, 0) << SENSOR0_SHIFT;
+ ret = regmap_write(tmdev->map, CNTL_ADDR, reg_cntl);
+ if (ret)
+ return ret;
+
+ reg_cntl |= EN;
+ ret = regmap_write(tmdev->map, CNTL_ADDR, reg_cntl);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int calibrate_8960(struct tsens_device *tmdev)
+{
+ int i;
+ char *data;
+ ssize_t num_read = tmdev->num_sensors;
+ struct tsens_sensor *s = tmdev->sensor;
+
+ data = qfprom_read(tmdev->dev, "calib");
+ if (IS_ERR(data))
+ data = qfprom_read(tmdev->dev, "calib_backup");
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ for (i = 0; i < num_read; i++, s++)
+ s->offset = CAL_MDEGC - s->slope * data[i];
+
+ return 0;
+}
+
+/* Temperature on y axis and ADC-code on x-axis */
+static inline int code_to_mdegC(u32 adc_code, const struct tsens_sensor *s)
+{
+ return adc_code * s->slope + s->offset;
+}
+
+static int get_temp_8960(struct tsens_device *tmdev, int id, int *temp)
+{
+ int ret;
+ u32 code, trdy;
+ const struct tsens_sensor *s = &tmdev->sensor[id];
+
+ if (!tmdev->trdy) {
+ ret = regmap_read(tmdev->map, INT_STATUS_ADDR, &trdy);
+ if (ret)
+ return ret;
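+		/* Poll until the first conversion after (re)enable is ready */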
+ while (!(trdy & TRDY_MASK)) {
+ usleep_range(1000, 1100);
+ regmap_read(tmdev->map, INT_STATUS_ADDR, &trdy);
+ }
+ tmdev->trdy = true;
+ }
+
+ ret = regmap_read(tmdev->map, s->status, &code);
+ if (ret)
+ return ret;
+
+ *temp = code_to_mdegC(code, s);
+
+ dev_dbg(tmdev->dev, "Sensor%d temp is: %d", id, *temp);
+
+ return 0;
+}
+
+const struct tsens_ops ops_8960 = {
+ .init = init_8960,
+ .calibrate = calibrate_8960,
+ .get_temp = get_temp_8960,
+ .enable = enable_8960,
+ .disable = disable_8960,
+ .suspend = suspend_8960,
+ .resume = resume_8960,
+};
diff --git a/drivers/thermal/qcom/tsens-8974.c b/drivers/thermal/qcom/tsens-8974.c
new file mode 100644
index 000000000000..19d9258a439f
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-8974.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include "tsens.h"
+
+/* eeprom layout data for 8974 */
+#define BASE1_MASK 0xff
+#define S0_P1_MASK 0x3f00
+#define S1_P1_MASK 0xfc000
+#define S2_P1_MASK 0x3f00000
+#define S3_P1_MASK 0xfc000000
+#define S4_P1_MASK 0x3f
+#define S5_P1_MASK 0xfc0
+#define S6_P1_MASK 0x3f000
+#define S7_P1_MASK 0xfc0000
+#define S8_P1_MASK 0x3f000000
+#define S8_P1_MASK_BKP 0x3f
+#define S9_P1_MASK 0x3f
+#define S9_P1_MASK_BKP 0xfc0
+#define S10_P1_MASK 0xfc0
+#define S10_P1_MASK_BKP 0x3f000
+#define CAL_SEL_0_1 0xc0000000
+#define CAL_SEL_2 0x40000000
+#define CAL_SEL_SHIFT 30
+#define CAL_SEL_SHIFT_2 28
+
+#define S0_P1_SHIFT 8
+#define S1_P1_SHIFT 14
+#define S2_P1_SHIFT 20
+#define S3_P1_SHIFT 26
+#define S5_P1_SHIFT 6
+#define S6_P1_SHIFT 12
+#define S7_P1_SHIFT 18
+#define S8_P1_SHIFT 24
+#define S9_P1_BKP_SHIFT 6
+#define S10_P1_SHIFT 6
+#define S10_P1_BKP_SHIFT 12
+
+#define BASE2_SHIFT 12
+#define BASE2_BKP_SHIFT 18
+#define S0_P2_SHIFT 20
+#define S0_P2_BKP_SHIFT 26
+#define S1_P2_SHIFT 26
+#define S2_P2_BKP_SHIFT 6
+#define S3_P2_SHIFT 6
+#define S3_P2_BKP_SHIFT 12
+#define S4_P2_SHIFT 12
+#define S4_P2_BKP_SHIFT 18
+#define S5_P2_SHIFT 18
+#define S5_P2_BKP_SHIFT 24
+#define S6_P2_SHIFT 24
+#define S7_P2_BKP_SHIFT 6
+#define S8_P2_SHIFT 6
+#define S8_P2_BKP_SHIFT 12
+#define S9_P2_SHIFT 12
+#define S9_P2_BKP_SHIFT 18
+#define S10_P2_SHIFT 18
+#define S10_P2_BKP_SHIFT 24
+
+#define BASE2_MASK 0xff000
+#define BASE2_BKP_MASK 0xfc0000
+#define S0_P2_MASK 0x3f00000
+#define S0_P2_BKP_MASK 0xfc000000
+#define S1_P2_MASK 0xfc000000
+#define S1_P2_BKP_MASK 0x3f
+#define S2_P2_MASK 0x3f
+#define S2_P2_BKP_MASK 0xfc0
+#define S3_P2_MASK 0xfc0
+#define S3_P2_BKP_MASK 0x3f000
+#define S4_P2_MASK 0x3f000
+#define S4_P2_BKP_MASK 0xfc0000
+#define S5_P2_MASK 0xfc0000
+#define S5_P2_BKP_MASK 0x3f000000
+#define S6_P2_MASK 0x3f000000
+#define S6_P2_BKP_MASK 0x3f
+#define S7_P2_MASK 0x3f
+#define S7_P2_BKP_MASK 0xfc0
+#define S8_P2_MASK 0xfc0
+#define S8_P2_BKP_MASK 0x3f000
+#define S9_P2_MASK 0x3f000
+#define S9_P2_BKP_MASK 0xfc0000
+#define S10_P2_MASK 0xfc0000
+#define S10_P2_BKP_MASK 0x3f000000
+
+#define BKP_SEL 0x3
+#define BKP_REDUN_SEL 0xe0000000
+#define BKP_REDUN_SHIFT 29
+
+#define BIT_APPEND 0x3
+
+static int calibrate_8974(struct tsens_device *tmdev)
+{
+ int base1 = 0, base2 = 0, i;
+ u32 p1[11], p2[11];
+ int mode = 0;
+ u32 *calib, *bkp;
+ u32 calib_redun_sel;
+
+ calib = (u32 *)qfprom_read(tmdev->dev, "calib");
+ if (IS_ERR(calib))
+ return PTR_ERR(calib);
+
+ bkp = (u32 *)qfprom_read(tmdev->dev, "calib_backup");
+ if (IS_ERR(bkp))
+ return PTR_ERR(bkp);
+
+ calib_redun_sel = bkp[1] & BKP_REDUN_SEL;
+ calib_redun_sel >>= BKP_REDUN_SHIFT;
+
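+	/*
+	 * If the redundancy selector says the backup copy is in use, the
+	 * calibration points come from the backup fuse words instead.
+	 */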
+ if (calib_redun_sel == BKP_SEL) {
+ mode = (calib[4] & CAL_SEL_0_1) >> CAL_SEL_SHIFT;
+ mode |= (calib[5] & CAL_SEL_2) >> CAL_SEL_SHIFT_2;
+
+ switch (mode) {
+ case TWO_PT_CALIB:
+ base2 = (bkp[2] & BASE2_BKP_MASK) >> BASE2_BKP_SHIFT;
+ p2[0] = (bkp[2] & S0_P2_BKP_MASK) >> S0_P2_BKP_SHIFT;
+ p2[1] = (bkp[3] & S1_P2_BKP_MASK);
+ p2[2] = (bkp[3] & S2_P2_BKP_MASK) >> S2_P2_BKP_SHIFT;
+ p2[3] = (bkp[3] & S3_P2_BKP_MASK) >> S3_P2_BKP_SHIFT;
+ p2[4] = (bkp[3] & S4_P2_BKP_MASK) >> S4_P2_BKP_SHIFT;
+ p2[5] = (calib[4] & S5_P2_BKP_MASK) >> S5_P2_BKP_SHIFT;
+ p2[6] = (calib[5] & S6_P2_BKP_MASK);
+ p2[7] = (calib[5] & S7_P2_BKP_MASK) >> S7_P2_BKP_SHIFT;
+ p2[8] = (calib[5] & S8_P2_BKP_MASK) >> S8_P2_BKP_SHIFT;
+ p2[9] = (calib[5] & S9_P2_BKP_MASK) >> S9_P2_BKP_SHIFT;
+ p2[10] = (calib[5] & S10_P2_BKP_MASK) >> S10_P2_BKP_SHIFT;
+ /* Fall through */
+ case ONE_PT_CALIB:
+ case ONE_PT_CALIB2:
+ base1 = bkp[0] & BASE1_MASK;
+ p1[0] = (bkp[0] & S0_P1_MASK) >> S0_P1_SHIFT;
+ p1[1] = (bkp[0] & S1_P1_MASK) >> S1_P1_SHIFT;
+ p1[2] = (bkp[0] & S2_P1_MASK) >> S2_P1_SHIFT;
+ p1[3] = (bkp[0] & S3_P1_MASK) >> S3_P1_SHIFT;
+ p1[4] = (bkp[1] & S4_P1_MASK);
+ p1[5] = (bkp[1] & S5_P1_MASK) >> S5_P1_SHIFT;
+ p1[6] = (bkp[1] & S6_P1_MASK) >> S6_P1_SHIFT;
+ p1[7] = (bkp[1] & S7_P1_MASK) >> S7_P1_SHIFT;
+ p1[8] = (bkp[2] & S8_P1_MASK_BKP) >> S8_P1_SHIFT;
+ p1[9] = (bkp[2] & S9_P1_MASK_BKP) >> S9_P1_BKP_SHIFT;
+ p1[10] = (bkp[2] & S10_P1_MASK_BKP) >> S10_P1_BKP_SHIFT;
+ break;
+ }
+ } else {
+ mode = (calib[1] & CAL_SEL_0_1) >> CAL_SEL_SHIFT;
+ mode |= (calib[3] & CAL_SEL_2) >> CAL_SEL_SHIFT_2;
+
+ switch (mode) {
+ case TWO_PT_CALIB:
+ base2 = (calib[2] & BASE2_MASK) >> BASE2_SHIFT;
+ p2[0] = (calib[2] & S0_P2_MASK) >> S0_P2_SHIFT;
+ p2[1] = (calib[2] & S1_P2_MASK) >> S1_P2_SHIFT;
+ p2[2] = (calib[3] & S2_P2_MASK);
+ p2[3] = (calib[3] & S3_P2_MASK) >> S3_P2_SHIFT;
+ p2[4] = (calib[3] & S4_P2_MASK) >> S4_P2_SHIFT;
+ p2[5] = (calib[3] & S5_P2_MASK) >> S5_P2_SHIFT;
+ p2[6] = (calib[3] & S6_P2_MASK) >> S6_P2_SHIFT;
+ p2[7] = (calib[4] & S7_P2_MASK);
+ p2[8] = (calib[4] & S8_P2_MASK) >> S8_P2_SHIFT;
+ p2[9] = (calib[4] & S9_P2_MASK) >> S9_P2_SHIFT;
+ p2[10] = (calib[4] & S10_P2_MASK) >> S10_P2_SHIFT;
+ /* Fall through */
+ case ONE_PT_CALIB:
+ case ONE_PT_CALIB2:
+ base1 = calib[0] & BASE1_MASK;
+ p1[0] = (calib[0] & S0_P1_MASK) >> S0_P1_SHIFT;
+ p1[1] = (calib[0] & S1_P1_MASK) >> S1_P1_SHIFT;
+ p1[2] = (calib[0] & S2_P1_MASK) >> S2_P1_SHIFT;
+ p1[3] = (calib[0] & S3_P1_MASK) >> S3_P1_SHIFT;
+ p1[4] = (calib[1] & S4_P1_MASK);
+ p1[5] = (calib[1] & S5_P1_MASK) >> S5_P1_SHIFT;
+ p1[6] = (calib[1] & S6_P1_MASK) >> S6_P1_SHIFT;
+ p1[7] = (calib[1] & S7_P1_MASK) >> S7_P1_SHIFT;
+ p1[8] = (calib[1] & S8_P1_MASK) >> S8_P1_SHIFT;
+ p1[9] = (calib[2] & S9_P1_MASK);
+ p1[10] = (calib[2] & S10_P1_MASK) >> S10_P1_SHIFT;
+ break;
+ }
+ }
+
+ switch (mode) {
+ case ONE_PT_CALIB:
+ for (i = 0; i < tmdev->num_sensors; i++)
+ p1[i] += (base1 << 2) | BIT_APPEND;
+ break;
+ case TWO_PT_CALIB:
+ for (i = 0; i < tmdev->num_sensors; i++) {
+ p2[i] += base2;
+ p2[i] <<= 2;
+ p2[i] |= BIT_APPEND;
+ }
+ /* Fall through */
+ case ONE_PT_CALIB2:
+ for (i = 0; i < tmdev->num_sensors; i++) {
+ p1[i] += base1;
+ p1[i] <<= 2;
+ p1[i] |= BIT_APPEND;
+ }
+ break;
+ default:
+ for (i = 0; i < tmdev->num_sensors; i++)
+ p2[i] = 780;
+ p1[0] = 502;
+ p1[1] = 509;
+ p1[2] = 503;
+ p1[3] = 509;
+ p1[4] = 505;
+ p1[5] = 509;
+ p1[6] = 507;
+ p1[7] = 510;
+ p1[8] = 508;
+ p1[9] = 509;
+ p1[10] = 508;
+ break;
+ }
+
+ compute_intercept_slope(tmdev, p1, p2, mode);
+
+ return 0;
+}
+
+const struct tsens_ops ops_8974 = {
+ .init = init_common,
+ .calibrate = calibrate_8974,
+ .get_temp = get_temp_common,
+};
diff --git a/drivers/thermal/qcom/tsens-common.c b/drivers/thermal/qcom/tsens-common.c
new file mode 100644
index 000000000000..4acf52c3533e
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-common.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include "tsens.h"
+
+#define S0_ST_ADDR 0x1030
+#define SN_ADDR_OFFSET 0x4
+#define SN_ST_TEMP_MASK 0x3ff
+#define CAL_DEGC_PT1 30
+#define CAL_DEGC_PT2 120
+#define SLOPE_FACTOR 1000
+
+char *qfprom_read(struct device *dev, const char *cname)
+{
+ struct nvmem_cell *cell;
+ ssize_t data;
+ char *ret;
+
+ cell = nvmem_cell_get(dev, cname);
+ if (IS_ERR(cell))
+ return ERR_CAST(cell);
+
+ ret = nvmem_cell_read(cell, &data);
+ nvmem_cell_put(cell);
+ return ret;
+}
+
+void compute_intercept_slope(struct tsens_device *tmdev, u32 *p1,
+ u32 *p2, u32 mode)
+{
+ int i;
+ int num, den;
+
+ for (i = 0; i < tmdev->num_sensors; i++) {
+ dev_dbg(tmdev->dev,
+ "sensor%d - data_point1:%#x data_point2:%#x\n",
+ i, p1[i], p2[i]);
+
+ if (mode == TWO_PT_CALIB) {
+			/*
+			 * slope (m) = (y2 - y1) / (x2 - x1)
+			 *	     = (adc_code2 - adc_code1) /
+			 *	       (temp_120_degc - temp_30_degc)
+			 */
+ num = p2[i] - p1[i];
+ num *= SLOPE_FACTOR;
+ den = CAL_DEGC_PT2 - CAL_DEGC_PT1;
+ tmdev->sensor[i].slope = num / den;
+ }
+
+ tmdev->sensor[i].offset = (p1[i] * SLOPE_FACTOR) -
+ (CAL_DEGC_PT1 *
+ tmdev->sensor[i].slope);
+ dev_dbg(tmdev->dev, "offset:%d\n", tmdev->sensor[i].offset);
+ }
+}
+
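+/*
+ * Worked example with hypothetical codes: p1 = 600 at 30 degC and
+ * p2 = 900 at 120 degC give slope = (900 - 600) * 1000 / 90 = 3333
+ * and offset = 600 * 1000 - 30 * 3333 = 500010; code_to_degc() then
+ * maps an ADC code of 750 to (750 * 1000 - 500010) / 3333 ~= 75 degC.
+ */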
+static inline int code_to_degc(u32 adc_code, const struct tsens_sensor *s)
+{
+ int degc, num, den;
+
+ num = (adc_code * SLOPE_FACTOR) - s->offset;
+ den = s->slope;
+
+ if (num > 0)
+ degc = num + (den / 2);
+ else if (num < 0)
+ degc = num - (den / 2);
+ else
+ degc = num;
+
+ degc /= den;
+
+ return degc;
+}
+
+int get_temp_common(struct tsens_device *tmdev, int id, int *temp)
+{
+ struct tsens_sensor *s = &tmdev->sensor[id];
+ u32 code;
+ unsigned int sensor_addr;
+ int last_temp = 0, ret;
+
+ sensor_addr = S0_ST_ADDR + s->hw_id * SN_ADDR_OFFSET;
+ ret = regmap_read(tmdev->map, sensor_addr, &code);
+ if (ret)
+ return ret;
+ last_temp = code & SN_ST_TEMP_MASK;
+
+ *temp = code_to_degc(last_temp, s) * 1000;
+
+ return 0;
+}
+
+static const struct regmap_config tsens_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+int init_common(struct tsens_device *tmdev)
+{
+ void __iomem *base;
+
+	base = of_iomap(tmdev->dev->of_node, 0);
+	if (!base)
+		return -EINVAL;
+
+	tmdev->map = devm_regmap_init_mmio(tmdev->dev, base, &tsens_config);
+	if (IS_ERR(tmdev->map))
+		return PTR_ERR(tmdev->map);
+
+ return 0;
+}
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
new file mode 100644
index 000000000000..d640e8f329f6
--- /dev/null
+++ b/drivers/thermal/qcom/tsens.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+#include "tsens.h"
+
+static int tsens_get_temp(void *data, int *temp)
+{
+ const struct tsens_sensor *s = data;
+ struct tsens_device *tmdev = s->tmdev;
+
+ return tmdev->ops->get_temp(tmdev, s->id, temp);
+}
+
+static int tsens_get_trend(void *data, long *temp)
+{
+ const struct tsens_sensor *s = data;
+ struct tsens_device *tmdev = s->tmdev;
+
+ if (tmdev->ops->get_trend)
+ return tmdev->ops->get_trend(tmdev, s->id, temp);
+
+ return -ENOSYS;
+}
+
+static int tsens_suspend(struct device *dev)
+{
+ struct tsens_device *tmdev = dev_get_drvdata(dev);
+
+ if (tmdev->ops && tmdev->ops->suspend)
+ return tmdev->ops->suspend(tmdev);
+
+ return 0;
+}
+
+static int tsens_resume(struct device *dev)
+{
+ struct tsens_device *tmdev = dev_get_drvdata(dev);
+
+ if (tmdev->ops && tmdev->ops->resume)
+ return tmdev->ops->resume(tmdev);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tsens_pm_ops, tsens_suspend, tsens_resume);
+
+static const struct of_device_id tsens_table[] = {
+ {
+ .compatible = "qcom,msm8916-tsens",
+ .data = &ops_8916,
+ }, {
+ .compatible = "qcom,msm8974-tsens",
+ .data = &ops_8974,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tsens_table);
+
+static const struct thermal_zone_of_device_ops tsens_of_ops = {
+ .get_temp = tsens_get_temp,
+ .get_trend = tsens_get_trend,
+};
+
+static int tsens_register(struct tsens_device *tmdev)
+{
+ int i, ret;
+ struct thermal_zone_device *tzd;
+ u32 *hw_id, n = tmdev->num_sensors;
+ struct device_node *np = tmdev->dev->of_node;
+
+ hw_id = devm_kcalloc(tmdev->dev, n, sizeof(u32), GFP_KERNEL);
+ if (!hw_id)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(np, "qcom,sensor-id", hw_id, n);
+ for (i = 0; i < tmdev->num_sensors; i++) {
+ if (ret)
+ tmdev->sensor[i].hw_id = i;
+ else
+ tmdev->sensor[i].hw_id = hw_id[i];
+ tmdev->sensor[i].tmdev = tmdev;
+ tmdev->sensor[i].id = i;
+ tzd = thermal_zone_of_sensor_register(tmdev->dev, i,
+ &tmdev->sensor[i],
+ &tsens_of_ops);
+ if (IS_ERR(tzd))
+ continue;
+ tmdev->sensor[i].tzd = tzd;
+ if (tmdev->ops->enable)
+ tmdev->ops->enable(tmdev, i);
+ }
+ return 0;
+}
+
+static int tsens_probe(struct platform_device *pdev)
+{
+ int ret, i, num;
+ struct device *dev;
+ struct device_node *np;
+ struct tsens_sensor *s;
+ struct tsens_device *tmdev;
+ const struct of_device_id *id;
+
+ if (pdev->dev.of_node)
+ dev = &pdev->dev;
+ else
+ dev = pdev->dev.parent;
+
+ np = dev->of_node;
+
+ num = of_property_count_u32_elems(np, "qcom,tsens-slopes");
+ if (num <= 0) {
+ dev_err(dev, "invalid tsens slopes\n");
+ return -EINVAL;
+ }
+
+ tmdev = devm_kzalloc(dev, sizeof(*tmdev) +
+ num * sizeof(*s), GFP_KERNEL);
+ if (!tmdev)
+ return -ENOMEM;
+
+ tmdev->dev = dev;
+ tmdev->num_sensors = num;
+
+ for (i = 0, s = tmdev->sensor; i < tmdev->num_sensors; i++, s++)
+ of_property_read_u32_index(np, "qcom,tsens-slopes", i,
+ &s->slope);
+
+ id = of_match_node(tsens_table, np);
+ if (id)
+ tmdev->ops = id->data;
+ else
+ tmdev->ops = &ops_8960;
+
+ if (!tmdev->ops || !tmdev->ops->init || !tmdev->ops->calibrate ||
+ !tmdev->ops->get_temp)
+ return -EINVAL;
+
+ ret = tmdev->ops->init(tmdev);
+ if (ret < 0) {
+ dev_err(dev, "tsens init failed\n");
+ return ret;
+ }
+
+ ret = tmdev->ops->calibrate(tmdev);
+ if (ret < 0) {
+ dev_err(dev, "tsens calibration failed\n");
+ return ret;
+ }
+
+ ret = tsens_register(tmdev);
+
+ platform_set_drvdata(pdev, tmdev);
+
+ return ret;
+}
+
+static int tsens_remove(struct platform_device *pdev)
+{
+ int i;
+ struct tsens_device *tmdev = platform_get_drvdata(pdev);
+ struct thermal_zone_device *tzd;
+
+ if (tmdev->ops->disable)
+ tmdev->ops->disable(tmdev);
+
+ for (i = 0; i < tmdev->num_sensors; i++) {
+ tzd = tmdev->sensor[i].tzd;
+ thermal_zone_of_sensor_unregister(&pdev->dev, tzd);
+ }
+
+ return 0;
+}
+
+static struct platform_driver tsens_driver = {
+ .probe = tsens_probe,
+ .remove = tsens_remove,
+ .driver = {
+ .name = "qcom-tsens",
+ .pm = &tsens_pm_ops,
+ .of_match_table = tsens_table,
+ },
+};
+module_platform_driver(tsens_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QCOM Temperature Sensor driver");
+MODULE_ALIAS("platform:qcom-tsens");
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
new file mode 100644
index 000000000000..10e2e845a151
--- /dev/null
+++ b/drivers/thermal/qcom/tsens.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __QCOM_TSENS_H__
+#define __QCOM_TSENS_H__
+
+#define ONE_PT_CALIB 0x1
+#define ONE_PT_CALIB2 0x2
+#define TWO_PT_CALIB 0x3
+
+struct tsens_device;
+
+struct tsens_sensor {
+ struct tsens_device *tmdev;
+ struct thermal_zone_device *tzd;
+ int offset;
+ int id;
+ int hw_id;
+ u32 slope;
+ u32 status;
+};
+
+struct tsens_ops {
+ /* mandatory callbacks */
+ int (*init)(struct tsens_device *);
+ int (*calibrate)(struct tsens_device *);
+ int (*get_temp)(struct tsens_device *, int, int *);
+ /* optional callbacks */
+ int (*enable)(struct tsens_device *, int);
+ void (*disable)(struct tsens_device *);
+ int (*suspend)(struct tsens_device *);
+ int (*resume)(struct tsens_device *);
+ int (*get_trend)(struct tsens_device *, int, long *);
+};
+
+/* Registers to be saved/restored across a context loss */
+struct tsens_context {
+ int threshold;
+ int control;
+};
+
+struct tsens_device {
+ struct device *dev;
+ u32 num_sensors;
+ struct regmap *map;
+ struct regmap_field *status_field;
+ struct tsens_context ctx;
+ bool trdy;
+ const struct tsens_ops *ops;
+ struct tsens_sensor sensor[0];
+};
+
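+/* Helpers shared by the per-SoC tsens_ops implementations */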
+char *qfprom_read(struct device *, const char *);
+void compute_intercept_slope(struct tsens_device *, u32 *, u32 *, u32);
+int init_common(struct tsens_device *);
+int get_temp_common(struct tsens_device *, int, int *);
+
+extern const struct tsens_ops ops_8960, ops_8916, ops_8974;
+
+#endif /* __QCOM_TSENS_H__ */
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 687b1ea294b7..2a72799acc15 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1047,6 +1047,14 @@ config SERIAL_MSM
depends on ARCH_QCOM
select SERIAL_CORE
+config SERIAL_MSM_SMD
+ bool "Enable tty device interface for some SMD ports"
+ depends on ARCH_QCOM && QCOM_SMD
+ help
+ Enables userspace clients to read from and write to some streaming
+ SMD ports via a tty device interface on MSM chipsets.
+
config SERIAL_MSM_CONSOLE
bool "MSM serial console support"
depends on SERIAL_MSM=y
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 5ab41119b3dc..5b41963aa7f1 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -62,6 +62,7 @@ obj-$(CONFIG_SERIAL_SGI_IOC3) += ioc3_serial.o
obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
+obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o
obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o
obj-$(CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL) += nwpserial.o
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index b73889c8ed4b..2d779b1f010f 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -20,6 +20,8 @@
#endif
#include <linux/atomic.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/io.h>
@@ -31,6 +33,7 @@
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
+#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
@@ -39,6 +42,11 @@
#include "msm_serial.h"
+#define UARTDM_BURST_SIZE 16 /* in bytes */
+#define UARTDM_TX_ALIGN(x) ((x) & ~0x3) /* valid for > 1p3 */
+#define UARTDM_TX_MAX 256 /* in bytes, valid for <= 1p3 */
+#define UARTDM_RX_SIZE (UART_XMIT_SIZE / 4)
+
enum {
UARTDM_1P1 = 1,
UARTDM_1P2,
@@ -46,6 +54,17 @@ enum {
UARTDM_1P4,
};
+struct msm_dma {
+ struct dma_chan *chan;
+ enum dma_data_direction dir;
+ dma_addr_t phys;
+ unsigned char *virt;
+ dma_cookie_t cookie;
+ u32 enable_bit;
+ unsigned int count;
+ struct dma_async_tx_descriptor *desc;
+};
+
struct msm_port {
struct uart_port uart;
char name[16];
@@ -55,9 +74,153 @@ struct msm_port {
int is_uartdm;
unsigned int old_snap_state;
bool break_detected;
+ struct msm_dma tx_dma;
+ struct msm_dma rx_dma;
};
-static inline void wait_for_xmitr(struct uart_port *port)
+static void msm_handle_tx(struct uart_port *port);
+static void msm_start_rx_dma(struct msm_port *msm_port);
+
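+/*
+ * Terminate any in-flight transfer on this channel, clear the matching
+ * enable bit in UARTDM_DMEN and unmap the buffer if it was mapped.
+ */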
+static void msm_stop_dma(struct uart_port *port, struct msm_dma *dma)
+{
+ struct device *dev = port->dev;
+ unsigned int mapped;
+ u32 val;
+
+ mapped = dma->count;
+ dma->count = 0;
+
+ dmaengine_terminate_all(dma->chan);
+
+ /*
+ * A DMA stall can occur if an enqueue and a flush command race with
+ * each other, so disable the UARTDM channel before changing the baud
+ * rate or protocol configuration and before sending a flush command
+ * to the ADM. Note: do not reset the receiver here right away, since
+ * issuing disable/reset or reset/disable back to back is discouraged.
+ */
+ val = msm_read(port, UARTDM_DMEN);
+ val &= ~dma->enable_bit;
+ msm_write(port, val, UARTDM_DMEN);
+
+ if (mapped)
+ dma_unmap_single(dev, dma->phys, mapped, dma->dir);
+}
+
+static void msm_release_dma(struct msm_port *msm_port)
+{
+ struct msm_dma *dma;
+
+ dma = &msm_port->tx_dma;
+ if (dma->chan) {
+ msm_stop_dma(&msm_port->uart, dma);
+ dma_release_channel(dma->chan);
+ }
+
+ memset(dma, 0, sizeof(*dma));
+
+ dma = &msm_port->rx_dma;
+ if (dma->chan) {
+ msm_stop_dma(&msm_port->uart, dma);
+ dma_release_channel(dma->chan);
+ kfree(dma->virt);
+ }
+
+ memset(dma, 0, sizeof(*dma));
+}
+
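+/*
+ * DMA is strictly optional: if the "tx" channel or its configuration is
+ * unavailable, the msm_dma struct is left zeroed and the driver falls
+ * back to PIO in msm_handle_tx().
+ */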
+static void msm_request_tx_dma(struct msm_port *msm_port, resource_size_t base)
+{
+ struct device *dev = msm_port->uart.dev;
+ struct dma_slave_config conf;
+ struct msm_dma *dma;
+ u32 crci = 0;
+ int ret;
+
+ dma = &msm_port->tx_dma;
+
+ /* allocate DMA resources, if available */
+ dma->chan = dma_request_slave_channel_reason(dev, "tx");
+ if (IS_ERR(dma->chan))
+ goto no_tx;
+
+ of_property_read_u32(dev->of_node, "qcom,tx-crci", &crci);
+
+ memset(&conf, 0, sizeof(conf));
+ conf.direction = DMA_MEM_TO_DEV;
+ conf.device_fc = true;
+ conf.dst_addr = base + UARTDM_TF;
+ conf.dst_maxburst = UARTDM_BURST_SIZE;
+ conf.slave_id = crci;
+
+ ret = dmaengine_slave_config(dma->chan, &conf);
+ if (ret)
+ goto rel_tx;
+
+ dma->dir = DMA_TO_DEVICE;
+
+ if (msm_port->is_uartdm < UARTDM_1P4)
+ dma->enable_bit = UARTDM_DMEN_TX_DM_ENABLE;
+ else
+ dma->enable_bit = UARTDM_DMEN_TX_BAM_ENABLE;
+
+ return;
+
+rel_tx:
+ dma_release_channel(dma->chan);
+no_tx:
+ memset(dma, 0, sizeof(*dma));
+}
+
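+/*
+ * Rx counterpart of msm_request_tx_dma(); additionally allocates a
+ * UARTDM_RX_SIZE bounce buffer for incoming data.
+ */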
+static void msm_request_rx_dma(struct msm_port *msm_port, resource_size_t base)
+{
+ struct device *dev = msm_port->uart.dev;
+ struct dma_slave_config conf;
+ struct msm_dma *dma;
+ u32 crci = 0;
+ int ret;
+
+ dma = &msm_port->rx_dma;
+
+ /* allocate DMA resources, if available */
+ dma->chan = dma_request_slave_channel_reason(dev, "rx");
+ if (IS_ERR(dma->chan))
+ goto no_rx;
+
+ of_property_read_u32(dev->of_node, "qcom,rx-crci", &crci);
+
+ dma->virt = kzalloc(UARTDM_RX_SIZE, GFP_KERNEL);
+ if (!dma->virt)
+ goto rel_rx;
+
+ memset(&conf, 0, sizeof(conf));
+ conf.direction = DMA_DEV_TO_MEM;
+ conf.device_fc = true;
+ conf.src_addr = base + UARTDM_RF;
+ conf.src_maxburst = UARTDM_BURST_SIZE;
+ conf.slave_id = crci;
+
+ ret = dmaengine_slave_config(dma->chan, &conf);
+ if (ret)
+ goto err;
+
+ dma->dir = DMA_FROM_DEVICE;
+
+ if (msm_port->is_uartdm < UARTDM_1P4)
+ dma->enable_bit = UARTDM_DMEN_RX_DM_ENABLE;
+ else
+ dma->enable_bit = UARTDM_DMEN_RX_BAM_ENABLE;
+
+ return;
+err:
+ kfree(dma->virt);
+rel_rx:
+ dma_release_channel(dma->chan);
+no_rx:
+ memset(dma, 0, sizeof(*dma));
+}
+
+static inline void msm_wait_for_xmitr(struct uart_port *port)
{
while (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY)) {
if (msm_read(port, UART_ISR) & UART_ISR_TX_READY)
@@ -78,17 +241,277 @@ static void msm_stop_tx(struct uart_port *port)
static void msm_start_tx(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_dma *dma = &msm_port->tx_dma;
+
+ /* Already started in DMA mode */
+ if (dma->count)
+ return;
+
+ msm_port->imr |= UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, UART_IMR);
+}
+
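+/*
+ * Program the character count for the next transfer once the
+ * transmitter is idle; the read back flushes the posted write.
+ */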
+static void msm_reset_dm_count(struct uart_port *port, int count)
+{
+ msm_wait_for_xmitr(port);
+ msm_write(port, count, UARTDM_NCF_TX);
+ msm_read(port, UARTDM_NCF_TX);
+}
+
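+/*
+ * Tx DMA completion: unmap the buffer, advance the circular buffer tail
+ * by the number of bytes actually sent and restart Tx (PIO or DMA) if
+ * more data is pending.
+ */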
+static void msm_complete_tx_dma(void *args)
+{
+ struct msm_port *msm_port = args;
+ struct uart_port *port = &msm_port->uart;
+ struct circ_buf *xmit = &port->state->xmit;
+ struct msm_dma *dma = &msm_port->tx_dma;
+ struct dma_tx_state state;
+ enum dma_status status;
+ unsigned long flags;
+ unsigned int count;
+ u32 val;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Already stopped */
+ if (!dma->count)
+ goto done;
+
+ status = dmaengine_tx_status(dma->chan, dma->cookie, &state);
+
+ dma_unmap_single(port->dev, dma->phys, dma->count, dma->dir);
+ val = msm_read(port, UARTDM_DMEN);
+ val &= ~dma->enable_bit;
+ msm_write(port, val, UARTDM_DMEN);
+
+ if (msm_port->is_uartdm > UARTDM_1P3) {
+ msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
+ msm_write(port, UART_CR_TX_ENABLE, UART_CR);
+ }
+
+ count = dma->count - state.residue;
+ port->icount.tx += count;
+ dma->count = 0;
+
+ xmit->tail += count;
+ xmit->tail &= UART_XMIT_SIZE - 1;
+
+ /* Restore "Tx FIFO below watermark" interrupt */
msm_port->imr |= UART_IMR_TXLEV;
msm_write(port, msm_port->imr, UART_IMR);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ msm_handle_tx(port);
+done:
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
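+/*
+ * Map the tail of the transmit ring and submit it to the dmaengine; the
+ * TXLEV interrupt is disabled while the transfer is in flight and is
+ * restored from the completion callback.
+ */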
+static int msm_handle_tx_dma(struct msm_port *msm_port, unsigned int count)
+{
+ struct circ_buf *xmit = &msm_port->uart.state->xmit;
+ struct uart_port *port = &msm_port->uart;
+ struct msm_dma *dma = &msm_port->tx_dma;
+ void *cpu_addr;
+ int ret;
+ u32 val;
+
+ cpu_addr = &xmit->buf[xmit->tail];
+
+ dma->phys = dma_map_single(port->dev, cpu_addr, count, dma->dir);
+ ret = dma_mapping_error(port->dev, dma->phys);
+ if (ret)
+ return ret;
+
+ dma->desc = dmaengine_prep_slave_single(dma->chan, dma->phys,
+ count, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT |
+ DMA_PREP_FENCE);
+ if (!dma->desc) {
+ ret = -EIO;
+ goto unmap;
+ }
+
+ dma->desc->callback = msm_complete_tx_dma;
+ dma->desc->callback_param = msm_port;
+
+ dma->cookie = dmaengine_submit(dma->desc);
+ ret = dma_submit_error(dma->cookie);
+ if (ret)
+ goto unmap;
+
+ /*
+ * The DMA completion callback reloads the Tx FIFO, so the "Tx FIFO
+ * below watermark" interrupt is not needed; disable it.
+ */
+ msm_port->imr &= ~UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, UART_IMR);
+
+ dma->count = count;
+
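+ /*
+ * Ordering differs by engine: on ADM (< 1P4) the DMA enable bit is
+ * set before the character count is programmed, on BAM (1P4) after.
+ */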
+ val = msm_read(port, UARTDM_DMEN);
+ val |= dma->enable_bit;
+
+ if (msm_port->is_uartdm < UARTDM_1P4)
+ msm_write(port, val, UARTDM_DMEN);
+
+ msm_reset_dm_count(port, count);
+
+ if (msm_port->is_uartdm > UARTDM_1P3)
+ msm_write(port, val, UARTDM_DMEN);
+
+ dma_async_issue_pending(dma->chan);
+ return 0;
+unmap:
+ dma_unmap_single(port->dev, dma->phys, count, dma->dir);
+ return ret;
+}
+
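+/*
+ * Rx DMA completion: runs either on a full buffer or after the stale
+ * interrupt terminates the transfer; pushes the received bytes to the
+ * tty layer and re-arms Rx DMA.
+ */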
+static void msm_complete_rx_dma(void *args)
+{
+ struct msm_port *msm_port = args;
+ struct uart_port *port = &msm_port->uart;
+ struct tty_port *tport = &port->state->port;
+ struct msm_dma *dma = &msm_port->rx_dma;
+ int count = 0, i, sysrq;
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Already stopped */
+ if (!dma->count)
+ goto done;
+
+ val = msm_read(port, UARTDM_DMEN);
+ val &= ~dma->enable_bit;
+ msm_write(port, val, UARTDM_DMEN);
+
+ /* Restore interrupts */
+ msm_port->imr |= UART_IMR_RXLEV | UART_IMR_RXSTALE;
+ msm_write(port, msm_port->imr, UART_IMR);
+
+ if (msm_read(port, UART_SR) & UART_SR_OVERRUN) {
+ port->icount.overrun++;
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
+ msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ }
+
+ count = msm_read(port, UARTDM_RX_TOTAL_SNAP);
+
+ port->icount.rx += count;
+
+ dma->count = 0;
+
+ dma_unmap_single(port->dev, dma->phys, UARTDM_RX_SIZE, dma->dir);
+
+ for (i = 0; i < count; i++) {
+ char flag = TTY_NORMAL;
+
+ if (msm_port->break_detected && dma->virt[i] == 0) {
+ port->icount.brk++;
+ flag = TTY_BREAK;
+ msm_port->break_detected = false;
+ if (uart_handle_break(port))
+ continue;
+ }
+
+ if (!(port->read_status_mask & UART_SR_RX_BREAK))
+ flag = TTY_NORMAL;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+ sysrq = uart_handle_sysrq_char(port, dma->virt[i]);
+ spin_lock_irqsave(&port->lock, flags);
+ if (!sysrq)
+ tty_insert_flip_char(tport, dma->virt[i], flag);
+ }
+
+ msm_start_rx_dma(msm_port);
+done:
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (count)
+ tty_flip_buffer_push(tport);
+}
+
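+/*
+ * Arm a UARTDM_RX_SIZE receive transfer. Silently does nothing if no
+ * DMA channel was acquired, leaving Rx in interrupt/PIO mode.
+ */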
+static void msm_start_rx_dma(struct msm_port *msm_port)
+{
+ struct msm_dma *dma = &msm_port->rx_dma;
+ struct uart_port *uart = &msm_port->uart;
+ u32 val;
+ int ret;
+
+ if (!dma->chan)
+ return;
+
+ dma->phys = dma_map_single(uart->dev, dma->virt,
+ UARTDM_RX_SIZE, dma->dir);
+ ret = dma_mapping_error(uart->dev, dma->phys);
+ if (ret)
+ return;
+
+ dma->desc = dmaengine_prep_slave_single(dma->chan, dma->phys,
+ UARTDM_RX_SIZE, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!dma->desc)
+ goto unmap;
+
+ dma->desc->callback = msm_complete_rx_dma;
+ dma->desc->callback_param = msm_port;
+
+ dma->cookie = dmaengine_submit(dma->desc);
+ ret = dma_submit_error(dma->cookie);
+ if (ret)
+ goto unmap;
+ /*
+ * DMA off-loads the FIFO, so the "Rx FIFO over watermark" and
+ * "stale" interrupts are not needed; disable them.
+ */
+ msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
+
+ /*
+ * When the DMA engine is ADM3 (implied by UARTDM <= v1.3), the
+ * RXSTALE interrupt is still needed to flush the input DMA FIFO
+ * to memory.
+ */
+ if (msm_port->is_uartdm < UARTDM_1P4)
+ msm_port->imr |= UART_IMR_RXSTALE;
+
+ msm_write(uart, msm_port->imr, UART_IMR);
+
+ dma->count = UARTDM_RX_SIZE;
+
+ dma_async_issue_pending(dma->chan);
+
+ msm_write(uart, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(uart, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+
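+ /*
+ * Same enable-bit ordering as on the Tx path: ADM (< 1P4) before
+ * programming DMRX, BAM (1P4) after.
+ */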
+ val = msm_read(uart, UARTDM_DMEN);
+ val |= dma->enable_bit;
+
+ if (msm_port->is_uartdm < UARTDM_1P4)
+ msm_write(uart, val, UARTDM_DMEN);
+
+ msm_write(uart, UARTDM_RX_SIZE, UARTDM_DMRX);
+
+ if (msm_port->is_uartdm > UARTDM_1P3)
+ msm_write(uart, val, UARTDM_DMEN);
+
+ return;
+unmap:
+ dma_unmap_single(uart->dev, dma->phys, UARTDM_RX_SIZE, dma->dir);
}
static void msm_stop_rx(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_dma *dma = &msm_port->rx_dma;
msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
msm_write(port, msm_port->imr, UART_IMR);
+
+ if (dma->chan)
+ msm_stop_dma(port, dma);
}
static void msm_enable_ms(struct uart_port *port)
@@ -99,7 +522,7 @@ static void msm_enable_ms(struct uart_port *port)
msm_write(port, msm_port->imr, UART_IMR);
}
-static void handle_rx_dm(struct uart_port *port, unsigned int misr)
+static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
{
struct tty_port *tport = &port->state->port;
unsigned int sr;
@@ -169,9 +592,12 @@ static void handle_rx_dm(struct uart_port *port, unsigned int misr)
msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
msm_write(port, 0xFFFFFF, UARTDM_DMRX);
msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+
+ /* Try to use DMA */
+ msm_start_rx_dma(msm_port);
}
-static void handle_rx(struct uart_port *port)
+static void msm_handle_rx(struct uart_port *port)
{
struct tty_port *tport = &port->state->port;
unsigned int sr;
@@ -224,18 +650,11 @@ static void handle_rx(struct uart_port *port)
spin_lock(&port->lock);
}
-static void reset_dm_count(struct uart_port *port, int count)
-{
- wait_for_xmitr(port);
- msm_write(port, count, UARTDM_NCF_TX);
- msm_read(port, UARTDM_NCF_TX);
-}
-
-static void handle_tx(struct uart_port *port)
+static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count)
{
struct circ_buf *xmit = &port->state->xmit;
struct msm_port *msm_port = UART_TO_MSM(port);
- unsigned int tx_count, num_chars;
+ unsigned int num_chars;
unsigned int tf_pointer = 0;
void __iomem *tf;
@@ -244,20 +663,8 @@ static void handle_tx(struct uart_port *port)
else
tf = port->membase + UART_TF;
- tx_count = uart_circ_chars_pending(xmit);
- tx_count = min3(tx_count, (unsigned int)UART_XMIT_SIZE - xmit->tail,
- port->fifosize);
-
- if (port->x_char) {
- if (msm_port->is_uartdm)
- reset_dm_count(port, tx_count + 1);
-
- iowrite8_rep(tf, &port->x_char, 1);
- port->icount.tx++;
- port->x_char = 0;
- } else if (tx_count && msm_port->is_uartdm) {
- reset_dm_count(port, tx_count);
- }
+ if (tx_count && msm_port->is_uartdm)
+ msm_reset_dm_count(port, tx_count);
while (tf_pointer < tx_count) {
int i;
@@ -290,7 +697,60 @@ static void handle_tx(struct uart_port *port)
uart_write_wakeup(port);
}
-static void handle_delta_cts(struct uart_port *port)
+static void msm_handle_tx(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ struct circ_buf *xmit = &msm_port->uart.state->xmit;
+ struct msm_dma *dma = &msm_port->tx_dma;
+ unsigned int pio_count, dma_count, dma_min;
+ void __iomem *tf;
+ int err = 0;
+
+ if (port->x_char) {
+ if (msm_port->is_uartdm)
+ tf = port->membase + UARTDM_TF;
+ else
+ tf = port->membase + UART_TF;
+
+ if (msm_port->is_uartdm)
+ msm_reset_dm_count(port, 1);
+
+ iowrite8_rep(tf, &port->x_char, 1);
+ port->icount.tx++;
+ port->x_char = 0;
+ return;
+ }
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ msm_stop_tx(port);
+ return;
+ }
+
+ pio_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ dma_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+
+ dma_min = 1; /* Always DMA */
+ if (msm_port->is_uartdm > UARTDM_1P3) {
+ dma_count = UARTDM_TX_ALIGN(dma_count);
+ dma_min = UARTDM_BURST_SIZE;
+ } else {
+ if (dma_count > UARTDM_TX_MAX)
+ dma_count = UARTDM_TX_MAX;
+ }
+
+ if (pio_count > port->fifosize)
+ pio_count = port->fifosize;
+
+ if (!dma->chan || dma_count < dma_min)
+ msm_handle_tx_pio(port, pio_count);
+ else
+ err = msm_handle_tx_dma(msm_port, dma_count);
+
+ if (err) /* fall back to PIO mode */
+ msm_handle_tx_pio(port, pio_count);
+}
+
+static void msm_handle_delta_cts(struct uart_port *port)
{
msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
port->icount.cts++;
@@ -301,9 +761,12 @@ static irqreturn_t msm_irq(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_dma *dma = &msm_port->rx_dma;
+ unsigned long flags;
unsigned int misr;
+ u32 val;
- spin_lock(&port->lock);
+ spin_lock_irqsave(&port->lock, flags);
misr = msm_read(port, UART_MISR);
msm_write(port, 0, UART_IMR); /* disable interrupt */
@@ -313,18 +776,29 @@ static irqreturn_t msm_irq(int irq, void *dev_id)
}
if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE)) {
- if (msm_port->is_uartdm)
- handle_rx_dm(port, misr);
- else
- handle_rx(port);
+ if (dma->count) {
+ val = UART_CR_CMD_STALE_EVENT_DISABLE;
+ msm_write(port, val, UART_CR);
+ val = UART_CR_CMD_RESET_STALE_INT;
+ msm_write(port, val, UART_CR);
+ /*
+ * Flush the DMA input FIFO to memory; this also
+ * triggers the RX DMA completion.
+ */
+ dmaengine_terminate_all(dma->chan);
+ } else if (msm_port->is_uartdm) {
+ msm_handle_rx_dm(port, misr);
+ } else {
+ msm_handle_rx(port);
+ }
}
if (misr & UART_IMR_TXLEV)
- handle_tx(port);
+ msm_handle_tx(port);
if (misr & UART_IMR_DELTA_CTS)
- handle_delta_cts(port);
+ msm_handle_delta_cts(port);
msm_write(port, msm_port->imr, UART_IMR); /* restore interrupt */
- spin_unlock(&port->lock);
+ spin_unlock_irqrestore(&port->lock, flags);
return IRQ_HANDLED;
}
@@ -408,6 +882,7 @@ msm_find_best_baud(struct uart_port *port, unsigned int baud)
{ 3, 0xdd, 8 },
{ 2, 0xee, 16 },
{ 1, 0xff, 31 },
+ { 0, 0xff, 31 },
};
divisor = uart_get_divisor(port, baud);
@@ -419,21 +894,41 @@ msm_find_best_baud(struct uart_port *port, unsigned int baud)
return entry; /* Default to smallest divider */
}
-static int msm_set_baud_rate(struct uart_port *port, unsigned int baud)
+static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
+ unsigned long *saved_flags)
{
- unsigned int rxstale, watermark;
+ unsigned int rxstale, watermark, mask;
struct msm_port *msm_port = UART_TO_MSM(port);
const struct msm_baud_map *entry;
+ unsigned long flags;
entry = msm_find_best_baud(port, baud);
msm_write(port, entry->code, UART_CSR);
+ if (baud > 460800)
+ port->uartclk = baud * 16;
+
+ flags = *saved_flags;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ clk_set_rate(msm_port->clk, port->uartclk);
+
+ spin_lock_irqsave(&port->lock, flags);
+ *saved_flags = flags;
+
/* RX stale watermark */
rxstale = entry->rxstale;
watermark = UART_IPR_STALE_LSB & rxstale;
- watermark |= UART_IPR_RXSTALE_LAST;
- watermark |= UART_IPR_STALE_TIMEOUT_MSB & (rxstale << 2);
+ if (msm_port->is_uartdm) {
+ mask = UART_DM_IPR_STALE_TIMEOUT_MSB;
+ } else {
+ watermark |= UART_IPR_RXSTALE_LAST;
+ mask = UART_IPR_STALE_TIMEOUT_MSB;
+ }
+
+ watermark |= mask & (rxstale << 2);
+
msm_write(port, watermark, UART_IPR);
/* set RX watermark */
@@ -476,7 +971,7 @@ static void msm_init_clock(struct uart_port *port)
static int msm_startup(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
- unsigned int data, rfr_level;
+ unsigned int data, rfr_level, mask;
int ret;
snprintf(msm_port->name, sizeof(msm_port->name),
@@ -496,11 +991,23 @@ static int msm_startup(struct uart_port *port)
/* set automatic RFR level */
data = msm_read(port, UART_MR1);
- data &= ~UART_MR1_AUTO_RFR_LEVEL1;
+
+ if (msm_port->is_uartdm)
+ mask = UART_DM_MR1_AUTO_RFR_LEVEL1;
+ else
+ mask = UART_MR1_AUTO_RFR_LEVEL1;
+
+ data &= ~mask;
data &= ~UART_MR1_AUTO_RFR_LEVEL0;
- data |= UART_MR1_AUTO_RFR_LEVEL1 & (rfr_level << 2);
+ data |= mask & (rfr_level << 2);
data |= UART_MR1_AUTO_RFR_LEVEL0 & rfr_level;
msm_write(port, data, UART_MR1);
+
+ if (msm_port->is_uartdm) {
+ msm_request_tx_dma(msm_port, msm_port->uart.mapbase);
+ msm_request_rx_dma(msm_port, msm_port->uart.mapbase);
+ }
+
return 0;
}
@@ -511,6 +1018,9 @@ static void msm_shutdown(struct uart_port *port)
msm_port->imr = 0;
msm_write(port, 0, UART_IMR); /* disable interrupts */
+ if (msm_port->is_uartdm)
+ msm_release_dma(msm_port);
+
clk_disable_unprepare(msm_port->clk);
free_irq(port->irq, port);
@@ -519,14 +1029,19 @@ static void msm_shutdown(struct uart_port *port)
static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_dma *dma = &msm_port->rx_dma;
unsigned long flags;
unsigned int baud, mr;
spin_lock_irqsave(&port->lock, flags);
+ if (dma->chan) /* Terminate if any */
+ msm_stop_dma(port, dma);
+
/* calculate and set baud rate */
- baud = uart_get_baud_rate(port, termios, old, 300, 115200);
- baud = msm_set_baud_rate(port, baud);
+ baud = uart_get_baud_rate(port, termios, old, 300, 4000000);
+ baud = msm_set_baud_rate(port, baud, &flags);
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
@@ -588,6 +1103,9 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
uart_update_timeout(port, termios->c_cflag, baud);
+ /* Try to use DMA */
+ msm_start_rx_dma(msm_port);
+
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -765,7 +1283,7 @@ static void msm_poll_put_char(struct uart_port *port, unsigned char c)
msm_write(port, 0, UART_IMR);
if (msm_port->is_uartdm)
- reset_dm_count(port, 1);
+ msm_reset_dm_count(port, 1);
/* Wait until FIFO is empty */
while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
@@ -839,7 +1357,7 @@ static struct msm_port msm_uart_ports[] = {
#define UART_NR ARRAY_SIZE(msm_uart_ports)
-static inline struct uart_port *get_port_from_line(unsigned int line)
+static inline struct uart_port *msm_get_port_from_line(unsigned int line)
{
return &msm_uart_ports[line].uart;
}
@@ -866,7 +1384,7 @@ static void __msm_console_write(struct uart_port *port, const char *s,
spin_lock(&port->lock);
if (is_uartdm)
- reset_dm_count(port, count);
+ msm_reset_dm_count(port, count);
i = 0;
while (i < count) {
@@ -911,7 +1429,7 @@ static void msm_console_write(struct console *co, const char *s,
BUG_ON(co->index < 0 || co->index >= UART_NR);
- port = get_port_from_line(co->index);
+ port = msm_get_port_from_line(co->index);
msm_port = UART_TO_MSM(port);
__msm_console_write(port, s, count, msm_port->is_uartdm);
@@ -928,7 +1446,7 @@ static int __init msm_console_setup(struct console *co, char *options)
if (unlikely(co->index >= UART_NR || co->index < 0))
return -ENXIO;
- port = get_port_from_line(co->index);
+ port = msm_get_port_from_line(co->index);
if (unlikely(!port->membase))
return -ENXIO;
@@ -1043,7 +1561,7 @@ static int msm_serial_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "msm_serial: detected port #%d\n", line);
- port = get_port_from_line(line);
+ port = msm_get_port_from_line(line);
port->dev = &pdev->dev;
msm_port = UART_TO_MSM(port);
diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
index 737f69fe7113..bc1d7b39eba8 100644
--- a/drivers/tty/serial/msm_serial.h
+++ b/drivers/tty/serial/msm_serial.h
@@ -20,11 +20,12 @@
#define UART_MR1_AUTO_RFR_LEVEL0 0x3F
#define UART_MR1_AUTO_RFR_LEVEL1 0x3FF00
-#define UART_MR1_RX_RDY_CTL (1 << 7)
-#define UART_MR1_CTS_CTL (1 << 6)
+#define UART_DM_MR1_AUTO_RFR_LEVEL1 0xFFFFFF00
+#define UART_MR1_RX_RDY_CTL BIT(7)
+#define UART_MR1_CTS_CTL BIT(6)
#define UART_MR2 0x0004
-#define UART_MR2_ERROR_MODE (1 << 6)
+#define UART_MR2_ERROR_MODE BIT(6)
#define UART_MR2_BITS_PER_CHAR 0x30
#define UART_MR2_BITS_PER_CHAR_5 (0x0 << 4)
#define UART_MR2_BITS_PER_CHAR_6 (0x1 << 4)
@@ -58,26 +59,28 @@
#define UART_CR_CMD_SET_RFR (13 << 4)
#define UART_CR_CMD_RESET_RFR (14 << 4)
#define UART_CR_CMD_PROTECTION_EN (16 << 4)
-#define UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4)
+#define UART_CR_CMD_STALE_EVENT_DISABLE (6 << 8)
+#define UART_CR_CMD_STALE_EVENT_ENABLE (5 << 8)
#define UART_CR_CMD_FORCE_STALE (4 << 8)
#define UART_CR_CMD_RESET_TX_READY (3 << 8)
-#define UART_CR_TX_DISABLE (1 << 3)
-#define UART_CR_TX_ENABLE (1 << 2)
-#define UART_CR_RX_DISABLE (1 << 1)
-#define UART_CR_RX_ENABLE (1 << 0)
+#define UART_CR_TX_DISABLE BIT(3)
+#define UART_CR_TX_ENABLE BIT(2)
+#define UART_CR_RX_DISABLE BIT(1)
+#define UART_CR_RX_ENABLE BIT(0)
#define UART_CR_CMD_RESET_RXBREAK_START ((1 << 11) | (2 << 4))
#define UART_IMR 0x0014
-#define UART_IMR_TXLEV (1 << 0)
-#define UART_IMR_RXSTALE (1 << 3)
-#define UART_IMR_RXLEV (1 << 4)
-#define UART_IMR_DELTA_CTS (1 << 5)
-#define UART_IMR_CURRENT_CTS (1 << 6)
-#define UART_IMR_RXBREAK_START (1 << 10)
+#define UART_IMR_TXLEV BIT(0)
+#define UART_IMR_RXSTALE BIT(3)
+#define UART_IMR_RXLEV BIT(4)
+#define UART_IMR_DELTA_CTS BIT(5)
+#define UART_IMR_CURRENT_CTS BIT(6)
+#define UART_IMR_RXBREAK_START BIT(10)
#define UART_IPR_RXSTALE_LAST 0x20
#define UART_IPR_STALE_LSB 0x1F
#define UART_IPR_STALE_TIMEOUT_MSB 0x3FF80
+#define UART_DM_IPR_STALE_TIMEOUT_MSB 0xFFFFFF80
#define UART_IPR 0x0018
#define UART_TFWR 0x001C
@@ -96,20 +99,20 @@
#define UART_TEST_CTRL 0x0050
#define UART_SR 0x0008
-#define UART_SR_HUNT_CHAR (1 << 7)
-#define UART_SR_RX_BREAK (1 << 6)
-#define UART_SR_PAR_FRAME_ERR (1 << 5)
-#define UART_SR_OVERRUN (1 << 4)
-#define UART_SR_TX_EMPTY (1 << 3)
-#define UART_SR_TX_READY (1 << 2)
-#define UART_SR_RX_FULL (1 << 1)
-#define UART_SR_RX_READY (1 << 0)
+#define UART_SR_HUNT_CHAR BIT(7)
+#define UART_SR_RX_BREAK BIT(6)
+#define UART_SR_PAR_FRAME_ERR BIT(5)
+#define UART_SR_OVERRUN BIT(4)
+#define UART_SR_TX_EMPTY BIT(3)
+#define UART_SR_TX_READY BIT(2)
+#define UART_SR_RX_FULL BIT(1)
+#define UART_SR_RX_READY BIT(0)
#define UART_RF 0x000C
#define UARTDM_RF 0x0070
#define UART_MISR 0x0010
#define UART_ISR 0x0014
-#define UART_ISR_TX_READY (1 << 7)
+#define UART_ISR_TX_READY BIT(7)
#define UARTDM_RXFS 0x50
#define UARTDM_RXFS_BUF_SHIFT 0x7
@@ -119,6 +122,12 @@
#define UARTDM_DMEN_RX_SC_ENABLE BIT(5)
#define UARTDM_DMEN_TX_SC_ENABLE BIT(4)
+#define UARTDM_DMEN_TX_BAM_ENABLE BIT(2) /* UARTDM_1P4 */
+#define UARTDM_DMEN_TX_DM_ENABLE BIT(0) /* < UARTDM_1P4 */
+
+#define UARTDM_DMEN_RX_BAM_ENABLE BIT(3) /* UARTDM_1P4 */
+#define UARTDM_DMEN_RX_DM_ENABLE BIT(1) /* < UARTDM_1P4 */
+
#define UARTDM_DMRX 0x34
#define UARTDM_NCF_TX 0x40
#define UARTDM_RX_TOTAL_SNAP 0x38
diff --git a/drivers/tty/serial/msm_smd_tty.c b/drivers/tty/serial/msm_smd_tty.c
new file mode 100644
index 000000000000..1ffab5ea0d92
--- /dev/null
+++ b/drivers/tty/serial/msm_smd_tty.c
@@ -0,0 +1,417 @@
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/soc/qcom/smd.h>
+#include <linux/suspend.h>
+
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#define MAX_SMD_TTY_PORTS 32
+#define CHANNEL_PROBE_TIMEOUT 60000
+
+static struct tty_driver *smd_tty_driver;
+
+struct msm_smd_port_info_s {
+ char *name;
+ u32 flags;
+ int index;
+ int smd_tty_info_index;
+};
+
+#define SMD_DROP_ONE_TX_BYTE BIT(0)
+#define SMD_TTY_ACL BIT(1)
+#define SMD_TTY_CMD BIT(2)
+
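+/* Static channel map. The BT ACL and CMD channels share one tty
+ * (smd_tty_info_index 3): writes are routed by the leading HCI packet
+ * type byte and reads are prefixed with the matching type byte.
+ */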
+static struct msm_smd_port_info_s msm_smd_port_info[] = {
+ {
+ .name = "DUMMY_SMD_TTY0",
+ .flags = 0,
+ .index = 0,
+ .smd_tty_info_index = 0,
+ }, {
+ .name = "DUMMY_SMD_TTY1",
+ .flags = 0,
+ .index = 1,
+ .smd_tty_info_index = 1,
+ }, {
+ .name = "APPS_RIVA_BT_ACL",
+ .flags = SMD_DROP_ONE_TX_BYTE | SMD_TTY_ACL,
+ .index = 2,
+ .smd_tty_info_index = 3,
+ }, {
+ .name = "APPS_RIVA_BT_CMD",
+ .flags = SMD_DROP_ONE_TX_BYTE | SMD_TTY_CMD,
+ .index = 3,
+ .smd_tty_info_index = 3,
+ },
+};
+
+static struct msm_smd_port_info_s *match_smd_port_info(const char *name)
+{
+ int i;
+ struct msm_smd_port_info_s *pinfo = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(msm_smd_port_info); i++) {
+ if (!strcmp(name, msm_smd_port_info[i].name)) {
+ pinfo = &msm_smd_port_info[i];
+ break;
+ }
+ }
+
+ return pinfo;
+}
+
+struct msm_smd_channel_info {
+ struct qcom_smd_device *cmd_sdev;
+ struct qcom_smd_channel *channel;
+
+ struct completion ack;
+ struct mutex lock;
+
+ u32 flags;
+ int smd_tty_info_index;
+};
+
+struct msm_smd_tty_info_s {
+ /* For some special cases (like BT), the outbound and inbound
+ * channels are different. In most cases they are the same.
+ */
+ struct msm_smd_channel_info *cmd_info;
+ struct msm_smd_channel_info *acl_info;
+
+ struct tty_port port;
+ struct device *tty;
+
+ struct mutex open_lock;
+ struct completion probe_done;
+ int opened;
+};
+
+static void dump_buf(const unsigned char *buf, int len)
+{
+ int i;
+
+ printk(KERN_DEBUG "\n\t");
+
+ for (i = 0; i < len; i++) {
+ printk(KERN_CONT "0x%02x ", buf[i]);
+ if (((i + 1) % 16) == 0)
+ printk(KERN_CONT "\n\t");
+ }
+}
+
+static struct msm_smd_tty_info_s msm_smd_tty_info[MAX_SMD_TTY_PORTS];
+
+static int qcom_smd_tty_probe(struct qcom_smd_device *sdev)
+{
+ struct msm_smd_channel_info *ch_info;
+ struct msm_smd_port_info_s *pinfo;
+ int idx = 0;
+
+ ch_info = devm_kzalloc(&sdev->dev, sizeof(*ch_info), GFP_KERNEL);
+ if (!ch_info)
+ return -ENOMEM;
+
+ mutex_init(&ch_info->lock);
+ init_completion(&ch_info->ack);
+ ch_info->channel = sdev->channel;
+
+ pinfo = match_smd_port_info(ch_info->channel->name);
+ if (!pinfo) {
+ pr_err("Can't match channel %s\n", ch_info->channel->name);
+ /* ch_info is devm-allocated, no explicit free needed */
+ return -EINVAL;
+ }
+
+ ch_info->flags = pinfo->flags;
+ idx = ch_info->smd_tty_info_index = pinfo->smd_tty_info_index;
+
+ if (pinfo->flags & SMD_TTY_ACL) {
+ msm_smd_tty_info[idx].acl_info = ch_info;
+ } else if (pinfo->flags & SMD_TTY_CMD) {
+ msm_smd_tty_info[idx].cmd_info = ch_info;
+ } else {
+ msm_smd_tty_info[idx].cmd_info =
+ msm_smd_tty_info[idx].acl_info = ch_info;
+ }
+
+ /* Use complete_all() so that multiple opens of this port
+ * do not block on the completion.
+ */
+ complete_all(&msm_smd_tty_info[pinfo->index].probe_done);
+
+ dev_set_drvdata(&sdev->dev, ch_info);
+ return 0;
+}
+
+static void qcom_smd_tty_remove(struct qcom_smd_device *sdev)
+{
+ /* ch_info is devm-allocated, nothing to release here */
+}
+
+static int qcom_smd_tty_callback(struct qcom_smd_device *sdev,
+ const void *data,
+ size_t count)
+{
+ unsigned char *ptr, *tmp;
+ int buf_len;
+ struct msm_smd_channel_info *ch_info;
+ struct tty_struct *tty;
+
+ ch_info = dev_get_drvdata(&sdev->dev);
+ tty = tty_port_tty_get(
+ &msm_smd_tty_info[ch_info->smd_tty_info_index].port);
+ if (!tty) {
+ pr_err("can't get tty for index: %d\n",
+ ch_info->smd_tty_info_index);
+ return 0;
+ }
+
+ if (ch_info->flags & SMD_DROP_ONE_TX_BYTE)
+ buf_len = count + 1;
+ else
+ buf_len = count;
+
+ if (buf_len > ch_info->channel->fifo_size)
+ buf_len = ch_info->channel->fifo_size;
+
+ buf_len = tty_prepare_flip_string(tty->port, &ptr, buf_len);
+ if (buf_len <= 0) {
+ pr_err("can't get flip buffer for tty index: %d\n",
+ ch_info->smd_tty_info_index);
+ tty_kref_put(tty);
+ return 0;
+ }
+
+ tmp = ptr;
+ if (ch_info->flags & SMD_TTY_ACL)
+ *ptr++ = HCI_ACLDATA_PKT;
+ else if (ch_info->flags & SMD_TTY_CMD)
+ *ptr++ = HCI_EVENT_PKT;
+
+ /* never copy more than the flip buffer can hold */
+ memcpy_fromio(ptr, data, buf_len - (ptr - tmp));
+
+ tty_flip_buffer_push(tty->port);
+ tty_kref_put(tty);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_smd_tty_of_match[] = {
+ { .compatible = "qcom,hci-smd-bt-cmd"},
+ { .compatible = "qcom,hci-smd-bt-acl"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smd_tty_of_match);
+
+static struct qcom_smd_driver qcom_smd_tty_driver = {
+ .probe = qcom_smd_tty_probe,
+ .remove = qcom_smd_tty_remove,
+ .callback = qcom_smd_tty_callback,
+ .driver = {
+ .name = "qcom_smd_tty",
+ .owner = THIS_MODULE,
+ .of_match_table = qcom_smd_tty_of_match,
+ },
+};
+
+static int smd_tty_open(struct tty_struct *tty, struct file *f)
+{
+ struct msm_smd_tty_info_s *info = msm_smd_tty_info + tty->index;
+
+ return tty_port_open(&info->port, tty, f);
+}
+
+static void smd_tty_close(struct tty_struct *tty, struct file *f)
+{
+ struct msm_smd_tty_info_s *info = tty->driver_data;
+
+ tty_port_close(&info->port, tty, f);
+}
+
+static int smd_tty_write(struct tty_struct *tty, const unsigned char *buf,
+ int len)
+{
+ int ret = 0;
+ int avail = len;
+ struct msm_smd_tty_info_s *info = tty->driver_data;
+ struct msm_smd_channel_info *cinfo = info->cmd_info;
+
+ if ((info->acl_info->flags & SMD_DROP_ONE_TX_BYTE) ||
+ (info->cmd_info->flags & SMD_DROP_ONE_TX_BYTE)) {
+ if (*buf == HCI_COMMAND_PKT)
+ cinfo = info->cmd_info;
+ else
+ cinfo = info->acl_info;
+ buf++;
+ avail--;
+ }
+
+ mutex_lock(&cinfo->lock);
+ ret = qcom_smd_send(cinfo->channel, buf, avail);
+ if (ret)
+ pr_err("smd_tty_write failed on smd tty %d\n", tty->index);
+ mutex_unlock(&cinfo->lock);
+
+ return ret ? ret : len;
+}
+
+static int smd_tty_port_activate(struct tty_port *tport,
+ struct tty_struct *tty)
+{
+ int ret = 0;
+ unsigned int n = tty->index;
+ struct msm_smd_tty_info_s *info;
+
+ if (n >= ARRAY_SIZE(msm_smd_port_info))
+ return -ENODEV;
+
+ info = &msm_smd_tty_info[n];
+
+ mutex_lock(&info->open_lock);
+ tty->driver_data = info;
+ if (info->opened) {
+ info->opened++;
+ mutex_unlock(&info->open_lock);
+ return 0;
+ }
+
+ /* Block the open until the SMD channel for this port (e.g. the
+ * BT channel) has been probed.
+ */
+ ret = wait_for_completion_interruptible_timeout(
+ &info->probe_done, msecs_to_jiffies(CHANNEL_PROBE_TIMEOUT));
+ if (ret == 0) {
+ pr_err("Wait for smd channel ready timeout\n");
+ mutex_unlock(&info->open_lock);
+ return -ETIMEDOUT;
+ } else if (ret < 0) {
+ pr_err("Error waiting for smd channel ready\n");
+ mutex_unlock(&info->open_lock);
+ return ret;
+ }
+
+ info->opened++;
+ mutex_unlock(&info->open_lock);
+
+ return 0;
+}
+
+static void smd_tty_port_shutdown(struct tty_port *tport)
+{
+ struct msm_smd_tty_info_s *info;
+ struct tty_struct *tty = tty_port_tty_get(tport);
+
+ if (!tty)
+ return;
+
+ info = tty->driver_data;
+
+ mutex_lock(&info->open_lock);
+ info->opened--;
+ mutex_unlock(&info->open_lock);
+
+ tty_kref_put(tty);
+}
+
+static const struct tty_port_operations smd_tty_port_ops = {
+ .shutdown = smd_tty_port_shutdown,
+ .activate = smd_tty_port_activate,
+};
+
+static const struct tty_operations smd_tty_ops = {
+ .open = smd_tty_open,
+ .close = smd_tty_close,
+ .write = smd_tty_write,
+};
+
+static void smd_tty_device_init(int idx)
+{
+ struct tty_port *port;
+
+ port = &msm_smd_tty_info[idx].port;
+ tty_port_init(port);
+ mutex_init(&msm_smd_tty_info[idx].open_lock);
+ init_completion(&msm_smd_tty_info[idx].probe_done);
+
+ msm_smd_tty_info[idx].opened = 0;
+
+ port->ops = &smd_tty_port_ops;
+ msm_smd_tty_info[idx].tty = tty_port_register_device(port,
+ smd_tty_driver, idx, NULL);
+}
+
+static int smd_tty_register_driver(void)
+{
+ int ret = 0;
+
+ smd_tty_driver = tty_alloc_driver(MAX_SMD_TTY_PORTS,
+ TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV |
+ TTY_DRIVER_RESET_TERMIOS);
+ if (IS_ERR(smd_tty_driver)) {
+ ret = PTR_ERR(smd_tty_driver);
+ return ret;
+ }
+
+ smd_tty_driver->owner = THIS_MODULE;
+ smd_tty_driver->driver_name = "smd_tty_driver";
+ smd_tty_driver->name = "smd";
+ smd_tty_driver->major = 0;
+ smd_tty_driver->minor_start = 0;
+ smd_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ smd_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ smd_tty_driver->init_termios = tty_std_termios;
+ smd_tty_driver->init_termios.c_iflag = 0;
+ smd_tty_driver->init_termios.c_oflag = 0;
+ smd_tty_driver->init_termios.c_cflag = B38400 | CS8 | CREAD;
+ smd_tty_driver->init_termios.c_lflag = 0;
+ tty_set_operations(smd_tty_driver, &smd_tty_ops);
+
+ ret = tty_register_driver(smd_tty_driver);
+ if (ret) {
+ put_tty_driver(smd_tty_driver);
+ pr_err("register tty driver failed (%d) for smd tty\n", ret);
+ }
+
+ return ret;
+}
+
+static int __init smd_tty_init(void)
+{
+ int ret;
+ int i;
+
+ ret = smd_tty_register_driver();
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(msm_smd_port_info); i++)
+ smd_tty_device_init(i);
+
+ return qcom_smd_driver_register(&qcom_smd_tty_driver);
+}
+
+module_init(smd_tty_init);
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 5ce3f1d6a6ed..5619b8ca3bf3 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -1,6 +1,7 @@
config USB_CHIPIDEA
tristate "ChipIdea Highspeed Dual Role Controller"
depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA
+ select EXTCON
help
Say Y here if your system has a dual role high speed USB
controller based on ChipIdea silicon IP. Currently, only the
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 3feebf7f31f0..573c2876b263 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -47,6 +47,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/extcon.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/module.h>
@@ -602,9 +603,45 @@ static irqreturn_t ci_irq(int irq, void *data)
return ret;
}
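+/*
+ * extcon notifiers: latch the new cable state and re-run the controller
+ * IRQ handler so that the role state machine re-reads OTGSC, where
+ * hw_read_otgsc() folds the extcon state into the register value.
+ */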
+static int ci_vbus_notifier(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct ci_hdrc_cable *vbus = container_of(nb, struct ci_hdrc_cable, nb);
+ struct ci_hdrc *ci = vbus->ci;
+
+ if (event)
+ vbus->state = true;
+ else
+ vbus->state = false;
+
+ vbus->changed = true;
+
+ ci_irq(ci->irq, ci);
+ return NOTIFY_DONE;
+}
+
+static int ci_id_notifier(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct ci_hdrc_cable *id = container_of(nb, struct ci_hdrc_cable, nb);
+ struct ci_hdrc *ci = id->ci;
+
+ if (event)
+ id->state = false;
+ else
+ id->state = true;
+
+ id->changed = true;
+
+ ci_irq(ci->irq, ci);
+ return NOTIFY_DONE;
+}
+
static int ci_get_platdata(struct device *dev,
struct ci_hdrc_platform_data *platdata)
{
+ struct extcon_dev *ext_vbus, *ext_id;
+ struct ci_hdrc_cable *cable;
int ret;
if (!platdata->phy_mode)
@@ -695,9 +732,91 @@ static int ci_get_platdata(struct device *dev,
platdata->flags |= CI_HDRC_OVERRIDE_RX_BURST;
}
+ ext_id = ERR_PTR(-ENODEV);
+ ext_vbus = ERR_PTR(-ENODEV);
+ if (of_property_read_bool(dev->of_node, "extcon")) {
+ /* Each one of them is not mandatory */
+ ext_vbus = extcon_get_edev_by_phandle(dev, 0);
+ if (IS_ERR(ext_vbus) && PTR_ERR(ext_vbus) != -ENODEV)
+ return PTR_ERR(ext_vbus);
+
+ ext_id = extcon_get_edev_by_phandle(dev, 1);
+ if (IS_ERR(ext_id) && PTR_ERR(ext_id) != -ENODEV)
+ return PTR_ERR(ext_id);
+ }
+
+ cable = &platdata->vbus_extcon;
+ cable->nb.notifier_call = ci_vbus_notifier;
+ cable->edev = ext_vbus;
+
+ if (!IS_ERR(ext_vbus)) {
+ ret = extcon_get_cable_state_(cable->edev, EXTCON_USB);
+ if (ret)
+ cable->state = true;
+ else
+ cable->state = false;
+ }
+
+ cable = &platdata->id_extcon;
+ cable->nb.notifier_call = ci_id_notifier;
+ cable->edev = ext_id;
+
+ if (!IS_ERR(ext_id)) {
+ ret = extcon_get_cable_state_(cable->edev, EXTCON_USB_HOST);
+ if (ret)
+ cable->state = false;
+ else
+ cable->state = true;
+ }
return 0;
}
+static int ci_extcon_register(struct ci_hdrc *ci)
+{
+ struct ci_hdrc_cable *id, *vbus;
+ int ret;
+
+ id = &ci->platdata->id_extcon;
+ id->ci = ci;
+ if (!IS_ERR(id->edev)) {
+ ret = extcon_register_notifier(id->edev, EXTCON_USB_HOST,
+ &id->nb);
+ if (ret < 0) {
+ dev_err(ci->dev, "register ID failed\n");
+ return ret;
+ }
+ }
+
+ vbus = &ci->platdata->vbus_extcon;
+ vbus->ci = ci;
+ if (!IS_ERR(vbus->edev)) {
+ ret = extcon_register_notifier(vbus->edev, EXTCON_USB,
+ &vbus->nb);
+ if (ret < 0) {
+ extcon_unregister_notifier(id->edev, EXTCON_USB_HOST,
+ &id->nb);
+ dev_err(ci->dev, "register VBUS failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ci_extcon_unregister(struct ci_hdrc *ci)
+{
+ struct ci_hdrc_cable *cable;
+
+ cable = &ci->platdata->id_extcon;
+ if (!IS_ERR(cable->edev))
+ extcon_unregister_notifier(cable->edev, EXTCON_USB_HOST,
+ &cable->nb);
+
+ cable = &ci->platdata->vbus_extcon;
+ if (!IS_ERR(cable->edev))
+ extcon_unregister_notifier(cable->edev, EXTCON_USB, &cable->nb);
+}
+
static DEFINE_IDA(ci_ida);
struct platform_device *ci_hdrc_add_device(struct device *dev,
@@ -921,6 +1040,10 @@ static int ci_hdrc_probe(struct platform_device *pdev)
if (ret)
goto stop;
+ ret = ci_extcon_register(ci);
+ if (ret)
+ goto stop;
+
if (ci->supports_runtime_pm) {
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
@@ -938,6 +1061,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
if (!ret)
return 0;
+ ci_extcon_unregister(ci);
stop:
ci_role_destroy(ci);
deinit_phy:
@@ -957,6 +1081,7 @@ static int ci_hdrc_remove(struct platform_device *pdev)
}
dbg_remove_files(ci);
+ ci_extcon_unregister(ci);
ci_role_destroy(ci);
ci_hdrc_enter_lpm(ci, true);
ci_usb_phy_exit(ci);
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index ad6c87a4653c..ab4bd0c2d4ef 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -30,7 +30,44 @@
*/
u32 hw_read_otgsc(struct ci_hdrc *ci, u32 mask)
{
- return hw_read(ci, OP_OTGSC, mask);
+ struct ci_hdrc_cable *cable;
+ u32 val = hw_read(ci, OP_OTGSC, mask);
+
+ /*
+ * If using extcon framework for VBUS and/or ID signal
+ * detection overwrite OTGSC register value
+ */
+ cable = &ci->platdata->vbus_extcon;
+ if (!IS_ERR(cable->edev)) {
+ if (cable->changed)
+ val |= OTGSC_BSVIS;
+ else
+ val &= ~OTGSC_BSVIS;
+
+ cable->changed = false;
+
+ if (cable->state)
+ val |= OTGSC_BSV;
+ else
+ val &= ~OTGSC_BSV;
+ }
+
+ cable = &ci->platdata->id_extcon;
+ if (!IS_ERR(cable->edev)) {
+ if (cable->changed)
+ val |= OTGSC_IDIS;
+ else
+ val &= ~OTGSC_IDIS;
+
+ cable->changed = false;
+
+ if (cable->state)
+ val |= OTGSC_ID;
+ else
+ val &= ~OTGSC_ID;
+ }
+
+ return val;
}
/**
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index c58c3c0dbe35..a51f74c40631 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -76,6 +76,9 @@ static int msm_hsusb_init_vddcx(struct msm_otg *motg, int init)
{
int ret = 0;
+ if (IS_ERR(motg->vddcx))
+ return 0;
+
if (init) {
ret = regulator_set_voltage(motg->vddcx,
motg->vdd_levels[VDD_LEVEL_MIN],
@@ -1598,6 +1601,8 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
ret = extcon_register_notifier(ext_id, EXTCON_USB_HOST,
&motg->id.nb);
if (ret < 0) {
+ extcon_unregister_notifier(motg->vbus.extcon,
+ EXTCON_USB, &motg->vbus.nb);
dev_err(&pdev->dev, "register ID notifier failed\n");
return ret;
}
@@ -1647,7 +1652,7 @@ static int msm_otg_reboot_notify(struct notifier_block *this,
static int msm_otg_probe(struct platform_device *pdev)
{
- struct regulator_bulk_data regs[3];
+ struct regulator_bulk_data regs[2];
int ret = 0;
struct device_node *np = pdev->dev.of_node;
struct msm_otg_platform_data *pdata;
@@ -1660,15 +1665,6 @@ static int msm_otg_probe(struct platform_device *pdev)
if (!motg)
return -ENOMEM;
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- if (!np)
- return -ENXIO;
- ret = msm_otg_read_dt(pdev, motg);
- if (ret)
- return ret;
- }
-
motg->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
GFP_KERNEL);
if (!motg->phy.otg)
@@ -1676,6 +1672,8 @@ static int msm_otg_probe(struct platform_device *pdev)
phy = &motg->phy;
phy->dev = &pdev->dev;
+ INIT_WORK(&motg->sm_work, msm_otg_sm_work);
+ INIT_DELAYED_WORK(&motg->chg_work, msm_chg_detect_work);
motg->clk = devm_clk_get(&pdev->dev, np ? "core" : "usb_hs_clk");
if (IS_ERR(motg->clk)) {
@@ -1731,17 +1729,29 @@ static int msm_otg_probe(struct platform_device *pdev)
return motg->irq;
}
- regs[0].supply = "vddcx";
- regs[1].supply = "v3p3";
- regs[2].supply = "v1p8";
+ regs[0].supply = "v3p3";
+ regs[1].supply = "v1p8";
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ if (!np)
+ return -ENXIO;
+ ret = msm_otg_read_dt(pdev, motg);
+ if (ret)
+ return ret;
+ }
ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs);
- if (ret)
+ if (ret) {
+ dev_err(&pdev->dev, "no v3p3 or v1p8\n");
return ret;
+ }
+
+ motg->v3p3 = regs[0].consumer;
+ motg->v1p8 = regs[1].consumer;
- motg->vddcx = regs[0].consumer;
- motg->v3p3 = regs[1].consumer;
- motg->v1p8 = regs[2].consumer;
+ motg->vddcx = devm_regulator_get_optional(motg->phy.dev, "vddcx");
+ if (IS_ERR(motg->vddcx))
+ dev_info(&pdev->dev, "no vddcx\n");
clk_set_rate(motg->clk, 60000000);
@@ -1771,8 +1781,6 @@ static int msm_otg_probe(struct platform_device *pdev)
writel(0, USB_USBINTR);
writel(0, USB_OTGSC);
- INIT_WORK(&motg->sm_work, msm_otg_sm_work);
- INIT_DELAYED_WORK(&motg->chg_work, msm_chg_detect_work);
ret = devm_request_irq(&pdev->dev, motg->irq, msm_otg_irq, IRQF_SHARED,
"msm_otg", motg);
if (ret) {
@@ -1821,7 +1829,6 @@ static int msm_otg_probe(struct platform_device *pdev)
register_reboot_notifier(&motg->reboot);
pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
return 0;
@@ -1834,6 +1841,11 @@ disable_clks:
clk_disable_unprepare(motg->clk);
if (!IS_ERR(motg->core_clk))
clk_disable_unprepare(motg->core_clk);
+
+ extcon_unregister_notifier(motg->id.extcon,
+ EXTCON_USB_HOST, &motg->id.nb);
+ extcon_unregister_notifier(motg->vbus.extcon,
+ EXTCON_USB, &motg->vbus.nb);
return ret;
}