author     Andy Green <andy.green@linaro.org>  2014-12-16 13:06:27 +0800
committer  Andy Green <andy.green@linaro.org>  2014-12-26 18:44:19 +0800
commit     c29017dc18a08dc72983469001b5f514a3b82056 (patch)
tree       0434a4802886eda725ff5572cb9b29cbea3fdbb6
parent     8b301eb471b45aba1433dbcd9baeff86e603dbc3 (diff)
spmi
Signed-off-by: Andy Green <andy.green@linaro.org>
-rw-r--r--  arch/arm/boot/dts/msm-pm8916.dtsi  20
-rw-r--r--  arch/arm/boot/dts/msm8916-regulator.dtsi  3
-rw-r--r--  arch/arm/boot/dts/msm8916.dtsi  5
-rw-r--r--  arch/arm/mach-qcom/board.c  2
-rw-r--r--  drivers/base/core.c  12
-rw-r--r--  drivers/clk/qcom/Makefile  2
-rw-r--r--  drivers/clk/qcom/clock-a7.c  359
-rw-r--r--  drivers/clk/qcom/clock.c  12
-rw-r--r--  drivers/of/platform.c  9
-rw-r--r--  drivers/regulator/cpr-regulator.c  2580
-rw-r--r--  drivers/regulator/mem-acc-regulator.c  600
-rw-r--r--  drivers/regulator/qpnp-regulator.c  1
-rw-r--r--  drivers/regulator/spm-regulator.c  86
-rw-r--r--  drivers/spmi/spmi-pmic-arb.c  17
-rw-r--r--  drivers/spmi/spmi.c  58
-rw-r--r--  include/linux/of_spmi.h  34
-rw-r--r--  include/linux/spmi.h  2
17 files changed, 3706 insertions, 96 deletions
diff --git a/arch/arm/boot/dts/msm-pm8916.dtsi b/arch/arm/boot/dts/msm-pm8916.dtsi
index 1a738de55bb4..2128e65619bd 100644
--- a/arch/arm/boot/dts/msm-pm8916.dtsi
+++ b/arch/arm/boot/dts/msm-pm8916.dtsi
@@ -12,11 +12,14 @@
&spmi_bus {
- qcom,pm8916@0 {
- spmi-slave-container;
- reg = <0x0>;
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+
+ qcom,pm8916@0 {
+ reg = <0x0 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
pm8916_revid: qcom,revid@100 {
compatible = "qcom,qpnp-revid";
@@ -320,10 +323,9 @@
};
qcom,pm8916@1 {
- spmi-slave-container;
- reg = <0x1>;
- #address-cells = <1>;
- #size-cells = <1>;
+ reg = <1 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
regulator@1400 {
compatible = "qcom,qpnp-regulator";
diff --git a/arch/arm/boot/dts/msm8916-regulator.dtsi b/arch/arm/boot/dts/msm8916-regulator.dtsi
index 749c55261a3b..26eda8bb85bf 100644
--- a/arch/arm/boot/dts/msm8916-regulator.dtsi
+++ b/arch/arm/boot/dts/msm8916-regulator.dtsi
@@ -14,6 +14,9 @@
&spmi_bus {
qcom,pm8916@1 {
status = "okay";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
pm8916_s2: spm-regulator@1700 {
compatible = "qcom,spm-regulator";
regulator-name = "8916_s2";
diff --git a/arch/arm/boot/dts/msm8916.dtsi b/arch/arm/boot/dts/msm8916.dtsi
index df9dbc92ca72..8a66d3b0d6b4 100644
--- a/arch/arm/boot/dts/msm8916.dtsi
+++ b/arch/arm/boot/dts/msm8916.dtsi
@@ -902,8 +902,9 @@
<0x200a000 0x2100>;
reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
interrupts = <0 190 0>;
- qcom,pmic-arb-channel = <0>;
- qcom,pmic-arb-ee = <0>;
+ interrupt-names = "periph_irq";
+ qcom,channel = <0>;
+ qcom,ee = <0>;
#interrupt-cells = <3>;
interrupt-controller;
#address-cells = <1>;
diff --git a/arch/arm/mach-qcom/board.c b/arch/arm/mach-qcom/board.c
index 235666188eb1..43a7559cfa40 100644
--- a/arch/arm/mach-qcom/board.c
+++ b/arch/arm/mach-qcom/board.c
@@ -75,7 +75,7 @@ static void __init msm8916_init(void)
msm_smd_init();
msm_rpm_driver_init();
- spm_regulator_init();
+// spm_regulator_init();
msm_spm_device_init();
qpnp_regulator_init();
msm_pm_sleep_status_init();
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 97e2baf6e5d8..7908d978d41b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -968,13 +968,16 @@ int device_add(struct device *dev)
int error = -EINVAL;
dev = get_device(dev);
- if (!dev)
- goto done;
-
+ if (!dev) {
+ pr_err("x1\n");
+ goto done;
+ }
if (!dev->p) {
error = device_private_init(dev);
- if (error)
+ if (error) {
+ pr_err("x2\n");
goto done;
+ }
}
/*
@@ -992,6 +995,7 @@ int device_add(struct device *dev)
dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);
if (!dev_name(dev)) {
+ pr_err("x4\n");
error = -EINVAL;
goto name_error;
}
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 1039de0feb2d..ff61a1f8b292 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -22,6 +22,6 @@ obj-y += clock.o clock-dummy.o clock-generic.o clock-pll.o clock-local2.o clock-
#obj-$(CONFIG_DEBUG_FS) += clock-debug.o
# MSM8916
-obj-$(CONFIG_ARCH_MSM8916) += clock-rpm-8916.o clock-gcc-8916.o mdss-pll.c
+obj-$(CONFIG_ARCH_MSM8916) += clock-rpm-8916.o clock-gcc-8916.o mdss-pll.c clock-a7.o
clk-qcom-y += gdsc.o
diff --git a/drivers/clk/qcom/clock-a7.c b/drivers/clk/qcom/clock-a7.c
new file mode 100644
index 000000000000..2d006e1aa621
--- /dev/null
+++ b/drivers/clk/qcom/clock-a7.c
@@ -0,0 +1,359 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <soc/qcom/clock-local2.h>
+
+DEFINE_VDD_REGS_INIT(vdd_cpu, 1);
+
+static struct mux_div_clk a7ssmux = {
+ .ops = &rcg_mux_div_ops,
+ .safe_freq = 300000000,
+ .data = {
+ .max_div = 32,
+ .min_div = 2,
+ .is_half_divider = true,
+ },
+ .c = {
+ .dbg_name = "a7ssmux",
+ .ops = &clk_ops_mux_div_clk,
+ .vdd_class = &vdd_cpu,
+ CLK_INIT(a7ssmux.c),
+ },
+ .parents = (struct clk_src[8]) {},
+ .div_mask = BM(4, 0),
+ .src_mask = BM(10, 8) >> 8,
+ .src_shift = 8,
+};
+
+static struct clk_lookup clock_tbl_a7[] = {
+ CLK_LOOKUP("cpu0_clk", a7ssmux.c, "0.qcom,msm-cpufreq"),
+ CLK_LOOKUP("cpu1_clk", a7ssmux.c, "0.qcom,msm-cpufreq"),
+ CLK_LOOKUP("cpu2_clk", a7ssmux.c, "0.qcom,msm-cpufreq"),
+ CLK_LOOKUP("cpu3_clk", a7ssmux.c, "0.qcom,msm-cpufreq"),
+ CLK_LOOKUP("cpu0_clk", a7ssmux.c, "fe805664.qcom,pm"),
+ CLK_LOOKUP("cpu1_clk", a7ssmux.c, "fe805664.qcom,pm"),
+ CLK_LOOKUP("cpu2_clk", a7ssmux.c, "fe805664.qcom,pm"),
+ CLK_LOOKUP("cpu3_clk", a7ssmux.c, "fe805664.qcom,pm"),
+ CLK_LOOKUP("cpu0_clk", a7ssmux.c, "8600664.qcom,pm"),
+ CLK_LOOKUP("cpu1_clk", a7ssmux.c, "8600664.qcom,pm"),
+ CLK_LOOKUP("cpu2_clk", a7ssmux.c, "8600664.qcom,pm"),
+ CLK_LOOKUP("cpu3_clk", a7ssmux.c, "8600664.qcom,pm"),
+};
+
+static int of_get_fmax_vdd_class(struct platform_device *pdev, struct clk *c,
+ char *prop_name)
+{
+ struct device_node *of = pdev->dev.of_node;
+ int prop_len, i;
+ struct clk_vdd_class *vdd = c->vdd_class;
+ u32 *array;
+
+ if (!of_find_property(of, prop_name, &prop_len)) {
+ dev_err(&pdev->dev, "missing %s\n", prop_name);
+ return -EINVAL;
+ }
+
+ prop_len /= sizeof(u32);
+ if (prop_len % 2) {
+ dev_err(&pdev->dev, "bad length %d\n", prop_len);
+ return -EINVAL;
+ }
+
+ prop_len /= 2;
+ vdd->level_votes = devm_kzalloc(&pdev->dev, prop_len * sizeof(int),
+ GFP_KERNEL);
+ if (!vdd->level_votes)
+ return -ENOMEM;
+
+ vdd->vdd_uv = devm_kzalloc(&pdev->dev, prop_len * sizeof(int),
+ GFP_KERNEL);
+ if (!vdd->vdd_uv)
+ return -ENOMEM;
+
+ c->fmax = devm_kzalloc(&pdev->dev, prop_len * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!c->fmax)
+ return -ENOMEM;
+
+ array = devm_kzalloc(&pdev->dev,
+ prop_len * sizeof(u32) * 2, GFP_KERNEL);
+ if (!array)
+ return -ENOMEM;
+
+ of_property_read_u32_array(of, prop_name, array, prop_len * 2);
+ for (i = 0; i < prop_len; i++) {
+ c->fmax[i] = array[2 * i];
+ vdd->vdd_uv[i] = array[2 * i + 1];
+ }
+
+ devm_kfree(&pdev->dev, array);
+ vdd->num_levels = prop_len;
+ vdd->cur_level = prop_len;
+ c->num_fmax = prop_len;
+ return 0;
+}
+
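+/*
+ * Example voltage plan parsed by of_get_fmax_vdd_class() above
+ * (hypothetical values -- the real tables live in the device tree):
+ *
+ *   qcom,speed0-bin-v0 =
+ *       < 400000000 1050000>,
+ *       < 800000000 1150000>,
+ *       <1190400000 1350000>;
+ *
+ * Each <frequency voltage> pair becomes one entry of c->fmax and
+ * vdd->vdd_uv respectively.
+ */
+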
+static void get_speed_bin(struct platform_device *pdev, int *bin, int *version)
+{
+ struct resource *res;
+ void __iomem *base;
+ u32 pte_efuse, redundant_sel, valid;
+
+ *bin = 0;
+ *version = 0;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse");
+ if (!res) {
+ dev_info(&pdev->dev,
+ "No speed/PVS binning available. Defaulting to 0!\n");
+ return;
+ }
+
+ base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!base) {
+ dev_warn(&pdev->dev,
+ "Unable to read efuse data. Defaulting to 0!\n");
+ return;
+ }
+
+ pte_efuse = readl_relaxed(base);
+ devm_iounmap(&pdev->dev, base);
+
+ redundant_sel = (pte_efuse >> 24) & 0x7;
+ *bin = pte_efuse & 0x7;
+ valid = (pte_efuse >> 3) & 0x1;
+ *version = (pte_efuse >> 4) & 0x3;
+
+ if (redundant_sel == 1)
+ *bin = (pte_efuse >> 27) & 0x7;
+
+ if (!valid) {
+ dev_info(&pdev->dev, "Speed bin not set. Defaulting to 0!\n");
+ *bin = 0;
+ } else {
+ dev_info(&pdev->dev, "Speed bin: %d\n", *bin);
+ }
+
+ dev_info(&pdev->dev, "PVS version: %d\n", *version);
+
+ return;
+}
+
+static void get_speed_bin_b(struct platform_device *pdev, int *bin,
+ int *version)
+{
+ struct resource *res;
+ void __iomem *base;
+ u32 pte_efuse, shift = 2, mask = 0x7;
+
+ *bin = 0;
+ *version = 0;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse");
+ if (!res) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "efuse1");
+ if (!res) {
+ dev_info(&pdev->dev,
+ "No speed/PVS binning available. Defaulting to 0!\n");
+ return;
+ }
+ shift = 23;
+ mask = 0x3;
+ }
+
+ base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!base) {
+ dev_warn(&pdev->dev,
+ "Unable to read efuse data. Defaulting to 0!\n");
+ return;
+ }
+
+ pte_efuse = readl_relaxed(base);
+ devm_iounmap(&pdev->dev, base);
+
+ *bin = (pte_efuse >> shift) & mask;
+
+ dev_info(&pdev->dev, "Speed bin: %d PVS Version: %d\n", *bin,
+ *version);
+}
+
+static int of_get_clk_src(struct platform_device *pdev, struct clk_src *parents)
+{
+ struct device_node *of = pdev->dev.of_node;
+ int num_parents, i, j, index;
+ struct clk *c;
+ char clk_name[] = "clk-x";
+
+ num_parents = of_property_count_strings(of, "clock-names");
+ if (num_parents <= 0 || num_parents > 8) {
+ dev_err(&pdev->dev, "missing clock-names\n");
+ return -EINVAL;
+ }
+
+ j = 0;
+ for (i = 0; i < 8; i++) {
+ snprintf(clk_name, ARRAY_SIZE(clk_name), "clk-%d", i);
+ index = of_property_match_string(of, "clock-names", clk_name);
+ if (IS_ERR_VALUE(index))
+ continue;
+
+ parents[j].sel = i;
+ parents[j].src = c = devm_clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(c)) {
+ if (c != ERR_PTR(-EPROBE_DEFER))
+ dev_err(&pdev->dev, "clk_get: %s\n fail",
+ clk_name);
+ return PTR_ERR(c);
+ }
+ j++;
+ }
+
+ return num_parents;
+}
+
+static int clock_a7_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int speed_bin = 0, version = 0, rc, cpu;
+ unsigned long rate, aux_rate;
+ struct clk *aux_clk, *main_pll;
+ char prop_name[] = "qcom,speedX-bin-vX";
+ const void *prop;
+ bool compat_bin = false;
+
+ compat_bin = of_device_is_compatible(pdev->dev.of_node,
+ "qcom,clock-a53-8916");
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rcg-base");
+ if (!res) {
+ dev_err(&pdev->dev, "missing rcg-base\n");
+ return -EINVAL;
+ }
+ a7ssmux.base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!a7ssmux.base) {
+ dev_err(&pdev->dev, "ioremap failed for rcg-base\n");
+ return -ENOMEM;
+ }
+
+ vdd_cpu.regulator[0] = devm_regulator_get(&pdev->dev, "cpu-vdd");
+ if (IS_ERR(vdd_cpu.regulator[0])) {
+ if (PTR_ERR(vdd_cpu.regulator[0]) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "unable to get regulator\n");
+ else
+ dev_info(&pdev->dev, "deferring on cpu-vdd reg get\n");
+ return PTR_ERR(vdd_cpu.regulator[0]);
+ }
+
+ rc = of_get_clk_src(pdev, a7ssmux.parents);
+ if (IS_ERR_VALUE(rc)) {
+ dev_info(&pdev->dev, "deferring on parents\n");
+ return rc;
+ }
+
+ a7ssmux.num_parents = rc;
+
+ /* Override the existing safe operating frequency */
+ prop = of_get_property(pdev->dev.of_node, "qcom,safe-freq", NULL);
+ if (prop)
+ a7ssmux.safe_freq = of_read_ulong(prop, 1);
+
+ if (compat_bin)
+ get_speed_bin_b(pdev, &speed_bin, &version);
+ else
+ get_speed_bin(pdev, &speed_bin, &version);
+
+ snprintf(prop_name, ARRAY_SIZE(prop_name),
+ "qcom,speed%d-bin-v%d", speed_bin, version);
+ rc = of_get_fmax_vdd_class(pdev, &a7ssmux.c, prop_name);
+ if (rc) {
+ /* Fall back to most conservative PVS table */
+ dev_err(&pdev->dev, "Unable to load voltage plan %s!\n",
+ prop_name);
+ rc = of_get_fmax_vdd_class(pdev, &a7ssmux.c,
+ "qcom,speed0-bin-v0");
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Unable to load safe voltage plan\n");
+ return rc;
+ }
+ dev_info(&pdev->dev, "Safe voltage plan loaded.\n");
+ }
+
+ rc = msm_clock_register(clock_tbl_a7, ARRAY_SIZE(clock_tbl_a7));
+ if (rc) {
+ dev_err(&pdev->dev, "msm_clock_register failed\n");
+ return rc;
+ }
+
+ /* Force a PLL reconfiguration */
+ aux_clk = a7ssmux.parents[0].src;
+ main_pll = a7ssmux.parents[1].src;
+
+ aux_rate = clk_get_rate(aux_clk);
+ rate = clk_get_rate(&a7ssmux.c);
+ clk_set_rate(&a7ssmux.c, aux_rate);
+ clk_set_rate(main_pll, clk_round_rate(main_pll, 1));
+ clk_set_rate(&a7ssmux.c, rate);
+
+ /*
+ * We don't want the CPU clocks to be turned off at late init
+ * if CPUFREQ or HOTPLUG configs are disabled. So, bump up the
+ * refcount of these clocks. Any cpufreq/hotplug manager can assume
+ * that the clocks have already been prepared and enabled by the time
+ * they take over.
+ */
+ get_online_cpus();
+ for_each_online_cpu(cpu)
+ WARN(clk_prepare_enable(&a7ssmux.c),
+ "Unable to turn on CPU clock");
+ put_online_cpus();
+ return 0;
+}
+
+static struct of_device_id clock_a7_match_table[] = {
+ {.compatible = "qcom,clock-a7-8226"},
+ {.compatible = "qcom,clock-a7-krypton"},
+ {.compatible = "qcom,clock-a7-9630"},
+ {.compatible = "qcom,clock-a53-8916"},
+ {}
+};
+
+static struct platform_driver clock_a7_driver = {
+ .probe = clock_a7_probe,
+ .driver = {
+ .name = "clock-a7",
+ .of_match_table = clock_a7_match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init clock_a7_init(void)
+{
+ return platform_driver_register(&clock_a7_driver);
+}
+arch_initcall(clock_a7_init);
diff --git a/drivers/clk/qcom/clock.c b/drivers/clk/qcom/clock.c
index 5deec0bfe39b..505a6200f302 100644
--- a/drivers/clk/qcom/clock.c
+++ b/drivers/clk/qcom/clock.c
@@ -87,8 +87,10 @@ static int update_vdd(struct clk_vdd_class *vdd_class)
for (i = 0; i < vdd_class->num_regulators; i++) {
rc = regulator_set_voltage(r[i], uv[new_base + i],
uv[max_lvl + i]);
- if (rc)
+ if (rc) {
+ pr_err("%s: regulator_set_voltage fail\%d\n", __func__, rc);
goto set_voltage_fail;
+ }
if (ua) {
rc = regulator_set_optimum_mode(r[i], ua[new_base + i]);
@@ -142,8 +144,10 @@ int vote_vdd_level(struct clk_vdd_class *vdd_class, int level)
{
int rc;
- if (level >= vdd_class->num_levels)
+ if (level >= vdd_class->num_levels) {
+ pr_err("%s: level %d >= class levels %d\n", __func__, level, vdd_class->num_levels);
return -EINVAL;
+ }
mutex_lock(&vdd_class->lock);
vdd_class->level_votes[level]++;
@@ -186,8 +190,10 @@ static int vote_rate_vdd(struct clk *clk, unsigned long rate)
return 0;
level = find_vdd_level(clk, rate);
- if (level < 0)
+ if (level < 0) {
+ pr_err("%s: find_vdd_level told %d\n", __func__, level);
return level;
+ }
return vote_vdd_level(clk->vdd_class, level);
}
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 5b33c6a21807..6b29e7930607 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -24,6 +24,8 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#define DEBUG 1
+
const struct of_device_id of_default_bus_match_table[] = {
{ .compatible = "simple-bus", },
#ifdef CONFIG_ARM_AMBA
@@ -416,9 +418,10 @@ static int of_platform_bus_create(struct device_node *bus,
return 0;
for_each_child_of_node(bus, child) {
- pr_debug(" create child: %s\n", child->full_name);
+ pr_info(" create child: %s\n", child->full_name);
rc = of_platform_bus_create(child, matches, lookup, &dev->dev, strict);
if (rc) {
+ pr_info(" failed\n");
of_node_put(child);
break;
}
@@ -447,8 +450,8 @@ int of_platform_bus_probe(struct device_node *root,
if (!root)
return -EINVAL;
- pr_debug("of_platform_bus_probe()\n");
- pr_debug(" starting at: %s\n", root->full_name);
+ pr_info("of_platform_bus_probe()\n");
+ pr_info(" starting at: %s\n", root->full_name);
/* Do a self check of bus type, if there's a match, create children */
if (of_match_node(matches, root)) {
diff --git a/drivers/regulator/cpr-regulator.c b/drivers/regulator/cpr-regulator.c
new file mode 100644
index 000000000000..2c18dbaffa48
--- /dev/null
+++ b/drivers/regulator/cpr-regulator.c
@@ -0,0 +1,2580 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#define DEBUG 1
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/cpr-regulator.h>
+#include <soc/qcom/scm.h>
+
+/* Register Offsets for RB-CPR and Bit Definitions */
+
+/* RBCPR Version Register */
+#define REG_RBCPR_VERSION 0
+#define RBCPR_VER_2 0x02
+
+/* RBCPR Gate Count and Target Registers */
+#define REG_RBCPR_GCNT_TARGET(n) (0x60 + 4 * n)
+
+#define RBCPR_GCNT_TARGET_GCNT_BITS 10
+#define RBCPR_GCNT_TARGET_GCNT_SHIFT 12
+#define RBCPR_GCNT_TARGET_GCNT_MASK ((1<<RBCPR_GCNT_TARGET_GCNT_BITS)-1)
+
+/* RBCPR Timer Control */
+#define REG_RBCPR_TIMER_INTERVAL 0x44
+#define REG_RBIF_TIMER_ADJUST 0x4C
+
+#define RBIF_TIMER_ADJ_CONS_UP_BITS 4
+#define RBIF_TIMER_ADJ_CONS_UP_MASK ((1<<RBIF_TIMER_ADJ_CONS_UP_BITS)-1)
+#define RBIF_TIMER_ADJ_CONS_DOWN_BITS 4
+#define RBIF_TIMER_ADJ_CONS_DOWN_MASK ((1<<RBIF_TIMER_ADJ_CONS_DOWN_BITS)-1)
+#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT 4
+
+/* RBCPR Config Register */
+#define REG_RBIF_LIMIT 0x48
+#define REG_RBCPR_STEP_QUOT 0x80
+#define REG_RBIF_SW_VLEVEL 0x94
+
+#define RBIF_LIMIT_CEILING_BITS 6
+#define RBIF_LIMIT_CEILING_MASK ((1<<RBIF_LIMIT_CEILING_BITS)-1)
+#define RBIF_LIMIT_CEILING_SHIFT 6
+#define RBIF_LIMIT_FLOOR_BITS 6
+#define RBIF_LIMIT_FLOOR_MASK ((1<<RBIF_LIMIT_FLOOR_BITS)-1)
+
+#define RBIF_LIMIT_CEILING_DEFAULT RBIF_LIMIT_CEILING_MASK
+#define RBIF_LIMIT_FLOOR_DEFAULT 0
+#define RBIF_SW_VLEVEL_DEFAULT 0x20
+
+#define RBCPR_STEP_QUOT_STEPQUOT_BITS 8
+#define RBCPR_STEP_QUOT_STEPQUOT_MASK ((1<<RBCPR_STEP_QUOT_STEPQUOT_BITS)-1)
+#define RBCPR_STEP_QUOT_IDLE_CLK_BITS 4
+#define RBCPR_STEP_QUOT_IDLE_CLK_MASK ((1<<RBCPR_STEP_QUOT_IDLE_CLK_BITS)-1)
+#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT 8
+
+/* RBCPR Control Register */
+#define REG_RBCPR_CTL 0x90
+
+#define RBCPR_CTL_LOOP_EN BIT(0)
+#define RBCPR_CTL_TIMER_EN BIT(3)
+#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN BIT(5)
+#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN BIT(6)
+#define RBCPR_CTL_COUNT_MODE BIT(10)
+#define RBCPR_CTL_UP_THRESHOLD_BITS 4
+#define RBCPR_CTL_UP_THRESHOLD_MASK ((1<<RBCPR_CTL_UP_THRESHOLD_BITS)-1)
+#define RBCPR_CTL_UP_THRESHOLD_SHIFT 24
+#define RBCPR_CTL_DN_THRESHOLD_BITS 4
+#define RBCPR_CTL_DN_THRESHOLD_MASK ((1<<RBCPR_CTL_DN_THRESHOLD_BITS)-1)
+#define RBCPR_CTL_DN_THRESHOLD_SHIFT 28
+
+/* RBCPR Ack/Nack Response */
+#define REG_RBIF_CONT_ACK_CMD 0x98
+#define REG_RBIF_CONT_NACK_CMD 0x9C
+
+/* RBCPR Result status Register */
+#define REG_RBCPR_RESULT_0 0xA0
+
+#define RBCPR_RESULT0_BUSY_SHIFT 19
+#define RBCPR_RESULT0_BUSY_MASK BIT(RBCPR_RESULT0_BUSY_SHIFT)
+#define RBCPR_RESULT0_ERROR_LT0_SHIFT 18
+#define RBCPR_RESULT0_ERROR_SHIFT 6
+#define RBCPR_RESULT0_ERROR_BITS 12
+#define RBCPR_RESULT0_ERROR_MASK ((1<<RBCPR_RESULT0_ERROR_BITS)-1)
+#define RBCPR_RESULT0_ERROR_STEPS_SHIFT 2
+#define RBCPR_RESULT0_ERROR_STEPS_BITS 4
+#define RBCPR_RESULT0_ERROR_STEPS_MASK ((1<<RBCPR_RESULT0_ERROR_STEPS_BITS)-1)
+#define RBCPR_RESULT0_STEP_UP_SHIFT 1
+
+/* RBCPR Interrupt Control Register */
+#define REG_RBIF_IRQ_EN(n) (0x100 + 4 * n)
+#define REG_RBIF_IRQ_CLEAR 0x110
+#define REG_RBIF_IRQ_STATUS 0x114
+
+#define CPR_INT_DONE BIT(0)
+#define CPR_INT_MIN BIT(1)
+#define CPR_INT_DOWN BIT(2)
+#define CPR_INT_MID BIT(3)
+#define CPR_INT_UP BIT(4)
+#define CPR_INT_MAX BIT(5)
+#define CPR_INT_CLAMP BIT(6)
+#define CPR_INT_ALL (CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \
+ CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP)
+#define CPR_INT_DEFAULT (CPR_INT_UP | CPR_INT_DOWN)
+
+#define CPR_NUM_RING_OSC 8
+
+/* RBCPR Clock Control Register */
+#define RBCPR_CLK_SEL_MASK BIT(0)
+#define RBCPR_CLK_SEL_19P2_MHZ 0
+#define RBCPR_CLK_SEL_AHB_CLK BIT(0)
+
+/* CPR eFuse parameters */
+#define CPR_FUSE_TARGET_QUOT_BITS 12
+#define CPR_FUSE_TARGET_QUOT_BITS_MASK ((1<<CPR_FUSE_TARGET_QUOT_BITS)-1)
+#define CPR_FUSE_RO_SEL_BITS 3
+#define CPR_FUSE_RO_SEL_BITS_MASK ((1<<CPR_FUSE_RO_SEL_BITS)-1)
+
+#define CPR_FUSE_MIN_QUOT_DIFF 100
+
+#define BYTES_PER_FUSE_ROW 8
+
+#define FLAGS_IGNORE_1ST_IRQ_STATUS BIT(0)
+#define FLAGS_SET_MIN_VOLTAGE BIT(1)
+#define FLAGS_UPLIFT_QUOT_VOLT BIT(2)
+
+struct quot_adjust_info {
+ int speed_bin;
+ int virtual_corner;
+ int quot_adjust;
+};
+
+static const char * const vdd_apc_name[] = {/* "vdd-apc-optional-prim", */
+ /* "vdd-apc-optional-sec" , */
+ "vdd-apc"};
+
+enum voltage_change_dir {
+ NO_CHANGE,
+ DOWN,
+ UP,
+};
+
+struct cpr_regulator {
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+ bool vreg_enabled;
+ int corner;
+ int ceiling_max;
+
+ /* eFuse parameters */
+ phys_addr_t efuse_addr;
+ void __iomem *efuse_base;
+
+ /* Process voltage parameters */
+ u32 pvs_corner_v[CPR_FUSE_CORNER_MAX];
+ /* Process voltage variables */
+ u32 pvs_bin;
+ u32 speed_bin;
+ u32 pvs_version;
+
+ /* APC voltage regulator */
+ struct regulator *vdd_apc;
+
+ /* Dependency parameters */
+ struct regulator *vdd_mx;
+ int vdd_mx_vmax;
+ int vdd_mx_vmin_method;
+ int vdd_mx_vmin;
+ int vdd_mx_corner_map[CPR_FUSE_CORNER_MAX];
+
+ /* mem-acc regulator */
+ struct regulator *mem_acc_vreg;
+
+ /* CPR parameters */
+ u64 cpr_fuse_bits;
+ bool cpr_fuse_disable;
+ bool cpr_fuse_local;
+ int cpr_fuse_target_quot[CPR_FUSE_CORNER_MAX];
+ int cpr_fuse_ro_sel[CPR_FUSE_CORNER_MAX];
+ int gcnt;
+
+ unsigned int cpr_irq;
+ void __iomem *rbcpr_base;
+ phys_addr_t rbcpr_clk_addr;
+ struct mutex cpr_mutex;
+
+ int ceiling_volt[CPR_FUSE_CORNER_MAX];
+ int floor_volt[CPR_FUSE_CORNER_MAX];
+ int *last_volt;
+ int step_volt;
+
+ int *save_ctl;
+ int *save_irq;
+
+ /* Config parameters */
+ bool enable;
+ u32 ref_clk_khz;
+ u32 timer_delay_us;
+ u32 timer_cons_up;
+ u32 timer_cons_down;
+ u32 irq_line;
+ u32 step_quotient;
+ u32 up_threshold;
+ u32 down_threshold;
+ u32 idle_clocks;
+ u32 gcnt_time_us;
+ u32 vdd_apc_step_up_limit;
+ u32 vdd_apc_step_down_limit;
+ u32 flags;
+ int *corner_map;
+ u32 num_corners;
+ int *quot_adjust;
+
+ bool is_cpr_suspended;
+};
+
+#define CPR_DEBUG_MASK_IRQ BIT(0)
+#define CPR_DEBUG_MASK_API BIT(1)
+
+static int cpr_debug_enable = CPR_DEBUG_MASK_IRQ;
+static int cpr_enable;
+static struct cpr_regulator *the_cpr;
+#if defined(CONFIG_DEBUG_FS)
+static struct dentry *cpr_debugfs_entry;
+#endif
+
+module_param_named(debug_enable, cpr_debug_enable, int, S_IRUGO | S_IWUSR);
+#define cpr_debug(message, ...) \
+ do { \
+ if (cpr_debug_enable & CPR_DEBUG_MASK_API) \
+ pr_info(message, ##__VA_ARGS__); \
+ } while (0)
+#define cpr_debug_irq(message, ...) \
+ do { \
+ if (cpr_debug_enable & CPR_DEBUG_MASK_IRQ) \
+ pr_info(message, ##__VA_ARGS__); \
+ else \
+ pr_debug(message, ##__VA_ARGS__); \
+ } while (0)
+
+
+static u64 cpr_read_efuse_row(struct cpr_regulator *cpr_vreg, u32 row_num,
+ bool use_tz_api)
+{
+ int rc;
+ u64 efuse_bits;
+ struct cpr_read_req {
+ u32 row_address;
+ int addr_type;
+ } req;
+
+ struct cpr_read_rsp {
+ u32 row_data[2];
+ u32 status;
+ } rsp;
+
+ if (!use_tz_api) {
+ efuse_bits = readq_relaxed(cpr_vreg->efuse_base
+ + row_num * BYTES_PER_FUSE_ROW);
+ return efuse_bits;
+ }
+
+ req.row_address = cpr_vreg->efuse_addr + row_num * BYTES_PER_FUSE_ROW;
+ req.addr_type = 0;
+ efuse_bits = 0;
+
+ rc = scm_call(SCM_SVC_FUSE, SCM_FUSE_READ,
+ &req, sizeof(req), &rsp, sizeof(rsp));
+
+ if (rc) {
+ pr_err("read row %d failed, err code = %d", row_num, rc);
+ } else {
+ efuse_bits = ((u64)(rsp.row_data[1]) << 32) +
+ (u64)rsp.row_data[0];
+ }
+
+ return efuse_bits;
+}
+
+/**
+ * cpr_read_efuse_param() - read a parameter from one or two eFuse rows
+ * @cpr_vreg: Pointer to cpr_regulator struct for this regulator.
+ * @row_start: Fuse row number to start reading from.
+ * @bit_start: The LSB of the parameter to read from the fuse.
+ * @bit_len: The length of the parameter in bits.
+ * @use_tz_api: Flag to indicate if an SCM call should be used to read the fuse.
+ *
+ * This function reads a parameter of specified offset and bit size out of one
+ * or two consecutive eFuse rows. This allows for the reading of parameters
+ * that happen to be split between two eFuse rows.
+ *
+ * Returns the fuse parameter on success or 0 on failure.
+ */
+static u64 cpr_read_efuse_param(struct cpr_regulator *cpr_vreg, int row_start,
+ int bit_start, int bit_len, bool use_tz_api)
+{
+ u64 fuse[2];
+ u64 param = 0;
+ int bits_first, bits_second;
+
+ if (bit_start < 0) {
+ pr_err("Invalid LSB = %d specified\n", bit_start);
+ return 0;
+ }
+
+ if (bit_len < 0 || bit_len > 64) {
+ pr_err("Invalid bit length = %d specified\n", bit_len);
+ return 0;
+ }
+
+ /* Allow bit indexing to start beyond the end of the start row. */
+ if (bit_start >= 64) {
+ row_start += bit_start >> 6; /* equivalent to bit_start / 64 */
+ bit_start &= 0x3F;
+ }
+
+ fuse[0] = cpr_read_efuse_row(cpr_vreg, row_start, use_tz_api);
+
+ if (bit_start == 0 && bit_len == 64) {
+ param = fuse[0];
+ } else if (bit_start + bit_len <= 64) {
+ param = (fuse[0] >> bit_start) & ((1 << bit_len) - 1);
+ } else {
+ fuse[1] = cpr_read_efuse_row(cpr_vreg, row_start + 1,
+ use_tz_api);
+ bits_first = 64 - bit_start;
+ bits_second = bit_len - bits_first;
+ param = (fuse[0] >> bit_start) & ((1 << bits_first) - 1);
+ param |= (fuse[1] & ((1 << bits_second) - 1)) << bits_first;
+ }
+
+ return param;
+}
+
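+/*
+ * Illustrative call (hypothetical offsets): cpr_read_efuse_param() with
+ * row_start = 3, bit_start = 58, bit_len = 12 returns bits 58..63 of row 3
+ * in the low six bits of the result and bits 0..5 of row 4 in the next six,
+ * per the split-row handling above.
+ */
+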
+static bool cpr_is_allowed(struct cpr_regulator *cpr_vreg)
+{
+ if (cpr_vreg->cpr_fuse_disable || !cpr_enable)
+ return false;
+ else
+ return true;
+}
+
+static void cpr_write(struct cpr_regulator *cpr_vreg, u32 offset, u32 value)
+{
+ writel_relaxed(value, cpr_vreg->rbcpr_base + offset);
+}
+
+static u32 cpr_read(struct cpr_regulator *cpr_vreg, u32 offset)
+{
+ return readl_relaxed(cpr_vreg->rbcpr_base + offset);
+}
+
+static void cpr_masked_write(struct cpr_regulator *cpr_vreg, u32 offset,
+ u32 mask, u32 value)
+{
+ u32 reg_val;
+
+ reg_val = readl_relaxed(cpr_vreg->rbcpr_base + offset);
+ reg_val &= ~mask;
+ reg_val |= value & mask;
+ writel_relaxed(reg_val, cpr_vreg->rbcpr_base + offset);
+}
+
+static void cpr_irq_clr(struct cpr_regulator *cpr_vreg)
+{
+ cpr_write(cpr_vreg, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL);
+}
+
+static void cpr_irq_clr_nack(struct cpr_regulator *cpr_vreg)
+{
+ cpr_irq_clr(cpr_vreg);
+ cpr_write(cpr_vreg, REG_RBIF_CONT_NACK_CMD, 1);
+}
+
+static void cpr_irq_clr_ack(struct cpr_regulator *cpr_vreg)
+{
+ cpr_irq_clr(cpr_vreg);
+ cpr_write(cpr_vreg, REG_RBIF_CONT_ACK_CMD, 1);
+}
+
+static void cpr_irq_set(struct cpr_regulator *cpr_vreg, u32 int_bits)
+{
+ cpr_write(cpr_vreg, REG_RBIF_IRQ_EN(cpr_vreg->irq_line), int_bits);
+}
+
+static void cpr_ctl_modify(struct cpr_regulator *cpr_vreg, u32 mask, u32 value)
+{
+ cpr_masked_write(cpr_vreg, REG_RBCPR_CTL, mask, value);
+}
+
+static void cpr_ctl_enable(struct cpr_regulator *cpr_vreg, int corner)
+{
+ u32 val;
+ int fuse_corner = cpr_vreg->corner_map[corner];
+
+ if (cpr_vreg->is_cpr_suspended)
+ return;
+
+ /* Program Consecutive Up & Down */
+ val = ((cpr_vreg->timer_cons_down & RBIF_TIMER_ADJ_CONS_DOWN_MASK)
+ << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT) |
+ (cpr_vreg->timer_cons_up & RBIF_TIMER_ADJ_CONS_UP_MASK);
+ cpr_masked_write(cpr_vreg, REG_RBIF_TIMER_ADJUST,
+ RBIF_TIMER_ADJ_CONS_UP_MASK |
+ RBIF_TIMER_ADJ_CONS_DOWN_MASK, val);
+ cpr_masked_write(cpr_vreg, REG_RBCPR_CTL,
+ RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+ RBCPR_CTL_SW_AUTO_CONT_ACK_EN,
+ cpr_vreg->save_ctl[corner]);
+ cpr_irq_set(cpr_vreg, cpr_vreg->save_irq[corner]);
+
+ if (cpr_is_allowed(cpr_vreg) &&
+ (cpr_vreg->ceiling_volt[fuse_corner] >
+ cpr_vreg->floor_volt[fuse_corner]))
+ val = RBCPR_CTL_LOOP_EN;
+ else
+ val = 0;
+ cpr_ctl_modify(cpr_vreg, RBCPR_CTL_LOOP_EN, val);
+}
+
+static void cpr_ctl_disable(struct cpr_regulator *cpr_vreg)
+{
+ if (cpr_vreg->is_cpr_suspended)
+ return;
+
+ cpr_irq_set(cpr_vreg, 0);
+ cpr_ctl_modify(cpr_vreg, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+ RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0);
+ cpr_masked_write(cpr_vreg, REG_RBIF_TIMER_ADJUST,
+ RBIF_TIMER_ADJ_CONS_UP_MASK |
+ RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0);
+ cpr_irq_clr(cpr_vreg);
+ cpr_write(cpr_vreg, REG_RBIF_CONT_ACK_CMD, 1);
+ cpr_write(cpr_vreg, REG_RBIF_CONT_NACK_CMD, 1);
+ cpr_ctl_modify(cpr_vreg, RBCPR_CTL_LOOP_EN, 0);
+}
+
+static bool cpr_ctl_is_enabled(struct cpr_regulator *cpr_vreg)
+{
+ u32 reg_val;
+
+ reg_val = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+ return reg_val & RBCPR_CTL_LOOP_EN;
+}
+
+static bool cpr_ctl_is_busy(struct cpr_regulator *cpr_vreg)
+{
+ u32 reg_val;
+
+ reg_val = cpr_read(cpr_vreg, REG_RBCPR_RESULT_0);
+ return reg_val & RBCPR_RESULT0_BUSY_MASK;
+}
+
+static void cpr_corner_save(struct cpr_regulator *cpr_vreg, int corner)
+{
+ cpr_vreg->save_ctl[corner] = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+ cpr_vreg->save_irq[corner] =
+ cpr_read(cpr_vreg, REG_RBIF_IRQ_EN(cpr_vreg->irq_line));
+}
+
+static void cpr_corner_restore(struct cpr_regulator *cpr_vreg, int corner)
+{
+ u32 gcnt, ctl, irq, ro_sel;
+ int fuse_corner = cpr_vreg->corner_map[corner];
+
+ ro_sel = cpr_vreg->cpr_fuse_ro_sel[fuse_corner];
+ gcnt = cpr_vreg->gcnt | (cpr_vreg->cpr_fuse_target_quot[fuse_corner] -
+ cpr_vreg->quot_adjust[corner]);
+
+ cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt);
+ ctl = cpr_vreg->save_ctl[corner];
+ cpr_write(cpr_vreg, REG_RBCPR_CTL, ctl);
+ irq = cpr_vreg->save_irq[corner];
+ cpr_irq_set(cpr_vreg, irq);
+ cpr_debug("gcnt = 0x%08x, ctl = 0x%08x, irq = 0x%08x\n",
+ gcnt, ctl, irq);
+}
+
+static void cpr_corner_switch(struct cpr_regulator *cpr_vreg, int corner)
+{
+ if (cpr_vreg->corner == corner)
+ return;
+
+ cpr_corner_restore(cpr_vreg, corner);
+}
+
+/* Module parameter ops */
+static int cpr_enable_param_set(const char *val, const struct kernel_param *kp)
+{
+ int rc;
+ int old_cpr_enable;
+
+ if (!the_cpr) {
+ pr_err("the_cpr = NULL\n");
+ return -ENXIO;
+ }
+
+ mutex_lock(&the_cpr->cpr_mutex);
+
+ old_cpr_enable = cpr_enable;
+ rc = param_set_int(val, kp);
+ if (rc) {
+ pr_err("param_set_int: rc = %d\n", rc);
+ goto _exit;
+ }
+
+ cpr_debug("%d -> %d [corner=%d, fuse_corner=%d]\n",
+ old_cpr_enable, cpr_enable, the_cpr->corner,
+ the_cpr->corner_map[the_cpr->corner]);
+
+ if (the_cpr->cpr_fuse_disable) {
+ /* Already disabled */
+ pr_info("CPR disabled by fuse\n");
+ goto _exit;
+ }
+
+ if ((old_cpr_enable != cpr_enable) && the_cpr->corner) {
+ if (cpr_enable) {
+ cpr_ctl_disable(the_cpr);
+ cpr_irq_clr(the_cpr);
+ cpr_corner_restore(the_cpr, the_cpr->corner);
+ cpr_ctl_enable(the_cpr, the_cpr->corner);
+ } else {
+ cpr_ctl_disable(the_cpr);
+ cpr_irq_set(the_cpr, 0);
+ }
+ }
+
+_exit:
+ mutex_unlock(&the_cpr->cpr_mutex);
+ return 0;
+}
+
+static struct kernel_param_ops cpr_enable_ops = {
+ .set = cpr_enable_param_set,
+ .get = param_get_int,
+};
+
+module_param_cb(cpr_enable, &cpr_enable_ops, &cpr_enable, S_IRUGO | S_IWUSR);
+
+static int cpr_apc_set(struct cpr_regulator *cpr_vreg, u32 new_volt)
+{
+ int max_volt, rc;
+
+ max_volt = cpr_vreg->ceiling_max;
+ rc = regulator_set_voltage(cpr_vreg->vdd_apc, new_volt, max_volt);
+ if (rc)
+ pr_err("set: vdd_apc = %d uV: max= %duV rc=%d\n", new_volt, max_volt, rc);
+ return rc;
+}
+
+static int cpr_mx_get(struct cpr_regulator *cpr_vreg, int corner, int apc_volt)
+{
+ int vdd_mx;
+ int fuse_corner = cpr_vreg->corner_map[corner];
+
+ switch (cpr_vreg->vdd_mx_vmin_method) {
+ case VDD_MX_VMIN_APC:
+ vdd_mx = apc_volt;
+ break;
+ case VDD_MX_VMIN_APC_CORNER_CEILING:
+ vdd_mx = cpr_vreg->ceiling_volt[fuse_corner];
+ break;
+ case VDD_MX_VMIN_APC_SLOW_CORNER_CEILING:
+ vdd_mx = cpr_vreg->ceiling_volt[CPR_FUSE_CORNER_TURBO];
+ break;
+ case VDD_MX_VMIN_MX_VMAX:
+ vdd_mx = cpr_vreg->vdd_mx_vmax;
+ break;
+ case VDD_MX_VMIN_APC_CORNER_MAP:
+ vdd_mx = cpr_vreg->vdd_mx_corner_map[fuse_corner];
+ break;
+ default:
+ vdd_mx = 0;
+ break;
+ }
+
+ return vdd_mx;
+}
+
+static int cpr_mx_set(struct cpr_regulator *cpr_vreg, int corner,
+ int vdd_mx_vmin)
+{
+ int rc;
+ int fuse_corner = cpr_vreg->corner_map[corner];
+
+ rc = regulator_set_voltage(cpr_vreg->vdd_mx, vdd_mx_vmin,
+ cpr_vreg->vdd_mx_vmax);
+ cpr_debug("[corner:%d, fuse_corner:%d] %d uV\n", corner,
+ fuse_corner, vdd_mx_vmin);
+
+ if (!rc) {
+ cpr_vreg->vdd_mx_vmin = vdd_mx_vmin;
+ } else {
+ pr_err("set: vdd_mx [corner:%d, fuse_corner:%d] = %d uV failed: rc=%d\n",
+ corner, fuse_corner, vdd_mx_vmin, rc);
+ }
+ return rc;
+}
+
+static int cpr_scale_voltage(struct cpr_regulator *cpr_vreg, int corner,
+ int new_apc_volt, enum voltage_change_dir dir)
+{
+ int rc = 0, vdd_mx_vmin = 0;
+ int fuse_corner = cpr_vreg->corner_map[corner];
+
+ /* Determine the vdd_mx voltage */
+ if (dir != NO_CHANGE && cpr_vreg->vdd_mx != NULL)
+ vdd_mx_vmin = cpr_mx_get(cpr_vreg, corner, new_apc_volt);
+
+ if (cpr_vreg->mem_acc_vreg && dir == DOWN) {
+ rc = regulator_set_voltage(cpr_vreg->mem_acc_vreg,
+ fuse_corner, fuse_corner);
+ if (rc)
+ pr_err("%s: 1\n", __func__);
+ }
+
+ if (vdd_mx_vmin && dir == UP) {
+ if (vdd_mx_vmin != cpr_vreg->vdd_mx_vmin)
+ rc = cpr_mx_set(cpr_vreg, corner, vdd_mx_vmin);
+ if (rc)
+ pr_err("%s: 2\n", __func__);
+ }
+
+ if (!rc)
+ rc = cpr_apc_set(cpr_vreg, new_apc_volt);
+
+ if (rc)
+ pr_err("%s: 3\n", __func__);
+
+
+ if (!rc && cpr_vreg->mem_acc_vreg && dir == UP)
+ rc = regulator_set_voltage(cpr_vreg->mem_acc_vreg,
+ fuse_corner, fuse_corner);
+ if (rc)
+ pr_err("%s: 4\n", __func__);
+
+ if (!rc && vdd_mx_vmin && dir == DOWN) {
+ if (vdd_mx_vmin != cpr_vreg->vdd_mx_vmin)
+ rc = cpr_mx_set(cpr_vreg, corner, vdd_mx_vmin);
+ }
+
+ return rc;
+}
+
+static void cpr_scale(struct cpr_regulator *cpr_vreg,
+ enum voltage_change_dir dir)
+{
+ u32 reg_val, error_steps, reg_mask;
+ int last_volt, new_volt, corner, fuse_corner;
+ u32 gcnt, quot;
+
+ corner = cpr_vreg->corner;
+ fuse_corner = cpr_vreg->corner_map[corner];
+
+ reg_val = cpr_read(cpr_vreg, REG_RBCPR_RESULT_0);
+
+ error_steps = (reg_val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT)
+ & RBCPR_RESULT0_ERROR_STEPS_MASK;
+ last_volt = cpr_vreg->last_volt[corner];
+
+ cpr_debug_irq("last_volt[corner:%d, fuse_corner:%d] = %d uV\n", corner,
+ fuse_corner, last_volt);
+
+ gcnt = cpr_read(cpr_vreg, REG_RBCPR_GCNT_TARGET
+ (cpr_vreg->cpr_fuse_ro_sel[fuse_corner]));
+ quot = gcnt & ((1 << RBCPR_GCNT_TARGET_GCNT_SHIFT) - 1);
+
+ if (dir == UP) {
+ cpr_debug_irq("Up: cpr status = 0x%08x (error_steps=%d)\n",
+ reg_val, error_steps);
+
+ if (last_volt >= cpr_vreg->ceiling_volt[fuse_corner]) {
+ cpr_debug_irq(
+ "[corn:%d, fuse_corn:%d] @ ceiling: %d >= %d: NACK\n",
+ corner, fuse_corner, last_volt,
+ cpr_vreg->ceiling_volt[fuse_corner]);
+ cpr_irq_clr_nack(cpr_vreg);
+
+ cpr_debug_irq("gcnt = 0x%08x (quot = %d)\n", gcnt,
+ quot);
+
+ /* Maximize the UP threshold */
+ reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK <<
+ RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ reg_val = reg_mask;
+ cpr_ctl_modify(cpr_vreg, reg_mask, reg_val);
+
+ /* Disable UP interrupt */
+ cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT & ~CPR_INT_UP);
+
+ return;
+ }
+
+ if (error_steps > cpr_vreg->vdd_apc_step_up_limit) {
+ cpr_debug_irq("%d is over up-limit(%d): Clamp\n",
+ error_steps,
+ cpr_vreg->vdd_apc_step_up_limit);
+ error_steps = cpr_vreg->vdd_apc_step_up_limit;
+ }
+
+ /* Calculate new voltage */
+ new_volt = last_volt + (error_steps * cpr_vreg->step_volt);
+ if (new_volt > cpr_vreg->ceiling_volt[fuse_corner]) {
+ cpr_debug_irq("new_volt(%d) >= ceiling(%d): Clamp\n",
+ new_volt,
+ cpr_vreg->ceiling_volt[fuse_corner]);
+
+ new_volt = cpr_vreg->ceiling_volt[fuse_corner];
+ }
+
+ if (cpr_scale_voltage(cpr_vreg, corner, new_volt, dir)) {
+ cpr_irq_clr_nack(cpr_vreg);
+ return;
+ }
+ cpr_vreg->last_volt[corner] = new_volt;
+
+ /* Disable auto nack down */
+ reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+ reg_val = 0;
+
+ cpr_ctl_modify(cpr_vreg, reg_mask, reg_val);
+
+ /* Re-enable default interrupts */
+ cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT);
+
+ /* Ack */
+ cpr_irq_clr_ack(cpr_vreg);
+
+ cpr_debug_irq(
+ "UP: -> new_volt[corner:%d, fuse_corner:%d] = %d uV\n",
+ corner, fuse_corner, new_volt);
+ } else if (dir == DOWN) {
+ cpr_debug_irq("Down: cpr status = 0x%08x (error_steps=%d)\n",
+ reg_val, error_steps);
+
+ if (last_volt <= cpr_vreg->floor_volt[fuse_corner]) {
+ cpr_debug_irq(
+ "[corn:%d, fuse_corner:%d] @ floor: %d <= %d: NACK\n",
+ corner, fuse_corner, last_volt,
+ cpr_vreg->floor_volt[fuse_corner]);
+ cpr_irq_clr_nack(cpr_vreg);
+
+ cpr_debug_irq("gcnt = 0x%08x (quot = %d)\n", gcnt,
+ quot);
+
+ /* Enable auto nack down */
+ reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+ reg_val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+
+ cpr_ctl_modify(cpr_vreg, reg_mask, reg_val);
+
+ /* Disable DOWN interrupt */
+ cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT & ~CPR_INT_DOWN);
+
+ return;
+ }
+
+ if (error_steps > cpr_vreg->vdd_apc_step_down_limit) {
+ cpr_debug_irq("%d is over down-limit(%d): Clamp\n",
+ error_steps,
+ cpr_vreg->vdd_apc_step_down_limit);
+ error_steps = cpr_vreg->vdd_apc_step_down_limit;
+ }
+
+ /* Calculate new voltage */
+ new_volt = last_volt - (error_steps * cpr_vreg->step_volt);
+ if (new_volt < cpr_vreg->floor_volt[fuse_corner]) {
+ cpr_debug_irq("new_volt(%d) < floor(%d): Clamp\n",
+ new_volt,
+ cpr_vreg->floor_volt[fuse_corner]);
+ new_volt = cpr_vreg->floor_volt[fuse_corner];
+ }
+
+ if (cpr_scale_voltage(cpr_vreg, corner, new_volt, dir)) {
+ cpr_irq_clr_nack(cpr_vreg);
+ return;
+ }
+ cpr_vreg->last_volt[corner] = new_volt;
+
+ /* Restore default threshold for UP */
+ reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK <<
+ RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ reg_val = cpr_vreg->up_threshold <<
+ RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ cpr_ctl_modify(cpr_vreg, reg_mask, reg_val);
+
+ /* Re-enable default interrupts */
+ cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT);
+
+ /* Ack */
+ cpr_irq_clr_ack(cpr_vreg);
+
+ cpr_debug_irq(
+ "DOWN: -> new_volt[corner:%d, fuse_corner:%d] = %d uV\n",
+ corner, fuse_corner, new_volt);
+ }
+}
+
+static irqreturn_t cpr_irq_handler(int irq, void *dev)
+{
+ struct cpr_regulator *cpr_vreg = dev;
+ u32 reg_val;
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+
+ reg_val = cpr_read(cpr_vreg, REG_RBIF_IRQ_STATUS);
+ if (cpr_vreg->flags & FLAGS_IGNORE_1ST_IRQ_STATUS)
+ reg_val = cpr_read(cpr_vreg, REG_RBIF_IRQ_STATUS);
+
+ cpr_debug_irq("IRQ_STATUS = 0x%02X\n", reg_val);
+
+ if (!cpr_ctl_is_enabled(cpr_vreg)) {
+ cpr_debug_irq("CPR is disabled\n");
+ goto _exit;
+ } else if (cpr_ctl_is_busy(cpr_vreg)) {
+ cpr_debug_irq("CPR measurement is not ready\n");
+ goto _exit;
+ } else if (!cpr_is_allowed(cpr_vreg)) {
+ reg_val = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+ pr_err("Interrupt broken? RBCPR_CTL = 0x%02X\n", reg_val);
+ goto _exit;
+ }
+
+ /* Following sequence of handling is as per each IRQ's priority */
+ if (reg_val & CPR_INT_UP) {
+ cpr_scale(cpr_vreg, UP);
+ } else if (reg_val & CPR_INT_DOWN) {
+ cpr_scale(cpr_vreg, DOWN);
+ } else if (reg_val & CPR_INT_MIN) {
+ cpr_irq_clr_nack(cpr_vreg);
+ } else if (reg_val & CPR_INT_MAX) {
+ cpr_irq_clr_nack(cpr_vreg);
+ } else if (reg_val & CPR_INT_MID) {
+ /* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */
+ cpr_debug_irq("IRQ occured for Mid Flag\n");
+ } else {
+ pr_err("IRQ occured for unknown flag (0x%08x)\n", reg_val);
+ }
+
+ /* Save register values for the corner */
+ cpr_corner_save(cpr_vreg, cpr_vreg->corner);
+
+_exit:
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+ return IRQ_HANDLED;
+}
+
+static int cpr_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+
+ return cpr_vreg->vreg_enabled;
+}
+
+static int cpr_regulator_enable(struct regulator_dev *rdev)
+{
+ struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+ int rc = 0;
+
+ /* Enable dependency power before vdd_apc */
+ if (cpr_vreg->vdd_mx) {
+ rc = regulator_enable(cpr_vreg->vdd_mx);
+ if (rc) {
+ pr_err("regulator_enable: vdd_mx: rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ rc = regulator_enable(cpr_vreg->vdd_apc);
+ if (rc) {
+ pr_err("regulator_enable: vdd_apc: rc=%d\n", rc);
+ return rc;
+ }
+
+ cpr_vreg->vreg_enabled = true;
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+ if (cpr_is_allowed(cpr_vreg) && cpr_vreg->corner) {
+ cpr_irq_clr(cpr_vreg);
+ cpr_corner_switch(cpr_vreg, cpr_vreg->corner);
+ cpr_ctl_enable(cpr_vreg, cpr_vreg->corner);
+ }
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+
+ return rc;
+}
+
+static int cpr_regulator_disable(struct regulator_dev *rdev)
+{
+ struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+ int rc;
+
+ rc = regulator_disable(cpr_vreg->vdd_apc);
+ if (!rc) {
+ if (cpr_vreg->vdd_mx)
+ rc = regulator_disable(cpr_vreg->vdd_mx);
+
+ if (rc) {
+ pr_err("regulator_disable: vdd_mx: rc=%d\n", rc);
+ return rc;
+ }
+
+ cpr_vreg->vreg_enabled = false;
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+ if (cpr_is_allowed(cpr_vreg))
+ cpr_ctl_disable(cpr_vreg);
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+ } else {
+ pr_err("regulator_disable: vdd_apc: rc=%d\n", rc);
+ }
+
+ return rc;
+}
+
+static int cpr_regulator_set_voltage(struct regulator_dev *rdev,
+ int corner, int corner_max, unsigned *selector)
+{
+ struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+ int rc;
+ int new_volt;
+ enum voltage_change_dir change_dir = NO_CHANGE;
+ int fuse_corner = cpr_vreg->corner_map[corner];
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+
+ if (cpr_is_allowed(cpr_vreg)) {
+ cpr_ctl_disable(cpr_vreg);
+ new_volt = cpr_vreg->last_volt[corner];
+ } else {
+ new_volt = cpr_vreg->pvs_corner_v[fuse_corner];
+ }
+
+ pr_info("[corner:%d, fuse_corner:%d] = %d uV\n", corner, fuse_corner,
+ new_volt);
+
+ if (corner > cpr_vreg->corner)
+ change_dir = UP;
+ else if (corner < cpr_vreg->corner)
+ change_dir = DOWN;
+
+ rc = cpr_scale_voltage(cpr_vreg, corner, new_volt, change_dir);
+ if (rc) {
+ pr_err("%s: cpr_scale_voltage told %d\n", __func__, rc);
+ goto _exit;
+ }
+
+ if (cpr_is_allowed(cpr_vreg) && cpr_vreg->vreg_enabled) {
+ cpr_irq_clr(cpr_vreg);
+ cpr_corner_switch(cpr_vreg, corner);
+ cpr_ctl_enable(cpr_vreg, corner);
+ }
+
+ cpr_vreg->corner = corner;
+
+_exit:
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+
+ return rc;
+}
+
+static int cpr_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+
+ return cpr_vreg->corner;
+}
+
+static struct regulator_ops cpr_corner_ops = {
+ .enable = cpr_regulator_enable,
+ .disable = cpr_regulator_disable,
+ .is_enabled = cpr_regulator_is_enabled,
+ .set_voltage = cpr_regulator_set_voltage,
+ .get_voltage = cpr_regulator_get_voltage,
+};
+
+#ifdef CONFIG_PM
+static int cpr_suspend(struct cpr_regulator *cpr_vreg)
+{
+ cpr_debug("suspend\n");
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+
+ cpr_ctl_disable(cpr_vreg);
+
+ cpr_irq_clr(cpr_vreg);
+
+ cpr_vreg->is_cpr_suspended = true;
+
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+ return 0;
+}
+
+static int cpr_resume(struct cpr_regulator *cpr_vreg)
+
+{
+ cpr_debug("resume\n");
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+
+ cpr_vreg->is_cpr_suspended = false;
+ cpr_irq_clr(cpr_vreg);
+
+ cpr_ctl_enable(cpr_vreg, cpr_vreg->corner);
+
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+ return 0;
+}
+
+static int cpr_regulator_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct cpr_regulator *cpr_vreg = platform_get_drvdata(pdev);
+
+ if (cpr_is_allowed(cpr_vreg))
+ return cpr_suspend(cpr_vreg);
+ else
+ return 0;
+}
+
+static int cpr_regulator_resume(struct platform_device *pdev)
+{
+ struct cpr_regulator *cpr_vreg = platform_get_drvdata(pdev);
+
+ if (cpr_is_allowed(cpr_vreg))
+ return cpr_resume(cpr_vreg);
+ else
+ return 0;
+}
+#else
+#define cpr_regulator_suspend NULL
+#define cpr_regulator_resume NULL
+#endif
+
+static int cpr_config(struct cpr_regulator *cpr_vreg, struct device *dev)
+{
+ int i;
+ u32 val, gcnt, reg;
+ void __iomem *rbcpr_clk;
+ int size;
+
+ /* Use 19.2 MHz clock for CPR. */
+ rbcpr_clk = ioremap(cpr_vreg->rbcpr_clk_addr, 4);
+ if (!rbcpr_clk) {
+ pr_err("Unable to map rbcpr_clk\n");
+ return -EINVAL;
+ }
+ reg = readl_relaxed(rbcpr_clk);
+ reg &= ~RBCPR_CLK_SEL_MASK;
+ reg |= RBCPR_CLK_SEL_19P2_MHZ & RBCPR_CLK_SEL_MASK;
+ writel_relaxed(reg, rbcpr_clk);
+ iounmap(rbcpr_clk);
+
+ /* Disable interrupt and CPR */
+ cpr_write(cpr_vreg, REG_RBIF_IRQ_EN(cpr_vreg->irq_line), 0);
+ cpr_write(cpr_vreg, REG_RBCPR_CTL, 0);
+
+ /* Program the default HW Ceiling, Floor and vlevel */
+ val = ((RBIF_LIMIT_CEILING_DEFAULT & RBIF_LIMIT_CEILING_MASK)
+ << RBIF_LIMIT_CEILING_SHIFT)
+ | (RBIF_LIMIT_FLOOR_DEFAULT & RBIF_LIMIT_FLOOR_MASK);
+ cpr_write(cpr_vreg, REG_RBIF_LIMIT, val);
+ cpr_write(cpr_vreg, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT);
+
+ /* Clear the target quotient value and gate count of all ROs */
+ for (i = 0; i < CPR_NUM_RING_OSC; i++)
+ cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(i), 0);
+
+ /* Init and save gcnt */
+ gcnt = (cpr_vreg->ref_clk_khz * cpr_vreg->gcnt_time_us) / 1000;
+ gcnt = (gcnt & RBCPR_GCNT_TARGET_GCNT_MASK) <<
+ RBCPR_GCNT_TARGET_GCNT_SHIFT;
+ cpr_vreg->gcnt = gcnt;
+
+ /* Program the step quotient and idle clocks */
+ val = ((cpr_vreg->idle_clocks & RBCPR_STEP_QUOT_IDLE_CLK_MASK)
+ << RBCPR_STEP_QUOT_IDLE_CLK_SHIFT) |
+ (cpr_vreg->step_quotient & RBCPR_STEP_QUOT_STEPQUOT_MASK);
+ cpr_write(cpr_vreg, REG_RBCPR_STEP_QUOT, val);
+
+ /* Program the delay count for the timer */
+ val = (cpr_vreg->ref_clk_khz * cpr_vreg->timer_delay_us) / 1000;
+ cpr_write(cpr_vreg, REG_RBCPR_TIMER_INTERVAL, val);
+ pr_info("Timer count: 0x%0x (for %d us)\n", val,
+ cpr_vreg->timer_delay_us);
+
+ /* Program Consecutive Up & Down */
+ val = ((cpr_vreg->timer_cons_down & RBIF_TIMER_ADJ_CONS_DOWN_MASK)
+ << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT) |
+ (cpr_vreg->timer_cons_up & RBIF_TIMER_ADJ_CONS_UP_MASK);
+ cpr_write(cpr_vreg, REG_RBIF_TIMER_ADJUST, val);
+
+ /* Program the control register */
+ cpr_vreg->up_threshold &= RBCPR_CTL_UP_THRESHOLD_MASK;
+ cpr_vreg->down_threshold &= RBCPR_CTL_DN_THRESHOLD_MASK;
+ val = (cpr_vreg->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT)
+ | (cpr_vreg->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT);
+ val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE;
+ val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN;
+ cpr_write(cpr_vreg, REG_RBCPR_CTL, val);
+
+ cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT);
+
+ val = cpr_read(cpr_vreg, REG_RBCPR_VERSION);
+ if (val <= RBCPR_VER_2)
+ cpr_vreg->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS;
+
+ size = cpr_vreg->num_corners + 1;
+ cpr_vreg->save_ctl = devm_kzalloc(dev, sizeof(int) * size, GFP_KERNEL);
+ cpr_vreg->save_irq = devm_kzalloc(dev, sizeof(int) * size, GFP_KERNEL);
+ if (!cpr_vreg->save_ctl || !cpr_vreg->save_irq)
+ return -ENOMEM;
+
+ for (i = 1; i < size; i++)
+ cpr_corner_save(cpr_vreg, i);
+
+ return 0;
+}
+
+static int cpr_fuse_is_setting_expected(struct cpr_regulator *cpr_vreg,
+ u32 sel_array[5])
+{
+ u64 fuse_bits;
+ u32 ret;
+
+ fuse_bits = cpr_read_efuse_row(cpr_vreg, sel_array[0], sel_array[4]);
+ ret = (fuse_bits >> sel_array[1]) & ((1 << sel_array[2]) - 1);
+ if (ret == sel_array[3])
+ ret = 1;
+ else
+ ret = 0;
+
+ pr_info("[row:%d] = 0x%llx @%d:%d == %d ?: %s\n",
+ sel_array[0], fuse_bits,
+ sel_array[1], sel_array[2],
+ sel_array[3],
+ (ret == 1) ? "yes" : "no");
+ return ret;
+}
+
+static int cpr_voltage_uplift_wa_inc_volt(struct cpr_regulator *cpr_vreg,
+ struct device_node *of_node)
+{
+ u32 uplift_voltage;
+ u32 uplift_max_volt = 0;
+ int rc;
+
+ rc = of_property_read_u32(of_node,
+ "qcom,cpr-uplift-voltage", &uplift_voltage);
+ if (rc < 0) {
+ pr_err("cpr-uplift-voltage is missing, rc = %d", rc);
+ return rc;
+ }
+ rc = of_property_read_u32(of_node,
+ "qcom,cpr-uplift-max-volt", &uplift_max_volt);
+ if (rc < 0) {
+ pr_err("cpr-uplift-max-volt is missing, rc = %d", rc);
+ return rc;
+ }
+
+ cpr_vreg->pvs_corner_v[CPR_FUSE_CORNER_TURBO] += uplift_voltage;
+ if (cpr_vreg->pvs_corner_v[CPR_FUSE_CORNER_TURBO] > uplift_max_volt)
+ cpr_vreg->pvs_corner_v[CPR_FUSE_CORNER_TURBO] = uplift_max_volt;
+
+ return rc;
+}
+
+/*
+ * Property qcom,cpr-fuse-init-voltage specifies the fuse position of the
+ * initial voltage for each fuse corner. MSB of the fuse value is a sign
+ * bit, and the remaining bits define the steps of the offset. Each step has
+ * units of microvolts defined in the qcom,cpr-init-voltage-step property.
+ * The initial voltages can be calculated using the formula:
+ * pvs_corner_v[corner] = ceiling_volt[corner] + (sign * steps * step_size_uv)
+ */
+static int cpr_pvs_per_corner_init(struct device_node *of_node,
+ struct cpr_regulator *cpr_vreg)
+{
+ u64 efuse_bits;
+ int i, size, sign, steps, step_size_uv, rc;
+ u32 *fuse_sel, *tmp;
+ struct property *prop;
+
+ prop = of_find_property(of_node, "qcom,cpr-fuse-init-voltage", NULL);
+ if (!prop) {
+ pr_err("qcom,cpr-fuse-init-voltage is missing\n");
+ return -EINVAL;
+ }
+ size = prop->length / sizeof(u32);
+ if (size != (CPR_FUSE_CORNER_MAX - 1) * 4) {
+ pr_err("fuse position for init voltages is invalid\n");
+ return -EINVAL;
+ }
+ fuse_sel = kzalloc(sizeof(u32) * size, GFP_KERNEL);
+ if (!fuse_sel) {
+ pr_err("memory alloc failed.\n");
+ return -ENOMEM;
+ }
+ rc = of_property_read_u32_array(of_node, "qcom,cpr-fuse-init-voltage",
+ fuse_sel, size);
+ if (rc < 0) {
+ pr_err("read cpr-fuse-init-voltage failed, rc = %d\n", rc);
+ kfree(fuse_sel);
+ return rc;
+ }
+ rc = of_property_read_u32(of_node, "qcom,cpr-init-voltage-step",
+ &step_size_uv);
+ if (rc < 0) {
+ pr_err("read cpr-init-voltage-step failed, rc = %d\n", rc);
+ kfree(fuse_sel);
+ return rc;
+ }
+ tmp = fuse_sel;
+ for (i = CPR_FUSE_CORNER_SVS; i < CPR_FUSE_CORNER_MAX; i++) {
+ efuse_bits = cpr_read_efuse_param(cpr_vreg, fuse_sel[0],
+ fuse_sel[1], fuse_sel[2], fuse_sel[3]);
+ sign = (efuse_bits & (1 << (fuse_sel[2] - 1))) ? -1 : 1;
+ steps = efuse_bits & ((1 << (fuse_sel[2] - 1)) - 1);
+ pr_debug("corner %d: sign = %d, steps = %d\n", i, sign, steps);
+ cpr_vreg->pvs_corner_v[i] = cpr_vreg->ceiling_volt[i] +
+ sign * steps * step_size_uv;
+ cpr_vreg->pvs_corner_v[i] = DIV_ROUND_UP(
+ cpr_vreg->pvs_corner_v[i],
+ cpr_vreg->step_volt) *
+ cpr_vreg->step_volt;
+ if (cpr_vreg->pvs_corner_v[i] > cpr_vreg->ceiling_volt[i]) {
+ pr_info("Warning: initial voltage[%d] %d above ceiling %d\n",
+ i, cpr_vreg->pvs_corner_v[i],
+ cpr_vreg->ceiling_volt[i]);
+ cpr_vreg->pvs_corner_v[i] = cpr_vreg->ceiling_volt[i];
+ } else if (cpr_vreg->pvs_corner_v[i] <
+ cpr_vreg->floor_volt[i]) {
+ pr_info("Warning: initial voltage[%d] %d below floor %d\n",
+ i, cpr_vreg->pvs_corner_v[i],
+ cpr_vreg->floor_volt[i]);
+ cpr_vreg->pvs_corner_v[i] = cpr_vreg->floor_volt[i];
+ }
+ fuse_sel += 4;
+ }
+ kfree(tmp);
+
+ return 0;
+}
+
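+/*
+ * Worked example of the init-voltage formula above (hypothetical fuse
+ * values): with ceiling_volt[corner] = 1150000 uV, a 10000 uV step size and
+ * a 5-bit fuse field reading 0b10011 (sign bit set, steps = 3), the initial
+ * voltage is 1150000 + (-1 * 3 * 10000) = 1120000 uV, which is then rounded
+ * up to a step_volt multiple and clamped between the corner's floor and
+ * ceiling.
+ */
+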
+/*
+ * A single PVS bin is stored in a fuse whose position is defined either
+ * in the qcom,pvs-fuse-redun property or in the qcom,pvs-fuse property.
+ * The fuse value defined in the qcom,pvs-fuse-redun-sel property is used
+ * to pick between the primary or redundant PVS fuse position.
+ * After the PVS bin value is read out successfully, it is used as the row
+ * index to get initial voltages for each fuse corner from the voltage table
+ * defined in the qcom,pvs-voltage-table property.
+ */
+static int cpr_pvs_single_bin_init(struct device_node *of_node,
+ struct cpr_regulator *cpr_vreg)
+{
+ u64 efuse_bits;
+ u32 pvs_fuse[4], pvs_fuse_redun_sel[5];
+ int rc, i, stripe_size;
+ bool redundant;
+ size_t pvs_bins;
+ u32 *tmp;
+
+ rc = of_property_read_u32_array(of_node, "qcom,pvs-fuse-redun-sel",
+ pvs_fuse_redun_sel, 5);
+ if (rc < 0) {
+ pr_err("pvs-fuse-redun-sel missing: rc=%d\n", rc);
+ return rc;
+ }
+
+ redundant = cpr_fuse_is_setting_expected(cpr_vreg, pvs_fuse_redun_sel);
+ if (redundant) {
+ rc = of_property_read_u32_array(of_node, "qcom,pvs-fuse-redun",
+ pvs_fuse, 4);
+ if (rc < 0) {
+ pr_err("pvs-fuse-redun missing: rc=%d\n", rc);
+ return rc;
+ }
+ } else {
+ rc = of_property_read_u32_array(of_node, "qcom,pvs-fuse",
+ pvs_fuse, 4);
+ if (rc < 0) {
+ pr_err("pvs-fuse missing: rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ /* Construct PVS process # from the efuse bits */
+ efuse_bits = cpr_read_efuse_row(cpr_vreg, pvs_fuse[0], pvs_fuse[3]);
+ cpr_vreg->pvs_bin = (efuse_bits >> pvs_fuse[1]) &
+ ((1 << pvs_fuse[2]) - 1);
+ pvs_bins = 1 << pvs_fuse[2];
+ stripe_size = CPR_FUSE_CORNER_MAX - 1;
+ tmp = kzalloc(sizeof(u32) * pvs_bins * stripe_size, GFP_KERNEL);
+ if (!tmp) {
+ pr_err("memory alloc failed\n");
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,pvs-voltage-table",
+ tmp, pvs_bins * stripe_size);
+ if (rc < 0) {
+ pr_err("pvs-voltage-table missing: rc=%d\n", rc);
+ kfree(tmp);
+ return rc;
+ }
+
+ for (i = CPR_FUSE_CORNER_SVS; i < CPR_FUSE_CORNER_MAX; i++)
+ cpr_vreg->pvs_corner_v[i] = tmp[cpr_vreg->pvs_bin *
+ stripe_size + i - 1];
+ kfree(tmp);
+
+ return 0;
+}
+
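+/*
+ * Example of the bin-to-table lookup above (hypothetical sizes): a 5-bit PVS
+ * fuse field gives 32 bins, and qcom,pvs-voltage-table holds one stripe of
+ * (CPR_FUSE_CORNER_MAX - 1) voltages per bin -- three per bin for the
+ * SVS/NORMAL/TURBO corners used here -- so bin 7 takes its initial voltages
+ * from entries 21, 22 and 23 of the table.
+ */
+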
+/*
+ * The initial voltage for each fuse corner may be determined by one of two
+ * possible styles of fuse. If qcom,cpr-fuse-init-voltage is present, then
+ * the initial voltages are encoded in a fuse for each fuse corner. If it is
+ * not present, then the initial voltages are all determined using a single
+ * PVS bin fuse value.
+ */
+static int cpr_pvs_init(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ int i, rc;
+
+ rc = of_property_read_u32(of_node, "qcom,cpr-apc-volt-step",
+ &cpr_vreg->step_volt);
+ if (rc < 0) {
+ pr_err("read cpr-apc-volt-step failed, rc = %d\n", rc);
+ return rc;
+ } else if (cpr_vreg->step_volt == 0) {
+ pr_err("apc voltage step size can't be set to 0.\n");
+ return -EINVAL;
+ }
+
+ if (of_find_property(of_node, "qcom,cpr-fuse-init-voltage", NULL)) {
+ rc = cpr_pvs_per_corner_init(of_node, cpr_vreg);
+ if (rc < 0) {
+ pr_err("get pvs per corner failed, rc = %d", rc);
+ return rc;
+ }
+ } else {
+ rc = cpr_pvs_single_bin_init(of_node, cpr_vreg);
+ if (rc < 0) {
+ pr_err("get pvs from single bin failed, rc = %d", rc);
+ return rc;
+ }
+ }
+
+ if (cpr_vreg->flags & FLAGS_UPLIFT_QUOT_VOLT) {
+ rc = cpr_voltage_uplift_wa_inc_volt(cpr_vreg, of_node);
+ if (rc < 0) {
+ pr_err("pvs volt uplift wa apply failed: %d", rc);
+ return rc;
+ }
+ }
+
+ if (cpr_vreg->pvs_corner_v[CPR_FUSE_CORNER_TURBO] >
+ cpr_vreg->ceiling_volt[CPR_FUSE_CORNER_TURBO])
+ cpr_vreg->ceiling_volt[CPR_FUSE_CORNER_TURBO] =
+ cpr_vreg->pvs_corner_v[CPR_FUSE_CORNER_TURBO];
+
+ for (i = CPR_FUSE_CORNER_SVS; i < CPR_FUSE_CORNER_TURBO; i++)
+ if (cpr_vreg->pvs_corner_v[i] > cpr_vreg->ceiling_volt[i])
+ cpr_vreg->pvs_corner_v[i] = cpr_vreg->ceiling_volt[i];
+ else if (cpr_vreg->pvs_corner_v[i] < cpr_vreg->floor_volt[i])
+ cpr_vreg->pvs_corner_v[i] = cpr_vreg->floor_volt[i];
+
+ cpr_vreg->ceiling_max = cpr_vreg->ceiling_volt[CPR_FUSE_CORNER_TURBO];
+
+ pr_info("pvs voltage: [%d %d %d] uV\n",
+ cpr_vreg->pvs_corner_v[CPR_FUSE_CORNER_SVS],
+ cpr_vreg->pvs_corner_v[CPR_FUSE_CORNER_NORMAL],
+ cpr_vreg->pvs_corner_v[CPR_FUSE_CORNER_TURBO]);
+ pr_info("ceiling voltage: [%d %d %d] uV\n",
+ cpr_vreg->ceiling_volt[CPR_FUSE_CORNER_SVS],
+ cpr_vreg->ceiling_volt[CPR_FUSE_CORNER_NORMAL],
+ cpr_vreg->ceiling_volt[CPR_FUSE_CORNER_TURBO]);
+ pr_info("floor voltage: [%d %d %d] uV\n",
+ cpr_vreg->floor_volt[CPR_FUSE_CORNER_SVS],
+ cpr_vreg->floor_volt[CPR_FUSE_CORNER_NORMAL],
+ cpr_vreg->floor_volt[CPR_FUSE_CORNER_TURBO]);
+
+ return 0;
+}
+
+#define CPR_PROP_READ_U32(of_node, cpr_property, cpr_config, rc) \
+do { \
+ if (!rc) { \
+ rc = of_property_read_u32(of_node, \
+ "qcom," cpr_property, \
+ cpr_config); \
+ if (rc) { \
+ pr_err("Missing " #cpr_property \
+ ": rc = %d\n", rc); \
+ } \
+ } \
+} while (0)
+
+static int cpr_apc_init(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ int i, rc = 0;
+
+ pr_err("%s -----------------\n", __func__);
+
+ for (i = 0; i < ARRAY_SIZE(vdd_apc_name); i++) {
+ cpr_vreg->vdd_apc = devm_regulator_get(&pdev->dev,
+ vdd_apc_name[i]);
+ rc = PTR_RET(cpr_vreg->vdd_apc);
+ if (!IS_ERR_OR_NULL(cpr_vreg->vdd_apc))
+ break;
+ }
+
+ if (rc) {
+ if (rc != -EPROBE_DEFER)
+ pr_err("devm_regulator_get: rc=%d\n", rc);
+ else
+ pr_err("%s: deferring\n", __func__);
+ return rc;
+ }
+
+ /* Check dependencies */
+ if (of_property_read_bool(of_node, "vdd-mx-supply")) {
+ cpr_vreg->vdd_mx = devm_regulator_get(&pdev->dev, "vdd-mx");
+ if (IS_ERR_OR_NULL(cpr_vreg->vdd_mx)) {
+ rc = PTR_RET(cpr_vreg->vdd_mx);
+ if (rc != -EPROBE_DEFER)
+ pr_err("devm_regulator_get: vdd-mx: rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ /* Parse dependency parameters */
+ if (cpr_vreg->vdd_mx) {
+ rc = of_property_read_u32(of_node, "qcom,vdd-mx-vmax",
+ &cpr_vreg->vdd_mx_vmax);
+ if (rc < 0) {
+ pr_err("vdd-mx-vmax missing: rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,vdd-mx-vmin-method",
+ &cpr_vreg->vdd_mx_vmin_method);
+ if (rc < 0) {
+ pr_err("vdd-mx-vmin-method missing: rc=%d\n", rc);
+ return rc;
+ }
+ if (cpr_vreg->vdd_mx_vmin_method > VDD_MX_VMIN_APC_CORNER_MAP) {
+ pr_err("Invalid vdd-mx-vmin-method(%d)\n",
+ cpr_vreg->vdd_mx_vmin_method);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32_array(of_node,
+ "qcom,vdd-mx-corner-map",
+ &cpr_vreg->vdd_mx_corner_map[1],
+ CPR_FUSE_CORNER_MAX - 1);
+ if (rc && cpr_vreg->vdd_mx_vmin_method ==
+ VDD_MX_VMIN_APC_CORNER_MAP) {
+ pr_err("qcom,vdd-mx-corner-map missing: rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ }
+
+ return 0;
+}
+
+static void cpr_apc_exit(struct cpr_regulator *cpr_vreg)
+{
+ if (cpr_vreg->vreg_enabled) {
+ regulator_disable(cpr_vreg->vdd_apc);
+
+ if (cpr_vreg->vdd_mx)
+ regulator_disable(cpr_vreg->vdd_mx);
+ }
+}
+
+static int cpr_voltage_uplift_wa_inc_quot(struct cpr_regulator *cpr_vreg,
+ struct device_node *of_node)
+{
+ u32 delta_quot[3];
+ int rc, i;
+
+ rc = of_property_read_u32_array(of_node,
+ "qcom,cpr-uplift-quotient", delta_quot, 3);
+ if (rc < 0) {
+		pr_err("cpr-uplift-quotient is missing: rc=%d\n", rc);
+ return rc;
+ }
+ for (i = CPR_FUSE_CORNER_SVS; i < CPR_FUSE_CORNER_MAX; i++)
+ cpr_vreg->cpr_fuse_target_quot[i] += delta_quot[i-1];
+ return rc;
+}
+
+static void cpr_parse_pvs_version_fuse(struct cpr_regulator *cpr_vreg,
+ struct device_node *of_node)
+{
+ int rc;
+ u64 fuse_bits;
+ u32 fuse_sel[4];
+
+ rc = of_property_read_u32_array(of_node,
+ "qcom,pvs-version-fuse-sel", fuse_sel, 4);
+ if (!rc) {
+ fuse_bits = cpr_read_efuse_row(cpr_vreg,
+ fuse_sel[0], fuse_sel[3]);
+ cpr_vreg->pvs_version = (fuse_bits >> fuse_sel[1]) &
+ ((1 << fuse_sel[2]) - 1);
+ pr_info("[row: %d]: 0x%llx, pvs_version = %d\n",
+ fuse_sel[0], fuse_bits, cpr_vreg->pvs_version);
+ } else {
+ cpr_vreg->pvs_version = 0;
+ }
+}
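
The qcom,pvs-version-fuse-sel and qcom,speed-bin-fuse-sel properties both use the same 4-cell selector convention: <row, bit shift, field width, use-tz-api>. A small standalone sketch of the field decode, using an invented efuse row value and selector:

#include <stdio.h>
#include <stdint.h>

/* extract a bit field from one 64-bit efuse row */
static uint32_t decode_fuse_field(uint64_t row_bits, uint32_t shift,
				  uint32_t width)
{
	return (row_bits >> shift) & ((1u << width) - 1);
}

int main(void)
{
	uint64_t row_bits = 0x00000000001a4000ULL;  /* hypothetical efuse row */
	uint32_t fuse_sel[4] = { 37, 14, 3, 0 };    /* row, shift, width, tz */

	printf("pvs_version = %u\n",
	       (unsigned)decode_fuse_field(row_bits, fuse_sel[1], fuse_sel[2]));
	return 0;
}
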
+
+/*
+ * cpr_get_corner_quot_adjustment() - get the quot_adjust for each corner.
+ *
+ * Get the corner to fuse corner (SVS/NORMAL/TURBO) mappings and the corner to
+ * APC clock frequency mappings from the device tree.
+ * Calculate the quotient adjustment scaling factor for the corners mapping to
+ * the TURBO fuse corner, and then the quotient adjustment for each corner
+ * that maps to the TURBO fuse corner.
+ */
+static int cpr_get_corner_quot_adjustment(struct cpr_regulator *cpr_vreg,
+ struct device *dev)
+{
+ int rc = 0;
+ int i, size;
+ struct property *prop;
+ bool corners_mapped;
+ u32 *tmp, *freq_mappings = NULL;
+ u32 scaling, max_factor;
+ u32 corner, turbo_corner = 0, normal_corner = 0, svs_corner = 0;
+ u32 freq_turbo, freq_normal, freq_corner;
+
+ prop = of_find_property(dev->of_node, "qcom,cpr-corner-map", NULL);
+
+ if (prop) {
+ size = prop->length / sizeof(u32);
+ corners_mapped = true;
+ } else {
+ size = CPR_FUSE_CORNER_MAX - 1;
+ corners_mapped = false;
+ }
+
+ cpr_vreg->corner_map = devm_kzalloc(dev, sizeof(int) * (size + 1),
+ GFP_KERNEL);
+ if (!cpr_vreg->corner_map) {
+ pr_err("Can't allocate memory for cpr_vreg->corner_map\n");
+ return -ENOMEM;
+ }
+ cpr_vreg->num_corners = size;
+
+ cpr_vreg->quot_adjust = devm_kzalloc(dev,
+ sizeof(u32) * (cpr_vreg->num_corners + 1),
+ GFP_KERNEL);
+ if (!cpr_vreg->quot_adjust) {
+ pr_err("Can't allocate memory for cpr_vreg->quot_adjust\n");
+ return -ENOMEM;
+ }
+
+ if (!corners_mapped) {
+ for (i = CPR_FUSE_CORNER_SVS; i < CPR_FUSE_CORNER_MAX; i++)
+ cpr_vreg->corner_map[i] = i;
+ return 0;
+ } else {
+ rc = of_property_read_u32_array(dev->of_node,
+ "qcom,cpr-corner-map", &cpr_vreg->corner_map[1], size);
+
+ if (rc) {
+ pr_err("qcom,cpr-corner-map missing, rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ prop = of_find_property(dev->of_node,
+ "qcom,cpr-speed-bin-max-corners", NULL);
+ if (!prop) {
+		cpr_debug("qcom,cpr-speed-bin-max-corners missing\n");
+ return 0;
+ }
+
+ size = prop->length / sizeof(u32);
+ tmp = kzalloc(size * sizeof(u32), GFP_KERNEL);
+ if (!tmp) {
+ pr_err("memory alloc failed\n");
+ return -ENOMEM;
+ }
+ rc = of_property_read_u32_array(dev->of_node,
+ "qcom,cpr-speed-bin-max-corners", tmp, size);
+ if (rc < 0) {
+ kfree(tmp);
+ pr_err("get cpr-speed-bin-max-corners failed, rc = %d\n", rc);
+ return rc;
+ }
+
+ cpr_parse_pvs_version_fuse(cpr_vreg, dev->of_node);
+
+ /*
+	 * Based on the speed bin and PVS version, find the maximum virtual
+	 * corner corresponding to each SVS/NORMAL/TURBO fuse corner.
+ */
+ for (i = 0; i < size; i += 5) {
+ if (tmp[i] == cpr_vreg->speed_bin &&
+ tmp[i + 1] == cpr_vreg->pvs_version) {
+ svs_corner = tmp[i + 2];
+ normal_corner = tmp[i + 3];
+ turbo_corner = tmp[i + 4];
+ break;
+ }
+ }
+ kfree(tmp);
+ /*
+	 * Return success even if the virtual corner values read from the
+	 * qcom,cpr-speed-bin-max-corners property are inconsistent, so that
+	 * the driver can continue to run without error.
+ */
+ if (turbo_corner <= normal_corner ||
+ turbo_corner > cpr_vreg->num_corners) {
+ cpr_debug("turbo:%d should be larger than normal:%d\n",
+ turbo_corner, normal_corner);
+ return 0;
+ }
+
+ prop = of_find_property(dev->of_node,
+ "qcom,cpr-corner-frequency-map", NULL);
+ if (!prop) {
+ cpr_debug("qcom,cpr-corner-frequency-map missing\n");
+ return 0;
+ }
+
+ size = prop->length / sizeof(u32);
+ tmp = kzalloc(sizeof(u32) * size, GFP_KERNEL);
+ if (!tmp) {
+ pr_err("memory alloc failed\n");
+ return -ENOMEM;
+ }
+ rc = of_property_read_u32_array(dev->of_node,
+ "qcom,cpr-corner-frequency-map", tmp, size);
+ if (rc < 0) {
+ pr_err("get cpr-corner-frequency-map failed, rc = %d\n", rc);
+ kfree(tmp);
+ return rc;
+ }
+ freq_mappings = kzalloc(sizeof(u32) * (cpr_vreg->num_corners + 1),
+ GFP_KERNEL);
+ if (!freq_mappings) {
+ pr_err("memory alloc for freq_mappings failed!\n");
+ kfree(tmp);
+ return -ENOMEM;
+ }
+ for (i = 0; i < size; i += 2) {
+ corner = tmp[i];
+ if ((corner < 1) || (corner > cpr_vreg->num_corners)) {
+ pr_err("corner should be in 1~%d range: %d\n",
+ cpr_vreg->num_corners, corner);
+ continue;
+ }
+ freq_mappings[corner] = tmp[i + 1];
+ cpr_debug("Frequency at virtual corner %d is %d Hz.\n",
+ corner, freq_mappings[corner]);
+ }
+ kfree(tmp);
+
+ rc = of_property_read_u32(dev->of_node,
+ "qcom,cpr-quot-adjust-scaling-factor-max",
+ &max_factor);
+ if (rc < 0) {
+ cpr_debug("get cpr-quot-adjust-scaling-factor-max failed\n");
+ kfree(freq_mappings);
+ return 0;
+ }
+
+ /*
+ * Get the quot adjust scaling factor, according to:
+ * scaling =
+ * min(1000 * (QUOT(fused @turbo) - QUOT(fused @normal)) /
+ * (freq_turbo - freq_normal), max_factor)
+ *
+ * @QUOT(fused @turbo): quotient read from fuse for TURBO fuse corner;
+ * @QUOT(fused @normal): quotient read from fuse for NORMAL fuse corner;
+ * @freq_turbo: MHz, max frequency running at TURBO fuse corner;
+ * @freq_normal: MHz, max frequency running at NORMAL fuse corner.
+ */
+
+ freq_turbo = freq_mappings[turbo_corner];
+ freq_normal = freq_mappings[normal_corner];
+ if (freq_normal == 0 || freq_turbo <= freq_normal) {
+		pr_err("freq_turbo: %d should be larger than freq_normal: %d\n",
+ freq_turbo, freq_normal);
+ kfree(freq_mappings);
+ return -EINVAL;
+ }
+ freq_turbo /= 1000000; /* MHz */
+ freq_normal /= 1000000;
+ scaling = 1000 *
+ (cpr_vreg->cpr_fuse_target_quot[CPR_FUSE_CORNER_TURBO] -
+ cpr_vreg->cpr_fuse_target_quot[CPR_FUSE_CORNER_NORMAL]) /
+ (freq_turbo - freq_normal);
+ scaling = min(scaling, max_factor);
+ pr_info("quotient adjustment scaling factor: %d.%03d\n",
+ scaling / 1000, scaling % 1000);
+
+ /*
+ * Walk through the corners mapped to the TURBO fuse corner and
+ * calculate the quotient adjustment for each one using the following
+ * formula:
+ * quot_adjust = (freq_turbo - freq_corner) * scaling / 1000
+ *
+ * @freq_turbo: MHz, max frequency running at TURBO fuse corner;
+ * @freq_corner: MHz, frequency running at a corner.
+ */
+ for (i = turbo_corner; i > normal_corner; i--) {
+ freq_corner = freq_mappings[i] / 1000000; /* MHz */
+ if (freq_corner > 0) {
+ cpr_vreg->quot_adjust[i] =
+ scaling * (freq_turbo - freq_corner) / 1000;
+ }
+ pr_info("adjusted quotient[%d] = %d\n", i,
+ (cpr_vreg->cpr_fuse_target_quot[cpr_vreg->corner_map[i]]
+ - cpr_vreg->quot_adjust[i]));
+ }
+ kfree(freq_mappings);
+ return 0;
+}
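
A worked example of the two formulas documented in the comments above (scaling factor, then per-corner quotient adjustment); all quotients, frequencies and the max factor are made-up numbers chosen only to exercise the integer arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int quot_turbo = 1400, quot_normal = 1100; /* fused QUOTs */
	unsigned int freq_turbo = 1209, freq_normal = 800;  /* MHz */
	unsigned int max_factor = 650;
	unsigned int scaling, quot_adjust, freq_corner = 998; /* MHz */

	/* scaling = min(1000 * dQUOT / dF, max_factor) */
	scaling = 1000 * (quot_turbo - quot_normal)
			/ (freq_turbo - freq_normal);
	if (scaling > max_factor)
		scaling = max_factor;

	/* adjustment for a virtual corner mapped to the TURBO fuse corner */
	quot_adjust = scaling * (freq_turbo - freq_corner) / 1000;

	printf("scaling = %u.%03u, quot_adjust = %u\n",
	       scaling / 1000, scaling % 1000, quot_adjust);
	return 0;
}
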
+
+static int cpr_init_cpr_efuse(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ int i, rc = 0;
+ bool redundant = false, scheme_fuse_valid = false;
+ bool disable_fuse_valid = false;
+ u32 cpr_fuse_redun_sel[5];
+ char *targ_quot_str, *ro_sel_str;
+ u32 cpr_fuse_row[2];
+ u32 bp_cpr_disable, bp_scheme;
+ int bp_target_quot[CPR_FUSE_CORNER_MAX];
+ int bp_ro_sel[CPR_FUSE_CORNER_MAX];
+ u64 fuse_bits, fuse_bits_2;
+ u32 quot_adjust[CPR_FUSE_CORNER_MAX];
+ u32 target_quot_size[CPR_FUSE_CORNER_MAX] = {
+ [CPR_FUSE_CORNER_SVS] = CPR_FUSE_TARGET_QUOT_BITS,
+ [CPR_FUSE_CORNER_NORMAL] = CPR_FUSE_TARGET_QUOT_BITS,
+ [CPR_FUSE_CORNER_TURBO] = CPR_FUSE_TARGET_QUOT_BITS,
+ };
+
+ if (of_find_property(of_node, "qcom,cpr-fuse-redun-sel", NULL)) {
+ rc = of_property_read_u32_array(of_node,
+ "qcom,cpr-fuse-redun-sel",
+ cpr_fuse_redun_sel, 5);
+ if (rc < 0) {
+ pr_err("cpr-fuse-redun-sel missing: rc=%d\n", rc);
+ return rc;
+ }
+ redundant = cpr_fuse_is_setting_expected(cpr_vreg,
+ cpr_fuse_redun_sel);
+ }
+
+ if (redundant) {
+ rc = of_property_read_u32_array(of_node,
+ "qcom,cpr-fuse-redun-row",
+ cpr_fuse_row, 2);
+ targ_quot_str = "qcom,cpr-fuse-redun-target-quot";
+ ro_sel_str = "qcom,cpr-fuse-redun-ro-sel";
+ } else {
+ rc = of_property_read_u32_array(of_node,
+ "qcom,cpr-fuse-row",
+ cpr_fuse_row, 2);
+ targ_quot_str = "qcom,cpr-fuse-target-quot";
+ ro_sel_str = "qcom,cpr-fuse-ro-sel";
+ }
+ if (rc)
+ return rc;
+
+ rc = of_property_read_u32_array(of_node,
+ targ_quot_str,
+ &bp_target_quot[CPR_FUSE_CORNER_SVS],
+ CPR_FUSE_CORNER_MAX - CPR_FUSE_CORNER_SVS);
+ if (rc < 0) {
+ pr_err("missing %s: rc=%d\n", targ_quot_str, rc);
+ return rc;
+ }
+
+ if (of_property_read_bool(of_node, "qcom,cpr-fuse-target-quot-size")) {
+ rc = of_property_read_u32_array(of_node,
+ "qcom,cpr-fuse-target-quot-size",
+ &target_quot_size[CPR_FUSE_CORNER_SVS],
+ CPR_FUSE_CORNER_MAX - CPR_FUSE_CORNER_SVS);
+ if (rc < 0) {
+ pr_err("error while reading qcom,cpr-fuse-target-quot-size: rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = of_property_read_u32_array(of_node,
+ ro_sel_str,
+ &bp_ro_sel[CPR_FUSE_CORNER_SVS],
+ CPR_FUSE_CORNER_MAX - CPR_FUSE_CORNER_SVS);
+ if (rc < 0) {
+ pr_err("missing %s: rc=%d\n", ro_sel_str, rc);
+ return rc;
+ }
+
+ /* Read the control bits of eFuse */
+ fuse_bits = cpr_read_efuse_row(cpr_vreg, cpr_fuse_row[0],
+ cpr_fuse_row[1]);
+ pr_info("[row:%d] = 0x%llx\n", cpr_fuse_row[0], fuse_bits);
+
+ if (redundant) {
+ if (of_property_read_bool(of_node,
+ "qcom,cpr-fuse-redun-bp-cpr-disable")) {
+ CPR_PROP_READ_U32(of_node,
+ "cpr-fuse-redun-bp-cpr-disable",
+ &bp_cpr_disable, rc);
+ disable_fuse_valid = true;
+ if (of_find_property(of_node,
+ "qcom,cpr-fuse-redun-bp-scheme",
+ NULL)) {
+ CPR_PROP_READ_U32(of_node,
+ "cpr-fuse-redun-bp-scheme",
+ &bp_scheme, rc);
+ scheme_fuse_valid = true;
+ }
+ if (rc)
+ return rc;
+ fuse_bits_2 = fuse_bits;
+ } else {
+ u32 temp_row[2];
+
+ /* Use original fuse if no optional property */
+ if (of_property_read_bool(of_node,
+ "qcom,cpr-fuse-bp-cpr-disable")) {
+ CPR_PROP_READ_U32(of_node,
+ "cpr-fuse-bp-cpr-disable",
+ &bp_cpr_disable, rc);
+ disable_fuse_valid = true;
+ }
+ if (of_find_property(of_node,
+ "qcom,cpr-fuse-bp-scheme",
+ NULL)) {
+ CPR_PROP_READ_U32(of_node,
+ "cpr-fuse-bp-scheme",
+ &bp_scheme, rc);
+ scheme_fuse_valid = true;
+ }
+ rc = of_property_read_u32_array(of_node,
+ "qcom,cpr-fuse-row",
+ temp_row, 2);
+ if (rc)
+ return rc;
+
+ fuse_bits_2 = cpr_read_efuse_row(cpr_vreg, temp_row[0],
+ temp_row[1]);
+ pr_info("[original row:%d] = 0x%llx\n",
+ temp_row[0], fuse_bits_2);
+ }
+ } else {
+ if (of_property_read_bool(of_node,
+ "qcom,cpr-fuse-bp-cpr-disable")) {
+ CPR_PROP_READ_U32(of_node,
+ "cpr-fuse-bp-cpr-disable", &bp_cpr_disable, rc);
+ disable_fuse_valid = true;
+ }
+ if (of_find_property(of_node, "qcom,cpr-fuse-bp-scheme",
+ NULL)) {
+ CPR_PROP_READ_U32(of_node, "cpr-fuse-bp-scheme",
+ &bp_scheme, rc);
+ scheme_fuse_valid = true;
+ }
+ if (rc)
+ return rc;
+ fuse_bits_2 = fuse_bits;
+ }
+
+ if (disable_fuse_valid) {
+ cpr_vreg->cpr_fuse_disable =
+ (fuse_bits_2 >> bp_cpr_disable) & 0x01;
+ pr_info("disable = %d\n", cpr_vreg->cpr_fuse_disable);
+ } else {
+ cpr_vreg->cpr_fuse_disable = false;
+ }
+
+ if (scheme_fuse_valid) {
+ cpr_vreg->cpr_fuse_local = (fuse_bits_2 >> bp_scheme) & 0x01;
+ pr_info("local = %d\n", cpr_vreg->cpr_fuse_local);
+ } else {
+ cpr_vreg->cpr_fuse_local = true;
+ }
+
+ for (i = CPR_FUSE_CORNER_SVS; i < CPR_FUSE_CORNER_MAX; i++) {
+ cpr_vreg->cpr_fuse_ro_sel[i]
+ = cpr_read_efuse_param(cpr_vreg, cpr_fuse_row[0],
+ bp_ro_sel[i], CPR_FUSE_RO_SEL_BITS,
+ cpr_fuse_row[1]);
+ cpr_vreg->cpr_fuse_target_quot[i]
+ = cpr_read_efuse_param(cpr_vreg, cpr_fuse_row[0],
+ bp_target_quot[i], target_quot_size[i],
+ cpr_fuse_row[1]);
+ pr_info("Corner[%d]: ro_sel = %d, target quot = %d\n", i,
+ cpr_vreg->cpr_fuse_ro_sel[i],
+ cpr_vreg->cpr_fuse_target_quot[i]);
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,cpr-quotient-adjustment",
+ &quot_adjust[1], CPR_FUSE_CORNER_MAX - 1);
+ if (!rc) {
+ for (i = CPR_FUSE_CORNER_SVS; i < CPR_FUSE_CORNER_MAX; i++) {
+ cpr_vreg->cpr_fuse_target_quot[i] += quot_adjust[i];
+ pr_info("Corner[%d]: adjusted target quot = %d\n",
+ i, cpr_vreg->cpr_fuse_target_quot[i]);
+ }
+ }
+
+ if (cpr_vreg->flags & FLAGS_UPLIFT_QUOT_VOLT) {
+ cpr_voltage_uplift_wa_inc_quot(cpr_vreg, of_node);
+ for (i = CPR_FUSE_CORNER_SVS; i < CPR_FUSE_CORNER_MAX; i++) {
+ pr_info("Corner[%d]: uplifted target quot = %d\n",
+ i, cpr_vreg->cpr_fuse_target_quot[i]);
+ }
+ }
+
+ rc = cpr_get_corner_quot_adjustment(cpr_vreg, &pdev->dev);
+ if (rc)
+ return rc;
+
+ cpr_vreg->cpr_fuse_bits = fuse_bits;
+ if (!cpr_vreg->cpr_fuse_bits) {
+ cpr_vreg->cpr_fuse_disable = 1;
+ pr_err("cpr_fuse_bits = 0: set cpr_fuse_disable = 1\n");
+ } else {
+ /* Check if the target quotients are too close together */
+ int *quot = cpr_vreg->cpr_fuse_target_quot;
+ bool valid_fuse = true;
+
+ if (quot[CPR_FUSE_CORNER_TURBO] >
+ quot[CPR_FUSE_CORNER_NORMAL]) {
+ if ((quot[CPR_FUSE_CORNER_TURBO] -
+ quot[CPR_FUSE_CORNER_NORMAL])
+ <= CPR_FUSE_MIN_QUOT_DIFF)
+ valid_fuse = false;
+ } else {
+ valid_fuse = false;
+ }
+
+ if (!valid_fuse) {
+ cpr_vreg->cpr_fuse_disable = 1;
+ pr_err("invalid quotient values\n");
+ }
+ }
+
+ return 0;
+}
+
+static int cpr_init_cpr_voltages(struct cpr_regulator *cpr_vreg,
+ struct device *dev)
+{
+ int i;
+ int size = cpr_vreg->num_corners + 1;
+
+ cpr_vreg->last_volt = devm_kzalloc(dev, sizeof(int) * size, GFP_KERNEL);
+ if (!cpr_vreg->last_volt)
+ return -EINVAL;
+
+ for (i = 1; i < size; i++) {
+ cpr_vreg->last_volt[i] = cpr_vreg->pvs_corner_v
+ [cpr_vreg->corner_map[i]];
+ }
+
+ return 0;
+}
+
+static int cpr_init_cpr_parameters(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ int rc = 0;
+
+ CPR_PROP_READ_U32(of_node, "cpr-ref-clk",
+ &cpr_vreg->ref_clk_khz, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(of_node, "cpr-timer-delay",
+ &cpr_vreg->timer_delay_us, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(of_node, "cpr-timer-cons-up",
+ &cpr_vreg->timer_cons_up, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(of_node, "cpr-timer-cons-down",
+ &cpr_vreg->timer_cons_down, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(of_node, "cpr-irq-line",
+ &cpr_vreg->irq_line, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(of_node, "cpr-step-quotient",
+ &cpr_vreg->step_quotient, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(of_node, "cpr-up-threshold",
+ &cpr_vreg->up_threshold, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(of_node, "cpr-down-threshold",
+ &cpr_vreg->down_threshold, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(of_node, "cpr-idle-clocks",
+ &cpr_vreg->idle_clocks, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(of_node, "cpr-gcnt-time",
+ &cpr_vreg->gcnt_time_us, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(of_node, "vdd-apc-step-up-limit",
+ &cpr_vreg->vdd_apc_step_up_limit, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(of_node, "vdd-apc-step-down-limit",
+ &cpr_vreg->vdd_apc_step_down_limit, rc);
+ if (rc)
+ return rc;
+
+ /* Init module parameter with the DT value */
+ cpr_vreg->enable = of_property_read_bool(of_node, "qcom,cpr-enable");
+ cpr_enable = (int) cpr_vreg->enable;
+ pr_info("CPR is %s by default.\n",
+ cpr_vreg->enable ? "enabled" : "disabled");
+
+ return rc;
+}
+
+static int cpr_init_cpr(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct resource *res;
+ int rc = 0;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rbcpr_clk");
+ if (!res || !res->start) {
+ pr_err("missing rbcpr_clk address: res=%p\n", res);
+ return -EINVAL;
+ }
+ cpr_vreg->rbcpr_clk_addr = res->start;
+
+ rc = cpr_init_cpr_efuse(pdev, cpr_vreg);
+ if (rc)
+ return rc;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rbcpr");
+ if (!res || !res->start) {
+ pr_err("missing rbcpr address: res=%p\n", res);
+ return -EINVAL;
+ }
+	cpr_vreg->rbcpr_base = devm_ioremap(&pdev->dev, res->start,
+					    resource_size(res));
+	if (!cpr_vreg->rbcpr_base) {
+		pr_err("Unable to map rbcpr address %pa\n", &res->start);
+		return -EINVAL;
+	}
+
+ /* Init all voltage set points of APC regulator for CPR */
+ rc = cpr_init_cpr_voltages(cpr_vreg, &pdev->dev);
+ if (rc)
+ return rc;
+
+ /* Init CPR configuration parameters */
+ rc = cpr_init_cpr_parameters(pdev, cpr_vreg);
+ if (rc)
+ return rc;
+
+ /* Get and Init interrupt */
+ cpr_vreg->cpr_irq = platform_get_irq(pdev, 0);
+	if (cpr_vreg->cpr_irq < 0) {
+ pr_err("missing CPR IRQ\n");
+ return -EINVAL;
+ }
+
+ /* Configure CPR HW but keep it disabled */
+ rc = cpr_config(cpr_vreg, &pdev->dev);
+ if (rc)
+ return rc;
+
+ rc = request_threaded_irq(cpr_vreg->cpr_irq, NULL, cpr_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING, "cpr",
+ cpr_vreg);
+ if (rc) {
+ pr_err("CPR: request irq failed for IRQ %d\n",
+ cpr_vreg->cpr_irq);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int cpr_efuse_init(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct resource *res;
+ int len;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse_addr");
+ if (!res || !res->start) {
+ pr_err("efuse_addr missing: res=%p\n", res);
+ return -EINVAL;
+ }
+
+ cpr_vreg->efuse_addr = res->start;
+ len = res->end - res->start + 1;
+
+ pr_info("efuse_addr = %pa (len=0x%x)\n", &res->start, len);
+
+ cpr_vreg->efuse_base = ioremap(cpr_vreg->efuse_addr, len);
+ if (!cpr_vreg->efuse_base) {
+ pr_err("Unable to map efuse_addr %pa\n",
+ &cpr_vreg->efuse_addr);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void cpr_efuse_free(struct cpr_regulator *cpr_vreg)
+{
+ iounmap(cpr_vreg->efuse_base);
+}
+
+static void cpr_parse_cond_min_volt_fuse(struct cpr_regulator *cpr_vreg,
+ struct device_node *of_node)
+{
+ int rc;
+ u32 fuse_sel[5];
+ /*
+ * Restrict all pvs corner voltages to a minimum value of
+ * qcom,cpr-cond-min-voltage if the fuse defined in
+ * qcom,cpr-fuse-cond-min-volt-sel does not read back with
+ * the expected value.
+ */
+ rc = of_property_read_u32_array(of_node,
+ "qcom,cpr-fuse-cond-min-volt-sel", fuse_sel, 5);
+ if (!rc) {
+ if (!cpr_fuse_is_setting_expected(cpr_vreg, fuse_sel))
+ cpr_vreg->flags |= FLAGS_SET_MIN_VOLTAGE;
+ }
+}
+
+static void cpr_parse_speed_bin_fuse(struct cpr_regulator *cpr_vreg,
+ struct device_node *of_node)
+{
+ int rc;
+ u64 fuse_bits;
+ u32 fuse_sel[4];
+ u32 speed_bits;
+
+ rc = of_property_read_u32_array(of_node,
+ "qcom,speed-bin-fuse-sel", fuse_sel, 4);
+
+ if (!rc) {
+ fuse_bits = cpr_read_efuse_row(cpr_vreg,
+ fuse_sel[0], fuse_sel[3]);
+ speed_bits = (fuse_bits >> fuse_sel[1]) &
+ ((1 << fuse_sel[2]) - 1);
+ pr_info("[row: %d]: 0x%llx, speed_bits = %d\n",
+ fuse_sel[0], fuse_bits, speed_bits);
+ cpr_vreg->speed_bin = speed_bits;
+ } else {
+ cpr_vreg->speed_bin = UINT_MAX;
+ }
+}
+
+static int cpr_voltage_uplift_enable_check(struct cpr_regulator *cpr_vreg,
+ struct device_node *of_node)
+{
+ int rc;
+ u32 fuse_sel[5];
+ u32 uplift_speed_bin;
+
+ rc = of_property_read_u32_array(of_node,
+ "qcom,cpr-fuse-uplift-sel", fuse_sel, 5);
+ if (!rc) {
+ rc = of_property_read_u32(of_node,
+ "qcom,cpr-uplift-speed-bin",
+ &uplift_speed_bin);
+ if (rc < 0) {
+ pr_err("qcom,cpr-uplift-speed-bin missing\n");
+ return rc;
+ }
+ if (cpr_fuse_is_setting_expected(cpr_vreg, fuse_sel)
+ && (uplift_speed_bin == cpr_vreg->speed_bin)
+ && !(cpr_vreg->flags & FLAGS_SET_MIN_VOLTAGE)) {
+ cpr_vreg->flags |= FLAGS_UPLIFT_QUOT_VOLT;
+ }
+ }
+ return 0;
+}
+
+static int cpr_voltage_plan_init(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ int rc, i;
+ u32 min_uv = 0;
+
+ rc = of_property_read_u32_array(of_node, "qcom,cpr-voltage-ceiling",
+ &cpr_vreg->ceiling_volt[CPR_FUSE_CORNER_SVS],
+ CPR_FUSE_CORNER_MAX - 1);
+ if (rc < 0) {
+ pr_err("cpr-voltage-ceiling missing: rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,cpr-voltage-floor",
+ &cpr_vreg->floor_volt[CPR_FUSE_CORNER_SVS],
+ CPR_FUSE_CORNER_MAX - 1);
+ if (rc < 0) {
+ pr_err("cpr-voltage-floor missing: rc=%d\n", rc);
+ return rc;
+ }
+
+ cpr_parse_cond_min_volt_fuse(cpr_vreg, of_node);
+ cpr_parse_speed_bin_fuse(cpr_vreg, of_node);
+ rc = cpr_voltage_uplift_enable_check(cpr_vreg, of_node);
+ if (rc < 0) {
+ pr_err("voltage uplift enable check failed, %d\n", rc);
+ return rc;
+ }
+ if (cpr_vreg->flags & FLAGS_SET_MIN_VOLTAGE) {
+ of_property_read_u32(of_node, "qcom,cpr-cond-min-voltage",
+ &min_uv);
+ for (i = CPR_FUSE_CORNER_SVS; i < CPR_FUSE_CORNER_MAX; i++)
+ if (cpr_vreg->ceiling_volt[i] < min_uv) {
+ cpr_vreg->ceiling_volt[i] = min_uv;
+ cpr_vreg->floor_volt[i] = min_uv;
+ } else if (cpr_vreg->floor_volt[i] < min_uv) {
+ cpr_vreg->floor_volt[i] = min_uv;
+ }
+ }
+
+ return 0;
+}
+
+static int cpr_mem_acc_init(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ int rc;
+
+ if (of_property_read_bool(pdev->dev.of_node, "mem-acc-supply")) {
+ cpr_vreg->mem_acc_vreg = devm_regulator_get(&pdev->dev,
+ "mem-acc");
+ if (IS_ERR_OR_NULL(cpr_vreg->mem_acc_vreg)) {
+ rc = PTR_RET(cpr_vreg->mem_acc_vreg);
+ if (rc != -EPROBE_DEFER)
+ pr_err("devm_regulator_get: mem-acc: rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+ return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static ssize_t cpr_debugfs_read(struct file *file, char __user *buff,
+ size_t count, loff_t *ppos)
+{
+ char *debugfs_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ ssize_t len, ret = 0;
+ u32 gcnt, ro_sel, ctl, irq_status, reg, error_steps;
+ u32 step_dn, step_up, error, error_lt0, busy;
+ int fuse_corner;
+
+ if (!debugfs_buf)
+ return -ENOMEM;
+
+ mutex_lock(&the_cpr->cpr_mutex);
+
+ fuse_corner = the_cpr->corner_map[the_cpr->corner];
+
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ "corner = %d, current_volt = %d uV\n",
+ the_cpr->corner, the_cpr->last_volt[the_cpr->corner]);
+ ret += len;
+
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ "fuse_corner = %d, current_volt = %d uV\n",
+ fuse_corner, the_cpr->last_volt[the_cpr->corner]);
+ ret += len;
+
+ ro_sel = the_cpr->cpr_fuse_ro_sel[fuse_corner];
+ gcnt = cpr_read(the_cpr, REG_RBCPR_GCNT_TARGET(ro_sel));
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ "rbcpr_gcnt_target (%u) = 0x%02X\n", ro_sel, gcnt);
+ ret += len;
+
+ ctl = cpr_read(the_cpr, REG_RBCPR_CTL);
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ "rbcpr_ctl = 0x%02X\n", ctl);
+ ret += len;
+
+ irq_status = cpr_read(the_cpr, REG_RBIF_IRQ_STATUS);
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ "rbcpr_irq_status = 0x%02X\n", irq_status);
+ ret += len;
+
+ reg = cpr_read(the_cpr, REG_RBCPR_RESULT_0);
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ "rbcpr_result_0 = 0x%02X\n", reg);
+ ret += len;
+
+ step_dn = reg & 0x01;
+ step_up = (reg >> RBCPR_RESULT0_STEP_UP_SHIFT) & 0x01;
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ " [step_dn = %u", step_dn);
+ ret += len;
+
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ ", step_up = %u", step_up);
+ ret += len;
+
+ error_steps = (reg >> RBCPR_RESULT0_ERROR_STEPS_SHIFT)
+ & RBCPR_RESULT0_ERROR_STEPS_MASK;
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ ", error_steps = %u", error_steps);
+ ret += len;
+
+ error = (reg >> RBCPR_RESULT0_ERROR_SHIFT) & RBCPR_RESULT0_ERROR_MASK;
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ ", error = %u", error);
+ ret += len;
+
+ error_lt0 = (reg >> RBCPR_RESULT0_ERROR_LT0_SHIFT) & 0x01;
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ ", error_lt_0 = %u", error_lt0);
+ ret += len;
+
+ busy = (reg >> RBCPR_RESULT0_BUSY_SHIFT) & 0x01;
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ ", busy = %u]\n", busy);
+ ret += len;
+ mutex_unlock(&the_cpr->cpr_mutex);
+
+ ret = simple_read_from_buffer(buff, count, ppos, debugfs_buf, ret);
+ kfree(debugfs_buf);
+ return ret;
+}
+
+static const struct file_operations cpr_debugfs_fops = {
+ .read = cpr_debugfs_read,
+};
+
+static void cpr_debugfs_init(void)
+{
+ cpr_debugfs_entry = debugfs_create_file("debug_info", 0444,
+ the_cpr->rdev->debugfs, NULL,
+ &cpr_debugfs_fops);
+ if (!cpr_debugfs_entry)
+		pr_err("cpr_debugfs_entry creation failed.\n");
+}
+
+static void cpr_debugfs_remove(void)
+{
+ debugfs_remove(cpr_debugfs_entry);
+}
+
+#else
+
+static void cpr_debugfs_init(void)
+{}
+
+static void cpr_debugfs_remove(void)
+{}
+
+#endif
+
+static int cpr_regulator_probe(struct platform_device *pdev)
+{
+ struct regulator_config reg_config = {};
+ struct cpr_regulator *cpr_vreg;
+ struct regulator_desc *rdesc;
+ struct regulator_init_data *init_data = pdev->dev.platform_data;
+ int rc;
+
+ if (!pdev->dev.of_node) {
+ pr_err("Device tree node is missing\n");
+ return -EINVAL;
+ }
+
+ init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
+ if (!init_data) {
+ pr_err("regulator init data is missing\n");
+ return -EINVAL;
+ } else {
+ init_data->constraints.input_uV
+ = init_data->constraints.max_uV;
+ init_data->constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS;
+ }
+
+ cpr_vreg = devm_kzalloc(&pdev->dev, sizeof(struct cpr_regulator),
+ GFP_KERNEL);
+ if (!cpr_vreg) {
+ pr_err("Can't allocate cpr_regulator memory\n");
+ return -ENOMEM;
+ }
+
+ rc = cpr_mem_acc_init(pdev, cpr_vreg);
+ if (rc) {
+		pr_err("mem_acc initialization error rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = cpr_efuse_init(pdev, cpr_vreg);
+ if (rc) {
+ pr_err("Wrong eFuse address specified: rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = cpr_voltage_plan_init(pdev, cpr_vreg);
+ if (rc) {
+ pr_err("Wrong DT parameter specified: rc=%d\n", rc);
+ goto err_out;
+ }
+
+ rc = cpr_pvs_init(pdev, cpr_vreg);
+ if (rc) {
+		pr_err("PVS initialization failed: rc=%d\n", rc);
+ goto err_out;
+ }
+
+ rc = cpr_apc_init(pdev, cpr_vreg);
+ if (rc) {
+ if (rc != -EPROBE_DEFER)
+			pr_err("APC initialization failed: rc=%d\n", rc);
+ goto err_out;
+ }
+
+ rc = cpr_init_cpr(pdev, cpr_vreg);
+ if (rc) {
+		pr_err("CPR initialization failed: rc=%d\n", rc);
+ goto err_out;
+ }
+
+ cpr_efuse_free(cpr_vreg);
+
+ mutex_init(&cpr_vreg->cpr_mutex);
+
+ rdesc = &cpr_vreg->rdesc;
+ rdesc->owner = THIS_MODULE;
+ rdesc->type = REGULATOR_VOLTAGE;
+ rdesc->ops = &cpr_corner_ops;
+ rdesc->name = init_data->constraints.name;
+
+ reg_config.dev = &pdev->dev;
+ reg_config.init_data = init_data;
+ reg_config.driver_data = cpr_vreg;
+ reg_config.of_node = pdev->dev.of_node;
+ cpr_vreg->rdev = regulator_register(rdesc, &reg_config);
+ if (IS_ERR(cpr_vreg->rdev)) {
+ rc = PTR_ERR(cpr_vreg->rdev);
+ pr_err("regulator_register failed: rc=%d\n", rc);
+
+ cpr_apc_exit(cpr_vreg);
+ return rc;
+ }
+
+ platform_set_drvdata(pdev, cpr_vreg);
+ the_cpr = cpr_vreg;
+ cpr_debugfs_init();
+
+ return 0;
+
+err_out:
+ cpr_efuse_free(cpr_vreg);
+ return rc;
+}
+
+static int cpr_regulator_remove(struct platform_device *pdev)
+{
+ struct cpr_regulator *cpr_vreg;
+
+ cpr_vreg = platform_get_drvdata(pdev);
+ if (cpr_vreg) {
+ /* Disable CPR */
+ if (cpr_is_allowed(cpr_vreg)) {
+ cpr_ctl_disable(cpr_vreg);
+ cpr_irq_set(cpr_vreg, 0);
+ }
+
+ cpr_apc_exit(cpr_vreg);
+ cpr_debugfs_remove();
+ regulator_unregister(cpr_vreg->rdev);
+ }
+
+ return 0;
+}
+
+static struct of_device_id cpr_regulator_match_table[] = {
+ { .compatible = CPR_REGULATOR_DRIVER_NAME, },
+ {}
+};
+
+static struct platform_driver cpr_regulator_driver = {
+ .driver = {
+ .name = CPR_REGULATOR_DRIVER_NAME,
+ .of_match_table = cpr_regulator_match_table,
+ .owner = THIS_MODULE,
+ },
+ .probe = cpr_regulator_probe,
+ .remove = cpr_regulator_remove,
+ .suspend = cpr_regulator_suspend,
+ .resume = cpr_regulator_resume,
+};
+
+/**
+ * cpr_regulator_init() - register cpr-regulator driver
+ *
+ * This initialization function should be called in systems in which driver
+ * registration ordering must be controlled precisely.
+ */
+int __init cpr_regulator_init(void)
+{
+ static bool initialized;
+
+ if (initialized)
+ return 0;
+ else
+ initialized = true;
+
+ return platform_driver_register(&cpr_regulator_driver);
+}
+EXPORT_SYMBOL(cpr_regulator_init);
+
+static void __exit cpr_regulator_exit(void)
+{
+ platform_driver_unregister(&cpr_regulator_driver);
+}
+
+MODULE_DESCRIPTION("CPR regulator driver");
+MODULE_LICENSE("GPL v2");
+
+arch_initcall(cpr_regulator_init);
+module_exit(cpr_regulator_exit);
diff --git a/drivers/regulator/mem-acc-regulator.c b/drivers/regulator/mem-acc-regulator.c
new file mode 100644
index 000000000000..b726823d09f2
--- /dev/null
+++ b/drivers/regulator/mem-acc-regulator.c
@@ -0,0 +1,600 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "ACC: %s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <soc/qcom/scm.h>
+
+#define MEM_ACC_SEL_MASK 0x3
+
+#define BYTES_PER_FUSE_ROW 8
+
+/* mem-acc config flags */
+#define MEM_ACC_SKIP_L1_CONFIG BIT(0)
+
+enum {
+ MEMORY_L1,
+ MEMORY_L2,
+ MEMORY_MAX,
+};
+
+struct mem_acc_regulator {
+ struct device *dev;
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+
+ int corner;
+ bool mem_acc_supported[MEMORY_MAX];
+
+ u32 *acc_sel_mask[MEMORY_MAX];
+ u32 *acc_sel_bit_pos[MEMORY_MAX];
+ u32 num_acc_sel[MEMORY_MAX];
+ u32 *acc_en_bit_pos;
+ u32 num_acc_en;
+ u32 *corner_acc_map;
+ u32 num_corners;
+
+ void __iomem *acc_sel_base[MEMORY_MAX];
+ void __iomem *acc_en_base;
+ phys_addr_t acc_sel_addr[MEMORY_MAX];
+ phys_addr_t acc_en_addr;
+ u32 flags;
+
+ /* eFuse parameters */
+ phys_addr_t efuse_addr;
+ void __iomem *efuse_base;
+};
+
+static u64 mem_acc_read_efuse_row(struct mem_acc_regulator *mem_acc_vreg,
+ u32 row_num, bool use_tz_api)
+{
+ int rc;
+ u64 efuse_bits;
+ struct mem_acc_read_req {
+ u32 row_address;
+ int addr_type;
+ } req;
+
+ struct mem_acc_read_rsp {
+ u32 row_data[2];
+ u32 status;
+ } rsp;
+
+ if (!use_tz_api) {
+ efuse_bits = readq_relaxed(mem_acc_vreg->efuse_base
+ + row_num * BYTES_PER_FUSE_ROW);
+ return efuse_bits;
+ }
+
+ req.row_address = mem_acc_vreg->efuse_addr +
+ row_num * BYTES_PER_FUSE_ROW;
+ req.addr_type = 0;
+ efuse_bits = 0;
+
+ rc = scm_call(SCM_SVC_FUSE, SCM_FUSE_READ,
+ &req, sizeof(req), &rsp, sizeof(rsp));
+
+ if (rc) {
+		pr_err("read row %d failed, err code = %d\n", row_num, rc);
+ } else {
+ efuse_bits = ((u64)(rsp.row_data[1]) << 32) +
+ (u64)rsp.row_data[0];
+ }
+
+ return efuse_bits;
+}
+
+static int mem_acc_fuse_is_setting_expected(
+ struct mem_acc_regulator *mem_acc_vreg, u32 sel_array[5])
+{
+ u64 fuse_bits;
+ u32 ret;
+
+ fuse_bits = mem_acc_read_efuse_row(mem_acc_vreg, sel_array[0],
+ sel_array[4]);
+ ret = (fuse_bits >> sel_array[1]) & ((1 << sel_array[2]) - 1);
+ if (ret == sel_array[3])
+ ret = 1;
+ else
+ ret = 0;
+
+ pr_info("[row:%d] = 0x%llx @%d:%d == %d ?: %s\n",
+ sel_array[0], fuse_bits,
+ sel_array[1], sel_array[2],
+ sel_array[3],
+ (ret == 1) ? "yes" : "no");
+ return ret;
+}
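
mem_acc_fuse_is_setting_expected() above (like cpr_fuse_is_setting_expected() in cpr-regulator.c) takes a 5-cell selector: <row, bit shift, field width, expected value, use-tz-api>. A minimal sketch of the comparison it performs, with an invented row value and selector:

#include <stdio.h>
#include <stdint.h>

/* returns 1 when the selected fuse field matches the expected value */
static int fuse_setting_expected(uint64_t row_bits, const uint32_t sel[5])
{
	uint32_t field = (row_bits >> sel[1]) & ((1u << sel[2]) - 1);

	return field == sel[3];
}

int main(void)
{
	uint64_t row_bits = 0x0000000000000c00ULL;   /* hypothetical row */
	/* row, shift, width, expected value, use-tz-api flag */
	uint32_t sel[5] = { 61, 10, 2, 3, 0 };

	printf("expected setting? %s\n",
	       fuse_setting_expected(row_bits, sel) ? "yes" : "no");
	return 0;
}
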
+
+static inline u32 apc_to_acc_corner(struct mem_acc_regulator *mem_acc_vreg,
+ int corner)
+{
+ /*
+	 * corner_acc_map is indexed from 0, while APC corner values start
+	 * at 1.
+ */
+ return mem_acc_vreg->corner_acc_map[corner - 1];
+}
+
+static void __update_acc_sel(struct mem_acc_regulator *mem_acc_vreg,
+ int corner, int mem_type)
+{
+ u32 acc_data, acc_data_old, i, bit, acc_corner;
+
+ /*
+	 * Do not configure the L1 ACC corner if the corresponding flag is set.
+ */
+ if ((mem_type == MEMORY_L1)
+ && (mem_acc_vreg->flags & MEM_ACC_SKIP_L1_CONFIG))
+ return;
+
+ acc_data = readl_relaxed(mem_acc_vreg->acc_sel_base[mem_type]);
+ acc_data_old = acc_data;
+ for (i = 0; i < mem_acc_vreg->num_acc_sel[mem_type]; i++) {
+ bit = mem_acc_vreg->acc_sel_bit_pos[mem_type][i];
+ acc_data &= ~mem_acc_vreg->acc_sel_mask[mem_type][i];
+ acc_corner = apc_to_acc_corner(mem_acc_vreg, corner);
+ acc_data |= (acc_corner << bit) &
+ mem_acc_vreg->acc_sel_mask[mem_type][i];
+ }
+ pr_debug("corner=%d old_acc_sel=0x%02x new_acc_sel=0x%02x mem_type=%d\n",
+ corner, acc_data_old, acc_data, mem_type);
+ writel_relaxed(acc_data, mem_acc_vreg->acc_sel_base[mem_type]);
+}
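
A standalone sketch of the read-modify-write that __update_acc_sel() applies to one ACC select register, using a fake register value, a single 2-bit field at bit position 4 and a target ACC corner of 2; purely illustrative:

#include <stdio.h>
#include <stdint.h>

#define MEM_ACC_SEL_MASK 0x3

int main(void)
{
	uint32_t acc_data = 0x000000f5; /* pretend readl_relaxed() result */
	uint32_t bit = 4, acc_corner = 2;
	uint32_t mask = MEM_ACC_SEL_MASK << bit;

	acc_data &= ~mask;                      /* clear the 2-bit field */
	acc_data |= (acc_corner << bit) & mask; /* install the new corner */

	printf("new acc_sel = 0x%08x\n", acc_data); /* prints 0x000000e5 */
	return 0;
}
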
+
+static void update_acc_sel(struct mem_acc_regulator *mem_acc_vreg, int corner)
+{
+ int i;
+
+ for (i = 0; i < MEMORY_MAX; i++) {
+ if (mem_acc_vreg->mem_acc_supported[i])
+ __update_acc_sel(mem_acc_vreg, corner, i);
+ }
+}
+
+static int mem_acc_regulator_set_voltage(struct regulator_dev *rdev,
+ int corner, int corner_max, unsigned *selector)
+{
+ struct mem_acc_regulator *mem_acc_vreg = rdev_get_drvdata(rdev);
+ int i;
+
+ if (corner > mem_acc_vreg->num_corners) {
+ pr_err("Invalid corner=%d requested\n", corner);
+ return -EINVAL;
+ }
+
+ pr_debug("old corner=%d, new corner=%d\n",
+ mem_acc_vreg->corner, corner);
+
+ if (corner == mem_acc_vreg->corner)
+ return 0;
+
+ /* go up or down one level at a time */
+ if (corner > mem_acc_vreg->corner) {
+ for (i = mem_acc_vreg->corner + 1; i <= corner; i++) {
+ pr_debug("UP: to corner %d\n", i);
+ update_acc_sel(mem_acc_vreg, i);
+ }
+ } else {
+ for (i = mem_acc_vreg->corner - 1; i >= corner; i--) {
+ pr_debug("DOWN: to corner %d\n", i);
+ update_acc_sel(mem_acc_vreg, i);
+ }
+ }
+
+ pr_debug("new voltage corner set %d\n", corner);
+
+ mem_acc_vreg->corner = corner;
+
+ return 0;
+}
+
+static int mem_acc_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct mem_acc_regulator *mem_acc_vreg = rdev_get_drvdata(rdev);
+
+ return mem_acc_vreg->corner;
+}
+
+static struct regulator_ops mem_acc_corner_ops = {
+ .set_voltage = mem_acc_regulator_set_voltage,
+ .get_voltage = mem_acc_regulator_get_voltage,
+};
+
+static int __mem_acc_sel_init(struct mem_acc_regulator *mem_acc_vreg,
+ int mem_type)
+{
+ int i;
+ u32 bit;
+
+ mem_acc_vreg->acc_sel_mask[mem_type] = devm_kzalloc(mem_acc_vreg->dev,
+ mem_acc_vreg->num_acc_sel[mem_type] * sizeof(u32), GFP_KERNEL);
+ if (!mem_acc_vreg->acc_sel_mask[mem_type]) {
+ pr_err("Unable to allocate memory for mem_type=%d\n", mem_type);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < mem_acc_vreg->num_acc_sel[mem_type]; i++) {
+ bit = mem_acc_vreg->acc_sel_bit_pos[mem_type][i];
+ mem_acc_vreg->acc_sel_mask[mem_type][i] =
+ MEM_ACC_SEL_MASK << bit;
+ }
+
+ return 0;
+}
+
+static int mem_acc_sel_init(struct mem_acc_regulator *mem_acc_vreg)
+{
+ int i, rc;
+
+ for (i = 0; i < MEMORY_MAX; i++) {
+ if (mem_acc_vreg->mem_acc_supported[i]) {
+ rc = __mem_acc_sel_init(mem_acc_vreg, i);
+ if (rc) {
+				pr_err("Unable to initialize mem_type=%d rc=%d\n",
+ i, rc);
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void mem_acc_en_init(struct mem_acc_regulator *mem_acc_vreg)
+{
+ int i, bit;
+ u32 acc_data;
+
+ acc_data = readl_relaxed(mem_acc_vreg->acc_en_base);
+ pr_debug("init: acc_en_register=%x\n", acc_data);
+ for (i = 0; i < mem_acc_vreg->num_acc_en; i++) {
+ bit = mem_acc_vreg->acc_en_bit_pos[i];
+ acc_data |= BIT(bit);
+ }
+ pr_debug("final: acc_en_register=%x\n", acc_data);
+ writel_relaxed(acc_data, mem_acc_vreg->acc_en_base);
+}
+
+static int populate_acc_data(struct mem_acc_regulator *mem_acc_vreg,
+ const char *prop_name, u32 **value, u32 *len)
+{
+ int rc;
+
+ if (!of_get_property(mem_acc_vreg->dev->of_node, prop_name, len)) {
+ pr_err("Unable to find %s property\n", prop_name);
+ return -EINVAL;
+ }
+ *len /= sizeof(u32);
+ if (!(*len)) {
+ pr_err("Incorrect entries in %s\n", prop_name);
+ return -EINVAL;
+ }
+
+ *value = devm_kzalloc(mem_acc_vreg->dev, (*len) * sizeof(u32),
+ GFP_KERNEL);
+ if (!(*value)) {
+ pr_err("Unable to allocate memory for %s\n", prop_name);
+ return -ENOMEM;
+ }
+
+ pr_debug("Found %s, data-length = %d\n", prop_name, *len);
+
+ rc = of_property_read_u32_array(mem_acc_vreg->dev->of_node,
+ prop_name, *value, *len);
+ if (rc) {
+ pr_err("Unable to populate %s rc=%d\n", prop_name, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int mem_acc_sel_setup(struct mem_acc_regulator *mem_acc_vreg,
+ struct resource *res, int mem_type)
+{
+ int len, rc;
+ char *mem_select_str;
+
+ mem_acc_vreg->acc_sel_addr[mem_type] = res->start;
+ len = res->end - res->start + 1;
+ pr_debug("'acc_sel_addr' = %pa mem_type=%d (len=%d)\n",
+ &res->start, mem_type, len);
+
+ mem_acc_vreg->acc_sel_base[mem_type] = devm_ioremap(mem_acc_vreg->dev,
+ mem_acc_vreg->acc_sel_addr[mem_type], len);
+ if (!mem_acc_vreg->acc_sel_base[mem_type]) {
+ pr_err("Unable to map 'acc_sel_addr' %pa for mem_type=%d\n",
+ &mem_acc_vreg->acc_sel_addr[mem_type], mem_type);
+ return -EINVAL;
+ }
+
+	switch (mem_type) {
+	case MEMORY_L1:
+		mem_select_str = "qcom,acc-sel-l1-bit-pos";
+		break;
+	case MEMORY_L2:
+		mem_select_str = "qcom,acc-sel-l2-bit-pos";
+		break;
+	default:
+		pr_err("Invalid mem_type=%d\n", mem_type);
+		return -EINVAL;
+	}
+
+ rc = populate_acc_data(mem_acc_vreg, mem_select_str,
+ &mem_acc_vreg->acc_sel_bit_pos[mem_type],
+ &mem_acc_vreg->num_acc_sel[mem_type]);
+ if (rc)
+ pr_err("Unable to populate '%s' rc=%d\n", mem_select_str, rc);
+
+ return rc;
+}
+
+static int mem_acc_efuse_init(struct platform_device *pdev,
+ struct mem_acc_regulator *mem_acc_vreg)
+{
+ struct resource *res;
+ int len, rc = 0;
+ u32 l1_config_skip_fuse_sel[5];
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse_addr");
+ if (!res || !res->start) {
+ mem_acc_vreg->efuse_base = NULL;
+ pr_debug("'efuse_addr' resource missing or not used.\n");
+ return 0;
+ }
+
+ mem_acc_vreg->efuse_addr = res->start;
+ len = res->end - res->start + 1;
+
+ pr_info("efuse_addr = %pa (len=0x%x)\n", &res->start, len);
+
+ mem_acc_vreg->efuse_base = ioremap(mem_acc_vreg->efuse_addr, len);
+ if (!mem_acc_vreg->efuse_base) {
+ pr_err("Unable to map efuse_addr %pa\n",
+ &mem_acc_vreg->efuse_addr);
+ return -EINVAL;
+ }
+
+ if (of_find_property(mem_acc_vreg->dev->of_node,
+ "qcom,l1-config-skip-fuse-sel", NULL)) {
+ rc = of_property_read_u32_array(mem_acc_vreg->dev->of_node,
+ "qcom,l1-config-skip-fuse-sel",
+ l1_config_skip_fuse_sel, 5);
+ if (rc < 0) {
+ pr_err("Read failed - qcom,l1-config-skip-fuse-sel rc=%d\n",
+ rc);
+ goto err_out;
+ }
+
+ if (mem_acc_fuse_is_setting_expected(mem_acc_vreg,
+ l1_config_skip_fuse_sel)) {
+ mem_acc_vreg->flags |= MEM_ACC_SKIP_L1_CONFIG;
+ pr_debug("Skip L1 configuration enabled\n");
+ }
+ }
+
+err_out:
+ iounmap(mem_acc_vreg->efuse_base);
+ return rc;
+}
+
+static int mem_acc_init(struct platform_device *pdev,
+ struct mem_acc_regulator *mem_acc_vreg)
+{
+ struct resource *res;
+ int len, rc, i;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "acc-en");
+ if (!res || !res->start) {
+ pr_debug("'acc-en' resource missing or not used.\n");
+ } else {
+ mem_acc_vreg->acc_en_addr = res->start;
+ len = res->end - res->start + 1;
+ pr_debug("'acc_en_addr' = %pa (len=0x%x)\n", &res->start, len);
+
+ mem_acc_vreg->acc_en_base = devm_ioremap(mem_acc_vreg->dev,
+ mem_acc_vreg->acc_en_addr, len);
+ if (!mem_acc_vreg->acc_en_base) {
+ pr_err("Unable to map 'acc_en_addr' %pa\n",
+ &mem_acc_vreg->acc_en_addr);
+ return -EINVAL;
+ }
+
+ rc = populate_acc_data(mem_acc_vreg, "qcom,acc-en-bit-pos",
+ &mem_acc_vreg->acc_en_bit_pos,
+ &mem_acc_vreg->num_acc_en);
+ if (rc) {
+ pr_err("Unable to populate 'qcom,acc-en-bit-pos' rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = mem_acc_efuse_init(pdev, mem_acc_vreg);
+ if (rc) {
+ pr_err("Wrong eFuse address specified: rc=%d\n", rc);
+ return rc;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "acc-sel-l1");
+ if (!res || !res->start) {
+ pr_debug("'acc-sel-l1' resource missing or not used.\n");
+ } else {
+ rc = mem_acc_sel_setup(mem_acc_vreg, res, MEMORY_L1);
+ if (rc) {
+ pr_err("Unable to setup mem-acc for mem_type=%d rc=%d\n",
+ MEMORY_L1, rc);
+ return rc;
+ }
+ mem_acc_vreg->mem_acc_supported[MEMORY_L1] = true;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "acc-sel-l2");
+ if (!res || !res->start) {
+ pr_debug("'acc-sel-l2' resource missing or not used.\n");
+ } else {
+ rc = mem_acc_sel_setup(mem_acc_vreg, res, MEMORY_L2);
+ if (rc) {
+ pr_err("Unable to setup mem-acc for mem_type=%d rc=%d\n",
+ MEMORY_L2, rc);
+ return rc;
+ }
+ mem_acc_vreg->mem_acc_supported[MEMORY_L2] = true;
+ }
+
+ rc = populate_acc_data(mem_acc_vreg, "qcom,corner-acc-map",
+ &mem_acc_vreg->corner_acc_map,
+ &mem_acc_vreg->num_corners);
+ if (rc) {
+ pr_err("Unable to find 'qcom,corner-acc-map' rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_debug("num_corners = %d\n", mem_acc_vreg->num_corners);
+
+ /* Check if at least one valid mem-acc config. is specified */
+ for (i = 0; i < MEMORY_MAX; i++) {
+ if (mem_acc_vreg->mem_acc_supported[i])
+ break;
+ }
+ if (i == MEMORY_MAX) {
+ pr_err("No mem-acc configuration specified\n");
+ return -EINVAL;
+ }
+
+ if (mem_acc_vreg->num_acc_en)
+ mem_acc_en_init(mem_acc_vreg);
+
+ rc = mem_acc_sel_init(mem_acc_vreg);
+ if (rc) {
+		pr_err("Unable to initialize mem_acc_sel reg rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int mem_acc_regulator_probe(struct platform_device *pdev)
+{
+ struct regulator_config reg_config = {};
+ struct mem_acc_regulator *mem_acc_vreg;
+ struct regulator_desc *rdesc;
+ struct regulator_init_data *init_data;
+ int rc;
+
+ if (!pdev->dev.of_node) {
+ pr_err("Device tree node is missing\n");
+ return -EINVAL;
+ }
+
+ init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
+ if (!init_data) {
+ pr_err("regulator init data is missing\n");
+ return -EINVAL;
+ } else {
+ init_data->constraints.input_uV
+ = init_data->constraints.max_uV;
+ init_data->constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_VOLTAGE;
+ }
+
+ mem_acc_vreg = devm_kzalloc(&pdev->dev, sizeof(*mem_acc_vreg),
+ GFP_KERNEL);
+ if (!mem_acc_vreg) {
+ pr_err("Can't allocate mem_acc_vreg memory\n");
+ return -ENOMEM;
+ }
+ mem_acc_vreg->dev = &pdev->dev;
+
+ rc = mem_acc_init(pdev, mem_acc_vreg);
+ if (rc) {
+ pr_err("Unable to initialize mem_acc configuration rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rdesc = &mem_acc_vreg->rdesc;
+ rdesc->owner = THIS_MODULE;
+ rdesc->type = REGULATOR_VOLTAGE;
+ rdesc->ops = &mem_acc_corner_ops;
+ rdesc->name = init_data->constraints.name;
+
+ reg_config.dev = &pdev->dev;
+ reg_config.init_data = init_data;
+ reg_config.driver_data = mem_acc_vreg;
+ reg_config.of_node = pdev->dev.of_node;
+ mem_acc_vreg->rdev = regulator_register(rdesc, &reg_config);
+ if (IS_ERR(mem_acc_vreg->rdev)) {
+ rc = PTR_ERR(mem_acc_vreg->rdev);
+ if (rc != -EPROBE_DEFER)
+ pr_err("regulator_register failed: rc=%d\n", rc);
+ else
+ dev_info(&pdev->dev, "deferred at register\n");
+ return rc;
+ }
+
+ platform_set_drvdata(pdev, mem_acc_vreg);
+
+ dev_info(&pdev->dev, "initialized\n");
+
+ return 0;
+}
+
+static int mem_acc_regulator_remove(struct platform_device *pdev)
+{
+ struct mem_acc_regulator *mem_acc_vreg = platform_get_drvdata(pdev);
+
+ regulator_unregister(mem_acc_vreg->rdev);
+
+ return 0;
+}
+
+static struct of_device_id mem_acc_regulator_match_table[] = {
+ { .compatible = "qcom,mem-acc-regulator", },
+ {}
+};
+
+static struct platform_driver mem_acc_regulator_driver = {
+ .probe = mem_acc_regulator_probe,
+ .remove = mem_acc_regulator_remove,
+ .driver = {
+ .name = "qcom,mem-acc-regulator",
+ .of_match_table = mem_acc_regulator_match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+int __init mem_acc_regulator_init(void)
+{
+ return platform_driver_register(&mem_acc_regulator_driver);
+}
+postcore_initcall(mem_acc_regulator_init);
+
+static void __exit mem_acc_regulator_exit(void)
+{
+ platform_driver_unregister(&mem_acc_regulator_driver);
+}
+module_exit(mem_acc_regulator_exit);
+
+MODULE_DESCRIPTION("MEM-ACC-SEL regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/qpnp-regulator.c b/drivers/regulator/qpnp-regulator.c
index cdabf9510125..5717303bb408 100644
--- a/drivers/regulator/qpnp-regulator.c
+++ b/drivers/regulator/qpnp-regulator.c
@@ -1994,7 +1994,6 @@ static struct spmi_driver qpnp_regulator_driver = {
},
.probe = qpnp_regulator_probe,
.remove = qpnp_regulator_remove,
-// .id_table = qpnp_regulator_id,
};
/*
diff --git a/drivers/regulator/spm-regulator.c b/drivers/regulator/spm-regulator.c
index cc9d788b35c6..6725d1d36c78 100644
--- a/drivers/regulator/spm-regulator.c
+++ b/drivers/regulator/spm-regulator.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spmi.h>
#include <linux/string.h>
@@ -459,55 +460,59 @@ static int qpnp_smps_init_step_rate(struct spm_vreg *vreg)
return rc;
}
-static int spm_regulator_probe(struct spmi_device *spmi)
+static int spm_regulator_probe(struct platform_device *pdev)
{
struct regulator_config reg_config = {};
- struct device_node *node = spmi->dev.of_node;
+ struct device_node *node = pdev->dev.of_node;
struct regulator_init_data *init_data;
struct spm_vreg *vreg;
- struct resource *res;
struct regulator_desc desc;
+ struct resource res;
+ struct spmi_device *spmi;
int rc;
- dev_err(&spmi->dev, "%s\n", __func__);
-
if (!node) {
- dev_err(&spmi->dev, "%s: device node missing\n", __func__);
+ dev_err(&pdev->dev, "%s: device node missing\n", __func__);
return -ENODEV;
}
+ spmi = spmi_from_parent(&pdev->dev);
+ dev_err(&pdev->dev, "spmi = %p\n", spmi);
+
rc = msm_spm_probe_done();
if (rc) {
if (rc != -EPROBE_DEFER)
- dev_err(&spmi->dev, "%s: spm unavailable, rc=%d\n",
+ dev_err(&pdev->dev, "%s: spm unavailable, rc=%d\n",
__func__, rc);
return rc;
}
- vreg = devm_kzalloc(&spmi->dev, sizeof(*vreg), GFP_KERNEL);
+ vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL);
if (!vreg) {
pr_err("allocation failed.\n");
return -ENOMEM;
}
vreg->spmi_dev = spmi;
-
- res = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&spmi->dev, "%s: node is missing base address\n",
+ //rc = of_address_to_resource(pdev->dev.of_node, 0, &res);
+ //if (rc)
+ // return rc;
+ res.start = 0x1700;
+ if (!res.start) {
+ dev_err(&pdev->dev, "%s: node is missing base address\n",
__func__);
return -EINVAL;
}
- vreg->spmi_base_addr = res->start;
-
+ vreg->spmi_base_addr = res.start;
+pr_err("a\n");
rc = qpnp_smps_check_type(vreg);
if (rc)
return rc;
-
+pr_err("b\n");
/* Specify CPU 0 as default in order to handle shared regulator case. */
vreg->cpu_num = 0;
of_property_read_u32(vreg->spmi_dev->dev.of_node, "qcom,cpu-num",
&vreg->cpu_num);
-
+pr_err("c\n");
/*
* The regulator must be initialized to range 0 or range 1 during
* PMIC power on sequence. Once it is set, it cannot be changed
@@ -519,7 +524,7 @@ static int spm_regulator_probe(struct spmi_device *spmi)
rc = qpnp_ult_hf_init_range(vreg);
if (rc)
return rc;
-
+pr_err("d\n");
rc = qpnp_smps_init_voltage(vreg);
if (rc)
return rc;
@@ -534,7 +539,7 @@ static int spm_regulator_probe(struct spmi_device *spmi)
init_data = of_get_regulator_init_data(&spmi->dev, node, &desc);
if (!init_data) {
- dev_err(&spmi->dev, "%s: unable to allocate memory\n",
+ dev_err(&pdev->dev, "%s: unable to allocate memory\n",
__func__);
return -ENOMEM;
}
@@ -543,7 +548,7 @@ static int spm_regulator_probe(struct spmi_device *spmi)
| REGULATOR_CHANGE_VOLTAGE;
if (!init_data->constraints.name) {
- dev_err(&spmi->dev, "%s: node is missing regulator name\n",
+ dev_err(&pdev->dev, "%s: node is missing regulator name\n",
__func__);
return -EINVAL;
}
@@ -556,19 +561,19 @@ static int spm_regulator_probe(struct spmi_device *spmi)
= (vreg->range->max_uV - vreg->range->set_point_min_uV)
/ vreg->range->step_uV + 1;
- reg_config.dev = &spmi->dev;
+ reg_config.dev = &pdev->dev;
reg_config.init_data = init_data;
reg_config.driver_data = vreg;
reg_config.of_node = node;
vreg->rdev = regulator_register(&vreg->rdesc, &reg_config);
if (IS_ERR(vreg->rdev)) {
rc = PTR_ERR(vreg->rdev);
- dev_err(&spmi->dev, "%s: regulator_register failed, rc=%d\n",
+ dev_err(&pdev->dev, "%s: regulator_register failed, rc=%d\n",
__func__, rc);
return rc;
}
- dev_set_drvdata(&spmi->dev, vreg);
+ dev_set_drvdata(&pdev->dev, vreg);
pr_info("name=%s, range=%s, voltage=%d uV, mode=%s, step rate=%d uV/us\n",
vreg->rdesc.name, vreg->range == &fts2_range0 ? "LV" : "MV",
@@ -580,11 +585,13 @@ static int spm_regulator_probe(struct spmi_device *spmi)
return rc;
}
-static void spm_regulator_remove(struct spmi_device *spmi)
+static int spm_regulator_remove(struct platform_device *pdev)
{
- struct spm_vreg *vreg = dev_get_drvdata(&spmi->dev);
+ struct spm_vreg *vreg = dev_get_drvdata(&pdev->dev);
regulator_unregister(vreg->rdev);
+
+ return 0;
}
static struct of_device_id spm_regulator_match_table[] = {
@@ -598,7 +605,7 @@ static const struct spmi_device_id spm_regulator_id[] = {
};
MODULE_DEVICE_TABLE(spmi, spm_regulator_id);
-static struct spmi_driver spm_regulator_driver = {
+static struct platform_driver spm_regulator_driver = {
.driver = {
.name = SPM_REGULATOR_DRIVER_NAME,
.of_match_table = spm_regulator_match_table,
@@ -606,36 +613,9 @@ static struct spmi_driver spm_regulator_driver = {
},
.probe = spm_regulator_probe,
.remove = spm_regulator_remove,
-// .id_table = spm_regulator_id,
};
-/**
- * spm_regulator_init() - register spmi driver for spm-regulator
- *
- * This initialization function should be called in systems in which driver
- * registration ordering must be controlled precisely.
- *
- * Returns 0 on success or errno on failure.
- */
-int __init spm_regulator_init(void)
-{
- static bool has_registered;
-
- if (has_registered)
- return 0;
- has_registered = true;
-
- return spmi_driver_register(&spm_regulator_driver);
-}
-EXPORT_SYMBOL(spm_regulator_init);
-
-static void __exit spm_regulator_exit(void)
-{
- spmi_driver_unregister(&spm_regulator_driver);
-}
-
-//arch_initcall(spm_regulator_init);
-module_exit(spm_regulator_exit);
+module_platform_driver(spm_regulator_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SPM regulator driver");
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 20559ab3466d..7061f4463d0b 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -139,7 +139,9 @@ static inline u32 pmic_arb_base_read(struct spmi_pmic_arb_dev *dev, u32 offset)
static inline void pmic_arb_base_write(struct spmi_pmic_arb_dev *dev,
u32 offset, u32 val)
{
+ pr_err("%s: 0x%x + 0x%x\n", __func__, (u32)dev->base, offset);
writel_relaxed(val, dev->base + offset);
+ pr_err(" survived\n");
}
/**
@@ -248,7 +250,7 @@ static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
PMIC_ARB_MAX_TRANS_BYTES, len);
return -EINVAL;
}
-
+pr_err("1\n");
/* Check the opcode */
if (opc >= 0x60 && opc <= 0x7F)
opc = PMIC_ARB_OP_READ;
@@ -260,13 +262,13 @@ static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
return -EINVAL;
cmd = (opc << 27) | ((sid & 0xf) << 20) | (addr << 4) | (bc & 0x7);
-
+pr_err("2\n");
raw_spin_lock_irqsave(&pmic_arb->lock, flags);
pmic_arb_base_write(pmic_arb, PMIC_ARB_CMD(pmic_arb->channel), cmd);
rc = pmic_arb_wait_for_done(ctrl);
if (rc)
goto done;
-
+pr_err("3\n");
pa_read_data(pmic_arb, buf, PMIC_ARB_RDATA0(pmic_arb->channel),
min_t(u8, bc, 3));
@@ -650,33 +652,28 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
err = PTR_ERR(pa->base);
goto err_put_ctrl;
}
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr");
pa->intr = devm_ioremap_resource(&ctrl->dev, res);
if (IS_ERR(pa->intr)) {
err = PTR_ERR(pa->intr);
goto err_put_ctrl;
}
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cnfg");
pa->cnfg = devm_ioremap_resource(&ctrl->dev, res);
if (IS_ERR(pa->cnfg)) {
err = PTR_ERR(pa->cnfg);
goto err_put_ctrl;
}
-
pa->irq = platform_get_irq_byname(pdev, "periph_irq");
if (pa->irq < 0) {
err = pa->irq;
goto err_put_ctrl;
}
-
err = of_property_read_u32(pdev->dev.of_node, "qcom,channel", &channel);
if (err) {
dev_err(&pdev->dev, "channel unspecified.\n");
goto err_put_ctrl;
}
-
if (channel > 5) {
dev_err(&pdev->dev, "invalid channel (%u) specified.\n",
channel);
@@ -696,7 +693,6 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
err = -EINVAL;
goto err_put_ctrl;
}
-
pa->ee = ee;
for (i = 0; i < ARRAY_SIZE(pa->mapping_table); ++i)
@@ -723,7 +719,6 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
err = -ENOMEM;
goto err_put_ctrl;
}
-
irq_set_handler_data(pa->irq, pa);
irq_set_chained_handler(pa->irq, pmic_arb_chained_irq);
@@ -731,7 +726,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
if (err)
goto err_domain_remove;
- dev_dbg(&ctrl->dev, "PMIC Arb Version 0x%x\n",
+ dev_err(&ctrl->dev, "PMIC Arb Version 0x%x\n",
pmic_arb_base_read(pa, PMIC_ARB_VERSION));
return 0;
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 6de645eca3f2..466d93eb47b9 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -9,6 +9,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+
+
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/idr.h>
@@ -22,8 +24,6 @@
#include <dt-bindings/spmi/spmi.h>
-#define DEBUG 1
-
static DEFINE_IDA(ctrl_ida);
static void spmi_dev_release(struct device *dev)
@@ -49,8 +49,12 @@ static const struct device_type spmi_ctrl_type = {
static int spmi_device_match(struct device *dev, struct device_driver *drv)
{
- if (of_driver_match_device(dev, drv))
+
+ pr_info("%s: %s vs %s\n", __func__, dev_name(dev), drv->name);
+ if (of_driver_match_device(dev, drv)) {
+ pr_err("of_driver_match says hit\n");
return 1;
+ }
if (drv->name)
return strncmp(dev_name(dev), drv->name,
@@ -66,7 +70,11 @@ static int spmi_device_match(struct device *dev, struct device_driver *drv)
int spmi_device_add(struct spmi_device *sdev)
{
struct spmi_controller *ctrl = sdev->ctrl;
+ struct device_node *node;
+ struct platform_device *pdev;
int err;
+ static int index;
+ char *name;
dev_set_name(&sdev->dev, "%d-%02x", ctrl->nr, sdev->usid);
@@ -77,6 +85,26 @@ int spmi_device_add(struct spmi_device *sdev)
goto err_device_add;
}
+
+ for_each_available_child_of_node(sdev->dev.of_node, node) {
+ name = kzalloc(64, GFP_KERNEL);
+ sprintf(name, " %s-%d", node->name, index++);
+
+ pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
+
+ pdev->name = name;
+ pdev->dev.parent = &sdev->dev;
+ pdev->dev.of_node = node;
+ pdev->dev.init_name = name;
+
+ err = platform_device_register(pdev);
+ if (err) {
+ pr_err("Failed to create device %s: %d\n", name, err);
+ kfree(pdev);
+ } else
+ dev_info(&sdev->dev, "Created %s\n", name);
+ }
+
dev_dbg(&sdev->dev, "device %s registered\n", dev_name(&sdev->dev));
err_device_add:
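Note (not part of the patch): the loop added above hand-rolls a platform_device for each child node of the SPMI slave. A hypothetical alternative is to let the OF core create the children, which handles naming, lifetime and error cleanup, and only creates devices for child nodes that carry a compatible property; the function name below is made up.

#include <linux/of_platform.h>
#include <linux/spmi.h>

/*
 * Sketch of an alternative to the hand-rolled loop: one platform device
 * per available child node, parented to the spmi_device.
 */
static int spmi_populate_children(struct spmi_device *sdev)
{
	return of_platform_populate(sdev->dev.of_node, NULL, NULL,
				    &sdev->dev);
}
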
@@ -109,6 +137,8 @@ static inline int spmi_read_cmd(struct spmi_controller *ctrl, u8 opcode,
if (!ctrl || !ctrl->read_cmd || ctrl->dev.type != &spmi_ctrl_type)
return -EINVAL;
+ pr_err("ctrl->read_cmd = %p\n", (void *)ctrl->read_cmd);
+
return ctrl->read_cmd(ctrl, opcode, sid, addr, buf, len);
}
@@ -178,7 +208,7 @@ int spmi_ext_register_readl(struct spmi_device *sdev, u16 addr, u8 *buf,
/* 16-bit register address, up to 8 bytes */
if (len == 0 || len > 8)
return -EINVAL;
-
+pr_err("sdev->ctrl = %p\n", sdev->ctrl);
return spmi_read_cmd(sdev->ctrl, SPMI_CMD_EXT_READL, sdev->usid, addr,
buf, len);
}
@@ -382,10 +412,23 @@ struct spmi_device *spmi_device_alloc(struct spmi_controller *ctrl)
sdev->dev.parent = &ctrl->dev;
sdev->dev.bus = &spmi_bus_type;
sdev->dev.type = &spmi_dev_type;
+
+ dev_set_drvdata(&sdev->dev, sdev);
+
return sdev;
}
EXPORT_SYMBOL_GPL(spmi_device_alloc);
+struct spmi_device *spmi_from_parent(struct device *dev)
+{
+ if (!dev->parent)
+ return NULL;
+
+ return dev_get_drvdata(dev->parent);
+}
+EXPORT_SYMBOL_GPL(spmi_from_parent);
+
/**
* spmi_controller_alloc() - Allocate a new SPMI controller
* @parent: parent device
@@ -439,12 +482,12 @@ static void of_spmi_register_devices(struct spmi_controller *ctrl)
if (!ctrl->dev.of_node)
return;
-
+pr_err("%s\n", __func__);
for_each_available_child_of_node(ctrl->dev.of_node, node) {
struct spmi_device *sdev;
u32 reg[2];
- dev_dbg(&ctrl->dev, "adding child %s\n", node->full_name);
+ dev_info(&ctrl->dev, "adding child %s\n", node->full_name);
err = of_property_read_u32_array(node, "reg", reg, 2);
if (err) {
@@ -468,7 +511,8 @@ static void of_spmi_register_devices(struct spmi_controller *ctrl)
continue;
}
- dev_dbg(&ctrl->dev, "read usid %02x\n", reg[0]);
+ dev_info(&ctrl->dev, "read usid %02x\n", reg[0]);
+
sdev = spmi_device_alloc(ctrl);
if (!sdev)
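Note (not part of the patch): since spmi_device_alloc() now stashes the spmi_device in drvdata, a platform driver bound to one of the child devices created in spmi_device_add() can reach its parent SPMI slave through the new spmi_from_parent() helper. The probe below is only a usage sketch; the function name and the 0x0100 register offset are made up.

#include <linux/platform_device.h>
#include <linux/spmi.h>

/* Usage sketch: read one byte from a made-up peripheral register. */
static int example_child_probe(struct platform_device *pdev)
{
	struct spmi_device *sdev = spmi_from_parent(&pdev->dev);
	u8 val;
	int err;

	if (!sdev)
		return -ENODEV;

	err = spmi_ext_register_readl(sdev, 0x0100, &val, 1);
	if (err)
		return err;

	dev_info(&pdev->dev, "register 0x0100 reads 0x%02x\n", val);
	return 0;
}
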
diff --git a/include/linux/of_spmi.h b/include/linux/of_spmi.h
new file mode 100644
index 000000000000..b22696bf9a3f
--- /dev/null
+++ b/include/linux/of_spmi.h
@@ -0,0 +1,34 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/spmi.h>
+#include <linux/of_irq.h>
+
+#ifdef CONFIG_OF_SPMI
+/**
+ * of_spmi_register_devices() - Register devices in the SPMI Device Tree
+ * @ctrl: spmi_controller which devices should be registered to.
+ *
+ * This routine scans the SPMI Device Tree, allocating resources and
+ * creating spmi_devices according to the SPMI bus Device Tree
+ * hierarchy. Details of this hierarchy can be found in
+ * Documentation/devicetree/bindings/spmi. This routine is normally
+ * called from the probe routine of the driver registering as a
+ * spmi_controller.
+ */
+int of_spmi_register_devices(struct spmi_controller *ctrl);
+#else
+static inline int of_spmi_register_devices(struct spmi_controller *ctrl)
+{
+ return -ENXIO;
+}
+#endif /* CONFIG_OF_SPMI */
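Note (not part of the patch): as the comment block above says, of_spmi_register_devices() is meant to be called from the probe routine of a controller driver. The fragment below sketches that call site, assuming CONFIG_OF_SPMI is enabled and the declaration is backed by the implementation in drivers/spmi/spmi.c; the -ENXIO check covers the stub in the #else branch, and the probe name is made up.

#include <linux/of_spmi.h>
#include <linux/platform_device.h>

/* Sketch of a controller probe; register mapping and read_cmd/write_cmd
 * setup are omitted. */
static int example_ctrl_probe(struct platform_device *pdev)
{
	struct spmi_controller *ctrl;
	int err;

	ctrl = spmi_controller_alloc(&pdev->dev, 0);
	if (!ctrl)
		return -ENOMEM;

	/* ... fill in ctrl->read_cmd / ctrl->write_cmd here ... */

	err = spmi_controller_add(ctrl);
	if (err) {
		spmi_controller_put(ctrl);
		return err;
	}

	err = of_spmi_register_devices(ctrl);
	if (err && err != -ENXIO)
		dev_warn(&pdev->dev, "no child devices registered: %d\n", err);

	return 0;
}
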
diff --git a/include/linux/spmi.h b/include/linux/spmi.h
index be22a46c5bc2..c4f05001479d 100644
--- a/include/linux/spmi.h
+++ b/include/linux/spmi.h
@@ -257,6 +257,6 @@ static inline const char *spmi_get_primary_dev_name(struct spmi_device *dev)
struct spmi_resource *spmi_get_dev_container_byname(struct spmi_device *dev,
const char *label);
-
+struct spmi_device *spmi_from_parent(struct device *dev);
#endif