summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAndy Green <andy.green@linaro.org>2014-12-20 11:48:25 +0800
committerAndy Green <andy.green@linaro.org>2015-01-22 00:46:30 +0800
commit6d65a9e443bd88729bf19a0399d0161e24d1ffd0 (patch)
tree02b321bcb138dc5bba6805867fe52b3b5056efdc
parent9f21941f32faf05be0bd71d4057497f9f2a8fd54 (diff)
Peripheral Image Loader (PIL)
PIL is Qualcomm's Peripheral Image Loader. It deals with initializing whole subsystems with magic firmware. Signed-off-by: Andy Green <andy.green@linaro.org>
-rw-r--r--arch/arm/mach-qcom/Kconfig1
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/gpio-msm-smp2p.c771
-rw-r--r--drivers/soc/qcom/peripheral-loader.c902
-rw-r--r--drivers/soc/qcom/peripheral-loader.h100
5 files changed, 1776 insertions, 0 deletions
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index 94ab55a713b3..a610ccccc730 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -57,6 +57,7 @@ config ARCH_MSM8916
select SOC_BUS
select MSM_SCM
select MSM_SPM_V2
+ select HAS_DMA
config QCOM_SCM
bool
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 81755f1305e6..5e3b1d37d3e3 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -109,3 +109,5 @@ obj-$(CONFIG_GPIO_XILINX) += gpio-xilinx.o
obj-$(CONFIG_GPIO_XTENSA) += gpio-xtensa.o
obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o
obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o
+
+obj-$(CONFIG_MSM_SMP2P) += gpio-msm-smp2p.o
diff --git a/drivers/gpio/gpio-msm-smp2p.c b/drivers/gpio/gpio-msm-smp2p.c
new file mode 100644
index 000000000000..4aff7e6de564
--- /dev/null
+++ b/drivers/gpio/gpio-msm-smp2p.c
@@ -0,0 +1,771 @@
+/* drivers/gpio/gpio-msm-smp2p.c
+ *
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/bitmap.h>
+#include <linux/of.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/ipc_logging.h>
+#include "../soc/qcom/smp2p_private_api.h"
+#include "../soc/qcom/smp2p_private.h"
+
+/* GPIO device - one per SMP2P entry. */
+struct smp2p_chip_dev {
+ struct list_head entry_list;
+ char name[SMP2P_MAX_ENTRY_NAME];
+ int remote_pid;
+ bool is_inbound;
+ bool is_open;
+ struct notifier_block out_notifier;
+ struct notifier_block in_notifier;
+ struct msm_smp2p_out *out_handle;
+
+ struct gpio_chip gpio;
+ struct irq_domain *irq_domain;
+ int irq_base;
+
+ spinlock_t irq_lock;
+ DECLARE_BITMAP(irq_enabled, SMP2P_BITS_PER_ENTRY);
+ DECLARE_BITMAP(irq_rising_edge, SMP2P_BITS_PER_ENTRY);
+ DECLARE_BITMAP(irq_falling_edge, SMP2P_BITS_PER_ENTRY);
+};
+
+static struct platform_driver smp2p_gpio_driver;
+static struct lock_class_key smp2p_gpio_lock_class;
+static struct irq_chip smp2p_gpio_irq_chip;
+static DEFINE_SPINLOCK(smp2p_entry_lock_lha1);
+static LIST_HEAD(smp2p_entry_list);
+
+/* Used for mapping edge to name for logging. */
+static const char * const edge_names[] = {
+ "-",
+ "0->1",
+ "1->0",
+ "-",
+};
+
+/* Used for mapping edge to value for logging. */
+static const char * const edge_name_rising[] = {
+ "-",
+ "0->1",
+};
+
+/* Used for mapping edge to value for logging. */
+static const char * const edge_name_falling[] = {
+ "-",
+ "1->0",
+};
+
+static int smp2p_gpio_to_irq(struct gpio_chip *cp,
+ unsigned offset);
+
+/**
+ * smp2p_get_value - Retrieves GPIO value.
+ *
+ * @cp: GPIO chip pointer
+ * @offset: Pin offset
+ * @returns: >=0: value of GPIO Pin; < 0 for error
+ *
+ * Error codes:
+ * -ENODEV - chip/entry invalid
+ * -ENETDOWN - valid entry, but entry not yet created
+ */
+static int smp2p_get_value(struct gpio_chip *cp,
+ unsigned offset)
+{
+ struct smp2p_chip_dev *chip;
+ int ret = 0;
+ uint32_t data;
+
+ if (!cp)
+ return -ENODEV;
+
+ chip = container_of(cp, struct smp2p_chip_dev, gpio);
+ if (!chip->is_open)
+ return -ENETDOWN;
+
+ if (chip->is_inbound)
+ ret = msm_smp2p_in_read(chip->remote_pid, chip->name, &data);
+ else
+ ret = msm_smp2p_out_read(chip->out_handle, &data);
+
+ if (!ret)
+ ret = (data & (1 << offset)) ? 1 : 0;
+
+ return ret;
+}
+
+/**
+ * smp2p_set_value - Sets GPIO value.
+ *
+ * @cp: GPIO chip pointer
+ * @offset: Pin offset
+ * @value: New value
+ */
+static void smp2p_set_value(struct gpio_chip *cp, unsigned offset, int value)
+{
+ struct smp2p_chip_dev *chip;
+ uint32_t data_set;
+ uint32_t data_clear;
+ int ret;
+
+ if (!cp)
+ return;
+
+ chip = container_of(cp, struct smp2p_chip_dev, gpio);
+ if (!chip->is_open)
+ return;
+
+ if (chip->is_inbound) {
+ SMP2P_INFO("%s: '%s':%d virq %d invalid operation\n",
+ __func__, chip->name, chip->remote_pid,
+ chip->irq_base + offset);
+ return;
+ }
+
+ if (value) {
+ data_set = 1 << offset;
+ data_clear = 0;
+ } else {
+ data_set = 0;
+ data_clear = 1 << offset;
+ }
+
+ ret = msm_smp2p_out_modify(chip->out_handle,
+ data_set, data_clear);
+
+ if (ret)
+ SMP2P_GPIO("'%s':%d gpio %d set to %d failed (%d)\n",
+ chip->name, chip->remote_pid,
+ chip->gpio.base + offset, value, ret);
+ else
+ SMP2P_GPIO("'%s':%d gpio %d set to %d\n",
+ chip->name, chip->remote_pid,
+ chip->gpio.base + offset, value);
+}
+
+/**
+ * smp2p_direction_input - Sets GPIO direction to input.
+ *
+ * @cp: GPIO chip pointer
+ * @offset: Pin offset
+ * @returns: 0 for success; < 0 for failure
+ */
+static int smp2p_direction_input(struct gpio_chip *cp, unsigned offset)
+{
+ struct smp2p_chip_dev *chip;
+
+ if (!cp)
+ return -ENODEV;
+
+ chip = container_of(cp, struct smp2p_chip_dev, gpio);
+ if (!chip->is_inbound)
+ return -EPERM;
+
+ return 0;
+}
+
+/**
+ * smp2p_direction_output - Sets GPIO direction to output.
+ *
+ * @cp: GPIO chip pointer
+ * @offset: Pin offset
+ * @value: Direction
+ * @returns: 0 for success; < 0 for failure
+ */
+static int smp2p_direction_output(struct gpio_chip *cp,
+ unsigned offset, int value)
+{
+ struct smp2p_chip_dev *chip;
+
+ if (!cp)
+ return -ENODEV;
+
+ chip = container_of(cp, struct smp2p_chip_dev, gpio);
+ if (chip->is_inbound)
+ return -EPERM;
+
+ return 0;
+}
+
+/**
+ * smp2p_gpio_to_irq - Convert GPIO pin to virtual IRQ pin.
+ *
+ * @cp: GPIO chip pointer
+ * @offset: Pin offset
+ * @returns: >0 for virtual irq value; < 0 for failure
+ */
+static int smp2p_gpio_to_irq(struct gpio_chip *cp, unsigned offset)
+{
+	struct smp2p_chip_dev *chip;
+
+	/*
+	 * Validate @cp before deriving @chip from it; the original code
+	 * computed container_of(cp, ...) first and tested !cp afterwards.
+	 */
+	if (!cp)
+		return -ENODEV;
+
+	chip = container_of(cp, struct smp2p_chip_dev, gpio);
+	/* irq_base <= 0 means no interrupt domain exists for this entry */
+	if (chip->irq_base <= 0)
+		return -ENODEV;
+
+	return chip->irq_base + offset;
+}
+
+/**
+ * smp2p_gpio_irq_mask_helper - Mask/Unmask interrupt.
+ *
+ * @d: IRQ data
+ * @mask: true to mask (disable), false to unmask (enable)
+ */
+static void smp2p_gpio_irq_mask_helper(struct irq_data *d, bool mask)
+{
+ struct smp2p_chip_dev *chip;
+ int offset;
+ unsigned long flags;
+
+ chip = (struct smp2p_chip_dev *)irq_get_chip_data(d->irq);
+ if (!chip || chip->irq_base <= 0)
+ return;
+
+ offset = d->irq - chip->irq_base;
+ spin_lock_irqsave(&chip->irq_lock, flags);
+ if (mask)
+ clear_bit(offset, chip->irq_enabled);
+ else
+ set_bit(offset, chip->irq_enabled);
+ spin_unlock_irqrestore(&chip->irq_lock, flags);
+}
+
+/**
+ * smp2p_gpio_irq_mask - Mask interrupt.
+ *
+ * @d: IRQ data
+ */
+static void smp2p_gpio_irq_mask(struct irq_data *d)
+{
+ smp2p_gpio_irq_mask_helper(d, true);
+}
+
+/**
+ * smp2p_gpio_irq_unmask - Unmask interrupt.
+ *
+ * @d: IRQ data
+ */
+static void smp2p_gpio_irq_unmask(struct irq_data *d)
+{
+ smp2p_gpio_irq_mask_helper(d, false);
+}
+
+/**
+ * smp2p_gpio_irq_set_type - Set interrupt edge type.
+ *
+ * @d: IRQ data
+ * @type: Edge type for interrupt
+ * @returns 0 for success; < 0 for failure
+ */
+static int smp2p_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct smp2p_chip_dev *chip;
+ int offset;
+ unsigned long flags;
+ int ret = 0;
+
+ chip = (struct smp2p_chip_dev *)irq_get_chip_data(d->irq);
+ if (!chip)
+ return -ENODEV;
+
+ if (chip->irq_base <= 0) {
+ SMP2P_ERR("%s: '%s':%d virqbase %d invalid\n",
+ __func__, chip->name, chip->remote_pid,
+ chip->irq_base);
+ return -ENODEV;
+ }
+
+ offset = d->irq - chip->irq_base;
+
+ spin_lock_irqsave(&chip->irq_lock, flags);
+ clear_bit(offset, chip->irq_rising_edge);
+ clear_bit(offset, chip->irq_falling_edge);
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ set_bit(offset, chip->irq_rising_edge);
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ set_bit(offset, chip->irq_falling_edge);
+ break;
+
+ case IRQ_TYPE_NONE:
+ case IRQ_TYPE_DEFAULT:
+ case IRQ_TYPE_EDGE_BOTH:
+ set_bit(offset, chip->irq_rising_edge);
+ set_bit(offset, chip->irq_falling_edge);
+ break;
+
+ default:
+ SMP2P_ERR("%s: unsupported interrupt type 0x%x\n",
+ __func__, type);
+ ret = -EINVAL;
+ break;
+ }
+ spin_unlock_irqrestore(&chip->irq_lock, flags);
+ return ret;
+}
+
+/**
+ * smp2p_irq_map - Creates or updates binding of virtual IRQ
+ *
+ * @domain_ptr: Interrupt domain pointer
+ * @virq: Virtual IRQ
+ * @hw: Hardware IRQ (same as virq for nomap)
+ * @returns: 0 for success
+ */
+static int smp2p_irq_map(struct irq_domain *domain_ptr, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct smp2p_chip_dev *chip;
+
+ chip = domain_ptr->host_data;
+ if (!chip) {
+ SMP2P_ERR("%s: invalid domain ptr %p\n", __func__, domain_ptr);
+ return -ENODEV;
+ }
+
+ /* map chip structures to device */
+ irq_set_lockdep_class(virq, &smp2p_gpio_lock_class);
+ irq_set_chip_and_handler(virq, &smp2p_gpio_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(virq, chip);
+ set_irq_flags(virq, IRQF_VALID);
+
+ return 0;
+}
+
+static struct irq_chip smp2p_gpio_irq_chip = {
+ .name = "smp2p_gpio",
+ .irq_mask = smp2p_gpio_irq_mask,
+ .irq_unmask = smp2p_gpio_irq_unmask,
+ .irq_set_type = smp2p_gpio_irq_set_type,
+};
+
+/* No-map interrupt Domain */
+static const struct irq_domain_ops smp2p_irq_domain_ops = {
+ .map = smp2p_irq_map,
+};
+
+/**
+ * msm_summary_irq_handler - Handles inbound entry change notification.
+ *
+ * @chip: GPIO chip pointer
+ * @entry: Change notification data
+ *
+ * Whenever an entry changes, this callback is triggered to determine
+ * which bits changed and if the corresponding interrupts need to be
+ * triggered.
+ */
+static void msm_summary_irq_handler(struct smp2p_chip_dev *chip,
+	struct msm_smp2p_update_notif *entry)
+{
+	int i;
+	uint32_t cur_val;
+	uint32_t prev_val;
+	uint32_t edge;
+	unsigned long flags;
+	bool trigger_interrupt;	/* renamed from misspelled trigger_interrrupt */
+	bool irq_rising;
+	bool irq_falling;
+
+	cur_val = entry->current_value;
+	prev_val = entry->previous_value;
+
+	/* no interrupt domain was created for this entry - nothing to do */
+	if (chip->irq_base <= 0)
+		return;
+
+	SMP2P_GPIO("'%s':%d GPIO Summary IRQ Change %08x->%08x\n",
+			chip->name, chip->remote_pid, prev_val, cur_val);
+
+	for (i = 0; i < SMP2P_BITS_PER_ENTRY; ++i) {
+		spin_lock_irqsave(&chip->irq_lock, flags);
+		trigger_interrupt = false;
+		/* edge encodes the transition of bit i: 01 = rising, 10 = falling */
+		edge = (prev_val & 0x1) << 1 | (cur_val & 0x1);
+		irq_rising = test_bit(i, chip->irq_rising_edge);
+		irq_falling = test_bit(i, chip->irq_falling_edge);
+
+		if (test_bit(i, chip->irq_enabled)) {
+			if (edge == 0x1 && irq_rising)
+				/* 0->1 transition */
+				trigger_interrupt = true;
+			else if (edge == 0x2 && irq_falling)
+				/* 1->0 transition */
+				trigger_interrupt = true;
+		} else {
+			SMP2P_GPIO(
+				"'%s':%d GPIO bit %d virq %d (%s,%s) - edge %s disabled\n",
+				chip->name, chip->remote_pid, i,
+				chip->irq_base + i,
+				edge_name_rising[irq_rising],
+				edge_name_falling[irq_falling],
+				edge_names[edge]);
+		}
+		spin_unlock_irqrestore(&chip->irq_lock, flags);
+
+		/* deliver outside the spinlock; handlers may take it again */
+		if (trigger_interrupt) {
+			SMP2P_INFO(
+				"'%s':%d GPIO bit %d virq %d (%s,%s) - edge %s triggering\n",
+				chip->name, chip->remote_pid, i,
+				chip->irq_base + i,
+				edge_name_rising[irq_rising],
+				edge_name_falling[irq_falling],
+				edge_names[edge]);
+			(void)generic_handle_irq(chip->irq_base + i);
+		}
+
+		cur_val >>= 1;
+		prev_val >>= 1;
+	}
+}
+
+/**
+ * Adds an interrupt domain based upon the DT node.
+ *
+ * @chip: pointer to GPIO chip
+ * @node: pointer to Device Tree node
+ *
+ * On success chip->irq_base holds the first allocated virtual IRQ;
+ * on failure chip->irq_domain is left NULL and chip->irq_base unchanged.
+ */
+static void smp2p_add_irq_domain(struct smp2p_chip_dev *chip,
+	struct device_node *node)
+{
+	int irq_base;
+
+	/* map GPIO pins to interrupts */
+	chip->irq_domain = irq_domain_add_linear(node, 32,
+			&smp2p_irq_domain_ops, chip);
+	if (!chip->irq_domain) {
+		SMP2P_ERR("%s: unable to create interrupt domain '%s':%d\n",
+				__func__, chip->name, chip->remote_pid);
+		return;
+	}
+
+	chip->irq_domain->name = chip->name;
+
+	/* alloc a contiguous set of virt irqs from anywhere in the irq space */
+	irq_base = irq_alloc_descs_from(0, SMP2P_BITS_PER_ENTRY,
+			of_node_to_nid(chip->irq_domain->of_node));
+	if (irq_base < 0) {
+		SMP2P_ERR("alloc virt irqs failed:%d name:%s pid%d\n", irq_base,
+				chip->name, chip->remote_pid);
+		goto irq_alloc_fail;
+	}
+
+	/* map the allocated irqs to gpios */
+	irq_domain_associate_many(chip->irq_domain, irq_base, 0,
+			SMP2P_BITS_PER_ENTRY);
+
+	chip->irq_base = irq_base;
+	SMP2P_DBG("create mapping:%d name:%s pid:%d\n", chip->irq_base,
+			chip->name, chip->remote_pid);
+	return;
+
+irq_alloc_fail:
+	irq_domain_remove(chip->irq_domain);
+	/* don't leave a dangling pointer to the removed domain */
+	chip->irq_domain = NULL;
+}
+
+/**
+ * Notifier function passed into smp2p API for out bound entries.
+ *
+ * @self: Pointer to calling notifier block
+ * @event: Event
+ * @data: Event-specific data
+ * @returns: 0
+ */
+static int smp2p_gpio_out_notify(struct notifier_block *self,
+ unsigned long event, void *data)
+{
+ struct smp2p_chip_dev *chip;
+
+ chip = container_of(self, struct smp2p_chip_dev, out_notifier);
+
+ switch (event) {
+ case SMP2P_OPEN:
+ chip->is_open = 1;
+ SMP2P_GPIO("%s: Opened out '%s':%d\n", __func__,
+ chip->name, chip->remote_pid);
+ break;
+ case SMP2P_ENTRY_UPDATE:
+ break;
+ default:
+ SMP2P_ERR("%s: Unknown event\n", __func__);
+ break;
+ }
+ return 0;
+}
+
+/**
+ * Notifier function passed into smp2p API for in bound entries.
+ *
+ * @self: Pointer to calling notifier block
+ * @event: Event
+ * @data: Event-specific data
+ * @returns: 0
+ */
+static int smp2p_gpio_in_notify(struct notifier_block *self,
+ unsigned long event, void *data)
+{
+ struct smp2p_chip_dev *chip;
+
+ chip = container_of(self, struct smp2p_chip_dev, in_notifier);
+
+ switch (event) {
+ case SMP2P_OPEN:
+ chip->is_open = 1;
+ SMP2P_GPIO("%s: Opened in '%s':%d\n", __func__,
+ chip->name, chip->remote_pid);
+ break;
+ case SMP2P_ENTRY_UPDATE:
+ msm_summary_irq_handler(chip, data);
+ break;
+ default:
+ SMP2P_ERR("%s: Unknown event\n", __func__);
+ break;
+ }
+ return 0;
+}
+
+/**
+ * Device tree probe function.
+ *
+ * @pdev: Pointer to device tree data.
+ * @returns: 0 on success; -ENODEV otherwise
+ *
+ * Called for each smp2pgpio entry in the device tree.
+ */
+static int smp2p_gpio_probe(struct platform_device *pdev)
+{
+	struct device_node *node;
+	char *key;
+	struct smp2p_chip_dev *chip;
+	const char *name_tmp;
+	unsigned long flags;
+	bool is_test_entry = false;
+	int ret;
+
+	chip = kzalloc(sizeof(struct smp2p_chip_dev), GFP_KERNEL);
+	if (!chip) {
+		SMP2P_ERR("%s: out of memory\n", __func__);
+		ret = -ENOMEM;
+		goto fail;
+	}
+	spin_lock_init(&chip->irq_lock);
+
+	/* parse device tree */
+	node = pdev->dev.of_node;
+	key = "qcom,entry-name";
+	ret = of_property_read_string(node, key, &name_tmp);
+	if (ret) {
+		SMP2P_ERR("%s: missing DT key '%s'\n", __func__, key);
+		goto fail;
+	}
+	strlcpy(chip->name, name_tmp, sizeof(chip->name));
+
+	key = "qcom,remote-pid";
+	ret = of_property_read_u32(node, key, &chip->remote_pid);
+	if (ret) {
+		SMP2P_ERR("%s: missing DT key '%s'\n", __func__, key);
+		goto fail;
+	}
+
+	key = "qcom,is-inbound";
+	chip->is_inbound = of_property_read_bool(node, key);
+
+	/*
+	 * Create virtual GPIO controller.  The three callback assignments
+	 * below were originally terminated with the comma operator (',')
+	 * instead of ';'; harmless by accident, but fixed here.
+	 */
+	chip->gpio.label = chip->name;
+	chip->gpio.dev = &pdev->dev;
+	chip->gpio.owner = THIS_MODULE;
+	chip->gpio.direction_input = smp2p_direction_input;
+	chip->gpio.get = smp2p_get_value;
+	chip->gpio.direction_output = smp2p_direction_output;
+	chip->gpio.set = smp2p_set_value;
+	chip->gpio.to_irq = smp2p_gpio_to_irq;
+	chip->gpio.base = -1; /* use dynamic GPIO pin allocation */
+	chip->gpio.ngpio = SMP2P_BITS_PER_ENTRY;
+	ret = gpiochip_add(&chip->gpio);
+	if (ret) {
+		SMP2P_ERR("%s: unable to register GPIO '%s' ret %d\n",
+				__func__, chip->name, ret);
+		goto fail;
+	}
+
+	/*
+	 * Test entries opened by GPIO Test conflict with loopback
+	 * support, so the test entries must be explicitly opened
+	 * in the unit test framework.
+	 */
+	if (strncmp("smp2p", chip->name, SMP2P_MAX_ENTRY_NAME) == 0)
+		is_test_entry = true;
+
+	if (!chip->is_inbound) {
+		chip->out_notifier.notifier_call = smp2p_gpio_out_notify;
+		if (!is_test_entry) {
+			ret = msm_smp2p_out_open(chip->remote_pid, chip->name,
+					&chip->out_notifier,
+					&chip->out_handle);
+			if (ret < 0)
+				goto error;
+		}
+	} else {
+		chip->in_notifier.notifier_call = smp2p_gpio_in_notify;
+		if (!is_test_entry) {
+			ret = msm_smp2p_in_register(chip->remote_pid,
+					chip->name,
+					&chip->in_notifier);
+			if (ret < 0)
+				goto error;
+		}
+	}
+
+	spin_lock_irqsave(&smp2p_entry_lock_lha1, flags);
+	list_add(&chip->entry_list, &smp2p_entry_list);
+	spin_unlock_irqrestore(&smp2p_entry_lock_lha1, flags);
+
+	/*
+	 * Create interrupt domain - note that chip can't be removed from the
+	 * interrupt domain, so chip cannot be deleted after this point.
+	 */
+	if (chip->is_inbound)
+		smp2p_add_irq_domain(chip, node);
+	else
+		chip->irq_base = -1;
+
+	SMP2P_GPIO("%s: added %s%s entry '%s':%d gpio %d irq %d",
+			__func__,
+			is_test_entry ? "test " : "",
+			chip->is_inbound ? "in" : "out",
+			chip->name, chip->remote_pid,
+			chip->gpio.base, chip->irq_base);
+
+	return 0;
+error:
+	/* unregister the gpiochip added above before freeing the chip */
+	gpiochip_remove(&chip->gpio);
+
+fail:
+	kfree(chip);
+	return ret;
+}
+
+/**
+ * smp2p_gpio_open_close - Opens or closes entry.
+ *
+ * @entry: Entry to open or close
+ * @do_open: true = open port; false = close
+ */
+static void smp2p_gpio_open_close(struct smp2p_chip_dev *entry,
+ bool do_open)
+{
+ int ret;
+
+ if (do_open) {
+ /* open entry */
+ if (entry->is_inbound)
+ ret = msm_smp2p_in_register(entry->remote_pid,
+ entry->name, &entry->in_notifier);
+ else
+ ret = msm_smp2p_out_open(entry->remote_pid,
+ entry->name, &entry->out_notifier,
+ &entry->out_handle);
+ SMP2P_GPIO("%s: opened %s '%s':%d ret %d\n",
+ __func__,
+ entry->is_inbound ? "in" : "out",
+ entry->name, entry->remote_pid,
+ ret);
+ } else {
+ /* close entry */
+ if (entry->is_inbound)
+ ret = msm_smp2p_in_unregister(entry->remote_pid,
+ entry->name, &entry->in_notifier);
+ else
+ ret = msm_smp2p_out_close(&entry->out_handle);
+ entry->is_open = false;
+ SMP2P_GPIO("%s: closed %s '%s':%d ret %d\n",
+ __func__,
+ entry->is_inbound ? "in" : "out",
+ entry->name, entry->remote_pid, ret);
+ }
+}
+
+/**
+ * smp2p_gpio_open_test_entry - Opens or closes test entries for unit testing.
+ *
+ * @name: Name of the entry
+ * @remote_pid: Remote processor ID
+ * @do_open: true = open port; false = close
+ *
+ * Walks the whole entry list by rotating it one node at a time so the
+ * spinlock can be dropped around smp2p_gpio_open_close() (which calls
+ * potentially-sleeping smp2p APIs).  Assumes entries are never removed
+ * from the list (probe never unlinks), so @start_entry stays valid while
+ * the lock is released.
+ */
+void smp2p_gpio_open_test_entry(const char *name, int remote_pid, bool do_open)
+{
+	struct smp2p_chip_dev *entry;
+	struct smp2p_chip_dev *start_entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(&smp2p_entry_lock_lha1, flags);
+	if (list_empty(&smp2p_entry_list)) {
+		spin_unlock_irqrestore(&smp2p_entry_lock_lha1, flags);
+		return;
+	}
+	/* remember where the walk started so one full rotation terminates */
+	start_entry = list_first_entry(&smp2p_entry_list,
+					struct smp2p_chip_dev,
+					entry_list);
+	entry = start_entry;
+	do {
+		if (!strncmp(entry->name, name, SMP2P_MAX_ENTRY_NAME)
+				&& entry->remote_pid == remote_pid) {
+			/* found entry to change */
+			spin_unlock_irqrestore(&smp2p_entry_lock_lha1, flags);
+			smp2p_gpio_open_close(entry, do_open);
+			spin_lock_irqsave(&smp2p_entry_lock_lha1, flags);
+		}
+		list_rotate_left(&smp2p_entry_list);
+		entry = list_first_entry(&smp2p_entry_list,
+						struct smp2p_chip_dev,
+						entry_list);
+	} while (entry != start_entry);
+	spin_unlock_irqrestore(&smp2p_entry_lock_lha1, flags);
+}
+
+static struct of_device_id msm_smp2p_match_table[] = {
+ {.compatible = "qcom,smp2pgpio", },
+ {},
+};
+
+static struct platform_driver smp2p_gpio_driver = {
+ .probe = smp2p_gpio_probe,
+ .driver = {
+ .name = "smp2pgpio",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_smp2p_match_table,
+ },
+};
+
+static int smp2p_init(void)
+{
+ INIT_LIST_HEAD(&smp2p_entry_list);
+ return platform_driver_register(&smp2p_gpio_driver);
+}
+module_init(smp2p_init);
+
+static void __exit smp2p_exit(void)
+{
+ platform_driver_unregister(&smp2p_gpio_driver);
+}
+module_exit(smp2p_exit);
+
+MODULE_DESCRIPTION("SMP2P GPIO");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
new file mode 100644
index 000000000000..d2e1b7e8f978
--- /dev/null
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -0,0 +1,902 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/elf.h>
+#include <linux/mutex.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/rwsem.h>
+#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/of_gpio.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <soc/qcom/ramdump.h>
+#include <soc/qcom/subsystem_restart.h>
+
+#include <asm/uaccess.h>
+#include <asm/setup.h>
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+
+#include <asm-generic/cacheflush.h>
+
+#include "peripheral-loader.h"
+
+#define pil_err(desc, fmt, ...) \
+ dev_err(desc->dev, "%s: " fmt, desc->name, ##__VA_ARGS__)
+#define pil_info(desc, fmt, ...) \
+ dev_info(desc->dev, "%s: " fmt, desc->name, ##__VA_ARGS__)
+
+#define PIL_NUM_DESC 10
+static void __iomem *pil_info_base;
+
+/**
+ * proxy_timeout - Override for proxy vote timeouts
+ * -1: Use driver-specified timeout
+ * 0: Hold proxy votes until shutdown
+ * >0: Specify a custom timeout in ms
+ */
+static int proxy_timeout_ms = -1;
+module_param(proxy_timeout_ms, int, S_IRUGO | S_IWUSR);
+
+/**
+ * struct pil_mdt - Representation of <name>.mdt file in memory
+ * @hdr: ELF32 header
+ * @phdr: ELF32 program headers
+ */
+struct pil_mdt {
+ struct elf32_hdr hdr;
+ struct elf32_phdr phdr[];
+};
+
+/**
+ * struct pil_seg - memory map representing one segment
+ * @list: list node linking this segment into pil_priv.segs
+ * @paddr: start address of segment
+ * @sz: size of segment
+ * @filesz: size of segment on disk
+ * @num: segment number
+ * @relocated: true if segment is relocated, false otherwise
+ *
+ * Loosely based on an elf program header. Contains all necessary information
+ * to load and initialize a segment of the image in memory.
+ */
+struct pil_seg {
+	phys_addr_t paddr;
+	unsigned long sz;
+	unsigned long filesz;
+	int num;
+	struct list_head list;
+	bool relocated;
+};
+
+/**
+ * struct pil_image_info - information in IMEM about image and where it is loaded
+ * @name: name of image (may or may not be NULL terminated)
+ * @start: indicates physical address where image starts (little endian)
+ * @size: size of image (little endian)
+ */
+struct pil_image_info {
+ char name[8];
+ __le64 start;
+ __le32 size;
+} __attribute__((__packed__));
+
+/**
+ * struct pil_priv - Private state for a pil_desc
+ * @proxy: work item used to run the proxy unvoting routine
+ * @desc: pointer to pil_desc this is private data for
+ * @seg: list of segments sorted by physical address
+ * @entry_addr: physical address where processor starts booting at
+ * @base_addr: smallest start address among all segments that are relocatable
+ * @region_start: address where relocatable region starts or lowest address
+ * for non-relocatable images
+ * @region_end: address where relocatable region ends or highest address for
+ * non-relocatable images
+ * @region: region allocated for relocatable images
+ * @unvoted_flag: flag to keep track if we have unvoted or not.
+ *
+ * This struct contains data for a pil_desc that should not be exposed outside
+ * of this file. This structure points to the descriptor and the descriptor
+ * points to this structure so that PIL drivers can't access the private
+ * data of a descriptor but this file can access both.
+ */
+struct pil_priv {
+ struct delayed_work proxy;
+ struct pil_desc *desc;
+ struct list_head segs;
+ phys_addr_t entry_addr;
+ phys_addr_t base_addr;
+ phys_addr_t region_start;
+ phys_addr_t region_end;
+ void *region;
+ struct pil_image_info __iomem *info;
+ int id;
+ int unvoted_flag;
+ size_t region_size;
+};
+
+/**
+ * pil_do_ramdump() - Ramdump an image
+ * @desc: descriptor from pil_desc_init()
+ * @ramdump_dev: ramdump device returned from create_ramdump_device()
+ *
+ * Calls the ramdump API with a list of segments generated from the addresses
+ * that the descriptor corresponds to.
+ */
+int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
+{
+	struct pil_priv *priv = desc->priv;
+	struct pil_seg *seg;
+	int count = 0, ret;
+	struct ramdump_segment *ramdump_segs, *s;
+
+	/* first pass: size the flat array needed by do_elf_ramdump() */
+	list_for_each_entry(seg, &priv->segs, list)
+		count++;
+
+	/* NOTE(review): count == 0 hands a zero-size array to
+	 * do_elf_ramdump() - confirm that is acceptable to the ramdump API.
+	 */
+	ramdump_segs = kmalloc_array(count, sizeof(*ramdump_segs), GFP_KERNEL);
+	if (!ramdump_segs)
+		return -ENOMEM;
+
+	/* second pass: copy each segment's physical extent */
+	s = ramdump_segs;
+	list_for_each_entry(seg, &priv->segs, list) {
+		s->address = seg->paddr;
+		s->size = seg->sz;
+		s++;
+	}
+
+	ret = do_elf_ramdump(ramdump_dev, ramdump_segs, count);
+	kfree(ramdump_segs);
+
+	return ret;
+}
+EXPORT_SYMBOL(pil_do_ramdump);
+
+/**
+ * pil_get_entry_addr() - Retrieve the entry address of a peripheral image
+ * @desc: descriptor from pil_desc_init()
+ *
+ * Returns the physical address where the image boots at or 0 if unknown.
+ */
+phys_addr_t pil_get_entry_addr(struct pil_desc *desc)
+{
+ return desc->priv ? desc->priv->entry_addr : 0;
+}
+EXPORT_SYMBOL(pil_get_entry_addr);
+
+static void __pil_proxy_unvote(struct pil_priv *priv)
+{
+ struct pil_desc *desc = priv->desc;
+
+ desc->ops->proxy_unvote(desc);
+ notify_proxy_unvote(desc->dev);
+ module_put(desc->owner);
+
+}
+
+static void pil_proxy_unvote_work(struct work_struct *work)
+{
+ struct delayed_work *delayed = to_delayed_work(work);
+ struct pil_priv *priv = container_of(delayed, struct pil_priv, proxy);
+ __pil_proxy_unvote(priv);
+}
+
+static int pil_proxy_vote(struct pil_desc *desc)
+{
+ int ret = 0;
+
+ if (desc->ops->proxy_vote) {
+ ret = desc->ops->proxy_vote(desc);
+ }
+
+ if (desc->proxy_unvote_irq)
+ enable_irq(desc->proxy_unvote_irq);
+ notify_proxy_vote(desc->dev);
+
+ return ret;
+}
+
+/**
+ * pil_proxy_unvote() - schedule release of proxy votes for a peripheral
+ * @desc: descriptor from pil_desc_init()
+ * @immediate: nonzero to drop the votes now rather than after the timeout
+ *
+ * Timeout policy follows the proxy_timeout_ms module parameter:
+ * -1 uses the driver-specified desc->proxy_timeout, 0 holds votes until
+ * shutdown (no unvote unless @immediate), >0 overrides in milliseconds.
+ */
+static void pil_proxy_unvote(struct pil_desc *desc, int immediate)
+{
+	struct pil_priv *priv = desc->priv;
+	unsigned long timeout;
+
+	if (proxy_timeout_ms == 0 && !immediate)
+		return;
+	else if (proxy_timeout_ms > 0)
+		timeout = proxy_timeout_ms;
+	else
+		timeout = desc->proxy_timeout;
+
+	if (desc->ops->proxy_unvote) {
+		/* hold a module reference until the unvote work runs */
+		if (WARN_ON(!try_module_get(desc->owner)))
+			return;
+
+		if (immediate)
+			timeout = 0;
+
+		/* when an unvote IRQ exists, the IRQ handler normally does
+		 * the unvote instead of this delayed work */
+		if (!desc->proxy_unvote_irq || immediate)
+			schedule_delayed_work(&priv->proxy,
+						msecs_to_jiffies(timeout));
+	}
+}
+
+static irqreturn_t proxy_unvote_intr_handler(int irq, void *dev_id)
+{
+ struct pil_desc *desc = dev_id;
+ struct pil_priv *priv = desc->priv;
+
+ pil_info(desc, "Power/Clock ready interrupt received\n");
+ if (!desc->priv->unvoted_flag) {
+ desc->priv->unvoted_flag = 1;
+ __pil_proxy_unvote(priv);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static bool segment_is_relocatable(const struct elf32_phdr *p)
+{
+ return !!(p->p_flags & BIT(27));
+}
+
+static phys_addr_t pil_reloc(const struct pil_priv *priv, phys_addr_t addr)
+{
+ return addr - priv->base_addr + priv->region_start;
+}
+
+/**
+ * pil_init_seg() - build a pil_seg from one ELF program header
+ * @desc: descriptor owning the image
+ * @phdr: ELF32 program header describing the segment
+ * @num: segment index (for diagnostics)
+ *
+ * Returns the new segment or an ERR_PTR on validation/allocation failure.
+ * Relocatable segments get their physical address rebased into the
+ * region reserved by pil_alloc_region().
+ */
+static struct pil_seg *pil_init_seg(const struct pil_desc *desc,
+				const struct elf32_phdr *phdr, int num)
+{
+	bool reloc = segment_is_relocatable(phdr);
+	const struct pil_priv *priv = desc->priv;
+	struct pil_seg *seg;
+/* NOTE(review): the kernel-memory overlap check below is disabled with
+ * "#if 0" - confirm this is intentional and not a debugging leftover.
+ */
+#if 0
+	if (!reloc && memblock_overlaps_memory(phdr->p_paddr, phdr->p_memsz)) {
+		pil_err(desc, "kernel memory would be overwritten [%#08lx, %#08lx)\n",
+			(unsigned long)phdr->p_paddr,
+			(unsigned long)(phdr->p_paddr + phdr->p_memsz));
+		return ERR_PTR(-EPERM);
+	}
+#endif
+	/* the file portion can never exceed the in-memory footprint */
+	if (phdr->p_filesz > phdr->p_memsz) {
+		pil_err(desc, "Segment %d: file size (%u) is greater than mem size (%u).\n",
+			num, phdr->p_filesz, phdr->p_memsz);
+		return ERR_PTR(-EINVAL);
+	}
+
+	seg = kmalloc(sizeof(*seg), GFP_KERNEL);
+	if (!seg)
+		return ERR_PTR(-ENOMEM);
+	seg->num = num;
+	seg->paddr = reloc ? pil_reloc(priv, phdr->p_paddr) : phdr->p_paddr;
+	seg->filesz = phdr->p_filesz;
+	seg->sz = phdr->p_memsz;
+	seg->relocated = reloc;
+	INIT_LIST_HEAD(&seg->list);
+
+	return seg;
+}
+
+#define segment_is_hash(flag) (((flag) & (0x7 << 24)) == (0x2 << 24))
+
+static int segment_is_loadable(const struct elf32_phdr *p)
+{
+ return (p->p_type == PT_LOAD) && !segment_is_hash(p->p_flags) &&
+ p->p_memsz;
+}
+
+static void pil_dump_segs(const struct pil_priv *priv)
+{
+ struct pil_seg *seg;
+ phys_addr_t seg_h_paddr;
+
+ list_for_each_entry(seg, &priv->segs, list) {
+ seg_h_paddr = seg->paddr + seg->sz;
+ pil_info(priv->desc, "%d: %pa %pa\n", seg->num,
+ &seg->paddr, &seg_h_paddr);
+ }
+}
+
+/*
+ * Ensure the entry address lies within the image limits and if the image is
+ * relocatable ensure it lies within a relocatable segment.
+ */
+static int pil_init_entry_addr(struct pil_priv *priv, const struct pil_mdt *mdt)
+{
+ struct pil_seg *seg;
+ phys_addr_t entry = mdt->hdr.e_entry;
+ bool image_relocated = priv->region;
+
+ if (image_relocated)
+ entry = pil_reloc(priv, entry);
+ priv->entry_addr = entry;
+
+ if (priv->desc->flags & PIL_SKIP_ENTRY_CHECK)
+ return 0;
+
+ list_for_each_entry(seg, &priv->segs, list) {
+ if (entry >= seg->paddr && entry < seg->paddr + seg->sz) {
+ if (!image_relocated)
+ return 0;
+ else if (seg->relocated)
+ return 0;
+ }
+ }
+ pil_err(priv->desc, "entry address %pa not within range\n", &entry);
+ pil_dump_segs(priv);
+ return -EADDRNOTAVAIL;
+}
+
+/**
+ * pil_alloc_region() - reserve/map the region for a relocatable image
+ * @priv: private state of the descriptor
+ * @min_addr: lowest relocatable segment address in the image
+ * @max_addr: highest relocatable segment end address in the image
+ * @align: alignment required by the lowest relocatable segment
+ *
+ * Returns 0 on success or -ENOMEM if the region cannot be (re)used/mapped.
+ */
+static int pil_alloc_region(struct pil_priv *priv, phys_addr_t min_addr,
+				phys_addr_t max_addr, size_t align)
+{
+	void *region;
+	size_t size = max_addr - min_addr;
+	size_t aligned_size;
+
+	/* Don't reallocate due to fragmentation concerns, just sanity check */
+	if (priv->region) {
+		if (WARN(priv->region_end - priv->region_start < size,
+			"Can't reuse PIL memory, too small\n"))
+			return -ENOMEM;
+		return 0;
+	}
+
+	if (align > SZ_4M)
+		aligned_size = ALIGN(size, SZ_4M);
+	else
+		aligned_size = ALIGN(size, SZ_1M);
+
+	/* FIXME: load address is hard-coded rather than allocated */
+	priv->region_start = 0x8b600000;
+	region = devm_ioremap_nocache(priv->desc->dev, priv->region_start,
+					aligned_size);
+	/* the original never checked the mapping; a failure would have
+	 * left priv->region NULL and crashed the loader later */
+	if (!region)
+		return -ENOMEM;
+
+	priv->region = region;
+	priv->region_end = priv->region_start + size;
+	priv->base_addr = min_addr;
+	priv->region_size = aligned_size;
+
+	return 0;
+}
+
+/**
+ * pil_setup_region() - compute the image's physical extent and publish it
+ * @priv: private state of the descriptor
+ * @mdt: parsed .mdt header with the ELF program headers
+ *
+ * Scans all loadable segments, tracking relocatable and non-relocatable
+ * limits separately; relocatable images get a region from
+ * pil_alloc_region(), others use their fixed addresses directly.
+ * Returns 0 on success or the pil_alloc_region() error.
+ */
+static int pil_setup_region(struct pil_priv *priv, const struct pil_mdt *mdt)
+{
+	const struct elf32_phdr *phdr;
+	phys_addr_t min_addr_r, min_addr_n, max_addr_r, max_addr_n, start, end;
+	size_t align = 0;
+	int i, ret = 0;
+	bool relocatable = false;
+
+	min_addr_n = min_addr_r = (phys_addr_t)ULLONG_MAX;
+	max_addr_n = max_addr_r = 0;
+
+	/* Find the image limits */
+	for (i = 0; i < mdt->hdr.e_phnum; i++) {
+		phdr = &mdt->phdr[i];
+		if (!segment_is_loadable(phdr))
+			continue;
+
+		start = phdr->p_paddr;
+		end = start + phdr->p_memsz;
+
+		if (segment_is_relocatable(phdr)) {
+			min_addr_r = min(min_addr_r, start);
+			max_addr_r = max(max_addr_r, end);
+			/*
+			 * Lowest relocatable segment dictates alignment of
+			 * relocatable region
+			 */
+			if (min_addr_r == start)
+				align = phdr->p_align;
+			relocatable = true;
+		} else {
+			min_addr_n = min(min_addr_n, start);
+			max_addr_n = max(max_addr_n, end);
+		}
+
+	}
+
+	/*
+	 * Align the max address to the next 4K boundary to satisfy iommus and
+	 * XPUs that operate on 4K chunks.
+	 */
+	max_addr_n = ALIGN(max_addr_n, SZ_4K);
+	max_addr_r = ALIGN(max_addr_r, SZ_4K);
+
+	if (relocatable) {
+		ret = pil_alloc_region(priv, min_addr_r, max_addr_r, align);
+	} else {
+		priv->region_start = min_addr_n;
+		priv->region_end = max_addr_n;
+		priv->base_addr = min_addr_n;
+	}
+
+	/* publish start/size to the IMEM info block for the boot chain.
+	 * NOTE(review): info->start is __le64 but the copy length is
+	 * sizeof(priv->region_start)/4 words - if phys_addr_t is 32-bit only
+	 * the low half is written; confirm the upper half is pre-zeroed.
+	 */
+	if (priv->info) {
+		__iowrite32_copy(&priv->info->start, &priv->region_start,
+				sizeof(priv->region_start) / 4);
+		writel_relaxed(priv->region_end - priv->region_start,
+				&priv->info->size);
+	}
+
+	return ret;
+}
+
+/* list_sort() comparator: order segments by ascending physical address */
+static int pil_cmp_seg(void *priv, struct list_head *a, struct list_head *b)
+{
+	struct pil_seg *sa = list_entry(a, struct pil_seg, list);
+	struct pil_seg *sb = list_entry(b, struct pil_seg, list);
+
+	if (sa->paddr < sb->paddr)
+		return -1;
+	if (sa->paddr > sb->paddr)
+		return 1;
+	return 0;
+}
+
+/*
+ * Parse the mdt's program headers into a sorted list of pil_seg entries,
+ * set up the load region, and validate the entry point.  Called once per
+ * boot attempt; segments are freed again by pil_release_mmap().
+ */
+static int pil_init_mmap(struct pil_desc *desc, const struct pil_mdt *mdt)
+{
+	struct pil_priv *priv = desc->priv;
+	const struct elf32_phdr *phdr;
+	struct pil_seg *seg;
+	int i, ret;
+
+	ret = pil_setup_region(priv, mdt);
+	if (ret)
+		return ret;
+
+	pil_info(desc, "loading from %pa to %pa\n", &priv->region_start,
+		 &priv->region_end);
+
+	for (i = 0; i < mdt->hdr.e_phnum; i++) {
+		phdr = &mdt->phdr[i];
+		if (!segment_is_loadable(phdr))
+			continue;
+
+		seg = pil_init_seg(desc, phdr, i);
+		if (IS_ERR(seg))
+			return PTR_ERR(seg);
+
+		list_add_tail(&seg->list, &priv->segs);
+	}
+	/* keep segments in ascending physical-address order */
+	list_sort(NULL, &priv->segs, pil_cmp_seg);
+
+	return pil_init_entry_addr(priv, mdt);
+}
+
+/*
+ * Tear down per-boot segment state: clear the IMEM info slot (if any)
+ * and free the parsed segment list.  Safe to call repeatedly; the list
+ * is simply empty on subsequent calls.
+ */
+static void pil_release_mmap(struct pil_desc *desc)
+{
+	struct pil_priv *priv = desc->priv;
+	struct pil_seg *p, *tmp;
+	u64 zero = 0ULL;
+
+	if (priv->info) {
+		__iowrite32_copy(&priv->info->start, &zero,
+				 sizeof(zero) / 4);
+		writel_relaxed(0, &priv->info->size);
+	}
+
+	list_for_each_entry_safe(p, tmp, &priv->segs, list) {
+		list_del(&p->list);
+		kfree(p);
+	}
+}
+
+#define IOMAP_SIZE SZ_1M
+
+/*
+ * Bundle of context for firmware-region mapping helpers.
+ * NOTE(review): presumably intended as the map_fw_mem()/unmap_fw_mem()
+ * "data" argument (via desc->map_data), but the defaults below ignore
+ * it — confirm against callers elsewhere in the tree.
+ */
+struct pil_map_fw_info {
+	int relocated;		/* nonzero if the image was relocated */
+	void *region;		/* CPU mapping of the load region */
+	phys_addr_t base_addr;	/* physical base of the load region */
+	struct device *dev;
+};
+
+/* Default @map_fw_mem implementation: a plain uncached ioremap */
+static void *map_fw_mem(phys_addr_t paddr, size_t size, void *data)
+{
+	return ioremap_nocache(paddr, size);
+}
+
+/* Default @unmap_fw_mem implementation: undo map_fw_mem()'s ioremap */
+static void unmap_fw_mem(void *vaddr, void *data)
+{
+	iounmap(vaddr);
+}
+
+/*
+ * Load one program segment: fetch the matching "<name>.bNN" blob, copy
+ * it into the mapped load region, zero any trailing memsz-beyond-filesz
+ * padding, and run the optional per-segment verification hook.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int pil_load_seg(struct pil_desc *desc, struct pil_seg *seg)
+{
+	int ret = 0, count;
+	phys_addr_t paddr;
+	char fw_name[30];
+	int num = seg->num;
+	const struct firmware *fw;
+
+	if (seg->filesz) {
+		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d",
+			 desc->name, num);
+		ret = request_firmware_direct(&fw, fw_name, desc->dev);
+		if (ret < 0) {
+			pil_err(desc, "Failed to locate blob %s or blob is too big.\n",
+				fw_name);
+			return ret;
+		}
+
+		memcpy(desc->priv->region +
+		       (seg->paddr - desc->priv->region_start),
+		       fw->data, seg->filesz);
+
+		/* Make sure the copy hits memory before the remote boots */
+		wmb();
+		flush_cache_all();
+		release_firmware(fw);
+
+		ret = 0;
+	}
+
+	/* Zero out trailing memory */
+	paddr = seg->paddr + seg->filesz;
+	count = seg->sz - seg->filesz;
+	if (count > 0)
+		memset(desc->priv->region + paddr - desc->priv->region_start,
+		       0, count);
+
+	if (desc->ops->verify_blob) {
+		ret = desc->ops->verify_blob(desc, seg->paddr, seg->sz);
+		if (ret)
+			pil_err(desc, "Blob%u failed verification\n", num);
+	}
+
+	return ret;
+}
+
+/*
+ * Read the optional "qcom,gpio-proxy-unvote" gpio from the device tree
+ * and convert it to the IRQ that triggers the proxy unvote.  Leaves
+ * desc->proxy_unvote_irq at 0 when the property is absent or the
+ * descriptor has no proxy_unvote op.
+ *
+ * Returns 0 on success or a negative errno from the gpio/irq lookup.
+ */
+static int pil_parse_devicetree(struct pil_desc *desc)
+{
+	int clk_ready = 0;
+
+	if (desc->ops->proxy_unvote &&
+	    of_find_property(desc->dev->of_node,
+			     "qcom,gpio-proxy-unvote",
+			     NULL)) {
+		clk_ready = of_get_named_gpio(desc->dev->of_node,
+					      "qcom,gpio-proxy-unvote", 0);
+		if (clk_ready < 0) {
+			dev_err(desc->dev,
+				"[%s]: Error getting proxy unvoting gpio\n",
+				desc->name);
+			return clk_ready;
+		}
+
+		clk_ready = gpio_to_irq(clk_ready);
+		if (clk_ready < 0) {
+			dev_err(desc->dev,
+				"[%s]: Error getting proxy unvote IRQ\n",
+				desc->name);
+			return clk_ready;
+		}
+	}
+
+	/* success trace demoted from dev_err: not an error condition */
+	dev_dbg(desc->dev, "%s: completed OK\n", __func__);
+
+	desc->proxy_unvote_irq = clk_ready;
+	return 0;
+}
+
+/* Synchronize request_firmware() with suspend */
+static DECLARE_RWSEM(pil_pm_rwsem);
+
+/**
+ * pil_boot() - Load a peripheral image into memory and boot it
+ * @desc: descriptor from pil_desc_init()
+ *
+ * Fetches "<name>.mdt", validates its ELF metadata, maps the load
+ * region, copies every loadable segment, then authenticates and
+ * releases the peripheral from reset.
+ *
+ * Returns 0 on success or -ERROR on failure.
+ */
+int pil_boot(struct pil_desc *desc)
+{
+	int ret;
+	char fw_name[30];
+	const struct pil_mdt *mdt;
+	const struct elf32_hdr *ehdr;
+	struct pil_seg *seg;
+	const struct firmware *fw;
+	struct pil_priv *priv = desc->priv;
+
+	/* Reinitialize for new image */
+	pil_release_mmap(desc);
+
+	/* Hold off suspend while firmware is being fetched and loaded */
+	down_read(&pil_pm_rwsem);
+	snprintf(fw_name, sizeof(fw_name), "%s.mdt", desc->name);
+	ret = request_firmware(&fw, fw_name, desc->dev);
+	if (ret) {
+		pil_err(desc, "Failed to locate %s\n", fw_name);
+		goto out;
+	}
+
+	/* Basic ELF sanity checks on the metadata blob */
+	if (fw->size < sizeof(*ehdr)) {
+		pil_err(desc, "Not big enough to be an elf header\n");
+		ret = -EIO;
+		goto release_fw;
+	}
+
+	mdt = (const struct pil_mdt *)fw->data;
+	ehdr = &mdt->hdr;
+
+	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+		pil_err(desc, "Not an elf header\n");
+		ret = -EIO;
+		goto release_fw;
+	}
+
+	if (ehdr->e_phnum == 0) {
+		pil_err(desc, "No loadable segments\n");
+		ret = -EIO;
+		goto release_fw;
+	}
+	if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
+	    sizeof(struct elf32_hdr) > fw->size) {
+		pil_err(desc, "Program headers not within mdt\n");
+		ret = -EIO;
+		goto release_fw;
+	}
+
+	ret = pil_init_mmap(desc, mdt);
+	if (ret)
+		goto release_fw;
+
+	desc->priv->unvoted_flag = 0;
+	ret = pil_proxy_vote(desc);
+	if (ret) {
+		pil_err(desc, "Failed to proxy vote\n");
+		goto release_fw;
+	}
+
+	if (desc->ops->init_image)
+		ret = desc->ops->init_image(desc, fw->data, fw->size);
+	if (ret) {
+		pil_err(desc, "Invalid firmware metadata\n");
+		goto err_boot;
+	}
+
+	if (desc->ops->mem_setup)
+		ret = desc->ops->mem_setup(desc, priv->region_start,
+					   priv->region_end - priv->region_start);
+	if (ret) {
+		pil_err(desc, "Memory setup error\n");
+		goto err_deinit_image;
+	}
+
+	/* Copy every loadable segment into the mapped region */
+	list_for_each_entry(seg, &desc->priv->segs, list) {
+		ret = pil_load_seg(desc, seg);
+		if (ret)
+			goto err_deinit_image;
+	}
+
+	ret = desc->ops->auth_and_reset(desc);
+	if (ret) {
+		pil_err(desc, "Failed to bring out of reset\n");
+		goto err_deinit_image;
+	}
+	pil_info(desc, "Brought out of reset\n");
+	/*
+	 * NOTE: success falls through the labels below.  Each error action
+	 * is guarded by "if (ret)"; pil_proxy_unvote() runs on both the
+	 * success and the failure path (with ret as its argument).
+	 */
+err_deinit_image:
+	if (ret && desc->ops->deinit_image)
+		desc->ops->deinit_image(desc);
+err_boot:
+	if (ret && desc->proxy_unvote_irq)
+		disable_irq(desc->proxy_unvote_irq);
+	pil_proxy_unvote(desc, ret);
+release_fw:
+	release_firmware(fw);
+out:
+	up_read(&pil_pm_rwsem);
+	if (ret) {
+		/* region mapping is devm-managed; just forget our pointer */
+		priv->region = NULL;
+		pil_release_mmap(desc);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(pil_boot);
+
+/**
+ * pil_shutdown() - Shutdown a peripheral
+ * @desc: descriptor from pil_desc_init()
+ */
+void pil_shutdown(struct pil_desc *desc)
+{
+	struct pil_priv *priv = desc->priv;
+
+	if (desc->ops->shutdown)
+		desc->ops->shutdown(desc);
+
+	if (desc->proxy_unvote_irq) {
+		disable_irq(desc->proxy_unvote_irq);
+		if (!priv->unvoted_flag)
+			pil_proxy_unvote(desc, 1);
+	} else if (!proxy_timeout_ms) {
+		pil_proxy_unvote(desc, 1);
+	} else {
+		flush_delayed_work(&priv->proxy);
+	}
+
+	/* forget the (devm-managed) load-region mapping */
+	priv->region = NULL;
+}
+EXPORT_SYMBOL(pil_shutdown);
+
+static DEFINE_IDA(pil_ida);
+
+/**
+ * pil_desc_init() - Initialize a pil descriptor
+ * @desc: descriptor to initialize
+ *
+ * Initialize a pil descriptor for use by other pil functions. This function
+ * must be called before calling pil_boot() or pil_shutdown().
+ *
+ * Returns 0 for success and -ERROR on failure.
+ */
+int pil_desc_init(struct pil_desc *desc)
+{
+	struct pil_priv *priv;
+	int ret;
+	void __iomem *addr;
+	char buf[sizeof(priv->info->name)];
+
+	if (WARN(desc->ops->proxy_unvote && !desc->ops->proxy_vote,
+		 "Invalid proxy voting. Ignoring\n"))
+		((struct pil_reset_ops *)desc->ops)->proxy_unvote = NULL;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	desc->priv = priv;
+	priv->desc = desc;
+
+	priv->id = ret = ida_simple_get(&pil_ida, 0, PIL_NUM_DESC, GFP_KERNEL);
+	if (priv->id < 0)
+		goto err;
+
+	if (pil_info_base) {
+		/* publish the image name in our shared IMEM info slot */
+		addr = pil_info_base + sizeof(struct pil_image_info) * priv->id;
+		priv->info = (struct pil_image_info __iomem *)addr;
+
+		strncpy(buf, desc->name, sizeof(buf));
+		__iowrite32_copy(priv->info->name, buf, sizeof(buf) / 4);
+	}
+
+	ret = pil_parse_devicetree(desc);
+	if (ret)
+		goto err_ida;
+
+	/* Ignore users who don't make any sense */
+	WARN(desc->ops->proxy_unvote && desc->proxy_unvote_irq == 0
+	     && !desc->proxy_timeout,
+	     "Invalid proxy unvote callback or a proxy timeout of 0"
+	     " was specified or no proxy unvote IRQ was specified.\n");
+
+	if (desc->proxy_unvote_irq) {
+		ret = request_threaded_irq(desc->proxy_unvote_irq,
+					   NULL,
+					   proxy_unvote_intr_handler,
+					   IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+					   desc->name, desc);
+		if (ret < 0) {
+			dev_err(desc->dev,
+				"Unable to request proxy unvote IRQ: %d\n",
+				ret);
+			/* was "goto err": leaked the ida id on this path */
+			goto err_ida;
+		}
+		disable_irq(desc->proxy_unvote_irq);
+	}
+
+	INIT_DELAYED_WORK(&priv->proxy, pil_proxy_unvote_work);
+	INIT_LIST_HEAD(&priv->segs);
+
+	/* Make sure mapping functions are set. */
+	if (!desc->map_fw_mem)
+		desc->map_fw_mem = map_fw_mem;
+
+	if (!desc->unmap_fw_mem)
+		desc->unmap_fw_mem = unmap_fw_mem;
+
+	dev_dbg(desc->dev, "%s: completed OK\n", __func__);
+
+	return 0;
+err_ida:
+	ida_simple_remove(&pil_ida, priv->id);
+err:
+	/* don't leave desc->priv dangling at freed memory */
+	desc->priv = NULL;
+	kfree(priv);
+	return ret;
+}
+EXPORT_SYMBOL(pil_desc_init);
+
+/**
+ * pil_desc_release() - Release a pil descriptor
+ * @desc: descriptor to free
+ *
+ * Returns the ida id, drains any pending proxy-unvote work, then frees
+ * the private state.  Safe to call with a descriptor whose init failed
+ * (priv may be NULL; kfree(NULL) is a no-op).
+ */
+void pil_desc_release(struct pil_desc *desc)
+{
+	struct pil_priv *priv = desc->priv;
+
+	if (priv) {
+		ida_simple_remove(&pil_ida, priv->id);
+		/* make sure the delayed unvote work is not still running */
+		flush_delayed_work(&priv->proxy);
+	}
+	desc->priv = NULL;
+	kfree(priv);
+}
+EXPORT_SYMBOL(pil_desc_release);
+
+/*
+ * PM notifier: take pil_pm_rwsem for writing across suspend so that
+ * pil_boot() (a reader) cannot fetch firmware mid-suspend.
+ */
+static int pil_pm_notify(struct notifier_block *b, unsigned long event, void *p)
+{
+	if (event == PM_SUSPEND_PREPARE)
+		down_write(&pil_pm_rwsem);
+	else if (event == PM_POST_SUSPEND)
+		up_write(&pil_pm_rwsem);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block pil_pm_notifier = {
+	.notifier_call = pil_pm_notify,
+};
+
+/* Map the optional IMEM PIL info region and hook into PM notifications */
+static int __init msm_pil_init(void)
+{
+	struct device_node *np;
+
+	np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-pil");
+	if (!np) {
+		pr_warn("pil: failed to find qcom,msm-imem-pil node\n");
+	} else {
+		pil_info_base = of_iomap(np, 0);
+		if (!pil_info_base)
+			pr_warn("pil: could not map imem region\n");
+	}
+
+	return register_pm_notifier(&pil_pm_notifier);
+}
+device_initcall(msm_pil_init);
+
+/* Unhook the PM notifier and drop the IMEM mapping on module unload */
+static void __exit msm_pil_exit(void)
+{
+	unregister_pm_notifier(&pil_pm_notifier);
+	if (pil_info_base)
+		iounmap(pil_info_base);
+}
+module_exit(msm_pil_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Load peripheral images and bring peripherals out of reset");
diff --git a/drivers/soc/qcom/peripheral-loader.h b/drivers/soc/qcom/peripheral-loader.h
new file mode 100644
index 000000000000..092fffc01a76
--- /dev/null
+++ b/drivers/soc/qcom/peripheral-loader.h
@@ -0,0 +1,100 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_PERIPHERAL_LOADER_H
+#define __MSM_PERIPHERAL_LOADER_H
+
+#include <linux/dma-attrs.h>
+
+struct device;
+struct module;
+struct pil_priv;
+
+/**
+ * struct pil_desc - PIL descriptor
+ * @name: string used for pil_get()
+ * @dev: parent device
+ * @ops: callback functions
+ * @owner: module the descriptor belongs to
+ * @proxy_timeout: delay in ms until proxy vote is removed
+ * @flags: bitfield for image flags
+ * @priv: DON'T USE - internal only
+ * @attrs: DMA attributes (consumer-supplied; not interpreted in this file)
+ * @proxy_unvote_irq: IRQ to trigger a proxy unvote. proxy_timeout
+ * is ignored if this is set.
+ * @map_fw_mem: Custom function used to map physical address space to virtual.
+ * This defaults to ioremap if not specified.
+ * @unmap_fw_mem: Custom function used to undo mapping by map_fw_mem.
+ * This defaults to iounmap if not specified.
+ * @map_data: opaque pointer intended for @map_fw_mem/@unmap_fw_mem's
+ * "data" argument (the default implementations ignore it)
+ */
+struct pil_desc {
+	const char *name;
+	struct device *dev;
+	const struct pil_reset_ops *ops;
+	struct module *owner;
+	unsigned long proxy_timeout;
+	unsigned long flags;
+#define PIL_SKIP_ENTRY_CHECK	BIT(0)
+	struct pil_priv *priv;
+	struct dma_attrs attrs;
+	unsigned int proxy_unvote_irq;
+	void * (*map_fw_mem)(phys_addr_t phys, size_t size, void *data);
+	void (*unmap_fw_mem)(void *virt, void *data);
+	void *map_data;
+};
+
+/**
+ * struct pil_reset_ops - PIL operations
+ * @init_image: prepare an image for authentication
+ * @mem_setup: prepare the image memory region
+ * @verify_blob: authenticate a program segment, called once for each loadable
+ * program segment (optional)
+ * @proxy_vote: make proxy votes before auth_and_reset (optional)
+ * @auth_and_reset: boot the processor
+ * @proxy_unvote: remove any proxy votes (optional)
+ * @deinit_image: restore actions performed in init_image if necessary
+ * @shutdown: shutdown the processor
+ *
+ * Callbacks returning int report 0 for success; pil_boot() treats any
+ * nonzero return as failure.
+ */
+struct pil_reset_ops {
+	int (*init_image)(struct pil_desc *pil, const u8 *metadata,
+			  size_t size);
+	int (*mem_setup)(struct pil_desc *pil, phys_addr_t addr, size_t size);
+	int (*verify_blob)(struct pil_desc *pil, phys_addr_t phy_addr,
+			   size_t size);
+	int (*proxy_vote)(struct pil_desc *pil);
+	int (*auth_and_reset)(struct pil_desc *pil);
+	void (*proxy_unvote)(struct pil_desc *pil);
+	int (*deinit_image)(struct pil_desc *pil);
+	int (*shutdown)(struct pil_desc *pil);
+};
+
+#ifdef CONFIG_MSM_PIL
+extern int pil_desc_init(struct pil_desc *desc);
+extern int pil_boot(struct pil_desc *desc);
+extern void pil_shutdown(struct pil_desc *desc);
+extern void pil_desc_release(struct pil_desc *desc);
+extern phys_addr_t pil_get_entry_addr(struct pil_desc *desc);
+extern int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev);
+#else
+/* No-op stubs so consumers still build when PIL support is compiled out */
+static inline int pil_desc_init(struct pil_desc *desc) { return 0; }
+static inline int pil_boot(struct pil_desc *desc) { return 0; }
+static inline void pil_shutdown(struct pil_desc *desc) { }
+static inline void pil_desc_release(struct pil_desc *desc) { }
+static inline phys_addr_t pil_get_entry_addr(struct pil_desc *desc)
+{
+	return 0;
+}
+static inline int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
+{
+	return 0;
+}
+#endif
+
+#endif