aboutsummaryrefslogtreecommitdiff
path: root/drivers/mailbox
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/mailbox')
-rw-r--r--drivers/mailbox/Kconfig27
-rw-r--r--drivers/mailbox/Makefile7
-rw-r--r--drivers/mailbox/arm_mhu.c320
-rw-r--r--drivers/mailbox/arm_mhu.h33
-rw-r--r--drivers/mailbox/mailbox.c467
-rw-r--r--drivers/mailbox/pl320-ipc.c2
-rw-r--r--drivers/mailbox/scpi_protocol.c350
7 files changed, 1205 insertions, 1 deletions
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 9fd9c6717e0c..8e5e5b1becec 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -6,6 +6,33 @@ menuconfig MAILBOX
signals. Say Y if your platform supports hardware mailboxes.
if MAILBOX
+config ARM_MHU_MBOX
+ bool "ARM Message Handling Unit (MHU) Mailbox"
+ help
+ This driver provides support for inter-processor communication
+ between System Control Processor (SCP) with Cortex-M3 processor
+ and Application Processors (AP) on some ARM based systems with
+ MHU peripheral.
+
+ SCP controls most of the power management on the Application
+ Processors. It offers control and management of: the core/cluster
+ power states, various power domain DVFS including the core/cluster,
+ certain system clocks configuration, thermal sensors and many
+ others.
+
+config ARM_SCPI_PROTOCOL
+ bool "ARM System Control and Power Interface (SCPI) Message Protocol"
+ select ARM_MHU_MBOX
+ help
+ System Control and Power Interface (SCPI) Message Protocol is
+ defined for the purpose of communication between the Application
+ Cores(AP) and the System Control Processor(SCP). The MHU peripheral
+ provides a mechanism for inter-processor communication between SCP
+ and AP.
+
+ This protocol library provides interface for all the client drivers
+ making use of the features offered by the SCP.
+
config PL320_MBOX
bool "ARM PL320 Mailbox"
depends on ARM_AMBA
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 6d184dbcaca8..e31c84b6bc40 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -1,3 +1,10 @@
+# Generic MAILBOX API
+
+obj-$(CONFIG_MAILBOX) += mailbox.o
+
+obj-$(CONFIG_ARM_MHU_MBOX) += arm_mhu.o
+obj-$(CONFIG_ARM_SCPI_PROTOCOL) += scpi_protocol.o
+
obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o
diff --git a/drivers/mailbox/arm_mhu.c b/drivers/mailbox/arm_mhu.c
new file mode 100644
index 000000000000..28fb4f01b413
--- /dev/null
+++ b/drivers/mailbox/arm_mhu.c
@@ -0,0 +1,320 @@
+/*
+ * Driver for the Message Handling Unit (MHU) which is the peripheral in
+ * the Compute SubSystem (CSS) providing a mechanism for inter-processor
+ * communication between System Control Processor (SCP) with Cortex-M3
+ * processor and Application Processors (AP).
+ *
+ * The MHU peripheral provides a mechanism to assert interrupt signals to
+ * facilitate inter-processor message passing between the SCP and the AP.
+ * The message payload can be deposited into main memory or on-chip memories.
+ * The MHU supports three bi-directional channels - low priority, high
+ * priority and secure(can't be used in non-secure execution modes)
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * Author: Sudeep Holla <sudeep.holla@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "arm_mhu.h"
+
+struct device* the_scpi_device;
+
+#define DRIVER_NAME "arm_mhu"
+
+/*
+ * +--------------------+-------+---------------+
+ * | Hardware Register | Offset| Driver View |
+ * +--------------------+-------+---------------+
+ * | SCP_INTR_L_STAT | 0x000 | RX_STATUS(L) |
+ * | SCP_INTR_L_SET | 0x008 | RX_SET(L) |
+ * | SCP_INTR_L_CLEAR | 0x010 | RX_CLEAR(L) |
+ * +--------------------+-------+---------------+
+ * | SCP_INTR_H_STAT | 0x020 | RX_STATUS(H) |
+ * | SCP_INTR_H_SET | 0x028 | RX_SET(H) |
+ * | SCP_INTR_H_CLEAR | 0x030 | RX_CLEAR(H) |
+ * +--------------------+-------+---------------+
+ * | CPU_INTR_L_STAT | 0x100 | TX_STATUS(L) |
+ * | CPU_INTR_L_SET | 0x108 | TX_SET(L) |
+ * | CPU_INTR_L_CLEAR | 0x110 | TX_CLEAR(L) |
+ * +--------------------+-------+---------------+
+ * | CPU_INTR_H_STAT | 0x120 | TX_STATUS(H) |
+ * | CPU_INTR_H_SET | 0x128 | TX_SET(H) |
+ * | CPU_INTR_H_CLEAR | 0x130 | TX_CLEAR(H) |
+ * +--------------------+-------+---------------+
+*/
+#define RX_OFFSET(chan) ((chan) * 0x20)
+#define RX_STATUS(chan) RX_OFFSET(chan)
+#define RX_SET(chan) (RX_OFFSET(chan) + 0x8)
+#define RX_CLEAR(chan) (RX_OFFSET(chan) + 0x10)
+
+#define TX_OFFSET(chan) (0x100 + (chan) * 0x20)
+#define TX_STATUS(chan) TX_OFFSET(chan)
+#define TX_SET(chan) (TX_OFFSET(chan) + 0x8)
+#define TX_CLEAR(chan) (TX_OFFSET(chan) + 0x10)
+
+/*
+ * +---------------+-------+----------------+
+ * | Payload | Offset| Driver View |
+ * +---------------+-------+----------------+
+ * | SCP->AP Low | 0x000 | RX_PAYLOAD(L) |
+ * | SCP->AP High | 0x400 | RX_PAYLOAD(H) |
+ * +---------------+-------+----------------+
+ * | AP->SCP Low | 0x200 | TX_PAYLOAD(L) |
+ * | AP->SCP High | 0x600 | TX_PAYLOAD(H) |
+ * +---------------+-------+----------------+
+*/
+#define PAYLOAD_MAX_SIZE 0x200
+#define PAYLOAD_OFFSET 0x400
+#define RX_PAYLOAD(chan) ((chan) * PAYLOAD_OFFSET)
+#define TX_PAYLOAD(chan) ((chan) * PAYLOAD_OFFSET + PAYLOAD_MAX_SIZE)
+
+struct mhu_chan {
+ int index;
+ int rx_irq;
+ struct mhu_ctlr *ctlr;
+ struct mhu_data_buf *data;
+};
+
+struct mhu_ctlr {
+ struct device *dev;
+ void __iomem *mbox_base;
+ void __iomem *payload_base;
+ struct mbox_controller mbox_con;
+ struct mhu_chan channels[CHANNEL_MAX];
+};
+
+static irqreturn_t mbox_handler(int irq, void *p)
+{
+ struct mbox_chan *link = (struct mbox_chan *)p;
+ struct mhu_chan *chan = link->con_priv;
+ struct mhu_ctlr *ctlr = chan->ctlr;
+ void __iomem *mbox_base = ctlr->mbox_base;
+ void __iomem *payload = ctlr->payload_base;
+ int idx = chan->index;
+ u32 status = readl(mbox_base + RX_STATUS(idx));
+
+ if (status && irq == chan->rx_irq) {
+ struct mhu_data_buf *data = chan->data;
+ if (!data)
+ return IRQ_NONE; /* spurious */
+ if (data->rx_buf)
+ memcpy(data->rx_buf, payload + RX_PAYLOAD(idx),
+ data->rx_size);
+ chan->data = NULL;
+ mbox_chan_received_data(link, data);
+ writel(~0, mbox_base + RX_CLEAR(idx));
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int mhu_send_data(struct mbox_chan *link, void *msg)
+{
+ struct mhu_chan *chan = link->con_priv;
+ struct mhu_ctlr *ctlr = chan->ctlr;
+ void __iomem *mbox_base = ctlr->mbox_base;
+ void __iomem *payload = ctlr->payload_base;
+ struct mhu_data_buf *data = (struct mhu_data_buf *)msg;
+ int idx = chan->index;
+
+ if (!data)
+ return -EINVAL;
+
+ chan->data = data;
+ if (data->tx_buf)
+ memcpy(payload + TX_PAYLOAD(idx), data->tx_buf, data->tx_size);
+ writel(data->cmd, mbox_base + TX_SET(idx));
+
+ return 0;
+}
+
+static int mhu_startup(struct mbox_chan *link)
+{
+ struct mhu_chan *chan = link->con_priv;
+ int err, mbox_irq = chan->rx_irq;
+
+ err = request_threaded_irq(mbox_irq, NULL, mbox_handler, IRQF_ONESHOT,
+ DRIVER_NAME, link);
+ return err;
+}
+
+static void mhu_shutdown(struct mbox_chan *link)
+{
+ struct mhu_chan *chan = link->con_priv;
+
+ chan->data = NULL;
+ free_irq(chan->rx_irq, link);
+}
+
+static bool mhu_last_tx_done(struct mbox_chan *link)
+{
+ struct mhu_chan *chan = link->con_priv;
+ struct mhu_ctlr *ctlr = chan->ctlr;
+ void __iomem *mbox_base = ctlr->mbox_base;
+ int idx = chan->index;
+
+ return !readl(mbox_base + TX_STATUS(idx));
+}
+
+static struct mbox_chan_ops mhu_ops = {
+ .send_data = mhu_send_data,
+ .startup = mhu_startup,
+ .shutdown = mhu_shutdown,
+ .last_tx_done = mhu_last_tx_done,
+};
+
+static int mhu_probe(struct platform_device *pdev)
+{
+ struct mhu_ctlr *ctlr;
+ struct mhu_chan *chan;
+ struct device *dev = &pdev->dev;
+ struct mbox_chan *l;
+ struct resource *res;
+ int idx;
+ static const char * const channel_names[] = {
+ CHANNEL_LOW_PRIORITY,
+ CHANNEL_HIGH_PRIORITY
+ };
+
+ ctlr = devm_kzalloc(dev, sizeof(*ctlr), GFP_KERNEL);
+ if (!ctlr) {
+ dev_err(dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "failed to get mailbox memory resource\n");
+ return -ENXIO;
+ }
+
+ ctlr->mbox_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctlr->mbox_base)) {
+ dev_err(dev, "failed to request or ioremap mailbox control\n");
+ return PTR_ERR(ctlr->mbox_base);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(dev, "failed to get payload memory resource\n");
+ return -ENXIO;
+ }
+
+ ctlr->payload_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctlr->payload_base)) {
+ dev_err(dev, "failed to request or ioremap mailbox payload\n");
+ return PTR_ERR(ctlr->payload_base);
+ }
+
+ ctlr->dev = dev;
+ platform_set_drvdata(pdev, ctlr);
+
+ l = devm_kzalloc(dev, sizeof(*l) * CHANNEL_MAX, GFP_KERNEL);
+ if (!l) {
+ dev_err(dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ ctlr->mbox_con.chans = l;
+ ctlr->mbox_con.num_chans = CHANNEL_MAX;
+ ctlr->mbox_con.txdone_irq = true;
+ ctlr->mbox_con.ops = &mhu_ops;
+ ctlr->mbox_con.dev = dev;
+
+ for (idx = 0; idx < CHANNEL_MAX; idx++) {
+ chan = &ctlr->channels[idx];
+ chan->index = idx;
+ chan->ctlr = ctlr;
+ chan->rx_irq = platform_get_irq(pdev, idx);
+ if (chan->rx_irq < 0) {
+ dev_err(dev, "failed to get interrupt for %s\n",
+ channel_names[idx]);
+ return -ENXIO;
+ }
+ l[idx].con_priv = chan;
+ }
+
+ if (mbox_controller_register(&ctlr->mbox_con)) {
+ dev_err(dev, "failed to register mailbox controller\n");
+ return -ENOMEM;
+ }
+
+ the_scpi_device = dev;
+ return 0;
+}
+
+static int mhu_remove(struct platform_device *pdev)
+{
+ struct mhu_ctlr *ctlr = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ mbox_controller_unregister(&ctlr->mbox_con);
+ devm_kfree(dev, ctlr->mbox_con.chans);
+
+ devm_iounmap(dev, ctlr->payload_base);
+ devm_iounmap(dev, ctlr->mbox_base);
+
+ platform_set_drvdata(pdev, NULL);
+ devm_kfree(dev, ctlr);
+ return 0;
+}
+
+static struct of_device_id mhu_of_match[] = {
+ { .compatible = "arm,mhu" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mhu_of_match);
+
+static struct platform_driver mhu_driver = {
+ .probe = mhu_probe,
+ .remove = mhu_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = mhu_of_match,
+ },
+};
+
+static int __init mhu_init(void)
+{
+ return platform_driver_register(&mhu_driver);
+}
+core_initcall(mhu_init);
+
+static void __exit mhu_exit(void)
+{
+ platform_driver_unregister(&mhu_driver);
+}
+module_exit(mhu_exit);
+
+MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
+MODULE_DESCRIPTION("ARM MHU mailbox driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/arm_mhu.h b/drivers/mailbox/arm_mhu.h
new file mode 100644
index 000000000000..f78c1fa369c8
--- /dev/null
+++ b/drivers/mailbox/arm_mhu.h
@@ -0,0 +1,33 @@
+/*
+ * ARM Message Handling Unit (MHU) driver header
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#define CONTROLLER_NAME "mhu_ctlr"
+
+#define CHANNEL_MAX 2
+#define CHANNEL_LOW_PRIORITY "cpu_to_scp_low"
+#define CHANNEL_HIGH_PRIORITY "cpu_to_scp_high"
+
+struct mhu_data_buf {
+ u32 cmd;
+ int tx_size;
+ void *tx_buf;
+ int rx_size;
+ void *rx_buf;
+ void *cl_data;
+};
+
+extern struct device* the_scpi_device;
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
new file mode 100644
index 000000000000..df09d3bf55e4
--- /dev/null
+++ b/drivers/mailbox/mailbox.c
@@ -0,0 +1,467 @@
+/*
+ * Mailbox: Common code for Mailbox controllers and users
+ *
+ * Copyright (C) 2013-2014 Linaro Ltd.
+ * Author: Jassi Brar <jassisinghbrar@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/bitops.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox_controller.h>
+
+#define TXDONE_BY_IRQ BIT(0) /* controller has remote RTR irq */
+#define TXDONE_BY_POLL BIT(1) /* controller can read status of last TX */
+#define TXDONE_BY_ACK BIT(2) /* S/W ACK received by Client ticks the TX */
+
+static LIST_HEAD(mbox_cons);
+static DEFINE_MUTEX(con_mutex);
+
+static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
+{
+ int idx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ /* See if there is any space left */
+ if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -ENOBUFS;
+ }
+
+ idx = chan->msg_free;
+ chan->msg_data[idx] = mssg;
+ chan->msg_count++;
+
+ if (idx == MBOX_TX_QUEUE_LEN - 1)
+ chan->msg_free = 0;
+ else
+ chan->msg_free++;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return idx;
+}
+
+static void msg_submit(struct mbox_chan *chan)
+{
+ unsigned count, idx;
+ unsigned long flags;
+ void *data;
+ int err;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ if (!chan->msg_count || chan->active_req)
+ goto exit;
+
+ count = chan->msg_count;
+ idx = chan->msg_free;
+ if (idx >= count)
+ idx -= count;
+ else
+ idx += MBOX_TX_QUEUE_LEN - count;
+
+ data = chan->msg_data[idx];
+
+ /* Try to submit a message to the MBOX controller */
+ err = chan->mbox->ops->send_data(chan, data);
+ if (!err) {
+ chan->active_req = data;
+ chan->msg_count--;
+ }
+exit:
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static void tx_tick(struct mbox_chan *chan, int r)
+{
+ unsigned long flags;
+ void *mssg;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ mssg = chan->active_req;
+ chan->active_req = NULL;
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ /* Submit next message */
+ msg_submit(chan);
+
+ /* Notify the client */
+ if (mssg && chan->cl->tx_done)
+ chan->cl->tx_done(chan->cl, mssg, r);
+
+ if (chan->cl->tx_block)
+ complete(&chan->tx_complete);
+}
+
+static void poll_txdone(unsigned long data)
+{
+ struct mbox_controller *mbox = (struct mbox_controller *)data;
+ bool txdone, resched = false;
+ int i;
+
+ for (i = 0; i < mbox->num_chans; i++) {
+ struct mbox_chan *chan = &mbox->chans[i];
+
+ if (chan->active_req && chan->cl) {
+ resched = true;
+ txdone = chan->mbox->ops->last_tx_done(chan);
+ if (txdone)
+ tx_tick(chan, 0);
+ }
+ }
+
+ if (resched)
+ mod_timer(&mbox->poll, jiffies +
+ msecs_to_jiffies(mbox->period));
+}
+
+/**
+ * mbox_chan_received_data - A way for controller driver to push data
+ * received from remote to the upper layer.
+ * @chan: Pointer to the mailbox channel on which RX happened.
+ * @mssg: Client specific message typecasted as void *
+ *
+ * After startup and before shutdown any data received on the chan
+ * is passed on to the API via atomic mbox_chan_received_data().
+ * The controller should ACK the RX only after this call returns.
+ */
+void mbox_chan_received_data(struct mbox_chan *chan, void *mssg)
+{
+ /* No buffering the received data */
+ if (chan->cl->rx_callback)
+ chan->cl->rx_callback(chan->cl, mssg);
+}
+EXPORT_SYMBOL_GPL(mbox_chan_received_data);
+
+/**
+ * mbox_chan_txdone - A way for controller driver to notify the
+ * framework that the last TX has completed.
+ * @chan: Pointer to the mailbox chan on which TX happened.
+ * @r: Status of last TX - OK or ERROR
+ *
+ * The controller that has IRQ for TX ACK calls this atomic API
+ * to tick the TX state machine. It works only if txdone_irq
+ * is set by the controller.
+ */
+void mbox_chan_txdone(struct mbox_chan *chan, int r)
+{
+ if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) {
+ dev_err(chan->mbox->dev,
+ "Controller can't run the TX ticker\n");
+ return;
+ }
+
+ tx_tick(chan, r);
+}
+EXPORT_SYMBOL_GPL(mbox_chan_txdone);
+
+/**
+ * mbox_client_txdone - The way for a client to run the TX state machine.
+ * @chan: Mailbox channel assigned to this client.
+ * @r: Success status of last transmission.
+ *
+ * The client/protocol had received some 'ACK' packet and it notifies
+ * the API that the last packet was sent successfully. This only works
+ * if the controller can't sense TX-Done.
+ */
+void mbox_client_txdone(struct mbox_chan *chan, int r)
+{
+ if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) {
+ dev_err(chan->mbox->dev, "Client can't run the TX ticker\n");
+ return;
+ }
+
+ tx_tick(chan, r);
+}
+EXPORT_SYMBOL_GPL(mbox_client_txdone);
+
+/**
+ * mbox_client_peek_data - A way for client driver to pull data
+ * received from remote by the controller.
+ * @chan: Mailbox channel assigned to this client.
+ *
+ * A poke to controller driver for any received data.
+ * The data is actually passed onto client via the
+ * mbox_chan_received_data()
+ * The call can be made from atomic context, so the controller's
+ * implementation of peek_data() must not sleep.
+ *
+ * Return: True, if controller has, and is going to push after this,
+ * some data.
+ * False, if controller doesn't have any data to be read.
+ */
+bool mbox_client_peek_data(struct mbox_chan *chan)
+{
+ if (chan->mbox->ops->peek_data)
+ return chan->mbox->ops->peek_data(chan);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(mbox_client_peek_data);
+
+/**
+ * mbox_send_message - For client to submit a message to be
+ * sent to the remote.
+ * @chan: Mailbox channel assigned to this client.
+ * @mssg: Client specific message typecasted.
+ *
+ * For client to submit data to the controller destined for a remote
+ * processor. If the client had set 'tx_block', the call will return
+ * either when the remote receives the data or when 'tx_tout' millisecs
+ * run out.
+ * In non-blocking mode, the requests are buffered by the API and a
+ * non-negative token is returned for each queued request. If the request
+ * is not queued, a negative token is returned. Upon failure or successful
+ * TX, the API calls 'tx_done' from atomic context, from which the client
+ * could submit yet another request.
+ * The pointer to message should be preserved until it is sent
+ * over the chan, i.e, tx_done() is made.
+ * This function could be called from atomic context as it simply
+ * queues the data and returns a token against the request.
+ *
+ * Return: Non-negative integer for successful submission (non-blocking mode)
+ * or transmission over chan (blocking mode).
+ * Negative value denotes failure.
+ */
+int mbox_send_message(struct mbox_chan *chan, void *mssg)
+{
+ int t;
+
+ if (!chan || !chan->cl)
+ return -EINVAL;
+
+ t = add_to_rbuf(chan, mssg);
+ if (t < 0) {
+ dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n");
+ return t;
+ }
+
+ msg_submit(chan);
+
+ init_completion(&chan->tx_complete);
+
+ if (chan->txdone_method == TXDONE_BY_POLL)
+ poll_txdone((unsigned long)chan->mbox);
+
+ if (chan->cl->tx_block && chan->active_req) {
+ unsigned long wait;
+ int ret;
+
+ if (!chan->cl->tx_tout) /* wait forever */
+ wait = msecs_to_jiffies(3600000);
+ else
+ wait = msecs_to_jiffies(chan->cl->tx_tout);
+
+ ret = wait_for_completion_timeout(&chan->tx_complete, wait);
+ if (ret == 0) {
+ t = -EIO;
+ tx_tick(chan, -EIO);
+ }
+ }
+
+ return t;
+}
+EXPORT_SYMBOL_GPL(mbox_send_message);
+
+/**
+ * mbox_request_channel - Request a mailbox channel.
+ * @cl: Identity of the client requesting the channel.
+ * @index: Index of mailbox specifier in 'mboxes' property.
+ *
+ * The Client specifies its requirements and capabilities while asking for
+ * a mailbox channel. It can't be called from atomic context.
+ * The channel is exclusively allocated and can't be used by another
+ * client before the owner calls mbox_free_channel.
+ * After assignment, any packet received on this channel will be
+ * handed over to the client via the 'rx_callback'.
+ * The framework holds reference to the client, so the mbox_client
+ * structure shouldn't be modified until the mbox_free_channel returns.
+ *
+ * Return: Pointer to the channel assigned to the client if successful.
+ * ERR_PTR for request failure.
+ */
+struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
+{
+ struct device *dev = cl->dev;
+ struct mbox_controller *mbox;
+ struct of_phandle_args spec;
+ struct mbox_chan *chan;
+ unsigned long flags;
+ int ret;
+
+ if (!dev || !dev->of_node) {
+ pr_debug("%s: No owner device node\n", __func__);
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_lock(&con_mutex);
+
+ if (of_parse_phandle_with_args(dev->of_node, "mboxes",
+ "#mbox-cells", index, &spec)) {
+ dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
+ mutex_unlock(&con_mutex);
+ return ERR_PTR(-ENODEV);
+ }
+
+ chan = NULL;
+ list_for_each_entry(mbox, &mbox_cons, node)
+ if (mbox->dev->of_node == spec.np) {
+ chan = mbox->of_xlate(mbox, &spec);
+ break;
+ }
+
+ of_node_put(spec.np);
+
+ if (!chan || chan->cl || !try_module_get(mbox->dev->driver->owner)) {
+ dev_dbg(dev, "%s: mailbox not free\n", __func__);
+ mutex_unlock(&con_mutex);
+ return ERR_PTR(-EBUSY);
+ }
+
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->msg_free = 0;
+ chan->msg_count = 0;
+ chan->active_req = NULL;
+ chan->cl = cl;
+ init_completion(&chan->tx_complete);
+
+ if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
+ chan->txdone_method |= TXDONE_BY_ACK;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ ret = chan->mbox->ops->startup(chan);
+ if (ret) {
+ dev_err(dev, "Unable to startup the chan (%d)\n", ret);
+ mbox_free_channel(chan);
+ chan = ERR_PTR(ret);
+ }
+
+ mutex_unlock(&con_mutex);
+ return chan;
+}
+EXPORT_SYMBOL_GPL(mbox_request_channel);
+
+/**
+ * mbox_free_channel - The client relinquishes control of a mailbox
+ * channel by this call.
+ * @chan: The mailbox channel to be freed.
+ */
+void mbox_free_channel(struct mbox_chan *chan)
+{
+ unsigned long flags;
+
+ if (!chan || !chan->cl)
+ return;
+
+ chan->mbox->ops->shutdown(chan);
+
+ /* The queued TX requests are simply aborted, no callbacks are made */
+ spin_lock_irqsave(&chan->lock, flags);
+ chan->cl = NULL;
+ chan->active_req = NULL;
+ if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
+ chan->txdone_method = TXDONE_BY_POLL;
+
+ module_put(chan->mbox->dev->driver->owner);
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+EXPORT_SYMBOL_GPL(mbox_free_channel);
+
+static struct mbox_chan *
+of_mbox_index_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
+{
+ int ind = sp->args[0];
+
+ if (ind >= mbox->num_chans)
+ return NULL;
+
+ return &mbox->chans[ind];
+}
+
+/**
+ * mbox_controller_register - Register the mailbox controller
+ * @mbox: Pointer to the mailbox controller.
+ *
+ * The controller driver registers its communication channels
+ */
+int mbox_controller_register(struct mbox_controller *mbox)
+{
+ int i, txdone;
+
+ /* Sanity check */
+ if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans)
+ return -EINVAL;
+
+ if (mbox->txdone_irq)
+ txdone = TXDONE_BY_IRQ;
+ else if (mbox->txdone_poll)
+ txdone = TXDONE_BY_POLL;
+ else /* It has to be ACK then */
+ txdone = TXDONE_BY_ACK;
+
+ if (txdone == TXDONE_BY_POLL) {
+ mbox->poll.function = &poll_txdone;
+ mbox->poll.data = (unsigned long)mbox;
+ init_timer(&mbox->poll);
+ }
+
+ for (i = 0; i < mbox->num_chans; i++) {
+ struct mbox_chan *chan = &mbox->chans[i];
+
+ chan->cl = NULL;
+ chan->mbox = mbox;
+ chan->txdone_method = txdone;
+ spin_lock_init(&chan->lock);
+ }
+
+ if (!mbox->of_xlate)
+ mbox->of_xlate = of_mbox_index_xlate;
+
+ mutex_lock(&con_mutex);
+ list_add_tail(&mbox->node, &mbox_cons);
+ mutex_unlock(&con_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mbox_controller_register);
+
+/**
+ * mbox_controller_unregister - Unregister the mailbox controller
+ * @mbox: Pointer to the mailbox controller.
+ */
+void mbox_controller_unregister(struct mbox_controller *mbox)
+{
+ int i;
+
+ if (!mbox)
+ return;
+
+ mutex_lock(&con_mutex);
+
+ list_del(&mbox->node);
+
+ for (i = 0; i < mbox->num_chans; i++)
+ mbox_free_channel(&mbox->chans[i]);
+
+ if (mbox->txdone_poll)
+ del_timer_sync(&mbox->poll);
+
+ mutex_unlock(&con_mutex);
+}
+EXPORT_SYMBOL_GPL(mbox_controller_unregister);
diff --git a/drivers/mailbox/pl320-ipc.c b/drivers/mailbox/pl320-ipc.c
index d873cbae2fbb..f3755e0aa935 100644
--- a/drivers/mailbox/pl320-ipc.c
+++ b/drivers/mailbox/pl320-ipc.c
@@ -26,7 +26,7 @@
#include <linux/device.h>
#include <linux/amba/bus.h>
-#include <linux/mailbox.h>
+#include <linux/pl320-ipc.h>
#define IPCMxSOURCE(m) ((m) * 0x40)
#define IPCMxDSET(m) (((m) * 0x40) + 0x004)
diff --git a/drivers/mailbox/scpi_protocol.c b/drivers/mailbox/scpi_protocol.c
new file mode 100644
index 000000000000..49b500cd87ef
--- /dev/null
+++ b/drivers/mailbox/scpi_protocol.c
@@ -0,0 +1,350 @@
+/*
+ * System Control and Power Interface (SCPI) Message Protocol driver
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/mailbox_client.h>
+#include <linux/scpi_protocol.h>
+#include <linux/slab.h>
+
+#include "arm_mhu.h"
+
+#define CMD_ID_SHIFT 0
+#define CMD_ID_MASK 0xff
+#define CMD_SENDER_ID_SHIFT 8
+#define CMD_SENDER_ID_MASK 0xff
+#define CMD_DATA_SIZE_SHIFT 20
+#define CMD_DATA_SIZE_MASK 0x1ff
+#define PACK_SCPI_CMD(cmd, sender, txsz) \
+ ((((cmd) & CMD_ID_MASK) << CMD_ID_SHIFT) | \
+ (((sender) & CMD_SENDER_ID_MASK) << CMD_SENDER_ID_SHIFT) | \
+ (((txsz) & CMD_DATA_SIZE_MASK) << CMD_DATA_SIZE_SHIFT))
+
+#define MAX_DVFS_DOMAINS 3
+#define MAX_DVFS_OPPS 4
+#define DVFS_LATENCY(hdr) ((hdr) >> 16)
+#define DVFS_OPP_COUNT(hdr) (((hdr) >> 8) & 0xff)
+
+enum scpi_error_codes {
+ SCPI_SUCCESS = 0, /* Success */
+ SCPI_ERR_PARAM = 1, /* Invalid parameter(s) */
+ SCPI_ERR_ALIGN = 2, /* Invalid alignment */
+ SCPI_ERR_SIZE = 3, /* Invalid size */
+ SCPI_ERR_HANDLER = 4, /* Invalid handler/callback */
+ SCPI_ERR_ACCESS = 5, /* Invalid access/permission denied */
+ SCPI_ERR_RANGE = 6, /* Value out of range */
+ SCPI_ERR_TIMEOUT = 7, /* Timeout has occurred */
+ SCPI_ERR_NOMEM = 8, /* Invalid memory area or pointer */
+ SCPI_ERR_PWRSTATE = 9, /* Invalid power state */
+ SCPI_ERR_SUPPORT = 10, /* Not supported or disabled */
+ SCPI_ERR_DEVICE = 11, /* Device error */
+ SCPI_ERR_MAX
+};
+
+enum scpi_client_id {
+ SCPI_CL_NONE,
+ SCPI_CL_CLOCKS,
+ SCPI_CL_DVFS,
+ SCPI_CL_POWER,
+ SCPI_MAX,
+};
+
+enum scpi_std_cmd {
+ SCPI_CMD_INVALID = 0x00,
+ SCPI_CMD_SCPI_READY = 0x01,
+ SCPI_CMD_SCPI_CAPABILITIES = 0x02,
+ SCPI_CMD_EVENT = 0x03,
+ SCPI_CMD_SET_CSS_PWR_STATE = 0x04,
+ SCPI_CMD_GET_CSS_PWR_STATE = 0x05,
+ SCPI_CMD_CFG_PWR_STATE_STAT = 0x06,
+ SCPI_CMD_GET_PWR_STATE_STAT = 0x07,
+ SCPI_CMD_SYS_PWR_STATE = 0x08,
+ SCPI_CMD_L2_READY = 0x09,
+ SCPI_CMD_SET_AP_TIMER = 0x0a,
+ SCPI_CMD_CANCEL_AP_TIME = 0x0b,
+ SCPI_CMD_DVFS_CAPABILITIES = 0x0c,
+ SCPI_CMD_GET_DVFS_INFO = 0x0d,
+ SCPI_CMD_SET_DVFS = 0x0e,
+ SCPI_CMD_GET_DVFS = 0x0f,
+ SCPI_CMD_GET_DVFS_STAT = 0x10,
+ SCPI_CMD_SET_RTC = 0x11,
+ SCPI_CMD_GET_RTC = 0x12,
+ SCPI_CMD_CLOCK_CAPABILITIES = 0x13,
+ SCPI_CMD_SET_CLOCK_INDEX = 0x14,
+ SCPI_CMD_SET_CLOCK_VALUE = 0x15,
+ SCPI_CMD_GET_CLOCK_VALUE = 0x16,
+ SCPI_CMD_PSU_CAPABILITIES = 0x17,
+ SCPI_CMD_SET_PSU = 0x18,
+ SCPI_CMD_GET_PSU = 0x19,
+ SCPI_CMD_SENSOR_CAPABILITIES = 0x1a,
+ SCPI_CMD_SENSOR_INFO = 0x1b,
+ SCPI_CMD_SENSOR_VALUE = 0x1c,
+ SCPI_CMD_SENSOR_CFG_PERIODIC = 0x1d,
+ SCPI_CMD_SENSOR_CFG_BOUNDS = 0x1e,
+ SCPI_CMD_SENSOR_ASYNC_VALUE = 0x1f,
+ SCPI_CMD_COUNT
+};
+
+struct scpi_data_buf {
+ int client_id;
+ struct mhu_data_buf *data;
+ struct completion complete;
+};
+
+static int high_priority_cmds[] = {
+ SCPI_CMD_GET_CSS_PWR_STATE,
+ SCPI_CMD_CFG_PWR_STATE_STAT,
+ SCPI_CMD_GET_PWR_STATE_STAT,
+ SCPI_CMD_SET_DVFS,
+ SCPI_CMD_GET_DVFS,
+ SCPI_CMD_SET_RTC,
+ SCPI_CMD_GET_RTC,
+ SCPI_CMD_SET_CLOCK_INDEX,
+ SCPI_CMD_SET_CLOCK_VALUE,
+ SCPI_CMD_GET_CLOCK_VALUE,
+ SCPI_CMD_SET_PSU,
+ SCPI_CMD_GET_PSU,
+ SCPI_CMD_SENSOR_VALUE,
+ SCPI_CMD_SENSOR_CFG_PERIODIC,
+ SCPI_CMD_SENSOR_CFG_BOUNDS,
+};
+
+static struct scpi_opp *scpi_opps[MAX_DVFS_DOMAINS];
+
+static int scpi_linux_errmap[SCPI_ERR_MAX] = {
+ 0, -EINVAL, -ENOEXEC, -EMSGSIZE,
+ -EINVAL, -EACCES, -ERANGE, -ETIMEDOUT,
+ -ENOMEM, -EINVAL, -EOPNOTSUPP, -EIO,
+};
+
+static inline int scpi_to_linux_errno(int errno)
+{
+ if (errno >= SCPI_SUCCESS && errno < SCPI_ERR_MAX)
+ return scpi_linux_errmap[errno];
+ return -EIO;
+}
+
+static bool high_priority_chan_supported(int cmd)
+{
+ int idx;
+ for (idx = 0; idx < ARRAY_SIZE(high_priority_cmds); idx++)
+ if (cmd == high_priority_cmds[idx])
+ return true;
+ return false;
+}
+
+static void scpi_rx_callback(struct mbox_client *cl, void *msg)
+{
+ struct mhu_data_buf *data = (struct mhu_data_buf *)msg;
+ struct scpi_data_buf *scpi_buf = data->cl_data;
+ complete(&scpi_buf->complete);
+}
+
+static int send_scpi_cmd(struct scpi_data_buf *scpi_buf, bool high_priority)
+{
+ struct mbox_chan *chan;
+ struct mbox_client cl;
+ struct mhu_data_buf *data = scpi_buf->data;
+ u32 status;
+
+ cl.dev = the_scpi_device;
+ cl.rx_callback = scpi_rx_callback;
+ cl.tx_done = NULL;
+ cl.tx_block = false;
+ cl.knows_txdone = false;
+
+ chan = mbox_request_channel(&cl, high_priority);
+ if (IS_ERR(chan))
+ return PTR_ERR(chan);
+
+ init_completion(&scpi_buf->complete);
+ if (mbox_send_message(chan, (void *)data) < 0) {
+ status = SCPI_ERR_TIMEOUT;
+ goto free_channel;
+ }
+
+ wait_for_completion(&scpi_buf->complete);
+ status = *(u32 *)(data->rx_buf); /* read first word */
+
+free_channel:
+ mbox_free_channel(chan);
+
+ return scpi_to_linux_errno(status);
+}
+
+#define SCPI_SETUP_DBUF(scpi_buf, mhu_buf, _client_id,\
+ _cmd, _tx_buf, _rx_buf) \
+do { \
+ struct mhu_data_buf *pdata = &mhu_buf; \
+ pdata->cmd = _cmd; \
+ pdata->tx_buf = &_tx_buf; \
+ pdata->tx_size = sizeof(_tx_buf); \
+ pdata->rx_buf = &_rx_buf; \
+ pdata->rx_size = sizeof(_rx_buf); \
+ scpi_buf.client_id = _client_id; \
+ scpi_buf.data = pdata; \
+} while (0)
+
+static int scpi_execute_cmd(struct scpi_data_buf *scpi_buf)
+{
+ struct mhu_data_buf *data;
+ bool high_priority;
+
+ if (!scpi_buf || !scpi_buf->data)
+ return -EINVAL;
+
+ data = scpi_buf->data;
+ high_priority = high_priority_chan_supported(data->cmd);
+ data->cmd = PACK_SCPI_CMD(data->cmd, scpi_buf->client_id,
+ data->tx_size);
+ data->cl_data = scpi_buf;
+
+ return send_scpi_cmd(scpi_buf, high_priority);
+}
+
+unsigned long scpi_clk_get_val(u16 clk_id)
+{
+ struct scpi_data_buf sdata;
+ struct mhu_data_buf mdata;
+ struct __packed {
+ u32 status;
+ u32 clk_rate;
+ } buf;
+
+ SCPI_SETUP_DBUF(sdata, mdata, SCPI_CL_CLOCKS,
+ SCPI_CMD_GET_CLOCK_VALUE, clk_id, buf);
+ if (scpi_execute_cmd(&sdata))
+ return 0;
+
+ return buf.clk_rate;
+}
+EXPORT_SYMBOL_GPL(scpi_clk_get_val);
+
+int scpi_clk_set_val(u16 clk_id, unsigned long rate)
+{
+ struct scpi_data_buf sdata;
+ struct mhu_data_buf mdata;
+ int stat;
+ struct __packed {
+ u32 clk_rate;
+ u16 clk_id;
+ } buf;
+
+ buf.clk_rate = (u32)rate;
+ buf.clk_id = clk_id;
+
+ SCPI_SETUP_DBUF(sdata, mdata, SCPI_CL_CLOCKS,
+ SCPI_CMD_SET_CLOCK_VALUE, buf, stat);
+ return scpi_execute_cmd(&sdata);
+}
+EXPORT_SYMBOL_GPL(scpi_clk_set_val);
+
+struct scpi_opp *scpi_dvfs_get_opps(u8 domain)
+{
+ struct scpi_data_buf sdata;
+ struct mhu_data_buf mdata;
+ struct __packed {
+ u32 status;
+ u32 header;
+ u32 freqs[MAX_DVFS_OPPS];
+ } buf;
+ struct scpi_opp *opp;
+ size_t freqs_sz;
+ int count, ret;
+
+ if (domain >= MAX_DVFS_DOMAINS)
+ return ERR_PTR(-EINVAL);
+
+ if (scpi_opps[domain]) /* data already populated */
+ return scpi_opps[domain];
+
+ SCPI_SETUP_DBUF(sdata, mdata, SCPI_CL_DVFS,
+ SCPI_CMD_GET_DVFS_INFO, domain, buf);
+ ret = scpi_execute_cmd(&sdata);
+ if (ret)
+ return ERR_PTR(ret);
+
+ opp = kmalloc(sizeof(*opp), GFP_KERNEL);
+ if (!opp)
+ return ERR_PTR(-ENOMEM);
+
+ count = DVFS_OPP_COUNT(buf.header);
+ freqs_sz = count * sizeof(*(opp->freqs));
+
+ opp->count = count;
+ opp->latency = DVFS_LATENCY(buf.header);
+ opp->freqs = kmalloc(freqs_sz, GFP_KERNEL);
+ if (!opp->freqs) {
+ kfree(opp);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ memcpy(opp->freqs, &buf.freqs[0], freqs_sz);
+ scpi_opps[domain] = opp;
+
+ return opp;
+}
+EXPORT_SYMBOL_GPL(scpi_dvfs_get_opps);
+
+int scpi_dvfs_get_idx(u8 domain)
+{
+ struct scpi_data_buf sdata;
+ struct mhu_data_buf mdata;
+ struct __packed {
+ u32 status;
+ u8 dvfs_idx;
+ } buf;
+ int ret;
+
+ if (domain >= MAX_DVFS_DOMAINS)
+ return -EINVAL;
+
+ SCPI_SETUP_DBUF(sdata, mdata, SCPI_CL_DVFS,
+ SCPI_CMD_GET_DVFS, domain, buf);
+ ret = scpi_execute_cmd(&sdata);
+
+ if (!ret)
+ ret = buf.dvfs_idx;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(scpi_dvfs_get_idx);
+
+int scpi_dvfs_set_idx(u8 domain, u8 idx)
+{
+ struct scpi_data_buf sdata;
+ struct mhu_data_buf mdata;
+ struct __packed {
+ u8 dvfs_domain;
+ u8 dvfs_idx;
+ } buf;
+ int stat;
+
+ buf.dvfs_idx = idx;
+ buf.dvfs_domain = domain;
+
+ if (domain >= MAX_DVFS_DOMAINS)
+ return -EINVAL;
+
+ SCPI_SETUP_DBUF(sdata, mdata, SCPI_CL_DVFS,
+ SCPI_CMD_SET_DVFS, buf, stat);
+ return scpi_execute_cmd(&sdata);
+}
+EXPORT_SYMBOL_GPL(scpi_dvfs_set_idx);