author    Hemant Kumar <hemantk@codeaurora.org>    2020-11-27 19:26:06 -0800
committer Loic Poulain <loic.poulain@linaro.org>   2021-01-11 11:12:49 +0100
commit    d40b454bd5261352dd6bb7ad11732ed0025f51d7 (patch)
tree      f2692f7ad9e3b4d1e4082ff19b3a20d900844b15
parent    10d6795cd28f5647836c2a60752c84a4ed7f6cd4 (diff)
bus: mhi: Add userspace client interface driver
This MHI client driver allows userspace clients to transfer raw data between an MHI device and the host using standard file operations. The driver instantiates a UCI device object, which is associated with the device file node. When the device file node is opened, the UCI device object instantiates a UCI channel object, which manages the MHI channels by calling the MHI core APIs for read and write operations. The MHI channels are started as part of device open() and remain started until the last release() is called on the UCI device file node.

The device file node is created with the format:

  /dev/mhi_<mhi_device_name>

Currently the QMI channel is supported.

Signed-off-by: Hemant Kumar <hemantk@codeaurora.org>
Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
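For illustration (not part of this patch), a minimal userspace client might look like the sketch below. The node name /dev/mhi0_QMI is a hypothetical example; the actual name depends on the MHI device name, and a real client would exchange valid QMI payloads.

/*
 * Hypothetical userspace sketch: open, poll and read on a UCI node.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd;
	char buf[4096];
	ssize_t n;
	int fd;

	fd = open("/dev/mhi0_QMI", O_RDWR);	/* open() starts the MHI channels */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * A write() queues one raw message on the UL channel, e.g.:
	 * write(fd, qmi_req, qmi_req_len);
	 */

	pfd.fd = fd;
	pfd.events = POLLIN;

	/* Wait for a DL buffer to arrive, then read the raw message */
	if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLIN)) {
		n = read(fd, buf, sizeof(buf));
		if (n > 0)
			printf("read %zd bytes\n", n);
	}

	close(fd);	/* last release() stops the MHI channels */
	return 0;
}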
-rw-r--r--	drivers/bus/mhi/Kconfig	13
-rw-r--r--	drivers/bus/mhi/Makefile	3
-rw-r--r--	drivers/bus/mhi/uci.c	534
3 files changed, 550 insertions(+), 0 deletions(-)
diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig
index da5cd0c9fc62..5194e8ee50bd 100644
--- a/drivers/bus/mhi/Kconfig
+++ b/drivers/bus/mhi/Kconfig
@@ -29,3 +29,16 @@ config MHI_BUS_PCI_GENERIC
This driver provides MHI PCI controller driver for devices such as
Qualcomm SDX55 based PCIe modems.
+config MHI_UCI
+ tristate "MHI UCI"
+ depends on MHI_BUS
+ help
+ The MHI-based Userspace Client Interface (UCI) driver is used for
+ transferring raw data between the host and a device using standard file
+ operations from userspace. Open, read, write, poll and close
+ operations are supported by this driver. Please check
+ mhi_uci_match_table for all supported channels that are exposed to
+ userspace.
+
+ To compile this driver as a module, choose M here: the module will be
+ called mhi_uci.
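For example (not part of this patch), a configuration fragment that builds the driver as a loadable module, assuming the MHI bus itself is enabled, could be:

CONFIG_MHI_BUS=y
CONFIG_MHI_UCI=m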
diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile
index 0a2d778d6fb4..69f21116e40c 100644
--- a/drivers/bus/mhi/Makefile
+++ b/drivers/bus/mhi/Makefile
@@ -4,3 +4,6 @@ obj-y += core/
obj-$(CONFIG_MHI_BUS_PCI_GENERIC) += mhi_pci_generic.o
mhi_pci_generic-y += pci_generic.o
+# MHI client
+mhi_uci-y := uci.o
+obj-$(CONFIG_MHI_UCI) += mhi_uci.o
diff --git a/drivers/bus/mhi/uci.c b/drivers/bus/mhi/uci.c
new file mode 100644
index 000000000000..e6ceb9fce910
--- /dev/null
+++ b/drivers/bus/mhi/uci.c
@@ -0,0 +1,534 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. */
+
+#include <linux/kernel.h>
+#include <linux/mhi.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+
+#define MHI_DEVICE_NAME "mhi"
+#define MHI_UCI_DRIVER_NAME "mhi_uci"
+#define MHI_MAX_UCI_MINORS 128
+
+static DEFINE_IDR(uci_idr);
+static DEFINE_MUTEX(uci_drv_lock);
+static struct class *uci_dev_class;
+static int uci_dev_major;
+
+struct uci_buf {
+ struct list_head node;
+ void *data;
+ size_t len;
+ size_t consumed;
+};
+
+struct uci_dev {
+ unsigned int minor;
+ size_t mtu;
+
+ struct mhi_device *mhi_dev;
+ struct mutex mhi_dev_lock;
+
+ wait_queue_head_t ul_wq;
+ wait_queue_head_t dl_wq;
+
+ spinlock_t dl_queue_lock;
+ struct list_head dl_queue;
+
+ struct mutex write_lock;
+
+ unsigned long flags;
+#define UCI_DEV_DL_CAP 0
+#define UCI_DEV_UL_CAP 1
+#define UCI_DEV_CONNECTED 2
+
+ struct kref ref_count;
+};
+
+static void mhi_uci_dev_release(struct kref *ref)
+{
+ struct uci_dev *udev = container_of(ref, struct uci_dev, ref_count);
+ struct uci_buf *buf_itr, *tmp;
+
+ /* Release any unconsumed buffers */
+ list_for_each_entry_safe(buf_itr, tmp, &udev->dl_queue, node) {
+ list_del(&buf_itr->node);
+ kfree(buf_itr->data);
+ }
+
+ mutex_destroy(&udev->mhi_dev_lock);
+ mutex_destroy(&udev->write_lock);
+
+ kfree(udev);
+}
+
+static int mhi_uci_queue_inbound(struct uci_dev *udev)
+{
+ struct mhi_device *mhi_dev = udev->mhi_dev;
+ struct device *dev = &mhi_dev->dev;
+ int nr_desc, i, ret = -EIO;
+ struct uci_buf *ubuf;
+ void *buf;
+
+ /*
+ * Skip queueing without error if the DL channel is not supported. This
+ * allows open() to succeed for a UCI device with a UL-only channel.
+ */
+ if (!udev->mhi_dev->dl_chan)
+ return 0;
+
+ nr_desc = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
+
+ for (i = 0; i < nr_desc; i++) {
+ buf = kmalloc(udev->mtu + sizeof(*ubuf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* save uci_buf info at the end of buf */
+ ubuf = buf + udev->mtu;
+ ubuf->data = buf;
+
+ dev_dbg(dev, "Allocated buf %d of %d size %zu\n", i + 1, nr_desc, udev->mtu);
+
+ ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, ubuf->data, udev->mtu, MHI_EOT);
+ if (ret) {
+ kfree(buf);
+ dev_err(dev, "Failed to queue buffer %d\n", i);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int mhi_uci_open(struct inode *inode, struct file *filp)
+{
+ unsigned int minor = iminor(inode);
+ struct uci_dev *udev = NULL;
+ int ret;
+
+ /* Retrieve corresponding uci_dev and get a reference */
+ mutex_lock(&uci_drv_lock);
+ udev = idr_find(&uci_idr, minor);
+ if (!udev) {
+ mutex_unlock(&uci_drv_lock);
+ return -ENODEV;
+ }
+ kref_get(&udev->ref_count);
+ mutex_unlock(&uci_drv_lock);
+
+ /* Start MHI channel(s) and fill RX queue */
+ mutex_lock(&udev->mhi_dev_lock);
+ ret = mhi_prepare_for_transfer(udev->mhi_dev);
+ if (!ret)
+ ret = mhi_uci_queue_inbound(udev);
+ mutex_unlock(&udev->mhi_dev_lock);
+
+ if (ret)
+ return ret;
+
+ filp->private_data = udev;
+
+ /* stream-like non-seekable file descriptor */
+ stream_open(inode, filp);
+
+ return 0;
+}
+
+static int mhi_uci_release(struct inode *inode, struct file *file)
+{
+ struct uci_dev *udev = file->private_data;
+
+ /* Stop the channels, unless the MHI device has already been removed */
+ mutex_lock(&udev->mhi_dev_lock);
+ if (udev->mhi_dev)
+ mhi_unprepare_from_transfer(udev->mhi_dev);
+ mutex_unlock(&udev->mhi_dev_lock);
+
+ file->private_data = NULL;
+
+ kref_put(&udev->ref_count, mhi_uci_dev_release);
+
+ return 0;
+}
+
+static __poll_t mhi_uci_poll(struct file *file, poll_table *wait)
+{
+ struct uci_dev *udev = file->private_data;
+ __poll_t mask = 0;
+
+ poll_wait(file, &udev->ul_wq, wait);
+ poll_wait(file, &udev->dl_wq, wait);
+
+ /* Any buffer in the DL queue? */
+ spin_lock_bh(&udev->dl_queue_lock);
+ if (!list_empty(&udev->dl_queue))
+ mask |= EPOLLIN | EPOLLRDNORM;
+ spin_unlock_bh(&udev->dl_queue_lock);
+
+ /* If MHI queue is not full, write is possible */
+ mutex_lock(&udev->mhi_dev_lock);
+ if (!udev->mhi_dev)
+ mask = EPOLLERR;
+ else if (!mhi_queue_is_full(udev->mhi_dev, DMA_TO_DEVICE))
+ mask |= EPOLLOUT | EPOLLWRNORM;
+ mutex_unlock(&udev->mhi_dev_lock);
+
+ return mask;
+}
+
+static ssize_t mhi_uci_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *offp)
+{
+ struct uci_dev *udev = file->private_data;
+ size_t bytes_xfered = 0;
+ void *kbuf = NULL;
+ int ret;
+
+ if (!test_bit(UCI_DEV_UL_CAP, &udev->flags))
+ return -EOPNOTSUPP;
+
+ if (!buf || !count)
+ return -EINVAL;
+
+ /* Serialize MHI queueing */
+ if (mutex_lock_interruptible(&udev->write_lock))
+ return -EINTR;
+
+ while (count) {
+ size_t xfer_size;
+
+ /* Wait for available transfer descriptor */
+ ret = wait_event_interruptible(udev->ul_wq,
+ !test_bit(UCI_DEV_CONNECTED, &udev->flags) ||
+ !mhi_queue_is_full(udev->mhi_dev, DMA_TO_DEVICE));
+ if (ret)
+ break;
+
+ if (!test_bit(UCI_DEV_CONNECTED, &udev->flags)) {
+ ret = -EPIPE;
+ break;
+ }
+
+ xfer_size = min_t(size_t, count, udev->mtu);
+ kbuf = kmalloc(xfer_size, GFP_KERNEL);
+ if (!kbuf) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ if (copy_from_user(kbuf, buf, xfer_size)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ /* Add buffer to MHI queue */
+ ret = mhi_queue_buf(udev->mhi_dev, DMA_TO_DEVICE, kbuf, xfer_size, MHI_EOT);
+ if (ret)
+ break;
+
+ bytes_xfered += xfer_size;
+ count -= xfer_size;
+ buf += xfer_size;
+ kbuf = NULL;
+ }
+
+ mutex_unlock(&udev->write_lock);
+
+ /* Free the last buffer if queueing was aborted; kfree(NULL) is a no-op */
+ kfree(kbuf);
+
+ return ret ? ret : bytes_xfered;
+}
+
+static int mhi_uci_recycle_ubuf(struct uci_dev *udev, struct uci_buf *ubuf)
+{
+ int ret;
+
+ mutex_lock(&udev->mhi_dev_lock);
+
+ if (!udev->mhi_dev) {
+ ret = -ENODEV;
+ goto exit_unlock;
+ }
+
+ ret = mhi_queue_buf(udev->mhi_dev, DMA_FROM_DEVICE, ubuf->data,
+ udev->mtu, MHI_EOT);
+
+exit_unlock:
+ mutex_unlock(&udev->mhi_dev_lock);
+
+ return ret;
+}
+
+static ssize_t mhi_uci_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct uci_dev *udev = file->private_data;
+ bool recycle_buf = false;
+ struct uci_buf *ubuf;
+ size_t copy_len;
+ char *copy_ptr;
+ int ret = 0;
+
+ if (!test_bit(UCI_DEV_DL_CAP, &udev->flags))
+ return -EOPNOTSUPP;
+
+ if (!buf)
+ return -EINVAL;
+
+ spin_lock_irq(&udev->dl_queue_lock);
+
+ ret = wait_event_interruptible_lock_irq(udev->dl_wq,
+ !list_empty(&udev->dl_queue) ||
+ !test_bit(UCI_DEV_CONNECTED, &udev->flags),
+ udev->dl_queue_lock);
+ if (ret) {
+ goto err_unlock;
+ } else if (!test_bit(UCI_DEV_CONNECTED, &udev->flags)) {
+ ret = -EPIPE;
+ goto err_unlock;
+ }
+
+ ubuf = list_first_entry_or_null(&udev->dl_queue, struct uci_buf, node);
+ if (!ubuf) {
+ ret = -EIO;
+ goto err_unlock;
+ }
+
+ /* Consume the buffer */
+ copy_len = min_t(size_t, count, ubuf->len - ubuf->consumed);
+ copy_ptr = ubuf->data + ubuf->consumed;
+ ubuf->consumed += copy_len;
+
+ /* Remove buffer from the DL queue if entirely consumed */
+ if (ubuf->consumed == ubuf->len) {
+ list_del(&ubuf->node);
+ recycle_buf = true;
+ }
+
+ spin_unlock_irq(&udev->dl_queue_lock);
+
+ if (copy_to_user(buf, copy_ptr, copy_len)) {
+ /* Do not leak a buffer already removed from the DL queue */
+ if (recycle_buf)
+ kfree(ubuf->data);
+ return -EFAULT;
+ }
+
+ if (recycle_buf) {
+ /* Give the buffer back to MHI queue */
+ ret = mhi_uci_recycle_ubuf(udev, ubuf);
+ if (ret) /* unable to recycle, release */
+ kfree(ubuf->data);
+ }
+
+ return copy_len;
+
+err_unlock:
+ spin_unlock_irq(&udev->dl_queue_lock);
+
+ return ret;
+}
+
+static const struct file_operations mhidev_fops = {
+ .owner = THIS_MODULE,
+ .open = mhi_uci_open,
+ .release = mhi_uci_release,
+ .read = mhi_uci_read,
+ .write = mhi_uci_write,
+ .poll = mhi_uci_poll,
+};
+
+static void mhi_ul_xfer_cb(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_result)
+{
+ struct uci_dev *udev = dev_get_drvdata(&mhi_dev->dev);
+ struct device *dev = &mhi_dev->dev;
+
+ dev_dbg(dev, "%s: status: %d xfer_len: %zu\n", __func__,
+ mhi_result->transaction_status, mhi_result->bytes_xferd);
+
+ kfree(mhi_result->buf_addr);
+
+ if (!mhi_result->transaction_status)
+ wake_up_interruptible(&udev->ul_wq);
+}
+
+static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_result)
+{
+ struct uci_dev *udev = dev_get_drvdata(&mhi_dev->dev);
+ struct uci_buf *ubuf;
+
+ dev_dbg(&mhi_dev->dev, "%s: status: %d receive_len: %zu\n", __func__,
+ mhi_result->transaction_status, mhi_result->bytes_xferd);
+
+ if (mhi_result->transaction_status &&
+ mhi_result->transaction_status != -EOVERFLOW) {
+ kfree(mhi_result->buf_addr);
+ return;
+ }
+
+ /* ubuf is placed at the end of the buffer (cf. mhi_uci_queue_inbound()) */
+ ubuf = mhi_result->buf_addr + udev->mtu;
+
+ /* paranoia, should never happen */
+ if (WARN_ON(mhi_result->buf_addr != ubuf->data)) {
+ kfree(mhi_result->buf_addr);
+ return;
+ }
+
+ ubuf->data = mhi_result->buf_addr;
+ ubuf->len = mhi_result->bytes_xferd;
+ ubuf->consumed = 0;
+
+ spin_lock_bh(&udev->dl_queue_lock);
+ list_add_tail(&ubuf->node, &udev->dl_queue);
+ spin_unlock_bh(&udev->dl_queue_lock);
+
+ wake_up_interruptible(&udev->dl_wq);
+}
+
+static int mhi_uci_probe(struct mhi_device *mhi_dev,
+ const struct mhi_device_id *id)
+{
+ struct uci_dev *udev;
+ struct device *dev;
+ int index, err;
+
+ /* Create UCI data context */
+ udev = kzalloc(sizeof(*udev), GFP_KERNEL);
+ if (!udev)
+ return -ENOMEM;
+
+ /* Retrieve index */
+ mutex_lock(&uci_drv_lock);
+ index = idr_alloc(&uci_idr, udev, 0, MHI_MAX_UCI_MINORS, GFP_KERNEL);
+ mutex_unlock(&uci_drv_lock);
+ if (index < 0) {
+ err = index;
+ goto err_free_udev;
+ }
+
+ /* Init UCI data */
+ kref_init(&udev->ref_count);
+ mutex_init(&udev->mhi_dev_lock);
+ mutex_init(&udev->write_lock);
+ init_waitqueue_head(&udev->ul_wq);
+ init_waitqueue_head(&udev->dl_wq);
+ spin_lock_init(&udev->dl_queue_lock);
+ INIT_LIST_HEAD(&udev->dl_queue);
+ udev->mhi_dev = mhi_dev;
+ udev->minor = index;
+ udev->mtu = min_t(size_t, id->driver_data, MHI_MAX_MTU);
+ set_bit(UCI_DEV_CONNECTED, &udev->flags);
+
+ if (mhi_dev->dl_chan)
+ set_bit(UCI_DEV_DL_CAP, &udev->flags);
+ if (mhi_dev->ul_chan)
+ set_bit(UCI_DEV_UL_CAP, &udev->flags);
+
+ dev_set_drvdata(&mhi_dev->dev, udev);
+
+ /* Creates a new device and registers it with sysfs */
+ dev = device_create(uci_dev_class, &mhi_dev->dev,
+ MKDEV(uci_dev_major, index), udev,
+ dev_name(&mhi_dev->dev));
+ if (IS_ERR(dev)) {
+ err = PTR_ERR(dev);
+ goto err_free_idr;
+ }
+
+ dev_dbg(&mhi_dev->dev, "probed uci dev: %s\n", id->chan);
+
+ return 0;
+
+err_free_idr:
+ mutex_lock(&uci_drv_lock);
+ idr_remove(&uci_idr, udev->minor);
+ mutex_unlock(&uci_drv_lock);
+err_free_udev:
+ kfree(udev);
+ dev_set_drvdata(&mhi_dev->dev, NULL);
+
+ return err;
+}
+
+static void mhi_uci_remove(struct mhi_device *mhi_dev)
+{
+ struct uci_dev *udev = dev_get_drvdata(&mhi_dev->dev);
+
+ dev_set_drvdata(&mhi_dev->dev, NULL);
+
+ mutex_lock(&uci_drv_lock);
+ idr_remove(&uci_idr, udev->minor);
+ mutex_unlock(&uci_drv_lock);
+
+ clear_bit(UCI_DEV_CONNECTED, &udev->flags);
+ device_destroy(uci_dev_class, MKDEV(uci_dev_major, udev->minor));
+
+ /* Unlink mhi_dev from uci_dev */
+ mutex_lock(&udev->mhi_dev_lock);
+ udev->mhi_dev = NULL;
+ mutex_unlock(&udev->mhi_dev_lock);
+
+ /* wake up any blocked user */
+ wake_up_interruptible(&udev->dl_wq);
+ wake_up_interruptible(&udev->ul_wq);
+
+ kref_put(&udev->ref_count, mhi_uci_dev_release);
+}
+
+/* .driver_data stores max mtu */
+static const struct mhi_device_id mhi_uci_match_table[] = {
+ { .chan = "QMI", .driver_data = 0x1000},
+ {},
+};
+MODULE_DEVICE_TABLE(mhi, mhi_uci_match_table);
+
+static struct mhi_driver mhi_uci_driver = {
+ .id_table = mhi_uci_match_table,
+ .remove = mhi_uci_remove,
+ .probe = mhi_uci_probe,
+ .ul_xfer_cb = mhi_ul_xfer_cb,
+ .dl_xfer_cb = mhi_dl_xfer_cb,
+ .driver = {
+ .name = MHI_UCI_DRIVER_NAME,
+ },
+};
+
+static int __init mhi_uci_init(void)
+{
+ int ret;
+
+ ret = register_chrdev(0, MHI_UCI_DRIVER_NAME, &mhidev_fops);
+ if (ret < 0)
+ return ret;
+
+ uci_dev_major = ret;
+ uci_dev_class = class_create(THIS_MODULE, MHI_UCI_DRIVER_NAME);
+ if (IS_ERR(uci_dev_class)) {
+ unregister_chrdev(uci_dev_major, MHI_UCI_DRIVER_NAME);
+ return PTR_ERR(uci_dev_class);
+ }
+
+ ret = mhi_driver_register(&mhi_uci_driver);
+ if (ret) {
+ class_destroy(uci_dev_class);
+ unregister_chrdev(uci_dev_major, MHI_UCI_DRIVER_NAME);
+ }
+
+ return ret;
+}
+
+static void __exit mhi_uci_exit(void)
+{
+ mhi_driver_unregister(&mhi_uci_driver);
+ class_destroy(uci_dev_class);
+ unregister_chrdev(uci_dev_major, MHI_UCI_DRIVER_NAME);
+ idr_destroy(&uci_idr);
+}
+
+module_init(mhi_uci_init);
+module_exit(mhi_uci_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MHI UCI Driver");
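A closing note on the inbound buffer scheme used by mhi_uci_queue_inbound() and mhi_dl_xfer_cb() above: each receive buffer is a single allocation whose struct uci_buf bookkeeping sits in a trailer behind the mtu-sized payload, so no separate metadata allocation or lookup is needed. A minimal sketch of the two halves of that layout follows; the helper names uci_buf_alloc() and uci_buf_from_data() are illustrative and not part of the driver.

#include <linux/list.h>
#include <linux/slab.h>

struct uci_buf {
	struct list_head node;
	void *data;
	size_t len;
	size_t consumed;
};

/* One allocation: mtu payload bytes followed by the bookkeeping trailer */
static struct uci_buf *uci_buf_alloc(size_t mtu)
{
	void *base = kmalloc(mtu + sizeof(struct uci_buf), GFP_KERNEL);
	struct uci_buf *ubuf;

	if (!base)
		return NULL;

	ubuf = base + mtu;	/* trailer lives just after the payload */
	ubuf->data = base;	/* payload start, handed to mhi_queue_buf() */
	return ubuf;
}

/* Recover the trailer from the payload address MHI reports back */
static struct uci_buf *uci_buf_from_data(void *data, size_t mtu)
{
	return data + mtu;
}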