aboutsummaryrefslogtreecommitdiff
path: root/drivers/base
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/base')
-rw-r--r--drivers/base/dma_buf_lock/src/Kbuild18
-rw-r--r--drivers/base/dma_buf_lock/src/Makefile32
-rw-r--r--drivers/base/dma_buf_lock/src/dma_buf_lock.c481
-rw-r--r--drivers/base/dma_buf_lock/src/dma_buf_lock.h42
-rw-r--r--drivers/base/dma_buf_test_exporter/Kbuild18
-rw-r--r--drivers/base/dma_buf_test_exporter/Kconfig20
-rw-r--r--drivers/base/dma_buf_test_exporter/Makefile30
-rw-r--r--drivers/base/dma_buf_test_exporter/dma-buf-test-exporter.c703
-rw-r--r--drivers/base/kds/Kbuild17
-rw-r--r--drivers/base/kds/Kconfig20
-rw-r--r--drivers/base/kds/Makefile31
-rw-r--r--drivers/base/kds/kds.c557
-rw-r--r--drivers/base/smc_protected_mode_switcher/Kbuild18
-rw-r--r--drivers/base/smc_protected_mode_switcher/Kconfig20
-rw-r--r--drivers/base/smc_protected_mode_switcher/Makefile31
-rw-r--r--drivers/base/smc_protected_mode_switcher/protected_mode_switcher_device.c164
-rw-r--r--drivers/base/smc_protected_mode_switcher/protected_mode_switcher_smc.S23
-rw-r--r--drivers/base/ump/Kbuild18
-rw-r--r--drivers/base/ump/Kconfig26
-rw-r--r--drivers/base/ump/docs/Doxyfile125
-rw-r--r--drivers/base/ump/example_kernel_api.c73
-rw-r--r--drivers/base/ump/example_user_api.c153
-rw-r--r--drivers/base/ump/src/Kbuild50
-rw-r--r--drivers/base/ump/src/Makefile81
-rw-r--r--drivers/base/ump/src/Makefile.common19
-rw-r--r--drivers/base/ump/src/arch-arm/config.h27
-rw-r--r--drivers/base/ump/src/arch-arm64/config.h27
-rw-r--r--drivers/base/ump/src/common/ump_kernel_core.c756
-rw-r--r--drivers/base/ump/src/common/ump_kernel_core.h228
-rw-r--r--drivers/base/ump/src/common/ump_kernel_descriptor_mapping.c162
-rw-r--r--drivers/base/ump/src/common/ump_kernel_descriptor_mapping.h94
-rw-r--r--drivers/base/ump/src/common/ump_kernel_priv.h80
-rw-r--r--drivers/base/ump/src/imports/ion/Makefile53
-rw-r--r--drivers/base/ump/src/imports/ion/ump_kernel_import_ion.c204
-rw-r--r--drivers/base/ump/src/linux/ump_kernel_linux.c831
-rw-r--r--drivers/base/ump/src/linux/ump_kernel_linux_mem.c250
-rw-r--r--drivers/base/ump/src/linux/ump_kernel_linux_mem.h26
-rw-r--r--drivers/base/ump/src/ump_arch.h42
-rw-r--r--drivers/base/ump/ump_ref_drv.h33
39 files changed, 5583 insertions, 0 deletions
diff --git a/drivers/base/dma_buf_lock/src/Kbuild b/drivers/base/dma_buf_lock/src/Kbuild
new file mode 100644
index 000000000000..68dad07ad607
--- /dev/null
+++ b/drivers/base/dma_buf_lock/src/Kbuild
@@ -0,0 +1,18 @@
+#
+# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+ifneq ($(CONFIG_DMA_SHARED_BUFFER),)
+obj-m := dma_buf_lock.o
+endif
diff --git a/drivers/base/dma_buf_lock/src/Makefile b/drivers/base/dma_buf_lock/src/Makefile
new file mode 100644
index 000000000000..cf76132e6dde
--- /dev/null
+++ b/drivers/base/dma_buf_lock/src/Makefile
@@ -0,0 +1,32 @@
+#
+# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+# linux build system bootstrap for out-of-tree module
+
+# default to building for the host
+ARCH ?= $(shell uname -m)
+
+ifeq ($(KDIR),)
+$(error Must specify KDIR to point to the kernel to target)
+endif
+
+all: dma_buf_lock
+
+dma_buf_lock:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../../include"
+
+clean:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+
diff --git a/drivers/base/dma_buf_lock/src/dma_buf_lock.c b/drivers/base/dma_buf_lock/src/dma_buf_lock.c
new file mode 100644
index 000000000000..9613ffcfd696
--- /dev/null
+++ b/drivers/base/dma_buf_lock/src/dma_buf_lock.c
@@ -0,0 +1,481 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <asm/uaccess.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/dma-buf.h>
+#include <linux/kds.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+#include <linux/anon_inodes.h>
+#include <linux/file.h>
+
+#include "dma_buf_lock.h"
+
+/* Maximum number of buffers that a single handle can address */
+#define DMA_BUF_LOCK_BUF_MAX 32
+
+#define DMA_BUF_LOCK_DEBUG 1
+
+static dev_t dma_buf_lock_dev;
+static struct cdev dma_buf_lock_cdev;
+static struct class *dma_buf_lock_class;
+static char dma_buf_lock_dev_name[] = "dma_buf_lock";
+
+#ifdef HAVE_UNLOCKED_IOCTL
+static long dma_buf_lock_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+static int dma_buf_lock_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
+#endif
+
+static struct file_operations dma_buf_lock_fops =
+{
+ .owner = THIS_MODULE,
+#ifdef HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = dma_buf_lock_ioctl,
+#else
+ .ioctl = dma_buf_lock_ioctl,
+#endif
+ .compat_ioctl = dma_buf_lock_ioctl,
+};
+
+typedef struct dma_buf_lock_resource
+{
+ int *list_of_dma_buf_fds; /* List of buffers copied from userspace */
+ atomic_t locked; /* Status of lock */
+ struct dma_buf **dma_bufs;
+ struct kds_resource **kds_resources; /* List of KDS resources associated with buffers */
+ struct kds_resource_set *resource_set;
+ unsigned long exclusive; /* Exclusive access bitmap */
+ wait_queue_head_t wait;
+ struct kds_callback cb;
+ struct kref refcount;
+ struct list_head link;
+ int count;
+} dma_buf_lock_resource;
+
+static LIST_HEAD(dma_buf_lock_resource_list);
+static DEFINE_MUTEX(dma_buf_lock_mutex);
+
+static inline int is_dma_buf_lock_file(struct file *);
+static void dma_buf_lock_dounlock(struct kref *ref);
+
+static int dma_buf_lock_handle_release(struct inode *inode, struct file *file)
+{
+ dma_buf_lock_resource *resource;
+
+ if (!is_dma_buf_lock_file(file))
+ return -EINVAL;
+
+ resource = file->private_data;
+#if DMA_BUF_LOCK_DEBUG
+ printk("dma_buf_lock_handle_release\n");
+#endif
+ mutex_lock(&dma_buf_lock_mutex);
+ kref_put(&resource->refcount, dma_buf_lock_dounlock);
+ mutex_unlock(&dma_buf_lock_mutex);
+
+ return 0;
+}
+
+static void dma_buf_lock_kds_callback(void *param1, void *param2)
+{
+ dma_buf_lock_resource *resource = param1;
+#if DMA_BUF_LOCK_DEBUG
+ printk("dma_buf_lock_kds_callback\n");
+#endif
+ atomic_set(&resource->locked, 1);
+
+ wake_up(&resource->wait);
+}
+
+static unsigned int dma_buf_lock_handle_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ dma_buf_lock_resource *resource;
+ unsigned int ret = 0;
+
+ if (!is_dma_buf_lock_file(file))
+ return POLLERR;
+
+ resource = file->private_data;
+#if DMA_BUF_LOCK_DEBUG
+ printk("dma_buf_lock_handle_poll\n");
+#endif
+ if (1 == atomic_read(&resource->locked))
+ {
+ /* Resources have been locked */
+ ret = POLLIN | POLLRDNORM;
+ if (resource->exclusive)
+ {
+ ret |= POLLOUT | POLLWRNORM;
+ }
+ }
+ else
+ {
+ if (!poll_does_not_wait(wait))
+ {
+ poll_wait(file, &resource->wait, wait);
+ }
+ }
+#if DMA_BUF_LOCK_DEBUG
+ printk("dma_buf_lock_handle_poll : return %i\n", ret);
+#endif
+ return ret;
+}
+
+static const struct file_operations dma_buf_lock_handle_fops = {
+ .release = dma_buf_lock_handle_release,
+ .poll = dma_buf_lock_handle_poll,
+};
+
+/*
+ * is_dma_buf_lock_file - Check if struct file* is associated with dma_buf_lock
+ */
+static inline int is_dma_buf_lock_file(struct file *file)
+{
+ return file->f_op == &dma_buf_lock_handle_fops;
+}
+
+
+
+/*
+ * Start requested lock.
+ *
+ * Allocates required memory, copies dma_buf_fd list from userspace,
+ * acquires related KDS resources, and starts the lock.
+ */
+static int dma_buf_lock_dolock(dma_buf_lock_k_request *request)
+{
+ dma_buf_lock_resource *resource;
+ int size;
+ int fd;
+ int i;
+ int ret;
+
+ if (NULL == request->list_of_dma_buf_fds)
+ {
+ return -EINVAL;
+ }
+ if (request->count <= 0)
+ {
+ return -EINVAL;
+ }
+ if (request->count > DMA_BUF_LOCK_BUF_MAX)
+ {
+ return -EINVAL;
+ }
+ if (request->exclusive != DMA_BUF_LOCK_NONEXCLUSIVE &&
+ request->exclusive != DMA_BUF_LOCK_EXCLUSIVE)
+ {
+ return -EINVAL;
+ }
+
+ resource = kzalloc(sizeof(dma_buf_lock_resource), GFP_KERNEL);
+ if (NULL == resource)
+ {
+ return -ENOMEM;
+ }
+
+ atomic_set(&resource->locked, 0);
+ kref_init(&resource->refcount);
+ INIT_LIST_HEAD(&resource->link);
+ resource->count = request->count;
+
+ /* Allocate space to store dma_buf_fds received from user space */
+ size = request->count * sizeof(int);
+ resource->list_of_dma_buf_fds = kmalloc(size, GFP_KERNEL);
+
+ if (NULL == resource->list_of_dma_buf_fds)
+ {
+ kfree(resource);
+ return -ENOMEM;
+ }
+
+ /* Allocate space to store dma_buf pointers associated with dma_buf_fds */
+ size = sizeof(struct dma_buf *) * request->count;
+ resource->dma_bufs = kmalloc(size, GFP_KERNEL);
+
+ if (NULL == resource->dma_bufs)
+ {
+ kfree(resource->list_of_dma_buf_fds);
+ kfree(resource);
+ return -ENOMEM;
+ }
+ /* Allocate space to store kds_resources associated with dma_buf_fds */
+ size = sizeof(struct kds_resource *) * request->count;
+ resource->kds_resources = kmalloc(size, GFP_KERNEL);
+
+ if (NULL == resource->kds_resources)
+ {
+ kfree(resource->dma_bufs);
+ kfree(resource->list_of_dma_buf_fds);
+ kfree(resource);
+ return -ENOMEM;
+ }
+
+ /* Copy requested list of dma_buf_fds from user space */
+ size = request->count * sizeof(int);
+ if (0 != copy_from_user(resource->list_of_dma_buf_fds, (void __user *)request->list_of_dma_buf_fds, size))
+ {
+ kfree(resource->list_of_dma_buf_fds);
+ kfree(resource->dma_bufs);
+ kfree(resource->kds_resources);
+ kfree(resource);
+ return -ENOMEM;
+ }
+#if DMA_BUF_LOCK_DEBUG
+ for (i = 0; i < request->count; i++)
+ {
+ printk("dma_buf %i = %X\n", i, resource->list_of_dma_buf_fds[i]);
+ }
+#endif
+
+ /* Add resource to global list */
+ mutex_lock(&dma_buf_lock_mutex);
+
+ list_add(&resource->link, &dma_buf_lock_resource_list);
+
+ mutex_unlock(&dma_buf_lock_mutex);
+
+ for (i = 0; i < request->count; i++)
+ {
+ /* Convert fd into dma_buf structure */
+ resource->dma_bufs[i] = dma_buf_get(resource->list_of_dma_buf_fds[i]);
+
+ if (IS_ERR_VALUE(PTR_ERR(resource->dma_bufs[i])))
+ {
+ mutex_lock(&dma_buf_lock_mutex);
+ kref_put(&resource->refcount, dma_buf_lock_dounlock);
+ mutex_unlock(&dma_buf_lock_mutex);
+ return -EINVAL;
+ }
+
+ /*Get kds_resource associated with dma_buf */
+ resource->kds_resources[i] = get_dma_buf_kds_resource(resource->dma_bufs[i]);
+
+ if (NULL == resource->kds_resources[i])
+ {
+ mutex_lock(&dma_buf_lock_mutex);
+ kref_put(&resource->refcount, dma_buf_lock_dounlock);
+ mutex_unlock(&dma_buf_lock_mutex);
+ return -EINVAL;
+ }
+#if DMA_BUF_LOCK_DEBUG
+ printk("dma_buf_lock_dolock : dma_buf_fd %i dma_buf %X kds_resource %X\n", resource->list_of_dma_buf_fds[i],
+ (unsigned int)resource->dma_bufs[i], (unsigned int)resource->kds_resources[i]);
+#endif
+ }
+
+ kds_callback_init(&resource->cb, 1, dma_buf_lock_kds_callback);
+ init_waitqueue_head(&resource->wait);
+
+ kref_get(&resource->refcount);
+
+ /* Create file descriptor associated with lock request */
+ fd = anon_inode_getfd("dma_buf_lock", &dma_buf_lock_handle_fops,
+ (void *)resource, 0);
+ if (fd < 0)
+ {
+ mutex_lock(&dma_buf_lock_mutex);
+ kref_put(&resource->refcount, dma_buf_lock_dounlock);
+ kref_put(&resource->refcount, dma_buf_lock_dounlock);
+ mutex_unlock(&dma_buf_lock_mutex);
+ return fd;
+ }
+
+ resource->exclusive = request->exclusive;
+
+ /* Start locking process */
+ ret = kds_async_waitall(&resource->resource_set,
+ &resource->cb, resource, NULL,
+ request->count, &resource->exclusive,
+ resource->kds_resources);
+
+ if (IS_ERR_VALUE(ret))
+ {
+ put_unused_fd(fd);
+
+ mutex_lock(&dma_buf_lock_mutex);
+ kref_put(&resource->refcount, dma_buf_lock_dounlock);
+ mutex_unlock(&dma_buf_lock_mutex);
+
+ return ret;
+ }
+
+#if DMA_BUF_LOCK_DEBUG
+ printk("dma_buf_lock_dolock : complete\n");
+#endif
+ mutex_lock(&dma_buf_lock_mutex);
+ kref_put(&resource->refcount, dma_buf_lock_dounlock);
+ mutex_unlock(&dma_buf_lock_mutex);
+
+ return fd;
+}
+
+static void dma_buf_lock_dounlock(struct kref *ref)
+{
+ int i;
+ dma_buf_lock_resource *resource = container_of(ref, dma_buf_lock_resource, refcount);
+
+ atomic_set(&resource->locked, 0);
+
+ kds_callback_term(&resource->cb);
+
+ kds_resource_set_release(&resource->resource_set);
+
+ list_del(&resource->link);
+
+ for (i = 0; i < resource->count; i++)
+ {
+ dma_buf_put(resource->dma_bufs[i]);
+ }
+
+ kfree(resource->kds_resources);
+ kfree(resource->dma_bufs);
+ kfree(resource->list_of_dma_buf_fds);
+ kfree(resource);
+}
+
+static int __init dma_buf_lock_init(void)
+{
+ int err;
+#if DMA_BUF_LOCK_DEBUG
+ printk("dma_buf_lock_init\n");
+#endif
+ err = alloc_chrdev_region(&dma_buf_lock_dev, 0, 1, dma_buf_lock_dev_name);
+
+ if (0 == err)
+ {
+ cdev_init(&dma_buf_lock_cdev, &dma_buf_lock_fops);
+
+ err = cdev_add(&dma_buf_lock_cdev, dma_buf_lock_dev, 1);
+
+ if (0 == err)
+ {
+ dma_buf_lock_class = class_create(THIS_MODULE, dma_buf_lock_dev_name);
+ if (IS_ERR(dma_buf_lock_class))
+ {
+ err = PTR_ERR(dma_buf_lock_class);
+ }
+ else
+ {
+ struct device *mdev;
+ mdev = device_create(dma_buf_lock_class, NULL, dma_buf_lock_dev, NULL, dma_buf_lock_dev_name);
+ if (!IS_ERR(mdev))
+ {
+ return 0;
+ }
+
+ err = PTR_ERR(mdev);
+ class_destroy(dma_buf_lock_class);
+ }
+ cdev_del(&dma_buf_lock_cdev);
+ }
+
+ unregister_chrdev_region(dma_buf_lock_dev, 1);
+ }
+#if DMA_BUF_LOCK_DEBUG
+ printk("dma_buf_lock_init failed\n");
+#endif
+ return err;
+}
+
+static void __exit dma_buf_lock_exit(void)
+{
+#if DMA_BUF_LOCK_DEBUG
+ printk("dma_buf_lock_exit\n");
+#endif
+
+ /* Unlock all outstanding references */
+ while (1)
+ {
+ mutex_lock(&dma_buf_lock_mutex);
+ if (list_empty(&dma_buf_lock_resource_list))
+ {
+ mutex_unlock(&dma_buf_lock_mutex);
+ break;
+ }
+ else
+ {
+ dma_buf_lock_resource *resource = list_entry(dma_buf_lock_resource_list.next,
+ dma_buf_lock_resource, link);
+ kref_put(&resource->refcount, dma_buf_lock_dounlock);
+ mutex_unlock(&dma_buf_lock_mutex);
+ }
+ }
+
+ device_destroy(dma_buf_lock_class, dma_buf_lock_dev);
+
+ class_destroy(dma_buf_lock_class);
+
+ cdev_del(&dma_buf_lock_cdev);
+
+ unregister_chrdev_region(dma_buf_lock_dev, 1);
+}
+
+#ifdef HAVE_UNLOCKED_IOCTL
+static long dma_buf_lock_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+#else
+static int dma_buf_lock_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+#endif
+{
+ dma_buf_lock_k_request request;
+ int size = _IOC_SIZE(cmd);
+
+ if (_IOC_TYPE(cmd) != DMA_BUF_LOCK_IOC_MAGIC)
+ {
+ return -ENOTTY;
+
+ }
+ if ((_IOC_NR(cmd) < DMA_BUF_LOCK_IOC_MINNR) || (_IOC_NR(cmd) > DMA_BUF_LOCK_IOC_MAXNR))
+ {
+ return -ENOTTY;
+ }
+
+ switch (cmd)
+ {
+ case DMA_BUF_LOCK_FUNC_LOCK_ASYNC:
+ if (size != sizeof(dma_buf_lock_k_request))
+ {
+ return -ENOTTY;
+ }
+ if (copy_from_user(&request, (void __user *)arg, size))
+ {
+ return -EFAULT;
+ }
+#if DMA_BUF_LOCK_DEBUG
+ printk("DMA_BUF_LOCK_FUNC_LOCK_ASYNC - %i\n", request.count);
+#endif
+ return dma_buf_lock_dolock(&request);
+ }
+
+ return -ENOTTY;
+}
+
+module_init(dma_buf_lock_init);
+module_exit(dma_buf_lock_exit);
+
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/base/dma_buf_lock/src/dma_buf_lock.h b/drivers/base/dma_buf_lock/src/dma_buf_lock.h
new file mode 100644
index 000000000000..e1b9348b9cfd
--- /dev/null
+++ b/drivers/base/dma_buf_lock/src/dma_buf_lock.h
@@ -0,0 +1,42 @@
+/*
+ *
+ * (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#ifndef _DMA_BUF_LOCK_H
+#define _DMA_BUF_LOCK_H
+
+typedef enum dma_buf_lock_exclusive
+{
+ DMA_BUF_LOCK_NONEXCLUSIVE = 0,
+ DMA_BUF_LOCK_EXCLUSIVE = -1
+} dma_buf_lock_exclusive;
+
+typedef struct dma_buf_lock_k_request
+{
+ int count;
+ int *list_of_dma_buf_fds;
+ int timeout;
+ dma_buf_lock_exclusive exclusive;
+} dma_buf_lock_k_request;
+
+#define DMA_BUF_LOCK_IOC_MAGIC '~'
+
+#define DMA_BUF_LOCK_FUNC_LOCK_ASYNC _IOW(DMA_BUF_LOCK_IOC_MAGIC, 11, dma_buf_lock_k_request)
+
+#define DMA_BUF_LOCK_IOC_MINNR 11
+#define DMA_BUF_LOCK_IOC_MAXNR 11
+
+#endif /* _DMA_BUF_LOCK_H */
diff --git a/drivers/base/dma_buf_test_exporter/Kbuild b/drivers/base/dma_buf_test_exporter/Kbuild
new file mode 100644
index 000000000000..56b9f86d2d52
--- /dev/null
+++ b/drivers/base/dma_buf_test_exporter/Kbuild
@@ -0,0 +1,18 @@
+#
+# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+ifneq ($(CONFIG_DMA_SHARED_BUFFER),)
+obj-$(CONFIG_DMA_SHARED_BUFFER_TEST_EXPORTER) += dma-buf-test-exporter.o
+endif
diff --git a/drivers/base/dma_buf_test_exporter/Kconfig b/drivers/base/dma_buf_test_exporter/Kconfig
new file mode 100644
index 000000000000..974f0c23dbd2
--- /dev/null
+++ b/drivers/base/dma_buf_test_exporter/Kconfig
@@ -0,0 +1,20 @@
+#
+# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+config DMA_SHARED_BUFFER_TEST_EXPORTER
+ tristate "Test exporter for the dma-buf framework"
+ depends on DMA_SHARED_BUFFER
+ help
+	  This option enables the test exporter usable to help test importers.
diff --git a/drivers/base/dma_buf_test_exporter/Makefile b/drivers/base/dma_buf_test_exporter/Makefile
new file mode 100644
index 000000000000..06e3d5c121a5
--- /dev/null
+++ b/drivers/base/dma_buf_test_exporter/Makefile
@@ -0,0 +1,30 @@
+#
+# (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+# linux build system bootstrap for out-of-tree module
+
+# default to building for the host
+ARCH ?= $(shell uname -m)
+
+ifeq ($(KDIR),)
+$(error Must specify KDIR to point to the kernel to target)
+endif
+
+all:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../include" CONFIG_DMA_SHARED_BUFFER_TEST_EXPORTER=m
+
+clean:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+
diff --git a/drivers/base/dma_buf_test_exporter/dma-buf-test-exporter.c b/drivers/base/dma_buf_test_exporter/dma-buf-test-exporter.c
new file mode 100644
index 000000000000..08ab49aebe1e
--- /dev/null
+++ b/drivers/base/dma_buf_test_exporter/dma-buf-test-exporter.c
@@ -0,0 +1,703 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/dma-buf-test-exporter.h>
+#include <linux/dma-buf.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/atomic.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
+#include <linux/dma-attrs.h>
+#endif
+#include <linux/dma-mapping.h>
+#endif
+
+struct dma_buf_te_alloc {
+ /* the real alloc */
+ int nr_pages;
+ struct page **pages;
+
+ /* the debug usage tracking */
+ int nr_attached_devices;
+ int nr_device_mappings;
+ int nr_cpu_mappings;
+
+ /* failure simulation */
+ int fail_attach;
+ int fail_map;
+ int fail_mmap;
+
+ bool contiguous;
+ dma_addr_t contig_dma_addr;
+ void *contig_cpu_addr;
+};
+
+static struct miscdevice te_device;
+
+static int dma_buf_te_attach(struct dma_buf *buf, struct device *dev, struct dma_buf_attachment *attachment)
+{
+ struct dma_buf_te_alloc *alloc;
+ alloc = buf->priv;
+
+ if (alloc->fail_attach)
+ return -EFAULT;
+
+ /* dma_buf is externally locked during call */
+ alloc->nr_attached_devices++;
+ return 0;
+}
+
+static void dma_buf_te_detach(struct dma_buf *buf, struct dma_buf_attachment *attachment)
+{
+ struct dma_buf_te_alloc *alloc;
+ alloc = buf->priv;
+ /* dma_buf is externally locked during call */
+
+ alloc->nr_attached_devices--;
+}
+
+static struct sg_table *dma_buf_te_map(struct dma_buf_attachment *attachment, enum dma_data_direction direction)
+{
+ struct sg_table *sg;
+ struct scatterlist *iter;
+ struct dma_buf_te_alloc *alloc;
+ int i;
+ int ret;
+
+ alloc = attachment->dmabuf->priv;
+
+ if (alloc->fail_map)
+ return ERR_PTR(-ENOMEM);
+
+#if !(defined(ARCH_HAS_SG_CHAIN) || defined(CONFIG_ARCH_HAS_SG_CHAIN))
+ /* if the ARCH can't chain we can't have allocs larger than a single sg can hold */
+ if (alloc->nr_pages > SG_MAX_SINGLE_ALLOC)
+ return ERR_PTR(-EINVAL);
+#endif
+
+ sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!sg)
+ return ERR_PTR(-ENOMEM);
+
+ /* from here we access the allocation object, so lock the dmabuf pointing to it */
+ mutex_lock(&attachment->dmabuf->lock);
+
+ if (alloc->contiguous)
+ ret = sg_alloc_table(sg, 1, GFP_KERNEL);
+ else
+ ret = sg_alloc_table(sg, alloc->nr_pages, GFP_KERNEL);
+ if (ret) {
+ mutex_unlock(&attachment->dmabuf->lock);
+ kfree(sg);
+ return ERR_PTR(ret);
+ }
+
+ if (alloc->contiguous) {
+ sg_dma_len(sg->sgl) = alloc->nr_pages * PAGE_SIZE;
+ sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(alloc->contig_dma_addr)), alloc->nr_pages * PAGE_SIZE, 0);
+ sg_dma_address(sg->sgl) = alloc->contig_dma_addr;
+ } else {
+ for_each_sg(sg->sgl, iter, alloc->nr_pages, i)
+ sg_set_page(iter, alloc->pages[i], PAGE_SIZE, 0);
+ }
+
+ if (!dma_map_sg(attachment->dev, sg->sgl, sg->nents, direction)) {
+ mutex_unlock(&attachment->dmabuf->lock);
+ sg_free_table(sg);
+ kfree(sg);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ alloc->nr_device_mappings++;
+ mutex_unlock(&attachment->dmabuf->lock);
+ return sg;
+}
+
+static void dma_buf_te_unmap(struct dma_buf_attachment *attachment,
+ struct sg_table *sg, enum dma_data_direction direction)
+{
+ struct dma_buf_te_alloc *alloc;
+
+ alloc = attachment->dmabuf->priv;
+
+ dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, direction);
+ sg_free_table(sg);
+ kfree(sg);
+
+ mutex_lock(&attachment->dmabuf->lock);
+ alloc->nr_device_mappings--;
+ mutex_unlock(&attachment->dmabuf->lock);
+}
+
+static void dma_buf_te_release(struct dma_buf *buf)
+{
+ int i;
+ struct dma_buf_te_alloc *alloc;
+ alloc = buf->priv;
+ /* no need for locking */
+
+ if (alloc->contiguous) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ dma_free_attrs(te_device.this_device,
+ alloc->nr_pages * PAGE_SIZE,
+ alloc->contig_cpu_addr,
+ alloc->contig_dma_addr,
+ DMA_ATTR_WRITE_COMBINE);
+
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ dma_free_attrs(te_device.this_device,
+ alloc->nr_pages * PAGE_SIZE,
+ alloc->contig_cpu_addr, alloc->contig_dma_addr, &attrs);
+#else
+ dma_free_writecombine(te_device.this_device,
+ alloc->nr_pages * PAGE_SIZE,
+ alloc->contig_cpu_addr, alloc->contig_dma_addr);
+#endif
+ } else {
+ for (i = 0; i < alloc->nr_pages; i++)
+ __free_page(alloc->pages[i]);
+ }
+ kfree(alloc->pages);
+ kfree(alloc);
+}
+
+
+static void dma_buf_te_mmap_open(struct vm_area_struct *vma)
+{
+ struct dma_buf *dma_buf;
+ struct dma_buf_te_alloc *alloc;
+ dma_buf = vma->vm_private_data;
+ alloc = dma_buf->priv;
+
+ mutex_lock(&dma_buf->lock);
+ alloc->nr_cpu_mappings++;
+ mutex_unlock(&dma_buf->lock);
+}
+
+static void dma_buf_te_mmap_close(struct vm_area_struct *vma)
+{
+ struct dma_buf *dma_buf;
+ struct dma_buf_te_alloc *alloc;
+ dma_buf = vma->vm_private_data;
+ alloc = dma_buf->priv;
+
+ BUG_ON(alloc->nr_cpu_mappings <= 0);
+ mutex_lock(&dma_buf->lock);
+ alloc->nr_cpu_mappings--;
+ mutex_unlock(&dma_buf->lock);
+}
+
+static int dma_buf_te_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct dma_buf_te_alloc *alloc;
+	struct dma_buf *dmabuf;
+	struct page *pageptr;
+
+	dmabuf = vma->vm_private_data;
+	alloc = dmabuf->priv;
+
+	/* valid page indices are 0 .. nr_pages-1, so reject pgoff == nr_pages too */
+	if (vmf->pgoff >= alloc->nr_pages)
+		return VM_FAULT_SIGBUS;
+
+	pageptr = alloc->pages[vmf->pgoff];
+
+	BUG_ON(!pageptr);
+
+	get_page(pageptr);
+	vmf->page = pageptr;
+
+	return 0;
+}
+
+struct vm_operations_struct dma_buf_te_vm_ops = {
+ .open = dma_buf_te_mmap_open,
+ .close = dma_buf_te_mmap_close,
+ .fault = dma_buf_te_mmap_fault
+};
+
+static int dma_buf_te_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct dma_buf_te_alloc *alloc;
+ alloc = dmabuf->priv;
+
+ if (alloc->fail_mmap)
+ return -ENOMEM;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+#else
+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTEXPAND;
+#endif
+ vma->vm_ops = &dma_buf_te_vm_ops;
+ vma->vm_private_data = dmabuf;
+
+ /* we fault in the pages on access */
+
+ /* call open to do the ref-counting */
+ dma_buf_te_vm_ops.open(vma);
+
+ return 0;
+}
+
+static void *dma_buf_te_kmap_atomic(struct dma_buf *buf, unsigned long page_num)
+{
+ /* IGNORE */
+ return NULL;
+}
+
+static void *dma_buf_te_kmap(struct dma_buf *buf, unsigned long page_num)
+{
+ struct dma_buf_te_alloc *alloc;
+
+ alloc = buf->priv;
+ if (page_num >= alloc->nr_pages)
+ return NULL;
+
+ return kmap(alloc->pages[page_num]);
+}
+static void dma_buf_te_kunmap(struct dma_buf *buf,
+ unsigned long page_num, void *addr)
+{
+ struct dma_buf_te_alloc *alloc;
+
+ alloc = buf->priv;
+ if (page_num >= alloc->nr_pages)
+ return;
+
+ kunmap(alloc->pages[page_num]);
+ return;
+}
+
+static struct dma_buf_ops dma_buf_te_ops = {
+ /* real handlers */
+ .attach = dma_buf_te_attach,
+ .detach = dma_buf_te_detach,
+ .map_dma_buf = dma_buf_te_map,
+ .unmap_dma_buf = dma_buf_te_unmap,
+ .release = dma_buf_te_release,
+ .mmap = dma_buf_te_mmap,
+ .kmap = dma_buf_te_kmap,
+ .kunmap = dma_buf_te_kunmap,
+
+ /* nop handlers for mandatory functions we ignore */
+ .kmap_atomic = dma_buf_te_kmap_atomic
+};
+
+static int do_dma_buf_te_ioctl_version(struct dma_buf_te_ioctl_version __user *buf)
+{
+ struct dma_buf_te_ioctl_version v;
+
+ if (copy_from_user(&v, buf, sizeof(v)))
+ return -EFAULT;
+
+ if (v.op != DMA_BUF_TE_ENQ)
+ return -EFAULT;
+
+ v.op = DMA_BUF_TE_ACK;
+ v.major = DMA_BUF_TE_VER_MAJOR;
+ v.minor = DMA_BUF_TE_VER_MINOR;
+
+ if (copy_to_user(buf, &v, sizeof(v)))
+ return -EFAULT;
+ else
+ return 0;
+}
+
+static int do_dma_buf_te_ioctl_alloc(struct dma_buf_te_ioctl_alloc __user *buf, bool contiguous)
+{
+ struct dma_buf_te_ioctl_alloc alloc_req;
+ struct dma_buf_te_alloc *alloc;
+ struct dma_buf *dma_buf;
+ int i = 0;
+ int fd;
+
+ if (copy_from_user(&alloc_req, buf, sizeof(alloc_req))) {
+ dev_err(te_device.this_device, "%s: couldn't get user data", __func__);
+ goto no_input;
+ }
+
+ if (!alloc_req.size) {
+ dev_err(te_device.this_device, "%s: no size specified", __func__);
+ goto invalid_size;
+ }
+
+#if !(defined(ARCH_HAS_SG_CHAIN) || defined(CONFIG_ARCH_HAS_SG_CHAIN))
+	/* Whilst it is possible to allocate a larger buffer, we won't be able to
+	 * map it during actual usage (mmap() still succeeds). We fail here so
+	 * userspace code can deal with it earlier than having driver failure
+	 * later on. */
+ if (alloc_req.size > SG_MAX_SINGLE_ALLOC) {
+ dev_err(te_device.this_device, "%s: buffer size of %llu pages exceeded the mapping limit of %lu pages",
+ __func__, alloc_req.size, SG_MAX_SINGLE_ALLOC);
+ goto invalid_size;
+ }
+#endif
+
+ alloc = kzalloc(sizeof(struct dma_buf_te_alloc), GFP_KERNEL);
+ if (NULL == alloc) {
+ dev_err(te_device.this_device, "%s: couldn't alloc object", __func__);
+ goto no_alloc_object;
+ }
+
+ alloc->nr_pages = alloc_req.size;
+ alloc->contiguous = contiguous;
+
+ alloc->pages = kzalloc(sizeof(struct page *) * alloc->nr_pages, GFP_KERNEL);
+ if (!alloc->pages) {
+ dev_err(te_device.this_device,
+ "%s: couldn't alloc %d page structures", __func__,
+ alloc->nr_pages);
+ goto free_alloc_object;
+ }
+
+ if (contiguous) {
+ dma_addr_t dma_aux;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ alloc->contig_cpu_addr = dma_alloc_attrs(te_device.this_device,
+ alloc->nr_pages * PAGE_SIZE,
+ &alloc->contig_dma_addr,
+ GFP_KERNEL | __GFP_ZERO,
+ DMA_ATTR_WRITE_COMBINE);
+
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ alloc->contig_cpu_addr = dma_alloc_attrs(te_device.this_device,
+ alloc->nr_pages * PAGE_SIZE,
+ &alloc->contig_dma_addr,
+ GFP_KERNEL | __GFP_ZERO, &attrs);
+#else
+ alloc->contig_cpu_addr = dma_alloc_writecombine(te_device.this_device,
+ alloc->nr_pages * PAGE_SIZE,
+ &alloc->contig_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
+#endif
+ if (!alloc->contig_cpu_addr) {
+ dev_err(te_device.this_device, "%s: couldn't alloc contiguous buffer %d pages", __func__, alloc->nr_pages);
+ goto free_page_struct;
+ }
+ dma_aux = alloc->contig_dma_addr;
+ for (i = 0; i < alloc->nr_pages; i++) {
+ alloc->pages[i] = pfn_to_page(PFN_DOWN(dma_aux));
+ dma_aux += PAGE_SIZE;
+ }
+ } else {
+ for (i = 0; i < alloc->nr_pages; i++) {
+ alloc->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (NULL == alloc->pages[i]) {
+ dev_err(te_device.this_device, "%s: couldn't alloc page", __func__);
+ goto no_page;
+ }
+ }
+ }
+
+ /* alloc ready, let's export it */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ {
+ struct dma_buf_export_info export_info = {
+ .exp_name = "dma_buf_te",
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0))
+ .owner = THIS_MODULE,
+#endif
+ .ops = &dma_buf_te_ops,
+ .size = alloc->nr_pages << PAGE_SHIFT,
+ .flags = O_CLOEXEC | O_RDWR,
+ .priv = alloc,
+ };
+
+ dma_buf = dma_buf_export(&export_info);
+ }
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0))
+ dma_buf = dma_buf_export(alloc, &dma_buf_te_ops,
+ alloc->nr_pages << PAGE_SHIFT, O_CLOEXEC|O_RDWR, NULL);
+#else
+ dma_buf = dma_buf_export(alloc, &dma_buf_te_ops,
+ alloc->nr_pages << PAGE_SHIFT, O_CLOEXEC|O_RDWR);
+#endif
+
+ if (IS_ERR_OR_NULL(dma_buf)) {
+ dev_err(te_device.this_device, "%s: couldn't export dma_buf", __func__);
+ goto no_export;
+ }
+
+ /* get fd for buf */
+ fd = dma_buf_fd(dma_buf, O_CLOEXEC);
+
+ if (fd < 0) {
+ dev_err(te_device.this_device, "%s: couldn't get fd from dma_buf", __func__);
+ goto no_fd;
+ }
+
+ return fd;
+
+no_fd:
+ dma_buf_put(dma_buf);
+no_export:
+ /* i still valid */
+no_page:
+ if (contiguous) {
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0))
+ dma_free_attrs(te_device.this_device,
+ alloc->nr_pages * PAGE_SIZE,
+ alloc->contig_cpu_addr,
+ alloc->contig_dma_addr,
+ DMA_ATTR_WRITE_COMBINE);
+
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ dma_free_attrs(te_device.this_device,
+ alloc->nr_pages * PAGE_SIZE,
+ alloc->contig_cpu_addr, alloc->contig_dma_addr, &attrs);
+#else
+ dma_free_writecombine(te_device.this_device,
+ alloc->nr_pages * PAGE_SIZE,
+ alloc->contig_cpu_addr, alloc->contig_dma_addr);
+#endif
+ } else {
+ while (i-- > 0)
+ __free_page(alloc->pages[i]);
+ }
+free_page_struct:
+ kfree(alloc->pages);
+free_alloc_object:
+ kfree(alloc);
+no_alloc_object:
+invalid_size:
+no_input:
+ return -EFAULT;
+}
+
+/* DMA_BUF_TE_QUERY: report the attach/map counters of a te-exported buffer.
+ *
+ * Returns 0 on success, -EFAULT when @arg cannot be copied to/from user
+ * space, -EINVAL for a bad fd or a dma_buf not exported by this driver.
+ */
+static int do_dma_buf_te_ioctl_status(struct dma_buf_te_ioctl_status __user *arg)
+{
+ struct dma_buf_te_ioctl_status status;
+ struct dma_buf *dmabuf;
+ struct dma_buf_te_alloc *alloc;
+ int res = -EINVAL;
+
+ if (copy_from_user(&status, arg, sizeof(status)))
+  return -EFAULT;
+
+ dmabuf = dma_buf_get(status.fd);
+ if (IS_ERR_OR_NULL(dmabuf))
+  return -EINVAL;
+
+ /* verify it's one of ours */
+ if (dmabuf->ops != &dma_buf_te_ops)
+  goto err_have_dmabuf;
+
+ /* ours, get the current status */
+ alloc = dmabuf->priv;
+
+ /* lock while reading status to take a snapshot */
+ mutex_lock(&dmabuf->lock);
+ status.attached_devices = alloc->nr_attached_devices;
+ status.device_mappings = alloc->nr_device_mappings;
+ status.cpu_mappings = alloc->nr_cpu_mappings;
+ mutex_unlock(&dmabuf->lock);
+
+ /* a failed user-space copy is -EFAULT, not -EINVAL */
+ if (copy_to_user(arg, &status, sizeof(status))) {
+  res = -EFAULT;
+  goto err_have_dmabuf;
+ }
+
+ /* All OK */
+ res = 0;
+
+err_have_dmabuf:
+ dma_buf_put(dmabuf);
+ return res;
+}
+
+/* DMA_BUF_TE_SET_FAILING: arm fault injection on a te-exported buffer so
+ * that future attach/map/mmap calls on it fail.
+ *
+ * Returns 0 on success, -EFAULT when @arg cannot be read, -EINVAL for a
+ * bad fd or a dma_buf not exported by this driver.
+ */
+static int do_dma_buf_te_ioctl_set_failing(struct dma_buf_te_ioctl_set_failing __user *arg)
+{
+ struct dma_buf *dmabuf;
+ struct dma_buf_te_ioctl_set_failing f;
+ struct dma_buf_te_alloc *alloc;
+ int res = -EINVAL;
+
+ if (copy_from_user(&f, arg, sizeof(f)))
+  return -EFAULT;
+
+ dmabuf = dma_buf_get(f.fd);
+ if (IS_ERR_OR_NULL(dmabuf))
+  return -EINVAL;
+
+ /* verify it's one of ours */
+ if (dmabuf->ops != &dma_buf_te_ops)
+  goto err_have_dmabuf;
+
+ /* ours, set the fail modes */
+ alloc = dmabuf->priv;
+ /* lock to set the fail modes atomically */
+ mutex_lock(&dmabuf->lock);
+ alloc->fail_attach = f.fail_attach;
+ alloc->fail_map = f.fail_map;
+ alloc->fail_mmap = f.fail_mmap;
+ mutex_unlock(&dmabuf->lock);
+
+ /* success */
+ res = 0;
+
+err_have_dmabuf:
+ /* balances the dma_buf_get() above */
+ dma_buf_put(dmabuf);
+ return res;
+}
+
+/* Map @dma_buf through the te device and fill every page with @value.
+ *
+ * Attaches, maps the attachment and brackets a page-by-page kmap/memset
+ * with begin/end_cpu_access, unwinding on every error path.
+ * Returns 0 on success or a negative errno.
+ */
+static u32 dma_te_buf_fill(struct dma_buf *dma_buf, unsigned int value)
+{
+ struct dma_buf_attachment *attachment;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ unsigned int count;
+ unsigned int offset = 0;
+ int ret = 0;
+ int i;
+
+ attachment = dma_buf_attach(dma_buf, te_device.this_device);
+ if (IS_ERR_OR_NULL(attachment))
+  return -EBUSY;
+
+ sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
+ if (IS_ERR_OR_NULL(sgt)) {
+  ret = PTR_ERR(sgt);
+  goto no_import;
+ }
+
+ ret = dma_buf_begin_cpu_access(dma_buf,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ 0, dma_buf->size,
+#endif
+ DMA_BIDIRECTIONAL);
+ if (ret)
+  goto no_cpu_access;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+  for (i = 0; i < sg_dma_len(sg); i = i + PAGE_SIZE) {
+   void *addr;
+
+   /* The kmap page index must account for the pages of
+    * earlier sg entries, otherwise every entry rewrites
+    * the first pages of the buffer. */
+   addr = dma_buf_kmap(dma_buf, (offset + i) >> PAGE_SHIFT);
+   if (!addr) {
+    /* dma_buf_kmap is unimplemented in exynos and returns NULL */
+    ret = -EPERM;
+    goto no_kmap;
+   }
+   memset(addr, value, PAGE_SIZE);
+   dma_buf_kunmap(dma_buf, (offset + i) >> PAGE_SHIFT, addr);
+  }
+  offset += sg_dma_len(sg);
+ }
+
+no_kmap:
+ dma_buf_end_cpu_access(dma_buf,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+ 0, dma_buf->size,
+#endif
+ DMA_BIDIRECTIONAL);
+no_cpu_access:
+ dma_buf_unmap_attachment(attachment, sgt, DMA_BIDIRECTIONAL);
+no_import:
+ dma_buf_detach(dma_buf, attachment);
+ return ret;
+}
+
+/* DMA_BUF_TE_FILL: fill a dma_buf (from any exporter) with a byte value. */
+static int do_dma_buf_te_ioctl_fill(struct dma_buf_te_ioctl_fill __user *arg)
+{
+
+ struct dma_buf *dmabuf;
+ struct dma_buf_te_ioctl_fill f;
+ int ret;
+
+ if (copy_from_user(&f, arg, sizeof(f)))
+  return -EFAULT;
+
+ dmabuf = dma_buf_get(f.fd);
+ if (IS_ERR_OR_NULL(dmabuf))
+  return -EINVAL;
+
+ ret = dma_te_buf_fill(dmabuf, f.value);
+ /* balances the dma_buf_get() above */
+ dma_buf_put(dmabuf);
+
+ return ret;
+}
+
+/* Top-level ioctl dispatch for the dma_buf_te misc device. */
+static long dma_buf_te_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case DMA_BUF_TE_VERSION:
+  return do_dma_buf_te_ioctl_version((struct dma_buf_te_ioctl_version __user *)arg);
+ case DMA_BUF_TE_ALLOC:
+  return do_dma_buf_te_ioctl_alloc((struct dma_buf_te_ioctl_alloc __user *)arg, false);
+ case DMA_BUF_TE_ALLOC_CONT:
+  /* same handler, but backed by one contiguous DMA allocation */
+  return do_dma_buf_te_ioctl_alloc((struct dma_buf_te_ioctl_alloc __user *)arg, true);
+ case DMA_BUF_TE_QUERY:
+  return do_dma_buf_te_ioctl_status((struct dma_buf_te_ioctl_status __user *)arg);
+ case DMA_BUF_TE_SET_FAILING:
+  return do_dma_buf_te_ioctl_set_failing((struct dma_buf_te_ioctl_set_failing __user *)arg);
+ case DMA_BUF_TE_FILL:
+  return do_dma_buf_te_ioctl_fill((struct dma_buf_te_ioctl_fill __user *)arg);
+ default:
+  return -ENOTTY;
+ }
+}
+
+static const struct file_operations dma_buf_te_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = dma_buf_te_ioctl,
+ /* NOTE(review): the native handler is reused for 32-bit compat; this
+  * assumes every ioctl struct has an identical 32/64-bit layout -- TODO
+  * confirm the uapi structs use only fixed-size fields. */
+ .compat_ioctl = dma_buf_te_ioctl,
+};
+
+/* Module init: register the "dma_buf_te" misc device and set its DMA mask. */
+static int __init dma_buf_te_init(void)
+{
+ int res;
+ te_device.minor = MISC_DYNAMIC_MINOR;
+ te_device.name = "dma_buf_te";
+ te_device.fops = &dma_buf_te_fops;
+
+ res = misc_register(&te_device);
+ if (res) {
+  printk(KERN_WARNING"Misc device registration failed of 'dma_buf_te'\n");
+  return res;
+ }
+ /* 32-bit coherent mask so the contiguous (ALLOC_CONT) path can use
+  * dma_alloc_* against this device */
+ te_device.this_device->coherent_dma_mask = DMA_BIT_MASK(32);
+
+ dev_info(te_device.this_device, "dma_buf_te ready\n");
+ return 0;
+
+}
+
+/* Module exit: unregister the misc device. */
+static void __exit dma_buf_te_exit(void)
+{
+ misc_deregister(&te_device);
+}
+
+module_init(dma_buf_te_init);
+module_exit(dma_buf_te_exit);
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/base/kds/Kbuild b/drivers/base/kds/Kbuild
new file mode 100644
index 000000000000..37b6cca58195
--- /dev/null
+++ b/drivers/base/kds/Kbuild
@@ -0,0 +1,17 @@
+#
+# (C) COPYRIGHT 2012-2013, 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+obj-$(CONFIG_KDS) += kds.o
diff --git a/drivers/base/kds/Kconfig b/drivers/base/kds/Kconfig
new file mode 100644
index 000000000000..5f96165f67d5
--- /dev/null
+++ b/drivers/base/kds/Kconfig
@@ -0,0 +1,20 @@
+#
+# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+config KDS
+ tristate "Kernel dependency system"
+ help
+ This option enables the generic kernel dependency system
diff --git a/drivers/base/kds/Makefile b/drivers/base/kds/Makefile
new file mode 100644
index 000000000000..d6e90812d928
--- /dev/null
+++ b/drivers/base/kds/Makefile
@@ -0,0 +1,31 @@
+#
+# (C) COPYRIGHT 2011-2013, 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+# linux build system bootstrap for out-of-tree module
+
+# default to building for the host
+ARCH ?= $(shell uname -m)
+
+ifeq ($(KDIR),)
+$(error Must specify KDIR to point to the kernel to target)
+endif
+
+kds:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../include" CONFIG_KDS=m
+
+clean:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+
diff --git a/drivers/base/kds/kds.c b/drivers/base/kds/kds.c
new file mode 100644
index 000000000000..bb61662cb22f
--- /dev/null
+++ b/drivers/base/kds/kds.c
@@ -0,0 +1,557 @@
+/*
+ *
+ * (C) COPYRIGHT 2012-2015, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/kds.h>
+#include <linux/kref.h>
+
+#include <asm/atomic.h>
+
+#define KDS_LINK_TRIGGERED (1u << 0)
+#define KDS_LINK_EXCLUSIVE (1u << 1)
+
+#define KDS_INVALID (void *)-2
+#define KDS_RESOURCE (void *)-1
+
+/* One wait request spanning several KDS resources.
+ * Created by kds_waitall()/kds_async_waitall(); freed via @refcount once
+ * the owner releases it and no callback work remains queued.
+ */
+struct kds_resource_set
+{
+ unsigned long num_resources; /* entries in resources[] */
+ unsigned long pending;       /* resources not yet granted; 0 == triggered */
+ struct kds_callback *cb;
+ void *callback_parameter;
+ void *callback_extra_parameter;
+ struct list_head callback_link; /* link on a local "triggered" list during release */
+ struct work_struct callback_work;
+ atomic_t cb_queued;          /* callbacks queued on cb->wq but not yet run */
+ /* This resource set will be freed when there are no pending
+  * callbacks */
+ struct kref refcount;
+
+ /* This is only initted when kds_waitall() is called. */
+ wait_queue_head_t wake;
+
+ struct kds_link resources[0]; /* one link per waited-on resource */
+
+};
+
+static DEFINE_SPINLOCK(kds_lock);
+
+/* kref release function: the last owner/callback reference is gone. */
+static void __resource_set_release(struct kref *ref)
+{
+ struct kds_resource_set *rset = container_of(ref,
+   struct kds_resource_set, refcount);
+
+ kfree(rset);
+}
+
+/* Initialise a KDS callback descriptor.
+ *
+ * @direct non-zero: @user_cb is invoked synchronously from the trigger
+ * context; zero: invocations are deferred to a dedicated unbound
+ * high-priority workqueue allocated here.
+ * Returns 0 on success, -ENOMEM if the workqueue cannot be created.
+ */
+int kds_callback_init(struct kds_callback *cb, int direct, kds_callback_fn user_cb)
+{
+ int ret = 0;
+
+ cb->direct = direct;
+ cb->user_cb = user_cb;
+
+ if (!direct)
+ {
+  cb->wq = alloc_workqueue("kds", WQ_UNBOUND | WQ_HIGHPRI, WQ_UNBOUND_MAX_ACTIVE);
+  if (!cb->wq)
+   ret = -ENOMEM;
+ }
+ else
+ {
+  cb->wq = NULL;
+ }
+
+ return ret;
+}
+
+/* Tear down a callback descriptor; destroys the deferred-mode workqueue.
+ * The BUG_ONs assert the direct/wq invariant set up by kds_callback_init().
+ */
+void kds_callback_term(struct kds_callback *cb)
+{
+ if (!cb->direct)
+ {
+  BUG_ON(!cb->wq);
+  destroy_workqueue(cb->wq);
+ }
+ else
+ {
+  BUG_ON(cb->wq);
+ }
+}
+
+/* Invoke the client callback with the parameters captured at wait time. */
+static void kds_do_user_callback(struct kds_resource_set *rset)
+{
+ rset->cb->user_cb(rset->callback_parameter, rset->callback_extra_parameter);
+}
+
+/* Workqueue handler for deferred (non-direct) callbacks. */
+static void kds_queued_callback(struct work_struct *work)
+{
+ struct kds_resource_set *rset;
+ rset = container_of(work, struct kds_resource_set, callback_work);
+
+ /* NOTE(review): cb_queued is dropped before user_cb runs, so a
+  * concurrent release sees no queued work while the callback is still
+  * executing -- presumably callers must not release until the callback
+  * has run; confirm against the API contract. */
+ atomic_dec(&rset->cb_queued);
+
+ kds_do_user_callback(rset);
+}
+
+/* Deliver the "all resources granted" notification: directly, or queued
+ * on the callback's workqueue with cb_queued tracking the outstanding work.
+ */
+static void kds_callback_perform(struct kds_resource_set *rset)
+{
+ if (rset->cb->direct)
+  kds_do_user_callback(rset);
+ else
+ {
+  int result;
+
+  atomic_inc(&rset->cb_queued);
+
+  result = queue_work(rset->cb->wq, &rset->callback_work);
+  /* if we got a 0 return it means we've triggered the same rset twice! */
+  WARN_ON(!result);
+ }
+}
+
+/* Initialise a resource: empty waiter list, parent sentinel marks the
+ * list head as belonging to the resource itself (see KDS_RESOURCE). */
+void kds_resource_init(struct kds_resource * const res)
+{
+ BUG_ON(!res);
+ INIT_LIST_HEAD(&res->waiters.link);
+ res->waiters.parent = KDS_RESOURCE;
+}
+
+/* Terminate a resource. Fails with -EBUSY while any waiter is still
+ * linked; on success the parent is poisoned with KDS_INVALID so further
+ * use can be detected. */
+int kds_resource_term(struct kds_resource *res)
+{
+ unsigned long lflags;
+ BUG_ON(!res);
+ spin_lock_irqsave(&kds_lock, lflags);
+ if (!list_empty(&res->waiters.link))
+ {
+  spin_unlock_irqrestore(&kds_lock, lflags);
+  printk(KERN_ERR "ERROR: KDS resource is still in use\n");
+  return -EBUSY;
+ }
+ res->waiters.parent = KDS_INVALID;
+ spin_unlock_irqrestore(&kds_lock, lflags);
+ return 0;
+}
+
+/* Asynchronously wait for a set of resources.
+ *
+ * Queues one kds_link on each resource's waiter list under kds_lock; a
+ * link is immediately TRIGGERED if the resource is idle, or if a
+ * non-exclusive request follows a triggered non-exclusive tail. When all
+ * links trigger at insert time the callback fires before returning.
+ * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL if the
+ * same rset would be queued twice on one resource (rolled back).
+ */
+int kds_async_waitall(
+ struct kds_resource_set ** const pprset,
+ struct kds_callback *cb,
+ void *callback_parameter,
+ void *callback_extra_parameter,
+ int number_resources,
+ unsigned long *exclusive_access_bitmap,
+ struct kds_resource **resource_list)
+{
+ struct kds_resource_set *rset = NULL;
+ unsigned long lflags;
+ int i;
+ int triggered;
+
+ BUG_ON(!pprset);
+ BUG_ON(!resource_list);
+ BUG_ON(!cb);
+
+ WARN_ONCE(number_resources > 10, "Waiting on a high numbers of resources may increase latency, see documentation.");
+
+ /* links are allocated inline via the trailing flexible array */
+ rset = kmalloc(sizeof(*rset) + number_resources * sizeof(struct kds_link), GFP_KERNEL);
+ if (!rset)
+ {
+  return -ENOMEM;
+ }
+
+ rset->num_resources = number_resources;
+ rset->pending = number_resources;
+ rset->cb = cb;
+ rset->callback_parameter = callback_parameter;
+ rset->callback_extra_parameter = callback_extra_parameter;
+ INIT_LIST_HEAD(&rset->callback_link);
+ INIT_WORK(&rset->callback_work, kds_queued_callback);
+ atomic_set(&rset->cb_queued, 0);
+ kref_init(&rset->refcount);
+
+ for (i = 0; i < number_resources; i++)
+ {
+  INIT_LIST_HEAD(&rset->resources[i].link);
+  rset->resources[i].parent = rset;
+ }
+
+ spin_lock_irqsave(&kds_lock, lflags);
+
+ for (i = 0; i < number_resources; i++)
+ {
+  unsigned long link_state = 0;
+
+  if (test_bit(i, exclusive_access_bitmap))
+  {
+   link_state |= KDS_LINK_EXCLUSIVE;
+  }
+
+  /* no-one else waiting? */
+  if (list_empty(&resource_list[i]->waiters.link))
+  {
+   link_state |= KDS_LINK_TRIGGERED;
+   rset->pending--;
+  }
+  /* Adding a non-exclusive and the current tail is a triggered non-exclusive? */
+  else if (((link_state & KDS_LINK_EXCLUSIVE) == 0) &&
+    (((list_entry(resource_list[i]->waiters.link.prev, struct kds_link, link)->state & (KDS_LINK_EXCLUSIVE | KDS_LINK_TRIGGERED)) == KDS_LINK_TRIGGERED)))
+  {
+   link_state |= KDS_LINK_TRIGGERED;
+   rset->pending--;
+  }
+  rset->resources[i].state = link_state;
+
+  /* avoid double wait (hang) */
+  if (!list_empty(&resource_list[i]->waiters.link))
+  {
+   /* adding same rset again? */
+   if (list_entry(resource_list[i]->waiters.link.prev, struct kds_link, link)->parent == rset)
+   {
+    goto roll_back;
+   }
+  }
+  list_add_tail(&rset->resources[i].link, &resource_list[i]->waiters.link);
+ }
+
+ triggered = (rset->pending == 0);
+
+ /* set the pointer before the callback is called so it sees it */
+ *pprset = rset;
+
+ spin_unlock_irqrestore(&kds_lock, lflags);
+
+ if (triggered)
+ {
+  /* all resources obtained, trigger callback */
+  kds_callback_perform(rset);
+ }
+
+ return 0;
+
+roll_back:
+ /* roll back */
+ while (i-- > 0)
+ {
+  list_del(&rset->resources[i].link);
+ }
+
+ spin_unlock_irqrestore(&kds_lock, lflags);
+ kfree(rset);
+ return -EINVAL;
+}
+
+/* Direct callback used by kds_waitall(): wake the synchronous waiter. */
+static void wake_up_sync_call(void *callback_parameter, void *callback_extra_parameter)
+{
+ wait_queue_head_t *wait = (wait_queue_head_t *)callback_parameter;
+ wake_up(wait);
+}
+
+/* Callback descriptor for the synchronous wait path: positional fields
+ * are user_cb = wake_up_sync_call, direct = 1, wq = NULL. */
+static struct kds_callback sync_cb =
+{
+ wake_up_sync_call,
+ 1,
+ NULL,
+};
+
+/* Synchronously wait for a set of resources, using the same queuing rules
+ * as kds_async_waitall() but sleeping on rset->wake until pending hits 0.
+ *
+ * @jiffies_timeout: KDS_WAIT_BLOCKING waits forever, otherwise a jiffies
+ * timeout. Returns the resource set, NULL on allocation failure, or an
+ * ERR_PTR on double-wait (-EINVAL) / interruption (-ERESTARTSYS).
+ * NOTE(review): on timeout (or a zero timeout) wait_res is 0, so the
+ * function returns ERR_PTR(0), which IS_ERR() does not flag -- callers
+ * presumably test for NULL as well; confirm intended semantics.
+ */
+struct kds_resource_set *kds_waitall(
+ int number_resources,
+ unsigned long *exclusive_access_bitmap,
+ struct kds_resource **resource_list,
+ unsigned long jiffies_timeout)
+{
+ struct kds_resource_set *rset;
+ unsigned long lflags;
+ int i;
+ int triggered = 0;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
+
+ rset = kmalloc(sizeof(*rset) + number_resources * sizeof(struct kds_link), GFP_KERNEL);
+ if (!rset)
+  return rset;
+
+ rset->num_resources = number_resources;
+ rset->pending = number_resources;
+ init_waitqueue_head(&rset->wake);
+ INIT_LIST_HEAD(&rset->callback_link);
+ INIT_WORK(&rset->callback_work, kds_queued_callback);
+ atomic_set(&rset->cb_queued, 0);
+ kref_init(&rset->refcount);
+
+ spin_lock_irqsave(&kds_lock, lflags);
+
+ for (i = 0; i < number_resources; i++)
+ {
+  unsigned long link_state = 0;
+
+  if (test_bit(i, exclusive_access_bitmap))
+  {
+   link_state |= KDS_LINK_EXCLUSIVE;
+  }
+
+  /* resource idle: this request is granted immediately */
+  if (list_empty(&resource_list[i]->waiters.link))
+  {
+   link_state |= KDS_LINK_TRIGGERED;
+   rset->pending--;
+  }
+  /* Adding a non-exclusive and the current tail is a triggered non-exclusive? */
+  else if (((link_state & KDS_LINK_EXCLUSIVE) == 0) &&
+    (((list_entry(resource_list[i]->waiters.link.prev, struct kds_link, link)->state & (KDS_LINK_EXCLUSIVE | KDS_LINK_TRIGGERED)) == KDS_LINK_TRIGGERED)))
+  {
+   link_state |= KDS_LINK_TRIGGERED;
+   rset->pending--;
+  }
+
+  INIT_LIST_HEAD(&rset->resources[i].link);
+  rset->resources[i].parent = rset;
+  rset->resources[i].state = link_state;
+
+  /* avoid double wait (hang) */
+  if (!list_empty(&resource_list[i]->waiters.link))
+  {
+   /* adding same rset again? */
+   if (list_entry(resource_list[i]->waiters.link.prev, struct kds_link, link)->parent == rset)
+   {
+    goto roll_back;
+   }
+  }
+
+  list_add_tail(&rset->resources[i].link, &resource_list[i]->waiters.link);
+ }
+
+ if (rset->pending == 0)
+  triggered = 1;
+ else
+ {
+  /* not all granted yet: arrange for wake_up_sync_call on trigger */
+  rset->cb = &sync_cb;
+  rset->callback_parameter = &rset->wake;
+  rset->callback_extra_parameter = NULL;
+ }
+
+ spin_unlock_irqrestore(&kds_lock, lflags);
+
+ if (!triggered)
+ {
+  long wait_res = 0;
+  long timeout = (jiffies_timeout == KDS_WAIT_BLOCKING) ?
+    MAX_SCHEDULE_TIMEOUT : jiffies_timeout;
+
+  if (timeout)
+  {
+   wait_res = wait_event_interruptible_timeout(rset->wake,
+     rset->pending == 0, timeout);
+  }
+
+  if ((wait_res == -ERESTARTSYS) || (wait_res == 0))
+  {
+   /* use \a kds_resource_set_release to roll back */
+   kds_resource_set_release(&rset);
+   return ERR_PTR(wait_res);
+  }
+ }
+ return rset;
+
+roll_back:
+ /* roll back */
+ while (i-- > 0)
+ {
+  list_del(&rset->resources[i].link);
+ }
+
+ spin_unlock_irqrestore(&kds_lock, lflags);
+ kfree(rset);
+ return ERR_PTR(-EINVAL);
+}
+
+/* Called under kds_lock: one more of @rset's resources has been granted.
+ * When the count hits zero the rset is queued on @triggered (with an
+ * extra kref) so its callback can run after the lock is dropped. */
+static void trigger_new_rset_owner(struct kds_resource_set *rset,
+  struct list_head *triggered)
+{
+ if (0 == --rset->pending) {
+  /* new owner now triggered, track for callback later */
+  kref_get(&rset->refcount);
+  list_add(&rset->callback_link, triggered);
+ }
+}
+
+/* Unlink @rset from every resource it waits on and hand each resource to
+ * its next waiter(s), honouring the exclusive/shared rules. Callbacks for
+ * newly-triggered sets are collected under kds_lock and performed after
+ * it is dropped.
+ * NOTE(review): the outer "it" (kds_resource_set, for the callback loop)
+ * is shadowed by the inner "it" (kds_link) inside the for loop.
+ */
+static void __kds_resource_set_release_common(struct kds_resource_set *rset)
+{
+ struct list_head triggered = LIST_HEAD_INIT(triggered);
+ struct kds_resource_set *it;
+ unsigned long lflags;
+ int i;
+
+ spin_lock_irqsave(&kds_lock, lflags);
+
+ for (i = 0; i < rset->num_resources; i++)
+ {
+  struct kds_resource *resource;
+  struct kds_link *it = NULL;
+
+  /* fetch the previous entry on the linked list */
+  it = list_entry(rset->resources[i].link.prev, struct kds_link, link);
+  /* unlink ourself */
+  list_del(&rset->resources[i].link);
+
+  /* any waiters? */
+  if (list_empty(&it->link))
+   continue;
+
+  /* were we the head of the list? (head if prev is a resource) */
+  if (it->parent != KDS_RESOURCE)
+  {
+   if ((it->state & KDS_LINK_TRIGGERED) && !(it->state & KDS_LINK_EXCLUSIVE))
+   {
+    /*
+     * previous was triggered and not exclusive, so we
+     * trigger non-exclusive until end-of-list or first
+     * exclusive
+     */
+
+    struct kds_link *it_waiting = it;
+
+    list_for_each_entry(it, &it_waiting->link, link)
+    {
+     /* exclusive found, stop triggering */
+     if (it->state & KDS_LINK_EXCLUSIVE)
+      break;
+
+     it->state |= KDS_LINK_TRIGGERED;
+     /* a parent to update? */
+     if (it->parent != KDS_RESOURCE)
+      trigger_new_rset_owner(
+        it->parent,
+        &triggered);
+    }
+   }
+   continue;
+  }
+
+  /* we were the head, find the kds_resource */
+  resource = container_of(it, struct kds_resource, waiters);
+
+  /* we know there is someone waiting from the any-waiters test above */
+
+  /* find the head of the waiting list */
+  it = list_first_entry(&resource->waiters.link, struct kds_link, link);
+
+  /* new exclusive owner? */
+  if (it->state & KDS_LINK_EXCLUSIVE)
+  {
+   /* link now triggered */
+   it->state |= KDS_LINK_TRIGGERED;
+   /* a parent to update? */
+   trigger_new_rset_owner(it->parent, &triggered);
+  }
+  /* exclusive releasing ? */
+  else if (rset->resources[i].state & KDS_LINK_EXCLUSIVE)
+  {
+   /* trigger non-exclusive until end-of-list or first exclusive */
+   list_for_each_entry(it, &resource->waiters.link, link)
+   {
+    /* exclusive found, stop triggering */
+    if (it->state & KDS_LINK_EXCLUSIVE)
+     break;
+
+    it->state |= KDS_LINK_TRIGGERED;
+    /* a parent to update? */
+    trigger_new_rset_owner(it->parent, &triggered);
+   }
+  }
+ }
+
+ spin_unlock_irqrestore(&kds_lock, lflags);
+
+ /* run the collected callbacks outside the spinlock */
+ while (!list_empty(&triggered))
+ {
+  it = list_first_entry(&triggered, struct kds_resource_set, callback_link);
+  list_del(&it->callback_link);
+  kds_callback_perform(it);
+
+  /* Free the resource set if no callbacks pending */
+  kref_put(&it->refcount, &__resource_set_release);
+ }
+}
+
+/* Release a resource set. The cmpxchg atomically claims *pprset (setting
+ * it to NULL) so a racing cancel/complete pair only releases once. */
+void kds_resource_set_release(struct kds_resource_set **pprset)
+{
+ struct kds_resource_set *rset;
+ int queued;
+
+ rset = cmpxchg(pprset, *pprset, NULL);
+
+ if (!rset)
+ {
+  /* caught a race between a cancelation
+   * and a completion, nothing to do */
+  return;
+ }
+
+ __kds_resource_set_release_common(rset);
+
+ /*
+  * Caller is responsible for guaranteeing that callback work is not
+  * pending (i.e. its running or completed) prior to calling release.
+  */
+ queued = atomic_read(&rset->cb_queued);
+ BUG_ON(queued);
+
+ kref_put(&rset->refcount, &__resource_set_release);
+}
+
+/* As kds_resource_set_release(), but additionally cancels/flushes any
+ * deferred callback work so it cannot run after the set is freed. */
+void kds_resource_set_release_sync(struct kds_resource_set **pprset)
+{
+ struct kds_resource_set *rset;
+
+ rset = cmpxchg(pprset, *pprset, NULL);
+ if (!rset)
+ {
+  /* caught a race between a cancelation
+   * and a completion, nothing to do */
+  return;
+ }
+
+ __kds_resource_set_release_common(rset);
+
+ /*
+  * In the case of a kds async wait cancellation ensure the deferred
+  * call back does not get scheduled if a trigger fired at the same time
+  * to release the wait.
+  */
+ cancel_work_sync(&rset->callback_work);
+
+ kref_put(&rset->refcount, &__resource_set_release);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION("1.0");
diff --git a/drivers/base/smc_protected_mode_switcher/Kbuild b/drivers/base/smc_protected_mode_switcher/Kbuild
new file mode 100644
index 000000000000..244c70b6e79b
--- /dev/null
+++ b/drivers/base/smc_protected_mode_switcher/Kbuild
@@ -0,0 +1,18 @@
+#
+# (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+obj-$(CONFIG_SMC_PROTECTED_MODE_SWITCHER) := smc_protected_mode_switcher.o
+smc_protected_mode_switcher-y := protected_mode_switcher_device.o \
+ protected_mode_switcher_smc.o
diff --git a/drivers/base/smc_protected_mode_switcher/Kconfig b/drivers/base/smc_protected_mode_switcher/Kconfig
new file mode 100644
index 000000000000..27a6bab952b3
--- /dev/null
+++ b/drivers/base/smc_protected_mode_switcher/Kconfig
@@ -0,0 +1,20 @@
+#
+# (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+config SMC_PROTECTED_MODE_SWITCHER
+ tristate "SMC protected mode switcher"
+ help
+ This option enables the SMC protected mode switcher
diff --git a/drivers/base/smc_protected_mode_switcher/Makefile b/drivers/base/smc_protected_mode_switcher/Makefile
new file mode 100644
index 000000000000..3e1788c115c8
--- /dev/null
+++ b/drivers/base/smc_protected_mode_switcher/Makefile
@@ -0,0 +1,31 @@
+#
+# (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+# linux build system bootstrap for out-of-tree module
+
+# default to building for the host
+ARCH ?= $(shell uname -m)
+
+ifeq ($(KDIR),)
+$(error Must specify KDIR to point to the kernel to target)
+endif
+
+all:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="-I$(CURDIR)/../../../include -I$(CURDIR)/../../gpu/arm/midgard -DCONFIG_SMC_PROTECTED_MODE_SWITCHER" CONFIG_SMC_PROTECTED_MODE_SWITCHER=m modules
+
+clean:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+
diff --git a/drivers/base/smc_protected_mode_switcher/protected_mode_switcher_device.c b/drivers/base/smc_protected_mode_switcher/protected_mode_switcher_device.c
new file mode 100644
index 000000000000..31c57cdb2c88
--- /dev/null
+++ b/drivers/base/smc_protected_mode_switcher/protected_mode_switcher_device.c
@@ -0,0 +1,164 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/compiler.h>
+
+#include <linux/protected_mode_switcher.h>
+
+/*
+ * Protected Mode Switch
+ */
+
+#define SMC_FAST_CALL (1 << 31)
+#define SMC_64 (1 << 30)
+#define SMC_OEN_OFFSET 24
+#define SMC_OEN_SIP (2 << SMC_OEN_OFFSET)
+
+/* Per-device state: SMC function numbers (from DT) for entering and
+ * leaving protected mode, plus the owning struct device. */
+struct smc_protected_mode_device {
+ u16 smc_fid_enable;
+ u16 smc_fid_disable;
+ struct device *dev;
+};
+
+asmlinkage u64 __invoke_protected_mode_switch_smc(u64, u64, u64, u64);
+
+/* Build an SMC fast-call function id from @oen/@function_number (SMC32
+ * unless @smc64) and issue it with up to three arguments. */
+static u64 invoke_smc(u32 oen, u16 function_number, bool smc64,
+  u64 arg0, u64 arg1, u64 arg2)
+{
+ u32 fid = 0;
+
+ fid |= SMC_FAST_CALL; /* Bit 31: Fast call */
+ if (smc64)
+  fid |= SMC_64; /* Bit 30: 1=SMC64, 0=SMC32 */
+ fid |= oen; /* Bit 29:24: OEN */
+ /* Bit 23:16: Must be zero for fast calls */
+ fid |= (function_number); /* Bit 15:0: function number */
+
+ return __invoke_protected_mode_switch_smc(fid, arg0, arg1, arg2);
+}
+
+/* protected_mode_ops hook: enter protected mode via the SiP SMC. */
+static int protected_mode_enable(struct protected_mode_device *protected_dev)
+{
+ struct smc_protected_mode_device *sdev = protected_dev->data;
+
+ if (!sdev)
+  /* Not supported */
+  return -EINVAL;
+
+ return invoke_smc(SMC_OEN_SIP,
+   sdev->smc_fid_enable, false,
+   0, 0, 0);
+
+}
+
+/* protected_mode_ops hook: leave protected mode via the SiP SMC. */
+static int protected_mode_disable(struct protected_mode_device *protected_dev)
+{
+ struct smc_protected_mode_device *sdev = protected_dev->data;
+
+ if (!sdev)
+  /* Not supported */
+  return -EINVAL;
+
+ return invoke_smc(SMC_OEN_SIP,
+   sdev->smc_fid_disable, false,
+   0, 0, 0);
+}
+
+
+/* Probe: allocate the switcher state, read the enable/disable SMC
+ * function ids from the device tree (newer property names first, then
+ * legacy ones which override if present) and publish the ops. */
+static int protected_mode_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct protected_mode_device *protected_dev;
+ struct smc_protected_mode_device *sdev;
+ u32 tmp = 0;
+
+ protected_dev = devm_kzalloc(&pdev->dev, sizeof(*protected_dev),
+   GFP_KERNEL);
+ if (!protected_dev)
+  return -ENOMEM;
+
+ sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev), GFP_KERNEL);
+ if (!sdev) {
+  /* devm would free this anyway; released early for tidiness */
+  devm_kfree(&pdev->dev, protected_dev);
+  return -ENOMEM;
+ }
+
+ protected_dev->data = sdev;
+ protected_dev->ops.protected_mode_enable = protected_mode_enable;
+ protected_dev->ops.protected_mode_disable = protected_mode_disable;
+ sdev->dev = dev;
+
+ if (!of_property_read_u32(dev->of_node, "arm,smc,protected_enable",
+   &tmp))
+  sdev->smc_fid_enable = tmp;
+
+ if (!of_property_read_u32(dev->of_node, "arm,smc,protected_disable",
+   &tmp))
+  sdev->smc_fid_disable = tmp;
+
+ /* Check older property names, for compatibility with outdated DTBs */
+ if (!of_property_read_u32(dev->of_node, "arm,smc,secure_enable", &tmp))
+  sdev->smc_fid_enable = tmp;
+
+ if (!of_property_read_u32(dev->of_node, "arm,smc,secure_disable", &tmp))
+  sdev->smc_fid_disable = tmp;
+
+ platform_set_drvdata(pdev, protected_dev);
+
+ dev_info(&pdev->dev, "Protected mode switcher %s loaded\n", pdev->name);
+ dev_info(&pdev->dev, "SMC enable: 0x%x\n", sdev->smc_fid_enable);
+ dev_info(&pdev->dev, "SMC disable: 0x%x\n", sdev->smc_fid_disable);
+
+ return 0;
+}
+
+/* Remove: nothing to tear down (all allocations are devm-managed). */
+static int protected_mode_remove(struct platform_device *pdev)
+{
+ dev_info(&pdev->dev, "Protected mode switcher %s removed\n",
+   pdev->name);
+
+ return 0;
+}
+
+/* DT match table: current compatible plus the legacy name. */
+static const struct of_device_id protected_mode_dt_ids[] = {
+ { .compatible = "arm,smc-protected-mode-switcher" },
+ { .compatible = "arm,secure-mode-switcher" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, protected_mode_dt_ids);
+
+static struct platform_driver protected_mode_driver = {
+ .probe = protected_mode_probe,
+ .remove = protected_mode_remove,
+ .driver = {
+  .name = "smc-protected-mode-switcher",
+  .owner = THIS_MODULE,
+  .of_match_table = of_match_ptr(protected_mode_dt_ids),
+ }
+};
+
+module_platform_driver(protected_mode_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION("1.0");
diff --git a/drivers/base/smc_protected_mode_switcher/protected_mode_switcher_smc.S b/drivers/base/smc_protected_mode_switcher/protected_mode_switcher_smc.S
new file mode 100644
index 000000000000..5eae3282f26c
--- /dev/null
+++ b/drivers/base/smc_protected_mode_switcher/protected_mode_switcher_smc.S
@@ -0,0 +1,23 @@
+/*
+ *
+ * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+#include <linux/linkage.h>
+
+/* u64 invoke_protected_mode_switch_smc(u64 function_id, u64 arg0, u64 arg1,
+ u64 arg2) */
+ENTRY(__invoke_protected_mode_switch_smc)
+ smc #0
+ ret
+ENDPROC(__invoke_protected_mode_switch_smc)
diff --git a/drivers/base/ump/Kbuild b/drivers/base/ump/Kbuild
new file mode 100644
index 000000000000..2bbdba27d76f
--- /dev/null
+++ b/drivers/base/ump/Kbuild
@@ -0,0 +1,18 @@
+#
+# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+obj-y += src/
+
diff --git a/drivers/base/ump/Kconfig b/drivers/base/ump/Kconfig
new file mode 100644
index 000000000000..f7451e67a5ee
--- /dev/null
+++ b/drivers/base/ump/Kconfig
@@ -0,0 +1,26 @@
+#
+# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+config UMP
+ tristate "Enable Unified Memory Provider (UMP) support"
+ default n
+ help
+ Enable this option to build support for the ARM UMP module.
+ UMP can be used by the Mali T6xx module to improve performance
+ by reducing the copying of data by sharing memory.
+
+ To compile this driver as a module, choose M here:
+ this will generate one module, called ump.
diff --git a/drivers/base/ump/docs/Doxyfile b/drivers/base/ump/docs/Doxyfile
new file mode 100644
index 000000000000..fbec8eb40a02
--- /dev/null
+++ b/drivers/base/ump/docs/Doxyfile
@@ -0,0 +1,125 @@
+#
+# (C) COPYRIGHT 2011-2013 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+##############################################################################
+
+# This file contains per-module Doxygen configuration. Please do not add
+# extra settings to this file without consulting all stakeholders, as they
+# may override project-wide settings.
+#
+# Additionally, when defining aliases, macros, sections etc, use the module
+# name as a prefix e.g. gles_my_alias.
+
+##############################################################################
+
+@INCLUDE = ../../bldsys/Doxyfile_common
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT += ../../kernel/include/linux/ump-common.h ../../kernel/include/linux/ump.h
+
+##############################################################################
+# Everything below here is optional, and in most cases not required
+##############################################################################
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES +=
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS +=
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90
+
+FILE_PATTERNS +=
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE +=
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS +=
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS +=
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH += ../../kernel/drivers/base/ump
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain image that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH +=
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH +=
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED +=
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED +=
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS +=
diff --git a/drivers/base/ump/example_kernel_api.c b/drivers/base/ump/example_kernel_api.c
new file mode 100644
index 000000000000..858dd8b52f04
--- /dev/null
+++ b/drivers/base/ump/example_kernel_api.c
@@ -0,0 +1,73 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <linux/ump.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+
+/*
+ * Example routine to display information about an UMP allocation
+ * The routine takes a secure_id which can come from a different kernel module
+ * or from a client application (i.e. an ioctl).
+ * It creates a ump handle from the secure id (which validates the secure id)
+ * and if successful dumps the physical memory information.
+ * It follows the API and pins the memory while "using" the physical memory.
+ * Finally it calls the release function to indicate it's finished with the handle.
+ *
+ * If the function can't look up the handle it fails with return value -1.
+ * If the test succeeds then it returns 0.
+ * */
+
+static int display_ump_memory_information(ump_secure_id secure_id)
+{
+ const ump_dd_physical_block_64 * ump_blocks = NULL;
+ ump_dd_handle ump_mem;
+ uint64_t nr_blocks;
+ int i;
+ ump_alloc_flags flags;
+
+ /* get a handle from the secure id */
+ ump_mem = ump_dd_from_secure_id(secure_id);
+
+ if (UMP_DD_INVALID_MEMORY_HANDLE == ump_mem)
+ {
+ /* invalid ID received */
+ return -1;
+ }
+
+ /* at this point we know we've added a reference to the ump allocation, so we must release it with ump_dd_release */
+
+ ump_dd_phys_blocks_get_64(ump_mem, &nr_blocks, &ump_blocks);
+ flags = ump_dd_allocation_flags_get(ump_mem);
+
+ printf("UMP allocation with secure ID %u consists of %zd physical block(s):\n", secure_id, nr_blocks);
+
+ for(i=0; i<nr_blocks; ++i)
+ {
+ printf("\tBlock %d: 0x%08zX size 0x%08zX\n", i, ump_blocks[i].addr, ump_blocks[i].size);
+ }
+
+ printf("and was allocated using the following bitflag combo: 0x%lX\n", flags);
+
+ ump_dd_release(ump_mem);
+
+ return 0;
+}
+
diff --git a/drivers/base/ump/example_user_api.c b/drivers/base/ump/example_user_api.c
new file mode 100644
index 000000000000..d143a640512d
--- /dev/null
+++ b/drivers/base/ump/example_user_api.c
@@ -0,0 +1,153 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2011, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <ump/ump.h>
+#include <memory.h>
+#include <stdio.h>
+
+/*
+ * Example routine to exercise the user space UMP api.
+ * This routine initializes the UMP api and allocates some CPU+device X memory.
+ * No usage hints are given, so the driver will use the default cacheability policy.
+ * With the allocation it creates a duplicate handle and plays with the reference count.
+ * Then it simulates interacting with a device and contains pseudo code for the device.
+ *
+ * If any error is detected correct cleanup will be performed and -1 will be returned.
+ * If successful then 0 will be returned.
+ */
+
+static int test_ump_user_api(void)
+{
+ /* This is the size we try to allocate*/
+ const size_t alloc_size = 4096;
+
+ ump_handle h = UMP_INVALID_MEMORY_HANDLE;
+ ump_handle h_copy = UMP_INVALID_MEMORY_HANDLE;
+ ump_handle h_clone = UMP_INVALID_MEMORY_HANDLE;
+
+ void * mapping = NULL;
+
+ ump_result ump_api_res;
+ int result = -1;
+
+ ump_secure_id id;
+
+ size_t size_returned;
+
+ ump_api_res = ump_open();
+ if (UMP_OK != ump_api_res)
+ {
+ /* failed to open an ump session */
+ /* early out */
+ return -1;
+ }
+
+ h = ump_allocate_64(alloc_size, UMP_PROT_CPU_RD | UMP_PROT_CPU_WR | UMP_PROT_X_RD | UMP_PROT_X_WR);
+ /* the refcount is now 1 */
+ if (UMP_INVALID_MEMORY_HANDLE == h)
+ {
+ /* allocation failure */
+ goto cleanup;
+ }
+
+ /* this is how we could share this allocation with another process */
+
+ /* in process A: */
+ id = ump_secure_id_get(h);
+ /* still ref count 1 */
+ /* send the id to process B */
+
+ /* in process B: */
+ /* receive the id from A */
+ h_clone = ump_from_secure_id(id);
+ /* the ref count of the allocation is now 2 (one from each handle to it) */
+ /* do something ... */
+ /* release our clone */
+ ump_release(h_clone); /* safe to call even if ump_from_secure_id failed */
+ h_clone = UMP_INVALID_MEMORY_HANDLE;
+
+
+ /* a simple save-for-future-use logic inside the driver would just copy the handle (but add a ref manually too!) */
+ /*
+ * void assign_memory_to_job(h)
+ * {
+ */
+ h_copy = h;
+ ump_retain(h_copy); /* manual retain needed as we just assigned the handle, now 2 */
+ /*
+ * }
+ *
+ * void job_completed(void)
+ * {
+ */
+ ump_release(h_copy); /* normal handle release as if we got via an ump_allocate */
+ h_copy = UMP_INVALID_MEMORY_HANDLE;
+ /*
+ * }
+ */
+
+ /* we're now back at ref count 1, and only h is a valid handle */
+ /* enough handle duplication show-off, let's play with the contents instead */
+
+ mapping = ump_map(h, 0, alloc_size);
+ if (NULL == mapping)
+ {
+ /* mapping failure, either out of address space or some other error */
+ goto cleanup;
+ }
+
+ memset(mapping, 0, alloc_size);
+
+ /* let's pretend we're going to start some hw device on this buffer and read the result afterwards */
+ ump_cpu_msync_now(h, UMP_MSYNC_CLEAN, mapping, alloc_size);
+ /*
+ device cache invalidate
+
+ memory barrier
+
+ start device
+
+ memory barrier
+
+ wait for device
+
+ memory barrier
+
+ device cache clean
+
+ memory barrier
+ */
+ ump_cpu_msync_now(h, UMP_MSYNC_CLEAN_AND_INVALIDATE, mapping, alloc_size);
+
+ /* we could now peek at the result produced by the hw device, which is now accessible via our mapping */
+
+ /* unmap the buffer when we're done with it */
+ ump_unmap(h, mapping, alloc_size);
+
+ result = 0;
+
+cleanup:
+ ump_release(h);
+ h = UMP_INVALID_MEMORY_HANDLE;
+
+ ump_close();
+
+ return result;
+}
+
diff --git a/drivers/base/ump/src/Kbuild b/drivers/base/ump/src/Kbuild
new file mode 100644
index 000000000000..de6d30770d15
--- /dev/null
+++ b/drivers/base/ump/src/Kbuild
@@ -0,0 +1,50 @@
+#
+# (C) COPYRIGHT 2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+# Paths required for build
+UMP_PATH = $(src)/../..
+UMP_DEVICEDRV_PATH = $(src)/.
+
+# Set up defaults if not defined by the user
+MALI_UNIT_TEST ?= 0
+
+SRC :=\
+ common/ump_kernel_core.c \
+ common/ump_kernel_descriptor_mapping.c \
+ linux/ump_kernel_linux.c \
+ linux/ump_kernel_linux_mem.c
+
+UNIT_TEST_DEFINES=
+ifeq ($(MALI_UNIT_TEST), 1)
+ MALI_DEBUG ?= 1
+
+ UNIT_TEST_DEFINES = -DMALI_UNIT_TEST=1 \
+ -DMALI_DEBUG=$(MALI_DEBUG)
+endif
+
+# Use our defines when compiling
+ccflags-y += -I$(UMP_PATH) -I$(UMP_DEVICEDRV_PATH) $(UNIT_TEST_DEFINES)
+
+
+# Tell the Linux build system from which .o file to create the kernel module
+obj-$(CONFIG_UMP) += ump.o
+ifeq ($(CONFIG_ION),y)
+ccflags-y += -I$(srctree)/drivers/staging/android/ion -I$(srctree)/include/linux
+obj-$(CONFIG_UMP) += imports/ion/ump_kernel_import_ion.o
+endif
+
+# Tell the Linux build system to enable building of our .c files
+ump-y := $(SRC:.c=.o)
diff --git a/drivers/base/ump/src/Makefile b/drivers/base/ump/src/Makefile
new file mode 100644
index 000000000000..45428adbdf77
--- /dev/null
+++ b/drivers/base/ump/src/Makefile
@@ -0,0 +1,81 @@
+#
+# (C) COPYRIGHT 2008-2014 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+ifneq ($(KBUILD_EXTMOD),)
+include $(KBUILD_EXTMOD)/Makefile.common
+else
+include ./Makefile.common
+endif
+
+# default to building for the host
+ARCH ?= $(shell uname -m)
+
+# linux build system integration
+RELATIVE_ROOT=../../../../..
+ROOT = $(CURDIR)/$(RELATIVE_ROOT)
+
+EXTRA_CFLAGS=-I$(CURDIR)/../../../../include
+
+ifeq ($(MALI_UNIT_TEST),1)
+ EXTRA_CFLAGS += -DMALI_UNIT_TEST=$(MALI_UNIT_TEST)
+endif
+
+# Get any user defined KDIR-<names> or maybe even a hardcoded KDIR
+-include KDIR_CONFIGURATION
+
+# Define host system directory
+KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build
+
+CONFIG ?= $(ARCH)
+
+# default cpu to select
+CPU ?= $(shell uname -m)
+
+# look up KDIR based on CPU selection
+KDIR ?= $(KDIR-$(CPU))
+
+ifeq ($(KDIR),)
+$(error No KDIR found for platform $(CPU))
+endif
+
+# Validate selected config
+ifneq ($(shell [ -d arch-$(CONFIG) ] && [ -f arch-$(CONFIG)/config.h ] && echo "OK"), OK)
+$(warning Current directory is $(shell pwd))
+$(error No configuration found for config $(CONFIG). Check that arch-$(CONFIG)/config.h exists)
+else
+# Link arch to the selected arch-config directory
+$(shell [ -L arch ] && rm arch)
+$(shell ln -sf arch-$(CONFIG) arch)
+$(shell touch arch/config.h)
+endif
+
+EXTRA_SYMBOLS=
+
+ifeq ($(MALI_UNIT_TEST),1)
+ KBASE_PATH=$(ROOT)/kernel/drivers/gpu/arm/midgard
+ EXTRA_SYMBOLS+=$(KBASE_PATH)/tests/internal/src/kernel_assert_module/linux/Module.symvers
+endif
+KDS_PATH=$(ROOT)/kernel/drivers/base/kds
+EXTRA_SYMBOLS+=$(KDS_PATH)/Module.symvers
+
+all:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) EXTRA_CFLAGS="$(EXTRA_CFLAGS) $(SCONS_CFLAGS)" CONFIG_UMP=m KBUILD_EXTRA_SYMBOLS="$(EXTRA_SYMBOLS)" modules
+
+kernelrelease:
+ $(MAKE) -C $(KDIR) KBUILD_EXTRA_SYMBOLS="$(EXTRA_SYMBOLS)" kernelrelease
+
+clean:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
diff --git a/drivers/base/ump/src/Makefile.common b/drivers/base/ump/src/Makefile.common
new file mode 100644
index 000000000000..f29a4c1cffa5
--- /dev/null
+++ b/drivers/base/ump/src/Makefile.common
@@ -0,0 +1,19 @@
+#
+# (C) COPYRIGHT 2008-2010, 2013 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+SRC = $(UMP_FILE_PREFIX)/common/ump_kernel_core.c \
+ $(UMP_FILE_PREFIX)/common/ump_kernel_descriptor_mapping.c
+
diff --git a/drivers/base/ump/src/arch-arm/config.h b/drivers/base/ump/src/arch-arm/config.h
new file mode 100644
index 000000000000..152d98f38af8
--- /dev/null
+++ b/drivers/base/ump/src/arch-arm/config.h
@@ -0,0 +1,27 @@
+/*
+ *
+ * (C) COPYRIGHT 2008-2009, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+#define ARCH_UMP_BACKEND_DEFAULT 1
+#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT 0x00000000
+#define ARCH_UMP_MEMORY_SIZE_DEFAULT 32UL * 1024UL * 1024UL
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/base/ump/src/arch-arm64/config.h b/drivers/base/ump/src/arch-arm64/config.h
new file mode 100644
index 000000000000..cfb14ca54876
--- /dev/null
+++ b/drivers/base/ump/src/arch-arm64/config.h
@@ -0,0 +1,27 @@
+/*
+ *
+ * (C) COPYRIGHT 2008-2009, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+#define ARCH_UMP_BACKEND_DEFAULT 1
+#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT 0x00000000
+#define ARCH_UMP_MEMORY_SIZE_DEFAULT 32UL * 1024UL * 1024UL
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/base/ump/src/common/ump_kernel_core.c b/drivers/base/ump/src/common/ump_kernel_core.c
new file mode 100644
index 000000000000..07aa07739f9f
--- /dev/null
+++ b/drivers/base/ump/src/common/ump_kernel_core.c
@@ -0,0 +1,756 @@
+/*
+ *
+ * (C) COPYRIGHT 2008-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/* module headers */
+#include <linux/ump.h>
+#include <linux/ump-ioctl.h>
+
+/* local headers */
+#include <common/ump_kernel_core.h>
+#include <common/ump_kernel_descriptor_mapping.h>
+#include <ump_arch.h>
+#include <common/ump_kernel_priv.h>
+
+#define UMP_FLAGS_RANGE ((UMP_PROT_SHAREABLE<<1) - 1u)
+
+static umpp_device device;
+
+ump_result umpp_core_constructor(void)
+{
+ mutex_init(&device.secure_id_map_lock);
+ device.secure_id_map = umpp_descriptor_mapping_create(UMP_EXPECTED_IDS, UMP_MAX_IDS);
+ if (NULL != device.secure_id_map)
+ {
+ if (UMP_OK == umpp_device_initialize())
+ {
+ return UMP_OK;
+ }
+ umpp_descriptor_mapping_destroy(device.secure_id_map);
+ }
+ mutex_destroy(&device.secure_id_map_lock);
+
+ return UMP_ERROR;
+}
+
+void umpp_core_destructor(void)
+{
+ umpp_device_terminate();
+ umpp_descriptor_mapping_destroy(device.secure_id_map);
+ mutex_destroy(&device.secure_id_map_lock);
+}
+
+umpp_session *umpp_core_session_start(void)
+{
+ umpp_session * session;
+
+ session = kzalloc(sizeof(*session), GFP_KERNEL);
+ if (NULL != session)
+ {
+ mutex_init(&session->session_lock);
+
+ INIT_LIST_HEAD(&session->memory_usage);
+
+ /* try to create import client session, not a failure if they fail to initialize */
+ umpp_import_handlers_init(session);
+ }
+
+ return session;
+}
+
+void umpp_core_session_end(umpp_session *session)
+{
+ umpp_session_memory_usage * usage, *_usage;
+ UMP_ASSERT(session);
+
+ list_for_each_entry_safe(usage, _usage, &session->memory_usage, link)
+ {
+ printk(KERN_WARNING "UMP: Memory usage cleanup, releasing secure ID %d\n", ump_dd_secure_id_get(usage->mem));
+ ump_dd_release(usage->mem);
+ kfree(usage);
+
+ }
+
+ /* we should now not hold any imported memory objects,
+ * detach all import handlers */
+ umpp_import_handlers_term(session);
+
+ mutex_destroy(&session->session_lock);
+ kfree(session);
+}
+
+ump_dd_handle ump_dd_allocate_64(uint64_t size, ump_alloc_flags flags, ump_dd_security_filter filter_func, ump_dd_final_release_callback final_release_func, void* callback_data)
+{
+ umpp_allocation * alloc;
+ int i;
+
+ UMP_ASSERT(size);
+
+ if (flags & (~UMP_FLAGS_RANGE))
+ {
+ printk(KERN_WARNING "UMP: allocation flags out of allowed bits range\n");
+ return UMP_DD_INVALID_MEMORY_HANDLE;
+ }
+
+ if( ( flags & (UMP_PROT_CPU_RD | UMP_PROT_W_RD | UMP_PROT_X_RD | UMP_PROT_Y_RD | UMP_PROT_Z_RD ) ) == 0 ||
+ ( flags & (UMP_PROT_CPU_WR | UMP_PROT_W_WR | UMP_PROT_X_WR | UMP_PROT_Y_WR | UMP_PROT_Z_WR )) == 0 )
+ {
+ printk(KERN_WARNING "UMP: allocation flags should have at least one read and one write permission bit set\n");
+ return UMP_DD_INVALID_MEMORY_HANDLE;
+ }
+
+ /* check that permission flags are set if the corresponding hint flags are set too */
+ for (i = UMP_DEVICE_CPU_SHIFT; i<=UMP_DEVICE_Z_SHIFT; i+=4)
+ {
+ if (flags & (UMP_HINT_DEVICE_RD<<i))
+ {
+ UMP_ASSERT(flags & (UMP_PROT_DEVICE_RD<<i));
+ }
+ if (flags & (UMP_HINT_DEVICE_WR<<i))
+ {
+ UMP_ASSERT(flags & (UMP_PROT_DEVICE_WR<<i));
+ }
+ }
+
+ alloc = kzalloc(sizeof(*alloc), GFP_KERNEL | __GFP_HARDWALL);
+
+ if (NULL == alloc)
+ goto out1;
+
+ alloc->flags = flags;
+ alloc->filter_func = filter_func;
+ alloc->final_release_func = final_release_func;
+ alloc->callback_data = callback_data;
+ alloc->size = size;
+
+ mutex_init(&alloc->map_list_lock);
+ INIT_LIST_HEAD(&alloc->map_list);
+ atomic_set(&alloc->refcount, 1);
+
+#ifdef CONFIG_KDS
+ kds_resource_init(&alloc->kds_res);
+#endif
+
+ if (!(alloc->flags & UMP_PROT_SHAREABLE))
+ {
+ alloc->owner = get_current()->pid;
+ }
+
+ if (0 != umpp_phys_commit(alloc))
+ {
+ goto out2;
+ }
+
+ /* all set up, allocate an ID for it */
+
+ mutex_lock(&device.secure_id_map_lock);
+ alloc->id = umpp_descriptor_mapping_allocate(device.secure_id_map, (void*)alloc);
+ mutex_unlock(&device.secure_id_map_lock);
+
+ if ((int)alloc->id == 0)
+ {
+ /* failed to allocate a secure_id */
+ goto out3;
+ }
+
+ return alloc;
+
+out3:
+ umpp_phys_free(alloc);
+out2:
+ kfree(alloc);
+out1:
+ return UMP_DD_INVALID_MEMORY_HANDLE;
+}
+
+uint64_t ump_dd_size_get_64(const ump_dd_handle mem)
+{
+ umpp_allocation * alloc;
+
+ UMP_ASSERT(mem);
+
+ alloc = (umpp_allocation*)mem;
+
+ return alloc->size;
+}
+
+/*
+ * UMP v1 API
+ */
+unsigned long ump_dd_size_get(ump_dd_handle mem)
+{
+ umpp_allocation * alloc;
+
+ UMP_ASSERT(mem);
+
+ alloc = (umpp_allocation*)mem;
+
+ UMP_ASSERT(alloc->flags & UMP_CONSTRAINT_32BIT_ADDRESSABLE);
+ UMP_ASSERT(alloc->size <= UMP_UINT32_MAX);
+
+ return (unsigned long)alloc->size;
+}
+
+ump_secure_id ump_dd_secure_id_get(const ump_dd_handle mem)
+{
+ umpp_allocation * alloc;
+
+ UMP_ASSERT(mem);
+
+ alloc = (umpp_allocation*)mem;
+
+ return alloc->id;
+}
+
+#ifdef CONFIG_KDS
+struct kds_resource * ump_dd_kds_resource_get(const ump_dd_handle mem)
+{
+ umpp_allocation * alloc;
+
+ UMP_ASSERT(mem);
+
+ alloc = (umpp_allocation*)mem;
+
+ return &alloc->kds_res;
+}
+#endif
+
+ump_alloc_flags ump_dd_allocation_flags_get(const ump_dd_handle mem)
+{
+ const umpp_allocation * alloc;
+
+ UMP_ASSERT(mem);
+ alloc = (const umpp_allocation *)mem;
+
+ return alloc->flags;
+}
+
+ump_dd_handle ump_dd_from_secure_id(ump_secure_id secure_id)
+{
+ umpp_allocation * alloc = UMP_DD_INVALID_MEMORY_HANDLE;
+
+ mutex_lock(&device.secure_id_map_lock);
+
+ if (0 == umpp_descriptor_mapping_lookup(device.secure_id_map, secure_id, (void**)&alloc))
+ {
+ if (NULL != alloc->filter_func)
+ {
+ if (!alloc->filter_func(secure_id, alloc, alloc->callback_data))
+ {
+ alloc = UMP_DD_INVALID_MEMORY_HANDLE; /* the filter denied access */
+ }
+ }
+
+ /* check permission to access it */
+ if ((UMP_DD_INVALID_MEMORY_HANDLE != alloc) && !(alloc->flags & UMP_PROT_SHAREABLE))
+ {
+ if (alloc->owner != get_current()->pid)
+ {
+ alloc = UMP_DD_INVALID_MEMORY_HANDLE; /*no rights for the current process*/
+ }
+ }
+
+ if (UMP_DD_INVALID_MEMORY_HANDLE != alloc)
+ {
+ if( ump_dd_retain(alloc) != UMP_DD_SUCCESS)
+ {
+ alloc = UMP_DD_INVALID_MEMORY_HANDLE;
+ }
+ }
+ }
+ mutex_unlock(&device.secure_id_map_lock);
+
+ return alloc;
+}
+
+/*
+ * UMP v1 API
+ */
+ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id)
+{
+ return ump_dd_from_secure_id(secure_id);
+}
+
+int ump_dd_retain(ump_dd_handle mem)
+{
+ umpp_allocation * alloc;
+
+ UMP_ASSERT(mem);
+
+ alloc = (umpp_allocation*)mem;
+
+ /* check for overflow */
+ while(1)
+ {
+ int refcnt = atomic_read(&alloc->refcount);
+ if (refcnt + 1 > 0)
+ {
+ if(atomic_cmpxchg(&alloc->refcount, refcnt, refcnt + 1) == refcnt)
+ {
+ return 0;
+ }
+ }
+ else
+ {
+ return -EBUSY;
+ }
+ }
+}
+
+/*
+ * UMP v1 API
+ */
+void ump_dd_reference_add(ump_dd_handle mem)
+{
+ ump_dd_retain(mem);
+}
+
+
+void ump_dd_release(ump_dd_handle mem)
+{
+ umpp_allocation * alloc;
+ uint32_t new_cnt;
+
+ UMP_ASSERT(mem);
+
+ alloc = (umpp_allocation*)mem;
+
+ /* secure the id for lookup while releasing */
+ mutex_lock(&device.secure_id_map_lock);
+
+ /* do the actual release */
+ new_cnt = atomic_sub_return(1, &alloc->refcount);
+ if (0 == new_cnt)
+ {
+ /* remove from the table as this was the last ref */
+ umpp_descriptor_mapping_remove(device.secure_id_map, alloc->id);
+ }
+
+ /* release the lock as early as possible */
+ mutex_unlock(&device.secure_id_map_lock);
+
+ if (0 != new_cnt)
+ {
+ /* exit if still have refs */
+ return;
+ }
+
+ UMP_ASSERT(list_empty(&alloc->map_list));
+
+#ifdef CONFIG_KDS
+ if (kds_resource_term(&alloc->kds_res))
+ {
+ printk(KERN_ERR "ump_dd_release: kds_resource_term failed,"
+ "unable to release UMP allocation\n");
+ return;
+ }
+#endif
+ /* cleanup */
+ if (NULL != alloc->final_release_func)
+ {
+ alloc->final_release_func(alloc, alloc->callback_data);
+ }
+
+ if (0 == (alloc->management_flags & UMP_MGMT_EXTERNAL))
+ {
+ umpp_phys_free(alloc);
+ }
+ else
+ {
+ kfree(alloc->block_array);
+ }
+
+ mutex_destroy(&alloc->map_list_lock);
+
+ kfree(alloc);
+}
+
+/*
+ * UMP v1 API
+ */
+void ump_dd_reference_release(ump_dd_handle mem)
+{
+ ump_dd_release(mem);
+}
+
+void ump_dd_phys_blocks_get_64(const ump_dd_handle mem, uint64_t * const pCount, const ump_dd_physical_block_64 ** const pArray)
+{
+ const umpp_allocation * alloc;
+ UMP_ASSERT(pCount);
+ UMP_ASSERT(pArray);
+ UMP_ASSERT(mem);
+ alloc = (const umpp_allocation *)mem;
+ *pCount = alloc->blocksCount;
+ *pArray = alloc->block_array;
+}
+
+/*
+ * UMP v1 API
+ */
+ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle mem, ump_dd_physical_block * const blocks, unsigned long num_blocks)
+{
+ const umpp_allocation * alloc;
+ unsigned long i;
+ UMP_ASSERT(mem);
+ UMP_ASSERT(blocks);
+ UMP_ASSERT(num_blocks);
+
+ alloc = (const umpp_allocation *)mem;
+
+ UMP_ASSERT(alloc->flags & UMP_CONSTRAINT_32BIT_ADDRESSABLE);
+
+ if((uint64_t)num_blocks != alloc->blocksCount)
+ {
+ return UMP_DD_INVALID;
+ }
+
+ for( i = 0; i < num_blocks; i++)
+ {
+ UMP_ASSERT(alloc->block_array[i].addr <= UMP_UINT32_MAX);
+ UMP_ASSERT(alloc->block_array[i].size <= UMP_UINT32_MAX);
+
+ blocks[i].addr = (unsigned long)alloc->block_array[i].addr;
+ blocks[i].size = (unsigned long)alloc->block_array[i].size;
+ }
+
+ return UMP_DD_SUCCESS;
+}
+/*
+ * UMP v1 API
+ */
+ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle mem, unsigned long index, ump_dd_physical_block * const block)
+{
+	const umpp_allocation * alloc;
+	UMP_ASSERT(mem);
+	UMP_ASSERT(block);
+	alloc = (const umpp_allocation *)mem;
+
+	UMP_ASSERT(alloc->flags & UMP_CONSTRAINT_32BIT_ADDRESSABLE);
+
+	/* NOTE(review): 'index' is not validated against alloc->blocksCount,
+	 * unlike the other v1 accessors - an out-of-range index reads past
+	 * the end of block_array. Confirm callers guarantee the range. */
+	UMP_ASSERT(alloc->block_array[index].addr <= UMP_UINT32_MAX);
+	UMP_ASSERT(alloc->block_array[index].size <= UMP_UINT32_MAX);
+
+	block->addr = (unsigned long)alloc->block_array[index].addr;
+	block->size = (unsigned long)alloc->block_array[index].size;
+
+	return UMP_DD_SUCCESS;
+}
+
+/*
+ * UMP v1 API
+ */
+unsigned long ump_dd_phys_block_count_get(ump_dd_handle mem)
+{
+	const umpp_allocation * alloc;
+	UMP_ASSERT(mem);
+	alloc = (const umpp_allocation *)mem;
+
+	/* v1 API: only valid for 32-bit addressable allocations whose block
+	 * count fits in the v1 'unsigned long' return type */
+	UMP_ASSERT(alloc->flags & UMP_CONSTRAINT_32BIT_ADDRESSABLE);
+	UMP_ASSERT(alloc->blocksCount <= UMP_UINT32_MAX);
+
+	return (unsigned long)alloc->blocksCount;
+}
+
+umpp_cpu_mapping * umpp_dd_find_enclosing_mapping(umpp_allocation * alloc, void *uaddr, size_t size)
+{
+	umpp_cpu_mapping *map;
+
+	/* inclusive byte range [target_first, target_last] to be covered */
+	void *target_first = uaddr;
+	void *target_last = (void*)((uintptr_t)uaddr - 1 + size);
+
+	if (target_last < target_first) /* wrapped */
+	{
+		return NULL;
+	}
+
+	/* scan the allocation's registered CPU mappings for one that fully
+	 * encloses the requested range */
+	mutex_lock(&alloc->map_list_lock);
+	list_for_each_entry(map, &alloc->map_list, link)
+	{
+		if ( map->vaddr_start <= target_first &&
+		   (void*)((uintptr_t)map->vaddr_start + (map->nr_pages << PAGE_SHIFT) - 1) >= target_last)
+		{
+			goto out;
+		}
+	}
+	map = NULL; /* no enclosing mapping found */
+out:
+	mutex_unlock(&alloc->map_list_lock);
+
+	return map;
+}
+
+void umpp_dd_add_cpu_mapping(umpp_allocation * alloc, umpp_cpu_mapping * map)
+{
+	UMP_ASSERT(alloc);
+	UMP_ASSERT(map);
+	/* register the new CPU VA mapping; ownership of 'map' passes to the
+	 * allocation (it is kfree'd by umpp_dd_remove_cpu_mapping) */
+	mutex_lock(&alloc->map_list_lock);
+	list_add(&map->link, &alloc->map_list);
+	mutex_unlock(&alloc->map_list_lock);
+}
+
+void umpp_dd_remove_cpu_mapping(umpp_allocation * alloc, umpp_cpu_mapping * target)
+{
+	umpp_cpu_mapping * map;
+
+	UMP_ASSERT(alloc);
+	UMP_ASSERT(target);
+
+	/* only unlink and free the tracker if it really is on this
+	 * allocation's list - guards against unmatched release calls */
+	mutex_lock(&alloc->map_list_lock);
+	list_for_each_entry(map, &alloc->map_list, link)
+	{
+		if (map == target)
+		{
+			list_del(&target->link);
+			kfree(target);
+			mutex_unlock(&alloc->map_list_lock);
+			return;
+		}
+	}
+
+	/* not found, error */
+	/* NOTE(review): map_list_lock is still held here; this is only
+	 * acceptable because UMP_ASSERT(0) BUG()s and never returns */
+	UMP_ASSERT(0);
+}
+
+int umpp_dd_find_start_block(const umpp_allocation * alloc, uint64_t offset, uint64_t * const block_index, uint64_t * const block_internal_offset)
+{
+	uint64_t i;
+
+	/* walk the block list, consuming 'offset' until it falls inside one */
+	for (i = 0 ; i < alloc->blocksCount; i++)
+	{
+		if (offset < alloc->block_array[i].size)
+		{
+			/* found the block_array element containing this offset */
+			*block_index = i;
+			*block_internal_offset = offset;
+			return 0;
+		}
+		offset -= alloc->block_array[i].size;
+	}
+
+	/* offset lies beyond the end of the allocation */
+	return -ENXIO;
+}
+
+void umpp_dd_cpu_msync_now(ump_dd_handle mem, ump_cpu_msync_op op, void * address, size_t size)
+{
+	umpp_allocation * alloc;
+	void *vaddr;
+	umpp_cpu_mapping * mapping;
+	uint64_t virt_page_off; /* offset of given address from beginning of the virtual mapping */
+	uint64_t phys_page_off; /* offset of the virtual mapping from the beginning of the physical buffer */
+	uint64_t page_count; /* number of pages to sync */
+	uint64_t i;
+	uint64_t block_idx;
+	uint64_t block_offset;
+	uint64_t paddr;
+
+	UMP_ASSERT((UMP_MSYNC_CLEAN == op) || (UMP_MSYNC_CLEAN_AND_INVALIDATE == op));
+
+	alloc = (umpp_allocation*)mem;
+	vaddr = (void*)(uintptr_t)address;
+
+	if((alloc->flags & UMP_CONSTRAINT_UNCACHED) != 0)
+	{
+		/* mapping is not cached */
+		return;
+	}
+
+	/* the requested range must lie fully inside one known CPU mapping of
+	 * this allocation */
+	mapping = umpp_dd_find_enclosing_mapping(alloc, vaddr, size);
+	if (NULL == mapping)
+	{
+		printk(KERN_WARNING "UMP: Illegal cache sync address %lx\n", (uintptr_t)vaddr);
+		return; /* invalid pointer or size causes out-of-bounds */
+	}
+
+	/* we already know that address + size don't wrap around as umpp_dd_find_enclosing_mapping didn't fail */
+	page_count = ((((((uintptr_t)address + size - 1) & PAGE_MASK) - ((uintptr_t)address & PAGE_MASK))) >> PAGE_SHIFT) + 1;
+	virt_page_off = (vaddr - mapping->vaddr_start) >> PAGE_SHIFT;
+	phys_page_off = mapping->page_off;
+
+	/* translate the page offset within the buffer into a (block, offset) pair */
+	if (umpp_dd_find_start_block(alloc, (virt_page_off + phys_page_off) << PAGE_SHIFT, &block_idx, &block_offset))
+	{
+		/* should not fail as a valid mapping was found, so the phys mem must exists */
+		printk(KERN_WARNING "UMP: Unable to find physical start block with offset %llx\n", virt_page_off + phys_page_off);
+		UMP_ASSERT(0);
+		return;
+	}
+
+	/* physical address of first byte: block base + offset in block + sub-page offset */
+	paddr = alloc->block_array[block_idx].addr + block_offset + (((uintptr_t)vaddr) & ((1u << PAGE_SHIFT)-1));
+
+	/* walk the range page by page, following the physical block chain */
+	for (i = 0; i < page_count; i++)
+	{
+		size_t offset = ((uintptr_t)vaddr) & ((1u << PAGE_SHIFT)-1);
+		size_t sz = min((size_t)PAGE_SIZE - offset, size);
+
+		/* check if we've overrun the current block, if so move to the next block */
+		if (paddr >= (alloc->block_array[block_idx].addr + alloc->block_array[block_idx].size))
+		{
+			block_idx++;
+			UMP_ASSERT(block_idx < alloc->blocksCount);
+			paddr = alloc->block_array[block_idx].addr;
+		}
+
+		if (UMP_MSYNC_CLEAN == op)
+		{
+			ump_sync_to_memory(paddr, vaddr, sz);
+		}
+		else /* (UMP_MSYNC_CLEAN_AND_INVALIDATE == op) already validated on entry */
+		{
+			ump_sync_to_cpu(paddr, vaddr, sz);
+		}
+
+		/* advance to next page */
+		vaddr = (void*)((uintptr_t)vaddr + sz);
+		size -= sz;
+		paddr += sz;
+	}
+}
+
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_create_from_phys_blocks_64(const ump_dd_physical_block_64 * blocks, uint64_t num_blocks, ump_alloc_flags flags, ump_dd_security_filter filter_func, ump_dd_final_release_callback final_release_func, void* callback_data)
+{
+	uint64_t size = 0;
+	uint64_t i;
+	umpp_allocation * alloc;
+
+	UMP_ASSERT(blocks);
+	UMP_ASSERT(num_blocks);
+
+	/* total allocation size is the sum of the supplied physical blocks */
+	for (i = 0; i < num_blocks; i++)
+	{
+		size += blocks[i].size;
+	}
+	UMP_ASSERT(size);
+
+	if (flags & (~UMP_FLAGS_RANGE))
+	{
+		printk(KERN_WARNING "UMP: allocation flags out of allowed bits range\n");
+		return UMP_DD_INVALID_MEMORY_HANDLE;
+	}
+
+	/* at least one device must be granted read or write access */
+	if( ( flags & (UMP_PROT_CPU_RD | UMP_PROT_W_RD | UMP_PROT_X_RD | UMP_PROT_Y_RD | UMP_PROT_Z_RD
+	    | UMP_PROT_CPU_WR | UMP_PROT_W_WR | UMP_PROT_X_WR | UMP_PROT_Y_WR | UMP_PROT_Z_WR )) == 0 )
+	{
+		printk(KERN_WARNING "UMP: allocation flags should have at least one read or write permission bit set\n");
+		return UMP_DD_INVALID_MEMORY_HANDLE;
+	}
+
+	/*check permission flags to be set if hit flags are set too*/
+	/* each device occupies 4 consecutive flag bits, hence the stride of 4 */
+	for (i = UMP_DEVICE_CPU_SHIFT; i<=UMP_DEVICE_Z_SHIFT; i+=4)
+	{
+		if (flags & (UMP_HINT_DEVICE_RD<<i))
+		{
+			UMP_ASSERT(flags & (UMP_PROT_DEVICE_RD<<i));
+		}
+		if (flags & (UMP_HINT_DEVICE_WR<<i))
+		{
+			UMP_ASSERT(flags & (UMP_PROT_DEVICE_WR<<i));
+		}
+	}
+
+	alloc = kzalloc(sizeof(*alloc),__GFP_HARDWALL | GFP_KERNEL);
+
+	if (NULL == alloc)
+	{
+		goto out1;
+	}
+
+	/* private copy of the caller's block list */
+	alloc->block_array = kzalloc(sizeof(ump_dd_physical_block_64) * num_blocks,__GFP_HARDWALL | GFP_KERNEL);
+	if (NULL == alloc->block_array)
+	{
+		goto out2;
+	}
+
+	memcpy(alloc->block_array, blocks, sizeof(ump_dd_physical_block_64) * num_blocks);
+
+#ifdef CONFIG_KDS
+	kds_resource_init(&alloc->kds_res);
+#endif
+	alloc->size = size;
+	alloc->blocksCount = num_blocks;
+	alloc->flags = flags;
+	alloc->filter_func = filter_func;
+	alloc->final_release_func = final_release_func;
+	alloc->callback_data = callback_data;
+
+	/* non-shareable memory is bound to the creating process */
+	if (!(alloc->flags & UMP_PROT_SHAREABLE))
+	{
+		alloc->owner = get_current()->pid;
+	}
+
+	mutex_init(&alloc->map_list_lock);
+	INIT_LIST_HEAD(&alloc->map_list);
+	atomic_set(&alloc->refcount, 1);
+
+	/* all set up, allocate an ID */
+
+	mutex_lock(&device.secure_id_map_lock);
+	alloc->id = umpp_descriptor_mapping_allocate(device.secure_id_map, (void*)alloc);
+	mutex_unlock(&device.secure_id_map_lock);
+
+	/* descriptor 0 is the mapping layer's reserved failure value */
+	if ((int)alloc->id == 0)
+	{
+		/* failed to allocate a secure_id */
+		goto out3;
+	}
+
+	/* mark as externally owned: release frees the block array copy but
+	 * never the physical memory itself */
+	alloc->management_flags |= UMP_MGMT_EXTERNAL;
+
+	return alloc;
+
+out3:
+	kfree(alloc->block_array);
+out2:
+	kfree(alloc);
+out1:
+	return UMP_DD_INVALID_MEMORY_HANDLE;
+}
+
+
+/*
+ * UMP v1 API
+ */
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block * blocks, unsigned long num_blocks)
+{
+	ump_dd_handle mem;
+	ump_dd_physical_block_64 *block_64_array;
+	ump_alloc_flags flags = UMP_V1_API_DEFAULT_ALLOCATION_FLAGS;
+	unsigned long i;
+
+	UMP_ASSERT(blocks);
+	UMP_ASSERT(num_blocks);
+
+	/* widen the v1 32-bit block descriptors to the 64-bit layout used by
+	 * the v2 entry point */
+	block_64_array = kzalloc(num_blocks * sizeof(*block_64_array), __GFP_HARDWALL | GFP_KERNEL);
+
+	if(block_64_array == NULL)
+	{
+		return UMP_DD_INVALID_MEMORY_HANDLE;
+	}
+
+	/* copy physical blocks */
+	for( i = 0; i < num_blocks; i++)
+	{
+		block_64_array[i].addr = blocks[i].addr;
+		block_64_array[i].size = blocks[i].size;
+	}
+
+	mem = ump_dd_create_from_phys_blocks_64(block_64_array, num_blocks, flags, NULL, NULL, NULL);
+
+	/* the v2 call takes its own copy of the array, so the temporary can go */
+	kfree(block_64_array);
+
+	return mem;
+
+}
diff --git a/drivers/base/ump/src/common/ump_kernel_core.h b/drivers/base/ump/src/common/ump_kernel_core.h
new file mode 100644
index 000000000000..e71372ab1749
--- /dev/null
+++ b/drivers/base/ump/src/common/ump_kernel_core.h
@@ -0,0 +1,228 @@
+/*
+ *
+ * (C) COPYRIGHT 2008-2013, 2017 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef _UMP_KERNEL_CORE_H_
+#define _UMP_KERNEL_CORE_H_
+
+
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <asm/atomic.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/cred.h>
+#include <asm/mmu_context.h>
+
+#ifdef CONFIG_KDS
+#include <linux/kds.h>
+#endif
+#include <linux/ump-common.h>
+#include <common/ump_kernel_descriptor_mapping.h>
+
+/* forward decl */
+struct umpp_session;
+
+/**
+ * UMP handle metadata.
+ * Tracks various data about a handle not of any use to user space.
+ * Stored in umpp_allocation::management_flags.
+ */
+typedef enum
+{
+	UMP_MGMT_EXTERNAL = (1ul << 0) /**< Handle created via the ump_dd_create_from_phys_blocks interface; the physical memory is owned by the creator, so release never frees it */
+	/* (1ul << 31) not to be used */
+} umpp_management_flags;
+
+/**
+ * Structure tracking the single global UMP device.
+ * Holds global data like the ID map.
+ * Accessed as the file-scope 'device' object in ump_kernel_core.c.
+ */
+typedef struct umpp_device
+{
+	struct mutex secure_id_map_lock; /**< Lock protecting access to the map */
+	umpp_descriptor_mapping * secure_id_map; /**< Map of all known secure IDs on the system */
+} umpp_device;
+
+/**
+ * Structure tracking a single CPU VA mapping of a UMP allocation.
+ * Tracks info about a mapping so we can verify cache maintenance
+ * operations and help in the unmap cleanup.
+ */
+typedef struct umpp_cpu_mapping
+{
+	struct list_head link; /**< link to list of mappings for an allocation */
+	void *vaddr_start; /**< CPU VA start of the mapping */
+	size_t nr_pages; /**< Size (in pages) of the mapping */
+	uint64_t page_off; /**< Offset (in pages) from start of the allocation where the mapping starts */
+	ump_dd_handle handle; /**< Which handle this mapping is linked to */
+	struct umpp_session * session; /**< Which session created the mapping */
+} umpp_cpu_mapping;
+
+/**
+ * Structure tracking a UMP allocation.
+ * Represents a memory allocation with its ID.
+ * Tracks all needed meta-data about an allocation.
+ * */
+typedef struct umpp_allocation
+{
+	ump_secure_id id; /**< Secure ID of the allocation */
+	atomic_t refcount; /**< Usage count */
+
+	ump_alloc_flags flags; /**< Flags for all supported devices */
+	uint32_t management_flags; /**< Management flags tracking */
+
+	pid_t owner; /**< The process ID owning the memory if not sharable */
+
+	ump_dd_security_filter filter_func; /**< Hook to verify use, called during retains from new clients */
+	ump_dd_final_release_callback final_release_func; /**< Hook called when the last reference is removed */
+	void* callback_data; /**< Additional data given to release hook */
+
+	uint64_t size; /**< Size (in bytes) of the allocation */
+	uint64_t blocksCount; /**< Number of physical blocks the allocation is built up of */
+	ump_dd_physical_block_64 * block_array; /**< Array, one entry per block, describing block start and length */
+
+	struct mutex map_list_lock; /**< Lock protecting the map_list */
+	struct list_head map_list; /**< Tracks all CPU VA mappings of this allocation */
+
+#ifdef CONFIG_KDS
+	struct kds_resource kds_res; /**< The KDS resource controlling access to this allocation */
+#endif
+
+	void * backendData; /**< Physical memory backend meta-data */
+} umpp_allocation;
+
+/**
+ * Structure tracking use of UMP memory by a session.
+ * Tracks the use of an allocation by a session so session termination can clean up any outstanding references.
+ * Also protects against non-matched release calls from user space.
+ */
+typedef struct umpp_session_memory_usage
+{
+	ump_secure_id id; /**< ID being used. For quick look-up */
+	ump_dd_handle mem; /**< Handle being used. */
+
+	/**
+	 * Track how many times has the process retained this handle in the kernel.
+	 * This should usually just be 1(allocated or resolved) or 2(mapped),
+	 * but could be more if someone is playing with the low-level API
+	 * */
+	atomic_t process_usage_count;
+
+	struct list_head link; /**< link to other usage trackers for a session */
+} umpp_session_memory_usage;
+
+/**
+ * Structure representing a session/client.
+ * Tracks the UMP allocations being used by this client.
+ */
+typedef struct umpp_session
+{
+	struct mutex session_lock; /**< Lock for memory usage manipulation */
+	struct list_head memory_usage; /**< list of memory currently being used by this session */
+	void* import_handler_data[UMPP_EXTERNAL_MEM_COUNT]; /**< Import modules per-session data pointer */
+} umpp_session;
+
+/**
+ * UMP core setup.
+ * Called by any OS specific startup function to initialize the common part.
+ * @return UMP_OK if core initialized correctly, any other value for errors
+ */
+ump_result umpp_core_constructor(void);
+
+/**
+ * UMP core teardown.
+ * Called by any OS specific unload function to clean up the common part.
+ */
+void umpp_core_destructor(void);
+
+/**
+ * UMP session start.
+ * Called by any OS specific session handler when a new session is detected
+ * @return Non-NULL if a matching core session could be set up. NULL on failure
+ */
+umpp_session *umpp_core_session_start(void);
+
+/**
+ * UMP session end.
+ * Called by any OS specific session handler when a session is ended/terminated.
+ * @param session The core session object returned by ump_core_session_start
+ */
+void umpp_core_session_end(umpp_session *session);
+
+/**
+ * Find a mapping object (if any) for this allocation.
+ * Called by any function needing to identify a mapping from a user virtual address.
+ * Verifies that the whole range to be within a mapping object.
+ * @param alloc The UMP allocation to find a matching mapping object of
+ * @param uaddr User mapping address to find the mapping object for
+ * @param size Length of the mapping
+ * @return NULL on error (no match found), pointer to mapping object if match found
+ */
+umpp_cpu_mapping * umpp_dd_find_enclosing_mapping(umpp_allocation * alloc, void* uaddr, size_t size);
+
+/**
+ * Register a new mapping of an allocation.
+ * Called by functions creating a new mapping of an allocation, typically OS specific handlers.
+ * @param alloc The allocation object which has been mapped
+ * @param map Info about the mapping
+ */
+void umpp_dd_add_cpu_mapping(umpp_allocation * alloc, umpp_cpu_mapping * map);
+
+/**
+ * Remove and free mapping object from an allocation.
+ * @param alloc The allocation object to remove the mapping info from
+ * @param target The mapping object to remove
+ */
+void umpp_dd_remove_cpu_mapping(umpp_allocation * alloc, umpp_cpu_mapping * target);
+
+/**
+ * Helper to find a block in the blockArray which holds a given byte offset.
+ * @param alloc The allocation object to find the block in
+ * @param offset Offset (in bytes) from allocation start to find the block of
+ * @param[out] block_index Pointer to the index of the block matching
+ * @param[out] block_internal_offset Offset within the returned block of the searched offset
+ * @return 0 if a matching block was found, any other value for error
+ */
+int umpp_dd_find_start_block(const umpp_allocation * alloc, uint64_t offset, uint64_t * const block_index, uint64_t * const block_internal_offset);
+
+/**
+ * Cache maintenance helper.
+ * Performs the requested cache operation on the given handle.
+ * @param mem Allocation handle
+ * @param op Cache maintenance operation to perform
+ * @param address User mapping at which to do the operation
+ * @param size Length (in bytes) of the range to do the operation on
+ */
+void umpp_dd_cpu_msync_now(ump_dd_handle mem, ump_cpu_msync_op op, void * address, size_t size);
+
+/**
+ * Import module session early init.
+ * Calls session_begin on all installed import modules.
+ * @param session The core session object to initialize the import handlers for
+ * */
+void umpp_import_handlers_init(umpp_session * session);
+
+/**
+ * Import module session cleanup.
+ * Calls session_end on all import modules bound to the session.
+ * @param session The core session object to terminate the import handlers for
+ */
+void umpp_import_handlers_term(umpp_session * session);
+
+#endif /* _UMP_KERNEL_CORE_H_ */
+
diff --git a/drivers/base/ump/src/common/ump_kernel_descriptor_mapping.c b/drivers/base/ump/src/common/ump_kernel_descriptor_mapping.c
new file mode 100644
index 000000000000..c5b0d7440081
--- /dev/null
+++ b/drivers/base/ump/src/common/ump_kernel_descriptor_mapping.c
@@ -0,0 +1,162 @@
+/*
+ *
+ * (C) COPYRIGHT 2008-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+
+#include <common/ump_kernel_descriptor_mapping.h>
+#include <common/ump_kernel_priv.h>
+
+#define MALI_PAD_INT(x) (((x) + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1))
+
+/**
+ * Allocate a descriptor table capable of holding 'count' mappings
+ * @param count Number of mappings in the table
+ * @return Pointer to a new table, NULL on error
+ */
+static umpp_descriptor_table * descriptor_table_alloc(unsigned int count);
+
+/**
+ * Free a descriptor table
+ * @param table The table to free
+ */
+static void descriptor_table_free(umpp_descriptor_table * table);
+
+umpp_descriptor_mapping * umpp_descriptor_mapping_create(unsigned int init_entries, unsigned int max_entries)
+{
+	umpp_descriptor_mapping * map = kzalloc(sizeof(umpp_descriptor_mapping), GFP_KERNEL);
+
+	/* round both sizes up to a whole number of longs for the usage bitmap */
+	init_entries = MALI_PAD_INT(init_entries);
+	max_entries = MALI_PAD_INT(max_entries);
+
+	if (NULL != map)
+	{
+		map->table = descriptor_table_alloc(init_entries);
+		if (NULL != map->table)
+		{
+			init_rwsem( &map->lock);
+			/* mark descriptor 0 as used: it is the reserved failure/invalid ID */
+			set_bit(0, map->table->usage);
+			map->max_nr_mappings_allowed = max_entries;
+			map->current_nr_mappings = init_entries;
+			return map;
+
+			/* NOTE(review): the call below is unreachable (directly after
+			 * the return); it looks like leftover cleanup code */
+			descriptor_table_free(map->table);
+		}
+		kfree(map);
+	}
+	return NULL;
+}
+
+void umpp_descriptor_mapping_destroy(umpp_descriptor_mapping * map)
+{
+	UMP_ASSERT(NULL != map);
+	/* frees the table and the map itself; the caller must guarantee no
+	 * concurrent users remain */
+	descriptor_table_free(map->table);
+	kfree(map);
+}
+
+unsigned int umpp_descriptor_mapping_allocate(umpp_descriptor_mapping * map, void * target)
+{
+	int descriptor = 0; /* 0 is the reserved failure value */
+	UMP_ASSERT(NULL != map);
+	down_write( &map->lock);
+	descriptor = find_first_zero_bit(map->table->usage, map->current_nr_mappings);
+	if (descriptor == map->current_nr_mappings)
+	{
+		/* no free descriptor, try to expand the table */
+		umpp_descriptor_table * new_table;
+		umpp_descriptor_table * old_table = map->table;
+		int nr_mappings_new = map->current_nr_mappings + BITS_PER_LONG;
+
+		if (map->current_nr_mappings >= map->max_nr_mappings_allowed)
+		{
+			/* already at the configured hard limit, give up */
+			descriptor = 0;
+			goto unlock_and_exit;
+		}
+
+		new_table = descriptor_table_alloc(nr_mappings_new);
+		if (NULL == new_table)
+		{
+			descriptor = 0;
+			goto unlock_and_exit;
+		}
+
+		/* carry the usage bitmap and the mapping pointers over into the
+		 * bigger table; 'descriptor' still indexes the first free slot */
+		memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long)*map->current_nr_mappings) / BITS_PER_LONG);
+		memcpy(new_table->mappings, old_table->mappings, map->current_nr_mappings * sizeof(void*));
+
+		map->table = new_table;
+		map->current_nr_mappings = nr_mappings_new;
+		descriptor_table_free(old_table);
+	}
+
+	/* we have found a valid descriptor, set the value and usage bit */
+	set_bit(descriptor, map->table->usage);
+	map->table->mappings[descriptor] = target;
+
+unlock_and_exit:
+	up_write(&map->lock);
+	return descriptor;
+}
+
+int umpp_descriptor_mapping_lookup(umpp_descriptor_mapping * map, unsigned int descriptor, void** const target)
+{
+	int result = -EINVAL;
+	UMP_ASSERT(map);
+	UMP_ASSERT(target);
+	down_read(&map->lock);
+	/* descriptor 0 is reserved, the index must be in range and the slot
+	 * must be marked in use */
+	if ( (descriptor > 0) && (descriptor < map->current_nr_mappings) && test_bit(descriptor, map->table->usage) )
+	{
+		*target = map->table->mappings[descriptor];
+		result = 0;
+	}
+	/* keep target untouched if the descriptor was not found */
+	up_read(&map->lock);
+	return result;
+}
+
+void umpp_descriptor_mapping_remove(umpp_descriptor_mapping * map, unsigned int descriptor)
+{
+	UMP_ASSERT(map);
+	down_write(&map->lock);
+	/* silently ignores reserved ID 0, out-of-range IDs and already-free slots */
+	if ( (descriptor > 0) && (descriptor < map->current_nr_mappings) && test_bit(descriptor, map->table->usage) )
+	{
+		map->table->mappings[descriptor] = NULL;
+		clear_bit(descriptor, map->table->usage);
+	}
+	up_write(&map->lock);
+}
+
+static umpp_descriptor_table * descriptor_table_alloc(unsigned int count)
+{
+	umpp_descriptor_table * table;
+
+	/* single zeroed allocation holding the struct, the usage bitmap
+	 * (count bits) and the mappings array (count pointers) */
+	table = kzalloc(sizeof(umpp_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG) + (sizeof(void*) * count), __GFP_HARDWALL | GFP_KERNEL );
+
+	if (NULL != table)
+	{
+		/* carve the bitmap and the pointer array out of the trailing storage */
+		table->usage = (unsigned long*)((u8*)table + sizeof(umpp_descriptor_table));
+		table->mappings = (void**)((u8*)table + sizeof(umpp_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG));
+	}
+
+	return table;
+}
+
+static void descriptor_table_free(umpp_descriptor_table * table)
+{
+	UMP_ASSERT(table);
+	/* usage and mappings live inside the same allocation as the struct,
+	 * so a single kfree releases everything */
+	kfree(table);
+}
+
diff --git a/drivers/base/ump/src/common/ump_kernel_descriptor_mapping.h b/drivers/base/ump/src/common/ump_kernel_descriptor_mapping.h
new file mode 100644
index 000000000000..d06c1455371a
--- /dev/null
+++ b/drivers/base/ump/src/common/ump_kernel_descriptor_mapping.h
@@ -0,0 +1,94 @@
+/*
+ *
+ * (C) COPYRIGHT 2008-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file ump_kernel_descriptor_mapping.h
+ */
+
+#ifndef _UMP_KERNEL_DESCRIPTOR_MAPPING_H_
+#define _UMP_KERNEL_DESCRIPTOR_MAPPING_H_
+
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+/**
+ * The actual descriptor mapping table, never directly accessed by clients.
+ * Both arrays live in the same allocation as the struct itself.
+ */
+typedef struct umpp_descriptor_table
+{
+	/* keep as an unsigned long to rely on the OS's bitops support */
+	unsigned long * usage; /**< Pointer to bitpattern indicating if a descriptor is valid/used(1) or not(0) */
+	void** mappings; /**< Array of the pointers the descriptors map to */
+} umpp_descriptor_table;
+
+/**
+ * The descriptor mapping object.
+ * Provides a separate namespace where we can map an integer to a pointer.
+ * Descriptor 0 is reserved and doubles as the failure value of
+ * umpp_descriptor_mapping_allocate().
+ */
+typedef struct umpp_descriptor_mapping
+{
+	struct rw_semaphore lock; /**< Lock protecting access to the mapping object */
+	unsigned int max_nr_mappings_allowed; /**< Max number of mappings to support in this namespace */
+	unsigned int current_nr_mappings; /**< Current number of possible mappings */
+	umpp_descriptor_table * table; /**< Pointer to the current mapping table */
+} umpp_descriptor_mapping;
+
+/**
+ * Create a descriptor mapping object.
+ * Create a descriptor mapping capable of holding init_entries growable to max_entries.
+ * ID 0 is reserved so the number of available entries will be max - 1.
+ * @param init_entries Number of entries to preallocate memory for
+ * @param max_entries Number of entries to max support
+ * @return Pointer to a descriptor mapping object, NULL on failure
+ */
+umpp_descriptor_mapping * umpp_descriptor_mapping_create(unsigned int init_entries, unsigned int max_entries);
+
+/**
+ * Destroy a descriptor mapping object
+ * @param[in] map The map to free
+ */
+void umpp_descriptor_mapping_destroy(umpp_descriptor_mapping * map);
+
+/**
+ * Allocate a new mapping entry (descriptor ID)
+ * Allocates a new entry in the map.
+ * @param[in] map The map to allocate a new entry in
+ * @param[in] target The value to map to
+ * @return The descriptor allocated, ID 0 on failure.
+ */
+unsigned int umpp_descriptor_mapping_allocate(umpp_descriptor_mapping * map, void * target);
+
+/**
+ * Get the value mapped to by a descriptor ID
+ * @param[in] map The map to lookup the descriptor id in
+ * @param[in] descriptor The descriptor ID to lookup
+ * @param[out] target Pointer to a pointer which will receive the stored value
+ *
+ * @return 0 on successful lookup, -EINVAL on lookup failure.
+ */
+int umpp_descriptor_mapping_lookup(umpp_descriptor_mapping * map, unsigned int descriptor, void** const target);
+
+/**
+ * Free the descriptor ID
+ * For the descriptor to be reused it has to be freed
+ * @param[in] map The map to free the descriptor from
+ * @param descriptor The descriptor ID to free
+ */
+void umpp_descriptor_mapping_remove(umpp_descriptor_mapping * map, unsigned int descriptor);
+
+#endif /* _UMP_KERNEL_DESCRIPTOR_MAPPING_H_ */
diff --git a/drivers/base/ump/src/common/ump_kernel_priv.h b/drivers/base/ump/src/common/ump_kernel_priv.h
new file mode 100644
index 000000000000..38b6f1b197fb
--- /dev/null
+++ b/drivers/base/ump/src/common/ump_kernel_priv.h
@@ -0,0 +1,80 @@
+/*
+ *
+ * (C) COPYRIGHT 2008-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef _UMP_KERNEL_PRIV_H_
+#define _UMP_KERNEL_PRIV_H_
+
+#ifdef __KERNEL__
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <asm/cacheflush.h>
+#endif
+
+
+#define UMP_EXPECTED_IDS 64
+#define UMP_MAX_IDS 32768
+
+#ifdef __KERNEL__
+#define UMP_ASSERT(expr) \
+ if (!(expr)) { \
+ printk(KERN_ERR "UMP: Assertion failed! %s,%s,%s,line=%d\n",\
+ #expr,__FILE__,__func__,__LINE__); \
+ BUG(); \
+ }
+
+/* Perform cache maintenance for the CPU -> memory/device direction on
+ * [vaddr, vaddr+sz). paddr is the matching physical address, needed for
+ * the physically-indexed outer cache on ARM. */
+static inline void ump_sync_to_memory(uint64_t paddr, void* vaddr, size_t sz)
+{
+#ifdef CONFIG_ARM
+	__cpuc_flush_dcache_area(vaddr, sz);
+	outer_flush_range(paddr, paddr+sz);
+#elif defined(CONFIG_ARM64)
+	/*TODO (MID64-46): There's no other suitable cache flush function for ARM64 */
+	flush_cache_all();
+#elif defined(CONFIG_X86)
+	struct scatterlist scl = {0, };
+	sg_set_page(&scl, pfn_to_page(PFN_DOWN(paddr)), sz,
+			paddr & (PAGE_SIZE -1 ));
+	/* NOTE(review): dma_sync_sg_for_cpu() with DMA_TO_DEVICE looks suspect
+	 * for a CPU-to-memory clean; dma_sync_sg_for_device() would be the
+	 * usual call for this direction - confirm against the DMA API docs. */
+	dma_sync_sg_for_cpu(NULL, &scl, 1, DMA_TO_DEVICE);
+	mb(); /* for outer_sync (if needed) */
+#else
+#error Implement cache maintenance for your architecture here
+#endif
+}
+
+/* Perform cache maintenance for the memory/device -> CPU direction on
+ * [vaddr, vaddr+sz). On ARM the same full flush as ump_sync_to_memory()
+ * is used, which covers both directions. */
+static inline void ump_sync_to_cpu(uint64_t paddr, void* vaddr, size_t sz)
+{
+#ifdef CONFIG_ARM
+	__cpuc_flush_dcache_area(vaddr, sz);
+	outer_flush_range(paddr, paddr+sz);
+#elif defined(CONFIG_ARM64)
+	/* TODO (MID64-46): There's no other suitable cache flush function for ARM64 */
+	flush_cache_all();
+#elif defined(CONFIG_X86)
+	struct scatterlist scl = {0, };
+	sg_set_page(&scl, pfn_to_page(PFN_DOWN(paddr)), sz,
+			paddr & (PAGE_SIZE -1 ));
+	dma_sync_sg_for_cpu(NULL, &scl, 1, DMA_FROM_DEVICE);
+#else
+#error Implement cache maintenance for your architecture here
+#endif
+}
+#endif /* __KERNEL__*/
+#endif /* _UMP_KERNEL_PRIV_H_ */
+
diff --git a/drivers/base/ump/src/imports/ion/Makefile b/drivers/base/ump/src/imports/ion/Makefile
new file mode 100644
index 000000000000..ef74b273f7ad
--- /dev/null
+++ b/drivers/base/ump/src/imports/ion/Makefile
@@ -0,0 +1,53 @@
+#
+# (C) COPYRIGHT 2011, 2013 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+#
+
+
+
+
+# default to building for the host
+ARCH ?= $(shell uname -m)
+
+# linux build system integration
+
+ifneq ($(KERNELRELEASE),)
+# Inside the kernel build system
+
+EXTRA_CFLAGS += -I$(KBUILD_EXTMOD) -I$(KBUILD_EXTMOD)/../../../../..
+KBUILD_EXTRA_SYMBOLS += "$(KBUILD_EXTMOD)/../../Module.symvers"
+
+SRC += ump_kernel_import_ion.c
+
+MODULE:=ump_ion_import.ko
+
+obj-m := $(MODULE:.ko=.o)
+$(MODULE:.ko=-y) := $(SRC:.c=.o)
+$(MODULE:.ko=-objs) := $(SRC:.c=.o)
+
+else
+# Outside the kernel build system
+#
+#
+
+# Fixed: removed the stray trailing ')' after the $(error ...) call
+ifeq ($(KDIR),)
+$(error Must specify KDIR to point to the kernel to target)
+endif
+
+all:
+	$(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR)
+
+clean:
+	$(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+
+endif
+
diff --git a/drivers/base/ump/src/imports/ion/ump_kernel_import_ion.c b/drivers/base/ump/src/imports/ion/ump_kernel_import_ion.c
new file mode 100644
index 000000000000..12b2e325b45e
--- /dev/null
+++ b/drivers/base/ump/src/imports/ion/ump_kernel_import_ion.c
@@ -0,0 +1,204 @@
+/*
+ *
+ * (C) COPYRIGHT 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <linux/ump.h>
+#include <linux/dma-mapping.h>
+#include "ion.h"
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+struct ion_wrapping_info
+{
+ struct ion_client * ion_client;
+ struct ion_handle * ion_handle;
+ int num_phys_blocks;
+ struct scatterlist * sglist;
+};
+
+/* Platform-integration hook: return the system's global struct ion_device.
+ * Left as a stub returning NULL for the integrator to fill in; until it is
+ * implemented, ion_client_create() in import_ion_client_create() will fail. */
+static struct ion_device * ion_device_get(void)
+{
+ /* < Customer to provide implementation >
+ * Return a pointer to the global ion_device on the system
+ */
+ return NULL;
+}
+
+/* Session-begin hook for the ion import handler.
+ * Creates an ion client for this UMP session and stores it through the
+ * custom_session_data out-pointer.
+ * Returns 0 on success or the errno extracted from the ERR_PTR. */
+static int import_ion_client_create(void** const custom_session_data)
+{
+ struct ion_client ** ion_client;
+
+ ion_client = (struct ion_client**)custom_session_data;
+
+ *ion_client = ion_client_create(ion_device_get(), "ump");
+
+ /* PTR_RET maps an ERR_PTR to its errno value and a valid pointer to 0 */
+ return PTR_RET(*ion_client);
+}
+
+
+/* Session-end hook: tear down the ion client created by
+ * import_ion_client_create(). Only called when session_begin succeeded,
+ * hence the BUG_ON for a NULL client. */
+static void import_ion_client_destroy(void* custom_session_data)
+{
+ struct ion_client * ion_client;
+
+ ion_client = (struct ion_client*)custom_session_data;
+ BUG_ON(!ion_client);
+
+ ion_client_destroy(ion_client);
+}
+
+
+/* Final-release callback installed on imported UMP handles.
+ * Undoes everything import_ion_import() set up: unmaps the scatterlist,
+ * drops the ion reference, frees the wrapping info, and releases the
+ * module reference taken when the import succeeded. */
+static void import_ion_final_release_callback(const ump_dd_handle handle, void * info)
+{
+ struct ion_wrapping_info * ion_info;
+
+ BUG_ON(!info);
+
+ (void)handle;
+ ion_info = (struct ion_wrapping_info*)info;
+
+ dma_unmap_sg(NULL, ion_info->sglist, ion_info->num_phys_blocks, DMA_BIDIRECTIONAL);
+
+ ion_free(ion_info->ion_client, ion_info->ion_handle);
+ kfree(ion_info);
+ module_put(THIS_MODULE);
+}
+
+/* Import an ion allocation into UMP.
+ *
+ * custom_session_data: the struct ion_client created for this session.
+ * pfd: user-space pointer to an int holding the dma-buf fd to import.
+ * flags: UMP allocation flags applied to the wrapped memory.
+ *
+ * On success returns a new UMP handle with a final-release callback that
+ * undoes the DMA mapping and ion reference; on failure returns
+ * UMP_DD_INVALID_MEMORY_HANDLE with all partial state rolled back.
+ */
+static ump_dd_handle import_ion_import(void * custom_session_data, void * pfd, ump_alloc_flags flags)
+{
+ int fd;
+ ump_dd_handle ump_handle;
+ struct scatterlist * sg;
+ int num_dma_blocks;
+ ump_dd_physical_block_64 * phys_blocks;
+ unsigned long i;
+ struct sg_table * sgt;
+
+ struct ion_wrapping_info * ion_info;
+
+ BUG_ON(!custom_session_data);
+ BUG_ON(!pfd);
+
+ /* kzalloc takes (size, gfp flags); the original call had the two
+ * arguments swapped, allocating GFP_KERNEL bytes with the struct
+ * size used as the GFP mask. */
+ ion_info = kzalloc(sizeof(*ion_info), GFP_KERNEL);
+ if (NULL == ion_info)
+ {
+ return UMP_DD_INVALID_MEMORY_HANDLE;
+ }
+
+ ion_info->ion_client = (struct ion_client*)custom_session_data;
+
+ if (get_user(fd, (int*)pfd))
+ {
+ goto out;
+ }
+
+ ion_info->ion_handle = ion_import_dma_buf(ion_info->ion_client, fd);
+
+ if (IS_ERR_OR_NULL(ion_info->ion_handle))
+ {
+ goto out;
+ }
+
+ sgt = ion_sg_table(ion_info->ion_client, ion_info->ion_handle);
+ if (IS_ERR_OR_NULL(sgt))
+ {
+ goto ion_dma_map_failed;
+ }
+
+ ion_info->sglist = sgt->sgl;
+
+ /* count the scatterlist entries so we can DMA-map them */
+ sg = ion_info->sglist;
+ while (sg)
+ {
+ ion_info->num_phys_blocks++;
+ sg = sg_next(sg);
+ }
+
+ num_dma_blocks = dma_map_sg(NULL, ion_info->sglist, ion_info->num_phys_blocks, DMA_BIDIRECTIONAL);
+
+ if (0 == num_dma_blocks)
+ {
+ goto linux_dma_map_failed;
+ }
+
+ phys_blocks = vmalloc(num_dma_blocks * sizeof(*phys_blocks));
+ if (NULL == phys_blocks)
+ {
+ goto vmalloc_failed;
+ }
+
+ for_each_sg(ion_info->sglist, sg, num_dma_blocks, i)
+ {
+ phys_blocks[i].addr = sg_phys(sg);
+ phys_blocks[i].size = sg_dma_len(sg);
+ }
+
+ ump_handle = ump_dd_create_from_phys_blocks_64(phys_blocks, num_dma_blocks, flags, NULL, import_ion_final_release_callback, ion_info);
+
+ vfree(phys_blocks);
+
+ if (ump_handle != UMP_DD_INVALID_MEMORY_HANDLE)
+ {
+ /*
+ * As we have a final release callback installed
+ * we must keep the module locked until
+ * the callback has been triggered
+ * */
+ __module_get(THIS_MODULE);
+ return ump_handle;
+ }
+
+ /* failed*/
+vmalloc_failed:
+ dma_unmap_sg(NULL, ion_info->sglist, ion_info->num_phys_blocks, DMA_BIDIRECTIONAL);
+linux_dma_map_failed:
+ion_dma_map_failed:
+ ion_free(ion_info->ion_client, ion_info->ion_handle);
+out:
+ kfree(ion_info);
+ return UMP_DD_INVALID_MEMORY_HANDLE;
+}
+
+/* Handler table registered with the UMP core: ties the ion session and
+ * import callbacks above to the UMP_EXTERNAL_MEM_TYPE_ION slot. */
+struct ump_import_handler import_handler_ion =
+{
+ .linux_module = THIS_MODULE,
+ .session_begin = import_ion_client_create,
+ .session_end = import_ion_client_destroy,
+ .import = import_ion_import
+};
+
+/* Module entry point: register the ion import handler with UMP. */
+static int __init import_ion_initialize_module(void)
+{
+ /* register with UMP */
+ return ump_import_module_register(UMP_EXTERNAL_MEM_TYPE_ION, &import_handler_ion);
+}
+
+/* Module exit point: drop our slot in the UMP import handler table. */
+static void __exit import_ion_cleanup_module(void)
+{
+ /* unregister import handler */
+ ump_import_module_unregister(UMP_EXTERNAL_MEM_TYPE_ION);
+}
+
+/* Setup init and exit functions for this module */
+module_init(import_ion_initialize_module);
+module_exit(import_ion_cleanup_module);
+
+/* And some module information */
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION("1.0");
diff --git a/drivers/base/ump/src/linux/ump_kernel_linux.c b/drivers/base/ump/src/linux/ump_kernel_linux.c
new file mode 100644
index 000000000000..d6c3c5354907
--- /dev/null
+++ b/drivers/base/ump/src/linux/ump_kernel_linux.c
@@ -0,0 +1,831 @@
+/*
+ *
+ * (C) COPYRIGHT 2008-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <linux/ump-ioctl.h>
+#include <linux/ump.h>
+
+#include <asm/uaccess.h> /* copy_*_user */
+#include <linux/compat.h>
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/fs.h> /* file system operations */
+#include <linux/cdev.h> /* character device definitions */
+#include <linux/ioport.h> /* request_mem_region */
+#include <linux/device.h> /* class registration support */
+
+#include <common/ump_kernel_core.h>
+
+#include "ump_kernel_linux_mem.h"
+#include <ump_arch.h>
+
+
+struct ump_linux_device
+{
+ struct cdev cdev;
+ struct class * ump_class;
+};
+
+/* Name of the UMP device driver */
+static char ump_dev_name[] = "ump"; /* should be const, but the functions we call require non-const */
+
+/* Module parameter to control log level */
+int ump_debug_level = 2;
+module_param(ump_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(ump_debug_level, "Higher number, more dmesg output");
+
+/* By default the module uses any available major, but it's possible to set it at load time to a specific number */
+int ump_major = 0;
+module_param(ump_major, int, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_major, "Device major number");
+
+#define UMP_REV_STRING "1.0"
+
+char * ump_revision = UMP_REV_STRING;
+module_param(ump_revision, charp, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_revision, "Revision info");
+
+static int umpp_linux_open(struct inode *inode, struct file *filp);
+static int umpp_linux_release(struct inode *inode, struct file *filp);
+#ifdef HAVE_UNLOCKED_IOCTL
+static long umpp_linux_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+static int umpp_linux_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
+#endif
+
+/* This variable defines the file operations this UMP device driver offers */
+static struct file_operations ump_fops =
+{
+ .owner = THIS_MODULE,
+ .open = umpp_linux_open,
+ .release = umpp_linux_release,
+#ifdef HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = umpp_linux_ioctl,
+#else
+ .ioctl = umpp_linux_ioctl,
+#endif
+ .compat_ioctl = umpp_linux_ioctl,
+ .mmap = umpp_linux_mmap
+};
+
+/* import module handling */
+DEFINE_MUTEX(import_list_lock);
+struct ump_import_handler * import_handlers[UMPP_EXTERNAL_MEM_COUNT];
+
+/* The global variable containing the global device data */
+static struct ump_linux_device ump_linux_device;
+
+#define DBG_MSG(level, ...) do { \
+if ((level) <= ump_debug_level)\
+{\
+printk(KERN_DEBUG "UMP<" #level ">:\n" __VA_ARGS__);\
+} \
+} while (0)
+
+#define MSG_ERR(...) do{ \
+printk(KERN_ERR "UMP: ERR: %s\n %s()%4d\n", __FILE__, __func__ , __LINE__) ; \
+printk(KERN_ERR __VA_ARGS__); \
+printk(KERN_ERR "\n"); \
+} while(0)
+
+#define MSG(...) do{ \
+printk(KERN_INFO "UMP: " __VA_ARGS__);\
+} while (0)
+
+/*
+ * This function is called by Linux to initialize this module.
+ * All we do is initialize the UMP device driver (core constructor;
+ * device node creation happens via umpp_device_initialize()).
+ */
+static int __init umpp_linux_initialize_module(void)
+{
+ ump_result err;
+
+ err = umpp_core_constructor();
+ if (UMP_OK != err)
+ {
+ MSG_ERR("UMP device driver init failed\n");
+ /* NOTE(review): -ENOTTY is an unusual errno for init failure;
+ * -ENODEV may have been intended — confirm */
+ return -ENOTTY;
+ }
+
+ MSG("UMP device driver %s loaded\n", UMP_REV_STRING);
+ return 0;
+}
+
+
+
+/*
+ * This function is called by Linux to unload/terminate/exit/cleanup this module.
+ * All we do is terminate the UMP device driver via the core destructor.
+ */
+static void __exit umpp_linux_cleanup_module(void)
+{
+ DBG_MSG(2, "Unloading UMP device driver\n");
+ umpp_core_destructor();
+ DBG_MSG(2, "Module unloaded\n");
+}
+
+
+
+/*
+ * Initialize the UMP device driver.
+ * Registers a char device region (fixed major when the ump_major module
+ * parameter is set, otherwise dynamically allocated), adds the cdev, then
+ * creates the "ump" class and device node. Each step is unwound in reverse
+ * order on failure, so on UMP_ERROR nothing is left registered.
+ */
+ump_result umpp_device_initialize(void)
+{
+ int err;
+ dev_t dev = 0;
+
+ if (0 == ump_major)
+ {
+ /* auto select a major */
+ err = alloc_chrdev_region(&dev, 0, 1, ump_dev_name);
+ ump_major = MAJOR(dev);
+ }
+ else
+ {
+ /* use load time defined major number */
+ dev = MKDEV(ump_major, 0);
+ err = register_chrdev_region(dev, 1, ump_dev_name);
+ }
+
+ if (0 == err)
+ {
+ memset(&ump_linux_device, 0, sizeof(ump_linux_device));
+
+ /* initialize our char dev data */
+ cdev_init(&ump_linux_device.cdev, &ump_fops);
+ ump_linux_device.cdev.owner = THIS_MODULE;
+ ump_linux_device.cdev.ops = &ump_fops;
+
+ /* register char dev with the kernel */
+ err = cdev_add(&ump_linux_device.cdev, dev, 1/*count*/);
+ if (0 == err)
+ {
+
+ ump_linux_device.ump_class = class_create(THIS_MODULE, ump_dev_name);
+ if (IS_ERR(ump_linux_device.ump_class))
+ {
+ err = PTR_ERR(ump_linux_device.ump_class);
+ }
+ else
+ {
+ struct device * mdev;
+ mdev = device_create(ump_linux_device.ump_class, NULL, dev, NULL, ump_dev_name);
+ if (!IS_ERR(mdev))
+ {
+ /* fully set up: cdev, class and device node all live */
+ return UMP_OK;
+ }
+
+ err = PTR_ERR(mdev);
+ class_destroy(ump_linux_device.ump_class);
+ }
+ cdev_del(&ump_linux_device.cdev);
+ }
+
+ unregister_chrdev_region(dev, 1);
+ }
+
+ return UMP_ERROR;
+}
+
+
+
+/*
+ * Terminate the UMP device driver.
+ * Reverses umpp_device_initialize(): destroys the device node and class,
+ * deletes the cdev and releases the char device region.
+ */
+void umpp_device_terminate(void)
+{
+ dev_t dev = MKDEV(ump_major, 0);
+
+ device_destroy(ump_linux_device.ump_class, dev);
+ class_destroy(ump_linux_device.ump_class);
+
+ /* unregister char device */
+ cdev_del(&ump_linux_device.cdev);
+
+ /* free major */
+ unregister_chrdev_region(dev, 1);
+}
+
+
+/* open() handler: start a UMP core session and stash it as the file's
+ * private data so later ioctl/mmap calls can find it.
+ * NOTE(review): -EFAULT on a failed session start looks odd; -ENOMEM may
+ * have been intended — confirm against umpp_core_session_start(). */
+static int umpp_linux_open(struct inode *inode, struct file *filp)
+{
+ umpp_session *session;
+
+ session = umpp_core_session_start();
+ if (NULL == session)
+ {
+ return -EFAULT;
+ }
+
+ filp->private_data = session;
+
+ return 0;
+}
+
+/* release() handler: end the session created in umpp_linux_open(). */
+static int umpp_linux_release(struct inode *inode, struct file *filp)
+{
+ umpp_session *session;
+
+ session = filp->private_data;
+
+ umpp_core_session_end(session);
+
+ filp->private_data = NULL;
+
+ return 0;
+}
+
+/**************************/
+/*ioctl specific functions*/
+/**************************/
+/* Handle UMP_FUNC_ALLOCATE: create a new UMP allocation for this session.
+ * On success params->secure_id is filled in, the allocation is linked into
+ * the session's in-use list with a process-local refcount of 1, and 0 is
+ * returned; on any failure -ENOMEM. */
+static int do_ump_dd_allocate(umpp_session * session, ump_k_allocate * params)
+{
+ ump_dd_handle new_allocation;
+ new_allocation = ump_dd_allocate_64(params->size, params->alloc_flags, NULL, NULL, NULL);
+
+ if (UMP_DD_INVALID_MEMORY_HANDLE != new_allocation)
+ {
+ umpp_session_memory_usage * tracker;
+
+ tracker = kmalloc(sizeof(*tracker), GFP_KERNEL | __GFP_HARDWALL);
+ if (NULL != tracker)
+ {
+ /* update the return struct with the new ID */
+ params->secure_id = ump_dd_secure_id_get(new_allocation);
+
+ tracker->mem = new_allocation;
+ tracker->id = params->secure_id;
+ atomic_set(&tracker->process_usage_count, 1);
+
+ /* link it into the session in-use list */
+ mutex_lock(&session->session_lock);
+ list_add(&tracker->link, &session->memory_usage);
+ mutex_unlock(&session->session_lock);
+
+ return 0;
+ }
+ /* tracker allocation failed: drop the underlying allocation again */
+ ump_dd_release(new_allocation);
+ }
+
+ printk(KERN_WARNING "UMP: Allocation FAILED\n");
+ return -ENOMEM;
+}
+
+/* Handle UMP_FUNC_RETAIN: add a process-local reference to the allocation
+ * identified by params->secure_id.
+ * If the id is already on the session's in-use list, the per-process
+ * refcount is bumped with a cmpxchg loop (saturating at INT_MAX with
+ * -EBUSY); otherwise the id is looked up globally and, when found, added
+ * to the session list with a refcount of 1.
+ * Returns 0 on success, -EBUSY on refcount saturation, -ENODEV when the
+ * id does not exist. */
+static int do_ump_dd_retain(umpp_session * session, ump_k_retain * params)
+{
+ umpp_session_memory_usage * it;
+
+ mutex_lock(&session->session_lock);
+
+ /* try to find it on the session usage list */
+ list_for_each_entry(it, &session->memory_usage, link)
+ {
+ if (it->id == params->secure_id)
+ {
+ /* found to already be in use */
+ /* check for overflow: the original tested 'refcnt + 1 > 0',
+ * which is undefined behaviour (signed overflow) when
+ * refcnt == INT_MAX; compare against INT_MAX instead */
+ while(1)
+ {
+ int refcnt = atomic_read(&it->process_usage_count);
+ if (refcnt < INT_MAX)
+ {
+ /* add a process local ref */
+ if(atomic_cmpxchg(&it->process_usage_count, refcnt, refcnt + 1) == refcnt)
+ {
+ mutex_unlock(&session->session_lock);
+ return 0;
+ }
+ }
+ else
+ {
+ /* maximum usage cap reached */
+ mutex_unlock(&session->session_lock);
+ return -EBUSY;
+ }
+ }
+ }
+ }
+ /* try to look it up globally */
+
+ it = kmalloc(sizeof(*it), GFP_KERNEL);
+
+ if (NULL != it)
+ {
+ it->mem = ump_dd_from_secure_id(params->secure_id);
+ if (UMP_DD_INVALID_MEMORY_HANDLE != it->mem)
+ {
+ /* found, add it to the session usage list */
+ it->id = params->secure_id;
+ atomic_set(&it->process_usage_count, 1);
+ list_add(&it->link, &session->memory_usage);
+ }
+ else
+ {
+ /* not found */
+ kfree(it);
+ it = NULL;
+ }
+ }
+
+ mutex_unlock(&session->session_lock);
+
+ return (NULL != it) ? 0 : -ENODEV;
+}
+
+
+/* Handle UMP_FUNC_RELEASE: drop one process-local reference to the
+ * allocation identified by params->secure_id.
+ * Only ids previously retained/allocated in this session are valid; when
+ * the per-process refcount hits zero the tracker is unlinked and the
+ * underlying UMP reference released.
+ * Returns 0 if the id was found on the session list, -ENODEV otherwise. */
+static int do_ump_dd_release(umpp_session * session, ump_k_release * params)
+{
+ umpp_session_memory_usage * it;
+ int result = -ENODEV;
+
+ mutex_lock(&session->session_lock);
+
+ /* only do a release if found on the session list */
+ list_for_each_entry(it, &session->memory_usage, link)
+ {
+ if (it->id == params->secure_id)
+ {
+ /* found, a valid call */
+ result = 0;
+
+ if (0 == atomic_sub_return(1, &it->process_usage_count))
+ {
+ /* last ref in this process remove from the usage list and remove the underlying ref */
+ list_del(&it->link);
+ ump_dd_release(it->mem);
+ kfree(it);
+ }
+
+ break;
+ }
+ }
+ mutex_unlock(&session->session_lock);
+
+ return result;
+}
+
+/* Handle UMP_FUNC_SIZEQUERY: report the size of an allocation the session
+ * currently holds a reference to. Returns 0 and fills params->size when
+ * the id is on the session list, -ENODEV otherwise. */
+static int do_ump_dd_sizequery(umpp_session * session, ump_k_sizequery * params)
+{
+ umpp_session_memory_usage * it;
+ int result = -ENODEV;
+
+ mutex_lock(&session->session_lock);
+
+ /* only valid if found on the session list */
+ list_for_each_entry(it, &session->memory_usage, link)
+ {
+ if (it->id == params->secure_id)
+ {
+ /* found, a valid call */
+ params->size = ump_dd_size_get_64(it->mem);
+ result = 0;
+ break;
+ }
+
+ }
+ mutex_unlock(&session->session_lock);
+
+ return result;
+}
+
+/* Handle UMP_FUNC_ALLOCATION_FLAGS_GET (v1 API): report the allocation
+ * flags of an allocation held by this session. Returns 0 and fills
+ * params->alloc_flags when the id is on the session list, -ENODEV
+ * otherwise. */
+static int do_ump_dd_allocation_flags_get(umpp_session * session, ump_k_allocation_flags * params)
+{
+ umpp_session_memory_usage * it;
+ int result = -ENODEV;
+
+ mutex_lock(&session->session_lock);
+
+ /* only valid if found on the session list */
+ list_for_each_entry(it, &session->memory_usage, link)
+ {
+ if (it->id == params->secure_id)
+ {
+ /* found, a valid call */
+ params->alloc_flags = ump_dd_allocation_flags_get(it->mem);
+ result = 0;
+ break;
+ }
+
+ }
+ mutex_unlock(&session->session_lock);
+
+ return result;
+}
+
+/* Handle UMP_FUNC_MSYNC: perform the requested CPU cache maintenance on a
+ * mapped range of an allocation held by this session. For compat (32-bit
+ * on 64-bit kernel) callers the user pointer is unpacked with compat_ptr()
+ * from the compat_value field. Returns 0 when the id was found on the
+ * session list, -ENODEV otherwise. */
+static int do_ump_dd_msync_now(umpp_session * session, ump_k_msync * params)
+{
+ umpp_session_memory_usage * it;
+ int result = -ENODEV;
+
+ mutex_lock(&session->session_lock);
+
+ /* only valid if found on the session list */
+ list_for_each_entry(it, &session->memory_usage, link)
+ {
+ if (it->id == params->secure_id)
+ {
+ /* found, do the cache op */
+#ifdef CONFIG_COMPAT
+ if (is_compat_task())
+ {
+ umpp_dd_cpu_msync_now(it->mem, params->cache_operation, compat_ptr(params->mapped_ptr.compat_value), params->size);
+ result = 0;
+ }
+ else
+ {
+#endif
+ umpp_dd_cpu_msync_now(it->mem, params->cache_operation, params->mapped_ptr.value, params->size);
+ result = 0;
+#ifdef CONFIG_COMPAT
+ }
+#endif
+ break;
+ }
+ }
+ mutex_unlock(&session->session_lock);
+
+ return result;
+}
+
+
+/* Bind every currently-registered import handler to a new session by
+ * calling its session_begin hook (slot 0 is reserved/unused).
+ * A failed session_begin simply leaves that slot NULL. */
+void umpp_import_handlers_init(umpp_session * session)
+{
+ int i;
+ mutex_lock(&import_list_lock);
+ for ( i = 1; i < UMPP_EXTERNAL_MEM_COUNT; i++ )
+ {
+ if (import_handlers[i])
+ {
+ import_handlers[i]->session_begin(&session->import_handler_data[i]);
+ /* It is OK if session_begin returned an error.
+ * We won't do any import calls if so */
+ }
+ }
+ mutex_unlock(&import_list_lock);
+}
+
+/* Unbind a terminating session from every import handler it successfully
+ * bound to, calling each handler's session_end hook. */
+void umpp_import_handlers_term(umpp_session * session)
+{
+ int i;
+ mutex_lock(&import_list_lock);
+ for ( i = 1; i < UMPP_EXTERNAL_MEM_COUNT; i++ )
+ {
+ /* only call if session_begin succeeded */
+ if (session->import_handler_data[i] != NULL)
+ {
+ /* if session_begin succeeded the handler
+ * should not have unregistered with us */
+ BUG_ON(!import_handlers[i]);
+ import_handlers[i]->session_end(session->import_handler_data[i]);
+ session->import_handler_data[i] = NULL;
+ }
+ }
+ mutex_unlock(&import_list_lock);
+}
+
+/* Register an import handler module for the given external memory type.
+ * All callbacks and the owning module pointer must be provided.
+ * Returns 0 on success, -EEXIST if the slot is already taken. */
+int ump_import_module_register(enum ump_external_memory_type type, struct ump_import_handler * handler)
+{
+ int res = -EEXIST;
+
+ /* validate input */
+ BUG_ON(type == 0 || type >= UMPP_EXTERNAL_MEM_COUNT);
+ BUG_ON(!handler);
+ BUG_ON(!handler->linux_module);
+ BUG_ON(!handler->session_begin);
+ BUG_ON(!handler->session_end);
+ BUG_ON(!handler->import);
+
+ mutex_lock(&import_list_lock);
+
+ if (!import_handlers[type])
+ {
+ import_handlers[type] = handler;
+ res = 0;
+ }
+
+ mutex_unlock(&import_list_lock);
+
+ return res;
+}
+
+/* Remove a previously registered import handler. Calling this for a type
+ * that was never successfully registered is a bug. */
+void ump_import_module_unregister(enum ump_external_memory_type type)
+{
+ BUG_ON(type == 0 || type >= UMPP_EXTERNAL_MEM_COUNT);
+
+ mutex_lock(&import_list_lock);
+ /* an error to call this if ump_import_module_register didn't succeed */
+ BUG_ON(!import_handlers[type]);
+ import_handlers[type] = NULL;
+ mutex_unlock(&import_list_lock);
+}
+
+/* Look up the import handler for a (user-supplied, hence range-checked)
+ * type id, taking a reference on the handler's module so it cannot be
+ * unloaded while in use. Returns NULL if the id is out of range, no
+ * handler is registered, or the module reference cannot be taken.
+ * Pair every non-NULL return with import_handler_put(). */
+static struct ump_import_handler * import_handler_get(unsigned int type_id)
+{
+ enum ump_external_memory_type type;
+ struct ump_import_handler * handler;
+
+ /* validate and convert input */
+ /* handle bad data here, not just BUG_ON */
+ if (type_id == 0 || type_id >= UMPP_EXTERNAL_MEM_COUNT)
+ return NULL;
+
+ type = (enum ump_external_memory_type)type_id;
+
+ /* find the handler */
+ mutex_lock(&import_list_lock);
+
+ handler = import_handlers[type];
+
+ if (handler)
+ {
+ if (!try_module_get(handler->linux_module))
+ {
+ handler = NULL;
+ }
+ }
+
+ mutex_unlock(&import_list_lock);
+
+ return handler;
+}
+
+/* Drop the module reference taken by import_handler_get(). */
+static void import_handler_put(struct ump_import_handler * handler)
+{
+ module_put(handler->linux_module);
+}
+
+/* Handle UMP_FUNC_IMPORT: wrap external memory (e.g. an ion buffer) as a
+ * UMP allocation via the registered handler for params->type.
+ * Lazily binds the handler to this session if the handler module was
+ * loaded after the session opened. On success the new allocation is
+ * tracked on the session list like a regular allocation and 0 returned.
+ * NOTE(review): all failures (bad type, unbound session, failed import)
+ * collapse to -ENOMEM — confirm that is the intended ABI. */
+static int do_ump_dd_import(umpp_session * session, ump_k_import * params)
+{
+ ump_dd_handle new_allocation = UMP_DD_INVALID_MEMORY_HANDLE;
+ struct ump_import_handler * handler;
+
+ handler = import_handler_get(params->type);
+
+ if (handler)
+ {
+ /* try late binding if not already bound */
+ if (!session->import_handler_data[params->type])
+ {
+ handler->session_begin(&session->import_handler_data[params->type]);
+ }
+
+ /* do we have a bound session? */
+ if (session->import_handler_data[params->type])
+ {
+ new_allocation = handler->import( session->import_handler_data[params->type],
+ params->phandle.value,
+ params->alloc_flags);
+ }
+
+ /* done with the handler */
+ import_handler_put(handler);
+ }
+
+ /* did the import succeed? */
+ if (UMP_DD_INVALID_MEMORY_HANDLE != new_allocation)
+ {
+ umpp_session_memory_usage * tracker;
+
+ tracker = kmalloc(sizeof(*tracker), GFP_KERNEL | __GFP_HARDWALL);
+ if (NULL != tracker)
+ {
+ /* update the return struct with the new ID */
+ params->secure_id = ump_dd_secure_id_get(new_allocation);
+
+ tracker->mem = new_allocation;
+ tracker->id = params->secure_id;
+ atomic_set(&tracker->process_usage_count, 1);
+
+ /* link it into the session in-use list */
+ mutex_lock(&session->session_lock);
+ list_add(&tracker->link, &session->memory_usage);
+ mutex_unlock(&session->session_lock);
+
+ return 0;
+ }
+ ump_dd_release(new_allocation);
+ }
+
+ return -ENOMEM;
+
+}
+
+/*
+ * Dispatch user-space ioctls for the UMP device.
+ * Each supported command validates the user-supplied payload size against
+ * the expected struct, copies the argument into a 64-bit-aligned on-stack
+ * buffer, calls the matching do_ump_dd_* handler, and (for commands that
+ * return data) copies the struct back out. Unknown magic/number/size
+ * yields -ENOTTY; failed user copies yield -EFAULT.
+ */
+#ifdef HAVE_UNLOCKED_IOCTL
+static long umpp_linux_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+#else
+static int umpp_linux_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+#endif
+{
+ int ret;
+ uint64_t msg[(UMP_CALL_MAX_SIZE+7)>>3]; /* alignment fixup */
+ uint32_t size = _IOC_SIZE(cmd);
+ struct umpp_session *session = filp->private_data;
+
+#ifndef HAVE_UNLOCKED_IOCTL
+ (void)inode; /* unused arg */
+#endif
+
+ /*
+ * extract the type and number bitfields, and don't decode
+ * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok()
+ */
+ if (_IOC_TYPE(cmd) != UMP_IOC_MAGIC)
+ {
+ return -ENOTTY;
+
+ }
+ if (_IOC_NR(cmd) > UMP_IOC_MAXNR)
+ {
+ return -ENOTTY;
+ }
+
+ switch(cmd)
+ {
+ case UMP_FUNC_ALLOCATE:
+ if (size != sizeof(ump_k_allocate))
+ {
+ return -ENOTTY;
+ }
+ if (copy_from_user(&msg, (void __user *)arg, size))
+ {
+ return -EFAULT;
+ }
+ ret = do_ump_dd_allocate(session, (ump_k_allocate *)&msg);
+ if (ret)
+ {
+ return ret;
+ }
+ /* copy the secure_id filled in by the handler back to user space */
+ if (copy_to_user((void *)arg, &msg, size))
+ {
+ return -EFAULT;
+ }
+ return 0;
+ case UMP_FUNC_SIZEQUERY:
+ if (size != sizeof(ump_k_sizequery))
+ {
+ return -ENOTTY;
+ }
+ if (copy_from_user(&msg, (void __user *)arg, size))
+ {
+ return -EFAULT;
+ }
+ ret = do_ump_dd_sizequery(session,(ump_k_sizequery*) &msg);
+ if (ret)
+ {
+ return ret;
+ }
+ if (copy_to_user((void *)arg, &msg, size))
+ {
+ return -EFAULT;
+ }
+ return 0;
+ case UMP_FUNC_MSYNC:
+ if (size != sizeof(ump_k_msync))
+ {
+ return -ENOTTY;
+ }
+ if (copy_from_user(&msg, (void __user *)arg, size))
+ {
+ return -EFAULT;
+ }
+ ret = do_ump_dd_msync_now(session,(ump_k_msync*) &msg);
+ if (ret)
+ {
+ return ret;
+ }
+ if (copy_to_user((void *)arg, &msg, size))
+ {
+ return -EFAULT;
+ }
+ return 0;
+ case UMP_FUNC_IMPORT:
+ if (size != sizeof(ump_k_import))
+ {
+ return -ENOTTY;
+ }
+ if (copy_from_user(&msg, (void __user*)arg, size))
+ {
+ return -EFAULT;
+ }
+ ret = do_ump_dd_import(session, (ump_k_import*) &msg);
+ if (ret)
+ {
+ return ret;
+ }
+ if (copy_to_user((void *)arg, &msg, size))
+ {
+ return -EFAULT;
+ }
+ return 0;
+ /* used only by v1 API */
+ case UMP_FUNC_ALLOCATION_FLAGS_GET:
+ if (size != sizeof(ump_k_allocation_flags))
+ {
+ return -ENOTTY;
+ }
+ if (copy_from_user(&msg, (void __user *)arg, size))
+ {
+ return -EFAULT;
+ }
+ ret = do_ump_dd_allocation_flags_get(session,(ump_k_allocation_flags*) &msg);
+ if (ret)
+ {
+ return ret;
+ }
+ if (copy_to_user((void *)arg, &msg, size))
+ {
+ return -EFAULT;
+ }
+ return 0;
+ case UMP_FUNC_RETAIN:
+ if (size != sizeof(ump_k_retain))
+ {
+ return -ENOTTY;
+ }
+ if (copy_from_user(&msg, (void __user *)arg, size))
+ {
+ return -EFAULT;
+ }
+ ret = do_ump_dd_retain(session,(ump_k_retain*) &msg);
+ if (ret)
+ {
+ return ret;
+ }
+ /* retain returns no data, so no copy back */
+ return 0;
+ case UMP_FUNC_RELEASE:
+ if (size != sizeof(ump_k_release))
+ {
+ return -ENOTTY;
+ }
+ if (copy_from_user(&msg, (void __user *)arg, size))
+ {
+ return -EFAULT;
+ }
+ ret = do_ump_dd_release(session,(ump_k_release*) &msg);
+ if (ret)
+ {
+ return ret;
+ }
+ return 0;
+ default:
+ /* not ours */
+ return -ENOTTY;
+ }
+ /*redundant below*/
+ return -ENOTTY;
+}
+
+
+/* Export UMP kernel space API functions */
+EXPORT_SYMBOL(ump_dd_allocate_64);
+EXPORT_SYMBOL(ump_dd_allocation_flags_get);
+EXPORT_SYMBOL(ump_dd_secure_id_get);
+EXPORT_SYMBOL(ump_dd_from_secure_id);
+EXPORT_SYMBOL(ump_dd_phys_blocks_get_64);
+EXPORT_SYMBOL(ump_dd_size_get_64);
+EXPORT_SYMBOL(ump_dd_retain);
+EXPORT_SYMBOL(ump_dd_release);
+EXPORT_SYMBOL(ump_dd_create_from_phys_blocks_64);
+#ifdef CONFIG_KDS
+EXPORT_SYMBOL(ump_dd_kds_resource_get);
+#endif
+
+/* import API */
+EXPORT_SYMBOL(ump_import_module_register);
+EXPORT_SYMBOL(ump_import_module_unregister);
+
+
+
+/* V1 API */
+EXPORT_SYMBOL(ump_dd_handle_create_from_secure_id);
+EXPORT_SYMBOL(ump_dd_phys_block_count_get);
+EXPORT_SYMBOL(ump_dd_phys_block_get);
+EXPORT_SYMBOL(ump_dd_phys_blocks_get);
+EXPORT_SYMBOL(ump_dd_size_get);
+EXPORT_SYMBOL(ump_dd_reference_add);
+EXPORT_SYMBOL(ump_dd_reference_release);
+EXPORT_SYMBOL(ump_dd_handle_create_from_phys_blocks);
+
+
+/* Setup init and exit functions for this module */
+module_init(umpp_linux_initialize_module);
+module_exit(umpp_linux_cleanup_module);
+
+/* And some module information */
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION(UMP_REV_STRING);
diff --git a/drivers/base/ump/src/linux/ump_kernel_linux_mem.c b/drivers/base/ump/src/linux/ump_kernel_linux_mem.c
new file mode 100644
index 000000000000..9186dd031c04
--- /dev/null
+++ b/drivers/base/ump/src/linux/ump_kernel_linux_mem.c
@@ -0,0 +1,250 @@
+/*
+ *
+ * (C) COPYRIGHT 2008-2013, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#include <linux/ump.h>
+#include <linux/ump-ioctl.h>
+
+#include <linux/version.h>
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/fs.h> /* file system operations */
+#include <linux/cdev.h> /* character device definitions */
+#include <linux/ioport.h> /* request_mem_region */
+#include <linux/mm.h> /* memory manager definitions */
+#include <linux/pfn.h>
+#include <linux/highmem.h> /*kmap*/
+
+#include <linux/compat.h> /* is_compat_task */
+
+#include <common/ump_kernel_core.h>
+#include <ump_arch.h>
+#include <common/ump_kernel_priv.h>
+
+/* VMA close handler: tear down the CPU mapping bookkeeping created in
+ * umpp_linux_mmap() and drop the UMP reference taken there.
+ * The handle is saved in a local before umpp_dd_remove_cpu_mapping()
+ * frees the mapping object. (The unused 'session' local from the
+ * original has been removed.) */
+static void umpp_vm_close(struct vm_area_struct *vma)
+{
+ umpp_cpu_mapping * mapping;
+ ump_dd_handle handle;
+
+ mapping = (umpp_cpu_mapping*)vma->vm_private_data;
+ UMP_ASSERT(mapping);
+
+ handle = mapping->handle;
+
+ umpp_dd_remove_cpu_mapping(mapping->handle, mapping); /* will free the mapping object */
+ ump_dd_release(handle);
+}
+
+
+static const struct vm_operations_struct umpp_vm_ops = {
+ .close = umpp_vm_close
+};
+
+/* Back an allocation with physical pages.
+ * Rounds alloc->size up to a page boundary, allocates a block descriptor
+ * per page, then allocates and zeroes each page (flushing it to memory so
+ * the zeroes are visible to non-coherent devices). On partial failure all
+ * pages allocated so far and the descriptor array are freed and -ENOMEM
+ * returned. */
+int umpp_phys_commit(umpp_allocation * alloc)
+{
+ uint64_t i;
+
+ /* round up to a page boundary */
+ alloc->size = (alloc->size + PAGE_SIZE - 1) & ~((uint64_t)PAGE_SIZE-1) ;
+ /* calculate number of pages */
+ alloc->blocksCount = alloc->size >> PAGE_SHIFT;
+
+ /* NOTE(review): this guard compares against SIZE_MAX and can never
+ * trigger when size_t is 64-bit — confirm the intended overflow check */
+ if( (sizeof(ump_dd_physical_block_64) * alloc->blocksCount) > ((size_t)-1))
+ {
+ printk(KERN_WARNING "UMP: umpp_phys_commit - trying to allocate more than possible\n");
+ return -ENOMEM;
+ }
+
+ alloc->block_array = kmalloc(sizeof(ump_dd_physical_block_64) * alloc->blocksCount, __GFP_HARDWALL | GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
+ if (NULL == alloc->block_array)
+ {
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < alloc->blocksCount; i++)
+ {
+ void * mp;
+ struct page * page = alloc_page(GFP_HIGHUSER | __GFP_NORETRY | __GFP_NOWARN | __GFP_COLD);
+ if (NULL == page)
+ {
+ break;
+ }
+
+ alloc->block_array[i].addr = PFN_PHYS(page_to_pfn(page));
+ alloc->block_array[i].size = PAGE_SIZE;
+
+ mp = kmap(page);
+ if (NULL == mp)
+ {
+ __free_page(page);
+ break;
+ }
+
+ memset(mp, 0x00, PAGE_SIZE); /* instead of __GFP_ZERO, so we can do cache maintenance */
+ ump_sync_to_memory(PFN_PHYS(page_to_pfn(page)), mp, PAGE_SIZE);
+ kunmap(page);
+ }
+
+ if (i == alloc->blocksCount)
+ {
+ return 0;
+ }
+ else
+ {
+ /* ran out of memory part-way: free the pages committed so far */
+ uint64_t j;
+ for (j = 0; j < i; j++)
+ {
+ struct page * page;
+ page = pfn_to_page(alloc->block_array[j].addr >> PAGE_SHIFT);
+ __free_page(page);
+ }
+
+ kfree(alloc->block_array);
+
+ return -ENOMEM;
+ }
+}
+
+/* Release the physical pages and the block descriptor array committed by
+ * umpp_phys_commit(). */
+void umpp_phys_free(umpp_allocation * alloc)
+{
+ uint64_t i;
+
+ for (i = 0; i < alloc->blocksCount; i++)
+ {
+ __free_page(pfn_to_page(alloc->block_array[i].addr >> PAGE_SHIFT));
+ }
+
+ kfree(alloc->block_array);
+}
+
+/* mmap() handler for the UMP device.
+ * vma->vm_pgoff encodes both the UMP secure id and a page offset into the
+ * allocation (the packing differs between 32-bit/compat and native 64-bit
+ * callers). Takes a reference on the allocation that is dropped by
+ * umpp_vm_close() when the mapping goes away, or on any error here.
+ * Fixed: vm_insert_mixed() failures were previously overwritten by later
+ * iterations and the function returned 0 regardless; now the first
+ * failure aborts the mapping. */
+int umpp_linux_mmap(struct file * filp, struct vm_area_struct * vma)
+{
+ ump_secure_id id;
+ ump_dd_handle h;
+ size_t offset;
+ int err = -EINVAL;
+ size_t length = vma->vm_end - vma->vm_start;
+
+ umpp_cpu_mapping * map = NULL;
+ umpp_session *session = filp->private_data;
+
+ if ( 0 == length )
+ {
+ return -EINVAL;
+ }
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (NULL == map)
+ {
+ WARN_ON(1);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* unpack our arg */
+#if defined CONFIG_64BIT && CONFIG_64BIT
+ if (is_compat_task())
+ {
+#endif
+ id = vma->vm_pgoff >> UMP_LINUX_OFFSET_BITS_32;
+ offset = vma->vm_pgoff & UMP_LINUX_OFFSET_MASK_32;
+#if defined CONFIG_64BIT && CONFIG_64BIT
+ }
+ else
+ {
+ id = vma->vm_pgoff >> UMP_LINUX_OFFSET_BITS_64;
+ offset = vma->vm_pgoff & UMP_LINUX_OFFSET_MASK_64;
+ }
+#endif
+
+ h = ump_dd_from_secure_id(id);
+ if (UMP_DD_INVALID_MEMORY_HANDLE != h)
+ {
+ uint64_t i;
+ uint64_t block_idx;
+ uint64_t block_offset;
+ uint64_t paddr;
+ umpp_allocation * alloc;
+ uint64_t last_byte;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_IO | VM_MIXEDMAP | VM_DONTDUMP;
+#else
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO | VM_MIXEDMAP;
+#endif
+ vma->vm_ops = &umpp_vm_ops;
+ vma->vm_private_data = map;
+
+ alloc = (umpp_allocation*)h;
+
+ if( (alloc->flags & UMP_CONSTRAINT_UNCACHED) != 0)
+ {
+ /* cache disabled flag set, disable caching for cpu mappings */
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ }
+
+ /* reject mappings that run past the end of the allocation
+ * (the second test also catches arithmetic wrap-around) */
+ last_byte = length + (offset << PAGE_SHIFT) - 1;
+ if (last_byte >= alloc->size || last_byte < (offset << PAGE_SHIFT))
+ {
+ goto err_out;
+ }
+
+ if (umpp_dd_find_start_block(alloc, offset << PAGE_SHIFT, &block_idx, &block_offset))
+ {
+ goto err_out;
+ }
+
+ paddr = alloc->block_array[block_idx].addr + block_offset;
+
+ for (i = 0; i < (length >> PAGE_SHIFT); i++)
+ {
+ /* check if we've overrun the current block, if so move to the next block */
+ if (paddr >= (alloc->block_array[block_idx].addr + alloc->block_array[block_idx].size))
+ {
+ block_idx++;
+ UMP_ASSERT(block_idx < alloc->blocksCount);
+ paddr = alloc->block_array[block_idx].addr;
+ }
+
+ err = vm_insert_mixed(vma, vma->vm_start + (i << PAGE_SHIFT), paddr >> PAGE_SHIFT);
+ if (err)
+ {
+ /* abort on the first failed insertion instead of letting
+ * a later success overwrite the error and returning 0 */
+ goto err_out;
+ }
+ paddr += PAGE_SIZE;
+ }
+
+ map->vaddr_start = (void*)vma->vm_start;
+ map->nr_pages = length >> PAGE_SHIFT;
+ map->page_off = offset;
+ map->handle = h;
+ map->session = session;
+
+ umpp_dd_add_cpu_mapping(h, map);
+
+ return 0;
+
+ err_out:
+
+ ump_dd_release(h);
+ }
+
+ kfree(map);
+
+out:
+
+ return err;
+}
+
diff --git a/drivers/base/ump/src/linux/ump_kernel_linux_mem.h b/drivers/base/ump/src/linux/ump_kernel_linux_mem.h
new file mode 100644
index 000000000000..4fbf3b2a9119
--- /dev/null
+++ b/drivers/base/ump/src/linux/ump_kernel_linux_mem.h
@@ -0,0 +1,26 @@
+/*
+ *
+ * (C) COPYRIGHT 2008-2011, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef _UMP_KERNEL_LINUX_MEM_H_
+#define _UMP_KERNEL_LINUX_MEM_H_
+
+
+int umpp_linux_mmap(struct file * filp, struct vm_area_struct * vma);
+
+#endif /* _UMP_KERNEL_LINUX_MEM_H_ */
diff --git a/drivers/base/ump/src/ump_arch.h b/drivers/base/ump/src/ump_arch.h
new file mode 100644
index 000000000000..2303d56505a7
--- /dev/null
+++ b/drivers/base/ump/src/ump_arch.h
@@ -0,0 +1,52 @@
+/*
+ *
+ * (C) COPYRIGHT 2010-2011, 2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+#ifndef _UMP_ARCH_H_
+#define _UMP_ARCH_H_
+
+#include <common/ump_kernel_core.h>
+
+/**
+ * Device specific setup.
+ * Called by the UMP core code to do host OS/device specific setup.
+ * Typical use case is device node creation for talking to user space.
+ * @return UMP_OK on success, any other value on failure
+ */
+extern ump_result umpp_device_initialize(void);
+
+/**
+ * Device specific teardown.
+ * Undo anything done by umpp_device_initialize.
+ */
+extern void umpp_device_terminate(void);
+
+/**
+ * Allocate/commit the physical memory backing @alloc.
+ * @param alloc The allocation to provide physical backing for
+ * @return presumably nonzero on success, 0 on failure — confirm against the implementation
+ */
+extern int umpp_phys_commit(umpp_allocation * alloc);
+
+/**
+ * Release the physical memory previously committed for @alloc by umpp_phys_commit.
+ * @param alloc The allocation whose physical backing is to be freed
+ */
+extern void umpp_phys_free(umpp_allocation * alloc);
+
+#endif /* _UMP_ARCH_H_ */
diff --git a/drivers/base/ump/ump_ref_drv.h b/drivers/base/ump/ump_ref_drv.h
new file mode 100644
index 000000000000..9a265fe12587
--- /dev/null
+++ b/drivers/base/ump/ump_ref_drv.h
@@ -0,0 +1,33 @@
+/*
+ *
+ * (C) COPYRIGHT 2008-2013 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ *
+ */
+
+
+
+
+
+/**
+ * @file ump_ref_drv.h
+ *
+ * Link to the user-space part of the UMP API, for use by the MALI 400
+ * gralloc; it currently only pulls in the main UMP header <ump/ump.h>.
+ */
+
+#ifndef _UMP_REF_DRV_H_
+#define _UMP_REF_DRV_H_
+
+#include <ump/ump.h>
+
+
+#endif /* _UMP_REF_DRV_H_ */