Diffstat (limited to 'drivers/gpu/ion/ion.c')
-rw-r--r--  drivers/gpu/ion/ion.c  260
1 file changed, 160 insertions(+), 100 deletions(-)
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index 6c93365c3db..e4ffc9d5b94 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -32,10 +32,13 @@
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
+#include <linux/idr.h>
#include "ion_priv.h"
+#include "compat_ion.h"
/**
* struct ion_device - the metadata of the ion device node
@@ -63,6 +66,7 @@ struct ion_device {
* @node: node in the tree of all clients
* @dev: backpointer to ion device
* @handles: an rb tree of all the handles in this client
+ * @idr: an idr space for allocating handle ids
* @lock: lock protecting the tree of handles
* @name: used for debugging
* @task: used for debugging
@@ -75,6 +79,7 @@ struct ion_client {
struct rb_node node;
struct ion_device *dev;
struct rb_root handles;
+ struct idr idr;
struct mutex lock;
const char *name;
struct task_struct *task;
@@ -89,7 +94,7 @@ struct ion_client {
* @buffer: pointer to the buffer
* @node: node in the client's handle rbtree
* @kmap_cnt: count of times this client has mapped to kernel
- * @dmap_cnt: count of times this client has mapped for dma
+ * @id: client-unique id allocated by client->idr
*
* Modifications to node, map_cnt or mapping should be protected by the
* lock in the client. Other fields are never changed after initialization.
@@ -100,17 +105,38 @@ struct ion_handle {
struct ion_buffer *buffer;
struct rb_node node;
unsigned int kmap_cnt;
+ int id;
};
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
- return ((buffer->flags & ION_FLAG_CACHED) &&
- !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
+ return ((buffer->flags & ION_FLAG_CACHED) &&
+ !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
}
bool ion_buffer_cached(struct ion_buffer *buffer)
{
- return !!(buffer->flags & ION_FLAG_CACHED);
+ return !!(buffer->flags & ION_FLAG_CACHED);
+}
+
+static inline struct page *ion_buffer_page(struct page *page)
+{
+ return (struct page *)((unsigned long)page & ~(1UL));
+}
+
+static inline bool ion_buffer_page_is_dirty(struct page *page)
+{
+ return !!((unsigned long)page & 1UL);
+}
+
+static inline void ion_buffer_page_dirty(struct page **page)
+{
+ *page = (struct page *)((unsigned long)(*page) | 1UL);
+}
+
+static inline void ion_buffer_page_clean(struct page **page)
+{
+ *page = (struct page *)((unsigned long)(*page) & ~(1UL));
}
/* this function should only be called while dev->lock is held */
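The four ion_buffer_page_* helpers above implement a tagged-pointer scheme: struct page is word-aligned, so bit 0 of any valid pointer is guaranteed clear and can carry the per-page dirty flag with no extra storage. A minimal stand-alone sketch of the same trick, using a hypothetical struct record in place of struct page:

/* Illustrative only: stash a flag in bit 0 of an aligned pointer. */
struct record { int payload; };

static inline struct record *record_tag_dirty(struct record *p)
{
	return (struct record *)((unsigned long)p | 1UL);
}

static inline bool record_is_dirty(struct record *p)
{
	return !!((unsigned long)p & 1UL);
}

static inline struct record *record_untag(struct record *p)
{
	return (struct record *)((unsigned long)p & ~1UL);
}

Anything that dereferences the pointer must strip the tag first, which is exactly what ion_buffer_page() does for the fault handler later in this patch.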
@@ -139,8 +165,6 @@ static void ion_buffer_add(struct ion_device *dev,
rb_insert_color(&buffer->node, &dev->buffers);
}
-static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);
-
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
struct ion_device *dev,
@@ -178,24 +202,32 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
buffer->size = len;
table = heap->ops->map_dma(heap, buffer);
- if (IS_ERR_OR_NULL(table)) {
+ if (WARN_ONCE(table == NULL, "heap->ops->map_dma should return ERR_PTR on error"))
+ table = ERR_PTR(-EINVAL);
+ if (IS_ERR(table)) {
heap->ops->free(buffer);
kfree(buffer);
return ERR_PTR(PTR_ERR(table));
}
buffer->sg_table = table;
if (ion_buffer_fault_user_mappings(buffer)) {
- for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
- i) {
- if (sg_dma_len(sg) == PAGE_SIZE)
- continue;
- pr_err("%s: cached mappings that will be faulted in "
- "must have pagewise sg_lists\n", __func__);
- ret = -EINVAL;
- goto err;
+ int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+ struct scatterlist *sg;
+ int i, j, k = 0;
+
+ buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
+ if (!buffer->pages) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
+
+ for (j = 0; j < sg_dma_len(sg) / PAGE_SIZE; j++)
+ buffer->pages[k++] = page++;
}
- ret = ion_buffer_alloc_dirty(buffer);
if (ret)
goto err;
}
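Note the choice of vmalloc() for the pages array: it is sized by buffer pages rather than scatterlist entries, so it can get large. A 64 MiB buffer with 4 KiB pages, for instance, needs 64 MiB / 4 KiB = 16384 entries, i.e. 128 KiB of pointer storage on a 64-bit kernel; an allocation that size is far more likely to succeed from vmalloc() than from kmalloc(), which requires physically contiguous memory.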
@@ -222,6 +254,9 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
err:
heap->ops->unmap_dma(heap, buffer);
heap->ops->free(buffer);
+err1:
+ if (buffer->pages)
+ vfree(buffer->pages);
err2:
kfree(buffer);
return ERR_PTR(ret);
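A recurring theme in this patch is replacing IS_ERR_OR_NULL() with IS_ERR() plus a WARN_ONCE() on NULL: the convention being enforced is that heap ops encode failure as an ERR_PTR() value and never return NULL. A hedged sketch of an op following that convention (demo_map_dma is hypothetical, not an actual ion heap op):

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Hypothetical op: return ERR_PTR(-errno) on failure, never NULL. */
static struct sg_table *demo_map_dma(void)
{
	struct sg_table *table = kzalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return ERR_PTR(-ENOMEM);
	return table;
}

Callers then need only IS_ERR(); the WARN_ONCE() above flags heaps that still return NULL during the transition.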
@@ -233,8 +268,8 @@ void ion_buffer_destroy(struct ion_buffer *buffer)
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
buffer->heap->ops->unmap_dma(buffer->heap, buffer);
buffer->heap->ops->free(buffer);
- if (buffer->flags & ION_FLAG_CACHED)
- kfree(buffer->dirty);
+ if (buffer->pages)
+ vfree(buffer->pages);
kfree(buffer);
}
@@ -326,6 +361,7 @@ static void ion_handle_destroy(struct kref *kref)
ion_handle_kmap_put(handle);
mutex_unlock(&buffer->lock);
+ idr_remove(&client->idr, handle->id);
if (!RB_EMPTY_NODE(&handle->node))
rb_erase(&handle->node, &client->handles);
@@ -353,47 +389,56 @@ static int ion_handle_put(struct ion_handle *handle)
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
struct ion_buffer *buffer)
{
- struct rb_node *n;
-
- for (n = rb_first(&client->handles); n; n = rb_next(n)) {
- struct ion_handle *handle = rb_entry(n, struct ion_handle,
- node);
- if (handle->buffer == buffer)
- return handle;
- }
- return NULL;
-}
-
-static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
-{
struct rb_node *n = client->handles.rb_node;
while (n) {
- struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
- node);
- if (handle < handle_node)
+ struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
+ if (buffer < entry->buffer)
n = n->rb_left;
- else if (handle > handle_node)
+ else if (buffer > entry->buffer)
n = n->rb_right;
else
- return true;
+ return entry;
}
- return false;
+ return ERR_PTR(-EINVAL);
+}
+
+static struct ion_handle *ion_uhandle_get(struct ion_client *client, int id)
+{
+ return idr_find(&client->idr, id);
}
-static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
+static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
+ return (ion_uhandle_get(client, handle->id) == handle);
+}
+
+static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
+{
+ int rc;
struct rb_node **p = &client->handles.rb_node;
struct rb_node *parent = NULL;
struct ion_handle *entry;
+ do {
+ int id;
+ rc = idr_pre_get(&client->idr, GFP_KERNEL);
+ if (!rc)
+ return -ENOMEM;
+ rc = idr_get_new_above(&client->idr, handle, 1, &id);
+ handle->id = id;
+ } while (rc == -EAGAIN);
+
+ if (rc < 0)
+ return rc;
+
while (*p) {
parent = *p;
entry = rb_entry(parent, struct ion_handle, node);
- if (handle < entry)
+ if (handle->buffer < entry->buffer)
p = &(*p)->rb_left;
- else if (handle > entry)
+ else if (handle->buffer > entry->buffer)
p = &(*p)->rb_right;
else
WARN(1, "%s: buffer already found.", __func__);
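Two changes meet in this hunk. ion_handle_lookup() now does a keyed rb-tree search on the buffer pointer instead of a linear scan, and handle validation collapses to a single idr lookup: a handle is valid iff its id maps back to the same handle pointer. The id itself is allocated with the old two-step idr API, where idr_pre_get() preallocates memory and idr_get_new_above() can still fail with -EAGAIN under contention, hence the retry loop. For comparison, a hedged sketch of the same allocation on later kernels (3.9 and up), where that pair was replaced by idr_alloc(); this assumes the same client->idr field:

/* Sketch only: post-3.9 equivalent of the pre_get/get_new_above loop. */
int id;

idr_preload(GFP_KERNEL);
id = idr_alloc(&client->idr, handle, 1, 0, GFP_NOWAIT);
idr_preload_end();
if (id < 0)
	return id;
handle->id = id;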
@@ -401,6 +446,8 @@ static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
rb_link_node(&handle->node, parent, p);
rb_insert_color(&handle->node, &client->handles);
+
+ return 0;
}
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
@@ -411,6 +458,7 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
struct ion_device *dev = client->dev;
struct ion_buffer *buffer = NULL;
struct ion_heap *heap;
+ int ret;
pr_debug("%s: len %d align %d heap_id_mask %u flags %x\n", __func__,
len, align, heap_id_mask, flags);
@@ -431,7 +479,7 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
if (!((1 << heap->id) & heap_id_mask))
continue;
buffer = ion_buffer_create(heap, dev, len, align, flags);
- if (!IS_ERR_OR_NULL(buffer))
+ if (!IS_ERR(buffer))
break;
}
up_read(&dev->lock);
@@ -450,12 +498,16 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
*/
ion_buffer_put(buffer);
- if (!IS_ERR(handle)) {
- mutex_lock(&client->lock);
- ion_handle_add(client, handle);
- mutex_unlock(&client->lock);
- }
+ if (IS_ERR(handle))
+ return handle;
+ mutex_lock(&client->lock);
+ ret = ion_handle_add(client, handle);
+ if (ret) {
+ ion_handle_put(handle);
+ handle = ERR_PTR(ret);
+ }
+ mutex_unlock(&client->lock);
return handle;
}
@@ -515,7 +567,9 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
return buffer->vaddr;
}
vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
- if (IS_ERR_OR_NULL(vaddr))
+ if (WARN_ONCE(vaddr == NULL, "heap->ops->map_kernel should return ERR_PTR on error"))
+ return ERR_PTR(-EINVAL);
+ if (IS_ERR(vaddr))
return vaddr;
buffer->vaddr = vaddr;
buffer->kmap_cnt++;
@@ -532,7 +586,7 @@ static void *ion_handle_kmap_get(struct ion_handle *handle)
return buffer->vaddr;
}
vaddr = ion_buffer_kmap_get(buffer);
- if (IS_ERR_OR_NULL(vaddr))
+ if (IS_ERR(vaddr))
return vaddr;
handle->kmap_cnt++;
return vaddr;
@@ -673,6 +727,7 @@ struct ion_client *ion_client_create(struct ion_device *dev,
client->dev = dev;
client->handles = RB_ROOT;
+ idr_init(&client->idr);
mutex_init(&client->lock);
client->name = name;
client->task = task;
@@ -713,6 +768,10 @@ void ion_client_destroy(struct ion_client *client)
node);
ion_handle_destroy(&handle->ref);
}
+
+ idr_remove_all(&client->idr);
+ idr_destroy(&client->idr);
+
down_write(&dev->lock);
if (client->task)
put_task_struct(client->task);
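The teardown here pairs with the idr_init() added to ion_client_create() above. On kernels of this vintage idr_destroy() only releases the idr's cached internal layers, so idr_remove_all() has to be called first to drop the entries themselves; later kernels folded that behavior into idr_destroy() and removed idr_remove_all() entirely.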
@@ -764,17 +823,6 @@ static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
{
}
-static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
-{
- unsigned long pages = buffer->sg_table->nents;
- unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;
-
- buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
- if (!buffer->dirty)
- return -ENOMEM;
- return 0;
-}
-
struct ion_vma_list {
struct list_head list;
struct vm_area_struct *vma;
@@ -784,9 +832,9 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
struct device *dev,
enum dma_data_direction dir)
{
- struct scatterlist *sg;
- int i;
struct ion_vma_list *vma_list;
+ int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+ int i;
pr_debug("%s: syncing for device %s\n", __func__,
dev ? dev_name(dev) : "null");
@@ -795,11 +843,12 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
return;
mutex_lock(&buffer->lock);
- for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
- if (!test_bit(i, buffer->dirty))
- continue;
- dma_sync_sg_for_device(dev, sg, 1, dir);
- clear_bit(i, buffer->dirty);
+ for (i = 0; i < pages; i++) {
+ struct page *page = buffer->pages[i];
+
+ if (ion_buffer_page_is_dirty(page))
+ __dma_page_cpu_to_dev(page, 0, PAGE_SIZE, dir);
+ ion_buffer_page_clean(buffer->pages + i);
}
list_for_each_entry(vma_list, &buffer->vmas, list) {
struct vm_area_struct *vma = vma_list->vma;
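Two details are worth noting here. The removed code synced whole scatterlist entries through the generic dma_sync_sg_for_device(); the replacement walks the flat pages array and flushes only pages whose dirty tag is set, clearing the tag as it goes. Also, __dma_page_cpu_to_dev() is an ARM-internal cache-maintenance helper rather than part of the portable DMA API, so this path as written ties the fault-mapping feature to ARM.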
@@ -813,21 +862,18 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct ion_buffer *buffer = vma->vm_private_data;
- struct scatterlist *sg;
- int i;
+ int ret;
mutex_lock(&buffer->lock);
- set_bit(vmf->pgoff, buffer->dirty);
+ ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
- for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
- if (i != vmf->pgoff)
- continue;
- dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
- vm_insert_page(vma, (unsigned long)vmf->virtual_address,
- sg_page(sg));
- break;
- }
+ BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
+ ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
+ ion_buffer_page(buffer->pages[vmf->pgoff]));
mutex_unlock(&buffer->lock);
+ if (ret)
+ return VM_FAULT_ERROR;
+
return VM_FAULT_NOPAGE;
}
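ion_vm_fault() now marks the faulting page dirty in the flat array and inserts just that page into the VMA, so only pages userspace actually touches get mapped (and later flushed). For context, a hedged sketch of how such a handler is typically wired up through vm_operations_struct in an mmap() op; the demo_* names are hypothetical and ion's own ops table is not shown in this hunk:

static const struct vm_operations_struct demo_vm_ops = {
	.fault = ion_vm_fault,	/* the handler above */
};

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_private_data = file->private_data;	/* the ion_buffer */
	vma->vm_ops = &demo_vm_ops;
	return 0;
}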
@@ -939,8 +985,6 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
mutex_unlock(&buffer->lock);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
- if (!vaddr)
- return -ENOMEM;
return 0;
}
@@ -1017,9 +1061,10 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
struct dma_buf *dmabuf;
struct ion_buffer *buffer;
struct ion_handle *handle;
+ int ret;
dmabuf = dma_buf_get(fd);
- if (IS_ERR_OR_NULL(dmabuf))
+ if (IS_ERR(dmabuf))
return ERR_PTR(PTR_ERR(dmabuf));
/* if this memory came from ion */
@@ -1034,14 +1079,18 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
mutex_lock(&client->lock);
/* if a handle exists for this buffer just take a reference to it */
handle = ion_handle_lookup(client, buffer);
- if (!IS_ERR_OR_NULL(handle)) {
+ if (!IS_ERR(handle)) {
ion_handle_get(handle);
goto end;
}
handle = ion_handle_create(client, buffer);
- if (IS_ERR_OR_NULL(handle))
+ if (IS_ERR(handle))
goto end;
- ion_handle_add(client, handle);
+ ret = ion_handle_add(client, handle);
+ if (ret) {
+ ion_handle_put(handle);
+ handle = ERR_PTR(ret);
+ }
end:
mutex_unlock(&client->lock);
dma_buf_put(dmabuf);
@@ -1055,7 +1104,7 @@ static int ion_sync_for_device(struct ion_client *client, int fd)
struct ion_buffer *buffer;
dmabuf = dma_buf_get(fd);
- if (IS_ERR_OR_NULL(dmabuf))
+ if (IS_ERR(dmabuf))
return PTR_ERR(dmabuf);
/* if this memory came from ion */
@@ -1081,17 +1130,20 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case ION_IOC_ALLOC:
{
struct ion_allocation_data data;
+ struct ion_handle *handle;
if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
return -EFAULT;
- data.handle = ion_alloc(client, data.len, data.align,
+ handle = ion_alloc(client, data.len, data.align,
data.heap_id_mask, data.flags);
- if (IS_ERR(data.handle))
- return PTR_ERR(data.handle);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ data.handle = handle->id;
if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
- ion_free(client, data.handle);
+ ion_free(client, handle);
return -EFAULT;
}
break;
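This is the user-visible half of the idr conversion: data.handle now carries the small integer id instead of a raw kernel pointer, so the kernel can validate handles cheaply and never leaks kernel addresses to userspace. A hedged userspace sketch of the call; the device path, heap id, flags, and uapi header location are assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ion.h>	/* assumed uapi header location */

int main(void)
{
	int ion_fd = open("/dev/ion", O_RDWR);	/* assumed device node */
	struct ion_allocation_data req = {
		.len          = 4096,
		.align        = 4096,
		.heap_id_mask = 1u << 0,	/* assumed: first heap */
		.flags        = ION_FLAG_CACHED,
	};

	if (ion_fd < 0)
		return 1;
	if (ioctl(ion_fd, ION_IOC_ALLOC, &req) == 0)
		printf("allocated, handle id = %d\n", (int)req.handle);
	return 0;
}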
@@ -1099,27 +1151,29 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case ION_IOC_FREE:
{
struct ion_handle_data data;
- bool valid;
+ struct ion_handle *handle;
if (copy_from_user(&data, (void __user *)arg,
sizeof(struct ion_handle_data)))
return -EFAULT;
mutex_lock(&client->lock);
- valid = ion_handle_validate(client, data.handle);
+ handle = ion_uhandle_get(client, data.handle);
mutex_unlock(&client->lock);
- if (!valid)
+ if (!handle)
return -EINVAL;
- ion_free(client, data.handle);
+ ion_free(client, handle);
break;
}
case ION_IOC_SHARE:
case ION_IOC_MAP:
{
struct ion_fd_data data;
+ struct ion_handle *handle;
if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
return -EFAULT;
- data.fd = ion_share_dma_buf_fd(client, data.handle);
+ handle = ion_uhandle_get(client, data.handle);
+ data.fd = ion_share_dma_buf_fd(client, handle);
if (copy_to_user((void __user *)arg, &data, sizeof(data)))
return -EFAULT;
if (data.fd < 0)
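One difference from ION_IOC_FREE just above: this path passes the result of ion_uhandle_get() straight to ion_share_dma_buf_fd() without checking for NULL, so a bogus id from userspace reaches the share path unvalidated; tightening that check would presumably fall to a follow-up patch.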
@@ -1129,15 +1183,17 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case ION_IOC_IMPORT:
{
struct ion_fd_data data;
+ struct ion_handle *handle;
int ret = 0;
if (copy_from_user(&data, (void __user *)arg,
sizeof(struct ion_fd_data)))
return -EFAULT;
- data.handle = ion_import_dma_buf(client, data.fd);
- if (IS_ERR(data.handle)) {
- ret = PTR_ERR(data.handle);
- data.handle = NULL;
- }
+ handle = ion_import_dma_buf(client, data.fd);
+ if (IS_ERR(handle))
+ ret = PTR_ERR(handle);
+ else
+ data.handle = handle->id;
+
if (copy_to_user((void __user *)arg, &data,
sizeof(struct ion_fd_data)))
return -EFAULT;
@@ -1189,7 +1245,7 @@ static int ion_open(struct inode *inode, struct file *file)
pr_debug("%s: %d\n", __func__, __LINE__);
client = ion_client_create(dev, "user");
- if (IS_ERR_OR_NULL(client))
+ if (IS_ERR(client))
return PTR_ERR(client);
file->private_data = client;
@@ -1201,6 +1257,7 @@ static const struct file_operations ion_fops = {
.open = ion_open,
.release = ion_release,
.unlocked_ioctl = ion_ioctl,
+ .compat_ioctl = compat_ion_ioctl,
};
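Hooking .compat_ioctl up to compat_ion_ioctl() (from the new compat_ion.h include at the top of this patch) lets 32-bit processes drive the driver on a 64-bit kernel: any ioctl argument struct whose layout differs between the two ABIs has to be copied in using 32-bit field widths, widened, and forwarded to the native handler. A hedged sketch of the shape such a compat struct takes; the field set is an assumption, not the actual contents of compat_ion.c:

#include <linux/compat.h>

/* Hypothetical 32-bit layout of the allocation request. */
struct compat_demo_allocation_data {
	compat_size_t	len;
	compat_size_t	align;
	compat_uint_t	heap_id_mask;
	compat_uint_t	flags;
	compat_int_t	handle;
};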
static size_t ion_debug_heap_total(struct ion_client *client,
@@ -1271,6 +1328,9 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
seq_printf(s, "%16.s %16u\n", "total orphaned",
total_orphaned_size);
seq_printf(s, "%16.s %16u\n", "total ", total_size);
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+ seq_printf(s, "%16.s %16u\n", "deferred free",
+ heap->free_list_size);
seq_printf(s, "----------------------------------------------------\n");
if (heap->debug_show)
@@ -1382,7 +1442,7 @@ struct ion_device *ion_device_create(long (*custom_ioctl)
}
idev->debug_root = debugfs_create_dir("ion", NULL);
- if (IS_ERR_OR_NULL(idev->debug_root))
+ if (!idev->debug_root)
pr_err("ion: failed to create debug files.\n");
idev->custom_ioctl = custom_ioctl;