Diffstat (limited to 'drivers/staging/android/ion/ion_carveout_heap.c')
-rw-r--r--  drivers/staging/android/ion/ion_carveout_heap.c  119
1 file changed, 63 insertions(+), 56 deletions(-)
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
index 4a94b17da67..5165de2ce34 100644
--- a/drivers/staging/android/ion/ion_carveout_heap.c
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -14,7 +14,7 @@
*
*/
#include <linux/spinlock.h>
-
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
@@ -25,8 +25,6 @@
#include "ion.h"
#include "ion_priv.h"
-#include <asm/mach/map.h>
-
struct ion_carveout_heap {
struct ion_heap heap;
struct gen_pool *pool;
@@ -62,7 +60,11 @@ static int ion_carveout_heap_phys(struct ion_heap *heap,
struct ion_buffer *buffer,
ion_phys_addr_t *addr, size_t *len)
{
- *addr = buffer->priv_phys;
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+
+ *addr = paddr;
*len = buffer->size;
return 0;
}
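
For reference, the address recovery in the patched ion_carveout_heap_phys() can be read in isolation: the buffer no longer caches a physical address in priv_phys, it derives one from the single-entry sg_table that the allocate path (next hunk) stores in priv_virt. A minimal standalone sketch of that derivation (hypothetical helper name, plain phys_addr_t instead of ion_phys_addr_t), assuming the table always has exactly one segment:

#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/scatterlist.h>

/* Sketch only: recover the base physical address of a buffer that is
 * described by a one-segment sg_table, the way the patched
 * ion_carveout_heap_phys() does.
 */
static phys_addr_t carveout_table_phys(struct sg_table *table)
{
	struct page *page = sg_page(table->sgl);

	return PFN_PHYS(page_to_pfn(page));
}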
@@ -72,75 +74,66 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
unsigned long size, unsigned long align,
unsigned long flags)
{
- buffer->priv_phys = ion_carveout_allocate(heap, size, align);
- return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? -ENOMEM : 0;
-}
-
-static void ion_carveout_heap_free(struct ion_buffer *buffer)
-{
- struct ion_heap *heap = buffer->heap;
-
- ion_carveout_free(heap, buffer->priv_phys, buffer->size);
- buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
-}
-
-struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
struct sg_table *table;
+ ion_phys_addr_t paddr;
int ret;
+ if (align > PAGE_SIZE)
+ return -EINVAL;
+
table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!table)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
ret = sg_alloc_table(table, 1, GFP_KERNEL);
- if (ret) {
- kfree(table);
- return ERR_PTR(ret);
+ if (ret)
+ goto err_free;
+
+ paddr = ion_carveout_allocate(heap, size, align);
+ if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
+ ret = -ENOMEM;
+ goto err_free_table;
}
- sg_set_page(table->sgl, phys_to_page(buffer->priv_phys), buffer->size,
- 0);
- return table;
-}
-void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
-{
- sg_free_table(buffer->sg_table);
+ sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
+ buffer->priv_virt = table;
+
+ return 0;
+
+err_free_table:
+ sg_free_table(table);
+err_free:
+ kfree(table);
+ return ret;
}
-void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
- void *ret;
- int mtype = MT_MEMORY_NONCACHED;
+ struct ion_heap *heap = buffer->heap;
+ struct sg_table *table = buffer->priv_virt;
+ struct page *page = sg_page(table->sgl);
+ ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
- if (buffer->flags & ION_FLAG_CACHED)
- mtype = MT_MEMORY;
+ ion_heap_buffer_zero(buffer);
- ret = __arm_ioremap(buffer->priv_phys, buffer->size,
- mtype);
- if (ret == NULL)
- return ERR_PTR(-ENOMEM);
+ if (ion_buffer_cached(buffer))
+ dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+ DMA_BIDIRECTIONAL);
- return ret;
+ ion_carveout_free(heap, paddr, buffer->size);
+ sg_free_table(table);
+ kfree(table);
}
-void ion_carveout_heap_unmap_kernel(struct ion_heap *heap,
- struct ion_buffer *buffer)
+static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
- __arm_iounmap(buffer->vaddr);
- buffer->vaddr = NULL;
- return;
+ return buffer->priv_virt;
}
-int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
- struct vm_area_struct *vma)
+static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
{
- return remap_pfn_range(vma, vma->vm_start,
- __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
- vma->vm_end - vma->vm_start,
- pgprot_noncached(vma->vm_page_prot));
+ return;
}
static struct ion_heap_ops carveout_heap_ops = {
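
The hunk above funnels everything through one pattern: a gen_pool carveout allocation wrapped in a one-entry sg_table, which map_dma then hands back verbatim and free unwinds. A self-contained sketch of that allocation pattern, with a hypothetical helper name and no ion types (the pool is assumed to already cover the reserved carveout region):

#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Hypothetical helper: carve 'size' bytes out of 'pool' and describe
 * the region with a one-segment sg_table, mirroring the allocate path
 * above, including its unwind order on failure.
 */
static struct sg_table *carveout_alloc_table(struct gen_pool *pool,
					     size_t size)
{
	struct sg_table *table;
	unsigned long paddr;
	int ret;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto err_free;

	paddr = gen_pool_alloc(pool, size);	/* 0 means the pool is exhausted */
	if (!paddr) {
		ret = -ENOMEM;
		goto err_free_table;
	}

	sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
	return table;

err_free_table:
	sg_free_table(table);
err_free:
	kfree(table);
	return ERR_PTR(ret);
}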
@@ -149,14 +142,27 @@ static struct ion_heap_ops carveout_heap_ops = {
.phys = ion_carveout_heap_phys,
.map_dma = ion_carveout_heap_map_dma,
.unmap_dma = ion_carveout_heap_unmap_dma,
- .map_user = ion_carveout_heap_map_user,
- .map_kernel = ion_carveout_heap_map_kernel,
- .unmap_kernel = ion_carveout_heap_unmap_kernel,
+ .map_user = ion_heap_map_user,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
};
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
{
struct ion_carveout_heap *carveout_heap;
+ int ret;
+
+ struct page *page;
+ size_t size;
+
+ page = pfn_to_page(PFN_DOWN(heap_data->base));
+ size = heap_data->size;
+
+ ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+
+ ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
+ if (ret)
+ return ERR_PTR(ret);
carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
if (!carveout_heap)
@@ -172,6 +178,7 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
-1);
carveout_heap->heap.ops = &carveout_heap_ops;
carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
+ carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
return &carveout_heap->heap;
}
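
A carveout heap built by this code is typically described by board-level ion_platform_heap data; since ion_carveout_heap_create() now zeroes and syncs the whole region up front, base and size must name memory reserved away from the kernel's own allocator. A hypothetical registration sketch (the data values, helper name, and init hook are made-up examples, not part of this patch):

#include <linux/err.h>
#include <linux/init.h>
#include <linux/sizes.h>
#include "ion.h"
#include "ion_priv.h"

/* Hypothetical board data; base/size are examples only and must match
 * a region actually carved out of system memory at boot.
 */
static struct ion_platform_heap example_carveout_data = {
	.type = ION_HEAP_TYPE_CARVEOUT,
	.id   = 1,
	.name = "carveout",
	.base = 0x80000000,
	.size = SZ_16M,
};

static int __init example_carveout_init(void)
{
	struct ion_heap *heap;

	heap = ion_carveout_heap_create(&example_carveout_data);
	if (IS_ERR(heap))
		return PTR_ERR(heap);

	/* The heap would then be handed to ion_device_add_heap() on an
	 * already-created ion_device.
	 */
	return 0;
}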