author    Rebecca Schultz Zavin <rebecca@android.com>    2013-04-01 16:40:02 -0700
committer Arve Hjønnevåg <arve@android.com>              2013-04-29 14:43:24 -0700
commit    e29c274a330b23fb873a6349d4c7fc285cabecb6
tree      1de97277e1e4525677f7e4794e5b19c46135155a
parent    2ab266e5724d4d1de0b67fff6e04409f856b1b3a
gpu: ion: ion_chunk_heap: Zero chunk heap memory at creation time
Allocations from the ion heap need to be zeroed to protect userspace
from seeing memory belonging to other processes. The first allocations
from this heap were not zeroed, allowing users to see memory from other
processes on a warm reset.

Change-Id: I524a7b79cb76c390c870fcf8b30d213185fc85a0
Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
 drivers/gpu/ion/ion_chunk_heap.c | 34 ++++++++++++++++++++++++++++++----
 1 file changed, 32 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/ion/ion_chunk_heap.c b/drivers/gpu/ion/ion_chunk_heap.c
index 7f482b62547..cc824f3a81d 100644
--- a/drivers/gpu/ion/ion_chunk_heap.c
+++ b/drivers/gpu/ion/ion_chunk_heap.c
@@ -140,6 +140,10 @@ static struct ion_heap_ops chunk_heap_ops = {
 struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
 {
 	struct ion_chunk_heap *chunk_heap;
+	struct vm_struct *vm_struct;
+	pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
+	int i, ret;
+
 	chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
 	if (!chunk_heap)
 		return ERR_PTR(-ENOMEM);
@@ -149,12 +153,30 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
 	chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
 					   PAGE_SHIFT, -1);
 	if (!chunk_heap->pool) {
-		kfree(chunk_heap);
-		return ERR_PTR(-ENOMEM);
+		ret = -ENOMEM;
+		goto error_gen_pool_create;
 	}
 	chunk_heap->base = heap_data->base;
 	chunk_heap->size = heap_data->size;
 	chunk_heap->allocated = 0;
+
+	vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
+	if (!vm_struct) {
+		ret = -ENOMEM;
+		goto error;
+	}
+	for (i = 0; i < chunk_heap->size; i += PAGE_SIZE) {
+		struct page *page = phys_to_page(chunk_heap->base + i);
+		struct page **pages = &page;
+
+		ret = map_vm_area(vm_struct, pgprot, &pages);
+		if (ret)
+			goto error_map_vm_area;
+		memset(vm_struct->addr, 0, PAGE_SIZE);
+		unmap_kernel_range((unsigned long)vm_struct->addr, PAGE_SIZE);
+	}
+	free_vm_area(vm_struct);
+
 	__dma_page_cpu_to_dev(phys_to_page(heap_data->base), 0, heap_data->size,
 			      DMA_BIDIRECTIONAL);
 	gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
@@ -165,6 +187,14 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
 		heap_data->size, heap_data->align);

 	return &chunk_heap->heap;
+
+error_map_vm_area:
+	free_vm_area(vm_struct);
+error:
+	gen_pool_destroy(chunk_heap->pool);
+error_gen_pool_create:
+	kfree(chunk_heap);
+	return ERR_PTR(ret);
 }

 void ion_chunk_heap_destroy(struct ion_heap *heap)
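
For reference, the zeroing loop the patch inlines could be read as a standalone
helper along the lines below. This is only a sketch: the helper name
ion_chunk_heap_zero() is not part of the patch, and it assumes the same
kernel-internal APIs the patch itself uses (get_vm_area(), the old
map_vm_area() that takes a struct page ***, unmap_kernel_range(), and ARM's
phys_to_page()). The idea is that a single one-page scratch vm_struct is
reused to map, memset(), and unmap each physical page of the carveout in
turn, so the whole (possibly large) region never needs a contiguous kernel
mapping.

/*
 * Sketch only: hypothetical helper mirroring the zeroing loop added by this
 * patch.  Maps one physical page at a time into a scratch kernel virtual
 * area, zeroes it, then unmaps it.
 */
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static int ion_chunk_heap_zero(unsigned long base, size_t size)
{
	pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
	struct vm_struct *vm_struct;
	size_t i;
	int ret = 0;

	/* One-page scratch area in the kernel's vmalloc space. */
	vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
	if (!vm_struct)
		return -ENOMEM;

	for (i = 0; i < size; i += PAGE_SIZE) {
		struct page *page = phys_to_page(base + i);
		struct page **pages = &page;

		/* Old map_vm_area() signature: takes struct page ***. */
		ret = map_vm_area(vm_struct, pgprot, &pages);
		if (ret)
			break;
		memset(vm_struct->addr, 0, PAGE_SIZE);
		unmap_kernel_range((unsigned long)vm_struct->addr, PAGE_SIZE);
	}

	free_vm_area(vm_struct);
	return ret;
}

With a helper like this, ion_chunk_heap_create() would only need a single
call before the __dma_page_cpu_to_dev() cache-maintenance step; the patch
keeps the loop inline instead.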